diff --git a/.gitattributes b/.gitattributes index 2b60545d0896994614f3d176c0b790e960941e53..6453e9c87b94b2169b560d5333e220a9cd8ab7a5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -7544,3 +7544,32 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text 2023/Visual[[:space:]]Imitation[[:space:]]Learning[[:space:]]with[[:space:]]Patch[[:space:]]Rewards/7ff3492f-7d60-4240-bcc1-cecd00ae1b72_origin.pdf filter=lfs diff=lfs merge=lfs -text 2023/Visually-Augmented[[:space:]]Language[[:space:]]Modeling/fea11c19-f3c1-4765-aaca-6a4d7a8ff1ea_origin.pdf filter=lfs diff=lfs merge=lfs -text 2023/VoGE_[[:space:]]A[[:space:]]Differentiable[[:space:]]Volume[[:space:]]Renderer[[:space:]]using[[:space:]]Gaussian[[:space:]]Ellipsoids[[:space:]]for[[:space:]]Analysis-by-Synthesis/babfe1c6-0687-43cf-a603-79217b17846f_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Voint[[:space:]]Cloud_[[:space:]]Multi-View[[:space:]]Point[[:space:]]Cloud[[:space:]]Representation[[:space:]]for[[:space:]]3D[[:space:]]Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Volumetric[[:space:]]Optimal[[:space:]]Transportation[[:space:]]by[[:space:]]Fast[[:space:]]Fourier[[:space:]]Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Wasserstein[[:space:]]Auto-encoded[[:space:]]MDPs_[[:space:]]Formal[[:space:]]Verification[[:space:]]of[[:space:]]Efficiently[[:space:]]Distilled[[:space:]]RL[[:space:]]Policies[[:space:]]with[[:space:]]Many-sided[[:space:]]Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Weakly[[:space:]]Supervised[[:space:]]Explainable[[:space:]]Phrasal[[:space:]]Reasoning[[:space:]]with[[:space:]]Neural[[:space:]]Fuzzy[[:space:]]Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2023/Weakly[[:space:]]Supervised[[:space:]]Knowledge[[:space:]]Transfer[[:space:]]with[[:space:]]Probabilistic[[:space:]]Logical[[:space:]]Reasoning[[:space:]]for[[:space:]]Object[[:space:]]Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Weakly-supervised[[:space:]]HOI[[:space:]]Detection[[:space:]]via[[:space:]]Prior-guided[[:space:]]Bi-level[[:space:]]Representation[[:space:]]Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Weighted[[:space:]]Clock[[:space:]]Logic[[:space:]]Point[[:space:]]Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Weighted[[:space:]]Ensemble[[:space:]]Self-Supervised[[:space:]]Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/What[[:space:]]Can[[:space:]]we[[:space:]]Learn[[:space:]]From[[:space:]]The[[:space:]]Selective[[:space:]]Prediction[[:space:]]And[[:space:]]Uncertainty[[:space:]]Estimation[[:space:]]Performance[[:space:]]Of[[:space:]]523[[:space:]]Imagenet[[:space:]]Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/What[[:space:]]Do[[:space:]]Self-Supervised[[:space:]]Vision[[:space:]]Transformers[[:space:]]Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/What[[:space:]]Is[[:space:]]Missing[[:space:]]in[[:space:]]IRM[[:space:]]Training[[:space:]]and[[:space:]]Evaluation_[[:space:]]Challenges[[:space:]]and[[:space:]]Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/What[[:space:]]Makes[[:space:]]Convolutional[[:space:]]Models[[:space:]]Great[[:space:]]on[[:space:]]Long[[:space:]]Sequence[[:space:]]Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2023/What[[:space:]]shapes[[:space:]]the[[:space:]]loss[[:space:]]landscape[[:space:]]of[[:space:]]self[[:space:]]supervised[[:space:]]learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/When[[:space:]]Data[[:space:]]Geometry[[:space:]]Meets[[:space:]]Deep[[:space:]]Function_[[:space:]]Generalizing[[:space:]]Offline[[:space:]]Reinforcement[[:space:]]Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/When[[:space:]]to[[:space:]]Make[[:space:]]and[[:space:]]Break[[:space:]]Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Where[[:space:]]to[[:space:]]Diffuse,[[:space:]]How[[:space:]]to[[:space:]]Diffuse,[[:space:]]and[[:space:]]How[[:space:]]to[[:space:]]Get[[:space:]]Back_[[:space:]]Automated[[:space:]]Learning[[:space:]]for[[:space:]]Multivariate[[:space:]]Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Which[[:space:]]Layer[[:space:]]is[[:space:]]Learning[[:space:]]Faster_[[:space:]]A[[:space:]]Systematic[[:space:]]Exploration[[:space:]]of[[:space:]]Layer-wise[[:space:]]Convergence[[:space:]]Rate[[:space:]]for[[:space:]]Deep[[:space:]]Neural[[:space:]]Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Why[[:space:]](and[[:space:]]When)[[:space:]]does[[:space:]]Local[[:space:]]SGD[[:space:]]Generalize[[:space:]]Better[[:space:]]than[[:space:]]SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Why[[:space:]]adversarial[[:space:]]training[[:space:]]can[[:space:]]hurt[[:space:]]robust[[:space:]]accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2023/WiNeRT_[[:space:]]Towards[[:space:]]Neural[[:space:]]Ray[[:space:]]Tracing[[:space:]]for[[:space:]]Wireless[[:space:]]Channel[[:space:]]Modelling[[:space:]]and[[:space:]]Differentiable[[:space:]]Simulations/41f81ce5-4453-4061-b257-336a66f472e8_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Winning[[:space:]]Both[[:space:]]the[[:space:]]Accuracy[[:space:]]of[[:space:]]Floating[[:space:]]Point[[:space:]]Activation[[:space:]]and[[:space:]]the[[:space:]]Simplicity[[:space:]]of[[:space:]]Integer[[:space:]]Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Words[[:space:]]are[[:space:]]all[[:space:]]you[[:space:]]need_[[:space:]]Language[[:space:]]as[[:space:]]an[[:space:]]approximation[[:space:]]for[[:space:]]human[[:space:]]similarity[[:space:]]judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Write[[:space:]]and[[:space:]]Paint_[[:space:]]Generative[[:space:]]Vision-Language[[:space:]]Models[[:space:]]are[[:space:]]Unified[[:space:]]Modal[[:space:]]Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Your[[:space:]]Contrastive[[:space:]]Learning[[:space:]]Is[[:space:]]Secretly[[:space:]]Doing[[:space:]]Stochastic[[:space:]]Neighbor[[:space:]]Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/Zeroth-Order[[:space:]]Optimization[[:space:]]with[[:space:]]Trajectory-Informed[[:space:]]Derivative[[:space:]]Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/f-DM_[[:space:]]A[[:space:]]Multi-stage[[:space:]]Diffusion[[:space:]]Model[[:space:]]via[[:space:]]Progressive[[:space:]]Signal[[:space:]]Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2023/kNN-Diffusion_[[:space:]]Image[[:space:]]Generation[[:space:]]via[[:space:]]Large-Scale[[:space:]]Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/simpleKT_[[:space:]]A[[:space:]]Simple[[:space:]]But[[:space:]]Tough-to-Beat[[:space:]]Baseline[[:space:]]for[[:space:]]Knowledge[[:space:]]Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/wav2tok_[[:space:]]Deep[[:space:]]Sequence[[:space:]]Tokenizer[[:space:]]for[[:space:]]Audio[[:space:]]Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_content_list.json b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9b73d8b92184cbd7c9fee1e1f2cf25e5e8d0fb04 --- /dev/null +++ b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_content_list.json @@ -0,0 +1,3380 @@ +[ + { + "type": "text", + "text": "VOINT CLOUD: MULTI-VIEW POINT CLOUD REPRESENTATION FOR 3D UNDERSTANDING", + "text_level": 1, + "bbox": [ + 171, + 98, + 828, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abdullah Hamdi", + "bbox": [ + 181, + 169, + 325, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Silvio Giancola", + "bbox": [ + 431, + 169, + 558, + 183 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Bernard Ghanem", + "bbox": [ + 666, + 170, + 813, + 183 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia {abdullah.hamdi, silvio.giancola, bernard.ghanem}@kaust.edu.sa", + "bbox": [ + 181, + 205, + 898, + 234 + ], + "page_idx": 0 + }, + { + "type": 
"text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 449, + 270, + 547, + 286 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multi-view projection methods have demonstrated promising performance on 3D understanding tasks like 3D classification and segmentation. However, it remains unclear how to combine such multi-view methods with the widely available 3D point clouds. Previous methods use unlearned heuristics to combine features at the point level. To this end, we introduce the concept of the multi-view point cloud (Voint cloud), representing each 3D point as a set of features extracted from several view-points. This novel 3D Voint cloud representation combines the compactness of 3D point cloud representation with the natural view-awareness of multi-view representation. Naturally, we can equip this new representation with convolutional and pooling operations. We deploy a Voint neural network (VointNet) to learn representations in the Voint space. Our novel representation achieves state-of-the-art performance on 3D classification, shape retrieval, and robust 3D part segmentation on standard benchmarks (ScanObjectNN, ShapeNet Core55, and ShapeNet Parts).1", + "bbox": [ + 228, + 303, + 767, + 512 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 540, + 341, + 555 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A fundamental question in 3D computer vision and computer graphics is how to represent 3D data (Mescheder et al., 2019; Qi et al., 2017a; Maturana & Scherer, 2015). This question becomes particularly vital given how the success of deep learning in 2D computer vision has pushed for the wide adoption of deep learning in 3D vision and graphics. 
In fact, deep networks already achieve impressive results in 3D classification (Hamdi et al., 2021), 3D segmentation (Hu et al., 2021), 3D detection (Liu et al., 2021a), 3D reconstruction (Mescheder et al., 2019), and novel view synthesis (Mildenhall et al., 2020). 3D computer vision networks either rely on direct 3D representations, indirect 2D projection on images, or a mixture of both. Direct approaches operate on 3D data commonly represented with point clouds (Qi et al., 2017a), meshes (Feng et al., 2019), or voxels (Choy et al., 2019). In contrast, indirect approaches commonly render multiple 2D views of objects or scenes (Su et al., 2015), and process each image with a traditional 2D image-based architecture. The human visual system is closer to such a multi-view indirect approach for 3D understanding, as it receives streams of rendered images rather than explicit 3D data.", + "bbox": [ + 169, + 571, + 826, + 767 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tackling 3D vision tasks with indirect approaches has three main advantages: (i) mature and transferable 2D computer vision models (CNNs, Transformers, etc.), (ii) large and diverse labeled image datasets for pre-training (e.g. ImageNet (Russakovsky et al., 2014)), and (iii) the multi-view images give context-rich features based on the viewing angle, which are different from the geometric 3D neighborhood features. Multi-view approaches achieve impressive performance in 3D shape classification and segmentation (Wei et al., 2020; Hamdi et al., 2021; Dai & Nießner, 2018). However, the challenge with the multi-view representation (especially for dense predictions) lies in properly aggregating the per-view features with 3D point clouds. 
The appropriate aggregation is necessary to obtain representative 3D point", + "bbox": [ + 169, + 772, + 826, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1The code is available at https://github.com/ajhamdi/vointcloud", + "bbox": [ + 191, + 909, + 627, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0f87af169b3654c00a374430901e2066ece51e5796b3b55094f704f8b831430b.jpg", + "image_caption": [ + "Figure 1: 3D Voint Clouds. We propose the multi-view point cloud (Voint cloud), a novel 3D representation that is compact and naturally descriptive of view projections of a 3D point cloud. Each point in the 3D cloud is tagged with a Voint, which accumulates view-features for that point. Note that not all 3D points are visible from all views. The set of Voints constructs a Voint cloud." + ], + "image_footnote": [], + "bbox": [ + 233, + 99, + 751, + 296 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "clouds with a single feature per point suitable for typical point cloud processing pipelines. Previous multi-view works rely on heuristics (e.g. average or label mode pooling) after mapping pixels to points (Kundu et al., 2020; Wang et al., 2019a), or multi-view fusion with voxels (Dai & Nießner, 2018). Such setups might not be optimal for a few reasons. (i) Such heuristics may aggregate information of misleading projections that are obtained from arbitrary view-points. For example, looking at an object from the bottom and processing that view independently can carry wrong information about the object's content when combined with other views. 
(ii) The views lack geometric 3D information.", + "bbox": [ + 169, + 450, + 823, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, we propose a new hybrid 3D data structure that inherits the merits of point clouds (i.e. compactness, flexibility, and 3D descriptiveness) and leverages the benefits of rich perceptual features of multi-view projections. We call this new representation multi-view point cloud (or Voint cloud) and illustrate it in Figure 1. A Voint cloud is a set of Voints, where each Voint is a set of view-dependent features (view-features) that correspond to the same point in the 3D point cloud. The cardinality of these view-features may differ from one Voint to another. In Table 1, we compare some of the widely used 3D representations and our Voint cloud representation. Voint clouds inherit the characteristics of the parent explicit 3D point clouds, which facilitates learning Voint representations for a variety of vision applications (e.g. point cloud classification and segmentation). To deploy deep learning on the new Voint space, we define basic operations on Voints, such as pooling and convolution. Based on these operations, we define a practical way of building Voint neural networks that we dub VointNet. VointNet takes a Voint cloud and outputs point cloud features for 3D point cloud processing. We show how learning this Voint cloud representation leads to strong performance and gained robustness for the tasks of 3D classification, 3D object retrieval, and 3D part segmentation on standard benchmarks like ScanObjectNN (Uy et al., 2019), and ShapeNet (Chang et al., 2015).", + "bbox": [ + 169, + 569, + 826, + 806 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contributions: (i) We propose a novel multi-view 3D point cloud representation (denoted as Voint cloud), which represents each point (namely a Voint) as a set of features from different view-points. 
(ii) We define pooling and convolutional operations at the Voint level to construct a Voint Neural Network (VointNet) capable of learning to aggregate information from multiple views in the Voint space. (iii) Our VointNet reaches state-of-the-art performance on several 3D understanding tasks, including 3D shape classification, retrieval, and robust part segmentation. Further, VointNet achieves robustness improvement to occlusion and rotation.", + "bbox": [ + 169, + 811, + 826, + 924 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 506, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/447151a4e6a0df30286a663cddb261b16740e2bedc4d81013124f7e5464df52e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
3D RepresentationExplicitnessView-BasedMain Use3D Expressiveness
Point CloudsExplicitX3D UnderstandingMedium
Multi-View ProjectionsImplicit3D UnderstandingLow
VoxelsExplicitX3D UnderstandingMedium
MeshExplicitX3D ModelingHigh
NeRFsImplicitNovel View SynthesisMedium
Voint Clouds (ours)Explicit3D UnderstandingMedium
", + "bbox": [ + 176, + 101, + 820, + 209 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1: Comparison of Different 3D Representations. We compare some of the widely used 3D representations to our proposed Voint cloud. Note that our Voint cloud shares the view-dependency of NeRFs (Mildenhall et al., 2020) while inheriting the merits of 3D point clouds.", + "bbox": [ + 169, + 218, + 823, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 291, + 354, + 306 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Learning on 3D Point Clouds. 3D point clouds are widely used for 3D representation in computer vision due to their compactness, flexibility, and because they can be obtained naturally from sensors like LiDAR and RGBD cameras. PointNet (Qi et al., 2017a) paved the way as the first deep learning algorithm to operate directly on 3D point clouds. It computes point features independently and aggregates them using an order-invariant function like max-pooling. Subsequent works focused on finding neighborhoods of points to define point convolutional operations (Qi et al., 2017b; Wang et al., 2019c; Li et al., 2018; Han et al., 2019). Several recent works combine point cloud representations with other 3D modalities like voxels (Liu et al., 2019b; You et al., 2018) or multi-view images (Jaritz et al., 2019). We propose a novel Voint cloud representation for 3D shapes and investigates novel architectures that aggregate view-dependent features at the 3D point level.", + "bbox": [ + 169, + 321, + 826, + 474 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Multi-View Applications. The idea of using 2D images to understand the 3D world was initially proposed in 1994 by Bradski et. al. (Bradski & Grossberg, 1994). This intuitive multi-view approach was combined with deep learning for 3D understanding in MVCNN (Su et al., 2015). 
A line of works continued developing multi-view approaches for classification and retrieval by improving the aggregation of the view-features from each image view (Kanezaki et al., 2018; Esteves et al., 2019; Cohen & Welling, 2016; Wei et al., 2020; Hamdi et al., 2021). In this work, we fuse the concept of multi-view into the 3D structure itself, such that every 3D point would have an independent set of view-features according to the view-points available in the setup. Our Voints are aligned with the sampled 3D point cloud, offering a compact representation that allows for efficient computation and memory usage while maintaining the view-dependent component that facilitates view-based learning for vision.", + "bbox": [ + 169, + 481, + 823, + 635 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Hybrid Multi-View with 3D Data. On the task of 3D semantic segmentation, a smaller number of works tried to follow the multi-view approach (Dai & Nießner, 2018; Kundu et al., 2020; Wang et al., 2019a; Kalogerakis et al., 2017; Jaritz et al., 2019; Liu et al., 2021b; Lyu et al., 2020). A problem arises when combining view features to represent local points/voxels while preserving local geometric features. These methods tend to average the view-features (Kundu et al., 2020; Kalogerakis et al., 2017), propagate the labels only (Wang et al., 2019a), learn from reconstructed points in the neighborhood (Jaritz et al., 2019), order points on a single grid (Lyu et al., 2020), or combine the multi-view features with 3D voxel features (Dai & Nießner, 2018; Hou et al., 2019). To this end, our proposed VointNet operates on the Voint cloud space while preserving the compactness and 3D descriptiveness of the original point cloud. 
VointNet leverages the power of multi-view features with learned aggregation on the view-features applied to each point independently.", + "bbox": [ + 169, + 641, + 826, + 809 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 METHODOLOGY", + "text_level": 1, + "bbox": [ + 171, + 835, + 344, + 852 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The primary assumption in our work is that surface 3D points are spherical functions, i.e. their representations depend on the viewing angles observing them. This condition contrasts with most 3D point cloud processing pipelines that assume a view-independent representation of 3D point clouds. The full pipeline is illustrated in Figure 2.", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 508, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f18c527b4598c89667a2cfd0519a8caecd049ba9b97eafab366dccb7b44f9b72.jpg", + "image_caption": [ + "Figure 2: Learning from Voint Clouds. To construct a 3D Voint cloud $\\widehat{\\mathcal{X}}$ , a renderer $\\mathbf{R}$ renders the point cloud $\\mathcal{X}$ from view-points $\\mathcal{U}$ and image features are extracted from the generated images via a 2D backbone $\\mathbf{C}$ . The image features are then unprojected to the Voint cloud by $\\Phi_{\\mathbf{B}}$ and passed to VointNet $\\widehat{\\mathbf{F}}$ . To learn both $\\mathbf{C}$ and $\\widehat{\\mathbf{F}}$ , a 3D loss on the output points is used with an optional auxiliary 2D loss on $\\mathbf{C}$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 99, + 828, + 236 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 3D VOINT CLOUD", + "text_level": 1, + "bbox": [ + 171, + 339, + 352, + 353 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "From Point Clouds to Voint Clouds. A 3D point cloud is a compact 3D representation composed of sampled points on the surface of a 3D object or a scene and can be obtained by different sensors like LiDAR (Chen et al., 2017) or as a result of reconstruction (Okutomi & Kanade, 1993). Formally, we define the coordinate function for the surface $g_{\\mathrm{s}}(\\mathbf{x}) : \\mathbb{R}^3 \\to \\mathbb{R}$ as the Sign Distance Function (SDF) in the continuous Euclidean space (Park et al., 2019; Mescheder et al., 2019). The 3D iso-surface is then defined as the set of all points $\\mathbf{x}$ that satisfy the condition $g_{\\mathrm{s}}(\\mathbf{x}) = 0$ . We define a surface 3D point cloud $\\mathcal{X} \\in \\mathbb{R}^{N \\times 3}$ as a set of $N$ 3D points, where each point $\\mathbf{x}_i \\in \\mathbb{R}^3$ is represented by its 3D coordinates $(x_i, y_i, z_i)$ and satisfies the iso-surface condition as follows: $\\mathcal{X} = \\{\\mathbf{x}_i \\in \\mathbb{R}^3 \\mid g_{\\mathrm{s}}(\\mathbf{x}_i) = 0\\}_{i=1}^N$ . In this work, we aim to fuse the view-dependency to 3D point. Inspired by NeRFs (Mildenhall et al., 2020), we assume that surface points also depend on the view direction from which they are being observed. Specifically, there exists a continuous implicit spherical function $\\mathbf{g}(\\mathbf{x}, \\mathbf{u}) : \\mathbb{R}^5 \\to \\mathbb{R}^d$ that defines the features of each point $\\mathbf{x}$ depending on the view-point direction $\\mathbf{u}$ . 
Given a set of $M$ view-point directions $\\mathcal{U} \\in \\mathbb{R}^{M \\times 2}$ , a Voint $\\widehat{\\mathbf{x}} \\in \\mathbb{R}^{M \\times d}$ is a set of $M$ view-dependent features of size $d$ for the sphere centered at point $\\mathbf{x}$ as follows.", + "bbox": [ + 169, + 359, + 826, + 575 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\mathbf {x}} _ {i} = \\left\\{\\mathbf {g} \\left(\\mathbf {x} _ {i}, \\mathbf {u} _ {j}\\right) \\in \\mathbb {R} ^ {d} \\mid \\mathbf {x} _ {i} \\in \\mathcal {X} \\right\\} _ {j = 1} ^ {M} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 577, + 823, + 601 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Voint cloud $\\widehat{\\mathcal{X}}\\in \\mathbb{R}^{N\\times M\\times d} = \\{\\widehat{\\mathbf{x}}_i\\}_{i = 1}^N$ is the set of all $N$ Voints $\\widehat{\\mathbf{x}}_i$ corresponding to the parent point cloud $\\mathcal{X}$ . Note that we typically do not have access to the underlying implicit function $\\mathbf{g}$ and we approximate it with the following three steps.", + "bbox": [ + 169, + 604, + 823, + 651 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1-Multi-View Projection. As mentioned earlier, a Voint combines multiple view-features of the same 3D point. These view-features come from a multi-view projection of the points by a point cloud renderer $\\mathbf{R}:\\mathbb{R}^{N\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times 3}$ that renders the point cloud $\\mathcal{X}$ from multiple view-points $\\mathcal{U}$ into $M$ images of size $H\\times W\\times 3$ . In addition to projecting the point cloud into the image space, $\\mathbf{R}$ defines the index mapping $\\mathbf{B}\\in \\{0,\\dots,N\\}^{M\\times H\\times W}$ between each pixel to the N points and background it renders. 
Also, $\\mathbf{R}$ outputs the visibility binary matrix $\\mathbf{V}\\in \\{0,1\\}^{N\\times M}$ for each point from each view. Since not all points appear in all the views due to pixel discretization, the visibility score $\\mathbf{V}_{i,j}$ defines if the Voint $\\hat{\\mathbf{x}}_i$ is visible in the view $\\mathbf{u}_j$ . The matrix $\\mathbf{B}$ is crucial for unprojection, while $\\mathbf{V}$ is needed for defining meaningful operations on Voints.", + "2-Multi-View Feature Extraction. The rendered images are processed by a function $\\mathbf{C}:\\mathbb{R}^{M\\times H\\times W\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times d}$ that extracts image features, as shown in Figure 2. If $\\mathbf{C}$ is the identity function, all the view-features would typically the RGB value of the corresponding point. However, the $\\mathbf{C}$ function can be a 2D network dedicated to the downstream task and can extract useful global and local features about each view.", + "3-Multi-View Unprojection. We propose a module $\\Phi_{\\mathbf{B}}:\\mathbb{R}^{M\\times H\\times W\\times d}\\to \\mathbb{R}^{N\\times M\\times d}$ that unprojects the 2D features from each pixel to be 3D view-features at the corresponding voint. 
Using the mapping $\\mathbf{B}$ created by the renderer, $\\Phi_{\\mathbf{B}}$ forms the Voint cloud features $\\widehat{\\mathcal{X}}$" + ], + "bbox": [ + 169, + 656, + 826, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To summarize, the output Voint cloud is described by Eq (1), where $\\mathbf{g}(\\mathbf{x}_i,\\mathbf{u}_j) = \\Phi_{\\mathbf{B}}\\big(\\mathbf{C}(\\mathbf{R}(\\mathcal{X},\\mathbf{u}_j))\\big)_i$ and the features are only defined for a view $j$ of Voint $\\hat{\\mathbf{x}}_i$ if $\\mathbf{V}_{i,j} = 1$ .", + "bbox": [ + 169, + 103, + 823, + 136 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 OPERATIONS ON 3D VOINT CLOUDS", + "text_level": 1, + "bbox": [ + 169, + 150, + 485, + 164 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We show in the Appendix that a functional form of max-pooled individual view-features of a set of angles can approximate any function in the spherical coordinates. We provide a theorem that extends PointNet's theorem of point cloud functional composition (Qi et al., 2017a) and its Universal Approximation to spherical functions underlying Voints. Next, we define a set of operations on Voints as building blocks for Voint neural networks (VointNet).", + "bbox": [ + 169, + 170, + 826, + 242 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "VointMax. We define VointMax as max-pooling on the visible view-features along the views dimension of the voint $\\hat{\\mathbf{x}}$ . 
For all $i \\in 1,2,\\dots,N$ and $j \\in 1,2,\\dots,M$ ,", + "bbox": [ + 169, + 247, + 823, + 276 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {V o i n t M a x} \\left(\\widehat {\\mathbf {x}} _ {i}\\right) = \\max _ {j} \\widehat {\\mathbf {x}} _ {i, j}, \\quad \\text {s . t .} \\quad \\mathbf {V} _ {i, j} = 1 \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 282, + 821, + 306 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "VointConv. We define the convolution operation $h_{\\mathrm{V}}: \\mathbb{R}^{N \\times M \\times d} \\to \\mathbb{R}^{N \\times M \\times d'}$ as any learnable function that operates on the Voint space with shared weights on all the Voints and has the view-features input size $d$ and outputs view-features of size $d'$ and consists of $l_{V}$ layers. A simple example of this VointConv operation is the shared MLP applied only on the visible view-features. We provide further details for such operations in Section 4.2, which result in different non-exhaustive variants of VointNet.", + "bbox": [ + 169, + 316, + 823, + 404 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 LEARNING ON 3D VOINT CLOUDS", + "text_level": 1, + "bbox": [ + 169, + 419, + 467, + 434 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "VointNet. The goal of the VointNet model is to obtain multi-view point cloud features that can be subsequently used by any point cloud processing pipeline. 
The VointNet module $\\widehat{\\mathbf{F}}:\\mathbb{R}^{N\\times M\\times d}\\to \\mathbb{R}^{N\\times d}$ is defined as follows.", + "bbox": [ + 169, + 440, + 823, + 484 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\mathbf {F}} (\\widehat {\\boldsymbol {\\chi}}) = h _ {\\mathrm {P}} \\left(\\operatorname {V o i n t M a x} \\left(h _ {\\mathrm {V}} (\\widehat {\\boldsymbol {\\chi}})\\right)\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 489, + 823, + 516 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $h_{\\mathrm{P}}$ is any point convolutional operation (e.g. shared MLP or EdgeConv). VointNet $\\widehat{\\mathbf{F}}$ transforms the individual view-features using the learned VointConv $h_{\\mathrm{V}}$ before VointMax is applied on the view-features to obtain point features.", + "bbox": [ + 169, + 523, + 823, + 568 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "VointNet Pipeline for 3D Point Cloud Processing. The full pipeline is described in Figure 2. The loss for this pipeline can be described as follows:", + "bbox": [ + 169, + 573, + 823, + 603 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\boldsymbol {\\theta} _ {\\mathbf {C}}, \\boldsymbol {\\theta} _ {\\widehat {\\mathbf {F}}}} {\\arg \\min } \\sum_ {i} ^ {N} L \\left(\\widehat {\\mathbf {F}} \\left(\\Phi_ {\\mathbf {B}} \\left(\\mathbf {C} \\left(\\mathbf {R} (\\mathcal {X}, \\mathcal {U})\\right)\\right)\\right) _ {i}, \\mathbf {y} _ {i}\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 609, + 823, + 652 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $L$ is a Cross-Entropy (CE) loss defined on all the training points $\\mathcal{X}$ , and $\\{y_i\\}_{i=1}^N$ defines the labels of these points. The other components $(\\mathbf{R}, \\Phi_{\\mathbf{B}}, \\mathcal{U}, \\mathbf{C})$ are all defined before. 
The weights to be jointly learned are those of the 2D backbone $(\\theta_{\\mathbf{C}})$ and those of the VointNet $(\\theta_{\\widehat{\\mathbf{F}}})$ using the same 3D loss. An auxiliary 2D loss on $\\theta_{\\mathbf{C}}$ can be optionally added for supervision at the image level. For classification, the entire object can be treated as a single Voint, and the global features of each view would be the view-features of that Voint. We analyze different setups in detail in Section 6.", + "bbox": [ + 169, + 660, + 826, + 760 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 169, + 779, + 333, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 EXPERIMENTAL SETUP", + "text_level": 1, + "bbox": [ + 171, + 805, + 385, + 819 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We benchmark VointNet on the challenging and realistic ScanObjectNN dataset for 3D point cloud classification (Uy et al., 2019). The dataset has three variants, includes background and occlusion, and has 15 categories and 2,902 point clouds. For the shape retrieval task, we benchmark on ShapeNet Core55 as a subset of ShapeNet (Chang et al., 2015). The dataset consists of 51,162 3D mesh objects labeled with 55 object classes. We follow the MVTN's setup (Hamdi et al., 2021) in sampling 5,000 points from each mesh object to obtain point cloud. On the other hand, for the task of shape part segmentation,", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/c1e67f51fccdb55e90718f0ed7376ad44684b9a3217a843d35efb1f85679549b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodData TypeClassificationAccuracy
OBJ_BGOBJ_ONLYHardest
PointNet (Qi et al., 2017a)Points73.379.268.0
SpiderCNN (Xu et al., 2018)Points77.179.573.7
PointNet ++ (Qi et al., 2017b)Points82.384.377.9
PointCNN (Li et al., 2018)Points86.185.578.5
DGCNN (Wang et al., 2019c)Points82.886.278.1
SimpleView (Goyal et al., 2021)M-View--79.5
BGA-DGCNN (Uy et al., 2019)Points--79.7
BGA-PN++ (Uy et al., 2019)Points--80.2
MVTN (Hamdi et al., 2021)M-View92.692.382.8
VointNet (ours)Voints93.794.085.4
", + "bbox": [ + 210, + 99, + 782, + 284 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: 3D Point Cloud Classification on ScanObjectNN. We report the accuracy of VointNet in 3D point cloud classification on three different variants of ScanObjectNN (Uy et al., 2019). Bold denotes the best result in its setup. Note that the Hardest variant includes rotated and translated objects, which highlights the benefits of Voints on challenging scenarios.", + "bbox": [ + 169, + 294, + 826, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "we test on ShapeNet Parts (Yi et al., 2016), a subset of ShapeNet (Chang et al., 2015) that consists of 16,872 point cloud objects from 16 categories and 50 parts. For occlusion robustness, we follow MVTN (Hamdi et al., 2021) and test on ModelNet40 (Wu et al., 2015), which is composed of 40 classes and 12,311 3D objects.", + "bbox": [ + 169, + 388, + 826, + 446 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. For 3D point cloud classification, we report the overall accuracy, while shape retrieval is evaluated using mean Average Precision (mAP) over test queries (Hamdi et al., 2021). 3D semantic segmentation is evaluated using mean Intersection over Union (mIoU) on points. For part segmentation, we report Instance-averaged mIoU (Ins. mIoU).", + "bbox": [ + 169, + 453, + 826, + 511 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We include PointNet (Qi et al., 2017a), PointNet++ (Qi et al., 2017b), DGCNN (Wang et al., 2019c), as baselines that use point clouds. We also compare against multi-view classification approaches like MVCNN (Su et al., 2015), SimpleView (Goyal et al., 2021), and MVTN (Hamdi et al., 2021) as baselines for classification and retrieval and adopt some of the multi-view segmentation baselines (e.g. 
Label Fusion (Wang et al., 2019a) and Mean Fusion (Kundu et al., 2020)) for part segmentation.", + "bbox": [ + 169, + 515, + 826, + 602 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 VOINTNET VARIANTS", + "text_level": 1, + "bbox": [ + 171, + 633, + 377, + 647 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "VointNet in Eq (3) relies on the VointConv operation $h_{\\mathrm{V}}$ as the basic building block. Here, we briefly describe three examples of $h_{\\mathrm{V}}$ operations VointNet uses.", + "bbox": [ + 169, + 661, + 826, + 691 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Shared Multi-Layer Perceptron (MLP). It is the most basic VointConv formulation. For a layer $l$ , the features of Voint $i$ at view $j$ are updated to layer $l + 1$ as: $\\mathbf{h}_{i,j}^{l + 1} = \\rho (\\mathbf{h}_{i,j}^{l}\\mathcal{W}_{\\rho})$ , where $\\rho$ is the shared MLP with weights $\\mathcal{W}_{\\rho}$ followed by normalization and a nonlinear function (e.g. ReLU). This operation is applied on all Voints independently and only involves the visible views-features for each Voint. This formulation extends the shared MLP formulation for PointNet (Qi et al., 2017a) to work on Voints' view-features.", + "bbox": [ + 169, + 696, + 826, + 787 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Graph Convolution (GCN). We define a fully connected graph for each Voint by creating a virtual center node connected to all the view-features to aggregate their information (similar to \"cls\" token in ViT (Dosovitskiy et al., 2021)). Then, the graph convolution can be defined as the shared MLP (as described above) but on the edge features between all view features, followed by a max pool on the graph neighbors. An additional shared MLP is used before the final output.", + "bbox": [ + 169, + 790, + 826, + 876 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Graph Attention (GAT). 
A graph attention operation can be defined just like the GCN operation above but with learned attention weights on the graph neighbor's features before averaging them. A shared MLP computes these weights.", + "bbox": [ + 169, + 881, + 825, + 926 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/43cae6bd71c73b896b64451d5fa0cf6f9e718ca55b6a5a098f3c506b4d7bab28.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ResultsMVCNN (Su et al., 2015)RotNet (Kanezaki et al., 2018)ViewGCN (Wei et al., 2020)MVTN (Hamdi et al., 2021)VointNet (ours)
ShapeNet73.577.278.482.983.3
Retr. mAP
", + "bbox": [ + 178, + 101, + 816, + 161 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/445f205d21dada46bd83cd6fc5ea7c9095b7ba54b7050cce4f43195cf2fb11f9.jpg", + "table_caption": [ + "Table 3: 3D Shape Retrieval. We report 3D shape retrieval mAP on ShapeNet Core55 (Chang et al., 2015; Sfikas et al., 2017). VointNet achieves state-of-the-art results on this benchmark." + ], + "table_footnote": [], + "table_body": "
MethodData TypePart Segmentation
(Unrotated)(Rotated)
PointNet (Qi et al., 2017a)Points80.136.6 ±0.2
DGCNN (Wang et al., 2019c)Points80.137.1 ±0.2
CurveNet (Xiang et al., 2021)Points84.932.3 ±0.0
Label Fuse (Wang et al., 2019a)M-View80.061.4 ±0.2
Mean Fuse (Kundu et al., 2020)M-View77.562.0 ±0.2
VointNet (ours)Voints81.262.4 ±0.2
", + "bbox": [ + 263, + 217, + 730, + 344 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4: Robust 3D Part Segmentation on ShapeNet Parts. We compare the Inst. mIoU of VointNet against other methods in 3D segmentation on ShapeNet Parts (Yi et al., 2016). At test time, we randomly rotate the objects and report the results over ten runs. Note how VointNet's performance largely exceeds the point baselines in the realistic rotated scenarios, while exceeding multi-view baselines on the unrotated benchmark. All the results are reproduced in our setup.", + "bbox": [ + 169, + 354, + 823, + 419 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 446, + 415, + 462 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Rendering and Unprojection. We choose the differentiable point cloud renderer $\\mathbf{R}$ from Pytorch3D (Ravi et al., 2020) in our pipeline for its speed and compatibility with Pytorch libraries (Paszke et al., 2017). We render point clouds on multi-view images with size $224 \\times 224 \\times 3$ . We color the points by their normals' values or keep them white if the normals are not available. Following a similar procedure to (Wei et al., 2020; Hamdi et al., 2021), the view-points setup is randomized during training (using $M = 8$ views) and fixed to spherical views in testing (using $M = 12$ views).", + "bbox": [ + 169, + 469, + 823, + 569 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Architectures. For the 2D backbone $\\mathbf{C}$ , we use ViT-B (Dosovitskiy et al., 2021) (with pretrained weights from TIMM library (Wightman, 2019)) for classification and DeepLabV3 (Chen et al., 2018) for segmentation. We use the 3D CE loss on the 3D point cloud output and the 2D CE loss when the loss is defined on the pixels. The feature dimension of the VointNet architectures is $d = 64$ , and the depth is $l_{V} = 4$ layers in $h_V$ . 
The main results are based on VointNet (MLP), unless otherwise specified as in Section 6, where we study in detail the effect of VointConv $h_\mathrm{V}$ and $\mathbf{C}$ .", + "bbox": [ + 169, + 573, + 823, + 672 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training Setup. We train our pipeline in two stages, where we start by training the 2D backbone on the 2D projected labels of the points, then train the entire pipeline end-to-end while focusing the training on the VointNet part. We use the AdamW optimizer (Loshchilov & Hutter, 2017) with an initial learning rate of 0.0005 and a step learning rate schedule of $33.3\\%$ every 12 epochs for 40 epochs. The pipeline is trained with one NVIDIA Tesla V100 GPU. We do not use any data augmentation. More details about the training setup (loss and rendering), VointNet, and the 2D backbone architectures can be found in the Appendix.", + "bbox": [ + 169, + 678, + 826, + 777 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5 RESULTS", + "text_level": 1, + "bbox": [ + 171, + 797, + 285, + 814 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The main test results of our Voint formulations are summarized in Tables 2, 3, 4, and 5. We achieve state-of-the-art performance in the task of 3D classification, retrieval, and robust 3D part segmentation. More importantly, under the realistic rotated setups of ScanObjectNN and ShapeNet Parts, we improve over $7.2\\%$ Acc. and $25\\%$ mIoU respectively compared to point baselines Qi et al. (2017a); Wang et al. (2019c). Following common practice Hamdi et al. 
(2021), we report the best results out of four runs in benchmark tables, but detailed results are provided in the Appendix.", + "bbox": [ + 169, + 825, + 823, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 508, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/123fba4fd773417d0a4163ecd2fb10d4b2176e02cc5cd0cc864f083a9c86b7cd.jpg", + "image_caption": [ + "Figure 3: Qualitative Comparison for Part Segmentation. We compare our VointNet 3D segmentation predictions to Mean Fuse (Kundu et al., 2020) that is using the same trained 2D backbone. Note how VointNet distinguishes detailed parts (e.g. the car window frame)." + ], + "image_footnote": [], + "bbox": [ + 210, + 98, + 395, + 262 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cd772a1ce730ead1824f2b7f7a275f785539b7534c5bcf3091b1539db62ac8d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 405, + 97, + 580, + 262 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/62f87428c6e762834173ef8afc04dd088d57d8d321339eb21332f097f9ee4c51.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 98, + 787, + 260 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1 3D SHAPE CLASSIFICATION", + "text_level": 1, + "bbox": [ + 171, + 348, + 419, + 362 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2 reports the classification accuracy on the 3D point cloud classification task on ScanObjectNN Uy et al. (2019). It benchmarks VointNet against other recent and strong baselines Hamdi et al. (2021); Goyal et al. (2021); Hamdi et al. (2021). 
VointNet demonstrates state-of-the-art results on all the variants, including the challenging Hardest (PB_T50_RS) variant that includes challenging scenarios of rotated and translated objects. The increase in performance $(+2.6\\%)$ is significant on this variant, which highlights the benefits of Voints on challenging scenarios, with further affirming results in Section 5.4. We follow exactly the same procedure as in MVTN Hamdi et al. (2021).", + "bbox": [ + 169, + 371, + 823, + 484 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 3D SHAPE RETRIEVAL", + "text_level": 1, + "bbox": [ + 171, + 507, + 383, + 522 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3 benchmarks the 3D shape retrieval mAP on ShapeNet Core55 Chang et al. (2015). VointNet achieves state-of-the-art performance on ShapeNet Core55. Baseline results are reported from Hamdi et al. (2021).", + "bbox": [ + 169, + 535, + 826, + 579 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3 ROBUST 3D PART SEGMENTATION", + "text_level": 1, + "bbox": [ + 171, + 602, + 470, + 616 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4 reports the Instance-averaged segmentation mIoU of VointNet compared with other methods on ShapeNet Parts Yi et al. (2016). Two variants of the benchmark are reported: unrotated normalized setup, and the rotated realistic setup. For the rotated setup, we follow the previous 3D literature Liu et al. (2019a); Hamdi et al. (2021; 2020) by testing the robustness of trained models by perturbing the shapes in ShapeNet Parts with random rotations at test time (ten runs) and report the averages in Table 4. Note VointNet's improvement over Mean Fuse Kundu et al. (2020) and Label Fuse Wang et al. (2019a) on unrotated setup despite that both baselines use the same trained 2D backbone as VointNet. Also, for rotated setups, point methods don't work as well. 
All the results in Table 4 are reproduced by our code in the same setup (see the code attached in supplementary material). Figure 3 shows qualitative 3D segmentation results for VointNet and Mean Fuse Kundu et al. (2020) as compared to the ground truth.", + "bbox": [ + 169, + 625, + 826, + 792 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4 OCCLUSION ROBUSTNESS", + "text_level": 1, + "bbox": [ + 171, + 816, + 401, + 830 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "One of the aspects of the robustness of 3D classification models that have been recently studied is their robustness to occlusion, as detailed in MVTN Hamdi et al. (2021). These simulated occlusions are introduced at test time, and the average test accuracy is reported on each cropping ratio. We benchmark our VointNet against recent baselines in Table 5. PointNet Qi et al. (2017a) and DGCNN Wang et al. (2019c) are used as point-based baselines, and MVTN Hamdi et al. (2021) as a multi-view baseline.", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 509, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/30dc8227dc10160b94dd67f71c770196730058327863fce4635d5d874f0483e7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodData TypeOcclusion Ratio
00.10.20.30.5
PointNet (Qi et al., 2017a)Points89.188.286.181.653.5
DGCNN (Wang et al., 2019c)Points92.177.174.571.230.1
PCT (Guo et al., 2021)Points93.392.691.188.261.9
MVTN (Hamdi et al., 2021)M-View93.890.389.988.367.1
VointNet (ours)Voints92.891.691.289.166.1
", + "bbox": [ + 207, + 99, + 792, + 218 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/ca2196b969d0352d172fa93d3fa3d0cc81b1a8cce986db6e983565528fffa072.jpg", + "image_caption": [ + "Figure 4: Effect of the Number of Views. We plot Ins. mIoU of 3D segmentation vs. the number of views $(M)$ used in inference on ShapeNet Parts. Note VointNet's consistent improvement over Mean Fuse (Kundu et al., 2020) and Label Fuse (Wang et al., 2019a). Both baselines use the same trained 2D backbone as VointNet and are tested on the same unrotated setup." + ], + "image_footnote": [], + "bbox": [ + 282, + 287, + 714, + 445 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 ANALYSIS AND INSIGHTS", + "text_level": 1, + "bbox": [ + 171, + 535, + 424, + 550 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Number of Views. We study the effect of the number of views $M$ on the performance of 3D part segmentation using multiple views. We compare Mean Fuse (Kundu et al., 2020) and Label Fuse (Wang et al., 2019a) to our VointNet when all of them have the same trained 2D backbone. The views are randomly picked, and the experiments are repeated four times. Ins. mIoU with confidence intervals are shown in Figure 4. We observe a consistent improvement with VointNet over the other two baselines across different numbers of views.", + "bbox": [ + 169, + 561, + 826, + 645 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/2a22c063d89175dd92ab9d6f9cd53b5f9aeda1a9455dbf46b83ab612d4b262d9.jpg", + "table_caption": [ + "Table 5: Occlusion Robustness for 3D Classification. We report the test accuracy on ModelNet40 (Wu et al., 2015) for different occlusion ratios of the data to measure occlusion robustness of different 3D methods." + ], + "table_footnote": [], + "table_body": "
2D BackboneVointConvResults
FCNDeepLabV3MLPGCNGATInst. mIoU
---78.8 ± 0.2
---77.6 ± 0.2
---77.1 ± 0.2
---80.6 ± 0.1
---77.2 ± 0.4
---80.4 ± 0.2
", + "bbox": [ + 307, + 657, + 689, + 781 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 6: Ablation Study for 3D Segmentation. We ablate different components of VointNet (2D backbone and VointConv choice) and report Ins. mIoU performance on ShapeNet Parts.", + "bbox": [ + 169, + 792, + 823, + 821 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Choice of Backbones. We ablate the choice of the 2D backbone and the VointConv operation used in VointNet and report the segmentation Ins. mIoU results in Table 6. Note how the 2D backbone greatly affects performance, while the VointConv operation type does not. This ablation highlights the importance of the 2D backbone in VointNet pipeline and motivates the use of the simplest variant of VointNet (MLP). We provide a detailed study of more factors as well as compute and memory costs in the Appendix.", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 LIMITATIONS AND ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 555, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "One aspect limiting the performance of Voints is how well-trained the 2D backbone is for the downstream 3D task. In most cases, the 2D backbone must be pretrained with enough data to learn meaningful information for VointNet. Another aspect that limits the capability of the Voint cloud is how to properly select the view-points for segmentation. Addressing these limitations is an important direction for future work. 
Also, extending Voint learning on more 3D tasks like 3D scene segmentation and 3D object detection is left for future work.", + "bbox": [ + 169, + 128, + 826, + 212 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Acknowledgments. This work was supported by the King Abdullah University of Science and Technology (KAUST) Office of Sponsored Research through the Visual Computing Center (VCC) funding and the SDAIA-KAUST Center of Excellence in Data Science and Artificial Intelligence (SDAIA-KAUST AI)", + "bbox": [ + 169, + 218, + 826, + 275 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 294, + 290, + 309 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Gary Bradski and Stephen Grossberg. Recognition of 3-d objects from multiple 2-d views by a self-organizing neural architecture. In *From Statistics to Neural Networks*, pp. 349–375. Springer, 1994.", + "Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. ShapeNet: An Information-Rich 3D Model Repository. Technical Report arXiv:1512.03012 [cs.GR], Stanford University — Princeton University — Toyota Technological Institute at Chicago, 2015.", + "Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In Proceedings of the European conference on computer vision (ECCV), pp. 801-818, 2018.", + "Xiaozhi Chen, Huimin Ma, Ji Wan, Bo Li, and Tian Xia. Multi-view 3d object detection network for autonomous driving. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 1907-1915, 2017.", + "Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3075-3084, 2019.", + "Taco Cohen and Max Welling. Group equivariant convolutional networks. In International conference on machine learning, pp. 2990-2999, 2016.", + "Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multi-view prediction for 3d semantic scene segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 452-468, 2018.", + "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021.", + "Carlos Esteves, Yinshuang Xu, Christine Allen-Blanchette, and Kostas Daniilidis. Equivariant multi-view networks. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1568-1577, 2019.", + "Yutong Feng, Yifan Feng, Haoxuan You, Xibin Zhao, and Yue Gao. Meshnet: Mesh neural network for 3d shape representation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 8279-8286, 2019.", + "Ankit Goyal, Hei Law, Bowei Liu, Alejandro Newell, and Jia Deng. Revisiting point cloud shape classification with a simple and effective baseline. In ICML, 2021.", + "Meng-Hao Guo, Jun-Xiong Cai, Zheng-Ning Liu, Tai-Jiang Mu, Ralph R Martin, and Shi-Min Hu. Pct: Point cloud transformer. Computational Visual Media, 7(2):187-199, 2021." + ], + "bbox": [ + 171, + 316, + 828, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Abdullah Hamdi, Sara Rojas, Ali Thabet, and Bernard Ghanem. 
Advpc: Transferable adversarial perturbations on 3d point clouds. In Computer Vision - ECCV 2020, pp. 241-257, Cham, 2020. Springer International Publishing. ISBN 978-3-030-58610-2.", + "Abdullah Hamdi, Silvio Giancola, and Bernard Ghanem. Mvtn: Multi-view transformation network for 3d shape recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 1-11, October 2021.", + "Zhizhong Han, Xiyang Wang, Yu-Shen Liu, and Matthias Zwicker. Multi-angle point cloud-vae: Unsupervised feature learning for 3d point clouds from multiple angles by joint self-reconstruction and half-to-half prediction. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 10441-10450. IEEE, 2019.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CoRR, abs/1512.03385, 2015. URL http://arxiv.org/abs/1512.03385.", + "Ji Hou, Angela Dai, and Matthias Nießner. 3d-sis: 3d semantic instance segmentation of rgb-d scans. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4421-4430, 2019.", + "Wenbo Hu, Hengshuang Zhao, Li Jiang, Jiaya Jia, and Tien-Tsin Wong. Bidirectional projection network for cross dimension scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14373-14382, 2021.", + "Maximilian Jaritz, Jiayuan Gu, and Hao Su. Multi-view pointnet for 3d scene understanding. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 0-0, 2019.", + "Evangelos Kalogerakis, Melinos Averkiou, Subhransu Maji, and Siddhartha Chaudhuri. 3d shape segmentation with projective convolutional networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3779-3788, 2017.", + "Asako Kanezaki, Yasuyuki Matsushita, and Yoshifumi Nishida. Rotationnet: Joint object categorization and pose estimation using multiviews from unsupervised viewpoints. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5010-5019, 2018.", + "Abhijit Kundu, Xiaoqi Yin, Alireza Fathi, David Ross, Brian Brewington, Thomas Funkhouser, and Caroline Pantofaru. Virtual multi-view fusion for 3d semantic segmentation. In European Conference on Computer Vision (ECCV), pp. 518-535. Springer, 2020.", + "Yangyan Li, Rui Bu, Mingchao Sun, Wei Wu, Xinhan Di, and Baoquan Chen. Pointcnn: Convolution on x-transformed points. In Advances in neural information processing systems (NIPS), pp. 820-830, 2018.", + "Yongcheng Liu, Bin Fan, Shiming Xiang, and Chunhong Pan. Relation-shape convolutional neural network for point cloud analysis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8895-8904, 2019a.", + "Ze Liu, Zheng Zhang, Yue Cao, Han Hu, and Xin Tong. Group-free 3d object detection via transformers. arXiv preprint arXiv:2104.00678, 2021a.", + "Zhengzhe Liu, Xiaojuan Qi, and Chi-Wing Fu. 3d-to-2d distillation for indoor scene parsing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4464-4474, 2021b.", + "Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Point-voxel cnn for efficient 3d deep learning. In Advances in Neural Information Processing Systems, pp. 965-975, 2019b.", + "Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3431-3440, 2015." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ilya Loshchilov and Frank Hutter. 
Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.", + "Yecheng Lyu, Xinming Huang, and Ziming Zhang. Learning to segment 3d point clouds in 2d image space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12255-12264, 2020.", + "Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7210-7219, 2021.", + "Daniel Maturana and Sebastian Scherer. Voxnet: A 3d convolutional neural network for real-time object recognition. In 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 922-928. IEEE, 2015.", + "Leonard McMillan and Gary Bishop. Plenoptic modeling: An image-based rendering system. In Proceedings of the 22nd annual conference on Computer graphics and interactive techniques, pp. 39-46, 1995.", + "Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4460-4470, 2019.", + "Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pp. 405-421. Springer, 2020.", + "Masatoshi Okutomi and Takeo Kanade. A multiple-baseline stereo. IEEE Transactions on pattern analysis and machine intelligence, 15(4):353-363, 1993.", + "Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 
165-174, 2019.", + "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017.", + "Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10318-10327, 2021.", + "Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 652-660, 2017a.", + "Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In Advances in neural information processing systems (NIPS), pp. 5099-5108, 2017b.", + "Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501, 2020.", + "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael S. Bernstein, Alexander C. Berg, and Fei-Fei Li. Imagenet large scale visual recognition challenge. CoRR, abs/1409.0575, 2014. URL http://arxiv.org/abs/1409.0575." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Konstantinos Sfikas, Theoharis Theoharis, and Ioannis Pratikakis. Exploiting the PANorama Representation for Convolutional Neural Network Classification and Retrieval. 
In Ioannis Pratikakis, Florent Dupont, and Maks Ovsjanikov (eds.), Eurographics Workshop on 3D Object Retrieval, pp. 1-7. The Eurographics Association, 2017. ISBN 978-3-03868-030-7. doi: 10.2312/3dor.20171045.", + "Hang Su, Subhransu Maji, Evangelos Kalogerakis, and Erik Learned-Miller. Multi-view convolutional neural networks for 3d shape recognition. In Proceedings of the IEEE international conference on computer vision, pp. 945-953, 2015.", + "Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. Kpconv: Flexible and deformable convolution for point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6411–6420, 2019.", + "Mikaela Angelina Uy, Quang-Hieu Pham, Binh-Son Hua, Duc Thanh Nguyen, and Sai-Kit Yeung. Revisiting point cloud classification: A new benchmark dataset and classification model on real-world data. In International Conference on Computer Vision (ICCV), 2019.", + "Brian H Wang, Wei-Lun Chao, Yan Wang, Bharath Hariharan, Kilian Q Weinberger, and Mark Campbell. Ldls: 3-d object segmentation through label diffusion from 2-d images. IEEE Robotics and Automation Letters, 4(3):2902-2909, 2019a.", + "He Wang, Srinath Sridhar, Jingwei Huang, Julien Valentin, Shuran Song, and Leonidas J Guibas. Normalized object coordinate space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2642-2651, 2019b.", + "Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E. Sarma, Michael M. Bronstein, and Justin M. Solomon. Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics (TOG), 2019c.", + "Xin Wei, Ruixuan Yu, and Jian Sun. View-gen: View-based graph convolutional network for 3d shape analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1850-1859, 2020.", + "Ross Wightman. Pytorch image models. 
https://github.com/rwightman/pytorch-image-models, 2019.", + "Zhirong Wu, S. Song, A. Khosla, Fisher Yu, Linguang Zhang, Xiaoou Tang, and J. Xiao. 3d shapenets: A deep representation for volumetric shapes. In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1912-1920, 2015.", + "Tiange Xiang, Chaoyi Zhang, Yang Song, Jianhui Yu, and Weidong Cai.
+ ], + "bbox": [ + 171, + 102, + 826, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "APPENDIX", + "text_level": 1, + "bbox": [ + 171, + 102, + 267, + 118 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A DETAILED FORMULATIONS", + "text_level": 1, + "bbox": [ + 171, + 133, + 446, + 150 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 TOY EXAMPLE", + "text_level": 1, + "bbox": [ + 171, + 164, + 333, + 179 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In the toy 2D example in Figure 5, the center point (represented by a circular function $g$ ) is viewed from various view-points $u_{j}$ that are agnostic to the underlying function itself. In many applications, it is desired to have a single feature representing each point in the point cloud. When the projected values of $g$ from these $u_{j}$ view-points are aggregated together (e.g. by max/mean pool) to get a constant representation of that point, the underlying properties of $g$ are lost. We build our Voint representation to keep the structure of $g$ intact by taking the full set $\\{(u_{j},g(u_{j}))\\}_{j = 1}^{5}$ in learning the aggregations.", + "bbox": [ + 169, + 190, + 826, + 291 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 FUNCTIONAL FORM OF VOINTNET", + "text_level": 1, + "bbox": [ + 171, + 304, + 478, + 318 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We can look at a simplified setup to decide on the functional form of the deep neural network that operates in the Voint space. In this simplified setup, we consider a 2D example (instead of 3D Voints) and assume that a circular function describes a point at the center. 
The center point will assume its value according to the angle $u$ . The following Theorem 1 proves that for any continuous set function $f$ that operates on any set of $M$ angles $\\{u_1, \\dots, u_M\\}$ , there exists an equivalent composite function consisting of transformed max-pooled individual view-features. This composition is the functional form we describe later for Voint neural networks", + "bbox": [ + 169, + 329, + 823, + 441 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Theorem 1 Suppose $f: \\mathcal{S} \\to \\mathbb{R}$ is a continuous set function operating on an angles set $\\mathcal{S} = \\{u \\mid u \\in [0,2\\pi]\\}$ . The continuity of $f$ is based on the Hausdorff distance $d_H$ between two sets of angles, where $d_H(\\mathcal{S},\\mathcal{S}') = \\max_{u_i' \\in \\mathcal{S}'} \\min_{u_i \\in \\mathcal{S}} d_A(u_i,u_i')$ , and $d_A$ is the smallest positive angle between two angles $d_A(u,u') = \\min(|u - u'|, 2\\pi - |u - u'|)$ . Then, for every $\\epsilon > 0$ , and $\\mathcal{U} = \\{u_1,\\dots,u_M\\} \\subset \\mathcal{S}$ , there exists a continuous function $\\mathbf{h}$ and a symmetric function $g(u_1,\\dots,u_M) = \\gamma \\circ \\mathrm{MAX}$ , such that:", + "bbox": [ + 169, + 450, + 826, + 537 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\left| f (\\mathcal {U}) - \\gamma \\left(\\operatorname {M A X} \\left(\\mathbf {h} \\left(u _ {1}\\right), \\dots , \\mathbf {h} \\left(u _ {M}\\right)\\right)\\right) \\right| < \\epsilon , \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 540, + 823, + 566 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\gamma$ is a continuous function, and MAX is an element-wise vector max operator.", + "bbox": [ + 169, + 568, + 779, + 584 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Proof. 
By the continuity of $f$ , we take $\\delta_{\\epsilon}$ so that $|f(\\mathcal{U}) - f(\\mathcal{U}')| < \\epsilon$ for any $\\mathcal{U}, \\mathcal{U}' \\subset \\mathcal{S}$ if $d_H(\\mathcal{U}, \\mathcal{U}') < \\delta_{\\epsilon}$ . Define $K = [2\\pi/\\delta_{\\epsilon}]$ , which split $[0, 2\\pi]$ into $K$ intervals evenly and define an auxiliary function that maps an angle to the beginning of the interval it lies in:", + "bbox": [ + 169, + 592, + 826, + 635 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma (u) = \\frac {\\lfloor K u \\rfloor}{K}\n$$\n", + "text_format": "latex", + "bbox": [ + 446, + 637, + 549, + 667 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Let $\\tilde{\\mathcal{U}} = \\sigma(u): u \\in \\mathcal{U}$ , then", + "bbox": [ + 171, + 669, + 370, + 686 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\left| f (\\mathcal {U}) - f (\\tilde {\\mathcal {U}}) \\right| < \\epsilon \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 433, + 685, + 823, + 703 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Let $h_k(u) = e^{-d\\left(u, \\left[\\frac{k-1}{K}, \\frac{k}{K}\\right]\\right)}$ be a soft indicator function where $d\\left(u, \\left[\\frac{k-1}{K}, \\frac{k}{K}\\right]\\right) = \\min\\left(d_A\\left(u, \\frac{k-1}{K}\\right), d_A\\left(u, \\frac{k}{K}\\right)\\right)$ is the distance between angle $u$ to interval $\\left[\\frac{k-1}{K}, \\frac{k}{K}\\right]$ . Let $\\mathbf{h}(u) = [h_1(u); \\ldots; h_K(u)]$ , then $\\mathbf{h}: \\mathbb{R} \\to \\mathbb{R}^K$", + "bbox": [ + 169, + 705, + 823, + 758 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Let $q_{j}(u_{1},\\ldots ,u_{M}) = \\max \\{h_{j}(u_{1}),\\ldots ,h_{j}(u_{M})\\}$ , indicating the occupancy of the $j$ -th interval by angles in $\\mathcal{U}$ . 
Let $\\mathbf{q} = [q_1;\\dots;q_K]$ , then $\\mathbf{q}:[0,2\\pi ]^M\\to \\{0,1\\} ^K$ is a symmetric function, indicating the occupancy of each interval by angles in $\\mathcal{U}$ .", + "bbox": [ + 169, + 762, + 823, + 808 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Define $\\zeta : \\{0,1\\}^K \\to S$ as $\\zeta(\\mathbf{q}) = \\left\\{\\frac{k-1}{K} : q_k \\geq 1\\right\\}$ which maps the occupancy vector to a set which contains the left end of each angle interval. It is straightforward to show:", + "bbox": [ + 169, + 811, + 823, + 842 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\zeta (\\mathbf {q} (\\mathcal {U})) \\equiv \\tilde {\\mathcal {U}} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 447, + 844, + 823, + 862 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Let $\\gamma : \\mathbb{R}^K \\to \\mathbb{R}$ be a continuous function such that $\\gamma(\\mathbf{q}) = f(\\zeta(\\mathbf{q}))$ for $\\mathbf{q} \\in \\{0,1\\}^K$ . Then from Eq (6) and Eq (7),", + "bbox": [ + 169, + 864, + 823, + 893 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left| \\gamma (\\mathbf {q} (\\mathcal {U})) - f (\\mathcal {U}) \\right| (8) \\\\ = \\left| f \\left(\\zeta (\\mathbf {q} (\\mathcal {U}))\\right) - f (\\mathcal {U}) \\right| < \\epsilon (8) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 893, + 823, + 926 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/72e8bc6c21ce842713cf2f761952fd837f95ca87330b4317430bb398147d99b3.jpg", + "image_caption": [ + "Figure 5: A Toy 2D Example of Voints. Voints assume view-dependency for every 3D point. 
Here, we look at a single 2D point at the center with a circular function $g(u) = \\mathrm{sign}(\\cos u)$ from five arbitrary view-points $\\{u_j\\}_{j=1}^5$ . Trying to reduce $g$ to a single value based on $u_j$ projections undermines the underlying structure of $g$ . We take the full set $\\{(u_j, g(u_j))\\}_{j=1}^5$ as a representation of $g$ and learn a set function $f$ on these view-features for a more informative manner of representation aggregation." + ], + "image_footnote": [], + "bbox": [ + 271, + 102, + 702, + 349 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Note that $\\gamma (\\mathbf{q}(\\mathcal{U}))$ can be rewritten as follows:", + "bbox": [ + 171, + 462, + 516, + 478 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\gamma \\left(\\mathbf {q} \\left(\\mathcal {U}\\right)\\right) = \\gamma \\left(\\mathbf {q} \\left(u _ {1}, \\dots , u _ {M}\\right)\\right) \\\\ = \\gamma (\\operatorname {M A X} \\left(\\mathbf {h} \\left(u _ {1}\\right), \\dots , \\mathbf {h} \\left(u _ {M}\\right)\\right)) \\tag {9} \\\\ = (\\gamma \\circ \\operatorname {M A X}) \\left(\\mathbf {h} \\left(u _ {1}\\right), \\dots , \\mathbf {h} \\left(u _ {M}\\right)\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 484, + 821, + 535 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Since $\\gamma \\circ$ MAX is a symmetric function and from Eq (8) and Eq (9), we reach to the main result in Eq (5). This concludes the proof.", + "bbox": [ + 169, + 541, + 823, + 570 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.3 3D VOINT CLOUD", + "text_level": 1, + "bbox": [ + 171, + 585, + 356, + 599 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Plenoptic and Spherical Coordinate Functions. The Plenoptic function was first introduced by McMillan and Bishop (McMillan & Bishop, 1995) in 1995 as a general function that describes the visible world. 
The Plenoptic function $P$ is a continuous spherical function that describes the visibility at any Euclidean 3D point in space $(V_x, V_y, V_x)$ when looking into any direction $(\\theta, \\phi)$ across wavelength $\\lambda$ at time $t$ . It is defined as $p = P(\\theta, \\phi, \\lambda, V_x, V_y, V_x, t)$ . Such a remarkable and compact formulation covers all the images observed as just samples of the function $P$ . For fixed time and wavelength, the reduced Plenoptic function $P$ becomes $p = P(\\theta, \\phi, V_x, V_y, V_x,)$ which can describe any field in 3D space. This shortened formulation is what Neural Radiance Fields (NeRFs) (Mildenhall et al., 2020; Pumarola et al., 2021; Martin-Brualla et al., 2021) try to learn with MLPs to describe the radiance and RGB values in the continuous Euclidean space with a dependency on the view direction $(\\theta, \\phi)$ . In the same spirit of the Plenoptic function and NeRFs, the Voint cloud representation relies on the viewing angles $(\\theta, \\phi)$ to define the view-features. The problem with the plenoptic functions $P$ , and subsequently NeRFs, is that they are very high dimensional, and any attempt to densely represent the scene with discrete and fixed data will cause memory and compute issues (Yu et al., 2021; Pumarola et al., 2021). Unlike NERFs (Mildenhall et al., 2020) that define dense 3D volumes, we focus only on the surface of the 3D shapes with our Voint clouds representation. Our Voints are in the order of the sampled point cloud, offering a compact representation that allows for efficient computation and memory while maintaining the view-dependent component that facilitates view-based learning.", + "bbox": [ + 169, + 611, + 826, + 888 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "From Point Clouds to Voint Clouds. 
Implicit representation of 3D surfaces typically aims to learn an implicit function $g_{\\mathrm{s}}(\\mathbf{x}) : \\mathbb{R}^3 \\to \\mathbb{R}$ that define the Sign Distance Function", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "(SDF) or the occupancy in the continuous Euclidean space (Park et al., 2019; Mescheder et al., 2019). The 3D iso-surface is then defined as the set of all points $\\mathbf{x}$ that satisfy the condition $g_{\\mathrm{s}}(\\mathbf{x}) = 0$ (assuming $g_{\\mathrm{s}}(\\mathbf{x})$ as SDF hereafter). We define a surface 3D point cloud $\\mathcal{X} \\in \\mathbb{R}^{N \\times 3}$ , as a set of $N$ 3D points, where each point $\\mathbf{x}_i \\in \\mathbb{R}^3$ is represented by its 3D coordinates $(x_i, y_i, z_i)$ and satisfy the iso-surface condition as follows.", + "bbox": [ + 169, + 103, + 823, + 174 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {X} = \\left\\{\\mathbf {x} _ {i} \\in \\mathbb {R} ^ {3} \\mid g _ {\\mathrm {s}} (\\mathbf {x} _ {i}) = 0 \\right\\} _ {i = 1} ^ {N} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 196, + 823, + 217 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Here, we assume that surface points also depend on the view direction from which they are being observed. Specifically, there exists a continuous implicit spherical function $\\mathbf{g}(\\mathbf{x},\\mathbf{u}):$ $\\mathbb{R}^5\\to \\mathbb{R}^d$ that defines the features at each point $\\mathbf{x}$ depending on the view direction $\\mathbf{u}$ . 
Given a set of $M$ view-point directions $\\mathcal{U}\\in \\mathbb{R}^{M\\times 2}$ , a Voint $\\widehat{\\mathbf{x}}\\in \\mathbb{R}^{M\\times d}$ is a set of $M$ view-dependent features of size $d$ for the sphere centered at point $\\mathbf{x}$ . The Voint cloud $\\widehat{\\mathcal{X}}\\in \\mathbb{R}^{N\\times M\\times d}$ is the set of all $N$ Voints $\\widehat{\\mathbf{x}}$ .", + "bbox": [ + 169, + 232, + 826, + 316 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\mathbf {x}} _ {i} = \\left\\{\\mathbf {g} \\left(\\mathbf {x} _ {i}, \\mathbf {u} _ {j}\\right) \\in \\mathbb {R} ^ {d} \\mid \\mathbf {x} _ {i} \\in \\mathcal {X} \\right\\} _ {j = 1} ^ {M} \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 340, + 821, + 371 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\mathcal {X}} = \\left\\{\\widehat {\\mathbf {x}} _ {i} \\in \\mathbb {R} ^ {M \\times d} \\right\\} _ {i = 1} ^ {N}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 368, + 524, + 388 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Note that we typically do not have access to the underlying implicit function $\\mathbf{g}$ and we approximate it by 2D projection, feature extraction, and then un-projection as we show next.", + "bbox": [ + 169, + 402, + 826, + 433 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "1-Multi-View Projection. As mentioned earlier, a Voint combines multiple view-features of the same 3D point. These view-features come from a multi-view projection of the points by a point cloud renderer $\\mathbf{R}:\\mathbb{R}^{N\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times 3}$ that renders the point cloud $\\mathcal{X}$ from multiple view-points $\\mathcal{U}$ into $M$ images of size $H\\times W\\times 3$ . 
In addition to projecting the point cloud into the image space, $\\mathbf{R}$ defines the mapping $\\mathbf{B}\\in \\{0,\\dots,N\\}^{M\\times H\\times W}$ between each pixel to the N points and background it renders. Also, $\\mathbf{R}$ outputs the visibility binary matrix $\\mathbf{V}\\in \\{0,1\\}^{N\\times M}$ for each point from each view. Since not all points appear in all the views due to pixel discretization, the visibility score $\\mathbf{V}_{i,j}$ defines if the Voint $\\hat{\\mathbf{x}}_i$ is visible in the view $\\mathbf{u}_j$ . The matrix $\\mathbf{B}$ is crucial for unprojection, while $\\mathbf{V}$ is needed for defining meaningful operations on Voints.", + "bbox": [ + 169, + 438, + 823, + 578 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "2-Multi-View Feature Extraction. The rendered images are processed by a function $\\mathbf{C}:\\mathbb{R}^{M\\times H\\times W\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times d}$ that extracts image features. If $\\mathbf{C}$ is the identity function, all the view-features would be identical for each Voint (typically the RGB value of the corresponding point). However, the $\\mathbf{C}$ function can be a 2D network dedicated to the downstream task and can extract useful global and local features about each view.", + "bbox": [ + 169, + 584, + 823, + 655 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "3-Multi-View Unprojection. We propose a module $\\Phi_{\\mathbf{B}}:\\mathbb{R}^{M\\times H\\times W\\times d}\\to \\mathbb{R}^{N\\times M\\times d}$ that unprojects the 2D features from each pixel to be 3D view-features at the corresponding Voint. This is performed by using the mapping $\\mathbf{B}$ created by the renderer to form the Voint cloud features $\\widehat{\\mathcal{X}}$ . Note that the points are not necessarily visible from all the views, and some Voints that are not visible from any of the $M$ views will not receive any features. 
We post-process these empty points ( $\\sim 0.5\\%$ of points during inference) to be filled with nearest 3D neighbors features. The output Voint cloud features would be described as follows.", + "bbox": [ + 169, + 660, + 826, + 762 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\mathbf {x}} _ {i} = \\left\\{\\mathbf {g} _ {i, j,:} \\in \\mathbb {R} ^ {d} \\mid \\mathbf {x} _ {i} \\in \\mathcal {X}, \\mathbf {V} _ {i, j} = 1 \\right\\} _ {j = 1} ^ {M}\n$$\n", + "text_format": "latex", + "bbox": [ + 348, + 782, + 653, + 806 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {g} _ {:, j} = \\Phi_ {\\mathbf {B}} \\left(\\mathbf {C} \\left(\\mathbf {R} \\left(\\mathcal {X}, \\mathbf {u} _ {j}\\right)\\right), \\mathbf {B}\\right) \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 809, + 821, + 825 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\mathcal {X}} = \\left\\{\\widehat {\\mathbf {x}} _ {i} \\in \\mathbb {R} ^ {M \\times d} \\right\\} _ {i = 1} ^ {N}\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 828, + 508, + 849 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.4 VOINT OPERATIONS", + "text_level": 1, + "bbox": [ + 171, + 868, + 369, + 881 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "VointMax. In order to learn a neural network in the Voint space in the form dictated by Theorem 1, we need to define some basic differentiable operations on the Voint space. 
The", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/4b68f906dad809b2cdbb8df39da391275635ec0e528c03019df6e17bc56ee754.jpg", + "image_caption": [ + "Figure 6: VointNet Variants. We propose three variants of VointNet that use three different examples of VointConv operation $h_v$ : shared MLP (MLP), Graph Convolution (GCN), and Graph Attention (GAT). Here we highlight the main difference between VointNet (MLP) that shares the MLP on all the view-features and VointNet (GCN) that creates a fully connected graph on the view-features and learn an MLP on the edge view-features. VointNet (GAT) is similar to VointNet (GCN) in addition to learning attention weights for each view-feature in weighted average aggregation." + ], + "image_footnote": [], + "bbox": [ + 282, + 104, + 723, + 311 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "max operation on the Voint cloud can be defined as follows.", + "bbox": [ + 171, + 441, + 602, + 455 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\operatorname {V o i n t M a x} (\\widehat {\\mathbf {x}}) = \\max \\widehat {\\mathbf {x}} _ {i, j}, \\forall i, j \\\\ \\left(1 3\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 462, + 821, + 489 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {s . 
t .} i \\in 1, 2, \\dots , N, j \\in 1, 2, \\dots , M, \\mathbf {V} _ {i, j} = 1\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 489, + 656, + 505 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Equivalently, $\\mathrm{VointMax}(\\widehat{\\mathbf{x}}) = \\max_j\\left(\\widehat{\\mathbf{x}}_{:,j} - \\infty \\overline{\\mathbf{V}}_{:,j}\\right)$ , where $\\overline{\\mathbf{V}}$ is the complement of $\\mathbf{V}$ .", + "bbox": [ + 171, + 512, + 781, + 531 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "VointConv. We define the convolution operation $h_{\\mathrm{V}}: \\mathbb{R}^{N \\times M \\times d} \\to \\mathbb{R}^{N \\times M \\times d'}$ as any learnable function that operates on the Voint space with shared weights on all the Voints and has the view-features input size $d$ and outputs view-features of size $d'$ and consists of $l_{V}$ layers. Examples of this VointConv operation include the following operations applied only on the visible view-features: a shared MLP, a graph convolution, and a graph attention. We detail these operations later in Section A.6, which result in different non-exhaustive variants of VointNet.", + "bbox": [ + 169, + 539, + 823, + 640 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.5 LEARNING ON 3D VOINT CLOUDS", + "text_level": 1, + "bbox": [ + 171, + 657, + 472, + 671 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "VpointNet. Typical 3D point cloud classifiers with a feature max pooling layer work as in Eq (14), where $h_{\\mathrm{mlp}}$ and $h_{\\mathrm{Pconv}}$ are the MLP and point Convolutional $(1 \\times 1$ or edge) layers, respectively. 
This produces a K-class classifier $\\mathbf{F}$ .", + "bbox": [ + 169, + 683, + 823, + 724 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} (\\mathcal {X}) = h _ {\\operatorname {m l p}} \\left(\\max _ {\\mathbf {x} _ {i} \\in \\mathcal {X}} \\left\\{h _ {\\text {P c o n v}} \\left(\\mathbf {x} _ {i}\\right) \\right\\}\\right) \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 732, + 821, + 755 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Here, $\\mathbf{F}:\\mathbb{R}^{N\\times 3}\\to \\mathbb{R}^K$ produces the logits layer of the classifier with size $K$ . On the other hand, the goal of the VointNet model is to get multi-view point cloud features that can be used after which by any point cloud processing pipeline. The VointNet module $\\widehat{\\mathbf{F}}:\\mathbb{R}^{N\\times M\\times d}\\rightarrow \\mathbb{R}^{N\\times d}$ as follows.", + "bbox": [ + 169, + 763, + 823, + 821 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\mathbf {F}} (\\widehat {\\mathcal {X}}) = h _ {\\mathrm {P}} \\left(\\operatorname {V o i n t M a x} \\left(h _ {\\mathrm {V}} (\\widehat {\\mathcal {X}})\\right)\\right), \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 827, + 823, + 853 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.6 VOINTNET VARIANTS", + "text_level": 1, + "bbox": [ + 171, + 867, + 380, + 881 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We define the convolution operation $h_{\\mathrm{V}} \\colon \\mathbb{R}^{N \\times M \\times d} \\to \\mathbb{R}^{N \\times M \\times d'}$ in VointNet from Eq (15) as any learnable function that operates on the Voint space with shared weights on all the", + "bbox": [ + 169, + 893, + 823, + 924 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", 
+ "text": "17", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Voints and has the view-features input size $d$ and outputs view-features of size $d'$ and consists of $l_V$ layers. Examples of this VointConv operation include the following:", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Shared MLP. It is the most basic Voint neural network. For layer $l$ , the features of Voint i at view j is updated as follows to layer $l + 1$", + "bbox": [ + 169, + 138, + 825, + 167 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {h} _ {i, j} ^ {l + 1} = \\rho \\left(\\mathbf {h} _ {i, j} ^ {l} \\mathcal {W} _ {\\rho}\\right), \\forall i, j \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 172, + 823, + 199 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {s . t .} i \\in {1, 2, \\dots , N}, j \\in {1, 2, \\dots , M}, \\mathbf {V} _ {i, j} = 1\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 195, + 674, + 210 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\rho$ is the shared MLP with weights $\\mathcal{W}_{\\rho}$ followed by normalization and nonlinear function (e.g. ReLU) applied on all Voints independently at the visible views features for each Voint. This formulation extends the shared MLP formulation for PointNet (Qi et al., 2017a) to make the MLP shared across the Voints and the views-features.", + "bbox": [ + 169, + 215, + 826, + 271 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Graph Convolution (GCN). Just like how DGCNN (Wang et al., 2019c) extended PointNet (Qi et al., 2017a) by taking the neighborhood information and extract edge features, we extend the basic VointNet formulation in Eq (15). 
We define a fully connected graph for each Voint along the views dimension by creating a center virtual node connected to all the view features (similar to the classification token in ViT (Dosovitskiy et al., 2021)). This center virtual view-feature would be assigned the index $j = 0$ and can be initialized with zeros as the \"cls\" token in ViT (Dosovitskiy et al., 2021). Then, Voint graph convolution operation can be defined as follows to update the activations from layer $l$ to $l + 1$", + "bbox": [ + 169, + 277, + 823, + 390 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {h} _ {i, j} ^ {l + 1} = \\rho \\left(\\left(\\max _ {k} \\psi \\left(\\left(\\mathbf {h} _ {i, j} ^ {l}, \\mathbf {h} _ {i, k} ^ {l}\\right) \\mathcal {W} _ {\\psi}\\right)\\right) \\mathcal {W} _ {\\rho}\\right) \\forall i, j \\in \\{1, 2, \\dots , N - 1, 0, 1, M \\} \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 393, + 823, + 436 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\forall i, j, k \\quad \\text {s . t .} \\quad i \\in 1, 2, \\dots , N, j \\in 0, 1, \\dots , M \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 422, + 821, + 445 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nk \\in 0, 1, \\dots , M, k \\neq j, \\mathbf {V} _ {i, j} = 1\n$$\n", + "text_format": "latex", + "bbox": [ + 410, + 446, + 651, + 464 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\rho, \\psi$ are two different shared MLPs as in Eq (16). The difference between VointNet (MLP) and VointNet (GCN) is highlighted in Figure 6.", + "bbox": [ + 169, + 468, + 823, + 498 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Graph Attention (GAT). Similar to how Point Transformer (Zhao et al., 2020) extended the graph convolution by adding attention to DGCNN (Wang et al., 2019c), we extend the basic Voint GraphConv formulation in Eq (17). 
Voint graph attention operation can be defined as follows to update the activations from layer $l$ to $l + 1$", + "bbox": [ + 169, + 503, + 823, + 560 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {h} _ {i, j} ^ {l + 1} = \\rho \\left(\\left(\\sum_ {k = 0, k \\neq j} ^ {M} \\eta_ {k} \\psi \\left((\\mathbf {h} _ {i, j} ^ {l}, \\mathbf {h} _ {i, k} ^ {l}) \\mathcal {W} _ {\\psi}\\right)\\right) \\mathcal {W} _ {\\rho}\\right) \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 565, + 823, + 617 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\forall i, j \\mathrm {s . t .} i \\in 1, 2, \\dots , N, j \\in 0, 1, \\dots , M\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 616, + 607, + 632 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\eta_ {k} = \\zeta \\left(\\mathbf {h} _ {i, k} ^ {l} \\mathcal {W} _ {\\zeta}\\right), \\mathbf {V} _ {i, j} = 1\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 635, + 593, + 654 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\rho, \\psi, \\zeta$ are three different shared MLPs as in Eq (16), and $\\eta_{k}$ are the learned attention weights for each neighbor view-feature.", + "bbox": [ + 169, + 659, + 823, + 686 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B DETAILED EXPERIMENTAL SETUP", + "text_level": 1, + "bbox": [ + 171, + 705, + 509, + 722 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B.1 DATASETS", + "text_level": 1, + "bbox": [ + 171, + 737, + 295, + 751 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "ScanObjectNN: 3D Point Cloud Classification. We follow the literature (Goyal et al., 2021; Hamdi et al., 2021) on testing 3D classification in the challenging ScanObjectNN (Uy et al., 2019) point cloud dataset, since it includes background and considers occlusions. The dataset is composed of 2902 point clouds divided into 15 object categories. 
We use 2048 sampled points per object for Voint learning. We benchmark on its variants: Object only, Object with Background, and the Hardest perturbed variant (PB_T50_RS variant). Visualization is provided in Figure 7 of some of the renderings used in training the 2D backbone in our pipeline.", + "bbox": [ + 169, + 762, + 826, + 875 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "ShapeNet Core55: 3D Shape Retrieval. The shape retrieval challenge SHREC (Sfikas et al., 2017) uses ShapeNet Core55, a subset of ShapeNet (Chang et al., 2015), for benchmarking. The dataset consists of 51,162 3D mesh objects labeled with 55 object classes. The", + "bbox": [ + 169, + 881, + 826, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "training, validation, and test sets consist of 35764, 5133, and 10265 shapes. We create a dataset of point clouds by sampling 5000 points from each mesh object as in MVTN (Hamdi et al., 2021).", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "ShapeNet Parts: 3D Part Segmentation. ShapeNet Parts is a subset of ShapeNet (Chang et al., 2015) that consists of 13,998 point cloud objects for train and 2,874 objects for the test from 16 categories and 50 parts. It is designed for the part segmentation task (Yi et al., 2016). Visualization is provided in Figure 10 of some of the renderings used in training the 2D backbone in our pipeline colored with the ground truth segmentation labels.", + "bbox": [ + 169, + 152, + 826, + 224 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "ModelNet40: 3D Shape Classification Occlusion Robustness. 
ModelNet40 (Wu et al., 2015) is composed of 12,311 3D objects (9,843/2,468 in training/testing) labelled with 40 object classes. We sample 2048 points clouds from the objects following previous works (Qi et al., 2017b; Zhao et al., 2020). Visualization is provided in Figure 8 of some of the renderings used in training the 2D backbone in our pipeline.", + "bbox": [ + 169, + 229, + 826, + 301 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B.2 METRICS", + "text_level": 1, + "bbox": [ + 171, + 319, + 290, + 332 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Classification Accuracy. The standard evaluation metric in 3D classification is accuracy. We report overall accuracy (percentage of correctly classified test samples) and average per-class accuracy (mean of all true class accuracies).", + "bbox": [ + 169, + 345, + 826, + 390 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Retrieval mAP. Shape retrieval is evaluated by mean Average Precision (mAP) over test queries. For every query shape $\\mathbf{S}_q$ from the test set, AP is defined as $AP = \\frac{1}{\\mathrm{GTP}}\\sum_{n}^{N}\\frac{\\mathbb{1}(\\mathbf{S}_n)}{n}$ , where $GTP$ is the number of ground truth positives, $N$ is the size of the ordered training set, and $\\mathbb{1}(\\mathbf{S}_n) = 1$ if the shape $\\mathbf{S}_n$ is from the same class label of query $\\mathbf{S}_q$ . We average the retrieval AP over the test set to measure retrieval mAP.", + "bbox": [ + 169, + 395, + 826, + 469 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Segmentation mIoU. Semantic Segmentation is evaluated by mean Intersection over Union (mIoU) over pixels or points. For every class label, measure the size of the intersection mask between the ground truth points of that label and the predicted points as that label. Then, divide by the size of the union mask of the same label to get IoU. This procedure is repeated over all the labels, and averaging the IoUs gives mIoU. 
We report two types of mIoUs: Instance-averaged mIoU (averages all mIoUs across all objects) and Category-averaged mIoU (averages all mIoU from shapes of the same category, and then average those across object categories).", + "bbox": [ + 169, + 474, + 826, + 588 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B.3 BASELINES", + "text_level": 1, + "bbox": [ + 171, + 606, + 302, + 619 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Point Cloud Networks. We include PointNet (Qi et al., 2017a), PointNet++ (Qi et al., 2017b), DGCNN (Wang et al., 2019c), PVNet (You et al., 2018), and KPConv (Thomas et al., 2019), Point Transformer (Zhao et al., 2020) and CurveNet (Xiang et al., 2021) as baselines that use point clouds. These methods leverage different convolution operators on point clouds by aggregating local and global point information.", + "bbox": [ + 169, + 633, + 826, + 704 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Multi-View Networks. We also compare against multi-view classification approaches like MVCNN (Su et al., 2015) and MVTN (Hamdi et al., 2021) as baselines for classification and retrieval. Since there is no available multi-view pipeline for 3D part segmentation, we adopt some of the multi-view segmentation baselines (e.g. Label Fusion (Wang et al., 2019a) and Mean Fusion (Kundu et al., 2020)) for part segmentation to work in the Voint space.", + "bbox": [ + 169, + 709, + 823, + 781 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B.4 IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 799, + 419, + 811 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Rendering and Un-Projection. We choose the differentiable point cloud renderer $\\mathbf{R}$ from Pytorch3D (Ravi et al., 2020) in our pipeline for its speed and compatibility with Pytorch libraries (Paszke et al., 2017). We render multi-view images with size $224 \\times 224 \\times 3$ . 
We color the points by their normals' values or keep them white if the normals are not available. Following a similar procedure to (Wei et al., 2020; Hamdi et al., 2021), the view-point setup is randomized during training (using $M = 8$ views) and fixed to spherical views in testing (using $M = 12$ views).", + "bbox": [ + 169, + 825, + 826, + 926 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/b72ea466f6ac3834f0aaa6df8cef30defba9035c4d7883401a3643a21dbd1009.jpg", + "image_caption": [ + "Figure 7: ScanObjectNN Variants. We show examples of point cloud renderings of different variants of the ScanObjectNN (Uy et al., 2019). These renderings are used in training VointNet for 3D point cloud classification." + ], + "image_footnote": [], + "bbox": [ + 176, + 99, + 805, + 474 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Architectures. For the 2D backbone, we use ViT (Dosovitskiy et al., 2021) (with pretrained weights from TIMM library (Wightman, 2019)) for classification and DeepLabV3 (Chen et al., 2018) for segmentation. We used parallel heads for each object category for part segmentation since the task is solely focused on parts. We use the 3D cross-entropy loss on the 3D point cloud output and the 2D cross-entropy loss when the loss is defined on the pixels. When used, the linear tradeoff coefficient of the 2D loss term is set to 0.003. To balance the frequency of objects in part segmentation, we multiply the loss by the frequency of the object class of each object we segment. The feature dimension of the VointNet architectures is $d = 64$ , and the depth is $l_{V} = 4$ layers in $h_V$ . The main results are based on VointNet (MLP) variant unless otherwise specified. 
The coordinates $\\mathbf{x}$ can be optionally appended to the input view-features $\\hat{\\mathbf{x}}$ , which can improve the performance but reduce the rotation robustness as we show later in Section C.1 and Table 9.", + "bbox": [ + 169, + 556, + 823, + 723 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Training Setup. We train our pipeline in two stages, where we start by training the 2D backbone on the 2D projected labels of the points, then train the full pipeline end-to-end while focusing the training on the VointNet part. We use the AdamW optimizer (Loshchilov & Hutter, 2017) with an initial learning rate of 0.0005 and a step learning rate schedule of $33.3\\%$ every 12 epochs for 40 epochs. The pipeline is trained with one NVIDIA Tesla V100 GPU. We do not use any data augmentation.", + "bbox": [ + 169, + 729, + 826, + 814 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 508, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/37f5bed1a1b60474aaf51dd5ca7f2d0a5227449d29ac440e1539667849c0ab69.jpg", + "image_caption": [ + "Figure 8: ModelNet40. We show some examples of point cloud renderings of ModelNet40 (Wu et al., 2015) used for 3D classification robustness in our setup." + ], + "image_footnote": [], + "bbox": [ + 179, + 186, + 821, + 402 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/67afcf6005c307d2f1f9b41ca7f0088ec0f12e2feb5ef06c405e1dae574656c4.jpg", + "image_caption": [ + "Figure 9: ShapeNet Core55. We show some examples of point cloud renderings of ShapeNet Core55 (Chang et al., 2015) used for 3D shape retrieval in our setup." 
+ ], + "image_footnote": [], + "bbox": [ + 179, + 623, + 818, + 795 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 508, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/e89f3da343521484611f5cf0479ba70b7aecbbbadd1cf045bfef60c2f4c38f75.jpg", + "image_caption": [ + "Figure 10: ShapeNet Parts. We show some examples of point cloud renderings of ShapeNet Parts (Yi et al., 2016) colored with ground truth segmentation labels. We use these renderings as 2D ground truth to pre-train the 2D backbone $\\mathbf{C}$ for 2D segmentation before training VointNet's pipeline for 3D segmentation." + ], + "image_footnote": [], + "bbox": [ + 191, + 308, + 805, + 645 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 508, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/750c7a72dd8d9e59d54fdfc951b9e3037f1c65fd56ebb86bf7e9a166ab3143c6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodData TypeClassification \nModelNet40Shape Retrieval \nShapeNetCore
PointNet (Qi et al., 2017a)Points89.2-
PointNet++ (Qi et al., 2017b)Points91.9-
DGCNN (Wang et al., 2019c)Points92.2-
KPConv (Thomas et al., 2019)Points92.9-
PCT (Guo et al., 2021)Points93.3-
CurveNet (Xiang et al., 2021)Points93.8-
ReVGG (Sfikas et al., 2017)M-View-74.9
MVCNN (Su et al., 2015)M-View90.173.5
ViewGCN (Wei et al., 2020)M-View93.378.4
MVTN (Hamdi et al., 2021)M-View93.882.9
VointNet (ours)Voints92.883.3
", + "bbox": [ + 210, + 99, + 781, + 310 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/41246f32cbd2857f30db87a511231e2d1edb924ba5054097da9bb72efc2a5bd0.jpg", + "table_caption": [ + "Table 7: 3D Shape Classification and Retrieval. We report VointNet's classification accuracy on ModelNet40 (Wu et al., 2015) and its 3D shape retrieval mAP on ShapeNet Core55 (Chang et al., 2015; Sfikas et al., 2017). Baseline results are reported from (Hamdi et al., 2021; Zhao et al., 2020; Xiang et al., 2021)." + ], + "table_footnote": [], + "table_body": "
MethodRotation Perturbations Range
±90°±180°
PointNet (Qi et al., 2017a)88.742.538.6
PointNet++ (Qi et al., 2017b)88.247.939.7
RSCNN (Liu et al., 2019a)90.390.390.3
MVTN (Hamdi et al., 2021)91.790.891.2
VointNet (ours)91.590.991.1
", + "bbox": [ + 227, + 386, + 772, + 518 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Table 8: Rotation Robustness for 3D Classification. At test time, we randomly rotate objects in ModelNet40 (Wu et al., 2015) around the Y-axis (gravity) with different ranges and report the overall accuracy.", + "bbox": [ + 169, + 527, + 823, + 568 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "C ADDITIONAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 594, + 408, + 609 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "C.1 MODEL ROBUSTNESS", + "text_level": 1, + "bbox": [ + 171, + 626, + 375, + 640 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Rotation Robustness for 3D Classification. We follow the standard practice in 3D shape classification literature by testing the robustness of trained models to perturbations at test time (Liu et al., 2019a; Hamdi et al., 2021). We perturb the shapes with random rotations around the Y-axis (gravity-axis) contained within $\\pm 90^{\\circ}$ and $\\pm 180^{\\circ}$ and report the test accuracy over ten runs in Table 8.", + "bbox": [ + 169, + 652, + 823, + 723 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Rotation Robustness for 3D Segmentation. We follow the previous 3D literature by testing the robustness of trained models to perturbations at test time (Liu et al., 2019a; Hamdi et al., 2021; 2020). We perturb the shapes in ShapeNet Parts with random rotations in $SO(3)$ at test time (ten runs) and report Ins. mIoU in Table 9. Note how our VointNet performance largely exceeds the baselines in this realistic unaligned scenario. We can augment the training with rotated objects for the baselines, which improves their robustness, but loses performance on the unrated setup. Adding xyz coordinates to the view-features of VointNet improves the performance on an unrotated setup but negatively affects the robustness to rotations. 
The discrepancy between the Voint results and the results of some point cloud methods is that Voints heavily depend on the underlying 2D backbone and inherit all its biases, especially those from pretraining. Hence, the 2D backbone limits what the performance can reach with VointNet. We study the effect of the backbone in detail in Section C.2. Figure 11 shows qualitative 3D segmentation results for VointNet and Mean Fuse (Kundu et al., 2020) as compared to the ground truth.", + "bbox": [ + 169, + 729, + 826, + 924 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 508, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/c0f67dd099eabf40717ae821ea6c91cf6e25d08f174f16aa5741910c05ec8464.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Ground TruthVointNet (ours)Mean Fuse (Kundu et al., 2020)
", + "bbox": [ + 176, + 178, + 808, + 777 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Figure 11: Qualitative Comparison for 3D Part Segmentation. We compare our VointNet 3D segmentation prediction to Mean Fuse (Kundu et al., 2020) that is using the same trained 2D backbone. Note how VointNet distinguishes detailed parts (e.g. the car window frame). Beware that visualization colors can shift if an extra label is predicted (e.g. the motorbike labels are correct).", + "bbox": [ + 169, + 787, + 823, + 840 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 508, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/995f26b33c9c01e09924d342dbc90441cbc64347a4d93936f5be014a04e28cca.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodSegmentation UnrotatedUnder Rotation Rotated
PointNet (Qi et al., 2017a)80.136.6 ±0.2
DGCNN (Wang et al., 2019c)80.137.1 ±0.2
PointNet + Aug.65.865.8 ±0.1
DGCNN + Aug.60.760.7 ±0.2
Mean Fuse (Kundu et al., 2020)79.161.6 ±0.1
Label Fuse (Wang et al., 2019a)78.961.0 ±0.1
VointNet (w/o xyz)79.665.4 ±0.1
VointNet (w/o xyz) + Aug.68.068.5 ±0.1
VointNet (w/ xyz)81.261.5 ±0.2
", + "bbox": [ + 227, + 99, + 772, + 305 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 9: Rotation Robustness for 3D Part Segmentation. At test time, we randomly rotate objects from ShapeNet Parts (Yi et al., 2016) and report the Ins. mIoUs of our VointNet compared to trained PointNet (Qi et al., 2017a) and DGCNN (Wang et al., 2019c). Note how VointNet's performance largely exceeds the baselines in realistic unaligned scenarios, highlighting the benefit of view dependency. If we use rotation augmentation in training for the baselines, the rotated performance improves, but the unrotated performance drops.", + "bbox": [ + 169, + 315, + 823, + 395 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "C.2 DETAILED ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 436, + 372, + 450 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Effect of Pretraining. We study the effect of pretraining the 2D backbone C for 3D classification on ModelNet40. Training a ViT with Mean Fuse for 3D classification on ModelNet40 obtains 92.2 test Acc. with ImageNet pretraining and 80.0 test Acc. from scratch. Other multi-view networks, e.g. MVCNN (Su et al., 2015), ViewGCN(Wei et al., 2020), and MVTN(Hamdi et al., 2021) all use ImageNet pretraining, which is not unique to Voints.", + "bbox": [ + 169, + 469, + 823, + 554 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Classification Backbone. We study the effect of ablating the 2D backbone C for 3D classification on ModelNet40. We show in Table 10 the performance of VointNet (MLP) when Vit-B (Dosovitskiy et al., 2021) and ResNet-18 (He et al., 2015) are used. We also show that following the per-point classification setup instead of the per-shape for 3D shape classification leads to worse performance for VointNet and the naive multi-view. 
This is why we used the per-shape approach when adopting VointNet for 3D classification (using one Voint for the entire shape).", + "bbox": [ + 169, + 560, + 823, + 660 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Number of points and visibility. Table 11 studies the effect of point number on 3D part segmentation performance, when different numbers of views are used. The visibility ratio is also reported in each case.", + "bbox": [ + 169, + 665, + 823, + 708 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Points color. We colored the points with ground truth normals as in Figure 16, when they are available (ShapeNet Parts), and we used white colors as in Figure 9, when other baselines do not use normals. We ablate the color of the points on VointNet (MLP) with normals colors, white color, and NOCs colors (Wang et al., 2019b). We obtain the following segmentation mIoU results: (normals: 80.6), (white: 74.7), and (NOCs: 57.9).", + "bbox": [ + 169, + 713, + 823, + 786 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Time and Memory Requirements. To assess the contribution of the Voint module, we take a macroscopic look at the time and memory requirements of each component in the pipeline. We record the number of floating-point operations (GFLOPs) and the time of a forward pass for a single input sample. In Table 12, the VointNet module contributes negligibly to the memory requirements compared to multi-view and point networks.", + "bbox": [ + 169, + 790, + 823, + 863 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Feature Size $(d)$ . We study the effect of the feature size $d$ on the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 12. 
We note that the performance peaks at $d = 128$ , but it is close to what we use in the main results $(d = 64)$ .", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/5b8250558e6b8a4d19403fa499c51cae4773de01bff308653a28becf579f32e7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
View Aggregation2D Backbone
ResNet18 (per-shape)ViT-B (per-shape)DeepLabV3 (per-point)
VointNet91.292.810.2
", + "bbox": [ + 274, + 101, + 722, + 172 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/cc83f3828e47c7e918fde7fc94ade7a6d2d77e3e267c06ff9562780583fa4ed7.jpg", + "table_caption": [ + "Table 10: Ablation Study for 3D Classification. We study the effect of different 2D backbone for ModelNet40 3D classification task. We compare VointNet's performance to naive multi-view (e.g. MVCNN (Su et al., 2015) or Mean Fuse (Kundu et al., 2020)) using the same 2D backbone. Note that using the per-point classification setup instead of the per-shape for 3D shape classification leads to worse performance for VointNet and the naive multi-view." + ], + "table_footnote": [], + "table_body": "
Points #MetricNumber of Views
24812
500visibility99.199.9100100
mIoU69.273.976.076.4
1000visibility98.099.7100100
mIoU69.574.376.577.1
2000visibility95.799.299.899.9
mIoU69.775.077.778.5
", + "bbox": [ + 299, + 262, + 694, + 411 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Table 11: Analysis on Number of Points and Visibility. We show the Instance mIoUs and visibility ratio $(1 - \\frac{\\text{empty}}{\\text{total}})\\%$ of our VointNet on ShapeNet Parts when varying points # and number of views.", + "bbox": [ + 169, + 417, + 823, + 458 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Model Depth $(l_v)$ . We study the effect of the model depth $l_v$ on the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 13. We note that model depth of VointNet does not enhance the performance significantly. Our choice of $l_v = 4$ balances the performance and the memory/computations requirements of VointNet (MLP).", + "bbox": [ + 169, + 484, + 823, + 556 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Distance to the Object. We study the effect of distance to the object in rendering as in Figure 17 to the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 14. We note that our default choice of 1.0 is actually reasonable. This choice of distance shows the object entirely (as illustrated in Figure 17), but also cover the details needed for small parts segmentation (see Figure 11).", + "bbox": [ + 169, + 561, + 823, + 647 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Image Size $(H,W)$ . We study the effect of the image size $H\\& W$ on the performance of Mean Fuse (Kundu et al., 2020) baseline when training the 2D backbone for 3D part segmentation. We plot the results (with confidence intervals) in Figure 15.", + "bbox": [ + 169, + 651, + 823, + 696 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Number of Views on Classification. 
We study the effect of the number of views (M) on classification accuracy on ModelNet40 Wu et al. (2015) of VointNet and report results in Table 13.", + "bbox": [ + 169, + 700, + 823, + 743 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Unprojection Operation Speed. We evaluate the speed of the unprojection operation $\\Phi_{\\mathbf{B}}$ and report average latency of 10,000 runs (in ms) in Table 14.", + "bbox": [ + 169, + 750, + 823, + 779 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Point Rendering Speed. We evaluate the speed of the point cloud renderer $\\mathbf{R}$ used in the Voint pipeline from Pytorch3D Ravi et al. (2020) and report average latency of 1,000 renderings (in ms/image) in Table 15.", + "bbox": [ + 169, + 785, + 823, + 829 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "C.3 VISUALIZATIONS", + "text_level": 1, + "bbox": [ + 171, + 844, + 339, + 858 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "In Figure 16 and 17, we visualize the multi-view renderings of the point clouds along with the 2D learned features based on the DeepLabV3 (Chen et al., 2018) backbone. These features are then unprojected and transformed by VointNet to obtain 3D semantic labels.", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/d829431daf1246a7ceb47159b5c196960df1873d4a760e44d8ce9658fb1116aa.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
NetworkGFLOPsTime (ms)Parameters # (M)
MVCNN (Su et al., 2015)43.7239.8911.20
ViewGCN (Wei et al., 2020)44.1926.0623.56
ResNet 18 (He et al., 2015)3.643.7011.20
ResNet 50 (He et al., 2015)8.249.4223.59
ViT-B (Dosovitskiy et al., 2021)33.7012.4686.57
ViT-L (Dosovitskiy et al., 2021)119.3029.28304.33
FCN (Long et al., 2015)53.1310.3432.97
DeeplabV3 (Chen et al., 2018)92.6120.6258.64
PointNet (Qi et al., 2017a)1.784.243.50
DGCNN (Wang et al., 2019c)10.420.9516.350
MVTN (Hamdi et al., 2021)1.784.243.5
VointNet (MLP)1.902.900.04
VointNet (GCN)16.1832.100.05
VointNet (GAT)32.0568.710.07
Full Voint pipeline94.5123.5058.68
", + "bbox": [ + 225, + 167, + 772, + 404 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Table 12: Time and Memory Requirements. We assess the contribution of the Voint module to the time and memory requirements in the multi-view and point cloud pipeline. Note that VointNet (shared MLP) is almost 100 times smaller than PointNet (Qi et al., 2017a).", + "bbox": [ + 169, + 414, + 823, + 455 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/3413317cf245b6b22fd5a908edc91f26e73ed14cffeb764360672238e7bce506.jpg", + "image_caption": [ + "Figure 12: The Effect of Feature Size $d$ . We plot Ins. mIoU of 3D segmentation vs. the feature size $d$ used in training on ShapeNet Parts (Yi et al., 2016). We note that the performance peaks at $d = 128$ , but it is close to what we use in the main results ( $d = 64$ )." + ], + "image_footnote": [], + "bbox": [ + 299, + 619, + 660, + 801 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 506, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/01e61314143ad25c176cabb5dc11f6aaf12fcedd834931452f72552917cdb345.jpg", + "image_caption": [ + "Figure 13: The Effect of Model Depth $l_{v}$ . We plot Ins. mIoU of 3D segmentation vs. the model depth $l_{v}$ used in training on ShapeNet Parts (Yi et al., 2016). We note that model depth of VointNet does not enhance the performance significantly. Our choice of $l_{v} = 4$ balances the performance and the memory/computations requirements of VointNet (MLP)." + ], + "image_footnote": [], + "bbox": [ + 308, + 125, + 660, + 305 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/1b0ad412327d57e53c249108a0b45a1fb42553e42323dd97babc2396302b514e.jpg", + "image_caption": [ + "Figure 14: The Effect of Distance to the Object. We plot Ins. 
mIoU of 3D segmentation vs. the distance to the object used in inference on ShapeNet Parts (Yi et al., 2016). We note that our default choice of 1.0 is actually reasonable. This choice of distance shows the object entirely (as illustrated in Figure 17), but also cover the details needed for small parts segmentation (see Figure 11)." + ], + "image_footnote": [], + "bbox": [ + 299, + 405, + 660, + 588 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/e3aa40f0be56a237c5c00ea85726607b465624d13e968d151ab38d5a45b193ed.jpg", + "image_caption": [ + "Figure 15: The Effect of Image Size $H, W$ . We plot Ins. mIoU of 3D segmentation vs. the image size used in inference on ShapeNet Parts (Yi et al., 2016)." + ], + "image_footnote": [], + "bbox": [ + 299, + 700, + 660, + 882 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/177dcc285a4a120e89a761450d372408dc02ebe73f3aab8f9b2f0e8c97be5aba.jpg", + "image_caption": [ + "Figure 16: Multi-view Projected Segmentation 1. We show how, after rendering points, we can segment in the image space. For each example, we show (INPUT): the projections of the points (colored with normals) used in training with random view-points. (PRED 2D): the segmentation prediction of the 2D backbone (DeepLabV3) (Chen et al., 2018). (PRED 3D): the unprojected 3D segmentation prediction. $(GT)$ : the 3D segmentation ground truth." 
+ ], + "image_footnote": [], + "bbox": [ + 171, + 268, + 808, + 679 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/5399f426231ab43347b5b869edd7def9af0be33cbd699d4076f8bc3b337f0b29.jpg", + "image_caption": [ + "Figure 17: Multi-view Projected Segmentation 2. We show how, after rendering points, we can segment in the image space. For each example, we show (INPUT): the projections of the points (colored with normals) used in training with random view-points. (PRED 2D): the segmentation prediction of the 2D backbone (DeepLabV3) (Chen et al., 2018). (PRED 3D): the unprojected 3D segmentation prediction. $(GT)$ : the 3D segmentation ground truth." + ], + "image_footnote": [], + "bbox": [ + 169, + 268, + 813, + 679 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/7fe5f73dd0d8516e3571c811e3776eddb0ec78c3b93a42b81b5126691366d2e7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodNumber of Views
46810
VointNet (Cls. Acc.)90.390.892.092.3
", + "bbox": [ + 292, + 178, + 707, + 244 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/b5fa20ab585c027c5157c8a5ec86b579f3ba75389f4a0bb7c53af833c0695071.jpg", + "table_caption": [ + "Table 13: Effect of the Number of Views on Classification. We report the classification accuracy of VointNet vs. the number of views (M) used in the training on ModelNet40." + ], + "table_footnote": [], + "table_body": "
MethodNumber of Views
124681012
Features Unprojection3.05.311.4515.717.229.724.0
Labels Unprojection2.62.53.43.13.03.23.6
", + "bbox": [ + 207, + 444, + 790, + 525 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/6e94e575c88bc9441fdd34764c3925897250def2c383d42d063aecb560801196.jpg", + "table_caption": [ + "Table 14: Unprojection Operation Speed. We report the average latency (in ms) over 10,000 runs of the unprojection operation with its two forms: features unprojection (used in mean) and labels unprojection (used in mode)." + ], + "table_footnote": [], + "table_body": "
CriteriaNumber of Points
1e21e31e41e51e6
Point Rendering Speed (ms/image)7.27.67.710.437.7
", + "bbox": [ + 207, + 738, + 790, + 806 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Table 15: Point Rendering Speed. We report the average rendering speed (in ms/image) over 1,000 renderings of the point cloud renderer Ravi et al. (2020) used in Voint clouds.", + "bbox": [ + 169, + 815, + 823, + 843 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 508, + 47 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 30 + } +] \ No newline at end of file diff --git a/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_model.json b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_model.json new file mode 100644 index 0000000000000000000000000000000000000000..8ce14fdd669f3e6e1f61babc5411987f73ff3c28 --- /dev/null +++ b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_model.json @@ -0,0 +1,4101 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.829, + 0.149 + ], + "angle": 0, + "content": "VOINT CLOUD: MULTI-VIEW POINT CLOUD REPRESENTATION FOR 3D UNDERSTANDING" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.17, + 0.326, + 0.185 + ], + "angle": 0, + "content": "Abdullah Hamdi" + }, + { + "type": "text", + "bbox": [ + 0.433, + 0.17, + 0.56, + 0.184 + ], + "angle": 0, + "content": "Silvio Giancola" + }, + { + "type": "text", + "bbox": [ + 0.668, + 0.171, + 0.815, + 0.184 + ], + "angle": 0, + "content": "Bernard Ghanem" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.206, + 0.9, + 0.235 + ], + "angle": 0, + "content": "King Abdullah 
University of Science and Technology (KAUST), Thuwal, Saudi Arabia {abdullah.hamdi, silvio.giancola, bernard.ghanem}@kaust.edu.sa" + }, + { + "type": "title", + "bbox": [ + 0.45, + 0.271, + 0.548, + 0.287 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.304, + 0.769, + 0.513 + ], + "angle": 0, + "content": "Multi-view projection methods have demonstrated promising performance on 3D understanding tasks like 3D classification and segmentation. However, it remains unclear how to combine such multi-view methods with the widely available 3D point clouds. Previous methods use unlearned heuristics to combine features at the point level. To this end, we introduce the concept of the multi-view point cloud (Voint cloud), representing each 3D point as a set of features extracted from several view-points. This novel 3D Voint cloud representation combines the compactness of 3D point cloud representation with the natural view-awareness of multi-view representation. Naturally, we can equip this new representation with convolutional and pooling operations. We deploy a Voint neural network (VointNet) to learn representations in the Voint space. Our novel representation achieves state-of-the-art performance on 3D classification, shape retrieval, and robust 3D part segmentation on standard benchmarks (ScanObjectNN, ShapeNet Core55, and ShapeNet Parts).1" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.541, + 0.342, + 0.556 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.572, + 0.828, + 0.768 + ], + "angle": 0, + "content": "A fundamental question in 3D computer vision and computer graphics is how to represent 3D data (Mescheder et al., 2019; Qi et al., 2017a; Maturana & Scherer, 2015). This question becomes particularly vital given how the success of deep learning in 2D computer vision has pushed for the wide adoption of deep learning in 3D vision and graphics. 
In fact, deep networks already achieve impressive results in 3D classification (Hamdi et al., 2021), 3D segmentation (Hu et al., 2021), 3D detection (Liu et al., 2021a), 3D reconstruction (Mescheder et al., 2019), and novel view synthesis (Mildenhall et al., 2020). 3D computer vision networks either rely on direct 3D representations, indirect 2D projection on images, or a mixture of both. Direct approaches operate on 3D data commonly represented with point clouds (Qi et al., 2017a), meshes (Feng et al., 2019), or voxels (Choy et al., 2019). In contrast, indirect approaches commonly render multiple 2D views of objects or scenes (Su et al., 2015), and process each image with a traditional 2D image-based architecture. The human visual system is closer to such a multi-view indirect approach for 3D understanding, as it receives streams of rendered images rather than explicit 3D data." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.773, + 0.828, + 0.901 + ], + "angle": 0, + "content": "Tackling 3D vision tasks with indirect approaches has three main advantages: (i) mature and transferable 2D computer vision models (CNNs, Transformers, etc.), (ii) large and diverse labeled image datasets for pre-training (e.g. ImageNet (Russakovsky et al., 2014)), and (iii) the multi-view images give context-rich features based on the viewing angle, which are different from the geometric 3D neighborhood features. Multi-view approaches achieve impressive performance in 3D shape classification and segmentation (Wei et al., 2020; Hamdi et al., 2021; Dai & Nießner, 2018). However, the challenge with the multi-view representation (especially for dense predictions) lies in properly aggregating the per-view features with 3D point clouds. 
The appropriate aggregation is necessary to obtain representative 3D point" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.629, + 0.925 + ], + "angle": 0, + "content": "1The code is available at https://github.com/ajhamdi/vointcloud" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.508, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.1, + 0.753, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.306, + 0.828, + 0.361 + ], + "angle": 0, + "content": "Figure 1: 3D Voint Clouds. We propose the multi-view point cloud (Voint cloud), a novel 3D representation that is compact and naturally descriptive of view projections of a 3D point cloud. Each point in the 3D cloud is tagged with a Voint, which accumulates view-features for that point. Note that not all 3D points are visible from all views. The set of Voints constructs a Voint cloud." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.452, + 0.825, + 0.563 + ], + "angle": 0, + "content": "clouds with a single feature per point suitable for typical point cloud processing pipelines. Previous multi-view works rely on heuristics (e.g. average or label mode pooling) after mapping pixels to points (Kundu et al., 2020; Wang et al., 2019a), or multi-view fusion with voxels (Dai & Nießner, 2018). Such setups might not be optimal for a few reasons. (i) Such heuristics may aggregate information of misleading projections that are obtained from arbitrary view-points. For example, looking at an object from the bottom and processing that view independently can carry wrong information about the object's content when combined with other views. (ii) The views lack geometric 3D information." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.57, + 0.827, + 0.807 + ], + "angle": 0, + "content": "To this end, we propose a new hybrid 3D data structure that inherits the merits of point clouds (i.e. compactness, flexibility, and 3D descriptiveness) and leverages the benefits of rich perceptual features of multi-view projections. We call this new representation multi-view point cloud (or Voint cloud) and illustrate it in Figure 1. A Voint cloud is a set of Voints, where each Voint is a set of view-dependent features (view-features) that correspond to the same point in the 3D point cloud. The cardinality of these view-features may differ from one Voint to another. In Table 1, we compare some of the widely used 3D representations and our Voint cloud representation. Voint clouds inherit the characteristics of the parent explicit 3D point clouds, which facilitates learning Voint representations for a variety of vision applications (e.g. point cloud classification and segmentation). To deploy deep learning on the new Voint space, we define basic operations on Voints, such as pooling and convolution. Based on these operations, we define a practical way of building Voint neural networks that we dub VointNet. VointNet takes a Voint cloud and outputs point cloud features for 3D point cloud processing. We show how learning this Voint cloud representation leads to strong performance and gained robustness for the tasks of 3D classification, 3D object retrieval, and 3D part segmentation on standard benchmarks like ScanObjectNN (Uy et al., 2019), and ShapeNet (Chang et al., 2015)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.813, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Contributions: (i) We propose a novel multi-view 3D point cloud representation (denoted as Voint cloud), which represents each point (namely a Voint) as a set of features from different view-points. 
(ii) We define pooling and convolutional operations at the Voint level to construct a Voint Neural Network (VointNet) capable of learning to aggregate information from multiple views in the Voint space. (iii) Our VointNet reaches state-of-the-art performance on several 3D understanding tasks, including 3D shape classification, retrieval, and robust part segmentation. Further, VointNet achieves robustness improvement to occlusion and rotation." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.178, + 0.102, + 0.821, + 0.21 + ], + "angle": 0, + "content": "
3D RepresentationExplicitnessView-BasedMain Use3D Expressiveness
Point CloudsExplicitX3D UnderstandingMedium
Multi-View ProjectionsImplicit3D UnderstandingLow
VoxelsExplicitX3D UnderstandingMedium
MeshExplicitX3D ModelingHigh
NeRFsImplicitNovel View SynthesisMedium
Voint Clouds (ours)Explicit3D UnderstandingMedium
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.219, + 0.825, + 0.26 + ], + "angle": 0, + "content": "Table 1: Comparison of Different 3D Representations. We compare some of the widely used 3D representations to our proposed Voint cloud. Note that our Voint cloud shares the view-dependency of NeRFs (Mildenhall et al., 2020) while inheriting the merits of 3D point clouds." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.292, + 0.356, + 0.308 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.322, + 0.827, + 0.476 + ], + "angle": 0, + "content": "Learning on 3D Point Clouds. 3D point clouds are widely used for 3D representation in computer vision due to their compactness, flexibility, and because they can be obtained naturally from sensors like LiDAR and RGBD cameras. PointNet (Qi et al., 2017a) paved the way as the first deep learning algorithm to operate directly on 3D point clouds. It computes point features independently and aggregates them using an order-invariant function like max-pooling. Subsequent works focused on finding neighborhoods of points to define point convolutional operations (Qi et al., 2017b; Wang et al., 2019c; Li et al., 2018; Han et al., 2019). Several recent works combine point cloud representations with other 3D modalities like voxels (Liu et al., 2019b; You et al., 2018) or multi-view images (Jaritz et al., 2019). We propose a novel Voint cloud representation for 3D shapes and investigates novel architectures that aggregate view-dependent features at the 3D point level." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.482, + 0.825, + 0.636 + ], + "angle": 0, + "content": "Multi-View Applications. The idea of using 2D images to understand the 3D world was initially proposed in 1994 by Bradski et. al. (Bradski & Grossberg, 1994). This intuitive multi-view approach was combined with deep learning for 3D understanding in MVCNN (Su et al., 2015). 
A line of works continued developing multi-view approaches for classification and retrieval by improving the aggregation of the view-features from each image view (Kanezaki et al., 2018; Esteves et al., 2019; Cohen & Welling, 2016; Wei et al., 2020; Hamdi et al., 2021). In this work, we fuse the concept of multi-view into the 3D structure itself, such that every 3D point would have an independent set of view-features according to the view-points available in the setup. Our Voints are aligned with the sampled 3D point cloud, offering a compact representation that allows for efficient computation and memory usage while maintaining the view-dependent component that facilitates view-based learning for vision." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.642, + 0.827, + 0.81 + ], + "angle": 0, + "content": "Hybrid Multi-View with 3D Data. On the task of 3D semantic segmentation, a smaller number of works tried to follow the multi-view approach (Dai & Nießner, 2018; Kundu et al., 2020; Wang et al., 2019a; Kalogerakis et al., 2017; Jaritz et al., 2019; Liu et al., 2021b; Lyu et al., 2020). A problem arises when combining view features to represent local points/voxels while preserving local geometric features. These methods tend to average the view-features (Kundu et al., 2020; Kalogerakis et al., 2017), propagate the labels only (Wang et al., 2019a), learn from reconstructed points in the neighborhood (Jaritz et al., 2019), order points on a single grid (Lyu et al., 2020), or combine the multi-view features with 3D voxel features (Dai & Nießner, 2018; Hou et al., 2019). To this end, our proposed VointNet operates on the Voint cloud space while preserving the compactness and 3D descriptiveness of the original point cloud. VointNet leverages the power of multi-view features with learned aggregation on the view-features applied to each point independently." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.837, + 0.346, + 0.853 + ], + "angle": 0, + "content": "3 METHODOLOGY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "The primary assumption in our work is that surface 3D points are spherical functions, i.e. their representations depend on the viewing angles observing them. This condition contrasts with most 3D point cloud processing pipelines that assume a view-independent representation of 3D point clouds. The full pipeline is illustrated in Figure 2." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.101, + 0.829, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.25, + 0.828, + 0.318 + ], + "angle": 0, + "content": "Figure 2: Learning from Voint Clouds. To construct a 3D Voint cloud \\(\\widehat{\\mathcal{X}}\\), a renderer \\(\\mathbf{R}\\) renders the point cloud \\(\\mathcal{X}\\) from view-points \\(\\mathcal{U}\\) and image features are extracted from the generated images via a 2D backbone \\(\\mathbf{C}\\). The image features are then unprojected to the Voint cloud by \\(\\Phi_{\\mathbf{B}}\\) and passed to VointNet \\(\\widehat{\\mathbf{F}}\\). To learn both \\(\\mathbf{C}\\) and \\(\\widehat{\\mathbf{F}}\\), a 3D loss on the output points is used with an optional auxiliary 2D loss on \\(\\mathbf{C}\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.34, + 0.353, + 0.354 + ], + "angle": 0, + "content": "3.1 3D VOINT CLOUD" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.36, + 0.828, + 0.576 + ], + "angle": 0, + "content": "From Point Clouds to Voint Clouds. 
A 3D point cloud is a compact 3D representation composed of sampled points on the surface of a 3D object or a scene and can be obtained by different sensors like LiDAR (Chen et al., 2017) or as a result of reconstruction (Okutomi & Kanade, 1993). Formally, we define the coordinate function for the surface \\( g_{\\mathrm{s}}(\\mathbf{x}) : \\mathbb{R}^3 \\to \\mathbb{R} \\) as the Sign Distance Function (SDF) in the continuous Euclidean space (Park et al., 2019; Mescheder et al., 2019). The 3D iso-surface is then defined as the set of all points \\( \\mathbf{x} \\) that satisfy the condition \\( g_{\\mathrm{s}}(\\mathbf{x}) = 0 \\). We define a surface 3D point cloud \\( \\mathcal{X} \\in \\mathbb{R}^{N \\times 3} \\) as a set of \\( N \\) 3D points, where each point \\( \\mathbf{x}_i \\in \\mathbb{R}^3 \\) is represented by its 3D coordinates \\( (x_i, y_i, z_i) \\) and satisfies the iso-surface condition as follows: \\( \\mathcal{X} = \\{\\mathbf{x}_i \\in \\mathbb{R}^3 \\mid g_{\\mathrm{s}}(\\mathbf{x}_i) = 0\\}_{i=1}^N \\). In this work, we aim to fuse the view-dependency to 3D point. Inspired by NeRFs (Mildenhall et al., 2020), we assume that surface points also depend on the view direction from which they are being observed. Specifically, there exists a continuous implicit spherical function \\( \\mathbf{g}(\\mathbf{x}, \\mathbf{u}) : \\mathbb{R}^5 \\to \\mathbb{R}^d \\) that defines the features of each point \\( \\mathbf{x} \\) depending on the view-point direction \\( \\mathbf{u} \\). Given a set of \\( M \\) view-point directions \\( \\mathcal{U} \\in \\mathbb{R}^{M \\times 2} \\), a Voint \\( \\widehat{\\mathbf{x}} \\in \\mathbb{R}^{M \\times d} \\) is a set of \\( M \\) view-dependent features of size \\( d \\) for the sphere centered at point \\( \\mathbf{x} \\) as follows." 
+ }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.578, + 0.825, + 0.602 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\mathbf {x}} _ {i} = \\left\\{\\mathbf {g} \\left(\\mathbf {x} _ {i}, \\mathbf {u} _ {j}\\right) \\in \\mathbb {R} ^ {d} \\mid \\mathbf {x} _ {i} \\in \\mathcal {X} \\right\\} _ {j = 1} ^ {M} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.606, + 0.825, + 0.652 + ], + "angle": 0, + "content": "The Voint cloud \\(\\widehat{\\mathcal{X}}\\in \\mathbb{R}^{N\\times M\\times d} = \\{\\widehat{\\mathbf{x}}_i\\}_{i = 1}^N\\) is the set of all \\(N\\) Voints \\(\\widehat{\\mathbf{x}}_i\\) corresponding to the parent point cloud \\(\\mathcal{X}\\). Note that we typically do not have access to the underlying implicit function \\(\\mathbf{g}\\) and we approximate it with the following three steps." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.657, + 0.826, + 0.797 + ], + "angle": 0, + "content": "1-Multi-View Projection. As mentioned earlier, a Voint combines multiple view-features of the same 3D point. These view-features come from a multi-view projection of the points by a point cloud renderer \\(\\mathbf{R}:\\mathbb{R}^{N\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times 3}\\) that renders the point cloud \\(\\mathcal{X}\\) from multiple view-points \\(\\mathcal{U}\\) into \\(M\\) images of size \\(H\\times W\\times 3\\). In addition to projecting the point cloud into the image space, \\(\\mathbf{R}\\) defines the index mapping \\(\\mathbf{B}\\in \\{0,\\dots,N\\}^{M\\times H\\times W}\\) between each pixel to the N points and background it renders. Also, \\(\\mathbf{R}\\) outputs the visibility binary matrix \\(\\mathbf{V}\\in \\{0,1\\}^{N\\times M}\\) for each point from each view. Since not all points appear in all the views due to pixel discretization, the visibility score \\(\\mathbf{V}_{i,j}\\) defines if the Voint \\(\\hat{\\mathbf{x}}_i\\) is visible in the view \\(\\mathbf{u}_j\\). 
The matrix \\(\\mathbf{B}\\) is crucial for unprojection, while \\(\\mathbf{V}\\) is needed for defining meaningful operations on Voints." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.803, + 0.825, + 0.874 + ], + "angle": 0, + "content": "2-Multi-View Feature Extraction. The rendered images are processed by a function \\(\\mathbf{C}:\\mathbb{R}^{M\\times H\\times W\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times d}\\) that extracts image features, as shown in Figure 2. If \\(\\mathbf{C}\\) is the identity function, all the view-features would typically the RGB value of the corresponding point. However, the \\(\\mathbf{C}\\) function can be a 2D network dedicated to the downstream task and can extract useful global and local features about each view." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.879, + 0.827, + 0.926 + ], + "angle": 0, + "content": "3-Multi-View Unprojection. We propose a module \\(\\Phi_{\\mathbf{B}}:\\mathbb{R}^{M\\times H\\times W\\times d}\\to \\mathbb{R}^{N\\times M\\times d}\\) that unprojects the 2D features from each pixel to be 3D view-features at the corresponding voint. 
Using the mapping \\(\\mathbf{B}\\) created by the renderer, \\(\\Phi_{\\mathbf{B}}\\) forms the Voint cloud features \\(\\widehat{\\mathcal{X}}\\)" + }, + { + "type": "list", + "bbox": [ + 0.17, + 0.657, + 0.827, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.137 + ], + "angle": 0, + "content": "To summarize, the output Voint cloud is described by Eq (1), where \\(\\mathbf{g}(\\mathbf{x}_i,\\mathbf{u}_j) = \\Phi_{\\mathbf{B}}\\big(\\mathbf{C}(\\mathbf{R}(\\mathcal{X},\\mathbf{u}_j))\\big)_i\\) and the features are only defined for a view \\(j\\) of Voint \\(\\hat{\\mathbf{x}}_i\\) if \\(\\mathbf{V}_{i,j} = 1\\)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.151, + 0.486, + 0.165 + ], + "angle": 0, + "content": "3.2 OPERATIONS ON 3D VOINT CLOUDS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.171, + 0.827, + 0.243 + ], + "angle": 0, + "content": "We show in the Appendix that a functional form of max-pooled individual view-features of a set of angles can approximate any function in the spherical coordinates. We provide a theorem that extends PointNet's theorem of point cloud functional composition (Qi et al., 2017a) and its Universal Approximation to spherical functions underlying Voints. Next, we define a set of operations on Voints as building blocks for Voint neural networks (VointNet)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.248, + 0.825, + 0.277 + ], + "angle": 0, + "content": "VointMax. We define VointMax as max-pooling on the visible view-features along the views dimension of the voint \\(\\hat{\\mathbf{x}}\\). 
For all \\(i \\in 1,2,\\dots,N\\) and \\(j \\in 1,2,\\dots,M\\)," + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.283, + 0.823, + 0.307 + ], + "angle": 0, + "content": "\\[\n\\operatorname {V o i n t M a x} \\left(\\widehat {\\mathbf {x}} _ {i}\\right) = \\max _ {j} \\widehat {\\mathbf {x}} _ {i, j}, \\quad \\text {s . t .} \\quad \\mathbf {V} _ {i, j} = 1 \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.317, + 0.825, + 0.405 + ], + "angle": 0, + "content": "VointConv. We define the convolution operation \\( h_{\\mathrm{V}}: \\mathbb{R}^{N \\times M \\times d} \\to \\mathbb{R}^{N \\times M \\times d'} \\) as any learnable function that operates on the Voint space with shared weights on all the Voints and has the view-features input size \\( d \\) and outputs view-features of size \\( d' \\) and consists of \\( l_{V} \\) layers. A simple example of this VointConv operation is the shared MLP applied only on the visible view-features. We provide further details for such operations in Section 4.2, which result in different non-exhaustive variants of VointNet." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.42, + 0.468, + 0.435 + ], + "angle": 0, + "content": "3.3 LEARNING ON 3D VOINT CLOUDS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.441, + 0.825, + 0.485 + ], + "angle": 0, + "content": "VointNet. The goal of the VointNet model is to obtain multi-view point cloud features that can be subsequently used by any point cloud processing pipeline. The VointNet module \\(\\widehat{\\mathbf{F}}:\\mathbb{R}^{N\\times M\\times d}\\to \\mathbb{R}^{N\\times d}\\) is defined as follows." 
+ }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.49, + 0.825, + 0.517 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\mathbf {F}} (\\widehat {\\boldsymbol {\\chi}}) = h _ {\\mathrm {P}} \\left(\\operatorname {V o i n t M a x} \\left(h _ {\\mathrm {V}} (\\widehat {\\boldsymbol {\\chi}})\\right)\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.524, + 0.825, + 0.569 + ], + "angle": 0, + "content": "where \\( h_{\\mathrm{P}} \\) is any point convolutional operation (e.g. shared MLP or EdgeConv). VointNet \\( \\widehat{\\mathbf{F}} \\) transforms the individual view-features using the learned VointConv \\( h_{\\mathrm{V}} \\) before VointMax is applied on the view-features to obtain point features." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.574, + 0.825, + 0.604 + ], + "angle": 0, + "content": "VointNet Pipeline for 3D Point Cloud Processing. The full pipeline is described in Figure 2. The loss for this pipeline can be described as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.61, + 0.825, + 0.653 + ], + "angle": 0, + "content": "\\[\n\\underset {\\boldsymbol {\\theta} _ {\\mathbf {C}}, \\boldsymbol {\\theta} _ {\\widehat {\\mathbf {F}}}} {\\arg \\min } \\sum_ {i} ^ {N} L \\left(\\widehat {\\mathbf {F}} \\left(\\Phi_ {\\mathbf {B}} \\left(\\mathbf {C} \\left(\\mathbf {R} (\\mathcal {X}, \\mathcal {U})\\right)\\right)\\right) _ {i}, \\mathbf {y} _ {i}\\right), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.661, + 0.827, + 0.761 + ], + "angle": 0, + "content": "where \\(L\\) is a Cross-Entropy (CE) loss defined on all the training points \\(\\mathcal{X}\\), and \\(\\{y_i\\}_{i=1}^N\\) defines the labels of these points. The other components \\((\\mathbf{R}, \\Phi_{\\mathbf{B}}, \\mathcal{U}, \\mathbf{C})\\) are all defined before. 
The weights to be jointly learned are those of the 2D backbone \\((\\theta_{\\mathbf{C}})\\) and those of the VointNet \\((\\theta_{\\widehat{\\mathbf{F}}})\\) using the same 3D loss. An auxiliary 2D loss on \\(\\theta_{\\mathbf{C}}\\) can be optionally added for supervision at the image level. For classification, the entire object can be treated as a single Voint, and the global features of each view would be the view-features of that Voint. We analyze different setups in detail in Section 6." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.78, + 0.334, + 0.795 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.806, + 0.386, + 0.82 + ], + "angle": 0, + "content": "4.1 EXPERIMENTAL SETUP" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Datasets. We benchmark VointNet on the challenging and realistic ScanObjectNN dataset for 3D point cloud classification (Uy et al., 2019). The dataset has three variants, includes background and occlusion, and has 15 categories and 2,902 point clouds. For the shape retrieval task, we benchmark on ShapeNet Core55 as a subset of ShapeNet (Chang et al., 2015). The dataset consists of 51,162 3D mesh objects labeled with 55 object classes. We follow the MVTN's setup (Hamdi et al., 2021) in sampling 5,000 points from each mesh object to obtain point cloud. On the other hand, for the task of shape part segmentation," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.212, + 0.101, + 0.784, + 0.285 + ], + "angle": 0, + "content": "
MethodData TypeClassificationAccuracy
OBJ_BGOBJ_ONLYHardest
PointNet (Qi et al., 2017a)Points73.379.268.0
SpiderCNN (Xu et al., 2018)Points77.179.573.7
PointNet ++ (Qi et al., 2017b)Points82.384.377.9
PointCNN (Li et al., 2018)Points86.185.578.5
DGCNN (Wang et al., 2019c)Points82.886.278.1
SimpleView (Goyal et al., 2021)M-View--79.5
BGA-DGCNN (Uy et al., 2019)Points--79.7
BGA-PN++ (Uy et al., 2019)Points--80.2
MVTN (Hamdi et al., 2021)M-View92.692.382.8
VointNet (ours)Voints93.794.085.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.295, + 0.828, + 0.351 + ], + "angle": 0, + "content": "Table 2: 3D Point Cloud Classification on ScanObjectNN. We report the accuracy of VointNet in 3D point cloud classification on three different variants of ScanObjectNN (Uy et al., 2019). Bold denotes the best result in its setup. Note that the Hardest variant includes rotated and translated objects, which highlights the benefits of Voints on challenging scenarios." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.39, + 0.827, + 0.448 + ], + "angle": 0, + "content": "we test on ShapeNet Parts (Yi et al., 2016), a subset of ShapeNet (Chang et al., 2015) that consists of 16,872 point cloud objects from 16 categories and 50 parts. For occlusion robustness, we follow MVTN (Hamdi et al., 2021) and test on ModelNet40 (Wu et al., 2015), which is composed of 40 classes and 12,311 3D objects." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.454, + 0.828, + 0.512 + ], + "angle": 0, + "content": "Metrics. For 3D point cloud classification, we report the overall accuracy, while shape retrieval is evaluated using mean Average Precision (mAP) over test queries (Hamdi et al., 2021). 3D semantic segmentation is evaluated using mean Intersection over Union (mIoU) on points. For part segmentation, we report Instance-averaged mIoU (Ins. mIoU)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.516, + 0.828, + 0.603 + ], + "angle": 0, + "content": "Baselines. We include PointNet (Qi et al., 2017a), PointNet++ (Qi et al., 2017b), DGCNN (Wang et al., 2019c), as baselines that use point clouds. We also compare against multi-view classification approaches like MVCNN (Su et al., 2015), SimpleView (Goyal et al., 2021), and MVTN (Hamdi et al., 2021) as baselines for classification and retrieval and adopt some of the multi-view segmentation baselines (e.g. Label Fusion (Wang et al., 2019a) and Mean Fusion (Kundu et al., 2020)) for part segmentation." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.635, + 0.379, + 0.648 + ], + "angle": 0, + "content": "4.2 VOINTNET VARIANTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.662, + 0.827, + 0.692 + ], + "angle": 0, + "content": "VointNet in Eq (3) relies on the VointConv operation \\( h_{\\mathrm{V}} \\) as the basic building block. Here, we briefly describe three examples of \\( h_{\\mathrm{V}} \\) operations VointNet uses." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.698, + 0.828, + 0.788 + ], + "angle": 0, + "content": "Shared Multi-Layer Perceptron (MLP). It is the most basic VointConv formulation. For a layer \\(l\\), the features of Voint \\(i\\) at view \\(j\\) are updated to layer \\(l + 1\\) as: \\(\\mathbf{h}_{i,j}^{l + 1} = \\rho (\\mathbf{h}_{i,j}^{l}\\mathcal{W}_{\\rho})\\), where \\(\\rho\\) is the shared MLP with weights \\(\\mathcal{W}_{\\rho}\\) followed by normalization and a nonlinear function (e.g. ReLU). This operation is applied on all Voints independently and only involves the visible views-features for each Voint. This formulation extends the shared MLP formulation for PointNet (Qi et al., 2017a) to work on Voints' view-features." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.791, + 0.828, + 0.877 + ], + "angle": 0, + "content": "Graph Convolution (GCN). We define a fully connected graph for each Voint by creating a virtual center node connected to all the view-features to aggregate their information (similar to \"cls\" token in ViT (Dosovitskiy et al., 2021)). Then, the graph convolution can be defined as the shared MLP (as described above) but on the edge features between all view features, followed by a max pool on the graph neighbors. An additional shared MLP is used before the final output." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.882, + 0.826, + 0.927 + ], + "angle": 0, + "content": "Graph Attention (GAT). 
A graph attention operation can be defined just like the GCN operation above but with learned attention weights on the graph neighbor's features before averaging them. A shared MLP computes these weights." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.102, + 0.817, + 0.162 + ], + "angle": 0, + "content": "
ResultsMVCNN (Su et al., 2015)RotNet (Kanezaki et al., 2018)ViewGCN (Wei et al., 2020)MVTN (Hamdi et al., 2021)VointNet (ours)
ShapeNet73.577.278.482.983.3
Retr. mAP
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.175, + 0.825, + 0.203 + ], + "angle": 0, + "content": "Table 3: 3D Shape Retrieval. We report 3D shape retrieval mAP on ShapeNet Core55 (Chang et al., 2015; Sfikas et al., 2017). VointNet achieves state-of-the-art results on this benchmark." + }, + { + "type": "table", + "bbox": [ + 0.264, + 0.218, + 0.731, + 0.345 + ], + "angle": 0, + "content": "
MethodData TypePart Segmentation
(Unrotated)(Rotated)
PointNet (Qi et al., 2017a)Points80.136.6 ±0.2
DGCNN (Wang et al., 2019c)Points80.137.1 ±0.2
CurveNet (Xiang et al., 2021)Points84.932.3 ±0.0
Label Fuse (Wang et al., 2019a)M-View80.061.4 ±0.2
Mean Fuse (Kundu et al., 2020)M-View77.562.0 ±0.2
VointNet (ours)Voints81.262.4 ±0.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.355, + 0.825, + 0.42 + ], + "angle": 0, + "content": "Table 4: Robust 3D Part Segmentation on ShapeNet Parts. We compare the Inst. mIoU of VointNet against other methods in 3D segmentation on ShapeNet Parts (Yi et al., 2016). At test time, we randomly rotate the objects and report the results over ten runs. Note how VointNet's performance largely exceeds the point baselines in the realistic rotated scenarios, while exceeding multi-view baselines on the unrotated benchmark. All the results are reproduced in our setup." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.448, + 0.416, + 0.463 + ], + "angle": 0, + "content": "4.3 IMPLEMENTATION DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.47, + 0.825, + 0.57 + ], + "angle": 0, + "content": "Rendering and Unprojection. We choose the differentiable point cloud renderer \\(\\mathbf{R}\\) from Pytorch3D (Ravi et al., 2020) in our pipeline for its speed and compatibility with Pytorch libraries (Paszke et al., 2017). We render point clouds on multi-view images with size \\(224 \\times 224 \\times 3\\). We color the points by their normals' values or keep them white if the normals are not available. Following a similar procedure to (Wei et al., 2020; Hamdi et al., 2021), the view-points setup is randomized during training (using \\(M = 8\\) views) and fixed to spherical views in testing (using \\(M = 12\\) views)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.574, + 0.825, + 0.673 + ], + "angle": 0, + "content": "Architectures. For the 2D backbone \\(\\mathbf{C}\\), we use ViT-B (Dosovitskiy et al., 2021) (with pretrained weights from TIMM library (Wightman, 2019)) for classification and DeepLabV3 (Chen et al., 2018) for segmentation. We use the 3D CE loss on the 3D point cloud output and the 2D CE loss when the loss is defined on the pixels. 
The feature dimension of the VointNet architectures is \(d = 64\), and the depth is \(l_{V} = 4\) layers in \(h_V\). The main results are based on VointNet (MLP), unless otherwise specified as in Section 6, where we study in detail the effect of VointConv \(h_\mathrm{V}\) and \(\mathbf{C}\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.679, + 0.827, + 0.779 + ], + "angle": 0, + "content": "Training Setup. We train our pipeline in two stages, where we start by training the 2D backbone on the 2D projected labels of the points, then train the entire pipeline end-to-end while focusing the training on the VointNet part. We use the AdamW optimizer (Loshchilov & Hutter, 2017) with an initial learning rate of 0.0005 and a step learning rate schedule of \(33.3\%\) every 12 epochs for 40 epochs. The pipeline is trained with one NVIDIA Tesla V100 GPU. We do not use any data augmentation. More details about the training setup (loss and rendering), VointNet, and the 2D backbone architectures can be found in the Appendix." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.799, + 0.287, + 0.815 + ], + "angle": 0, + "content": "5 RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.825, + 0.926 + ], + "angle": 0, + "content": "The main test results of our Voint formulations are summarized in Tables 2, 3, 4, and 5. We achieve state-of-the-art performance in the task of 3D classification, retrieval, and robust 3D part segmentation. More importantly, under the realistic rotated setups of ScanObjectNN and ShapeNet Parts, we improve over \(7.2\%\) Acc. and \(25\%\) mIoU respectively compared to point baselines Qi et al. (2017a); Wang et al. (2019c). Following common practice Hamdi et al. (2021), we report the best results out of four runs in benchmark tables, but detailed results are provided in the Appendix." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.51, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.211, + 0.099, + 0.396, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.406, + 0.098, + 0.581, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.099, + 0.789, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.275, + 0.825, + 0.318 + ], + "angle": 0, + "content": "Figure 3: Qualitative Comparison for Part Segmentation. We compare our VointNet 3D segmentation predictions to Mean Fuse (Kundu et al., 2020) that is using the same trained 2D backbone. Note how VointNet distinguishes detailed parts (e.g. the car window frame)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.349, + 0.42, + 0.363 + ], + "angle": 0, + "content": "5.1 3D SHAPE CLASSIFICATION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.372, + 0.825, + 0.485 + ], + "angle": 0, + "content": "Table 2 reports the classification accuracy on the 3D point cloud classification task on ScanObjectNN Uy et al. (2019). It benchmarks VointNet against other recent and strong baselines Hamdi et al. (2021); Goyal et al. (2021); Hamdi et al. (2021). VointNet demonstrates state-of-the-art results on all the variants, including the challenging Hardest (PB_T50_RS) variant that includes challenging scenarios of rotated and translated objects. The increase in performance \\((+2.6\\%)\\) is significant on this variant, which highlights the benefits of Voints on challenging scenarios, with further affirming results in Section 5.4. We follow exactly the same procedure as in MVTN Hamdi et al. (2021)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.508, + 0.384, + 0.523 + ], + "angle": 0, + "content": "5.2 3D SHAPE RETRIEVAL" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.536, + 0.827, + 0.58 + ], + "angle": 0, + "content": "Table 3 benchmarks the 3D shape retrieval mAP on ShapeNet Core55 Chang et al. (2015). VointNet achieves state-of-the-art performance on ShapeNet Core55. Baseline results are reported from Hamdi et al. (2021)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.603, + 0.472, + 0.617 + ], + "angle": 0, + "content": "5.3 ROBUST 3D PART SEGMENTATION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.626, + 0.827, + 0.793 + ], + "angle": 0, + "content": "Table 4 reports the Instance-averaged segmentation mIoU of VointNet compared with other methods on ShapeNet Parts Yi et al. (2016). Two variants of the benchmark are reported: unrotated normalized setup, and the rotated realistic setup. For the rotated setup, we follow the previous 3D literature Liu et al. (2019a); Hamdi et al. (2021; 2020) by testing the robustness of trained models by perturbing the shapes in ShapeNet Parts with random rotations at test time (ten runs) and report the averages in Table 4. Note VointNet's improvement over Mean Fuse Kundu et al. (2020) and Label Fuse Wang et al. (2019a) on unrotated setup despite that both baselines use the same trained 2D backbone as VointNet. Also, for rotated setups, point methods don't work as well. All the results in Table 4 are reproduced by our code in the same setup (see the code attached in supplementary material). Figure 3 shows qualitative 3D segmentation results for VointNet and Mean Fuse Kundu et al. (2020) as compared to the ground truth." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.817, + 0.403, + 0.831 + ], + "angle": 0, + "content": "5.4 OCCLUSION ROBUSTNESS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.827, + 0.926 + ], + "angle": 0, + "content": "One of the aspects of the robustness of 3D classification models that have been recently studied is their robustness to occlusion, as detailed in MVTN Hamdi et al. (2021). These simulated occlusions are introduced at test time, and the average test accuracy is reported on each cropping ratio. We benchmark our VointNet against recent baselines in Table 5. PointNet Qi et al. (2017a) and DGCNN Wang et al. (2019c) are used as point-based baselines, and MVTN Hamdi et al. (2021) as a multi-view baseline." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.208, + 0.101, + 0.794, + 0.219 + ], + "angle": 0, + "content": "
MethodData TypeOcclusion Ratio
00.10.20.30.5
PointNet (Qi et al., 2017a)Points89.188.286.181.653.5
DGCNN (Wang et al., 2019c)Points92.177.174.571.230.1
PCT (Guo et al., 2021)Points93.392.691.188.261.9
MVTN (Hamdi et al., 2021)M-View93.890.389.988.367.1
VointNet (ours)Voints92.891.691.289.166.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.229, + 0.825, + 0.268 + ], + "angle": 0, + "content": "Table 5: Occlusion Robustness for 3D Classification. We report the test accuracy on ModelNet40 (Wu et al., 2015) for different occlusion ratios of the data to measure occlusion robustness of different 3D methods." + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.289, + 0.715, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.456, + 0.825, + 0.509 + ], + "angle": 0, + "content": "Figure 4: Effect of the Number of Views. We plot Ins. mIoU of 3D segmentation vs. the number of views \\((M)\\) used in inference on ShapeNet Parts. Note VointNet's consistent improvement over Mean Fuse (Kundu et al., 2020) and Label Fuse (Wang et al., 2019a). Both baselines use the same trained 2D backbone as VointNet and are tested on the same unrotated setup." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.536, + 0.425, + 0.551 + ], + "angle": 0, + "content": "6 ANALYSIS AND INSIGHTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.562, + 0.827, + 0.646 + ], + "angle": 0, + "content": "Number of Views. We study the effect of the number of views \\( M \\) on the performance of 3D part segmentation using multiple views. We compare Mean Fuse (Kundu et al., 2020) and Label Fuse (Wang et al., 2019a) to our VointNet when all of them have the same trained 2D backbone. The views are randomly picked, and the experiments are repeated four times. Ins. mIoU with confidence intervals are shown in Figure 4. We observe a consistent improvement with VointNet over the other two baselines across different numbers of views." + }, + { + "type": "table", + "bbox": [ + 0.308, + 0.658, + 0.691, + 0.782 + ], + "angle": 0, + "content": "
2D BackboneVointConvResults
FCNDeepLabV3MLPGCNGATInst. mIoU
---78.8 ± 0.2
---77.6 ± 0.2
---77.1 ± 0.2
---80.6 ± 0.1
---77.2 ± 0.4
---80.4 ± 0.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.794, + 0.825, + 0.822 + ], + "angle": 0, + "content": "Table 6: Ablation Study for 3D Segmentation. We ablate different components of VointNet (2D backbone and VointConv choice) and report Ins. mIoU performance on ShapeNet Parts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Choice of Backbones. We ablate the choice of the 2D backbone and the VointConv operation used in VointNet and report the segmentation Ins. mIoU results in Table 6. Note how the 2D backbone greatly affects performance, while the VointConv operation type does not. This ablation highlights the importance of the 2D backbone in VointNet pipeline and motivates the use of the simplest variant of VointNet (MLP). We provide a detailed study of more factors as well as compute and memory costs in the Appendix." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.557, + 0.119 + ], + "angle": 0, + "content": "7 LIMITATIONS AND ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.129, + 0.827, + 0.213 + ], + "angle": 0, + "content": "One aspect limiting the performance of Voints is how well-trained the 2D backbone is for the downstream 3D task. In most cases, the 2D backbone must be pretrained with enough data to learn meaningful information for VointNet. Another aspect that limits the capability of the Voint cloud is how to properly select the view-points for segmentation. Addressing these limitations is an important direction for future work. Also, extending Voint learning on more 3D tasks like 3D scene segmentation and 3D object detection is left for future work." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.219, + 0.828, + 0.276 + ], + "angle": 0, + "content": "Acknowledgments. This work was supported by the King Abdullah University of Science and Technology (KAUST) Office of Sponsored Research through the Visual Computing Center (VCC) funding and the SDAIA-KAUST Center of Excellence in Data Science and Artificial Intelligence (SDAIA-KAUST AI)" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.295, + 0.292, + 0.31 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.318, + 0.829, + 0.361 + ], + "angle": 0, + "content": "Gary Bradski and Stephen Grossberg. Recognition of 3-d objects from multiple 2-d views by a self-organizing neural architecture. In *From Statistics to Neural Networks*, pp. 349–375. Springer, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.369, + 0.829, + 0.44 + ], + "angle": 0, + "content": "Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. ShapeNet: An Information-Rich 3D Model Repository. Technical Report arXiv:1512.03012 [cs.GR], Stanford University — Princeton University — Toyota Technological Institute at Chicago, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.448, + 0.828, + 0.492 + ], + "angle": 0, + "content": "Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In Proceedings of the European conference on computer vision (ECCV), pp. 801-818, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.499, + 0.827, + 0.543 + ], + "angle": 0, + "content": "Xiaozhi Chen, Huimin Ma, Ji Wan, Bo Li, and Tian Xia. Multi-view 3d object detection network for autonomous driving. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 
1907-1915, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.551, + 0.828, + 0.594 + ], + "angle": 0, + "content": "Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3075-3084, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.602, + 0.827, + 0.632 + ], + "angle": 0, + "content": "Taco Cohen and Max Welling. Group equivariant convolutional networks. In International conference on machine learning, pp. 2990-2999, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.639, + 0.827, + 0.683 + ], + "angle": 0, + "content": "Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multi-view prediction for 3d semantic scene segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 452-468, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.69, + 0.827, + 0.747 + ], + "angle": 0, + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.755, + 0.825, + 0.799 + ], + "angle": 0, + "content": "Carlos Esteves, Yinshuang Xu, Christine Allen-Blanchette, and Kostas Daniilidis. Equivariant multi-view networks. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1568-1577, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.807, + 0.825, + 0.85 + ], + "angle": 0, + "content": "Yutong Feng, Yifan Feng, Haoxuan You, Xibin Zhao, and Yue Gao. Meshnet: Mesh neural network for 3d shape representation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 8279-8286, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.858, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Ankit Goyal, Hei Law, Bowei Liu, Alejandro Newell, and Jia Deng. Revisiting point cloud shape classification with a simple and effective baseline. In ICML, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Meng-Hao Guo, Jun-Xiong Cai, Zheng-Ning Liu, Tai-Jiang Mu, Ralph R Martin, and Shi-Min Hu. Pct: Point cloud transformer. Computational Visual Media, 7(2):187-199, 2021." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.318, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.148 + ], + "angle": 0, + "content": "Abdullah Hamdi, Sara Rojas, Ali Thabet, and Bernard Ghanem. Advpc: Transferable adversarial perturbations on 3d point clouds. In Computer Vision - ECCV 2020, pp. 241-257, Cham, 2020. Springer International Publishing. ISBN 978-3-030-58610-2." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.827, + 0.2 + ], + "angle": 0, + "content": "Abdullah Hamdi, Silvio Giancola, and Bernard Ghanem. Mvtn: Multi-view transformation network for 3d shape recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 1-11, October 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.826, + 0.265 + ], + "angle": 0, + "content": "Zhizhong Han, Xiyang Wang, Yu-Shen Liu, and Matthias Zwicker. Multi-angle point cloud-vae: Unsupervised feature learning for 3d point clouds from multiple angles by joint self-reconstruction and half-to-half prediction. 
In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 10441-10450. IEEE, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.273, + 0.824, + 0.304 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CoRR, abs/1512.03385, 2015. URL http://arxiv.org/abs/1512.03385." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.31, + 0.826, + 0.355 + ], + "angle": 0, + "content": "Ji Hou, Angela Dai, and Matthias Nießner. 3d-sis: 3d semantic instance segmentation of rgb-d scans. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4421-4430, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.363, + 0.827, + 0.407 + ], + "angle": 0, + "content": "Wenbo Hu, Hengshuang Zhao, Li Jiang, Jiaya Jia, and Tien-Tsin Wong. Bidirectional projection network for cross dimension scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14373-14382, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.415, + 0.827, + 0.458 + ], + "angle": 0, + "content": "Maximilian Jaritz, Jiayuan Gu, and Hao Su. Multi-view pointnet for 3d scene understanding. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 0-0, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.467, + 0.826, + 0.511 + ], + "angle": 0, + "content": "Evangelos Kalogerakis, Melinos Averkiou, Subhransu Maji, and Siddhartha Chaudhuri. 3d shape segmentation with projective convolutional networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3779-3788, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.518, + 0.826, + 0.575 + ], + "angle": 0, + "content": "Asako Kanezaki, Yasuyuki Matsushita, and Yoshifumi Nishida. Rotationnet: Joint object categorization and pose estimation using multiviews from unsupervised viewpoints. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5010-5019, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.584, + 0.827, + 0.641 + ], + "angle": 0, + "content": "Abhijit Kundu, Xiaoqi Yin, Alireza Fathi, David Ross, Brian Brewington, Thomas Funkhouser, and Caroline Pantofaru. Virtual multi-view fusion for 3d semantic segmentation. In European Conference on Computer Vision (ECCV), pp. 518-535. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.65, + 0.826, + 0.694 + ], + "angle": 0, + "content": "Yangyan Li, Rui Bu, Mingchao Sun, Wei Wu, Xinhan Di, and Baoquan Chen. Pointcnn: Convolution on x-transformed points. In Advances in neural information processing systems (NIPS), pp. 820-830, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.702, + 0.826, + 0.746 + ], + "angle": 0, + "content": "Yongcheng Liu, Bin Fan, Shiming Xiang, and Chunhong Pan. Relation-shape convolutional neural network for point cloud analysis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8895-8904, 2019a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.754, + 0.824, + 0.784 + ], + "angle": 0, + "content": "Ze Liu, Zheng Zhang, Yue Cao, Han Hu, and Xin Tong. Group-free 3d object detection via transformers. arXiv preprint arXiv:2104.00678, 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.792, + 0.827, + 0.836 + ], + "angle": 0, + "content": "Zhengzhe Liu, Xiaojuan Qi, and Chi-Wing Fu. 3d-to-2d distillation for indoor scene parsing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4464-4474, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.844, + 0.824, + 0.874 + ], + "angle": 0, + "content": "Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Point-voxel cnn for efficient 3d deep learning. In Advances in Neural Information Processing Systems, pp. 965-975, 2019b." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3431-3440, 2015." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.133 + ], + "angle": 0, + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.142, + 0.826, + 0.187 + ], + "angle": 0, + "content": "Yecheng Lyu, Xinming Huang, and Ziming Zhang. Learning to segment 3d point clouds in 2d image space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12255-12264, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.197, + 0.826, + 0.255 + ], + "angle": 0, + "content": "Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7210-7219, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.265, + 0.826, + 0.309 + ], + "angle": 0, + "content": "Daniel Maturana and Sebastian Scherer. Voxnet: A 3d convolutional neural network for real-time object recognition. In 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 922-928. 
IEEE, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.318, + 0.826, + 0.362 + ], + "angle": 0, + "content": "Leonard McMillan and Gary Bishop. Plenoptic modeling: An image-based rendering system. In Proceedings of the 22nd annual conference on Computer graphics and interactive techniques, pp. 39-46, 1995." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.371, + 0.826, + 0.428 + ], + "angle": 0, + "content": "Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4460-4470, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.439, + 0.826, + 0.483 + ], + "angle": 0, + "content": "Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pp. 405-421. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.493, + 0.826, + 0.523 + ], + "angle": 0, + "content": "Masatoshi Okutomi and Takeo Kanade. A multiple-baseline stereo. IEEE Transactions on pattern analysis and machine intelligence, 15(4):353-363, 1993." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.532, + 0.826, + 0.59 + ], + "angle": 0, + "content": "Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 165-174, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.6, + 0.826, + 0.643 + ], + "angle": 0, + "content": "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 
In NIPS-W, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.653, + 0.826, + 0.697 + ], + "angle": 0, + "content": "Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10318-10327, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.706, + 0.826, + 0.751 + ], + "angle": 0, + "content": "Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 652-660, 2017a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.76, + 0.826, + 0.804 + ], + "angle": 0, + "content": "Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In Advances in neural information processing systems (NIPS), pp. 5099-5108, 2017b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.814, + 0.826, + 0.857 + ], + "angle": 0, + "content": "Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.867, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael S. Bernstein, Alexander C. Berg, and Fei-Fei Li. Imagenet large scale visual recognition challenge. CoRR, abs/1409.0575, 2014. URL http://arxiv.org/abs/1409.0575." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.175 + ], + "angle": 0, + "content": "Konstantinos Sfikas, Theoharis Theoharis, and Ioannis Pratikakis. Exploiting the PANorama Representation for Convolutional Neural Network Classification and Retrieval. In Ioannis Pratikakis, Florent Dupont, and Maks Ovsjanikov (eds.), Eurographics Workshop on 3D Object Retrieval, pp. 1-7. The Eurographics Association, 2017. ISBN 978-3-03868-030-7. doi: 10.2312/3dor.20171045." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.181, + 0.826, + 0.226 + ], + "angle": 0, + "content": "Hang Su, Subhransu Maji, Evangelos Kalogerakis, and Erik Learned-Miller. Multi-view convolutional neural networks for 3d shape recognition. In Proceedings of the IEEE international conference on computer vision, pp. 945-953, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.23, + 0.827, + 0.289 + ], + "angle": 0, + "content": "Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. Kpconv: Flexible and deformable convolution for point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6411–6420, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.295, + 0.827, + 0.34 + ], + "angle": 0, + "content": "Mikaela Angelina Uy, Quang-Hieu Pham, Binh-Son Hua, Duc Thanh Nguyen, and Sai-Kit Yeung. Revisiting point cloud classification: A new benchmark dataset and classification model on real-world data. In International Conference on Computer Vision (ICCV), 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.345, + 0.827, + 0.389 + ], + "angle": 0, + "content": "Brian H Wang, Wei-Lun Chao, Yan Wang, Bharath Hariharan, Kilian Q Weinberger, and Mark Campbell. Ldls: 3-d object segmentation through label diffusion from 2-d images. IEEE Robotics and Automation Letters, 4(3):2902-2909, 2019a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.395, + 0.826, + 0.453 + ], + "angle": 0, + "content": "He Wang, Srinath Sridhar, Jingwei Huang, Julien Valentin, Shuran Song, and Leonidas J Guibas. Normalized object coordinate space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2642-2651, 2019b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.459, + 0.827, + 0.504 + ], + "angle": 0, + "content": "Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E. Sarma, Michael M. Bronstein, and Justin M. Solomon. Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics (TOG), 2019c." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.509, + 0.827, + 0.554 + ], + "angle": 0, + "content": "Xin Wei, Ruixuan Yu, and Jian Sun. View-gcn: View-based graph convolutional network for 3d shape analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1850-1859, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.559, + 0.827, + 0.59 + ], + "angle": 0, + "content": "Ross Wightman. Pytorch image models. https://github.com/rwightman/pytorch-image-models, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.595, + 0.826, + 0.64 + ], + "angle": 0, + "content": "Zhirong Wu, S. Song, A. Khosla, Fisher Yu, Linguang Zhang, Xiaoou Tang, and J. Xiao. 3d shapenets: A deep representation for volumetric shapes. In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1912-1920, 2015." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.645, + 0.826, + 0.69 + ], + "angle": 0, + "content": "Tiange Xiang, Chaoyi Zhang, Yang Song, Jianhui Yu, and Weidong Cai. Walk in the cloud: Learning curves for point clouds shape analysis. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 915-924, October 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.695, + 0.826, + 0.74 + ], + "angle": 0, + "content": "Yifan Xu, Tianqi Fan, Mingye Xu, Long Zeng, and Yu Qiao. SpiderCNN: Deep learning on point sets with parameterized convolutional filters. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 87-102, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.745, + 0.826, + 0.802 + ], + "angle": 0, + "content": "Li Yi, Vladimir G Kim, Duygu Ceylan, I-Chao Shen, Mengyan Yan, Hao Su, Cewu Lu, Qixing Huang, Alla Sheffer, and Leonidas Guibas. A scalable active framework for region annotation in 3d shape collections. ACM Transactions on Graphics (ToG), 35(6):1-12, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.809, + 0.826, + 0.854 + ], + "angle": 0, + "content": "Haoxuan You, Yifan Feng, Rongrong Ji, and Yue Gao. Pvnet: A joint convolutional network of point cloud and multi-view for 3d shape recognition. In Proceedings of the 26th ACM international conference on Multimedia, pp. 1310-1318, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.859, + 0.826, + 0.89 + ], + "angle": 0, + "content": "Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In ICCV, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip Torr, and Vladlen Koltun. Point transformer. arXiv preprint arXiv:2012.09164, 2020." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.269, + 0.119 + ], + "angle": 0, + "content": "APPENDIX" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.135, + 0.447, + 0.151 + ], + "angle": 0, + "content": "A DETAILED FORMULATIONS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.165, + 0.334, + 0.18 + ], + "angle": 0, + "content": "A.1 TOY EXAMPLE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.191, + 0.827, + 0.292 + ], + "angle": 0, + "content": "In the toy 2D example in Figure 5, the center point (represented by a circular function \\( g \\)) is viewed from various view-points \\( u_{j} \\) that are agnostic to the underlying function itself. In many applications, it is desired to have a single feature representing each point in the point cloud. When the projected values of \\( g \\) from these \\( u_{j} \\) view-points are aggregated together (e.g. by max/mean pool) to get a constant representation of that point, the underlying properties of \\( g \\) are lost. We build our Voint representation to keep the structure of \\( g \\) intact by taking the full set \\( \\{(u_{j},g(u_{j}))\\}_{j = 1}^{5} \\) in learning the aggregations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.305, + 0.479, + 0.319 + ], + "angle": 0, + "content": "A.2 FUNCTIONAL FORM OF VOINTNET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.33, + 0.825, + 0.442 + ], + "angle": 0, + "content": "We can look at a simplified setup to decide on the functional form of the deep neural network that operates in the Voint space. 
In this simplified setup, we consider a 2D example (instead of 3D Voints) and assume that a circular function describes a point at the center. The center point will assume its value according to the angle \\( u \\). The following Theorem 1 proves that for any continuous set function \\( f \\) that operates on any set of \\( M \\) angles \\( \\{u_1, \\dots, u_M\\} \\), there exists an equivalent composite function consisting of transformed max-pooled individual view-features. This composition is the functional form we describe later for Voint neural networks" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.451, + 0.827, + 0.538 + ], + "angle": 0, + "content": "Theorem 1 Suppose \\( f: \\mathcal{S} \\to \\mathbb{R} \\) is a continuous set function operating on an angles set \\( \\mathcal{S} = \\{u \\mid u \\in [0,2\\pi]\\} \\). The continuity of \\( f \\) is based on the Hausdorff distance \\( d_H \\) between two sets of angles, where \\( d_H(\\mathcal{S},\\mathcal{S}') = \\max_{u_i' \\in \\mathcal{S}'} \\min_{u_i \\in \\mathcal{S}} d_A(u_i,u_i') \\), and \\( d_A \\) is the smallest positive angle between two angles \\( d_A(u,u') = \\min(|u - u'|, 2\\pi - |u - u'|) \\). Then, for every \\( \\epsilon > 0 \\), and \\( \\mathcal{U} = \\{u_1,\\dots,u_M\\} \\subset \\mathcal{S} \\), there exists a continuous function \\( \\mathbf{h} \\) and a symmetric function \\( g(u_1,\\dots,u_M) = \\gamma \\circ \\mathrm{MAX} \\), such that:" + }, + { + "type": "equation", + "bbox": [ + 0.335, + 0.541, + 0.825, + 0.568 + ], + "angle": 0, + "content": "\\[\n\\left| f (\\mathcal {U}) - \\gamma \\left(\\operatorname {M A X} \\left(\\mathbf {h} \\left(u _ {1}\\right), \\dots , \\mathbf {h} \\left(u _ {M}\\right)\\right)\\right) \\right| < \\epsilon , \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.569, + 0.781, + 0.585 + ], + "angle": 0, + "content": "where \\(\\gamma\\) is a continuous function, and MAX is an element-wise vector max operator." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.593, + 0.827, + 0.636 + ], + "angle": 0, + "content": "Proof. By the continuity of \\( f \\), we take \\( \\delta_{\\epsilon} \\) so that \\( |f(\\mathcal{U}) - f(\\mathcal{U}')| < \\epsilon \\) for any \\( \\mathcal{U}, \\mathcal{U}' \\subset \\mathcal{S} \\) if \\( d_H(\\mathcal{U}, \\mathcal{U}') < \\delta_{\\epsilon} \\). Define \\( K = [2\\pi/\\delta_{\\epsilon}] \\), which split \\( [0, 2\\pi] \\) into \\( K \\) intervals evenly and define an auxiliary function that maps an angle to the beginning of the interval it lies in:" + }, + { + "type": "equation", + "bbox": [ + 0.447, + 0.638, + 0.55, + 0.668 + ], + "angle": 0, + "content": "\\[\n\\sigma (u) = \\frac {\\lfloor K u \\rfloor}{K}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.67, + 0.371, + 0.687 + ], + "angle": 0, + "content": "Let \\(\\tilde{\\mathcal{U}} = \\sigma(u): u \\in \\mathcal{U}\\), then" + }, + { + "type": "equation", + "bbox": [ + 0.434, + 0.686, + 0.825, + 0.704 + ], + "angle": 0, + "content": "\\[\n\\left| f (\\mathcal {U}) - f (\\tilde {\\mathcal {U}}) \\right| < \\epsilon \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.706, + 0.825, + 0.759 + ], + "angle": 0, + "content": "Let \\( h_k(u) = e^{-d\\left(u, \\left[\\frac{k-1}{K}, \\frac{k}{K}\\right]\\right)} \\) be a soft indicator function where \\( d\\left(u, \\left[\\frac{k-1}{K}, \\frac{k}{K}\\right]\\right) = \\min\\left(d_A\\left(u, \\frac{k-1}{K}\\right), d_A\\left(u, \\frac{k}{K}\\right)\\right) \\) is the distance between angle \\( u \\) to interval \\( \\left[\\frac{k-1}{K}, \\frac{k}{K}\\right] \\). 
Let \\( \\mathbf{h}(u) = [h_1(u); \\ldots; h_K(u)] \\), then \\( \\mathbf{h}: \\mathbb{R} \\to \\mathbb{R}^K \\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.763, + 0.825, + 0.809 + ], + "angle": 0, + "content": "Let \\( q_{j}(u_{1},\\ldots ,u_{M}) = \\max \\{h_{j}(u_{1}),\\ldots ,h_{j}(u_{M})\\} \\), indicating the occupancy of the \\( j \\)-th interval by angles in \\( \\mathcal{U} \\). Let \\( \\mathbf{q} = [q_1;\\dots;q_K] \\), then \\( \\mathbf{q}:[0,2\\pi ]^M\\to \\{0,1\\} ^K \\) is a symmetric function, indicating the occupancy of each interval by angles in \\( \\mathcal{U} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.825, + 0.843 + ], + "angle": 0, + "content": "Define \\(\\zeta : \\{0,1\\}^K \\to S\\) as \\(\\zeta(\\mathbf{q}) = \\left\\{\\frac{k-1}{K} : q_k \\geq 1\\right\\}\\) which maps the occupancy vector to a set which contains the left end of each angle interval. It is straightforward to show:" + }, + { + "type": "equation", + "bbox": [ + 0.449, + 0.845, + 0.825, + 0.863 + ], + "angle": 0, + "content": "\\[\n\\zeta (\\mathbf {q} (\\mathcal {U})) \\equiv \\tilde {\\mathcal {U}} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.865, + 0.825, + 0.895 + ], + "angle": 0, + "content": "Let \\(\\gamma : \\mathbb{R}^K \\to \\mathbb{R}\\) be a continuous function such that \\(\\gamma(\\mathbf{q}) = f(\\zeta(\\mathbf{q}))\\) for \\(\\mathbf{q} \\in \\{0,1\\}^K\\). 
Then from Eq (6) and Eq (7)," + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.895, + 0.824, + 0.928 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left| \\gamma (\\mathbf {q} (\\mathcal {U})) - f (\\mathcal {U}) \\right| \\\\ = \\left| f \\left(\\zeta (\\mathbf {q} (\\mathcal {U}))\\right) - f (\\mathcal {U}) \\right| < \\epsilon \\tag {8} \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.103, + 0.703, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.359, + 0.828, + 0.44 + ], + "angle": 0, + "content": "Figure 5: A Toy 2D Example of Voints. Voints assume view-dependency for every 3D point. Here, we look at a single 2D point at the center with a circular function \\( g(u) = \\mathrm{sign}(\\cos u) \\) from five arbitrary view-points \\( \\{u_j\\}_{j=1}^5 \\). Trying to reduce \\( g \\) to a single value based on \\( u_j \\) projections undermines the underlying structure of \\( g \\). We take the full set \\( \\{(u_j, g(u_j))\\}_{j=1}^5 \\) as a representation of \\( g \\) and learn a set function \\( f \\) on these view-features for a more informative manner of representation aggregation." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.463, + 0.517, + 0.479 + ], + "angle": 0, + "content": "Note that \\(\\gamma (\\mathbf{q}(\\mathcal{U}))\\) can be rewritten as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.346, + 0.485, + 0.822, + 0.536 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\gamma \\left(\\mathbf {q} \\left(\\mathcal {U}\\right)\\right) = \\gamma \\left(\\mathbf {q} \\left(u _ {1}, \\dots , u _ {M}\\right)\\right) \\\\ = \\gamma (\\operatorname {M A X} \\left(\\mathbf {h} \\left(u _ {1}\\right), \\dots , \\mathbf {h} \\left(u _ {M}\\right)\\right)) \\tag {9} \\\\ = (\\gamma \\circ \\operatorname {M A X}) \\left(\\mathbf {h} \\left(u _ {1}\\right), \\dots , \\mathbf {h} \\left(u _ {M}\\right)\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.542, + 0.825, + 0.571 + ], + "angle": 0, + "content": "Since \\(\\gamma \\circ\\) MAX is a symmetric function and from Eq (8) and Eq (9), we reach to the main result in Eq (5). This concludes the proof." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.586, + 0.357, + 0.6 + ], + "angle": 0, + "content": "A.3 3D VOINT CLOUD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.612, + 0.827, + 0.89 + ], + "angle": 0, + "content": "Plenoptic and Spherical Coordinate Functions. The Plenoptic function was first introduced by McMillan and Bishop (McMillan & Bishop, 1995) in 1995 as a general function that describes the visible world. The Plenoptic function \\( P \\) is a continuous spherical function that describes the visibility at any Euclidean 3D point in space \\( (V_x, V_y, V_z) \\) when looking into any direction \\( (\\theta, \\phi) \\) across wavelength \\( \\lambda \\) at time \\( t \\). It is defined as \\( p = P(\\theta, \\phi, \\lambda, V_x, V_y, V_z, t) \\). Such a remarkable and compact formulation covers all the images observed as just samples of the function \\( P \\). 
For fixed time and wavelength, the reduced Plenoptic function \\( P \\) becomes \\( p = P(\\theta, \\phi, V_x, V_y, V_z) \\) which can describe any field in 3D space. This shortened formulation is what Neural Radiance Fields (NeRFs) (Mildenhall et al., 2020; Pumarola et al., 2021; Martin-Brualla et al., 2021) try to learn with MLPs to describe the radiance and RGB values in the continuous Euclidean space with a dependency on the view direction \\( (\\theta, \\phi) \\). In the same spirit of the Plenoptic function and NeRFs, the Voint cloud representation relies on the viewing angles \\( (\\theta, \\phi) \\) to define the view-features. The problem with the plenoptic functions \\( P \\), and subsequently NeRFs, is that they are very high dimensional, and any attempt to densely represent the scene with discrete and fixed data will cause memory and compute issues (Yu et al., 2021; Pumarola et al., 2021). Unlike NERFs (Mildenhall et al., 2020) that define dense 3D volumes, we focus only on the surface of the 3D shapes with our Voint clouds representation. Our Voints are in the order of the sampled point cloud, offering a compact representation that allows for efficient computation and memory while maintaining the view-dependent component that facilitates view-based learning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "From Point Clouds to Voint Clouds. 
Implicit representation of 3D surfaces typically aims to learn an implicit function \\( g_{\\mathrm{s}}(\\mathbf{x}) : \\mathbb{R}^3 \\to \\mathbb{R} \\) that define the Sign Distance Function" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.175 + ], + "angle": 0, + "content": "(SDF) or the occupancy in the continuous Euclidean space (Park et al., 2019; Mescheder et al., 2019). The 3D iso-surface is then defined as the set of all points \\(\\mathbf{x}\\) that satisfy the condition \\(g_{\\mathrm{s}}(\\mathbf{x}) = 0\\) (assuming \\(g_{\\mathrm{s}}(\\mathbf{x})\\) as SDF hereafter). We define a surface 3D point cloud \\(\\mathcal{X} \\in \\mathbb{R}^{N \\times 3}\\), as a set of \\(N\\) 3D points, where each point \\(\\mathbf{x}_i \\in \\mathbb{R}^3\\) is represented by its 3D coordinates \\((x_i, y_i, z_i)\\) and satisfy the iso-surface condition as follows." + }, + { + "type": "equation", + "bbox": [ + 0.396, + 0.198, + 0.825, + 0.218 + ], + "angle": 0, + "content": "\\[\n\\mathcal {X} = \\left\\{\\mathbf {x} _ {i} \\in \\mathbb {R} ^ {3} \\mid g _ {\\mathrm {s}} (\\mathbf {x} _ {i}) = 0 \\right\\} _ {i = 1} ^ {N} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.233, + 0.827, + 0.318 + ], + "angle": 0, + "content": "Here, we assume that surface points also depend on the view direction from which they are being observed. Specifically, there exists a continuous implicit spherical function \\(\\mathbf{g}(\\mathbf{x},\\mathbf{u}):\\) \\(\\mathbb{R}^5\\to \\mathbb{R}^d\\) that defines the features at each point \\(\\mathbf{x}\\) depending on the view direction \\(\\mathbf{u}\\). 
Given a set of \\(M\\) view-point directions \\(\\mathcal{U}\\in \\mathbb{R}^{M\\times 2}\\), a Voint \\(\\widehat{\\mathbf{x}}\\in \\mathbb{R}^{M\\times d}\\) is a set of \\(M\\) view-dependent features of size \\(d\\) for the sphere centered at point \\(\\mathbf{x}\\). The Voint cloud \\(\\widehat{\\mathcal{X}}\\in \\mathbb{R}^{N\\times M\\times d}\\) is the set of all \\(N\\) Voints \\(\\widehat{\\mathbf{x}}\\)." + }, + { + "type": "equation", + "bbox": [ + 0.369, + 0.342, + 0.823, + 0.372 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\mathbf {x}} _ {i} = \\left\\{\\mathbf {g} \\left(\\mathbf {x} _ {i}, \\mathbf {u} _ {j}\\right) \\in \\mathbb {R} ^ {d} \\mid \\mathbf {x} _ {i} \\in \\mathcal {X} \\right\\} _ {j = 1} ^ {M} \\tag {11}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.369, + 0.526, + 0.39 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\mathcal {X}} = \\left\\{\\widehat {\\mathbf {x}} _ {i} \\in \\mathbb {R} ^ {M \\times d} \\right\\} _ {i = 1} ^ {N}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.404, + 0.827, + 0.434 + ], + "angle": 0, + "content": "Note that we typically do not have access to the underlying implicit function \\(\\mathbf{g}\\) and we approximate it by 2D projection, feature extraction, and then un-projection as we show next." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.439, + 0.825, + 0.579 + ], + "angle": 0, + "content": "1-Multi-View Projection. As mentioned earlier, a Voint combines multiple view-features of the same 3D point. These view-features come from a multi-view projection of the points by a point cloud renderer \\(\\mathbf{R}:\\mathbb{R}^{N\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times 3}\\) that renders the point cloud \\(\\mathcal{X}\\) from multiple view-points \\(\\mathcal{U}\\) into \\(M\\) images of size \\(H\\times W\\times 3\\). 
In addition to projecting the point cloud into the image space, \\(\\mathbf{R}\\) defines the mapping \\(\\mathbf{B}\\in \\{0,\\dots,N\\}^{M\\times H\\times W}\\) between each pixel to the N points and background it renders. Also, \\(\\mathbf{R}\\) outputs the visibility binary matrix \\(\\mathbf{V}\\in \\{0,1\\}^{N\\times M}\\) for each point from each view. Since not all points appear in all the views due to pixel discretization, the visibility score \\(\\mathbf{V}_{i,j}\\) defines if the Voint \\(\\hat{\\mathbf{x}}_i\\) is visible in the view \\(\\mathbf{u}_j\\). The matrix \\(\\mathbf{B}\\) is crucial for unprojection, while \\(\\mathbf{V}\\) is needed for defining meaningful operations on Voints." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.585, + 0.825, + 0.656 + ], + "angle": 0, + "content": "2-Multi-View Feature Extraction. The rendered images are processed by a function \\(\\mathbf{C}:\\mathbb{R}^{M\\times H\\times W\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times d}\\) that extracts image features. If \\(\\mathbf{C}\\) is the identity function, all the view-features would be identical for each Voint (typically the RGB value of the corresponding point). However, the \\(\\mathbf{C}\\) function can be a 2D network dedicated to the downstream task and can extract useful global and local features about each view." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.661, + 0.827, + 0.763 + ], + "angle": 0, + "content": "3-Multi-View Unprojection. We propose a module \\(\\Phi_{\\mathbf{B}}:\\mathbb{R}^{M\\times H\\times W\\times d}\\to \\mathbb{R}^{N\\times M\\times d}\\) that unprojects the 2D features from each pixel to be 3D view-features at the corresponding Voint. This is performed by using the mapping \\(\\mathbf{B}\\) created by the renderer to form the Voint cloud features \\(\\widehat{\\mathcal{X}}\\). 
Note that the points are not necessarily visible from all the views, and some Voints that are not visible from any of the \\(M\\) views will not receive any features. We post-process these empty points (\\(\\sim 0.5\\%\\) of points during inference) to be filled with nearest 3D neighbors features. The output Voint cloud features would be described as follows." + }, + { + "type": "equation", + "bbox": [ + 0.349, + 0.784, + 0.654, + 0.808 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\mathbf {x}} _ {i} = \\left\\{\\mathbf {g} _ {i, j,:} \\in \\mathbb {R} ^ {d} \\mid \\mathbf {x} _ {i} \\in \\mathcal {X}, \\mathbf {V} _ {i, j} = 1 \\right\\} _ {j = 1} ^ {M}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.81, + 0.823, + 0.827 + ], + "angle": 0, + "content": "\\[\n\\mathbf {g} _ {:, j} = \\Phi_ {\\mathbf {B}} \\left(\\mathbf {C} \\left(\\mathbf {R} \\left(\\mathcal {X}, \\mathbf {u} _ {j}\\right)\\right), \\mathbf {B}\\right) \\tag {12}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.353, + 0.829, + 0.509, + 0.851 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\mathcal {X}} = \\left\\{\\widehat {\\mathbf {x}} _ {i} \\in \\mathbb {R} ^ {M \\times d} \\right\\} _ {i = 1} ^ {N}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.869, + 0.37, + 0.882 + ], + "angle": 0, + "content": "A.4 VOINT OPERATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "VointMax. In order to learn a neural network in the Voint space in the form dictated by Theorem 1, we need to define some basic differentiable operations on the Voint space. 
The" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.105, + 0.725, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.325, + 0.825, + 0.415 + ], + "angle": 0, + "content": "Figure 6: VointNet Variants. We propose three variants of VointNet that use three different examples of VointConv operation \\( h_v \\): shared MLP (MLP), Graph Convolution (GCN), and Graph Attention (GAT). Here we highlight the main difference between VointNet (MLP) that shares the MLP on all the view-features and VointNet (GCN) that creates a fully connected graph on the view-features and learn an MLP on the edge view-features. VointNet (GAT) is similar to VointNet (GCN) in addition to learning attention weights for each view-feature in weighted average aggregation." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.442, + 0.604, + 0.457 + ], + "angle": 0, + "content": "max operation on the Voint cloud can be defined as follows." + }, + { + "type": "equation", + "bbox": [ + 0.338, + 0.463, + 0.822, + 0.491 + ], + "angle": 0, + "content": "\\[\n\\operatorname {V o i n t M a x} (\\widehat {\\mathbf {x}}) = \\max \\widehat {\\mathbf {x}} _ {i, j}, \\forall i, j \\tag {13}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.338, + 0.49, + 0.658, + 0.506 + ], + "angle": 0, + "content": "\\[\n\\mathrm {s . 
t .} i \\in 1, 2, \\dots , N, j \\in 1, 2, \\dots , M, \\mathbf {V} _ {i, j} = 1\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.513, + 0.782, + 0.532 + ], + "angle": 0, + "content": "Equivalently, \\(\\mathrm{VointMax}(\\widehat{\\mathbf{x}}) = \\max_j\\left(\\widehat{\\mathbf{x}}_{:,j} - \\infty \\overline{\\mathbf{V}}_{:,j}\\right)\\), where \\(\\overline{\\mathbf{V}}\\) is the complement of \\(\\mathbf{V}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.54, + 0.825, + 0.641 + ], + "angle": 0, + "content": "VointConv. We define the convolution operation \\( h_{\\mathrm{V}}: \\mathbb{R}^{N \\times M \\times d} \\to \\mathbb{R}^{N \\times M \\times d'} \\) as any learnable function that operates on the Voint space with shared weights on all the Voints and has the view-features input size \\( d \\) and outputs view-features of size \\( d' \\) and consists of \\( l_{V} \\) layers. Examples of this VointConv operation include the following operations applied only on the visible view-features: a shared MLP, a graph convolution, and a graph attention. We detail these operations later in Section A.6, which result in different non-exhaustive variants of VointNet." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.658, + 0.473, + 0.672 + ], + "angle": 0, + "content": "A.5 LEARNING ON 3D VOINT CLOUDS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.825, + 0.726 + ], + "angle": 0, + "content": "VpointNet. Typical 3D point cloud classifiers with a feature max pooling layer work as in Eq (14), where \\( h_{\\mathrm{mlp}} \\) and \\( h_{\\mathrm{Pconv}} \\) are the MLP and point Convolutional \\( (1 \\times 1 \\) or edge) layers, respectively. This produces a K-class classifier \\( \\mathbf{F} \\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.733, + 0.823, + 0.756 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} (\\mathcal {X}) = h _ {\\operatorname {m l p}} \\left(\\max _ {\\mathbf {x} _ {i} \\in \\mathcal {X}} \\left\\{h _ {\\text {P c o n v}} \\left(\\mathbf {x} _ {i}\\right) \\right\\}\\right) \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.825, + 0.822 + ], + "angle": 0, + "content": "Here, \\(\\mathbf{F}:\\mathbb{R}^{N\\times 3}\\to \\mathbb{R}^K\\) produces the logits layer of the classifier with size \\(K\\). On the other hand, the goal of the VointNet model is to get multi-view point cloud features that can be used after which by any point cloud processing pipeline. The VointNet module \\(\\widehat{\\mathbf{F}}:\\mathbb{R}^{N\\times M\\times d}\\rightarrow \\mathbb{R}^{N\\times d}\\) as follows." + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.828, + 0.824, + 0.854 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\mathbf {F}} (\\widehat {\\mathcal {X}}) = h _ {\\mathrm {P}} \\left(\\operatorname {V o i n t M a x} \\left(h _ {\\mathrm {V}} (\\widehat {\\mathcal {X}})\\right)\\right), \\tag {15}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.868, + 0.382, + 0.882 + ], + "angle": 0, + "content": "A.6 VOINTNET VARIANTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.894, + 0.825, + 0.925 + ], + "angle": 0, + "content": "We define the convolution operation \\( h_{\\mathrm{V}} \\colon \\mathbb{R}^{N \\times M \\times d} \\to \\mathbb{R}^{N \\times M \\times d'} \\) in VointNet from Eq (15) as any learnable function that operates on the Voint space with shared weights on all the" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": 
[ + 0.17, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "Voints and has the view-features input size \\(d\\) and outputs view-features of size \\(d'\\) and consists of \\(l_V\\) layers. Examples of this VointConv operation include the following:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.139, + 0.826, + 0.168 + ], + "angle": 0, + "content": "Shared MLP. It is the most basic Voint neural network. For layer \\(l\\), the features of Voint i at view j is updated as follows to layer \\(l + 1\\)" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.173, + 0.825, + 0.2 + ], + "angle": 0, + "content": "\\[\n\\mathbf {h} _ {i, j} ^ {l + 1} = \\rho \\left(\\mathbf {h} _ {i, j} ^ {l} \\mathcal {W} _ {\\rho}\\right), \\forall i, j \\tag {16}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.354, + 0.196, + 0.676, + 0.212 + ], + "angle": 0, + "content": "\\[\n\\mathrm {s . t .} i \\in {1, 2, \\dots , N}, j \\in {1, 2, \\dots , M}, \\mathbf {V} _ {i, j} = 1\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.216, + 0.827, + 0.272 + ], + "angle": 0, + "content": "where \\(\\rho\\) is the shared MLP with weights \\(\\mathcal{W}_{\\rho}\\) followed by normalization and nonlinear function (e.g. ReLU) applied on all Voints independently at the visible views features for each Voint. This formulation extends the shared MLP formulation for PointNet (Qi et al., 2017a) to make the MLP shared across the Voints and the views-features." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.278, + 0.825, + 0.391 + ], + "angle": 0, + "content": "Graph Convolution (GCN). Just like how DGCNN (Wang et al., 2019c) extended PointNet (Qi et al., 2017a) by taking the neighborhood information and extract edge features, we extend the basic VointNet formulation in Eq (15). 
We define a fully connected graph for each Voint along the views dimension by creating a center virtual node connected to all the view features (similar to the classification token in ViT (Dosovitskiy et al., 2021)). This center virtual view-feature would be assigned the index \\( j = 0 \\) and can be initialized with zeros as the \"cls\" token in ViT (Dosovitskiy et al., 2021). Then, Voint graph convolution operation can be defined as follows to update the activations from layer \\( l \\) to \\( l + 1 \\)" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.395, + 0.824, + 0.437 + ], + "angle": 0, + "content": "\\[\n\\mathbf {h} _ {i, j} ^ {l + 1} = \\rho \\left(\\left(\\max _ {k} \\psi \\left(\\left(\\mathbf {h} _ {i, j} ^ {l}, \\mathbf {h} _ {i, k} ^ {l}\\right) \\mathcal {W} _ {\\psi}\\right)\\right) \\mathcal {W} _ {\\rho}\\right) \\forall i, j \\in \\{1, 2, \\dots , N - 1, 0, 1, M \\} \\tag {17}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.345, + 0.424, + 0.823, + 0.446 + ], + "angle": 0, + "content": "\\[\n\\forall i, j, k \\quad \\text {s . t .} \\quad i \\in 1, 2, \\dots , N, j \\in 0, 1, \\dots , M \\tag {17}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.411, + 0.448, + 0.652, + 0.465 + ], + "angle": 0, + "content": "\\[\nk \\in 0, 1, \\dots , M, k \\neq j, \\mathbf {V} _ {i, j} = 1\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.469, + 0.825, + 0.499 + ], + "angle": 0, + "content": "where \\(\\rho, \\psi\\) are two different shared MLPs as in Eq (16). The difference between VointNet (MLP) and VointNet (GCN) is highlighted in Figure 6." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.505, + 0.825, + 0.561 + ], + "angle": 0, + "content": "Graph Attention (GAT). Similar to how Point Transformer (Zhao et al., 2020) extended the graph convolution by adding attention to DGCNN (Wang et al., 2019c), we extend the basic Voint GraphConv formulation in Eq (17). 
Voint graph attention operation can be defined as follows to update the activations from layer \\(l\\) to \\(l + 1\\)" + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.566, + 0.824, + 0.618 + ], + "angle": 0, + "content": "\\[\n\\mathbf {h} _ {i, j} ^ {l + 1} = \\rho \\left(\\left(\\sum_ {k = 0, k \\neq j} ^ {M} \\eta_ {k} \\psi \\left((\\mathbf {h} _ {i, j} ^ {l}, \\mathbf {h} _ {i, k} ^ {l}) \\mathcal {W} _ {\\psi}\\right)\\right) \\mathcal {W} _ {\\rho}\\right) \\tag {18}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.325, + 0.617, + 0.608, + 0.633 + ], + "angle": 0, + "content": "\\[\n\\forall i, j \\mathrm {s . t .} i \\in 1, 2, \\dots , N, j \\in 0, 1, \\dots , M\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.636, + 0.594, + 0.655 + ], + "angle": 0, + "content": "\\[\n\\eta_ {k} = \\zeta \\left(\\mathbf {h} _ {i, k} ^ {l} \\mathcal {W} _ {\\zeta}\\right), \\mathbf {V} _ {i, j} = 1\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.66, + 0.825, + 0.688 + ], + "angle": 0, + "content": "where \\(\\rho, \\psi, \\zeta\\) are three different shared MLPs as in Eq (16), and \\(\\eta_{k}\\) are the learned attention weights for each neighbor view-feature." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.707, + 0.51, + 0.723 + ], + "angle": 0, + "content": "B DETAILED EXPERIMENTAL SETUP" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.738, + 0.297, + 0.752 + ], + "angle": 0, + "content": "B.1 DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.763, + 0.827, + 0.876 + ], + "angle": 0, + "content": "ScanObjectNN: 3D Point Cloud Classification. We follow the literature (Goyal et al., 2021; Hamdi et al., 2021) on testing 3D classification in the challenging ScanObjectNN (Uy et al., 2019) point cloud dataset, since it includes background and considers occlusions. The dataset is composed of 2902 point clouds divided into 15 object categories. We use 2048 sampled points per object for Voint learning. 
We benchmark on its variants: Object only, Object with Background, and the Hardest perturbed variant (PB_T50_RS variant). Visualization is provided in Figure 7 of some of the renderings used in training the 2D backbone in our pipeline." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.882, + 0.827, + 0.926 + ], + "angle": 0, + "content": "ShapeNet Core55: 3D Shape Retrieval. The shape retrieval challenge SHREC (Sfikas et al., 2017) uses ShapeNet Core55 is a subset of ShapeNet (Chang et al., 2015) for benchmarking. The dataset consists of 51,162 3D mesh objects labeled with 55 object classes. The" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "training, validation, and test sets consist of 35764, 5133, and 10265 shapes. We create a dataset of point clouds by sampling 5000 points from each mesh object as in MVTN (Hamdi et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.153, + 0.827, + 0.226 + ], + "angle": 0, + "content": "ShapeNet Parts: 3D Part Segmentation. ShapeNet Parts is a subset of ShapeNet (Chang et al., 2015) that consists of 13,998 point cloud objects for train and 2,874 objects for the test from 16 categories and 50 parts. It is designed for the part segmentation task (Yi et al., 2016). Visualization is provided in Figure 10 of some of the renderings used in training the 2D backbone in our pipeline colored with the ground truth segmentation labels." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.23, + 0.828, + 0.302 + ], + "angle": 0, + "content": "ModelNet40: 3D Shape Classification Occlusion Robustness. 
ModelNet40 (Wu et al., 2015) is composed of 12,311 3D objects (9,843/2,468 in training/testing) labelled with 40 object classes. We sample 2048 points clouds from the objects following previous works (Qi et al., 2017b; Zhao et al., 2020). Visualization is provided in Figure 8 of some of the renderings used in training the 2D backbone in our pipeline." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.32, + 0.291, + 0.333 + ], + "angle": 0, + "content": "B.2 METRICS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.347, + 0.827, + 0.391 + ], + "angle": 0, + "content": "Classification Accuracy. The standard evaluation metric in 3D classification is accuracy. We report overall accuracy (percentage of correctly classified test samples) and average per-class accuracy (mean of all true class accuracies)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.396, + 0.827, + 0.47 + ], + "angle": 0, + "content": "Retrieval mAP. Shape retrieval is evaluated by mean Average Precision (mAP) over test queries. For every query shape \\(\\mathbf{S}_q\\) from the test set, AP is defined as \\(AP = \\frac{1}{\\mathrm{GTP}}\\sum_{n}^{N}\\frac{\\mathbb{1}(\\mathbf{S}_n)}{n}\\), where \\(GTP\\) is the number of ground truth positives, \\(N\\) is the size of the ordered training set, and \\(\\mathbb{1}(\\mathbf{S}_n) = 1\\) if the shape \\(\\mathbf{S}_n\\) is from the same class label of query \\(\\mathbf{S}_q\\). We average the retrieval AP over the test set to measure retrieval mAP." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.476, + 0.827, + 0.589 + ], + "angle": 0, + "content": "Segmentation mIoU. Semantic Segmentation is evaluated by mean Intersection over Union (mIoU) over pixels or points. For every class label, measure the size of the intersection mask between the ground truth points of that label and the predicted points as that label. Then, divide by the size of the union mask of the same label to get IoU. 
This procedure is repeated over all the labels, and averaging the IoUs gives mIoU. We report two types of mIoUs: Instance-averaged mIoU (averages all mIoUs across all objects) and Category-averaged mIoU (averages all mIoU from shapes of the same category, and then average those across object categories)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.607, + 0.303, + 0.62 + ], + "angle": 0, + "content": "B.3 BASELINES" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.634, + 0.827, + 0.705 + ], + "angle": 0, + "content": "Point Cloud Networks. We include PointNet (Qi et al., 2017a), PointNet++ (Qi et al., 2017b), DGCNN (Wang et al., 2019c), PVNet (You et al., 2018), and KPConv (Thomas et al., 2019), Point Transformer (Zhao et al., 2020) and CurveNet (Xiang et al., 2021) as baselines that use point clouds. These methods leverage different convolution operators on point clouds by aggregating local and global point information." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.71, + 0.825, + 0.782 + ], + "angle": 0, + "content": "Multi-View Networks. We also compare against multi-view classification approaches like MVCNN (Su et al., 2015) and MVTN (Hamdi et al., 2021) as baselines for classification and retrieval. Since there is no available multi-view pipeline for 3D part segmentation, we adopt some of the multi-view segmentation baselines (e.g. Label Fusion (Wang et al., 2019a) and Mean Fusion (Kundu et al., 2020)) for part segmentation to work in the Voint space." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.8, + 0.421, + 0.813 + ], + "angle": 0, + "content": "B.4 IMPLEMENTATION DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Rendering and Un-Projection. We choose the differentiable point cloud renderer \\(\\mathbf{R}\\) from Pytorch3D (Ravi et al., 2020) in our pipeline for its speed and compatibility with Pytorch libraries (Paszke et al., 2017). 
We render multi-view images with size \\(224 \\times 224 \\times 3\\). We color the points by their normals' values or keep them white if the normals are not available. Following a similar procedure to (Wei et al., 2020; Hamdi et al., 2021), the view-point setup is randomized during training (using \\(M = 8\\) views) and fixed to spherical views in testing (using \\(M = 12\\) views)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.509, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.101, + 0.806, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.491, + 0.825, + 0.531 + ], + "angle": 0, + "content": "Figure 7: ScanObjectNN Variants. We show examples of point cloud renderings of different variants of the ScanObjectNN (Uy et al., 2019). These renderings are used in training VointNet for 3D point cloud classification." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.557, + 0.825, + 0.724 + ], + "angle": 0, + "content": "Architectures. For the 2D backbone, we use ViT (Dosovitskiy et al., 2021) (with pretrained weights from TIMM library (Wightman, 2019)) for classification and DeepLabV3 (Chen et al., 2018) for segmentation. We used parallel heads for each object category for part segmentation since the task is solely focused on parts. We use the 3D cross-entropy loss on the 3D point cloud output and the 2D cross-entropy loss when the loss is defined on the pixels. When used, the linear tradeoff coefficient of the 2D loss term is set to 0.003. To balance the frequency of objects in part segmentation, we multiply the loss by the frequency of the object class of each object we segment. 
The feature dimension of the VointNet architectures is \\( d = 64 \\), and the depth is \\( l_{V} = 4 \\) layers in \\( h_V \\). The main results are based on VointNet (MLP) variant unless otherwise specified. The coordinates \\( \\mathbf{x} \\) can be optionally appended to the input view-features \\( \\hat{\\mathbf{x}} \\), which can improve the performance but reduce the rotation robustness as we show later in Section C.1 and Table 9." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.827, + 0.815 + ], + "angle": 0, + "content": "Training Setup. We train our pipeline in two stages, where we start by training the 2D backbone on the 2D projected labels of the points, then train the full pipeline end-to-end while focusing the training on the VointNet part. We use the AdamW optimizer (Loshchilov & Hutter, 2017) with an initial learning rate of 0.0005 and a step learning rate schedule of \\(33.3\\%\\) every 12 epochs for 40 epochs. The pipeline is trained with one NVIDIA Tesla V100 GPU. We do not use any data augmentation." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.187, + 0.822, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.412, + 0.828, + 0.442 + ], + "angle": 0, + "content": "Figure 8: ModelNet40. We show some examples of point cloud renderings of ModelNet40 (Wu et al., 2015) used for 3D classification robustness in our setup." + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.624, + 0.82, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.804, + 0.828, + 0.836 + ], + "angle": 0, + "content": "Figure 9: ShapeNet Core55. 
We show some examples of point cloud renderings of ShapeNet Core55 (Chang et al., 2015) used for 3D shape retrieval in our setup." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.309, + 0.806, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.657, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Figure 10: ShapeNet Parts. We show some examples of point cloud renderings of ShapeNet Parts (Yi et al., 2016) colored with ground truth segmentation labels. We use these renderings as 2D ground truth to pre-train the 2D backbone \\(\\mathbf{C}\\) for 2D segmentation before training VointNet's pipeline for 3D segmentation." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.212, + 0.101, + 0.782, + 0.311 + ], + "angle": 0, + "content": "
MethodData TypeClassification \nModelNet40Shape Retrieval \nShapeNetCore
PointNet (Qi et al., 2017a)Points89.2-
PointNet++ (Qi et al., 2017b)Points91.9-
DGCNN (Wang et al., 2019c)Points92.2-
KPConv(Thomas et al., 2019)Points92.9-
PCT(Guo et al., 2021)Points93.3-
CurveNet(Xiang et al., 2021)Points93.8-
ReVGG (Sfikas et al., 2017)M-View-74.9
MVCNN (Su et al., 2015)M-View90.173.5
ViewGCN (Wei et al., 2020)M-View93.378.4
MVTN (Hamdi et al., 2021)M-View93.882.9
VointNet (ours)Voints92.883.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.32, + 0.828, + 0.375 + ], + "angle": 0, + "content": "Table 7: 3D Shape Classification and Retrieval. We report VointNet's classification accuracy on ModelNet40 (Wu et al., 2015) and its 3D shape retrieval mAP on ShapeNet Core55 (Chang et al., 2015; Sfikas et al., 2017). Baseline results are reported from (Hamdi et al., 2021; Zhao et al., 2020; Xiang et al., 2021)." + }, + { + "type": "table", + "bbox": [ + 0.228, + 0.387, + 0.773, + 0.519 + ], + "angle": 0, + "content": "
MethodRotation Perturbations Range
±90°±180°
PointNet (Qi et al., 2017a)88.742.538.6
PointNet ++ (Qi et al., 2017b)88.247.939.7
RSCNN (Liu et al., 2019a)90.390.390.3
MVTN (Hamdi et al., 2021)91.790.891.2
VointNet (ours)91.590.991.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.529, + 0.825, + 0.569 + ], + "angle": 0, + "content": "Table 8: Rotation Robustness for 3D Classification. At test time, we randomly rotate objects in ModelNet40 (Wu et al., 2015) around the Y-axis (gravity) with different ranges and report the overall accuracy." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.595, + 0.409, + 0.61 + ], + "angle": 0, + "content": "C ADDITIONAL RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.627, + 0.377, + 0.641 + ], + "angle": 0, + "content": "C.1 MODEL ROBUSTNESS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.653, + 0.825, + 0.724 + ], + "angle": 0, + "content": "Rotation Robustness for 3D Classification. We follow the standard practice in 3D shape classification literature by testing the robustness of trained models to perturbations at test time (Liu et al., 2019a; Hamdi et al., 2021). We perturb the shapes with random rotations around the Y-axis (gravity-axis) contained within \\(\\pm 90^{\\circ}\\) and \\(\\pm 180^{\\circ}\\) and report the test accuracy over ten runs in Table 8." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.73, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Rotation Robustness for 3D Segmentation. We follow the previous 3D literature by testing the robustness of trained models to perturbations at test time (Liu et al., 2019a; Hamdi et al., 2021; 2020). We perturb the shapes in ShapeNet Parts with random rotations in \\(SO(3)\\) at test time (ten runs) and report Ins. mIoU in Table 9. Note how our VointNet performance largely exceeds the baselines in this realistic unaligned scenario. We can augment the training with rotated objects for the baselines, which improves their robustness, but loses performance on the unrated setup. Adding xyz coordinates to the view-features of VointNet improves the performance on an unrotated setup but negatively affects the robustness to rotations. 
The discrepancy between the Voint results and the results of some point cloud methods is that Voints heavily depend on the underlying 2D backbone and inherit all its biases, especially those from pretraining. Hence, the 2D backbone limits what the performance can reach with VointNet. We study the effect of the backbone in detail in Section C.2. Figure 11 shows qualitative 3D segmentation results for VointNet and Mean Fuse (Kundu et al., 2020) as compared to the ground truth." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.178, + 0.179, + 0.809, + 0.778 + ], + "angle": 0, + "content": "
Ground TruthVointNet (ours)Mean Fuse (Kundu et al., 2020)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.788, + 0.825, + 0.842 + ], + "angle": 0, + "content": "Figure 11: Qualitative Comparison for 3D Part Segmentation. We compare our VointNet 3D segmentation prediction to Mean Fuse (Kundu et al., 2020) that is using the same trained 2D backbone. Note how VointNet distinguishes detailed parts (e.g. the car window frame). Beware that visualization colors can shift if an extra label is predicted (e.g. the motorbike labels are correct)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.228, + 0.101, + 0.773, + 0.306 + ], + "angle": 0, + "content": "
MethodSegmentation UnrotatedUnder Rotation Rotated
PointNet (Qi et al., 2017a)80.136.6 ±0.2
DGCNN (Wang et al., 2019c)80.137.1 ±0.2
PointNet + Aug.65.865.8 ±0.1
DGCNN + Aug.60.760.7 ±0.2
Mean Fuse (Kundu et al., 2020)79.161.6 ±0.1
Label Fuse (Wang et al., 2019a)78.961.0 ±0.1
VointNet (w/o xyz)79.665.4 ±0.1
VointNet (w/o xyz) + Aug.68.068.5 ±0.1
VointNet (w/ xyz)81.261.5 ±0.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.316, + 0.825, + 0.396 + ], + "angle": 0, + "content": "Table 9: Rotation Robustness for 3D Part Segmentation. At test time, we randomly rotate objects from ShapeNet Parts (Yi et al., 2016) and report the Ins. mIoUs of our VointNet compared to trained PointNet (Qi et al., 2017a) and DGCNN (Wang et al., 2019c). Note how VointNet's performance largely exceeds the baselines in realistic unaligned scenarios, highlighting the benefit of view dependency. If we use rotation augmentation in training for the baselines, the rotated performance improves, but the unrotated performance drops." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.437, + 0.373, + 0.451 + ], + "angle": 0, + "content": "C.2 DETAILED ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.47, + 0.825, + 0.555 + ], + "angle": 0, + "content": "Effect of Pretraining. We study the effect of pretraining the 2D backbone C for 3D classification on ModelNet40. Training a ViT with Mean Fuse for 3D classification on ModelNet40 obtains 92.2 test Acc. with ImageNet pretraining and 80.0 test Acc. from scratch. Other multi-view networks, e.g. MVCNN (Su et al., 2015), ViewGCN(Wei et al., 2020), and MVTN(Hamdi et al., 2021) all use ImageNet pretraining, which is not unique to Voints." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.561, + 0.825, + 0.661 + ], + "angle": 0, + "content": "Classification Backbone. We study the effect of ablating the 2D backbone C for 3D classification on ModelNet40. We show in Table 10 the performance of VointNet (MLP) when Vit-B (Dosovitskiy et al., 2021) and ResNet-18 (He et al., 2015) are used. We also show that following the per-point classification setup instead of the per-shape for 3D shape classification leads to worse performance for VointNet and the naive multi-view. This is why we used the per-shape approach when adopting VointNet for 3D classification (using one Voint for the entire shape)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.666, + 0.825, + 0.709 + ], + "angle": 0, + "content": "Number of points and visibility. Table 11 studies the effect of point number on 3D part segmentation performance, when different numbers of views are used. The visibility ratio is also reported in each case." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.714, + 0.825, + 0.787 + ], + "angle": 0, + "content": "Points color. We colored the points with ground truth normals as in Figure 16, when they are available (ShapeNet Parts), and we used white colors as in Figure 9, when other baselines do not use normals. We ablate the color of the points on VointNet (MLP) with normals colors, white color, and NOCs colors (Wang et al., 2019b). We obtain the following segmentation mIoU results: (normals: 80.6), (white: 74.7), and (NOCs: 57.9)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.791, + 0.825, + 0.864 + ], + "angle": 0, + "content": "Time and Memory Requirements. To assess the contribution of the Voint module, we take a macroscopic look at the time and memory requirements of each component in the pipeline. We record the number of floating-point operations (GFLOPs) and the time of a forward pass for a single input sample. In Table 12, the VointNet module contributes negligibly to the memory requirements compared to multi-view and point networks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Feature Size \\((d)\\). We study the effect of the feature size \\(d\\) on the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 12. We note that the performance peaks at \\(d = 128\\), but it is close to what we use in the main results \\((d = 64)\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.276, + 0.102, + 0.723, + 0.174 + ], + "angle": 0, + "content": "
View Aggregation2D Backbone
ResNet18 (per-shape)ViT-B (per-shape)DeepLabV3 (per-point)
VointNet91.292.810.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.185, + 0.827, + 0.249 + ], + "angle": 0, + "content": "Table 10: Ablation Study for 3D Classification. We study the effect of different 2D backbone for ModelNet40 3D classification task. We compare VointNet's performance to naive multi-view (e.g. MVCNN (Su et al., 2015) or Mean Fuse (Kundu et al., 2020)) using the same 2D backbone. Note that using the per-point classification setup instead of the per-shape for 3D shape classification leads to worse performance for VointNet and the naive multi-view." + }, + { + "type": "table", + "bbox": [ + 0.3, + 0.263, + 0.695, + 0.412 + ], + "angle": 0, + "content": "
Points #MetricNumber of Views
24812
500visibility99.199.9100100
mIoU69.273.976.076.4
1000visibility98.099.7100100
mIoU69.574.376.577.1
2000visibility95.799.299.899.9
mIoU69.775.077.778.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.419, + 0.825, + 0.459 + ], + "angle": 0, + "content": "Table 11: Analysis on Number of Points and Visibility. We show the Instance mIoUs and visibility ratio \\((1 - \\frac{\\text{empty}}{\\text{total}})\\%\\) of our VointNet on ShapeNet Parts when varying points # and number of views." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.485, + 0.825, + 0.557 + ], + "angle": 0, + "content": "Model Depth \\((l_v)\\). We study the effect of the model depth \\(l_v\\) on the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 13. We note that model depth of VointNet does not enhance the performance significantly. Our choice of \\(l_v = 4\\) balances the performance and the memory/computations requirements of VointNet (MLP)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.562, + 0.825, + 0.648 + ], + "angle": 0, + "content": "Distance to the Object. We study the effect of distance to the object in rendering as in Figure 17 to the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 14. We note that our default choice of 1.0 is actually reasonable. This choice of distance shows the object entirely (as illustrated in Figure 17), but also cover the details needed for small parts segmentation (see Figure 11)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.652, + 0.825, + 0.697 + ], + "angle": 0, + "content": "Image Size \\((H,W)\\). We study the effect of the image size \\(H\\& W\\) on the performance of Mean Fuse (Kundu et al., 2020) baseline when training the 2D backbone for 3D part segmentation. We plot the results (with confidence intervals) in Figure 15." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.701, + 0.825, + 0.744 + ], + "angle": 0, + "content": "Number of Views on Classification. 
We study the effect of the number of views (M) on classification accuracy on ModelNet40 Wu et al. (2015) of VointNet and report results in Table 13." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.751, + 0.825, + 0.78 + ], + "angle": 0, + "content": "Unprojection Operation Speed. We evaluate the speed of the unprojection operation \\(\\Phi_{\\mathbf{B}}\\) and report average latency of 10,000 runs (in ms) in Table 14." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.786, + 0.825, + 0.83 + ], + "angle": 0, + "content": "Unprojection Operation Speed. We evaluate the speed of the point cloud renderer \\(\\mathbb{R}\\) used in Voint pipeline from Pytroch3D Ravi et al. (2020) and report average latency of 1,000 renderings (in ms/image) in Table 15." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.845, + 0.341, + 0.859 + ], + "angle": 0, + "content": "C.3 VISUALIZATIONS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.87, + 0.825, + 0.914 + ], + "angle": 0, + "content": "In Figure 16 and 17, we visualize the multi-view renderings of the point clouds along with the 2D learned features based on the DeepLabV3 (Chen et al., 2018) backbone. These features are then unprojected and transformed by VointNet to obtain 3D semantic labels." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.507, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.227, + 0.168, + 0.773, + 0.405 + ], + "angle": 0, + "content": "
NetworkGFLOPsTime (ms)Parameters # (M)
MVCNN (Su et al., 2015)43.7239.8911.20
ViewGCN (Wei et al., 2020)44.1926.0623.56
ResNet 18 (He et al., 2015)3.643.7011.20
ResNet 50 (He et al., 2015)8.249.4223.59
ViT-B (Dosovitskiy et al., 2021)33.7012.4686.57
ViT-L (Dosovitskiy et al., 2021)119.3029.28304.33
FCN (Long et al., 2015)53.1310.3432.97
DeeplabV3 (Chen et al., 2018)92.6120.6258.64
PointNet (Qi et al., 2017a)1.784.243.50
DGCNN (Wang et al., 2019c)10.420.9516.350
MVTN (Hamdi et al., 2021)1.784.243.5
VointNet (MLP)1.902.900.04
VointNet (GCN)16.1832.100.05
VointNet (GAT)32.0568.710.07
Full Voint pipeline94.5123.5058.68
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.415, + 0.825, + 0.456 + ], + "angle": 0, + "content": "Table 12: Time and Memory Requirements. We assess the contribution of the Voint module to the time and memory requirements in the multi-view and point cloud pipeline. Note that VointNet (shared MLP) is almost 100 times smaller than PointNet (Qi et al., 2017a)." + }, + { + "type": "image", + "bbox": [ + 0.3, + 0.62, + 0.661, + 0.802 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.81, + 0.825, + 0.855 + ], + "angle": 0, + "content": "Figure 12: The Effect of Feature Size \\( d \\). We plot Ins. mIoU of 3D segmentation vs. the feature size \\( d \\) used in training on ShapeNet Parts (Yi et al., 2016). We note that the performance peaks at \\( d = 128 \\), but it is close to what we use in the main results (\\( d = 64 \\))." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.125, + 0.661, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.315, + 0.828, + 0.375 + ], + "angle": 0, + "content": "Figure 13: The Effect of Model Depth \\( l_{v} \\). We plot Ins. mIoU of 3D segmentation vs. the model depth \\( l_{v} \\) used in training on ShapeNet Parts (Yi et al., 2016). We note that model depth of VointNet does not enhance the performance significantly. Our choice of \\( l_{v} = 4 \\) balances the performance and the memory/computations requirements of VointNet (MLP)." 
+ }, + { + "type": "image", + "bbox": [ + 0.3, + 0.406, + 0.661, + 0.589 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.596, + 0.825, + 0.669 + ], + "angle": 0, + "content": "Figure 14: The Effect of Distance to the Object. We plot Ins. mIoU of 3D segmentation vs. the distance to the object used in inference on ShapeNet Parts (Yi et al., 2016). We note that our default choice of 1.0 is actually reasonable. This choice of distance shows the object entirely (as illustrated in Figure 17), but also cover the details needed for small parts segmentation (see Figure 11)." + }, + { + "type": "image", + "bbox": [ + 0.3, + 0.701, + 0.661, + 0.883 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.89, + 0.825, + 0.92 + ], + "angle": 0, + "content": "Figure 15: The Effect of Image Size \\( H, W \\). We plot Ins. mIoU of 3D segmentation vs. the image size used in inference on ShapeNet Parts (Yi et al., 2016)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.269, + 0.81, + 0.68 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.688, + 0.825, + 0.754 + ], + "angle": 0, + "content": "Figure 16: Multi-view Projected Segmentation 1. We show how, after rendering points, we can segment in the image space. For each example, we show (INPUT): the projections of the points (colored with normals) used in training with random view-points. (PRED 2D): the segmentation prediction of the 2D backbone (DeepLabV3) (Chen et al., 2018). (PRED 3D): the unprojected 3D segmentation prediction. \\((GT)\\): the 3D segmentation ground truth." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.171, + 0.269, + 0.814, + 0.68 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.688, + 0.825, + 0.754 + ], + "angle": 0, + "content": "Figure 17: Multi-view Projected Segmentation 2. We show how, after rendering points, we can segment in the image space. For each example, we show (INPUT): the projections of the points (colored with normals) used in training with random view-points. (PRED 2D): the segmentation prediction of the 2D backbone (DeepLabV3) (Chen et al., 2018). (PRED 3D): the unprojected 3D segmentation prediction. \\((GT)\\): the 3D segmentation ground truth." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.509, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.293, + 0.179, + 0.709, + 0.245 + ], + "angle": 0, + "content": "
MethodNumber of Views
46810
VointNet (Cls. Acc.)90.390.892.092.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.254, + 0.825, + 0.281 + ], + "angle": 0, + "content": "Table 13: Effect of the Number of Views on Classification. We report the classification accuracy of VointNet vs. the number of views (M) used in the training on ModelNet40." + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.445, + 0.791, + 0.526 + ], + "angle": 0, + "content": "
MethodNumber of Views
124681012
Features Unprojection3.05.311.4515.717.229.724.0
Labels Unprojection2.62.53.43.13.03.23.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.536, + 0.825, + 0.577 + ], + "angle": 0, + "content": "Table 14: Unprojection Operation Speed. We report the average latency (in ms) over 10,000 runs of the unprojection operation with its two forms: features unprojection (used in mean) and labels unprojection (used in mode)." + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.739, + 0.791, + 0.807 + ], + "angle": 0, + "content": "
CriteriaNumber of Points
1e21e31e41e51e6
Point Rendering Speed (ms/image)7.27.67.710.437.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.816, + 0.825, + 0.844 + ], + "angle": 0, + "content": "Table 15: Point Rendering Speed. We report the average rendering speed (in ms/image) over 1,000 renderings of the point cloud renderer Ravi et al. (2020) used in Voint clouds." + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "31" + } + ] +] \ No newline at end of file diff --git a/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_origin.pdf b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cfa374cb5ef8da946150f4d9b50e41416df6789a --- /dev/null +++ b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/70b114f7-aea9-450a-a38b-661ed1d2e4cb_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5c63e52812e7af5aa05bac7be47de3de07ece92994a41a64092cfde6a35ee7f +size 32955658 diff --git a/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/full.md b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e4749f50107bb1de89869a59aa11538ec3c9c255 --- /dev/null +++ b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/full.md @@ -0,0 +1,582 @@ +# VOINT CLOUD: MULTI-VIEW POINT CLOUD REPRESENTATION FOR 3D UNDERSTANDING + +Abdullah Hamdi + +Silvio Giancola + +Bernard Ghanem + +King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia {abdullah.hamdi, silvio.giancola, bernard.ghanem}@kaust.edu.sa + +# ABSTRACT + +Multi-view projection methods have demonstrated promising performance on 3D understanding tasks like 3D classification and segmentation. 
However, it remains unclear how to combine such multi-view methods with the widely available 3D point clouds. Previous methods use unlearned heuristics to combine features at the point level. To this end, we introduce the concept of the multi-view point cloud (Voint cloud), representing each 3D point as a set of features extracted from several view-points. This novel 3D Voint cloud representation combines the compactness of 3D point cloud representation with the natural view-awareness of multi-view representation. Naturally, we can equip this new representation with convolutional and pooling operations. We deploy a Voint neural network (VointNet) to learn representations in the Voint space. Our novel representation achieves state-of-the-art performance on 3D classification, shape retrieval, and robust 3D part segmentation on standard benchmarks (ScanObjectNN, ShapeNet Core55, and ShapeNet Parts).1 + +# 1 INTRODUCTION + +A fundamental question in 3D computer vision and computer graphics is how to represent 3D data (Mescheder et al., 2019; Qi et al., 2017a; Maturana & Scherer, 2015). This question becomes particularly vital given how the success of deep learning in 2D computer vision has pushed for the wide adoption of deep learning in 3D vision and graphics. In fact, deep networks already achieve impressive results in 3D classification (Hamdi et al., 2021), 3D segmentation (Hu et al., 2021), 3D detection (Liu et al., 2021a), 3D reconstruction (Mescheder et al., 2019), and novel view synthesis (Mildenhall et al., 2020). 3D computer vision networks either rely on direct 3D representations, indirect 2D projection on images, or a mixture of both. Direct approaches operate on 3D data commonly represented with point clouds (Qi et al., 2017a), meshes (Feng et al., 2019), or voxels (Choy et al., 2019). 
In contrast, indirect approaches commonly render multiple 2D views of objects or scenes (Su et al., 2015), and process each image with a traditional 2D image-based architecture. The human visual system is closer to such a multi-view indirect approach for 3D understanding, as it receives streams of rendered images rather than explicit 3D data. + +Tackling 3D vision tasks with indirect approaches has three main advantages: (i) mature and transferable 2D computer vision models (CNNs, Transformers, etc.), (ii) large and diverse labeled image datasets for pre-training (e.g. ImageNet (Russakovsky et al., 2014)), and (iii) the multi-view images give context-rich features based on the viewing angle, which are different from the geometric 3D neighborhood features. Multi-view approaches achieve impressive performance in 3D shape classification and segmentation (Wei et al., 2020; Hamdi et al., 2021; Dai & Nießner, 2018). However, the challenge with the multi-view representation (especially for dense predictions) lies in properly aggregating the per-view features with 3D point clouds. The appropriate aggregation is necessary to obtain representative 3D point + +![](images/0f87af169b3654c00a374430901e2066ece51e5796b3b55094f704f8b831430b.jpg) +Figure 1: 3D Voint Clouds. We propose the multi-view point cloud (Voint cloud), a novel 3D representation that is compact and naturally descriptive of view projections of a 3D point cloud. Each point in the 3D cloud is tagged with a Voint, which accumulates view-features for that point. Note that not all 3D points are visible from all views. The set of Voints constructs a Voint cloud. + +clouds with a single feature per point suitable for typical point cloud processing pipelines. Previous multi-view works rely on heuristics (e.g. average or label mode pooling) after mapping pixels to points (Kundu et al., 2020; Wang et al., 2019a), or multi-view fusion with voxels (Dai & Nießner, 2018). Such setups might not be optimal for a few reasons. 
(i) Such heuristics may aggregate information of misleading projections that are obtained from arbitrary view-points. For example, looking at an object from the bottom and processing that view independently can carry wrong information about the object's content when combined with other views. (ii) The views lack geometric 3D information. + +To this end, we propose a new hybrid 3D data structure that inherits the merits of point clouds (i.e. compactness, flexibility, and 3D descriptiveness) and leverages the benefits of rich perceptual features of multi-view projections. We call this new representation multi-view point cloud (or Voint cloud) and illustrate it in Figure 1. A Voint cloud is a set of Voints, where each Voint is a set of view-dependent features (view-features) that correspond to the same point in the 3D point cloud. The cardinality of these view-features may differ from one Voint to another. In Table 1, we compare some of the widely used 3D representations and our Voint cloud representation. Voint clouds inherit the characteristics of the parent explicit 3D point clouds, which facilitates learning Voint representations for a variety of vision applications (e.g. point cloud classification and segmentation). To deploy deep learning on the new Voint space, we define basic operations on Voints, such as pooling and convolution. Based on these operations, we define a practical way of building Voint neural networks that we dub VointNet. VointNet takes a Voint cloud and outputs point cloud features for 3D point cloud processing. We show how learning this Voint cloud representation leads to strong performance and gained robustness for the tasks of 3D classification, 3D object retrieval, and 3D part segmentation on standard benchmarks like ScanObjectNN (Uy et al., 2019), and ShapeNet (Chang et al., 2015). 
+ +Contributions: (i) We propose a novel multi-view 3D point cloud representation (denoted as Voint cloud), which represents each point (namely a Voint) as a set of features from different view-points. (ii) We define pooling and convolutional operations at the Voint level to construct a Voint Neural Network (VointNet) capable of learning to aggregate information from multiple views in the Voint space. (iii) Our VointNet reaches state-of-the-art performance on several 3D understanding tasks, including 3D shape classification, retrieval, and robust part segmentation. Further, VointNet achieves robustness improvement to occlusion and rotation. + +
3D RepresentationExplicitnessView-BasedMain Use3D Expressiveness
Point CloudsExplicitX3D UnderstandingMedium
Multi-View ProjectionsImplicit3D UnderstandingLow
VoxelsExplicitX3D UnderstandingMedium
MeshExplicitX3D ModelingHigh
NeRFsImplicitNovel View SynthesisMedium
Voint Clouds (ours)Explicit3D UnderstandingMedium
+ +Table 1: Comparison of Different 3D Representations. We compare some of the widely used 3D representations to our proposed Voint cloud. Note that our Voint cloud shares the view-dependency of NeRFs (Mildenhall et al., 2020) while inheriting the merits of 3D point clouds. + +# 2 RELATED WORK + +Learning on 3D Point Clouds. 3D point clouds are widely used for 3D representation in computer vision due to their compactness, flexibility, and because they can be obtained naturally from sensors like LiDAR and RGBD cameras. PointNet (Qi et al., 2017a) paved the way as the first deep learning algorithm to operate directly on 3D point clouds. It computes point features independently and aggregates them using an order-invariant function like max-pooling. Subsequent works focused on finding neighborhoods of points to define point convolutional operations (Qi et al., 2017b; Wang et al., 2019c; Li et al., 2018; Han et al., 2019). Several recent works combine point cloud representations with other 3D modalities like voxels (Liu et al., 2019b; You et al., 2018) or multi-view images (Jaritz et al., 2019). We propose a novel Voint cloud representation for 3D shapes and investigates novel architectures that aggregate view-dependent features at the 3D point level. + +Multi-View Applications. The idea of using 2D images to understand the 3D world was initially proposed in 1994 by Bradski et. al. (Bradski & Grossberg, 1994). This intuitive multi-view approach was combined with deep learning for 3D understanding in MVCNN (Su et al., 2015). A line of works continued developing multi-view approaches for classification and retrieval by improving the aggregation of the view-features from each image view (Kanezaki et al., 2018; Esteves et al., 2019; Cohen & Welling, 2016; Wei et al., 2020; Hamdi et al., 2021). 
In this work, we fuse the concept of multi-view into the 3D structure itself, such that every 3D point would have an independent set of view-features according to the view-points available in the setup. Our Voints are aligned with the sampled 3D point cloud, offering a compact representation that allows for efficient computation and memory usage while maintaining the view-dependent component that facilitates view-based learning for vision. + +Hybrid Multi-View with 3D Data. On the task of 3D semantic segmentation, a smaller number of works tried to follow the multi-view approach (Dai & Nießner, 2018; Kundu et al., 2020; Wang et al., 2019a; Kalogerakis et al., 2017; Jaritz et al., 2019; Liu et al., 2021b; Lyu et al., 2020). A problem arises when combining view features to represent local points/voxels while preserving local geometric features. These methods tend to average the view-features (Kundu et al., 2020; Kalogerakis et al., 2017), propagate the labels only (Wang et al., 2019a), learn from reconstructed points in the neighborhood (Jaritz et al., 2019), order points on a single grid (Lyu et al., 2020), or combine the multi-view features with 3D voxel features (Dai & Nießner, 2018; Hou et al., 2019). To this end, our proposed VointNet operates on the Voint cloud space while preserving the compactness and 3D descriptiveness of the original point cloud. VointNet leverages the power of multi-view features with learned aggregation on the view-features applied to each point independently. + +# 3 METHODOLOGY + +The primary assumption in our work is that surface 3D points are spherical functions, i.e. their representations depend on the viewing angles observing them. This condition contrasts with most 3D point cloud processing pipelines that assume a view-independent representation of 3D point clouds. The full pipeline is illustrated in Figure 2. + +![](images/f18c527b4598c89667a2cfd0519a8caecd049ba9b97eafab366dccb7b44f9b72.jpg) +Figure 2: Learning from Voint Clouds. 
To construct a 3D Voint cloud $\widehat{\mathcal{X}}$ , a renderer $\mathbf{R}$ renders the point cloud $\mathcal{X}$ from view-points $\mathcal{U}$ and image features are extracted from the generated images via a 2D backbone $\mathbf{C}$ . The image features are then unprojected to the Voint cloud by $\Phi_{\mathbf{B}}$ and passed to VointNet $\widehat{\mathbf{F}}$ . To learn both $\mathbf{C}$ and $\widehat{\mathbf{F}}$ , a 3D loss on the output points is used with an optional auxiliary 2D loss on $\mathbf{C}$ . + +# 3.1 3D VOINT CLOUD + +From Point Clouds to Voint Clouds. A 3D point cloud is a compact 3D representation composed of sampled points on the surface of a 3D object or a scene and can be obtained by different sensors like LiDAR (Chen et al., 2017) or as a result of reconstruction (Okutomi & Kanade, 1993). Formally, we define the coordinate function for the surface $g_{\mathrm{s}}(\mathbf{x}) : \mathbb{R}^3 \to \mathbb{R}$ as the Sign Distance Function (SDF) in the continuous Euclidean space (Park et al., 2019; Mescheder et al., 2019). The 3D iso-surface is then defined as the set of all points $\mathbf{x}$ that satisfy the condition $g_{\mathrm{s}}(\mathbf{x}) = 0$ . We define a surface 3D point cloud $\mathcal{X} \in \mathbb{R}^{N \times 3}$ as a set of $N$ 3D points, where each point $\mathbf{x}_i \in \mathbb{R}^3$ is represented by its 3D coordinates $(x_i, y_i, z_i)$ and satisfies the iso-surface condition as follows: $\mathcal{X} = \{\mathbf{x}_i \in \mathbb{R}^3 \mid g_{\mathrm{s}}(\mathbf{x}_i) = 0\}_{i=1}^N$ . In this work, we aim to fuse the view-dependency to 3D point. Inspired by NeRFs (Mildenhall et al., 2020), we assume that surface points also depend on the view direction from which they are being observed. 
Specifically, there exists a continuous implicit spherical function $\mathbf{g}(\mathbf{x}, \mathbf{u}) : \mathbb{R}^5 \to \mathbb{R}^d$ that defines the features of each point $\mathbf{x}$ depending on the view-point direction $\mathbf{u}$ . Given a set of $M$ view-point directions $\mathcal{U} \in \mathbb{R}^{M \times 2}$ , a Voint $\widehat{\mathbf{x}} \in \mathbb{R}^{M \times d}$ is a set of $M$ view-dependent features of size $d$ for the sphere centered at point $\mathbf{x}$ as follows. + +$$ +\widehat {\mathbf {x}} _ {i} = \left\{\mathbf {g} \left(\mathbf {x} _ {i}, \mathbf {u} _ {j}\right) \in \mathbb {R} ^ {d} \mid \mathbf {x} _ {i} \in \mathcal {X} \right\} _ {j = 1} ^ {M} \tag {1} +$$ + +The Voint cloud $\widehat{\mathcal{X}}\in \mathbb{R}^{N\times M\times d} = \{\widehat{\mathbf{x}}_i\}_{i = 1}^N$ is the set of all $N$ Voints $\widehat{\mathbf{x}}_i$ corresponding to the parent point cloud $\mathcal{X}$ . Note that we typically do not have access to the underlying implicit function $\mathbf{g}$ and we approximate it with the following three steps. + +1-Multi-View Projection. As mentioned earlier, a Voint combines multiple view-features of the same 3D point. These view-features come from a multi-view projection of the points by a point cloud renderer $\mathbf{R}:\mathbb{R}^{N\times 3}\to \mathbb{R}^{M\times H\times W\times 3}$ that renders the point cloud $\mathcal{X}$ from multiple view-points $\mathcal{U}$ into $M$ images of size $H\times W\times 3$ . In addition to projecting the point cloud into the image space, $\mathbf{R}$ defines the index mapping $\mathbf{B}\in \{0,\dots,N\}^{M\times H\times W}$ between each pixel to the N points and background it renders. Also, $\mathbf{R}$ outputs the visibility binary matrix $\mathbf{V}\in \{0,1\}^{N\times M}$ for each point from each view. 
Since not all points appear in all the views due to pixel discretization, the visibility score $\mathbf{V}_{i,j}$ defines if the Voint $\hat{\mathbf{x}}_i$ is visible in the view $\mathbf{u}_j$ . The matrix $\mathbf{B}$ is crucial for unprojection, while $\mathbf{V}$ is needed for defining meaningful operations on Voints. +2-Multi-View Feature Extraction. The rendered images are processed by a function $\mathbf{C}:\mathbb{R}^{M\times H\times W\times 3}\to \mathbb{R}^{M\times H\times W\times d}$ that extracts image features, as shown in Figure 2. If $\mathbf{C}$ is the identity function, all the view-features would typically the RGB value of the corresponding point. However, the $\mathbf{C}$ function can be a 2D network dedicated to the downstream task and can extract useful global and local features about each view. +3-Multi-View Unprojection. We propose a module $\Phi_{\mathbf{B}}:\mathbb{R}^{M\times H\times W\times d}\to \mathbb{R}^{N\times M\times d}$ that unprojects the 2D features from each pixel to be 3D view-features at the corresponding voint. Using the mapping $\mathbf{B}$ created by the renderer, $\Phi_{\mathbf{B}}$ forms the Voint cloud features $\widehat{\mathcal{X}}$ + +To summarize, the output Voint cloud is described by Eq (1), where $\mathbf{g}(\mathbf{x}_i,\mathbf{u}_j) = \Phi_{\mathbf{B}}\big(\mathbf{C}(\mathbf{R}(\mathcal{X},\mathbf{u}_j))\big)_i$ and the features are only defined for a view $j$ of Voint $\hat{\mathbf{x}}_i$ if $\mathbf{V}_{i,j} = 1$ . + +# 3.2 OPERATIONS ON 3D VOINT CLOUDS + +We show in the Appendix that a functional form of max-pooled individual view-features of a set of angles can approximate any function in the spherical coordinates. We provide a theorem that extends PointNet's theorem of point cloud functional composition (Qi et al., 2017a) and its Universal Approximation to spherical functions underlying Voints. Next, we define a set of operations on Voints as building blocks for Voint neural networks (VointNet). 
+ +VointMax. We define VointMax as max-pooling on the visible view-features along the views dimension of the voint $\hat{\mathbf{x}}$ . For all $i \in 1,2,\dots,N$ and $j \in 1,2,\dots,M$ , + +$$ +\operatorname {V o i n t M a x} \left(\widehat {\mathbf {x}} _ {i}\right) = \max _ {j} \widehat {\mathbf {x}} _ {i, j}, \quad \text {s . t .} \quad \mathbf {V} _ {i, j} = 1 \tag {2} +$$ + +VointConv. We define the convolution operation $h_{\mathrm{V}}: \mathbb{R}^{N \times M \times d} \to \mathbb{R}^{N \times M \times d'}$ as any learnable function that operates on the Voint space with shared weights on all the Voints and has the view-features input size $d$ and outputs view-features of size $d'$ and consists of $l_{V}$ layers. A simple example of this VointConv operation is the shared MLP applied only on the visible view-features. We provide further details for such operations in Section 4.2, which result in different non-exhaustive variants of VointNet. + +# 3.3 LEARNING ON 3D VOINT CLOUDS + +VointNet. The goal of the VointNet model is to obtain multi-view point cloud features that can be subsequently used by any point cloud processing pipeline. The VointNet module $\widehat{\mathbf{F}}:\mathbb{R}^{N\times M\times d}\to \mathbb{R}^{N\times d}$ is defined as follows. + +$$ +\widehat {\mathbf {F}} (\widehat {\boldsymbol {\chi}}) = h _ {\mathrm {P}} \left(\operatorname {V o i n t M a x} \left(h _ {\mathrm {V}} (\widehat {\boldsymbol {\chi}})\right)\right), \tag {3} +$$ + +where $h_{\mathrm{P}}$ is any point convolutional operation (e.g. shared MLP or EdgeConv). VointNet $\widehat{\mathbf{F}}$ transforms the individual view-features using the learned VointConv $h_{\mathrm{V}}$ before VointMax is applied on the view-features to obtain point features. + +VointNet Pipeline for 3D Point Cloud Processing. The full pipeline is described in Figure 2. 
The loss for this pipeline can be described as follows: + +$$ +\underset {\boldsymbol {\theta} _ {\mathbf {C}}, \boldsymbol {\theta} _ {\widehat {\mathbf {F}}}} {\arg \min } \sum_ {i} ^ {N} L \left(\widehat {\mathbf {F}} \left(\Phi_ {\mathbf {B}} \left(\mathbf {C} \left(\mathbf {R} (\mathcal {X}, \mathcal {U})\right)\right)\right) _ {i}, \mathbf {y} _ {i}\right), \tag {4} +$$ + +where $L$ is a Cross-Entropy (CE) loss defined on all the training points $\mathcal{X}$ , and $\{y_i\}_{i=1}^N$ defines the labels of these points. The other components $(\mathbf{R}, \Phi_{\mathbf{B}}, \mathcal{U}, \mathbf{C})$ are all defined before. The weights to be jointly learned are those of the 2D backbone $(\theta_{\mathbf{C}})$ and those of the VointNet $(\theta_{\widehat{\mathbf{F}}})$ using the same 3D loss. An auxiliary 2D loss on $\theta_{\mathbf{C}}$ can be optionally added for supervision at the image level. For classification, the entire object can be treated as a single Voint, and the global features of each view would be the view-features of that Voint. We analyze different setups in detail in Section 6. + +# 4 EXPERIMENTS + +# 4.1 EXPERIMENTAL SETUP + +Datasets. We benchmark VointNet on the challenging and realistic ScanObjectNN dataset for 3D point cloud classification (Uy et al., 2019). The dataset has three variants, includes background and occlusion, and has 15 categories and 2,902 point clouds. For the shape retrieval task, we benchmark on ShapeNet Core55 as a subset of ShapeNet (Chang et al., 2015). The dataset consists of 51,162 3D mesh objects labeled with 55 object classes. We follow the MVTN's setup (Hamdi et al., 2021) in sampling 5,000 points from each mesh object to obtain point cloud. On the other hand, for the task of shape part segmentation, + +
MethodData TypeClassificationAccuracy
OBJ_BGOBJ_ONLYHardest
PointNet (Qi et al., 2017a)Points73.379.268.0
SpiderCNN (Xu et al., 2018)Points77.179.573.7
PointNet ++ (Qi et al., 2017b)Points82.384.377.9
PointCNN (Li et al., 2018)Points86.185.578.5
DGCNN (Wang et al., 2019c)Points82.886.278.1
SimpleView (Goyal et al., 2021)M-View--79.5
BGA-DGCNN (Uy et al., 2019)Points--79.7
BGA-PN++ (Uy et al., 2019)Points--80.2
MVTN (Hamdi et al., 2021)M-View92.692.382.8
VointNet (ours)Voints93.794.085.4
+ +Table 2: 3D Point Cloud Classification on ScanObjectNN. We report the accuracy of VointNet in 3D point cloud classification on three different variants of ScanObjectNN (Uy et al., 2019). Bold denotes the best result in its setup. Note that the Hardest variant includes rotated and translated objects, which highlights the benefits of Voints on challenging scenarios. + +we test on ShapeNet Parts (Yi et al., 2016), a subset of ShapeNet (Chang et al., 2015) that consists of 16,872 point cloud objects from 16 categories and 50 parts. For occlusion robustness, we follow MVTN (Hamdi et al., 2021) and test on ModelNet40 (Wu et al., 2015), which is composed of 40 classes and 12,311 3D objects. + +Metrics. For 3D point cloud classification, we report the overall accuracy, while shape retrieval is evaluated using mean Average Precision (mAP) over test queries (Hamdi et al., 2021). 3D semantic segmentation is evaluated using mean Intersection over Union (mIoU) on points. For part segmentation, we report Instance-averaged mIoU (Ins. mIoU). + +Baselines. We include PointNet (Qi et al., 2017a), PointNet++ (Qi et al., 2017b), DGCNN (Wang et al., 2019c), as baselines that use point clouds. We also compare against multi-view classification approaches like MVCNN (Su et al., 2015), SimpleView (Goyal et al., 2021), and MVTN (Hamdi et al., 2021) as baselines for classification and retrieval and adopt some of the multi-view segmentation baselines (e.g. Label Fusion (Wang et al., 2019a) and Mean Fusion (Kundu et al., 2020)) for part segmentation. + +# 4.2 VOINTNET VARIANTS + +VointNet in Eq (3) relies on the VointConv operation $h_{\mathrm{V}}$ as the basic building block. Here, we briefly describe three examples of $h_{\mathrm{V}}$ operations VointNet uses. + +Shared Multi-Layer Perceptron (MLP). It is the most basic VointConv formulation. 
For a layer $l$ , the features of Voint $i$ at view $j$ are updated to layer $l + 1$ as: $\mathbf{h}_{i,j}^{l + 1} = \rho (\mathbf{h}_{i,j}^{l}\mathcal{W}_{\rho})$ , where $\rho$ is the shared MLP with weights $\mathcal{W}_{\rho}$ followed by normalization and a nonlinear function (e.g. ReLU). This operation is applied on all Voints independently and only involves the visible views-features for each Voint. This formulation extends the shared MLP formulation for PointNet (Qi et al., 2017a) to work on Voints' view-features. + +Graph Convolution (GCN). We define a fully connected graph for each Voint by creating a virtual center node connected to all the view-features to aggregate their information (similar to "cls" token in ViT (Dosovitskiy et al., 2021)). Then, the graph convolution can be defined as the shared MLP (as described above) but on the edge features between all view features, followed by a max pool on the graph neighbors. An additional shared MLP is used before the final output. + +Graph Attention (GAT). A graph attention operation can be defined just like the GCN operation above but with learned attention weights on the graph neighbor's features before averaging them. A shared MLP computes these weights. + +
ResultsMVCNN (Su et al., 2015)RotNet (Kanezaki et al., 2018)ViewGCN (Wei et al., 2020)MVTN (Hamdi et al., 2021)VointNet (ours)
ShapeNet73.577.278.482.983.3
Retr. mAP
+ +Table 3: 3D Shape Retrieval. We report 3D shape retrieval mAP on ShapeNet Core55 (Chang et al., 2015; Sfikas et al., 2017). VointNet achieves state-of-the-art results on this benchmark. + +
MethodData TypePart Segmentation
(Unrotated)(Rotated)
PointNet (Qi et al., 2017a)Points80.136.6 ±0.2
DGCNN (Wang et al., 2019c)Points80.137.1 ±0.2
CurveNet (Xiang et al., 2021)Points84.932.3 ±0.0
Label Fuse (Wang et al., 2019a)M-View80.061.4 ±0.2
Mean Fuse (Kundu et al., 2020)M-View77.562.0 ±0.2
VointNet (ours)Voints81.262.4 ±0.2
+ +Table 4: Robust 3D Part Segmentation on ShapeNet Parts. We compare the Inst. mIoU of VointNet against other methods in 3D segmentation on ShapeNet Parts (Yi et al., 2016). At test time, we randomly rotate the objects and report the results over ten runs. Note how VointNet's performance largely exceeds the point baselines in the realistic rotated scenarios, while exceeding multi-view baselines on the unrotated benchmark. All the results are reproduced in our setup. + +# 4.3 IMPLEMENTATION DETAILS + +Rendering and Unprojection. We choose the differentiable point cloud renderer $\mathbf{R}$ from Pytorch3D (Ravi et al., 2020) in our pipeline for its speed and compatibility with Pytorch libraries (Paszke et al., 2017). We render point clouds on multi-view images with size $224 \times 224 \times 3$ . We color the points by their normals' values or keep them white if the normals are not available. Following a similar procedure to (Wei et al., 2020; Hamdi et al., 2021), the view-points setup is randomized during training (using $M = 8$ views) and fixed to spherical views in testing (using $M = 12$ views). + +Architectures. For the 2D backbone $\mathbf{C}$ , we use ViT-B (Dosovitskiy et al., 2021) (with pretrained weights from TIMM library (Wightman, 2019)) for classification and DeepLabV3 (Chen et al., 2018) for segmentation. We use the 3D CE loss on the 3D point cloud output and the 2D CE loss when the loss is defined on the pixels. The feature dimension of the VointNet architectures is $d = 64$ , and the depth is $l_{V} = 4$ layers in $h_V$ . The main results are based on VointNet (MLP), unless otherwise specified as in Section 6, where we study in details the effect of VointConv $h_\mathrm{V}$ and $\mathbf{C}$ . + +Training Setup. We train our pipeline in two stages, where we start by training the 2D backbone on the 2D projected labels of the points, then train the entire pipeline end-to-end while focusing the training on the VointNet part. 
We use the AdamW optimizer (Loshchilov & Hutter, 2017) with an initial learning rate of 0.0005 and a step learning rate schedule of $33.3\%$ every 12 epochs for 40 epochs. The pipeline is trained with one NVIDIA Tesla V100 GPU. We do not use any data augmentation. More details about the training setup (loss and rendering), VointNet, and the 2D backbone architectures can be found in the Appendix. + +# 5 RESULTS + +The main test results of our Voint formulations are summarized in Tables 2,3, 4, and 5. We achieve state-of-the-art performance in the task of 3D classification, retrieval, and robust 3D part segmentation. More importantly, under the realistic rotated setups of ScanObjectNN and ShapeNet Parts, we improve over $7.2\%$ Acc. and $25\%$ mIoU respectively compared to point baselines Qi et al. (2017a); Wang et al. (2019c). Following common practice Hamdi et al. (2021), we report the best results out of four runs in benchmark tables, but detailed results are provided in the Appendix. + +![](images/123fba4fd773417d0a4163ecd2fb10d4b2176e02cc5cd0cc864f083a9c86b7cd.jpg) +Figure 3: Qualitative Comparison for Part Segmentation. We compare our VointNet 3D segmentation predictions to Mean Fuse (Kundu et al., 2020) that is using the same trained 2D backbone. Note how VointNet distinguishes detailed parts (e.g. the car window frame). + +![](images/cd772a1ce730ead1824f2b7f7a275f785539b7534c5bcf3091b1539db62ac8d3.jpg) + +![](images/62f87428c6e762834173ef8afc04dd088d57d8d321339eb21332f097f9ee4c51.jpg) + +# 5.1 3D SHAPE CLASSIFICATION + +Table 2 reports the classification accuracy on the 3D point cloud classification task on ScanObjectNN Uy et al. (2019). It benchmarks VointNet against other recent and strong baselines Hamdi et al. (2021); Goyal et al. (2021); Hamdi et al. (2021). VointNet demonstrates state-of-the-art results on all the variants, including the challenging Hardest (PB_T50_RS) variant that includes challenging scenarios of rotated and translated objects. 
The increase in performance $(+2.6\%)$ is significant on this variant, which highlights the benefits of Voints on challenging scenarios, with further affirming results in Section 5.4. We follow exactly the same procedure as in MVTN Hamdi et al. (2021). + +# 5.2 3D SHAPE RETRIEVAL + +Table 3 benchmarks the 3D shape retrieval mAP on ShapeNet Core55 Chang et al. (2015). VointNet achieves state-of-the-art performance on ShapeNet Core55. Baseline results are reported from Hamdi et al. (2021). + +# 5.3 ROBUST 3D PART SEGMENTATION + +Table 4 reports the Instance-averaged segmentation mIoU of VointNet compared with other methods on ShapeNet Parts Yi et al. (2016). Two variants of the benchmark are reported: unrotated normalized setup, and the rotated realistic setup. For the rotated setup, we follow the previous 3D literature Liu et al. (2019a); Hamdi et al. (2021; 2020) by testing the robustness of trained models by perturbing the shapes in ShapeNet Parts with random rotations at test time (ten runs) and report the averages in Table 4. Note VointNet's improvement over Mean Fuse Kundu et al. (2020) and Label Fuse Wang et al. (2019a) on unrotated setup despite that both baselines use the same trained 2D backbone as VointNet. Also, for rotated setups, point methods don't work as well. All the results in Table 4 are reproduced by our code in the same setup (see the code attached in supplementary material). Figure 3 shows qualitative 3D segmentation results for VointNet and Mean Fuse Kundu et al. (2020) as compared to the ground truth. + +# 5.4 OCCLUSION ROBUSTNESS + +One of the aspects of the robustness of 3D classification models that have been recently studied is their robustness to occlusion, as detailed in MVTN Hamdi et al. (2021). These simulated occlusions are introduced at test time, and the average test accuracy is reported on each cropping ratio. We benchmark our VointNet against recent baselines in Table 5. PointNet Qi et al. (2017a) and DGCNN Wang et al. 
(2019c) are used as point-based baselines, and MVTN Hamdi et al. (2021) as a multi-view baseline. + +
| Method | Data Type | 0 | 0.1 | 0.2 | 0.3 | 0.5 |
| :--- | :--- | ---: | ---: | ---: | ---: | ---: |
| PointNet (Qi et al., 2017a) | Points | 89.1 | 88.2 | 86.1 | 81.6 | 53.5 |
| DGCNN (Wang et al., 2019c) | Points | 92.1 | 77.1 | 74.5 | 71.2 | 30.1 |
| PCT (Guo et al., 2021) | Points | 93.3 | 92.6 | 91.1 | 88.2 | 61.9 |
| MVTN (Hamdi et al., 2021) | M-View | 93.8 | 90.3 | 89.9 | 88.3 | 67.1 |
| VointNet (ours) | Voints | 92.8 | 91.6 | 91.2 | 89.1 | 66.1 |
+ +![](images/ca2196b969d0352d172fa93d3fa3d0cc81b1a8cce986db6e983565528fffa072.jpg) +Figure 4: Effect of the Number of Views. We plot Ins. mIoU of 3D segmentation vs. the number of views $(M)$ used in inference on ShapeNet Parts. Note VointNet's consistent improvement over Mean Fuse (Kundu et al., 2020) and Label Fuse (Wang et al., 2019a). Both baselines use the same trained 2D backbone as VointNet and are tested on the same unrotated setup. + +# 6 ANALYSIS AND INSIGHTS + +Number of Views. We study the effect of the number of views $M$ on the performance of 3D part segmentation using multiple views. We compare Mean Fuse (Kundu et al., 2020) and Label Fuse (Wang et al., 2019a) to our VointNet when all of them have the same trained 2D backbone. The views are randomly picked, and the experiments are repeated four times. Ins. mIoU with confidence intervals are shown in Figure 4. We observe a consistent improvement with VointNet over the other two baselines across different numbers of views. + +Table 5: Occlusion Robustness for 3D Classification. We report the test accuracy on ModelNet40 (Wu et al., 2015) for different occlusion ratios of the data to measure occlusion robustness of different 3D methods. + +
| 2D Backbone | VointConv | Inst. mIoU |
| :--- | :--- | ---: |
| FCN | MLP | 78.8 ± 0.2 |
| FCN | GCN | 77.6 ± 0.2 |
| FCN | GAT | 77.1 ± 0.2 |
| DeepLabV3 | MLP | 80.6 ± 0.1 |
| DeepLabV3 | GCN | 77.2 ± 0.4 |
| DeepLabV3 | GAT | 80.4 ± 0.2 |
+ +Table 6: Ablation Study for 3D Segmentation. We ablate different components of VointNet (2D backbone and VointConv choice) and report Ins. mIoU performance on ShapeNet Parts. + +Choice of Backbones. We ablate the choice of the 2D backbone and the VointConv operation used in VointNet and report the segmentation Ins. mIoU results in Table 6. Note how the 2D backbone greatly affects performance, while the VointConv operation type does not. This ablation highlights the importance of the 2D backbone in VointNet pipeline and motivates the use of the simplest variant of VointNet (MLP). We provide a detailed study of more factors as well as compute and memory costs in the Appendix. + +# 7 LIMITATIONS AND ACKNOWLEDGMENTS + +One aspect limiting the performance of Voints is how well-trained the 2D backbone is for the downstream 3D task. In most cases, the 2D backbone must be pretrained with enough data to learn meaningful information for VointNet. Another aspect that limits the capability of the Voint cloud is how to properly select the view-points for segmentation. Addressing these limitations is an important direction for future work. Also, extending Voint learning on more 3D tasks like 3D scene segmentation and 3D object detection is left for future work. + +Acknowledgments. This work was supported by the King Abdullah University of Science and Technology (KAUST) Office of Sponsored Research through the Visual Computing Center (VCC) funding and the SDAIA-KAUST Center of Excellence in Data Science and Artificial Intelligence (SDAIA-KAUST AI) + +# REFERENCES + +Gary Bradski and Stephen Grossberg. Recognition of 3-d objects from multiple 2-d views by a self-organizing neural architecture. In *From Statistics to Neural Networks*, pp. 349–375. Springer, 1994. +Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. 
ShapeNet: An Information-Rich 3D Model Repository. Technical Report arXiv:1512.03012 [cs.GR], Stanford University — Princeton University — Toyota Technological Institute at Chicago, 2015. +Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In Proceedings of the European conference on computer vision (ECCV), pp. 801-818, 2018. +Xiaozhi Chen, Huimin Ma, Ji Wan, Bo Li, and Tian Xia. Multi-view 3d object detection network for autonomous driving. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 1907-1915, 2017. +Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3075-3084, 2019. +Taco Cohen and Max Welling. Group equivariant convolutional networks. In International conference on machine learning, pp. 2990-2999, 2016. +Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multi-view prediction for 3d semantic scene segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 452-468, 2018. +Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021. +Carlos Esteves, Yinshuang Xu, Christine Allen-Blanchette, and Kostas Daniilidis. Equivariant multi-view networks. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1568-1577, 2019. +Yutong Feng, Yifan Feng, Haoxuan You, Xibin Zhao, and Yue Gao. Meshnet: Mesh neural network for 3d shape representation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 8279-8286, 2019. 
+Ankit Goyal, Hei Law, Bowei Liu, Alejandro Newell, and Jia Deng. Revisiting point cloud shape classification with a simple and effective baseline. In ICML, 2021. +Meng-Hao Guo, Jun-Xiong Cai, Zheng-Ning Liu, Tai-Jiang Mu, Ralph R Martin, and Shi-Min Hu. Pct: Point cloud transformer. Computational Visual Media, 7(2):187-199, 2021. + +Abdullah Hamdi, Sara Rojas, Ali Thabet, and Bernard Ghanem. Advpc: Transferable adversarial perturbations on 3d point clouds. In Computer Vision - ECCV 2020, pp. 241-257, Cham, 2020. Springer International Publishing. ISBN 978-3-030-58610-2. +Abdullah Hamdi, Silvio Giancola, and Bernard Ghanem. Mvtn: Multi-view transformation network for 3d shape recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 1-11, October 2021. +Zhizhong Han, Xiyang Wang, Yu-Shen Liu, and Matthias Zwicker. Multi-angle point cloud-vae: Unsupervised feature learning for 3d point clouds from multiple angles by joint self-reconstruction and half-to-half prediction. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 10441-10450. IEEE, 2019. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CoRR, abs/1512.03385, 2015. URL http://arxiv.org/abs/1512.03385. +Ji Hou, Angela Dai, and Matthias Nießner. 3d-sis: 3d semantic instance segmentation of rgb-d scans. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4421-4430, 2019. +Wenbo Hu, Hengshuang Zhao, Li Jiang, Jiaya Jia, and Tien-Tsin Wong. Bidirectional projection network for cross dimension scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14373-14382, 2021. +Maximilian Jaritz, Jiayuan Gu, and Hao Su. Multi-view pointnet for 3d scene understanding. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 0-0, 2019. 
+Evangelos Kalogerakis, Melinos Averkiou, Subhransu Maji, and Siddhartha Chaudhuri. 3d shape segmentation with projective convolutional networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3779-3788, 2017. +Asako Kanezaki, Yasuyuki Matsushita, and Yoshifumi Nishida. Rotationnet: Joint object categorization and pose estimation using multiviews from unsupervised viewpoints. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5010-5019, 2018. +Abhijit Kundu, Xiaoqi Yin, Alireza Fathi, David Ross, Brian Brewington, Thomas Funkhouser, and Caroline Pantofaru. Virtual multi-view fusion for 3d semantic segmentation. In European Conference on Computer Vision (ECCV), pp. 518-535. Springer, 2020. +Yangyan Li, Rui Bu, Mingchao Sun, Wei Wu, Xinhan Di, and Baoquan Chen. Pointcnn: Convolution on x-transformed points. In Advances in neural information processing systems (NIPS), pp. 820-830, 2018. +Yongcheng Liu, Bin Fan, Shiming Xiang, and Chunhong Pan. Relation-shape convolutional neural network for point cloud analysis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8895-8904, 2019a. +Ze Liu, Zheng Zhang, Yue Cao, Han Hu, and Xin Tong. Group-free 3d object detection via transformers. arXiv preprint arXiv:2104.00678, 2021a. +Zhengzhe Liu, Xiaojuan Qi, and Chi-Wing Fu. 3d-to-2d distillation for indoor scene parsing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4464-4474, 2021b. +Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Point-voxel cnn for efficient 3d deep learning. In Advances in Neural Information Processing Systems, pp. 965-975, 2019b. +Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3431-3440, 2015. + +Ilya Loshchilov and Frank Hutter. 
Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. +Yecheng Lyu, Xinming Huang, and Ziming Zhang. Learning to segment 3d point clouds in 2d image space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12255-12264, 2020. +Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7210-7219, 2021. +Daniel Maturana and Sebastian Scherer. Voxnet: A 3d convolutional neural network for real-time object recognition. In 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 922-928. IEEE, 2015. +Leonard McMillan and Gary Bishop. Plenoptic modeling: An image-based rendering system. In Proceedings of the 22nd annual conference on Computer graphics and interactive techniques, pp. 39-46, 1995. +Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4460-4470, 2019. +Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pp. 405-421. Springer, 2020. +Masatoshi Okutomi and Takeo Kanade. A multiple-baseline stereo. IEEE Transactions on pattern analysis and machine intelligence, 15(4):353-363, 1993. +Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 165-174, 2019. 
+Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. +Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10318-10327, 2021. +Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 652-660, 2017a. +Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In Advances in neural information processing systems (NIPS), pp. 5099-5108, 2017b. +Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501, 2020. +Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael S. Bernstein, Alexander C. Berg, and Fei-Fei Li. Imagenet large scale visual recognition challenge. CoRR, abs/1409.0575, 2014. URL http://arxiv.org/abs/1409.0575. + +Konstantinos Sfikas, Theoharis Theoharis, and Ioannis Pratikakis. Exploiting the PANorama Representation for Convolutional Neural Network Classification and Retrieval. In Ioannis Pratikakis, Florent Dupont, and Maks Ovsjanikov (eds.), Eurographics Workshop on 3D Object Retrieval, pp. 1-7. The Eurographics Association, 2017. ISBN 978-3-03868-030-7. doi: 10.2312/3dor.20171045. +Hang Su, Subhransu Maji, Evangelos Kalogerakis, and Erik Learned-Miller. Multi-view convolutional neural networks for 3d shape recognition. In Proceedings of the IEEE international conference on computer vision, pp. 
945-953, 2015. +Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. Kpconv: Flexible and deformable convolution for point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6411–6420, 2019. +Mikaela Angelina Uy, Quang-Hieu Pham, Binh-Son Hua, Duc Thanh Nguyen, and Sai-Kit Yeung. Revisiting point cloud classification: A new benchmark dataset and classification model on real-world data. In International Conference on Computer Vision (ICCV), 2019. +Brian H Wang, Wei-Lun Chao, Yan Wang, Bharath Hariharan, Kilian Q Weinberger, and Mark Campbell. Ldls: 3-d object segmentation through label diffusion from 2-d images. IEEE Robotics and Automation Letters, 4(3):2902-2909, 2019a. +He Wang, Srinath Sridhar, Jingwei Huang, Julien Valentin, Shuran Song, and Leonidas J Guibas. Normalized object coordinate space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2642-2651, 2019b. +Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E. Sarma, Michael M. Bronstein, and Justin M. Solomon. Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics (TOG), 2019c. +Xin Wei, Ruixuan Yu, and Jian Sun. View-gcn: View-based graph convolutional network for 3d shape analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1850-1859, 2020. +Ross Wightman. Pytorch image models. https://github.com/rwightman/pytorch-image-models, 2019. +Zhirong Wu, S. Song, A. Khosla, Fisher Yu, Linguang Zhang, Xiaoou Tang, and J. Xiao. 3d shapenets: A deep representation for volumetric shapes. In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1912-1920, 2015. +Tiange Xiang, Chaoyi Zhang, Yang Song, Jianhui Yu, and Weidong Cai. Walk in the cloud: Learning curves for point clouds shape analysis. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 915-924, October 2021. +Yifan Xu, Tianqi Fan, Mingye Xu, Long Zeng, and Yu Qiao. SpiderCNN: Deep learning on point sets with parameterized convolutional filters. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 87-102, 2018. +Li Yi, Vladimir G Kim, Duygu Ceylan, I-Chao Shen, Mengyan Yan, Hao Su, Cewu Lu, Qixing Huang, Alla Sheffer, and Leonidas Guibas. A scalable active framework for region annotation in 3d shape collections. ACM Transactions on Graphics (ToG), 35(6):1-12, 2016. +Haoxuan You, Yifan Feng, Rongrong Ji, and Yue Gao. Pvnet: A joint convolutional network of point cloud and multi-view for 3d shape recognition. In Proceedings of the 26th ACM international conference on Multimedia, pp. 1310-1318, 2018. +Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In ICCV, 2021. +Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip Torr, and Vladlen Koltun. Point transformer. arXiv preprint arXiv:2012.09164, 2020. + +# APPENDIX + +# A DETAILED FORMULATIONS + +# A.1 TOY EXAMPLE + +In the toy 2D example in Figure 5, the center point (represented by a circular function $g$ ) is viewed from various view-points $u_{j}$ that are agnostic to the underlying function itself. In many applications, it is desired to have a single feature representing each point in the point cloud. When the projected values of $g$ from these $u_{j}$ view-points are aggregated together (e.g. by max/mean pool) to get a constant representation of that point, the underlying properties of $g$ are lost. We build our Voint representation to keep the structure of $g$ intact by taking the full set $\{(u_{j},g(u_{j}))\}_{j = 1}^{5}$ in learning the aggregations. 
+ +# A.2 FUNCTIONAL FORM OF VOINTNET + +We can look at a simplified setup to decide on the functional form of the deep neural network that operates in the Voint space. In this simplified setup, we consider a 2D example (instead of 3D Voints) and assume that a circular function describes a point at the center. The center point will assume its value according to the angle $u$ . The following Theorem 1 proves that for any continuous set function $f$ that operates on any set of $M$ angles $\{u_1, \dots, u_M\}$ , there exists an equivalent composite function consisting of transformed max-pooled individual view-features. This composition is the functional form we describe later for Voint neural networks + +Theorem 1 Suppose $f: \mathcal{S} \to \mathbb{R}$ is a continuous set function operating on an angles set $\mathcal{S} = \{u \mid u \in [0,2\pi]\}$ . The continuity of $f$ is based on the Hausdorff distance $d_H$ between two sets of angles, where $d_H(\mathcal{S},\mathcal{S}') = \max_{u_i' \in \mathcal{S}'} \min_{u_i \in \mathcal{S}} d_A(u_i,u_i')$ , and $d_A$ is the smallest positive angle between two angles $d_A(u,u') = \min(|u - u'|, 2\pi - |u - u'|)$ . Then, for every $\epsilon > 0$ , and $\mathcal{U} = \{u_1,\dots,u_M\} \subset \mathcal{S}$ , there exists a continuous function $\mathbf{h}$ and a symmetric function $g(u_1,\dots,u_M) = \gamma \circ \mathrm{MAX}$ , such that: + +$$ +\left| f (\mathcal {U}) - \gamma \left(\operatorname {M A X} \left(\mathbf {h} \left(u _ {1}\right), \dots , \mathbf {h} \left(u _ {M}\right)\right)\right) \right| < \epsilon , \tag {5} +$$ + +where $\gamma$ is a continuous function, and MAX is an element-wise vector max operator. + +Proof. By the continuity of $f$ , we take $\delta_{\epsilon}$ so that $|f(\mathcal{U}) - f(\mathcal{U}')| < \epsilon$ for any $\mathcal{U}, \mathcal{U}' \subset \mathcal{S}$ if $d_H(\mathcal{U}, \mathcal{U}') < \delta_{\epsilon}$ . 
Define $K = [2\pi/\delta_{\epsilon}]$, which splits $[0, 2\pi]$ into $K$ intervals evenly, and define an auxiliary function that maps an angle to the beginning of the interval it lies in: + +$$ +\sigma (u) = \frac {\lfloor K u \rfloor}{K} +$$ + +Let $\tilde{\mathcal{U}} = \{\sigma(u): u \in \mathcal{U}\}$ , then + +$$ +\left| f (\mathcal {U}) - f (\tilde {\mathcal {U}}) \right| < \epsilon \tag {6} +$$ + +Let $h_k(u) = e^{-d\left(u, \left[\frac{k-1}{K}, \frac{k}{K}\right]\right)}$ be a soft indicator function where $d\left(u, \left[\frac{k-1}{K}, \frac{k}{K}\right]\right) = \min\left(d_A\left(u, \frac{k-1}{K}\right), d_A\left(u, \frac{k}{K}\right)\right)$ is the distance from angle $u$ to interval $\left[\frac{k-1}{K}, \frac{k}{K}\right]$ . Let $\mathbf{h}(u) = [h_1(u); \ldots; h_K(u)]$ , then $\mathbf{h}: \mathbb{R} \to \mathbb{R}^K$ . + +Let $q_{j}(u_{1},\ldots ,u_{M}) = \max \{h_{j}(u_{1}),\ldots ,h_{j}(u_{M})\}$ , indicating the occupancy of the $j$ -th interval by angles in $\mathcal{U}$ . Let $\mathbf{q} = [q_1;\dots;q_K]$ , then $\mathbf{q}:[0,2\pi ]^M\to \{0,1\} ^K$ is a symmetric function, indicating the occupancy of each interval by angles in $\mathcal{U}$ . + +Define $\zeta : \{0,1\}^K \to \mathcal{S}$ as $\zeta(\mathbf{q}) = \left\{\frac{k-1}{K} : q_k \geq 1\right\}$ , which maps the occupancy vector to a set which contains the left end of each angle interval. It is straightforward to show: + +$$ +\zeta (\mathbf {q} (\mathcal {U})) \equiv \tilde {\mathcal {U}} \tag {7} +$$ + +Let $\gamma : \mathbb{R}^K \to \mathbb{R}$ be a continuous function such that $\gamma(\mathbf{q}) = f(\zeta(\mathbf{q}))$ for $\mathbf{q} \in \{0,1\}^K$ . 
Then from Eq (6) and Eq (7), + +$$ +\begin{array}{l} \left| \gamma (\mathbf {q} (\mathcal {U})) - f (\mathcal {U}) \right| \\ = \left| f \left(\zeta (\mathbf {q} (\mathcal {U}))\right) - f (\mathcal {U}) \right| < \epsilon \\ \end{array} \tag {8} +$$ + +![](images/72e8bc6c21ce842713cf2f761952fd837f95ca87330b4317430bb398147d99b3.jpg) +Figure 5: A Toy 2D Example of Voints. Voints assume view-dependency for every 3D point. Here, we look at a single 2D point at the center with a circular function $g(u) = \mathrm{sign}(\cos u)$ from five arbitrary view-points $\{u_j\}_{j=1}^5$ . Trying to reduce $g$ to a single value based on $u_j$ projections undermines the underlying structure of $g$ . We take the full set $\{(u_j, g(u_j))\}_{j=1}^5$ as a representation of $g$ and learn a set function $f$ on these view-features for a more informative manner of representation aggregation. + +Note that $\gamma (\mathbf{q}(\mathcal{U}))$ can be rewritten as follows: + +$$ +\begin{array}{l} \gamma \left(\mathbf {q} \left(\mathcal {U}\right)\right) = \gamma \left(\mathbf {q} \left(u _ {1}, \dots , u _ {M}\right)\right) \\ = \gamma (\operatorname {M A X} \left(\mathbf {h} \left(u _ {1}\right), \dots , \mathbf {h} \left(u _ {M}\right)\right)) \tag {9} \\ = (\gamma \circ \operatorname {M A X}) \left(\mathbf {h} \left(u _ {1}\right), \dots , \mathbf {h} \left(u _ {M}\right)\right) \\ \end{array} +$$ + +Since $\gamma \circ$ MAX is a symmetric function, and from Eq (8) and Eq (9), we reach the main result in Eq (5). This concludes the proof. + +# A.3 3D VOINT CLOUD + +Plenoptic and Spherical Coordinate Functions. The Plenoptic function was first introduced by McMillan and Bishop (McMillan & Bishop, 1995) in 1995 as a general function that describes the visible world. 
The Plenoptic function $P$ is a continuous spherical function that describes the visibility at any Euclidean 3D point in space $(V_x, V_y, V_z)$ when looking into any direction $(\theta, \phi)$ across wavelength $\lambda$ at time $t$ . It is defined as $p = P(\theta, \phi, \lambda, V_x, V_y, V_z, t)$ . Such a remarkable and compact formulation covers all the images observed as just samples of the function $P$ . For fixed time and wavelength, the reduced Plenoptic function $P$ becomes $p = P(\theta, \phi, V_x, V_y, V_z)$ , which can describe any field in 3D space. This shortened formulation is what Neural Radiance Fields (NeRFs) (Mildenhall et al., 2020; Pumarola et al., 2021; Martin-Brualla et al., 2021) try to learn with MLPs to describe the radiance and RGB values in the continuous Euclidean space with a dependency on the view direction $(\theta, \phi)$ . In the same spirit of the Plenoptic function and NeRFs, the Voint cloud representation relies on the viewing angles $(\theta, \phi)$ to define the view-features. The problem with the plenoptic functions $P$ , and subsequently NeRFs, is that they are very high dimensional, and any attempt to densely represent the scene with discrete and fixed data will cause memory and compute issues (Yu et al., 2021; Pumarola et al., 2021). Unlike NeRFs (Mildenhall et al., 2020) that define dense 3D volumes, we focus only on the surface of the 3D shapes with our Voint clouds representation. Our Voints are in the order of the sampled point cloud, offering a compact representation that allows for efficient computation and memory while maintaining the view-dependent component that facilitates view-based learning. + +From Point Clouds to Voint Clouds. Implicit representation of 3D surfaces typically aims to learn an implicit function $g_{\mathrm{s}}(\mathbf{x}) : \mathbb{R}^3 \to \mathbb{R}$ that defines the Signed Distance Function + +(SDF) or the occupancy in the continuous Euclidean space (Park et al., 2019; Mescheder et al., 2019). 
The 3D iso-surface is then defined as the set of all points $\mathbf{x}$ that satisfy the condition $g_{\mathrm{s}}(\mathbf{x}) = 0$ (assuming $g_{\mathrm{s}}(\mathbf{x})$ as SDF hereafter). We define a surface 3D point cloud $\mathcal{X} \in \mathbb{R}^{N \times 3}$ , as a set of $N$ 3D points, where each point $\mathbf{x}_i \in \mathbb{R}^3$ is represented by its 3D coordinates $(x_i, y_i, z_i)$ and satisfy the iso-surface condition as follows. + +$$ +\mathcal {X} = \left\{\mathbf {x} _ {i} \in \mathbb {R} ^ {3} \mid g _ {\mathrm {s}} (\mathbf {x} _ {i}) = 0 \right\} _ {i = 1} ^ {N} \tag {10} +$$ + +Here, we assume that surface points also depend on the view direction from which they are being observed. Specifically, there exists a continuous implicit spherical function $\mathbf{g}(\mathbf{x},\mathbf{u}):$ $\mathbb{R}^5\to \mathbb{R}^d$ that defines the features at each point $\mathbf{x}$ depending on the view direction $\mathbf{u}$ . Given a set of $M$ view-point directions $\mathcal{U}\in \mathbb{R}^{M\times 2}$ , a Voint $\widehat{\mathbf{x}}\in \mathbb{R}^{M\times d}$ is a set of $M$ view-dependent features of size $d$ for the sphere centered at point $\mathbf{x}$ . The Voint cloud $\widehat{\mathcal{X}}\in \mathbb{R}^{N\times M\times d}$ is the set of all $N$ Voints $\widehat{\mathbf{x}}$ . + +$$ +\widehat {\mathbf {x}} _ {i} = \left\{\mathbf {g} \left(\mathbf {x} _ {i}, \mathbf {u} _ {j}\right) \in \mathbb {R} ^ {d} \mid \mathbf {x} _ {i} \in \mathcal {X} \right\} _ {j = 1} ^ {M} \tag {11} +$$ + +$$ +\widehat {\mathcal {X}} = \left\{\widehat {\mathbf {x}} _ {i} \in \mathbb {R} ^ {M \times d} \right\} _ {i = 1} ^ {N} +$$ + +Note that we typically do not have access to the underlying implicit function $\mathbf{g}$ and we approximate it by 2D projection, feature extraction, and then un-projection as we show next. + +1-Multi-View Projection. As mentioned earlier, a Voint combines multiple view-features of the same 3D point. 
These view-features come from a multi-view projection of the points by a point cloud renderer $\mathbf{R}:\mathbb{R}^{N\times 3}\to \mathbb{R}^{M\times H\times W\times 3}$ that renders the point cloud $\mathcal{X}$ from multiple view-points $\mathcal{U}$ into $M$ images of size $H\times W\times 3$ . In addition to projecting the point cloud into the image space, $\mathbf{R}$ defines the mapping $\mathbf{B}\in \{0,\dots,N\}^{M\times H\times W}$ between each pixel to the N points and background it renders. Also, $\mathbf{R}$ outputs the visibility binary matrix $\mathbf{V}\in \{0,1\}^{N\times M}$ for each point from each view. Since not all points appear in all the views due to pixel discretization, the visibility score $\mathbf{V}_{i,j}$ defines if the Voint $\hat{\mathbf{x}}_i$ is visible in the view $\mathbf{u}_j$ . The matrix $\mathbf{B}$ is crucial for unprojection, while $\mathbf{V}$ is needed for defining meaningful operations on Voints. + +2-Multi-View Feature Extraction. The rendered images are processed by a function $\mathbf{C}:\mathbb{R}^{M\times H\times W\times 3}\to \mathbb{R}^{M\times H\times W\times d}$ that extracts image features. If $\mathbf{C}$ is the identity function, all the view-features would be identical for each Voint (typically the RGB value of the corresponding point). However, the $\mathbf{C}$ function can be a 2D network dedicated to the downstream task and can extract useful global and local features about each view. + +3-Multi-View Unprojection. We propose a module $\Phi_{\mathbf{B}}:\mathbb{R}^{M\times H\times W\times d}\to \mathbb{R}^{N\times M\times d}$ that unprojects the 2D features from each pixel to be 3D view-features at the corresponding Voint. This is performed by using the mapping $\mathbf{B}$ created by the renderer to form the Voint cloud features $\widehat{\mathcal{X}}$ . 
Note that the points are not necessarily visible from all the views, and some Voints that are not visible from any of the $M$ views will not receive any features. We post-process these empty points ( $\sim 0.5\%$ of points during inference) to be filled with nearest 3D neighbors' features. The output Voint cloud features would be described as follows. + +$$ +\widehat {\mathbf {x}} _ {i} = \left\{\mathbf {g} _ {i, j,:} \in \mathbb {R} ^ {d} \mid \mathbf {x} _ {i} \in \mathcal {X}, \mathbf {V} _ {i, j} = 1 \right\} _ {j = 1} ^ {M} +$$ + +$$ +\mathbf {g} _ {:, j} = \Phi_ {\mathbf {B}} \left(\mathbf {C} \left(\mathbf {R} \left(\mathcal {X}, \mathbf {u} _ {j}\right)\right), \mathbf {B}\right) \tag {12} +$$ + +$$ +\widehat {\mathcal {X}} = \left\{\widehat {\mathbf {x}} _ {i} \in \mathbb {R} ^ {M \times d} \right\} _ {i = 1} ^ {N} +$$ + +# A.4 VOINT OPERATIONS + +VointMax. In order to learn a neural network in the Voint space in the form dictated by Theorem 1, we need to define some basic differentiable operations on the Voint space. The + +![](images/4b68f906dad809b2cdbb8df39da391275635ec0e528c03019df6e17bc56ee754.jpg) +Figure 6: VointNet Variants. We propose three variants of VointNet that use three different examples of VointConv operation $h_v$ : shared MLP (MLP), Graph Convolution (GCN), and Graph Attention (GAT). Here we highlight the main difference between VointNet (MLP), which shares the MLP on all the view-features, and VointNet (GCN), which creates a fully connected graph on the view-features and learns an MLP on the edge view-features. VointNet (GAT) is similar to VointNet (GCN) in addition to learning attention weights for each view-feature in weighted average aggregation. + +max operation on the Voint cloud can be defined as follows. + +$$ +\operatorname {V o i n t M a x} (\widehat {\mathbf {x}}) = \max \widehat {\mathbf {x}} _ {i, j}, \forall i, j \tag {13} +$$ + +$$ +\mathrm {s . 
t .} i \in 1, 2, \dots , N, j \in 1, 2, \dots , M, \mathbf {V} _ {i, j} = 1 +$$ + +Equivalently, $\mathrm{VointMax}(\widehat{\mathbf{x}}) = \max_j\left(\widehat{\mathbf{x}}_{:,j} - \infty \overline{\mathbf{V}}_{:,j}\right)$ , where $\overline{\mathbf{V}}$ is the complement of $\mathbf{V}$ . + +VointConv. We define the convolution operation $h_{\mathrm{V}}: \mathbb{R}^{N \times M \times d} \to \mathbb{R}^{N \times M \times d'}$ as any learnable function that operates on the Voint space with shared weights on all the Voints and has the view-features input size $d$ and outputs view-features of size $d'$ and consists of $l_{V}$ layers. Examples of this VointConv operation include the following operations applied only on the visible view-features: a shared MLP, a graph convolution, and a graph attention. We detail these operations later in Section A.6, which result in different non-exhaustive variants of VointNet. + +# A.5 LEARNING ON 3D VOINT CLOUDS + +VointNet. Typical 3D point cloud classifiers with a feature max pooling layer work as in Eq (14), where $h_{\mathrm{mlp}}$ and $h_{\mathrm{Pconv}}$ are the MLP and point Convolutional $(1 \times 1$ or edge) layers, respectively. This produces a K-class classifier $\mathbf{F}$ . + +$$ +\mathbf {F} (\mathcal {X}) = h _ {\operatorname {m l p}} \left(\max _ {\mathbf {x} _ {i} \in \mathcal {X}} \left\{h _ {\text {P c o n v}} \left(\mathbf {x} _ {i}\right) \right\}\right) \tag {14} +$$ + +Here, $\mathbf{F}:\mathbb{R}^{N\times 3}\to \mathbb{R}^K$ produces the logits layer of the classifier with size $K$ . On the other hand, the goal of the VointNet model is to get multi-view point cloud features that can be used afterwards by any point cloud processing pipeline. The VointNet module $\widehat{\mathbf{F}}:\mathbb{R}^{N\times M\times d}\rightarrow \mathbb{R}^{N\times d}$ is defined as follows. 
+ +$$ +\widehat {\mathbf {F}} (\widehat {\mathcal {X}}) = h _ {\mathrm {P}} \left(\operatorname {V o i n t M a x} \left(h _ {\mathrm {V}} (\widehat {\mathcal {X}})\right)\right), \tag {15} +$$ + +# A.6 VOINTNET VARIANTS + +We define the convolution operation $h_{\mathrm{V}} \colon \mathbb{R}^{N \times M \times d} \to \mathbb{R}^{N \times M \times d'}$ in VointNet from Eq (15) as any learnable function that operates on the Voint space with shared weights on all the + +Voints and has the view-features input size $d$ and outputs view-features of size $d'$ and consists of $l_V$ layers. Examples of this VointConv operation include the following: + +Shared MLP. It is the most basic Voint neural network. For layer $l$ , the features of Voint i at view j is updated as follows to layer $l + 1$ + +$$ +\mathbf {h} _ {i, j} ^ {l + 1} = \rho \left(\mathbf {h} _ {i, j} ^ {l} \mathcal {W} _ {\rho}\right), \forall i, j \tag {16} +$$ + +$$ +\mathrm {s . t .} i \in {1, 2, \dots , N}, j \in {1, 2, \dots , M}, \mathbf {V} _ {i, j} = 1 +$$ + +where $\rho$ is the shared MLP with weights $\mathcal{W}_{\rho}$ followed by normalization and nonlinear function (e.g. ReLU) applied on all Voints independently at the visible views features for each Voint. This formulation extends the shared MLP formulation for PointNet (Qi et al., 2017a) to make the MLP shared across the Voints and the views-features. + +Graph Convolution (GCN). Just like how DGCNN (Wang et al., 2019c) extended PointNet (Qi et al., 2017a) by taking the neighborhood information and extract edge features, we extend the basic VointNet formulation in Eq (15). We define a fully connected graph for each Voint along the views dimension by creating a center virtual node connected to all the view features (similar to the classification token in ViT (Dosovitskiy et al., 2021)). This center virtual view-feature would be assigned the index $j = 0$ and can be initialized with zeros as the "cls" token in ViT (Dosovitskiy et al., 2021). 
Then, the Voint graph convolution operation can be defined as follows to update the activations from layer $l$ to $l + 1$

$$
\mathbf {h} _ {i, j} ^ {l + 1} = \rho \left(\left(\max _ {k} \psi \left(\left(\mathbf {h} _ {i, j} ^ {l}, \mathbf {h} _ {i, k} ^ {l}\right) \mathcal {W} _ {\psi}\right)\right) \mathcal {W} _ {\rho}\right) \tag {17}
$$

$$
\forall i, j, k \quad \mathrm {s.t.} \quad i \in 1, 2, \dots , N, \quad j \in 0, 1, \dots , M
$$

$$
k \in 0, 1, \dots , M, \quad k \neq j, \quad \mathbf {V} _ {i, j} = 1
$$

where $\rho, \psi$ are two different shared MLPs as in Eq (16). The difference between VointNet (MLP) and VointNet (GCN) is highlighted in Figure 6.

Graph Attention (GAT). Similar to how Point Transformer (Zhao et al., 2020) extended the graph convolution by adding attention to DGCNN (Wang et al., 2019c), we extend the basic Voint GraphConv formulation in Eq (17). The Voint graph attention operation can be defined as follows to update the activations from layer $l$ to $l + 1$

$$
\mathbf {h} _ {i, j} ^ {l + 1} = \rho \left(\left(\sum_ {k = 0, k \neq j} ^ {M} \eta_ {k} \psi \left((\mathbf {h} _ {i, j} ^ {l}, \mathbf {h} _ {i, k} ^ {l}) \mathcal {W} _ {\psi}\right)\right) \mathcal {W} _ {\rho}\right) \tag {18}
$$

$$
\forall i, j \quad \mathrm {s.t.} \quad i \in 1, 2, \dots , N, \quad j \in 0, 1, \dots , M
$$

$$
\eta_ {k} = \zeta \left(\mathbf {h} _ {i, k} ^ {l} \mathcal {W} _ {\zeta}\right), \quad \mathbf {V} _ {i, j} = 1
$$

where $\rho, \psi, \zeta$ are three different shared MLPs as in Eq (16), and $\eta_{k}$ are the learned attention weights for each neighbor view-feature.

# B DETAILED EXPERIMENTAL SETUP

# B.1 DATASETS

ScanObjectNN: 3D Point Cloud Classification. We follow the literature (Goyal et al., 2021; Hamdi et al., 2021) on testing 3D classification in the challenging ScanObjectNN (Uy et al., 2019) point cloud dataset, since it includes background and considers occlusions. 
The dataset is composed of 2902 point clouds divided into 15 object categories. We use 2048 sampled points per object for Voint learning. We benchmark on its variants: Object only, Object with Background, and the Hardest perturbed variant (PB_T50_RS variant). Visualization is provided in Figure 7 of some of the renderings used in training the 2D backbone in our pipeline.

ShapeNet Core55: 3D Shape Retrieval. The shape retrieval challenge SHREC (Sfikas et al., 2017) uses ShapeNet Core55, a subset of ShapeNet (Chang et al., 2015), for benchmarking. The dataset consists of 51,162 3D mesh objects labeled with 55 object classes. The

training, validation, and test sets consist of 35764, 5133, and 10265 shapes. We create a dataset of point clouds by sampling 5000 points from each mesh object as in MVTN (Hamdi et al., 2021).

ShapeNet Parts: 3D Part Segmentation. ShapeNet Parts is a subset of ShapeNet (Chang et al., 2015) that consists of 13,998 point cloud objects for training and 2,874 objects for testing from 16 categories and 50 parts. It is designed for the part segmentation task (Yi et al., 2016). Visualization is provided in Figure 10 of some of the renderings used in training the 2D backbone in our pipeline colored with the ground truth segmentation labels.

ModelNet40: 3D Shape Classification Occlusion Robustness. ModelNet40 (Wu et al., 2015) is composed of 12,311 3D objects (9,843/2,468 in training/testing) labelled with 40 object classes. We sample point clouds of 2048 points from the objects following previous works (Qi et al., 2017b; Zhao et al., 2020). Visualization is provided in Figure 8 of some of the renderings used in training the 2D backbone in our pipeline.

# B.2 METRICS

Classification Accuracy. The standard evaluation metric in 3D classification is accuracy. We report overall accuracy (percentage of correctly classified test samples) and average per-class accuracy (mean of all true class accuracies).

Retrieval mAP. 
Shape retrieval is evaluated by mean Average Precision (mAP) over test queries. For every query shape $\mathbf{S}_q$ from the test set, AP is defined as $AP = \frac{1}{\mathrm{GTP}}\sum_{n}^{N}\frac{\mathbb{1}(\mathbf{S}_n)}{n}$ , where $GTP$ is the number of ground truth positives, $N$ is the size of the ordered training set, and $\mathbb{1}(\mathbf{S}_n) = 1$ if the shape $\mathbf{S}_n$ is from the same class label of query $\mathbf{S}_q$ . We average the retrieval AP over the test set to measure retrieval mAP. + +Segmentation mIoU. Semantic Segmentation is evaluated by mean Intersection over Union (mIoU) over pixels or points. For every class label, measure the size of the intersection mask between the ground truth points of that label and the predicted points as that label. Then, divide by the size of the union mask of the same label to get IoU. This procedure is repeated over all the labels, and averaging the IoUs gives mIoU. We report two types of mIoUs: Instance-averaged mIoU (averages all mIoUs across all objects) and Category-averaged mIoU (averages all mIoU from shapes of the same category, and then average those across object categories). + +# B.3 BASELINES + +Point Cloud Networks. We include PointNet (Qi et al., 2017a), PointNet++ (Qi et al., 2017b), DGCNN (Wang et al., 2019c), PVNet (You et al., 2018), and KPConv (Thomas et al., 2019), Point Transformer (Zhao et al., 2020) and CurveNet (Xiang et al., 2021) as baselines that use point clouds. These methods leverage different convolution operators on point clouds by aggregating local and global point information. + +Multi-View Networks. We also compare against multi-view classification approaches like MVCNN (Su et al., 2015) and MVTN (Hamdi et al., 2021) as baselines for classification and retrieval. Since there is no available multi-view pipeline for 3D part segmentation, we adopt some of the multi-view segmentation baselines (e.g. 
Label Fusion (Wang et al., 2019a) and Mean Fusion (Kundu et al., 2020)) for part segmentation to work in the Voint space. + +# B.4 IMPLEMENTATION DETAILS + +Rendering and Un-Projection. We choose the differentiable point cloud renderer $\mathbf{R}$ from Pytorch3D (Ravi et al., 2020) in our pipeline for its speed and compatibility with Pytorch libraries (Paszke et al., 2017). We render multi-view images with size $224 \times 224 \times 3$ . We color the points by their normals' values or keep them white if the normals are not available. Following a similar procedure to (Wei et al., 2020; Hamdi et al., 2021), the view-point setup is randomized during training (using $M = 8$ views) and fixed to spherical views in testing (using $M = 12$ views). + +![](images/b72ea466f6ac3834f0aaa6df8cef30defba9035c4d7883401a3643a21dbd1009.jpg) +Figure 7: ScanObjectNN Variants. We show examples of point cloud renderings of different variants of the ScanObjectNN (Uy et al., 2019). These renderings are used in training VointNet for 3D point cloud classification. + +Architectures. For the 2D backbone, we use ViT (Dosovitskiy et al., 2021) (with pretrained weights from TIMM library (Wightman, 2019)) for classification and DeepLabV3 (Chen et al., 2018) for segmentation. We used parallel heads for each object category for part segmentation since the task is solely focused on parts. We use the 3D cross-entropy loss on the 3D point cloud output and the 2D cross-entropy loss when the loss is defined on the pixels. When used, the linear tradeoff coefficient of the 2D loss term is set to 0.003. To balance the frequency of objects in part segmentation, we multiply the loss by the frequency of the object class of each object we segment. The feature dimension of the VointNet architectures is $d = 64$ , and the depth is $l_{V} = 4$ layers in $h_V$ . The main results are based on VointNet (MLP) variant unless otherwise specified. 
The coordinates $\mathbf{x}$ can be optionally appended to the input view-features $\hat{\mathbf{x}}$ , which can improve the performance but reduce the rotation robustness as we show later in Section C.1 and Table 9. + +Training Setup. We train our pipeline in two stages, where we start by training the 2D backbone on the 2D projected labels of the points, then train the full pipeline end-to-end while focusing the training on the VointNet part. We use the AdamW optimizer (Loshchilov & Hutter, 2017) with an initial learning rate of 0.0005 and a step learning rate schedule of $33.3\%$ every 12 epochs for 40 epochs. The pipeline is trained with one NVIDIA Tesla V100 GPU. We do not use any data augmentation. + +![](images/37f5bed1a1b60474aaf51dd5ca7f2d0a5227449d29ac440e1539667849c0ab69.jpg) +Figure 8: ModelNet40. We show some examples of point cloud renderings of ModelNet40 (Wu et al., 2015) used for 3D classification robustness in our setup. + +![](images/67afcf6005c307d2f1f9b41ca7f0088ec0f12e2feb5ef06c405e1dae574656c4.jpg) +Figure 9: ShapeNet Core55. We show some examples of point cloud renderings of ShapeNet Core55 (Chang et al., 2015) used for 3D shape retrieval in our setup. + +![](images/e89f3da343521484611f5cf0479ba70b7aecbbbadd1cf045bfef60c2f4c38f75.jpg) +Figure 10: ShapeNet Parts. We show some examples of point cloud renderings of ShapeNet Parts (Yi et al., 2016) colored with ground truth segmentation labels. We use these renderings as 2D ground truth to pre-train the 2D backbone $\mathbf{C}$ for 2D segmentation before training VointNet's pipeline for 3D segmentation. + +
MethodData TypeClassification +ModelNet40Shape Retrieval +ShapeNetCore
PointNet (Qi et al., 2017a)Points89.2-
PointNet++ (Qi et al., 2017b)Points91.9-
DGCNN (Wang et al., 2019c)Points92.2-
KPConv(Thomas et al., 2019)Points92.9-
PCT(Guo et al., 2021)Points93.3-
CurveNet(Xiang et al., 2021)Points93.8-
ReVGG (Sfikas et al., 2017)M-View-74.9
MVCNN (Su et al., 2015)M-View90.173.5
ViewGCN (Wei et al., 2020)M-View93.378.4
MVTN (Hamdi et al., 2021)M-View93.882.9
VointNet (ours)Voints92.883.3
+ +Table 7: 3D Shape Classification and Retrieval. We report VointNet's classification accuracy on ModelNet40 (Wu et al., 2015) and its 3D shape retrieval mAP on ShapeNet Core55 (Chang et al., 2015; Sfikas et al., 2017). Baseline results are reported from (Hamdi et al., 2021; Zhao et al., 2020; Xiang et al., 2021). + +
MethodRotation Perturbations Range
±90°±180°
PointNet (Qi et al., 2017a)88.742.538.6
PointNet ++ (Qi et al., 2017b)88.247.939.7
RSCNN (Liu et al., 2019a)90.390.390.3
MVTN (Hamdi et al., 2021)91.790.891.2
VointNet (ours)91.590.991.1
+ +

Table 8: Rotation Robustness for 3D Classification. At test time, we randomly rotate objects in ModelNet40 (Wu et al., 2015) around the Y-axis (gravity) with different ranges and report the overall accuracy.

# C ADDITIONAL RESULTS

# C.1 MODEL ROBUSTNESS

Rotation Robustness for 3D Classification. We follow the standard practice in 3D shape classification literature by testing the robustness of trained models to perturbations at test time (Liu et al., 2019a; Hamdi et al., 2021). We perturb the shapes with random rotations around the Y-axis (gravity-axis) contained within $\pm 90^{\circ}$ and $\pm 180^{\circ}$ and report the test accuracy over ten runs in Table 8.

Rotation Robustness for 3D Segmentation. We follow the previous 3D literature by testing the robustness of trained models to perturbations at test time (Liu et al., 2019a; Hamdi et al., 2021; 2020). We perturb the shapes in ShapeNet Parts with random rotations in $SO(3)$ at test time (ten runs) and report Ins. mIoU in Table 9. Note how our VointNet performance largely exceeds the baselines in this realistic unaligned scenario. We can augment the training with rotated objects for the baselines, which improves their robustness but loses performance on the unrotated setup. Adding xyz coordinates to the view-features of VointNet improves the performance on the unrotated setup but negatively affects the robustness to rotations. The discrepancy between the Voint results and the results of some point cloud methods stems from the fact that Voints heavily depend on the underlying 2D backbone and inherit all its biases, especially those from pretraining. Hence, the 2D backbone limits what the performance can reach with VointNet. We study the effect of the backbone in detail in Section C.2. Figure 11 shows qualitative 3D segmentation results for VointNet and Mean Fuse (Kundu et al., 2020) as compared to the ground truth. 
Ground TruthVointNet (ours)Mean Fuse (Kundu et al., 2020)
+ +Figure 11: Qualitative Comparison for 3D Part Segmentation. We compare our VointNet 3D segmentation prediction to Mean Fuse (Kundu et al., 2020) that is using the same trained 2D backbone. Note how VointNet distinguishes detailed parts (e.g. the car window frame). Beware that visualization colors can shift if an extra label is predicted (e.g. the motorbike labels are correct). + +
MethodSegmentation UnrotatedUnder Rotation Rotated
PointNet (Qi et al., 2017a)80.136.6 ±0.2
DGCNN (Wang et al., 2019c)80.137.1 ±0.2
PointNet + Aug.65.865.8 ±0.1
DGCNN + Aug.60.760.7 ±0.2
Mean Fuse (Kundu et al., 2020)79.161.6 ±0.1
Label Fuse (Wang et al., 2019a)78.961.0 ±0.1
VointNet (w/o xyz)79.665.4 ±0.1
VointNet (w/o xyz) + Aug.68.068.5 ±0.1
VointNet (w/ xyz)81.261.5 ±0.2
+ +Table 9: Rotation Robustness for 3D Part Segmentation. At test time, we randomly rotate objects from ShapeNet Parts (Yi et al., 2016) and report the Ins. mIoUs of our VointNet compared to trained PointNet (Qi et al., 2017a) and DGCNN (Wang et al., 2019c). Note how VointNet's performance largely exceeds the baselines in realistic unaligned scenarios, highlighting the benefit of view dependency. If we use rotation augmentation in training for the baselines, the rotated performance improves, but the unrotated performance drops. + +# C.2 DETAILED ANALYSIS + +Effect of Pretraining. We study the effect of pretraining the 2D backbone C for 3D classification on ModelNet40. Training a ViT with Mean Fuse for 3D classification on ModelNet40 obtains 92.2 test Acc. with ImageNet pretraining and 80.0 test Acc. from scratch. Other multi-view networks, e.g. MVCNN (Su et al., 2015), ViewGCN(Wei et al., 2020), and MVTN(Hamdi et al., 2021) all use ImageNet pretraining, which is not unique to Voints. + +Classification Backbone. We study the effect of ablating the 2D backbone C for 3D classification on ModelNet40. We show in Table 10 the performance of VointNet (MLP) when Vit-B (Dosovitskiy et al., 2021) and ResNet-18 (He et al., 2015) are used. We also show that following the per-point classification setup instead of the per-shape for 3D shape classification leads to worse performance for VointNet and the naive multi-view. This is why we used the per-shape approach when adopting VointNet for 3D classification (using one Voint for the entire shape). + +Number of points and visibility. Table 11 studies the effect of point number on 3D part segmentation performance, when different numbers of views are used. The visibility ratio is also reported in each case. + +Points color. We colored the points with ground truth normals as in Figure 16, when they are available (ShapeNet Parts), and we used white colors as in Figure 9, when other baselines do not use normals. 
We ablate the color of the points on VointNet (MLP) with normals colors, white color, and NOCs colors (Wang et al., 2019b). We obtain the following segmentation mIoU results: (normals: 80.6), (white: 74.7), and (NOCs: 57.9). + +Time and Memory Requirements. To assess the contribution of the Voint module, we take a macroscopic look at the time and memory requirements of each component in the pipeline. We record the number of floating-point operations (GFLOPs) and the time of a forward pass for a single input sample. In Table 12, the VointNet module contributes negligibly to the memory requirements compared to multi-view and point networks. + +Feature Size $(d)$ . We study the effect of the feature size $d$ on the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 12. We note that the performance peaks at $d = 128$ , but it is close to what we use in the main results $(d = 64)$ . + +
View Aggregation2D Backbone
ResNet18 (per-shape)ViT-B (per-shape)DeepLabV3 (per-point)
VointNet91.292.810.2
+ +Table 10: Ablation Study for 3D Classification. We study the effect of different 2D backbone for ModelNet40 3D classification task. We compare VointNet's performance to naive multi-view (e.g. MVCNN (Su et al., 2015) or Mean Fuse (Kundu et al., 2020)) using the same 2D backbone. Note that using the per-point classification setup instead of the per-shape for 3D shape classification leads to worse performance for VointNet and the naive multi-view. + +
Points #MetricNumber of Views
24812
500visibility99.199.9100100
mIoU69.273.976.076.4
1000visibility98.099.7100100
mIoU69.574.376.577.1
2000visibility95.799.299.899.9
mIoU69.775.077.778.5
+ +

Table 11: Analysis on Number of Points and Visibility. We show the Instance mIoUs and visibility ratio $(1 - \frac{\text{empty}}{\text{total}})\%$ of our VointNet on ShapeNet Parts when varying the number of points and the number of views.

Model Depth $(l_v)$ . We study the effect of the model depth $l_v$ on the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 13. We note that the model depth of VointNet does not enhance the performance significantly. Our choice of $l_v = 4$ balances the performance and the memory/computations requirements of VointNet (MLP).

Distance to the Object. We study the effect of the distance to the object in rendering (as in Figure 17) on the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 14. We note that our default choice of 1.0 is actually reasonable. This choice of distance shows the object entirely (as illustrated in Figure 17), but also covers the details needed for small parts segmentation (see Figure 11).

Image Size $(H,W)$ . We study the effect of the image size $H\& W$ on the performance of the Mean Fuse (Kundu et al., 2020) baseline when training the 2D backbone for 3D part segmentation. We plot the results (with confidence intervals) in Figure 15.

Number of Views on Classification. We study the effect of the number of views (M) on classification accuracy on ModelNet40 Wu et al. (2015) of VointNet and report results in Table 13.

Unprojection Operation Speed. We evaluate the speed of the unprojection operation $\Phi_{\mathbf{B}}$ and report the average latency of 10,000 runs (in ms) in Table 14.

Point Rendering Speed. We evaluate the speed of the point cloud renderer $\mathbf{R}$ used in the Voint pipeline from Pytorch3D Ravi et al. (2020) and report the average latency of 1,000 renderings (in ms/image) in Table 15. 
+ +# C.3 VISUALIZATIONS + +In Figure 16 and 17, we visualize the multi-view renderings of the point clouds along with the 2D learned features based on the DeepLabV3 (Chen et al., 2018) backbone. These features are then unprojected and transformed by VointNet to obtain 3D semantic labels. + +
NetworkGFLOPsTime (ms)Parameters # (M)
MVCNN (Su et al., 2015)43.7239.8911.20
ViewGCN (Wei et al., 2020)44.1926.0623.56
ResNet 18 (He et al., 2015)3.643.7011.20
ResNet 50 (He et al., 2015)8.249.4223.59
ViT-B (Dosovitskiy et al., 2021)33.7012.4686.57
ViT-L (Dosovitskiy et al., 2021)119.3029.28304.33
FCN (Long et al., 2015)53.1310.3432.97
DeeplabV3 (Chen et al., 2018)92.6120.6258.64
PointNet (Qi et al., 2017a)1.784.243.50
DGCNN (Wang et al., 2019c)10.420.9516.350
MVTN (Hamdi et al., 2021)1.784.243.5
VointNet (MLP)1.902.900.04
VointNet (GCN)16.1832.100.05
VointNet (GAT)32.0568.710.07
Full Voint pipeline94.5123.5058.68
+ +Table 12: Time and Memory Requirements. We assess the contribution of the Voint module to the time and memory requirements in the multi-view and point cloud pipeline. Note that VointNet (shared MLP) is almost 100 times smaller than PointNet (Qi et al., 2017a). + +![](images/3413317cf245b6b22fd5a908edc91f26e73ed14cffeb764360672238e7bce506.jpg) +Figure 12: The Effect of Feature Size $d$ . We plot Ins. mIoU of 3D segmentation vs. the feature size $d$ used in training on ShapeNet Parts (Yi et al., 2016). We note that the performance peaks at $d = 128$ , but it is close to what we use in the main results ( $d = 64$ ). + +![](images/01e61314143ad25c176cabb5dc11f6aaf12fcedd834931452f72552917cdb345.jpg) +Figure 13: The Effect of Model Depth $l_{v}$ . We plot Ins. mIoU of 3D segmentation vs. the model depth $l_{v}$ used in training on ShapeNet Parts (Yi et al., 2016). We note that model depth of VointNet does not enhance the performance significantly. Our choice of $l_{v} = 4$ balances the performance and the memory/computations requirements of VointNet (MLP). + +![](images/1b0ad412327d57e53c249108a0b45a1fb42553e42323dd97babc2396302b514e.jpg) +Figure 14: The Effect of Distance to the Object. We plot Ins. mIoU of 3D segmentation vs. the distance to the object used in inference on ShapeNet Parts (Yi et al., 2016). We note that our default choice of 1.0 is actually reasonable. This choice of distance shows the object entirely (as illustrated in Figure 17), but also cover the details needed for small parts segmentation (see Figure 11). + +![](images/e3aa40f0be56a237c5c00ea85726607b465624d13e968d151ab38d5a45b193ed.jpg) +Figure 15: The Effect of Image Size $H, W$ . We plot Ins. mIoU of 3D segmentation vs. the image size used in inference on ShapeNet Parts (Yi et al., 2016). + +![](images/177dcc285a4a120e89a761450d372408dc02ebe73f3aab8f9b2f0e8c97be5aba.jpg) +Figure 16: Multi-view Projected Segmentation 1. We show how, after rendering points, we can segment in the image space. 
For each example, we show (INPUT): the projections of the points (colored with normals) used in training with random view-points. (PRED 2D): the segmentation prediction of the 2D backbone (DeepLabV3) (Chen et al., 2018). (PRED 3D): the unprojected 3D segmentation prediction. $(GT)$ : the 3D segmentation ground truth. + +![](images/5399f426231ab43347b5b869edd7def9af0be33cbd699d4076f8bc3b337f0b29.jpg) +Figure 17: Multi-view Projected Segmentation 2. We show how, after rendering points, we can segment in the image space. For each example, we show (INPUT): the projections of the points (colored with normals) used in training with random view-points. (PRED 2D): the segmentation prediction of the 2D backbone (DeepLabV3) (Chen et al., 2018). (PRED 3D): the unprojected 3D segmentation prediction. $(GT)$ : the 3D segmentation ground truth. + +
MethodNumber of Views
46810
VointNet (Cls. Acc.)90.390.892.092.3
+ +Table 13: Effect of the Number of Views on Classification. We report the classification accuracy of VointNet vs. the number of views (M) used in the training on ModelNet40. + +
MethodNumber of Views
124681012
Features Unprojection3.05.311.4515.717.229.724.0
Labels Unprojection2.62.53.43.13.03.23.6
+ +Table 14: Unprojection Operation Speed. We report the average latency (in ms) over 10,000 runs of the unprojection operation with its two forms: features unprojection (used in mean) and labels unprojection (used in mode). + +
CriteriaNumber of Points
1e21e31e41e51e6
Point Rendering Speed (ms/image)7.27.67.710.437.7
+ +Table 15: Point Rendering Speed. We report the average rendering speed (in ms/image) over 1,000 renderings of the point cloud renderer Ravi et al. (2020) used in Voint clouds. \ No newline at end of file diff --git a/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/images.zip b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..3594dc74c00684270edb8efd4db02142e253c875 --- /dev/null +++ b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8db3161437e2e62992f49e8aed224279e01ea8c5b4e4650d4370ce5f8bd8788b +size 1800055 diff --git a/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/layout.json b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e4fe7244bf972142ecff5a51e006f96f600135f0 --- /dev/null +++ b/2023/Voint Cloud_ Multi-View Point Cloud Representation for 3D Understanding/layout.json @@ -0,0 +1,18535 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 507, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 507, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 507, + 118 + ], + "type": "text", + "content": "VOINT CLOUD: MULTI-VIEW POINT CLOUD REPRESENTATION FOR 3D UNDERSTANDING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 134, + 199, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 134, + 199, + 146 + ], + "spans": [ + { + "bbox": [ + 111, + 134, + 199, + 146 + ], + "type": "text", + "content": "Abdullah Hamdi" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 264, + 134, + 342, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 134, + 342, + 
145 + ], + "spans": [ + { + "bbox": [ + 264, + 134, + 342, + 145 + ], + "type": "text", + "content": "Silvio Giancola" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 408, + 135, + 498, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 408, + 135, + 498, + 145 + ], + "spans": [ + { + "bbox": [ + 408, + 135, + 498, + 145 + ], + "type": "text", + "content": "Bernard Ghanem" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 163, + 550, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 163, + 550, + 186 + ], + "spans": [ + { + "bbox": [ + 111, + 163, + 550, + 186 + ], + "type": "text", + "content": "King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia {abdullah.hamdi, silvio.giancola, bernard.ghanem}@kaust.edu.sa" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 275, + 214, + 335, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 275, + 214, + 335, + 227 + ], + "spans": [ + { + "bbox": [ + 275, + 214, + 335, + 227 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 240, + 470, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 240, + 470, + 406 + ], + "spans": [ + { + "bbox": [ + 140, + 240, + 470, + 406 + ], + "type": "text", + "content": "Multi-view projection methods have demonstrated promising performance on 3D understanding tasks like 3D classification and segmentation. However, it remains unclear how to combine such multi-view methods with the widely available 3D point clouds. Previous methods use unlearned heuristics to combine features at the point level. To this end, we introduce the concept of the multi-view point cloud (Voint cloud), representing each 3D point as a set of features extracted from several view-points. 
This novel 3D Voint cloud representation combines the compactness of 3D point cloud representation with the natural view-awareness of multi-view representation. Naturally, we can equip this new representation with convolutional and pooling operations. We deploy a Voint neural network (VointNet) to learn representations in the Voint space. Our novel representation achieves state-of-the-art performance on 3D classification, shape retrieval, and robust 3D part segmentation on standard benchmarks (ScanObjectNN, ShapeNet Core55, and ShapeNet Parts).1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 428, + 209, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 428, + 209, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 209, + 440 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 453, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 506, + 608 + ], + "type": "text", + "content": "A fundamental question in 3D computer vision and computer graphics is how to represent 3D data (Mescheder et al., 2019; Qi et al., 2017a; Maturana & Scherer, 2015). This question becomes particularly vital given how the success of deep learning in 2D computer vision has pushed for the wide adoption of deep learning in 3D vision and graphics. In fact, deep networks already achieve impressive results in 3D classification (Hamdi et al., 2021), 3D segmentation (Hu et al., 2021), 3D detection (Liu et al., 2021a), 3D reconstruction (Mescheder et al., 2019), and novel view synthesis (Mildenhall et al., 2020). 3D computer vision networks either rely on direct 3D representations, indirect 2D projection on images, or a mixture of both. Direct approaches operate on 3D data commonly represented with point clouds (Qi et al., 2017a), meshes (Feng et al., 2019), or voxels (Choy et al., 2019). 
In contrast, indirect approaches commonly render multiple 2D views of objects or scenes (Su et al., 2015), and process each image with a traditional 2D image-based architecture. The human visual system is closer to such a multi-view indirect approach for 3D understanding, as it receives streams of rendered images rather than explicit 3D data." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 612, + 506, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 713 + ], + "type": "text", + "content": "Tackling 3D vision tasks with indirect approaches has three main advantages: (i) mature and transferable 2D computer vision models (CNNs, Transformers, etc.), (ii) large and diverse labeled image datasets for pre-training (e.g. ImageNet (Russakovsky et al., 2014)), and (iii) the multi-view images give context-rich features based on the viewing angle, which are different from the geometric 3D neighborhood features. Multi-view approaches achieve impressive performance in 3D shape classification and segmentation (Wei et al., 2020; Hamdi et al., 2021; Dai & Nießner, 2018). However, the challenge with the multi-view representation (especially for dense predictions) lies in properly aggregating the per-view features with 3D point clouds. 
The appropriate aggregation is necessary to obtain representative 3D point" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 384, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 384, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 384, + 732 + ], + "type": "text", + "content": "1The code is available at https://github.com/ajhamdi/vointcloud" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 143, + 79, + 460, + 235 + ], + "blocks": [ + { + "bbox": [ + 143, + 79, + 460, + 235 + ], + "lines": [ + { + "bbox": [ + 143, + 79, + 460, + 235 + ], + "spans": [ + { + "bbox": [ + 143, + 79, + 460, + 235 + ], + "type": "image", + "image_path": "0f87af169b3654c00a374430901e2066ece51e5796b3b55094f704f8b831430b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 242, + 506, + 285 + ], + "lines": [ + { + "bbox": [ + 104, + 242, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 104, + 242, + 506, + 285 + ], + "type": "text", + "content": "Figure 1: 3D Voint Clouds. We propose the multi-view point cloud (Voint cloud), a novel 3D representation that is compact and naturally descriptive of view projections of a 3D point cloud. 
Each point in the 3D cloud is tagged with a Voint, which accumulates view-features for that point. Note that not all 3D points are visible from all views. The set of Voints constructs a Voint cloud." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 357, + 504, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 504, + 445 + ], + "type": "text", + "content": "clouds with a single feature per point suitable for typical point cloud processing pipelines. Previous multi-view works rely on heuristics (e.g. average or label mode pooling) after mapping pixels to points (Kundu et al., 2020; Wang et al., 2019a), or multi-view fusion with voxels (Dai & Nießner, 2018). Such setups might not be optimal for a few reasons. (i) Such heuristics may aggregate information of misleading projections that are obtained from arbitrary view-points. For example, looking at an object from the bottom and processing that view independently can carry wrong information about the object's content when combined with other views. (ii) The views lack geometric 3D information." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 451, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 506, + 639 + ], + "type": "text", + "content": "To this end, we propose a new hybrid 3D data structure that inherits the merits of point clouds (i.e. compactness, flexibility, and 3D descriptiveness) and leverages the benefits of rich perceptual features of multi-view projections. We call this new representation multi-view point cloud (or Voint cloud) and illustrate it in Figure 1. A Voint cloud is a set of Voints, where each Voint is a set of view-dependent features (view-features) that correspond to the same point in the 3D point cloud. 
The cardinality of these view-features may differ from one Voint to another. In Table 1, we compare some of the widely used 3D representations and our Voint cloud representation. Voint clouds inherit the characteristics of the parent explicit 3D point clouds, which facilitates learning Voint representations for a variety of vision applications (e.g. point cloud classification and segmentation). To deploy deep learning on the new Voint space, we define basic operations on Voints, such as pooling and convolution. Based on these operations, we define a practical way of building Voint neural networks that we dub VointNet. VointNet takes a Voint cloud and outputs point cloud features for 3D point cloud processing. We show how learning this Voint cloud representation leads to strong performance and gained robustness for the tasks of 3D classification, 3D object retrieval, and 3D part segmentation on standard benchmarks like ScanObjectNN (Uy et al., 2019), and ShapeNet (Chang et al., 2015)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 643, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 732 + ], + "type": "text", + "content": "Contributions: (i) We propose a novel multi-view 3D point cloud representation (denoted as Voint cloud), which represents each point (namely a Voint) as a set of features from different view-points. (ii) We define pooling and convolutional operations at the Voint level to construct a Voint Neural Network (VointNet) capable of learning to aggregate information from multiple views in the Voint space. (iii) Our VointNet reaches state-of-the-art performance on several 3D understanding tasks, including 3D shape classification, retrieval, and robust part segmentation. Further, VointNet achieves robustness improvement to occlusion and rotation." 
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 310, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 310, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 310, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 80, + 502, + 166 + ], + "blocks": [ + { + "bbox": [ + 108, + 80, + 502, + 166 + ], + "lines": [ + { + "bbox": [ + 108, + 80, + 502, + 166 + ], + "spans": [ + { + "bbox": [ + 108, + 80, + 502, + 166 + ], + "type": "table", + "html": "
3D RepresentationExplicitnessView-BasedMain Use3D Expressiveness
Point CloudsExplicitX3D UnderstandingMedium
Multi-View ProjectionsImplicit3D UnderstandingLow
VoxelsExplicitX3D UnderstandingMedium
MeshExplicitX3D ModelingHigh
NeRFsImplicitNovel View SynthesisMedium
Voint Clouds (ours)Explicit3D UnderstandingMedium
", + "image_path": "447151a4e6a0df30286a663cddb261b16740e2bedc4d81013124f7e5464df52e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 173, + 504, + 205 + ], + "lines": [ + { + "bbox": [ + 104, + 173, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 173, + 504, + 205 + ], + "type": "text", + "content": "Table 1: Comparison of Different 3D Representations. We compare some of the widely used 3D representations to our proposed Voint cloud. Note that our Voint cloud shares the view-dependency of NeRFs (Mildenhall et al., 2020) while inheriting the merits of 3D point clouds." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 231, + 217, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 217, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 217, + 243 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 255, + 506, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 506, + 376 + ], + "type": "text", + "content": "Learning on 3D Point Clouds. 3D point clouds are widely used for 3D representation in computer vision due to their compactness, flexibility, and because they can be obtained naturally from sensors like LiDAR and RGBD cameras. PointNet (Qi et al., 2017a) paved the way as the first deep learning algorithm to operate directly on 3D point clouds. It computes point features independently and aggregates them using an order-invariant function like max-pooling. Subsequent works focused on finding neighborhoods of points to define point convolutional operations (Qi et al., 2017b; Wang et al., 2019c; Li et al., 2018; Han et al., 2019). 
Several recent works combine point cloud representations with other 3D modalities like voxels (Liu et al., 2019b; You et al., 2018) or multi-view images (Jaritz et al., 2019). We propose a novel Voint cloud representation for 3D shapes and investigate novel architectures that aggregate view-dependent features at the 3D point level." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 381, + 504, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 381, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 381, + 504, + 503 + ], + "type": "text", + "content": "Multi-View Applications. The idea of using 2D images to understand the 3D world was initially proposed in 1994 by Bradski et al. (Bradski & Grossberg, 1994). This intuitive multi-view approach was combined with deep learning for 3D understanding in MVCNN (Su et al., 2015). A line of works continued developing multi-view approaches for classification and retrieval by improving the aggregation of the view-features from each image view (Kanezaki et al., 2018; Esteves et al., 2019; Cohen & Welling, 2016; Wei et al., 2020; Hamdi et al., 2021). In this work, we fuse the concept of multi-view into the 3D structure itself, such that every 3D point would have an independent set of view-features according to the view-points available in the setup. Our Voints are aligned with the sampled 3D point cloud, offering a compact representation that allows for efficient computation and memory usage while maintaining the view-dependent component that facilitates view-based learning for vision." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 508, + 506, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 508, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 508, + 506, + 641 + ], + "type": "text", + "content": "Hybrid Multi-View with 3D Data. 
On the task of 3D semantic segmentation, a smaller number of works tried to follow the multi-view approach (Dai & Nießner, 2018; Kundu et al., 2020; Wang et al., 2019a; Kalogerakis et al., 2017; Jaritz et al., 2019; Liu et al., 2021b; Lyu et al., 2020). A problem arises when combining view features to represent local points/voxels while preserving local geometric features. These methods tend to average the view-features (Kundu et al., 2020; Kalogerakis et al., 2017), propagate the labels only (Wang et al., 2019a), learn from reconstructed points in the neighborhood (Jaritz et al., 2019), order points on a single grid (Lyu et al., 2020), or combine the multi-view features with 3D voxel features (Dai & Nießner, 2018; Hou et al., 2019). To this end, our proposed VointNet operates on the Voint cloud space while preserving the compactness and 3D descriptiveness of the original point cloud. VointNet leverages the power of multi-view features with learned aggregation on the view-features applied to each point independently." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 662, + 211, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 662, + 211, + 675 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 211, + 675 + ], + "type": "text", + "content": "3 METHODOLOGY" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "The primary assumption in our work is that surface 3D points are spherical functions, i.e. their representations depend on the viewing angles observing them. This condition contrasts with most 3D point cloud processing pipelines that assume a view-independent representation of 3D point clouds. The full pipeline is illustrated in Figure 2." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 79, + 507, + 187 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 507, + 187 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 507, + 187 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 507, + 187 + ], + "type": "image", + "image_path": "f18c527b4598c89667a2cfd0519a8caecd049ba9b97eafab366dccb7b44f9b72.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": "Figure 2: Learning from Voint Clouds. 
To construct a 3D Voint cloud " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{X}}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": ", a renderer " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": " renders the point cloud " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": " from view-points " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": " and image features are extracted from the generated images via a 2D backbone " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": ". The image features are then unprojected to the Voint cloud by " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\Phi_{\\mathbf{B}}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": " and passed to VointNet " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{F}}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": ". 
To learn both " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{F}}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": ", a 3D loss on the output points is used with an optional auxiliary 2D loss on " + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 104, + 198, + 506, + 251 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 269, + 216, + 280 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 269, + 216, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 216, + 280 + ], + "type": "text", + "content": "3.1 3D VOINT CLOUD" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": "From Point Clouds to Voint Clouds. A 3D point cloud is a compact 3D representation composed of sampled points on the surface of a 3D object or a scene and can be obtained by different sensors like LiDAR (Chen et al., 2017) or as a result of reconstruction (Okutomi & Kanade, 1993). 
Formally, we define the coordinate function for the surface " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "g_{\\mathrm{s}}(\\mathbf{x}) : \\mathbb{R}^3 \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " as the Sign Distance Function (SDF) in the continuous Euclidean space (Park et al., 2019; Mescheder et al., 2019). The 3D iso-surface is then defined as the set of all points " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " that satisfy the condition " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "g_{\\mathrm{s}}(\\mathbf{x}) = 0" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": ". We define a surface 3D point cloud " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{X} \\in \\mathbb{R}^{N \\times 3}" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " as a set of " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " 3D points, where each point " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " is represented by its 3D coordinates " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "(x_i, y_i, z_i)" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " and satisfies the iso-surface condition as follows: " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": 
"inline_equation", + "content": "\\mathcal{X} = \\{\\mathbf{x}_i \\in \\mathbb{R}^3 \\mid g_{\\mathrm{s}}(\\mathbf{x}_i) = 0\\}_{i=1}^N" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": ". In this work, we aim to fuse the view-dependency to 3D point. Inspired by NeRFs (Mildenhall et al., 2020), we assume that surface points also depend on the view direction from which they are being observed. Specifically, there exists a continuous implicit spherical function " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{g}(\\mathbf{x}, \\mathbf{u}) : \\mathbb{R}^5 \\to \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " that defines the features of each point " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " depending on the view-point direction " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": ". 
Given a set of " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " view-point directions " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{U} \\in \\mathbb{R}^{M \\times 2}" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": ", a Voint " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{x}} \\in \\mathbb{R}^{M \\times d}" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " is a set of " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " view-dependent features of size " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " for the sphere centered at point " + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 285, + 506, + 456 + ], + "type": "text", + "content": " as follows." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 223, + 457, + 504, + 476 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 457, + 504, + 476 + ], + "spans": [ + { + "bbox": [ + 223, + 457, + 504, + 476 + ], + "type": "interline_equation", + "content": "\\widehat {\\mathbf {x}} _ {i} = \\left\\{\\mathbf {g} \\left(\\mathbf {x} _ {i}, \\mathbf {u} _ {j}\\right) \\in \\mathbb {R} ^ {d} \\mid \\mathbf {x} _ {i} \\in \\mathcal {X} \\right\\} _ {j = 1} ^ {M} \\tag {1}", + "image_path": "945e99d44c6599ab833f3f5b32f7d3834e42335e4aac353a06cd895b30ca2997.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "text", + "content": "The Voint cloud " + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{X}}\\in \\mathbb{R}^{N\\times M\\times d} = \\{\\widehat{\\mathbf{x}}_i\\}_{i = 1}^N" + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "text", + "content": " is the set of all " + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "text", + "content": " Voints " + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{x}}_i" + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "text", + "content": " corresponding to the parent point cloud " + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "text", + "content": ". 
Note that we typically do not have access to the underlying implicit function " + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{g}" + }, + { + "bbox": [ + 104, + 479, + 504, + 516 + ], + "type": "text", + "content": " and we approximate it with the following three steps." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 520, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": "1-Multi-View Projection. As mentioned earlier, a Voint combines multiple view-features of the same 3D point. These view-features come from a multi-view projection of the points by a point cloud renderer " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\mathbf{R}:\\mathbb{R}^{N\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times 3}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " that renders the point cloud " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " from multiple view-points " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " images of size " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "H\\times W\\times 3" + }, + { + "bbox": [ + 104, + 520, + 505, 
+ 631 + ], + "type": "text", + "content": ". In addition to projecting the point cloud into the image space, " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " defines the index mapping " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\mathbf{B}\\in \\{0,\\dots,N\\}^{M\\times H\\times W}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " between each pixel to the N points and background it renders. Also, " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " outputs the visibility binary matrix " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\mathbf{V}\\in \\{0,1\\}^{N\\times M}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " for each point from each view. Since not all points appear in all the views due to pixel discretization, the visibility score " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{i,j}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " defines if the Voint " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_i" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " is visible in the view " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_j" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": ". 
The matrix " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\mathbf{B}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " is crucial for unprojection, while " + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "inline_equation", + "content": "\mathbf{V}" + }, + { + "bbox": [ + 104, + 520, + 505, + 631 + ], + "type": "text", + "content": " is needed for defining meaningful operations on Voints." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 635, + 504, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 504, + 692 + ], + "type": "text", + "content": "2-Multi-View Feature Extraction. The rendered images are processed by a function " + }, + { + "bbox": [ + 104, + 635, + 504, + 692 + ], + "type": "inline_equation", + "content": "\mathbf{C}:\mathbb{R}^{M\times H\times W\times 3}\to \mathbb{R}^{M\times H\times W\times d}" + }, + { + "bbox": [ + 104, + 635, + 504, + 692 + ], + "type": "text", + "content": " that extracts image features, as shown in Figure 2. If " + }, + { + "bbox": [ + 104, + 635, + 504, + 692 + ], + "type": "inline_equation", + "content": "\mathbf{C}" + }, + { + "bbox": [ + 104, + 635, + 504, + 692 + ], + "type": "text", + "content": " is the identity function, all the view-features would typically be the RGB value of the corresponding point. However, the " + }, + { + "bbox": [ + 104, + 635, + 504, + 692 + ], + "type": "inline_equation", + "content": "\mathbf{C}" + }, + { + "bbox": [ + 104, + 635, + 504, + 692 + ], + "type": "text", + "content": " function can be a 2D network dedicated to the downstream task and can extract useful global and local features about each view." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "type": "text", + "content": "3-Multi-View Unprojection. We propose a module " + }, + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\Phi_{\\mathbf{B}}:\\mathbb{R}^{M\\times H\\times W\\times d}\\to \\mathbb{R}^{N\\times M\\times d}" + }, + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "type": "text", + "content": " that unprojects the 2D features from each pixel to be 3D view-features at the corresponding voint. Using the mapping " + }, + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{B}" + }, + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "type": "text", + "content": " created by the renderer, " + }, + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\Phi_{\\mathbf{B}}" + }, + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "type": "text", + "content": " forms the Voint cloud features " + }, + { + "bbox": [ + 104, + 696, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{X}}" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 11 + } + ], + 
"page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "text", + "content": "To summarize, the output Voint cloud is described by Eq (1), where " + }, + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "inline_equation", + "content": "\\mathbf{g}(\\mathbf{x}_i,\\mathbf{u}_j) = \\Phi_{\\mathbf{B}}\\big(\\mathbf{C}(\\mathbf{R}(\\mathcal{X},\\mathbf{u}_j))\\big)_i" + }, + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "text", + "content": " and the features are only defined for a view " + }, + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "text", + "content": " of Voint " + }, + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_i" + }, + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{i,j} = 1" + }, + { + "bbox": [ + 104, + 82, + 504, + 108 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 119, + 297, + 130 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 119, + 297, + 130 + ], + "spans": [ + { + "bbox": [ + 104, + 119, + 297, + 130 + ], + "type": "text", + "content": "3.2 OPERATIONS ON 3D VOINT CLOUDS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 135, + 506, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 135, + 506, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 135, + 506, + 192 + ], + "type": "text", + "content": "We show in the Appendix that a functional form of max-pooled individual view-features of a set of angles can approximate any function in the spherical coordinates. We provide a theorem that extends PointNet's theorem of point cloud functional composition (Qi et al., 2017a) and its Universal Approximation to spherical functions underlying Voints. Next, we define a set of operations on Voints as building blocks for Voint neural networks (VointNet)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 196, + 504, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 196, + 504, + 219 + ], + "spans": [ + { + "bbox": [ + 104, + 196, + 504, + 219 + ], + "type": "text", + "content": "VointMax. We define VointMax as max-pooling on the visible view-features along the views dimension of the voint " + }, + { + "bbox": [ + 104, + 196, + 504, + 219 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}" + }, + { + "bbox": [ + 104, + 196, + 504, + 219 + ], + "type": "text", + "content": ". 
For all " + }, + { + "bbox": [ + 104, + 196, + 504, + 219 + ], + "type": "inline_equation", + "content": "i \\in 1,2,\\dots,N" + }, + { + "bbox": [ + 104, + 196, + 504, + 219 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 196, + 504, + 219 + ], + "type": "inline_equation", + "content": "j \\in 1,2,\\dots,M" + }, + { + "bbox": [ + 104, + 196, + 504, + 219 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 212, + 224, + 503, + 243 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 224, + 503, + 243 + ], + "spans": [ + { + "bbox": [ + 212, + 224, + 503, + 243 + ], + "type": "interline_equation", + "content": "\\operatorname {V o i n t M a x} \\left(\\widehat {\\mathbf {x}} _ {i}\\right) = \\max _ {j} \\widehat {\\mathbf {x}} _ {i, j}, \\quad \\text {s . t .} \\quad \\mathbf {V} _ {i, j} = 1 \\tag {2}", + "image_path": "784333b8f482f568db5c5500ae777dcc290286b688bf21dbabbcde71d7adcadd.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "text", + "content": "VointConv. 
We define the convolution operation " + }, + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{V}}: \\mathbb{R}^{N \\times M \\times d} \\to \\mathbb{R}^{N \\times M \\times d'}" + }, + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "text", + "content": " as any learnable function that operates on the Voint space with shared weights on all the Voints and has the view-features input size " + }, + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "text", + "content": " and outputs view-features of size " + }, + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "inline_equation", + "content": "d'" + }, + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "text", + "content": " and consists of " + }, + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "inline_equation", + "content": "l_{V}" + }, + { + "bbox": [ + 104, + 251, + 504, + 320 + ], + "type": "text", + "content": " layers. A simple example of this VointConv operation is the shared MLP applied only on the visible view-features. We provide further details for such operations in Section 4.2, which result in different non-exhaustive variants of VointNet." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 332, + 286, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 332, + 286, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 332, + 286, + 344 + ], + "type": "text", + "content": "3.3 LEARNING ON 3D VOINT CLOUDS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 349, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 349, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 504, + 384 + ], + "type": "text", + "content": "VointNet. 
The goal of the VointNet model is to obtain multi-view point cloud features that can be subsequently used by any point cloud processing pipeline. The VointNet module " + }, + { + "bbox": [ + 104, + 349, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{F}}:\\mathbb{R}^{N\\times M\\times d}\\to \\mathbb{R}^{N\\times d}" + }, + { + "bbox": [ + 104, + 349, + 504, + 384 + ], + "type": "text", + "content": " is defined as follows." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 226, + 388, + 504, + 409 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 388, + 504, + 409 + ], + "spans": [ + { + "bbox": [ + 226, + 388, + 504, + 409 + ], + "type": "interline_equation", + "content": "\\widehat {\\mathbf {F}} (\\widehat {\\boldsymbol {\\chi}}) = h _ {\\mathrm {P}} \\left(\\operatorname {V o i n t M a x} \\left(h _ {\\mathrm {V}} (\\widehat {\\boldsymbol {\\chi}})\\right)\\right), \\tag {3}", + "image_path": "57ded056defe4abc79c205bea398841679037a1f4f13f5c33a6de7082a4d5af5.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 415, + 504, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 504, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 504, + 450 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 415, + 504, + 450 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{P}}" + }, + { + "bbox": [ + 104, + 415, + 504, + 450 + ], + "type": "text", + "content": " is any point convolutional operation (e.g. shared MLP or EdgeConv). 
VointNet " + }, + { + "bbox": [ + 104, + 415, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{F}}" + }, + { + "bbox": [ + 104, + 415, + 504, + 450 + ], + "type": "text", + "content": " transforms the individual view-features using the learned VointConv " + }, + { + "bbox": [ + 104, + 415, + 504, + 450 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{V}}" + }, + { + "bbox": [ + 104, + 415, + 504, + 450 + ], + "type": "text", + "content": " before VointMax is applied on the view-features to obtain point features." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "spans": [ + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "text", + "content": "VointNet Pipeline for 3D Point Cloud Processing. The full pipeline is described in Figure 2. The loss for this pipeline can be described as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 195, + 483, + 504, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 483, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 195, + 483, + 504, + 517 + ], + "type": "interline_equation", + "content": "\\underset {\\boldsymbol {\\theta} _ {\\mathbf {C}}, \\boldsymbol {\\theta} _ {\\widehat {\\mathbf {F}}}} {\\arg \\min } \\sum_ {i} ^ {N} L \\left(\\widehat {\\mathbf {F}} \\left(\\Phi_ {\\mathbf {B}} \\left(\\mathbf {C} \\left(\\mathbf {R} (\\mathcal {X}, \\mathcal {U})\\right)\\right)\\right) _ {i}, \\mathbf {y} _ {i}\\right), \\tag {4}", + "image_path": "320f81887fad39d0781607ecd44b6781b9baf32b4fd2e2a0551099c39a8e4ec3.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "text", + "content": "where " + }, + { + 
"bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "text", + "content": " is a Cross-Entropy (CE) loss defined on all the training points " + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "inline_equation", + "content": "\\{y_i\\}_{i=1}^N" + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "text", + "content": " defines the labels of these points. The other components " + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "inline_equation", + "content": "(\\mathbf{R}, \\Phi_{\\mathbf{B}}, \\mathcal{U}, \\mathbf{C})" + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "text", + "content": " are all defined before. The weights to be jointly learned are those of the 2D backbone " + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "inline_equation", + "content": "(\\theta_{\\mathbf{C}})" + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "text", + "content": " and those of the VointNet " + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "inline_equation", + "content": "(\\theta_{\\widehat{\\mathbf{F}}})" + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "text", + "content": " using the same 3D loss. An auxiliary 2D loss on " + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathbf{C}}" + }, + { + "bbox": [ + 104, + 523, + 506, + 602 + ], + "type": "text", + "content": " can be optionally added for supervision at the image level. For classification, the entire object can be treated as a single Voint, and the global features of each view would be the view-features of that Voint. 
We analyze different setups in detail in Section 6." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 617, + 204, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 204, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 204, + 629 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 638, + 236, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 638, + 236, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 236, + 649 + ], + "type": "text", + "content": "4.1 EXPERIMENTAL SETUP" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "Datasets. We benchmark VointNet on the challenging and realistic ScanObjectNN dataset for 3D point cloud classification (Uy et al., 2019). The dataset has three variants, includes background and occlusion, and has 15 categories and 2,902 point clouds. For the shape retrieval task, we benchmark on ShapeNet Core55 as a subset of ShapeNet (Chang et al., 2015). The dataset consists of 51,162 3D mesh objects labeled with 55 object classes. We follow the MVTN's setup (Hamdi et al., 2021) in sampling 5,000 points from each mesh object to obtain point cloud. 
On the other hand, for the task of shape part segmentation," + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 129, + 79, + 479, + 225 + ], + "blocks": [ + { + "bbox": [ + 129, + 79, + 479, + 225 + ], + "lines": [ + { + "bbox": [ + 129, + 79, + 479, + 225 + ], + "spans": [ + { + "bbox": [ + 129, + 79, + 479, + 225 + ], + "type": "table", + "html": "
MethodData TypeClassificationAccuracy
OBJ_BGOBJ_ONLYHardest
PointNet (Qi et al., 2017a)Points73.379.268.0
SpiderCNN (Xu et al., 2018)Points77.179.573.7
PointNet ++ (Qi et al., 2017b)Points82.384.377.9
PointCNN (Li et al., 2018)Points86.185.578.5
DGCNN (Wang et al., 2019c)Points82.886.278.1
SimpleView (Goyal et al., 2021)M-View--79.5
BGA-DGCNN (Uy et al., 2019)Points--79.7
BGA-PN++ (Uy et al., 2019)Points--80.2
MVTN (Hamdi et al., 2021)M-View92.692.382.8
VointNet (ours)Voints93.794.085.4
", + "image_path": "c1e67f51fccdb55e90718f0ed7376ad44684b9a3217a843d35efb1f85679549b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 233, + 506, + 277 + ], + "lines": [ + { + "bbox": [ + 104, + 233, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 233, + 506, + 277 + ], + "type": "text", + "content": "Table 2: 3D Point Cloud Classification on ScanObjectNN. We report the accuracy of VointNet in 3D point cloud classification on three different variants of ScanObjectNN (Uy et al., 2019). Bold denotes the best result in its setup. Note that the Hardest variant includes rotated and translated objects, which highlights the benefits of Voints on challenging scenarios." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 308, + 506, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 506, + 354 + ], + "type": "text", + "content": "we test on ShapeNet Parts (Yi et al., 2016), a subset of ShapeNet (Chang et al., 2015) that consists of 16,872 point cloud objects from 16 categories and 50 parts. For occlusion robustness, we follow MVTN (Hamdi et al., 2021) and test on ModelNet40 (Wu et al., 2015), which is composed of 40 classes and 12,311 3D objects." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 359, + 506, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 506, + 405 + ], + "type": "text", + "content": "Metrics. For 3D point cloud classification, we report the overall accuracy, while shape retrieval is evaluated using mean Average Precision (mAP) over test queries (Hamdi et al., 2021). 3D semantic segmentation is evaluated using mean Intersection over Union (mIoU) on points. For part segmentation, we report Instance-averaged mIoU (Ins. mIoU)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 408, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 408, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 408, + 506, + 477 + ], + "type": "text", + "content": "Baselines. We include PointNet (Qi et al., 2017a), PointNet++ (Qi et al., 2017b), DGCNN (Wang et al., 2019c), as baselines that use point clouds. We also compare against multi-view classification approaches like MVCNN (Su et al., 2015), SimpleView (Goyal et al., 2021), and MVTN (Hamdi et al., 2021) as baselines for classification and retrieval and adopt some of the multi-view segmentation baselines (e.g. Label Fusion (Wang et al., 2019a) and Mean Fusion (Kundu et al., 2020)) for part segmentation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 502, + 231, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 231, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 231, + 513 + ], + "type": "text", + "content": "4.2 VOINTNET VARIANTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 524, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 524, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 524, + 506, + 548 + ], + "type": "text", + "content": "VointNet in Eq (3) relies on the VointConv operation " + }, + { + "bbox": [ + 104, + 524, + 506, + 548 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{V}}" + }, + { + "bbox": [ + 104, + 524, + 506, + 548 + ], + "type": "text", + "content": " as the basic building block. Here, we briefly describe three examples of " + }, + { + "bbox": [ + 104, + 524, + 506, + 548 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{V}}" + }, + { + "bbox": [ + 104, + 524, + 506, + 548 + ], + "type": "text", + "content": " operations VointNet uses." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "text", + "content": "Shared Multi-Layer Perceptron (MLP). It is the most basic VointConv formulation. For a layer " + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "text", + "content": ", the features of Voint " + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "text", + "content": " at view " + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "text", + "content": " are updated to layer " + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "inline_equation", + "content": "l + 1" + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "text", + "content": " as: " + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_{i,j}^{l + 1} = \\rho (\\mathbf{h}_{i,j}^{l}\\mathcal{W}_{\\rho})" + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "text", + "content": " is the shared MLP with weights " + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\rho}" + }, + { + "bbox": [ + 104, + 552, + 506, + 624 + ], + "type": "text", + "content": " followed by normalization and a nonlinear function (e.g. ReLU). 
This operation is applied on all Voints independently and only involves the visible views-features for each Voint. This formulation extends the shared MLP formulation for PointNet (Qi et al., 2017a) to work on Voints' view-features." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 626, + 506, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 506, + 694 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 506, + 694 + ], + "type": "text", + "content": "Graph Convolution (GCN). We define a fully connected graph for each Voint by creating a virtual center node connected to all the view-features to aggregate their information (similar to \"cls\" token in ViT (Dosovitskiy et al., 2021)). Then, the graph convolution can be defined as the shared MLP (as described above) but on the edge features between all view features, followed by a max pool on the graph neighbors. An additional shared MLP is used before the final output." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "content": "Graph Attention (GAT). A graph attention operation can be defined just like the GCN operation above but with learned attention weights on the graph neighbor's features before averaging them. A shared MLP computes these weights." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 109, + 80, + 500, + 128 + ], + "blocks": [ + { + "bbox": [ + 109, + 80, + 500, + 128 + ], + "lines": [ + { + "bbox": [ + 109, + 80, + 500, + 128 + ], + "spans": [ + { + "bbox": [ + 109, + 80, + 500, + 128 + ], + "type": "table", + "html": "
ResultsMVCNN (Su et al., 2015)RotNet (Kanezaki et al., 2018)ViewGCN (Wei et al., 2020)MVTN (Hamdi et al., 2021)VointNet (ours)
ShapeNet73.577.278.482.983.3
Retr. mAP
", + "image_path": "43cae6bd71c73b896b64451d5fa0cf6f9e718ca55b6a5a098f3c506b4d7bab28.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 161, + 172, + 447, + 273 + ], + "blocks": [ + { + "bbox": [ + 104, + 138, + 504, + 160 + ], + "lines": [ + { + "bbox": [ + 104, + 138, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 138, + 504, + 160 + ], + "type": "text", + "content": "Table 3: 3D Shape Retrieval. We report 3D shape retrieval mAP on ShapeNet Core55 (Chang et al., 2015; Sfikas et al., 2017). VointNet achieves state-of-the-art results on this benchmark." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 161, + 172, + 447, + 273 + ], + "lines": [ + { + "bbox": [ + 161, + 172, + 447, + 273 + ], + "spans": [ + { + "bbox": [ + 161, + 172, + 447, + 273 + ], + "type": "table", + "html": "
MethodData TypePart Segmentation
(Unrotated)(Rotated)
PointNet (Qi et al., 2017a)Points80.136.6 ±0.2
DGCNN (Wang et al., 2019c)Points80.137.1 ±0.2
CurveNet (Xiang et al., 2021)Points84.932.3 ±0.0
Label Fuse (Wang et al., 2019a)M-View80.061.4 ±0.2
Mean Fuse (Kundu et al., 2020)M-View77.562.0 ±0.2
VointNet (ours)Voints81.262.4 ±0.2
", + "image_path": "445f205d21dada46bd83cd6fc5ea7c9095b7ba54b7050cce4f43195cf2fb11f9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 281, + 504, + 332 + ], + "lines": [ + { + "bbox": [ + 104, + 281, + 504, + 332 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 504, + 332 + ], + "type": "text", + "content": "Table 4: Robust 3D Part Segmentation on ShapeNet Parts. We compare the Inst. mIoU of VointNet against other methods in 3D segmentation on ShapeNet Parts (Yi et al., 2016). At test time, we randomly rotate the objects and report the results over ten runs. Note how VointNet's performance largely exceeds the point baselines in the realistic rotated scenarios, while exceeding multi-view baselines on the unrotated benchmark. All the results are reproduced in our setup." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 354, + 254, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 254, + 366 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 254, + 366 + ], + "type": "text", + "content": "4.3 IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "text", + "content": "Rendering and Unprojection. We choose the differentiable point cloud renderer " + }, + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "text", + "content": " from Pytorch3D (Ravi et al., 2020) in our pipeline for its speed and compatibility with Pytorch libraries (Paszke et al., 2017). 
We render point clouds on multi-view images with size " + }, + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "inline_equation", + "content": "224 \\times 224 \\times 3" + }, + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "text", + "content": ". We color the points by their normals' values or keep them white if the normals are not available. Following a similar procedure to (Wei et al., 2020; Hamdi et al., 2021), the view-points setup is randomized during training (using " + }, + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "inline_equation", + "content": "M = 8" + }, + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "text", + "content": " views) and fixed to spherical views in testing (using " + }, + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "inline_equation", + "content": "M = 12" + }, + { + "bbox": [ + 104, + 372, + 504, + 451 + ], + "type": "text", + "content": " views)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "text", + "content": "Architectures. For the 2D backbone " + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "text", + "content": ", we use ViT-B (Dosovitskiy et al., 2021) (with pretrained weights from TIMM library (Wightman, 2019)) for classification and DeepLabV3 (Chen et al., 2018) for segmentation. We use the 3D CE loss on the 3D point cloud output and the 2D CE loss when the loss is defined on the pixels. 
The feature dimension of the VointNet architectures is " + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "inline_equation", + "content": "d = 64" + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "text", + "content": ", and the depth is " + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "inline_equation", + "content": "l_{V} = 4" + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "text", + "content": " layers in " + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "inline_equation", + "content": "h_V" + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "text", + "content": ". The main results are based on VointNet (MLP), unless otherwise specified as in Section 6, where we study in details the effect of VointConv " + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "inline_equation", + "content": "h_\\mathrm{V}" + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 104, + 454, + 504, + 533 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 537, + 506, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 506, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 506, + 616 + ], + "type": "text", + "content": "Training Setup. We train our pipeline in two stages, where we start by training the 2D backbone on the 2D projected labels of the points, then train the entire pipeline end-to-end while focusing the training on the VointNet part. 
We use the AdamW optimizer (Loshchilov & Hutter, 2017) with an initial learning rate of 0.0005 and a step learning rate schedule of " + }, + { + "bbox": [ + 104, + 537, + 506, + 616 + ], + "type": "inline_equation", + "content": "33.3\\%" + }, + { + "bbox": [ + 104, + 537, + 506, + 616 + ], + "type": "text", + "content": " every 12 epochs for 40 epochs. The pipeline is trained with one NVIDIA Tesla V100 GPU. We do not use any data augmentation. More details about the training setup (loss and rendering), VointNet, and the 2D backbone architectures can be found in the Appendix." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 632, + 175, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 632, + 175, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 175, + 645 + ], + "type": "text", + "content": "5 RESULTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": "The main test results of our Voint formulations are summarized in Tables 2,3, 4, and 5. We achieve state-of-the-art performance in the task of 3D classification, retrieval, and robust 3D part segmentation. More importantly, under the realistic rotated setups of ScanObjectNN and ShapeNet Parts, we improve over " + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "inline_equation", + "content": "7.2\\%" + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": " Acc. and " + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": " mIoU respectively compared to point baselines Qi et al. (2017a); Wang et al. (2019c). Following common practice Hamdi et al. 
(2021), we report the best results out of four runs in benchmark tables, but detailed results are provided in the Appendix." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 129, + 78, + 242, + 208 + ], + "blocks": [ + { + "bbox": [ + 129, + 78, + 242, + 208 + ], + "lines": [ + { + "bbox": [ + 129, + 78, + 242, + 208 + ], + "spans": [ + { + "bbox": [ + 129, + 78, + 242, + 208 + ], + "type": "image", + "image_path": "123fba4fd773417d0a4163ecd2fb10d4b2176e02cc5cd0cc864f083a9c86b7cd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "text", + "content": "Figure 3: Qualitative Comparison for Part Segmentation. We compare our VointNet 3D segmentation predictions to Mean Fuse (Kundu et al., 2020) that is using the same trained 2D backbone. Note how VointNet distinguishes detailed parts (e.g. the car window frame)." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 248, + 77, + 355, + 208 + ], + "blocks": [ + { + "bbox": [ + 248, + 77, + 355, + 208 + ], + "lines": [ + { + "bbox": [ + 248, + 77, + 355, + 208 + ], + "spans": [ + { + "bbox": [ + 248, + 77, + 355, + 208 + ], + "type": "image", + "image_path": "cd772a1ce730ead1824f2b7f7a275f785539b7534c5bcf3091b1539db62ac8d3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 358, + 78, + 482, + 206 + ], + "blocks": [ + { + "bbox": [ + 358, + 78, + 482, + 206 + ], + "lines": [ + { + "bbox": [ + 358, + 78, + 482, + 206 + ], + "spans": [ + { + "bbox": [ + 358, + 78, + 482, + 206 + ], + "type": "image", + "image_path": "62f87428c6e762834173ef8afc04dd088d57d8d321339eb21332f097f9ee4c51.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 276, + 257, + 287 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 276, + 257, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 276, + 257, + 287 + ], + "type": "text", + "content": "5.1 3D SHAPE CLASSIFICATION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 294, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 294, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 294, + 504, + 384 + ], + "type": "text", + "content": "Table 2 reports the classification accuracy on the 3D point cloud classification task on ScanObjectNN Uy et al. (2019). It benchmarks VointNet against other recent and strong baselines Hamdi et al. (2021); Goyal et al. (2021); Hamdi et al. (2021). VointNet demonstrates state-of-the-art results on all the variants, including the challenging Hardest (PB_T50_RS) variant that includes challenging scenarios of rotated and translated objects. 
The increase in performance " + }, + { + "bbox": [ + 104, + 294, + 504, + 384 + ], + "type": "inline_equation", + "content": "(+2.6\\%)" + }, + { + "bbox": [ + 104, + 294, + 504, + 384 + ], + "type": "text", + "content": " is significant on this variant, which highlights the benefits of Voints on challenging scenarios, with further affirming results in Section 5.4. We follow exactly the same procedure as in MVTN Hamdi et al. (2021)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 402, + 235, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 402, + 235, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 235, + 414 + ], + "type": "text", + "content": "5.2 3D SHAPE RETRIEVAL" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 424, + 506, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 424, + 506, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 424, + 506, + 459 + ], + "type": "text", + "content": "Table 3 benchmarks the 3D shape retrieval mAP on ShapeNet Core55 Chang et al. (2015). VointNet achieves state-of-the-art performance on ShapeNet Core55. Baseline results are reported from Hamdi et al. (2021)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 477, + 288, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 477, + 288, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 477, + 288, + 488 + ], + "type": "text", + "content": "5.3 ROBUST 3D PART SEGMENTATION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 495, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 628 + ], + "type": "text", + "content": "Table 4 reports the Instance-averaged segmentation mIoU of VointNet compared with other methods on ShapeNet Parts Yi et al. (2016). 
Two variants of the benchmark are reported: unrotated normalized setup, and the rotated realistic setup. For the rotated setup, we follow the previous 3D literature Liu et al. (2019a); Hamdi et al. (2021; 2020) by testing the robustness of trained models by perturbing the shapes in ShapeNet Parts with random rotations at test time (ten runs) and report the averages in Table 4. Note VointNet's improvement over Mean Fuse Kundu et al. (2020) and Label Fuse Wang et al. (2019a) on unrotated setup despite that both baselines use the same trained 2D backbone as VointNet. Also, for rotated setups, point methods don't work as well. All the results in Table 4 are reproduced by our code in the same setup (see the code attached in supplementary material). Figure 3 shows qualitative 3D segmentation results for VointNet and Mean Fuse Kundu et al. (2020) as compared to the ground truth." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 647, + 246, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 246, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 246, + 658 + ], + "type": "text", + "content": "5.4 OCCLUSION ROBUSTNESS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "One of the aspects of the robustness of 3D classification models that have been recently studied is their robustness to occlusion, as detailed in MVTN Hamdi et al. (2021). These simulated occlusions are introduced at test time, and the average test accuracy is reported on each cropping ratio. We benchmark our VointNet against recent baselines in Table 5. PointNet Qi et al. (2017a) and DGCNN Wang et al. (2019c) are used as point-based baselines, and MVTN Hamdi et al. (2021) as a multi-view baseline." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 312, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 312, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 312, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 127, + 79, + 485, + 173 + ], + "blocks": [ + { + "bbox": [ + 127, + 79, + 485, + 173 + ], + "lines": [ + { + "bbox": [ + 127, + 79, + 485, + 173 + ], + "spans": [ + { + "bbox": [ + 127, + 79, + 485, + 173 + ], + "type": "table", + "html": "
MethodData TypeOcclusion Ratio
00.10.20.30.5
PointNet (Qi et al., 2017a)Points89.188.286.181.653.5
DGCNN (Wang et al., 2019c)Points92.177.174.571.230.1
PCT (Guo et al., 2021)Points93.392.691.188.261.9
MVTN (Hamdi et al., 2021)M-View93.890.389.988.367.1
VointNet (ours)Voints92.891.691.289.166.1
", + "image_path": "30dc8227dc10160b94dd67f71c770196730058327863fce4635d5d874f0483e7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 173, + 228, + 437, + 353 + ], + "blocks": [ + { + "bbox": [ + 173, + 228, + 437, + 353 + ], + "lines": [ + { + "bbox": [ + 173, + 228, + 437, + 353 + ], + "spans": [ + { + "bbox": [ + 173, + 228, + 437, + 353 + ], + "type": "image", + "image_path": "ca2196b969d0352d172fa93d3fa3d0cc81b1a8cce986db6e983565528fffa072.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 361, + 504, + 403 + ], + "lines": [ + { + "bbox": [ + 104, + 361, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 504, + 403 + ], + "type": "text", + "content": "Figure 4: Effect of the Number of Views. We plot Ins. mIoU of 3D segmentation vs. the number of views " + }, + { + "bbox": [ + 104, + 361, + 504, + 403 + ], + "type": "inline_equation", + "content": "(M)" + }, + { + "bbox": [ + 104, + 361, + 504, + 403 + ], + "type": "text", + "content": " used in inference on ShapeNet Parts. Note VointNet's consistent improvement over Mean Fuse (Kundu et al., 2020) and Label Fuse (Wang et al., 2019a). Both baselines use the same trained 2D backbone as VointNet and are tested on the same unrotated setup." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 424, + 260, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 424, + 260, + 436 + ], + "spans": [ + { + "bbox": [ + 105, + 424, + 260, + 436 + ], + "type": "text", + "content": "6 ANALYSIS AND INSIGHTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 445, + 506, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 506, + 511 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 506, + 511 + ], + "type": "text", + "content": "Number of Views. We study the effect of the number of views " + }, + { + "bbox": [ + 104, + 445, + 506, + 511 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 445, + 506, + 511 + ], + "type": "text", + "content": " on the performance of 3D part segmentation using multiple views. We compare Mean Fuse (Kundu et al., 2020) and Label Fuse (Wang et al., 2019a) to our VointNet when all of them have the same trained 2D backbone. The views are randomly picked, and the experiments are repeated four times. Ins. mIoU with confidence intervals are shown in Figure 4. We observe a consistent improvement with VointNet over the other two baselines across different numbers of views." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 188, + 521, + 422, + 619 + ], + "blocks": [ + { + "bbox": [ + 104, + 181, + 504, + 212 + ], + "lines": [ + { + "bbox": [ + 104, + 181, + 504, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 504, + 212 + ], + "type": "text", + "content": "Table 5: Occlusion Robustness for 3D Classification. We report the test accuracy on ModelNet40 (Wu et al., 2015) for different occlusion ratios of the data to measure occlusion robustness of different 3D methods." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 188, + 521, + 422, + 619 + ], + "lines": [ + { + "bbox": [ + 188, + 521, + 422, + 619 + ], + "spans": [ + { + "bbox": [ + 188, + 521, + 422, + 619 + ], + "type": "table", + "html": "
2D BackboneVointConvResults
FCNDeepLabV3MLPGCNGATInst. mIoU
---78.8 ± 0.2
---77.6 ± 0.2
---77.1 ± 0.2
---80.6 ± 0.1
---77.2 ± 0.4
---80.4 ± 0.2
", + "image_path": "2a22c063d89175dd92ab9d6f9cd53b5f9aeda1a9455dbf46b83ab612d4b262d9.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 628, + 504, + 651 + ], + "lines": [ + { + "bbox": [ + 104, + 628, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 504, + 651 + ], + "type": "text", + "content": "Table 6: Ablation Study for 3D Segmentation. We ablate different components of VointNet (2D backbone and VointConv choice) and report Ins. mIoU performance on ShapeNet Parts." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "Choice of Backbones. We ablate the choice of the 2D backbone and the VointConv operation used in VointNet and report the segmentation Ins. mIoU results in Table 6. Note how the 2D backbone greatly affects performance, while the VointConv operation type does not. This ablation highlights the importance of the 2D backbone in VointNet pipeline and motivates the use of the simplest variant of VointNet (MLP). We provide a detailed study of more factors as well as compute and memory costs in the Appendix." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 340, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 340, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 340, + 94 + ], + "type": "text", + "content": "7 LIMITATIONS AND ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 168 + ], + "type": "text", + "content": "One aspect limiting the performance of Voints is how well-trained the 2D backbone is for the downstream 3D task. In most cases, the 2D backbone must be pretrained with enough data to learn meaningful information for VointNet. Another aspect that limits the capability of the Voint cloud is how to properly select the view-points for segmentation. Addressing these limitations is an important direction for future work. Also, extending Voint learning on more 3D tasks like 3D scene segmentation and 3D object detection is left for future work." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 173, + 506, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 173, + 506, + 218 + ], + "spans": [ + { + "bbox": [ + 104, + 173, + 506, + 218 + ], + "type": "text", + "content": "Acknowledgments. This work was supported by the King Abdullah University of Science and Technology (KAUST) Office of Sponsored Research through the Visual Computing Center (VCC) funding and the SDAIA-KAUST Center of Excellence in Data Science and Artificial Intelligence (SDAIA-KAUST AI)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 233, + 178, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 233, + 178, + 245 + ], + "spans": [ + { + "bbox": [ + 106, + 233, + 178, + 245 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 251, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 251, + 507, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 251, + 507, + 285 + ], + "spans": [ + { + "bbox": [ + 106, + 251, + 507, + 285 + ], + "type": "text", + "content": "Gary Bradski and Stephen Grossberg. Recognition of 3-d objects from multiple 2-d views by a self-organizing neural architecture. In *From Statistics to Neural Networks*, pp. 349–375. Springer, 1994." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 292, + 507, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 292, + 507, + 348 + ], + "spans": [ + { + "bbox": [ + 106, + 292, + 507, + 348 + ], + "type": "text", + "content": "Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. ShapeNet: An Information-Rich 3D Model Repository. 
Technical Report arXiv:1512.03012 [cs.GR], Stanford University — Princeton University — Toyota Technological Institute at Chicago, 2015." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 354, + 506, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 506, + 389 + ], + "type": "text", + "content": "Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In Proceedings of the European conference on computer vision (ECCV), pp. 801-818, 2018." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 395, + 506, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 395, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 395, + 506, + 430 + ], + "type": "text", + "content": "Xiaozhi Chen, Huimin Ma, Ji Wan, Bo Li, and Tian Xia. Multi-view 3d object detection network for autonomous driving. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 1907-1915, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 436, + 506, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 506, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 506, + 470 + ], + "type": "text", + "content": "Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3075-3084, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 476, + 506, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 476, + 506, + 500 + ], + "spans": [ + { + "bbox": [ + 105, + 476, + 506, + 500 + ], + "type": "text", + "content": "Taco Cohen and Max Welling. 
Group equivariant convolutional networks. In International conference on machine learning, pp. 2990-2999, 2016." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 506, + 506, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 506, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 506, + 540 + ], + "type": "text", + "content": "Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multi-view prediction for 3d semantic scene segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 452-468, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 546, + 506, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 546, + 506, + 591 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 506, + 591 + ], + "type": "text", + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 597, + 504, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 504, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 504, + 632 + ], + "type": "text", + "content": "Carlos Esteves, Yinshuang Xu, Christine Allen-Blanchette, and Kostas Daniilidis. Equivariant multi-view networks. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1568-1577, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 639, + 504, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 504, + 673 + ], + "type": "text", + "content": "Yutong Feng, Yifan Feng, Haoxuan You, Xibin Zhao, and Yue Gao. 
Meshnet: Mesh neural network for 3d shape representation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 8279-8286, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "type": "text", + "content": "Ankit Goyal, Hei Law, Bowei Liu, Alejandro Newell, and Jia Deng. Revisiting point cloud shape classification with a simple and effective baseline. In ICML, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "text", + "content": "Meng-Hao Guo, Jun-Xiong Cai, Zheng-Ning Liu, Tai-Jiang Mu, Ralph R Martin, and Shi-Min Hu. Pct: Point cloud transformer. Computational Visual Media, 7(2):187-199, 2021." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "text", + "content": "Abdullah Hamdi, Sara Rojas, Ali Thabet, and Bernard Ghanem. Advpc: Transferable adversarial perturbations on 3d point clouds. In Computer Vision - ECCV 2020, pp. 241-257, Cham, 2020. Springer International Publishing. ISBN 978-3-030-58610-2." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 158 + ], + "type": "text", + "content": "Abdullah Hamdi, Silvio Giancola, and Bernard Ghanem. Mvtn: Multi-view transformation network for 3d shape recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 1-11, October 2021." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 209 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 209 + ], + "type": "text", + "content": "Zhizhong Han, Xiyang Wang, Yu-Shen Liu, and Matthias Zwicker. Multi-angle point cloud-vae: Unsupervised feature learning for 3d point clouds from multiple angles by joint self-reconstruction and half-to-half prediction. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 10441-10450. IEEE, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 216, + 504, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 216, + 504, + 240 + ], + "spans": [ + { + "bbox": [ + 107, + 216, + 504, + 240 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CoRR, abs/1512.03385, 2015. URL http://arxiv.org/abs/1512.03385." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 245, + 505, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 245, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 505, + 281 + ], + "type": "text", + "content": "Ji Hou, Angela Dai, and Matthias Nießner. 3d-sis: 3d semantic instance segmentation of rgb-d scans. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4421-4430, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 287, + 506, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 287, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 506, + 322 + ], + "type": "text", + "content": "Wenbo Hu, Hengshuang Zhao, Li Jiang, Jiaya Jia, and Tien-Tsin Wong. Bidirectional projection network for cross dimension scene understanding. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14373-14382, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 328, + 506, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 328, + 506, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 506, + 362 + ], + "type": "text", + "content": "Maximilian Jaritz, Jiayuan Gu, and Hao Su. Multi-view pointnet for 3d scene understanding. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 0-0, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 369, + 505, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 369, + 505, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 505, + 404 + ], + "type": "text", + "content": "Evangelos Kalogerakis, Melinos Averkiou, Subhransu Maji, and Siddhartha Chaudhuri. 3d shape segmentation with projective convolutional networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3779-3788, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 410, + 505, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 505, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 505, + 455 + ], + "type": "text", + "content": "Asako Kanezaki, Yasuyuki Matsushita, and Yoshifumi Nishida. Rotationnet: Joint object categorization and pose estimation using multiviews from unsupervised viewpoints. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5010-5019, 2018." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 462, + 506, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 462, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 506, + 507 + ], + "type": "text", + "content": "Abhijit Kundu, Xiaoqi Yin, Alireza Fathi, David Ross, Brian Brewington, Thomas Funkhouser, and Caroline Pantofaru. Virtual multi-view fusion for 3d semantic segmentation. In European Conference on Computer Vision (ECCV), pp. 518-535. Springer, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 514, + 505, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 505, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 505, + 549 + ], + "type": "text", + "content": "Yangyan Li, Rui Bu, Mingchao Sun, Wei Wu, Xinhan Di, and Baoquan Chen. Pointcnn: Convolution on x-transformed points. In Advances in neural information processing systems (NIPS), pp. 820-830, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 555, + 505, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 505, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 505, + 590 + ], + "type": "text", + "content": "Yongcheng Liu, Bin Fan, Shiming Xiang, and Chunhong Pan. Relation-shape convolutional neural network for point cloud analysis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8895-8904, 2019a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 597, + 504, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 504, + 620 + ], + "type": "text", + "content": "Ze Liu, Zheng Zhang, Yue Cao, Han Hu, and Xin Tong. Group-free 3d object detection via transformers. arXiv preprint arXiv:2104.00678, 2021a." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 627, + 506, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 506, + 662 + ], + "type": "text", + "content": "Zhengzhe Liu, Xiaojuan Qi, and Chi-Wing Fu. 3d-to-2d distillation for indoor scene parsing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4464-4474, 2021b." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 668, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 504, + 692 + ], + "type": "text", + "content": "Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Point-voxel cnn for efficient 3d deep learning. In Advances in Neural Information Processing Systems, pp. 965-975, 2019b." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "text", + "content": "Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3431-3440, 2015." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 105 + ], + "type": "text", + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 112, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 112, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 505, + 148 + ], + "type": "text", + "content": "Yecheng Lyu, Xinming Huang, and Ziming Zhang. Learning to segment 3d point clouds in 2d image space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12255-12264, 2020." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 156, + 505, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 505, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 505, + 201 + ], + "type": "text", + "content": "Ricardo Martin-Brualla, Noha Radwan, Mehdi SM Sajjadi, Jonathan T Barron, Alexey Dosovitskiy, and Daniel Duckworth. Nerf in the wild: Neural radiance fields for unconstrained photo collections. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7210-7219, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 209, + 505, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 209, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 505, + 244 + ], + "type": "text", + "content": "Daniel Maturana and Sebastian Scherer. Voxnet: A 3d convolutional neural network for real-time object recognition. In 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 922-928. IEEE, 2015." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 251, + 505, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 505, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 505, + 286 + ], + "type": "text", + "content": "Leonard McMillan and Gary Bishop. Plenoptic modeling: An image-based rendering system. In Proceedings of the 22nd annual conference on Computer graphics and interactive techniques, pp. 39-46, 1995." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 293, + 505, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 505, + 338 + ], + "type": "text", + "content": "Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4460-4470, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 347, + 505, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 347, + 505, + 382 + ], + "spans": [ + { + "bbox": [ + 105, + 347, + 505, + 382 + ], + "type": "text", + "content": "Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pp. 405-421. Springer, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 390, + 505, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 505, + 414 + ], + "type": "text", + "content": "Masatoshi Okutomi and Takeo Kanade. A multiple-baseline stereo. IEEE Transactions on pattern analysis and machine intelligence, 15(4):353-363, 1993." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 421, + 505, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 421, + 505, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 421, + 505, + 467 + ], + "type": "text", + "content": "Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 165-174, 2019." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 475, + 505, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 505, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 505, + 509 + ], + "type": "text", + "content": "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 517, + 505, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 517, + 505, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 505, + 552 + ], + "type": "text", + "content": "Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10318-10327, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 559, + 505, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 559, + 505, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 505, + 594 + ], + "type": "text", + "content": "Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 652-660, 2017a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 601, + 505, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 505, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 505, + 636 + ], + "type": "text", + "content": "Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In Advances in neural information processing systems (NIPS), pp. 
5099-5108, 2017b." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 644, + 505, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 644, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 505, + 678 + ], + "type": "text", + "content": "Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 686, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 686, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 505, + 732 + ], + "type": "text", + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael S. Bernstein, Alexander C. Berg, and Fei-Fei Li. Imagenet large scale visual recognition challenge. CoRR, abs/1409.0575, 2014. URL http://arxiv.org/abs/1409.0575." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 138 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 138 + ], + "type": "text", + "content": "Konstantinos Sfikas, Theoharis Theoharis, and Ioannis Pratikakis. Exploiting the PANorama Representation for Convolutional Neural Network Classification and Retrieval. In Ioannis Pratikakis, Florent Dupont, and Maks Ovsjanikov (eds.), Eurographics Workshop on 3D Object Retrieval, pp. 1-7. The Eurographics Association, 2017. ISBN 978-3-03868-030-7. doi: 10.2312/3dor.20171045." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 143, + 505, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 143, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 143, + 505, + 178 + ], + "type": "text", + "content": "Hang Su, Subhransu Maji, Evangelos Kalogerakis, and Erik Learned-Miller. Multi-view convolutional neural networks for 3d shape recognition. In Proceedings of the IEEE international conference on computer vision, pp. 
945-953, 2015." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 182, + 506, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 506, + 228 + ], + "type": "text", + "content": "Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. Kpconv: Flexible and deformable convolution for point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6411–6420, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 233, + 506, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 233, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 506, + 269 + ], + "type": "text", + "content": "Mikaela Angelina Uy, Quang-Hieu Pham, Binh-Son Hua, Duc Thanh Nguyen, and Sai-Kit Yeung. Revisiting point cloud classification: A new benchmark dataset and classification model on real-world data. In International Conference on Computer Vision (ICCV), 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 273, + 506, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 273, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 506, + 308 + ], + "type": "text", + "content": "Brian H Wang, Wei-Lun Chao, Yan Wang, Bharath Hariharan, Kilian Q Weinberger, and Mark Campbell. Ldls: 3-d object segmentation through label diffusion from 2-d images. IEEE Robotics and Automation Letters, 4(3):2902-2909, 2019a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 312, + 505, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 312, + 505, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 505, + 358 + ], + "type": "text", + "content": "He Wang, Srinath Sridhar, Jingwei Huang, Julien Valentin, Shuran Song, and Leonidas J Guibas. 
Normalized object coordinate space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2642-2651, 2019b." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 363, + 506, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 363, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 506, + 399 + ], + "type": "text", + "content": "Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E. Sarma, Michael M. Bronstein, and Justin M. Solomon. Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics (TOG), 2019c." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 403, + 506, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 403, + 506, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 506, + 438 + ], + "type": "text", + "content": "Xin Wei, Ruixuan Yu, and Jian Sun. View-gen: View-based graph convolutional network for 3d shape analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1850-1859, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 442, + 506, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 442, + 506, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 442, + 506, + 467 + ], + "type": "text", + "content": "Ross Wightman. Pytorch image models. https://github.com/rwrightman/pytorch-image-models, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 471, + 505, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 471, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 505, + 506 + ], + "type": "text", + "content": "Zhirong Wu, S. Song, A. Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and J. Xiao. 3d shapenets: A deep representation for volumetric shapes. 
In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1912-1920, 2015." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 510, + 505, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 505, + 546 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 505, + 546 + ], + "type": "text", + "content": "Tiang Xiang, Chaoyi Zhang, Yang Song, Jianhui Yu, and Weidong Cai. Walk in the cloud: Learning curves for point clouds shape analysis. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 915-924, October 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 550, + 505, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 550, + 505, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 505, + 586 + ], + "type": "text", + "content": "Yifan Xu, Tianqi Fan, Mingye Xu, Long Zeng, and Yu Qiao. SpiderCNN: Deep learning on point sets with parameterized convolutional filters. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 87-102, 2018." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 590, + 505, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 590, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 590, + 505, + 635 + ], + "type": "text", + "content": "Li Yi, Vladimir G Kim, Duygu Ceylan, I-Chao Shen, Mengyan Yan, Hao Su, Cewu Lu, Qixing Huang, Alla Sheffer, and Leonidas Guibas. A scalable active framework for region annotation in 3d shape collections. ACM Transactions on Graphics (ToG), 35(6):1-12, 2016." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 640, + 505, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 640, + 505, + 676 + ], + "spans": [ + { + "bbox": [ + 105, + 640, + 505, + 676 + ], + "type": "text", + "content": "Haoxuan You, Yifan Feng, Rongrong Ji, and Yue Gao. 
Pvnet: A joint convolutional network of point cloud and multi-view for 3d shape recognition. In Proceedings of the 26th ACM international conference on Multimedia, pp. 1310-1318, 2018." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 680, + 505, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 680, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 105, + 680, + 505, + 704 + ], + "type": "text", + "content": "Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In ICCV, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 708, + 505, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 733 + ], + "type": "text", + "content": "Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip Torr, and Vladlen Koltun. Point transformer. arXiv preprint arXiv:2012.09164, 2020." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 164, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 164, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 164, + 94 + ], + "type": "text", + "content": "APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 273, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 273, + 119 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 273, + 119 + ], + "type": "text", + "content": "A DETAILED FORMULATIONS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 130, + 204, + 142 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 130, + 204, + 142 + ], + "spans": [ + { + "bbox": [ + 105, + 130, + 204, + 142 + ], + "type": "text", + "content": "A.1 TOY EXAMPLE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "text", + "content": "In the toy 2D example in Figure 5, the center point (represented by a circular function " + }, + { + "bbox": [ + 104, + 151, + 506, + 
231 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "text", + "content": ") is viewed from various view-points " + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "inline_equation", + "content": "u_{j}" + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "text", + "content": " that are agnostic to the underlying function itself. In many applications, it is desired to have a single feature representing each point in the point cloud. When the projected values of " + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "text", + "content": " from these " + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "inline_equation", + "content": "u_{j}" + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "text", + "content": " view-points are aggregated together (e.g. by max/mean pool) to get a constant representation of that point, the underlying properties of " + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "text", + "content": " are lost. We build our Voint representation to keep the structure of " + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "text", + "content": " intact by taking the full set " + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "inline_equation", + "content": "\\{(u_{j},g(u_{j}))\\}_{j = 1}^{5}" + }, + { + "bbox": [ + 104, + 151, + 506, + 231 + ], + "type": "text", + "content": " in learning the aggregations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 241, + 293, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 241, + 293, + 252 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 293, + 252 + ], + "type": "text", + "content": "A.2 FUNCTIONAL FORM OF VOINTNET" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "text", + "content": "We can look at a simplified setup to decide on the functional form of the deep neural network that operates in the Voint space. In this simplified setup, we consider a 2D example (instead of 3D Voints) and assume that a circular function describes a point at the center. The center point will assume its value according to the angle " + }, + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "text", + "content": ". The following Theorem 1 proves that for any continuous set function " + }, + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "text", + "content": " that operates on any set of " + }, + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "text", + "content": " angles " + }, + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "inline_equation", + "content": "\\{u_1, \\dots, u_M\\}" + }, + { + "bbox": [ + 104, + 261, + 504, + 350 + ], + "type": "text", + "content": ", there exists an equivalent composite function consisting of transformed max-pooled individual view-features. 
This composition is the functional form we describe later for Voint neural networks" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": "Theorem 1 Suppose " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "f: \\mathcal{S} \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": " is a continuous set function operating on an angles set " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{S} = \\{u \\mid u \\in [0,2\\pi]\\}" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": ". The continuity of " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": " is based on the Hausdorff distance " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "d_H" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": " between two sets of angles, where " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "d_H(\\mathcal{S},\\mathcal{S}') = \\max_{u_i' \\in \\mathcal{S}'} \\min_{u_i \\in \\mathcal{S}} d_A(u_i,u_i')" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "d_A" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": " is the smallest positive angle between two angles " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "d_A(u,u') = \\min(|u 
- u'|, 2\\pi - |u - u'|)" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": ". Then, for every " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "\\epsilon > 0" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{U} = \\{u_1,\\dots,u_M\\} \\subset \\mathcal{S}" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": ", there exists a continuous function " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "\\mathbf{h}" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": " and a symmetric function " + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "inline_equation", + "content": "g(u_1,\\dots,u_M) = \\gamma \\circ \\mathrm{MAX}" + }, + { + "bbox": [ + 104, + 357, + 506, + 426 + ], + "type": "text", + "content": ", such that:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 205, + 428, + 504, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 428, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 205, + 428, + 504, + 449 + ], + "type": "interline_equation", + "content": "\\left| f (\\mathcal {U}) - \\gamma \\left(\\operatorname {M A X} \\left(\\mathbf {h} \\left(u _ {1}\\right), \\dots , \\mathbf {h} \\left(u _ {M}\\right)\\right)\\right) \\right| < \\epsilon , \\tag {5}", + "image_path": "b941733c57a2bcc1328225d416acdabc2e62b77035d871fa3599ba6c46cf5ce3.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 450, + 477, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 450, + 477, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 477, + 463 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 450, + 477, + 463 + 
], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 450, + 477, + 463 + ], + "type": "text", + "content": " is a continuous function, and MAX is an element-wise vector max operator." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "content": "Proof. By the continuity of " + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "content": ", we take " + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\delta_{\\epsilon}" + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "inline_equation", + "content": "|f(\\mathcal{U}) - f(\\mathcal{U}')| < \\epsilon" + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\mathcal{U}, \\mathcal{U}' \\subset \\mathcal{S}" + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "inline_equation", + "content": "d_H(\\mathcal{U}, \\mathcal{U}') < \\delta_{\\epsilon}" + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "content": ". 
Define " + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "inline_equation", + "content": "K = [2\\pi/\\delta_{\\epsilon}]" + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "content": ", which split " + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "inline_equation", + "content": "[0, 2\\pi]" + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 469, + 506, + 503 + ], + "type": "text", + "content": " intervals evenly and define an auxiliary function that maps an angle to the beginning of the interval it lies in:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 273, + 505, + 336, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 273, + 505, + 336, + 529 + ], + "spans": [ + { + "bbox": [ + 273, + 505, + 336, + 529 + ], + "type": "interline_equation", + "content": "\\sigma (u) = \\frac {\\lfloor K u \\rfloor}{K}", + "image_path": "c279598317bdca2567bab477cfce802003255f2c1b4bd321f6305d0c4cc7713f.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 530, + 227, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 530, + 227, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 227, + 544 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 105, + 530, + 227, + 544 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{U}} = \\sigma(u): u \\in \\mathcal{U}" + }, + { + "bbox": [ + 105, + 530, + 227, + 544 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 265, + 543, + 504, + 557 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 543, + 504, + 557 + ], + "spans": [ + { + "bbox": [ + 265, + 543, + 504, + 557 + ], + "type": "interline_equation", + "content": 
"\\left| f (\\mathcal {U}) - f (\\tilde {\\mathcal {U}}) \\right| < \\epsilon \\tag {6}", + "image_path": "44e52c4ab13d94b8bad03c5d092d188d9bae7090851ab26a5888f807d11e97aa.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "inline_equation", + "content": "h_k(u) = e^{-d\\left(u, \\left[\\frac{k-1}{K}, \\frac{k}{K}\\right]\\right)}" + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "text", + "content": " be a soft indicator function where " + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "inline_equation", + "content": "d\\left(u, \\left[\\frac{k-1}{K}, \\frac{k}{K}\\right]\\right) = \\min\\left(d_A\\left(u, \\frac{k-1}{K}\\right), d_A\\left(u, \\frac{k}{K}\\right)\\right)" + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "text", + "content": " is the distance between angle " + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "text", + "content": " to interval " + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "inline_equation", + "content": "\\left[\\frac{k-1}{K}, \\frac{k}{K}\\right]" + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "inline_equation", + "content": "\\mathbf{h}(u) = [h_1(u); \\ldots; h_K(u)]" + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 559, + 504, + 601 + ], + "type": "inline_equation", + "content": "\\mathbf{h}: \\mathbb{R} \\to \\mathbb{R}^K" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "inline_equation", + "content": "q_{j}(u_{1},\\ldots ,u_{M}) = \\max \\{h_{j}(u_{1}),\\ldots ,h_{j}(u_{M})\\}" + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "text", + "content": ", indicating the occupancy of the " + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "text", + "content": "-th interval by angles in " + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "inline_equation", + "content": "\\mathbf{q} = [q_1;\\dots;q_K]" + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "inline_equation", + "content": "\\mathbf{q}:[0,2\\pi ]^M\\to \\{0,1\\} ^K" + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "text", + "content": " is a symmetric function, indicating the occupancy of each interval by angles in " + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 104, + 604, + 504, + 640 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 643, + 504, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 504, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 504, + 667 + ], + "type": "text", + "content": "Define " + }, + { + "bbox": [ + 104, + 643, + 504, + 667 + ], + "type": "inline_equation", + "content": "\\zeta : \\{0,1\\}^K \\to S" + }, + { + "bbox": [ + 104, + 643, + 504, + 667 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 643, + 504, + 667 + ], + "type": "inline_equation", + "content": "\\zeta(\\mathbf{q}) = \\left\\{\\frac{k-1}{K} : q_k \\geq 1\\right\\}" + }, + { + "bbox": [ + 104, + 643, + 504, + 667 + ], + "type": "text", + "content": " which maps the occupancy vector to a set which contains the left end of each angle interval. 
It is straightforward to show:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 274, + 669, + 504, + 683 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 274, + 669, + 504, + 683 + ], + "spans": [ + { + "bbox": [ + 274, + 669, + 504, + 683 + ], + "type": "interline_equation", + "content": "\\zeta (\\mathbf {q} (\\mathcal {U})) \\equiv \\tilde {\\mathcal {U}} \\tag {7}", + "image_path": "7db7135b05a535440ad4377da692c916fd990a5027a654bbd7ce36913033e086.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "inline_equation", + "content": "\\gamma : \\mathbb{R}^K \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "text", + "content": " be a continuous function such that " + }, + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "inline_equation", + "content": "\\gamma(\\mathbf{q}) = f(\\zeta(\\mathbf{q}))" + }, + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "inline_equation", + "content": "\\mathbf{q} \\in \\{0,1\\}^K" + }, + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "text", + "content": ". 
Then from Eq (6) and Eq (7)," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 246, + 708, + 504, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 708, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 246, + 708, + 504, + 734 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left| \\gamma (\\mathbf {q} (\\mathcal {U})) - f (\\mathcal {U}) \\right| (8) \\\\ = \\left| f \\left(\\zeta (\\mathbf {q} (\\mathcal {U}))\\right) - f (\\mathcal {U}) \\right| < \\epsilon (8) \\\\ \\end{array}", + "image_path": "57ae49fb30e5cef020c20af80dd4ac3ef419f4b707762edacab66e5fb6f5d3ba.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 166, + 81, + 430, + 277 + ], + "blocks": [ + { + "bbox": [ + 166, + 81, + 430, + 277 + ], + "lines": [ + { + "bbox": [ + 166, + 81, + 430, + 277 + ], + "spans": [ + { + "bbox": [ + 166, + 81, + 430, + 277 + ], + "type": "image", + "image_path": "72e8bc6c21ce842713cf2f761952fd837f95ca87330b4317430bb398147d99b3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "lines": [ + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": 
"text", + "content": "Figure 5: A Toy 2D Example of Voints. Voints assume view-dependency for every 3D point. Here, we look at a single 2D point at the center with a circular function " + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "inline_equation", + "content": "g(u) = \\mathrm{sign}(\\cos u)" + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "text", + "content": " from five arbitrary view-points " + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "inline_equation", + "content": "\\{u_j\\}_{j=1}^5" + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "text", + "content": ". Trying to reduce " + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "text", + "content": " to a single value based on " + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "inline_equation", + "content": "u_j" + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "text", + "content": " projections undermines the underlying structure of " + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "text", + "content": ". 
We take the full set " + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "inline_equation", + "content": "\\{(u_j, g(u_j))\\}_{j=1}^5" + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "text", + "content": " as a representation of " + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "text", + "content": " and learn a set function " + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 284, + 506, + 348 + ], + "type": "text", + "content": " on these view-features for a more informative manner of representation aggregation." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 366, + 316, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 366, + 316, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 316, + 379 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 105, + 366, + 316, + 379 + ], + "type": "inline_equation", + "content": "\\gamma (\\mathbf{q}(\\mathcal{U}))" + }, + { + "bbox": [ + 105, + 366, + 316, + 379 + ], + "type": "text", + "content": " can be rewritten as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 211, + 384, + 503, + 424 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 384, + 503, + 424 + ], + "spans": [ + { + "bbox": [ + 211, + 384, + 503, + 424 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\gamma \\left(\\mathbf {q} \\left(\\mathcal {U}\\right)\\right) = \\gamma \\left(\\mathbf {q} \\left(u _ {1}, \\dots , u _ {M}\\right)\\right) \\\\ = \\gamma (\\operatorname {M A X} \\left(\\mathbf {h} \\left(u _ {1}\\right), \\dots , \\mathbf {h} \\left(u _ {M}\\right)\\right)) \\tag {9} \\\\ = (\\gamma \\circ \\operatorname {M A X}) 
\\left(\\mathbf {h} \\left(u _ {1}\\right), \\dots , \\mathbf {h} \\left(u _ {M}\\right)\\right) \\\\ \\end{array}", + "image_path": "99af568d516e8e1789bf962bcbffd77a36640e9e7f985641a2569e3a3a71e945.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "\\gamma \\circ" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": " MAX is a symmetric function and from Eq (8) and Eq (9), we reach to the main result in Eq (5). This concludes the proof." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 464, + 218, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 218, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 218, + 475 + ], + "type": "text", + "content": "A.3 3D VOINT CLOUD" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": "Plenoptic and Spherical Coordinate Functions. The Plenoptic function was first introduced by McMillan and Bishop (McMillan & Bishop, 1995) in 1995 as a general function that describes the visible world. 
The Plenoptic function " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": " is a continuous spherical function that describes the visibility at any Euclidean 3D point in space " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "(V_x, V_y, V_x)" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": " when looking into any direction " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "(\\theta, \\phi)" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": " across wavelength " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": " at time " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": ". It is defined as " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "p = P(\\theta, \\phi, \\lambda, V_x, V_y, V_x, t)" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": ". Such a remarkable and compact formulation covers all the images observed as just samples of the function " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": ". 
For fixed time and wavelength, the reduced Plenoptic function " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": " becomes " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "p = P(\\theta, \\phi, V_x, V_y, V_x,)" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": " which can describe any field in 3D space. This shortened formulation is what Neural Radiance Fields (NeRFs) (Mildenhall et al., 2020; Pumarola et al., 2021; Martin-Brualla et al., 2021) try to learn with MLPs to describe the radiance and RGB values in the continuous Euclidean space with a dependency on the view direction " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "(\\theta, \\phi)" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": ". In the same spirit of the Plenoptic function and NeRFs, the Voint cloud representation relies on the viewing angles " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "(\\theta, \\phi)" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": " to define the view-features. The problem with the plenoptic functions " + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 484, + 506, + 704 + ], + "type": "text", + "content": ", and subsequently NeRFs, is that they are very high dimensional, and any attempt to densely represent the scene with discrete and fixed data will cause memory and compute issues (Yu et al., 2021; Pumarola et al., 2021). Unlike NERFs (Mildenhall et al., 2020) that define dense 3D volumes, we focus only on the surface of the 3D shapes with our Voint clouds representation. 
Our Voints are in the order of the sampled point cloud, offering a compact representation that allows for efficient computation and memory while maintaining the view-dependent component that facilitates view-based learning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "From Point Clouds to Voint Clouds. Implicit representation of 3D surfaces typically aims to learn an implicit function " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "g_{\\mathrm{s}}(\\mathbf{x}) : \\mathbb{R}^3 \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " that define the Sign Distance Function" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": "(SDF) or the occupancy in the continuous Euclidean space (Park et al., 2019; Mescheder et al., 2019). 
The 3D iso-surface is then defined as the set of all points " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " that satisfy the condition " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "g_{\\mathrm{s}}(\\mathbf{x}) = 0" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " (assuming " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "g_{\\mathrm{s}}(\\mathbf{x})" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " as SDF hereafter). We define a surface 3D point cloud " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\mathcal{X} \\in \\mathbb{R}^{N \\times 3}" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": ", as a set of " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " 3D points, where each point " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " is represented by its 3D coordinates " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "(x_i, y_i, z_i)" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " and satisfy the iso-surface condition as follows." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 242, + 156, + 504, + 172 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 156, + 504, + 172 + ], + "spans": [ + { + "bbox": [ + 242, + 156, + 504, + 172 + ], + "type": "interline_equation", + "content": "\\mathcal {X} = \\left\\{\\mathbf {x} _ {i} \\in \\mathbb {R} ^ {3} \\mid g _ {\\mathrm {s}} (\\mathbf {x} _ {i}) = 0 \\right\\} _ {i = 1} ^ {N} \\tag {10}", + "image_path": "af1637e6dc5b7a79f00ad13e0231ebceef77550927b00b2b9de3e754645a3a33.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": "Here, we assume that surface points also depend on the view direction from which they are being observed. Specifically, there exists a continuous implicit spherical function " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{g}(\\mathbf{x},\\mathbf{u}):" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^5\\to \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": " that defines the features at each point " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": " depending on the view direction " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": ". 
Given a set of " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": " view-point directions " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{U}\\in \\mathbb{R}^{M\\times 2}" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": ", a Voint " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{x}}\\in \\mathbb{R}^{M\\times d}" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": " is a set of " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": " view-dependent features of size " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": " for the sphere centered at point " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": ". 
The Voint cloud " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{X}}\\in \\mathbb{R}^{N\\times M\\times d}" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": " is the set of all " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": " Voints " + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{x}}" + }, + { + "bbox": [ + 104, + 184, + 506, + 251 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 225, + 270, + 503, + 294 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 270, + 503, + 294 + ], + "spans": [ + { + "bbox": [ + 225, + 270, + 503, + 294 + ], + "type": "interline_equation", + "content": "\\widehat {\\mathbf {x}} _ {i} = \\left\\{\\mathbf {g} \\left(\\mathbf {x} _ {i}, \\mathbf {u} _ {j}\\right) \\in \\mathbb {R} ^ {d} \\mid \\mathbf {x} _ {i} \\in \\mathcal {X} \\right\\} _ {j = 1} ^ {M} \\tag {11}", + "image_path": "430ef2225b1b483f4fdb810d971b9fe1850c2e1dc23f87a9ed7661fb97222cc6.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 227, + 292, + 321, + 308 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 292, + 321, + 308 + ], + "spans": [ + { + "bbox": [ + 227, + 292, + 321, + 308 + ], + "type": "interline_equation", + "content": "\\widehat {\\mathcal {X}} = \\left\\{\\widehat {\\mathbf {x}} _ {i} \\in \\mathbb {R} ^ {M \\times d} \\right\\} _ {i = 1} ^ {N}", + "image_path": "5ea141c275285d15b5bb92958b305b38108759e4e30cbcf167da69a27920e00b.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 319, + 506, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 506, + 343 + ], + "spans": [ + { + 
"bbox": [ + 104, + 319, + 506, + 343 + ], + "type": "text", + "content": "Note that we typically do not have access to the underlying implicit function " + }, + { + "bbox": [ + 104, + 319, + 506, + 343 + ], + "type": "inline_equation", + "content": "\\mathbf{g}" + }, + { + "bbox": [ + 104, + 319, + 506, + 343 + ], + "type": "text", + "content": " and we approximate it by 2D projection, feature extraction, and then un-projection as we show next." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": "1-Multi-View Projection. As mentioned earlier, a Voint combines multiple view-features of the same 3D point. These view-features come from a multi-view projection of the points by a point cloud renderer " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{R}:\\mathbb{R}^{N\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times 3}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " that renders the point cloud " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " from multiple view-points " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " images of size " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "H\\times W\\times 3" + }, + { + "bbox": [ + 104, + 
347, + 504, + 458 + ], + "type": "text", + "content": ". In addition to projecting the point cloud into the image space, " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " defines the mapping " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{B}\\in \\{0,\\dots,N\\}^{M\\times H\\times W}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " between each pixel to the N points and background it renders. Also, " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " outputs the visibility binary matrix " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{V}\\in \\{0,1\\}^{N\\times M}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " for each point from each view. Since not all points appear in all the views due to pixel discretization, the visibility score " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{i,j}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " defines if the Voint " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_i" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " is visible in the view " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_j" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": ". 
The matrix " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{B}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " is crucial for unprojection, while " + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 104, + 347, + 504, + 458 + ], + "type": "text", + "content": " is needed for defining meaningful operations on Voints." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 463, + 504, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 463, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 104, + 463, + 504, + 519 + ], + "type": "text", + "content": "2-Multi-View Feature Extraction. The rendered images are processed by a function " + }, + { + "bbox": [ + 104, + 463, + 504, + 519 + ], + "type": "inline_equation", + "content": "\\mathbf{C}:\\mathbb{R}^{M\\times H\\times W\\times 3}\\to \\mathbb{R}^{M\\times H\\times W\\times d}" + }, + { + "bbox": [ + 104, + 463, + 504, + 519 + ], + "type": "text", + "content": " that extracts image features. If " + }, + { + "bbox": [ + 104, + 463, + 504, + 519 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 104, + 463, + 504, + 519 + ], + "type": "text", + "content": " is the identity function, all the view-features would be identical for each Voint (typically the RGB value of the corresponding point). However, the " + }, + { + "bbox": [ + 104, + 463, + 504, + 519 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 104, + 463, + 504, + 519 + ], + "type": "text", + "content": " function can be a 2D network dedicated to the downstream task and can extract useful global and local features about each view." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "text", + "content": "3-Multi-View Unprojection. We propose a module " + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "inline_equation", + "content": "\\Phi_{\\mathbf{B}}:\\mathbb{R}^{M\\times H\\times W\\times d}\\to \\mathbb{R}^{N\\times M\\times d}" + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "text", + "content": " that unprojects the 2D features from each pixel to be 3D view-features at the corresponding Voint. This is performed by using the mapping " + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "inline_equation", + "content": "\\mathbf{B}" + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "text", + "content": " created by the renderer to form the Voint cloud features " + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{X}}" + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "text", + "content": ". Note that the points are not necessarily visible from all the views, and some Voints that are not visible from any of the " + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "text", + "content": " views will not receive any features. We post-process these empty points (" + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "inline_equation", + "content": "\\sim 0.5\\%" + }, + { + "bbox": [ + 104, + 523, + 506, + 604 + ], + "type": "text", + "content": " of points during inference) to be filled with nearest 3D neighbors features. The output Voint cloud features would be described as follows." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 213, + 620, + 400, + 639 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 620, + 400, + 639 + ], + "spans": [ + { + "bbox": [ + 213, + 620, + 400, + 639 + ], + "type": "interline_equation", + "content": "\\widehat {\\mathbf {x}} _ {i} = \\left\\{\\mathbf {g} _ {i, j,:} \\in \\mathbb {R} ^ {d} \\mid \\mathbf {x} _ {i} \\in \\mathcal {X}, \\mathbf {V} _ {i, j} = 1 \\right\\} _ {j = 1} ^ {M}", + "image_path": "8a1145cfb5335b8c2d5a41e694f624c6252585b8744472e13d4c0df9249b4eec.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 209, + 641, + 503, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 641, + 503, + 654 + ], + "spans": [ + { + "bbox": [ + 209, + 641, + 503, + 654 + ], + "type": "interline_equation", + "content": "\\mathbf {g} _ {:, j} = \\Phi_ {\\mathbf {B}} \\left(\\mathbf {C} \\left(\\mathbf {R} \\left(\\mathcal {X}, \\mathbf {u} _ {j}\\right)\\right), \\mathbf {B}\\right) \\tag {12}", + "image_path": "ff25a026d0bf5dd8c139113bcccc31989201f1afefb196f38e51d627b06e9eaf.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 216, + 656, + 311, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 656, + 311, + 673 + ], + "spans": [ + { + "bbox": [ + 216, + 656, + 311, + 673 + ], + "type": "interline_equation", + "content": "\\widehat {\\mathcal {X}} = \\left\\{\\widehat {\\mathbf {x}} _ {i} \\in \\mathbb {R} ^ {M \\times d} \\right\\} _ {i = 1} ^ {N}", + "image_path": "362f7b34e1558ac9d79c58cfc6e282e270a56fc9aca65b0e8c78df98b60527fa.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 688, + 226, + 698 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 688, + 226, + 698 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 226, + 698 + ], + "type": "text", + "content": "A.4 VOINT OPERATIONS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 
104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "VointMax. In order to learn a neural network in the Voint space in the form dictated by Theorem 1, we need to define some basic differentiable operations on the Voint space. The" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 173, + 83, + 443, + 247 + ], + "blocks": [ + { + "bbox": [ + 173, + 83, + 443, + 247 + ], + "lines": [ + { + "bbox": [ + 173, + 83, + 443, + 247 + ], + "spans": [ + { + "bbox": [ + 173, + 83, + 443, + 247 + ], + "type": "image", + "image_path": "4b68f906dad809b2cdbb8df39da391275635ec0e528c03019df6e17bc56ee754.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 257, + 504, + 328 + ], + "lines": [ + { + "bbox": [ + 104, + 257, + 504, + 328 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 504, + 328 + ], + "type": "text", + "content": "Figure 6: VointNet Variants. 
We propose three variants of VointNet that use three different examples of VointConv operation " + }, + { + "bbox": [ + 104, + 257, + 504, + 328 + ], + "type": "inline_equation", + "content": "h_v" + }, + { + "bbox": [ + 104, + 257, + 504, + 328 + ], + "type": "text", + "content": ": shared MLP (MLP), Graph Convolution (GCN), and Graph Attention (GAT). Here we highlight the main difference between VointNet (MLP) that shares the MLP on all the view-features and VointNet (GCN) that creates a fully connected graph on the view-features and learn an MLP on the edge view-features. VointNet (GAT) is similar to VointNet (GCN) in addition to learning attention weights for each view-feature in weighted average aggregation." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 350, + 369, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 350, + 369, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 369, + 361 + ], + "type": "text", + "content": "max operation on the Voint cloud can be defined as follows." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 206, + 366, + 503, + 388 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 366, + 503, + 388 + ], + "spans": [ + { + "bbox": [ + 206, + 366, + 503, + 388 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\operatorname {V o i n t M a x} (\\widehat {\\mathbf {x}}) = \\max \\widehat {\\mathbf {x}} _ {i, j}, \\forall i, j \\\\ \\left(1 3\\right) \\\\ \\end{array}", + "image_path": "db366991bce2c906ae76ca3f4366272445f1ed2ed32be85a850310bc6edf9b3c.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 206, + 388, + 402, + 400 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 388, + 402, + 400 + ], + "spans": [ + { + "bbox": [ + 206, + 388, + 402, + 400 + ], + "type": "interline_equation", + "content": "\\mathrm {s . 
t .} i \\in 1, 2, \\dots , N, j \\in 1, 2, \\dots , M, \\mathbf {V} _ {i, j} = 1", + "image_path": "29f792b601736a83ae7c41c9d438d67c2e2312bff64bb6e59d0c06ee4dadf12c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 406, + 478, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 478, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 478, + 421 + ], + "type": "text", + "content": "Equivalently, " + }, + { + "bbox": [ + 105, + 406, + 478, + 421 + ], + "type": "inline_equation", + "content": "\\mathrm{VointMax}(\\widehat{\\mathbf{x}}) = \\max_j\\left(\\widehat{\\mathbf{x}}_{:,j} - \\infty \\overline{\\mathbf{V}}_{:,j}\\right)" + }, + { + "bbox": [ + 105, + 406, + 478, + 421 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 105, + 406, + 478, + 421 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{V}}" + }, + { + "bbox": [ + 105, + 406, + 478, + 421 + ], + "type": "text", + "content": " is the complement of " + }, + { + "bbox": [ + 105, + 406, + 478, + 421 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 105, + 406, + 478, + 421 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "text", + "content": "VointConv. 
We define the convolution operation " + }, + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{V}}: \\mathbb{R}^{N \\times M \\times d} \\to \\mathbb{R}^{N \\times M \\times d'}" + }, + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "text", + "content": " as any learnable function that operates on the Voint space with shared weights on all the Voints and has the view-features input size " + }, + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "text", + "content": " and outputs view-features of size " + }, + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "inline_equation", + "content": "d'" + }, + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "text", + "content": " and consists of " + }, + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "inline_equation", + "content": "l_{V}" + }, + { + "bbox": [ + 104, + 427, + 504, + 507 + ], + "type": "text", + "content": " layers. Examples of this VointConv operation include the following operations applied only on the visible view-features: a shared MLP, a graph convolution, and a graph attention. We detail these operations later in Section A.6, which result in different non-exhaustive variants of VointNet." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 521, + 289, + 532 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 289, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 289, + 532 + ], + "type": "text", + "content": "A.5 LEARNING ON 3D VOINT CLOUDS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "text", + "content": "VpointNet. 
Typical 3D point cloud classifiers with a feature max pooling layer work as in Eq (14), where " + }, + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{mlp}}" + }, + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{Pconv}}" + }, + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "text", + "content": " are the MLP and point Convolutional " + }, + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "inline_equation", + "content": "(1 \\times 1" + }, + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "text", + "content": " or edge) layers, respectively. This produces a K-class classifier " + }, + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 104, + 541, + 504, + 574 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 231, + 580, + 503, + 598 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 580, + 503, + 598 + ], + "spans": [ + { + "bbox": [ + 231, + 580, + 503, + 598 + ], + "type": "interline_equation", + "content": "\\mathbf {F} (\\mathcal {X}) = h _ {\\operatorname {m l p}} \\left(\\max _ {\\mathbf {x} _ {i} \\in \\mathcal {X}} \\left\\{h _ {\\text {P c o n v}} \\left(\\mathbf {x} _ {i}\\right) \\right\\}\\right) \\tag {14}", + "image_path": "92b3ae5fa6e4668dc1f2029140169a38b4125e13a8863845ad072709f26afe0d.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 605, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 504, + 651 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 605, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\mathbf{F}:\\mathbb{R}^{N\\times 3}\\to \\mathbb{R}^K" + }, + { + "bbox": [ + 104, + 605, + 504, + 651 + ], + "type": "text", + "content": " produces the logits layer of the classifier with size " + }, + { + "bbox": [ + 104, + 605, + 504, + 651 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 605, + 504, + 651 + ], + "type": "text", + "content": ". On the other hand, the goal of the VointNet model is to get multi-view point cloud features that can be used after which by any point cloud processing pipeline. The VointNet module " + }, + { + "bbox": [ + 104, + 605, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{F}}:\\mathbb{R}^{N\\times M\\times d}\\rightarrow \\mathbb{R}^{N\\times d}" + }, + { + "bbox": [ + 104, + 605, + 504, + 651 + ], + "type": "text", + "content": " as follows." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 226, + 655, + 504, + 676 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 655, + 504, + 676 + ], + "spans": [ + { + "bbox": [ + 226, + 655, + 504, + 676 + ], + "type": "interline_equation", + "content": "\\widehat {\\mathbf {F}} (\\widehat {\\mathcal {X}}) = h _ {\\mathrm {P}} \\left(\\operatorname {V o i n t M a x} \\left(h _ {\\mathrm {V}} (\\widehat {\\mathcal {X}})\\right)\\right), \\tag {15}", + "image_path": "b9408ddfa96f2e345587eedbe6ae9448508bbff551b9b6b817e7fd1cdf329a90.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 687, + 233, + 698 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 233, + 698 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 233, + 698 + ], + "type": "text", + "content": "A.6 VOINTNET VARIANTS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 708, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 504, + 732 + ], + "type": "text", + "content": "We define the convolution operation " + }, + { + "bbox": [ + 104, + 708, + 504, + 732 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{V}} \\colon \\mathbb{R}^{N \\times M \\times d} \\to \\mathbb{R}^{N \\times M \\times d'}" + }, + { + "bbox": [ + 104, + 708, + 504, + 732 + ], + "type": "text", + "content": " in VointNet from Eq (15) as any learnable function that operates on the Voint space with shared weights on all the" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 
760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "Voints and has the view-features input size " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " and outputs view-features of size " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "d'" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " and consists of " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "l_V" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " layers. Examples of this VointConv operation include the following:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 505, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 505, + 133 + ], + "type": "text", + "content": "Shared MLP. It is the most basic Voint neural network. 
For layer " + }, + { + "bbox": [ + 104, + 110, + 505, + 133 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 110, + 505, + 133 + ], + "type": "text", + "content": ", the features of Voint i at view j is updated as follows to layer " + }, + { + "bbox": [ + 104, + 110, + 505, + 133 + ], + "type": "inline_equation", + "content": "l + 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 195, + 137, + 504, + 158 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 137, + 504, + 158 + ], + "spans": [ + { + "bbox": [ + 195, + 137, + 504, + 158 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {i, j} ^ {l + 1} = \\rho \\left(\\mathbf {h} _ {i, j} ^ {l} \\mathcal {W} _ {\\rho}\\right), \\forall i, j \\tag {16}", + "image_path": "fbd00ee9ecb6856330464131248cda2de0dc161a675371326a7f64ed79c865d5.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 216, + 155, + 413, + 167 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 155, + 413, + 167 + ], + "spans": [ + { + "bbox": [ + 216, + 155, + 413, + 167 + ], + "type": "interline_equation", + "content": "\\mathrm {s . 
t .} i \\in {1, 2, \\dots , N}, j \\in {1, 2, \\dots , M}, \\mathbf {V} _ {i, j} = 1", + "image_path": "a2beb8d1bb0698c5ea13e25344bac6fe0700464a9e78b072d54c7a275b6d0378.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 171, + 506, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 215 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 215 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 171, + 506, + 215 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 171, + 506, + 215 + ], + "type": "text", + "content": " is the shared MLP with weights " + }, + { + "bbox": [ + 104, + 171, + 506, + 215 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\rho}" + }, + { + "bbox": [ + 104, + 171, + 506, + 215 + ], + "type": "text", + "content": " followed by normalization and nonlinear function (e.g. ReLU) applied on all Voints independently at the visible views features for each Voint. This formulation extends the shared MLP formulation for PointNet (Qi et al., 2017a) to make the MLP shared across the Voints and the views-features." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 220, + 504, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 309 + ], + "type": "text", + "content": "Graph Convolution (GCN). Just like how DGCNN (Wang et al., 2019c) extended PointNet (Qi et al., 2017a) by taking the neighborhood information and extract edge features, we extend the basic VointNet formulation in Eq (15). We define a fully connected graph for each Voint along the views dimension by creating a center virtual node connected to all the view features (similar to the classification token in ViT (Dosovitskiy et al., 2021)). 
This center virtual view-feature would be assigned the index " + }, + { + "bbox": [ + 104, + 220, + 504, + 309 + ], + "type": "inline_equation", + "content": "j = 0" + }, + { + "bbox": [ + 104, + 220, + 504, + 309 + ], + "type": "text", + "content": " and can be initialized with zeros as the \"cls\" token in ViT (Dosovitskiy et al., 2021). Then, Voint graph convolution operation can be defined as follows to update the activations from layer " + }, + { + "bbox": [ + 104, + 220, + 504, + 309 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 220, + 504, + 309 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 220, + 504, + 309 + ], + "type": "inline_equation", + "content": "l + 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 209, + 312, + 504, + 346 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 312, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 209, + 312, + 504, + 346 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {i, j} ^ {l + 1} = \\rho \\left(\\left(\\max _ {k} \\psi \\left(\\left(\\mathbf {h} _ {i, j} ^ {l}, \\mathbf {h} _ {i, k} ^ {l}\\right) \\mathcal {W} _ {\\psi}\\right)\\right) \\mathcal {W} _ {\\rho}\\right) \\forall i, j \\in \\{1, 2, \\dots , N - 1, 0, 1, M \\} \\tag {17}", + "image_path": "3edd7051ecabc41774c59a1110b02cdb075fe70aa280b44fd09b6ff833473a8b.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 211, + 335, + 503, + 353 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 335, + 503, + 353 + ], + "spans": [ + { + "bbox": [ + 211, + 335, + 503, + 353 + ], + "type": "interline_equation", + "content": "\\forall i, j, k \\quad \\text {s . 
t .} \\quad i \\in 1, 2, \\dots , N, j \\in 0, 1, \\dots , M \\tag {17}", + "image_path": "711ecee670ebd9802e274c6e32c5f3e13a1e0192b5fcda90bf5dec1733eb46d6.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 251, + 354, + 399, + 368 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 354, + 399, + 368 + ], + "spans": [ + { + "bbox": [ + 251, + 354, + 399, + 368 + ], + "type": "interline_equation", + "content": "k \\in 0, 1, \\dots , M, k \\neq j, \\mathbf {V} _ {i, j} = 1", + "image_path": "7f69e4f75091e3732de6db4ce946f87a23511e95923b7da7ddfe218489148288.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 371, + 504, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 504, + 395 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 504, + 395 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 371, + 504, + 395 + ], + "type": "inline_equation", + "content": "\\rho, \\psi" + }, + { + "bbox": [ + 104, + 371, + 504, + 395 + ], + "type": "text", + "content": " are two different shared MLPs as in Eq (16). The difference between VointNet (MLP) and VointNet (GCN) is highlighted in Figure 6." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 399, + 504, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 504, + 444 + ], + "type": "text", + "content": "Graph Attention (GAT). Similar to how Point Transformer (Zhao et al., 2020) extended the graph convolution by adding attention to DGCNN (Wang et al., 2019c), we extend the basic Voint GraphConv formulation in Eq (17). 
Voint graph attention operation can be defined as follows to update the activations from layer " + }, + { + "bbox": [ + 104, + 399, + 504, + 444 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 399, + 504, + 444 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 399, + 504, + 444 + ], + "type": "inline_equation", + "content": "l + 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 196, + 448, + 504, + 489 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 448, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 196, + 448, + 504, + 489 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {i, j} ^ {l + 1} = \\rho \\left(\\left(\\sum_ {k = 0, k \\neq j} ^ {M} \\eta_ {k} \\psi \\left((\\mathbf {h} _ {i, j} ^ {l}, \\mathbf {h} _ {i, k} ^ {l}) \\mathcal {W} _ {\\psi}\\right)\\right) \\mathcal {W} _ {\\rho}\\right) \\tag {18}", + "image_path": "9465a7d7b8988fd82092770e14e4f3a476f58720e2ed90391b4cb58c7ded1993.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 198, + 488, + 372, + 501 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 488, + 372, + 501 + ], + "spans": [ + { + "bbox": [ + 198, + 488, + 372, + 501 + ], + "type": "interline_equation", + "content": "\\forall i, j \\mathrm {s . 
t .} i \\in 1, 2, \\dots , N, j \\in 0, 1, \\dots , M", + "image_path": "50f3edcd21e66f489fe72646065d69993a210ec5529e3b9ca6b227a7c0d354e1.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 238, + 503, + 363, + 518 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 503, + 363, + 518 + ], + "spans": [ + { + "bbox": [ + 238, + 503, + 363, + 518 + ], + "type": "interline_equation", + "content": "\\eta_ {k} = \\zeta \\left(\\mathbf {h} _ {i, k} ^ {l} \\mathcal {W} _ {\\zeta}\\right), \\mathbf {V} _ {i, j} = 1", + "image_path": "aec5901147070e43ebc36ae4bd16b9e7fc124b26ed37508b859d89cfa6cebebc.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 522, + 504, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 522, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 504, + 544 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 522, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\rho, \\psi, \\zeta" + }, + { + "bbox": [ + 104, + 522, + 504, + 544 + ], + "type": "text", + "content": " are three different shared MLPs as in Eq (16), and " + }, + { + "bbox": [ + 104, + 522, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\eta_{k}" + }, + { + "bbox": [ + 104, + 522, + 504, + 544 + ], + "type": "text", + "content": " are the learned attention weights for each neighbor view-feature." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 559, + 312, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 559, + 312, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 312, + 572 + ], + "type": "text", + "content": "B DETAILED EXPERIMENTAL SETUP" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 584, + 181, + 595 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 181, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 181, + 595 + ], + "type": "text", + "content": "B.1 DATASETS" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 604, + 506, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 604, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 506, + 693 + ], + "type": "text", + "content": "ScanObjectNN: 3D Point Cloud Classification. We follow the literature (Goyal et al., 2021; Hamdi et al., 2021) on testing 3D classification in the challenging ScanObjectNN (Uy et al., 2019) point cloud dataset, since it includes background and considers occlusions. The dataset is composed of 2902 point clouds divided into 15 object categories. We use 2048 sampled points per object for Voint learning. We benchmark on its variants: Object only, Object with Background, and the Hardest perturbed variant (PB_T50_RS variant). Visualization is provided in Figure 7 of some of the renderings used in training the 2D backbone in our pipeline." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": "ShapeNet Core55: 3D Shape Retrieval. The shape retrieval challenge SHREC (Sfikas et al., 2017) uses ShapeNet Core55 is a subset of ShapeNet (Chang et al., 2015) for benchmarking. 
The dataset consists of 51,162 3D mesh objects labeled with 55 object classes. The" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "training, validation, and test sets consist of 35764, 5133, and 10265 shapes. We create a dataset of point clouds by sampling 5000 points from each mesh object as in MVTN (Hamdi et al., 2021)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 506, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 506, + 178 + ], + "type": "text", + "content": "ShapeNet Parts: 3D Part Segmentation. ShapeNet Parts is a subset of ShapeNet (Chang et al., 2015) that consists of 13,998 point cloud objects for train and 2,874 objects for the test from 16 categories and 50 parts. It is designed for the part segmentation task (Yi et al., 2016). Visualization is provided in Figure 10 of some of the renderings used in training the 2D backbone in our pipeline colored with the ground truth segmentation labels." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 506, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 239 + ], + "type": "text", + "content": "ModelNet40: 3D Shape Classification Occlusion Robustness. ModelNet40 (Wu et al., 2015) is composed of 12,311 3D objects (9,843/2,468 in training/testing) labelled with 40 object classes. We sample 2048 points clouds from the objects following previous works (Qi et al., 2017b; Zhao et al., 2020). Visualization is provided in Figure 8 of some of the renderings used in training the 2D backbone in our pipeline." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 253, + 178, + 263 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 253, + 178, + 263 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 178, + 263 + ], + "type": "text", + "content": "B.2 METRICS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 274, + 506, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 506, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 506, + 309 + ], + "type": "text", + "content": "Classification Accuracy. The standard evaluation metric in 3D classification is accuracy. We report overall accuracy (percentage of correctly classified test samples) and average per-class accuracy (mean of all true class accuracies)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "text", + "content": "Retrieval mAP. Shape retrieval is evaluated by mean Average Precision (mAP) over test queries. 
For every query shape " + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_q" + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "text", + "content": " from the test set, AP is defined as " + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "inline_equation", + "content": "AP = \\frac{1}{\\mathrm{GTP}}\\sum_{n}^{N}\\frac{\\mathbb{1}(\\mathbf{S}_n)}{n}" + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "inline_equation", + "content": "GTP" + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "text", + "content": " is the number of ground truth positives, " + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "text", + "content": " is the size of the ordered training set, and " + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "inline_equation", + "content": "\\mathbb{1}(\\mathbf{S}_n) = 1" + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "text", + "content": " if the shape " + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_n" + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "text", + "content": " is from the same class label of query " + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_q" + }, + { + "bbox": [ + 104, + 313, + 506, + 372 + ], + "type": "text", + "content": ". We average the retrieval AP over the test set to measure retrieval mAP." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 376, + 506, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 506, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 506, + 466 + ], + "type": "text", + "content": "Segmentation mIoU. Semantic Segmentation is evaluated by mean Intersection over Union (mIoU) over pixels or points. For every class label, measure the size of the intersection mask between the ground truth points of that label and the predicted points as that label. Then, divide by the size of the union mask of the same label to get IoU. This procedure is repeated over all the labels, and averaging the IoUs gives mIoU. We report two types of mIoUs: Instance-averaged mIoU (averages all mIoUs across all objects) and Category-averaged mIoU (averages all mIoU from shapes of the same category, and then average those across object categories)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 480, + 185, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 480, + 185, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 185, + 491 + ], + "type": "text", + "content": "B.3 BASELINES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 502, + 506, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 502, + 506, + 558 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 506, + 558 + ], + "type": "text", + "content": "Point Cloud Networks. We include PointNet (Qi et al., 2017a), PointNet++ (Qi et al., 2017b), DGCNN (Wang et al., 2019c), PVNet (You et al., 2018), and KPConv (Thomas et al., 2019), Point Transformer (Zhao et al., 2020) and CurveNet (Xiang et al., 2021) as baselines that use point clouds. These methods leverage different convolution operators on point clouds by aggregating local and global point information." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 562, + 504, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 562, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 562, + 504, + 619 + ], + "type": "text", + "content": "Multi-View Networks. We also compare against multi-view classification approaches like MVCNN (Su et al., 2015) and MVTN (Hamdi et al., 2021) as baselines for classification and retrieval. Since there is no available multi-view pipeline for 3D part segmentation, we adopt some of the multi-view segmentation baselines (e.g. Label Fusion (Wang et al., 2019a) and Mean Fusion (Kundu et al., 2020)) for part segmentation to work in the Voint space." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 633, + 257, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 633, + 257, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 257, + 643 + ], + "type": "text", + "content": "B.4 IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "content": "Rendering and Un-Projection. We choose the differentiable point cloud renderer " + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "content": " from Pytorch3D (Ravi et al., 2020) in our pipeline for its speed and compatibility with Pytorch libraries (Paszke et al., 2017). We render multi-view images with size " + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "inline_equation", + "content": "224 \\times 224 \\times 3" + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "content": ". 
We color the points by their normals' values or keep them white if the normals are not available. Following a similar procedure to (Wei et al., 2020; Hamdi et al., 2021), the view-point setup is randomized during training (using " + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "inline_equation", + "content": "M = 8" + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "content": " views) and fixed to spherical views in testing (using " + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "inline_equation", + "content": "M = 12" + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "content": " views)." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 79, + 493, + 376 + ], + "blocks": [ + { + "bbox": [ + 108, + 79, + 493, + 376 + ], + "lines": [ + { + "bbox": [ + 108, + 79, + 493, + 376 + ], + "spans": [ + { + "bbox": [ + 108, + 79, + 493, + 376 + ], + "type": "image", + "image_path": "b72ea466f6ac3834f0aaa6df8cef30defba9035c4d7883401a3643a21dbd1009.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 388, + 504, + 420 + ], + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 420 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, 
+ 420 + ], + "type": "text", + "content": "Figure 7: ScanObjectNN Variants. We show examples of point cloud renderings of different variants of the ScanObjectNN (Uy et al., 2019). These renderings are used in training VointNet for 3D point cloud classification." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "text", + "content": "Architectures. For the 2D backbone, we use ViT (Dosovitskiy et al., 2021) (with pretrained weights from TIMM library (Wightman, 2019)) for classification and DeepLabV3 (Chen et al., 2018) for segmentation. We used parallel heads for each object category for part segmentation since the task is solely focused on parts. We use the 3D cross-entropy loss on the 3D point cloud output and the 2D cross-entropy loss when the loss is defined on the pixels. When used, the linear tradeoff coefficient of the 2D loss term is set to 0.003. To balance the frequency of objects in part segmentation, we multiply the loss by the frequency of the object class of each object we segment. The feature dimension of the VointNet architectures is " + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "inline_equation", + "content": "d = 64" + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "text", + "content": ", and the depth is " + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "inline_equation", + "content": "l_{V} = 4" + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "text", + "content": " layers in " + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "inline_equation", + "content": "h_V" + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "text", + "content": ". 
The main results are based on VointNet (MLP) variant unless otherwise specified. The coordinates " + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "text", + "content": " can be optionally appended to the input view-features " + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}" + }, + { + "bbox": [ + 104, + 441, + 504, + 573 + ], + "type": "text", + "content": ", which can improve the performance but reduce the rotation robustness as we show later in Section C.1 and Table 9." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 578, + 506, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 645 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 645 + ], + "type": "text", + "content": "Training Setup. We train our pipeline in two stages, where we start by training the 2D backbone on the 2D projected labels of the points, then train the full pipeline end-to-end while focusing the training on the VointNet part. We use the AdamW optimizer (Loshchilov & Hutter, 2017) with an initial learning rate of 0.0005 and a step learning rate schedule of " + }, + { + "bbox": [ + 104, + 578, + 506, + 645 + ], + "type": "inline_equation", + "content": "33.3\\%" + }, + { + "bbox": [ + 104, + 578, + 506, + 645 + ], + "type": "text", + "content": " every 12 epochs for 40 epochs. The pipeline is trained with one NVIDIA Tesla V100 GPU. We do not use any data augmentation." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 148, + 503, + 319 + ], + "blocks": [ + { + "bbox": [ + 110, + 148, + 503, + 319 + ], + "lines": [ + { + "bbox": [ + 110, + 148, + 503, + 319 + ], + "spans": [ + { + "bbox": [ + 110, + 148, + 503, + 319 + ], + "type": "image", + "image_path": "37f5bed1a1b60474aaf51dd5ca7f2d0a5227449d29ac440e1539667849c0ab69.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 326, + 506, + 350 + ], + "lines": [ + { + "bbox": [ + 104, + 326, + 506, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 506, + 350 + ], + "type": "text", + "content": "Figure 8: ModelNet40. We show some examples of point cloud renderings of ModelNet40 (Wu et al., 2015) used for 3D classification robustness in our setup." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 110, + 494, + 501, + 630 + ], + "blocks": [ + { + "bbox": [ + 110, + 494, + 501, + 630 + ], + "lines": [ + { + "bbox": [ + 110, + 494, + 501, + 630 + ], + "spans": [ + { + "bbox": [ + 110, + 494, + 501, + 630 + ], + "type": "image", + "image_path": "67afcf6005c307d2f1f9b41ca7f0088ec0f12e2feb5ef06c405e1dae574656c4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 636, + 506, + 662 + ], + "lines": [ + { + "bbox": [ + 104, + 636, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 636, + 506, + 662 + ], + "type": "text", + "content": "Figure 9: ShapeNet Core55. We show some examples of point cloud renderings of ShapeNet Core55 (Chang et al., 2015) used for 3D shape retrieval in our setup." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 117, + 244, + 493, + 511 + ], + "blocks": [ + { + "bbox": [ + 117, + 244, + 493, + 511 + ], + "lines": [ + { + "bbox": [ + 117, + 244, + 493, + 511 + ], + "spans": [ + { + "bbox": [ + 117, + 244, + 493, + 511 + ], + "type": "image", + "image_path": 
"e89f3da343521484611f5cf0479ba70b7aecbbbadd1cf045bfef60c2f4c38f75.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 520, + 504, + 565 + ], + "lines": [ + { + "bbox": [ + 104, + 520, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 504, + 565 + ], + "type": "text", + "content": "Figure 10: ShapeNet Parts. We show some examples of point cloud renderings of ShapeNet Parts (Yi et al., 2016) colored with ground truth segmentation labels. We use these renderings as 2D ground truth to pre-train the 2D backbone " + }, + { + "bbox": [ + 104, + 520, + 504, + 565 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 104, + 520, + 504, + 565 + ], + "type": "text", + "content": " for 2D segmentation before training VointNet's pipeline for 3D segmentation." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 129, + 79, + 478, + 246 + ], + "blocks": [ + { + "bbox": [ + 129, + 79, + 478, + 246 + ], + "lines": [ + { + "bbox": [ + 129, + 79, + 478, + 246 + ], + "spans": [ + { + "bbox": [ + 129, + 79, + 478, + 246 + ], + "type": "table", + "html": "
MethodData TypeClassification \nModelNet40Shape Retrieval \nShapeNetCore
PointNet (Qi et al., 2017a)Points89.2-
PointNet++ (Qi et al., 2017b)Points91.9-
DGCNN (Wang et al., 2019c)Points92.2-
KPConv(Thomas et al., 2019)Points92.9-
PCT(Guo et al., 2021)Points93.3-
CurveNet(Xiang et al., 2021)Points93.8-
ReVGG (Sfikas et al., 2017)M-View-74.9
MVCNN (Su et al., 2015)M-View90.173.5
ViewGCN (Wei et al., 2020)M-View93.378.4
MVTN (Hamdi et al., 2021)M-View93.882.9
VointNet (ours)Voints92.883.3
", + "image_path": "750c7a72dd8d9e59d54fdfc951b9e3037f1c65fd56ebb86bf7e9a166ab3143c6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 139, + 306, + 473, + 411 + ], + "blocks": [ + { + "bbox": [ + 104, + 253, + 506, + 297 + ], + "lines": [ + { + "bbox": [ + 104, + 253, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 253, + 506, + 297 + ], + "type": "text", + "content": "Table 7: 3D Shape Classification and Retrieval. We report VointNet's classification accuracy on ModelNet40 (Wu et al., 2015) and its 3D shape retrieval mAP on ShapeNet Core55 (Chang et al., 2015; Sfikas et al., 2017). Baseline results are reported from (Hamdi et al., 2021; Zhao et al., 2020; Xiang et al., 2021)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 139, + 306, + 473, + 411 + ], + "lines": [ + { + "bbox": [ + 139, + 306, + 473, + 411 + ], + "spans": [ + { + "bbox": [ + 139, + 306, + 473, + 411 + ], + "type": "table", + "html": "
MethodRotation Perturbations Range
±90°±180°
PointNet (Qi et al., 2017a)88.742.538.6
PointNet ++ (Qi et al., 2017b)88.247.939.7
RSCNN (Liu et al., 2019a)90.390.390.3
MVTN (Hamdi et al., 2021)91.790.891.2
VointNet (ours)91.590.991.1
", + "image_path": "41246f32cbd2857f30db87a511231e2d1edb924ba5054097da9bb72efc2a5bd0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 418, + 504, + 450 + ], + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 450 + ], + "type": "text", + "content": "Table 8: Rotation Robustness for 3D Classification. At test time, we randomly rotate objects in ModelNet40 (Wu et al., 2015) around the Y-axis (gravity) with different ranges and report the overall accuracy." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 471, + 250, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 471, + 250, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 250, + 483 + ], + "type": "text", + "content": "C ADDITIONAL RESULTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 496, + 230, + 507 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 496, + 230, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 230, + 507 + ], + "type": "text", + "content": "C.1 MODEL ROBUSTNESS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 517, + 504, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 504, + 573 + ], + "type": "text", + "content": "Rotation Robustness for 3D Classification. We follow the standard practice in 3D shape classification literature by testing the robustness of trained models to perturbations at test time (Liu et al., 2019a; Hamdi et al., 2021). 
We perturb the shapes with random rotations around the Y-axis (gravity-axis) contained within " + }, + { + "bbox": [ + 104, + 517, + 504, + 573 + ], + "type": "inline_equation", + "content": "\\pm 90^{\\circ}" + }, + { + "bbox": [ + 104, + 517, + 504, + 573 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 517, + 504, + 573 + ], + "type": "inline_equation", + "content": "\\pm 180^{\\circ}" + }, + { + "bbox": [ + 104, + 517, + 504, + 573 + ], + "type": "text", + "content": " and report the test accuracy over ten runs in Table 8." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 578, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 732 + ], + "type": "text", + "content": "Rotation Robustness for 3D Segmentation. We follow the previous 3D literature by testing the robustness of trained models to perturbations at test time (Liu et al., 2019a; Hamdi et al., 2021; 2020). We perturb the shapes in ShapeNet Parts with random rotations in " + }, + { + "bbox": [ + 104, + 578, + 506, + 732 + ], + "type": "inline_equation", + "content": "SO(3)" + }, + { + "bbox": [ + 104, + 578, + 506, + 732 + ], + "type": "text", + "content": " at test time (ten runs) and report Ins. mIoU in Table 9. Note how our VointNet performance largely exceeds the baselines in this realistic unaligned scenario. We can augment the training with rotated objects for the baselines, which improves their robustness, but loses performance on the unrated setup. Adding xyz coordinates to the view-features of VointNet improves the performance on an unrotated setup but negatively affects the robustness to rotations. The discrepancy between the Voint results and the results of some point cloud methods is that Voints heavily depend on the underlying 2D backbone and inherit all its biases, especially those from pretraining. 
Hence, the 2D backbone limits what the performance can reach with VointNet. We study the effect of the backbone in detail in Section C.2. Figure 11 shows qualitative 3D segmentation results for VointNet and Mean Fuse (Kundu et al., 2020) as compared to the ground truth." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 141, + 495, + 616 + ], + "blocks": [ + { + "bbox": [ + 108, + 141, + 495, + 616 + ], + "lines": [ + { + "bbox": [ + 108, + 141, + 495, + 616 + ], + "spans": [ + { + "bbox": [ + 108, + 141, + 495, + 616 + ], + "type": "table", + "html": "
Ground TruthVointNet (ours)Mean Fuse (Kundu et al., 2020)
", + "image_path": "c0f67dd099eabf40717ae821ea6c91cf6e25d08f174f16aa5741910c05ec8464.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 624, + 504, + 666 + ], + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 666 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 666 + ], + "type": "text", + "content": "Figure 11: Qualitative Comparison for 3D Part Segmentation. We compare our VointNet 3D segmentation prediction to Mean Fuse (Kundu et al., 2020) that is using the same trained 2D backbone. Note how VointNet distinguishes detailed parts (e.g. the car window frame). Beware that visualization colors can shift if an extra label is predicted (e.g. the motorbike labels are correct)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 139, + 79, + 473, + 242 + ], + "blocks": [ + { + "bbox": [ + 139, + 79, + 473, + 242 + ], + "lines": [ + { + "bbox": [ + 139, + 79, + 473, + 242 + ], + "spans": [ + { + "bbox": [ + 139, + 79, + 473, + 242 + ], + "type": "table", + "html": "
MethodSegmentation UnrotatedUnder Rotation Rotated
PointNet (Qi et al., 2017a)80.136.6 ±0.2
DGCNN (Wang et al., 2019c)80.137.1 ±0.2
PointNet + Aug.65.865.8 ±0.1
DGCNN + Aug.60.760.7 ±0.2
Mean Fuse (Kundu et al., 2020)79.161.6 ±0.1
Label Fuse (Wang et al., 2019a)78.961.0 ±0.1
VointNet (w/o xyz)79.665.4 ±0.1
VointNet (w/o xyz) + Aug.68.068.5 ±0.1
VointNet (w/ xyz)81.261.5 ±0.2
", + "image_path": "995f26b33c9c01e09924d342dbc90441cbc64347a4d93936f5be014a04e28cca.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 250, + 504, + 313 + ], + "lines": [ + { + "bbox": [ + 104, + 250, + 504, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 250, + 504, + 313 + ], + "type": "text", + "content": "Table 9: Rotation Robustness for 3D Part Segmentation. At test time, we randomly rotate objects from ShapeNet Parts (Yi et al., 2016) and report the Ins. mIoUs of our VointNet compared to trained PointNet (Qi et al., 2017a) and DGCNN (Wang et al., 2019c). Note how VointNet's performance largely exceeds the baselines in realistic unaligned scenarios, highlighting the benefit of view dependency. If we use rotation augmentation in training for the baselines, the rotated performance improves, but the unrotated performance drops." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 346, + 228, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 228, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 228, + 357 + ], + "type": "text", + "content": "C.2 DETAILED ANALYSIS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 372, + 504, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 504, + 439 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 504, + 439 + ], + "type": "text", + "content": "Effect of Pretraining. We study the effect of pretraining the 2D backbone C for 3D classification on ModelNet40. Training a ViT with Mean Fuse for 3D classification on ModelNet40 obtains 92.2 test Acc. with ImageNet pretraining and 80.0 test Acc. from scratch. Other multi-view networks, e.g. MVCNN (Su et al., 2015), ViewGCN(Wei et al., 2020), and MVTN(Hamdi et al., 2021) all use ImageNet pretraining, which is not unique to Voints." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 444, + 504, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 444, + 504, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 444, + 504, + 523 + ], + "type": "text", + "content": "Classification Backbone. We study the effect of ablating the 2D backbone C for 3D classification on ModelNet40. We show in Table 10 the performance of VointNet (MLP) when Vit-B (Dosovitskiy et al., 2021) and ResNet-18 (He et al., 2015) are used. We also show that following the per-point classification setup instead of the per-shape for 3D shape classification leads to worse performance for VointNet and the naive multi-view. This is why we used the per-shape approach when adopting VointNet for 3D classification (using one Voint for the entire shape)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 527, + 504, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 527, + 504, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 527, + 504, + 561 + ], + "type": "text", + "content": "Number of points and visibility. Table 11 studies the effect of point number on 3D part segmentation performance, when different numbers of views are used. The visibility ratio is also reported in each case." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 565, + 504, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 565, + 504, + 623 + ], + "spans": [ + { + "bbox": [ + 104, + 565, + 504, + 623 + ], + "type": "text", + "content": "Points color. We colored the points with ground truth normals as in Figure 16, when they are available (ShapeNet Parts), and we used white colors as in Figure 9, when other baselines do not use normals. We ablate the color of the points on VointNet (MLP) with normals colors, white color, and NOCs colors (Wang et al., 2019b). 
We obtain the following segmentation mIoU results: (normals: 80.6), (white: 74.7), and (NOCs: 57.9)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 626, + 504, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 684 + ], + "type": "text", + "content": "Time and Memory Requirements. To assess the contribution of the Voint module, we take a macroscopic look at the time and memory requirements of each component in the pipeline. We record the number of floating-point operations (GFLOPs) and the time of a forward pass for a single input sample. In Table 12, the VointNet module contributes negligibly to the memory requirements compared to multi-view and point networks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "Feature Size " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "(d)" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ". We study the effect of the feature size " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " on the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 12. 
We note that the performance peaks at " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "d = 128" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ", but it is close to what we use in the main results " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "(d = 64)" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 168, + 80, + 442, + 137 + ], + "blocks": [ + { + "bbox": [ + 168, + 80, + 442, + 137 + ], + "lines": [ + { + "bbox": [ + 168, + 80, + 442, + 137 + ], + "spans": [ + { + "bbox": [ + 168, + 80, + 442, + 137 + ], + "type": "table", + "html": "
View Aggregation2D Backbone
ResNet18 (per-shape)ViT-B (per-shape)DeepLabV3 (per-point)
VointNet91.292.810.2
", + "image_path": "5b8250558e6b8a4d19403fa499c51cae4773de01bff308653a28becf579f32e7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 183, + 208, + 425, + 326 + ], + "blocks": [ + { + "bbox": [ + 104, + 146, + 506, + 197 + ], + "lines": [ + { + "bbox": [ + 104, + 146, + 506, + 197 + ], + "spans": [ + { + "bbox": [ + 104, + 146, + 506, + 197 + ], + "type": "text", + "content": "Table 10: Ablation Study for 3D Classification. We study the effect of different 2D backbone for ModelNet40 3D classification task. We compare VointNet's performance to naive multi-view (e.g. MVCNN (Su et al., 2015) or Mean Fuse (Kundu et al., 2020)) using the same 2D backbone. Note that using the per-point classification setup instead of the per-shape for 3D shape classification leads to worse performance for VointNet and the naive multi-view." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 183, + 208, + 425, + 326 + ], + "lines": [ + { + "bbox": [ + 183, + 208, + 425, + 326 + ], + "spans": [ + { + "bbox": [ + 183, + 208, + 425, + 326 + ], + "type": "table", + "html": "
Points #MetricNumber of Views
24812
500visibility99.199.9100100
mIoU69.273.976.076.4
1000visibility98.099.7100100
mIoU69.574.376.577.1
2000visibility95.799.299.899.9
mIoU69.775.077.778.5
", + "image_path": "cc83f3828e47c7e918fde7fc94ade7a6d2d77e3e267c06ff9562780583fa4ed7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 331, + 504, + 363 + ], + "lines": [ + { + "bbox": [ + 104, + 331, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 504, + 363 + ], + "type": "text", + "content": "Table 11: Analysis on Number of Points and Visibility. We show the Instance mIoUs and visibility ratio " + }, + { + "bbox": [ + 104, + 331, + 504, + 363 + ], + "type": "inline_equation", + "content": "(1 - \\frac{\\text{empty}}{\\text{total}})\\%" + }, + { + "bbox": [ + 104, + 331, + 504, + 363 + ], + "type": "text", + "content": " of our VointNet on ShapeNet Parts when varying points # and number of views." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 384, + 504, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 504, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 504, + 441 + ], + "type": "text", + "content": "Model Depth " + }, + { + "bbox": [ + 104, + 384, + 504, + 441 + ], + "type": "inline_equation", + "content": "(l_v)" + }, + { + "bbox": [ + 104, + 384, + 504, + 441 + ], + "type": "text", + "content": ". We study the effect of the model depth " + }, + { + "bbox": [ + 104, + 384, + 504, + 441 + ], + "type": "inline_equation", + "content": "l_v" + }, + { + "bbox": [ + 104, + 384, + 504, + 441 + ], + "type": "text", + "content": " on the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 13. We note that model depth of VointNet does not enhance the performance significantly. 
Our choice of " + }, + { + "bbox": [ + 104, + 384, + 504, + 441 + ], + "type": "inline_equation", + "content": "l_v = 4" + }, + { + "bbox": [ + 104, + 384, + 504, + 441 + ], + "type": "text", + "content": " balances the performance and the memory/computations requirements of VointNet (MLP)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 445, + 504, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 504, + 513 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 504, + 513 + ], + "type": "text", + "content": "Distance to the Object. We study the effect of distance to the object in rendering as in Figure 17 to the performance of VointNet (MLP) in 3D part segmentation on ShapeNet Parts (Yi et al., 2016) and plot the results (with confidence intervals) in Figure 14. We note that our default choice of 1.0 is actually reasonable. This choice of distance shows the object entirely (as illustrated in Figure 17), but also cover the details needed for small parts segmentation (see Figure 11)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 516, + 504, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 504, + 552 + ], + "type": "text", + "content": "Image Size " + }, + { + "bbox": [ + 104, + 516, + 504, + 552 + ], + "type": "inline_equation", + "content": "(H,W)" + }, + { + "bbox": [ + 104, + 516, + 504, + 552 + ], + "type": "text", + "content": ". We study the effect of the image size " + }, + { + "bbox": [ + 104, + 516, + 504, + 552 + ], + "type": "inline_equation", + "content": "H\\& W" + }, + { + "bbox": [ + 104, + 516, + 504, + 552 + ], + "type": "text", + "content": " on the performance of Mean Fuse (Kundu et al., 2020) baseline when training the 2D backbone for 3D part segmentation. We plot the results (with confidence intervals) in Figure 15." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 555, + 504, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 589 + ], + "type": "text", + "content": "Number of Views on Classification. We study the effect of the number of views (M) on classification accuracy on ModelNet40 Wu et al. (2015) of VointNet and report results in Table 13." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 594, + 504, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 617 + ], + "type": "text", + "content": "Unprojection Operation Speed. We evaluate the speed of the unprojection operation " + }, + { + "bbox": [ + 104, + 594, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\Phi_{\\mathbf{B}}" + }, + { + "bbox": [ + 104, + 594, + 504, + 617 + ], + "type": "text", + "content": " and report average latency of 10,000 runs (in ms) in Table 14." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 622, + 504, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 622, + 504, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 622, + 504, + 657 + ], + "type": "text", + "content": "Unprojection Operation Speed. We evaluate the speed of the point cloud renderer " + }, + { + "bbox": [ + 104, + 622, + 504, + 657 + ], + "type": "inline_equation", + "content": "\\mathbb{R}" + }, + { + "bbox": [ + 104, + 622, + 504, + 657 + ], + "type": "text", + "content": " used in Voint pipeline from Pytroch3D Ravi et al. (2020) and report average latency of 1,000 renderings (in ms/image) in Table 15." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 669, + 208, + 680 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 669, + 208, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 669, + 208, + 680 + ], + "type": "text", + "content": "C.3 VISUALIZATIONS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "In Figure 16 and 17, we visualize the multi-view renderings of the point clouds along with the 2D learned features based on the DeepLabV3 (Chen et al., 2018) backbone. These features are then unprojected and transformed by VointNet to obtain 3D semantic labels." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 138, + 133, + 473, + 320 + ], + "blocks": [ + { + "bbox": [ + 138, + 133, + 473, + 320 + ], + "lines": [ + { + "bbox": [ + 138, + 133, + 473, + 320 + ], + "spans": [ + { + "bbox": [ + 138, + 133, + 473, + 320 + ], + "type": "table", + "html": "
NetworkGFLOPsTime (ms)Parameters # (M)
MVCNN (Su et al., 2015)43.7239.8911.20
ViewGCN (Wei et al., 2020)44.1926.0623.56
ResNet 18 (He et al., 2015)3.643.7011.20
ResNet 50 (He et al., 2015)8.249.4223.59
ViT-B (Dosovitskiy et al., 2021)33.7012.4686.57
ViT-L (Dosovitskiy et al., 2021)119.3029.28304.33
FCN (Long et al., 2015)53.1310.3432.97
DeeplabV3 (Chen et al., 2018)92.6120.6258.64
PointNet (Qi et al., 2017a)1.784.243.50
DGCNN (Wang et al., 2019c)10.420.9516.350
MVTN (Hamdi et al., 2021)1.784.243.5
VointNet (MLP)1.902.900.04
VointNet (GCN)16.1832.100.05
VointNet (GAT)32.0568.710.07
Full Voint pipeline94.5123.5058.68
", + "image_path": "d829431daf1246a7ceb47159b5c196960df1873d4a760e44d8ce9658fb1116aa.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 328, + 504, + 361 + ], + "lines": [ + { + "bbox": [ + 104, + 328, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 504, + 361 + ], + "type": "text", + "content": "Table 12: Time and Memory Requirements. We assess the contribution of the Voint module to the time and memory requirements in the multi-view and point cloud pipeline. Note that VointNet (shared MLP) is almost 100 times smaller than PointNet (Qi et al., 2017a)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 183, + 491, + 404, + 635 + ], + "blocks": [ + { + "bbox": [ + 183, + 491, + 404, + 635 + ], + "lines": [ + { + "bbox": [ + 183, + 491, + 404, + 635 + ], + "spans": [ + { + "bbox": [ + 183, + 491, + 404, + 635 + ], + "type": "image", + "image_path": "3413317cf245b6b22fd5a908edc91f26e73ed14cffeb764360672238e7bce506.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "lines": [ + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "type": "text", + "content": "Figure 12: The Effect of Feature Size " + }, + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "type": "text", + "content": ". We plot Ins. mIoU of 3D segmentation vs. the feature size " + }, + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "type": "text", + "content": " used in training on ShapeNet Parts (Yi et al., 2016). 
We note that the performance peaks at " + }, + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "type": "inline_equation", + "content": "d = 128" + }, + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "type": "text", + "content": ", but it is close to what we use in the main results (" + }, + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "type": "inline_equation", + "content": "d = 64" + }, + { + "bbox": [ + 104, + 641, + 504, + 677 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 310, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 310, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 310, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 189, + 99, + 404, + 242 + ], + "blocks": [ + { + "bbox": [ + 189, + 99, + 404, + 242 + ], + "lines": [ + { + "bbox": [ + 189, + 99, + 404, + 242 + ], + "spans": [ + { + "bbox": [ + 189, + 99, + 404, + 242 + ], + "type": "image", + "image_path": "01e61314143ad25c176cabb5dc11f6aaf12fcedd834931452f72552917cdb345.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 249, + 506, + 297 + ], + "lines": [ + { + "bbox": [ + 104, + 249, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 506, + 297 + ], + "type": "text", + "content": "Figure 13: The Effect of Model Depth " + }, + { + "bbox": [ + 104, + 249, + 506, + 297 + ], 
+ "type": "inline_equation", + "content": "l_{v}" + }, + { + "bbox": [ + 104, + 249, + 506, + 297 + ], + "type": "text", + "content": ". We plot Ins. mIoU of 3D segmentation vs. the model depth " + }, + { + "bbox": [ + 104, + 249, + 506, + 297 + ], + "type": "inline_equation", + "content": "l_{v}" + }, + { + "bbox": [ + 104, + 249, + 506, + 297 + ], + "type": "text", + "content": " used in training on ShapeNet Parts (Yi et al., 2016). We note that model depth of VointNet does not enhance the performance significantly. Our choice of " + }, + { + "bbox": [ + 104, + 249, + 506, + 297 + ], + "type": "inline_equation", + "content": "l_{v} = 4" + }, + { + "bbox": [ + 104, + 249, + 506, + 297 + ], + "type": "text", + "content": " balances the performance and the memory/computations requirements of VointNet (MLP)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 183, + 321, + 404, + 466 + ], + "blocks": [ + { + "bbox": [ + 183, + 321, + 404, + 466 + ], + "lines": [ + { + "bbox": [ + 183, + 321, + 404, + 466 + ], + "spans": [ + { + "bbox": [ + 183, + 321, + 404, + 466 + ], + "type": "image", + "image_path": "1b0ad412327d57e53c249108a0b45a1fb42553e42323dd97babc2396302b514e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 472, + 504, + 529 + ], + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 529 + ], + "type": "text", + "content": "Figure 14: The Effect of Distance to the Object. We plot Ins. mIoU of 3D segmentation vs. the distance to the object used in inference on ShapeNet Parts (Yi et al., 2016). We note that our default choice of 1.0 is actually reasonable. This choice of distance shows the object entirely (as illustrated in Figure 17), but also cover the details needed for small parts segmentation (see Figure 11)." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 183, + 555, + 404, + 699 + ], + "blocks": [ + { + "bbox": [ + 183, + 555, + 404, + 699 + ], + "lines": [ + { + "bbox": [ + 183, + 555, + 404, + 699 + ], + "spans": [ + { + "bbox": [ + 183, + 555, + 404, + 699 + ], + "type": "image", + "image_path": "e3aa40f0be56a237c5c00ea85726607b465624d13e968d151ab38d5a45b193ed.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 704, + 504, + 728 + ], + "lines": [ + { + "bbox": [ + 104, + 704, + 504, + 728 + ], + "spans": [ + { + "bbox": [ + 104, + 704, + 504, + 728 + ], + "type": "text", + "content": "Figure 15: The Effect of Image Size " + }, + { + "bbox": [ + 104, + 704, + 504, + 728 + ], + "type": "inline_equation", + "content": "H, W" + }, + { + "bbox": [ + 104, + 704, + 504, + 728 + ], + "type": "text", + "content": ". We plot Ins. mIoU of 3D segmentation vs. the image size used in inference on ShapeNet Parts (Yi et al., 2016)." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 105, + 213, + 495, + 538 + ], + "blocks": [ + { + "bbox": [ + 105, + 213, + 495, + 538 + ], + "lines": [ + { + "bbox": [ + 105, + 213, + 495, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 495, + 538 + ], + "type": "image", + "image_path": "177dcc285a4a120e89a761450d372408dc02ebe73f3aab8f9b2f0e8c97be5aba.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "lines": [ + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "type": "text", + "content": "Figure 16: Multi-view Projected Segmentation 1. We show how, after rendering points, we can segment in the image space. For each example, we show (INPUT): the projections of the points (colored with normals) used in training with random view-points. (PRED 2D): the segmentation prediction of the 2D backbone (DeepLabV3) (Chen et al., 2018). (PRED 3D): the unprojected 3D segmentation prediction. 
" + }, + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "type": "inline_equation", + "content": "(GT)" + }, + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "type": "text", + "content": ": the 3D segmentation ground truth." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 104, + 213, + 498, + 538 + ], + "blocks": [ + { + "bbox": [ + 104, + 213, + 498, + 538 + ], + "lines": [ + { + "bbox": [ + 104, + 213, + 498, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 498, + 538 + ], + "type": "image", + "image_path": "5399f426231ab43347b5b869edd7def9af0be33cbd699d4076f8bc3b337f0b29.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "lines": [ + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "type": "text", + "content": "Figure 17: Multi-view Projected Segmentation 2. We show how, after rendering points, we can segment in the image space. For each example, we show (INPUT): the projections of the points (colored with normals) used in training with random view-points. (PRED 2D): the segmentation prediction of the 2D backbone (DeepLabV3) (Chen et al., 2018). 
(PRED 3D): the unprojected 3D segmentation prediction. " + }, + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "type": "inline_equation", + "content": "(GT)" + }, + { + "bbox": [ + 104, + 544, + 504, + 597 + ], + "type": "text", + "content": ": the 3D segmentation ground truth." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 179, + 141, + 433, + 194 + ], + "blocks": [ + { + "bbox": [ + 179, + 141, + 433, + 194 + ], + "lines": [ + { + "bbox": [ + 179, + 141, + 433, + 194 + ], + "spans": [ + { + "bbox": [ + 179, + 141, + 433, + 194 + ], + "type": "table", + "html": "
MethodNumber of Views
46810
VointNet (Cls. Acc.)90.390.892.092.3
", + "image_path": "7fe5f73dd0d8516e3571c811e3776eddb0ec78c3b93a42b81b5126691366d2e7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 127, + 352, + 484, + 416 + ], + "blocks": [ + { + "bbox": [ + 104, + 201, + 504, + 222 + ], + "lines": [ + { + "bbox": [ + 104, + 201, + 504, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 504, + 222 + ], + "type": "text", + "content": "Table 13: Effect of the Number of Views on Classification. We report the classification accuracy of VointNet vs. the number of views (M) used in the training on ModelNet40." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 352, + 484, + 416 + ], + "lines": [ + { + "bbox": [ + 127, + 352, + 484, + 416 + ], + "spans": [ + { + "bbox": [ + 127, + 352, + 484, + 416 + ], + "type": "table", + "html": "
MethodNumber of Views
124681012
Features Unprojection3.05.311.4515.717.229.724.0
Labels Unprojection2.62.53.43.13.03.23.6
", + "image_path": "b5fa20ab585c027c5157c8a5ec86b579f3ba75389f4a0bb7c53af833c0695071.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 127, + 585, + 484, + 639 + ], + "blocks": [ + { + "bbox": [ + 104, + 424, + 504, + 456 + ], + "lines": [ + { + "bbox": [ + 104, + 424, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 424, + 504, + 456 + ], + "type": "text", + "content": "Table 14: Unprojection Operation Speed. We report the average latency (in ms) over 10,000 runs of the unprojection operation with its two forms: features unprojection (used in mean) and labels unprojection (used in mode)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 585, + 484, + 639 + ], + "lines": [ + { + "bbox": [ + 127, + 585, + 484, + 639 + ], + "spans": [ + { + "bbox": [ + 127, + 585, + 484, + 639 + ], + "type": "table", + "html": "
CriteriaNumber of Points
1e21e31e41e51e6
Point Rendering Speed (ms/image)7.27.67.710.437.7
", + "image_path": "6e94e575c88bc9441fdd34764c3925897250def2c383d42d063aecb560801196.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 646, + 504, + 668 + ], + "lines": [ + { + "bbox": [ + 104, + 646, + 504, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 646, + 504, + 668 + ], + "type": "text", + "content": "Table 15: Point Rendering Speed. We report the average rendering speed (in ms/image) over 1,000 renderings of the point cloud renderer Ravi et al. (2020) used in Voint clouds." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 311, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Volumetric Optimal Transportation by Fast Fourier Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_content_list.json b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..ceeaf217c3e10210feed2bfdff648b7ad48e8c13 --- /dev/null +++ b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_content_list.json @@ -0,0 +1,5182 @@ +[ + { + "type": "text", + "text": "VOLUMETRIC OPTIMAL TRANSPORTATION BY FAST 
FOURIER TRANSFORM", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Na Lei*", + "bbox": [ + 184, + 170, + 248, + 183 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dalian University of Technology \nnalei@dlut.edu.cn", + "bbox": [ + 181, + 184, + 400, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dongsheng An", + "bbox": [ + 594, + 170, + 712, + 185 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Stony Brook University\ndoan@cs.stonybrook.edu", + "bbox": [ + 594, + 185, + 813, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Min Zhang", + "bbox": [ + 181, + 233, + 264, + 248 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhejiang University min_zhang@zju.edu.cn", + "bbox": [ + 181, + 248, + 377, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaoyin Xu", + "bbox": [ + 398, + 233, + 482, + 248 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Harvard Medical School \nxxu@bwh.harvard.edu", + "bbox": [ + 398, + 248, + 588, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xianfeng Gu", + "bbox": [ + 609, + 233, + 702, + 248 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Stony Brook University gu@cs.stonybrook.edu", + "bbox": [ + 609, + 248, + 808, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 311, + 545, + 327 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The optimal transportation map finds the most economical way to transport one probability measure to another, and it has been applied in a broad range of applications in machine learning and computer vision. By the Brenier theory, computing the optimal transport map is equivalent to solving a Monge-Ampère equation, which is highly non-linear. Therefore, the computation of optimal transportation maps is intrinsically challenging. 
In this work, we propose a novel and powerful method, the FFT-OT (fast Fourier transform-optimal transport), to compute the 3-dimensional OT problems. The method is based on several key ideas: first, the Monge-Ampère equation is linearized to a sequence of linear elliptic PDEs with spacial and temporal variant coefficients; second, the obliqueness property of optimal transportation maps is reformulated as a Neumann boundary condition; and third, the variant coefficient elliptic PDEs are approximated by constant coefficient elliptic PDEs and solved by FFT on GPUs. We also prove that the algorithm converges linearly. Experimental results show that the FFT-OT algorithm is more than a hundred times faster than the conventional methods based on the convex geometry. Furthermore, the method can be directly applied for sampling from complex 3D density functions in machine learning and magnifying the volumetric data in medical imaging.", + "bbox": [ + 228, + 344, + 767, + 595 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 623, + 336, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Optimal transportation (OT) transports one probability measure to another in the most economical way, and it plays a fundamental role in areas like machine learning Courty et al. (2017); Altschuler et al. (2019), computer vision Arjovsky et al. (2017); Tolstikhin et al. (2018); An et al. (2020), and computer graphics Solomon et al. (2015); Nader & Guennebaud (2018). Given a Riemannian manifold $X$ , all the probability distributions on $X$ form an infinite dimensional space $\\mathcal{P}(X)$ . Given any two distributions $\\mu, \\nu \\in \\mathcal{P}(X)$ , the optimal transportation map defines a distance between them, and the McCann interpolation McCann (1997) defines the geodesic connecting them. 
Hence optimal transportation equips $\\mathcal{P}(X)$ with a Riemannian metric and defines its covariant differentiation, which provides a variational calculus framework for optimization in it.", + "bbox": [ + 169, + 655, + 826, + 781 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As the optimal transportation problem is highly non-linear, it is quite challenging to compute the OT maps. Recently, researchers have developed many algorithms. The geometric variational approach Aurenhammer et al. (1998); Gu et al. (2016); Levy (2015) based on the Brenier theorem Brenier (1991) is capable of achieving high accuracy for low dimensional problems, but it requires complicated geometric data structure and the storage complexity grows exponentially as the dimension increases. The Sinkhorn method Cuturi (2013) based on the Kantorovich theorem adds an entropic regularizer to the primal problem and can handle high dimensional tasks, but it suffers from the intrinsic approximation error.", + "bbox": [ + 169, + 787, + 826, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* indicates equal contribution", + "bbox": [ + 189, + 910, + 370, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We propose a novel method to tackle this challenging problem through Fast Fourier Transformation (FFT). According to the Brenier theorem Brenier (1991), under the quadratic distance cost, the optimal transportation map is the gradient of the Brenier potential, which satisfies the Monge-Ampère equation. With the continuity method Delanoë (1991), the Monge-Ampère equation can be linearized as a sequence of elliptic partial differential equations (PDEs) with spacial and temporal variant coefficients. 
By iteratively solving the linearized Monge-Ampère equations, we can obtain the OT map. Specifically, we propose to approximate the linearized Monge-Ampère equation by constant coefficient elliptic PDEs and solve them using the FFT on GPUs.", + "bbox": [ + 169, + 103, + 826, + 217 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our proposed FFT-OT method has many merits: (i) it is generalizable for arbitrary dimension; (ii) it has a linear convergence rate, namely the approximation error decays exponentially fast; (iii) in each iteration, the computational complexity of FFT is $O(n \\log n)$ , thus our algorithm can solve large scale OT problems; and (iv) it is highly parallelable and can be efficiently implemented on GPUs. We demonstrate the efficiency of the FFT-OT algorithm by solving the volumetric OT problems for machine learning and medical imaging applications including sampling from given 3D density functions and volumetric magnifier. The algorithm also has its own limitations: (i) although it can be generalized to any dimensions, the storage complexity increases exponentially with respect to the dimension, so its power is limited by the memory size of the GPUs; (ii) Since the algorithm uses FFT, the current version of the method only works well for continuous density functions. (iii) In this work, we mainly focus on the computation of the OT map from the uniform distribution to another arbitrary continuous distribution. To extend the method to find the OT map between any two continuous measures, we can compute two OT maps from the uniform distribution to the both continuous measures, then combine them together. 
The combination will give a reasonable approximation of the OT map Nader & Guennebaud (2018).", + "bbox": [ + 169, + 220, + 826, + 431 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Though Lei and Gu Lei & Gu (2021) also uses FFT to solve the 2-dimensional OT problem, our method differs their works in the following two aspects: (i) Lei and Gu's method uses the fixed point method to compute the 2D OT problems, ours is based on the linearization of the Monge-Ampère operator to solve the 3D OT problems, these are two different methodologies in PDE theory; (ii) In our paper, we also provide the theoretical convergence analysis of the proposed method. For more detailed analysis and related work, please refer to the Appendix A.", + "bbox": [ + 169, + 436, + 826, + 523 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 OPTIMAL TRANSPORTATION THEORY", + "text_level": 1, + "bbox": [ + 171, + 541, + 519, + 556 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we review the fundamental concepts and theorems of the OT problem and the Monge-Amperè equation, more details can be found in Villani (2008).", + "bbox": [ + 169, + 571, + 823, + 602 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Optimal Transportation Map and the Monge-Ampère equation Suppose the source domain $\\Omega$ is an open set in $\\mathbb{R}^d$ with the probability measure $\\mu$ , the target domain $\\Sigma$ is with the probability measure $\\nu$ . Both $\\mu$ and $\\nu$ have density functions $d\\mu(x) = f(x)dx$ and $d\\nu(y) = g(y)dy$ , respectively, with the equal total mass: $\\int_{\\Omega} f(x)dx = \\int_{\\Sigma} g(y)dy$ , which is called the balance condition.", + "bbox": [ + 169, + 614, + 823, + 674 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Suppose $T: \\Omega \\to \\Sigma$ is a measurable map. 
The mapping $T$ is called measure preserving and denoted as $T_{\\#} \\mu = \\nu$ if the following relation", + "bbox": [ + 169, + 676, + 826, + 708 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mu (T ^ {- 1} (A)) = \\nu (A) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 433, + 712, + 823, + 729 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "for every Borel subset $A \\subset \\Sigma$ . A cost function $c: \\Omega \\times \\Sigma \\to \\mathbb{R}$ measures the transportation cost for transporting the unit mass from $x \\in \\Omega$ to $y \\in \\Sigma$ .", + "bbox": [ + 169, + 733, + 823, + 762 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Problem 1 (Monge). The optimal transportation problem finds the measure preserving map with the minimal total transportation cost,", + "bbox": [ + 169, + 765, + 826, + 795 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {T _ {\\#} \\mu = \\nu} \\int_ {\\Omega} c (x, T (x)) f (x) d x\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 797, + 596, + 830 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The solution to the Monge's problem is called the optimal transport map between $\\mu$ and $\\nu$ . The existence, uniqueness and regularity of OT maps depend on the boundedness and the continuity of the density functions, the convexity of the supporting domains, the continuity of their boundaries, and the cost function. In our current work, we focus on the similar situation in Saumier et al. 
(2013),", + "bbox": [ + 169, + 842, + 826, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- The cost function is quadratic Euclidean distance $c(x, y) = \\| x - y \\|^2 / 2$ ;", + "bbox": [ + 215, + 907, + 710, + 926 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The supports of the source and the target measures are the canonical cube $\\Omega = [-1, 1]^3$ , which is uniformly convex;", + "- The source and the target measures $\\mu, \\nu$ are absolutely continuous with respect to the Lebesgue measure, their densities $f, g$ are positive and bounded away from zero;" + ], + "bbox": [ + 215, + 103, + 821, + 165 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n0 < m < f, g < M,\n$$\n", + "text_format": "latex", + "bbox": [ + 457, + 167, + 596, + 184 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and $f,g$ are of class $C^\\alpha (\\Omega)$", + "bbox": [ + 228, + 186, + 419, + 203 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- The boundary condition is second boundary condition (OT boundary condition), $T(\\Omega) = \\Omega$ .", + "bbox": [ + 215, + 205, + 823, + 234 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Then according to (Villani (2003) Theorem 14.4, Saumier et al. (2013) Theorem 2.1), the OT maps $T: \\Omega \\to \\Omega$ exists and is unique and invertible ( $\\mu$ a.e), and the Brenier potential is of class $C^{2,\\beta}(\\bar{\\Omega})$ form some $0 < \\beta < \\alpha$ .", + "bbox": [ + 169, + 244, + 823, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Theorem 2. Assume that $\\Omega, \\mu, \\nu, f$ and $g$ are defined as above. 
Then there exists a convex function $u: \\Omega \\to \\mathbb{R}$ , $u \\in C^{2,\\beta}(\\Omega)$ for some $0 < \\beta < \\alpha$ , such that $\\nabla u$ pushes $\\mu$ forward to $\\nu$ , $(\\nabla u)_{\\#} \\mu = \\nu$ . Moreover, $\\nabla u$ is unique and invertible ( $\\mu$ a.e.), and its inverse $\\nabla v$ satisfies $(\\nabla v)_{\\#} \\nu = \\mu$ .", + "bbox": [ + 169, + 290, + 823, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We call such a convex function $u$ the Brenier potential, it satisfies the Monge-Ampère equation,", + "bbox": [ + 169, + 343, + 800, + 358 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\det D ^ {2} u (x) = \\frac {f (x)}{g \\circ \\nabla u (x)}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 362, + 821, + 391 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "with the boundary condition $\\nabla u(\\Omega) = \\Sigma$ . Then finding the optimal transportation map is equivalent to solving the corresponding Monge-Ampère equation. In the current work, the target measure is always the Lebesgue measure, and the source density $f$ is of class $C^{2,\\alpha}(\\Omega)$ .", + "bbox": [ + 169, + 393, + 823, + 439 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Linearized Monge-Ampère Operator The Monge-Ampère operator is defined as", + "bbox": [ + 169, + 450, + 725, + 467 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {M A} [ u ] = \\det D ^ {2} u,\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 470, + 558, + 488 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "which is highly non-linear. 
It can be linearized as following:", + "bbox": [ + 169, + 491, + 570, + 506 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {M A} [ u + \\varepsilon v ] = \\det (D ^ {2} u + \\varepsilon D ^ {2} v) \\approx \\det D ^ {2} u + \\varepsilon \\operatorname {T r a c e} (\\operatorname {A d j} (D ^ {2} u) \\cdot D ^ {2} v), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 271, + 508, + 823, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\operatorname{Adj}(A)$ is the adjoint (co-factor) matrix of $A$ , $\\operatorname{Adj}(A) := \\det(A)A^{-T}$ . Therefore the linearized Monge-Ampère operator is defined as", + "bbox": [ + 169, + 529, + 823, + 559 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {D M A} _ {u} [ v ] := \\operatorname {T r a c e} \\left(\\operatorname {A d j} \\left(D ^ {2} u\\right) \\cdot D ^ {2} v\\right) = \\sum_ {p, q = 1} ^ {d} u ^ {p q} (x) \\partial_ {p} \\partial_ {q} v (x), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 563, + 823, + 602 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $(u^{pq}) = \\mathrm{Adj}(D^2 u)$ is the adjoint matrix of the Hessian of $u$ , and $\\partial_p\\partial_q\\coloneqq \\frac{\\partial^2}{\\partial x_p\\partial x_q}$ .", + "bbox": [ + 169, + 606, + 754, + 630 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Continuity Method For simplicity, we assume the source domain coincides with the target domain, that is $\\Omega = \\Sigma$ , and the target density is $g(x) \\equiv 1$ . The Monge-Ampère equation Eqn. (2) is simplified as $\\operatorname{det}D^{2}u(x) = f(x)$ . Define a flow of density as", + "bbox": [ + 169, + 642, + 823, + 685 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\rho (x, t) = (1 - t) + t f (x), \\quad t \\in [ 0, 1 ]. 
\\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 688, + 823, + 704 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The corresponding flow of the Brenier potentials is $u(x,t):\\Omega \\times [0,1]\\to \\mathbb{R}$", + "bbox": [ + 169, + 705, + 676, + 722 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\det D _ {x} ^ {2} u (x, t) = \\rho (x, t), \\quad s. t. \\nabla_ {x} u (x, t) (\\Omega) = \\Omega ,\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 726, + 663, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $D_x^2 u(x,t)$ is the Hessian of $u(x,t)$ with respect to $x$ , and $u(x,1)$ is the solution to the initial Monge-Ampère equation Eqn. (2). Take the derivative w.r.t. time $t$ on both sides of the linearized Monge-Ampère operator Eqn. (4), we obtain an elliptic PDE with the spacial and temporal variant coefficients of the unknown $v(x,t) \\coloneqq \\dot{u} (x,t)$ , namely the \"velocity\" of the Brenier potential,", + "bbox": [ + 169, + 746, + 823, + 804 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {D M A} _ {u} [ v ] = \\sum_ {p, q = 1} ^ {d} u ^ {p q} (x, t) \\partial_ {p} \\partial_ {q} v (x, t) = \\frac {\\partial}{\\partial t} \\rho (x, t) = f (x) - 1. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 808, + 823, + 845 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "At time $t = 0$ , the initial Brenier potential is known as $u(x,0) = \\frac{1}{2}\\| x\\|^2$ . Suppose at time $t$ , we have obtained $u(x,t)$ already, then we can compute the adjoint matrix $u^{pq}(x,t)$ of the Hessian $D_x^2 u(x,t)$ and solve Eqn. (6) to get the velocity $v(x,t) = \\dot{u} (x,t)$ . In turn, we move forward to time $t + \\delta t$ , and update $u(x,t + \\delta t)$ by $u(x,t) + \\dot{u} (x,t)\\delta t$ . 
By repeating this procedure, eventually we reach time $t = 1$ and obtain the solution $u(x)\\coloneqq u(x,1)$ to the initial Monge-Ampère Eqn. (2).", + "bbox": [ + 169, + 851, + 825, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Obliqueness Boundary Condition Suppose the boundary of $\\Omega$ is $C^1$ almost everywhere, therefore at a $C^1$ point $x\\in \\partial \\Omega$ , the outer normal $\\mathbf{n}(x)$ is well defined. For almost every boundary point $x\\in \\partial \\Omega$ , the obliqueness condition is represented as", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\langle \\mathbf {n} (x), \\mathbf {n} (\\nabla u (x)) \\rangle \\geq 0. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 424, + 152, + 823, + 169 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Suppose $\\Omega$ is a cuboid and has 6 faces, if a boundary point $x\\in \\partial \\Omega$ is on a face, by the cyclic monotonicity of the map and the strict convexity of $u$ Villani (2008), its image $\\nabla u(x)$ must be on the same face of $x$ , namely,", + "bbox": [ + 169, + 174, + 823, + 215 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\langle \\nabla u (x) - x, \\mathbf {n} (x) \\rangle = 0. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 217, + 823, + 231 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We can rewrite the Brenier potential as $u(x_{1},x_{2},\\ldots ,x_{d}) = \\frac{1}{2}\\sum_{i = 1}^{d}x_{i}^{2} + v(x_{1},\\dots ,x_{d})$ , then $\\nabla u(x) - x = \\nabla v(x)$ . By Eqn. 
(8), $v(x)$ satisfies the Neumann boundary condition,", + "bbox": [ + 169, + 234, + 823, + 266 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial v}{\\partial \\mathbf {n}} (x) = 0, \\quad x \\in \\partial \\Omega . \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 424, + 272, + 823, + 299 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similarly, the velocity of the (modified) Brenier potential $v$ in Eqn. (6) also satisfies the Neumann boundary condition. The analysis about the existence and regularity of the solutions to Eqn. (6) with boundary condition Eqn. (9) can be found in the supplementary material.", + "bbox": [ + 169, + 305, + 823, + 348 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 COMPUTATIONAL ALGORITHM", + "text_level": 1, + "bbox": [ + 171, + 367, + 464, + 383 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here we introduce the 3-dimensional FFT-OT algorithm, which can be generalized to any dimensions. We approximate the Monge-Ampère equation by a sequence of constant coefficient elliptic PDEs, and solve them by FFT on GPUs. More detailed analysis about the solution of the discretized Monge-Ampère equation, and the proofs of the lemmas and theorems are given by Appendix B.", + "bbox": [ + 169, + 398, + 823, + 455 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 CONTINUITY METHOD FOR SOLVING THE MONGE-AMPERE EQUATION", + "text_level": 1, + "bbox": [ + 171, + 470, + 709, + 486 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "By using the continuity method, we can solve the Monge-Ampère equation iteratively. For simplicity, we assume the target measure is the Lebesgue's measure with $g \\equiv 1$ . 
At the $n$ -th iteration, the Brenier potential is represented as $\\frac{1}{2} \\| x \\|^2 + u_n(x)$ , its Hessian matrix is $H_n(x) \\coloneqq \\mathrm{I} + D^2 u_n(x)$ , the corresponding density function is defined as the determinant of the Hessian $\\rho_n = \\operatorname*{det}(H_n)$ , and the velocity of the Brenier potential is $v_n(x)$ . In the beginning, the Brenier potential $u_0(x)$ is zero, the Hessian is $H_0 = \\mathrm{I}$ and the density is $\\rho_0 = 1$ . At the $n$ -th step, we compute the adjoint matrix $[H_n^{pq}(x)]$ of the Hessian matrix $H_n(x)$ for any $x \\in \\Omega$ . According to Eqn. (3), the velocity $v_n(x)$ satisfies the variant coefficient elliptic PDE induced by the linearized Monge-Ampère operator,", + "bbox": [ + 169, + 497, + 823, + 609 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {D M A} _ {u _ {n}} [ v _ {n} ] = \\sum_ {p, q = 0} ^ {2} H _ {n} ^ {p q} (x) \\partial_ {p} \\partial_ {q} v _ {n} (x) = \\frac {1}{\\tau} \\left(f (x) - \\rho_ {n} (x)\\right). \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 617, + 823, + 655 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that the right hand side of Eqn. (6) is the difference between the initial and the target densities, whereas here it is replaced by the difference between the initial and the current densities. The step length parameter $\\tau \\geq 1$ can be chosen to guarantee the convergence Loepers & Rapetti (2005).", + "bbox": [ + 169, + 662, + 823, + 705 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The elliptic PDE Eqn. (10) is with spatially variant coefficients. Although the traditional finite element method (FEM) can solve it using the GMRES algorithm Saad (2003), this algorithm can not be directly accelerated by GPUs. To overcome this difficulty, we approximate Eqn. 
(10) by a much simpler elliptic PDE with constant coefficients, which can be directly solved using the following FFT-OT algorithm pipeline Alg. 1 on GPUs in Appendix C.", + "bbox": [ + 169, + 710, + 823, + 782 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "At the $n$ -th iteration, after obtaining the adjoint matrix $[H_n^{pq}(x)], x \\in \\Omega$ , we compute the mean adjoint matrix $[\\bar{H}_n^{pq}(x)]$", + "bbox": [ + 169, + 787, + 823, + 818 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {H} _ {n} ^ {p q} := \\frac {\\int_ {\\Omega} H _ {n} ^ {p q} (x) \\rho_ {n} (x) d x}{\\int_ {\\Omega} \\rho_ {n} (x) d x}, \\quad p, q = 0, 1, 2 \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 823, + 823, + 856 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and replace the elliptic PDE Eqn.(10) with variant coefficients by the elliptic PDE with constant coefficients,", + "bbox": [ + 169, + 862, + 823, + 888 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\overline {{\\mathrm {D M A}}} _ {u _ {n}} [ v _ {n} ] = \\sum_ {p, q = 0} ^ {2} \\bar {H} _ {n} ^ {p q} \\partial_ {p} \\partial_ {q} v _ {n} (x) = \\frac {1}{\\tau} (f (x) - \\rho_ {n} (x)), \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 890, + 823, + 926 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\overline{\\mathrm{DMA}}$ is called the mean linearized Monge-Ampère operator.", + "bbox": [ + 169, + 103, + 617, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Then we solve the constant coefficient elliptic PDE Eqn. (12) by FFT Algorithm Alg. 2 in Appendix C. Although the original variant coefficient PDE Eqn. 
(10) is replaced by its constant coefficient approximation Eqn. (12), the algorithm still converges to the solution with a linear convergence rate. This replacement allows the whole algorithm to be solved by FFT on GPUs, which greatly improves the computational efficiency.", + "bbox": [ + 169, + 125, + 823, + 194 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Theorem 3 (main). Given a domain $\\Omega \\subset \\mathbb{R}^d$ , which is a canonical cuboid $\\Omega = [-1,1]^d$ , and a positive density function $f:\\Omega \\to \\mathbb{R}$ with the balance condition $\\int_{\\Omega}f(x)dx = \\int_{\\Omega}dx$ , suppose the mirror reflection extension Eqn. (14) of $f$ to the flat torus $\\tilde{f}:\\mathbb{T}^n\\to \\mathbb{R}$ is $C^\\alpha$ , $\\alpha \\in (0,1)$ , then the Monge-Ampère equation,", + "bbox": [ + 169, + 196, + 823, + 256 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nd e t D ^ {2} u (x) = f (x), \\quad \\nabla u (\\Omega) = \\Omega\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 256, + 616, + 272 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "can be solved using the FFT-OT Algorithm Alg. 1 in Appendix C. 
In particular, one can choose the step length parameter $\\tau$ , such that there is a constant $0 < \\gamma < 1$ that the approximation error satisfies", + "bbox": [ + 169, + 273, + 823, + 316 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| f - \\rho_ {n + 1} \\right\\| ^ {2} < C \\gamma^ {n}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 429, + 315, + 821, + 330 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "namely the algorithm has a linear convergence rate.", + "bbox": [ + 169, + 327, + 516, + 340 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 FFT SOLVER FOR CONSTANT COEFFICIENT ELLIPTIC PDES", + "text_level": 1, + "bbox": [ + 171, + 359, + 630, + 373 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To solve the constant coefficient elliptic PDE Eqn. (12), we first extend the PDE to the flat torus by mirror reflection, then discretize the domain and compute the differential operators by central difference scheme. Finally the PDE is converted to algebraic equations in the frequency domain by FFT and can be efficiently solved on GPUs.", + "bbox": [ + 169, + 386, + 823, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Extension by Mirror Reflection Suppose $\\Omega = [0,1]^3$ and $f:\\Omega \\to \\mathbb{R}$ are given, we extend $\\Omega$ to $\\tilde{\\Omega} = [-1,1]^3$ and $f$ to $\\tilde{f}:\\tilde{\\Omega}\\rightarrow \\mathbb{R}$ by mirror reflection", + "bbox": [ + 169, + 455, + 823, + 488 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {f} (x, y, z) = f (| x |, | y |, | z |), \\quad \\forall (x, y, z) \\in \\tilde {\\Omega}. \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 491, + 823, + 508 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By definition, $\\tilde{f}$ satisfies the periodic boundary condition and can be treated as a function defined on the flat torus $\\mathbb{T}^3$ . 
$\\tilde{\\Omega}$ is one of the fundamental domain of $\\mathbb{T}^3$ . The constant coefficients $a^{p,q}$ keep unchanged. Then we solve the following constant coefficient elliptic PDE Eqn. (18) $L[\\tilde{u}] = \\tilde{f}$ with the periodic boundary condition. Finally, the restriction of $\\tilde{u}$ on $\\Omega$ gives the initial solution $u$ to $L[u] = f$ with Neumann boundary condition.", + "bbox": [ + 169, + 513, + 823, + 588 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the following, to avoid using overly complicated symbols, we use $(u,f,\\Omega)$ to represent $(\\tilde{u},\\tilde{f},\\tilde{\\Omega})$ for simplicity.", + "bbox": [ + 169, + 595, + 823, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Tessellation Suppose $\\Omega = [-1,1]^3$ is the canonical cube (a fundamental domain of a flat torus), we tessellate it to the regular cells, and the centers of the cells form a grid $M\\times N\\times L$ . The Brenier potential $u:\\Omega \\to \\mathbb{R}$ is discretized to a tensor $u_{i,j,k}$ with $\\{i,j,k\\} \\in \\{0,\\dots ,M - 1\\} \\times \\{0,\\dots ,N - 1\\} \\times \\{0,\\dots ,L - 1\\}$ . The spacial step lengths are $(h_x,h_y,h_z) = (2 / M,2 / N,2 / L)$ . The coordinate of each sample point $(x_{i},y_{j},z_{k})$ is $(x_{i},y_{j},z_{k}) = (-1 + h_{x}(i + 1 / 2), - 1 + h_{y}(j + 1 / 2), - 1 + h_{z}(k + 1 / 2))$ . The periodic boundary condition is then formulated as", + "bbox": [ + 169, + 638, + 823, + 724 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nu _ {i, j, k} = u _ {i + \\alpha M, j + \\beta N, k + \\gamma L}, \\quad \\alpha , \\beta , \\gamma \\in \\mathbb {Z}. \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 727, + 823, + 742 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finite Difference Differential Operator We use the standard central differences to compute the differential operators. 
The first order derivative $\\mathcal{D}_x$ is approximated by", + "bbox": [ + 169, + 752, + 823, + 781 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} _ {x} u _ {i, j, k} = \\frac {u _ {i + 1 , j , k} - u _ {i - 1 , j , k}}{2 h _ {x}},\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 785, + 606, + 813 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the index $i + 1$ means $i + 1$ modulus $M$ . The operators $\\mathcal{D}_y, \\mathcal{D}_z$ are defined in a similar way. The second order derivative operator $\\mathcal{D}_{xx}$ and $\\mathcal{D}_{xy}$ are approximated by", + "bbox": [ + 169, + 816, + 823, + 845 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} _ {x x} ^ {2} u _ {i, j, k} = \\frac {u _ {i + 1 , j , k} + u _ {i - 1 , j , k} - 2 u _ {i , j , k}}{h _ {x} ^ {2}}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 849, + 643, + 877 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} _ {x y} ^ {2} u _ {i, j, k} = \\frac {u _ {i + 1 , j + 1 , k} + u _ {i - 1 , j - 1 , k} - u _ {i + 1 , j - 1 , k} - u _ {i - 1 , j + 1 , k}}{4 h _ {x} h _ {y}}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 878, + 692, + 906 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The other operators $\\mathcal{D}_{yy},\\mathcal{D}_{zz},\\mathcal{D}_{yz}$ and $\\mathcal{D}_{xz}$ are defined similarly.", + "bbox": [ + 171, + 909, + 606, + 926 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Discrete Fourier Transformation The discrete Fourier transformation (DFT) of $u_{i,j,k}$ is given by", + "bbox": [ + 169, + 103, + 823, + 119 + ], + "page_idx": 5 + }, + { + "type": "equation", + 
"text": "\n$$\n\\hat {u} _ {m, n, l} = \\sum_ {i = 0} ^ {M - 1} \\sum_ {j = 0} ^ {N - 1} \\sum_ {k = 0} ^ {L - 1} u _ {i, j, k} \\hat {\\omega} _ {m n l} \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 121, + 823, + 159 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nu _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\omega_ {m n l} \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 162, + 825, + 199 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\hat{\\omega}_{mnl} = e^{-\\iota \\frac{2\\pi mi}{M}}e^{-\\iota \\frac{2\\pi nj}{N}}e^{-\\iota \\frac{2\\pi lk}{L}}$ , $\\omega_{mnl} = e^{\\iota \\frac{2\\pi mi}{M}}e^{\\iota \\frac{2\\pi nj}{N}}e^{\\iota \\frac{2\\pi lk}{L}}$ and $\\iota = \\sqrt{-1}$ , $\\{m,n,l\\}$ are the indices of the frequency coefficients. By using DFT, the differential operators are converted to algebraic operators in the frequency domain.", + "bbox": [ + 169, + 200, + 823, + 246 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Lemma 4. Suppose the discrete function is $u_{i,j,k}$ , with the discrete Fourier transformation Eqn. (16) and Eqn. 
(17), by using the central difference scheme, the first order differential operator is given by", + "bbox": [ + 169, + 248, + 825, + 277 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} _ {x} u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {\\sin \\frac {2 \\pi m}{M}}{h _ {x}} \\omega_ {m n l}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 277, + 678, + 319 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the second order differential operators are represented by", + "bbox": [ + 171, + 321, + 553, + 335 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} _ {x x} ^ {2} u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 (\\cos \\frac {2 \\pi m}{M} - 1)}{h _ {x} ^ {2}} \\omega_ {m n l}\n$$\n", + "text_format": "latex", + "bbox": [ + 279, + 338, + 704, + 378 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} _ {x y} ^ {2} u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h _ {y}} \\omega_ {m n l}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 382, + 714, + 424 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The other differential operators $\\mathcal{D}_y, \\mathcal{D}_z, \\mathcal{D}_{yy}, \\mathcal{D}_{zz}, \\mathcal{D}_{yz}$ and $\\mathcal{D}_{xz}$ are also represented accordingly. 
The detailed proofs can be found in the supplementary material.", + "bbox": [ + 169, + 431, + 823, + 462 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "FFT Solver Suppose we want to solve an elliptic PDE with constant coefficients on $\\Omega \\subset \\mathbb{R}^3$", + "bbox": [ + 169, + 474, + 795, + 491 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nL [ u ] := \\left(\\sum_ {p = 0} ^ {2} \\sum_ {q = 0} ^ {2} a ^ {p, q} \\partial_ {p} \\partial_ {q} + \\sum_ {r = 0} ^ {2} b ^ {r} \\partial_ {r} + c\\right) u (x) = f (x), \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 492, + 823, + 534 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "with the periodic boundary condition, where $a^{p,q}, b^r, c$ are constants, the matrix $(a^{p,q})$ is positive definite, namely the PDE is uniformly elliptic. By the discrete Fourier transformation $\\mathcal{F}$ , we convert the differential equation to an algebraic equation in the frequency domain,", + "bbox": [ + 169, + 537, + 826, + 579 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {p = 0} ^ {2} \\sum_ {q = 0} ^ {2} a ^ {p, q} \\mathcal {F} (\\partial_ {p} \\partial_ {q} u) + \\sum_ {r = 0} ^ {2} b ^ {r} \\mathcal {F} (\\partial_ {r} u) + c \\mathcal {F} (u) = \\mathcal {F} (f)\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 580, + 689, + 625 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "By applying Lemma 4 and defining", + "bbox": [ + 171, + 632, + 410, + 647 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\lambda_ {m, n, l} = a ^ {0, 0} \\frac {2 (\\cos \\frac {2 \\pi m}{M} - 1)}{h _ {x} ^ {2}} + a ^ {1, 1} \\frac {2 (\\cos \\frac {2 \\pi n}{N} - 1)}{h _ {y} ^ {2}} \\\\ + a ^ {2, 2} \\frac {2 (\\cos \\frac {2 \\pi l}{L} - 1)}{h _ {z} ^ {2}} - \\left(a ^ {0, 1} + a ^ {1, 0}\\right) \\frac {\\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h _ {y}} (19) \\\\ - 
\\left(a ^ {1, 2} + a ^ {2, 1}\\right) \\frac {\\sin \\frac {2 \\pi n}{N} \\sin \\frac {2 \\pi l}{L}}{h _ {y} h _ {z}} - \\left(a ^ {0, 2} + a ^ {2, 0}\\right) \\frac {\\sin \\frac {2 \\pi l}{L} \\sin \\frac {2 \\pi m}{M}}{h _ {z} h _ {x}} (19) \\\\ + b ^ {0} \\frac {\\sin \\frac {2 \\pi m}{M}}{h _ {x}} + b ^ {1} \\frac {\\sin \\frac {2 \\pi n}{N}}{h _ {y}} + b ^ {2} \\frac {\\sin \\frac {2 \\pi l}{L}}{h _ {z}} + c \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 648, + 823, + 781 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We have the algebraic equations in frequency domain,", + "bbox": [ + 171, + 782, + 529, + 797 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {u} _ {m, n, l} \\lambda_ {m, n, l} = \\hat {f} _ {m, n, l}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 799, + 573, + 816 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "With $\\hat{u}_{m,n,l}$ 's, we can easily obtain $u_{i,j,k}$ 's by the Inverse Discrete Fourier Transform (IDFT), which means solving the constant coefficient elliptic equation. The algorithm is described in Alg. 2 in Appendix C.", + "bbox": [ + 169, + 818, + 825, + 861 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The FFT for solving the constant coefficient elliptic PDE can be efficiently computed with GPUs. Moreover, the algorithm Alg. 2 solves the constant coefficient elliptic PDEs with a periodic boundary condition, which can be generalized to solving the same type of PDEs with Neumann boundary condition by extending the PDE to the flat torus $\\mathbb{T}^3$ using mirror reflection Eqn. 
(14).", + "bbox": [ + 169, + 867, + 825, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 419, + 118 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we firstly show that the our proposed FFT-OT algorithm converges linearly and runs $100 \\times$ faster than the conventional convex geometry based solver Levy (2015), then demonstrate the method in two applications: 3D adaptive sampling and Volume Magnifier. All the algorithms are developed using generic C++ with CUDA Toolkit. All the experiments are conducted on a Windows laptop with Intel Core i7-7700HQ CPU with 16 GB memory and NVIDIA GeForce GTX 1060 Graphics Cards. More experiments can be found in Appendix D.", + "bbox": [ + 169, + 133, + 826, + 219 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 RUNNING TIME AND CONVERGENCE ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 234, + 547, + 250 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To show the performance of the proposed method, we experiment on the density functions defined by the Gaussian mixture models. To be specific, the domain is a cube $\\Omega = [0,1]^3$ , the 3-dimensional density function defined on $\\Omega$ is set to be $f(x) = \\sum_{i=1}^{30} p_i \\mathcal{N}(\\mu_i, \\Sigma_i)$ , where $\\mathcal{N}(\\mu_i, \\Sigma_i)$ represents Gaussian distribution with mean $\\mu_i$ and variance $\\Sigma_i = \\mathrm{diag}(\\sigma_{i0}^2, \\sigma_{i1}^2, \\sigma_{i2}^2)$ . 
$\\mu_i \\in \\mathbb{R}^3$ is uniformly sampled from $[0,1]^3$ , $\\sigma_{ij}$ is uniformly sampled from $[0,0.5]$ , $p_i \\in \\mathbb{R}$ is uniformly sampled from $[0.2,1]$ and normalized such that $\\int_{\\Omega} f(x) dx = 1$ . Thus the source distribution $\\mu$ is a complicated Gaussian mixture distribution restricted on $\\Omega$ . Then by mirror reflection in Sec. 3.2, we obtain the complex density function which is defined on $[-1,1]^3$ and satisfies the periodic boundary condition.", + "bbox": [ + 169, + 261, + 823, + 378 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We directly use the FFT-OT algorithm Alg. 1 to solve the linearized Monge-Ampère equation. With the approximation error threshold $\\varepsilon = 1.0 \\times 10^{-6}$ and the resolution $256 \\times 256 \\times 256$ , the running time for our FFT-OT algorithm with double precision on GPU is less than 175 seconds. The conventional convex geometry based algorithm for 3D optimal transportation Levy (2015) can neither handle such large data sets nor be implemented on GPUs. It can only compute OT map with resolution no greater than $100 \\times 100 \\times 100$ on our system, which takes about 2700 seconds. When handling problem with $128 \\times 128 \\times 128$ resolution, our FFT-OT consumes about", + "bbox": [ + 169, + 383, + 552, + 549 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3bdbb91d57bc03d187391b82d1f4bbac774ffb0ed2c127ee3cfab9ecf8513c36.jpg", + "image_caption": [ + "Figure 1: Convergence Analysis." + ], + "image_footnote": [], + "bbox": [ + 563, + 378, + 821, + 532 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "20.3 seconds, which is $130 \\times$ faster than the power diagram based method Levy (2015).", + "bbox": [ + 171, + 550, + 746, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fig. 
1 shows the approximation error for the above Gaussian mixture density with respect to iterations, namely $\\log \\| f - \\rho_n\\| _2^2$ . Our algorithm does converge linearly and the result is consistent with the prediction Eqn. (13) in Thm. 3. Therefore, this experiment validates the theorem.", + "bbox": [ + 169, + 571, + 823, + 613 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 3D ADAPTIVE SAMPLING", + "text_level": 1, + "bbox": [ + 171, + 630, + 393, + 643 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Generating random samples matching a given density function plays an essential role in the applications like Monte-Carlo integration or stippling. Efficiently obtaining high quality samples is still an on-going research topic Bauer et al. (2015); Perrier et al. (2018). And optimal transportation has been successfully applied for generating high quality 2D samples de Goes et al. (2012); Nader & Guennebaud (2018). Most of the current research focuses on generating 2D samples fitting the given density function. Here we apply the proposed 3D FFT-OT method to generate high quality 3D samples according to the given complex density functions. To the best of our knowledge, it is the first work that uses OT to sample from 3D density functions.", + "bbox": [ + 169, + 656, + 823, + 768 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Suppose the source probability distribution $d\\mu (x) = f(x)dx$ is defined on $\\Omega = [0,1]^3$ with $\\mu (\\Omega) = 1$ . The target distribution $d\\nu (y) = dy$ is the uniform distribution. We use the FFT-OT algorithm Alg. 1 to compute the OT map $T:\\Omega \\to \\Omega ,T_{\\#}\\mu = \\nu$ . The domain is tessellated to a $256\\times 256\\times 256$ grid. For each $x_{ijk},i,j,k\\in \\{0,1,\\ldots ,255\\}$ , the image $T(x_{ijk})$ can be obtained. We use $\\{T(x_{ijk})\\}$ as vertices to compute the Delaunay triangulation of $\\Omega$ . 
Then representing the OT map $T:(\\Omega ,\\mu)\\rightarrow (\\Omega ,\\nu)$ as a piecewise linear map, the restriction of $T$ on each tetrahedron is a linear map. Then the inverse OT map $T^{-1}:(\\Omega ,\\nu)\\to (\\Omega ,\\mu)$ is also a piecewise linear map. Namely, given a grid point $y_{mnl}$ , we can find a tetrahedron containing it. Suppose the vertices of the tetrahedron are $\\{T(x_i),T(x_j),T(x_k),T(x_l)\\}$ , then $y_{mnl}$ is computed as", + "bbox": [ + 169, + 773, + 826, + 902 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\ny _ {m n l} = \\lambda_ {i} T (x _ {i}) + \\lambda_ {j} T (x _ {j}) + \\lambda_ {k} T (x _ {k}) + \\lambda_ {l} T (x _ {l}),\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 907, + 671, + 926 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9e23e79754394ea47c674701a280bdd55feaa52de2d09dfbac5d654f3479e131.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 184, + 99, + 277, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6c4973590e35572795f31cbf0ccd3c7259833b41ad4e2dbbced9a255031e0226.jpg", + "image_caption": [ + "(a) Density" + ], + "image_footnote": [], + "bbox": [ + 184, + 172, + 277, + 242 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/991c27da91d2b9a35751811c8d8b66d71b4f0b1d623541b6f4a316a83e509cc3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 292, + 99, + 385, + 172 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1c460ddc7081f3eddd7743544cfbccc8449855610c14e1204499168b0c73fe58.jpg", + "image_caption": [ + "(b) Rejection" + ], + "image_footnote": [], + "bbox": [ + 292, + 172, + 383, + 242 + ], + "page_idx": 7 + }, + { 
+ "type": "image", + "img_path": "images/42274fe3238f11b799d712596f55048296cca366c58c386d20c9588fa5a82ca7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 398, + 99, + 491, + 171 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6393df53e6e6e271b41246b6537f60061ba7c4fb93111daf4b4d0b10c4620dfc.jpg", + "image_caption": [ + "(c) MH", + "Figure 2: 3D density function sampling. (a) The density functions in a slice. The slices in each row come from two different density functions. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with $T^{-1}$ . (f) The sampling results by mapping the grid centers back with $T^{-1}$ . The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better." 
+ ], + "image_footnote": [], + "bbox": [ + 398, + 172, + 491, + 242 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1386ac29405621587ac210b503a5dea958f573ac4514d9efc83e5c0d92511114.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 99, + 598, + 171 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/eff55229582a2e4bb9d8e713da05b6ad53348e1f5cfef7d1f2838fb12460df68.jpg", + "image_caption": [ + "(d) Slice" + ], + "image_footnote": [], + "bbox": [ + 506, + 172, + 598, + 242 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d85bc0ba5e2c8b1233922b11ac9edf9eca3bde735bd03384e7b6bf4159723282.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 101, + 705, + 171 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8e2babcbb74fefc8012dd3da3d63874b37804f4626866aa9648c765bad8df38a.jpg", + "image_caption": [ + "(e) Ours-R" + ], + "image_footnote": [], + "bbox": [ + 614, + 172, + 702, + 242 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/12b78ae2688e92da4208cb0e7c2c9586e51e58ef5e36438feac9884a3758799f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 720, + 101, + 810, + 171 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8a65f5c0f7af9a0785980ddfcfbb259e088777602f0ed19e24504b5b81ae417e.jpg", + "image_caption": [ + "(f) Ours-G" + ], + "image_footnote": [], + "bbox": [ + 720, + 172, + 810, + 242 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where the non-negative barycenter coordinates satisfy $\\lambda_{i} + \\lambda_{j} + \\lambda_{k} + \\lambda_{l} = 1$ . Then the image of the inverse OT map is given by", + "bbox": [ + 169, + 372, + 823, + 402 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nT ^ {- 1} \\left(y _ {m n l}\\right) = \\lambda_ {i} x _ {i} + \\lambda_ {j} x _ {j} + \\lambda_ {k} x _ {k} + \\lambda_ {l} x _ {l}. 
\\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 407, + 823, + 424 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We generate random samples $\\{y_k\\}$ according to the uniform distribution $\\nu$ on $\\Omega$ , then their images $\\{T^{-1}(y_k)\\}$ are the desired random samples following the distribution $\\mu$ .", + "bbox": [ + 171, + 430, + 823, + 460 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In our experiment, we use the same Gaussian mixture settings of the density function as Sec. 4.1. Fig. 2 visualizes the generated samples. We randomly pick the $k$ -th slice along the $z$ -direction from the discretized volume, draw the source density function on this slice, and use pixel intensity to represent the density in Fig. 2(a). (i) We uniformly generate $100k$ random samples $\\{y_k\\} \\subset \\Omega$ , and obtain the desired random samples by applying the inverse OT map $\\{T^{-1}(y_k)\\}$ . (ii) We also set $\\{y_k\\}$ as the grid centers of $\\Omega$ and obtain the corresponding samples of the desired distribution $\\mu$ . The samples around the $k$ -th slice of both sampling strategies are plotted in Fig. 2(e) and Fig. 2(f).", + "bbox": [ + 169, + 465, + 823, + 564 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "By visual comparison, it is obvious that the distributions of Fig. 2(e) and Fig. 2(f) are consistent with the density function in Fig. 2(a). The consistency of the boundary of Fig. 2(e) and (f) and Fig. 2(a) also verifies the obliqueness boundary condition of the Monge-Ampère equation. To further show the performance of the proposed method, we compare it with the classical sampling methods, namely rejection sampling, the Metropolis-Hastings algorithm Bishop (2006) and the slice sampling Neal (2003), shown in Fig. 2(b), Fig. 2(c) and Fig. 2(d). 
To quantitatively compare the sampling results, we use the Chi-square goodness-of-fit test, which firstly groups the data and then computes the $L^2$ norm of the difference between the actual number of observations in each group and the expected number of observations. In our experiment, we set the group number to $64 \\times 64 \\times 64$ and use 500K samples to make the comparison. The corresponding $L^2$ norm of each method is shown in the top-right of the corresponding figure. We can see that the both sampling strategies of our method give smaller scores than the classical ones.", + "bbox": [ + 169, + 569, + 826, + 737 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 VOLUMETRIC MAGNIFIER", + "text_level": 1, + "bbox": [ + 171, + 753, + 397, + 767 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In reality, physical magnifiers can only magnify planar images. In medical image processing, it is highly desirable to magnify certain regions of the 3D MRIs or CT images. Our algorithm can address such requests with the user prescribed region of interest (ROI) and magnifying factor. Suppose the ROI is a symmetric region with the center $(\\bar{x},\\bar{y},\\bar{z})\\in \\Omega$ and the radius $\\sigma_x,\\sigma_y,\\sigma_z$ in different directions. The density function $f$ of the source measure $\\mu$ is defined as", + "bbox": [ + 169, + 779, + 823, + 849 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nf (x, y, z) = 0. 5 + 0. 5 e ^ {- ((x - \\bar {x}) ^ {2} / 2 \\sigma_ {x} ^ {2} + (y - \\bar {y}) ^ {2} / 2 \\sigma_ {y} ^ {2} + (z - \\bar {z}) ^ {2} / 2 \\sigma_ {z} ^ {2})}\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 854, + 700, + 875 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We compute OT map $T: (\\Omega, \\mu) \\to (\\Omega, \\nu)$ , where $\\nu$ is the uniform distribution. 
Similar to the method in 3D adaptive sampling, we compute the Delaunay triangulation of the images $\\{T(x_{ijk})\\}$ , then the OT map $T$ is represented as a piecewise linear map. The inverse optimal transportation map", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9617a55da512e830fdb2957b053afbf67cde99d45ee37404750cdf5b376f62f5.jpg", + "image_caption": [ + "Figure 3: The volume magnifier of an aneurysm. The first column shows the original volumetric data, and the last three columns give the magnified data from the same viewpoints with different magnifying ratios. The yellow circle denotes the ROI/aneurysm. To obtain the results, we set $\\sigma = \\sigma_{x} = \\sigma_{y} = \\sigma_{z}$ , and they are 0.83, 0.75 and 0.5 respectively." 
+ ], + "image_footnote": [], + "bbox": [ + 189, + 99, + 333, + 210 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/69a02b53691bc471fb821d5c6db1f725236ce6031bb0554dec9901a519abf12c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 99, + 491, + 210 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/386aab8fb7671eec7accf9774320c70703684277bb174009e9564739ebd35a58.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 99, + 650, + 210 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f7423a5b99e94f5d341b8159bc08750b17410a0ccaf0730d033af287440fc51a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 99, + 807, + 210 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/fdcd37e360c4edc5d7205291a179e64e74a004e8852cee7f98b9ecdc15549027.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 189, + 282, + 331, + 393 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/d3a86e9df0f9fb6cc6fd84a90e8d1778a943ac75a38ee32c4e3d8969df70c85c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 282, + 490, + 393 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/ebdff74f3bbc7c0e5b13f7d1bf88838e5fe7f7443c9a08fbce7813d4e1a0a048.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 282, + 650, + 393 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/3a119ac03a5c3ee978ad5e6cd7f6c4ada4884521e6cfa58455f65ee717b8c464.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 282, + 807, + 393 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b5c18de141438767f08b957306c042f2c43ef845f677c23d259a0301c1abc03b.jpg", + "image_caption": [ + "Figure 4: The volume magnifier of the knee. 
The first row gives the original volumetric data with different ROIs denoted by the blue boxes from different viewpoints, and the second row shows the corresponding magnified results. In the experiments we set $\\sigma_{x} = \\sigma_{y} = \\sigma_{z} = 0.75$ ." + ], + "image_footnote": [], + "bbox": [ + 187, + 395, + 331, + 505 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/7b8683c61289f9240fae83afa24973daf767edbc881a65835aed49339dc711b1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 395, + 490, + 505 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/5e2ab732a05ed454bef976b47f9fba3031bd402970ee1fba72556b924a40eb13.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 395, + 650, + 505 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/6447c7ba8e3eff44e23870d8d42c9cf37c5a1d7ecfa3d6fa64a0ce607cbb3218.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 395, + 807, + 505 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "$T^{-1}:(\\Omega ,\\nu)\\to (\\Omega ,\\mu)$ is also piecewise linear. For each grid point $y_{mnl}\\in \\Omega$ we use Eqn. (20) to find its pre-image. Similarly, its corresponding intensity $I_{mnl}$ is computed by linear interpolation. Then we obtain the new volumetric data $\\{I_{mnl}\\}$ with the magnified ROI and visualize the result with Voreen Meyer-Spradow et al. (2009).", + "bbox": [ + 169, + 577, + 823, + 635 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Fig. 3 demonstrates our volumetric magnifier by magnifying an aneurysm on blood vessel Hansen & Johnson (2004). We choose the aneurysm region as the ROI. The first column gives the snapshot of the blood vessel, and the yellow circle denotes the location of the aneurysm. The last three columns show the magnified aneurysm with different magnifying ratio from the same viewpoints. 
Moreover, we show the magnified volumetric knee from different viewpoints with different ROIs denoted by the blue boxes in Fig. 4. Our method only magnifies the ROIs and keeps other regions unchanged. Compared with the traditional method requiring tedious zoom in/out, our method only magnifies the ROI region and keeps the whole subject in the field of view, which enables doctors to visualize the overall anatomy while scrutinize detailed anatomical structure at the same time.", + "bbox": [ + 169, + 641, + 826, + 767 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 791, + 318, + 806 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we propose the FFT-OT method to solve the optimal transportation problem. According to the Brenier theory, under the quadratic distance cost, finding the solution to the OT problem is equivalent to solving the Monge-Ampère equation, which can be linearized as a sequence of variant coefficient elliptic PDEs. Later, the variant coefficient PDEs are approximated by constant coefficient PDEs and solved by Fast Fourier Transformation. We also prove that the proposed method converges linearly. Experiments on volumetric data show that the FFT-OT can be used to sample from complex 3D density functions and magnify the volumetric data in medical images.", + "bbox": [ + 169, + 825, + 823, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENT", + "text_level": 1, + "bbox": [ + 171, + 102, + 361, + 118 + ], + "page_idx": 9 + }, + { + "type": "ref_text", + "text": "This research was partially supported by National Key R&D Program of China 2021YFA1003003 and NSFC No. 61936002, T2225012. 
This work was also partially supported by NIH 3R01LM012434-05S1, 1R21EB029733-01A1, NSF FAIN-2115095 and NSF CMMI-1762287.", + "bbox": [ + 171, + 132, + 826, + 176 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 196, + 287, + 212 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Mokhtar Z. Alaya, Maxime Berar, Gilles Gasso, and Alain Rakotomamonjy. Screening sinkhorn algorithm for regularized optimal transport. In Advances in Neural Information Processing Systems 32, 2019.", + "Jose I. Aliaga, Ernesto Dufrechou, Pablo Ezzatti, and Enrique S. Quintana-Orti. An efficient gpu version of the preconditioned gmres method. The Journal of Supercomputing, 75, 2019.", + "Jason Altschuler, Jonathan Niles-Weed, and Philippe Rigollet. Near-linear time approximation algorithms for optimal transport via sinkhorn iteration. In Advances in Neural Information Processing Systems 30, 2017.", + "Jason Altschuler, Francis Bach, Alessandro Rudi, and Jonathan Niles-Weed. Massively scalable sinkhorn distances via the nystrom method. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper/2019/file/f55cadb97eaff2ba1980e001b0bd9842-Paper.pdf.", + "Dongsheng An, Yang Guo, Na Lei, Zhongxuan Luo, Shing-Tung Yau, and Xianfeng Gu. Ae-ot: A new generative model based on extended semi-discrete optimal transport. In International Conference on Learning Representations, 2020.", + "Dongsheng An, Na Lei, and Xianfeng Gu. Efficient optimal transport algorithm by accelerated gradient descent. In The Thirty-Sixth AAAI Conference on Artificial Intelligence (AAAI), 2022.", + "Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In ICML, pp. 214-223, 2017.", + "F. Aurenhammer, F. 
Hoffmann, and B. Aronov. Minkowski-type theorems and least-squares clustering. Algorithmica, 1998.", + "Martin Bauer, Sarang Joshi, and Klas Modin. Diffeomorphic density matching by optimal information transport. SIAM Journal on Imaging Sciences, 8, 2015.", + "J.D. Benamou, Y. Brenier, and K. Guittet. The Monge-Kantorovitch mass transfer and its computational fluid mechanics formulation. International Journal for Numerical Methods in Fluids, 2002.", + "Jean-David Benamou, Brittany D. Froese, and Adam M. Oberman. Numerical solution of the optimal transportation problem using the monge-ampère equation. J. Comput. Phys, 2014.", + "Christopher M. Bishop. Pattern Recognition and Machine Learning. Springer, 2006.", + "Y. Brenier. Polar decomposition and increasing rearrangement of vector fields. C. R. Acad. Sci. Paris Sr. I Math., 305(19):805-808, 1987.", + "Y. Brenier. Polar factorization and monotone rearrangement of vector-valued functions. Comm. Pure Appl. Math., 44(4):375-417, 1991.", + "Dario Cordero-Erausquin. Sur le transport de mesures periodiques monotone maps preserving periodic measures. Comptes Rendus de l'Académie des Sciences - Series I - Mathematics, 329: 199-202, 1999.", + "N. Courty, R. Flamary, D. Tuia, and A. Rakotomamonjy. Optimal transport for domain adaptation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 39(9):1853-1865, 2017." + ], + "bbox": [ + 171, + 220, + 825, + 926 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Marco Cuturi. Sinkhorn distances: Lightspeed computation of optimal transportation distances. In International Conference on Neural Information Processing Systems, 2013.", + "F. de Goes, K. Breeden, V. 
Ostromoukhov, and M. Desbrun. Blue noise through optimal transport. ACM Trans. Graph. (SIGGRAPH Asia), 31, 2012.", + "Philippe Delanoë. Classical solvability in dimension two of the second boundary-value problem associated with the Monge-Ampère operator. Annales de l'I.H.P. Analyse non linéaire, 8(5): 443-457, 1991.", + "Pavel Dvurechensky, Alexander Gasnikov, and Alexey Kroshnin. Computational optimal transport: Complexity by accelerated gradient descent is better than by sinkhorn's algorithm. In Proceedings of the 35th International Conference on Machine Learning. PMLR, 2018.", + "Suli Endre. Lecture Notes on Finite Element Methods for Partial Differential Equations. University of Oxford, 2020.", + "David Xianfeng Gu, Feng Luo, Jian Sun, and Shing-Tung Yau. Variational principles for minkowski type problems, discrete optimal transport, and discrete monge-ampère equations. *Asian Journal of Mathematics*, 2016.", + "Charles D. Hansen and Chris R. Johnson. Visualization Handbook. Academic Press, 2004.", + "Jun Kitagawa, Quentin Mérigot, and Boris Thibert. Convergence of a newton algorithm for semi-discrete optimal transport. Journal of the European Mathematical Society, 2019.", + "Na Lei and Xianfeng Gu. Fft-ot: A fast algorithm for optimal transportation. In Proceedings of International Conference on Computer Vision (ICCV), 2021.", + "Bruno Levy. A numerical algorithm for $l^2$ semi-discrete optimal transport in 3d. ESAIM: M2AN, 49 (6):1693-1715, 2015.", + "Grégoire Loeper and Francesca Rapetti. Numerical solution of the monge-ampère equation by a newton's algorithm. C. R. Acad. Paris, pp. 319-324, 2005.", + "Robert J. McCann. A convexity principle for interacting gases. Advances in mathematics, 128:153-179, 1997.", + "Quentin Merigot. A multiscale approach to optimal transport. Computer Graphics Forum., 2011.", + "Jennis Meyer-Spradow, Timo Ropinski, Jörg Mensmann, and Klaus H. Hinrichs. 
Voreen: A rapid-prototyping environment for ray-casting-based volume visualizations. IEEE Computer Graphics and Applications, 2009.", + "Georges Nader and Gael Guennebaud. Instant transport maps on 2d grids. ACM Trans. Graph., 37 (6), 2018.", + "Radford M. Neal. Slice sampling. The Annals of Statistics, 2003.", + "Nicolas Papadakis, Gabriel Peyre, and Edouard Oudet. Optimal transport with proximal splitting. SIAM Journal on Imaging Sciences, 2014.", + "Hélène Perrier, David Coeurjolly, Feng Xie, Matt Pharr, Pat Hanrahan, and Victor Ostromoukhov. Sequences with low-discrepancy blue-noise 2-d projections. Computer Graphics Forum, 2018.", + "Gabriel Peyre and Marco Cuturi. Computational optimal transport. Found. Trends Mach. Learn., 11 (5-6):355-607, 2019.", + "Yousef Saad. Iterative Methods For Sparse Linear Systems. Society of Industrial and Applied Mathematics, 2003.", + "Filippo Santambrogio. Optimal Transport for Applied Mathematicians. Springer, 2015.", + "Louis-Philippe Saumier, Martial Agueh, and Boualem Khouider. An efficient numerical algorithm for the $l^2$ optimal transport problem with periodic densities. IMA Journal of Applied Mathematics, 80:135-157, 2013." + ], + "bbox": [ + 171, + 102, + 826, + 922 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yuliy Schwartzburg, Romain Testuz, Andrea Tagliasacchi, and Mark Pauly. High-contrast computational caustic design. ACM Trans. Graph., 33(4), July 2014. ISSN 0730-0301. doi: 10.1145/2601097.2601200. URL https://doi.org/10.1145/2601097.2601200.", + "Justin Solomon, Fernando de Goes, Gabriel Peyré, Marco Cuturi, Adrian Butscher, Andy Nguyen, Tao Du, and Leonidas Guibas. 
Convolutional Wasserstein distances: Efficient optimal transportation on geometric domains. ACM Transactions on Graphics (TOG), 2015.", + "Kehua Su, Wei Chen, Na Lei, Junwei Zhang, Kun Qian, and Xianfeng Gu. Volume preserving mesh parameterization based on optimal mass transportation. Comput. Aided Des., 82:42-56, 2017.", + "Ilya Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Schoelkopf. Wasserstein auto-encoders. In ICLR, 2018.", + "Cédric Villani. Topics in Optimal transportation. AMS, 2003.", + "Cédric Villani. Optimal transport: old and new, volume 338. Springer Science & Business Media, 2008." + ], + "bbox": [ + 171, + 102, + 825, + 333 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A RELATED WORK", + "text_level": 1, + "bbox": [ + 174, + 102, + 348, + 116 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "There is a huge literature about optimal transportation. Here we will only briefly review the most related works. For detailed reviews, we refer readers to Santambrogio (2015); Peyre & Cuturi (2019).", + "bbox": [ + 174, + 138, + 823, + 165 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The first type of algorithms is based on the Kantorovich theory. When both the input and output domains are Dirac masses, the Kantorovich problem can be treated as a standard linear programming (LP) task. In order to tackle large data sets, Cuturi (2013) adds an entropic regularizer to the original LP problem and the regularized problem can be quickly solved by the Sinkhorn algorithm. Recently, various algorithms have been proposed to further accelerate the computation by improving the efficiency of matrix-vector multiplications, including the Greenkhorn Altschuler et al. 
(2017), Screenkhorn Alaya et al. (2019) and the NYS-SINK Altschuler et al. (2019) algorithms. Dvurechensky et al. Dvurechensky et al. (2018) also propose the adaptive primal-dual accelerated gradient descent algorithm (APDAGD) to solve the discrete OT problem. An et al. An et al. (2022) compute the approximate OT plan by smoothing the dual Kantorovich problem and solving it with the FISTA method. This kind of method has limitations: (i) they only give transport plans and cannot produce the bijective transportation maps; and (ii) the computational complexity is too high to apply them in the scenarios with a huge number of samples.", + "bbox": [ + 174, + 172, + 823, + 353 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The second type of algorithms is based on the Brenier theory Brenier (1987) and its intrinsic connection with convex geometry Gu et al. (2016). The semi-discrete OT algorithm proposed in Aurenhammer et al. (1998) finds the transport map between a continuous distribution and a discrete measure via a variational approach by dynamically constructing the power diagrams. Its efficiency can be further improved Levy (2015); Merigot (2011) by the multi-resolution strategy. The algorithms proposed in Kitagawa et al. (2019); Su et al. (2017) also improve the efficiency by applying Newton's method. When both the source and target measures are continuous, some interpolation methods are necessary Schwartzburg et al. (2014). The major drawback of this type of algorithms is the high computational complexity of constructing the dynamic power diagram, which prevents them from handling high dimensional tasks. For example, for the 3D OT problems, these algorithms usually run very slowly.", + "bbox": [ + 174, + 359, + 823, + 513 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The third type of algorithms is based on computational fluid dynamics Benamou et al. (2002); Papadakis et al. (2014). 
These methods aim at finding a special temporal-spatial flow field that transports the initial source density to the target density with the minimal total kinetic energy. Then the diffeomorphism induced by the flow gives the optimal transport map under the quadratic Euclidean distance cost. However, this kind of algorithm is difficult to extend to high dimensional space.", + "bbox": [ + 174, + 520, + 823, + 589 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The fourth type of algorithms directly solve the Monge-Ampère equation using numerical methods. Loeper and Rapetti Loeper & Rapetti (2005) propose to solve the linearized Monge-Ampère equation defined on a flat torus in each iteration. Its corresponding variant coefficient elliptic PDE is converted to a positive definite linear system using the finite-difference scheme, which can be solved by the BiCG algorithm Endre (2020). Benamou et al. Benamou et al. (2014) propose to solve the linearized Monge-Ampère on more general domains using Newton's method. Nader and Guennebaud Nader & Guennebaud (2018) apply a similar discretization strategy and solve the Monge-Ampère equation by the conjugate gradient method. Saumier et al. Saumier et al. (2013) propose to solve the linearized Monge-Ampère equation using FFT. In each iteration the elliptic PDE with spatial and temporal variant coefficients is converted to a group of linear equations in the frequency domain, which is solved by the GMRES algorithm. Although the GMRES algorithm can be implemented on GPUs Aliaga et al. (2019), there is no available open source code. The work in Saumier et al. (2013) focuses on periodic boundary condition, but our proposed work focuses on general second boundary condition; the work in Saumier et al. (2013) concerns planar OT maps, ours emphasizes volumetric OT maps, which have higher complexity. The work in Saumier et al. 
(2013) can handle more general target measures, the proposed work currently only deals with the Lebesgue target measure. Nevertheless, the current work can be directly generalized to handle general target measures as well. Lei and Gu Lei & Gu (2021) use the fixed point method to compute the 2-dimensional OT problem based on FFT, but it cannot be extended to solve the 3-dimensional problems.", + "bbox": [ + 174, + 597, + 823, + 875 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this work, we combine the idea of linearizing the Monge-Ampère equation Loeper & Rapetti (2005) and the idea of FFT Saumier et al. (2013). The key novelty of our proposed method is to use the mean linearized Monge-Ampère operator Eqn. (12) to replace the conventional linearized", + "bbox": [ + 174, + 881, + 823, + 922 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 477, + 46 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 491, + 948, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Monge-Ampere operator Eqn. (10). This replacement allows the algorithm to be implemented on GPUs and makes the algorithm hundreds of times faster. In the following, we compute the 3-dimensional optimal transport problem by applying the proposed algorithm. Our method also runs more than $100 \\times$ faster than the convex geometry based method Levy (2015).", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B APPENDIX THEORY", + "text_level": 1, + "bbox": [ + 171, + 179, + 372, + 194 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In the section, we give the detailed proofs for several lemmas and theorems. 
Some of them are well known in the Monge-Ampère PDE field and the applied mathematics field, we include them for the completeness.", + "bbox": [ + 169, + 209, + 823, + 252 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.1 EXISTENCE OF THE SOLUTION TO THE TIME DEPENDENT MONGE-AMPERE EQNUATION", + "text_level": 1, + "bbox": [ + 171, + 268, + 743, + 297 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Let $\\mathbb{T}^n = \\mathbb{R}^n / \\mathbb{Z}^n$ be the $n$ -dimensional flat torus. Below we sometimes identify it with $\\Omega = [0,1]^n$ and assume all data are periodic. The existence and regularity of solutions to the Monge-Ampère equation are given by the following theorem,", + "bbox": [ + 169, + 306, + 823, + 349 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Theorem 5. Suppose a positive density function $f: \\Omega \\to \\mathbb{R}$ is defined on $\\Omega = [0,1]^n$ , such that $\\int_{\\Omega} f(x) dx = 1$ , and $f \\in C^{\\alpha}(\\Omega)$ , then the solution $u: \\Omega \\times [0,1]$ to the time-dependent Monge-Ampère equation", + "bbox": [ + 169, + 352, + 825, + 395 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\det D _ {x} ^ {2} u (x, t) = (1 - t) + t f (x), \\quad \\nabla_ {x} u (x, t) (\\Omega) = \\Omega \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 315, + 393, + 821, + 410 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "exists and is unique up to a constant. Furthermore, there exist constants $0 < \\lambda < \\Lambda$ , such that", + "bbox": [ + 169, + 411, + 790, + 426 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda \\sum_ {p = 1} ^ {n} \\xi_ {p} ^ {2} \\leq \\sum_ {p, q = 1} ^ {n} u ^ {p q} (x, t) \\xi_ {p} \\xi_ {q} \\leq \\Lambda \\sum_ {p = 1} ^ {n} \\xi_ {p} ^ {2}, \\quad \\forall \\xi \\in \\mathbb {R} ^ {n}, \\forall (x, t) \\in \\Omega \\times [ 0, 1 ]. 
\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 243, + 429, + 823, + 470 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We refer readers to Cordero-Erausquin (1999) for detailed proof.", + "bbox": [ + 171, + 479, + 602, + 494 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Weak Solution In practice, we compute the weak solution of the linearized Monge-Ampère Eqn. (6) using numerical methods. We first rewrite the differential operator to a divergence form, then define a bilinear form.", + "bbox": [ + 169, + 510, + 823, + 551 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Since $(u^{pq}(x,t))$ is the adjoint matrix of $D_x^2 u(x,t)$ , by direct computation, we obtain", + "bbox": [ + 169, + 558, + 735, + 574 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\sum_ {p = 1} ^ {n} \partial_ {p} u ^ {p q} (x, t) = 0, \quad \forall (x, t) \in \Omega \times [ 0, 1 ], \quad \forall q = 1, \dots , n. \tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 578, + 823, + 618 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "so Eqn. 
(6) can be converted into the divergence form:", + "bbox": [ + 171, + 621, + 529, + 636 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) = \\sum_ {p, q = 1} ^ {n} u ^ {p q} \\partial_ {p} \\partial_ {q} v + \\sum_ {q = 1} ^ {n} \\left(\\sum_ {p = 1} ^ {n} \\partial_ {p} u ^ {p q}\\right) \\partial_ {q} v = \\sum_ {p, q = 1} ^ {n} u ^ {p q} \\partial_ {p} \\partial_ {q} v,\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 638, + 779, + 681 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "we obtain", + "bbox": [ + 171, + 686, + 240, + 698 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} (x, t) \\partial_ {q} v (x, t)\\right) = f (x) - 1. \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 343, + 695, + 823, + 736 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "with Neumann boundary condition", + "bbox": [ + 171, + 738, + 405, + 752 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial v (x , t)}{\\partial \\mathbf {n}} = 0, \\quad \\forall (x, t) \\in \\partial \\Omega \\times [ 0, 1 ]. 
\\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 756, + 823, + 785 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For any $w\\in H^{1}(\\Omega)$ , by differentiation of product, we obtain", + "bbox": [ + 169, + 789, + 575, + 804 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w + \\sum_ {p = 1} ^ {n} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) \\partial_ {p} w = \\sum_ {p = 1} ^ {n} \\partial_ {p} \\left[ \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 806, + 766, + 849 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "by integrating both sides, and from the fact that $v$ satisfies the Neumann boundary condition, we deduce", + "bbox": [ + 169, + 853, + 823, + 880 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\Omega} \\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w + \\int_ {\\Omega} \\sum_ {p, q = 1} ^ {n} u ^ {p q} \\partial_ {q} v \\partial_ {p} w = \\int_ {\\partial \\Omega} \\sum_ {p = 1} ^ {n} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w = 0. 
\\tag {26}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 882, + 823, + 928 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For any fixed time $t \\in [0,1]$ , by the divergence form, we can construct a bilinear form $a: H^1(\\Omega) \\times H^1(\\Omega)$ and a linear form $l: H^1(\\Omega) \\to \\mathbb{R}$ ,", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\na (v, w) = \\sum_ {p, q = 1} ^ {n} \\int_ {\\Omega} u ^ {p q} \\partial_ {p} v \\partial_ {q} w, \\quad l (w) = - \\int_ {\\Omega} (f - 1) w d x. \\tag {27}\n$$\n", + "text_format": "latex", + "bbox": [ + 294, + 141, + 823, + 181 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A weak solution to Eqn. (24) is a function $v \\in H^{1}(\\Omega)$ , such that", + "bbox": [ + 171, + 190, + 596, + 205 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\na (v, w) = l (w), \\quad \\forall w \\in H ^ {1} (\\Omega). \\tag {28}\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 213, + 823, + 231 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "By the uniform ellipticity Eqn. 
(22), the Lax-Milgram theorem Endre (2020) shows the existence of the weak solution.", + "bbox": [ + 169, + 244, + 826, + 273 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.2 DISCRETE LINEARIZED MONGE-AMPERE EQUATION SOLVABILITY", + "text_level": 1, + "bbox": [ + 171, + 291, + 681, + 306 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Galerkin Method In practice, we construct a triangulation $\\mathcal{T}$ of $\\Omega$ , such that the ratio between the diameter and inscribe-sphere radius of each simplex is bounded, and variation of the diameters of all the simplexes is small. We call such kind of $\\mathcal{T}$ a quasi-uniform triangulation, and denote the largest diameter as $h$ . For each vertex $v_{i} \\in \\mathcal{T}$ , we construct a piecewise linear base function $\\varphi_{i}$ , such that $\\varphi_{i}$ is linear on each triangle, $\\varphi_{i}(v_{j})$ is $\\delta_{ij}$ . We define a finite dimensional subspace $V_{h} \\subset H^{1}(\\Omega)$ ,", + "bbox": [ + 169, + 316, + 823, + 388 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nV _ {h} := \\left\\{v _ {h} (x) := \\sum_ {v _ {i} \\in \\mathcal {T}} \\lambda_ {i} \\varphi_ {i} (x), \\lambda_ {i} \\in \\mathbb {R} \\right\\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 396, + 643, + 439 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Given a function $u \\in H^{1}(\\Omega)$ , we use $u_{h} \\in V_{h}$ to denote its approximation in $V_{h}$ . Furthermore, $u_{h} = \\sum_{i} \\lambda_{i} \\varphi_{i}$ , we also use $u_{h}$ to represent the coefficient vector $(\\lambda_1, \\lambda_2, \\dots, \\lambda_k)^T$ depending on the context. The weak solution Eqn. (28) to the Monge-Ampère equation (6) is equivalent to find a $v \\in H^{1}(\\Omega)$ , such that $a(v, w) = l(w)$ for all $w \\in H^{1}(\\Omega)$ . 
In discrete cases, we want to find $v_{h} \\in V_{h}$ , such that", + "bbox": [ + 169, + 446, + 823, + 516 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\na \\left(v _ {h}, w _ {h}\\right) = l \\left(w _ {h}\\right), \\quad \\forall w _ {h} \\in V _ {h}. \\tag {29}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 517, + 823, + 534 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Eqn. (29) is equivalent to the linear system,", + "bbox": [ + 171, + 537, + 460, + 551 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\left( \\begin{array}{c c c c} a \\left(\\varphi_ {1}, \\varphi_ {1}\\right) & a \\left(\\varphi_ {2}, \\varphi_ {1}\\right) & \\dots & a \\left(\\varphi_ {N}, \\varphi_ {1}\\right) \\\\ a \\left(\\varphi_ {1}, \\varphi_ {2}\\right) & a \\left(\\varphi_ {2}, \\varphi_ {2}\\right) & \\dots & a \\left(\\varphi_ {N}, \\varphi_ {2}\\right) \\\\ \\vdots & \\vdots & & \\vdots \\\\ a \\left(\\varphi_ {1}, \\varphi_ {N}\\right) & a \\left(\\varphi_ {2}, \\varphi_ {N}\\right) & \\dots & a \\left(\\varphi_ {N}, \\varphi_ {N}\\right) \\end{array} \\right) \\left( \\begin{array}{c} \\lambda_ {1} \\\\ \\lambda_ {2} \\\\ \\vdots \\\\ \\lambda_ {N} \\end{array} \\right) = \\left( \\begin{array}{c} l \\left(\\varphi_ {1}\\right) \\\\ l \\left(\\varphi_ {2}\\right) \\\\ \\vdots \\\\ l \\left(\\varphi_ {N}\\right) \\end{array} \\right) \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 559, + 825, + 628 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "From the weak solution to the linearized Monge-Ampère equation (10), we obtain the linear system Eqn. (30). We denote the stiffness matrix $A = (a(\\varphi_i, \\varphi_j))$ . By the uniform ellipticity Eqn. 
(22), and $V_h \\subset H^1(\\Omega)$", + "bbox": [ + 169, + 642, + 823, + 685 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\na (v, v) \\geq \\lambda \\| \\nabla v \\| _ {L ^ {2} (\\Omega)} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 686, + 575, + 705 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Assume $\\int_{\\Omega} v dx = 0$ , by Poincaré inequality,", + "bbox": [ + 171, + 709, + 468, + 727 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\nabla v \\| _ {L ^ {2} (\\Omega)} ^ {2} \\geq C _ {1} (\\Omega) \\| v \\| _ {L} ^ {2} (\\Omega), \\quad \\forall v \\in H ^ {1} (\\Omega), \\int_ {\\Omega} v d x = 0,\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 734, + 699, + 766 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where the constant $C_1(\\Omega)$ depends on $\\Omega$ . Combine the above two inequalities, we obtain", + "bbox": [ + 169, + 773, + 754, + 787 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\na (v, v) \\geq c \\| v \\| _ {L ^ {2} (\\Omega)} ^ {2}, \\quad \\forall v \\in H ^ {1} (\\Omega), \\int_ {\\Omega} v d x = 0. \\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 796, + 825, + 829 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Similarly, By the uniform ellipticity Eqn. 
22, and $V_{h}\\subset H^{1}(\\Omega)$", + "bbox": [ + 169, + 835, + 588, + 852 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\na (v, v) \\leq \\Lambda \\| \\nabla v \\| _ {L ^ {2} (\\Omega)} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 418, + 859, + 576, + 878 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For linear finite element and quasi-uniform triangulation, we have the inverse Poincaré inequality,", + "bbox": [ + 169, + 886, + 813, + 901 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\nabla v _ {h} \\right\\| _ {L ^ {2}} ^ {2} \\leq C _ {2} (\\Omega) h ^ {- 1} \\left\\| v _ {h} \\right\\| _ {L ^ {2}} ^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 907, + 602, + 926 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $h$ is the diameter of each element. Combine the above two inequalities, we obtain", + "bbox": [ + 169, + 103, + 754, + 119 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\na \\left(v _ {h}, v _ {h}\\right) \\leq C \\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2}, \\quad \\forall v _ {h} \\in V _ {h}. \\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 125, + 823, + 143 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "By combining the inequalities Eqn. (31) and Eqn. 
(32), we obtain", + "bbox": [ + 171, + 150, + 607, + 166 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{C _ {3}} \\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2} \\leq a (v _ {h}, v _ {h}) \\leq C _ {3} \\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2}, \\quad \\forall v _ {h} \\in V _ {h}, \\int_ {\\Omega} v _ {h} = 0, \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 172, + 825, + 204 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $C_3 > 1$ is a constant. Suppose $v_{h} = \\sum_{i = 1}^{n}\\xi_{i}\\varphi_{i}$ , then", + "bbox": [ + 169, + 210, + 570, + 228 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2} = \\int_ {\\Omega} v _ {h} ^ {2} d x = \\sum_ {i, j = 1} ^ {n} \\xi_ {i} \\xi_ {j} \\int_ {\\Omega} \\varphi_ {i} (x) \\varphi_ {j} (x) d x = \\xi^ {T} \\Phi \\xi ,\n$$\n", + "text_format": "latex", + "bbox": [ + 289, + 234, + 709, + 273 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $\\xi = (\\xi_{i})$ and the matrix $\\Phi = \\left(\\int_{\\Omega}\\varphi_{i}\\varphi_{j}\\right)$ is positive definite. Therefore,", + "bbox": [ + 169, + 282, + 679, + 301 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{C _ {4}} \\| \\xi \\| ^ {2} \\leq \\xi^ {T} \\Phi \\xi < C _ {4} \\| \\xi \\| ^ {2}. \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 306, + 825, + 338 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "By $a(v_h,v_h) = \\xi^T A\\xi$ , combining inequalities Eqn. (33) and Eqn. 
(34), we obtain", + "bbox": [ + 169, + 345, + 697, + 362 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{C _ {3} C _ {4}} \\| \\xi \\| ^ {2} \\leq \\xi^ {T} A \\xi \\leq C _ {3} C _ {4} \\| \\xi \\| ^ {2}, \\quad \\forall \\xi \\in \\mathbb {R} ^ {n}, \\sum_ {i = 1} ^ {n} \\xi_ {i} = 0, \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 369, + 825, + 407 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $C_3C_4 > 1$ . This proves the following lemma,", + "bbox": [ + 169, + 414, + 517, + 430 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma 6. By using Galerkin method using linear elements to numerically approximate the weak solution Eqn. (28) to the linearized Monge-Ampère Eqn. (6), if the uniform ellipticity Eqn. (22) holds, and the triangulation $\\mathcal{T}$ is quasi-uniform, then the stiffness matrix of the linear system Eqn. (30) is positive definite on the space $\\sum_{i=1}^{n} \\xi_i = 0$ ,", + "bbox": [ + 169, + 433, + 826, + 492 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{C _ {3} C _ {4}} \\| \\xi \\| ^ {2} \\leq \\xi^ {T} A \\xi \\leq C _ {3} C _ {4} \\| \\xi \\| ^ {2}, \\quad \\forall \\xi \\in \\mathbb {R} ^ {n}, \\sum_ {i = 1} ^ {n} \\xi_ {i} = 0, \\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 498, + 825, + 537 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $C_3C_4 > 1$", + "bbox": [ + 171, + 542, + 292, + 559 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Since the uniform ellipticity Eqn. (22) holds for any time $t \\in [0,1]$ , then we obtain", + "bbox": [ + 169, + 569, + 717, + 585 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Corollary 7. By using Galerkin method with linear elements on quasi-uniform triangulations, the linearized Monge-Ampère equation in the continuity method Eqn. 
(6) always has a solution $v_h \\in V_h$ for any $t \\in [0,1]$ .", + "bbox": [ + 169, + 588, + 823, + 633 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Please note that the central differential scheme can be treated as Galerkin's method on a special uniform triangulation. Therefore, the above estimates still hold.", + "bbox": [ + 169, + 642, + 823, + 672 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.3 CONVERGENCE RATE", + "text_level": 1, + "bbox": [ + 171, + 688, + 370, + 702 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Theorem 8 (main). Given a domain $\\Omega \\subset \\mathbb{R}^n$ , which is a canonical cuboid $\\Omega = [-1,1]^n$ , and a positive density function $f:\\Omega \\to \\mathbb{R}$ with the balance condition", + "bbox": [ + 169, + 713, + 826, + 743 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\Omega} f (x) d x = \\int_ {\\Omega} 1 \\cdot d x,\n$$\n", + "text_format": "latex", + "bbox": [ + 415, + 750, + 580, + 782 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "suppose the mirror reflection extension Eqn. (14) of $f$ to the flat torus $\\tilde{f} : \\mathbb{T}^n \\to \\mathbb{R}$ is $C^\\alpha$ , $\\alpha \\in (0,1)$ , then Monge-Ampère equation,", + "bbox": [ + 169, + 789, + 823, + 821 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nd e t D ^ {2} u (x) = f (x), \\quad \\nabla u (\\Omega) = \\Omega\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 825, + 619, + 843 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "can be solved using FFT-OT Algorithm Alg. (1). 
In particular, one can choose the step length parameter $\\tau$ , such that there is a constant $0 < \\gamma < 1$ , the approximation error satisfies", + "bbox": [ + 169, + 849, + 825, + 878 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| f - \\rho_ {k + 1} \\right\\| ^ {2} < C \\gamma^ {k},\n$$\n", + "text_format": "latex", + "bbox": [ + 424, + 885, + 573, + 902 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "namely the algorithm has a linear convergence rate.", + "bbox": [ + 169, + 909, + 517, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 509, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Proof. Suppose at the $k + 1$ -th iteration, $\\rho_{k + 1} = \\operatorname*{det}(I + D^2 u_{k + 1})$ , $\\| v_k\\| \\sim O(\\tau^{-1})$", + "bbox": [ + 169, + 102, + 736, + 119 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} f - \\rho_ {k + 1} = f - \\det (I + \\mathcal {D} ^ {2} u _ {k} + \\mathcal {D} ^ {2} v _ {k}) \\\\ = f - \\det (I + \\mathcal {D} ^ {2} u _ {k}) - \\sum_ {p q} u _ {k} ^ {p q} \\partial_ {p} \\partial_ {q} v _ {k} + o (\\tau^ {- 1}) \\\\ = \\left(f - \\rho_ {k}\\right) - L _ {k} \\left[ v _ {k} \\right] + o \\left(\\tau^ {- 1}\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 294, + 126, + 696, + 200 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $L_{k}[v_{k}] = \\sum_{pq}u_{k}^{pq}\\partial_{p}\\partial_{q}v_{k}$ . Hence by integration by parts Eqn. 
(27),", + "bbox": [ + 169, + 208, + 665, + 226 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| f - \\rho_ {k + 1} \\right\\| _ {L ^ {2} (\\Omega)} ^ {2} = \\left\\| f - \\rho_ {k} \\right\\| _ {L ^ {2} (\\Omega)} ^ {2} - 2 \\int_ {\\Omega} L _ {k} [ v _ {k} ] (f - \\rho_ {k}) + o (\\tau^ {- 1}) \\\\ = \\left\\| f - \\rho_ {k} \\right\\| _ {L ^ {2} (\\Omega)} ^ {2} + 2 a _ {k} (f - \\rho_ {k}, v _ {k}) + o (\\tau^ {- 1}) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 234, + 728, + 287 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $a_{k}$ is the bilinear form in Eqn.(27). In the discrete case, all functions are in $V_{h}$ , we denote", + "bbox": [ + 169, + 295, + 805, + 311 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\| u _ {h} \\| _ {\\Phi} ^ {2} := \\| u _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2} = u _ {h} ^ {T} \\Phi u _ {h}, \\quad \\| u _ {h} \\| ^ {2} := u _ {h} ^ {T} u _ {h}, \\quad \\| u _ {h} \\| _ {A} ^ {2} := u _ {h} ^ {T} A u _ {h},\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 316, + 743, + 338 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "by the inequality Eqn. (34) and Eqn. 
35,", + "bbox": [ + 169, + 345, + 439, + 361 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{C _ {4}} \\| u _ {h} \\| ^ {2} \\leq \\| u _ {h} \\| _ {\\Phi} ^ {2} \\leq C _ {4} \\| u _ {h} \\| ^ {2}, \\quad \\frac {1}{C _ {3} C _ {4}} \\| u _ {h} \\| ^ {2} \\leq \\| u _ {h} \\| _ {A} ^ {2} \\leq C _ {3} C _ {4} \\| u _ {h} \\| ^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 368, + 746, + 398 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Therefore", + "bbox": [ + 171, + 405, + 243, + 420 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| f _ {h} - \\rho_ {h, k + 1} \\right\\| _ {\\Phi} ^ {2} = \\left\\| f _ {h} - \\rho_ {h, k} \\right\\| _ {\\Phi} ^ {2} - 2 \\tau^ {- 1} \\left(f - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} \\left(f _ {h} - \\rho_ {h, k}\\right) + o \\left(\\tau^ {- 1}\\right), \\tag {37}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 426, + 823, + 446 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $A_{k}$ is the stiffness matrix in Eqn.(30), and $\\bar{A}_k$ is the mean stiffness matrix. (By the uniform ellipticity Eqn. (22), the eigen values of the adjoint matrix $(u^{pq})(x,t)$ is uniformly bounded away from zero in the space $\\mathcal{H} := \\{\\xi \\in \\mathbb{R}^n | \\sum_i \\xi_i = 0\\}$ , so the eigen value of the mean adjoint matrix $\\bar{u}^{pq}(t)$ is bounded away from zero in $\\mathcal{H}$ . After discretization, the eigen values of $\\bar{A}_k$ is strictly positive in $\\mathcal{H}$ , hence $\\bar{A}_k$ is invertible in $\\mathcal{H}$ . In the following discussion, the term $o(\\tau^{-1})$ will be ignored.) 
Remark that the following displayed equation is a scalar", + "bbox": [ + 169, + 453, + 823, + 540 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left(f _ {h} - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} (f - \\rho_ {h, k}) = \\mathrm {t r} \\left(\\left(f _ {h} - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} (f _ {h} - \\rho_ {h, k})\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 547, + 733, + 566 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Since $A_{k}$ and $\\bar{A}_{k}$ are symmetric, positive definite on the space $\\sum_{i}\\xi_{i} = 0$ , $\\| A_k\\| _2\\leq C_3C_4$ and $\\| \\bar{A}_k\\| _2\\leq C_3C_4$ , so are their inverses. Since $A_{n}$ and $\\bar{A}_n$ are symmetric, positive definite on the space orthogonal to $(1,1,\\ldots ,1)^T$ , by Eqn. (35) and $\\| A_k\\bar{A}_k^{-1}\\| \\leq \\| A_k\\| \\| \\bar{A}_k^{-1}\\|$ , we have", + "bbox": [ + 169, + 574, + 823, + 623 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}} \\| f _ {h} - \\rho_ {h, k} \\| _ {\\Phi} ^ {2} \\leq \\left(f _ {h} - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} (f _ {h} - \\rho_ {h, k}).\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 628, + 692, + 664 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Plug into Eqn. (37), we have", + "bbox": [ + 169, + 669, + 366, + 685 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| f _ {h} - \\rho_ {h, k + 1} \\right\\| _ {\\Phi} ^ {2} \\leq \\left(1 - \\frac {1}{\\tau} \\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}}\\right) \\left\\| f _ {h} - \\rho_ {h, k} \\right\\| _ {\\Phi} ^ {2} \\leq \\left(1 - \\frac {1}{\\tau} \\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}}\\right) ^ {k} \\left\\| f _ {h} - \\rho_ {h, 0} \\right\\| _ {\\Phi} ^ {2}. 
\\tag {38}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 693, + 825, + 729 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We can choose the step-length $\\tau^{-1}$ , such that $\\gamma \\in (0, 1)$ , where", + "bbox": [ + 171, + 737, + 591, + 753 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\gamma = 1 - \\frac {(n - 1)}{\\tau C _ {3} ^ {2} C _ {4} ^ {3}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 434, + 761, + 558, + 796 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Therefore", + "bbox": [ + 171, + 801, + 241, + 815 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| f _ {h} - \\rho_ {h, k + 1} \\right\\| _ {\\Phi} ^ {2} \\leq \\gamma^ {k} \\left\\| f _ {h} - \\rho_ {h, 0} \\right\\| _ {\\Phi} ^ {2} \\leq C _ {4} \\gamma^ {k} \\left\\| f _ {h} - \\rho_ {h, 0} \\right\\| ^ {2}. \\tag {39}\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 814, + 823, + 833 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/9d32cf41dd420f054c911100857edaa1d69208dd0df5a90d593bd716b8f123a5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 837, + 825, + 849 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.4 DIFFERENTIAL OPERATOR USING FFT", + "text_level": 1, + "bbox": [ + 171, + 869, + 486, + 883 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "By using the Discrete Fourier Transformation, the differential operators can be converted to algebraic operators in the frequency domain.", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Lemma 9. 
Suppose the discrete function is $u_{i,j,k}$ , with discrete Fourier transformation", + "bbox": [ + 169, + 103, + 746, + 119 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nu _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} e ^ {\\sqrt {- 1} \\frac {2 \\pi m i}{M}} e ^ {\\sqrt {- 1} \\frac {2 \\pi n j}{N}} e ^ {\\sqrt {- 1} \\frac {2 \\pi l k}{L}}\n$$\n", + "text_format": "latex", + "bbox": [ + 277, + 122, + 718, + 164 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "then the differential operator using central difference $\\partial_i\\partial_i u_{i,j,k}$ is given by", + "bbox": [ + 169, + 166, + 665, + 181 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {i} \\partial_ {i} u _ {i, j, k} = \\frac {1}{h _ {x} ^ {2}} \\left(u _ {i + 1, j, k} + u _ {i - 1, j, k} - 2 u _ {i, j, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 \\left(\\cos \\frac {2 \\pi m}{M} - 1\\right)}{h _ {x} ^ {2}} e ^ {\\imath \\frac {2 \\pi m i}{M}} e ^ {\\imath \\frac {2 \\pi n j}{N}} e ^ {\\imath \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 184, + 750, + 260 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\iota = \\sqrt{-1}$ , and $\\partial_i\\partial_ju_{i,j,k}$ is given by,", + "bbox": [ + 169, + 263, + 460, + 280 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {i} \\partial_ {j} u _ {i, j, k} = \\frac {1}{4 h _ {x} h _ {y}} \\left(u _ {i + 1, j + 1, k} + u _ {i - 1, j - 1, k} - u _ {i + 1, j - 1, k} - u _ {i - 1, j + 1, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h 
_ {x} h _ {y}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 282, + 756, + 359 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Proof. By equations", + "bbox": [ + 171, + 373, + 312, + 388 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\cos (A + \\alpha) + \\cos (A - \\alpha) - 2 \\cos (A) \\\\ = (\\cos A \\cos \\alpha - \\sin A \\sin \\alpha) + (\\cos A \\cos \\alpha + \\sin A \\sin \\alpha) - 2 \\cos A \\\\ = 2 (\\cos \\alpha - 1) \\cos A \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 391, + 727, + 441 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 445, + 200, + 458 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sin (A + \\alpha) + \\sin (A - \\alpha) - 2 \\sin (A) \\\\ = (\\sin A \\cos \\alpha + \\cos A \\sin \\alpha) + (\\sin A \\cos \\alpha - \\cos A \\sin \\alpha) - 2 \\sin A \\\\ = 2 (\\cos \\alpha - 1) \\sin A \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 459, + 728, + 510 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "we obtain", + "bbox": [ + 171, + 513, + 241, + 527 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{h _ {x} ^ {2}} \\left[ e ^ {\\iota^ {\\frac {2 \\pi m (i + 1)}{M}}} + e ^ {\\iota^ {\\frac {2 \\pi m (i - 1)}{M}}} - 2 e ^ {\\iota^ {\\frac {2 \\pi m i}{M}}} \\right] = \\frac {2 \\left(\\cos \\frac {2 \\pi m}{M} - 1\\right)}{h _ {x} ^ {2}} e ^ {\\iota^ {\\frac {2 \\pi m i}{M}}}\n$$\n", + "text_format": "latex", + "bbox": [ + 279, + 523, + 715, + 559 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "by direct computation, we have", + "bbox": [ + 169, + 559, + 382, + 574 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {i} 
\\partial_ {i} u _ {i, j, k} = \\frac {1}{h _ {x} ^ {2}} (u _ {i + 1, j, k} + u _ {i - 1, j, k} - 2 u _ {i, j, k}) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {e ^ {\\iota \\frac {2 \\pi m (i + 1)}{M}} + e ^ {\\iota \\frac {2 \\pi m (i - 1)}{M}} - 2 e ^ {\\iota \\frac {2 \\pi m i}{M}}}{h _ {x} ^ {2}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 (\\cos \\frac {2 \\pi m}{M} - 1)}{h _ {x} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 575, + 790, + 696 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Similarly, by equations", + "bbox": [ + 171, + 699, + 326, + 714 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\cos (A + \\alpha + B + \\beta) + \\cos (A - \\alpha + B - \\beta) - \\cos (A + \\alpha + B - \\beta) - \\cos (A - \\alpha + B + \\beta) \\\\ = \\cos (A + B + \\alpha + \\beta) + \\cos (A + B - \\alpha - \\beta) - \\cos (A + B + \\alpha - \\beta) - \\cos (A + B - \\alpha + \\beta) \\\\ = 2 \\cos (A + B) \\cos (\\alpha + \\beta) - 2 \\cos (A + B) \\cos (\\alpha - \\beta) \\\\ = 2 \\cos (A + B) (\\cos (\\alpha + \\beta) - \\cos (\\alpha - \\beta)) \\\\ = 2 \\cos (A + B) (\\cos \\alpha \\cos \\beta - \\sin \\alpha \\sin \\beta - \\cos \\alpha \\cos \\beta - \\sin \\alpha - \\sin \\beta) \\\\ = - 4 \\cos (A + B) \\sin \\alpha \\sin \\beta \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 715, + 792, + 811 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 816, + 200, + 828 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sin (A + \\alpha 
+ B + \\beta) + \\sin (A - \\alpha + B - \\beta) - \\sin (A + \\alpha + B - \\beta) - \\sin (A - \\alpha + B + \\beta) \\\\ = \\sin (A + B + \\alpha + \\beta) + \\sin (A + B - \\alpha - \\beta) - \\sin (A + B + \\alpha - \\beta) - \\sin (A + B - \\alpha + \\beta) \\\\ = 2 \\sin (A + B) \\cos (\\alpha + \\beta) - 2 \\sin (A + B) \\cos (\\alpha - \\beta) \\\\ = 2 \\sin (A + B) (\\cos (\\alpha + \\beta) - \\cos (\\alpha - \\beta)) \\\\ = 2 \\sin (A + B) (\\cos \\alpha \\cos \\beta - \\sin \\alpha \\sin \\beta - \\cos \\alpha \\cos \\beta - \\sin \\alpha - \\sin \\beta) \\\\ = - 4 \\sin (A + B) \\sin \\alpha \\sin \\beta \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 830, + 790, + 926 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "we deduce the following equation,", + "bbox": [ + 171, + 104, + 403, + 119 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {i} \\partial_ {j} u _ {i, j, k} = \\frac {1}{4 h _ {x} h _ {y}} \\left(u _ {i + 1, j + 1, k} + u _ {i - 1, j - 1, k} - u _ {i + 1, j - 1, k} - u _ {i - 1, j + 1, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h _ {y}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 127, + 754, + 200 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/128785ac32bd0a8f19798902b6a1d6a700bd904a841a4eb14140d17f13a3b664.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 209, 
+ 823, + 220 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Similarly, we have the representations of other differential operators in the frequency domain,", + "bbox": [ + 171, + 239, + 785, + 255 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {j} \\partial_ {j} u _ {i, j, k} = \\frac {1}{h _ {x} ^ {2}} \\left(u _ {i, j + 1, k} + u _ {i, j - 1, k} - 2 u _ {i, j, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 (\\cos \\frac {2 \\pi n}{N} - 1)}{h _ {y} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 262, + 748, + 335 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {k} \\partial_ {k} u _ {i, j, k} = \\frac {1}{h _ {z} ^ {2}} \\left(u _ {i, j, k + 1} + u _ {i, j, k - 1} - 2 u _ {i, j, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 \\left(\\cos \\frac {2 \\pi l}{L} - 1\\right)}{h _ {z} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 345, + 746, + 419 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {j} \\partial_ {k} u _ {i, j, k} = \\frac {1}{4 h _ {y} h _ {z}} \\left(u _ {i, j + 1, k + 1} + u _ {i, j - 1, k - 1} - u _ {i, j + 1, k - 1} - u _ {i, j - 1, k + 1}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi n}{N} \\sin \\frac {2 \\pi l}{L}}{h _ {y} h _ {z}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ 
{\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 443, + 751, + 517 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {k} \\partial_ {i} u _ {i, j, k} = \\frac {1}{4 h _ {z} h _ {x}} \\left(u _ {i + 1, j, k + 1} + u _ {i - 1, j, k - 1} - u _ {i + 1, j, k - 1} - u _ {i - 1, j, k + 1}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi l}{L} \\sin \\frac {2 \\pi m}{M}}{h _ {z} h _ {x}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 527, + 753, + 599 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C ALGORITHM PIPELINES", + "text_level": 1, + "bbox": [ + 171, + 622, + 406, + 637 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In this section, we give the algorithm pipeline of the FFT-OT in Alg. 1 and the details to solve the constant coefficient elliptic PDE through FFT in Alg. 2.", + "bbox": [ + 169, + 652, + 823, + 683 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Algorithm 1: FFT-OT", + "text_level": 1, + "bbox": [ + 173, + 700, + 323, + 715 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Input: Domain $\\Omega = [-1, 1]^3$ , the source density function $f > 0$ , the target density $g = 1$ , step length $\\tau$ , approximation error threshold $\\varepsilon$", + "bbox": [ + 173, + 720, + 790, + 744 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Output: Solution $\\frac{1}{2}\\| x\\|^2 + u_n$ to the Monge-Ampère Eqn. 
(2) with the corresponding boundary condition.", + "bbox": [ + 173, + 746, + 743, + 768 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Initialize $u_0(x) = 0$", + "bbox": [ + 197, + 771, + 323, + 784 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "while true do", + "bbox": [ + 199, + 785, + 279, + 795 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Compute the Hessian matrix $D^2 u_n(x)$", + "bbox": [ + 199, + 797, + 433, + 810 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Compute the density function $\\rho_{n}(x)\\gets \\operatorname *{det}(I + D^{2}u_{n}(x))$", + "bbox": [ + 199, + 810, + 553, + 823 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "if $\\| f - \\rho_n\\|_{L_2(\\Omega)} < \\varepsilon$ then", + "bbox": [ + 199, + 823, + 367, + 835 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Break;", + "bbox": [ + 199, + 835, + 264, + 845 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Compute the adjoint matrix $[H_n^{pq}(x)]\\gets \\mathrm{Adj}(I + D^2 u_n(x))$", + "bbox": [ + 199, + 849, + 565, + 863 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Compute the mean adjoint matrix $[H_n^{pq}]$ using Eqn. (11);", + "bbox": [ + 199, + 864, + 534, + 875 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Solve the constant coefficient elliptic PDE (12) using the FFT Solver Alg. 
2;", + "bbox": [ + 199, + 876, + 650, + 888 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Update the Brenier potential $u_{n + 1}(x) \\gets u_n + \\tau v_n$ ;", + "bbox": [ + 199, + 888, + 509, + 901 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/d61c13d938ba3e3e011a9da39cfe736b7b910a39b97e25db82c6206119535e6e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 99, + 277, + 174 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/44e79679a69958eabe685acf804b31adb3d9599663bed582cdc5120bd078deb4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 176, + 277, + 250 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/1e925c9b7f9647892a7ea3fbfddb61e80987d81ba2f6646e4581b685a2fb9402.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 251, + 277, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/fbb2d06d3357e0337f05a41ceb38dde94cda28eb36ddfaed59169e9c28b735c1.jpg", + "image_caption": [ + "(a) Density" + ], + "image_footnote": [], + "bbox": [ + 183, + 327, + 277, + 400 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/3ae8a214333cab2e75560cf2faed28321156b390f144dbbdac2346938375b192.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 99, + 390, + 174 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/7a32bc28f59db7be75eb28365f52fb5978fc247d00945738d29d8b27caf7778c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 176, + 390, + 250 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": 
"images/e494bc42a1aee6690ee63f3e1884e1f5b88d7d1c5347206571a5bd629f439cae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 251, + 390, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/4126d843e81326161452112c5f97ad88a01e118bcc4743555b0a982acb664266.jpg", + "image_caption": [ + "(b) Rejection" + ], + "image_footnote": [], + "bbox": [ + 295, + 327, + 390, + 398 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/1f5a7553399bb362b5bd8eed40380cd780f118852192b67f50de4b54c3f6c612.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 99, + 501, + 174 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/b637a1f2ee6ff502245d3ce9ab33b5a84d0d1e67c0550b102ee5658949ea98c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 176, + 501, + 250 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/8ee81eb856ffcc90253543aece5f629617a0c4b424a18fdddd4d4cb6a1953052.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 251, + 501, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/58ae9947b1a4e94f5726fd435e640a4184c32f9790027899c0b277b8abe3ab76.jpg", + "image_caption": [ + "(c) MH" + ], + "image_footnote": [], + "bbox": [ + 406, + 327, + 501, + 400 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/2d347f904fa60d1b03ebaa080f4fd81a76d3650db8736de9e8879d7c30cb0ce7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 99, + 612, + 174 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/ec24c9178f566cf4b8f34ca5fa4af2218552888e0a66d54fccc9a561be2d7ab5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 176, + 612, + 250 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/5cb225d82aa21de75571e1da0d0cd4719d28deccd58fbbb9d338b0e80644afd2.jpg", + "image_caption": [], + "image_footnote": [], + 
"bbox": [ + 517, + 251, + 612, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/02063de3e6ff0a0c1fc406d3985036a189531ee95712e25fed72960b5493b688.jpg", + "image_caption": [ + "(d) Slice" + ], + "image_footnote": [], + "bbox": [ + 517, + 327, + 612, + 398 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/af1aa4f0612f357b809fff64e0055dcd3eb08a278a3526ead2603686e412890b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 102, + 723, + 174 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/f7e41c15e16491b309e865d95b76e88783dc5c13fc07702f1ef3b8f9db2cd246.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 176, + 723, + 250 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/80125737bb277afa86ba721eeb59fe6a247694e77ec0c822bc2ff888c8b3b27c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 251, + 723, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/fa1bf5ab48c383d9d7520f197517751a02857b01e395036a260f7ca4df30098e.jpg", + "image_caption": [ + "(e) Ours-rand" + ], + "image_footnote": [], + "bbox": [ + 630, + 327, + 723, + 398 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/ed40012dcc547db48f95acbf7f256edfeb9868e75cb741cda923f9ab383df9ea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 101, + 834, + 174 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/56efa3f542a9c1d5eeaacc4e0771ff63903e8ad30a5981e2d2b9b9ff6a17c45d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 176, + 834, + 250 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/2aa7363eaa0da9ad29c7a1e8a9519b144066355e9cbf1199d988c6a33c228c87.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 251, + 834, + 325 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": 
"images/cff095d6b8df797d938236d52afdc9be601480f956c2306b2b11b66a54c0d80b.jpg", + "image_caption": [ + "(f) Ours-grid", + "Figure 5: 3D density function sampling. (a) The density functions in different slices of the same model, namely the 40th, 56th, 72th and 80th. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with $T^{-1}$ . (f) The sampling results by mapping the grid centers back with $T^{-1}$ . The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better. Zoom in for better visualization." + ], + "image_footnote": [], + "bbox": [ + 741, + 327, + 834, + 400 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Algorithm 2: FFT Solver for the Constant Coefficient Elliptic PDE", + "text_level": 1, + "bbox": [ + 173, + 518, + 617, + 532 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Input: Domain $\\Omega = [-1,1]^3$ , $M,N,L$ , $\\{a^{pq}\\}$ , $b^r$ , $c$ , function $f$ with the periodic boundary condition", + "bbox": [ + 173, + 536, + 777, + 551 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Output: Solution $u$ to the elliptic PDE Eqn. (18)", + "bbox": [ + 173, + 551, + 465, + 563 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Discretize the domain $\\Omega$ to a $M\\times N\\times L$ grid;", + "bbox": [ + 187, + 564, + 470, + 575 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Sample the function $f$ to $f_{i,j,k}$", + "bbox": [ + 187, + 575, + 375, + 589 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Compute FFT using Eqn. 
(16), $\\{\\hat{f}_{m,n,l}\\} \\gets \\mathrm{FFT}(\\{f_{i,j,k}\\})$ ;", + "bbox": [ + 187, + 589, + 539, + 604 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "for $(m,n,l)\\in [0,M - 1]\\times [0,N - 1]\\times [0,L - 1]$ do", + "bbox": [ + 187, + 604, + 524, + 617 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Compute the factor $\\lambda_{m,n,l}$ using Eqn. (19);", + "bbox": [ + 197, + 617, + 455, + 628 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "if $\\lambda_{m,n,l}$ is 0 then", + "bbox": [ + 197, + 628, + 312, + 638 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "$\\hat{u}_{m,n,l}\\gets 0;$", + "bbox": [ + 197, + 640, + 299, + 652 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "else", + "bbox": [ + 197, + 652, + 223, + 662 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "$\\hat{u}_{m,n,l}\\gets \\hat{f}_{m,n,l} / \\lambda_{m,n,l};$", + "bbox": [ + 223, + 665, + 377, + 681 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Compute the Inverse FFT using Eqn. 
(17), $\\{u_{i,j,k}\\} \\gets \\mathrm{IFFT}(\\{\\hat{u}_{m,n,l}\\})$ ;", + "bbox": [ + 187, + 686, + 612, + 700 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Return $\\{u_{i,j,k}\\}$", + "bbox": [ + 189, + 700, + 285, + 714 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D APPENDIX EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 747, + 423, + 762 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In this section, as a compensation of the experiments in the main paper, we give more results on the 3D adaptive sampling and volumetric magnifier.", + "bbox": [ + 169, + 779, + 823, + 808 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D.1 MORE RESULTS ON 3D ADAPTIVE SAMPLING", + "text_level": 1, + "bbox": [ + 171, + 824, + 534, + 838 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In the experiments, we set the density function $f(x) = \\sum_{i=1}^{30} p_i \\mathcal{N}(\\mu_i, \\Sigma_i)$ , where $\\mathcal{N}(\\mu_i, \\Sigma_i)$ represents Gaussian distribution with mean $\\mu_i$ and variance $\\Sigma_i = \\mathrm{diag}(\\sigma_{i0}^2, \\sigma_{i1}^2, \\sigma_{i2}^2)$ . $\\mu_i \\in \\mathbb{R}^3$ is uniformly sampled from $[0,1]^3$ , $\\sigma_{ij}$ is uniformly sampled from $[0,0.5]$ , $p_i \\in \\mathbb{R}$ is uniformly sampled from $[0.2,1]$ and normalized such that $\\int_{\\Omega} f(x) dx = 1$ . Thus the source distribution $\\mu$ is a complicated Gaussian mixture distribution restricted on $\\Omega = [0,1]^3$ . 
After computing the OT map", + "bbox": [ + 169, + 849, + 825, + 925 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/b632fd714fbfdb632a3fadf61432a89b4935cba5a58458f57721ec541b864b8a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 99, + 277, + 174 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/ce168d8abe6ef7856881a01379eafc45ff484d3f482f28bbd9121ee6f360fcd8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 176, + 277, + 250 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/1d0ed36963385a1cdeb5ba7d7491cde70599c9d615925ec1eb0eb33d253da61a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 251, + 277, + 325 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/eb9ce5bc43fe4ea1e02f33564d44774cad8052be6a1d9599c507358664a72adc.jpg", + "image_caption": [ + "(a) Density" + ], + "image_footnote": [], + "bbox": [ + 183, + 327, + 277, + 400 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/3a3dafa3734f7c62672195aae43f7933770947304af30b77cb539a98c00c056c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 99, + 390, + 174 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/8a81d3b1245a6f48c68ece122d2b992520da7cf082d951fe4aa566e2d2b3fd5d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 176, + 390, + 250 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/251020e575b7b5f86fb186ef23a404656f39073fc82867552527c84baf964ab9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 251, + 390, + 325 + ], + "page_idx": 20 + }, + { + "type": "image", + 
"img_path": "images/3ccc7c884bf260d3d7a589e79b9b4fa1d982905da5ec2d50108ac999beea27b7.jpg", + "image_caption": [ + "(b) Rejection" + ], + "image_footnote": [], + "bbox": [ + 295, + 327, + 390, + 400 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/eaf96e2003f1ea59584af9725a79164f6b33ab7b581efab41acb30dee13b0bf8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 99, + 501, + 174 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/fd227b3612f179fff0a571758e1efca8a888148821fc29a10a544659f4bd65e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 176, + 501, + 250 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/8e0e7a79899d3ecec4623468e3128ae43c89a6526bdb894bf3beecf22dd3eddd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 251, + 501, + 325 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/80942969ab76543868ba09a0801227a57eb9b33db9897b7f213aa7c1f5bc3163.jpg", + "image_caption": [ + "(c) MH" + ], + "image_footnote": [], + "bbox": [ + 406, + 327, + 501, + 400 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/28c4c3972de99557357c14c6b87619ff7a34e1166bc578df3c195d38f4ea8a05.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 99, + 612, + 174 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/cf31df0e7b0570b35ed2fca7512ae315ab5ca51d3b38856e83b45d9d5e4b9902.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 176, + 612, + 250 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/0ecdda00548d3aec475ef7e2e5395252966aa131b202cd34fc6a6ce13f498fb3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 251, + 612, + 325 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/40d3fab3b9d3f33210e869c2778b9abe4e6549645280d60ee8b9fbaa682466a3.jpg", + "image_caption": [ + "(d) Slice", + 
"Figure 6: 3D density function sampling. (a) The density functions in different slices of the same model, namely the 56th, 64th, 80th and 88th. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with $T^{-1}$ . (f) The sampling results by mapping the grid centers back with $T^{-1}$ . The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better. Zoom in for better visualization." + ], + "image_footnote": [], + "bbox": [ + 517, + 327, + 612, + 400 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/a44e65cf4720fb3a9025753064e926dfdb8bde3b86e5bc3e5cf471a43408922e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 101, + 723, + 172 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/15e043fbbd4253c3678c4a6c8987915513b324058b4a6013c68462e6053f8c4b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 176, + 723, + 250 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/569538f217b565d540365f94cdc03f275139cbdb91aa6587279a13dbd86cbd41.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 251, + 722, + 324 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/2dbd3cd34ac41eb59e083df05340237d5623a52dac6d916612bb68fbf4b503eb.jpg", + "image_caption": [ + "(e) Ours-rand" + ], + "image_footnote": [], + "bbox": [ + 630, + 327, + 722, + 398 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/8edc0a8c659ae779ef7604f2c644f9c4736a8e2a02872c6fb5ab0ca4a2a395f6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 101, + 834, + 172 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": 
"images/89522a5ec4fc0c4966fa43f854ea69059752ee3f5a866a35ba53e8a54ef7f3ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 176, + 834, + 250 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/0ebd275f7cefa65894cf1568baccba33a3509c5fff52d6a3c85a0338be5d4ad8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 251, + 834, + 324 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/f85149e701cb473dacd334bb3fafafc12d8d82164ca006fc6af47bd045e3392c.jpg", + "image_caption": [ + "(f) Ours-grid" + ], + "image_footnote": [], + "bbox": [ + 741, + 327, + 834, + 400 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "$T$ from $\\mu$ to the uniform distribution $\\nu$ defined on $[-1,1]^3$ , we conduct two groups of experiments: (i) we map the cell centers of the grid $\\{y_k\\}$ of $[-1,1]^3$ back to $[-1,1]^3$ through the inverse OT map $T^{-1}(y_k)$ defined by Eqn. (20); (ii) we randomly sample $100k$ samples $\\{y_k\\}$ from the Uniform distribution defined in $[-1,1]^3$ , then map them back to $[-1,1]^3$ through the inverse OT map $T^{-1}(y_k)$ . In order to keep the consistency with the mirror reflection process in the FFT-OT algorithm, we also reflect the generated samples back to $\\Omega$ . To visualize the results of the $k$ th slice, we plot the samples whose $z$ coordinates satisfy the inequality,", + "bbox": [ + 169, + 523, + 823, + 625 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\nk / 1 2 8 - 1 / 2 5 6 \\leq z \\leq k / 1 2 8 + 1 / 2 5 6.\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 630, + 630, + 646 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In Fig. 5 and Fig. 6, we give more sampling results of different slices correspond to the two models used in Fig. 2 in the main paper. Fig. 
5 visualize the density function restricted on the 40th, 56th, 72th and 80th slices for different methods of the model displayed in the first row of 2. Fig. 6 visualize the density function restricted on the 56th, 64th, 80th and 88th slices for different methods of the model displayed in the second row of 2. Compared with the classical methods, the both sampling strategies of our method give decent sampling results that fit the prescribed density function well. Moreover, the number of generated samples for different slices of the same 3D model fits the density functions restricted to the corresponding slices well, namely more samples are generated in the brighter regions for different slices.", + "bbox": [ + 169, + 660, + 823, + 787 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "D.2 MORE RESULTS ON VOLUMETRIC MAGNIFIER", + "text_level": 1, + "bbox": [ + 171, + 801, + 537, + 816 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In this experiment, we magnify the volumetric MRI image of the aneurysm by different amplification factors. In Fig. 7, we show the original aneurysm viewed from difference angles in the first column. The last three columns give the magnified results with different amplification factors from the viewpoints same as those in the first column. 
We can see that the aneurysm region is successfully magnified by different factors and the rest parts of the volume nearly keeps the same.", + "bbox": [ + 169, + 828, + 823, + 898 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/fb7d09d5709a40cfb69539473eaed5d763a441ad2075023fee75460f50533828.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 297, + 334, + 415 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/11a8bfcfe495005c1b746312291aa2cded56fd424078f9ac0e844becb48e7b4e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 417, + 334, + 534 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/bc21db47976ef52cdf7e67a303c072525938597b4d3b10adab258e8ae0eff02c.jpg", + "image_caption": [ + "(a) Original" + ], + "image_footnote": [], + "bbox": [ + 183, + 537, + 334, + 654 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/e66ecdc0147b920220d6ad7885d69226c479c799bcf72243d8bbd04c7128c3f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 299, + 503, + 415 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/56de3e5a0cf32f1b7bfb23652890c5c3d8b7521e6d37d17261226aba14fa84b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 417, + 503, + 534 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/856064b9ce45d2bc94f593509ee413a02c6a616819f4afd839216a8291a836f0.jpg", + "image_caption": [ + "(b) Magnifying ratio 1" + ], + "image_footnote": [], + "bbox": [ + 352, + 537, + 503, + 654 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": 
"images/612523e4a7d50159c05d7ceb09aa87af4ce236800deb9243b95993711e37bcb6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 521, + 299, + 671, + 415 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/891f404debf9f17eba4150df5625178a5feb7d425472183b96528929f320b7ad.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 521, + 417, + 671, + 534 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/7ade992cc0161eba250766e4f278b57ec9d994c8ed45392476a9053a21254b72.jpg", + "image_caption": [ + "(c) Magnifying ratio 2" + ], + "image_footnote": [], + "bbox": [ + 521, + 537, + 671, + 654 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/3710da88f2a7dd69ed1eea4fd8ab45ed766d513a4ed1084de222e94bdf4cc8e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 299, + 839, + 415 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/1152a6ab31b4aec89407cf6eba702c2129f587ffc0f15c6a290b6cd99606045c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 417, + 839, + 534 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/bf5e003805ef42d6c857e8ec10508c0cd76198e6f141390526699155a9ca25dc.jpg", + "image_caption": [ + "(d) Magnifying ratio 3", + "Figure 7: The volume magnifier of an aneurysm. The first column shows the original volumetric data from different viewpoints, and the last three columns give the magnified data from the same viewpoints of the first column with different magnifying ratios. The yellow circles denote the aneurysm or the ROIs." 
+ ], + "image_footnote": [], + "bbox": [ + 689, + 537, + 839, + 654 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + } +] \ No newline at end of file diff --git a/2023/Volumetric Optimal Transportation by Fast Fourier Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_model.json b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a1fd20cdaec55abb14a0a6cbbd5300f0c2802457 --- /dev/null +++ b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_model.json @@ -0,0 +1,5656 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.147 + ], + "angle": 0, + "content": "VOLUMETRIC OPTIMAL TRANSPORTATION BY FAST FOURIER TRANSFORM" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.171, + 0.249, + 0.184 + ], + "angle": 0, + "content": "Na Lei*" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.185, + 0.401, + 0.213 + ], + "angle": 0, + "content": "Dalian University of Technology \nnalei@dlut.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.595, + 0.171, + 0.714, + 0.186 + ], + "angle": 0, + "content": "Dongsheng An" + }, + { + "type": "text", + "bbox": [ + 0.595, + 0.186, + 0.815, + 0.213 + ], + "angle": 0, + "content": "Stony Brook University\ndoan@cs.stonybrook.edu" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.234, + 0.266, + 0.249 + ], + "angle": 0, + "content": "Min Zhang" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.249, + 0.379, + 0.277 + ], + "angle": 0, + "content": "Zhejiang 
University min_zhang@zju.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.4, + 0.234, + 0.483, + 0.249 + ], + "angle": 0, + "content": "Xiaoyin Xu" + }, + { + "type": "text", + "bbox": [ + 0.4, + 0.249, + 0.589, + 0.276 + ], + "angle": 0, + "content": "Harvard Medical School \nxxu@bwh.harvard.edu" + }, + { + "type": "text", + "bbox": [ + 0.61, + 0.234, + 0.703, + 0.249 + ], + "angle": 0, + "content": "Xianfeng Gu" + }, + { + "type": "text", + "bbox": [ + 0.61, + 0.249, + 0.81, + 0.277 + ], + "angle": 0, + "content": "Stony Brook University gu@cs.stonybrook.edu" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.313, + 0.547, + 0.328 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.345, + 0.768, + 0.596 + ], + "angle": 0, + "content": "The optimal transportation map finds the most economical way to transport one probability measure to another, and it has been applied in a broad range of applications in machine learning and computer vision. By the Brenier theory, computing the optimal transport map is equivalent to solving a Monge-Ampère equation, which is highly non-linear. Therefore, the computation of optimal transportation maps is intrinsically challenging. In this work, we propose a novel and powerful method, the FFT-OT (fast Fourier transform-optimal transport), to compute the 3-dimensional OT problems. The method is based on several key ideas: first, the Monge-Ampère equation is linearized to a sequence of linear elliptic PDEs with spacial and temporal variant coefficients; second, the obliqueness property of optimal transportation maps is reformulated as a Neumann boundary condition; and third, the variant coefficient elliptic PDEs are approximated by constant coefficient elliptic PDEs and solved by FFT on GPUs. We also prove that the algorithm converges linearly. Experimental results show that the FFT-OT algorithm is more than a hundred times faster than the conventional methods based on the convex geometry. 
Furthermore, the method can be directly applied for sampling from complex 3D density functions in machine learning and magnifying the volumetric data in medical imaging." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.624, + 0.338, + 0.64 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.656, + 0.827, + 0.782 + ], + "angle": 0, + "content": "Optimal transportation (OT) transports one probability measure to another in the most economical way, and it plays a fundamental role in areas like machine learning Courty et al. (2017); Altschuler et al. (2019), computer vision Arjovsky et al. (2017); Tolstikhin et al. (2018); An et al. (2020), and computer graphics Solomon et al. (2015); Nader & Guennebaud (2018). Given a Riemannian manifold \\(X\\), all the probability distributions on \\(X\\) form an infinite dimensional space \\(\\mathcal{P}(X)\\). Given any two distributions \\(\\mu, \\nu \\in \\mathcal{P}(X)\\), the optimal transportation map defines a distance between them, and the McCann interpolation McCann (1997) defines the geodesic connecting them. Hence optimal transportation equips \\(\\mathcal{P}(X)\\) with a Riemannian metric and defines its covariant differentiation, which provides a variational calculus framework for optimization in it." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.788, + 0.827, + 0.902 + ], + "angle": 0, + "content": "As the optimal transportation problem is highly non-linear, it is quite challenging to compute the OT maps. Recently, researchers have developed many algorithms. The geometric variational approach Aurenhammer et al. (1998); Gu et al. (2016); Levy (2015) based on the Brenier theorem Brenier (1991) is capable of achieving high accuracy for low dimensional problems, but it requires complicated geometric data structure and the storage complexity grows exponentially as the dimension increases. 
The Sinkhorn method Cuturi (2013) based on the Kantorovich theorem adds an entropic regularizer to the primal problem and can handle high dimensional tasks, but it suffers from the intrinsic approximation error." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.911, + 0.371, + 0.925 + ], + "angle": 0, + "content": "* indicates equal contribution" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.218 + ], + "angle": 0, + "content": "We propose a novel method to tackle this challenging problem through Fast Fourier Transformation (FFT). According to the Brenier theorem Brenier (1991), under the quadratic distance cost, the optimal transportation map is the gradient of the Brenier potential, which satisfies the Monge-Ampère equation. With the continuity method Delanoë (1991), the Monge-Ampère equation can be linearized as a sequence of elliptic partial differential equations (PDEs) with spacial and temporal variant coefficients. By iteratively solving the linearized Monge-Ampère equations, we can obtain the OT map. Specifically, we propose to approximate the linearized Monge-Ampère equation by constant coefficient elliptic PDEs and solve them using the FFT on GPUs." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.222, + 0.828, + 0.432 + ], + "angle": 0, + "content": "Our proposed FFT-OT method has many merits: (i) it is generalizable for arbitrary dimension; (ii) it has a linear convergence rate, namely the approximation error decays exponentially fast; (iii) in each iteration, the computational complexity of FFT is \\( O(n \\log n) \\), thus our algorithm can solve large scale OT problems; and (iv) it is highly parallelable and can be efficiently implemented on GPUs. We demonstrate the efficiency of the FFT-OT algorithm by solving the volumetric OT problems for machine learning and medical imaging applications including sampling from given 3D density functions and volumetric magnifier. The algorithm also has its own limitations: (i) although it can be generalized to any dimensions, the storage complexity increases exponentially with respect to the dimension, so its power is limited by the memory size of the GPUs; (ii) Since the algorithm uses FFT, the current version of the method only works well for continuous density functions. (iii) In this work, we mainly focus on the computation of the OT map from the uniform distribution to another arbitrary continuous distribution. To extend the method to find the OT map between any two continuous measures, we can compute two OT maps from the uniform distribution to the both continuous measures, then combine them together. The combination will give a reasonable approximation of the OT map Nader & Guennebaud (2018)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.437, + 0.828, + 0.524 + ], + "angle": 0, + "content": "Though Lei and Gu Lei & Gu (2021) also uses FFT to solve the 2-dimensional OT problem, our method differs their works in the following two aspects: (i) Lei and Gu's method uses the fixed point method to compute the 2D OT problems, ours is based on the linearization of the Monge-Ampère operator to solve the 3D OT problems, these are two different methodologies in PDE theory; (ii) In our paper, we also provide the theoretical convergence analysis of the proposed method. For more detailed analysis and related work, please refer to the Appendix A." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.542, + 0.52, + 0.558 + ], + "angle": 0, + "content": "2 OPTIMAL TRANSPORTATION THEORY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.572, + 0.825, + 0.603 + ], + "angle": 0, + "content": "In this section, we review the fundamental concepts and theorems of the OT problem and the Monge-Amperè equation, more details can be found in Villani (2008)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.616, + 0.825, + 0.675 + ], + "angle": 0, + "content": "Optimal Transportation Map and the Monge-Ampère equation Suppose the source domain \\(\\Omega\\) is an open set in \\(\\mathbb{R}^d\\) with the probability measure \\(\\mu\\), the target domain \\(\\Sigma\\) is with the probability measure \\(\\nu\\). Both \\(\\mu\\) and \\(\\nu\\) have density functions \\(d\\mu(x) = f(x)dx\\) and \\(d\\nu(y) = g(y)dy\\), respectively, with the equal total mass: \\(\\int_{\\Omega} f(x)dx = \\int_{\\Sigma} g(y)dy\\), which is called the balance condition." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.678, + 0.827, + 0.709 + ], + "angle": 0, + "content": "Suppose \\( T: \\Omega \\to \\Sigma \\) is a measurable map. 
The mapping \\( T \\) is called measure preserving and denoted as \\( T_{\\#} \\mu = \\nu \\) if the following relation" + }, + { + "type": "equation", + "bbox": [ + 0.434, + 0.713, + 0.825, + 0.731 + ], + "angle": 0, + "content": "\\[\n\\mu (T ^ {- 1} (A)) = \\nu (A) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.734, + 0.825, + 0.763 + ], + "angle": 0, + "content": "for every Borel subset \\(A \\subset \\Sigma\\). A cost function \\(c: \\Omega \\times \\Sigma \\to \\mathbb{R}\\) measures the transportation cost for transporting the unit mass from \\(x \\in \\Omega\\) to \\(y \\in \\Sigma\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.766, + 0.827, + 0.796 + ], + "angle": 0, + "content": "Problem 1 (Monge). The optimal transportation problem finds the measure preserving map with the minimal total transportation cost," + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.799, + 0.597, + 0.832 + ], + "angle": 0, + "content": "\\[\n\\min _ {T _ {\\#} \\mu = \\nu} \\int_ {\\Omega} c (x, T (x)) f (x) d x\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.828, + 0.901 + ], + "angle": 0, + "content": "The solution to the Monge's problem is called the optimal transport map between \\(\\mu\\) and \\(\\nu\\). The existence, uniqueness and regularity of OT maps depend on the boundedness and the continuity of the density functions, the convexity of the supporting domains, the continuity of their boundaries, and the cost function. In our current work, we focus on the similar situation in Saumier et al. 
(2013)," + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.909, + 0.712, + 0.927 + ], + "angle": 0, + "content": "- The cost function is quadratic Euclidean distance \\( c(x, y) = \\| x - y \\|^2 / 2 \\);" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.104, + 0.822, + 0.133 + ], + "angle": 0, + "content": "- The supports of the source and the target measures are the canonical cube \\(\\Omega = [-1, 1]^3\\), which is uniformly convex;" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.137, + 0.823, + 0.166 + ], + "angle": 0, + "content": "- The source and the target measures \\(\\mu, \\nu\\) are absolutely continuous with respect to the Lebesgue measure, their densities \\(f, g\\) are positive and bounded away from zero;" + }, + { + "type": "list", + "bbox": [ + 0.216, + 0.104, + 0.823, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.458, + 0.169, + 0.597, + 0.185 + ], + "angle": 0, + "content": "\\[\n0 < m < f, g < M,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.188, + 0.421, + 0.204 + ], + "angle": 0, + "content": "and \\(f,g\\) are of class \\(C^\\alpha (\\Omega)\\)" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.207, + 0.825, + 0.235 + ], + "angle": 0, + "content": "- The boundary condition is second boundary condition (OT boundary condition), \\( T(\\Omega) = \\Omega \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.245, + 0.825, + 0.288 + ], + "angle": 0, + "content": "Then according to (Villani (2003) Theorem 14.4, Saumier et al. 
(2013) Theorem 2.1), the OT map \\( T: \\Omega \\to \\Omega \\) exists and is unique and invertible (\\( \\mu \\) a.e.), and the Brenier potential is of class \\( C^{2,\\beta}(\\bar{\\Omega}) \\) for some \\( 0 < \\beta < \\alpha \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.291, + 0.825, + 0.336 + ], + "angle": 0, + "content": "Theorem 2. Assume that \\(\\Omega, \\mu, \\nu, f\\) and \\(g\\) are defined as above. Then there exists a convex function \\(u: \\Omega \\to \\mathbb{R}\\), \\(u \\in C^{2,\\beta}(\\Omega)\\) for some \\(0 < \\beta < \\alpha\\), such that \\(\\nabla u\\) pushes \\(\\mu\\) forward to \\(\\nu\\), \\((\\nabla u)_{\\#} \\mu = \\nu\\). Moreover, \\(\\nabla u\\) is unique and invertible (\\(\\mu\\) a.e.), and its inverse \\(\\nabla v\\) satisfies \\((\\nabla v)_{\\#} \\nu = \\mu\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.344, + 0.801, + 0.359 + ], + "angle": 0, + "content": "We call such a convex function \\(u\\) the Brenier potential, it satisfies the Monge-Ampère equation," + }, + { + "type": "equation", + "bbox": [ + 0.415, + 0.363, + 0.823, + 0.392 + ], + "angle": 0, + "content": "\\[\n\\det D ^ {2} u (x) = \\frac {f (x)}{g \\circ \\nabla u (x)}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.395, + 0.825, + 0.44 + ], + "angle": 0, + "content": "with the boundary condition \\(\\nabla u(\\Omega) = \\Sigma\\). Then finding the optimal transportation map is equivalent to solving the corresponding Monge-Ampère equation. In the current work, the target measure is always the Lebesgue measure, and the source density \\(f\\) is of class \\(C^{2,\\alpha}(\\Omega)\\)."
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.452, + 0.727, + 0.468 + ], + "angle": 0, + "content": "Linearized Monge-Ampère Operator The Monge-Ampère operator is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.471, + 0.56, + 0.489 + ], + "angle": 0, + "content": "\\[\n\\mathrm {M A} [ u ] = \\det D ^ {2} u,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.492, + 0.571, + 0.507 + ], + "angle": 0, + "content": "which is highly non-linear. It can be linearized as following:" + }, + { + "type": "equation", + "bbox": [ + 0.272, + 0.51, + 0.825, + 0.528 + ], + "angle": 0, + "content": "\\[\n\\mathrm {M A} [ u + \\varepsilon v ] = \\det (D ^ {2} u + \\varepsilon D ^ {2} v) \\approx \\det D ^ {2} u + \\varepsilon \\operatorname {T r a c e} (\\operatorname {A d j} (D ^ {2} u) \\cdot D ^ {2} v), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.53, + 0.825, + 0.56 + ], + "angle": 0, + "content": "where \\(\\operatorname{Adj}(A)\\) is the adjoint (co-factor) matrix of \\(A\\), \\(\\operatorname{Adj}(A) := \\det(A)A^{-T}\\). Therefore the linearized Monge-Ampère operator is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.302, + 0.564, + 0.825, + 0.603 + ], + "angle": 0, + "content": "\\[\n\\mathrm {D M A} _ {u} [ v ] := \\operatorname {T r a c e} \\left(\\operatorname {A d j} \\left(D ^ {2} u\\right) \\cdot D ^ {2} v\\right) = \\sum_ {p, q = 1} ^ {d} u ^ {p q} (x) \\partial_ {p} \\partial_ {q} v (x), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.607, + 0.755, + 0.631 + ], + "angle": 0, + "content": "where \\((u^{pq}) = \\mathrm{Adj}(D^2 u)\\) is the adjoint matrix of the Hessian of \\(u\\), and \\(\\partial_p\\partial_q\\coloneqq \\frac{\\partial^2}{\\partial x_p\\partial x_q}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.643, + 0.825, + 0.686 + ], + "angle": 0, + "content": "Continuity Method For simplicity, we assume the source domain coincides with the target domain, that is \\(\\Omega = \\Sigma\\), and the target density is \\(g(x) \\equiv 1\\). The Monge-Ampère equation Eqn. (2) is simplified as \\(\\operatorname{det}D^{2}u(x) = f(x)\\). Define a flow of density as" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.689, + 0.825, + 0.705 + ], + "angle": 0, + "content": "\\[\n\\rho (x, t) = (1 - t) + t f (x), \\quad t \\in [ 0, 1 ]. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.707, + 0.678, + 0.723 + ], + "angle": 0, + "content": "The corresponding flow of the Brenier potentials is \\( u(x,t):\\Omega \\times [0,1]\\to \\mathbb{R} \\)" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.727, + 0.664, + 0.744 + ], + "angle": 0, + "content": "\\[\n\\det D _ {x} ^ {2} u (x, t) = \\rho (x, t), \\quad s. t. \\nabla_ {x} u (x, t) (\\Omega) = \\Omega ,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.747, + 0.825, + 0.805 + ], + "angle": 0, + "content": "where \\( D_x^2 u(x,t) \\) is the Hessian of \\( u(x,t) \\) with respect to \\( x \\), and \\( u(x,1) \\) is the solution to the initial Monge-Ampère equation Eqn. (2). Take the derivative w.r.t. time \\( t \\) on both sides of the linearized Monge-Ampère operator Eqn. (4), we obtain an elliptic PDE with the spacial and temporal variant coefficients of the unknown \\( v(x,t) \\coloneqq \\dot{u} (x,t) \\), namely the \"velocity\" of the Brenier potential," + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.809, + 0.825, + 0.847 + ], + "angle": 0, + "content": "\\[\n\\mathrm {D M A} _ {u} [ v ] = \\sum_ {p, q = 1} ^ {d} u ^ {p q} (x, t) \\partial_ {p} \\partial_ {q} v (x, t) = \\frac {\\partial}{\\partial t} \\rho (x, t) = f (x) - 1. 
\\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.852, + 0.826, + 0.926 + ], + "angle": 0, + "content": "At time \\( t = 0 \\), the initial Brenier potential is known as \\( u(x,0) = \\frac{1}{2}\\| x\\|^2 \\). Suppose at time \\( t \\), we have obtained \\( u(x,t) \\) already, then we can compute the adjoint matrix \\( u^{pq}(x,t) \\) of the Hessian \\( D_x^2 u(x,t) \\) and solve Eqn. (6) to get the velocity \\( v(x,t) = \\dot{u} (x,t) \\). In turn, we move forward to time \\( t + \\delta t \\), and update \\( u(x,t + \\delta t) \\) by \\( u(x,t) + \\dot{u} (x,t)\\delta t \\). By repeating this procedure, eventually we reach time \\( t = 1 \\) and obtain the solution \\( u(x)\\coloneqq u(x,1) \\) to the initial Monge-Ampère Eqn. (2)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "Obliqueness Boundary Condition Suppose the boundary of \\(\\Omega\\) is \\(C^1\\) almost everywhere, therefore at a \\(C^1\\) point \\(x\\in \\partial \\Omega\\), the outer normal \\(\\mathbf{n}(x)\\) is well defined. For almost every boundary point \\(x\\in \\partial \\Omega\\), the obliqueness condition is represented as" + }, + { + "type": "equation", + "bbox": [ + 0.425, + 0.154, + 0.825, + 0.17 + ], + "angle": 0, + "content": "\\[\n\\langle \\mathbf {n} (x), \\mathbf {n} (\\nabla u (x)) \\rangle \\geq 0. 
\\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.175, + 0.825, + 0.217 + ], + "angle": 0, + "content": "Suppose \\(\\Omega\\) is a cuboid and has 6 faces, if a boundary point \\(x\\in \\partial \\Omega\\) is on a face, by the cyclic monotonicity of the map and the strict convexity of \\(u\\) Villani (2008), its image \\(\\nabla u(x)\\) must be on the same face of \\(x\\), namely," + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.218, + 0.825, + 0.232 + ], + "angle": 0, + "content": "\\[\n\\langle \\nabla u (x) - x, \\mathbf {n} (x) \\rangle = 0. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.236, + 0.825, + 0.267 + ], + "angle": 0, + "content": "We can rewrite the Brenier potential as \\( u(x_{1},x_{2},\\ldots ,x_{d}) = \\frac{1}{2}\\sum_{i = 1}^{d}x_{i}^{2} + v(x_{1},\\dots ,x_{d}) \\), then \\( \\nabla u(x) - x = \\nabla v(x) \\). By Eqn. (8), \\( v(x) \\) satisfies the Neumann boundary condition," + }, + { + "type": "equation", + "bbox": [ + 0.425, + 0.273, + 0.825, + 0.3 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial v}{\\partial \\mathbf {n}} (x) = 0, \\quad x \\in \\partial \\Omega . \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.306, + 0.825, + 0.349 + ], + "angle": 0, + "content": "Similarly, the velocity of the (modified) Brenier potential \\( v \\) in Eqn. (6) also satisfies the Neumann boundary condition. The analysis about the existence and regularity of the solutions to Eqn. (6) with boundary condition Eqn. (9) can be found in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.368, + 0.465, + 0.384 + ], + "angle": 0, + "content": "3 COMPUTATIONAL ALGORITHM" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.399, + 0.825, + 0.457 + ], + "angle": 0, + "content": "Here we introduce the 3-dimensional FFT-OT algorithm, which can be generalized to any dimensions. 
We approximate the Monge-Ampère equation by a sequence of constant coefficient elliptic PDEs, and solve them by FFT on GPUs. More detailed analysis about the solution of the discretized Monge-Ampère equation, and the proofs of the lemmas and theorems are given by Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.472, + 0.71, + 0.487 + ], + "angle": 0, + "content": "3.1 CONTINUITY METHOD FOR SOLVING THE MONGE-AMPERE EQUATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.498, + 0.825, + 0.611 + ], + "angle": 0, + "content": "By using the continuity method, we can solve the Monge-Ampère equation iteratively. For simplicity, we assume the target measure is the Lebesgue's measure with \\( g \\equiv 1 \\). At the \\( n \\)-th iteration, the Brenier potential is represented as \\( \\frac{1}{2} \\| x \\|^2 + u_n(x) \\), its Hessian matrix is \\( H_n(x) \\coloneqq \\mathrm{I} + D^2 u_n(x) \\), the corresponding density function is defined as the determinant of the Hessian \\( \\rho_n = \\operatorname*{det}(H_n) \\), and the velocity of the Brenier potential is \\( v_n(x) \\). In the beginning, the Brenier potential \\( u_0(x) \\) is zero, the Hessian is \\( H_0 = \\mathrm{I} \\) and the density is \\( \\rho_0 = 1 \\). At the \\( n \\)-th step, we compute the adjoint matrix \\( [H_n^{pq}(x)] \\) of the Hessian matrix \\( H_n(x) \\) for any \\( x \\in \\Omega \\). According to Eqn. (3), the velocity \\( v_n(x) \\) satisfies the variant coefficient elliptic PDE induced by the linearized Monge-Ampère operator," + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.618, + 0.825, + 0.656 + ], + "angle": 0, + "content": "\\[\n\\mathrm {D M A} _ {u _ {n}} [ v _ {n} ] = \\sum_ {p, q = 0} ^ {2} H _ {n} ^ {p q} (x) \\partial_ {p} \\partial_ {q} v _ {n} (x) = \\frac {1}{\\tau} \\left(f (x) - \\rho_ {n} (x)\\right). 
\\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.663, + 0.825, + 0.707 + ], + "angle": 0, + "content": "Note that the right hand side of Eqn. (6) is the difference between the initial and the target densities, whereas here it is replaced by the difference between the initial and the current densities. The step length parameter \\(\\tau \\geq 1\\) can be chosen to guarantee the convergence Loepers & Rapetti (2005)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.711, + 0.825, + 0.783 + ], + "angle": 0, + "content": "The elliptic PDE Eqn. (10) is with spatially variant coefficients. Although the traditional finite element method (FEM) can solve it using the GMRES algorithm Saad (2003), this algorithm can not be directly accelerated by GPUs. To overcome this difficulty, we approximate Eqn. (10) by a much simpler elliptic PDE with constant coefficients, which can be directly solved using the following FFT-OT algorithm pipeline Alg. 1 on GPUs in Appendix C." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.788, + 0.825, + 0.819 + ], + "angle": 0, + "content": "At the \\(n\\)-th iteration, after obtaining the adjoint matrix \\([H_n^{pq}(x)], x \\in \\Omega\\), we compute the mean adjoint matrix \\([\\bar{H}_n^{pq}(x)]\\)" + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.824, + 0.825, + 0.857 + ], + "angle": 0, + "content": "\\[\n\\bar {H} _ {n} ^ {p q} := \\frac {\\int_ {\\Omega} H _ {n} ^ {p q} (x) \\rho_ {n} (x) d x}{\\int_ {\\Omega} \\rho_ {n} (x) d x}, \\quad p, q = 0, 1, 2 \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.863, + 0.825, + 0.89 + ], + "angle": 0, + "content": "and replace the elliptic PDE Eqn.(10) with variant coefficients by the elliptic PDE with constant coefficients," + }, + { + "type": "equation", + "bbox": [ + 0.319, + 0.891, + 0.825, + 0.928 + ], + "angle": 0, + "content": "\\[\n\\overline {{\\mathrm {D M A}}} _ {u _ {n}} [ v _ {n} ] = \\sum_ {p, q = 0} ^ {2} \\bar {H} _ {n} ^ {p q} 
\\partial_ {p} \\partial_ {q} v _ {n} (x) = \\frac {1}{\\tau} (f (x) - \\rho_ {n} (x)), \\tag {12}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.618, + 0.12 + ], + "angle": 0, + "content": "where \\(\\overline{\\mathrm{DMA}}\\) is called the mean linearized Monge-Ampère operator." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.125, + 0.825, + 0.195 + ], + "angle": 0, + "content": "Then we solve the constant coefficient elliptic PDE Eqn. (12) by FFT Algorithm Alg. 2 in Appendix C. Although the original variant coefficient PDE Eqn. (10) is replaced by its constant coefficient approximation Eqn. (12), the algorithm still converges to the solution with a linear convergence rate. This replacement allows the whole algorithm to be solved by FFT on GPUs, which greatly improves the computational efficiency." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.198, + 0.825, + 0.257 + ], + "angle": 0, + "content": "Theorem 3 (main). Given a domain \\(\\Omega \\subset \\mathbb{R}^d\\), which is a canonical cuboid \\(\\Omega = [-1,1]^d\\), and a positive density function \\(f:\\Omega \\to \\mathbb{R}\\) with the balance condition \\(\\int_{\\Omega}f(x)dx = \\int_{\\Omega}dx\\), suppose the mirror reflection extension Eqn. 
(14) of \\(f\\) to the flat torus \\(\\tilde{f}:\\mathbb{T}^n\\to \\mathbb{R}\\) is \\(C^\\alpha\\), \\(\\alpha \\in (0,1)\\), then the Monge-Ampère equation," + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.257, + 0.617, + 0.273 + ], + "angle": 0, + "content": "\\[\n\\det D ^ {2} u (x) = f (x), \\quad \\nabla u (\\Omega) = \\Omega\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.275, + 0.825, + 0.317 + ], + "angle": 0, + "content": "can be solved using the FFT-OT Algorithm Alg. 1 in Appendix C. In particular, one can choose the step length parameter \\(\\tau\\), such that there is a constant \\(0 < \\gamma < 1\\) that the approximation error satisfies" + }, + { + "type": "equation", + "bbox": [ + 0.43, + 0.316, + 0.823, + 0.332 + ], + "angle": 0, + "content": "\\[\n\\left\\| f - \\rho_ {n + 1} \\right\\| ^ {2} < C \\gamma^ {n}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.328, + 0.517, + 0.341 + ], + "angle": 0, + "content": "namely the algorithm has a linear convergence rate." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.36, + 0.631, + 0.374 + ], + "angle": 0, + "content": "3.2 FFT SOLVER FOR CONSTANT COEFFICIENT ELLIPTIC PDES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.443 + ], + "angle": 0, + "content": "To solve the constant coefficient elliptic PDE Eqn. (12), we first extend the PDE to the flat torus by mirror reflection, then discretize the domain and compute the differential operators by central difference scheme. Finally the PDE is converted to algebraic equations in the frequency domain by FFT and can be efficiently solved on GPUs."
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.457, + 0.825, + 0.489 + ], + "angle": 0, + "content": "Extension by Mirror Reflection Suppose \\(\\Omega = [0,1]^3\\) and \\(f:\\Omega \\to \\mathbb{R}\\) are given, we extend \\(\\Omega\\) to \\(\\tilde{\\Omega} = [-1,1]^3\\) and \\(f\\) to \\(\\tilde{f}:\\tilde{\\Omega}\\rightarrow \\mathbb{R}\\) by mirror reflection" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.492, + 0.825, + 0.51 + ], + "angle": 0, + "content": "\\[\n\\tilde {f} (x, y, z) = f (| x |, | y |, | z |), \\quad \\forall (x, y, z) \\in \\tilde {\\Omega}. \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.514, + 0.825, + 0.589 + ], + "angle": 0, + "content": "By definition, \\(\\tilde{f}\\) satisfies the periodic boundary condition and can be treated as a function defined on the flat torus \\(\\mathbb{T}^3\\). \\(\\tilde{\\Omega}\\) is one of the fundamental domain of \\(\\mathbb{T}^3\\). The constant coefficients \\(a^{p,q}\\) keep unchanged. Then we solve the following constant coefficient elliptic PDE Eqn. (18) \\(L[\\tilde{u}] = \\tilde{f}\\) with the periodic boundary condition. Finally, the restriction of \\(\\tilde{u}\\) on \\(\\Omega\\) gives the initial solution \\(u\\) to \\(L[u] = f\\) with Neumann boundary condition." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.596, + 0.825, + 0.626 + ], + "angle": 0, + "content": "In the following, to avoid using overly complicated symbols, we use \\((u,f,\\Omega)\\) to represent \\((\\tilde{u},\\tilde{f},\\tilde{\\Omega})\\) for simplicity." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.64, + 0.825, + 0.725 + ], + "angle": 0, + "content": "Tessellation Suppose \\(\\Omega = [-1,1]^3\\) is the canonical cube (a fundamental domain of a flat torus), we tessellate it to the regular cells, and the centers of the cells form a grid \\(M\\times N\\times L\\). 
The Brenier potential \\(u:\\Omega \\to \\mathbb{R}\\) is discretized to a tensor \\(u_{i,j,k}\\) with \\(\\{i,j,k\\} \\in \\{0,\\dots ,M - 1\\} \\times \\{0,\\dots ,N - 1\\} \\times \\{0,\\dots ,L - 1\\}\\). The spacial step lengths are \\((h_x,h_y,h_z) = (2 / M,2 / N,2 / L)\\). The coordinate of each sample point \\((x_{i},y_{j},z_{k})\\) is \\((x_{i},y_{j},z_{k}) = (-1 + h_{x}(i + 1 / 2), - 1 + h_{y}(j + 1 / 2), - 1 + h_{z}(k + 1 / 2))\\). The periodic boundary condition is then formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.728, + 0.825, + 0.743 + ], + "angle": 0, + "content": "\\[\nu _ {i, j, k} = u _ {i + \\alpha M, j + \\beta N, k + \\gamma L}, \\quad \\alpha , \\beta , \\gamma \\in \\mathbb {Z}. \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.825, + 0.782 + ], + "angle": 0, + "content": "Finite Difference Differential Operator We use the standard central differences to compute the differential operators. The first order derivative \\(\\mathcal{D}_x\\) is approximated by" + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.786, + 0.607, + 0.814 + ], + "angle": 0, + "content": "\\[\n\\mathcal {D} _ {x} u _ {i, j, k} = \\frac {u _ {i + 1 , j , k} - u _ {i - 1 , j , k}}{2 h _ {x}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.817, + 0.825, + 0.847 + ], + "angle": 0, + "content": "where the index \\(i + 1\\) means \\(i + 1\\) modulus \\(M\\). The operators \\(\\mathcal{D}_y, \\mathcal{D}_z\\) are defined in a similar way. 
The second order derivative operator \\(\\mathcal{D}_{xx}\\) and \\(\\mathcal{D}_{xy}\\) are approximated by" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.85, + 0.644, + 0.878 + ], + "angle": 0, + "content": "\\[\n\\mathcal {D} _ {x x} ^ {2} u _ {i, j, k} = \\frac {u _ {i + 1 , j , k} + u _ {i - 1 , j , k} - 2 u _ {i , j , k}}{h _ {x} ^ {2}}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.302, + 0.88, + 0.694, + 0.907 + ], + "angle": 0, + "content": "\\[\n\\mathcal {D} _ {x y} ^ {2} u _ {i, j, k} = \\frac {u _ {i + 1 , j + 1 , k} + u _ {i - 1 , j - 1 , k} - u _ {i + 1 , j - 1 , k} - u _ {i - 1 , j + 1 , k}}{4 h _ {x} h _ {y}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.607, + 0.927 + ], + "angle": 0, + "content": "The other operators \\(\\mathcal{D}_{yy},\\mathcal{D}_{zz},\\mathcal{D}_{yz}\\) and \\(\\mathcal{D}_{xz}\\) are defined similarly." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.12 + ], + "angle": 0, + "content": "Discrete Fourier Transformation The discrete Fourier transformation (DFT) of \\( u_{i,j,k} \\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.122, + 0.825, + 0.16 + ], + "angle": 0, + "content": "\\[\n\\hat {u} _ {m, n, l} = \\sum_ {i = 0} ^ {M - 1} \\sum_ {j = 0} ^ {N - 1} \\sum_ {k = 0} ^ {L - 1} u _ {i, j, k} \\hat {\\omega} _ {m n l} \\tag {16}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.164, + 0.826, + 0.2 + ], + "angle": 0, + "content": "\\[\nu _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\omega_ {m n l} \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.201, + 
0.825, + 0.247 + ], + "angle": 0, + "content": "where \\(\\hat{\\omega}_{mnl} = e^{-\\iota \\frac{2\\pi mi}{M}}e^{-\\iota \\frac{2\\pi nj}{N}}e^{-\\iota \\frac{2\\pi lk}{L}}\\), \\(\\omega_{mnl} = e^{\\iota \\frac{2\\pi mi}{M}}e^{\\iota \\frac{2\\pi nj}{N}}e^{\\iota \\frac{2\\pi lk}{L}}\\) and \\(\\iota = \\sqrt{-1}\\), \\(\\{m,n,l\\}\\) are the indices of the frequency coefficients. By using DFT, the differential operators are converted to algebraic operators in the frequency domain." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.249, + 0.826, + 0.278 + ], + "angle": 0, + "content": "Lemma 4. Suppose the discrete function is \\( u_{i,j,k} \\), with the discrete Fourier transformation Eqn. (16) and Eqn. (17), by using the central difference scheme, the first order differential operator is given by" + }, + { + "type": "equation", + "bbox": [ + 0.319, + 0.279, + 0.679, + 0.32 + ], + "angle": 0, + "content": "\\[\n\\mathcal {D} _ {x} u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {\\sin \\frac {2 \\pi m}{M}}{h _ {x}} \\omega_ {m n l}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.322, + 0.555, + 0.337 + ], + "angle": 0, + "content": "the second order differential operators are represented by" + }, + { + "type": "equation", + "bbox": [ + 0.281, + 0.339, + 0.705, + 0.38 + ], + "angle": 0, + "content": "\\[\n\\mathcal {D} _ {x x} ^ {2} u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 (\\cos \\frac {2 \\pi m}{M} - 1)}{h _ {x} ^ {2}} \\omega_ {m n l}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.283, + 0.383, + 0.715, + 0.425 + ], + "angle": 0, + "content": "\\[\n\\mathcal {D} _ {x y} ^ {2} u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi m}{M} \\sin \\frac 
{2 \\pi n}{N}}{h _ {x} h _ {y}} \\omega_ {m n l}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.825, + 0.463 + ], + "angle": 0, + "content": "The other differential operators \\(\\mathcal{D}_y, \\mathcal{D}_z, \\mathcal{D}_{yy}, \\mathcal{D}_{zz}, \\mathcal{D}_{yz}\\) and \\(\\mathcal{D}_{xz}\\) are also represented accordingly. The detailed proofs can be found in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.475, + 0.797, + 0.492 + ], + "angle": 0, + "content": "FFT Solver Suppose we want to solve an elliptic PDE with constant coefficients on \\(\\Omega \\subset \\mathbb{R}^3\\)" + }, + { + "type": "equation", + "bbox": [ + 0.301, + 0.493, + 0.825, + 0.535 + ], + "angle": 0, + "content": "\\[\nL [ u ] := \\left(\\sum_ {p = 0} ^ {2} \\sum_ {q = 0} ^ {2} a ^ {p, q} \\partial_ {p} \\partial_ {q} + \\sum_ {r = 0} ^ {2} b ^ {r} \\partial_ {r} + c\\right) u (x) = f (x), \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.538, + 0.827, + 0.58 + ], + "angle": 0, + "content": "with the periodic boundary condition, where \\(a^{p,q}, b^r, c\\) are constants, the matrix \\((a^{p,q})\\) is positive definite, namely the PDE is uniformly elliptic. 
By the discrete Fourier transformation \\(\\mathcal{F}\\), we convert the differential equation to an algebraic equation in the frequency domain," + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.582, + 0.69, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\sum_ {p = 0} ^ {2} \\sum_ {q = 0} ^ {2} a ^ {p, q} \\mathcal {F} (\\partial_ {p} \\partial_ {q} u) + \\sum_ {r = 0} ^ {2} b ^ {r} \\mathcal {F} (\\partial_ {r} u) + c \\mathcal {F} (u) = \\mathcal {F} (f)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.633, + 0.411, + 0.648 + ], + "angle": 0, + "content": "By applying Lemma 4 and defining" + }, + { + "type": "equation", + "bbox": [ + 0.284, + 0.65, + 0.825, + 0.782 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\lambda_ {m, n, l} = a ^ {0, 0} \\frac {2 (\\cos \\frac {2 \\pi m}{M} - 1)}{h _ {x} ^ {2}} + a ^ {1, 1} \\frac {2 (\\cos \\frac {2 \\pi n}{N} - 1)}{h _ {y} ^ {2}} \\\\ + a ^ {2, 2} \\frac {2 (\\cos \\frac {2 \\pi l}{L} - 1)}{h _ {z} ^ {2}} - \\left(a ^ {0, 1} + a ^ {1, 0}\\right) \\frac {\\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h _ {y}} \\\\ - \\left(a ^ {1, 2} + a ^ {2, 1}\\right) \\frac {\\sin \\frac {2 \\pi n}{N} \\sin \\frac {2 \\pi l}{L}}{h _ {y} h _ {z}} - \\left(a ^ {0, 2} + a ^ {2, 0}\\right) \\frac {\\sin \\frac {2 \\pi l}{L} \\sin \\frac {2 \\pi m}{M}}{h _ {z} h _ {x}} \\\\ + b ^ {0} \\frac {\\sin \\frac {2 \\pi m}{M}}{h _ {x}} + b ^ {1} \\frac {\\sin \\frac {2 \\pi n}{N}}{h _ {y}} + b ^ {2} \\frac {\\sin \\frac {2 \\pi l}{L}}{h _ {z}} + c \\\\ \\end{array} \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.784, + 0.531, + 0.798 + ], + "angle": 0, + "content": "We have the algebraic equations in frequency domain," + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.8, + 0.574, + 0.818 + ], + "angle": 0, + "content": "\\[\n\\hat {u} _ {m, n, l} \\lambda_ {m, n, l} = \\hat {f} _ {m, n, l}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.819, + 0.826, + 0.862 + ], +
"angle": 0, + "content": "With \\(\\hat{u}_{m,n,l}\\)'s, we can easily obtain \\(u_{i,j,k}\\)'s by the Inverse Discrete Fourier Transform (IDFT), which means solving the constant coefficient elliptic equation. The algorithm is described in Alg. 2 in Appendix C." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.826, + 0.926 + ], + "angle": 0, + "content": "The FFT for solving the constant coefficient elliptic PDE can be efficiently computed with GPUs. Moreover, the algorithm Alg. 2 solves the constant coefficient elliptic PDEs with a periodic boundary condition, which can be generalized to solving the same type of PDEs with Neumann boundary condition by extending the PDE to the flat torus \\(\\mathbb{T}^3\\) using mirror reflection Eqn. (14)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.421, + 0.119 + ], + "angle": 0, + "content": "4 EXPERIMENTAL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.22 + ], + "angle": 0, + "content": "In this section, we firstly show that the our proposed FFT-OT algorithm converges linearly and runs \\(100 \\times\\) faster than the conventional convex geometry based solver Levy (2015), then demonstrate the method in two applications: 3D adaptive sampling and Volume Magnifier. All the algorithms are developed using generic C++ with CUDA Toolkit. All the experiments are conducted on a Windows laptop with Intel Core i7-7700HQ CPU with 16 GB memory and NVIDIA GeForce GTX 1060 Graphics Cards. More experiments can be found in Appendix D." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.236, + 0.548, + 0.25 + ], + "angle": 0, + "content": "4.1 RUNNING TIME AND CONVERGENCE ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.262, + 0.825, + 0.379 + ], + "angle": 0, + "content": "To show the performance of the proposed method, we experiment on the density functions defined by the Gaussian mixture models. To be specific, the domain is a cube \\(\\Omega = [0,1]^3\\), the 3-dimensional density function defined on \\(\\Omega\\) is set to be \\(f(x) = \\sum_{i=1}^{30} p_i \\mathcal{N}(\\mu_i, \\Sigma_i)\\), where \\(\\mathcal{N}(\\mu_i, \\Sigma_i)\\) represents Gaussian distribution with mean \\(\\mu_i\\) and variance \\(\\Sigma_i = \\mathrm{diag}(\\sigma_{i0}^2, \\sigma_{i1}^2, \\sigma_{i2}^2)\\). \\(\\mu_i \\in \\mathbb{R}^3\\) is uniformly sampled from \\([0,1]^3\\), \\(\\sigma_{ij}\\) is uniformly sampled from \\([0,0.5]\\), \\(p_i \\in \\mathbb{R}\\) is uniformly sampled from \\([0.2,1]\\) and normalized such that \\(\\int_{\\Omega} f(x) dx = 1\\). Thus the source distribution \\(\\mu\\) is a complicated Gaussian mixture distribution restricted on \\(\\Omega\\). Then by mirror reflection in Sec. 3.2, we obtain the complex density function which is defined on \\([-1,1]^3\\) and satisfies the periodic boundary condition." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.384, + 0.553, + 0.55 + ], + "angle": 0, + "content": "We directly use the FFT-OT algorithm Alg. 1 to solve the linearized Monge-Ampère equation. With the approximation error threshold \\(\\varepsilon = 1.0 \\times 10^{-6}\\) and the resolution \\(256 \\times 256 \\times 256\\), the running time for our FFT-OT algorithm with double precision on GPU is less than 175 seconds. The conventional convex geometry based algorithm for 3D optimal transportation Levy (2015) can neither handle such large data sets nor be implemented on GPUs. 
It can only compute OT map with resolution no greater than \\(100 \\times 100 \\times 100\\) on our system, which takes about 2700 seconds. When handling problem with \\(128 \\times 128 \\times 128\\) resolution, our FFT-OT consumes about" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.38, + 0.822, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.592, + 0.532, + 0.794, + 0.545 + ], + "angle": 0, + "content": "Figure 1: Convergence Analysis." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.551, + 0.747, + 0.566 + ], + "angle": 0, + "content": "20.3 seconds, which is \\(130 \\times\\) faster than the power diagram based method Levy (2015)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.572, + 0.825, + 0.614 + ], + "angle": 0, + "content": "Fig. 1 shows the approximation error for the above Gaussian mixture density with respect to iterations, namely \\(\\log \\| f - \\rho_n\\| _2^2\\). Our algorithm does converge linearly and the result is consistent with the prediction Eqn. (13) in Thm. 3. Therefore, this experiment validates the theorem." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.631, + 0.395, + 0.644 + ], + "angle": 0, + "content": "4.2 3D ADAPTIVE SAMPLING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.657, + 0.825, + 0.77 + ], + "angle": 0, + "content": "Generating random samples matching a given density function plays an essential role in the applications like Monte-Carlo integration or stippling. Efficiently obtaining high quality samples is still an on-going research topic Bauer et al. (2015); Perrier et al. (2018). And optimal transportation has been successfully applied for generating high quality 2D samples de Goes et al. (2012); Nader & Guennebaud (2018). Most of the current research focuses on generating 2D samples fitting the given density function. 
Here we apply the proposed 3D FFT-OT method to generate high quality 3D samples according to the given complex density functions. To the best of our knowledge, it is the first work that uses OT to sample from 3D density functions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.775, + 0.827, + 0.903 + ], + "angle": 0, + "content": "Suppose the source probability distribution \\(d\\mu (x) = f(x)dx\\) is defined on \\(\\Omega = [0,1]^3\\) with \\(\\mu (\\Omega) = 1\\). The target distribution \\(d\\nu (y) = dy\\) is the uniform distribution. We use the FFT-OT algorithm Alg. 1 to compute the OT map \\(T:\\Omega \\to \\Omega ,T_{\\#}\\mu = \\nu\\). The domain is tessellated to a \\(256\\times 256\\times 256\\) grid. For each \\(x_{ijk},i,j,k\\in \\{0,1,\\ldots ,255\\}\\), the image \\(T(x_{ijk})\\) can be obtained. We use \\(\\{T(x_{ijk})\\}\\) as vertices to compute the Delaunay triangulation of \\(\\Omega\\). Then representing the OT map \\(T:(\\Omega ,\\mu)\\rightarrow (\\Omega ,\\nu)\\) as a piecewise linear map, the restriction of \\(T\\) on each tetrahedron is a linear map. Then the inverse OT map \\(T^{-1}:(\\Omega ,\\nu)\\to (\\Omega ,\\mu)\\) is also a piecewise linear map. Namely, given a grid point \\(y_{mnl}\\), we can find a tetrahedron containing it. 
Suppose the vertices of the tetrahedron are \\(\\{T(x_i),T(x_j),T(x_k),T(x_l)\\}\\), then \\(y_{mnl}\\) is computed as" + }, + { + "type": "equation", + "bbox": [ + 0.322, + 0.909, + 0.673, + 0.927 + ], + "angle": 0, + "content": "\\[\ny _ {m n l} = \\lambda_ {i} T (x _ {i}) + \\lambda_ {j} T (x _ {j}) + \\lambda_ {k} T (x _ {k}) + \\lambda_ {l} T (x _ {l}),\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.101, + 0.278, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.173, + 0.278, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.245, + 0.266, + 0.258 + ], + "angle": 0, + "content": "(a) Density" + }, + { + "type": "image", + "bbox": [ + 0.293, + 0.101, + 0.386, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.293, + 0.173, + 0.385, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.299, + 0.245, + 0.379, + 0.258 + ], + "angle": 0, + "content": "(b) Rejection" + }, + { + "type": "image", + "bbox": [ + 0.4, + 0.101, + 0.493, + 0.172 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.4, + 0.173, + 0.492, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.423, + 0.245, + 0.469, + 0.258 + ], + "angle": 0, + "content": "(c) MH" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.101, + 0.599, + 0.172 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.173, + 0.599, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.526, + 0.245, + 0.579, + 
0.257 + ], + "angle": 0, + "content": "(d) Slice" + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.102, + 0.706, + 0.172 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.173, + 0.704, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.627, + 0.245, + 0.692, + 0.257 + ], + "angle": 0, + "content": "(e) Ours-R" + }, + { + "type": "image", + "bbox": [ + 0.722, + 0.102, + 0.812, + 0.172 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.722, + 0.173, + 0.812, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.734, + 0.245, + 0.799, + 0.257 + ], + "angle": 0, + "content": "(f) Ours-G" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.27, + 0.825, + 0.348 + ], + "angle": 0, + "content": "Figure 2: 3D density function sampling. (a) The density functions in a slice. The slices in each row come from two different density functions. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with \\( T^{-1} \\). (f) The sampling results by mapping the grid centers back with \\( T^{-1} \\). The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.825, + 0.403 + ], + "angle": 0, + "content": "where the non-negative barycenter coordinates satisfy \\(\\lambda_{i} + \\lambda_{j} + \\lambda_{k} + \\lambda_{l} = 1\\). 
Then the image of the inverse OT map is given by" + }, + { + "type": "equation", + "bbox": [ + 0.361, + 0.408, + 0.825, + 0.425 + ], + "angle": 0, + "content": "\\[\nT ^ {- 1} \\left(y _ {m n l}\\right) = \\lambda_ {i} x _ {i} + \\lambda_ {j} x _ {j} + \\lambda_ {k} x _ {k} + \\lambda_ {l} x _ {l}. \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.431, + 0.825, + 0.461 + ], + "angle": 0, + "content": "We generate random samples \\(\\{y_k\\}\\) according to the uniform distribution \\(\\nu\\) on \\(\\Omega\\), then their images \\(\\{T^{-1}(y_k)\\}\\) are the desired random samples following the distribution \\(\\mu\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.466, + 0.825, + 0.565 + ], + "angle": 0, + "content": "In our experiment, we use the same Gaussian mixture settings of the density function as Sec. 4.1. Fig. 2 visualizes the generated samples. We randomly pick the \\(k\\)-th slice along the \\(z\\)-direction from the discretized volume, draw the source density function on this slice, and use pixel intensity to represent the density in Fig. 2(a). (i) We uniformly generate \\(100k\\) random samples \\(\\{y_k\\} \\subset \\Omega\\), and obtain the desired random samples by applying the inverse OT map \\(\\{T^{-1}(y_k)\\}\\). (ii) We also set \\(\\{y_k\\}\\) as the grid centers of \\(\\Omega\\) and obtain the corresponding samples of the desired distribution \\(\\mu\\). The samples around the \\(k\\)-th slice of both sampling strategies are plotted in Fig. 2(e) and Fig. 2(f)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.57, + 0.827, + 0.738 + ], + "angle": 0, + "content": "By visual comparison, it is obvious that the distributions of Fig. 2(e) and Fig. 2(f) are consistent with the density function in Fig. 2(a). The consistency of the boundary of Fig. 2(e) and (f) and Fig. 2(a) also verifies the obliqueness boundary condition of the Monge-Ampère equation. 
To further show the performance of the proposed method, we compare it with the classical sampling methods, namely rejection sampling, the Metropolis-Hastings algorithm Bishop (2006) and the slice sampling Neal (2003), shown in Fig. 2(b), Fig. 2(c) and Fig. 2(d). To quantitatively compare the sampling results, we use the Chi-square goodness-of-fit test, which firstly groups the data and then computes the \\( L^2 \\) norm of the difference between the actual number of observations in each group and the expected number of observations. In our experiment, we set the group number to \\( 64 \\times 64 \\times 64 \\) and use 500K samples to make the comparison. The corresponding \\( L^2 \\) norm of each method is shown in the top-right of the corresponding figure. We can see that the both sampling strategies of our method give smaller scores than the classical ones." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.754, + 0.398, + 0.768 + ], + "angle": 0, + "content": "4.3 VOLUMETRIC MAGNIFIER" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.78, + 0.825, + 0.851 + ], + "angle": 0, + "content": "In reality, physical magnifiers can only magnify planar images. In medical image processing, it is highly desirable to magnify certain regions of the 3D MRIs or CT images. Our algorithm can address such requests with the user prescribed region of interest (ROI) and magnifying factor. Suppose the ROI is a symmetric region with the center \\((\\bar{x},\\bar{y},\\bar{z})\\in \\Omega\\) and the radius \\(\\sigma_x,\\sigma_y,\\sigma_z\\) in different directions. The density function \\(f\\) of the source measure \\(\\mu\\) is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.294, + 0.856, + 0.702, + 0.876 + ], + "angle": 0, + "content": "\\[\nf (x, y, z) = 0. 5 + 0. 
5 e ^ {- ((x - \\bar {x}) ^ {2} / 2 \\sigma_ {x} ^ {2} + (y - \\bar {y}) ^ {2} / 2 \\sigma_ {y} ^ {2} + (z - \\bar {z}) ^ {2} / 2 \\sigma_ {z} ^ {2})}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We compute OT map \\( T: (\\Omega, \\mu) \\to (\\Omega, \\nu) \\), where \\( \\nu \\) is the uniform distribution. Similar to the method in 3D adaptive sampling, we compute the Delaunay triangulation of the images \\( \\{T(x_{ijk})\\} \\), then the OT map \\( T \\) is represented as a piecewise linear map. The inverse optimal transportation map" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.19, + 0.101, + 0.334, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.101, + 0.492, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.101, + 0.651, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.101, + 0.808, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.225, + 0.825, + 0.276 + ], + "angle": 0, + "content": "Figure 3: The volume magnifier of an aneurysm. The first column shows the original volumetric data, and the last three columns give the magnified data from the same viewpoints with different magnifying ratios. The yellow circle denotes the ROI/aneurysm. To obtain the results, we set \\(\\sigma = \\sigma_{x} = \\sigma_{y} = \\sigma_{z}\\), and they are 0.83, 0.75 and 0.5 respectively." 
+ }, + { + "type": "image", + "bbox": [ + 0.19, + 0.284, + 0.333, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.284, + 0.491, + 0.394 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.284, + 0.651, + 0.394 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.284, + 0.808, + 0.394 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.396, + 0.333, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.396, + 0.491, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.396, + 0.651, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.396, + 0.808, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.519, + 0.825, + 0.56 + ], + "angle": 0, + "content": "Figure 4: The volume magnifier of the knee. The first row gives the original volumetric data with different ROIs denoted by the blue boxes from different viewpoints, and the second row shows the corresponding magnified results. In the experiments we set \\(\\sigma_{x} = \\sigma_{y} = \\sigma_{z} = 0.75\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.578, + 0.825, + 0.636 + ], + "angle": 0, + "content": "\\(T^{-1}:(\\Omega ,\\nu)\\to (\\Omega ,\\mu)\\) is also piecewise linear. For each grid point \\(y_{mnl}\\in \\Omega\\) we use Eqn. (20) to find its pre-image. Similarly, its corresponding intensity \\(I_{mnl}\\) is computed by linear interpolation. Then we obtain the new volumetric data \\(\\{I_{mnl}\\}\\) with the magnified ROI and visualize the result with Voreen Meyer-Spradow et al. (2009)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.642, + 0.827, + 0.768 + ], + "angle": 0, + "content": "Fig. 
3 demonstrates our volumetric magnifier by magnifying an aneurysm on blood vessel Hansen & Johnson (2004). We choose the aneurysm region as the ROI. The first column gives the snapshot of the blood vessel, and the yellow circle denotes the location of the aneurysm. The last three columns show the magnified aneurysm with different magnifying ratio from the same viewpoints. Moreover, we show the magnified volumetric knee from different viewpoints with different ROIs denoted by the blue boxes in Fig. 4. Our method only magnifies the ROIs and keeps other regions unchanged. Compared with the traditional method requiring tedious zoom in/out, our method only magnifies the ROI region and keeps the whole subject in the field of view, which enables doctors to visualize the overall anatomy while scrutinize detailed anatomical structure at the same time." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.792, + 0.32, + 0.807 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In this paper, we propose the FFT-OT method to solve the optimal transportation problem. According to the Brenier theory, under the quadratic distance cost, finding the solution to the OT problem is equivalent to solving the Monge-Ampère equation, which can be linearized as a sequence of variant coefficient elliptic PDEs. Later, the variant coefficient PDEs are approximated by constant coefficient PDEs and solved by Fast Fourier Transformation. We also prove that the proposed method converges linearly. Experiments on volumetric data show that the FFT-OT can be used to sample from complex 3D density functions and magnify the volumetric data in medical images." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.362, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENT" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.133, + 0.828, + 0.178 + ], + "angle": 0, + "content": "This research was partially supported by National Key R&D Program of China 2021YFA1003003 and NSFC No. 61936002, T2225012. This work was also partially supported by NIH 3R01LM012434-05S1, 1R21EB029733-01A1, NSF FAIN-2115095 and NSF CMMI-1762287." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.197, + 0.289, + 0.213 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.221, + 0.826, + 0.266 + ], + "angle": 0, + "content": "Mokhtar Z. Alaya, Maxime Berar, Gilles Gasso, and Alain Rakotomamonjy. Screening sinkhorn algorithm for regularized optimal transport. In Advances in Neural Information Processing Systems 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.273, + 0.825, + 0.306 + ], + "angle": 0, + "content": "Jose I. Aliaga, Ernesto Dufrechou, Pablo Ezzatti, and Enrique S. Quintana-Orti. An efficient gpu version of the preconditioned gmres method. The Journal of Supercomputing, 75, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.312, + 0.825, + 0.357 + ], + "angle": 0, + "content": "Jason Altschuler, Jonathan Niles-Weed, and Philippe Rigollet. Near-linear time approximation algorithms for optimal transport via sinkhorn iteration. In Advances in Neural Information Processing Systems 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.364, + 0.826, + 0.438 + ], + "angle": 0, + "content": "Jason Altschuler, Francis Bach, Alessandro Rudi, and Jonathan Niles-Weed. 
Massively scalable sinkhorn distances via the nystrom method. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper/2019/file/f55cadb97eaff2ba1980e001b0bd9842-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.444, + 0.826, + 0.489 + ], + "angle": 0, + "content": "Dongsheng An, Yang Guo, Na Lei, Zhongxuan Luo, Shing-Tung Yau, and Xianfeng Gu. Ae-ot: A new generative model based on extended semi-discrete optimal transport. In International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.497, + 0.825, + 0.529 + ], + "angle": 0, + "content": "Dongsheng An, Na Lei, and Xianfeng Gu. Efficient optimal transport algorithm by accelerated gradient descent. In The Thirty-Sixth AAAI Conference on Artificial Intelligence (AAAI), 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.535, + 0.825, + 0.566 + ], + "angle": 0, + "content": "Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In ICML, pp. 214-223, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.574, + 0.825, + 0.605 + ], + "angle": 0, + "content": "F. Aurenhammer, F. Hoffmann, and B. Aronov. Minkowski-type theorems and least-squares clustering. Algorithmica, 1998." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.612, + 0.825, + 0.643 + ], + "angle": 0, + "content": "Martin Bauer, Sarang Joshi, and Klas Modin. Diffeomorphic density matching by optimal information transport. SIAM Journal on Imaging Sciences, 8, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.65, + 0.825, + 0.693 + ], + "angle": 0, + "content": "J.D. Benamou, Y. Brenier, and K. Guittet. The Monge-Kantorovitch mass transfer and its computational fluid mechanics formulation. 
International Journal for Numerical Methods in Fluids, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.702, + 0.825, + 0.734 + ], + "angle": 0, + "content": "Jean-David Benamou, Brittany D. Froese, and Adam M. Oberman. Numerical solution of the optimal transportation problem using the monge-ampère equation. J. Comput. Phys, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.741, + 0.731, + 0.758 + ], + "angle": 0, + "content": "Christopher M. Bishop. Pattern Recognition and Machine Learning. Springer, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.766, + 0.825, + 0.797 + ], + "angle": 0, + "content": "Y. Brenier. Polar decomposition and increasing rearrangement of vector fields. C. R. Acad. Sci. Paris Sr. I Math., 305(19):805-808, 1987." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.804, + 0.825, + 0.836 + ], + "angle": 0, + "content": "Y. Brenier. Polar factorization and monotone rearrangement of vector-valued functions. Comm. Pure Appl. Math., 44(4):375-417, 1991." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.843, + 0.825, + 0.886 + ], + "angle": 0, + "content": "Dario Cordero-Erausquin. Sur le transport de mesures periodiques monotone maps preserving periodic measures. Comptes Rendus de l'Académie des Sciences - Series I - Mathematics, 329: 199-202, 1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.927 + ], + "angle": 0, + "content": "N. Courty, R. Flamary, D. Tuia, and A. Rakotomamonjy. Optimal transport for domain adaptation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 39(9):1853-1865, 2017." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.221, + 0.826, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Marco Cuturi. Sinkhorn distances: Lightspeed computation of optimal transportation distances. In International Conference on Neural Information Processing Systems, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.825, + 0.171 + ], + "angle": 0, + "content": "F. de Goes, K. Breeden, V. Ostromoukhov, and M. Desbrun. Blue noise through optimal transport. ACM Trans. Graph. (SIGGRAPH Asia), 31, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.178, + 0.825, + 0.22 + ], + "angle": 0, + "content": "Philippe Delanoë. Classical solvability in dimension two of the second boundary-value problem associated with the Monge-Ampère operator. Annales de l'I.H.P. Analyse non linéaire, 8(5): 443-457, 1991." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.228, + 0.825, + 0.272 + ], + "angle": 0, + "content": "Pavel Dvurechensky, Alexander Gasnikov, and Alexey Kroshnin. Computational optimal transport: Complexity by accelerated gradient descent is better than by sinkhorn's algorithm. In Proceedings of the 35th International Conference on Machine Learning. PMLR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.279, + 0.825, + 0.309 + ], + "angle": 0, + "content": "Suli Endre. Lecture Notes on Finite Element Methods for Partial Differential Equations. University of Oxford, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.316, + 0.825, + 0.36 + ], + "angle": 0, + "content": "David Xianfeng Gu, Feng Luo, Jian Sun, and Shing-Tung Yau. Variational principles for minkowski type problems, discrete optimal transport, and discrete monge-ampère equations. *Asian Journal of Mathematics*, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.367, + 0.77, + 0.383 + ], + "angle": 0, + "content": "Charles D. Hansen and Chris R. Johnson. Visualization Handbook. Academic Press, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.39, + 0.825, + 0.421 + ], + "angle": 0, + "content": "Jun Kitagawa, Quentin Mérigot, and Boris Thibert. Convergence of a newton algorithm for semi-discrete optimal transport. Journal of the European Mathematical Society, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.428, + 0.827, + 0.458 + ], + "angle": 0, + "content": "Na Lei and Xianfeng Gu. Fft-ot: A fast algorithm for optimal transportation. In Proceedings of International Conference on Computer Vision (ICCV), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.465, + 0.825, + 0.494 + ], + "angle": 0, + "content": "Bruno Levy. A numerical algorithm for l2 semi-discrete optimal transport in 3d. ESAIM: M2AN, 49 (6):1693-1715, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.502, + 0.825, + 0.532 + ], + "angle": 0, + "content": "Gregoire Loeper and Francesca Rapetti. Numerical solution of the monge-ampère equation by a newton's algorithm. C. R. Acad. Paris, pp. 319-324, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.538, + 0.825, + 0.568 + ], + "angle": 0, + "content": "Robert J. McCann. A convexity principle for interacting gases. Advances in mathematics, 128:153-179, 1997." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.576, + 0.81, + 0.592 + ], + "angle": 0, + "content": "Quentin Merigot. A multiscale approach to optimal transport. Computer Graphics Forum., 2011." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.599, + 0.825, + 0.642 + ], + "angle": 0, + "content": "Jennis Meyer-Spradow, Timo Ropinski, Jörg Mensmann, and Klaus H. Hinrichs. Voreen: A rapid-prototyping environment for ray-casting-based volume visualizations. IEEE Computer Graphics and Applications, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.65, + 0.825, + 0.679 + ], + "angle": 0, + "content": "Georges Nader and Gael Guennebaud. Instant transport maps on 2d grids. ACM Trans. Graph., 37 (6), 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.687, + 0.603, + 0.703 + ], + "angle": 0, + "content": "Radford M. Neal. Slice sampling. The Annals of Statistics, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.71, + 0.825, + 0.74 + ], + "angle": 0, + "content": "Nicolas Papadakis, Gabriel Peyre, and Edouard Oudet. Optimal transport with proximal splitting. SIAM Journal on Imaging Sciences, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.747, + 0.825, + 0.777 + ], + "angle": 0, + "content": "Hélène Perrier, David Coeurjolly, Feng Xie, Matt Pharr, Pat Hanrahan, and Victor Ostromoukhov. Sequences with low-discrepancy blue-noise 2-d projections. Computer Graphics Forum, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.784, + 0.825, + 0.814 + ], + "angle": 0, + "content": "Gabriel Peyre and Marco Cuturi. Computational optimal transport. Found. Trends Mach. Learn., 11 (5-6):355-607, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.821, + 0.825, + 0.85 + ], + "angle": 0, + "content": "Yousef Saad. Iterative Methods For Sparse Linear Systems. Society of Industrial and Applied Mathematics, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.858, + 0.748, + 0.875 + ], + "angle": 0, + "content": "Filippo Santambrogio. Optimal Transport for Applied Mathematicians. Springer, 2015." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Louis-Philippe Saumier, Martial Agueh, and Boualem Khouider. An efficient numerical algorithm for the \\(l^2\\) optimal transport problem with periodic densities. IMA Journal of Applied Mathematics, 80:135-157, 2013." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Yuliy Schwartzburg, Romain Testuz, Andrea Tagliasacchi, and Mark Pauly. High-contrast computational caustic design. ACM Trans. Graph., 33(4), July 2014. ISSN 0730-0301. doi: 10.1145/2601097.2601200. URL https://doi.org/10.1145/2601097.2601200." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.825, + 0.199 + ], + "angle": 0, + "content": "Justin Solomon, Fernando de Goes, Gabriel Peyré, Marco Cuturi, Adrian Butscher, Andy Nguyen, Tao Du, and Leonidas Guibas. Convolutional Wasserstein distances: Efficient optimal transportation on geometric domains. ACM Transactions on Graphics (TOG), 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.825, + 0.237 + ], + "angle": 0, + "content": "Kehua Su, Wei Chen, Na Lei, Junwei Zhang, Kun Qian, and Xianfeng Gu. Volume preserving mesh parameterization based on optimal mass transportation. Comput. Aided Des., 82:42-56, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.244, + 0.825, + 0.274 + ], + "angle": 0, + "content": "Ilya Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Schoelkopf. Wasserstein auto-encoders. In ICLR, 2018." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.282, + 0.583, + 0.298 + ], + "angle": 0, + "content": "Cédric Villani. Topics in Optimal transportation. AMS, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.306, + 0.825, + 0.334 + ], + "angle": 0, + "content": "Cédric Villani. Optimal transport: old and new, volume 338. Springer Science & Business Media, 2008." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.826, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.103, + 0.349, + 0.117 + ], + "angle": 0, + "content": "A RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.139, + 0.824, + 0.166 + ], + "angle": 0, + "content": "There is a huge literature about optimal transportation. Here we will only briefly review the most related works. For detailed reviews, we refer readers to Santambrogio (2015); Peyre & Cuturi (2019)." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.173, + 0.824, + 0.354 + ], + "angle": 0, + "content": "The first type of algorithms is based on the Kantorovich theory. When both the input and output domains are Dirac masses, the Kantorovich problem can be treated as a standard linear programming (LP) task. In order to tackle large data sets, Cuturi (2013) adds an entropic regularizer to the original LP problem and the regularized problem can be quickly solved by the Sinkhorn algorithm. Recently, various algorithms have been proposed to further accelerate the computation by improving the efficiency of matrix-vector multiplications, including the Greenkhorn Altschuler et al. (2017), Screenkhorn Alaya et al. (2019) and the NYS-SINK Altschuler et al. (2019) algorithms. 
Dvurechensky et al. Dvurechensky et al. (2018) also propose the adaptive primal-dual accelerated gradient descent algorithm (APDAGD) to solve the discrete OT problem. An et al. An et al. (2022) compute the approximate OT plan by smoothing the dual Kantorovich problem and solving it with the FISTA method. This kind of methods have limitations: (i) they only give transport plans and cannot produce the bijective transportation maps; and (ii) the computational complexity is too high to apply them in the scenarios with huge number of samples." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.361, + 0.824, + 0.514 + ], + "angle": 0, + "content": "The second type of algorithms is based on the Brenier theory Brenier (1987) and its intrinsic connection with convex geometry Gu et al. (2016). The semi-discrete OT algorithm proposed in Aurenhammer et al. (1998) finds the transport map between a continuous distribution and a discrete measure via a variational approach by dynamically constructing the power diagrams. Its efficiency can be further improved Levy (2015); Merigot (2011) by the multi-resolution strategy. The algorithms proposed in Kitagawa et al. (2019); Su et al. (2017) also improve the efficiency by applying the Newton's method. When both the source and target measures are continuous, some interpolation methods are necessary Schwartzburg et al. (2014). The major drawback of this type of algorithms is the high computational complexity of constructing the dynamic power diagram, which prevents them from handling high dimensional tasks. For example, for the 3D OT problems, these algorithms usually run very slow." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.521, + 0.824, + 0.59 + ], + "angle": 0, + "content": "The third type of algorithms is based on computational fluid dynamics Benamou et al. (2002); Papadakis et al. (2014). 
These methods aim at finding a special temporal-spacial flow field that transports the initial source density to the target density with the minimal total kinetic energy. Then the diffeomorphism induced by the flow gives the optimal transport map under the quadratic Euclidean distance cost. However, this kind of algorithms are difficult to extend to high dimensional space." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.598, + 0.824, + 0.875 + ], + "angle": 0, + "content": "The fourth type of algorithms directly solve the Monge-Ampère equation using numerical methods. Loeper and Rapetti Loeper & Rapetti (2005) propose to solve the linearized Monge-Ampère equation defined on a flat torus in each iteration. Its corresponding variant coefficient elliptic PDE is converted to a positive definite linear system using the finite-difference scheme, which can be solved by the BiCG algorithm Endre (2020). Benamou et al. Benamou et al. (2014) propose to solve the linearized Monge-Ampère on more general domains using Newton's method. Nader and Guennebaud Nader & Guennebaud (2018) apply the similar discretization strategy and solve the Monge-Ampère equation by conjugate gradient method. Saumier et al. Saumier et al. (2013) propose to solve the linearized Monge-Ampère equation using FFT. In each iteration the elliptic PDE with spacial and temporal variant coefficients is converted to a group of linear equations in the frequency domain, which is solved by the GMRES algorithm. Although the GMRES algorithm can be implemented on GPUs Aliaga et al. (2019), there is no available open source code. The work in Saumier et al. (2013) focuses on periodic boundary condition, but this our proposed work focuses on general second boundary condition; the work in Saumier et al. (2013) concerns planar OT maps, ours emphasizes on volumetric OT maps, which has higher complexity. The work in Saumier et al. 
(2013) can handle more general target measures, the proposed work currently only deals with the Lebesgue target measure. Nevertheless, the current work can be directly generalized to handle general target measures as well. Lei and Gu Lei & Gu (2021) use the fixed point method to compute the 2-dimensional OT problem based on FFT, but it cannot be extended to solve the 3-dimensional problems." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.882, + 0.824, + 0.923 + ], + "angle": 0, + "content": "In this work, we combine the idea of linearizing the Monge-Ampère equation Loeper & Rapetti (2005) and the idea of FFT Saumier et al. (2013). The key novelty of our proposed method is to use the mean linearized Monge-Ampère operator Eqn. (12) to replace the conventional linearized" + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.949, + 0.507, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "Monge-Ampere operator Eqn. (10). This replacement allows the algorithm to be implemented on GPUs and makes the algorithm hundreds of times faster. In the following, we compute the 3-dimensional optimal transport problem by applying the proposed algorithm. Our method also runs more than \\(100 \\times\\) faster than the convex geometry based method Levy (2015)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.18, + 0.373, + 0.195 + ], + "angle": 0, + "content": "B APPENDIX THEORY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.21, + 0.825, + 0.253 + ], + "angle": 0, + "content": "In the section, we give the detailed proofs for several lemmas and theorems. Some of them are well known in the Monge-Ampère PDE field and the applied mathematics field, we include them for the completeness." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.269, + 0.744, + 0.298 + ], + "angle": 0, + "content": "B.1 EXISTENCE OF THE SOLUTION TO THE TIME DEPENDENT MONGE-AMPERE EQNUATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.825, + 0.351 + ], + "angle": 0, + "content": "Let \\(\\mathbb{T}^n = \\mathbb{R}^n / \\mathbb{Z}^n\\) be the \\(n\\)-dimensional flat torus. Below we sometimes identify it with \\(\\Omega = [0,1]^n\\) and assume all data are periodic. The existence and regularity of solutions to the Monge-Ampère equation are given by the following theorem," + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.353, + 0.826, + 0.396 + ], + "angle": 0, + "content": "Theorem 5. Suppose a positive density function \\( f: \\Omega \\to \\mathbb{R} \\) is defined on \\( \\Omega = [0,1]^n \\), such that \\( \\int_{\\Omega} f(x) dx = 1 \\), and \\( f \\in C^{\\alpha}(\\Omega) \\), then the solution \\( u: \\Omega \\times [0,1] \\) to the time-dependent Monge-Ampère equation" + }, + { + "type": "equation", + "bbox": [ + 0.316, + 0.395, + 0.823, + 0.411 + ], + "angle": 0, + "content": "\\[\n\\det D _ {x} ^ {2} u (x, t) = (1 - t) + t f (x), \\quad \\nabla_ {x} u (x, t) (\\Omega) = \\Omega \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.412, + 0.792, + 0.427 + ], + "angle": 0, + "content": "exists and is unique up to a constant. Furthermore, there exist constants \\(0 < \\lambda < \\Lambda\\), such that" + }, + { + "type": "equation", + "bbox": [ + 0.244, + 0.43, + 0.825, + 0.471 + ], + "angle": 0, + "content": "\\[\n\\lambda \\sum_ {p = 1} ^ {n} \\xi_ {p} ^ {2} \\leq \\sum_ {p, q = 1} ^ {n} u ^ {p q} (x, t) \\xi_ {p} \\xi_ {q} \\leq \\Lambda \\sum_ {p = 1} ^ {n} \\xi_ {p} ^ {2}, \\quad \\forall \\xi \\in \\mathbb {R} ^ {n}, \\forall (x, t) \\in \\Omega \\times [ 0, 1 ]. 
\\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.481, + 0.604, + 0.496 + ], + "angle": 0, + "content": "We refer readers to Cordero-Erasquin (1999) for detailed proof." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.511, + 0.825, + 0.553 + ], + "angle": 0, + "content": "Weak Solution In practice, we compute the weak solution of the linearized Monge-Ampère Eqn. (6) using numerical methods. We first rewrite the differential operator to a divergence form, then define a bi-linear form." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.559, + 0.736, + 0.575 + ], + "angle": 0, + "content": "Since \\((u^{pq}(x,t))\\) is the adjoint matrix of \\(D_x^2 u(x,t)\\), by direct computation, we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.293, + 0.579, + 0.825, + 0.619 + ], + "angle": 0, + "content": "\\[\n\\sum_ {p = 1} ^ {n} \\partial_ {p} u ^ {p q} (x, t) = 0, \\quad \\forall (x, t) \\in \\Omega \\times [ 0, 1 ], \\quad \\forall q = 1, \\dots , n. \\tag {23}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.622, + 0.531, + 0.637 + ], + "angle": 0, + "content": "so Eqn. (6) can be converted into the divergence form:" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.64, + 0.78, + 0.683 + ], + "angle": 0, + "content": "\\[\n\\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) = \\sum_ {p, q = 1} ^ {n} u ^ {p q} \\partial_ {p} \\partial_ {q} v + \\sum_ {q = 1} ^ {n} \\left(\\sum_ {p = 1} ^ {n} \\partial_ {p} u ^ {p q}\\right) \\partial_ {q} v = \\sum_ {p, q = 1} ^ {n} u ^ {p q} \\partial_ {p} \\partial_ {q} v,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.687, + 0.241, + 0.699 + ], + "angle": 0, + "content": "we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.344, + 0.696, + 0.825, + 0.737 + ], + "angle": 0, + "content": "\\[\n\\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} (x, t) \\partial_ {q} v (x, t)\\right) = f (x) - 1. 
\\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.739, + 0.406, + 0.753 + ], + "angle": 0, + "content": "with Neumann boundary condition" + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.757, + 0.825, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial v (x , t)}{\\partial \\mathbf {n}} = 0, \\quad \\forall (x, t) \\in \\partial \\Omega \\times [ 0, 1 ]. \\tag {25}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.79, + 0.576, + 0.805 + ], + "angle": 0, + "content": "For any \\(w\\in H^{1}(\\Omega)\\), by differentiation of product, we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.225, + 0.808, + 0.767, + 0.851 + ], + "angle": 0, + "content": "\\[\n\\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w + \\sum_ {p = 1} ^ {n} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) \\partial_ {p} w = \\sum_ {p = 1} ^ {n} \\partial_ {p} \\left[ \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.854, + 0.825, + 0.881 + ], + "angle": 0, + "content": "by integrating both sides, and from the fact that \\( v \\) satisfies the Neumann boundary condition, we deduce" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.883, + 0.825, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\Omega} \\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w + \\int_ {\\Omega} \\sum_ {p, q = 1} ^ {n} u ^ {p q} \\partial_ {q} v \\partial_ {p} w = \\int_ {\\partial \\Omega} \\sum_ {p = 1} ^ {n} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w = 0. 
\\tag {26}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "For any fixed time \\(t \\in [0,1]\\), by the divergence form, we can construct a bilinear form \\(a: H^1(\\Omega) \\times H^1(\\Omega)\\) and a linear form \\(l: H^1(\\Omega) \\to \\mathbb{R}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.295, + 0.142, + 0.825, + 0.183 + ], + "angle": 0, + "content": "\\[\na (v, w) = \\sum_ {p, q = 1} ^ {n} \\int_ {\\Omega} u ^ {p q} \\partial_ {p} v \\partial_ {q} w, \\quad l (w) = - \\int_ {\\Omega} (f - 1) w d x. \\tag {27}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.191, + 0.598, + 0.206 + ], + "angle": 0, + "content": "A weak solution to Eqn. (24) is a function \\( v \\in H^{1}(\\Omega) \\), such that" + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.214, + 0.825, + 0.232 + ], + "angle": 0, + "content": "\\[\na (v, w) = l (w), \\quad \\forall w \\in H ^ {1} (\\Omega). \\tag {28}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.246, + 0.827, + 0.274 + ], + "angle": 0, + "content": "By the uniform ellipticity Eqn. (22), the Lax-Milgram theorem Endre (2020) shows the existence of the weak solution." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.292, + 0.682, + 0.307 + ], + "angle": 0, + "content": "B.2 DISCRETE LINEARIZED MONGE-AMPERE EQUATION SOLVABILITY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.318, + 0.825, + 0.39 + ], + "angle": 0, + "content": "Galerkin Method In practice, we construct a triangulation \\(\\mathcal{T}\\) of \\(\\Omega\\), such that the ratio between the diameter and inscribe-sphere radius of each simplex is bounded, and variation of the diameters of all the simplexes is small. We call such kind of \\(\\mathcal{T}\\) a quasi-uniform triangulation, and denote the largest diameter as \\(h\\). For each vertex \\(v_{i} \\in \\mathcal{T}\\), we construct a piecewise linear base function \\(\\varphi_{i}\\), such that \\(\\varphi_{i}\\) is linear on each triangle, \\(\\varphi_{i}(v_{j})\\) is \\(\\delta_{ij}\\). We define a finite dimensional subspace \\(V_{h} \\subset H^{1}(\\Omega)\\)," + }, + { + "type": "equation", + "bbox": [ + 0.353, + 0.397, + 0.644, + 0.44 + ], + "angle": 0, + "content": "\\[\nV _ {h} := \\left\\{v _ {h} (x) := \\sum_ {v _ {i} \\in \\mathcal {T}} \\lambda_ {i} \\varphi_ {i} (x), \\lambda_ {i} \\in \\mathbb {R} \\right\\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.448, + 0.825, + 0.517 + ], + "angle": 0, + "content": "Given a function \\( u \\in H^{1}(\\Omega) \\), we use \\( u_{h} \\in V_{h} \\) to denote its approximation in \\( V_{h} \\). Furthermore, \\( u_{h} = \\sum_{i} \\lambda_{i} \\varphi_{i} \\), we also use \\( u_{h} \\) to represent the coefficient vector \\( (\\lambda_1, \\lambda_2, \\dots, \\lambda_k)^T \\) depending on the context. The weak solution Eqn. (28) to the Monge-Ampère equation (6) is equivalent to find a \\( v \\in H^{1}(\\Omega) \\), such that \\( a(v, w) = l(w) \\) for all \\( w \\in H^{1}(\\Omega) \\). 
In the discrete case, we want to find
(22), and \\(V_h \\subset H^1(\\Omega)\\)" + }, + { + "type": "equation", + "bbox": [ + 0.42, + 0.687, + 0.576, + 0.706 + ], + "angle": 0, + "content": "\\[\na (v, v) \\geq \\lambda \\| \\nabla v \\| _ {L ^ {2} (\\Omega)} ^ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.71, + 0.469, + 0.728 + ], + "angle": 0, + "content": "Assume \\(\\int_{\\Omega} v dx = 0\\), by Poincaré inequality," + }, + { + "type": "equation", + "bbox": [ + 0.298, + 0.735, + 0.7, + 0.767 + ], + "angle": 0, + "content": "\\[\n\\| \\nabla v \\| _ {L ^ {2} (\\Omega)} ^ {2} \\geq C _ {1} (\\Omega) \\| v \\| _ {L} ^ {2} (\\Omega), \\quad \\forall v \\in H ^ {1} (\\Omega), \\int_ {\\Omega} v d x = 0,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.774, + 0.756, + 0.789 + ], + "angle": 0, + "content": "where the constant \\(C_1(\\Omega)\\) depends on \\(\\Omega\\). Combine the above two inequalities, we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.329, + 0.797, + 0.826, + 0.83 + ], + "angle": 0, + "content": "\\[\na (v, v) \\geq c \\| v \\| _ {L ^ {2} (\\Omega)} ^ {2}, \\quad \\forall v \\in H ^ {1} (\\Omega), \\int_ {\\Omega} v d x = 0. \\tag {31}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.837, + 0.589, + 0.853 + ], + "angle": 0, + "content": "Similarly, By the uniform ellipticity Eqn. 
22, and \\(V_{h}\\subset H^{1}(\\Omega)\\)" + }, + { + "type": "equation", + "bbox": [ + 0.419, + 0.86, + 0.577, + 0.88 + ], + "angle": 0, + "content": "\\[\na (v, v) \\leq \\Lambda \\| \\nabla v \\| _ {L ^ {2} (\\Omega)} ^ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.887, + 0.814, + 0.902 + ], + "angle": 0, + "content": "For linear finite element and quasi-uniform triangulation, we have the inverse Poincaré inequality," + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.908, + 0.604, + 0.927 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\nabla v _ {h} \\right\\| _ {L ^ {2}} ^ {2} \\leq C _ {2} (\\Omega) h ^ {- 1} \\left\\| v _ {h} \\right\\| _ {L ^ {2}} ^ {2}.\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.755, + 0.12 + ], + "angle": 0, + "content": "where \\( h \\) is the diameter of each element. Combine the above two inequalities, we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.126, + 0.825, + 0.144 + ], + "angle": 0, + "content": "\\[\na \\left(v _ {h}, v _ {h}\\right) \\leq C \\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2}, \\quad \\forall v _ {h} \\in V _ {h}. \\tag {32}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.151, + 0.608, + 0.167 + ], + "angle": 0, + "content": "By combining the inequalities Eqn. (31) and Eqn. 
(32), we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.173, + 0.826, + 0.205 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{C _ {3}} \\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2} \\leq a (v _ {h}, v _ {h}) \\leq C _ {3} \\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2}, \\quad \\forall v _ {h} \\in V _ {h}, \\int_ {\\Omega} v _ {h} = 0, \\tag {33}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.211, + 0.571, + 0.229 + ], + "angle": 0, + "content": "where \\(C_3 > 1\\) is a constant. Suppose \\(v_{h} = \\sum_{i = 1}^{n}\\xi_{i}\\varphi_{i}\\), then" + }, + { + "type": "equation", + "bbox": [ + 0.29, + 0.236, + 0.71, + 0.275 + ], + "angle": 0, + "content": "\\[\n\\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2} = \\int_ {\\Omega} v _ {h} ^ {2} d x = \\sum_ {i, j = 1} ^ {n} \\xi_ {i} \\xi_ {j} \\int_ {\\Omega} \\varphi_ {i} (x) \\varphi_ {j} (x) d x = \\xi^ {T} \\Phi \\xi ,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.284, + 0.681, + 0.302 + ], + "angle": 0, + "content": "where \\(\\xi = (\\xi_{i})\\) and the matrix \\(\\Phi = \\left(\\int_{\\Omega}\\varphi_{i}\\varphi_{j}\\right)\\) is positive definite. Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.308, + 0.826, + 0.339 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{C _ {4}} \\| \\xi \\| ^ {2} \\leq \\xi^ {T} \\Phi \\xi < C _ {4} \\| \\xi \\| ^ {2}. \\tag {34}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.346, + 0.698, + 0.363 + ], + "angle": 0, + "content": "By \\(a(v_h,v_h) = \\xi^T A\\xi\\), combing inequalities Eqn. (33) and Eqn. 
(34), we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.37, + 0.826, + 0.408 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{C _ {3} C _ {4}} \\| \\xi \\| ^ {2} \\leq \\xi^ {T} A \\xi \\leq C _ {3} C _ {4} \\| \\xi \\| ^ {2}, \\quad \\forall \\xi \\in \\mathbb {R} ^ {n}, \\sum_ {i = 1} ^ {n} \\xi_ {i} = 0, \\tag {35}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.415, + 0.518, + 0.431 + ], + "angle": 0, + "content": "where \\(C_3C_4 > 1\\). This proves the following lemma," + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.434, + 0.827, + 0.493 + ], + "angle": 0, + "content": "Lemma 6. By using Galerkin method using linear elements to numerically approximate the weak solution Eqn. (28) to the linearized Monge-Ampère Eqn. (6), if the uniform ellipticity Eqn. (22) holds, and the triangulation \\(\\mathcal{T}\\) is quasi-uniform, then the stiffness matrix of the linear system Eqn. (30) is positive definite on the space \\(\\sum_{i=1}^{n} \\xi_i = 0\\)," + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.499, + 0.826, + 0.539 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{C _ {3} C _ {4}} \\| \\xi \\| ^ {2} \\leq \\xi^ {T} A \\xi \\leq C _ {3} C _ {4} \\| \\xi \\| ^ {2}, \\quad \\forall \\xi \\in \\mathbb {R} ^ {n}, \\sum_ {i = 1} ^ {n} \\xi_ {i} = 0, \\tag {36}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.544, + 0.294, + 0.56 + ], + "angle": 0, + "content": "where \\(C_3C_4 > 1\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.57, + 0.718, + 0.587 + ], + "angle": 0, + "content": "Since the uniform ellipticity Eqn. (22) holds for any time \\(t \\in [0,1]\\), then we obtain" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.589, + 0.825, + 0.634 + ], + "angle": 0, + "content": "Corollary 7. By using Galerkin method with linear elements on quasi-uniform triangulations, the linearized Monge-Ampère equation in the continuity method Eqn. (6) always has a solution \\( v_h \\in V_h \\) for any \\( t \\in [0,1] \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.643, + 0.825, + 0.673 + ], + "angle": 0, + "content": "Please note that the central differential scheme can be treated as Galerkin's method on a special uniform triangulation. Therefore, the above estimates still hold." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.689, + 0.371, + 0.703 + ], + "angle": 0, + "content": "B.3 CONVERGENCE RATE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.714, + 0.827, + 0.744 + ], + "angle": 0, + "content": "Theorem 8 (main). Given a domain \\(\\Omega \\subset \\mathbb{R}^n\\), which is a canonical cuboid \\(\\Omega = [-1,1]^n\\), and a positive density function \\(f:\\Omega \\to \\mathbb{R}\\) with the balance condition" + }, + { + "type": "equation", + "bbox": [ + 0.416, + 0.75, + 0.581, + 0.784 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\Omega} f (x) d x = \\int_ {\\Omega} 1 \\cdot d x,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.79, + 0.825, + 0.822 + ], + "angle": 0, + "content": "suppose the mirror reflection extension Eqn. (14) of \\( f \\) to the flat torus \\( \\tilde{f} : \\mathbb{T}^n \\to \\mathbb{R} \\) is \\( C^\\alpha \\), \\( \\alpha \\in (0,1) \\), then Monge-Ampère equation," + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.826, + 0.62, + 0.844 + ], + "angle": 0, + "content": "\\[\nd e t D ^ {2} u (x) = f (x), \\quad \\nabla u (\\Omega) = \\Omega\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.85, + 0.826, + 0.88 + ], + "angle": 0, + "content": "can be solved using FFT-OT Algorithm Alg. (1). 
In particular, one can choose the step length parameter \\(\\tau\\), such that there is a constant \\(0 < \\gamma < 1\\), the approximation error satisfies" + }, + { + "type": "equation", + "bbox": [ + 0.425, + 0.886, + 0.574, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\left\\| f - \\rho_ {k + 1} \\right\\| ^ {2} < C \\gamma^ {k},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.91, + 0.518, + 0.926 + ], + "angle": 0, + "content": "namely the algorithm has a linear convergence rate." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.51, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.737, + 0.121 + ], + "angle": 0, + "content": "Proof. Suppose at the \\(k + 1\\)-th iteration, \\(\\rho_{k + 1} = \\operatorname*{det}(I + D^2 u_{k + 1})\\), \\(\\| v_k\\| \\sim O(\\tau^{-1})\\)" + }, + { + "type": "equation", + "bbox": [ + 0.295, + 0.127, + 0.697, + 0.202 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} f - \\rho_ {k + 1} = f - \\det (I + \\mathcal {D} ^ {2} u _ {k} + \\mathcal {D} ^ {2} v _ {k}) \\\\ = f - \\det (I + \\mathcal {D} ^ {2} u _ {k}) - \\sum_ {p q} u _ {k} ^ {p q} \\partial_ {p} \\partial_ {q} v _ {k} + o (\\tau^ {- 1}) \\\\ = \\left(f - \\rho_ {k}\\right) - L _ {k} \\left[ v _ {k} \\right] + o \\left(\\tau^ {- 1}\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.209, + 0.666, + 0.227 + ], + "angle": 0, + "content": "where \\(L_{k}[v_{k}] = \\sum_{pq}u_{k}^{pq}\\partial_{p}\\partial_{q}v_{k}\\). Hence by integration by parts Eqn. 
(27)," + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.235, + 0.73, + 0.289 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| f - \\rho_ {k + 1} \\right\\| _ {L ^ {2} (\\Omega)} ^ {2} = \\left\\| f - \\rho_ {k} \\right\\| _ {L ^ {2} (\\Omega)} ^ {2} - 2 \\int_ {\\Omega} L _ {k} [ v _ {k} ] (f - \\rho_ {k}) + o (\\tau^ {- 1}) \\\\ = \\left\\| f - \\rho_ {k} \\right\\| _ {L ^ {2} (\\Omega)} ^ {2} + 2 a _ {k} (f - \\rho_ {k}, v _ {k}) + o (\\tau^ {- 1}) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.296, + 0.806, + 0.312 + ], + "angle": 0, + "content": "where \\(a_{k}\\) is the bilinear form in Eqn.(27). In the discrete case, all functions are in \\(V_{h}\\), we denote" + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.318, + 0.745, + 0.339 + ], + "angle": 0, + "content": "\\[\n\\| u _ {h} \\| _ {\\Phi} ^ {2} := \\| u _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2} = u _ {h} ^ {T} \\Phi u _ {h}, \\quad \\| u _ {h} \\| ^ {2} := u _ {h} ^ {T} u _ {h}, \\quad \\| u _ {h} \\| _ {A} ^ {2} := u _ {h} ^ {T} A u _ {h},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.346, + 0.441, + 0.362 + ], + "angle": 0, + "content": "by the inequality Eqn. (34) and Eqn. 
35," + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.369, + 0.747, + 0.4 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{C _ {4}} \\| u _ {h} \\| ^ {2} \\leq \\| u _ {h} \\| _ {\\Phi} ^ {2} \\leq C _ {4} \\| u _ {h} \\| ^ {2}, \\quad \\frac {1}{C _ {3} C _ {4}} \\| u _ {h} \\| ^ {2} \\leq \\| u _ {h} \\| _ {A} ^ {2} \\leq C _ {3} C _ {4} \\| u _ {h} \\| ^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.406, + 0.245, + 0.421 + ], + "angle": 0, + "content": "Therefore" + }, + { + "type": "equation", + "bbox": [ + 0.206, + 0.428, + 0.825, + 0.448 + ], + "angle": 0, + "content": "\\[\n\\left\\| f _ {h} - \\rho_ {h, k + 1} \\right\\| _ {\\Phi} ^ {2} = \\left\\| f _ {h} - \\rho_ {h, k} \\right\\| _ {\\Phi} ^ {2} - 2 \\tau^ {- 1} \\left(f - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} \\left(f _ {h} - \\rho_ {h, k}\\right) + o \\left(\\tau^ {- 1}\\right), \\tag {37}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.454, + 0.825, + 0.541 + ], + "angle": 0, + "content": "where \\(A_{k}\\) is the stiffness matrix in Eqn.(30), and \\(\\bar{A}_k\\) is the mean stiffness matrix. (By the uniform ellipticity Eqn. (22), the eigen values of the adjoint matrix \\((u^{pq})(x,t)\\) is uniformly bounded away from zero in the space \\(\\mathcal{H} := \\{\\xi \\in \\mathbb{R}^n | \\sum_i \\xi_i = 0\\}\\), so the eigen value of the mean adjoint matrix \\(\\bar{u}^{pq}(t)\\) is bounded away from zero in \\(\\mathcal{H}\\). After discretization, the eigen values of \\(\\bar{A}_k\\) is strictly positive in \\(\\mathcal{H}\\), hence \\(\\bar{A}_k\\) is invertible in \\(\\mathcal{H}\\). In the following discussion, the term \\(o(\\tau^{-1})\\) will be ignored.) 
Remark that the following displayed equation is a scalar" + }, + { + "type": "equation", + "bbox": [ + 0.262, + 0.548, + 0.735, + 0.568 + ], + "angle": 0, + "content": "\\[\n\\left(f _ {h} - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} (f - \\rho_ {h, k}) = \\mathrm {t r} \\left(\\left(f _ {h} - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} (f _ {h} - \\rho_ {h, k})\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.575, + 0.825, + 0.624 + ], + "angle": 0, + "content": "Since \\( A_{k} \\) and \\( \\bar{A}_{k} \\) are symmetric, positive definite on the space \\( \\sum_{i}\\xi_{i} = 0 \\), \\( \\| A_k\\| _2\\leq C_3C_4 \\) and \\( \\| \\bar{A}_k\\| _2\\leq C_3C_4 \\), so are their inverses. Since \\( A_{n} \\) and \\( \\bar{A}_n \\) are symmetric, positive definite on the space orthogonal to \\( (1,1,\\ldots ,1)^T \\), by Eqn. (35) and \\( \\| A_k\\bar{A}_k^{-1}\\| \\leq \\| A_k\\| \\| \\bar{A}_k^{-1}\\| \\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.63, + 0.694, + 0.665 + ], + "angle": 0, + "content": "\\[\n\\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}} \\| f _ {h} - \\rho_ {h, k} \\| _ {\\Phi} ^ {2} \\leq \\left(f _ {h} - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} (f _ {h} - \\rho_ {h, k}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.67, + 0.367, + 0.686 + ], + "angle": 0, + "content": "Plug into Eqn. (37), we have" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.694, + 0.826, + 0.731 + ], + "angle": 0, + "content": "\\[\n\\left\\| f _ {h} - \\rho_ {h, k + 1} \\right\\| _ {\\Phi} ^ {2} \\leq \\left(1 - \\frac {1}{\\tau} \\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}}\\right) \\left\\| f _ {h} - \\rho_ {h, k} \\right\\| _ {\\Phi} ^ {2} \\leq \\left(1 - \\frac {1}{\\tau} \\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}}\\right) ^ {k} \\left\\| f _ {h} - \\rho_ {h, 0} \\right\\| _ {\\Phi} ^ {2}. 
\\tag {38}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.738, + 0.592, + 0.755 + ], + "angle": 0, + "content": "We can choose the step-length \\(\\tau^{-1}\\), such that \\(\\gamma \\in (0, 1)\\), where" + }, + { + "type": "equation", + "bbox": [ + 0.436, + 0.762, + 0.56, + 0.797 + ], + "angle": 0, + "content": "\\[\n\\gamma = 1 - \\frac {(n - 1)}{\\tau C _ {3} ^ {2} C _ {4} ^ {3}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.802, + 0.243, + 0.816 + ], + "angle": 0, + "content": "Therefore" + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.815, + 0.825, + 0.834 + ], + "angle": 0, + "content": "\\[\n\\left\\| f _ {h} - \\rho_ {h, k + 1} \\right\\| _ {\\Phi} ^ {2} \\leq \\gamma^ {k} \\left\\| f _ {h} - \\rho_ {h, 0} \\right\\| _ {\\Phi} ^ {2} \\leq C _ {4} \\gamma^ {k} \\left\\| f _ {h} - \\rho_ {h, 0} \\right\\| ^ {2}. \\tag {39}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.838, + 0.826, + 0.851 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.488, + 0.884 + ], + "angle": 0, + "content": "B.4 DIFFERENTIAL OPERATOR USING FFT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "By using the Discrete Fourier Transformation, the differential operators can be converted to algebraic operators in the frequency domain." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.747, + 0.12 + ], + "angle": 0, + "content": "Lemma 9. 
Suppose the discrete function is \\( u_{i,j,k} \\), with discrete Fourier transformation" + }, + { + "type": "equation", + "bbox": [ + 0.278, + 0.123, + 0.719, + 0.165 + ], + "angle": 0, + "content": "\\[\nu _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} e ^ {\\sqrt {- 1} \\frac {2 \\pi m i}{M}} e ^ {\\sqrt {- 1} \\frac {2 \\pi n j}{N}} e ^ {\\sqrt {- 1} \\frac {2 \\pi l k}{L}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.666, + 0.183 + ], + "angle": 0, + "content": "then the differential operator using central difference \\(\\partial_i\\partial_i u_{i,j,k}\\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.185, + 0.75, + 0.261 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {i} \\partial_ {i} u _ {i, j, k} = \\frac {1}{h _ {x} ^ {2}} \\left(u _ {i + 1, j, k} + u _ {i - 1, j, k} - 2 u _ {i, j, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 \\left(\\cos \\frac {2 \\pi m}{M} - 1\\right)}{h _ {x} ^ {2}} e ^ {\\imath \\frac {2 \\pi m i}{M}} e ^ {\\imath \\frac {2 \\pi n j}{N}} e ^ {\\imath \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.265, + 0.462, + 0.281 + ], + "angle": 0, + "content": "where \\(\\iota = \\sqrt{-1}\\), and \\(\\partial_i\\partial_ju_{i,j,k}\\) is given by," + }, + { + "type": "equation", + "bbox": [ + 0.237, + 0.284, + 0.757, + 0.36 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {i} \\partial_ {j} u _ {i, j, k} = \\frac {1}{4 h _ {x} h _ {y}} \\left(u _ {i + 1, j + 1, k} + u _ {i - 1, j - 1, k} - u _ {i + 1, j - 1, k} - u _ {i - 1, j + 1, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h 
_ {y}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.374, + 0.313, + 0.389 + ], + "angle": 0, + "content": "Proof. By equations" + }, + { + "type": "equation", + "bbox": [ + 0.27, + 0.392, + 0.728, + 0.442 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\cos (A + \\alpha) + \\cos (A - \\alpha) - 2 \\cos (A) \\\\ = (\\cos A \\cos \\alpha - \\sin A \\sin \\alpha) + (\\cos A \\cos \\alpha + \\sin A \\sin \\alpha) - 2 \\cos A \\\\ = 2 (\\cos \\alpha - 1) \\cos A \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.446, + 0.202, + 0.459 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.27, + 0.46, + 0.729, + 0.511 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sin (A + \\alpha) + \\sin (A - \\alpha) - 2 \\sin (A) \\\\ = (\\sin A \\cos \\alpha + \\cos A \\sin \\alpha) + (\\sin A \\cos \\alpha - \\cos A \\sin \\alpha) - 2 \\cos A \\\\ = 2 (\\cos \\alpha - 1) \\sin A \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.515, + 0.242, + 0.528 + ], + "angle": 0, + "content": "we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.28, + 0.525, + 0.717, + 0.56 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{h _ {x} ^ {2}} \\left[ e ^ {\\iota^ {\\frac {2 \\pi m (i + 1)}{M}}} + e ^ {\\iota^ {\\frac {2 \\pi m (i - 1)}{M}}} - 2 e ^ {\\iota^ {\\frac {2 \\pi m i}{M}}} \\right] = \\frac {2 \\left(\\cos \\frac {2 \\pi m}{M} - 1\\right)}{h _ {x} ^ {2}} e ^ {\\iota^ {\\frac {2 \\pi m i}{M}}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.56, + 0.383, + 0.575 + ], + "angle": 0, + "content": "by direct computation, we have" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.577, + 0.791, + 0.697 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {i} \\partial_ {i} u _ {i, j, k} = \\frac {1}{h _ {x} ^ {2}} (u _ {i + 1, 
j, k} + u _ {i - 1, j, k} - 2 u _ {i, j, k}) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {e ^ {\\iota \\frac {2 \\pi m (i + 1)}{M}} + e ^ {\\iota \\frac {2 \\pi m (i - 1)}{M}} - 2 e ^ {\\iota \\frac {2 \\pi m i}{M}}}{h _ {x} ^ {2}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 (\\cos \\frac {2 \\pi m}{M} - 1)}{h _ {x} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.7, + 0.328, + 0.715 + ], + "angle": 0, + "content": "Similarly, by equations" + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.717, + 0.794, + 0.813 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\cos (A + \\alpha + B + \\beta) + \\cos (A - \\alpha + B - \\beta) - \\cos (A + \\alpha + B - \\beta) - \\cos (A - \\alpha + B + \\beta) \\\\ = \\cos (A + B + \\alpha + \\beta) + \\cos (A + B - \\alpha - \\beta) - \\cos (A + B + \\alpha - \\beta) - \\cos (A + B - \\alpha + \\beta) \\\\ = 2 \\cos (A + B) \\cos (\\alpha + \\beta) - 2 \\cos (A + B) \\cos (\\alpha - \\beta) \\\\ = 2 \\cos (A + B) (\\cos (\\alpha + \\beta) - \\cos (\\alpha - \\beta)) \\\\ = 2 \\cos (A + B) (\\cos \\alpha \\cos \\beta - \\sin \\alpha \\sin \\beta - \\cos \\alpha \\cos \\beta - \\sin \\alpha - \\sin \\beta) \\\\ = - 4 \\cos (A + B) \\sin \\alpha \\sin \\beta \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.817, + 0.201, + 0.829 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.207, + 0.831, + 0.791, + 0.927 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sin (A + \\alpha + B + \\beta) + \\sin (A - \\alpha + B - \\beta) - \\sin (A + \\alpha + B - \\beta) - \\sin (A - 
\\alpha + B + \\beta) \\\\ = \\sin (A + B + \\alpha + \\beta) + \\sin (A + B - \\alpha - \\beta) - \\sin (A + B + \\alpha - \\beta) - \\sin (A + B - \\alpha + \\beta) \\\\ = 2 \\sin (A + B) \\cos (\\alpha + \\beta) - 2 \\sin (A + B) \\cos (\\alpha - \\beta) \\\\ = 2 \\sin (A + B) (\\cos (\\alpha + \\beta) - \\cos (\\alpha - \\beta)) \\\\ = 2 \\sin (A + B) (\\cos \\alpha \\cos \\beta - \\sin \\alpha \\sin \\beta - \\cos \\alpha \\cos \\beta - \\sin \\alpha - \\sin \\beta) \\\\ = - 4 \\sin (A + B) \\sin \\alpha \\sin \\beta \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.405, + 0.12 + ], + "angle": 0, + "content": "we deduce the following equation," + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.128, + 0.756, + 0.202 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {i} \\partial_ {j} u _ {i, j, k} = \\frac {1}{4 h _ {x} h _ {y}} \\left(u _ {i + 1, j + 1, k} + u _ {i - 1, j - 1, k} - u _ {i + 1, j - 1, k} - u _ {i - 1, j + 1, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h _ {y}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.21, + 0.824, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.24, + 0.787, + 0.256 + ], + "angle": 0, + "content": "Similarly, we have the representations of other differential operators in the frequency domain," + }, + { + "type": "equation", + "bbox": [ + 0.247, + 
0.263, + 0.749, + 0.337 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {j} \\partial_ {j} u _ {i, j, k} = \\frac {1}{h _ {x} ^ {2}} \\left(u _ {i, j + 1, k} + u _ {i, j - 1, k} - 2 u _ {i, j, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 (\\cos \\frac {2 \\pi n}{N} - 1)}{h _ {y} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.346, + 0.748, + 0.42 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {k} \\partial_ {k} u _ {i, j, k} = \\frac {1}{h _ {z} ^ {2}} \\left(u _ {i, j, k + 1} + u _ {i, j, k - 1} - 2 u _ {i, j, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 \\left(\\cos \\frac {2 \\pi l}{L} - 1\\right)}{h _ {z} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.444, + 0.753, + 0.518 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {j} \\partial_ {k} u _ {i, j, k} = \\frac {1}{4 h _ {y} h _ {z}} \\left(u _ {i, j + 1, k + 1} + u _ {i, j - 1, k - 1} - u _ {i, j + 1, k - 1} - u _ {i, j - 1, k + 1}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi n}{N} \\sin \\frac {2 \\pi l}{L}}{h _ {y} h _ {z}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.528, + 0.754, + 0.6 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {k} \\partial_ {i} u _ {i, j, k} = \\frac {1}{4 h _ {z} h _ 
{x}} \\left(u _ {i + 1, j, k + 1} + u _ {i - 1, j, k - 1} - u _ {i + 1, j, k - 1} - u _ {i - 1, j, k + 1}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi l}{L} \\sin \\frac {2 \\pi m}{M}}{h _ {z} h _ {x}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.623, + 0.408, + 0.638 + ], + "angle": 0, + "content": "C ALGORITHM PIPELINES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.654, + 0.825, + 0.684 + ], + "angle": 0, + "content": "In this section, we give the algorithm pipeline of the FFT-OT in Alg. 1 and the details to solve the costant coefficient elliptic PDE through FFT in Alg. 2." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.702, + 0.324, + 0.717 + ], + "angle": 0, + "content": "Algorithm 1: FFT-OT" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.721, + 0.792, + 0.746 + ], + "angle": 0, + "content": "Input: Domain \\(\\Omega = [-1, 1]^3\\), the source density function \\(f > 0\\), the target density \\(g = 1\\), step length \\(\\tau\\), approximation error threshold \\(\\varepsilon\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.747, + 0.744, + 0.77 + ], + "angle": 0, + "content": "Output: Solution \\(\\frac{1}{2}\\| x\\|^2 + u_n\\) to the Monge-Ampère Eqn. (2) with the corresponding boundary condition." 
+ }, + { + "type": "text", + "bbox": [ + 0.199, + 0.772, + 0.325, + 0.785 + ], + "angle": 0, + "content": "Initialize \\(u_0(x) = 0\\)" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.786, + 0.281, + 0.796 + ], + "angle": 0, + "content": "while true do" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.798, + 0.434, + 0.811 + ], + "angle": 0, + "content": "Compute the Hessian matrix \\( D^2 u_n(x) \\)" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.811, + 0.555, + 0.824 + ], + "angle": 0, + "content": "Compute the density function \\(\\rho_{n}(x)\\gets \\operatorname *{det}(I + D^{2}u_{n}(x))\\)" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.824, + 0.368, + 0.836 + ], + "angle": 0, + "content": "if \\(\\| f - \\rho_n\\|_{L_2(\\Omega)} < \\varepsilon\\) then" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.836, + 0.266, + 0.847 + ], + "angle": 0, + "content": "Break;" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.851, + 0.566, + 0.864 + ], + "angle": 0, + "content": "Compute the adjoint matrix \\([H_n^{pq}(x)]\\gets \\mathrm{Adj}(I + D^2 u_n(x))\\)" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.865, + 0.535, + 0.876 + ], + "angle": 0, + "content": "Compute the mean adjoint matrix \\([H_n^{pq}]\\) using Eqn. (11);" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.877, + 0.651, + 0.889 + ], + "angle": 0, + "content": "Solve the constant coefficient elliptic PDE (12) using the FFT Solver Alg. 
2;" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.89, + 0.511, + 0.902 + ], + "angle": 0, + "content": "Update the Brenier potential \\( u_{n + 1}(x) \\gets u_n + \\tau v_n \\);" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.101, + 0.279, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.178, + 0.278, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.252, + 0.278, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.328, + 0.278, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.403, + 0.268, + 0.417 + ], + "angle": 0, + "content": "(a) Density" + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.101, + 0.391, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.178, + 0.391, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.252, + 0.391, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.328, + 0.391, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.299, + 0.403, + 0.386, + 0.417 + ], + "angle": 0, + "content": "(b) Rejection" + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.101, + 0.502, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.178, + 0.502, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.252, + 0.502, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 
0.328, + 0.502, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.429, + 0.403, + 0.479, + 0.417 + ], + "angle": 0, + "content": "(c) MH" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.101, + 0.614, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.178, + 0.614, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.252, + 0.614, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.328, + 0.614, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.537, + 0.403, + 0.595, + 0.417 + ], + "angle": 0, + "content": "(d) Slice" + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.103, + 0.725, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.178, + 0.724, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.252, + 0.724, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.328, + 0.724, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.632, + 0.403, + 0.722, + 0.416 + ], + "angle": 0, + "content": "(e) Ours-rand" + }, + { + "type": "image", + "bbox": [ + 0.742, + 0.102, + 0.835, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.742, + 0.178, + 0.835, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.742, + 0.252, + 0.835, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.742, + 0.328, + 0.835, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.743, + 0.403, + 0.829, + 0.417 + ], + "angle": 0, + "content": "(f) Ours-grid" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.422, + 0.825, + 0.5 + ], + 
"angle": 0, + "content": "Figure 5: 3D density function sampling. (a) The density functions in different slices of the same model, namely the 40th, 56th, 72th and 80th. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with \\( T^{-1} \\). (f) The sampling results by mapping the grid centers back with \\( T^{-1} \\). The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better. Zoom in for better visualization." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.519, + 0.618, + 0.534 + ], + "angle": 0, + "content": "Algorithm 2: FFT Solver for the Constant Coefficient Elliptic PDE" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.537, + 0.779, + 0.552 + ], + "angle": 0, + "content": "Input: Domain \\(\\Omega = [-1,1]^3\\), \\(M,N,L\\), \\(\\{a^{pq}\\}\\), \\(b^r\\), \\(c\\), function \\(f\\) with the periodic boundary condition" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.552, + 0.466, + 0.564 + ], + "angle": 0, + "content": "Output: Solution \\(u\\) to the elliptic PDE Eqn. (18)" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.565, + 0.472, + 0.577 + ], + "angle": 0, + "content": "Discretize the domain \\(\\Omega\\) to a \\(M\\times N\\times L\\) grid;" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.577, + 0.377, + 0.59 + ], + "angle": 0, + "content": "Sample the function \\(f\\) to \\(f_{i,j,k}\\)" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.59, + 0.54, + 0.605 + ], + "angle": 0, + "content": "Compute FFT using Eqn. 
(16), \\(\\{\\hat{f}_{m,n,l}\\} \\gets \\mathrm{FFT}(\\{f_{i,j,k}\\})\\);" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.605, + 0.526, + 0.618 + ], + "angle": 0, + "content": "for \\((m,n,l)\\in [0,M - 1]\\times [0,N - 1]\\times [0,L - 1]\\) do" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.618, + 0.456, + 0.629 + ], + "angle": 0, + "content": "Compute the factor \\(\\lambda_{m,n,l}\\) using Eqn. (19);" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.629, + 0.313, + 0.64 + ], + "angle": 0, + "content": "if \\(\\lambda_{m,n,l}\\) is 0 then" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.641, + 0.3, + 0.653 + ], + "angle": 0, + "content": "\\(\\hat{u}_{m,n,l}\\gets 0;\\)" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.654, + 0.225, + 0.664 + ], + "angle": 0, + "content": "else" + }, + { + "type": "text", + "bbox": [ + 0.224, + 0.666, + 0.378, + 0.682 + ], + "angle": 0, + "content": "\\(\\hat{u}_{m,n,l}\\gets \\hat{f}_{m,n,l} / \\lambda_{m,n,l};\\)" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.688, + 0.614, + 0.702 + ], + "angle": 0, + "content": "Compute the Inverse FFT using Eqn. (17), \\(\\{u_{i,j,k}\\} \\gets \\mathrm{IFFT}(\\{\\hat{u}_{m,n,l}\\})\\);" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.702, + 0.286, + 0.715 + ], + "angle": 0, + "content": "Return \\(\\{u_{i,j,k}\\}\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.748, + 0.424, + 0.763 + ], + "angle": 0, + "content": "D APPENDIX EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.78, + 0.825, + 0.809 + ], + "angle": 0, + "content": "In this section, as a compensation of the experiments in the main paper, we give more results on the 3D adaptive sampling and volumetric magnifier." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.825, + 0.535, + 0.839 + ], + "angle": 0, + "content": "D.1 MORE RESULTS ON 3D ADAPTIVE SAMPLING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.85, + 0.826, + 0.926 + ], + "angle": 0, + "content": "In the experiments, we set the density function \\( f(x) = \\sum_{i=1}^{30} p_i \\mathcal{N}(\\mu_i, \\Sigma_i) \\), where \\( \\mathcal{N}(\\mu_i, \\Sigma_i) \\) represents Gaussian distribution with mean \\( \\mu_i \\) and variance \\( \\Sigma_i = \\mathrm{diag}(\\sigma_{i0}^2, \\sigma_{i1}^2, \\sigma_{i2}^2) \\). \\( \\mu_i \\in \\mathbb{R}^3 \\) is uniformly sampled from \\( [0,1]^3 \\), \\( \\sigma_{ij} \\) is uniformly sampled from \\( [0,0.5] \\), \\( p_i \\in \\mathbb{R} \\) is uniformly sampled from \\( [0.2,1] \\) and normalized such that \\( \\int_{\\Omega} f(x) dx = 1 \\). Thus the source distribution \\( \\mu \\) is a complicated Gaussian mixture distribution restricted on \\( \\Omega = [0,1]^3 \\). After computing the OT map" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.101, + 0.279, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.178, + 0.278, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.252, + 0.278, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.328, + 0.278, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.403, + 0.268, + 0.417 + ], + "angle": 0, + "content": "(a) Density" + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.101, + 0.391, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", 
+ "bbox": [ + 0.296, + 0.178, + 0.391, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.252, + 0.391, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.328, + 0.391, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.299, + 0.403, + 0.386, + 0.417 + ], + "angle": 0, + "content": "(b) Rejection" + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.101, + 0.502, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.178, + 0.502, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.252, + 0.502, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.328, + 0.502, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.429, + 0.403, + 0.479, + 0.417 + ], + "angle": 0, + "content": "(c) MH" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.101, + 0.614, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.178, + 0.614, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.252, + 0.614, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.328, + 0.614, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.537, + 0.403, + 0.595, + 0.417 + ], + "angle": 0, + "content": "(d) Slice" + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.102, + 0.725, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.178, + 0.724, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.252, + 0.723, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.328, + 0.723, + 0.4 + ], + 
"angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.632, + 0.403, + 0.722, + 0.416 + ], + "angle": 0, + "content": "(e) Ours-rand" + }, + { + "type": "image", + "bbox": [ + 0.742, + 0.102, + 0.835, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.742, + 0.178, + 0.835, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.742, + 0.252, + 0.835, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.742, + 0.328, + 0.835, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.743, + 0.403, + 0.83, + 0.417 + ], + "angle": 0, + "content": "(f) Ours-grid" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.422, + 0.825, + 0.499 + ], + "angle": 0, + "content": "Figure 6: 3D density function sampling. (a) The density functions in different slices of the same model, namely the 56th, 64th, 80th and 88th. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with \\( T^{-1} \\). (f) The sampling results by mapping the grid centers back with \\( T^{-1} \\). The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better. Zoom in for better visualization." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.525, + 0.825, + 0.625 + ], + "angle": 0, + "content": "\\(T\\) from \\(\\mu\\) to the uniform distribution \\(\\nu\\) defined on \\([-1,1]^3\\), we conduct two groups of experiments: (i) we map the cell centers of the grid \\(\\{y_k\\}\\) of \\([-1,1]^3\\) back to \\([-1,1]^3\\) through the inverse OT map \\(T^{-1}(y_k)\\) defined by Eqn. 
(20); (ii) we randomly sample \\(100k\\) samples \\(\\{y_k\\}\\) from the Uniform distribution defined in \\([-1,1]^3\\), then map them back to \\([-1,1]^3\\) through the inverse OT map \\(T^{-1}(y_k)\\). In order to keep the consistency with the mirror reflection process in the FFT-OT algorithm, we also reflect the generated samples back to \\(\\Omega\\). To visualize the results of the \\(k\\)th slice, we plot the samples whose \\(z\\) coordinates satisfy the inequality," + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.631, + 0.631, + 0.647 + ], + "angle": 0, + "content": "\\[\nk / 1 2 8 - 1 / 2 5 6 \\leq z \\leq k / 1 2 8 + 1 / 2 5 6.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.661, + 0.825, + 0.788 + ], + "angle": 0, + "content": "In Fig. 5 and Fig. 6, we give more sampling results of different slices correspond to the two models used in Fig. 2 in the main paper. Fig. 5 visualize the density function restricted on the 40th, 56th, 72th and 80th slices for different methods of the model displayed in the first row of 2. Fig. 6 visualize the density function restricted on the 56th, 64th, 80th and 88th slices for different methods of the model displayed in the second row of 2. Compared with the classical methods, the both sampling strategies of our method give decent sampling results that fit the prescribed density function well. Moreover, the number of generated samples for different slices of the same 3D model fits the density functions restricted to the corresponding slices well, namely more samples are generated in the brighter regions for different slices." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.803, + 0.538, + 0.818 + ], + "angle": 0, + "content": "D.2 MORE RESULTS ON VOLUMETRIC MAGNIFIER" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.825, + 0.9 + ], + "angle": 0, + "content": "In this experiment, we magnify the volumetric MRI image of the aneurysm by different amplification factors. In Fig. 
7, we show the original aneurysm viewed from difference angles in the first column. The last three columns give the magnified results with different amplification factors from the viewpoints same as those in the first column. We can see that the aneurysm region is successfully magnified by different factors and the rest parts of the volume nearly keeps the same." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.299, + 0.335, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.419, + 0.335, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.538, + 0.335, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.658, + 0.3, + 0.672 + ], + "angle": 0, + "content": "(a) Original" + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.3, + 0.504, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.419, + 0.504, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.538, + 0.504, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.658, + 0.501, + 0.672 + ], + "angle": 0, + "content": "(b) Magnifying ratio 1" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.3, + 0.672, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.419, + 0.672, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.538, + 0.672, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.523, + 0.658, + 0.671, + 0.672 
+ ], + "angle": 0, + "content": "(c) Magnifying ratio 2" + }, + { + "type": "image", + "bbox": [ + 0.69, + 0.3, + 0.841, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.69, + 0.419, + 0.841, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.69, + 0.539, + 0.841, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.692, + 0.658, + 0.839, + 0.672 + ], + "angle": 0, + "content": "(d) Magnifying ratio 3" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.683, + 0.825, + 0.723 + ], + "angle": 0, + "content": "Figure 7: The volume magnifier of an aneurysm. The first column shows the original volumetric data from different viewpoints, and the last three columns give the magnified data from the same viewpoints of the first column with different magnifying ratios. The yellow circles denote the aneurysm or the ROIs." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ] +] \ No newline at end of file diff --git a/2023/Volumetric Optimal Transportation by Fast Fourier Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_origin.pdf b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f382ac1eb3e225168f430817549c26f0bef37a4a --- /dev/null +++ b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/468f5fc6-f60a-4c98-879c-a2f5d8b676d8_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13a314e552dbb5251d7ac7911371c568802a78352508b84faa906964f206d518 +size 21978046 diff --git a/2023/Volumetric Optimal Transportation by Fast Fourier Transform/full.md b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/full.md new file mode 100644 index 
0000000000000000000000000000000000000000..890a2da54a323f666fffcbde96cd4dfcb6ad039a --- /dev/null +++ b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/full.md @@ -0,0 +1,1027 @@ +# VOLUMETRIC OPTIMAL TRANSPORTATION BY FAST FOURIER TRANSFORM + +Na Lei* + +Dalian University of Technology +nalei@dlut.edu.cn + +Dongsheng An + +Stony Brook University +doan@cs.stonybrook.edu + +Min Zhang + +Zhejiang University min_zhang@zju.edu.cn + +Xiaoyin Xu + +Harvard Medical School +xxu@bwh.harvard.edu + +Xianfeng Gu + +Stony Brook University gu@cs.stonybrook.edu + +# ABSTRACT + +The optimal transportation map finds the most economical way to transport one probability measure to another, and it has been applied in a broad range of applications in machine learning and computer vision. By the Brenier theory, computing the optimal transport map is equivalent to solving a Monge-Ampère equation, which is highly non-linear. Therefore, the computation of optimal transportation maps is intrinsically challenging. In this work, we propose a novel and powerful method, the FFT-OT (fast Fourier transform-optimal transport), to compute the 3-dimensional OT problems. The method is based on several key ideas: first, the Monge-Ampère equation is linearized to a sequence of linear elliptic PDEs with spacial and temporal variant coefficients; second, the obliqueness property of optimal transportation maps is reformulated as a Neumann boundary condition; and third, the variant coefficient elliptic PDEs are approximated by constant coefficient elliptic PDEs and solved by FFT on GPUs. We also prove that the algorithm converges linearly. Experimental results show that the FFT-OT algorithm is more than a hundred times faster than the conventional methods based on the convex geometry. Furthermore, the method can be directly applied for sampling from complex 3D density functions in machine learning and magnifying the volumetric data in medical imaging. 
+ +# 1 INTRODUCTION + +Optimal transportation (OT) transports one probability measure to another in the most economical way, and it plays a fundamental role in areas like machine learning Courty et al. (2017); Altschuler et al. (2019), computer vision Arjovsky et al. (2017); Tolstikhin et al. (2018); An et al. (2020), and computer graphics Solomon et al. (2015); Nader & Guennebaud (2018). Given a Riemannian manifold $X$ , all the probability distributions on $X$ form an infinite dimensional space $\mathcal{P}(X)$ . Given any two distributions $\mu, \nu \in \mathcal{P}(X)$ , the optimal transportation map defines a distance between them, and the McCann interpolation McCann (1997) defines the geodesic connecting them. Hence optimal transportation equips $\mathcal{P}(X)$ with a Riemannian metric and defines its covariant differentiation, which provides a variational calculus framework for optimization in it. + +As the optimal transportation problem is highly non-linear, it is quite challenging to compute the OT maps. Recently, researchers have developed many algorithms. The geometric variational approach Aurenhammer et al. (1998); Gu et al. (2016); Levy (2015) based on the Brenier theorem Brenier (1991) is capable of achieving high accuracy for low dimensional problems, but it requires complicated geometric data structure and the storage complexity grows exponentially as the dimension increases. The Sinkhorn method Cuturi (2013) based on the Kantorovich theorem adds an entropic regularizer to the primal problem and can handle high dimensional tasks, but it suffers from the intrinsic approximation error. + +We propose a novel method to tackle this challenging problem through Fast Fourier Transformation (FFT). According to the Brenier theorem Brenier (1991), under the quadratic distance cost, the optimal transportation map is the gradient of the Brenier potential, which satisfies the Monge-Ampère equation. 
With the continuity method Delanoë (1991), the Monge-Ampère equation can be linearized as a sequence of elliptic partial differential equations (PDEs) with spacial and temporal variant coefficients. By iteratively solving the linearized Monge-Ampère equations, we can obtain the OT map. Specifically, we propose to approximate the linearized Monge-Ampère equation by constant coefficient elliptic PDEs and solve them using the FFT on GPUs. + +Our proposed FFT-OT method has many merits: (i) it is generalizable for arbitrary dimension; (ii) it has a linear convergence rate, namely the approximation error decays exponentially fast; (iii) in each iteration, the computational complexity of FFT is $O(n \log n)$ , thus our algorithm can solve large scale OT problems; and (iv) it is highly parallelable and can be efficiently implemented on GPUs. We demonstrate the efficiency of the FFT-OT algorithm by solving the volumetric OT problems for machine learning and medical imaging applications including sampling from given 3D density functions and volumetric magnifier. The algorithm also has its own limitations: (i) although it can be generalized to any dimensions, the storage complexity increases exponentially with respect to the dimension, so its power is limited by the memory size of the GPUs; (ii) Since the algorithm uses FFT, the current version of the method only works well for continuous density functions. (iii) In this work, we mainly focus on the computation of the OT map from the uniform distribution to another arbitrary continuous distribution. To extend the method to find the OT map between any two continuous measures, we can compute two OT maps from the uniform distribution to the both continuous measures, then combine them together. The combination will give a reasonable approximation of the OT map Nader & Guennebaud (2018). 
+ +Though Lei and Gu Lei & Gu (2021) also use FFT to solve the 2-dimensional OT problem, our method differs from their work in the following two aspects: (i) Lei and Gu's method uses the fixed point method to compute the 2D OT problems, ours is based on the linearization of the Monge-Ampère operator to solve the 3D OT problems, these are two different methodologies in PDE theory; (ii) In our paper, we also provide the theoretical convergence analysis of the proposed method. For more detailed analysis and related work, please refer to the Appendix A. + +# 2 OPTIMAL TRANSPORTATION THEORY + +In this section, we review the fundamental concepts and theorems of the OT problem and the Monge-Ampère equation, more details can be found in Villani (2008). + +Optimal Transportation Map and the Monge-Ampère equation Suppose the source domain $\Omega$ is an open set in $\mathbb{R}^d$ with the probability measure $\mu$ , the target domain $\Sigma$ is with the probability measure $\nu$ . Both $\mu$ and $\nu$ have density functions $d\mu(x) = f(x)dx$ and $d\nu(y) = g(y)dy$ , respectively, with the equal total mass: $\int_{\Omega} f(x)dx = \int_{\Sigma} g(y)dy$ , which is called the balance condition. + +Suppose $T: \Omega \to \Sigma$ is a measurable map. The mapping $T$ is called measure preserving and denoted as $T_{\#} \mu = \nu$ if the following relation + +$$ +\mu (T ^ {- 1} (A)) = \nu (A) \tag {1} +$$ + +holds for every Borel subset $A \subset \Sigma$ . A cost function $c: \Omega \times \Sigma \to \mathbb{R}$ measures the transportation cost for transporting the unit mass from $x \in \Omega$ to $y \in \Sigma$ . + +Problem 1 (Monge). The optimal transportation problem finds the measure preserving map with the minimal total transportation cost, + +$$ +\min _ {T _ {\#} \mu = \nu} \int_ {\Omega} c (x, T (x)) f (x) d x +$$ + +The solution to Monge's problem is called the optimal transport map between $\mu$ and $\nu$ .
The existence, uniqueness and regularity of OT maps depend on the boundedness and the continuity of the density functions, the convexity of the supporting domains, the continuity of their boundaries, and the cost function. In our current work, we focus on a situation similar to that in Saumier et al. (2013), + +- The cost function is the quadratic Euclidean distance $c(x, y) = \| x - y \|^2 / 2$ ; + +- The supports of the source and the target measures are the canonical cube $\Omega = [-1, 1]^3$ , which is uniformly convex; +- The source and the target measures $\mu, \nu$ are absolutely continuous with respect to the Lebesgue measure, their densities $f, g$ are positive and bounded away from zero; + +$$ +0 < m < f, g < M, +$$ + +and $f,g$ are of class $C^\alpha (\Omega)$ ; + +- The boundary condition is the second boundary condition (OT boundary condition), $T(\Omega) = \Omega$ . + +Then according to (Villani (2003) Theorem 14.4, Saumier et al. (2013) Theorem 2.1), the OT map $T: \Omega \to \Omega$ exists and is unique and invertible ( $\mu$ a.e.), and the Brenier potential is of class $C^{2,\beta}(\bar{\Omega})$ for some $0 < \beta < \alpha$ . + +Theorem 2. Assume that $\Omega, \mu, \nu, f$ and $g$ are defined as above. Then there exists a convex function $u: \Omega \to \mathbb{R}$ , $u \in C^{2,\beta}(\Omega)$ for some $0 < \beta < \alpha$ , such that $\nabla u$ pushes $\mu$ forward to $\nu$ , $(\nabla u)_{\#} \mu = \nu$ . Moreover, $\nabla u$ is unique and invertible ( $\mu$ a.e.), and its inverse $\nabla v$ satisfies $(\nabla v)_{\#} \nu = \mu$ . + +We call such a convex function $u$ the Brenier potential, it satisfies the Monge-Ampère equation, + +$$ +\det D ^ {2} u (x) = \frac {f (x)}{g \circ \nabla u (x)}. \tag {2} +$$ + +with the boundary condition $\nabla u(\Omega) = \Sigma$ . Then finding the optimal transportation map is equivalent to solving the corresponding Monge-Ampère equation.
In the current work, the target measure is always the Lebesgue measure, and the source density $f$ is of class $C^{2,\alpha}(\Omega)$ . + +Linearized Monge-Ampère Operator The Monge-Ampère operator is defined as + +$$ +\mathrm {M A} [ u ] = \det D ^ {2} u, +$$ + +which is highly non-linear. It can be linearized as following: + +$$ +\mathrm {M A} [ u + \varepsilon v ] = \det (D ^ {2} u + \varepsilon D ^ {2} v) \approx \det D ^ {2} u + \varepsilon \operatorname {T r a c e} (\operatorname {A d j} (D ^ {2} u) \cdot D ^ {2} v), \tag {3} +$$ + +where $\operatorname{Adj}(A)$ is the adjoint (co-factor) matrix of $A$ , $\operatorname{Adj}(A) := \det(A)A^{-T}$ . Therefore the linearized Monge-Ampère operator is defined as + +$$ +\mathrm {D M A} _ {u} [ v ] := \operatorname {T r a c e} \left(\operatorname {A d j} \left(D ^ {2} u\right) \cdot D ^ {2} v\right) = \sum_ {p, q = 1} ^ {d} u ^ {p q} (x) \partial_ {p} \partial_ {q} v (x), \tag {4} +$$ + +where $(u^{pq}) = \mathrm{Adj}(D^2 u)$ is the adjoint matrix of the Hessian of $u$ , and $\partial_p\partial_q\coloneqq \frac{\partial^2}{\partial x_p\partial x_q}$ . + +Continuity Method For simplicity, we assume the source domain coincides with the target domain, that is $\Omega = \Sigma$ , and the target density is $g(x) \equiv 1$ . The Monge-Ampère equation Eqn. (2) is simplified as $\operatorname{det}D^{2}u(x) = f(x)$ . Define a flow of density as + +$$ +\rho (x, t) = (1 - t) + t f (x), \quad t \in [ 0, 1 ]. \tag {5} +$$ + +The corresponding flow of the Brenier potentials is $u(x,t):\Omega \times [0,1]\to \mathbb{R}$ + +$$ +\det D _ {x} ^ {2} u (x, t) = \rho (x, t), \quad s. t. \nabla_ {x} u (x, t) (\Omega) = \Omega , +$$ + +where $D_x^2 u(x,t)$ is the Hessian of $u(x,t)$ with respect to $x$ , and $u(x,1)$ is the solution to the initial Monge-Ampère equation Eqn. (2). Take the derivative w.r.t. time $t$ on both sides of the linearized Monge-Ampère operator Eqn. 
(4), we obtain an elliptic PDE with the spacial and temporal variant coefficients of the unknown $v(x,t) \coloneqq \dot{u} (x,t)$ , namely the "velocity" of the Brenier potential, + +$$ +\mathrm {D M A} _ {u} [ v ] = \sum_ {p, q = 1} ^ {d} u ^ {p q} (x, t) \partial_ {p} \partial_ {q} v (x, t) = \frac {\partial}{\partial t} \rho (x, t) = f (x) - 1. \tag {6} +$$ + +At time $t = 0$ , the initial Brenier potential is known as $u(x,0) = \frac{1}{2}\| x\|^2$ . Suppose at time $t$ , we have obtained $u(x,t)$ already, then we can compute the adjoint matrix $u^{pq}(x,t)$ of the Hessian $D_x^2 u(x,t)$ and solve Eqn. (6) to get the velocity $v(x,t) = \dot{u} (x,t)$ . In turn, we move forward to time $t + \delta t$ , and update $u(x,t + \delta t)$ by $u(x,t) + \dot{u} (x,t)\delta t$ . By repeating this procedure, eventually we reach time $t = 1$ and obtain the solution $u(x)\coloneqq u(x,1)$ to the initial Monge-Ampère Eqn. (2). + +Obliqueness Boundary Condition Suppose the boundary of $\Omega$ is $C^1$ almost everywhere, therefore at a $C^1$ point $x\in \partial \Omega$ , the outer normal $\mathbf{n}(x)$ is well defined. For almost every boundary point $x\in \partial \Omega$ , the obliqueness condition is represented as + +$$ +\langle \mathbf {n} (x), \mathbf {n} (\nabla u (x)) \rangle \geq 0. \tag {7} +$$ + +Suppose $\Omega$ is a cuboid and has 6 faces, if a boundary point $x\in \partial \Omega$ is on a face, by the cyclic monotonicity of the map and the strict convexity of $u$ Villani (2008), its image $\nabla u(x)$ must be on the same face of $x$ , namely, + +$$ +\langle \nabla u (x) - x, \mathbf {n} (x) \rangle = 0. \tag {8} +$$ + +We can rewrite the Brenier potential as $u(x_{1},x_{2},\ldots ,x_{d}) = \frac{1}{2}\sum_{i = 1}^{d}x_{i}^{2} + v(x_{1},\dots ,x_{d})$ , then $\nabla u(x) - x = \nabla v(x)$ . By Eqn. (8), $v(x)$ satisfies the Neumann boundary condition, + +$$ +\frac {\partial v}{\partial \mathbf {n}} (x) = 0, \quad x \in \partial \Omega . 
\tag {9} +$$ + +Similarly, the velocity of the (modified) Brenier potential $v$ in Eqn. (6) also satisfies the Neumann boundary condition. The analysis about the existence and regularity of the solutions to Eqn. (6) with boundary condition Eqn. (9) can be found in the supplementary material. + +# 3 COMPUTATIONAL ALGORITHM + +Here we introduce the 3-dimensional FFT-OT algorithm, which can be generalized to any dimensions. We approximate the Monge-Ampère equation by a sequence of constant coefficient elliptic PDEs, and solve them by FFT on GPUs. More detailed analysis about the solution of the discretized Monge-Ampère equation, and the proofs of the lemmas and theorems are given by Appendix B. + +# 3.1 CONTINUITY METHOD FOR SOLVING THE MONGE-AMPERE EQUATION + +By using the continuity method, we can solve the Monge-Ampère equation iteratively. For simplicity, we assume the target measure is the Lebesgue's measure with $g \equiv 1$ . At the $n$ -th iteration, the Brenier potential is represented as $\frac{1}{2} \| x \|^2 + u_n(x)$ , its Hessian matrix is $H_n(x) \coloneqq \mathrm{I} + D^2 u_n(x)$ , the corresponding density function is defined as the determinant of the Hessian $\rho_n = \operatorname*{det}(H_n)$ , and the velocity of the Brenier potential is $v_n(x)$ . In the beginning, the Brenier potential $u_0(x)$ is zero, the Hessian is $H_0 = \mathrm{I}$ and the density is $\rho_0 = 1$ . At the $n$ -th step, we compute the adjoint matrix $[H_n^{pq}(x)]$ of the Hessian matrix $H_n(x)$ for any $x \in \Omega$ . According to Eqn. (3), the velocity $v_n(x)$ satisfies the variant coefficient elliptic PDE induced by the linearized Monge-Ampère operator, + +$$ +\mathrm {D M A} _ {u _ {n}} [ v _ {n} ] = \sum_ {p, q = 0} ^ {2} H _ {n} ^ {p q} (x) \partial_ {p} \partial_ {q} v _ {n} (x) = \frac {1}{\tau} \left(f (x) - \rho_ {n} (x)\right). \tag {10} +$$ + +Note that the right hand side of Eqn. 
(6) is the difference between the initial and the target densities, whereas here it is replaced by the difference between the initial and the current densities. The step length parameter $\tau \geq 1$ can be chosen to guarantee the convergence Loepers & Rapetti (2005). + +The elliptic PDE Eqn. (10) is with spatially variant coefficients. Although the traditional finite element method (FEM) can solve it using the GMRES algorithm Saad (2003), this algorithm can not be directly accelerated by GPUs. To overcome this difficulty, we approximate Eqn. (10) by a much simpler elliptic PDE with constant coefficients, which can be directly solved using the following FFT-OT algorithm pipeline Alg. 1 on GPUs in Appendix C. + +At the $n$ -th iteration, after obtaining the adjoint matrix $[H_n^{pq}(x)], x \in \Omega$ , we compute the mean adjoint matrix $[\bar{H}_n^{pq}(x)]$ + +$$ +\bar {H} _ {n} ^ {p q} := \frac {\int_ {\Omega} H _ {n} ^ {p q} (x) \rho_ {n} (x) d x}{\int_ {\Omega} \rho_ {n} (x) d x}, \quad p, q = 0, 1, 2 \tag {11} +$$ + +and replace the elliptic PDE Eqn.(10) with variant coefficients by the elliptic PDE with constant coefficients, + +$$ +\overline {{\mathrm {D M A}}} _ {u _ {n}} [ v _ {n} ] = \sum_ {p, q = 0} ^ {2} \bar {H} _ {n} ^ {p q} \partial_ {p} \partial_ {q} v _ {n} (x) = \frac {1}{\tau} (f (x) - \rho_ {n} (x)), \tag {12} +$$ + +where $\overline{\mathrm{DMA}}$ is called the mean linearized Monge-Ampère operator. + +Then we solve the constant coefficient elliptic PDE Eqn. (12) by FFT Algorithm Alg. 2 in Appendix C. Although the original variant coefficient PDE Eqn. (10) is replaced by its constant coefficient approximation Eqn. (12), the algorithm still converges to the solution with a linear convergence rate. This replacement allows the whole algorithm to be solved by FFT on GPUs, which greatly improves the computational efficiency. + +Theorem 3 (main). 
Given a domain $\Omega \subset \mathbb{R}^d$ , which is a canonical cuboid $\Omega = [-1,1]^d$ , and a positive density function $f:\Omega \to \mathbb{R}$ with the balance condition $\int_{\Omega}f(x)dx = \int_{\Omega}dx$ , suppose the mirror reflection extension Eqn. (14) of $f$ to the flat torus $\tilde{f}:\mathbb{T}^d\to \mathbb{R}$ is $C^\alpha$ , $\alpha \in (0,1)$ , then the Monge-Ampère equation, + +$$ +\det D ^ {2} u (x) = f (x), \quad \nabla u (\Omega) = \Omega +$$ + +can be solved using the FFT-OT Algorithm Alg. 1 in Appendix C. In particular, one can choose the step length parameter $\tau$ , such that there is a constant $0 < \gamma < 1$ such that the approximation error satisfies + +$$ +\left\| f - \rho_ {n + 1} \right\| ^ {2} < C \gamma^ {n}, \tag {13} +$$ + +namely the algorithm has a linear convergence rate. + +# 3.2 FFT SOLVER FOR CONSTANT COEFFICIENT ELLIPTIC PDES + +To solve the constant coefficient elliptic PDE Eqn. (12), we first extend the PDE to the flat torus by mirror reflection, then discretize the domain and compute the differential operators by central difference scheme. Finally the PDE is converted to algebraic equations in the frequency domain by FFT and can be efficiently solved on GPUs. + +Extension by Mirror Reflection Suppose $\Omega = [0,1]^3$ and $f:\Omega \to \mathbb{R}$ are given, we extend $\Omega$ to $\tilde{\Omega} = [-1,1]^3$ and $f$ to $\tilde{f}:\tilde{\Omega}\rightarrow \mathbb{R}$ by mirror reflection + +$$ +\tilde {f} (x, y, z) = f (| x |, | y |, | z |), \quad \forall (x, y, z) \in \tilde {\Omega}. \tag {14} +$$ + +By definition, $\tilde{f}$ satisfies the periodic boundary condition and can be treated as a function defined on the flat torus $\mathbb{T}^3$ . $\tilde{\Omega}$ is one of the fundamental domains of $\mathbb{T}^3$ . The constant coefficients $a^{p,q}$ remain unchanged. Then we solve the following constant coefficient elliptic PDE Eqn. (18) $L[\tilde{u}] = \tilde{f}$ with the periodic boundary condition.
Finally, the restriction of $\tilde{u}$ on $\Omega$ gives the initial solution $u$ to $L[u] = f$ with Neumann boundary condition. + +In the following, to avoid using overly complicated symbols, we use $(u,f,\Omega)$ to represent $(\tilde{u},\tilde{f},\tilde{\Omega})$ for simplicity. + +Tessellation Suppose $\Omega = [-1,1]^3$ is the canonical cube (a fundamental domain of a flat torus), we tessellate it to the regular cells, and the centers of the cells form a grid $M\times N\times L$ . The Brenier potential $u:\Omega \to \mathbb{R}$ is discretized to a tensor $u_{i,j,k}$ with $\{i,j,k\} \in \{0,\dots ,M - 1\} \times \{0,\dots ,N - 1\} \times \{0,\dots ,L - 1\}$ . The spacial step lengths are $(h_x,h_y,h_z) = (2 / M,2 / N,2 / L)$ . The coordinate of each sample point $(x_{i},y_{j},z_{k})$ is $(x_{i},y_{j},z_{k}) = (-1 + h_{x}(i + 1 / 2), - 1 + h_{y}(j + 1 / 2), - 1 + h_{z}(k + 1 / 2))$ . The periodic boundary condition is then formulated as + +$$ +u _ {i, j, k} = u _ {i + \alpha M, j + \beta N, k + \gamma L}, \quad \alpha , \beta , \gamma \in \mathbb {Z}. \tag {15} +$$ + +Finite Difference Differential Operator We use the standard central differences to compute the differential operators. The first order derivative $\mathcal{D}_x$ is approximated by + +$$ +\mathcal {D} _ {x} u _ {i, j, k} = \frac {u _ {i + 1 , j , k} - u _ {i - 1 , j , k}}{2 h _ {x}}, +$$ + +where the index $i + 1$ means $i + 1$ modulus $M$ . The operators $\mathcal{D}_y, \mathcal{D}_z$ are defined in a similar way. 
The second order derivative operator $\mathcal{D}_{xx}$ and $\mathcal{D}_{xy}$ are approximated by + +$$ +\mathcal {D} _ {x x} ^ {2} u _ {i, j, k} = \frac {u _ {i + 1 , j , k} + u _ {i - 1 , j , k} - 2 u _ {i , j , k}}{h _ {x} ^ {2}} +$$ + +$$ +\mathcal {D} _ {x y} ^ {2} u _ {i, j, k} = \frac {u _ {i + 1 , j + 1 , k} + u _ {i - 1 , j - 1 , k} - u _ {i + 1 , j - 1 , k} - u _ {i - 1 , j + 1 , k}}{4 h _ {x} h _ {y}} +$$ + +The other operators $\mathcal{D}_{yy},\mathcal{D}_{zz},\mathcal{D}_{yz}$ and $\mathcal{D}_{xz}$ are defined similarly. + +Discrete Fourier Transformation The discrete Fourier transformation (DFT) of $u_{i,j,k}$ is given by + +$$ +\hat {u} _ {m, n, l} = \sum_ {i = 0} ^ {M - 1} \sum_ {j = 0} ^ {N - 1} \sum_ {k = 0} ^ {L - 1} u _ {i, j, k} \hat {\omega} _ {m n l} \tag {16} +$$ + +$$ +u _ {i, j, k} = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \omega_ {m n l} \tag {17} +$$ + +where $\hat{\omega}_{mnl} = e^{-\iota \frac{2\pi mi}{M}}e^{-\iota \frac{2\pi nj}{N}}e^{-\iota \frac{2\pi lk}{L}}$ , $\omega_{mnl} = e^{\iota \frac{2\pi mi}{M}}e^{\iota \frac{2\pi nj}{N}}e^{\iota \frac{2\pi lk}{L}}$ and $\iota = \sqrt{-1}$ , $\{m,n,l\}$ are the indices of the frequency coefficients. By using DFT, the differential operators are converted to algebraic operators in the frequency domain. + +Lemma 4. Suppose the discrete function is $u_{i,j,k}$ , with the discrete Fourier transformation Eqn. (16) and Eqn. 
(17), by using the central difference scheme, the first order differential operator is given by + +$$ +\mathcal {D} _ {x} u _ {i, j, k} = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {\sin \frac {2 \pi m}{M}}{h _ {x}} \omega_ {m n l} +$$ + +the second order differential operators are represented by + +$$ +\mathcal {D} _ {x x} ^ {2} u _ {i, j, k} = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {2 (\cos \frac {2 \pi m}{M} - 1)}{h _ {x} ^ {2}} \omega_ {m n l} +$$ + +$$ +\mathcal {D} _ {x y} ^ {2} u _ {i, j, k} = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {- \sin \frac {2 \pi m}{M} \sin \frac {2 \pi n}{N}}{h _ {x} h _ {y}} \omega_ {m n l} +$$ + +The other differential operators $\mathcal{D}_y, \mathcal{D}_z, \mathcal{D}_{yy}, \mathcal{D}_{zz}, \mathcal{D}_{yz}$ and $\mathcal{D}_{xz}$ are also represented accordingly. The detailed proofs can be found in the supplementary material. + +FFT Solver Suppose we want to solve an elliptic PDE with constant coefficients on $\Omega \subset \mathbb{R}^3$ + +$$ +L [ u ] := \left(\sum_ {p = 0} ^ {2} \sum_ {q = 0} ^ {2} a ^ {p, q} \partial_ {p} \partial_ {q} + \sum_ {r = 0} ^ {2} b ^ {r} \partial_ {r} + c\right) u (x) = f (x), \tag {18} +$$ + +with the periodic boundary condition, where $a^{p,q}, b^r, c$ are constants, the matrix $(a^{p,q})$ is positive definite, namely the PDE is uniformly elliptic. 
By the discrete Fourier transformation $\mathcal{F}$ , we convert the differential equation to an algebraic equation in the frequency domain, + +$$ +\sum_ {p = 0} ^ {2} \sum_ {q = 0} ^ {2} a ^ {p, q} \mathcal {F} (\partial_ {p} \partial_ {q} u) + \sum_ {r = 0} ^ {2} b ^ {r} \mathcal {F} (\partial_ {r} u) + c \mathcal {F} (u) = \mathcal {F} (f) +$$ + +By applying Lemma 4 and defining + +$$ +\begin{array}{l} \lambda_ {m, n, l} = a ^ {0, 0} \frac {2 (\cos \frac {2 \pi m}{M} - 1)}{h _ {x} ^ {2}} + a ^ {1, 1} \frac {2 (\cos \frac {2 \pi n}{N} - 1)}{h _ {y} ^ {2}} \\ + a ^ {2, 2} \frac {2 (\cos \frac {2 \pi l}{L} - 1)}{h _ {z} ^ {2}} - \left(a ^ {0, 1} + a ^ {1, 0}\right) \frac {\sin \frac {2 \pi m}{M} \sin \frac {2 \pi n}{N}}{h _ {x} h _ {y}} \\ - \left(a ^ {1, 2} + a ^ {2, 1}\right) \frac {\sin \frac {2 \pi n}{N} \sin \frac {2 \pi l}{L}}{h _ {y} h _ {z}} - \left(a ^ {0, 2} + a ^ {2, 0}\right) \frac {\sin \frac {2 \pi l}{L} \sin \frac {2 \pi m}{M}}{h _ {z} h _ {x}} \\ + b ^ {0} \frac {\sin \frac {2 \pi m}{M}}{h _ {x}} + b ^ {1} \frac {\sin \frac {2 \pi n}{N}}{h _ {y}} + b ^ {2} \frac {\sin \frac {2 \pi l}{L}}{h _ {z}} + c \\ \end{array} \tag {19} +$$ + +we have the algebraic equations in frequency domain, + +$$ +\hat {u} _ {m, n, l} \lambda_ {m, n, l} = \hat {f} _ {m, n, l} +$$ + +With $\hat{u}_{m,n,l}$ 's, we can easily obtain $u_{i,j,k}$ 's by the Inverse Discrete Fourier Transform (IDFT), which means solving the constant coefficient elliptic equation. The algorithm is described in Alg. 2 in Appendix C. + +The FFT for solving the constant coefficient elliptic PDE can be efficiently computed with GPUs. Moreover, the algorithm Alg. 2 solves the constant coefficient elliptic PDEs with a periodic boundary condition, which can be generalized to solving the same type of PDEs with Neumann boundary condition by extending the PDE to the flat torus $\mathbb{T}^3$ using mirror reflection Eqn. (14).
+ +# 4 EXPERIMENTAL RESULTS + +In this section, we first show that our proposed FFT-OT algorithm converges linearly and runs $100 \times$ faster than the conventional convex geometry based solver Levy (2015), then demonstrate the method in two applications: 3D adaptive sampling and Volume Magnifier. All the algorithms are developed using generic C++ with CUDA Toolkit. All the experiments are conducted on a Windows laptop with Intel Core i7-7700HQ CPU with 16 GB memory and NVIDIA GeForce GTX 1060 Graphics Cards. More experiments can be found in Appendix D. + +# 4.1 RUNNING TIME AND CONVERGENCE ANALYSIS + +To show the performance of the proposed method, we experiment on the density functions defined by the Gaussian mixture models. To be specific, the domain is a cube $\Omega = [0,1]^3$ , the 3-dimensional density function defined on $\Omega$ is set to be $f(x) = \sum_{i=1}^{30} p_i \mathcal{N}(\mu_i, \Sigma_i)$ , where $\mathcal{N}(\mu_i, \Sigma_i)$ represents a Gaussian distribution with mean $\mu_i$ and variance $\Sigma_i = \mathrm{diag}(\sigma_{i0}^2, \sigma_{i1}^2, \sigma_{i2}^2)$ . $\mu_i \in \mathbb{R}^3$ is uniformly sampled from $[0,1]^3$ , $\sigma_{ij}$ is uniformly sampled from $[0,0.5]$ , $p_i \in \mathbb{R}$ is uniformly sampled from $[0.2,1]$ and normalized such that $\int_{\Omega} f(x) dx = 1$ . Thus the source distribution $\mu$ is a complicated Gaussian mixture distribution restricted on $\Omega$ . Then by mirror reflection in Sec. 3.2, we obtain the complex density function which is defined on $[-1,1]^3$ and satisfies the periodic boundary condition. + +We directly use the FFT-OT algorithm Alg. 1 to solve the linearized Monge-Ampère equation. With the approximation error threshold $\varepsilon = 1.0 \times 10^{-6}$ and the resolution $256 \times 256 \times 256$ , the running time for our FFT-OT algorithm with double precision on GPU is less than 175 seconds.
The conventional convex geometry based algorithm for 3D optimal transportation Levy (2015) can neither handle such large data sets nor be implemented on GPUs. It can only compute OT map with resolution no greater than $100 \times 100 \times 100$ on our system, which takes about 2700 seconds. When handling problem with $128 \times 128 \times 128$ resolution, our FFT-OT consumes about + +![](images/3bdbb91d57bc03d187391b82d1f4bbac774ffb0ed2c127ee3cfab9ecf8513c36.jpg) +Figure 1: Convergence Analysis. + +20.3 seconds, which is $130 \times$ faster than the power diagram based method Levy (2015). + +Fig. 1 shows the approximation error for the above Gaussian mixture density with respect to iterations, namely $\log \| f - \rho_n\| _2^2$ . Our algorithm does converge linearly and the result is consistent with the prediction Eqn. (13) in Thm. 3. Therefore, this experiment validates the theorem. + +# 4.2 3D ADAPTIVE SAMPLING + +Generating random samples matching a given density function plays an essential role in the applications like Monte-Carlo integration or stippling. Efficiently obtaining high quality samples is still an on-going research topic Bauer et al. (2015); Perrier et al. (2018). And optimal transportation has been successfully applied for generating high quality 2D samples de Goes et al. (2012); Nader & Guennebaud (2018). Most of the current research focuses on generating 2D samples fitting the given density function. Here we apply the proposed 3D FFT-OT method to generate high quality 3D samples according to the given complex density functions. To the best of our knowledge, it is the first work that uses OT to sample from 3D density functions. + +Suppose the source probability distribution $d\mu (x) = f(x)dx$ is defined on $\Omega = [0,1]^3$ with $\mu (\Omega) = 1$ . The target distribution $d\nu (y) = dy$ is the uniform distribution. We use the FFT-OT algorithm Alg. 1 to compute the OT map $T:\Omega \to \Omega ,T_{\#}\mu = \nu$ . 
The domain is tessellated to a $256\times 256\times 256$ grid. For each $x_{ijk},i,j,k\in \{0,1,\ldots ,255\}$ , the image $T(x_{ijk})$ can be obtained. We use $\{T(x_{ijk})\}$ as vertices to compute the Delaunay triangulation of $\Omega$ . Then representing the OT map $T:(\Omega ,\mu)\rightarrow (\Omega ,\nu)$ as a piecewise linear map, the restriction of $T$ on each tetrahedron is a linear map. Then the inverse OT map $T^{-1}:(\Omega ,\nu)\to (\Omega ,\mu)$ is also a piecewise linear map. Namely, given a grid point $y_{mnl}$ , we can find a tetrahedron containing it. Suppose the vertices of the tetrahedron are $\{T(x_i),T(x_j),T(x_k),T(x_l)\}$ , then $y_{mnl}$ is computed as + +$$ +y _ {m n l} = \lambda_ {i} T (x _ {i}) + \lambda_ {j} T (x _ {j}) + \lambda_ {k} T (x _ {k}) + \lambda_ {l} T (x _ {l}), +$$ + +![](images/9e23e79754394ea47c674701a280bdd55feaa52de2d09dfbac5d654f3479e131.jpg) + +![](images/6c4973590e35572795f31cbf0ccd3c7259833b41ad4e2dbbced9a255031e0226.jpg) +(a) Density + +![](images/991c27da91d2b9a35751811c8d8b66d71b4f0b1d623541b6f4a316a83e509cc3.jpg) + +![](images/1c460ddc7081f3eddd7743544cfbccc8449855610c14e1204499168b0c73fe58.jpg) +(b) Rejection + +![](images/42274fe3238f11b799d712596f55048296cca366c58c386d20c9588fa5a82ca7.jpg) + +![](images/6393df53e6e6e271b41246b6537f60061ba7c4fb93111daf4b4d0b10c4620dfc.jpg) +(c) MH +Figure 2: 3D density function sampling. (a) The density functions in a slice. The slices in each row come from two different density functions. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with $T^{-1}$ . (f) The sampling results by mapping the grid centers back with $T^{-1}$ . The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better. 
+ +![](images/1386ac29405621587ac210b503a5dea958f573ac4514d9efc83e5c0d92511114.jpg) + +![](images/eff55229582a2e4bb9d8e713da05b6ad53348e1f5cfef7d1f2838fb12460df68.jpg) +(d) Slice + +![](images/d85bc0ba5e2c8b1233922b11ac9edf9eca3bde735bd03384e7b6bf4159723282.jpg) + +![](images/8e2babcbb74fefc8012dd3da3d63874b37804f4626866aa9648c765bad8df38a.jpg) +(e) Ours-R + +![](images/12b78ae2688e92da4208cb0e7c2c9586e51e58ef5e36438feac9884a3758799f.jpg) + +![](images/8a65f5c0f7af9a0785980ddfcfbb259e088777602f0ed19e24504b5b81ae417e.jpg) +(f) Ours-G + +where the non-negative barycenter coordinates satisfy $\lambda_{i} + \lambda_{j} + \lambda_{k} + \lambda_{l} = 1$ . Then the image of the inverse OT map is given by + +$$ +T ^ {- 1} \left(y _ {m n l}\right) = \lambda_ {i} x _ {i} + \lambda_ {j} x _ {j} + \lambda_ {k} x _ {k} + \lambda_ {l} x _ {l}. \tag {20} +$$ + +We generate random samples $\{y_k\}$ according to the uniform distribution $\nu$ on $\Omega$ , then their images $\{T^{-1}(y_k)\}$ are the desired random samples following the distribution $\mu$ . + +In our experiment, we use the same Gaussian mixture settings of the density function as Sec. 4.1. Fig. 2 visualizes the generated samples. We randomly pick the $k$ -th slice along the $z$ -direction from the discretized volume, draw the source density function on this slice, and use pixel intensity to represent the density in Fig. 2(a). (i) We uniformly generate $100k$ random samples $\{y_k\} \subset \Omega$ , and obtain the desired random samples by applying the inverse OT map $\{T^{-1}(y_k)\}$ . (ii) We also set $\{y_k\}$ as the grid centers of $\Omega$ and obtain the corresponding samples of the desired distribution $\mu$ . The samples around the $k$ -th slice of both sampling strategies are plotted in Fig. 2(e) and Fig. 2(f). + +By visual comparison, it is obvious that the distributions of Fig. 2(e) and Fig. 2(f) are consistent with the density function in Fig. 2(a). The consistency of the boundary of Fig. 
2(e) and (f) and Fig. 2(a) also verifies the obliqueness boundary condition of the Monge-Ampère equation. To further show the performance of the proposed method, we compare it with the classical sampling methods, namely rejection sampling, the Metropolis-Hastings algorithm Bishop (2006) and the slice sampling Neal (2003), shown in Fig. 2(b), Fig. 2(c) and Fig. 2(d). To quantitatively compare the sampling results, we use the Chi-square goodness-of-fit test, which firstly groups the data and then computes the $L^2$ norm of the difference between the actual number of observations in each group and the expected number of observations. In our experiment, we set the group number to $64 \times 64 \times 64$ and use 500K samples to make the comparison. The corresponding $L^2$ norm of each method is shown in the top-right of the corresponding figure. We can see that both sampling strategies of our method give smaller scores than the classical ones. + +# 4.3 VOLUMETRIC MAGNIFIER + +In reality, physical magnifiers can only magnify planar images. In medical image processing, it is highly desirable to magnify certain regions of the 3D MRIs or CT images. Our algorithm can address such requests with the user prescribed region of interest (ROI) and magnifying factor. Suppose the ROI is a symmetric region with the center $(\bar{x},\bar{y},\bar{z})\in \Omega$ and the radius $\sigma_x,\sigma_y,\sigma_z$ in different directions. The density function $f$ of the source measure $\mu$ is defined as + +$$ +f (x, y, z) = 0.5 + 0.5 e ^ {- ((x - \bar {x}) ^ {2} / 2 \sigma_ {x} ^ {2} + (y - \bar {y}) ^ {2} / 2 \sigma_ {y} ^ {2} + (z - \bar {z}) ^ {2} / 2 \sigma_ {z} ^ {2})} +$$ + +We compute the OT map $T: (\Omega, \mu) \to (\Omega, \nu)$ , where $\nu$ is the uniform distribution. Similar to the method in 3D adaptive sampling, we compute the Delaunay triangulation of the images $\{T(x_{ijk})\}$ , then the OT map $T$ is represented as a piecewise linear map.
The inverse optimal transportation map + +![](images/9617a55da512e830fdb2957b053afbf67cde99d45ee37404750cdf5b376f62f5.jpg) +Figure 3: The volume magnifier of an aneurysm. The first column shows the original volumetric data, and the last three columns give the magnified data from the same viewpoints with different magnifying ratios. The yellow circle denotes the ROI/aneurysm. To obtain the results, we set $\sigma = \sigma_{x} = \sigma_{y} = \sigma_{z}$ , and they are 0.83, 0.75 and 0.5 respectively. + +![](images/69a02b53691bc471fb821d5c6db1f725236ce6031bb0554dec9901a519abf12c.jpg) + +![](images/386aab8fb7671eec7accf9774320c70703684277bb174009e9564739ebd35a58.jpg) + +![](images/f7423a5b99e94f5d341b8159bc08750b17410a0ccaf0730d033af287440fc51a.jpg) + +![](images/fdcd37e360c4edc5d7205291a179e64e74a004e8852cee7f98b9ecdc15549027.jpg) + +![](images/d3a86e9df0f9fb6cc6fd84a90e8d1778a943ac75a38ee32c4e3d8969df70c85c.jpg) + +![](images/ebdff74f3bbc7c0e5b13f7d1bf88838e5fe7f7443c9a08fbce7813d4e1a0a048.jpg) + +![](images/3a119ac03a5c3ee978ad5e6cd7f6c4ada4884521e6cfa58455f65ee717b8c464.jpg) + +![](images/b5c18de141438767f08b957306c042f2c43ef845f677c23d259a0301c1abc03b.jpg) +Figure 4: The volume magnifier of the knee. The first row gives the original volumetric data with different ROIs denoted by the blue boxes from different viewpoints, and the second row shows the corresponding magnified results. In the experiments we set $\sigma_{x} = \sigma_{y} = \sigma_{z} = 0.75$ . + +![](images/7b8683c61289f9240fae83afa24973daf767edbc881a65835aed49339dc711b1.jpg) + +![](images/5e2ab732a05ed454bef976b47f9fba3031bd402970ee1fba72556b924a40eb13.jpg) + +![](images/6447c7ba8e3eff44e23870d8d42c9cf37c5a1d7ecfa3d6fa64a0ce607cbb3218.jpg) + +$T^{-1}:(\Omega ,\nu)\to (\Omega ,\mu)$ is also piecewise linear. For each grid point $y_{mnl}\in \Omega$ we use Eqn. (20) to find its pre-image. Similarly, its corresponding intensity $I_{mnl}$ is computed by linear interpolation. 
Then we obtain the new volumetric data $\{I_{mnl}\}$ with the magnified ROI and visualize the result with Voreen Meyer-Spradow et al. (2009). + +Fig. 3 demonstrates our volumetric magnifier by magnifying an aneurysm on blood vessel Hansen & Johnson (2004). We choose the aneurysm region as the ROI. The first column gives the snapshot of the blood vessel, and the yellow circle denotes the location of the aneurysm. The last three columns show the magnified aneurysm with different magnifying ratio from the same viewpoints. Moreover, we show the magnified volumetric knee from different viewpoints with different ROIs denoted by the blue boxes in Fig. 4. Our method only magnifies the ROIs and keeps other regions unchanged. Compared with the traditional method requiring tedious zoom in/out, our method only magnifies the ROI region and keeps the whole subject in the field of view, which enables doctors to visualize the overall anatomy while scrutinize detailed anatomical structure at the same time. + +# 5 CONCLUSION + +In this paper, we propose the FFT-OT method to solve the optimal transportation problem. According to the Brenier theory, under the quadratic distance cost, finding the solution to the OT problem is equivalent to solving the Monge-Ampère equation, which can be linearized as a sequence of variant coefficient elliptic PDEs. Later, the variant coefficient PDEs are approximated by constant coefficient PDEs and solved by Fast Fourier Transformation. We also prove that the proposed method converges linearly. Experiments on volumetric data show that the FFT-OT can be used to sample from complex 3D density functions and magnify the volumetric data in medical images. + +# ACKNOWLEDGEMENT + +This research was partially supported by National Key R&D Program of China 2021YFA1003003 and NSFC No. 61936002, T2225012. This work was also partially supported by NIH 3R01LM012434-05S1, 1R21EB029733-01A1, NSF FAIN-2115095 and NSF CMMI-1762287. + +# REFERENCES + +Mokhtar Z. 
Alaya, Maxime Berar, Gilles Gasso, and Alain Rakotomamonjy. Screening sinkhorn algorithm for regularized optimal transport. In Advances in Neural Information Processing Systems 32, 2019. +Jose I. Aliaga, Ernesto Dufrechou, Pablo Ezzatti, and Enrique S. Quintana-Orti. An efficient gpu version of the preconditioned gmres method. The Journal of Supercomputing, 75, 2019. +Jason Altschuler, Jonathan Niles-Weed, and Philippe Rigollet. Near-linear time approximation algorithms for optimal transport via sinkhorn iteration. In Advances in Neural Information Processing Systems 30, 2017. +Jason Altschuler, Francis Bach, Alessandro Rudi, and Jonathan Niles-Weed. Massively scalable sinkhorn distances via the nystrom method. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper/2019/file/f55cadb97eaff2ba1980e001b0bd9842-Paper.pdf. +Dongsheng An, Yang Guo, Na Lei, Zhongxuan Luo, Shing-Tung Yau, and Xianfeng Gu. Ae-ot: A new generative model based on extended semi-discrete optimal transport. In International Conference on Learning Representations, 2020. +Dongsheng An, Na Lei, and Xianfeng Gu. Efficient optimal transport algorithm by accelerated gradient descent. In The Thirty-Sixth AAAI Conference on Artificial Intelligence (AAAI), 2022. +Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In ICML, pp. 214-223, 2017. +F. Aurenhammer, F. Hoffmann, and B. Aronov. Minkowski-type theorems and least-squares clustering. Algorithmica, 1998. +Martin Bauer, Sarang Joshi, and Klas Modin. Diffeomorphic density matching by optimal information transport. SIAM Journal on Imaging Sciences, 8, 2015. +J.D. Benamou, Y. Brenier, and K. Guittet. The Monge-Kantorovitch mass transfer and its computational fluid mechanics formulation. 
International Journal for Numerical Methods in Fluids, 2002. +Jean-David Benamou, Brittany D. Froese, and Adam M. Oberman. Numerical solution of the optimal transportation problem using the monge-ampère equation. J. Comput. Phys, 2014. +Christopher M. Bishop. Pattern Recognition and Machine Learning. Springer, 2006. +Y. Brenier. Polar decomposition and increasing rearrangement of vector fields. C. R. Acad. Sci. Paris Sr. I Math., 305(19):805-808, 1987. +Y. Brenier. Polar factorization and monotone rearrangement of vector-valued functions. Comm. Pure Appl. Math., 44(4):375-417, 1991. +Dario Cordero-Erausquin. Sur le transport de mesures periodiques monotone maps preserving periodic measures. Comptes Rendus de l'Académie des Sciences - Series I - Mathematics, 329: 199-202, 1999. +N. Courty, R. Flamary, D. Tuia, and A. Rakotomamonjy. Optimal transport for domain adaptation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 39(9):1853-1865, 2017. + +Marco Cuturi. Sinkhorn distances: Lightspeed computation of optimal transportation distances. In International Conference on Neural Information Processing Systems, 2013. +F. de Goes, K. Breeden, V. Ostromoukhov, and M. Desbrun. Blue noise through optimal transport. ACM Trans. Graph. (SIGGRAPH Asia), 31, 2012. +Philippe Delanoë. Classical solvability in dimension two of the second boundary-value problem associated with the Monge-Ampère operator. Annales de l'I.H.P. Analyse non linéaire, 8(5): 443-457, 1991. +Pavel Dvurechensky, Alexander Gasnikov, and Alexey Kroshnin. Computational optimal transport: Complexity by accelerated gradient descent is better than by sinkhorn's algorithm. In Proceedings of the 35th International Conference on Machine Learning. PMLR, 2018. +Suli Endre. Lecture Notes on Finite Element Methods for Partial Differential Equations. University of Oxford, 2020. +David Xianfeng Gu, Feng Luo, Jian Sun, and Shing-Tung Yau. 
Variational principles for minkowski type problems, discrete optimal transport, and discrete monge-ampère equations. *Asian Journal of Mathematics*, 2016. +Charles D. Hansen and Chris R. Johnson. Visualization Handbook. Academic Press, 2004. +Jun Kitagawa, Quentin Mérigot, and Boris Thibert. Convergence of a newton algorithm for semi-discrete optimal transport. Journal of the European Mathematical Society, 2019. +Na Lei and Xianfeng Gu. Fft-ot: A fast algorithm for optimal transportation. In Proceedings of International Conference on Computer Vision (ICCV), 2021. +Bruno Levy. A numerical algorithm for l2 semi-discrete optimal transport in 3d. ESAIM: M2AN, 49 (6):1693-1715, 2015. +Grégoire Loeper and Francesca Rapetti. Numerical solution of the monge-ampère equation by a newton's algorithm. C. R. Acad. Paris, pp. 319-324, 2005. +Robert J. McCann. A convexity principle for interacting gases. Advances in mathematics, 128:153-179, 1997. +Quentin Merigot. A multiscale approach to optimal transport. Computer Graphics Forum., 2011. +Jennis Meyer-Spradow, Timo Ropinski, Jörg Mensmann, and Klaus H. Hinrichs. Voreen: A rapid-prototyping environment for ray-casting-based volume visualizations. IEEE Computer Graphics and Applications, 2009. +Georges Nader and Gael Guennebaud. Instant transport maps on 2d grids. ACM Trans. Graph., 37 (6), 2018. +Radford M. Neal. Slice sampling. The Annals of Statistics, 2003. +Nicolas Papadakis, Gabriel Peyre, and Edouard Oudet. Optimal transport with proximal splitting. SIAM Journal on Imaging Sciences, 2014. +Hélène Perrier, David Coeurjolly, Feng Xie, Matt Pharr, Pat Hanrahan, and Victor Ostromoukhov. Sequences with low-discrepancy blue-noise 2-d projections. Computer Graphics Forum, 2018. +Gabriel Peyre and Marco Cuturi. Computational optimal transport. Found. Trends Mach. Learn., 11 (5-6):355-607, 2019. +Yousef Saad. Iterative Methods For Sparse Linear Systems. Society of Industrial and Applied Mathematics, 2003. +Filippo Santambrogio. 
Optimal Transport for Applied Mathematicians. Springer, 2015. +Louis-Philippe Saumier, Martial Agueh, and Boualem Khouider. An efficient numerical algorithm for the $l^2$ optimal transport problem with periodic densities. IMA Journal of Applied Mathematics, 80:135-157, 2013. + +Yuliy Schwartzburg, Romain Testuz, Andrea Tagliasacchi, and Mark Pauly. High-contrast computational caustic design. ACM Trans. Graph., 33(4), July 2014. ISSN 0730-0301. doi: 10.1145/2601097.2601200. URL https://doi.org/10.1145/2601097.2601200. +Justin Solomon, Fernando de Goes, Gabriel Peyré, Marco Cuturi, Adrian Butscher, Andy Nguyen, Tao Du, and Leonidas Guibas. Convolutional Wasserstein distances: Efficient optimal transportation on geometric domains. ACM Transactions on Graphics (TOG), 2015. +Kehua Su, Wei Chen, Na Lei, Junwei Zhang, Kun Qian, and Xianfeng Gu. Volume preserving mesh parameterization based on optimal mass transportation. Comput. Aided Des., 82:42-56, 2017. +Ilya Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Schoelkopf. Wasserstein auto-encoders. In ICLR, 2018. +Cédric Villani. Topics in Optimal Transportation. AMS, 2003. +Cédric Villani. Optimal transport: old and new, volume 338. Springer Science & Business Media, 2008. + +# A RELATED WORK + +There is a huge literature about optimal transportation. Here we will only briefly review the most related works. For detailed reviews, we refer readers to Santambrogio (2015); Peyre & Cuturi (2019). + +The first type of algorithms is based on the Kantorovich theory. When both the input and output domains are Dirac masses, the Kantorovich problem can be treated as a standard linear programming (LP) task. In order to tackle large data sets, Cuturi (2013) adds an entropic regularizer to the original LP problem and the regularized problem can be quickly solved by the Sinkhorn algorithm. 
Recently, various algorithms have been proposed to further accelerate the computation by improving the efficiency of matrix-vector multiplications, including the Greenkhorn Altschuler et al. (2017), Screenkhorn Alaya et al. (2019) and the NYS-SINK Altschuler et al. (2019) algorithms. Dvurechensky et al. Dvurechensky et al. (2018) also propose the adaptive primal-dual accelerated gradient descent algorithm (APDAGD) to solve the discrete OT problem. An et al. An et al. (2022) compute the approximate OT plan by smoothing the dual Kantorovich problem and solving it with the FISTA method. This kind of method has limitations: (i) they only give transport plans and cannot produce the bijective transportation maps; and (ii) the computational complexity is too high to apply them in scenarios with a huge number of samples. + +The second type of algorithms is based on the Brenier theory Brenier (1987) and its intrinsic connection with convex geometry Gu et al. (2016). The semi-discrete OT algorithm proposed in Aurenhammer et al. (1998) finds the transport map between a continuous distribution and a discrete measure via a variational approach by dynamically constructing the power diagrams. Its efficiency can be further improved Levy (2015); Merigot (2011) by the multi-resolution strategy. The algorithms proposed in Kitagawa et al. (2019); Su et al. (2017) also improve the efficiency by applying the Newton's method. When both the source and target measures are continuous, some interpolation methods are necessary Schwartzburg et al. (2014). The major drawback of this type of algorithms is the high computational complexity of constructing the dynamic power diagram, which prevents them from handling high dimensional tasks. For example, for the 3D OT problems, these algorithms usually run very slowly. + +The third type of algorithms is based on computational fluid dynamics Benamou et al. (2002); Papadakis et al. (2014). 
These methods aim at finding a special temporal-spatial flow field that transports the initial source density to the target density with the minimal total kinetic energy. Then the diffeomorphism induced by the flow gives the optimal transport map under the quadratic Euclidean distance cost. However, this kind of algorithm is difficult to extend to high dimensional space. + +The fourth type of algorithms directly solve the Monge-Ampère equation using numerical methods. Loeper and Rapetti Loeper & Rapetti (2005) propose to solve the linearized Monge-Ampère equation defined on a flat torus in each iteration. Its corresponding variant coefficient elliptic PDE is converted to a positive definite linear system using the finite-difference scheme, which can be solved by the BiCG algorithm Endre (2020). Benamou et al. Benamou et al. (2014) propose to solve the linearized Monge-Ampère on more general domains using Newton's method. Nader and Guennebaud Nader & Guennebaud (2018) apply a similar discretization strategy and solve the Monge-Ampère equation by the conjugate gradient method. Saumier et al. Saumier et al. (2013) propose to solve the linearized Monge-Ampère equation using FFT. In each iteration the elliptic PDE with spatial and temporal variant coefficients is converted to a group of linear equations in the frequency domain, which is solved by the GMRES algorithm. Although the GMRES algorithm can be implemented on GPUs Aliaga et al. (2019), there is no available open source code. The work in Saumier et al. (2013) focuses on periodic boundary condition, but our proposed work focuses on general second boundary condition; the work in Saumier et al. (2013) concerns planar OT maps, ours emphasizes volumetric OT maps, which has higher complexity. The work in Saumier et al. (2013) can handle more general target measures, while the proposed work currently only deals with the Lebesgue target measure. 
Nevertheless, the current work can be directly generalized to handle general target measures as well. Lei and Gu Lei & Gu (2021) use the fixed point method to compute the 2-dimensional OT problem based on FFT, but it cannot be extended to solve the 3-dimensional problems. + +In this work, we combine the idea of linearizing the Monge-Ampère equation Loeper & Rapetti (2005) and the idea of FFT Saumier et al. (2013). The key novelty of our proposed method is to use the mean linearized Monge-Ampère operator Eqn. (12) to replace the conventional linearized + +Monge-Ampère operator Eqn. (10). This replacement allows the algorithm to be implemented on GPUs and makes the algorithm hundreds of times faster. In the following, we compute the 3-dimensional optimal transport problem by applying the proposed algorithm. Our method also runs more than $100 \times$ faster than the convex geometry based method Levy (2015). + +# B APPENDIX THEORY + +In this section, we give the detailed proofs for several lemmas and theorems. Some of them are well known in the Monge-Ampère PDE field and the applied mathematics field; we include them for completeness. + +# B.1 EXISTENCE OF THE SOLUTION TO THE TIME DEPENDENT MONGE-AMPERE EQUATION + +Let $\mathbb{T}^n = \mathbb{R}^n / \mathbb{Z}^n$ be the $n$ -dimensional flat torus. Below we sometimes identify it with $\Omega = [0,1]^n$ and assume all data are periodic. The existence and regularity of solutions to the Monge-Ampère equation are given by the following theorem, + +Theorem 5. Suppose a positive density function $f: \Omega \to \mathbb{R}$ is defined on $\Omega = [0,1]^n$ , such that $\int_{\Omega} f(x) dx = 1$ , and $f \in C^{\alpha}(\Omega)$ , then the solution $u: \Omega \times [0,1] \to \mathbb{R}$ to the time-dependent Monge-Ampère equation + +$$
\det D _ {x} ^ {2} u (x, t) = (1 - t) + t f (x), \quad \nabla_ {x} u (x, t) (\Omega) = \Omega \tag {21}
$$ + +exists and is unique up to a constant. 
Furthermore, there exist constants $0 < \lambda < \Lambda$ , such that + +$$
\lambda \sum_ {p = 1} ^ {n} \xi_ {p} ^ {2} \leq \sum_ {p, q = 1} ^ {n} u ^ {p q} (x, t) \xi_ {p} \xi_ {q} \leq \Lambda \sum_ {p = 1} ^ {n} \xi_ {p} ^ {2}, \quad \forall \xi \in \mathbb {R} ^ {n}, \forall (x, t) \in \Omega \times [ 0, 1 ]. \tag {22}
$$ + +We refer readers to Cordero-Erausquin (1999) for a detailed proof. + +Weak Solution In practice, we compute the weak solution of the linearized Monge-Ampère Eqn. (6) using numerical methods. We first rewrite the differential operator to a divergence form, then define a bi-linear form. + +Since $(u^{pq}(x,t))$ is the adjoint matrix of $D_x^2 u(x,t)$ , by direct computation, we obtain + +$$
\sum_ {p = 1} ^ {n} \partial_ {p} u ^ {p q} (x, t) = 0, \quad \forall (x, t) \in \Omega \times [ 0, 1 ], \quad \forall q = 1, \dots , n. \tag {23}
$$ + +so Eqn. (6) can be converted into the divergence form: + +$$
\sum_ {p = 1} ^ {n} \partial_ {p} \left(\sum_ {q = 1} ^ {n} u ^ {p q} \partial_ {q} v\right) = \sum_ {p, q = 1} ^ {n} u ^ {p q} \partial_ {p} \partial_ {q} v + \sum_ {q = 1} ^ {n} \left(\sum_ {p = 1} ^ {n} \partial_ {p} u ^ {p q}\right) \partial_ {q} v = \sum_ {p, q = 1} ^ {n} u ^ {p q} \partial_ {p} \partial_ {q} v,
$$ + +we obtain + +$$
\sum_ {p = 1} ^ {n} \partial_ {p} \left(\sum_ {q = 1} ^ {n} u ^ {p q} (x, t) \partial_ {q} v (x, t)\right) = f (x) - 1. \tag {24}
$$ + +with Neumann boundary condition + +$$
\frac {\partial v (x , t)}{\partial \mathbf {n}} = 0, \quad \forall (x, t) \in \partial \Omega \times [ 0, 1 ]. 
\tag {25} +$$ + +For any $w\in H^{1}(\Omega)$ , by differentiation of product, we obtain + +$$ +\sum_ {p = 1} ^ {n} \partial_ {p} \left(\sum_ {q = 1} ^ {n} u ^ {p q} \partial_ {q} v\right) w + \sum_ {p = 1} ^ {n} \left(\sum_ {q = 1} ^ {n} u ^ {p q} \partial_ {q} v\right) \partial_ {p} w = \sum_ {p = 1} ^ {n} \partial_ {p} \left[ \left(\sum_ {q = 1} ^ {n} u ^ {p q} \partial_ {q} v\right) w \right] +$$ + +by integrating both sides, and from the fact that $v$ satisfies the Neumann boundary condition, we deduce + +$$ +\int_ {\Omega} \sum_ {p = 1} ^ {n} \partial_ {p} \left(\sum_ {q = 1} ^ {n} u ^ {p q} \partial_ {q} v\right) w + \int_ {\Omega} \sum_ {p, q = 1} ^ {n} u ^ {p q} \partial_ {q} v \partial_ {p} w = \int_ {\partial \Omega} \sum_ {p = 1} ^ {n} \left(\sum_ {q = 1} ^ {n} u ^ {p q} \partial_ {q} v\right) w = 0. \tag {26} +$$ + +For any fixed time $t \in [0,1]$ , by the divergence form, we can construct a bilinear form $a: H^1(\Omega) \times H^1(\Omega)$ and a linear form $l: H^1(\Omega) \to \mathbb{R}$ , + +$$ +a (v, w) = \sum_ {p, q = 1} ^ {n} \int_ {\Omega} u ^ {p q} \partial_ {p} v \partial_ {q} w, \quad l (w) = - \int_ {\Omega} (f - 1) w d x. \tag {27} +$$ + +A weak solution to Eqn. (24) is a function $v \in H^{1}(\Omega)$ , such that + +$$ +a (v, w) = l (w), \quad \forall w \in H ^ {1} (\Omega). \tag {28} +$$ + +By the uniform ellipticity Eqn. (22), the Lax-Milgram theorem Endre (2020) shows the existence of the weak solution. + +# B.2 DISCRETE LINEARIZED MONGE-AMPERE EQUATION SOLVABILITY + +Galerkin Method In practice, we construct a triangulation $\mathcal{T}$ of $\Omega$ , such that the ratio between the diameter and inscribe-sphere radius of each simplex is bounded, and variation of the diameters of all the simplexes is small. We call such kind of $\mathcal{T}$ a quasi-uniform triangulation, and denote the largest diameter as $h$ . 
For each vertex $v_{i} \in \mathcal{T}$ , we construct a piecewise linear base function $\varphi_{i}$ , such that $\varphi_{i}$ is linear on each triangle, $\varphi_{i}(v_{j})$ is $\delta_{ij}$ . We define a finite dimensional subspace $V_{h} \subset H^{1}(\Omega)$ , + +$$ +V _ {h} := \left\{v _ {h} (x) := \sum_ {v _ {i} \in \mathcal {T}} \lambda_ {i} \varphi_ {i} (x), \lambda_ {i} \in \mathbb {R} \right\}. +$$ + +Given a function $u \in H^{1}(\Omega)$ , we use $u_{h} \in V_{h}$ to denote its approximation in $V_{h}$ . Furthermore, $u_{h} = \sum_{i} \lambda_{i} \varphi_{i}$ , we also use $u_{h}$ to represent the coefficient vector $(\lambda_1, \lambda_2, \dots, \lambda_k)^T$ depending on the context. The weak solution Eqn. (28) to the Monge-Ampère equation (6) is equivalent to find a $v \in H^{1}(\Omega)$ , such that $a(v, w) = l(w)$ for all $w \in H^{1}(\Omega)$ . In discrete cases, we want to find $v_{h} \in V_{h}$ , such that + +$$ +a \left(v _ {h}, w _ {h}\right) = l \left(w _ {h}\right), \quad \forall w _ {h} \in V _ {h}. \tag {29} +$$ + +Eqn. (29) is equivalent to the linear system, + +$$ +\left( \begin{array}{c c c c} a \left(\varphi_ {1}, \varphi_ {1}\right) & a \left(\varphi_ {2}, \varphi_ {1}\right) & \dots & a \left(\varphi_ {N}, \varphi_ {1}\right) \\ a \left(\varphi_ {1}, \varphi_ {2}\right) & a \left(\varphi_ {2}, \varphi_ {2}\right) & \dots & a \left(\varphi_ {N}, \varphi_ {2}\right) \\ \vdots & \vdots & & \vdots \\ a \left(\varphi_ {1}, \varphi_ {N}\right) & a \left(\varphi_ {2}, \varphi_ {N}\right) & \dots & a \left(\varphi_ {N}, \varphi_ {N}\right) \end{array} \right) \left( \begin{array}{c} \lambda_ {1} \\ \lambda_ {2} \\ \vdots \\ \lambda_ {N} \end{array} \right) = \left( \begin{array}{c} l \left(\varphi_ {1}\right) \\ l \left(\varphi_ {2}\right) \\ \vdots \\ l \left(\varphi_ {N}\right) \end{array} \right) \tag {30} +$$ + +From the weak solution to the linearized Monge-Ampère equation (10), we obtain the linear system Eqn. (30). 
We denote the stiffness matrix $A = (a(\varphi_i, \varphi_j))$ . By the uniform ellipticity Eqn. (22), and $V_h \subset H^1(\Omega)$ + +$$
a (v, v) \geq \lambda \| \nabla v \| _ {L ^ {2} (\Omega)} ^ {2}
$$ + +Assume $\int_{\Omega} v dx = 0$ , by Poincaré inequality, + +$$
\| \nabla v \| _ {L ^ {2} (\Omega)} ^ {2} \geq C _ {1} (\Omega) \| v \| _ {L ^ {2} (\Omega)} ^ {2}, \quad \forall v \in H ^ {1} (\Omega), \int_ {\Omega} v d x = 0,
$$ + +where the constant $C_1(\Omega)$ depends on $\Omega$ . Combining the above two inequalities, we obtain + +$$
a (v, v) \geq c \| v \| _ {L ^ {2} (\Omega)} ^ {2}, \quad \forall v \in H ^ {1} (\Omega), \int_ {\Omega} v d x = 0. \tag {31}
$$ + +Similarly, by the uniform ellipticity Eqn. 22, and $V_{h}\subset H^{1}(\Omega)$ + +$$
a (v, v) \leq \Lambda \| \nabla v \| _ {L ^ {2} (\Omega)} ^ {2}
$$ + +For linear finite element and quasi-uniform triangulation, we have the inverse Poincaré inequality, + +$$
\left\| \nabla v _ {h} \right\| _ {L ^ {2}} ^ {2} \leq C _ {2} (\Omega) h ^ {- 2} \left\| v _ {h} \right\| _ {L ^ {2}} ^ {2}.
$$ + +where $h$ is the diameter of each element. Combining the above two inequalities, we obtain + +$$
a \left(v _ {h}, v _ {h}\right) \leq C \| v _ {h} \| _ {L ^ {2} (\Omega)} ^ {2}, \quad \forall v _ {h} \in V _ {h}. \tag {32}
$$ + +By combining the inequalities Eqn. (31) and Eqn. (32), we obtain + +$$
\frac {1}{C _ {3}} \| v _ {h} \| _ {L ^ {2} (\Omega)} ^ {2} \leq a (v _ {h}, v _ {h}) \leq C _ {3} \| v _ {h} \| _ {L ^ {2} (\Omega)} ^ {2}, \quad \forall v _ {h} \in V _ {h}, \int_ {\Omega} v _ {h} = 0, \tag {33}
$$ + +where $C_3 > 1$ is a constant. 
Suppose $v_{h} = \sum_{i = 1}^{n}\xi_{i}\varphi_{i}$ , then + +$$
\| v _ {h} \| _ {L ^ {2} (\Omega)} ^ {2} = \int_ {\Omega} v _ {h} ^ {2} d x = \sum_ {i, j = 1} ^ {n} \xi_ {i} \xi_ {j} \int_ {\Omega} \varphi_ {i} (x) \varphi_ {j} (x) d x = \xi^ {T} \Phi \xi ,
$$ + +where $\xi = (\xi_{i})$ and the matrix $\Phi = \left(\int_{\Omega}\varphi_{i}\varphi_{j}\right)$ is positive definite. Therefore, + +$$
\frac {1}{C _ {4}} \| \xi \| ^ {2} \leq \xi^ {T} \Phi \xi < C _ {4} \| \xi \| ^ {2}. \tag {34}
$$ + +By $a(v_h,v_h) = \xi^T A\xi$ , combining inequalities Eqn. (33) and Eqn. (34), we obtain + +$$
\frac {1}{C _ {3} C _ {4}} \| \xi \| ^ {2} \leq \xi^ {T} A \xi \leq C _ {3} C _ {4} \| \xi \| ^ {2}, \quad \forall \xi \in \mathbb {R} ^ {n}, \sum_ {i = 1} ^ {n} \xi_ {i} = 0, \tag {35}
$$ + +where $C_3C_4 > 1$ . This proves the following lemma, + +Lemma 6. By using the Galerkin method with linear elements to numerically approximate the weak solution Eqn. (28) to the linearized Monge-Ampère Eqn. (6), if the uniform ellipticity Eqn. (22) holds, and the triangulation $\mathcal{T}$ is quasi-uniform, then the stiffness matrix of the linear system Eqn. (30) is positive definite on the space $\sum_{i=1}^{n} \xi_i = 0$ , + +$$
\frac {1}{C _ {3} C _ {4}} \| \xi \| ^ {2} \leq \xi^ {T} A \xi \leq C _ {3} C _ {4} \| \xi \| ^ {2}, \quad \forall \xi \in \mathbb {R} ^ {n}, \sum_ {i = 1} ^ {n} \xi_ {i} = 0, \tag {36}
$$ + +where $C_3C_4 > 1$ . + +Since the uniform ellipticity Eqn. (22) holds for any time $t \in [0,1]$ , we obtain + +Corollary 7. By using the Galerkin method with linear elements on quasi-uniform triangulations, the linearized Monge-Ampère equation in the continuity method Eqn. (6) always has a solution $v_h \in V_h$ for any $t \in [0,1]$ . + +Please note that the central difference scheme can be treated as Galerkin's method on a special uniform triangulation. Therefore, the above estimates still hold. + +# B.3 CONVERGENCE RATE + +Theorem 8 (main). 
Given a domain $\Omega \subset \mathbb{R}^n$ , which is a canonical cuboid $\Omega = [-1,1]^n$ , and a positive density function $f:\Omega \to \mathbb{R}$ with the balance condition + +$$
\int_ {\Omega} f (x) d x = \int_ {\Omega} 1 \cdot d x,
$$ + +suppose the mirror reflection extension Eqn. (14) of $f$ to the flat torus $\tilde{f} : \mathbb{T}^n \to \mathbb{R}$ is $C^\alpha$ , $\alpha \in (0,1)$ , then the Monge-Ampère equation, + +$$
\det D ^ {2} u (x) = f (x), \quad \nabla u (\Omega) = \Omega
$$ + +can be solved using the FFT-OT Algorithm Alg. (1). In particular, one can choose the step length parameter $\tau$ , such that there is a constant $0 < \gamma < 1$ , the approximation error satisfies + +$$
\left\| f - \rho_ {k + 1} \right\| ^ {2} < C \gamma^ {k},
$$ + +namely the algorithm has a linear convergence rate. + +Proof. Suppose at the $k + 1$ -th iteration, $\rho_{k + 1} = \operatorname*{det}(I + D^2 u_{k + 1})$ , $\| v_k\| \sim O(\tau^{-1})$ + +$$
\begin{array}{l} f - \rho_ {k + 1} = f - \det (I + \mathcal {D} ^ {2} u _ {k} + \mathcal {D} ^ {2} v _ {k}) \\ = f - \det (I + \mathcal {D} ^ {2} u _ {k}) - \sum_ {p q} u _ {k} ^ {p q} \partial_ {p} \partial_ {q} v _ {k} + o (\tau^ {- 1}) \\ = \left(f - \rho_ {k}\right) - L _ {k} \left[ v _ {k} \right] + o \left(\tau^ {- 1}\right) \\ \end{array}
$$ + +where $L_{k}[v_{k}] = \sum_{pq}u_{k}^{pq}\partial_{p}\partial_{q}v_{k}$ . Hence by integration by parts Eqn. (27), + +$$
\begin{array}{l} \left\| f - \rho_ {k + 1} \right\| _ {L ^ {2} (\Omega)} ^ {2} = \left\| f - \rho_ {k} \right\| _ {L ^ {2} (\Omega)} ^ {2} - 2 \int_ {\Omega} L _ {k} [ v _ {k} ] (f - \rho_ {k}) + o (\tau^ {- 1}) \\ = \left\| f - \rho_ {k} \right\| _ {L ^ {2} (\Omega)} ^ {2} + 2 a _ {k} (f - \rho_ {k}, v _ {k}) + o (\tau^ {- 1}) \\ \end{array}
$$ + +where $a_{k}$ is the bilinear form in Eqn.(27). 
In the discrete case, all functions are in $V_{h}$ , we denote + +$$ +\| u _ {h} \| _ {\Phi} ^ {2} := \| u _ {h} \| _ {L ^ {2} (\Omega)} ^ {2} = u _ {h} ^ {T} \Phi u _ {h}, \quad \| u _ {h} \| ^ {2} := u _ {h} ^ {T} u _ {h}, \quad \| u _ {h} \| _ {A} ^ {2} := u _ {h} ^ {T} A u _ {h}, +$$ + +by the inequality Eqn. (34) and Eqn. 35, + +$$ +\frac {1}{C _ {4}} \| u _ {h} \| ^ {2} \leq \| u _ {h} \| _ {\Phi} ^ {2} \leq C _ {4} \| u _ {h} \| ^ {2}, \quad \frac {1}{C _ {3} C _ {4}} \| u _ {h} \| ^ {2} \leq \| u _ {h} \| _ {A} ^ {2} \leq C _ {3} C _ {4} \| u _ {h} \| ^ {2}. +$$ + +Therefore + +$$ +\left\| f _ {h} - \rho_ {h, k + 1} \right\| _ {\Phi} ^ {2} = \left\| f _ {h} - \rho_ {h, k} \right\| _ {\Phi} ^ {2} - 2 \tau^ {- 1} \left(f - \rho_ {h, k}\right) ^ {T} A _ {k} \bar {A} _ {k} ^ {- 1} \left(f _ {h} - \rho_ {h, k}\right) + o \left(\tau^ {- 1}\right), \tag {37} +$$ + +where $A_{k}$ is the stiffness matrix in Eqn.(30), and $\bar{A}_k$ is the mean stiffness matrix. (By the uniform ellipticity Eqn. (22), the eigen values of the adjoint matrix $(u^{pq})(x,t)$ is uniformly bounded away from zero in the space $\mathcal{H} := \{\xi \in \mathbb{R}^n | \sum_i \xi_i = 0\}$ , so the eigen value of the mean adjoint matrix $\bar{u}^{pq}(t)$ is bounded away from zero in $\mathcal{H}$ . After discretization, the eigen values of $\bar{A}_k$ is strictly positive in $\mathcal{H}$ , hence $\bar{A}_k$ is invertible in $\mathcal{H}$ . In the following discussion, the term $o(\tau^{-1})$ will be ignored.) Remark that the following displayed equation is a scalar + +$$ +\left(f _ {h} - \rho_ {h, k}\right) ^ {T} A _ {k} \bar {A} _ {k} ^ {- 1} (f - \rho_ {h, k}) = \mathrm {t r} \left(\left(f _ {h} - \rho_ {h, k}\right) ^ {T} A _ {k} \bar {A} _ {k} ^ {- 1} (f _ {h} - \rho_ {h, k})\right) +$$ + +Since $A_{k}$ and $\bar{A}_{k}$ are symmetric, positive definite on the space $\sum_{i}\xi_{i} = 0$ , $\| A_k\| _2\leq C_3C_4$ and $\| \bar{A}_k\| _2\leq C_3C_4$ , so are their inverses. 
Since $A_{k}$ and $\bar{A}_{k}$ are symmetric, positive definite on the space orthogonal to $(1,1,\ldots ,1)^T$ , by Eqn. (35) and $\| A_k\bar{A}_k^{-1}\| \leq \| A_k\| \| \bar{A}_k^{-1}\|$ , we have + +$$
\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}} \| f _ {h} - \rho_ {h, k} \| _ {\Phi} ^ {2} \leq \left(f _ {h} - \rho_ {h, k}\right) ^ {T} A _ {k} \bar {A} _ {k} ^ {- 1} (f _ {h} - \rho_ {h, k}).
$$ + +Plugging this into Eqn. (37), we have + +$$
\left\| f _ {h} - \rho_ {h, k + 1} \right\| _ {\Phi} ^ {2} \leq \left(1 - \frac {1}{\tau} \frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}}\right) \left\| f _ {h} - \rho_ {h, k} \right\| _ {\Phi} ^ {2} \leq \left(1 - \frac {1}{\tau} \frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}}\right) ^ {k} \left\| f _ {h} - \rho_ {h, 0} \right\| _ {\Phi} ^ {2}. \tag {38}
$$ + +We can choose the step-length $\tau^{-1}$ , such that $\gamma \in (0, 1)$ , where + +$$
\gamma = 1 - \frac {(n - 1)}{\tau C _ {3} ^ {2} C _ {4} ^ {3}}.
$$ + +Therefore + +$$
\left\| f _ {h} - \rho_ {h, k + 1} \right\| _ {\Phi} ^ {2} \leq \gamma^ {k} \left\| f _ {h} - \rho_ {h, 0} \right\| _ {\Phi} ^ {2} \leq C _ {4} \gamma^ {k} \left\| f _ {h} - \rho_ {h, 0} \right\| ^ {2}. \tag {39}
$$ + +![](images/9d32cf41dd420f054c911100857edaa1d69208dd0df5a90d593bd716b8f123a5.jpg) + +# B.4 DIFFERENTIAL OPERATOR USING FFT + +By using the Discrete Fourier Transformation, the differential operators can be converted to algebraic operators in the frequency domain. + +Lemma 9. 
Suppose the discrete function is $u_{i,j,k}$ , with discrete Fourier transformation + +$$ +u _ {i, j, k} = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} e ^ {\sqrt {- 1} \frac {2 \pi m i}{M}} e ^ {\sqrt {- 1} \frac {2 \pi n j}{N}} e ^ {\sqrt {- 1} \frac {2 \pi l k}{L}} +$$ + +then the differential operator using central difference $\partial_i\partial_i u_{i,j,k}$ is given by + +$$ +\begin{array}{l} \partial_ {i} \partial_ {i} u _ {i, j, k} = \frac {1}{h _ {x} ^ {2}} \left(u _ {i + 1, j, k} + u _ {i - 1, j, k} - 2 u _ {i, j, k}\right) \\ = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {2 \left(\cos \frac {2 \pi m}{M} - 1\right)}{h _ {x} ^ {2}} e ^ {\imath \frac {2 \pi m i}{M}} e ^ {\imath \frac {2 \pi n j}{N}} e ^ {\imath \frac {2 \pi l k}{L}} \\ \end{array} +$$ + +where $\iota = \sqrt{-1}$ , and $\partial_i\partial_ju_{i,j,k}$ is given by, + +$$ +\begin{array}{l} \partial_ {i} \partial_ {j} u _ {i, j, k} = \frac {1}{4 h _ {x} h _ {y}} \left(u _ {i + 1, j + 1, k} + u _ {i - 1, j - 1, k} - u _ {i + 1, j - 1, k} - u _ {i - 1, j + 1, k}\right) \\ = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {- \sin \frac {2 \pi m}{M} \sin \frac {2 \pi n}{N}}{h _ {x} h _ {y}} e ^ {\iota \frac {2 \pi m i}{M}} e ^ {\iota \frac {2 \pi n j}{N}} e ^ {\iota \frac {2 \pi l k}{L}} \\ \end{array} +$$ + +Proof. 
By equations + +$$ +\begin{array}{l} \cos (A + \alpha) + \cos (A - \alpha) - 2 \cos (A) \\ = (\cos A \cos \alpha - \sin A \sin \alpha) + (\cos A \cos \alpha + \sin A \sin \alpha) - 2 \cos A \\ = 2 (\cos \alpha - 1) \cos A \\ \end{array} +$$ + +and + +$$ +\begin{array}{l} \sin (A + \alpha) + \sin (A - \alpha) - 2 \sin (A) \\ = (\sin A \cos \alpha + \cos A \sin \alpha) + (\sin A \cos \alpha - \cos A \sin \alpha) - 2 \sin A \\ = 2 (\cos \alpha - 1) \sin A \\ \end{array} +$$ + +we obtain + +$$ +\frac {1}{h _ {x} ^ {2}} \left[ e ^ {\iota \frac {2 \pi m (i + 1)}{M}} + e ^ {\iota \frac {2 \pi m (i - 1)}{M}} - 2 e ^ {\iota \frac {2 \pi m i}{M}} \right] = \frac {2 \left(\cos \frac {2 \pi m}{M} - 1\right)}{h _ {x} ^ {2}} e ^ {\iota \frac {2 \pi m i}{M}} +$$ + +by direct computation, we have + +$$ +\begin{array}{l} \partial_ {i} \partial_ {i} u _ {i, j, k} = \frac {1}{h _ {x} ^ {2}} (u _ {i + 1, j, k} + u _ {i - 1, j, k} - 2 u _ {i, j, k}) \\ = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {e ^ {\iota \frac {2 \pi m (i + 1)}{M}} + e ^ {\iota \frac {2 \pi m (i - 1)}{M}} - 2 e ^ {\iota \frac {2 \pi m i}{M}}}{h _ {x} ^ {2}} e ^ {\iota \frac {2 \pi n j}{N}} e ^ {\iota \frac {2 \pi l k}{L}} \\ = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {2 (\cos \frac {2 \pi m}{M} - 1)}{h _ {x} ^ {2}} e ^ {\iota \frac {2 \pi m i}{M}} e ^ {\iota \frac {2 \pi n j}{N}} e ^ {\iota \frac {2 \pi l k}{L}} \\ \end{array} +$$ + +Similarly, by equations + +$$ +\begin{array}{l} \cos (A + \alpha + B + \beta) + \cos (A - \alpha + B - \beta) - \cos (A + \alpha + B - \beta) - \cos (A - \alpha + B + \beta) \\ = \cos (A + B + \alpha + \beta) + \cos (A + B - \alpha - \beta) - \cos (A + B + \alpha - \beta) - \cos (A + B - \alpha + \beta) \\ = 2 \cos (A + B) \cos (\alpha + \beta) - 2 \cos (A + B) \cos (\alpha - \beta) \\ = 2 \cos (A + B) (\cos (\alpha + \beta) - 
\cos (\alpha - \beta)) \\ = 2 \cos (A + B) (\cos \alpha \cos \beta - \sin \alpha \sin \beta - \cos \alpha \cos \beta - \sin \alpha \sin \beta) \\ = - 4 \cos (A + B) \sin \alpha \sin \beta \\ \end{array} +$$ + +and + +$$ +\begin{array}{l} \sin (A + \alpha + B + \beta) + \sin (A - \alpha + B - \beta) - \sin (A + \alpha + B - \beta) - \sin (A - \alpha + B + \beta) \\ = \sin (A + B + \alpha + \beta) + \sin (A + B - \alpha - \beta) - \sin (A + B + \alpha - \beta) - \sin (A + B - \alpha + \beta) \\ = 2 \sin (A + B) \cos (\alpha + \beta) - 2 \sin (A + B) \cos (\alpha - \beta) \\ = 2 \sin (A + B) (\cos (\alpha + \beta) - \cos (\alpha - \beta)) \\ = 2 \sin (A + B) (\cos \alpha \cos \beta - \sin \alpha \sin \beta - \cos \alpha \cos \beta - \sin \alpha \sin \beta) \\ = - 4 \sin (A + B) \sin \alpha \sin \beta \\ \end{array} +$$ + +we deduce the following equation, + +$$ +\begin{array}{l} \partial_ {i} \partial_ {j} u _ {i, j, k} = \frac {1}{4 h _ {x} h _ {y}} \left(u _ {i + 1, j + 1, k} + u _ {i - 1, j - 1, k} - u _ {i + 1, j - 1, k} - u _ {i - 1, j + 1, k}\right) \\ = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {- \sin \frac {2 \pi m}{M} \sin \frac {2 \pi n}{N}}{h _ {x} h _ {y}} e ^ {\iota \frac {2 \pi m i}{M}} e ^ {\iota \frac {2 \pi n j}{N}} e ^ {\iota \frac {2 \pi l k}{L}} \\ \end{array} +$$ + +![](images/128785ac32bd0a8f19798902b6a1d6a700bd904a841a4eb14140d17f13a3b664.jpg) + +Similarly, we have the representations of other differential operators in the frequency domain, + +$$ +\begin{array}{l} \partial_ {j} \partial_ {j} u _ {i, j, k} = \frac {1}{h _ {y} ^ {2}} \left(u _ {i, j + 1, k} + u _ {i, j - 1, k} - 2 u _ {i, j, k}\right) \\ = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {2 (\cos \frac {2 \pi n}{N} - 1)}{h _ {y} ^ {2}} e ^ {\iota \frac {2 \pi m i}{M}} e ^ {\iota \frac {2 \pi n j}{N}} e ^ {\iota \frac {2 \pi l k}{L}} \\ 
\end{array} +$$ + +$$ +\begin{array}{l} \partial_ {k} \partial_ {k} u _ {i, j, k} = \frac {1}{h _ {z} ^ {2}} \left(u _ {i, j, k + 1} + u _ {i, j, k - 1} - 2 u _ {i, j, k}\right) \\ = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {2 \left(\cos \frac {2 \pi l}{L} - 1\right)}{h _ {z} ^ {2}} e ^ {\iota \frac {2 \pi m i}{M}} e ^ {\iota \frac {2 \pi n j}{N}} e ^ {\iota \frac {2 \pi l k}{L}} \\ \end{array} +$$ + +$$ +\begin{array}{l} \partial_ {j} \partial_ {k} u _ {i, j, k} = \frac {1}{4 h _ {y} h _ {z}} \left(u _ {i, j + 1, k + 1} + u _ {i, j - 1, k - 1} - u _ {i, j + 1, k - 1} - u _ {i, j - 1, k + 1}\right) \\ = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {- \sin \frac {2 \pi n}{N} \sin \frac {2 \pi l}{L}}{h _ {y} h _ {z}} e ^ {\iota \frac {2 \pi m i}{M}} e ^ {\iota \frac {2 \pi n j}{N}} e ^ {\iota \frac {2 \pi l k}{L}} \\ \end{array} +$$ + +$$ +\begin{array}{l} \partial_ {k} \partial_ {i} u _ {i, j, k} = \frac {1}{4 h _ {z} h _ {x}} \left(u _ {i + 1, j, k + 1} + u _ {i - 1, j, k - 1} - u _ {i + 1, j, k - 1} - u _ {i - 1, j, k + 1}\right) \\ = \frac {1}{M N L} \sum_ {m = 0} ^ {M - 1} \sum_ {n = 0} ^ {N - 1} \sum_ {l = 0} ^ {L - 1} \hat {u} _ {m, n, l} \frac {- \sin \frac {2 \pi l}{L} \sin \frac {2 \pi m}{M}}{h _ {z} h _ {x}} e ^ {\iota \frac {2 \pi m i}{M}} e ^ {\iota \frac {2 \pi n j}{N}} e ^ {\iota \frac {2 \pi l k}{L}} \\ \end{array} +$$ + +# C ALGORITHM PIPELINES + +In this section, we give the algorithm pipeline of the FFT-OT in Alg. 1 and the details to solve the constant coefficient elliptic PDE through FFT in Alg. 2. + +# Algorithm 1: FFT-OT + +Input: Domain $\Omega = [-1, 1]^3$ , the source density function $f > 0$ , the target density $g = 1$ , step length $\tau$ , approximation error threshold $\varepsilon$ + +Output: Solution $\frac{1}{2}\| x\|^2 + u_n$ to the Monge-Ampère Eqn. 
(2) with the corresponding boundary condition. + +Initialize $u_0(x) = 0$ + +while true do + +Compute the Hessian matrix $D^2 u_n(x)$ + +Compute the density function $\rho_{n}(x)\gets \operatorname *{det}(I + D^{2}u_{n}(x))$ + +if $\| f - \rho_n\|_{L_2(\Omega)} < \varepsilon$ then + +Break; + +Compute the adjoint matrix $[H_n^{pq}(x)]\gets \mathrm{Adj}(I + D^2 u_n(x))$ + +Compute the mean adjoint matrix $[H_n^{pq}]$ using Eqn. (11); + +Solve the constant coefficient elliptic PDE (12) using the FFT Solver Alg. 2; + +Update the Brenier potential $u_{n + 1}(x) \gets u_n + \tau v_n$ ; + +![](images/d61c13d938ba3e3e011a9da39cfe736b7b910a39b97e25db82c6206119535e6e.jpg) + +![](images/44e79679a69958eabe685acf804b31adb3d9599663bed582cdc5120bd078deb4.jpg) + +![](images/1e925c9b7f9647892a7ea3fbfddb61e80987d81ba2f6646e4581b685a2fb9402.jpg) + +![](images/fbb2d06d3357e0337f05a41ceb38dde94cda28eb36ddfaed59169e9c28b735c1.jpg) +(a) Density + +![](images/3ae8a214333cab2e75560cf2faed28321156b390f144dbbdac2346938375b192.jpg) + +![](images/7a32bc28f59db7be75eb28365f52fb5978fc247d00945738d29d8b27caf7778c.jpg) + +![](images/e494bc42a1aee6690ee63f3e1884e1f5b88d7d1c5347206571a5bd629f439cae.jpg) + +![](images/4126d843e81326161452112c5f97ad88a01e118bcc4743555b0a982acb664266.jpg) +(b) Rejection + +![](images/1f5a7553399bb362b5bd8eed40380cd780f118852192b67f50de4b54c3f6c612.jpg) + +![](images/b637a1f2ee6ff502245d3ce9ab33b5a84d0d1e67c0550b102ee5658949ea98c9.jpg) + +![](images/8ee81eb856ffcc90253543aece5f629617a0c4b424a18fdddd4d4cb6a1953052.jpg) + +![](images/58ae9947b1a4e94f5726fd435e640a4184c32f9790027899c0b277b8abe3ab76.jpg) +(c) MH + +![](images/2d347f904fa60d1b03ebaa080f4fd81a76d3650db8736de9e8879d7c30cb0ce7.jpg) + +![](images/ec24c9178f566cf4b8f34ca5fa4af2218552888e0a66d54fccc9a561be2d7ab5.jpg) + +![](images/5cb225d82aa21de75571e1da0d0cd4719d28deccd58fbbb9d338b0e80644afd2.jpg) + +![](images/02063de3e6ff0a0c1fc406d3985036a189531ee95712e25fed72960b5493b688.jpg) +(d) Slice + 
+![](images/af1aa4f0612f357b809fff64e0055dcd3eb08a278a3526ead2603686e412890b.jpg) + +![](images/f7e41c15e16491b309e865d95b76e88783dc5c13fc07702f1ef3b8f9db2cd246.jpg) + +![](images/80125737bb277afa86ba721eeb59fe6a247694e77ec0c822bc2ff888c8b3b27c.jpg) + +![](images/fa1bf5ab48c383d9d7520f197517751a02857b01e395036a260f7ca4df30098e.jpg) +(e) Ours-rand + +![](images/ed40012dcc547db48f95acbf7f256edfeb9868e75cb741cda923f9ab383df9ea.jpg) + +![](images/56efa3f542a9c1d5eeaacc4e0771ff63903e8ad30a5981e2d2b9b9ff6a17c45d.jpg) + +![](images/2aa7363eaa0da9ad29c7a1e8a9519b144066355e9cbf1199d988c6a33c228c87.jpg) + +![](images/cff095d6b8df797d938236d52afdc9be601480f956c2306b2b11b66a54c0d80b.jpg) +(f) Ours-grid +Figure 5: 3D density function sampling. (a) The density functions in different slices of the same model, namely the 40th, 56th, 72th and 80th. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with $T^{-1}$ . (f) The sampling results by mapping the grid centers back with $T^{-1}$ . The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better. Zoom in for better visualization. + +# Algorithm 2: FFT Solver for the Constant Coefficient Elliptic PDE + +Input: Domain $\Omega = [-1,1]^3$ , $M,N,L$ , $\{a^{pq}\}$ , $b^r$ , $c$ , function $f$ with the periodic boundary condition + +Output: Solution $u$ to the elliptic PDE Eqn. (18) + +Discretize the domain $\Omega$ to a $M\times N\times L$ grid; + +Sample the function $f$ to $f_{i,j,k}$ + +Compute FFT using Eqn. (16), $\{\hat{f}_{m,n,l}\} \gets \mathrm{FFT}(\{f_{i,j,k}\})$ ; + +for $(m,n,l)\in [0,M - 1]\times [0,N - 1]\times [0,L - 1]$ do + +Compute the factor $\lambda_{m,n,l}$ using Eqn. 
(19); + +if $\lambda_{m,n,l}$ is 0 then + +$\hat{u}_{m,n,l}\gets 0;$ + +else + +$\hat{u}_{m,n,l}\gets \hat{f}_{m,n,l} / \lambda_{m,n,l};$ + +Compute the Inverse FFT using Eqn. (17), $\{u_{i,j,k}\} \gets \mathrm{IFFT}(\{\hat{u}_{m,n,l}\})$ ; + +Return $\{u_{i,j,k}\}$ + +# D APPENDIX EXPERIMENTS + +In this section, as a complement to the experiments in the main paper, we give more results on the 3D adaptive sampling and volumetric magnifier. + +# D.1 MORE RESULTS ON 3D ADAPTIVE SAMPLING + +In the experiments, we set the density function $f(x) = \sum_{i=1}^{30} p_i \mathcal{N}(\mu_i, \Sigma_i)$ , where $\mathcal{N}(\mu_i, \Sigma_i)$ represents the Gaussian distribution with mean $\mu_i$ and variance $\Sigma_i = \mathrm{diag}(\sigma_{i0}^2, \sigma_{i1}^2, \sigma_{i2}^2)$ . $\mu_i \in \mathbb{R}^3$ is uniformly sampled from $[0,1]^3$ , $\sigma_{ij}$ is uniformly sampled from $[0,0.5]$ , $p_i \in \mathbb{R}$ is uniformly sampled from $[0.2,1]$ and normalized such that $\int_{\Omega} f(x) dx = 1$ . Thus the source distribution $\mu$ is a complicated Gaussian mixture distribution restricted to $\Omega = [0,1]^3$ . 
After computing the OT map + +![](images/b632fd714fbfdb632a3fadf61432a89b4935cba5a58458f57721ec541b864b8a.jpg) + +![](images/ce168d8abe6ef7856881a01379eafc45ff484d3f482f28bbd9121ee6f360fcd8.jpg) + +![](images/1d0ed36963385a1cdeb5ba7d7491cde70599c9d615925ec1eb0eb33d253da61a.jpg) + +![](images/eb9ce5bc43fe4ea1e02f33564d44774cad8052be6a1d9599c507358664a72adc.jpg) +(a) Density + +![](images/3a3dafa3734f7c62672195aae43f7933770947304af30b77cb539a98c00c056c.jpg) + +![](images/8a81d3b1245a6f48c68ece122d2b992520da7cf082d951fe4aa566e2d2b3fd5d.jpg) + +![](images/251020e575b7b5f86fb186ef23a404656f39073fc82867552527c84baf964ab9.jpg) + +![](images/3ccc7c884bf260d3d7a589e79b9b4fa1d982905da5ec2d50108ac999beea27b7.jpg) +(b) Rejection + +![](images/eaf96e2003f1ea59584af9725a79164f6b33ab7b581efab41acb30dee13b0bf8.jpg) + +![](images/fd227b3612f179fff0a571758e1efca8a888148821fc29a10a544659f4bd65e7.jpg) + +![](images/8e0e7a79899d3ecec4623468e3128ae43c89a6526bdb894bf3beecf22dd3eddd.jpg) + +![](images/80942969ab76543868ba09a0801227a57eb9b33db9897b7f213aa7c1f5bc3163.jpg) +(c) MH + +![](images/28c4c3972de99557357c14c6b87619ff7a34e1166bc578df3c195d38f4ea8a05.jpg) + +![](images/cf31df0e7b0570b35ed2fca7512ae315ab5ca51d3b38856e83b45d9d5e4b9902.jpg) + +![](images/0ecdda00548d3aec475ef7e2e5395252966aa131b202cd34fc6a6ce13f498fb3.jpg) + +![](images/40d3fab3b9d3f33210e869c2778b9abe4e6549645280d60ee8b9fbaa682466a3.jpg) +(d) Slice +Figure 6: 3D density function sampling. (a) The density functions in different slices of the same model, namely the 56th, 64th, 80th and 88th. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with $T^{-1}$ . (f) The sampling results by mapping the grid centers back with $T^{-1}$ . 
The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better. Zoom in for better visualization. + +![](images/a44e65cf4720fb3a9025753064e926dfdb8bde3b86e5bc3e5cf471a43408922e.jpg) + +![](images/15e043fbbd4253c3678c4a6c8987915513b324058b4a6013c68462e6053f8c4b.jpg) + +![](images/569538f217b565d540365f94cdc03f275139cbdb91aa6587279a13dbd86cbd41.jpg) + +![](images/2dbd3cd34ac41eb59e083df05340237d5623a52dac6d916612bb68fbf4b503eb.jpg) +(e) Ours-rand + +![](images/8edc0a8c659ae779ef7604f2c644f9c4736a8e2a02872c6fb5ab0ca4a2a395f6.jpg) + +![](images/89522a5ec4fc0c4966fa43f854ea69059752ee3f5a866a35ba53e8a54ef7f3ca.jpg) + +![](images/0ebd275f7cefa65894cf1568baccba33a3509c5fff52d6a3c85a0338be5d4ad8.jpg) + +![](images/f85149e701cb473dacd334bb3fafafc12d8d82164ca006fc6af47bd045e3392c.jpg) +(f) Ours-grid + +$T$ from $\mu$ to the uniform distribution $\nu$ defined on $[-1,1]^3$ , we conduct two groups of experiments: (i) we map the cell centers of the grid $\{y_k\}$ of $[-1,1]^3$ back to $[-1,1]^3$ through the inverse OT map $T^{-1}(y_k)$ defined by Eqn. (20); (ii) we randomly sample $100k$ samples $\{y_k\}$ from the Uniform distribution defined in $[-1,1]^3$ , then map them back to $[-1,1]^3$ through the inverse OT map $T^{-1}(y_k)$ . In order to keep the consistency with the mirror reflection process in the FFT-OT algorithm, we also reflect the generated samples back to $\Omega$ . To visualize the results of the $k$ th slice, we plot the samples whose $z$ coordinates satisfy the inequality, + +$$ +k / 1 2 8 - 1 / 2 5 6 \leq z \leq k / 1 2 8 + 1 / 2 5 6. +$$ + +In Fig. 5 and Fig. 6, we give more sampling results of different slices correspond to the two models used in Fig. 2 in the main paper. Fig. 5 visualize the density function restricted on the 40th, 56th, 72th and 80th slices for different methods of the model displayed in the first row of 2. Fig. 
6 visualizes the density function restricted to the 56th, 64th, 80th and 88th slices for different methods of the model displayed in the second row of Fig. 2. Compared with the classical methods, both sampling strategies of our method give decent sampling results that fit the prescribed density function well. Moreover, the number of generated samples for different slices of the same 3D model fits the density functions restricted to the corresponding slices well, namely more samples are generated in the brighter regions for different slices. + +# D.2 MORE RESULTS ON VOLUMETRIC MAGNIFIER + +In this experiment, we magnify the volumetric MRI image of the aneurysm by different amplification factors. In Fig. 7, we show the original aneurysm viewed from different angles in the first column. The last three columns give the magnified results with different amplification factors from the same viewpoints as those in the first column. We can see that the aneurysm region is successfully magnified by different factors and the rest of the volume stays nearly the same. 
+ +![](images/fb7d09d5709a40cfb69539473eaed5d763a441ad2075023fee75460f50533828.jpg) + +![](images/11a8bfcfe495005c1b746312291aa2cded56fd424078f9ac0e844becb48e7b4e.jpg) + +![](images/bc21db47976ef52cdf7e67a303c072525938597b4d3b10adab258e8ae0eff02c.jpg) +(a) Original + +![](images/e66ecdc0147b920220d6ad7885d69226c479c799bcf72243d8bbd04c7128c3f1.jpg) + +![](images/56de3e5a0cf32f1b7bfb23652890c5c3d8b7521e6d37d17261226aba14fa84b6.jpg) + +![](images/856064b9ce45d2bc94f593509ee413a02c6a616819f4afd839216a8291a836f0.jpg) +(b) Magnifying ratio 1 + +![](images/612523e4a7d50159c05d7ceb09aa87af4ce236800deb9243b95993711e37bcb6.jpg) + +![](images/891f404debf9f17eba4150df5625178a5feb7d425472183b96528929f320b7ad.jpg) + +![](images/7ade992cc0161eba250766e4f278b57ec9d994c8ed45392476a9053a21254b72.jpg) +(c) Magnifying ratio 2 + +![](images/3710da88f2a7dd69ed1eea4fd8ab45ed766d513a4ed1084de222e94bdf4cc8e4.jpg) + +![](images/1152a6ab31b4aec89407cf6eba702c2129f587ffc0f15c6a290b6cd99606045c.jpg) + +![](images/bf5e003805ef42d6c857e8ec10508c0cd76198e6f141390526699155a9ca25dc.jpg) +(d) Magnifying ratio 3 +Figure 7: The volume magnifier of an aneurysm. The first column shows the original volumetric data from different viewpoints, and the last three columns give the magnified data from the same viewpoints of the first column with different magnifying ratios. The yellow circles denote the aneurysm or the ROIs. 
\ No newline at end of file diff --git a/2023/Volumetric Optimal Transportation by Fast Fourier Transform/images.zip b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..68785046790528d61eafceca66b297e998425c13 --- /dev/null +++ b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d3e6784f1d665df18582b18bc3300730af36b23294ceb6a13c60d0c1aad97a5 +size 1658623 diff --git a/2023/Volumetric Optimal Transportation by Fast Fourier Transform/layout.json b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b942fdc8abb798b5e4364154778967bf14b989e4 --- /dev/null +++ b/2023/Volumetric Optimal Transportation by Fast Fourier Transform/layout.json @@ -0,0 +1,26163 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "text", + "content": "VOLUMETRIC OPTIMAL TRANSPORTATION BY FAST FOURIER TRANSFORM" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 113, + 135, + 152, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 135, + 152, + 145 + ], + "spans": [ + { + "bbox": [ + 113, + 135, + 152, + 145 + ], + "type": "text", + "content": "Na Lei*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 146, + 245, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 146, + 245, + 168 + ], + "spans": [ + { + "bbox": [ + 111, + 146, + 245, + 168 + ], + "type": "text", + "content": "Dalian University of Technology \nnalei@dlut.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 364, + 135, + 436, + 147 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 364, + 135, + 436, + 147 + ], + "spans": [ + { + "bbox": [ + 364, + 135, + 436, + 147 + ], + "type": "text", + "content": "Dongsheng An" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 364, + 147, + 498, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 147, + 498, + 168 + ], + "spans": [ + { + "bbox": [ + 364, + 147, + 498, + 168 + ], + "type": "text", + "content": "Stony Brook University\ndoan@cs.stonybrook.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 185, + 162, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 185, + 162, + 197 + ], + "spans": [ + { + "bbox": [ + 111, + 185, + 162, + 197 + ], + "type": "text", + "content": "Min Zhang" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 197, + 231, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 197, + 231, + 219 + ], + "spans": [ + { + "bbox": [ + 111, + 197, + 231, + 219 + ], + "type": "text", + "content": "Zhejiang University min_zhang@zju.edu.cn" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 244, + 185, + 295, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 185, + 295, + 197 + ], + "spans": [ + { + "bbox": [ + 244, + 185, + 295, + 197 + ], + "type": "text", + "content": "Xiaoyin Xu" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 244, + 197, + 360, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 197, + 360, + 218 + ], + "spans": [ + { + "bbox": [ + 244, + 197, + 360, + 218 + ], + "type": "text", + "content": "Harvard Medical School \nxxu@bwh.harvard.edu" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 373, + 185, + 430, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 185, + 430, + 197 + ], + "spans": [ + { + "bbox": [ + 373, + 185, + 430, + 197 + ], + "type": "text", + "content": "Xianfeng Gu" + } + ] + } + ], + "index": 10 + }, + { + 
"bbox": [ + 373, + 197, + 495, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 197, + 495, + 219 + ], + "spans": [ + { + "bbox": [ + 373, + 197, + 495, + 219 + ], + "type": "text", + "content": "Stony Brook University gu@cs.stonybrook.edu" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 276, + 247, + 334, + 259 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 247, + 334, + 259 + ], + "spans": [ + { + "bbox": [ + 276, + 247, + 334, + 259 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 140, + 273, + 470, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 273, + 470, + 472 + ], + "spans": [ + { + "bbox": [ + 140, + 273, + 470, + 472 + ], + "type": "text", + "content": "The optimal transportation map finds the most economical way to transport one probability measure to another, and it has been applied in a broad range of applications in machine learning and computer vision. By the Brenier theory, computing the optimal transport map is equivalent to solving a Monge-Ampère equation, which is highly non-linear. Therefore, the computation of optimal transportation maps is intrinsically challenging. In this work, we propose a novel and powerful method, the FFT-OT (fast Fourier transform-optimal transport), to compute the 3-dimensional OT problems. The method is based on several key ideas: first, the Monge-Ampère equation is linearized to a sequence of linear elliptic PDEs with spacial and temporal variant coefficients; second, the obliqueness property of optimal transportation maps is reformulated as a Neumann boundary condition; and third, the variant coefficient elliptic PDEs are approximated by constant coefficient elliptic PDEs and solved by FFT on GPUs. We also prove that the algorithm converges linearly. 
Experimental results show that the FFT-OT algorithm is more than a hundred times faster than the conventional methods based on the convex geometry. Furthermore, the method can be directly applied for sampling from complex 3D density functions in machine learning and magnifying the volumetric data in medical imaging." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 494, + 206, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 494, + 206, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 494, + 206, + 506 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "text", + "content": "Optimal transportation (OT) transports one probability measure to another in the most economical way, and it plays a fundamental role in areas like machine learning Courty et al. (2017); Altschuler et al. (2019), computer vision Arjovsky et al. (2017); Tolstikhin et al. (2018); An et al. (2020), and computer graphics Solomon et al. (2015); Nader & Guennebaud (2018). Given a Riemannian manifold " + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "text", + "content": ", all the probability distributions on " + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "text", + "content": " form an infinite dimensional space " + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "inline_equation", + "content": "\\mathcal{P}(X)" + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "text", + "content": ". 
Given any two distributions " + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "inline_equation", + "content": "\\mu, \\nu \\in \\mathcal{P}(X)" + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "text", + "content": ", the optimal transportation map defines a distance between them, and the McCann interpolation McCann (1997) defines the geodesic connecting them. Hence optimal transportation equips " + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "inline_equation", + "content": "\\mathcal{P}(X)" + }, + { + "bbox": [ + 104, + 519, + 506, + 619 + ], + "type": "text", + "content": " with a Riemannian metric and defines its covariant differentiation, which provides a variational calculus framework for optimization in it." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 624, + 506, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 506, + 714 + ], + "type": "text", + "content": "As the optimal transportation problem is highly non-linear, it is quite challenging to compute the OT maps. Recently, researchers have developed many algorithms. The geometric variational approach Aurenhammer et al. (1998); Gu et al. (2016); Levy (2015) based on the Brenier theorem Brenier (1991) is capable of achieving high accuracy for low dimensional problems, but it requires complicated geometric data structure and the storage complexity grows exponentially as the dimension increases. The Sinkhorn method Cuturi (2013) based on the Kantorovich theorem adds an entropic regularizer to the primal problem and can handle high dimensional tasks, but it suffers from the intrinsic approximation error." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 721, + 227, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 721, + 227, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 721, + 227, + 732 + ], + "type": "text", + "content": "* indicates equal contribution" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": "We propose a novel method to tackle this challenging problem through Fast Fourier Transformation (FFT). According to the Brenier theorem Brenier (1991), under the quadratic distance cost, the optimal transportation map is the gradient of the Brenier potential, which satisfies the Monge-Ampère equation. With the continuity method Delanoë (1991), the Monge-Ampère equation can be linearized as a sequence of elliptic partial differential equations (PDEs) with spacial and temporal variant coefficients. By iteratively solving the linearized Monge-Ampère equations, we can obtain the OT map. 
Specifically, we propose to approximate the linearized Monge-Ampère equation by constant coefficient elliptic PDEs and solve them using the FFT on GPUs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 175, + 506, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 506, + 342 + ], + "type": "text", + "content": "Our proposed FFT-OT method has many merits: (i) it is generalizable for arbitrary dimension; (ii) it has a linear convergence rate, namely the approximation error decays exponentially fast; (iii) in each iteration, the computational complexity of FFT is " + }, + { + "bbox": [ + 104, + 175, + 506, + 342 + ], + "type": "inline_equation", + "content": "O(n \\log n)" + }, + { + "bbox": [ + 104, + 175, + 506, + 342 + ], + "type": "text", + "content": ", thus our algorithm can solve large scale OT problems; and (iv) it is highly parallelable and can be efficiently implemented on GPUs. We demonstrate the efficiency of the FFT-OT algorithm by solving the volumetric OT problems for machine learning and medical imaging applications including sampling from given 3D density functions and volumetric magnifier. The algorithm also has its own limitations: (i) although it can be generalized to any dimensions, the storage complexity increases exponentially with respect to the dimension, so its power is limited by the memory size of the GPUs; (ii) Since the algorithm uses FFT, the current version of the method only works well for continuous density functions. (iii) In this work, we mainly focus on the computation of the OT map from the uniform distribution to another arbitrary continuous distribution. To extend the method to find the OT map between any two continuous measures, we can compute two OT maps from the uniform distribution to the both continuous measures, then combine them together. 
The combination will give a reasonable approximation of the OT map Nader & Guennebaud (2018)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 346, + 506, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 506, + 415 + ], + "type": "text", + "content": "Though Lei and Gu Lei & Gu (2021) also uses FFT to solve the 2-dimensional OT problem, our method differs their works in the following two aspects: (i) Lei and Gu's method uses the fixed point method to compute the 2D OT problems, ours is based on the linearization of the Monge-Ampère operator to solve the 3D OT problems, these are two different methodologies in PDE theory; (ii) In our paper, we also provide the theoretical convergence analysis of the proposed method. For more detailed analysis and related work, please refer to the Appendix A." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 429, + 318, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 429, + 318, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 318, + 441 + ], + "type": "text", + "content": "2 OPTIMAL TRANSPORTATION THEORY" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 453, + 504, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 477 + ], + "type": "text", + "content": "In this section, we review the fundamental concepts and theorems of the OT problem and the Monge-Amperè equation, more details can be found in Villani (2008)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": "Optimal Transportation Map and the Monge-Ampère equation Suppose the source domain " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": " is an open set in " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": " with the probability measure " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": ", the target domain " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": " is with the probability measure " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": ". 
Both " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": " have density functions " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "d\\mu(x) = f(x)dx" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "d\\nu(y) = g(y)dy" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": ", respectively, with the equal total mass: " + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\int_{\\Omega} f(x)dx = \\int_{\\Sigma} g(y)dy" + }, + { + "bbox": [ + 104, + 487, + 504, + 534 + ], + "type": "text", + "content": ", which is called the balance condition." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 536, + 506, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 561 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 104, + 536, + 506, + 561 + ], + "type": "inline_equation", + "content": "T: \\Omega \\to \\Sigma" + }, + { + "bbox": [ + 104, + 536, + 506, + 561 + ], + "type": "text", + "content": " is a measurable map. 
The mapping " + }, + { + "bbox": [ + 104, + 536, + 506, + 561 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 536, + 506, + 561 + ], + "type": "text", + "content": " is called measure preserving and denoted as " + }, + { + "bbox": [ + 104, + 536, + 506, + 561 + ], + "type": "inline_equation", + "content": "T_{\\#} \\mu = \\nu" + }, + { + "bbox": [ + 104, + 536, + 506, + 561 + ], + "type": "text", + "content": " if the following relation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 265, + 564, + 504, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 564, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 265, + 564, + 504, + 578 + ], + "type": "interline_equation", + "content": "\\mu (T ^ {- 1} (A)) = \\nu (A) \\tag {1}", + "image_path": "12ede39d19f89c06e5facd99014dad12c4f4e175903dab5d940d434b4116afed.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "text", + "content": "for every Borel subset " + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "inline_equation", + "content": "A \\subset \\Sigma" + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "text", + "content": ". 
A cost function " + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "inline_equation", + "content": "c: \\Omega \\times \\Sigma \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "text", + "content": " measures the transportation cost for transporting the unit mass from " + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "inline_equation", + "content": "x \\in \\Omega" + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "inline_equation", + "content": "y \\in \\Sigma" + }, + { + "bbox": [ + 104, + 581, + 504, + 604 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 606, + 506, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 606, + 506, + 630 + ], + "spans": [ + { + "bbox": [ + 104, + 606, + 506, + 630 + ], + "type": "text", + "content": "Problem 1 (Monge). 
The optimal transportation problem finds the measure preserving map with the minimal total transportation cost," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 246, + 632, + 365, + 658 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 632, + 365, + 658 + ], + "spans": [ + { + "bbox": [ + 246, + 632, + 365, + 658 + ], + "type": "interline_equation", + "content": "\\min _ {T _ {\\#} \\mu = \\nu} \\int_ {\\Omega} c (x, T (x)) f (x) d x", + "image_path": "f61d16eaab3e4f032341bbc7409c028092446c27fe3a5dcfd7696907b4e3d76c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 667, + 506, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 506, + 713 + ], + "type": "text", + "content": "The solution to the Monge's problem is called the optimal transport map between " + }, + { + "bbox": [ + 104, + 667, + 506, + 713 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 667, + 506, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 667, + 506, + 713 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 104, + 667, + 506, + 713 + ], + "type": "text", + "content": ". The existence, uniqueness and regularity of OT maps depend on the boundedness and the continuity of the density functions, the convexity of the supporting domains, the continuity of their boundaries, and the cost function. In our current work, we focus on the similar situation in Saumier et al. 
(2013)," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 719, + 435, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 719, + 435, + 734 + ], + "spans": [ + { + "bbox": [ + 132, + 719, + 435, + 734 + ], + "type": "text", + "content": "- The cost function is quadratic Euclidean distance " + }, + { + "bbox": [ + 132, + 719, + 435, + 734 + ], + "type": "inline_equation", + "content": "c(x, y) = \\| x - y \\|^2 / 2" + }, + { + "bbox": [ + 132, + 719, + 435, + 734 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 82, + 503, + 131 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 132, + 82, + 503, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 82, + 503, + 105 + ], + "spans": [ + { + "bbox": [ + 132, + 82, + 503, + 105 + ], + "type": "text", + "content": "- The supports of the source and the target measures are the canonical cube " + }, + { + "bbox": [ + 132, + 82, + 503, + 105 + ], + "type": "inline_equation", + "content": "\\Omega = [-1, 1]^3" + }, + { + "bbox": [ + 132, + 82, + 503, + 105 + ], + "type": "text", + "content": ", which is uniformly convex;" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 108, + 
503, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 108, + 503, + 131 + ], + "spans": [ + { + "bbox": [ + 132, + 108, + 503, + 131 + ], + "type": "text", + "content": "- The source and the target measures " + }, + { + "bbox": [ + 132, + 108, + 503, + 131 + ], + "type": "inline_equation", + "content": "\\mu, \\nu" + }, + { + "bbox": [ + 132, + 108, + 503, + 131 + ], + "type": "text", + "content": " are absolutely continuous with respect to the Lebesgue measure, their densities " + }, + { + "bbox": [ + 132, + 108, + 503, + 131 + ], + "type": "inline_equation", + "content": "f, g" + }, + { + "bbox": [ + 132, + 108, + 503, + 131 + ], + "type": "text", + "content": " are positive and bounded away from zero;" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 280, + 133, + 365, + 146 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 133, + 365, + 146 + ], + "spans": [ + { + "bbox": [ + 280, + 133, + 365, + 146 + ], + "type": "interline_equation", + "content": "0 < m < f, g < M,", + "image_path": "983cd4ac46ad93a3d73db26166f764002a1103cc1390a083b002ca0272635c57.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 148, + 257, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 148, + 257, + 161 + ], + "spans": [ + { + "bbox": [ + 140, + 148, + 257, + 161 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 140, + 148, + 257, + 161 + ], + "type": "inline_equation", + "content": "f,g" + }, + { + "bbox": [ + 140, + 148, + 257, + 161 + ], + "type": "text", + "content": " are of class " + }, + { + "bbox": [ + 140, + 148, + 257, + 161 + ], + "type": "inline_equation", + "content": "C^\\alpha (\\Omega)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 163, + 504, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 163, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 132, + 163, + 
504, + 186 + ], + "type": "text", + "content": "- The boundary condition is second boundary condition (OT boundary condition), " + }, + { + "bbox": [ + 132, + 163, + 504, + 186 + ], + "type": "inline_equation", + "content": "T(\\Omega) = \\Omega" + }, + { + "bbox": [ + 132, + 163, + 504, + 186 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "text", + "content": "Then according to (Villani (2003) Theorem 14.4, Saumier et al. (2013) Theorem 2.1), the OT maps " + }, + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "inline_equation", + "content": "T: \\Omega \\to \\Omega" + }, + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "text", + "content": " exists and is unique and invertible (" + }, + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "text", + "content": " a.e), and the Brenier potential is of class " + }, + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "inline_equation", + "content": "C^{2,\\beta}(\\bar{\\Omega})" + }, + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "text", + "content": " form some " + }, + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "inline_equation", + "content": "0 < \\beta < \\alpha" + }, + { + "bbox": [ + 104, + 194, + 504, + 228 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": "Theorem 2. 
Assume that " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\Omega, \\mu, \\nu, f" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": " are defined as above. Then there exists a convex function " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "u: \\Omega \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "u \\in C^{2,\\beta}(\\Omega)" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "0 < \\beta < \\alpha" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\nabla u" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": " pushes " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": " forward to " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "(\\nabla u)_{\\#} \\mu = \\nu" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": ". 
Moreover, " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\nabla u" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": " is unique and invertible (" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": " a.e.), and its inverse " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\nabla v" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": " satisfies " + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "inline_equation", + "content": "(\\nabla v)_{\\#} \\nu = \\mu" + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 272, + 490, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 490, + 284 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 490, + 284 + ], + "type": "text", + "content": "We call such a convex function " + }, + { + "bbox": [ + 104, + 272, + 490, + 284 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 272, + 490, + 284 + ], + "type": "text", + "content": " the Brenier potential, it satisfies the Monge-Ampère equation," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 253, + 287, + 503, + 310 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 287, + 503, + 310 + ], + "spans": [ + { + "bbox": [ + 253, + 287, + 503, + 310 + ], + "type": "interline_equation", + "content": "\\det D ^ {2} u (x) = \\frac {f (x)}{g \\circ \\nabla u (x)}. 
\\tag {2}", + "image_path": "4dd2ab326e847d10c999c04c8d1f29102470d1fc5bc87fbbdbf2f204cda54724.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 312, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 348 + ], + "type": "text", + "content": "with the boundary condition " + }, + { + "bbox": [ + 104, + 312, + 504, + 348 + ], + "type": "inline_equation", + "content": "\\nabla u(\\Omega) = \\Sigma" + }, + { + "bbox": [ + 104, + 312, + 504, + 348 + ], + "type": "text", + "content": ". Then finding the optimal transportation map is equivalent to solving the corresponding Monge-Ampère equation. In the current work, the target measure is always the Lebesgue measure, and the source density " + }, + { + "bbox": [ + 104, + 312, + 504, + 348 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 312, + 504, + 348 + ], + "type": "text", + "content": " is of class " + }, + { + "bbox": [ + 104, + 312, + 504, + 348 + ], + "type": "inline_equation", + "content": "C^{2,\\alpha}(\\Omega)" + }, + { + "bbox": [ + 104, + 312, + 504, + 348 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 357, + 444, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 444, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 444, + 370 + ], + "type": "text", + "content": "Linearized Monge-Ampère Operator The Monge-Ampère operator is defined as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 267, + 373, + 342, + 387 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 373, + 342, + 387 + ], + "spans": [ + { + "bbox": [ + 267, + 373, + 342, + 387 + ], + "type": "interline_equation", + "content": "\\mathrm {M A} [ u ] = \\det D ^ {2} u,", + "image_path": "164be6122164f019a047b07026f6aec29088bd8103fa9ecf04bb5717d43ddab9.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 389, + 349, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 389, + 349, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 389, + 349, + 401 + ], + "type": "text", + "content": "which is highly non-linear. 
It can be linearized as following:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 166, + 403, + 504, + 418 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 403, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 166, + 403, + 504, + 418 + ], + "type": "interline_equation", + "content": "\\mathrm {M A} [ u + \\varepsilon v ] = \\det (D ^ {2} u + \\varepsilon D ^ {2} v) \\approx \\det D ^ {2} u + \\varepsilon \\operatorname {T r a c e} (\\operatorname {A d j} (D ^ {2} u) \\cdot D ^ {2} v), \\tag {3}", + "image_path": "e4911fdc2905a330e1c64d6d8e4aa8bc410c9641b226f221248a1fadca12f70a.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 419, + 504, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 419, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 419, + 504, + 443 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 419, + 504, + 443 + ], + "type": "inline_equation", + "content": "\\operatorname{Adj}(A)" + }, + { + "bbox": [ + 104, + 419, + 504, + 443 + ], + "type": "text", + "content": " is the adjoint (co-factor) matrix of " + }, + { + "bbox": [ + 104, + 419, + 504, + 443 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 419, + 504, + 443 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 419, + 504, + 443 + ], + "type": "inline_equation", + "content": "\\operatorname{Adj}(A) := \\det(A)A^{-T}" + }, + { + "bbox": [ + 104, + 419, + 504, + 443 + ], + "type": "text", + "content": ". 
Therefore the linearized Monge-Ampère operator is defined as" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 184, + 446, + 504, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 446, + 504, + 477 + ], + "spans": [ + { + "bbox": [ + 184, + 446, + 504, + 477 + ], + "type": "interline_equation", + "content": "\\mathrm {D M A} _ {u} [ v ] := \\operatorname {T r a c e} \\left(\\operatorname {A d j} \\left(D ^ {2} u\\right) \\cdot D ^ {2} v\\right) = \\sum_ {p, q = 1} ^ {d} u ^ {p q} (x) \\partial_ {p} \\partial_ {q} v (x), \\tag {4}", + "image_path": "0c0607cd3cb5a447fa453facc4174e4bc6b434f82d8625f732618a45f5c4a559.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 480, + 462, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 480, + 462, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 480, + 462, + 499 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 480, + 462, + 499 + ], + "type": "inline_equation", + "content": "(u^{pq}) = \\mathrm{Adj}(D^2 u)" + }, + { + "bbox": [ + 104, + 480, + 462, + 499 + ], + "type": "text", + "content": " is the adjoint matrix of the Hessian of " + }, + { + "bbox": [ + 104, + 480, + 462, + 499 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 480, + 462, + 499 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 480, + 462, + 499 + ], + "type": "inline_equation", + "content": "\\partial_p\\partial_q\\coloneqq \\frac{\\partial^2}{\\partial x_p\\partial x_q}" + }, + { + "bbox": [ + 104, + 480, + 462, + 499 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "text", + "content": "Continuity Method For simplicity, we assume the source domain coincides with the target domain, that is " + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\Omega = \\Sigma" + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "text", + "content": ", and the target density is " + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "inline_equation", + "content": "g(x) \\equiv 1" + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "text", + "content": ". The Monge-Ampère equation Eqn. (2) is simplified as " + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\operatorname{det}D^{2}u(x) = f(x)" + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "text", + "content": ". Define a flow of density as" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 230, + 545, + 504, + 558 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 545, + 504, + 558 + ], + "spans": [ + { + "bbox": [ + 230, + 545, + 504, + 558 + ], + "type": "interline_equation", + "content": "\\rho (x, t) = (1 - t) + t f (x), \\quad t \\in [ 0, 1 ]. 
\\tag {5}", + "image_path": "7d057aec9778e03519664e0f658b879770728684ed6e4f3f175aa6699ad22f2d.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 559, + 414, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 414, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 414, + 572 + ], + "type": "text", + "content": "The corresponding flow of the Brenier potentials is " + }, + { + "bbox": [ + 104, + 559, + 414, + 572 + ], + "type": "inline_equation", + "content": "u(x,t):\\Omega \\times [0,1]\\to \\mathbb{R}" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 202, + 575, + 406, + 589 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 575, + 406, + 589 + ], + "spans": [ + { + "bbox": [ + 202, + 575, + 406, + 589 + ], + "type": "interline_equation", + "content": "\\det D _ {x} ^ {2} u (x, t) = \\rho (x, t), \\quad s. t. \\nabla_ {x} u (x, t) (\\Omega) = \\Omega ,", + "image_path": "8d649a3333ddaa7432dfbca064cfd999deba878ddb02f427b2264d2ba140414b.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "inline_equation", + "content": "D_x^2 u(x,t)" + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "text", + "content": " is the Hessian of " + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "inline_equation", + "content": "u(x,t)" + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "text", + "content": " with respect to " + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + 
], + "type": "inline_equation", + "content": "u(x,1)" + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "text", + "content": " is the solution to the initial Monge-Ampère equation Eqn. (2). Take the derivative w.r.t. time " + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "text", + "content": " on both sides of the linearized Monge-Ampère operator Eqn. (4), we obtain an elliptic PDE with the spacial and temporal variant coefficients of the unknown " + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "inline_equation", + "content": "v(x,t) \\coloneqq \\dot{u} (x,t)" + }, + { + "bbox": [ + 104, + 591, + 504, + 637 + ], + "type": "text", + "content": ", namely the \"velocity\" of the Brenier potential," + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 181, + 640, + 504, + 670 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 640, + 504, + 670 + ], + "spans": [ + { + "bbox": [ + 181, + 640, + 504, + 670 + ], + "type": "interline_equation", + "content": "\\mathrm {D M A} _ {u} [ v ] = \\sum_ {p, q = 1} ^ {d} u ^ {p q} (x, t) \\partial_ {p} \\partial_ {q} v (x, t) = \\frac {\\partial}{\\partial t} \\rho (x, t) = f (x) - 1. 
\\tag {6}", + "image_path": "eeb70efc7b4ebe887f61c8e3d7f1cc3d1abcd18a3cbdb1c3026905ee10870439.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": "At time " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": ", the initial Brenier potential is known as " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "u(x,0) = \\frac{1}{2}\\| x\\|^2" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": ". Suppose at time " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": ", we have obtained " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "u(x,t)" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": " already, then we can compute the adjoint matrix " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "u^{pq}(x,t)" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": " of the Hessian " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "D_x^2 u(x,t)" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": " and solve Eqn. (6) to get the velocity " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "v(x,t) = \\dot{u} (x,t)" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": ". 
In turn, we move forward to time " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "t + \\delta t" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": ", and update " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "u(x,t + \\delta t)" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "u(x,t) + \\dot{u} (x,t)\\delta t" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": ". By repeating this procedure, eventually we reach time " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "t = 1" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": " and obtain the solution " + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "inline_equation", + "content": "u(x)\\coloneqq u(x,1)" + }, + { + "bbox": [ + 104, + 674, + 505, + 733 + ], + "type": "text", + "content": " to the initial Monge-Ampère Eqn. (2)." 
+ } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "Obliqueness Boundary Condition Suppose the boundary of " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "C^1" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": " almost everywhere, therefore at a " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "C^1" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": " point " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "x\\in \\partial \\Omega" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": ", the outer normal " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "\\mathbf{n}(x)" + }, + { + "bbox": [ + 104, + 
82, + 504, + 117 + ], + "type": "text", + "content": " is well defined. For almost every boundary point " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "x\\in \\partial \\Omega" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": ", the obliqueness condition is represented as" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 260, + 121, + 504, + 134 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 121, + 504, + 134 + ], + "spans": [ + { + "bbox": [ + 260, + 121, + 504, + 134 + ], + "type": "interline_equation", + "content": "\\langle \\mathbf {n} (x), \\mathbf {n} (\\nabla u (x)) \\rangle \\geq 0. \\tag {7}", + "image_path": "816a20b38f8a62287fd6f3021f12746fbdbea27e4f4facf71ec72da87bd6f215.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "text", + "content": " is a cuboid and has 6 faces, if a boundary point " + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "inline_equation", + "content": "x\\in \\partial \\Omega" + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "text", + "content": " is on a face, by the cyclic monotonicity of the map and the strict convexity of " + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "text", + "content": " Villani (2008), its image " + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "inline_equation", + "content": "\\nabla u(x)" + }, + { + "bbox": [ + 104, + 138, + 
504, + 171 + ], + "type": "text", + "content": " must be on the same face of " + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 138, + 504, + 171 + ], + "type": "text", + "content": ", namely," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 258, + 172, + 504, + 183 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 172, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 258, + 172, + 504, + 183 + ], + "type": "interline_equation", + "content": "\\langle \\nabla u (x) - x, \\mathbf {n} (x) \\rangle = 0. \\tag {8}", + "image_path": "5ad514a70ba4209ce48f209ca5466210fb70b5303aebd63aa9ac377e8da69bc2.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 186, + 504, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 504, + 211 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 504, + 211 + ], + "type": "text", + "content": "We can rewrite the Brenier potential as " + }, + { + "bbox": [ + 104, + 186, + 504, + 211 + ], + "type": "inline_equation", + "content": "u(x_{1},x_{2},\\ldots ,x_{d}) = \\frac{1}{2}\\sum_{i = 1}^{d}x_{i}^{2} + v(x_{1},\\dots ,x_{d})" + }, + { + "bbox": [ + 104, + 186, + 504, + 211 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 186, + 504, + 211 + ], + "type": "inline_equation", + "content": "\\nabla u(x) - x = \\nabla v(x)" + }, + { + "bbox": [ + 104, + 186, + 504, + 211 + ], + "type": "text", + "content": ". By Eqn. 
(8), " + }, + { + "bbox": [ + 104, + 186, + 504, + 211 + ], + "type": "inline_equation", + "content": "v(x)" + }, + { + "bbox": [ + 104, + 186, + 504, + 211 + ], + "type": "text", + "content": " satisfies the Neumann boundary condition," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 260, + 216, + 504, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 216, + 504, + 237 + ], + "spans": [ + { + "bbox": [ + 260, + 216, + 504, + 237 + ], + "type": "interline_equation", + "content": "\\frac {\\partial v}{\\partial \\mathbf {n}} (x) = 0, \\quad x \\in \\partial \\Omega . \\tag {9}", + "image_path": "dae681d522c88a3f91040801fa07619d2b9c60d5a16de913ba6413f182c5644f.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 242, + 504, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 242, + 504, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 242, + 504, + 276 + ], + "type": "text", + "content": "Similarly, the velocity of the (modified) Brenier potential " + }, + { + "bbox": [ + 104, + 242, + 504, + 276 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 242, + 504, + 276 + ], + "type": "text", + "content": " in Eqn. (6) also satisfies the Neumann boundary condition. The analysis about the existence and regularity of the solutions to Eqn. (6) with boundary condition Eqn. (9) can be found in the supplementary material." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 291, + 284, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 291, + 284, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 284, + 304 + ], + "type": "text", + "content": "3 COMPUTATIONAL ALGORITHM" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 316, + 504, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 361 + ], + "type": "text", + "content": "Here we introduce the 3-dimensional FFT-OT algorithm, which can be generalized to any dimensions. We approximate the Monge-Ampère equation by a sequence of constant coefficient elliptic PDEs, and solve them by FFT on GPUs. More detailed analysis about the solution of the discretized Monge-Ampère equation, and the proofs of the lemmas and theorems are given by Appendix B." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 373, + 434, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 434, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 434, + 385 + ], + "type": "text", + "content": "3.1 CONTINUITY METHOD FOR SOLVING THE MONGE-AMPERE EQUATION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": "By using the continuity method, we can solve the Monge-Ampère equation iteratively. For simplicity, we assume the target measure is the Lebesgue's measure with " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "g \\equiv 1" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": ". 
At the " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": "-th iteration, the Brenier potential is represented as " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "\\frac{1}{2} \\| x \\|^2 + u_n(x)" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": ", its Hessian matrix is " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "H_n(x) \\coloneqq \\mathrm{I} + D^2 u_n(x)" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": ", the corresponding density function is defined as the determinant of the Hessian " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "\\rho_n = \\operatorname*{det}(H_n)" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": ", and the velocity of the Brenier potential is " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "v_n(x)" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": ". In the beginning, the Brenier potential " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "u_0(x)" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": " is zero, the Hessian is " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "H_0 = \\mathrm{I}" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": " and the density is " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "\\rho_0 = 1" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": ". 
At the " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": "-th step, we compute the adjoint matrix " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "[H_n^{pq}(x)]" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": " of the Hessian matrix " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "H_n(x)" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "x \\in \\Omega" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": ". According to Eqn. (3), the velocity " + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "inline_equation", + "content": "v_n(x)" + }, + { + "bbox": [ + 104, + 394, + 504, + 483 + ], + "type": "text", + "content": " satisfies the variant coefficient elliptic PDE induced by the linearized Monge-Ampère operator," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 188, + 489, + 504, + 519 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 489, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 188, + 489, + 504, + 519 + ], + "type": "interline_equation", + "content": "\\mathrm {D M A} _ {u _ {n}} [ v _ {n} ] = \\sum_ {p, q = 0} ^ {2} H _ {n} ^ {p q} (x) \\partial_ {p} \\partial_ {q} v _ {n} (x) = \\frac {1}{\\tau} \\left(f (x) - \\rho_ {n} (x)\\right). 
\\tag {10}", + "image_path": "1c294dc32cde2de47d6f3b75225f5e2118597d30c7b3d8144996c3624c727153.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 525, + 504, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 504, + 559 + ], + "type": "text", + "content": "Note that the right hand side of Eqn. (6) is the difference between the initial and the target densities, whereas here it is replaced by the difference between the initial and the current densities. The step length parameter " + }, + { + "bbox": [ + 104, + 525, + 504, + 559 + ], + "type": "inline_equation", + "content": "\\tau \\geq 1" + }, + { + "bbox": [ + 104, + 525, + 504, + 559 + ], + "type": "text", + "content": " can be chosen to guarantee the convergence Loepers & Rapetti (2005)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 563, + 504, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 504, + 620 + ], + "type": "text", + "content": "The elliptic PDE Eqn. (10) is with spatially variant coefficients. Although the traditional finite element method (FEM) can solve it using the GMRES algorithm Saad (2003), this algorithm can not be directly accelerated by GPUs. To overcome this difficulty, we approximate Eqn. (10) by a much simpler elliptic PDE with constant coefficients, which can be directly solved using the following FFT-OT algorithm pipeline Alg. 1 on GPUs in Appendix C." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 624, + 504, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 648 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 648 + ], + "type": "text", + "content": "At the " + }, + { + "bbox": [ + 104, + 624, + 504, + 648 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 624, + 504, + 648 + ], + "type": "text", + "content": "-th iteration, after obtaining the adjoint matrix " + }, + { + "bbox": [ + 104, + 624, + 504, + 648 + ], + "type": "inline_equation", + "content": "[H_n^{pq}(x)], x \\in \\Omega" + }, + { + "bbox": [ + 104, + 624, + 504, + 648 + ], + "type": "text", + "content": ", we compute the mean adjoint matrix " + }, + { + "bbox": [ + 104, + 624, + 504, + 648 + ], + "type": "inline_equation", + "content": "[\\bar{H}_n^{pq}(x)]" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 219, + 652, + 504, + 678 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 652, + 504, + 678 + ], + "spans": [ + { + "bbox": [ + 219, + 652, + 504, + 678 + ], + "type": "interline_equation", + "content": "\\bar {H} _ {n} ^ {p q} := \\frac {\\int_ {\\Omega} H _ {n} ^ {p q} (x) \\rho_ {n} (x) d x}{\\int_ {\\Omega} \\rho_ {n} (x) d x}, \\quad p, q = 0, 1, 2 \\tag {11}", + "image_path": "f1f0f7cc72912015953c2aaa511694b73ed53100f9681d3c76e67c8a23356f1f.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 683, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 683, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 683, + 504, + 704 + ], + "type": "text", + "content": "and replace the elliptic PDE Eqn.(10) with variant coefficients by the elliptic PDE with constant coefficients," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 195, + 705, + 504, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 705, + 504, + 
734 + ], + "spans": [ + { + "bbox": [ + 195, + 705, + 504, + 734 + ], + "type": "interline_equation", + "content": "\\overline {{\\mathrm {D M A}}} _ {u _ {n}} [ v _ {n} ] = \\sum_ {p, q = 0} ^ {2} \\bar {H} _ {n} ^ {p q} \\partial_ {p} \\partial_ {q} v _ {n} (x) = \\frac {1}{\\tau} (f (x) - \\rho_ {n} (x)), \\tag {12}", + "image_path": "e2639974ce48ed2fdee0d1011686332001c2124c490a9f78f6b3a07ba7c4f682.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 378, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 378, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 378, + 95 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 82, + 378, + 95 + ], + "type": "inline_equation", + "content": "\\overline{\\mathrm{DMA}}" + }, + { + "bbox": [ + 104, + 82, + 378, + 95 + ], + "type": "text", + "content": " is called the mean linearized Monge-Ampère operator." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 99, + 504, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 99, + 504, + 154 + ], + "spans": [ + { + "bbox": [ + 104, + 99, + 504, + 154 + ], + "type": "text", + "content": "Then we solve the constant coefficient elliptic PDE Eqn. (12) by FFT Algorithm Alg. 
2 in Appendix C. Although the original variant coefficient PDE Eqn. (10) is replaced by its constant coefficient approximation Eqn. (12), the algorithm still converges to the solution with a linear convergence rate. This replacement allows the whole algorithm to be solved by FFT on GPUs, which greatly improves the computational efficiency." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "content": "Theorem 3 (main). Given a domain " + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "inline_equation", + "content": "\\Omega \\subset \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "content": ", which is a canonical cuboid " + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "inline_equation", + "content": "\\Omega = [-1,1]^d" + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "content": ", and a positive density function " + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "inline_equation", + "content": "f:\\Omega \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "content": " with the balance condition " + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "inline_equation", + "content": "\\int_{\\Omega}f(x)dx = \\int_{\\Omega}dx" + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "content": ", suppose the mirror reflection extension Eqn. 
(14) of " + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "content": " to the flat torus " + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "inline_equation", + "content": "\\tilde{f}:\\mathbb{T}^n\\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "inline_equation", + "content": "C^\\alpha" + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "inline_equation", + "content": "\\alpha \\in (0,1)" + }, + { + "bbox": [ + 104, + 156, + 504, + 203 + ], + "type": "text", + "content": ", then the Monge-Ampère equation," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 232, + 203, + 377, + 216 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 203, + 377, + 216 + ], + "spans": [ + { + "bbox": [ + 232, + 203, + 377, + 216 + ], + "type": "interline_equation", + "content": "d e t D ^ {2} u (x) = f (x), \\quad \\nabla u (\\Omega) = \\Omega", + "image_path": "17faa13b74d26b8e02f2b63869d43a3f37c0e96e8f1db8780d9752a8aaf2affc.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "text", + "content": "can be solved using the FFT-OT Algorithm Alg. 1 in Appendix C. 
In particular, one can choose the step length parameter " + }, + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "text", + "content": ", such that there is a constant " + }, + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "inline_equation", + "content": "0 < \\gamma < 1" + }, + { + "bbox": [ + 104, + 217, + 504, + 251 + ], + "type": "text", + "content": " that the approximation error satisfies" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 263, + 250, + 503, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 250, + 503, + 262 + ], + "spans": [ + { + "bbox": [ + 263, + 250, + 503, + 262 + ], + "type": "interline_equation", + "content": "\\left\\| f - \\rho_ {n + 1} \\right\\| ^ {2} < C \\gamma^ {n}, \\tag {13}", + "image_path": "74ad0e51d0d69bda832ff00514f24aa5a2b0319a49d8d2aa26e09b0d748b2865.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 259, + 316, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 259, + 316, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 259, + 316, + 270 + ], + "type": "text", + "content": "namely the algorithm has a linear convergence rate." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 285, + 386, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 386, + 296 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 386, + 296 + ], + "type": "text", + "content": "3.2 FFT SOLVER FOR CONSTANT COEFFICIENT ELLIPTIC PDES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 306, + 504, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 350 + ], + "type": "text", + "content": "To solve the constant coefficient elliptic PDE Eqn. 
(12), we first extend the PDE to the flat torus by mirror reflection, then discretize the domain and compute the differential operators by central difference scheme. Finally the PDE is converted to algebraic equations in the frequency domain by FFT and can be efficiently solved on GPUs." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "text", + "content": "Extension by Mirror Reflection Suppose " + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "inline_equation", + "content": "\\Omega = [0,1]^3" + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "inline_equation", + "content": "f:\\Omega \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "text", + "content": " are given, we extend " + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "inline_equation", + "content": "\\tilde{\\Omega} = [-1,1]^3" + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "inline_equation", + "content": "\\tilde{f}:\\tilde{\\Omega}\\rightarrow \\mathbb{R}" + }, + { + "bbox": [ + 104, + 361, + 504, + 387 + ], + "type": "text", + "content": " by mirror reflection" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 219, + 389, + 504, + 403 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 389, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 219, + 389, + 504, + 403 + ], + "type": "interline_equation", + "content": "\\tilde {f} (x, y, z) = f (| x |, | y |, | z |), \\quad \\forall (x, y, z) \\in \\tilde {\\Omega}. \\tag {14}", + "image_path": "8c258b5080e9a00bf685c2e8144863e1f0cc033e53965fe0e781199243b642a3.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": "By definition, " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\tilde{f}" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": " satisfies the periodic boundary condition and can be treated as a function defined on the flat torus " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\mathbb{T}^3" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\tilde{\\Omega}" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": " is one of the fundamental domain of " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\mathbb{T}^3" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": ". The constant coefficients " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "a^{p,q}" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": " keep unchanged. Then we solve the following constant coefficient elliptic PDE Eqn. 
(18) " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "L[\\tilde{u}] = \\tilde{f}" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": " with the periodic boundary condition. Finally, the restriction of " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\tilde{u}" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": " gives the initial solution " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "inline_equation", + "content": "L[u] = f" + }, + { + "bbox": [ + 104, + 407, + 504, + 466 + ], + "type": "text", + "content": " with Neumann boundary condition." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 472, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 495 + ], + "type": "text", + "content": "In the following, to avoid using overly complicated symbols, we use " + }, + { + "bbox": [ + 104, + 472, + 504, + 495 + ], + "type": "inline_equation", + "content": "(u,f,\\Omega)" + }, + { + "bbox": [ + 104, + 472, + 504, + 495 + ], + "type": "text", + "content": " to represent " + }, + { + "bbox": [ + 104, + 472, + 504, + 495 + ], + "type": "inline_equation", + "content": "(\\tilde{u},\\tilde{f},\\tilde{\\Omega})" + }, + { + "bbox": [ + 104, + 472, + 504, + 495 + ], + "type": "text", + "content": " for simplicity." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "content": "Tessellation Suppose " + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\Omega = [-1,1]^3" + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "content": " is the canonical cube (a fundamental domain of a flat torus), we tessellate it to the regular cells, and the centers of the cells form a grid " + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "inline_equation", + "content": "M\\times N\\times L" + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "content": ". The Brenier potential " + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "inline_equation", + "content": "u:\\Omega \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "content": " is discretized to a tensor " + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "inline_equation", + "content": "u_{i,j,k}" + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\{i,j,k\\} \\in \\{0,\\dots ,M - 1\\} \\times \\{0,\\dots ,N - 1\\} \\times \\{0,\\dots ,L - 1\\}" + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "content": ". The spacial step lengths are " + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "inline_equation", + "content": "(h_x,h_y,h_z) = (2 / M,2 / N,2 / L)" + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "content": ". 
The coordinate of each sample point " + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "inline_equation", + "content": "(x_{i},y_{j},z_{k})" + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "inline_equation", + "content": "(x_{i},y_{j},z_{k}) = (-1 + h_{x}(i + 1 / 2), - 1 + h_{y}(j + 1 / 2), - 1 + h_{z}(k + 1 / 2))" + }, + { + "bbox": [ + 104, + 506, + 504, + 574 + ], + "type": "text", + "content": ". The periodic boundary condition is then formulated as" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 223, + 576, + 504, + 588 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 576, + 504, + 588 + ], + "spans": [ + { + "bbox": [ + 223, + 576, + 504, + 588 + ], + "type": "interline_equation", + "content": "u _ {i, j, k} = u _ {i + \\alpha M, j + \\beta N, k + \\gamma L}, \\quad \\alpha , \\beta , \\gamma \\in \\mathbb {Z}. \\tag {15}", + "image_path": "9d1046464461a98a2775143bb6508cd3db5540a17cd29d9cd26b04a33d7d7cc1.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 596, + 504, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 619 + ], + "type": "text", + "content": "Finite Difference Differential Operator We use the standard central differences to compute the differential operators. 
The first order derivative " + }, + { + "bbox": [ + 104, + 596, + 504, + 619 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_x" + }, + { + "bbox": [ + 104, + 596, + 504, + 619 + ], + "type": "text", + "content": " is approximated by" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 238, + 622, + 371, + 644 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 622, + 371, + 644 + ], + "spans": [ + { + "bbox": [ + 238, + 622, + 371, + 644 + ], + "type": "interline_equation", + "content": "\\mathcal {D} _ {x} u _ {i, j, k} = \\frac {u _ {i + 1 , j , k} - u _ {i - 1 , j , k}}{2 h _ {x}},", + "image_path": "1c14e4d1f1b7180a973d4415bd85bb4f4b73ba69680d1b77f21760d642a7e557.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "content": "where the index " + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "inline_equation", + "content": "i + 1" + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "content": " means " + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "inline_equation", + "content": "i + 1" + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "content": " modulus " + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "content": ". The operators " + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_y, \\mathcal{D}_z" + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "content": " are defined in a similar way. 
The second order derivative operator " + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{xx}" + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{xy}" + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "content": " are approximated by" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 233, + 673, + 394, + 695 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 673, + 394, + 695 + ], + "spans": [ + { + "bbox": [ + 233, + 673, + 394, + 695 + ], + "type": "interline_equation", + "content": "\\mathcal {D} _ {x x} ^ {2} u _ {i, j, k} = \\frac {u _ {i + 1 , j , k} + u _ {i - 1 , j , k} - 2 u _ {i , j , k}}{h _ {x} ^ {2}}", + "image_path": "b003a76e3a2ff18bd2fd8669d8265858112de5526dd4a6144268d9f9d19975bc.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 184, + 696, + 424, + 718 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 696, + 424, + 718 + ], + "spans": [ + { + "bbox": [ + 184, + 696, + 424, + 718 + ], + "type": "interline_equation", + "content": "\\mathcal {D} _ {x y} ^ {2} u _ {i, j, k} = \\frac {u _ {i + 1 , j + 1 , k} + u _ {i - 1 , j - 1 , k} - u _ {i + 1 , j - 1 , k} - u _ {i - 1 , j + 1 , k}}{4 h _ {x} h _ {y}}", + "image_path": "820f8897daf4c0e362dc3068e60dbaa55e123d33e141d96e81ab437d5d434319.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 720, + 371, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 371, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 371, + 734 + ], + "type": "text", + "content": "The other operators " + }, + { + "bbox": [ + 105, + 720, + 371, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{yy},\\mathcal{D}_{zz},\\mathcal{D}_{yz}" + }, + { + 
"bbox": [ + 105, + 720, + 371, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 720, + 371, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{xz}" + }, + { + "bbox": [ + 105, + 720, + 371, + 734 + ], + "type": "text", + "content": " are defined similarly." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "text", + "content": "Discrete Fourier Transformation The discrete Fourier transformation (DFT) of " + }, + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "inline_equation", + "content": "u_{i,j,k}" + }, + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 239, + 96, + 504, + 126 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 96, + 504, + 126 + ], + "spans": [ + { + "bbox": [ + 239, + 96, + 504, + 126 + ], + "type": "interline_equation", + "content": "\\hat {u} _ {m, n, l} = \\sum_ {i = 0} ^ {M - 1} \\sum_ {j = 0} ^ {N - 1} \\sum_ {k = 0} ^ {L - 1} u _ {i, j, k} \\hat {\\omega} _ {m n l} \\tag {16}", 
+ "image_path": "73a3fca30652ef5449c19b1f1f6e2e3215fe44f0575aa19d087676a225fe3d45.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 225, + 129, + 505, + 158 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 129, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 225, + 129, + 505, + 158 + ], + "type": "interline_equation", + "content": "u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\omega_ {m n l} \\tag {17}", + "image_path": "38f983f6153a80c9021901b6da4c7088f749276b92f12eb1f9ed6dc441d6e7e5.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "inline_equation", + "content": "\\hat{\\omega}_{mnl} = e^{-\\iota \\frac{2\\pi mi}{M}}e^{-\\iota \\frac{2\\pi nj}{N}}e^{-\\iota \\frac{2\\pi lk}{L}}" + }, + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "inline_equation", + "content": "\\omega_{mnl} = e^{\\iota \\frac{2\\pi mi}{M}}e^{\\iota \\frac{2\\pi nj}{N}}e^{\\iota \\frac{2\\pi lk}{L}}" + }, + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "inline_equation", + "content": "\\iota = \\sqrt{-1}" + }, + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "inline_equation", + "content": "\\{m,n,l\\}" + }, + { + "bbox": [ + 104, + 159, + 504, + 195 + ], + "type": "text", + "content": " are the indices of the frequency coefficients. 
By using DFT, the differential operators are converted to algebraic operators in the frequency domain." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 197, + 505, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 197, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 197, + 505, + 220 + ], + "type": "text", + "content": "Lemma 4. Suppose the discrete function is " + }, + { + "bbox": [ + 104, + 197, + 505, + 220 + ], + "type": "inline_equation", + "content": "u_{i,j,k}" + }, + { + "bbox": [ + 104, + 197, + 505, + 220 + ], + "type": "text", + "content": ", with the discrete Fourier transformation Eqn. (16) and Eqn. (17), by using the central difference scheme, the first order differential operator is given by" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 195, + 220, + 415, + 253 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 220, + 415, + 253 + ], + "spans": [ + { + "bbox": [ + 195, + 220, + 415, + 253 + ], + "type": "interline_equation", + "content": "\\mathcal {D} _ {x} u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {\\sin \\frac {2 \\pi m}{M}}{h _ {x}} \\omega_ {m n l}", + "image_path": "3946c318f27924af416157040568c69e3b3f2ded69455afa80da7007ba67cff1.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 255, + 339, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 339, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 339, + 266 + ], + "type": "text", + "content": "the second order differential operators are represented by" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 171, + 268, + 431, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 268, + 431, + 300 + ], + "spans": [ + { + "bbox": [ + 171, + 268, + 431, + 300 + ], + "type": "interline_equation", + "content": 
"\\mathcal {D} _ {x x} ^ {2} u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 (\\cos \\frac {2 \\pi m}{M} - 1)}{h _ {x} ^ {2}} \\omega_ {m n l}", + "image_path": "1379d4c0081b3dd2a6750c1f5bfad27a34df82474ede2ee04d155604dcc9d3a3.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 173, + 303, + 437, + 336 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 303, + 437, + 336 + ], + "spans": [ + { + "bbox": [ + 173, + 303, + 437, + 336 + ], + "type": "interline_equation", + "content": "\\mathcal {D} _ {x y} ^ {2} u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h _ {y}} \\omega_ {m n l}", + "image_path": "4341d64e93bfde414fe46da292ee9ed6a38f637f5f5fa5de81b54e20ec5b8ad0.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 342, + 504, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 366 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 366 + ], + "type": "text", + "content": "The other differential operators " + }, + { + "bbox": [ + 104, + 342, + 504, + 366 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_y, \\mathcal{D}_z, \\mathcal{D}_{yy}, \\mathcal{D}_{zz}, \\mathcal{D}_{yz}" + }, + { + "bbox": [ + 104, + 342, + 504, + 366 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 342, + 504, + 366 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{xz}" + }, + { + "bbox": [ + 104, + 342, + 504, + 366 + ], + "type": "text", + "content": " are also represented accordingly. The detailed proofs can be found in the supplementary material." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 376, + 487, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 487, + 389 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 487, + 389 + ], + "type": "text", + "content": "FFT Solver Suppose we want to solve an elliptic PDE with constant coefficients on " + }, + { + "bbox": [ + 104, + 376, + 487, + 389 + ], + "type": "inline_equation", + "content": "\\Omega \\subset \\mathbb{R}^3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 184, + 390, + 504, + 423 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 390, + 504, + 423 + ], + "spans": [ + { + "bbox": [ + 184, + 390, + 504, + 423 + ], + "type": "interline_equation", + "content": "L [ u ] := \\left(\\sum_ {p = 0} ^ {2} \\sum_ {q = 0} ^ {2} a ^ {p, q} \\partial_ {p} \\partial_ {q} + \\sum_ {r = 0} ^ {2} b ^ {r} \\partial_ {r} + c\\right) u (x) = f (x), \\tag {18}", + "image_path": "20f611b376a6fab66e97d7930ca543a52d9ab71e554610a55066227c15e78670.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 426, + 506, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 506, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 506, + 459 + ], + "type": "text", + "content": "with the periodic boundary condition, where " + }, + { + "bbox": [ + 104, + 426, + 506, + 459 + ], + "type": "inline_equation", + "content": "a^{p,q}, b^r, c" + }, + { + "bbox": [ + 104, + 426, + 506, + 459 + ], + "type": "text", + "content": " are constants, the matrix " + }, + { + "bbox": [ + 104, + 426, + 506, + 459 + ], + "type": "inline_equation", + "content": "(a^{p,q})" + }, + { + "bbox": [ + 104, + 426, + 506, + 459 + ], + "type": "text", + "content": " is positive definite, namely the PDE is uniformly elliptic. 
By the discrete Fourier transformation " + }, + { + "bbox": [ + 104, + 426, + 506, + 459 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 104, + 426, + 506, + 459 + ], + "type": "text", + "content": ", we convert the differential equation to an algebraic equation in the frequency domain," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 187, + 460, + 422, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 460, + 422, + 495 + ], + "spans": [ + { + "bbox": [ + 187, + 460, + 422, + 495 + ], + "type": "interline_equation", + "content": "\\sum_ {p = 0} ^ {2} \\sum_ {q = 0} ^ {2} a ^ {p, q} \\mathcal {F} (\\partial_ {p} \\partial_ {q} u) + \\sum_ {r = 0} ^ {2} b ^ {r} \\mathcal {F} (\\partial_ {r} u) + c \\mathcal {F} (u) = \\mathcal {F} (f)", + "image_path": "fc6719b17efd610a91ba21426485c5ba610ecc05a0bf2aa0239cb7eac8188d5b.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 501, + 251, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 501, + 251, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 501, + 251, + 513 + ], + "type": "text", + "content": "By applying Lemma 4 and defining" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 173, + 514, + 504, + 619 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 514, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 173, + 514, + 504, + 619 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\lambda_ {m, n, l} = a ^ {0, 0} \\frac {2 (\\cos \\frac {2 \\pi m}{M} - 1)}{h _ {x} ^ {2}} + a ^ {1, 1} \\frac {2 (\\cos \\frac {2 \\pi n}{N} - 1)}{h _ {y} ^ {2}} \\\\ + a ^ {2, 2} \\frac {2 (\\cos \\frac {2 \\pi l}{L} - 1)}{h _ {z} ^ {2}} - \\left(a ^ {0, 1} + a ^ {1, 0}\\right) \\frac {\\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h _ {y}} \\\\ - \\left(a ^ {1, 2} + a ^ {2, 1}\\right) \\frac {\\sin \\frac {2 \\pi n}{N} \\sin 
\\frac {2 \\pi l}{L}}{h _ {y} h _ {z}} - \\left(a ^ {0, 2} + a ^ {2, 0}\\right) \\frac {\\sin \\frac {2 \\pi l}{L} \\sin \\frac {2 \\pi m}{M}}{h _ {z} h _ {x}} \\\\ + b ^ {0} \\frac {\\sin \\frac {2 \\pi m}{M}}{h _ {x}} + b ^ {1} \\frac {\\sin \\frac {2 \\pi n}{N}}{h _ {y}} + b ^ {2} \\frac {\\sin \\frac {2 \\pi l}{L}}{h _ {z}} + c \\\\ \\end{array} \\tag {19}", + "image_path": "d61cf69ad04ddb74a3cafdd14f27bd9b06edf0e9bce07f76d3a895cc2eb48a94.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 620, + 324, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 620, + 324, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 324, + 632 + ], + "type": "text", + "content": "We have the algebraic equations in frequency domain," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 258, + 633, + 351, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 633, + 351, + 647 + ], + "spans": [ + { + "bbox": [ + 258, + 633, + 351, + 647 + ], + "type": "interline_equation", + "content": "\\hat {u} _ {m, n, l} \\lambda_ {m, n, l} = \\hat {f} _ {m, n, l}", + "image_path": "b7967fb37e51b2a9d408a8221c2158798e1d741abe69499f5e81d9b8c93e1ec5.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 648, + 505, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 648, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 648, + 505, + 682 + ], + "type": "text", + "content": "With " + }, + { + "bbox": [ + 104, + 648, + 505, + 682 + ], + "type": "inline_equation", + "content": "\\hat{u}_{m,n,l}" + }, + { + "bbox": [ + 104, + 648, + 505, + 682 + ], + "type": "text", + "content": "'s, we can easily obtain " + }, + { + "bbox": [ + 104, + 648, + 505, + 682 + ], + "type": "inline_equation", + "content": "u_{i,j,k}" + }, + { + "bbox": [ + 104, + 648, + 505, + 682 + ], + "type": "text", + "content": "'s by the Inverse Discrete Fourier Transform (IDFT), which means solving the 
constant coefficient elliptic equation. The algorithm is described in Alg. 2 in Appendix C." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": "The FFT for solving the constant coefficient elliptic PDE can be efficiently computed with GPUs. Moreover, the algorithm Alg. 2 solves the constant coefficient elliptic PDEs with a periodic boundary condition, which can be generalized to solving the same type of PDEs with Neumann boundary condition by extending the PDE to the flat torus " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathbb{T}^3" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": " using mirror reflection Eqn. (14)." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 257, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 257, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 257, + 94 + ], + "type": "text", + "content": "4 EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, 
+ 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 174 + ], + "type": "text", + "content": "In this section, we firstly show that our proposed FFT-OT algorithm converges linearly and runs " + }, + { + "bbox": [ + 104, + 106, + 506, + 174 + ], + "type": "inline_equation", + "content": "100 \\times" + }, + { + "bbox": [ + 104, + 106, + 506, + 174 + ], + "type": "text", + "content": " faster than the conventional convex geometry based solver Levy (2015), then demonstrate the method in two applications: 3D adaptive sampling and Volume Magnifier. All the algorithms are developed using generic C++ with CUDA Toolkit. All the experiments are conducted on a Windows laptop with Intel Core i7-7700HQ CPU with 16 GB memory and NVIDIA GeForce GTX 1060 Graphics Card. More experiments can be found in Appendix D." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 186, + 335, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 335, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 335, + 198 + ], + "type": "text", + "content": "4.1 RUNNING TIME AND CONVERGENCE ANALYSIS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": "To show the performance of the proposed method, we experiment on the density functions defined by the Gaussian mixture models. 
To be specific, the domain is a cube " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\Omega = [0,1]^3" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": ", the 3-dimensional density function defined on " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": " is set to be " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "f(x) = \\sum_{i=1}^{30} p_i \\mathcal{N}(\\mu_i, \\Sigma_i)" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mu_i, \\Sigma_i)" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": " represents Gaussian distribution with mean " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\mu_i" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": " and variance " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\Sigma_i = \\mathrm{diag}(\\sigma_{i0}^2, \\sigma_{i1}^2, \\sigma_{i2}^2)" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\mu_i \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": " is uniformly sampled from " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "[0,1]^3" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\sigma_{ij}" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": " is uniformly sampled from " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "[0,0.5]" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "p_i \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": " is uniformly sampled from " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "[0.2,1]" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": " and normalized such that " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\int_{\\Omega} f(x) dx = 1" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": ". Thus the source distribution " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": " is a complicated Gaussian mixture distribution restricted on " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": ". 
Then by mirror reflection in Sec. 3.2, we obtain the complex density function which is defined on " + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "inline_equation", + "content": "[-1,1]^3" + }, + { + "bbox": [ + 104, + 207, + 504, + 300 + ], + "type": "text", + "content": " and satisfies the periodic boundary condition." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "text", + "content": "We directly use the FFT-OT algorithm Alg. 1 to solve the linearized Monge-Ampère equation. With the approximation error threshold " + }, + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "inline_equation", + "content": "\\varepsilon = 1.0 \\times 10^{-6}" + }, + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "text", + "content": " and the resolution " + }, + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "inline_equation", + "content": "256 \\times 256 \\times 256" + }, + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "text", + "content": ", the running time for our FFT-OT algorithm with double precision on GPU is less than 175 seconds. The conventional convex geometry based algorithm for 3D optimal transportation Levy (2015) can neither handle such large data sets nor be implemented on GPUs. It can only compute OT map with resolution no greater than " + }, + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "inline_equation", + "content": "100 \\times 100 \\times 100" + }, + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "text", + "content": " on our system, which takes about 2700 seconds. 
When handling problem with " + }, + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "inline_equation", + "content": "128 \\times 128 \\times 128" + }, + { + "bbox": [ + 104, + 304, + 338, + 435 + ], + "type": "text", + "content": " resolution, our FFT-OT consumes about" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 345, + 300, + 503, + 422 + ], + "blocks": [ + { + "bbox": [ + 345, + 300, + 503, + 422 + ], + "lines": [ + { + "bbox": [ + 345, + 300, + 503, + 422 + ], + "spans": [ + { + "bbox": [ + 345, + 300, + 503, + 422 + ], + "type": "image", + "image_path": "3bdbb91d57bc03d187391b82d1f4bbac774ffb0ed2c127ee3cfab9ecf8513c36.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 421, + 485, + 431 + ], + "lines": [ + { + "bbox": [ + 362, + 421, + 485, + 431 + ], + "spans": [ + { + "bbox": [ + 362, + 421, + 485, + 431 + ], + "type": "text", + "content": "Figure 1: Convergence Analysis." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 436, + 457, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 457, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 457, + 448 + ], + "type": "text", + "content": "20.3 seconds, which is " + }, + { + "bbox": [ + 105, + 436, + 457, + 448 + ], + "type": "inline_equation", + "content": "130 \\times" + }, + { + "bbox": [ + 105, + 436, + 457, + 448 + ], + "type": "text", + "content": " faster than the power diagram based method Levy (2015)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 453, + 504, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 486 + ], + "type": "text", + "content": "Fig. 
1 shows the approximation error for the above Gaussian mixture density with respect to iterations, namely " + }, + { + "bbox": [ + 104, + 453, + 504, + 486 + ], + "type": "inline_equation", + "content": "\\log \\| f - \\rho_n\\| _2^2" + }, + { + "bbox": [ + 104, + 453, + 504, + 486 + ], + "type": "text", + "content": ". Our algorithm does converge linearly and the result is consistent with the prediction Eqn. (13) in Thm. 3. Therefore, this experiment validates the theorem." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 499, + 241, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 499, + 241, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 241, + 510 + ], + "type": "text", + "content": "4.2 3D ADAPTIVE SAMPLING" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 520, + 504, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 504, + 609 + ], + "type": "text", + "content": "Generating random samples matching a given density function plays an essential role in the applications like Monte-Carlo integration or stippling. Efficiently obtaining high quality samples is still an on-going research topic Bauer et al. (2015); Perrier et al. (2018). And optimal transportation has been successfully applied for generating high quality 2D samples de Goes et al. (2012); Nader & Guennebaud (2018). Most of the current research focuses on generating 2D samples fitting the given density function. Here we apply the proposed 3D FFT-OT method to generate high quality 3D samples according to the given complex density functions. To the best of our knowledge, it is the first work that uses OT to sample from 3D density functions." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": "Suppose the source probability distribution " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "d\\mu (x) = f(x)dx" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " is defined on " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "\\Omega = [0,1]^3" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "\\mu (\\Omega) = 1" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": ". The target distribution " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "d\\nu (y) = dy" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " is the uniform distribution. We use the FFT-OT algorithm Alg. 1 to compute the OT map " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "T:\\Omega \\to \\Omega ,T_{\\#}\\mu = \\nu" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": ". The domain is tessellated to a " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "256\\times 256\\times 256" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " grid. 
For each " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "x_{ijk},i,j,k\\in \\{0,1,\\ldots ,255\\}" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": ", the image " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "T(x_{ijk})" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " can be obtained. We use " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "\\{T(x_{ijk})\\}" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " as vertices to compute the Delaunay triangulation of " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": ". Then representing the OT map " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "T:(\\Omega ,\\mu)\\rightarrow (\\Omega ,\\nu)" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " as a piecewise linear map, the restriction of " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " on each tetrahedron is a linear map. Then the inverse OT map " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "T^{-1}:(\\Omega ,\\nu)\\to (\\Omega ,\\mu)" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " is also a piecewise linear map. Namely, given a grid point " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "y_{mnl}" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": ", we can find a tetrahedron containing it. 
Suppose the vertices of the tetrahedron are " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "\\{T(x_i),T(x_j),T(x_k),T(x_l)\\}" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "y_{mnl}" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " is computed as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 197, + 719, + 411, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 719, + 411, + 734 + ], + "spans": [ + { + "bbox": [ + 197, + 719, + 411, + 734 + ], + "type": "interline_equation", + "content": "y _ {m n l} = \\lambda_ {i} T (x _ {i}) + \\lambda_ {j} T (x _ {j}) + \\lambda_ {k} T (x _ {k}) + \\lambda_ {l} T (x _ {l}),", + "image_path": "b013e4a43366b34ea68fabae44b8de2908625a30385e9f579712811e1ebac939.jpg" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 113, + 79, + 170, + 137 + ], + "blocks": [ + { + "bbox": [ + 113, + 79, + 170, + 137 + ], + "lines": [ + { + "bbox": [ + 113, + 79, + 170, + 137 + ], + "spans": [ + { + "bbox": [ + 113, + 79, + 170, + 137 + ], + "type": "image", + 
"image_path": "9e23e79754394ea47c674701a280bdd55feaa52de2d09dfbac5d654f3479e131.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 113, + 137, + 170, + 192 + ], + "blocks": [ + { + "bbox": [ + 113, + 137, + 170, + 192 + ], + "lines": [ + { + "bbox": [ + 113, + 137, + 170, + 192 + ], + "spans": [ + { + "bbox": [ + 113, + 137, + 170, + 192 + ], + "type": "image", + "image_path": "6c4973590e35572795f31cbf0ccd3c7259833b41ad4e2dbbced9a255031e0226.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 120, + 194, + 162, + 204 + ], + "lines": [ + { + "bbox": [ + 120, + 194, + 162, + 204 + ], + "spans": [ + { + "bbox": [ + 120, + 194, + 162, + 204 + ], + "type": "text", + "content": "(a) Density" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 179, + 79, + 236, + 137 + ], + "blocks": [ + { + "bbox": [ + 179, + 79, + 236, + 137 + ], + "lines": [ + { + "bbox": [ + 179, + 79, + 236, + 137 + ], + "spans": [ + { + "bbox": [ + 179, + 79, + 236, + 137 + ], + "type": "image", + "image_path": "991c27da91d2b9a35751811c8d8b66d71b4f0b1d623541b6f4a316a83e509cc3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 179, + 137, + 235, + 192 + ], + "blocks": [ + { + "bbox": [ + 179, + 137, + 235, + 192 + ], + "lines": [ + { + "bbox": [ + 179, + 137, + 235, + 192 + ], + "spans": [ + { + "bbox": [ + 179, + 137, + 235, + 192 + ], + "type": "image", + "image_path": "1c460ddc7081f3eddd7743544cfbccc8449855610c14e1204499168b0c73fe58.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 182, + 194, + 231, + 204 + ], + "lines": [ + { + "bbox": [ + 182, + 194, + 231, + 204 + ], + "spans": [ + { + "bbox": [ + 182, + 194, + 231, + 204 + ], + "type": "text", + 
"content": "(b) Rejection" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 244, + 79, + 301, + 136 + ], + "blocks": [ + { + "bbox": [ + 244, + 79, + 301, + 136 + ], + "lines": [ + { + "bbox": [ + 244, + 79, + 301, + 136 + ], + "spans": [ + { + "bbox": [ + 244, + 79, + 301, + 136 + ], + "type": "image", + "image_path": "42274fe3238f11b799d712596f55048296cca366c58c386d20c9588fa5a82ca7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 244, + 137, + 301, + 192 + ], + "blocks": [ + { + "bbox": [ + 244, + 137, + 301, + 192 + ], + "lines": [ + { + "bbox": [ + 244, + 137, + 301, + 192 + ], + "spans": [ + { + "bbox": [ + 244, + 137, + 301, + 192 + ], + "type": "image", + "image_path": "6393df53e6e6e271b41246b6537f60061ba7c4fb93111daf4b4d0b10c4620dfc.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 258, + 194, + 287, + 204 + ], + "lines": [ + { + "bbox": [ + 258, + 194, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 258, + 194, + 287, + 204 + ], + "type": "text", + "content": "(c) MH" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 213, + 504, + 275 + ], + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 275 + ], + "type": "text", + "content": "Figure 2: 3D density function sampling. (a) The density functions in a slice. The slices in each row come from two different density functions. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). 
(e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with " + }, + { + "bbox": [ + 104, + 213, + 504, + 275 + ], + "type": "inline_equation", + "content": "T^{-1}" + }, + { + "bbox": [ + 104, + 213, + 504, + 275 + ], + "type": "text", + "content": ". (f) The sampling results by mapping the grid centers back with " + }, + { + "bbox": [ + 104, + 213, + 504, + 275 + ], + "type": "inline_equation", + "content": "T^{-1}" + }, + { + "bbox": [ + 104, + 213, + 504, + 275 + ], + "type": "text", + "content": ". The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 79, + 366, + 136 + ], + "blocks": [ + { + "bbox": [ + 310, + 79, + 366, + 136 + ], + "lines": [ + { + "bbox": [ + 310, + 79, + 366, + 136 + ], + "spans": [ + { + "bbox": [ + 310, + 79, + 366, + 136 + ], + "type": "image", + "image_path": "1386ac29405621587ac210b503a5dea958f573ac4514d9efc83e5c0d92511114.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 310, + 137, + 366, + 192 + ], + "blocks": [ + { + "bbox": [ + 310, + 137, + 366, + 192 + ], + "lines": [ + { + "bbox": [ + 310, + 137, + 366, + 192 + ], + "spans": [ + { + "bbox": [ + 310, + 137, + 366, + 192 + ], + "type": "image", + "image_path": "eff55229582a2e4bb9d8e713da05b6ad53348e1f5cfef7d1f2838fb12460df68.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 194, + 354, + 203 + ], + "lines": [ + { + "bbox": [ + 321, + 194, + 354, + 203 + ], + "spans": [ + { + "bbox": [ + 321, + 194, + 354, + 203 + ], + "type": "text", + "content": "(d) Slice" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + 
"bbox": [ + 376, + 80, + 432, + 136 + ], + "blocks": [ + { + "bbox": [ + 376, + 80, + 432, + 136 + ], + "lines": [ + { + "bbox": [ + 376, + 80, + 432, + 136 + ], + "spans": [ + { + "bbox": [ + 376, + 80, + 432, + 136 + ], + "type": "image", + "image_path": "d85bc0ba5e2c8b1233922b11ac9edf9eca3bde735bd03384e7b6bf4159723282.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 376, + 137, + 430, + 192 + ], + "blocks": [ + { + "bbox": [ + 376, + 137, + 430, + 192 + ], + "lines": [ + { + "bbox": [ + 376, + 137, + 430, + 192 + ], + "spans": [ + { + "bbox": [ + 376, + 137, + 430, + 192 + ], + "type": "image", + "image_path": "8e2babcbb74fefc8012dd3da3d63874b37804f4626866aa9648c765bad8df38a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 383, + 194, + 423, + 203 + ], + "lines": [ + { + "bbox": [ + 383, + 194, + 423, + 203 + ], + "spans": [ + { + "bbox": [ + 383, + 194, + 423, + 203 + ], + "type": "text", + "content": "(e) Ours-R" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 441, + 80, + 496, + 136 + ], + "blocks": [ + { + "bbox": [ + 441, + 80, + 496, + 136 + ], + "lines": [ + { + "bbox": [ + 441, + 80, + 496, + 136 + ], + "spans": [ + { + "bbox": [ + 441, + 80, + 496, + 136 + ], + "type": "image", + "image_path": "12b78ae2688e92da4208cb0e7c2c9586e51e58ef5e36438feac9884a3758799f.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 441, + 137, + 496, + 192 + ], + "blocks": [ + { + "bbox": [ + 441, + 137, + 496, + 192 + ], + "lines": [ + { + "bbox": [ + 441, + 137, + 496, + 192 + ], + "spans": [ + { + "bbox": [ + 441, + 137, + 496, + 192 + ], + "type": "image", + "image_path": "8a65f5c0f7af9a0785980ddfcfbb259e088777602f0ed19e24504b5b81ae417e.jpg" + } + ] + } + ], + 
"index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 449, + 194, + 488, + 203 + ], + "lines": [ + { + "bbox": [ + 449, + 194, + 488, + 203 + ], + "spans": [ + { + "bbox": [ + 449, + 194, + 488, + 203 + ], + "type": "text", + "content": "(f) Ours-G" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 295, + 504, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 319 + ], + "type": "text", + "content": "where the non-negative barycenter coordinates satisfy " + }, + { + "bbox": [ + 104, + 295, + 504, + 319 + ], + "type": "inline_equation", + "content": "\\lambda_{i} + \\lambda_{j} + \\lambda_{k} + \\lambda_{l} = 1" + }, + { + "bbox": [ + 104, + 295, + 504, + 319 + ], + "type": "text", + "content": ". Then the image of the inverse OT map is given by" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 220, + 323, + 504, + 336 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 323, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 220, + 323, + 504, + 336 + ], + "type": "interline_equation", + "content": "T ^ {- 1} \\left(y _ {m n l}\\right) = \\lambda_ {i} x _ {i} + \\lambda_ {j} x _ {j} + \\lambda_ {k} x _ {k} + \\lambda_ {l} x _ {l}. 
\\tag {20}", + "image_path": "111047af04e243017ad3d4f9ecd305654bd090c256b37ca19b02a42663f18c1b.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "text", + "content": "We generate random samples " + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\{y_k\\}" + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "text", + "content": " according to the uniform distribution " + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "text", + "content": ", then their images " + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\{T^{-1}(y_k)\\}" + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "text", + "content": " are the desired random samples following the distribution " + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 105, + 341, + 504, + 365 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": "In our experiment, we use the same Gaussian mixture settings of the density function as Sec. 4.1. Fig. 2 visualizes the generated samples. 
We randomly pick the " + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": "-th slice along the " + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": "-direction from the discretized volume, draw the source density function on this slice, and use pixel intensity to represent the density in Fig. 2(a). (i) We uniformly generate " + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "inline_equation", + "content": "100k" + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": " random samples " + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "inline_equation", + "content": "\\{y_k\\} \\subset \\Omega" + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": ", and obtain the desired random samples by applying the inverse OT map " + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "inline_equation", + "content": "\\{T^{-1}(y_k)\\}" + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": ". (ii) We also set " + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "inline_equation", + "content": "\\{y_k\\}" + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": " as the grid centers of " + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": " and obtain the corresponding samples of the desired distribution " + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": ". 
The samples around the " + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 369, + 504, + 447 + ], + "type": "text", + "content": "-th slice of both sampling strategies are plotted in Fig. 2(e) and Fig. 2(f)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 451, + 506, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 506, + 584 + ], + "type": "text", + "content": "By visual comparison, it is obvious that the distributions of Fig. 2(e) and Fig. 2(f) are consistent with the density function in Fig. 2(a). The consistency of the boundary of Fig. 2(e) and (f) and Fig. 2(a) also verifies the obliqueness boundary condition of the Monge-Ampère equation. To further show the performance of the proposed method, we compare it with the classical sampling methods, namely rejection sampling, the Metropolis-Hastings algorithm Bishop (2006) and the slice sampling Neal (2003), shown in Fig. 2(b), Fig. 2(c) and Fig. 2(d). To quantitatively compare the sampling results, we use the Chi-square goodness-of-fit test, which firstly groups the data and then computes the " + }, + { + "bbox": [ + 104, + 451, + 506, + 584 + ], + "type": "inline_equation", + "content": "L^2" + }, + { + "bbox": [ + 104, + 451, + 506, + 584 + ], + "type": "text", + "content": " norm of the difference between the actual number of observations in each group and the expected number of observations. In our experiment, we set the group number to " + }, + { + "bbox": [ + 104, + 451, + 506, + 584 + ], + "type": "inline_equation", + "content": "64 \\times 64 \\times 64" + }, + { + "bbox": [ + 104, + 451, + 506, + 584 + ], + "type": "text", + "content": " and use 500K samples to make the comparison. 
The corresponding " + }, + { + "bbox": [ + 104, + 451, + 506, + 584 + ], + "type": "inline_equation", + "content": "L^2" + }, + { + "bbox": [ + 104, + 451, + 506, + 584 + ], + "type": "text", + "content": " norm of each method is shown in the top-right of the corresponding figure. We can see that the both sampling strategies of our method give smaller scores than the classical ones." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 597, + 243, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 243, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 243, + 608 + ], + "type": "text", + "content": "4.3 VOLUMETRIC MAGNIFIER" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "text", + "content": "In reality, physical magnifiers can only magnify planar images. In medical image processing, it is highly desirable to magnify certain regions of the 3D MRIs or CT images. Our algorithm can address such requests with the user prescribed region of interest (ROI) and magnifying factor. Suppose the ROI is a symmetric region with the center " + }, + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "inline_equation", + "content": "(\\bar{x},\\bar{y},\\bar{z})\\in \\Omega" + }, + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "text", + "content": " and the radius " + }, + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "inline_equation", + "content": "\\sigma_x,\\sigma_y,\\sigma_z" + }, + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "text", + "content": " in different directions. 
The density function " + }, + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "text", + "content": " of the source measure " + }, + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 617, + 504, + 673 + ], + "type": "text", + "content": " is defined as" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 179, + 677, + 429, + 693 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 677, + 429, + 693 + ], + "spans": [ + { + "bbox": [ + 179, + 677, + 429, + 693 + ], + "type": "interline_equation", + "content": "f (x, y, z) = 0. 5 + 0. 5 e ^ {- ((x - \\bar {x}) ^ {2} / 2 \\sigma_ {x} ^ {2} + (y - \\bar {y}) ^ {2} / 2 \\sigma_ {y} ^ {2} + (z - \\bar {z}) ^ {2} / 2 \\sigma_ {z} ^ {2})}", + "image_path": "ac0d48aea746fffe59ddbac058fe35da8a47db76332624d7dbc21b74b72c9796.jpg" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "We compute OT map " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "T: (\\Omega, \\mu) \\to (\\Omega, \\nu)" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": " is the uniform distribution. 
Similar to the method in 3D adaptive sampling, we compute the Delaunay triangulation of the images " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\{T(x_{ijk})\\}" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": ", then the OT map " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": " is represented as a piecewise linear map. The inverse optimal transportation map" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 79, + 204, + 167 + ], + "blocks": [ + { + "bbox": [ + 116, + 79, + 204, + 167 + ], + "lines": [ + { + "bbox": [ + 116, + 79, + 204, + 167 + ], + "spans": [ + { + "bbox": [ + 116, + 79, + 204, + 167 + ], + "type": "image", + "image_path": "9617a55da512e830fdb2957b053afbf67cde99d45ee37404750cdf5b376f62f5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 178, + 504, + 218 + ], + "lines": [ + { + "bbox": [ + 104, + 178, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 104, + 178, + 504, + 218 + ], + "type": "text", + "content": "Figure 3: The volume magnifier of an aneurysm. 
The first column shows the original volumetric data, and the last three columns give the magnified data from the same viewpoints with different magnifying ratios. The yellow circle denotes the ROI/aneurysm. To obtain the results, we set " + }, + { + "bbox": [ + 104, + 178, + 504, + 218 + ], + "type": "inline_equation", + "content": "\\sigma = \\sigma_{x} = \\sigma_{y} = \\sigma_{z}" + }, + { + "bbox": [ + 104, + 178, + 504, + 218 + ], + "type": "text", + "content": ", and they are 0.83, 0.75 and 0.5 respectively." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 211, + 79, + 301, + 167 + ], + "blocks": [ + { + "bbox": [ + 211, + 79, + 301, + 167 + ], + "lines": [ + { + "bbox": [ + 211, + 79, + 301, + 167 + ], + "spans": [ + { + "bbox": [ + 211, + 79, + 301, + 167 + ], + "type": "image", + "image_path": "69a02b53691bc471fb821d5c6db1f725236ce6031bb0554dec9901a519abf12c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 309, + 79, + 398, + 167 + ], + "blocks": [ + { + "bbox": [ + 309, + 79, + 398, + 167 + ], + "lines": [ + { + "bbox": [ + 309, + 79, + 398, + 167 + ], + "spans": [ + { + "bbox": [ + 309, + 79, + 398, + 167 + ], + "type": "image", + "image_path": "386aab8fb7671eec7accf9774320c70703684277bb174009e9564739ebd35a58.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 406, + 79, + 494, + 167 + ], + "blocks": [ + { + "bbox": [ + 406, + 79, + 494, + 167 + ], + "lines": [ + { + "bbox": [ + 406, + 79, + 494, + 167 + ], + "spans": [ + { + "bbox": [ + 406, + 79, + 494, + 167 + ], + "type": "image", + "image_path": "f7423a5b99e94f5d341b8159bc08750b17410a0ccaf0730d033af287440fc51a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 
116, + 224, + 203, + 312 + ], + "blocks": [ + { + "bbox": [ + 116, + 224, + 203, + 312 + ], + "lines": [ + { + "bbox": [ + 116, + 224, + 203, + 312 + ], + "spans": [ + { + "bbox": [ + 116, + 224, + 203, + 312 + ], + "type": "image", + "image_path": "fdcd37e360c4edc5d7205291a179e64e74a004e8852cee7f98b9ecdc15549027.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 211, + 224, + 300, + 312 + ], + "blocks": [ + { + "bbox": [ + 211, + 224, + 300, + 312 + ], + "lines": [ + { + "bbox": [ + 211, + 224, + 300, + 312 + ], + "spans": [ + { + "bbox": [ + 211, + 224, + 300, + 312 + ], + "type": "image", + "image_path": "d3a86e9df0f9fb6cc6fd84a90e8d1778a943ac75a38ee32c4e3d8969df70c85c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 309, + 224, + 398, + 312 + ], + "blocks": [ + { + "bbox": [ + 309, + 224, + 398, + 312 + ], + "lines": [ + { + "bbox": [ + 309, + 224, + 398, + 312 + ], + "spans": [ + { + "bbox": [ + 309, + 224, + 398, + 312 + ], + "type": "image", + "image_path": "ebdff74f3bbc7c0e5b13f7d1bf88838e5fe7f7443c9a08fbce7813d4e1a0a048.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 406, + 224, + 494, + 312 + ], + "blocks": [ + { + "bbox": [ + 406, + 224, + 494, + 312 + ], + "lines": [ + { + "bbox": [ + 406, + 224, + 494, + 312 + ], + "spans": [ + { + "bbox": [ + 406, + 224, + 494, + 312 + ], + "type": "image", + "image_path": "3a119ac03a5c3ee978ad5e6cd7f6c4ada4884521e6cfa58455f65ee717b8c464.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 115, + 313, + 203, + 400 + ], + "blocks": [ + { + "bbox": [ + 115, + 313, + 203, + 400 + ], + "lines": [ + { + "bbox": [ + 115, + 313, + 203, + 400 + ], + "spans": [ + { + "bbox": [ + 115, + 
313, + 203, + 400 + ], + "type": "image", + "image_path": "b5c18de141438767f08b957306c042f2c43ef845f677c23d259a0301c1abc03b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 411, + 504, + 443 + ], + "lines": [ + { + "bbox": [ + 104, + 411, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 504, + 443 + ], + "type": "text", + "content": "Figure 4: The volume magnifier of the knee. The first row gives the original volumetric data with different ROIs denoted by the blue boxes from different viewpoints, and the second row shows the corresponding magnified results. In the experiments we set " + }, + { + "bbox": [ + 104, + 411, + 504, + 443 + ], + "type": "inline_equation", + "content": "\\sigma_{x} = \\sigma_{y} = \\sigma_{z} = 0.75" + }, + { + "bbox": [ + 104, + 411, + 504, + 443 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 211, + 313, + 300, + 400 + ], + "blocks": [ + { + "bbox": [ + 211, + 313, + 300, + 400 + ], + "lines": [ + { + "bbox": [ + 211, + 313, + 300, + 400 + ], + "spans": [ + { + "bbox": [ + 211, + 313, + 300, + 400 + ], + "type": "image", + "image_path": "7b8683c61289f9240fae83afa24973daf767edbc881a65835aed49339dc711b1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 313, + 398, + 400 + ], + "blocks": [ + { + "bbox": [ + 309, + 313, + 398, + 400 + ], + "lines": [ + { + "bbox": [ + 309, + 313, + 398, + 400 + ], + "spans": [ + { + "bbox": [ + 309, + 313, + 398, + 400 + ], + "type": "image", + "image_path": "5e2ab732a05ed454bef976b47f9fba3031bd402970ee1fba72556b924a40eb13.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 406, + 313, + 494, + 400 + ], + "blocks": [ + { + "bbox": [ + 
406, + 313, + 494, + 400 + ], + "lines": [ + { + "bbox": [ + 406, + 313, + 494, + 400 + ], + "spans": [ + { + "bbox": [ + 406, + 313, + 494, + 400 + ], + "type": "image", + "image_path": "6447c7ba8e3eff44e23870d8d42c9cf37c5a1d7ecfa3d6fa64a0ce607cbb3218.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "inline_equation", + "content": "T^{-1}:(\\Omega ,\\nu)\\to (\\Omega ,\\mu)" + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "text", + "content": " is also piecewise linear. For each grid point " + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "inline_equation", + "content": "y_{mnl}\\in \\Omega" + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "text", + "content": " we use Eqn. (20) to find its pre-image. Similarly, its corresponding intensity " + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "inline_equation", + "content": "I_{mnl}" + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "text", + "content": " is computed by linear interpolation. Then we obtain the new volumetric data " + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "inline_equation", + "content": "\\{I_{mnl}\\}" + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "text", + "content": " with the magnified ROI and visualize the result with Voreen Meyer-Spradow et al. (2009)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 508, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 508, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 508, + 506, + 608 + ], + "type": "text", + "content": "Fig. 3 demonstrates our volumetric magnifier by magnifying an aneurysm on blood vessel Hansen & Johnson (2004). 
We choose the aneurysm region as the ROI. The first column gives the snapshot of the blood vessel, and the yellow circle denotes the location of the aneurysm. The last three columns show the magnified aneurysm with different magnifying ratio from the same viewpoints. Moreover, we show the magnified volumetric knee from different viewpoints with different ROIs denoted by the blue boxes in Fig. 4. Our method only magnifies the ROIs and keeps other regions unchanged. Compared with the traditional method requiring tedious zoom in/out, our method only magnifies the ROI region and keeps the whole subject in the field of view, which enables doctors to visualize the overall anatomy while scrutinize detailed anatomical structure at the same time." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 627, + 195, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 195, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 195, + 639 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": "In this paper, we propose the FFT-OT method to solve the optimal transportation problem. According to the Brenier theory, under the quadratic distance cost, finding the solution to the OT problem is equivalent to solving the Monge-Ampère equation, which can be linearized as a sequence of variant coefficient elliptic PDEs. Later, the variant coefficient PDEs are approximated by constant coefficient PDEs and solved by Fast Fourier Transformation. We also prove that the proposed method converges linearly. Experiments on volumetric data show that the FFT-OT can be used to sample from complex 3D density functions and magnify the volumetric data in medical images." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 221, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 221, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 221, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 506, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 140 + ], + "type": "text", + "content": "This research was partially supported by National Key R&D Program of China 2021YFA1003003 and NSFC No. 61936002, T2225012. This work was also partially supported by NIH 3R01LM012434-05S1, 1R21EB029733-01A1, NSF FAIN-2115095 and NSF CMMI-1762287." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 156, + 176, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 176, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 176, + 168 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 175, + 505, + 734 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 175, + 505, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 505, + 210 + ], + "type": "text", + "content": "Mokhtar Z. Alaya, Maxime Berar, Gilles Gasso, and Alain Rakotomamonjy. Screening sinkhorn algorithm for regularized optimal transport. In Advances in Neural Information Processing Systems 32, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 216, + 504, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 216, + 504, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 504, + 242 + ], + "type": "text", + "content": "Jose I. Aliaga, Ernesto Dufrechou, Pablo Ezzatti, and Enrique S. Quintana-Orti. An efficient gpu version of the preconditioned gmres method. The Journal of Supercomputing, 75, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 247, + 504, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 504, + 282 + ], + "type": "text", + "content": "Jason Altschuler, Jonathan Niles-Weed, and Philippe Rigollet. Near-linear time approximation algorithms for optimal transport via sinkhorn iteration. In Advances in Neural Information Processing Systems 30, 2017." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 288, + 505, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 288, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 505, + 346 + ], + "type": "text", + "content": "Jason Altschuler, Francis Bach, Alessandro Rudi, and Jonathan Niles-Weed. Massively scalable sinkhorn distances via the nystrom method. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper/2019/file/f55cadb97eaff2ba1980e001b0bd9842-Paper.pdf." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 351, + 505, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 351, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 505, + 387 + ], + "type": "text", + "content": "Dongsheng An, Yang Guo, Na Lei, Zhongxuan Luo, Shing-Tung Yau, and Xianfeng Gu. Ae-ot: A new generative model based on extended semi-discrete optimal transport. In International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 393, + 504, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 393, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 393, + 504, + 418 + ], + "type": "text", + "content": "Dongsheng An, Na Lei, and Xianfeng Gu. Efficient optimal transport algorithm by accelerated gradient descent. In The Thirty-Sixth AAAI Conference on Artificial Intelligence (AAAI), 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 423, + 504, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 423, + 504, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 423, + 504, + 448 + ], + "type": "text", + "content": "Martin Arjovsky, Soumith Chintala, and Léon Bottou. 
Wasserstein generative adversarial networks. In ICML, pp. 214-223, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 454, + 504, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 454, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 454, + 504, + 479 + ], + "type": "text", + "content": "F. Aurenhammer, F. Hoffmann, and B. Aronov. Minkowski-type theorems and least-squares clustering. Algorithmica, 1998." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 484, + 504, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 484, + 504, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 504, + 509 + ], + "type": "text", + "content": "Martin Bauer, Sarang Joshi, and Klas Modin. Diffeomorphic density matching by optimal information transport. SIAM Journal on Imaging Sciences, 8, 2015." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 514, + 504, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 504, + 548 + ], + "type": "text", + "content": "J.D. Benamou, Y. Brenier, and K. Guittet. The Monge-Kantorovitch mass transfer and its computational fluid mechanics formulation. International Journal for Numerical Methods in Fluids, 2002." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 555, + 504, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 504, + 581 + ], + "type": "text", + "content": "Jean-David Benamou, Brittany D. Froese, and Adam M. Oberman. Numerical solution of the optimal transportation problem using the monge-ampère equation. J. Comput. Phys, 2014." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 586, + 447, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 447, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 447, + 600 + ], + "type": "text", + "content": "Christopher M. Bishop. Pattern Recognition and Machine Learning. Springer, 2006." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 606, + 504, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 504, + 631 + ], + "type": "text", + "content": "Y. Brenier. Polar decomposition and increasing rearrangement of vector fields. C. R. Acad. Sci. Paris Sr. I Math., 305(19):805-808, 1987." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 636, + 504, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 636, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 504, + 662 + ], + "type": "text", + "content": "Y. Brenier. Polar factorization and monotone rearrangement of vector-valued functions. Comm. Pure Appl. Math., 44(4):375-417, 1991." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 667, + 504, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 504, + 701 + ], + "type": "text", + "content": "Dario Cordero-Erausquin. Sur le transport de mesures periodiques monotone maps preserving periodic measures. Comptes Rendus de l'Académie des Sciences - Series I - Mathematics, 329: 199-202, 1999." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 708, + 504, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 734 + ], + "type": "text", + "content": "N. Courty, R. Flamary, D. Tuia, and A. Rakotomamonjy. 
Optimal transport for domain adaptation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 39(9):1853-1865, 2017." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + "content": "Marco Cuturi. Sinkhorn distances: Lightspeed computation of optimal transportation distances. In International Conference on Neural Information Processing Systems, 2013." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 504, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 504, + 135 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 504, + 135 + ], + "type": "text", + "content": "F. de Goes, K. Breeden, V. Ostromoukhov, and M. Desbrun. Blue noise through optimal transport. ACM Trans. Graph. (SIGGRAPH Asia), 31, 2012." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 140, + 504, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 504, + 174 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 504, + 174 + ], + "type": "text", + "content": "Philippe Delanoë. Classical solvability in dimension two of the second boundary-value problem associated with the Monge-Ampère operator. Annales de l'I.H.P. Analyse non linéaire, 8(5): 443-457, 1991." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 180, + 504, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 180, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 504, + 215 + ], + "type": "text", + "content": "Pavel Dvurechensky, Alexander Gasnikov, and Alexey Kroshnin. Computational optimal transport: Complexity by accelerated gradient descent is better than by sinkhorn's algorithm. In Proceedings of the 35th International Conference on Machine Learning. PMLR, 2018." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 220, + 504, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 504, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 504, + 244 + ], + "type": "text", + "content": "Suli Endre. Lecture Notes on Finite Element Methods for Partial Differential Equations. University of Oxford, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 250, + 504, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 250, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 504, + 285 + ], + "type": "text", + "content": "David Xianfeng Gu, Feng Luo, Jian Sun, and Shing-Tung Yau. Variational principles for minkowski type problems, discrete optimal transport, and discrete monge-ampère equations. *Asian Journal of Mathematics*, 2016." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 290, + 471, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 290, + 471, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 471, + 303 + ], + "type": "text", + "content": "Charles D. Hansen and Chris R. Johnson. Visualization Handbook. Academic Press, 2004." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 308, + 504, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 308, + 504, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 504, + 333 + ], + "type": "text", + "content": "Jun Kitagawa, Quentin Mérigot, and Boris Thibert. Convergence of a newton algorithm for semi-discrete optimal transport. Journal of the European Mathematical Society, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 338, + 506, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 338, + 506, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 506, + 362 + ], + "type": "text", + "content": "Na Lei and Xianfeng Gu. Fft-ot: A fast algorithm for optimal transportation. In Proceedings of International Conference on Computer Vision (ICCV), 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 368, + 504, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 368, + 504, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 504, + 391 + ], + "type": "text", + "content": "Bruno Levy. A numerical algorithm for 12 semi-discrete optimal transport in 3d. ESAIM: M2AN, 49 (6):1693-1715, 2015." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 397, + 504, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 397, + 504, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 397, + 504, + 421 + ], + "type": "text", + "content": "Gregorire Loeper and Francesca Rapetti. 
Numerical solution of the monge-ampère equation by a newton's algorithm. C. R. Acad. Paris, pp. 319-324, 2005." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 426, + 504, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 504, + 449 + ], + "type": "text", + "content": "Robert J. McCann. A convexityprincipleforinteractinggases. Advances in mathematics, 128:153-179, 1997." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 456, + 495, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 495, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 495, + 468 + ], + "type": "text", + "content": "Quentin Merigot. A multiscale approach to optimal transport. Computer Graphics Forum., 2011." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 474, + 504, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 474, + 504, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 474, + 504, + 508 + ], + "type": "text", + "content": "Jennis Meyer-Spradow, Timo Ropinski, Jörg Mensmann, and Klaus H. Hinrichs. Voreen: A rapid-prototyping environment for ray-casting-based volume visualizations. IEEE Computer Graphics and Applications, 2009." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 514, + 504, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 504, + 537 + ], + "type": "text", + "content": "Georges Nader and Gael Guennebaud. Instant transport maps on 2d grids. ACM Trans. Graph., 37 (6), 2018." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 544, + 369, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 369, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 369, + 556 + ], + "type": "text", + "content": "Radford M. Neal. Slice sampling. The Annals of Statistics, 2003." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 562, + 504, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 562, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 504, + 586 + ], + "type": "text", + "content": "Nicolas Papadakis, Gabriel Peyre, and Edouard Oudet. Optimal transport with proximal splitting. SIAM Journal on Imaging Sciences, 2014." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 591, + 504, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 591, + 504, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 504, + 615 + ], + "type": "text", + "content": "Hélène Perrier, David Coeurjolly, Feng Xie, Matt Pharr, Pat Hanrahan, and VictorOstromoukhov. Sequences with low-discrepancy blue-noise 2-d projections. Computer Graphics Forum, 2018." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 620, + 504, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 620, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 504, + 644 + ], + "type": "text", + "content": "Gabriel Peyre and Marco Cuturi. Computational optimal transport. Found. Trends Mach. Learn., 11 (5-6):355-607, 2019." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 650, + 504, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 650, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 504, + 673 + ], + "type": "text", + "content": "Yousef Saad. Iterative Methods For Sparse Linear Systems. Society of Industrial and Applied Mathematics, 2003." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 679, + 457, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 457, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 457, + 693 + ], + "type": "text", + "content": "Filippo Santambrogio. Optimal Transport for Applied Mathematicians. Springer, 2015." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 698, + 504, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 731 + ], + "type": "text", + "content": "Louis-Philippe Saumier, Martial Agueh, and Boualem Khouider. An efficient numerical algorithm for the " + }, + { + "bbox": [ + 105, + 698, + 504, + 731 + ], + "type": "inline_equation", + "content": "l^2" + }, + { + "bbox": [ + 105, + 698, + 504, + 731 + ], + "type": "text", + "content": " optimal transport problem with periodic densities. IMA Journal of Applied Mathematics, 80:135-157, 2013." 
+ } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 264 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Yuliy Schwartzburg, Romain Testuz, Andrea Tagliasacchi, and Mark Pauly. High-contrast computational caustic design. ACM Trans. Graph., 33(4), July 2014. ISSN 0730-0301. doi: 10.1145/2601097.2601200. URL https://doi.org/10.1145/2601097.2601200." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 504, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 504, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 504, + 157 + ], + "type": "text", + "content": "Justin Solomon, Fernando de Goes, Gabriel PeyrÅ, Marco Cuturi, Adrian Butscher, Andy Nguyen, Tao Du, and Leonidas Guibas. Convolutional Wasserstein distances: Efficient optimal transportation on geometric domains. ACM Transactions on Graphics (TOG), 2015." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "type": "text", + "content": "Kehua Su, Wei Chen, Na Lei, Junwei Zhang, Kun Qian, and Xianfeng Gu. Volume preserving mesh parameterization based on optimal mass transportation. Comput. Aided Des., 82:42-56, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 193, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 193, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 504, + 217 + ], + "type": "text", + "content": "Ilya Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Schoelkopf. Wasserstein auto-encoders. In ICLR, 2018." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 356, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 356, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 356, + 236 + ], + "type": "text", + "content": "Cédric Villani. Topics in Optimal transportation. AMS, 2003." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 242, + 504, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 504, + 264 + ], + "type": "text", + "content": "Cédric Villani. Optimal transport: old and new, volume 338. Springer Science & Business Media, 2008." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 213, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 213, + 92 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 213, + 92 + ], + "type": "text", + "content": "A RELATED WORK" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 110, + 504, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 110, + 504, + 131 + ], + "spans": [ + { + "bbox": [ + 107, + 110, + 504, + 131 + ], + "type": "text", + "content": "There is a huge literature about optimal transportation. Here we will only briefly review the most related works. For detailed reviews, we refer readers to Santambrogio (2015); Peyre & Cuturi (2019)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 137, + 504, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 137, + 504, + 280 + ], + "spans": [ + { + "bbox": [ + 107, + 137, + 504, + 280 + ], + "type": "text", + "content": "The first type of algorithms is based on the Kantorovich theory. When both the input and output domains are Dirac masses, the Kantorovich problem can be treated as a standard linear programming (LP) task. 
In order to tackle large data sets, Cuturi (2013) adds an entropic regularizer to the original LP problem and the regularized problem can be quickly solved by the Sinkhorn algorithm. Recently, various algorithms have been proposed to further accelerate the computation by improving the efficiency of matrix-vector multiplications, including the Greenkhorn Altschuler et al. (2017), Sreenkhorn Alaya et al. (2019) and the NYS-SINK Altschuler et al. (2019) algorithms. Dvurechensky et al. Dvurechensky et al. (2018) also propose the adaptive primal-dual accelerated gradient descent algorithm (APDAGD) to solve the discrete OT problem. An et al. An et al. (2022) compute the approximate OT plan by smoothing the dual Kantorovich problem and solving it with the FISTA method. This kind of methods have limitations: (i) they only give transport plans and cannot produce the bijective transportation maps; and (ii) the computational complexity is too high to apply them in the scenarios with huge number of samples." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 285, + 504, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 285, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 107, + 285, + 504, + 407 + ], + "type": "text", + "content": "The second type of algorithms is based on the Brenier theory Brenier (1987) and its intrinsic connection with convex geometry Gu et al. (2016). The semi-discrete OT algorithm proposed in Aurenhammer et al. (1998) finds the transport map between a continuous distribution and a discrete measure via a variational approach by dynamically constructing the power diagrams. Its efficiency can be further improved Levy (2015); Merigot (2011) by the multi-resolution strategy. The algorithms proposed in Kitagawa et al. (2019); Su et al. (2017) also improve the efficiency by applying the Newton's method. When both the source and target measures are continuous, some interpolation methods are necessary Schwartzburg et al. 
(2014). The major drawback of this type of algorithms is the high computational complexity of constructing the dynamic power diagram, which prevents them from handling high dimensional tasks. For example, for the 3D OT problems, these algorithms usually run very slow." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 412, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 412, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 107, + 412, + 504, + 467 + ], + "type": "text", + "content": "The third type of algorithms is based on computational fluid dynamics Benamou et al. (2002); Papadakis et al. (2014). These methods aim at finding a special temporal-spacial flow field that transports the initial source density to the target density with the minimal total kinetic energy. Then the diffeomorphism induced by the flow gives the optimal transport map under the quadratic Euclidean distance cost. However, this kind of algorithms are difficult to extend to high dimensional space." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 473, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 473, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 107, + 473, + 504, + 693 + ], + "type": "text", + "content": "The fourth type of algorithms directly solve the Monge-Ampère equation using numerical methods. Loeper and Rapetti Loeper & Rapetti (2005) propose to solve the linearized Monge-Ampère equation defined on a flat torus in each iteration. Its corresponding variant coefficient elliptic PDE is converted to a positive definite linear system using the finite-difference scheme, which can be solved by the BiCG algorithm Endre (2020). Benamou et al. Benamou et al. (2014) propose to solve the linearized Monge-Ampère on more general domains using Newton's method. 
Nader and Guennebaud Nader & Guennebaud (2018) apply the similar discretization strategy and solve the Monge-Ampère equation by conjugate gradient method. Saumier et al. Saumier et al. (2013) propose to solve the linearized Monge-Ampère equation using FFT. In each iteration the elliptic PDE with spacial and temporal variant coefficients is converted to a group of linear equations in the frequency domain, which is solved by the GMRES algorithm. Although the GMRES algorithm can be implemented on GPUs Aliaga et al. (2019), there is no available open source code. The work in Saumier et al. (2013) focuses on periodic boundary condition, but this our proposed work focuses on general second boundary condition; the work in Saumier et al. (2013) concerns planar OT maps, ours emphasizes on volumetric OT maps, which has higher complexity. The work in Saumier et al. (2013) can handle more general target measures, the proposed work currently only deals with the Lebesgue target measure. Nevertheless, the current work can be directly generalized to handle general target measures as well. Lei and Gu Lei & Gu (2021) use the fixed point method to compute the 2-dimensional OT problem based on FFT, but it cannot be extended to solve the 3-dimensional problems." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 698, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 698, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 698, + 504, + 731 + ], + "type": "text", + "content": "In this work, we combine the idea of linearizing the Monge-Ampère equation Loeper & Rapetti (2005) and the idea of FFT Saumier et al. (2013). The key novelty of our proposed method is to use the mean linearized Monge-Ampère operator Eqn. 
(12) to replace the conventional linearized" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "Monge-Ampere operator Eqn. (10). This replacement allows the algorithm to be implemented on GPUs and makes the algorithm hundreds of times faster. In the following, we compute the 3-dimensional optimal transport problem by applying the proposed algorithm. Our method also runs more than " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "100 \\times" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " faster than the convex geometry based method Levy (2015)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 142, + 228, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 142, + 228, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 228, + 154 + ], + "type": "text", + "content": "B APPENDIX THEORY" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 166, + 504, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 166, + 504, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 166, + 504, + 200 + ], + "type": "text", + "content": "In the section, we give the detailed proofs for several lemmas and theorems. Some of them are well known in the Monge-Ampère PDE field and the applied mathematics field, we include them for the completeness." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 213, + 455, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 455, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 455, + 236 + ], + "type": "text", + "content": "B.1 EXISTENCE OF THE SOLUTION TO THE TIME DEPENDENT MONGE-AMPERE EQNUATION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "inline_equation", + "content": "\\mathbb{T}^n = \\mathbb{R}^n / \\mathbb{Z}^n" + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "content": " be the " + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "content": "-dimensional flat torus. 
Below we sometimes identify it with " + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "inline_equation", + "content": "\\Omega = [0,1]^n" + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "content": " and assume all data are periodic. The existence and regularity of solutions to the Monge-Ampère equation are given by the following theorem," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "text", + "content": "Theorem 5. Suppose a positive density function " + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "inline_equation", + "content": "f: \\Omega \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "text", + "content": " is defined on " + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "inline_equation", + "content": "\\Omega = [0,1]^n" + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "inline_equation", + "content": "\\int_{\\Omega} f(x) dx = 1" + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "inline_equation", + "content": "f \\in C^{\\alpha}(\\Omega)" + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "text", + "content": ", then the solution " + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "inline_equation", + "content": "u: \\Omega \\times [0,1]" + }, + { + "bbox": [ + 104, + 279, + 505, + 313 + ], + "type": "text", + "content": " to the time-dependent Monge-Ampère equation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 193, + 312, + 503, + 325 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 193, + 312, + 503, + 325 + ], + "spans": [ + { + "bbox": [ + 193, + 312, + 503, + 325 + ], + "type": "interline_equation", + "content": "\\det D _ {x} ^ {2} u (x, t) = (1 - t) + t f (x), \\quad \\nabla_ {x} u (x, t) (\\Omega) = \\Omega \\tag {21}", + "image_path": "bbbc5d0e06855c151e06b36a80c5f98d1c91e12880523860ac41a5c0d4e0e28d.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 326, + 484, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 484, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 484, + 338 + ], + "type": "text", + "content": "exists and is unique up to a constant. Furthermore, there exist constants " + }, + { + "bbox": [ + 104, + 326, + 484, + 338 + ], + "type": "inline_equation", + "content": "0 < \\lambda < \\Lambda" + }, + { + "bbox": [ + 104, + 326, + 484, + 338 + ], + "type": "text", + "content": ", such that" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 149, + 340, + 504, + 373 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 340, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 149, + 340, + 504, + 373 + ], + "type": "interline_equation", + "content": "\\lambda \\sum_ {p = 1} ^ {n} \\xi_ {p} ^ {2} \\leq \\sum_ {p, q = 1} ^ {n} u ^ {p q} (x, t) \\xi_ {p} \\xi_ {q} \\leq \\Lambda \\sum_ {p = 1} ^ {n} \\xi_ {p} ^ {2}, \\quad \\forall \\xi \\in \\mathbb {R} ^ {n}, \\forall (x, t) \\in \\Omega \\times [ 0, 1 ]. \\tag {22}", + "image_path": "f2e51c22a2b269c4c13aab9d8b586a727145068ded305922e749de3e038f9f99.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 380, + 369, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 369, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 369, + 392 + ], + "type": "text", + "content": "We refer readers to Cordero-Erasquin (1999) for detailed proof." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 404, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 404, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 404, + 504, + 437 + ], + "type": "text", + "content": "Weak Solution In practice, we compute the weak solution of the linearized Monge-Ampère Eqn. (6) using numerical methods. We first rewrite the differential operator to a divergence form, then define a bi-linear form." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 442, + 450, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 442, + 450, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 442, + 450, + 455 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 442, + 450, + 455 + ], + "type": "inline_equation", + "content": "(u^{pq}(x,t))" + }, + { + "bbox": [ + 104, + 442, + 450, + 455 + ], + "type": "text", + "content": " is the adjoint matrix of " + }, + { + "bbox": [ + 104, + 442, + 450, + 455 + ], + "type": "inline_equation", + "content": "D_x^2 u(x,t)" + }, + { + "bbox": [ + 104, + 442, + 450, + 455 + ], + "type": "text", + "content": ", by direct computation, we obtain" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 179, + 458, + 504, + 490 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 458, + 504, + 490 + ], + "spans": [ + { + "bbox": [ + 179, + 458, + 504, + 490 + ], + "type": "interline_equation", + "content": "\\sum_ {p = 1} ^ {n} \\partial_ {p} u ^ {p q} (x, t) = 0, \\quad \\forall (x, t) \\in \\Omega \\times [ 0, 1 ], \\quad \\forall q = 1, \\dots , n. 
\\tag {23}", + "image_path": "ae1dae614542153f995fbc153de7e8cfdb319e9c5d559f54a389eaa17eeb4b82.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 492, + 324, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 324, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 324, + 504 + ], + "type": "text", + "content": "so Eqn. (6) can be converted into the divergence form:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 506, + 477, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 506, + 477, + 540 + ], + "spans": [ + { + "bbox": [ + 132, + 506, + 477, + 540 + ], + "type": "interline_equation", + "content": "\\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) = \\sum_ {p, q = 1} ^ {n} u ^ {p q} \\partial_ {p} \\partial_ {q} v + \\sum_ {q = 1} ^ {n} \\left(\\sum_ {p = 1} ^ {n} \\partial_ {p} u ^ {p q}\\right) \\partial_ {q} v = \\sum_ {p, q = 1} ^ {n} u ^ {p q} \\partial_ {p} \\partial_ {q} v,", + "image_path": "5a3bb320a744e7df197a8fd7c7b507897d52ce21946b22079e1fbeaaa42db34c.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 544, + 147, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 147, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 147, + 553 + ], + "type": "text", + "content": "we obtain" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 210, + 551, + 504, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 551, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 210, + 551, + 504, + 583 + ], + "type": "interline_equation", + "content": "\\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} (x, t) \\partial_ {q} v (x, t)\\right) = f (x) - 1. 
\\tag {24}", + "image_path": "79db18f475d5df7f165f53f2547a35f16074fbfa64d155d5f4b04e87d4934a69.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 585, + 248, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 248, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 248, + 596 + ], + "type": "text", + "content": "with Neumann boundary condition" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 226, + 599, + 504, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 599, + 504, + 622 + ], + "spans": [ + { + "bbox": [ + 226, + 599, + 504, + 622 + ], + "type": "interline_equation", + "content": "\\frac {\\partial v (x , t)}{\\partial \\mathbf {n}} = 0, \\quad \\forall (x, t) \\in \\partial \\Omega \\times [ 0, 1 ]. \\tag {25}", + "image_path": "f5da788e1691489f8ed70e4df97a26f148918c49be826528c1b604b33fcfba0d.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 625, + 352, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 625, + 352, + 637 + ], + "spans": [ + { + "bbox": [ + 104, + 625, + 352, + 637 + ], + "type": "text", + "content": "For any " + }, + { + "bbox": [ + 104, + 625, + 352, + 637 + ], + "type": "inline_equation", + "content": "w\\in H^{1}(\\Omega)" + }, + { + "bbox": [ + 104, + 625, + 352, + 637 + ], + "type": "text", + "content": ", by differentiation of product, we obtain" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 137, + 639, + 469, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 639, + 469, + 673 + ], + "spans": [ + { + "bbox": [ + 137, + 639, + 469, + 673 + ], + "type": "interline_equation", + "content": "\\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w + \\sum_ {p = 1} ^ {n} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) \\partial_ {p} w = \\sum_ {p = 1} ^ {n} \\partial_ {p} \\left[ 
\\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w \\right]", + "image_path": "cc398ca2765142ecd1e1f792aa6fcb2c5f14528f5c6d4f42764dde8777abf286.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 676, + 504, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 676, + 504, + 697 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 504, + 697 + ], + "type": "text", + "content": "by integrating both sides, and from the fact that " + }, + { + "bbox": [ + 104, + 676, + 504, + 697 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 676, + 504, + 697 + ], + "type": "text", + "content": " satisfies the Neumann boundary condition, we deduce" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 119, + 699, + 504, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 699, + 504, + 735 + ], + "spans": [ + { + "bbox": [ + 119, + 699, + 504, + 735 + ], + "type": "interline_equation", + "content": "\\int_ {\\Omega} \\sum_ {p = 1} ^ {n} \\partial_ {p} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w + \\int_ {\\Omega} \\sum_ {p, q = 1} ^ {n} u ^ {p q} \\partial_ {q} v \\partial_ {p} w = \\int_ {\\partial \\Omega} \\sum_ {p = 1} ^ {n} \\left(\\sum_ {q = 1} ^ {n} u ^ {p q} \\partial_ {q} v\\right) w = 0. 
\\tag {26}", + "image_path": "69028ee80f679350c57e635b1eb11bfdd9210c0aa3411bfd9a1dfc953eb65295.jpg" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "For any fixed time " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "t \\in [0,1]" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": ", by the divergence form, we can construct a bilinear form " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "a: H^1(\\Omega) \\times H^1(\\Omega)" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " and a linear form " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "l: H^1(\\Omega) \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 180, + 112, + 504, + 144 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 112, + 504, + 144 + ], + 
"spans": [ + { + "bbox": [ + 180, + 112, + 504, + 144 + ], + "type": "interline_equation", + "content": "a (v, w) = \\sum_ {p, q = 1} ^ {n} \\int_ {\\Omega} u ^ {p q} \\partial_ {p} v \\partial_ {q} w, \\quad l (w) = - \\int_ {\\Omega} (f - 1) w d x. \\tag {27}", + "image_path": "cfc813c98b7be0b91653e3958f4f777690bd8ef442f5f16bbcb2ba48c71fe69c.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 151, + 365, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 151, + 365, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 151, + 365, + 163 + ], + "type": "text", + "content": "A weak solution to Eqn. (24) is a function " + }, + { + "bbox": [ + 105, + 151, + 365, + 163 + ], + "type": "inline_equation", + "content": "v \\in H^{1}(\\Omega)" + }, + { + "bbox": [ + 105, + 151, + 365, + 163 + ], + "type": "text", + "content": ", such that" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 236, + 169, + 504, + 183 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 169, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 236, + 169, + 504, + 183 + ], + "type": "interline_equation", + "content": "a (v, w) = l (w), \\quad \\forall w \\in H ^ {1} (\\Omega). \\tag {28}", + "image_path": "444fdfca6f2e77d3d3aa3267217329e267d6a60bdb55e817ee60a2ea18af64c4.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 194, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 506, + 217 + ], + "type": "text", + "content": "By the uniform ellipticity Eqn. (22), the Lax-Milgram theorem Endre (2020) shows the existence of the weak solution." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 231, + 417, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 417, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 417, + 243 + ], + "type": "text", + "content": "B.2 DISCRETE LINEARIZED MONGE-AMPERE EQUATION SOLVABILITY" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": "Galerkin Method In practice, we construct a triangulation " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": ", such that the ratio between the diameter and inscribe-sphere radius of each simplex is bounded, and variation of the diameters of all the simplexes is small. We call such kind of " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": " a quasi-uniform triangulation, and denote the largest diameter as " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": ". 
For each vertex " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "v_{i} \\in \\mathcal{T}" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": ", we construct a piecewise linear base function " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "\\varphi_{i}" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "\\varphi_{i}" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": " is linear on each triangle, " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "\\varphi_{i}(v_{j})" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "\\delta_{ij}" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": ". 
We define a finite dimensional subspace " + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "inline_equation", + "content": "V_{h} \\subset H^{1}(\\Omega)" + }, + { + "bbox": [ + 104, + 251, + 504, + 308 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 216, + 314, + 394, + 348 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 314, + 394, + 348 + ], + "spans": [ + { + "bbox": [ + 216, + 314, + 394, + 348 + ], + "type": "interline_equation", + "content": "V _ {h} := \\left\\{v _ {h} (x) := \\sum_ {v _ {i} \\in \\mathcal {T}} \\lambda_ {i} \\varphi_ {i} (x), \\lambda_ {i} \\in \\mathbb {R} \\right\\}.", + "image_path": "8ffcb8c64483d43d77895930bee4a1b2e997ccfb8759addabaddc41444716841.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": "Given a function " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "u \\in H^{1}(\\Omega)" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": ", we use " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "u_{h} \\in V_{h}" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": " to denote its approximation in " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "V_{h}" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": ". 
Furthermore, " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "u_{h} = \\sum_{i} \\lambda_{i} \\varphi_{i}" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": ", we also use " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "u_{h}" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": " to represent the coefficient vector " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "(\\lambda_1, \\lambda_2, \\dots, \\lambda_k)^T" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": " depending on the context. The weak solution Eqn. (28) to the Monge-Ampère equation (6) is equivalent to find a " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "v \\in H^{1}(\\Omega)" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "a(v, w) = l(w)" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "w \\in H^{1}(\\Omega)" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": ". 
In discrete cases, we want to find " + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "inline_equation", + "content": "v_{h} \\in V_{h}" + }, + { + "bbox": [ + 104, + 354, + 504, + 409 + ], + "type": "text", + "content": ", such that" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 235, + 410, + 504, + 423 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 410, + 504, + 423 + ], + "spans": [ + { + "bbox": [ + 235, + 410, + 504, + 423 + ], + "type": "interline_equation", + "content": "a \\left(v _ {h}, w _ {h}\\right) = l \\left(w _ {h}\\right), \\quad \\forall w _ {h} \\in V _ {h}. \\tag {29}", + "image_path": "cb1b54baf9c4387ececf2afd6dc69c25bc6c141b5c40595253e536d05027dcaa.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 426, + 282, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 282, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 282, + 437 + ], + "type": "text", + "content": "Eqn. 
(29) is equivalent to the linear system," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 151, + 443, + 505, + 498 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 443, + 505, + 498 + ], + "spans": [ + { + "bbox": [ + 151, + 443, + 505, + 498 + ], + "type": "interline_equation", + "content": "\\left( \\begin{array}{c c c c} a \\left(\\varphi_ {1}, \\varphi_ {1}\\right) & a \\left(\\varphi_ {2}, \\varphi_ {1}\\right) & \\dots & a \\left(\\varphi_ {N}, \\varphi_ {1}\\right) \\\\ a \\left(\\varphi_ {1}, \\varphi_ {2}\\right) & a \\left(\\varphi_ {2}, \\varphi_ {2}\\right) & \\dots & a \\left(\\varphi_ {N}, \\varphi_ {2}\\right) \\\\ \\vdots & \\vdots & & \\vdots \\\\ a \\left(\\varphi_ {1}, \\varphi_ {N}\\right) & a \\left(\\varphi_ {2}, \\varphi_ {N}\\right) & \\dots & a \\left(\\varphi_ {N}, \\varphi_ {N}\\right) \\end{array} \\right) \\left( \\begin{array}{c} \\lambda_ {1} \\\\ \\lambda_ {2} \\\\ \\vdots \\\\ \\lambda_ {N} \\end{array} \\right) = \\left( \\begin{array}{c} l \\left(\\varphi_ {1}\\right) \\\\ l \\left(\\varphi_ {2}\\right) \\\\ \\vdots \\\\ l \\left(\\varphi_ {N}\\right) \\end{array} \\right) \\tag {30}", + "image_path": "46f9b2b72ac39a57fb1c62cc1578843027c793f3ad3fd05cfda927f721ab2b43.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "text", + "content": "From the weak solution to the linearized Monge-Ampère equation (10), we obtain the linear system Eqn. (30). We denote the stiffness matrix " + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "inline_equation", + "content": "A = (a(\\varphi_i, \\varphi_j))" + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "text", + "content": ". By the uniform ellipticity Eqn. 
(22), and " + }, + { + "bbox": [ + 104, + 509, + 504, + 543 + ], + "type": "inline_equation", + "content": "V_h \\subset H^1(\\Omega)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 257, + 544, + 352, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 544, + 352, + 559 + ], + "spans": [ + { + "bbox": [ + 257, + 544, + 352, + 559 + ], + "type": "interline_equation", + "content": "a (v, v) \\geq \\lambda \\| \\nabla v \\| _ {L ^ {2} (\\Omega)} ^ {2}", + "image_path": "d972924fd90c38b46b234d57c7061abed19f134cfda38e28104a520164efa6bd.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 562, + 287, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 562, + 287, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 287, + 576 + ], + "type": "text", + "content": "Assume " + }, + { + "bbox": [ + 105, + 562, + 287, + 576 + ], + "type": "inline_equation", + "content": "\\int_{\\Omega} v dx = 0" + }, + { + "bbox": [ + 105, + 562, + 287, + 576 + ], + "type": "text", + "content": ", by Poincaré inequality," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 182, + 582, + 428, + 607 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 582, + 428, + 607 + ], + "spans": [ + { + "bbox": [ + 182, + 582, + 428, + 607 + ], + "type": "interline_equation", + "content": "\\| \\nabla v \\| _ {L ^ {2} (\\Omega)} ^ {2} \\geq C _ {1} (\\Omega) \\| v \\| _ {L} ^ {2} (\\Omega), \\quad \\forall v \\in H ^ {1} (\\Omega), \\int_ {\\Omega} v d x = 0,", + "image_path": "42f858a8c0042fa0bc4ca70eab2cbd6789662c8a98c6b943d15b4359295a1d86.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 613, + 462, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 462, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 462, + 624 + ], + "type": "text", + "content": "where the constant " + }, + { + "bbox": [ + 104, + 613, + 462, + 
624 + ], + "type": "inline_equation", + "content": "C_1(\\Omega)" + }, + { + "bbox": [ + 104, + 613, + 462, + 624 + ], + "type": "text", + "content": " depends on " + }, + { + "bbox": [ + 104, + 613, + 462, + 624 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 613, + 462, + 624 + ], + "type": "text", + "content": ". Combine the above two inequalities, we obtain" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 201, + 631, + 505, + 657 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 631, + 505, + 657 + ], + "spans": [ + { + "bbox": [ + 201, + 631, + 505, + 657 + ], + "type": "interline_equation", + "content": "a (v, v) \\geq c \\| v \\| _ {L ^ {2} (\\Omega)} ^ {2}, \\quad \\forall v \\in H ^ {1} (\\Omega), \\int_ {\\Omega} v d x = 0. \\tag {31}", + "image_path": "a3a0c0b54531b99c6c292d55dba30254d234e8e9f0bd838e219fc16305da8e48.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 662, + 360, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 360, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 360, + 675 + ], + "type": "text", + "content": "Similarly, By the uniform ellipticity Eqn. 
22, and " + }, + { + "bbox": [ + 104, + 662, + 360, + 675 + ], + "type": "inline_equation", + "content": "V_{h}\\subset H^{1}(\\Omega)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 256, + 681, + 353, + 696 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 681, + 353, + 696 + ], + "spans": [ + { + "bbox": [ + 256, + 681, + 353, + 696 + ], + "type": "interline_equation", + "content": "a (v, v) \\leq \\Lambda \\| \\nabla v \\| _ {L ^ {2} (\\Omega)} ^ {2}", + "image_path": "1ef012f351fc0721046e55c65433ee18c2aef37c5fe5af8718a2d4c391d00925.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 702, + 498, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 702, + 498, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 702, + 498, + 714 + ], + "type": "text", + "content": "For linear finite element and quasi-uniform triangulation, we have the inverse Poincaré inequality," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 239, + 719, + 369, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 719, + 369, + 734 + ], + "spans": [ + { + "bbox": [ + 239, + 719, + 369, + 734 + ], + "type": "interline_equation", + "content": "\\left\\| \\nabla v _ {h} \\right\\| _ {L ^ {2}} ^ {2} \\leq C _ {2} (\\Omega) h ^ {- 1} \\left\\| v _ {h} \\right\\| _ {L ^ {2}} ^ {2}.", + "image_path": "e02318fa6115d4fee0979e5c82f3603f5cea1f19c151468f9060c938d3e670af.jpg" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, 
+ 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 462, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 462, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 462, + 95 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 82, + 462, + 95 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 104, + 82, + 462, + 95 + ], + "type": "text", + "content": " is the diameter of each element. Combine the above two inequalities, we obtain" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 225, + 99, + 504, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 99, + 504, + 114 + ], + "spans": [ + { + "bbox": [ + 225, + 99, + 504, + 114 + ], + "type": "interline_equation", + "content": "a \\left(v _ {h}, v _ {h}\\right) \\leq C \\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2}, \\quad \\forall v _ {h} \\in V _ {h}. \\tag {32}", + "image_path": "a5dde5be404be7df4e417b431a607bacc804590c3a60957f5e454fd5c9ac8ac7.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 119, + 372, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 119, + 372, + 132 + ], + "spans": [ + { + "bbox": [ + 105, + 119, + 372, + 132 + ], + "type": "text", + "content": "By combining the inequalities Eqn. (31) and Eqn. 
(32), we obtain" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 165, + 137, + 505, + 162 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 137, + 505, + 162 + ], + "spans": [ + { + "bbox": [ + 165, + 137, + 505, + 162 + ], + "type": "interline_equation", + "content": "\\frac {1}{C _ {3}} \\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2} \\leq a (v _ {h}, v _ {h}) \\leq C _ {3} \\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2}, \\quad \\forall v _ {h} \\in V _ {h}, \\int_ {\\Omega} v _ {h} = 0, \\tag {33}", + "image_path": "5038ffe57256093eefacec0936dcf5748ff6311846add1a144ed92eba85573e1.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 167, + 349, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 349, + 181 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 349, + 181 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 167, + 349, + 181 + ], + "type": "inline_equation", + "content": "C_3 > 1" + }, + { + "bbox": [ + 104, + 167, + 349, + 181 + ], + "type": "text", + "content": " is a constant. 
Suppose " + }, + { + "bbox": [ + 104, + 167, + 349, + 181 + ], + "type": "inline_equation", + "content": "v_{h} = \\sum_{i = 1}^{n}\\xi_{i}\\varphi_{i}" + }, + { + "bbox": [ + 104, + 167, + 349, + 181 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 177, + 186, + 434, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 186, + 434, + 217 + ], + "spans": [ + { + "bbox": [ + 177, + 186, + 434, + 217 + ], + "type": "interline_equation", + "content": "\\| v _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2} = \\int_ {\\Omega} v _ {h} ^ {2} d x = \\sum_ {i, j = 1} ^ {n} \\xi_ {i} \\xi_ {j} \\int_ {\\Omega} \\varphi_ {i} (x) \\varphi_ {j} (x) d x = \\xi^ {T} \\Phi \\xi ,", + "image_path": "48efe9a7ee2dc08900a9b048d2e6b8ba98bc98d3c52b6a6d1e61b656be7becd1.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 224, + 416, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 224, + 416, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 416, + 239 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 224, + 416, + 239 + ], + "type": "inline_equation", + "content": "\\xi = (\\xi_{i})" + }, + { + "bbox": [ + 104, + 224, + 416, + 239 + ], + "type": "text", + "content": " and the matrix " + }, + { + "bbox": [ + 104, + 224, + 416, + 239 + ], + "type": "inline_equation", + "content": "\\Phi = \\left(\\int_{\\Omega}\\varphi_{i}\\varphi_{j}\\right)" + }, + { + "bbox": [ + 104, + 224, + 416, + 239 + ], + "type": "text", + "content": " is positive definite. 
Therefore," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 246, + 243, + 505, + 268 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 243, + 505, + 268 + ], + "spans": [ + { + "bbox": [ + 246, + 243, + 505, + 268 + ], + "type": "interline_equation", + "content": "\\frac {1}{C _ {4}} \\| \\xi \\| ^ {2} \\leq \\xi^ {T} \\Phi \\xi < C _ {4} \\| \\xi \\| ^ {2}. \\tag {34}", + "image_path": "9f5d516e8332cde082ef1e4f60c1a7e2c8c83cbd3bc196871b45a6e23f2cc18f.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 274, + 427, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 427, + 287 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 427, + 287 + ], + "type": "text", + "content": "By " + }, + { + "bbox": [ + 104, + 274, + 427, + 287 + ], + "type": "inline_equation", + "content": "a(v_h,v_h) = \\xi^T A\\xi" + }, + { + "bbox": [ + 104, + 274, + 427, + 287 + ], + "type": "text", + "content": ", combing inequalities Eqn. (33) and Eqn. 
(34), we obtain" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 185, + 293, + 505, + 323 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 293, + 505, + 323 + ], + "spans": [ + { + "bbox": [ + 185, + 293, + 505, + 323 + ], + "type": "interline_equation", + "content": "\\frac {1}{C _ {3} C _ {4}} \\| \\xi \\| ^ {2} \\leq \\xi^ {T} A \\xi \\leq C _ {3} C _ {4} \\| \\xi \\| ^ {2}, \\quad \\forall \\xi \\in \\mathbb {R} ^ {n}, \\sum_ {i = 1} ^ {n} \\xi_ {i} = 0, \\tag {35}", + "image_path": "80e7da6488feda1e8601e0a9ca27ebf7a2edd74258be8ac16c5c3633674d0a13.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 328, + 317, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 328, + 317, + 341 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 317, + 341 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 328, + 317, + 341 + ], + "type": "inline_equation", + "content": "C_3C_4 > 1" + }, + { + "bbox": [ + 104, + 328, + 317, + 341 + ], + "type": "text", + "content": ". This proves the following lemma," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 343, + 506, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 343, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 343, + 506, + 390 + ], + "type": "text", + "content": "Lemma 6. By using Galerkin method using linear elements to numerically approximate the weak solution Eqn. (28) to the linearized Monge-Ampère Eqn. (6), if the uniform ellipticity Eqn. (22) holds, and the triangulation " + }, + { + "bbox": [ + 104, + 343, + 506, + 390 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 343, + 506, + 390 + ], + "type": "text", + "content": " is quasi-uniform, then the stiffness matrix of the linear system Eqn. 
(30) is positive definite on the space " + }, + { + "bbox": [ + 104, + 343, + 506, + 390 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{n} \\xi_i = 0" + }, + { + "bbox": [ + 104, + 343, + 506, + 390 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 185, + 395, + 505, + 426 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 395, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 185, + 395, + 505, + 426 + ], + "type": "interline_equation", + "content": "\\frac {1}{C _ {3} C _ {4}} \\| \\xi \\| ^ {2} \\leq \\xi^ {T} A \\xi \\leq C _ {3} C _ {4} \\| \\xi \\| ^ {2}, \\quad \\forall \\xi \\in \\mathbb {R} ^ {n}, \\sum_ {i = 1} ^ {n} \\xi_ {i} = 0, \\tag {36}", + "image_path": "18253ec34abb023cd18e7075a478b0da481c23ad1524229001148ccb5bbd8561.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 430, + 179, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 179, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 179, + 443 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 430, + 179, + 443 + ], + "type": "inline_equation", + "content": "C_3C_4 > 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 451, + 439, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 439, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 439, + 464 + ], + "type": "text", + "content": "Since the uniform ellipticity Eqn. 
(22) holds for any time " + }, + { + "bbox": [ + 104, + 451, + 439, + 464 + ], + "type": "inline_equation", + "content": "t \\in [0,1]" + }, + { + "bbox": [ + 104, + 451, + 439, + 464 + ], + "type": "text", + "content": ", then we obtain" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": "Corollary 7. By using Galerkin method with linear elements on quasi-uniform triangulations, the linearized Monge-Ampère equation in the continuity method Eqn. (6) always has a solution " + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "inline_equation", + "content": "v_h \\in V_h" + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "inline_equation", + "content": "t \\in [0,1]" + }, + { + "bbox": [ + 104, + 466, + 504, + 502 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 509, + 504, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 504, + 533 + ], + "type": "text", + "content": "Please note that the central differential scheme can be treated as Galerkin's method on a special uniform triangulation. Therefore, the above estimates still hold." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 545, + 227, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 545, + 227, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 227, + 556 + ], + "type": "text", + "content": "B.3 CONVERGENCE RATE" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 565, + 506, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 565, + 506, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 565, + 506, + 589 + ], + "type": "text", + "content": "Theorem 8 (main). Given a domain " + }, + { + "bbox": [ + 104, + 565, + 506, + 589 + ], + "type": "inline_equation", + "content": "\\Omega \\subset \\mathbb{R}^n" + }, + { + "bbox": [ + 104, + 565, + 506, + 589 + ], + "type": "text", + "content": ", which is a canonical cuboid " + }, + { + "bbox": [ + 104, + 565, + 506, + 589 + ], + "type": "inline_equation", + "content": "\\Omega = [-1,1]^n" + }, + { + "bbox": [ + 104, + 565, + 506, + 589 + ], + "type": "text", + "content": ", and a positive density function " + }, + { + "bbox": [ + 104, + 565, + 506, + 589 + ], + "type": "inline_equation", + "content": "f:\\Omega \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 565, + 506, + 589 + ], + "type": "text", + "content": " with the balance condition" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 254, + 594, + 355, + 620 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 594, + 355, + 620 + ], + "spans": [ + { + "bbox": [ + 254, + 594, + 355, + 620 + ], + "type": "interline_equation", + "content": "\\int_ {\\Omega} f (x) d x = \\int_ {\\Omega} 1 \\cdot d x,", + "image_path": "a268d3311dc4f93b8edca6e5274e29105492203997f83f1debd9a2d89eb5d2da.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 625, + 504, + 651 + 
], + "type": "text", + "content": "suppose the mirror reflection extension Eqn. (14) of " + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": " to the flat torus " + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\tilde{f} : \\mathbb{T}^n \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "inline_equation", + "content": "C^\\alpha" + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\alpha \\in (0,1)" + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": ", then Monge-Ampère equation," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 230, + 654, + 379, + 668 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 654, + 379, + 668 + ], + "spans": [ + { + "bbox": [ + 230, + 654, + 379, + 668 + ], + "type": "interline_equation", + "content": "d e t D ^ {2} u (x) = f (x), \\quad \\nabla u (\\Omega) = \\Omega", + "image_path": "839ac9553d20bba6678e69ce3f3be02c0bcc7c43063e88def2bf9695698cb07f.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 673, + 505, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 673, + 505, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 673, + 505, + 696 + ], + "type": "text", + "content": "can be solved using FFT-OT Algorithm Alg. (1). 
In particular, one can choose the step length parameter " + }, + { + "bbox": [ + 104, + 673, + 505, + 696 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 673, + 505, + 696 + ], + "type": "text", + "content": ", such that there is a constant " + }, + { + "bbox": [ + 104, + 673, + 505, + 696 + ], + "type": "inline_equation", + "content": "0 < \\gamma < 1" + }, + { + "bbox": [ + 104, + 673, + 505, + 696 + ], + "type": "text", + "content": ", the approximation error satisfies" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 260, + 701, + 351, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 701, + 351, + 715 + ], + "spans": [ + { + "bbox": [ + 260, + 701, + 351, + 715 + ], + "type": "interline_equation", + "content": "\\left\\| f - \\rho_ {k + 1} \\right\\| ^ {2} < C \\gamma^ {k},", + "image_path": "a18fd937cf6515debf44ddabdf84fa578e2be8423ab658b9c29970368ae34064.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 720, + 317, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 720, + 317, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 720, + 317, + 733 + ], + "type": "text", + "content": "namely the algorithm has a linear convergence rate." 
+ } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 451, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 451, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 451, + 95 + ], + "type": "text", + "content": "Proof. Suppose at the " + }, + { + "bbox": [ + 104, + 81, + 451, + 95 + ], + "type": "inline_equation", + "content": "k + 1" + }, + { + "bbox": [ + 104, + 81, + 451, + 95 + ], + "type": "text", + "content": "-th iteration, " + }, + { + "bbox": [ + 104, + 81, + 451, + 95 + ], + "type": "inline_equation", + "content": "\\rho_{k + 1} = \\operatorname*{det}(I + D^2 u_{k + 1})" + }, + { + "bbox": [ + 104, + 81, + 451, + 95 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 81, + 451, + 95 + ], + "type": "inline_equation", + "content": "\\| v_k\\| \\sim O(\\tau^{-1})" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 180, + 100, + 426, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 100, + 426, + 159 + ], + "spans": [ + { + "bbox": [ + 180, + 100, + 426, + 159 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} f - \\rho_ {k + 1} = f - \\det (I + \\mathcal {D} ^ {2} u _ {k} + \\mathcal {D} ^ {2} v _ {k}) \\\\ = f - \\det (I + \\mathcal 
{D} ^ {2} u _ {k}) - \\sum_ {p q} u _ {k} ^ {p q} \\partial_ {p} \\partial_ {q} v _ {k} + o (\\tau^ {- 1}) \\\\ = \\left(f - \\rho_ {k}\\right) - L _ {k} \\left[ v _ {k} \\right] + o \\left(\\tau^ {- 1}\\right) \\\\ \\end{array}", + "image_path": "5d41ebb22e79b647f31de020bc7b43f3c08ecaabf237852bf4fb9aed614eda0c.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 165, + 407, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 407, + 179 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 407, + 179 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 165, + 407, + 179 + ], + "type": "inline_equation", + "content": "L_{k}[v_{k}] = \\sum_{pq}u_{k}^{pq}\\partial_{p}\\partial_{q}v_{k}" + }, + { + "bbox": [ + 104, + 165, + 407, + 179 + ], + "type": "text", + "content": ". Hence by integration by parts Eqn. (27)," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 164, + 186, + 446, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 186, + 446, + 228 + ], + "spans": [ + { + "bbox": [ + 164, + 186, + 446, + 228 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| f - \\rho_ {k + 1} \\right\\| _ {L ^ {2} (\\Omega)} ^ {2} = \\left\\| f - \\rho_ {k} \\right\\| _ {L ^ {2} (\\Omega)} ^ {2} - 2 \\int_ {\\Omega} L _ {k} [ v _ {k} ] (f - \\rho_ {k}) + o (\\tau^ {- 1}) \\\\ = \\left\\| f - \\rho_ {k} \\right\\| _ {L ^ {2} (\\Omega)} ^ {2} + 2 a _ {k} (f - \\rho_ {k}, v _ {k}) + o (\\tau^ {- 1}) \\\\ \\end{array}", + "image_path": "913902d5ecc75920b3f21a0b3ed66e97845a02334669916d89f7e894b42c7544.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 234, + 493, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 493, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 493, + 247 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 234, + 493, + 247 + ], + "type": 
"inline_equation", + "content": "a_{k}" + }, + { + "bbox": [ + 104, + 234, + 493, + 247 + ], + "type": "text", + "content": " is the bilinear form in Eqn.(27). In the discrete case, all functions are in " + }, + { + "bbox": [ + 104, + 234, + 493, + 247 + ], + "type": "inline_equation", + "content": "V_{h}" + }, + { + "bbox": [ + 104, + 234, + 493, + 247 + ], + "type": "text", + "content": ", we denote" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 153, + 251, + 455, + 268 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 251, + 455, + 268 + ], + "spans": [ + { + "bbox": [ + 153, + 251, + 455, + 268 + ], + "type": "interline_equation", + "content": "\\| u _ {h} \\| _ {\\Phi} ^ {2} := \\| u _ {h} \\| _ {L ^ {2} (\\Omega)} ^ {2} = u _ {h} ^ {T} \\Phi u _ {h}, \\quad \\| u _ {h} \\| ^ {2} := u _ {h} ^ {T} u _ {h}, \\quad \\| u _ {h} \\| _ {A} ^ {2} := u _ {h} ^ {T} A u _ {h},", + "image_path": "e2a51f215f672f462c894c5876152ee14f35d0dc78e1b9ce0cc2ebe84799c270.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 274, + 269, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 269, + 286 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 269, + 286 + ], + "type": "text", + "content": "by the inequality Eqn. (34) and Eqn. 
35," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 153, + 292, + 457, + 316 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 292, + 457, + 316 + ], + "spans": [ + { + "bbox": [ + 153, + 292, + 457, + 316 + ], + "type": "interline_equation", + "content": "\\frac {1}{C _ {4}} \\| u _ {h} \\| ^ {2} \\leq \\| u _ {h} \\| _ {\\Phi} ^ {2} \\leq C _ {4} \\| u _ {h} \\| ^ {2}, \\quad \\frac {1}{C _ {3} C _ {4}} \\| u _ {h} \\| ^ {2} \\leq \\| u _ {h} \\| _ {A} ^ {2} \\leq C _ {3} C _ {4} \\| u _ {h} \\| ^ {2}.", + "image_path": "ae71d6fc8b5a7b01335c92990463b470ff457a09f28a1f7389e73221f95fbf38.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 321, + 149, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 321, + 149, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 149, + 333 + ], + "type": "text", + "content": "Therefore" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 126, + 338, + 504, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 338, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 126, + 338, + 504, + 354 + ], + "type": "interline_equation", + "content": "\\left\\| f _ {h} - \\rho_ {h, k + 1} \\right\\| _ {\\Phi} ^ {2} = \\left\\| f _ {h} - \\rho_ {h, k} \\right\\| _ {\\Phi} ^ {2} - 2 \\tau^ {- 1} \\left(f - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} \\left(f _ {h} - \\rho_ {h, k}\\right) + o \\left(\\tau^ {- 1}\\right), \\tag {37}", + "image_path": "4f69923c98fca425d001fd500616efac9cb103ab3dd7f61e5bb4fd327a41485a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "A_{k}" + }, + { 
+ "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": " is the stiffness matrix in Eqn.(30), and " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\bar{A}_k" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": " is the mean stiffness matrix. (By the uniform ellipticity Eqn. (22), the eigen values of the adjoint matrix " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "(u^{pq})(x,t)" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": " is uniformly bounded away from zero in the space " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\mathcal{H} := \\{\\xi \\in \\mathbb{R}^n | \\sum_i \\xi_i = 0\\}" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": ", so the eigen value of the mean adjoint matrix " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\bar{u}^{pq}(t)" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": " is bounded away from zero in " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": ". 
After discretization, the eigen values of " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\bar{A}_k" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": " is strictly positive in " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": ", hence " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\bar{A}_k" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": " is invertible in " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": ". In the following discussion, the term " + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "inline_equation", + "content": "o(\\tau^{-1})" + }, + { + "bbox": [ + 104, + 359, + 504, + 428 + ], + "type": "text", + "content": " will be ignored.) 
Remark that the following displayed equation is a scalar" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 160, + 434, + 449, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 434, + 449, + 449 + ], + "spans": [ + { + "bbox": [ + 160, + 434, + 449, + 449 + ], + "type": "interline_equation", + "content": "\\left(f _ {h} - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} (f - \\rho_ {h, k}) = \\mathrm {t r} \\left(\\left(f _ {h} - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} (f _ {h} - \\rho_ {h, k})\\right)", + "image_path": "1eeeaf092368e0a5e629e90ebc117fe3c2e2d7c2ef8895499e1c19332967e7a0.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "inline_equation", + "content": "A_{k}" + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "inline_equation", + "content": "\\bar{A}_{k}" + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "content": " are symmetric, positive definite on the space " + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "inline_equation", + "content": "\\sum_{i}\\xi_{i} = 0" + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "inline_equation", + "content": "\\| A_k\\| _2\\leq C_3C_4" + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "inline_equation", + "content": "\\| \\bar{A}_k\\| _2\\leq C_3C_4" + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], 
+ "type": "text", + "content": ", so are their inverses. Since " + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "inline_equation", + "content": "A_{n}" + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "inline_equation", + "content": "\\bar{A}_n" + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "content": " are symmetric, positive definite on the space orthogonal to " + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "inline_equation", + "content": "(1,1,\\ldots ,1)^T" + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "content": ", by Eqn. (35) and " + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "inline_equation", + "content": "\\| A_k\\bar{A}_k^{-1}\\| \\leq \\| A_k\\| \\| \\bar{A}_k^{-1}\\|" + }, + { + "bbox": [ + 104, + 455, + 504, + 494 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 186, + 498, + 424, + 526 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 498, + 424, + 526 + ], + "spans": [ + { + "bbox": [ + 186, + 498, + 424, + 526 + ], + "type": "interline_equation", + "content": "\\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}} \\| f _ {h} - \\rho_ {h, k} \\| _ {\\Phi} ^ {2} \\leq \\left(f _ {h} - \\rho_ {h, k}\\right) ^ {T} A _ {k} \\bar {A} _ {k} ^ {- 1} (f _ {h} - \\rho_ {h, k}).", + "image_path": "e5472de298c9de8b550a4804a43d8a03f691e197f724b1c2445845b4914a2f7d.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 530, + 224, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 224, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 224, + 543 + ], + "type": "text", + "content": "Plug into Eqn. 
(37), we have" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 549, + 505, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 549, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 115, + 549, + 505, + 578 + ], + "type": "interline_equation", + "content": "\\left\\| f _ {h} - \\rho_ {h, k + 1} \\right\\| _ {\\Phi} ^ {2} \\leq \\left(1 - \\frac {1}{\\tau} \\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}}\\right) \\left\\| f _ {h} - \\rho_ {h, k} \\right\\| _ {\\Phi} ^ {2} \\leq \\left(1 - \\frac {1}{\\tau} \\frac {(n - 1)}{C _ {3} ^ {2} C _ {4} ^ {3}}\\right) ^ {k} \\left\\| f _ {h} - \\rho_ {h, 0} \\right\\| _ {\\Phi} ^ {2}. \\tag {38}", + "image_path": "41f9da677ff1f98b09abb943ff45bc21626e7d371506ed37989d6fc0ddf0b258.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 584, + 362, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 362, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 362, + 597 + ], + "type": "text", + "content": "We can choose the step-length " + }, + { + "bbox": [ + 105, + 584, + 362, + 597 + ], + "type": "inline_equation", + "content": "\\tau^{-1}" + }, + { + "bbox": [ + 105, + 584, + 362, + 597 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 105, + 584, + 362, + 597 + ], + "type": "inline_equation", + "content": "\\gamma \\in (0, 1)" + }, + { + "bbox": [ + 105, + 584, + 362, + 597 + ], + "type": "text", + "content": ", where" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 266, + 603, + 342, + 631 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 603, + 342, + 631 + ], + "spans": [ + { + "bbox": [ + 266, + 603, + 342, + 631 + ], + "type": "interline_equation", + "content": "\\gamma = 1 - \\frac {(n - 1)}{\\tau C _ {3} ^ {2} C _ {4} ^ {3}}.", + "image_path": "410f56aa16d09f955462b5c5e5ff4e9c6a375f24d42b3ca75e05e9bc9404f24e.jpg" + } + ] + } + ], + "index": 18 + }, + { 
+ "bbox": [ + 105, + 635, + 148, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 148, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 148, + 646 + ], + "type": "text", + "content": "Therefore" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 185, + 645, + 504, + 660 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 645, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 185, + 645, + 504, + 660 + ], + "type": "interline_equation", + "content": "\\left\\| f _ {h} - \\rho_ {h, k + 1} \\right\\| _ {\\Phi} ^ {2} \\leq \\gamma^ {k} \\left\\| f _ {h} - \\rho_ {h, 0} \\right\\| _ {\\Phi} ^ {2} \\leq C _ {4} \\gamma^ {k} \\left\\| f _ {h} - \\rho_ {h, 0} \\right\\| ^ {2}. \\tag {39}", + "image_path": "7eddb635d3bd752ed3f00b97c153970cf239a5f9c0d2febd15f5a1c833cfaf49.jpg" + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 494, + 663, + 505, + 673 + ], + "blocks": [ + { + "bbox": [ + 494, + 663, + 505, + 673 + ], + "lines": [ + { + "bbox": [ + 494, + 663, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 494, + 663, + 505, + 673 + ], + "type": "image", + "image_path": "9d32cf41dd420f054c911100857edaa1d69208dd0df5a90d593bd716b8f123a5.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 689, + 298, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 298, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 298, + 700 + ], + "type": "text", + "content": "B.4 DIFFERENTIAL OPERATOR USING FFT" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "By using the Discrete Fourier Transformation, the differential operators can be converted to algebraic 
operators in the frequency domain." + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 457, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 457, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 457, + 95 + ], + "type": "text", + "content": "Lemma 9. 
Suppose the discrete function is " + }, + { + "bbox": [ + 104, + 82, + 457, + 95 + ], + "type": "inline_equation", + "content": "u_{i,j,k}" + }, + { + "bbox": [ + 104, + 82, + 457, + 95 + ], + "type": "text", + "content": ", with discrete Fourier transformation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 170, + 97, + 440, + 130 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 97, + 440, + 130 + ], + "spans": [ + { + "bbox": [ + 170, + 97, + 440, + 130 + ], + "type": "interline_equation", + "content": "u _ {i, j, k} = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} e ^ {\\sqrt {- 1} \\frac {2 \\pi m i}{M}} e ^ {\\sqrt {- 1} \\frac {2 \\pi n j}{N}} e ^ {\\sqrt {- 1} \\frac {2 \\pi l k}{L}}", + "image_path": "1f902be3e455f1eb749e55bd39d3def323f8404f35e1ef3149966a66d9e78713.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 132, + 407, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 407, + 144 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 407, + 144 + ], + "type": "text", + "content": "then the differential operator using central difference " + }, + { + "bbox": [ + 104, + 132, + 407, + 144 + ], + "type": "inline_equation", + "content": "\\partial_i\\partial_i u_{i,j,k}" + }, + { + "bbox": [ + 104, + 132, + 407, + 144 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 148, + 146, + 459, + 206 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 146, + 459, + 206 + ], + "spans": [ + { + "bbox": [ + 148, + 146, + 459, + 206 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial_ {i} \\partial_ {i} u _ {i, j, k} = \\frac {1}{h _ {x} ^ {2}} \\left(u _ {i + 1, j, k} + u _ {i - 1, j, k} - 2 u _ {i, j, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ 
{l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 \\left(\\cos \\frac {2 \\pi m}{M} - 1\\right)}{h _ {x} ^ {2}} e ^ {\\imath \\frac {2 \\pi m i}{M}} e ^ {\\imath \\frac {2 \\pi n j}{N}} e ^ {\\imath \\frac {2 \\pi l k}{L}} \\\\ \\end{array}", + "image_path": "9b120119cc5dd0e4f5af3c5bc661ecc895bc305b80188b592e408accfbdb9b2b.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 209, + 282, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 282, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 282, + 222 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 209, + 282, + 222 + ], + "type": "inline_equation", + "content": "\\iota = \\sqrt{-1}" + }, + { + "bbox": [ + 104, + 209, + 282, + 222 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 209, + 282, + 222 + ], + "type": "inline_equation", + "content": "\\partial_i\\partial_ju_{i,j,k}" + }, + { + "bbox": [ + 104, + 209, + 282, + 222 + ], + "type": "text", + "content": " is given by," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 145, + 224, + 463, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 224, + 463, + 285 + ], + "spans": [ + { + "bbox": [ + 145, + 224, + 463, + 285 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial_ {i} \\partial_ {j} u _ {i, j, k} = \\frac {1}{4 h _ {x} h _ {y}} \\left(u _ {i + 1, j + 1, k} + u _ {i - 1, j - 1, k} - u _ {i + 1, j - 1, k} - u _ {i - 1, j + 1, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h _ {y}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}", + "image_path": "8f96aef974ac6680c07fdf5667e5d513d5e41a40458aabe7812147dc8dd18c79.jpg" + } + ] + } + ], + "index": 6 + }, 
+ { + "bbox": [ + 105, + 296, + 191, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 296, + 191, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 296, + 191, + 308 + ], + "type": "text", + "content": "Proof. By equations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 165, + 310, + 445, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 310, + 445, + 350 + ], + "spans": [ + { + "bbox": [ + 165, + 310, + 445, + 350 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\cos (A + \\alpha) + \\cos (A - \\alpha) - 2 \\cos (A) \\\\ = (\\cos A \\cos \\alpha - \\sin A \\sin \\alpha) + (\\cos A \\cos \\alpha + \\sin A \\sin \\alpha) - 2 \\cos A \\\\ = 2 (\\cos \\alpha - 1) \\cos A \\\\ \\end{array}", + "image_path": "35642cd191921d6d36ea6ba4e95241e146c1d2f43931c3cb77b367cd08647abd.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 353, + 123, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 353, + 123, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 123, + 363 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 165, + 364, + 446, + 404 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 364, + 446, + 404 + ], + "spans": [ + { + "bbox": [ + 165, + 364, + 446, + 404 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sin (A + \\alpha) + \\sin (A - \\alpha) - 2 \\sin (A) \\\\ = (\\sin A \\cos \\alpha + \\cos A \\sin \\alpha) + (\\sin A \\cos \\alpha - \\cos A \\sin \\alpha) - 2 \\cos A \\\\ = 2 (\\cos \\alpha - 1) \\sin A \\\\ \\end{array}", + "image_path": "a5545ff42a15256293eb30e0e4211b53b1ef6080953c92b6e0b242fa245f9481.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 407, + 148, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 407, + 148, + 418 + ], + "spans": [ + { + "bbox": [ 
+ 105, + 407, + 148, + 418 + ], + "type": "text", + "content": "we obtain" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 171, + 415, + 438, + 443 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 415, + 438, + 443 + ], + "spans": [ + { + "bbox": [ + 171, + 415, + 438, + 443 + ], + "type": "interline_equation", + "content": "\\frac {1}{h _ {x} ^ {2}} \\left[ e ^ {\\iota \\frac {2 \\pi m (i + 1)}{M}} + e ^ {\\iota \\frac {2 \\pi m (i - 1)}{M}} - 2 e ^ {\\iota \\frac {2 \\pi m i}{M}} \\right] = \\frac {2 \\left(\\cos \\frac {2 \\pi m}{M} - 1\\right)}{h _ {x} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}}", + "image_path": "82f904b458c5843fabbca5308fe5f6051d52d984d861616d17510e231c1a5146.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 443, + 234, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 443, + 234, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 234, + 455 + ], + "type": "text", + "content": "by direct computation, we have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 125, + 456, + 484, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 456, + 484, + 552 + ], + "spans": [ + { + "bbox": [ + 125, + 456, + 484, + 552 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial_ {i} \\partial_ {i} u _ {i, j, k} = \\frac {1}{h _ {x} ^ {2}} (u _ {i + 1, j, k} + u _ {i - 1, j, k} - 2 u _ {i, j, k}) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {e ^ {\\iota \\frac {2 \\pi m (i + 1)}{M}} + e ^ {\\iota \\frac {2 \\pi m (i - 1)}{M}} - 2 e ^ {\\iota \\frac {2 \\pi m i}{M}}}{h _ {x} ^ {2}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 (\\cos \\frac {2 \\pi 
m}{M} - 1)}{h _ {x} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}", + "image_path": "d9a1b8275e79cd58e12e2e527dde8ac6c07db81b2d0df5e09cb6adcda0a42f04.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 554, + 200, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 554, + 200, + 566 + ], + "spans": [ + { + "bbox": [ + 105, + 554, + 200, + 566 + ], + "type": "text", + "content": "Similarly, by equations" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 124, + 567, + 485, + 643 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 567, + 485, + 643 + ], + "spans": [ + { + "bbox": [ + 124, + 567, + 485, + 643 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\cos (A + \\alpha + B + \\beta) + \\cos (A - \\alpha + B - \\beta) - \\cos (A + \\alpha + B - \\beta) - \\cos (A - \\alpha + B + \\beta) \\\\ = \\cos (A + B + \\alpha + \\beta) + \\cos (A + B - \\alpha - \\beta) - \\cos (A + B + \\alpha - \\beta) - \\cos (A + B - \\alpha + \\beta) \\\\ = 2 \\cos (A + B) \\cos (\\alpha + \\beta) - 2 \\cos (A + B) \\cos (\\alpha - \\beta) \\\\ = 2 \\cos (A + B) (\\cos (\\alpha + \\beta) - \\cos (\\alpha - \\beta)) \\\\ = 2 \\cos (A + B) (\\cos \\alpha \\cos \\beta - \\sin \\alpha \\sin \\beta - \\cos \\alpha \\cos \\beta - \\sin \\alpha \\sin \\beta) \\\\ = - 4 \\cos (A + B) \\sin \\alpha \\sin \\beta \\\\ \\end{array}", + "image_path": "915cf0f9416829c1ce6c5402030efa8b24fcc599bca1bb09d78a1b487b510fca.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 647, + 123, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 123, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 123, + 656 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 126, + 658, + 484, + 734 + ], + "type": "interline_equation", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 126, + 658, + 484, + 734 + ], + "spans": [ + { + "bbox": [ + 126, + 658, + 484, + 734 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sin (A + \\alpha + B + \\beta) + \\sin (A - \\alpha + B - \\beta) - \\sin (A + \\alpha + B - \\beta) - \\sin (A - \\alpha + B + \\beta) \\\\ = \\sin (A + B + \\alpha + \\beta) + \\sin (A + B - \\alpha - \\beta) - \\sin (A + B + \\alpha - \\beta) - \\sin (A + B - \\alpha + \\beta) \\\\ = 2 \\sin (A + B) \\cos (\\alpha + \\beta) - 2 \\sin (A + B) \\cos (\\alpha - \\beta) \\\\ = 2 \\sin (A + B) (\\cos (\\alpha + \\beta) - \\cos (\\alpha - \\beta)) \\\\ = 2 \\sin (A + B) (\\cos \\alpha \\cos \\beta - \\sin \\alpha \\sin \\beta - \\cos \\alpha \\cos \\beta - \\sin \\alpha \\sin \\beta) \\\\ = - 4 \\sin (A + B) \\sin \\alpha \\sin \\beta \\\\ \\end{array}", + "image_path": "1aba66602311ae0e2eb731451154aeab8527def596f083366a6b913346fd381d.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 247, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 247, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 247, + 95 + ], + "type": "text", + "content": "we deduce the following equation," + } + ] + } + ], + "index": 1 + }, + { + 
"bbox": [ + 147, + 101, + 462, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 101, + 462, + 159 + ], + "spans": [ + { + "bbox": [ + 147, + 101, + 462, + 159 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial_ {i} \\partial_ {j} u _ {i, j, k} = \\frac {1}{4 h _ {x} h _ {y}} \\left(u _ {i + 1, j + 1, k} + u _ {i - 1, j - 1, k} - u _ {i + 1, j - 1, k} - u _ {i - 1, j + 1, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi m}{M} \\sin \\frac {2 \\pi n}{N}}{h _ {x} h _ {y}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}", + "image_path": "6afbef586e02c503f2ffdff5ed780352fa5dd92b552bc373044caaef69cd286d.jpg" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 494, + 166, + 504, + 175 + ], + "blocks": [ + { + "bbox": [ + 494, + 166, + 504, + 175 + ], + "lines": [ + { + "bbox": [ + 494, + 166, + 504, + 175 + ], + "spans": [ + { + "bbox": [ + 494, + 166, + 504, + 175 + ], + "type": "image", + "image_path": "128785ac32bd0a8f19798902b6a1d6a700bd904a841a4eb14140d17f13a3b664.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 190, + 481, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 190, + 481, + 202 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 481, + 202 + ], + "type": "text", + "content": "Similarly, we have the representations of other differential operators in the frequency domain," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 151, + 208, + 458, + 266 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 208, + 458, + 266 + ], + "spans": [ + { + "bbox": [ + 151, + 208, + 458, + 266 + ], + "type": "interline_equation", + "content": 
"\\begin{array}{l} \\partial_ {j} \\partial_ {j} u _ {i, j, k} = \\frac {1}{h _ {y} ^ {2}} \\left(u _ {i, j + 1, k} + u _ {i, j - 1, k} - 2 u _ {i, j, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 (\\cos \\frac {2 \\pi n}{N} - 1)}{h _ {y} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}", + "image_path": "3cd2e13905b7f49617080a96e12bc7fdc86dff4869f8a578bcc3194eb2bb5b7b.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 151, + 274, + 457, + 332 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 274, + 457, + 332 + ], + "spans": [ + { + "bbox": [ + 151, + 274, + 457, + 332 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial_ {k} \\partial_ {k} u _ {i, j, k} = \\frac {1}{h _ {z} ^ {2}} \\left(u _ {i, j, k + 1} + u _ {i, j, k - 1} - 2 u _ {i, j, k}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {2 \\left(\\cos \\frac {2 \\pi l}{L} - 1\\right)}{h _ {z} ^ {2}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}", + "image_path": "20cc321ba02bac72c2b057d4f53d9fa0048b5e3b90920aec185de867c5aef0b5.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 148, + 351, + 460, + 410 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 351, + 460, + 410 + ], + "spans": [ + { + "bbox": [ + 148, + 351, + 460, + 410 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial_ {j} \\partial_ {k} u _ {i, j, k} = \\frac {1}{4 h _ {y} h _ {z}} \\left(u _ {i, j + 1, k + 1} + u _ {i, j - 1, k - 1} - u _ {i, j + 1, k - 1} - u _ {i, j - 1, k + 1}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} 
\\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi n}{N} \\sin \\frac {2 \\pi l}{L}}{h _ {y} h _ {z}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}", + "image_path": "ee2419e7d94e1ee86870cf5e73f2105f4fa72d463141494947156f7b943cd3e9.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 148, + 418, + 461, + 475 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 418, + 461, + 475 + ], + "spans": [ + { + "bbox": [ + 148, + 418, + 461, + 475 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial_ {k} \\partial_ {i} u _ {i, j, k} = \\frac {1}{4 h _ {z} h _ {x}} \\left(u _ {i + 1, j, k + 1} + u _ {i - 1, j, k - 1} - u _ {i + 1, j, k - 1} - u _ {i - 1, j, k + 1}\\right) \\\\ = \\frac {1}{M N L} \\sum_ {m = 0} ^ {M - 1} \\sum_ {n = 0} ^ {N - 1} \\sum_ {l = 0} ^ {L - 1} \\hat {u} _ {m, n, l} \\frac {- \\sin \\frac {2 \\pi l}{L} \\sin \\frac {2 \\pi m}{M}}{h _ {z} h _ {x}} e ^ {\\iota \\frac {2 \\pi m i}{M}} e ^ {\\iota \\frac {2 \\pi n j}{N}} e ^ {\\iota \\frac {2 \\pi l k}{L}} \\\\ \\end{array}", + "image_path": "fd31f5f7c9ae5a1dfdf3e978761e84e92e3e7fc3b11e329d08bf96cdbdd2cdcd.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 493, + 249, + 505 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 493, + 249, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 249, + 505 + ], + "type": "text", + "content": "C ALGORITHM PIPELINES" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 517, + 504, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 504, + 541 + ], + "type": "text", + "content": "In this section, we give the algorithm pipeline of the FFT-OT in Alg. 1 and the details to solve the constant coefficient elliptic PDE through FFT in Alg. 2." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 555, + 198, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 555, + 198, + 567 + ], + "spans": [ + { + "bbox": [ + 106, + 555, + 198, + 567 + ], + "type": "text", + "content": "Algorithm 1: FFT-OT" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "spans": [ + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "text", + "content": "Input: Domain " + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "inline_equation", + "content": "\\Omega = [-1, 1]^3" + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "text", + "content": ", the source density function " + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "inline_equation", + "content": "f > 0" + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "text", + "content": ", the target density " + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "inline_equation", + "content": "g = 1" + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "text", + "content": ", step length " + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "text", + "content": ", approximation error threshold " + }, + { + "bbox": [ + 106, + 571, + 484, + 590 + ], + "type": "inline_equation", + "content": "\\varepsilon" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 591, + 455, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 591, + 455, + 609 + ], + "spans": [ + { + "bbox": [ + 106, + 591, + 455, + 609 + ], + "type": "text", + "content": "Output: Solution " + }, + { + "bbox": [ + 106, + 591, + 455, + 609 + ], + "type": "inline_equation", + "content": "\\frac{1}{2}\\| x\\|^2 + u_n" + }, + { + "bbox": [ + 
106, + 591, + 455, + 609 + ], + "type": "text", + "content": " to the Monge-Ampère Eqn. (2) with the corresponding boundary condition." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 611, + 198, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 611, + 198, + 621 + ], + "spans": [ + { + "bbox": [ + 121, + 611, + 198, + 621 + ], + "type": "text", + "content": "Initialize " + }, + { + "bbox": [ + 121, + 611, + 198, + 621 + ], + "type": "inline_equation", + "content": "u_0(x) = 0" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 122, + 622, + 171, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 622, + 171, + 630 + ], + "spans": [ + { + "bbox": [ + 122, + 622, + 171, + 630 + ], + "type": "text", + "content": "while true do" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 122, + 632, + 265, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 632, + 265, + 642 + ], + "spans": [ + { + "bbox": [ + 122, + 632, + 265, + 642 + ], + "type": "text", + "content": "Compute the Hessian matrix " + }, + { + "bbox": [ + 122, + 632, + 265, + 642 + ], + "type": "inline_equation", + "content": "D^2 u_n(x)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 122, + 642, + 339, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 642, + 339, + 652 + ], + "spans": [ + { + "bbox": [ + 122, + 642, + 339, + 652 + ], + "type": "text", + "content": "Compute the density function " + }, + { + "bbox": [ + 122, + 642, + 339, + 652 + ], + "type": "inline_equation", + "content": "\\rho_{n}(x)\\gets \\operatorname *{det}(I + D^{2}u_{n}(x))" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 122, + 652, + 225, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 652, + 225, + 662 + ], + "spans": [ + { + "bbox": [ + 122, + 652, + 225, + 662 + ], + "type": "text", + "content": "if " + }, + { + "bbox": [ + 122, + 652, + 225, + 662 
+ ], + "type": "inline_equation", + "content": "\\| f - \\rho_n\\|_{L_2(\\Omega)} < \\varepsilon" + }, + { + "bbox": [ + 122, + 652, + 225, + 662 + ], + "type": "text", + "content": " then" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 122, + 662, + 162, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 662, + 162, + 670 + ], + "spans": [ + { + "bbox": [ + 122, + 662, + 162, + 670 + ], + "type": "text", + "content": "Break;" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 122, + 673, + 346, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 673, + 346, + 684 + ], + "spans": [ + { + "bbox": [ + 122, + 673, + 346, + 684 + ], + "type": "text", + "content": "Compute the adjoint matrix " + }, + { + "bbox": [ + 122, + 673, + 346, + 684 + ], + "type": "inline_equation", + "content": "[H_n^{pq}(x)]\\gets \\mathrm{Adj}(I + D^2 u_n(x))" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 122, + 685, + 327, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 685, + 327, + 693 + ], + "spans": [ + { + "bbox": [ + 122, + 685, + 327, + 693 + ], + "type": "text", + "content": "Compute the mean adjoint matrix " + }, + { + "bbox": [ + 122, + 685, + 327, + 693 + ], + "type": "inline_equation", + "content": "[H_n^{pq}]" + }, + { + "bbox": [ + 122, + 685, + 327, + 693 + ], + "type": "text", + "content": " using Eqn. (11);" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 122, + 694, + 398, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 694, + 398, + 704 + ], + "spans": [ + { + "bbox": [ + 122, + 694, + 398, + 704 + ], + "type": "text", + "content": "Solve the constant coefficient elliptic PDE (12) using the FFT Solver Alg. 
2;" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 122, + 704, + 312, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 704, + 312, + 714 + ], + "spans": [ + { + "bbox": [ + 122, + 704, + 312, + 714 + ], + "type": "text", + "content": "Update the Brenier potential " + }, + { + "bbox": [ + 122, + 704, + 312, + 714 + ], + "type": "inline_equation", + "content": "u_{n + 1}(x) \\gets u_n + \\tau v_n" + }, + { + "bbox": [ + 122, + 704, + 312, + 714 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 79, + 170, + 138 + ], + "blocks": [ + { + "bbox": [ + 112, + 79, + 170, + 138 + ], + "lines": [ + { + "bbox": [ + 112, + 79, + 170, + 138 + ], + "spans": [ + { + "bbox": [ + 112, + 79, + 170, + 138 + ], + "type": "image", + "image_path": "d61c13d938ba3e3e011a9da39cfe736b7b910a39b97e25db82c6206119535e6e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 112, + 140, + 170, + 198 + ], + "blocks": [ + { + "bbox": [ + 112, + 140, + 170, + 198 + ], + "lines": [ + { + "bbox": [ + 112, + 140, + 170, + 198 + ], + "spans": [ + { + "bbox": [ + 112, + 140, + 170, + 198 + ], + "type": "image", + "image_path": 
"44e79679a69958eabe685acf804b31adb3d9599663bed582cdc5120bd078deb4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 112, + 199, + 170, + 258 + ], + "blocks": [ + { + "bbox": [ + 112, + 199, + 170, + 258 + ], + "lines": [ + { + "bbox": [ + 112, + 199, + 170, + 258 + ], + "spans": [ + { + "bbox": [ + 112, + 199, + 170, + 258 + ], + "type": "image", + "image_path": "1e925c9b7f9647892a7ea3fbfddb61e80987d81ba2f6646e4581b685a2fb9402.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 112, + 259, + 170, + 317 + ], + "blocks": [ + { + "bbox": [ + 112, + 259, + 170, + 317 + ], + "lines": [ + { + "bbox": [ + 112, + 259, + 170, + 317 + ], + "spans": [ + { + "bbox": [ + 112, + 259, + 170, + 317 + ], + "type": "image", + "image_path": "fbb2d06d3357e0337f05a41ceb38dde94cda28eb36ddfaed59169e9c28b735c1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 319, + 164, + 330 + ], + "lines": [ + { + "bbox": [ + 118, + 319, + 164, + 330 + ], + "spans": [ + { + "bbox": [ + 118, + 319, + 164, + 330 + ], + "type": "text", + "content": "(a) Density" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 181, + 79, + 239, + 138 + ], + "blocks": [ + { + "bbox": [ + 181, + 79, + 239, + 138 + ], + "lines": [ + { + "bbox": [ + 181, + 79, + 239, + 138 + ], + "spans": [ + { + "bbox": [ + 181, + 79, + 239, + 138 + ], + "type": "image", + "image_path": "3ae8a214333cab2e75560cf2faed28321156b390f144dbbdac2346938375b192.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 181, + 140, + 239, + 198 + ], + "blocks": [ + { + "bbox": [ + 181, + 140, + 239, + 198 + ], + "lines": [ + { + "bbox": [ + 181, + 140, + 239, + 198 + ], 
+ "spans": [ + { + "bbox": [ + 181, + 140, + 239, + 198 + ], + "type": "image", + "image_path": "7a32bc28f59db7be75eb28365f52fb5978fc247d00945738d29d8b27caf7778c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 181, + 199, + 239, + 258 + ], + "blocks": [ + { + "bbox": [ + 181, + 199, + 239, + 258 + ], + "lines": [ + { + "bbox": [ + 181, + 199, + 239, + 258 + ], + "spans": [ + { + "bbox": [ + 181, + 199, + 239, + 258 + ], + "type": "image", + "image_path": "e494bc42a1aee6690ee63f3e1884e1f5b88d7d1c5347206571a5bd629f439cae.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 181, + 259, + 239, + 316 + ], + "blocks": [ + { + "bbox": [ + 181, + 259, + 239, + 316 + ], + "lines": [ + { + "bbox": [ + 181, + 259, + 239, + 316 + ], + "spans": [ + { + "bbox": [ + 181, + 259, + 239, + 316 + ], + "type": "image", + "image_path": "4126d843e81326161452112c5f97ad88a01e118bcc4743555b0a982acb664266.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 182, + 319, + 236, + 330 + ], + "lines": [ + { + "bbox": [ + 182, + 319, + 236, + 330 + ], + "spans": [ + { + "bbox": [ + 182, + 319, + 236, + 330 + ], + "type": "text", + "content": "(b) Rejection" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 249, + 79, + 307, + 138 + ], + "blocks": [ + { + "bbox": [ + 249, + 79, + 307, + 138 + ], + "lines": [ + { + "bbox": [ + 249, + 79, + 307, + 138 + ], + "spans": [ + { + "bbox": [ + 249, + 79, + 307, + 138 + ], + "type": "image", + "image_path": "1f5a7553399bb362b5bd8eed40380cd780f118852192b67f50de4b54c3f6c612.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 249, + 140, + 307, + 198 + ], + "blocks": [ + { + 
"bbox": [ + 249, + 140, + 307, + 198 + ], + "lines": [ + { + "bbox": [ + 249, + 140, + 307, + 198 + ], + "spans": [ + { + "bbox": [ + 249, + 140, + 307, + 198 + ], + "type": "image", + "image_path": "b637a1f2ee6ff502245d3ce9ab33b5a84d0d1e67c0550b102ee5658949ea98c9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 249, + 199, + 307, + 258 + ], + "blocks": [ + { + "bbox": [ + 249, + 199, + 307, + 258 + ], + "lines": [ + { + "bbox": [ + 249, + 199, + 307, + 258 + ], + "spans": [ + { + "bbox": [ + 249, + 199, + 307, + 258 + ], + "type": "image", + "image_path": "8ee81eb856ffcc90253543aece5f629617a0c4b424a18fdddd4d4cb6a1953052.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 249, + 259, + 307, + 317 + ], + "blocks": [ + { + "bbox": [ + 249, + 259, + 307, + 317 + ], + "lines": [ + { + "bbox": [ + 249, + 259, + 307, + 317 + ], + "spans": [ + { + "bbox": [ + 249, + 259, + 307, + 317 + ], + "type": "image", + "image_path": "58ae9947b1a4e94f5726fd435e640a4184c32f9790027899c0b277b8abe3ab76.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 262, + 319, + 293, + 330 + ], + "lines": [ + { + "bbox": [ + 262, + 319, + 293, + 330 + ], + "spans": [ + { + "bbox": [ + 262, + 319, + 293, + 330 + ], + "type": "text", + "content": "(c) MH" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 317, + 79, + 375, + 138 + ], + "blocks": [ + { + "bbox": [ + 317, + 79, + 375, + 138 + ], + "lines": [ + { + "bbox": [ + 317, + 79, + 375, + 138 + ], + "spans": [ + { + "bbox": [ + 317, + 79, + 375, + 138 + ], + "type": "image", + "image_path": "2d347f904fa60d1b03ebaa080f4fd81a76d3650db8736de9e8879d7c30cb0ce7.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 16 + }, + { + "type": "image", + "bbox": [ + 317, + 140, + 375, + 198 + ], + "blocks": [ + { + "bbox": [ + 317, + 140, + 375, + 198 + ], + "lines": [ + { + "bbox": [ + 317, + 140, + 375, + 198 + ], + "spans": [ + { + "bbox": [ + 317, + 140, + 375, + 198 + ], + "type": "image", + "image_path": "ec24c9178f566cf4b8f34ca5fa4af2218552888e0a66d54fccc9a561be2d7ab5.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 317, + 199, + 375, + 258 + ], + "blocks": [ + { + "bbox": [ + 317, + 199, + 375, + 258 + ], + "lines": [ + { + "bbox": [ + 317, + 199, + 375, + 258 + ], + "spans": [ + { + "bbox": [ + 317, + 199, + 375, + 258 + ], + "type": "image", + "image_path": "5cb225d82aa21de75571e1da0d0cd4719d28deccd58fbbb9d338b0e80644afd2.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 317, + 259, + 375, + 316 + ], + "blocks": [ + { + "bbox": [ + 317, + 259, + 375, + 316 + ], + "lines": [ + { + "bbox": [ + 317, + 259, + 375, + 316 + ], + "spans": [ + { + "bbox": [ + 317, + 259, + 375, + 316 + ], + "type": "image", + "image_path": "02063de3e6ff0a0c1fc406d3985036a189531ee95712e25fed72960b5493b688.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 319, + 364, + 330 + ], + "lines": [ + { + "bbox": [ + 328, + 319, + 364, + 330 + ], + "spans": [ + { + "bbox": [ + 328, + 319, + 364, + 330 + ], + "type": "text", + "content": "(d) Slice" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 386, + 81, + 443, + 138 + ], + "blocks": [ + { + "bbox": [ + 386, + 81, + 443, + 138 + ], + "lines": [ + { + "bbox": [ + 386, + 81, + 443, + 138 + ], + "spans": [ + { + "bbox": [ + 386, + 81, + 443, + 138 + ], + "type": "image", + "image_path": 
"af1aa4f0612f357b809fff64e0055dcd3eb08a278a3526ead2603686e412890b.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 386, + 140, + 443, + 198 + ], + "blocks": [ + { + "bbox": [ + 386, + 140, + 443, + 198 + ], + "lines": [ + { + "bbox": [ + 386, + 140, + 443, + 198 + ], + "spans": [ + { + "bbox": [ + 386, + 140, + 443, + 198 + ], + "type": "image", + "image_path": "f7e41c15e16491b309e865d95b76e88783dc5c13fc07702f1ef3b8f9db2cd246.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 386, + 199, + 443, + 258 + ], + "blocks": [ + { + "bbox": [ + 386, + 199, + 443, + 258 + ], + "lines": [ + { + "bbox": [ + 386, + 199, + 443, + 258 + ], + "spans": [ + { + "bbox": [ + 386, + 199, + 443, + 258 + ], + "type": "image", + "image_path": "80125737bb277afa86ba721eeb59fe6a247694e77ec0c822bc2ff888c8b3b27c.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 386, + 259, + 443, + 316 + ], + "blocks": [ + { + "bbox": [ + 386, + 259, + 443, + 316 + ], + "lines": [ + { + "bbox": [ + 386, + 259, + 443, + 316 + ], + "spans": [ + { + "bbox": [ + 386, + 259, + 443, + 316 + ], + "type": "image", + "image_path": "fa1bf5ab48c383d9d7520f197517751a02857b01e395036a260f7ca4df30098e.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 386, + 319, + 441, + 329 + ], + "lines": [ + { + "bbox": [ + 386, + 319, + 441, + 329 + ], + "spans": [ + { + "bbox": [ + 386, + 319, + 441, + 329 + ], + "type": "text", + "content": "(e) Ours-rand" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 454, + 80, + 511, + 138 + ], + "blocks": [ + { + "bbox": [ + 454, + 80, + 511, + 138 + ], + "lines": [ + { + "bbox": [ + 454, + 80, + 511, 
+ 138 + ], + "spans": [ + { + "bbox": [ + 454, + 80, + 511, + 138 + ], + "type": "image", + "image_path": "ed40012dcc547db48f95acbf7f256edfeb9868e75cb741cda923f9ab383df9ea.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 454, + 140, + 511, + 198 + ], + "blocks": [ + { + "bbox": [ + 454, + 140, + 511, + 198 + ], + "lines": [ + { + "bbox": [ + 454, + 140, + 511, + 198 + ], + "spans": [ + { + "bbox": [ + 454, + 140, + 511, + 198 + ], + "type": "image", + "image_path": "56efa3f542a9c1d5eeaacc4e0771ff63903e8ad30a5981e2d2b9b9ff6a17c45d.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 454, + 199, + 511, + 258 + ], + "blocks": [ + { + "bbox": [ + 454, + 199, + 511, + 258 + ], + "lines": [ + { + "bbox": [ + 454, + 199, + 511, + 258 + ], + "spans": [ + { + "bbox": [ + 454, + 199, + 511, + 258 + ], + "type": "image", + "image_path": "2aa7363eaa0da9ad29c7a1e8a9519b144066355e9cbf1199d988c6a33c228c87.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 454, + 259, + 511, + 317 + ], + "blocks": [ + { + "bbox": [ + 454, + 259, + 511, + 317 + ], + "lines": [ + { + "bbox": [ + 454, + 259, + 511, + 317 + ], + "spans": [ + { + "bbox": [ + 454, + 259, + 511, + 317 + ], + "type": "image", + "image_path": "cff095d6b8df797d938236d52afdc9be601480f956c2306b2b11b66a54c0d80b.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 319, + 507, + 330 + ], + "lines": [ + { + "bbox": [ + 454, + 319, + 507, + 330 + ], + "spans": [ + { + "bbox": [ + 454, + 319, + 507, + 330 + ], + "type": "text", + "content": "(f) Ours-grid" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 334, + 504, + 396 + ], + "lines": [ + { + "bbox": [ + 104, + 334, + 
504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 504, + 396 + ], + "type": "text", + "content": "Figure 5: 3D density function sampling. (a) The density functions in different slices of the same model, namely the 40th, 56th, 72th and 80th. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). (e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with " + }, + { + "bbox": [ + 104, + 334, + 504, + 396 + ], + "type": "inline_equation", + "content": "T^{-1}" + }, + { + "bbox": [ + 104, + 334, + 504, + 396 + ], + "type": "text", + "content": ". (f) The sampling results by mapping the grid centers back with " + }, + { + "bbox": [ + 104, + 334, + 504, + 396 + ], + "type": "inline_equation", + "content": "T^{-1}" + }, + { + "bbox": [ + 104, + 334, + 504, + 396 + ], + "type": "text", + "content": ". The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better. Zoom in for better visualization." 
+ } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 411, + 378, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 411, + 378, + 422 + ], + "spans": [ + { + "bbox": [ + 106, + 411, + 378, + 422 + ], + "type": "text", + "content": "Algorithm 2: FFT Solver for the Constant Coefficient Elliptic PDE" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "text", + "content": "Input: Domain " + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "inline_equation", + "content": "\\Omega = [-1,1]^3" + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "inline_equation", + "content": "M,N,L" + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "inline_equation", + "content": "\\{a^{pq}\\}" + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "inline_equation", + "content": "b^r" + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "text", + "content": ", function " + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 106, + 425, + 476, + 437 + ], + "type": "text", + "content": " with the periodic boundary condition" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 437, + 285, + 446 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 106, + 437, + 285, + 446 + ], + "spans": [ + { + "bbox": [ + 106, + 437, + 285, + 446 + ], + "type": "text", + "content": "Output: Solution " + }, + { + "bbox": [ + 106, + 437, + 285, + 446 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 106, + 437, + 285, + 446 + ], + "type": "text", + "content": " to the elliptic PDE Eqn. (18)" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 447, + 288, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 447, + 288, + 456 + ], + "spans": [ + { + "bbox": [ + 115, + 447, + 288, + 456 + ], + "type": "text", + "content": "Discretize the domain " + }, + { + "bbox": [ + 115, + 447, + 288, + 456 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 115, + 447, + 288, + 456 + ], + "type": "text", + "content": " to a " + }, + { + "bbox": [ + 115, + 447, + 288, + 456 + ], + "type": "inline_equation", + "content": "M\\times N\\times L" + }, + { + "bbox": [ + 115, + 447, + 288, + 456 + ], + "type": "text", + "content": " grid;" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 115, + 456, + 230, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 456, + 230, + 467 + ], + "spans": [ + { + "bbox": [ + 115, + 456, + 230, + 467 + ], + "type": "text", + "content": "Sample the function " + }, + { + "bbox": [ + 115, + 456, + 230, + 467 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 115, + 456, + 230, + 467 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 115, + 456, + 230, + 467 + ], + "type": "inline_equation", + "content": "f_{i,j,k}" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 467, + 330, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 467, + 330, + 479 + ], + "spans": [ + { + "bbox": [ + 115, + 467, + 330, + 479 + ], + "type": "text", + "content": "Compute FFT using Eqn. 
(16), " + }, + { + "bbox": [ + 115, + 467, + 330, + 479 + ], + "type": "inline_equation", + "content": "\\{\\hat{f}_{m,n,l}\\} \\gets \\mathrm{FFT}(\\{f_{i,j,k}\\})" + }, + { + "bbox": [ + 115, + 467, + 330, + 479 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 115, + 479, + 321, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 479, + 321, + 489 + ], + "spans": [ + { + "bbox": [ + 115, + 479, + 321, + 489 + ], + "type": "text", + "content": "for " + }, + { + "bbox": [ + 115, + 479, + 321, + 489 + ], + "type": "inline_equation", + "content": "(m,n,l)\\in [0,M - 1]\\times [0,N - 1]\\times [0,L - 1]" + }, + { + "bbox": [ + 115, + 479, + 321, + 489 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 121, + 489, + 279, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 489, + 279, + 498 + ], + "spans": [ + { + "bbox": [ + 121, + 489, + 279, + 498 + ], + "type": "text", + "content": "Compute the factor " + }, + { + "bbox": [ + 121, + 489, + 279, + 498 + ], + "type": "inline_equation", + "content": "\\lambda_{m,n,l}" + }, + { + "bbox": [ + 121, + 489, + 279, + 498 + ], + "type": "text", + "content": " using Eqn. 
(19);" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 121, + 498, + 191, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 498, + 191, + 506 + ], + "spans": [ + { + "bbox": [ + 121, + 498, + 191, + 506 + ], + "type": "text", + "content": "if " + }, + { + "bbox": [ + 121, + 498, + 191, + 506 + ], + "type": "inline_equation", + "content": "\\lambda_{m,n,l}" + }, + { + "bbox": [ + 121, + 498, + 191, + 506 + ], + "type": "text", + "content": " is 0 then" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 121, + 507, + 183, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 507, + 183, + 517 + ], + "spans": [ + { + "bbox": [ + 121, + 507, + 183, + 517 + ], + "type": "inline_equation", + "content": "\\hat{u}_{m,n,l}\\gets 0;" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 121, + 517, + 137, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 517, + 137, + 525 + ], + "spans": [ + { + "bbox": [ + 121, + 517, + 137, + 525 + ], + "type": "text", + "content": "else" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 137, + 527, + 231, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 527, + 231, + 540 + ], + "spans": [ + { + "bbox": [ + 137, + 527, + 231, + 540 + ], + "type": "inline_equation", + "content": "\\hat{u}_{m,n,l}\\gets \\hat{f}_{m,n,l} / \\lambda_{m,n,l};" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 544, + 375, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 544, + 375, + 555 + ], + "spans": [ + { + "bbox": [ + 115, + 544, + 375, + 555 + ], + "type": "text", + "content": "Compute the Inverse FFT using Eqn. 
(17), " + }, + { + "bbox": [ + 115, + 544, + 375, + 555 + ], + "type": "inline_equation", + "content": "\\{u_{i,j,k}\\} \\gets \\mathrm{IFFT}(\\{\\hat{u}_{m,n,l}\\})" + }, + { + "bbox": [ + 115, + 544, + 375, + 555 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 116, + 555, + 175, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 555, + 175, + 566 + ], + "spans": [ + { + "bbox": [ + 116, + 555, + 175, + 566 + ], + "type": "text", + "content": "Return " + }, + { + "bbox": [ + 116, + 555, + 175, + 566 + ], + "type": "inline_equation", + "content": "\\{u_{i,j,k}\\}" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 592, + 259, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 259, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 259, + 604 + ], + "type": "text", + "content": "D APPENDIX EXPERIMENTS" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 104, + 617, + 504, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 504, + 640 + ], + "type": "text", + "content": "In this section, as a compensation of the experiments in the main paper, we give more results on the 3D adaptive sampling and volumetric magnifier." 
+ } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 653, + 327, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 653, + 327, + 664 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 327, + 664 + ], + "type": "text", + "content": "D.1 MORE RESULTS ON 3D ADAPTIVE SAMPLING" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": "In the experiments, we set the density function " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "f(x) = \\sum_{i=1}^{30} p_i \\mathcal{N}(\\mu_i, \\Sigma_i)" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mu_i, \\Sigma_i)" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": " represents Gaussian distribution with mean " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mu_i" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": " and variance " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\Sigma_i = \\mathrm{diag}(\\sigma_{i0}^2, \\sigma_{i1}^2, \\sigma_{i2}^2)" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mu_i \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": " is uniformly sampled from " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "[0,1]^3" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\sigma_{ij}" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": " is uniformly sampled from " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "[0,0.5]" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "p_i \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": " is uniformly sampled from " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "[0.2,1]" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": " and normalized such that " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\int_{\\Omega} f(x) dx = 1" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": ". Thus the source distribution " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": " is a complicated Gaussian mixture distribution restricted on " + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\Omega = [0,1]^3" + }, + { + "bbox": [ + 104, + 673, + 505, + 733 + ], + "type": "text", + "content": ". 
After computing the OT map" + } + ] + } + ], + "index": 49 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 50 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 79, + 170, + 138 + ], + "blocks": [ + { + "bbox": [ + 112, + 79, + 170, + 138 + ], + "lines": [ + { + "bbox": [ + 112, + 79, + 170, + 138 + ], + "spans": [ + { + "bbox": [ + 112, + 79, + 170, + 138 + ], + "type": "image", + "image_path": "b632fd714fbfdb632a3fadf61432a89b4935cba5a58458f57721ec541b864b8a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 112, + 140, + 170, + 198 + ], + "blocks": [ + { + "bbox": [ + 112, + 140, + 170, + 198 + ], + "lines": [ + { + "bbox": [ + 112, + 140, + 170, + 198 + ], + "spans": [ + { + "bbox": [ + 112, + 140, + 170, + 198 + ], + "type": "image", + "image_path": "ce168d8abe6ef7856881a01379eafc45ff484d3f482f28bbd9121ee6f360fcd8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 112, + 199, + 170, + 258 + ], + "blocks": [ + { + "bbox": [ + 112, + 199, + 170, + 258 + ], + "lines": [ + { + "bbox": [ + 112, + 199, + 170, + 258 + ], + "spans": [ + { + "bbox": [ + 112, + 199, + 170, + 258 + ], + "type": "image", + "image_path": 
"1d0ed36963385a1cdeb5ba7d7491cde70599c9d615925ec1eb0eb33d253da61a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 112, + 259, + 170, + 317 + ], + "blocks": [ + { + "bbox": [ + 112, + 259, + 170, + 317 + ], + "lines": [ + { + "bbox": [ + 112, + 259, + 170, + 317 + ], + "spans": [ + { + "bbox": [ + 112, + 259, + 170, + 317 + ], + "type": "image", + "image_path": "eb9ce5bc43fe4ea1e02f33564d44774cad8052be6a1d9599c507358664a72adc.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 319, + 164, + 330 + ], + "lines": [ + { + "bbox": [ + 118, + 319, + 164, + 330 + ], + "spans": [ + { + "bbox": [ + 118, + 319, + 164, + 330 + ], + "type": "text", + "content": "(a) Density" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 181, + 79, + 239, + 138 + ], + "blocks": [ + { + "bbox": [ + 181, + 79, + 239, + 138 + ], + "lines": [ + { + "bbox": [ + 181, + 79, + 239, + 138 + ], + "spans": [ + { + "bbox": [ + 181, + 79, + 239, + 138 + ], + "type": "image", + "image_path": "3a3dafa3734f7c62672195aae43f7933770947304af30b77cb539a98c00c056c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 181, + 140, + 239, + 198 + ], + "blocks": [ + { + "bbox": [ + 181, + 140, + 239, + 198 + ], + "lines": [ + { + "bbox": [ + 181, + 140, + 239, + 198 + ], + "spans": [ + { + "bbox": [ + 181, + 140, + 239, + 198 + ], + "type": "image", + "image_path": "8a81d3b1245a6f48c68ece122d2b992520da7cf082d951fe4aa566e2d2b3fd5d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 181, + 199, + 239, + 258 + ], + "blocks": [ + { + "bbox": [ + 181, + 199, + 239, + 258 + ], + "lines": [ + { + "bbox": [ + 181, + 199, + 239, + 258 + ], 
+ "spans": [ + { + "bbox": [ + 181, + 199, + 239, + 258 + ], + "type": "image", + "image_path": "251020e575b7b5f86fb186ef23a404656f39073fc82867552527c84baf964ab9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 181, + 259, + 239, + 317 + ], + "blocks": [ + { + "bbox": [ + 181, + 259, + 239, + 317 + ], + "lines": [ + { + "bbox": [ + 181, + 259, + 239, + 317 + ], + "spans": [ + { + "bbox": [ + 181, + 259, + 239, + 317 + ], + "type": "image", + "image_path": "3ccc7c884bf260d3d7a589e79b9b4fa1d982905da5ec2d50108ac999beea27b7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 182, + 319, + 236, + 330 + ], + "lines": [ + { + "bbox": [ + 182, + 319, + 236, + 330 + ], + "spans": [ + { + "bbox": [ + 182, + 319, + 236, + 330 + ], + "type": "text", + "content": "(b) Rejection" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 249, + 79, + 307, + 138 + ], + "blocks": [ + { + "bbox": [ + 249, + 79, + 307, + 138 + ], + "lines": [ + { + "bbox": [ + 249, + 79, + 307, + 138 + ], + "spans": [ + { + "bbox": [ + 249, + 79, + 307, + 138 + ], + "type": "image", + "image_path": "eaf96e2003f1ea59584af9725a79164f6b33ab7b581efab41acb30dee13b0bf8.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 249, + 140, + 307, + 198 + ], + "blocks": [ + { + "bbox": [ + 249, + 140, + 307, + 198 + ], + "lines": [ + { + "bbox": [ + 249, + 140, + 307, + 198 + ], + "spans": [ + { + "bbox": [ + 249, + 140, + 307, + 198 + ], + "type": "image", + "image_path": "fd227b3612f179fff0a571758e1efca8a888148821fc29a10a544659f4bd65e7.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 249, + 199, + 307, + 258 + ], + "blocks": [ + { + 
"bbox": [ + 249, + 199, + 307, + 258 + ], + "lines": [ + { + "bbox": [ + 249, + 199, + 307, + 258 + ], + "spans": [ + { + "bbox": [ + 249, + 199, + 307, + 258 + ], + "type": "image", + "image_path": "8e0e7a79899d3ecec4623468e3128ae43c89a6526bdb894bf3beecf22dd3eddd.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 249, + 259, + 307, + 317 + ], + "blocks": [ + { + "bbox": [ + 249, + 259, + 307, + 317 + ], + "lines": [ + { + "bbox": [ + 249, + 259, + 307, + 317 + ], + "spans": [ + { + "bbox": [ + 249, + 259, + 307, + 317 + ], + "type": "image", + "image_path": "80942969ab76543868ba09a0801227a57eb9b33db9897b7f213aa7c1f5bc3163.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 262, + 319, + 293, + 330 + ], + "lines": [ + { + "bbox": [ + 262, + 319, + 293, + 330 + ], + "spans": [ + { + "bbox": [ + 262, + 319, + 293, + 330 + ], + "type": "text", + "content": "(c) MH" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 317, + 79, + 375, + 138 + ], + "blocks": [ + { + "bbox": [ + 317, + 79, + 375, + 138 + ], + "lines": [ + { + "bbox": [ + 317, + 79, + 375, + 138 + ], + "spans": [ + { + "bbox": [ + 317, + 79, + 375, + 138 + ], + "type": "image", + "image_path": "28c4c3972de99557357c14c6b87619ff7a34e1166bc578df3c195d38f4ea8a05.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 317, + 140, + 375, + 198 + ], + "blocks": [ + { + "bbox": [ + 317, + 140, + 375, + 198 + ], + "lines": [ + { + "bbox": [ + 317, + 140, + 375, + 198 + ], + "spans": [ + { + "bbox": [ + 317, + 140, + 375, + 198 + ], + "type": "image", + "image_path": "cf31df0e7b0570b35ed2fca7512ae315ab5ca51d3b38856e83b45d9d5e4b9902.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 17 + }, + { + "type": "image", + "bbox": [ + 317, + 199, + 375, + 258 + ], + "blocks": [ + { + "bbox": [ + 317, + 199, + 375, + 258 + ], + "lines": [ + { + "bbox": [ + 317, + 199, + 375, + 258 + ], + "spans": [ + { + "bbox": [ + 317, + 199, + 375, + 258 + ], + "type": "image", + "image_path": "0ecdda00548d3aec475ef7e2e5395252966aa131b202cd34fc6a6ce13f498fb3.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 317, + 259, + 375, + 317 + ], + "blocks": [ + { + "bbox": [ + 317, + 259, + 375, + 317 + ], + "lines": [ + { + "bbox": [ + 317, + 259, + 375, + 317 + ], + "spans": [ + { + "bbox": [ + 317, + 259, + 375, + 317 + ], + "type": "image", + "image_path": "40d3fab3b9d3f33210e869c2778b9abe4e6549645280d60ee8b9fbaa682466a3.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 319, + 364, + 330 + ], + "lines": [ + { + "bbox": [ + 328, + 319, + 364, + 330 + ], + "spans": [ + { + "bbox": [ + 328, + 319, + 364, + 330 + ], + "type": "text", + "content": "(d) Slice" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 334, + 504, + 395 + ], + "lines": [ + { + "bbox": [ + 104, + 334, + 504, + 395 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 504, + 395 + ], + "type": "text", + "content": "Figure 6: 3D density function sampling. (a) The density functions in different slices of the same model, namely the 56th, 64th, 80th and 88th. (b)-(f) The samples obtained by different sampling methods. (b) Rejection sampling. (c) Metropolis-Hastings (MH) algorithm Bishop (2006). (d) Slice sampling Neal (2003). 
(e) The sampling results by mapping the random samples from the uniform distribution back to the desired distribution with " + }, + { + "bbox": [ + 104, + 334, + 504, + 395 + ], + "type": "inline_equation", + "content": "T^{-1}" + }, + { + "bbox": [ + 104, + 334, + 504, + 395 + ], + "type": "text", + "content": ". (f) The sampling results by mapping the grid centers back with " + }, + { + "bbox": [ + 104, + 334, + 504, + 395 + ], + "type": "inline_equation", + "content": "T^{-1}" + }, + { + "bbox": [ + 104, + 334, + 504, + 395 + ], + "type": "text", + "content": ". The scores of the top right give the results of the Chi-square goodness-of-fit test. Smaller means better. Zoom in for better visualization." + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 386, + 80, + 443, + 137 + ], + "blocks": [ + { + "bbox": [ + 386, + 80, + 443, + 137 + ], + "lines": [ + { + "bbox": [ + 386, + 80, + 443, + 137 + ], + "spans": [ + { + "bbox": [ + 386, + 80, + 443, + 137 + ], + "type": "image", + "image_path": "a44e65cf4720fb3a9025753064e926dfdb8bde3b86e5bc3e5cf471a43408922e.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 386, + 140, + 443, + 198 + ], + "blocks": [ + { + "bbox": [ + 386, + 140, + 443, + 198 + ], + "lines": [ + { + "bbox": [ + 386, + 140, + 443, + 198 + ], + "spans": [ + { + "bbox": [ + 386, + 140, + 443, + 198 + ], + "type": "image", + "image_path": "15e043fbbd4253c3678c4a6c8987915513b324058b4a6013c68462e6053f8c4b.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 386, + 199, + 442, + 257 + ], + "blocks": [ + { + "bbox": [ + 386, + 199, + 442, + 257 + ], + "lines": [ + { + "bbox": [ + 386, + 199, + 442, + 257 + ], + "spans": [ + { + "bbox": [ + 386, + 199, + 442, + 257 + ], + "type": "image", + 
"image_path": "569538f217b565d540365f94cdc03f275139cbdb91aa6587279a13dbd86cbd41.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 386, + 259, + 442, + 316 + ], + "blocks": [ + { + "bbox": [ + 386, + 259, + 442, + 316 + ], + "lines": [ + { + "bbox": [ + 386, + 259, + 442, + 316 + ], + "spans": [ + { + "bbox": [ + 386, + 259, + 442, + 316 + ], + "type": "image", + "image_path": "2dbd3cd34ac41eb59e083df05340237d5623a52dac6d916612bb68fbf4b503eb.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 386, + 319, + 441, + 329 + ], + "lines": [ + { + "bbox": [ + 386, + 319, + 441, + 329 + ], + "spans": [ + { + "bbox": [ + 386, + 319, + 441, + 329 + ], + "type": "text", + "content": "(e) Ours-rand" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 454, + 80, + 511, + 137 + ], + "blocks": [ + { + "bbox": [ + 454, + 80, + 511, + 137 + ], + "lines": [ + { + "bbox": [ + 454, + 80, + 511, + 137 + ], + "spans": [ + { + "bbox": [ + 454, + 80, + 511, + 137 + ], + "type": "image", + "image_path": "8edc0a8c659ae779ef7604f2c644f9c4736a8e2a02872c6fb5ab0ca4a2a395f6.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 454, + 140, + 511, + 198 + ], + "blocks": [ + { + "bbox": [ + 454, + 140, + 511, + 198 + ], + "lines": [ + { + "bbox": [ + 454, + 140, + 511, + 198 + ], + "spans": [ + { + "bbox": [ + 454, + 140, + 511, + 198 + ], + "type": "image", + "image_path": "89522a5ec4fc0c4966fa43f854ea69059752ee3f5a866a35ba53e8a54ef7f3ca.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 454, + 199, + 511, + 257 + ], + "blocks": [ + { + "bbox": [ + 454, + 199, + 511, + 257 + ], + "lines": [ + { + "bbox": [ + 454, 
+ 199, + 511, + 257 + ], + "spans": [ + { + "bbox": [ + 454, + 199, + 511, + 257 + ], + "type": "image", + "image_path": "0ebd275f7cefa65894cf1568baccba33a3509c5fff52d6a3c85a0338be5d4ad8.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 454, + 259, + 511, + 317 + ], + "blocks": [ + { + "bbox": [ + 454, + 259, + 511, + 317 + ], + "lines": [ + { + "bbox": [ + 454, + 259, + 511, + 317 + ], + "spans": [ + { + "bbox": [ + 454, + 259, + 511, + 317 + ], + "type": "image", + "image_path": "f85149e701cb473dacd334bb3fafafc12d8d82164ca006fc6af47bd045e3392c.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 319, + 507, + 330 + ], + "lines": [ + { + "bbox": [ + 454, + 319, + 507, + 330 + ], + "spans": [ + { + "bbox": [ + 454, + 319, + 507, + 330 + ], + "type": "text", + "content": "(f) Ours-grid" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " to the uniform distribution " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\nu" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " defined on " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "[-1,1]^3" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": ", we 
conduct two groups of experiments: (i) we map the cell centers of the grid " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\{y_k\\}" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "[-1,1]^3" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " back to " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "[-1,1]^3" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " through the inverse OT map " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "T^{-1}(y_k)" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " defined by Eqn. (20); (ii) we randomly sample " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "100k" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " samples " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\{y_k\\}" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " from the Uniform distribution defined in " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "[-1,1]^3" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": ", then map them back to " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "[-1,1]^3" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " through the inverse OT map " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "T^{-1}(y_k)" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + 
"content": ". In order to keep the consistency with the mirror reflection process in the FFT-OT algorithm, we also reflect the generated samples back to " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": ". To visualize the results of the " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": "th slice, we plot the samples whose " + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 415, + 504, + 495 + ], + "type": "text", + "content": " coordinates satisfy the inequality," + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 223, + 499, + 386, + 512 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 499, + 386, + 512 + ], + "spans": [ + { + "bbox": [ + 223, + 499, + 386, + 512 + ], + "type": "interline_equation", + "content": "k / 1 2 8 - 1 / 2 5 6 \\leq z \\leq k / 1 2 8 + 1 / 2 5 6.", + "image_path": "27f3cca23c8b9445e20e370029ea618987d5de0dfaf8e6493fa649880aaef01f.jpg" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 523, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 504, + 624 + ], + "type": "text", + "content": "In Fig. 5 and Fig. 6, we give more sampling results of different slices correspond to the two models used in Fig. 2 in the main paper. Fig. 5 visualize the density function restricted on the 40th, 56th, 72th and 80th slices for different methods of the model displayed in the first row of 2. Fig. 6 visualize the density function restricted on the 56th, 64th, 80th and 88th slices for different methods of the model displayed in the second row of 2. 
Compared with the classical methods, the both sampling strategies of our method give decent sampling results that fit the prescribed density function well. Moreover, the number of generated samples for different slices of the same 3D model fits the density functions restricted to the corresponding slices well, namely more samples are generated in the brighter regions for different slices." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 635, + 329, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 329, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 329, + 647 + ], + "type": "text", + "content": "D.2 MORE RESULTS ON VOLUMETRIC MAGNIFIER" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 104, + 656, + 504, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 504, + 712 + ], + "type": "text", + "content": "In this experiment, we magnify the volumetric MRI image of the aneurysm by different amplification factors. In Fig. 7, we show the original aneurysm viewed from difference angles in the first column. The last three columns give the magnified results with different amplification factors from the viewpoints same as those in the first column. We can see that the aneurysm region is successfully magnified by different factors and the rest parts of the volume nearly keeps the same." 
+ } + ] + } + ], + "index": 36 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 236, + 205, + 329 + ], + "blocks": [ + { + "bbox": [ + 112, + 236, + 205, + 329 + ], + "lines": [ + { + "bbox": [ + 112, + 236, + 205, + 329 + ], + "spans": [ + { + "bbox": [ + 112, + 236, + 205, + 329 + ], + "type": "image", + "image_path": "fb7d09d5709a40cfb69539473eaed5d763a441ad2075023fee75460f50533828.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 112, + 331, + 205, + 423 + ], + "blocks": [ + { + "bbox": [ + 112, + 331, + 205, + 423 + ], + "lines": [ + { + "bbox": [ + 112, + 331, + 205, + 423 + ], + "spans": [ + { + "bbox": [ + 112, + 331, + 205, + 423 + ], + "type": "image", + "image_path": "11a8bfcfe495005c1b746312291aa2cded56fd424078f9ac0e844becb48e7b4e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 112, + 426, + 205, + 518 + ], + "blocks": [ + { + "bbox": [ + 112, + 426, + 205, + 518 + ], + "lines": [ + { + "bbox": [ + 112, + 426, + 205, + 518 + ], + "spans": [ + { + "bbox": [ + 112, + 426, + 205, + 518 + ], + "type": "image", + "image_path": 
"bc21db47976ef52cdf7e67a303c072525938597b4d3b10adab258e8ae0eff02c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 521, + 183, + 532 + ], + "lines": [ + { + "bbox": [ + 135, + 521, + 183, + 532 + ], + "spans": [ + { + "bbox": [ + 135, + 521, + 183, + 532 + ], + "type": "text", + "content": "(a) Original" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 216, + 237, + 308, + 329 + ], + "blocks": [ + { + "bbox": [ + 216, + 237, + 308, + 329 + ], + "lines": [ + { + "bbox": [ + 216, + 237, + 308, + 329 + ], + "spans": [ + { + "bbox": [ + 216, + 237, + 308, + 329 + ], + "type": "image", + "image_path": "e66ecdc0147b920220d6ad7885d69226c479c799bcf72243d8bbd04c7128c3f1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 216, + 331, + 308, + 423 + ], + "blocks": [ + { + "bbox": [ + 216, + 331, + 308, + 423 + ], + "lines": [ + { + "bbox": [ + 216, + 331, + 308, + 423 + ], + "spans": [ + { + "bbox": [ + 216, + 331, + 308, + 423 + ], + "type": "image", + "image_path": "56de3e5a0cf32f1b7bfb23652890c5c3d8b7521e6d37d17261226aba14fa84b6.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 216, + 426, + 308, + 518 + ], + "blocks": [ + { + "bbox": [ + 216, + 426, + 308, + 518 + ], + "lines": [ + { + "bbox": [ + 216, + 426, + 308, + 518 + ], + "spans": [ + { + "bbox": [ + 216, + 426, + 308, + 518 + ], + "type": "image", + "image_path": "856064b9ce45d2bc94f593509ee413a02c6a616819f4afd839216a8291a836f0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 521, + 306, + 532 + ], + "lines": [ + { + "bbox": [ + 216, + 521, + 306, + 532 + ], + "spans": [ + { + "bbox": [ + 216, + 521, + 306, + 532 + ], + "type": "text", + "content": "(b) 
Magnifying ratio 1" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 319, + 237, + 411, + 329 + ], + "blocks": [ + { + "bbox": [ + 319, + 237, + 411, + 329 + ], + "lines": [ + { + "bbox": [ + 319, + 237, + 411, + 329 + ], + "spans": [ + { + "bbox": [ + 319, + 237, + 411, + 329 + ], + "type": "image", + "image_path": "612523e4a7d50159c05d7ceb09aa87af4ce236800deb9243b95993711e37bcb6.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 319, + 331, + 411, + 423 + ], + "blocks": [ + { + "bbox": [ + 319, + 331, + 411, + 423 + ], + "lines": [ + { + "bbox": [ + 319, + 331, + 411, + 423 + ], + "spans": [ + { + "bbox": [ + 319, + 331, + 411, + 423 + ], + "type": "image", + "image_path": "891f404debf9f17eba4150df5625178a5feb7d425472183b96528929f320b7ad.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 319, + 426, + 411, + 518 + ], + "blocks": [ + { + "bbox": [ + 319, + 426, + 411, + 518 + ], + "lines": [ + { + "bbox": [ + 319, + 426, + 411, + 518 + ], + "spans": [ + { + "bbox": [ + 319, + 426, + 411, + 518 + ], + "type": "image", + "image_path": "7ade992cc0161eba250766e4f278b57ec9d994c8ed45392476a9053a21254b72.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 320, + 521, + 410, + 532 + ], + "lines": [ + { + "bbox": [ + 320, + 521, + 410, + 532 + ], + "spans": [ + { + "bbox": [ + 320, + 521, + 410, + 532 + ], + "type": "text", + "content": "(c) Magnifying ratio 2" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 422, + 237, + 514, + 329 + ], + "blocks": [ + { + "bbox": [ + 422, + 237, + 514, + 329 + ], + "lines": [ + { + "bbox": [ + 422, + 237, + 514, + 329 + ], + "spans": [ + { + "bbox": [ 
+ 422, + 237, + 514, + 329 + ], + "type": "image", + "image_path": "3710da88f2a7dd69ed1eea4fd8ab45ed766d513a4ed1084de222e94bdf4cc8e4.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 422, + 331, + 514, + 423 + ], + "blocks": [ + { + "bbox": [ + 422, + 331, + 514, + 423 + ], + "lines": [ + { + "bbox": [ + 422, + 331, + 514, + 423 + ], + "spans": [ + { + "bbox": [ + 422, + 331, + 514, + 423 + ], + "type": "image", + "image_path": "1152a6ab31b4aec89407cf6eba702c2129f587ffc0f15c6a290b6cd99606045c.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 422, + 426, + 514, + 518 + ], + "blocks": [ + { + "bbox": [ + 422, + 426, + 514, + 518 + ], + "lines": [ + { + "bbox": [ + 422, + 426, + 514, + 518 + ], + "spans": [ + { + "bbox": [ + 422, + 426, + 514, + 518 + ], + "type": "image", + "image_path": "bf5e003805ef42d6c857e8ec10508c0cd76198e6f141390526699155a9ca25dc.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 423, + 521, + 513, + 532 + ], + "lines": [ + { + "bbox": [ + 423, + 521, + 513, + 532 + ], + "spans": [ + { + "bbox": [ + 423, + 521, + 513, + 532 + ], + "type": "text", + "content": "(d) Magnifying ratio 3" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 540, + 504, + 572 + ], + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 572 + ], + "type": "text", + "content": "Figure 7: The volume magnifier of an aneurysm. The first column shows the original volumetric data from different viewpoints, and the last three columns give the magnified data from the same viewpoints of the first column with different magnifying ratios. The yellow circles denote the aneurysm or the ROIs." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_content_list.json b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7bfe841e2837e0bca5ec6c15f2369941223525b9 --- /dev/null +++ b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_content_list.json @@ -0,0 +1,4452 @@ +[ + { + "type": "text", + "text": "WASSERSTEIN AUTO-ENCODEDMDPS", + "text_level": 1, + "bbox": [ + 171, + 99, + 638, + 119 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "FORMAL VERIFICATION OF EFFICIENTLY DISTILLED RL POLICIES WITH MANY-SIDED GUARANTEES", + "bbox": [ + 171, + 119, + 805, + 132 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Florent Delgrange", + "text_level": 1, + "bbox": [ + 181, + 157, + 313, + 172 + 
], + "page_idx": 0 + }, + { + "type": "text", + "text": "AI Lab, Vrije Universiteit Brussel (VUB)", + "bbox": [ + 181, + 172, + 457, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Antwerp", + "bbox": [ + 183, + 186, + 333, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "florent.delgrange@ai.vub.ac.be", + "bbox": [ + 183, + 200, + 478, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ann Nowé", + "text_level": 1, + "bbox": [ + 514, + 157, + 591, + 170 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "AI Lab, VUB", + "bbox": [ + 516, + 171, + 607, + 185 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Guillermo A. Pérez", + "text_level": 1, + "bbox": [ + 643, + 157, + 782, + 170 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Antwerp", + "bbox": [ + 643, + 171, + 794, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Flanders Make", + "bbox": [ + 643, + 186, + 745, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 250, + 547, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although deep reinforcement learning (DRL) has many success stories, the large-scale deployment of policies learned through these advanced techniques in safety-critical scenarios is hindered by their lack of formal guarantees. Variational Markov Decision Processes (VAE-MDPs) are discrete latent space models that provide a reliable framework for distilling formally verifiable controllers from any RL policy. While the related guarantees address relevant practical aspects such as the satisfaction of performance and safety properties, the VAE approach suffers from several learning flaws (posterior collapse, slow learning speed, poor dynamics estimates), primarily due to the absence of abstraction and representation guarantees to support latent optimization. 
We introduce the Wasserstein auto-encoded MDP (WAE-MDP), a latent space model that fixes those issues by minimizing a penalized form of the optimal transport between the behaviors of the agent executing the original policy and the distilled policy, for which the formal guarantees apply. Our approach yields bisimulation guarantees while learning the distilled policy, allowing concrete optimization of the abstraction and representation model quality. Our experiments show that, besides distilling policies up to 10 times faster, the latent model quality is indeed better in general. Moreover, we present experiments from a simple time-to-failure verification algorithm on the latent space. The fact that our approach enables such simple verification techniques highlights its applicability.", + "bbox": [ + 228, + 279, + 769, + 544 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 566, + 336, + 580 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reinforcement learning (RL) is emerging as a solution of choice to address challenging real-word scenarios such as epidemic mitigation and prevention strategies (Libin et al., 2020), multi-energy management (Ceusters et al., 2021), or effective canal control (Ren et al., 2021). RL enables learning high performance controllers by introducing general nonlinear function approximators (such as neural networks) to scale with high-dimensional and continuous state-action spaces. This introduction, termed deep-RL, causes the loss of the conventional convergence guarantees of RL (Tsitsiklis, 1994) as well as those obtained in some continuous settings (Nowe, 1994), and hinders their wide roll-out in critical settings. This work enables the formal verification of any such policies, learned by agents interacting with unknown, continuous environments modeled as Markov decision processes (MDPs). 
Specifically, we learn a discrete representation of the state-action space of the MDP, which yield both a (smaller, explicit) latent space model and a distilled version of the RL policy, that are tractable for model checking (Baier & Katoen, 2008). The latter are supported by bisimulation guarantees: intuitively, the agent behaves similarly in the original and latent models. The strength of our approach is not simply that we verify that the RL agent meets a predefined set of specifications, but rather provide an abstract model on which the user can reason and check any desired agent property.", + "bbox": [ + 169, + 597, + 826, + 806 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Variational MDPs (VAE-MDPs, Delgrange et al. 2022) offer a valuable framework for doing so. The distillation is provided with PAC-verifiable bisimulation bounds guaranteeing that the agent behaves similarly (i) in the original and latent model (abstraction quality); (ii) from all original states embedded to the same discrete state (representation quality). Whilst the bounds offer a confidence metric that enables the verification of performance and safety properties, VAE-MDPs suffer from several learning flaws. First, training a VAE-MDP relies on variational proxies to the bisimulation bounds, meaning there is no learning guarantee on the quality of the latent model via its optimization. Second, variational autoencoders (VAEs) (Kingma & Welling, 2014; Hoffman et al., 2013) are known", + "bbox": [ + 169, + 811, + 828, + 925 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "to suffer from posterior collapse (e.g., Alemi et al. 2018) resulting in a deterministic mapping to a unique latent state in VAE-MDPs. 
Most of the training process focuses on handling this phenomenon and setting up the stage for the concrete distillation and abstraction, finally taking place in a second training phase. This requires extra regularizers, setting up annealing schemes and learning phases, and defining prioritized replay buffers to store transitions. Distillation through VAE-MDPs is thus a meticulous task, requiring a large step budget and tuning many hyperparameters.", + "bbox": [ + 169, + 103, + 826, + 188 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Building upon Wasserstein autoencoders (Tolstikhin et al., 2018) instead of VAEs, we introduce Wasserstein auto-encoded MDPs (WAE-MDPs), which overcome those limitations. Our WAE relies on the optimal transport (OT) from trace distributions resulting from the execution of the RL policy in the real environment to that reconstructed from the latent model operating under the distilled policy. In contrast to VAEs which rely on variational proxies, we derive a novel objective that directly incorporates the bisimulation bounds. Furthermore, while VAEs learn stochastic mappings to the latent space which need be determined or even entirely reconstructed from data at the deployment time to obtain the guarantees, our WAE has no such requirements, and learn all the necessary components to obtain the guarantees during learning, and does not require such post-processing operations.", + "bbox": [ + 169, + 193, + 826, + 321 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Those theoretical claims are reflected in our experiments: policies are distilled up to 10 times faster through WAE- than VAE-MDPs and provide better abstraction quality and performance in general, without the need for setting up annealing schemes and training phases, nor prioritized buffer and extra regularizer. 
Our distilled policies are able to recover (and sometimes even outperform) the original policy performance, highlighting the representation quality offered by our new framework: the distillation is able to remove some non-robustness of the input RL policy. Finally, we formally verified time-to-failure properties (e.g., Pnueli 1977) to emphasize the applicability of our approach.", + "bbox": [ + 169, + 325, + 826, + 426 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Other Related Work. Complementary works approach safe RL via formal methods (Junges et al., 2016; Alshiekh et al., 2018; Jansen et al., 2020; Simão et al., 2021), aimed at formally ensuring safety during RL, all of which require providing an abstract model of the safety aspects of the environment. They also include the work of Alamdari et al. (2020), applying synthesis and model checking on policies distilled from RL, without quality guarantees. Other frameworks share our goal of verifying deep-RL policies (Bacci & Parker, 2020; Carr et al., 2020) but rely on a known environment model, among other assumptions (e.g., deterministic or discrete environment). Finally, DeepSynth (Hasanbeig et al., 2021) allows learning a formal model from execution traces, with the different purpose of guiding the agent towards sparse and non-Markovian rewards.", + "bbox": [ + 169, + 433, + 828, + 559 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "On the latent space training side, WWAEs (Zhang et al., 2019) reuse OT as latent regularizer discrepancy (in Gaussian closed form), whereas we derive two regularizers involving OT. These two are, in contrast, optimized via the dual formulation of Wasserstein, as in Wasserstein-GANs (Arjovsky et al., 2017). 
Similarly to $VQ$ -VAEs (van den Oord et al., 2017) and Latent Bernoulli AEs (Fajtl et al., 2020), our latent space model learns discrete spaces via deterministic encoders, but relies on a smooth approximation instead of using the straight-through gradient estimator.", + "bbox": [ + 169, + 565, + 826, + 650 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Works on representation learning for RL (Gelada et al., 2019; Castro et al., 2021; Zhang et al., 2021; Zang et al., 2022) consider bisimulation metrics to optimize the representation quality, and aim at learning (continuous) representations which capture bisimulation, so that two states close in the representation are guaranteed to provide close and relevant information to optimize the performance of the controller. In particular, as in our work, DeepMDPs (Gelada et al., 2019) are learned by optimizing local losses, by assuming a deterministic MDP and without verifiable confidence measurement.", + "bbox": [ + 169, + 656, + 826, + 742 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 BACKGROUND", + "text_level": 1, + "bbox": [ + 171, + 760, + 328, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the following, we write $\\Delta(\\mathcal{X})$ for the set of measures over (complete, separable metric space) $\\mathcal{X}$ .", + "bbox": [ + 169, + 790, + 826, + 806 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Markov decision processes (MDPs) are tuples $\\mathcal{M} = \\langle \\mathcal{S},\\mathcal{A},\\mathbf{P},\\mathcal{R},\\ell ,\\mathbf{AP},s_I\\rangle$ where $\\mathcal{S}$ is a set of states; $\\mathcal{A}$ , a set of actions; $\\mathbf{P}\\colon S\\times \\mathcal{A}\\to \\Delta (\\mathcal{S})$ , a probability transition function that maps the current state and action to a distribution over the next states; $\\mathcal{R}\\colon S\\times \\mathcal{A}\\to \\mathbb{R}$ , a reward function; $\\ell \\colon S\\to 2^{\\mathbf{AP}}$ , a labeling function over a set 
of atomic propositions $\\mathbf{AP}$ ; and $s_I\\in S$ , the initial state. If $|\\mathcal{A}| = 1$ , $\\mathcal{M}$ is a fully stochastic process called a Markov chain (MC). We write $\\mathcal{M}_s$ for the MDP obtained when replacing the initial state of $\\mathcal{M}$ by $s\\in S$ . An agent interacting in $\\mathcal{M}$ produces trajectories, i.e., sequences of states and actions $\\tau = \\langle s_{0:T},a_{0:T - 1}\\rangle$ where $s_0 = s_I$ and $s_{t + 1}\\sim \\mathbf{P}(\\cdot |s_t,a_t)$ for $t < T$ . The set of infinite trajectories of $\\mathcal{M}$ is Traj. We assume $\\mathbf{AP}$ and", + "bbox": [ + 169, + 811, + 828, + 926 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d65866c58ffac9e4f32077722ee7503d76e7a60919cc34853594c4512a2a5dac.jpg", + "image_caption": [ + "(a) Execution of the latent policy $\\bar{\\pi}$ in the original and latent MDPs, and local losses." + ], + "image_footnote": [], + "bbox": [ + 181, + 102, + 411, + 248 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/da63f93f7e197340d3bcf8dd1aa131ca3c0690159db32c6ffb49a5cb8c36f4cf.jpg", + "image_caption": [ + "(b) Parallel execution of the original RL policy $\\pi$ in the original and latent MDPs, local losses, and steady-state regularizer.", + "Figure 1: Latent flows: arrows represent (stochastic) mappings, the original (resp. latent) state-action space is spread along the blue (resp. green) area, and distances are depicted in red. Distilling $\\pi$ into $\\bar{\\pi}$ via flow (b) by minimizing $\\mathcal{W}_{\\xi_{\\pi}}$ allows closing the gap between flows (a) and (b)." 
+ ], + "image_footnote": [], + "bbox": [ + 455, + 102, + 802, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "labels being respectively one-hot and binary encoded. Given $\\mathsf{T} \\subseteq \\mathbf{AP}$ , we write $s \\models \\mathsf{T}$ if $s$ is labeled with $\\mathsf{T}$ , i.e., $\\ell(s) \\cap \\mathsf{T} \\neq \\emptyset$ , and $s \\models \\neg \\mathsf{T}$ for $s \\models \\mathsf{T}$ . We refer to MDPs with continuous state or action spaces as continuous MDPs. In that case, we assume $\\mathcal{S}$ and $\\mathcal{A}$ are complete separable metric spaces equipped with a Borel $\\sigma$ -algebra, and $\\ell^{-1}(\\mathsf{T})$ is Borel-measurable for any $\\mathsf{T} \\subseteq \\mathbf{AP}$ .", + "bbox": [ + 169, + 362, + 823, + 420 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Policies and stationary distributions. A (memoryless) policy $\\pi \\colon S \\to \\Delta(\\mathcal{A})$ prescribes which action to choose at each step of the interaction. The set of memoryless policies of $\\mathcal{M}$ is $\\Pi$ . The MDP $\\mathcal{M}$ and $\\pi \\in \\Pi$ induce an MC $\\mathcal{M}_{\\pi}$ with unique probability measure $\\mathbb{P}_{\\pi}^{\\mathcal{M}}$ on the Borel $\\sigma$ -algebra over measurable subsets $\\varphi \\subseteq \\text{Traj}$ (Puterman, 1994). We drop the superscript when the context is clear. Define $\\xi_{\\pi}^{t}(s' | s) = \\mathbb{P}_{\\pi}^{\\mathcal{M}_{s}}(\\{s_{0:\\infty}, a_{0:\\infty} | s_{t} = s'\\})$ as the distribution giving the probability of being in each state of $\\mathcal{M}_{s}$ after $t$ steps. $B \\subseteq S$ is a bottom strongly connected component (BSCC) of $\\mathcal{M}_{\\pi}$ if (i) $B$ is a maximal subset satisfying $\\xi_{\\pi}^{t}(s' | s) > 0$ for any $s, s' \\in B$ and some $t \\geqslant 0$ , and (ii) $\\mathbb{E}_{a \\sim \\pi(\\cdot|s)} \\mathbf{P}(B | s, a) = 1$ for all $s \\in S$ . The unique stationary distribution of $B$ is $\\xi_{\\pi} \\in \\Delta(B)$ . 
We write $s, a \\sim \\xi_{\\pi}$ for sampling $s$ from $\\xi_{\\pi}$ then $a$ from $\\pi$ . An MDP $\\mathcal{M}$ is ergodic if for all $\\pi \\in \\Pi$ , the state space of $\\mathcal{M}_{\\pi}$ consists of a unique aperiodic BSCC with $\\xi_{\\pi} = \\lim_{t \\to \\infty} \\xi_{\\pi}^{t}(\\cdot | s)$ for all $s \\in S$ .", + "bbox": [ + 169, + 430, + 826, + 575 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Value objectives. Given $\\pi \\in \\Pi$ , the value of a state $s \\in S$ is the expected value of a random variable obtained by running $\\pi$ from $s$ . For a discount factor $\\gamma \\in [0,1]$ , we consider the following objectives. (i) Discounted return: we write $V_{\\pi}(s) = \\mathbb{E}_{\\pi}^{\\mathcal{M}_s}\\left[\\sum_{t=0}^{\\infty} \\gamma^t \\mathcal{R}(s_t, a_t)\\right]$ for the expected discounted rewards accumulated along trajectories. The typical goal of an RL agent is to learn a policy $\\pi^\\star$ that maximizes $V_{\\pi^\\star}(s_I)$ through interactions with the (unknown) MDP; (ii) Reachability: let $\\mathsf{C}, \\mathsf{T} \\subseteq \\mathbf{AP}$ , the (constrained) reachability event is $\\mathsf{CUT} = \\{s_{0:\\infty}, a_{0:\\infty} | \\exists i \\in \\mathbb{N}, \\forall j < i, s_j \\models \\mathsf{C} \\wedge s_i \\models \\mathsf{T}\\} \\subseteq \\mathsf{Traj}$ . We write $V_{\\pi}^{\\varphi}(s) = \\mathbb{E}_{\\pi}^{\\mathcal{M}_s}\\left[\\gamma^{t^\\star} \\mathbf{1}_{\\langle s_{0:\\infty}, a_{0:\\infty} \\rangle \\in \\varphi}\\right]$ for the discounted probability of satisfying $\\varphi = \\mathsf{CUT}$ , where $t^\\star$ is the length of the shortest trajectory prefix that allows satisfying $\\varphi$ . Intuitively, this denotes the discounted return of remaining in a region of the MDP where states are labeled with $\\mathsf{C}$ , until visiting for the first time a goal state labeled with $\\mathsf{T}$ , and the return is the binary reward signal capturing this event. Safety w.r.t. 
failure states $\\mathsf{C}$ can be expressed as the safety-constrained reachability to a destination $\\mathsf{T}$ through $\\neg \\mathsf{CUT}$ . Notice that $V_{\\pi}^{\\varphi}(s) = \\mathbb{P}_{\\pi}^{\\mathcal{M}_s}(\\varphi)$ when $\\gamma = 1$ .", + "bbox": [ + 169, + 584, + 826, + 758 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Latent MDP. Given the original (continuous, possibly unknown) environment model $\\mathcal{M}$ , a latent space model is another (smaller, explicit) MDP $\\overline{\\mathcal{M}} = \\langle \\overline{S}, \\overline{\\mathcal{A}}, \\overline{\\mathbf{P}}, \\overline{\\mathcal{R}}, \\bar{\\ell}, \\mathbf{AP}, \\bar{s}_I \\rangle$ with state-action space linked to the original one via state and action embedding functions: $\\phi \\colon S \\to \\overline{S}$ and $\\psi \\colon \\overline{S} \\times \\overline{A} \\to A$ . We refer to $\\langle \\overline{\\mathcal{M}}, \\phi, \\psi \\rangle$ as a latent space model of $\\mathcal{M}$ and $\\overline{\\mathcal{M}}$ as its latent MDP. Our goal is to learn $\\langle \\overline{\\mathcal{M}}, \\phi, \\psi \\rangle$ by optimizing an equivalence criterion between the two models. We assume that $d_{\\overline{S}}$ is a metric on $\\overline{S}$ , and write $\\overline{\\Pi}$ for the set of policies of $\\overline{\\mathcal{M}}$ and $\\overline{V}_{\\overline{\\pi}}$ for the values of running $\\overline{\\pi} \\in \\overline{\\Pi}$ in $\\overline{\\mathcal{M}}$ . Remark 1 (Latent flow). The latent policy $\\overline{\\pi}$ can be seen as a policy in $\\mathcal{M}$ (cf. Fig. 1a): states passed to $\\overline{\\pi}$ are first embedded with $\\phi$ to the latent space, then the actions produced by $\\overline{\\pi}$ are executed via $\\psi$ in the original environment. 
Let $s \\in S$ , we write $\\bar{a} \\sim \\overline{\\pi}(\\cdot | s)$ for $\\overline{\\pi}(\\cdot | \\phi(s))$ , then the reward and next state are respectively given by $\\mathcal{R}(s, \\bar{a}) = \\mathcal{R}(s, \\psi(\\phi(s), \\bar{a}))$ and $s' \\sim \\mathbf{P}(\\cdot | s, \\bar{a}) = \\mathbf{P}(\\cdot | s, \\psi(\\phi(s), \\bar{a}))$ .", + "bbox": [ + 169, + 767, + 826, + 922 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Local losses allow quantifying the distance between the original and latent reward/transition functions in the local setting, i.e., under a given state-action distribution $\\xi \\in \\Delta(S \\times \\overline{\\mathcal{A}})$ :", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathcal {R}} ^ {\\xi} = \\underset {s, \\bar {a} \\sim \\xi} {\\mathbb {E}} \\left| \\mathcal {R} (s, \\bar {a}) - \\overline {{\\mathcal {R}}} (\\phi (s), \\bar {a}) \\right|, \\quad L _ {\\mathbf {P}} ^ {\\xi} = \\underset {s, \\bar {a} \\sim \\xi} {\\mathbb {E}} D \\big (\\phi \\mathbf {P} (\\cdot | s, \\bar {a}), \\overline {{\\mathbf {P}}} (\\cdot | \\phi (s), \\bar {a}) \\big)\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 136, + 782, + 162 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\phi \\mathbf{P}(\\cdot \\mid s,\\bar{a})$ is the distribution of drawing $s^\\prime \\sim \\mathbf{P}(\\cdot \\mid s,\\bar{a})$ then embedding $\\bar{s}^{\\prime} = \\phi (s^{\\prime})$ , and $D$ is a discrepancy measure. Fig 1a depicts the losses when states and actions are drawn from a stationary distribution $\\xi_{\\overline{\\pi}}$ resulting from running $\\bar{\\pi}\\in \\overline{\\Pi}$ in $\\mathcal{M}$ . 
In this work, we focus on the case where $D$ is the Wasserstein distance $W_{d_{\\overline{s}}}$ : given two distributions $P,Q$ over a measurable set $\\mathcal{X}$ equipped with a metric $d$ , $W_{d}$ is the solution of the optimal transport (OT) from $P$ to $Q$ , i.e., the minimum cost of changing $P$ into $Q$ (Villani, 2009): $W_{d}(P,Q) = \\inf_{\\lambda \\in \\Lambda (P,Q)}\\mathbb{E}_{x,y\\sim \\lambda}d(x,y)$ , $\\Lambda (P,Q)$ being the set of all couplings of $P$ and $Q$ . The Kantorovich duality yields $W_{d}(P,Q) = \\sup_{f\\in \\mathcal{F}_{d}}\\mathbb{E}_{x\\sim P}f(x) - \\mathbb{E}_{x\\sim Q}f(y)$ where $\\mathcal{F}_d$ is the set of 1-Lipschitz functions. Local losses are related to a well-established behavioral equivalence between transition systems, called bisimulation.", + "bbox": [ + 169, + 165, + 826, + 294 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Bisimulation. A bisimulation $\\mathcal{B}$ on $\\mathcal{M}$ is a behavioral equivalence between states $s_1, s_2 \\in S$ so that, $s_1 \\mathcal{B} s_2$ iff (i) $\\mathbf{P}(T \\mid s_1, a) = \\mathbf{P}(T \\mid s_2, a)$ , (ii) $\\ell(s_1) = \\ell(s_2)$ , and (iii) $\\mathcal{R}(s_1, a) = \\mathcal{R}(s_2, a)$ for each action $a \\in \\mathcal{A}$ and (Borel measurable) equivalence class $T \\in S / \\mathcal{B}$ . Properties of bisimulation include trajectory and value equivalence (Larsen & Skou, 1989; Givan et al., 2003). Requirements (ii) and (iii) can be respectively relaxed depending on whether we focus only on behaviors formalized through $\\mathbf{AP}$ or rewards. The relation can be extended to compare two MDPs (e.g., $\\mathcal{M}$ and $\\overline{\\mathcal{M}}$ ) by considering the disjoint union of their state space. 
We denote the largest bisimulation relation by $\\sim$ .", + "bbox": [ + 169, + 301, + 826, + 401 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Characterized by a logical family of functional expressions derived from a logic $\\mathcal{L}$ , bisimulation pseudometrics (Desharnais et al., 2004) generalize the notion of bisimilarity. More specifically, given a policy $\\pi \\in \\Pi$ , we consider a family $\\mathcal{F}$ of real-valued functions parameterized by a discount factor $\\gamma$ and defining the semantics of $\\mathcal{L}$ in $\\mathcal{M}_{\\pi}$ . Such functional expressions allow to formalize discounted properties such as reachability, safety, as well as general $\\omega$ -regular specifications (Chatterjee et al., 2010) and may include rewards as well (Ferns et al., 2014). The pseudometric $\\widetilde{d}_{\\pi}$ is defined as the largest behavioral difference $\\widetilde{d}_{\\pi}(s_1,s_2) = \\sup_{f\\in \\mathcal{F}}|f(s_1) - f(s_2)|$ , and its kernel is bisimilarity: $\\widetilde{d}_{\\pi}(s_1,s_2) = 0$ iff $s_1\\sim s_2$ . In particular, value functions are Lipschitz-continuous w.r.t. $\\widetilde{d}_{\\pi}$ : $|V_{\\pi}^{\\prime}(s_1) - V_{\\pi}^{\\prime}(s_2)|\\leqslant K\\widetilde{d}_{\\pi}(s_1,s_2)$ , where $K$ is $^{1 / (1 - \\gamma)}$ if rewards are included in $\\mathcal{F}$ and 1 otherwise. To ensure the upcoming bisimulation guarantees, we make the following assumptions:", + "bbox": [ + 169, + 406, + 826, + 556 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Assumption 2.1. 
MDP $\\mathcal{M}$ is ergodic, $\\operatorname{Im}(\\mathcal{R})$ is a bounded space scaled in $[-1/2, 1/2]$ , and the embedding function preserves the labels, i.e., $\\phi(s) = \\bar{s} \\implies \\ell(s) = \\bar{\\ell}(\\bar{s})$ for $s \\in S$ , $\\bar{s} \\in \\bar{S}$ .", + "bbox": [ + 169, + 556, + 823, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that the ergodicity assumption is compliant with episodic RL and a wide range of continuous learning tasks (see Huang 2020; Delgrange et al. 2022 for detailed discussions on this setting).", + "bbox": [ + 169, + 598, + 823, + 628 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Bisimulation bounds (Delgrange et al., 2022). $\\mathcal{M}$ being set over continuous spaces with possibly unknown dynamics, evaluating $\\tilde{d}$ can turn out to be particularly arduous, if not intractable. A solution is to evaluate the original and latent model bisimilarity via local losses: fix $\\bar{\\pi} \\in \\overline{\\Pi}$ , assume $\\overline{\\mathcal{M}}$ is discrete, then given the induced stationary distribution $\\xi_{\\bar{\\pi}}$ in $\\mathcal{M}$ , let $s_1, s_2 \\in S$ with $\\phi(s_1) = \\phi(s_2)$ :", + "bbox": [ + 169, + 636, + 823, + 695 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {s \\sim \\xi_ {\\bar {\\pi}}} {\\mathbb {E}} \\widetilde {d} _ {\\bar {\\pi}} (s, \\phi (s)) \\leqslant \\frac {L _ {\\mathcal {R}} ^ {\\xi_ {\\bar {\\pi}}} + \\gamma L _ {\\mathbf {P}} ^ {\\xi_ {\\bar {\\pi}}}}{1 - \\gamma}, \\quad \\widetilde {d} _ {\\bar {\\pi}} (s _ {1}, s _ {2}) \\leqslant \\left(\\frac {L _ {\\mathcal {R}} ^ {\\xi_ {\\bar {\\pi}}} + \\gamma L _ {\\mathbf {P}} ^ {\\xi_ {\\bar {\\pi}}}}{1 - \\gamma}\\right) \\left(\\xi_ {\\bar {\\pi}} ^ {- 1} (s _ {1}) + \\xi_ {\\bar {\\pi}} ^ {- 1} (s _ {2})\\right). 
\\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 696, + 823, + 732 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The two inequalities guarantee respectively the quality of the abstraction and representation: when local losses are small, (i) states and their embedding are bisimilarly close in average, and (ii) all states sharing the same discrete representation are bisimilarly close. The local losses and related bounds can be efficiently PAC-estimated. Our goal is to learn a latent model where the behaviors of the agent executing $\\bar{\\pi}$ can be formally verified, and the bounds offer a confidence metric allowing to lift the guarantees obtained this way back to the original model $\\mathcal{M}$ , when the latter operates under $\\bar{\\pi}$ . We show in the following how to learn a latent space model by optimizing the aforementioned bounds, and distill policies $\\pi \\in \\Pi$ obtained via any RL technique to a latent policy $\\bar{\\pi} \\in \\overline{\\Pi}$ .", + "bbox": [ + 169, + 734, + 826, + 847 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 WASSERSTEIN AUTO-ENCODEDMDPs", + "text_level": 1, + "bbox": [ + 171, + 864, + 529, + 880 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fix $\\overline{\\mathcal{M}}_{\\theta} = \\langle \\overline{S},\\overline{\\mathcal{A}},\\overline{\\mathbf{P}}_{\\theta},\\overline{\\mathcal{R}}_{\\theta},\\bar{\\ell},\\mathbf{AP},\\bar{s}_I\\rangle$ and $\\langle \\overline{\\mathcal{M}}_{\\theta},\\phi_{\\iota},\\psi_{\\theta}\\rangle$ as a latent space model of $\\mathcal{M}$ parameterized by $\\iota$ and $\\theta$ . 
Our method relies on learning a behavioral model $\\xi_{\\theta}$ of $\\mathcal{M}$ from which we can", + "bbox": [ + 169, + 893, + 826, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "retrieve the latent space model and distill $\\pi$ . This can be achieved via the minimization of a suitable discrepancy between $\\xi_{\\theta}$ and $\\mathcal{M}_{\\pi}$ . VAE-MDPs optimize a lower bound on the likelihood of the dynamics of $\\mathcal{M}_{\\pi}$ using the Kullback-Leibler divergence, yielding (i) $\\overline{\\mathcal{M}}_{\\theta}$ , (ii) a distillation $\\bar{\\pi}_{\\theta}$ of $\\pi$ , and (iii) $\\phi_{\\iota}$ and $\\psi_{\\theta}$ . Local losses are not directly minimized, but rather variational proxies that do not offer theoretical guarantees during the learning process. To control the local losses minimization and exploit their theoretical guarantees, we present a novel autoencoder that incorporates them in its objective, derived from the OT. 
Proofs of the claims made in this Section are provided in Appendix A.", + "bbox": [ + 169, + 103, + 826, + 203 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 THE OBJECTIVE FUNCTION", + "text_level": 1, + "bbox": [ + 171, + 218, + 406, + 232 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Assume that $S$ , $\\mathcal{A}$ , and $\\operatorname{Im}(\\mathcal{R})$ are respectively equipped with metrics $d_{\\mathcal{S}}$ , $d_{\\mathcal{A}}$ , and $d_{\\mathcal{R}}$ , we define the raw transition distance metric $\\vec{d}$ as the component-wise sum of distances between states, actions, and rewards occurring of along transitions: $\\vec{d}(\\langle s_1, a_1, r_1, s_1' \\rangle, \\langle s_2, a_2, r_2, s_2' \\rangle) = d_{\\mathcal{S}}(s_1, s_2) + d_{\\mathcal{A}}(a_1, a_2) + d_{\\mathcal{R}}(r_1, r_2) + d_{\\mathcal{S}}(s_1', s_2')$ . Given Assumption 2.1, we consider the OT between local distributions, where traces are drawn from episodic RL processes or infinite interactions (we show in Appendix A.1 that considering the OT between trace-based distributions in the limit amounts to reasoning about stationary distributions). Our goal is to minimize $W_{\\vec{d}}(\\xi_{\\pi}, \\xi_{\\theta})$ so that", + "bbox": [ + 169, + 243, + 826, + 349 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\xi_ {\\theta} (s, a, r, s ^ {\\prime}) = \\int_ {\\bar {S} \\times \\bar {A} \\times \\bar {S}} P _ {\\theta} (s, a, r, s ^ {\\prime} \\mid \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) d \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}} (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 289, + 354, + 825, + 388 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $P_{\\theta}$ is a transition decoder and $\\bar{\\xi}_{\\overline{\\pi}_{\\theta}}$ denotes the stationary distribution of the latent model $\\overline{\\mathcal{M}}_{\\theta}$ . As proved by Bousquet et al. 
(2017), this model allows to derive a simpler form of the OT: instead of finding the optimal coupling of (i) the stationary distribution $\\xi_{\\pi}$ of $\\mathcal{M}_{\\pi}$ and (ii) the behavioral model $\\xi_{\\theta}$ , in the primal definition of $W_{\\vec{d}}(\\xi_{\\pi},\\xi_{\\theta})$ , it is sufficient to find an encoder $q$ whose marginal is given by $Q(\\bar{s},\\bar{a},\\bar{s}^{\\prime}) = \\mathbb{E}_{s,a,s^{\\prime}\\sim \\xi_{\\pi}}q(\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\mid s,a,s^{\\prime})$ and identical to $\\xi_{\\pi}$ . This is summarized in the following Theorem, yielding a particular case of Wasserstein-autoencoder Tolstikhin et al. (2018):", + "bbox": [ + 169, + 393, + 823, + 481 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Theorem 3.1. Let $\\xi_{\\theta}$ and $P_{\\theta}$ be respectively a behavioral model and transition decoder as defined in Eq. 2, $\\mathcal{G}_{\\theta} \\colon \\overline{\\mathcal{S}} \\to \\mathcal{S}$ be a state-wise decoder, and $\\psi_{\\theta}$ be an action embedding function. 
Assume $P_{\\theta}$ is deterministic with Dirac function $G_{\\theta}(\\bar{s}, \\bar{a}, \\bar{s}') = \\langle \\mathcal{G}_{\\theta}(\\bar{s}), \\psi_{\\theta}(\\bar{s}, \\bar{a}), \\overline{\\mathcal{R}}_{\\theta}(\\bar{s}, \\bar{a}), \\mathcal{G}_{\\theta}(\\bar{s}') \\rangle$ , then", + "bbox": [ + 169, + 484, + 823, + 527 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nW_{\\vec{d}}(\\xi_{\\pi},\\xi_{\\theta}) = \\inf_{q:Q = \\bar{\\xi}_{\\pi_{\\theta}}}\\mathbb{E}_{\\substack{s,a,r,s^{\\prime}\\sim \\xi_{\\pi}\\\\ \\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim q(\\cdot |s,a,s^{\\prime})}}\\mathbb{E}_{\\substack{\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim q(\\cdot |s,a,s^{\\prime})}}\\vec{d}\\bigl(\\bigl\\langle s,a,r,s^{\\prime}\\bigr\\rangle ,G_{\\theta}\\bigl(\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\bigr)\\bigr).\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 535, + 756, + 564 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Henceforth, fix $\\phi_{\\iota} \\colon S \\to \\bar{S}$ and $\\phi_{\\iota}^{\\mathcal{A}} \\colon \\bar{S} \\times \\mathcal{A} \\to \\Delta(\\overline{\\mathcal{A}})$ as parameterized state and action encoders with $\\phi_{\\iota}(\\bar{s}, \\bar{a}, \\bar{s}' \\mid s, a, s') = \\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}} \\cdot \\phi_{\\iota}^{\\mathcal{A}}(\\bar{a} \\mid \\bar{s}, a) \\cdot \\mathbf{1}_{\\phi_{\\iota}(s')} = \\bar{s}'$ , and define the marginal encoder as $Q_{\\iota} = \\mathbb{E}_{s, a, s' \\sim \\xi_{\\pi}} \\phi_{\\iota}(\\cdot \\mid s, a, s')$ . 
Training the model components can be achieved via the objective:", + "bbox": [ + 169, + 578, + 823, + 626 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min_{\\iota ,\\theta}\\underset {s,a,r,s^{\\prime}\\sim \\xi_{\\pi}}{\\mathbb{E}}\\underset {\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim \\phi_{\\iota}(\\cdot |s,a,s^{\\prime})}{\\mathbb{E}} \\vec{d}\\bigl(\\bigl\\langle s, a,r,s^{\\prime}\\bigr\\rangle ,G_{\\theta}\\bigl(\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\bigr)\\bigr) + \\beta \\cdot D\\bigl(Q_{\\iota},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}}\\bigr),\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 632, + 759, + 660 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $D$ is an arbitrary discrepancy metric and $\\beta > 0$ a hyperparameter. Intuitively, the encoder $\\phi_{\\iota}$ can be learned by enforcing its marginal distribution $Q_{\\iota}$ to match $\\bar{\\xi}_{\\overline{\\pi}_{\\theta}}$ through this discrepancy.", + "bbox": [ + 169, + 665, + 823, + 694 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Remark 2. If $\\mathcal{M}$ has a discrete action space, then learning $\\overline{\\mathcal{A}}$ is not necessary. We can set $\\overline{\\mathcal{A}} = \\mathcal{A}$ using identity functions for the action encoder and decoder (details in Appendix A.2).", + "bbox": [ + 169, + 696, + 823, + 726 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When $\\pi$ is executed in $\\mathcal{M}$ , observe that its parallel execution in $\\overline{\\mathcal{M}}_{\\theta}$ is enabled by the action encoder $\\phi_{\\iota}^{A}$ : given an original state $s \\in S$ , $\\pi$ first prescribes the action $a \\sim \\pi(\\cdot \\mid s)$ , which is then embedded in the latent space via $\\bar{a} \\sim \\phi_{\\iota}^{A}(\\cdot \\mid \\phi_{\\iota}(s), a)$ (cf. Fig. 1b). 
This parallel execution, along with setting $D$ to $W_{\\vec{d}}$ , yield an upper bound on the latent regularization, compliant with the bisimulation bounds. A two-fold regularizer is obtained thereby, defining the foundations of our objective function:", + "bbox": [ + 169, + 734, + 826, + 806 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Lemma 3.2. Define $\\mathcal{T}(\\bar{s},\\bar{a},\\bar{s}^{\\prime}) = \\mathbb{E}_{s,a\\sim \\xi_{\\pi}}[\\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}}\\cdot \\phi_{\\iota}^{A}(\\bar{a}\\mid \\bar{s},a)\\cdot \\overline{\\mathbf{P}}_{\\theta}(\\bar{s}^{\\prime}\\mid \\bar{s},\\bar{a})]$ as the distribution of drawing state-action pairs from interacting with $\\mathcal{M}$ , embedding them to the latent spaces, and finally letting them transition to their successor state in $\\overline{\\mathcal{M}}_{\\theta}$ . Then, $W_{\\vec{d}}(Q_{\\iota},\\bar{\\xi}_{\\overline{\\pi}_{\\theta}})\\leqslant W_{\\vec{d}}(\\bar{\\xi}_{\\overline{\\pi}_{\\theta}},\\mathcal{T}) + L_{\\mathbf{P}}^{\\xi_{\\pi}}$", + "bbox": [ + 169, + 809, + 828, + 858 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We therefore define the $\\mathrm{W}^2\\mathrm{AE}$ -MDP (Wasserstein-Wasserstein auto-encoded MDP) objective as:", + "bbox": [ + 169, + 867, + 803, + 882 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min_{\\substack{\\iota ,\\theta \\\\ \\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim \\phi_{\\iota}(\\cdot |s,a,s^{\\prime})}}\\mathbb{E}\\left[d_{\\mathcal{S}}(s,\\mathcal{G}_{\\theta}(\\bar{s})) + d_{\\mathcal{A}}(a,\\psi_{\\theta}(\\bar{s},\\bar{a})) + d_{\\mathcal{S}}\\big(s^{\\prime},\\mathcal{G}_{\\theta}\\big(\\bar{s}^{\\prime}\\big)\\big)\\right] + L_{\\mathcal{R}}^{\\xi_{\\pi}} + \\beta \\cdot (\\mathcal{W}_{\\xi_{\\pi}} + L_{\\mathbf{P}}^{\\xi_{\\pi}}),\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 888, + 818, + 928 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a 
conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Wasserstein $^2$ Auto-Encoded MDP" + ], + "code_body": "Input: batch size $N$ , max. step $T$ , no. of regularizer updates $m$ , penalty coefficient $\\delta > 0$ for $t = 1$ to $T$ do \nfor $i = 1$ to $N$ do \nSample a transition $s_i, a_i, r_i, s_i'$ from the original environment via $\\xi_{\\pi}$ \nEmbed the transition into the latent space by drawing $\\bar{s}_i, \\bar{a}_i, \\bar{s}_i'$ from $\\phi_\\iota(\\cdot \\mid s_i, a_i, s_i')$ \nMake the latent space model transition to the next latent state: $\\bar{s}_i^\\star \\sim \\overline{\\mathbf{P}}_\\theta(\\cdot \\mid \\bar{s}_i, \\bar{a}_i)$ \nSample a latent transition from $\\bar{\\xi}_{\\overline{\\pi}_\\theta} \\colon z_i \\sim \\bar{\\xi}_{\\overline{\\pi}_\\theta}, \\bar{a}_i' \\sim \\overline{\\pi}_\\theta(\\cdot \\mid z_i)$ , and $z_i' \\sim \\overline{\\mathbf{P}}_\\theta(\\cdot \\mid z_i, \\bar{a}_i')$ $\\mathcal{W} \\gets \\sum_{i=1}^{N} \\varphi_\\omega^\\xi(\\bar{s}_i, \\bar{a}_i, \\bar{s}_i^\\star) - \\varphi_\\omega^\\xi(z_i, \\bar{a}_i', z_i') + \\varphi_\\omega^{\\mathbf{P}}(s_i, a_i, \\bar{s}_i, \\bar{a}_i, \\bar{s}_i') - \\varphi_\\omega^{\\mathbf{P}}(s_i, a_i, \\bar{s}_i, \\bar{a}_i, \\bar{s}_i^\\star)$ $P \\gets \\sum_{i=1}^{N} \\mathrm{GP}\\big(\\varphi_\\omega^\\xi, \\langle \\bar{s}_i, \\bar{a}_i, \\bar{s}_i^\\star \\rangle, \\langle z_i, \\bar{a}_i', z_i' \\rangle\\big) + \\mathrm{GP}\\big(x \\mapsto \\varphi_\\omega^{\\mathbf{P}}(s_i, a_i, \\bar{s}_i, \\bar{a}_i, x), \\bar{s}_i', \\bar{s}_i^\\star\\big)$ \nUpdate the Lipschitz networks parameters $\\omega$ by ascending $1/N \\cdot (\\beta \\mathcal{W} - \\delta P)$ \nif $t$ mod $m = 0$ then \n $\\mathcal{L} \\gets \\sum_{i=1}^{N} 
d_{\\mathcal{S}}(s_i, \\mathcal{G}_{\\theta}(\\bar{s}_i)) + d_{\\mathcal{A}}(a_i, \\psi_{\\theta}(\\bar{s}_i, \\bar{a}_i)) + d_{\\mathcal{R}}(r_i, \\overline{\\mathcal{R}}_{\\theta}(\\bar{s}_i, \\bar{a}_i)) + d_{\\mathcal{S}}(s_i', \\mathcal{G}_{\\theta}(\\bar{s}_i'))$ \nUpdate the latent space model parameters $\\langle \\iota, \\theta\\rangle$ by descending $1/N \\cdot (\\mathcal{L} + \\beta \\mathcal{W})$ \nfunction $\\mathrm{GP}(\\varphi_\\omega, x, y)$ $\\epsilon \\sim U(0,1)$ ; $\\tilde{x} \\gets \\epsilon x + (1 - \\epsilon)y$ \nreturn ( $\\|\\nabla_{\\tilde{x}}\\varphi_{\\omega}(\\tilde{x})\\| - 1)^2$", + "bbox": [ + 173, + 125, + 799, + 393 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathcal{W}_{\\xi_{\\pi}} = W_{\\vec{d}}\\big(\\mathcal{T},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}}\\big)$ and $L_{\\mathbf{P}}^{\\xi_{\\pi}}$ are respectively called steady-state and transition regularizers. The former allows to quantify the distance between the stationary distributions respectively induced by $\\pi$ in $\\mathcal{M}$ and $\\bar{\\pi}_{\\theta}$ in $\\overline{\\mathcal{M}}_{\\theta}$ , further enabling the distillation. The latter allows to learn the latent dynamics. Note that $L_{\\mathcal{R}}^{\\xi_{\\pi}}$ and $L_{\\mathbf{P}}^{\\xi_{\\pi}}$ — set over $\\xi_{\\pi}$ instead of $\\xi_{\\bar{\\pi}_{\\theta}}$ — are not sufficient to ensure the bisimulation bounds (Eq. 1): running $\\pi$ in $\\overline{\\mathcal{M}}_{\\theta}$ depends on the parallel execution of $\\pi$ in the original model, which does not permit its (conventional) verification. Breaking this dependency is enabled by learning the distillation $\\bar{\\pi}_{\\theta}$ through $\\mathcal{W}_{\\xi_{\\pi}}$ , as shown in Fig. 1b: minimizing $\\mathcal{W}_{\\xi_{\\pi}}$ allows to make $\\xi_{\\pi}$ and $\\bar{\\xi}_{\\bar{\\pi}_{\\theta}}$ closer together, further bridging the gap of the discrepancy between $\\pi$ and $\\bar{\\pi}_{\\theta}$ . 
At any time, recovering the local losses along with the linked bisimulation bounds in the objective function of the $\\mathrm{W}^{2}\\mathrm{AE}$ -MDP is allowed by considering the latent policy resulting from this distillation:", + "bbox": [ + 169, + 422, + 826, + 570 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3.3. Assume that traces are generated by running a latent policy $\\bar{\\pi} \\in \\overline{\\Pi}$ in the original environment and let $d_{\\mathcal{R}}$ be the usual Euclidean distance, then the $W^{2}$ AE-MDP objective is", + "bbox": [ + 169, + 573, + 823, + 603 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\min_{\\iota ,\\theta}\\mathbb{E}_{s,s^{\\prime}\\sim \\xi_{\\overline{\\pi}}}\\left[d_{\\mathcal{S}}(s,\\mathcal{G}_{\\theta}(\\phi_{\\iota}(s))) + d_{\\mathcal{S}}\\big(s^{\\prime},\\mathcal{G}_{\\theta}\\big(\\phi_{\\iota}\\big(s^{\\prime}\\big)\\big)\\big)\\right] + L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}} + \\beta \\cdot (\\mathcal{W}_{\\xi_{\\overline{\\pi}}} + L_{\\mathbf{P}}^{\\xi_{\\overline{\\pi}}}).\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 609, + 764, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Optimizing the regularizers is enabled by the dual form of the OT: we introduce two parameterized networks, $\\varphi_{\\omega}^{\\xi}$ and $\\varphi_{\\omega}^{\\mathbf{P}}$ , constrained to be 1-Lipschitz and trained to attain the supremum of the dual:", + "bbox": [ + 169, + 656, + 823, + 686 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {W} _ {\\xi_ {\\pi}} (\\omega) = \\max _ {\\omega} \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\iota} ^ {\\mathcal {A}} (\\cdot | \\phi_ {\\iota} (s), a)} {\\mathbb {E}} \\underset {\\bar {s} ^ {\\star} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\xi} (\\phi_ {\\iota} (s), 
\\bar {a}, \\bar {s} ^ {\\star}) - \\underset {z, \\bar {a} ^ {\\prime}, z ^ {\\prime} \\sim \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\xi} (z, \\bar {a} ^ {\\prime}, z ^ {\\prime})\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 691, + 813, + 720 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} (\\omega) = \\max _ {\\omega} \\underset {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi} \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} (\\cdot | s, a, s ^ {\\prime})} {\\mathbb {E}} \\underset {s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}} {\\mathbb {E}} \\left[ \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) - \\underset {\\bar {s} ^ {\\star} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\star}) \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 196, + 729, + 797, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Details to derive this tractable form of $L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)$ are in Appendix A.5. The networks are constrained via the gradient penalty approach of Gulrajani et al. (2017), leveraging that any differentiable function is 1-Lipschitz iff it has gradients with norm at most 1 everywhere (we show in Appendix A.6 this is still valid for relaxations of discrete spaces). The final learning process is presented in Algorithm 1.", + "bbox": [ + 169, + 768, + 823, + 825 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 DISCRETE LATENT SPACES", + "text_level": 1, + "bbox": [ + 171, + 842, + 403, + 856 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To enable the verification of latent models supported by the bisimulation guarantees of Eq. 1, we focus on the special case of discrete latent space models. 
Our approach relies on continuous relaxation of discrete random variables, regulated by some temperature parameter(s) $\\lambda$ : discrete random variables are retrieved as $\\lambda \\rightarrow 0$ , which amounts to applying a rounding operator. For training, we use the", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ba6acc59c6dd787cf773da4990450ac8c3811bb05c69258cb6c36e0405fa4212.jpg", + "image_caption": [ + "Figure 2: W $^2$ AE-MDP architecture. Distances are depicted by red dotted lines." + ], + "image_footnote": [], + "bbox": [ + 173, + 98, + 828, + 340 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "temperature-controlled relaxations to differentiate the objective and let the gradient flow through the network. When we deploy the latent policy in the environment and formally check the latent model, the zero-temperature limit is used. An overview of the approach is depicted in Fig. 2.", + "bbox": [ + 169, + 391, + 826, + 436 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "State encoder. We work with a binary representation of the latent states. First, this induces compact networks, able to deal with a large discrete space via a tractable number of parameter variables. But most importantly, this ensures that Assumption 2.1 is satisfied: let $n = \\log_2|\\bar{S}|$ , we reserve $|\\mathbf{AP}|$ bits in $\\bar{S}$ and each time $s\\in S$ is passed to $\\phi_{\\iota}$ , $n - |\\mathbf{AP}|$ bits are produced and concatenated with $\\ell (s)$ , ensuring a perfect reconstruction of the labels and further bisimulation bounds. 
To produce Bernoulli variables, $\\phi_{\\iota}$ deterministically maps $s$ to a latent code $z$ , passed to the Heaviside $H(z) = \\mathbf{1}_{z > 0}$ . We train $\\phi_{\\iota}$ by using the smooth approximation $H_{\\lambda}(z) = \\sigma (^{2}z / \\lambda)$ , satisfying $H = \\lim_{\\lambda \\to 0}H_{\\lambda}$ .", + "bbox": [ + 169, + 444, + 826, + 546 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Latent distributions. Besides the discontinuity of their latent image space, a major challenge of optimizing over discrete distributions is sampling, required to be a differentiable operation. We circumvent this by using concrete distributions (Jang et al., 2017; Maddison et al., 2017): the idea is to sample reparameterizable random variables from $\\lambda$ -parameterized distributions, and applying a differentiable, nonlinear operator in downstream. We use the Gumbel softmax trick to sample from distributions over (one-hot encoded) latent actions $(\\phi_{\\iota}^{A}, \\bar{\\pi}_{\\theta})$ . For binary distributions $(\\overline{\\mathbf{P}}_{\\theta}, \\bar{\\xi}_{\\bar{\\pi}_{\\theta}})$ , each relaxed Bernoulli with logit $\\alpha$ is retrieved by drawing a logistic random variable located in $\\alpha/\\lambda$ and scaled to $1/\\lambda$ , then applying a sigmoid in downstream. We emphasize that this trick alone (as used by Corneil et al. 2018; Delgrange et al. 2022) is not sufficient: it yields independent Bernoullis, being too restrictive in general, which prevents from learning sound transition dynamics (cf. Example 1).", + "bbox": [ + 169, + 554, + 828, + 696 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Example 1. Let $\\overline{\\mathcal{M}}$ be the discrete MC of Fig. 3. In one-hot, $\\mathbf{AP} = \\{\\text{goal}: \\langle 1,0\\rangle$ , unsafe: $\\langle 0,1\\rangle\\}$ . 
We assume that 3 bits are used for the (binary) state space, with $\\bar{S} = \\{\\bar{s}_0:\\langle 0,0,0\\rangle,\\bar{s}_1:\\langle 1,0,0\\rangle,\\bar{s}_2:\\langle 0,1,0\\rangle,\\bar{s}_3:\\langle 0,1,1\\rangle\\}$ (the two first bits are reserved for the labels). Considering each bit as being independent is not sufficient to learn $\\overline{\\mathbf{P}}$ : the optimal estimation $\\overline{\\mathbf{P}}_{\\theta^*}(\\cdot \\mid \\bar{s}_0)$ is in that case represented by the independent Bernoulli vector $\\mathbf{b} = \\langle 1 / 2,1 / 2,1 / 4\\rangle$ , giving the probability to go from $\\bar{s}_0$ to each bit independently. This yields a poor estimation of", + "bbox": [ + 169, + 710, + 553, + 851 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0629839b821124de73e3d0a3e4a307a3344303d23062ef901fc0208c05f77eb4.jpg", + "image_caption": [ + "Figure 3: Markov Chain with four states; labels are drawn next to their state." + ], + "image_footnote": [], + "bbox": [ + 575, + 714, + 826, + 790 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the actual transition function: $\\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_0\\mid \\bar{s}_0) = (1 - \\mathbf{b}_1)\\cdot (1 - \\mathbf{b}_2)\\cdot (1 - \\mathbf{b}_3) = \\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_1\\mid \\bar{s}_0) = \\mathbf{b}_1\\cdot (1 - \\mathbf{b}_2)\\cdot (1 - \\mathbf{b}_3) = \\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_2\\mid \\bar{s}_0) = (1 - \\mathbf{b}_1)\\cdot \\mathbf{b}_2\\cdot (1 - \\mathbf{b}_3) = 3 / 16,\\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_3\\mid \\bar{s}_0) = (1 - \\mathbf{b}_1)\\cdot \\mathbf{b}_2\\cdot \\mathbf{b}_3 = 1 / 16.$", + "bbox": [ + 169, + 851, + 828, + 883 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We consider instead relaxed multivariate Bernoulli distributions by decomposing $P \\in \\Delta(\\bar{S})$ as a product of conditionals: $P(\\bar{s}) = \\prod_{i=1}^{n} P(\\bar{s}_i \\mid 
\\bar{s}_{1:i-1})$ where $\\bar{s}_i$ is the $i^{\\text{th}}$ entry (bit) of $\\bar{s}$ . We learn", + "bbox": [ + 169, + 892, + 826, + 926 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9c55334604e272a24a2d2c459da443d86f0342e1fd7a55b89480a32c1fca213f.jpg", + "image_caption": [ + "(a) $\\mathrm{W}^2\\mathrm{AE}$ -MDP objective: reconstruction loss, transition and steady-state regularizers" + ], + "image_footnote": [], + "bbox": [ + 183, + 104, + 300, + 170 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/714939f162e90922feac0a1018e369dcfa27d5f6eec47ad4de6e24d64c45e0e6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 303, + 104, + 415, + 170 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/08bc6b25f957e23bb23c7289fe848fcfe54d1ff27c81e02320a9067f2e82e8aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 416, + 104, + 529, + 170 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7b54af92e28e4b018ff7fd0a910a2c83b0aa1aa63bb9634874cc3e34a1196259.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 534, + 104, + 640, + 170 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2b18f0b54e58bdbb50c1f701e27bce3cfbfb89e25b9addfa9341989094d23ab1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 642, + 104, + 818, + 170 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/536af967a77909eae3bf9ace5be851abc0cff870cd3527cf2c4253a3e2289f5e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 186, + 200, + 310, + 263 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": 
"images/0cc522ef4cb964743c61a30daf501ba6b5148d2149b2839dd93fc7ddb89629f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 313, + 200, + 419, + 263 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2e1876c67cc627048c46904e159bf1f7220478afda0433f6a1e357849f6ebeb0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 200, + 529, + 263 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e2eac6b2937052e31cf28f322688c4dca02183fb5ac7b7271aeb9a4b06321bb7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 534, + 200, + 643, + 263 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a8c766d4827e2b8abc267f67b11b833d2e53501b4c4cb0974dc12cdd91d97cb1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 200, + 818, + 263 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/df8a04275e496f0fbac36084da0e6177998fbf260116ab4544543c0c9aeea6d5.jpg", + "image_caption": [ + "(b) PAC local losses approximation for an error of at most $10^{-2}$ and probability confidence 0.955" + ], + "image_footnote": [], + "bbox": [ + 183, + 294, + 295, + 359 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/73c72890fd47c7601ae57c2406a94476c8475834315122aa719fdcd53a86acf3.jpg", + "image_caption": [ + "(c) Episode return obtained when executing the distilled policy in the original MDP (averaged over 30 episodes)" + ], + "image_footnote": [], + "bbox": [ + 302, + 294, + 410, + 359 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/424acf2d704a79f4faa63a8ad3b3f9a19381996e84a5600f23ce499f220ce9e1.jpg", + "image_caption": [ + "Figure 4: For each environment, we trained five different instances of the models with different random seeds: the solid line is the median and the shaded interval the interquartile range." 
+ ], + "image_footnote": [], + "bbox": [ + 416, + 294, + 524, + 359 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1952d42234fa0463371b4de55b130d80d41790bdf5356204e2292de79b8ec308.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 531, + 294, + 640, + 359 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1ed507649916381414d2551a56cfda6d2a52c21abecffdfc6917fc5cf6f73be0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 294, + 818, + 358 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "such distributions by introducing a masked autoregressive flow (MAF, Papamakarios et al. 2017) for relaxed Bernoullis via the recursion: $\\bar{s}_i = \\sigma(l_i + \\alpha_i / \\lambda)$ , where $l_i \\sim \\mathrm{Logistic}(0,1)$ , $\\alpha_i = f_i(\\bar{s}_{1:i-1})$ , and $f$ is a MADE (Germain et al., 2015), a feedforward network implementing the conditional output dependency on the inputs via a mask that only keeps the necessary connections to enforce the conditional property. We use this MAF to model $\\overline{\\mathbf{P}}_\\theta$ and the dynamics related to the labels in $\\bar{\\xi}_{\\overline{\\pi}_\\theta}$ . We fix the logits of the remaining $n - |\\mathbf{AP}|$ bits to 0 to allow for a fairly distributed latent space.", + "bbox": [ + 169, + 449, + 826, + 536 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 554, + 328, + 569 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We evaluate the quality of latent space models learned and policies distilled through $\\mathrm{W}^2\\mathrm{AE}$ -MDPs. To do so, we first trained deep-RL policies (DQN, Mnih et al. 2015 on discrete, and SAC, Haarnoja et al. 2018 on continuous action spaces) for various OpenAI benchmarks (Brockman et al., 2016), which we then distill via our approach (Figure 4). 
We thus evaluate (a) the $\\mathrm{W}^2\\mathrm{AE}$ -MDP training metrics, (b) the abstraction and representation quality via PAC local losses upper bounds (Delgrange et al., 2022), and (c) the distilled policy performance when deployed in the original environment. The confidence metrics and performance are compared with those of VAE-MDPs. Finally, we formally verify properties in the latent model. The exact setting to reproduce our results is in Appendix B.", + "bbox": [ + 169, + 585, + 826, + 700 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Learning metrics. The objective (Fig. 4a) is a weighted sum of the reconstruction loss and the two Wasserstein regularizers. The choice of $\\beta$ defines the optimization direction. In contrast to VAEs (cf. Appendix C), WAEs indeed naturally avoid posterior collapse (Tolstikhin et al., 2018), indicating that the latent space is consistently distributed. Optimizing the objective (Fig. 4a) effectively allows minimizing the local losses (Fig. 4b) and recovering the performance of the original policy (Fig. 4c).", + "bbox": [ + 169, + 707, + 826, + 780 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Local losses. For V- and WAEs, we formally evaluate PAC upper bounds on $L_{\\mathcal{R}}^{\\xi \\bar{\\pi}_{\\theta}}$ and $L_{\\mathbf{P}}^{\\xi \\bar{\\pi}_{\\theta}}$ via the algorithm of Delgrange et al. (2022) (Fig 4b). The lower the local losses, the closer $\\mathcal{M}$ and $\\overline{\\mathcal{M}}_{\\theta}$ are in terms of behaviors induced by $\\bar{\\pi}_{\\theta}$ (cf. Eq. 1). In VAEs, the losses are evaluated on a transition function $\\hat{\\mathbf{P}}$ obtained via frequency estimation of the latent transition dynamics (Delgrange et al., 2022), by reconstructing the transition model a posteriori and collecting data to estimate the transition probabilities (e.g., Bazille et al. 2020; Corneil et al. 2018). We thus also report the metrics for $\\hat{\\mathbf{P}}$ . 
Our bounds quickly converge to close values in general for $\\overline{\\mathbf{P}}_{\\theta}$ and $\\hat{\\mathbf{P}}$ , whereas for VAEs, the convergence is slow and unstable, with $\\hat{\\mathbf{P}}$ offering better bounds. We emphasize that WAEs do not require this additional reconstruction step to obtain losses that can be leveraged to assess the", + "bbox": [ + 168, + 790, + 826, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/37042cca8921ef3c7dfcc02425615c4885de29a516230f68998f39a729e87a6e.jpg", + "table_caption": [ + "Table 1: Formal Verification of distilled policies. Values are computed for $\\gamma = {0.99}$ (lower is better)." + ], + "table_footnote": [], + "table_body": "
Environmentstep (105)SA|S||A|LξπθR(PAC)LξπθP(PAC)||Vπθ||V̅πθ(¯sI)
CartPole1.2⊆ R4{1,2}51220.004996530.3996363.712130.0316655
MountainCar2.32⊆ R2{1,2}102420.01417630.3823232.837140
Acrobot4.3⊆ R6{1,2,3}819230.03476980.6494782.220060.0021911
LunarLander3.2⊆ R8[-1,1]21638430.02072050.1313570.03728830.0702039
Pendulum3.7⊆ R3[-2,2]819230.02667450.5395084.330060.0348492
", + "bbox": [ + 202, + 127, + 792, + 208 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "quality of the model, in contrast to VAEs, where learning $\\overline{\\mathbf{P}}_{\\theta}$ was performed via overly restrictive distributions, leading to poor estimation in general (cf. Ex. 1). Finally, when the distilled policies offer comparable performance (Fig. 4c), our bounds are either close to or better than those of VAEs.", + "bbox": [ + 169, + 229, + 826, + 273 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Distillation. The bisimulation guarantees (Eq. 1) are only valid for $\\bar{\\pi}_{\\theta}$ , the policy under which formal properties can be verified. It is crucial that $\\bar{\\pi}_{\\theta}$ achieves performance close to $\\pi$ , the original one, when deployed in the RL environment. We evaluate the performance of $\\bar{\\pi}_{\\theta}$ via the undiscounted episode return $\\mathbf{R}_{\\bar{\\pi}_{\\theta}}$ obtained by running $\\bar{\\pi}_{\\theta}$ in the original model $\\mathcal{M}$ . We observe that $\\mathbf{R}_{\\bar{\\pi}_{\\theta}}$ approaches faster the original performance $\\mathbf{R}_{\\pi}$ for W- than VAEs: WAEs converge in a few steps for all environments, whereas the full learning budget is sometimes necessary with VAEs. The success in recovering the original performance emphasizes the representation quality guarantees (Eq. 1) induced by WAEs: when local losses are minimized, all original states that are embedded to the same representation are bisimilarly close. Distilling the policy over the new representation, albeit discrete and hence coarser, still achieves effective performance since $\\phi_{\\iota}$ keeps only what is important to preserve behaviors, and thus values. 
Furthermore, the distillation can remove some non-robustness obtained during RL: $\\bar{\\pi}_{\\theta}$ prescribes the same actions for bisimilarly close states, whereas this is not necessarily the case for $\\pi$ .", + "bbox": [ + 169, + 282, + 826, + 450 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Formal verification. To formally verify $\\overline{\\mathcal{M}}_{\\theta}$ , we implemented a value iteration (VI) engine, handling the neural network encoding of the latent space for discounted properties, which is one of the most popular algorithms for checking property probabilities in MDPs (e.g., Baier & Katoen 2008; Hensel et al. 2021; Kwiatkowska et al. 2022). We verify time-to-failure properties $\\varphi$ , often used to check the failure rate of a system (Pnueli, 1977) by measuring whether the agent fails before the end of the episode. Although simple, such properties highlight the applicability of our approach on reachability events, which are building blocks to verify MDPs (Baier & Katoen 2008; cf. Appendix B.7). In particular, we checked whether the agent reaches an unsafe position or angle (CartPole, LunarLander), does not reach its goal position (MountainCar, Acrobot), and does not reach and stay in a safe region of the system (Pendulum). Results are in Table 1: for each environment, we select the distilled policy which gives the best trade-off between performance (episode return) and abstraction quality (local losses). 
As extra confidence metric, we report the value difference $\\| V_{\\overline{\\pi}_{\\theta}} \\| = |V_{\\overline{\\pi}_{\\theta}}(s_I) - \\bar{V}_{\\overline{\\pi}_{\\theta}}(\\bar{s}_I)|$ obtained by executing $\\overline{\\pi}_{\\theta}$ in $\\mathcal{M}$ and $\\overline{\\mathcal{M}}_{\\theta}$ ( $V_{\\overline{\\pi}_{\\theta}}(\\cdot)$ is averaged while $\\bar{V}_{\\overline{\\pi}_{\\theta}}(\\cdot)$ is formally computed).", + "bbox": [ + 169, + 459, + 826, + 647 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 662, + 320, + 679 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We presented WAE-MDPs, a framework for learning formally verifiable distillations of RL policies with bisimulation guarantees. The latter, along with the learned abstraction of the unknown continuous environment to a discrete model, enables the verification. Our method overcomes the limitations of VAE-MDPs and our results show that it outperforms the latter in terms of learning speed, model quality, and performance, in addition to being supported by stronger learning guarantees. As mentioned by Delgrange et al. (2022), distillation failure reveals the lack of robustness of original RL policies. In particular, we found that distilling highly noise-sensitive RL policies (such as robotics simulations, e.g., Todorov et al. 2012) is laborious, even though the result remains formally verifiable.", + "bbox": [ + 169, + 694, + 826, + 806 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We demonstrated the feasibility of our approach through the verification of reachability objectives, which are building blocks for stochastic model-checking (Baier & Katoen, 2008). Besides the scope of this work, the verification of general discounted $\\omega$ -regular properties is theoretically allowed in our model via the reachability to components of standard constructions based on automata products (e.g., Baier et al. 
2016; Sickert et al. 2016), and discounted games algorithms (Chatterjee et al., 2010). Beyond distillation, our results, supported by Thm. 3.3, suggest that our WAE-MDP can be used as a general latent space learner for RL, further opening possibilities to combine RL and formal methods online when no formal model is a priori known, and address this way safety in RL with guarantees.", + "bbox": [ + 169, + 811, + 828, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REPRODUCIBILITY STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 104, + 393, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We referenced in the main text the Appendix parts presenting the proofs or additional details of every claim, Assumption, Lemma, and Theorem occurring in the paper. In addition, Appendix B is dedicated to the presentation of the setup, hyperparameters, and other extra details required for reproducing the results of Section 4. We provide the source code of the implementation of our approach in Supplementary material $^{1}$ , and we also provide the models saved during training that we used for model checking (i.e., reproducing the results of Table 1). 
Additionally, we present in a notebook (evaluation.html) videos demonstrating how our distilled policies behave in each environment, and code snippets showing how we formally verified the policies.", + "bbox": [ + 169, + 127, + 826, + 239 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 253, + 328, + 268 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This research received funding from the Flemish Government (AI Research Program) and was supported by the DESCARTES iBOF project. G.A. Perez is also supported by the Belgian FWO \"SAILor\" project (G030020N). We thank Raphael Avalos for his valuable feedback during the preparation of this manuscript.", + "bbox": [ + 169, + 277, + 823, + 335 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 354, + 287, + 369 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Parand Alizadeh Alamdari, Guy Avni, Thomas A. Henzinger, and Anna Lukina. Formal methods with a touch of magic. In 2020 Formal Methods in Computer Aided Design, FMCAD 2020, Haifa, Israel, September 21-24, 2020, pp. 138-147. IEEE, 2020. doi: 10.34727/2020/isbn.978-3-85448-042-6_21. URL https://doi.org/10.34727/2020/isbn.978-3-85448-042-6_21.", + "Alexander A. Alemi, Ben Poole, Ian Fischer, Joshua V. Dillon, Rif A. Saurous, and Kevin Murphy. Fixing a broken ELBO. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 159-168. PMLR, 2018. URL http://proceedings.mlr.press/v80/alemi18a.html.", + "Mohammed Alshiekh, Roderick Bloem, Rüdiger Ehlers, Bettina Könighofer, Scott Niekum, and Ufuk Topcu. Safe reinforcement learning via shielding. In Sheila A. McIlraith and Kilian Q. 
Weinberger (eds.), Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, pp. 2669-2678. AAAI Press, 2018. URL https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/view/17211.", + "Martín Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pp. 214-223. PMLR, 2017. URL http://proceedings.mlr.press/v70/arjovsky17a.html.", + "Edoardo Bacci and David Parker. Probabilistic guarantees for safe deep reinforcement learning. In Nathalie Bertrand and Nils Jansen (eds.), Formal Modeling and Analysis of Timed Systems - 18th International Conference, FORMATS 2020, Vienna, Austria, September 1-3, 2020, Proceedings, volume 12288 of LNCS, pp. 231-248. Springer, 2020. doi: 10.1007/978-3-030-57628-8_14. URL https://doi.org/10.1007/978-3-030-57628-8_14.", + "Christel Baier and Joost-Pieter Katoen. Principles of model checking. MIT Press, 2008. ISBN 978-0-262-02649-9.", + "Christel Baier, Stefan Kiefer, Joachim Klein, Sascha Klüppelholz, David Müller, and James Worrell. Markov chains and unambiguous büchi automata. In Swarat Chaudhuri and Azadeh Farzan (eds.), Computer Aided Verification - 28th International Conference, CAV 2016, Toronto, ON, Canada, July 17-23, 2016, Proceedings, Part I, volume 9779 of Lecture Notes in Computer Science, pp. 23-42. Springer, 2016. doi: 10.1007/978-3-319-41528-4_2. URL https://doi.org/10.1007/978-3-319-41528-4_2." 
+ ], + "bbox": [ + 173, + 377, + 828, + 902 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_footnote", + "text": "available at https://github.com/florentdelgrange/wae_mdp", + "bbox": [ + 192, + 909, + 651, + 924 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Hugo Bazille, Blaise Genest, Cyrille Jégourel, and Jun Sun. Global PAC bounds for learning discrete time markov chains. In Shuvendu K. Lahiri and Chao Wang (eds.), Computer Aided Verification - 32nd International Conference, CAV 2020, Los Angeles, CA, USA, July 21-24, 2020, Proceedings, Part II, volume 12225 of Lecture Notes in Computer Science, pp. 304-326. Springer, 2020. doi: 10.1007/978-3-030-53291-8\\_17. URL https://doi.org/10.1007/978-3-030-53291-8_17.", + "O. Bousquet, S. Gelly, I. Tolstikhin, Carl-Johann Simon-Gabriel, and B. Schölkopf. From optimal transport to generative modeling: the vegan cookbook. arXiv: Machine Learning, 2017.", + "Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym. CoRR, abs/1606.01540, 2016. URL http://arxiv.org/abs/1606.01540.", + "Steven Carr, Nils Jansen, and Ufuk Topcu. Verifiable rnn-based policies for pomdps under temporal logic constraints. In Christian Bessiere (ed.), Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI 2020, pp. 4121-4127. ijcai.org, 2020. doi: 10.24963/ijcai.2020/570. URL https://doi.org/10.24963/ijcai.2020/570.", + "Pablo Samuel Castro, Tyler Kastner, Prakash Panangaden, and Mark Rowland. Mico: Improved representations via sampling-based state similarity for markov decision processes. In Marc'Aurelio Ranzato, Alina Beygelzimer, Yann N. 
Dauphin, Percy Liang, and Jennifer Wortman Vaughan (eds.), Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pp. 30113-30126, 2021. URL https://proceedings.neurips.cc/paper/2021/hash/fd06b8ea02fe5b1c2496fe1700e9d16c-Abstract.html.", + "Glenn Ceusters, Roman Cantú Rodríguez, Alberte Bouso García, Rüdiger Franke, Geert Deconinck, Lieve Helsen, Ann Nowé, Maarten Messagie, and Luis Ramirez Camargo. Model-predictive control and reinforcement learning in multi-energy system case studies. Applied Energy, 303:117634, 2021. ISSN 0306-2619. doi: https://doi.org/10.1016/j.apenergy.2021.117634. URL https://www.sciencedirect.com/science/article/pii/S0306261921010011.", + "Krishnendu Chatterjee, Luca de Alfaro, Rupak Majumdar, and Vishwanath Raman. Algorithms for game metrics (full version). Log. Methods Comput. Sci., 6(3), 2010. URL http://arxiv.org/abs/0809.4326.", + "Dane S. Corneil, Wulfram Gerstner, and Johanni Brea. Efficient model-based deep reinforcement learning with variational state tabulation. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 1057-1066. PMLR, 2018. URL http://proceedings.mlr.press/v80/corneil18a.html.", + "Florent Delgrange, Ann Nowé, and Guillermo A. Pérez. Distillation of rl policies with formal guarantees via variational abstraction of markov decision processes. Proceedings of the AAAI Conference on Artificial Intelligence, 36(6):6497-6505, Jun. 2022. doi: 10.1609/aaai.v36i6.20602. URL https://ojs.aaai.org/index.php/AAAI/article/view/20602.", + "Josée Desharnais, Vineet Gupta, Radha Jagadeesan, and Prakash Panangaden. Metrics for labelled markov processes. Theor. Comput. Sci., 318(3):323-354, 2004. doi: 10.1016/j.tcs.2003.09.013. 
URL https://doi.org/10.1016/j.tcs.2003.09.013.", + "Jiri Fajtl, Vasileios Argyriou, Dorothy Monekosso, and Paolo Remagnino. Latent bernoulli autoencoder. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 2964-2974. PMLR, 2020. URL http://proceedings.mlr.press/v119/fajtl20a.html.", + "Norm Ferns, Doina Precup, and Sophia Knight. Bisimulation for markov decision processes through families of functional expressions. In Franck van Breugel, Elham Kashefi, Catuscia Palamidessi, and Jan Rutten (eds.), Horizons of the Mind. A Tribute to Prakash Panangaden - Essays Dedicated to Prakash Panangaden on the Occasion of His 60th Birthday, volume 8464 of LNCS, pp. 319-342." + ], + "bbox": [ + 171, + 102, + 828, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Springer, 2014. doi: 10.1007/978-3-319-06880-0_17. URL https://doi.org/10.1007/978-3-319-06880-0_17.", + "Carles Gelada, Saurabh Kumar, Jacob Buckman, Ofir Nachum, and Marc G. Bellemare. Deepmdp: Learning continuous latent space models for representation learning. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 2170-2179. PMLR, 2019. URL http://proceedings.mlr.press/v97/gelada19a.html.", + "Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: masked autoencoder for distribution estimation. In Francis R. Bach and David M. 
Blei (eds.), Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pp. 881-889. JMLR.org, 2015. URL http://proceedings.mlr.press/v37/germain15.html.", + "Robert Givan, Thomas L. Dean, and Matthew Greig. Equivalence notions and model minimization in markov decision processes. Artif. Intell., 147(1-2):163-223, 2003. doi: 10.1016/S0004-3702(02)00376-4. URL https://doi.org/10.1016/S0004-3702(02)00376-4.", + "Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C. Courville. Improved training of wasserstein gans. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 5767-5777, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/892c3b1c6dcbd52936e27cbd0ff683d6-Abstract.html.", + "Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 1856-1865. PMLR, 2018. URL http://proceedings.mlr.press/v80/haarnoja18b.html.", + "Mohammadhosein Hasanbeig, Natasha Yogananda Jeppu, Alessandro Abate, Tom Melham, and Daniel Kroening. Deepsynth: Automata synthesis for automatic task segmentation in deep reinforcement learning. 
In Thirty-Fifth AAAI Conference on Artificial Intelligence, AAAI 2021, Thirty-Third Conference on Innovative Applications of Artificial Intelligence, IAAI 2021, The Eleventh Symposium on Educational Advances in Artificial Intelligence, EAAI 2021, Virtual Event, February 2-9, 2021, pp. 7647-7656. AAAI Press, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/16935.", + "Christian Hensel, Sebastian Junges, Joost-Pieter Katoen, Tim Quatmann, and Matthias Volk. The probabilistic model checker storm. International Journal on Software Tools for Technology Transfer, 2021. ISSN 1433-2787. doi: 10.1007/s10009-021-00633-z. URL https://doi.org/10.1007/s10009-021-00633-z.", + "Matthew D. Hoffman, David M. Blei, Chong Wang, and John W. Paisley. Stochastic variational inference. J. Mach. Learn. Res., 14(1):1303-1347, 2013. URL http://dl.acm.org/citation.cfm?id=2502622.", + "Bojun Huang. Steady state analysis of episodic reinforcement learning. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/hash/69bfa2aa2b7b139ff581a806abf0a886-Abstract.html.", + "Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. URL https://openreview.net/forum?id=rkE3y85ee." 
+ ], + "bbox": [ + 173, + 102, + 826, + 922 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Nils Jansen, Bettina Konighofer, Sebastian Junges, Alex Serban, and Roderick Bloem. Safe Reinforcement Learning Using Probabilistic Shields (Invited Paper). In Igor Konnov and Laura Kovács (eds.), 31st International Conference on Concurrency Theory (CONCUR 2020), volume 171 of Leibniz International Proceedings in Informatics (LIPics), pp. 3:1-3:16, Dagstuhl, Germany, 2020. Schloss Dagstuhl-Leibniz-Zentrum für Informatik. ISBN 978-3-95977-160-3. doi: 10.4230/LIPics.CONCUR.2020.3. URL https://drops.dagstuhl.de/opus/volltexte/2020/12815.", + "Sebastian Junges, Nils Jansen, Christian Dehnert, Ufuk Topcu, and Joost-Pieter Katoen. Safety-constrained reinforcement learning for mdps. In Marsha Chechik and Jean-François Raskin (eds.), Tools and Algorithms for the Construction and Analysis of Systems - 22nd International Conference, TACAS 2016, Eindhoven, The Netherlands, April 2-8, 2016, Proceedings, volume 9636 of LNCS, pp. 130-146. Springer, 2016. doi: 10.1007/978-3-662-49674-9_8. URL https://doi.org/10.1007/978-3-662-49674-9_8.", + "Diederik P. Kingma and Max Welling. Auto-encoding variational bayes. In Yoshua Bengio and Yann LeCun (eds.), 2nd International Conference on Learning Representations, ICLR 2014, Banff, AB, Canada, April 14-16, 2014, Conference Track Proceedings, 2014. URL http://arxiv.org/abs/1312.6114.", + "Marta Kwiatkowska, Gethin Norman, and David Parker. Probabilistic model checking and autonomy. Annual Review of Control, Robotics, and Autonomous Systems, 5(1):385-410, 2022. doi: 10.1146/annurev-control-042820-010947. 
URL https://doi.org/10.1146/annurev-control-042820-010947.", + "Kim Guldstrand Larsen and Arne Skou. Bisimulation through probabilistic testing. In Conference Record of the Sixteenth Annual ACM Symposium on Principles of Programming Languages, Austin, Texas, USA, January 11-13, 1989, pp. 344-352. ACM Press, 1989. doi: 10.1145/75277.75307. URL https://doi.org/10.1145/75277.75307.", + "Pieter J. K. Libin, Arno Moonens, Timothy Verstraeten, Fabian Perez-Sanjines, Niel Hens, Philippe Lemey, and Ann Nowé. Deep reinforcement learning for large-scale epidemic control. In Yuxiao Dong, Georgiana Ifrim, Dunja Mladenic, Craig Saunders, and Sofie Van Hoecke (eds.), Machine Learning and Knowledge Discovery in Databases. Applied Data Science and Demo Track - European Conference, ECML PKDD 2020, Ghent, Belgium, September 14-18, 2020, Proceedings, Part V, volume 12461 of Lecture Notes in Computer Science, pp. 155-170. Springer, 2020. doi: 10.1007/978-3-030-67670-4_10. URL https://doi.org/10.1007/978-3-030-67670-4_10.", + "Michael L. Littman, Ufuk Topcu, Jie Fu, Charles Lee Isbell Jr., Min Wen, and James MacGlashan. Environment-independent task specifications via GLTL. CoRR, abs/1704.04341, 2017. URL http://arxiv.org/abs/1704.04341.", + "Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The concrete distribution: A continuous relaxation of discrete random variables. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. URL https://openreview.net/forum?id=S1jE5L5gl.", + "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A. Rusu, Joel Veness, Marc G. Bellemare, Alex Graves, Martin A. Riedmiller, Andreas Fidjeland, Georg Ostrovski, Stig Petersen, Charles Beattie, Amir Sadik, Ioannis Antonoglou, Helen King, Dharshan Kumaran, Daan Wierstra, Shane Legg, and Demis Hassabis. Human-level control through deep reinforcement learning. Nat., 518(7540):529-533, 2015. 
doi: 10.1038/nature14236. URL https://doi.org/10.1038/nature14236.", + "Ann Nowe. Synthesis of \"safe\" fuzzy controllers based on reinforcement learning. PhD thesis, Vrije Universiteit Brussel, 1994.", + "George Papamakarios, Iain Murray, and Theo Pavlakou. Masked autoregressive flow for density estimation. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017," + ], + "bbox": [ + 171, + 102, + 828, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Long Beach, CA, USA, pp. 2338-2347, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/6c1da886822c67822bcf3679d04369fa-Abstract.html.", + "Amir Pnueli. The temporal logic of programs. In 18th Annual Symposium on Foundations of Computer Science, Providence, Rhode Island, USA, 31 October - 1 November 1977, pp. 46-57. IEEE Computer Society, 1977. doi: 10.1109/SFCS.1977.32. URL https://doi.org/10.1109/SFCS.1977.32.", + "Martin L. Puterman. Markov Decision Processes: Discrete Stochastic Dynamic Programming. Wiley Series in Probability and Statistics. Wiley, 1994. ISBN 978-0-47161977-2. doi: 10.1002/9780470316887. URL https://doi.org/10.1002/9780470316887.", + "Tao Ren, Jianwei Niu, Jiahe Cui, Zhenchao Ouyang, and Xuefeng Liu. An application of multi-objective reinforcement learning for efficient model-free control of canals deployed with iot networks. Journal of Network and Computer Applications, 182:103049, 2021. ISSN 1084-8045. doi: https://doi.org/10.1016/j.jnca.2021.103049. 
URL https://www.sciencedirect.com/science/article/pii/S1084804521000734.", + "Salomon Sickert, Javier Esparza, Stefan Jaax, and Jan Kretínský. Limit-deterministic büchi automata for linear temporal logic. In Swarat Chaudhuri and Azadeh Farzan (eds.), Computer Aided Verification - 28th International Conference, CAV 2016, Toronto, ON, Canada, July 17-23, 2016, Proceedings, Part II, volume 9780 of Lecture Notes in Computer Science, pp. 312-332. Springer, 2016. doi: 10.1007/978-3-319-41540-6\\_17. URL https://doi.org/10.1007/978-3-319-41540-6_17.", + "Thiago D. Simão, Nils Jansen, and Matthijs T. J. Spaan. Always safe: Reinforcement learning without safety constraint violations during training. In Frank Dignum, Alessio Lomuscio, Ulle Endriss, and Ann Nowé (eds.), AAMAS '21: 20th International Conference on Autonomous Agents and Multiagent Systems, Virtual Event, United Kingdom, May 3-7, 2021, pp. 1226-1235. ACM, 2021. URL https://dl.acm.org/doi/10.5555/3463952.3464094.", + "Emanuel Todorov, Tom Erez, and Yuval Tassa. Mujoco: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pp. 5026-5033. IEEE, 2012.", + "Ilya O. Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Scholkopf. Wasserstein autoencoders. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=HkL7n1-0b.", + "John N. Tsitsiklis. Asynchronous stochastic approximation and q-learning. Mach. Learn., 16(3):185-202, 1994. doi: 10.1007/BF00993306. URL https://doi.org/10.1007/BF00993306.", + "Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. 
Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, 4-9 December 2017, Long Beach, CA, USA, pp. 6306-6315, 2017. URL http://papers.nips.cc/paper/7210-neural-discrete-representation-learning.", + "Cédric Villani. Optimal Transport: Old and New. Springer Berlin Heidelberg, Berlin, Heidelberg, 2009. ISBN 978-3-540-71050-9. doi: 10.1007/978-3-540-71050-9_6. URL https://doi.org/10.1007/978-3-540-71050-9_6.", + "Andrew M. Wells, Morteza Lahijanian, Lydia E. Kavraki, and Moshe Y. Vardi. Ltlf synthesis on probabilistic systems. In Jean-François Raskin and Davide Bresolin (eds.), Proceedings 11th International Symposium on Games, Automata, Logics, and Formal Verification, GandALF 2020, Brussels, Belgium, September 21-22, 2020, volume 326 of EPTCS, pp. 166-181, 2020. doi: 10.4204/EPTCS.326.11. URL https://doi.org/10.4204/EPTCS.326.11.", + "Hongyu Zang, Xin Li, and Mingzhong Wang. Simsr: Simple distance-based state representations for deep reinforcement learning. Proceedings of the AAAI Conference on Artificial Intelligence, 36 (8):8997-9005, Jun. 2022. doi: 10.1609/aaai.v36i8.20883. URL https://ojs.aaai.org/index.php/AAAI/article/view/20883." + ], + "bbox": [ + 171, + 103, + 828, + 924 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Amy Zhang, Rowan Thomas McAllister, Roberto Calandra, Yarin Gal, and Sergey Levine. Learning invariant representations for reinforcement learning without reconstruction. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 
URL https://openreview.net/forum?id=-2FCwDKRREu.", + "Shunkang Zhang, Yuan Gao, Yuling Jiao, Jin Liu, Yang Wang, and Can Yang. Wasserstein-wasserstein auto-encoders. CoRR, abs/1902.09323, 2019. URL http://arxiv.org/abs/1902.09323." + ], + "bbox": [ + 173, + 102, + 828, + 210 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "APPENDIX", + "text_level": 1, + "bbox": [ + 171, + 103, + 266, + 118 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A THEORETICAL DETAILS ON WAE-MDPS", + "text_level": 1, + "bbox": [ + 171, + 133, + 552, + 151 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.1 THE DISCREPANCY MEASURE", + "text_level": 1, + "bbox": [ + 171, + 166, + 429, + 180 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We show that reasoning about discrepancy measures between stationary distributions is sound in the context of infinite interaction and episodic RL processes. Let $P_{\\theta}$ be a parameterized behavioral model that generate finite traces from the original environment (i.e., finite sequences of state, actions, and rewards of the form $\\langle s_{0:T},a_{0:T - 1},r_{0:T - 1}\\rangle$ ), our goal is to find the best parameter $\\theta$ which offers the most accurate reconstruction of the original traces issued from the original model $\\mathcal{M}$ operating under $\\pi$ . 
We demonstrate that, in the limit, considering the OT between trace-based distributions is equivalent to considering the OT between the stationary distribution of $\\mathcal{M}_{\\pi}$ and the one of the behavioral model.", + "bbox": [ + 169, + 191, + 826, + 303 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Let us first formally recall the definition of the metric on the transitions of the MDP.", + "bbox": [ + 171, + 310, + 725, + 324 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Raw transition distance. Assume that $S$ , $\\mathcal{A}$ , and $\\operatorname{Im}(\\mathcal{R})$ are respectively equipped with metric $d_S$ , $d_{\\mathcal{A}}$ , and $d_{\\mathcal{R}}$ , let us define the raw transition distance metric over transitions of $\\mathcal{M}$ , i.e., tuples of the form $\\langle s, a, r, s' \\rangle$ , as $\\vec{d} \\colon S \\times \\mathcal{A} \\times \\operatorname{Im}(\\mathcal{R}) \\times S$ ,", + "bbox": [ + 169, + 334, + 826, + 380 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\vec {d} \\big (\\left\\langle s _ {1}, a _ {1}, r _ {1}, s _ {1} ^ {\\prime} \\right\\rangle , \\left\\langle s _ {2}, a _ {2}, r _ {2}, s _ {2} ^ {\\prime} \\right\\rangle \\big) = d _ {\\mathcal {S}} (s _ {1}, s _ {2}) + d _ {\\mathcal {A}} (a _ {1}, a _ {2}) + d _ {\\mathcal {R}} (r _ {1}, r _ {2}) + d _ {\\mathcal {S}} \\big (s _ {1} ^ {\\prime}, s _ {2} ^ {\\prime} \\big).\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 383, + 799, + 405 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In a nutshell, $\\vec{d}$ consists of the sum of the distance of all the transition components. Note that it is a well defined distance metric since the sum of distances preserves the identity of indiscernible, symmetry, and triangle inequality.", + "bbox": [ + 169, + 409, + 826, + 454 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Trace-based distributions. 
The raw distance $\\vec{d}$ allows us to reason about transitions; we thus consider the distribution over transitions which occur along traces of length $T$ to compare the dynamics of the original and behavioral models:", + "bbox": [ + 169, + 464, + 825, + 507 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} _ {\\pi} [ T ] (s, a, r, s ^ {\\prime}) = \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\xi_ {\\pi} ^ {t} (s | s _ {I}) \\cdot \\pi (a | s) \\cdot \\mathbf {P} (s ^ {\\prime} | s, a) \\cdot \\mathbf {1} _ {r = \\mathcal {R} (s, a)}, \\text{and}\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 512, + 759, + 551 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {P} _ {\\theta} [ T ] \\big (s, a, r, s ^ {\\prime} \\big) = \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\underset {s _ {0: t}, a _ {0: t - 1}, r _ {0: t - 1} \\sim P _ {\\theta} [ t ]} {\\mathbb {E}} \\mathbf {1} _ {\\langle s _ {t - 1}, a _ {t - 1}, r _ {t - 1}, s _ {t} \\rangle = \\langle s, a, r, s ^ {\\prime} \\rangle},\n$$\n", + "text_format": "latex", + "bbox": [ + 245, + 555, + 751, + 594 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $P_{\\theta}[T]$ denotes the distribution over traces of length $T$ , generated from $P_{\\theta}$ . Intuitively, $\\frac{1}{T} \\cdot \\sum_{t=1}^{T} \\xi_{\\pi}^{t}(s \\mid s_{I})$ can be seen as the fraction of the time spent in $s$ along traces of length $T$ , starting from the initial state Kulkarni (1995). Therefore, drawing $\\langle s, a, r, s' \\rangle \\sim \\mathcal{D}_{\\pi}[T]$ trivially follows: it is equivalent to drawing $s$ from $\\frac{1}{T} \\cdot \\sum_{t=1}^{T} \\xi_{\\pi}^{t}(\\cdot \\mid s_{I})$ , then respectively $a$ and $s'$ from $\\pi(\\cdot \\mid s)$ and $\\mathbf{P}(\\cdot \\mid s, a)$ , to finally obtain $r = \\mathcal{R}(s, a)$ . 
Given $T \\in \\mathbb{N}$ , our objective is to minimize the Wasserstein distance between those distributions: $W_{\\vec{d}}(\\mathcal{D}_{\\pi}[T], \\mathcal{P}_{\\theta}[T])$ . The following Lemma enables optimizing the Wasserstein distance between the original MDP and the behavioral model when traces are drawn from episodic RL processes or infinite interactions (Huang, 2020).", + "bbox": [ + 169, + 598, + 823, + 717 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma A.1. Assume the existence of a stationary behavioral model $\\xi_{\\theta} = \\lim_{T\\to \\infty}\\mathcal{P}_{\\theta}[T]$ , then", + "bbox": [ + 169, + 719, + 803, + 734 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\lim _ {T \\to \\infty} W _ {\\vec {d}} \\left(\\mathcal {D} _ {\\pi} [ T ], \\mathcal {P} _ {\\theta} [ T ]\\right) = W _ {\\vec {d}} \\left(\\xi_ {\\pi}, \\xi_ {\\theta}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 737, + 635, + 760 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Proof. First, note that $\\frac{1}{T} \\cdot \\sum_{t=1}^{T} \\xi_{\\pi}^{t}(\\cdot \\mid s_{I})$ weakly converges to $\\xi_{\\pi}$ as $T$ goes to $\\infty$ Kulkarni (1995). The result follows then from (Villani, 2009, Corollary 6.9).", + "bbox": [ + 169, + 776, + 826, + 808 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.2 DEALING WITH DISCRETE ACTIONS", + "text_level": 1, + "bbox": [ + 171, + 823, + 470, + 835 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "When the policy $\\pi$ executed in $\\mathcal{M}$ already produces discrete actions, learning a latent action space is, in many cases, not necessary. We thus make the following assumptions:", + "bbox": [ + 169, + 848, + 826, + 877 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Assumption A.2. 
Let $\\pi \\colon S \\to \\Delta(\\mathcal{A}^{\\star})$ be the policy executed in $\\mathcal{M}$ and assume that $\\mathcal{A}^{\\star}$ is a (tractable) finite set. Then, we take $\\overline{\\mathcal{A}} = \\mathcal{A}^{\\star}$ and $\\phi_{\\iota}^{\\mathcal{A}}$ as the identity function, i.e., $\\phi_{\\iota}^{\\mathcal{A}}: \\overline{S} \\times \\mathcal{A}^{\\star} \\to \\mathcal{A}^{\\star}, \\langle \\overline{s}, a^{\\star} \\rangle \\mapsto a^{\\star}$ .", + "bbox": [ + 169, + 878, + 825, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Assumption A.3. Assume that the action space of the original environment $\\mathcal{M}$ is a (tractable) finite set. Then, we take $\\psi_{\\theta}$ as the identity function, i.e., $\\psi_{\\theta} = \\phi_{\\iota}^{A}$ .", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Concretely, the premise of Assumption A.2 typically occurs when $\\pi$ is a latent policy (see Rem. 1) or when $\\mathcal{M}$ has already a discrete action space. In the latter case, Assumption A.2 and A.3 amount to setting $\\bar{\\mathcal{A}} = \\mathcal{A}$ and ignoring the action encoder and embedding function. Note that if a discrete action space is too large, or if the user explicitly aims for a coarser space, then the former is not considered as tractable, these assumptions do not hold, and the action space is abstracted to a smaller set of discrete actions.", + "bbox": [ + 169, + 141, + 826, + 224 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.3 PROOF OF LEMMA 3.2", + "text_level": 1, + "bbox": [ + 171, + 241, + 375, + 255 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Notation. 
From now on, we write $\\phi_{\\iota}(\\bar{s},\\bar{a}\\mid s,a) = \\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}}\\cdot \\phi_{\\iota}^{A}(\\bar{a}\\mid \\bar{s},a)$ .", + "bbox": [ + 169, + 268, + 663, + 287 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Lemma 3.2. Define $\\mathcal{T}(\\bar{s},\\bar{a},\\bar{s}^{\\prime}) = \\mathbb{E}_{s,a\\sim \\xi_{\\pi}}[\\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}}\\cdot \\phi_{\\iota}^{A}(\\bar{a}\\mid \\bar{s},a)\\cdot \\overline{\\mathbf{P}}_{\\theta}(\\bar{s}^{\\prime}\\mid \\bar{s},\\bar{a})]$ as the distribution of drawing state-action pairs from interacting with $\\mathcal{M}$ , embedding them to the latent spaces, and finally letting them transition to their successor state in $\\overline{\\mathcal{M}}_{\\theta}$ . Then, $W_{\\vec{d}}(Q_{\\iota},\\bar{\\xi}_{\\overline{\\pi}_{\\theta}})\\leqslant W_{\\vec{d}}(\\bar{\\xi}_{\\overline{\\pi}_{\\theta}},\\mathcal{T}) + L_{\\mathbf{P}}^{\\xi_{\\pi}}$", + "bbox": [ + 169, + 289, + 826, + 338 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. 
Wasserstein is compliant with the triangular inequality (Villani, 2009), which gives us:", + "bbox": [ + 169, + 349, + 789, + 364 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nW _ {\\vec {d}} \\left(Q _ {\\iota}, \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}\\right) \\leqslant W _ {\\vec {d}} \\left(Q _ {\\iota}, \\mathcal {T}\\right) + W _ {d _ {\\bar {\\mathfrak {S}}}} \\left(\\mathcal {T}, \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 364, + 653, + 383 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where", + "bbox": [ + 171, + 383, + 217, + 397 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} W _ {\\tilde {d}} \\left(\\mathcal {T}, \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}\\right) \\quad (\\text {n o t e t h a t} W _ {\\tilde {d}} \\text {i s r e f l e x i v e (V i l l a n i , 2 0 0 9)} \\\\ = \\sup _ {f \\in \\mathcal {F} _ {\\bar {d}}} \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) - \\underset {\\bar {s} \\sim \\bar {\\xi} _ {\\pi_ {\\theta}}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\bar {\\pi} _ {\\theta} (\\cdot | \\bar {s})} {\\mathbb {E}} \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}), \\text {a n d} \\\\ W _ {\\vec {d}} (Q _ {\\iota}, \\mathcal {T}) \\\\ = \\sup _ {f \\in \\mathcal {F} _ {\\vec {d}} s, a, s ^ {\\prime} \\sim \\xi_ {\\pi}} \\mathbb {E} _ {\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} (\\cdot | s, a, s ^ {\\prime})} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) - \\mathbb {E} _ {s, a \\sim \\xi_ 
{\\pi}} \\mathbb {E} _ {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} \\mathbb {E} _ {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) (3) \\\\ \\leqslant \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\sup _ {f \\in \\mathcal {F} _ {\\bar {d}} ^ {-} s ^ {\\prime} \\sim \\mathbf {P} (\\cdot | s, a)} \\underset {\\sim \\mathbf {P} (\\cdot | s, a)} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\phi_ {\\iota} (s ^ {\\prime})) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) (4) \\\\ = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\iota} ^ {A} (\\cdot | \\phi_ {\\iota} (s), a)} {\\mathbb {E}} \\sup _ {f \\in \\mathcal {F} _ {d} _ {\\overline {{\\mathcal {S}}}}} \\underset {\\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a)} {\\mathbb {E}} f (\\bar {s} ^ {\\prime}) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})} {\\mathbb {E}} f (\\bar {s} ^ {\\prime}) (5) \\\\ = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\ell} ^ {\\mathcal {A}} (\\cdot | \\phi_ {\\ell} (s), a)} {\\mathbb {E}} W _ {d _ {\\bar {\\mathfrak {S}}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a), \\bar {\\mathbf {P}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 400, + 823, + 597 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We pass from Eq. 3 to Eq. 4 by the Jensen's inequality. To see how we pass from Eq. 4 to Eq. 
5, notice that", + "bbox": [ + 169, + 599, + 826, + 627 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} _ {\\vec {d}} = \\left\\{f \\colon f \\left(\\bar {s} _ {1}, \\bar {a} _ {1}, \\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s} _ {2}, \\bar {a} _ {2}, \\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant \\vec {d} \\left(\\left\\langle \\bar {s} _ {1}, \\bar {a} _ {1}, \\bar {s} _ {1} ^ {\\prime} \\right\\rangle , \\left\\langle \\bar {s} _ {2}, \\bar {a} _ {2}, \\bar {s} _ {2} ^ {\\prime} \\right\\rangle\\right) \\right\\}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 628, + 709, + 654 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} _ {\\vec {d}} = \\left\\{f \\colon f \\left(\\bar {s} _ {1}, \\bar {a} _ {1}, \\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s} _ {2}, \\bar {a} _ {2}, \\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant d _ {\\bar {S}} \\left(\\bar {s} _ {1}, \\bar {s} _ {2}\\right) + d _ {\\bar {A}} \\left(\\bar {a} _ {1}, \\bar {a} _ {2}\\right) + d _ {\\bar {S}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\right\\}\n$$\n", + "text_format": "latex", + "bbox": [ + 227, + 656, + 769, + 672 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Observe now that $\\bar{s}$ and $\\bar{a}$ are fixed in the supremum computation of Eq. 4: all functions $f$ considered and taken from $\\mathcal{F}_{\\bar{d}}$ are of the form $f(\\bar{s},\\bar{a},\\cdot)$ . 
It is thus sufficient to consider the supremum over functions from the following subset of $\\mathcal{F}_{\\bar{d}}$ :", + "bbox": [ + 169, + 674, + 826, + 718 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\{f \\colon f (\\bar {s}, \\bar {a}, \\bar {s} _ {1} ^ {\\prime}) - f (\\bar {s}, \\bar {a}, \\bar {s} _ {2} ^ {\\prime}) \\leqslant d _ {\\bar {\\mathcal {S}}} (\\bar {s}, \\bar {s}) + d _ {\\bar {\\mathcal {A}}} (\\bar {a}, \\bar {a}) + d _ {\\bar {\\mathcal {S}}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\}\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 720, + 728, + 739 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "(for $\\bar{s},\\bar{a}$ drawn from $\\phi_{\\iota}$ )", + "bbox": [ + 663, + 741, + 823, + 756 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} = \\left\\{f: f \\left(\\bar {s}, \\bar {a}, \\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s}, \\bar {a}, \\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant d _ {\\bar {S}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\right\\} \\\\ = \\left\\{f: f \\left(\\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant d _ {\\bar {S}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\right\\} \\\\ = \\mathcal {F} _ {d _ {\\bar {\\mathcal {S}}}}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 760, + 578, + 816 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Given a state $s \\in S$ in the original model, the (parallel) execution of $\\pi$ in $\\overline{\\mathcal{M}}_{\\theta}$ is enabled through $\\pi(a, \\bar{a} | s) = \\pi(a | s) \\cdot \\phi_{\\ell}^{\\mathcal{A}}(\\bar{a} | \\phi_{\\ell}(s), a)$ (cf. Fig. 1b). 
The local transition loss resulting from this interaction is:", + "bbox": [ + 169, + 820, + 826, + 861 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} = \\underset {s, \\langle a, \\bar {a} \\rangle \\sim \\xi_ {\\pi}} {\\mathbb {E}} W _ {d _ {\\bar {S}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a), \\bar {\\mathbf {P}} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right) \\\\ = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\iota} ^ {A} (\\cdot | \\phi_ {\\iota} (s), a)} {\\mathbb {E}} W _ {d _ {\\bar {\\mathfrak {S}}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a), \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 281, + 864, + 715, + 921 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "which finally yields the result.", + "bbox": [ + 171, + 103, + 374, + 118 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/852e86e2e3bda2a9e8c7ef0d0e216a8b0fe5f8b4b9662c714d1c462f9b283e6f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 104, + 825, + 116 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.4 PROOF OF THEOREM 3.3", + "text_level": 1, + "bbox": [ + 171, + 135, + 392, + 148 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Before proving Theorem 3.3, let us introduce the following Lemma, that explicitly demonstrates the link between the transition regularizer of the $\\mathrm{W}^2\\mathrm{AE}$ -MDP objective and the local transition loss required to obtain the guarantees related to the bisimulation bounds 
of Eq. 1.", + "bbox": [ + 169, + 162, + 823, + 205 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Lemma A.4. Assume that traces are generated by running $\\bar{\\pi} \\in \\overline{\\Pi}$ in the original environment, then", + "bbox": [ + 169, + 208, + 823, + 223 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {s, a ^ {\\star} \\sim \\xi_ {\\pi} \\bar {a} \\sim \\phi_ {\\iota} ^ {\\mathcal {A}} (\\cdot | \\phi_ {\\iota} (s), a ^ {\\star})} {\\mathbb {E}} W _ {d _ {\\overline {{S}}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a ^ {\\star}), \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right) = L _ {\\mathbf {P}} ^ {\\xi_ {\\overline {{\\pi}}}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 231, + 728, + 258 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Proof. Since the latent policy $\\bar{\\pi}$ generates latent actions, Assumption A.2 holds, which means:", + "bbox": [ + 169, + 273, + 792, + 290 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathop{\\mathbb{E}}_{s,a^{\\star}\\sim \\xi_{\\overline{\\pi}}}\\mathop{\\mathbb{E}}_{\\bar{a}\\sim \\phi_{\\iota}^{A}(\\cdot |\\phi_{\\iota}(s),a^{\\star})}W_{d_{\\overline{\\mathfrak{S}}}}\\left(\\phi_{\\iota}\\mathbf{P}(\\cdot |s,a^{\\star}),\\overline{\\mathbf{P}}_{\\theta}(\\cdot |\\phi_{\\iota}(s),\\bar{a})\\right) \\\\ = \\underset {s, \\bar {a} \\sim \\xi_ {\\bar {\\pi}}} {\\mathbb {E}} W _ {d _ {\\bar {S}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, \\bar {a}), \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right) \\\\ = L _ {\\mathbf {P}} ^ {\\xi_ {\\overline {{\\pi}}}}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 297, + 705, + 375 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/8baf2a20bda1cb7ecb0faf699ee64776daaa409b45835778693cc866c2d3e653.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 382, + 825, + 393 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Theorem 3.3. Assume that traces are generated by running a latent policy $\\bar{\\pi} \\in \\overline{\\Pi}$ in the original environment and let $d_{\\mathcal{R}}$ be the usual Euclidean distance, then the $W^{2}$ AE-MDP objective is", + "bbox": [ + 169, + 404, + 825, + 434 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\min_{\\iota ,\\theta}\\underset {s,s^{\\prime}\\sim \\xi_{\\pi}}{\\mathbb{E}}\\left[d_{\\mathcal{S}}(s,\\mathcal{G}_{\\theta}(\\phi_{\\iota}(s))) + d_{\\mathcal{S}}\\big(s^{\\prime},\\mathcal{G}_{\\theta}\\big(\\phi_{\\iota}\\big(s^{\\prime}\\big)\\big)\\big)\\right] + L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}} + \\beta \\cdot (\\mathcal{W}_{\\xi_{\\overline{\\pi}}} + L_{\\mathbf{P}}^{\\xi_{\\overline{\\pi}}}).\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 441, + 764, + 468 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Proof. We distinguish two cases: (i) the case where the original and latent models share the same discrete action space, i.e., $\\mathcal{A} = \\overline{\\mathcal{A}}$ , and (ii) the case where the two have a different action space (e.g., when the original action space is continuous), i.e., $\\mathcal{A} \\neq \\overline{\\mathcal{A}}$ . In both cases, the local losses term follows by definition of $L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}}$ and Lemma A.4. 
When $d_{\\mathcal{R}}$ is the Euclidean distance (or even the $L_{1}$ distance since rewards are scalar values), the expected reward distance occurring in the expected trace-distance term $\\vec{d}$ in the $\\mathrm{W}^2\\mathrm{AE}$ -MDP objective directly translates to the local loss $L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}}$ . Concerning the local transition loss, in case (i), the result naturally follows from Assumption A.2 and A.3. In case (ii), only Assumption A.2 holds, meaning the action encoder term of the $\\mathrm{W}^2\\mathrm{AE}$ -MDP objective is ignored, but not the action embedding term appearing in $G_{\\theta}$ . Given $s \\sim \\xi_{\\overline{\\pi}}$ , recall that executing $\\overline{\\pi}$ in $\\mathcal{M}$ amounts to embedding the produced latent actions $\\bar{a} \\sim \\overline{\\pi}(\\cdot \\mid \\phi_{\\iota}(s))$ back to the original environment via $a = \\psi_{\\theta}(\\phi_{\\iota}(s), \\bar{a})$ (cf. Rem. 1 and Fig. 1a). Therefore, the projection of $\\vec{d}(\\langle s, a, r, s' \\rangle, G_{\\theta}(\\phi_{\\iota}(s), \\bar{a}, \\phi_{\\iota}(s')))$ on the action space $\\mathcal{A}$ is $d_{\\mathcal{A}}(\\psi_{\\theta}(\\phi_{\\iota}(s), \\bar{a}), \\psi_{\\theta}(\\phi_{\\iota}(s), \\bar{a})) = 0$ for $r = \\mathcal{R}(s, a)$ and $s' \\sim \\mathbf{P}(\\cdot \\mid s, a)$ .", + "bbox": [ + 169, + 483, + 826, + 674 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.5 OPTIMIZING THE TRANSITION REGULARIZER", + "text_level": 1, + "bbox": [ + 171, + 689, + 535, + 703 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In the following, we detail how we derive a tractable form of our transition regularizer $L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)$ . 
Optimizing the ground Kantorovich-Rubinstein duality is enabled via the introduction of a parameterized, 1-Lipschitz network $\\varphi_{\\omega}^{\\mathbf{P}}$ , that need to be trained to attain the supremum of the dual:", + "bbox": [ + 169, + 714, + 826, + 758 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} (\\omega) = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\max _ {\\omega : \\varphi_ {\\omega} ^ {\\mathbf {P}} \\in \\mathcal {F} _ {d}} \\underset {\\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a)} {\\max _ {\\bar {s} ^ {\\prime} \\sim \\varphi_ {\\iota} (\\cdot | s, a)}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (\\bar {s} ^ {\\prime}) - \\underset {\\bar {s} ^ {\\prime} \\sim \\bar {\\mathbf {P}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (\\bar {s} ^ {\\prime}).\n$$\n", + "text_format": "latex", + "bbox": [ + 220, + 766, + 774, + 796 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Under this form, optimizing $L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)$ is intractable due to the expectation over the maximum. The following Lemma allows us rewriting $L_{\\mathbf{P}}^{\\xi_{\\pi}}$ to make the optimization tractable through Monte Carlo estimation.", + "bbox": [ + 169, + 805, + 823, + 851 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Lemma A.5. Let $\\mathcal{X},\\mathcal{Y}$ be two measurable sets, $\\xi \\in \\Delta (\\mathcal{X})$ $P\\colon \\mathcal{X}\\to \\Delta (\\mathcal{Y}),Q\\colon \\mathcal{X}\\to \\Delta (\\mathcal{Y})$ , and $d\\colon \\mathcal{Y}\\times \\mathcal{Y}\\rightarrow [0, + \\infty [$ be a metric on $\\mathcal{V}$ . 
Then,", + "bbox": [ + 169, + 854, + 825, + 886 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {x \\sim \\xi} {\\mathbb {E}} W _ {d} \\left(P (\\cdot \\mid x), Q (\\cdot \\mid x)\\right) = \\sup _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d}} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {2}) \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 893, + 785, + 929 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Proof. Our objective is to show that", + "bbox": [ + 171, + 104, + 413, + 118 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (y _ {1}) (x) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (y _ {2}) (x) \\right] (6) \\\\ = \\sup _ {\\varphi : \\mathcal {X} \\rightarrow \\mathcal {F} _ {d}} \\mathbb {E} _ {x \\sim \\xi} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (x) \\left(y _ {1}\\right) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (x) \\left(y _ {2}\\right)\\right] (7) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 125, + 823, + 200 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We start with $(6) \\leqslant (7)$ . 
Construct $\\varphi^{\\star} \\colon \\mathcal{X} \\to \\mathcal{F}_d$ by setting for all $x \\in \\mathcal{X}$", + "bbox": [ + 169, + 205, + 648, + 220 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\varphi^{\\star}(x) = \\arg \\sup_{f\\in \\mathcal{F}_{d}}\\underset {y_{1}\\sim P(\\cdot |x)}{\\mathbb{E}}f(y_{1}) - \\underset {y_{2}\\sim Q(\\cdot |x)}{\\mathbb{E}}f(y_{2}).\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 224, + 671, + 251 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This gives us", + "bbox": [ + 171, + 256, + 263, + 271 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\right] \\\\ = \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {2}) \\right] \\\\ \\leqslant \\sup _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d}} \\mathbb {E} _ {x \\sim \\xi} \\left[ \\mathbb {E} _ {y _ {1} \\sim P (\\cdot | x)} \\varphi (x) (y _ {1}) - \\mathbb {E} _ {y _ {2} \\sim Q (\\cdot | x)} \\varphi (x) (y _ {2}) \\right]. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 276, + 694, + 390 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "It remains to show that $(6) \\geqslant (7)$ . 
Take", + "bbox": [ + 171, + 393, + 429, + 409 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\varphi^ {\\star} = \\arg \\operatorname * {s u p} _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d}} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {2}) \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 414, + 718, + 448 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Then, for all $x\\in \\mathcal{X}$ we have $\\varphi^{\\star}(x)\\in \\mathcal{F}_d$ which means:", + "bbox": [ + 169, + 453, + 539, + 468 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {2}) \\\\ \\leqslant \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 474, + 645, + 527 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This finally yields", + "bbox": [ + 171, + 532, + 294, + 547 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {2}) \\right] \\\\ \\leqslant \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\right]. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 553, + 666, + 630 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/ddd0a968d5f3afaaae13a27c6e3caf51bfd225352e0358b48e17b9095eb06313.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 633, + 823, + 646 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Corollary A.5.1. Let $\\xi_{\\pi}$ be a stationary distribution of $\\mathcal{M}_{\\pi}$ and $\\mathcal{X} = S\\times \\mathcal{A}\\times \\overline{S}\\times \\overline{\\mathcal{A}}$ , then", + "bbox": [ + 169, + 655, + 781, + 671 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} = \\sup _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d _ {\\overline {{S}}}}} \\underset {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\left[ \\varphi (s, a, \\bar {s}, \\bar {a}) \\big (\\phi_ {\\iota} (s ^ {\\prime}) \\big) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi (s, a, \\bar {s}, \\bar {a}) \\big (\\bar {s} ^ {\\prime} \\big) \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 676, + 800, + 719 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Consequently, we rewrite $L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)$ as a tractable maximization:", + "bbox": [ + 169, + 733, + 583, + 750 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} (\\omega) = \\max _ {\\omega : \\varphi_ {\\omega} ^ {\\mathbf {P}} \\in \\mathcal {F} _ {d _ {\\bar {S}}}} \\underset {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\left[ \\varphi_ {\\omega} ^ 
{\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\phi_ {\\iota} (s ^ {\\prime})) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 755, + 826, + 797 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.6 THE LATENT METRIC", + "text_level": 1, + "bbox": [ + 171, + 811, + 370, + 824 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In the following, we show that considering the Euclidean distance for $\\vec{d}$ and $d_{\\overline{\\mathcal{S}}}$ in the latent space for optimizing the regularizers $\\mathcal{W}_{\\xi_{\\pi}}$ and $L_{\\mathbf{P}}^{\\xi_{\\pi}}$ is Lipschitz equivalent to considering a continuous $\\lambda$ -relaxation of the discrete metric $\\mathbf{1}_{\\neq}(\\pmb{x},\\pmb{y}) = \\mathbf{1}_{x\\neq y}$ . Consequently, this also means it is consistently sufficient to enforce 1-Lipschitzness via the gradient penalty approach of Gulrajani et al. (2017) during training to maintain the guarantees linked to the regularizers in the zero-temperature limit, when the spaces are discrete.", + "bbox": [ + 169, + 835, + 826, + 925 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Lemma A.6. 
Let $d$ be the usual Euclidean distance and $d_{\\lambda} \\colon [0,1]^n \\times [0,1]^n \\to [0,1[$ , $\\langle \\pmb{x}, \\pmb{y} \\rangle \\mapsto \\frac{d(\\pmb{x}, \\pmb{y})}{\\lambda + d(\\pmb{x}, \\pmb{y})}$ for $\\lambda \\in ]0,1]$ and $n \\in \\mathbb{N}$ , then $d_{\\lambda}$ is a distance metric.", + "bbox": [ + 169, + 103, + 823, + 140 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Proof. The function $d_{\\lambda}$ is a metric iff it satisfies the following axioms:", + "bbox": [ + 171, + 152, + 635, + 169 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Identity of indiscernibles: If $\\pmb{x} = \\pmb{y}$ , then $d_{\\lambda}(\\pmb{x}, \\pmb{y}) = \\frac{d(\\pmb{x}, \\pmb{y})}{\\lambda + d(\\pmb{x}, \\pmb{y})} = \\frac{0}{\\lambda + 0} = 0$ since $d$ is a distance metric. Assume now that $d_{\\lambda}(\\pmb{x}, \\pmb{y}) = 0$ and take $\\alpha = d(\\pmb{x}, \\pmb{y})$ , for any $\\pmb{x}, \\pmb{y}$ . Thus, $\\alpha \\in [0, +\\infty[$ and $0 = \\frac{\\alpha}{\\lambda + \\alpha}$ is only achieved in $\\alpha = 0$ , which only occurs whenever $\\pmb{x} = \\pmb{y}$ since $d$ is a distance metric.", + "2. Symmetry:" + ], + "bbox": [ + 210, + 183, + 826, + 272 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {y}) = \\frac {d (\\boldsymbol {x} , \\boldsymbol {y})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} \\\\ = \\frac {d (\\boldsymbol {y} , \\boldsymbol {x})}{\\lambda + d (\\boldsymbol {y} , \\boldsymbol {x})} \\quad (d \\text { is a distance metric}) \\\\ = d _ {\\lambda} (\\boldsymbol {y}, \\boldsymbol {x}) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 277, + 823, + 363 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "3. 
Triangle inequality: Let $\\mathbf{x}, \\mathbf{y}, \\mathbf{z} \\in [0,1]^n$ , the triangle inequality holds iff", + "bbox": [ + 209, + 377, + 712, + 393 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {y}) + d _ {\\lambda} (\\boldsymbol {y}, \\boldsymbol {z}) \\geqslant d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {z}) (8) \\\\ \\equiv \\quad \\frac {d (\\boldsymbol {x} , \\boldsymbol {y})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} + \\frac {d (\\boldsymbol {y} , \\boldsymbol {z})}{\\lambda + d (\\boldsymbol {y} , \\boldsymbol {z})} \\geqslant \\frac {d (\\boldsymbol {x} , \\boldsymbol {z})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {z})} \\\\ \\equiv \\quad \\frac {\\lambda d (\\boldsymbol {x} , \\boldsymbol {y}) + \\lambda d (\\boldsymbol {y} , \\boldsymbol {z}) + 2 d (\\boldsymbol {x} , \\boldsymbol {y}) d (\\boldsymbol {y} , \\boldsymbol {z})}{\\lambda^ {2} + \\lambda d (\\boldsymbol {x} , \\boldsymbol {y}) + \\lambda d (\\boldsymbol {y} , \\boldsymbol {z}) + d (\\boldsymbol {x} , \\boldsymbol {y}) d (\\boldsymbol {y} , \\boldsymbol {z})} \\geqslant \\frac {d (\\boldsymbol {x} , \\boldsymbol {z})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {z})} \\\\ \\equiv \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {y}) + \\lambda^ {2} d (\\boldsymbol {y}, \\boldsymbol {z}) + 2 \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) + \\\\ \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {x}, \\boldsymbol {z}) + \\lambda d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) + 2 d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\\\ \\geqslant \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {z}) + \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {x}, \\boldsymbol {z}) + \\lambda d (\\boldsymbol {y}, \\boldsymbol {z}) 
d (\\boldsymbol {x}, \\boldsymbol {z}) + d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\\\ \\left(\\text {cross-product, with } \\lambda > 0 \\text { and } \\operatorname {I m} (d) \\in [ 0, \\infty [\\right) \\\\ \\equiv \\quad \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {y}) + \\lambda^ {2} d (\\boldsymbol {y}, \\boldsymbol {z}) + 2 \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) + d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\geqslant \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {z}) (9) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 398, + 823, + 602 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Since $d$ is a distance metric, we have", + "bbox": [ + 228, + 609, + 475, + 625 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {y}) + \\lambda^ {2} d (\\boldsymbol {y}, \\boldsymbol {z}) \\geqslant \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {z}) \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 405, + 631, + 823, + 648 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "and $\\operatorname {Im}(d)\\in [0,\\infty [$ , meaning", + "bbox": [ + 228, + 655, + 426, + 671 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n2 \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) + d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\geqslant 0 \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 676, + 823, + 694 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "By Eq. 10 and 11, the inequality of Eq. 
8 and 9 are equivalent yields the result.", + "bbox": [ + 169, + 707, + 826, + 734 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Lemma A.7. Let $d$ , $d_{\\lambda}$ as defined above, then $(i)d_{\\lambda}\\xrightarrow[\\lambda\\to 0]{\\longrightarrow}\\mathbf{1}_{\\neq}$ and (ii) $d,d_{\\lambda}$ are Lipschitz-equivalent.", + "bbox": [ + 169, + 744, + 826, + 765 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Proof. Part (i) is straightforward by definition of $d_{\\lambda}$ . Distances $d$ and $d_{\\lambda}$ are Lipschitz equivalent if and only if $\\exists a, b > 0$ such that $\\forall x, y \\in [0,1]^n$ ,", + "bbox": [ + 169, + 780, + 826, + 809 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} a \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\leqslant d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {y}) \\leqslant b \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\\\ \\equiv a \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\leqslant \\frac {d (\\boldsymbol {x} , \\boldsymbol {y})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} \\leqslant b \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\\\ \\equiv \\quad a \\leqslant \\frac {1}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} \\leqslant b \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 815, + 637, + 901 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Taking $a = \\frac{1}{\\lambda + \\sqrt{n}}$ and $b = \\frac{1}{\\lambda}$ yields the result.", + "bbox": [ + 169, + 907, + 478, + 928 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Corollary A.7.1. 
For all $\\beta \\geqslant 1 / \\lambda$ , $s \\in S$ , $a \\in \\mathcal{A}$ , $\\bar{s} \\in \\overline{S}$ , and $\\bar{a} \\in \\overline{\\mathcal{A}}$ , we have", + "bbox": [ + 169, + 102, + 669, + 119 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. $W_{d_{\\lambda}}(\\mathcal{T},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}})\\leqslant \\beta \\cdot W_{d}(\\mathcal{T},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}})$", + "2. $W_{d_{\\lambda}}\\left(\\phi_{\\iota}\\mathbf{P}(\\cdot \\mid s,a),\\overline{\\mathbf{P}}_{\\theta}(\\cdot \\mid \\bar{s},\\bar{a})\\right)\\leqslant \\beta \\cdot W_{d}\\left(\\phi_{\\iota}\\mathbf{P}(\\cdot \\mid s,a),\\overline{\\mathbf{P}}_{\\theta}(\\cdot \\mid \\bar{s},\\bar{a})\\right)$" + ], + "bbox": [ + 209, + 128, + 694, + 174 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Proof. By Lipschitz equivalence, taking $\\beta \\geqslant 1 / \\lambda$ ensures that $\\forall n\\in \\mathbb{N},\\forall \\pmb {x},\\pmb {y}\\in [0,1]^n,d_\\lambda (\\pmb {x},\\pmb {y})\\leqslant \\beta \\cdot d(\\pmb {x},\\pmb {y})$ . Moreover, for any distributions $P,Q,W_{d_{\\lambda}}(P,Q)\\leqslant \\beta \\cdot W_{d}(P,Q)$ (cf., e.g., Gelada et al. 2019, Lemma A.4 for details).", + "bbox": [ + 169, + 190, + 826, + 233 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In practice, taking the hyperparameter $\\beta \\geqslant 1 / \\lambda$ in the $\\mathrm{W}^2\\mathrm{AE}$ -MDP ensures that minimizing the $\\beta$ -scaled regularizers w.r.t. $d$ also minimizes the regularizers w.r.t. the $\\lambda$ -relaxation $d_{\\lambda}$ , being the discrete distribution in the zero-temperature limit. 
Note that optimizing over two different $\\beta_{1}, \\beta_{2}$ instead of a unique scale factor $\\beta$ is also a good practice to interpolate between the two regularizers.", + "bbox": [ + 169, + 251, + 826, + 309 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "B EXPERIMENT DETAILS", + "text_level": 1, + "bbox": [ + 171, + 329, + 401, + 345 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The code for conducting and replicating our experiments is available at https://github.com/florentdelgrange/wae_mdp.", + "bbox": [ + 169, + 361, + 826, + 391 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "B.1 SETUP", + "text_level": 1, + "bbox": [ + 171, + 407, + 264, + 422 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We used TENSORFLOW 2.7.0 (Abadi et al., 2015) to implement the neural network architecture of our W $^2$ AE-MDP, TENSORFLOW PROBABILITY 0.15.0 (Dillon et al., 2017) to handle the probabilistic components of the latent model (e.g., latent distributions with reparameterization tricks, masked autoregressive flows, etc.), as well as TF-AGENTS 0.11.0 (Guadarrama et al., 2018) to handle the RL parts of the framework.", + "bbox": [ + 169, + 433, + 826, + 503 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Models have been trained on a cluster running under CentOS Linux 7 (Core) composed of a mix of nodes containing Intel processors with the following CPU microarchitectures: (i) 10-core INTEL E5-2680v2, (ii) 14-core INTEL E5-2680v4, and (iii) 20-core INTEL Xeon Gold 6148. 
We used 8 cores and 32 GB of memory for each run.", + "bbox": [ + 169, + 511, + 825, + 568 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "B.2 STATIONARY DISTRIBUTION", + "text_level": 1, + "bbox": [ + 171, + 585, + 415, + 599 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "To sample from the stationary distribution $\\xi_{\\pi}$ of episodic learning environments operating under $\\pi \\in \\Pi$ , we implemented the recursive $\\epsilon$ -perturbation trick of Huang (2020). In a nutshell, the reset of the environment is explicitly added to the state space of $\\mathcal{M}$ , which is entered at the end of each episode and left with probability $1 - \\epsilon$ to start a new one. We also added a special atomic proposition reset into $\\mathbf{AP}$ to label this reset state and reason about episodic behaviors. For instance, this allows verifying whether the agent behaves safely during the entire episode, or if it is able to reach a goal before the end of the episode.", + "bbox": [ + 169, + 611, + 826, + 709 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "B.3 ENVIRONMENTS WITH INITIAL DISTRIBUTION", + "text_level": 1, + "bbox": [ + 171, + 727, + 535, + 741 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Many environments do not necessarily have a single initial state, but rather an initial distribution over states $d_I \\in \\Delta(S)$ . In that case, the results presented in this paper remain unchanged: it suffices to add a dummy state $s^\\star$ to the state space $S \\cup \\{s^\\star\\}$ so that $s_I = s^\\star$ with the transition dynamics $\\mathbf{P}(s' \\mid s^\\star, a) = d_I(s')$ for any action $a \\in \\mathcal{A}$ . 
Therefore, each time the reset of the environment is triggered, we make the MDP entering the initial state $s^\\star$ , then transitioning to $s'$ according to $d_I$ .", + "bbox": [ + 169, + 753, + 825, + 824 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "B.4 LATENT SPACE DISTRIBUTION", + "text_level": 1, + "bbox": [ + 171, + 840, + 426, + 854 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "As pointed out in Sect. 4, posterior collapse is naturally avoided when optimizing $\\mathrm{W}^2\\mathrm{AE}$ -MDP. To illustrate that, we report the distribution of latent states produced by $\\phi_{\\iota}$ during training (Fig. 5). The plots reveal that the latent space generated by mapping original states drawn from $\\xi_{\\pi}$ during training to $\\bar{S}$ via $\\phi_{\\iota}$ is fairly distributed, for each environment.", + "bbox": [ + 169, + 867, + 825, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/f9b11e840e926f6a557915662bc20036b7684f609694f4f1a267709f4ba6d4c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 187, + 104, + 388, + 226 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/8de01355e219f78cb7b403d44ffb0758b1492b29fc6e25149cb3d30dff48d553.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 398, + 107, + 599, + 227 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/c3d02556c539def1c1129ec5b832efd5ae42a21e70f72d962abf4fb2ae673ff8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 108, + 810, + 227 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/a076bd8afdd0ce5bc0a31146c4232995dc4a7ad674884a472320e5eb16858cc0.jpg", + "image_caption": 
[ + "Figure 5: Latent space distribution along training steps. The intensity of the blue hue corresponds to the frequency of latent states produced by $\\phi_{\\ell}$ during training." + ], + "image_footnote": [], + "bbox": [ + 295, + 234, + 495, + 349 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/d32d8c2a325754534be9499ca240d18de6ba6c2edff8a225444843df1390755c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 234, + 700, + 349 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/76961331004e819990562a69015ed71c0e400d697121e59f354e6d0f7a022e93.jpg", + "image_caption": [ + "Figure 6: Absolute value difference $\\| V_{\\bar{\\pi}_{\\theta}}\\|$ reported along training steps." + ], + "image_footnote": [], + "bbox": [ + 173, + 407, + 315, + 501 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/18694fdb7e5dddee82b29a6cd1e4a68cd5b89af0c4c965d16530ef2ebdef5271.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 407, + 439, + 500 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/e106eabe078beab95b972a69189af22111abb78e6639e9af76f8e4f5cd5f62c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 442, + 409, + 566, + 500 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/3777b6eee1c8d9c394343378f40ac5aa2095fb85a998828f9198189142f87869.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 571, + 409, + 691, + 500 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/975397e4c6281b932be427001e50d427d0925f0174224f6aa25b3fca5d334927.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 696, + 409, + 821, + 501 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "B.5 DISTANCE METRICS: STATE, ACTION, AND REWARD RECONSTRUCTION", + "text_level": 1, + "bbox": [ + 171, + 555, + 710, + 569 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The choice of the 
distance functions $d_{\\mathcal{S}}$ , $d_{\\mathcal{A}}$ , and $d_{\\mathcal{R}}$ , plays a role in the success of our approach. The usual Euclidean distance is often a good choice for all the transition components, but the scale, dimensionality, and nature of the inputs sometimes require using scaled, normalized, or other kinds of distances to allow the network to reconstruct each component. While we did not observe such requirements in our experiments (where we simply used the Euclidean distance), high dimensional observations (e.g., images) are an example of data which could require tuning the state-distance function in such a way, to make sure that the optimization of the reward or action reconstruction will not be disfavored compared to that of the states.", + "bbox": [ + 169, + 580, + 826, + 693 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "B.6 VALUE DIFFERENCE", + "text_level": 1, + "bbox": [ + 171, + 710, + 357, + 724 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In addition to reporting the quality guarantees of the model along training steps through local losses (cf. Figure 4b), our experiments revealed that the absolute value difference $\\| V_{\\overline{\\pi}_{\\theta}}\\|$ between the original and latent models operating under the latent policy quickly decreases and tends to converge to values in the same range (Figure 6). This is consistent with the fact that minimizing local losses lead to close behaviors (cf. Eq. 1) and that the value function is Lipschitz-continuous w.r.t. $\\widetilde{d}_{\\overline{\\pi}_{\\theta}}$ (cf. Section 2).", + "bbox": [ + 169, + 737, + 823, + 810 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "B.7 REMARK ON FORMAL VERIFICATION", + "text_level": 1, + "bbox": [ + 171, + 828, + 472, + 840 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Recall that our bisimulation guarantees come by construction of the latent space. 
Essentially, our learning algorithm spits out a distilled policy and a latent state space which already yields a guaranteed bisimulation distance between the original MDP and the latent MDP. This is the crux of how we enable verification techniques like model checking. In particular, bisimulation guarantees mean that reachability probabilities in the latent MDP compared to those in the original one are close.", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Furthermore, the value difference of (omega-regular) properties (formulated through mu-calculus) obtained in the two models is bounded by this distance (cf. Sect. 2 and Chatterjee et al. 2010).", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Reachability is the key ingredient to model-check MDPs. Model-checking properties is in most cases performed by reduction to the reachability of components or regions of the MDP: it either consists of (i) iteratively checking the reachability of the parts of the state space satisfying path formulae that comprise the specification, through a tree-like decomposition of the latter (e.g., for (P,R-)CTL properties, cf. Baier & Katoen 2008), or (ii) checking the reachability to the part of the state space of a product of the MDP with a memory structure or an automaton that embeds the omega-regular property — e.g., for LTL (Baier et al., 2016; Sickert et al., 2016), LTLf (Wells et al., 2020), or GLTL (Littman et al., 2017), among other specification formalisms. The choice of specification formalism is up to the user and depends on the case study. 
The scope of this work is focusing on learning to distill RL policies with bisimulation guarantees so that model checking can be applied, in order to reason about the behaviors of the agent. That being said, reachability is all we need to show that model checking can be applied.", + "bbox": [ + 169, + 143, + 826, + 310 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "B.8 HYPERPARAMETERS", + "text_level": 1, + "bbox": [ + 171, + 329, + 361, + 343 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "$\\mathbf{W}^2\\mathbf{AE}$ -MDP parameters. All components (e.g., functions or distribution locations and scales, see Fig. 2) are represented and inferred by neural networks (multilayer perceptrons). All the networks share the same architecture (i.e., number of layers and neurons per layer). We use a simple uniform experience replay of size $10^{6}$ to store the transitions and sample them. The training starts when the agent has collected $10^{4}$ transitions in $\\mathcal{M}$ . We used minibatches of size 128 to optimize the objective and we applied a minibatch update every time the agent executing $\\pi$ has performed 16 steps in $\\mathcal{M}$ . We use the recursive $\\epsilon$ -perturbation trick of Huang (2020) with $\\epsilon = 3/4$ : when an episode ends, it restarts from the initial state with probability $1/4$ ; before re-starting an episode, the time spent in the reset state labeled with reset follows then the geometric distribution with expectation $\\epsilon/1 - \\epsilon = 3$ . We chose the same latent state-action space size than Delgrange et al. (2022), except for LunarLander that we decreased to $\\log_2|\\bar{S}| = 14$ and $|\\bar{\\mathcal{A}}| = 3$ to improve the scalability of the verification.", + "bbox": [ + 169, + 359, + 826, + 515 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "VAE-MDPs parameters. For the comparison of Sect. 4, we used the exact same VAE-MDP hyperparameter set as prescribed by Delgrange et al. 
(2022), except for the state-action space of LunarLander that we also changed for scalability and fair comparison purpose.", + "bbox": [ + 169, + 523, + 826, + 568 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Hyperparameter search. To evaluate our $\\mathrm{W}^2\\mathrm{AE}$ -MDP, we realized a search in the parameter space defined in Table 2. The best parameters found (in terms of trade-off between performance and latent quality) are reported in Table 3. We used two different optimizers for minimizing the loss (referred to as the minimizer) and computing the Wasserstein terms (referred to as the maximizer). We used ADAM (Kingma & Ba, 2015) for the two, but we allow for different learning rates $\\mathrm{ADAM}_{\\alpha}$ and exponential decays $\\mathrm{ADAM}_{\\beta_1}$ , $\\mathrm{ADAM}_{\\beta_2}$ . We also found that polynomial decay for $\\mathrm{ADAM}_{\\alpha}$ (e.g., to $10^{-5}$ for $4 \\cdot 10^{5}$ steps) is a good practice to stabilize the experiment learning curves, but is not necessary to obtain high-quality and performing distillation. Concerning the continuous relaxation of discrete distributions, we used a different temperature for each distribution, as Maddison et al. (2017) pointed out that doing so is valuable to improve the results. We further followed the guidelines of Maddison et al. (2017) to choose the interval of temperatures and did not schedule any annealing scheme (in contrast to VAE-MDPs). Essentially, the search reveals that the regularizer scale factors $\\beta$ . (defining the optimization direction) as well as the encoder and latent transition temperatures are important to improve the performance of distilled policies. For the encoder temperature, we found a nice spot in $\\lambda_{\\phi_\\varepsilon} = 2/3$ , which provides the best performance in general, whereas the choice of $\\lambda_{\\overline{\\mathbb{P}}_\\theta}$ and $\\beta$ : are (latent-) environment dependent. 
The importance of the temperature parameters for the continuous relaxation of discrete distributions is consistent with the results of (Maddison et al., 2017), revealing that the success of the relaxation depends on the choice of the temperature for the different latent space sizes.", + "bbox": [ + 169, + 579, + 826, + 843 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Labeling functions. We used the same labeling functions as those described by Delgrange et al. (2022). For completeness, we recall the labeling function used for each environment in Table 4.", + "bbox": [ + 169, + 854, + 826, + 883 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_footnote", + "text": "2The code for conducting the VAE-MDPs experiments is available at https://github.com/ florentdelgrange/vae_mdp (GNU General Public License v3.0).", + "bbox": [ + 171, + 896, + 823, + 924 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/c8077c6228f8b6d307808cb89e7805b4342533e4cd0afc26097c7cdd72bc3597.jpg", + "table_caption": [ + "Table 2: Hyperparameter search. ${\\lambda }_{X}$ refers to the temperature used for ${\\mathrm{W}}^{2}\\mathrm{{AE}}$ -MDP component $X$ ." + ], + "table_footnote": [], + "table_body": "
ParameterRange
ADAMα (minimizer){0.0001,0.0002,0.0003,0.001}
ADAMα (maximizer){0.0001,0.0002,0.0003,0.001}
ADAMβ1{0,0.5,0.9}
ADAMβ2{0.9,0.999}
neurons per layer{64,128,256,512}
number of hidden layers{1,2,3}
activation{ReLU,LeakyReLU,tanh,softplus(2x+2)/2-1 (smooth ELU)}
βwξπ{10,25,50,75,100}
βLξπ{10,25,50,75,100}
m{5,10,15,20}
δ{10,20}
use ε-mimic (cf. Delgrange et al. 2022){True,False} (if True, a decay rate of 10-5is used)
λPθ{0.1,1/3,1/2,2/3,3/5,0.99}
λφl{0.1,1/3,1/2,2/3,3/5,0.99}
λπθ{1/|A|-1,1/(|A|-1).15}
λφlA{1/|A|-1,1/(|A|-1).15}
", + "bbox": [ + 173, + 189, + 823, + 440 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/adfc48efc581154e4267d0056f6bbe8723b4996827a4634b9850cd1c9e2d0808.jpg", + "table_caption": [ + "Table 3: Final hyperparameters used to evaluate ${\\mathrm{W}}^{2}\\mathrm{{AE}}$ -MDPs in Sect. 4" + ], + "table_footnote": [], + "table_body": "
CartPoleMountainCarAcrobotLunarLanderPendulum
log2|S|910131413
|A|2 = |A|2 = |A|3 = |A|33
activationtanhReLULeaky ReluReLUReLU
layers[64, 64, 64][512, 512][512, 512][256][256, 256, 256]
ADAMα (minimizer)0.00020.00010.00020.00030.0003
ADAMα (maximizer)0.00020.00010.00010.00030.0003
ADAMβ10.50000.5
ADAMβ20.9990.9990.9990.9990.999
βLξπ1025105025
βWξπ751001010025
m52020155
δ2010202010
ε00000.5
λPθ1/31/30.10.752/3
λφi1/32/32/32/32/3
λπθ2/31/30.50.50.5
λφiA///1/31/3
", + "bbox": [ + 173, + 598, + 821, + 859 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/e94801594180b591b6249b492680ab69acf93c3c35dfe6059c5b868094ed9570.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
EnvironmentS⊆Description, for s ∈ Sℓ(s) = <p1, ..., pn, preset>
CartPoleR4• s1: cart position\n• s2: cart velocity\n• s3: pole angle (rad)\n• s4: pole velocity at tip• p1 = 1s1≥1.5: unsafe cart position\n• p2 = 1s3≥0.15: unsafe pole angle
MountainCarR2• s1: position\n• s2: velocity• p1 = 1s1>1.5: target position\n• p2 = 1s1≥-1/2: right-hand side of the mountain\n• p3 = 1s2≥0: car going forward
AcrobotR6Let θ1, θ2 ∈ [0, 2π] be the angles of the two rotational joints,\n• s1 = cos(θ1)\n• s2 = sin(θ1)\n• s3 = cos(θ2)\n• s4 = sin(θ2)\n• s5: angular velocity 1\n• s6: angular velocity 2• p1 = 1-s1-s3·s1+s4·s2>1: RL agent target\n• p2 = 1s1≥0: θ1 ∈ [0, π/2] ∪ [3π/2, 2π]\n• p3 = 1s2≥0: θ1 ∈ [0, π]\n• p4 = 1s3≥0: θ2 ∈ [0, π/2] ∪ [3π/2, 2π]\n• p5 = 1s4≥0: θ2 ∈ [0, π]\n• p6 = 1s5≥0: positive angular velocity (1)\n• p7 = 1s6≥0: positive angular velocity (2)
PendulumR3Let θ ∈ [0, 2π] be the joint angle\n• s1 = cos(θ)\n• s2 = sin(θ)\n• s3: angular velocity• p1 = 1s1≥cos(π/3): safe joint angle\n• p2 = 1s1≥0: θ ∈ [0, π/2] ∪ [3π/2, 2π]\n• p3 = 1s2≥0: θ ∈ [0, π]\n• p4 = 1s3≥0: positive angular velocity
LunarLanderR8• s1: horizontal coordinates\n• s2: vertical coordinates\n• s3: horizontal speed\n• s4: vertical speed\n• s5: ship angle\n• s6: angular speed\n• s7: left leg contact\n• s8: right leg contact• p1: unsafe angle\n• p2: leg ground contact\n• p3: lands rapidly\n• p4: left inclination\n• p5: right inclination\n• p6: motors shut down
", + "bbox": [ + 173, + 101, + 823, + 481 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 4: Labeling functions for the OpenAI environments considered in our experiments (Delgrange et al., 2022). We provide a short description of the state space and the meaning of each atomic proposition. Recall that labels are binary encoded, for $n = |\\mathbf{AP}| - 1$ (one bit is reserved for reset) and $p_{\\mathrm{reset}} = 1$ iff $s$ is a reset state (cf. Appendix B.2).", + "bbox": [ + 169, + 489, + 826, + 547 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Time to failure properties. Based on the labeling described in Table 4, we formally detail the time to failure properties checked in Sect. 4 whose results are listed in Table 1 for each environment. Let $\\text{Reset} = \\{\\text{reset}\\} = \\langle 0, \\dots, 1 \\rangle$ (we assume here that the last bit indicates whether the current state is a reset state or not) and define $s \\models \\mathsf{L}_1 \\land \\mathsf{L}_2$ iff $s \\models \\mathsf{L}_1$ and $s \\models \\mathsf{L}_2$ for any $s \\in S$ , then", + "bbox": [ + 169, + 571, + 823, + 630 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- CartPole: $\\varphi = \\neg$ Reset $\\mathcal{U}$ Unsafe, where Unsafe $= \\langle 1,1,0\\rangle$", + "- MountainCar: $\\varphi = \\neg$ GoalU Reset, where Goal $= \\langle 1,0,0,0\\rangle$", + "- Acrobot: $\\varphi = \\neg$ GoalU Reset, where Goal $= \\langle 1,0,\\dots ,0\\rangle$", + "- LunarLander: $\\varphi = \\neg$ SafeLanding $\\mathcal{U}$ Reset, where SafeLanding $=$ GroundContact $\\land$ MotorsOff, GroundContact $=\\langle 0,1,0,0,0,0,0\\rangle$ , and MotorsOff $=\\langle 0,0,0,0,0,1,0\\rangle$", + "- Pendulum: $\\varphi = \\diamondsuit (\\neg \\text{Safe} \\land \\bigcirc \\text{Reset})$ , where Safe = $\\langle 1,0,0,0,0\\rangle$ , $\\diamondsuit \\mathsf{T} = \\neg \\emptyset \\mathcal{U} \\mathsf{T}$ , and $s_i \\models \\bigcirc 
\\mathsf{T}$ iff $s_{i+1} \\models \\mathsf{T}$ , for any $\\mathsf{T} \\subseteq \\mathbf{AP}$ , $s_{i:\\infty}, a_{i:\\infty} \\in \\text{Traj}$ . Intuitively, $\\varphi$ denotes the event of ending an episode in an unsafe state, just before resetting the environment, which means that either the agent never reached the safe region or it reached and left it at some point. Formally, $\\varphi = \\{s_{0:\\infty}, a_{0:\\infty} \\mid \\exists i \\in \\mathbb{N}, s_i \\models \\neg \\text{Safe} \\land s_{i+1} \\models \\text{Reset}\\} \\subseteq \\text{Traj}$ ." + ], + "bbox": [ + 215, + 640, + 823, + 800 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "C ON THE CURSE OF VARIATIONAL MODELING", + "text_level": 1, + "bbox": [ + 171, + 823, + 584, + 838 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Posterior collapse is a well known issue occurring in variational models (see, e.g., Alemi et al. 2018; Tolstikhin et al. 2018; He et al. 2019; Dong et al. 2020) which intuitively results in a degenerate local optimum where the model learns to ignore the latent space and use only the reconstruction functions (i.e., the decoding distribution) to optimize the objective. VAE-MDPs are no exception, as pointed out in the original paper (Delgrange et al., 2022, Section 4.3 and Appendix C.2).", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/85a29476b5c38b313eac6a809b2a1d01b774ebfb0476120102db2a2c5df884c2.jpg", + "image_caption": [ + "(a) Latent space distribution along training steps. The intensity of the blue hue corresponds to the frequency of latent states produced from $\\phi_{\\iota}$ during training. 
The vanilla model collapses to a single state." + ], + "image_footnote": [], + "bbox": [ + 178, + 104, + 367, + 218 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/5e1e6827c55e71f4a031d936a287450b1b4acbb5c82f45c46029490dc63a095b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 375, + 103, + 563, + 218 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/07df2d4f32a0d23b07beeedbfc742076773ffc890df1b7262e13683eef5f714b.jpg", + "image_caption": [ + "(b) Rate of the variational model." + ], + "image_footnote": [], + "bbox": [ + 575, + 128, + 803, + 248 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/8ddee2b133ea3f2a9f0eaf010868ff0e9ac0b02a43b19636f55ecd3f5cabea1e.jpg", + "image_caption": [ + "(c) Distortion of the variational model." + ], + "image_footnote": [], + "bbox": [ + 173, + 291, + 367, + 388 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/aab5da58b05380464cfc1a5680fc87d6b66a66e0017fa4a7b8f0b83a44a66b16.jpg", + "image_caption": [ + "(d) Average point-wise entropy of $\\phi_{\\iota}(\\cdot \\mid s)$ , for $s \\in S$ drawn from the interaction with the original environment." + ], + "image_footnote": [], + "bbox": [ + 403, + 291, + 589, + 388 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/14909e47b3550db438ea0715e7f9590574bf78942418fe291acc608c902c230a.jpg", + "image_caption": [ + "(e) Performance of the resulting distilled policy $\\bar{\\pi}_{\\theta}$ when deployed in the original environment (averaged over 30 episodes).", + "Figure 7: Comparison of the VAE-MDP in the CartPole environment (i) when the distortion and the rate are minimized as is (vanilla model) and (ii) when it makes use of annealing schemes, entropy regularization, and prioritized experience replay to avoid posterior collapse (cf. Delgrange et al. 2022). 
While the former clearly fails to learn a useful latent representation, the later does so meticulously and smoothly in two distinguishable phases: first, $\\phi_{\\iota}$ focuses on fairly distributing the latent space, setting up the stage to the concrete optimization occurring from step $4\\cdot 10^{5}$ , where the entropy of $\\phi_{\\iota}$ is lowered, which allows to get the rate of the variational model away from zero. Five instances of the models are trained with different random seeds, with the same hyperparameters than in Sect. 4." + ], + "image_footnote": [], + "bbox": [ + 622, + 292, + 805, + 386 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Formally, VAE- and WAE-MDPs optimize their objective by minimizing two losses: a reconstruction cost plus a regularizer term which penalizes a discrepancy between the encoding distribution and the dynamics of the latent space model. In VAE-MDPs, the former corresponds to the distortion, and the later to the rate of the variational model (further details are given in Alemi et al. 2018; Delgrange et al. 2022), while in our WAE-MDPs, the former corresponds to the raw transition distance and the later to both the steady-state and transition regularizers. Notably, the rate minimization of VAE-MDPs involves regularizing a stochastic embedding function $\\phi_{\\iota}(\\cdot | s)$ point-wise, i.e., for all different input states $s \\in S$ drawn from the interaction with the original environment. In contrast, the latent space regularization of the WAE-MDP involves the marginal embedding distribution $Q_{\\iota}$ where the embedding function $\\phi_{\\iota}$ is not required to be stochastic. Alemi et al. (2018) showed that posterior collapse occurs in VAEs when the rate of the variational model is close to zero, leading to low-quality representation.", + "bbox": [ + 169, + 595, + 826, + 763 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Posterior collapse in VAE-MDPs. 
We illustrate the sensitivity of VAE-MDPs to the posterior collapse problem in Fig. 7, through the CartPole environment3: minimizing the distortion and the rate as is yields an embedding function which maps deterministically every input state to the same sink latent state (cf. Fig. 7a). Precisely, there is a latent state $\\bar{s} \\in \\bar{S}$ so that $\\phi_{\\nu}(\\bar{s} \\mid s) \\approx 1$ and $\\overline{\\mathbf{P}}_{\\theta}(\\bar{s} \\mid \\bar{s}, \\bar{a}) \\approx 1$ whatever the state $s \\in S$ and action $\\bar{a} \\in \\overline{A}$ . This is a form of posterior collapse, the resulting rate quickly drops to zero (cf. Fig 7b), and the resulting latent representation yields no information at all. This phenomenon is handled in VAE-MDPs by using (i) prioritized replay buffers that allow to focus on inputs that led to bad representation, and (ii) modifying the objective", + "bbox": [ + 169, + 772, + 826, + 887 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_footnote", + "text": "3In fact, the phenomenon of collapsing to few state occurs for all the environments considered in this paper when their prioritized experience replay is not used, as illustrated in Delgrange et al., 2022, Appendix C.2.", + "bbox": [ + 169, + 897, + 823, + 925 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "function for learning the latent space model — the so-called evidence lower bound (Hoffman et al., 2013; Kingma & Welling, 2014), or ELBO for short — and set up annealing schemes to eventually recover the ELBO at the end of the training process. 
Consequently, the resulting learning procedure focuses primarily on fairly distributing the latent space, to avoid it to collapse to a single latent state, to the detriment of learning the dynamics of the environment and the distillation of the RL policy. Then, the annealing scheme allows to make the model learn to finally smoothly use the latent space to maximize the ELBO, and achieve consequently a lower distortion at the \"price\" of a higher rate.", + "bbox": [ + 169, + 103, + 826, + 203 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Impact of the resulting learning procedure. The aforementioned annealing process, used to avoid that every state collapses to the same representation, possibly induces a high entropy embedding function (Fig. 7d), which further complicates the learning of the model dynamics and the distillation in the first stage of the training process. In fact, in this particular case, one can observe that the entropy reaches its maximal value, which yields a fully random state embedding function. Recall that the VAE-MDP latent space is learned through independent Bernoulli distributions. Fig. 7d reports values centered around 4.188 in the first training phase, which corresponds to the entropy of the state embedding function when $\\phi_{\\iota}(\\cdot |s)$ is uniformly distributed over $\\bar{S}$ for any state $s\\in S$ .. $H(\\phi_{\\iota}(\\cdot |s)) = \\sum_{i = 0}^{\\log_2|\\bar{S}| - |\\mathbf{AP}| = 6} - p_i\\log p_i - (1 - p_i)\\log (1 - p_i) = 4.188,$ where $p_i = 1 / 2$ for all $i.$ The rate (Fig. 7b) drops to zero since the divergence pulls the latent dynamics towards this high entropy (yet another form of posterior collapse), which hinders the latent space model to learn a useful representation. However, the annealing scheme increases the rate importance along training steps, which enables the optimization to eventually leave this local optimum (here around $4\\cdot 10^{5}$ training steps). 
This allows the learning procedure to leave the zero-rate spot, reduce the distortion (Fig. 7c), and finally distill the original policy (Fig. 7e).", + "bbox": [ + 169, + 210, + 826, + 429 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "As a result, the whole engineering required to mitigate posterior collapse slows down the training procedure. This phenomenon is reflected in Fig. 4: VAE-MDPs need several steps to stabilize and set up the stage to the concrete optimization, whereas WAE-MDPs have no such requirements since they naturally do not suffer from collapsing issues (cf. Fig. 5), and are consequently faster to train.", + "bbox": [ + 169, + 434, + 823, + 492 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Lack of representation guarantees. On the theoretical side, since VAE-MDPs are optimized via the ELBO and the local losses via the related variational proxies, VAE-MDPs do not leverage the representation quality guarantees induced by local losses (Eq. 1) during the learning procedure (as explicitly pointed out by Delgrange et al., 2022, Sect. 4.1.): in contrast to WAE-MDPs, when two original states are embedded to the same latent, abstract state, the former are not guaranteed to be bisimilarly close (i.e., the agent is not guaranteed to behave the same way from those two states by executing the policy), meaning those proxies do not prevent original states having distant values collapsing together to the same latent representation.", + "bbox": [ + 169, + 501, + 823, + 613 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "INDEX OF NOTATIONS", + "text_level": 1, + "bbox": [ + 171, + 633, + 359, + 648 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$\\mathbf{1}_{[cond]}$ indicator function: 1 if the statement [cond] is true, and 0 otherwise", + "bbox": [ + 173, + 679, + 673, + 695 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$\\mathcal{F}_d$ Set of 1-Lipschitz functions w.r.t. 
the distance metric $d$", + "bbox": [ + 173, + 698, + 589, + 713 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$\\sigma$ Sigmoid function, with $\\sigma (x) = 1 / (1 + \\exp (-x))$", + "bbox": [ + 173, + 718, + 514, + 733 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$f_{\\theta}$ A function $f_{\\theta} \\colon \\mathcal{X} \\to \\mathbb{R}$ modeled by a neural network, parameterized by $\\theta$ , where $\\mathcal{X}$ is any measurable set", + "bbox": [ + 173, + 737, + 823, + 763 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Latent Space Model", + "text_level": 1, + "bbox": [ + 171, + 770, + 316, + 784 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$\\overline{\\mathcal{M}} = \\langle \\overline{S}, \\overline{\\mathcal{A}}, \\overline{\\mathbf{P}}, \\overline{\\mathcal{R}}, \\bar{\\ell}, \\mathbf{AP}, \\bar{s}_I \\rangle$ Latent MDP with state space $\\overline{S}$ , action space $\\overline{\\mathcal{A}}$ , reward function $\\overline{\\mathcal{R}}$ , labeling function $\\bar{\\ell}$ , atomic proposition space $\\mathbf{AP}$ , and initial state $\\bar{s}_I$ .", + "bbox": [ + 173, + 787, + 826, + 821 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$\\langle \\overline{\\mathcal{M}},\\phi ,\\psi \\rangle$ Latent space model of $\\mathcal{M}$", + "bbox": [ + 173, + 824, + 424, + 840 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$\\bar{a}$ Latent action in $\\overline{\\mathcal{A}}$", + "bbox": [ + 173, + 845, + 352, + 861 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$\\bar{\\pi}$ Latent policy $\\bar{\\pi}:\\bar{S}\\to \\mathcal{A}$ ; can be executed in $\\mathcal{M}$ via $\\phi$ : $\\bar{\\pi} (\\cdot \\mid \\phi (s))$", + "bbox": [ + 173, + 866, + 661, + 883 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$d_{\\overline{S}}$ Distance metric over $\\bar{S}$", + "bbox": [ + 173, + 887, + 382, + 902 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": 
"$\\phi$ State embedding function, from $\\mathcal{S}$ to $\\overline{\\mathcal{S}}$", + "bbox": [ + 173, + 907, + 486, + 922 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "$\\psi$ Action embedding function, from $\\overline{S}\\times \\overline{A}$ to $\\mathcal{A}$", + "bbox": [ + 169, + 102, + 532, + 119 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\phi \\mathbf{P}$ Distribution of drawing $s^\\prime \\sim \\mathbf{P}(\\cdot \\mid s,a)$ , then embedding $\\bar{s}^{\\prime} = \\phi (s^{\\prime})$ , for any state $s\\in S$ and action $a\\in \\mathcal{A}$", + "bbox": [ + 169, + 121, + 825, + 150 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$L_{\\mathcal{R}}^{\\xi}$ Local reward loss under distribution $\\xi$", + "bbox": [ + 173, + 152, + 478, + 172 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$L_{\\mathbf{P}}^{\\xi}$ Local transition loss under distribution $\\xi$", + "bbox": [ + 173, + 174, + 495, + 194 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\overline{\\Pi}$ Set of (memoryless) latent policies", + "bbox": [ + 173, + 196, + 459, + 214 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\bar{s}$ Latent state in $\\bar{S}$", + "bbox": [ + 173, + 215, + 339, + 231 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\overrightarrow{V_{\\pi}}$ Latent value function", + "bbox": [ + 173, + 234, + 370, + 252 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Markov Decision Processes", + "text_level": 1, + "bbox": [ + 169, + 255, + 366, + 268 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\mathcal{M} = \\langle S, \\mathcal{A}, \\mathbf{P}, \\mathcal{R}, \\ell, \\mathbf{AP}, s_I \\rangle$ MDP 
$\\mathcal{M}$ with state space $S$ , action space $\\mathcal{A}$ , transition function $\\mathbf{P}$ , labeling function $\\ell$ , atomic proposition space $\\mathbf{AP}$ , and initial state $s_I$ .", + "bbox": [ + 169, + 272, + 826, + 304 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$a$ Action in $\\mathcal{A}$", + "bbox": [ + 173, + 306, + 310, + 321 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\widetilde{d}_{\\pi}$ Bisimulation pseudometric", + "bbox": [ + 173, + 323, + 406, + 340 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\gamma$ Discount factor in [0, 1]", + "bbox": [ + 173, + 343, + 387, + 361 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$d_{\\mathcal{A}}$ Metric over the action space", + "bbox": [ + 173, + 363, + 415, + 378 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$d_{\\mathcal{R}}$ Metric over $\\operatorname {Im}(\\mathcal{R})$", + "bbox": [ + 173, + 381, + 356, + 397 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$d_{\\mathcal{S}}$ Metric over the state space", + "bbox": [ + 173, + 401, + 405, + 417 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\xi_{\\pi}^{t}$ Limiting distribution of the MDP defined as $\\xi_{\\pi}^{t}(s^{\\prime}\\mid s) = \\mathbb{P}_{\\pi}^{\\mathcal{M}_{s}}\\left(\\left\\{s_{0:\\infty},a_{0:\\infty}\\mid s_{t} = s^{\\prime}\\right\\}\\right)$ , for any source state $s\\in S$", + "bbox": [ + 173, + 419, + 825, + 450 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\Pi$ Set of memoryless policies of $\\mathcal{M}$", + "bbox": [ + 173, + 452, + 449, + 469 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\pi$ Memoryless policy $\\pi \\colon S\\to \\Delta (\\mathcal{A})$", + "bbox": [ + 173, + 472, + 455, + 488 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\mathbb{P}_{\\pi}^{\\mathcal{M}}$ Unique probability measure induced by the policy $\\pi$ in $\\mathcal{M}$ on the Borel 
\\sigma$ -algebra over measurable subsets of $Traj$", + "bbox": [ + 173, + 491, + 825, + 523 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\mathsf{C} \\mathcal{U} \\mathsf{T}$ Constrained reachability event", + "bbox": [ + 173, + 525, + 429, + 541 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\mathcal{M}_s$ MDP obtained by replacing the initial state of $\\mathcal{M}$ by $s\\in S$", + "bbox": [ + 173, + 544, + 614, + 561 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$s$ State in $\\mathcal{S}$", + "bbox": [ + 173, + 564, + 295, + 578 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\xi_{\\pi}$ Stationary distribution of $\\mathcal{M}$ induced by the policy $\\pi$", + "bbox": [ + 173, + 582, + 576, + 599 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\vec{d}$ Raw transition distance, i.e., metric over $\\mathcal{S} \\times \\mathcal{A} \\times \\operatorname{Im}(\\mathcal{R}) \\times \\mathcal{S}$", + "bbox": [ + 173, + 602, + 638, + 619 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Traj Set of infinite trajectories of $\\mathcal{M}$", + "bbox": [ + 173, + 623, + 437, + 638 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\tau = \\langle s_{0:T}, a_{0:T-1} \\rangle$ Trajectory", + "bbox": [ + 173, + 642, + 370, + 657 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$V_{\\pi}$ Value function for the policy $\\pi$", + "bbox": [ + 173, + 679, + 431, + 695 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Probability / Measure Theory", + "text_level": 1, + "bbox": [ + 173, + 698, + 382, + 714 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$D$ Discrepancy measure; $D(P,Q)$ is the discrepancy between distributions $P,Q\\in \\Delta (\\mathcal{X})$", + "bbox": [ + 173, + 717, + 792, + 733 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\Delta (\\mathcal{X})$ Set of measures over a complete, separable metric space $\\mathcal{X}$", + "bbox": [ + 173, + 736, + 
614, + 752 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Logistic $(\\mu, s)$ Logistic distribution with location parameter $\\mu$ and scale parameter $s$", + "bbox": [ + 173, + 755, + 727, + 771 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$W_{d}$ Wasserstein distance w.r.t. the metric $d$ ; $W_{d}(P,Q)$ is the Wasserstein distance between distributions $P, Q \\in \\Delta(\\mathcal{X})$", + "bbox": [ + 173, + 773, + 823, + 804 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Wasserstein Auto-encoded MDP", + "text_level": 1, + "bbox": [ + 173, + 806, + 401, + 820 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\xi_{\\theta}$ Behavioral model: distribution over $\\mathcal{S} \\times \\mathcal{A} \\times \\operatorname{Im}(\\mathcal{R}) \\times \\mathcal{S}$", + "bbox": [ + 173, + 844, + 609, + 861 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$G_{\\theta}$ Mapping $\\langle \\bar{s},\\bar{a},\\bar{s}^{\\prime}\\rangle \\mapsto \\langle \\mathcal{G}_{\\theta}(\\bar{s}),\\psi_{\\theta}(\\bar{s},\\bar{a}),\\overline{\\mathcal{R}}_{\\theta}(\\bar{s},\\bar{a}),\\mathcal{G}_{\\theta}(\\bar{s}^{\\prime})\\rangle$", + "bbox": [ + 173, + 864, + 604, + 882 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\phi_{\\iota}^{A}$ Action encoder mapping $\\overline{S}\\times \\mathcal{A}$ to $\\Delta (\\overline{\\mathcal{A}})$", + "bbox": [ + 173, + 885, + 501, + 904 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "$\\mathcal{G}_{\\theta}$ State-wise decoder, from $\\bar{S}$ to $S$", + "bbox": [ + 173, + 907, + 441, + 924 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "$Q_{\\iota}$ Marginal 
encoding distribution over $\\overline{S} \\times \\overline{A} \\times \\overline{S}: \\mathbb{E}_{s,a,s' \\sim \\xi_{\\pi}} \\phi_{\\iota}(\\cdot \\mid s,a,s')$", + "$\\bar{\\xi}_{\\bar{\\pi}_\\theta}$ Stationary distribution of the latent model $\\overline{\\mathcal{M}}_{\\theta}$ , parameterized by $\\theta$", + "$\\mathcal{W}_{\\xi_{\\overline{\\pi}}}$ Steady-state regularizer", + "$\\varphi_{\\omega}^{\\xi}$ Steady-state Lipschitz network", + "$\\lambda$ Temperature parameter", + "$\\mathcal{T}$ Distribution of drawing state-action pairs from interacting with $\\mathcal{M}$ , embedding them to the latent spaces, and finally letting them transition to their successor state in $\\overline{\\mathcal{M}}_{\\theta}$ , in $\\Delta (\\bar{S}\\times \\bar{A}\\times \\bar{S})$", + "$\\varphi_{\\omega}^{\\mathbf{P}}$ Transition Lipschitz network" + ], + "bbox": [ + 171, + 102, + 823, + 272 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "ADDITIONAL REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 291, + 401, + 306 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S. Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow, Andrew Harp, Geoffrey Irving, Michael Isard, Yangqing Jia, Rafal Jozefowicz, Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dandelion Mane, Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Mike Schuster, Jonathon Shlens, Benoit Steiner, Ilya Sutskever, Kunal Talwar, Paul Tucker, Vincent Vanhoucke, Vijay Vasudevan, Fernanda Viégas, Oriol Vinyals, Pete Warden, Martin Wattenberg, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. TensorFlow: Large-scale machine learning on heterogeneous systems, 2015. URL https://www.tensorflow.org/. Software available from tensorflow.org.", + "Alexander A. Alemi, Ben Poole, Ian Fischer, Joshua V. Dillon, Rif A. Saurous, and Kevin Murphy. 
Fixing a broken ELBO. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 159-168. PMLR, 2018. URL http://proceedings.mlr.press/v80/alemi18a.html.", + "Joshua V. Dillon, Ian Langmore, Dustin Tran, Eugene Brevdo, Srinivas Vasudevan, Dave Moore, Brian Patton, Alex Alemi, Matt Hoffman, and Rif A. Saurous. Tensorflow distributions, 2017.", + "Zhe Dong, Bryan A. Seybold, Kevin Murphy, and Hung H. Bui. Collapsed amortized variational inference for switching nonlinear dynamical systems. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 2638-2647. PMLR, 2020. URL http://proceedings.mlr.press/v119/dong20e.html.", + "Sergio Guadarrama, Anoop Korattikara, Oscar Ramirez, Pablo Castro, Ethan Holly, Sam Fishman, Ke Wang, Ekaterina Gonina, Neal Wu, Efi Kokiopoulou, Luciano Sbaiz, Jamie Smith, Gábor Bartók, Jesse Berent, Chris Harris, Vincent Vanhoucke, and Eugene Brevdo. TF-Agents: A library for reinforcement learning in tensorflow. https://github.com/tensorflow/agents, 2018. URL https://github.com/tensorflow/agents. [Online; accessed 25-June-2019].", + "Junxian He, Daniel Spokoyny, Graham Neubig, and Taylor Berg-Kirkpatrick. Lapping inference networks and posterior collapse in variational autoencoders. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019. URL https://openreview.net/forum?id=rylDfnCqF7.", + "Matthew D. Hoffman, David M. Blei, Chong Wang, and John W. Paisley. Stochastic variational inference. J. Mach. Learn. Res., 14(1):1303-1347, 2013. URL http://dl.acm.org/citation.cfm?id=2502622.", + "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. 
In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. URL http://arxiv.org/abs/1412.6980." + ], + "bbox": [ + 171, + 315, + 828, + 924 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Diederik P. Kingma and Max Welling. Auto-encoding variational bayes. In Yoshua Bengio and Yann LeCun (eds.), 2nd International Conference on Learning Representations, ICLR 2014, Banff, AB, Canada, April 14-16, 2014, Conference Track Proceedings, 2014. URL http://arxiv.org/abs/1312.6114.", + "Vidyadhar G. Kulkarni. Modeling and Analysis of Stochastic Systems. Chapman & Hall, Ltd., GBR, 1995. ISBN 0412049910.", + "Ilya O. Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Scholkopf. Wasserstein autoencoders. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=HkL7n1-0b." 
+ ], + "bbox": [ + 174, + 102, + 826, + 263 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 29 + } +] \ No newline at end of file diff --git a/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_model.json b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_model.json new file mode 100644 index 0000000000000000000000000000000000000000..bdabbbb34fc622bfc7c71a14934ecd265ffa6d01 --- /dev/null +++ b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_model.json @@ -0,0 +1,5342 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.64, + 0.12 + ], + "angle": 0, + "content": "WASSERSTEIN AUTO-ENCODEDMDPS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.12, + 0.806, + 0.133 + ], + "angle": 0, + "content": "FORMAL VERIFICATION OF EFFICIENTLY DISTILLED RL POLICIES WITH MANY-SIDED GUARANTEES" + }, + { + "type": "title", + "bbox": [ + 0.182, + 0.158, + 0.314, + 0.173 + ], + "angle": 0, + "content": "Florent Delgrange" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.173, + 0.459, + 0.187 + ], + "angle": 0, + "content": "AI Lab, Vrije Universiteit Brussel (VUB)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.187, + 0.334, + 0.201 + ], + "angle": 0, + "content": "University of Antwerp" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.201, + 0.48, + 0.214 + ], + 
"angle": 0, + "content": "florent.delgrange@ai.vub.ac.be" + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.158, + 0.593, + 0.171 + ], + "angle": 0, + "content": "Ann Nowé" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.172, + 0.609, + 0.186 + ], + "angle": 0, + "content": "AI Lab, VUB" + }, + { + "type": "title", + "bbox": [ + 0.644, + 0.158, + 0.783, + 0.171 + ], + "angle": 0, + "content": "Guillermo A. Pérez" + }, + { + "type": "text", + "bbox": [ + 0.644, + 0.172, + 0.795, + 0.187 + ], + "angle": 0, + "content": "University of Antwerp" + }, + { + "type": "text", + "bbox": [ + 0.644, + 0.187, + 0.746, + 0.199 + ], + "angle": 0, + "content": "Flanders Make" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.251, + 0.548, + 0.266 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.28, + 0.771, + 0.545 + ], + "angle": 0, + "content": "Although deep reinforcement learning (DRL) has many success stories, the large-scale deployment of policies learned through these advanced techniques in safety-critical scenarios is hindered by their lack of formal guarantees. Variational Markov Decision Processes (VAE-MDPs) are discrete latent space models that provide a reliable framework for distilling formally verifiable controllers from any RL policy. While the related guarantees address relevant practical aspects such as the satisfaction of performance and safety properties, the VAE approach suffers from several learning flaws (posterior collapse, slow learning speed, poor dynamics estimates), primarily due to the absence of abstraction and representation guarantees to support latent optimization. We introduce the Wasserstein auto-encoded MDP (WAE-MDP), a latent space model that fixes those issues by minimizing a penalized form of the optimal transport between the behaviors of the agent executing the original policy and the distilled policy, for which the formal guarantees apply. 
Our approach yields bisimulation guarantees while learning the distilled policy, allowing concrete optimization of the abstraction and representation model quality. Our experiments show that, besides distilling policies up to 10 times faster, the latent model quality is indeed better in general. Moreover, we present experiments from a simple time-to-failure verification algorithm on the latent space. The fact that our approach enables such simple verification techniques highlights its applicability." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.567, + 0.338, + 0.582 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.598, + 0.828, + 0.807 + ], + "angle": 0, + "content": "Reinforcement learning (RL) is emerging as a solution of choice to address challenging real-word scenarios such as epidemic mitigation and prevention strategies (Libin et al., 2020), multi-energy management (Ceusters et al., 2021), or effective canal control (Ren et al., 2021). RL enables learning high performance controllers by introducing general nonlinear function approximators (such as neural networks) to scale with high-dimensional and continuous state-action spaces. This introduction, termed deep-RL, causes the loss of the conventional convergence guarantees of RL (Tsitsiklis, 1994) as well as those obtained in some continuous settings (Nowe, 1994), and hinders their wide roll-out in critical settings. This work enables the formal verification of any such policies, learned by agents interacting with unknown, continuous environments modeled as Markov decision processes (MDPs). Specifically, we learn a discrete representation of the state-action space of the MDP, which yield both a (smaller, explicit) latent space model and a distilled version of the RL policy, that are tractable for model checking (Baier & Katoen, 2008). 
The latter are supported by bisimulation guarantees: intuitively, the agent behaves similarly in the original and latent models. The strength of our approach is not simply that we verify that the RL agent meets a predefined set of specifications, but rather provide an abstract model on which the user can reason and check any desired agent property." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.813, + 0.829, + 0.926 + ], + "angle": 0, + "content": "Variational MDPs (VAE-MDPs, Delgrange et al. 2022) offer a valuable framework for doing so. The distillation is provided with PAC-verifiable bisimulation bounds guaranteeing that the agent behaves similarly (i) in the original and latent model (abstraction quality); (ii) from all original states embedded to the same discrete state (representation quality). Whilst the bounds offer a confidence metric that enables the verification of performance and safety properties, VAE-MDPs suffer from several learning flaws. First, training a VAE-MDP relies on variational proxies to the bisimulation bounds, meaning there is no learning guarantee on the quality of the latent model via its optimization. Second, variational autoencoders (VAEs) (Kingma & Welling, 2014; Hoffman et al., 2013) are known" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.189 + ], + "angle": 0, + "content": "to suffer from posterior collapse (e.g., Alemi et al. 2018) resulting in a deterministic mapping to a unique latent state in VAE-MDPs. Most of the training process focuses on handling this phenomenon and setting up the stage for the concrete distillation and abstraction, finally taking place in a second training phase. 
This requires extra regularizers, setting up annealing schemes and learning phases, and defining prioritized replay buffers to store transitions. Distillation through VAE-MDPs is thus a meticulous task, requiring a large step budget and tuning many hyperparameters." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.194, + 0.828, + 0.322 + ], + "angle": 0, + "content": "Building upon Wasserstein autoencoders (Tolstikhin et al., 2018) instead of VAEs, we introduce Wasserstein auto-encoded MDPs (WAE-MDPs), which overcome those limitations. Our WAE relies on the optimal transport (OT) from trace distributions resulting from the execution of the RL policy in the real environment to that reconstructed from the latent model operating under the distilled policy. In contrast to VAEs which rely on variational proxies, we derive a novel objective that directly incorporates the bisimulation bounds. Furthermore, while VAEs learn stochastic mappings to the latent space which need be determined or even entirely reconstructed from data at the deployment time to obtain the guarantees, our WAE has no such requirements, and learn all the necessary components to obtain the guarantees during learning, and does not require such post-processing operations." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.327, + 0.828, + 0.427 + ], + "angle": 0, + "content": "Those theoretical claims are reflected in our experiments: policies are distilled up to 10 times faster through WAE- than VAE-MDPs and provide better abstraction quality and performance in general, without the need for setting up annealing schemes and training phases, nor prioritized buffer and extra regularizer. Our distilled policies are able to recover (and sometimes even outperform) the original policy performance, highlighting the representation quality offered by our new framework: the distillation is able to remove some non-robustness of the input RL policy. 
Finally, we formally verified time-to-failure properties (e.g., Pnueli 1977) to emphasize the applicability of our approach." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.434, + 0.829, + 0.56 + ], + "angle": 0, + "content": "Other Related Work. Complementary works approach safe RL via formal methods (Junges et al., 2016; Alshiekh et al., 2018; Jansen et al., 2020; Simão et al., 2021), aimed at formally ensuring safety during RL, all of which require providing an abstract model of the safety aspects of the environment. They also include the work of Alamdari et al. (2020), applying synthesis and model checking on policies distilled from RL, without quality guarantees. Other frameworks share our goal of verifying deep-RL policies (Bacci & Parker, 2020; Carr et al., 2020) but rely on a known environment model, among other assumptions (e.g., deterministic or discrete environment). Finally, DeepSynth (Hasanbeig et al., 2021) allows learning a formal model from execution traces, with the different purpose of guiding the agent towards sparse and non-Markovian rewards." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.566, + 0.828, + 0.651 + ], + "angle": 0, + "content": "On the latent space training side, WWAEs (Zhang et al., 2019) reuse OT as latent regularizer discrepancy (in Gaussian closed form), whereas we derive two regularizers involving OT. These two are, in contrast, optimized via the dual formulation of Wasserstein, as in Wasserstein-GANs (Arjovsky et al., 2017). Similarly to \\( VQ \\)-VAEs (van den Oord et al., 2017) and Latent Bernoulli AEs (Fajtl et al., 2020), our latent space model learns discrete spaces via deterministic encoders, but relies on a smooth approximation instead of using the straight-through gradient estimator." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.657, + 0.828, + 0.743 + ], + "angle": 0, + "content": "Works on representation learning for RL (Gelada et al., 2019; Castro et al., 2021; Zhang et al., 2021; Zang et al., 2022) consider bisimulation metrics to optimize the representation quality, and aim at learning (continuous) representations which capture bisimulation, so that two states close in the representation are guaranteed to provide close and relevant information to optimize the performance of the controller. In particular, as in our work, DeepMDPs (Gelada et al., 2019) are learned by optimizing local losses, by assuming a deterministic MDP and without verifiable confidence measurement." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.761, + 0.33, + 0.777 + ], + "angle": 0, + "content": "2 BACKGROUND" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.791, + 0.828, + 0.808 + ], + "angle": 0, + "content": "In the following, we write \\(\\Delta(\\mathcal{X})\\) for the set of measures over (complete, separable metric space) \\(\\mathcal{X}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.812, + 0.829, + 0.927 + ], + "angle": 0, + "content": "Markov decision processes (MDPs) are tuples \\(\\mathcal{M} = \\langle \\mathcal{S},\\mathcal{A},\\mathbf{P},\\mathcal{R},\\ell ,\\mathbf{AP},s_I\\rangle\\) where \\(\\mathcal{S}\\) is a set of states; \\(\\mathcal{A}\\), a set of actions; \\(\\mathbf{P}\\colon S\\times \\mathcal{A}\\to \\Delta (\\mathcal{S})\\), a probability transition function that maps the current state and action to a distribution over the next states; \\(\\mathcal{R}\\colon S\\times \\mathcal{A}\\to \\mathbb{R}\\), a reward function; \\(\\ell \\colon S\\to 2^{\\mathbf{AP}}\\), a labeling function over a set of atomic propositions \\(\\mathbf{AP}\\); and \\(s_I\\in S\\), the initial state. If \\(|\\mathcal{A}| = 1\\), \\(\\mathcal{M}\\) is a fully stochastic process called a Markov chain (MC). 
We write \\(\\mathcal{M}_s\\) for the MDP obtained when replacing the initial state of \\(\\mathcal{M}\\) by \\(s\\in S\\). An agent interacting in \\(\\mathcal{M}\\) produces trajectories, i.e., sequences of states and actions \\(\\tau = \\langle s_{0:T},a_{0:T - 1}\\rangle\\) where \\(s_0 = s_I\\) and \\(s_{t + 1}\\sim \\mathbf{P}(\\cdot |s_t,a_t)\\) for \\(t < T\\). The set of infinite trajectories of \\(\\mathcal{M}\\) is Traj. We assume \\(\\mathbf{AP}\\) and" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.103, + 0.413, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.254, + 0.42, + 0.281 + ], + "angle": 0, + "content": "(a) Execution of the latent policy \\(\\bar{\\pi}\\) in the original and latent MDPs, and local losses." + }, + { + "type": "image", + "bbox": [ + 0.456, + 0.103, + 0.803, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.439, + 0.254, + 0.816, + 0.281 + ], + "angle": 0, + "content": "(b) Parallel execution of the original RL policy \\(\\pi\\) in the original and latent MDPs, local losses, and steady-state regularizer." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.292, + 0.825, + 0.336 + ], + "angle": 0, + "content": "Figure 1: Latent flows: arrows represent (stochastic) mappings, the original (resp. latent) state-action space is spread along the blue (resp. green) area, and distances are depicted in red. Distilling \\(\\pi\\) into \\(\\bar{\\pi}\\) via flow (b) by minimizing \\(\\mathcal{W}_{\\xi_{\\pi}}\\) allows closing the gap between flows (a) and (b)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.363, + 0.825, + 0.421 + ], + "angle": 0, + "content": "labels being respectively one-hot and binary encoded. Given \\(\\mathsf{T} \\subseteq \\mathbf{AP}\\), we write \\(s \\models \\mathsf{T}\\) if \\(s\\) is labeled with \\(\\mathsf{T}\\), i.e., \\(\\ell(s) \\cap \\mathsf{T} \\neq \\emptyset\\), and \\(s \\models \\neg \\mathsf{T}\\) for \\(s \\models \\mathsf{T}\\). We refer to MDPs with continuous state or action spaces as continuous MDPs. In that case, we assume \\(\\mathcal{S}\\) and \\(\\mathcal{A}\\) are complete separable metric spaces equipped with a Borel \\(\\sigma\\)-algebra, and \\(\\ell^{-1}(\\mathsf{T})\\) is Borel-measurable for any \\(\\mathsf{T} \\subseteq \\mathbf{AP}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.431, + 0.827, + 0.576 + ], + "angle": 0, + "content": "Policies and stationary distributions. A (memoryless) policy \\(\\pi \\colon S \\to \\Delta(\\mathcal{A})\\) prescribes which action to choose at each step of the interaction. The set of memoryless policies of \\(\\mathcal{M}\\) is \\(\\Pi\\). The MDP \\(\\mathcal{M}\\) and \\(\\pi \\in \\Pi\\) induce an MC \\(\\mathcal{M}_{\\pi}\\) with unique probability measure \\(\\mathbb{P}_{\\pi}^{\\mathcal{M}}\\) on the Borel \\(\\sigma\\)-algebra over measurable subsets \\(\\varphi \\subseteq \\text{Traj}\\) (Puterman, 1994). We drop the superscript when the context is clear. Define \\(\\xi_{\\pi}^{t}(s' | s) = \\mathbb{P}_{\\pi}^{\\mathcal{M}_{s}}(\\{s_{0:\\infty}, a_{0:\\infty} | s_{t} = s'\\})\\) as the distribution giving the probability of being in each state of \\(\\mathcal{M}_{s}\\) after \\(t\\) steps. 
\\(B \\subseteq S\\) is a bottom strongly connected component (BSCC) of \\(\\mathcal{M}_{\\pi}\\) if (i) \\(B\\) is a maximal subset satisfying \\(\\xi_{\\pi}^{t}(s' | s) > 0\\) for any \\(s, s' \\in B\\) and some \\(t \\geqslant 0\\), and (ii) \\(\\mathbb{E}_{a \\sim \\pi(\\cdot|s)} \\mathbf{P}(B | s, a) = 1\\) for all \\(s \\in S\\). The unique stationary distribution of \\(B\\) is \\(\\xi_{\\pi} \\in \\Delta(B)\\). We write \\(s, a \\sim \\xi_{\\pi}\\) for sampling \\(s\\) from \\(\\xi_{\\pi}\\) then \\(a\\) from \\(\\pi\\). An MDP \\(\\mathcal{M}\\) is ergodic if for all \\(\\pi \\in \\Pi\\), the state space of \\(\\mathcal{M}_{\\pi}\\) consists of a unique aperiodic BSCC with \\(\\xi_{\\pi} = \\lim_{t \\to \\infty} \\xi_{\\pi}^{t}(\\cdot | s)\\) for all \\(s \\in S\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.585, + 0.827, + 0.76 + ], + "angle": 0, + "content": "Value objectives. Given \\(\\pi \\in \\Pi\\), the value of a state \\(s \\in S\\) is the expected value of a random variable obtained by running \\(\\pi\\) from \\(s\\). For a discount factor \\(\\gamma \\in [0,1]\\), we consider the following objectives. (i) Discounted return: we write \\(V_{\\pi}(s) = \\mathbb{E}_{\\pi}^{\\mathcal{M}_s}\\left[\\sum_{t=0}^{\\infty} \\gamma^t \\mathcal{R}(s_t, a_t)\\right]\\) for the expected discounted rewards accumulated along trajectories. The typical goal of an RL agent is to learn a policy \\(\\pi^\\star\\) that maximizes \\(V_{\\pi^\\star}(s_I)\\) through interactions with the (unknown) MDP; (ii) Reachability: let \\(\\mathsf{C}, \\mathsf{T} \\subseteq \\mathbf{AP}\\), the (constrained) reachability event is \\(\\mathsf{CUT} = \\{s_{0:\\infty}, a_{0:\\infty} | \\exists i \\in \\mathbb{N}, \\forall j < i, s_j \\models \\mathsf{C} \\wedge s_i \\models \\mathsf{T}\\} \\subseteq \\mathsf{Traj}\\). 
We write \\(V_{\\pi}^{\\varphi}(s) = \\mathbb{E}_{\\pi}^{\\mathcal{M}_s}\\left[\\gamma^{t^\\star} \\mathbf{1}_{\\langle s_{0:\\infty}, a_{0:\\infty} \\rangle \\in \\varphi}\\right]\\) for the discounted probability of satisfying \\(\\varphi = \\mathsf{CUT}\\), where \\(t^\\star\\) is the length of the shortest trajectory prefix that allows satisfying \\(\\varphi\\). Intuitively, this denotes the discounted return of remaining in a region of the MDP where states are labeled with \\(\\mathsf{C}\\), until visiting for the first time a goal state labeled with \\(\\mathsf{T}\\), and the return is the binary reward signal capturing this event. Safety w.r.t. failure states \\(\\mathsf{C}\\) can be expressed as the safety-constrained reachability to a destination \\(\\mathsf{T}\\) through \\(\\neg \\mathsf{CUT}\\). Notice that \\(V_{\\pi}^{\\varphi}(s) = \\mathbb{P}_{\\pi}^{\\mathcal{M}_s}(\\varphi)\\) when \\(\\gamma = 1\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.768, + 0.827, + 0.923 + ], + "angle": 0, + "content": "Latent MDP. Given the original (continuous, possibly unknown) environment model \\(\\mathcal{M}\\), a latent space model is another (smaller, explicit) MDP \\(\\overline{\\mathcal{M}} = \\langle \\overline{S}, \\overline{\\mathcal{A}}, \\overline{\\mathbf{P}}, \\overline{\\mathcal{R}}, \\bar{\\ell}, \\mathbf{AP}, \\bar{s}_I \\rangle\\) with state-action space linked to the original one via state and action embedding functions: \\(\\phi \\colon S \\to \\overline{S}\\) and \\(\\psi \\colon \\overline{S} \\times \\overline{A} \\to A\\). We refer to \\(\\langle \\overline{\\mathcal{M}}, \\phi, \\psi \\rangle\\) as a latent space model of \\(\\mathcal{M}\\) and \\(\\overline{\\mathcal{M}}\\) as its latent MDP. Our goal is to learn \\(\\langle \\overline{\\mathcal{M}}, \\phi, \\psi \\rangle\\) by optimizing an equivalence criterion between the two models. 
We assume that \\(d_{\\overline{S}}\\) is a metric on \\(\\overline{S}\\), and write \\(\\overline{\\Pi}\\) for the set of policies of \\(\\overline{\\mathcal{M}}\\) and \\(\\overline{V}_{\\overline{\\pi}}\\) for the values of running \\(\\overline{\\pi} \\in \\overline{\\Pi}\\) in \\(\\overline{\\mathcal{M}}\\). Remark 1 (Latent flow). The latent policy \\(\\overline{\\pi}\\) can be seen as a policy in \\(\\mathcal{M}\\) (cf. Fig. 1a): states passed to \\(\\overline{\\pi}\\) are first embedded with \\(\\phi\\) to the latent space, then the actions produced by \\(\\overline{\\pi}\\) are executed via \\(\\psi\\) in the original environment. Let \\(s \\in S\\), we write \\(\\bar{a} \\sim \\overline{\\pi}(\\cdot | s)\\) for \\(\\overline{\\pi}(\\cdot | \\phi(s))\\), then the reward and next state are respectively given by \\(\\mathcal{R}(s, \\bar{a}) = \\mathcal{R}(s, \\psi(\\phi(s), \\bar{a}))\\) and \\(s' \\sim \\mathbf{P}(\\cdot | s, \\bar{a}) = \\mathbf{P}(\\cdot | s, \\psi(\\phi(s), \\bar{a}))\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Local losses allow quantifying the distance between the original and latent reward/transition functions in the local setting, i.e., under a given state-action distribution \\(\\xi \\in \\Delta(S \\times \\overline{\\mathcal{A}})\\):" + }, + { + "type": "equation", + "bbox": [ + 0.213, + 0.137, + 0.783, + 0.163 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathcal {R}} ^ {\\xi} = \\underset {s, \\bar {a} \\sim \\xi} {\\mathbb {E}} \\left| \\mathcal {R} (s, \\bar {a}) - \\overline {{\\mathcal {R}}} (\\phi (s), \\bar {a}) \\right|, \\quad L _ {\\mathbf {P}} ^ {\\xi} = \\underset {s, \\bar {a} \\sim \\xi} {\\mathbb {E}} D \\big (\\phi \\mathbf {P} (\\cdot | s, \\bar {a}), \\overline {{\\mathbf {P}}} (\\cdot | \\phi (s), \\bar {a}) \\big)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.166, + 0.827, + 0.295 + ], + "angle": 0, + "content": "where \\(\\phi \\mathbf{P}(\\cdot \\mid s,\\bar{a})\\) is the distribution of drawing \\(s^\\prime \\sim \\mathbf{P}(\\cdot \\mid s,\\bar{a})\\) then embedding \\(\\bar{s}^{\\prime} = \\phi (s^{\\prime})\\), and \\(D\\) is a discrepancy measure. Fig 1a depicts the losses when states and actions are drawn from a stationary distribution \\(\\xi_{\\overline{\\pi}}\\) resulting from running \\(\\bar{\\pi}\\in \\overline{\\Pi}\\) in \\(\\mathcal{M}\\). 
In this work, we focus on the case where \\(D\\) is the Wasserstein distance \\(W_{d_{\\overline{s}}}\\): given two distributions \\(P,Q\\) over a measurable set \\(\\mathcal{X}\\) equipped with a metric \\(d\\), \\(W_{d}\\) is the solution of the optimal transport (OT) from \\(P\\) to \\(Q\\), i.e., the minimum cost of changing \\(P\\) into \\(Q\\) (Villani, 2009): \\(W_{d}(P,Q) = \\inf_{\\lambda \\in \\Lambda (P,Q)}\\mathbb{E}_{x,y\\sim \\lambda}d(x,y)\\), \\(\\Lambda (P,Q)\\) being the set of all couplings of \\(P\\) and \\(Q\\). The Kantorovich duality yields \\(W_{d}(P,Q) = \\sup_{f\\in \\mathcal{F}_{d}}\\mathbb{E}_{x\\sim P}f(x) - \\mathbb{E}_{x\\sim Q}f(y)\\) where \\(\\mathcal{F}_d\\) is the set of 1-Lipschitz functions. Local losses are related to a well-established behavioral equivalence between transition systems, called bisimulation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.303, + 0.827, + 0.402 + ], + "angle": 0, + "content": "Bisimulation. A bisimulation \\(\\mathcal{B}\\) on \\(\\mathcal{M}\\) is a behavioral equivalence between states \\(s_1, s_2 \\in S\\) so that, \\(s_1 \\mathcal{B} s_2\\) iff (i) \\(\\mathbf{P}(T \\mid s_1, a) = \\mathbf{P}(T \\mid s_2, a)\\), (ii) \\(\\ell(s_1) = \\ell(s_2)\\), and (iii) \\(\\mathcal{R}(s_1, a) = \\mathcal{R}(s_2, a)\\) for each action \\(a \\in \\mathcal{A}\\) and (Borel measurable) equivalence class \\(T \\in S / \\mathcal{B}\\). Properties of bisimulation include trajectory and value equivalence (Larsen & Skou, 1989; Givan et al., 2003). Requirements (ii) and (iii) can be respectively relaxed depending on whether we focus only on behaviors formalized through \\(\\mathbf{AP}\\) or rewards. The relation can be extended to compare two MDPs (e.g., \\(\\mathcal{M}\\) and \\(\\overline{\\mathcal{M}}\\)) by considering the disjoint union of their state space. We denote the largest bisimulation relation by \\(\\sim\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.407, + 0.828, + 0.557 + ], + "angle": 0, + "content": "Characterized by a logical family of functional expressions derived from a logic \\(\\mathcal{L}\\), bisimulation pseudometrics (Desharnais et al., 2004) generalize the notion of bisimilarity. More specifically, given a policy \\(\\pi \\in \\Pi\\), we consider a family \\(\\mathcal{F}\\) of real-valued functions parameterized by a discount factor \\(\\gamma\\) and defining the semantics of \\(\\mathcal{L}\\) in \\(\\mathcal{M}_{\\pi}\\). Such functional expressions allow to formalize discounted properties such as reachability, safety, as well as general \\(\\omega\\)-regular specifications (Chatterjee et al., 2010) and may include rewards as well (Ferns et al., 2014). The pseudometric \\(\\widetilde{d}_{\\pi}\\) is defined as the largest behavioral difference \\(\\widetilde{d}_{\\pi}(s_1,s_2) = \\sup_{f\\in \\mathcal{F}}|f(s_1) - f(s_2)|\\), and its kernel is bisimilarity: \\(\\widetilde{d}_{\\pi}(s_1,s_2) = 0\\) iff \\(s_1\\sim s_2\\). In particular, value functions are Lipschitz-continuous w.r.t. \\(\\widetilde{d}_{\\pi}\\): \\(|V_{\\pi}^{\\prime}(s_1) - V_{\\pi}^{\\prime}(s_2)|\\leqslant K\\widetilde{d}_{\\pi}(s_1,s_2)\\), where \\(K\\) is \\(^{1 / (1 - \\gamma)}\\) if rewards are included in \\(\\mathcal{F}\\) and 1 otherwise. To ensure the upcoming bisimulation guarantees, we make the following assumptions:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.558, + 0.825, + 0.59 + ], + "angle": 0, + "content": "Assumption 2.1. MDP \\(\\mathcal{M}\\) is ergodic, \\(\\operatorname{Im}(\\mathcal{R})\\) is a bounded space scaled in \\([-1/2, 1/2]\\), and the embedding function preserves the labels, i.e., \\(\\phi(s) = \\bar{s} \\implies \\ell(s) = \\bar{\\ell}(\\bar{s})\\) for \\(s \\in S\\), \\(\\bar{s} \\in \\bar{S}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.599, + 0.825, + 0.629 + ], + "angle": 0, + "content": "Note that the ergodicity assumption is compliant with episodic RL and a wide range of continuous learning tasks (see Huang 2020; Delgrange et al. 2022 for detailed discussions on this setting)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.637, + 0.825, + 0.696 + ], + "angle": 0, + "content": "Bisimulation bounds (Delgrange et al., 2022). \\(\\mathcal{M}\\) being set over continuous spaces with possibly unknown dynamics, evaluating \\(\\tilde{d}\\) can turn out to be particularly arduous, if not intractable. A solution is to evaluate the original and latent model bisimilarity via local losses: fix \\(\\bar{\\pi} \\in \\overline{\\Pi}\\), assume \\(\\overline{\\mathcal{M}}\\) is discrete, then given the induced stationary distribution \\(\\xi_{\\bar{\\pi}}\\) in \\(\\mathcal{M}\\), let \\(s_1, s_2 \\in S\\) with \\(\\phi(s_1) = \\phi(s_2)\\):" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.698, + 0.825, + 0.733 + ], + "angle": 0, + "content": "\\[\n\\underset {s \\sim \\xi_ {\\bar {\\pi}}} {\\mathbb {E}} \\widetilde {d} _ {\\bar {\\pi}} (s, \\phi (s)) \\leqslant \\frac {L _ {\\mathcal {R}} ^ {\\xi_ {\\bar {\\pi}}} + \\gamma L _ {\\mathbf {P}} ^ {\\xi_ {\\bar {\\pi}}}}{1 - \\gamma}, \\quad \\widetilde {d} _ {\\bar {\\pi}} (s _ {1}, s _ {2}) \\leqslant \\left(\\frac {L _ {\\mathcal {R}} ^ {\\xi_ {\\bar {\\pi}}} + \\gamma L _ {\\mathbf {P}} ^ {\\xi_ {\\bar {\\pi}}}}{1 - \\gamma}\\right) \\left(\\xi_ {\\bar {\\pi}} ^ {- 1} (s _ {1}) + \\xi_ {\\bar {\\pi}} ^ {- 1} (s _ {2})\\right). 
\\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.827, + 0.848 + ], + "angle": 0, + "content": "The two inequalities guarantee respectively the quality of the abstraction and representation: when local losses are small, (i) states and their embedding are bisimilarly close in average, and (ii) all states sharing the same discrete representation are bisimilarly close. The local losses and related bounds can be efficiently PAC-estimated. Our goal is to learn a latent model where the behaviors of the agent executing \\(\\bar{\\pi}\\) can be formally verified, and the bounds offer a confidence metric allowing to lift the guarantees obtained this way back to the original model \\(\\mathcal{M}\\), when the latter operates under \\(\\bar{\\pi}\\). We show in the following how to learn a latent space model by optimizing the aforementioned bounds, and distill policies \\(\\pi \\in \\Pi\\) obtained via any RL technique to a latent policy \\(\\bar{\\pi} \\in \\overline{\\Pi}\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.866, + 0.53, + 0.881 + ], + "angle": 0, + "content": "3 WASSERSTEIN AUTO-ENCODEDMDPs" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.894, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Fix \\(\\overline{\\mathcal{M}}_{\\theta} = \\langle \\overline{S},\\overline{\\mathcal{A}},\\overline{\\mathbf{P}}_{\\theta},\\overline{\\mathcal{R}}_{\\theta},\\bar{\\ell},\\mathbf{AP},\\bar{s}_I\\rangle\\) and \\(\\langle \\overline{\\mathcal{M}}_{\\theta},\\phi_{\\iota},\\psi_{\\theta}\\rangle\\) as a latent space model of \\(\\mathcal{M}\\) parameterized by \\(\\iota\\) and \\(\\theta\\). 
Our method relies on learning a behavioral model \\(\\xi_{\\theta}\\) of \\(\\mathcal{M}\\) from which we can" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.204 + ], + "angle": 0, + "content": "retrieve the latent space model and distill \\(\\pi\\). This can be achieved via the minimization of a suitable discrepancy between \\(\\xi_{\\theta}\\) and \\(\\mathcal{M}_{\\pi}\\). VAE-MDPs optimize a lower bound on the likelihood of the dynamics of \\(\\mathcal{M}_{\\pi}\\) using the Kullback-Leibler divergence, yielding (i) \\(\\overline{\\mathcal{M}}_{\\theta}\\), (ii) a distillation \\(\\bar{\\pi}_{\\theta}\\) of \\(\\pi\\), and (iii) \\(\\phi_{\\iota}\\) and \\(\\psi_{\\theta}\\). Local losses are not directly minimized, but rather variational proxies that do not offer theoretical guarantees during the learning process. To control the local losses minimization and exploit their theoretical guarantees, we present a novel autoencoder that incorporates them in its objective, derived from the OT. Proofs of the claims made in this Section are provided in Appendix A." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.219, + 0.408, + 0.233 + ], + "angle": 0, + "content": "3.1 THE OBJECTIVE FUNCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.828, + 0.35 + ], + "angle": 0, + "content": "Assume that \\(S\\), \\(\\mathcal{A}\\), and \\(\\operatorname{Im}(\\mathcal{R})\\) are respectively equipped with metrics \\(d_{\\mathcal{S}}\\), \\(d_{\\mathcal{A}}\\), and \\(d_{\\mathcal{R}}\\), we define the raw transition distance metric \\(\\vec{d}\\) as the component-wise sum of distances between states, actions, and rewards occurring of along transitions: \\(\\vec{d}(\\langle s_1, a_1, r_1, s_1' \\rangle, \\langle s_2, a_2, r_2, s_2' \\rangle) = d_{\\mathcal{S}}(s_1, s_2) + d_{\\mathcal{A}}(a_1, a_2) + d_{\\mathcal{R}}(r_1, r_2) + d_{\\mathcal{S}}(s_1', s_2')\\). Given Assumption 2.1, we consider the OT between local distributions, where traces are drawn from episodic RL processes or infinite interactions (we show in Appendix A.1 that considering the OT between trace-based distributions in the limit amounts to reasoning about stationary distributions). Our goal is to minimize \\(W_{\\vec{d}}(\\xi_{\\pi}, \\xi_{\\theta})\\) so that" + }, + { + "type": "equation", + "bbox": [ + 0.29, + 0.355, + 0.826, + 0.389 + ], + "angle": 0, + "content": "\\[\n\\xi_ {\\theta} (s, a, r, s ^ {\\prime}) = \\int_ {\\bar {S} \\times \\bar {A} \\times \\bar {S}} P _ {\\theta} (s, a, r, s ^ {\\prime} \\mid \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) d \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}} (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.395, + 0.825, + 0.482 + ], + "angle": 0, + "content": "where \\(P_{\\theta}\\) is a transition decoder and \\(\\bar{\\xi}_{\\overline{\\pi}_{\\theta}}\\) denotes the stationary distribution of the latent model \\(\\overline{\\mathcal{M}}_{\\theta}\\). As proved by Bousquet et al. 
(2017), this model allows to derive a simpler form of the OT: instead of finding the optimal coupling of (i) the stationary distribution \\(\\xi_{\\pi}\\) of \\(\\mathcal{M}_{\\pi}\\) and (ii) the behavioral model \\(\\xi_{\\theta}\\), in the primal definition of \\(W_{\\vec{d}}(\\xi_{\\pi},\\xi_{\\theta})\\), it is sufficient to find an encoder \\(q\\) whose marginal is given by \\(Q(\\bar{s},\\bar{a},\\bar{s}^{\\prime}) = \\mathbb{E}_{s,a,s^{\\prime}\\sim \\xi_{\\pi}}q(\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\mid s,a,s^{\\prime})\\) and identical to \\(\\xi_{\\pi}\\). This is summarized in the following Theorem, yielding a particular case of Wasserstein-autoencoder Tolstikhin et al. (2018):" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.485, + 0.825, + 0.529 + ], + "angle": 0, + "content": "Theorem 3.1. Let \\(\\xi_{\\theta}\\) and \\(P_{\\theta}\\) be respectively a behavioral model and transition decoder as defined in Eq. 2, \\(\\mathcal{G}_{\\theta} \\colon \\overline{\\mathcal{S}} \\to \\mathcal{S}\\) be a state-wise decoder, and \\(\\psi_{\\theta}\\) be an action embedding function. 
Assume \\(P_{\\theta}\\) is deterministic with Dirac function \\(G_{\\theta}(\\bar{s}, \\bar{a}, \\bar{s}') = \\langle \\mathcal{G}_{\\theta}(\\bar{s}), \\psi_{\\theta}(\\bar{s}, \\bar{a}), \\overline{\\mathcal{R}}_{\\theta}(\\bar{s}, \\bar{a}), \\mathcal{G}_{\\theta}(\\bar{s}') \\rangle\\), then" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.536, + 0.758, + 0.565 + ], + "angle": 0, + "content": "\\[\nW_{\\vec{d}}(\\xi_{\\pi},\\xi_{\\theta}) = \\inf_{q:Q = \\bar{\\xi}_{\\bar{\\pi}_{\\theta}}}\\mathbb{E}_{\\substack{s,a,r,s^{\\prime}\\sim \\xi_{\\pi}\\\\ \\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim q(\\cdot |s,a,s^{\\prime})}}\\vec{d}\\bigl(\\bigl\\langle s,a,r,s^{\\prime}\\bigr\\rangle ,G_{\\theta}\\bigl(\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\bigr)\\bigr).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.579, + 0.825, + 0.627 + ], + "angle": 0, + "content": "Henceforth, fix \\(\\phi_{\\iota} \\colon S \\to \\bar{S}\\) and \\(\\phi_{\\iota}^{\\mathcal{A}} \\colon \\bar{S} \\times \\mathcal{A} \\to \\Delta(\\overline{\\mathcal{A}})\\) as parameterized state and action encoders with \\(\\phi_{\\iota}(\\bar{s}, \\bar{a}, \\bar{s}' \\mid s, a, s') = \\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}} \\cdot \\phi_{\\iota}^{\\mathcal{A}}(\\bar{a} \\mid \\bar{s}, a) \\cdot \\mathbf{1}_{\\phi_{\\iota}(s') = \\bar{s}'}\\), and define the marginal encoder as \\(Q_{\\iota} = \\mathbb{E}_{s, a, s' \\sim \\xi_{\\pi}} \\phi_{\\iota}(\\cdot \\mid s, a, s')\\). 
Training the model components can be achieved via the objective:" + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.633, + 0.76, + 0.661 + ], + "angle": 0, + "content": "\\[\n\\min_{\\iota ,\\theta}\\underset {s,a,r,s^{\\prime}\\sim \\xi_{\\pi}}{\\mathbb{E}}\\underset {\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim \\phi_{\\iota}(\\cdot |s,a,s^{\\prime})}{\\mathbb{E}} \\vec{d}\\bigl(\\bigl\\langle s, a,r,s^{\\prime}\\bigr\\rangle ,G_{\\theta}\\bigl(\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\bigr)\\bigr) + \\beta \\cdot D\\bigl(Q_{\\iota},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}}\\bigr),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.666, + 0.825, + 0.695 + ], + "angle": 0, + "content": "where \\(D\\) is an arbitrary discrepancy metric and \\(\\beta > 0\\) a hyperparameter. Intuitively, the encoder \\(\\phi_{\\iota}\\) can be learned by enforcing its marginal distribution \\(Q_{\\iota}\\) to match \\(\\bar{\\xi}_{\\overline{\\pi}_{\\theta}}\\) through this discrepancy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.697, + 0.825, + 0.727 + ], + "angle": 0, + "content": "Remark 2. If \\(\\mathcal{M}\\) has a discrete action space, then learning \\(\\overline{\\mathcal{A}}\\) is not necessary. We can set \\(\\overline{\\mathcal{A}} = \\mathcal{A}\\) using identity functions for the action encoder and decoder (details in Appendix A.2)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.827, + 0.807 + ], + "angle": 0, + "content": "When \\(\\pi\\) is executed in \\(\\mathcal{M}\\), observe that its parallel execution in \\(\\overline{\\mathcal{M}}_{\\theta}\\) is enabled by the action encoder \\(\\phi_{\\iota}^{A}\\): given an original state \\(s \\in S\\), \\(\\pi\\) first prescribes the action \\(a \\sim \\pi(\\cdot \\mid s)\\), which is then embedded in the latent space via \\(\\bar{a} \\sim \\phi_{\\iota}^{A}(\\cdot \\mid \\phi_{\\iota}(s), a)\\) (cf. Fig. 1b). 
This parallel execution, along with setting \\(D\\) to \\(W_{\\vec{d}}\\), yield an upper bound on the latent regularization, compliant with the bisimulation bounds. A two-fold regularizer is obtained thereby, defining the foundations of our objective function:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.81, + 0.829, + 0.859 + ], + "angle": 0, + "content": "Lemma 3.2. Define \\(\\mathcal{T}(\\bar{s},\\bar{a},\\bar{s}^{\\prime}) = \\mathbb{E}_{s,a\\sim \\xi_{\\pi}}[\\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}}\\cdot \\phi_{\\iota}^{A}(\\bar{a}\\mid \\bar{s},a)\\cdot \\overline{\\mathbf{P}}_{\\theta}(\\bar{s}^{\\prime}\\mid \\bar{s},\\bar{a})]\\) as the distribution of drawing state-action pairs from interacting with \\(\\mathcal{M}\\), embedding them to the latent spaces, and finally letting them transition to their successor state in \\(\\overline{\\mathcal{M}}_{\\theta}\\). Then, \\(W_{\\vec{d}}(Q_{\\iota},\\bar{\\xi}_{\\overline{\\pi}_{\\theta}})\\leqslant W_{\\vec{d}}(\\bar{\\xi}_{\\overline{\\pi}_{\\theta}},\\mathcal{T}) + L_{\\mathbf{P}}^{\\xi_{\\pi}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.805, + 0.883 + ], + "angle": 0, + "content": "We therefore define the \\( \\mathrm{W}^2\\mathrm{AE} \\)-MDP (Wasserstein-Wasserstein auto-encoded MDP) objective as:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.889, + 0.819, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\min_{\\iota ,\\theta}\\mathbb{E}_{\\substack{s,a,s^{\\prime}\\sim \\xi_{\\pi}\\\\ \\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim \\phi_{\\iota}(\\cdot |s,a,s^{\\prime})}}\\left[d_{\\mathcal{S}}(s,\\mathcal{G}_{\\theta}(\\bar{s})) + d_{\\mathcal{A}}(a,\\psi_{\\theta}(\\bar{s},\\bar{a})) + d_{\\mathcal{S}}\\big(s^{\\prime},\\mathcal{G}_{\\theta}\\big(\\bar{s}^{\\prime}\\big)\\big)\\right] + L_{\\mathcal{R}}^{\\xi_{\\pi}} + \\beta \\cdot (\\mathcal{W}_{\\xi_{\\pi}} + L_{\\mathbf{P}}^{\\xi_{\\pi}}),\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + 
"content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.108, + 0.494, + 0.124 + ], + "angle": 0, + "content": "Algorithm 1: Wasserstein\\(^2\\) Auto-Encoded MDP" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.126, + 0.8, + 0.395 + ], + "angle": 0, + "content": "Input: batch size \\(N\\), max. step \\(T\\), no. of regularizer updates \\(m\\), penalty coefficient \\(\\delta > 0\\) for \\(t = 1\\) to \\(T\\) do \nfor \\(i = 1\\) to \\(N\\) do \nSample a transition \\(s_i, a_i, r_i, s_i'\\) from the original environment via \\(\\xi_{\\pi}\\) \nEmbed the transition into the latent space by drawing \\(\\bar{s}_i, \\bar{a}_i, \\bar{s}_i'\\) from \\(\\phi_\\iota(\\cdot \\mid s_i, a_i, s_i')\\) \nMake the latent space model transition to the next latent state: \\(\\bar{s}_i^\\star \\sim \\overline{\\mathbf{P}}_\\theta(\\cdot \\mid \\bar{s}_i, \\bar{a}_i)\\) \nSample a latent transition from \\(\\bar{\\xi}_{\\overline{\\pi}_\\theta} \\colon z_i \\sim \\bar{\\xi}_{\\overline{\\pi}_\\theta}, \\bar{a}_i' \\sim \\overline{\\pi}_\\theta(\\cdot \\mid z_i)\\), and \\(z_i' \\sim \\overline{\\mathbf{P}}_\\theta(\\cdot \\mid z_i, \\bar{a}_i')\\) \n\\(\\mathcal{W} \\gets \\sum_{i=1}^{N} \\varphi_\\omega^\\xi(\\bar{s}_i, \\bar{a}_i, \\bar{s}_i^\\star) - \\varphi_\\omega^\\xi(z_i, \\bar{a}_i', z_i') + \\varphi_\\omega^{\\mathbf{P}}(s_i, a_i, \\bar{s}_i, \\bar{a}_i, \\bar{s}_i') - \\varphi_\\omega^{\\mathbf{P}}(s_i, a_i, \\bar{s}_i, \\bar{a}_i, \\bar{s}_i^\\star)\\) \n\\(P \\gets \\sum_{i=1}^{N} \\mathrm{GP}\\big(\\varphi_\\omega^\\xi, \\langle \\bar{s}_i, \\bar{a}_i, \\bar{s}_i^\\star \\rangle, \\langle z_i, \\bar{a}_i', z_i' \\rangle\\big) + \\mathrm{GP}\\big(x \\mapsto \\varphi_\\omega^{\\mathbf{P}}(s_i, a_i, \\bar{s}_i, \\bar{a}_i, x), \\bar{s}_i', \\bar{s}_i^\\star\\big)\\) \nUpdate the Lipschitz 
networks parameters \\(\\omega\\) by ascending \\(1/N \\cdot (\\beta \\mathcal{W} - \\delta P)\\) \nif \\(t\\) mod \\(m = 0\\) then \n\\(\\mathcal{L} \\gets \\sum_{i=1}^{N} d_{\\mathcal{S}}(s_i, \\mathcal{G}_{\\theta}(\\bar{s}_i)) + d_{\\mathcal{A}}(a_i, \\psi_{\\theta}(\\bar{s}_i, \\bar{a}_i)) + d_{\\mathcal{R}}(r_i, \\overline{\\mathcal{R}}_{\\theta}(\\bar{s}_i, \\bar{a}_i)) + d_{\\mathcal{S}}(s_i', \\mathcal{G}_{\\theta}(\\bar{s}_i'))\\) \nUpdate the latent space model parameters \\(\\langle \\iota, \\theta\\rangle\\) by descending \\(1/N \\cdot (\\mathcal{L} + \\beta \\mathcal{W})\\) \nfunction \\(\\mathrm{GP}(\\varphi_\\omega, x, y)\\) \n\\(\\epsilon \\sim U(0,1)\\); \\(\\tilde{x} \\gets \\epsilon x + (1 - \\epsilon)y\\) \nreturn (\\(\\|\\nabla_{\\tilde{x}}\\varphi_{\\omega}(\\tilde{x})\\| - 1)^2\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.424, + 0.827, + 0.571 + ], + "angle": 0, + "content": "where \\(\\mathcal{W}_{\\xi_{\\pi}} = W_{\\vec{d}}\\big(\\mathcal{T},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}}\\big)\\) and \\(L_{\\mathbf{P}}^{\\xi_{\\pi}}\\) are respectively called steady-state and transition regularizers. The former allows to quantify the distance between the stationary distributions respectively induced by \\(\\pi\\) in \\(\\mathcal{M}\\) and \\(\\bar{\\pi}_{\\theta}\\) in \\(\\overline{\\mathcal{M}}_{\\theta}\\), further enabling the distillation. The latter allows to learn the latent dynamics. Note that \\(L_{\\mathcal{R}}^{\\xi_{\\pi}}\\) and \\(L_{\\mathbf{P}}^{\\xi_{\\pi}}\\) — set over \\(\\xi_{\\pi}\\) instead of \\(\\xi_{\\bar{\\pi}_{\\theta}}\\) — are not sufficient to ensure the bisimulation bounds (Eq. 1): running \\(\\pi\\) in \\(\\overline{\\mathcal{M}}_{\\theta}\\) depends on the parallel execution of \\(\\pi\\) in the original model, which does not permit its (conventional) verification. 
Breaking this dependency is enabled by learning the distillation \\(\\bar{\\pi}_{\\theta}\\) through \\(\\mathcal{W}_{\\xi_{\\pi}}\\), as shown in Fig. 1b: minimizing \\(\\mathcal{W}_{\\xi_{\\pi}}\\) allows to make \\(\\xi_{\\pi}\\) and \\(\\bar{\\xi}_{\\bar{\\pi}_{\\theta}}\\) closer together, further bridging the gap of the discrepancy between \\(\\pi\\) and \\(\\bar{\\pi}_{\\theta}\\). At any time, recovering the local losses along with the linked bisimulation bounds in the objective function of the \\(\\mathrm{W}^{2}\\mathrm{AE}\\)-MDP is allowed by considering the latent policy resulting from this distillation:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.574, + 0.825, + 0.604 + ], + "angle": 0, + "content": "Theorem 3.3. Assume that traces are generated by running a latent policy \\(\\bar{\\pi} \\in \\overline{\\Pi}\\) in the original environment and let \\(d_{\\mathcal{R}}\\) be the usual Euclidean distance, then the \\(W^{2}\\)AE-MDP objective is" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.611, + 0.766, + 0.637 + ], + "angle": 0, + "content": "\\[\n\\min_{\\iota ,\\theta}\\mathbb{E}_{s,s^{\\prime}\\sim \\xi_{\\overline{\\pi}}}\\left[d_{\\mathcal{S}}(s,\\mathcal{G}_{\\theta}(\\phi_{\\iota}(s))) + d_{\\mathcal{S}}\\big(s^{\\prime},\\mathcal{G}_{\\theta}\\big(\\phi_{\\iota}\\big(s^{\\prime}\\big)\\big)\\big)\\right] + L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}} + \\beta \\cdot (\\mathcal{W}_{\\xi_{\\overline{\\pi}}} + L_{\\mathbf{P}}^{\\xi_{\\overline{\\pi}}}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.657, + 0.825, + 0.687 + ], + "angle": 0, + "content": "Optimizing the regularizers is enabled by the dual form of the OT: we introduce two parameterized networks, \\(\\varphi_{\\omega}^{\\xi}\\) and \\(\\varphi_{\\omega}^{\\mathbf{P}}\\), constrained to be 1-Lipschitz and trained to attain the supremum of the dual:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.693, + 0.815, + 0.722 + ], + "angle": 0, + "content": 
"\\[\n\\mathcal {W} _ {\\xi_ {\\pi}} (\\omega) = \\max _ {\\omega} \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\iota} ^ {\\mathcal {A}} (\\cdot | \\phi_ {\\iota} (s), a)} {\\mathbb {E}} \\underset {\\bar {s} ^ {\\star} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\xi} (\\phi_ {\\iota} (s), \\bar {a}, \\bar {s} ^ {\\star}) - \\underset {z, \\bar {a} ^ {\\prime}, z ^ {\\prime} \\sim \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\xi} (z, \\bar {a} ^ {\\prime}, z ^ {\\prime})\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.73, + 0.799, + 0.763 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} (\\omega) = \\max _ {\\omega} \\underset {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} (\\cdot | s, a, s ^ {\\prime})} {\\mathbb {E}} \\left[ \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) - \\underset {\\bar {s} ^ {\\star} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\star}) \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.769, + 0.825, + 0.827 + ], + "angle": 0, + "content": "Details to derive this tractable form of \\(L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)\\) are in Appendix A.5. The networks are constrained via the gradient penalty approach of Gulrajani et al. (2017), leveraging that any differentiable function is 1-Lipschitz iff it has gradients with norm at most 1 everywhere (we show in Appendix A.6 this is still valid for relaxations of discrete spaces). The final learning process is presented in Algorithm 1." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.843, + 0.404, + 0.857 + ], + "angle": 0, + "content": "3.2 DISCRETE LATENT SPACES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "To enable the verification of latent models supported by the bisimulation guarantees of Eq. 1, we focus on the special case of discrete latent space models. Our approach relies on continuous relaxation of discrete random variables, regulated by some temperature parameter(s) \\(\\lambda\\): discrete random variables are retrieved as \\(\\lambda \\rightarrow 0\\), which amounts to applying a rounding operator. For training, we use the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.099, + 0.829, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.239, + 0.349, + 0.757, + 0.365 + ], + "angle": 0, + "content": "Figure 2: W\\(^2\\)AE-MDP architecture. Distances are depicted by red dotted lines." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.392, + 0.828, + 0.437 + ], + "angle": 0, + "content": "temperature-controlled relaxations to differentiate the objective and let the gradient flow through the network. When we deploy the latent policy in the environment and formally check the latent model, the zero-temperature limit is used. An overview of the approach is depicted in Fig. 2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.445, + 0.828, + 0.547 + ], + "angle": 0, + "content": "State encoder. We work with a binary representation of the latent states. First, this induces compact networks, able to deal with a large discrete space via a tractable number of parameter variables. 
But most importantly, this ensures that Assumption 2.1 is satisfied: let \\( n = \\log_2|\\bar{S}| \\), we reserve \\( |\\mathbf{AP}| \\) bits in \\( \\bar{S} \\) and each time \\( s\\in S \\) is passed to \\( \\phi_{\\iota} \\), \\( n - |\\mathbf{AP}| \\) bits are produced and concatenated with \\( \\ell (s) \\), ensuring a perfect reconstruction of the labels and further bisimulation bounds. To produce Bernoulli variables, \\( \\phi_{\\iota} \\) deterministically maps \\( s \\) to a latent code \\( z \\), passed to the Heaviside \\( H(z) = \\mathbf{1}_{z > 0} \\). We train \\( \\phi_{\\iota} \\) by using the smooth approximation \\( H_{\\lambda}(z) = \\sigma (2z / \\lambda) \\), satisfying \\( H = \\lim_{\\lambda \\to 0}H_{\\lambda} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.555, + 0.829, + 0.698 + ], + "angle": 0, + "content": "Latent distributions. Besides the discontinuity of their latent image space, a major challenge of optimizing over discrete distributions is sampling, required to be a differentiable operation. We circumvent this by using concrete distributions (Jang et al., 2017; Maddison et al., 2017): the idea is to sample reparameterizable random variables from \\(\\lambda\\)-parameterized distributions, and applying a differentiable, nonlinear operator in downstream. We use the Gumbel softmax trick to sample from distributions over (one-hot encoded) latent actions \\((\\phi_{\\iota}^{A}, \\bar{\\pi}_{\\theta})\\). For binary distributions \\((\\overline{\\mathbf{P}}_{\\theta}, \\bar{\\xi}_{\\bar{\\pi}_{\\theta}})\\), each relaxed Bernoulli with logit \\(\\alpha\\) is retrieved by drawing a logistic random variable located in \\(\\alpha/\\lambda\\) and scaled to \\(1/\\lambda\\), then applying a sigmoid in downstream. We emphasize that this trick alone (as used by Corneil et al. 2018; Delgrange et al. 
2022) is not sufficient: it yields independent Bernoullis, being too restrictive in general, which prevents from learning sound transition dynamics (cf. Example 1)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.711, + 0.554, + 0.852 + ], + "angle": 0, + "content": "Example 1. Let \\(\\overline{\\mathcal{M}}\\) be the discrete MC of Fig. 3. In one-hot, \\(\\mathbf{AP} = \\{\\text{goal}: \\langle 1,0\\rangle\\), unsafe: \\(\\langle 0,1\\rangle\\}\\). We assume that 3 bits are used for the (binary) state space, with \\(\\bar{S} = \\{\\bar{s}_0:\\langle 0,0,0\\rangle,\\bar{s}_1:\\langle 1,0,0\\rangle,\\bar{s}_2:\\langle 0,1,0\\rangle,\\bar{s}_3:\\langle 0,1,1\\rangle\\}\\) (the two first bits are reserved for the labels). Considering each bit as being independent is not sufficient to learn \\(\\overline{\\mathbf{P}}\\): the optimal estimation \\(\\overline{\\mathbf{P}}_{\\theta^*}(\\cdot \\mid \\bar{s}_0)\\) is in that case represented by the independent Bernoulli vector \\(\\mathbf{b} = \\langle 1 / 2,1 / 2,1 / 4\\rangle\\), giving the probability to go from \\(\\bar{s}_0\\) to each bit independently. This yields a poor estimation of" + }, + { + "type": "image", + "bbox": [ + 0.576, + 0.715, + 0.828, + 0.791 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.56, + 0.804, + 0.828, + 0.832 + ], + "angle": 0, + "content": "Figure 3: Markov Chain with four states; labels are drawn next to their state." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.852, + 0.829, + 0.885 + ], + "angle": 0, + "content": "the actual transition function: \\(\\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_0\\mid \\bar{s}_0) = (1 - \\mathbf{b}_1)\\cdot (1 - \\mathbf{b}_2)\\cdot (1 - \\mathbf{b}_3) = \\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_1\\mid \\bar{s}_0) = \\mathbf{b}_1\\cdot (1 - \\mathbf{b}_2)\\cdot (1 - \\mathbf{b}_3) = \\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_2\\mid \\bar{s}_0) = (1 - \\mathbf{b}_1)\\cdot \\mathbf{b}_2\\cdot (1 - \\mathbf{b}_3) = 3 / 16,\\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_3\\mid \\bar{s}_0) = (1 - \\mathbf{b}_1)\\cdot \\mathbf{b}_2\\cdot \\mathbf{b}_3 = 1 / 16.\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.893, + 0.828, + 0.927 + ], + "angle": 0, + "content": "We consider instead relaxed multivariate Bernoulli distributions by decomposing \\( P \\in \\Delta(\\bar{S}) \\) as a product of conditionals: \\( P(\\bar{s}) = \\prod_{i=1}^{n} P(\\bar{s}_i \\mid \\bar{s}_{1:i-1}) \\) where \\( \\bar{s}_i \\) is the \\( i^{\\text{th}} \\) entry (bit) of \\( \\bar{s} \\). 
We learn" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.106, + 0.302, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.305, + 0.106, + 0.416, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.418, + 0.106, + 0.53, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.535, + 0.106, + 0.642, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.643, + 0.106, + 0.819, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.181, + 0.753, + 0.198 + ], + "angle": 0, + "content": "(a) \\(\\mathrm{W}^2\\mathrm{AE}\\)-MDP objective: reconstruction loss, transition and steady-state regularizers" + }, + { + "type": "image", + "bbox": [ + 0.187, + 0.201, + 0.311, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.201, + 0.421, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.201, + 0.531, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.535, + 0.201, + 0.644, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.201, + 0.819, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.209, + 0.276, + 0.787, + 0.291 + ], + "angle": 0, + "content": "(b) PAC local losses approximation for an error of at most \\(10^{-2}\\) and probability confidence 0.955" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.295, + 0.297, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", 
+ "bbox": [ + 0.303, + 0.295, + 0.411, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.417, + 0.295, + 0.526, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.295, + 0.641, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.295, + 0.819, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.37, + 0.825, + 0.385 + ], + "angle": 0, + "content": "(c) Episode return obtained when executing the distilled policy in the original MDP (averaged over 30 episodes)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.395, + 0.825, + 0.425 + ], + "angle": 0, + "content": "Figure 4: For each environment, we trained five different instances of the models with different random seeds: the solid line is the median and the shaded interval the interquartile range." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.45, + 0.827, + 0.537 + ], + "angle": 0, + "content": "such distributions by introducing a masked autoregressive flow (MAF, Papamakarios et al. 2017) for relaxed Bernoullis via the recursion: \\(\\bar{s}_i = \\sigma(l_i + \\alpha_i / \\lambda)\\), where \\(l_i \\sim \\mathrm{Logistic}(0,1)\\), \\(\\alpha_i = f_i(\\bar{s}_{1:i-1})\\), and \\(f\\) is a MADE (Germain et al., 2015), a feedforward network implementing the conditional output dependency on the inputs via a mask that only keeps the necessary connections to enforce the conditional property. We use this MAF to model \\(\\overline{\\mathbf{P}}_\\theta\\) and the dynamics related to the labels in \\(\\bar{\\xi}_{\\overline{\\pi}_\\theta}\\). We fix the logits of the remaining \\(n - |\\mathbf{AP}|\\) bits to 0 to allow for a fairly distributed latent space." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.555, + 0.329, + 0.57 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.586, + 0.827, + 0.701 + ], + "angle": 0, + "content": "We evaluate the quality of latent space models learned and policies distilled through \\(\\mathrm{W}^2\\mathrm{AE}\\)-MDPs. To do so, we first trained deep-RL policies (DQN, Mnih et al. 2015 on discrete, and SAC, Haarnoja et al. 2018 on continuous action spaces) for various OpenAI benchmarks (Brockman et al., 2016), which we then distill via our approach (Figure 4). We thus evaluate (a) the \\(\\mathrm{W}^2\\mathrm{AE}\\)-MDP training metrics, (b) the abstraction and representation quality via PAC local losses upper bounds (Delgrange et al., 2022), and (c) the distilled policy performance when deployed in the original environment. The confidence metrics and performance are compared with those of VAE-MDPs. Finally, we formally verify properties in the latent model. The exact setting to reproduce our results is in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.708, + 0.828, + 0.781 + ], + "angle": 0, + "content": "Learning metrics. The objective (Fig. 4a) is a weighted sum of the reconstruction loss and the two Wasserstein regularizers. The choice of \\(\\beta\\) defines the optimization direction. In contrast to VAEs (cf. Appendix C), WAEs indeed naturally avoid posterior collapse (Tolstikhin et al., 2018), indicating that the latent space is consistently distributed. Optimizing the objective (Fig. 4a) effectively allows minimizing the local losses (Fig. 4b) and recovering the performance of the original policy (Fig. 4c)." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.791, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Local losses. 
For V- and WAEs, we formally evaluate PAC upper bounds on \\(L_{\\mathcal{R}}^{\\xi_{\\bar{\\pi}_{\\theta}}}\\) and \\(L_{\\mathbf{P}}^{\\xi_{\\bar{\\pi}_{\\theta}}}\\) via the algorithm of Delgrange et al. (2022) (Fig. 4b). The lower the local losses, the closer \\(\\mathcal{M}\\) and \\(\\overline{\\mathcal{M}}_{\\theta}\\) are in terms of behaviors induced by \\(\\bar{\\pi}_{\\theta}\\) (cf. Eq. 1). In VAEs, the losses are evaluated on a transition function \\(\\hat{\\mathbf{P}}\\) obtained via frequency estimation of the latent transition dynamics (Delgrange et al., 2022), by reconstructing the transition model a posteriori and collecting data to estimate the transition probabilities (e.g., Bazille et al. 2020; Corneil et al. 2018). We thus also report the metrics for \\(\\hat{\\mathbf{P}}\\). Our bounds quickly converge to close values in general for \\(\\overline{\\mathbf{P}}_{\\theta}\\) and \\(\\hat{\\mathbf{P}}\\), whereas for VAEs, the convergence is slow and unstable, with \\(\\hat{\\mathbf{P}}\\) offering better bounds. We emphasize that WAEs do not require this additional reconstruction step to obtain losses that can be leveraged to assess the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.825, + 0.117 + ], + "angle": 0, + "content": "Table 1: Formal Verification of distilled policies. Values are computed for \\( \\gamma = {0.99} \\) (lower is better)." + }, + { + "type": "table", + "bbox": [ + 0.204, + 0.128, + 0.794, + 0.209 + ], + "angle": 0, + "content": "
Environmentstep (105)SA|S||A|LξπθR(PAC)LξπθP(PAC)||Vπθ||V̅πθ(¯sI)
CartPole1.2⊆ R4{1,2}51220.004996530.3996363.712130.0316655
MountainCar2.32⊆ R2{1,2}102420.01417630.3823232.837140
Acrobot4.3⊆ R6{1,2,3}819230.03476980.6494782.220060.0021911
LunarLander3.2⊆ R8[-1,1]21638430.02072050.1313570.03728830.0702039
Pendulum3.7⊆ R3[-2,2]819230.02667450.5395084.330060.0348492
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.231, + 0.827, + 0.274 + ], + "angle": 0, + "content": "quality of the model, in contrast to VAEs, where learning \\(\\overline{\\mathbf{P}}_{\\theta}\\) was performed via overly restrictive distributions, leading to poor estimation in general (cf. Ex. 1). Finally, when the distilled policies offer comparable performance (Fig. 4c), our bounds are either close to or better than those of VAEs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.283, + 0.828, + 0.452 + ], + "angle": 0, + "content": "Distillation. The bisimulation guarantees (Eq. 1) are only valid for \\(\\bar{\\pi}_{\\theta}\\), the policy under which formal properties can be verified. It is crucial that \\(\\bar{\\pi}_{\\theta}\\) achieves performance close to \\(\\pi\\), the original one, when deployed in the RL environment. We evaluate the performance of \\(\\bar{\\pi}_{\\theta}\\) via the undiscounted episode return \\(\\mathbf{R}_{\\bar{\\pi}_{\\theta}}\\) obtained by running \\(\\bar{\\pi}_{\\theta}\\) in the original model \\(\\mathcal{M}\\). We observe that \\(\\mathbf{R}_{\\bar{\\pi}_{\\theta}}\\) approaches faster the original performance \\(\\mathbf{R}_{\\pi}\\) for W- than VAEs: WAEs converge in a few steps for all environments, whereas the full learning budget is sometimes necessary with VAEs. The success in recovering the original performance emphasizes the representation quality guarantees (Eq. 1) induced by WAEs: when local losses are minimized, all original states that are embedded to the same representation are bisimilarly close. Distilling the policy over the new representation, albeit discrete and hence coarser, still achieves effective performance since \\(\\phi_{\\iota}\\) keeps only what is important to preserve behaviors, and thus values. 
Furthermore, the distillation can remove some non-robustness obtained during RL: \\(\\bar{\\pi}_{\\theta}\\) prescribes the same actions for bisimilarly close states, whereas this is not necessarily the case for \\(\\pi\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.46, + 0.828, + 0.648 + ], + "angle": 0, + "content": "Formal verification. To formally verify \\(\\overline{\\mathcal{M}}_{\\theta}\\), we implemented a value iteration (VI) engine, handling the neural network encoding of the latent space for discounted properties, which is one of the most popular algorithms for checking property probabilities in MDPs (e.g., Baier & Katoen 2008; Hensel et al. 2021; Kwiatkowska et al. 2022). We verify time-to-failure properties \\(\\varphi\\), often used to check the failure rate of a system (Pnueli, 1977) by measuring whether the agent fails before the end of the episode. Although simple, such properties highlight the applicability of our approach on reachability events, which are building blocks to verify MDPs (Baier & Katoen 2008; cf. Appendix B.7). In particular, we checked whether the agent reaches an unsafe position or angle (CartPole, LunarLander), does not reach its goal position (MountainCar, Acrobot), and does not reach and stay in a safe region of the system (Pendulum). Results are in Table 1: for each environment, we select the distilled policy which gives the best trade-off between performance (episode return) and abstraction quality (local losses). As extra confidence metric, we report the value difference \\(\\| V_{\\overline{\\pi}_{\\theta}} \\| = |V_{\\overline{\\pi}_{\\theta}}(s_I) - \\bar{V}_{\\overline{\\pi}_{\\theta}}(\\bar{s}_I)|\\) obtained by executing \\(\\overline{\\pi}_{\\theta}\\) in \\(\\mathcal{M}\\) and \\(\\overline{\\mathcal{M}}_{\\theta}\\) (\\(V_{\\overline{\\pi}_{\\theta}}(\\cdot)\\) is averaged while \\(\\bar{V}_{\\overline{\\pi}_{\\theta}}(\\cdot)\\) is formally computed)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.664, + 0.321, + 0.68 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.695, + 0.827, + 0.807 + ], + "angle": 0, + "content": "We presented WAE-MDPs, a framework for learning formally verifiable distillations of RL policies with bisimulation guarantees. The latter, along with the learned abstraction of the unknown continuous environment to a discrete model, enables the verification. Our method overcomes the limitations of VAE-MDPs and our results show that it outperforms the latter in terms of learning speed, model quality, and performance, in addition to being supported by stronger learning guarantees. As mentioned by Delgrange et al. (2022), distillation failure reveals the lack of robustness of original RL policies. In particular, we found that distilling highly noise-sensitive RL policies (such as robotics simulations, e.g., Todorov et al. 2012) is laborious, even though the result remains formally verifiable." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.813, + 0.829, + 0.926 + ], + "angle": 0, + "content": "We demonstrated the feasibility of our approach through the verification of reachability objectives, which are building blocks for stochastic model-checking (Baier & Katoen, 2008). Besides the scope of this work, the verification of general discounted \\(\\omega\\)-regular properties is theoretically allowed in our model via the reachability to components of standard constructions based on automata products (e.g., Baier et al. 2016; Sickert et al. 2016), and discounted games algorithms (Chatterjee et al., 2010). Beyond distillation, our results, supported by Thm. 3.3, suggest that our WAE-MDP can be used as a general latent space learner for RL, further opening possibilities to combine RL and formal methods online when no formal model is a priori known, and address this way safety in RL with guarantees." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.105, + 0.395, + 0.119 + ], + "angle": 0, + "content": "REPRODUCIBILITY STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.128, + 0.827, + 0.241 + ], + "angle": 0, + "content": "We referenced in the main text the Appendix parts presenting the proofs or additional details of every claim, Assumption, Lemma, and Theorem occurring in the paper. In addition, Appendix B is dedicated to the presentation of the setup, hyperparameters, and other extra details required for reproducing the results of Section 4. We provide the source code of the implementation of our approach in Supplementary material \\(^{1}\\), and we also provide the models saved during training that we used for model checking (i.e., reproducing the results of Table 1). Additionally, we present in a notebook (evaluation.html) videos demonstrating how our distilled policies behave in each environment, and code snippets showing how we formally verified the policies." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.255, + 0.329, + 0.269 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.279, + 0.825, + 0.337 + ], + "angle": 0, + "content": "This research received funding from the Flemish Government (AI Research Program) and was supported by the DESCARTES iBOF project. G.A. Perez is also supported by the Belgian FWO \"SAILor\" project (G030020N). We thank Raphael Avalos for his valuable feedback during the preparation of this manuscript." 
+ }, + { + "type": "title", + "bbox": [ + 0.174, + 0.355, + 0.288, + 0.37 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.378, + 0.827, + 0.435 + ], + "angle": 0, + "content": "Parand Alizadeh Alamdari, Guy Avni, Thomas A. Henzinger, and Anna Lukina. Formal methods with a touch of magic. In 2020 Formal Methods in Computer Aided Design, FMCAD 2020, Haifa, Israel, September 21-24, 2020, pp. 138-147. IEEE, 2020. doi: 10.34727/2020/isbn.978-3-85448-042-6_21. URL https://doi.org/10.34727/2020/isbn.978-3-85448-042-6_21." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.442, + 0.828, + 0.513 + ], + "angle": 0, + "content": "Alexander A. Alemi, Ben Poole, Ian Fischer, Joshua V. Dillon, Rif A. Saurous, and Kevin Murphy. Fixing a broken ELBO. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 159-168. PMLR, 2018. URL http://proceedings.mlr.press/v80/alemi18a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.52, + 0.829, + 0.619 + ], + "angle": 0, + "content": "Mohammed Alshiekh, Roderick Bloem, Rüdiger Ehlers, Bettina Könighofer, Scott Niekum, and Ufuk Topcu. Safe reinforcement learning via shielding. In Sheila A. McIlraith and Kilian Q. Weinberger (eds.), Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, pp. 2669-2678. AAAI Press, 2018. URL https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/view/17211." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.626, + 0.829, + 0.698 + ], + "angle": 0, + "content": "Martín Arjovsky, Soumith Chintala, and Léon Bottou. 
Wasserstein generative adversarial networks. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pp. 214-223. PMLR, 2017. URL http://proceedings.mlr.press/v70/arjovsky17a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.704, + 0.829, + 0.775 + ], + "angle": 0, + "content": "Edoardo Bacci and David Parker. Probabilistic guarantees for safe deep reinforcement learning. In Nathalie Bertrand and Nils Jansen (eds.), Formal Modeling and Analysis of Timed Systems - 18th International Conference, FORMATS 2020, Vienna, Austria, September 1-3, 2020, Proceedings, volume 12288 of LNCS, pp. 231-248. Springer, 2020. doi: 10.1007/978-3-030-57628-8_14. URL https://doi.org/10.1007/978-3-030-57628-8_14." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.782, + 0.825, + 0.811 + ], + "angle": 0, + "content": "Christel Baier and Joost-Pieter Katoen. Principles of model checking. MIT Press, 2008. ISBN 978-0-262-02649-9." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.818, + 0.829, + 0.903 + ], + "angle": 0, + "content": "Christel Baier, Stefan Kiefer, Joachim Klein, Sascha Klüppelholz, David Müller, and James Worrell. Markov chains and unambiguous büchi automata. In Swarat Chaudhuri and Azadeh Farzan (eds.), Computer Aided Verification - 28th International Conference, CAV 2016, Toronto, ON, Canada, July 17-23, 2016, Proceedings, Part I, volume 9779 of Lecture Notes in Computer Science, pp. 23-42. Springer, 2016. doi: 10.1007/978-3-319-41528-4_2. URL https://doi.org/10.1007/978-3-319-41528-4_2." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.378, + 0.829, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.91, + 0.652, + 0.925 + ], + "angle": 0, + "content": "available at https://github.com/florentdelgrange/wae_mdp" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.189 + ], + "angle": 0, + "content": "Hugo Bazille, Blaise Genest, Cyrille Jégourel, and Jun Sun. Global PAC bounds for learning discrete time markov chains. In Shuvendu K. Lahiri and Chao Wang (eds.), Computer Aided Verification - 32nd International Conference, CAV 2020, Los Angeles, CA, USA, July 21-24, 2020, Proceedings, Part II, volume 12225 of Lecture Notes in Computer Science, pp. 304-326. Springer, 2020. doi: 10.1007/978-3-030-53291-8\\_17. URL https://doi.org/10.1007/978-3-030-53291-8_17." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.197, + 0.826, + 0.227 + ], + "angle": 0, + "content": "O. Bousquet, S. Gelly, I. Tolstikhin, Carl-Johann Simon-Gabriel, and B. Schölkopf. From optimal transport to generative modeling: the vegan cookbook. arXiv: Machine Learning, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.235, + 0.826, + 0.279 + ], + "angle": 0, + "content": "Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym. CoRR, abs/1606.01540, 2016. URL http://arxiv.org/abs/1606.01540." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.287, + 0.826, + 0.346 + ], + "angle": 0, + "content": "Steven Carr, Nils Jansen, and Ufuk Topcu. Verifiable rnn-based policies for pomdps under temporal logic constraints. 
In Christian Bessiere (ed.), Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI 2020, pp. 4121-4127. ijcai.org, 2020. doi: 10.24963/ijcai.2020/570. URL https://doi.org/10.24963/ijcai.2020/570." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.353, + 0.826, + 0.451 + ], + "angle": 0, + "content": "Pablo Samuel Castro, Tyler Kastner, Prakash Panangaden, and Mark Rowland. Mico: Improved representations via sampling-based state similarity for markov decision processes. In Marc'Aurelio Ranzato, Alina Beygelzimer, Yann N. Dauphin, Percy Liang, and Jennifer Wortman Vaughan (eds.), Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pp. 30113-30126, 2021. URL https://proceedings.neurips.cc/paper/2021/hash/fd06b8ea02fe5b1c2496fe1700e9d16c-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.46, + 0.826, + 0.531 + ], + "angle": 0, + "content": "Glenn Ceusters, Roman Cantú Rodríguez, Alberte Bouso García, Rüdiger Franke, Geert Deconinck, Lieve Helsen, Ann Nowé, Maarten Messagie, and Luis Ramirez Camargo. Model-predictive control and reinforcement learning in multi-energy system case studies. Applied Energy, 303:117634, 2021. ISSN 0306-2619. doi: https://doi.org/10.1016/j.apenergy.2021.117634. URL https://www.sciencedirect.com/science/article/pii/S0306261921010011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.539, + 0.829, + 0.582 + ], + "angle": 0, + "content": "Krishnendu Chatterjee, Luca de Alfaro, Rupak Majumdar, and Vishwanath Raman. Algorithms for game metrics (full version). Log. Methods Comput. Sci., 6(3), 2010. URL http://arxiv.org/abs/0809.4326." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.591, + 0.829, + 0.676 + ], + "angle": 0, + "content": "Dane S. Corneil, Wulfram Gerstner, and Johanni Brea. 
Efficient model-based deep reinforcement learning with variational state tabulation. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 1057-1066. PMLR, 2018. URL http://proceedings.mlr.press/v80/corneil18a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.685, + 0.829, + 0.742 + ], + "angle": 0, + "content": "Florent Delgrange, Ann Nowé, and Guillermo A. Pérez. Distillation of rl policies with formal guarantees via variational abstraction of markov decision processes. Proceedings of the AAAI Conference on Artificial Intelligence, 36(6):6497-6505, Jun. 2022. doi: 10.1609/aaai.v36i6.20602. URL https://ojs.aaai.org/index.php/AAAI/article/view/20602." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.75, + 0.829, + 0.794 + ], + "angle": 0, + "content": "Josée Desharnais, Vineet Gupta, Radha Jagadeesan, and Prakash Panangaden. Metrics for labelled markov processes. Theor. Comput. Sci., 318(3):323-354, 2004. doi: 10.1016/j.tcs.2003.09.013. URL https://doi.org/10.1016/j.tcs.2003.09.013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.802, + 0.829, + 0.86 + ], + "angle": 0, + "content": "Jiri Fajtl, Vasileios Argyriou, Dorothy Monekosso, and Paolo Remagnino. Latent bernoulli autoencoder. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 2964-2974. PMLR, 2020. URL http://proceedings.mlr.press/v119/fajtl20a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Norm Ferns, Doina Precup, and Sophia Knight. Bisimulation for markov decision processes through families of functional expressions. In Franck van Breugel, Elham Kashefi, Catuscia Palamidessi, and Jan Rutten (eds.), Horizons of the Mind. 
A Tribute to Prakash Panangaden - Essays Dedicated to Prakash Panangaden on the Occasion of His 60th Birthday, volume 8464 of LNCS, pp. 319-342." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.103, + 0.826, + 0.133 + ], + "angle": 0, + "content": "Springer, 2014. doi: 10.1007/978-3-319-06880-0_17. URL https://doi.org/10.1007/978-3-319-06880-0_17." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.14, + 0.828, + 0.224 + ], + "angle": 0, + "content": "Carles Gelada, Saurabh Kumar, Jacob Buckman, Ofir Nachum, and Marc G. Bellemare. Deepmdp: Learning continuous latent space models for representation learning. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 2170-2179. PMLR, 2019. URL http://proceedings.mlr.press/v97/gelada19a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.231, + 0.827, + 0.303 + ], + "angle": 0, + "content": "Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: masked autoencoder for distribution estimation. In Francis R. Bach and David M. Blei (eds.), Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pp. 881-889. JMLR.org, 2015. URL http://proceedings.mlr.press/v37/germain15.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.309, + 0.825, + 0.354 + ], + "angle": 0, + "content": "Robert Givan, Thomas L. 
Dean, and Matthew Greig. Equivalence notions and model minimization in markov decision processes. Artif. Intell., 147(1-2):163-223, 2003. doi: 10.1016/S0004-3702(02)00376-4. URL https://doi.org/10.1016/S0004-3702(02)00376-4." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.36, + 0.827, + 0.457 + ], + "angle": 0, + "content": "Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C. Courville. Improved training of wasserstein gans. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 5767-5777, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/892c3b1c6dcbd52936e27cbd0ff683d6-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.465, + 0.828, + 0.55 + ], + "angle": 0, + "content": "Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 1856-1865. PMLR, 2018. URL http://proceedings.mlr.press/v80/haarnoja18b.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.556, + 0.828, + 0.655 + ], + "angle": 0, + "content": "Mohammadhosein Hasanbeig, Natasha Yogananda Jeppu, Alessandro Abate, Tom Melham, and Daniel Kroening. Deepsynth: Automata synthesis for automatic task segmentation in deep reinforcement learning. 
In Thirty-Fifth AAAI Conference on Artificial Intelligence, AAAI 2021, Thirty-Third Conference on Innovative Applications of Artificial Intelligence, IAAI 2021, The Eleventh Symposium on Educational Advances in Artificial Intelligence, EAAI 2021, Virtual Event, February 2-9, 2021, pp. 7647-7656. AAAI Press, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/16935." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.662, + 0.828, + 0.718 + ], + "angle": 0, + "content": "Christian Hensel, Sebastian Junges, Joost-Pieter Katoen, Tim Quatmann, and Matthias Volk. The probabilistic model checker storm. International Journal on Software Tools for Technology Transfer, 2021. ISSN 1433-2787. doi: 10.1007/s10009-021-00633-z. URL https://doi.org/10.1007/s10009-021-00633-z." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.726, + 0.828, + 0.768 + ], + "angle": 0, + "content": "Matthew D. Hoffman, David M. Blei, Chong Wang, and John W. Paisley. Stochastic variational inference. J. Mach. Learn. Res., 14(1):1303-1347, 2013. URL http://dl.acm.org/citation.cfm?id=2502622." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.776, + 0.828, + 0.86 + ], + "angle": 0, + "content": "Bojun Huang. Steady state analysis of episodic reinforcement learning. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/hash/69bfa2aa2b7b139ff581a806abf0a886-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.868, + 0.828, + 0.924 + ], + "angle": 0, + "content": "Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. 
OpenReview.net, 2017. URL https://openreview.net/forum?id=rkE3y85ee." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.103, + 0.828, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.203 + ], + "angle": 0, + "content": "Nils Jansen, Bettina Konighofer, Sebastian Junges, Alex Serban, and Roderick Bloem. Safe Reinforcement Learning Using Probabilistic Shields (Invited Paper). In Igor Konnov and Laura Kovács (eds.), 31st International Conference on Concurrency Theory (CONCUR 2020), volume 171 of Leibniz International Proceedings in Informatics (LIPics), pp. 3:1-3:16, Dagstuhl, Germany, 2020. Schloss Dagstuhl-Leibniz-Zentrum für Informatik. ISBN 978-3-95977-160-3. doi: 10.4230/LIPics.CONCUR.2020.3. URL https://drops.dagstuhl.de/opus/volltexte/2020/12815." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.21, + 0.829, + 0.296 + ], + "angle": 0, + "content": "Sebastian Junges, Nils Jansen, Christian Dehnert, Ufuk Topcu, and Joost-Pieter Katoen. Safety-constrained reinforcement learning for mdps. In Marsha Chechik and Jean-François Raskin (eds.), Tools and Algorithms for the Construction and Analysis of Systems - 22nd International Conference, TACAS 2016, Eindhoven, The Netherlands, April 2-8, 2016, Proceedings, volume 9636 of LNCS, pp. 130-146. Springer, 2016. doi: 10.1007/978-3-662-49674-9_8. URL https://doi.org/10.1007/978-3-662-49674-9_8." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.303, + 0.829, + 0.361 + ], + "angle": 0, + "content": "Diederik P. Kingma and Max Welling. Auto-encoding variational bayes. 
In Yoshua Bengio and Yann LeCun (eds.), 2nd International Conference on Learning Representations, ICLR 2014, Banff, AB, Canada, April 14-16, 2014, Conference Track Proceedings, 2014. URL http://arxiv.org/abs/1312.6114." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.369, + 0.829, + 0.426 + ], + "angle": 0, + "content": "Marta Kwiatkowska, Gethin Norman, and David Parker. Probabilistic model checking and autonomy. Annual Review of Control, Robotics, and Autonomous Systems, 5(1):385-410, 2022. doi: 10.1146/annurev-control-042820-010947. URL https://doi.org/10.1146/annurev-control-042820-010947." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.434, + 0.829, + 0.491 + ], + "angle": 0, + "content": "Kim Guldstrand Larsen and Arne Skou. Bisimulation through probabilistic testing. In Conference Record of the Sixteenth Annual ACM Symposium on Principles of Programming Languages, Austin, Texas, USA, January 11-13, 1989, pp. 344-352. ACM Press, 1989. doi: 10.1145/75277.75307. URL https://doi.org/10.1145/75277.75307." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.499, + 0.829, + 0.612 + ], + "angle": 0, + "content": "Pieter J. K. Libin, Arno Moonens, Timothy Verstraeten, Fabian Perez-Sanjines, Niel Hens, Philippe Lemey, and Ann Nowé. Deep reinforcement learning for large-scale epidemic control. In Yuxiao Dong, Georgiana Ifrim, Dunja Mladenic, Craig Saunders, and Sofie Van Hoecke (eds.), Machine Learning and Knowledge Discovery in Databases. Applied Data Science and Demo Track - European Conference, ECML PKDD 2020, Ghent, Belgium, September 14-18, 2020, Proceedings, Part V, volume 12461 of Lecture Notes in Computer Science, pp. 155-170. Springer, 2020. doi: 10.1007/978-3-030-67670-4_10. URL https://doi.org/10.1007/978-3-030-67670-4_10." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.62, + 0.829, + 0.664 + ], + "angle": 0, + "content": "Michael L. Littman, Ufuk Topcu, Jie Fu, Charles Lee Isbell Jr., Min Wen, and James MacGlashan. 
Environment-independent task specifications via GLTL. CoRR, abs/1704.04341, 2017. URL http://arxiv.org/abs/1704.04341." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.672, + 0.829, + 0.73 + ], + "angle": 0, + "content": "Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The concrete distribution: A continuous relaxation of discrete random variables. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. URL https://openreview.net/forum?id=S1jE5L5gl." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.737, + 0.829, + 0.822 + ], + "angle": 0, + "content": "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A. Rusu, Joel Veness, Marc G. Bellemare, Alex Graves, Martin A. Riedmiller, Andreas Fidjeland, Georg Ostrovski, Stig Petersen, Charles Beattie, Amir Sadik, Ioannis Antonoglou, Helen King, Dharshan Kumaran, Daan Wierstra, Shane Legg, and Demis Hassabis. Human-level control through deep reinforcement learning. Nat., 518(7540):529-533, 2015. doi: 10.1038/nature14236. URL https://doi.org/10.1038/nature14236." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.83, + 0.829, + 0.859 + ], + "angle": 0, + "content": "Ann Nowe. Synthesis of \"safe\" fuzzy controllers based on reinforcement learning. PhD thesis, Vrije Universiteit Brussel, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.829, + 0.925 + ], + "angle": 0, + "content": "George Papamakarios, Iain Murray, and Theo Pavlakou. Masked autoregressive flow for density estimation. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. 
Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017," + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.104, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Long Beach, CA, USA, pp. 2338-2347, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/6c1da886822c67822bcf3679d04369fa-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.14, + 0.829, + 0.196 + ], + "angle": 0, + "content": "Amir Pnueli. The temporal logic of programs. In 18th Annual Symposium on Foundations of Computer Science, Providence, Rhode Island, USA, 31 October - 1 November 1977, pp. 46-57. IEEE Computer Society, 1977. doi: 10.1109/SFCS.1977.32. URL https://doi.org/10.1109/SFCS.1977.32." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.204, + 0.827, + 0.247 + ], + "angle": 0, + "content": "Martin L. Puterman. Markov Decision Processes: Discrete Stochastic Dynamic Programming. Wiley Series in Probability and Statistics. Wiley, 1994. ISBN 978-0-47161977-2. doi: 10.1002/9780470316887. URL https://doi.org/10.1002/9780470316887." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.254, + 0.828, + 0.325 + ], + "angle": 0, + "content": "Tao Ren, Jianwei Niu, Jiahe Cui, Zhenchao Ouyang, and Xuefeng Liu. An application of multi-objective reinforcement learning for efficient model-free control of canals deployed with iot networks. Journal of Network and Computer Applications, 182:103049, 2021. ISSN 1084-8045. doi: https://doi.org/10.1016/j.jnca.2021.103049. 
URL https://www.sciencedirect.com/science/article/pii/S1084804521000734." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.331, + 0.828, + 0.415 + ], + "angle": 0, + "content": "Salomon Sickert, Javier Esparza, Stefan Jaax, and Jan Kretínský. Limit-deterministic büchi automata for linear temporal logic. In Swarat Chaudhuri and Azadeh Farzan (eds.), Computer Aided Verification - 28th International Conference, CAV 2016, Toronto, ON, Canada, July 17-23, 2016, Proceedings, Part II, volume 9780 of Lecture Notes in Computer Science, pp. 312-332. Springer, 2016. doi: 10.1007/978-3-319-41540-6\\_17. URL https://doi.org/10.1007/978-3-319-41540-6_17." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.422, + 0.828, + 0.493 + ], + "angle": 0, + "content": "Thiago D. Simão, Nils Jansen, and Matthijs T. J. Spaan. Always safe: Reinforcement learning without safety constraint violations during training. In Frank Dignum, Alessio Lomuscio, Ulle Endriss, and Ann Nowé (eds.), AAMAS '21: 20th International Conference on Autonomous Agents and Multiagent Systems, Virtual Event, United Kingdom, May 3-7, 2021, pp. 1226-1235. ACM, 2021. URL https://dl.acm.org/doi/10.5555/3463952.3464094." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.5, + 0.828, + 0.543 + ], + "angle": 0, + "content": "Emanuel Todorov, Tom Erez, and Yuval Tassa. Mujoco: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pp. 5026-5033. IEEE, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.55, + 0.828, + 0.607 + ], + "angle": 0, + "content": "Ilya O. Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Schölkopf. Wasserstein autoencoders. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=HkL7n1-0b." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.614, + 0.828, + 0.643 + ], + "angle": 0, + "content": "John N. Tsitsiklis. Asynchronous stochastic approximation and q-learning. Mach. Learn., 16(3):185-202, 1994. doi: 10.1007/BF00993306. URL https://doi.org/10.1007/BF00993306." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.649, + 0.828, + 0.734 + ], + "angle": 0, + "content": "Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, 4-9 December 2017, Long Beach, CA, USA, pp. 6306-6315, 2017. URL http://papers.nips.cc/paper/7210-neural-discrete-representation-learning." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.741, + 0.828, + 0.784 + ], + "angle": 0, + "content": "Cédric Villani. Optimal Transport: Old and New. Springer Berlin Heidelberg, Berlin, Heidelberg, 2009. ISBN 978-3-540-71050-9. doi: 10.1007/978-3-540-71050-9_6. URL https://doi.org/10.1007/978-3-540-71050-9_6." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.791, + 0.828, + 0.861 + ], + "angle": 0, + "content": "Andrew M. Wells, Morteza Lahijanian, Lydia E. Kavraki, and Moshe Y. Vardi. Ltlf synthesis on probabilistic systems. In Jean-François Raskin and Davide Bresolin (eds.), Proceedings 11th International Symposium on Games, Automata, Logics, and Formal Verification, GandALF 2020, Brussels, Belgium, September 21-22, 2020, volume 326 of EPTCS, pp. 166-181, 2020. doi: 10.4204/EPTCS.326.11. URL https://doi.org/10.4204/EPTCS.326.11." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.868, + 0.828, + 0.925 + ], + "angle": 0, + "content": "Hongyu Zang, Xin Li, and Mingzhong Wang. Simsr: Simple distance-based state representations for deep reinforcement learning. 
Proceedings of the AAAI Conference on Artificial Intelligence, 36 (8):8997-9005, Jun. 2022. doi: 10.1609/aaai.v36i8.20883. URL https://ojs.aaai.org/index.php/AAAI/article/view/20883." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.829, + 0.162 + ], + "angle": 0, + "content": "Amy Zhang, Rowan Thomas McAllister, Roberto Calandra, Yarin Gal, and Sergey Levine. Learning invariant representations for reinforcement learning without reconstruction. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=-2FCwDKRREu." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.169, + 0.829, + 0.212 + ], + "angle": 0, + "content": "Shunkang Zhang, Yuan Gao, Yuling Jiao, Jin Liu, Yang Wang, and Can Yang. Wasserstein-wasserstein auto-encoders. CoRR, abs/1902.09323, 2019. URL http://arxiv.org/abs/1902.09323." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.103, + 0.829, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.267, + 0.119 + ], + "angle": 0, + "content": "APPENDIX" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.135, + 0.553, + 0.152 + ], + "angle": 0, + "content": "A THEORETICAL DETAILS ON WAE-MDPS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.167, + 0.431, + 0.181 + ], + "angle": 0, + "content": "A.1 THE DISCREPANCY MEASURE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.192, + 0.827, + 0.304 + ], + "angle": 0, + "content": "We show that reasoning about discrepancy measures between stationary distributions is sound in the context of infinite interaction and episodic RL processes. Let \\(P_{\\theta}\\) be a parameterized behavioral model that generate finite traces from the original environment (i.e., finite sequences of state, actions, and rewards of the form \\(\\langle s_{0:T},a_{0:T - 1},r_{0:T - 1}\\rangle\\)), our goal is to find the best parameter \\(\\theta\\) which offers the most accurate reconstruction of the original traces issued from the original model \\(\\mathcal{M}\\) operating under \\(\\pi\\). We demonstrate that, in the limit, considering the OT between trace-based distributions is equivalent to considering the OT between the stationary distribution of \\(\\mathcal{M}_{\\pi}\\) and the one of the behavioral model." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.311, + 0.727, + 0.325 + ], + "angle": 0, + "content": "Let us first formally recall the definition of the metric on the transitions of the MDP." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.335, + 0.827, + 0.381 + ], + "angle": 0, + "content": "Raw transition distance. Assume that \\(S\\), \\(\\mathcal{A}\\), and \\(\\operatorname{Im}(\\mathcal{R})\\) are respectively equipped with metric \\(d_S\\), \\(d_{\\mathcal{A}}\\), and \\(d_{\\mathcal{R}}\\), let us define the raw transition distance metric over transitions of \\(\\mathcal{M}\\), i.e., tuples of the form \\(\\langle s, a, r, s' \\rangle\\), as \\(\\vec{d} \\colon S \\times \\mathcal{A} \\times \\operatorname{Im}(\\mathcal{R}) \\times S\\)," + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.385, + 0.8, + 0.406 + ], + "angle": 0, + "content": "\\[\n\\vec {d} \\big (\\left\\langle s _ {1}, a _ {1}, r _ {1}, s _ {1} ^ {\\prime} \\right\\rangle , \\left\\langle s _ {2}, a _ {2}, r _ {2}, s _ {2} ^ {\\prime} \\right\\rangle \\big) = d _ {\\mathcal {S}} (s _ {1}, s _ {2}) + d _ {\\mathcal {A}} (a _ {1}, a _ {2}) + d _ {\\mathcal {R}} (r _ {1}, r _ {2}) + d _ {\\mathcal {S}} \\big (s _ {1} ^ {\\prime}, s _ {2} ^ {\\prime} \\big).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.41, + 0.827, + 0.455 + ], + "angle": 0, + "content": "In a nutshell, \\(\\vec{d}\\) consists of the sum of the distance of all the transition components. Note that it is a well defined distance metric since the sum of distances preserves the identity of indiscernible, symmetry, and triangle inequality." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.465, + 0.826, + 0.508 + ], + "angle": 0, + "content": "Trace-based distributions. 
The raw distance \\(\\vec{d}\\) allows us to reason about transitions; we thus consider the distribution over transitions which occur along traces of length \\(T\\) to compare the dynamics of the original and behavioral models:" + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.513, + 0.76, + 0.553 + ], + "angle": 0, + "content": "\\[\n\\mathcal {D} _ {\\pi} [ T ] (s, a, r, s ^ {\\prime}) = \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\xi_ {\\pi} ^ {t} (s | s _ {I}) \\cdot \\pi (a | s) \\cdot \\mathbf {P} (s ^ {\\prime} | s, a) \\cdot \\mathbf {1} _ {r = \\mathcal {R} (s, a)}, \\text {and}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.246, + 0.556, + 0.753, + 0.595 + ], + "angle": 0, + "content": "\\[\n\\mathcal {P} _ {\\theta} [ T ] \\big (s, a, r, s ^ {\\prime} \\big) = \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\underset {s _ {0: t}, a _ {0: t - 1}, r _ {0: t - 1} \\sim P _ {\\theta} [ t ]} {\\mathbb {E}} \\mathbf {1} _ {\\langle s _ {t - 1}, a _ {t - 1}, r _ {t - 1}, s _ {t} \\rangle = \\langle s, a, r, s ^ {\\prime} \\rangle},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.599, + 0.825, + 0.718 + ], + "angle": 0, + "content": "where \\(P_{\\theta}[T]\\) denotes the distribution over traces of length \\(T\\), generated from \\(P_{\\theta}\\). Intuitively, \\(\\frac{1}{T} \\cdot \\sum_{t=1}^{T} \\xi_{\\pi}^{t}(s \\mid s_{I})\\) can be seen as the fraction of the time spent in \\(s\\) along traces of length \\(T\\), starting from the initial state Kulkarni (1995). Therefore, drawing \\(\\langle s, a, r, s' \\rangle \\sim \\mathcal{D}_{\\pi}[T]\\) trivially follows: it is equivalent to drawing \\(s\\) from \\(\\frac{1}{T} \\cdot \\sum_{t=1}^{T} \\xi_{\\pi}^{t}(\\cdot \\mid s_{I})\\), then respectively \\(a\\) and \\(s'\\) from \\(\\pi(\\cdot \\mid s)\\) and \\(\\mathbf{P}(\\cdot \\mid s, a)\\), to finally obtain \\(r = \\mathcal{R}(s, a)\\). 
Given \\(T \\in \\mathbb{N}\\), our objective is to minimize the Wasserstein distance between those distributions: \\(W_{\\vec{d}}(\\mathcal{D}_{\\pi}[T], \\mathcal{P}_{\\theta}[T])\\). The following Lemma enables optimizing the Wasserstein distance between the original MDP and the behavioral model when traces are drawn from episodic RL processes or infinite interactions (Huang, 2020)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.72, + 0.805, + 0.736 + ], + "angle": 0, + "content": "Lemma A.1. Assume the existence of a stationary behavioral model \\(\\xi_{\\theta} = \\lim_{T\\to \\infty}\\mathcal{P}_{\\theta}[T]\\), then" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.738, + 0.637, + 0.761 + ], + "angle": 0, + "content": "\\[\n\\lim _ {T \\to \\infty} W _ {\\vec {d}} \\left(\\mathcal {D} _ {\\pi} [ T ], \\mathcal {P} _ {\\theta} [ T ]\\right) = W _ {\\vec {d}} \\left(\\xi_ {\\pi}, \\xi_ {\\theta}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.777, + 0.827, + 0.809 + ], + "angle": 0, + "content": "Proof. First, note that \\( \\frac{1}{T} \\cdot \\sum_{t=1}^{T} \\xi_{\\pi}^{t}(\\cdot \\mid s_{I}) \\) weakly converges to \\( \\xi_{\\pi} \\) as \\( T \\) goes to \\( \\infty \\) Kulkarni (1995). The result follows then from (Villani, 2009, Corollary 6.9)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.824, + 0.471, + 0.837 + ], + "angle": 0, + "content": "A.2 DEALING WITH DISCRETE ACTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.849, + 0.827, + 0.878 + ], + "angle": 0, + "content": "When the policy \\(\\pi\\) executed in \\(\\mathcal{M}\\) already produces discrete actions, learning a latent action space is, in many cases, not necessary. We thus make the following assumptions:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.88, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Assumption A.2. 
Let \\(\\pi \\colon S \\to \\Delta(\\mathcal{A}^{\\star})\\) be the policy executed in \\(\\mathcal{M}\\) and assume that \\(\\mathcal{A}^{\\star}\\) is a (tractable) finite set. Then, we take \\(\\overline{\\mathcal{A}} = \\mathcal{A}^{\\star}\\) and \\(\\phi_{\\iota}^{\\mathcal{A}}\\) as the identity function, i.e., \\(\\phi_{\\iota}^{\\mathcal{A}}: \\overline{S} \\times \\mathcal{A}^{\\star} \\to \\mathcal{A}^{\\star}, \\langle \\overline{s}, a^{\\star} \\rangle \\mapsto a^{\\star}\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "Assumption A.3. Assume that the action space of the original environment \\(\\mathcal{M}\\) is a (tractable) finite set. Then, we take \\(\\psi_{\\theta}\\) as the identity function, i.e., \\(\\psi_{\\theta} = \\phi_{\\iota}^{A}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.142, + 0.827, + 0.226 + ], + "angle": 0, + "content": "Concretely, the premise of Assumption A.2 typically occurs when \\(\\pi\\) is a latent policy (see Rem. 1) or when \\(\\mathcal{M}\\) has already a discrete action space. In the latter case, Assumption A.2 and A.3 amount to setting \\(\\bar{\\mathcal{A}} = \\mathcal{A}\\) and ignoring the action encoder and embedding function. Note that if a discrete action space is too large, or if the user explicitly aims for a coarser space, then the former is not considered as tractable, these assumptions do not hold, and the action space is abstracted to a smaller set of discrete actions." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.242, + 0.376, + 0.256 + ], + "angle": 0, + "content": "A.3 PROOF OF LEMMA 3.2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.27, + 0.664, + 0.288 + ], + "angle": 0, + "content": "Notation. From now on, we write \\(\\phi_{\\iota}(\\bar{s},\\bar{a}\\mid s,a) = \\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}}\\cdot \\phi_{\\iota}^{A}(\\bar{a}\\mid \\bar{s},a)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.29, + 0.828, + 0.339 + ], + "angle": 0, + "content": "Lemma 3.2. Define \\(\\mathcal{T}(\\bar{s},\\bar{a},\\bar{s}^{\\prime}) = \\mathbb{E}_{s,a\\sim \\xi_{\\pi}}[\\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}}\\cdot \\phi_{\\iota}^{A}(\\bar{a}\\mid \\bar{s},a)\\cdot \\overline{\\mathbf{P}}_{\\theta}(\\bar{s}^{\\prime}\\mid \\bar{s},\\bar{a})]\\) as the distribution of drawing state-action pairs from interacting with \\(\\mathcal{M}\\), embedding them to the latent spaces, and finally letting them transition to their successor state in \\(\\overline{\\mathcal{M}}_{\\theta}\\). Then, \\(W_{\\vec{d}}(Q_{\\iota},\\bar{\\xi}_{\\overline{\\pi}_{\\theta}})\\leqslant W_{\\vec{d}}(\\bar{\\xi}_{\\overline{\\pi}_{\\theta}},\\mathcal{T}) + L_{\\mathbf{P}}^{\\xi_{\\pi}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.35, + 0.79, + 0.365 + ], + "angle": 0, + "content": "Proof. 
Wasserstein is compliant with the triangular inequality (Villani, 2009), which gives us:" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.366, + 0.655, + 0.385 + ], + "angle": 0, + "content": "\\[\nW _ {\\vec {d}} \\left(Q _ {\\iota}, \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}\\right) \\leqslant W _ {\\vec {d}} \\left(Q _ {\\iota}, \\mathcal {T}\\right) + W _ {d _ {\\bar {\\mathfrak {S}}}} \\left(\\mathcal {T}, \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.385, + 0.218, + 0.398 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.401, + 0.825, + 0.598 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} W _ {\\tilde {d}} \\left(\\mathcal {T}, \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}\\right) \\quad (\\text {note that } W _ {\\tilde {d}} \\text { is reflexive (Villani, 2009)}) \\\\ = \\sup _ {f \\in \\mathcal {F} _ {\\bar {d}}} \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) - \\underset {\\bar {s} \\sim \\bar {\\xi} _ {\\pi_ {\\theta}}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\bar {\\pi} _ {\\theta} (\\cdot | \\bar {s})} {\\mathbb {E}} \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}), \\text {and} \\\\ W _ {\\vec {d}} (Q _ {\\iota}, \\mathcal {T}) \\\\ = \\sup _ {f \\in \\mathcal {F} _ {\\vec {d}}} \\mathbb {E} _ {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi}} \\mathbb {E} _ {\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} (\\cdot | s, a, s ^ {\\prime})} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) - \\mathbb {E} _ {s, a \\sim \\xi_ {\\pi}} 
\\mathbb {E} _ {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} \\mathbb {E} _ {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) (3) \\\\ \\leqslant \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\sup _ {f \\in \\mathcal {F} _ {\\bar {d}}} \\underset {s ^ {\\prime} \\sim \\mathbf {P} (\\cdot | s, a)} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\phi_ {\\iota} (s ^ {\\prime})) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) (4) \\\\ = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\iota} ^ {A} (\\cdot | \\phi_ {\\iota} (s), a)} {\\mathbb {E}} \\sup _ {f \\in \\mathcal {F} _ {d _ {\\overline {\\mathcal {S}}}}} \\underset {\\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a)} {\\mathbb {E}} f (\\bar {s} ^ {\\prime}) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})} {\\mathbb {E}} f (\\bar {s} ^ {\\prime}) (5) \\\\ = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\ell} ^ {\\mathcal {A}} (\\cdot | \\phi_ {\\ell} (s), a)} {\\mathbb {E}} W _ {d _ {\\bar {\\mathfrak {S}}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a), \\bar {\\mathbf {P}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.601, + 0.827, + 0.628 + ], + "angle": 0, + "content": "We pass from Eq. 3 to Eq. 4 by Jensen's inequality. To see how we pass from Eq. 4 to Eq. 
5, notice that" + }, + { + "type": "equation", + "bbox": [ + 0.227, + 0.63, + 0.71, + 0.655 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} _ {\\vec {d}} = \\left\\{f \\colon f \\left(\\bar {s} _ {1}, \\bar {a} _ {1}, \\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s} _ {2}, \\bar {a} _ {2}, \\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant \\vec {d} \\left(\\left\\langle \\bar {s} _ {1}, \\bar {a} _ {1}, \\bar {s} _ {1} ^ {\\prime} \\right\\rangle , \\left\\langle \\bar {s} _ {2}, \\bar {a} _ {2}, \\bar {s} _ {2} ^ {\\prime} \\right\\rangle\\right) \\right\\}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.228, + 0.657, + 0.771, + 0.674 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} _ {\\vec {d}} = \\left\\{f \\colon f \\left(\\bar {s} _ {1}, \\bar {a} _ {1}, \\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s} _ {2}, \\bar {a} _ {2}, \\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant d _ {\\bar {S}} \\left(\\bar {s} _ {1}, \\bar {s} _ {2}\\right) + d _ {\\bar {A}} \\left(\\bar {a} _ {1}, \\bar {a} _ {2}\\right) + d _ {\\bar {S}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\right\\}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.675, + 0.827, + 0.719 + ], + "angle": 0, + "content": "Observe now that \\(\\bar{s}\\) and \\(\\bar{a}\\) are fixed in the supremum computation of Eq. 4: all functions \\(f\\) considered and taken from \\(\\mathcal{F}_{\\bar{d}}\\) are of the form \\(f(\\bar{s},\\bar{a},\\cdot)\\). 
It is thus sufficient to consider the supremum over functions from the following subset of \\(\\mathcal{F}_{\\bar{d}}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.286, + 0.722, + 0.73, + 0.74 + ], + "angle": 0, + "content": "\\[\n\\{f \\colon f (\\bar {s}, \\bar {a}, \\bar {s} _ {1} ^ {\\prime}) - f (\\bar {s}, \\bar {a}, \\bar {s} _ {2} ^ {\\prime}) \\leqslant d _ {\\bar {\\mathcal {S}}} (\\bar {s}, \\bar {s}) + d _ {\\bar {\\mathcal {A}}} (\\bar {a}, \\bar {a}) + d _ {\\bar {\\mathcal {S}}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.664, + 0.742, + 0.824, + 0.757 + ], + "angle": 0, + "content": "(for \\(\\bar{s},\\bar{a}\\) drawn from \\(\\phi_{\\iota}\\))" + }, + { + "type": "equation", + "bbox": [ + 0.269, + 0.761, + 0.58, + 0.818 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} = \\left\\{f: f \\left(\\bar {s}, \\bar {a}, \\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s}, \\bar {a}, \\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant d _ {\\bar {S}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\right\\} \\\\ = \\left\\{f: f \\left(\\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant d _ {\\bar {S}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\right\\} \\\\ = \\mathcal {F} _ {d _ {\\bar {\\mathcal {S}}}}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.821, + 0.827, + 0.862 + ], + "angle": 0, + "content": "Given a state \\( s \\in S \\) in the original model, the (parallel) execution of \\( \\pi \\) in \\( \\overline{\\mathcal{M}}_{\\theta} \\) is enabled through \\( \\pi(a, \\bar{a} | s) = \\pi(a | s) \\cdot \\phi_{\\ell}^{\\mathcal{A}}(\\bar{a} | \\phi_{\\ell}(s), a) \\) (cf. Fig. 1b). 
The local transition loss resulting from this interaction is:" + }, + { + "type": "equation", + "bbox": [ + 0.282, + 0.865, + 0.717, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} = \\underset {s, \\langle a, \\bar {a} \\rangle \\sim \\xi_ {\\pi}} {\\mathbb {E}} W _ {d _ {\\bar {S}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a), \\bar {\\mathbf {P}} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right) \\\\ = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\iota} ^ {A} (\\cdot | \\phi_ {\\iota} (s), a)} {\\mathbb {E}} W _ {d _ {\\bar {\\mathfrak {S}}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a), \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right), \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.375, + 0.119 + ], + "angle": 0, + "content": "which finally yields the result." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.105, + 0.826, + 0.117 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.136, + 0.393, + 0.15 + ], + "angle": 0, + "content": "A.4 PROOF OF THEOREM 3.3" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.163, + 0.825, + 0.206 + ], + "angle": 0, + "content": "Before proving Theorem 3.3, let us introduce the following Lemma, that explicitly demonstrates the link between the transition regularizer of the \\(\\mathrm{W}^2\\mathrm{AE}\\)-MDP objective and the local transition loss required to obtain the guarantees related to the bisimulation bounds of Eq. 1." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.209, + 0.825, + 0.224 + ], + "angle": 0, + "content": "Lemma A.4. Assume that traces are generated by running \\(\\bar{\\pi} \\in \\overline{\\Pi}\\) in the original environment, then" + }, + { + "type": "equation", + "bbox": [ + 0.269, + 0.232, + 0.729, + 0.26 + ], + "angle": 0, + "content": "\\[\n\\underset {s, a ^ {\\star} \\sim \\xi_ {\\pi} \\bar {a} \\sim \\phi_ {\\iota} ^ {\\mathcal {A}} (\\cdot | \\phi_ {\\iota} (s), a ^ {\\star})} {\\mathbb {E}} W _ {d _ {\\overline {{S}}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a ^ {\\star}), \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right) = L _ {\\mathbf {P}} ^ {\\xi_ {\\overline {{\\pi}}}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.275, + 0.794, + 0.291 + ], + "angle": 0, + "content": "Proof. Since the latent policy \\(\\bar{\\pi}\\) generates latent actions, Assumption A.2 holds, which means:" + }, + { + "type": "equation", + "bbox": [ + 0.293, + 0.298, + 0.707, + 0.376 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathop{\\mathbb{E}}_{s,a^{\\star}\\sim \\xi_{\\overline{\\pi}}}\\mathop{\\mathbb{E}}_{\\bar{a}\\sim \\phi_{\\iota}^{A}(\\cdot |\\phi_{\\iota}(s),a^{\\star})}W_{d_{\\overline{\\mathfrak{S}}}}\\left(\\phi_{\\iota}\\mathbf{P}(\\cdot |s,a^{\\star}),\\overline{\\mathbf{P}}_{\\theta}(\\cdot |\\phi_{\\iota}(s),\\bar{a})\\right) \\\\ = \\underset {s, \\bar {a} \\sim \\xi_ {\\bar {\\pi}}} {\\mathbb {E}} W _ {d _ {\\bar {S}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, \\bar {a}), \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right) \\\\ = L _ {\\mathbf {P}} ^ {\\xi_ {\\overline {{\\pi}}}}. \\\\ \\end{array}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.383, + 0.826, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.405, + 0.826, + 0.435 + ], + "angle": 0, + "content": "Theorem 3.3. 
Assume that traces are generated by running a latent policy \\(\\bar{\\pi} \\in \\overline{\\Pi}\\) in the original environment and let \\(d_{\\mathcal{R}}\\) be the usual Euclidean distance, then the \\(W^{2}\\)AE-MDP objective is" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.442, + 0.766, + 0.469 + ], + "angle": 0, + "content": "\\[\n\\min_{\\iota ,\\theta}\\underset {s,s^{\\prime}\\sim \\xi_{\\pi}}{\\mathbb{E}}\\left[d_{\\mathcal{S}}(s,\\mathcal{G}_{\\theta}(\\phi_{\\iota}(s))) + d_{\\mathcal{S}}\\big(s^{\\prime},\\mathcal{G}_{\\theta}\\big(\\phi_{\\iota}\\big(s^{\\prime}\\big)\\big)\\big)\\right] + L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}} + \\beta \\cdot (\\mathcal{W}_{\\xi_{\\overline{\\pi}}} + L_{\\mathbf{P}}^{\\xi_{\\overline{\\pi}}}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.484, + 0.827, + 0.675 + ], + "angle": 0, + "content": "Proof. We distinguish two cases: (i) the case where the original and latent models share the same discrete action space, i.e., \\(\\mathcal{A} = \\overline{\\mathcal{A}}\\), and (ii) the case where the two have a different action space (e.g., when the original action space is continuous), i.e., \\(\\mathcal{A} \\neq \\overline{\\mathcal{A}}\\). In both cases, the local losses term follows by definition of \\(L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}}\\) and Lemma A.4. When \\(d_{\\mathcal{R}}\\) is the Euclidean distance (or even the \\(L_{1}\\) distance since rewards are scalar values), the expected reward distance occurring in the expected trace-distance term \\(\\vec{d}\\) in the \\(\\mathrm{W}^2\\mathrm{AE}\\)-MDP objective directly translates to the local loss \\(L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}}\\). Concerning the local transition loss, in case (i), the result naturally follows from Assumption A.2 and A.3. 
In case (ii), only Assumption A.2 holds, meaning the action encoder term of the \\(\\mathrm{W}^2\\mathrm{AE}\\)-MDP objective is ignored, but not the action embedding term appearing in \\(G_{\\theta}\\). Given \\(s \\sim \\xi_{\\overline{\\pi}}\\), recall that executing \\(\\overline{\\pi}\\) in \\(\\mathcal{M}\\) amounts to embedding the produced latent actions \\(\\bar{a} \\sim \\overline{\\pi}(\\cdot \\mid \\phi_{\\iota}(s))\\) back to the original environment via \\(a = \\psi_{\\theta}(\\phi_{\\iota}(s), \\bar{a})\\) (cf. Rem. 1 and Fig. 1a). Therefore, the projection of \\(\\vec{d}(\\langle s, a, r, s' \\rangle, G_{\\theta}(\\phi_{\\iota}(s), \\bar{a}, \\phi_{\\iota}(s')))\\) on the action space \\(\\mathcal{A}\\) is \\(d_{\\mathcal{A}}(\\psi_{\\theta}(\\phi_{\\iota}(s), \\bar{a}), \\psi_{\\theta}(\\phi_{\\iota}(s), \\bar{a})) = 0\\) for \\(r = \\mathcal{R}(s, a)\\) and \\(s' \\sim \\mathbf{P}(\\cdot \\mid s, a)\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.69, + 0.536, + 0.704 + ], + "angle": 0, + "content": "A.5 OPTIMIZING THE TRANSITION REGULARIZER" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.715, + 0.827, + 0.76 + ], + "angle": 0, + "content": "In the following, we detail how we derive a tractable form of our transition regularizer \\(L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)\\). 
Optimizing the ground Kantorovich-Rubinstein duality is enabled via the introduction of a parameterized, 1-Lipschitz network \\(\\varphi_{\\omega}^{\\mathbf{P}}\\), that needs to be trained to attain the supremum of the dual:" + }, + { + "type": "equation", + "bbox": [ + 0.221, + 0.767, + 0.775, + 0.797 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} (\\omega) = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\max _ {\\omega : \\varphi_ {\\omega} ^ {\\mathbf {P}} \\in \\mathcal {F} _ {d}} \\underset {\\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a)} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (\\bar {s} ^ {\\prime}) - \\underset {\\bar {s} ^ {\\prime} \\sim \\bar {\\mathbf {P}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (\\bar {s} ^ {\\prime}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.825, + 0.852 + ], + "angle": 0, + "content": "Under this form, optimizing \\( L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega) \\) is intractable due to the expectation over the maximum. The following Lemma allows us to rewrite \\( L_{\\mathbf{P}}^{\\xi_{\\pi}} \\) to make the optimization tractable through Monte Carlo estimation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.826, + 0.887 + ], + "angle": 0, + "content": "Lemma A.5. Let \\(\\mathcal{X},\\mathcal{Y}\\) be two measurable sets, \\(\\xi \\in \\Delta (\\mathcal{X})\\), \\(P\\colon \\mathcal{X}\\to \\Delta (\\mathcal{Y}),Q\\colon \\mathcal{X}\\to \\Delta (\\mathcal{Y})\\) , and \\(d\\colon \\mathcal{Y}\\times \\mathcal{Y}\\rightarrow [0, + \\infty [\\) be a metric on \\(\\mathcal{Y}\\) . 
Then," + }, + { + "type": "equation", + "bbox": [ + 0.211, + 0.894, + 0.786, + 0.93 + ], + "angle": 0, + "content": "\\[\n\\underset {x \\sim \\xi} {\\mathbb {E}} W _ {d} \\left(P (\\cdot \\mid x), Q (\\cdot \\mid x)\\right) = \\sup _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d}} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {2}) \\right]\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.414, + 0.119 + ], + "angle": 0, + "content": "Proof. Our objective is to show that" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.125, + 0.825, + 0.202 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\right] (6) \\\\ = \\sup _ {\\varphi : \\mathcal {X} \\rightarrow \\mathcal {F} _ {d}} \\mathbb {E} _ {x \\sim \\xi} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (x) \\left(y _ {1}\\right) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (x) \\left(y _ {2}\\right)\\right] (7) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.206, + 0.649, + 0.221 + ], + "angle": 0, + "content": "We start with \\((6) \\leqslant (7)\\). 
Construct \\(\\varphi^{\\star} \\colon \\mathcal{X} \\to \\mathcal{F}_d\\) by setting for all \\(x \\in \\mathcal{X}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.325, + 0.226, + 0.672, + 0.252 + ], + "angle": 0, + "content": "\\[\n\\varphi^{\\star}(x) = \\arg \\sup_{f\\in \\mathcal{F}_{d}}\\underset {y_{1}\\sim P(\\cdot |x)}{\\mathbb{E}}f(y_{1}) - \\underset {y_{2}\\sim Q(\\cdot |x)}{\\mathbb{E}}f(y_{2}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.257, + 0.264, + 0.272 + ], + "angle": 0, + "content": "This gives us" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.277, + 0.695, + 0.391 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\right] \\\\ = \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {2}) \\right] \\\\ \\leqslant \\sup _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d}} \\mathbb {E} _ {x \\sim \\xi} \\left[ \\mathbb {E} _ {y _ {1} \\sim P (\\cdot | x)} \\varphi (x) (y _ {1}) - \\mathbb {E} _ {y _ {2} \\sim Q (\\cdot | x)} \\varphi (x) (y _ {2}) \\right]. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.395, + 0.431, + 0.41 + ], + "angle": 0, + "content": "It remains to show that \\((6) \\geqslant (7)\\). 
Take" + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.415, + 0.72, + 0.449 + ], + "angle": 0, + "content": "\\[\n\\varphi^ {\\star} = \\arg \\operatorname * {s u p} _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d}} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {2}) \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.454, + 0.54, + 0.469 + ], + "angle": 0, + "content": "Then, for all \\(x\\in \\mathcal{X}\\) we have \\(\\varphi^{\\star}(x)\\in \\mathcal{F}_d\\) which means:" + }, + { + "type": "equation", + "bbox": [ + 0.354, + 0.475, + 0.646, + 0.528 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {2}) \\\\ \\leqslant \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.533, + 0.295, + 0.548 + ], + "angle": 0, + "content": "This finally yields" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.554, + 0.667, + 0.631 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {2}) \\right] \\\\ \\leqslant \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\right]. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.635, + 0.824, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.656, + 0.782, + 0.672 + ], + "angle": 0, + "content": "Corollary A.5.1. Let \\(\\xi_{\\pi}\\) be a stationary distribution of \\(\\mathcal{M}_{\\pi}\\) and \\(\\mathcal{X} = S\\times \\mathcal{A}\\times \\overline{S}\\times \\overline{\\mathcal{A}}\\) , then" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.677, + 0.801, + 0.72 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} = \\sup _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d} _ {\\overline {{S}}}} \\underset {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\left[ \\varphi (s, a, \\bar {s}, \\bar {a}) \\big (\\phi_ {\\iota} (s ^ {\\prime}) \\big) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, a)} {\\mathbb {E}} \\varphi (s, a, \\bar {s}, \\bar {a}) \\big (\\bar {s} ^ {\\prime} \\big) \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.734, + 0.584, + 0.751 + ], + "angle": 0, + "content": "Consequently, we rewrite \\(L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)\\) as a tractable maximization:" + }, + { + "type": "equation", + "bbox": [ + 0.172, + 0.756, + 0.828, + 0.799 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} (\\omega) = \\max _ {\\omega : \\varphi_ {\\omega} ^ {\\mathbf {P}} \\in \\mathcal {F} _ {d _ {\\bar {g}}}} \\underset {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi} \\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\underset {s, a, \\bar {a}, \\bar {a} \\sim \\phi_ {\\iota} (s ^ {\\prime})} {\\mathbb {E}} \\left[ \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\phi_ {\\iota} (s ^ {\\prime})) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline 
{{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) \\right].\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.812, + 0.371, + 0.825 + ], + "angle": 0, + "content": "A.6 THE LATENT METRIC" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.837, + 0.827, + 0.926 + ], + "angle": 0, + "content": "In the following, we show that considering the Euclidean distance for \\(\\vec{d}\\) and \\(d_{\\overline{\\mathcal{S}}}\\) in the latent space for optimizing the regularizers \\(\\mathcal{W}_{\\xi_{\\pi}}\\) and \\(L_{\\mathbf{P}}^{\\xi_{\\pi}}\\) is Lipschitz equivalent to considering a continuous \\(\\lambda\\)-relaxation of the discrete metric \\(\\mathbf{1}_{\\neq}(\\pmb{x},\\pmb{y}) = \\mathbf{1}_{x\\neq y}\\). Consequently, this also means it is consistently sufficient to enforce 1-Lipschitzness via the gradient penalty approach of Gulrajani et al. (2017) during training to maintain the guarantees linked to the regularizers in the zero-temperature limit, when the spaces are discrete." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.141 + ], + "angle": 0, + "content": "Lemma A.6. Let \\(d\\) be the usual Euclidean distance and \\(d_{\\lambda} \\colon [0,1]^n \\times [0,1]^n \\to [0,1[\\), \\(\\langle \\pmb{x}, \\pmb{y} \\rangle \\mapsto \\frac{d(\\pmb{x}, \\pmb{y})}{\\lambda + d(\\pmb{x}, \\pmb{y})}\\) for \\(\\lambda \\in ]0,1]\\) and \\(n \\in \\mathbb{N}\\), then \\(d_{\\lambda}\\) is a distance metric." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.154, + 0.637, + 0.17 + ], + "angle": 0, + "content": "Proof. 
The function \\( d_{\\lambda} \\) is a metric iff it satisfies the following axioms:" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.184, + 0.827, + 0.248 + ], + "angle": 0, + "content": "1. Identity of indiscernibles: If \\( \\pmb{x} = \\pmb{y} \\), then \\( d_{\\lambda}(\\pmb{x}, \\pmb{y}) = \\frac{d(\\pmb{x}, \\pmb{y})}{\\lambda + d(\\pmb{x}, \\pmb{y})} = \\frac{0}{\\lambda + 0} = 0 \\) since \\( d \\) is a distance metric. Assume now that \\( d_{\\lambda}(\\pmb{x}, \\pmb{y}) = 0 \\) and take \\( \\alpha = d(\\pmb{x}, \\pmb{y}) \\), for any \\( \\pmb{x}, \\pmb{y} \\). Thus, \\( \\alpha \\in [0, +\\infty[ \\) and \\( 0 = \\frac{\\alpha}{\\lambda + \\alpha} \\) is only achieved in \\( \\alpha = 0 \\), which only occurs whenever \\( \\pmb{x} = \\pmb{y} \\) since \\( d \\) is a distance metric." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.257, + 0.306, + 0.273 + ], + "angle": 0, + "content": "2. Symmetry:" + }, + { + "type": "list", + "bbox": [ + 0.211, + 0.184, + 0.827, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.278, + 0.824, + 0.364 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {y}) = \\frac {d (\\boldsymbol {x} , \\boldsymbol {y})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} \\\\ = \\frac {d (\\boldsymbol {y} , \\boldsymbol {x})}{\\lambda + d (\\boldsymbol {y} , \\boldsymbol {x})} \\quad (d \\text {i s a d i s t a n c e m e t r i c}) \\\\ = d _ {\\lambda} (\\boldsymbol {y}, \\boldsymbol {x}) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.378, + 0.714, + 0.394 + ], + "angle": 0, + "content": "3. 
Triangle inequality: Let \\( \\mathbf{x}, \\mathbf{y}, \\mathbf{z} \\in [0,1]^n \\), the triangle inequality holds iff" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.4, + 0.824, + 0.603 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {y}) + d _ {\\lambda} (\\boldsymbol {y}, \\boldsymbol {z}) \\geqslant d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {z}) (8) \\\\ \\equiv \\quad \\frac {d (\\boldsymbol {x} , \\boldsymbol {y})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} + \\frac {d (\\boldsymbol {y} , \\boldsymbol {z})}{\\lambda + d (\\boldsymbol {y} , \\boldsymbol {z})} \\geqslant \\frac {d (\\boldsymbol {x} , \\boldsymbol {z})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {z})} \\\\ \\equiv \\quad \\frac {\\lambda d (\\boldsymbol {x} , \\boldsymbol {y}) + \\lambda d (\\boldsymbol {y} , \\boldsymbol {z}) + 2 d (\\boldsymbol {x} , \\boldsymbol {y}) d (\\boldsymbol {y} , \\boldsymbol {z})}{\\lambda^ {2} + \\lambda d (\\boldsymbol {x} , \\boldsymbol {y}) + \\lambda d (\\boldsymbol {y} , \\boldsymbol {z}) + d (\\boldsymbol {x} , \\boldsymbol {y}) d (\\boldsymbol {y} , \\boldsymbol {z})} \\geqslant \\frac {d (\\boldsymbol {x} , \\boldsymbol {z})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {z})} \\\\ \\equiv \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {y}) + \\lambda^ {2} d (\\boldsymbol {y}, \\boldsymbol {z}) + 2 \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) + \\\\ \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {x}, \\boldsymbol {z}) + \\lambda d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) + 2 d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\\\ \\geqslant \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {z}) + \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {x}, \\boldsymbol {z}) + \\lambda d (\\boldsymbol {y}, 
\\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) + d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\\\ \\left(\\text {c r o s s - p r o d u c t , w i t h} \\lambda > 0 \\text {a n d} \\operatorname {I m} (d) \\in [ 0, \\infty [\\right) \\\\ \\equiv \\quad \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {y}) + \\lambda^ {2} d (\\boldsymbol {y}, \\boldsymbol {z}) + 2 \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) + d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\geqslant \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {z}) (9) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.611, + 0.477, + 0.625 + ], + "angle": 0, + "content": "Since \\(d\\) is a distance metric, we have" + }, + { + "type": "equation", + "bbox": [ + 0.406, + 0.632, + 0.825, + 0.649 + ], + "angle": 0, + "content": "\\[\n\\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {y}) + \\lambda^ {2} d (\\boldsymbol {y}, \\boldsymbol {z}) \\geqslant \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {z}) \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.656, + 0.428, + 0.672 + ], + "angle": 0, + "content": "and \\(\\operatorname {Im}(d)\\in [0,\\infty [\\) , meaning" + }, + { + "type": "equation", + "bbox": [ + 0.369, + 0.678, + 0.825, + 0.695 + ], + "angle": 0, + "content": "\\[\n2 \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) + d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\geqslant 0 \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.708, + 0.827, + 0.736 + ], + "angle": 0, + "content": "By Eq. 10 and 11, the inequality of Eq. 9 holds. Furthermore, the fact that Eq. 8 and 9 are equivalent yields the result." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.745, + 0.827, + 0.766 + ], + "angle": 0, + "content": "Lemma A.7. Let \\(d\\), \\(d_{\\lambda}\\) as defined above, then \\((i)d_{\\lambda}\\xrightarrow[\\lambda\\to 0]{\\longrightarrow}\\mathbf{1}_{\\neq}\\) and (ii) \\(d,d_{\\lambda}\\) are Lipschitz-equivalent." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.781, + 0.827, + 0.81 + ], + "angle": 0, + "content": "Proof. Part (i) is straightforward by definition of \\( d_{\\lambda} \\). Distances \\( d \\) and \\( d_{\\lambda} \\) are Lipschitz equivalent if and only if \\( \\exists a, b > 0 \\) such that \\( \\forall x, y \\in [0,1]^n \\)," + }, + { + "type": "equation", + "bbox": [ + 0.364, + 0.816, + 0.638, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} a \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\leqslant d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {y}) \\leqslant b \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\\\ \\equiv a \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\leqslant \\frac {d (\\boldsymbol {x} , \\boldsymbol {y})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} \\leqslant b \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\\\ \\equiv \\quad a \\leqslant \\frac {1}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} \\leqslant b \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.908, + 0.48, + 0.929 + ], + "angle": 0, + "content": "Taking \\( a = \\frac{1}{\\lambda + \\sqrt{n}} \\) and \\( b = \\frac{1}{\\lambda} \\) yields the result." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.67, + 0.12 + ], + "angle": 0, + "content": "Corollary A.7.1. 
For all \\(\\beta \\geqslant 1 / \\lambda\\), \\(s \\in S\\), \\(a \\in \\mathcal{A}\\), \\(\\bar{s} \\in \\overline{S}\\), and \\(\\bar{a} \\in \\overline{\\mathcal{A}}\\), we have" + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.129, + 0.452, + 0.148 + ], + "angle": 0, + "content": "1. \\(W_{d_{\\lambda}}(\\mathcal{T},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}})\\leqslant \\beta \\cdot W_{d}(\\mathcal{T},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}})\\)" + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.156, + 0.695, + 0.175 + ], + "angle": 0, + "content": "2. \\(W_{d_{\\lambda}}\\left(\\phi_{\\iota}\\mathbf{P}(\\cdot \\mid s,a),\\overline{\\mathbf{P}}_{\\theta}(\\cdot \\mid \\bar{s},\\bar{a})\\right)\\leqslant \\beta \\cdot W_{d}\\left(\\phi_{\\iota}\\mathbf{P}(\\cdot \\mid s,a),\\overline{\\mathbf{P}}_{\\theta}(\\cdot \\mid \\bar{s},\\bar{a})\\right)\\)" + }, + { + "type": "list", + "bbox": [ + 0.21, + 0.129, + 0.695, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.191, + 0.828, + 0.234 + ], + "angle": 0, + "content": "Proof. By Lipschitz equivalence, taking \\(\\beta \\geqslant 1 / \\lambda\\) ensures that \\(\\forall n\\in \\mathbb{N},\\forall \\pmb {x},\\pmb {y}\\in [0,1]^n,d_\\lambda (\\pmb {x},\\pmb {y})\\leqslant \\beta \\cdot d(\\pmb {x},\\pmb {y})\\). Moreover, for any distributions \\(P,Q,W_{d_{\\lambda}}(P,Q)\\leqslant \\beta \\cdot W_{d}(P,Q)\\) (cf., e.g., Gelada et al. 2019, Lemma A.4 for details)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.252, + 0.828, + 0.31 + ], + "angle": 0, + "content": "In practice, taking the hyperparameter \\(\\beta \\geqslant 1 / \\lambda\\) in the \\(\\mathrm{W}^2\\mathrm{AE}\\)-MDP ensures that minimizing the \\(\\beta\\)-scaled regularizers w.r.t. \\(d\\) also minimizes the regularizers w.r.t. the \\(\\lambda\\)-relaxation \\(d_{\\lambda}\\), being the discrete distribution in the zero-temperature limit. 
Note that optimizing over two different \\(\\beta_{1}, \\beta_{2}\\) instead of a unique scale factor \\(\\beta\\) is also a good practice to interpolate between the two regularizers." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.33, + 0.402, + 0.347 + ], + "angle": 0, + "content": "B EXPERIMENT DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.362, + 0.827, + 0.392 + ], + "angle": 0, + "content": "The code for conducting and replicating our experiments is available at https://github.com/florentdelgrange/wae_mdp." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.409, + 0.266, + 0.423 + ], + "angle": 0, + "content": "B.1 SETUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.434, + 0.827, + 0.505 + ], + "angle": 0, + "content": "We used TENSORFLOW 2.7.0 (Abadi et al., 2015) to implement the neural network architecture of our W\\(^2\\)AE-MDP, TENSORFLOW PROBABILITY 0.15.0 (Dillon et al., 2017) to handle the probabilistic components of the latent model (e.g., latent distributions with reparameterization tricks, masked autoregressive flows, etc.), as well as TF-AGENTS 0.11.0 (Guadarrama et al., 2018) to handle the RL parts of the framework." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.512, + 0.826, + 0.569 + ], + "angle": 0, + "content": "Models have been trained on a cluster running under CentOS Linux 7 (Core) composed of a mix of nodes containing Intel processors with the following CPU microarchitectures: (i) 10-core INTEL E5-2680v2, (ii) 14-core INTEL E5-2680v4, and (iii) 20-core INTEL Xeon Gold 6148. We used 8 cores and 32 GB of memory for each run." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.586, + 0.416, + 0.6 + ], + "angle": 0, + "content": "B.2 STATIONARY DISTRIBUTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.612, + 0.827, + 0.71 + ], + "angle": 0, + "content": "To sample from the stationary distribution \\(\\xi_{\\pi}\\) of episodic learning environments operating under \\(\\pi \\in \\Pi\\), we implemented the recursive \\(\\epsilon\\)-perturbation trick of Huang (2020). In a nutshell, the reset of the environment is explicitly added to the state space of \\(\\mathcal{M}\\), which is entered at the end of each episode and left with probability \\(1 - \\epsilon\\) to start a new one. We also added a special atomic proposition reset into \\(\\mathbf{AP}\\) to label this reset state and reason about episodic behaviors. For instance, this allows verifying whether the agent behaves safely during the entire episode, or if it is able to reach a goal before the end of the episode." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.728, + 0.536, + 0.742 + ], + "angle": 0, + "content": "B.3 ENVIRONMENTS WITH INITIAL DISTRIBUTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.754, + 0.826, + 0.825 + ], + "angle": 0, + "content": "Many environments do not necessarily have a single initial state, but rather an initial distribution over states \\( d_I \\in \\Delta(S) \\). In that case, the results presented in this paper remain unchanged: it suffices to add a dummy state \\( s^\\star \\) to the state space \\( S \\cup \\{s^\\star\\} \\) so that \\( s_I = s^\\star \\) with the transition dynamics \\( \\mathbf{P}(s' \\mid s^\\star, a) = d_I(s') \\) for any action \\( a \\in \\mathcal{A} \\). Therefore, each time the reset of the environment is triggered, we make the MDP entering the initial state \\( s^\\star \\), then transitioning to \\( s' \\) according to \\( d_I \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.842, + 0.427, + 0.856 + ], + "angle": 0, + "content": "B.4 LATENT SPACE DISTRIBUTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.826, + 0.926 + ], + "angle": 0, + "content": "As pointed out in Sect. 4, posterior collapse is naturally avoided when optimizing \\( \\mathrm{W}^2\\mathrm{AE} \\)-MDP. To illustrate that, we report the distribution of latent states produced by \\( \\phi_{\\iota} \\) during training (Fig. 5). The plots reveal that the latent space generated by mapping original states drawn from \\( \\xi_{\\pi} \\) during training to \\( \\bar{S} \\) via \\( \\phi_{\\iota} \\) is fairly distributed, for each environment." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.105, + 0.389, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.108, + 0.6, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.109, + 0.811, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.236, + 0.496, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.236, + 0.702, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.365, + 0.825, + 0.396 + ], + "angle": 0, + "content": "Figure 5: Latent space distribution along training steps. The intensity of the blue hue corresponds to the frequency of latent states produced by \\(\\phi_{\\ell}\\) during training." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.409, + 0.316, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.322, + 0.409, + 0.44, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.444, + 0.41, + 0.567, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.41, + 0.692, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.41, + 0.822, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.261, + 0.513, + 0.735, + 0.529 + ], + "angle": 0, + "content": "Figure 6: Absolute value difference \\(\\| V_{\\bar{\\pi}_{\\theta}}\\|\\) reported along training steps." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.556, + 0.711, + 0.57 + ], + "angle": 0, + "content": "B.5 DISTANCE METRICS: STATE, ACTION, AND REWARD RECONSTRUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.582, + 0.827, + 0.694 + ], + "angle": 0, + "content": "The choice of the distance functions \\( d_{\\mathcal{S}} \\), \\( d_{\\mathcal{A}} \\), and \\( d_{\\mathcal{R}} \\), plays a role in the success of our approach. The usual Euclidean distance is often a good choice for all the transition components, but the scale, dimensionality, and nature of the inputs sometimes require using scaled, normalized, or other kinds of distances to allow the network to reconstruct each component. While we did not observe such requirements in our experiments (where we simply used the Euclidean distance), high dimensional observations (e.g., images) are an example of data which could require tuning the state-distance function in such a way, to make sure that the optimization of the reward or action reconstruction will not be disfavored compared to that of the states." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.712, + 0.358, + 0.725 + ], + "angle": 0, + "content": "B.6 VALUE DIFFERENCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.738, + 0.825, + 0.811 + ], + "angle": 0, + "content": "In addition to reporting the quality guarantees of the model along training steps through local losses (cf. Figure 4b), our experiments revealed that the absolute value difference \\(\\| V_{\\overline{\\pi}_{\\theta}}\\|\\) between the original and latent models operating under the latent policy quickly decreases and tends to converge to values in the same range (Figure 6). This is consistent with the fact that minimizing local losses lead to close behaviors (cf. Eq. 1) and that the value function is Lipschitz-continuous w.r.t. \\(\\widetilde{d}_{\\overline{\\pi}_{\\theta}}\\) (cf. Section 2)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.829, + 0.473, + 0.842 + ], + "angle": 0, + "content": "B.7 REMARK ON FORMAL VERIFICATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Recall that our bisimulation guarantees come by construction of the latent space. Essentially, our learning algorithm spits out a distilled policy and a latent state space which already yields a guaranteed bisimulation distance between the original MDP and the latent MDP. This is the crux of how we enable verification techniques like model checking. In particular, bisimulation guarantees mean that reachability probabilities in the latent MDP compared to those in the original one are close." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Furthermore, the value difference of (omega-regular) properties (formulated through mu-calculus) obtained in the two models is bounded by this distance (cf. Sect. 2 and Chatterjee et al. 2010)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.144, + 0.827, + 0.311 + ], + "angle": 0, + "content": "Reachability is the key ingredient to model-check MDPs. Model-checking properties is in most cases performed by reduction to the reachability of components or regions of the MDP: it either consists of (i) iteratively checking the reachability of the parts of the state space satisfying path formulae that comprise the specification, through a tree-like decomposition of the latter (e.g., for (P,R-)CTL properties, cf. Baier & Katoen 2008), or (ii) checking the reachability to the part of the state space of a product of the MDP with a memory structure or an automaton that embeds the omega-regular property — e.g., for LTL (Baier et al., 2016; Sickert et al., 2016), LTLf (Wells et al., 2020), or GLTL (Littman et al., 2017), among other specification formalisms. The choice of specification formalism is up to the user and depends on the case study. The scope of this work is focusing on learning to distill RL policies with bisimulation guarantees so that model checking can be applied, in order to reason about the behaviors of the agent. That being said, reachability is all we need to show that model checking can be applied." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.33, + 0.362, + 0.344 + ], + "angle": 0, + "content": "B.8 HYPERPARAMETERS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.361, + 0.827, + 0.516 + ], + "angle": 0, + "content": "\\(\\mathbf{W}^2\\mathbf{AE}\\)-MDP parameters. All components (e.g., functions or distribution locations and scales, see Fig. 2) are represented and inferred by neural networks (multilayer perceptrons). All the networks share the same architecture (i.e., number of layers and neurons per layer). We use a simple uniform experience replay of size \\(10^{6}\\) to store the transitions and sample them. The training starts when the agent has collected \\(10^{4}\\) transitions in \\(\\mathcal{M}\\). We used minibatches of size 128 to optimize the objective and we applied a minibatch update every time the agent executing \\(\\pi\\) has performed 16 steps in \\(\\mathcal{M}\\). We use the recursive \\(\\epsilon\\)-perturbation trick of Huang (2020) with \\(\\epsilon = 3/4\\): when an episode ends, it restarts from the initial state with probability \\(1/4\\); before re-starting an episode, the time spent in the reset state labeled with reset follows then the geometric distribution with expectation \\(\\epsilon/1 - \\epsilon = 3\\). We chose the same latent state-action space size than Delgrange et al. (2022), except for LunarLander that we decreased to \\(\\log_2|\\bar{S}| = 14\\) and \\(|\\bar{\\mathcal{A}}| = 3\\) to improve the scalability of the verification." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.525, + 0.827, + 0.569 + ], + "angle": 0, + "content": "VAE-MDPs parameters. For the comparison of Sect. 4, we used the exact same VAE-MDP hyperparameter set as prescribed by Delgrange et al. (2022), except for the state-action space of LunarLander that we also changed for scalability and fair comparison purpose." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.58, + 0.827, + 0.844 + ], + "angle": 0, + "content": "Hyperparameter search. To evaluate our \\(\\mathrm{W}^2\\mathrm{AE}\\)-MDP, we realized a search in the parameter space defined in Table 2. The best parameters found (in terms of trade-off between performance and latent quality) are reported in Table 3. We used two different optimizers for minimizing the loss (referred to as the minimizer) and computing the Wasserstein terms (referred to as the maximizer). We used ADAM (Kingma & Ba, 2015) for the two, but we allow for different learning rates \\(\\mathrm{ADAM}_{\\alpha}\\) and exponential decays \\(\\mathrm{ADAM}_{\\beta_1}\\), \\(\\mathrm{ADAM}_{\\beta_2}\\). We also found that polynomial decay for \\(\\mathrm{ADAM}_{\\alpha}\\) (e.g., to \\(10^{-5}\\) for \\(4 \\cdot 10^{5}\\) steps) is a good practice to stabilize the experiment learning curves, but is not necessary to obtain high-quality and performing distillation. Concerning the continuous relaxation of discrete distributions, we used a different temperature for each distribution, as Maddison et al. (2017) pointed out that doing so is valuable to improve the results. We further followed the guidelines of Maddison et al. (2017) to choose the interval of temperatures and did not schedule any annealing scheme (in contrast to VAE-MDPs). Essentially, the search reveals that the regularizer scale factors \\(\\beta\\). (defining the optimization direction) as well as the encoder and latent transition temperatures are important to improve the performance of distilled policies. For the encoder temperature, we found a nice spot in \\(\\lambda_{\\phi_\\varepsilon} = 2/3\\), which provides the best performance in general, whereas the choice of \\(\\lambda_{\\overline{\\mathbb{P}}_\\theta}\\) and \\(\\beta\\): are (latent-) environment dependent. 
The importance of the temperature parameters for the continuous relaxation of discrete distributions is consistent with the results of (Maddison et al., 2017), revealing that the success of the relaxation depends on the choice of the temperature for the different latent space sizes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.884 + ], + "angle": 0, + "content": "Labeling functions. We used the same labeling functions as those described by Delgrange et al. (2022). For completeness, we recall the labeling function used for each environment in Table 4." + }, + { + "type": "page_footnote", + "bbox": [ + 0.172, + 0.897, + 0.825, + 0.925 + ], + "angle": 0, + "content": "2The code for conducting the VAE-MDPs experiments is available at https://github.com/ florentdelgrange/vae_mdp (GNU General Public License v3.0)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.173, + 0.163, + 0.825, + 0.181 + ], + "angle": 0, + "content": "Table 2: Hyperparameter search. \\( {\\lambda }_{X} \\) refers to the temperature used for \\( {\\mathrm{W}}^{2}\\mathrm{{AE}} \\) -MDP component \\( X \\) ." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.19, + 0.825, + 0.441 + ], + "angle": 0, + "content": "
ParameterRange
ADAMα (minimizer){0.0001,0.0002,0.0003,0.001}
ADAMα (maximizer){0.0001,0.0002,0.0003,0.001}
ADAMβ1{0,0.5,0.9}
ADAMβ2{0.9,0.999}
neurons per layer{64,128,256,512}
number of hidden layers{1,2,3}
activation{ReLU,LeakyReLU,tanh,softplus(2x+2)/2-1(smooth ELU)}
βwξπ{10,25,50,75,100}
βLξπ{10,25,50,75,100}
m{5,10,15,20}
δ{10,20}
use ε-mimic (cf. Delgrange et al. 2022){True,False} (if True, a decay rate of 10-5is used)
λPθ{0.1,1/3,1/2,2/3,3/5,0.99}
λφl{0.1,1/3,1/2,2/3,3/5,0.99}
λπθ{1/|A|-1,1/(|A|-1).15}
λφlA{1/|A|-1,1/(|A|-1).15}
" + }, + { + "type": "table_caption", + "bbox": [ + 0.262, + 0.572, + 0.737, + 0.589 + ], + "angle": 0, + "content": "Table 3: Final hyperparameters used to evaluate \\( {\\mathrm{W}}^{2}\\mathrm{{AE}} \\) -MDPs in Sect. 4" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.599, + 0.822, + 0.86 + ], + "angle": 0, + "content": "
CartPoleMountainCarAcrobotLunarLanderPendulum
log2|S|910131413
|A|2 = |A|2 = |A|3 = |A|33
activationtanhReLULeaky ReluReLUReLU
layers[64, 64, 64][512, 512][512, 512][256][256, 256, 256]
ADAMα (minimizer)0.00020.00010.00020.00030.0003
ADAMα (maximizer)0.00020.00010.00010.00030.0003
ADAMβ10.50000.5
ADAMβ20.9990.9990.9990.9990.999
βLξπ1025105025
βWξπ751001010025
m52020155
δ2010202010
ε00000.5
λPθ1/31/30.10.752/3
λφi1/32/32/32/32/3
λπθ2/31/30.50.50.5
λφiA///1/31/3
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.102, + 0.825, + 0.482 + ], + "angle": 0, + "content": "
EnvironmentS⊆Description, for s ∈ Sℓ(s) = <p1, ..., pn, preset>
CartPoleR4• s1: cart position\n• s2: cart velocity\n• s3: pole angle (rad)\n• s4: pole velocity at tip• p1 = 1s1≥1.5: unsafe cart position\n• p2 = 1s3≥0.15: unsafe pole angle
MountainCarR2• s1: position\n• s2: velocity• p1 = 1s1>1.5: target position\n• p2 = 1s1≥-1/2: right-hand side of the mountain\n• p3 = 1s2≥0: car going forward
AcrobotR6Let θ1, θ2 ∈ [0, 2π] be the angles of the two rotational joints,\n• s1 = cos(θ1)\n• s2 = sin(θ1)\n• s3 = cos(θ2)\n• s4 = sin(θ2)\n• s5: angular velocity 1\n• s6: angular velocity 2• p1 = 1-s1-s3·s1+s4·s2>1: RL agent target\n• p2 = 1s1≥0: θ1 ∈ [0, π/2] ∪ [3π/2, 2π]\n• p3 = 1s2≥0: θ1 ∈ [0, π]\n• p4 = 1s3≥0: θ2 ∈ [0, π/2] ∪ [3π/2, 2π]\n• p5 = 1s4≥0: θ2 ∈ [0, π]\n• p6 = 1s5≥0: positive angular velocity (1)\n• p7 = 1s6≥0: positive angular velocity (2)
PendulumR3Let θ ∈ [0, 2π] be the joint angle\n• s1 = cos(θ)\n• s2 = sin(θ)\n• s3: angular velocity• p1 = 1s1≥cos(π/3): safe joint angle\n• p2 = 1s1≥0: θ ∈ [0, π/2] ∪ [3π/2, 2π]\n• p3 = 1s2≥0: θ ∈ [0, π]\n• p4 = 1s3≥0: positive angular velocity
LunarLanderR8• s1: horizontal coordinates\n• s2: vertical coordinates\n• s3: horizontal speed\n• s4: vertical speed\n• s5: ship angle\n• s6: angular speed\n• s7: left leg contact\n• s8: right leg contact• p1: unsafe angle\n• p2: leg ground contact\n• p3: lands rapidly\n• p4: left inclination\n• p5: right inclination\n• p6: motors shut down
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.491, + 0.827, + 0.549 + ], + "angle": 0, + "content": "Table 4: Labeling functions for the OpenAI environments considered in our experiments (Delgrange et al., 2022). We provide a short description of the state space and the meaning of each atomic proposition. Recall that labels are binary encoded, for \\( n = |\\mathbf{AP}| - 1 \\) (one bit is reserved for reset) and \\( p_{\\mathrm{reset}} = 1 \\) iff \\( s \\) is a reset state (cf. Appendix B.2)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.573, + 0.825, + 0.631 + ], + "angle": 0, + "content": "Time to failure properties. Based on the labeling described in Table 4, we formally detail the time to failure properties checked in Sect. 4 whose results are listed in Table 1 for each environment. Let \\(\\text{Reset} = \\{\\text{reset}\\} = \\langle 0, \\dots, 1 \\rangle\\) (we assume here that the last bit indicates whether the current state is a reset state or not) and define \\(s \\models \\mathsf{L}_1 \\land \\mathsf{L}_2\\) iff \\(s \\models \\mathsf{L}_1\\) and \\(s \\models \\mathsf{L}_2\\) for any \\(s \\in S\\), then" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.641, + 0.622, + 0.657 + ], + "angle": 0, + "content": "- CartPole: \\(\\varphi = \\neg\\) Reset \\(\\mathcal{U}\\) Unsafe, where Unsafe \\(= \\langle 1,1,0\\rangle\\)" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.66, + 0.637, + 0.675 + ], + "angle": 0, + "content": "- MountainCar: \\(\\varphi = \\neg\\) GoalU Reset, where Goal \\(= \\langle 1,0,0,0\\rangle\\)" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.678, + 0.614, + 0.693 + ], + "angle": 0, + "content": "- Acrobot: \\(\\varphi = \\neg\\) GoalU Reset, where Goal \\(= \\langle 1,0,\\dots ,0\\rangle\\)" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.696, + 0.822, + 0.726 + ], + "angle": 0, + "content": "- LunarLander: \\(\\varphi = \\neg\\) SafeLanding \\(\\mathcal{U}\\) Reset, where SafeLanding \\(=\\) 
GroundContact \\(\\land\\) MotorsOff, GroundContact \\(=\\langle 0,1,0,0,0,0,0\\rangle\\), and MotorsOff \\(=\\langle 0,0,0,0,0,1,0\\rangle\\)" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.729, + 0.825, + 0.801 + ], + "angle": 0, + "content": "- Pendulum: \\(\\varphi = \\diamondsuit (\\neg \\text{Safe} \\land \\bigcirc \\text{Reset})\\), where Safe = \\(\\langle 1,0,0,0,0\\rangle\\), \\(\\diamondsuit \\mathsf{T} = \\neg \\emptyset \\mathcal{U} \\mathsf{T}\\), and \\(s_i \\models \\bigcirc \\mathsf{T}\\) iff \\(s_{i+1} \\models \\mathsf{T}\\), for any \\(\\mathsf{T} \\subseteq \\mathbf{AP}\\), \\(s_{i:\\infty}, a_{i:\\infty} \\in \\text{Traj}\\). Intuitively, \\(\\varphi\\) denotes the event of ending an episode in an unsafe state, just before resetting the environment, which means that either the agent never reached the safe region or it reached and left it at some point. Formally, \\(\\varphi = \\{s_{0:\\infty}, a_{0:\\infty} \\mid \\exists i \\in \\mathbb{N}, s_i \\models \\text{Safe} \\land s_{i+1} \\models \\text{Reset}\\} \\subseteq \\text{Traj}\\)." + }, + { + "type": "list", + "bbox": [ + 0.216, + 0.641, + 0.825, + 0.801 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.824, + 0.585, + 0.839 + ], + "angle": 0, + "content": "C ON THE CURSE OF VARIATIONAL MODELING" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Posterior collapse is a well known issue occurring in variational models (see, e.g., Alemi et al. 2018; Tolstikhin et al. 2018; He et al. 2019; Dong et al. 2020) which intuitively results in a degenerate local optimum where the model learns to ignore the latent space and use only the reconstruction functions (i.e., the decoding distribution) to optimize the objective. VAE-MDPs are no exception, as pointed out in the original paper (Delgrange et al., 2022, Section 4.3 and Appendix C.2)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.105, + 0.368, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.377, + 0.104, + 0.564, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.576, + 0.129, + 0.804, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.596, + 0.254, + 0.795, + 0.267 + ], + "angle": 0, + "content": "(b) Rate of the variational model." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.228, + 0.568, + 0.28 + ], + "angle": 0, + "content": "(a) Latent space distribution along training steps. The intensity of the blue hue corresponds to the frequency of latent states produced from \\(\\phi_{\\iota}\\) during training. The vanilla model collapses to a single state." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.292, + 0.368, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.394, + 0.384, + 0.419 + ], + "angle": 0, + "content": "(c) Distortion of the variational model." + }, + { + "type": "image", + "bbox": [ + 0.405, + 0.292, + 0.59, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.396, + 0.394, + 0.611, + 0.444 + ], + "angle": 0, + "content": "(d) Average point-wise entropy of \\(\\phi_{\\iota}(\\cdot \\mid s)\\), for \\(s \\in S\\) drawn from the interaction with the original environment." 
+ }, + { + "type": "image", + "bbox": [ + 0.624, + 0.293, + 0.807, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.619, + 0.392, + 0.827, + 0.444 + ], + "angle": 0, + "content": "(e) Performance of the resulting distilled policy \\(\\bar{\\pi}_{\\theta}\\) when deployed in the original environment (averaged over 30 episodes)." + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.457, + 0.828, + 0.57 + ], + "angle": 0, + "content": "Figure 7: Comparison of the VAE-MDP in the CartPole environment (i) when the distortion and the rate are minimized as is (vanilla model) and (ii) when it makes use of annealing schemes, entropy regularization, and prioritized experience replay to avoid posterior collapse (cf. Delgrange et al. 2022). While the former clearly fails to learn a useful latent representation, the later does so meticulously and smoothly in two distinguishable phases: first, \\(\\phi_{\\iota}\\) focuses on fairly distributing the latent space, setting up the stage to the concrete optimization occurring from step \\(4\\cdot 10^{5}\\), where the entropy of \\(\\phi_{\\iota}\\) is lowered, which allows to get the rate of the variational model away from zero. Five instances of the models are trained with different random seeds, with the same hyperparameters than in Sect. 4." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.596, + 0.828, + 0.765 + ], + "angle": 0, + "content": "Formally, VAE- and WAE-MDPs optimize their objective by minimizing two losses: a reconstruction cost plus a regularizer term which penalizes a discrepancy between the encoding distribution and the dynamics of the latent space model. In VAE-MDPs, the former corresponds to the distortion, and the later to the rate of the variational model (further details are given in Alemi et al. 2018; Delgrange et al. 
2022), while in our WAE-MDPs, the former corresponds to the raw transition distance and the later to both the steady-state and transition regularizers. Notably, the rate minimization of VAE-MDPs involves regularizing a stochastic embedding function \\(\\phi_{\\iota}(\\cdot | s)\\) point-wise, i.e., for all different input states \\(s \\in S\\) drawn from the interaction with the original environment. In contrast, the latent space regularization of the WAE-MDP involves the marginal embedding distribution \\(Q_{\\iota}\\) where the embedding function \\(\\phi_{\\iota}\\) is not required to be stochastic. Alemi et al. (2018) showed that posterior collapse occurs in VAEs when the rate of the variational model is close to zero, leading to low-quality representation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.773, + 0.828, + 0.888 + ], + "angle": 0, + "content": "Posterior collapse in VAE-MDPs. We illustrate the sensitivity of VAE-MDPs to the posterior collapse problem in Fig. 7, through the CartPole environment3: minimizing the distortion and the rate as is yields an embedding function which maps deterministically every input state to the same sink latent state (cf. Fig. 7a). Precisely, there is a latent state \\(\\bar{s} \\in \\bar{S}\\) so that \\(\\phi_{\\nu}(\\bar{s} \\mid s) \\approx 1\\) and \\(\\overline{\\mathbf{P}}_{\\theta}(\\bar{s} \\mid \\bar{s}, \\bar{a}) \\approx 1\\) whatever the state \\(s \\in S\\) and action \\(\\bar{a} \\in \\overline{A}\\). This is a form of posterior collapse, the resulting rate quickly drops to zero (cf. Fig 7b), and the resulting latent representation yields no information at all. 
This phenomenon is handled in VAE-MDPs by using (i) prioritized replay buffers that allow to focus on inputs that led to bad representation, and (ii) modifying the objective" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.898, + 0.825, + 0.926 + ], + "angle": 0, + "content": "3In fact, the phenomenon of collapsing to few state occurs for all the environments considered in this paper when their prioritized experience replay is not used, as illustrated in Delgrange et al., 2022, Appendix C.2." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.204 + ], + "angle": 0, + "content": "function for learning the latent space model — the so-called evidence lower bound (Hoffman et al., 2013; Kingma & Welling, 2014), or ELBO for short — and set up annealing schemes to eventually recover the ELBO at the end of the training process. Consequently, the resulting learning procedure focuses primarily on fairly distributing the latent space, to avoid it to collapse to a single latent state, to the detriment of learning the dynamics of the environment and the distillation of the RL policy. Then, the annealing scheme allows to make the model learn to finally smoothly use the latent space to maximize the ELBO, and achieve consequently a lower distortion at the \"price\" of a higher rate." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.212, + 0.828, + 0.43 + ], + "angle": 0, + "content": "Impact of the resulting learning procedure. The aforementioned annealing process, used to avoid that every state collapses to the same representation, possibly induces a high entropy embedding function (Fig. 
7d), which further complicates the learning of the model dynamics and the distillation in the first stage of the training process. In fact, in this particular case, one can observe that the entropy reaches its maximal value, which yields a fully random state embedding function. Recall that the VAE-MDP latent space is learned through independent Bernoulli distributions. Fig. 7d reports values centered around 4.188 in the first training phase, which corresponds to the entropy of the state embedding function when \\(\\phi_{\\iota}(\\cdot |s)\\) is uniformly distributed over \\(\\bar{S}\\) for any state \\(s\\in S\\) .. \\(H(\\phi_{\\iota}(\\cdot |s)) = \\sum_{i = 0}^{\\log_2|\\bar{S}| - |\\mathbf{AP}| = 6} - p_i\\log p_i - (1 - p_i)\\log (1 - p_i) = 4.188,\\) where \\(p_i = 1 / 2\\) for all \\(i.\\) The rate (Fig. 7b) drops to zero since the divergence pulls the latent dynamics towards this high entropy (yet another form of posterior collapse), which hinders the latent space model to learn a useful representation. However, the annealing scheme increases the rate importance along training steps, which enables the optimization to eventually leave this local optimum (here around \\(4\\cdot 10^{5}\\) training steps). This allows the learning procedure to leave the zero-rate spot, reduce the distortion (Fig. 7c), and finally distill the original policy (Fig. 7e)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.435, + 0.825, + 0.493 + ], + "angle": 0, + "content": "As a result, the whole engineering required to mitigate posterior collapse slows down the training procedure. This phenomenon is reflected in Fig. 4: VAE-MDPs need several steps to stabilize and set up the stage to the concrete optimization, whereas WAE-MDPs have no such requirements since they naturally do not suffer from collapsing issues (cf. Fig. 5), and are consequently faster to train." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.502, + 0.825, + 0.614 + ], + "angle": 0, + "content": "Lack of representation guarantees. On the theoretical side, since VAE-MDPs are optimized via the ELBO and the local losses via the related variational proxies, VAE-MDPs do not leverage the representation quality guarantees induced by local losses (Eq. 1) during the learning procedure (as explicitly pointed out by Delgrange et al., 2022, Sect. 4.1.): in contrast to WAE-MDPs, when two original states are embedded to the same latent, abstract state, the former are not guaranteed to be bisimilarly close (i.e., the agent is not guaranteed to behave the same way from those two states by executing the policy), meaning those proxies do not prevent original states having distant values collapsing together to the same latent representation." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.634, + 0.361, + 0.649 + ], + "angle": 0, + "content": "INDEX OF NOTATIONS" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.68, + 0.674, + 0.696 + ], + "angle": 0, + "content": "\\(\\mathbf{1}_{[cond]}\\) indicator function: 1 if the statement [cond] is true, and 0 otherwise" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.699, + 0.591, + 0.714 + ], + "angle": 0, + "content": "\\(\\mathcal{F}_d\\) Set of 1-Lipschitz functions w.r.t. 
the distance metric \\(d\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.719, + 0.515, + 0.734 + ], + "angle": 0, + "content": "\\(\\sigma\\) Sigmoid function, with \\(\\sigma (x) = 1 / (1 + \\exp (-x))\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.738, + 0.825, + 0.765 + ], + "angle": 0, + "content": "\\(f_{\\theta}\\) A function \\(f_{\\theta} \\colon \\mathcal{X} \\to \\mathbb{R}\\) modeled by a neural network, parameterized by \\(\\theta\\), where \\(\\mathcal{X}\\) is any measurable set" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.771, + 0.318, + 0.785 + ], + "angle": 0, + "content": "Latent Space Model" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.788, + 0.827, + 0.822 + ], + "angle": 0, + "content": "\\(\\overline{\\mathcal{M}} = \\langle \\overline{S}, \\overline{\\mathcal{A}}, \\overline{\\mathbf{P}}, \\overline{\\mathcal{R}}, \\bar{\\ell}, \\mathbf{AP}, \\bar{s}_I \\rangle\\) Latent MDP with state space \\(\\overline{S}\\), action space \\(\\overline{\\mathcal{A}}\\), reward function \\(\\overline{\\mathcal{R}}\\), labeling function \\(\\bar{\\ell}\\), atomic proposition space \\(\\mathbf{AP}\\), and initial state \\(\\bar{s}_I\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.174, + 0.825, + 0.425, + 0.842 + ], + "angle": 0, + "content": "\\(\\langle \\overline{\\mathcal{M}},\\phi ,\\psi \\rangle\\) Latent space model of \\(\\mathcal{M}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.847, + 0.353, + 0.862 + ], + "angle": 0, + "content": "\\(\\bar{a}\\) Latent action in \\(\\overline{\\mathcal{A}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.867, + 0.663, + 0.884 + ], + "angle": 0, + "content": "\\(\\bar{\\pi}\\) Latent policy \\(\\bar{\\pi}:\\bar{S}\\to \\mathcal{A}\\); can be executed in \\(\\mathcal{M}\\) via \\(\\phi\\): \\(\\bar{\\pi} (\\cdot \\mid \\phi (s))\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.888, + 0.383, + 0.904 + ], + "angle": 0, + "content": "\\(d_{\\overline{S}}\\) Distance metric over \\(\\bar{S}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.909, + 0.487, + 0.924 + ], + "angle": 0, + "content": "\\(\\phi\\) (20 State embedding function, from \\(\\mathcal{S}\\) to \\(\\overline{\\mathcal{S}}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.533, + 0.12 + ], + "angle": 0, + "content": "\\(\\psi\\) Action embedding function, from \\(\\overline{S}\\times \\overline{A}\\) to \\(\\mathcal{A}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.122, + 0.826, + 0.151 + ], + "angle": 0, + "content": "\\(\\phi \\mathbf{P}\\) Distribution of drawing \\(s^\\prime \\sim \\mathbf{P}(\\cdot \\mid s,a)\\), then embedding \\(\\bar{s}^{\\prime} = \\phi (s^{\\prime})\\), for any state \\(s\\in S\\) and action \\(a\\in \\mathcal{A}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.153, + 0.48, + 0.173 + ], + "angle": 0, + "content": 
"\\(L_{\\mathcal{R}}^{\\xi}\\) Local reward loss under distribution \\(\\xi\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.175, + 0.496, + 0.195 + ], + "angle": 0, + "content": "\\(L_{\\mathbf{P}}^{\\xi}\\) Local transition loss under distribution \\(\\xi\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.198, + 0.46, + 0.215 + ], + "angle": 0, + "content": "\\(\\overline{\\Pi}\\) Set of (memoryless) latent policies" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.217, + 0.341, + 0.232 + ], + "angle": 0, + "content": "\\(\\bar{s}\\) Latent state in \\(\\bar{S}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.235, + 0.371, + 0.253 + ], + "angle": 0, + "content": "\\(\\overrightarrow{V_{\\pi}}\\) Latent value function" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.256, + 0.367, + 0.27 + ], + "angle": 0, + "content": "Markov Decision Processes" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.828, + 0.305 + ], + "angle": 0, + "content": "\\(\\mathcal{M} = \\langle S, \\mathcal{A}, \\mathbf{P}, \\mathcal{R}, \\ell, \\mathbf{AP}, s_I \\rangle\\) MDP \\(\\mathcal{M}\\) with state space \\(S\\), action space \\(\\mathcal{A}\\), transition function \\(\\mathbf{P}\\), labeling function \\(\\ell\\), atomic proposition space \\(\\mathbf{AP}\\), and initial state \\(s_I\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.174, + 0.308, + 0.312, + 0.322 + ], + "angle": 0, + "content": "\\(a\\) Action in \\(\\mathcal{A}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.324, + 0.408, + 0.342 + ], + "angle": 0, + "content": "\\(\\widetilde{d}_{\\pi}\\) Bisimulation pseudometric" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.344, + 0.388, + 0.362 + ], + "angle": 0, + "content": "\\(\\gamma\\) Discount factor in [0, 1]" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.364, + 0.416, + 0.38 + ], + "angle": 0, + "content": "\\(d_{\\mathcal{A}}\\) Metric over the action space" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.382, + 0.357, + 0.398 + ], + "angle": 0, + "content": "\\(d_{\\mathcal{R}}\\) Metric over \\(\\operatorname {Im}(\\mathcal{R})\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.402, + 0.406, + 0.418 + ], + "angle": 0, + "content": "\\(d_{\\mathcal{S}}\\) Metric over the state space" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.42, + 0.826, + 0.451 + ], + "angle": 0, + "content": "\\(\\xi_{\\pi}^{t}\\) Limiting distribution of the MDP defined as \\(\\xi_{\\pi}^{t}(s^{\\prime}\\mid s) = \\mathbb{P}_{\\pi}^{\\mathcal{M}_{s}}\\left(\\left\\{s_{0:\\infty},a_{0:\\infty}\\mid s_{t} = s^{\\prime}\\right\\}\\right)\\), for any source state \\(s\\in S\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.453, + 0.45, + 0.47 + ], + "angle": 0, + "content": "\\(\\Pi\\) Set of memoryless policies of \\(\\mathcal{M}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.473, + 0.457, + 0.489 + ], + "angle": 0, + "content": "\\(\\pi\\) Memoryless policy \\(\\pi \\colon S\\to \\Delta (\\mathcal{A})\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.492, + 0.826, + 0.524 + ], + "angle": 0, + "content": "\\(\\mathbb{P}_{\\pi}^{\\mathcal{M}}\\) Unique probability measure induced by the policy \\(\\pi\\) in \\(\\mathcal{M}\\) on the Borel \\(\\sigma\\)-algebra over measurable subsets of \\(Traj\\)" + }, + { + 
"type": "text", + "bbox": [ + 0.174, + 0.526, + 0.431, + 0.542 + ], + "angle": 0, + "content": "\\(\\mathsf{C} \\mathcal{U} \\mathsf{T}\\) Constrained reachability event" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.545, + 0.615, + 0.562 + ], + "angle": 0, + "content": "\\(\\mathcal{M}_s\\) MDP obtained by replacing the initial state of \\(\\mathcal{M}\\) by \\(s\\in S\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.565, + 0.297, + 0.579 + ], + "angle": 0, + "content": "\\(s\\) State in \\(\\mathcal{S}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.583, + 0.578, + 0.6 + ], + "angle": 0, + "content": "\\(\\xi_{\\pi}\\) Stationary distribution of \\(\\mathcal{M}\\) induced by the policy \\(\\pi\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.603, + 0.64, + 0.621 + ], + "angle": 0, + "content": "\\(\\vec{d}\\) Raw transition distance, i.e., metric over \\(\\mathcal{S} \\times \\mathcal{A} \\times \\operatorname{Im}(\\mathcal{R}) \\times \\mathcal{S}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.624, + 0.438, + 0.64 + ], + "angle": 0, + "content": "Traj Set of infinite trajectories of \\(\\mathcal{M}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.643, + 0.371, + 0.659 + ], + "angle": 0, + "content": "\\(\\tau = \\langle s_{0:T}, a_{0:T-1} \\rangle\\) Trajectory" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.68, + 0.433, + 0.696 + ], + "angle": 0, + "content": "\\(V_{\\pi}\\) Value function for the policy \\(\\pi\\)" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.699, + 0.383, + 0.715 + ], + "angle": 0, + "content": "Probability / Measure Theory" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.718, + 0.793, + 0.734 + ], + "angle": 0, + "content": "\\(D\\) Discrepancy measure; \\(D(P,Q)\\) is the discrepancy between distributions \\(P,Q\\in \\Delta (\\mathcal{X})\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.737, + 0.616, + 0.753 + ], + "angle": 0, + "content": "\\(\\Delta (\\mathcal{X})\\) Set of measures over a complete, 
separable metric space \\(\\mathcal{X}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.756, + 0.728, + 0.772 + ], + "angle": 0, + "content": "Logistic \\((\\mu, s)\\) Logistic distribution with location parameter \\(\\mu\\) and scale parameter \\(s\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.775, + 0.825, + 0.805 + ], + "angle": 0, + "content": "\\(W_{d}\\) Wasserstein distance w.r.t. the metric \\(d\\); \\(W_{d}(P,Q)\\) is the Wasserstein distance between distributions \\(P, Q \\in \\Delta(\\mathcal{X})\\)" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.808, + 0.402, + 0.821 + ], + "angle": 0, + "content": "Wasserstein Auto-encoded MDP" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.845, + 0.61, + 0.862 + ], + "angle": 0, + "content": "\\(\\xi_{\\theta}\\) Behavioral model: distribution over \\(\\mathcal{S} \\times \\mathcal{A} \\times \\operatorname{Im}(\\mathcal{R}) \\times \\mathcal{S}\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.865, + 0.605, + 0.883 + ], + "angle": 0, + "content": "\\(G_{\\theta}\\) Mapping \\(\\langle \\bar{s},\\bar{a},\\bar{s}^{\\prime}\\rangle \\mapsto \\langle \\mathcal{G}_{\\theta}(\\bar{s}),\\psi_{\\theta}(\\bar{s},\\bar{a}),\\overline{\\mathcal{R}}_{\\theta}(\\bar{s},\\bar{a}),\\mathcal{G}_{\\theta}(\\bar{s}^{\\prime})\\rangle\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.886, + 0.502, + 0.905 + ], + "angle": 0, + "content": "\\(\\phi_{\\iota}^{A}\\) Action encoder mapping \\(\\overline{S}\\times \\mathcal{A}\\) to \\(\\Delta (\\overline{\\mathcal{A}})\\)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.909, + 0.442, + 0.925 + ], + "angle": 0, + "content": "\\(\\mathcal{G}_{\\theta}\\) State-wise decoder, from \\(\\bar{S}\\) to \\(S\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a 
conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.717, + 0.121 + ], + "angle": 0, + "content": "\\(Q_{\\iota}\\) Marginal encoding distribution over \\(\\overline{S} \\times \\overline{A} \\times \\overline{S}: \\mathbb{E}_{s,a,s' \\sim \\xi_{\\pi}} \\phi_{\\iota}(\\cdot \\mid s,a,s')\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.125, + 0.669, + 0.142 + ], + "angle": 0, + "content": "\\(\\bar{\\xi}_{\\bar{\\pi}_\\theta}\\) Stationary distribution of the latent model \\(\\overline{\\mathcal{M}}_{\\theta}\\), parameterized by \\(\\theta\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.146, + 0.387, + 0.162 + ], + "angle": 0, + "content": "\\(\\mathcal{W}_{\\xi_{\\overline{\\pi}}}\\) Steady-state regularizer" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.166, + 0.434, + 0.183 + ], + "angle": 0, + "content": "\\(\\varphi_{\\omega}^{\\xi}\\) Steady-state Lipschitz network" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.187, + 0.383, + 0.201 + ], + "angle": 0, + "content": "\\(\\lambda\\) Temperature parameter" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.825, + 0.251 + ], + "angle": 0, + "content": "\\(\\mathcal{T}\\) Distribution of drawing state-action pairs from interacting with \\(\\mathcal{M}\\), embedding them to the latent spaces, and finally letting them transition to their successor state in \\(\\overline{\\mathcal{M}}_{\\theta}\\), in \\(\\Delta (\\bar{S}\\times \\bar{A}\\times \\bar{S})\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.255, + 0.42, + 0.273 + ], + "angle": 0, + "content": "\\(\\varphi_{\\omega}^{\\mathbf{P}}\\) Transition Lipschitz network" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.825, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.292, + 0.402, + 0.308 + ], + "angle": 0, + "content": "ADDITIONAL REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 
0.173, + 0.316, + 0.827, + 0.442 + ], + "angle": 0, + "content": "Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S. Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow, Andrew Harp, Geoffrey Irving, Michael Isard, Yangqing Jia, Rafal Jozefowicz, Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dandelion Mane, Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Mike Schuster, Jonathon Shlens, Benoit Steiner, Ilya Sutskever, Kunal Talwar, Paul Tucker, Vincent Vanhoucke, Vijay Vasudevan, Fernanda Viégas, Oriol Vinyals, Pete Warden, Martin Wattenberg, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. TensorFlow: Large-scale machine learning on heterogeneous systems, 2015. URL https://www.tensorflow.org/. Software available from tensorflow.org." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.452, + 0.828, + 0.524 + ], + "angle": 0, + "content": "Alexander A. Alemi, Ben Poole, Ian Fischer, Joshua V. Dillon, Rif A. Saurous, and Kevin Murphy. Fixing a broken ELBO. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 159-168. PMLR, 2018. URL http://proceedings.mlr.press/v80/alemi18a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.533, + 0.827, + 0.562 + ], + "angle": 0, + "content": "Joshua V. Dillon, Ian Langmore, Dustin Tran, Eugene Brevdo, Srinivas Vasudevan, Dave Moore, Brian Patton, Alex Alemi, Matt Hoffman, and Rif A. Saurous. Tensorflow distributions, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.572, + 0.827, + 0.643 + ], + "angle": 0, + "content": "Zhe Dong, Bryan A. Seybold, Kevin Murphy, and Hung H. Bui. Collapsed amortized variational inference for switching nonlinear dynamical systems. 
In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 2638-2647. PMLR, 2020. URL http://proceedings.mlr.press/v119/dong20e.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.653, + 0.827, + 0.738 + ], + "angle": 0, + "content": "Sergio Guadarrama, Anoop Korattikara, Oscar Ramirez, Pablo Castro, Ethan Holly, Sam Fishman, Ke Wang, Ekaterina Gonina, Neal Wu, Efi Kokiopoulou, Luciano Sbaiz, Jamie Smith, Gábor Bartók, Jesse Berent, Chris Harris, Vincent Vanhoucke, and Eugene Brevdo. TF-Agents: A library for reinforcement learning in tensorflow. https://github.com/tensorflow/agents, 2018. URL https://github.com/tensorflow/agents. [Online; accessed 25-June-2019]." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.748, + 0.827, + 0.805 + ], + "angle": 0, + "content": "Junxian He, Daniel Spokoyny, Graham Neubig, and Taylor Berg-Kirkpatrick. Lagging inference networks and posterior collapse in variational autoencoders. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019. URL https://openreview.net/forum?id=rylDfnCqF7." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.814, + 0.829, + 0.857 + ], + "angle": 0, + "content": "Matthew D. Hoffman, David M. Blei, Chong Wang, and John W. Paisley. Stochastic variational inference. J. Mach. Learn. Res., 14(1):1303-1347, 2013. URL http://dl.acm.org/citation.cfm?id=2502622." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. URL http://arxiv.org/abs/1412.6980." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.316, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.161 + ], + "angle": 0, + "content": "Diederik P. Kingma and Max Welling. Auto-encoding variational bayes. In Yoshua Bengio and Yann LeCun (eds.), 2nd International Conference on Learning Representations, ICLR 2014, Banff, AB, Canada, April 14-16, 2014, Conference Track Proceedings, 2014. URL http://arxiv.org/abs/1312.6114." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.169, + 0.827, + 0.198 + ], + "angle": 0, + "content": "Vidyadhar G. Kulkarni. Modeling and Analysis of Stochastic Systems. Chapman & Hall, Ltd., GBR, 1995. ISBN 0412049910." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.207, + 0.828, + 0.264 + ], + "angle": 0, + "content": "Ilya O. Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Scholkopf. Wasserstein autoencoders. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=HkL7n1-0b." 
+ }, + { + "type": "list", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "30" + } + ] +] \ No newline at end of file diff --git a/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_origin.pdf b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..962ad2096f2cebca9169618eb16151d52d1c1dcc --- /dev/null +++ b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/7aa139d3-a427-412b-84b8-883489a7c318_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79884101850741402a86f73e8f487ee7c838e6b12fb9157e654d96bed21eb5e1 +size 2107861 diff --git a/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/full.md b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ca616c6af842fc65e3a7179ea918e0c9f238fecd --- /dev/null +++ b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/full.md @@ -0,0 +1,844 @@ +# WASSERSTEIN AUTO-ENCODEDMDPS + +FORMAL VERIFICATION OF EFFICIENTLY DISTILLED RL POLICIES WITH MANY-SIDED GUARANTEES + +# Florent Delgrange + +AI Lab, Vrije Universiteit Brussel (VUB) + +University of Antwerp + +florent.delgrange@ai.vub.ac.be + +# Ann Nowé + +AI Lab, VUB + +# Guillermo A. 
Pérez + +University of Antwerp + +Flanders Make + +# ABSTRACT + +Although deep reinforcement learning (DRL) has many success stories, the large-scale deployment of policies learned through these advanced techniques in safety-critical scenarios is hindered by their lack of formal guarantees. Variational Markov Decision Processes (VAE-MDPs) are discrete latent space models that provide a reliable framework for distilling formally verifiable controllers from any RL policy. While the related guarantees address relevant practical aspects such as the satisfaction of performance and safety properties, the VAE approach suffers from several learning flaws (posterior collapse, slow learning speed, poor dynamics estimates), primarily due to the absence of abstraction and representation guarantees to support latent optimization. We introduce the Wasserstein auto-encoded MDP (WAE-MDP), a latent space model that fixes those issues by minimizing a penalized form of the optimal transport between the behaviors of the agent executing the original policy and the distilled policy, for which the formal guarantees apply. Our approach yields bisimulation guarantees while learning the distilled policy, allowing concrete optimization of the abstraction and representation model quality. Our experiments show that, besides distilling policies up to 10 times faster, the latent model quality is indeed better in general. Moreover, we present experiments from a simple time-to-failure verification algorithm on the latent space. The fact that our approach enables such simple verification techniques highlights its applicability. + +# 1 INTRODUCTION + +Reinforcement learning (RL) is emerging as a solution of choice to address challenging real-word scenarios such as epidemic mitigation and prevention strategies (Libin et al., 2020), multi-energy management (Ceusters et al., 2021), or effective canal control (Ren et al., 2021). 
Specifically, we learn a discrete representation of the state-action space of the MDP, which yields both a (smaller, explicit) latent space model and a distilled version of the RL policy, both of which are tractable for model checking (Baier & Katoen, 2008).
Furthermore, while VAEs learn stochastic mappings to the latent space which need be determined or even entirely reconstructed from data at deployment time to obtain the guarantees, our WAE has no such requirements: it learns all the necessary components to obtain the guarantees during training and does not require such post-processing operations.
Our distilled policies are able to recover (and sometimes even outperform) the original policy performance, highlighting the representation quality offered by our new framework: the distillation is able to remove some non-robustness of the input RL policy. Finally, we formally verified time-to-failure properties (e.g., Pnueli 1977) to emphasize the applicability of our approach. + +Other Related Work. Complementary works approach safe RL via formal methods (Junges et al., 2016; Alshiekh et al., 2018; Jansen et al., 2020; Simão et al., 2021), aimed at formally ensuring safety during RL, all of which require providing an abstract model of the safety aspects of the environment. They also include the work of Alamdari et al. (2020), applying synthesis and model checking on policies distilled from RL, without quality guarantees. Other frameworks share our goal of verifying deep-RL policies (Bacci & Parker, 2020; Carr et al., 2020) but rely on a known environment model, among other assumptions (e.g., deterministic or discrete environment). Finally, DeepSynth (Hasanbeig et al., 2021) allows learning a formal model from execution traces, with the different purpose of guiding the agent towards sparse and non-Markovian rewards. + +On the latent space training side, WWAEs (Zhang et al., 2019) reuse OT as latent regularizer discrepancy (in Gaussian closed form), whereas we derive two regularizers involving OT. These two are, in contrast, optimized via the dual formulation of Wasserstein, as in Wasserstein-GANs (Arjovsky et al., 2017). Similarly to $VQ$ -VAEs (van den Oord et al., 2017) and Latent Bernoulli AEs (Fajtl et al., 2020), our latent space model learns discrete spaces via deterministic encoders, but relies on a smooth approximation instead of using the straight-through gradient estimator. 
+ +Works on representation learning for RL (Gelada et al., 2019; Castro et al., 2021; Zhang et al., 2021; Zang et al., 2022) consider bisimulation metrics to optimize the representation quality, and aim at learning (continuous) representations which capture bisimulation, so that two states close in the representation are guaranteed to provide close and relevant information to optimize the performance of the controller. In particular, as in our work, DeepMDPs (Gelada et al., 2019) are learned by optimizing local losses, by assuming a deterministic MDP and without verifiable confidence measurement. + +# 2 BACKGROUND + +In the following, we write $\Delta(\mathcal{X})$ for the set of measures over (complete, separable metric space) $\mathcal{X}$ . + +Markov decision processes (MDPs) are tuples $\mathcal{M} = \langle \mathcal{S},\mathcal{A},\mathbf{P},\mathcal{R},\ell ,\mathbf{AP},s_I\rangle$ where $\mathcal{S}$ is a set of states; $\mathcal{A}$ , a set of actions; $\mathbf{P}\colon S\times \mathcal{A}\to \Delta (\mathcal{S})$ , a probability transition function that maps the current state and action to a distribution over the next states; $\mathcal{R}\colon S\times \mathcal{A}\to \mathbb{R}$ , a reward function; $\ell \colon S\to 2^{\mathbf{AP}}$ , a labeling function over a set of atomic propositions $\mathbf{AP}$ ; and $s_I\in S$ , the initial state. If $|\mathcal{A}| = 1$ , $\mathcal{M}$ is a fully stochastic process called a Markov chain (MC). We write $\mathcal{M}_s$ for the MDP obtained when replacing the initial state of $\mathcal{M}$ by $s\in S$ . An agent interacting in $\mathcal{M}$ produces trajectories, i.e., sequences of states and actions $\tau = \langle s_{0:T},a_{0:T - 1}\rangle$ where $s_0 = s_I$ and $s_{t + 1}\sim \mathbf{P}(\cdot |s_t,a_t)$ for $t < T$ . The set of infinite trajectories of $\mathcal{M}$ is Traj. 
Given $\mathsf{T} \subseteq \mathbf{AP}$, we write $s \models \mathsf{T}$ if $s$ is labeled with $\mathsf{T}$, i.e., $\ell(s) \cap \mathsf{T} \neq \emptyset$, and $s \models \neg \mathsf{T}$ for $s \not\models \mathsf{T}$.
(ii) Reachability: let $\mathsf{C}, \mathsf{T} \subseteq \mathbf{AP}$, the (constrained) reachability event is $\mathsf{C} \mathbin{\mathsf{U}} \mathsf{T} = \{s_{0:\infty}, a_{0:\infty} | \exists i \in \mathbb{N}, \forall j < i, s_j \models \mathsf{C} \wedge s_i \models \mathsf{T}\} \subseteq \mathsf{Traj}$.
We write $V_{\pi}^{\varphi}(s) = \mathbb{E}_{\pi}^{\mathcal{M}_s}\left[\gamma^{t^\star} \mathbf{1}_{\langle s_{0:\infty}, a_{0:\infty} \rangle \in \varphi}\right]$ for the discounted probability of satisfying $\varphi = \mathsf{C} \mathbin{\mathsf{U}} \mathsf{T}$, where $t^\star$ is the length of the shortest trajectory prefix that allows satisfying $\varphi$. Intuitively, this denotes the discounted return of remaining in a region of the MDP where states are labeled with $\mathsf{C}$, until visiting for the first time a goal state labeled with $\mathsf{T}$, and the return is the binary reward signal capturing this event. Safety w.r.t. failure states $\mathsf{C}$ can be expressed as the safety-constrained reachability to a destination $\mathsf{T}$ through $\neg \mathsf{C} \mathbin{\mathsf{U}} \mathsf{T}$.
The latent policy $\overline{\pi}$ can be seen as a policy in $\mathcal{M}$ (cf. Fig. 1a): states passed to $\overline{\pi}$ are first embedded with $\phi$ to the latent space, then the actions produced by $\overline{\pi}$ are executed via $\psi$ in the original environment. Let $s \in S$ , we write $\bar{a} \sim \overline{\pi}(\cdot | s)$ for $\overline{\pi}(\cdot | \phi(s))$ , then the reward and next state are respectively given by $\mathcal{R}(s, \bar{a}) = \mathcal{R}(s, \psi(\phi(s), \bar{a}))$ and $s' \sim \mathbf{P}(\cdot | s, \bar{a}) = \mathbf{P}(\cdot | s, \psi(\phi(s), \bar{a}))$ . + +Local losses allow quantifying the distance between the original and latent reward/transition functions in the local setting, i.e., under a given state-action distribution $\xi \in \Delta(S \times \overline{\mathcal{A}})$ : + +$$ +L _ {\mathcal {R}} ^ {\xi} = \underset {s, \bar {a} \sim \xi} {\mathbb {E}} \left| \mathcal {R} (s, \bar {a}) - \overline {{\mathcal {R}}} (\phi (s), \bar {a}) \right|, \quad L _ {\mathbf {P}} ^ {\xi} = \underset {s, \bar {a} \sim \xi} {\mathbb {E}} D \big (\phi \mathbf {P} (\cdot | s, \bar {a}), \overline {{\mathbf {P}}} (\cdot | \phi (s), \bar {a}) \big) +$$ + +where $\phi \mathbf{P}(\cdot \mid s,\bar{a})$ is the distribution of drawing $s^\prime \sim \mathbf{P}(\cdot \mid s,\bar{a})$ then embedding $\bar{s}^{\prime} = \phi (s^{\prime})$ , and $D$ is a discrepancy measure. Fig 1a depicts the losses when states and actions are drawn from a stationary distribution $\xi_{\overline{\pi}}$ resulting from running $\bar{\pi}\in \overline{\Pi}$ in $\mathcal{M}$ . 
In this work, we focus on the case where $D$ is the Wasserstein distance $W_{d_{\overline{s}}}$ : given two distributions $P,Q$ over a measurable set $\mathcal{X}$ equipped with a metric $d$ , $W_{d}$ is the solution of the optimal transport (OT) from $P$ to $Q$ , i.e., the minimum cost of changing $P$ into $Q$ (Villani, 2009): $W_{d}(P,Q) = \inf_{\lambda \in \Lambda (P,Q)}\mathbb{E}_{x,y\sim \lambda}d(x,y)$ , $\Lambda (P,Q)$ being the set of all couplings of $P$ and $Q$ . The Kantorovich duality yields $W_{d}(P,Q) = \sup_{f\in \mathcal{F}_{d}}\mathbb{E}_{x\sim P}f(x) - \mathbb{E}_{x\sim Q}f(y)$ where $\mathcal{F}_d$ is the set of 1-Lipschitz functions. Local losses are related to a well-established behavioral equivalence between transition systems, called bisimulation. + +Bisimulation. A bisimulation $\mathcal{B}$ on $\mathcal{M}$ is a behavioral equivalence between states $s_1, s_2 \in S$ so that, $s_1 \mathcal{B} s_2$ iff (i) $\mathbf{P}(T \mid s_1, a) = \mathbf{P}(T \mid s_2, a)$ , (ii) $\ell(s_1) = \ell(s_2)$ , and (iii) $\mathcal{R}(s_1, a) = \mathcal{R}(s_2, a)$ for each action $a \in \mathcal{A}$ and (Borel measurable) equivalence class $T \in S / \mathcal{B}$ . Properties of bisimulation include trajectory and value equivalence (Larsen & Skou, 1989; Givan et al., 2003). Requirements (ii) and (iii) can be respectively relaxed depending on whether we focus only on behaviors formalized through $\mathbf{AP}$ or rewards. The relation can be extended to compare two MDPs (e.g., $\mathcal{M}$ and $\overline{\mathcal{M}}$ ) by considering the disjoint union of their state space. We denote the largest bisimulation relation by $\sim$ . + +Characterized by a logical family of functional expressions derived from a logic $\mathcal{L}$ , bisimulation pseudometrics (Desharnais et al., 2004) generalize the notion of bisimilarity. 
In particular, value functions are Lipschitz-continuous w.r.t. $\widetilde{d}_{\pi}$: $|V_{\pi}^{\prime}(s_1) - V_{\pi}^{\prime}(s_2)|\leqslant K\widetilde{d}_{\pi}(s_1,s_2)$, where $K$ is $1/(1 - \gamma)$ if rewards are included in $\mathcal{F}$ and 1 otherwise.
A solution is to evaluate the original and latent model bisimilarity via local losses: fix $\bar{\pi} \in \overline{\Pi}$ , assume $\overline{\mathcal{M}}$ is discrete, then given the induced stationary distribution $\xi_{\bar{\pi}}$ in $\mathcal{M}$ , let $s_1, s_2 \in S$ with $\phi(s_1) = \phi(s_2)$ : + +$$ +\underset {s \sim \xi_ {\bar {\pi}}} {\mathbb {E}} \widetilde {d} _ {\bar {\pi}} (s, \phi (s)) \leqslant \frac {L _ {\mathcal {R}} ^ {\xi_ {\bar {\pi}}} + \gamma L _ {\mathbf {P}} ^ {\xi_ {\bar {\pi}}}}{1 - \gamma}, \quad \widetilde {d} _ {\bar {\pi}} (s _ {1}, s _ {2}) \leqslant \left(\frac {L _ {\mathcal {R}} ^ {\xi_ {\bar {\pi}}} + \gamma L _ {\mathbf {P}} ^ {\xi_ {\bar {\pi}}}}{1 - \gamma}\right) \left(\xi_ {\bar {\pi}} ^ {- 1} (s _ {1}) + \xi_ {\bar {\pi}} ^ {- 1} (s _ {2})\right). \tag {1} +$$ + +The two inequalities guarantee respectively the quality of the abstraction and representation: when local losses are small, (i) states and their embedding are bisimilarly close in average, and (ii) all states sharing the same discrete representation are bisimilarly close. The local losses and related bounds can be efficiently PAC-estimated. Our goal is to learn a latent model where the behaviors of the agent executing $\bar{\pi}$ can be formally verified, and the bounds offer a confidence metric allowing to lift the guarantees obtained this way back to the original model $\mathcal{M}$ , when the latter operates under $\bar{\pi}$ . We show in the following how to learn a latent space model by optimizing the aforementioned bounds, and distill policies $\pi \in \Pi$ obtained via any RL technique to a latent policy $\bar{\pi} \in \overline{\Pi}$ . 
# 3 WASSERSTEIN AUTO-ENCODED MDPs
Given Assumption 2.1, we consider the OT between local distributions, where traces are drawn from episodic RL processes or infinite interactions (we show in Appendix A.1 that considering the OT between trace-based distributions in the limit amounts to reasoning about stationary distributions). Our goal is to minimize $W_{\vec{d}}(\xi_{\pi}, \xi_{\theta})$ so that + +$$ +\xi_ {\theta} (s, a, r, s ^ {\prime}) = \int_ {\bar {S} \times \bar {A} \times \bar {S}} P _ {\theta} (s, a, r, s ^ {\prime} \mid \bar {s}, \bar {a}, \bar {s} ^ {\prime}) d \bar {\xi} _ {\bar {\pi} _ {\theta}} (\bar {s}, \bar {a}, \bar {s} ^ {\prime}), \tag {2} +$$ + +where $P_{\theta}$ is a transition decoder and $\bar{\xi}_{\overline{\pi}_{\theta}}$ denotes the stationary distribution of the latent model $\overline{\mathcal{M}}_{\theta}$ . As proved by Bousquet et al. (2017), this model allows to derive a simpler form of the OT: instead of finding the optimal coupling of (i) the stationary distribution $\xi_{\pi}$ of $\mathcal{M}_{\pi}$ and (ii) the behavioral model $\xi_{\theta}$ , in the primal definition of $W_{\vec{d}}(\xi_{\pi},\xi_{\theta})$ , it is sufficient to find an encoder $q$ whose marginal is given by $Q(\bar{s},\bar{a},\bar{s}^{\prime}) = \mathbb{E}_{s,a,s^{\prime}\sim \xi_{\pi}}q(\bar{s},\bar{a},\bar{s}^{\prime}\mid s,a,s^{\prime})$ and identical to $\xi_{\pi}$ . This is summarized in the following Theorem, yielding a particular case of Wasserstein-autoencoder Tolstikhin et al. (2018): + +Theorem 3.1. Let $\xi_{\theta}$ and $P_{\theta}$ be respectively a behavioral model and transition decoder as defined in Eq. 2, $\mathcal{G}_{\theta} \colon \overline{\mathcal{S}} \to \mathcal{S}$ be a state-wise decoder, and $\psi_{\theta}$ be an action embedding function. 
$$
W_{\vec{d}}(\xi_{\pi},\xi_{\theta}) = \inf_{q:Q = \bar{\xi}_{\bar{\pi}_{\theta}}}\underset{\substack{s,a,r,s^{\prime}\sim \xi_{\pi}\\ \bar{s},\bar{a},\bar{s}^{\prime}\sim q(\cdot |s,a,s^{\prime})}}{\mathbb{E}}\vec{d}\bigl(\bigl\langle s,a,r,s^{\prime}\bigr\rangle ,G_{\theta}\bigl(\bar{s},\bar{a},\bar{s}^{\prime}\bigr)\bigr).
$$

Henceforth, fix $\phi_{\iota} \colon S \to \bar{S}$ and $\phi_{\iota}^{\mathcal{A}} \colon \bar{S} \times \mathcal{A} \to \Delta(\overline{\mathcal{A}})$ as parameterized state and action encoders with $\phi_{\iota}(\bar{s}, \bar{a}, \bar{s}' \mid s, a, s') = \mathbf{1}_{\phi_{\iota}(s) = \bar{s}} \cdot \phi_{\iota}^{\mathcal{A}}(\bar{a} \mid \bar{s}, a) \cdot \mathbf{1}_{\phi_{\iota}(s') = \bar{s}'}$, and define the marginal encoder as $Q_{\iota} = \mathbb{E}_{s, a, s' \sim \xi_{\pi}} \phi_{\iota}(\cdot \mid s, a, s')$.
If $\mathcal{M}$ has a discrete action space, then learning $\overline{\mathcal{A}}$ is not necessary. We can set $\overline{\mathcal{A}} = \mathcal{A}$ using identity functions for the action encoder and decoder (details in Appendix A.2). + +When $\pi$ is executed in $\mathcal{M}$ , observe that its parallel execution in $\overline{\mathcal{M}}_{\theta}$ is enabled by the action encoder $\phi_{\iota}^{A}$ : given an original state $s \in S$ , $\pi$ first prescribes the action $a \sim \pi(\cdot \mid s)$ , which is then embedded in the latent space via $\bar{a} \sim \phi_{\iota}^{A}(\cdot \mid \phi_{\iota}(s), a)$ (cf. Fig. 1b). This parallel execution, along with setting $D$ to $W_{\vec{d}}$ , yield an upper bound on the latent regularization, compliant with the bisimulation bounds. A two-fold regularizer is obtained thereby, defining the foundations of our objective function: + +Lemma 3.2. Define $\mathcal{T}(\bar{s},\bar{a},\bar{s}^{\prime}) = \mathbb{E}_{s,a\sim \xi_{\pi}}[\mathbf{1}_{\phi_{\iota}(s) = \bar{s}}\cdot \phi_{\iota}^{A}(\bar{a}\mid \bar{s},a)\cdot \overline{\mathbf{P}}_{\theta}(\bar{s}^{\prime}\mid \bar{s},\bar{a})]$ as the distribution of drawing state-action pairs from interacting with $\mathcal{M}$ , embedding them to the latent spaces, and finally letting them transition to their successor state in $\overline{\mathcal{M}}_{\theta}$ . 
$$
\min_{\iota ,\theta}\ \underset{s,a,s^{\prime}\sim \xi_{\pi}}{\mathbb{E}}\ \underset{\bar{s},\bar{a},\bar{s}^{\prime}\sim \phi_{\iota}(\cdot |s,a,s^{\prime})}{\mathbb{E}}\left[d_{\mathcal{S}}(s,\mathcal{G}_{\theta}(\bar{s})) + d_{\mathcal{A}}(a,\psi_{\theta}(\bar{s},\bar{a})) + d_{\mathcal{S}}\big(s^{\prime},\mathcal{G}_{\theta}\big(\bar{s}^{\prime}\big)\big)\right] + L_{\mathcal{R}}^{\xi_{\pi}} + \beta \cdot (\mathcal{W}_{\xi_{\pi}} + L_{\mathbf{P}}^{\xi_{\pi}}),
$$
\mathrm{GP}\big(x \mapsto \varphi_\omega^{\mathbf{P}}(s_i, a_i, \bar{s}_i, \bar{a}_i, x), \bar{s}_i', \bar{s}_i^\star\big)$ +Update the Lipschitz networks parameters $\omega$ by ascending $1/N \cdot (\beta \mathcal{W} - \delta P)$ +if $t$ mod $m = 0$ then + $\mathcal{L} \gets \sum_{i=1}^{N} d_{\mathcal{S}}(s_i, \mathcal{G}_{\theta}(\bar{s}_i)) + d_{\mathcal{A}}(a_i, \psi_{\theta}(\bar{s}_i, \bar{a}_i)) + d_{\mathcal{R}}(r_i, \overline{\mathcal{R}}_{\theta}(\bar{s}_i, \bar{a}_i)) + d_{\mathcal{S}}(s_i', \mathcal{G}_{\theta}(\bar{s}_i'))$ +Update the latent space model parameters $\langle \iota, \theta\rangle$ by descending $1/N \cdot (\mathcal{L} + \beta \mathcal{W})$ +function $\mathrm{GP}(\varphi_\omega, x, y)$ $\epsilon \sim U(0,1)$ ; $\tilde{x} \gets \epsilon x + (1 - \epsilon)y$ +return ( $\|\nabla_{\tilde{x}}\varphi_{\omega}(\tilde{x})\| - 1)^2$ + +where $\mathcal{W}_{\xi_{\pi}} = W_{\vec{d}}\big(\mathcal{T},\bar{\xi}_{\bar{\pi}_{\theta}}\big)$ and $L_{\mathbf{P}}^{\xi_{\pi}}$ are respectively called steady-state and transition regularizers. The former allows to quantify the distance between the stationary distributions respectively induced by $\pi$ in $\mathcal{M}$ and $\bar{\pi}_{\theta}$ in $\overline{\mathcal{M}}_{\theta}$ , further enabling the distillation. The latter allows to learn the latent dynamics. Note that $L_{\mathcal{R}}^{\xi_{\pi}}$ and $L_{\mathbf{P}}^{\xi_{\pi}}$ — set over $\xi_{\pi}$ instead of $\xi_{\bar{\pi}_{\theta}}$ — are not sufficient to ensure the bisimulation bounds (Eq. 1): running $\pi$ in $\overline{\mathcal{M}}_{\theta}$ depends on the parallel execution of $\pi$ in the original model, which does not permit its (conventional) verification. Breaking this dependency is enabled by learning the distillation $\bar{\pi}_{\theta}$ through $\mathcal{W}_{\xi_{\pi}}$ , as shown in Fig. 
1b: minimizing $\mathcal{W}_{\xi_{\pi}}$ allows to make $\xi_{\pi}$ and $\bar{\xi}_{\bar{\pi}_{\theta}}$ closer together, further bridging the gap of the discrepancy between $\pi$ and $\bar{\pi}_{\theta}$ . At any time, recovering the local losses along with the linked bisimulation bounds in the objective function of the $\mathrm{W}^{2}\mathrm{AE}$ -MDP is allowed by considering the latent policy resulting from this distillation: + +Theorem 3.3. Assume that traces are generated by running a latent policy $\bar{\pi} \in \overline{\Pi}$ in the original environment and let $d_{\mathcal{R}}$ be the usual Euclidean distance, then the $W^{2}$ AE-MDP objective is + +$$ +\min_{\iota ,\theta}\mathbb{E}_{s,s^{\prime}\sim \xi_{\overline{\pi}}}\left[d_{\mathcal{S}}(s,\mathcal{G}_{\theta}(\phi_{\iota}(s))) + d_{\mathcal{S}}\big(s^{\prime},\mathcal{G}_{\theta}\big(\phi_{\iota}\big(s^{\prime}\big)\big)\big)\right] + L_{\mathcal{R}}^{\xi_{\overline{\pi}}} + \beta \cdot (\mathcal{W}_{\xi_{\overline{\pi}}} + L_{\mathbf{P}}^{\xi_{\overline{\pi}}}). 
+$$ + +Optimizing the regularizers is enabled by the dual form of the OT: we introduce two parameterized networks, $\varphi_{\omega}^{\xi}$ and $\varphi_{\omega}^{\mathbf{P}}$ , constrained to be 1-Lipschitz and trained to attain the supremum of the dual: + +$$ +\mathcal {W} _ {\xi_ {\pi}} (\omega) = \max _ {\omega} \underset {s, a \sim \xi_ {\pi}} {\mathbb {E}} \underset {\bar {a} \sim \phi_ {\iota} ^ {\mathcal {A}} (\cdot | \phi_ {\iota} (s), a)} {\mathbb {E}} \underset {\bar {s} ^ {\star} \sim \overline {{\mathbf {P}}} _ {\theta} (\cdot | \phi_ {\iota} (s), \bar {a})} {\mathbb {E}} \varphi_ {\omega} ^ {\xi} (\phi_ {\iota} (s), \bar {a}, \bar {s} ^ {\star}) - \underset {z, \bar {a} ^ {\prime}, z ^ {\prime} \sim \bar {\xi} _ {\bar {\pi} _ {\theta}}} {\mathbb {E}} \varphi_ {\omega} ^ {\xi} (z, \bar {a} ^ {\prime}, z ^ {\prime}) +$$ + +$$ +L _ {\mathbf {P}} ^ {\xi_ {\pi}} (\omega) = \max _ {\omega} \underset {s, a, s ^ {\prime} \sim \xi_ {\pi} \bar {s}, \bar {a}, \bar {s} ^ {\prime} \sim \phi_ {\iota} (\cdot | s, a, s ^ {\prime})} {\mathbb {E}} \underset {s, a, \bar {s}, \bar {a}, \bar {s} ^ {\prime}} {\mathbb {E}} \left[ \varphi_ {\omega} ^ {\mathbf {P}} (s, a, \bar {s}, \bar {a}, \bar {s} ^ {\prime}) - \underset {\bar {s} ^ {\star} \sim \overline {{\mathbf {P}}} _ {\theta} (\cdot | \bar {s}, \bar {a})} {\mathbb {E}} \varphi_ {\omega} ^ {\mathbf {P}} (s, a, \bar {s}, \bar {a}, \bar {s} ^ {\star}) \right] +$$ + +Details to derive this tractable form of $L_{\mathbf{P}}^{\xi_{\pi}}(\omega)$ are in Appendix A.5. The networks are constrained via the gradient penalty approach of Gulrajani et al. (2017), leveraging that any differentiable function is 1-Lipschitz iff it has gradients with norm at most 1 everywhere (we show in Appendix A.6 this is still valid for relaxations of discrete spaces). The final learning process is presented in Algorithm 1. 
+ +# 3.2 DISCRETE LATENT SPACES + +To enable the verification of latent models supported by the bisimulation guarantees of Eq. 1, we focus on the special case of discrete latent space models. Our approach relies on continuous relaxation of discrete random variables, regulated by some temperature parameter(s) $\lambda$ : discrete random variables are retrieved as $\lambda \rightarrow 0$ , which amounts to applying a rounding operator. For training, we use the + +![](images/ba6acc59c6dd787cf773da4990450ac8c3811bb05c69258cb6c36e0405fa4212.jpg) +Figure 2: W $^2$ AE-MDP architecture. Distances are depicted by red dotted lines. + +temperature-controlled relaxations to differentiate the objective and let the gradient flow through the network. When we deploy the latent policy in the environment and formally check the latent model, the zero-temperature limit is used. An overview of the approach is depicted in Fig. 2. + +State encoder. We work with a binary representation of the latent states. First, this induces compact networks, able to deal with a large discrete space via a tractable number of parameter variables. But most importantly, this ensures that Assumption 2.1 is satisfied: let $n = \log_2|\bar{S}|$ , we reserve $|\mathbf{AP}|$ bits in $\bar{S}$ and each time $s\in S$ is passed to $\phi_{\iota}$ , $n - |\mathbf{AP}|$ bits are produced and concatenated with $\ell (s)$ , ensuring a perfect reconstruction of the labels and further bisimulation bounds. To produce Bernoulli variables, $\phi_{\iota}$ deterministically maps $s$ to a latent code $z$ , passed to the Heaviside $H(z) = \mathbf{1}_{z > 0}$ . We train $\phi_{\iota}$ by using the smooth approximation $H_{\lambda}(z) = \sigma(2z / \lambda)$ , satisfying $H = \lim_{\lambda \to 0}H_{\lambda}$ . + +Latent distributions. Besides the discontinuity of their latent image space, a major challenge of optimizing over discrete distributions is sampling, required to be a differentiable operation. 
We circumvent this by using concrete distributions (Jang et al., 2017; Maddison et al., 2017): the idea is to sample reparameterizable random variables from $\lambda$ -parameterized distributions, and applying a differentiable, nonlinear operator downstream. We use the Gumbel softmax trick to sample from distributions over (one-hot encoded) latent actions $(\phi_{\iota}^{A}, \bar{\pi}_{\theta})$ . For binary distributions $(\overline{\mathbf{P}}_{\theta}, \bar{\xi}_{\bar{\pi}_{\theta}})$ , each relaxed Bernoulli with logit $\alpha$ is retrieved by drawing a logistic random variable with location $\alpha/\lambda$ and scale $1/\lambda$ , then applying a sigmoid downstream. We emphasize that this trick alone (as used by Corneil et al. 2018; Delgrange et al. 2022) is not sufficient: it yields independent Bernoullis, being too restrictive in general, which prevents learning sound transition dynamics (cf. Example 1). + +Example 1. Let $\overline{\mathcal{M}}$ be the discrete MC of Fig. 3. In one-hot, $\mathbf{AP} = \{\text{goal}: \langle 1,0\rangle$ , unsafe: $\langle 0,1\rangle\}$ . We assume that 3 bits are used for the (binary) state space, with $\bar{S} = \{\bar{s}_0:\langle 0,0,0\rangle,\bar{s}_1:\langle 1,0,0\rangle,\bar{s}_2:\langle 0,1,0\rangle,\bar{s}_3:\langle 0,1,1\rangle\}$ (the two first bits are reserved for the labels). Considering each bit as being independent is not sufficient to learn $\overline{\mathbf{P}}$ : the optimal estimation $\overline{\mathbf{P}}_{\theta^*}(\cdot \mid \bar{s}_0)$ is in that case represented by the independent Bernoulli vector $\mathbf{b} = \langle 1 / 2,1 / 2,1 / 4\rangle$ , giving the probability to go from $\bar{s}_0$ to each bit independently. This yields a poor estimation of + +![](images/0629839b821124de73e3d0a3e4a307a3344303d23062ef901fc0208c05f77eb4.jpg) +Figure 3: Markov Chain with four states; labels are drawn next to their state. 
+ +the actual transition function: $\overline{\mathbf{P}}_{\theta^{\star}}(\bar{s}_0\mid \bar{s}_0) = (1 - \mathbf{b}_1)\cdot (1 - \mathbf{b}_2)\cdot (1 - \mathbf{b}_3) = \overline{\mathbf{P}}_{\theta^{\star}}(\bar{s}_1\mid \bar{s}_0) = \mathbf{b}_1\cdot (1 - \mathbf{b}_2)\cdot (1 - \mathbf{b}_3) = \overline{\mathbf{P}}_{\theta^{\star}}(\bar{s}_2\mid \bar{s}_0) = (1 - \mathbf{b}_1)\cdot \mathbf{b}_2\cdot (1 - \mathbf{b}_3) = 3 / 16,\overline{\mathbf{P}}_{\theta^{\star}}(\bar{s}_3\mid \bar{s}_0) = (1 - \mathbf{b}_1)\cdot \mathbf{b}_2\cdot \mathbf{b}_3 = 1 / 16.$ + +We consider instead relaxed multivariate Bernoulli distributions by decomposing $P \in \Delta(\bar{S})$ as a product of conditionals: $P(\bar{s}) = \prod_{i=1}^{n} P(\bar{s}_i \mid \bar{s}_{1:i-1})$ where $\bar{s}_i$ is the $i^{\text{th}}$ entry (bit) of $\bar{s}$ . We learn + +![](images/9c55334604e272a24a2d2c459da443d86f0342e1fd7a55b89480a32c1fca213f.jpg) +(a) $\mathrm{W}^2\mathrm{AE}$ -MDP objective: reconstruction loss, transition and steady-state regularizers + +![](images/714939f162e90922feac0a1018e369dcfa27d5f6eec47ad4de6e24d64c45e0e6.jpg) + +![](images/08bc6b25f957e23bb23c7289fe848fcfe54d1ff27c81e02320a9067f2e82e8aa.jpg) + +![](images/7b54af92e28e4b018ff7fd0a910a2c83b0aa1aa63bb9634874cc3e34a1196259.jpg) + +![](images/2b18f0b54e58bdbb50c1f701e27bce3cfbfb89e25b9addfa9341989094d23ab1.jpg) + +![](images/536af967a77909eae3bf9ace5be851abc0cff870cd3527cf2c4253a3e2289f5e.jpg) + +![](images/0cc522ef4cb964743c61a30daf501ba6b5148d2149b2839dd93fc7ddb89629f2.jpg) + +![](images/2e1876c67cc627048c46904e159bf1f7220478afda0433f6a1e357849f6ebeb0.jpg) + +![](images/e2eac6b2937052e31cf28f322688c4dca02183fb5ac7b7271aeb9a4b06321bb7.jpg) + +![](images/a8c766d4827e2b8abc267f67b11b833d2e53501b4c4cb0974dc12cdd91d97cb1.jpg) + +![](images/df8a04275e496f0fbac36084da0e6177998fbf260116ab4544543c0c9aeea6d5.jpg) +(b) PAC local losses approximation for an error of at most $10^{-2}$ and probability confidence 0.955 + 
+![](images/73c72890fd47c7601ae57c2406a94476c8475834315122aa719fdcd53a86acf3.jpg) +(c) Episode return obtained when executing the distilled policy in the original MDP (averaged over 30 episodes) + +![](images/424acf2d704a79f4faa63a8ad3b3f9a19381996e84a5600f23ce499f220ce9e1.jpg) +Figure 4: For each environment, we trained five different instances of the models with different random seeds: the solid line is the median and the shaded interval the interquartile range. + +![](images/1952d42234fa0463371b4de55b130d80d41790bdf5356204e2292de79b8ec308.jpg) + +![](images/1ed507649916381414d2551a56cfda6d2a52c21abecffdfc6917fc5cf6f73be0.jpg) + +such distributions by introducing a masked autoregressive flow (MAF, Papamakarios et al. 2017) for relaxed Bernoullis via the recursion: $\bar{s}_i = \sigma((l_i + \alpha_i) / \lambda)$ , where $l_i \sim \mathrm{Logistic}(0,1)$ , $\alpha_i = f_i(\bar{s}_{1:i-1})$ , and $f$ is a MADE (Germain et al., 2015), a feedforward network implementing the conditional output dependency on the inputs via a mask that only keeps the necessary connections to enforce the conditional property. We use this MAF to model $\overline{\mathbf{P}}_\theta$ and the dynamics related to the labels in $\bar{\xi}_{\overline{\pi}_\theta}$ . We fix the logits of the remaining $n - |\mathbf{AP}|$ bits to 0 to allow for a fairly distributed latent space. + +# 4 EXPERIMENTS + +We evaluate the quality of latent space models learned and policies distilled through $\mathrm{W}^2\mathrm{AE}$ -MDPs. To do so, we first trained deep-RL policies (DQN, Mnih et al. 2015 on discrete, and SAC, Haarnoja et al. 2018 on continuous action spaces) for various OpenAI benchmarks (Brockman et al., 2016), which we then distill via our approach (Figure 4). 
We thus evaluate (a) the $\mathrm{W}^2\mathrm{AE}$ -MDP training metrics, (b) the abstraction and representation quality via PAC local losses upper bounds (Delgrange et al., 2022), and (c) the distilled policy performance when deployed in the original environment. The confidence metrics and performance are compared with those of VAE-MDPs. Finally, we formally verify properties in the latent model. The exact setting to reproduce our results is in Appendix B. + +Learning metrics. The objective (Fig. 4a) is a weighted sum of the reconstruction loss and the two Wasserstein regularizers. The choice of $\beta$ defines the optimization direction. In contrast to VAEs (cf. Appendix C), WAEs indeed naturally avoid posterior collapse (Tolstikhin et al., 2018), indicating that the latent space is consistently distributed. Optimizing the objective (Fig. 4a) effectively allows minimizing the local losses (Fig. 4b) and recovering the performance of the original policy (Fig. 4c). + +Local losses. For V- and WAEs, we formally evaluate PAC upper bounds on $L_{\mathcal{R}}^{\xi_{\bar{\pi}_{\theta}}}$ and $L_{\mathbf{P}}^{\xi_{\bar{\pi}_{\theta}}}$ via the algorithm of Delgrange et al. (2022) (Fig. 4b). The lower the local losses, the closer $\mathcal{M}$ and $\overline{\mathcal{M}}_{\theta}$ are in terms of behaviors induced by $\bar{\pi}_{\theta}$ (cf. Eq. 1). In VAEs, the losses are evaluated on a transition function $\hat{\mathbf{P}}$ obtained via frequency estimation of the latent transition dynamics (Delgrange et al., 2022), by reconstructing the transition model a posteriori and collecting data to estimate the transition probabilities (e.g., Bazille et al. 2020; Corneil et al. 2018). We thus also report the metrics for $\hat{\mathbf{P}}$ . Our bounds quickly converge to close values in general for $\overline{\mathbf{P}}_{\theta}$ and $\hat{\mathbf{P}}$ , whereas for VAEs, the convergence is slow and unstable, with $\hat{\mathbf{P}}$ offering better bounds. 
We emphasize that WAEs do not require this additional reconstruction step to obtain losses that can be leveraged to assess the + +Table 1: Formal Verification of distilled policies. Values are computed for $\gamma = {0.99}$ (lower is better). + +
| Environment | step ($10^5$) | $\mathcal{S}$ | $\mathcal{A}$ | $|\bar{\mathcal{S}}|$ | $|\bar{\mathcal{A}}|$ | $L_{\mathcal{R}}^{\xi_{\bar{\pi}_{\theta}}}$ (PAC) | $L_{\mathbf{P}}^{\xi_{\bar{\pi}_{\theta}}}$ (PAC) | $\|V_{\bar{\pi}_{\theta}}\|$ | $\bar{V}_{\bar{\pi}_{\theta}}(\bar{s}_I)$ |
|---|---|---|---|---|---|---|---|---|---|
| CartPole | 1.2 | $\subseteq \mathbb{R}^4$ | $\{1,2\}$ | 512 | 2 | 0.00499653 | 0.399636 | 3.71213 | 0.0316655 |
| MountainCar | 2.32 | $\subseteq \mathbb{R}^2$ | $\{1,2\}$ | 1024 | 2 | 0.0141763 | 0.382323 | 2.83714 | 0 |
| Acrobot | 4.3 | $\subseteq \mathbb{R}^6$ | $\{1,2,3\}$ | 8192 | 3 | 0.0347698 | 0.649478 | 2.22006 | 0.0021911 |
| LunarLander | 3.2 | $\subseteq \mathbb{R}^8$ | $[-1,1]^2$ | 16384 | 3 | 0.0207205 | 0.131357 | 0.0372883 | 0.0702039 |
| Pendulum | 3.7 | $\subseteq \mathbb{R}^3$ | $[-2,2]$ | 8192 | 3 | 0.0266745 | 0.539508 | 4.33006 | 0.0348492 |
+ +quality of the model, in contrast to VAEs, where learning $\overline{\mathbf{P}}_{\theta}$ was performed via overly restrictive distributions, leading to poor estimation in general (cf. Ex. 1). Finally, when the distilled policies offer comparable performance (Fig. 4c), our bounds are either close to or better than those of VAEs. + +Distillation. The bisimulation guarantees (Eq. 1) are only valid for $\bar{\pi}_{\theta}$ , the policy under which formal properties can be verified. It is crucial that $\bar{\pi}_{\theta}$ achieves performance close to $\pi$ , the original one, when deployed in the RL environment. We evaluate the performance of $\bar{\pi}_{\theta}$ via the undiscounted episode return $\mathbf{R}_{\bar{\pi}_{\theta}}$ obtained by running $\bar{\pi}_{\theta}$ in the original model $\mathcal{M}$ . We observe that $\mathbf{R}_{\bar{\pi}_{\theta}}$ approaches faster the original performance $\mathbf{R}_{\pi}$ for W- than VAEs: WAEs converge in a few steps for all environments, whereas the full learning budget is sometimes necessary with VAEs. The success in recovering the original performance emphasizes the representation quality guarantees (Eq. 1) induced by WAEs: when local losses are minimized, all original states that are embedded to the same representation are bisimilarly close. Distilling the policy over the new representation, albeit discrete and hence coarser, still achieves effective performance since $\phi_{\iota}$ keeps only what is important to preserve behaviors, and thus values. Furthermore, the distillation can remove some non-robustness obtained during RL: $\bar{\pi}_{\theta}$ prescribes the same actions for bisimilarly close states, whereas this is not necessarily the case for $\pi$ . + +Formal verification. 
To formally verify $\overline{\mathcal{M}}_{\theta}$ , we implemented a value iteration (VI) engine, handling the neural network encoding of the latent space for discounted properties, which is one of the most popular algorithms for checking property probabilities in MDPs (e.g., Baier & Katoen 2008; Hensel et al. 2021; Kwiatkowska et al. 2022). We verify time-to-failure properties $\varphi$ , often used to check the failure rate of a system (Pnueli, 1977) by measuring whether the agent fails before the end of the episode. Although simple, such properties highlight the applicability of our approach on reachability events, which are building blocks to verify MDPs (Baier & Katoen 2008; cf. Appendix B.7). In particular, we checked whether the agent reaches an unsafe position or angle (CartPole, LunarLander), does not reach its goal position (MountainCar, Acrobot), and does not reach and stay in a safe region of the system (Pendulum). Results are in Table 1: for each environment, we select the distilled policy which gives the best trade-off between performance (episode return) and abstraction quality (local losses). As extra confidence metric, we report the value difference $\| V_{\overline{\pi}_{\theta}} \| = |V_{\overline{\pi}_{\theta}}(s_I) - \bar{V}_{\overline{\pi}_{\theta}}(\bar{s}_I)|$ obtained by executing $\overline{\pi}_{\theta}$ in $\mathcal{M}$ and $\overline{\mathcal{M}}_{\theta}$ ( $V_{\overline{\pi}_{\theta}}(\cdot)$ is averaged while $\bar{V}_{\overline{\pi}_{\theta}}(\cdot)$ is formally computed). + +# 5 CONCLUSION + +We presented WAE-MDPs, a framework for learning formally verifiable distillations of RL policies with bisimulation guarantees. The latter, along with the learned abstraction of the unknown continuous environment to a discrete model, enables the verification. 
Our method overcomes the limitations of VAE-MDPs and our results show that it outperforms the latter in terms of learning speed, model quality, and performance, in addition to being supported by stronger learning guarantees. As mentioned by Delgrange et al. (2022), distillation failure reveals the lack of robustness of original RL policies. In particular, we found that distilling highly noise-sensitive RL policies (such as robotics simulations, e.g., Todorov et al. 2012) is laborious, even though the result remains formally verifiable. + +We demonstrated the feasibility of our approach through the verification of reachability objectives, which are building blocks for stochastic model-checking (Baier & Katoen, 2008). Besides the scope of this work, the verification of general discounted $\omega$ -regular properties is theoretically allowed in our model via the reachability to components of standard constructions based on automata products (e.g., Baier et al. 2016; Sickert et al. 2016), and discounted games algorithms (Chatterjee et al., 2010). Beyond distillation, our results, supported by Thm. 3.3, suggest that our WAE-MDP can be used as a general latent space learner for RL, further opening possibilities to combine RL and formal methods online when no formal model is a priori known, and address this way safety in RL with guarantees. + +# REPRODUCIBILITY STATEMENT + +We referenced in the main text the Appendix parts presenting the proofs or additional details of every claim, Assumption, Lemma, and Theorem occurring in the paper. In addition, Appendix B is dedicated to the presentation of the setup, hyperparameters, and other extra details required for reproducing the results of Section 4. We provide the source code of the implementation of our approach in Supplementary material $^{1}$ , and we also provide the models saved during training that we used for model checking (i.e., reproducing the results of Table 1). 
Additionally, we present in a notebook (evaluation.html) videos demonstrating how our distilled policies behave in each environment, and code snippets showing how we formally verified the policies. + +# ACKNOWLEDGMENTS + +This research received funding from the Flemish Government (AI Research Program) and was supported by the DESCARTES iBOF project. G.A. Perez is also supported by the Belgian FWO "SAILor" project (G030020N). We thank Raphael Avalos for his valuable feedback during the preparation of this manuscript. + +# REFERENCES + +Parand Alizadeh Alamdari, Guy Avni, Thomas A. Henzinger, and Anna Lukina. Formal methods with a touch of magic. In 2020 Formal Methods in Computer Aided Design, FMCAD 2020, Haifa, Israel, September 21-24, 2020, pp. 138-147. IEEE, 2020. doi: 10.34727/2020/isbn.978-3-85448-042-6_21. URL https://doi.org/10.34727/2020/isbn.978-3-85448-042-6_21. +Alexander A. Alemi, Ben Poole, Ian Fischer, Joshua V. Dillon, Rif A. Saurous, and Kevin Murphy. Fixing a broken ELBO. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 159-168. PMLR, 2018. URL http://proceedings.mlr.press/v80/alemi18a.html. +Mohammed Alshiekh, Roderick Bloem, Rüdiger Ehlers, Bettina Könighofer, Scott Niekum, and Ufuk Topcu. Safe reinforcement learning via shielding. In Sheila A. McIlraith and Kilian Q. Weinberger (eds.), Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, pp. 2669-2678. AAAI Press, 2018. URL https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/view/17211. +Martín Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. 
In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pp. 214-223. PMLR, 2017. URL http://proceedings.mlr.press/v70/arjovsky17a.html. +Edoardo Bacci and David Parker. Probabilistic guarantees for safe deep reinforcement learning. In Nathalie Bertrand and Nils Jansen (eds.), Formal Modeling and Analysis of Timed Systems - 18th International Conference, FORMATS 2020, Vienna, Austria, September 1-3, 2020, Proceedings, volume 12288 of LNCS, pp. 231-248. Springer, 2020. doi: 10.1007/978-3-030-57628-8_14. URL https://doi.org/10.1007/978-3-030-57628-8_14. +Christel Baier and Joost-Pieter Katoen. Principles of model checking. MIT Press, 2008. ISBN 978-0-262-02649-9. +Christel Baier, Stefan Kiefer, Joachim Klein, Sascha Klüppelholz, David Müller, and James Worrell. Markov chains and unambiguous büchi automata. In Swarat Chaudhuri and Azadeh Farzan (eds.), Computer Aided Verification - 28th International Conference, CAV 2016, Toronto, ON, Canada, July 17-23, 2016, Proceedings, Part I, volume 9779 of Lecture Notes in Computer Science, pp. 23-42. Springer, 2016. doi: 10.1007/978-3-319-41528-4_2. URL https://doi.org/10.1007/978-3-319-41528-4_2. + +Hugo Bazille, Blaise Genest, Cyrille Jégourel, and Jun Sun. Global PAC bounds for learning discrete time markov chains. In Shuvendu K. Lahiri and Chao Wang (eds.), Computer Aided Verification - 32nd International Conference, CAV 2020, Los Angeles, CA, USA, July 21-24, 2020, Proceedings, Part II, volume 12225 of Lecture Notes in Computer Science, pp. 304-326. Springer, 2020. doi: 10.1007/978-3-030-53291-8\_17. URL https://doi.org/10.1007/978-3-030-53291-8_17. +O. Bousquet, S. Gelly, I. Tolstikhin, Carl-Johann Simon-Gabriel, and B. Schölkopf. From optimal transport to generative modeling: the vegan cookbook. arXiv: Machine Learning, 2017. 
+Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym. CoRR, abs/1606.01540, 2016. URL http://arxiv.org/abs/1606.01540. +Steven Carr, Nils Jansen, and Ufuk Topcu. Verifiable rnn-based policies for pomdps under temporal logic constraints. In Christian Bessiere (ed.), Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI 2020, pp. 4121-4127. ijcai.org, 2020. doi: 10.24963/ijcai.2020/570. URL https://doi.org/10.24963/ijcai.2020/570. +Pablo Samuel Castro, Tyler Kastner, Prakash Panangaden, and Mark Rowland. Mico: Improved representations via sampling-based state similarity for markov decision processes. In Marc'Aurelio Ranzato, Alina Beygelzimer, Yann N. Dauphin, Percy Liang, and Jennifer Wortman Vaughan (eds.), Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pp. 30113-30126, 2021. URL https://proceedings.neurips.cc/paper/2021/hash/fd06b8ea02fe5b1c2496fe1700e9d16c-Abstract.html. +Glenn Ceusters, Roman Cantú Rodríguez, Alberte Bouso García, Rüdiger Franke, Geert Deconinck, Lieve Helsen, Ann Nowé, Maarten Messagie, and Luis Ramirez Camargo. Model-predictive control and reinforcement learning in multi-energy system case studies. Applied Energy, 303:117634, 2021. ISSN 0306-2619. doi: https://doi.org/10.1016/j.apenergy.2021.117634. URL https://www.sciencedirect.com/science/article/pii/S0306261921010011. +Krishnendu Chatterjee, Luca de Alfaro, Rupak Majumdar, and Vishwanath Raman. Algorithms for game metrics (full version). Log. Methods Comput. Sci., 6(3), 2010. URL http://arxiv.org/abs/0809.4326. +Dane S. Corneil, Wulfram Gerstner, and Johanni Brea. Efficient model-based deep reinforcement learning with variational state tabulation. In Jennifer G. 
Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 1057-1066. PMLR, 2018. URL http://proceedings.mlr.press/v80/corneil18a.html. +Florent Delgrange, Ann Nowé, and Guillermo A. Pérez. Distillation of rl policies with formal guarantees via variational abstraction of markov decision processes. Proceedings of the AAAI Conference on Artificial Intelligence, 36(6):6497-6505, Jun. 2022. doi: 10.1609/aaai.v36i6.20602. URL https://ojs.aaai.org/index.php/AAAI/article/view/20602. +Josée Desharnais, Vineet Gupta, Radha Jagadeesan, and Prakash Panangaden. Metrics for labelled markov processes. Theor. Comput. Sci., 318(3):323-354, 2004. doi: 10.1016/j.tcs.2003.09.013. URL https://doi.org/10.1016/j.tcs.2003.09.013. +Jiri Fajtl, Vasileios Argyriou, Dorothy Monekosso, and Paolo Remagnino. Latent bernoulli autoencoder. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 2964-2974. PMLR, 2020. URL http://proceedings.mlr.press/v119/fajtl20a.html. +Norm Ferns, Doina Precup, and Sophia Knight. Bisimulation for markov decision processes through families of functional expressions. In Franck van Breugel, Elham Kashefi, Catuscia Palamidessi, and Jan Rutten (eds.), Horizons of the Mind. A Tribute to Prakash Panangaden - Essays Dedicated to Prakash Panangaden on the Occasion of His 60th Birthday, volume 8464 of LNCS, pp. 319-342. + +Springer, 2014. doi: 10.1007/978-3-319-06880-0_17. URL https://doi.org/10.1007/978-3-319-06880-0_17. +Carles Gelada, Saurabh Kumar, Jacob Buckman, Ofir Nachum, and Marc G. Bellemare. Deepmdp: Learning continuous latent space models for representation learning. 
In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 2170-2179. PMLR, 2019. URL http://proceedings.mlr.press/v97/gelada19a.html. +Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: masked autoencoder for distribution estimation. In Francis R. Bach and David M. Blei (eds.), Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pp. 881-889. JMLR.org, 2015. URL http://proceedings.mlr.press/v37/germain15.html. +Robert Givan, Thomas L. Dean, and Matthew Greig. Equivalence notions and model minimization in markov decision processes. Artif. Intell., 147(1-2):163-223, 2003. doi: 10.1016/S0004-3702(02)00376-4. URL https://doi.org/10.1016/S0004-3702(02)00376-4. +Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C. Courville. Improved training of wasserstein gans. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 5767-5777, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/892c3b1c6dcbd52936e27cbd0ff683d6-Abstract.html. +Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 1856-1865. PMLR, 2018. URL http://proceedings.mlr.press/v80/haarnoja18b.html. 
+Mohammadhosein Hasanbeig, Natasha Yogananda Jeppu, Alessandro Abate, Tom Melham, and Daniel Kroening. Deepsynth: Automata synthesis for automatic task segmentation in deep reinforcement learning. In Thirty-Fifth AAAI Conference on Artificial Intelligence, AAAI 2021, Thirty-Third Conference on Innovative Applications of Artificial Intelligence, IAAI 2021, The Eleventh Symposium on Educational Advances in Artificial Intelligence, EAAI 2021, Virtual Event, February 2-9, 2021, pp. 7647-7656. AAAI Press, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/16935. +Christian Hensel, Sebastian Junges, Joost-Pieter Katoen, Tim Quatmann, and Matthias Volk. The probabilistic model checker storm. International Journal on Software Tools for Technology Transfer, 2021. ISSN 1433-2787. doi: 10.1007/s10009-021-00633-z. URL https://doi.org/10.1007/s10009-021-00633-z. +Matthew D. Hoffman, David M. Blei, Chong Wang, and John W. Paisley. Stochastic variational inference. J. Mach. Learn. Res., 14(1):1303-1347, 2013. URL http://dl.acm.org/citation.cfm?id=2502622. +Bojun Huang. Steady state analysis of episodic reinforcement learning. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/hash/69bfa2aa2b7b139ff581a806abf0a886-Abstract.html. +Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. URL https://openreview.net/forum?id=rkE3y85ee. + +Nils Jansen, Bettina Könighofer, Sebastian Junges, Alex Serban, and Roderick Bloem. Safe Reinforcement Learning Using Probabilistic Shields (Invited Paper). 
In Igor Konnov and Laura Kovács (eds.), 31st International Conference on Concurrency Theory (CONCUR 2020), volume 171 of Leibniz International Proceedings in Informatics (LIPics), pp. 3:1-3:16, Dagstuhl, Germany, 2020. Schloss Dagstuhl-Leibniz-Zentrum für Informatik. ISBN 978-3-95977-160-3. doi: 10.4230/LIPics.CONCUR.2020.3. URL https://drops.dagstuhl.de/opus/volltexte/2020/12815. +Sebastian Junges, Nils Jansen, Christian Dehnert, Ufuk Topcu, and Joost-Pieter Katoen. Safety-constrained reinforcement learning for mdps. In Marsha Chechik and Jean-François Raskin (eds.), Tools and Algorithms for the Construction and Analysis of Systems - 22nd International Conference, TACAS 2016, Eindhoven, The Netherlands, April 2-8, 2016, Proceedings, volume 9636 of LNCS, pp. 130-146. Springer, 2016. doi: 10.1007/978-3-662-49674-9_8. URL https://doi.org/10.1007/978-3-662-49674-9_8. +Diederik P. Kingma and Max Welling. Auto-encoding variational bayes. In Yoshua Bengio and Yann LeCun (eds.), 2nd International Conference on Learning Representations, ICLR 2014, Banff, AB, Canada, April 14-16, 2014, Conference Track Proceedings, 2014. URL http://arxiv.org/abs/1312.6114. +Marta Kwiatkowska, Gethin Norman, and David Parker. Probabilistic model checking and autonomy. Annual Review of Control, Robotics, and Autonomous Systems, 5(1):385-410, 2022. doi: 10.1146/annurev-control-042820-010947. URL https://doi.org/10.1146/annurev-control-042820-010947. +Kim Guldstrand Larsen and Arne Skou. Bisimulation through probabilistic testing. In Conference Record of the Sixteenth Annual ACM Symposium on Principles of Programming Languages, Austin, Texas, USA, January 11-13, 1989, pp. 344-352. ACM Press, 1989. doi: 10.1145/75277.75307. URL https://doi.org/10.1145/75277.75307. +Pieter J. K. Libin, Arno Moonens, Timothy Verstraeten, Fabian Perez-Sanjines, Niel Hens, Philippe Lemey, and Ann Nowé. Deep reinforcement learning for large-scale epidemic control. 
In Yuxiao Dong, Georgiana Ifrim, Dunja Mladenic, Craig Saunders, and Sofie Van Hoecke (eds.), Machine Learning and Knowledge Discovery in Databases. Applied Data Science and Demo Track - European Conference, ECML PKDD 2020, Ghent, Belgium, September 14-18, 2020, Proceedings, Part V, volume 12461 of Lecture Notes in Computer Science, pp. 155-170. Springer, 2020. doi: 10.1007/978-3-030-67670-4_10. URL https://doi.org/10.1007/978-3-030-67670-4_10. +Michael L. Littman, Ufuk Topcu, Jie Fu, Charles Lee Isbell Jr., Min Wen, and James MacGlashan. Environment-independent task specifications via GLTL. CoRR, abs/1704.04341, 2017. URL http://arxiv.org/abs/1704.04341. +Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The concrete distribution: A continuous relaxation of discrete random variables. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. URL https://openreview.net/forum?id=S1jE5L5gl. +Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A. Rusu, Joel Veness, Marc G. Bellemare, Alex Graves, Martin A. Riedmiller, Andreas Fidjeland, Georg Ostrovski, Stig Petersen, Charles Beattie, Amir Sadik, Ioannis Antonoglou, Helen King, Dharshan Kumaran, Daan Wierstra, Shane Legg, and Demis Hassabis. Human-level control through deep reinforcement learning. Nat., 518(7540):529-533, 2015. doi: 10.1038/nature14236. URL https://doi.org/10.1038/nature14236. +Ann Nowe. Synthesis of "safe" fuzzy controllers based on reinforcement learning. PhD thesis, Vrije Universiteit Brussel, 1994. +George Papamakarios, Iain Murray, and Theo Pavlakou. Masked autoregressive flow for density estimation. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, + +Long Beach, CA, USA, pp. 
2338-2347, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/6c1da886822c67822bcf3679d04369fa-Abstract.html. +Amir Pnueli. The temporal logic of programs. In 18th Annual Symposium on Foundations of Computer Science, Providence, Rhode Island, USA, 31 October - 1 November 1977, pp. 46-57. IEEE Computer Society, 1977. doi: 10.1109/SFCS.1977.32. URL https://doi.org/10.1109/SFCS.1977.32. +Martin L. Puterman. Markov Decision Processes: Discrete Stochastic Dynamic Programming. Wiley Series in Probability and Statistics. Wiley, 1994. ISBN 978-0-47161977-2. doi: 10.1002/9780470316887. URL https://doi.org/10.1002/9780470316887. +Tao Ren, Jianwei Niu, Jiahe Cui, Zhenchao Ouyang, and Xuefeng Liu. An application of multi-objective reinforcement learning for efficient model-free control of canals deployed with iot networks. Journal of Network and Computer Applications, 182:103049, 2021. ISSN 1084-8045. doi: https://doi.org/10.1016/j.jnca.2021.103049. URL https://www.sciencedirect.com/science/article/pii/S1084804521000734. +Salomon Sickert, Javier Esparza, Stefan Jaax, and Jan Kretínský. Limit-deterministic büchi automata for linear temporal logic. In Swarat Chaudhuri and Azadeh Farzan (eds.), Computer Aided Verification - 28th International Conference, CAV 2016, Toronto, ON, Canada, July 17-23, 2016, Proceedings, Part II, volume 9780 of Lecture Notes in Computer Science, pp. 312-332. Springer, 2016. doi: 10.1007/978-3-319-41540-6\_17. URL https://doi.org/10.1007/978-3-319-41540-6_17. +Thiago D. Simão, Nils Jansen, and Matthijs T. J. Span. Always safe: Reinforcement learning without safety constraint violations during training. In Frank Dignum, Alessio Lomuscio, Ulle Endriss, and Ann Nowé (eds.), AAMAS '21: 20th International Conference on Autonomous Agents and Multiagent Systems, Virtual Event, United Kingdom, May 3-7, 2021, pp. 1226-1235. ACM, 2021. URL https://dl.acm.org/doi/10.5555/3463952.3464094. +Emanuel Todorov, Tom Erez, and Yuval Tassa. 
Mujoco: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pp. 5026-5033. IEEE, 2012. +Ilya O. Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Schölkopf. Wasserstein autoencoders. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=HkL7n1-0b. +John N. Tsitsiklis. Asynchronous stochastic approximation and q-learning. Mach. Learn., 16(3):185-202, 1994. doi: 10.1007/BF00993306. URL https://doi.org/10.1007/BF00993306. +Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, 4-9 December 2017, Long Beach, CA, USA, pp. 6306-6315, 2017. URL http://papers.nips.cc/paper/7210-neural-discrete-representation-learning. +Cédric Villani. Optimal Transport: Old and New. Springer Berlin Heidelberg, Berlin, Heidelberg, 2009. ISBN 978-3-540-71050-9. doi: 10.1007/978-3-540-71050-9_6. URL https://doi.org/10.1007/978-3-540-71050-9_6. +Andrew M. Wells, Morteza Lahijanian, Lydia E. Kavraki, and Moshe Y. Vardi. Ltlf synthesis on probabilistic systems. In Jean-François Raskin and Davide Bresolin (eds.), Proceedings 11th International Symposium on Games, Automata, Logics, and Formal Verification, GandALF 2020, Brussels, Belgium, September 21-22, 2020, volume 326 of EPTCS, pp. 166-181, 2020. doi: 10.4204/EPTCS.326.11. URL https://doi.org/10.4204/EPTCS.326.11. +Hongyu Zang, Xin Li, and Mingzhong Wang. Simsr: Simple distance-based state representations for deep reinforcement learning. Proceedings of the AAAI Conference on Artificial Intelligence, 36 (8):8997-9005, Jun. 2022. 
doi: 10.1609/aaai.v36i8.20883. URL https://ojs.aaai.org/index.php/AAAI/article/view/20883. + +Amy Zhang, Rowan Thomas McAllister, Roberto Calandra, Yarin Gal, and Sergey Levine. Learning invariant representations for reinforcement learning without reconstruction. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=-2FCwDKRREu. +Shunkang Zhang, Yuan Gao, Yuling Jiao, Jin Liu, Yang Wang, and Can Yang. Wasserstein-Wasserstein auto-encoders. CoRR, abs/1902.09323, 2019. URL http://arxiv.org/abs/1902.09323. + +# APPENDIX + +# A THEORETICAL DETAILS ON WAE-MDPS + +# A.1 THE DISCREPANCY MEASURE + +We show that reasoning about discrepancy measures between stationary distributions is sound in the context of infinite interaction and episodic RL processes. Let $P_{\theta}$ be a parameterized behavioral model that generates finite traces from the original environment (i.e., finite sequences of states, actions, and rewards of the form $\langle s_{0:T},a_{0:T - 1},r_{0:T - 1}\rangle$ ); our goal is to find the best parameter $\theta$ which offers the most accurate reconstruction of the original traces issued from the original model $\mathcal{M}$ operating under $\pi$ . We demonstrate that, in the limit, considering the OT between trace-based distributions is equivalent to considering the OT between the stationary distribution of $\mathcal{M}_{\pi}$ and the one of the behavioral model. + +Let us first formally recall the definition of the metric on the transitions of the MDP. + +Raw transition distance. 
Assume that $S$ , $\mathcal{A}$ , and $\operatorname{Im}(\mathcal{R})$ are respectively equipped with metrics $d_S$ , $d_{\mathcal{A}}$ , and $d_{\mathcal{R}}$ , let us define the raw transition distance metric over transitions of $\mathcal{M}$ , i.e., tuples of the form $\langle s, a, r, s' \rangle$ , as $\vec{d} \colon (S \times \mathcal{A} \times \operatorname{Im}(\mathcal{R}) \times S)^2 \to [0, +\infty[$ , + +$$ +\vec {d} \big (\left\langle s _ {1}, a _ {1}, r _ {1}, s _ {1} ^ {\prime} \right\rangle , \left\langle s _ {2}, a _ {2}, r _ {2}, s _ {2} ^ {\prime} \right\rangle \big) = d _ {\mathcal {S}} (s _ {1}, s _ {2}) + d _ {\mathcal {A}} (a _ {1}, a _ {2}) + d _ {\mathcal {R}} (r _ {1}, r _ {2}) + d _ {\mathcal {S}} \big (s _ {1} ^ {\prime}, s _ {2} ^ {\prime} \big). +$$ + +In a nutshell, $\vec{d}$ consists of the sum of the distance of all the transition components. Note that it is a well defined distance metric since the sum of distances preserves the identity of indiscernibles, symmetry, and triangle inequality. + +Trace-based distributions. The raw distance $\vec{d}$ allows us to reason about transitions; we thus consider the distribution over transitions which occur along traces of length $T$ to compare the dynamics of the original and behavioral models: + +$$ +\mathcal {D} _ {\pi} [ T ] (s, a, r, s ^ {\prime}) = \frac {1}{T} \sum_ {t = 1} ^ {T} \xi_ {\pi} ^ {t} (s | s _ {I}) \cdot \pi (a | s) \cdot \mathbf {P} (s ^ {\prime} | s, a) \cdot \mathbf {1} _ {r = \mathcal {R} (s, a)}, \text {and} +$$ + +$$ +\mathcal {P} _ {\theta} [ T ] \big (s, a, r, s ^ {\prime} \big) = \frac {1}{T} \sum_ {t = 1} ^ {T} \underset {s _ {0: t}, a _ {0: t - 1}, r _ {0: t - 1} \sim P _ {\theta} [ t ]} {\mathbb {E}} \mathbf {1} _ {\langle s _ {t - 1}, a _ {t - 1}, r _ {t - 1}, s _ {t} \rangle = \langle s, a, r, s ^ {\prime} \rangle}, +$$ + +where $P_{\theta}[T]$ denotes the distribution over traces of length $T$ , generated from $P_{\theta}$ . 
Intuitively, $\frac{1}{T} \cdot \sum_{t=1}^{T} \xi_{\pi}^{t}(s \mid s_{I})$ can be seen as the fraction of the time spent in $s$ along traces of length $T$ , starting from the initial state Kulkarni (1995). Therefore, drawing $\langle s, a, r, s' \rangle \sim \mathcal{D}_{\pi}[T]$ trivially follows: it is equivalent to drawing $s$ from $\frac{1}{T} \cdot \sum_{t=1}^{T} \xi_{\pi}^{t}(\cdot \mid s_{I})$ , then respectively $a$ and $s'$ from $\pi(\cdot \mid s)$ and $\mathbf{P}(\cdot \mid s, a)$ , to finally obtain $r = \mathcal{R}(s, a)$ . Given $T \in \mathbb{N}$ , our objective is to minimize the Wasserstein distance between those distributions: $W_{\vec{d}}(\mathcal{D}_{\pi}[T], \mathcal{P}_{\theta}[T])$ . The following Lemma enables optimizing the Wasserstein distance between the original MDP and the behavioral model when traces are drawn from episodic RL processes or infinite interactions (Huang, 2020). + +Lemma A.1. Assume the existence of a stationary behavioral model $\xi_{\theta} = \lim_{T\to \infty}\mathcal{P}_{\theta}[T]$ , then + +$$ +\lim _ {T \to \infty} W _ {\vec {d}} \left(\mathcal {D} _ {\pi} [ T ], \mathcal {P} _ {\theta} [ T ]\right) = W _ {\vec {d}} \left(\xi_ {\pi}, \xi_ {\theta}\right). +$$ + +Proof. First, note that $\frac{1}{T} \cdot \sum_{t=1}^{T} \xi_{\pi}^{t}(\cdot \mid s_{I})$ weakly converges to $\xi_{\pi}$ as $T$ goes to $\infty$ Kulkarni (1995). The result follows then from (Villani, 2009, Corollary 6.9). + +# A.2 DEALING WITH DISCRETE ACTIONS + +When the policy $\pi$ executed in $\mathcal{M}$ already produces discrete actions, learning a latent action space is, in many cases, not necessary. We thus make the following assumptions: + +Assumption A.2. Let $\pi \colon S \to \Delta(\mathcal{A}^{\star})$ be the policy executed in $\mathcal{M}$ and assume that $\mathcal{A}^{\star}$ is a (tractable) finite set. 
Then, we take $\overline{\mathcal{A}} = \mathcal{A}^{\star}$ and $\phi_{\iota}^{\mathcal{A}}$ as the identity function, i.e., $\phi_{\iota}^{\mathcal{A}}: \overline{S} \times \mathcal{A}^{\star} \to \mathcal{A}^{\star}, \langle \overline{s}, a^{\star} \rangle \mapsto a^{\star}$ . + +Assumption A.3. Assume that the action space of the original environment $\mathcal{M}$ is a (tractable) finite set. Then, we take $\psi_{\theta}$ as the identity function, i.e., $\psi_{\theta} = \phi_{\iota}^{A}$ . + +Concretely, the premise of Assumption A.2 typically occurs when $\pi$ is a latent policy (see Rem. 1) or when $\mathcal{M}$ has already a discrete action space. In the latter case, Assumption A.2 and A.3 amount to setting $\bar{\mathcal{A}} = \mathcal{A}$ and ignoring the action encoder and embedding function. Note that if a discrete action space is too large, or if the user explicitly aims for a coarser space, then the former is not considered as tractable, these assumptions do not hold, and the action space is abstracted to a smaller set of discrete actions. + +# A.3 PROOF OF LEMMA 3.2 + +Notation. From now on, we write $\phi_{\iota}(\bar{s},\bar{a}\mid s,a) = \mathbf{1}_{\phi_{\iota}(s) = \bar{s}}\cdot \phi_{\iota}^{A}(\bar{a}\mid \bar{s},a)$ . + +Lemma 3.2. Define $\mathcal{T}(\bar{s},\bar{a},\bar{s}^{\prime}) = \mathbb{E}_{s,a\sim \xi_{\pi}}[\mathbf{1}_{\phi_{\iota}(s) = \bar{s}}\cdot \phi_{\iota}^{A}(\bar{a}\mid \bar{s},a)\cdot \overline{\mathbf{P}}_{\theta}(\bar{s}^{\prime}\mid \bar{s},\bar{a})]$ as the distribution of drawing state-action pairs from interacting with $\mathcal{M}$ , embedding them to the latent spaces, and finally letting them transition to their successor state in $\overline{\mathcal{M}}_{\theta}$ . Then, $W_{\vec{d}}(Q_{\iota},\bar{\xi}_{\overline{\pi}_{\theta}})\leqslant W_{\vec{d}}(\bar{\xi}_{\overline{\pi}_{\theta}},\mathcal{T}) + L_{\mathbf{P}}^{\xi_{\pi}}$ + +Proof. 
Wasserstein is compliant with the triangular inequality (Villani, 2009), which gives us: + +$$ +W _ {\vec {d}} \left(Q _ {\iota}, \bar {\xi} _ {\bar {\pi} _ {\theta}}\right) \leqslant W _ {\vec {d}} \left(Q _ {\iota}, \mathcal {T}\right) + W _ {d _ {\bar {\mathfrak {S}}}} \left(\mathcal {T}, \bar {\xi} _ {\bar {\pi} _ {\theta}}\right), +$$ + +where + +$$ +\begin{array}{l} W _ {\tilde {d}} \left(\mathcal {T}, \bar {\xi} _ {\bar {\pi} _ {\theta}}\right) \quad (\text {n o t e t h a t} W _ {\tilde {d}} \text {i s r e f l e x i v e (V i l l a n i , 2 0 0 9)} \\ = \sup _ {f \in \mathcal {F} _ {\bar {d}}} \underset {s, a \sim \xi_ {\pi}} {\mathbb {E}} \underset {\bar {s}, \bar {a} \sim \phi_ {\iota} (\cdot | s, a)} {\mathbb {E}} \underset {\bar {s} ^ {\prime} \sim \overline {{\mathbf {P}}} _ {\theta} (\cdot | \bar {s}, \bar {a})} {\mathbb {E}} f (\bar {s}, \bar {a}, \bar {s} ^ {\prime}) - \underset {\bar {s} \sim \bar {\xi} _ {\pi_ {\theta}}} {\mathbb {E}} \underset {\bar {a} \sim \bar {\pi} _ {\theta} (\cdot | \bar {s})} {\mathbb {E}} \underset {\bar {s} ^ {\prime} \sim \overline {{\mathbf {P}}} _ {\theta} (\cdot | \bar {s}, \bar {a})} {\mathbb {E}} f (\bar {s}, \bar {a}, \bar {s} ^ {\prime}), \text {a n d} \\ W _ {\vec {d}} (Q _ {\iota}, \mathcal {T}) \\ = \sup _ {f \in \mathcal {F} _ {\vec {d}} s, a, s ^ {\prime} \sim \xi_ {\pi}} \mathbb {E} _ {\bar {s}, \bar {a}, \bar {s} ^ {\prime} \sim \phi_ {\iota} (\cdot | s, a, s ^ {\prime})} f (\bar {s}, \bar {a}, \bar {s} ^ {\prime}) - \mathbb {E} _ {s, a \sim \xi_ {\pi}} \mathbb {E} _ {\bar {s}, \bar {a} \sim \phi_ {\iota} (\cdot | s, a)} \mathbb {E} _ {\bar {s} ^ {\prime} \sim \overline {{\mathbf {P}}} _ {\theta} (\cdot | \bar {s}, \bar {a})} f (\bar {s}, \bar {a}, \bar {s} ^ {\prime}) (3) \\ \leqslant \underset {s, a \sim \xi_ {\pi}} {\mathbb {E}} \underset {\bar {s}, \bar {a} \sim \phi_ {\iota} (\cdot | s, a)} {\mathbb {E}} \sup _ {f \in \mathcal {F} _ {\bar {d}} ^ {-} s ^ {\prime} \sim \mathbf {P} (\cdot | s, a)} \underset 
{\sim \mathbf {P} (\cdot | s, a)} {\mathbb {E}} f (\bar {s}, \bar {a}, \phi_ {\iota} (s ^ {\prime})) - \underset {\bar {s} ^ {\prime} \sim \overline {{\mathbf {P}}} _ {\theta} (\cdot | \bar {s}, \bar {a})} {\mathbb {E}} f (\bar {s}, \bar {a}, \bar {s} ^ {\prime}) (4) \\ = \underset {s, a \sim \xi_ {\pi}} {\mathbb {E}} \underset {\bar {a} \sim \phi_ {\iota} ^ {A} (\cdot | \phi_ {\iota} (s), a)} {\mathbb {E}} \sup _ {f \in \mathcal {F} _ {d} _ {\overline {{\mathcal {S}}}}} \underset {\bar {s} ^ {\prime} \sim \phi_ {\iota} \mathbf {P} (\cdot | s, a)} {\mathbb {E}} f (\bar {s} ^ {\prime}) - \underset {\bar {s} ^ {\prime} \sim \overline {{\mathbf {P}}} _ {\theta} (\cdot | \phi_ {\iota} (s), \bar {a})} {\mathbb {E}} f (\bar {s} ^ {\prime}) (5) \\ = \underset {s, a \sim \xi_ {\pi}} {\mathbb {E}} \underset {\bar {a} \sim \phi_ {\ell} ^ {\mathcal {A}} (\cdot | \phi_ {\ell} (s), a)} {\mathbb {E}} W _ {d _ {\bar {\mathfrak {S}}}} \left(\phi_ {\iota} \mathbf {P} (\cdot | s, a), \bar {\mathbf {P}} _ {\theta} (\cdot | \phi_ {\iota} (s), \bar {a})\right). \\ \end{array} +$$ + +We pass from Eq. 3 to Eq. 4 by the Jensen's inequality. To see how we pass from Eq. 4 to Eq. 
5, notice that + +$$ +\mathcal {F} _ {\vec {d}} = \left\{f \colon f \left(\bar {s} _ {1}, \bar {a} _ {1}, \bar {s} _ {1} ^ {\prime}\right) - f \left(\bar {s} _ {2}, \bar {a} _ {2}, \bar {s} _ {2} ^ {\prime}\right) \leqslant \vec {d} \left(\left\langle \bar {s} _ {1}, \bar {a} _ {1}, \bar {s} _ {1} ^ {\prime} \right\rangle , \left\langle \bar {s} _ {2}, \bar {a} _ {2}, \bar {s} _ {2} ^ {\prime} \right\rangle\right) \right\} +$$ + +$$ +\mathcal {F} _ {\vec {d}} = \left\{f \colon f \left(\bar {s} _ {1}, \bar {a} _ {1}, \bar {s} _ {1} ^ {\prime}\right) - f \left(\bar {s} _ {2}, \bar {a} _ {2}, \bar {s} _ {2} ^ {\prime}\right) \leqslant d _ {\bar {S}} \left(\bar {s} _ {1}, \bar {s} _ {2}\right) + d _ {\bar {A}} \left(\bar {a} _ {1}, \bar {a} _ {2}\right) + d _ {\bar {S}} \left(\bar {s} _ {1} ^ {\prime}, \bar {s} _ {2} ^ {\prime}\right) \right\} +$$ + +Observe now that $\bar{s}$ and $\bar{a}$ are fixed in the supremum computation of Eq. 4: all functions $f$ considered and taken from $\mathcal{F}_{\bar{d}}$ are of the form $f(\bar{s},\bar{a},\cdot)$ . 
It is thus sufficient to consider the supremum over functions from the following subset of $\mathcal{F}_{\bar{d}}$ : + +$$ +\{f \colon f (\bar {s}, \bar {a}, \bar {s} _ {1} ^ {\prime}) - f (\bar {s}, \bar {a}, \bar {s} _ {2} ^ {\prime}) \leqslant d _ {\bar {\mathcal {S}}} (\bar {s}, \bar {s}) + d _ {\bar {\mathcal {A}}} (\bar {a}, \bar {a}) + d _ {\bar {\mathcal {S}}} \left(\bar {s} _ {1} ^ {\prime}, \bar {s} _ {2} ^ {\prime}\right) \} +$$ + +(for $\bar{s},\bar{a}$ drawn from $\phi_{\iota}$ ) + +$$ +\begin{array}{l} = \left\{f: f \left(\bar {s}, \bar {a}, \bar {s} _ {1} ^ {\prime}\right) - f \left(\bar {s}, \bar {a}, \bar {s} _ {2} ^ {\prime}\right) \leqslant d _ {\bar {S}} \left(\bar {s} _ {1} ^ {\prime}, \bar {s} _ {2} ^ {\prime}\right) \right\} \\ = \left\{f: f \left(\bar {s} _ {1} ^ {\prime}\right) - f \left(\bar {s} _ {2} ^ {\prime}\right) \leqslant d _ {\bar {S}} \left(\bar {s} _ {1} ^ {\prime}, \bar {s} _ {2} ^ {\prime}\right) \right\} \\ = \mathcal {F} _ {d _ {\bar {\mathcal {S}}}}. \\ \end{array} +$$ + +Given a state $s \in S$ in the original model, the (parallel) execution of $\pi$ in $\overline{\mathcal{M}}_{\theta}$ is enabled through $\pi(a, \bar{a} | s) = \pi(a | s) \cdot \phi_{\ell}^{\mathcal{A}}(\bar{a} | \phi_{\ell}(s), a)$ (cf. Fig. 1b). The local transition loss resulting from this interaction is: + +$$ +\begin{array}{l} L _ {\mathbf {P}} ^ {\xi_ {\pi}} = \underset {s, \langle a, \bar {a} \rangle \sim \xi_ {\pi}} {\mathbb {E}} W _ {d _ {\bar {S}}} \left(\phi_ {\iota} \mathbf {P} (\cdot | s, a), \bar {\mathbf {P}} (\cdot | \phi_ {\iota} (s), \bar {a})\right) \\ = \underset {s, a \sim \xi_ {\pi}} {\mathbb {E}} \underset {\bar {a} \sim \phi_ {\iota} ^ {A} (\cdot | \phi_ {\iota} (s), a)} {\mathbb {E}} W _ {d _ {\bar {\mathfrak {S}}}} \left(\phi_ {\iota} \mathbf {P} (\cdot | s, a), \overline {{\mathbf {P}}} _ {\theta} (\cdot | \phi_ {\iota} (s), \bar {a})\right), \\ \end{array} +$$ + +which finally yields the result. 
+ +![](images/852e86e2e3bda2a9e8c7ef0d0e216a8b0fe5f8b4b9662c714d1c462f9b283e6f.jpg) + +# A.4 PROOF OF THEOREM 3.3 + +Before proving Theorem 3.3, let us introduce the following Lemma, that explicitly demonstrates the link between the transition regularizer of the $\mathrm{W}^2\mathrm{AE}$ -MDP objective and the local transition loss required to obtain the guarantees related to the bisimulation bounds of Eq. 1. + +Lemma A.4. Assume that traces are generated by running $\bar{\pi} \in \overline{\Pi}$ in the original environment, then + +$$ +\underset {s, a ^ {\star} \sim \xi_ {\pi} \bar {a} \sim \phi_ {\iota} ^ {\mathcal {A}} (\cdot | \phi_ {\iota} (s), a ^ {\star})} {\mathbb {E}} W _ {d _ {\overline {{S}}}} \left(\phi_ {\iota} \mathbf {P} (\cdot | s, a ^ {\star}), \overline {{\mathbf {P}}} _ {\theta} (\cdot | \phi_ {\iota} (s), \bar {a})\right) = L _ {\mathbf {P}} ^ {\xi_ {\overline {{\pi}}}}. +$$ + +Proof. Since the latent policy $\bar{\pi}$ generates latent actions, Assumption A.2 holds, which means: + +$$ +\begin{array}{l} \mathop{\mathbb{E}}_{s,a^{\star}\sim \xi_{\overline{\pi}}}\mathop{\mathbb{E}}_{\bar{a}\sim \phi_{\iota}^{A}(\cdot |\phi_{\iota}(s),a^{\star})}W_{d_{\overline{\mathfrak{S}}}}\left(\phi_{\iota}\mathbf{P}(\cdot |s,a^{\star}),\overline{\mathbf{P}}_{\theta}(\cdot |\phi_{\iota}(s),\bar{a})\right) \\ = \underset {s, \bar {a} \sim \xi_ {\bar {\pi}}} {\mathbb {E}} W _ {d _ {\bar {S}}} \left(\phi_ {\iota} \mathbf {P} (\cdot | s, \bar {a}), \overline {{\mathbf {P}}} _ {\theta} (\cdot | \phi_ {\iota} (s), \bar {a})\right) \\ = L _ {\mathbf {P}} ^ {\xi_ {\overline {{\pi}}}}. \\ \end{array} +$$ + +![](images/8baf2a20bda1cb7ecb0faf699ee64776daaa409b45835778693cc866c2d3e653.jpg) + +Theorem 3.3. 
Assume that traces are generated by running a latent policy $\bar{\pi} \in \overline{\Pi}$ in the original environment and let $d_{\mathcal{R}}$ be the usual Euclidean distance, then the $W^{2}$ AE-MDP objective is + +$$ +\min_{\iota ,\theta}\underset {s,s^{\prime}\sim \xi_{\pi}}{\mathbb{E}}\left[d_{\mathcal{S}}(s,\mathcal{G}_{\theta}(\phi_{\iota}(s))) + d_{\mathcal{S}}\big(s^{\prime},\mathcal{G}_{\theta}\big(\phi_{\iota}\big(s^{\prime}\big)\big)\big)\right] + L_{\mathcal{R}}^{\xi_{\overline{\pi}}} + \beta \cdot (\mathcal{W}_{\xi_{\overline{\pi}}} + L_{\mathbf{P}}^{\xi_{\overline{\pi}}}). +$$ + +Proof. We distinguish two cases: (i) the case where the original and latent models share the same discrete action space, i.e., $\mathcal{A} = \overline{\mathcal{A}}$ , and (ii) the case where the two have a different action space (e.g., when the original action space is continuous), i.e., $\mathcal{A} \neq \overline{\mathcal{A}}$ . In both cases, the local losses term follows by definition of $L_{\mathcal{R}}^{\xi_{\overline{\pi}}}$ and Lemma A.4. When $d_{\mathcal{R}}$ is the Euclidean distance (or even the $L_{1}$ distance since rewards are scalar values), the expected reward distance occurring in the expected trace-distance term $\vec{d}$ in the $\mathrm{W}^2\mathrm{AE}$ -MDP objective directly translates to the local loss $L_{\mathcal{R}}^{\xi_{\overline{\pi}}}$ . Concerning the local transition loss, in case (i), the result naturally follows from Assumption A.2 and A.3. In case (ii), only Assumption A.2 holds, meaning the action encoder term of the $\mathrm{W}^2\mathrm{AE}$ -MDP objective is ignored, but not the action embedding term appearing in $G_{\theta}$ . Given $s \sim \xi_{\overline{\pi}}$ , recall that executing $\overline{\pi}$ in $\mathcal{M}$ amounts to embedding the produced latent actions $\bar{a} \sim \overline{\pi}(\cdot \mid \phi_{\iota}(s))$ back to the original environment via $a = \psi_{\theta}(\phi_{\iota}(s), \bar{a})$ (cf. Rem. 1 and Fig. 1a). 
Therefore, the projection of $\vec{d}(\langle s, a, r, s' \rangle, G_{\theta}(\phi_{\iota}(s), \bar{a}, \phi_{\iota}(s')))$ on the action space $\mathcal{A}$ is $d_{\mathcal{A}}(\psi_{\theta}(\phi_{\iota}(s), \bar{a}), \psi_{\theta}(\phi_{\iota}(s), \bar{a})) = 0$ for $r = \mathcal{R}(s, a)$ and $s' \sim \mathbf{P}(\cdot \mid s, a)$ . + +# A.5 OPTIMIZING THE TRANSITION REGULARIZER + +In the following, we detail how we derive a tractable form of our transition regularizer $L_{\mathbf{P}}^{\xi_{\pi}}(\omega)$ . Optimizing the ground Kantorovich-Rubinstein duality is enabled via the introduction of a parameterized, 1-Lipschitz network $\varphi_{\omega}^{\mathbf{P}}$ , that need to be trained to attain the supremum of the dual: + +$$ +L _ {\mathbf {P}} ^ {\xi_ {\pi}} (\omega) = \underset {s, a \sim \xi_ {\pi}} {\mathbb {E}} \underset {\bar {s}, \bar {a} \sim \phi_ {\iota} (\cdot | s, a)} {\mathbb {E}} \max _ {\omega : \varphi_ {\omega} ^ {\mathbf {P}} \in \mathcal {F} _ {d}} \underset {\bar {s} ^ {\prime} \sim \phi_ {\iota} \mathbf {P} (\cdot | s, a)} {\max _ {\bar {s} ^ {\prime} \sim \varphi_ {\iota} (\cdot | s, a)}} \varphi_ {\omega} ^ {\mathbf {P}} (\bar {s} ^ {\prime}) - \underset {\bar {s} ^ {\prime} \sim \bar {\mathbf {P}} _ {\theta} (\cdot | \bar {s}, \bar {a})} {\mathbb {E}} \varphi_ {\omega} ^ {\mathbf {P}} (\bar {s} ^ {\prime}). +$$ + +Under this form, optimizing $L_{\mathbf{P}}^{\xi_{\pi}}(\omega)$ is intractable due to the expectation over the maximum. The following Lemma allows us rewriting $L_{\mathbf{P}}^{\xi_{\pi}}$ to make the optimization tractable through Monte Carlo estimation. + +Lemma A.5. Let $\mathcal{X},\mathcal{Y}$ be two measurable sets, $\xi \in \Delta (\mathcal{X})$ $P\colon \mathcal{X}\to \Delta (\mathcal{Y}),Q\colon \mathcal{X}\to \Delta (\mathcal{Y})$ , and $d\colon \mathcal{Y}\times \mathcal{Y}\rightarrow [0, + \infty [$ be a metric on $\mathcal{V}$ . 
Then, + +$$ +\underset {x \sim \xi} {\mathbb {E}} W _ {d} \left(P (\cdot \mid x), Q (\cdot \mid x)\right) = \sup _ {\varphi \colon \mathcal {X} \to \mathcal {F} _ {d}} \underset {x \sim \xi} {\mathbb {E}} \left[ \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} \varphi (x) (y _ {1}) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} \varphi (x) (y _ {2}) \right] +$$ + +Proof. Our objective is to show that + +$$ +\begin{array}{l} \underset {x \sim \xi} {\mathbb {E}} \left[ \sup _ {f \in \mathcal {F} _ {d}} \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} \varphi (y _ {1}) (x) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} \varphi (y _ {2}) (x) \right] (6) \\ = \sup _ {\varphi : \mathcal {X} \rightarrow \mathcal {F} _ {d}} \mathbb {E} _ {x \sim \xi} \left[ \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} \varphi (x) \left(y _ {1}\right) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} \varphi (x) \left(y _ {2}\right)\right] (7) \\ \end{array} +$$ + +We start with $(6) \leqslant (7)$ . Construct $\varphi^{\star} \colon \mathcal{X} \to \mathcal{F}_d$ by setting for all $x \in \mathcal{X}$ + +$$ +\varphi^{\star}(x) = \arg \sup_{f\in \mathcal{F}_{d}}\underset {y_{1}\sim P(\cdot |x)}{\mathbb{E}}f(y_{1}) - \underset {y_{2}\sim Q(\cdot |x)}{\mathbb{E}}f(y_{2}). 
+$$ + +This gives us + +$$ +\begin{array}{l} \underset {x \sim \xi} {\mathbb {E}} \left[ \sup _ {f \in \mathcal {F} _ {d}} \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} f (y _ {1}) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} f (y _ {2}) \right] \\ = \underset {x \sim \xi} {\mathbb {E}} \left[ \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} \varphi^ {\star} (x) (y _ {1}) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} \varphi^ {\star} (x) (y _ {2}) \right] \\ \leqslant \sup _ {\varphi \colon \mathcal {X} \to \mathcal {F} _ {d}} \mathbb {E} _ {x \sim \xi} \left[ \mathbb {E} _ {y _ {1} \sim P (\cdot | x)} \varphi (x) (y _ {1}) - \mathbb {E} _ {y _ {2} \sim Q (\cdot | x)} \varphi (x) (y _ {2}) \right]. \\ \end{array} +$$ + +It remains to show that $(6) \geqslant (7)$ . Take + +$$ +\varphi^ {\star} = \arg \operatorname * {s u p} _ {\varphi \colon \mathcal {X} \to \mathcal {F} _ {d}} \underset {x \sim \xi} {\mathbb {E}} \left[ \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} \varphi (x) (y _ {1}) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} \varphi (x) (y _ {2}) \right]. 
+$$ + +Then, for all $x\in \mathcal{X}$ we have $\varphi^{\star}(x)\in \mathcal{F}_d$ which means: + +$$ +\begin{array}{l} \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} \varphi^ {\star} (x) (y _ {1}) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} \varphi^ {\star} (x) (y _ {2}) \\ \leqslant \sup _ {f \in \mathcal {F} _ {d}} \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} f (y _ {1}) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} f (y _ {2}) \\ \end{array} +$$ + +This finally yields + +$$ +\begin{array}{l} \underset {x \sim \xi} {\mathbb {E}} \left[ \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} \varphi^ {\star} (x) (y _ {1}) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} \varphi^ {\star} (x) (y _ {2}) \right] \\ \leqslant \underset {x \sim \xi} {\mathbb {E}} \left[ \sup _ {f \in \mathcal {F} _ {d}} \underset {y _ {1} \sim P (\cdot | x)} {\mathbb {E}} f (y _ {1}) - \underset {y _ {2} \sim Q (\cdot | x)} {\mathbb {E}} f (y _ {2}) \right]. \\ \end{array} +$$ + +![](images/ddd0a968d5f3afaaae13a27c6e3caf51bfd225352e0358b48e17b9095eb06313.jpg) + +Corollary A.5.1. 
Let $\xi_{\pi}$ be a stationary distribution of $\mathcal{M}_{\pi}$ and $\mathcal{X} = S\times \mathcal{A}\times \overline{S}\times \overline{\mathcal{A}}$ , then + +$$ +L _ {\mathbf {P}} ^ {\xi_ {\pi}} = \sup _ {\varphi \colon \mathcal {X} \to \mathcal {F} _ {d} _ {\overline {{S}}}} \underset {s, a, s ^ {\prime} \sim \xi_ {\pi}} {\mathbb {E}} \underset {\bar {s}, \bar {a} \sim \phi_ {\iota} (\cdot | s, a)} {\mathbb {E}} \left[ \varphi (s, a, \bar {s}, \bar {a}) \big (\phi_ {\iota} (s ^ {\prime}) \big) - \underset {\bar {s} ^ {\prime} \sim \overline {{\mathbf {P}}} _ {\theta} (\cdot | \bar {s}, a)} {\mathbb {E}} \varphi (s, a, \bar {s}, \bar {a}) \big (\bar {s} ^ {\prime} \big) \right] +$$ + +Consequently, we rewrite $L_{\mathbf{P}}^{\xi_{\pi}}(\omega)$ as a tractable maximization: + +$$ +L _ {\mathbf {P}} ^ {\xi_ {\pi}} (\omega) = \max _ {\omega : \varphi_ {\omega} ^ {\mathbf {P}} \in \mathcal {F} _ {d _ {\bar {g}}}} \underset {s, a, s ^ {\prime} \sim \xi_ {\pi} \bar {s}, \bar {a} \sim \phi_ {\iota} (\cdot | s, a)} {\mathbb {E}} \underset {s, a, \bar {a}, \bar {a} \sim \phi_ {\iota} (s ^ {\prime})} {\mathbb {E}} \left[ \varphi_ {\omega} ^ {\mathbf {P}} (s, a, \bar {s}, \bar {a}, \phi_ {\iota} (s ^ {\prime})) - \underset {\bar {s} ^ {\prime} \sim \overline {{\mathbf {P}}} _ {\theta} (\cdot | \bar {s}, \bar {a})} {\mathbb {E}} \varphi_ {\omega} ^ {\mathbf {P}} (s, a, \bar {s}, \bar {a}, \bar {s} ^ {\prime}) \right]. +$$ + +# A.6 THE LATENT METRIC + +In the following, we show that considering the Euclidean distance for $\vec{d}$ and $d_{\overline{\mathcal{S}}}$ in the latent space for optimizing the regularizers $\mathcal{W}_{\xi_{\pi}}$ and $L_{\mathbf{P}}^{\xi_{\pi}}$ is Lipschitz equivalent to considering a continuous $\lambda$ -relaxation of the discrete metric $\mathbf{1}_{\neq}(\pmb{x},\pmb{y}) = \mathbf{1}_{x\neq y}$ . Consequently, this also means it is consistently sufficient to enforce 1-Lipschitzness via the gradient penalty approach of Gulrajani et al. 
(2017) during training to maintain the guarantees linked to the regularizers in the zero-temperature limit, when the spaces are discrete. + +Lemma A.6. Let $d$ be the usual Euclidean distance and $d_{\lambda} \colon [0,1]^n \times [0,1]^n \to [0,1[$ , $\langle \pmb{x}, \pmb{y} \rangle \mapsto \frac{d(\pmb{x}, \pmb{y})}{\lambda + d(\pmb{x}, \pmb{y})}$ for $\lambda \in ]0,1]$ and $n \in \mathbb{N}$ , then $d_{\lambda}$ is a distance metric. + +Proof. The function $d_{\lambda}$ is a metric iff it satisfies the following axioms: + +1. Identity of indiscernibles: If $\pmb{x} = \pmb{y}$ , then $d_{\lambda}(\pmb{x}, \pmb{y}) = \frac{d(\pmb{x}, \pmb{y})}{\lambda + d(\pmb{x}, \pmb{y})} = \frac{0}{\lambda + 0} = 0$ since $d$ is a distance metric. Assume now that $d_{\lambda}(\pmb{x}, \pmb{y}) = 0$ and take $\alpha = d(\pmb{x}, \pmb{y})$ , for any $\pmb{x}, \pmb{y}$ . Thus, $\alpha \in [0, +\infty[$ and $0 = \frac{\alpha}{\lambda + \alpha}$ is only achieved in $\alpha = 0$ , which only occurs whenever $\pmb{x} = \pmb{y}$ since $d$ is a distance metric. +2. Symmetry: + +$$ +\begin{array}{l} d _ {\lambda} (\boldsymbol {x}, \boldsymbol {y}) = \frac {d (\boldsymbol {x} , \boldsymbol {y})}{\lambda + d (\boldsymbol {x} , \boldsymbol {y})} \\ = \frac {d (\boldsymbol {y} , \boldsymbol {x})}{\lambda + d (\boldsymbol {y} , \boldsymbol {x})} \quad (d \text{ is a distance metric}) \\ = d _ {\lambda} (\boldsymbol {y}, \boldsymbol {x}) \\ \end{array} +$$ + +3. 
Triangle inequality: Let $\mathbf{x}, \mathbf{y}, \mathbf{z} \in [0,1]^n$ , the triangle inequality holds iff + +$$ +\begin{array}{l} d _ {\lambda} (\boldsymbol {x}, \boldsymbol {y}) + d _ {\lambda} (\boldsymbol {y}, \boldsymbol {z}) \geqslant d _ {\lambda} (\boldsymbol {x}, \boldsymbol {z}) (8) \\ \equiv \quad \frac {d (\boldsymbol {x} , \boldsymbol {y})}{\lambda + d (\boldsymbol {x} , \boldsymbol {y})} + \frac {d (\boldsymbol {y} , \boldsymbol {z})}{\lambda + d (\boldsymbol {y} , \boldsymbol {z})} \geqslant \frac {d (\boldsymbol {x} , \boldsymbol {z})}{\lambda + d (\boldsymbol {x} , \boldsymbol {z})} \\ \equiv \quad \frac {\lambda d (\boldsymbol {x} , \boldsymbol {y}) + \lambda d (\boldsymbol {y} , \boldsymbol {z}) + 2 d (\boldsymbol {x} , \boldsymbol {y}) d (\boldsymbol {y} , \boldsymbol {z})}{\lambda^ {2} + \lambda d (\boldsymbol {x} , \boldsymbol {y}) + \lambda d (\boldsymbol {y} , \boldsymbol {z}) + d (\boldsymbol {x} , \boldsymbol {y}) d (\boldsymbol {y} , \boldsymbol {z})} \geqslant \frac {d (\boldsymbol {x} , \boldsymbol {z})}{\lambda + d (\boldsymbol {x} , \boldsymbol {z})} \\ \equiv \lambda^ {2} d (\boldsymbol {x}, \boldsymbol {y}) + \lambda^ {2} d (\boldsymbol {y}, \boldsymbol {z}) + 2 \lambda d (\boldsymbol {x}, \boldsymbol {y}) d (\boldsymbol {y}, \boldsymbol {z}) + \\ \lambda d (\boldsymbol {x}, \boldsymbol {y}) d (\boldsymbol {x}, \boldsymbol {z}) + \lambda d (\boldsymbol {y}, \boldsymbol {z}) d (\boldsymbol {x}, \boldsymbol {z}) + 2 d (\boldsymbol {x}, \boldsymbol {y}) d (\boldsymbol {y}, \boldsymbol {z}) d (\boldsymbol {x}, \boldsymbol {z}) \\ \geqslant \lambda^ {2} d (\boldsymbol {x}, \boldsymbol {z}) + \lambda d (\boldsymbol {x}, \boldsymbol {y}) d (\boldsymbol {x}, \boldsymbol {z}) + \lambda d (\boldsymbol {y}, \boldsymbol {z}) d (\boldsymbol {x}, \boldsymbol {z}) + d (\boldsymbol {x}, \boldsymbol {y}) d (\boldsymbol {y}, \boldsymbol {z}) d (\boldsymbol {x}, \boldsymbol {z}) \\ \left(\text {c r o s s - p r o d u c t , w i t h} \lambda > 0 \text 
{a n d} \operatorname {I m} (d) \in [ 0, \infty [\right) \\ \equiv \quad \lambda^ {2} d (\boldsymbol {x}, \boldsymbol {y}) + \lambda^ {2} d (\boldsymbol {y}, \boldsymbol {z}) + 2 \lambda d (\boldsymbol {x}, \boldsymbol {y}) d (\boldsymbol {y}, \boldsymbol {z}) + d (\boldsymbol {x}, \boldsymbol {y}) d (\boldsymbol {y}, \boldsymbol {z}) d (\boldsymbol {x}, \boldsymbol {z}) \geqslant \lambda^ {2} d (\boldsymbol {x}, \boldsymbol {z}) (9) \\ \end{array} +$$ + +Since $d$ is a distance metric, we have + +$$ +\lambda^ {2} d (\boldsymbol {x}, \boldsymbol {y}) + \lambda^ {2} d (\boldsymbol {y}, \boldsymbol {z}) \geqslant \lambda^ {2} d (\boldsymbol {x}, \boldsymbol {z}) \tag {10} +$$ + +and $\operatorname {Im}(d)\in [0,\infty [$ , meaning + +$$ +2 \lambda d (\boldsymbol {x}, \boldsymbol {y}) d (\boldsymbol {y}, \boldsymbol {z}) + d (\boldsymbol {x}, \boldsymbol {y}) d (\boldsymbol {y}, \boldsymbol {z}) d (\boldsymbol {x}, \boldsymbol {z}) \geqslant 0 \tag {11} +$$ + +By Eq. 10 and 11, the inequality of Eq. 9 holds. Furthermore, the fact that Eq. 8 and 9 are equivalent yields the result. + +Lemma A.7. Let $d$ , $d_{\lambda}$ as defined above, then $(i)d_{\lambda}\xrightarrow[\lambda\to 0]{\longrightarrow}\mathbf{1}_{\neq}$ and (ii) $d,d_{\lambda}$ are Lipschitz-equivalent. + +Proof. Part (i) is straightforward by definition of $d_{\lambda}$ . 
Distances $d$ and $d_{\lambda}$ are Lipschitz equivalent if and only if $\exists a, b > 0$ such that $\forall x, y \in [0,1]^n$ , + +$$ +\begin{array}{l} a \cdot d (\boldsymbol {x}, \boldsymbol {y}) \leqslant d _ {\lambda} (\boldsymbol {x}, \boldsymbol {y}) \leqslant b \cdot d (\boldsymbol {x}, \boldsymbol {y}) \\ \equiv a \cdot d (\boldsymbol {x}, \boldsymbol {y}) \leqslant \frac {d (\boldsymbol {x} , \boldsymbol {y})}{\lambda + d (\boldsymbol {x} , \boldsymbol {y})} \leqslant b \cdot d (\boldsymbol {x}, \boldsymbol {y}) \\ \equiv \quad a \leqslant \frac {1}{\lambda + d (\boldsymbol {x} , \boldsymbol {y})} \leqslant b \\ \end{array} +$$ + +Taking $a = \frac{1}{\lambda + \sqrt{n}}$ and $b = \frac{1}{\lambda}$ yields the result. + +Corollary A.7.1. For all $\beta \geqslant 1 / \lambda$ , $s \in S$ , $a \in \mathcal{A}$ , $\bar{s} \in \overline{S}$ , and $\bar{a} \in \overline{\mathcal{A}}$ , we have + +1. $W_{d_{\lambda}}(\mathcal{T},\bar{\xi}_{\bar{\pi}_{\theta}})\leqslant \beta \cdot W_{d}(\mathcal{T},\bar{\xi}_{\bar{\pi}_{\theta}})$ +2. $W_{d_{\lambda}}\left(\phi_{\iota}\mathbf{P}(\cdot \mid s,a),\overline{\mathbf{P}}_{\theta}(\cdot \mid \bar{s},\bar{a})\right)\leqslant \beta \cdot W_{d}\left(\phi_{\iota}\mathbf{P}(\cdot \mid s,a),\overline{\mathbf{P}}_{\theta}(\cdot \mid \bar{s},\bar{a})\right)$ + +Proof. By Lipschitz equivalence, taking $\beta \geqslant 1 / \lambda$ ensures that $\forall n\in \mathbb{N},\forall \pmb {x},\pmb {y}\in [0,1]^n,d_\lambda (\pmb {x},\pmb {y})\leqslant \beta \cdot d(\pmb {x},\pmb {y})$ . Moreover, for any distributions $P,Q,W_{d_{\lambda}}(P,Q)\leqslant \beta \cdot W_{d}(P,Q)$ (cf., e.g., Gelada et al. 2019, Lemma A.4 for details). + +In practice, taking the hyperparameter $\beta \geqslant 1 / \lambda$ in the $\mathrm{W}^2\mathrm{AE}$ -MDP ensures that minimizing the $\beta$ -scaled regularizers w.r.t. $d$ also minimizes the regularizers w.r.t. 
the $\lambda$ -relaxation $d_{\lambda}$ , which recovers the discrete metric $\mathbf{1}_{\neq}$ in the zero-temperature limit (cf. Lemma A.7). Note that optimizing over two different $\beta_{1}, \beta_{2}$ instead of a unique scale factor $\beta$ is also a good practice to interpolate between the two regularizers. + +# B EXPERIMENT DETAILS + +The code for conducting and replicating our experiments is available at https://github.com/florentdelgrange/wae_mdp. + +# B.1 SETUP + +We used TENSORFLOW 2.7.0 (Abadi et al., 2015) to implement the neural network architecture of our W $^2$ AE-MDP, TENSORFLOW PROBABILITY 0.15.0 (Dillon et al., 2017) to handle the probabilistic components of the latent model (e.g., latent distributions with reparameterization tricks, masked autoregressive flows, etc.), as well as TF-AGENTS 0.11.0 (Guadarrama et al., 2018) to handle the RL parts of the framework. + +Models have been trained on a cluster running under CentOS Linux 7 (Core) composed of a mix of nodes containing Intel processors with the following CPU microarchitectures: (i) 10-core INTEL E5-2680v2, (ii) 14-core INTEL E5-2680v4, and (iii) 20-core INTEL Xeon Gold 6148. We used 8 cores and 32 GB of memory for each run. + +# B.2 STATIONARY DISTRIBUTION + +To sample from the stationary distribution $\xi_{\pi}$ of episodic learning environments operating under $\pi \in \Pi$ , we implemented the recursive $\epsilon$ -perturbation trick of Huang (2020). In a nutshell, the reset of the environment is explicitly added to the state space of $\mathcal{M}$ , which is entered at the end of each episode and left with probability $1 - \epsilon$ to start a new one. We also added a special atomic proposition reset into $\mathbf{AP}$ to label this reset state and reason about episodic behaviors. For instance, this allows verifying whether the agent behaves safely during the entire episode, or if it is able to reach a goal before the end of the episode. 
+ +# B.3 ENVIRONMENTS WITH INITIAL DISTRIBUTION + +Many environments do not necessarily have a single initial state, but rather an initial distribution over states $d_I \in \Delta(S)$ . In that case, the results presented in this paper remain unchanged: it suffices to add a dummy state $s^\star$ to the state space $S \cup \{s^\star\}$ so that $s_I = s^\star$ with the transition dynamics $\mathbf{P}(s' \mid s^\star, a) = d_I(s')$ for any action $a \in \mathcal{A}$ . Therefore, each time the reset of the environment is triggered, we make the MDP entering the initial state $s^\star$ , then transitioning to $s'$ according to $d_I$ . + +# B.4 LATENT SPACE DISTRIBUTION + +As pointed out in Sect. 4, posterior collapse is naturally avoided when optimizing $\mathrm{W}^2\mathrm{AE}$ -MDP. To illustrate that, we report the distribution of latent states produced by $\phi_{\iota}$ during training (Fig. 5). The plots reveal that the latent space generated by mapping original states drawn from $\xi_{\pi}$ during training to $\bar{S}$ via $\phi_{\iota}$ is fairly distributed, for each environment. + +![](images/f9b11e840e926f6a557915662bc20036b7684f609694f4f1a267709f4ba6d4c5.jpg) + +![](images/8de01355e219f78cb7b403d44ffb0758b1492b29fc6e25149cb3d30dff48d553.jpg) + +![](images/c3d02556c539def1c1129ec5b832efd5ae42a21e70f72d962abf4fb2ae673ff8.jpg) + +![](images/a076bd8afdd0ce5bc0a31146c4232995dc4a7ad674884a472320e5eb16858cc0.jpg) +Figure 5: Latent space distribution along training steps. The intensity of the blue hue corresponds to the frequency of latent states produced by $\phi_{\ell}$ during training. + +![](images/d32d8c2a325754534be9499ca240d18de6ba6c2edff8a225444843df1390755c.jpg) + +![](images/76961331004e819990562a69015ed71c0e400d697121e59f354e6d0f7a022e93.jpg) +Figure 6: Absolute value difference $\| V_{\bar{\pi}_{\theta}}\|$ reported along training steps. 
+ +![](images/18694fdb7e5dddee82b29a6cd1e4a68cd5b89af0c4c965d16530ef2ebdef5271.jpg) + +![](images/e106eabe078beab95b972a69189af22111abb78e6639e9af76f8e4f5cd5f62c1.jpg) + +![](images/3777b6eee1c8d9c394343378f40ac5aa2095fb85a998828f9198189142f87869.jpg) + +![](images/975397e4c6281b932be427001e50d427d0925f0174224f6aa25b3fca5d334927.jpg) + +# B.5 DISTANCE METRICS: STATE, ACTION, AND REWARD RECONSTRUCTION + +The choice of the distance functions $d_{\mathcal{S}}$ , $d_{\mathcal{A}}$ , and $d_{\mathcal{R}}$ , plays a role in the success of our approach. The usual Euclidean distance is often a good choice for all the transition components, but the scale, dimensionality, and nature of the inputs sometimes require using scaled, normalized, or other kinds of distances to allow the network to reconstruct each component. While we did not observe such requirements in our experiments (where we simply used the Euclidean distance), high dimensional observations (e.g., images) are an example of data which could require tuning the state-distance function in such a way, to make sure that the optimization of the reward or action reconstruction will not be disfavored compared to that of the states. + +# B.6 VALUE DIFFERENCE + +In addition to reporting the quality guarantees of the model along training steps through local losses (cf. Figure 4b), our experiments revealed that the absolute value difference $\| V_{\overline{\pi}_{\theta}}\|$ between the original and latent models operating under the latent policy quickly decreases and tends to converge to values in the same range (Figure 6). This is consistent with the fact that minimizing local losses lead to close behaviors (cf. Eq. 1) and that the value function is Lipschitz-continuous w.r.t. $\widetilde{d}_{\overline{\pi}_{\theta}}$ (cf. Section 2). + +# B.7 REMARK ON FORMAL VERIFICATION + +Recall that our bisimulation guarantees come by construction of the latent space. 
Essentially, our learning algorithm spits out a distilled policy and a latent state space which already yields a guaranteed bisimulation distance between the original MDP and the latent MDP. This is the crux of how we enable verification techniques like model checking. In particular, bisimulation guarantees mean that reachability probabilities in the latent MDP compared to those in the original one are close. + +Furthermore, the value difference of (omega-regular) properties (formulated through mu-calculus) obtained in the two models is bounded by this distance (cf. Sect. 2 and Chatterjee et al. 2010). + +Reachability is the key ingredient to model-check MDPs. Model-checking properties is in most cases performed by reduction to the reachability of components or regions of the MDP: it either consists of (i) iteratively checking the reachability of the parts of the state space satisfying path formulae that comprise the specification, through a tree-like decomposition of the latter (e.g., for (P,R-)CTL properties, cf. Baier & Katoen 2008), or (ii) checking the reachability to the part of the state space of a product of the MDP with a memory structure or an automaton that embeds the omega-regular property — e.g., for LTL (Baier et al., 2016; Sickert et al., 2016), LTLf (Wells et al., 2020), or GLTL (Littman et al., 2017), among other specification formalisms. The choice of specification formalism is up to the user and depends on the case study. The scope of this work is focusing on learning to distill RL policies with bisimulation guarantees so that model checking can be applied, in order to reason about the behaviors of the agent. That being said, reachability is all we need to show that model checking can be applied. + +# B.8 HYPERPARAMETERS + +$\mathbf{W}^2\mathbf{AE}$ -MDP parameters. All components (e.g., functions or distribution locations and scales, see Fig. 2) are represented and inferred by neural networks (multilayer perceptrons). 
All the networks share the same architecture (i.e., number of layers and neurons per layer). We use a simple uniform experience replay of size $10^{6}$ to store the transitions and sample them. The training starts when the agent has collected $10^{4}$ transitions in $\mathcal{M}$ . We used minibatches of size 128 to optimize the objective and we applied a minibatch update every time the agent executing $\pi$ has performed 16 steps in $\mathcal{M}$ . We use the recursive $\epsilon$ -perturbation trick of Huang (2020) with $\epsilon = 3/4$ : when an episode ends, it restarts from the initial state with probability $1/4$ ; before re-starting an episode, the time spent in the reset state labeled with reset then follows the geometric distribution with expectation $\epsilon/(1 - \epsilon) = 3$ . We chose the same latent state-action space size as Delgrange et al. (2022), except for LunarLander that we decreased to $\log_2|\bar{S}| = 14$ and $|\bar{\mathcal{A}}| = 3$ to improve the scalability of the verification. + +VAE-MDPs parameters. For the comparison of Sect. 4, we used the exact same VAE-MDP hyperparameter set as prescribed by Delgrange et al. (2022), except for the state-action space of LunarLander that we also changed for scalability and fair comparison purposes. + +Hyperparameter search. To evaluate our $\mathrm{W}^2\mathrm{AE}$ -MDP, we performed a search in the parameter space defined in Table 2. The best parameters found (in terms of trade-off between performance and latent quality) are reported in Table 3. We used two different optimizers for minimizing the loss (referred to as the minimizer) and computing the Wasserstein terms (referred to as the maximizer). We used ADAM (Kingma & Ba, 2015) for both, but we allow for different learning rates $\mathrm{ADAM}_{\alpha}$ and exponential decays $\mathrm{ADAM}_{\beta_1}$ , $\mathrm{ADAM}_{\beta_2}$ . 
We also found that polynomial decay for $\mathrm{ADAM}_{\alpha}$ (e.g., to $10^{-5}$ for $4 \cdot 10^{5}$ steps) is a good practice to stabilize the experiment learning curves, but is not necessary to obtain high-quality and performing distillation. Concerning the continuous relaxation of discrete distributions, we used a different temperature for each distribution, as Maddison et al. (2017) pointed out that doing so is valuable to improve the results. We further followed the guidelines of Maddison et al. (2017) to choose the interval of temperatures and did not schedule any annealing scheme (in contrast to VAE-MDPs). Essentially, the search reveals that the regularizer scale factors $\beta$ . (defining the optimization direction) as well as the encoder and latent transition temperatures are important to improve the performance of distilled policies. For the encoder temperature, we found a nice spot in $\lambda_{\phi_\varepsilon} = 2/3$ , which provides the best performance in general, whereas the choice of $\lambda_{\overline{\mathbb{P}}_\theta}$ and $\beta$ : are (latent-) environment dependent. The importance of the temperature parameters for the continuous relaxation of discrete distributions is consistent with the results of (Maddison et al., 2017), revealing that the success of the relaxation depends on the choice of the temperature for the different latent space sizes. + +Labeling functions. We used the same labeling functions as those described by Delgrange et al. (2022). For completeness, we recall the labeling function used for each environment in Table 4. + +Table 2: Hyperparameter search. ${\lambda }_{X}$ refers to the temperature used for ${\mathrm{W}}^{2}\mathrm{{AE}}$ -MDP component $X$ . + +
ParameterRange
ADAMα (minimizer){0.0001,0.0002,0.0003,0.001}
ADAMα (maximizer){0.0001,0.0002,0.0003,0.001}
ADAMβ1{0,0.5,0.9}
ADAMβ2{0.9,0.999}
neurons per layer{64,128,256,512}
number of hidden layers{1,2,3}
activation{ReLU,LeakyReLU,tanh,softplus(2x+2)/2-1(smooth ELU)}
βwξπ{10,25,50,75,100}
βLξπ{10,25,50,75,100}
m{5,10,15,20}
δ{10,20}
use ε-mimic (cf. Delgrange et al. 2022){True,False} (if True, a decay rate of 10-5is used)
λPθ{0.1,1/3,1/2,2/3,3/5,0.99}
λφl{0.1,1/3,1/2,2/3,3/5,0.99}
λπθ{1/|A|-1,1/(|A|-1).15}
λφlA{1/|A|-1,1/(|A|-1).15}
+ +Table 3: Final hyperparameters used to evaluate ${\mathrm{W}}^{2}\mathrm{{AE}}$ -MDPs in Sect. 4 + +
CartPoleMountainCarAcrobotLunarLanderPendulum
log2|S|910131413
|A|2 = |A|2 = |A|3 = |A|33
activationtanhReLULeaky ReluReLUReLU
layers[64, 64, 64][512, 512][512, 512][256][256, 256, 256]
ADAMα (minimizer)0.00020.00010.00020.00030.0003
ADAMα (maximizer)0.00020.00010.00010.00030.0003
ADAMβ10.50000.5
ADAMβ20.9990.9990.9990.9990.999
βLξπ1025105025
βWξπ751001010025
m52020155
δ2010202010
ε00000.5
λPθ1/31/30.10.752/3
λφi1/32/32/32/32/3
λπθ2/31/30.50.50.5
λφiA///1/31/3
+ +
EnvironmentS⊆Description, for s ∈ Sℓ(s) = <p1, ..., pn, preset>
CartPoleR4• s1: cart position +• s2: cart velocity +• s3: pole angle (rad) +• s4: pole velocity at tip• p1 = 1s1≥1.5: unsafe cart position +• p2 = 1s3≥0.15: unsafe pole angle
MountainCarR2• s1: position +• s2: velocity• p1 = 1s1>1.5: target position +• p2 = 1s1≥-1/2: right-hand side of the mountain +• p3 = 1s2≥0: car going forward
AcrobotR6Let θ1, θ2 ∈ [0, 2π] be the angles of the two rotational joints, +• s1 = cos(θ1) +• s2 = sin(θ1) +• s3 = cos(θ2) +• s4 = sin(θ2) +• s5: angular velocity 1 +• s6: angular velocity 2• p1 = 1-s1-s3·s1+s4·s2>1: RL agent target +• p2 = 1s1≥0: θ1 ∈ [0, π/2] ∪ [3π/2, 2π] +• p3 = 1s2≥0: θ1 ∈ [0, π] +• p4 = 1s3≥0: θ2 ∈ [0, π/2] ∪ [3π/2, 2π] +• p5 = 1s4≥0: θ2 ∈ [0, π] +• p6 = 1s5≥0: positive angular velocity (1) +• p7 = 1s6≥0: positive angular velocity (2)
PendulumR3Let θ ∈ [0, 2π] be the joint angle +• s1 = cos(θ) +• s2 = sin(θ) +• s3: angular velocity• p1 = 1s1≥cos(π/3): safe joint angle +• p2 = 1s1≥0: θ ∈ [0, π/2] ∪ [3π/2, 2π] +• p3 = 1s2≥0: θ ∈ [0, π] +• p4 = 1s3≥0: positive angular velocity
LunarLanderR8• s1: horizontal coordinates +• s2: vertical coordinates +• s3: horizontal speed +• s4: vertical speed +• s5: ship angle +• s6: angular speed +• s7: left leg contact +• s8: right leg contact• p1: unsafe angle +• p2: leg ground contact +• p3: lands rapidly +• p4: left inclination +• p5: right inclination +• p6: motors shut down
+ +Table 4: Labeling functions for the OpenAI environments considered in our experiments (Delgrange et al., 2022). We provide a short description of the state space and the meaning of each atomic proposition. Recall that labels are binary encoded, for $n = |\mathbf{AP}| - 1$ (one bit is reserved for reset) and $p_{\mathrm{reset}} = 1$ iff $s$ is a reset state (cf. Appendix B.2). + +Time to failure properties. Based on the labeling described in Table 4, we formally detail the time to failure properties checked in Sect. 4 whose results are listed in Table 1 for each environment. Let $\text{Reset} = \{\text{reset}\} = \langle 0, \dots, 1 \rangle$ (we assume here that the last bit indicates whether the current state is a reset state or not) and define $s \models \mathsf{L}_1 \land \mathsf{L}_2$ iff $s \models \mathsf{L}_1$ and $s \models \mathsf{L}_2$ for any $s \in S$ , then + +- CartPole: $\varphi = \neg$ Reset $\mathcal{U}$ Unsafe, where Unsafe $= \langle 1,1,0\rangle$ +- MountainCar: $\varphi = \neg$ GoalU Reset, where Goal $= \langle 1,0,0,0\rangle$ +- Acrobot: $\varphi = \neg$ GoalU Reset, where Goal $= \langle 1,0,\dots ,0\rangle$ +- LunarLander: $\varphi = \neg$ SafeLanding $\mathcal{U}$ Reset, where SafeLanding $=$ GroundContact $\land$ MotorsOff, GroundContact $=\langle 0,1,0,0,0,0,0\rangle$ , and MotorsOff $=\langle 0,0,0,0,0,1,0\rangle$ +- Pendulum: $\varphi = \diamondsuit (\neg \text{Safe} \land \bigcirc \text{Reset})$ , where Safe = $\langle 1,0,0,0,0\rangle$ , $\diamondsuit \mathsf{T} = \neg \emptyset \mathcal{U} \mathsf{T}$ , and $s_i \models \bigcirc \mathsf{T}$ iff $s_{i+1} \models \mathsf{T}$ , for any $\mathsf{T} \subseteq \mathbf{AP}$ , $s_{i:\infty}, a_{i:\infty} \in \text{Traj}$ . Intuitively, $\varphi$ denotes the event of ending an episode in an unsafe state, just before resetting the environment, which means that either the agent never reached the safe region or it reached and left it at some point. 
Formally, $\varphi = \{s_{0:\infty}, a_{0:\infty} \mid \exists i \in \mathbb{N}, s_i \models \neg \text{Safe} \land s_{i+1} \models \text{Reset}\} \subseteq \text{Traj}$ . + +# C ON THE CURSE OF VARIATIONAL MODELING + +Posterior collapse is a well-known issue occurring in variational models (see, e.g., Alemi et al. 2018; Tolstikhin et al. 2018; He et al. 2019; Dong et al. 2020) which intuitively results in a degenerate local optimum where the model learns to ignore the latent space and use only the reconstruction functions (i.e., the decoding distribution) to optimize the objective. VAE-MDPs are no exception, as pointed out in the original paper (Delgrange et al., 2022, Section 4.3 and Appendix C.2). + +![](images/85a29476b5c38b313eac6a809b2a1d01b774ebfb0476120102db2a2c5df884c2.jpg) +(a) Latent space distribution along training steps. The intensity of the blue hue corresponds to the frequency of latent states produced from $\phi_{\iota}$ during training. The vanilla model collapses to a single state. + +![](images/5e1e6827c55e71f4a031d936a287450b1b4acbb5c82f45c46029490dc63a095b.jpg) + +![](images/07df2d4f32a0d23b07beeedbfc742076773ffc890df1b7262e13683eef5f714b.jpg) +(b) Rate of the variational model. + +![](images/8ddee2b133ea3f2a9f0eaf010868ff0e9ac0b02a43b19636f55ecd3f5cabea1e.jpg) +(c) Distortion of the variational model. + +![](images/aab5da58b05380464cfc1a5680fc87d6b66a66e0017fa4a7b8f0b83a44a66b16.jpg) +(d) Average point-wise entropy of $\phi_{\iota}(\cdot \mid s)$ , for $s \in S$ drawn from the interaction with the original environment. + +![](images/14909e47b3550db438ea0715e7f9590574bf78942418fe291acc608c902c230a.jpg) +(e) Performance of the resulting distilled policy $\bar{\pi}_{\theta}$ when deployed in the original environment (averaged over 30 episodes). 
+Figure 7: Comparison of the VAE-MDP in the CartPole environment (i) when the distortion and the rate are minimized as is (vanilla model) and (ii) when it makes use of annealing schemes, entropy regularization, and prioritized experience replay to avoid posterior collapse (cf. Delgrange et al. 2022). While the former clearly fails to learn a useful latent representation, the latter does so meticulously and smoothly in two distinguishable phases: first, $\phi_{\iota}$ focuses on fairly distributing the latent space, setting the stage for the concrete optimization occurring from step $4\cdot 10^{5}$ , where the entropy of $\phi_{\iota}$ is lowered, which allows the rate of the variational model to move away from zero. Five instances of the models are trained with different random seeds, with the same hyperparameters as in Sect. 4. + +Formally, VAE- and WAE-MDPs optimize their objective by minimizing two losses: a reconstruction cost plus a regularizer term which penalizes a discrepancy between the encoding distribution and the dynamics of the latent space model. In VAE-MDPs, the former corresponds to the distortion, and the latter to the rate of the variational model (further details are given in Alemi et al. 2018; Delgrange et al. 2022), while in our WAE-MDPs, the former corresponds to the raw transition distance and the latter to both the steady-state and transition regularizers. Notably, the rate minimization of VAE-MDPs involves regularizing a stochastic embedding function $\phi_{\iota}(\cdot | s)$ point-wise, i.e., for all different input states $s \in S$ drawn from the interaction with the original environment. In contrast, the latent space regularization of the WAE-MDP involves the marginal embedding distribution $Q_{\iota}$ where the embedding function $\phi_{\iota}$ is not required to be stochastic. Alemi et al. (2018) showed that posterior collapse occurs in VAEs when the rate of the variational model is close to zero, leading to low-quality representation. 
+ +Posterior collapse in VAE-MDPs. We illustrate the sensitivity of VAE-MDPs to the posterior collapse problem in Fig. 7, through the CartPole environment3: minimizing the distortion and the rate as is yields an embedding function which maps deterministically every input state to the same sink latent state (cf. Fig. 7a). Precisely, there is a latent state $\bar{s} \in \bar{S}$ so that $\phi_{\iota}(\bar{s} \mid s) \approx 1$ and $\overline{\mathbf{P}}_{\theta}(\bar{s} \mid \bar{s}, \bar{a}) \approx 1$ whatever the state $s \in S$ and action $\bar{a} \in \overline{A}$ . This is a form of posterior collapse: the resulting rate quickly drops to zero (cf. Fig 7b), and the resulting latent representation yields no information at all. This phenomenon is handled in VAE-MDPs by using (i) prioritized replay buffers that allow focusing on inputs that led to bad representation, and (ii) modifying the objective + +function for learning the latent space model — the so-called evidence lower bound (Hoffman et al., 2013; Kingma & Welling, 2014), or ELBO for short — and set up annealing schemes to eventually recover the ELBO at the end of the training process. Consequently, the resulting learning procedure focuses primarily on fairly distributing the latent space, to avoid it collapsing to a single latent state, to the detriment of learning the dynamics of the environment and the distillation of the RL policy. Then, the annealing scheme allows the model to eventually learn to smoothly use the latent space to maximize the ELBO, and achieve consequently a lower distortion at the "price" of a higher rate. + +Impact of the resulting learning procedure. The aforementioned annealing process, used to avoid every state collapsing to the same representation, possibly induces a high-entropy embedding function (Fig. 7d), which further complicates the learning of the model dynamics and the distillation in the first stage of the training process. 
In fact, in this particular case, one can observe that the entropy reaches its maximal value, which yields a fully random state embedding function. Recall that the VAE-MDP latent space is learned through independent Bernoulli distributions. Fig. 7d reports values centered around 4.188 in the first training phase, which corresponds to the entropy of the state embedding function when $\phi_{\iota}(\cdot |s)$ is uniformly distributed over $\bar{S}$ for any state $s\in S$ : $H(\phi_{\iota}(\cdot |s)) = \sum_{i = 1}^{\log_2|\bar{S}| - |\mathbf{AP}|}\left(- p_i\log p_i - (1 - p_i)\log (1 - p_i)\right) = 4.188,$ where $\log_2|\bar{S}| - |\mathbf{AP}| = 6$ and $p_i = 1 / 2$ for all $i$. The rate (Fig. 7b) drops to zero since the divergence pulls the latent dynamics towards this high entropy (yet another form of posterior collapse), which hinders the latent space model from learning a useful representation. However, the annealing scheme increases the rate importance along training steps, which enables the optimization to eventually leave this local optimum (here around $4\cdot 10^{5}$ training steps). This allows the learning procedure to leave the zero-rate spot, reduce the distortion (Fig. 7c), and finally distill the original policy (Fig. 7e). + +As a result, the whole engineering required to mitigate posterior collapse slows down the training procedure. This phenomenon is reflected in Fig. 4: VAE-MDPs need several steps to stabilize and set the stage for the concrete optimization, whereas WAE-MDPs have no such requirements since they naturally do not suffer from collapsing issues (cf. Fig. 5), and are consequently faster to train. + +Lack of representation guarantees. On the theoretical side, since VAE-MDPs are optimized via the ELBO and the local losses via the related variational proxies, VAE-MDPs do not leverage the representation quality guarantees induced by local losses (Eq. 1) during the learning procedure (as explicitly pointed out by Delgrange et al., 2022, Sect. 
4.1.): in contrast to WAE-MDPs, when two original states are embedded to the same latent, abstract state, the former are not guaranteed to be bisimilarly close (i.e., the agent is not guaranteed to behave the same way from those two states by executing the policy), meaning those proxies do not prevent original states having distant values collapsing together to the same latent representation. + +# INDEX OF NOTATIONS + +$\mathbf{1}_{[cond]}$ indicator function: 1 if the statement [cond] is true, and 0 otherwise + +$\mathcal{F}_d$ Set of 1-Lipschitz functions w.r.t. the distance metric $d$ + +$\sigma$ Sigmoid function, with $\sigma (x) = 1 / 1 + \exp (-x)$ + +$f_{\theta}$ A function $f_{\theta} \colon \mathcal{X} \to \mathbb{R}$ modeled by a neural network, parameterized by $\theta$ , where $\mathcal{X}$ is any measurable set + +# Latent Space Model + +$\overline{\mathcal{M}} = \langle \overline{S}, \overline{\mathcal{A}}, \overline{\mathbf{P}}, \overline{\mathcal{R}}, \bar{\ell}, \mathbf{AP}, \bar{s}_I \rangle$ Latent MDP with state space $\overline{S}$ , action space $\overline{\mathcal{A}}$ , reward function $\overline{\mathcal{R}}$ , labeling function $\bar{\ell}$ , atomic proposition space $\mathbf{AP}$ , and initial state $\bar{s}_I$ . 
+ +$\langle \overline{\mathcal{M}},\phi ,\psi \rangle$ Latent space model of $\mathcal{M}$ + +$\bar{a}$ Latent action in $\overline{\mathcal{A}}$ + +$\bar{\pi}$ Latent policy $\bar{\pi}:\bar{S}\to \mathcal{A}$ ; can be executed in $\mathcal{M}$ via $\phi$ : $\bar{\pi} (\cdot \mid \phi (s))$ + +$d_{\overline{S}}$ Distance metric over $\bar{S}$ + +$\phi$ (20 State embedding function, from $\mathcal{S}$ to $\overline{\mathcal{S}}$ + +$\psi$ Action embedding function, from $\overline{S}\times \overline{A}$ to $\mathcal{A}$ + +$\phi \mathbf{P}$ Distribution of drawing $s^\prime \sim \mathbf{P}(\cdot \mid s,a)$ , then embedding $\bar{s}^{\prime} = \phi (s^{\prime})$ , for any state $s\in S$ and action $a\in \mathcal{A}$ + +$L_{\mathcal{R}}^{\xi}$ Local reward loss under distribution $\xi$ + +$L_{\mathbf{P}}^{\xi}$ Local transition loss under distribution $\xi$ + +$\overline{\Pi}$ Set of (memoryless) latent policies + +$\bar{s}$ Latent state in $\bar{S}$ + +$\overrightarrow{V_{\pi}}$ Latent value function + +# Markov Decision Processes + +$\mathcal{M} = \langle S, \mathcal{A}, \mathbf{P}, \mathcal{R}, \ell, \mathbf{AP}, s_I \rangle$ MDP $\mathcal{M}$ with state space $S$ , action space $\mathcal{A}$ , transition function $\mathbf{P}$ , labeling function $\ell$ , atomic proposition space $\mathbf{AP}$ , and initial state $s_I$ . 
+ +$a$ Action in $\mathcal{A}$ + +$\widetilde{d}_{\pi}$ Bisimulation pseudometric + +$\gamma$ Discount factor in [0, 1] + +$d_{\mathcal{A}}$ Metric over the action space + +$d_{\mathcal{R}}$ Metric over $\operatorname {Im}(\mathcal{R})$ + +$d_{\mathcal{S}}$ Metric over the state space + +$\xi_{\pi}^{t}$ Limiting distribution of the MDP defined as $\xi_{\pi}^{t}(s^{\prime}\mid s) = \mathbb{P}_{\pi}^{\mathcal{M}_{s}}\left(\left\{s_{0:\infty},a_{0:\infty}\mid s_{t} = s^{\prime}\right\}\right)$ , for any source state $s\in S$ + +II Set of memoryless policies of $\mathcal{M}$ + +$\pi$ (204 Memoryless policy $\pi \colon S\to \Delta (\mathcal{A})$ + +$\mathbb{P}_{\pi}^{\mathcal{M}}$ Unique probability measure induced by the policy $\pi$ in $\mathcal{M}$ on the Borel $\sigma$ -algebra over measurable subsets of $Traj$ + +CUT Constrained reachability event + +$\mathcal{M}_s$ MDP obtained by replacing the initial state of $\mathcal{M}$ by $s\in S$ + +$s$ (20 State in $\mathcal{S}$ + +$\xi_{\pi}$ Stationary distribution of $\mathcal{M}$ induced by the policy $\pi$ + +$\vec{d}$ Raw transition distance, i.e., metric over $\mathcal{S} \times \mathcal{A} \times \operatorname{Im}(\mathcal{R}) \times \mathcal{S}$ + +Traj Set of infinite trajectories of $\mathcal{M}$ + +$\tau = \langle s_{0:T}, a_{0:T-1} \rangle$ Trajectory + +$V_{\pi}$ Value function for the policy $\pi$ + +# Probability / Measure Theory + +$D$ Discrepancy measure; $D(P,Q)$ is the discrepancy between distributions $P,Q\in \Delta (\mathcal{X})$ + +$\Delta (\mathcal{X})$ Set of measures over a complete, separable metric space $\mathcal{X}$ + +Logistic $(\mu, s)$ Logistic distribution with location parameter $\mu$ and scale parameter $s$ + +$W_{d}$ Wasserstein distance w.r.t. 
the metric $d$ ; $W_{d}(P,Q)$ is the Wasserstein distance between distributions $P, Q \in \Delta(\mathcal{X})$ + +# Wasserstein Auto-encoded MDP + +$\xi_{\theta}$ Behavioral model: distribution over $\mathcal{S} \times \mathcal{A} \times \operatorname{Im}(\mathcal{R}) \times \mathcal{S}$ + +$G_{\theta}$ Mapping $\langle \bar{s},\bar{a},\bar{s}^{\prime}\rangle \mapsto \langle \mathcal{G}_{\theta}(\bar{s}),\psi_{\theta}(\bar{s},\bar{a}),\overline{\mathcal{R}}_{\theta}(\bar{s},\bar{a}),\mathcal{G}_{\theta}(\bar{s}^{\prime})\rangle$ + +$\phi_{\iota}^{A}$ Action encoder mapping $\overline{S}\times \mathcal{A}$ to $\Delta (\overline{\mathcal{A}})$ + +$\mathcal{G}_{\theta}$ State-wise decoder, from $\bar{S}$ to $S$ + +$Q_{\iota}$ Marginal encoding distribution over $\overline{S} \times \overline{A} \times \overline{S}: \mathbb{E}_{s,a,s' \sim \xi_{\pi}} \phi_{\iota}(\cdot \mid s,a,s')$ +$\bar{\xi}_{\bar{\pi}_\theta}$ Stationary distribution of the latent model $\overline{\mathcal{M}}_{\theta}$ , parameterized by $\theta$ +$\mathcal{W}_{\xi_{\overline{\pi}}}$ Steady-state regularizer +$\varphi_{\omega}^{\xi}$ Steady-state Lipschitz network +$\lambda$ Temperature parameter +$\mathcal{T}$ Distribution of drawing state-action pairs from interacting with $\mathcal{M}$ , embedding them to the latent spaces, and finally letting them transition to their successor state in $\overline{\mathcal{M}}_{\theta}$ , in $\Delta (\bar{S}\times \bar{A}\times \bar{S})$ +$\varphi_{\omega}^{\mathbf{P}}$ Transition Lipschitz network + +# ADDITIONAL REFERENCES + +Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S. 
Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow, Andrew Harp, Geoffrey Irving, Michael Isard, Yangqing Jia, Rafal Jozefowicz, Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dandelion Mane, Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Mike Schuster, Jonathon Shlens, Benoit Steiner, Ilya Sutskever, Kunal Talwar, Paul Tucker, Vincent Vanhoucke, Vijay Vasudevan, Fernanda Viégas, Oriol Vinyals, Pete Warden, Martin Wattenberg, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. TensorFlow: Large-scale machine learning on heterogeneous systems, 2015. URL https://www.tensorflow.org/. Software available from tensorflow.org. +Alexander A. Alemi, Ben Poole, Ian Fischer, Joshua V. Dillon, Rif A. Saurous, and Kevin Murphy. Fixing a broken ELBO. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 159-168. PMLR, 2018. URL http://proceedings.mlr.press/v80/alemi18a.html. +Joshua V. Dillon, Ian Langmore, Dustin Tran, Eugene Brevdo, Srinivas Vasudevan, Dave Moore, Brian Patton, Alex Alemi, Matt Hoffman, and Rif A. Saurous. Tensorflow distributions, 2017. +Zhe Dong, Bryan A. Seybold, Kevin Murphy, and Hung H. Bui. Collapsed amortized variational inference for switching nonlinear dynamical systems. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 2638-2647. PMLR, 2020. URL http://proceedings.mlr.press/v119/dong20e.html. +Sergio Guadarrama, Anoop Korattikara, Oscar Ramirez, Pablo Castro, Ethan Holly, Sam Fishman, Ke Wang, Ekaterina Gonina, Neal Wu, Efi Kokiopoulou, Luciano Sbaiz, Jamie Smith, Gábor Bartók, Jesse Berent, Chris Harris, Vincent Vanhoucke, and Eugene Brevdo. TF-Agents: A library for reinforcement learning in tensorflow. 
https://github.com/tensorflow/agents, 2018. URL https://github.com/tensorflow/agents. [Online; accessed 25-June-2019]. +Junxian He, Daniel Spokoyny, Graham Neubig, and Taylor Berg-Kirkpatrick. Lapping inference networks and posterior collapse in variational autoencoders. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019. URL https://openreview.net/forum?id=rylDfnCqF7. +Matthew D. Hoffman, David M. Blei, Chong Wang, and John W. Paisley. Stochastic variational inference. J. Mach. Learn. Res., 14(1):1303-1347, 2013. URL http://dl.acm.org/citation.cfm?id=2502622. +Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. URL http://arxiv.org/abs/1412.6980. + +Diederik P. Kingma and Max Welling. Auto-encoding variational bayes. In Yoshua Bengio and Yann LeCun (eds.), 2nd International Conference on Learning Representations, ICLR 2014, Banff, AB, Canada, April 14-16, 2014, Conference Track Proceedings, 2014. URL http://arxiv.org/abs/1312.6114. +Vidyadhar G. Kulkarni. Modeling and Analysis of Stochastic Systems. Chapman & Hall, Ltd., GBR, 1995. ISBN 0412049910. +Ilya O. Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Scholkopf. Wasserstein autoencoders. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=HkL7n1-0b. 
\ No newline at end of file diff --git a/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/images.zip b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..527ed045b5afe516552753d80d65fe0ce9778ab1 --- /dev/null +++ b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec55c06c108285561a9e039b958d98781e5b76c4618265dfc81952c665445a9 +size 1103958 diff --git a/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/layout.json b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..2d102d300e7a36fb3680ff35794df014a64b23de --- /dev/null +++ b/2023/Wasserstein Auto-encoded MDPs_ Formal Verification of Efficiently Distilled RL Policies with Many-sided Guarantees/layout.json @@ -0,0 +1,31507 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 391, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 391, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 391, + 95 + ], + "type": "text", + "content": "WASSERSTEIN AUTO-ENCODEDMDPS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 95, + 493, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 95, + 493, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 493, + 105 + ], + "type": "text", + "content": "FORMAL VERIFICATION OF EFFICIENTLY DISTILLED RL POLICIES WITH MANY-SIDED GUARANTEES" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 125, + 
192, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 125, + 192, + 137 + ], + "spans": [ + { + "bbox": [ + 111, + 125, + 192, + 137 + ], + "type": "text", + "content": "Florent Delgrange" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 137, + 280, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 137, + 280, + 148 + ], + "spans": [ + { + "bbox": [ + 111, + 137, + 280, + 148 + ], + "type": "text", + "content": "AI Lab, Vrije Universiteit Brussel (VUB)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 148, + 204, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 148, + 204, + 159 + ], + "spans": [ + { + "bbox": [ + 112, + 148, + 204, + 159 + ], + "type": "text", + "content": "University of Antwerp" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 112, + 159, + 293, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 159, + 293, + 169 + ], + "spans": [ + { + "bbox": [ + 112, + 159, + 293, + 169 + ], + "type": "text", + "content": "florent.delgrange@ai.vub.ac.be" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 315, + 125, + 362, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 125, + 362, + 135 + ], + "spans": [ + { + "bbox": [ + 315, + 125, + 362, + 135 + ], + "type": "text", + "content": "Ann Nowé" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 136, + 372, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 136, + 372, + 147 + ], + "spans": [ + { + "bbox": [ + 316, + 136, + 372, + 147 + ], + "type": "text", + "content": "AI Lab, VUB" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 394, + 125, + 479, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 394, + 125, + 479, + 135 + ], + "spans": [ + { + "bbox": [ + 394, + 125, + 479, + 135 + ], + "type": "text", + "content": "Guillermo A. 
Pérez" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 394, + 136, + 486, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 394, + 136, + 486, + 148 + ], + "spans": [ + { + "bbox": [ + 394, + 136, + 486, + 148 + ], + "type": "text", + "content": "University of Antwerp" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 394, + 148, + 456, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 394, + 148, + 456, + 157 + ], + "spans": [ + { + "bbox": [ + 394, + 148, + 456, + 157 + ], + "type": "text", + "content": "Flanders Make" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 276, + 198, + 335, + 210 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 198, + 335, + 210 + ], + "spans": [ + { + "bbox": [ + 276, + 198, + 335, + 210 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 140, + 221, + 471, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 221, + 471, + 431 + ], + "spans": [ + { + "bbox": [ + 140, + 221, + 471, + 431 + ], + "type": "text", + "content": "Although deep reinforcement learning (DRL) has many success stories, the large-scale deployment of policies learned through these advanced techniques in safety-critical scenarios is hindered by their lack of formal guarantees. Variational Markov Decision Processes (VAE-MDPs) are discrete latent space models that provide a reliable framework for distilling formally verifiable controllers from any RL policy. While the related guarantees address relevant practical aspects such as the satisfaction of performance and safety properties, the VAE approach suffers from several learning flaws (posterior collapse, slow learning speed, poor dynamics estimates), primarily due to the absence of abstraction and representation guarantees to support latent optimization. 
We introduce the Wasserstein auto-encoded MDP (WAE-MDP), a latent space model that fixes those issues by minimizing a penalized form of the optimal transport between the behaviors of the agent executing the original policy and the distilled policy, for which the formal guarantees apply. Our approach yields bisimulation guarantees while learning the distilled policy, allowing concrete optimization of the abstraction and representation model quality. Our experiments show that, besides distilling policies up to 10 times faster, the latent model quality is indeed better in general. Moreover, we present experiments from a simple time-to-failure verification algorithm on the latent space. The fact that our approach enables such simple verification techniques highlights its applicability." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 449, + 206, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 449, + 206, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 449, + 206, + 460 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 473, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 473, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 473, + 506, + 639 + ], + "type": "text", + "content": "Reinforcement learning (RL) is emerging as a solution of choice to address challenging real-word scenarios such as epidemic mitigation and prevention strategies (Libin et al., 2020), multi-energy management (Ceusters et al., 2021), or effective canal control (Ren et al., 2021). RL enables learning high performance controllers by introducing general nonlinear function approximators (such as neural networks) to scale with high-dimensional and continuous state-action spaces. 
This introduction, termed deep-RL, causes the loss of the conventional convergence guarantees of RL (Tsitsiklis, 1994) as well as those obtained in some continuous settings (Nowe, 1994), and hinders their wide roll-out in critical settings. This work enables the formal verification of any such policies, learned by agents interacting with unknown, continuous environments modeled as Markov decision processes (MDPs). Specifically, we learn a discrete representation of the state-action space of the MDP, which yield both a (smaller, explicit) latent space model and a distilled version of the RL policy, that are tractable for model checking (Baier & Katoen, 2008). The latter are supported by bisimulation guarantees: intuitively, the agent behaves similarly in the original and latent models. The strength of our approach is not simply that we verify that the RL agent meets a predefined set of specifications, but rather provide an abstract model on which the user can reason and check any desired agent property." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "content": "Variational MDPs (VAE-MDPs, Delgrange et al. 2022) offer a valuable framework for doing so. The distillation is provided with PAC-verifiable bisimulation bounds guaranteeing that the agent behaves similarly (i) in the original and latent model (abstraction quality); (ii) from all original states embedded to the same discrete state (representation quality). Whilst the bounds offer a confidence metric that enables the verification of performance and safety properties, VAE-MDPs suffer from several learning flaws. First, training a VAE-MDP relies on variational proxies to the bisimulation bounds, meaning there is no learning guarantee on the quality of the latent model via its optimization. 
Second, variational autoencoders (VAEs) (Kingma & Welling, 2014; Hoffman et al., 2013) are known" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": "to suffer from posterior collapse (e.g., Alemi et al. 2018) resulting in a deterministic mapping to a unique latent state in VAE-MDPs. Most of the training process focuses on handling this phenomenon and setting up the stage for the concrete distillation and abstraction, finally taking place in a second training phase. This requires extra regularizers, setting up annealing schemes and learning phases, and defining prioritized replay buffers to store transitions. Distillation through VAE-MDPs is thus a meticulous task, requiring a large step budget and tuning many hyperparameters." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 153, + 506, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 153, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 153, + 506, + 255 + ], + "type": "text", + "content": "Building upon Wasserstein autoencoders (Tolstikhin et al., 2018) instead of VAEs, we introduce Wasserstein auto-encoded MDPs (WAE-MDPs), which overcome those limitations. Our WAE relies on the optimal transport (OT) from trace distributions resulting from the execution of the RL policy in the real environment to that reconstructed from the latent model operating under the distilled policy. In contrast to VAEs which rely on variational proxies, we derive a novel objective that directly incorporates the bisimulation bounds. Furthermore, while VAEs learn stochastic mappings to the latent space which need be determined or even entirely reconstructed from data at the deployment time to obtain the guarantees, our WAE has no such requirements, and learn all the necessary components to obtain the guarantees during learning, and does not require such post-processing operations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 258, + 506, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 338 + ], + "type": "text", + "content": "Those theoretical claims are reflected in our experiments: policies are distilled up to 10 times faster through WAE- than VAE-MDPs and provide better abstraction quality and performance in general, without the need for setting up annealing schemes and training phases, nor prioritized buffer and extra regularizer. Our distilled policies are able to recover (and sometimes even outperform) the original policy performance, highlighting the representation quality offered by our new framework: the distillation is able to remove some non-robustness of the input RL policy. 
Finally, we formally verified time-to-failure properties (e.g., Pnueli 1977) to emphasize the applicability of our approach." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 343, + 507, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 343, + 507, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 343, + 507, + 443 + ], + "type": "text", + "content": "Other Related Work. Complementary works approach safe RL via formal methods (Junges et al., 2016; Alshiekh et al., 2018; Jansen et al., 2020; Simão et al., 2021), aimed at formally ensuring safety during RL, all of which require providing an abstract model of the safety aspects of the environment. They also include the work of Alamdari et al. (2020), applying synthesis and model checking on policies distilled from RL, without quality guarantees. Other frameworks share our goal of verifying deep-RL policies (Bacci & Parker, 2020; Carr et al., 2020) but rely on a known environment model, among other assumptions (e.g., deterministic or discrete environment). Finally, DeepSynth (Hasanbeig et al., 2021) allows learning a formal model from execution traces, with the different purpose of guiding the agent towards sparse and non-Markovian rewards." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 448, + 506, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 506, + 515 + ], + "type": "text", + "content": "On the latent space training side, WWAEs (Zhang et al., 2019) reuse OT as latent regularizer discrepancy (in Gaussian closed form), whereas we derive two regularizers involving OT. These two are, in contrast, optimized via the dual formulation of Wasserstein, as in Wasserstein-GANs (Arjovsky et al., 2017). 
Similarly to " + }, + { + "bbox": [ + 104, + 448, + 506, + 515 + ], + "type": "inline_equation", + "content": "VQ" + }, + { + "bbox": [ + 104, + 448, + 506, + 515 + ], + "type": "text", + "content": "-VAEs (van den Oord et al., 2017) and Latent Bernoulli AEs (Fajtl et al., 2020), our latent space model learns discrete spaces via deterministic encoders, but relies on a smooth approximation instead of using the straight-through gradient estimator." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 520, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 506, + 588 + ], + "type": "text", + "content": "Works on representation learning for RL (Gelada et al., 2019; Castro et al., 2021; Zhang et al., 2021; Zang et al., 2022) consider bisimulation metrics to optimize the representation quality, and aim at learning (continuous) representations which capture bisimulation, so that two states close in the representation are guaranteed to provide close and relevant information to optimize the performance of the controller. In particular, as in our work, DeepMDPs (Gelada et al., 2019) are learned by optimizing local losses, by assuming a deterministic MDP and without verifiable confidence measurement." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 602, + 201, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 602, + 201, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 602, + 201, + 615 + ], + "type": "text", + "content": "2 BACKGROUND" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 626, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 506, + 639 + ], + "type": "text", + "content": "In the following, we write " + }, + { + "bbox": [ + 104, + 626, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\Delta(\\mathcal{X})" + }, + { + "bbox": [ + 104, + 626, + 506, + 639 + ], + "type": "text", + "content": " for the set of measures over (complete, separable metric space) " + }, + { + "bbox": [ + 104, + 626, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 626, + 506, + 639 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": "Markov decision processes (MDPs) are tuples " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{M} = \\langle \\mathcal{S},\\mathcal{A},\\mathbf{P},\\mathcal{R},\\ell ,\\mathbf{AP},s_I\\rangle" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " is a set of states; " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": ", a set of actions; " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{P}\\colon S\\times \\mathcal{A}\\to \\Delta (\\mathcal{S})" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": ", a probability transition function that maps the current state and action to a distribution over the next states; " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{R}\\colon S\\times \\mathcal{A}\\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": ", a reward function; " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\ell \\colon S\\to 2^{\\mathbf{AP}}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": ", a labeling function over a set of atomic propositions " + }, + { + "bbox": [ + 104, + 643, + 507, + 
734 + ], + "type": "inline_equation", + "content": "\\mathbf{AP}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": "; and " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "s_I\\in S" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": ", the initial state. If " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "|\\mathcal{A}| = 1" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " is a fully stochastic process called a Markov chain (MC). We write " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_s" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " for the MDP obtained when replacing the initial state of " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "s\\in S" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": ". 
An agent interacting in " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " produces trajectories, i.e., sequences of states and actions " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\tau = \\langle s_{0:T},a_{0:T - 1}\\rangle" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "s_0 = s_I" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "s_{t + 1}\\sim \\mathbf{P}(\\cdot |s_t,a_t)" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "t < T" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": ". The set of infinite trajectories of " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " is Traj. 
We assume " + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{AP}" + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 81, + 252, + 197 + ], + "blocks": [ + { + "bbox": [ + 111, + 81, + 252, + 197 + ], + "lines": [ + { + "bbox": [ + 111, + 81, + 252, + 197 + ], + "spans": [ + { + "bbox": [ + 111, + 81, + 252, + 197 + ], + "type": "image", + "image_path": "d65866c58ffac9e4f32077722ee7503d76e7a60919cc34853594c4512a2a5dac.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 201, + 257, + 222 + ], + "lines": [ + { + "bbox": [ + 105, + 201, + 257, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 257, + 222 + ], + "type": "text", + "content": "(a) Execution of the latent policy " + }, + { + "bbox": [ + 105, + 201, + 257, + 222 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}" + }, + { + "bbox": [ + 105, + 201, + 257, + 222 + ], + "type": "text", + "content": " in the original and latent MDPs, and local losses." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 279, + 81, + 491, + 198 + ], + "blocks": [ + { + "bbox": [ + 279, + 81, + 491, + 198 + ], + "lines": [ + { + "bbox": [ + 279, + 81, + 491, + 198 + ], + "spans": [ + { + "bbox": [ + 279, + 81, + 491, + 198 + ], + "type": "image", + "image_path": "da63f93f7e197340d3bcf8dd1aa131ca3c0690159db32c6ffb49a5cb8c36f4cf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 268, + 201, + 499, + 222 + ], + "lines": [ + { + "bbox": [ + 268, + 201, + 499, + 222 + ], + "spans": [ + { + "bbox": [ + 268, + 201, + 499, + 222 + ], + "type": "text", + "content": "(b) Parallel execution of the original RL policy " + }, + { + "bbox": [ + 268, + 201, + 499, + 222 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 268, + 201, + 499, + 222 + ], + "type": "text", + "content": " in the original and latent MDPs, local losses, and steady-state regularizer." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 231, + 504, + 266 + ], + "lines": [ + { + "bbox": [ + 104, + 231, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 231, + 504, + 266 + ], + "type": "text", + "content": "Figure 1: Latent flows: arrows represent (stochastic) mappings, the original (resp. latent) state-action space is spread along the blue (resp. green) area, and distances are depicted in red. 
Distilling " + }, + { + "bbox": [ + 104, + 231, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 231, + 504, + 266 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 104, + 231, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}" + }, + { + "bbox": [ + 104, + 231, + 504, + 266 + ], + "type": "text", + "content": " via flow (b) by minimizing " + }, + { + "bbox": [ + 104, + 231, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\xi_{\\pi}}" + }, + { + "bbox": [ + 104, + 231, + 504, + 266 + ], + "type": "text", + "content": " allows closing the gap between flows (a) and (b)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "spans": [ + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": "labels being respectively one-hot and binary encoded. 
Given " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "\\mathsf{T} \\subseteq \\mathbf{AP}" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": ", we write " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "s \\models \\mathsf{T}" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": " is labeled with " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "\\mathsf{T}" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "\\ell(s) \\cap \\mathsf{T} \\neq \\emptyset" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "s \\models \\neg \\mathsf{T}" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "s \\models \\mathsf{T}" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": ". We refer to MDPs with continuous state or action spaces as continuous MDPs. 
In that case, we assume " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": " are complete separable metric spaces equipped with a Borel " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": "-algebra, and " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "\\ell^{-1}(\\mathsf{T})" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": " is Borel-measurable for any " + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "inline_equation", + "content": "\\mathsf{T} \\subseteq \\mathbf{AP}" + }, + { + "bbox": [ + 104, + 287, + 504, + 333 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": "Policies and stationary distributions. A (memoryless) policy " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\pi \\colon S \\to \\Delta(\\mathcal{A})" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " prescribes which action to choose at each step of the interaction. 
The set of memoryless policies of " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\Pi" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": ". The MDP " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\pi \\in \\Pi" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " induce an MC " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\pi}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " with unique probability measure " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_{\\pi}^{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " on the Borel " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": "-algebra over measurable subsets " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\varphi \\subseteq \\text{Traj}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " (Puterman, 1994). We drop the superscript when the context is clear. 
Define " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}^{t}(s' | s) = \\mathbb{P}_{\\pi}^{\\mathcal{M}_{s}}(\\{s_{0:\\infty}, a_{0:\\infty} | s_{t} = s'\\})" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " as the distribution giving the probability of being in each state of " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{s}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " after " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " steps. " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "B \\subseteq S" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " is a bottom strongly connected component (BSCC) of " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\pi}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " if (i) " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " is a maximal subset satisfying " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}^{t}(s' | s) > 0" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "s, s' \\in B" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " and some " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "t \\geqslant 0" 
+ }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": ", and (ii) " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{a \\sim \\pi(\\cdot|s)} \\mathbf{P}(B | s, a) = 1" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "s \\in B" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": ". The unique stationary distribution of " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi} \\in \\Delta(B)" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": ". We write " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "s, a \\sim \\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " for sampling " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": ". 
An MDP " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " is ergodic if for all " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\pi \\in \\Pi" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": ", the state space of " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\pi}" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " consists of a unique aperiodic BSCC with " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi} = \\lim_{t \\to \\infty} \\xi_{\\pi}^{t}(\\cdot | s)" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 341, + 506, + 456 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": "Value objectives. 
Given " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\pi \\in \\Pi" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ", the value of a state " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " is the expected value of a random variable obtained by running " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ". For a discount factor " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\gamma \\in [0,1]" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ", we consider the following objectives. (i) Discounted return: we write " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "V_{\\pi}(s) = \\mathbb{E}_{\\pi}^{\\mathcal{M}_s}\\left[\\sum_{t=0}^{\\infty} \\gamma^t \\mathcal{R}(s_t, a_t)\\right]" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " for the expected discounted rewards accumulated along trajectories. 
The typical goal of an RL agent is to learn a policy " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\pi^\\star" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " that maximizes " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "V_{\\pi^\\star}(s_I)" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " through interactions with the (unknown) MDP; (ii) Reachability: let " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\mathsf{C}, \\mathsf{T} \\subseteq \\mathbf{AP}" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ", the (constrained) reachability event is " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\mathsf{CUT} = \\{s_{0:\\infty}, a_{0:\\infty} | \\exists i \\in \\mathbb{N}, \\forall j < i, s_j \\models \\mathsf{C} \\wedge s_i \\models \\mathsf{T}\\} \\subseteq \\mathsf{Traj}" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ". 
We write " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "V_{\\pi}^{\\varphi}(s) = \\mathbb{E}_{\\pi}^{\\mathcal{M}_s}\\left[\\gamma^{t^\\star} \\mathbf{1}_{\\langle s_{0:\\infty}, a_{0:\\infty} \\rangle \\in \\varphi}\\right]" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " for the discounted probability of satisfying " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\varphi = \\mathsf{CUT}" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "t^\\star" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " is the length of the shortest trajectory prefix that allows satisfying " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\varphi" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ". Intuitively, this denotes the discounted return of remaining in a region of the MDP where states are labeled with " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\mathsf{C}" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ", until visiting for the first time a goal state labeled with " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\mathsf{T}" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ", and the return is the binary reward signal capturing this event. Safety w.r.t. 
failure states " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\mathsf{C}" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " can be expressed as the safety-constrained reachability to a destination " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\mathsf{T}" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " through " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\neg \\mathsf{CUT}" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": ". Notice that " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "V_{\\pi}^{\\varphi}(s) = \\mathbb{P}_{\\pi}^{\\mathcal{M}_s}(\\varphi)" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\gamma = 1" + }, + { + "bbox": [ + 104, + 463, + 506, + 601 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": "Latent MDP. 
Given the original (continuous, possibly unknown) environment model " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": ", a latent space model is another (smaller, explicit) MDP " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}} = \\langle \\overline{S}, \\overline{\\mathcal{A}}, \\overline{\\mathbf{P}}, \\overline{\\mathcal{R}}, \\bar{\\ell}, \\mathbf{AP}, \\bar{s}_I \\rangle" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " with state-action space linked to the original one via state and action embedding functions: " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\phi \\colon S \\to \\overline{S}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\psi \\colon \\overline{S} \\times \\overline{A} \\to A" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": ". We refer to " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\langle \\overline{\\mathcal{M}}, \\phi, \\psi \\rangle" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " as a latent space model of " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " as its latent MDP. 
Our goal is to learn " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\langle \\overline{\\mathcal{M}}, \\phi, \\psi \\rangle" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " by optimizing an equivalence criterion between the two models. We assume that " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "d_{\\overline{S}}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " is a metric on " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{S}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": ", and write " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\Pi}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " for the set of policies of " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{V}_{\\overline{\\pi}}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " for the values of running " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\pi} \\in \\overline{\\Pi}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": ". Remark 1 (Latent flow). 
The latent policy " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\pi}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " can be seen as a policy in " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " (cf. Fig. 1a): states passed to " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\pi}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " are first embedded with " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " to the latent space, then the actions produced by " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\pi}" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " are executed via " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " in the original environment. 
Let " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": ", we write " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\bar{a} \\sim \\overline{\\pi}(\\cdot | s)" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\pi}(\\cdot | \\phi(s))" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": ", then the reward and next state are respectively given by " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\mathcal{R}(s, \\bar{a}) = \\mathcal{R}(s, \\psi(\\phi(s), \\bar{a}))" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "inline_equation", + "content": "s' \\sim \\mathbf{P}(\\cdot | s, \\bar{a}) = \\mathbf{P}(\\cdot | s, \\psi(\\phi(s), \\bar{a}))" + }, + { + "bbox": [ + 104, + 608, + 506, + 731 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "Local losses allow quantifying the distance between the original and latent reward/transition functions in the local setting, i.e., under a given state-action distribution " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\xi \\in \\Delta(S \\times \\overline{\\mathcal{A}})" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 130, + 108, + 479, + 129 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 108, + 479, + 129 + ], + "spans": [ + { + "bbox": [ + 130, + 108, + 479, + 129 + ], + "type": "interline_equation", + "content": "L _ {\\mathcal {R}} ^ {\\xi} = \\underset {s, \\bar {a} \\sim \\xi} {\\mathbb {E}} \\left| \\mathcal {R} (s, \\bar {a}) - \\overline {{\\mathcal {R}}} (\\phi (s), \\bar {a}) \\right|, \\quad L _ {\\mathbf {P}} ^ {\\xi} = \\underset {s, \\bar {a} \\sim \\xi} {\\mathbb {E}} D \\big (\\phi \\mathbf {P} (\\cdot | s, \\bar 
{a}), \\overline {{\\mathbf {P}}} (\\cdot | \\phi (s), \\bar {a}) \\big)", + "image_path": "31b06de0bf39a80e9d5ffd37fb2e4a24401343bd72156afec903684ebc1d160f.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\phi \\mathbf{P}(\\cdot \\mid s,\\bar{a})" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " is the distribution of drawing " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "s^\\prime \\sim \\mathbf{P}(\\cdot \\mid s,\\bar{a})" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " then embedding " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\bar{s}^{\\prime} = \\phi (s^{\\prime})" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " is a discrepancy measure. 
Fig 1a depicts the losses when states and actions are drawn from a stationary distribution " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\xi_{\\overline{\\pi}}" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " resulting from running " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}\\in \\overline{\\Pi}" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": ". In this work, we focus on the case where " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " is the Wasserstein distance " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "W_{d_{\\overline{s}}}" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": ": given two distributions " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "P,Q" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " over a measurable set " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " equipped with a metric " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "W_{d}" + }, + { + "bbox": [ + 104, + 131, + 506, + 
233 + ], + "type": "text", + "content": " is the solution of the optimal transport (OT) from " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": ", i.e., the minimum cost of changing " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " (Villani, 2009): " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "W_{d}(P,Q) = \\inf_{\\lambda \\in \\Lambda (P,Q)}\\mathbb{E}_{x,y\\sim \\lambda}d(x,y)" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\Lambda (P,Q)" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " being the set of all couplings of " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": ". 
The Kantorovich duality yields " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "W_{d}(P,Q) = \\sup_{f\\in \\mathcal{F}_{d}}\\mathbb{E}_{x\\sim P}f(x) - \\mathbb{E}_{y\\sim Q}f(y)" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_d" + }, + { + "bbox": [ + 104, + 131, + 506, + 233 + ], + "type": "text", + "content": " is the set of 1-Lipschitz functions. Local losses are related to a well-established behavioral equivalence between transition systems, called bisimulation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": "Bisimulation. A bisimulation " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": " is a behavioral equivalence between states " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "s_1, s_2 \\in S" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": " so that, " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "s_1 \\mathcal{B} s_2" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": " iff (i) " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\mathbf{P}(T \\mid s_1, a) = \\mathbf{P}(T \\mid s_2, a)" 
+ }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": ", (ii) " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\ell(s_1) = \\ell(s_2)" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": ", and (iii) " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{R}(s_1, a) = \\mathcal{R}(s_2, a)" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": " for each action " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "a \\in \\mathcal{A}" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": " and (Borel measurable) equivalence class " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "T \\in S / \\mathcal{B}" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": ". Properties of bisimulation include trajectory and value equivalence (Larsen & Skou, 1989; Givan et al., 2003). Requirements (ii) and (iii) can be respectively relaxed depending on whether we focus only on behaviors formalized through " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\mathbf{AP}" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": " or rewards. The relation can be extended to compare two MDPs (e.g., " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": ") by considering the disjoint union of their state space. 
We denote the largest bisimulation relation by " + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 104, + 239, + 506, + 318 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": "Characterized by a logical family of functional expressions derived from a logic " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": ", bisimulation pseudometrics (Desharnais et al., 2004) generalize the notion of bisimilarity. More specifically, given a policy " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\pi \\in \\Pi" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": ", we consider a family " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": " of real-valued functions parameterized by a discount factor " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": " and defining the semantics of " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\pi}" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + 
"type": "text", + "content": ". Such functional expressions allow to formalize discounted properties such as reachability, safety, as well as general " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": "-regular specifications (Chatterjee et al., 2010) and may include rewards as well (Ferns et al., 2014). The pseudometric " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\widetilde{d}_{\\pi}" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": " is defined as the largest behavioral difference " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\widetilde{d}_{\\pi}(s_1,s_2) = \\sup_{f\\in \\mathcal{F}}|f(s_1) - f(s_2)|" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": ", and its kernel is bisimilarity: " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\widetilde{d}_{\\pi}(s_1,s_2) = 0" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": " iff " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "s_1\\sim s_2" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": ". In particular, value functions are Lipschitz-continuous w.r.t. 
" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\widetilde{d}_{\\pi}" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "|V_{\\pi}^{\\prime}(s_1) - V_{\\pi}^{\\prime}(s_2)|\\leqslant K\\widetilde{d}_{\\pi}(s_1,s_2)" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "^{1 / (1 - \\gamma)}" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": " if rewards are included in " + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 104, + 322, + 506, + 441 + ], + "type": "text", + "content": " and 1 otherwise. To ensure the upcoming bisimulation guarantees, we make the following assumptions:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "text", + "content": "Assumption 2.1. 
MDP " + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "text", + "content": " is ergodic, " + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\operatorname{Im}(\\mathcal{R})" + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "text", + "content": " is a bounded space scaled in " + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "inline_equation", + "content": "[-1/2, 1/2]" + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "text", + "content": ", and the embedding function preserves the labels, i.e., " + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\phi(s) = \\bar{s} \\implies \\ell(s) = \\bar{\\ell}(\\bar{s})" + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\bar{s} \\in \\bar{S}" + }, + { + "bbox": [ + 104, + 441, + 504, + 467 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 474, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 474, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 504, + 498 + ], + "type": "text", + "content": "Note that the ergodicity assumption is compliant with episodic RL and a wide range of continuous learning tasks (see Huang 2020; Delgrange et al. 2022 for detailed discussions on this setting)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "text", + "content": "Bisimulation bounds (Delgrange et al., 2022). " + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "text", + "content": " being set over continuous spaces with possibly unknown dynamics, evaluating " + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "inline_equation", + "content": "\\tilde{d}" + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "text", + "content": " can turn out to be particularly arduous, if not intractable. A solution is to evaluate the original and latent model bisimilarity via local losses: fix " + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "inline_equation", + "content": "\\bar{\\pi} \\in \\overline{\\Pi}" + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "text", + "content": ", assume " + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "text", + "content": " is discrete, then given the induced stationary distribution " + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "inline_equation", + "content": "\\xi_{\\bar{\\pi}}" + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "inline_equation", + "content": "s_1, s_2 \\in S" + }, + { + "bbox": [ + 104, + 504, + 
504, + 551 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "inline_equation", + "content": "\\phi(s_1) = \\phi(s_2)" + }, + { + "bbox": [ + 104, + 504, + 504, + 551 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 118, + 552, + 504, + 580 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 552, + 504, + 580 + ], + "spans": [ + { + "bbox": [ + 118, + 552, + 504, + 580 + ], + "type": "interline_equation", + "content": "\\underset {s \\sim \\xi_ {\\bar {\\pi}}} {\\mathbb {E}} \\widetilde {d} _ {\\bar {\\pi}} (s, \\phi (s)) \\leqslant \\frac {L _ {\\mathcal {R}} ^ {\\xi_ {\\bar {\\pi}}} + \\gamma L _ {\\mathbf {P}} ^ {\\xi_ {\\bar {\\pi}}}}{1 - \\gamma}, \\quad \\widetilde {d} _ {\\bar {\\pi}} (s _ {1}, s _ {2}) \\leqslant \\left(\\frac {L _ {\\mathcal {R}} ^ {\\xi_ {\\bar {\\pi}}} + \\gamma L _ {\\mathbf {P}} ^ {\\xi_ {\\bar {\\pi}}}}{1 - \\gamma}\\right) \\left(\\xi_ {\\bar {\\pi}} ^ {- 1} (s _ {1}) + \\xi_ {\\bar {\\pi}} ^ {- 1} (s _ {2})\\right). \\tag {1}", + "image_path": "95d2ad17ae822963790236ac94da40987fb5f95cab7f490b74dbfaf6fa52bfa8.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "text", + "content": "The two inequalities guarantee respectively the quality of the abstraction and representation: when local losses are small, (i) states and their embedding are bisimilarly close in average, and (ii) all states sharing the same discrete representation are bisimilarly close. The local losses and related bounds can be efficiently PAC-estimated. 
Our goal is to learn a latent model where the behaviors of the agent executing " + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}" + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "text", + "content": " can be formally verified, and the bounds offer a confidence metric allowing to lift the guarantees obtained this way back to the original model " + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "text", + "content": ", when the latter operates under " + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}" + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "text", + "content": ". We show in the following how to learn a latent space model by optimizing the aforementioned bounds, and distill policies " + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "inline_equation", + "content": "\\pi \\in \\Pi" + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "text", + "content": " obtained via any RL technique to a latent policy " + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "inline_equation", + "content": "\\bar{\\pi} \\in \\overline{\\Pi}" + }, + { + "bbox": [ + 104, + 582, + 506, + 671 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 685, + 324, + 697 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 685, + 324, + 697 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 324, + 697 + ], + "type": "text", + "content": "3 WASSERSTEIN AUTO-ENCODEDMDPs" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "text", + "content": "Fix " + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta} = \\langle \\overline{S},\\overline{\\mathcal{A}},\\overline{\\mathbf{P}}_{\\theta},\\overline{\\mathcal{R}}_{\\theta},\\bar{\\ell},\\mathbf{AP},\\bar{s}_I\\rangle" + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\langle \\overline{\\mathcal{M}}_{\\theta},\\phi_{\\iota},\\psi_{\\theta}\\rangle" + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "text", + "content": " as a latent space model of " + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "text", + "content": " parameterized by " + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\iota" + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "text", + "content": ". 
Our method relies on learning a behavioral model " + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\xi_{\\theta}" + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 708, + 506, + 733 + ], + "type": "text", + "content": " from which we can" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": "retrieve the latent space model and distill " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": ". 
This can be achieved via the minimization of a suitable discrepancy between " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\xi_{\\theta}" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\pi}" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": ". VAE-MDPs optimize a lower bound on the likelihood of the dynamics of " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\pi}" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": " using the Kullback-Leibler divergence, yielding (i) " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": ", (ii) a distillation " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": ", and (iii) " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\psi_{\\theta}" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": ". Local losses are not directly minimized, but rather variational proxies that do not offer theoretical guarantees during the learning process. 
To control the local losses minimization and exploit their theoretical guarantees, we present a novel autoencoder that incorporates them in its objective, derived from the OT. Proofs of the claims made in this Section are provided in Appendix A." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 173, + 249, + 184 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 173, + 249, + 184 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 249, + 184 + ], + "type": "text", + "content": "3.1 THE OBJECTIVE FUNCTION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": "Assume that " + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\operatorname{Im}(\\mathcal{R})" + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": " are respectively equipped with metrics " + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{S}}" + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{R}}" 
+ }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": ", we define the raw transition distance metric " + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\vec{d}" + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": " as the component-wise sum of distances between states, actions, and rewards occurring of along transitions: " + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\vec{d}(\\langle s_1, a_1, r_1, s_1' \\rangle, \\langle s_2, a_2, r_2, s_2' \\rangle) = d_{\\mathcal{S}}(s_1, s_2) + d_{\\mathcal{A}}(a_1, a_2) + d_{\\mathcal{R}}(r_1, r_2) + d_{\\mathcal{S}}(s_1', s_2')" + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": ". Given Assumption 2.1, we consider the OT between local distributions, where traces are drawn from episodic RL processes or infinite interactions (we show in Appendix A.1 that considering the OT between trace-based distributions in the limit amounts to reasoning about stationary distributions). 
Our goal is to minimize " + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "inline_equation", + "content": "W_{\\vec{d}}(\\xi_{\\pi}, \\xi_{\\theta})" + }, + { + "bbox": [ + 104, + 193, + 506, + 277 + ], + "type": "text", + "content": " so that" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 177, + 281, + 505, + 308 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 281, + 505, + 308 + ], + "spans": [ + { + "bbox": [ + 177, + 281, + 505, + 308 + ], + "type": "interline_equation", + "content": "\\xi_ {\\theta} (s, a, r, s ^ {\\prime}) = \\int_ {\\bar {S} \\times \\bar {A} \\times \\bar {S}} P _ {\\theta} (s, a, r, s ^ {\\prime} \\mid \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) d \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}} (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}), \\tag {2}", + "image_path": "5eb87506683bb8c22e16fdd5578f38901153e94ba84b5f9adfa93fbc10447fd0.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": " is a transition decoder and " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "\\bar{\\xi}_{\\overline{\\pi}_{\\theta}}" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": " denotes the stationary distribution of the latent model " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": ". As proved by Bousquet et al. 
(2017), this model allows to derive a simpler form of the OT: instead of finding the optimal coupling of (i) the stationary distribution " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\pi}" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": " and (ii) the behavioral model " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "\\xi_{\\theta}" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": ", in the primal definition of " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "W_{\\vec{d}}(\\xi_{\\pi},\\xi_{\\theta})" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": ", it is sufficient to find an encoder " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": " whose marginal is given by " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "Q(\\bar{s},\\bar{a},\\bar{s}^{\\prime}) = \\mathbb{E}_{s,a,s^{\\prime}\\sim \\xi_{\\pi}}q(\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\mid s,a,s^{\\prime})" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": " and identical to " + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 312, + 504, + 381 + ], + "type": "text", + "content": ". This is summarized in the following Theorem, yielding a particular case of Wasserstein-autoencoder Tolstikhin et al. 
(2018):" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "text", + "content": "Theorem 3.1. Let " + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "inline_equation", + "content": "\\xi_{\\theta}" + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "text", + "content": " be respectively a behavioral model and transition decoder as defined in Eq. 2, " + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\theta} \\colon \\overline{\\mathcal{S}} \\to \\mathcal{S}" + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "text", + "content": " be a state-wise decoder, and " + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "inline_equation", + "content": "\\psi_{\\theta}" + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "text", + "content": " be an action embedding function. 
Assume " + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "text", + "content": " is deterministic with Dirac function " + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "inline_equation", + "content": "G_{\\theta}(\\bar{s}, \\bar{a}, \\bar{s}') = \\langle \\mathcal{G}_{\\theta}(\\bar{s}), \\psi_{\\theta}(\\bar{s}, \\bar{a}), \\overline{\\mathcal{R}}_{\\theta}(\\bar{s}, \\bar{a}), \\mathcal{G}_{\\theta}(\\bar{s}') \\rangle" + }, + { + "bbox": [ + 104, + 384, + 504, + 418 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 145, + 424, + 463, + 447 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 424, + 463, + 447 + ], + "spans": [ + { + "bbox": [ + 145, + 424, + 463, + 447 + ], + "type": "interline_equation", + "content": "W_{\\vec{d}}(\\xi_{\\pi},\\xi_{\\theta}) = \\inf_{q:Q = \\bar{\\xi}_{\\pi_{\\theta}}}\\mathbb{E}_{\\substack{s,a,r,s^{\\prime}\\sim \\xi_{\\pi}\\\\ \\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim q(\\cdot |s,a,s^{\\prime})}}\\mathbb{E}_{\\substack{\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim q(\\cdot |s,a,s^{\\prime})}}\\vec{d}\\bigl(\\bigl\\langle s,a,r,s^{\\prime}\\bigr\\rangle ,G_{\\theta}\\bigl(\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\bigr)\\bigr).", + "image_path": "41a1bf324f9515fcfdf804c66a3bbe982359c87aeda0c47a0f34224572cd4979.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "text", + "content": "Henceforth, fix " + }, + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota} \\colon S \\to \\bar{S}" + }, + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "text", + "content": " 
and " + }, + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}^{\\mathcal{A}} \\colon \\bar{S} \\times \\mathcal{A} \\to \\Delta(\\overline{\\mathcal{A}})" + }, + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "text", + "content": " as parameterized state and action encoders with " + }, + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}(\\bar{s}, \\bar{a}, \\bar{s}' \\mid s, a, s') = \\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}} \\cdot \\phi_{\\iota}^{\\mathcal{A}}(\\bar{a} \\mid \\bar{s}, a) \\cdot \\mathbf{1}_{\\phi_{\\iota}(s')} = \\bar{s}'" + }, + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "text", + "content": ", and define the marginal encoder as " + }, + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "inline_equation", + "content": "Q_{\\iota} = \\mathbb{E}_{s, a, s' \\sim \\xi_{\\pi}} \\phi_{\\iota}(\\cdot \\mid s, a, s')" + }, + { + "bbox": [ + 104, + 458, + 504, + 496 + ], + "type": "text", + "content": ". 
Training the model components can be achieved via the objective:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 144, + 501, + 465, + 523 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 501, + 465, + 523 + ], + "spans": [ + { + "bbox": [ + 144, + 501, + 465, + 523 + ], + "type": "interline_equation", + "content": "\\min_{\\iota ,\\theta}\\underset {s,a,r,s^{\\prime}\\sim \\xi_{\\pi}}{\\mathbb{E}}\\underset {\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim \\phi_{\\iota}(\\cdot |s,a,s^{\\prime})}{\\mathbb{E}} \\vec{d}\\bigl(\\bigl\\langle s, a,r,s^{\\prime}\\bigr\\rangle ,G_{\\theta}\\bigl(\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\bigr)\\bigr) + \\beta \\cdot D\\bigl(Q_{\\iota},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}}\\bigr),", + "image_path": "90dcb606f558440b519bc4a6772bb625f2761fb04e30f47985ce54eaec0c1e54.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "text", + "content": " is an arbitrary discrepancy metric and " + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "inline_equation", + "content": "\\beta > 0" + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "text", + "content": " a hyperparameter. 
Intuitively, the encoder " + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "text", + "content": " can be learned by enforcing its marginal distribution " + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "inline_equation", + "content": "Q_{\\iota}" + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "text", + "content": " to match " + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "inline_equation", + "content": "\\bar{\\xi}_{\\overline{\\pi}_{\\theta}}" + }, + { + "bbox": [ + 104, + 527, + 504, + 550 + ], + "type": "text", + "content": " through this discrepancy." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "text", + "content": "Remark 2. If " + }, + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "text", + "content": " has a discrete action space, then learning " + }, + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "text", + "content": " is not necessary. We can set " + }, + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{A}} = \\mathcal{A}" + }, + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "text", + "content": " using identity functions for the action encoder and decoder (details in Appendix A.2)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": " is executed in " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": ", observe that its parallel execution in " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": " is enabled by the action encoder " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}^{A}" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": ": given an original state " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": " first prescribes the action " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "a \\sim \\pi(\\cdot \\mid s)" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": ", which is then embedded in the latent space via " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\bar{a} \\sim 
\\phi_{\\iota}^{A}(\\cdot \\mid \\phi_{\\iota}(s), a)" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": " (cf. Fig. 1b). This parallel execution, along with setting " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "W_{\\vec{d}}" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": ", yield an upper bound on the latent regularization, compliant with the bisimulation bounds. A two-fold regularizer is obtained thereby, defining the foundations of our objective function:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "type": "text", + "content": "Lemma 3.2. 
Define " + }, + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(\\bar{s},\\bar{a},\\bar{s}^{\\prime}) = \\mathbb{E}_{s,a\\sim \\xi_{\\pi}}[\\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}}\\cdot \\phi_{\\iota}^{A}(\\bar{a}\\mid \\bar{s},a)\\cdot \\overline{\\mathbf{P}}_{\\theta}(\\bar{s}^{\\prime}\\mid \\bar{s},\\bar{a})]" + }, + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "type": "text", + "content": " as the distribution of drawing state-action pairs from interacting with " + }, + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "type": "text", + "content": ", embedding them to the latent spaces, and finally letting them transition to their successor state in " + }, + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "type": "text", + "content": ". 
Then, " + }, + { + "bbox": [ + 104, + 641, + 507, + 680 + ], + "type": "inline_equation", + "content": "W_{\\vec{d}}(Q_{\\iota},\\bar{\\xi}_{\\overline{\\pi}_{\\theta}})\\leqslant W_{\\vec{d}}(\\bar{\\xi}_{\\overline{\\pi}_{\\theta}},\\mathcal{T}) + L_{\\mathbf{P}}^{\\xi_{\\pi}}" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 687, + 492, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 492, + 699 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 492, + 699 + ], + "type": "text", + "content": "We therefore define the " + }, + { + "bbox": [ + 104, + 687, + 492, + 699 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 687, + 492, + 699 + ], + "type": "text", + "content": "-MDP (Wasserstein-Wasserstein auto-encoded MDP) objective as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 110, + 704, + 501, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 704, + 501, + 735 + ], + "spans": [ + { + "bbox": [ + 110, + 704, + 501, + 735 + ], + "type": "interline_equation", + "content": "\\min_{\\substack{\\iota ,\\theta \\\\ \\bar{s},\\bar{a},\\bar{s}^{\\prime}\\sim \\phi_{\\iota}(\\cdot |s,a,s^{\\prime})}}\\mathbb{E}\\left[d_{\\mathcal{S}}(s,\\mathcal{G}_{\\theta}(\\bar{s})) + d_{\\mathcal{A}}(a,\\psi_{\\theta}(\\bar{s},\\bar{a})) + d_{\\mathcal{S}}\\big(s^{\\prime},\\mathcal{G}_{\\theta}\\big(\\bar{s}^{\\prime}\\big)\\big)\\right] + L_{\\mathcal{R}}^{\\xi_{\\pi}} + \\beta \\cdot (\\mathcal{W}_{\\xi_{\\pi}} + L_{\\mathbf{P}}^{\\xi_{\\pi}}),", + "image_path": "fd188215d65b1264db3f6564c5888166311ccedb6377401397fa25e295d522aa.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": 
"Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 99, + 489, + 312 + ], + "blocks": [ + { + "bbox": [ + 106, + 85, + 302, + 98 + ], + "lines": [ + { + "bbox": [ + 106, + 85, + 302, + 98 + ], + "spans": [ + { + "bbox": [ + 106, + 85, + 302, + 98 + ], + "type": "text", + "content": "Algorithm 1: Wasserstein" + }, + { + "bbox": [ + 106, + 85, + 302, + 98 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 106, + 85, + 302, + 98 + ], + "type": "text", + "content": " Auto-Encoded MDP" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "lines": [ + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "spans": [ + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": "Input: batch size " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": ", max. step " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": ", no. 
of regularizer updates " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": ", penalty coefficient " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\delta > 0" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "t = 1" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " do \nfor " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "i = 1" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " do \nSample a transition " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "s_i, a_i, r_i, s_i'" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " from the original environment via " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " \nEmbed the transition into the latent space by drawing " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\bar{s}_i, \\bar{a}_i, \\bar{s}_i'" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": 
"inline_equation", + "content": "\\phi_\\iota(\\cdot \\mid s_i, a_i, s_i')" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " \nMake the latent space model transition to the next latent state: " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\bar{s}_i^\\star \\sim \\overline{\\mathbf{P}}_\\theta(\\cdot \\mid \\bar{s}_i, \\bar{a}_i)" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " \nSample a latent transition from " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\bar{\\xi}_{\\overline{\\pi}_\\theta} \\colon z_i \\sim \\bar{\\xi}_{\\overline{\\pi}_\\theta}, \\bar{a}_i' \\sim \\overline{\\pi}_\\theta(\\cdot \\mid z_i)" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "z_i' \\sim \\overline{\\mathbf{P}}_\\theta(\\cdot \\mid z_i, \\bar{a}_i')" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\mathcal{W} \\gets \\sum_{i=1}^{N} \\varphi_\\omega^\\xi(\\bar{s}_i, \\bar{a}_i, \\bar{s}_i^\\star) - \\varphi_\\omega^\\xi(z_i, \\bar{a}_i', z_i') + \\varphi_\\omega^{\\mathbf{P}}(s_i, a_i, \\bar{s}_i, \\bar{a}_i, \\bar{s}_i') - \\varphi_\\omega^{\\mathbf{P}}(s_i, a_i, \\bar{s}_i, \\bar{a}_i, \\bar{s}_i^\\star)" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "P \\gets \\sum_{i=1}^{N} \\mathrm{GP}\\big(\\varphi_\\omega^\\xi, \\langle \\bar{s}_i, \\bar{a}_i, \\bar{s}_i^\\star \\rangle, \\langle z_i, \\bar{a}_i', z_i' \\rangle\\big) + \\mathrm{GP}\\big(x \\mapsto \\varphi_\\omega^{\\mathbf{P}}(s_i, a_i, \\bar{s}_i, \\bar{a}_i, x), \\bar{s}_i', \\bar{s}_i^\\star\\big)" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " \nUpdate the Lipschitz networks 
parameters " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " by ascending " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "1/N \\cdot (\\beta \\mathcal{W} - \\delta P)" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " \nif " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " mod " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "m = 0" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " then \n" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\mathcal{L} \\gets \\sum_{i=1}^{N} d_{\\mathcal{S}}(s_i, \\mathcal{G}_{\\theta}(\\bar{s}_i)) + d_{\\mathcal{A}}(a_i, \\psi_{\\theta}(\\bar{s}_i, \\bar{a}_i)) + d_{\\mathcal{R}}(r_i, \\overline{\\mathcal{R}}_{\\theta}(\\bar{s}_i, \\bar{a}_i)) + d_{\\mathcal{S}}(s_i', \\mathcal{G}_{\\theta}(\\bar{s}_i'))" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " \nUpdate the latent space model parameters " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\langle \\iota, \\theta\\rangle" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " by descending " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "1/N \\cdot (\\mathcal{L} + \\beta \\mathcal{W})" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " \nfunction " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\mathrm{GP}(\\varphi_\\omega, x, y)" + }, + { + "bbox": [ + 106, 
+ 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim U(0,1)" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\tilde{x} \\gets \\epsilon x + (1 - \\epsilon)y" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "text", + "content": " \nreturn (" + }, + { + "bbox": [ + 106, + 99, + 489, + 312 + ], + "type": "inline_equation", + "content": "\\|\\nabla_{\\tilde{x}}\\varphi_{\\omega}(\\tilde{x})\\| - 1)^2" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "algorithm" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\xi_{\\pi}} = W_{\\vec{d}}\\big(\\mathcal{T},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}}\\big)" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi_{\\pi}}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " are respectively called steady-state and transition regularizers. 
The former allows to quantify the distance between the stationary distributions respectively induced by " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": ", further enabling the distillation. The latter allows to learn the latent dynamics. Note that " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "L_{\\mathcal{R}}^{\\xi_{\\pi}}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi_{\\pi}}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " — set over " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\xi_{\\bar{\\pi}_{\\theta}}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " — are not sufficient to ensure the bisimulation bounds (Eq. 
1): running " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " depends on the parallel execution of " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " in the original model, which does not permit its (conventional) verification. Breaking this dependency is enabled by learning the distillation " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " through " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\xi_{\\pi}}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": ", as shown in Fig. 
1b: minimizing " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\xi_{\\pi}}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " allows to make " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\bar{\\xi}_{\\bar{\\pi}_{\\theta}}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " closer together, further bridging the gap of the discrepancy between " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": ". At any time, recovering the local losses along with the linked bisimulation bounds in the objective function of the " + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^{2}\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 335, + 506, + 452 + ], + "type": "text", + "content": "-MDP is allowed by considering the latent policy resulting from this distillation:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "spans": [ + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "text", + "content": "Theorem 3.3. 
Assume that traces are generated by running a latent policy " + }, + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\bar{\\pi} \\in \\overline{\\Pi}" + }, + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "text", + "content": " in the original environment and let " + }, + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{R}}" + }, + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "text", + "content": " be the usual Euclidean distance, then the " + }, + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "inline_equation", + "content": "W^{2}" + }, + { + "bbox": [ + 104, + 454, + 504, + 478 + ], + "type": "text", + "content": "AE-MDP objective is" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 483, + 468, + 504 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 483, + 468, + 504 + ], + "spans": [ + { + "bbox": [ + 140, + 483, + 468, + 504 + ], + "type": "interline_equation", + "content": "\\min_{\\iota ,\\theta}\\mathbb{E}_{s,s^{\\prime}\\sim \\xi_{\\overline{\\pi}}}\\left[d_{\\mathcal{S}}(s,\\mathcal{G}_{\\theta}(\\phi_{\\iota}(s))) + d_{\\mathcal{S}}\\big(s^{\\prime},\\mathcal{G}_{\\theta}\\big(\\phi_{\\iota}\\big(s^{\\prime}\\big)\\big)\\big)\\right] + L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}} + \\beta \\cdot (\\mathcal{W}_{\\xi_{\\overline{\\pi}}} + L_{\\mathbf{P}}^{\\xi_{\\overline{\\pi}}}).", + "image_path": "c1592d5b369bc8d8ae438c097f356fe8ad98a1c89675f4facd55e92ddeb9c8d6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 520, + 504, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 504, + 544 + ], + "type": "text", + "content": "Optimizing the regularizers is enabled by the dual form of the OT: we introduce two parameterized networks, " + }, + { + "bbox": [ + 104, + 520, + 504, + 544 
+ ], + "type": "inline_equation", + "content": "\\varphi_{\\omega}^{\\xi}" + }, + { + "bbox": [ + 104, + 520, + 504, + 544 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 520, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\varphi_{\\omega}^{\\mathbf{P}}" + }, + { + "bbox": [ + 104, + 520, + 504, + 544 + ], + "type": "text", + "content": ", constrained to be 1-Lipschitz and trained to attain the supremum of the dual:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 548, + 498, + 571 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 548, + 498, + 571 + ], + "spans": [ + { + "bbox": [ + 110, + 548, + 498, + 571 + ], + "type": "interline_equation", + "content": "\\mathcal {W} _ {\\xi_ {\\pi}} (\\omega) = \\max _ {\\omega} \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\iota} ^ {\\mathcal {A}} (\\cdot | \\phi_ {\\iota} (s), a)} {\\mathbb {E}} \\underset {\\bar {s} ^ {\\star} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\xi} (\\phi_ {\\iota} (s), \\bar {a}, \\bar {s} ^ {\\star}) - \\underset {z, \\bar {a} ^ {\\prime}, z ^ {\\prime} \\sim \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\xi} (z, \\bar {a} ^ {\\prime}, z ^ {\\prime})", + "image_path": "a2e2170f366d46fbc08c0673eab23695eeecc60a6fc5374adc053986565b5d2b.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 120, + 578, + 488, + 604 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 578, + 488, + 604 + ], + "spans": [ + { + "bbox": [ + 120, + 578, + 488, + 604 + ], + "type": "interline_equation", + "content": "L _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} (\\omega) = \\max _ {\\omega} \\underset {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi} \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} (\\cdot | s, a, s ^ {\\prime})} 
{\\mathbb {E}} \\underset {s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}} {\\mathbb {E}} \\left[ \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) - \\underset {\\bar {s} ^ {\\star} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\star}) \\right]", + "image_path": "02fcdfbd7814e4822385cc6d51cfd4bd000054e2082ee20312656ba3d1cecc42.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "text", + "content": "Details to derive this tractable form of " + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)" + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "text", + "content": " are in Appendix A.5. The networks are constrained via the gradient penalty approach of Gulrajani et al. (2017), leveraging that any differentiable function is 1-Lipschitz iff it has gradients with norm at most 1 everywhere (we show in Appendix A.6 this is still valid for relaxations of discrete spaces). The final learning process is presented in Algorithm 1." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 667, + 247, + 678 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 247, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 247, + 678 + ], + "type": "text", + "content": "3.2 DISCRETE LATENT SPACES" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "To enable the verification of latent models supported by the bisimulation guarantees of Eq. 1, we focus on the special case of discrete latent space models. Our approach relies on continuous relaxation of discrete random variables, regulated by some temperature parameter(s) " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ": discrete random variables are retrieved as " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\lambda \\rightarrow 0" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ", which amounts to applying a rounding operator. 
For training, we use the" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 78, + 507, + 270 + ], + "blocks": [ + { + "bbox": [ + 106, + 78, + 507, + 270 + ], + "lines": [ + { + "bbox": [ + 106, + 78, + 507, + 270 + ], + "spans": [ + { + "bbox": [ + 106, + 78, + 507, + 270 + ], + "type": "image", + "image_path": "ba6acc59c6dd787cf773da4990450ac8c3811bb05c69258cb6c36e0405fa4212.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 276, + 463, + 289 + ], + "lines": [ + { + "bbox": [ + 146, + 276, + 463, + 289 + ], + "spans": [ + { + "bbox": [ + 146, + 276, + 463, + 289 + ], + "type": "text", + "content": "Figure 2: W" + }, + { + "bbox": [ + 146, + 276, + 463, + 289 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 146, + 276, + 463, + 289 + ], + "type": "text", + "content": "AE-MDP architecture. Distances are depicted by red dotted lines." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 310, + 506, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 310, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 310, + 506, + 346 + ], + "type": "text", + "content": "temperature-controlled relaxations to differentiate the objective and let the gradient flow through the network. When we deploy the latent policy in the environment and formally check the latent model, the zero-temperature limit is used. An overview of the approach is depicted in Fig. 2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": "State encoder. We work with a binary representation of the latent states. First, this induces compact networks, able to deal with a large discrete space via a tractable number of parameter variables. 
But most importantly, this ensures that Assumption 2.1 is satisfied: let " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "n = \\log_2|\\bar{S}|" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": ", we reserve " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "|\\mathbf{AP}|" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": " bits in " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\bar{S}" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": " and each time " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "s\\in S" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": " is passed to " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "n - |\\mathbf{AP}|" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": " bits are produced and concatenated with " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\ell (s)" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": ", ensuring a perfect reconstruction of the labels and further bisimulation bounds. 
To produce Bernoulli variables, " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": " deterministically maps " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": " to a latent code " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": ", passed to the Heaviside " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "H(z) = \\mathbf{1}_{z > 0}" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": ". We train " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": " by using the smooth approximation " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "H_{\\lambda}(z) = \\sigma (^{2}z / \\lambda)" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": ", satisfying " + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "inline_equation", + "content": "H = \\lim_{\\lambda \\to 0}H_{\\lambda}" + }, + { + "bbox": [ + 104, + 352, + 506, + 433 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "text", + "content": "Latent distributions. 
Besides the discontinuity of their latent image space, a major challenge of optimizing over discrete distributions is sampling, required to be a differentiable operation. We circumvent this by using concrete distributions (Jang et al., 2017; Maddison et al., 2017): the idea is to sample reparameterizable random variables from " + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "text", + "content": "-parameterized distributions, and applying a differentiable, nonlinear operator in downstream. We use the Gumbel softmax trick to sample from distributions over (one-hot encoded) latent actions " + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "inline_equation", + "content": "(\\phi_{\\iota}^{A}, \\bar{\\pi}_{\\theta})" + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "text", + "content": ". For binary distributions " + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "inline_equation", + "content": "(\\overline{\\mathbf{P}}_{\\theta}, \\bar{\\xi}_{\\bar{\\pi}_{\\theta}})" + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "text", + "content": ", each relaxed Bernoulli with logit " + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "text", + "content": " is retrieved by drawing a logistic random variable located in " + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "inline_equation", + "content": "\\alpha/\\lambda" + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "text", + "content": " and scaled to " + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "inline_equation", + "content": "1/\\lambda" + }, + { + "bbox": [ + 104, + 439, + 507, + 552 + ], + "type": "text", + "content": ", then applying a sigmoid in downstream. 
We emphasize that this trick alone (as used by Corneil et al. 2018; Delgrange et al. 2022) is not sufficient: it yields independent Bernoullis, being too restrictive in general, which prevents from learning sound transition dynamics (cf. Example 1)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "content": "Example 1. Let " + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "content": " be the discrete MC of Fig. 3. In one-hot, " + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "inline_equation", + "content": "\\mathbf{AP} = \\{\\text{goal}: \\langle 1,0\\rangle" + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "content": ", unsafe: " + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "inline_equation", + "content": "\\langle 0,1\\rangle\\}" + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "content": ". We assume that 3 bits are used for the (binary) state space, with " + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "inline_equation", + "content": "\\bar{S} = \\{\\bar{s}_0:\\langle 0,0,0\\rangle,\\bar{s}_1:\\langle 1,0,0\\rangle,\\bar{s}_2:\\langle 0,1,0\\rangle,\\bar{s}_3:\\langle 0,1,1\\rangle\\}" + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "content": " (the two first bits are reserved for the labels). 
Considering each bit as being independent is not sufficient to learn " + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{P}}" + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "content": ": the optimal estimation " + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{P}}_{\\theta^*}(\\cdot \\mid \\bar{s}_0)" + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "content": " is in that case represented by the independent Bernoulli vector " + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "inline_equation", + "content": "\\mathbf{b} = \\langle 1 / 2,1 / 2,1 / 4\\rangle" + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "content": ", giving the probability to go from " + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "inline_equation", + "content": "\\bar{s}_0" + }, + { + "bbox": [ + 104, + 563, + 339, + 674 + ], + "type": "text", + "content": " to each bit independently. This yields a poor estimation of" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 352, + 566, + 506, + 626 + ], + "blocks": [ + { + "bbox": [ + 352, + 566, + 506, + 626 + ], + "lines": [ + { + "bbox": [ + 352, + 566, + 506, + 626 + ], + "spans": [ + { + "bbox": [ + 352, + 566, + 506, + 626 + ], + "type": "image", + "image_path": "0629839b821124de73e3d0a3e4a307a3344303d23062ef901fc0208c05f77eb4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 636, + 506, + 658 + ], + "lines": [ + { + "bbox": [ + 342, + 636, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 342, + 636, + 506, + 658 + ], + "type": "text", + "content": "Figure 3: Markov Chain with four states; labels are drawn next to their state." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 674, + 507, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 674, + 507, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 507, + 700 + ], + "type": "text", + "content": "the actual transition function: " + }, + { + "bbox": [ + 104, + 674, + 507, + 700 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_0\\mid \\bar{s}_0) = (1 - \\mathbf{b}_1)\\cdot (1 - \\mathbf{b}_2)\\cdot (1 - \\mathbf{b}_3) = \\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_1\\mid \\bar{s}_0) = \\mathbf{b}_1\\cdot (1 - \\mathbf{b}_2)\\cdot (1 - \\mathbf{b}_3) = \\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_2\\mid \\bar{s}_0) = (1 - \\mathbf{b}_1)\\cdot \\mathbf{b}_2\\cdot (1 - \\mathbf{b}_3) = 3 / 16,\\overline{\\mathbf{P}}_{\\theta^{\\star}}(\\bar{s}_3\\mid \\bar{s}_0) = (1 - \\mathbf{b}_1)\\cdot \\mathbf{b}_2\\cdot \\mathbf{b}_3 = 1 / 16." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "text", + "content": "We consider instead relaxed multivariate Bernoulli distributions by decomposing " + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "inline_equation", + "content": "P \\in \\Delta(\\bar{S})" + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "text", + "content": " as a product of conditionals: " + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "inline_equation", + "content": "P(\\bar{s}) = \\prod_{i=1}^{n} P(\\bar{s}_i \\mid \\bar{s}_{1:i-1})" + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\bar{s}_i" + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "inline_equation", + "content": "i^{\\text{th}}" + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "text", + "content": " entry (bit) of " + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\bar{s}" + }, + { + "bbox": [ + 104, + 707, + 506, + 734 + ], + "type": "text", + "content": ". 
We learn" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 83, + 184, + 135 + ], + "blocks": [ + { + "bbox": [ + 112, + 83, + 184, + 135 + ], + "lines": [ + { + "bbox": [ + 112, + 83, + 184, + 135 + ], + "spans": [ + { + "bbox": [ + 112, + 83, + 184, + 135 + ], + "type": "image", + "image_path": "9c55334604e272a24a2d2c459da443d86f0342e1fd7a55b89480a32c1fca213f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 143, + 460, + 156 + ], + "lines": [ + { + "bbox": [ + 149, + 143, + 460, + 156 + ], + "spans": [ + { + "bbox": [ + 149, + 143, + 460, + 156 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 149, + 143, + 460, + 156 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 149, + 143, + 460, + 156 + ], + "type": "text", + "content": "-MDP objective: reconstruction loss, transition and steady-state regularizers" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 186, + 83, + 254, + 135 + ], + "blocks": [ + { + "bbox": [ + 186, + 83, + 254, + 135 + ], + "lines": [ + { + "bbox": [ + 186, + 83, + 254, + 135 + ], + "spans": [ + { + "bbox": [ + 186, + 83, + 
254, + 135 + ], + "type": "image", + "image_path": "714939f162e90922feac0a1018e369dcfa27d5f6eec47ad4de6e24d64c45e0e6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 255, + 83, + 324, + 135 + ], + "blocks": [ + { + "bbox": [ + 255, + 83, + 324, + 135 + ], + "lines": [ + { + "bbox": [ + 255, + 83, + 324, + 135 + ], + "spans": [ + { + "bbox": [ + 255, + 83, + 324, + 135 + ], + "type": "image", + "image_path": "08bc6b25f957e23bb23c7289fe848fcfe54d1ff27c81e02320a9067f2e82e8aa.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 327, + 83, + 392, + 135 + ], + "blocks": [ + { + "bbox": [ + 327, + 83, + 392, + 135 + ], + "lines": [ + { + "bbox": [ + 327, + 83, + 392, + 135 + ], + "spans": [ + { + "bbox": [ + 327, + 83, + 392, + 135 + ], + "type": "image", + "image_path": "7b54af92e28e4b018ff7fd0a910a2c83b0aa1aa63bb9634874cc3e34a1196259.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 393, + 83, + 501, + 135 + ], + "blocks": [ + { + "bbox": [ + 393, + 83, + 501, + 135 + ], + "lines": [ + { + "bbox": [ + 393, + 83, + 501, + 135 + ], + "spans": [ + { + "bbox": [ + 393, + 83, + 501, + 135 + ], + "type": "image", + "image_path": "2b18f0b54e58bdbb50c1f701e27bce3cfbfb89e25b9addfa9341989094d23ab1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 114, + 159, + 190, + 209 + ], + "blocks": [ + { + "bbox": [ + 114, + 159, + 190, + 209 + ], + "lines": [ + { + "bbox": [ + 114, + 159, + 190, + 209 + ], + "spans": [ + { + "bbox": [ + 114, + 159, + 190, + 209 + ], + "type": "image", + "image_path": "536af967a77909eae3bf9ace5be851abc0cff870cd3527cf2c4253a3e2289f5e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 7 + }, + { + "type": "image", + "bbox": [ + 192, + 159, + 257, + 209 + ], + "blocks": [ + { + "bbox": [ + 192, + 159, + 257, + 209 + ], + "lines": [ + { + "bbox": [ + 192, + 159, + 257, + 209 + ], + "spans": [ + { + "bbox": [ + 192, + 159, + 257, + 209 + ], + "type": "image", + "image_path": "0cc522ef4cb964743c61a30daf501ba6b5148d2149b2839dd93fc7ddb89629f2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 258, + 159, + 324, + 209 + ], + "blocks": [ + { + "bbox": [ + 258, + 159, + 324, + 209 + ], + "lines": [ + { + "bbox": [ + 258, + 159, + 324, + 209 + ], + "spans": [ + { + "bbox": [ + 258, + 159, + 324, + 209 + ], + "type": "image", + "image_path": "2e1876c67cc627048c46904e159bf1f7220478afda0433f6a1e357849f6ebeb0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 327, + 159, + 394, + 209 + ], + "blocks": [ + { + "bbox": [ + 327, + 159, + 394, + 209 + ], + "lines": [ + { + "bbox": [ + 327, + 159, + 394, + 209 + ], + "spans": [ + { + "bbox": [ + 327, + 159, + 394, + 209 + ], + "type": "image", + "image_path": "e2eac6b2937052e31cf28f322688c4dca02183fb5ac7b7271aeb9a4b06321bb7.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 395, + 159, + 501, + 209 + ], + "blocks": [ + { + "bbox": [ + 395, + 159, + 501, + 209 + ], + "lines": [ + { + "bbox": [ + 395, + 159, + 501, + 209 + ], + "spans": [ + { + "bbox": [ + 395, + 159, + 501, + 209 + ], + "type": "image", + "image_path": "a8c766d4827e2b8abc267f67b11b833d2e53501b4c4cb0974dc12cdd91d97cb1.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 112, + 233, + 181, + 285 + ], + "blocks": [ + { + "bbox": [ + 127, + 218, + 481, + 230 + ], + "lines": [ + { + "bbox": [ + 127, + 218, 
+ 481, + 230 + ], + "spans": [ + { + "bbox": [ + 127, + 218, + 481, + 230 + ], + "type": "text", + "content": "(b) PAC local losses approximation for an error of at most " + }, + { + "bbox": [ + 127, + 218, + 481, + 230 + ], + "type": "inline_equation", + "content": "10^{-2}" + }, + { + "bbox": [ + 127, + 218, + 481, + 230 + ], + "type": "text", + "content": " and probability confidence 0.955" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 112, + 233, + 181, + 285 + ], + "lines": [ + { + "bbox": [ + 112, + 233, + 181, + 285 + ], + "spans": [ + { + "bbox": [ + 112, + 233, + 181, + 285 + ], + "type": "image", + "image_path": "df8a04275e496f0fbac36084da0e6177998fbf260116ab4544543c0c9aeea6d5.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 185, + 233, + 251, + 285 + ], + "blocks": [ + { + "bbox": [ + 185, + 233, + 251, + 285 + ], + "lines": [ + { + "bbox": [ + 185, + 233, + 251, + 285 + ], + "spans": [ + { + "bbox": [ + 185, + 233, + 251, + 285 + ], + "type": "image", + "image_path": "73c72890fd47c7601ae57c2406a94476c8475834315122aa719fdcd53a86acf3.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 293, + 504, + 304 + ], + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 304 + ], + "type": "text", + "content": "(c) Episode return obtained when executing the distilled policy in the original MDP (averaged over 30 episodes)" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 255, + 233, + 321, + 285 + ], + "blocks": [ + { + "bbox": [ + 255, + 233, + 321, + 285 + ], + "lines": [ + { + "bbox": [ + 255, + 233, + 321, + 285 + ], + "spans": [ + { + "bbox": [ + 255, + 233, + 321, + 285 + ], + "type": "image", + "image_path": 
"424acf2d704a79f4faa63a8ad3b3f9a19381996e84a5600f23ce499f220ce9e1.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 312, + 504, + 336 + ], + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 336 + ], + "type": "text", + "content": "Figure 4: For each environment, we trained five different instances of the models with different random seeds: the solid line is the median and the shaded interval the interquartile range." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 325, + 233, + 392, + 285 + ], + "blocks": [ + { + "bbox": [ + 325, + 233, + 392, + 285 + ], + "lines": [ + { + "bbox": [ + 325, + 233, + 392, + 285 + ], + "spans": [ + { + "bbox": [ + 325, + 233, + 392, + 285 + ], + "type": "image", + "image_path": "1952d42234fa0463371b4de55b130d80d41790bdf5356204e2292de79b8ec308.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 395, + 233, + 501, + 284 + ], + "blocks": [ + { + "bbox": [ + 395, + 233, + 501, + 284 + ], + "lines": [ + { + "bbox": [ + 395, + 233, + 501, + 284 + ], + "spans": [ + { + "bbox": [ + 395, + 233, + 501, + 284 + ], + "type": "image", + "image_path": "1ed507649916381414d2551a56cfda6d2a52c21abecffdfc6917fc5cf6f73be0.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "text", + "content": "such distributions by introducing a masked autoregressive flow (MAF, Papamakarios et al. 
2017) for relaxed Bernoullis via the recursion: " + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\bar{s}_i = \\sigma(l_i + \\alpha_i / \\lambda)" + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "inline_equation", + "content": "l_i \\sim \\mathrm{Logistic}(0,1)" + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\alpha_i = f_i(\\bar{s}_{1:i-1})" + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "text", + "content": " is a MADE (Germain et al., 2015), a feedforward network implementing the conditional output dependency on the inputs via a mask that only keeps the necessary connections to enforce the conditional property. We use this MAF to model " + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{P}}_\\theta" + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "text", + "content": " and the dynamics related to the labels in " + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\bar{\\xi}_{\\overline{\\pi}_\\theta}" + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "text", + "content": ". We fix the logits of the remaining " + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "inline_equation", + "content": "n - |\\mathbf{AP}|" + }, + { + "bbox": [ + 104, + 356, + 506, + 425 + ], + "type": "text", + "content": " bits to 0 to allow for a fairly distributed latent space." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 439, + 201, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 439, + 201, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 201, + 451 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 464, + 506, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 506, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 506, + 555 + ], + "type": "text", + "content": "We evaluate the quality of latent space models learned and policies distilled through " + }, + { + "bbox": [ + 104, + 464, + 506, + 555 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 464, + 506, + 555 + ], + "type": "text", + "content": "-MDPs. To do so, we first trained deep-RL policies (DQN, Mnih et al. 2015 on discrete, and SAC, Haarnoja et al. 2018 on continuous action spaces) for various OpenAI benchmarks (Brockman et al., 2016), which we then distill via our approach (Figure 4). We thus evaluate (a) the " + }, + { + "bbox": [ + 104, + 464, + 506, + 555 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 464, + 506, + 555 + ], + "type": "text", + "content": "-MDP training metrics, (b) the abstraction and representation quality via PAC local losses upper bounds (Delgrange et al., 2022), and (c) the distilled policy performance when deployed in the original environment. The confidence metrics and performance are compared with those of VAE-MDPs. Finally, we formally verify properties in the latent model. The exact setting to reproduce our results is in Appendix B." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 560, + 506, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 560, + 506, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 560, + 506, + 618 + ], + "type": "text", + "content": "Learning metrics. The objective (Fig. 4a) is a weighted sum of the reconstruction loss and the two Wasserstein regularizers. The choice of " + }, + { + "bbox": [ + 104, + 560, + 506, + 618 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 560, + 506, + 618 + ], + "type": "text", + "content": " defines the optimization direction. In contrast to VAEs (cf. Appendix C), WAEs indeed naturally avoid posterior collapse (Tolstikhin et al., 2018), indicating that the latent space is consistently distributed. Optimizing the objective (Fig. 4a) effectively allows minimizing the local losses (Fig. 4b) and recovering the performance of the original policy (Fig. 4c)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": "Local losses. For V- and WAEs, we formally evaluate PAC upper bounds on " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "L_{\\mathcal{R}}^{\\xi \\bar{\\pi}_{\\theta}}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi \\bar{\\pi}_{\\theta}}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": " via the algorithm of Delgrange et al. (2022) (Fig 4b). 
The lower the local losses, the closer " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": " are in terms of behaviors induced by " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": " (cf. Eq. 1). In VAEs, the losses are evaluated on a transition function " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{P}}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": " obtained via frequency estimation of the latent transition dynamics (Delgrange et al., 2022), by reconstructing the transition model a posteriori and collecting data to estimate the transition probabilities (e.g., Bazille et al. 2020; Corneil et al. 2018). We thus also report the metrics for " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{P}}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": ". 
Our bounds quickly converge to close values in general for " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{P}}_{\\theta}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{P}}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": ", whereas for VAEs, the convergence is slow and unstable, with " + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{P}}" + }, + { + "bbox": [ + 103, + 626, + 506, + 733 + ], + "type": "text", + "content": " offering better bounds. We emphasize that WAEs do not require this additional reconstruction step to obtain losses that can be leveraged to assess the" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 124, + 101, + 485, + 165 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 504, + 92 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 504, + 92 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 504, + 92 + ], + "type": "text", + "content": "Table 1: Formal Verification of distilled policies. 
Values are computed for " + }, + { + "bbox": [ + 104, + 79, + 504, + 92 + ], + "type": "inline_equation", + "content": "\\gamma = {0.99}" + }, + { + "bbox": [ + 104, + 79, + 504, + 92 + ], + "type": "text", + "content": " (lower is better)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 124, + 101, + 485, + 165 + ], + "lines": [ + { + "bbox": [ + 124, + 101, + 485, + 165 + ], + "spans": [ + { + "bbox": [ + 124, + 101, + 485, + 165 + ], + "type": "table", + "html": "
Environmentstep (105)SA|S||A|LξπθR(PAC)LξπθP(PAC)||Vπθ||V̅πθ(¯sI)
CartPole1.2⊆ R4{1,2}51220.004996530.3996363.712130.0316655
MountainCar2.32⊆ R2{1,2}102420.01417630.3823232.837140
Acrobot4.3⊆ R6{1,2,3}819230.03476980.6494782.220060.0021911
LunarLander3.2⊆ R8[-1,1]21638430.02072050.1313570.03728830.0702039
Pendulum3.7⊆ R3[-2,2]819230.02667450.5395084.330060.0348492
", + "image_path": "37042cca8921ef3c7dfcc02425615c4885de29a516230f68998f39a729e87a6e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 217 + ], + "type": "text", + "content": "quality of the model, in contrast to VAEs, where learning " + }, + { + "bbox": [ + 104, + 182, + 506, + 217 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{P}}_{\\theta}" + }, + { + "bbox": [ + 104, + 182, + 506, + 217 + ], + "type": "text", + "content": " was performed via overly restrictive distributions, leading to poor estimation in general (cf. Ex. 1). Finally, when the distilled policies offer comparable performance (Fig. 4c), our bounds are either close to or better than those of VAEs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": "Distillation. The bisimulation guarantees (Eq. 1) are only valid for " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": ", the policy under which formal properties can be verified. 
It is crucial that " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": " achieves performance close to " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": ", the original one, when deployed in the RL environment. We evaluate the performance of " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": " via the undiscounted episode return " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_{\\bar{\\pi}_{\\theta}}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": " obtained by running " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": " in the original model " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": ". 
We observe that " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_{\\bar{\\pi}_{\\theta}}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": " approaches faster the original performance " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_{\\pi}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": " for W- than VAEs: WAEs converge in a few steps for all environments, whereas the full learning budget is sometimes necessary with VAEs. The success in recovering the original performance emphasizes the representation quality guarantees (Eq. 1) induced by WAEs: when local losses are minimized, all original states that are embedded to the same representation are bisimilarly close. Distilling the policy over the new representation, albeit discrete and hence coarser, still achieves effective performance since " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": " keeps only what is important to preserve behaviors, and thus values. Furthermore, the distillation can remove some non-robustness obtained during RL: " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": " prescribes the same actions for bisimilarly close states, whereas this is not necessarily the case for " + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 224, + 506, + 357 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "content": "Formal verification. To formally verify " + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "content": ", we implemented a value iteration (VI) engine, handling the neural network encoding of the latent space for discounted properties, which is one of the most popular algorithms for checking property probabilities in MDPs (e.g., Baier & Katoen 2008; Hensel et al. 2021; Kwiatkowska et al. 2022). We verify time-to-failure properties " + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\varphi" + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "content": ", often used to check the failure rate of a system (Pnueli, 1977) by measuring whether the agent fails before the end of the episode. Although simple, such properties highlight the applicability of our approach on reachability events, which are building blocks to verify MDPs (Baier & Katoen 2008; cf. Appendix B.7). In particular, we checked whether the agent reaches an unsafe position or angle (CartPole, LunarLander), does not reach its goal position (MountainCar, Acrobot), and does not reach and stay in a safe region of the system (Pendulum). Results are in Table 1: for each environment, we select the distilled policy which gives the best trade-off between performance (episode return) and abstraction quality (local losses). 
As extra confidence metric, we report the value difference " + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\| V_{\\overline{\\pi}_{\\theta}} \\| = |V_{\\overline{\\pi}_{\\theta}}(s_I) - \\bar{V}_{\\overline{\\pi}_{\\theta}}(\\bar{s}_I)|" + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "content": " obtained by executing " + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\overline{\\pi}_{\\theta}" + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "inline_equation", + "content": "V_{\\overline{\\pi}_{\\theta}}(\\cdot)" + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "content": " is averaged while " + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\bar{V}_{\\overline{\\pi}_{\\theta}}(\\cdot)" + }, + { + "bbox": [ + 104, + 364, + 506, + 513 + ], + "type": "text", + "content": " is formally computed)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 525, + 196, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 196, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 196, + 538 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 550, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 506, + 639 + ], + "type": "text", + "content": "We presented WAE-MDPs, a framework for learning formally verifiable distillations of RL policies with bisimulation guarantees. The latter, along with the learned abstraction of the unknown continuous environment to a discrete model, enables the verification. Our method overcomes the limitations of VAE-MDPs and our results show that it outperforms the latter in terms of learning speed, model quality, and performance, in addition to being supported by stronger learning guarantees. As mentioned by Delgrange et al. (2022), distillation failure reveals the lack of robustness of original RL policies. In particular, we found that distilling highly noise-sensitive RL policies (such as robotics simulations, e.g., Todorov et al. 2012) is laborious, even though the result remains formally verifiable." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "content": "We demonstrated the feasibility of our approach through the verification of reachability objectives, which are building blocks for stochastic model-checking (Baier & Katoen, 2008). 
Besides the scope of this work, the verification of general discounted " + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "content": "-regular properties is theoretically allowed in our model via the reachability to components of standard constructions based on automata products (e.g., Baier et al. 2016; Sickert et al. 2016), and discounted games algorithms (Chatterjee et al., 2010). Beyond distillation, our results, supported by Thm. 3.3, suggest that our WAE-MDP can be used as a general latent space learner for RL, further opening possibilities to combine RL and formal methods online when no formal model is a priori known, and address this way safety in RL with guarantees." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 241, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 241, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 241, + 94 + ], + "type": "text", + "content": "REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 101, + 506, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 101, + 506, + 190 + ], + "spans": [ + { 
+ "bbox": [ + 104, + 101, + 506, + 190 + ], + "type": "text", + "content": "We referenced in the main text the Appendix parts presenting the proofs or additional details of every claim, Assumption, Lemma, and Theorem occurring in the paper. In addition, Appendix B is dedicated to the presentation of the setup, hyperparameters, and other extra details required for reproducing the results of Section 4. We provide the source code of the implementation of our approach in Supplementary material " + }, + { + "bbox": [ + 104, + 101, + 506, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 104, + 101, + 506, + 190 + ], + "type": "text", + "content": ", and we also provide the models saved during training that we used for model checking (i.e., reproducing the results of Table 1). Additionally, we present in a notebook (evaluation.html) videos demonstrating how our distilled policies behave in each environment, and code snippets showing how we formally verified the policies." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 201, + 201, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 201, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 201, + 213 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 220, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 266 + ], + "type": "text", + "content": "This research received funding from the Flemish Government (AI Research Program) and was supported by the DESCARTES iBOF project. G.A. Perez is also supported by the Belgian FWO \"SAILor\" project (G030020N). We thank Raphael Avalos for his valuable feedback during the preparation of this manuscript." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 281, + 176, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 281, + 176, + 293 + ], + "spans": [ + { + "bbox": [ + 106, + 281, + 176, + 293 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 299, + 507, + 715 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 106, + 299, + 506, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 299, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 106, + 299, + 506, + 344 + ], + "type": "text", + "content": "Parand Alizadeh Alamdari, Guy Avni, Thomas A. Henzinger, and Anna Lukina. Formal methods with a touch of magic. In 2020 Formal Methods in Computer Aided Design, FMCAD 2020, Haifa, Israel, September 21-24, 2020, pp. 138-147. IEEE, 2020. doi: 10.34727/2020/isbn.978-3-85448-042-6_21. URL https://doi.org/10.34727/2020/isbn.978-3-85448-042-6_21." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 350, + 506, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 350, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 106, + 350, + 506, + 406 + ], + "type": "text", + "content": "Alexander A. Alemi, Ben Poole, Ian Fischer, Joshua V. Dillon, Rif A. Saurous, and Kevin Murphy. Fixing a broken ELBO. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 159-168. PMLR, 2018. URL http://proceedings.mlr.press/v80/alemi18a.html." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 411, + 507, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 411, + 507, + 490 + ], + "spans": [ + { + "bbox": [ + 106, + 411, + 507, + 490 + ], + "type": "text", + "content": "Mohammed Alshiekh, Roderick Bloem, Rüdiger Ehlers, Bettina Könighofer, Scott Niekum, and Ufuk Topcu. Safe reinforcement learning via shielding. In Sheila A. McIlraith and Kilian Q. Weinberger (eds.), Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, pp. 2669-2678. AAAI Press, 2018. URL https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/view/17211." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 495, + 507, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 495, + 507, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 507, + 552 + ], + "type": "text", + "content": "Martín Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pp. 214-223. PMLR, 2017. URL http://proceedings.mlr.press/v70/arjovsky17a.html." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 557, + 507, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 557, + 507, + 613 + ], + "spans": [ + { + "bbox": [ + 106, + 557, + 507, + 613 + ], + "type": "text", + "content": "Edoardo Bacci and David Parker. Probabilistic guarantees for safe deep reinforcement learning. 
In Nathalie Bertrand and Nils Jansen (eds.), Formal Modeling and Analysis of Timed Systems - 18th International Conference, FORMATS 2020, Vienna, Austria, September 1-3, 2020, Proceedings, volume 12288 of LNCS, pp. 231-248. Springer, 2020. doi: 10.1007/978-3-030-57628-8_14. URL https://doi.org/10.1007/978-3-030-57628-8_14." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 619, + 504, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 619, + 504, + 642 + ], + "spans": [ + { + "bbox": [ + 106, + 619, + 504, + 642 + ], + "type": "text", + "content": "Christel Baier and Joost-Pieter Katoen. Principles of model checking. MIT Press, 2008. ISBN 978-0-262-02649-9." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 647, + 507, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 647, + 507, + 715 + ], + "spans": [ + { + "bbox": [ + 106, + 647, + 507, + 715 + ], + "type": "text", + "content": "Christel Baier, Stefan Kiefer, Joachim Klein, Sascha Klüppelholz, David Müller, and James Worrell. Markov chains and unambiguous büchi automata. In Swarat Chaudhuri and Azadeh Farzan (eds.), Computer Aided Verification - 28th International Conference, CAV 2016, Toronto, ON, Canada, July 17-23, 2016, Proceedings, Part I, volume 9779 of Lecture Notes in Computer Science, pp. 23-42. Springer, 2016. doi: 10.1007/978-3-319-41528-4_2. URL https://doi.org/10.1007/978-3-319-41528-4_2." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 720, + 399, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 720, + 399, + 732 + ], + "spans": [ + { + "bbox": [ + 118, + 720, + 399, + 732 + ], + "type": "text", + "content": "available at https://github.com/florentdelgrange/wae_mdp" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 149 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 149 + ], + "type": "text", + "content": "Hugo Bazille, Blaise Genest, Cyrille Jégourel, and Jun Sun. Global PAC bounds for learning discrete time markov chains. In Shuvendu K. Lahiri and Chao Wang (eds.), Computer Aided Verification - 32nd International Conference, CAV 2020, Los Angeles, CA, USA, July 21-24, 2020, Proceedings, Part II, volume 12225 of Lecture Notes in Computer Science, pp. 304-326. Springer, 2020. doi: 10.1007/978-3-030-53291-8\\_17. URL https://doi.org/10.1007/978-3-030-53291-8_17." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 156, + 505, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 505, + 179 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 505, + 179 + ], + "type": "text", + "content": "O. Bousquet, S. Gelly, I. Tolstikhin, Carl-Johann Simon-Gabriel, and B. Schölkopf. From optimal transport to generative modeling: the vegan cookbook. arXiv: Machine Learning, 2017." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 186, + 505, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 505, + 220 + ], + "type": "text", + "content": "Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym. CoRR, abs/1606.01540, 2016. URL http://arxiv.org/abs/1606.01540." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 227, + 505, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 227, + 505, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 505, + 274 + ], + "type": "text", + "content": "Steven Carr, Nils Jansen, and Ufuk Topcu. Verifiable rnn-based policies for pomdps under temporal logic constraints. In Christian Bessiere (ed.), Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI 2020, pp. 4121-4127. ijcai.org, 2020. doi: 10.24963/ijcai.2020/570. URL https://doi.org/10.24963/ijcai.2020/570." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 279, + 505, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 279, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 505, + 357 + ], + "type": "text", + "content": "Pablo Samuel Castro, Tyler Kastner, Prakash Panangaden, and Mark Rowland. Mico: Improved representations via sampling-based state similarity for markov decision processes. 
In Marc'Aurelio Ranzato, Alina Beygelzimer, Yann N. Dauphin, Percy Liang, and Jennifer Wortman Vaughan (eds.), Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pp. 30113-30126, 2021. URL https://proceedings.neurips.cc/paper/2021/hash/fd06b8ea02fe5b1c2496fe1700e9d16c-Abstract.html." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 364, + 505, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 364, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 505, + 420 + ], + "type": "text", + "content": "Glenn Ceusters, Roman Cantú Rodríguez, Alberte Bouso García, Rüdiger Franke, Geert Deconinck, Lieve Helsen, Ann Nowé, Maarten Messagie, and Luis Ramirez Camargo. Model-predictive control and reinforcement learning in multi-energy system case studies. Applied Energy, 303:117634, 2021. ISSN 0306-2619. doi: https://doi.org/10.1016/j.apenergy.2021.117634. URL https://www.sciencedirect.com/science/article/pii/S0306261921010011." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 426, + 507, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 507, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 507, + 460 + ], + "type": "text", + "content": "Krishnendu Chatterjee, Luca de Alfaro, Rupak Majumdar, and Vishwanath Raman. Algorithms for game metrics (full version). Log. Methods Comput. Sci., 6(3), 2010. URL http://arxiv.org/abs/0809.4326." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 468, + 507, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 507, + 535 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 507, + 535 + ], + "type": "text", + "content": "Dane S. Corneil, Wulfram Gerstner, and Johanni Brea. Efficient modelbased deep reinforcement learning with variational state tabulation. In Jennifer G. 
Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 1057-1066. PMLR, 2018. URL http://proceedings.mlr.press/v80/corneil18a.html." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 542, + 507, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 542, + 507, + 587 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 507, + 587 + ], + "type": "text", + "content": "Florent Delgrange, Ann Nowé, and Guillermo A. Pérez. Distillation of rl policies with formal guarantees via variational abstraction of markov decision processes. Proceedings of the AAAI Conference on Artificial Intelligence, 36(6):6497-6505, Jun. 2022. doi: 10.1609/aaai.v36i6.20602. URL https://ojs.aaai.org/index.php/AAAI/article/view/20602." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 594, + 507, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 507, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 507, + 628 + ], + "type": "text", + "content": "Josée Desharnais, Vineet Gupta, Radha Jagadeesan, and Prakash Panangaden. Metrics for labelled markov processes. Theor. Comput. Sci., 318(3):323-354, 2004. doi: 10.1016/j.tcs.2003.09.013. URL https://doi.org/10.1016/j.tcs.2003.09.013." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 635, + 507, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 507, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 507, + 681 + ], + "type": "text", + "content": "Jiri Fajtl, Vasileios Argyriou, Dorothy Monekosso, and Paolo Remagnino. Latent bernoulli autoencoder. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 2964-2974. PMLR, 2020. 
URL http://proceedings.mlr.press/v119/fajtl20a.html." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 687, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 507, + 732 + ], + "type": "text", + "content": "Norm Ferns, Doina Precup, and Sophia Knight. Bisimulation for markov decision processes through families of functional expressions. In Franck van Breugel, Elham Kashefi, Catuscia Palamidessi, and Jan Rutten (eds.), Horizons of the Mind. A Tribute to Prakash Panangaden - Essays Dedicated to Prakash Panangaden on the Occasion of His 60th Birthday, volume 8464 of LNCS, pp. 319-342." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 115, + 81, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 81, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 115, + 81, + 505, + 105 + ], + "type": "text", + "content": "Springer, 2014. doi: 10.1007/978-3-319-06880-0_17. URL https://doi.org/10.1007/978-3-319-06880-0_17." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 110, + 506, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 110, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 107, + 110, + 506, + 177 + ], + "type": "text", + "content": "Carles Gelada, Saurabh Kumar, Jacob Buckman, Ofir Nachum, and Marc G. Bellemare. Deepmdp: Learning continuous latent space models for representation learning. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 2170-2179. PMLR, 2019. URL http://proceedings.mlr.press/v97/gelada19a.html." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 182, + 506, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 182, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 506, + 239 + ], + "type": "text", + "content": "Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE: masked autoencoder for distribution estimation. In Francis R. Bach and David M. Blei (eds.), Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pp. 881-889. JMLR.org, 2015. URL http://proceedings.mlr.press/v37/germain15.html." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 244, + 504, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 244, + 504, + 280 + ], + "spans": [ + { + "bbox": [ + 106, + 244, + 504, + 280 + ], + "type": "text", + "content": "Robert Givan, Thomas L. Dean, and Matthew Greig. Equivalence notions and model minimization in markov decision processes. Artif. Intell., 147(1-2):163-223, 2003. doi: 10.1016/S0004-3702(02) 00376-4. URL https://doi.org/10.1016/S0004-3702(02)00376-4." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 285, + 506, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 285, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 285, + 506, + 361 + ], + "type": "text", + "content": "Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C. Courville. Improved training of wasserstein gans. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 5767-5777, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/892c3b1c6dcbd52936e27cbd0ff683d6-Abstract.html." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 368, + 506, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 368, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 106, + 368, + 506, + 435 + ], + "type": "text", + "content": "Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 1856-1865. PMLR, 2018. URL http://proceedings.mlr.press/v80/haarnoja18b.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 440, + 506, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 440, + 506, + 518 + ], + "spans": [ + { + "bbox": [ + 106, + 440, + 506, + 518 + ], + "type": "text", + "content": "Mohammadhosein Hasanbeig, Natasha Yogananda Jeppu, Alessandro Abate, Tom Melham, and Daniel Kroening. 
Deepsynth: Automata synthesis for automatic task segmentation in deep reinforcement learning. In Thirty-Fifth AAAI Conference on Artificial Intelligence, AAAI 2021, Thirty-Third Conference on Innovative Applications of Artificial Intelligence, IAAI 2021, The Eleventh Symposium on Educational Advances in Artificial Intelligence, EAAI 2021, Virtual Event, February 2-9, 2021, pp. 7647-7656. AAAI Press, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/16935." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 524, + 506, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 524, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 106, + 524, + 506, + 568 + ], + "type": "text", + "content": "Christian Hensel, Sebastian Junges, Joost-Pieter Katoen, Tim Quatmann, and Matthias Volk. The probabilistic model checker storm. International Journal on Software Tools for Technology Transfer, 2021. ISSN 1433-2787. doi: 10.1007/s10009-021-00633-z. URL https://doi.org/10.1007/s10009-021-00633-z." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 574, + 506, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 574, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 106, + 574, + 506, + 608 + ], + "type": "text", + "content": "Matthew D. Hoffman, David M. Blei, Chong Wang, and John W. Paisley. Stochastic variational inference. J. Mach. Learn. Res., 14(1):1303-1347, 2013. URL http://dl.acm.org/citation.cfm?id=2502622." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 614, + 506, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 614, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 106, + 614, + 506, + 681 + ], + "type": "text", + "content": "Bojun Huang. Steady state analysis of episodic reinforcement learning. 
In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/hash/69bfa2aa2b7b139ff581a806abf0a886-Abstract.html." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 687, + 506, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 687, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 506, + 731 + ], + "type": "text", + "content": "Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. URL https://openreview.net/forum?id=rkE3y85ee." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 160 + ], + "spans": [ + { + "bbox": [ 
+ 105, + 81, + 507, + 160 + ], + "type": "text", + "content": "Nils Jansen, Bettina Konighofer, Sebastian Junges, Alex Serban, and Roderick Bloem. Safe Reinforcement Learning Using Probabilistic Shields (Invited Paper). In Igor Konnov and Laura Kovács (eds.), 31st International Conference on Concurrency Theory (CONCUR 2020), volume 171 of Leibniz International Proceedings in Informatics (LIPics), pp. 3:1-3:16, Dagstuhl, Germany, 2020. Schloss Dagstuhl-Leibniz-Zentrum für Informatik. ISBN 978-3-95977-160-3. doi: 10.4230/LIPics.CONCUR.2020.3. URL https://drops.dagstuhl.de/opus/volltexte/2020/12815." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 166, + 507, + 234 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 166, + 507, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 166, + 507, + 234 + ], + "type": "text", + "content": "Sebastian Junges, Nils Jansen, Christian Dehnert, Ufuk Topcu, and Joost-Pieter Katoen. Safety-constrained reinforcement learning for mdps. In Marsha Chechik and Jean-François Raskin (eds.), Tools and Algorithms for the Construction and Analysis of Systems - 22nd International Conference, TACAS 2016, Eindhoven, The Netherlands, April 2-8, 2016, Proceedings, volume 9636 of LNCS, pp. 130-146. Springer, 2016. doi: 10.1007/978-3-662-49674-9_8. URL https://doi.org/10.1007/978-3-662-49674-9_8." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 239, + 507, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 507, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 507, + 285 + ], + "type": "text", + "content": "Diederik P. Kingma and Max Welling. Auto-encoding variational bayes. In Yoshua Bengio and Yann LeCun (eds.), 2nd International Conference on Learning Representations, ICLR 2014, Banff, AB, Canada, April 14-16, 2014, Conference Track Proceedings, 2014. URL http://arxiv.org/abs/1312.6114." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 292, + 507, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 292, + 507, + 337 + ], + "spans": [ + { + "bbox": [ + 105, + 292, + 507, + 337 + ], + "type": "text", + "content": "Marta Kwiatkowska, Gethin Norman, and David Parker. Probabilistic model checking and autonomy. Annual Review of Control, Robotics, and Autonomous Systems, 5(1):385-410, 2022. doi: 10.1146/annurev-control-042820-010947. URL https://doi.org/10.1146/annurev-control-042820-010947." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 343, + 507, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 343, + 507, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 507, + 388 + ], + "type": "text", + "content": "Kim Guldstrand Larsen and Arne Skou. Bisimulation through probabilistic testing. In Conference Record of the Sixteenth Annual ACM Symposium on Principles of Programming Languages, Austin, Texas, USA, January 11-13, 1989, pp. 344-352. ACM Press, 1989. doi: 10.1145/75277.75307. URL https://doi.org/10.1145/75277.75307." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 395, + 507, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 395, + 507, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 395, + 507, + 484 + ], + "type": "text", + "content": "Pieter J. K. Libin, Arno Moonens, Timothy Verstraeten, Fabian Perez-Sanjines, Niel Hens, Philippe Lemey, and Ann Nowé. Deep reinforcement learning for large-scale epidemic control. In Yuxiao Dong, Georgiana Ifrim, Dunja Mladenic, Craig Saunders, and Sofie Van Hoecke (eds.), Machine Learning and Knowledge Discovery in Databases. Applied Data Science and Demo Track - European Conference, ECML PKDD 2020, Ghent, Belgium, September 14-18, 2020, Proceedings, Part V, volume 12461 of Lecture Notes in Computer Science, pp. 155-170. Springer, 2020. doi: 10.1007/978-3-030-67670-4_10. 
URL https://doi.org/10.1007/978-3-030-67670-4_10." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 491, + 507, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 507, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 507, + 525 + ], + "type": "text", + "content": "Michael L. Littman, Ufuk Topcu, Jie Fu, Charles Lee Isbell Jr., Min Wen, and James MacGlashan. Environment-independent task specifications via GLTL. CoRR, abs/1704.04341, 2017. URL http://arxiv.org/abs/1704.04341." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 532, + 507, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 532, + 507, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 532, + 507, + 578 + ], + "type": "text", + "content": "Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The concrete distribution: A continuous relaxation of discrete random variables. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net, 2017. URL https://openreview.net/forum?id=S1jE5L5gl." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 583, + 507, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 583, + 507, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 507, + 651 + ], + "type": "text", + "content": "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A. Rusu, Joel Veness, Marc G. Bellemare, Alex Graves, Martin A. Riedmiller, Andreas Fidjeland, Georg Ostrovski, Stig Petersen, Charles Beattie, Amir Sadik, Ioannis Antonoglou, Helen King, Dharshan Kumaran, Daan Wierstra, Shane Legg, and Demis Hassabis. Human-level control through deep reinforcement learning. Nat., 518(7540):529-533, 2015. doi: 10.1038/nature14236. URL https://doi.org/10.1038/nature14236." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 657, + 507, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 507, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 507, + 680 + ], + "type": "text", + "content": "Ann Nowe. Synthesis of \"safe\" fuzzy controllers based on reinforcement learning. PhD thesis, Vrije Universiteit Brussel, 1994." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 687, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 507, + 732 + ], + "type": "text", + "content": "George Papamakarios, Iain Murray, and Theo Pavlakou. Masked autoregressive flow for density estimation. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017," + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 115, + 82, + 505, + 106 + ], + "type": 
"ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 82, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 505, + 106 + ], + "type": "text", + "content": "Long Beach, CA, USA, pp. 2338-2347, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/6c1da886822c67822bcf3679d04369fa-Abstract.html." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 110, + 507, + 155 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 110, + 507, + 155 + ], + "spans": [ + { + "bbox": [ + 107, + 110, + 507, + 155 + ], + "type": "text", + "content": "Amir Pnueli. The temporal logic of programs. In 18th Annual Symposium on Foundations of Computer Science, Providence, Rhode Island, USA, 31 October - 1 November 1977, pp. 46-57. IEEE Computer Society, 1977. doi: 10.1109/SFCS.1977.32. URL https://doi.org/10.1109/SFCS.1977.32." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 161, + 506, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 161, + 506, + 195 + ], + "spans": [ + { + "bbox": [ + 106, + 161, + 506, + 195 + ], + "type": "text", + "content": "Martin L. Puterman. Markov Decision Processes: Discrete Stochastic Dynamic Programming. Wiley Series in Probability and Statistics. Wiley, 1994. ISBN 978-0-47161977-2. doi: 10.1002/9780470316887. URL https://doi.org/10.1002/9780470316887." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 201, + 506, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 201, + 506, + 257 + ], + "spans": [ + { + "bbox": [ + 106, + 201, + 506, + 257 + ], + "type": "text", + "content": "Tao Ren, Jianwei Niu, Jiahe Cui, Zhenchao Ouyang, and Xuefeng Liu. An application of multi-objective reinforcement learning for efficient model-free control of canals deployed with iot networks. Journal of Network and Computer Applications, 182:103049, 2021. ISSN 1084-8045. doi: https://doi.org/10.1016/j.jnca.2021.103049. 
URL https://www.sciencedirect.com/science/article/pii/S1084804521000734." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 262, + 506, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 262, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 106, + 262, + 506, + 328 + ], + "type": "text", + "content": "Salomon Sickert, Javier Esparza, Stefan Jaax, and Jan Kretínský. Limit-deterministic büchi automata for linear temporal logic. In Swarat Chaudhuri and Azadeh Farzan (eds.), Computer Aided Verification - 28th International Conference, CAV 2016, Toronto, ON, Canada, July 17-23, 2016, Proceedings, Part II, volume 9780 of Lecture Notes in Computer Science, pp. 312-332. Springer, 2016. doi: 10.1007/978-3-319-41540-6\\_17. URL https://doi.org/10.1007/978-3-319-41540-6_17." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 334, + 506, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 334, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 106, + 334, + 506, + 390 + ], + "type": "text", + "content": "Thiago D. Simão, Nils Jansen, and Matthijs T. J. Span. Always safe: Reinforcement learning without safety constraint violations during training. In Frank Dignum, Alessio Lomuscio, Ulle Endriss, and Ann Nowé (eds.), AAMAS '21: 20th International Conference on Autonomous Agents and Multiagent Systems, Virtual Event, United Kingdom, May 3-7, 2021, pp. 1226-1235. ACM, 2021. URL https://dl.acm.org/doi/10.5555/3463952.3464094." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 396, + 506, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 396, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 106, + 396, + 506, + 430 + ], + "type": "text", + "content": "Emanuel Todorov, Tom Erez, and Yuval Tassa. Mujoco: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pp. 5026-5033. IEEE, 2012." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 435, + 506, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 435, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 106, + 435, + 506, + 480 + ], + "type": "text", + "content": "Ilya O. Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Scholkopf. Wasserstein autoencoders. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=HkL7n1-0b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 486, + 506, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 486, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 486, + 506, + 509 + ], + "type": "text", + "content": "John N. Tsitsiklis. Asynchronous stochastic approximation and q-learning. Mach. Learn., 16(3):185-202, 1994. doi: 10.1007/BF00993306. URL https://doi.org/10.1007/BF00993306." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 514, + 506, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 514, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 106, + 514, + 506, + 581 + ], + "type": "text", + "content": "Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, 4-9 December 2017, Long Beach, CA, USA, pp. 6306-6315, 2017. URL http://papers.nips.cc/paper/7210-neural-discrete-representation-learning." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 586, + 506, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 586, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 106, + 586, + 506, + 620 + ], + "type": "text", + "content": "Cédric Villani. Optimal Transport: Old and New. Springer Berlin Heidelberg, Berlin, Heidelberg, 2009. ISBN 978-3-540-71050-9. doi: 10.1007/978-3-540-71050-9_6. URL https://doi.org/10.1007/978-3-540-71050-9_6." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 626, + 506, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 626, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 106, + 626, + 506, + 681 + ], + "type": "text", + "content": "Andrew M. Wells, Morteza Lahijanian, Lydia E. Kavraki, and Moshe Y. Vardi. Ltlf synthesis on probabilistic systems. In Jean-François Raskin and Davide Bresolin (eds.), Proceedings 11th International Symposium on Games, Automata, Logics, and Formal Verification, GandALF 2020, Brussels, Belgium, September 21-22, 2020, volume 326 of EPTCS, pp. 166-181, 2020. doi: 10.4204/EPTCS.326.11. URL https://doi.org/10.4204/EPTCS.326.11." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 687, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 687, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 506, + 732 + ], + "type": "text", + "content": "Hongyu Zang, Xin Li, and Mingzhong Wang. Simsr: Simple distance-based state representations for deep reinforcement learning. Proceedings of the AAAI Conference on Artificial Intelligence, 36 (8):8997-9005, Jun. 2022. doi: 10.1609/aaai.v36i8.20883. URL https://ojs.aaaai.org/index.php/AAAI/article/view/20883." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 507, + 167 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 106, + 81, + 507, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 507, + 128 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 507, + 128 + ], + "type": "text", + "content": "Amy Zhang, Rowan Thomas McAllister, Roberto Calandra, Yarin Gal, and Sergey Levine. Learning invariant representations for reinforcement learning without reconstruction. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=-2FCwDKRREu." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 133, + 507, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 133, + 507, + 167 + ], + "spans": [ + { + "bbox": [ + 106, + 133, + 507, + 167 + ], + "type": "text", + "content": "Shunkang Zhang, Yuan Gao, Yuling Jiao, Jin Liu, Yang Wang, and Can Yang. Wasserstein-wasserstein auto-encoders. CoRR, abs/1902.09323, 2019. URL http://arxiv.org/abs/1902.09323." 
+ } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 163, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 163, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 163, + 94 + ], + "type": "text", + "content": "APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 338, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 338, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 338, + 120 + ], + "type": "text", + "content": "A THEORETICAL DETAILS ON WAE-MDPS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 132, + 263, + 143 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 132, + 263, + 143 + ], + "spans": [ + { + "bbox": [ + 105, + 132, + 263, + 143 + ], + "type": "text", + "content": "A.1 THE DISCREPANCY MEASURE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": "We show that reasoning about discrepancy measures between stationary distributions is sound in the context of 
infinite interaction and episodic RL processes. Let " + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": " be a parameterized behavioral model that generate finite traces from the original environment (i.e., finite sequences of state, actions, and rewards of the form " + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\langle s_{0:T},a_{0:T - 1},r_{0:T - 1}\\rangle" + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": "), our goal is to find the best parameter " + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": " which offers the most accurate reconstruction of the original traces issued from the original model " + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": " operating under " + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": ". We demonstrate that, in the limit, considering the OT between trace-based distributions is equivalent to considering the OT between the stationary distribution of " + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\pi}" + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": " and the one of the behavioral model." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 246, + 444, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 246, + 444, + 257 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 444, + 257 + ], + "type": "text", + "content": "Let us first formally recall the definition of the metric on the transitions of the MDP." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": "Raw transition distance. Assume that " + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "inline_equation", + "content": "\\operatorname{Im}(\\mathcal{R})" + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": " are respectively equipped with metric " + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "inline_equation", + "content": "d_S" + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{R}}" + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": ", let us define the raw transition distance metric over transitions of " + }, + { + 
"bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": ", i.e., tuples of the form " + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "inline_equation", + "content": "\\langle s, a, r, s' \\rangle" + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": ", as " + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "inline_equation", + "content": "\\vec{d} \\colon S \\times \\mathcal{A} \\times \\operatorname{Im}(\\mathcal{R}) \\times S" + }, + { + "bbox": [ + 104, + 265, + 506, + 301 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 119, + 304, + 489, + 321 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 304, + 489, + 321 + ], + "spans": [ + { + "bbox": [ + 119, + 304, + 489, + 321 + ], + "type": "interline_equation", + "content": "\\vec {d} \\big (\\left\\langle s _ {1}, a _ {1}, r _ {1}, s _ {1} ^ {\\prime} \\right\\rangle , \\left\\langle s _ {2}, a _ {2}, r _ {2}, s _ {2} ^ {\\prime} \\right\\rangle \\big) = d _ {\\mathcal {S}} (s _ {1}, s _ {2}) + d _ {\\mathcal {A}} (a _ {1}, a _ {2}) + d _ {\\mathcal {R}} (r _ {1}, r _ {2}) + d _ {\\mathcal {S}} \\big (s _ {1} ^ {\\prime}, s _ {2} ^ {\\prime} \\big).", + "image_path": "55daea030ae39dd0afa7059f55acf8eab31a5fca3624c06ef909cc38eacdee2c.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 324, + 506, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 324, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 324, + 506, + 360 + ], + "type": "text", + "content": "In a nutshell, " + }, + { + "bbox": [ + 104, + 324, + 506, + 360 + ], + "type": "inline_equation", + "content": "\\vec{d}" + }, + { + "bbox": [ + 104, + 324, + 506, + 360 + ], + "type": "text", + "content": " consists of the sum of the distance of all 
the transition components. Note that it is a well defined distance metric since the sum of distances preserves the identity of indiscernible, symmetry, and triangle inequality." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 368, + 505, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 368, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 505, + 402 + ], + "type": "text", + "content": "Trace-based distributions. The raw distance " + }, + { + "bbox": [ + 104, + 368, + 505, + 402 + ], + "type": "inline_equation", + "content": "\\vec{d}" + }, + { + "bbox": [ + 104, + 368, + 505, + 402 + ], + "type": "text", + "content": " allows to reason about transitions, we thus consider the distribution over transitions which occur along traces of length " + }, + { + "bbox": [ + 104, + 368, + 505, + 402 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 368, + 505, + 402 + ], + "type": "text", + "content": " to compare the dynamics of the original and behavioral models:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 144, + 406, + 465, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 406, + 465, + 437 + ], + "spans": [ + { + "bbox": [ + 144, + 406, + 465, + 437 + ], + "type": "interline_equation", + "content": "\\mathcal {D} _ {\\pi} [ T ] (s, a, r, s ^ {\\prime}) = \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\xi_ {\\pi} ^ {t} (s | s _ {I}) \\cdot \\pi (a | s) \\cdot \\mathbf {P} (s ^ {\\prime} | s, a) \\cdot \\mathbf {1} _ {r = \\mathcal {R} (s, a)}, \\text {a n d}", + "image_path": "2f768013cfaaec0dbc7f66c6cd49563cf389571f211be09da8e9af3baefc560a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 150, + 440, + 460, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 440, + 460, + 471 + ], + "spans": [ + { + "bbox": [ + 150, + 440, + 460, + 471 + ], + "type": "interline_equation", + "content": 
"\\mathcal {P} _ {\\theta} [ T ] \\big (s, a, r, s ^ {\\prime} \\big) = \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\underset {s _ {0: t}, a _ {0: t - 1}, r _ {0: t - 1} \\sim P _ {\\theta} [ t ]} {\\mathbb {E}} \\mathbf {1} _ {\\langle s _ {t - 1}, a _ {t - 1} r _ {t - 1}, s _ {t} \\rangle = \\langle s, a, r, s ^ {\\prime} \\rangle},", + "image_path": "9f588f173937068eedae6b7e2354c694a6dddbadcb5e5cdab2b9b30ee55db9b5.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "P_{\\theta}[T]" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": " denotes the distribution over traces of length " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": ", generated from " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": ". 
Intuitively, " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "\\frac{1}{T} \\cdot \\sum_{t=1}^{T} \\xi_{\\pi}^{t}(s \\mid s_{I})" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": " can be seen as the fraction of the time spent in " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": " along traces of length " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": ", starting from the initial state Kulkarni (1995). Therefore, drawing " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "\\langle s, a, r, s' \\rangle \\sim \\mathcal{D}_{\\pi}[T]" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": " trivially follows: it is equivalent to drawing " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "\\frac{1}{T} \\cdot \\sum_{t=1}^{T} \\xi_{\\pi}^{t}(\\cdot \\mid s_{I})" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": ", then respectively " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "s'" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": 
"inline_equation", + "content": "\\pi(\\cdot \\mid s)" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "\\mathbf{P}(\\cdot \\mid s, a)" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": ", to finally obtain " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "r = \\mathcal{R}(s, a)" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": ". Given " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "T \\in \\mathbb{N}" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": ", our objective is to minimize the Wasserstein distance between those distributions: " + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "inline_equation", + "content": "W_{\\vec{d}}(\\mathcal{D}_{\\pi}[T], \\mathcal{P}_{\\theta}[T])" + }, + { + "bbox": [ + 104, + 474, + 504, + 568 + ], + "type": "text", + "content": ". The following Lemma enables optimizing the Wasserstein distance between the original MDP and the behavioral model when traces are drawn from episodic RL processes or infinite interactions (Huang, 2020)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 570, + 492, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 570, + 492, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 570, + 492, + 582 + ], + "type": "text", + "content": "Lemma A.1. 
Assume the existence of a stationary behavioral model " + }, + { + "bbox": [ + 104, + 570, + 492, + 582 + ], + "type": "inline_equation", + "content": "\\xi_{\\theta} = \\lim_{T\\to \\infty}\\mathcal{P}_{\\theta}[T]" + }, + { + "bbox": [ + 104, + 570, + 492, + 582 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 219, + 584, + 389, + 602 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 584, + 389, + 602 + ], + "spans": [ + { + "bbox": [ + 219, + 584, + 389, + 602 + ], + "type": "interline_equation", + "content": "\\lim _ {T \\to \\infty} W _ {\\vec {d}} \\left(\\mathcal {D} _ {\\pi} [ T ], \\mathcal {P} _ {\\theta} [ T ]\\right) = W _ {\\vec {d}} \\left(\\xi_ {\\pi}, \\xi_ {\\theta}\\right).", + "image_path": "271df2e0ffdeee712d72d6b99908d777574844d94765df46ae39e8224c0f8b76.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "text", + "content": "Proof. 
First, note that " + }, + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\frac{1}{T} \\cdot \\sum_{t=1}^{T} \\xi_{\\pi}^{t}(\\cdot \\mid s_{I})" + }, + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "text", + "content": " weakly converges to " + }, + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "text", + "content": " goes to " + }, + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\infty" + }, + { + "bbox": [ + 104, + 615, + 506, + 640 + ], + "type": "text", + "content": " Kulkarni (1995). The result follows then from (Villani, 2009, Corollary 6.9)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 652, + 288, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 652, + 288, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 652, + 288, + 662 + ], + "type": "text", + "content": "A.2 DEALING WITH DISCRETE ACTIONS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 672, + 506, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 672, + 506, + 695 + ], + "spans": [ + { + "bbox": [ + 104, + 672, + 506, + 695 + ], + "type": "text", + "content": "When the policy " + }, + { + "bbox": [ + 104, + 672, + 506, + 695 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 672, + 506, + 695 + ], + "type": "text", + "content": " executed in " + }, + { + "bbox": [ + 104, + 672, + 506, + 695 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 672, + 506, + 695 + ], + "type": "text", + "content": " already produces discrete actions, learning a 
latent action space is, in many cases, not necessary. We thus make the following assumptions:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "text", + "content": "Assumption A.2. Let " + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pi \\colon S \\to \\Delta(\\mathcal{A}^{\\star})" + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "text", + "content": " be the policy executed in " + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "text", + "content": " and assume that " + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\star}" + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "text", + "content": " is a (tractable) finite set. Then, we take " + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{A}} = \\mathcal{A}^{\\star}" + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}^{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "text", + "content": " as the identity function, i.e., " + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}^{\\mathcal{A}}: \\overline{S} \\times \\mathcal{A}^{\\star} \\to \\mathcal{A}^{\\star}, \\langle \\overline{s}, a^{\\star} \\rangle \\mapsto a^{\\star}" + }, + { + "bbox": [ + 104, + 696, + 505, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "Assumption A.3. Assume that the action space of the original environment " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " is a (tractable) finite set. Then, we take " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\psi_{\\theta}" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " as the identity function, i.e., " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\psi_{\\theta} = \\phi_{\\iota}^{A}" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 112, + 506, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 112, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 112, + 506, + 178 + ], + "type": "text", + "content": "Concretely, the premise of Assumption A.2 typically occurs when " + }, + { + "bbox": [ + 104, + 112, + 506, + 178 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 112, + 506, + 178 + ], + "type": "text", + "content": " is a latent policy (see Rem. 1) or when " + }, + { + "bbox": [ + 104, + 112, + 506, + 178 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 112, + 506, + 178 + ], + "type": "text", + "content": " has already a discrete action space. In the latter case, Assumption A.2 and A.3 amount to setting " + }, + { + "bbox": [ + 104, + 112, + 506, + 178 + ], + "type": "inline_equation", + "content": "\\bar{\\mathcal{A}} = \\mathcal{A}" + }, + { + "bbox": [ + 104, + 112, + 506, + 178 + ], + "type": "text", + "content": " and ignoring the action encoder and embedding function. Note that if a discrete action space is too large, or if the user explicitly aims for a coarser space, then the former is not considered as tractable, these assumptions do not hold, and the action space is abstracted to a smaller set of discrete actions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 191, + 230, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 191, + 230, + 202 + ], + "spans": [ + { + "bbox": [ + 105, + 191, + 230, + 202 + ], + "type": "text", + "content": "A.3 PROOF OF LEMMA 3.2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 213, + 406, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 406, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 406, + 228 + ], + "type": "text", + "content": "Notation. 
From now on, we write " + }, + { + "bbox": [ + 104, + 213, + 406, + 228 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}(\\bar{s},\\bar{a}\\mid s,a) = \\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}}\\cdot \\phi_{\\iota}^{A}(\\bar{a}\\mid \\bar{s},a)" + }, + { + "bbox": [ + 104, + 213, + 406, + 228 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "type": "text", + "content": "Lemma 3.2. Define " + }, + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(\\bar{s},\\bar{a},\\bar{s}^{\\prime}) = \\mathbb{E}_{s,a\\sim \\xi_{\\pi}}[\\mathbf{1}_{\\phi_{\\iota}(s) = \\bar{s}}\\cdot \\phi_{\\iota}^{A}(\\bar{a}\\mid \\bar{s},a)\\cdot \\overline{\\mathbf{P}}_{\\theta}(\\bar{s}^{\\prime}\\mid \\bar{s},\\bar{a})]" + }, + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "type": "text", + "content": " as the distribution of drawing state-action pairs from interacting with " + }, + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "type": "text", + "content": ", embedding them to the latent spaces, and finally letting them transition to their successor state in " + }, + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "type": "text", + "content": ". 
Then, " + }, + { + "bbox": [ + 104, + 229, + 506, + 268 + ], + "type": "inline_equation", + "content": "W_{\\vec{d}}(Q_{\\iota},\\bar{\\xi}_{\\overline{\\pi}_{\\theta}})\\leqslant W_{\\vec{d}}(\\bar{\\xi}_{\\overline{\\pi}_{\\theta}},\\mathcal{T}) + L_{\\mathbf{P}}^{\\xi_{\\pi}}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 277, + 483, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 483, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 483, + 289 + ], + "type": "text", + "content": "Proof. Wasserstein is compliant with the triangular inequality (Villani, 2009), which gives us:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 209, + 289, + 400, + 304 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 289, + 400, + 304 + ], + "spans": [ + { + "bbox": [ + 209, + 289, + 400, + 304 + ], + "type": "interline_equation", + "content": "W _ {\\vec {d}} \\left(Q _ {\\iota}, \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}\\right) \\leqslant W _ {\\vec {d}} \\left(Q _ {\\iota}, \\mathcal {T}\\right) + W _ {d _ {\\bar {\\mathfrak {S}}}} \\left(\\mathcal {T}, \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}\\right),", + "image_path": "2c1e4159f23b0e9d83bfc3c7ec3b179d76c0c15232c3f68ae97a512d18c69de5.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 304, + 133, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 133, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 133, + 315 + ], + "type": "text", + "content": "where" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 317, + 504, + 473 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 317, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 121, + 317, + 504, + 473 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} W _ {\\tilde {d}} \\left(\\mathcal {T}, \\bar {\\xi} _ {\\bar {\\pi} _ {\\theta}}\\right) \\quad (\\text {n 
o t e t h a t} W _ {\\tilde {d}} \\text {i s r e f l e x i v e (V i l l a n i , 2 0 0 9)} \\\\ = \\sup _ {f \\in \\mathcal {F} _ {\\bar {d}}} \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) - \\underset {\\bar {s} \\sim \\bar {\\xi} _ {\\pi_ {\\theta}}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\bar {\\pi} _ {\\theta} (\\cdot | \\bar {s})} {\\mathbb {E}} \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}), \\text {a n d} \\\\ W _ {\\vec {d}} (Q _ {\\iota}, \\mathcal {T}) \\\\ = \\sup _ {f \\in \\mathcal {F} _ {\\vec {d}} s, a, s ^ {\\prime} \\sim \\xi_ {\\pi}} \\mathbb {E} _ {\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} (\\cdot | s, a, s ^ {\\prime})} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) - \\mathbb {E} _ {s, a \\sim \\xi_ {\\pi}} \\mathbb {E} _ {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} \\mathbb {E} _ {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) (3) \\\\ \\leqslant \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\sup _ {f \\in \\mathcal {F} _ {\\bar {d}} ^ {-} s ^ {\\prime} \\sim \\mathbf {P} (\\cdot | s, a)} \\underset {\\sim \\mathbf {P} (\\cdot | s, a)} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\phi_ {\\iota} (s ^ {\\prime})) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} f (\\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) (4) \\\\ = \\underset {s, a \\sim 
\\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\iota} ^ {A} (\\cdot | \\phi_ {\\iota} (s), a)} {\\mathbb {E}} \\sup _ {f \\in \\mathcal {F} _ {d} _ {\\overline {{\\mathcal {S}}}}} \\underset {\\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a)} {\\mathbb {E}} f (\\bar {s} ^ {\\prime}) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})} {\\mathbb {E}} f (\\bar {s} ^ {\\prime}) (5) \\\\ = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\ell} ^ {\\mathcal {A}} (\\cdot | \\phi_ {\\ell} (s), a)} {\\mathbb {E}} W _ {d _ {\\bar {\\mathfrak {S}}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a), \\bar {\\mathbf {P}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right). \\\\ \\end{array}", + "image_path": "79cf08779bb2c6bf6110f288b5ab32a5f2567e85a8d566af9c6a2eb90c122110.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 475, + 506, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 506, + 497 + ], + "type": "text", + "content": "We pass from Eq. 3 to Eq. 4 by the Jensen's inequality. To see how we pass from Eq. 4 to Eq. 
5, notice that" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 498, + 434, + 518 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 498, + 434, + 518 + ], + "spans": [ + { + "bbox": [ + 138, + 498, + 434, + 518 + ], + "type": "interline_equation", + "content": "\\mathcal {F} _ {\\vec {d}} = \\left\\{f \\colon f \\left(\\bar {s} _ {1}, \\bar {a} _ {1}, \\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s} _ {2}, \\bar {a} _ {2}, \\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant \\vec {d} \\left(\\left\\langle \\bar {s} _ {1}, \\bar {a} _ {1}, \\bar {s} _ {1} ^ {\\prime} \\right\\rangle , \\left\\langle \\bar {s} _ {2}, \\bar {a} _ {2}, \\bar {s} _ {2} ^ {\\prime} \\right\\rangle\\right) \\right\\}", + "image_path": "97d765709396b6f7ffbacd8305cd1da25c74ed74d7029a02f7f8371cf458aa8c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 139, + 520, + 471, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 520, + 471, + 533 + ], + "spans": [ + { + "bbox": [ + 139, + 520, + 471, + 533 + ], + "type": "interline_equation", + "content": "\\mathcal {F} _ {\\vec {d}} = \\left\\{f \\colon f \\left(\\bar {s} _ {1}, \\bar {a} _ {1}, \\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s} _ {2}, \\bar {a} _ {2}, \\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant d _ {\\bar {S}} \\left(\\bar {s} _ {1}, \\bar {s} _ {2}\\right) + d _ {\\bar {A}} \\left(\\bar {a} _ {1}, \\bar {a} _ {2}\\right) + d _ {\\bar {S}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\right\\}", + "image_path": "a25c8b12ea4018922982609386c97ce2bab85cff5b82bc4af77e4ac13bd67975.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "text", + "content": "Observe now that " + }, + { + "bbox": [ + 
104, + 534, + 506, + 569 + ], + "type": "inline_equation", + "content": "\\bar{s}" + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "inline_equation", + "content": "\\bar{a}" + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "text", + "content": " are fixed in the supremum computation of Eq. 4: all functions " + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "text", + "content": " considered and taken from " + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\bar{d}}" + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "text", + "content": " are of the form " + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "inline_equation", + "content": "f(\\bar{s},\\bar{a},\\cdot)" + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "text", + "content": ". 
It is thus sufficient to consider the supremum over functions from the following subset of " + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\bar{d}}" + }, + { + "bbox": [ + 104, + 534, + 506, + 569 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 175, + 571, + 446, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 571, + 446, + 586 + ], + "spans": [ + { + "bbox": [ + 175, + 571, + 446, + 586 + ], + "type": "interline_equation", + "content": "\\{f \\colon f (\\bar {s}, \\bar {a}, \\bar {s} _ {1} ^ {\\prime}) - f (\\bar {s}, \\bar {a}, \\bar {s} _ {2} ^ {\\prime}) \\leqslant d _ {\\bar {\\mathcal {S}}} (\\bar {s}, \\bar {s}) + d _ {\\bar {\\mathcal {A}}} (\\bar {a}, \\bar {a}) + d _ {\\bar {\\mathcal {S}}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\}", + "image_path": "e18ae755babe227e60343499c839e8b17d35457d161cb4b72540d864fcc2b5c1.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 406, + 587, + 504, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 587, + 504, + 599 + ], + "spans": [ + { + "bbox": [ + 406, + 587, + 504, + 599 + ], + "type": "text", + "content": "(for " + }, + { + "bbox": [ + 406, + 587, + 504, + 599 + ], + "type": "inline_equation", + "content": "\\bar{s},\\bar{a}" + }, + { + "bbox": [ + 406, + 587, + 504, + 599 + ], + "type": "text", + "content": " drawn from " + }, + { + "bbox": [ + 406, + 587, + 504, + 599 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 406, + 587, + 504, + 599 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 164, + 602, + 354, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 602, + 354, + 647 + ], + "spans": [ + { + "bbox": [ + 164, + 602, + 354, + 647 + ], + "type": 
"interline_equation", + "content": "\\begin{array}{l} = \\left\\{f: f \\left(\\bar {s}, \\bar {a}, \\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s}, \\bar {a}, \\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant d _ {\\bar {S}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\right\\} \\\\ = \\left\\{f: f \\left(\\bar {s} _ {1} ^ {\\prime}\\right) - f \\left(\\bar {s} _ {2} ^ {\\prime}\\right) \\leqslant d _ {\\bar {S}} \\left(\\bar {s} _ {1} ^ {\\prime}, \\bar {s} _ {2} ^ {\\prime}\\right) \\right\\} \\\\ = \\mathcal {F} _ {d _ {\\bar {\\mathcal {S}}}}. \\\\ \\end{array}", + "image_path": "ff3005faa0424004252b9437683e5aef579e0e4a5f0b10115df973ea46ffb792.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "text", + "content": "Given a state " + }, + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "text", + "content": " in the original model, the (parallel) execution of " + }, + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "text", + "content": " is enabled through " + }, + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "inline_equation", + "content": "\\pi(a, \\bar{a} | s) = \\pi(a | s) \\cdot \\phi_{\\ell}^{\\mathcal{A}}(\\bar{a} | \\phi_{\\ell}(s), a)" + }, + { + "bbox": [ + 104, + 650, + 506, + 682 + ], + "type": "text", + "content": " (cf. Fig. 1b). 
The local transition loss resulting from this interaction is:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 172, + 685, + 438, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 685, + 438, + 730 + ], + "spans": [ + { + "bbox": [ + 172, + 685, + 438, + 730 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} = \\underset {s, \\langle a, \\bar {a} \\rangle \\sim \\xi_ {\\pi}} {\\mathbb {E}} W _ {d _ {\\bar {S}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a), \\bar {\\mathbf {P}} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right) \\\\ = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {a} \\sim \\phi_ {\\iota} ^ {A} (\\cdot | \\phi_ {\\iota} (s), a)} {\\mathbb {E}} W _ {d _ {\\bar {\\mathfrak {S}}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a), \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right), \\\\ \\end{array}", + "image_path": "34113fc9e82d352d0698d9e24665488bff00d967efc39554470b2d24be6a6782.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 229, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 229, + 94 + ], + "spans": [ + { + "bbox": [ + 
105, + 82, + 229, + 94 + ], + "type": "text", + "content": "which finally yields the result." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 494, + 83, + 505, + 92 + ], + "blocks": [ + { + "bbox": [ + 494, + 83, + 505, + 92 + ], + "lines": [ + { + "bbox": [ + 494, + 83, + 505, + 92 + ], + "spans": [ + { + "bbox": [ + 494, + 83, + 505, + 92 + ], + "type": "image", + "image_path": "852e86e2e3bda2a9e8c7ef0d0e216a8b0fe5f8b4b9662c714d1c462f9b283e6f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 107, + 240, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 107, + 240, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 107, + 240, + 118 + ], + "type": "text", + "content": "A.4 PROOF OF THEOREM 3.3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 129, + 504, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 129, + 504, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 129, + 504, + 163 + ], + "type": "text", + "content": "Before proving Theorem 3.3, let us introduce the following Lemma, that explicitly demonstrates the link between the transition regularizer of the " + }, + { + "bbox": [ + 104, + 129, + 504, + 163 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 129, + 504, + 163 + ], + "type": "text", + "content": "-MDP objective and the local transition loss required to obtain the guarantees related to the bisimulation bounds of Eq. 1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 165, + 504, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 504, + 177 + ], + "type": "text", + "content": "Lemma A.4. 
Assume that traces are generated by running " + }, + { + "bbox": [ + 104, + 165, + 504, + 177 + ], + "type": "inline_equation", + "content": "\\bar{\\pi} \\in \\overline{\\Pi}" + }, + { + "bbox": [ + 104, + 165, + 504, + 177 + ], + "type": "text", + "content": " in the original environment, then" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 164, + 183, + 446, + 205 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 183, + 446, + 205 + ], + "spans": [ + { + "bbox": [ + 164, + 183, + 446, + 205 + ], + "type": "interline_equation", + "content": "\\underset {s, a ^ {\\star} \\sim \\xi_ {\\pi} \\bar {a} \\sim \\phi_ {\\iota} ^ {\\mathcal {A}} (\\cdot | \\phi_ {\\iota} (s), a ^ {\\star})} {\\mathbb {E}} W _ {d _ {\\overline {{S}}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a ^ {\\star}), \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right) = L _ {\\mathbf {P}} ^ {\\xi_ {\\overline {{\\pi}}}}.", + "image_path": "3dad5f879136755df17f03911858f6eab83208be61af63d954e5722cc188e264.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 217, + 485, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 217, + 485, + 230 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 485, + 230 + ], + "type": "text", + "content": "Proof. 
Since the latent policy " + }, + { + "bbox": [ + 104, + 217, + 485, + 230 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}" + }, + { + "bbox": [ + 104, + 217, + 485, + 230 + ], + "type": "text", + "content": " generates latent actions, Assumption A.2 holds, which means:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 179, + 236, + 432, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 236, + 432, + 297 + ], + "spans": [ + { + "bbox": [ + 179, + 236, + 432, + 297 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathop{\\mathbb{E}}_{s,a^{\\star}\\sim \\xi_{\\overline{\\pi}}}\\mathop{\\mathbb{E}}_{\\bar{a}\\sim \\phi_{\\iota}^{A}(\\cdot |\\phi_{\\iota}(s),a^{\\star})}W_{d_{\\overline{\\mathfrak{S}}}}\\left(\\phi_{\\iota}\\mathbf{P}(\\cdot |s,a^{\\star}),\\overline{\\mathbf{P}}_{\\theta}(\\cdot |\\phi_{\\iota}(s),\\bar{a})\\right) \\\\ = \\underset {s, \\bar {a} \\sim \\xi_ {\\bar {\\pi}}} {\\mathbb {E}} W _ {d _ {\\bar {S}}} \\left(\\phi_ {\\iota} \\mathbf {P} (\\cdot | s, \\bar {a}), \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\phi_ {\\iota} (s), \\bar {a})\\right) \\\\ = L _ {\\mathbf {P}} ^ {\\xi_ {\\overline {{\\pi}}}}. 
\\\\ \\end{array}", + "image_path": "b80a77384df0421bde3b654e1c9253acb36c60f496fc26e6eec794816ef0d530.jpg" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 494, + 303, + 505, + 312 + ], + "blocks": [ + { + "bbox": [ + 494, + 303, + 505, + 312 + ], + "lines": [ + { + "bbox": [ + 494, + 303, + 505, + 312 + ], + "spans": [ + { + "bbox": [ + 494, + 303, + 505, + 312 + ], + "type": "image", + "image_path": "8baf2a20bda1cb7ecb0faf699ee64776daaa409b45835778693cc866c2d3e653.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 320, + 505, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 320, + 505, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 320, + 505, + 344 + ], + "type": "text", + "content": "Theorem 3.3. Assume that traces are generated by running a latent policy " + }, + { + "bbox": [ + 104, + 320, + 505, + 344 + ], + "type": "inline_equation", + "content": "\\bar{\\pi} \\in \\overline{\\Pi}" + }, + { + "bbox": [ + 104, + 320, + 505, + 344 + ], + "type": "text", + "content": " in the original environment and let " + }, + { + "bbox": [ + 104, + 320, + 505, + 344 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{R}}" + }, + { + "bbox": [ + 104, + 320, + 505, + 344 + ], + "type": "text", + "content": " be the usual Euclidean distance, then the " + }, + { + "bbox": [ + 104, + 320, + 505, + 344 + ], + "type": "inline_equation", + "content": "W^{2}" + }, + { + "bbox": [ + 104, + 320, + 505, + 344 + ], + "type": "text", + "content": "AE-MDP objective is" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 350, + 468, + 371 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 350, + 468, + 371 + ], + "spans": [ + { + "bbox": [ + 140, + 350, + 468, + 371 + ], + "type": "interline_equation", + "content": "\\min_{\\iota ,\\theta}\\underset {s,s^{\\prime}\\sim 
\\xi_{\\pi}}{\\mathbb{E}}\\left[d_{\\mathcal{S}}(s,\\mathcal{G}_{\\theta}(\\phi_{\\iota}(s))) + d_{\\mathcal{S}}\\big(s^{\\prime},\\mathcal{G}_{\\theta}\\big(\\phi_{\\iota}\\big(s^{\\prime}\\big)\\big)\\big)\\right] + L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}} + \\beta \\cdot (\\mathcal{W}_{\\xi_{\\overline{\\pi}}} + L_{\\mathbf{P}}^{\\xi_{\\overline{\\pi}}}).", + "image_path": "1478d8669ea2580a78797f2ec40e35b535171a97ed5a6839b7e95764f3a6efc4.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": "Proof. We distinguish two cases: (i) the case where the original and latent models share the same discrete action space, i.e., " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = \\overline{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": ", and (ii) the case where the two have a different action space (e.g., when the original action space is continuous), i.e., " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{A} \\neq \\overline{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": ". In both cases, the local losses term follows by definition of " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " and Lemma A.4. 
When " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{R}}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " is the Euclidean distance (or even the " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " distance since rewards are scalar values), the expected reward distance occurring in the expected trace-distance term " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\vec{d}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " in the " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": "-MDP objective directly translates to the local loss " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "L_{\\mathcal{R}}^{\\xi_{\\overline{\\pi}}}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": ". Concerning the local transition loss, in case (i), the result naturally follows from Assumption A.2 and A.3. In case (ii), only Assumption A.2 holds, meaning the action encoder term of the " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": "-MDP objective is ignored, but not the action embedding term appearing in " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "G_{\\theta}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": ". 
Given " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "s \\sim \\xi_{\\overline{\\pi}}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": ", recall that executing " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\overline{\\pi}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " amounts to embedding the produced latent actions " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\bar{a} \\sim \\overline{\\pi}(\\cdot \\mid \\phi_{\\iota}(s))" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " back to the original environment via " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "a = \\psi_{\\theta}(\\phi_{\\iota}(s), \\bar{a})" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " (cf. Rem. 1 and Fig. 1a). 
Therefore, the projection of " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\vec{d}(\\langle s, a, r, s' \\rangle, G_{\\theta}(\\phi_{\\iota}(s), \\bar{a}, \\phi_{\\iota}(s')))" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " on the action space " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{A}}(\\psi_{\\theta}(\\phi_{\\iota}(s), \\bar{a}), \\psi_{\\theta}(\\phi_{\\iota}(s), \\bar{a})) = 0" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "r = \\mathcal{R}(s, a)" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "inline_equation", + "content": "s' \\sim \\mathbf{P}(\\cdot \\mid s, a)" + }, + { + "bbox": [ + 104, + 383, + 506, + 534 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 546, + 328, + 557 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 546, + 328, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 328, + 557 + ], + "type": "text", + "content": "A.5 OPTIMIZING THE TRANSITION REGULARIZER" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 566, + 506, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 566, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 566, + 506, + 601 + ], + "type": "text", + "content": "In the following, we detail how we derive a tractable form of our transition regularizer " + }, + { + "bbox": [ + 104, + 566, + 506, + 601 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)" + }, + { + "bbox": [ + 104, + 566, + 506, + 601 + ], + "type": "text", + "content": ". Optimizing the ground Kantorovich-Rubinstein duality is enabled via the introduction of a parameterized, 1-Lipschitz network " + }, + { + "bbox": [ + 104, + 566, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\varphi_{\\omega}^{\\mathbf{P}}" + }, + { + "bbox": [ + 104, + 566, + 506, + 601 + ], + "type": "text", + "content": ", that need to be trained to attain the supremum of the dual:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 135, + 607, + 474, + 631 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 607, + 474, + 631 + ], + "spans": [ + { + "bbox": [ + 135, + 607, + 474, + 631 + ], + "type": "interline_equation", + "content": "L _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} (\\omega) = \\underset {s, a \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\max _ {\\omega : \\varphi_ {\\omega} ^ {\\mathbf {P}} \\in \\mathcal {F} _ {d}} \\underset {\\bar {s} ^ {\\prime} \\sim \\phi_ {\\iota} \\mathbf {P} (\\cdot | s, a)} {\\max _ {\\bar {s} ^ {\\prime} \\sim 
\\varphi_ {\\iota} (\\cdot | s, a)}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (\\bar {s} ^ {\\prime}) - \\underset {\\bar {s} ^ {\\prime} \\sim \\bar {\\mathbf {P}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (\\bar {s} ^ {\\prime}).", + "image_path": "e4e426dd21fb02d7287a99b397fdd310fbb6f29da3eb2ef948d47c972472a283.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 638, + 504, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 504, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 504, + 674 + ], + "type": "text", + "content": "Under this form, optimizing " + }, + { + "bbox": [ + 104, + 638, + 504, + 674 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)" + }, + { + "bbox": [ + 104, + 638, + 504, + 674 + ], + "type": "text", + "content": " is intractable due to the expectation over the maximum. The following Lemma allows us rewriting " + }, + { + "bbox": [ + 104, + 638, + 504, + 674 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi_{\\pi}}" + }, + { + "bbox": [ + 104, + 638, + 504, + 674 + ], + "type": "text", + "content": " to make the optimization tractable through Monte Carlo estimation." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "text", + "content": "Lemma A.5. 
Let " + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "inline_equation", + "content": "\\mathcal{X},\\mathcal{Y}" + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "text", + "content": " be two measurable sets, " + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "inline_equation", + "content": "\\xi \\in \\Delta (\\mathcal{X})" + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "inline_equation", + "content": "P\\colon \\mathcal{X}\\to \\Delta (\\mathcal{Y}),Q\\colon \\mathcal{X}\\to \\Delta (\\mathcal{Y})" + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "text", + "content": " , and " + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "inline_equation", + "content": "d\\colon \\mathcal{Y}\\times \\mathcal{Y}\\rightarrow [0, + \\infty [" + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "text", + "content": " be a metric on " + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 104, + 677, + 505, + 702 + ], + "type": "text", + "content": " . 
Then," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 129, + 708, + 481, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 708, + 481, + 736 + ], + "spans": [ + { + "bbox": [ + 129, + 708, + 481, + 736 + ], + "type": "interline_equation", + "content": "\\underset {x \\sim \\xi} {\\mathbb {E}} W _ {d} \\left(P (\\cdot \\mid x), Q (\\cdot \\mid x)\\right) = \\sup _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d}} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {2}) \\right]", + "image_path": "39cc9caa87b60928d26756c09ea822102e56b9f5b7260430bcfb1c3ac7544d08.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 253, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 253, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 253, + 94 + ], + "type": "text", + "content": "Proof. 
Our objective is to show that" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 188, + 99, + 504, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 99, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 188, + 99, + 504, + 159 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (y _ {1}) (x) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (y _ {2}) (x) \\right] (6) \\\\ = \\sup _ {\\varphi : \\mathcal {X} \\rightarrow \\mathcal {F} _ {d}} \\mathbb {E} _ {x \\sim \\xi} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (x) \\left(y _ {1}\\right) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (x) \\left(y _ {2}\\right)\\right] (7) \\\\ \\end{array}", + "image_path": "8cab264ade8baacbf6f09a7f4c6560f167fe52c41c759de6ce7728d997ff0ad9.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 163, + 397, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 163, + 397, + 175 + ], + "spans": [ + { + "bbox": [ + 104, + 163, + 397, + 175 + ], + "type": "text", + "content": "We start with " + }, + { + "bbox": [ + 104, + 163, + 397, + 175 + ], + "type": "inline_equation", + "content": "(6) \\leqslant (7)" + }, + { + "bbox": [ + 104, + 163, + 397, + 175 + ], + "type": "text", + "content": ". 
Construct " + }, + { + "bbox": [ + 104, + 163, + 397, + 175 + ], + "type": "inline_equation", + "content": "\\varphi^{\\star} \\colon \\mathcal{X} \\to \\mathcal{F}_d" + }, + { + "bbox": [ + 104, + 163, + 397, + 175 + ], + "type": "text", + "content": " by setting for all " + }, + { + "bbox": [ + 104, + 163, + 397, + 175 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{X}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 198, + 178, + 411, + 199 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 178, + 411, + 199 + ], + "spans": [ + { + "bbox": [ + 198, + 178, + 411, + 199 + ], + "type": "interline_equation", + "content": "\\varphi^{\\star}(x) = \\arg \\sup_{f\\in \\mathcal{F}_{d}}\\underset {y_{1}\\sim P(\\cdot |x)}{\\mathbb{E}}f(y_{1}) - \\underset {y_{2}\\sim Q(\\cdot |x)}{\\mathbb{E}}f(y_{2}).", + "image_path": "b2b7b13c3fb0e85119300c3b2e46c9805ccf9b00c00b846863b300eaf9d1e47d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 203, + 161, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 203, + 161, + 215 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 161, + 215 + ], + "type": "text", + "content": "This gives us" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 186, + 219, + 425, + 309 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 219, + 425, + 309 + ], + "spans": [ + { + "bbox": [ + 186, + 219, + 425, + 309 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\right] \\\\ = \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} 
\\varphi^ {\\star} (x) (y _ {2}) \\right] \\\\ \\leqslant \\sup _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d}} \\mathbb {E} _ {x \\sim \\xi} \\left[ \\mathbb {E} _ {y _ {1} \\sim P (\\cdot | x)} \\varphi (x) (y _ {1}) - \\mathbb {E} _ {y _ {2} \\sim Q (\\cdot | x)} \\varphi (x) (y _ {2}) \\right]. \\\\ \\end{array}", + "image_path": "23eded40e8cf1d7e54b9623187b0eb73bc734e8a40a4d141221b6b50f90609c4.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 312, + 263, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 312, + 263, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 263, + 324 + ], + "type": "text", + "content": "It remains to show that " + }, + { + "bbox": [ + 105, + 312, + 263, + 324 + ], + "type": "inline_equation", + "content": "(6) \\geqslant (7)" + }, + { + "bbox": [ + 105, + 312, + 263, + 324 + ], + "type": "text", + "content": ". Take" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 168, + 328, + 440, + 355 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 328, + 440, + 355 + ], + "spans": [ + { + "bbox": [ + 168, + 328, + 440, + 355 + ], + "type": "interline_equation", + "content": "\\varphi^ {\\star} = \\arg \\operatorname * {s u p} _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d}} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi (x) (y _ {2}) \\right].", + "image_path": "95f1b4b2ac25bbbd4fc628e7733f09ae7857e09d0679c1b05667db757f22779d.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 359, + 330, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 330, + 371 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 330, + 371 + ], + "type": "text", + "content": "Then, for all " + }, + { + "bbox": [ + 104, + 359, + 330, + 371 + ], + "type": 
"inline_equation", + "content": "x\\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 359, + 330, + 371 + ], + "type": "text", + "content": " we have " + }, + { + "bbox": [ + 104, + 359, + 330, + 371 + ], + "type": "inline_equation", + "content": "\\varphi^{\\star}(x)\\in \\mathcal{F}_d" + }, + { + "bbox": [ + 104, + 359, + 330, + 371 + ], + "type": "text", + "content": " which means:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 216, + 376, + 395, + 418 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 376, + 395, + 418 + ], + "spans": [ + { + "bbox": [ + 216, + 376, + 395, + 418 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {2}) \\\\ \\leqslant \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\\\ \\end{array}", + "image_path": "276732eb77f317ef644179e499863a7131fa8656a7bef6a2e66a486b29f6ef51.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 422, + 180, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 180, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 180, + 434 + ], + "type": "text", + "content": "This finally yields" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 202, + 438, + 408, + 499 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 438, + 408, + 499 + ], + "spans": [ + { + "bbox": [ + 202, + 438, + 408, + 499 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} \\varphi^ {\\star} (x) (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} \\varphi^ 
{\\star} (x) (y _ {2}) \\right] \\\\ \\leqslant \\underset {x \\sim \\xi} {\\mathbb {E}} \\left[ \\sup _ {f \\in \\mathcal {F} _ {d}} \\underset {y _ {1} \\sim P (\\cdot | x)} {\\mathbb {E}} f (y _ {1}) - \\underset {y _ {2} \\sim Q (\\cdot | x)} {\\mathbb {E}} f (y _ {2}) \\right]. \\\\ \\end{array}", + "image_path": "8bf2ff555b941f2eade3ae048a6965fa657ac9be5cf59672361d713642aabb80.jpg" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 494, + 502, + 504, + 512 + ], + "blocks": [ + { + "bbox": [ + 494, + 502, + 504, + 512 + ], + "lines": [ + { + "bbox": [ + 494, + 502, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 494, + 502, + 504, + 512 + ], + "type": "image", + "image_path": "ddd0a968d5f3afaaae13a27c6e3caf51bfd225352e0358b48e17b9095eb06313.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 519, + 478, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 478, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 478, + 532 + ], + "type": "text", + "content": "Corollary A.5.1. 
Let " + }, + { + "bbox": [ + 104, + 519, + 478, + 532 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 519, + 478, + 532 + ], + "type": "text", + "content": " be a stationary distribution of " + }, + { + "bbox": [ + 104, + 519, + 478, + 532 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\pi}" + }, + { + "bbox": [ + 104, + 519, + 478, + 532 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 519, + 478, + 532 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = S\\times \\mathcal{A}\\times \\overline{S}\\times \\overline{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 519, + 478, + 532 + ], + "type": "text", + "content": " , then" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 118, + 536, + 490, + 570 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 536, + 490, + 570 + ], + "spans": [ + { + "bbox": [ + 118, + 536, + 490, + 570 + ], + "type": "interline_equation", + "content": "L _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} = \\sup _ {\\varphi \\colon \\mathcal {X} \\to \\mathcal {F} _ {d} _ {\\overline {{S}}}} \\underset {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi}} {\\mathbb {E}} \\underset {\\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\left[ \\varphi (s, a, \\bar {s}, \\bar {a}) \\big (\\phi_ {\\iota} (s ^ {\\prime}) \\big) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, a)} {\\mathbb {E}} \\varphi (s, a, \\bar {s}, \\bar {a}) \\big (\\bar {s} ^ {\\prime} \\big) \\right]", + "image_path": "eed0fcde6fa2d81e65c866c82704b1e460294256846a593b6a5144c656d22e1a.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 581, + 357, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 581, + 357, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 581, + 357, + 594 + ], + "type": "text", + "content": "Consequently, we rewrite " + }, + { + 
"bbox": [ + 104, + 581, + 357, + 594 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi_{\\pi}}(\\omega)" + }, + { + "bbox": [ + 104, + 581, + 357, + 594 + ], + "type": "text", + "content": " as a tractable maximization:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 598, + 506, + 632 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 598, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 598, + 506, + 632 + ], + "type": "interline_equation", + "content": "L _ {\\mathbf {P}} ^ {\\xi_ {\\pi}} (\\omega) = \\max _ {\\omega : \\varphi_ {\\omega} ^ {\\mathbf {P}} \\in \\mathcal {F} _ {d _ {\\bar {g}}}} \\underset {s, a, s ^ {\\prime} \\sim \\xi_ {\\pi} \\bar {s}, \\bar {a} \\sim \\phi_ {\\iota} (\\cdot | s, a)} {\\mathbb {E}} \\underset {s, a, \\bar {a}, \\bar {a} \\sim \\phi_ {\\iota} (s ^ {\\prime})} {\\mathbb {E}} \\left[ \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\phi_ {\\iota} (s ^ {\\prime})) - \\underset {\\bar {s} ^ {\\prime} \\sim \\overline {{\\mathbf {P}}} _ {\\theta} (\\cdot | \\bar {s}, \\bar {a})} {\\mathbb {E}} \\varphi_ {\\omega} ^ {\\mathbf {P}} (s, a, \\bar {s}, \\bar {a}, \\bar {s} ^ {\\prime}) \\right].", + "image_path": "76cfb208f23edb2bace971cf4d4146691c41135f4b0bd3c6ca5e6bd29f3d484b.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 643, + 227, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 643, + 227, + 653 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 227, + 653 + ], + "type": "text", + "content": "A.6 THE LATENT METRIC" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": "In the following, we show that considering the Euclidean distance for " + }, + { + "bbox": [ + 104, + 662, + 
506, + 733 + ], + "type": "inline_equation", + "content": "\\vec{d}" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "d_{\\overline{\\mathcal{S}}}" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": " in the latent space for optimizing the regularizers " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\xi_{\\pi}}" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi_{\\pi}}" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": " is Lipschitz equivalent to considering a continuous " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": "-relaxation of the discrete metric " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_{\\neq}(\\pmb{x},\\pmb{y}) = \\mathbf{1}_{x\\neq y}" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": ". Consequently, this also means it is consistently sufficient to enforce 1-Lipschitzness via the gradient penalty approach of Gulrajani et al. (2017) during training to maintain the guarantees linked to the regularizers in the zero-temperature limit, when the spaces are discrete." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "text", + "content": "Lemma A.6. Let " + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "text", + "content": " be the usual Euclidean distance and " + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "inline_equation", + "content": "d_{\\lambda} \\colon [0,1]^n \\times [0,1]^n \\to [0,1[" + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "inline_equation", + "content": "\\langle \\pmb{x}, \\pmb{y} \\rangle \\mapsto \\frac{d(\\pmb{x}, \\pmb{y})}{\\lambda + d(\\pmb{x}, \\pmb{y})}" + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "inline_equation", + "content": "\\lambda \\in ]0,1]" + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 111 
+ ], + "type": "inline_equation", + "content": "n \\in \\mathbb{N}" + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "inline_equation", + "content": "d_{\\lambda}" + }, + { + "bbox": [ + 104, + 82, + 504, + 111 + ], + "type": "text", + "content": " is a distance metric." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 389, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 389, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 389, + 134 + ], + "type": "text", + "content": "Proof. The function " + }, + { + "bbox": [ + 105, + 121, + 389, + 134 + ], + "type": "inline_equation", + "content": "d_{\\lambda}" + }, + { + "bbox": [ + 105, + 121, + 389, + 134 + ], + "type": "text", + "content": " is a metric iff it satisfies the following axioms:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 129, + 145, + 506, + 216 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "spans": [ + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": "1. 
Identity of indiscernibles: If " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\pmb{x} = \\pmb{y}" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "d_{\\lambda}(\\pmb{x}, \\pmb{y}) = \\frac{d(\\pmb{x}, \\pmb{y})}{\\lambda + d(\\pmb{x}, \\pmb{y})} = \\frac{0}{\\lambda + 0} = 0" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": " since " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": " is a distance metric. Assume now that " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "d_{\\lambda}(\\pmb{x}, \\pmb{y}) = 0" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": " and take " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\alpha = d(\\pmb{x}, \\pmb{y})" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": ", for any " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\pmb{x}, \\pmb{y}" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": ". 
Thus, " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\alpha \\in [0, +\\infty[" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "0 = \\frac{\\alpha}{\\lambda + \\alpha}" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": " is only achieved in " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\alpha = 0" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": ", which only occurs whenever " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\pmb{x} = \\pmb{y}" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": " since " + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 129, + 145, + 506, + 196 + ], + "type": "text", + "content": " is a distance metric." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 129, + 203, + 187, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 203, + 187, + 216 + ], + "spans": [ + { + "bbox": [ + 129, + 203, + 187, + 216 + ], + "type": "text", + "content": "2. 
Symmetry:" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 226, + 220, + 504, + 288 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 220, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 226, + 220, + 504, + 288 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {y}) = \\frac {d (\\boldsymbol {x} , \\boldsymbol {y})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} \\\\ = \\frac {d (\\boldsymbol {y} , \\boldsymbol {x})}{\\lambda + d (\\boldsymbol {y} , \\boldsymbol {x})} \\quad (d \\text {i s a d i s t a n c e m e t r i c}) \\\\ = d _ {\\lambda} (\\boldsymbol {y}, \\boldsymbol {x}) \\\\ \\end{array}", + "image_path": "848905c57c56528bbe59a4800a7c7597647b37ee81c0ee93778c1158532fb39b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 128, + 299, + 436, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 299, + 436, + 312 + ], + "spans": [ + { + "bbox": [ + 128, + 299, + 436, + 312 + ], + "type": "text", + "content": "3. 
Triangle inequality: Let " + }, + { + "bbox": [ + 128, + 299, + 436, + 312 + ], + "type": "inline_equation", + "content": "\\mathbf{x}, \\mathbf{y}, \\mathbf{z} \\in [0,1]^n" + }, + { + "bbox": [ + 128, + 299, + 436, + 312 + ], + "type": "text", + "content": ", the triangle inequality holds iff" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 145, + 316, + 504, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 316, + 504, + 477 + ], + "spans": [ + { + "bbox": [ + 145, + 316, + 504, + 477 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {y}) + d _ {\\lambda} (\\boldsymbol {y}, \\boldsymbol {z}) \\geqslant d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {z}) (8) \\\\ \\equiv \\quad \\frac {d (\\boldsymbol {x} , \\boldsymbol {y})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} + \\frac {d (\\boldsymbol {y} , \\boldsymbol {z})}{\\lambda + d (\\boldsymbol {y} , \\boldsymbol {z})} \\geqslant \\frac {d (\\boldsymbol {x} , \\boldsymbol {z})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {z})} \\\\ \\equiv \\quad \\frac {\\lambda d (\\boldsymbol {x} , \\boldsymbol {y}) + \\lambda d (\\boldsymbol {y} , \\boldsymbol {z}) + 2 d (\\boldsymbol {x} , \\boldsymbol {y}) d (\\boldsymbol {y} , \\boldsymbol {z})}{\\lambda^ {2} + \\lambda d (\\boldsymbol {x} , \\boldsymbol {y}) + \\lambda d (\\boldsymbol {y} , \\boldsymbol {z}) + d (\\boldsymbol {x} , \\boldsymbol {y}) d (\\boldsymbol {y} , \\boldsymbol {z})} \\geqslant \\frac {d (\\boldsymbol {x} , \\boldsymbol {z})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {z})} \\\\ \\equiv \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {y}) + \\lambda^ {2} d (\\boldsymbol {y}, \\boldsymbol {z}) + 2 \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) + \\\\ \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {x}, \\boldsymbol {z}) + \\lambda d (\\boldsymbol {y}, \\boldsymbol 
{z}) d (\\boldsymbol {x}, \\boldsymbol {z}) + 2 d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\\\ \\geqslant \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {z}) + \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {x}, \\boldsymbol {z}) + \\lambda d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) + d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\\\ \\left(\\text {c r o s s - p r o d u c t , w i t h} \\lambda > 0 \\text {a n d} \\operatorname {I m} (d) \\in [ 0, \\infty [\\right) \\\\ \\equiv \\quad \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {y}) + \\lambda^ {2} d (\\boldsymbol {y}, \\boldsymbol {z}) + 2 \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) + d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\geqslant \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {z}) (9) \\\\ \\end{array}", + "image_path": "7c069f3a4ea5a2b2f78eaf29c6c965efba8082ba488b8b85f07679f976ed8d4a.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 483, + 291, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 483, + 291, + 495 + ], + "spans": [ + { + "bbox": [ + 140, + 483, + 291, + 495 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 140, + 483, + 291, + 495 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 140, + 483, + 291, + 495 + ], + "type": "text", + "content": " is a distance metric, we have" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 248, + 500, + 504, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 500, + 504, + 514 + ], + "spans": [ + { + "bbox": [ + 248, + 500, + 504, + 514 + ], + "type": "interline_equation", + "content": "\\lambda^ {2} d 
(\\boldsymbol {x}, \\boldsymbol {y}) + \\lambda^ {2} d (\\boldsymbol {y}, \\boldsymbol {z}) \\geqslant \\lambda^ {2} d (\\boldsymbol {x}, \\boldsymbol {z}) \\tag {10}", + "image_path": "84ee3fd3b797a1f0e257c76b3c39c204dd835f4fc154c92230fe334951e402ce.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 519, + 261, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 519, + 261, + 532 + ], + "spans": [ + { + "bbox": [ + 140, + 519, + 261, + 532 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 140, + 519, + 261, + 532 + ], + "type": "inline_equation", + "content": "\\operatorname {Im}(d)\\in [0,\\infty [" + }, + { + "bbox": [ + 140, + 519, + 261, + 532 + ], + "type": "text", + "content": " , meaning" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 225, + 536, + 504, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 536, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 225, + 536, + 504, + 550 + ], + "type": "interline_equation", + "content": "2 \\lambda d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) + d (\\boldsymbol {x}, \\boldsymbol {y}) d (\\boldsymbol {y}, \\boldsymbol {z}) d (\\boldsymbol {x}, \\boldsymbol {z}) \\geqslant 0 \\tag {11}", + "image_path": "182c33be8c00e1c0131c442927f8a5fb3367bf18119f61bb7cbf5de4215b483e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 560, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 560, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 560, + 506, + 582 + ], + "type": "text", + "content": "By Eq. 10 and 11, the inequality of Eq. 9 holds. Furthermore, the fact that Eq. 8 and 9 are equivalent yields the result." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "text", + "content": "Lemma A.7. Let " + }, + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "inline_equation", + "content": "d_{\\lambda}" + }, + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "text", + "content": " as defined above, then " + }, + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "inline_equation", + "content": "(i)d_{\\lambda}\\xrightarrow[\\lambda\\to 0]{\\longrightarrow}\\mathbf{1}_{\\neq}" + }, + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "text", + "content": " and (ii) " + }, + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "inline_equation", + "content": "d,d_{\\lambda}" + }, + { + "bbox": [ + 104, + 590, + 506, + 606 + ], + "type": "text", + "content": " are Lipschitz-equivalent." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "text", + "content": "Proof. Part (i) is straightforward by definition of " + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "inline_equation", + "content": "d_{\\lambda}" + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "text", + "content": ". 
Distances " + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "inline_equation", + "content": "d_{\\lambda}" + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "text", + "content": " are Lipschitz equivalent if and only if " + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "inline_equation", + "content": "\\exists a, b > 0" + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "inline_equation", + "content": "\\forall x, y \\in [0,1]^n" + }, + { + "bbox": [ + 104, + 618, + 506, + 641 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 222, + 646, + 390, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 646, + 390, + 714 + ], + "spans": [ + { + "bbox": [ + 222, + 646, + 390, + 714 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} a \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\leqslant d _ {\\lambda} (\\boldsymbol {x}, \\boldsymbol {y}) \\leqslant b \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\\\ \\equiv a \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\leqslant \\frac {d (\\boldsymbol {x} , \\boldsymbol {y})}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} \\leqslant b \\cdot d (\\boldsymbol {x}, \\boldsymbol {y}) \\\\ \\equiv \\quad a \\leqslant \\frac {1}{\\lambda + d (\\boldsymbol {x} , \\boldsymbol {y})} \\leqslant b \\\\ \\end{array}", + "image_path": "bf9c6a1a076da9ac56dce4abc2b4a81fdc004e42a61d7565e46ab136d850af33.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 719, + 293, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 719, + 293, + 735 + ], + "spans": [ + { + 
"bbox": [ + 104, + 719, + 293, + 735 + ], + "type": "text", + "content": "Taking " + }, + { + "bbox": [ + 104, + 719, + 293, + 735 + ], + "type": "inline_equation", + "content": "a = \\frac{1}{\\lambda + \\sqrt{n}}" + }, + { + "bbox": [ + 104, + 719, + 293, + 735 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 719, + 293, + 735 + ], + "type": "inline_equation", + "content": "b = \\frac{1}{\\lambda}" + }, + { + "bbox": [ + 104, + 719, + 293, + 735 + ], + "type": "text", + "content": " yields the result." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "text", + "content": "Corollary A.7.1. 
For all " + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "inline_equation", + "content": "\\beta \\geqslant 1 / \\lambda" + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "inline_equation", + "content": "a \\in \\mathcal{A}" + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "inline_equation", + "content": "\\bar{s} \\in \\overline{S}" + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "inline_equation", + "content": "\\bar{a} \\in \\overline{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 81, + 410, + 95 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 128, + 102, + 425, + 138 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 128, + 102, + 276, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 102, + 276, + 117 + ], + "spans": [ + { + "bbox": [ + 128, + 102, + 276, + 117 + ], + "type": "text", + "content": "1. " + }, + { + "bbox": [ + 128, + 102, + 276, + 117 + ], + "type": "inline_equation", + "content": "W_{d_{\\lambda}}(\\mathcal{T},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}})\\leqslant \\beta \\cdot W_{d}(\\mathcal{T},\\bar{\\xi}_{\\bar{\\pi}_{\\theta}})" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 128, + 123, + 425, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 123, + 425, + 138 + ], + "spans": [ + { + "bbox": [ + 128, + 123, + 425, + 138 + ], + "type": "text", + "content": "2. 
" + }, + { + "bbox": [ + 128, + 123, + 425, + 138 + ], + "type": "inline_equation", + "content": "W_{d_{\\lambda}}\\left(\\phi_{\\iota}\\mathbf{P}(\\cdot \\mid s,a),\\overline{\\mathbf{P}}_{\\theta}(\\cdot \\mid \\bar{s},\\bar{a})\\right)\\leqslant \\beta \\cdot W_{d}\\left(\\phi_{\\iota}\\mathbf{P}(\\cdot \\mid s,a),\\overline{\\mathbf{P}}_{\\theta}(\\cdot \\mid \\bar{s},\\bar{a})\\right)" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 151, + 506, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 151, + 506, + 185 + ], + "spans": [ + { + "bbox": [ + 104, + 151, + 506, + 185 + ], + "type": "text", + "content": "Proof. By Lipschitz equivalence, taking " + }, + { + "bbox": [ + 104, + 151, + 506, + 185 + ], + "type": "inline_equation", + "content": "\\beta \\geqslant 1 / \\lambda" + }, + { + "bbox": [ + 104, + 151, + 506, + 185 + ], + "type": "text", + "content": " ensures that " + }, + { + "bbox": [ + 104, + 151, + 506, + 185 + ], + "type": "inline_equation", + "content": "\\forall n\\in \\mathbb{N},\\forall \\pmb {x},\\pmb {y}\\in [0,1]^n,d_\\lambda (\\pmb {x},\\pmb {y})\\leqslant \\beta \\cdot d(\\pmb {x},\\pmb {y})" + }, + { + "bbox": [ + 104, + 151, + 506, + 185 + ], + "type": "text", + "content": ". Moreover, for any distributions " + }, + { + "bbox": [ + 104, + 151, + 506, + 185 + ], + "type": "inline_equation", + "content": "P,Q,W_{d_{\\lambda}}(P,Q)\\leqslant \\beta \\cdot W_{d}(P,Q)" + }, + { + "bbox": [ + 104, + 151, + 506, + 185 + ], + "type": "text", + "content": " (cf., e.g., Gelada et al. 2019, Lemma A.4 for details)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "spans": [ + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "content": "In practice, taking the hyperparameter " + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\beta \\geqslant 1 / \\lambda" + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "content": " in the " + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "content": "-MDP ensures that minimizing the " + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "content": "-scaled regularizers w.r.t. " + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "content": " also minimizes the regularizers w.r.t. the " + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "content": "-relaxation " + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "inline_equation", + "content": "d_{\\lambda}" + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "content": ", being the discrete distribution in the zero-temperature limit. 
Note that optimizing over two different " + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\beta_{1}, \\beta_{2}" + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "content": " instead of a unique scale factor " + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 199, + 506, + 245 + ], + "type": "text", + "content": " is also a good practice to interpolate between the two regularizers." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 261, + 246, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 261, + 246, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 246, + 274 + ], + "type": "text", + "content": "B EXPERIMENT DETAILS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 286, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 286, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 286, + 506, + 310 + ], + "type": "text", + "content": "The code for conducting and replicating our experiments is available at https://github.com/florentdelgrange/wae_mdp." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 323, + 162, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 162, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 162, + 335 + ], + "type": "text", + "content": "B.1 SETUP" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 343, + 506, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 343, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 343, + 506, + 399 + ], + "type": "text", + "content": "We used TENSORFLOW 2.7.0 (Abadi et al., 2015) to implement the neural network architecture of our W" + }, + { + "bbox": [ + 104, + 343, + 506, + 399 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 104, + 343, + 506, + 399 + ], + "type": "text", + "content": "AE-MDP, TENSORFLOW PROBABILITY 0.15.0 (Dillon et al., 2017) to handle the probabilistic components of the latent model (e.g., latent distributions with reparameterization tricks, masked autoregressive flows, etc.), as well as TF-AGENTS 0.11.0 (Guadarrama et al., 2018) to handle the RL parts of the framework." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 405, + 505, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 405, + 505, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 405, + 505, + 450 + ], + "type": "text", + "content": "Models have been trained on a cluster running under CentOS Linux 7 (Core) composed of a mix of nodes containing Intel processors with the following CPU microarchitectures: (i) 10-core INTEL E5-2680v2, (ii) 14-core INTEL E5-2680v4, and (iii) 20-core INTEL Xeon Gold 6148. We used 8 cores and 32 GB of memory for each run." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 464, + 254, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 254, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 254, + 475 + ], + "type": "text", + "content": "B.2 STATIONARY DISTRIBUTION" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "content": "To sample from the stationary distribution " + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "content": " of episodic learning environments operating under " + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "inline_equation", + "content": "\\pi \\in \\Pi" + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "content": ", we implemented the recursive " + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "content": "-perturbation trick of Huang (2020). In a nutshell, the reset of the environment is explicitly added to the state space of " + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "content": ", which is entered at the end of each episode and left with probability " + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "inline_equation", + "content": "1 - \\epsilon" + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "content": " to start a new one. 
We also added a special atomic proposition reset into " + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "inline_equation", + "content": "\\mathbf{AP}" + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "content": " to label this reset state and reason about episodic behaviors. For instance, this allows verifying whether the agent behaves safely during the entire episode, or if it is able to reach a goal before the end of the episode." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 576, + 328, + 587 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 328, + 587 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 328, + 587 + ], + "type": "text", + "content": "B.3 ENVIRONMENTS WITH INITIAL DISTRIBUTION" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": "Many environments do not necessarily have a single initial state, but rather an initial distribution over states " + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "inline_equation", + "content": "d_I \\in \\Delta(S)" + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": ". 
In that case, the results presented in this paper remain unchanged: it suffices to add a dummy state " + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "inline_equation", + "content": "s^\\star" + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": " to the state space " + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "inline_equation", + "content": "S \\cup \\{s^\\star\\}" + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "inline_equation", + "content": "s_I = s^\\star" + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": " with the transition dynamics " + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "inline_equation", + "content": "\\mathbf{P}(s' \\mid s^\\star, a) = d_I(s')" + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": " for any action " + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "inline_equation", + "content": "a \\in \\mathcal{A}" + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": ". Therefore, each time the reset of the environment is triggered, we make the MDP entering the initial state " + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "inline_equation", + "content": "s^\\star" + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": ", then transitioning to " + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "inline_equation", + "content": "s'" + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": " according to " + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "inline_equation", + "content": "d_I" + }, + { + "bbox": [ + 104, + 597, + 505, + 653 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 666, + 261, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 261, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 261, + 677 + ], + "type": "text", + "content": "B.4 LATENT SPACE DISTRIBUTION" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": "As pointed out in Sect. 4, posterior collapse is naturally avoided when optimizing " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": "-MDP. To illustrate that, we report the distribution of latent states produced by " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": " during training (Fig. 5). The plots reveal that the latent space generated by mapping original states drawn from " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": " during training to " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\bar{S}" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": " via " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": " is fairly distributed, for each environment." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 115, + 83, + 238, + 179 + ], + "blocks": [ + { + "bbox": [ + 115, + 83, + 238, + 179 + ], + "lines": [ + { + "bbox": [ + 115, + 83, + 238, + 179 + ], + "spans": [ + { + "bbox": [ + 115, + 83, + 238, + 179 + ], + "type": "image", + "image_path": "f9b11e840e926f6a557915662bc20036b7684f609694f4f1a267709f4ba6d4c5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 244, + 85, + 367, + 180 + ], + "blocks": [ + { + "bbox": [ + 244, + 85, + 367, + 180 + ], + "lines": [ + { + "bbox": [ + 244, + 85, + 367, + 180 + ], + "spans": [ + { + "bbox": [ + 244, + 85, + 367, + 180 + ], + "type": "image", + "image_path": "8de01355e219f78cb7b403d44ffb0758b1492b29fc6e25149cb3d30dff48d553.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 373, + 86, + 496, + 180 + ], + "blocks": [ + { + "bbox": [ + 373, + 86, + 496, + 180 + ], + "lines": [ + { + "bbox": [ + 373, + 86, + 496, + 180 + ], + "spans": [ + { + "bbox": [ + 373, + 86, + 496, + 180 + ], + "type": "image", + "image_path": "c3d02556c539def1c1129ec5b832efd5ae42a21e70f72d962abf4fb2ae673ff8.jpg" + } + ] + 
} + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 181, + 186, + 303, + 277 + ], + "blocks": [ + { + "bbox": [ + 181, + 186, + 303, + 277 + ], + "lines": [ + { + "bbox": [ + 181, + 186, + 303, + 277 + ], + "spans": [ + { + "bbox": [ + 181, + 186, + 303, + 277 + ], + "type": "image", + "image_path": "a076bd8afdd0ce5bc0a31146c4232995dc4a7ad674884a472320e5eb16858cc0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "lines": [ + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "type": "text", + "content": "Figure 5: Latent space distribution along training steps. The intensity of the blue hue corresponds to the frequency of latent states produced by " + }, + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "type": "inline_equation", + "content": "\\phi_{\\ell}" + }, + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "type": "text", + "content": " during training." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 186, + 429, + 277 + ], + "blocks": [ + { + "bbox": [ + 310, + 186, + 429, + 277 + ], + "lines": [ + { + "bbox": [ + 310, + 186, + 429, + 277 + ], + "spans": [ + { + "bbox": [ + 310, + 186, + 429, + 277 + ], + "type": "image", + "image_path": "d32d8c2a325754534be9499ca240d18de6ba6c2edff8a225444843df1390755c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 106, + 323, + 193, + 397 + ], + "blocks": [ + { + "bbox": [ + 106, + 323, + 193, + 397 + ], + "lines": [ + { + "bbox": [ + 106, + 323, + 193, + 397 + ], + "spans": [ + { + "bbox": [ + 106, + 323, + 193, + 397 + ], + "type": "image", + "image_path": "76961331004e819990562a69015ed71c0e400d697121e59f354e6d0f7a022e93.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 406, + 449, + 418 + ], + "lines": [ + { + "bbox": [ + 159, + 406, + 449, + 418 + ], + "spans": [ + { + "bbox": [ + 159, + 406, + 449, + 418 + ], + "type": "text", + "content": "Figure 6: Absolute value difference " + }, + { + "bbox": [ + 159, + 406, + 449, + 418 + ], + "type": "inline_equation", + "content": "\\| V_{\\bar{\\pi}_{\\theta}}\\|" + }, + { + "bbox": [ + 159, + 406, + 449, + 418 + ], + "type": "text", + "content": " reported along training steps." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 197, + 323, + 269, + 396 + ], + "blocks": [ + { + "bbox": [ + 197, + 323, + 269, + 396 + ], + "lines": [ + { + "bbox": [ + 197, + 323, + 269, + 396 + ], + "spans": [ + { + "bbox": [ + 197, + 323, + 269, + 396 + ], + "type": "image", + "image_path": "18694fdb7e5dddee82b29a6cd1e4a68cd5b89af0c4c965d16530ef2ebdef5271.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 271, + 324, + 347, + 396 + ], + "blocks": [ + { + "bbox": [ + 271, + 324, + 347, + 396 + ], + "lines": [ + { + "bbox": [ + 271, + 324, + 347, + 396 + ], + "spans": [ + { + "bbox": [ + 271, + 324, + 347, + 396 + ], + "type": "image", + "image_path": "e106eabe078beab95b972a69189af22111abb78e6639e9af76f8e4f5cd5f62c1.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 350, + 324, + 423, + 396 + ], + "blocks": [ + { + "bbox": [ + 350, + 324, + 423, + 396 + ], + "lines": [ + { + "bbox": [ + 350, + 324, + 423, + 396 + ], + "spans": [ + { + "bbox": [ + 350, + 324, + 423, + 396 + ], + "type": "image", + "image_path": "3777b6eee1c8d9c394343378f40ac5aa2095fb85a998828f9198189142f87869.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 426, + 324, + 503, + 397 + ], + "blocks": [ + { + "bbox": [ + 426, + 324, + 503, + 397 + ], + "lines": [ + { + "bbox": [ + 426, + 324, + 503, + 397 + ], + "spans": [ + { + "bbox": [ + 426, + 324, + 503, + 397 + ], + "type": "image", + "image_path": "975397e4c6281b932be427001e50d427d0925f0174224f6aa25b3fca5d334927.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 440, + 435, + 451 + ], + "type": "title", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 105, + 440, + 435, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 435, + 451 + ], + "type": "text", + "content": "B.5 DISTANCE METRICS: STATE, ACTION, AND REWARD RECONSTRUCTION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 460, + 506, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 506, + 549 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 506, + 549 + ], + "type": "text", + "content": "The choice of the distance functions " + }, + { + "bbox": [ + 104, + 460, + 506, + 549 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{S}}" + }, + { + "bbox": [ + 104, + 460, + 506, + 549 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 460, + 506, + 549 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 460, + 506, + 549 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 460, + 506, + 549 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{R}}" + }, + { + "bbox": [ + 104, + 460, + 506, + 549 + ], + "type": "text", + "content": ", plays a role in the success of our approach. The usual Euclidean distance is often a good choice for all the transition components, but the scale, dimensionality, and nature of the inputs sometimes require using scaled, normalized, or other kinds of distances to allow the network to reconstruct each component. While we did not observe such requirements in our experiments (where we simply used the Euclidean distance), high dimensional observations (e.g., images) are an example of data which could require tuning the state-distance function in such a way, to make sure that the optimization of the reward or action reconstruction will not be disfavored compared to that of the states." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 563, + 219, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 563, + 219, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 219, + 574 + ], + "type": "text", + "content": "B.6 VALUE DIFFERENCE" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 584, + 504, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 584, + 504, + 642 + ], + "spans": [ + { + "bbox": [ + 104, + 584, + 504, + 642 + ], + "type": "text", + "content": "In addition to reporting the quality guarantees of the model along training steps through local losses (cf. Figure 4b), our experiments revealed that the absolute value difference " + }, + { + "bbox": [ + 104, + 584, + 504, + 642 + ], + "type": "inline_equation", + "content": "\\| V_{\\overline{\\pi}_{\\theta}}\\|" + }, + { + "bbox": [ + 104, + 584, + 504, + 642 + ], + "type": "text", + "content": " between the original and latent models operating under the latent policy quickly decreases and tends to converge to values in the same range (Figure 6). This is consistent with the fact that minimizing local losses lead to close behaviors (cf. Eq. 1) and that the value function is Lipschitz-continuous w.r.t. " + }, + { + "bbox": [ + 104, + 584, + 504, + 642 + ], + "type": "inline_equation", + "content": "\\widetilde{d}_{\\overline{\\pi}_{\\theta}}" + }, + { + "bbox": [ + 104, + 584, + 504, + 642 + ], + "type": "text", + "content": " (cf. Section 2)." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 656, + 289, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 289, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 289, + 666 + ], + "type": "text", + "content": "B.7 REMARK ON FORMAL VERIFICATION" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "Recall that our bisimulation guarantees come by construction of the latent space. Essentially, our learning algorithm spits out a distilled policy and a latent state space which already yields a guaranteed bisimulation distance between the original MDP and the latent MDP. This is the crux of how we enable verification techniques like model checking. In particular, bisimulation guarantees mean that reachability probabilities in the latent MDP compared to those in the original one are close." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "Furthermore, the value difference of (omega-regular) properties (formulated through mu-calculus) obtained in the two models is bounded by this distance (cf. Sect. 2 and Chatterjee et al. 2010)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 114, + 506, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 506, + 246 + ], + "type": "text", + "content": "Reachability is the key ingredient to model-check MDPs. Model-checking properties is in most cases performed by reduction to the reachability of components or regions of the MDP: it either consists of (i) iteratively checking the reachability of the parts of the state space satisfying path formulae that comprise the specification, through a tree-like decomposition of the latter (e.g., for (P,R-)CTL properties, cf. 
Baier & Katoen 2008), or (ii) checking the reachability to the part of the state space of a product of the MDP with a memory structure or an automaton that embeds the omega-regular property — e.g., for LTL (Baier et al., 2016; Sickert et al., 2016), LTLf (Wells et al., 2020), or GLTL (Littman et al., 2017), among other specification formalisms. The choice of specification formalism is up to the user and depends on the case study. The scope of this work is focusing on learning to distill RL policies with bisimulation guarantees so that model checking can be applied, in order to reason about the behaviors of the agent. That being said, reachability is all we need to show that model checking can be applied." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 261, + 221, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 261, + 221, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 221, + 272 + ], + "type": "text", + "content": "B.8 HYPERPARAMETERS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "\\mathbf{W}^2\\mathbf{AE}" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": "-MDP parameters. All components (e.g., functions or distribution locations and scales, see Fig. 2) are represented and inferred by neural networks (multilayer perceptrons). All the networks share the same architecture (i.e., number of layers and neurons per layer). We use a simple uniform experience replay of size " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "10^{6}" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": " to store the transitions and sample them. 
The training starts when the agent has collected " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "10^{4}" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": " transitions in " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": ". We used minibatches of size 128 to optimize the objective and we applied a minibatch update every time the agent executing " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": " has performed 16 steps in " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": ". We use the recursive " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": "-perturbation trick of Huang (2020) with " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "\\epsilon = 3/4" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": ": when an episode ends, it restarts from the initial state with probability " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "1/4" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": "; before re-starting an episode, the time spent in the reset state labeled with reset follows then the geometric distribution with expectation " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "\\epsilon/1 - \\epsilon = 3" + }, + { + 
"bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": ". We chose the same latent state-action space size than Delgrange et al. (2022), except for LunarLander that we decreased to " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "\\log_2|\\bar{S}| = 14" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "inline_equation", + "content": "|\\bar{\\mathcal{A}}| = 3" + }, + { + "bbox": [ + 104, + 285, + 506, + 408 + ], + "type": "text", + "content": " to improve the scalability of the verification." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 415, + 506, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 506, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 506, + 450 + ], + "type": "text", + "content": "VAE-MDPs parameters. For the comparison of Sect. 4, we used the exact same VAE-MDP hyperparameter set as prescribed by Delgrange et al. (2022), except for the state-action space of LunarLander that we also changed for scalability and fair comparison purpose." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": "Hyperparameter search. To evaluate our " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "\\mathrm{W}^2\\mathrm{AE}" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": "-MDP, we realized a search in the parameter space defined in Table 2. The best parameters found (in terms of trade-off between performance and latent quality) are reported in Table 3. 
We used two different optimizers for minimizing the loss (referred to as the minimizer) and computing the Wasserstein terms (referred to as the maximizer). We used ADAM (Kingma & Ba, 2015) for the two, but we allow for different learning rates " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "\\mathrm{ADAM}_{\\alpha}" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": " and exponential decays " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "\\mathrm{ADAM}_{\\beta_1}" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "\\mathrm{ADAM}_{\\beta_2}" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": ". We also found that polynomial decay for " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "\\mathrm{ADAM}_{\\alpha}" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": " (e.g., to " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "10^{-5}" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "4 \\cdot 10^{5}" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": " steps) is a good practice to stabilize the experiment learning curves, but is not necessary to obtain high-quality and performing distillation. Concerning the continuous relaxation of discrete distributions, we used a different temperature for each distribution, as Maddison et al. (2017) pointed out that doing so is valuable to improve the results. We further followed the guidelines of Maddison et al. 
(2017) to choose the interval of temperatures and did not schedule any annealing scheme (in contrast to VAE-MDPs). Essentially, the search reveals that the regularizer scale factors " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": ". (defining the optimization direction) as well as the encoder and latent transition temperatures are important to improve the performance of distilled policies. For the encoder temperature, we found a nice spot in " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "\\lambda_{\\phi_\\varepsilon} = 2/3" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": ", which provides the best performance in general, whereas the choice of " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "\\lambda_{\\overline{\\mathbb{P}}_\\theta}" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 459, + 506, + 668 + ], + "type": "text", + "content": ": are (latent-) environment dependent. The importance of the temperature parameters for the continuous relaxation of discrete distributions is consistent with the results of (Maddison et al., 2017), revealing that the success of the relaxation depends on the choice of the temperature for the different latent space sizes." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 677, + 506, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 700 + ], + "type": "text", + "content": "Labeling functions. We used the same labeling functions as those described by Delgrange et al. (2022). 
For completeness, we recall the labeling function used for each environment in Table 4." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 710, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 710, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 504, + 732 + ], + "type": "text", + "content": "2The code for conducting the VAE-MDPs experiments is available at https://github.com/ florentdelgrange/vae_mdp (GNU General Public License v3.0)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 150, + 504, + 349 + ], + "blocks": [ + { + "bbox": [ + 105, + 129, + 504, + 143 + ], + "lines": [ + { + "bbox": [ + 105, + 129, + 504, + 143 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 504, + 143 + ], + "type": "text", + "content": "Table 2: Hyperparameter search. 
" + }, + { + "bbox": [ + 105, + 129, + 504, + 143 + ], + "type": "inline_equation", + "content": "{\\lambda }_{X}" + }, + { + "bbox": [ + 105, + 129, + 504, + 143 + ], + "type": "text", + "content": " refers to the temperature used for " + }, + { + "bbox": [ + 105, + 129, + 504, + 143 + ], + "type": "inline_equation", + "content": "{\\mathrm{W}}^{2}\\mathrm{{AE}}" + }, + { + "bbox": [ + 105, + 129, + 504, + 143 + ], + "type": "text", + "content": " -MDP component " + }, + { + "bbox": [ + 105, + 129, + 504, + 143 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 105, + 129, + 504, + 143 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 150, + 504, + 349 + ], + "lines": [ + { + "bbox": [ + 106, + 150, + 504, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 504, + 349 + ], + "type": "table", + "html": "
ParameterRange
ADAMα (minimizer){0.0001,0.0002,0.0003,0.001}
ADAMα (maximizer){0.0001,0.0002,0.0003,0.001}
ADAMβ1{0,0.5,0.9}
ADAMβ2{0.9,0.999}
neurons per layer{64,128,256,512}
number of hidden layers{1,2,3}
activation{ReLU,LeakyReLU,tanh,softplus(2x+2)/2-1(smooth ELU)}
βwξπ{10,25,50,75,100}
βLξπ{10,25,50,75,100}
m{5,10,15,20}
δ{10,20}
use ε-mimic (cf. Delgrange et al. 2022){True,False} (if True, a decay rate of 10-5is used)
λPθ{0.1,1/3,1/2,2/3,3/5,0.99}
λφl{0.1,1/3,1/2,2/3,3/5,0.99}
λπθ{1/|A|-1,1/(|A|-1).15}
λφlA{1/|A|-1,1/(|A|-1).15}
", + "image_path": "c8077c6228f8b6d307808cb89e7805b4342533e4cd0afc26097c7cdd72bc3597.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 474, + 503, + 681 + ], + "blocks": [ + { + "bbox": [ + 160, + 453, + 451, + 466 + ], + "lines": [ + { + "bbox": [ + 160, + 453, + 451, + 466 + ], + "spans": [ + { + "bbox": [ + 160, + 453, + 451, + 466 + ], + "type": "text", + "content": "Table 3: Final hyperparameters used to evaluate " + }, + { + "bbox": [ + 160, + 453, + 451, + 466 + ], + "type": "inline_equation", + "content": "{\\mathrm{W}}^{2}\\mathrm{{AE}}" + }, + { + "bbox": [ + 160, + 453, + 451, + 466 + ], + "type": "text", + "content": " -MDPs in Sect. 4" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 474, + 503, + 681 + ], + "lines": [ + { + "bbox": [ + 106, + 474, + 503, + 681 + ], + "spans": [ + { + "bbox": [ + 106, + 474, + 503, + 681 + ], + "type": "table", + "html": "
CartPoleMountainCarAcrobotLunarLanderPendulum
log2|S|910131413
|A|2 = |A|2 = |A|3 = |A|33
activationtanhReLULeaky ReluReLUReLU
layers[64, 64, 64][512, 512][512, 512][256][256, 256, 256]
ADAMα (minimizer)0.00020.00010.00020.00030.0003
ADAMα (maximizer)0.00020.00010.00010.00030.0003
ADAMβ10.50000.5
ADAMβ20.9990.9990.9990.9990.999
βLξπ1025105025
βWξπ751001010025
m52020155
δ2010202010
ε00000.5
λPθ1/31/30.10.752/3
λφi1/32/32/32/32/3
λπθ2/31/30.50.50.5
λφiA///1/31/3
", + "image_path": "adfc48efc581154e4267d0056f6bbe8723b4996827a4634b9850cd1c9e2d0808.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 80, + 504, + 381 + ], + "blocks": [ + { + "bbox": [ + 106, + 80, + 504, + 381 + ], + "lines": [ + { + "bbox": [ + 106, + 80, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 106, + 80, + 504, + 381 + ], + "type": "table", + "html": "
EnvironmentS⊆Description, for s ∈ Sℓ(s) = <p1, ..., pn, preset>
CartPoleR4• s1: cart position\n• s2: cart velocity\n• s3: pole angle (rad)\n• s4: pole velocity at tip• p1 = 1s1≥1.5: unsafe cart position\n• p2 = 1s3≥0.15: unsafe pole angle
MountainCarR2• s1: position\n• s2: velocity• p1 = 1s1>1.5: target position\n• p2 = 1s1≥-1/2: right-hand side of the mountain\n• p3 = 1s2≥0: car going forward
AcrobotR6Let θ1, θ2 ∈ [0, 2π] be the angles of the two rotational joints,\n• s1 = cos(θ1)\n• s2 = sin(θ1)\n• s3 = cos(θ2)\n• s4 = sin(θ2)\n• s5: angular velocity 1\n• s6: angular velocity 2• p1 = 1-s1-s3·s1+s4·s2>1: RL agent target\n• p2 = 1s1≥0: θ1 ∈ [0, π/2] ∪ [3π/2, 2π]\n• p3 = 1s2≥0: θ1 ∈ [0, π]\n• p4 = 1s3≥0: θ2 ∈ [0, π/2] ∪ [3π/2, 2π]\n• p5 = 1s4≥0: θ2 ∈ [0, π]\n• p6 = 1s5≥0: positive angular velocity (1)\n• p7 = 1s6≥0: positive angular velocity (2)
PendulumR3Let θ ∈ [0, 2π] be the joint angle\n• s1 = cos(θ)\n• s2 = sin(θ)\n• s3: angular velocity• p1 = 1s1≥cos(π/3): safe joint angle\n• p2 = 1s1≥0: θ ∈ [0, π/2] ∪ [3π/2, 2π]\n• p3 = 1s2≥0: θ ∈ [0, π]\n• p4 = 1s3≥0: positive angular velocity
LunarLanderR8• s1: horizontal coordinates\n• s2: vertical coordinates\n• s3: horizontal speed\n• s4: vertical speed\n• s5: ship angle\n• s6: angular speed\n• s7: left leg contact\n• s8: right leg contact• p1: unsafe angle\n• p2: leg ground contact\n• p3: lands rapidly\n• p4: left inclination\n• p5: right inclination\n• p6: motors shut down
", + "image_path": "e94801594180b591b6249b492680ab69acf93c3c35dfe6059c5b868094ed9570.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "lines": [ + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "text", + "content": "Table 4: Labeling functions for the OpenAI environments considered in our experiments (Delgrange et al., 2022). We provide a short description of the state space and the meaning of each atomic proposition. Recall that labels are binary encoded, for " + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "inline_equation", + "content": "n = |\\mathbf{AP}| - 1" + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "text", + "content": " (one bit is reserved for reset) and " + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "inline_equation", + "content": "p_{\\mathrm{reset}} = 1" + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "text", + "content": " iff " + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "text", + "content": " is a reset state (cf. Appendix B.2)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "text", + "content": "Time to failure properties. Based on the labeling described in Table 4, we formally detail the time to failure properties checked in Sect. 4 whose results are listed in Table 1 for each environment. 
Let " + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "inline_equation", + "content": "\\text{Reset} = \\{\\text{reset}\\} = \\langle 0, \\dots, 1 \\rangle" + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "text", + "content": " (we assume here that the last bit indicates whether the current state is a reset state or not) and define " + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "inline_equation", + "content": "s \\models \\mathsf{L}_1 \\land \\mathsf{L}_2" + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "text", + "content": " iff " + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "inline_equation", + "content": "s \\models \\mathsf{L}_1" + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "inline_equation", + "content": "s \\models \\mathsf{L}_2" + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 453, + 504, + 499 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 507, + 504, + 634 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 132, + 507, + 380, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 507, + 380, + 520 + ], + "spans": [ + { + "bbox": [ + 132, + 507, + 380, + 520 + ], + "type": "text", + "content": "- CartPole: " + }, + { + "bbox": [ + 132, + 507, + 380, + 520 + ], + "type": "inline_equation", + "content": "\\varphi = \\neg" + }, + { + "bbox": [ + 132, + 507, + 380, + 520 + ], + "type": "text", + "content": " Reset " + }, + { + "bbox": [ + 132, + 507, + 380, + 520 + ], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 132, + 507, + 380, + 520 + ], + 
"type": "text", + "content": " Unsafe, where Unsafe " + }, + { + "bbox": [ + 132, + 507, + 380, + 520 + ], + "type": "inline_equation", + "content": "= \\langle 1,1,0\\rangle" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 522, + 389, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 522, + 389, + 534 + ], + "spans": [ + { + "bbox": [ + 132, + 522, + 389, + 534 + ], + "type": "text", + "content": "- MountainCar: " + }, + { + "bbox": [ + 132, + 522, + 389, + 534 + ], + "type": "inline_equation", + "content": "\\varphi = \\neg" + }, + { + "bbox": [ + 132, + 522, + 389, + 534 + ], + "type": "text", + "content": " GoalU Reset, where Goal " + }, + { + "bbox": [ + 132, + 522, + 389, + 534 + ], + "type": "inline_equation", + "content": "= \\langle 1,0,0,0\\rangle" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 536, + 375, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 536, + 375, + 548 + ], + "spans": [ + { + "bbox": [ + 132, + 536, + 375, + 548 + ], + "type": "text", + "content": "- Acrobot: " + }, + { + "bbox": [ + 132, + 536, + 375, + 548 + ], + "type": "inline_equation", + "content": "\\varphi = \\neg" + }, + { + "bbox": [ + 132, + 536, + 375, + 548 + ], + "type": "text", + "content": " GoalU Reset, where Goal " + }, + { + "bbox": [ + 132, + 536, + 375, + 548 + ], + "type": "inline_equation", + "content": "= \\langle 1,0,\\dots ,0\\rangle" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "spans": [ + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "text", + "content": "- LunarLander: " + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "inline_equation", + "content": "\\varphi = \\neg" + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "text", + "content": " SafeLanding " + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + 
], + "type": "inline_equation", + "content": "\\mathcal{U}" + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "text", + "content": " Reset, where SafeLanding " + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "text", + "content": " GroundContact " + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "inline_equation", + "content": "\\land" + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "text", + "content": " MotorsOff, GroundContact " + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "inline_equation", + "content": "=\\langle 0,1,0,0,0,0,0\\rangle" + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "text", + "content": ", and MotorsOff " + }, + { + "bbox": [ + 132, + 551, + 503, + 574 + ], + "type": "inline_equation", + "content": "=\\langle 0,0,0,0,0,1,0\\rangle" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "spans": [ + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": "- Pendulum: " + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "inline_equation", + "content": "\\varphi = \\diamondsuit (\\neg \\text{Safe} \\land \\bigcirc \\text{Reset})" + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": ", where Safe = " + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "inline_equation", + "content": "\\langle 1,0,0,0,0\\rangle" + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "inline_equation", + "content": "\\diamondsuit \\mathsf{T} = \\neg \\emptyset \\mathcal{U} \\mathsf{T}" + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": ", and " + }, + { + 
"bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "inline_equation", + "content": "s_i \\models \\bigcirc \\mathsf{T}" + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": " iff " + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "inline_equation", + "content": "s_{i+1} \\models \\mathsf{T}" + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": ", for any " + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "inline_equation", + "content": "\\mathsf{T} \\subseteq \\mathbf{AP}" + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "inline_equation", + "content": "s_{i:\\infty}, a_{i:\\infty} \\in \\text{Traj}" + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": ". Intuitively, " + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "inline_equation", + "content": "\\varphi" + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": " denotes the event of ending an episode in an unsafe state, just before resetting the environment, which means that either the agent never reached the safe region or it reached and left it at some point. Formally, " + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "inline_equation", + "content": "\\varphi = \\{s_{0:\\infty}, a_{0:\\infty} \\mid \\exists i \\in \\mathbb{N}, s_i \\models \\text{Safe} \\land s_{i+1} \\models \\text{Reset}\\} \\subseteq \\text{Traj}" + }, + { + "bbox": [ + 132, + 577, + 504, + 634 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 652, + 358, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 652, + 358, + 664 + ], + "spans": [ + { + "bbox": [ + 105, + 652, + 358, + 664 + ], + "type": "text", + "content": "C ON THE CURSE OF VARIATIONAL MODELING" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "Posterior collapse is a well known issue occurring in variational models (see, e.g., Alemi et al. 2018; Tolstikhin et al. 2018; He et al. 2019; Dong et al. 2020) which intuitively results in a degenerate local optimum where the model learns to ignore the latent space and use only the reconstruction functions (i.e., the decoding distribution) to optimize the objective. VAE-MDPs are no exception, as pointed out in the original paper (Delgrange et al., 2022, Section 4.3 and Appendix C.2)." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 83, + 225, + 173 + ], + "blocks": [ + { + "bbox": [ + 109, + 83, + 225, + 173 + ], + "lines": [ + { + "bbox": [ + 109, + 83, + 225, + 173 + ], + "spans": [ + { + "bbox": [ + 109, + 83, + 225, + 173 + ], + "type": "image", + "image_path": "85a29476b5c38b313eac6a809b2a1d01b774ebfb0476120102db2a2c5df884c2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 180, + 347, + 221 + ], + "lines": [ + { + "bbox": [ + 104, + 180, + 347, + 221 + ], + "spans": [ + { + "bbox": [ + 104, + 180, + 347, + 221 + ], + "type": "text", + "content": "(a) Latent space distribution along training steps. The intensity of the blue hue corresponds to the frequency of latent states produced from " + }, + { + "bbox": [ + 104, + 180, + 347, + 221 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 180, + 347, + 221 + ], + "type": "text", + "content": " during training. The vanilla model collapses to a single state." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 230, + 82, + 345, + 173 + ], + "blocks": [ + { + "bbox": [ + 230, + 82, + 345, + 173 + ], + "lines": [ + { + "bbox": [ + 230, + 82, + 345, + 173 + ], + "spans": [ + { + "bbox": [ + 230, + 82, + 345, + 173 + ], + "type": "image", + "image_path": "5e1e6827c55e71f4a031d936a287450b1b4acbb5c82f45c46029490dc63a095b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 352, + 102, + 492, + 197 + ], + "blocks": [ + { + "bbox": [ + 352, + 102, + 492, + 197 + ], + "lines": [ + { + "bbox": [ + 352, + 102, + 492, + 197 + ], + "spans": [ + { + "bbox": [ + 352, + 102, + 492, + 197 + ], + "type": "image", + "image_path": "07df2d4f32a0d23b07beeedbfc742076773ffc890df1b7262e13683eef5f714b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 201, + 486, + 211 + ], + "lines": [ + { + "bbox": [ + 364, + 201, + 486, + 211 + ], + "spans": [ + { + "bbox": [ + 364, + 201, + 486, + 211 + ], + "type": "text", + "content": "(b) Rate of the variational model." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 231, + 225, + 308 + ], + "blocks": [ + { + "bbox": [ + 106, + 231, + 225, + 308 + ], + "lines": [ + { + "bbox": [ + 106, + 231, + 225, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 231, + 225, + 308 + ], + "type": "image", + "image_path": "8ddee2b133ea3f2a9f0eaf010868ff0e9ac0b02a43b19636f55ecd3f5cabea1e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 312, + 235, + 331 + ], + "lines": [ + { + "bbox": [ + 105, + 312, + 235, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 235, + 331 + ], + "type": "text", + "content": "(c) Distortion of the variational model." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 247, + 231, + 361, + 308 + ], + "blocks": [ + { + "bbox": [ + 247, + 231, + 361, + 308 + ], + "lines": [ + { + "bbox": [ + 247, + 231, + 361, + 308 + ], + "spans": [ + { + "bbox": [ + 247, + 231, + 361, + 308 + ], + "type": "image", + "image_path": "aab5da58b05380464cfc1a5680fc87d6b66a66e0017fa4a7b8f0b83a44a66b16.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 242, + 312, + 373, + 351 + ], + "lines": [ + { + "bbox": [ + 242, + 312, + 373, + 351 + ], + "spans": [ + { + "bbox": [ + 242, + 312, + 373, + 351 + ], + "type": "text", + "content": "(d) Average point-wise entropy of " + }, + { + "bbox": [ + 242, + 312, + 373, + 351 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}(\\cdot \\mid s)" + }, + { + "bbox": [ + 242, + 312, + 373, + 351 + ], + "type": "text", + "content": ", for " + }, + { + "bbox": [ + 242, + 312, + 373, + 351 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 242, + 312, + 373, + 351 + ], + "type": "text", + "content": " drawn from the interaction with the original environment." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 381, + 232, + 493, + 306 + ], + "blocks": [ + { + "bbox": [ + 381, + 232, + 493, + 306 + ], + "lines": [ + { + "bbox": [ + 381, + 232, + 493, + 306 + ], + "spans": [ + { + "bbox": [ + 381, + 232, + 493, + 306 + ], + "type": "image", + "image_path": "14909e47b3550db438ea0715e7f9590574bf78942418fe291acc608c902c230a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 310, + 506, + 351 + ], + "lines": [ + { + "bbox": [ + 378, + 310, + 506, + 351 + ], + "spans": [ + { + "bbox": [ + 378, + 310, + 506, + 351 + ], + "type": "text", + "content": "(e) Performance of the resulting distilled policy " + }, + { + "bbox": [ + 378, + 310, + 506, + 351 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}_{\\theta}" + }, + { + "bbox": [ + 378, + 310, + 506, + 351 + ], + "type": "text", + "content": " when deployed in the original environment (averaged over 30 episodes)." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 361, + 506, + 451 + ], + "lines": [ + { + "bbox": [ + 104, + 361, + 506, + 451 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 506, + 451 + ], + "type": "text", + "content": "Figure 7: Comparison of the VAE-MDP in the CartPole environment (i) when the distortion and the rate are minimized as is (vanilla model) and (ii) when it makes use of annealing schemes, entropy regularization, and prioritized experience replay to avoid posterior collapse (cf. Delgrange et al. 2022). 
While the former clearly fails to learn a useful latent representation, the later does so meticulously and smoothly in two distinguishable phases: first, " + }, + { + "bbox": [ + 104, + 361, + 506, + 451 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 361, + 506, + 451 + ], + "type": "text", + "content": " focuses on fairly distributing the latent space, setting up the stage to the concrete optimization occurring from step " + }, + { + "bbox": [ + 104, + 361, + 506, + 451 + ], + "type": "inline_equation", + "content": "4\\cdot 10^{5}" + }, + { + "bbox": [ + 104, + 361, + 506, + 451 + ], + "type": "text", + "content": ", where the entropy of " + }, + { + "bbox": [ + 104, + 361, + 506, + 451 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 361, + 506, + 451 + ], + "type": "text", + "content": " is lowered, which allows to get the rate of the variational model away from zero. Five instances of the models are trained with different random seeds, with the same hyperparameters than in Sect. 4." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "text", + "content": "Formally, VAE- and WAE-MDPs optimize their objective by minimizing two losses: a reconstruction cost plus a regularizer term which penalizes a discrepancy between the encoding distribution and the dynamics of the latent space model. In VAE-MDPs, the former corresponds to the distortion, and the later to the rate of the variational model (further details are given in Alemi et al. 2018; Delgrange et al. 2022), while in our WAE-MDPs, the former corresponds to the raw transition distance and the later to both the steady-state and transition regularizers. 
Notably, the rate minimization of VAE-MDPs involves regularizing a stochastic embedding function " + }, + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}(\\cdot | s)" + }, + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "text", + "content": " point-wise, i.e., for all different input states " + }, + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "text", + "content": " drawn from the interaction with the original environment. In contrast, the latent space regularization of the WAE-MDP involves the marginal embedding distribution " + }, + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "inline_equation", + "content": "Q_{\\iota}" + }, + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "text", + "content": " where the embedding function " + }, + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}" + }, + { + "bbox": [ + 104, + 472, + 506, + 605 + ], + "type": "text", + "content": " is not required to be stochastic. Alemi et al. (2018) showed that posterior collapse occurs in VAEs when the rate of the variational model is close to zero, leading to low-quality representation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "text", + "content": "Posterior collapse in VAE-MDPs. We illustrate the sensitivity of VAE-MDPs to the posterior collapse problem in Fig. 7, through the CartPole environment3: minimizing the distortion and the rate as is yields an embedding function which maps deterministically every input state to the same sink latent state (cf. Fig. 7a). 
Precisely, there is a latent state " + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "inline_equation", + "content": "\\bar{s} \\in \\bar{S}" + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "inline_equation", + "content": "\\phi_{\\nu}(\\bar{s} \\mid s) \\approx 1" + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{P}}_{\\theta}(\\bar{s} \\mid \\bar{s}, \\bar{a}) \\approx 1" + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "text", + "content": " whatever the state " + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "inline_equation", + "content": "s \\in S" + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "text", + "content": " and action " + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "inline_equation", + "content": "\\bar{a} \\in \\overline{A}" + }, + { + "bbox": [ + 104, + 612, + 506, + 703 + ], + "type": "text", + "content": ". This is a form of posterior collapse, the resulting rate quickly drops to zero (cf. Fig 7b), and the resulting latent representation yields no information at all. 
This phenomenon is handled in VAE-MDPs by using (i) prioritized replay buffers that allow to focus on inputs that led to bad representation, and (ii) modifying the objective" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "type": "text", + "content": "3In fact, the phenomenon of collapsing to few state occurs for all the environments considered in this paper when their prioritized experience replay is not used, as illustrated in Delgrange et al., 2022, Appendix C.2." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": "function for learning the latent space model — the so-called evidence lower bound (Hoffman et al., 2013; Kingma & Welling, 2014), or ELBO for short — and set up annealing schemes to eventually recover the ELBO at the end of the training process. 
Consequently, the resulting learning procedure focuses primarily on fairly distributing the latent space, to avoid it to collapse to a single latent state, to the detriment of learning the dynamics of the environment and the distillation of the RL policy. Then, the annealing scheme allows to make the model learn to finally smoothly use the latent space to maximize the ELBO, and achieve consequently a lower distortion at the \"price\" of a higher rate." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "text", + "content": "Impact of the resulting learning procedure. The aforementioned annealing process, used to avoid that every state collapses to the same representation, possibly induces a high entropy embedding function (Fig. 7d), which further complicates the learning of the model dynamics and the distillation in the first stage of the training process. In fact, in this particular case, one can observe that the entropy reaches its maximal value, which yields a fully random state embedding function. Recall that the VAE-MDP latent space is learned through independent Bernoulli distributions. Fig. 
7d reports values centered around 4.188 in the first training phase, which corresponds to the entropy of the state embedding function when " + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}(\\cdot |s)" + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "text", + "content": " is uniformly distributed over " + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\bar{S}" + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "text", + "content": " for any state " + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "inline_equation", + "content": "s\\in S" + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "text", + "content": " .. " + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "inline_equation", + "content": "H(\\phi_{\\iota}(\\cdot |s)) = \\sum_{i = 0}^{\\log_2|\\bar{S}| - |\\mathbf{AP}| = 6} - p_i\\log p_i - (1 - p_i)\\log (1 - p_i) = 4.188," + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "inline_equation", + "content": "p_i = 1 / 2" + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "inline_equation", + "content": "i." + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "text", + "content": " The rate (Fig. 7b) drops to zero since the divergence pulls the latent dynamics towards this high entropy (yet another form of posterior collapse), which hinders the latent space model to learn a useful representation. 
However, the annealing scheme increases the rate importance along training steps, which enables the optimization to eventually leave this local optimum (here around " + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "inline_equation", + "content": "4\\cdot 10^{5}" + }, + { + "bbox": [ + 104, + 167, + 506, + 340 + ], + "type": "text", + "content": " training steps). This allows the learning procedure to leave the zero-rate spot, reduce the distortion (Fig. 7c), and finally distill the original policy (Fig. 7e)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 344, + 504, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 344, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 504, + 390 + ], + "type": "text", + "content": "As a result, the whole engineering required to mitigate posterior collapse slows down the training procedure. This phenomenon is reflected in Fig. 4: VAE-MDPs need several steps to stabilize and set up the stage to the concrete optimization, whereas WAE-MDPs have no such requirements since they naturally do not suffer from collapsing issues (cf. Fig. 5), and are consequently faster to train." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 397, + 504, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 486 + ], + "type": "text", + "content": "Lack of representation guarantees. On the theoretical side, since VAE-MDPs are optimized via the ELBO and the local losses via the related variational proxies, VAE-MDPs do not leverage the representation quality guarantees induced by local losses (Eq. 1) during the learning procedure (as explicitly pointed out by Delgrange et al., 2022, Sect. 
4.1.): in contrast to WAE-MDPs, when two original states are embedded to the same latent, abstract state, the former are not guaranteed to be bisimilarly close (i.e., the agent is not guaranteed to behave the same way from those two states by executing the policy), meaning those proxies do not prevent original states having distant values collapsing together to the same latent representation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 502, + 220, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 220, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 220, + 514 + ], + "type": "text", + "content": "INDEX OF NOTATIONS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 538, + 412, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 538, + 412, + 551 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 412, + 551 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_{[cond]}" + }, + { + "bbox": [ + 106, + 538, + 412, + 551 + ], + "type": "text", + "content": " indicator function: 1 if the statement [cond] is true, and 0 otherwise" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 553, + 361, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 553, + 361, + 565 + ], + "spans": [ + { + "bbox": [ + 106, + 553, + 361, + 565 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_d" + }, + { + "bbox": [ + 106, + 553, + 361, + 565 + ], + "type": "text", + "content": " Set of 1-Lipschitz functions w.r.t. 
the distance metric " + }, + { + "bbox": [ + 106, + 553, + 361, + 565 + ], + "type": "inline_equation", + "content": "d" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 569, + 315, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 569, + 315, + 581 + ], + "spans": [ + { + "bbox": [ + 106, + 569, + 315, + 581 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 106, + 569, + 315, + 581 + ], + "type": "text", + "content": " Sigmoid function, with " + }, + { + "bbox": [ + 106, + 569, + 315, + 581 + ], + "type": "inline_equation", + "content": "\\sigma (x) = 1 / 1 + \\exp (-x)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "spans": [ + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "type": "text", + "content": " A function " + }, + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "type": "inline_equation", + "content": "f_{\\theta} \\colon \\mathcal{X} \\to \\mathbb{R}" + }, + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "type": "text", + "content": " modeled by a neural network, parameterized by " + }, + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 106, + 584, + 504, + 605 + ], + "type": "text", + "content": " is any measurable set" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 610, + 194, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 610, + 194, + 621 + ], + "spans": [ + { + "bbox": [ + 105, + 610, + 194, + 621 + ], + "type": "text", 
+ "content": "Latent Space Model" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}} = \\langle \\overline{S}, \\overline{\\mathcal{A}}, \\overline{\\mathbf{P}}, \\overline{\\mathcal{R}}, \\bar{\\ell}, \\mathbf{AP}, \\bar{s}_I \\rangle" + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "text", + "content": " Latent MDP with state space " + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\overline{S}" + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "text", + "content": ", action space " + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{A}}" + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "text", + "content": ", reward function " + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{R}}" + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "text", + "content": ", labeling function " + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\bar{\\ell}" + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "text", + "content": ", atomic proposition space " + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\mathbf{AP}" + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "text", + "content": ", and initial state " + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\bar{s}_I" + }, + { + "bbox": [ + 106, + 624, + 506, + 651 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 653, + 260, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 653, + 260, + 666 + ], + "spans": [ + { + "bbox": [ + 106, + 653, + 260, + 666 + ], + "type": "inline_equation", + "content": "\\langle \\overline{\\mathcal{M}},\\phi ,\\psi \\rangle" + }, + { + "bbox": [ + 106, + 653, + 260, + 666 + ], + "type": "text", + "content": " Latent space model of " + }, + { + "bbox": [ + 106, + 653, + 260, + 666 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 670, + 216, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 670, + 216, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 670, + 216, + 682 + ], + "type": "inline_equation", + "content": "\\bar{a}" + }, + { + "bbox": [ + 106, + 670, + 216, + 682 + ], + "type": "text", + "content": " Latent action in " + }, + { + "bbox": [ + 106, + 670, + 216, + 682 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{A}}" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}" + }, + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "text", + "content": " Latent policy " + }, + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "inline_equation", + "content": "\\bar{\\pi}:\\bar{S}\\to \\mathcal{A}" + }, + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "text", + "content": "; can be executed in " + }, + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "text", + "content": " via " + }, + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "inline_equation", 
+ "content": "\\phi" + }, + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 106, + 686, + 405, + 700 + ], + "type": "inline_equation", + "content": "\\bar{\\pi} (\\cdot \\mid \\phi (s))" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 703, + 234, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 703, + 234, + 715 + ], + "spans": [ + { + "bbox": [ + 106, + 703, + 234, + 715 + ], + "type": "inline_equation", + "content": "d_{\\overline{S}}" + }, + { + "bbox": [ + 106, + 703, + 234, + 715 + ], + "type": "text", + "content": " Distance metric over " + }, + { + "bbox": [ + 106, + 703, + 234, + 715 + ], + "type": "inline_equation", + "content": "\\bar{S}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 719, + 298, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 719, + 298, + 731 + ], + "spans": [ + { + "bbox": [ + 106, + 719, + 298, + 731 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 106, + 719, + 298, + 731 + ], + "type": "text", + "content": " (20 State embedding function, from " + }, + { + "bbox": [ + 106, + 719, + 298, + 731 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 106, + 719, + 298, + 731 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 719, + 298, + 731 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{S}}" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 
310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 326, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 326, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 326, + 95 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 81, + 326, + 95 + ], + "type": "text", + "content": " Action embedding function, from " + }, + { + "bbox": [ + 104, + 81, + 326, + 95 + ], + "type": "inline_equation", + "content": "\\overline{S}\\times \\overline{A}" + }, + { + "bbox": [ + 104, + 81, + 326, + 95 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 81, + 326, + 95 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "spans": [ + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "inline_equation", + "content": "\\phi \\mathbf{P}" + }, + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "text", + "content": " Distribution of drawing " + }, + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "inline_equation", + "content": "s^\\prime \\sim \\mathbf{P}(\\cdot \\mid s,a)" + }, + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "text", + "content": ", then embedding " + }, + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "inline_equation", + "content": "\\bar{s}^{\\prime} = \\phi (s^{\\prime})" + }, + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "text", + "content": ", for any state " + }, + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "inline_equation", + "content": "s\\in S" + }, + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "text", + 
"content": " and action " + }, + { + "bbox": [ + 104, + 96, + 505, + 119 + ], + "type": "inline_equation", + "content": "a\\in \\mathcal{A}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 121, + 293, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 121, + 293, + 137 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 293, + 137 + ], + "type": "inline_equation", + "content": "L_{\\mathcal{R}}^{\\xi}" + }, + { + "bbox": [ + 106, + 121, + 293, + 137 + ], + "type": "text", + "content": " Local reward loss under distribution " + }, + { + "bbox": [ + 106, + 121, + 293, + 137 + ], + "type": "inline_equation", + "content": "\\xi" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 138, + 303, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 138, + 303, + 154 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 303, + 154 + ], + "type": "inline_equation", + "content": "L_{\\mathbf{P}}^{\\xi}" + }, + { + "bbox": [ + 106, + 138, + 303, + 154 + ], + "type": "text", + "content": " Local transition loss under distribution " + }, + { + "bbox": [ + 106, + 138, + 303, + 154 + ], + "type": "inline_equation", + "content": "\\xi" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 156, + 281, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 156, + 281, + 170 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 281, + 170 + ], + "type": "inline_equation", + "content": "\\overline{\\Pi}" + }, + { + "bbox": [ + 106, + 156, + 281, + 170 + ], + "type": "text", + "content": " Set of (memoryless) latent policies" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 171, + 208, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 171, + 208, + 183 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 208, + 183 + ], + "type": "inline_equation", + "content": "\\bar{s}" + }, + { + "bbox": [ + 106, + 171, + 208, + 183 + ], + "type": "text", + 
"content": " Latent state in " + }, + { + "bbox": [ + 106, + 171, + 208, + 183 + ], + "type": "inline_equation", + "content": "\\bar{S}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 186, + 227, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 186, + 227, + 200 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 227, + 200 + ], + "type": "inline_equation", + "content": "\\overrightarrow{V_{\\pi}}" + }, + { + "bbox": [ + 106, + 186, + 227, + 200 + ], + "type": "text", + "content": " Latent value function" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 202, + 224, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 202, + 224, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 202, + 224, + 213 + ], + "type": "text", + "content": "Markov Decision Processes" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "inline_equation", + "content": "\\mathcal{M} = \\langle S, \\mathcal{A}, \\mathbf{P}, \\mathcal{R}, \\ell, \\mathbf{AP}, s_I \\rangle" + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "text", + "content": " MDP " + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "text", + "content": " with state space " + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "text", + "content": ", action space " + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "text", + "content": ", transition function " + }, + { + "bbox": [ + 104, + 216, + 
506, + 241 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "text", + "content": ", labeling function " + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "text", + "content": ", atomic proposition space " + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "inline_equation", + "content": "\\mathbf{AP}" + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "text", + "content": ", and initial state " + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "inline_equation", + "content": "s_I" + }, + { + "bbox": [ + 104, + 216, + 506, + 241 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 243, + 190, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 243, + 190, + 255 + ], + "spans": [ + { + "bbox": [ + 106, + 243, + 190, + 255 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 106, + 243, + 190, + 255 + ], + "type": "text", + "content": " Action in " + }, + { + "bbox": [ + 106, + 243, + 190, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 256, + 249, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 256, + 249, + 270 + ], + "spans": [ + { + "bbox": [ + 106, + 256, + 249, + 270 + ], + "type": "inline_equation", + "content": "\\widetilde{d}_{\\pi}" + }, + { + "bbox": [ + 106, + 256, + 249, + 270 + ], + "type": "text", + "content": " Bisimulation pseudometric" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 272, + 237, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 272, + 237, + 286 + ], + "spans": [ + { + "bbox": [ + 106, + 272, + 237, + 286 + ], + "type": "inline_equation", + "content": "\\gamma" 
+ }, + { + "bbox": [ + 106, + 272, + 237, + 286 + ], + "type": "text", + "content": " Discount factor in [0, 1]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 288, + 254, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 288, + 254, + 300 + ], + "spans": [ + { + "bbox": [ + 106, + 288, + 254, + 300 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{A}}" + }, + { + "bbox": [ + 106, + 288, + 254, + 300 + ], + "type": "text", + "content": " Metric over the action space" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 302, + 218, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 302, + 218, + 315 + ], + "spans": [ + { + "bbox": [ + 106, + 302, + 218, + 315 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{R}}" + }, + { + "bbox": [ + 106, + 302, + 218, + 315 + ], + "type": "text", + "content": " Metric over " + }, + { + "bbox": [ + 106, + 302, + 218, + 315 + ], + "type": "inline_equation", + "content": "\\operatorname {Im}(\\mathcal{R})" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 318, + 248, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 318, + 248, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 318, + 248, + 331 + ], + "type": "inline_equation", + "content": "d_{\\mathcal{S}}" + }, + { + "bbox": [ + 106, + 318, + 248, + 331 + ], + "type": "text", + "content": " Metric over the state space" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 332, + 505, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 332, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 106, + 332, + 505, + 357 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}^{t}" + }, + { + "bbox": [ + 106, + 332, + 505, + 357 + ], + "type": "text", + "content": " Limiting distribution of the MDP defined as " + }, + { + "bbox": [ + 106, + 332, + 505, + 357 + ], + "type": "inline_equation", + "content": 
"\\xi_{\\pi}^{t}(s^{\\prime}\\mid s) = \\mathbb{P}_{\\pi}^{\\mathcal{M}_{s}}\\left(\\left\\{s_{0:\\infty},a_{0:\\infty}\\mid s_{t} = s^{\\prime}\\right\\}\\right)" + }, + { + "bbox": [ + 106, + 332, + 505, + 357 + ], + "type": "text", + "content": ", for any source state " + }, + { + "bbox": [ + 106, + 332, + 505, + 357 + ], + "type": "inline_equation", + "content": "s\\in S" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 358, + 275, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 358, + 275, + 372 + ], + "spans": [ + { + "bbox": [ + 106, + 358, + 275, + 372 + ], + "type": "text", + "content": "II Set of memoryless policies of " + }, + { + "bbox": [ + 106, + 358, + 275, + 372 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 374, + 279, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 374, + 279, + 387 + ], + "spans": [ + { + "bbox": [ + 106, + 374, + 279, + 387 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 106, + 374, + 279, + 387 + ], + "type": "text", + "content": " (204 Memoryless policy " + }, + { + "bbox": [ + 106, + 374, + 279, + 387 + ], + "type": "inline_equation", + "content": "\\pi \\colon S\\to \\Delta (\\mathcal{A})" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "spans": [ + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_{\\pi}^{\\mathcal{M}}" + }, + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "type": "text", + "content": " Unique probability measure induced by the policy " + }, + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 
106, + 389, + 505, + 415 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "type": "text", + "content": " on the Borel " + }, + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "type": "text", + "content": "-algebra over measurable subsets of " + }, + { + "bbox": [ + 106, + 389, + 505, + 415 + ], + "type": "inline_equation", + "content": "Traj" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 416, + 263, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 416, + 263, + 429 + ], + "spans": [ + { + "bbox": [ + 106, + 416, + 263, + 429 + ], + "type": "text", + "content": "CUT Constrained reachability event" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 431, + 376, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 431, + 376, + 445 + ], + "spans": [ + { + "bbox": [ + 106, + 431, + 376, + 445 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_s" + }, + { + "bbox": [ + 106, + 431, + 376, + 445 + ], + "type": "text", + "content": " MDP obtained by replacing the initial state of " + }, + { + "bbox": [ + 106, + 431, + 376, + 445 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 106, + 431, + 376, + 445 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 106, + 431, + 376, + 445 + ], + "type": "inline_equation", + "content": "s\\in S" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 447, + 181, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 447, + 181, + 458 + ], + "spans": [ + { + "bbox": [ + 106, + 447, + 181, + 458 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 106, + 447, + 181, + 458 + ], + "type": "text", + "content": " (20 State in " + }, + { + "bbox": [ + 106, + 447, + 181, + 458 + ], 
+ "type": "inline_equation", + "content": "\\mathcal{S}" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 461, + 353, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 461, + 353, + 475 + ], + "spans": [ + { + "bbox": [ + 106, + 461, + 353, + 475 + ], + "type": "inline_equation", + "content": "\\xi_{\\pi}" + }, + { + "bbox": [ + 106, + 461, + 353, + 475 + ], + "type": "text", + "content": " Stationary distribution of " + }, + { + "bbox": [ + 106, + 461, + 353, + 475 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 106, + 461, + 353, + 475 + ], + "type": "text", + "content": " induced by the policy " + }, + { + "bbox": [ + 106, + 461, + 353, + 475 + ], + "type": "inline_equation", + "content": "\\pi" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 477, + 391, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 477, + 391, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 477, + 391, + 491 + ], + "type": "inline_equation", + "content": "\\vec{d}" + }, + { + "bbox": [ + 106, + 477, + 391, + 491 + ], + "type": "text", + "content": " Raw transition distance, i.e., metric over " + }, + { + "bbox": [ + 106, + 477, + 391, + 491 + ], + "type": "inline_equation", + "content": "\\mathcal{S} \\times \\mathcal{A} \\times \\operatorname{Im}(\\mathcal{R}) \\times \\mathcal{S}" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 494, + 268, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 494, + 268, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 494, + 268, + 506 + ], + "type": "text", + "content": "Traj Set of infinite trajectories of " + }, + { + "bbox": [ + 106, + 494, + 268, + 506 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 509, + 227, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 509, + 227, + 521 + ], 
+ "spans": [ + { + "bbox": [ + 106, + 509, + 227, + 521 + ], + "type": "inline_equation", + "content": "\\tau = \\langle s_{0:T}, a_{0:T-1} \\rangle" + }, + { + "bbox": [ + 106, + 509, + 227, + 521 + ], + "type": "text", + "content": " Trajectory" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 538, + 264, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 538, + 264, + 551 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 264, + 551 + ], + "type": "inline_equation", + "content": "V_{\\pi}" + }, + { + "bbox": [ + 106, + 538, + 264, + 551 + ], + "type": "text", + "content": " Value function for the policy " + }, + { + "bbox": [ + 106, + 538, + 264, + 551 + ], + "type": "inline_equation", + "content": "\\pi" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 553, + 234, + 566 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 553, + 234, + 566 + ], + "spans": [ + { + "bbox": [ + 106, + 553, + 234, + 566 + ], + "type": "text", + "content": "Probability / Measure Theory" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 568, + 485, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 568, + 485, + 581 + ], + "spans": [ + { + "bbox": [ + 106, + 568, + 485, + 581 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 106, + 568, + 485, + 581 + ], + "type": "text", + "content": " Discrepancy measure; " + }, + { + "bbox": [ + 106, + 568, + 485, + 581 + ], + "type": "inline_equation", + "content": "D(P,Q)" + }, + { + "bbox": [ + 106, + 568, + 485, + 581 + ], + "type": "text", + "content": " is the discrepancy between distributions " + }, + { + "bbox": [ + 106, + 568, + 485, + 581 + ], + "type": "inline_equation", + "content": "P,Q\\in \\Delta (\\mathcal{X})" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 583, + 376, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 583, + 376, + 596 + ], + 
"spans": [ + { + "bbox": [ + 106, + 583, + 376, + 596 + ], + "type": "inline_equation", + "content": "\\Delta (\\mathcal{X})" + }, + { + "bbox": [ + 106, + 583, + 376, + 596 + ], + "type": "text", + "content": " Set of measures over a complete, separable metric space " + }, + { + "bbox": [ + 106, + 583, + 376, + 596 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 598, + 445, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 598, + 445, + 611 + ], + "spans": [ + { + "bbox": [ + 106, + 598, + 445, + 611 + ], + "type": "text", + "content": "Logistic " + }, + { + "bbox": [ + 106, + 598, + 445, + 611 + ], + "type": "inline_equation", + "content": "(\\mu, s)" + }, + { + "bbox": [ + 106, + 598, + 445, + 611 + ], + "type": "text", + "content": " Logistic distribution with location parameter " + }, + { + "bbox": [ + 106, + 598, + 445, + 611 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 106, + 598, + 445, + 611 + ], + "type": "text", + "content": " and scale parameter " + }, + { + "bbox": [ + 106, + 598, + 445, + 611 + ], + "type": "inline_equation", + "content": "s" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 613, + 504, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 613, + 504, + 637 + ], + "spans": [ + { + "bbox": [ + 106, + 613, + 504, + 637 + ], + "type": "inline_equation", + "content": "W_{d}" + }, + { + "bbox": [ + 106, + 613, + 504, + 637 + ], + "type": "text", + "content": " Wasserstein distance w.r.t. 
the metric " + }, + { + "bbox": [ + 106, + 613, + 504, + 637 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 106, + 613, + 504, + 637 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 106, + 613, + 504, + 637 + ], + "type": "inline_equation", + "content": "W_{d}(P,Q)" + }, + { + "bbox": [ + 106, + 613, + 504, + 637 + ], + "type": "text", + "content": " is the Wasserstein distance between distributions " + }, + { + "bbox": [ + 106, + 613, + 504, + 637 + ], + "type": "inline_equation", + "content": "P, Q \\in \\Delta(\\mathcal{X})" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 639, + 246, + 650 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 639, + 246, + 650 + ], + "spans": [ + { + "bbox": [ + 106, + 639, + 246, + 650 + ], + "type": "text", + "content": "Wasserstein Auto-encoded MDP" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 669, + 373, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 669, + 373, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 669, + 373, + 682 + ], + "type": "inline_equation", + "content": "\\xi_{\\theta}" + }, + { + "bbox": [ + 106, + 669, + 373, + 682 + ], + "type": "text", + "content": " Behavioral model: distribution over " + }, + { + "bbox": [ + 106, + 669, + 373, + 682 + ], + "type": "inline_equation", + "content": "\\mathcal{S} \\times \\mathcal{A} \\times \\operatorname{Im}(\\mathcal{R}) \\times \\mathcal{S}" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 685, + 370, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 685, + 370, + 699 + ], + "spans": [ + { + "bbox": [ + 106, + 685, + 370, + 699 + ], + "type": "inline_equation", + "content": "G_{\\theta}" + }, + { + "bbox": [ + 106, + 685, + 370, + 699 + ], + "type": "text", + "content": " Mapping " + }, + { + "bbox": [ + 106, + 685, + 370, + 699 + ], + "type": "inline_equation", + "content": "\\langle 
\\bar{s},\\bar{a},\\bar{s}^{\\prime}\\rangle \\mapsto \\langle \\mathcal{G}_{\\theta}(\\bar{s}),\\psi_{\\theta}(\\bar{s},\\bar{a}),\\overline{\\mathcal{R}}_{\\theta}(\\bar{s},\\bar{a}),\\mathcal{G}_{\\theta}(\\bar{s}^{\\prime})\\rangle" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 701, + 307, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 701, + 307, + 716 + ], + "spans": [ + { + "bbox": [ + 106, + 701, + 307, + 716 + ], + "type": "inline_equation", + "content": "\\phi_{\\iota}^{A}" + }, + { + "bbox": [ + 106, + 701, + 307, + 716 + ], + "type": "text", + "content": " Action encoder mapping " + }, + { + "bbox": [ + 106, + 701, + 307, + 716 + ], + "type": "inline_equation", + "content": "\\overline{S}\\times \\mathcal{A}" + }, + { + "bbox": [ + 106, + 701, + 307, + 716 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 701, + 307, + 716 + ], + "type": "inline_equation", + "content": "\\Delta (\\overline{\\mathcal{A}})" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 719, + 270, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 719, + 270, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 719, + 270, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\theta}" + }, + { + "bbox": [ + 106, + 719, + 270, + 732 + ], + "type": "text", + "content": " State-wise decoder, from " + }, + { + "bbox": [ + 106, + 719, + 270, + 732 + ], + "type": "inline_equation", + "content": "\\bar{S}" + }, + { + "bbox": [ + 106, + 719, + 270, + 732 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 719, + 270, + 732 + ], + "type": "inline_equation", + "content": "S" + } + ] + } + ], + "index": 37 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + 
"content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 38 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 504, + 216 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 105, + 81, + 438, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 438, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 438, + 95 + ], + "type": "inline_equation", + "content": "Q_{\\iota}" + }, + { + "bbox": [ + 105, + 81, + 438, + 95 + ], + "type": "text", + "content": " Marginal encoding distribution over " + }, + { + "bbox": [ + 105, + 81, + 438, + 95 + ], + "type": "inline_equation", + "content": "\\overline{S} \\times \\overline{A} \\times \\overline{S}: \\mathbb{E}_{s,a,s' \\sim \\xi_{\\pi}} \\phi_{\\iota}(\\cdot \\mid s,a,s')" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 99, + 409, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 99, + 409, + 112 + ], + "spans": [ + { + "bbox": [ + 105, + 99, + 409, + 112 + ], + "type": "inline_equation", + "content": "\\bar{\\xi}_{\\bar{\\pi}_\\theta}" + }, + { + "bbox": [ + 105, + 99, + 409, + 112 + ], + "type": "text", + "content": " Stationary distribution of the latent model " + }, + { + "bbox": [ + 105, + 99, + 409, + 112 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 105, + 99, + 409, + 112 + ], + "type": "text", + "content": ", parameterized by " + }, + { + "bbox": [ + 105, + 99, + 409, + 112 + ], + "type": "inline_equation", + "content": "\\theta" + } + ] + } + ], + "index": 2 + }, + { + 
"bbox": [ + 106, + 115, + 236, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 115, + 236, + 128 + ], + "spans": [ + { + "bbox": [ + 106, + 115, + 236, + 128 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\xi_{\\overline{\\pi}}}" + }, + { + "bbox": [ + 106, + 115, + 236, + 128 + ], + "type": "text", + "content": " Steady-state regularizer" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 131, + 265, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 131, + 265, + 144 + ], + "spans": [ + { + "bbox": [ + 106, + 131, + 265, + 144 + ], + "type": "inline_equation", + "content": "\\varphi_{\\omega}^{\\xi}" + }, + { + "bbox": [ + 106, + 131, + 265, + 144 + ], + "type": "text", + "content": " Steady-state Lipschitz network" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 148, + 234, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 148, + 234, + 159 + ], + "spans": [ + { + "bbox": [ + 106, + 148, + 234, + 159 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 106, + 148, + 234, + 159 + ], + "type": "text", + "content": " Temperature parameter" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "text", + "content": " Distribution of drawing state-action pairs from interacting with " + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "text", + "content": ", embedding them to the latent spaces, and finally letting them transition to their successor state in " + }, + { + "bbox": [ 
+ 105, + 163, + 504, + 198 + ], + "type": "inline_equation", + "content": "\\overline{\\mathcal{M}}_{\\theta}" + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "text", + "content": ", in " + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "inline_equation", + "content": "\\Delta (\\bar{S}\\times \\bar{A}\\times \\bar{S})" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 201, + 257, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 201, + 257, + 216 + ], + "spans": [ + { + "bbox": [ + 106, + 201, + 257, + 216 + ], + "type": "inline_equation", + "content": "\\varphi_{\\omega}^{\\mathbf{P}}" + }, + { + "bbox": [ + 106, + 201, + 257, + 216 + ], + "type": "text", + "content": " Transition Lipschitz network" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 105, + 231, + 246, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 246, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 246, + 243 + ], + "type": "text", + "content": "ADDITIONAL REFERENCES" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 250, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 250, + 506, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 250, + 506, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 506, + 350 + ], + "type": "text", + "content": "Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S. 
Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow, Andrew Harp, Geoffrey Irving, Michael Isard, Yangqing Jia, Rafal Jozefowicz, Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dandelion Mane, Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Mike Schuster, Jonathon Shlens, Benoit Steiner, Ilya Sutskever, Kunal Talwar, Paul Tucker, Vincent Vanhoucke, Vijay Vasudevan, Fernanda Viégas, Oriol Vinyals, Pete Warden, Martin Wattenberg, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. TensorFlow: Large-scale machine learning on heterogeneous systems, 2015. URL https://www.tensorflow.org/. Software available from tensorflow.org." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 357, + 506, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 506, + 415 + ], + "type": "text", + "content": "Alexander A. Alemi, Ben Poole, Ian Fischer, Joshua V. Dillon, Rif A. Saurous, and Kevin Murphy. Fixing a broken ELBO. In Jennifer G. Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pp. 159-168. PMLR, 2018. URL http://proceedings.mlr.press/v80/alemi18a.html." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 422, + 506, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 506, + 445 + ], + "type": "text", + "content": "Joshua V. Dillon, Ian Langmore, Dustin Tran, Eugene Brevdo, Srinivas Vasudevan, Dave Moore, Brian Patton, Alex Alemi, Matt Hoffman, and Rif A. Saurous. Tensorflow distributions, 2017." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 453, + 506, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 506, + 509 + ], + "type": "text", + "content": "Zhe Dong, Bryan A. Seybold, Kevin Murphy, and Hung H. Bui. Collapsed amortized variational inference for switching nonlinear dynamical systems. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 2638-2647. PMLR, 2020. URL http://proceedings.mlr.press/v119/dong20e.html." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 517, + 506, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 517, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 506, + 584 + ], + "type": "text", + "content": "Sergio Guadarrama, Anoop Korattikara, Oscar Ramirez, Pablo Castro, Ethan Holly, Sam Fishman, Ke Wang, Ekaterina Gonina, Neal Wu, Efi Kokiopoulou, Luciano Sbaiz, Jamie Smith, Gábor Bartók, Jesse Berent, Chris Harris, Vincent Vanhoucke, and Eugene Brevdo. TF-Agents: A library for reinforcement learning in tensorflow. https://github.com/tensorflow/agents, 2018. URL https://github.com/tensorflow/agents. [Online; accessed 25-June-2019]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 592, + 506, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 506, + 637 + ], + "type": "text", + "content": "Junxian He, Daniel Spokoyny, Graham Neubig, and Taylor Berg-Kirkpatrick. Lapping inference networks and posterior collapse in variational autoencoders. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019. URL https://openreview.net/forum?id=rylDfnCqF7." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 644, + 507, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 644, + 507, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 507, + 678 + ], + "type": "text", + "content": "Matthew D. Hoffman, David M. Blei, Chong Wang, and John W. Paisley. Stochastic variational inference. J. Mach. Learn. Res., 14(1):1303-1347, 2013. URL http://dl.acm.org/citation.cfm?id=2502622." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 687, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 732 + ], + "type": "text", + "content": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. URL http://arxiv.org/abs/1412.6980." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 209 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "type": "text", + "content": "Diederik P. Kingma and Max Welling. Auto-encoding variational bayes. In Yoshua Bengio and Yann LeCun (eds.), 2nd International Conference on Learning Representations, ICLR 2014, Banff, AB, Canada, April 14-16, 2014, Conference Track Proceedings, 2014. URL http://arxiv.org/abs/1312.6114." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 133, + 506, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 133, + 506, + 156 + ], + "spans": [ + { + "bbox": [ + 107, + 133, + 506, + 156 + ], + "type": "text", + "content": "Vidyadhar G. Kulkarni. Modeling and Analysis of Stochastic Systems. Chapman & Hall, Ltd., GBR, 1995. ISBN 0412049910." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 163, + 506, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 163, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 107, + 163, + 506, + 209 + ], + "type": "text", + "content": "Ilya O. Tolstikhin, Olivier Bousquet, Sylvain Gelly, and Bernhard Scholkopf. Wasserstein autoencoders. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=HkL7n1-0b." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_content_list.json b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..59c35c27d134cbd9b9b4b58687c0d140c5793288 --- /dev/null +++ b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_content_list.json @@ -0,0 +1,2614 @@ +[ + { + 
"type": "text", + "text": "WEAKLY SUPERVISED EXPLAINABLE PHRASAL REASONING WITH NEURAL FUZZY LOGIC", + "text_level": 1, + "bbox": [ + 171, + 99, + 746, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zijun $\\mathbf{W}\\mathbf{u}^{*1}$ , Zi Xuan Zhang\\*, Atharva Naik+2, Zhijian Mei', Mauajama Firdaus', Lili Mou", + "bbox": [ + 181, + 167, + 839, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Dept. Computing Science & Alberta Machine Intelligence Institute (Amii), University of Alberta", + "bbox": [ + 181, + 184, + 826, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2Carnegie Mellon University", + "bbox": [ + 183, + 199, + 374, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zijun4, zixuan7, zimei1}@ualberta.ca, arnaik@cs.cmu.edu,", + "bbox": [ + 183, + 214, + 656, + 228 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{mauzama.03, doublepower.mou}@gmail.com", + "bbox": [ + 183, + 228, + 504, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Equal contribution, †Work done during the internship at UofA/Amii", + "bbox": [ + 183, + 241, + 635, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 292, + 547, + 308 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Natural language inference (NLI) aims to determine the logical relationship between two sentences, such as Entailment, Contradiction, and Neutral. In recent years, deep learning models have become a prevailing approach to NLI, but they lack interpretability and explainability. In this work, we address the explainability of NLI by weakly supervised logical reasoning, and propose an Explainable Phrasal Reasoning (EPR) approach. Our model first detects phrases as the semantic unit and aligns corresponding phrases in the two sentences. 
Then, the model predicts the NLI label for the aligned phrases, and induces the sentence label by fuzzy logic formulas. Our EPR is almost everywhere differentiable and thus the system can be trained end to end. In this way, we are able to provide explicit explanations of phrasal logical relationships in a weakly supervised manner. We further show that such reasoning results help textual explanation generation.1", + "bbox": [ + 228, + 323, + 769, + 491 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 513, + 338, + 530 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Natural language inference (NLI) aims to determine the logical relationship between two sentences (called a premise and a hypothesis), and target labels include Entailment, Contradiction, and Neutral (Bowman et al., 2015; MacCartney & Manning, 2008). Figure 1 gives an example, where the hypothesis contradicts the premise. NLI is important to natural language processing, because it involves logical reasoning and is a key problem in artificial intelligence. Previous work shows that NLI can be used in various downstream tasks, such as information retrieval (Karpukhin et al., 2020) and text summarization (Liu & Lapata, 2019).", + "bbox": [ + 169, + 545, + 826, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, deep learning has become a prevailing approach to NLI (Bowman et al., 2015; Mou et al., 2016; Wang & Jiang, 2016; Yoon et al., 2018). Especially, pretrained language models with the Transformer architecture (Vaswani et al., 2017) achieve state-of-the-art performance for the NLI task (Radford et al., 2018; Zhang et al., 2020). However, such deep learning models are black-box machinery and lack interpretability. 
In real applications, it is important to understand how these models make decisions (Rudin, 2019).", + "bbox": [ + 169, + 648, + 826, + 734 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Several studies have addressed the explainability of NLI models. Camburu et al. (2018) generate a textual explanation by sequence-to-sequence supervised learning, in addition to NLI classification; such an approach is multi-task learning of text classification and generation, which does not perform reasoning itself. MacCartney et al. (2008) propose a scoring model to align related phrases; Parikh et al. (2016) and Jiang et al. (2021) propose to obtain alignment by attention mechanisms. However, they only provide correlation information, instead of logical reasoning. Other work incorporates upward and downward monotonicity entailment reasoning for NLI (Hu et al., 2020; Chen et al., 2021), but these approaches are based on hand-crafted rules (e.g., every downward entailing some) and are restricted to Entailment only; they cannot handle Contradiction or Neutral.", + "bbox": [ + 169, + 739, + 828, + 867 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we address the explainability for NLI by weakly supervised phrasal logical reasoning. Our goal is to explain NLI predictions with phrasal logical relationships between the premise and", + "bbox": [ + 169, + 872, + 826, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Code and resources available at https://github.com/MANGA-UOFA/EPR", + "bbox": [ + 191, + 909, + 635, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "hypothesis. 
Intuitively, an NLI system with an explainable reasoning mechanism should be equipped with the following functionalities:", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The system should be able to detect corresponding phrases and tell their logical relationship, e.g., several men contradicting one man, but pull in a fishing net entailing holding the net (Figure 1).", + "2. The system should be able to induce sentence labels from phrasal reasoning. In the example, the two sentences are contradictory because there exists one contradictory phrase pair.", + "3. More importantly, such reasoning should be trained in a weakly supervised manner, i.e., the phrase-level predictions are trained from sentence labels only. Otherwise, the reasoning mechanism degrades to multi-task learning, which requires massive fine-grained human annotations." + ], + "bbox": [ + 174, + 138, + 825, + 237 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, we propose an Explainable Phrasal Reasoning (EPR) approach to the NLI task. Our model obtains phrases as semantic units, and aligns corresponding phrases by embedding similarity. Then, we predict the NLI labels (namely, Entailment, Contradiction, and Neutral) for the aligned phrases. Finally, we propose to induce the sentence-level label from phrasal labels in a fuzzy logic manner (Zadeh, 1988; 1996). Our model is differentiable, and the phrasal reasoning component can be trained", + "bbox": [ + 169, + 243, + 485, + 395 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/36d8599dc69495cec1040aad3f195d48f60ea647a67099348cbb3ffd1e91bf76.jpg", + "image_caption": [ + "Figure 1: The natural language inference (NLI) task and desired phrasal reasoning." 
+ ], + "image_footnote": [], + "bbox": [ + 498, + 246, + 823, + 349 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "with the weak supervision of sentence NLI labels. In this way, our EPR approach satisfies all the desired properties mentioned above.", + "bbox": [ + 169, + 396, + 823, + 424 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In our experiments, we developed a comprehensive methodology (data annotation and evaluation metrics) to quantitatively evaluate phrasal reasoning performance, which has not been accomplished in previous work. We extend previous studies and obtain plausible baseline models. Results show that our EPR yields a much more meaningful explanation regarding $F$ scores against human annotation.", + "bbox": [ + 169, + 431, + 825, + 488 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To further demonstrate the quality of extracted phrasal relationships, we feed them to a textual explanation model. Results show that our EPR reasoning leads to an improvement of 2 points in BLEU scores, achieving a new state of the art on the e-SNLI dataset (Camburu et al., 2018).", + "bbox": [ + 169, + 494, + 823, + 536 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions are summarized as follows:", + "bbox": [ + 171, + 542, + 473, + 556 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. We formulate a phrasal reasoning task for natural language inference (NLI), addressing the interpretability of neural models.", + "2. We propose an EPR model that induces sentence-level NLI labels from explicit phrasal logical labels by neural fuzzy logic. EPR is able to perform reasoning in a weakly supervised way.", + "3. We annotated phrasal logical labels and designed a set of metrics to evaluate phrasal reasoning. We further use our reasoning results to improve textual explanation generation. Our code and annotated data are released for future studies." 
+ ], + "bbox": [ + 174, + 564, + 825, + 660 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To the best of our knowledge, we are the first to develop a weakly supervised phrasal reasoning model for the NLI task.", + "bbox": [ + 169, + 667, + 823, + 696 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 726, + 346, + 742 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Natural Language Inference. MacCartney & Manning (2009) propose seven natural logic relations in addition to Entailment, Contradiction, and Neutral. MacCartney & Manning (2007) also distinguish upward entailment (every mammal upward entailing some mammal) and downward entailment (every mammal downward entailing every dog) as different categories. Manually designed lexicons and rules are used to interpret Entailment in a finer-grained manner, such as downward and upward entailment (Hu et al., 2020; Chen et al., 2021). Feng et al. (2020) apply such natural logic to NLI reasoning at the word level; however, our experiments will show that their word-level treatment is not an appropriate granularity, and they fail to achieve meaningful reasoning performance.", + "bbox": [ + 169, + 763, + 825, + 875 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The above reasoning schema focuses more on the quantifiers of first-order logic (Beltagy et al., 2016). However, the SNLI dataset (Bowman et al., 2015) we use only contains less than $5\\%$ samples with explicit quantifiers, and the seven-category schema complicates reasoning in the weakly supervised", + "bbox": [ + 169, + 881, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "setting. 
Instead, we adopt three-category NLI labels following the SNLI dataset. Our focus is entity-based reasoning, and the treatment of quantifiers is absorbed into phrases.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We also notice that previous work lacks explicit evaluation on the reasoning performance for NLI. For example, the SNLI dataset only provides sentence-level labels. The HELP (Yanaka et al., 2019a) and MED (Yanaka et al., 2019b) datasets concern monotonicity inference problems, where the label is also at the sentence level; they only consider Entailment, ignoring Contradiction and Neutral. Thus, we propose a comprehensive framework for the evaluation of NLI reasoning.", + "bbox": [ + 169, + 138, + 828, + 209 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "e-SNLI. Camburu et al. (2018) propose the e-SNLI task of textual explanation generation and use LSTM as a baseline. Kumar & Talukdar (2020) propose the NILE approach, using multiple decoders to generate explanations for all E, C, and N labels, and then predicting which to be selected. Zhao & Vydiswaran (2021) propose the LIREx approach, using additionally annotated rationales for explanation generation. Narang et al. (2020) finetune T5 with multiple explanation generation tasks. Although these systems can generate explanations, the nature of such finetuning approaches renders the explanation generator per se unexplainable. By contrast, we design a textual explanation generation model that utilizes our EPR's phrasal reasoning, obtained in a weakly supervised manner.", + "bbox": [ + 169, + 215, + 826, + 328 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Neuro-Symbolic Approaches. In recent years, neuro-symbolic approaches have attracted increasing interest in the AI and NLP communities for interpreting deep learning models. 
Typically, these approaches are trained by reinforcement learning or its relaxation, such as attention and Gumbel-softmax (Jang et al., 2017), to reason about certain latent structures in a downstream task.", + "bbox": [ + 169, + 334, + 826, + 391 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For example, Lei et al. (2016) and Liu et al. (2018) extract key phrases or sentences for a text classification task. Lu et al. (2018) extract entities and relations for document understanding. Liang et al. (2017) and Mou et al. (2017) perform SQL-like execution based on input text for semantic parsing. Xiong et al. (2017) hop over a knowledge graph for reasoning the relationships between entities. Li et al. (2019) and Deshmukh et al. (2021) model symbolic actions for unsupervised syntactic structure induction. In the vision domain, Mao et al. (2019) propose a neuro-symbolic approach to learn visual concepts. Our work addresses logical reasoning for the NLI task, which is not tackled in previous neuro-symbolic studies.", + "bbox": [ + 169, + 397, + 825, + 508 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Fuzzy Logic. Fuzzy logic (Zadeh, 1988; 1996) models an assertion and performs logic calculation with probability. For example, a quantifier (e.g., \"most\") and assertion (e.g., \"ill\") are modeled by a score in $(0,1)$ ; the score of a conjunction $s(x_{1} \\wedge x_{2})$ is the product of $s(x_{1})$ and $s(x_{2})$ . In old-school fuzzy logic studies, the mapping from language to the score is usually given by human-defined heuristics (Zadeh, 1988; Nozaki et al., 1997), and may not be suited to the task of interest. By contrast, we train neural networks to predict the probability of phrasal logical relations, and induce the sentence NLI label by fuzzy logic formulas. Thus, our approach takes advantage of both worlds of symbolism and connectionism. Mahabadi et al. (2020) apply fuzzy logic formulas to replace multi-layer perceptrons for NLI. 
But they are unable to provide expressive reasoning because their fuzzy logic works on sentence features. Our work is inspired by Mahabadi et al. (2020). However, we propose to apply fuzzy logic to the detected and aligned phrases, enabling our approach to provide reasoning in a symbolic (i.e., expressive) way. We develop our own fuzzy logic formulas, which are also different from Mahabadi et al. (2020).", + "bbox": [ + 169, + 515, + 825, + 696 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 OUR EPR APPROACH", + "text_level": 1, + "bbox": [ + 171, + 719, + 387, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we describe our EPR approach in detail, also shown in Figure 2. It has three main components: phrase detection and alignment, phrasal NLI prediction, and sentence label induction.", + "bbox": [ + 169, + 752, + 823, + 782 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Phrase Detection and Alignment. In NLI, a data point consists of two sentences, a premise and a hypothesis. We first extract content phrases from both input sentences by rules and heuristics. For example, $\\left[\\mathrm{AUX}\\right] + \\left[\\mathrm{NOT}\\right] + \\mathrm{VERB} + \\left[\\mathrm{RP}\\right]$ is treated as a verb phrase. Full details are presented in Appendix A.1. Compared with the word level (Parikh et al., 2016; Feng et al., 2020), a phrase is a more meaningful semantic unit for logical reasoning.", + "bbox": [ + 169, + 787, + 825, + 859 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We then align corresponding phrases in the two sentences based on cosine similarity. Let $\\mathrm{P} = (\\mathrm{p}_1,\\dots ,\\mathrm{p}_M)$ and $\\mathrm{H} = (\\mathrm{h}_1,\\dots ,\\mathrm{h}_N)$ be the premise and hypothesis, respectively, where $\\mathrm{p}_m$ and $\\mathrm{h}_n$ are extracted phrases. 
We apply Sentence-BERT (Reimers & Gurevych, 2019) to each individual phrase and obtain the local phrase embeddings by $\\pmb {p}_m^{(L)} = \\mathrm{SBERT}(\\mathrm{p}_m),\\pmb {h}_n^{(L)} = \\mathrm{SBERT}(\\mathrm{h}_n)$ . We", + "bbox": [ + 169, + 864, + 825, + 926 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/07d7923cf7122154318a4e7621f0c71d8c910a16064898df435c9314cf0f5e25.jpg", + "image_caption": [ + "Figure 2: An overview of our Explainable Phrasal Reasoning (EPR) model." + ], + "image_footnote": [], + "bbox": [ + 173, + 85, + 823, + 253 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/097447f9650fc5c325d0f2583396151e431bcfe6cf322f290e1f781c90abffde.jpg", + "table_caption": [ + "Table 1: An example showing the importance of handling unaligned phrases (in highlight)." + ], + "table_footnote": [], + "table_body": "
Premise\nHypothesisPeople are shopping for fruit.\nPeople are shopping for fruit in the market.People are shopping for fruit in the market.\nPeople are shopping for fruit.
Sentence NLI[ ] Entailment [ ] Contradiction [√] Neutral[√] Entailment [ ] Contradiction [ ] Neutral
", + "bbox": [ + 192, + 301, + 808, + 345 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "also apply Sentence-BERT to the entire premise and hypothesis sentences to obtain the global phrase embeddings $\pmb{p}_m^{(G)}$ and $\pmb{h}_n^{(G)}$ by mean-pooling the features of the words in the phrase. The phrase similarity is given by", + "bbox": [ + 169, + 359, + 823, + 406 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\operatorname {s i m} \left(\mathrm {p} _ {m}, \mathrm {h} _ {n}\right) = \gamma \cos \left(\boldsymbol {p} _ {m} ^ {(G)}, \boldsymbol {h} _ {n} ^ {(G)}\right) + (1 - \gamma) \cos \left(\boldsymbol {p} _ {m} ^ {(L)}, \boldsymbol {h} _ {n} ^ {(L)}\right) \tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 410, + 823, + 429 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\gamma$ is a hyperparameter balancing the lexical and contextual representations of a phrase (Hewitt & Manning, 2019). It is noted that Sentence-BERT is finetuned on paraphrase datasets, and thus is more suitable for phrasal similarity matching than pretrained language models (Devlin et al., 2019).", + "bbox": [ + 169, + 431, + 826, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We obtain phrase alignment between the premise and hypothesis in a heuristic way. For every phrase $\mathrm{p}_m$ in the premise, we look for the most similar phrase $\mathrm{h}_n$ from the hypothesis by", + "bbox": [ + 169, + 479, + 823, + 510 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nn = \operatorname {a r g m a x} _ {n ^ {\prime}} \operatorname {s i m} \left(\mathrm {p} _ {m}, \mathrm {h} _ {n ^ {\prime}}\right) \tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 513, + 823, + 530 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Likewise, for every phrase $\mathrm{h}_n$ in the hypothesis, we look for the most similar phrase $\mathrm{p}_m$ from the premise. 
A phrase pair $(\\mathrm{p}_m, \\mathrm{h}_n)$ is considered to be aligned if $\\mathrm{h}_n$ is selected as the closest phrase to $\\mathrm{p}_m$ , and $\\mathrm{p}_m$ is the closest to $\\mathrm{h}_n$ . Such hard alignment differs from commonly used soft attention-based approaches (Parikh et al., 2016). Our alignment method can ensure the quality of phrase alignment, and more importantly, leave other phrases unaligned (e.g., helping each other in Figure 1), which are common in the NLI task. The process is illustrated in Figure 2a.", + "bbox": [ + 169, + 534, + 825, + 617 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Phrasal NLI Prediction. Our model then predicts the logical relationship of an aligned phrase pair $(p, h)$ among three target labels: Entailment, Contradiction, and Neutral. While previous work (Feng et al., 2020) identifies finer-grained labels for NLI, we do not follow their categorization, because it complicates the reasoning process and makes weakly supervised training more difficult. Instead, we adopt a three-way phrasal classification, which is consistent with sentence NLI labels.", + "bbox": [ + 169, + 625, + 826, + 695 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We represent a phrase, say, $p$ in the premise, by a vector embedding, and we consider two types of features: a local feature $\\pmb{p}^{(L)}$ and a global feature $\\pmb{p}^{(G)}$ , re-used from the phrase alignment component. They are concatenated as the phrase representation $\\pmb{p} = [p^{(L)}; p^{(G)}]$ . Likewise, the phrase representation for a hypothesis phrase $h$ is obtained in a similar way. Intuitively, local features force the model to perform reasoning in a serious manner, but global features are important to sentence-level prediction. 
Such intuition is also verified in an ablation study (§ 4.2).", + "bbox": [ + 169, + 700, + 823, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Then, we use a neural network to predict the phrasal NLI label (Entailment, Contradiction, and Neutral). This is given by the standard heuristic matching (Mou et al., 2016) based on phrase embeddings, followed by a multi-layer perceptron (MLP) and a three-way softmax layer:", + "bbox": [ + 169, + 794, + 823, + 837 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ P _ {\\text {p h r a s e}} (\\mathsf {E} | \\mathrm {p}, \\mathrm {h}); P _ {\\text {p h r a s e}} (\\mathsf {C} | \\mathrm {p}, \\mathrm {h}); P _ {\\text {p h r a s e}} (\\mathsf {N} | \\mathrm {p}, \\mathrm {h}) \\right] = \\operatorname {s o f t m a x} (\\operatorname {M L P} \\left(\\left[ \\boldsymbol {p}; \\boldsymbol {h}; \\left| \\boldsymbol {p} - \\boldsymbol {h} \\right|; \\boldsymbol {p} \\circ \\boldsymbol {h} \\right]\\right)) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 840, + 823, + 857 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\circ$ is the element-wise product, and the semicolon refers to column vector concatenation. E, C, and N refer to the Entailment, Contradiction, and Neutral labels, respectively.", + "bbox": [ + 169, + 859, + 826, + 888 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "It should be mentioned that a phrase may be unaligned, but plays an important role in sentence-level NLI prediction, as shown in Table 1. 
Thus, we would like to predict phrasal NLI labels for unaligned", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "phrases as well, but pair them with a special token $(\\mathrm{p}_{\\langle \\mathrm{EMPTY}\\rangle}$ or $\\mathrm{h}_{\\langle \\mathrm{EMPTY}\\rangle})$ , whose embedding is randomly initialized and learned by back-propagation.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Sentence Label Induction. We observe the sentence NLI label can be logically induced from phrasal NLI labels. Based on the definition of the NLI task, we develop the following induction rules.", + "bbox": [ + 169, + 138, + 823, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Entailment Rule: According to Bowman et al. (2015), a premise entailing a hypothesis means that, if the premise is true, then the hypothesis must be true. We find that this can be oftentimes transformed into phrasal relationships: a premise entails the hypothesis if all paired phrases have the label Entailment.", + "bbox": [ + 169, + 172, + 825, + 231 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Let $\\{(\\mathrm{p}_k,\\mathrm{h}_k)\\}_{k = 1}^K\\bigcup \\{(\\mathrm{p}_k,\\mathrm{h}_k)\\}_{k = K + 1}^{K'}$ be all phrase pairs. For $k = 1,\\dots ,K$ , they are aligned phrases; for $k = K + 1,\\dots ,K'$ , they are unaligned phrases paired with the special token, i.e., $\\mathrm{p}_k = \\mathrm{p}_{\\langle \\mathrm{EMPTY}\\rangle}$ or $\\mathrm{h}_k = \\mathrm{h}_{\\langle \\mathrm{EMPTY}\\rangle}$ . 
Then, we induce a sentence-level Entailment score by", + "bbox": [ + 169, + 234, + 826, + 284 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {s e n t e n c e}} (\\mathsf {E} | \\mathrm {P}, \\mathrm {H}) = \\left[ \\prod_ {k = 1} ^ {K ^ {\\prime}} P _ {\\text {p h r a s e}} (\\mathsf {E} | \\mathrm {p} _ {k}, \\mathrm {h} _ {k}) \\right] ^ {\\frac {1}{K ^ {\\prime}}} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 287, + 823, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This works in a fuzzy logic fashion (Zadeh, 1988; 1996), deciding whether the sentence-level label should be Entailment considering the average of phrasal predictions. Here, we use the geometric mean, because it is biased towards low scores, i.e., if there exists one phrase pair with a low Entailment score, then the chance of sentence label being Entailment is also low. Unaligned pairs should be considered in Eq. (4), because an unaligned phrase may indicate Entailment, shown in the second example of Table 1. Notice that the resulting value $S_{\\text{sentence}}(\\mathsf{E}|\\mathsf{P}, \\mathsf{H})$ is not normalized with respect to Contradiction and Neutral; thus, we call it a score (instead of probability), which will be normalized afterwards.", + "bbox": [ + 169, + 319, + 823, + 431 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Contradiction Rule: Two sentences are contradictory if there exists (at least) one paired phrase labeled as Contradiction. 
The fuzzy logic version of this induction rule is given by", + "bbox": [ + 169, + 438, + 823, + 468 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\text {s e n t e n c e}} (\mathsf {C} | \mathrm {P}, \mathrm {H}) = \max _ {k = 1, \dots , K} P _ {\text {p h r a s e}} (\mathsf {C} | \mathrm {p} _ {k}, \mathrm {h} _ {k}) \tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 472, + 823, + 488 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, the max operator is used in the induction, because the contradiction rule is an existential statement, i.e., there exist(s) $\cdots$ . Also, unaligned phrases are excluded in calculating the sentence-level Contradiction score, because an unaligned phrase indicates the corresponding information is missing in the other sentence and it cannot be Contradiction (recall examples in Table 1).", + "bbox": [ + 169, + 493, + 825, + 550 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Rule for Neutral: Two sentences are neutral if there exists (at least) one neutral phrase pair, but there does not exist any contradictory phrase pair. The fuzzy logic formula is", + "bbox": [ + 169, + 555, + 823, + 585 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\text {s e n t e n c e}} (\mathsf {N} | \mathrm {P}, \mathrm {H}) = \left[ \max _ {k = 1, \dots , K ^ {\prime}} P _ {\text {p h r a s e}} (\mathsf {N} | \mathrm {p} _ {k}, \mathrm {h} _ {k}) \right] \cdot \left[ 1 - S _ {\text {s e n t e n c e}} (\mathsf {C} | \mathrm {P}, \mathrm {H}) \right] \tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 243, + 588, + 823, + 607 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The first factor determines whether there exists a Neutral phrase pair (including unaligned phrases, illustrated in the first example in Table 1). 
The second factor evaluates the negation of \"at least one contradictory phrase,\" as suggested in the second clause of the Rule for Neutral.", + "bbox": [ + 169, + 609, + 825, + 654 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, we normalize the scores into probabilities by dividing the sum, since all the scores are already positive. This is given by", + "bbox": [ + 169, + 659, + 823, + 689 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP _ {\\text {s e n t e n c e}} (\\mathrm {L} | \\cdot) = \\frac {1}{Z} S _ {\\text {s e n t e n c e}} (\\mathrm {L} | \\cdot) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 397, + 693, + 823, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathsf{L}\\in \\{\\mathsf{E},\\mathsf{C},\\mathsf{N}\\}$ , and $Z = S_{\\text{sentence}}(\\mathsf{E}|\\cdot) + S_{\\text{sentence}}(\\mathsf{C}|\\cdot) + S_{\\text{sentence}}(\\mathsf{N}|\\cdot)$ is the normalizing factor.", + "bbox": [ + 169, + 713, + 818, + 729 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training and Inference. We use cross-entropy loss to train our EPR model by minimizing $-\\log P_{\\text{sentence}}(\\mathsf{t}|\\cdot)$ , where $\\mathsf{t} \\in \\{\\mathsf{E}, \\mathsf{C}, \\mathsf{N}\\}$ is the groundtruth sentence-level label.", + "bbox": [ + 169, + 734, + 823, + 765 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our underlying logical reasoning component can be trained end-to-end by back-propagation in a weakly supervised manner, because the fuzzy logic rules are almost everywhere differentiable. Although the max operators in Eqs. 
(5) and (6) may not be differentiable at certain points, they are common in max-margin learning and the rectified linear unit (ReLU) activation functions, and do not cause trouble in back-propagation.", + "bbox": [ + 169, + 770, + 825, + 842 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Once our EPR model is trained, we can obtain both phrasal and sentence-level labels. This is accomplished by performing argmax on the predicted probabilities (3) and (7), respectively.", + "bbox": [ + 169, + 847, + 823, + 877 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "In traditional fuzzy logic, the conjunction is given by probability product (Zadeh, 1988). We find that this gives a too small Entailment score compared with Contradiction and Neutral scores, causing difficulties in end-to-end training. Thus, we take the geometric mean and maintain all the scores in the same magnitude.", + "bbox": [ + 169, + 883, + 825, + 925 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Improving Textual Explanation. Camburu et al. (2018) annotated a dataset to address NLI interpretability by generating an explanation sentence. For the example in Figure 1, the reference explanation is \"There cannot be one man and several men at same time.\"", + "bbox": [ + 169, + 103, + 826, + 147 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this part, we apply the predicted phrasal logical relationships to textual explanation generation and examine whether our EPR's output can help a downstream task. Figure 3 shows the overview of our textual explanation generator. 
We concatenate the premise and hypothesis in the form of “Premise : Hypothesis : …,” and feed it to a standard Transformer encoder (Vaswani et al., 2017).", + "bbox": [ + 169, + 152, + 826, + 209 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We utilize the phrase pairs and our predicted phrasal labels as factual knowledge to enhance the decoder. Specifically, our EPR model yields a set of tuples $\\{(\\mathrm{p}_k,\\mathrm{h}_k,\\mathrm{l}_k)\\}_{k = 1}^K$ for a sample, where $\\mathbf{l}_k\\in \\{\\mathsf{E},\\mathsf{N},\\mathsf{C}\\}$ is the predicted phrasal label for the aligned phrases, $\\mathrm{p}_k$ and $\\mathrm{h}_k$ . We embed phrases by Sentence-BERT: $\\pmb{p}^{(L)}$ and $\\pmb{h}^{(L)}$ ; the phrasal label is represented by a one-hot vector $\\pmb{l}_k = \\mathrm{onehot}(\\mathrm{l}_k)$ . They are concatenated as a vector $\\pmb{m}_k = [\\pmb {p}_k;\\pmb {h}_k;\\pmb {l}_k]$ . We compose the vectors as a factual memory matrix $\\mathbf{M} = [m_1^\\top ;\\dots ;m_K^\\top ]\\in \\mathbb{R}^{K\\times d}$ , where $d$ is the dimension of $\\pmb{m}_k$ .", + "bbox": [ + 169, + 215, + 826, + 303 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our decoder follows a standard Transformer architecture (Vaswani et al., 2017), but is equipped with additional attention mechanisms to the factual memory. Consider the $i$ th decoding step. We feed the factual memory to an MLP as $\\tilde{\\mathbf{M}} = \\mathrm{MLP}(\\mathbf{M})$ . 
We compute attention $\pmb{a}$ over $\tilde{\mathbf{M}}$ with the embedding of the input $\pmb{y}_{i-1}$, and aggregate factual information $\pmb{c}$ for the rows $\pmb{m}_k$ in $\mathbf{M}$:", + "bbox": [ + 169, + 306, + 509, + 422 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\boldsymbol {a} = \operatorname {s o f t m a x} (\tilde {\mathbf {M}} \boldsymbol {y} _ {i - 1}), \quad \boldsymbol {c} = \sum_ {k = 1} ^ {K} a _ {k} \tilde {\boldsymbol {m}} _ {k} ^ {\top}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 426, + 493, + 454 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $a_{k}$ is the kth element of the vector $\pmb{a}$ and $\tilde{\pmb{m}}_k$ is the kth row of the matrix $\tilde{\mathbf{M}}$ . The factual information $\pmb{c}$ is fed to another layer $\pmb{g}_i = \mathrm{MLP}([c; y_{i-1}]) + c$ .", + "bbox": [ + 169, + 458, + 589, + 502 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/59828ff8e82cdca43543a438f05e4fee8d4571b8d4ce9ba761fa4e8c3e224c45.jpg", + "image_caption": [ + "Figure 3: Overview of the model for textual explanation generation." + ], + "image_footnote": [], + "bbox": [ + 517, + 314, + 823, + 441 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our Transformer decoder layer starts with self-attention $\tilde{q}_i = \mathrm{SelfAttn}(g_i)$ . Then, residual connection and layer normalization are applied as $q_{i} = \mathrm{LayerNorm}(\tilde{q}_{i} + g_{i})$ . A cross-attention mechanism obtains input information by $v_{i} = \mathrm{CrossAttn}(q_{i},\mathbf{H})$ , where $\mathbf{H}$ is the representation given by the encoder. $v_{i}$ is fed to the Transformer's residual connection and layer normalization sub-layer. Multiple Transformer layers as mentioned above are stacked to form a deep architecture. 
The model is trained by standard cross-entropy loss against the reference explanation as in previous work (Kumar & Talukdar, 2020; Zhao & Vydiswaran, 2021; Narang et al., 2020).", + "bbox": [ + 169, + 507, + 826, + 606 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this way, the model is enhanced with factual information given by our EPR weakly supervised reasoning. Experiments will show that it largely improves the BLEU score by 2 points (§ 4.2), being a new state of the art. This further verifies that our EPR indeed yields meaningful phrasal explanations.", + "bbox": [ + 169, + 612, + 826, + 656 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 674, + 328, + 690 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 DATASETS AND EVALUATION METRICS", + "text_level": 1, + "bbox": [ + 171, + 705, + 486, + 720 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The main dataset we used in our experiments is the Stanford Natural Language Inference (SNLI) dataset (Bowman et al., 2015), which consists of 550K training samples, 10K validation samples, and another 10K test samples. Each data sample consists of two sentences (premise and hypothesis) and a sentence-level groundtruth label. For sentence-level NLI prediction, we still use accuracy to evaluate our approach, following previous work (Parikh et al., 2016; Chen et al., 2017; Radford et al., 2018).", + "bbox": [ + 169, + 727, + 826, + 799 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To evaluate the phrasal reasoning performance, we need additional human annotation and evaluation metrics, because most previous work only considers sentence-level performance (Feng et al., 2020) and has not performed quantitative phrasal reasoning evaluation. Although Camburu et al. (2018) annotated phrase highlights in their e-SNLI dataset, they are incomplete and do not provide logical relationships. 
Our annotators selected relevant phrases from two sentences and tagged them with phrasal NLI labels; they also selected and tagged unaligned phrases.", + "bbox": [ + 169, + 804, + 825, + 888 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "3A groundtruth label is for a data point, which consists of two sentences. We call it a sentence-level label instead of phrasal labels.", + "bbox": [ + 169, + 896, + 823, + 924 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/4be99ff9235f201abf591af921705c8874960cd7b168cb3fe108a2986d9be9ac.jpg", + "table_caption": [ + "Table 2: Main results on the SNLI dataset. †Quoted from respective papers. ‡Obtained from the checkpoint sent by the authors. Other results are obtained by our experiments. GM and AM are the geometric and arithmetic means of the $F$ scores." + ], + "table_footnote": [], + "table_body": "
ModelSent AccReasoning Performance
FEFCFNFUPFUHGMAM
Human-84.7171.0155.1282.4661.8070.0771.02
Non-reasoning
Mahabadi et al. (2020)†85.1-------
LSTM (Wang & Jiang, 2016)†86.1-------
Transformer (Radford et al., 2018)89.9-------
SBERT (Reimers & Gurevych, 2019)91.4-------
Baselines
NNL (Feng et al., 2020)‡79.9162.7217.491.5066.220.000.0029.59
STP85.7662.4034.7637.0476.6151.8050.2052.52
GPT-3-Davinci (Brown et al., 2020)-53.7558.0016.1252.2431.0838.2342.24
Ours
EPR (Local, LM unfinetuned)76.33±0.4883.11±0.2938.73±0.8544.63±0.8876.6151.8056.39±0.4358.98±0.34
EPR (Local, LM finetuned)79.36±0.1382.44±0.2644.10±1.3244.69±3.2276.6151.8057.77±0.8559.93±0.67
EPR (Concat, LM unfinetuned)84.53±0.1973.29±0.6837.95±1.1640.56±1.1076.6151.8053.73±0.3956.04±0.33
EPR (Concat, LM finetuned)87.56±0.1569.91±1.2139.97±2.1243.31±2.7876.6151.8054.46±1.3556.32±1.13
", + "bbox": [ + 174, + 125, + 823, + 303 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further propose a set of $F$ -scores, which are a balanced measure of precision and recall between human annotation and model output for Entailment, Contradiction, Neutral, and Unaligned in terms of word indexes. Details of human annotation and evaluation metrics are shown in Appendix B.", + "bbox": [ + 169, + 323, + 826, + 367 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The inter-annotator agreement is presented in Table 2 in comparison with model performance (detailed in the next part). Here, we compute the agreement by treating one annotator as the ground truth and another as the system output; the score is averaged among all annotator pairs. As seen, humans generally achieve high agreement with each other, whereas model performance is relatively low. This shows that our task and metrics are well-defined, yet phrasal logical reasoning is a challenging task for machine learning models.", + "bbox": [ + 169, + 372, + 823, + 455 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Textual explanation generation was evaluated on the e-SNLI dataset (Camburu et al., 2018), which extends the SNLI dataset with one reference explanation for each training sample, and three reference explanations for each validation or test sample. Each reference explanation comes with highlighted rationales, a set of annotated words in the premise or hypothesis considered as the reason for the explanation annotation. We do not use these highlighted rationales, but enhance the neural model with EPR output for textual explanation generation. We follow previous work (Camburu et al., 2018; Narang et al., 2020), adopting BLEU (Papineni et al., 2002) and SacreBLEU (Post, 2018) scores as the evaluation metrics; they mainly differ in the tokenizer. Camburu et al. 
(2018) also report low consistency of the third annotated reference, and thus use only two references for evaluation. In our study, we consider both two-reference and three-reference BLEU/SacreBLEU. Appendix A.2 provides additional implementation details of textual explanation generation.", + "bbox": [ + 169, + 462, + 826, + 614 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 RESULTS", + "text_level": 1, + "bbox": [ + 171, + 638, + 277, + 651 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Phrasal Reasoning Performance. To the best of our knowledge, phrasal reasoning for NLI was not explicitly evaluated in previous literature. Therefore, we propose plausible extensions to previous studies as our baselines. We consider the study of Neural Natural Logic (NNL, Feng et al., 2020) as the first baseline. It applies an attention mechanism (Parikh et al., 2016), so that each word in the hypothesis is softly aligned with the words in the premise. Then, each word in the hypothesis is predicted with one of the seven natural logic relations proposed by MacCartney & Manning (2009). We consider the maximum attention score as the alignment, and map their seven natural logic relations to our three-category NLI labels: Equivalence, ForwardEntailment $\\mapsto$ Entailment; Negation, Alternation $\\mapsto$ Contradiction; and ReverseEntailment, Cover, Independence $\\mapsto$ Neutral.", + "bbox": [ + 169, + 666, + 826, + 792 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2 shows that the word-level NNL approach cannot perform meaningful phrasal reasoning, although our metrics have already excluded explicit evaluation of phrases. The low performance is because their soft attention leads to many misalignments, whereas their seven-category logical relations are too fine-grained and cause complications in weakly supervised reasoning. 
In addition, NNL does not allow unaligned words in the hypothesis, showing that such a model is inadequate for NLI reasoning. By contrast, our EPR model extracts phrases of meaningful semantic units, being an appropriate granularity of logical reasoning. Moreover, we work with three-category NLI labels following the sentence-level NLI task formulation. This actually restricts the model's capacity, forcing the model to perform serious phrasal reasoning.", + "bbox": [ + 169, + 799, + 826, + 924 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/542a9781d66257c9e1cbe4d880a2bd26cc1b2ee4d366cd4d7af5d38896e12a6d.jpg", + "table_caption": [ + "Table 3: Results of ablation studies on SNLI." + ], + "table_footnote": [], + "table_body": "
ModelFeaturesSent AccReasoning Performance
FEFCFNFUPFUHGMAM
Full modelLocal76.33±0.4883.11±0.2938.73±0.8544.63±0.8876.6151.8056.39±0.4358.98±0.34
Global84.03±0.1270.84±0.6035.12±0.9036.37±1.5276.6151.8051.41±0.6254.15±0.41
Concat84.53±0.1973.29±0.6837.95±1.1640.56±1.1076.6151.8053.73±0.3956.04±0.33
Random chunkerLocal72.4463.2122.6532.0465.9436.1340.5343.99
Global82.8158.0930.6427.4965.9436.1341.0543.66
Concat83.0958.7532.4131.1465.9436.1342.6644.87
Semantic role labelingLocal71.1073.7929.3928.9970.1943.1145.2749.09
Global82.8160.1432.0730.4870.1943.1144.6747.20
Concat83.1161.6431.7628.3370.1943.1144.1547.01
Random alignmentLocal68.5259.3221.7926.2051.4316.5031.0235.05
Global81.9953.8535.1031.3951.4316.5034.7137.66
Concat82.4957.2234.8330.9151.4316.5034.9738.18
Mean inductionLocal79.6177.3837.1436.1376.6151.8052.8455.81
Global83.8255.0829.9224.7076.6151.8043.8247.62
Concat84.9657.1231.9331.4176.6151.8046.9249.77
", + "bbox": [ + 176, + 95, + 823, + 282 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In addition, we include another intuitive SBERT-based competing model for comparison. We first apply our own heuristics of phrase detection and alignment (thus, the model will have the same $F_{\\mathsf{UP}}$ and $F_{\\mathsf{UH}}$ scores); then, we directly train the phrasal NLI predictor by sentence-level labels. We obtain the sentence NLI prediction by taking argmax over Eq. (7). We call this STP (Sentence label Training Phrases). As seen, STP provides some meaningful phrasal reasoning results, because the training can smooth out the noise of phrasal labels, which are directly set as the sentence-level labels. But still, its performance is significantly lower than our EPR model.", + "bbox": [ + 169, + 295, + 823, + 393 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We experimented with a baseline of few-shot prompting with GPT-3 (Brown et al., 2020), and the implementation detail is shown in Appendix A.2. We see that GPT-3 is able to provide more or less meaningful reasoning, and surprisingly the contradiction $F$ -score is higher than all competing methods. However, the overall mean $F$ scores are much lower. The results show that phrasal reasoning is challenging for pretrained language models, highlighting the importance of our task formulation and the proposed EPR approach even in the prompting era.", + "bbox": [ + 169, + 398, + 823, + 484 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Among our EPR variants, we see that EPR with local phrase embeddings achieves the highest reasoning performance, and that EPR with concatenated features achieves a good balance between sentence-level accuracy and reasoning. Our EPR variants were run 5 times with different initialization, and standard deviations are also reported in Table 3. 
As seen, our improvement compared with the best baseline is around 9.1-10.7 times the standard deviation in mean $F$ scores, which is a large margin. Suppose the $F$ scores are Gaussian distributed, $^{4}$ the improvement is also statistically significant ( $p$ -value $< 4.5\\mathrm{e} - 20$ comparing our worse variant with the best competing model by one-sided test).", + "bbox": [ + 169, + 489, + 823, + 589 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We further compare our EPR with non-reasoning models (Wang & Jiang, 2016; Radford et al., 2018), which are unable to provide phrasal explanations but may or may not achieve high sentence accuracy. The results show that our phrasal EPR model hurts the sentence-level accuracy by 2-4 points, when the model architecture is controlled. This resonates with traditional symbolic AI approaches (MacCartney & Manning, 2008), where interpretable models may not outperform black-box neural networks. Nevertheless, our sentence-level accuracy is still decent, outperforming a few classic neural models, including fuzzy logic applied to sentence embeddings (Mahabadi et al., 2020).", + "bbox": [ + 169, + 594, + 826, + 694 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Analysis. We consider several ablated models to verify the effect of every component in our EPR model. (1) Random chunker, which splits the sentence randomly based on the number of chunks detected by our system. (2) Random aligner, which randomly aligns phrases but keeps the number of aligned phrases unchanged. (3) Semantic role labeling, which uses the semantic roles, detected by AllenNLP (Gardner et al., 2018), as the reasoning unit. (4) Mean induction, which induces the sentence NLI label by the geometric mean of phrasal NLI prediction. In addition, we consider local phrase embedding features, global features, and their concatenation for the above model variants. 
Due to a large number of settings, each variant was run only once; we do not view this as a concern because Table 2 shows a low variance of our approach. Also, the underlying language model is un-finetuned in our ablation study, as it yields slightly lower performance but is much more efficient.", + "bbox": [ + 169, + 699, + 826, + 839 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As seen in Table 3, the random chunker and aligner yield poor phrasal reasoning performance, showing that working with meaningful semantic units and their alignments is important to logical reasoning. This also verifies that our word index-based metrics are able to evaluate phrase detection", + "bbox": [ + 169, + 845, + 823, + 888 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "4When the score has a low standard deviation, a Gaussian distribution is a reasonable assumption because the probability of exceeding the range of $F$ scores is extremely low.", + "bbox": [ + 169, + 897, + 823, + 924 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and alignment in an implicit manner. We further applied semantic role labeling as our reasoning unit. We find its performance is higher than the random chunker but lower than our method. This is because semantic role labeling is verb-centric, and the extracted spans may be incomplete.", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Interestingly, local features yield higher reasoning performance, but global and concatenated features yield higher sentence accuracy. This is because global features provide aggregated information of the entire sentence and allow the model to bypass meaningful reasoning. 
In the variant of the mean induction, for example, the phrasal predictor can simply learn to predict the sentence-level label with global sentence information; then, the mean induction is an ensemble of multiple predictors. In this way, it achieves the highest sentence accuracy (0.43 points higher than our full model with concatenated features), but is 6 points lower in reasoning performance.", + "bbox": [ + 169, + 152, + 826, + 252 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This reminds us of the debate between old schools of AI (Chandrasekaran et al., 1988; Boucher & Dienes, 2003; Goel, 2022). Recent deep learning models take the connectionists' view, and generally outperform symbolists' approaches in terms of the ultimate prediction, but they lack expressible explanations. Combining neural and symbolic methods becomes a hot direction in recent AI research (Liang et al., 2017; Dong et al., 2018; Yi et al., 2018). In general, our EPR model with global features achieves high performance in both reasoning and ultimate prediction for the NLI task.", + "bbox": [ + 169, + 257, + 826, + 342 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Results of Textual Explanation Generation. In this part, we apply EPR's predicted output—phrasal logical relationships—as factual knowledge to textual explanation generation. Most previous studies use the groundtruth sentence-level NLI label and/or highlighted rationales. This requires human annotations, which are resource-consuming to obtain. By contrast, we require no extra human-annotated resources; our factual knowledge is based on our weakly supervised reasoning approach.", + "bbox": [ + 169, + 348, + 426, + 542 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4: Textual explanation results on e-SNLI. Previous work uses auxiliary information (L: the groundtruth NLI label; H: human-annotated highlights), but we use neither. ${}^{ \\dagger }$ Quoted from respective papers. 
${}^{ \\ddagger }$ Evaluated by checkpoints. ${}^{\\parallel }$ Our replication with provided code.", + "bbox": [ + 433, + 348, + 826, + 421 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/c93ddad4ed53eb11322db0d24cf49b191601d3a191504367d83106df319839cd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelInfoBLEUSacreBLEU
LH2 refs3 refs2 refs3 refs
Camburu et al. (2018)†--27.58---
NILE (Kumar & Talukdar, 2020)∥-28.5737.7332.5141.78
NILE (Kumar & Talukdar, 2020)‡-28.6737.8432.7442.06
FinetunedWT5220M (Narang et al., 2020)†---32.40-
FinetunedWT511B (Narang et al., 2020)†---33.70-
LIREx (Zhao & Vydiswaran, 2021)∥17.2222.4021.2426.68
Finetune T560M--27.7536.7831.7440.89
+ Annotated Highlights64M27.9136.9032.2041.21
+ EPR Outputs64M (ours)--29.9138.3033.9642.63
", + "bbox": [ + 436, + 425, + 823, + 540 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4 shows our explanation generation performance on e-SNLI. Since evaluation metrics are not consistently used for explanation generation in previous studies, we replicate the approaches when the code or checkpoint is available. For large pretrained models, we quote results from the previous paper (Narang et al., 2020). Their model is called WT5, having 220M or 11B parameters depending on the underlying T5 model. Profoundly, we achieve higher performance with 60M-parameter T5-small, which is $3.3\\mathrm{x}$ and $170\\mathrm{x}$ smaller in model size than the two WT5 variants.", + "bbox": [ + 169, + 549, + 825, + 633 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In addition, we conducted a controlled experiment using the rationale highlights annotated by Camburu et al. (2018) for e-SNLI. It achieves a relatively small increase of 0.2-0.5 BLEU points, whereas our EPR's outputs yield a 2-point improvement. The difference in the performance gains shows that our EPR's phrasal logical relationships provide more valuable information than human-annotated highlights. In general, we achieve a new state of the art on e-SNLI with a small language model, demonstrating the importance of phrasal reasoning in textual explanations.", + "bbox": [ + 169, + 638, + 826, + 724 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Additional Results. We show additional results as appendices. § C.1: Reasoning performance on the MNLI dataset; § C.2: Error analysis; § C.3: Case studies of our EPR model; and § C.4: Case studies of textual explanation generation.", + "bbox": [ + 169, + 729, + 823, + 772 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Conclusion. The paper proposes an explainable phrasal reasoning (EPR) model for NLI with neural fuzzy logic, trained in a weakly supervised manner. 
We further propose an experimental design, including data annotation, evaluation metrics, and plausible baselines. Results show that phrasal reasoning for NLI is a meaningfully defined task, as humans can achieve high agreement. Our EPR achieves decent sentence-level accuracy, but much higher reasoning performance than all competing models. We also achieve a new state-of-the-art performance on e-SNLI textual explanation generation by applying EPR's phrasal logical relationships.", + "bbox": [ + 169, + 779, + 826, + 878 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 102, + 287, + 117 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Islam Beltagy, Stephen Roller, Pengxiang Cheng, Katrin Erk, and Raymond J Mooney. Representing meaning with a combination of logical and distributional models. Computational Linguistics, pp. 763-808, 2016. URL https://aclanthology.org/J16-4007/.", + "Luke Boucher and Zoltán Dienes. Two ways of learning associations. Cognitive Science, 27(6):807-842, 2003. URL https://www.sciencedirect.com/science/article/pii/S0364021303000715.", + "Samuel Bowman, Gabor Angeli, Christopher Potts, and Christopher D Manning. A large annotated corpus for learning natural language inference. In EMNLP, pp. 632-642, 2015. 
URL https://aclanthology.org/D15-1075.", + "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In NeurIPS, pp. 1877-1901, 2020. URL https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf.", + "Oana-Maria Camburu, Tim Rocktäschel, Thomas Lukasiewicz, and Phil Blunsom. e-SNLI: Natural language inference with natural language explanations. In NeurIPS, pp. 9539-9549, 2018. URL https://proceedings.neurips.cc/paper/2018/hash/4c7a167bb329bd92580a99ce422d6fa6-Abstract.html.", + "Balakrishnan Chandrasekaran, Ashok Goel, and Dean Allemang. Connectionism and information processing abstractions. AI Magazine, 9(4):24-24, 1988. URL https://ojs.aaai.org/index.php/aimagazine/article/view/951.", + "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. Enhanced LSTM for natural language inference. In ACL, pp. 1657-1668, 2017. URL https://aclanthology.org/P17-1152/.", + "Zeming Chen, Qiyue Gao, and Lawrence S Moss. NeuralLog: Natural language inference with joint neural and logical reasoning. arXiv preprint arXiv:2105.14167, 2021. URL https://arxiv.org/abs/2105.14167.", + "Anup Anand Deshmukh, Qianqiu Zhang, Ming Li, Jimmy Lin, and Lili Mou. Unsupervised chunking as syntactic structure induction with a knowledge-transfer approach. In Findings of EMNLP, pp. 3626-3634, 2021. URL https://aclanthology.org/2021.findings-emnlp.307.", + "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. 
In *NAACL-HLT*, pp. 4171–4186, 2019. URL https://aclanthology.org/N19-1423.", + "Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah Smith. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. arXiv preprint arXiv:2002.06305, 2020. URL https://arxiv.org/abs/2002.06305.", + "Honghua Dong, Jiayuan Mao, Tian Lin, Chong Wang, Lihong Li, and Denny Zhou. Neural logic machines. In ICLR, 2018. URL https://openreview.net/forum?id=B1xY-hRctX.", + "Yufei Feng, Quan Liu, Michael Greenspan, Xiaodan Zhu, et al. Exploring end-to-end differentiable natural logic modeling. In COLING, pp. 1172-1185, 2020. URL https://aclanthology.org/2020.coling-main.101.", + "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson F. Liu, Matthew Peters, Michael Schmitz, and Luke Zettlemoyer. AllenNLP: A deep semantic natural language processing platform. In Proc. Workshop for NLP Open Source Software (NLP-OSS), pp. 1-6, 2018. URL https://aclanthology.org/W18-2501." + ], + "bbox": [ + 171, + 125, + 828, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 508, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ashok Goel. Looking back, looking ahead: Symbolic versus connectionist AI. AI Magazine, 42(4): 83-85, 2022. URL https://ojs.aaii.org/index.php/aimagazine/article/view/15111.", + "John Hewitt and Christopher D Manning. A structural probe for finding syntax in word representations. In NAACL-HLT, pp. 4129-4138, 2019. URL https://aclanthology.org/N19-1419.", + "Hai Hu, Qi Chen, Kyle Richardson, Atreyee Mukherjee, Lawrence S Moss, and Sandra Kübler. MonaLog: A lightweight system for natural language inference based on monotonicity. In Proc. 
Society for Computation in Linguistics, pp. 284-293, 2020. URL https://aclanthology.org/2020.scil-1.40/.", + "Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with Gumbel-softmax. In ICLR, 2017. URL https://openreview.net/forum?id=rkE3y85ee.", + "Zhongtao Jiang, Yanzhe Zhang, Zhao Yang, Jun Zhao, and Kang Liu. Alignment rationale for natural language inference. In ACL-IJCNLP, pp. 5372-5387, 2021. URL https://aclanthology.org/2021.acl-long.417/.", + "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. Dense passage retrieval for open-domain question answering. In EMNLP, pp. 6769-6781, 2020. URL https://aclanthology.org/2020.emnlp-main.550/.", + "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. URL https://arxiv.org/abs/1412.6980.", + "Sawan Kumar and Partha Talukdar. NILE: Natural language inference with faithful natural language explanations. In ACL, pp. 8730-8742, 2020. URL https://aclanthology.org/2020.acl-main.771.", + "Tao Lei, Regina Barzilay, and Tommi Jaakkola. Rationalizing neural predictions. In EMNLP, pp. 107-117, 2016. URL https://aclanthology.org/D16-1011/.", + "Bowen Li, Lili Mou, and Frank Keller. An imitation learning approach to unsupervised parsing. In ACL, pp. 3485-3492, 2019. URL https://aclanthology.org/P19-1338.", + "Chen Liang, Jonathan Berant, Quoc Le, Kenneth Forbus, and Ni Lao. Neural symbolic machines: Learning semantic parsers on Freebase with weak supervision. In ACL, pp. 23-33, 2017. URL https://aclanthology.org/P17-1003/.", + "Xianggen Liu, Lili Mou, Haotian Cui, Zhengdong Lu, and Sen Song. Jumper: Learning when to make classification decisions in reading. In *IJCAI*, pp. 4237-4243, 2018. URL https://www.ijcai.org/proceedings/2018/0589.pdf.", + "Yang Liu and Mirella Lapata. Text summarization with pretrained encoders. In EMNLP-IJCNLP, pp. 3730-3740, 2019. 
URL https://aclanthology.org/D19-1387/.", + "Zhengdong Lu, Xianggen Liu, Haotian Cui, Yukun Yan, and Daqi Zheng. Object-oriented neural programming (OONP) for document understanding. In ACL, pp. 2717-2726, 2018. URL https://aclanthology.org/P18-1253.", + "Bill MacCartney and Christopher D Manning. Natural logic for textual inference. In Proc. ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 193-200, 2007. URL https://aclanthology.org/W07-1431/.", + "Bill MacCartney and Christopher D. Manning. Modeling semantic containment and exclusion in natural language inference. In *COLING*, pp. 521-528, 2008. URL https://aclanthology.org/C08-1066.", + "Bill MacCartney and Christopher D Manning. An extended model of natural logic. In Proc. International Conference on Computational Semantics, pp. 140-156, 2009. URL https://aclanthology.org/W09-3714.", + "Bill MacCartney, Michel Galley, and Christopher D Manning. A phrase-based alignment model for natural language inference. In EMNLP, pp. 802-811, 2008. URL https://aclanthology.org/D08-1084." + ], + "bbox": [ + 171, + 102, + 826, + 922 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Rabeeh Karimi Mahabadi, Florian Mai, and James Henderson. Learning entailment-based sentence embeddings from natural language inference. Online Manuscript, 2020. URL https://openreview.net/forum?id=BkxackSKvH.", + "Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B. Tenenbaum, and Jiajun Wu. The neurosymbolic concept learner: Interpreting scenes, words, and sentences from natural supervision. In ICLR, 2019. URL https://openreview.net/forum?id=rJgM1hRctm.", + "Lili Mou, Rui Men, Ge Li, Yan Xu, Lu Zhang, Rui Yan, and Zhi Jin. 
Natural language inference by tree-based convolution and heuristic matching. In ACL, pp. 130-136, 2016. URL https://aclanthology.org/P16-2022.", + "Lili Mou, Zhengdong Lu, Hang Li, and Zhi Jin. Coupling distributed and symbolic execution for natural language queries. In ICML, pp. 2518-2526, 2017. URL https://proceedings.mlr.press/v70/mou17a.html.", + "Sharan Narang, Colin Raffel, Katherine Lee, Adam Roberts, Noah Fiedel, and Karishma Malkan. WT5?! Training text-to-text models to explain their predictions. arXiv preprint arXiv:2004.14546, 2020. URL https://arxiv.org/abs/2004.14546.", + "Ken Nozaki, Hisao Ishibuchi, and Hideo Tanaka. A simple but powerful heuristic method for generating fuzzy rules from numerical data. Fuzzy Sets and Systems, 86(3):251-270, 1997. URL https://www.sciencedirect.com/science/article/abs/pii/0165011495004130.", + "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. BLEU: A method for automatic evaluation of machine translation. In ACL, pp. 311-318, 2002. URL https://aclanthology.org/P02-1040.", + "Ankur Parikh, Oscar Täckström, Dipanjan Das, and Jakob Uszkoreit. A decomposable attention model for natural language inference. In EMNLP, pp. 2249-2255, 2016. URL https://aclanthology.org/D16-1244/.", + "Matt Post. A call for clarity in reporting BLEU scores. In Proc. Conference on Machine Translation: Research Papers, pp. 186-191, 2018. URL https://aclanthology.org/W18-6319.", + "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. OpenAI Blog, 2018. URL https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf.", + "Nils Reimers and Iryna Gurevych. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In EMNLP, 2019. URL https://aclanthology.org/D19-1410.", + "Cynthia Rudin. Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. 
Nature Machine Intelligence, 1(5):206-215, 2019. URL https://www.nature.com/articles/s42256-019-0048-x.", + "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, pp. 5998-6008, 2017. URL https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf.", + "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In ICLR, 2019. URL https://openreview.net/forum?id=rJ4km2R5t7.", + "Shuohang Wang and Jing Jiang. Learning natural language inference with LSTM. In NAACL-HLT, pp. 1442-1451, 2016. URL https://aclanthology.org/N16-1170/.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V Le, and Denny Zhou. Chain of thought prompting elicits reasoning in large language models. In NeurIPS, 2022. URL https://openreview.net/forum?id=_VjQlMeSB_J." + ], + "bbox": [ + 171, + 103, + 828, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In *NAACL-HLT*, pp. 1112–1122, 2018. URL https://aclanthology.org/N18-1101.", + "Wenhan Xiong, Thien Hoang, and William Yang Wang. DeepPath: A reinforcement learning method for knowledge graph reasoning. In EMNLP, pp. 564-573, 2017. URL https://aclanthology.org/D17-1060/.", + "Hitomi Yanaka, Koji Mineshima, Daisuke Bekki, Kentaro Inui, Satoshi Sekine, Lasha Abzianidze, and Johan Bos. 
HELP: A dataset for identifying shortcomings of neural models in monotonicity reasoning. In Proc. Conference on Lexical and Computational Semantics, pp. 250-255, 2019a. URL https://aclanthology.org/S19-1027.", + "Hitomi Yanaka, Koji Mineshima, Daisuke Bekki, Kentaro Inui, Satoshi Sekine, Lasha Abzianidze, and Johan Bos. Can neural networks understand monotonicity reasoning? In ACL BlackboxNLP Workshop, pp. 31-40, 2019b. URL https://aclanthology.org/W19-4804.", + "Kexin Yi, Jiajun Wu, Chuang Gan, Antonio Torralba, Pushmeet Kohli, and Josh Tenenbaum. Neural-symbolic VQA: Disentangling reasoning from vision and language understanding. In NeurIPS, 2018. URL https://proceedings.neurips.cc/paper/2018/file/5e388103a391daabe3de1d76a6739ccd-Paper.pdf.", + "Deunsol Yoon, Dongbok Lee, and SangKeun Lee. Dynamic self-attention: Computing attention over words dynamically for sentence embedding. arXiv preprint arXiv:1808.07383, 2018. URL https://arxiv.org/abs/1808.07383.", + "Lotfi A Zadeh. Fuzzy logic. Computer, 21(4):83-93, 1988. URL https://ieeexplore.ieee.org/abstract/document/53.", + "Lotfi A Zadeh. Fuzzy sets. In *Fuzzy Sets, Fuzzy Logic, and Fuzzy Systems*, pp. 394-432. World Scientific, 1996. URL https://www.worldscientific.com/doi/abs/10.1142/9789814261302_0021.", + "Zhuosheng Zhang, Yuwei Wu, Hai Zhao, Zuchao Li, Shuailiang Zhang, Xi Zhou, and Xiang Zhou. Semantics-aware BERT for language understanding. In AAAI, pp. 9628-9635, 2020. URL https://ojs.aaai.org/index.php/AAAI/article/view/6510.", + "Xinyan Zhao and V.G.Vinod Vydiswaran. LIREx: Augmenting language inference with relevant explanations. In AAAI, pp. 14532-14539, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/17708." 
+ ], + "bbox": [ + 171, + 102, + 826, + 621 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 647, + 441, + 662 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.1 PHRASE DETECTION", + "text_level": 1, + "bbox": [ + 171, + 679, + 362, + 693 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We present more details about our phrase detection. We use $\\mathrm{SpaCy}^5$ to obtain the part-of-speech (POS) tag $^6$ of every word. SpaCy also tags noun phrases. However, if a noun phrase follows a preposition (with a fine-grained POS tag being IN), we remove it from noun phrases but tag it as a prepositional phrase.", + "bbox": [ + 169, + 704, + 823, + 762 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In addition, we extract verbs by the POS tag VERB. A verb may be followed by a particle with the fine-grained POS tag being RP (e.g., show off). It is treated as a verb phrase. In order to handle negation, we allow optional AUX NOT before a verb, (e.g., could not help). This, however, only counts less than $1\\%$ in the dataset, and does not affect our model much.", + "bbox": [ + 169, + 767, + 823, + 824 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To capture other potential semantic units, we treat remaining open class words7 as individual phrases. Finally, the remaining non-content words (in the categories of closed words and others) are discarded (e.g., \"there is\"). 
This is appropriate, because they do not represent meaningful semantics or play a", + "bbox": [ + 169, + 830, + 828, + 875 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_footnote", + "text": "5https://spacy.io", + "bbox": [ + 192, + 882, + 320, + 896 + ], + "page_idx": 12 + }, + { + "type": "page_footnote", + "text": "See definitions in https://spacy.io/usage/linguistic-features", + "bbox": [ + 192, + 896, + 616, + 910 + ], + "page_idx": 12 + }, + { + "type": "page_footnote", + "text": "7https://universaldependencies.org/u/pos/", + "bbox": [ + 192, + 910, + 495, + 922 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/e08c6b3caad44318a6c42a1162f8e5d932ccb60cc225d6db912c64533b9aa893.jpg", + "table_caption": [ + "Table 5: Our rules for phrase detection. \"[\"] means the item is optional." + ], + "table_footnote": [], + "table_body": "
Example: The woman is showing off her blue dog at the playground.
NumberPhrase typeRuleExtracted phrase(s)
1Prepositional phraseIN + NPat the playground
2Noun phraseNPThe woman|her blue dog
3Verb phrase[AUX] + [NOT] + VERB + [RP]is showing off
4OthersOther open class words-
", + "bbox": [ + 207, + 126, + 790, + 213 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/8248aa4102f7171ad75b057337f1e3f4e19a75b3822c2d4ea449260d0811d919.jpg", + "image_caption": [ + "Figure 4: Results of tuning the coefficient of global features." + ], + "image_footnote": [], + "bbox": [ + 184, + 239, + 467, + 426 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/0103103fde1e521ef699f5f4acd134e4a8292249cc3d31cc07a1f2ae43aa5200.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 513, + 238, + 789, + 426 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "role in reasoning. Table 5 summarizes all the rules used in our approach. They are executed in order and extracted phrases are exclusive. For example, the playground in the phrase at the playground will not be treated as a standalone noun phrase, as it is already part of a prepositional phrase.", + "bbox": [ + 169, + 483, + 826, + 527 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Empirically, our rule-based approach works well for the NLI dataset, and our logical reasoning is at the granularity of the extracted phrases.", + "bbox": [ + 169, + 532, + 823, + 561 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 SETTINGS", + "text_level": 1, + "bbox": [ + 171, + 580, + 290, + 594 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Details of the EPR Model. We chose the pretrained model a11-mpnet-base- $v2^8$ from the SentenceBERT study (Reimers & Gurevych, 2019) and obtained 768-dimensional local and global phrase embeddings. Our MLP had the same dimension as the embeddings, i.e., 768D for the local and global variants, or 1536D for the concatenation variant. We chose the coefficient for the global feature in Eq. (1) from a candidate set of $\\{0.0, 0.2, 0.4, 0.6, 0.8, 1.0\\}$ . Figure 4 shows the hyperparameter tuning results on SNLI (mentioned in § 4.2) and MNLI (to be discussed in § C.1). 
We find that 0.4 yields the best sentence accuracy in SNLI, and that 1.0 is the best for MNLI. As our focus is on reasoning, we set the coefficient to be 0.6, because it yields the highest phrasal reasoning performance and decent sentence-level performance for both experiments and in terms of both geometric mean and arithmetic mean of $F$ scores. The pretrained language model (LM) was either finetuned or un-finetuned during training. Finetuning yields higher performance (Table 2), whereas un-finetuned LM is more efficient for in-depth analyses (Table 3). We trained the model with a batch size of 256. We used Adam (Kingma & Ba, 2015) with a learning rate of 5e-5, $\\beta_1 = 0.9$ , $\\beta_2 = 0.999$ , learning rate warm up over the first 10 percent of the total steps, and linear decay of the learning rate. The model was trained up to 3 epochs, following the common practice (Dodge et al., 2020). Our main model variants were trained 5 times with different parameter initializations, and we report the mean and standard deviation.", + "bbox": [ + 169, + 608, + 826, + 845 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Details of Textual Explanation Generation. We used the pretrained T5-small model for finetuning with a batch size of 32. 
The optimizer was Adam with an initial learning rate of 3e-4, $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.999$ , learning rate warm-up for the first 2 epochs, and linear decay of the learning rate up to 10", + "bbox": [ + 169, + 851, + 826, + 895 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "8https://www.sbert.net/docs/pretrained_models.html", + "bbox": [ + 189, + 909, + 563, + 922 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/1084e0ae9a63b49c6777536940cc338080d3b2b18180e7db18d99603a3053c0d.jpg", + "image_caption": [ + "Figure 5: The prompt for phrasal reasoning." + ], + "image_footnote": [], + "bbox": [ + 176, + 104, + 818, + 325 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "epochs; then we decreased the learning rate to 3e-6 and trained the model until the validation BLEU score did not increase for 2 epochs.", + "bbox": [ + 169, + 383, + 823, + 412 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Details of the Prompting Baseline. We adopted the GPT-3 (the text-davinci-003 version with 175B parameters) (Brown et al., 2020) as a prompting baseline to demonstrate large language models (LLMs)' phrasal reasoning ability.", + "bbox": [ + 169, + 419, + 823, + 463 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We consider exemplar-based prompting, because it is unlikely for an LLM to output structured reasoning results in a zero-shot manner. Moreover, our examples are chosen to cover all reasoning cases. We also set the temperature of decoding to 0 to obtain deterministic reasoning, following CoT prompting (Wei et al., 2022). Rule-based post-processing was applied to extract slot values. 
Figure 5 presents the prompt used for phrasal reasoning.", + "bbox": [ + 169, + 468, + 826, + 540 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B DATA ANNOTATION AND REASONING EVALUATION METRICS", + "text_level": 1, + "bbox": [ + 171, + 561, + 718, + 579 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Previous studies have not explicitly evaluated reasoning performance. Typically, they resort to sentence-level classification accuracy (Wang & Jiang, 2016; Mahabadi et al., 2020) or case studies (Parikh et al., 2016; Feng et al., 2020) to demonstrate the effectiveness of their alleged interpretable models, which we believe is inadequate.", + "bbox": [ + 169, + 595, + 826, + 654 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Therefore, we annotated a model-agnostic corpus about phrasal logical relationships and developed a set of metrics to evaluate the phrasal reasoning performance quantitatively. The resources are released on our website (Footnote 1) to facilitate future research.", + "bbox": [ + 169, + 659, + 826, + 702 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.1 DATA ANNOTATION", + "text_level": 1, + "bbox": [ + 171, + 722, + 354, + 736 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We annotated the phrases and their logical relationships in a data sample. The annotators were asked to select corresponding phrases from both premise and hypothesis, and label them as either Entailment, Contradiction, or Neutral, with the sentence-level NLI label being given. Annotators could also select a phrase from either a premise or a hypothesis and label it as Unaligned. The process can be repeated until all phrases are labeled for a data sample. Figure 6 shows a screenshot of our annotation page. In the left panel, the annotator could select phrases in the two sentences and mark them with NLI labels. 
The annotator can view a sample's annotated phrases in the right panel and navigate through different samples.", + "bbox": [ + 169, + 750, + 826, + 862 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The annotation was performed by three in-lab researchers who are familiar with the NLI task. Our preliminary study shows low agreement when the annotators are unfamiliar with the task; thus it is inappropriate to recruit Mechanical Turks for annotation. We randomly selected 100 samples for annotation, following previous work on the textual explanation for SNLI (Camburu et al., 2018),", + "bbox": [ + 169, + 867, + 828, + 926 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/b4fe9d3be55d6fd2a0483619e745b2fe18ac33cf34df0d009798b3d99827c502.jpg", + "image_caption": [ + "Figure 6: A screenshot of the annotation page." + ], + "image_footnote": [], + "bbox": [ + 217, + 118, + 483, + 333 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/6b790ebae135bb16bcb093f146fe34e93ccaf5d0c14c254e08af31be87a70402.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 99, + 794, + 342 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 6: Examples illustrating the proposed metrics, where we consider the Entailment category. \"|\" refers to a phrase segmentation.", + "bbox": [ + 169, + 386, + 828, + 417 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/df2172cc26cc75e66eed3c168907cc417908a6b250f9a2b3c27a5721b2b68b43.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Example annotation of entailment (in highlight): Premise: A kid in red is playing in a garden. Hypothesis: A child in red is watching TV in the bedroom.
#Example OutputPE(P)PE(H)PERE(P)RE(H)REFEExplanation
1PH in a garden0000000Although in occurs in the annotation, the word indexes are different. The reasoning is wrong.
2PH watching TV1001000Mis-matched phrases in hypothesis. The reasoning is wrong.
3PH a kid | in red1111111All word indexes match the annotation. The reasoning is correct.
", + "bbox": [ + 173, + 425, + 826, + 550 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "which is adequate to show statistical significance. Since our annotation only concerns data samples, it is agnostic to any machine learning model.", + "bbox": [ + 169, + 580, + 826, + 611 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.2 EVALUATION METRICS FOR PHRASAL REASONING", + "text_level": 1, + "bbox": [ + 171, + 633, + 570, + 647 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We propose a set of $F$ -scores in Entailment, Contradiction, Neutral, and Unaligned to quantitatively evaluate the phrasal reasoning performance. We first introduce our metric for one data sample and then explain the extension to a corpus.", + "bbox": [ + 169, + 662, + 826, + 705 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Consider the Entailment category as an example. We first count the number of \"hits\" (true positives) between the word indexes of model output and annotation. Using word indexes (instead of words) rules out hitting the words in misaligned phrases (Example 1, Table 6). Then, we calculate precision scores for the premise and hypothesis, denoted by $P_{\\mathsf{E}}^{(P)}$ and $P_{\\mathsf{E}}^{(H)}$ , respectively. Their geometric mean $P_{\\mathsf{E}} = (P_{\\mathsf{E}}^{(P)}P_{\\mathsf{E}}^{(H)})^{1 / 2}$ is considered as the precision for Entailment. Here, the geometric mean rules out incorrect reasoning that hits either the premise or hypothesis, but not both (Example 2, Table 6). Further, we compute the recall score $R_{\\mathsf{E}}$ in a similar way, and finally obtain the $F$ -score by $F_{\\mathsf{E}} = \\frac{2P_{\\mathsf{E}}R_{\\mathsf{E}}}{P_{\\mathsf{E}} + R_{\\mathsf{E}}}$ . Likewise, $F_{\\mathsf{C}}$ and $F_{\\mathsf{N}}$ are calculated for Contradiction and Neutral. 
In addition, we compute the $F$ -score for unaligned phrases in premise and hypothesis, denoted by $F_{\\mathsf{UP}}$ and $F_{\\mathsf{UH}}$ , respectively.", + "bbox": [ + 169, + 710, + 828, + 862 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "When calculating our $F$ -scores for a corpus, we use micro-average, i.e., the precision and recall ratios are calculated in the corpus level. This is more stable, especially considering the varying lengths of sentences. Moreover, we compare model output against three annotators and perform an arithmetic average, further reducing the variance caused by ambiguity.", + "bbox": [ + 169, + 868, + 826, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/069dc294d304e9209f1a05756e1c3099de53c453468be6003ceb37d2d4d7a569.jpg", + "table_caption": [ + "Table 7: Results on MNLI. †Quoted from respective papers. ‡Our replication." + ], + "table_footnote": [], + "table_body": "
ModelSent AccReasoning Performance
FEFCFUPFUHGMAM
Human-85.1573.4473.1846.3167.8569.52
Non-reasoning methods
Mahabadi et al. (2020)†73.8------
LSTM (Wang et al., 2019)†72.2------
Transformer (Radford et al., 2018)82.1------
Reasoning methods
NNL (Feng et al., 2020)‡61.2850.3332.0049.780.000.0033.03
STP75.1555.4751.7264.3237.5751.3152.27
EPR (Concat, LM finetuned)79.65±0.1961.76±0.3252.09±0.4164.3237.5752.80±0.0753.93±0.07
", + "bbox": [ + 176, + 127, + 823, + 262 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "It should be emphasized that our metrics evaluate phrase detection and alignment in an implicit manner. A poor phrase detector and aligner will result in a low reasoning score (shown in our ablation study), but we do not explicitly calculate phrase detection and alignment accuracy. This helps us cope with the ambiguity of the phrase granularity (Example 3, Table 6).", + "bbox": [ + 169, + 286, + 823, + 345 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "To summarize, we propose an evaluation framework including data annotation (§ B.1) and evaluation metrics (§ B.2). These are our contributions in formulating the phrasal reasoning task for NLI.", + "bbox": [ + 169, + 349, + 823, + 380 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C ADDITIONAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 398, + 400, + 414 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.1 RESULTS ON MNLI", + "text_level": 1, + "bbox": [ + 171, + 429, + 356, + 443 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this appendix, we provide additional results on the matched section of the MNLI dataset (Williams et al., 2018), which consists of 393K training samples, 10K validation samples, and another 10K test samples. It has the same format as the SNLI dataset, but samples come from multiple domains and are more diverse. We follow § 4.1 and use the same protocol to create the phrasal reasoning annotation for the MNLI dataset based on 100 randomly selected samples. However, we found that MNLI is much noisier than SNLI; particularly, the sentences labeled as Neutral in MNLI share few related phrases. For example, the two sentences do not have much in common in the sample \"Premise: If you still want to join, it might be worked.\" and \"Hypothesis: Your membership is the only way that this could work\". 
Moreover, the inter-human agreement is low in the Neutral category. Therefore, we believe the corpus quality is less satisfactory for Neutral. To ensure meaningful evaluation, we ignored the evaluation of Neutral in this experiment, although our reasoning approach is not changed. The remaining 60 samples containing Entailment and Contradiction serve as the MNLI phrasal reasoning corpus.", + "bbox": [ + 169, + 455, + 826, + 637 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We consider the EPR variant with concatenated local and global features, since the SNLI experiment shows it achieves a good balance between sentence-level accuracy and reasoning. Our models were run 5 times with different initializations.", + "bbox": [ + 169, + 643, + 825, + 685 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "As seen in Table 7, our EPR approach is again worse than humans, but largely improves the reasoning performance compared with NNL and STP baselines. Its sentence-level prediction is comparable to (although slightly lower than) finetuning Transformers. The results are highly consistent with SNLI experiments, showing the robustness of our approach.", + "bbox": [ + 169, + 691, + 823, + 750 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "It is important to notice that the EPR model here is trained on MNLI sentence labels, and is not transferred from the SNLI dataset. In our preliminary experiments, we tried transfer learning from SNLI to MNLI and failed to obtain satisfactory performance. 
We found that our EPR is more prone to the out-of-vocabulary issue (i.e., it does not predict well for the phrases in the new domain), whereas a black-box neural network may learn biased sentence patterns and achieve higher performance in transfer learning.", + "bbox": [ + 169, + 755, + 825, + 839 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.2 ERROR ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 854, + 344, + 871 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "To show how phrasal reasoning affects sentence-level prediction, we perform an error analysis in Table 8. Specifically, we examine the reasoning performance (arithmetic mean of $F$ -scores) when the sentence label is correctly and incorrectly predicted on the SNLI dataset. As shown, EPR models", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/871764ec24497ef74204c8c12f868f5c8f850eb6a11f2e28cf349af2d2f1541b.jpg", + "table_caption": [ + "Table 8: Sentence-level prediction count and arithmetic average reasoning performance ( $F$ -score) when the sentence label is correctly and incorrectly predicted on the SNLI dataset." + ], + "table_footnote": [], + "table_body": "
Sentence-level predictionCount (in percentage)Reasoning performance (AMF)
Local finetunedConcat finetunedLocal finetunedConcat finetuned
Correct75.4±1.3687.8±0.7565.71±0.8358.68±0.67
Wrong24.6±1.3612.2±0.7540.74±2.0137.58±3.28
Overall100.0±0.00100.0±0.0059.93±0.6756.32±1.13
", + "bbox": [ + 176, + 133, + 823, + 205 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/4ca1c526a86db5151d021704156513e3929199ea3dc845ff255b4b07fbf2ab32.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Groundtruth: Entailment Prediction: Entailment\nThree young boys enjoying a day at the beach.\n(a)\nThe boys are in the beach.Groundtruth: Contradiction Prediction: Contradiction\nA man playing fetch with two brown dogs.\n(b)\nThe dogs are asleep.Entailment\nContradiction\nNeutral\nUnaligned
Groundtruth: Neutral Prediction: Neutral\nWalkers on a concrete boardwalk under a blue sky.\n(c)\nWalkers under a blue sky near the beach.Groundtruth: Entailment Prediction: Neutral\nAn elderly couple in heavy coats are looking at black and white photos displayed on a wall.\n(d)\nOctogenarians admiring the old photographs that decorated the wall.
", + "bbox": [ + 178, + 219, + 821, + 330 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Figure 7: Examples of explainable phrasal reasoning predicted by our EPR model. Words in one color block are detected phrases, a dotted line shows the alignment of two phrases, and the color represents the predicted phrasal NLI label. In Example (d), EPR's prediction suggests the provided label in SNLI is incorrect.", + "bbox": [ + 169, + 343, + 823, + 398 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "with both local and concatenated features have much higher reasoning performance when sentence labels are correctly predicted than incorrectly predicted. The positive correlation between phrasal reasoning performance and sentence-level accuracy shows our fuzzy logic induction rules indeed make sense.", + "bbox": [ + 169, + 426, + 823, + 481 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We also find that the model with local features has a higher reasoning performance than with concatenated features, even when the sentence-level prediction is wrong. This is because the local model is unaware of the context of the sentences. Thus, it must perform strict phrasal reasoning based on the induction rules, even if in this case the reasoning process is imperfect and leads to sentence-level errors.", + "bbox": [ + 169, + 489, + 823, + 556 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.3 CASE STUDY OF EPR", + "text_level": 1, + "bbox": [ + 171, + 575, + 370, + 589 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We present case studies of EPR in Figure 7. 
Our EPR performs impressive reasoning for the NLI task, which is learned in a weakly supervised manner with only sentence-level labels.", + "bbox": [ + 169, + 601, + 823, + 631 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In Example (a), the two sentences are predicted Entailment because three young boys entails the boys and at the beach entails in the beach, whereas unaligned phrases enjoying and a day are allowed in the premise for Entailment. In Example (b), playing contradicts asleep, and the two sentences are also predicted Contradiction. Likewise, Example (c) is predicted Neutral because the aligned phrases on a concrete boardwalk and near the beach are neutral.", + "bbox": [ + 169, + 637, + 823, + 707 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In our study, we also find several interesting examples where EPR's reasoning provides clues suggesting that the target labels may be incorrect in the SNLI dataset. In Example (d), our model predicts Neutral for looking and admiring, as well as for at black and white photos and the old photographs. Thus, the two sentences are predicted Neutral instead of the provided label Entailment. We believe our model's reasoning and prediction are correct, because people looking at something may or may not admire it; a black-and-white photo may or may not be an old photo (as it could be a black-and-white artistic photo).", + "bbox": [ + 169, + 713, + 826, + 811 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.4 CASE STUDY OF THE TEXTUAL EXPLANATION GENERATION", + "text_level": 1, + "bbox": [ + 171, + 828, + 637, + 842 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We conduct another case study to show how EPR's reasoning is used in the textual explanation generation task. 
As seen in Figure 8, our EPR reasoning yields structured factual tuples: on a deserted beach entailing at the beach, Some dogs contradicting only one dog, and running unaligned (matched with a special token [EMPTY]). Our explanation generation model attends to these factual tuples, and the heat map shows that our model gives the most attention weights (with an average of", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/43654033e8b40a301fb9227d2167129a2029e33c1048e4832f7411e2c0a1b05b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Input Premise : Some dogs are running on a deserted beach.\nHypothesis : There is only one dog at the beach.
Label Contradiction (not used during our explanation generation)
EPR's Reasoning Output
Premise phraseHypothesis phraseEPR labelAttention score
on a deserted beachat the beachE23.16
Some dogsonly one dogC61.22
running[EMPTY]E15.62
Output explanation Some dogs is more than one dog.
Reference explanations:\n(1) Some is more than one, therefore there can't be only one dog.\n(2) Some indicates more than one dog. One dog is not some dogs.\n(3) Some dogs are not one dog.
", + "bbox": [ + 305, + 87, + 694, + 258 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/7aad61fa82aba6c0d7d230334d58e9d5a183d8bb903f0c5c9b91377fcfef4dd7.jpg", + "image_caption": [ + "Figure 8: Case study of the textual explanation generation. The heat map shows the step-by-step and average attention weights to the factual tuples (vertical axis)." + ], + "image_footnote": [], + "bbox": [ + 179, + 270, + 733, + 398 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "0.61) to the tuple, Some dogs contradicting only one dog, to generate the explanation \"Some dogs is more than one dog.\" This example illustrates that the factual tuples given by our EPR model provide meaningful information and can improve textual explanation generation.", + "bbox": [ + 169, + 465, + 823, + 510 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D LIMITATION AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 171, + 529, + 486, + 545 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This paper performs phrase detection and alignment by heuristics. They work well empirically in our experiments, although further improvement is possible (for example, by considering syntactic structures). However, our main focus is neural fuzzy logic for weakly supervised reasoning. This largely differs from previous work based on manually designed lexicons and rules (Hu et al., 2020; Chen et al., 2021).", + "bbox": [ + 169, + 560, + 823, + 631 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Our long-term goal is to develop a weakly supervised, end-to-end trained neuro-symbolic system that can extract semantic units and perform reasoning for a given downstream NLP task. 
This paper is an important milestone toward the long-term goal.", + "bbox": [ + 169, + 637, + 823, + 681 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E ETHICAL STATEMENTS", + "text_level": 1, + "bbox": [ + 171, + 700, + 398, + 715 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Our work involves human annotation of the phrasal logical relationships. Since the research subject here is logic (rather than humans), there are minimal ethical concerns. We nevertheless followed a standard protocol of human evaluation (involving identity protection, and proper compensation), approved by our institutional ethics board.", + "bbox": [ + 169, + 731, + 826, + 789 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 809, + 357, + 823 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We thank all reviewers and chairs for their valuable comments. The research is supported in part by the Natural Sciences and Engineering Research Council of Canada (NSERC) under Grant No. RGPIN2020-04465, the Amii Fellow Program, the Canada CIFAR AI Chair Program, a UAHJIC project, a donation from DeepMind, and the Digital Research Alliance of Canada (alliancecan.ca). 
Atharva Naik contributed to the research as an intern at the University of Alberta through the Mitacs Globalink program.", + "bbox": [ + 169, + 839, + 826, + 924 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_model.json b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7ea5d2f9dfdb2f32fed2186f848e886363165260 --- /dev/null +++ b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_model.json @@ -0,0 +1,3329 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.747, + 0.147 + ], + "angle": 0, + "content": "WEAKLY SUPERVISED EXPLAINABLE PHRASAL REASONING WITH NEURAL FUZZY LOGIC" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.169, + 0.84, + 0.185 + ], + "angle": 0, + "content": "Zijun \\(\\mathbf{W}\\mathbf{u}^{*1}\\) , Zi Xuan Zhang\\*, Atharva Naik+2, Zhijian Mei', Mauajama Firdaus', Lili Mou" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.185, + 0.827, + 0.2 + ], + "angle": 0, + "content": "\\(^{1}\\)Dept. 
Computing Science & Alberta Machine Intelligence Institute (Amii), University of Alberta" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.2, + 0.375, + 0.215 + ], + "angle": 0, + "content": "2Carnegie Mellon University" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.215, + 0.657, + 0.229 + ], + "angle": 0, + "content": "{zijun4, zixuan7, zimei1}@ualberta.ca, arnaik@cs.cmu.edu," + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.229, + 0.505, + 0.242 + ], + "angle": 0, + "content": "{mauzama.03, doublepower.mou}@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.242, + 0.637, + 0.257 + ], + "angle": 0, + "content": "*Equal contribution, †Work done during the internship at UofA/Amii" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.294, + 0.548, + 0.309 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.324, + 0.77, + 0.492 + ], + "angle": 0, + "content": "Natural language inference (NLI) aims to determine the logical relationship between two sentences, such as Entailment, Contradiction, and Neutral. In recent years, deep learning models have become a prevailing approach to NLI, but they lack interpretability and explainability. In this work, we address the explainability of NLI by weakly supervised logical reasoning, and propose an Explainable Phrasal Reasoning (EPR) approach. Our model first detects phrases as the semantic unit and aligns corresponding phrases in the two sentences. Then, the model predicts the NLI label for the aligned phrases, and induces the sentence label by fuzzy logic formulas. Our EPR is almost everywhere differentiable and thus the system can be trained end to end. In this way, we are able to provide explicit explanations of phrasal logical relationships in a weakly supervised manner. 
We further show that such reasoning results help textual explanation generation.1" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.515, + 0.339, + 0.531 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.546, + 0.828, + 0.645 + ], + "angle": 0, + "content": "Natural language inference (NLI) aims to determine the logical relationship between two sentences (called a premise and a hypothesis), and target labels include Entailment, Contradiction, and Neutral (Bowman et al., 2015; MacCartney & Manning, 2008). Figure 1 gives an example, where the hypothesis contradicts the premise. NLI is important to natural language processing, because it involves logical reasoning and is a key problem in artificial intelligence. Previous work shows that NLI can be used in various downstream tasks, such as information retrieval (Karpukhin et al., 2020) and text summarization (Liu & Lapata, 2019)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.65, + 0.828, + 0.735 + ], + "angle": 0, + "content": "In recent years, deep learning has become a prevailing approach to NLI (Bowman et al., 2015; Mou et al., 2016; Wang & Jiang, 2016; Yoon et al., 2018). Especially, pretrained language models with the Transformer architecture (Vaswani et al., 2017) achieve state-of-the-art performance for the NLI task (Radford et al., 2018; Zhang et al., 2020). However, such deep learning models are black-box machinery and lack interpretability. In real applications, it is important to understand how these models make decisions (Rudin, 2019)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.741, + 0.829, + 0.868 + ], + "angle": 0, + "content": "Several studies have addressed the explainability of NLI models. Camburu et al. 
(2018) generate a textual explanation by sequence-to-sequence supervised learning, in addition to NLI classification; such an approach is multi-task learning of text classification and generation, which does not perform reasoning itself. MacCartney et al. (2008) propose a scoring model to align related phrases; Parikh et al. (2016) and Jiang et al. (2021) propose to obtain alignment by attention mechanisms. However, they only provide correlation information, instead of logical reasoning. Other work incorporates upward and downward monotonicity entailment reasoning for NLI (Hu et al., 2020; Chen et al., 2021), but these approaches are based on hand-crafted rules (e.g., every downward entailing some) and are restricted to Entailment only; they cannot handle Contradiction or Neutral." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.873, + 0.828, + 0.903 + ], + "angle": 0, + "content": "In this work, we address the explainability for NLI by weakly supervised phrasal logical reasoning. Our goal is to explain NLI predictions with phrasal logical relationships between the premise and" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.637, + 0.925 + ], + "angle": 0, + "content": "\\(^{1}\\)Code and resources available at https://github.com/MANGA-UOFA/EPR" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "hypothesis. Intuitively, an NLI system with an explainable reasoning mechanism should be equipped with the following functionalities:" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.14, + 0.826, + 0.168 + ], + "angle": 0, + "content": "1. 
The system should be able to detect corresponding phrases and tell their logical relationship, e.g., several men contradicting one man, but pull in a fishing net entailing holding the net (Figure 1)." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.168, + 0.825, + 0.196 + ], + "angle": 0, + "content": "2. The system should be able to induce sentence labels from phrasal reasoning. In the example, the two sentences are contradictory because there exists one contradictory phrase pair." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.196, + 0.826, + 0.238 + ], + "angle": 0, + "content": "3. More importantly, such reasoning should be trained in a weakly supervised manner, i.e., the phrase-level predictions are trained from sentence labels only. Otherwise, the reasoning mechanism degrades to multi-task learning, which requires massive fine-grained human annotations." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.14, + 0.826, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.486, + 0.396 + ], + "angle": 0, + "content": "To this end, we propose an Explainable Phrasal Reasoning (EPR) approach to the NLI task. Our model obtains phrases as semantic units, and aligns corresponding phrases by embedding similarity. Then, we predict the NLI labels (namely, Entailment, Contradiction, and Neutral) for the aligned phrases. Finally, we propose to induce the sentence-level label from phrasal labels in a fuzzy logic manner (Zadeh, 1988; 1996). Our model is differentiable, and the phrasal reasoning component can be trained" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.247, + 0.824, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.495, + 0.355, + 0.825, + 0.384 + ], + "angle": 0, + "content": "Figure 1: The natural language inference (NLI) task and desired phrasal reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.825, + 0.425 + ], + "angle": 0, + "content": "with the weak supervision of sentence NLI labels. In this way, our EPR approach satisfies all the desired properties mentioned above." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.432, + 0.826, + 0.489 + ], + "angle": 0, + "content": "In our experiments, we developed a comprehensive methodology (data annotation and evaluation metrics) to quantitatively evaluate phrasal reasoning performance, which has not been accomplished in previous work. We extend previous studies and obtain plausible baseline models. Results show that our EPR yields a much more meaningful explanation regarding \\( F \\) scores against human annotation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.825, + 0.537 + ], + "angle": 0, + "content": "To further demonstrate the quality of extracted phrasal relationships, we feed them to a textual explanation model. Results show that our EPR reasoning leads to an improvement of 2 points in BLEU scores, achieving a new state of the art on the e-SNLI dataset (Camburu et al., 2018)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.543, + 0.475, + 0.558 + ], + "angle": 0, + "content": "Our contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.565, + 0.825, + 0.592 + ], + "angle": 0, + "content": "1. We formulate a phrasal reasoning task for natural language inference (NLI), addressing the interpretability of neural models." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.593, + 0.825, + 0.621 + ], + "angle": 0, + "content": "2. We propose an EPR model that induces sentence-level NLI labels from explicit phrasal logical labels by neural fuzzy logic. EPR is able to perform reasoning in a weakly supervised way." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.621, + 0.826, + 0.661 + ], + "angle": 0, + "content": "3. 
We annotated phrasal logical labels and designed a set of metrics to evaluate phrasal reasoning. We further use our reasoning results to improve textual explanation generation. Our code and annotated data are released for future studies." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.565, + 0.826, + 0.661 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.669, + 0.825, + 0.698 + ], + "angle": 0, + "content": "To the best of our knowledge, we are the first to develop a weakly supervised phrasal reasoning model for the NLI task." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.727, + 0.347, + 0.743 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.826, + 0.876 + ], + "angle": 0, + "content": "Natural Language Inference. MacCartney & Manning (2009) propose seven natural logic relations in addition to Entailment, Contradiction, and Neutral. MacCartney & Manning (2007) also distinguish upward entailment (every mammal upward entailing some mammal) and downward entailment (every mammal downward entailing every dog) as different categories. Manually designed lexicons and rules are used to interpret Entailment in a finer-grained manner, such as downward and upward entailment (Hu et al., 2020; Chen et al., 2021). Feng et al. (2020) apply such natural logic to NLI reasoning at the word level; however, our experiments will show that their word-level treatment is not an appropriate granularity, and they fail to achieve meaningful reasoning performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.827, + 0.926 + ], + "angle": 0, + "content": "The above reasoning schema focuses more on the quantifiers of first-order logic (Beltagy et al., 2016). 
However, the SNLI dataset (Bowman et al., 2015) we use only contains less than \\(5\\%\\) samples with explicit quantifiers, and the seven-category schema complicates reasoning in the weakly supervised" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "setting. Instead, we adopt three-category NLI labels following the SNLI dataset. Our focus is entity-based reasoning, and the treatment of quantifiers is absorbed into phrases." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.829, + 0.21 + ], + "angle": 0, + "content": "We also notice that previous work lacks explicit evaluation on the reasoning performance for NLI. For example, the SNLI dataset only provides sentence-level labels. The HELP (Yanaka et al., 2019a) and MED (Yanaka et al., 2019b) datasets concern monotonicity inference problems, where the label is also at the sentence level; they only consider Entailment, ignoring Contradiction and Neutral. Thus, we propose a comprehensive framework for the evaluation of NLI reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.216, + 0.828, + 0.329 + ], + "angle": 0, + "content": "e-SNLI. Camburu et al. (2018) propose the e-SNLI task of textual explanation generation and use LSTM as a baseline. Kumar & Talukdar (2020) propose the NILE approach, using multiple decoders to generate explanations for all E, C, and N labels, and then predicting which to be selected. Zhao & Vydiswaran (2021) propose the LIREx approach, using additionally annotated rationales for explanation generation. Narang et al. (2020) finetune T5 with multiple explanation generation tasks. 
Although these systems can generate explanations, the nature of such finetuning approaches renders the explanation generator per se unexplainable. By contrast, we design a textual explanation generation model that utilizes our EPR's phrasal reasoning, obtained in a weakly supervised manner." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.335, + 0.828, + 0.392 + ], + "angle": 0, + "content": "Neuro-Symbolic Approaches. In recent years, neuro-symbolic approaches have attracted increasing interest in the AI and NLP communities for interpreting deep learning models. Typically, these approaches are trained by reinforcement learning or its relaxation, such as attention and Gumbel-softmax (Jang et al., 2017), to reason about certain latent structures in a downstream task." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.398, + 0.826, + 0.51 + ], + "angle": 0, + "content": "For example, Lei et al. (2016) and Liu et al. (2018) extract key phrases or sentences for a text classification task. Lu et al. (2018) extract entities and relations for document understanding. Liang et al. (2017) and Mou et al. (2017) perform SQL-like execution based on input text for semantic parsing. Xiong et al. (2017) hop over a knowledge graph for reasoning the relationships between entities. Li et al. (2019) and Deshmukh et al. (2021) model symbolic actions for unsupervised syntactic structure induction. In the vision domain, Mao et al. (2019) propose a neuro-symbolic approach to learn visual concepts. Our work addresses logical reasoning for the NLI task, which is not tackled in previous neuro-symbolic studies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.516, + 0.826, + 0.698 + ], + "angle": 0, + "content": "Fuzzy Logic. Fuzzy logic (Zadeh, 1988; 1996) models an assertion and performs logic calculation with probability. 
For example, a quantifier (e.g., \"most\") and assertion (e.g., \"ill\") are modeled by a score in \\((0,1)\\); the score of a conjunction \\(s(x_{1} \\wedge x_{2})\\) is the product of \\(s(x_{1})\\) and \\(s(x_{2})\\). In old-school fuzzy logic studies, the mapping from language to the score is usually given by human-defined heuristics (Zadeh, 1988; Nozaki et al., 1997), and may not be suited to the task of interest. By contrast, we train neural networks to predict the probability of phrasal logical relations, and induce the sentence NLI label by fuzzy logic formulas. Thus, our approach takes advantage of both worlds of symbolism and connectionism. Mahabadi et al. (2020) apply fuzzy logic formulas to replace multi-layer perceptrons for NLI. But they are unable to provide expressive reasoning because their fuzzy logic works on sentence features. Our work is inspired by Mahabadi et al. (2020). However, we propose to apply fuzzy logic to the detected and aligned phrases, enabling our approach to provide reasoning in a symbolic (i.e., expressive) way. We develop our own fuzzy logic formulas, which are also different from Mahabadi et al. (2020)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.72, + 0.388, + 0.736 + ], + "angle": 0, + "content": "3 OUR EPR APPROACH" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.825, + 0.783 + ], + "angle": 0, + "content": "In this section, we describe our EPR approach in detail, also shown in Figure 2. It has three main components: phrase detection and alignment, phrasal NLI prediction, and sentence label induction." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.788, + 0.826, + 0.86 + ], + "angle": 0, + "content": "Phrase Detection and Alignment. In NLI, a data point consists of two sentences, a premise and a hypothesis. We first extract content phrases from both input sentences by rules and heuristics. 
For example, \\(\\left[\\mathrm{AUX}\\right] + \\left[\\mathrm{NOT}\\right] + \\mathrm{VERB} + \\left[\\mathrm{RP}\\right]\\) is treated as a verb phrase. Full details are presented in Appendix A.1. Compared with the word level (Parikh et al., 2016; Feng et al., 2020), a phrase is a more meaningful semantic unit for logical reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.866, + 0.826, + 0.927 + ], + "angle": 0, + "content": "We then align corresponding phrases in the two sentences based on cosine similarity. Let \\(\\mathrm{P} = (\\mathrm{p}_1,\\dots ,\\mathrm{p}_M)\\) and \\(\\mathrm{H} = (\\mathrm{h}_1,\\dots ,\\mathrm{h}_N)\\) be the premise and hypothesis, respectively, where \\(\\mathrm{p}_m\\) and \\(\\mathrm{h}_n\\) are extracted phrases. We apply Sentence-BERT (Reimers & Gurevych, 2019) to each individual phrase and obtain the local phrase embeddings by \\(\\pmb {p}_m^{(L)} = \\mathrm{SBERT}(\\mathrm{p}_m),\\pmb {h}_n^{(L)} = \\mathrm{SBERT}(\\mathrm{h}_n)\\). We" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.086, + 0.825, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.249, + 0.256, + 0.747, + 0.272 + ], + "angle": 0, + "content": "Figure 2: An overview of our Explainable Phrasal Reasoning (EPR) model." + }, + { + "type": "table_caption", + "bbox": [ + 0.2, + 0.284, + 0.796, + 0.3 + ], + "angle": 0, + "content": "Table 1: An example showing the importance of handling unaligned phrases (in highlight)." + }, + { + "type": "table", + "bbox": [ + 0.194, + 0.303, + 0.809, + 0.347 + ], + "angle": 0, + "content": "
Premise\nHypothesisPeople are shopping for fruit.\nPeople are shopping for fruit in the market.People are shopping for fruit in the market.\nPeople are shopping for fruit.
Sentence NLI[ ] Entailment [ ] Contradiction [√] Neutral[√] Entailment [ ] Contradiction [ ] Neutral
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.361, + 0.825, + 0.407 + ], + "angle": 0, + "content": "also apply Sentence-BERT to the entire premise and hypothesis sentences to obtain the global phrase embeddings \\( \\pmb{p}_m^{(G)} \\) and \\( \\pmb{h}_n^{(G)} \\) by mean-pooling the features of the words in the phrase. The phrase similarity is given by" + }, + { + "type": "equation", + "bbox": [ + 0.296, + 0.411, + 0.825, + 0.43 + ], + "angle": 0, + "content": "\\[\n\\sin \\left(\\mathrm {p} _ {m}, \\mathrm {h} _ {n}\\right) = \\gamma \\cos \\left(\\boldsymbol {p} _ {m} ^ {(G)}, \\boldsymbol {h} _ {n} ^ {(G)}\\right) + (1 - \\gamma) \\cos \\left(\\boldsymbol {p} _ {m} ^ {(L)}, \\boldsymbol {h} _ {n} ^ {(L)}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.827, + 0.476 + ], + "angle": 0, + "content": "where \\(\\gamma\\) is a hyperparameter balancing the lexical and contextual representations of a phrase (Hewitt & Manning, 2019). It is noted that Sentence-BERT is finetuned on paraphrase datasets, and thus is more suitable for phrasal similarity matching than pretrained language models (Devlin et al., 2019)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.481, + 0.825, + 0.511 + ], + "angle": 0, + "content": "We obtain phrase alignment between the premise and hypothesis in a heuristic way. 
For every phrase \\( \\mathrm{p}_m \\) in the premise, we look for the most similar phrase \\( \\mathrm{h}_n \\) from the hypothesis by" + }, + { + "type": "equation", + "bbox": [ + 0.399, + 0.515, + 0.825, + 0.531 + ], + "angle": 0, + "content": "\\[\nn = \\operatorname {a r g m a x} _ {n ^ {\\prime}} \\sin \\left(\\boldsymbol {p} _ {m}, \\boldsymbol {h} _ {n ^ {\\prime}}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.535, + 0.826, + 0.618 + ], + "angle": 0, + "content": "Likewise, for every phrase \\( \\mathrm{h}_n \\) in the hypothesis, we look for the most similar phrase \\( \\mathrm{p}_m \\) from the premise. A phrase pair \\( (\\mathrm{p}_m, \\mathrm{h}_n) \\) is considered to be aligned if \\( \\mathrm{h}_n \\) is selected as the closest phrase to \\( \\mathrm{p}_m \\), and \\( \\mathrm{p}_m \\) is the closest to \\( \\mathrm{h}_n \\). Such hard alignment differs from commonly used soft attention-based approaches (Parikh et al., 2016). Our alignment method can ensure the quality of phrase alignment, and more importantly, leave other phrases unaligned (e.g., helping each other in Figure 1), which are common in the NLI task. The process is illustrated in Figure 2a." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.625, + 0.827, + 0.696 + ], + "angle": 0, + "content": "Phrasal NLI Prediction. Our model then predicts the logical relationship of an aligned phrase pair \\((p, h)\\) among three target labels: Entailment, Contradiction, and Neutral. While previous work (Feng et al., 2020) identifies finer-grained labels for NLI, we do not follow their categorization, because it complicates the reasoning process and makes weakly supervised training more difficult. Instead, we adopt a three-way phrasal classification, which is consistent with sentence NLI labels." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.702, + 0.825, + 0.789 + ], + "angle": 0, + "content": "We represent a phrase, say, \\( p \\) in the premise, by a vector embedding, and we consider two types of features: a local feature \\( \\pmb{p}^{(L)} \\) and a global feature \\( \\pmb{p}^{(G)} \\), re-used from the phrase alignment component. They are concatenated as the phrase representation \\( \\pmb{p} = [p^{(L)}; p^{(G)}] \\). Likewise, the phrase representation for a hypothesis phrase \\( h \\) is obtained in a similar way. Intuitively, local features force the model to perform reasoning in a serious manner, but global features are important to sentence-level prediction. Such intuition is also verified in an ablation study (§ 4.2)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.795, + 0.825, + 0.838 + ], + "angle": 0, + "content": "Then, we use a neural network to predict the phrasal NLI label (Entailment, Contradiction, and Neutral). This is given by the standard heuristic matching (Mou et al., 2016) based on phrase embeddings, followed by a multi-layer perceptron (MLP) and a three-way softmax layer:" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.841, + 0.825, + 0.858 + ], + "angle": 0, + "content": "\\[\n\\left[ P _ {\\text {p h r a s e}} (\\mathsf {E} | \\mathrm {p}, \\mathrm {h}); P _ {\\text {p h r a s e}} (\\mathsf {C} | \\mathrm {p}, \\mathrm {h}); P _ {\\text {p h r a s e}} (\\mathsf {N} | \\mathrm {p}, \\mathrm {h}) \\right] = \\operatorname {s o f t m a x} (\\operatorname {M L P} \\left(\\left[ \\boldsymbol {p}; \\boldsymbol {h}; \\left| \\boldsymbol {p} - \\boldsymbol {h} \\right|; \\boldsymbol {p} \\circ \\boldsymbol {h} \\right]\\right)) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.861, + 0.827, + 0.89 + ], + "angle": 0, + "content": "where \\( \\circ \\) is the element-wise product, and the semicolon refers to column vector concatenation. 
E, C, and N refer to the Entailment, Contradiction, and Neutral labels, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "It should be mentioned that a phrase may be unaligned, but plays an important role in sentence-level NLI prediction, as shown in Table 1. Thus, we would like to predict phrasal NLI labels for unaligned" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "phrases as well, but pair them with a special token \\((\\mathrm{p}_{\\langle \\mathrm{EMPTY}\\rangle}\\) or \\(\\mathrm{h}_{\\langle \\mathrm{EMPTY}\\rangle})\\), whose embedding is randomly initialized and learned by back-propagation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.825, + 0.169 + ], + "angle": 0, + "content": "Sentence Label Induction. We observe the sentence NLI label can be logically induced from phrasal NLI labels. Based on the definition of the NLI task, we develop the following induction rules." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.174, + 0.826, + 0.232 + ], + "angle": 0, + "content": "Entailment Rule: According to Bowman et al. (2015), a premise entailing a hypothesis means that, if the premise is true, then the hypothesis must be true. We find that this can be oftentimes transformed into phrasal relationships: a premise entails the hypothesis if all paired phrases have the label Entailment." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.236, + 0.827, + 0.285 + ], + "angle": 0, + "content": "Let \\(\\{(\\mathrm{p}_k,\\mathrm{h}_k)\\}_{k = 1}^K\\bigcup \\{(\\mathrm{p}_k,\\mathrm{h}_k)\\}_{k = K + 1}^{K'}\\) be all phrase pairs. 
For \\(k = 1,\\dots ,K\\), they are aligned phrases; for \\(k = K + 1,\\dots ,K'\\), they are unaligned phrases paired with the special token, i.e., \\(\\mathrm{p}_k = \\mathrm{p}_{\\langle \\mathrm{EMPTY}\\rangle}\\) or \\(\\mathrm{h}_k = \\mathrm{h}_{\\langle \\mathrm{EMPTY}\\rangle}\\). Then, we induce a sentence-level Entailment score by" + }, + { + "type": "equation", + "bbox": [ + 0.335, + 0.289, + 0.825, + 0.317 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {s e n t e n c e}} (\\mathsf {E} | \\mathrm {P}, \\mathrm {H}) = \\left[ \\prod_ {k = 1} ^ {K ^ {\\prime}} P _ {\\text {p h r a s e}} (\\mathsf {E} | \\mathrm {p} _ {k}, \\mathrm {h} _ {k}) \\right] ^ {\\frac {1}{K ^ {\\prime}}} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.32, + 0.825, + 0.432 + ], + "angle": 0, + "content": "This works in a fuzzy logic fashion (Zadeh, 1988; 1996), deciding whether the sentence-level label should be Entailment considering the average of phrasal predictions. Here, we use the geometric mean, because it is biased towards low scores, i.e., if there exists one phrase pair with a low Entailment score, then the chance of sentence label being Entailment is also low. Unaligned pairs should be considered in Eq. (4), because an unaligned phrase may indicate Entailment, shown in the second example of Table 1. Notice that the resulting value \\( S_{\\text{sentence}}(\\mathsf{E}|\\mathsf{P}, \\mathsf{H}) \\) is not normalized with respect to Contradiction and Neutral; thus, we call it a score (instead of probability), which will be normalized afterwards." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.439, + 0.825, + 0.469 + ], + "angle": 0, + "content": "Contradiction Rule: Two sentences are contradictory if there exists (at least) one paired phrase labeled as Contradiction. 
The fuzzy logic version of this induction rule is given by" + }, + { + "type": "equation", + "bbox": [ + 0.332, + 0.473, + 0.825, + 0.489 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {s e n t e n c e}} (\\mathbb {C} | \\mathrm {P}, \\mathrm {H}) = \\max _ {k = 1, \\dots , K} P _ {\\text {p h r a s e}} (\\mathbb {C} | \\mathrm {p} _ {k}, \\mathrm {h} _ {k}) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.494, + 0.826, + 0.551 + ], + "angle": 0, + "content": "Here, the max operator is used in the induction, because the contradiction rule is an existential statement, i.e., there exist(s) \\( \\cdots \\). Also, unaligned phrases are excluded in calculating the sentence-level Contradiction score, because an unaligned phrase indicates the corresponding information is missing in the other sentence and it cannot be Contradiction (recall examples in Table 1)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.556, + 0.825, + 0.586 + ], + "angle": 0, + "content": "Rule for Neutral: Two sentences are neutral if there exists (at least) one neutral phrase pair, but there does not exist any contradictory phrase pair. The fuzzy logic formula is" + }, + { + "type": "equation", + "bbox": [ + 0.245, + 0.589, + 0.825, + 0.608 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {s e n t e n c e}} (\\mathrm {N} | \\mathrm {P}, \\mathrm {H}) = \\left[ \\max _ {k = 1, \\dots , K ^ {\\prime}} P _ {\\text {p h r a s e}} (\\mathrm {N} | \\mathrm {p} _ {k}, \\mathrm {h} _ {k}) \\right] \\cdot \\left[ 1 - S _ {\\text {s e n t e n c e}} (\\mathrm {C} | \\mathrm {P}, \\mathrm {H}) \\right] \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.611, + 0.826, + 0.655 + ], + "angle": 0, + "content": "The first factor determines whether there exists a Neutral phrase pair (including unaligned phrases, illustrated in the first example in Table 1). 
The second factor evaluates the negation of \"at least one contradictory phrase,\" as suggested in the second clause of the Rule for Neutral." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.66, + 0.825, + 0.69 + ], + "angle": 0, + "content": "Finally, we normalize the scores into probabilities by dividing the sum, since all the scores are already positive. This is given by" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.694, + 0.825, + 0.711 + ], + "angle": 0, + "content": "\\[\nP _ {\\text {s e n t e n c e}} (\\mathrm {L} | \\cdot) = \\frac {1}{Z} S _ {\\text {s e n t e n c e}} (\\mathrm {L} | \\cdot) \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.714, + 0.82, + 0.731 + ], + "angle": 0, + "content": "where \\(\\mathsf{L}\\in \\{\\mathsf{E},\\mathsf{C},\\mathsf{N}\\}\\), and \\(Z = S_{\\text{sentence}}(\\mathsf{E}|\\cdot) + S_{\\text{sentence}}(\\mathsf{C}|\\cdot) + S_{\\text{sentence}}(\\mathsf{N}|\\cdot)\\) is the normalizing factor." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.825, + 0.766 + ], + "angle": 0, + "content": "Training and Inference. We use cross-entropy loss to train our EPR model by minimizing \\(-\\log P_{\\text{sentence}}(\\mathsf{t}|\\cdot)\\), where \\(\\mathsf{t} \\in \\{\\mathsf{E}, \\mathsf{C}, \\mathsf{N}\\}\\) is the groundtruth sentence-level label." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.771, + 0.826, + 0.843 + ], + "angle": 0, + "content": "Our underlying logical reasoning component can be trained end-to-end by back-propagation in a weakly supervised manner, because the fuzzy logic rules are almost everywhere differentiable. Although the max operators in Eqs. (5) and (6) may not be differentiable at certain points, they are common in max-margin learning and the rectified linear unit (ReLU) activation functions, and do not cause trouble in back-propagation." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.848, + 0.825, + 0.878 + ], + "angle": 0, + "content": "Once our EPR model is trained, we can obtain both phrasal and sentence-level labels. This is accomplished by performing argmax on the predicted probabilities (3) and (7), respectively." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.826, + 0.926 + ], + "angle": 0, + "content": "In traditional fuzzy logic, the conjunction is given by probability product (Zadeh, 1988). We find that this gives a too small Entailment score compared with Contradiction and Neutral scores, causing difficulties in end-to-end training. Thus, we take the geometric mean and maintain all the scores in the same magnitude." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.148 + ], + "angle": 0, + "content": "Improving Textual Explanation. Camburu et al. (2018) annotated a dataset to address NLI interpretability by generating an explanation sentence. For the example in Figure 1, the reference explanation is \"There cannot be one man and several men at same time.\"" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.153, + 0.828, + 0.21 + ], + "angle": 0, + "content": "In this part, we apply the predicted phrasal logical relationships to textual explanation generation and examine whether our EPR's output can help a downstream task. Figure 3 shows the overview of our textual explanation generator. We concatenate the premise and hypothesis in the form of “Premise : Hypothesis : …,” and feed it to a standard Transformer encoder (Vaswani et al., 2017)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.216, + 0.828, + 0.304 + ], + "angle": 0, + "content": "We utilize the phrase pairs and our predicted phrasal labels as factual knowledge to enhance the decoder. Specifically, our EPR model yields a set of tuples \\(\\{(\\mathrm{p}_k,\\mathrm{h}_k,\\mathrm{l}_k)\\}_{k = 1}^K\\) for a sample, where \\(\\mathbf{l}_k\\in \\{\\mathsf{E},\\mathsf{N},\\mathsf{C}\\}\\) is the predicted phrasal label for the aligned phrases, \\(\\mathrm{p}_k\\) and \\(\\mathrm{h}_k\\). We embed phrases by Sentence-BERT: \\(\\pmb{p}^{(L)}\\) and \\(\\pmb{h}^{(L)}\\); the phrasal label is represented by a one-hot vector \\(\\pmb{l}_k = \\mathrm{onehot}(\\mathrm{l}_k)\\). They are concatenated as a vector \\(\\pmb{m}_k = [\\pmb {p}_k;\\pmb {h}_k;\\pmb {l}_k]\\). We compose the vectors as a factual memory matrix \\(\\mathbf{M} = [m_1^\\top ;\\dots ;m_K^\\top ]\\in \\mathbb{R}^{K\\times d}\\), where \\(d\\) is the dimension of \\(\\pmb{m}_k\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.511, + 0.424 + ], + "angle": 0, + "content": "Our decoder follows a standard Transformer architecture (Vaswani et al., 2017), but is equipped with additional attention mechanisms to the factual memory. Consider the \\(i\\)th decoding step. We feed the factual memory to an MLP as \\(\\tilde{\\mathbf{M}} = \\mathrm{MLP}(\\mathbf{M})\\). 
We compute attention \\(\\pmb{a}\\) over \\(\\tilde{\\mathbf{M}}\\) with the embedding of the input \\(\\pmb{y}_{i-1}\\), and aggregate factual information \\(\\pmb{c}\\) for the rows \\(\\pmb{m}_k\\) in \\(\\mathbf{M}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.183, + 0.428, + 0.495, + 0.455 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {a} = \\operatorname {softmax} (\\tilde {\\mathbf {M}} \\boldsymbol {y} _ {i - 1}), \\quad \\boldsymbol {c} = \\sum_ {k = 1} ^ {K} a _ {k} \\tilde {\\boldsymbol {m}} _ {k} ^ {\\top}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.459, + 0.59, + 0.503 + ], + "angle": 0, + "content": "where \\(a_{k}\\) is the kth element of the vector \\(\\pmb{a}\\) and \\(\\tilde{\\pmb{m}}_k\\) is the kth row of the matrix \\(\\tilde{\\mathbf{M}}\\). The factual information \\(\\pmb{c}\\) is fed to another layer \\(\\pmb{g}_i = \\mathrm{MLP}([c; y_{i-1}]) + c\\)." + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.315, + 0.825, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.45, + 0.825, + 0.48 + ], + "angle": 0, + "content": "Figure 3: Overview of the model for textual explanation generation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.508, + 0.827, + 0.607 + ], + "angle": 0, + "content": "Our Transformer decoder layer starts with self-attention \\(\\tilde{q}_i = \\mathrm{SelfAttn}(g_i)\\). Then, residual connection and layer normalization are applied as \\(q_{i} = \\mathrm{LayerNorm}(\\tilde{q}_{i} + g_{i})\\). A cross-attention mechanism obtains input information by \\(v_{i} = \\mathrm{CrossAttn}(q_{i},\\mathbf{H})\\), where \\(\\mathbf{H}\\) is the representation given by the encoder. \\(v_{i}\\) is fed to the Transformer's residual connection and layer normalization sub-layer. Multiple Transformer layers as mentioned above are stacked to form a deep architecture. 
The model is trained by standard cross-entropy loss against the reference explanation as in previous work (Kumar & Talukdar, 2020; Zhao & Vydiswaran, 2021; Narang et al., 2020)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.613, + 0.828, + 0.657 + ], + "angle": 0, + "content": "In this way, the model is enhanced with factual information given by our EPR weakly supervised reasoning. Experiments will show that it largely improves the BLEU score by 2 points (§ 4.2), being a new state of the art. This further verifies that our EPR indeed yields meaningful phrasal explanations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.675, + 0.329, + 0.691 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.706, + 0.488, + 0.721 + ], + "angle": 0, + "content": "4.1 DATASETS AND EVALUATION METRICS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.728, + 0.827, + 0.8 + ], + "angle": 0, + "content": "The main dataset we used in our experiments is the Stanford Natural Language Inference (SNLI) dataset (Bowman et al., 2015), which consists of 550K training samples, 10K validation samples, and another 10K test samples. Each data sample consists of two sentences (premise and hypothesis) and a sentence-level groundtruth label. For sentence-level NLI prediction, we still use accuracy to evaluate our approach, following previous work (Parikh et al., 2016; Chen et al., 2017; Radford et al., 2018)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.805, + 0.826, + 0.89 + ], + "angle": 0, + "content": "To evaluate the phrasal reasoning performance, we need additional human annotation and evaluation metrics, because most previous work only considers sentence-level performance (Feng et al., 2020) and has not performed quantitative phrasal reasoning evaluation. Although Camburu et al. (2018) annotated phrase highlights in their e-SNLI dataset, they are incomplete and do not provide logical relationships. 
Our annotators selected relevant phrases from two sentences and tagged them with phrasal NLI labels; they also selected and tagged unaligned phrases." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.897, + 0.825, + 0.925 + ], + "angle": 0, + "content": "3A groundtruth label is for a data point, which consists of two sentences. We call it a sentence-level label instead of phrasal labels." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.081, + 0.825, + 0.123 + ], + "angle": 0, + "content": "Table 2: Main results on the SNLI dataset. †Quoted from respective papers. ‡Obtained from the checkpoint sent by the authors. Other results are obtained by our experiments. GM and AM are the geometric and arithmetic means of the \\( F \\) scores." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.125, + 0.825, + 0.304 + ], + "angle": 0, + "content": "
ModelSent AccReasoning Performance
FEFCFNFUPFUHGMAM
Human-84.7171.0155.1282.4661.8070.0771.02
Non-reasoning
Mahabadi et al. (2020)†85.1-------
LSTM (Wang & Jiang, 2016)†86.1-------
Transformer (Radford et al., 2018)89.9-------
SBERT (Reimers & Gurevych, 2019)91.4-------
Baselines
NNL (Feng et al., 2020)‡79.9162.7217.491.5066.220.000.0029.59
STP85.7662.4034.7637.0476.6151.8050.2052.52
GPT-3-Davinci (Brown et al., 2020)-53.7558.0016.1252.2431.0838.2342.24
Ours
EPR (Local, LM unfinetuned)76.33±0.4883.11±0.2938.73±0.8544.63±0.8876.6151.8056.39±0.4358.98±0.34
EPR (Local, LM finetuned)79.36±0.1382.44±0.2644.10±1.3244.69±3.2276.6151.8057.77±0.8559.93±0.67
EPR (Concat, LM unfinetuned)84.53±0.1973.29±0.6837.95±1.1640.56±1.1076.6151.8053.73±0.3956.04±0.33
EPR (Concat, LM finetuned)87.56±0.1569.91±1.2139.97±2.1243.31±2.7876.6151.8054.46±1.3556.32±1.13
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.324, + 0.827, + 0.368 + ], + "angle": 0, + "content": "We further propose a set of \\(F\\)-scores, which are a balanced measure of precision and recall between human annotation and model output for Entailment, Contradiction, Neutral, and Unaligned in terms of word indexes. Details of human annotation and evaluation metrics are shown in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.825, + 0.457 + ], + "angle": 0, + "content": "The inter-annotator agreement is presented in Table 2 in comparison with model performance (detailed in the next part). Here, we compute the agreement by treating one annotator as the ground truth and another as the system output; the score is averaged among all annotator pairs. As seen, humans generally achieve high agreement with each other, whereas model performance is relatively low. This shows that our task and metrics are well-defined, yet phrasal logical reasoning is a challenging task for machine learning models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.463, + 0.827, + 0.616 + ], + "angle": 0, + "content": "Textual explanation generation was evaluated on the e-SNLI dataset (Camburu et al., 2018), which extends the SNLI dataset with one reference explanation for each training sample, and three reference explanations for each validation or test sample. Each reference explanation comes with highlighted rationales, a set of annotated words in the premise or hypothesis considered as the reason for the explanation annotation. We do not use these highlighted rationales, but enhance the neural model with EPR output for textual explanation generation. We follow previous work (Camburu et al., 2018; Narang et al., 2020), adopting BLEU (Papineni et al., 2002) and SacreBLEU (Post, 2018) scores as the evaluation metrics; they mainly differ in the tokenizer. Camburu et al. 
(2018) also report low consistency of the third annotated reference, and thus use only two references for evaluation. In our study, we consider both two-reference and three-reference BLEU/SacreBLEU. Appendix A.2 provides additional implementation details of textual explanation generation." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.639, + 0.279, + 0.652 + ], + "angle": 0, + "content": "4.2 RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.827, + 0.793 + ], + "angle": 0, + "content": "Phrasal Reasoning Performance. To the best of our knowledge, phrasal reasoning for NLI was not explicitly evaluated in previous literature. Therefore, we propose plausible extensions to previous studies as our baselines. We consider the study of Neural Natural Logic (NNL, Feng et al., 2020) as the first baseline. It applies an attention mechanism (Parikh et al., 2016), so that each word in the hypothesis is softly aligned with the words in the premise. Then, each word in the hypothesis is predicted with one of the seven natural logic relations proposed by MacCartney & Manning (2009). We consider the maximum attention score as the alignment, and map their seven natural logic relations to our three-category NLI labels: Equivalence, ForwardEntailment \\(\\mapsto\\) Entailment; Negation, Alternation \\(\\mapsto\\) Contradiction; and ReverseEntailment, Cover, Independence \\(\\mapsto\\) Neutral." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.8, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Table 2 shows that the word-level NNL approach cannot perform meaningful phrasal reasoning, although our metrics have already excluded explicit evaluation of phrases. The low performance is because their soft attention leads to many misalignments, whereas their seven-category logical relations are too fine-grained and cause complications in weakly supervised reasoning. 
In addition, NNL does not allow unaligned words in the hypothesis, showing that such a model is inadequate for NLI reasoning. By contrast, our EPR model extracts phrases of meaningful semantic units, being an appropriate granularity of logical reasoning. Moreover, we work with three-category NLI labels following the sentence-level NLI task formulation. This actually restricts the model's capacity, forcing the model to perform serious phrasal reasoning." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.35, + 0.08, + 0.645, + 0.093 + ], + "angle": 0, + "content": "Table 3: Results of ablation studies on SNLI." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.097, + 0.825, + 0.284 + ], + "angle": 0, + "content": "
ModelFeaturesSent AccReasoning Performance
FEFCFNFUPFUHGMAM
Full modelLocal76.33±0.4883.11±0.2938.73±0.8544.63±0.8876.6151.8056.39±0.4358.98±0.34
Global84.03±0.1270.84±0.6035.12±0.9036.37±1.5276.6151.8051.41±0.6254.15±0.41
Concat84.53±0.1973.29±0.6837.95±1.1640.56±1.1076.6151.8053.73±0.3956.04±0.33
Random chunkerLocal72.4463.2122.6532.0465.9436.1340.5343.99
Global82.8158.0930.6427.4965.9436.1341.0543.66
Concat83.0958.7532.4131.1465.9436.1342.6644.87
Semantic role labelingLocal71.1073.7929.3928.9970.1943.1145.2749.09
Global82.8160.1432.0730.4870.1943.1144.6747.20
Concat83.1161.6431.7628.3370.1943.1144.1547.01
Random alignmentLocal68.5259.3221.7926.2051.4316.5031.0235.05
Global81.9953.8535.1031.3951.4316.5034.7137.66
Concat82.4957.2234.8330.9151.4316.5034.9738.18
Mean inductionLocal79.6177.3837.1436.1376.6151.8052.8455.81
Global83.8255.0829.9224.7076.6151.8043.8247.62
Concat84.9657.1231.9331.4176.6151.8046.9249.77
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.296, + 0.825, + 0.395 + ], + "angle": 0, + "content": "In addition, we include another intuitive SBERT-based competing model for comparison. We first apply our own heuristics of phrase detection and alignment (thus, the model will have the same \\( F_{\\mathsf{UP}} \\) and \\( F_{\\mathsf{UH}} \\) scores); then, we directly train the phrasal NLI predictor by sentence-level labels. We obtain the sentence NLI prediction by taking argmax over Eq. (7). We call this STP (Sentence label Training Phrases). As seen, STP provides some meaningful phrasal reasoning results, because the training can smooth out the noise of phrasal labels, which are directly set as the sentence-level labels. But still, its performance is significantly lower than our EPR model." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.4, + 0.825, + 0.485 + ], + "angle": 0, + "content": "We experimented with a baseline of few-shot prompting with GPT-3 (Brown et al., 2020), and the implementation detail is shown in Appendix A.2. We see that GPT-3 is able to provide more or less meaningful reasoning, and surprisingly the contradiction \\( F \\)-score is higher than all competing methods. However, the overall mean \\( F \\) scores are much lower. The results show that phrasal reasoning is challenging for pretrained language models, highlighting the importance of our task formulation and the proposed EPR approach even in the prompting era." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.491, + 0.825, + 0.59 + ], + "angle": 0, + "content": "Among our EPR variants, we see that EPR with local phrase embeddings achieves the highest reasoning performance, and that EPR with concatenated features achieves a good balance between sentence-level accuracy and reasoning. Our EPR variants were run 5 times with different initialization, and standard deviations are also reported in Table 3. 
As seen, our improvement compared with the best baseline is around 9.1-10.7 times the standard deviation in mean \\( F \\) scores, which is a large margin. Suppose the \\( F \\) scores are Gaussian distributed,\\(^{4}\\) the improvement is also statistically significant (\\( p \\)-value \\( < 4.5\\mathrm{e} - 20 \\) comparing our worse variant with the best competing model by one-sided test)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.595, + 0.827, + 0.695 + ], + "angle": 0, + "content": "We further compare our EPR with non-reasoning models (Wang & Jiang, 2016; Radford et al., 2018), which are unable to provide phrasal explanations but may or may not achieve high sentence accuracy. The results show that our phrasal EPR model hurts the sentence-level accuracy by 2-4 points, when the model architecture is controlled. This resonates with traditional symbolic AI approaches (MacCartney & Manning, 2008), where interpretable models may not outperform black-box neural networks. Nevertheless, our sentence-level accuracy is still decent, outperforming a few classic neural models, including fuzzy logic applied to sentence embeddings (Mahabadi et al., 2020)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.7, + 0.827, + 0.84 + ], + "angle": 0, + "content": "Analysis. We consider several ablated models to verify the effect of every component in our EPR model. (1) Random chunker, which splits the sentence randomly based on the number of chunks detected by our system. (2) Random aligner, which randomly aligns phrases but keeps the number of aligned phrases unchanged. (3) Semantic role labeling, which uses the semantic roles, detected by AllenNLP (Gardner et al., 2018), as the reasoning unit. (4) Mean induction, which induces the sentence NLI label by the geometric mean of phrasal NLI prediction. In addition, we consider local phrase embedding features, global features, and their concatenation for the above model variants. 
Due to a large number of settings, each variant was run only once; we do not view this as a concern because Table 2 shows a low variance of our approach. Also, the underlying language model is un-finetuned in our ablation study, as it yields slightly lower performance but is much more efficient." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.846, + 0.825, + 0.889 + ], + "angle": 0, + "content": "As seen in Table 3, the random chunker and aligner yield poor phrasal reasoning performance, showing that working with meaningful semantic units and their alignments is important to logical reasoning. This also verifies that our word index-based metrics are able to evaluate phrase detection" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.898, + 0.825, + 0.925 + ], + "angle": 0, + "content": "4When the score has a low standard deviation, a Gaussian distribution is a reasonable assumption because the probability of exceeding the range of \\(F\\) scores is extremely low." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "and alignment in an implicit manner. We further applied semantic role labeling as our reasoning unit. We find its performance is higher than the random chunker but lower than our method. This is because semantic role labeling is verb-centric, and the extracted spans may be incomplete." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.153, + 0.827, + 0.253 + ], + "angle": 0, + "content": "Interestingly, local features yield higher reasoning performance, but global and concatenated features yield higher sentence accuracy. 
This is because global features provide aggregated information of the entire sentence and allow the model to bypass meaningful reasoning. In the variant of the mean induction, for example, the phrasal predictor can simply learn to predict the sentence-level label with global sentence information; then, the mean induction is an ensemble of multiple predictors. In this way, it achieves the highest sentence accuracy (0.43 points higher than our full model with concatenated features), but is 6 points lower in reasoning performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.258, + 0.828, + 0.343 + ], + "angle": 0, + "content": "This reminds us of the debate between old schools of AI (Chandrasekaran et al., 1988; Boucher & Dienes, 2003; Goel, 2022). Recent deep learning models take the connectionists' view, and generally outperform symbolists' approaches in terms of the ultimate prediction, but they lack expressible explanations. Combining neural and symbolic methods becomes a hot direction in recent AI research (Liang et al., 2017; Dong et al., 2018; Yi et al., 2018). In general, our EPR model with global features achieves high performance in both reasoning and ultimate prediction for the NLI task." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.349, + 0.427, + 0.543 + ], + "angle": 0, + "content": "Results of Textual Explanation Generation. In this part, we apply EPR's predicted output—phrasal logical relationships—as factual knowledge to textual explanation generation. Most previous studies use the groundtruth sentence-level NLI label and/or highlighted rationales. This requires human annotations, which are resource-consuming to obtain. By contrast, we require no extra human-annotated resources; our factual knowledge is based on our weakly supervised reasoning approach." + }, + { + "type": "text", + "bbox": [ + 0.434, + 0.349, + 0.828, + 0.422 + ], + "angle": 0, + "content": "Table 4: Textual explanation results on e-SNLI. 
Previous work uses auxiliary information (L: the groundtruth NLI label; H: human-annotated highlights), but we use neither. \\( {}^{ \\dagger } \\) Quoted from respective papers. \\( {}^{ \\ddagger } \\) Evaluated by checkpoints. \\( {}^{\\parallel } \\) Our replication with provided code." + }, + { + "type": "table", + "bbox": [ + 0.437, + 0.426, + 0.825, + 0.541 + ], + "angle": 0, + "content": "
ModelInfoBLEUSacreBLEU
LH2 refs3 refs2 refs3 refs
Camburu et al. (2018)†--27.58---
NILE (Kumar & Talukdar, 2020)∥-28.5737.7332.5141.78
NILE (Kumar & Talukdar, 2020)‡-28.6737.8432.7442.06
FinetunedWT5220M (Narang et al., 2020)†---32.40-
FinetunedWT511B (Narang et al., 2020)†---33.70-
LIREx (Zhao & Vydiswaran, 2021)∥17.2222.4021.2426.68
Finetune T560M--27.7536.7831.7440.89
+ Annotated Highlights64M27.9136.9032.2041.21
+ EPR Outputs64M (ours)--29.9138.3033.9642.63
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.55, + 0.826, + 0.634 + ], + "angle": 0, + "content": "Table 4 shows our explanation generation performance on e-SNLI. Since evaluation metrics are not consistently used for explanation generation in previous studies, we replicate the approaches when the code or checkpoint is available. For large pretrained models, we quote results from the previous paper (Narang et al., 2020). Their model is called WT5, having 220M or 11B parameters depending on the underlying T5 model. Profoundly, we achieve higher performance with 60M-parameter T5-small, which is \\(3.3\\mathrm{x}\\) and \\(170\\mathrm{x}\\) smaller in model size than the two WT5 variants." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.64, + 0.827, + 0.725 + ], + "angle": 0, + "content": "In addition, we conducted a controlled experiment using the rationale highlights annotated by Camburu et al. (2018) for e-SNLI. It achieves a relatively small increase of 0.2-0.5 BLEU points, whereas our EPR's outputs yield a 2-point improvement. The difference in the performance gains shows that our EPR's phrasal logical relationships provide more valuable information than human-annotated highlights. In general, we achieve a new state of the art on e-SNLI with a small language model, demonstrating the importance of phrasal reasoning in textual explanations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.825, + 0.773 + ], + "angle": 0, + "content": "Additional Results. We show additional results as appendices. § C.1: Reasoning performance on the MNLI dataset; § C.2: Error analysis; § C.3: Case studies of our EPR model; and § C.4: Case studies of textual explanation generation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.78, + 0.827, + 0.879 + ], + "angle": 0, + "content": "Conclusion. The paper proposes an explainable phrasal reasoning (EPR) model for NLI with neural fuzzy logic, trained in a weakly supervised manner. 
We further propose an experimental design, including data annotation, evaluation metrics, and plausible baselines. Results show that phrasal reasoning for NLI is a meaningfully defined task, as humans can achieve high agreement. Our EPR achieves decent sentence-level accuracy, but much higher reasoning performance than all competing models. We also achieve a new state-of-the-art performance on e-SNLI textual explanation generation by applying EPR's phrasal logical relationships." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.103, + 0.289, + 0.118 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.126, + 0.829, + 0.17 + ], + "angle": 0, + "content": "Islam Beltagy, Stephen Roller, Pengxiang Cheng, Katrin Erk, and Raymond J Mooney. Representing meaning with a combination of logical and distributional models. Computational Linguistics, pp. 763-808, 2016. URL https://aclanthology.org/J16-4007/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.178, + 0.829, + 0.222 + ], + "angle": 0, + "content": "Luke Boucher and Zoltán Dienes. Two ways of learning associations. Cognitive Science, 27(6):807-842, 2003. URL https://www.sciencedirect.com/science/article/pii/S0364021303000715." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.23, + 0.828, + 0.274 + ], + "angle": 0, + "content": "Samuel Bowman, Gabor Angeli, Christopher Potts, and Christopher D Manning. A large annotated corpus for learning natural language inference. In EMNLP, pp. 632-642, 2015. URL https://aclanthology.org/D15-1075." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.282, + 0.829, + 0.395 + ], + "angle": 0, + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In NeurIPS, pp. 1877-1901, 2020. URL https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.403, + 0.829, + 0.46 + ], + "angle": 0, + "content": "Oana-Maria Camburu, Tim Rocktäschel, Thomas Lukasiewicz, and Phil Blunsom. e-SNLI: Natural language inference with natural language explanations. In NeurIPS, pp. 9539-9549, 2018. URL https://proceedings.neurips.cc/paper/2018/hash/4c7a167bb329bd92580a99ce422d6fa6-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.468, + 0.829, + 0.512 + ], + "angle": 0, + "content": "Balakrishnan Chandrasekaran, Ashok Goel, and Dean Allemang. Connectionism and information processing abstractions. AI Magazine, 9(4):24-24, 1988. URL https://ojs.aaai.org/index.php/aimagazine/article/view/951." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.52, + 0.829, + 0.562 + ], + "angle": 0, + "content": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. Enhanced LSTM for natural language inference. In ACL, pp. 1657-1668, 2017. URL https://aclanthology.org/P17-1152/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.572, + 0.829, + 0.615 + ], + "angle": 0, + "content": "Zeming Chen, Qiyue Gao, and Lawrence S Moss. NeuralLog: Natural language inference with joint neural and logical reasoning. 
arXiv preprint arXiv:2105.14167, 2021. URL https://arxiv.org/abs/2105.14167." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.623, + 0.829, + 0.667 + ], + "angle": 0, + "content": "Anup Anand Deshmukh, Qianqiu Zhang, Ming Li, Jimmy Lin, and Lili Mou. Unsupervised chunking as syntactic structure induction with a knowledge-transfer approach. In Findings of EMNLP, pp. 3626-3634, 2021. URL https://aclanthology.org/2021.findings-emnlp.307." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.675, + 0.829, + 0.718 + ], + "angle": 0, + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In *NAACL-HLT*, pp. 4171–4186, 2019. URL https://aclanthology.org/N19-1423." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.727, + 0.829, + 0.77 + ], + "angle": 0, + "content": "Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah Smith. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. arXiv preprint arXiv:2002.06305, 2020. URL https://arxiv.org/abs/2002.06305." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.778, + 0.829, + 0.809 + ], + "angle": 0, + "content": "Honghua Dong, Jiayuan Mao, Tian Lin, Chong Wang, Lihong Li, and Denny Zhou. Neural logic machines. In ICLR, 2018. URL https://openreview.net/forum?id=B1xY-hRctX." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.816, + 0.829, + 0.859 + ], + "angle": 0, + "content": "Yufei Feng, Quan Liu, Michael Greenspan, Xiaodan Zhu, et al. Exploring end-to-end differentiable natural logic modeling. In COLING, pp. 1172-1185, 2020. URL https://aclanthology.org/2020.coling-main.101." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson F. 
Liu, Matthew Peters, Michael Schmitz, and Luke Zettlemoyer. AllenNLP: A deep semantic natural language processing platform. In Proc. Workshop for NLP Open Source Software (NLP-OSS), pp. 1-6, 2018. URL https://aclanthology.org/W18-2501." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.126, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.134 + ], + "angle": 0, + "content": "Ashok Goel. Looking back, looking ahead: Symbolic versus connectionist AI. AI Magazine, 42(4): 83-85, 2022. URL https://ojs.aaai.org/index.php/aimagazine/article/view/15111." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.139, + 0.827, + 0.17 + ], + "angle": 0, + "content": "John Hewitt and Christopher D Manning. A structural probe for finding syntax in word representations. In NAACL-HLT, pp. 4129-4138, 2019. URL https://aclanthology.org/N19-1419." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.176, + 0.828, + 0.234 + ], + "angle": 0, + "content": "Hai Hu, Qi Chen, Kyle Richardson, Atreyee Mukherjee, Lawrence S Moss, and Sandra Kübler. MonaLog: A lightweight system for natural language inference based on monotonicity. In Proc. Society for Computation in Linguistics, pp. 284-293, 2020. URL https://aclanthology.org/2020.scil-1.40/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.24, + 0.825, + 0.272 + ], + "angle": 0, + "content": "Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with Gumbel-softmax. In ICLR, 2017. URL https://openreview.net/forum?id=rkE3y85ee." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.278, + 0.827, + 0.322 + ], + "angle": 0, + "content": "Zhongtao Jiang, Yanzhe Zhang, Zhao Yang, Jun Zhao, and Kang Liu. Alignment rationale for natural language inference. In ACL-IJCNLP, pp. 5372-5387, 2021. URL https://aclanthology.org/2021.acl-long.417/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.328, + 0.827, + 0.373 + ], + "angle": 0, + "content": "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. Dense passage retrieval for open-domain question answering. In EMNLP, pp. 6769-6781, 2020. URL https://aclanthology.org/2020.emnlp-main.550/." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.379, + 0.827, + 0.41 + ], + "angle": 0, + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. URL https://arxiv.org/abs/1412.6980." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.416, + 0.828, + 0.459 + ], + "angle": 0, + "content": "Sawan Kumar and Partha Talukdar. NILE: Natural language inference with faithful natural language explanations. In ACL, pp. 8730-8742, 2020. URL https://aclanthology.org/2020.acl-main.771." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.466, + 0.827, + 0.497 + ], + "angle": 0, + "content": "Tao Lei, Regina Barzilay, and Tommi Jaakkola. Rationalizing neural predictions. In EMNLP, pp. 107-117, 2016. URL https://aclanthology.org/D16-1011/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.503, + 0.825, + 0.534 + ], + "angle": 0, + "content": "Bowen Li, Lili Mou, and Frank Keller. An imitation learning approach to unsupervised parsing. In ACL, pp. 3485-3492, 2019. URL https://aclanthology.org/P19-1338." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.54, + 0.827, + 0.584 + ], + "angle": 0, + "content": "Chen Liang, Jonathan Berant, Quoc Le, Kenneth Forbus, and Ni Lao. 
Neural symbolic machines: Learning semantic parsers on Freebase with weak supervision. In ACL, pp. 23-33, 2017. URL https://aclanthology.org/P17-1003/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.591, + 0.828, + 0.635 + ], + "angle": 0, + "content": "Xianggen Liu, Lili Mou, Haotian Cui, Zhengdong Lu, and Sen Song. Jumper: Learning when to make classification decisions in reading. In *IJCAI*, pp. 4237-4243, 2018. URL https://www.ijcai.org/proceedings/2018/0589.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.641, + 0.828, + 0.672 + ], + "angle": 0, + "content": "Yang Liu and Mirella Lapata. Text summarization with pretrained encoders. In EMNLP-IJCNLP, pp. 3730-3740, 2019. URL https://aclanthology.org/D19-1387/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.679, + 0.828, + 0.723 + ], + "angle": 0, + "content": "Zhengdong Lu, Xianggen Liu, Haotian Cui, Yukun Yan, and Daqi Zheng. Object-oriented neural programming (OONP) for document understanding. In ACL, pp. 2717-2726, 2018. URL https://aclanthology.org/P18-1253." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.729, + 0.828, + 0.773 + ], + "angle": 0, + "content": "Bill MacCartney and Christopher D Manning. Natural logic for textual inference. In Proc. ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 193-200, 2007. URL https://aclanthology.org/W07-1431/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.78, + 0.828, + 0.823 + ], + "angle": 0, + "content": "Bill MacCartney and Christopher D. Manning. Modeling semantic containment and exclusion in natural language inference. In *COLING*, pp. 521-528, 2008. URL https://aclanthology.org/C08-1066." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.83, + 0.828, + 0.874 + ], + "angle": 0, + "content": "Bill MacCartney and Christopher D Manning. An extended model of natural logic. In Proc. International Conference on Computational Semantics, pp. 140-156, 2009. URL https://aclanthology.org/W09-3714." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.881, + 0.828, + 0.924 + ], + "angle": 0, + "content": "Bill MacCartney, Michel Galley, and Christopher D Manning. A phrase-based alignment model for natural language inference. In EMNLP, pp. 802-811, 2008. URL https://aclanthology.org/D08-1084." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.828, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.104, + 0.829, + 0.147 + ], + "angle": 0, + "content": "Rabeeh Karimi Mahabadi, Florian Mai, and James Henderson. Learning entailment-based sentence embeddings from natural language inference. Online Manuscript, 2020. URL https://openreview.net/forum?id=BkxackSKvH." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.827, + 0.201 + ], + "angle": 0, + "content": "Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B. Tenenbaum, and Jiajun Wu. The neurosymbolic concept learner: Interpreting scenes, words, and sentences from natural supervision. In ICLR, 2019. URL https://openreview.net/forum?id=rJgM1hRctm." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.211, + 0.827, + 0.255 + ], + "angle": 0, + "content": "Lili Mou, Rui Men, Ge Li, Yan Xu, Lu Zhang, Rui Yan, and Zhi Jin. Natural language inference by tree-based convolution and heuristic matching. In ACL, pp. 130-136, 2016. URL https://aclanthology.org/P16-2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.265, + 0.827, + 0.308 + ], + "angle": 0, + "content": "Lili Mou, Zhengdong Lu, Hang Li, and Zhi Jin. Coupling distributed and symbolic execution for natural language queries. In ICML, pp. 2518-2526, 2017. 
URL https://proceedings.mlr.press/v70/mou17a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.318, + 0.827, + 0.362 + ], + "angle": 0, + "content": "Sharan Narang, Colin Raffel, Katherine Lee, Adam Roberts, Noah Fiedel, and Karishma Malkan. WT5?! Training text-to-text models to explain their predictions. arXiv preprint arXiv:2004.14546, 2020. URL https://arxiv.org/abs/2004.14546." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.372, + 0.827, + 0.416 + ], + "angle": 0, + "content": "Ken Nozaki, Hisao Ishibuchi, and Hideo Tanaka. A simple but powerful heuristic method for generating fuzzy rules from numerical data. Fuzzy Sets and Systems, 86(3):251-270, 1997. URL https://www.sciencedirect.com/science/article/abs/pii/0165011495004130." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.426, + 0.827, + 0.469 + ], + "angle": 0, + "content": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. BLEU: A method for automatic evaluation of machine translation. In ACL, pp. 311-318, 2002. URL https://aclanthology.org/P02-1040." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.479, + 0.827, + 0.523 + ], + "angle": 0, + "content": "Ankur Parikh, Oscar Täckström, Dipanjan Das, and Jakob Uszkoreit. A decomposable attention model for natural language inference. In EMNLP, pp. 2249-2255, 2016. URL https://aclanthology.org/D16-1244/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.533, + 0.827, + 0.564 + ], + "angle": 0, + "content": "Matt Post. A call for clarity in reporting BLEU scores. In Proc. Conference on Machine Translation: Research Papers, pp. 186-191, 2018. URL https://aclanthology.org/W18-6319." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.573, + 0.827, + 0.617 + ], + "angle": 0, + "content": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. OpenAI Blog, 2018. 
URL https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.626, + 0.827, + 0.657 + ], + "angle": 0, + "content": "Nils Reimers and Iryna Gurevych. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In EMNLP, 2019. URL https://aclanthology.org/D19-1410." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.667, + 0.827, + 0.711 + ], + "angle": 0, + "content": "Cynthia Rudin. Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1(5):206-215, 2019. URL https://www.nature.com/articles/s42256-019-0048-x." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.72, + 0.827, + 0.778 + ], + "angle": 0, + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, pp. 5998-6008, 2017. URL https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.788, + 0.827, + 0.831 + ], + "angle": 0, + "content": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In ICLR, 2019. URL https://openreview.net/forum?id=rJ4km2R5t7." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.842, + 0.827, + 0.872 + ], + "angle": 0, + "content": "Shuohang Wang and Jing Jiang. Learning natural language inference with LSTM. In NAACL-HLT, pp. 1442-1451, 2016. URL https://aclanthology.org/N16-1170/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian ichter, Fei Xia, Ed H. Chi, Quoc V Le, and Denny Zhou. 
Chain of thought prompting elicits reasoning in large language models. In NeurIPS, 2022. URL https://openreview.net/forum?id=_VjQlMeSB_J." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In *NAACL-HLT*, pp. 1112–1122, 2018. URL https://aclanthology.org/N18-1101." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.155, + 0.828, + 0.198 + ], + "angle": 0, + "content": "Wenhan Xiong, Thien Hoang, and William Yang Wang. DeepPath: A reinforcement learning method for knowledge graph reasoning. In EMNLP, pp. 564-573, 2017. URL https://aclanthology.org/D17-1060/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.206, + 0.828, + 0.263 + ], + "angle": 0, + "content": "Hitomi Yanaka, Koji Mineshima, Daisuke Bekki, Kentaro Inui, Satoshi Sekine, Lasha Abzianidze, and Johan Bos. HELP: A dataset for identifying shortcomings of neural models in monotonicity reasoning. In Proc. Conference on Lexical and Computational Semantics, pp. 250-255, 2019a. URL https://aclanthology.org/S19-1027." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.271, + 0.827, + 0.316 + ], + "angle": 0, + "content": "Hitomi Yanaka, Koji Mineshima, Daisuke Bekki, Kentaro Inui, Satoshi Sekine, Lasha Abzianidze, and Johan Bos. Can neural networks understand monotonicity reasoning? In ACL BlackboxNLP Workshop, pp. 31-40, 2019b. URL https://aclanthology.org/W19-4804." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.323, + 0.828, + 0.38 + ], + "angle": 0, + "content": "Kexin Yi, Jiajun Wu, Chuang Gan, Antonio Torralba, Pushmeet Kohli, and Josh Tenenbaum. Neural-symbolic VQA: Disentangling reasoning from vision and language understanding. In NeurIPS, 2018. URL https://proceedings.neurips.cc/paper/2018/file/5e388103a391daabe3de1d76a6739ccd-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.388, + 0.827, + 0.431 + ], + "angle": 0, + "content": "Deunsol Yoon, Dongbok Lee, and SangKeun Lee. Dynamic self-attention: Computing attention over words dynamically for sentence embedding. arXiv preprint arXiv:1808.07383, 2018. URL https://arxiv.org/abs/1808.07383." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.439, + 0.827, + 0.468 + ], + "angle": 0, + "content": "Lotfi A Zadeh. Fuzzy logic. Computer, 21(4):83-93, 1988. URL https://ieeexplore.ieee.org/abstract/document/53." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.477, + 0.828, + 0.518 + ], + "angle": 0, + "content": "Lotfi A Zadeh. Fuzzy sets. In *Fuzzy Sets, Fuzzy Logic, and Fuzzy Systems*, pp. 394-432. World Scientific, 1996. URL https://www.worldscientific.com/doi/abs/10.1142/9789814261302_0021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.527, + 0.828, + 0.571 + ], + "angle": 0, + "content": "Zhuosheng Zhang, Yuwei Wu, Hai Zhao, Zuchao Li, Shuailiang Zhang, Xi Zhou, and Xiang Zhou. Semantics-aware BERT for language understanding. In AAAI, pp. 9628-9635, 2020. URL https://ojs.aaai.org/index.php/AAAI/article/view/6510." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.579, + 0.828, + 0.622 + ], + "angle": 0, + "content": "Xinyan Zhao and V.G.Vinod Vydiswaran. LIREx: Augmenting language inference with relevant explanations. In AAAI, pp. 14532-14539, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/17708." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.622 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.648, + 0.442, + 0.664 + ], + "angle": 0, + "content": "A IMPLEMENTATION DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.68, + 0.364, + 0.694 + ], + "angle": 0, + "content": "A.1 PHRASE DETECTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.705, + 0.825, + 0.763 + ], + "angle": 0, + "content": "We present more details about our phrase detection. We use \\(\\mathrm{SpaCy}^5\\) to obtain the part-of-speech (POS) tag\\(^6\\) of every word. SpaCy also tags noun phrases. However, if a noun phrase follows a preposition (with a fine-grained POS tag being IN), we remove it from noun phrases but tag it as a prepositional phrase." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.768, + 0.825, + 0.825 + ], + "angle": 0, + "content": "In addition, we extract verbs by the POS tag VERB. A verb may be followed by a particle with the fine-grained POS tag being RP (e.g., show off). It is treated as a verb phrase. In order to handle negation, we allow optional AUX NOT before a verb, (e.g., could not help). This, however, only counts less than \\(1\\%\\) in the dataset, and does not affect our model much." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.831, + 0.829, + 0.875 + ], + "angle": 0, + "content": "To capture other potential semantic units, we treat remaining open class words7 as individual phrases. Finally, the remaining non-content words (in the categories of closed words and others) are discarded (e.g., \"there is\"). 
This is appropriate, because they do not represent meaningful semantics or play a" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.883, + 0.321, + 0.897 + ], + "angle": 0, + "content": "5https://spacy.io" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.897, + 0.617, + 0.911 + ], + "angle": 0, + "content": "6See definitions in https://spacy.io/usage/linguistic-features" + }, + { + "type": "page_footnote", + "bbox": [ + 0.194, + 0.911, + 0.496, + 0.924 + ], + "angle": 0, + "content": "7https://universaldependencies.org/u/pos/" + }, + { + "type": "list", + "bbox": [ + 0.193, + 0.883, + 0.617, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.263, + 0.101, + 0.735, + 0.117 + ], + "angle": 0, + "content": "Table 5: Our rules for phrase detection. \"[·]\" means the item is optional." + }, + { + "type": "table", + "bbox": [ + 0.208, + 0.127, + 0.791, + 0.214 + ], + "angle": 0, + "content": "
Example: The woman is showing off her blue dog at the playground.
NumberPhrase typeRuleExtracted phrase(s)
1Prepositional phraseIN + NPat the playground
2Noun phraseNPThe woman|her blue dog
3Verb phrase[AUX] + [NOT] + VERB + [RP]is showing off
4OthersOther open class words-
" + }, + { + "type": "image", + "bbox": [ + 0.185, + 0.24, + 0.468, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.239, + 0.79, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.298, + 0.439, + 0.698, + 0.455 + ], + "angle": 0, + "content": "Figure 4: Results of tuning the coefficient of global features." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.484, + 0.827, + 0.528 + ], + "angle": 0, + "content": "role in reasoning. Table 5 summarizes all the rules used in our approach. They are executed in order and extracted phrases are exclusive. For example, the playground in the phrase at the playground will not be treated as a standalone noun phrase, as it is already part of a prepositional phrase." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.533, + 0.825, + 0.562 + ], + "angle": 0, + "content": "Empirically, our rule-based approach works well for the NLI dataset, and our logical reasoning is at the granularity of the extracted phrases." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.582, + 0.291, + 0.595 + ], + "angle": 0, + "content": "A.2 SETTINGS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.609, + 0.828, + 0.846 + ], + "angle": 0, + "content": "Details of the EPR Model. We chose the pretrained model all-mpnet-base-\\(v2^8\\) from the SentenceBERT study (Reimers & Gurevych, 2019) and obtained 768-dimensional local and global phrase embeddings. Our MLP had the same dimension as the embeddings, i.e., 768D for the local and global variants, or 1536D for the concatenation variant. We chose the coefficient for the global feature in Eq. (1) from a candidate set of \\(\\{0.0, 0.2, 0.4, 0.6, 0.8, 1.0\\}\\). Figure 4 shows the hyperparameter tuning results on SNLI (mentioned in § 4.2) and MNLI (to be discussed in § C.1). We find that 0.4 yields the best sentence accuracy in SNLI, and that 1.0 is the best for MNLI. 
As our focus is on reasoning, we set the coefficient to be 0.6, because it yields the highest phrasal reasoning performance and decent sentence-level performance for both experiments and in terms of both geometric mean and arithmetic mean of \\(F\\) scores. The pretrained language model (LM) was either finetuned or un-finetuned during training. Finetuning yields higher performance (Table 2), whereas un-finetuned LM is more efficient for in-depth analyses (Table 3). We trained the model with a batch size of 256. We used Adam (Kingma & Ba, 2015) with a learning rate of 5e-5, \\(\\beta_1 = 0.9\\), \\(\\beta_2 = 0.999\\), learning rate warm up over the first 10 percent of the total steps, and linear decay of the learning rate. The model was trained up to 3 epochs, following the common practice (Dodge et al., 2020). Our main model variants were trained 5 times with different parameter initializations, and we report the mean and standard deviation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.852, + 0.828, + 0.896 + ], + "angle": 0, + "content": "Details of Textual Explanation Generation. We used the pretrained T5-small model for finetuning with a batch size of 32. 
The optimizer was Adam with an initial learning rate of 3e-4, \\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.999\\), learning rate warm-up for the first 2 epochs, and linear decay of the learning rate up to 10" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.564, + 0.924 + ], + "angle": 0, + "content": "8https://www.sbert.net/docs/pretrained_models.html" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.106, + 0.82, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.352, + 0.34, + 0.645, + 0.356 + ], + "angle": 0, + "content": "Figure 5: The prompt for phrasal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.385, + 0.825, + 0.414 + ], + "angle": 0, + "content": "epochs; then we decreased the learning rate to 3e-6 and trained the model until the validation BLEU score did not increase for 2 epochs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.42, + 0.825, + 0.464 + ], + "angle": 0, + "content": "Details of the Prompting Baseline. We adopted the GPT-3 (the text-davinci-003 version with 175B parameters) (Brown et al., 2020) as a prompting baseline to demonstrate large language models (LLMs)' phrasal reasoning ability." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.469, + 0.827, + 0.541 + ], + "angle": 0, + "content": "We consider exemplar-based prompting, because it is unlikely for an LLM to output structured reasoning results in a zero-shot manner. Moreover, our examples are chosen to cover all reasoning cases. We also set the temperature of decoding to 0 to obtain deterministic reasoning, following CoT prompting (Wei et al., 2022). Rule-based post-processing was applied to extract slot values. 
Figure 5 presents the prompt used for phrasal reasoning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.563, + 0.719, + 0.58 + ], + "angle": 0, + "content": "B DATA ANNOTATION AND REASONING EVALUATION METRICS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.597, + 0.827, + 0.655 + ], + "angle": 0, + "content": "Previous studies have not explicitly evaluated reasoning performance. Typically, they resort to sentence-level classification accuracy (Wang & Jiang, 2016; Mahabadi et al., 2020) or case studies (Parikh et al., 2016; Feng et al., 2020) to demonstrate the effectiveness of their alleged interpretable models, which we believe is inadequate." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.66, + 0.827, + 0.703 + ], + "angle": 0, + "content": "Therefore, we annotated a model-agnostic corpus about phrasal logical relationships and developed a set of metrics to evaluate the phrasal reasoning performance quantitatively. The resources are released on our website (Footnote 1) to facilitate future research." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.723, + 0.356, + 0.737 + ], + "angle": 0, + "content": "B.1 DATA ANNOTATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.827, + 0.863 + ], + "angle": 0, + "content": "We annotated the phrases and their logical relationships in a data sample. The annotators were asked to select corresponding phrases from both premise and hypothesis, and label them as either Entailment, Contradiction, or Neutral, with the sentence-level NLI label being given. Annotators could also select a phrase from either a premise or a hypothesis and label it as Unaligned. The process can be repeated until all phrases are labeled for a data sample. Figure 6 shows a screenshot of our annotation page. In the left panel, the annotator could select phrases in the two sentences and mark them with NLI labels. The annotator can view a sample's annotated phrases in the right panel and navigate through different samples." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.829, + 0.927 + ], + "angle": 0, + "content": "The annotation was performed by three in-lab researchers who are familiar with the NLI task. Our preliminary study shows low agreement when the annotators are unfamiliar with the task; thus it is inappropriate to recruit Mechanical Turks for annotation. We randomly selected 100 samples for annotation, following previous work on the textual explanation for SNLI (Camburu et al., 2018)," + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.119, + 0.485, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.1, + 0.795, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.344, + 0.352, + 0.655, + 0.368 + ], + "angle": 0, + "content": "Figure 6: A screenshot of the annotation page." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.829, + 0.418 + ], + "angle": 0, + "content": "Table 6: Examples illustrating the proposed metrics, where we consider the Entailment category. \"|\" refers to a phrase segmentation." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.426, + 0.827, + 0.551 + ], + "angle": 0, + "content": "
Example annotation of entailment (in highlight): Premise: A kid in red is playing in a garden. Hypothesis: A child in red is watching TV in the bedroom.
#Example OutputPE(P)PE(H)PERE(P)RE(H)REFEExplanation
1PH in a garden0000000Although in occurs in the annotation, the word indexes are different. The reasoning is wrong.
2PH watching TV1001000Mis-matched phrases in hypothesis. The reasoning is wrong.
3PH a kid | in red1111111All word indexes match the annotation. The reasoning is correct.
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.582, + 0.827, + 0.612 + ], + "angle": 0, + "content": "which is adequate to show statistical significance. Since our annotation only concerns data samples, it is agnostic to any machine learning model." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.635, + 0.571, + 0.648 + ], + "angle": 0, + "content": "B.2 EVALUATION METRICS FOR PHRASAL REASONING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.664, + 0.828, + 0.707 + ], + "angle": 0, + "content": "We propose a set of \\( F \\)-scores in Entailment, Contradiction, Neutral, and Unaligned to quantitatively evaluate the phrasal reasoning performance. We first introduce our metric for one data sample and then explain the extension to a corpus." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.712, + 0.829, + 0.863 + ], + "angle": 0, + "content": "Consider the Entailment category as an example. We first count the number of \"hits\" (true positives) between the word indexes of model output and annotation. Using word indexes (instead of words) rules out hitting the words in misaligned phrases (Example 1, Table 6). Then, we calculate precision scores for the premise and hypothesis, denoted by \\( P_{\\mathsf{E}}^{(P)} \\) and \\( P_{\\mathsf{E}}^{(H)} \\), respectively. Their geometric mean \\( P_{\\mathsf{E}} = (P_{\\mathsf{E}}^{(P)}P_{\\mathsf{E}}^{(H)})^{1 / 2} \\) is considered as the precision for Entailment. Here, the geometric mean rules out incorrect reasoning that hits either the premise or hypothesis, but not both (Example 2, Table 6). Further, we compute the recall score \\( R_{\\mathsf{E}} \\) in a similar way, and finally obtain the \\( F \\)-score by \\( F_{\\mathsf{E}} = \\frac{2P_{\\mathsf{E}}R_{\\mathsf{E}}}{P_{\\mathsf{E}} + R_{\\mathsf{E}}} \\). Likewise, \\( F_{\\mathsf{C}} \\) and \\( F_{\\mathsf{N}} \\) are calculated for Contradiction and Neutral. 
In addition, we compute the \\( F \\)-score for unaligned phrases in premise and hypothesis, denoted by \\( F_{\\mathsf{UP}} \\) and \\( F_{\\mathsf{UH}} \\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.869, + 0.828, + 0.926 + ], + "angle": 0, + "content": "When calculating our \\(F\\)-scores for a corpus, we use micro-average, i.e., the precision and recall ratios are calculated in the corpus level. This is more stable, especially considering the varying lengths of sentences. Moreover, we compare model output against three annotators and perform an arithmetic average, further reducing the variance caused by ambiguity." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.242, + 0.102, + 0.754, + 0.119 + ], + "angle": 0, + "content": "Table 7: Results on MNLI. †Quoted from respective papers. ‡Our replication." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.128, + 0.825, + 0.263 + ], + "angle": 0, + "content": "
ModelSent AccReasoning Performance
FEFCFUPFUHGMAM
Human-85.1573.4473.1846.3167.8569.52
Non-reasoning methods
Mahabadi et al. (2020)†73.8------
LSTM (Wang et al., 2019)†72.2------
Transformer (Radford et al., 2018)82.1------
Reasoning methods
NNL (Feng et al., 2020)‡61.2850.3332.0049.780.000.0033.03
STP75.1555.4751.7264.3237.5751.3152.27
EPR (Concat, LM finetuned)79.65±0.1961.76±0.3252.09±0.4164.3237.5752.80±0.0753.93±0.07
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.287, + 0.825, + 0.346 + ], + "angle": 0, + "content": "It should be emphasized that our metrics evaluate phrase detection and alignment in an implicit manner. A poor phrase detector and aligner will result in a low reasoning score (shown in our ablation study), but we do not explicitly calculate phrase detection and alignment accuracy. This helps us cope with the ambiguity of the phrase granularity (Example 3, Table 6)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.351, + 0.825, + 0.381 + ], + "angle": 0, + "content": "To summarize, we propose an evaluation framework including data annotation (§ B.1) and evaluation metrics (§ B.2). These are our contributions in formulating the phrasal reasoning task for NLI." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.399, + 0.401, + 0.415 + ], + "angle": 0, + "content": "C ADDITIONAL RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.43, + 0.357, + 0.444 + ], + "angle": 0, + "content": "C.1 RESULTS ON MNLI" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.456, + 0.828, + 0.638 + ], + "angle": 0, + "content": "In this appendix, we provide additional results on the matched section of the MNLI dataset (Williams et al., 2018), which consists of 393K training samples, 10K validation samples, and another 10K test samples. It has the same format as the SNLI dataset, but samples come from multiple domains and are more diverse. We follow § 4.1 and use the same protocol to create the phrasal reasoning annotation for the MNLI dataset based on 100 randomly selected samples. However, we found that MNLI is much noisier than SNLI; particularly, the sentences labeled as Neutral in MNLI share few related phrases. For example, the two sentences do not have much in common in the sample \"Premise: If you still want to join, it might be worked.\" and \"Hypothesis: Your membership is the only way that this could work\". 
Moreover, the inter-human agreement is low in the Neutral category. Therefore, we believe the corpus quality is less satisfactory for Neutral. To ensure meaningful evaluation, we ignored the evaluation of Neutral in this experiment, although our reasoning approach is not changed. The remaining 60 samples containing Entailment and Contradiction serve as the MNLI phrasal reasoning corpus." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.644, + 0.826, + 0.686 + ], + "angle": 0, + "content": "We consider the EPR variant with concatenated local and global features, since the SNLI experiment shows it achieves a good balance between sentence-level accuracy and reasoning. Our models were run 5 times with different initializations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.693, + 0.825, + 0.75 + ], + "angle": 0, + "content": "As seen in Table 7, our EPR approach is again worse than humans, but largely improves the reasoning performance compared with NNL and STP baselines. Its sentence-level prediction is comparable to (although slightly lower than) finetuning Transformers. The results are highly consistent with SNLI experiments, showing the robustness of our approach." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.756, + 0.826, + 0.84 + ], + "angle": 0, + "content": "It is important to notice that the EPR model here is trained on MNLI sentence labels, and is not transferred from the SNLI dataset. In our preliminary experiments, we tried transfer learning from SNLI to MNLI and failed to obtain satisfactory performance. We found that our EPR is more prone to the out-of-vocabulary issue (i.e., it does not predict well for the phrases in the new domain), whereas a black-box neural network may learn biased sentence patterns and achieve higher performance in transfer learning." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.856, + 0.345, + 0.872 + ], + "angle": 0, + "content": "C.2 ERROR ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "To show how phrasal reasoning affects sentence-level prediction, we perform an error analysis in Table 8. Specifically, we examine the reasoning performance (arithmetic mean of \\( F \\)-scores) when the sentence label is correctly and incorrectly predicted on the SNLI dataset. As shown, EPR models" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.094, + 0.828, + 0.123 + ], + "angle": 0, + "content": "Table 8: Sentence-level prediction count and arithmetic average reasoning performance (\\(F\\)-score) when the sentence label is correctly and incorrectly predicted on the SNLI dataset." + }, + { + "type": "table", + "bbox": [ + 0.178, + 0.135, + 0.825, + 0.207 + ], + "angle": 0, + "content": "
Sentence-level predictionCount (in percentage)Reasoning performance (AMF)
Local finetunedConcat finetunedLocal finetunedConcat finetuned
Correct75.4±1.3687.8±0.7565.71±0.8358.68±0.67
Wrong24.6±1.3612.2±0.7540.74±2.0137.58±3.28
Overall100.0±0.00100.0±0.0059.93±0.6756.32±1.13
" + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.22, + 0.822, + 0.332 + ], + "angle": 0, + "content": "
Groundtruth: Entailment Prediction: Entailment\nThree young boys enjoying a day at the beach.\n(a)\nThe boys are in the beach.Groundtruth: Contradiction Prediction: Contradiction\nA man playing fetch with two brown dogs.\n(b)\nThe dogs are asleep.Entailment\nContradiction\nNeutral\nUnaligned
Groundtruth: Neutral Prediction: Neutral\nWalkers on a concrete boardwalk under a blue sky.\n(c)\nWalkers under a blue sky near the beach.Groundtruth: Entailment Prediction: Neutral\nAn elderly couple in heavy coats are looking at black and white photos displayed on a wall.\n(d)\nOctogenarians admiring the old photographs that decorated the wall.
" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.344, + 0.825, + 0.4 + ], + "angle": 0, + "content": "Figure 7: Examples of explainable phrasal reasoning predicted by our EPR model. Words in one color block are detected phrases, a dotted line shows the alignment of two phrases, and the color represents the predicted phrasal NLI label. In Example (d), EPR's prediction suggests the provided label in SNLI is incorrect." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.427, + 0.825, + 0.482 + ], + "angle": 0, + "content": "with both local and concatenated features have much higher reasoning performance when sentence labels are correctly predicted than incorrectly predicted. The positive correlation between phrasal reasoning performance and sentence-level accuracy shows our fuzzy logic induction rules indeed make sense." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.49, + 0.825, + 0.558 + ], + "angle": 0, + "content": "We also find that the model with local features has a higher reasoning performance than with concatenated features, even when the sentence-level prediction is wrong. This is because the local model is unaware of the context of the sentences. Thus, it must perform strict phrasal reasoning based on the induction rules, even if in this case the reasoning process is imperfect and leads to sentence-level errors." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.576, + 0.371, + 0.59 + ], + "angle": 0, + "content": "C.3 CASE STUDY OF EPR" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.602, + 0.825, + 0.632 + ], + "angle": 0, + "content": "We present case studies of EPR in Figure 7. Our EPR performs impressive reasoning for the NLI task, which is learned in a weakly supervised manner with only sentence-level labels." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.825, + 0.708 + ], + "angle": 0, + "content": "In Example (a), the two sentences are predicted Entailment because three young boys entails the boys and at the beach entails in the beach, whereas unaligned phrases enjoying and a day are allowed in the premise for Entailment. In Example (b), playing contradicts asleep, and the two sentences are also predicted Contradiction. Likewise, Example (c) is predicted Neutral because the aligned phrases on a concrete boardwalk and near the beach are neutral." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.714, + 0.827, + 0.812 + ], + "angle": 0, + "content": "In our study, we also find several interesting examples where EPR's reasoning provides clues suggesting that the target labels may be incorrect in the SNLI dataset. In Example (d), our model predicts Neutral for looking and admiring, as well as for at black and white photos and the old photographs. Thus, the two sentences are predicted Neutral instead of the provided label Entailment. We believe our model's reasoning and prediction are correct, because people looking at something may or may not admire it; a black-and-white photo may or may not be an old photo (as it could be a black-and-white artistic photo)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.829, + 0.638, + 0.843 + ], + "angle": 0, + "content": "C.4 CASE STUDY OF THE TEXTUAL EXPLANATION GENERATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.827, + 0.926 + ], + "angle": 0, + "content": "We conduct another case study to show how EPR's reasoning is used in the textual explanation generation task. As seen in Figure 8, our EPR reasoning yields structured factual tuples: on a deserted beach entailing at the beach, Some dogs contradicting only one dog, and running unaligned (matched with a special token [EMPTY]). 
Our explanation generation model attends to these factual tuples, and the heat map shows that our model gives the most attention weights (with an average of" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.306, + 0.088, + 0.695, + 0.26 + ], + "angle": 0, + "content": "
Input Premise : Some dogs are running on a deserted beach.\nHypothesis : There is only one dog at the beach.
Label Contradiction (not used during our explanation generation)
EPR's Reasoning Output
Premise phraseHypothesis phraseEPR labelAttention score
on a deserted beachat the beachE23.16
Some dogsonly one dogC61.22
running[EMPTY]E15.62
Output explanation Some dogs is more than one dog.
Reference explanations:\n(1) Some is more than one, therefore there can't be only one dog.\n(2) Some indicates more than one dog. One dog is not some dogs.\n(3) Some dogs are not one dog.
" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.271, + 0.734, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.412, + 0.825, + 0.441 + ], + "angle": 0, + "content": "Figure 8: Case study of the textual explanation generation. The heat map shows the step-by-step and average attention weights to the factual tuples (vertical axis)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.467, + 0.825, + 0.511 + ], + "angle": 0, + "content": "0.61) to the tuple, Some dogs contradicting only one dog, to generate the explanation \"Some dogs is more than one dog.\" This example illustrates that the factual tuples given by our EPR model provide meaningful information and can improve textual explanation generation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.53, + 0.488, + 0.546 + ], + "angle": 0, + "content": "D LIMITATION AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.561, + 0.825, + 0.632 + ], + "angle": 0, + "content": "This paper performs phrase detection and alignment by heuristics. They work well empirically in our experiments, although further improvement is possible (for example, by considering syntactic structures). However, our main focus is neural fuzzy logic for weakly supervised reasoning. This largely differs from previous work based on manually designed lexicons and rules (Hu et al., 2020; Chen et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.825, + 0.682 + ], + "angle": 0, + "content": "Our long-term goal is to develop a weakly supervised, end-to-end trained neuro-symbolic system that can extract semantic units and perform reasoning for a given downstream NLP task. This paper is an important milestone toward the long-term goal." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.701, + 0.4, + 0.716 + ], + "angle": 0, + "content": "E ETHICAL STATEMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.732, + 0.827, + 0.79 + ], + "angle": 0, + "content": "Our work involves human annotation of the phrasal logical relationships. Since the research subject here is logic (rather than humans), there are minimal ethical concerns. We nevertheless followed a standard protocol of human evaluation (involving identity protection, and proper compensation), approved by our institutional ethics board." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.81, + 0.359, + 0.824 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.827, + 0.925 + ], + "angle": 0, + "content": "We thank all reviewers and chairs for their valuable comments. The research is supported in part by the Natural Sciences and Engineering Research Council of Canada (NSERC) under Grant No. RGPIN2020-04465, the Amii Fellow Program, the Canada CIFAR AI Chair Program, a UAHJIC project, a donation from DeepMind, and the Digital Research Alliance of Canada (alliancecan.ca). Atharva Naik contributed to the research as an intern at the University of Alberta through the Mitacs Globalink program." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ] +] \ No newline at end of file diff --git a/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_origin.pdf b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..76ab4686e142589fba2ef62d2999d92de1df5af9 --- /dev/null +++ b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/d5f92e4c-b0b4-48f2-acb6-1c3d35000445_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3139871c47fdb51b4114d680c0f4b39e9f7a9bcb406dab81325d2946d0dbf240 +size 1383458 diff --git a/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/full.md b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/full.md new file mode 100644 index 0000000000000000000000000000000000000000..168587c22ef529d9e3ba50deed57c8ce9de8c208 --- /dev/null +++ b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/full.md @@ -0,0 +1,464 @@ +# WEAKLY SUPERVISED EXPLAINABLE PHRASAL REASONING WITH NEURAL FUZZY LOGIC + +Zijun $\mathbf{W}\mathbf{u}^{*1}$ , Zi Xuan Zhang\*, Atharva Naik+2, Zhijian Mei', Mauajama Firdaus', Lili Mou + +$^{1}$ Dept. Computing Science & Alberta Machine Intelligence Institute (Amii), University of Alberta + +2Carnegie Mellon University + +{zijun4, zixuan7, zimei1}@ualberta.ca, arnaik@cs.cmu.edu, + +{mauzama.03, doublepower.mou}@gmail.com + +*Equal contribution, †Work done during the internship at UofA/Amii + +# ABSTRACT + +Natural language inference (NLI) aims to determine the logical relationship between two sentences, such as Entailment, Contradiction, and Neutral. 
In recent years, deep learning models have become a prevailing approach to NLI, but they lack interpretability and explainability. In this work, we address the explainability of NLI by weakly supervised logical reasoning, and propose an Explainable Phrasal Reasoning (EPR) approach. Our model first detects phrases as the semantic unit and aligns corresponding phrases in the two sentences. Then, the model predicts the NLI label for the aligned phrases, and induces the sentence label by fuzzy logic formulas. Our EPR is almost everywhere differentiable and thus the system can be trained end to end. In this way, we are able to provide explicit explanations of phrasal logical relationships in a weakly supervised manner. We further show that such reasoning results help textual explanation generation.1 + +# 1 INTRODUCTION + +Natural language inference (NLI) aims to determine the logical relationship between two sentences (called a premise and a hypothesis), and target labels include Entailment, Contradiction, and Neutral (Bowman et al., 2015; MacCartney & Manning, 2008). Figure 1 gives an example, where the hypothesis contradicts the premise. NLI is important to natural language processing, because it involves logical reasoning and is a key problem in artificial intelligence. Previous work shows that NLI can be used in various downstream tasks, such as information retrieval (Karpukhin et al., 2020) and text summarization (Liu & Lapata, 2019). + +In recent years, deep learning has become a prevailing approach to NLI (Bowman et al., 2015; Mou et al., 2016; Wang & Jiang, 2016; Yoon et al., 2018). Especially, pretrained language models with the Transformer architecture (Vaswani et al., 2017) achieve state-of-the-art performance for the NLI task (Radford et al., 2018; Zhang et al., 2020). However, such deep learning models are black-box machinery and lack interpretability. In real applications, it is important to understand how these models make decisions (Rudin, 2019). 
+ +Several studies have addressed the explainability of NLI models. Camburu et al. (2018) generate a textual explanation by sequence-to-sequence supervised learning, in addition to NLI classification; such an approach is multi-task learning of text classification and generation, which does not perform reasoning itself. MacCartney et al. (2008) propose a scoring model to align related phrases; Parikh et al. (2016) and Jiang et al. (2021) propose to obtain alignment by attention mechanisms. However, they only provide correlation information, instead of logical reasoning. Other work incorporates upward and downward monotonicity entailment reasoning for NLI (Hu et al., 2020; Chen et al., 2021), but these approaches are based on hand-crafted rules (e.g., every downward entailing some) and are restricted to Entailment only; they cannot handle Contradiction or Neutral. + +In this work, we address the explainability for NLI by weakly supervised phrasal logical reasoning. Our goal is to explain NLI predictions with phrasal logical relationships between the premise and + +hypothesis. Intuitively, an NLI system with an explainable reasoning mechanism should be equipped with the following functionalities: + +1. The system should be able to detect corresponding phrases and tell their logical relationship, e.g., several men contradicting one man, but pull in a fishing net entailing holding the net (Figure 1). +2. The system should be able to induce sentence labels from phrasal reasoning. In the example, the two sentences are contradictory because there exists one contradictory phrase pair. +3. More importantly, such reasoning should be trained in a weakly supervised manner, i.e., the phrase-level predictions are trained from sentence labels only. Otherwise, the reasoning mechanism degrades to multi-task learning, which requires massive fine-grained human annotations. + +To this end, we propose an Explainable Phrasal Reasoning (EPR) approach to the NLI task. 
Our model obtains phrases as semantic units, and aligns corresponding phrases by embedding similarity. Then, we predict the NLI labels (namely, Entailment, Contradiction, and Neutral) for the aligned phrases. Finally, we propose to induce the sentence-level label from phrasal labels in a fuzzy logic manner (Zadeh, 1988; 1996). Our model is differentiable, and the phrasal reasoning component can be trained + +![](images/36d8599dc69495cec1040aad3f195d48f60ea647a67099348cbb3ffd1e91bf76.jpg) +Figure 1: The natural language inference (NLI) task and desired phrasal reasoning. + +with the weak supervision of sentence NLI labels. In this way, our EPR approach satisfies all the desired properties mentioned above. + +In our experiments, we developed a comprehensive methodology (data annotation and evaluation metrics) to quantitatively evaluate phrasal reasoning performance, which has not been accomplished in previous work. We extend previous studies and obtain plausible baseline models. Results show that our EPR yields a much more meaningful explanation regarding $F$ scores against human annotation. + +To further demonstrate the quality of extracted phrasal relationships, we feed them to a textual explanation model. Results show that our EPR reasoning leads to an improvement of 2 points in BLEU scores, achieving a new state of the art on the e-SNLI dataset (Camburu et al., 2018). + +Our contributions are summarized as follows: + +1. We formulate a phrasal reasoning task for natural language inference (NLI), addressing the interpretability of neural models. +2. We propose an EPR model that induces sentence-level NLI labels from explicit phrasal logical labels by neural fuzzy logic. EPR is able to perform reasoning in a weakly supervised way. +3. We annotated phrasal logical labels and designed a set of metrics to evaluate phrasal reasoning. We further use our reasoning results to improve textual explanation generation. 
Our code and annotated data are released for future studies. + +To the best of our knowledge, we are the first to develop a weakly supervised phrasal reasoning model for the NLI task. + +# 2 RELATED WORK + +Natural Language Inference. MacCartney & Manning (2009) propose seven natural logic relations in addition to Entailment, Contradiction, and Neutral. MacCartney & Manning (2007) also distinguish upward entailment (every mammal upward entailing some mammal) and downward entailment (every mammal downward entailing every dog) as different categories. Manually designed lexicons and rules are used to interpret Entailment in a finer-grained manner, such as downward and upward entailment (Hu et al., 2020; Chen et al., 2021). Feng et al. (2020) apply such natural logic to NLI reasoning at the word level; however, our experiments will show that their word-level treatment is not an appropriate granularity, and they fail to achieve meaningful reasoning performance. + +The above reasoning schema focuses more on the quantifiers of first-order logic (Beltagy et al., 2016). However, the SNLI dataset (Bowman et al., 2015) we use only contains less than $5\%$ samples with explicit quantifiers, and the seven-category schema complicates reasoning in the weakly supervised + +setting. Instead, we adopt three-category NLI labels following the SNLI dataset. Our focus is entity-based reasoning, and the treatment of quantifiers is absorbed into phrases. + +We also notice that previous work lacks explicit evaluation on the reasoning performance for NLI. For example, the SNLI dataset only provides sentence-level labels. The HELP (Yanaka et al., 2019a) and MED (Yanaka et al., 2019b) datasets concern monotonicity inference problems, where the label is also at the sentence level; they only consider Entailment, ignoring Contradiction and Neutral. Thus, we propose a comprehensive framework for the evaluation of NLI reasoning. + +e-SNLI. Camburu et al. 
(2018) propose the e-SNLI task of textual explanation generation and use LSTM as a baseline. Kumar & Talukdar (2020) propose the NILE approach, using multiple decoders to generate explanations for all E, C, and N labels, and then predicting which to be selected. Zhao & Vydiswaran (2021) propose the LIREx approach, using additionally annotated rationales for explanation generation. Narang et al. (2020) finetune T5 with multiple explanation generation tasks. Although these systems can generate explanations, the nature of such finetuning approaches renders the explanation generator per se unexplainable. By contrast, we design a textual explanation generation model that utilizes our EPR's phrasal reasoning, obtained in a weakly supervised manner. + +Neuro-Symbolic Approaches. In recent years, neuro-symbolic approaches have attracted increasing interest in the AI and NLP communities for interpreting deep learning models. Typically, these approaches are trained by reinforcement learning or its relaxation, such as attention and Gumbel-softmax (Jang et al., 2017), to reason about certain latent structures in a downstream task. + +For example, Lei et al. (2016) and Liu et al. (2018) extract key phrases or sentences for a text classification task. Lu et al. (2018) extract entities and relations for document understanding. Liang et al. (2017) and Mou et al. (2017) perform SQL-like execution based on input text for semantic parsing. Xiong et al. (2017) hop over a knowledge graph for reasoning the relationships between entities. Li et al. (2019) and Deshmukh et al. (2021) model symbolic actions for unsupervised syntactic structure induction. In the vision domain, Mao et al. (2019) propose a neuro-symbolic approach to learn visual concepts. Our work addresses logical reasoning for the NLI task, which is not tackled in previous neuro-symbolic studies. + +Fuzzy Logic. Fuzzy logic (Zadeh, 1988; 1996) models an assertion and performs logic calculation with probability. 
For example, a quantifier (e.g., "most") and assertion (e.g., "ill") are modeled by a score in $(0,1)$ ; the score of a conjunction $s(x_{1} \wedge x_{2})$ is the product of $s(x_{1})$ and $s(x_{2})$ . In old-school fuzzy logic studies, the mapping from language to the score is usually given by human-defined heuristics (Zadeh, 1988; Nozaki et al., 1997), and may not be suited to the task of interest. By contrast, we train neural networks to predict the probability of phrasal logical relations, and induce the sentence NLI label by fuzzy logic formulas. Thus, our approach takes advantage of both worlds of symbolism and connectionism. Mahabadi et al. (2020) apply fuzzy logic formulas to replace multi-layer perceptrons for NLI. But they are unable to provide expressive reasoning because their fuzzy logic works on sentence features. Our work is inspired by Mahabadi et al. (2020). However, we propose to apply fuzzy logic to the detected and aligned phrases, enabling our approach to provide reasoning in a symbolic (i.e., expressive) way. We develop our own fuzzy logic formulas, which are also different from Mahabadi et al. (2020). + +# 3 OUR EPR APPROACH + +In this section, we describe our EPR approach in detail, also shown in Figure 2. It has three main components: phrase detection and alignment, phrasal NLI prediction, and sentence label induction. + +Phrase Detection and Alignment. In NLI, a data point consists of two sentences, a premise and a hypothesis. We first extract content phrases from both input sentences by rules and heuristics. For example, $\left[\mathrm{AUX}\right] + \left[\mathrm{NOT}\right] + \mathrm{VERB} + \left[\mathrm{RP}\right]$ is treated as a verb phrase. Full details are presented in Appendix A.1. Compared with the word level (Parikh et al., 2016; Feng et al., 2020), a phrase is a more meaningful semantic unit for logical reasoning. + +We then align corresponding phrases in the two sentences based on cosine similarity. 
Let $\mathrm{P} = (\mathrm{p}_1,\dots ,\mathrm{p}_M)$ and $\mathrm{H} = (\mathrm{h}_1,\dots ,\mathrm{h}_N)$ be the premise and hypothesis, respectively, where $\mathrm{p}_m$ and $\mathrm{h}_n$ are extracted phrases. We apply Sentence-BERT (Reimers & Gurevych, 2019) to each individual phrase and obtain the local phrase embeddings by $\pmb {p}_m^{(L)} = \mathrm{SBERT}(\mathrm{p}_m),\pmb {h}_n^{(L)} = \mathrm{SBERT}(\mathrm{h}_n)$ . We + +![](images/07d7923cf7122154318a4e7621f0c71d8c910a16064898df435c9314cf0f5e25.jpg) +Figure 2: An overview of our Explainable Phrasal Reasoning (EPR) model. + +Table 1: An example showing the importance of handling unaligned phrases (in highlight). + +
Premise +HypothesisPeople are shopping for fruit. +People are shopping for fruit in the market.People are shopping for fruit in the market. +People are shopping for fruit.
Sentence NLI[ ] Entailment [ ] Contradiction [√] Neutral[√] Entailment [ ] Contradiction [ ] Neutral
+ +also apply Sentence-BERT to the entire premise and hypothesis sentences to obtain the global phrase embeddings $\pmb{p}_m^{(G)}$ and $\pmb{h}_n^{(G)}$ by mean-pooling the features of the words in the phrase. The phrase similarity is given by + +$$ +\sin \left(\mathrm {p} _ {m}, \mathrm {h} _ {n}\right) = \gamma \cos \left(\boldsymbol {p} _ {m} ^ {(G)}, \boldsymbol {h} _ {n} ^ {(G)}\right) + (1 - \gamma) \cos \left(\boldsymbol {p} _ {m} ^ {(L)}, \boldsymbol {h} _ {n} ^ {(L)}\right) \tag {1} +$$ + +where $\gamma$ is a hyperparameter balancing the lexical and contextual representations of a phrase (Hewitt & Manning, 2019). It is noted that Sentence-BERT is finetuned on paraphrase datasets, and thus is more suitable for phrasal similarity matching than pretrained language models (Devlin et al., 2019). + +We obtain phrase alignment between the premise and hypothesis in a heuristic way. For every phrase $\mathrm{p}_m$ in the premise, we look for the most similar phrase $\mathrm{h}_n$ from the hypothesis by + +$$ +n = \operatorname {a r g m a x} _ {n ^ {\prime}} \sin \left(\boldsymbol {p} _ {m}, \boldsymbol {h} _ {n ^ {\prime}}\right) \tag {2} +$$ + +Likewise, for every phrase $\mathrm{h}_n$ in the hypothesis, we look for the most similar phrase $\mathrm{p}_m$ from the premise. A phrase pair $(\mathrm{p}_m, \mathrm{h}_n)$ is considered to be aligned if $\mathrm{h}_n$ is selected as the closest phrase to $\mathrm{p}_m$ , and $\mathrm{p}_m$ is the closest to $\mathrm{h}_n$ . Such hard alignment differs from commonly used soft attention-based approaches (Parikh et al., 2016). Our alignment method can ensure the quality of phrase alignment, and more importantly, leave other phrases unaligned (e.g., helping each other in Figure 1), which are common in the NLI task. The process is illustrated in Figure 2a. + +Phrasal NLI Prediction. 
Our model then predicts the logical relationship of an aligned phrase pair $(p, h)$ among three target labels: Entailment, Contradiction, and Neutral. While previous work (Feng et al., 2020) identifies finer-grained labels for NLI, we do not follow their categorization, because it complicates the reasoning process and makes weakly supervised training more difficult. Instead, we adopt a three-way phrasal classification, which is consistent with sentence NLI labels. + +We represent a phrase, say, $p$ in the premise, by a vector embedding, and we consider two types of features: a local feature $\pmb{p}^{(L)}$ and a global feature $\pmb{p}^{(G)}$ , re-used from the phrase alignment component. They are concatenated as the phrase representation $\pmb{p} = [p^{(L)}; p^{(G)}]$ . Likewise, the phrase representation for a hypothesis phrase $h$ is obtained in a similar way. Intuitively, local features force the model to perform reasoning in a serious manner, but global features are important to sentence-level prediction. Such intuition is also verified in an ablation study (§ 4.2). + +Then, we use a neural network to predict the phrasal NLI label (Entailment, Contradiction, and Neutral). This is given by the standard heuristic matching (Mou et al., 2016) based on phrase embeddings, followed by a multi-layer perceptron (MLP) and a three-way softmax layer: + +$$ +\left[ P _ {\text {p h r a s e}} (\mathsf {E} | \mathrm {p}, \mathrm {h}); P _ {\text {p h r a s e}} (\mathsf {C} | \mathrm {p}, \mathrm {h}); P _ {\text {p h r a s e}} (\mathsf {N} | \mathrm {p}, \mathrm {h}) \right] = \operatorname {s o f t m a x} (\operatorname {M L P} \left(\left[ \boldsymbol {p}; \boldsymbol {h}; \left| \boldsymbol {p} - \boldsymbol {h} \right|; \boldsymbol {p} \circ \boldsymbol {h} \right]\right)) \tag {3} +$$ + +where $\circ$ is the element-wise product, and the semicolon refers to column vector concatenation. E, C, and N refer to the Entailment, Contradiction, and Neutral labels, respectively. 
+ +It should be mentioned that a phrase may be unaligned, but plays an important role in sentence-level NLI prediction, as shown in Table 1. Thus, we would like to predict phrasal NLI labels for unaligned + +phrases as well, but pair them with a special token $(\mathrm{p}_{\langle \mathrm{EMPTY}\rangle}$ or $\mathrm{h}_{\langle \mathrm{EMPTY}\rangle})$ , whose embedding is randomly initialized and learned by back-propagation. + +Sentence Label Induction. We observe the sentence NLI label can be logically induced from phrasal NLI labels. Based on the definition of the NLI task, we develop the following induction rules. + +Entailment Rule: According to Bowman et al. (2015), a premise entailing a hypothesis means that, if the premise is true, then the hypothesis must be true. We find that this can be oftentimes transformed into phrasal relationships: a premise entails the hypothesis if all paired phrases have the label Entailment. + +Let $\{(\mathrm{p}_k,\mathrm{h}_k)\}_{k = 1}^K\bigcup \{(\mathrm{p}_k,\mathrm{h}_k)\}_{k = K + 1}^{K'}$ be all phrase pairs. For $k = 1,\dots ,K$ , they are aligned phrases; for $k = K + 1,\dots ,K'$ , they are unaligned phrases paired with the special token, i.e., $\mathrm{p}_k = \mathrm{p}_{\langle \mathrm{EMPTY}\rangle}$ or $\mathrm{h}_k = \mathrm{h}_{\langle \mathrm{EMPTY}\rangle}$ . Then, we induce a sentence-level Entailment score by + +$$ +S _ {\text {s e n t e n c e}} (\mathsf {E} | \mathrm {P}, \mathrm {H}) = \left[ \prod_ {k = 1} ^ {K ^ {\prime}} P _ {\text {p h r a s e}} (\mathsf {E} | \mathrm {p} _ {k}, \mathrm {h} _ {k}) \right] ^ {\frac {1}{K ^ {\prime}}} \tag {4} +$$ + +This works in a fuzzy logic fashion (Zadeh, 1988; 1996), deciding whether the sentence-level label should be Entailment considering the average of phrasal predictions. 
Here, we use the geometric mean, because it is biased towards low scores, i.e., if there exists one phrase pair with a low Entailment score, then the chance of sentence label being Entailment is also low. Unaligned pairs should be considered in Eq. (4), because an unaligned phrase may indicate Entailment, shown in the second example of Table 1. Notice that the resulting value $S_{\text{sentence}}(\mathsf{E}|\mathsf{P}, \mathsf{H})$ is not normalized with respect to Contradiction and Neutral; thus, we call it a score (instead of probability), which will be normalized afterwards.

Contradiction Rule: Two sentences are contradictory if there exists (at least) one paired phrase labeled as Contradiction. The fuzzy logic version of this induction rule is given by

$$
S _ {\text {s e n t e n c e}} (\mathsf {C} | \mathrm {P}, \mathrm {H}) = \max _ {k = 1, \dots , K} P _ {\text {p h r a s e}} (\mathsf {C} | \mathrm {p} _ {k}, \mathrm {h} _ {k}) \tag {5}
$$

Here, the max operator is used in the induction, because the contradiction rule is an existential statement, i.e., there exist(s) $\cdots$ . Also, unaligned phrases are excluded in calculating the sentence-level Contradiction score, because an unaligned phrase indicates the corresponding information is missing in the other sentence and it cannot be Contradiction (recall examples in Table 1).

Rule for Neutral: Two sentences are neutral if there exists (at least) one neutral phrase pair, but there does not exist any contradictory phrase pair. 
The fuzzy logic formula is + +$$ +S _ {\text {s e n t e n c e}} (\mathrm {N} | \mathrm {P}, \mathrm {H}) = \left[ \max _ {k = 1, \dots , K ^ {\prime}} P _ {\text {p h r a s e}} (\mathrm {N} | \mathrm {p} _ {k}, \mathrm {h} _ {k}) \right] \cdot \left[ 1 - S _ {\text {s e n t e n c e}} (\mathrm {C} | \mathrm {P}, \mathrm {H}) \right] \tag {6} +$$ + +The first factor determines whether there exists a Neutral phrase pair (including unaligned phrases, illustrated in the first example in Table 1). The second factor evaluates the negation of "at least one contradictory phrase," as suggested in the second clause of the Rule for Neutral. + +Finally, we normalize the scores into probabilities by dividing the sum, since all the scores are already positive. This is given by + +$$ +P _ {\text {s e n t e n c e}} (\mathrm {L} | \cdot) = \frac {1}{Z} S _ {\text {s e n t e n c e}} (\mathrm {L} | \cdot) \tag {7} +$$ + +where $\mathsf{L}\in \{\mathsf{E},\mathsf{C},\mathsf{N}\}$ , and $Z = S_{\text{sentence}}(\mathsf{E}|\cdot) + S_{\text{sentence}}(\mathsf{C}|\cdot) + S_{\text{sentence}}(\mathsf{N}|\cdot)$ is the normalizing factor. + +Training and Inference. We use cross-entropy loss to train our EPR model by minimizing $-\log P_{\text{sentence}}(\mathsf{t}|\cdot)$ , where $\mathsf{t} \in \{\mathsf{E}, \mathsf{C}, \mathsf{N}\}$ is the groundtruth sentence-level label. + +Our underlying logical reasoning component can be trained end-to-end by back-propagation in a weakly supervised manner, because the fuzzy logic rules are almost everywhere differentiable. Although the max operators in Eqs. (5) and (6) may not be differentiable at certain points, they are common in max-margin learning and the rectified linear unit (ReLU) activation functions, and do not cause trouble in back-propagation. + +Once our EPR model is trained, we can obtain both phrasal and sentence-level labels. This is accomplished by performing argmax on the predicted probabilities (3) and (7), respectively. 
+ +Improving Textual Explanation. Camburu et al. (2018) annotated a dataset to address NLI interpretability by generating an explanation sentence. For the example in Figure 1, the reference explanation is "There cannot be one man and several men at same time." + +In this part, we apply the predicted phrasal logical relationships to textual explanation generation and examine whether our EPR's output can help a downstream task. Figure 3 shows the overview of our textual explanation generator. We concatenate the premise and hypothesis in the form of “Premise : Hypothesis : …,” and feed it to a standard Transformer encoder (Vaswani et al., 2017). + +We utilize the phrase pairs and our predicted phrasal labels as factual knowledge to enhance the decoder. Specifically, our EPR model yields a set of tuples $\{(\mathrm{p}_k,\mathrm{h}_k,\mathrm{l}_k)\}_{k = 1}^K$ for a sample, where $\mathbf{l}_k\in \{\mathsf{E},\mathsf{N},\mathsf{C}\}$ is the predicted phrasal label for the aligned phrases, $\mathrm{p}_k$ and $\mathrm{h}_k$ . We embed phrases by Sentence-BERT: $\pmb{p}^{(L)}$ and $\pmb{h}^{(L)}$ ; the phrasal label is represented by a one-hot vector $\pmb{l}_k = \mathrm{onehot}(\mathrm{l}_k)$ . They are concatenated as a vector $\pmb{m}_k = [\pmb {p}_k;\pmb {h}_k;\pmb {l}_k]$ . We compose the vectors as a factual memory matrix $\mathbf{M} = [m_1^\top ;\dots ;m_K^\top ]\in \mathbb{R}^{K\times d}$ , where $d$ is the dimension of $\pmb{m}_k$ . + +Our decoder follows a standard Transformer architecture (Vaswani et al., 2017), but is equipped with additional attention mechanisms to the factual memory. Consider the $i$ th decoding step. We feed the factual memory to an MLP as $\tilde{\mathbf{M}} = \mathrm{MLP}(\mathbf{M})$ . 
We compute attention $\pmb{a}$ over $\tilde{\mathbf{M}}$ with the embedding of the input $\pmb{y}_{i-1}$, and aggregate factual information $\pmb{c}$ for the rows $\tilde{\pmb{m}}_k$ in $\tilde{\mathbf{M}}$:

$$
\boldsymbol {a} = \operatorname {s o f t m a x} (\tilde {\mathbf {M}} \boldsymbol {y} _ {i - 1}), \quad \boldsymbol {c} = \sum_ {k = 1} ^ {K} a _ {k} \tilde {\boldsymbol {m}} _ {k} ^ {\top}
$$

where $a_{k}$ is the kth element of the vector $\pmb{a}$ and $\tilde{\pmb{m}}_k$ is the kth row of the matrix $\tilde{\mathbf{M}}$. The factual information $\pmb{c}$ is fed to another layer $\pmb{g}_i = \mathrm{MLP}([c; y_{i-1}]) + c$.

![](images/59828ff8e82cdca43543a438f05e4fee8d4571b8d4ce9ba761fa4e8c3e224c45.jpg)
Figure 3: Overview of the model for textual explanation generation.

Our Transformer decoder layer starts with self-attention $\tilde{q}_i = \mathrm{SelfAttn}(g_i)$. Then, residual connection and layer normalization are applied as $q_{i} = \mathrm{LayerNorm}(\tilde{q}_{i} + g_{i})$. A cross-attention mechanism obtains input information by $v_{i} = \mathrm{CrossAttn}(q_{i},\mathbf{H})$, where $\mathbf{H}$ is the representation given by the encoder. $v_{i}$ is fed to the Transformer's residual connection and layer normalization sub-layer. Multiple Transformer layers as mentioned above are stacked to form a deep architecture. The model is trained by standard cross-entropy loss against the reference explanation as in previous work (Kumar & Talukdar, 2020; Zhao & Vydiswaran, 2021; Narang et al., 2020).

In this way, the model is enhanced with factual information given by our EPR weakly supervised reasoning. Experiments will show that it largely improves the BLEU score by 2 points (§ 4.2), being a new state of the art. This further verifies that our EPR indeed yields meaningful phrasal explanations. 
+ +# 4 EXPERIMENTS + +# 4.1 DATASETS AND EVALUATION METRICS + +The main dataset we used in our experiments is the Stanford Natural Language Inference (SNLI) dataset (Bowman et al., 2015), which consists of 550K training samples, 10K validation samples, and another 10K test samples. Each data sample consists of two sentences (premise and hypothesis) and a sentence-level groundtruth label. For sentence-level NLI prediction, we still use accuracy to evaluate our approach, following previous work (Parikh et al., 2016; Chen et al., 2017; Radford et al., 2018). + +To evaluate the phrasal reasoning performance, we need additional human annotation and evaluation metrics, because most previous work only considers sentence-level performance (Feng et al., 2020) and has not performed quantitative phrasal reasoning evaluation. Although Camburu et al. (2018) annotated phrase highlights in their e-SNLI dataset, they are incomplete and do not provide logical relationships. Our annotators selected relevant phrases from two sentences and tagged them with phrasal NLI labels; they also selected and tagged unaligned phrases. + +Table 2: Main results on the SNLI dataset. †Quoted from respective papers. ‡Obtained from the checkpoint sent by the authors. Other results are obtained by our experiments. GM and AM are the geometric and arithmetic means of the $F$ scores. + +
ModelSent AccReasoning Performance
FEFCFNFUPFUHGMAM
Human-84.7171.0155.1282.4661.8070.0771.02
Non-reasoning
Mahabadi et al. (2020)†85.1-------
LSTM (Wang & Jiang, 2016)†86.1-------
Transformer (Radford et al., 2018)89.9-------
SBERT (Reimers & Gurevych, 2019)91.4-------
Baselines
NNL (Feng et al., 2020)‡79.9162.7217.491.5066.220.000.0029.59
STP85.7662.4034.7637.0476.6151.8050.2052.52
GPT-3-Davinci (Brown et al., 2020)-53.7558.0016.1252.2431.0838.2342.24
Ours
EPR (Local, LM unfinetuned)76.33±0.4883.11±0.2938.73±0.8544.63±0.8876.6151.8056.39±0.4358.98±0.34
EPR (Local, LM finetuned)79.36±0.1382.44±0.2644.10±1.3244.69±3.2276.6151.8057.77±0.8559.93±0.67
EPR (Concat, LM unfinetuned)84.53±0.1973.29±0.6837.95±1.1640.56±1.1076.6151.8053.73±0.3956.04±0.33
EPR (Concat, LM finetuned)87.56±0.1569.91±1.2139.97±2.1243.31±2.7876.6151.8054.46±1.3556.32±1.13
+ +We further propose a set of $F$ -scores, which are a balanced measure of precision and recall between human annotation and model output for Entailment, Contradiction, Neutral, and Unaligned in terms of word indexes. Details of human annotation and evaluation metrics are shown in Appendix B. + +The inter-annotator agreement is presented in Table 2 in comparison with model performance (detailed in the next part). Here, we compute the agreement by treating one annotator as the ground truth and another as the system output; the score is averaged among all annotator pairs. As seen, humans generally achieve high agreement with each other, whereas model performance is relatively low. This shows that our task and metrics are well-defined, yet phrasal logical reasoning is a challenging task for machine learning models. + +Textual explanation generation was evaluated on the e-SNLI dataset (Camburu et al., 2018), which extends the SNLI dataset with one reference explanation for each training sample, and three reference explanations for each validation or test sample. Each reference explanation comes with highlighted rationales, a set of annotated words in the premise or hypothesis considered as the reason for the explanation annotation. We do not use these highlighted rationales, but enhance the neural model with EPR output for textual explanation generation. We follow previous work (Camburu et al., 2018; Narang et al., 2020), adopting BLEU (Papineni et al., 2002) and SacreBLEU (Post, 2018) scores as the evaluation metrics; they mainly differ in the tokenizer. Camburu et al. (2018) also report low consistency of the third annotated reference, and thus use only two references for evaluation. In our study, we consider both two-reference and three-reference BLEU/SacreBLEU. Appendix A.2 provides additional implementation details of textual explanation generation. + +# 4.2 RESULTS + +Phrasal Reasoning Performance. 
To the best of our knowledge, phrasal reasoning for NLI was not explicitly evaluated in previous literature. Therefore, we propose plausible extensions to previous studies as our baselines. We consider the study of Neural Natural Logic (NNL, Feng et al., 2020) as the first baseline. It applies an attention mechanism (Parikh et al., 2016), so that each word in the hypothesis is softly aligned with the words in the premise. Then, each word in the hypothesis is predicted with one of the seven natural logic relations proposed by MacCartney & Manning (2009). We consider the maximum attention score as the alignment, and map their seven natural logic relations to our three-category NLI labels: Equivalence, ForwardEntailment $\mapsto$ Entailment; Negation, Alternation $\mapsto$ Contradiction; and ReverseEntailment, Cover, Independence $\mapsto$ Neutral. + +Table 2 shows that the word-level NNL approach cannot perform meaningful phrasal reasoning, although our metrics have already excluded explicit evaluation of phrases. The low performance is because their soft attention leads to many misalignments, whereas their seven-category logical relations are too fine-grained and cause complications in weakly supervised reasoning. In addition, NNL does not allow unaligned words in the hypothesis, showing that such a model is inadequate for NLI reasoning. By contrast, our EPR model extracts phrases of meaningful semantic units, being an appropriate granularity of logical reasoning. Moreover, we work with three-category NLI labels following the sentence-level NLI task formulation. This actually restricts the model's capacity, forcing the model to perform serious phrasal reasoning. + +Table 3: Results of ablation studies on SNLI. + +
ModelFeaturesSent AccReasoning Performance
FEFCFNFUPFUHGMAM
Full modelLocal76.33±0.4883.11±0.2938.73±0.8544.63±0.8876.6151.8056.39±0.4358.98±0.34
Global84.03±0.1270.84±0.6035.12±0.9036.37±1.5276.6151.8051.41±0.6254.15±0.41
Concat84.53±0.1973.29±0.6837.95±1.1640.56±1.1076.6151.8053.73±0.3956.04±0.33
Random chunkerLocal72.4463.2122.6532.0465.9436.1340.5343.99
Global82.8158.0930.6427.4965.9436.1341.0543.66
Concat83.0958.7532.4131.1465.9436.1342.6644.87
Semantic role labelingLocal71.1073.7929.3928.9970.1943.1145.2749.09
Global82.8160.1432.0730.4870.1943.1144.6747.20
Concat83.1161.6431.7628.3370.1943.1144.1547.01
Random alignmentLocal68.5259.3221.7926.2051.4316.5031.0235.05
Global81.9953.8535.1031.3951.4316.5034.7137.66
Concat82.4957.2234.8330.9151.4316.5034.9738.18
Mean inductionLocal79.6177.3837.1436.1376.6151.8052.8455.81
Global83.8255.0829.9224.7076.6151.8043.8247.62
Concat84.9657.1231.9331.4176.6151.8046.9249.77
+ +In addition, we include another intuitive SBERT-based competing model for comparison. We first apply our own heuristics of phrase detection and alignment (thus, the model will have the same $F_{\mathsf{UP}}$ and $F_{\mathsf{UH}}$ scores); then, we directly train the phrasal NLI predictor by sentence-level labels. We obtain the sentence NLI prediction by taking argmax over Eq. (7). We call this STP (Sentence label Training Phrases). As seen, STP provides some meaningful phrasal reasoning results, because the training can smooth out the noise of phrasal labels, which are directly set as the sentence-level labels. But still, its performance is significantly lower than our EPR model. + +We experimented with a baseline of few-shot prompting with GPT-3 (Brown et al., 2020), and the implementation detail is shown in Appendix A.2. We see that GPT-3 is able to provide more or less meaningful reasoning, and surprisingly the contradiction $F$ -score is higher than all competing methods. However, the overall mean $F$ scores are much lower. The results show that phrasal reasoning is challenging for pretrained language models, highlighting the importance of our task formulation and the proposed EPR approach even in the prompting era. + +Among our EPR variants, we see that EPR with local phrase embeddings achieves the highest reasoning performance, and that EPR with concatenated features achieves a good balance between sentence-level accuracy and reasoning. Our EPR variants were run 5 times with different initialization, and standard deviations are also reported in Table 3. As seen, our improvement compared with the best baseline is around 9.1-10.7 times the standard deviation in mean $F$ scores, which is a large margin. Suppose the $F$ scores are Gaussian distributed, $^{4}$ the improvement is also statistically significant ( $p$ -value $< 4.5\mathrm{e} - 20$ comparing our worse variant with the best competing model by one-sided test). 
+ +We further compare our EPR with non-reasoning models (Wang & Jiang, 2016; Radford et al., 2018), which are unable to provide phrasal explanations but may or may not achieve high sentence accuracy. The results show that our phrasal EPR model hurts the sentence-level accuracy by 2-4 points, when the model architecture is controlled. This resonates with traditional symbolic AI approaches (MacCartney & Manning, 2008), where interpretable models may not outperform black-box neural networks. Nevertheless, our sentence-level accuracy is still decent, outperforming a few classic neural models, including fuzzy logic applied to sentence embeddings (Mahabadi et al., 2020). + +Analysis. We consider several ablated models to verify the effect of every component in our EPR model. (1) Random chunker, which splits the sentence randomly based on the number of chunks detected by our system. (2) Random aligner, which randomly aligns phrases but keeps the number of aligned phrases unchanged. (3) Semantic role labeling, which uses the semantic roles, detected by AllenNLP (Gardner et al., 2018), as the reasoning unit. (4) Mean induction, which induces the sentence NLI label by the geometric mean of phrasal NLI prediction. In addition, we consider local phrase embedding features, global features, and their concatenation for the above model variants. Due to a large number of settings, each variant was run only once; we do not view this as a concern because Table 2 shows a low variance of our approach. Also, the underlying language model is un-finetuned in our ablation study, as it yields slightly lower performance but is much more efficient. + +As seen in Table 3, the random chunker and aligner yield poor phrasal reasoning performance, showing that working with meaningful semantic units and their alignments is important to logical reasoning. This also verifies that our word index-based metrics are able to evaluate phrase detection + +and alignment in an implicit manner. 
We further applied semantic role labeling as our reasoning unit. We find its performance is higher than the random chunker but lower than our method. This is because semantic role labeling is verb-centric, and the extracted spans may be incomplete. + +Interestingly, local features yield higher reasoning performance, but global and concatenated features yield higher sentence accuracy. This is because global features provide aggregated information of the entire sentence and allow the model to bypass meaningful reasoning. In the variant of the mean induction, for example, the phrasal predictor can simply learn to predict the sentence-level label with global sentence information; then, the mean induction is an ensemble of multiple predictors. In this way, it achieves the highest sentence accuracy (0.43 points higher than our full model with concatenated features), but is 6 points lower in reasoning performance. + +This reminds us of the debate between old schools of AI (Chandrasekaran et al., 1988; Boucher & Dienes, 2003; Goel, 2022). Recent deep learning models take the connectionists' view, and generally outperform symbolists' approaches in terms of the ultimate prediction, but they lack expressible explanations. Combining neural and symbolic methods becomes a hot direction in recent AI research (Liang et al., 2017; Dong et al., 2018; Yi et al., 2018). In general, our EPR model with global features achieves high performance in both reasoning and ultimate prediction for the NLI task. + +Results of Textual Explanation Generation. In this part, we apply EPR's predicted output—phrasal logical relationships—as factual knowledge to textual explanation generation. Most previous studies use the groundtruth sentence-level NLI label and/or highlighted rationales. This requires human annotations, which are resource-consuming to obtain. By contrast, we require no extra human-annotated resources; our factual knowledge is based on our weakly supervised reasoning approach. 
+ +Table 4: Textual explanation results on e-SNLI. Previous work uses auxiliary information (L: the groundtruth NLI label; H: human-annotated highlights), but we use neither. ${}^{ \dagger }$ Quoted from respective papers. ${}^{ \ddagger }$ Evaluated by checkpoints. ${}^{\parallel }$ Our replication with provided code. + +
ModelInfoBLEUSacreBLEU
LH2 refs3 refs2 refs3 refs
Camburu et al. (2018)†--27.58---
NILE (Kumar & Talukdar, 2020)∥-28.5737.7332.5141.78
NILE (Kumar & Talukdar, 2020)‡-28.6737.8432.7442.06
FinetunedWT5220M (Narang et al., 2020)†---32.40-
FinetunedWT511B (Narang et al., 2020)†---33.70-
LIREx (Zhao & Vydiswaran, 2021)∥17.2222.4021.2426.68
Finetune T560M--27.7536.7831.7440.89
+ Annotated Highlights64M27.9136.9032.2041.21
+ EPR Outputs64M (ours)--29.9138.3033.9642.63
+ +Table 4 shows our explanation generation performance on e-SNLI. Since evaluation metrics are not consistently used for explanation generation in previous studies, we replicate the approaches when the code or checkpoint is available. For large pretrained models, we quote results from the previous paper (Narang et al., 2020). Their model is called WT5, having 220M or 11B parameters depending on the underlying T5 model. Profoundly, we achieve higher performance with 60M-parameter T5-small, which is $3.3\mathrm{x}$ and $170\mathrm{x}$ smaller in model size than the two WT5 variants. + +In addition, we conducted a controlled experiment using the rationale highlights annotated by Camburu et al. (2018) for e-SNLI. It achieves a relatively small increase of 0.2-0.5 BLEU points, whereas our EPR's outputs yield a 2-point improvement. The difference in the performance gains shows that our EPR's phrasal logical relationships provide more valuable information than human-annotated highlights. In general, we achieve a new state of the art on e-SNLI with a small language model, demonstrating the importance of phrasal reasoning in textual explanations. + +Additional Results. We show additional results as appendices. § C.1: Reasoning performance on the MNLI dataset; § C.2: Error analysis; § C.3: Case studies of our EPR model; and § C.4: Case studies of textual explanation generation. + +Conclusion. The paper proposes an explainable phrasal reasoning (EPR) model for NLI with neural fuzzy logic, trained in a weakly supervised manner. We further propose an experimental design, including data annotation, evaluation metrics, and plausible baselines. Results show that phrasal reasoning for NLI is a meaningfully defined task, as humans can achieve high agreement. Our EPR achieves decent sentence-level accuracy, but much higher reasoning performance than all competing models. 
We also achieve a new state-of-the-art performance on e-SNLI textual explanation generation by applying EPR's phrasal logical relationships. + +# REFERENCES + +Islam Beltagy, Stephen Roller, Pengxiang Cheng, Katrin Erk, and Raymond J Mooney. Representing meaning with a combination of logical and distributional models. Computational Linguistics, pp. 763-808, 2016. URL https://aclanthology.org/J16-4007/. +Luke Boucher and Zoltán Dienes. Two ways of learning associations. Cognitive Science, 27(6):807-842, 2003. URL https://www.sciencedirect.com/science/article/pii/S0364021303000715. +Samuel Bowman, Gabor Angeli, Christopher Potts, and Christopher D Manning. A large annotated corpus for learning natural language inference. In EMNLP, pp. 632-642, 2015. URL https://aclanthology.org/D15-1075. +Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In NeurIPS, pp. 1877-1901, 2020. URL https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf. +Oana-Maria Camburu, Tim Rocktäschel, Thomas Lukasiewicz, and Phil Blunsom. e-SNLI: Natural language inference with natural language explanations. In NeurIPS, pp. 9539-9549, 2018. URL https://proceedings.neurips.cc/paper/2018/hash/4c7a167bb329bd92580a99ce422d6fa6-Abstract.html. +Balakrishnan Chandrasekaran, Ashok Goel, and Dean Allemang. Connectionism and information processing abstractions. AI Magazine, 9(4):24-24, 1988. URL https://ojs.aaai.org/index.php/aimagazine/article/view/951. 
+Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. Enhanced LSTM for natural language inference. In ACL, pp. 1657-1668, 2017. URL https://aclanthology.org/P17-1152/. +Zeming Chen, Qiyue Gao, and Lawrence S Moss. NeuralLog: Natural language inference with joint neural and logical reasoning. arXiv preprint arXiv:2105.14167, 2021. URL https://arxiv.org/abs/2105.14167. +Anup Anand Deshmukh, Qianqiu Zhang, Ming Li, Jimmy Lin, and Lili Mou. Unsupervised chunking as syntactic structure induction with a knowledge-transfer approach. In Findings of EMNLP, pp. 3626-3634, 2021. URL https://aclanthology.org/2021.findings-emnlp.307. +Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In *NAACL-HLT*, pp. 4171–4186, 2019. URL https://aclanthology.org/N19-1423. +Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah Smith. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. arXiv preprint arXiv:2002.06305, 2020. URL https://arxiv.org/abs/2002.06305. +Honghua Dong, Jiayuan Mao, Tian Lin, Chong Wang, Lihong Li, and Denny Zhou. Neural logic machines. In ICLR, 2018. URL https://openreview.net/forum?id=B1xY-hRctX. +Yufei Feng, Quan Liu, Michael Greenspan, Xiaodan Zhu, et al. Exploring end-to-end differentiable natural logic modeling. In COLING, pp. 1172-1185, 2020. URL https://aclanthology.org/2020.coling-main.101. +Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson F. Liu, Matthew Peters, Michael Schmitz, and Luke Zettlemoyer. AllenNLP: A deep semantic natural language processing platform. In Proc. Workshop for NLP Open Source Software (NLP-OSS), pp. 1-6, 2018. URL https://aclanthology.org/W18-2501. + +Ashok Goel. Looking back, looking ahead: Symbolic versus connectionist AI. AI Magazine, 42(4): 83-85, 2022. 
URL https://ojs.aaai.org/index.php/aimagazine/article/view/15111. +John Hewitt and Christopher D Manning. A structural probe for finding syntax in word representations. In NAACL-HLT, pp. 4129-4138, 2019. URL https://aclanthology.org/N19-1419. +Hai Hu, Qi Chen, Kyle Richardson, Atreyee Mukherjee, Lawrence S Moss, and Sandra Kübler. MonaLog: A lightweight system for natural language inference based on monotonicity. In Proc. Society for Computation in Linguistics, pp. 284-293, 2020. URL https://aclanthology.org/2020.scil-1.40/. +Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with Gumbel-softmax. In ICLR, 2017. URL https://openreview.net/forum?id=rkE3y85ee. +Zhongtao Jiang, Yanzhe Zhang, Zhao Yang, Jun Zhao, and Kang Liu. Alignment rationale for natural language inference. In ACL-IJCNLP, pp. 5372-5387, 2021. URL https://aclanthology.org/2021.acl-long.417/. +Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. Dense passage retrieval for open-domain question answering. In EMNLP, pp. 6769-6781, 2020. URL https://aclanthology.org/2020.emnlp-main.550/. +Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. URL https://arxiv.org/abs/1412.6980. +Sawan Kumar and Partha Talukdar. NILE: Natural language inference with faithful natural language explanations. In ACL, pp. 8730-8742, 2020. URL https://aclanthology.org/2020.acl-main.771. +Tao Lei, Regina Barzilay, and Tommi Jaakkola. Rationalizing neural predictions. In EMNLP, pp. 107-117, 2016. URL https://aclanthology.org/D16-1011/. +Bowen Li, Lili Mou, and Frank Keller. An imitation learning approach to unsupervised parsing. In ACL, pp. 3485-3492, 2019. URL https://aclanthology.org/P19-1338. +Chen Liang, Jonathan Berant, Quoc Le, Kenneth Forbus, and Ni Lao. Neural symbolic machines: Learning semantic parsers on Freebase with weak supervision. In ACL, pp. 23-33, 2017. URL https://aclanthology.org/P17-1003/. 
+Xianggen Liu, Lili Mou, Haotian Cui, Zhengdong Lu, and Sen Song. Jumper: Learning when to make classification decisions in reading. In *IJCAI*, pp. 4237-4243, 2018. URL https://www.ijcai.org/proceedings/2018/0589.pdf. +Yang Liu and Mirella Lapata. Text summarization with pretrained encoders. In EMNLP-IJCNLP, pp. 3730-3740, 2019. URL https://aclanthology.org/D19-1387/. +Zhengdong Lu, Xianggen Liu, Haotian Cui, Yukun Yan, and Daqi Zheng. Object-oriented neural programming (OONP) for document understanding. In ACL, pp. 2717-2726, 2018. URL https://aclanthology.org/P18-1253. +Bill MacCartney and Christopher D Manning. Natural logic for textual inference. In Proc. ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 193-200, 2007. URL https://aclanthology.org/W07-1431/. +Bill MacCartney and Christopher D. Manning. Modeling semantic containment and exclusion in natural language inference. In *COLING*, pp. 521-528, 2008. URL https://aclanthology.org/C08-1066. +Bill MacCartney and Christopher D Manning. An extended model of natural logic. In Proc. International Conference on Computational Semantics, pp. 140-156, 2009. URL https://aclanthology.org/W09-3714. +Bill MacCartney, Michel Galley, and Christopher D Manning. A phrase-based alignment model for natural language inference. In EMNLP, pp. 802-811, 2008. URL https://aclanthology.org/D08-1084. + +Rabeeh Karimi Mahabadi, Florian Mai, and James Henderson. Learning entailment-based sentence embeddings from natural language inference. Online Manuscript, 2020. URL https://openreview.net/forum?id=BkxackSKvH. +Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B. Tenenbaum, and Jiajun Wu. The neurosymbolic concept learner: Interpreting scenes, words, and sentences from natural supervision. In ICLR, 2019. URL https://openreview.net/forum?id=rJgM1hRctm. +Lili Mou, Rui Men, Ge Li, Yan Xu, Lu Zhang, Rui Yan, and Zhi Jin. Natural language inference by tree-based convolution and heuristic matching. In ACL, pp. 130-136, 2016. 
URL https://aclanthology.org/P16-2022. +Lili Mou, Zhengdong Lu, Hang Li, and Zhi Jin. Coupling distributed and symbolic execution for natural language queries. In ICML, pp. 2518-2526, 2017. URL https://proceedings.mlr.press/v70/mou17a.html. +Sharan Narang, Colin Raffel, Katherine Lee, Adam Roberts, Noah Fiedel, and Karishma Malkan. WT5?! Training text-to-text models to explain their predictions. arXiv preprint arXiv:2004.14546, 2020. URL https://arxiv.org/abs/2004.14546. +Ken Nozaki, Hisao Ishibuchi, and Hideo Tanaka. A simple but powerful heuristic method for generating fuzzy rules from numerical data. Fuzzy Sets and Systems, 86(3):251-270, 1997. URL https://www.sciencedirect.com/science/article/abs/pii/0165011495004130. +Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. BLEU: A method for automatic evaluation of machine translation. In ACL, pp. 311-318, 2002. URL https://aclanthology.org/P02-1040. +Ankur Parikh, Oscar Täckström, Dipanjan Das, and Jakob Uszkoreit. A decomposable attention model for natural language inference. In EMNLP, pp. 2249-2255, 2016. URL https://aclanthology.org/D16-1244/. +Matt Post. A call for clarity in reporting BLEU scores. In Proc. Conference on Machine Translation: Research Papers, pp. 186-191, 2018. URL https://aclanthology.org/W18-6319. +Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. OpenAI Blog, 2018. URL https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf. +Nils Reimers and Iryna Gurevych. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In EMNLP, 2019. URL https://aclanthology.org/D19-1410. +Cynthia Rudin. Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1(5):206-215, 2019. URL https://www.nature.com/articles/s42256-019-0048-x. 
+Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, pp. 5998-6008, 2017. URL https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf. +Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In ICLR, 2019. URL https://openreview.net/forum?id=rJ4km2R5t7. +Shuohang Wang and Jing Jiang. Learning natural language inference with LSTM. In NAACL-HLT, pp. 1442-1451, 2016. URL https://aclanthology.org/N16-1170/. +Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V Le, and Denny Zhou. Chain of thought prompting elicits reasoning in large language models. In NeurIPS, 2022. URL https://openreview.net/forum?id=_VjQlMeSB_J. + +Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In *NAACL-HLT*, pp. 1112–1122, 2018. URL https://aclanthology.org/N18-1101. +Wenhan Xiong, Thien Hoang, and William Yang Wang. DeepPath: A reinforcement learning method for knowledge graph reasoning. In EMNLP, pp. 564-573, 2017. URL https://aclanthology.org/D17-1060/. +Hitomi Yanaka, Koji Mineshima, Daisuke Bekki, Kentaro Inui, Satoshi Sekine, Lasha Abzianidze, and Johan Bos. HELP: A dataset for identifying shortcomings of neural models in monotonicity reasoning. In Proc. Conference on Lexical and Computational Semantics, pp. 250-255, 2019a. URL https://aclanthology.org/S19-1027. +Hitomi Yanaka, Koji Mineshima, Daisuke Bekki, Kentaro Inui, Satoshi Sekine, Lasha Abzianidze, and Johan Bos. Can neural networks understand monotonicity reasoning? In ACL BlackboxNLP Workshop, pp. 31-40, 2019b. URL https://aclanthology.org/W19-4804. +Kexin Yi, Jiajun Wu, Chuang Gan, Antonio Torralba, Pushmeet Kohli, and Josh Tenenbaum. 
Neural-symbolic VQA: Disentangling reasoning from vision and language understanding. In NeurIPS, 2018. URL https://proceedings.neurips.cc/paper/2018/file/5e388103a391daabe3de1d76a6739ccd-Paper.pdf. +Deunsol Yoon, Dongbok Lee, and SangKeun Lee. Dynamic self-attention: Computing attention over words dynamically for sentence embedding. arXiv preprint arXiv:1808.07383, 2018. URL https://arxiv.org/abs/1808.07383. +Lotfi A Zadeh. Fuzzy logic. Computer, 21(4):83-93, 1988. URL https://ieeexplore.ieee.org/abstract/document/53. +Lotfi A Zadeh. Fuzzy sets. In *Fuzzy Sets, Fuzzy Logic, and Fuzzy Systems*, pp. 394-432. World Scientific, 1996. URL https://www.worldscientific.com/doi/abs/10.1142/9789814261302_0021. +Zhuosheng Zhang, Yuwei Wu, Hai Zhao, Zuchao Li, Shuailiang Zhang, Xi Zhou, and Xiang Zhou. Semantics-aware BERT for language understanding. In AAAI, pp. 9628-9635, 2020. URL https://ojs.aaai.org/index.php/AAAI/article/view/6510. +Xinyan Zhao and V.G.Vinod Vydiswaran. LIREx: Augmenting language inference with relevant explanations. In AAAI, pp. 14532-14539, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/17708. + +# A IMPLEMENTATION DETAILS + +# A.1 PHRASE DETECTION + +We present more details about our phrase detection. We use $\mathrm{SpaCy}^5$ to obtain the part-of-speech (POS) tag $^6$ of every word. SpaCy also tags noun phrases. However, if a noun phrase follows a preposition (with a fine-grained POS tag being IN), we remove it from noun phrases but tag it as a prepositional phrase. + +In addition, we extract verbs by the POS tag VERB. A verb may be followed by a particle with the fine-grained POS tag being RP (e.g., show off). It is treated as a verb phrase. In order to handle negation, we allow optional AUX NOT before a verb, (e.g., could not help). This, however, only counts less than $1\%$ in the dataset, and does not affect our model much. + +To capture other potential semantic units, we treat remaining open class words7 as individual phrases. 
Finally, the remaining non-content words (in the categories of closed words and others) are discarded (e.g., "there is"). This is appropriate, because they do not represent meaningful semantics or play a + +Table 5: Our rules for phrase detection. "[·]" means the item is optional. + +
Example: The woman is showing off her blue dog at the playground.
NumberPhrase typeRuleExtracted phrase(s)
1Prepositional phraseIN + NPat the playground
2Noun phraseNPThe woman|her blue dog
3Verb phrase[AUX] + [NOT] + VERB + [RP]is showing off
4OthersOther open class words-
+ +![](images/8248aa4102f7171ad75b057337f1e3f4e19a75b3822c2d4ea449260d0811d919.jpg) +Figure 4: Results of tuning the coefficient of global features. + +![](images/0103103fde1e521ef699f5f4acd134e4a8292249cc3d31cc07a1f2ae43aa5200.jpg) + +role in reasoning. Table 5 summarizes all the rules used in our approach. They are executed in order and extracted phrases are exclusive. For example, the playground in the phrase at the playground will not be treated as a standalone noun phrase, as it is already part of a prepositional phrase. + +Empirically, our rule-based approach works well for the NLI dataset, and our logical reasoning is at the granularity of the extracted phrases. + +# A.2 SETTINGS + +Details of the EPR Model. We chose the pretrained model all-mpnet-base- $v2^8$ from the SentenceBERT study (Reimers & Gurevych, 2019) and obtained 768-dimensional local and global phrase embeddings. Our MLP had the same dimension as the embeddings, i.e., 768D for the local and global variants, or 1536D for the concatenation variant. We chose the coefficient for the global feature in Eq. (1) from a candidate set of $\{0.0, 0.2, 0.4, 0.6, 0.8, 1.0\}$ . Figure 4 shows the hyperparameter tuning results on SNLI (mentioned in § 4.2) and MNLI (to be discussed in § C.1). We find that 0.4 yields the best sentence accuracy in SNLI, and that 1.0 is the best for MNLI. As our focus is on reasoning, we set the coefficient to be 0.6, because it yields the highest phrasal reasoning performance and decent sentence-level performance for both experiments and in terms of both geometric mean and arithmetic mean of $F$ scores. The pretrained language model (LM) was either finetuned or un-finetuned during training. Finetuning yields higher performance (Table 2), whereas un-finetuned LM is more efficient for in-depth analyses (Table 3). We trained the model with a batch size of 256. 
We used Adam (Kingma & Ba, 2015) with a learning rate of 5e-5, $\beta_1 = 0.9$ , $\beta_2 = 0.999$ , learning rate warm up over the first 10 percent of the total steps, and linear decay of the learning rate. The model was trained up to 3 epochs, following the common practice (Dodge et al., 2020). Our main model variants were trained 5 times with different parameter initializations, and we report the mean and standard deviation. + +Details of Textual Explanation Generation. We used the pretrained T5-small model for finetuning with a batch size of 32. The optimizer was Adam with an initial learning rate of 3e-4, $\beta_{1} = 0.9$ , $\beta_{2} = 0.999$ , learning rate warm-up for the first 2 epochs, and linear decay of the learning rate up to 10 + +![](images/1084e0ae9a63b49c6777536940cc338080d3b2b18180e7db18d99603a3053c0d.jpg) +Figure 5: The prompt for phrasal reasoning. + +epochs; then we decreased the learning rate to 3e-6 and trained the model until the validation BLEU score did not increase for 2 epochs. + +Details of the Prompting Baseline. We adopted the GPT-3 (the text-davinci-003 version with 175B parameters) (Brown et al., 2020) as a prompting baseline to demonstrate large language models (LLMs)' phrasal reasoning ability. + +We consider exemplar-based prompting, because it is unlikely for an LLM to output structured reasoning results in a zero-shot manner. Moreover, our examples are chosen to cover all reasoning cases. We also set the temperature of decoding to 0 to obtain deterministic reasoning, following CoT prompting (Wei et al., 2022). Rule-based post-processing was applied to extract slot values. Figure 5 presents the prompt used for phrasal reasoning. + +# B DATA ANNOTATION AND REASONING EVALUATION METRICS + +Previous studies have not explicitly evaluated reasoning performance. 
Typically, they resort to sentence-level classification accuracy (Wang & Jiang, 2016; Mahabadi et al., 2020) or case studies (Parikh et al., 2016; Feng et al., 2020) to demonstrate the effectiveness of their alleged interpretable models, which we believe is inadequate. + +Therefore, we annotated a model-agnostic corpus about phrasal logical relationships and developed a set of metrics to evaluate the phrasal reasoning performance quantitatively. The resources are released on our website (Footnote 1) to facilitate future research. + +# B.1 DATA ANNOTATION + +We annotated the phrases and their logical relationships in a data sample. The annotators were asked to select corresponding phrases from both premise and hypothesis, and label them as either Entailment, Contradiction, or Neutral, with the sentence-level NLI label being given. Annotators could also select a phrase from either a premise or a hypothesis and label it as Unaligned. The process can be repeated until all phrases are labeled for a data sample. Figure 6 shows a screenshot of our annotation page. In the left panel, the annotator could select phrases in the two sentences and mark them with NLI labels. The annotator can view a sample's annotated phrases in the right panel and navigate through different samples. + +The annotation was performed by three in-lab researchers who are familiar with the NLI task. Our preliminary study shows low agreement when the annotators are unfamiliar with the task; thus it is inappropriate to recruit Mechanical Turks for annotation. We randomly selected 100 samples for annotation, following previous work on the textual explanation for SNLI (Camburu et al., 2018), + +![](images/b4fe9d3be55d6fd2a0483619e745b2fe18ac33cf34df0d009798b3d99827c502.jpg) +Figure 6: A screenshot of the annotation page. + +![](images/6b790ebae135bb16bcb093f146fe34e93ccaf5d0c14c254e08af31be87a70402.jpg) + +Table 6: Examples illustrating the proposed metrics, where we consider the Entailment category. 
"|" refers to a phrase segmentation. + +
Example annotation of entailment (in highlight): Premise: A kid in red is playing in a garden. Hypothesis: A child in red is watching TV in the bedroom.
| # | Example output | $P_{\mathsf{E}}^{(P)}$ | $P_{\mathsf{E}}^{(H)}$ | $P_{\mathsf{E}}$ | $R_{\mathsf{E}}^{(P)}$ | $R_{\mathsf{E}}^{(H)}$ | $R_{\mathsf{E}}$ | $F_{\mathsf{E}}$ | Explanation |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 1 | P–H: in a garden | 0 | 0 | 0 | 0 | 0 | 0 | 0 | Although *in* occurs in the annotation, the word indexes are different. The reasoning is wrong. |
| 2 | P–H: watching TV | 1 | 0 | 0 | 1 | 0 | 0 | 0 | Mis-matched phrases in hypothesis. The reasoning is wrong. |
| 3 | P–H: a kid \| in red | 1 | 1 | 1 | 1 | 1 | 1 | 1 | All word indexes match the annotation. The reasoning is correct. |
+ +which is adequate to show statistical significance. Since our annotation only concerns data samples, it is agnostic to any machine learning model. + +# B.2 EVALUATION METRICS FOR PHRASAL REASONING + +We propose a set of $F$ -scores in Entailment, Contradiction, Neutral, and Unaligned to quantitatively evaluate the phrasal reasoning performance. We first introduce our metric for one data sample and then explain the extension to a corpus. + +Consider the Entailment category as an example. We first count the number of "hits" (true positives) between the word indexes of model output and annotation. Using word indexes (instead of words) rules out hitting the words in misaligned phrases (Example 1, Table 6). Then, we calculate precision scores for the premise and hypothesis, denoted by $P_{\mathsf{E}}^{(P)}$ and $P_{\mathsf{E}}^{(H)}$ , respectively. Their geometric mean $P_{\mathsf{E}} = (P_{\mathsf{E}}^{(P)}P_{\mathsf{E}}^{(H)})^{1 / 2}$ is considered as the precision for Entailment. Here, the geometric mean rules out incorrect reasoning that hits either the premise or hypothesis, but not both (Example 2, Table 6). Further, we compute the recall score $R_{\mathsf{E}}$ in a similar way, and finally obtain the $F$ -score by $F_{\mathsf{E}} = \frac{2P_{\mathsf{E}}R_{\mathsf{E}}}{P_{\mathsf{E}} + R_{\mathsf{E}}}$ . Likewise, $F_{\mathsf{C}}$ and $F_{\mathsf{N}}$ are calculated for Contradiction and Neutral. In addition, we compute the $F$ -score for unaligned phrases in premise and hypothesis, denoted by $F_{\mathsf{UP}}$ and $F_{\mathsf{UH}}$ , respectively. + +When calculating our $F$ -scores for a corpus, we use micro-average, i.e., the precision and recall ratios are calculated in the corpus level. This is more stable, especially considering the varying lengths of sentences. Moreover, we compare model output against three annotators and perform an arithmetic average, further reducing the variance caused by ambiguity. + +Table 7: Results on MNLI. 
†Quoted from respective papers. ‡Our replication. + +
| Model | Sent Acc | $F_{\mathsf{E}}$ | $F_{\mathsf{C}}$ | $F_{\mathsf{UP}}$ | $F_{\mathsf{UH}}$ | GM | AM |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Human | – | 85.15 | 73.44 | 73.18 | 46.31 | 67.85 | 69.52 |
| *Non-reasoning methods* | | | | | | | |
| Mahabadi et al. (2020)† | 73.8 | – | – | – | – | – | – |
| LSTM (Wang et al., 2019)† | 72.2 | – | – | – | – | – | – |
| Transformer (Radford et al., 2018) | 82.1 | – | – | – | – | – | – |
| *Reasoning methods* | | | | | | | |
| NNL (Feng et al., 2020)‡ | 61.28 | 50.33 | 32.00 | 49.78 | 0.00 | 0.00 | 33.03 |
| STP | 75.15 | 55.47 | 51.72 | 64.32 | 37.57 | 51.31 | 52.27 |
| EPR (Concat, LM finetuned) | 79.65±0.19 | 61.76±0.32 | 52.09±0.41 | 64.32 | 37.57 | 52.80±0.07 | 53.93±0.07 |
+ +It should be emphasized that our metrics evaluate phrase detection and alignment in an implicit manner. A poor phrase detector and aligner will result in a low reasoning score (shown in our ablation study), but we do not explicitly calculate phrase detection and alignment accuracy. This helps us cope with the ambiguity of the phrase granularity (Example 3, Table 6). + +To summarize, we propose an evaluation framework including data annotation (§ B.1) and evaluation metrics (§ B.2). These are our contributions in formulating the phrasal reasoning task for NLI. + +# C ADDITIONAL RESULTS + +# C.1 RESULTS ON MNLI + +In this appendix, we provide additional results on the matched section of the MNLI dataset (Williams et al., 2018), which consists of 393K training samples, 10K validation samples, and another 10K test samples. It has the same format as the SNLI dataset, but samples come from multiple domains and are more diverse. We follow § 4.1 and use the same protocol to create the phrasal reasoning annotation for the MNLI dataset based on 100 randomly selected samples. However, we found that MNLI is much noisier than SNLI; particularly, the sentences labeled as Neutral in MNLI share few related phrases. For example, the two sentences do not have much in common in the sample "Premise: If you still want to join, it might be worked." and "Hypothesis: Your membership is the only way that this could work". Moreover, the inter-human agreement is low in the Neutral category. Therefore, we believe the corpus quality is less satisfactory for Neutral. To ensure meaningful evaluation, we ignored the evaluation of Neutral in this experiment, although our reasoning approach is not changed. The remaining 60 samples containing Entailment and Contradiction serve as the MNLI phrasal reasoning corpus. + +We consider the EPR variant with concatenated local and global features, since the SNLI experiment shows it achieves a good balance between sentence-level accuracy and reasoning. 
Our models were run 5 times with different initializations. + +As seen in Table 7, our EPR approach is again worse than humans, but largely improves the reasoning performance compared with NNL and STP baselines. Its sentence-level prediction is comparable to (although slightly lower than) finetuning Transformers. The results are highly consistent with SNLI experiments, showing the robustness of our approach. + +It is important to notice that the EPR model here is trained on MNLI sentence labels, and is not transferred from the SNLI dataset. In our preliminary experiments, we tried transfer learning from SNLI to MNLI and failed to obtain satisfactory performance. We found that our EPR is more prone to the out-of-vocabulary issue (i.e., it does not predict well for the phrases in the new domain), whereas a black-box neural network may learn biased sentence patterns and achieve higher performance in transfer learning. + +# C.2 ERROR ANALYSIS + +To show how phrasal reasoning affects sentence-level prediction, we perform an error analysis in Table 8. Specifically, we examine the reasoning performance (arithmetic mean of $F$ -scores) when the sentence label is correctly and incorrectly predicted on the SNLI dataset. As shown, EPR models + +Table 8: Sentence-level prediction count and arithmetic average reasoning performance ( $F$ -score) when the sentence label is correctly and incorrectly predicted on the SNLI dataset. + +
| Sentence-level prediction | Count in % (Local finetuned) | Count in % (Concat finetuned) | Reasoning performance AMF (Local finetuned) | Reasoning performance AMF (Concat finetuned) |
| --- | --- | --- | --- | --- |
| Correct | 75.4±1.36 | 87.8±0.75 | 65.71±0.83 | 58.68±0.67 |
| Wrong | 24.6±1.36 | 12.2±0.75 | 40.74±2.01 | 37.58±3.28 |
| Overall | 100.0±0.00 | 100.0±0.00 | 59.93±0.67 | 56.32±1.13 |
+ +
Groundtruth: Entailment Prediction: Entailment +Three young boys enjoying a day at the beach. +(a) +The boys are in the beach.Groundtruth: Contradiction Prediction: Contradiction +A man playing fetch with two brown dogs. +(b) +The dogs are asleep.Entailment +Contradiction +Neutral +Unaligned
Groundtruth: Neutral Prediction: Neutral +Walkers on a concrete boardwalk under a blue sky. +(c) +Walkers under a blue sky near the beach.Groundtruth: Entailment Prediction: Neutral +An elderly couple in heavy coats are looking at black and white photos displayed on a wall. +(d) +Octogenarians admiring the old photographs that decorated the wall.
+ +Figure 7: Examples of explainable phrasal reasoning predicted by our EPR model. Words in one color block are detected phrases, a dotted line shows the alignment of two phrases, and the color represents the predicted phrasal NLI label. In Example (d), EPR's prediction suggests the provided label in SNLI is incorrect. + +with both local and concatenated features have much higher reasoning performance when sentence labels are correctly predicted than incorrectly predicted. The positive correlation between phrasal reasoning performance and sentence-level accuracy shows our fuzzy logic induction rules indeed make sense. + +We also find that the model with local features has a higher reasoning performance than with concatenated features, even when the sentence-level prediction is wrong. This is because the local model is unaware of the context of the sentences. Thus, it must perform strict phrasal reasoning based on the induction rules, even if in this case the reasoning process is imperfect and leads to sentence-level errors. + +# C.3 CASE STUDY OF EPR + +We present case studies of EPR in Figure 7. Our EPR performs impressive reasoning for the NLI task, which is learned in a weakly supervised manner with only sentence-level labels. + +In Example (a), the two sentences are predicted Entailment because three young boys entails the boys and at the beach entails in the beach, whereas unaligned phrases enjoying and a day are allowed in the premise for Entailment. In Example (b), playing contradicts asleep, and the two sentences are also predicted Contradiction. Likewise, Example (c) is predicted Neutral because the aligned phrases on a concrete boardwalk and near the beach are neutral. + +In our study, we also find several interesting examples where EPR's reasoning provides clues suggesting that the target labels may be incorrect in the SNLI dataset. 
In Example (d), our model predicts Neutral for looking and admiring, as well as for at black and white photos and the old photographs. Thus, the two sentences are predicted Neutral instead of the provided label Entailment. We believe our model's reasoning and prediction are correct, because people looking at something may or may not admire it; a black-and-white photo may or may not be an old photo (as it could be a black-and-white artistic photo). + +# C.4 CASE STUDY OF THE TEXTUAL EXPLANATION GENERATION + +We conduct another case study to show how EPR's reasoning is used in the textual explanation generation task. As seen in Figure 8, our EPR reasoning yields structured factual tuples: on a deserted beach entailing at the beach, Some dogs contradicting only one dog, and running unaligned (matched with a special token [EMPTY]). Our explanation generation model attends to these factual tuples, and the heat map shows that our model gives the most attention weights (with an average of + +
Input Premise : Some dogs are running on a deserted beach. +Hypothesis : There is only one dog at the beach.
Label Contradiction (not used during our explanation generation)
EPR's Reasoning Output
| Premise phrase | Hypothesis phrase | EPR label | Attention score |
| --- | --- | --- | --- |
| on a deserted beach | at the beach | E | 23.16 |
| Some dogs | only one dog | C | 61.22 |
| running | [EMPTY] | E | 15.62 |
Output explanation Some dogs is more than one dog.
Reference explanations: +(1) Some is more than one, therefore there can't be only one dog. +(2) Some indicates more than one dog. One dog is not some dogs. +(3) Some dogs are not one dog.
+ +![](images/7aad61fa82aba6c0d7d230334d58e9d5a183d8bb903f0c5c9b91377fcfef4dd7.jpg) +Figure 8: Case study of the textual explanation generation. The heat map shows the step-by-step and average attention weights to the factual tuples (vertical axis). + +0.61) to the tuple, Some dogs contradicting only one dog, to generate the explanation "Some dogs is more than one dog." This example illustrates that the factual tuples given by our EPR model provide meaningful information and can improve textual explanation generation. + +# D LIMITATION AND FUTURE WORK + +This paper performs phrase detection and alignment by heuristics. They work well empirically in our experiments, although further improvement is possible (for example, by considering syntactic structures). However, our main focus is neural fuzzy logic for weakly supervised reasoning. This largely differs from previous work based on manually designed lexicons and rules (Hu et al., 2020; Chen et al., 2021). + +Our long-term goal is to develop a weakly supervised, end-to-end trained neuro-symbolic system that can extract semantic units and perform reasoning for a given downstream NLP task. This paper is an important milestone toward the long-term goal. + +# E ETHICAL STATEMENTS + +Our work involves human annotation of the phrasal logical relationships. Since the research subject here is logic (rather than humans), there are minimal ethical concerns. We nevertheless followed a standard protocol of human evaluation (involving identity protection, and proper compensation), approved by our institutional ethics board. + +# ACKNOWLEDGMENTS + +We thank all reviewers and chairs for their valuable comments. The research is supported in part by the Natural Sciences and Engineering Research Council of Canada (NSERC) under Grant No. RGPIN2020-04465, the Amii Fellow Program, the Canada CIFAR AI Chair Program, a UAHJIC project, a donation from DeepMind, and the Digital Research Alliance of Canada (alliancecan.ca). 
Atharva Naik contributed to the research as an intern at the University of Alberta through the Mitacs Globalink program. \ No newline at end of file diff --git a/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/images.zip b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..fefb623bf03a10c422ae1013c7eba9a73097e7c7 --- /dev/null +++ b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f31dd8ed05c77954ccd22f7f9551b42d0afda22c9ee115265e0729d592a496a5 +size 876410 diff --git a/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/layout.json b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..df5eb86be4c5643068da47b9285901ac4555b2bd --- /dev/null +++ b/2023/Weakly Supervised Explainable Phrasal Reasoning with Neural Fuzzy Logic/layout.json @@ -0,0 +1,12601 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 457, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 457, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 457, + 116 + ], + "type": "text", + "content": "WEAKLY SUPERVISED EXPLAINABLE PHRASAL REASONING WITH NEURAL FUZZY LOGIC" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 133, + 514, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 133, + 514, + 146 + ], + "spans": [ + { + "bbox": [ + 111, + 133, + 514, + 146 + ], + "type": "text", + "content": "Zijun " + }, + { + "bbox": [ + 111, + 133, + 514, + 146 + ], + "type": "inline_equation", + "content": "\\mathbf{W}\\mathbf{u}^{*1}" + }, + { + "bbox": [ + 111, + 133, + 514, + 146 + ], + "type": "text", + "content": " , Zi Xuan Zhang\\*, 
Atharva Naik+2, Zhijian Mei', Mauajama Firdaus', Lili Mou" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 146, + 506, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 146, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 111, + 146, + 506, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 111, + 146, + 506, + 158 + ], + "type": "text", + "content": "Dept. Computing Science & Alberta Machine Intelligence Institute (Amii), University of Alberta" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 158, + 229, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 158, + 229, + 170 + ], + "spans": [ + { + "bbox": [ + 112, + 158, + 229, + 170 + ], + "type": "text", + "content": "2Carnegie Mellon University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 170, + 402, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 170, + 402, + 181 + ], + "spans": [ + { + "bbox": [ + 112, + 170, + 402, + 181 + ], + "type": "text", + "content": "{zijun4, zixuan7, zimei1}@ualberta.ca, arnaik@cs.cmu.edu," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 112, + 181, + 309, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 181, + 309, + 191 + ], + "spans": [ + { + "bbox": [ + 112, + 181, + 309, + 191 + ], + "type": "text", + "content": "{mauzama.03, doublepower.mou}@gmail.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 191, + 389, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 191, + 389, + 203 + ], + "spans": [ + { + "bbox": [ + 112, + 191, + 389, + 203 + ], + "type": "text", + "content": "*Equal contribution, †Work done during the internship at UofA/Amii" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 276, + 232, + 335, + 244 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 232, + 335, + 244 + ], + "spans": [ + { + 
"bbox": [ + 276, + 232, + 335, + 244 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 256, + 471, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 256, + 471, + 389 + ], + "spans": [ + { + "bbox": [ + 140, + 256, + 471, + 389 + ], + "type": "text", + "content": "Natural language inference (NLI) aims to determine the logical relationship between two sentences, such as Entailment, Contradiction, and Neutral. In recent years, deep learning models have become a prevailing approach to NLI, but they lack interpretability and explainability. In this work, we address the explainability of NLI by weakly supervised logical reasoning, and propose an Explainable Phrasal Reasoning (EPR) approach. Our model first detects phrases as the semantic unit and aligns corresponding phrases in the two sentences. Then, the model predicts the NLI label for the aligned phrases, and induces the sentence label by fuzzy logic formulas. Our EPR is almost everywhere differentiable and thus the system can be trained end to end. In this way, we are able to provide explicit explanations of phrasal logical relationships in a weakly supervised manner. 
We further show that such reasoning results help textual explanation generation.1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 407, + 207, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 407, + 207, + 420 + ], + "spans": [ + { + "bbox": [ + 106, + 407, + 207, + 420 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 432, + 506, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 432, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 432, + 506, + 510 + ], + "type": "text", + "content": "Natural language inference (NLI) aims to determine the logical relationship between two sentences (called a premise and a hypothesis), and target labels include Entailment, Contradiction, and Neutral (Bowman et al., 2015; MacCartney & Manning, 2008). Figure 1 gives an example, where the hypothesis contradicts the premise. NLI is important to natural language processing, because it involves logical reasoning and is a key problem in artificial intelligence. Previous work shows that NLI can be used in various downstream tasks, such as information retrieval (Karpukhin et al., 2020) and text summarization (Liu & Lapata, 2019)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "content": "In recent years, deep learning has become a prevailing approach to NLI (Bowman et al., 2015; Mou et al., 2016; Wang & Jiang, 2016; Yoon et al., 2018). Especially, pretrained language models with the Transformer architecture (Vaswani et al., 2017) achieve state-of-the-art performance for the NLI task (Radford et al., 2018; Zhang et al., 2020). However, such deep learning models are black-box machinery and lack interpretability. 
In real applications, it is important to understand how these models make decisions (Rudin, 2019)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 586, + 507, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 507, + 687 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 507, + 687 + ], + "type": "text", + "content": "Several studies have addressed the explainability of NLI models. Camburu et al. (2018) generate a textual explanation by sequence-to-sequence supervised learning, in addition to NLI classification; such an approach is multi-task learning of text classification and generation, which does not perform reasoning itself. MacCartney et al. (2008) propose a scoring model to align related phrases; Parikh et al. (2016) and Jiang et al. (2021) propose to obtain alignment by attention mechanisms. However, they only provide correlation information, instead of logical reasoning. Other work incorporates upward and downward monotonicity entailment reasoning for NLI (Hu et al., 2020; Chen et al., 2021), but these approaches are based on hand-crafted rules (e.g., every downward entailing some) and are restricted to Entailment only; they cannot handle Contradiction or Neutral." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 691, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 691, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 691, + 506, + 715 + ], + "type": "text", + "content": "In this work, we address the explainability for NLI by weakly supervised phrasal logical reasoning. 
Our goal is to explain NLI predictions with phrasal logical relationships between the premise and" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 389, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 389, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 389, + 732 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 117, + 720, + 389, + 732 + ], + "type": "text", + "content": "Code and resources available at https://github.com/MANGA-UOFA/EPR" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "hypothesis. 
Intuitively, an NLI system with an explainable reasoning mechanism should be equipped with the following functionalities:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 110, + 505, + 188 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 107, + 110, + 505, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 110, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 107, + 110, + 505, + 133 + ], + "type": "text", + "content": "1. The system should be able to detect corresponding phrases and tell their logical relationship, e.g., several men contradicting one man, but pull in a fishing net entailing holding the net (Figure 1)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 133, + 504, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 133, + 504, + 155 + ], + "spans": [ + { + "bbox": [ + 107, + 133, + 504, + 155 + ], + "type": "text", + "content": "2. The system should be able to induce sentence labels from phrasal reasoning. In the example, the two sentences are contradictory because there exists one contradictory phrase pair." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 155, + 505, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 155, + 505, + 188 + ], + "spans": [ + { + "bbox": [ + 107, + 155, + 505, + 188 + ], + "type": "text", + "content": "3. More importantly, such reasoning should be trained in a weakly supervised manner, i.e., the phrase-level predictions are trained from sentence labels only. Otherwise, the reasoning mechanism degrades to multi-task learning, which requires massive fine-grained human annotations." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 193, + 297, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 297, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 297, + 313 + ], + "type": "text", + "content": "To this end, we propose an Explainable Phrasal Reasoning (EPR) approach to the NLI task. Our model obtains phrases as semantic units, and aligns corresponding phrases by embedding similarity. Then, we predict the NLI labels (namely, Entailment, Contradiction, and Neutral) for the aligned phrases. Finally, we propose to induce the sentence-level label from phrasal labels in a fuzzy logic manner (Zadeh, 1988; 1996). Our model is differentiable, and the phrasal reasoning component can be trained" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 305, + 195, + 504, + 277 + ], + "blocks": [ + { + "bbox": [ + 305, + 195, + 504, + 277 + ], + "lines": [ + { + "bbox": [ + 305, + 195, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 305, + 195, + 504, + 277 + ], + "type": "image", + "image_path": "36d8599dc69495cec1040aad3f195d48f60ea647a67099348cbb3ffd1e91bf76.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 281, + 504, + 304 + ], + "lines": [ + { + "bbox": [ + 302, + 281, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 302, + 281, + 504, + 304 + ], + "type": "text", + "content": "Figure 1: The natural language inference (NLI) task and desired phrasal reasoning." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 314, + 504, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 504, + 336 + ], + "type": "text", + "content": "with the weak supervision of sentence NLI labels. 
In this way, our EPR approach satisfies all the desired properties mentioned above." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 342, + 505, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 505, + 387 + ], + "type": "text", + "content": "In our experiments, we developed a comprehensive methodology (data annotation and evaluation metrics) to quantitatively evaluate phrasal reasoning performance, which has not been accomplished in previous work. We extend previous studies and obtain plausible baseline models. Results show that our EPR yields a much more meaningful explanation regarding " + }, + { + "bbox": [ + 104, + 342, + 505, + 387 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 342, + 505, + 387 + ], + "type": "text", + "content": " scores against human annotation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 392, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 425 + ], + "type": "text", + "content": "To further demonstrate the quality of extracted phrasal relationships, we feed them to a textual explanation model. Results show that our EPR reasoning leads to an improvement of 2 points in BLEU scores, achieving a new state of the art on the e-SNLI dataset (Camburu et al., 2018)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 430, + 290, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 290, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 290, + 441 + ], + "type": "text", + "content": "Our contributions are summarized as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 447, + 505, + 523 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 108, + 447, + 504, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 447, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 108, + 447, + 504, + 468 + ], + "type": "text", + "content": "1. We formulate a phrasal reasoning task for natural language inference (NLI), addressing the interpretability of neural models." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 469, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 469, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 107, + 469, + 504, + 491 + ], + "type": "text", + "content": "2. We propose an EPR model that induces sentence-level NLI labels from explicit phrasal logical labels by neural fuzzy logic. EPR is able to perform reasoning in a weakly supervised way." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 491, + 505, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 491, + 505, + 523 + ], + "spans": [ + { + "bbox": [ + 107, + 491, + 505, + 523 + ], + "type": "text", + "content": "3. We annotated phrasal logical labels and designed a set of metrics to evaluate phrasal reasoning. We further use our reasoning results to improve textual explanation generation. Our code and annotated data are released for future studies." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 529, + 504, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 504, + 552 + ], + "type": "text", + "content": "To the best of our knowledge, we are the first to develop a weakly supervised phrasal reasoning model for the NLI task." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 575, + 212, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 575, + 212, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 212, + 588 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 605, + 505, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 505, + 693 + ], + "type": "text", + "content": "Natural Language Inference. MacCartney & Manning (2009) propose seven natural logic relations in addition to Entailment, Contradiction, and Neutral. MacCartney & Manning (2007) also distinguish upward entailment (every mammal upward entailing some mammal) and downward entailment (every mammal downward entailing every dog) as different categories. Manually designed lexicons and rules are used to interpret Entailment in a finer-grained manner, such as downward and upward entailment (Hu et al., 2020; Chen et al., 2021). Feng et al. (2020) apply such natural logic to NLI reasoning at the word level; however, our experiments will show that their word-level treatment is not an appropriate granularity, and they fail to achieve meaningful reasoning performance." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": "The above reasoning schema focuses more on the quantifiers of first-order logic (Beltagy et al., 2016). However, the SNLI dataset (Bowman et al., 2015) we use only contains less than " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": " samples with explicit quantifiers, and the seven-category schema complicates reasoning in the weakly supervised" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "setting. Instead, we adopt three-category NLI labels following the SNLI dataset. Our focus is entity-based reasoning, and the treatment of quantifiers is absorbed into phrases." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 507, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 507, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 507, + 166 + ], + "type": "text", + "content": "We also notice that previous work lacks explicit evaluation on the reasoning performance for NLI. For example, the SNLI dataset only provides sentence-level labels. The HELP (Yanaka et al., 2019a) and MED (Yanaka et al., 2019b) datasets concern monotonicity inference problems, where the label is also at the sentence level; they only consider Entailment, ignoring Contradiction and Neutral. Thus, we propose a comprehensive framework for the evaluation of NLI reasoning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 506, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 260 + ], + "type": "text", + "content": "e-SNLI. Camburu et al. (2018) propose the e-SNLI task of textual explanation generation and use LSTM as a baseline. Kumar & Talukdar (2020) propose the NILE approach, using multiple decoders to generate explanations for all E, C, and N labels, and then predicting which to be selected. Zhao & Vydiswaran (2021) propose the LIREx approach, using additionally annotated rationales for explanation generation. Narang et al. (2020) finetune T5 with multiple explanation generation tasks. Although these systems can generate explanations, the nature of such finetuning approaches renders the explanation generator per se unexplainable. By contrast, we design a textual explanation generation model that utilizes our EPR's phrasal reasoning, obtained in a weakly supervised manner." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 265, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 506, + 310 + ], + "type": "text", + "content": "Neuro-Symbolic Approaches. In recent years, neuro-symbolic approaches have attracted increasing interest in the AI and NLP communities for interpreting deep learning models. Typically, these approaches are trained by reinforcement learning or its relaxation, such as attention and Gumbel-softmax (Jang et al., 2017), to reason about certain latent structures in a downstream task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 315, + 505, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 505, + 403 + ], + "type": "text", + "content": "For example, Lei et al. (2016) and Liu et al. (2018) extract key phrases or sentences for a text classification task. Lu et al. (2018) extract entities and relations for document understanding. Liang et al. (2017) and Mou et al. (2017) perform SQL-like execution based on input text for semantic parsing. Xiong et al. (2017) hop over a knowledge graph for reasoning the relationships between entities. Li et al. (2019) and Deshmukh et al. (2021) model symbolic actions for unsupervised syntactic structure induction. In the vision domain, Mao et al. (2019) propose a neuro-symbolic approach to learn visual concepts. Our work addresses logical reasoning for the NLI task, which is not tackled in previous neuro-symbolic studies." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "text", + "content": "Fuzzy Logic. 
Fuzzy logic (Zadeh, 1988; 1996) models an assertion and performs logic calculation with probability. For example, a quantifier (e.g., \"most\") and assertion (e.g., \"ill\") are modeled by a score in " + }, + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "inline_equation", + "content": "(0,1)" + }, + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "text", + "content": "; the score of a conjunction " + }, + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "inline_equation", + "content": "s(x_{1} \\wedge x_{2})" + }, + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "text", + "content": " is the product of " + }, + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "inline_equation", + "content": "s(x_{1})" + }, + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "inline_equation", + "content": "s(x_{2})" + }, + { + "bbox": [ + 104, + 408, + 505, + 552 + ], + "type": "text", + "content": ". In old-school fuzzy logic studies, the mapping from language to the score is usually given by human-defined heuristics (Zadeh, 1988; Nozaki et al., 1997), and may not be suited to the task of interest. By contrast, we train neural networks to predict the probability of phrasal logical relations, and induce the sentence NLI label by fuzzy logic formulas. Thus, our approach takes advantage of both worlds of symbolism and connectionism. Mahabadi et al. (2020) apply fuzzy logic formulas to replace multi-layer perceptrons for NLI. But they are unable to provide expressive reasoning because their fuzzy logic works on sentence features. Our work is inspired by Mahabadi et al. (2020). However, we propose to apply fuzzy logic to the detected and aligned phrases, enabling our approach to provide reasoning in a symbolic (i.e., expressive) way. We develop our own fuzzy logic formulas, which are also different from Mahabadi et al. (2020)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 570, + 237, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 570, + 237, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 570, + 237, + 582 + ], + "type": "text", + "content": "3 OUR EPR APPROACH" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 596, + 504, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 620 + ], + "type": "text", + "content": "In this section, we describe our EPR approach in detail, also shown in Figure 2. It has three main components: phrase detection and alignment, phrasal NLI prediction, and sentence label induction." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 624, + 505, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 505, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 505, + 681 + ], + "type": "text", + "content": "Phrase Detection and Alignment. In NLI, a data point consists of two sentences, a premise and a hypothesis. We first extract content phrases from both input sentences by rules and heuristics. For example, " + }, + { + "bbox": [ + 104, + 624, + 505, + 681 + ], + "type": "inline_equation", + "content": "\\left[\\mathrm{AUX}\\right] + \\left[\\mathrm{NOT}\\right] + \\mathrm{VERB} + \\left[\\mathrm{RP}\\right]" + }, + { + "bbox": [ + 104, + 624, + 505, + 681 + ], + "type": "text", + "content": " is treated as a verb phrase. Full details are presented in Appendix A.1. Compared with the word level (Parikh et al., 2016; Feng et al., 2020), a phrase is a more meaningful semantic unit for logical reasoning." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "text", + "content": "We then align corresponding phrases in the two sentences based on cosine similarity. Let " + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\mathrm{P} = (\\mathrm{p}_1,\\dots ,\\mathrm{p}_M)" + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\mathrm{H} = (\\mathrm{h}_1,\\dots ,\\mathrm{h}_N)" + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "text", + "content": " be the premise and hypothesis, respectively, where " + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\mathrm{p}_m" + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\mathrm{h}_n" + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "text", + "content": " are extracted phrases. We apply Sentence-BERT (Reimers & Gurevych, 2019) to each individual phrase and obtain the local phrase embeddings by " + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\pmb {p}_m^{(L)} = \\mathrm{SBERT}(\\mathrm{p}_m),\\pmb {h}_n^{(L)} = \\mathrm{SBERT}(\\mathrm{h}_n)" + }, + { + "bbox": [ + 104, + 685, + 505, + 734 + ], + "type": "text", + "content": ". 
We" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 68, + 504, + 201 + ], + "blocks": [ + { + "bbox": [ + 106, + 68, + 504, + 201 + ], + "lines": [ + { + "bbox": [ + 106, + 68, + 504, + 201 + ], + "spans": [ + { + "bbox": [ + 106, + 68, + 504, + 201 + ], + "type": "image", + "image_path": "07d7923cf7122154318a4e7621f0c71d8c910a16064898df435c9314cf0f5e25.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 152, + 202, + 457, + 215 + ], + "lines": [ + { + "bbox": [ + 152, + 202, + 457, + 215 + ], + "spans": [ + { + "bbox": [ + 152, + 202, + 457, + 215 + ], + "type": "text", + "content": "Figure 2: An overview of our Explainable Phrasal Reasoning (EPR) model." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 118, + 239, + 495, + 274 + ], + "blocks": [ + { + "bbox": [ + 122, + 224, + 487, + 237 + ], + "lines": [ + { + "bbox": [ + 122, + 224, + 487, + 237 + ], + "spans": [ + { + "bbox": [ + 122, + 224, + 487, + 237 + ], + "type": "text", + "content": "Table 1: An example showing the importance of handling unaligned phrases (in highlight)." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 118, + 239, + 495, + 274 + ], + "lines": [ + { + "bbox": [ + 118, + 239, + 495, + 274 + ], + "spans": [ + { + "bbox": [ + 118, + 239, + 495, + 274 + ], + "type": "table", + "html": "
Premise\nHypothesisPeople are shopping for fruit.\nPeople are shopping for fruit in the market.People are shopping for fruit in the market.\nPeople are shopping for fruit.
Sentence NLI[ ] Entailment [ ] Contradiction [√] Neutral[√] Entailment [ ] Contradiction [ ] Neutral
", + "image_path": "097447f9650fc5c325d0f2583396151e431bcfe6cf322f290e1f781c90abffde.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 285, + 504, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 504, + 322 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 504, + 322 + ], + "type": "text", + "content": "also apply Sentence-BERT to the entire premise and hypothesis sentences to obtain the global phrase embeddings " + }, + { + "bbox": [ + 104, + 285, + 504, + 322 + ], + "type": "inline_equation", + "content": "\\pmb{p}_m^{(G)}" + }, + { + "bbox": [ + 104, + 285, + 504, + 322 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 285, + 504, + 322 + ], + "type": "inline_equation", + "content": "\\pmb{h}_n^{(G)}" + }, + { + "bbox": [ + 104, + 285, + 504, + 322 + ], + "type": "text", + "content": " by mean-pooling the features of the words in the phrase. The phrase similarity is given by" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 181, + 325, + 504, + 340 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 325, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 181, + 325, + 504, + 340 + ], + "type": "interline_equation", + "content": "\\sin \\left(\\mathrm {p} _ {m}, \\mathrm {h} _ {n}\\right) = \\gamma \\cos \\left(\\boldsymbol {p} _ {m} ^ {(G)}, \\boldsymbol {h} _ {n} ^ {(G)}\\right) + (1 - \\gamma) \\cos \\left(\\boldsymbol {p} _ {m} ^ {(L)}, \\boldsymbol {h} _ {n} ^ {(L)}\\right) \\tag {1}", + "image_path": "b00333fe4d54cff13da9ada18bd7394256b3bc70c6c6dfee739a85b95b77afd2.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 342, + 506, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 506, + 376 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 342, + 506, 
+ 376 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 342, + 506, + 376 + ], + "type": "text", + "content": " is a hyperparameter balancing the lexical and contextual representations of a phrase (Hewitt & Manning, 2019). It is noted that Sentence-BERT is finetuned on paraphrase datasets, and thus is more suitable for phrasal similarity matching than pretrained language models (Devlin et al., 2019)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 380, + 504, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 404 + ], + "type": "text", + "content": "We obtain phrase alignment between the premise and hypothesis in a heuristic way. For every phrase " + }, + { + "bbox": [ + 104, + 380, + 504, + 404 + ], + "type": "inline_equation", + "content": "\\mathrm{p}_m" + }, + { + "bbox": [ + 104, + 380, + 504, + 404 + ], + "type": "text", + "content": " in the premise, we look for the most similar phrase " + }, + { + "bbox": [ + 104, + 380, + 504, + 404 + ], + "type": "inline_equation", + "content": "\\mathrm{h}_n" + }, + { + "bbox": [ + 104, + 380, + 504, + 404 + ], + "type": "text", + "content": " from the hypothesis by" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 244, + 407, + 504, + 420 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 407, + 504, + 420 + ], + "spans": [ + { + "bbox": [ + 244, + 407, + 504, + 420 + ], + "type": "interline_equation", + "content": "n = \\operatorname {a r g m a x} _ {n ^ {\\prime}} \\sin \\left(\\boldsymbol {p} _ {m}, \\boldsymbol {h} _ {n ^ {\\prime}}\\right) \\tag {2}", + "image_path": "128b40dc6f2475a1fbf68ccf5b622c344cb3784c118debfa9881055b0b40fdfd.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "spans": [ 
+ { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "text", + "content": "Likewise, for every phrase " + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{h}_n" + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "text", + "content": " in the hypothesis, we look for the most similar phrase " + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{p}_m" + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "text", + "content": " from the premise. A phrase pair " + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "inline_equation", + "content": "(\\mathrm{p}_m, \\mathrm{h}_n)" + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "text", + "content": " is considered to be aligned if " + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{h}_n" + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "text", + "content": " is selected as the closest phrase to " + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{p}_m" + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{p}_m" + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "text", + "content": " is the closest to " + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{h}_n" + }, + { + "bbox": [ + 104, + 423, + 505, + 489 + ], + "type": "text", + "content": ". Such hard alignment differs from commonly used soft attention-based approaches (Parikh et al., 2016). Our alignment method can ensure the quality of phrase alignment, and more importantly, leave other phrases unaligned (e.g., helping each other in Figure 1), which are common in the NLI task. 
The process is illustrated in Figure 2a." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 495, + 506, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 551 + ], + "type": "text", + "content": "Phrasal NLI Prediction. Our model then predicts the logical relationship of an aligned phrase pair " + }, + { + "bbox": [ + 104, + 495, + 506, + 551 + ], + "type": "inline_equation", + "content": "(p, h)" + }, + { + "bbox": [ + 104, + 495, + 506, + 551 + ], + "type": "text", + "content": " among three target labels: Entailment, Contradiction, and Neutral. While previous work (Feng et al., 2020) identifies finer-grained labels for NLI, we do not follow their categorization, because it complicates the reasoning process and makes weakly supervised training more difficult. Instead, we adopt a three-way phrasal classification, which is consistent with sentence NLI labels." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "content": "We represent a phrase, say, " + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "content": " in the premise, by a vector embedding, and we consider two types of features: a local feature " + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "inline_equation", + "content": "\\pmb{p}^{(L)}" + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "content": " and a global feature " + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "inline_equation", + "content": "\\pmb{p}^{(G)}" + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "content": ", re-used 
from the phrase alignment component. They are concatenated as the phrase representation " + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "inline_equation", + "content": "\\pmb{p} = [p^{(L)}; p^{(G)}]" + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "content": ". Likewise, the phrase representation for a hypothesis phrase " + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "content": " is obtained in a similar way. Intuitively, local features force the model to perform reasoning in a serious manner, but global features are important to sentence-level prediction. Such intuition is also verified in an ablation study (§ 4.2)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 629, + 504, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 629, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 629, + 504, + 663 + ], + "type": "text", + "content": "Then, we use a neural network to predict the phrasal NLI label (Entailment, Contradiction, and Neutral). 
This is given by the standard heuristic matching (Mou et al., 2016) based on phrase embeddings, followed by a multi-layer perceptron (MLP) and a three-way softmax layer:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 666, + 504, + 679 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 666, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 121, + 666, + 504, + 679 + ], + "type": "interline_equation", + "content": "\\left[ P _ {\\text {p h r a s e}} (\\mathsf {E} | \\mathrm {p}, \\mathrm {h}); P _ {\\text {p h r a s e}} (\\mathsf {C} | \\mathrm {p}, \\mathrm {h}); P _ {\\text {p h r a s e}} (\\mathsf {N} | \\mathrm {p}, \\mathrm {h}) \\right] = \\operatorname {s o f t m a x} (\\operatorname {M L P} \\left(\\left[ \\boldsymbol {p}; \\boldsymbol {h}; \\left| \\boldsymbol {p} - \\boldsymbol {h} \\right|; \\boldsymbol {p} \\circ \\boldsymbol {h} \\right]\\right)) \\tag {3}", + "image_path": "8452d7d052bb1987e403ba7815abd86603672f49bb9565174e768b334b1ad316.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 681, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 506, + 704 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 681, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\circ" + }, + { + "bbox": [ + 104, + 681, + 506, + 704 + ], + "type": "text", + "content": " is the element-wise product, and the semicolon refers to column vector concatenation. E, C, and N refer to the Entailment, Contradiction, and Neutral labels, respectively." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "It should be mentioned that a phrase may be unaligned, but plays an important role in sentence-level NLI prediction, as shown in Table 1. Thus, we would like to predict phrasal NLI labels for unaligned" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "phrases as well, but pair them with a special token " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "(\\mathrm{p}_{\\langle \\mathrm{EMPTY}\\rangle}" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\mathrm{h}_{\\langle \\mathrm{EMPTY}\\rangle})" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": ", whose embedding is randomly initialized 
and learned by back-propagation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 504, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 133 + ], + "type": "text", + "content": "Sentence Label Induction. We observe the sentence NLI label can be logically induced from phrasal NLI labels. Based on the definition of the NLI task, we develop the following induction rules." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 137, + 505, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 137, + 505, + 183 + ], + "spans": [ + { + "bbox": [ + 104, + 137, + 505, + 183 + ], + "type": "text", + "content": "Entailment Rule: According to Bowman et al. (2015), a premise entailing a hypothesis means that, if the premise is true, then the hypothesis must be true. We find that this can be oftentimes transformed into phrasal relationships: a premise entails the hypothesis if all paired phrases have the label Entailment." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "inline_equation", + "content": "\\{(\\mathrm{p}_k,\\mathrm{h}_k)\\}_{k = 1}^K\\bigcup \\{(\\mathrm{p}_k,\\mathrm{h}_k)\\}_{k = K + 1}^{K'}" + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "text", + "content": " be all phrase pairs. 
For " + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "inline_equation", + "content": "k = 1,\\dots ,K" + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "text", + "content": ", they are aligned phrases; for " + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "inline_equation", + "content": "k = K + 1,\\dots ,K'" + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "text", + "content": ", they are unaligned phrases paired with the special token, i.e., " + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "inline_equation", + "content": "\\mathrm{p}_k = \\mathrm{p}_{\\langle \\mathrm{EMPTY}\\rangle}" + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "inline_equation", + "content": "\\mathrm{h}_k = \\mathrm{h}_{\\langle \\mathrm{EMPTY}\\rangle}" + }, + { + "bbox": [ + 104, + 186, + 506, + 225 + ], + "type": "text", + "content": ". 
Then, we induce a sentence-level Entailment score by" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 205, + 228, + 504, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 228, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 205, + 228, + 504, + 251 + ], + "type": "interline_equation", + "content": "S _ {\\text {s e n t e n c e}} (\\mathsf {E} | \\mathrm {P}, \\mathrm {H}) = \\left[ \\prod_ {k = 1} ^ {K ^ {\\prime}} P _ {\\text {p h r a s e}} (\\mathsf {E} | \\mathrm {p} _ {k}, \\mathrm {h} _ {k}) \\right] ^ {\\frac {1}{K ^ {\\prime}}} \\tag {4}", + "image_path": "68f5d112087dcb2d58bf7a3dba1defc0c09d9f713acfb9dd5e4e83b51a62f4ec.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 253, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 253, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 253, + 504, + 342 + ], + "type": "text", + "content": "This works in a fuzzy logic fashion (Zadeh, 1988; 1996), deciding whether the sentence-level label should be Entailment considering the average of phrasal predictions. Here, we use the geometric mean, because it is biased towards low scores, i.e., if there exists one phrase pair with a low Entailment score, then the chance of sentence label being Entailment is also low. Unaligned pairs should be considered in Eq. (4), because an unaligned phrase may indicate Entailment, shown in the second example of Table 1. Notice that the resulting value " + }, + { + "bbox": [ + 104, + 253, + 504, + 342 + ], + "type": "inline_equation", + "content": "S_{\\text{sentence}}(\\mathsf{E}|\\mathsf{P}, \\mathsf{H})" + }, + { + "bbox": [ + 104, + 253, + 504, + 342 + ], + "type": "text", + "content": " is not normalized with respect to Contradiction and Neutral; thus, we call it a score (instead of probability), which will be normalized afterwards." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 347, + 504, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 504, + 371 + ], + "type": "text", + "content": "Contradiction Rule: Two sentences are contradictory if there exists (at least) one paired phrase labeled as Contradiction. The fuzzy logic version of this induction rule is given by" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 203, + 374, + 504, + 387 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 374, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 203, + 374, + 504, + 387 + ], + "type": "interline_equation", + "content": "S _ {\\text {s e n t e n c e}} (\\mathbb {C} | \\mathrm {P}, \\mathrm {H}) = \\max _ {k = 1, \\dots , K} P _ {\\text {p h r a s e}} (\\mathbb {C} | \\mathrm {p} _ {k}, \\mathrm {h} _ {k}) \\tag {5}", + "image_path": "8bc760e1dbfc5798f23ff0b190c734cb65bd317af0e26e1af16b92ec0db193a6.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 391, + 505, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 391, + 505, + 436 + ], + "spans": [ + { + "bbox": [ + 104, + 391, + 505, + 436 + ], + "type": "text", + "content": "Here, the max operator is used in the induction, because the contradiction rule is an existential statement, i.e., there exist(s) " + }, + { + "bbox": [ + 104, + 391, + 505, + 436 + ], + "type": "inline_equation", + "content": "\\cdots" + }, + { + "bbox": [ + 104, + 391, + 505, + 436 + ], + "type": "text", + "content": ". Also, unaligned phrases are excluded in calculating the sentence-level Contradiction score, because an unaligned phrase indicates the corresponding information is missing in the other sentence and it cannot be Contradiction (recall examples in Table 1)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 440, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 440, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 440, + 504, + 464 + ], + "type": "text", + "content": "Rule for Neutral: Two sentences are neutral if there exists (at least) one neutral phrase pair, but there does not exist any contradictory phrase pair. The fuzzy logic formula is" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 149, + 466, + 504, + 481 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 466, + 504, + 481 + ], + "spans": [ + { + "bbox": [ + 149, + 466, + 504, + 481 + ], + "type": "interline_equation", + "content": "S _ {\\text {s e n t e n c e}} (\\mathrm {N} | \\mathrm {P}, \\mathrm {H}) = \\left[ \\max _ {k = 1, \\dots , K ^ {\\prime}} P _ {\\text {p h r a s e}} (\\mathrm {N} | \\mathrm {p} _ {k}, \\mathrm {h} _ {k}) \\right] \\cdot \\left[ 1 - S _ {\\text {s e n t e n c e}} (\\mathrm {C} | \\mathrm {P}, \\mathrm {H}) \\right] \\tag {6}", + "image_path": "b49cb6e277c44961514428dd38ce0b8bcc67fe4e618e0bbf9416b2f3f100f151.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 483, + 505, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 505, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 505, + 518 + ], + "type": "text", + "content": "The first factor determines whether there exists a Neutral phrase pair (including unaligned phrases, illustrated in the first example in Table 1). The second factor evaluates the negation of \"at least one contradictory phrase,\" as suggested in the second clause of the Rule for Neutral." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 522, + 504, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 522, + 504, + 546 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 504, + 546 + ], + "type": "text", + "content": "Finally, we normalize the scores into probabilities by dividing the sum, since all the scores are already positive. This is given by" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 243, + 549, + 504, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 549, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 243, + 549, + 504, + 563 + ], + "type": "interline_equation", + "content": "P _ {\\text {s e n t e n c e}} (\\mathrm {L} | \\cdot) = \\frac {1}{Z} S _ {\\text {s e n t e n c e}} (\\mathrm {L} | \\cdot) \\tag {7}", + "image_path": "7873e4e93554367e78a3e090da8b4fc16092f005de822d0e9877d0baea7abaff.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 565, + 501, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 565, + 501, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 565, + 501, + 578 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 565, + 501, + 578 + ], + "type": "inline_equation", + "content": "\\mathsf{L}\\in \\{\\mathsf{E},\\mathsf{C},\\mathsf{N}\\}" + }, + { + "bbox": [ + 104, + 565, + 501, + 578 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 565, + 501, + 578 + ], + "type": "inline_equation", + "content": "Z = S_{\\text{sentence}}(\\mathsf{E}|\\cdot) + S_{\\text{sentence}}(\\mathsf{C}|\\cdot) + S_{\\text{sentence}}(\\mathsf{N}|\\cdot)" + }, + { + "bbox": [ + 104, + 565, + 501, + 578 + ], + "type": "text", + "content": " is the normalizing factor." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "content": "Training and Inference. We use cross-entropy loss to train our EPR model by minimizing " + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "inline_equation", + "content": "-\\log P_{\\text{sentence}}(\\mathsf{t}|\\cdot)" + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\mathsf{t} \\in \\{\\mathsf{E}, \\mathsf{C}, \\mathsf{N}\\}" + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "content": " is the groundtruth sentence-level label." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 610, + 505, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 610, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 610, + 505, + 667 + ], + "type": "text", + "content": "Our underlying logical reasoning component can be trained end-to-end by back-propagation in a weakly supervised manner, because the fuzzy logic rules are almost everywhere differentiable. Although the max operators in Eqs. (5) and (6) may not be differentiable at certain points, they are common in max-margin learning and the rectified linear unit (ReLU) activation functions, and do not cause trouble in back-propagation." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 671, + 504, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 671, + 504, + 695 + ], + "spans": [ + { + "bbox": [ + 104, + 671, + 504, + 695 + ], + "type": "text", + "content": "Once our EPR model is trained, we can obtain both phrasal and sentence-level labels. 
This is accomplished by performing argmax on the predicted probabilities (3) and (7), respectively." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "text", + "content": "In traditional fuzzy logic, the conjunction is given by probability product (Zadeh, 1988). We find that this gives a too small Entailment score compared with Contradiction and Neutral scores, causing difficulties in end-to-end training. Thus, we take the geometric mean and maintain all the scores in the same magnitude." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": "Improving Textual Explanation. Camburu et al. (2018) annotated a dataset to address NLI interpretability by generating an explanation sentence. 
For the example in Figure 1, the reference explanation is \"There cannot be one man and several men at same time.\"" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 506, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 506, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 506, + 166 + ], + "type": "text", + "content": "In this part, we apply the predicted phrasal logical relationships to textual explanation generation and examine whether our EPR's output can help a downstream task. Figure 3 shows the overview of our textual explanation generator. We concatenate the premise and hypothesis in the form of “Premise : Hypothesis : …,” and feed it to a standard Transformer encoder (Vaswani et al., 2017)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": "We utilize the phrase pairs and our predicted phrasal labels as factual knowledge to enhance the decoder. 
Specifically, our EPR model yields a set of tuples " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\{(\\mathrm{p}_k,\\mathrm{h}_k,\\mathrm{l}_k)\\}_{k = 1}^K" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": " for a sample, where " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_k\\in \\{\\mathsf{E},\\mathsf{N},\\mathsf{C}\\}" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": " is the predicted phrasal label for the aligned phrases, " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathrm{p}_k" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathrm{h}_k" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": ". We embed phrases by Sentence-BERT: " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\pmb{p}^{(L)}" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\pmb{h}^{(L)}" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": "; the phrasal label is represented by a one-hot vector " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\pmb{l}_k = \\mathrm{onehot}(\\mathrm{l}_k)" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": ". 
They are concatenated as a vector " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\pmb{m}_k = [\\pmb {p}_k;\\pmb {h}_k;\\pmb {l}_k]" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": ". We compose the vectors as a factual memory matrix " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{M} = [m_1^\\top ;\\dots ;m_K^\\top ]\\in \\mathbb{R}^{K\\times d}" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": " is the dimension of " + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\pmb{m}_k" + }, + { + "bbox": [ + 104, + 171, + 506, + 240 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "content": "Our decoder follows a standard Transformer architecture (Vaswani et al., 2017), but is equipped with additional attention mechanisms to the factual memory. Consider the " + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "content": "th decoding step. We feed the factual memory to an MLP as " + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{M}} = \\mathrm{MLP}(\\mathbf{M})" + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "content": ". 
We compute attention " + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "inline_equation", + "content": "\\pmb{a}" + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{M}}" + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "content": " with the embedding of the input " + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "inline_equation", + "content": "\\pmb{y}_{i-1}" + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "content": ", and aggregate factual information " + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "inline_equation", + "content": "\\pmb{c}" + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "content": " for the rows " + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "inline_equation", + "content": "\\pmb{m}_t" + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 104, + 243, + 312, + 335 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 338, + 302, + 360 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 338, + 302, + 360 + ], + "spans": [ + { + "bbox": [ + 111, + 338, + 302, + 360 + ], + "type": "interline_equation", + "content": "\\boldsymbol {a} = \\operatorname {s o f t m a x} (\\tilde {\\mathbf {M}} \\boldsymbol {y} _ {i - 1}), \\quad \\boldsymbol {c} = \\sum_ {k = 1} ^ {K} a _ {k} \\tilde {\\boldsymbol {m}} _ {t} ^ {\\top}", + "image_path": "a05d3902fe8de135f784151ef3012c6709758961f29e72846535ef2ae0e7752c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "text", + "angle": 0, 
+ "lines": [ + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "inline_equation", + "content": "a_{k}" + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "text", + "content": " is the kth element of the vector " + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "inline_equation", + "content": "\\pmb{a}" + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "text", + "content": " and Figure 5. " + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{m}}_t" + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "text", + "content": " is the kth row of the matrix " + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{M}}" + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "text", + "content": ". The factual information " + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "inline_equation", + "content": "\\pmb{c}" + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "text", + "content": " is fed to another layer " + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "inline_equation", + "content": "\\pmb{g}_i = \\mathrm{MLP}([c; y_{i-1}]) + c" + }, + { + "bbox": [ + 104, + 363, + 361, + 398 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 317, + 249, + 504, + 350 + ], + "blocks": [ + { + "bbox": [ + 317, + 249, + 504, + 350 + ], + "lines": [ + { + "bbox": [ + 317, + 249, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 317, + 249, + 504, + 350 + ], + "type": "image", + "image_path": "59828ff8e82cdca43543a438f05e4fee8d4571b8d4ce9ba761fa4e8c3e224c45.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 316, + 356, + 504, + 380 + ], + "lines": [ + { + "bbox": [ + 316, + 356, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 316, + 356, + 504, + 380 + ], + "type": "text", + "content": "Figure 3: Overview of the model for textual explanation generation." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "text", + "content": "Our Transformer decoder layer starts with self-attention " + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "inline_equation", + "content": "\\tilde{q}_i = \\mathrm{SelfAttn}(g_i)" + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "text", + "content": ". Then, residual connection and layer normalization are applied as " + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "inline_equation", + "content": "q_{i} = \\mathrm{LayerNorm}(\\tilde{q}_{i} + g_{i})" + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "text", + "content": ". 
A cross-attention mechanism obtains input information by " + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "inline_equation", + "content": "v_{i} = \\mathrm{CrossAttn}(q_{i},\\mathbf{H})" + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "inline_equation", + "content": "\\mathbf{H}" + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "text", + "content": " is the representation given by the encoder. " + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 104, + 402, + 506, + 480 + ], + "type": "text", + "content": " is fed to the Transformer's residual connection and layer normalization sub-layer. Multiple Transformer layers as mentioned above are stacked to form a deep architecture. The model is trained by standard cross-entropy loss against the reference explanation as in previous work (Kumar & Talukdar, 2020; Zhao & Vydiswaran, 2021; Narang et al., 2020)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 485, + 506, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 485, + 506, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 485, + 506, + 520 + ], + "type": "text", + "content": "In this way, the model is enhanced with factual information given by our EPR weakly supervised reasoning. Experiments will show that it largely improves the BLEU score by 2 points (§ 4.2), being a new state of the art. This further verifies that our EPR indeed yields meaningful phrasal explanations." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 534, + 201, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 534, + 201, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 201, + 547 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 559, + 298, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 559, + 298, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 298, + 571 + ], + "type": "text", + "content": "4.1 DATASETS AND EVALUATION METRICS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 576, + 506, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 506, + 633 + ], + "type": "text", + "content": "The main dataset we used in our experiments is the Stanford Natural Language Inference (SNLI) dataset (Bowman et al., 2015), which consists of 550K training samples, 10K validation samples, and another 10K test samples. Each data sample consists of two sentences (premise and hypothesis) and a sentence-level groundtruth label. For sentence-level NLI prediction, we still use accuracy to evaluate our approach, following previous work (Parikh et al., 2016; Chen et al., 2017; Radford et al., 2018)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 637, + 505, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 637, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 637, + 505, + 704 + ], + "type": "text", + "content": "To evaluate the phrasal reasoning performance, we need additional human annotation and evaluation metrics, because most previous work only considers sentence-level performance (Feng et al., 2020) and has not performed quantitative phrasal reasoning evaluation. Although Camburu et al. 
(2018) annotated phrase highlights in their e-SNLI dataset, they are incomplete and do not provide logical relationships. Our annotators selected relevant phrases from two sentences and tagged them with phrasal NLI labels; they also selected and tagged unaligned phrases." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 710, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 710, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 710, + 504, + 732 + ], + "type": "text", + "content": "3A groundtruth label is for a data point, which consists of two sentences. We call it a sentence-level label instead of phrasal labels." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 99, + 504, + 240 + ], + "blocks": [ + { + "bbox": [ + 104, + 64, + 504, + 97 + ], + "lines": [ + { + "bbox": [ + 104, + 64, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 64, + 504, + 97 + ], + "type": "text", + "content": "Table 2: Main results on the SNLI dataset. †Quoted from respective papers. ‡Obtained from the checkpoint sent by the authors. Other results are obtained by our experiments. 
GM and AM are the geometric and arithmetic means of the " + }, + { + "bbox": [ + 104, + 64, + 504, + 97 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 64, + 504, + 97 + ], + "type": "text", + "content": " scores." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 99, + 504, + 240 + ], + "lines": [ + { + "bbox": [ + 107, + 99, + 504, + 240 + ], + "spans": [ + { + "bbox": [ + 107, + 99, + 504, + 240 + ], + "type": "table", + "html": "
ModelSent AccReasoning Performance
FEFCFNFUPFUHGMAM
Human-84.7171.0155.1282.4661.8070.0771.02
Non-reasoning
Mahabadi et al. (2020)†85.1-------
LSTM (Wang & Jiang, 2016)†86.1-------
Transformer (Radford et al., 2018)89.9-------
SBERT (Reimers & Gurevych, 2019)91.4-------
Baselines
NNL (Feng et al., 2020)‡79.9162.7217.491.5066.220.000.0029.59
STP85.7662.4034.7637.0476.6151.8050.2052.52
GPT-3-Davinci (Brown et al., 2020)-53.7558.0016.1252.2431.0838.2342.24
Ours
EPR (Local, LM unfinetuned)76.33±0.4883.11±0.2938.73±0.8544.63±0.8876.6151.8056.39±0.4358.98±0.34
EPR (Local, LM finetuned)79.36±0.1382.44±0.2644.10±1.3244.69±3.2276.6151.8057.77±0.8559.93±0.67
EPR (Concat, LM unfinetuned)84.53±0.1973.29±0.6837.95±1.1640.56±1.1076.6151.8053.73±0.3956.04±0.33
EPR (Concat, LM finetuned)87.56±0.1569.91±1.2139.97±2.1243.31±2.7876.6151.8054.46±1.3556.32±1.13
", + "image_path": "4be99ff9235f201abf591af921705c8874960cd7b168cb3fe108a2986d9be9ac.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 256, + 506, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 256, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 104, + 256, + 506, + 291 + ], + "type": "text", + "content": "We further propose a set of " + }, + { + "bbox": [ + 104, + 256, + 506, + 291 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 256, + 506, + 291 + ], + "type": "text", + "content": "-scores, which are a balanced measure of precision and recall between human annotation and model output for Entailment, Contradiction, Neutral, and Unaligned in terms of word indexes. Details of human annotation and evaluation metrics are shown in Appendix B." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 295, + 504, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 361 + ], + "type": "text", + "content": "The inter-annotator agreement is presented in Table 2 in comparison with model performance (detailed in the next part). Here, we compute the agreement by treating one annotator as the ground truth and another as the system output; the score is averaged among all annotator pairs. As seen, humans generally achieve high agreement with each other, whereas model performance is relatively low. This shows that our task and metrics are well-defined, yet phrasal logical reasoning is a challenging task for machine learning models." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 366, + 506, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 366, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 506, + 487 + ], + "type": "text", + "content": "Textual explanation generation was evaluated on the e-SNLI dataset (Camburu et al., 2018), which extends the SNLI dataset with one reference explanation for each training sample, and three reference explanations for each validation or test sample. Each reference explanation comes with highlighted rationales, a set of annotated words in the premise or hypothesis considered as the reason for the explanation annotation. We do not use these highlighted rationales, but enhance the neural model with EPR output for textual explanation generation. We follow previous work (Camburu et al., 2018; Narang et al., 2020), adopting BLEU (Papineni et al., 2002) and SacreBLEU (Post, 2018) scores as the evaluation metrics; they mainly differ in the tokenizer. Camburu et al. (2018) also report low consistency of the third annotated reference, and thus use only two references for evaluation. In our study, we consider both two-reference and three-reference BLEU/SacreBLEU. Appendix A.2 provides additional implementation details of textual explanation generation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 506, + 170, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 170, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 170, + 516 + ], + "type": "text", + "content": "4.2 RESULTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 528, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 628 + ], + "type": "text", + "content": "Phrasal Reasoning Performance. 
To the best of our knowledge, phrasal reasoning for NLI was not explicitly evaluated in previous literature. Therefore, we propose plausible extensions to previous studies as our baselines. We consider the study of Neural Natural Logic (NNL, Feng et al., 2020) as the first baseline. It applies an attention mechanism (Parikh et al., 2016), so that each word in the hypothesis is softly aligned with the words in the premise. Then, each word in the hypothesis is predicted with one of the seven natural logic relations proposed by MacCartney & Manning (2009). We consider the maximum attention score as the alignment, and map their seven natural logic relations to our three-category NLI labels: Equivalence, ForwardEntailment " + }, + { + "bbox": [ + 104, + 528, + 506, + 628 + ], + "type": "inline_equation", + "content": "\\mapsto" + }, + { + "bbox": [ + 104, + 528, + 506, + 628 + ], + "type": "text", + "content": " Entailment; Negation, Alternation " + }, + { + "bbox": [ + 104, + 528, + 506, + 628 + ], + "type": "inline_equation", + "content": "\\mapsto" + }, + { + "bbox": [ + 104, + 528, + 506, + 628 + ], + "type": "text", + "content": " Contradiction; and ReverseEntailment, Cover, Independence " + }, + { + "bbox": [ + 104, + 528, + 506, + 628 + ], + "type": "inline_equation", + "content": "\\mapsto" + }, + { + "bbox": [ + 104, + 528, + 506, + 628 + ], + "type": "text", + "content": " Neutral." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 633, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 633, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 633, + 506, + 732 + ], + "type": "text", + "content": "Table 2 shows that the word-level NNL approach cannot perform meaningful phrasal reasoning, although our metrics have already excluded explicit evaluation of phrases. 
The low performance is because their soft attention leads to many misalignments, whereas their seven-category logical relations are too fine-grained and cause complications in weakly supervised reasoning. In addition, NNL does not allow unaligned words in the hypothesis, showing that such a model is inadequate for NLI reasoning. By contrast, our EPR model extracts phrases of meaningful semantic units, being an appropriate granularity of logical reasoning. Moreover, we work with three-category NLI labels following the sentence-level NLI task formulation. This actually restricts the model's capacity, forcing the model to perform serious phrasal reasoning." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 76, + 504, + 224 + ], + "blocks": [ + { + "bbox": [ + 214, + 63, + 394, + 73 + ], + "lines": [ + { + "bbox": [ + 214, + 63, + 394, + 73 + ], + "spans": [ + { + "bbox": [ + 214, + 63, + 394, + 73 + ], + "type": "text", + "content": "Table 3: Results of ablation studies on SNLI." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 76, + 504, + 224 + ], + "lines": [ + { + "bbox": [ + 108, + 76, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 108, + 76, + 504, + 224 + ], + "type": "table", + "html": "
ModelFeaturesSent AccReasoning Performance
FEFCFNFUPFUHGMAM
Full modelLocal76.33±0.4883.11±0.2938.73±0.8544.63±0.8876.6151.8056.39±0.4358.98±0.34
Global84.03±0.1270.84±0.6035.12±0.9036.37±1.5276.6151.8051.41±0.6254.15±0.41
Concat84.53±0.1973.29±0.6837.95±1.1640.56±1.1076.6151.8053.73±0.3956.04±0.33
Random chunkerLocal72.4463.2122.6532.0465.9436.1340.5343.99
Global82.8158.0930.6427.4965.9436.1341.0543.66
Concat83.0958.7532.4131.1465.9436.1342.6644.87
Semantic role labelingLocal71.1073.7929.3928.9970.1943.1145.2749.09
Global82.8160.1432.0730.4870.1943.1144.6747.20
Concat83.1161.6431.7628.3370.1943.1144.1547.01
Random alignmentLocal68.5259.3221.7926.2051.4316.5031.0235.05
Global81.9953.8535.1031.3951.4316.5034.7137.66
Concat82.4957.2234.8330.9151.4316.5034.9738.18
Mean inductionLocal79.6177.3837.1436.1376.6151.8052.8455.81
Global83.8255.0829.9224.7076.6151.8043.8247.62
Concat84.9657.1231.9331.4176.6151.8046.9249.77
", + "image_path": "542a9781d66257c9e1cbe4d880a2bd26cc1b2ee4d366cd4d7af5d38896e12a6d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "type": "text", + "content": "In addition, we include another intuitive SBERT-based competing model for comparison. We first apply our own heuristics of phrase detection and alignment (thus, the model will have the same " + }, + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "type": "inline_equation", + "content": "F_{\\mathsf{UP}}" + }, + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "type": "inline_equation", + "content": "F_{\\mathsf{UH}}" + }, + { + "bbox": [ + 104, + 234, + 504, + 312 + ], + "type": "text", + "content": " scores); then, we directly train the phrasal NLI predictor by sentence-level labels. We obtain the sentence NLI prediction by taking argmax over Eq. (7). We call this STP (Sentence label Training Phrases). As seen, STP provides some meaningful phrasal reasoning results, because the training can smooth out the noise of phrasal labels, which are directly set as the sentence-level labels. But still, its performance is significantly lower than our EPR model." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 316, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 384 + ], + "type": "text", + "content": "We experimented with a baseline of few-shot prompting with GPT-3 (Brown et al., 2020), and the implementation detail is shown in Appendix A.2. 
We see that GPT-3 is able to provide more or less meaningful reasoning, and surprisingly the contradiction " + }, + { + "bbox": [ + 104, + 316, + 504, + 384 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 316, + 504, + 384 + ], + "type": "text", + "content": "-score is higher than all competing methods. However, the overall mean " + }, + { + "bbox": [ + 104, + 316, + 504, + 384 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 316, + 504, + 384 + ], + "type": "text", + "content": " scores are much lower. The results show that phrasal reasoning is challenging for pretrained language models, highlighting the importance of our task formulation and the proposed EPR approach even in the prompting era." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "text", + "content": "Among our EPR variants, we see that EPR with local phrase embeddings achieves the highest reasoning performance, and that EPR with concatenated features achieves a good balance between sentence-level accuracy and reasoning. Our EPR variants were run 5 times with different initialization, and standard deviations are also reported in Table 3. As seen, our improvement compared with the best baseline is around 9.1-10.7 times the standard deviation in mean " + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "text", + "content": " scores, which is a large margin. 
Suppose the " + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "text", + "content": " scores are Gaussian distributed," + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "text", + "content": " the improvement is also statistically significant (" + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "text", + "content": "-value " + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "inline_equation", + "content": "< 4.5\\mathrm{e} - 20" + }, + { + "bbox": [ + 104, + 388, + 504, + 467 + ], + "type": "text", + "content": " comparing our worse variant with the best competing model by one-sided test)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 471, + 506, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 471, + 506, + 550 + ], + "spans": [ + { + "bbox": [ + 104, + 471, + 506, + 550 + ], + "type": "text", + "content": "We further compare our EPR with non-reasoning models (Wang & Jiang, 2016; Radford et al., 2018), which are unable to provide phrasal explanations but may or may not achieve high sentence accuracy. The results show that our phrasal EPR model hurts the sentence-level accuracy by 2-4 points, when the model architecture is controlled. This resonates with traditional symbolic AI approaches (MacCartney & Manning, 2008), where interpretable models may not outperform black-box neural networks. Nevertheless, our sentence-level accuracy is still decent, outperforming a few classic neural models, including fuzzy logic applied to sentence embeddings (Mahabadi et al., 2020)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 554, + 506, + 665 + ], + "type": "text", + "content": "Analysis. We consider several ablated models to verify the effect of every component in our EPR model. (1) Random chunker, which splits the sentence randomly based on the number of chunks detected by our system. (2) Random aligner, which randomly aligns phrases but keeps the number of aligned phrases unchanged. (3) Semantic role labeling, which uses the semantic roles, detected by AllenNLP (Gardner et al., 2018), as the reasoning unit. (4) Mean induction, which induces the sentence NLI label by the geometric mean of phrasal NLI prediction. In addition, we consider local phrase embedding features, global features, and their concatenation for the above model variants. Due to a large number of settings, each variant was run only once; we do not view this as a concern because Table 2 shows a low variance of our approach. Also, the underlying language model is un-finetuned in our ablation study, as it yields slightly lower performance but is much more efficient." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 670, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 670, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 670, + 504, + 704 + ], + "type": "text", + "content": "As seen in Table 3, the random chunker and aligner yield poor phrasal reasoning performance, showing that working with meaningful semantic units and their alignments is important to logical reasoning. 
This also verifies that our word index-based metrics are able to evaluate phrase detection" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "text", + "content": "4When the score has a low standard deviation, a Gaussian distribution is a reasonable assumption because the probability of exceeding the range of " + }, + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "text", + "content": " scores is extremely low." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "and alignment in an implicit manner. We further applied semantic role labeling as our reasoning unit. We find its performance is higher than the random chunker but lower than our method. This is because semantic role labeling is verb-centric, and the extracted spans may be incomplete." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "type": "text", + "content": "Interestingly, local features yield higher reasoning performance, but global and concatenated features yield higher sentence accuracy. This is because global features provide aggregated information of the entire sentence and allow the model to bypass meaningful reasoning. In the variant of the mean induction, for example, the phrasal predictor can simply learn to predict the sentence-level label with global sentence information; then, the mean induction is an ensemble of multiple predictors. In this way, it achieves the highest sentence accuracy (0.43 points higher than our full model with concatenated features), but is 6 points lower in reasoning performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 204, + 506, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 506, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 506, + 271 + ], + "type": "text", + "content": "This reminds us of the debate between old schools of AI (Chandrasekaran et al., 1988; Boucher & Dienes, 2003; Goel, 2022). Recent deep learning models take the connectionists' view, and generally outperform symbolists' approaches in terms of the ultimate prediction, but they lack expressible explanations. Combining neural and symbolic methods becomes a hot direction in recent AI research (Liang et al., 2017; Dong et al., 2018; Yi et al., 2018). In general, our EPR model with global features achieves high performance in both reasoning and ultimate prediction for the NLI task." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 276, + 261, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 276, + 261, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 276, + 261, + 430 + ], + "type": "text", + "content": "Results of Textual Explanation Generation. In this part, we apply EPR's predicted output—phrasal logical relationships—as factual knowledge to textual explanation generation. Most previous studies use the groundtruth sentence-level NLI label and/or highlighted rationales. This requires human annotations, which are resource-consuming to obtain. By contrast, we require no extra human-annotated resources; our factual knowledge is based on our weakly supervised reasoning approach." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 265, + 276, + 506, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 276, + 506, + 334 + ], + "spans": [ + { + "bbox": [ + 265, + 276, + 506, + 334 + ], + "type": "text", + "content": "Table 4: Textual explanation results on e-SNLI. Previous work uses auxiliary information (L: the groundtruth NLI label; H: human-annotated highlights), but we use neither. " + }, + { + "bbox": [ + 265, + 276, + 506, + 334 + ], + "type": "inline_equation", + "content": "{}^{ \\dagger }" + }, + { + "bbox": [ + 265, + 276, + 506, + 334 + ], + "type": "text", + "content": " Quoted from respective papers. " + }, + { + "bbox": [ + 265, + 276, + 506, + 334 + ], + "type": "inline_equation", + "content": "{}^{ \\ddagger }" + }, + { + "bbox": [ + 265, + 276, + 506, + 334 + ], + "type": "text", + "content": " Evaluated by checkpoints. " + }, + { + "bbox": [ + 265, + 276, + 506, + 334 + ], + "type": "inline_equation", + "content": "{}^{\\parallel }" + }, + { + "bbox": [ + 265, + 276, + 506, + 334 + ], + "type": "text", + "content": " Our replication with provided code." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 267, + 337, + 504, + 428 + ], + "blocks": [ + { + "bbox": [ + 267, + 337, + 504, + 428 + ], + "lines": [ + { + "bbox": [ + 267, + 337, + 504, + 428 + ], + "spans": [ + { + "bbox": [ + 267, + 337, + 504, + 428 + ], + "type": "table", + "html": "
ModelInfoBLEUSacreBLEU
LH2 refs3 refs2 refs3 refs
Camburu et al. (2018)†--27.58---
NILE (Kumar & Talukdar, 2020)∥-28.5737.7332.5141.78
NILE (Kumar & Talukdar, 2020)‡-28.6737.8432.7442.06
FinetunedWT5220M (Narang et al., 2020)†---32.40-
FinetunedWT511B (Narang et al., 2020)†---33.70-
LIREx (Zhao & Vydiswaran, 2021)∥17.2222.4021.2426.68
Finetune T560M--27.7536.7831.7440.89
+ Annotated Highlights64M27.9136.9032.2041.21
+ EPR Outputs64M (ours)--29.9138.3033.9642.63
", + "image_path": "c93ddad4ed53eb11322db0d24cf49b191601d3a191504367d83106df319839cd.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 435, + 505, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 505, + 502 + ], + "type": "text", + "content": "Table 4 shows our explanation generation performance on e-SNLI. Since evaluation metrics are not consistently used for explanation generation in previous studies, we replicate the approaches when the code or checkpoint is available. For large pretrained models, we quote results from the previous paper (Narang et al., 2020). Their model is called WT5, having 220M or 11B parameters depending on the underlying T5 model. Profoundly, we achieve higher performance with 60M-parameter T5-small, which is " + }, + { + "bbox": [ + 104, + 435, + 505, + 502 + ], + "type": "inline_equation", + "content": "3.3\\mathrm{x}" + }, + { + "bbox": [ + 104, + 435, + 505, + 502 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 435, + 505, + 502 + ], + "type": "inline_equation", + "content": "170\\mathrm{x}" + }, + { + "bbox": [ + 104, + 435, + 505, + 502 + ], + "type": "text", + "content": " smaller in model size than the two WT5 variants." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 506, + 506, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 506, + 574 + ], + "type": "text", + "content": "In addition, we conducted a controlled experiment using the rationale highlights annotated by Camburu et al. (2018) for e-SNLI. It achieves a relatively small increase of 0.2-0.5 BLEU points, whereas our EPR's outputs yield a 2-point improvement. 
The difference in the performance gains shows that our EPR's phrasal logical relationships provide more valuable information than human-annotated highlights. In general, we achieve a new state of the art on e-SNLI with a small language model, demonstrating the importance of phrasal reasoning in textual explanations." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 578, + 504, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 504, + 612 + ], + "type": "text", + "content": "Additional Results. We show additional results as appendices. § C.1: Reasoning performance on the MNLI dataset; § C.2: Error analysis; § C.3: Case studies of our EPR model; and § C.4: Case studies of textual explanation generation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 617, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 506, + 696 + ], + "type": "text", + "content": "Conclusion. The paper proposes an explainable phrasal reasoning (EPR) model for NLI with neural fuzzy logic, trained in a weakly supervised manner. We further propose an experimental design, including data annotation, evaluation metrics, and plausible baselines. Results show that phrasal reasoning for NLI is a meaningfully defined task, as humans can achieve high agreement. Our EPR achieves decent sentence-level accuracy, but much higher reasoning performance than all competing models. We also achieve a new state-of-the-art performance on e-SNLI textual explanation generation by applying EPR's phrasal logical relationships." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 176, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 176, + 93 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 176, + 93 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 99, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 105, + 99, + 507, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 99, + 507, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 99, + 507, + 134 + ], + "type": "text", + "content": "Islam Beltagy, Stephen Roller, Pengxiang Cheng, Katrin Erk, and Raymond J Mooney. Representing meaning with a combination of logical and distributional models. Computational Linguistics, pp. 763-808, 2016. URL https://aclanthology.org/J16-4007/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 140, + 507, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 507, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 507, + 175 + ], + "type": "text", + "content": "Luke Boucher and Zoltán Dienes. Two ways of learning associations. Cognitive Science, 27(6):807-842, 2003. 
URL https://www.sciencedirect.com/science/article/pii/S0364021303000715." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 182, + 506, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 506, + 217 + ], + "type": "text", + "content": "Samuel Bowman, Gabor Angeli, Christopher Potts, and Christopher D Manning. A large annotated corpus for learning natural language inference. In EMNLP, pp. 632-642, 2015. URL https://aclanthology.org/D15-1075." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 507, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 507, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 507, + 312 + ], + "type": "text", + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In NeurIPS, pp. 1877-1901, 2020. URL https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 319, + 507, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 507, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 507, + 364 + ], + "type": "text", + "content": "Oana-Maria Camburu, Tim Rocttäschel, Thomas Lukasiewicz, and Phil Blunsom. eSNLI: Natural language inference with natural language explanations. In NeurIPS, pp. 9539-9549, 2018. 
URL https://proceedings.neurips.cc/paper/2018/bit/4c7a167bb329bd92580a99ce422d6fa6-Abstract.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 370, + 507, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 370, + 507, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 370, + 507, + 405 + ], + "type": "text", + "content": "Balakrishnan Chandrasekaran, Askhok Goel, and Dean Allemang. Connectionism and information processing abstractions. AI Magazine, 9(4):24-24, 1988. URL https://ojs.aaaai.org/index.php/imagazine/article/view/951." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 411, + 507, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 507, + 445 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 507, + 445 + ], + "type": "text", + "content": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. Enhanced LSTM for natural language inference. In ACL, pp. 1657-1668, 2017. URL https://aclanthology.org/P17-1152/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 453, + 507, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 507, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 507, + 487 + ], + "type": "text", + "content": "Zeming Chen, Qiyue Gao, and Lawrence S Moss. NeuralLog: Natural language inference with joint neural and logical reasoning. arXiv preprint arXiv:2105.14167, 2021. URL https://arxiv.org/abs/2105.14167." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 493, + 507, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 493, + 507, + 528 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 507, + 528 + ], + "type": "text", + "content": "Anup Anand Deshmukh, Qianqiu Zhang, Ming Li, Jimmy Lin, and Lili Mou. Unsupervised chunking as syntactic structure induction with a knowledge-transfer approach. In Findings of EMNLP, pp. 
3626-3634, 2021. URL https://aclanthology.org/2021.findings-emnlp.307." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 534, + 507, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 534, + 507, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 507, + 568 + ], + "type": "text", + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In *NAACL-HLT*, pp. 4171–4186, 2019. URL https://aclanthology.org/N19-1423." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 575, + 507, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 575, + 507, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 507, + 609 + ], + "type": "text", + "content": "Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah Smith. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. arXiv preprint arXiv:2002.06305, 2020. URL https://arxiv.org/abs/2002.06305." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 616, + 507, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 507, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 507, + 640 + ], + "type": "text", + "content": "Honghua Dong, Jiayuan Mao, Tian Lin, Chong Wang, Lihong Li, and Denny Zhou. Neural logic machines. In ICLR, 2018. URL https://openreview.net/forum?id=B1xY-hRctX." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 646, + 507, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 646, + 507, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 507, + 680 + ], + "type": "text", + "content": "Yufei Feng, Quan Liu, Michael Greenspan, Xiaodan Zhu, et al. Exploring end-to-end differentiable natural logic modeling. In COLING, pp. 1172-1185, 2020. 
URL https://aclanthology.org/2020.coling-main.101." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 687, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 507, + 732 + ], + "type": "text", + "content": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson F. Liu, Matthew Peters, Michael Schmitz, and Luke Zettlemoyer. AllenNLP: A deep semantic natural language processing platform. In Proc. Workshop for NLP Open Source Software (NLP-OSS), pp. 1-6, 2018. URL https://aclanthology.org/W18-2501." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 106 + ], + "type": "text", + "content": "Ashok Goel. Looking back, looking ahead: Symbolic versus connectionist AI. AI Magazine, 42(4): 83-85, 2022. URL https://ojs.aaii.org/index.php/aimagazine/article/view/15111." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 110, + 506, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 110, + 506, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 506, + 134 + ], + "type": "text", + "content": "John Hewitt and Christopher D Manning. A structural probe for finding syntax in word representations. In NAACL-HLT, pp. 4129-4138, 2019. URL https://aclanthology.org/N19-1419." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 139, + 506, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 139, + 506, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 139, + 506, + 185 + ], + "type": "text", + "content": "Hai Hu, Qi Chen, Kyle Richardson, Atreyee Mukherjee, Lawrence S Moss, and Sandra Kübler. MonaLog: A lightweight system for natural language inference based on monotonicity. In Proc. Society for Computation in Linguistics, pp. 284-293, 2020. URL https://aclanthology.org/2020.scil-1.40/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 190, + 504, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 190, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 504, + 215 + ], + "type": "text", + "content": "Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with Gumbel-softmax. In ICLR, 2017. URL https://openreview.net/forum?id=rkE3y85ee." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 220, + 506, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 506, + 255 + ], + "type": "text", + "content": "Zhongtao Jiang, Yanzhe Zhang, Zhao Yang, Jun Zhao, and Kang Liu. Alignment rationale for natural language inference. In ACL-IJCNLP, pp. 5372-5387, 2021. URL https://aclanthology.org/2021.acl-long.417/." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 259, + 506, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 259, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 259, + 506, + 295 + ], + "type": "text", + "content": "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. Dense passage retrieval for open-domain question answering. In EMNLP, pp. 6769-6781, 2020. URL https://aclanthology.org/2020.emnlp-main.550/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 300, + 506, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 506, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 506, + 324 + ], + "type": "text", + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. URL https://arxiv.org/abs/1412.6980." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 329, + 506, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 329, + 506, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 329, + 506, + 363 + ], + "type": "text", + "content": "Sawan Kumar and Partha Talukdar. NILE: Natural language inference with faithful natural language explanations. In ACL, pp. 8730-8742, 2020. URL https://aclanthology.org/2020.acl-main.771." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 369, + 506, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 369, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 506, + 393 + ], + "type": "text", + "content": "Tao Lei, Regina Barzilay, and Tommi Jaakkola. Rationalizing neural predictions. In EMNLP, pp. 107-117, 2016. URL https://aclanthology.org/D16-1011/." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 398, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 398, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 398, + 504, + 422 + ], + "type": "text", + "content": "Bowen Li, Lili Mou, and Frank Keller. An imitation learning approach to unsupervised parsing. In ACL, pp. 3485-3492, 2019. URL https://aclanthology.org/P19-1338." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 427, + 506, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 427, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 427, + 506, + 462 + ], + "type": "text", + "content": "Chen Liang, Jonathan Berant, Quoc Le, Kenneth Forbus, and Ni Lao. Neural symbolic machines: Learning semantic parsers on Freebase with weak supervision. In ACL, pp. 23-33, 2017. URL https://aclanthology.org/P17-1003/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 468, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 506, + 502 + ], + "type": "text", + "content": "Xianggen Liu, Lili Mou, Haotian Cui, Zhengdong Lu, and Sen Song. Jumper: Learning when to make classification decisions in reading. In *IJCAI*, pp. 4237-4243, 2018. URL https://www.ijcai.org/proceedings/2018/0589.pdf." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 507, + 506, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 507, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 507, + 506, + 532 + ], + "type": "text", + "content": "Yang Liu and Mirella Lapata. Text summarization with pretrained encoders. In EMNLP-IJCNLP, pp. 3730-3740, 2019. URL https://aclanthology.org/D19-1387/." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 537, + 506, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 537, + 506, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 537, + 506, + 572 + ], + "type": "text", + "content": "Zhengdong Lu, Xianggen Liu, Haotian Cui, Yukun Yan, and Daqi Zheng. Object-oriented neural programming (OONP) for document understanding. In ACL, pp. 2717-2726, 2018. URL https://aclanthology.org/P18-1253." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 577, + 506, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 577, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 506, + 612 + ], + "type": "text", + "content": "Bill MacCartney and Christopher D Manning. Natural logic for textual inference. In Proc. ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 193-200, 2007. URL https://aclanthology.org/W07-1431/." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 617, + 506, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 617, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 506, + 651 + ], + "type": "text", + "content": "Bill MacCartney and Christopher D. Manning. Modeling semantic containment and exclusion in natural language inference. In *COLING*, pp. 521-528, 2008. URL https://aclanthology.org/C08-1066." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 657, + 506, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 506, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 506, + 692 + ], + "type": "text", + "content": "Bill MacCartney and Christopher D Manning. An extended model of natural logic. In Proc. International Conference on Computational Semantics, pp. 140-156, 2009. URL https://aclanthology.org/W09-3714." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 697, + 506, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 506, + 731 + ], + "type": "text", + "content": "Bill MacCartney, Michel Galley, and Christopher D Manning. A phrase-based alignment model for natural language inference. In EMNLP, pp. 802-811, 2008. URL https://aclanthology.org/D08-1084." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 82, + 507, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 507, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 507, + 116 + ], + "type": "text", + "content": "Rabeeh Karimi Mahabadi, Florian Mai, and James Henderson. Learning entailment-based sentence embeddings from natural language inference. Online Manuscript, 2020. URL https://openreview.net/forum?id=BkxackSKvH." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 506, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 506, + 159 + ], + "type": "text", + "content": "Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B. Tenenbaum, and Jiajun Wu. The neurosymbolic concept learner: Interpreting scenes, words, and sentences from natural supervision. In ICLR, 2019. URL https://openreview.net/forum?id=rJgM1hRctm." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 167, + 506, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 167, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 506, + 201 + ], + "type": "text", + "content": "Lili Mou, Rui Men, Ge Li, Yan Xu, Lu Zhang, Rui Yan, and Zhi Jin. Natural language inference by tree-based convolution and heuristic matching. In ACL, pp. 130-136, 2016. URL https://aclanthology.org/P16-2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 209, + 506, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 209, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 506, + 243 + ], + "type": "text", + "content": "Lili Mou, Zhengdong Lu, Hang Li, and Zhi Jin. Coupling distributed and symbolic execution for natural language queries. In ICML, pp. 2518-2526, 2017. URL https://proceedings.mlrpress/v70/mou17a.html." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 251, + 506, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 506, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 506, + 286 + ], + "type": "text", + "content": "Sharan Narang, Colin Raffel, Katherine Lee, Adam Roberts, Noah Fiedel, and Karishma Malkan. WT5?! Training text-to-text models to explain their predictions. arXiv preprint arXiv:2004.14546, 2020. URL https://arxiv.org/abs/2004.14546." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 294, + 506, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 294, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 294, + 506, + 329 + ], + "type": "text", + "content": "Ken Nozaki, Hisao Ishibuchi, and Hideo Tanaka. A simple but powerful heuristic method for generating fuzzy rules from numerical data. Fuzzy Sets and Systems, 86(3):251-270, 1997. URL https://www.sciencedirect.com/science/article/abs/pii/0165011495004130." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 337, + 506, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 337, + 506, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 337, + 506, + 371 + ], + "type": "text", + "content": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. BLEU: A method for automatic evaluation of machine translation. In ACL, pp. 311-318, 2002. URL https://aclanthology.org/P02-1040." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 379, + 506, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 379, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 506, + 414 + ], + "type": "text", + "content": "Ankur Parikh, Oscar Täckström, Dipanjan Das, and Jakob Uszkoreit. A decomposable attention model for natural language inference. In EMNLP, pp. 2249-2255, 2016. URL https://aclanthology.org/D16-1244/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 422, + 506, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 506, + 446 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 506, + 446 + ], + "type": "text", + "content": "Matt Post. A call for clarity in reporting BLEU scores. In Proc. Conference on Machine Translation: Research Papers, pp. 186-191, 2018. URL https://aclanthology.org/W18-6319." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 453, + 506, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 506, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 506, + 488 + ], + "type": "text", + "content": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. OpenAI Blog, 2018. URL https://cdn.openai.com/research-covers/language-unsupervised/language understands_paper.pdf." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 495, + 506, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 506, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 506, + 520 + ], + "type": "text", + "content": "Nils Reimers and Iryna Gurevych. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In EMNLP, 2019. URL https://aclanthology.org/D19-1410." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 528, + 506, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 528, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 506, + 563 + ], + "type": "text", + "content": "Cynthia Rudin. Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1(5):206-215, 2019. URL https://www.nature.com/articles/s42256-019-0048-x." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 570, + 506, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 570, + 506, + 616 + ], + "spans": [ + { + "bbox": [ + 105, + 570, + 506, + 616 + ], + "type": "text", + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, pp. 5998-6008, 2017. 
URL https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 624, + 506, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 506, + 658 + ], + "type": "text", + "content": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In ICLR, 2019. URL https://openreview.net/forum?id=rJ4km2R5t7." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 666, + 506, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 506, + 690 + ], + "type": "text", + "content": "Shuohang Wang and Jing Jiang. Learning natural language inference with LSTM. In NAACL-HLT, pp. 1442-1451, 2016. URL https://aclanthology.org/N16-1170/." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed H. Chi, Quoc V Le, and Denny Zhou. Chain of thought prompting elicits reasoning in large language models. In NeurlPS, 2022. URL https://openreview.net/forum?id=._VjQlMeSB_J." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 492 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "text", + "content": "Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In *NAACL-HLT*, pp. 1112–1122, 2018. URL https://aclanthology.org/N18-1101." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 122, + 506, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 122, + 506, + 156 + ], + "spans": [ + { + "bbox": [ + 107, + 122, + 506, + 156 + ], + "type": "text", + "content": "Wenhan Xiong, Thien Hoang, and William Yang Wang. DeepPath: A reinforcement learning method for knowledge graph reasoning. In EMNLP, pp. 564-573, 2017. URL https://aclanthology.org/D17-1060/." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 163, + 506, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 163, + 506, + 208 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 506, + 208 + ], + "type": "text", + "content": "Hitomi Yanaka, Koji Mineshima, Daisuke Bekki, Kentaro Inui, Satoshi Sekine, Lasha Abzianidze, and Johan Bos. HELP: A dataset for identifying shortcomings of neural models in monotonicity reasoning. In Proc. Conference on Lexical and Computational Semantics, pp. 250-255, 2019a. URL https://aclanthology.org/S19-1027." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 214, + 506, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 214, + 506, + 250 + ], + "spans": [ + { + "bbox": [ + 106, + 214, + 506, + 250 + ], + "type": "text", + "content": "Hitomi Yanaka, Koji Mineshima, Daisuke Bekki, Kentaro Inui, Satoshi Sekine, Lasha Abzianidze, and Johan Bos. Can neural networks understand monotonicity reasoning? In ACL BlackboxNLP Workshop, pp. 31-40, 2019b. URL https://aclanthology.org/W19-4804." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 255, + 506, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 255, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 106, + 255, + 506, + 300 + ], + "type": "text", + "content": "Kexin Yi, Jiajun Wu, Chuang Gan, Antonio Torralba, Pushmeet Kohli, and Josh Tenenbaum. Neural-symbolic VQA: Disentangling reasoning from vision and language understanding. In NeurIPS, 2018. URL https://proceedings.neurips.cc/paper/2018/file/5e388103a391daabe3de1d76a6739ccd-Paper.pdf." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 307, + 506, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 307, + 506, + 341 + ], + "spans": [ + { + "bbox": [ + 106, + 307, + 506, + 341 + ], + "type": "text", + "content": "Deunsol Yoon, Dongbok Lee, and SangKeun Lee. 
Dynamic self-attention: Computing attention over words dynamically for sentence embedding. arXiv preprint arXiv:1808.07383, 2018. URL https://arxiv.org/abs/1808.07383." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 347, + 506, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 347, + 506, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 347, + 506, + 370 + ], + "type": "text", + "content": "Lotfi A Zadeh. Fuzzy logic. Computer, 21(4):83-93, 1988. URL https://ieeexplore.ieee.org/abstract/document/53." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 377, + 506, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 377, + 506, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 377, + 506, + 410 + ], + "type": "text", + "content": "Lotfi A Zadeh. Fuzzy sets. In *Fuzzy Sets, Fuzzy Logic, and Fuzzy Systems*, pp. 394-432. World Scientific, 1996. URL https://www.worldscientific.com/doi/abs/10.1142/9789814261302_0021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 417, + 506, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 417, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 506, + 452 + ], + "type": "text", + "content": "Zhuosheng Zhang, Yuwei Wu, Hai Zhao, Zuchao Li, Shuailiang Zhang, Xi Zhou, and Xiang Zhou. Semantics-aware BERT for language understanding. In AAAI, pp. 9628-9635, 2020. URL https://ojs.aaai.org/index.php/AAAI/article/view/6510." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 458, + 506, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 458, + 506, + 492 + ], + "spans": [ + { + "bbox": [ + 106, + 458, + 506, + 492 + ], + "type": "text", + "content": "Xinyan Zhao and V.G.Vinod Vydiswaran. LIREx: Augmenting language inference with relevant explanations. In AAAI, pp. 14532-14539, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/17708." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 105, + 513, + 270, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 513, + 270, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 513, + 270, + 525 + ], + "type": "text", + "content": "A IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 538, + 222, + 549 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 538, + 222, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 222, + 549 + ], + "type": "text", + "content": "A.1 PHRASE DETECTION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 558, + 504, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 558, + 504, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 504, + 604 + ], + "type": "text", + "content": "We present more details about our phrase detection. We use " + }, + { + "bbox": [ + 104, + 558, + 504, + 604 + ], + "type": "inline_equation", + "content": "\\mathrm{SpaCy}^5" + }, + { + "bbox": [ + 104, + 558, + 504, + 604 + ], + "type": "text", + "content": " to obtain the part-of-speech (POS) tag" + }, + { + "bbox": [ + 104, + 558, + 504, + 604 + ], + "type": "inline_equation", + "content": "^6" + }, + { + "bbox": [ + 104, + 558, + 504, + 604 + ], + "type": "text", + "content": " of every word. SpaCy also tags noun phrases. However, if a noun phrase follows a preposition (with a fine-grained POS tag being IN), we remove it from noun phrases but tag it as a prepositional phrase." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "type": "text", + "content": "In addition, we extract verbs by the POS tag VERB. A verb may be followed by a particle with the fine-grained POS tag being RP (e.g., show off). 
It is treated as a verb phrase. In order to handle negation, we allow optional AUX NOT before a verb, (e.g., could not help). This, however, only counts less than " + }, + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "type": "text", + "content": " in the dataset, and does not affect our model much." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 658, + 507, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 507, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 507, + 693 + ], + "type": "text", + "content": "To capture other potential semantic units, we treat remaining open class words7 as individual phrases. Finally, the remaining non-content words (in the categories of closed words and others) are discarded (e.g., \"there is\"). This is appropriate, because they do not represent meaningful semantics or play a" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 699, + 196, + 710 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 699, + 196, + 710 + ], + "spans": [ + { + "bbox": [ + 118, + 699, + 196, + 710 + ], + "type": "text", + "content": "5https://spacy.io" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 118, + 710, + 377, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 710, + 377, + 721 + ], + "spans": [ + { + "bbox": [ + 118, + 710, + 377, + 721 + ], + "type": "text", + "content": "See definitions in https://spacy.io/usage/linguistic-features" + } + ] + } + ], + "index": 18 + 
}, + { + "bbox": [ + 118, + 721, + 303, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 721, + 303, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 721, + 303, + 731 + ], + "type": "text", + "content": "7https://universaldependencies.org/u/pos/" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 127, + 100, + 484, + 169 + ], + "blocks": [ + { + "bbox": [ + 160, + 79, + 449, + 92 + ], + "lines": [ + { + "bbox": [ + 160, + 79, + 449, + 92 + ], + "spans": [ + { + "bbox": [ + 160, + 79, + 449, + 92 + ], + "type": "text", + "content": "Table 5: Our rules for phrase detection. \"[\"] means the item is optional." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 100, + 484, + 169 + ], + "lines": [ + { + "bbox": [ + 127, + 100, + 484, + 169 + ], + "spans": [ + { + "bbox": [ + 127, + 100, + 484, + 169 + ], + "type": "table", + "html": "
Example: The woman is showing off her blue dog at the playground.
NumberPhrase typeRuleExtracted phrase(s)
1Prepositional phraseIN + NPat the playground
2Noun phraseNPThe woman|her blue dog
3Verb phrase[AUX] + [NOT] + VERB + [RP]is showing off
4OthersOther open class words-
", + "image_path": "e08c6b3caad44318a6c42a1162f8e5d932ccb60cc225d6db912c64533b9aa893.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 113, + 190, + 286, + 338 + ], + "blocks": [ + { + "bbox": [ + 113, + 190, + 286, + 338 + ], + "lines": [ + { + "bbox": [ + 113, + 190, + 286, + 338 + ], + "spans": [ + { + "bbox": [ + 113, + 190, + 286, + 338 + ], + "type": "image", + "image_path": "8248aa4102f7171ad75b057337f1e3f4e19a75b3822c2d4ea449260d0811d919.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 182, + 347, + 427, + 360 + ], + "lines": [ + { + "bbox": [ + 182, + 347, + 427, + 360 + ], + "spans": [ + { + "bbox": [ + 182, + 347, + 427, + 360 + ], + "type": "text", + "content": "Figure 4: Results of tuning the coefficient of global features." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 314, + 189, + 483, + 338 + ], + "blocks": [ + { + "bbox": [ + 314, + 189, + 483, + 338 + ], + "lines": [ + { + "bbox": [ + 314, + 189, + 483, + 338 + ], + "spans": [ + { + "bbox": [ + 314, + 189, + 483, + 338 + ], + "type": "image", + "image_path": "0103103fde1e521ef699f5f4acd134e4a8292249cc3d31cc07a1f2ae43aa5200.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 383, + 506, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 383, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 383, + 506, + 418 + ], + "type": "text", + "content": "role in reasoning. Table 5 summarizes all the rules used in our approach. They are executed in order and extracted phrases are exclusive. For example, the playground in the phrase at the playground will not be treated as a standalone noun phrase, as it is already part of a prepositional phrase." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 422, + 504, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 445 + ], + "type": "text", + "content": "Empirically, our rule-based approach works well for the NLI dataset, and our logical reasoning is at the granularity of the extracted phrases." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 460, + 178, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 178, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 178, + 471 + ], + "type": "text", + "content": "A.2 SETTINGS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "text", + "content": "Details of the EPR Model. We chose the pretrained model a11-mpnet-base-" + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "inline_equation", + "content": "v2^8" + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "text", + "content": " from the SentenceBERT study (Reimers & Gurevych, 2019) and obtained 768-dimensional local and global phrase embeddings. Our MLP had the same dimension as the embeddings, i.e., 768D for the local and global variants, or 1536D for the concatenation variant. We chose the coefficient for the global feature in Eq. (1) from a candidate set of " + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "inline_equation", + "content": "\\{0.0, 0.2, 0.4, 0.6, 0.8, 1.0\\}" + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "text", + "content": ". Figure 4 shows the hyperparameter tuning results on SNLI (mentioned in § 4.2) and MNLI (to be discussed in § C.1). We find that 0.4 yields the best sentence accuracy in SNLI, and that 1.0 is the best for MNLI. 
As our focus is on reasoning, we set the coefficient to be 0.6, because it yields the highest phrasal reasoning performance and decent sentence-level performance for both experiments and in terms of both geometric mean and arithmetic mean of " + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "text", + "content": " scores. The pretrained language model (LM) was either finetuned or un-finetuned during training. Finetuning yields higher performance (Table 2), whereas un-finetuned LM is more efficient for in-depth analyses (Table 3). We trained the model with a batch size of 256. We used Adam (Kingma & Ba, 2015) with a learning rate of 5e-5, " + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "inline_equation", + "content": "\\beta_1 = 0.9" + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "inline_equation", + "content": "\\beta_2 = 0.999" + }, + { + "bbox": [ + 104, + 482, + 506, + 670 + ], + "type": "text", + "content": ", learning rate warm up over the first 10 percent of the total steps, and linear decay of the learning rate. The model was trained up to 3 epochs, following the common practice (Dodge et al., 2020). Our main model variants were trained 5 times with different parameter initializations, and we report the mean and standard deviation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 674, + 506, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 674, + 506, + 709 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 506, + 709 + ], + "type": "text", + "content": "Details of Textual Explanation Generation. We used the pretrained T5-small model for finetuning with a batch size of 32. 
The optimizer was Adam with an initial learning rate of 3e-4, " + }, + { + "bbox": [ + 104, + 674, + 506, + 709 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 104, + 674, + 506, + 709 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 674, + 506, + 709 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.999" + }, + { + "bbox": [ + 104, + 674, + 506, + 709 + ], + "type": "text", + "content": ", learning rate warm-up for the first 2 epochs, and linear decay of the learning rate up to 10" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 345, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 345, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 345, + 731 + ], + "type": "text", + "content": "8https://www.sbert.net/docs/pretrained_models.html" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 83, + 501, + 258 + ], + "blocks": [ + { + "bbox": [ + 108, + 83, + 501, + 258 + ], + "lines": [ + { + "bbox": [ + 108, + 83, + 501, + 258 + ], + "spans": [ + { + "bbox": [ + 108, + 83, + 501, + 258 + ], + "type": "image", + "image_path": "1084e0ae9a63b49c6777536940cc338080d3b2b18180e7db18d99603a3053c0d.jpg" + } + ] + } 
+ ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 215, + 269, + 394, + 281 + ], + "lines": [ + { + "bbox": [ + 215, + 269, + 394, + 281 + ], + "spans": [ + { + "bbox": [ + 215, + 269, + 394, + 281 + ], + "type": "text", + "content": "Figure 5: The prompt for phrasal reasoning." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 304, + 504, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 327 + ], + "type": "text", + "content": "epochs; then we decreased the learning rate to 3e-6 and trained the model until the validation BLEU score did not increase for 2 epochs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 332, + 504, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 332, + 504, + 367 + ], + "spans": [ + { + "bbox": [ + 104, + 332, + 504, + 367 + ], + "type": "text", + "content": "Details of the Prompting Baseline. We adopted the GPT-3 (the text-davinci-003 version with 175B parameters) (Brown et al., 2020) as a prompting baseline to demonstrate large language models (LLMs)' phrasal reasoning ability." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 371, + 506, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 506, + 428 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 506, + 428 + ], + "type": "text", + "content": "We consider exemplar-based prompting, because it is unlikely for an LLM to output structured reasoning results in a zero-shot manner. Moreover, our examples are chosen to cover all reasoning cases. We also set the temperature of decoding to 0 to obtain deterministic reasoning, following CoT prompting (Wei et al., 2022). Rule-based post-processing was applied to extract slot values. Figure 5 presents the prompt used for phrasal reasoning." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 445, + 440, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 440, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 440, + 459 + ], + "type": "text", + "content": "B DATA ANNOTATION AND REASONING EVALUATION METRICS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 472, + 506, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 506, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 506, + 518 + ], + "type": "text", + "content": "Previous studies have not explicitly evaluated reasoning performance. Typically, they resort to sentence-level classification accuracy (Wang & Jiang, 2016; Mahabadi et al., 2020) or case studies (Parikh et al., 2016; Feng et al., 2020) to demonstrate the effectiveness of their alleged interpretable models, which we believe is inadequate." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 522, + 506, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 522, + 506, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 506, + 556 + ], + "type": "text", + "content": "Therefore, we annotated a model-agnostic corpus about phrasal logical relationships and developed a set of metrics to evaluate the phrasal reasoning performance quantitatively. The resources are released on our website (Footnote 1) to facilitate future research." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 572, + 217, + 583 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 572, + 217, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 217, + 583 + ], + "type": "text", + "content": "B.1 DATA ANNOTATION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 594, + 506, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 506, + 683 + ], + "type": "text", + "content": "We annotated the phrases and their logical relationships in a data sample. The annotators were asked to select corresponding phrases from both premise and hypothesis, and label them as either Entailment, Contradiction, or Neutral, with the sentence-level NLI label being given. Annotators could also select a phrase from either a premise or a hypothesis and label it as Unaligned. The process can be repeated until all phrases are labeled for a data sample. Figure 6 shows a screenshot of our annotation page. In the left panel, the annotator could select phrases in the two sentences and mark them with NLI labels. The annotator can view a sample's annotated phrases in the right panel and navigate through different samples." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 687, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 507, + 734 + ], + "type": "text", + "content": "The annotation was performed by three in-lab researchers who are familiar with the NLI task. Our preliminary study shows low agreement when the annotators are unfamiliar with the task; thus it is inappropriate to recruit Mechanical Turks for annotation. 
We randomly selected 100 samples for annotation, following previous work on the textual explanation for SNLI (Camburu et al., 2018)," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 94, + 296, + 264 + ], + "blocks": [ + { + "bbox": [ + 133, + 94, + 296, + 264 + ], + "lines": [ + { + "bbox": [ + 133, + 94, + 296, + 264 + ], + "spans": [ + { + "bbox": [ + 133, + 94, + 296, + 264 + ], + "type": "image", + "image_path": "b4fe9d3be55d6fd2a0483619e745b2fe18ac33cf34df0d009798b3d99827c502.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 210, + 278, + 400, + 291 + ], + "lines": [ + { + "bbox": [ + 210, + 278, + 400, + 291 + ], + "spans": [ + { + "bbox": [ + 210, + 278, + 400, + 291 + ], + "type": "text", + "content": "Figure 6: A screenshot of the annotation page." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 353, + 79, + 486, + 271 + ], + "blocks": [ + { + "bbox": [ + 353, + 79, + 486, + 271 + ], + "lines": [ + { + "bbox": [ + 353, + 79, + 486, + 271 + ], + "spans": [ + { + "bbox": [ + 353, + 79, + 486, + 271 + ], + "type": "image", + "image_path": "6b790ebae135bb16bcb093f146fe34e93ccaf5d0c14c254e08af31be87a70402.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 306, + 507, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 507, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 507, + 331 + ], + "type": "text", + "content": "Table 6: Examples illustrating the proposed metrics, where we consider the Entailment category. \"|\" refers to a phrase segmentation." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 106, + 337, + 506, + 436 + ], + "blocks": [ + { + "bbox": [ + 106, + 337, + 506, + 436 + ], + "lines": [ + { + "bbox": [ + 106, + 337, + 506, + 436 + ], + "spans": [ + { + "bbox": [ + 106, + 337, + 506, + 436 + ], + "type": "table", + "html": "
Example annotation of entailment (in highlight): Premise: A kid in red is playing in a garden. Hypothesis: A child in red is watching TV in the bedroom.
#Example OutputPE(P)PE(H)PERE(P)RE(H)REFEExplanation
1PH in a garden0000000Although in occurs in the annotation, the word indexes are different. The reasoning is wrong.
2PH watching TV1001000Mis-matched phrases in hypothesis. The reasoning is wrong.
3PH a kid | in red1111111All word indexes match the annotation. The reasoning is correct.
", + "image_path": "df2172cc26cc75e66eed3c168907cc417908a6b250f9a2b3c27a5721b2b68b43.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 460, + 506, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 506, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 506, + 484 + ], + "type": "text", + "content": "which is adequate to show statistical significance. Since our annotation only concerns data samples, it is agnostic to any machine learning model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 502, + 349, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 349, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 349, + 513 + ], + "type": "text", + "content": "B.2 EVALUATION METRICS FOR PHRASAL REASONING" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 525, + 506, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 506, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 506, + 559 + ], + "type": "text", + "content": "We propose a set of " + }, + { + "bbox": [ + 104, + 525, + 506, + 559 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 525, + 506, + 559 + ], + "type": "text", + "content": "-scores in Entailment, Contradiction, Neutral, and Unaligned to quantitatively evaluate the phrasal reasoning performance. We first introduce our metric for one data sample and then explain the extension to a corpus." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": "Consider the Entailment category as an example. 
We first count the number of \"hits\" (true positives) between the word indexes of model output and annotation. Using word indexes (instead of words) rules out hitting the words in misaligned phrases (Example 1, Table 6). Then, we calculate precision scores for the premise and hypothesis, denoted by " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "P_{\\mathsf{E}}^{(P)}" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "P_{\\mathsf{E}}^{(H)}" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": ", respectively. Their geometric mean " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "P_{\\mathsf{E}} = (P_{\\mathsf{E}}^{(P)}P_{\\mathsf{E}}^{(H)})^{1 / 2}" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": " is considered as the precision for Entailment. Here, the geometric mean rules out incorrect reasoning that hits either the premise or hypothesis, but not both (Example 2, Table 6). Further, we compute the recall score " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "R_{\\mathsf{E}}" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": " in a similar way, and finally obtain the " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": "-score by " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "F_{\\mathsf{E}} = \\frac{2P_{\\mathsf{E}}R_{\\mathsf{E}}}{P_{\\mathsf{E}} + R_{\\mathsf{E}}}" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": ". 
Likewise, " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "F_{\\mathsf{C}}" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "F_{\\mathsf{N}}" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": " are calculated for Contradiction and Neutral. In addition, we compute the " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": "-score for unaligned phrases in premise and hypothesis, denoted by " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "F_{\\mathsf{UP}}" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "inline_equation", + "content": "F_{\\mathsf{UH}}" + }, + { + "bbox": [ + 104, + 563, + 507, + 683 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 688, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 506, + 733 + ], + "type": "text", + "content": "When calculating our " + }, + { + "bbox": [ + 104, + 688, + 506, + 733 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 688, + 506, + 733 + ], + "type": "text", + "content": "-scores for a corpus, we use micro-average, i.e., the precision and recall ratios are calculated in the corpus level. This is more stable, especially considering the varying lengths of sentences. Moreover, we compare model output against three annotators and perform an arithmetic average, further reducing the variance caused by ambiguity." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 101, + 504, + 208 + ], + "blocks": [ + { + "bbox": [ + 148, + 80, + 461, + 94 + ], + "lines": [ + { + "bbox": [ + 148, + 80, + 461, + 94 + ], + "spans": [ + { + "bbox": [ + 148, + 80, + 461, + 94 + ], + "type": "text", + "content": "Table 7: Results on MNLI. †Quoted from respective papers. ‡Our replication." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 101, + 504, + 208 + ], + "lines": [ + { + "bbox": [ + 108, + 101, + 504, + 208 + ], + "spans": [ + { + "bbox": [ + 108, + 101, + 504, + 208 + ], + "type": "table", + "html": "
ModelSent AccReasoning Performance
FEFCFUPFUHGMAM
Human-85.1573.4473.1846.3167.8569.52
Non-reasoning methods
Mahabadi et al. (2020)†73.8------
LSTM (Wang et al., 2019)†72.2------
Transformer (Radford et al., 2018)82.1------
Reasoning methods
NNL (Feng et al., 2020)‡61.2850.3332.0049.780.000.0033.03
STP75.1555.4751.7264.3237.5751.3152.27
EPR (Concat, LM finetuned)79.65±0.1961.76±0.3252.09±0.4164.3237.5752.80±0.0753.93±0.07
", + "image_path": "069dc294d304e9209f1a05756e1c3099de53c453468be6003ceb37d2d4d7a569.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 227, + 504, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 227, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 104, + 227, + 504, + 274 + ], + "type": "text", + "content": "It should be emphasized that our metrics evaluate phrase detection and alignment in an implicit manner. A poor phrase detector and aligner will result in a low reasoning score (shown in our ablation study), but we do not explicitly calculate phrase detection and alignment accuracy. This helps us cope with the ambiguity of the phrase granularity (Example 3, Table 6)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 277, + 504, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 504, + 301 + ], + "type": "text", + "content": "To summarize, we propose an evaluation framework including data annotation (§ B.1) and evaluation metrics (§ B.2). These are our contributions in formulating the phrasal reasoning task for NLI." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 316, + 245, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 245, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 245, + 328 + ], + "type": "text", + "content": "C ADDITIONAL RESULTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 340, + 218, + 351 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 340, + 218, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 218, + 351 + ], + "type": "text", + "content": "C.1 RESULTS ON MNLI" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 361, + 506, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 506, + 505 + ], + "type": "text", + "content": "In this appendix, we provide additional results on the matched section of the MNLI dataset (Williams et al., 2018), which consists of 393K training samples, 10K validation samples, and another 10K test samples. It has the same format as the SNLI dataset, but samples come from multiple domains and are more diverse. We follow § 4.1 and use the same protocol to create the phrasal reasoning annotation for the MNLI dataset based on 100 randomly selected samples. However, we found that MNLI is much noisier than SNLI; particularly, the sentences labeled as Neutral in MNLI share few related phrases. For example, the two sentences do not have much in common in the sample \"Premise: If you still want to join, it might be worked.\" and \"Hypothesis: Your membership is the only way that this could work\". Moreover, the inter-human agreement is low in the Neutral category. Therefore, we believe the corpus quality is less satisfactory for Neutral. To ensure meaningful evaluation, we ignored the evaluation of Neutral in this experiment, although our reasoning approach is not changed. 
The remaining 60 samples containing Entailment and Contradiction serve as the MNLI phrasal reasoning corpus." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 510, + 505, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 505, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 505, + 543 + ], + "type": "text", + "content": "We consider the EPR variant with concatenated local and global features, since the SNLI experiment shows it achieves a good balance between sentence-level accuracy and reasoning. Our models were run 5 times with different initializations." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 548, + 504, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 594 + ], + "type": "text", + "content": "As seen in Table 7, our EPR approach is again worse than humans, but largely improves the reasoning performance compared with NNL and STP baselines. Its sentence-level prediction is comparable to (although slightly lower than) finetuning Transformers. The results are highly consistent with SNLI experiments, showing the robustness of our approach." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 598, + 505, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 598, + 505, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 598, + 505, + 665 + ], + "type": "text", + "content": "It is important to notice that the EPR model here is trained on MNLI sentence labels, and is not transferred from the SNLI dataset. In our preliminary experiments, we tried transfer learning from SNLI to MNLI and failed to obtain satisfactory performance. 
We found that our EPR is more prone to the out-of-vocabulary issue (i.e., it does not predict well for the phrases in the new domain), whereas a black-box neural network may learn biased sentence patterns and achieve higher performance in transfer learning." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 677, + 211, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 211, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 211, + 690 + ], + "type": "text", + "content": "C.2 ERROR ANALYSIS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "To show how phrasal reasoning affects sentence-level prediction, we perform an error analysis in Table 8. Specifically, we examine the reasoning performance (arithmetic mean of " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "-scores) when the sentence label is correctly and incorrectly predicted on the SNLI dataset. 
As shown, EPR models" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 106, + 504, + 163 + ], + "blocks": [ + { + "bbox": [ + 104, + 74, + 506, + 97 + ], + "lines": [ + { + "bbox": [ + 104, + 74, + 506, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 74, + 506, + 97 + ], + "type": "text", + "content": "Table 8: Sentence-level prediction count and arithmetic average reasoning performance (" + }, + { + "bbox": [ + 104, + 74, + 506, + 97 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 74, + 506, + 97 + ], + "type": "text", + "content": "-score) when the sentence label is correctly and incorrectly predicted on the SNLI dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 106, + 504, + 163 + ], + "lines": [ + { + "bbox": [ + 108, + 106, + 504, + 163 + ], + "spans": [ + { + "bbox": [ + 108, + 106, + 504, + 163 + ], + "type": "table", + "html": "
Sentence-level predictionCount (in percentage)Reasoning performance (AMF)
Local finetunedConcat finetunedLocal finetunedConcat finetuned
Correct75.4±1.3687.8±0.7565.71±0.8358.68±0.67
Wrong24.6±1.3612.2±0.7540.74±2.0137.58±3.28
Overall100.0±0.00100.0±0.0059.93±0.6756.32±1.13
", + "image_path": "871764ec24497ef74204c8c12f868f5c8f850eb6a11f2e28cf349af2d2f1541b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 109, + 174, + 503, + 262 + ], + "blocks": [ + { + "bbox": [ + 109, + 174, + 503, + 262 + ], + "lines": [ + { + "bbox": [ + 109, + 174, + 503, + 262 + ], + "spans": [ + { + "bbox": [ + 109, + 174, + 503, + 262 + ], + "type": "table", + "html": "
Groundtruth: Entailment Prediction: Entailment\nThree young boys enjoying a day at the beach.\n(a)\nThe boys are in the beach.Groundtruth: Contradiction Prediction: Contradiction\nA man playing fetch with two brown dogs.\n(b)\nThe dogs are asleep.Entailment\nContradiction\nNeutral\nUnaligned
Groundtruth: Neutral Prediction: Neutral\nWalkers on a concrete boardwalk under a blue sky.\n(c)\nWalkers under a blue sky near the beach.Groundtruth: Entailment Prediction: Neutral\nAn elderly couple in heavy coats are looking at black and white photos displayed on a wall.\n(d)\nOctogenarians admiring the old photographs that decorated the wall.
", + "image_path": "4ca1c526a86db5151d021704156513e3929199ea3dc845ff255b4b07fbf2ab32.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 272, + 504, + 316 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 504, + 316 + ], + "type": "text", + "content": "Figure 7: Examples of explainable phrasal reasoning predicted by our EPR model. Words in one color block are detected phrases, a dotted line shows the alignment of two phrases, and the color represents the predicted phrasal NLI label. In Example (d), EPR's prediction suggests the provided label in SNLI is incorrect." + } + ] + } + ], + "index": 4, + "type": "text" + }, + { + "bbox": [ + 104, + 338, + 504, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 381 + ], + "type": "text", + "content": "with both local and concatenated features have much higher reasoning performance when sentence labels are correctly predicted than incorrectly predicted. The positive correlation between phrasal reasoning performance and sentence-level accuracy shows our fuzzy logic induction rules indeed make sense." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 388, + 504, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 441 + ], + "type": "text", + "content": "We also find that the model with local features has a higher reasoning performance than with concatenated features, even when the sentence-level prediction is wrong. This is because the local model is unaware of the context of the sentences. Thus, it must perform strict phrasal reasoning based on the induction rules, even if in this case the reasoning process is imperfect and leads to sentence-level errors." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 456, + 227, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 227, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 227, + 467 + ], + "type": "text", + "content": "C.3 CASE STUDY OF EPR" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 476, + 504, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 476, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 504, + 500 + ], + "type": "text", + "content": "We present case studies of EPR in Figure 7. Our EPR performs impressive reasoning for the NLI task, which is learned in a weakly supervised manner with only sentence-level labels." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "text", + "content": "In Example (a), the two sentences are predicted Entailment because three young boys entails the boys and at the beach entails in the beach, whereas unaligned phrases enjoying and a day are allowed in the premise for Entailment. In Example (b), playing contradicts asleep, and the two sentences are also predicted Contradiction. Likewise, Example (c) is predicted Neutral because the aligned phrases on a concrete boardwalk and near the beach are neutral." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 565, + 506, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 565, + 506, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 565, + 506, + 643 + ], + "type": "text", + "content": "In our study, we also find several interesting examples where EPR's reasoning provides clues suggesting that the target labels may be incorrect in the SNLI dataset. 
In Example (d), our model predicts Neutral for looking and admiring, as well as for at black and white photos and the old photographs. Thus, the two sentences are predicted Neutral instead of the provided label Entailment. We believe our model's reasoning and prediction are correct, because people looking at something may or may not admire it; a black-and-white photo may or may not be an old photo (as it could be a black-and-white artistic photo)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 656, + 390, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 390, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 390, + 667 + ], + "type": "text", + "content": "C.4 CASE STUDY OF THE TEXTUAL EXPLANATION GENERATION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "We conduct another case study to show how EPR's reasoning is used in the textual explanation generation task. As seen in Figure 8, our EPR reasoning yields structured factual tuples: on a deserted beach entailing at the beach, Some dogs contradicting only one dog, and running unaligned (matched with a special token [EMPTY]). 
Our explanation generation model attends to these factual tuples, and the heat map shows that our model gives the most attention weights (with an average of" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 187, + 69, + 425, + 205 + ], + "blocks": [ + { + "bbox": [ + 187, + 69, + 425, + 205 + ], + "lines": [ + { + "bbox": [ + 187, + 69, + 425, + 205 + ], + "spans": [ + { + "bbox": [ + 187, + 69, + 425, + 205 + ], + "type": "table", + "html": "
Input Premise : Some dogs are running on a deserted beach.\nHypothesis : There is only one dog at the beach.
Label Contradiction (not used during our explanation generation)
EPR's Reasoning Output
Premise phraseHypothesis phraseEPR labelAttention score
on a deserted beachat the beachE23.16
Some dogsonly one dogC61.22
running[EMPTY]E15.62
Output explanation Some dogs is more than one dog.
Reference explanations:\n(1) Some is more than one, therefore there can't be only one dog.\n(2) Some indicates more than one dog. One dog is not some dogs.\n(3) Some dogs are not one dog.
", + "image_path": "43654033e8b40a301fb9227d2167129a2029e33c1048e4832f7411e2c0a1b05b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 110, + 214, + 449, + 316 + ], + "blocks": [ + { + "bbox": [ + 110, + 214, + 449, + 316 + ], + "lines": [ + { + "bbox": [ + 110, + 214, + 449, + 316 + ], + "spans": [ + { + "bbox": [ + 110, + 214, + 449, + 316 + ], + "type": "image", + "image_path": "7aad61fa82aba6c0d7d230334d58e9d5a183d8bb903f0c5c9b91377fcfef4dd7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 326, + 504, + 349 + ], + "lines": [ + { + "bbox": [ + 104, + 326, + 504, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 504, + 349 + ], + "type": "text", + "content": "Figure 8: Case study of the textual explanation generation. The heat map shows the step-by-step and average attention weights to the factual tuples (vertical axis)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 369, + 504, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 504, + 404 + ], + "type": "text", + "content": "0.61) to the tuple, Some dogs contradicting only one dog, to generate the explanation \"Some dogs is more than one dog.\" This example illustrates that the factual tuples given by our EPR model provide meaningful information and can improve textual explanation generation." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 419, + 298, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 419, + 298, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 298, + 432 + ], + "type": "text", + "content": "D LIMITATION AND FUTURE WORK" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 444, + 504, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 444, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 444, + 504, + 500 + ], + "type": "text", + "content": "This paper performs phrase detection and alignment by heuristics. They work well empirically in our experiments, although further improvement is possible (for example, by considering syntactic structures). However, our main focus is neural fuzzy logic for weakly supervised reasoning. This largely differs from previous work based on manually designed lexicons and rules (Hu et al., 2020; Chen et al., 2021)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 505, + 504, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 504, + 540 + ], + "type": "text", + "content": "Our long-term goal is to develop a weakly supervised, end-to-end trained neuro-symbolic system that can extract semantic units and perform reasoning for a given downstream NLP task. This paper is an important milestone toward the long-term goal." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 555, + 244, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 244, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 244, + 567 + ], + "type": "text", + "content": "E ETHICAL STATEMENTS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 579, + 506, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 625 + ], + "type": "text", + "content": "Our work involves human annotation of the phrasal logical relationships. Since the research subject here is logic (rather than humans), there are minimal ethical concerns. We nevertheless followed a standard protocol of human evaluation (involving identity protection, and proper compensation), approved by our institutional ethics board." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 641, + 219, + 652 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 641, + 219, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 641, + 219, + 652 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 665, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 732 + ], + "type": "text", + "content": "We thank all reviewers and chairs for their valuable comments. The research is supported in part by the Natural Sciences and Engineering Research Council of Canada (NSERC) under Grant No. RGPIN2020-04465, the Amii Fellow Program, the Canada CIFAR AI Chair Program, a UAHJIC project, a donation from DeepMind, and the Digital Research Alliance of Canada (alliancecan.ca). Atharva Naik contributed to the research as an intern at the University of Alberta through the Mitacs Globalink program." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_content_list.json b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..b793df4060e0502468906d8bec1244ba746401d2 --- /dev/null +++ b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_content_list.json @@ -0,0 +1,2608 @@ +[ + { + "type": "text", + "text": "WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH PROBABILISTIC LOGICAL REASONING FOR OBJECT DETECTION", + "text_level": 1, + "bbox": [ + 171, + 99, + 828, + 171 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Martijn Oldenhof", + "bbox": [ + 181, + 194, + 312, + 208 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ESAT-STADIUS", + "bbox": [ + 181, + 209, + 305, + 222 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KU Leuven, Belgium", + "bbox": [ + 183, + 223, + 326, + 237 + ], + "page_idx": 0 + }, + { + 
"type": "text", + "text": "martijn. oldenhof@kuleuven.be", + "bbox": [ + 183, + 238, + 459, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Adam Arany", + "bbox": [ + 514, + 195, + 609, + 209 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ESAT-STADIUS", + "bbox": [ + 514, + 209, + 638, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KU Leuven, Belgium", + "bbox": [ + 514, + 223, + 658, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "adam.arany@esat.kuleuven.be", + "bbox": [ + 514, + 238, + 781, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yves Moreau", + "bbox": [ + 181, + 272, + 279, + 285 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ESAT-STADIUS", + "bbox": [ + 181, + 286, + 305, + 300 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KU Leuven, Belgium", + "bbox": [ + 183, + 301, + 326, + 314 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "yves.moreau@esat.kuleuven.be", + "bbox": [ + 183, + 315, + 459, + 328 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Edward De Brouwer", + "bbox": [ + 514, + 272, + 663, + 285 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ESAT-STADIUS", + "bbox": [ + 514, + 286, + 640, + 299 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KU Leuven, Belgium", + "bbox": [ + 514, + 300, + 660, + 315 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "edward.debrouwer@gmail.com", + "bbox": [ + 514, + 315, + 774, + 328 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 364, + 545, + 378 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Training object detection models usually requires instance-level annotations, such as the positions and labels of all objects present in each image. 
Such supervision is unfortunately not always available and, more often, only image-level information is provided, also known as weak supervision. Recent works have addressed this limitation by leveraging knowledge from a richly annotated domain. However, the scope of weak supervision supported by these approaches has been very restrictive, preventing them to use all available information. In this work, we propose ProbKT, a framework based on probabilistic logical reasoning that allows to train object detection models with arbitrary types of weak supervision. We empirically show on different datasets that using all available information is beneficial as our ProbKT leads to significant improvement on target domain and better generalization compared to existing baselines. We also showcase the ability of our approach to handle complex logic statements as supervision signal. Our code is available at https://github.com/molden/ProbKT", + "bbox": [ + 228, + 393, + 769, + 590 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 614, + 336, + 628 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Object detection is a fundamental ability of numerous high-level machine learning pipelines such as autonomous driving [4; 16], augmented reality [42] or image retrieval [17]. However, training state-of-the-art object detection models generally requires detailed image annotations such as the box-coordinates location and the labels of each object present in each image. If several large benchmark datasets with detailed annotations are available [26; 15], providing such detailed annotation on new specific datasets comes with a significant cost that is often not affordable for many applications.", + "bbox": [ + 169, + 645, + 826, + 729 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "More frequently, datasets come with only limited annotation, also referred to as weak supervision. 
This has sparked research in weakly-supervised object detection approaches [25; 6; 40], using techniques such as multiple instance learning [40] or variations of class activation maps [3]. However, these approaches have been shown to significantly underperform their fully-supervised counterparts in terms of robustness and accurate localization of the objects [39].", + "bbox": [ + 169, + 734, + 826, + 806 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "An appealing and intuitive approach to improve the performance of weakly supervised object detection is to perform transfer learning from an existing object detection model pre-trained on a fully annotated dataset [14; 46; 43]. This approach, also referred to as transfer learning or domain adaptation, consists in leveraging transferable knowledge from the pre-trained model (such as bounding boxes prediction capabilities) to the new weakly supervised domain. This transfer has been embodied in different ways in the literature. Examples include a simple fine-tuning of the classifier of bounding box proposals of the pre-trained model [43], or an iterative relabeling of the weakly supervised dataset for retraining a new full objects detection model on the re-labeled data [46].", + "bbox": [ + 169, + 811, + 826, + 925 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/243aadad3b25bc99b81b9d9924300b2d9c50d1ddde480b20145264765a247667.jpg", + "image_caption": [ + "Figure 1: ProbKT: Weakly supervised knowledge transfer with probabilistic logical reasoning. (Left) A model can be trained on the source domain using full supervision (labels, positions) but only on a limited set of shapes (cylinders and spheres). 
(Middle) The pre-trained model does not recognize the cubes from the target domain correctly. (Right) The model can adapt to the target domain after applying ProbKT and can recognize the cubes." + ], + "image_footnote": [], + "bbox": [ + 173, + 98, + 826, + 303 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, existing approaches are very restrictive in the type of weak supervision they are able to harness. Indeed, some do not support new object classes in the new domain [20], others can only use a label indicating the presence of an object class [46]. However, in practice, the supervision on the new domain can come in very different forms. For instance, the count of each object class can be given, such as in atom detection from molecule images where only chemical formula might be given. Or, when many objects are present on an image, a range can be provided instead of an exact class counts (e.g. \"there are at least 4 cats on this image\"). Crucially, this variety of potential supervisory signals on the target domain cannot be fully utilized by existing domain adaption approaches.", + "bbox": [ + 169, + 404, + 823, + 516 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this limitation, we introduce ProbKT, a novel framework that allows to generalize knowledge transfer in object detection to arbitrary types of weak supervision using neural probabilistic logical reasoning [27]. This paradigm allows to connect probabilistic outputs of neural networks with logical rules and to infer the resulting probability of particular queries. One can then evaluate the probability of a query such as \"the image contains at least two animals\" and differentiate through the probabilistic engine to train the underlying neural network. Our approach allows for arbitrarily complex logical statements and therefore supports weak supervision like class counts or ranges, among other. 
To our knowledge, this is the first approach to allow for such versatility in utilizing the available information on the new domain.", + "bbox": [ + 169, + 522, + 826, + 648 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To assess the capabilities of this framework, we provide extensive empirical analysis of multiple object detection datasets. Our approach also supports any type of objects detection backbone architecture. We thus use two popular backbone architectures, DETR [7] and RCNN [34] and evaluate their performance in terms of accuracy, convergence as well of generalization on out-of-distribution data. Our experiments show that, due to its ability to use the complete supervisory signal, our approach outperforms previous works in a wide range of setups.", + "bbox": [ + 169, + 655, + 826, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Key contributions: (1) We propose a novel knowledge transfer framework for object detection relying on probabilistic programming that uniquely allows using arbitrary types of weak supervision on the target domain. (2) We make our approach amenable to different levels of computational capabilities by proposing different approximations of ProbKT. (3) We provide an extensive experimental setup to study the capabilities of our framework for knowledge transfer and out-of-distribution generalization.", + "bbox": [ + 169, + 744, + 826, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORKS", + "text_level": 1, + "bbox": [ + 171, + 835, + 352, + 852 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A comparative summary of related works is given in Table 1. 
We distinguish three main categories: (1) pure weakly supervised object detection methods (WSOD) that do not leverage a richly annotated source domain, (2) unsupervised object detection methods with knowledge transfer (DA or domain adaptation methods) that do not use supervision on the target domain and (3) weakly supervised", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "object detection methods with knowledge transfer (WSOD w/transfer) that are restrictive in the type of supported weak supervision. To our knowledge, our work is the first to allow for arbitrary supervision on the target domain (and supporting new classes in the target domain) while also leveraging knowledge from richly annotated domains. ProbKT supports arbitrary weak supervision thanks to the inherited expressiveness of Prolog [41] which is based on a subset of first-order predicate logic, Horn clauses and is Turing-complete.", + "bbox": [ + 169, + 103, + 826, + 188 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Weakly supervised object detection (WSOD) This class of method allows training object detection models with only weak supervision. One can thus train these approaches directly on the target domain. However, they do not allow to leverage potentially available richly annotated datasets, which has been shown to lead to worse performance [39]. Different flavors of WSOD architectures have been proposed relying on a variety of implementations such as multiple instance learning (MIL)-based [25; 40] or class activation (CAM) based [47; 3]. 
In contrast to WSOD methods, our approach is designed to exploit existing richly annotated datasets and thus provides increased performance on the target domain. For a comprehensive review of WSOD methods we refer the reader to Shao et al. [39].", + "bbox": [ + 169, + 193, + 826, + 308 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Domain adaptation methods (DA) In contrast to WSOD methods, domain adaptation methods do rely on fully supervised source domain dataset. However, they do not assume any supervision on the target domain and are therefore not equipped to exploit such signal when available [37; 8; 22; 48].", + "bbox": [ + 169, + 311, + 825, + 356 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "WSOD with knowledge transfer Our approach belongs to the class of weakly supervised object detection models with knowledge transfer. These methods aim to transfer knowledge from a source domain, where full supervision is available, to a target domain where only weak labels are available. Existing work in this class of models only allows for limited type of supervision of the target domain. Most architectures only support a label indicating the presence or absence of a class of object in the image[14; 46; 43]. Inoue et al. [20] allows for class counts as weak supervision but unfortunately does not allow for new classes in the target domain. In contrast, ProbKT natively allows for class counts and new classes as well as other types of weak supervision.", + "bbox": [ + 169, + 361, + 826, + 474 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Neural probabilistic logical reasoning Probabilistic logical reasoning combines logic and probability theory. Favored for its high-level reasoning abilities, it was introduced as an alternative way to deep learning in the quest for artificial intelligence [10]. Statistical artificial intelligence [32; 23] and probabilistic logic programming [11] are examples of areas relying on these premises. 
In a unification effort, researchers have proposed hybrid architectures, embedding both deep learning and logical reasoning components [38; 35]. Our work builds upon the recent advances in the field, where combinations of deep learning, logical, and probabilistic approaches were introduced [27], allowing high-level reasoning with uncertainty using differentiable neural network architectures.", + "bbox": [ + 169, + 479, + 826, + 592 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/ffed8d9d0b2a815dd61a3c3b9f359f8fe7ebf87864249d776cdd234705f0d080.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodTypeAnnotated source dom.Weak supervisionNew classesImplementation
Li et al. [25]WSODXpresence/absenceMIL-based
Bilen and Vedaldi [6]WSODXpresence/absencespatial pyramid pooling layer
Song et al. [40]WSODXpresence/absenceMIL based
Zhou et al. [47]WSODXmixCAM-based
Bae et al. [3]WSODXmixCAM based
Kundu et al. [24]DAone-shotClass-Incremental DA
Saito et al. [37]DAXXStrong-Weak Distribution Alignment
Chen et al. [8]DAXXAdversarial training
Kim et al. [22]DAXXAdversarial training and Domain Diversification
Zhu et al. [48]DAXXselective region adaptation framework
Deselaers et al. [14]WSOD w/transferpresence/absenceCRF-based, iteratively
Zhong et al. [46]WSOD w/transferpresence/absenceMIL based, iteratively
Uijlings et al. [43]WSOD w/transferpresence/absenceMIL based, non iteratively
Inoue et al. [20]WSOD w/transferclass countsXDA + pseudolabeling, iteratively
ProbKT (ours)WSOD w/transferarbitraryProbabilistic logical reasoning, iteratively
", + "bbox": [ + 173, + 597, + 823, + 752 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1: Summary table of related works with weakly supervised object detection(WSOD), Domain Adaptation(DA) and weakly supervised knowledge transfer methods (WSOD w/ transfer).", + "bbox": [ + 169, + 762, + 823, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 METHODOLOGY", + "text_level": 1, + "bbox": [ + 171, + 823, + 344, + 838 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 PROBLEM STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 854, + 370, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We consider the problem of weakly supervised knowledge transfer for object detection. Using a model trained on a richly annotated source domain, we aim at improving its performance on a less richly annotated target domain.", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Let $\\mathcal{D}_s = \\{(I_s^i, b_s^i, y_s^i) : i = 1, \\dots, N_s)\\}$ be a dataset issued from the source domain and consisting of $N_s$ images $I_s$ along with their annotations. We write $b_s^i \\in \\mathbb{R}^{n_i \\times 4}$ and $y_s^i \\in \\{1, \\dots, K_s\\}^{n_i}$ for the box coordinates and class labels of objects in image $I_s^i$ , $n_i$ is the number of objects present in image $I_s^i$ and $K_s$ is the total number of object classes in the source domain. This represents the typical dataset required to train classical fully-supervised object detection architectures. The target dataset $\\mathcal{D}_t = \\{(I_t^i, q_t^i) : i = 1, \\dots, N_t)\\}$ contains $N_t$ image from the target domain along with image-level annotations $q_t^i$ . 
These annotations are logical statements about the content of the image in terms of object classes and their location. Examples include the presence of different classes in each image (i.e., the classical assumption in weakly supervised object detection) but also extends to the counts of classes or a complex combination of counts of objects attributes (e.g., \"two red objects, and at least two bicycles\"). What is more, the logical statements $q_t^i$ can include classes not already present in the source domain. This type of logical annotation is then strictly broader than the restrictive supervision usually assumed.", + "bbox": [ + 169, + 103, + 826, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Based on the availability of a source dataset and a target dataset as described above, our goal is then to harness the available detailed information from the source domain to perform accurate object detection on the target domain. A graphical illustration of this process is given in Figure 1.", + "bbox": [ + 169, + 290, + 823, + 335 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 BACKGROUND", + "text_level": 1, + "bbox": [ + 171, + 359, + 316, + 375 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 OBJECT DETECTION", + "text_level": 1, + "bbox": [ + 171, + 390, + 370, + 404 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Object detection aims at predicting the location and labels of objects in images. One then wishes to learn a parametric function $f_{\\theta}:\\mathcal{I}\\rightarrow \\{\\mathcal{B}\\times \\mathbb{R}^{K}\\}^{\\mathbb{Z}}$ with $f_{\\theta}(I) = \\{(\\hat{b},\\hat{p}_y)\\}^{\\hat{n}} = \\{(\\hat{b}_i,\\hat{p}_{y,i}):i = 1,\\dots,\\hat{n}\\}$ such that the distance between predicted and true boxes and labels, $d(\\{\\hat{(b},\\hat{p}_y)\\}^{\\hat{n}},\\{(b,y)\\}^{n})$ , is minimum. 
Objects detection architecture would usually output box features proposals $\\{h_i:i = 1,\\dots,\\hat{n}\\}$ conditioned on which they would predict the probability vector of class labels $\\hat{p}_{y,i} = g_p(h_i)$ and the box location predictions $\\hat{b}_i = g_b(h_i)$ using shared parametric functions $g_{p}(\\cdot)$ and $g_{b}(\\cdot)$ . For an object $n$ , we write the predicted probability of the object belonging to class $k$ as $\\hat{p}_{y,n}^{k}$ .", + "bbox": [ + 169, + 417, + 826, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 PROBABILISTIC LOGICAL REASONING", + "text_level": 1, + "bbox": [ + 171, + 551, + 491, + 566 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Probabilistic logical reasoning uses knowledge representation relying on probabilities that allow encoding uncertainty in knowledge. Such a knowledge is encoded in a probabilistic logical program $\\mathcal{P}$ as a set of $N$ probabilistic facts $U = \\{U_{1},\\dots,U_{N}\\}$ and $M$ logical rules $F = \\{f_{1},\\dots f_{M}\\}$ connecting them. A simple example of probabilistic fact is \"Alice and Bob will each pass their exam with probability 0.5\" and an example of logical rule is \"if both Alice and Bob pass their exam, they will host a party\". Combining probabilistic facts and logical rules, one can then construct complex probabilistic knowledge representation, that can also be depicted as probabilistic graphical models.", + "bbox": [ + 169, + 579, + 823, + 679 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Probabilistic logical programming allows to perform inference by computing the probability of a particular statement or query. For instance, one could query the probability that \"Alice and Bob will host a party\". This query is executed by summing over the probabilities of occurrence of the different worlds $w = \\{u_1, \\dots, u_N\\}$ (i.e. 
individual realization of the set of probabilistic facts) that are compatible with the query $q$ . The probability of a query $q$ in a program $\\mathcal{P}$ can then be inferred as $P_{\\mathcal{P}}(q) = \\sum_{w} P(w) \\cdot \\mathbb{I}[F(w) \\equiv q]$ , where $F(w) \\equiv q$ stands for the fact that propagation of the realization $w$ across the knowledge graph, according to the logical rules $F$ leads to $q$ being true.", + "bbox": [ + 169, + 684, + 823, + 784 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Remarkably, recent advances in probabilistic programming have led to learnable probabilistic facts [27]. In particular, the probability of a fact can be generated by a neural network with learnable weights. Such a learnable probabilistic fact is then referred to as a neural predicate $U^{\\theta}$ , where we make the dependence on the weights $\\theta$ explicit. One can then train these weights to minimize a loss that depend on the probability of a query $q$ : $\\hat{\\theta} = \\arg \\min_{\\theta} \\mathcal{L}(P(q \\mid \\theta))$ .", + "bbox": [ + 169, + 787, + 823, + 863 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our approach builds upon this ability to learn neural predicates and uses DeepProbLog [27] as the probabilistic reasoning backbone. DeepProbLog is a neural probabilistic logic programming language that allows to conveniently perform inference and differentiation with neural predicates. We refer the reader to the excellent introduction of Manhaeve et al. 
[28] for further details about this framework.", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 PROBKT: WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH PROBABILISTIC LOGICAL REASONING", + "text_level": 1, + "bbox": [ + 171, + 103, + 823, + 132 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A graphical description of our approach is presented in Figure 2. Our framework starts from a pre-trained object detection model $f_{\\theta}$ on the source domain. The backbone of this model is extracted and inserted into a new object detection model $f_{\\theta}^{*}$ with new target box position predictors and box label classifiers. This new model is then used to predict box proposals along with the corresponding box features on target domain images $I_{t}$ . These box features are then fed to a new target box position predictor and box label classifier. The predictions of this classifier are considered neural predicates and are given to a probabilistic logical module. This module evaluates the probability of queries $q_{t}$ , the loss, and the corresponding gradient that can be backpropagated to the classifier and the backbone. 
As we want to maximize the probability of the queries being true, we use the following loss function:", + "bbox": [ + 169, + 143, + 826, + 270 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\theta} = \\sum_ {\\left(I _ {t}, q _ {t}\\right) \\in \\mathcal {D} _ {t}} - \\log P _ {\\mathcal {P}} \\left(q _ {t} \\mid f _ {\\theta} ^ {*} \\left(I _ {t}\\right)\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 292, + 823, + 327 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In theory, the backbone can be trained end to end with this procedure. Our experiments showed that only updating the box features classifiers resulted in more stability as also shown in previous works [46]. We then adopt here the same iterative relabeling strategy, as described next.", + "bbox": [ + 169, + 339, + 823, + 383 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/dffc40d15684072de167b6480bb4e4eca8afe6c9bdae887f549c4e44330c8f86.jpg", + "image_caption": [ + "Figure 2: ProbKT. The pre-trained object detection backbone outputs the box features $h$ for the detected objects. Box classifiers (red) and box position predictors (blue) then predict corresponding label predictions $\\hat{p}_y$ and box position predictions $\\hat{b}$ that are fed to the probabilistic reasoning layer. This layer computes the probability of the query along with the gradients with respect to $\\hat{p}_y$ and $\\hat{b}$ that can be backpropagated through the entire network." + ], + "image_footnote": [], + "bbox": [ + 217, + 397, + 784, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.1 ITERATIVE RELABELING", + "text_level": 1, + "bbox": [ + 171, + 738, + 398, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The approach described above allows to fine-tune our model $f_{\\theta}^{*}$ to the target domain. 
To further improve the performance, we propose an iterative relabeling strategy that consists in multiple steps: fine-tuning, re-labeling and re-training. A similar has also been proposed by Zhong et al. [46].", + "bbox": [ + 169, + 762, + 826, + 805 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Fine-tuning. This step corresponds to training ProbKT on the weakly supervised labels, by minimizing the loss of Equation 1.", + "bbox": [ + 169, + 811, + 823, + 840 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Re-labeling. Once ProbKT has been trained, we can use its predictions to annotate images in the target domain. In practice, we only relabel images for which the model predictions comply with the available query labels in order to avoid too noisy labels.", + "bbox": [ + 169, + 845, + 823, + 888 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Re-training. The re-labeled target domain can be used to re-train the object detection backbone of ProbKTin a fully-supervised fashion.", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This procedure can be repeated multiple times to improve the quality of the relabeling and the quantity of relabelled in the target domain dataset. A graphical representation of the relabeling pipeline is presented in Figure 3.", + "bbox": [ + 169, + 103, + 826, + 147 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0d147b1754e149afa89c63b8de727c6f155d030239363796f62a085bc8d10f91.jpg", + "image_caption": [ + "Figure 3: Iterative relabeling. A full cycle is composed of a fine-tuning, a re-labeling and a re-training step. 
After one cycle, the fine-tuning step and/or re-labeling step can be iteratively repeated." + ], + "image_footnote": [], + "bbox": [ + 173, + 159, + 826, + 231 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3.2 COMPUTATIONAL COMPLEXITY AND APPROXIMATIONS", + "text_level": 1, + "bbox": [ + 171, + 292, + 609, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The computational complexity of inference in probabilistic programming depends on the specific query $q$ and several approximations have been proposed for improving the computation time [44]. We propose two approaches for reducing the computational cost adapted to object detection: (1) filtering the data samples before applying ProbKT (see Appendix Section C.1) or (2) when the supervision consists of the class labels counts, considering only the most probable world (ProbKT*) instead of all possible worlds.", + "bbox": [ + 169, + 316, + 823, + 402 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3.3 PROBKT*: THE MOST PROBABLE WORLD AND CONNECTION TO HUNGARIAN MATCHING", + "text_level": 1, + "bbox": [ + 171, + 416, + 764, + 444 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The probabilistic inference step requires a smart aggregation of all worlds compatible with the query $q$ . Yet, in certain cases, one can reduce the computational cost by only considering the most probable world. Indeed, consider the case when the query consists of the list of different class labels in the images. For a number of boxes $\\hat{n}$ proposed by the objects detection model, the query can be written as the set of labels $q = \\{y^i : i = 1, \\dots, \\hat{n}\\}$ . 
If we further write $\\hat{p}_{y,n}^k$ as the probability of the label of box $n$ belonging to class $k$ given by the model (as introduced in Section 3.2.1), we have:", + "bbox": [ + 169, + 454, + 826, + 539 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nP _ {\\mathcal {P}} (q) = \\sum_ {j = 1} ^ {\\hat {n}!} \\hat {p} _ {y, 0} ^ {\\sigma_ {j} (0)} \\cdot \\hat {p} _ {y, 1} ^ {\\sigma_ {j} (1)} \\cdot \\ldots \\cdot \\hat {p} _ {y, \\hat {n}} ^ {\\sigma_ {j} (\\hat {n})} = \\sum_ {j = 1} ^ {\\hat {n}!} \\prod_ {n} \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)}\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 545, + 686, + 588 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\sigma_{j}$ corresponds to the $j^{th}$ permutation of the query vector $q$ . To avoid the computation of each possible world contribution, one can only use the configuration with the largest contribution to $P_{\\mathcal{P}}(q)$ and discard the other ones.", + "bbox": [ + 169, + 595, + 823, + 638 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This possible world corresponds to the permutation $\\sigma^{*}$ that satisfies:", + "bbox": [ + 169, + 643, + 622, + 660 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma^ {*} = \\underset {\\sigma} {\\arg \\max} \\log (\\prod_ {n} \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)}) = \\underset {\\sigma} {\\arg \\max} \\sum_ {n} \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)} = \\underset {\\sigma} {\\arg \\min} \\sum_ {n} (1 - \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)}).\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 666, + 766, + 698 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Remarkably, this corresponds to the solution of the best alignment using the Hungarian matching algorithm with cost $c(n) = (1 - \\hat{p}_{y,n}^{\\sigma_j(n)})$ , as used, among others, in DETR [7]. 
Thus, when the query is the set of class labels, the most plausible world can thus be inferred with the Hungarian matching algorithm. In Appendix C.2, we also show that the gradient of ProbKT can be interpreted as a probability weighted extension of the gradient resulting from the Hungarian matching.", + "bbox": [ + 169, + 703, + 823, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 797, + 328, + 811 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 DATASETS", + "text_level": 1, + "bbox": [ + 171, + 828, + 289, + 842 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate our approach on three different datasets: (1) a CLEVR-mini dataset, (2) a Molecules dataset with images of chemical compounds, and (3) an MNIST-based object detection dataset. For each dataset, three subsets, corresponding to different domains, are used: (1) a source domain, (2) a target domain, and (3) an out-of-distribution domain (OOD). The source domain is the richly annotated domain that was used to pre-train the object detection model. The target domain is", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 960 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the domain of interest but with image-level annotations only. Lastly, the OOD domain contains images from a different distribution than the source and target domains and is used to study the generalizability of the models. Source and target domains are split into 5 folds of train and validation sets and an independent test set. We focused our experiments on the small sample regime (range 1k-2k numbers of samples) both for the source as the target domain. 
More details on each dataset can be found in Appendix B.", + "bbox": [ + 169, + 103, + 826, + 189 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 MODELS", + "text_level": 1, + "bbox": [ + 171, + 204, + 277, + 218 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the experiments, we apply our method ProbKT on two different pre-trained object detection backbone models: (1) DETR [7] and (2) FasterRCNN [34]. Both are pre-trained on the COCO dataset [26]. We also evaluate an Hungarian-algorithm approximation (ProbKT*) of our method when the weak supervision allows it. For sake of conciseness, we omit the results of ProbKT* here but they can be found in Appendix D. The details of the training procedures, as well as the hyper-parameters used for the different models and the different datasets are summarized in Table 4 in Appendix A.", + "bbox": [ + 169, + 229, + 823, + 316 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.1 BASELINE MODELS", + "text_level": 1, + "bbox": [ + 171, + 329, + 364, + 342 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Section 2, all available approaches for weakly supervised object detection are very restrictive in terms of the supervision signal they support. Our main comparison partner is the state of the art WSOD-transfer method [46].", + "bbox": [ + 169, + 352, + 823, + 397 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Additionally, we compare our approach against a Resnet50 [18] backbone pre-trained on ImageNet [12]. Fine-tuning is performed by adding an extra multitask regression layer that is trained to predict the individual counts of the objects in the image as in Xue et al. [45]. This architecture naturally relies only on label counts in the target images for fine-tuning. We then predict box predictions using class activation maps as in Bae et al. [3] to compare its performance on object localization. 
We call this approach Resnet50-CAM.", + "bbox": [ + 169, + 401, + 826, + 486 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "When the supervision signal allows it, we also compare with a DETR model trained end-to-end jointly on target and source domains, masking the box costs in the matching cost of the Hungarian algorithm for image-level annotated samples. We call this approach DETR-joint.", + "bbox": [ + 169, + 492, + 823, + 536 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b92c915e5e83b9e7260a3908fa40d0cde20e22714dfe2355770a2bd6ef9f706a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelData DomainCLEVR count acc.CLEVR mAP (mAP@IoU=0.5)Mol. count. accMol. mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.97 ± 0.0050.036 ± 0.014 (0.200 ± 0.071)0.978 ± 0.0040.0 ± 0.0 (0 ± 0)
Resnet50-CAMOOD0.831 ± 0.0160.029 ± 0.010 (0.153 ± 0.044)0.0 ± 0.0n/a*
Resnet50-CAMsource domain0.993 ± 0.0030.035 ± 0.019 (0.178 ± 0.084)0.828 ± 0.0210.0 ± 0.0 (0 ± 0)
WSOD-transfertarget domain0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)0.001 ± 0.00.018 ± 0.004 (0.061 ± 0.011)
WSOD-transferOOD0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)0.003 ± 0.002n/a*
WSOD-transfersource domain0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)0.0 ± 0.00.021 ± 0.003 (0.069 ± 0.009)
DETR-jointtarget domain0.159 ± 0.1330.579 ± 0.012 (0.684 ± 0.019)0.357 ± 0.1960.197 ± 0.055 (0.481 ± 0.071)
DETR-jointOOD0.084 ± 0.0390.534 ± 0.012 (0.66 ± 0.012)0.024 ± 0.021n/a*
DETR-jointsource dom.0.923 ± 0.0490.908 ± 0.017 (0.992 ± 0.001)0.232 ± 0.1270.23 ± 0.063 (0.565 ± 0.08)
RCNN (pre-trained)target domain0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)0.592 ± 0.0070.568 ± 0.005 (0.785 ± 0.004)
RCNN (pre-trained)OOD0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)0.348 ± 0.036n/a*
RCNN (pre-trained)source domain0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)0.948 ± 0.0040.737 ± 0.005 (0.979 ± 0.0)
DETR (pre-trained)target domain0.0 ± 0.00.498 ± 0.019 (0.533 ± 0.024)0.464 ± 0.0330.314 ± 0.006 (0.542 ± 0.006)
DETR (pre-trained)OOD0.0 ± 0.00.477 ± 0.013 (0.531 ± 0.021)0.002 ± 0.001n/a*
DETR (pre-trained)source domain0.97 ± 0.0090.945 ± 0.009 (0.992 ± 0.001)0.581 ± 0.0220.409 ± 0.005 (0.722 ± 0.004)
ProbKT (DETR)target domain0.946 ± 0.0140.803 ± 0.011 (0.989 ± 0.006)0.508 ± 0.0270.204 ± 0.02 (0.507 ± 0.014)
ProbKT (DETR)OOD0.726 ± 0.0350.715 ± 0.006 (0.974 ± 0.006)0.004 ± 0.003n/a*
ProbKT (DETR)source domain0.987 ± 0.0030.948 ± 0.005 (0.995 ± 0.001)0.549 ± 0.0260.38 ± 0.013 (0.713 ± 0.006)
ProbKT (RCNN)target domain0.975 ± 0.0030.856 ± 0.039 (0.993 ± 0.001)0.942 ± 0.0090.289 ± 0.041 (0.829 ± 0.054)
ProbKT (RCNN)OOD0.89 ± 0.0220.833 ± 0.042 (0.991 ± 0.001)0.603 ± 0.037n/a*
ProbKT (RCNN)source domain0.995 ± 0.0020.941 ± 0.041 (0.998 ± 0.001)0.96 ± 0.0020.666 ± 0.005 (0.978 ± 0.002)
", + "bbox": [ + 173, + 546, + 823, + 779 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Results of the experiments for the datasets: CLEVR-mini and Molecules. Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution. *: OOD test set of Molecules dataset has no bounding box labels.", + "bbox": [ + 169, + 787, + 823, + 832 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 EVALUATION METRICS", + "text_level": 1, + "bbox": [ + 171, + 856, + 375, + 871 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluate the performance of the models on the different datasets based on two criteria: the count accuracy and the objects localization performance. The count accuracy measures the ratio of correct images where all individual counts of (all detected) objects are correct. To evaluate how well the", + "bbox": [ + 169, + 881, + 823, + 926 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "model is performing in localizing the different objects in the image we report the mean average precision (mAP) performance, a widely used metric for evaluating object detection models.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH CLASS COUNTS", + "text_level": 1, + "bbox": [ + 171, + 152, + 691, + 166 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We first investigate the performance of ProbKT when the weakly supervision consists of class counts only. The query $q$ for each image then consists of the number of objects from each class in the image. We evaluate the models on the CLEVR-mini and Molecules datasets. 
For the Molecules dataset, the query for an image containing 6 carbon atoms (C), 6 oxygen atoms (O) and 12 hydrogen atoms (H) would result in the following query: $q = ([C,O,H],[6,6,12])$ . These weak labels in the case of the Molecules dataset are widely and easily available in the form of the chemical formula of the molecule on the image (e.g $C_6H_{12}O_6$ ). The recognition of atomic level entities on images of molecules is a challenge in the field of Optical Chemical Structure Recognition (OCSR) [9; 33; 29; 19]. For the CLEVR-mini dataset, the query for an example image containing 2 spheres, 1 cylinder and 3 cubes would be $q = ([\\mathrm{Cube},\\mathrm{Cylinder},\\mathrm{Sphere}],[3,1,2])$ . Formal descriptions of the queries for each task are presented in Appendix E.", + "bbox": [ + 169, + 179, + 826, + 332 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Results of the experiments are summarized in Table 2. We observe on both datasets that ProbKT is able to transfer knowledge from the source domain to the target domain and improve count accuracy on the target domain and in most cases also on the source domain. The count accuracy increases on both the target domain and on OOD, suggesting better generalization performance. This is in contrast with Resnet50-CAM which performs well on the target domain of the Molecules dataset but fails on OOD. We also note a significant improvement in object localization (mAP) for ProbKT on the CLEVR-mini dataset. However, fine-tuning seems detrimental for mAP on the Molecules dataset. This can be explained by the very small bounding boxes in the Molecules dataset. We therefore also report the mAP@IoU=0.5 where we observe some increase in performance after fine-tuning. Lastly, we observe that our approach outperforms WSOD-transfer on all metrics for both datasets. WSOD-transfer performs well on CLEVR-mini but fails for the Molecules dataset. 
This can be explained by the fact that this method only supports class indicators (whether a class is present in the image), which is particularly detrimental in molecules images containing a lot of objects.", + "bbox": [ + 169, + 339, + 826, + 521 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5 OTHER TYPES OF WEAK SUPERVISION", + "text_level": 1, + "bbox": [ + 171, + 540, + 478, + 554 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5.1 CLASS RANGES", + "text_level": 1, + "bbox": [ + 171, + 566, + 336, + 580 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The annotation of images is a tedious task, which limits the availability of fully annotated datasets. When the number of objects on an image is large, counting the exact number of objects of a particular class becomes too time-consuming. A typical annotation in this case consists of class ranges where, instead of exact class counts, an interval is given for the count. For example an image from the CLEVR-mini dataset with more than 4 cubes, exactly 4 cylinders and less than 4 spheres would result in the following query: $q = ([\text{cube}, \text{cylinder}, \text{sphere}], [[4, \infty[, [4, 5[, [0, 4[])$ . We evaluate this experimental setup and report results in Table 3. We observe that ProbKT performs significantly better than WSOD-transfer on count accuracy, which still uses only presence/absence labels. We note that Resnet50-CAM is unable to use this type of supervision and is thus reported as $n / a$ .", + "bbox": [ + 169, + 592, + 826, + 719 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9d4fed505265ed996e65eb8ef74e410c001db88ab11fd67c0481eca4169f992a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelData DomainMNIST count acc.MNIST sum acc.MNIST mAP (mAP@IoU=0.5)CLEVR* count acc.CLEVR* mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.044 ± 0.0410.506 ± 0.0630.003 ± 0.003(0.014 ± 0.011)n/an/a
Resnet50-CAMOOD0.01 ± 0.0090.015 ± 0.0040.003 ± 0.002(0.011 ± 0.007)n/an/a
Resnet50-CAMsource domain0.127 ± 0.1320.649 ± 0.1080.005 ± 0.004(0.028 ± 0.018)n/an/a
WSOD-transfertarget domainn/an/an/a0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)
WSOD-transferOODn/an/an/a0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)
WSOD-transfersource domainn/an/an/a0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)
RCNN (pre-trained)target domain0.292 ± 0.0050.298 ± 0.0050.632 ± 0.014 (0.685 ± 0.002)0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)
RCNN (pre-trained)OOD0.205 ± 0.0040.212 ± 0.0040.631 ± 0.013 (0.683 ± 0.002)0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)
RCNN (pre-trained)source domain0.961 ± 0.0080.961 ± 0.0080.917 ± 0.021 (0.988 ± 0.002)0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)
ProbKT (RCNN)target domain0.902 ± 0.0050.903 ± 0.0050.786 ± 0.021 (0.974 ± 0.001)0.971 ± 0.0060.838 ± 0.034 (0.993 ± 0.001)
ProbKT (RCNN)OOD0.863 ± 0.0080.865 ± 0.0080.778 ± 0.021 (0.97 ± 0.001)0.884 ± 0.010.812 ± 0.036 (0.991 ± 0.001)
ProbKT (RCNN)source domain0.967 ± 0.0040.967 ± 0.0040.873 ± 0.016 (0.989 ± 0.001)0.994 ± 0.0010.922 ± 0.035 (0.998 ± 0.001)
", + "bbox": [ + 173, + 734, + 823, + 858 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3: Results of the experiments on the MNIST object detection dataset and on CLEVR* dataset (*CLEVR uses ranges of class counts as labels instead of exact class counts). Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution.", + "bbox": [ + 169, + 866, + 823, + 909 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5.2 COMPLEX QUERIES", + "text_level": 1, + "bbox": [ + 171, + 103, + 366, + 118 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "More complex types of weak supervision than the ones considered above are also possible. To illustrate the capabilities of our approach, we build an MNIST object detection dataset where images show multiple digits as objects. Examples images are available in Appendix B. The weak supervision is here the sum of all digits in the image: $q = \\mathrm{SUM}(\\mathrm{digits})$ . Our ProbKT can seamlessly integrate this type of supervision as shown in Table 3. As all other baselines are unable process this type of supervision, we compare against a pre-trained RCNN and a variation of Resnet50-CAM where we add an extra neural network layer that sums the individual counts to give the resulting sum. We report count accuracy, mAP and sum accuracy. The sum accuracy measures the ratio of correct images where the predicted sum (instead of the label of the digits) is correct. 
Details about the results on extra experiments with DETR as backbone using complex types of weak supervision can be found in Appendix D.", + "bbox": [ + 169, + 128, + 826, + 282 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.6 ABLATION STUDIES", + "text_level": 1, + "bbox": [ + 171, + 297, + 354, + 311 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0e773b45cbf7640f37656d4e8b4687dcfb7363cfd1d1ec58084cdbf33c00298d.jpg", + "image_caption": [ + "(a) CLEVR iterative relabeling" + ], + "image_footnote": [], + "bbox": [ + 173, + 335, + 367, + 441 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/d17235340db1e49a74095c49b3c4b80909d704f18f417672a46e1577bd8f7bad.jpg", + "image_caption": [ + "Figure 4: Iterative relabeling performance for the different datasets. Iteration 0: pretrained on source domain. Iteration 1: fine-tuned. Iteration 2: re-labeled and re-trained. Iteration 3: relabeled and re-trained. Iteration 4: relabeled and re-trained." + ], + "image_footnote": [], + "bbox": [ + 385, + 333, + 584, + 441 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/01b3ce7dde365f828fc3b5f7694aacef5d650d5f027b39ff3eafe00a70f51d7e.jpg", + "image_caption": [ + "(b) Molecules iterative relabeling", + "(c) MNIST iterative relabeling" + ], + "image_footnote": [], + "bbox": [ + 606, + 333, + 805, + 441 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Iterative relabeling. In Figure 4, we plot the evolution of the performance on the test sets after multiple rounds of fine-tuning and re-labeling, as detailed in Section 3.3.1. The final performance reported in the results tables is selected based on best relabeling iteration on the validation dataset. We observe that iterative relabeling after fine-tuning can improve performance significantly. Nevertheless, the benefit of iterative relabeling is less pronounced for DETR on the Molecules dataset. 
We impute it to the fact that the fine-tuned DETR model is less accurate on this dataset.", + "bbox": [ + 169, + 537, + 823, + 621 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Object detection backbone", + "text_level": 1, + "bbox": [ + 171, + 627, + 362, + 642 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our method can seamlessly accommodate different object detection backbones. In Table 2, we present the results for our method with a DETR[7] and a FasterRCNN[34] backbone. We observe that FasterRCNN is typically performing better. In particular, the DETR backbone performs poorly on the Molecules dataset. This could be due to the small objects in the Molecules dataset. Indeed, Carion et al. [7] recommend to use DETR-DC5 or DETR-DC5-R101 for small objects instead.", + "bbox": [ + 169, + 648, + 826, + 719 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSIONS AND DISCUSSION", + "text_level": 1, + "bbox": [ + 171, + 739, + 478, + 753 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Objects detection models are a key component of machine learning deployment in the real world. However, training such models usually requires large amounts of richly annotated images that are often prohibitive for many applications. In this work, we proposed a novel approach to train object detection models by leveraging richly annotated datasets from other domains and allowing arbitrary types of weak supervision on the target domain. Our architecture relies on a probabilistic logical programming engine that efficiently blends the power of symbolic reasoning and deep learning architecture. As such, our model also inherits the current limitations from the probabilistic reasoning implementations, such as higher computational complexity. We proposed several approaches to speed-up the inference process significantly and our work will directly benefit from further advances in this field. 
Lastly, the versatility of probabilistic programming could help support other related tasks in the future, such as image to graph translation.", + "bbox": [ + 169, + 771, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Reproducibility Statement Details for reproducing all experiments shown in this work are available in Appendix E. More details on the datasets used in the experiments can be found in Appendix B.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 147, + 328, + 161 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "AA, MO and YM are funded by (1) Research Council KU Leuven: Symbiosis 4 (C14/22/125), Symbiosis3 (C14/18/092); (2) Federated cloud-based Artificial Intelligence-driven platform for liquid biopsy analyses (C3/20/100); (3) CELSA - Active Learning (CELSA/21/019); (4) European Union's Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No. 956832; (5) Flemish Government (FWO: SBO (S003422N), Elixir Belgium (I002819N), SB and Postdoctoral grants: S003422N, 1SB2721N, 1S98819N, 12Y5623N) and (6) VLAIO PM: Augmenting Therapeutic Effectiveness through Novel Analytics (HBC.2019.2528); (7) YM, AA, EDB, and MO are affiliated to Leuven.AI and received funding from the Flemish Government (AI Research Program). EDB is funded by a FWO-SB grant (S98819N). 
Computational resources and services used in this work were partly provided by the VSC (Flemish Supercomputer Center), funded by the Research Foundation - Flanders (FWO) and the Flemish Government - department EWI.", + "bbox": [ + 169, + 170, + 826, + 338 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 358, + 287, + 373 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Mnist object detection dataset. URL https://github.com/hukkelas/MNIST-ObjectDetection. accessed on 01.02.2022.", + "[2] Rdkit: Open-source cheminformatics. URL https://www.rdkit.org. accessed on 01.02.2022.", + "[3] Wonho Bae, Junhyug Noh, and Gunhee Kim. Rethinking class activation mapping for weakly supervised object localization. In European Conference on Computer Vision, pages 618-634. Springer, 2020.", + "[4] Aseem Behl, Omid Hosseini Jafari, Siva Karthik Mustikovela, Hassan Abu Alhaija, Carsten Rother, and Andreas Geiger. Bounding boxes, segmentations and object coordinates: How important is recognition for 3d scene flow estimation in autonomous driving scenarios? In Proceedings of the IEEE International Conference on Computer Vision, pages 2574-2583, 2017.", + "[5] Lukas Biewald. Experiment tracking with weights and biases, 2020. URL https://www.wandb.com/. Software available from wandb.com.", + "[6] Hakan Bilen and Andrea Vedaldi. Weakly supervised deep detection networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2846-2854, 2016.", + "[7] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020.", + "[8] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain adaptive faster r-cnn for object detection in the wild. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3339-3348, 2018.", + "[9] Djork-Arné Clevert, Tuan Le, Robin Winter, and Floriane Montanari. Img2mol-accurate smiles recognition from molecular graphical depictions. Chemical science, 12(42):14174-14181, 2021.", + "[10] Luc De Raedt and Kristian Kersting. Probabilistic logic learning. ACM SIGKDD Explorations Newsletter, 5(1):31-48, 2003.", + "[11] Luc De Raedt and Angelika Kimmig. Probabilistic (logic) programming concepts. Machine Learning, 100(1):5-47, 2015.", + "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009." + ], + "bbox": [ + 173, + 381, + 828, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Li Deng. The mnist database of handwritten digit images for machine learning research [best of the web]. IEEE signal processing magazine, 29(6):141-142, 2012.", + "[14] Thomas Deselaers, Bogdan Alexe, and Vittorio Ferrari. Weakly supervised localization and learning with generic knowledge. International journal of computer vision, 100(3):275-293, 2012.", + "[15] M. Everingham, L. Van Gool, C. K. I. Williams, J. Winn, and A. Zisserman. The Pascal visual object classes (voc) challenge. International Journal of Computer Vision, 88(2):303-338, June 2010.", + "[16] Eleonora Giunchiglia, Mihaela Cătălina Stoian, Salman Khan, Fabio Cuzzolin, and Thomas Lukasiewicz. Road-r: The autonomous driving dataset with logical requirements. 
arXiv preprint arXiv:2210.01597, 2022.", + "[17] Ibtihaal M Hameed, Sadiq H Abdulhussain, and Basheera M Mahmmod. Content-based image retrieval: A review of recent trends. *Cogent Engineering*, 8(1):1927469, 2021.", + "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016.", + "[19] Rodrigo Hormazabal, Changyoung Park, Soonyoung Lee, Sehui Han, Yeonsik Jo, Jaewan Lee, Ahra Jo, Seung Hwan Kim, Jaegul Choo, Moontae Lee, et al. Cede: A collection of expert-curated datasets with atom-level entity annotations for optical chemical structure recognition.", + "[20] Naoto Inoue, Ryosuke Furuta, Toshihiko Yamasaki, and Kiyoharu Aizawa. Cross-domain weakly-supervised object detection through progressive domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5001-5009, 2018.", + "[21] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017.", + "[22] Taekyung Kim, Minki Jeong, Seunghyeon Kim, Seokeon Choi, and Changick Kim. Diversify and match: A domain adaptive representation learning paradigm for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12456-12465, 2019.", + "[23] Daphne Koller, Nir Friedman, Sašo Džeroski, Charles Sutton, Andrew McCallum, Avi Pfeffer, Pieter Abbeel, Ming-Fai Wong, Chris Meek, Jennifer Neville, et al. Introduction to statistical relational learning. MIT press, 2007.", + "[24] Jogendra Nath Kundu, Rahul Mysore Venkatesh, Naveen Venkat, Ambareesh Revanur, and R Venkatesh Babu. Class-incremental domain adaptation. 
In European Conference on Computer Vision, pages 53-69. Springer, 2020.", + "[25] Dong Li, Jia-Bin Huang, Yali Li, Shengjin Wang, and Ming-Hsuan Yang. Weakly supervised object localization with progressive domain adaptation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3512-3520, 2016.", + "[26] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014.", + "[27] Robin Manhaeve, Sebastijan Dumancic, Angelika Kimmig, Thomas Demeester, and Luc De Raedt. Deepproblog: Neural probabilistic logic programming. Advances in Neural Information Processing Systems, 31, 2018.", + "[28] Robin Manhaeve, Sebastijan Dumančić, Angelika Kimmig, Thomas Demeester, and Luc De Raedt. Neural probabilistic logic programming in deepproblog. Artificial Intelligence, 298: 103504, 2021." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[29] Martijn Oldenhof, Adam Arany, Yves Moreau, and Jaak Simm. Chemographer: optical graph recognition of chemical compounds by deep learning. Journal of chemical information and modeling, 60(10):4506-4517, 2020.", + "[30] Martijn Oldenhof, Adam Arany, Yves Moreau, and Jaak Simm. Self-labeling of fully mediating representations by graph alignment. In Benelux Conference on Artificial Intelligence, pages 46-65. Springer, 2021.", + "[31] Martijn Oldenhof, Ádám Arany, Yves Moreau, and Edward De Brouwer. Updating object detection models with probabilistic programming. 2022. 
ICML workshop - UpML.", + "[32] Luc De Raedt, Kristian Kersting, Siraam Natarajan, and David Poole. Statistical relational artificial intelligence: Logic, probability, and computation. Synthesis lectures on artificial intelligence and machine learning, 10(2):1-189, 2016.", + "[33] Kohulan Rajan, Achim Zielesny, and Christoph Steinbeck. Decimer: towards deep learning for chemical image recognition. Journal of Cheminformatics, 12(1):1-9, 2020.", + "[34] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015.", + "[35] Tim Rocktäschel and Sebastian Riedel. End-to-end differentiable proving. Advances in neural information processing systems, 30, 2017.", + "[36] Noureddin M Sadawi, Alan P Sexton, and Volker Sorge. Chemical structure recognition: a rule-based approach. In Document Recognition and Retrieval XIX, volume 8297, page 82970E. International Society for Optics and Photonics, 2012.", + "[37] Kuniaki Saito, Yoshitaka Ushiku, Tatsuya Harada, and Kate Saenko. Strong-weak distribution alignment for adaptive object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6956–6965, 2019.", + "[38] Adam Santoro, David Raposo, David G Barrett, Mateusz Malinowski, Razvan Pascanu, Peter Battaglia, and Timothy Lillicrap. A simple neural network module for relational reasoning. Advances in neural information processing systems, 30, 2017.", + "[39] Feifei Shao, Long Chen, Jian Shao, Wei Ji, Shaoning Xiao, Lu Ye, Yueting Zhuang, and Jun Xiao. Deep learning for weakly-supervised object detection and localization: A survey. Neurocomputing, 2022.", + "[40] Hyun Oh Song, Ross Girshick, Stefanie Jegelka, Julien Mairal, Zaid Harchaoui, and Trevor Darrell. On learning to localize objects with minimal supervision. In International Conference on Machine Learning, pages 1611-1619. 
PMLR, 2014.", + "[41] Leon Sterling and Ehud Y Shapiro. The art of Prolog: advanced programming techniques. MIT press, 1994.", + "[42] Matteo Tomei, Marcella Cornia, Lorenzo Baraldi, and Rita Cucchiara. Art2real: Unfolding the reality of artworks via semantically-aware image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5849-5859, 2019.", + "[43] Jasper Uijlings, Stefan Popov, and Vittorio Ferrari. Revisiting knowledge transfer for training object class detectors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1101-1110, 2018.", + "[44] Thomas Winters, Giuseppe Marra, Robin Manhaeve, and Luc De Raedt. Deepstochlog: Neural stochastic logic programming. arXiv preprint arXiv:2106.12574, 2021.", + "[45] Yao Xue, Nilanjan Ray, Judith Hugh, and Gilbert Bigras. Cell counting by regression using convolutional neural network. In European Conference on Computer Vision, pages 274-290. Springer, 2016." + ], + "bbox": [ + 173, + 102, + 826, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[46] Yuanyi Zhong, Jianfeng Wang, Jian Peng, and Lei Zhang. Boosting weakly supervised object detection with progressive knowledge transfer. In European conference on computer vision, pages 615-631. Springer, 2020.", + "[47] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2921–2929, 2016.", + "[48] Xinge Zhu, Jiangmiao Pang, Ceyuan Yang, Jianping Shi, and Dahua Lin. 
Adapting object detectors via selective cross-domain alignment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 687-696, 2019." + ], + "bbox": [ + 171, + 102, + 826, + 250 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A TRAINING DETAILS", + "text_level": 1, + "bbox": [ + 174, + 102, + 370, + 116 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For the hyper-parameters the idea was to stay as close as possible to the defaults of the pre-trained standard models although some lightweight tuning was done. In Table 4 a summary is given for the hyper-parameters used for the different models.", + "bbox": [ + 174, + 133, + 823, + 175 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/1a7855b1571a57b9d911c5fb2fe516ac9cfcedac9e5308709342352c503fbf62.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Modeldatasetepochslrlr_step_sizelr-gammamomentumbatch sizeweight decayoptimizer
DETR pre-train (retrain)CLEVRmax 1000.00017 (7-8)0.180.0001AdamW
DETR pre-train (retrain)Mols.max 1000.000120 (20)0.180.0001AdamW
DETR pre-train (retrain)MNISTmax 1000.000115-20 (20)0.180.0001AdamW
RCNN pre-train (retrain)all datasetsmax 300.0055 (5)0.10.910.0005SGD
RCNN Finetuneall datasetsmax 200.00116Adam
DETR FinetuneCLEVR/Molsmax 200.00116Adam
DETR FinetuneMNISTmax 200.0116Adam
DETR Finetune*CLEVR/Molsmax 1000.002200.180.0001AdamW
RCNN Finetune*CLEVRmax 200.00115Adam
RCNN Finetune*Molsmax 200.0000115Adam
DETR masked box lossCLEVR/Molsmax 1000.000170.180.0001AdamW
Resnet50-CAM modelsall datasetsmax 5000.00132Adam
", + "bbox": [ + 173, + 189, + 823, + 320 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 4: Overview of hyperparameters for the different models, most hyperparamaters are left default from standard models. Tuning was mostly done on learning rate and learning rate scheduling. For every fold/dataset the best epoch/lr/lr_step_size model is used based on validation data.", + "bbox": [ + 174, + 329, + 823, + 372 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B DATASETS", + "text_level": 1, + "bbox": [ + 174, + 400, + 294, + 415 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We evaluate our approach on three different datasets: (1) a CLEVR-mini dataset, (2) a Molecules data set with images of chemical compounds, and (3) an MNIST-based object detection dataset. For each dataset, three subsets, corresponding to different domains, are used: (1) a source domain, (2) a target domain, and (3) an out-of-distribution domain (OOD). Source and target domains are split into 5 folds of train and validation sets and an independent test set. Sizes of the different splits per dataset are summarized in Table 5.", + "bbox": [ + 174, + 431, + 823, + 513 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 477, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 491, + 948, + 506, + 959 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/4654b6cb5b7c0ac89ad6173ef4a47655bb993f5dafcf0132b302ac68988e8906.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetTypeSplitSize (number of samples)
MNIST object detectionSourcetrain700
MNIST object detectionSourcevalidation300
MNIST object detectionSourcetest1000
MNIST object detectionTargettrain700
MNIST object detectionTargetvalidation300
MNIST object detectionTargettest1000
MNIST object detectionOODtest1000
MoleculesSourcetrain1400
MoleculesSourcevalidation600
MoleculesSourcetest1000
MoleculesTargettrain1400
MoleculesTargetvalidation600
MoleculesTargettest1000
MoleculesOODtest1000
", + "bbox": [ + 292, + 99, + 702, + 280 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 5: Dataset sizes for the different splits. For train and validations splits 5 folds are used.", + "bbox": [ + 192, + 289, + 803, + 305 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.0.1 CLEVR-MINI DATASET", + "text_level": 1, + "bbox": [ + 171, + 332, + 395, + 345 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The CLEVR-mini dataset for our experiments is a selection of samples from the CLEVR dataset [21]. The different types available in the CLEVR dataset are combinations of shapes (cube, sphere, and cylinder), materials (metal and rubber), and sizes (large and small). Colors are ignored as the images are first converted to grayscale before feeding them to the models. For the richly annotated source domain, we randomly select images with only sphere or cylinder-shaped objects (no cubes) and with a maximum of four objects per image and a minimum of three objects. For the weakly annotated target domain we experiment with two type of annotations. Firstly we experiment when we have the class counts of objects in the image available. Secondly, instead of the exact counts of classes in the image the annotations only specify if there is exactly one object class in the image or multiple. The advantage of this kind of labeling is that the annotator does not need to count the objects and instead just make a distinction of only one object class in image or multiple. The images in the target domain can contain all combinations of object types (including cube-shaped objects) and allow a minimum of five objects per image and a maximum of six objects per image. For the OOD dataset we also select images with all possible combinations of object types, always with 10 objects per image. 
Some example images from the CLEVR-mini dataset can be found in Figure 1.", + "bbox": [ + 169, + 356, + 826, + 565 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.0.2 MOLECULES DATASET", + "text_level": 1, + "bbox": [ + 171, + 580, + 385, + 594 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The Molecules dataset contains images depicting chemical compounds. For the richly annotated source domain, a procedure similar as described in Oldenhof et al. [29, 30] was executed using an RDKit [2] fork for generating the bounding box labels for the individual atoms present in the images. In the source domain, we allow the following atom types: carbon (C), hydrogen (H), oxygen (O), and nitrogen (N). In the weakly annotated target domain, we only have the counts of the atoms present which translates to the chemical formula of the molecule in the image ( $e.g. C_6H_{12}O_6$ ). The same classes from the source domain (C, H, O, and N) are also present in the target domain as well as an extra atom type: sulfur (S). The OOD test dataset consists of 1000 images from the external UoB dataset [36] where chemical compounds containing only the atom types present in the target domain (C, H, O, N, and S). Some example images from the Molecules dataset are visualized in Figure 5.", + "bbox": [ + 169, + 604, + 823, + 744 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.0.3 MNIST OBJECT DETECTION DATASET", + "text_level": 1, + "bbox": [ + 171, + 760, + 493, + 773 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The MNIST object detection dataset is generated [1] using the original MNIST dataset [13]. Each image consists of three MNIST digits randomly positioned in the image. The MNIST object detection dataset allows experimenting with a more arbitrary type of weak supervision. Each object in this dataset represents a digit that can be aggregated. 
This allows to label an image with only the sum of all digits in the image instead of the class counts of the objects. For the richly annotated source domain digits 7, 8, and 9 are left out. The weakly annotated target domain has all possible digit classes (0-9). The labels of the target domain only contain the sum of all digits. For the OOD test dataset, images are used that contain maximum of four MNIST digits, instead of three digits as in the other domains. Some example images from the MNIST object detection dataset are visualized in Figure 6.", + "bbox": [ + 169, + 784, + 823, + 924 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/8322a53663ffea603503373d41cb97feee570947a49aa14c27fc4c4d869989ed.jpg", + "image_caption": [ + "Figure 5: Weakly supervised knowledge transfer with probabilistic logical reasoning (ProbKT). On the left we have source domain where a model can be trained using bounding box information labels, positions) but only on a limited set of atom types (C,H,O,N). In the middle we can see that the pre-trained model is not able to recognize the sulfur (S) from target domain correctly. On the right we see that the model is able to adapt to target domain after probabilistic reasoning using weak labels (e.g. counts of objects on image) and is able to recognize the sulfur (S)." + ], + "image_footnote": [], + "bbox": [ + 179, + 99, + 821, + 301 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/a0b861faf5e883198d7f1afee3d04067d4e837bfd41f6b562175b9ecc4cec360.jpg", + "image_caption": [ + "Figure 6: Weakly supervised knowledge transfer with probabilistic logical reasoning (ProbKT). 
On the left we have source domain where a model can be trained using bounding box information labels, positions) but only on a limited set of digits (0, 1, 2, 3, 4, 5, 6). In the middle we can see that the pre-trained model is not able to recognize the digit eight (8) from target domain correctly. On the right we see that the model is able to adapt to target domain after probabilistic reasoning using weak labels (e.g. sum of digits on image) and is able to recognize the digit eight (8)." + ], + "image_footnote": [], + "bbox": [ + 178, + 436, + 821, + 640 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C PROBKT AND PROBKT* SUPPLEMENTARY DETAILS", + "text_level": 1, + "bbox": [ + 171, + 767, + 650, + 782 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1 FILTERING SAMPLES", + "text_level": 1, + "bbox": [ + 171, + 803, + 362, + 816 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The computation complexity of inference in the probabilistic programming module grows with the number of possible worlds. In turn, the number of possible worlds grows with the number of probabilistic facts $\\hat{n}$ .", + "bbox": [ + 169, + 832, + 826, + 875 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "One avenue to reduce the computational cost of the inference step is then to artificially reduce the number of probabilistic facts in each image. Let $\\{\\hat{p}_{y,n} : n = 1, \\dots, \\hat{n}\\}$ and $q$ the corresponding inference query. 
We compute the filtered set of probabilistic facts $\\bar{p}_{y,n}$ by setting", + "bbox": [ + 169, + 881, + 825, + 926 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {p} _ {y, n} ^ {k} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\hat {p} _ {y, n} ^ {k} \\geq \\delta \\\\ 0 & \\text {i f} \\exists k ^ {\\prime} \\text {s . t .} \\hat {p} _ {y, n} ^ {k ^ {\\prime}} \\geq \\delta \\\\ \\hat {p} _ {y, n} ^ {k} & \\text {o t h e r w i s e .} \\end{array} \\quad \\text {a n d} \\quad \\hat {p} _ {y, n} ^ {k} < \\delta \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 122, + 823, + 175 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The parameter $\\delta \\in [0,1]$ is a threshold at which we consider the probabilistic fact as certain. A probability of 1 or 0 effectively discards the probabilistic fact $\\bar{p}_{y,n}$ from the inference procedure. However, we also have to update the inference query $q$ to reflect this filtration. We write $\\bar{q}$ the filtered query $q$ .", + "bbox": [ + 169, + 186, + 826, + 244 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Example To illustrate this filtration strategy let's consider an MNIST image with 3 digits in the image: $\\{3,4,7\\}$ . The query $q$ corresponds to the class labels in the images. That is $q = \\{3,4,7\\}$ . The object detection backbones outputs 3 box features with corresponding probabilities $\\{\\hat{p}_{y,0},\\hat{p}_{y,1},\\hat{p}_{y,2},\\}$ . Now let e.g. $\\hat{p}_{y,1}^3 = 0.99$ . We can filter out $\\hat{p}_{y,1}$ (i.e. 
the prediction for a digit 3 is certain), and compute the filtered query $\\bar{q} = \\{4,7\\}$ .", + "bbox": [ + 169, + 257, + 826, + 333 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Remark Equation 2 suggests a filtering based on the output probabilities only. However, one can also use information about the query for the filtration. For instance, one would only filter out a probabilistic fact if it is consistent with the query $q$ . In the example above, it would be wiser not to filter out e.g. $\\hat{p}_{y,1}^{9} = 0.99$ as no images are supposedly present in the image. One should then ideally propagate this probabilistic fact to the inference module such as to update the weights of the backbone and learn from this error.", + "bbox": [ + 169, + 345, + 823, + 429 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.2 GRADIENT OF THE LIKELIHOOD", + "text_level": 1, + "bbox": [ + 171, + 446, + 439, + 460 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The ProbKT likelihood has the following form:", + "bbox": [ + 171, + 472, + 488, + 488 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nP _ {\\mathcal {P}} (q) = \\sum_ {\\alpha \\in E _ {q}} \\prod_ {i} \\prod_ {j} \\hat {p} _ {i j} ^ {\\alpha_ {i j}},\n$$\n", + "text_format": "latex", + "bbox": [ + 405, + 510, + 589, + 546 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $\\alpha$ is a \"possible world\" matrix of indicator variables:", + "bbox": [ + 169, + 558, + 563, + 574 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {i j} = \\left\\{ \\begin{array}{l l} 1 & \\text {o b j e c t i i s o f c l a s s j} \\\\ 0 & \\text {o t h e r w i s e ,} \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 598, + 596, + 631 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "and $E_{q}$ is the set of all possible $\\alpha$ worlds compatible with the logical annotation $q$ .", + "bbox": [ + 
169, + 637, + 712, + 652 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Lemma 1. The gradient of the likelihood has the following form:", + "bbox": [ + 169, + 656, + 602, + 671 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial P _ {\\mathcal {P}} (q)}{\\partial \\theta} = \\sum_ {i} \\sum_ {j} \\frac {\\partial p _ {i j}}{\\partial \\theta} C _ {i j},\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 676, + 596, + 714 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where the weight has the form:", + "bbox": [ + 171, + 719, + 380, + 734 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nC _ {i j} = P (E | O _ {i} = j) = \\sum_ {\\alpha \\in E | O _ {i = j}} \\prod_ {i ^ {\\prime}} \\prod_ {j ^ {\\prime}} I _ {(i \\neq i ^ {\\prime} \\lor j \\neq j ^ {\\prime})} p _ {i j} ^ {\\alpha_ {i j}}\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 739, + 684, + 775 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In case of the Hungarian matching the most probable possible word is selected, which corresponds to setting the conditional probability $P(E|O_{i} = j)$ to 1 if object $i$ is paired with label $j$ and 0 otherwise. The ProbKT gradient can be interpreted as a probability weighted extension of the gradient resulting from the Hungarian matching.", + "bbox": [ + 169, + 787, + 826, + 845 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D FULL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 864, + 333, + 878 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In Table 6, we present the full results for the MNIST experiment. 
We report the count accuracy (i.e., correct identification of the digits in the image), sum accuracy (i.e., correct estimation of the sum of", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/6f21851f82f09915b75762994ae1f994ccc2b3a5abbe9d170aebc51cf339c8c9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelTypemnist count acc.mnist sum acc.mnist mAP (mAP@IoU=0.5)
Resnet50-CAM (baseline)In-distribution0.044 ± 0.0410.506 ± 0.0630.003 ± 0.003(0.014 ± 0.011)
Resnet50-CAM (baseline)OOD0.01 ± 0.0090.015 ± 0.0040.003 ± 0.002(0.011 ± 0.007)
Resnet50-CAM (baseline)Source Domain0.127 ± 0.1320.649 ± 0.1080.005 ± 0.004(0.028 ± 0.018)
DETR (Pre-trained)In-distribution0.26 ± 0.0120.262 ± 0.010.518 ± 0.014 (0.637 ± 0.017)
DETR (Pre-trained)OOD0.173 ± 0.010.177 ± 0.0090.51 ± 0.012 (0.632 ± 0.015)
DETR (Pre-trained)Source Domain0.859 ± 0.0310.86 ± 0.0310.781 ± 0.009 (0.957 ± 0.008)
DETR (ProbKT)In-distribution0.662 ± 0.0640.664 ± 0.0650.615 ± 0.025 (0.856 ± 0.037)
DETR (ProbKT)OOD0.532 ± 0.0830.533 ± 0.0820.591 ± 0.03 (0.845 ± 0.038)
DETR (ProbKT)source domain0.878 ± 0.0230.879 ± 0.0230.737 ± 0.014 (0.952 ± 0.009)
RCNN (Pre-trained)In-distribution0.292 ± 0.0050.298 ± 0.0050.632 ± 0.014 (0.685 ± 0.002)
RCNN (Pre-trained)OOD0.205 ± 0.0040.212 ± 0.0040.631 ± 0.013 (0.683 ± 0.002)
RCNN (Pre-trained)source domain0.961 ± 0.0080.961 ± 0.0080.917 ± 0.021 (0.988 ± 0.002)
RCNN (ProbKT)In-distribution0.902 ± 0.0050.903 ± 0.0050.786 ± 0.021 (0.974 ± 0.001)
RCNN (ProbKT)OOD0.863 ± 0.0080.865 ± 0.0080.778 ± 0.021 (0.97 ± 0.001)
RCNN (ProbKT)source domain0.967 ± 0.0040.967 ± 0.0040.873 ± 0.016 (0.989 ± 0.001)
", + "bbox": [ + 173, + 101, + 823, + 323 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "the digits in the image) and the mean average precision (mAP) (i.e. a common object detection metric that reflects the ability to predict the positions and labels of the objects). We observe that the Resnet baseline performs poorly, lacking the necessary logic to process this dataset. We used both DETR and RCNN as object detection backbones in our experiments, showing high test accuracies when fine-tuned with our approach. As the results suggest, RCNN backbones lead to better performance than the DETR backbone.", + "bbox": [ + 169, + 426, + 823, + 510 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/a017321583d572f434443597395419482dc156f25ba94864aaa651942cc50b19.jpg", + "table_caption": [ + "Table 6: Results of the SUM experiments on the MNIST object detection dataset. Reported test accuracies over the 5 folds." + ], + "table_footnote": [], + "table_body": "
ModelData DomainCLEVR count acc.CLEVR mAP (mAP@IoU=0.5)Mol. count. accMol. mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.97 ± 0.0050.036 ± 0.014 (0.200 ± 0.071)0.978 ± 0.0040.0 ± 0.0 (0 ± 0)
Resnet50-CAMOOD0.831 ± 0.0160.029 ± 0.010 (0.153 ± 0.044)0.0 ± 0.0n/a1
Resnet50-CAMsource domain0.993 ± 0.0030.035 ± 0.019 (0.178 ± 0.084)0.828 ± 0.0210.0 ± 0.0 (0 ± 0)
WSOD-transfertarget domain0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)0.001 ± 0.00.018 ± 0.004 (0.061 ± 0.011)
WSOD-transferOOD0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)0.003 ± 0.002n/a1
WSOD-transfersource domain0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)0.0 ± 0.00.021 ± 0.003 (0.069 ± 0.009)
DETR-jointtarget domain0.159 ± 0.1330.579 ± 0.012 (0.684 ± 0.019)0.357 ± 0.1960.197 ± 0.055 (0.481 ± 0.071)
DETR-jointOOD0.084 ± 0.0390.534 ± 0.012 (0.66 ± 0.012)0.024 ± 0.021n/a1
DETR-jointsource dom.0.923 ± 0.0490.908 ± 0.017 (0.992 ± 0.001)0.232 ± 0.1270.23 ± 0.063 (0.565 ± 0.08)
DETR (Pre-trained)target domain0.0 ± 0.00.498 ± 0.019 (0.533 ± 0.024)0.464 ± 0.0330.314 ± 0.006 (0.542 ± 0.006)
DETR (Pre-trained)OOD0.0 ± 0.00.477 ± 0.013 (0.531 ± 0.021)0.002 ± 0.001n/a1
DETR (Pre-trained)source domain0.97 ± 0.0090.945 ± 0.009 (0.992 ± 0.001)0.581 ± 0.0220.409 ± 0.005 (0.722 ± 0.004)
ProbKT*(DETR)target domain0.949 ± 0.0050.728 ± 0.014 (0.99 ± 0.003)0.589 ± 0.0420.373 ± 0.02 (0.669 ± 0.045)
ProbKT*(DETR)OOD0.741 ± 0.0380.606 ± 0.017 (0.977 ± 0.004)0.008 ± 0.008n/a1
ProbKT*(DETR)source domain0.985 ± 0.0040.937 ± 0.006 (0.995 ± 0.001)0.275 ± 0.0660.371 ± 0.021 (0.649 ± 0.041)
ProbKT(DETR)target domain0.946 ± 0.0140.803 ± 0.011 (0.989 ± 0.006)0.508 ± 0.0270.204 ± 0.02 (0.507 ± 0.014)
ProbKT(DETR)OOD0.726 ± 0.0350.715 ± 0.006 (0.974 ± 0.006)0.004 ± 0.003n/a1
ProbKT(DETR)source domain0.987 ± 0.0030.948 ± 0.005 (0.995 ± 0.001)0.549 ± 0.0260.38 ± 0.013 (0.713 ± 0.006)
RCNN (pre-trained)target domain0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)0.592 ± 0.0070.568 ± 0.005 (0.785 ± 0.004)
RCNN (pre-trained)OOD0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)0.348 ± 0.036n/a1
RCNN (pre-trained)source domain0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)0.948 ± 0.0040.737 ± 0.005 (0.979 ± 0.0)
ProbKT*(RCNN)target domain0.974 ± 0.0040.855 ± 0.025 (0.994 ± 0.001)0.945 ± 0.0060.24 ± 0.042 (0.788 ± 0.073)
ProbKT*(RCNN)OOD0.901 ± 0.0170.827 ± 0.022 (0.991 ± 0.001)0.592 ± 0.032n/a1
ProbKT*(RCNN)source domain0.993 ± 0.0020.95 ± 0.021 (0.998 ± 0.0)0.96 ± 0.0030.655 ± 0.01 (0.974 ± 0.004)
ProbKT(RCNN)target domain0.975 ± 0.0030.856 ± 0.039 (0.993 ± 0.001)0.942 ± 0.0090.289 ± 0.041 (0.829 ± 0.054)
ProbKT(RCNN)OOD0.89 ± 0.0220.833 ± 0.042 (0.991 ± 0.001)0.603 ± 0.037n/a1
ProbKT(RCNN)source domain0.995 ± 0.0020.941 ± 0.041 (0.998 ± 0.001)0.96 ± 0.0020.666 ± 0.005 (0.978 ± 0.002)
", + "bbox": [ + 173, + 559, + 823, + 857 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 7: Results of the experiments for the datasets: CLEVR-mini and Molecules. Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution.", + "bbox": [ + 169, + 866, + 823, + 895 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "$^{1}$ OOD test set of Molecules dataset has no bounding box labels.", + "bbox": [ + 199, + 905, + 576, + 920 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "E SOURCE CODE AND DATASETS", + "text_level": 1, + "bbox": [ + 171, + 102, + 460, + 118 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The source code and basic instructions are available on https://github.com/molden/ProbKT. The source code integrates features from the Weights & Biases (WandB) platform [5]. 
Basic features are supported without the need for an account on WandB but to make full use of all features we recommend to create an account.", + "bbox": [ + 169, + 133, + 826, + 189 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Datasets can be downloaded here:", + "bbox": [ + 171, + 196, + 398, + 210 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- CLEVR-mini dataset https://figshare.com/s/db012765e5a38e14ef9c", + "- Molecules dataset https://figshare.com/s/3dc3508d39bf4cff8c7f", + "- MNIST object detection dataset https://figshare.com/s/c760de026f000524db5a" + ], + "bbox": [ + 215, + 222, + 823, + 287 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "ProbLog script used in the ProbKT Probabilistic logical reasoning framework for counting of objects on an image (as on CLEVR-mini dataset):", + "bbox": [ + 169, + 300, + 826, + 330 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": ":- use_module library(lists)). \nnn(mnist_net,[X],Y,[0,1,2,3,4,5,6,7,8,9,10,11]) :: digit(X,Y). \ncount([],X,0). \ncount([X|T],X,Y):- count(T,X,Z), Y is 1+Z. \ncount([X1|T],X,Z):- X1\\=X,count(T,X,Z). \ncountall(List,X,C) :- sort(List,List1), member(X,List1), count(List,X,C). \nroll([],L,L). \nroll([H|T],A,L):- roll(T,[Y|A],L), digit(H,Y). \ncountpart(List,[],[]) \ncountpart(List,[H|T],[F|L]):- countall(List,H,F), countpart(List,T,L). \ncount.objects(X,L,C):- roll(X,[],Result), countpart(Result,L,C).", + "guess_lang": "prolog", + "bbox": [ + 171, + 340, + 859, + 608 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The query $q$ in the case of class counts would be count.objects(X, L, C). 
For example an image $X$ with 1 small metal cube and 3 large rubber cylinders would result in the following query: count.objects(X, [small métal Cube, large rubber_cylinder], [1, 3]).", + "bbox": [ + 169, + 618, + 826, + 661 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "ProbLog script used in the ProbKT Probabilistic logical reasoning framework for aggregating the digits on an image:", + "bbox": [ + 169, + 667, + 826, + 696 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": ": - use_module library(lists)). \nnn (mnist_net, [X], Y, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) :: digit(X,Y). \nsum([[], 0). \nsum([X|T], Y) : - sum(T,Z), Y is X+Z. \nroll([[], L, L). \nroll([H|T], A, L) : - roll(T, [Y|A], L), digit(H,Y). \nsum_digits(X,Y) : - roll(X, [], Result), sum(Result,Y).", + "guess_lang": "prolog", + "bbox": [ + 171, + 708, + 723, + 849 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The query $q$ in the case of sum of digits would be sum_digits(X, Y). For example an image $X$ with as sum of digits 12 would result in the following query: sum_digits(X, 12).", + "bbox": [ + 169, + 859, + 823, + 890 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "ProbLog script used in the ProbKT Probabilistic logical reasoning framework for taking into account non-exact counts on images", + "bbox": [ + 169, + 895, + 825, + 925 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "-- useModule library(lists)) ;\ninn (mnist_net, [X], Y, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) :: digit(X,Y) . . . 
..\ncount([ ], X, 0 ) ;\ncount([X|T], X,Y): - count(T,X,Z), Y is 1+Z;\ncount([X|T], X,Z): - X1 $\\equiv$ X, count(T,X,Z);\ncount(, X, 0 ) ;\ncount([X|T], X,Y): - count(T,X,Z), Y is 1+Z;\ncount(, X|T], X,Z): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X |7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(,X|7),x,C,A):- count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), $x\\text{、} C\\text{、} B\\text{、}$ ): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - 
count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount (, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z)\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7),\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- Call\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- called\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- called\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call 
T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- called\n\t\t\t-- call T\n\t\t\t-- called\n\t\t\t-- called\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S])\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S])\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S])\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S]\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S]\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S]\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S]\n\t\t\t- roll(x,[Y],[A],[-X] -- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[y,-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y变压] [- roll(x,[Y变压] [- roll(x,[Y变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [-", + "guess_lang": "txt", + "bbox": [ + 169, + 103, + 781, + 577 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The query $q$ in the case of non exact 
counts of objects would be range_countobjects(X,L,C,S). For example an image $X$ with exactly one metal small cube and multiple rubber large spheres would result in the following query: range_countobjects(X,[s_metal Cube,l_rubber Sphere],[1,1],[0,1]).", + "bbox": [ + 169, + 587, + 823, + 645 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "E.1 INFERENCE EXAMPLE FOR MNIST DATASET", + "text_level": 1, + "bbox": [ + 171, + 659, + 527, + 674 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To illustrate the inference process let us follow the evaluation of the clause sum([x1, x2], 8), what can result from query sum>digits(X, 8) in case of two visible digit in the image X.", + "bbox": [ + 169, + 685, + 826, + 715 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "This clause is true if and only if $X_{1} + X_{2} = 8$ .", + "bbox": [ + 171, + 720, + 483, + 736 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In case of MNIST digits $\\{(0,1,\\dots ,9)\\}$ enumerating the possible worlds would give the following set:", + "bbox": [ + 169, + 742, + 823, + 770 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\{(0, 8), (1, 7), (2, 6), \\dots , (8, 0) \\} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 775, + 823, + 792 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "After summing the probability of all possible worlds we get:", + "bbox": [ + 171, + 805, + 571, + 820 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\np _ {1} (0) p _ {2} (8) + p _ {1} (1) p _ {2} (7) + \\dots + p _ {1} (0) p _ {2} (8), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 343, + 825, + 823, + 842 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $p_1$ and $p_2$ are the distribution of random variable $X_1$ and $X_2$ respectively.", + "bbox": [ + 169, + 845, + 700, + 863 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Or in a general form:", + "bbox": [ 
+ 171, + 868, + 315, + 883 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\np _ {Y} (Y) = \\sum_ {X _ {1}} p _ {1} \\left(X _ {1}\\right) p _ {2} \\left(Y - X _ {1}\\right). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 887, + 823, + 921 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "As expected the distribution of the sum is the convolution of the distributions of the two terms. This observation trivially generalizes to more than two terms. The cost function corresponding to the maximum likelihood estimation is the negative log-likelihood $-\\log (p_{Y}(Y))$ .", + "bbox": [ + 174, + 103, + 823, + 146 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + } +] \ No newline at end of file diff --git a/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_model.json b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_model.json new file mode 100644 index 0000000000000000000000000000000000000000..81e2a43f5ff16c4f5e90060c0affde19f78633c4 --- /dev/null +++ b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_model.json @@ -0,0 +1,3146 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + 
}, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.83, + 0.172 + ], + "angle": 0, + "content": "WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH PROBABILISTIC LOGICAL REASONING FOR OBJECT DETECTION" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.195, + 0.313, + 0.209 + ], + "angle": 0, + "content": "Martijn Oldenhof" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.21, + 0.307, + 0.223 + ], + "angle": 0, + "content": "ESAT-STADIUS" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.224, + 0.328, + 0.238 + ], + "angle": 0, + "content": "KU Leuven, Belgium" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.239, + 0.46, + 0.252 + ], + "angle": 0, + "content": "martijn. oldenhof@kuleuven.be" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.196, + 0.61, + 0.21 + ], + "angle": 0, + "content": "Adam Arany" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.21, + 0.639, + 0.224 + ], + "angle": 0, + "content": "ESAT-STADIUS" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.224, + 0.659, + 0.239 + ], + "angle": 0, + "content": "KU Leuven, Belgium" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.239, + 0.782, + 0.252 + ], + "angle": 0, + "content": "adam.arany@esat.kuleuven.be" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.273, + 0.28, + 0.286 + ], + "angle": 0, + "content": "Yves Moreau" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.287, + 0.307, + 0.301 + ], + "angle": 0, + "content": "ESAT-STADIUS" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.302, + 0.328, + 0.315 + ], + "angle": 0, + "content": "KU Leuven, Belgium" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.316, + 0.46, + 0.329 + ], + "angle": 0, + "content": "yves.moreau@esat.kuleuven.be" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.273, + 0.665, + 0.286 + ], + "angle": 0, + "content": "Edward De Brouwer" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.287, + 0.641, + 0.3 + ], + "angle": 0, + "content": "ESAT-STADIUS" + }, + { + "type": "text", + "bbox": [ + 
0.515, + 0.301, + 0.661, + 0.316 + ], + "angle": 0, + "content": "KU Leuven, Belgium" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.316, + 0.775, + 0.329 + ], + "angle": 0, + "content": "edward.debrouwer@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.452, + 0.366, + 0.547, + 0.38 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.395, + 0.77, + 0.591 + ], + "angle": 0, + "content": "Training object detection models usually requires instance-level annotations, such as the positions and labels of all objects present in each image. Such supervision is unfortunately not always available and, more often, only image-level information is provided, also known as weak supervision. Recent works have addressed this limitation by leveraging knowledge from a richly annotated domain. However, the scope of weak supervision supported by these approaches has been very restrictive, preventing them to use all available information. In this work, we propose ProbKT, a framework based on probabilistic logical reasoning that allows to train object detection models with arbitrary types of weak supervision. We empirically show on different datasets that using all available information is beneficial as our ProbKT leads to significant improvement on target domain and better generalization compared to existing baselines. We also showcase the ability of our approach to handle complex logic statements as supervision signal. Our code is available at https://github.com/molden/ProbKT" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.615, + 0.338, + 0.63 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.828, + 0.73 + ], + "angle": 0, + "content": "Object detection is a fundamental ability of numerous high-level machine learning pipelines such as autonomous driving [4; 16], augmented reality [42] or image retrieval [17]. 
However, training state-of-the-art object detection models generally requires detailed image annotations such as the box-coordinates location and the labels of each object present in each image. If several large benchmark datasets with detailed annotations are available [26; 15], providing such detailed annotation on new specific datasets comes with a significant cost that is often not affordable for many applications." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.828, + 0.807 + ], + "angle": 0, + "content": "More frequently, datasets come with only limited annotation, also referred to as weak supervision. This has sparked research in weakly-supervised object detection approaches [25; 6; 40], using techniques such as multiple instance learning [40] or variations of class activation maps [3]. However, these approaches have been shown to significantly underperform their fully-supervised counterparts in terms of robustness and accurate localization of the objects [39]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.828, + 0.926 + ], + "angle": 0, + "content": "An appealing and intuitive approach to improve the performance of weakly supervised object detection is to perform transfer learning from an existing object detection model pre-trained on a fully annotated dataset [14; 46; 43]. This approach, also referred to as transfer learning or domain adaptation, consists in leveraging transferable knowledge from the pre-trained model (such as bounding boxes prediction capabilities) to the new weakly supervised domain. This transfer has been embodied in different ways in the literature. Examples include a simple fine-tuning of the classifier of bounding box proposals of the pre-trained model [43], or an iterative relabeling of the weakly supervised dataset for retraining a new full objects detection model on the re-labeled data [46]." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.099, + 0.827, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.308, + 0.828, + 0.38 + ], + "angle": 0, + "content": "Figure 1: ProbKT: Weakly supervised knowledge transfer with probabilistic logical reasoning. (Left) A model can be trained on the source domain using full supervision (labels, positions) but only on a limited set of shapes (cylinders and spheres). (Middle) The pre-trained model does not recognize the cubes from the target domain correctly. (Right) The model can adapt to the target domain after applying ProbKT and can recognize the cubes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.405, + 0.825, + 0.517 + ], + "angle": 0, + "content": "However, existing approaches are very restrictive in the type of weak supervision they are able to harness. Indeed, some do not support new object classes in the new domain [20], others can only use a label indicating the presence of an object class [46]. However, in practice, the supervision on the new domain can come in very different forms. For instance, the count of each object class can be given, such as in atom detection from molecule images where only chemical formula might be given. Or, when many objects are present on an image, a range can be provided instead of an exact class counts (e.g. \"there are at least 4 cats on this image\"). Crucially, this variety of potential supervisory signals on the target domain cannot be fully utilized by existing domain adaption approaches." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.523, + 0.827, + 0.65 + ], + "angle": 0, + "content": "To address this limitation, we introduce ProbKT, a novel framework that allows to generalize knowledge transfer in object detection to arbitrary types of weak supervision using neural probabilistic logical reasoning [27]. This paradigm allows to connect probabilistic outputs of neural networks with logical rules and to infer the resulting probability of particular queries. One can then evaluate the probability of a query such as \"the image contains at least two animals\" and differentiate through the probabilistic engine to train the underlying neural network. Our approach allows for arbitrarily complex logical statements and therefore supports weak supervision like class counts or ranges, among other. To our knowledge, this is the first approach to allow for such versatility in utilizing the available information on the new domain." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.656, + 0.827, + 0.74 + ], + "angle": 0, + "content": "To assess the capabilities of this framework, we provide extensive empirical analysis of multiple object detection datasets. Our approach also supports any type of objects detection backbone architecture. We thus use two popular backbone architectures, DETR [7] and RCNN [34] and evaluate their performance in terms of accuracy, convergence as well of generalization on out-of-distribution data. Our experiments show that, due to its ability to use the complete supervisory signal, our approach outperforms previous works in a wide range of setups." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.746, + 0.827, + 0.817 + ], + "angle": 0, + "content": "Key contributions: (1) We propose a novel knowledge transfer framework for object detection relying on probabilistic programming that uniquely allows using arbitrary types of weak supervision on the target domain. 
(2) We make our approach amenable to different levels of computational capabilities by proposing different approximations of ProbKT. (3) We provide an extensive experimental setup to study the capabilities of our framework for knowledge transfer and out-of-distribution generalization." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.837, + 0.353, + 0.853 + ], + "angle": 0, + "content": "2 RELATED WORKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "A comparative summary of related works is given in Table 1. We distinguish three main categories: (1) pure weakly supervised object detection methods (WSOD) that do not leverage a richly annotated source domain, (2) unsupervised object detection methods with knowledge transfer (DA or domain adaptation methods) that do not use supervision on the target domain and (3) weakly supervised" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.189 + ], + "angle": 0, + "content": "object detection methods with knowledge transfer (WSOD w/transfer) that are restrictive in the type of supported weak supervision. To our knowledge, our work is the first to allow for arbitrary supervision on the target domain (and supporting new classes in the target domain) while also leveraging knowledge from richly annotated domains. ProbKT supports arbitrary weak supervision thanks to the inherited expressiveness of Prolog [41] which is based on a subset of first-order predicate logic, Horn clauses and is Turing-complete." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.194, + 0.828, + 0.309 + ], + "angle": 0, + "content": "Weakly supervised object detection (WSOD) This class of method allows training object detection models with only weak supervision. One can thus train these approaches directly on the target domain. However, they do not allow to leverage potentially available richly annotated datasets, which has been shown to lead to worse performance [39]. Different flavors of WSOD architectures have been proposed relying on a variety of implementations such as multiple instance learning (MIL)-based [25; 40] or class activation (CAM) based [47; 3]. In contrast to WSOD methods, our approach is designed to exploit existing richly annotated datasets and thus provides increased performance on the target domain. For a comprehensive review of WSOD methods we refer the reader to Shao et al. [39]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.313, + 0.826, + 0.357 + ], + "angle": 0, + "content": "Domain adaptation methods (DA) In contrast to WSOD methods, domain adaptation methods do rely on fully supervised source domain dataset. However, they do not assume any supervision on the target domain and are therefore not equipped to exploit such signal when available [37; 8; 22; 48]." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.362, + 0.828, + 0.475 + ], + "angle": 0, + "content": "WSOD with knowledge transfer Our approach belongs to the class of weakly supervised object detection models with knowledge transfer. These methods aim to transfer knowledge from a source domain, where full supervision is available, to a target domain where only weak labels are available. Existing work in this class of models only allows for limited type of supervision of the target domain. Most architectures only support a label indicating the presence or absence of a class of object in the image[14; 46; 43]. Inoue et al. 
[20] allows for class counts as weak supervision but unfortunately does not allow for new classes in the target domain. In contrast, ProbKT natively allows for class counts and new classes as well as other types of weak supervision." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.481, + 0.827, + 0.593 + ], + "angle": 0, + "content": "Neural probabilistic logical reasoning Probabilistic logical reasoning combines logic and probability theory. Favored for its high-level reasoning abilities, it was introduced as an alternative way to deep learning in the quest for artificial intelligence [10]. Statistical artificial intelligence [32; 23] and probabilistic logic programming [11] are examples of areas relying on these premises. In a unification effort, researchers have proposed hybrid architectures, embedding both deep learning and logical reasoning components [38; 35]. Our work builds upon the recent advances in the field, where combinations of deep learning, logical, and probabilistic approaches were introduced [27], allowing high-level reasoning with uncertainty using differentiable neural network architectures." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.598, + 0.825, + 0.753 + ], + "angle": 0, + "content": "
MethodTypeAnnotated source dom.Weak supervisionNew classesImplementation
Li et al. [25]WSODXpresence/absenceMIL-based
Bilen and Vedaldi [6]WSODXpresence/absencespatial pyramid pooling layer
Song et al. [40]WSODXpresence/absenceMIL based
Zhou et al. [47]WSODXmixCAM-based
Bae et al. [3]WSODXmixCAM based
Kundu et al. [24]DAone-shotClass-Incremental DA
Saito et al. [37]DAXXStrong-Weak Distribution Alignment
Chen et al. [8]DAXXAdversarial training
Kim et al. [22]DAXXAdversarial training and Domain Diversification
Zhu et al. [48]DAXXselective region adaptation framework
Deselaers et al. [14]WSOD w/transferpresence/absenceCRF-based, iteratively
Zhong et al. [46]WSOD w/transferpresence/absenceMIL based, iteratively
Uijlings et al. [43]WSOD w/transferpresence/absenceMIL based, non iteratively
Inoue et al. [20]WSOD w/transferclass countsXDA + pseudolabeling, iteratively
ProbKT (ours)WSOD w/transferarbitraryProbabilistic logical reasoning, iteratively
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.763, + 0.825, + 0.793 + ], + "angle": 0, + "content": "Table 1: Summary table of related works with weakly supervised object detection(WSOD), Domain Adaptation(DA) and weakly supervised knowledge transfer methods (WSOD w/ transfer)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.824, + 0.345, + 0.839 + ], + "angle": 0, + "content": "3 METHODOLOGY" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.856, + 0.372, + 0.87 + ], + "angle": 0, + "content": "3.1 PROBLEM STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We consider the problem of weakly supervised knowledge transfer for object detection. Using a model trained on a richly annotated source domain, we aim at improving its performance on a less richly annotated target domain." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.288 + ], + "angle": 0, + "content": "Let \\(\\mathcal{D}_s = \\{(I_s^i, b_s^i, y_s^i) : i = 1, \\dots, N_s)\\}\\) be a dataset issued from the source domain and consisting of \\(N_s\\) images \\(I_s\\) along with their annotations. We write \\(b_s^i \\in \\mathbb{R}^{n_i \\times 4}\\) and \\(y_s^i \\in \\{1, \\dots, K_s\\}^{n_i}\\) for the box coordinates and class labels of objects in image \\(I_s^i\\), \\(n_i\\) is the number of objects present in image \\(I_s^i\\) and \\(K_s\\) is the total number of object classes in the source domain. This represents the typical dataset required to train classical fully-supervised object detection architectures. 
The target dataset \\(\\mathcal{D}_t = \\{(I_t^i, q_t^i) : i = 1, \\dots, N_t)\\}\\) contains \\(N_t\\) image from the target domain along with image-level annotations \\(q_t^i\\). These annotations are logical statements about the content of the image in terms of object classes and their location. Examples include the presence of different classes in each image (i.e., the classical assumption in weakly supervised object detection) but also extends to the counts of classes or a complex combination of counts of objects attributes (e.g., \"two red objects, and at least two bicycles\"). What is more, the logical statements \\(q_t^i\\) can include classes not already present in the source domain. This type of logical annotation is then strictly broader than the restrictive supervision usually assumed." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.291, + 0.825, + 0.337 + ], + "angle": 0, + "content": "Based on the availability of a source dataset and a target dataset as described above, our goal is then to harness the available detailed information from the source domain to perform accurate object detection on the target domain. A graphical illustration of this process is given in Figure 1." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.361, + 0.318, + 0.375 + ], + "angle": 0, + "content": "3.2 BACKGROUND" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.391, + 0.371, + 0.405 + ], + "angle": 0, + "content": "3.2.1 OBJECT DETECTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.419, + 0.828, + 0.533 + ], + "angle": 0, + "content": "Object detection aims at predicting the location and labels of objects in images. 
One then wishes to learn a parametric function \\( f_{\\theta}:\\mathcal{I}\\rightarrow \\{\\mathcal{B}\\times \\mathbb{R}^{K}\\}^{\\mathbb{Z}} \\) with \\( f_{\\theta}(I) = \\{(\\hat{b},\\hat{p}_y)\\}^{\\hat{n}} = \\{(\\hat{b}_i,\\hat{p}_{y,i}):i = 1,\\dots,\\hat{n}\\} \\) such that the distance between predicted and true boxes and labels, \\( d(\\{\\hat{(b},\\hat{p}_y)\\}^{\\hat{n}},\\{(b,y)\\}^{n}) \\), is minimum. Objects detection architecture would usually output box features proposals \\( \\{h_i:i = 1,\\dots,\\hat{n}\\} \\) conditioned on which they would predict the probability vector of class labels \\( \\hat{p}_{y,i} = g_p(h_i) \\) and the box location predictions \\( \\hat{b}_i = g_b(h_i) \\) using shared parametric functions \\( g_{p}(\\cdot) \\) and \\( g_{b}(\\cdot) \\). For an object \\( n \\), we write the predicted probability of the object belonging to class \\( k \\) as \\( \\hat{p}_{y,n}^{k} \\)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.553, + 0.492, + 0.567 + ], + "angle": 0, + "content": "3.2.2 PROBABILISTIC LOGICAL REASONING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.58, + 0.825, + 0.68 + ], + "angle": 0, + "content": "Probabilistic logical reasoning uses knowledge representation relying on probabilities that allow encoding uncertainty in knowledge. Such a knowledge is encoded in a probabilistic logical program \\(\\mathcal{P}\\) as a set of \\(N\\) probabilistic facts \\(U = \\{U_{1},\\dots,U_{N}\\}\\) and \\(M\\) logical rules \\(F = \\{f_{1},\\dots f_{M}\\}\\) connecting them. A simple example of probabilistic fact is \"Alice and Bob will each pass their exam with probability 0.5\" and an example of logical rule is \"if both Alice and Bob pass their exam, they will host a party\". Combining probabilistic facts and logical rules, one can then construct complex probabilistic knowledge representation, that can also be depicted as probabilistic graphical models." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.685, + 0.825, + 0.785 + ], + "angle": 0, + "content": "Probabilistic logical programming allows to perform inference by computing the probability of a particular statement or query. For instance, one could query the probability that \"Alice and Bob will host a party\". This query is executed by summing over the probabilities of occurrence of the different worlds \\( w = \\{u_1, \\dots, u_N\\} \\) (i.e. individual realization of the set of probabilistic facts) that are compatible with the query \\( q \\). The probability of a query \\( q \\) in a program \\( \\mathcal{P} \\) can then be inferred as \\( P_{\\mathcal{P}}(q) = \\sum_{w} P(w) \\cdot \\mathbb{I}[F(w) \\equiv q] \\), where \\( F(w) \\equiv q \\) stands for the fact that propagation of the realization \\( w \\) across the knowledge graph, according to the logical rules \\( F \\) leads to \\( q \\) being true." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.789, + 0.825, + 0.864 + ], + "angle": 0, + "content": "Remarkably, recent advances in probabilistic programming have led to learnable probabilistic facts [27]. In particular, the probability of a fact can be generated by a neural network with learnable weights. Such a learnable probabilistic fact is then referred to as a neural predicate \\( U^{\\theta} \\), where we make the dependence on the weights \\( \\theta \\) explicit. One can then train these weights to minimize a loss that depend on the probability of a query \\( q \\): \\( \\hat{\\theta} = \\arg \\min_{\\theta} \\mathcal{L}(P(q \\mid \\theta)) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Our approach builds upon this ability to learn neural predicates and uses DeepProbLog [27] as the probabilistic reasoning backbone. DeepProbLog is a neural probabilistic logic programming language that allows to conveniently perform inference and differentiation with neural predicates. 
We refer the reader to the excellent introduction of Manhaeve et al. [28] for further details about this framework." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "3.3 PROBKT: WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH PROBABILISTIC LOGICAL REASONING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.144, + 0.827, + 0.271 + ], + "angle": 0, + "content": "A graphical description of our approach is presented in Figure 2. Our framework starts from a pre-trained object detection model \\( f_{\\theta} \\) on the source domain. The backbone of this model is extracted and inserted into a new object detection model \\( f_{\\theta}^{*} \\) with new target box position predictors and box label classifiers. This new model is then used to predict box proposals along with the corresponding box features on target domain images \\( I_{t} \\). These box features are then fed to a new target box position predictor and box label classifier. The predictions of this classifier are considered neural predicates and are given to a probabilistic logical module. This module evaluates the probability of queries \\( q_{t} \\), the loss, and the corresponding gradient that can be backpropagated to the classifier and the backbone. 
As we want to maximize the probability of the queries being true, we use the following loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.293, + 0.825, + 0.328 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\theta} = \\sum_ {\\left(I _ {t}, q _ {t}\\right) \\in \\mathcal {D} _ {t}} - \\log P _ {\\mathcal {P}} \\left(q _ {t} \\mid f _ {\\theta} ^ {*} \\left(I _ {t}\\right)\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.825, + 0.385 + ], + "angle": 0, + "content": "In theory, the backbone can be trained end to end with this procedure. Our experiments showed that only updating the box features classifiers resulted in more stability as also shown in previous works [46]. We then adopt here the same iterative relabeling strategy, as described next." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.398, + 0.785, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.638, + 0.828, + 0.714 + ], + "angle": 0, + "content": "Figure 2: ProbKT. The pre-trained object detection backbone outputs the box features \\( h \\) for the detected objects. Box classifiers (red) and box position predictors (blue) then predict corresponding label predictions \\( \\hat{p}_y \\) and box position predictions \\( \\hat{b} \\) that are fed to the probabilistic reasoning layer. This layer computes the probability of the query along with the gradients with respect to \\( \\hat{p}_y \\) and \\( \\hat{b} \\) that can be backpropagated through the entire network." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.739, + 0.4, + 0.751 + ], + "angle": 0, + "content": "3.3.1 ITERATIVE RELABELING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.763, + 0.827, + 0.806 + ], + "angle": 0, + "content": "The approach described above allows to fine-tune our model \\( f_{\\theta}^{*} \\) to the target domain. 
To further improve the performance, we propose an iterative relabeling strategy that consists in multiple steps: fine-tuning, re-labeling and re-training. A similar has also been proposed by Zhong et al. [46]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.812, + 0.825, + 0.841 + ], + "angle": 0, + "content": "Fine-tuning. This step corresponds to training ProbKT on the weakly supervised labels, by minimizing the loss of Equation 1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.847, + 0.825, + 0.89 + ], + "angle": 0, + "content": "Re-labeling. Once ProbKT has been trained, we can use its predictions to annotate images in the target domain. In practice, we only relabel images for which the model predictions comply with the available query labels in order to avoid too noisy labels." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Re-training. The re-labeled target domain can be used to re-train the object detection backbone of ProbKTin a fully-supervised fashion." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.148 + ], + "angle": 0, + "content": "This procedure can be repeated multiple times to improve the quality of the relabeling and the quantity of relabelled in the target domain dataset. A graphical representation of the relabeling pipeline is presented in Figure 3." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.16, + 0.828, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.239, + 0.825, + 0.27 + ], + "angle": 0, + "content": "Figure 3: Iterative relabeling. 
A full cycle is composed of a fine-tuning, a re-labeling and a re-training step. After one cycle, the fine-tuning step and/or re-labeling step can be iteratively repeated." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.294, + 0.611, + 0.308 + ], + "angle": 0, + "content": "3.3.2 COMPUTATIONAL COMPLEXITY AND APPROXIMATIONS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.317, + 0.825, + 0.403 + ], + "angle": 0, + "content": "The computational complexity of inference in probabilistic programming depends on the specific query \\( q \\) and several approximations have been proposed for improving the computation time [44]. We propose two approaches for reducing the computational cost adapted to object detection: (1) filtering the data samples before applying ProbKT (see Appendix Section C.1) or (2) when the supervision consists of the class labels counts, considering only the most probable world (ProbKT*) instead of all possible worlds." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.417, + 0.766, + 0.445 + ], + "angle": 0, + "content": "3.3.3 PROBKT*: THE MOST PROBABLE WORLD AND CONNECTION TO HUNGARIAN MATCHING" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.455, + 0.827, + 0.54 + ], + "angle": 0, + "content": "The probabilistic inference step requires a smart aggregation of all worlds compatible with the query \\( q \\). Yet, in certain cases, one can reduce the computational cost by only considering the most probable world. Indeed, consider the case when the query consists of the list of different class labels in the images. For a number of boxes \\( \\hat{n} \\) proposed by the objects detection model, the query can be written as the set of labels \\( q = \\{y^i : i = 1, \\dots, \\hat{n}\\} \\). 
If we further write \\( \\hat{p}_{y,n}^k \\) as the probability of the label of box \\( n \\) belonging to class \\( k \\) given by the model (as introduced in Section 3.2.1), we have:" + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.546, + 0.687, + 0.589 + ], + "angle": 0, + "content": "\\[\nP _ {\\mathcal {P}} (q) = \\sum_ {j = 1} ^ {\\hat {n}!} \\hat {p} _ {y, 0} ^ {\\sigma_ {j} (0)} \\cdot \\hat {p} _ {y, 1} ^ {\\sigma_ {j} (1)} \\cdot \\ldots \\cdot \\hat {p} _ {y, \\hat {n}} ^ {\\sigma_ {j} (\\hat {n})} = \\sum_ {j = 1} ^ {\\hat {n}!} \\prod_ {n} \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.596, + 0.825, + 0.639 + ], + "angle": 0, + "content": "where \\(\\sigma_{j}\\) corresponds to the \\(j^{th}\\) permutation of the query vector \\(q\\). To avoid the computation of each possible world contribution, one can only use the configuration with the largest contribution to \\(P_{\\mathcal{P}}(q)\\) and discard the other ones." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.645, + 0.624, + 0.661 + ], + "angle": 0, + "content": "This possible world corresponds to the permutation \\(\\sigma^{*}\\) that satisfies:" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.667, + 0.767, + 0.699 + ], + "angle": 0, + "content": "\\[\n\\sigma^ {*} = \\underset {\\sigma} {\\arg \\max} \\log (\\prod_ {n} \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)}) = \\underset {\\sigma} {\\arg \\max} \\sum_ {n} \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)} = \\underset {\\sigma} {\\arg \\min} \\sum_ {n} (1 - \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.704, + 0.825, + 0.78 + ], + "angle": 0, + "content": "Remarkably, this corresponds to the solution of the best alignment using the Hungarian matching algorithm with cost \\( c(n) = (1 - \\hat{p}_{y,n}^{\\sigma_j(n)}) \\), as used, among others, in DETR [7]. 
Thus, when the query is the set of class labels, the most plausible world can be inferred with the Hungarian matching algorithm. In Appendix C.2, we also show that the gradient of ProbKT can be interpreted as a probability weighted extension of the gradient resulting from the Hungarian matching." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.798, + 0.329, + 0.813 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.829, + 0.29, + 0.843 + ], + "angle": 0, + "content": "4.1 DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.827, + 0.926 + ], + "angle": 0, + "content": "We evaluate our approach on three different datasets: (1) a CLEVR-mini dataset, (2) a Molecules dataset with images of chemical compounds, and (3) an MNIST-based object detection dataset. For each dataset, three subsets, corresponding to different domains, are used: (1) a source domain, (2) a target domain, and (3) an out-of-distribution domain (OOD). The source domain is the richly annotated domain that was used to pre-train the object detection model. The target domain is" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.961 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.19 + ], + "angle": 0, + "content": "the domain of interest but with image-level annotations only. Lastly, the OOD domain contains images from a different distribution than the source and target domains and is used to study the generalizability of the models. Source and target domains are split into 5 folds of train and validation sets and an independent test set. We focused our experiments on the small sample regime (range 1k-2k samples) both for the source and the target domain. 
More details on each dataset can be found in Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.205, + 0.279, + 0.219 + ], + "angle": 0, + "content": "4.2 MODELS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.231, + 0.825, + 0.317 + ], + "angle": 0, + "content": "In the experiments, we apply our method ProbKT on two different pre-trained object detection backbone models: (1) DETR [7] and (2) FasterRCNN [34]. Both are pre-trained on the COCO dataset [26]. We also evaluate an Hungarian-algorithm approximation (ProbKT*) of our method when the weak supervision allows it. For sake of conciseness, we omit the results of ProbKT* here but they can be found in Appendix D. The details of the training procedures, as well as the hyper-parameters used for the different models and the different datasets are summarized in Table 4 in Appendix A." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.33, + 0.365, + 0.343 + ], + "angle": 0, + "content": "4.2.1 BASELINE MODELS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.353, + 0.825, + 0.398 + ], + "angle": 0, + "content": "As shown in Section 2, all available approaches for weakly supervised object detection are very restrictive in terms of the supervision signal they support. Our main comparison partner is the state of the art WSOD-transfer method [46]." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.402, + 0.827, + 0.487 + ], + "angle": 0, + "content": "Additionally, we compare our approach against a Resnet50 [18] backbone pre-trained on ImageNet [12]. Fine-tuning is performed by adding an extra multitask regression layer that is trained to predict the individual counts of the objects in the image as in Xue et al. [45]. This architecture naturally relies only on label counts in the target images for fine-tuning. We then predict box predictions using class activation maps as in Bae et al. [3] to compare its performance on object localization. We call this approach Resnet50-CAM." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.493, + 0.825, + 0.537 + ], + "angle": 0, + "content": "When the supervision signal allows it, we also compare with a DETR model trained end-to-end jointly on target and source domains, masking the box costs in the matching cost of the Hungarian algorithm for image-level annotated samples. We call this approach DETR-joint." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.547, + 0.825, + 0.78 + ], + "angle": 0, + "content": "
ModelData DomainCLEVR count acc.CLEVR mAP (mAP@IoU=0.5)Mol. count. accMol. mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.97 ± 0.0050.036 ± 0.014 (0.200 ± 0.071)0.978 ± 0.0040.0 ± 0.0 (0 ± 0)
Resnet50-CAMOOD0.831 ± 0.0160.029 ± 0.010 (0.153 ± 0.044)0.0 ± 0.0n/a*
Resnet50-CAMsource domain0.993 ± 0.0030.035 ± 0.019 (0.178 ± 0.084)0.828 ± 0.0210.0 ± 0.0 (0 ± 0)
WSOD-transfertarget domain0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)0.001 ± 0.00.018 ± 0.004 (0.061 ± 0.011)
WSOD-transferOOD0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)0.003 ± 0.002n/a*
WSOD-transfersource domain0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)0.0 ± 0.00.021 ± 0.003 (0.069 ± 0.009)
DETR-jointtarget domain0.159 ± 0.1330.579 ± 0.012 (0.684 ± 0.019)0.357 ± 0.1960.197 ± 0.055 (0.481 ± 0.071)
DETR-jointOOD0.084 ± 0.0390.534 ± 0.012 (0.66 ± 0.012)0.024 ± 0.021n/a*
DETR-jointsource dom.0.923 ± 0.0490.908 ± 0.017 (0.992 ± 0.001)0.232 ± 0.1270.23 ± 0.063 (0.565 ± 0.08)
RCNN (pre-trained)target domain0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)0.592 ± 0.0070.568 ± 0.005 (0.785 ± 0.004)
RCNN (pre-trained)OOD0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)0.348 ± 0.036n/a*
RCNN (pre-trained)source domain0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)0.948 ± 0.0040.737 ± 0.005 (0.979 ± 0.0)
DETR (pre-trained)target domain0.0 ± 0.00.498 ± 0.019 (0.533 ± 0.024)0.464 ± 0.0330.314 ± 0.006 (0.542 ± 0.006)
DETR (pre-trained)OOD0.0 ± 0.00.477 ± 0.013 (0.531 ± 0.021)0.002 ± 0.001n/a*
DETR (pre-trained)source domain0.97 ± 0.0090.945 ± 0.009 (0.992 ± 0.001)0.581 ± 0.0220.409 ± 0.005 (0.722 ± 0.004)
ProbKT (DETR)target domain0.946 ± 0.0140.803 ± 0.011 (0.989 ± 0.006)0.508 ± 0.0270.204 ± 0.02 (0.507 ± 0.014)
ProbKT (DETR)OOD0.726 ± 0.0350.715 ± 0.006 (0.974 ± 0.006)0.004 ± 0.003n/a*
ProbKT (DETR)source domain0.987 ± 0.0030.948 ± 0.005 (0.995 ± 0.001)0.549 ± 0.0260.38 ± 0.013 (0.713 ± 0.006)
ProbKT (RCNN)target domain0.975 ± 0.0030.856 ± 0.039 (0.993 ± 0.001)0.942 ± 0.0090.289 ± 0.041 (0.829 ± 0.054)
ProbKT (RCNN)OOD0.89 ± 0.0220.833 ± 0.042 (0.991 ± 0.001)0.603 ± 0.037n/a*
ProbKT (RCNN)source domain0.995 ± 0.0020.941 ± 0.041 (0.998 ± 0.001)0.96 ± 0.0020.666 ± 0.005 (0.978 ± 0.002)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.789, + 0.825, + 0.833 + ], + "angle": 0, + "content": "Table 2: Results of the experiments for the datasets: CLEVR-mini and Molecules. Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution. *: OOD test set of Molecules dataset has no bounding box labels." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.857, + 0.377, + 0.872 + ], + "angle": 0, + "content": "4.3 EVALUATION METRICS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.882, + 0.825, + 0.927 + ], + "angle": 0, + "content": "We evaluate the performance of the models on the different datasets based on two criteria: the count accuracy and the objects localization performance. The count accuracy measures the ratio of correct images where all individual counts of (all detected) objects are correct. To evaluate how well the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "model is performing in localizing the different objects in the image we report the mean average precision (mAP) performance, a widely used metric for evaluating object detection models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.153, + 0.692, + 0.167 + ], + "angle": 0, + "content": "4.4 WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH CLASS COUNTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.18, + 0.827, + 0.333 + ], + "angle": 0, + "content": "We first investigate the performance of ProbKT when the weakly supervision consists of class counts only. The query \\( q \\) for each image then consists of the number of objects from each class in the image. 
We evaluate the models on the CLEVR-mini and Molecules datasets. For the Molecules dataset, the query for an image containing 6 carbon atoms (C), 6 oxygen atoms (O) and 12 hydrogen atoms (H) would result in the following query: \\( q = ([C,O,H],[6,6,12]) \\). These weak labels in the case of the Molecules dataset are widely and easily available in the form of the chemical formula of the molecule on the image (e.g \\( C_6H_{12}O_6 \\)). The recognition of atomic level entities on images of molecules is a challenge in the field of Optical Chemical Structure Recognition (OCSR) [9; 33; 29; 19]. For the CLEVR-mini dataset, the query for an example image containing 2 spheres, 1 cylinder and 3 cubes would be \\( q = ([\\mathrm{Cube},\\mathrm{Cylinder},\\mathrm{Sphere}],[3,1,2]) \\). Formal descriptions of the queries for each task are presented in Appendix E." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.34, + 0.828, + 0.522 + ], + "angle": 0, + "content": "Results of the experiments are summarized in Table 2. We observe on both datasets that ProbKT is able to transfer knowledge from the source domain to the target domain and improve count accuracy on the target domain and in most cases also on the source domain. The count accuracy increases on both the target domain and on OOD, suggesting better generalization performance. This is in contrast with Resnet50-CAM which performs well on the target domain of the Molecules dataset but fails on OOD. We also note a significant improvement in object localization (mAP) for ProbKT on the CLEVR-mini dataset. However, fine-tuning seems detrimental for mAP on the Molecules dataset. This can be explained by the very small bounding boxes in the Molecules dataset. We therefore also report the mAP@IoU=0.5 where we observe some increase in performance after fine-tuning. Lastly, we observe that our approach outperforms WSOD-transfer on all metrics for both datasets. 
WSOD-transfer performs well on CLEVR-mini but fails for the Molecules dataset. This can be explained by the fact that this method only supports class indicators (whether a class is present in the image), which is particularly detrimental in molecules images containing a lot of objects." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.541, + 0.479, + 0.555 + ], + "angle": 0, + "content": "4.5 OTHER TYPES OF WEAK SUPERVISION" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.568, + 0.338, + 0.582 + ], + "angle": 0, + "content": "4.5.1 CLASS RANGES" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.593, + 0.827, + 0.72 + ], + "angle": 0, + "content": "The annotation of images is a tedious task, which limits the availability of fully annotated datasets. When the number of objects on an image is large, counting the exact number of objects of a particular class becomes too time-consuming. A typical annotation in this case consists oof class ranges where, instead of exact class counts, an interval is given for the count. For example an image from the CLEVR-mini dataset with more than 4 cubes, exactly 4 cylinders and less than 4 spheres would result in the following query: \\( q = ([\\text{cube}, \\text{cylinder}, \\text{sphere}], [[4, \\infty[, [4, 5[, [0, 4[}}) \\). We evaluate this experimental setup and report results in Table 3. We observe that ProbKT performs significantly better than WSOD-transfer on count accuracy, which still uses only presence/absence labels. We note that Resnet50-CAM is unable to use this type of supervision and is thus reported as \\( n / a \\)." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.735, + 0.825, + 0.859 + ], + "angle": 0, + "content": "
ModelData DomainMNIST count acc.MNIST sum acc.MNIST mAP (mAP@IoU=0.5)CLEVR* count acc.CLEVR* mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.044 ± 0.0410.506 ± 0.0630.003 ± 0.003(0.014 ± 0.011)n/an/a
Resnet50-CAMOOD0.01 ± 0.0090.015 ± 0.0040.003 ± 0.002(0.011 ± 0.007)n/an/a
Resnet50-CAMsource domain0.127 ± 0.1320.649 ± 0.1080.005 ± 0.004(0.028 ± 0.018)n/an/a
WSOD-transfertarget domainn/an/an/a0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)
WSOD-transferOODn/an/an/a0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)
WSOD-transfersource domainn/an/an/a0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)
RCNN (pre-trained)target domain0.292 ± 0.0050.298 ± 0.0050.632 ± 0.014 (0.685 ± 0.002)0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)
RCNN (pre-trained)OOD0.205 ± 0.0040.212 ± 0.0040.631 ± 0.013 (0.683 ± 0.002)0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)
RCNN (pre-trained)source domain0.961 ± 0.0080.961 ± 0.0080.917 ± 0.021 (0.988 ± 0.002)0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)
ProbKT (RCNN)target domain0.902 ± 0.0050.903 ± 0.0050.786 ± 0.021 (0.974 ± 0.001)0.971 ± 0.0060.838 ± 0.034 (0.993 ± 0.001)
ProbKT (RCNN)OOD0.863 ± 0.0080.865 ± 0.0080.778 ± 0.021 (0.97 ± 0.001)0.884 ± 0.010.812 ± 0.036 (0.991 ± 0.001)
ProbKT (RCNN)source domain0.967 ± 0.0040.967 ± 0.0040.873 ± 0.016 (0.989 ± 0.001)0.994 ± 0.0010.922 ± 0.035 (0.998 ± 0.001)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.867, + 0.825, + 0.91 + ], + "angle": 0, + "content": "Table 3: Results of the experiments on the MNIST object detection dataset and on CLEVR* dataset (*CLEVR uses ranges of class counts as labels instead of exact class counts). Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.367, + 0.119 + ], + "angle": 0, + "content": "4.5.2 COMPLEX QUERIES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.129, + 0.827, + 0.283 + ], + "angle": 0, + "content": "More complex types of weak supervision than the ones considered above are also possible. To illustrate the capabilities of our approach, we build an MNIST object detection dataset where images show multiple digits as objects. Examples images are available in Appendix B. The weak supervision is here the sum of all digits in the image: \\( q = \\mathrm{SUM}(\\mathrm{digits}) \\). Our ProbKT can seamlessly integrate this type of supervision as shown in Table 3. As all other baselines are unable process this type of supervision, we compare against a pre-trained RCNN and a variation of Resnet50-CAM where we add an extra neural network layer that sums the individual counts to give the resulting sum. We report count accuracy, mAP and sum accuracy. The sum accuracy measures the ratio of correct images where the predicted sum (instead of the label of the digits) is correct. Details about the results on extra experiments with DETR as backbone using complex types of weak supervision can be found in Appendix D." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.299, + 0.356, + 0.313 + ], + "angle": 0, + "content": "4.6 ABLATION STUDIES" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.337, + 0.368, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.186, + 0.448, + 0.372, + 0.461 + ], + "angle": 0, + "content": "(a) CLEVR iterative relabeling" + }, + { + "type": "image", + "bbox": [ + 0.387, + 0.334, + 0.585, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.396, + 0.447, + 0.595, + 0.461 + ], + "angle": 0, + "content": "(b) Molecules iterative relabeling" + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.334, + 0.807, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.448, + 0.807, + 0.462 + ], + "angle": 0, + "content": "(c) MNIST iterative relabeling" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.469, + 0.825, + 0.511 + ], + "angle": 0, + "content": "Figure 4: Iterative relabeling performance for the different datasets. Iteration 0: pretrained on source domain. Iteration 1: fine-tuned. Iteration 2: re-labeled and re-trained. Iteration 3: relabeled and re-trained. Iteration 4: relabeled and re-trained." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.538, + 0.825, + 0.622 + ], + "angle": 0, + "content": "Iterative relabeling. In Figure 4, we plot the evolution of the performance on the test sets after multiple rounds of fine-tuning and re-labeling, as detailed in Section 3.3.1. The final performance reported in the results tables is selected based on best relabeling iteration on the validation dataset. We observe that iterative relabeling after fine-tuning can improve performance significantly. Nevertheless, the benefit of iterative relabeling is less pronounced for DETR on the Molecules dataset. We impute it to the fact that the fine-tuned DETR model is less accurate on this dataset." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.628, + 0.364, + 0.643 + ], + "angle": 0, + "content": "Object detection backbone" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.649, + 0.827, + 0.72 + ], + "angle": 0, + "content": "Our method can seamlessly accommodate different object detection backbones. In Table 2, we present the results for our method with a DETR[7] and a FasterRCNN[34] backbone. We observe that FasterRCNN is typically performing better. In particular, the DETR backbone performs poorly on the Molecules dataset. This could be due to the small objects in the Molecules dataset. Indeed, Carion et al. [7] recommend to use DETR-DC5 or DETR-DC5-R101 for small objects instead." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.74, + 0.479, + 0.755 + ], + "angle": 0, + "content": "5 CONCLUSIONS AND DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Objects detection models are a key component of machine learning deployment in the real world. However, training such models usually requires large amounts of richly annotated images that are often prohibitive for many applications. In this work, we proposed a novel approach to train object detection models by leveraging richly annotated datasets from other domains and allowing arbitrary types of weak supervision on the target domain. Our architecture relies on a probabilistic logical programming engine that efficiently blends the power of symbolic reasoning and deep learning architecture. As such, our model also inherits the current limitations from the probabilistic reasoning implementations, such as higher computational complexity. We proposed several approaches to speed-up the inference process significantly and our work will directly benefit from further advances in this field. Lastly, the versatility of probabilistic programming could help support other related tasks in the future, such as image to graph translation." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "Reproducibility Statement Details for reproducing all experiments shown in this work are available in Appendix E. More details on the datasets used in the experiments can be found in Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.148, + 0.33, + 0.162 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.171, + 0.828, + 0.339 + ], + "angle": 0, + "content": "AA, MO and YM are funded by (1) Research Council KU Leuven: Symbiosis 4 (C14/22/125), Symbiosis3 (C14/18/092); (2) Federated cloud-based Artificial Intelligence-driven platform for liquid biopsy analyses (C3/20/100); (3) CELSA - Active Learning (CELSA/21/019); (4) European Union's Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No. 956832; (5) Flemish Government (FWO: SBO (S003422N), Elixir Belgium (I002819N), SB and Postdoctoral grants: S003422N, 1SB2721N, 1S98819N, 12Y5623N) and (6) VLAIO PM: Augmenting Therapeutic Effectiveness through Novel Analytics (HBC.2019.2528); (7) YM, AA, EDB, and MO are affiliated to Leuven.AI and received funding from the Flemish Government (AI Research Program). EDB is funded by a FWO-SB grant (S98819N). Computational resources and services used in this work were partly provided by the VSC (Flemish Supercomputer Center), funded by the Research Foundation - Flanders (FWO) and the Flemish Government - department EWI." 
+ }, + { + "type": "title", + "bbox": [ + 0.174, + 0.359, + 0.289, + 0.374 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.382, + 0.826, + 0.411 + ], + "angle": 0, + "content": "[1] Mnist object detection dataset. URL https://github.com/hukkelas/MNIST-ObjectDetection. accessed on 01.02.2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.418, + 0.826, + 0.447 + ], + "angle": 0, + "content": "[2] Rdkit: Open-source cheminformatics. URL https://www.rdkit.org. accessed on 01.02.2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.455, + 0.827, + 0.498 + ], + "angle": 0, + "content": "[3] Wonho Bae, Junhyug Noh, and Gunhee Kim. Rethinking class activation mapping for weakly supervised object localization. In European Conference on Computer Vision, pages 618-634. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.505, + 0.827, + 0.576 + ], + "angle": 0, + "content": "[4] Aseem Behl, Omid Hosseini Jafari, Siva Karthik Mustikovela, Hassan Abu Alhaija, Carsten Rother, and Andreas Geiger. Bounding boxes, segmentations and object coordinates: How important is recognition for 3d scene flow estimation in autonomous driving scenarios? In Proceedings of the IEEE International Conference on Computer Vision, pages 2574-2583, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.583, + 0.829, + 0.613 + ], + "angle": 0, + "content": "[5] Lukas Biewald. Experiment tracking with weights and biases, 2020. URL https://www.wandb.com/. Software available from wandb.com." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.62, + 0.827, + 0.65 + ], + "angle": 0, + "content": "[6] Hakan Bilen and Andrea Vedaldi. Weakly supervised deep detection networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2846-2854, 2016." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.657, + 0.825, + 0.701 + ], + "angle": 0, + "content": "[7] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.708, + 0.825, + 0.751 + ], + "angle": 0, + "content": "[8] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain adaptive faster r-cnn for object detection in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3339-3348, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.758, + 0.827, + 0.8 + ], + "angle": 0, + "content": "[9] Djork-Arné Clevert, Tuan Le, Robin Winter, and Floriane Montanari. Img2mol-accurate smiles recognition from molecular graphical depictions. Chemical science, 12(42):14174-14181, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.808, + 0.825, + 0.838 + ], + "angle": 0, + "content": "[10] Luc De Raedt and Kristian Kersting. Probabilistic logic learning. ACM SIGKDD Explorations Newsletter, 5(1):31-48, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.846, + 0.825, + 0.874 + ], + "angle": 0, + "content": "[11] Luc De Raedt and Angelika Kimmig. Probabilistic (logic) programming concepts. Machine Learning, 100(1):5-47, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.882, + 0.827, + 0.925 + ], + "angle": 0, + "content": "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.382, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "[13] Li Deng. The mnist database of handwritten digit images for machine learning research [best of the web]. IEEE signal processing magazine, 29(6):141-142, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.142, + 0.827, + 0.185 + ], + "angle": 0, + "content": "[14] Thomas Deselaers, Bogdan Alexe, and Vittorio Ferrari. Weakly supervised localization and learning with generic knowledge. International journal of computer vision, 100(3):275-293, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.193, + 0.825, + 0.235 + ], + "angle": 0, + "content": "[15] M. Everingham, L. Van Gool, C. K. I. Williams, J. Winn, and A. Zisserman. The Pascal visual object classes (voc) challenge. International Journal of Computer Vision, 88(2):303-338, June 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.245, + 0.825, + 0.288 + ], + "angle": 0, + "content": "[16] Eleonora Giunchiglia, Mihaela Cătălina Stoian, Salman Khan, Fabio Cuzzolin, and Thomas Lukasiewicz. Road-r: The autonomous driving dataset with logical requirements. arXiv preprint arXiv:2210.01597, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.297, + 0.825, + 0.327 + ], + "angle": 0, + "content": "[17] Ibtihaal M Hameed, Sadiq H Abdulhussain, and Basheera M Mahmmod. Content-based image retrieval: A review of recent trends. *Cogent Engineering*, 8(1):1927469, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.335, + 0.826, + 0.379 + ], + "angle": 0, + "content": "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.387, + 0.827, + 0.431 + ], + "angle": 0, + "content": "[19] Rodrigo Hormazabal, Changyoung Park, Soonyoung Lee, Sehui Han, Yeonsik Jo, Jaewan Lee, Ahra Jo, Seung Hwan Kim, Jaegul Choo, Moontae Lee, et al. Cede: A collection of expert-curated datasets with atom-level entity annotations for optical chemical structure recognition." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.439, + 0.827, + 0.483 + ], + "angle": 0, + "content": "[20] Naoto Inoue, Ryosuke Furuta, Toshihiko Yamasaki, and Kiyoharu Aizawa. Cross-domain weakly-supervised object detection through progressive domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5001-5009, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.491, + 0.827, + 0.548 + ], + "angle": 0, + "content": "[21] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.556, + 0.825, + 0.613 + ], + "angle": 0, + "content": "[22] Taekyung Kim, Minki Jeong, Seunghyeon Kim, Seokeon Choi, and Changick Kim. Diversify and match: A domain adaptive representation learning paradigm for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12456-12465, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.622, + 0.827, + 0.667 + ], + "angle": 0, + "content": "[23] Daphne Koller, Nir Friedman, Sašo Džeroski, Charles Sutton, Andrew McCallum, Avi Pfeffer, Pieter Abbeel, Ming-Fai Wong, Chris Meek, Jennifer Neville, et al. Introduction to statistical relational learning. MIT press, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.674, + 0.825, + 0.718 + ], + "angle": 0, + "content": "[24] Jogendra Nath Kundu, Rahul Mysore Venkatesh, Naveen Venkat, Ambareesh Revanur, and R Venkatesh Babu. Class-incremental domain adaptation. In European Conference on Computer Vision, pages 53-69. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.726, + 0.825, + 0.77 + ], + "angle": 0, + "content": "[25] Dong Li, Jia-Bin Huang, Yali Li, Shengjin Wang, and Ming-Hsuan Yang. Weakly supervised object localization with progressive domain adaptation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3512-3520, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.778, + 0.825, + 0.822 + ], + "angle": 0, + "content": "[26] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.829, + 0.827, + 0.873 + ], + "angle": 0, + "content": "[27] Robin Manhaeve, Sebastijan Dumancic, Angelika Kimmig, Thomas Demeester, and Luc De Raedt. Deepproblog: Neural probabilistic logic programming. Advances in Neural Information Processing Systems, 31, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.881, + 0.827, + 0.925 + ], + "angle": 0, + "content": "[28] Robin Manhaeve, Sebastijan Dumančić, Angelika Kimmig, Thomas Demeester, and Luc De Raedt. Neural probabilistic logic programming in deepproblog. 
Artificial Intelligence, 298: 103504, 2021." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "[29] Martijn Oldenhof, Adam Arany, Yves Moreau, and Jaak Simm. Chemographer: optical graph recognition of chemical compounds by deep learning. Journal of chemical information and modeling, 60(10):4506-4517, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.157, + 0.825, + 0.2 + ], + "angle": 0, + "content": "[30] Martijn Oldenhof, Adam Arany, Yves Moreau, and Jaak Simm. Self-labeling of fully mediating representations by graph alignment. In Benelux Conference on Artificial Intelligence, pages 46-65. Springer, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.211, + 0.825, + 0.241 + ], + "angle": 0, + "content": "[31] Martijn Oldenhof, Ádám Arany, Yves Moreau, and Edward De Brouwer. Updating object detection models with probabilistic programming. 2022. ICML workshop - UpML." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.25, + 0.825, + 0.293 + ], + "angle": 0, + "content": "[32] Luc De Raedt, Kristian Kersting, Siraam Natarajan, and David Poole. Statistical relational artificial intelligence: Logic, probability, and computation. Synthesis lectures on artificial intelligence and machine learning, 10(2):1-189, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.303, + 0.825, + 0.332 + ], + "angle": 0, + "content": "[33] Kohulan Rajan, Achim Zielesny, and Christoph Steinbeck. Decimer: towards deep learning for chemical image recognition. Journal of Cheminformatics, 12(1):1-9, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.342, + 0.825, + 0.384 + ], + "angle": 0, + "content": "[34] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.395, + 0.825, + 0.424 + ], + "angle": 0, + "content": "[35] Tim Rocktäschel and Sebastian Riedel. End-to-end differentiable proving. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.434, + 0.827, + 0.477 + ], + "angle": 0, + "content": "[36] Noureddin M Sadawi, Alan P Sexton, and Volker Sorge. Chemical structure recognition: a rule-based approach. In Document Recognition and Retrieval XIX, volume 8297, page 82970E. International Society for Optics and Photonics, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.486, + 0.825, + 0.53 + ], + "angle": 0, + "content": "[37] Kuniaki Saito, Yoshitaka Ushiku, Tatsuya Harada, and Kate Saenko. Strong-weak distribution alignment for adaptive object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6956–6965, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.539, + 0.827, + 0.583 + ], + "angle": 0, + "content": "[38] Adam Santoro, David Raposo, David G Barrett, Mateusz Malinowski, Razvan Pascanu, Peter Battaglia, and Timothy Lillicrap. A simple neural network module for relational reasoning. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.593, + 0.827, + 0.636 + ], + "angle": 0, + "content": "[39] Feifei Shao, Long Chen, Jian Shao, Wei Ji, Shaoning Xiao, Lu Ye, Yueting Zhuang, and Jun Xiao. Deep learning for weakly-supervised object detection and localization: A survey. Neurocomputing, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.645, + 0.825, + 0.688 + ], + "angle": 0, + "content": "[40] Hyun Oh Song, Ross Girshick, Stefanie Jegelka, Julien Mairal, Zaid Harchaoui, and Trevor Darrell. On learning to localize objects with minimal supervision. In International Conference on Machine Learning, pages 1611-1619. PMLR, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.698, + 0.825, + 0.727 + ], + "angle": 0, + "content": "[41] Leon Sterling and Ehud Y Shapiro. The art of Prolog: advanced programming techniques. MIT press, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.737, + 0.825, + 0.78 + ], + "angle": 0, + "content": "[42] Matteo Tomei, Marcella Cornia, Lorenzo Baraldi, and Rita Cucchiara. Art2real: Unfolding the reality of artworks via semantically-aware image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5849-5859, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.79, + 0.825, + 0.833 + ], + "angle": 0, + "content": "[43] Jasper Uijlings, Stefan Popov, and Vittorio Ferrari. Revisiting knowledge transfer for training object class detectors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1101-1110, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.843, + 0.825, + 0.872 + ], + "angle": 0, + "content": "[44] Thomas Winters, Giuseppe Marra, Robin Manhaeve, and Luc De Raedt. Deepstochlog: Neural stochastic logic programming. arXiv preprint arXiv:2106.12574, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.882, + 0.827, + 0.925 + ], + "angle": 0, + "content": "[45] Yao Xue, Nilanjan Ray, Judith Hugh, and Gilbert Bigras. Cell counting by regression using convolutional neural network. In European Conference on Computer Vision, pages 274-290. Springer, 2016." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.148 + ], + "angle": 0, + "content": "[46] Yuanyi Zhong, Jianfeng Wang, Jian Peng, and Lei Zhang. Boosting weakly supervised object detection with progressive knowledge transfer. In European conference on computer vision, pages 615-631. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.199 + ], + "angle": 0, + "content": "[47] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2921–2929, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.826, + 0.251 + ], + "angle": 0, + "content": "[48] Xinge Zhu, Jiangmiao Pang, Ceyuan Yang, Jianping Shi, and Dahua Lin. Adapting object detectors via selective cross-domain alignment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 687-696, 2019." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.103, + 0.371, + 0.117 + ], + "angle": 0, + "content": "A TRAINING DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.134, + 0.824, + 0.176 + ], + "angle": 0, + "content": "For the hyper-parameters the idea was to stay as close as possible to the defaults of the pre-trained standard models although some lightweight tuning was done. In Table 4 a summary is given for the hyper-parameters used for the different models." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.19, + 0.825, + 0.321 + ], + "angle": 0, + "content": "
Modeldatasetepochslrlr_step_sizelr-gammamomentumbatch sizeweight decayoptimizer
DETR pre-train (retrain)CLEVRmax 1000.00017 (7-8)0.180.0001AdamW
DETR pre-train (retrain)Mols.max 1000.000120 (20)0.180.0001AdamW
DETR pre-train (retrain)MNISTmax 1000.000115-20 (20)0.180.0001AdamW
RCNN pre-train (retrain)all datasetsmax 300.0055 (5)0.10.910.0005SGD
RCNN Finetuneall datasetsmax 200.00116Adam
DETR FinetuneCLEVR/Molsmax 200.00116Adam
DETR FinetuneMNISTmax 200.0116Adam
DETR Finetune*CLEVR/Molsmax 1000.002200.180.0001AdamW
RCNN Finetune*CLEVRmax 200.00115Adam
RCNN Finetune*Molsmax 200.0000115Adam
DETR masked box lossCLEVR/Molsmax 1000.000170.180.0001AdamW
Resnet50-CAM modelsall datasetsmax 5000.00132Adam
" + }, + { + "type": "table_caption", + "bbox": [ + 0.176, + 0.33, + 0.824, + 0.373 + ], + "angle": 0, + "content": "Table 4: Overview of hyperparameters for the different models, most hyperparamaters are left default from standard models. Tuning was mostly done on learning rate and learning rate scheduling. For every fold/dataset the best epoch/lr/lr_step_size model is used based on validation data." + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.401, + 0.295, + 0.416 + ], + "angle": 0, + "content": "B DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.432, + 0.824, + 0.515 + ], + "angle": 0, + "content": "We evaluate our approach on three different datasets: (1) a CLEVR-mini dataset, (2) a Molecules data set with images of chemical compounds, and (3) an MNIST-based object detection dataset. For each dataset, three subsets, corresponding to different domains, are used: (1) a source domain, (2) a target domain, and (3) an out-of-distribution domain (OOD). Source and target domains are split into 5 folds of train and validation sets and an independent test set. Sizes of the different splits per dataset are summarized in Table 5." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.949, + 0.507, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.294, + 0.101, + 0.703, + 0.281 + ], + "angle": 0, + "content": "
DatasetTypeSplitSize (number of samples)
MNIST object detectionSourcetrain700
MNIST object detectionSourcevalidation300
MNIST object detectionSourcetest1000
MNIST object detectionTargettrain700
MNIST object detectionTargetvalidation300
MNIST object detectionTargettest1000
MNIST object detectionOODtest1000
MoleculesSourcetrain1400
MoleculesSourcevalidation600
MoleculesSourcetest1000
MoleculesTargettrain1400
MoleculesTargetvalidation600
MoleculesTargettest1000
MoleculesOODtest1000
" + }, + { + "type": "table_caption", + "bbox": [ + 0.193, + 0.29, + 0.804, + 0.306 + ], + "angle": 0, + "content": "Table 5: Dataset sizes for the different splits. For train and validations splits 5 folds are used." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.333, + 0.396, + 0.347 + ], + "angle": 0, + "content": "B.0.1 CLEVR-MINI DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.357, + 0.827, + 0.566 + ], + "angle": 0, + "content": "The CLEVR-mini dataset for our experiments is a selection of samples from the CLEVR dataset [21]. The different types available in the CLEVR dataset are combinations of shapes (cube, sphere, and cylinder), materials (metal and rubber), and sizes (large and small). Colors are ignored as the images are first converted to grayscale before feeding them to the models. For the richly annotated source domain, we randomly select images with only sphere or cylinder-shaped objects (no cubes) and with a maximum of four objects per image and a minimum of three objects. For the weakly annotated target domain we experiment with two type of annotations. Firstly we experiment when we have the class counts of objects in the image available. Secondly, instead of the exact counts of classes in the image the annotations only specify if there is exactly one object class in the image or multiple. The advantage of this kind of labeling is that the annotator does not need to count the objects and instead just make a distinction of only one object class in image or multiple. The images in the target domain can contain all combinations of object types (including cube-shaped objects) and allow a minimum of five objects per image and a maximum of six objects per image. For the OOD dataset we also select images with all possible combinations of object types, always with 10 objects per image. Some example images from the CLEVR-mini dataset can be found in Figure 1." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.582, + 0.387, + 0.595 + ], + "angle": 0, + "content": "B.0.2 MOLECULES DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.606, + 0.825, + 0.746 + ], + "angle": 0, + "content": "The Molecules dataset contains images depicting chemical compounds. For the richly annotated source domain, a procedure similar as described in Oldenhof et al. [29, 30] was executed using an RDKit [2] fork for generating the bounding box labels for the individual atoms present in the images. In the source domain, we allow the following atom types: carbon (C), hydrogen (H), oxygen (O), and nitrogen (N). In the weakly annotated target domain, we only have the counts of the atoms present which translates to the chemical formula of the molecule in the image (\\(e.g. C_6H_{12}O_6\\)). The same classes from the source domain (C, H, O, and N) are also present in the target domain as well as an extra atom type: sulfur (S). The OOD test dataset consists of 1000 images from the external UoB dataset [36] where chemical compounds containing only the atom types present in the target domain (C, H, O, N, and S). Some example images from the Molecules dataset are visualized in Figure 5." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.761, + 0.495, + 0.775 + ], + "angle": 0, + "content": "B.0.3 MNIST OBJECT DETECTION DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.825, + 0.925 + ], + "angle": 0, + "content": "The MNIST object detection dataset is generated [1] using the original MNIST dataset [13]. Each image consists of three MNIST digits randomly positioned in the image. The MNIST object detection dataset allows experimenting with a more arbitrary type of weak supervision. Each object in this dataset represents a digit that can be aggregated. This allows to label an image with only the sum of all digits in the image instead of the class counts of the objects. 
For the richly annotated source domain digits 7, 8, and 9 are left out. The weakly annotated target domain has all possible digit classes (0-9). The labels of the target domain only contain the sum of all digits. For the OOD test dataset, images are used that contain maximum of four MNIST digits, instead of three digits as in the other domains. Some example images from the MNIST object detection dataset are visualized in Figure 6." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.101, + 0.823, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.31, + 0.828, + 0.396 + ], + "angle": 0, + "content": "Figure 5: Weakly supervised knowledge transfer with probabilistic logical reasoning (ProbKT). On the left we have source domain where a model can be trained using bounding box information labels, positions) but only on a limited set of atom types (C,H,O,N). In the middle we can see that the pre-trained model is not able to recognize the sulfur (S) from target domain correctly. On the right we see that the model is able to adapt to target domain after probabilistic reasoning using weak labels (e.g. counts of objects on image) and is able to recognize the sulfur (S)." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.437, + 0.823, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.648, + 0.828, + 0.734 + ], + "angle": 0, + "content": "Figure 6: Weakly supervised knowledge transfer with probabilistic logical reasoning (ProbKT). 
On the left we have source domain where a model can be trained using bounding box information labels, positions) but only on a limited set of digits (0, 1, 2, 3, 4, 5, 6). In the middle we can see that the pre-trained model is not able to recognize the digit eight (8) from target domain correctly. On the right we see that the model is able to adapt to target domain after probabilistic reasoning using weak labels (e.g. sum of digits on image) and is able to recognize the digit eight (8)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.768, + 0.651, + 0.784 + ], + "angle": 0, + "content": "C PROBKT AND PROBKT* SUPPLEMENTARY DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.804, + 0.363, + 0.818 + ], + "angle": 0, + "content": "C.1 FILTERING SAMPLES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.827, + 0.876 + ], + "angle": 0, + "content": "The computation complexity of inference in the probabilistic programming module grows with the number of possible worlds. In turn, the number of possible worlds grows with the number of probabilistic facts \\(\\hat{n}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.927 + ], + "angle": 0, + "content": "One avenue to reduce the computational cost of the inference step is then to artificially reduce the number of probabilistic facts in each image. Let \\(\\{\\hat{p}_{y,n} : n = 1, \\dots, \\hat{n}\\}\\) and \\(q\\) the corresponding inference query. 
We compute the filtered set of probabilistic facts \\(\\bar{p}_{y,n}\\) by setting" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.317, + 0.123, + 0.825, + 0.176 + ], + "angle": 0, + "content": "\\[\n\\bar {p} _ {y, n} ^ {k} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\hat {p} _ {y, n} ^ {k} \\geq \\delta \\\\ 0 & \\text {i f} \\exists k ^ {\\prime} \\text {s . t .} \\hat {p} _ {y, n} ^ {k ^ {\\prime}} \\geq \\delta \\\\ \\hat {p} _ {y, n} ^ {k} & \\text {o t h e r w i s e .} \\end{array} \\quad \\text {a n d} \\quad \\hat {p} _ {y, n} ^ {k} < \\delta \\right. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.187, + 0.827, + 0.245 + ], + "angle": 0, + "content": "The parameter \\(\\delta \\in [0,1]\\) is a threshold at which we consider the probabilistic fact as certain. A probability of 1 or 0 effectively discards the probabilistic fact \\(\\bar{p}_{y,n}\\) from the inference procedure. However, we also have to update the inference query \\(q\\) to reflect this filtration. We write \\(\\bar{q}\\) the filtered query \\(q\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.258, + 0.828, + 0.334 + ], + "angle": 0, + "content": "Example To illustrate this filtration strategy let's consider an MNIST image with 3 digits in the image: \\(\\{3,4,7\\}\\). The query \\(q\\) corresponds to the class labels in the images. That is \\(q = \\{3,4,7\\}\\). The object detection backbones outputs 3 box features with corresponding probabilities \\(\\{\\hat{p}_{y,0},\\hat{p}_{y,1},\\hat{p}_{y,2},\\}\\). Now let e.g. \\(\\hat{p}_{y,1}^3 = 0.99\\). We can filter out \\(\\hat{p}_{y,1}\\) (i.e. 
the prediction for a digit 3 is certain), and compute the filtered query \\(\\bar{q} = \\{4,7\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.347, + 0.825, + 0.43 + ], + "angle": 0, + "content": "Remark Equation 2 suggests a filtering based on the output probabilities only. However, one can also use information about the query for the filtration. For instance, one would only filter out a probabilistic fact if it is consistent with the query \\( q \\). In the example above, it would be wiser not to filter out e.g. \\( \\hat{p}_{y,1}^{9} = 0.99 \\) as no images are supposedly present in the image. One should then ideally propagate this probabilistic fact to the inference module such as to update the weights of the backbone and learn from this error." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.447, + 0.441, + 0.461 + ], + "angle": 0, + "content": "C.2 GRADIENT OF THE LIKELIHOOD" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.473, + 0.49, + 0.489 + ], + "angle": 0, + "content": "The ProbKT likelihood has the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.406, + 0.511, + 0.591, + 0.547 + ], + "angle": 0, + "content": "\\[\nP _ {\\mathcal {P}} (q) = \\sum_ {\\alpha \\in E _ {q}} \\prod_ {i} \\prod_ {j} \\hat {p} _ {i j} ^ {\\alpha_ {i j}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.559, + 0.564, + 0.575 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is a \"possible world\" matrix of indicator variables:" + }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.599, + 0.597, + 0.632 + ], + "angle": 0, + "content": "\\[\n\\alpha_ {i j} = \\left\\{ \\begin{array}{l l} 1 & \\text {o b j e c t i i s o f c l a s s j} \\\\ 0 & \\text {o t h e r w i s e ,} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.714, + 0.654 + ], + "angle": 0, + "content": "and \\( E_{q} \\) is the set of all possible \\( \\alpha \\) worlds compatible with the logical annotation \\( q \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.657, + 0.604, + 0.672 + ], + "angle": 0, + "content": "Lemma 1. The gradient of the likelihood has the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.677, + 0.597, + 0.715 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial P _ {\\mathcal {P}} (q)}{\\partial \\theta} = \\sum_ {i} \\sum_ {j} \\frac {\\partial p _ {i j}}{\\partial \\theta} C _ {i j},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.72, + 0.381, + 0.735 + ], + "angle": 0, + "content": "where the weight has the form:" + }, + { + "type": "equation", + "bbox": [ + 0.309, + 0.74, + 0.685, + 0.776 + ], + "angle": 0, + "content": "\\[\nC _ {i j} = P (E | O _ {i} = j) = \\sum_ {\\alpha \\in E | O _ {i = j}} \\prod_ {i ^ {\\prime}} \\prod_ {j ^ {\\prime}} I _ {(i \\neq i ^ {\\prime} \\lor j \\neq j ^ {\\prime})} p _ {i j} ^ {\\alpha_ {i j}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.789, + 0.827, + 0.847 + ], + "angle": 0, + "content": "In case of the Hungarian matching the most probable possible word is selected, which corresponds to setting the conditional probability \\( P(E|O_{i} = j) \\) to 1 if object \\( i \\) is paired with label \\( j \\) and 0 otherwise. The ProbKT gradient can be interpreted as a probability weighted extension of the gradient resulting from the Hungarian matching." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.865, + 0.334, + 0.88 + ], + "angle": 0, + "content": "D FULL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.926 + ], + "angle": 0, + "content": "In Table 6, we present the full results for the MNIST experiment. 
We report the count accuracy (i.e., correct identification of the digits in the image), sum accuracy (i.e., correct estimation of the sum of" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.102, + 0.825, + 0.324 + ], + "angle": 0, + "content": "
ModelTypemnist count acc.mnist sum acc.mnist mAP (mAP@IoU=0.5)
Resnet50-CAM (baseline)In-distribution0.044 ± 0.0410.506 ± 0.0630.003 ± 0.003(0.014 ± 0.011)
Resnet50-CAM (baseline)OOD0.01 ± 0.0090.015 ± 0.0040.003 ± 0.002(0.011 ± 0.007)
Resnet50-CAM (baseline)Source Domain0.127 ± 0.1320.649 ± 0.1080.005 ± 0.004(0.028 ± 0.018)
DETR (Pre-trained)In-distribution0.26 ± 0.0120.262 ± 0.010.518 ± 0.014 (0.637 ± 0.017)
DETR (Pre-trained)OOD0.173 ± 0.010.177 ± 0.0090.51 ± 0.012 (0.632 ± 0.015)
DETR (Pre-trained)Source Domain0.859 ± 0.0310.86 ± 0.0310.781 ± 0.009 (0.957 ± 0.008)
DETR (ProbKT)In-distribution0.662 ± 0.0640.664 ± 0.0650.615 ± 0.025 (0.856 ± 0.037)
DETR (ProbKT)OOD0.532 ± 0.0830.533 ± 0.0820.591 ± 0.03 (0.845 ± 0.038)
DETR (ProbKT)source domain0.878 ± 0.0230.879 ± 0.0230.737 ± 0.014 (0.952 ± 0.009)
RCNN (Pre-trained)In-distribution0.292 ± 0.0050.298 ± 0.0050.632 ± 0.014 (0.685 ± 0.002)
RCNN (Pre-trained)OOD0.205 ± 0.0040.212 ± 0.0040.631 ± 0.013 (0.683 ± 0.002)
RCNN (Pre-trained)source domain0.961 ± 0.0080.961 ± 0.0080.917 ± 0.021 (0.988 ± 0.002)
RCNN (ProbKT)In-distribution0.902 ± 0.0050.903 ± 0.0050.786 ± 0.021 (0.974 ± 0.001)
RCNN (ProbKT)OOD0.863 ± 0.0080.865 ± 0.0080.778 ± 0.021 (0.97 ± 0.001)
RCNN (ProbKT)source domain0.967 ± 0.0040.967 ± 0.0040.873 ± 0.016 (0.989 ± 0.001)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.334, + 0.825, + 0.362 + ], + "angle": 0, + "content": "Table 6: Results of the SUM experiments on the MNIST object detection dataset. Reported test accuracies over the 5 folds." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.428, + 0.825, + 0.511 + ], + "angle": 0, + "content": "the digits in the image) and the mean average precision (mAP) (i.e. a common object detection metric that reflects the ability to predict the positions and labels of the objects). We observe that the Resnet baseline performs poorly, lacking the necessary logic to process this dataset. We used both DETR and RCNN as object detection backbones in our experiments, showing high test accuracies when fine-tuned with our approach. As the results suggest, RCNN backbones lead to better performance than the DETR backbone." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.56, + 0.825, + 0.858 + ], + "angle": 0, + "content": "
ModelData DomainCLEVR count acc.CLEVR mAP (mAP@IoU=0.5)Mol. count. accMol. mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.97 ± 0.0050.036 ± 0.014 (0.200 ± 0.071)0.978 ± 0.0040.0 ± 0.0 (0 ± 0)
Resnet50-CAMOOD0.831 ± 0.0160.029 ± 0.010 (0.153 ± 0.044)0.0 ± 0.0n/a1
Resnet50-CAMsource domain0.993 ± 0.0030.035 ± 0.019 (0.178 ± 0.084)0.828 ± 0.0210.0 ± 0.0 (0 ± 0)
WSOD-transfertarget domain0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)0.001 ± 0.00.018 ± 0.004 (0.061 ± 0.011)
WSOD-transferOOD0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)0.003 ± 0.002n/a1
WSOD-transfersource domain0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)0.0 ± 0.00.021 ± 0.003 (0.069 ± 0.009)
DETR-jointtarget domain0.159 ± 0.1330.579 ± 0.012 (0.684 ± 0.019)0.357 ± 0.1960.197 ± 0.055 (0.481 ± 0.071)
DETR-jointOOD0.084 ± 0.0390.534 ± 0.012 (0.66 ± 0.012)0.024 ± 0.021n/a1
DETR-jointsource domain0.923 ± 0.0490.908 ± 0.017 (0.992 ± 0.001)0.232 ± 0.1270.23 ± 0.063 (0.565 ± 0.08)
DETR (Pre-trained)target domain0.0 ± 0.00.498 ± 0.019 (0.533 ± 0.024)0.464 ± 0.0330.314 ± 0.006 (0.542 ± 0.006)
DETR (Pre-trained)OOD0.0 ± 0.00.477 ± 0.013 (0.531 ± 0.021)0.002 ± 0.001n/a1
DETR (Pre-trained)source domain0.97 ± 0.0090.945 ± 0.009 (0.992 ± 0.001)0.581 ± 0.0220.409 ± 0.005 (0.722 ± 0.004)
ProbKT*(DETR)target domain0.949 ± 0.0050.728 ± 0.014 (0.99 ± 0.003)0.589 ± 0.0420.373 ± 0.02 (0.669 ± 0.045)
ProbKT*(DETR)OOD0.741 ± 0.0380.606 ± 0.017 (0.977 ± 0.004)0.008 ± 0.008n/a1
ProbKT*(DETR)source domain0.985 ± 0.0040.937 ± 0.006 (0.995 ± 0.001)0.275 ± 0.0660.371 ± 0.021 (0.649 ± 0.041)
ProbKT(DETR)target domain0.946 ± 0.0140.803 ± 0.011 (0.989 ± 0.006)0.508 ± 0.0270.204 ± 0.02 (0.507 ± 0.014)
ProbKT(DETR)OOD0.726 ± 0.0350.715 ± 0.006 (0.974 ± 0.006)0.004 ± 0.003n/a1
ProbKT(DETR)source domain0.987 ± 0.0030.948 ± 0.005 (0.995 ± 0.001)0.549 ± 0.0260.38 ± 0.013 (0.713 ± 0.006)
RCNN (pre-trained)target domain0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)0.592 ± 0.0070.568 ± 0.005 (0.785 ± 0.004)
RCNN (pre-trained)OOD0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)0.348 ± 0.036n/a1
RCNN (pre-trained)source domain0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)0.948 ± 0.0040.737 ± 0.005 (0.979 ± 0.0)
ProbKT*(RCNN)target domain0.974 ± 0.0040.855 ± 0.025 (0.994 ± 0.001)0.945 ± 0.0060.24 ± 0.042 (0.788 ± 0.073)
ProbKT*(RCNN)OOD0.901 ± 0.0170.827 ± 0.022 (0.991 ± 0.001)0.592 ± 0.032n/a1
ProbKT*(RCNN)source domain0.993 ± 0.0020.95 ± 0.021 (0.998 ± 0.0)0.96 ± 0.0030.655 ± 0.01 (0.974 ± 0.004)
ProbKT(RCNN)target domain0.975 ± 0.0030.856 ± 0.039 (0.993 ± 0.001)0.942 ± 0.0090.289 ± 0.041 (0.829 ± 0.054)
ProbKT(RCNN)OOD0.89 ± 0.0220.833 ± 0.042 (0.991 ± 0.001)0.603 ± 0.037n/a1
ProbKT(RCNN)source domain0.995 ± 0.0020.941 ± 0.041 (0.998 ± 0.001)0.96 ± 0.0020.666 ± 0.005 (0.978 ± 0.002)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.867, + 0.825, + 0.896 + ], + "angle": 0, + "content": "Table 7: Results of the experiments for the datasets: CLEVR-mini and Molecules. Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution." + }, + { + "type": "page_footnote", + "bbox": [ + 0.2, + 0.906, + 0.578, + 0.921 + ], + "angle": 0, + "content": "\\(^{1}\\)OOD test set of Molecules dataset has no bounding box labels." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.461, + 0.119 + ], + "angle": 0, + "content": "E SOURCE CODE AND DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.828, + 0.19 + ], + "angle": 0, + "content": "The source code and basic instructions are available on https://github.com/molden/ProbKT. The source code integrates features from the Weights & Biases (WandB) platform [5]. Basic features are supported without the need for an account on WandB but to make full use of all features we recommend to create an account." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.197, + 0.4, + 0.212 + ], + "angle": 0, + "content": "Datasets can be downloaded here:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.223, + 0.797, + 0.238 + ], + "angle": 0, + "content": "- CLEVR-mini dataset https://figshare.com/s/db012765e5a38e14ef9c" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.242, + 0.777, + 0.256 + ], + "angle": 0, + "content": "- Molecules dataset https://figshare.com/s/3dc3508d39bf4cff8c7f" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.261, + 0.825, + 0.288 + ], + "angle": 0, + "content": "- MNIST object detection dataset https://figshare.com/s/c760de026f000524db5a" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.223, + 0.825, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.301, + 0.828, + 0.331 + ], + "angle": 0, + "content": "ProbLog script used in the ProbKT Probabilistic logical reasoning framework for counting of objects on an image (as on CLEVR-mini dataset):" + }, + { + "type": "code", + "bbox": [ + 0.172, + 0.342, + 0.861, + 0.609 + ], + "angle": 0, + "content": ":- use_module(library(lists)). \nnn(mnist_net,[X],Y,[0,1,2,3,4,5,6,7,8,9,10,11]) :: digit(X,Y). \ncount([],X,0). \ncount([X|T],X,Y):- count(T,X,Z), Y is 1+Z. \ncount([X1|T],X,Z):- X1\\=X,count(T,X,Z). \ncountall(List,X,C) :- sort(List,List1), member(X,List1), count(List,X,C). \nroll([],L,L). \nroll([H|T],A,L):- roll(T,[Y|A],L), digit(H,Y). \ncountpart(List,[],[]). \ncountpart(List,[H|T],[F|L]):- countall(List,H,F), countpart(List,T,L). \ncount.objects(X,L,C):- roll(X,[],Result), countpart(Result,L,C)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.619, + 0.828, + 0.662 + ], + "angle": 0, + "content": "The query \( q \) in the case of class counts would be count.objects(X, L, C). 
For example an image \( X \) with 1 small metal cube and 3 large rubber cylinders would result in the following query: count.objects(X, [small metal Cube, large rubber_cylinder], [1, 3])." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.668, + 0.828, + 0.698 + ], + "angle": 0, + "content": "ProbLog script used in the ProbKT Probabilistic logical reasoning framework for aggregating the digits on an image:" + }, + { + "type": "code", + "bbox": [ + 0.172, + 0.709, + 0.725, + 0.851 + ], + "angle": 0, + "content": ":- use_module(library(lists)). \nnn(mnist_net, [X], Y, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) :: digit(X,Y). \nsum([], 0). \nsum([X|T], Y) :- sum(T,Z), Y is X+Z. \nroll([], L, L). \nroll([H|T], A, L) :- roll(T, [Y|A], L), digit(H,Y). \nsum_digits(X,Y) :- roll(X, [], Result), sum(Result,Y)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.861, + 0.825, + 0.891 + ], + "angle": 0, + "content": "The query \( q \) in the case of sum of digits would be sum_digits(X, Y). For example an image \( X \) with as sum of digits 12 would result in the following query: sum_digits(X, 12)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "ProbLog script used in the ProbKT Probabilistic logical reasoning framework for taking into account non-exact counts on images" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code", + "bbox": [ + 0.171, + 0.104, + 0.782, + 0.578 + ], + "angle": 0, + "content": ":- use_module(library(lists)). \nnn(mnist_net, [X], Y, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) :: digit(X,Y). 
\ncount([],X,0). \ncount([X|T],X,Y):- count(T,X,Z), Y is 1+Z. \ncount([X1|T],X,Z):- X1\\=X, count(T,X,Z). \ncountall(List,X,C) :- sort(List,List1), member(X,List1), count(List,X,C). \nroll([],L,L). \nroll([H|T],A,L):- roll(T,[Y|A],L), digit(H,Y). \ncountpart(List,[],[]). \ncountpart(List,[H|T],[F|L]):- countall(List,H,F), countpart(List,T,L). \ncheck_count(F,C,0):- F =:= C. \ncheck_count(F,C,1):- F >= C. \ncheckpart([],[],[]). \ncheckpart([F|FT],[C|CT],[S|ST]):- check_count(F,C,S), checkpart(FT,CT,ST). \nrange_countobjects(X,L,C,S):- roll(X,[],Result), countpart(Result,L,F), checkpart(F,C,S)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.588, + 0.825, + 0.646 + ], + "angle": 0, + "content": "The query \( q \) in the case of non exact counts of 
objects would be range_countobjects(X,L,C,S). For example an image \( X \) with exactly one metal small cube and multiple rubber large spheres would result in the following query: range_countobjects(X,[s_metal Cube,l_rubber Sphere],[1,1],[0,1])." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.66, + 0.528, + 0.675 + ], + "angle": 0, + "content": "E.1 INFERENCE EXAMPLE FOR MNIST DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.686, + 0.827, + 0.716 + ], + "angle": 0, + "content": "To illustrate the inference process let us follow the evaluation of the clause sum([x1, x2], 8), which can result from the query sum_digits(X, 8) in case of two visible digits in the image X." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.721, + 0.485, + 0.737 + ], + "angle": 0, + "content": "This clause is true if and only if \( X_{1} + X_{2} = 8 \)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.743, + 0.825, + 0.771 + ], + "angle": 0, + "content": "In case of MNIST digits \(\{0,1,\dots ,9\}\) enumerating the possible worlds would give the following set:" + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.776, + 0.825, + 0.794 + ], + "angle": 0, + "content": "\[\n\{(0, 8), (1, 7), (2, 6), \dots , (8, 0) \} \tag {3}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.806, + 0.573, + 0.821 + ], + "angle": 0, + "content": "After summing the probability of all possible worlds we get:" + }, + { + "type": "equation", + "bbox": [ + 0.344, + 0.826, + 0.825, + 0.843 + ], + "angle": 0, + "content": "\[\np _ {1} (0) p _ {2} (8) + p _ {1} (1) p _ {2} (7) + \dots + p _ {1} (8) p _ {2} (0), \tag {4}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.847, + 0.701, + 0.864 + ], + "angle": 0, + "content": "where \( p_1 \) and \( p_2 \) are the distributions of the random variables \( X_1 \) and \( X_2 \) respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.869, + 0.316, + 0.884 + ], + "angle": 0, + "content": "Or in a general form:" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.888, + 0.825, + 0.922 + ], + "angle": 0, + "content": "\\[\np _ {Y} (Y) = \\sum_ {X _ {1}} p _ {1} \\left(X _ {1}\\right) p _ {2} \\left(Y - X _ {1}\\right). \\tag {5}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.479, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.104, + 0.825, + 0.147 + ], + "angle": 0, + "content": "As expected the distribution of the sum is the convolution of the distributions of the two terms. This observation trivially generalizes to more than two terms. The cost function corresponding to the maximum likelihood estimation is the negative log-likelihood \\( -\\log (p_{Y}(Y)) \\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "21" + } + ] +] \ No newline at end of file diff --git a/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_origin.pdf b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9886a37a5c39ab18a95c551646e4e69153240ddc --- /dev/null +++ b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/95efb798-c3e3-43db-b4b3-c866d3d1db85_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:606d8ae42d37831de18fddb18faaa24d4b465e91b2b091b1d7070ef6eb6b9d6d +size 1521830 diff --git a/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/full.md b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3347fe1e586bc8dc39b9ce13dfaa180e0766e326 --- /dev/null +++ b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/full.md @@ -0,0 +1,633 @@ +# WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH PROBABILISTIC LOGICAL REASONING FOR OBJECT DETECTION + +Martijn Oldenhof + +ESAT-STADIUS + +KU Leuven, Belgium + +martijn. oldenhof@kuleuven.be + +Adam Arany + +ESAT-STADIUS + +KU Leuven, Belgium + +adam.arany@esat.kuleuven.be + +Yves Moreau + +ESAT-STADIUS + +KU Leuven, Belgium + +yves.moreau@esat.kuleuven.be + +Edward De Brouwer + +ESAT-STADIUS + +KU Leuven, Belgium + +edward.debrouwer@gmail.com + +# ABSTRACT + +Training object detection models usually requires instance-level annotations, such as the positions and labels of all objects present in each image. 
Such supervision is unfortunately not always available and, more often, only image-level information is provided, also known as weak supervision. Recent works have addressed this limitation by leveraging knowledge from a richly annotated domain. However, the scope of weak supervision supported by these approaches has been very restrictive, preventing them to use all available information. In this work, we propose ProbKT, a framework based on probabilistic logical reasoning that allows to train object detection models with arbitrary types of weak supervision. We empirically show on different datasets that using all available information is beneficial as our ProbKT leads to significant improvement on target domain and better generalization compared to existing baselines. We also showcase the ability of our approach to handle complex logic statements as supervision signal. Our code is available at https://github.com/molden/ProbKT + +# 1 INTRODUCTION + +Object detection is a fundamental ability of numerous high-level machine learning pipelines such as autonomous driving [4; 16], augmented reality [42] or image retrieval [17]. However, training state-of-the-art object detection models generally requires detailed image annotations such as the box-coordinates location and the labels of each object present in each image. If several large benchmark datasets with detailed annotations are available [26; 15], providing such detailed annotation on new specific datasets comes with a significant cost that is often not affordable for many applications. + +More frequently, datasets come with only limited annotation, also referred to as weak supervision. This has sparked research in weakly-supervised object detection approaches [25; 6; 40], using techniques such as multiple instance learning [40] or variations of class activation maps [3]. 
However, these approaches have been shown to significantly underperform their fully-supervised counterparts in terms of robustness and accurate localization of the objects [39]. + +An appealing and intuitive approach to improve the performance of weakly supervised object detection is to perform transfer learning from an existing object detection model pre-trained on a fully annotated dataset [14; 46; 43]. This approach, also referred to as transfer learning or domain adaptation, consists in leveraging transferable knowledge from the pre-trained model (such as bounding boxes prediction capabilities) to the new weakly supervised domain. This transfer has been embodied in different ways in the literature. Examples include a simple fine-tuning of the classifier of bounding box proposals of the pre-trained model [43], or an iterative relabeling of the weakly supervised dataset for retraining a new full objects detection model on the re-labeled data [46]. + +![](images/243aadad3b25bc99b81b9d9924300b2d9c50d1ddde480b20145264765a247667.jpg) +Figure 1: ProbKT: Weakly supervised knowledge transfer with probabilistic logical reasoning. (Left) A model can be trained on the source domain using full supervision (labels, positions) but only on a limited set of shapes (cylinders and spheres). (Middle) The pre-trained model does not recognize the cubes from the target domain correctly. (Right) The model can adapt to the target domain after applying ProbKT and can recognize the cubes. + +However, existing approaches are very restrictive in the type of weak supervision they are able to harness. Indeed, some do not support new object classes in the new domain [20], others can only use a label indicating the presence of an object class [46]. However, in practice, the supervision on the new domain can come in very different forms. For instance, the count of each object class can be given, such as in atom detection from molecule images where only chemical formula might be given. 
Or, when many objects are present on an image, a range can be provided instead of an exact class counts (e.g. "there are at least 4 cats on this image"). Crucially, this variety of potential supervisory signals on the target domain cannot be fully utilized by existing domain adaption approaches. + +To address this limitation, we introduce ProbKT, a novel framework that allows to generalize knowledge transfer in object detection to arbitrary types of weak supervision using neural probabilistic logical reasoning [27]. This paradigm allows to connect probabilistic outputs of neural networks with logical rules and to infer the resulting probability of particular queries. One can then evaluate the probability of a query such as "the image contains at least two animals" and differentiate through the probabilistic engine to train the underlying neural network. Our approach allows for arbitrarily complex logical statements and therefore supports weak supervision like class counts or ranges, among other. To our knowledge, this is the first approach to allow for such versatility in utilizing the available information on the new domain. + +To assess the capabilities of this framework, we provide extensive empirical analysis of multiple object detection datasets. Our approach also supports any type of objects detection backbone architecture. We thus use two popular backbone architectures, DETR [7] and RCNN [34] and evaluate their performance in terms of accuracy, convergence as well of generalization on out-of-distribution data. Our experiments show that, due to its ability to use the complete supervisory signal, our approach outperforms previous works in a wide range of setups. + +Key contributions: (1) We propose a novel knowledge transfer framework for object detection relying on probabilistic programming that uniquely allows using arbitrary types of weak supervision on the target domain. 
(2) We make our approach amenable to different levels of computational capabilities by proposing different approximations of ProbKT. (3) We provide an extensive experimental setup to study the capabilities of our framework for knowledge transfer and out-of-distribution generalization. + +# 2 RELATED WORKS + +A comparative summary of related works is given in Table 1. We distinguish three main categories: (1) pure weakly supervised object detection methods (WSOD) that do not leverage a richly annotated source domain, (2) unsupervised object detection methods with knowledge transfer (DA or domain adaptation methods) that do not use supervision on the target domain and (3) weakly supervised + +object detection methods with knowledge transfer (WSOD w/transfer) that are restrictive in the type of supported weak supervision. To our knowledge, our work is the first to allow for arbitrary supervision on the target domain (and supporting new classes in the target domain) while also leveraging knowledge from richly annotated domains. ProbKT supports arbitrary weak supervision thanks to the inherited expressiveness of Prolog [41] which is based on a subset of first-order predicate logic, Horn clauses and is Turing-complete. + +Weakly supervised object detection (WSOD) This class of method allows training object detection models with only weak supervision. One can thus train these approaches directly on the target domain. However, they do not allow to leverage potentially available richly annotated datasets, which has been shown to lead to worse performance [39]. Different flavors of WSOD architectures have been proposed relying on a variety of implementations such as multiple instance learning (MIL)-based [25; 40] or class activation (CAM) based [47; 3]. In contrast to WSOD methods, our approach is designed to exploit existing richly annotated datasets and thus provides increased performance on the target domain. 
For a comprehensive review of WSOD methods we refer the reader to Shao et al. [39]. + +Domain adaptation methods (DA) In contrast to WSOD methods, domain adaptation methods do rely on fully supervised source domain dataset. However, they do not assume any supervision on the target domain and are therefore not equipped to exploit such signal when available [37; 8; 22; 48]. + +WSOD with knowledge transfer Our approach belongs to the class of weakly supervised object detection models with knowledge transfer. These methods aim to transfer knowledge from a source domain, where full supervision is available, to a target domain where only weak labels are available. Existing work in this class of models only allows for limited type of supervision of the target domain. Most architectures only support a label indicating the presence or absence of a class of object in the image[14; 46; 43]. Inoue et al. [20] allows for class counts as weak supervision but unfortunately does not allow for new classes in the target domain. In contrast, ProbKT natively allows for class counts and new classes as well as other types of weak supervision. + +Neural probabilistic logical reasoning Probabilistic logical reasoning combines logic and probability theory. Favored for its high-level reasoning abilities, it was introduced as an alternative way to deep learning in the quest for artificial intelligence [10]. Statistical artificial intelligence [32; 23] and probabilistic logic programming [11] are examples of areas relying on these premises. In a unification effort, researchers have proposed hybrid architectures, embedding both deep learning and logical reasoning components [38; 35]. Our work builds upon the recent advances in the field, where combinations of deep learning, logical, and probabilistic approaches were introduced [27], allowing high-level reasoning with uncertainty using differentiable neural network architectures. + +
MethodTypeAnnotated source dom.Weak supervisionNew classesImplementation
Li et al. [25]WSODXpresence/absenceMIL-based
Bilen and Vedaldi [6]WSODXpresence/absencespatial pyramid pooling layer
Song et al. [40]WSODXpresence/absenceMIL based
Zhou et al. [47]WSODXmixCAM-based
Bae et al. [3]WSODXmixCAM based
Kundu et al. [24]DAone-shotClass-Incremental DA
Saito et al. [37]DAXXStrong-Weak Distribution Alignment
Chen et al. [8]DAXXAdversarial training
Kim et al. [22]DAXXAdversarial training and Domain Diversification
Zhu et al. [48]DAXXselective region adaptation framework
Deselaers et al. [14]WSOD w/transferpresence/absenceCRF-based, iteratively
Zhong et al. [46]WSOD w/transferpresence/absenceMIL based, iteratively
Uijlings et al. [43]WSOD w/transferpresence/absenceMIL based, non iteratively
Inoue et al. [20]WSOD w/transferclass countsXDA + pseudolabeling, iteratively
ProbKT (ours)WSOD w/transferarbitraryProbabilistic logical reasoning, iteratively
+ +Table 1: Summary table of related works with weakly supervised object detection(WSOD), Domain Adaptation(DA) and weakly supervised knowledge transfer methods (WSOD w/ transfer). + +# 3 METHODOLOGY + +# 3.1 PROBLEM STATEMENT + +We consider the problem of weakly supervised knowledge transfer for object detection. Using a model trained on a richly annotated source domain, we aim at improving its performance on a less richly annotated target domain. + +Let $\mathcal{D}_s = \{(I_s^i, b_s^i, y_s^i) : i = 1, \dots, N_s)\}$ be a dataset issued from the source domain and consisting of $N_s$ images $I_s$ along with their annotations. We write $b_s^i \in \mathbb{R}^{n_i \times 4}$ and $y_s^i \in \{1, \dots, K_s\}^{n_i}$ for the box coordinates and class labels of objects in image $I_s^i$ , $n_i$ is the number of objects present in image $I_s^i$ and $K_s$ is the total number of object classes in the source domain. This represents the typical dataset required to train classical fully-supervised object detection architectures. The target dataset $\mathcal{D}_t = \{(I_t^i, q_t^i) : i = 1, \dots, N_t)\}$ contains $N_t$ image from the target domain along with image-level annotations $q_t^i$ . These annotations are logical statements about the content of the image in terms of object classes and their location. Examples include the presence of different classes in each image (i.e., the classical assumption in weakly supervised object detection) but also extends to the counts of classes or a complex combination of counts of objects attributes (e.g., "two red objects, and at least two bicycles"). What is more, the logical statements $q_t^i$ can include classes not already present in the source domain. This type of logical annotation is then strictly broader than the restrictive supervision usually assumed. 
+ +Based on the availability of a source dataset and a target dataset as described above, our goal is then to harness the available detailed information from the source domain to perform accurate object detection on the target domain. A graphical illustration of this process is given in Figure 1. + +# 3.2 BACKGROUND + +# 3.2.1 OBJECT DETECTION + +Object detection aims at predicting the location and labels of objects in images. One then wishes to learn a parametric function $f_{\theta}:\mathcal{I}\rightarrow \{\mathcal{B}\times \mathbb{R}^{K}\}^{\mathbb{Z}}$ with $f_{\theta}(I) = \{(\hat{b},\hat{p}_y)\}^{\hat{n}} = \{(\hat{b}_i,\hat{p}_{y,i}):i = 1,\dots,\hat{n}\}$ such that the distance between predicted and true boxes and labels, $d(\{\hat{(b},\hat{p}_y)\}^{\hat{n}},\{(b,y)\}^{n})$ , is minimum. Objects detection architecture would usually output box features proposals $\{h_i:i = 1,\dots,\hat{n}\}$ conditioned on which they would predict the probability vector of class labels $\hat{p}_{y,i} = g_p(h_i)$ and the box location predictions $\hat{b}_i = g_b(h_i)$ using shared parametric functions $g_{p}(\cdot)$ and $g_{b}(\cdot)$ . For an object $n$ , we write the predicted probability of the object belonging to class $k$ as $\hat{p}_{y,n}^{k}$ . + +# 3.2.2 PROBABILISTIC LOGICAL REASONING + +Probabilistic logical reasoning uses knowledge representation relying on probabilities that allow encoding uncertainty in knowledge. Such a knowledge is encoded in a probabilistic logical program $\mathcal{P}$ as a set of $N$ probabilistic facts $U = \{U_{1},\dots,U_{N}\}$ and $M$ logical rules $F = \{f_{1},\dots f_{M}\}$ connecting them. A simple example of probabilistic fact is "Alice and Bob will each pass their exam with probability 0.5" and an example of logical rule is "if both Alice and Bob pass their exam, they will host a party". 
Combining probabilistic facts and logical rules, one can then construct complex probabilistic knowledge representation, that can also be depicted as probabilistic graphical models. + +Probabilistic logical programming allows to perform inference by computing the probability of a particular statement or query. For instance, one could query the probability that "Alice and Bob will host a party". This query is executed by summing over the probabilities of occurrence of the different worlds $w = \{u_1, \dots, u_N\}$ (i.e. individual realization of the set of probabilistic facts) that are compatible with the query $q$ . The probability of a query $q$ in a program $\mathcal{P}$ can then be inferred as $P_{\mathcal{P}}(q) = \sum_{w} P(w) \cdot \mathbb{I}[F(w) \equiv q]$ , where $F(w) \equiv q$ stands for the fact that propagation of the realization $w$ across the knowledge graph, according to the logical rules $F$ leads to $q$ being true. + +Remarkably, recent advances in probabilistic programming have led to learnable probabilistic facts [27]. In particular, the probability of a fact can be generated by a neural network with learnable weights. Such a learnable probabilistic fact is then referred to as a neural predicate $U^{\theta}$ , where we make the dependence on the weights $\theta$ explicit. One can then train these weights to minimize a loss that depend on the probability of a query $q$ : $\hat{\theta} = \arg \min_{\theta} \mathcal{L}(P(q \mid \theta))$ . + +Our approach builds upon this ability to learn neural predicates and uses DeepProbLog [27] as the probabilistic reasoning backbone. DeepProbLog is a neural probabilistic logic programming language that allows to conveniently perform inference and differentiation with neural predicates. We refer the reader to the excellent introduction of Manhaeve et al. [28] for further details about this framework. 
+ +# 3.3 PROBKT: WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH PROBABILISTIC LOGICAL REASONING + +A graphical description of our approach is presented in Figure 2. Our framework starts from a pre-trained object detection model $f_{\theta}$ on the source domain. The backbone of this model is extracted and inserted into a new object detection model $f_{\theta}^{*}$ with new target box position predictors and box label classifiers. This new model is then used to predict box proposals along with the corresponding box features on target domain images $I_{t}$ . These box features are then fed to a new target box position predictor and box label classifier. The predictions of this classifier are considered neural predicates and are given to a probabilistic logical module. This module evaluates the probability of queries $q_{t}$ , the loss, and the corresponding gradient that can be backpropagated to the classifier and the backbone. As we want to maximize the probability of the queries being true, we use the following loss function: + +$$ +\mathcal {L} _ {\theta} = \sum_ {\left(I _ {t}, q _ {t}\right) \in \mathcal {D} _ {t}} - \log P _ {\mathcal {P}} \left(q _ {t} \mid f _ {\theta} ^ {*} \left(I _ {t}\right)\right) \tag {1} +$$ + +In theory, the backbone can be trained end to end with this procedure. Our experiments showed that only updating the box features classifiers resulted in more stability as also shown in previous works [46]. We then adopt here the same iterative relabeling strategy, as described next. + +![](images/dffc40d15684072de167b6480bb4e4eca8afe6c9bdae887f549c4e44330c8f86.jpg) +Figure 2: ProbKT. The pre-trained object detection backbone outputs the box features $h$ for the detected objects. Box classifiers (red) and box position predictors (blue) then predict corresponding label predictions $\hat{p}_y$ and box position predictions $\hat{b}$ that are fed to the probabilistic reasoning layer. 
This layer computes the probability of the query along with the gradients with respect to $\hat{p}_y$ and $\hat{b}$ that can be backpropagated through the entire network. + +# 3.3.1 ITERATIVE RELABELING + +The approach described above allows us to fine-tune our model $f_{\theta}^{*}$ to the target domain. To further improve the performance, we propose an iterative relabeling strategy that consists of multiple steps: fine-tuning, re-labeling and re-training. A similar approach has also been proposed by Zhong et al. [46]. + +Fine-tuning. This step corresponds to training ProbKT on the weakly supervised labels, by minimizing the loss of Equation 1. + +Re-labeling. Once ProbKT has been trained, we can use its predictions to annotate images in the target domain. In practice, we only relabel images for which the model predictions comply with the available query labels in order to avoid too noisy labels. + +Re-training. The re-labeled target domain can be used to re-train the object detection backbone of ProbKT in a fully-supervised fashion. + +This procedure can be repeated multiple times to improve the quality of the relabeling and the quantity of relabelled samples in the target domain dataset. A graphical representation of the relabeling pipeline is presented in Figure 3. + +![](images/0d147b1754e149afa89c63b8de727c6f155d030239363796f62a085bc8d10f91.jpg) +Figure 3: Iterative relabeling. A full cycle is composed of a fine-tuning, a re-labeling and a re-training step. After one cycle, the fine-tuning step and/or re-labeling step can be iteratively repeated. + +# 3.3.2 COMPUTATIONAL COMPLEXITY AND APPROXIMATIONS + +The computational complexity of inference in probabilistic programming depends on the specific query $q$ and several approximations have been proposed for improving the computation time [44].
We propose two approaches for reducing the computational cost adapted to object detection: (1) filtering the data samples before applying ProbKT (see Appendix Section C.1) or (2) when the supervision consists of the class label counts, considering only the most probable world (ProbKT*) instead of all possible worlds. + +# 3.3.3 PROBKT*: THE MOST PROBABLE WORLD AND CONNECTION TO HUNGARIAN MATCHING + +The probabilistic inference step requires a smart aggregation of all worlds compatible with the query $q$ . Yet, in certain cases, one can reduce the computational cost by only considering the most probable world. Indeed, consider the case when the query consists of the list of different class labels in the images. For a number of boxes $\hat{n}$ proposed by the object detection model, the query can be written as the set of labels $q = \{y^i : i = 1, \dots, \hat{n}\}$ . If we further write $\hat{p}_{y,n}^k$ as the probability of the label of box $n$ belonging to class $k$ given by the model (as introduced in Section 3.2.1), we have: + +$$ +P _ {\mathcal {P}} (q) = \sum_ {j = 1} ^ {\hat {n}!} \hat {p} _ {y, 0} ^ {\sigma_ {j} (0)} \cdot \hat {p} _ {y, 1} ^ {\sigma_ {j} (1)} \cdot \ldots \cdot \hat {p} _ {y, \hat {n}} ^ {\sigma_ {j} (\hat {n})} = \sum_ {j = 1} ^ {\hat {n}!} \prod_ {n} \hat {p} _ {y, n} ^ {\sigma_ {j} (n)} +$$ + +where $\sigma_{j}$ corresponds to the $j^{th}$ permutation of the query vector $q$ . To avoid the computation of each possible world contribution, one can use only the configuration with the largest contribution to $P_{\mathcal{P}}(q)$ and discard the other ones. + +This possible world corresponds to the permutation $\sigma^{*}$ that satisfies: + +$$ +\sigma^ {*} = \underset {\sigma} {\arg \max} \, \log \Big( \prod_ {n} \hat {p} _ {y, n} ^ {\sigma (n)} \Big) = \underset {\sigma} {\arg \max} \sum_ {n} \log \hat {p} _ {y, n} ^ {\sigma (n)} \approx \underset {\sigma} {\arg \max} \sum_ {n} \hat {p} _ {y, n} ^ {\sigma (n)} = \underset {\sigma} {\arg \min} \sum_ {n} \left( 1 - \hat {p} _ {y, n} ^ {\sigma (n)} \right).
+$$ + +Remarkably, this corresponds to the solution of the best alignment using the Hungarian matching algorithm with cost $c(n) = (1 - \hat{p}_{y,n}^{\sigma(n)})$ , as used, among others, in DETR [7]. Thus, when the query is the set of class labels, the most plausible world can be inferred with the Hungarian matching algorithm. In Appendix C.2, we also show that the gradient of ProbKT can be interpreted as a probability weighted extension of the gradient resulting from the Hungarian matching. + +# 4 EXPERIMENTS + +# 4.1 DATASETS + +We evaluate our approach on three different datasets: (1) a CLEVR-mini dataset, (2) a Molecules dataset with images of chemical compounds, and (3) an MNIST-based object detection dataset. For each dataset, three subsets, corresponding to different domains, are used: (1) a source domain, (2) a target domain, and (3) an out-of-distribution domain (OOD). The source domain is the richly annotated domain that was used to pre-train the object detection model. The target domain is + +the domain of interest but with image-level annotations only. Lastly, the OOD domain contains images from a different distribution than the source and target domains and is used to study the generalizability of the models. Source and target domains are split into 5 folds of train and validation sets and an independent test set. We focused our experiments on the small sample regime (1k–2k samples) both for the source and the target domain. More details on each dataset can be found in Appendix B. + +# 4.2 MODELS + +In the experiments, we apply our method ProbKT on two different pre-trained object detection backbone models: (1) DETR [7] and (2) FasterRCNN [34]. Both are pre-trained on the COCO dataset [26]. We also evaluate a Hungarian-algorithm approximation (ProbKT*) of our method when the weak supervision allows it. For the sake of conciseness, we omit the results of ProbKT* here but they can be found in Appendix D.
The details of the training procedures, as well as the hyper-parameters used for the different models and the different datasets are summarized in Table 4 in Appendix A. + +# 4.2.1 BASELINE MODELS + +As shown in Section 2, all available approaches for weakly supervised object detection are very restrictive in terms of the supervision signal they support. Our main comparison partner is the state of the art WSOD-transfer method [46]. + +Additionally, we compare our approach against a Resnet50 [18] backbone pre-trained on ImageNet [12]. Fine-tuning is performed by adding an extra multitask regression layer that is trained to predict the individual counts of the objects in the image as in Xue et al. [45]. This architecture naturally relies only on label counts in the target images for fine-tuning. We then predict box predictions using class activation maps as in Bae et al. [3] to compare its performance on object localization. We call this approach Resnet50-CAM. + +When the supervision signal allows it, we also compare with a DETR model trained end-to-end jointly on target and source domains, masking the box costs in the matching cost of the Hungarian algorithm for image-level annotated samples. We call this approach DETR-joint. + +
ModelData DomainCLEVR count acc.CLEVR mAP (mAP@IoU=0.5)Mol. count. accMol. mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.97 ± 0.0050.036 ± 0.014 (0.200 ± 0.071)0.978 ± 0.0040.0 ± 0.0 (0 ± 0)
Resnet50-CAMOOD0.831 ± 0.0160.029 ± 0.010 (0.153 ± 0.044)0.0 ± 0.0n/a*
Resnet50-CAMsource domain0.993 ± 0.0030.035 ± 0.019 (0.178 ± 0.084)0.828 ± 0.0210.0 ± 0.0 (0 ± 0)
WSOD-transfertarget domain0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)0.001 ± 0.00.018 ± 0.004 (0.061 ± 0.011)
WSOD-transferOOD0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)0.003 ± 0.002n/a*
WSOD-transfersource domain0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)0.0 ± 0.00.021 ± 0.003 (0.069 ± 0.009)
DETR-jointtarget domain0.159 ± 0.1330.579 ± 0.012 (0.684 ± 0.019)0.357 ± 0.1960.197 ± 0.055 (0.481 ± 0.071)
DETR-jointOOD0.084 ± 0.0390.534 ± 0.012 (0.66 ± 0.012)0.024 ± 0.021n/a*
DETR-jointsource dom.0.923 ± 0.0490.908 ± 0.017 (0.992 ± 0.001)0.232 ± 0.1270.23 ± 0.063 (0.565 ± 0.08)
RCNN (pre-trained)target domain0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)0.592 ± 0.0070.568 ± 0.005 (0.785 ± 0.004)
RCNN (pre-trained)OOD0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)0.348 ± 0.036n/a*
RCNN (pre-trained)source domain0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)0.948 ± 0.0040.737 ± 0.005 (0.979 ± 0.0)
DETR (pre-trained)target domain0.0 ± 0.00.498 ± 0.019 (0.533 ± 0.024)0.464 ± 0.0330.314 ± 0.006 (0.542 ± 0.006)
DETR (pre-trained)OOD0.0 ± 0.00.477 ± 0.013 (0.531 ± 0.021)0.002 ± 0.001n/a*
DETR (pre-trained)source domain0.97 ± 0.0090.945 ± 0.009 (0.992 ± 0.001)0.581 ± 0.0220.409 ± 0.005 (0.722 ± 0.004)
ProbKT (DETR)target domain0.946 ± 0.0140.803 ± 0.011 (0.989 ± 0.006)0.508 ± 0.0270.204 ± 0.02 (0.507 ± 0.014)
ProbKT (DETR)OOD0.726 ± 0.0350.715 ± 0.006 (0.974 ± 0.006)0.004 ± 0.003n/a*
ProbKT (DETR)source domain0.987 ± 0.0030.948 ± 0.005 (0.995 ± 0.001)0.549 ± 0.0260.38 ± 0.013 (0.713 ± 0.006)
ProbKT (RCNN)target domain0.975 ± 0.0030.856 ± 0.039 (0.993 ± 0.001)0.942 ± 0.0090.289 ± 0.041 (0.829 ± 0.054)
ProbKT (RCNN)OOD0.89 ± 0.0220.833 ± 0.042 (0.991 ± 0.001)0.603 ± 0.037n/a*
ProbKT (RCNN)source domain0.995 ± 0.0020.941 ± 0.041 (0.998 ± 0.001)0.96 ± 0.0020.666 ± 0.005 (0.978 ± 0.002)
+ +Table 2: Results of the experiments for the datasets: CLEVR-mini and Molecules. Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution. *: OOD test set of Molecules dataset has no bounding box labels. + +# 4.3 EVALUATION METRICS + +We evaluate the performance of the models on the different datasets based on two criteria: the count accuracy and the object localization performance. The count accuracy measures the ratio of correct images where all individual counts of (all detected) objects are correct. To evaluate how well the + +model is performing in localizing the different objects in the image we report the mean average precision (mAP) performance, a widely used metric for evaluating object detection models. + +# 4.4 WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH CLASS COUNTS + +We first investigate the performance of ProbKT when the weak supervision consists of class counts only. The query $q$ for each image then consists of the number of objects from each class in the image. We evaluate the models on the CLEVR-mini and Molecules datasets. For the Molecules dataset, the query for an image containing 6 carbon atoms (C), 6 oxygen atoms (O) and 12 hydrogen atoms (H) would result in the following query: $q = ([C,O,H],[6,6,12])$ . These weak labels in the case of the Molecules dataset are widely and easily available in the form of the chemical formula of the molecule on the image (e.g. $C_6H_{12}O_6$ ). The recognition of atomic level entities on images of molecules is a challenge in the field of Optical Chemical Structure Recognition (OCSR) [9; 33; 29; 19]. For the CLEVR-mini dataset, the query for an example image containing 2 spheres, 1 cylinder and 3 cubes would be $q = ([\mathrm{Cube},\mathrm{Cylinder},\mathrm{Sphere}],[3,1,2])$ . Formal descriptions of the queries for each task are presented in Appendix E. + +Results of the experiments are summarized in Table 2.
We observe on both datasets that ProbKT is able to transfer knowledge from the source domain to the target domain and improve count accuracy on the target domain and in most cases also on the source domain. The count accuracy increases on both the target domain and on OOD, suggesting better generalization performance. This is in contrast with Resnet50-CAM which performs well on the target domain of the Molecules dataset but fails on OOD. We also note a significant improvement in object localization (mAP) for ProbKT on the CLEVR-mini dataset. However, fine-tuning seems detrimental for mAP on the Molecules dataset. This can be explained by the very small bounding boxes in the Molecules dataset. We therefore also report the mAP@IoU=0.5 where we observe some increase in performance after fine-tuning. Lastly, we observe that our approach outperforms WSOD-transfer on all metrics for both datasets. WSOD-transfer performs well on CLEVR-mini but fails for the Molecules dataset. This can be explained by the fact that this method only supports class indicators (whether a class is present in the image), which is particularly detrimental in molecule images containing a lot of objects. + +# 4.5 OTHER TYPES OF WEAK SUPERVISION + +# 4.5.1 CLASS RANGES + +The annotation of images is a tedious task, which limits the availability of fully annotated datasets. When the number of objects on an image is large, counting the exact number of objects of a particular class becomes too time-consuming. A typical annotation in this case consists of class ranges where, instead of exact class counts, an interval is given for the count. For example, an image from the CLEVR-mini dataset with at least 4 cubes, exactly 4 cylinders and less than 4 spheres would result in the following query: $q = ([\text{cube}, \text{cylinder}, \text{sphere}], [[4, \infty[, [4, 5[, [0, 4[])$ . We evaluate this experimental setup and report results in Table 3.
We observe that ProbKT performs significantly better on count accuracy than WSOD-transfer, which still uses only presence/absence labels. We note that Resnet50-CAM is unable to use this type of supervision and is thus reported as $n/a$ . + +
ModelData DomainMNIST count acc.MNIST sum acc.MNIST mAP (mAP@IoU=0.5)CLEVR* count acc.CLEVR* mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.044 ± 0.0410.506 ± 0.0630.003 ± 0.003(0.014 ± 0.011)n/an/a
Resnet50-CAMOOD0.01 ± 0.0090.015 ± 0.0040.003 ± 0.002(0.011 ± 0.007)n/an/a
Resnet50-CAMsource domain0.127 ± 0.1320.649 ± 0.1080.005 ± 0.004(0.028 ± 0.018)n/an/a
WSOD-transfertarget domainn/an/an/a0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)
WSOD-transferOODn/an/an/a0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)
WSOD-transfersource domainn/an/an/a0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)
RCNN (pre-trained)target domain0.292 ± 0.0050.298 ± 0.0050.632 ± 0.014 (0.685 ± 0.002)0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)
RCNN (pre-trained)OOD0.205 ± 0.0040.212 ± 0.0040.631 ± 0.013 (0.683 ± 0.002)0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)
RCNN (pre-trained)source domain0.961 ± 0.0080.961 ± 0.0080.917 ± 0.021 (0.988 ± 0.002)0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)
ProbKT (RCNN)target domain0.902 ± 0.0050.903 ± 0.0050.786 ± 0.021 (0.974 ± 0.001)0.971 ± 0.0060.838 ± 0.034 (0.993 ± 0.001)
ProbKT (RCNN)OOD0.863 ± 0.0080.865 ± 0.0080.778 ± 0.021 (0.97 ± 0.001)0.884 ± 0.010.812 ± 0.036 (0.991 ± 0.001)
ProbKT (RCNN)source domain0.967 ± 0.0040.967 ± 0.0040.873 ± 0.016 (0.989 ± 0.001)0.994 ± 0.0010.922 ± 0.035 (0.998 ± 0.001)
+ +Table 3: Results of the experiments on the MNIST object detection dataset and on CLEVR* dataset (*CLEVR uses ranges of class counts as labels instead of exact class counts). Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution. + +# 4.5.2 COMPLEX QUERIES + +More complex types of weak supervision than the ones considered above are also possible. To illustrate the capabilities of our approach, we build an MNIST object detection dataset where images show multiple digits as objects. Example images are available in Appendix B. The weak supervision is here the sum of all digits in the image: $q = \mathrm{SUM}(\mathrm{digits})$ . Our ProbKT can seamlessly integrate this type of supervision as shown in Table 3. As all other baselines are unable to process this type of supervision, we compare against a pre-trained RCNN and a variation of Resnet50-CAM where we add an extra neural network layer that sums the individual counts to give the resulting sum. We report count accuracy, mAP and sum accuracy. The sum accuracy measures the ratio of correct images where the predicted sum (instead of the label of the digits) is correct. Details about the results on extra experiments with DETR as backbone using complex types of weak supervision can be found in Appendix D. + +# 4.6 ABLATION STUDIES + +![](images/0e773b45cbf7640f37656d4e8b4687dcfb7363cfd1d1ec58084cdbf33c00298d.jpg) +(a) CLEVR iterative relabeling + +![](images/d17235340db1e49a74095c49b3c4b80909d704f18f417672a46e1577bd8f7bad.jpg) +Figure 4: Iterative relabeling performance for the different datasets. Iteration 0: pretrained on source domain. Iteration 1: fine-tuned. Iteration 2: re-labeled and re-trained. Iteration 3: relabeled and re-trained. Iteration 4: relabeled and re-trained. + +![](images/01b3ce7dde365f828fc3b5f7694aacef5d650d5f027b39ff3eafe00a70f51d7e.jpg) +(b) Molecules iterative relabeling +(c) MNIST iterative relabeling + +Iterative relabeling.
In Figure 4, we plot the evolution of the performance on the test sets after multiple rounds of fine-tuning and re-labeling, as detailed in Section 3.3.1. The final performance reported in the results tables is selected based on the best relabeling iteration on the validation dataset. We observe that iterative relabeling after fine-tuning can improve performance significantly. Nevertheless, the benefit of iterative relabeling is less pronounced for DETR on the Molecules dataset. We attribute this to the fact that the fine-tuned DETR model is less accurate on this dataset. + +# Object detection backbone + +Our method can seamlessly accommodate different object detection backbones. In Table 2, we present the results for our method with a DETR [7] and a FasterRCNN [34] backbone. We observe that FasterRCNN typically performs better. In particular, the DETR backbone performs poorly on the Molecules dataset. This could be due to the small objects in the Molecules dataset. Indeed, Carion et al. [7] recommend to use DETR-DC5 or DETR-DC5-R101 for small objects instead. + +# 5 CONCLUSIONS AND DISCUSSION + +Object detection models are a key component of machine learning deployment in the real world. However, training such models usually requires large amounts of richly annotated images that are often prohibitive for many applications. In this work, we proposed a novel approach to train object detection models by leveraging richly annotated datasets from other domains and allowing arbitrary types of weak supervision on the target domain. Our architecture relies on a probabilistic logical programming engine that efficiently blends the power of symbolic reasoning and deep learning architectures. As such, our model also inherits the current limitations from the probabilistic reasoning implementations, such as higher computational complexity.
We proposed several approaches to speed-up the inference process significantly and our work will directly benefit from further advances in this field. Lastly, the versatility of probabilistic programming could help support other related tasks in the future, such as image to graph translation. + +Reproducibility Statement Details for reproducing all experiments shown in this work are available in Appendix E. More details on the datasets used in the experiments can be found in Appendix B. + +# ACKNOWLEDGMENTS + +AA, MO and YM are funded by (1) Research Council KU Leuven: Symbiosis 4 (C14/22/125), Symbiosis3 (C14/18/092); (2) Federated cloud-based Artificial Intelligence-driven platform for liquid biopsy analyses (C3/20/100); (3) CELSA - Active Learning (CELSA/21/019); (4) European Union's Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No. 956832; (5) Flemish Government (FWO: SBO (S003422N), Elixir Belgium (I002819N), SB and Postdoctoral grants: S003422N, 1SB2721N, 1S98819N, 12Y5623N) and (6) VLAIO PM: Augmenting Therapeutic Effectiveness through Novel Analytics (HBC.2019.2528); (7) YM, AA, EDB, and MO are affiliated to Leuven.AI and received funding from the Flemish Government (AI Research Program). EDB is funded by a FWO-SB grant (S98819N). Computational resources and services used in this work were partly provided by the VSC (Flemish Supercomputer Center), funded by the Research Foundation - Flanders (FWO) and the Flemish Government - department EWI. + +# REFERENCES + +[1] Mnist object detection dataset. URL https://github.com/hukkelas/MNIST-ObjectDetection. accessed on 01.02.2022. +[2] Rdkit: Open-source cheminformatics. URL https://www.rdkit.org. accessed on 01.02.2022. +[3] Wonho Bae, Junhyug Noh, and Gunhee Kim. Rethinking class activation mapping for weakly supervised object localization. In European Conference on Computer Vision, pages 618-634. Springer, 2020. 
+[4] Aseem Behl, Omid Hosseini Jafari, Siva Karthik Mustikovela, Hassan Abu Alhaija, Carsten Rother, and Andreas Geiger. Bounding boxes, segmentations and object coordinates: How important is recognition for 3d scene flow estimation in autonomous driving scenarios? In Proceedings of the IEEE International Conference on Computer Vision, pages 2574-2583, 2017. +[5] Lukas Biewald. Experiment tracking with weights and biases, 2020. URL https://www.wandb.com/. Software available from wandb.com. +[6] Hakan Bilen and Andrea Vedaldi. Weakly supervised deep detection networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2846-2854, 2016. +[7] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. +[8] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain adaptive faster r-cnn for object detection in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3339-3348, 2018. +[9] Djork-Arné Clevert, Tuan Le, Robin Winter, and Floriane Montanari. Img2mol-accurate smiles recognition from molecular graphical depictions. Chemical science, 12(42):14174-14181, 2021. +[10] Luc De Raedt and Kristian Kersting. Probabilistic logic learning. ACM SIGKDD Explorations Newsletter, 5(1):31-48, 2003. +[11] Luc De Raedt and Angelika Kimmig. Probabilistic (logic) programming concepts. Machine Learning, 100(1):5-47, 2015. +[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009. + +[13] Li Deng. The mnist database of handwritten digit images for machine learning research [best of the web]. IEEE signal processing magazine, 29(6):141-142, 2012. 
+[14] Thomas Deselaers, Bogdan Alexe, and Vittorio Ferrari. Weakly supervised localization and learning with generic knowledge. International journal of computer vision, 100(3):275-293, 2012. +[15] M. Everingham, L. Van Gool, C. K. I. Williams, J. Winn, and A. Zisserman. The Pascal visual object classes (voc) challenge. International Journal of Computer Vision, 88(2):303-338, June 2010. +[16] Eleonora Giunchiglia, Mihaela Cătălina Stoian, Salman Khan, Fabio Cuzzolin, and Thomas Lukasiewicz. Road-r: The autonomous driving dataset with logical requirements. arXiv preprint arXiv:2210.01597, 2022. +[17] Ibtihaal M Hameed, Sadiq H Abdulhussain, and Basheera M Mahmmod. Content-based image retrieval: A review of recent trends. *Cogent Engineering*, 8(1):1927469, 2021. +[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. +[19] Rodrigo Hormazabal, Changyoung Park, Soonyoung Lee, Sehui Han, Yeonsik Jo, Jaewan Lee, Ahra Jo, Seung Hwan Kim, Jaegul Choo, Moontae Lee, et al. Cede: A collection of expert-curated datasets with atom-level entity annotations for optical chemical structure recognition. +[20] Naoto Inoue, Ryosuke Furuta, Toshihiko Yamasaki, and Kiyoharu Aizawa. Cross-domain weakly-supervised object detection through progressive domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5001-5009, 2018. +[21] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017. +[22] Taekyung Kim, Minki Jeong, Seunghyeon Kim, Seokeon Choi, and Changick Kim. 
Diversify and match: A domain adaptive representation learning paradigm for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12456-12465, 2019. +[23] Daphne Koller, Nir Friedman, Sašo Džeroski, Charles Sutton, Andrew McCallum, Avi Pfeffer, Pieter Abbeel, Ming-Fai Wong, Chris Meek, Jennifer Neville, et al. Introduction to statistical relational learning. MIT press, 2007. +[24] Jogendra Nath Kundu, Rahul Mysore Venkatesh, Naveen Venkat, Ambareesh Revanur, and R Venkatesh Babu. Class-incremental domain adaptation. In European Conference on Computer Vision, pages 53-69. Springer, 2020. +[25] Dong Li, Jia-Bin Huang, Yali Li, Shengjin Wang, and Ming-Hsuan Yang. Weakly supervised object localization with progressive domain adaptation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3512-3520, 2016. +[26] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. +[27] Robin Manhaeve, Sebastijan Dumancic, Angelika Kimmig, Thomas Demeester, and Luc De Raedt. Deepproblog: Neural probabilistic logic programming. Advances in Neural Information Processing Systems, 31, 2018. +[28] Robin Manhaeve, Sebastijan Dumančić, Angelika Kimmig, Thomas Demeester, and Luc De Raedt. Neural probabilistic logic programming in deepproblog. Artificial Intelligence, 298: 103504, 2021. + +[29] Martijn Oldenhof, Adam Arany, Yves Moreau, and Jaak Simm. Chemographer: optical graph recognition of chemical compounds by deep learning. Journal of chemical information and modeling, 60(10):4506-4517, 2020. +[30] Martijn Oldenhof, Adam Arany, Yves Moreau, and Jaak Simm. Self-labeling of fully mediating representations by graph alignment. In Benelux Conference on Artificial Intelligence, pages 46-65. Springer, 2021. 
+[31] Martijn Oldenhof, Ádám Arany, Yves Moreau, and Edward De Brouwer. Updating object detection models with probabilistic programming. 2022. ICML workshop - UpML. +[32] Luc De Raedt, Kristian Kersting, Siraam Natarajan, and David Poole. Statistical relational artificial intelligence: Logic, probability, and computation. Synthesis lectures on artificial intelligence and machine learning, 10(2):1-189, 2016. +[33] Kohulan Rajan, Achim Zielesny, and Christoph Steinbeck. Decimer: towards deep learning for chemical image recognition. Journal of Cheminformatics, 12(1):1-9, 2020. +[34] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. +[35] Tim Rocktäschel and Sebastian Riedel. End-to-end differentiable proving. Advances in neural information processing systems, 30, 2017. +[36] Noureddin M Sadawi, Alan P Sexton, and Volker Sorge. Chemical structure recognition: a rule-based approach. In Document Recognition and Retrieval XIX, volume 8297, page 82970E. International Society for Optics and Photonics, 2012. +[37] Kuniaki Saito, Yoshitaka Ushiku, Tatsuya Harada, and Kate Saenko. Strong-weak distribution alignment for adaptive object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6956–6965, 2019. +[38] Adam Santoro, David Raposo, David G Barrett, Mateusz Malinowski, Razvan Pascanu, Peter Battaglia, and Timothy Lillicrap. A simple neural network module for relational reasoning. Advances in neural information processing systems, 30, 2017. +[39] Feifei Shao, Long Chen, Jian Shao, Wei Ji, Shaoning Xiao, Lu Ye, Yueting Zhuang, and Jun Xiao. Deep learning for weakly-supervised object detection and localization: A survey. Neurocomputing, 2022. +[40] Hyun Oh Song, Ross Girshick, Stefanie Jegelka, Julien Mairal, Zaid Harchaoui, and Trevor Darrell. 
On learning to localize objects with minimal supervision. In International Conference on Machine Learning, pages 1611-1619. PMLR, 2014. +[41] Leon Sterling and Ehud Y Shapiro. The art of Prolog: advanced programming techniques. MIT press, 1994. +[42] Matteo Tomei, Marcella Cornia, Lorenzo Baraldi, and Rita Cucchiara. Art2real: Unfolding the reality of artworks via semantically-aware image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5849-5859, 2019. +[43] Jasper Uijlings, Stefan Popov, and Vittorio Ferrari. Revisiting knowledge transfer for training object class detectors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1101-1110, 2018. +[44] Thomas Winters, Giuseppe Marra, Robin Manhaeve, and Luc De Raedt. Deepstochlog: Neural stochastic logic programming. arXiv preprint arXiv:2106.12574, 2021. +[45] Yao Xue, Nilanjan Ray, Judith Hugh, and Gilbert Bigras. Cell counting by regression using convolutional neural network. In European Conference on Computer Vision, pages 274-290. Springer, 2016. + +[46] Yuanyi Zhong, Jianfeng Wang, Jian Peng, and Lei Zhang. Boosting weakly supervised object detection with progressive knowledge transfer. In European conference on computer vision, pages 615-631. Springer, 2020. +[47] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2921–2929, 2016. +[48] Xinge Zhu, Jiangmiao Pang, Ceyuan Yang, Jianping Shi, and Dahua Lin. Adapting object detectors via selective cross-domain alignment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 687-696, 2019. 
+ +# A TRAINING DETAILS + +For the hyper-parameters the idea was to stay as close as possible to the defaults of the pre-trained standard models although some lightweight tuning was done. In Table 4 a summary is given for the hyper-parameters used for the different models. + +
Modeldatasetepochslrlr_step_sizelr-gammamomentumbatch sizeweight decayoptimizer
DETR pre-train (retrain)CLEVRmax 1000.00017 (7-8)0.180.0001AdamW
DETR pre-train (retrain)Mols.max 1000.000120 (20)0.180.0001AdamW
DETR pre-train (retrain)MNISTmax 1000.000115-20 (20)0.180.0001AdamW
RCNN pre-train (retrain)all datasetsmax 300.0055 (5)0.10.910.0005SGD
RCNN Finetuneall datasetsmax 200.00116Adam
DETR FinetuneCLEVR/Molsmax 200.00116Adam
DETR FinetuneMNISTmax 200.0116Adam
DETR Finetune*CLEVR/Molsmax 1000.002200.180.0001AdamW
RCNN Finetune*CLEVRmax 200.00115Adam
RCNN Finetune*Molsmax 200.0000115Adam
DETR masked box lossCLEVR/Molsmax 1000.000170.180.0001AdamW
Resnet50-CAM modelsall datasetsmax 5000.00132Adam
+ +Table 4: Overview of hyperparameters for the different models; most hyperparameters are left at their defaults from standard models. Tuning was mostly done on learning rate and learning rate scheduling. For every fold/dataset the best epoch/lr/lr_step_size model is used based on validation data. + +# B DATASETS + +We evaluate our approach on three different datasets: (1) a CLEVR-mini dataset, (2) a Molecules dataset with images of chemical compounds, and (3) an MNIST-based object detection dataset. For each dataset, three subsets, corresponding to different domains, are used: (1) a source domain, (2) a target domain, and (3) an out-of-distribution domain (OOD). Source and target domains are split into 5 folds of train and validation sets and an independent test set. Sizes of the different splits per dataset are summarized in Table 5. + +
DatasetTypeSplitSize (number of samples)
MNIST object detectionSourcetrain700
MNIST object detectionSourcevalidation300
MNIST object detectionSourcetest1000
MNIST object detectionTargettrain700
MNIST object detectionTargetvalidation300
MNIST object detectionTargettest1000
MNIST object detectionOODtest1000
MoleculesSourcetrain1400
MoleculesSourcevalidation600
MoleculesSourcetest1000
MoleculesTargettrain1400
MoleculesTargetvalidation600
MoleculesTargettest1000
MoleculesOODtest1000
+ +Table 5: Dataset sizes for the different splits. For train and validations splits 5 folds are used. + +# B.0.1 CLEVR-MINI DATASET + +The CLEVR-mini dataset for our experiments is a selection of samples from the CLEVR dataset [21]. The different types available in the CLEVR dataset are combinations of shapes (cube, sphere, and cylinder), materials (metal and rubber), and sizes (large and small). Colors are ignored as the images are first converted to grayscale before feeding them to the models. For the richly annotated source domain, we randomly select images with only sphere or cylinder-shaped objects (no cubes) and with a maximum of four objects per image and a minimum of three objects. For the weakly annotated target domain we experiment with two type of annotations. Firstly we experiment when we have the class counts of objects in the image available. Secondly, instead of the exact counts of classes in the image the annotations only specify if there is exactly one object class in the image or multiple. The advantage of this kind of labeling is that the annotator does not need to count the objects and instead just make a distinction of only one object class in image or multiple. The images in the target domain can contain all combinations of object types (including cube-shaped objects) and allow a minimum of five objects per image and a maximum of six objects per image. For the OOD dataset we also select images with all possible combinations of object types, always with 10 objects per image. Some example images from the CLEVR-mini dataset can be found in Figure 1. + +# B.0.2 MOLECULES DATASET + +The Molecules dataset contains images depicting chemical compounds. For the richly annotated source domain, a procedure similar as described in Oldenhof et al. [29, 30] was executed using an RDKit [2] fork for generating the bounding box labels for the individual atoms present in the images. 
In the source domain, we allow the following atom types: carbon (C), hydrogen (H), oxygen (O), and nitrogen (N). In the weakly annotated target domain, we only have the counts of the atoms present, which translates to the chemical formula of the molecule in the image ( $e.g. C_6H_{12}O_6$ ). The same classes from the source domain (C, H, O, and N) are also present in the target domain as well as an extra atom type: sulfur (S). The OOD test dataset consists of 1000 images from the external UoB dataset [36] containing chemical compounds with only the atom types present in the target domain (C, H, O, N, and S). Some example images from the Molecules dataset are visualized in Figure 5. + +# B.0.3 MNIST OBJECT DETECTION DATASET + +The MNIST object detection dataset is generated [1] using the original MNIST dataset [13]. Each image consists of three MNIST digits randomly positioned in the image. The MNIST object detection dataset allows experimenting with a more arbitrary type of weak supervision. Each object in this dataset represents a digit that can be aggregated. This allows labeling an image with only the sum of all digits in the image instead of the class counts of the objects. For the richly annotated source domain digits 7, 8, and 9 are left out. The weakly annotated target domain has all possible digit classes (0-9). The labels of the target domain only contain the sum of all digits. For the OOD test dataset, images are used that contain a maximum of four MNIST digits, instead of three digits as in the other domains. Some example images from the MNIST object detection dataset are visualized in Figure 6. + +![](images/8322a53663ffea603503373d41cb97feee570947a49aa14c27fc4c4d869989ed.jpg) +Figure 5: Weakly supervised knowledge transfer with probabilistic logical reasoning (ProbKT). On the left we have the source domain where a model can be trained using bounding box information (labels, positions) but only on a limited set of atom types (C,H,O,N). 
In the middle we can see that the pre-trained model is not able to recognize the sulfur (S) from the target domain correctly. On the right we see that the model is able to adapt to the target domain after probabilistic reasoning using weak labels (e.g. counts of objects on the image) and is able to recognize the sulfur (S). + +![](images/a0b861faf5e883198d7f1afee3d04067d4e837bfd41f6b562175b9ecc4cec360.jpg) +Figure 6: Weakly supervised knowledge transfer with probabilistic logical reasoning (ProbKT). On the left we have the source domain where a model can be trained using bounding box information (labels, positions) but only on a limited set of digits (0, 1, 2, 3, 4, 5, 6). In the middle we can see that the pre-trained model is not able to recognize the digit eight (8) from the target domain correctly. On the right we see that the model is able to adapt to the target domain after probabilistic reasoning using weak labels (e.g. sum of digits on the image) and is able to recognize the digit eight (8). + +# C PROBKT AND PROBKT* SUPPLEMENTARY DETAILS + +# C.1 FILTERING SAMPLES + +The computational complexity of inference in the probabilistic programming module grows with the number of possible worlds. In turn, the number of possible worlds grows with the number of probabilistic facts $\hat{n}$ . + +One avenue to reduce the computational cost of the inference step is then to artificially reduce the number of probabilistic facts in each image. Let $\{\hat{p}_{y,n} : n = 1, \dots, \hat{n}\}$ be the set of probabilistic facts and $q$ the corresponding inference query. We compute the filtered set of probabilistic facts $\bar{p}_{y,n}$ by setting + +$$
+\bar{p}_{y,n}^{k} = \begin{cases} 1 & \text{if } \hat{p}_{y,n}^{k} \geq \delta \\ 0 & \text{if } \exists k' \text{ s.t. } \hat{p}_{y,n}^{k'} \geq \delta \text{ and } \hat{p}_{y,n}^{k} < \delta \\ \hat{p}_{y,n}^{k} & \text{otherwise} \end{cases} 
\tag {2} +$$ + +The parameter $\delta \in [0,1]$ is a threshold at which we consider the probabilistic fact as certain. A probability of 1 or 0 effectively discards the probabilistic fact $\bar{p}_{y,n}$ from the inference procedure. However, we also have to update the inference query $q$ to reflect this filtration. We write $\bar{q}$ for the filtered query $q$ . + +Example To illustrate this filtration strategy let's consider an MNIST image with 3 digits in the image: $\{3,4,7\}$ . The query $q$ corresponds to the class labels in the images. That is $q = \{3,4,7\}$ . The object detection backbone outputs 3 box features with corresponding probabilities $\{\hat{p}_{y,0},\hat{p}_{y,1},\hat{p}_{y,2}\}$ . Now let e.g. $\hat{p}_{y,1}^3 = 0.99$ . We can filter out $\hat{p}_{y,1}$ (i.e. the prediction for a digit 3 is certain), and compute the filtered query $\bar{q} = \{4,7\}$ . + +Remark Equation 2 suggests a filtering based on the output probabilities only. However, one can also use information about the query for the filtration. For instance, one would only filter out a probabilistic fact if it is consistent with the query $q$ . In the example above, it would be wiser not to filter out e.g. $\hat{p}_{y,1}^{9} = 0.99$ as no nines are supposedly present in the image. One should then ideally propagate this probabilistic fact to the inference module so as to update the weights of the backbone and learn from this error. + +# C.2 GRADIENT OF THE LIKELIHOOD + +The ProbKT likelihood has the following form: + +$$ +P_{\mathcal{P}}(q) = \sum_{\alpha \in E_{q}} \prod_{i} \prod_{j} \hat{p}_{ij}^{\alpha_{ij}}, +$$ + +where $\alpha$ is a "possible world" matrix of indicator variables: + +$$ +\alpha_{ij} = \begin{cases} 1 & \text{if object } i \text{ is of class } j \\ 0 & \text{otherwise,} \end{cases} +$$ + +and $E_{q}$ is the set of all possible $\alpha$ worlds compatible with the logical annotation $q$ . + +Lemma 1. 
The gradient of the likelihood has the following form: + +$$ +\frac{\partial P_{\mathcal{P}}(q)}{\partial \theta} = \sum_{i} \sum_{j} \frac{\partial p_{ij}}{\partial \theta} C_{ij}, +$$ + +where the weight has the form: + +$$ +C_{ij} = P(E_q \mid O_{i} = j) = \sum_{\alpha \in E_q : \alpha_{ij} = 1} \; \prod_{(i',j') \neq (i,j)} p_{i'j'}^{\alpha_{i'j'}} +$$ + +In the case of Hungarian matching the most probable possible world is selected, which corresponds to setting the conditional probability $P(E_q|O_{i} = j)$ to 1 if object $i$ is paired with label $j$ and 0 otherwise. The ProbKT gradient can be interpreted as a probability-weighted extension of the gradient resulting from the Hungarian matching. + +# D FULL RESULTS + +In Table 6, we present the full results for the MNIST experiment. We report the count accuracy (i.e., correct identification of the digits in the image), sum accuracy (i.e., correct estimation of the sum of + +
ModelTypemnist count acc.mnist sum acc.mnist mAP (mAP@IoU=0.5)
Resnet50-CAM (baseline)In-distribution0.044 ± 0.0410.506 ± 0.0630.003 ± 0.003(0.014 ± 0.011)
Resnet50-CAM (baseline)OOD0.01 ± 0.0090.015 ± 0.0040.003 ± 0.002(0.011 ± 0.007)
Resnet50-CAM (baseline)Source Domain0.127 ± 0.1320.649 ± 0.1080.005 ± 0.004(0.028 ± 0.018)
DETR (Pre-trained)In-distribution0.26 ± 0.0120.262 ± 0.010.518 ± 0.014 (0.637 ± 0.017)
DETR (Pre-trained)OOD0.173 ± 0.010.177 ± 0.0090.51 ± 0.012 (0.632 ± 0.015)
DETR (Pre-trained)Source Domain0.859 ± 0.0310.86 ± 0.0310.781 ± 0.009 (0.957 ± 0.008)
DETR (ProbKT)In-distribution0.662 ± 0.0640.664 ± 0.0650.615 ± 0.025 (0.856 ± 0.037)
DETR (ProbKT)OOD0.532 ± 0.0830.533 ± 0.0820.591 ± 0.03 (0.845 ± 0.038)
DETR (ProbKT)source domain0.878 ± 0.0230.879 ± 0.0230.737 ± 0.014 (0.952 ± 0.009)
RCNN (Pre-trained)In-distribution0.292 ± 0.0050.298 ± 0.0050.632 ± 0.014 (0.685 ± 0.002)
RCNN (Pre-trained)OOD0.205 ± 0.0040.212 ± 0.0040.631 ± 0.013 (0.683 ± 0.002)
RCNN (Pre-trained)source domain0.961 ± 0.0080.961 ± 0.0080.917 ± 0.021 (0.988 ± 0.002)
RCNN (ProbKT)In-distribution0.902 ± 0.0050.903 ± 0.0050.786 ± 0.021 (0.974 ± 0.001)
RCNN (ProbKT)OOD0.863 ± 0.0080.865 ± 0.0080.778 ± 0.021 (0.97 ± 0.001)
RCNN (ProbKT)source domain0.967 ± 0.0040.967 ± 0.0040.873 ± 0.016 (0.989 ± 0.001)
+ +the digits in the image) and the mean average precision (mAP) (i.e. a common object detection metric that reflects the ability to predict the positions and labels of the objects). We observe that the Resnet baseline performs poorly, lacking the necessary logic to process this dataset. We used both DETR and RCNN as object detection backbones in our experiments, showing high test accuracies when fine-tuned with our approach. As the results suggest, RCNN backbones lead to better performance than the DETR backbone. + +Table 6: Results of the SUM experiments on the MNIST object detection dataset. Reported test accuracies over the 5 folds. + +
ModelData DomainCLEVR count acc.CLEVR mAP (mAP@IoU=0.5)Mol. count. accMol. mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.97 ± 0.0050.036 ± 0.014 (0.200 ± 0.071)0.978 ± 0.0040.0 ± 0.0 (0 ± 0)
Resnet50-CAMOOD0.831 ± 0.0160.029 ± 0.010 (0.153 ± 0.044)0.0 ± 0.0n/a1
Resnet50-CAMsource domain0.993 ± 0.0030.035 ± 0.019 (0.178 ± 0.084)0.828 ± 0.0210.0 ± 0.0 (0 ± 0)
WSOD-transfertarget domain0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)0.001 ± 0.00.018 ± 0.004 (0.061 ± 0.011)
WSOD-transferOOD0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)0.003 ± 0.002n/a1
WSOD-transfersource domain0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)0.0 ± 0.00.021 ± 0.003 (0.069 ± 0.009)
DETR-jointtarget domain0.159 ± 0.1330.579 ± 0.012 (0.684 ± 0.019)0.357 ± 0.1960.197 ± 0.055 (0.481 ± 0.071)
DETR-jointOOD0.084 ± 0.0390.534 ± 0.012 (0.66 ± 0.012)0.024 ± 0.021n/a1
DETR-jointsource dom.0.923 ± 0.0490.908 ± 0.017 (0.992 ± 0.001)0.232 ± 0.1270.23 ± 0.063 (0.565 ± 0.08)
DETR (Pre-trained)target domain0.0 ± 0.00.498 ± 0.019 (0.533 ± 0.024)0.464 ± 0.0330.314 ± 0.006 (0.542 ± 0.006)
DETR (Pre-trained)OOD0.0 ± 0.00.477 ± 0.013 (0.531 ± 0.021)0.002 ± 0.001n/a1
DETR (Pre-trained)source domain0.97 ± 0.0090.945 ± 0.009 (0.992 ± 0.001)0.581 ± 0.0220.409 ± 0.005 (0.722 ± 0.004)
ProbKT*(DETR)target domain0.949 ± 0.0050.728 ± 0.014 (0.99 ± 0.003)0.589 ± 0.0420.373 ± 0.02 (0.669 ± 0.045)
ProbKT*(DETR)OOD0.741 ± 0.0380.606 ± 0.017 (0.977 ± 0.004)0.008 ± 0.008n/a1
ProbKT*(DETR)source domain0.985 ± 0.0040.937 ± 0.006 (0.995 ± 0.001)0.275 ± 0.0660.371 ± 0.021 (0.649 ± 0.041)
ProbKT(DETR)target domain0.946 ± 0.0140.803 ± 0.011 (0.989 ± 0.006)0.508 ± 0.0270.204 ± 0.02 (0.507 ± 0.014)
ProbKT(DETR)OOD0.726 ± 0.0350.715 ± 0.006 (0.974 ± 0.006)0.004 ± 0.003n/a1
ProbKT(DETR)source domain0.987 ± 0.0030.948 ± 0.005 (0.995 ± 0.001)0.549 ± 0.0260.38 ± 0.013 (0.713 ± 0.006)
RCNN (pre-trained)target domain0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)0.592 ± 0.0070.568 ± 0.005 (0.785 ± 0.004)
RCNN (pre-trained)OOD0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)0.348 ± 0.036n/a1
RCNN (pre-trained)source domain0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)0.948 ± 0.0040.737 ± 0.005 (0.979 ± 0.0)
ProbKT*(RCNN)target domain0.974 ± 0.0040.855 ± 0.025 (0.994 ± 0.001)0.945 ± 0.0060.24 ± 0.042 (0.788 ± 0.073)
ProbKT*(RCNN)OOD0.901 ± 0.0170.827 ± 0.022 (0.991 ± 0.001)0.592 ± 0.032n/a1
ProbKT*(RCNN)source domain0.993 ± 0.0020.95 ± 0.021 (0.998 ± 0.0)0.96 ± 0.0030.655 ± 0.01 (0.974 ± 0.004)
ProbKT(RCNN)target domain0.975 ± 0.0030.856 ± 0.039 (0.993 ± 0.001)0.942 ± 0.0090.289 ± 0.041 (0.829 ± 0.054)
ProbKT(RCNN)OOD0.89 ± 0.0220.833 ± 0.042 (0.991 ± 0.001)0.603 ± 0.037n/a1
ProbKT(RCNN)source domain0.995 ± 0.0020.941 ± 0.041 (0.998 ± 0.001)0.96 ± 0.0020.666 ± 0.005 (0.978 ± 0.002)
+ +Table 7: Results of the experiments for the datasets: CLEVR-mini and Molecules. Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution. + +# E SOURCE CODE AND DATASETS + +The source code and basic instructions are available at https://github.com/molden/ProbKT. The source code integrates features from the Weights & Biases (WandB) platform [5]. Basic features are supported without the need for an account on WandB but to make full use of all features we recommend creating an account. + +Datasets can be downloaded here: + +- CLEVR-mini dataset https://figshare.com/s/db012765e5a38e14ef9c +- Molecules dataset https://figshare.com/s/3dc3508d39bf4cff8c7f +- MNIST object detection dataset https://figshare.com/s/c760de026f000524db5a + +ProbLog script used in the ProbKT Probabilistic logical reasoning framework for counting of objects on an image (as on CLEVR-mini dataset): + +```prolog +:- use_module(library(lists)). +nn(mnist_net,[X],Y,[0,1,2,3,4,5,6,7,8,9,10,11]) :: digit(X,Y). +count([],X,0). +count([X|T],X,Y):- count(T,X,Z), Y is 1+Z. +count([X1|T],X,Z):- X1\=X, count(T,X,Z). +countall(List,X,C) :- sort(List,List1), member(X,List1), count(List,X,C). +roll([],L,L). +roll([H|T],A,L):- roll(T,[Y|A],L), digit(H,Y). +countpart(List,[],[]). +countpart(List,[H|T],[F|L]):- countall(List,H,F), countpart(List,T,L). +count_objects(X,L,C):- roll(X,[],Result), countpart(Result,L,C). +``` + +The query $q$ in the case of class counts would be count_objects(X, L, C). For example an image $X$ with 1 small metal cube and 3 large rubber cylinders would result in the following query: count_objects(X, [small_metal_cube, large_rubber_cylinder], [1, 3]). + +ProbLog script used in the ProbKT Probabilistic logical reasoning framework for aggregating the digits on an image: + +```prolog +:- use_module(library(lists)). +nn(mnist_net, [X], Y, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) :: digit(X,Y). +sum([], 0). +sum([X|T], Y) :- sum(T,Z), Y is X+Z. 
+roll([], L, L). +roll([H|T], A, L) :- roll(T, [Y|A], L), digit(H,Y). +sum_digits(X,Y) :- roll(X, [], Result), sum(Result,Y). +``` + +The query $q$ in the case of sum of digits would be sum_digits(X, Y). For example an image $X$ with 12 as sum of digits would result in the following query: sum_digits(X, 12). + +ProbLog script used in the ProbKT Probabilistic logical reasoning framework for taking into account non-exact counts on images (NOTE: the extracted text of this listing was corrupted; the script below is a reconstruction following the class-counts script above and should be verified against the original repository): + +```prolog +:- use_module(library(lists)). +nn(mnist_net,[X],Y,[0,1,2,3,4,5,6,7,8,9,10,11]) :: digit(X,Y). +count([],X,0). +count([X|T],X,Y):- count(T,X,Z), Y is 1+Z. +count([X1|T],X,Z):- X1\=X, count(T,X,Z). +range_count(List,X,C,0):- count(List,X,C). +range_count(List,X,C,1):- count(List,X,Z), Z > C. +countall(List,X,C,S):- sort(List,List1), member(X,List1), range_count(List,X,C,S). +roll([],L,L). +roll([H|T],A,L):- roll(T,[Y|A],L), digit(H,Y). +countpart(List,[],[],[]). +countpart(List,[H|T],[F|L],[S|R]):- countall(List,H,F,S), countpart(List,T,L,R). +range_countobjects(X,L,C,S):- roll(X,[],Result), countpart(Result,L,C,S). +``` + +The query $q$ in the 
case of non-exact counts of objects would be range_countobjects(X,L,C,S). For example an image $X$ with exactly one metal small cube and multiple rubber large spheres would result in the following query: range_countobjects(X,[s_metal_cube,l_rubber_sphere],[1,1],[0,1]). + +# E.1 INFERENCE EXAMPLE FOR MNIST DATASET + +To illustrate the inference process let us follow the evaluation of the clause sum([X1, X2], 8), which can result from the query sum_digits(X, 8) in the case of two visible digits in the image X. + +This clause is true if and only if $X_{1} + X_{2} = 8$ . + +In the case of MNIST digits $\{0,1,\dots ,9\}$ enumerating the possible worlds would give the following set: + +$$ +\{(0, 8), (1, 7), (2, 6), \dots , (8, 0) \} \tag {3} +$$ + +After summing the probability of all possible worlds we get: + +$$ +p_{1}(0) p_{2}(8) + p_{1}(1) p_{2}(7) + \dots + p_{1}(8) p_{2}(0), \tag {4} +$$ + +where $p_1$ and $p_2$ are the distributions of the random variables $X_1$ and $X_2$ respectively. + +Or in a general form: + +$$ +p_{Y}(Y) = \sum_{X_{1}} p_{1}\left(X_{1}\right) p_{2}\left(Y - X_{1}\right). \tag {5} +$$ + +As expected the distribution of the sum is the convolution of the distributions of the two terms. This observation trivially generalizes to more than two terms. The cost function corresponding to the maximum likelihood estimation is the negative log-likelihood $-\log (p_{Y}(Y))$ . 
\ No newline at end of file diff --git a/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/images.zip b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d21ef410ca24320c53af0fa5374f06ca272591fb --- /dev/null +++ b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5643b4bdd88ab7818ad8e1bde25002306f826786a6f8f1613d639dc786c5fe89 +size 1001298 diff --git a/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/layout.json b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e679f9ffc5323c09169365142d0f1a661b0f7b37 --- /dev/null +++ b/2023/Weakly Supervised Knowledge Transfer with Probabilistic Logical Reasoning for Object Detection/layout.json @@ -0,0 +1,11958 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 507, + 136 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 507, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 507, + 136 + ], + "type": "text", + "content": "WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH PROBABILISTIC LOGICAL REASONING FOR OBJECT DETECTION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 154, + 191, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 154, + 191, + 165 + ], + "spans": [ + { + "bbox": [ + 111, + 154, + 191, + 165 + ], + "type": "text", + "content": "Martijn Oldenhof" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 166, + 187, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 166, + 187, + 176 + ], + 
"spans": [ + { + "bbox": [ + 111, + 166, + 187, + 176 + ], + "type": "text", + "content": "ESAT-STADIUS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 177, + 200, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 177, + 200, + 188 + ], + "spans": [ + { + "bbox": [ + 112, + 177, + 200, + 188 + ], + "type": "text", + "content": "KU Leuven, Belgium" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 189, + 281, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 189, + 281, + 199 + ], + "spans": [ + { + "bbox": [ + 112, + 189, + 281, + 199 + ], + "type": "text", + "content": "martijn. oldenhof@kuleuven.be" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 315, + 155, + 373, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 155, + 373, + 166 + ], + "spans": [ + { + "bbox": [ + 315, + 155, + 373, + 166 + ], + "type": "text", + "content": "Adam Arany" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 315, + 166, + 391, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 166, + 391, + 177 + ], + "spans": [ + { + "bbox": [ + 315, + 166, + 391, + 177 + ], + "type": "text", + "content": "ESAT-STADIUS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 177, + 403, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 177, + 403, + 189 + ], + "spans": [ + { + "bbox": [ + 315, + 177, + 403, + 189 + ], + "type": "text", + "content": "KU Leuven, Belgium" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 189, + 478, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 189, + 478, + 199 + ], + "spans": [ + { + "bbox": [ + 315, + 189, + 478, + 199 + ], + "type": "text", + "content": "adam.arany@esat.kuleuven.be" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 216, + 171, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 
216, + 171, + 226 + ], + "spans": [ + { + "bbox": [ + 111, + 216, + 171, + 226 + ], + "type": "text", + "content": "Yves Moreau" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 227, + 187, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 227, + 187, + 238 + ], + "spans": [ + { + "bbox": [ + 111, + 227, + 187, + 238 + ], + "type": "text", + "content": "ESAT-STADIUS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 112, + 239, + 200, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 239, + 200, + 249 + ], + "spans": [ + { + "bbox": [ + 112, + 239, + 200, + 249 + ], + "type": "text", + "content": "KU Leuven, Belgium" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 112, + 250, + 281, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 250, + 281, + 260 + ], + "spans": [ + { + "bbox": [ + 112, + 250, + 281, + 260 + ], + "type": "text", + "content": "yves.moreau@esat.kuleuven.be" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 216, + 406, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 216, + 406, + 226 + ], + "spans": [ + { + "bbox": [ + 315, + 216, + 406, + 226 + ], + "type": "text", + "content": "Edward De Brouwer" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 227, + 392, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 227, + 392, + 237 + ], + "spans": [ + { + "bbox": [ + 315, + 227, + 392, + 237 + ], + "type": "text", + "content": "ESAT-STADIUS" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 238, + 404, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 238, + 404, + 250 + ], + "spans": [ + { + "bbox": [ + 315, + 238, + 404, + 250 + ], + "type": "text", + "content": "KU Leuven, Belgium" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 250, + 474, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { 
+ "bbox": [ + 315, + 250, + 474, + 260 + ], + "spans": [ + { + "bbox": [ + 315, + 250, + 474, + 260 + ], + "type": "text", + "content": "edward.debrouwer@gmail.com" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 276, + 289, + 334, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 289, + 334, + 300 + ], + "spans": [ + { + "bbox": [ + 276, + 289, + 334, + 300 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 140, + 312, + 471, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 312, + 471, + 468 + ], + "spans": [ + { + "bbox": [ + 140, + 312, + 471, + 468 + ], + "type": "text", + "content": "Training object detection models usually requires instance-level annotations, such as the positions and labels of all objects present in each image. Such supervision is unfortunately not always available and, more often, only image-level information is provided, also known as weak supervision. Recent works have addressed this limitation by leveraging knowledge from a richly annotated domain. However, the scope of weak supervision supported by these approaches has been very restrictive, preventing them to use all available information. In this work, we propose ProbKT, a framework based on probabilistic logical reasoning that allows to train object detection models with arbitrary types of weak supervision. We empirically show on different datasets that using all available information is beneficial as our ProbKT leads to significant improvement on target domain and better generalization compared to existing baselines. We also showcase the ability of our approach to handle complex logic statements as supervision signal. 
Our code is available at https://github.com/molden/ProbKT" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 487, + 206, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 487, + 206, + 498 + ], + "spans": [ + { + "bbox": [ + 106, + 487, + 206, + 498 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 511, + 506, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 506, + 578 + ], + "type": "text", + "content": "Object detection is a fundamental ability of numerous high-level machine learning pipelines such as autonomous driving [4; 16], augmented reality [42] or image retrieval [17]. However, training state-of-the-art object detection models generally requires detailed image annotations such as the box-coordinates location and the labels of each object present in each image. If several large benchmark datasets with detailed annotations are available [26; 15], providing such detailed annotation on new specific datasets comes with a significant cost that is often not affordable for many applications." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": "More frequently, datasets come with only limited annotation, also referred to as weak supervision. This has sparked research in weakly-supervised object detection approaches [25; 6; 40], using techniques such as multiple instance learning [40] or variations of class activation maps [3]. However, these approaches have been shown to significantly underperform their fully-supervised counterparts in terms of robustness and accurate localization of the objects [39]." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "An appealing and intuitive approach to improve the performance of weakly supervised object detection is to perform transfer learning from an existing object detection model pre-trained on a fully annotated dataset [14; 46; 43]. This approach, also referred to as transfer learning or domain adaptation, consists in leveraging transferable knowledge from the pre-trained model (such as bounding boxes prediction capabilities) to the new weakly supervised domain. This transfer has been embodied in different ways in the literature. Examples include a simple fine-tuning of the classifier of bounding box proposals of the pre-trained model [43], or an iterative relabeling of the weakly supervised dataset for retraining a new full objects detection model on the re-labeled data [46]." 
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 78, + 506, + 240 + ], + "blocks": [ + { + "bbox": [ + 106, + 78, + 506, + 240 + ], + "lines": [ + { + "bbox": [ + 106, + 78, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 106, + 78, + 506, + 240 + ], + "type": "image", + "image_path": "243aadad3b25bc99b81b9d9924300b2d9c50d1ddde480b20145264765a247667.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "text", + "content": "Figure 1: ProbKT: Weakly supervised knowledge transfer with probabilistic logical reasoning. (Left) A model can be trained on the source domain using full supervision (labels, positions) but only on a limited set of shapes (cylinders and spheres). (Middle) The pre-trained model does not recognize the cubes from the target domain correctly. (Right) The model can adapt to the target domain after applying ProbKT and can recognize the cubes." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 320, + 504, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 320, + 504, + 409 + ], + "spans": [ + { + "bbox": [ + 104, + 320, + 504, + 409 + ], + "type": "text", + "content": "However, existing approaches are very restrictive in the type of weak supervision they are able to harness. Indeed, some do not support new object classes in the new domain [20], others can only use a label indicating the presence of an object class [46]. However, in practice, the supervision on the new domain can come in very different forms. For instance, the count of each object class can be given, such as in atom detection from molecule images where only chemical formula might be given. Or, when many objects are present on an image, a range can be provided instead of an exact class counts (e.g. \"there are at least 4 cats on this image\"). Crucially, this variety of potential supervisory signals on the target domain cannot be fully utilized by existing domain adaption approaches." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 414, + 506, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 414, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 104, + 414, + 506, + 514 + ], + "type": "text", + "content": "To address this limitation, we introduce ProbKT, a novel framework that allows to generalize knowledge transfer in object detection to arbitrary types of weak supervision using neural probabilistic logical reasoning [27]. This paradigm allows to connect probabilistic outputs of neural networks with logical rules and to infer the resulting probability of particular queries. One can then evaluate the probability of a query such as \"the image contains at least two animals\" and differentiate through the probabilistic engine to train the underlying neural network. 
Our approach allows for arbitrarily complex logical statements and therefore supports weak supervision like class counts or ranges, among others. To our knowledge, this is the first approach to allow for such versatility in utilizing the available information on the new domain." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 519, + 506, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 506, + 586 + ], + "type": "text", + "content": "To assess the capabilities of this framework, we provide extensive empirical analysis of multiple object detection datasets. Our approach also supports any type of objects detection backbone architecture. We thus use two popular backbone architectures, DETR [7] and RCNN [34] and evaluate their performance in terms of accuracy, convergence as well as generalization on out-of-distribution data. Our experiments show that, due to its ability to use the complete supervisory signal, our approach outperforms previous works in a wide range of setups." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 590, + 506, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 506, + 647 + ], + "type": "text", + "content": "Key contributions: (1) We propose a novel knowledge transfer framework for object detection relying on probabilistic programming that uniquely allows using arbitrary types of weak supervision on the target domain. (2) We make our approach amenable to different levels of computational capabilities by proposing different approximations of ProbKT. (3) We provide an extensive experimental setup to study the capabilities of our framework for knowledge transfer and out-of-distribution generalization." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 662, + 216, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 662, + 216, + 675 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 216, + 675 + ], + "type": "text", + "content": "2 RELATED WORKS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "A comparative summary of related works is given in Table 1. We distinguish three main categories: (1) pure weakly supervised object detection methods (WSOD) that do not leverage a richly annotated source domain, (2) unsupervised object detection methods with knowledge transfer (DA or domain adaptation methods) that do not use supervision on the target domain and (3) weakly supervised" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": "object detection methods with knowledge transfer (WSOD w/transfer) that are restrictive 
in the type of supported weak supervision. To our knowledge, our work is the first to allow for arbitrary supervision on the target domain (and supporting new classes in the target domain) while also leveraging knowledge from richly annotated domains. ProbKT supports arbitrary weak supervision thanks to the inherited expressiveness of Prolog [41] which is based on a subset of first-order predicate logic, Horn clauses and is Turing-complete." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 153, + 506, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 153, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 104, + 153, + 506, + 244 + ], + "type": "text", + "content": "Weakly supervised object detection (WSOD) This class of method allows training object detection models with only weak supervision. One can thus train these approaches directly on the target domain. However, they do not allow to leverage potentially available richly annotated datasets, which has been shown to lead to worse performance [39]. Different flavors of WSOD architectures have been proposed relying on a variety of implementations such as multiple instance learning (MIL)-based [25; 40] or class activation (CAM) based [47; 3]. In contrast to WSOD methods, our approach is designed to exploit existing richly annotated datasets and thus provides increased performance on the target domain. For a comprehensive review of WSOD methods we refer the reader to Shao et al. [39]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 247, + 505, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 505, + 282 + ], + "type": "text", + "content": "Domain adaptation methods (DA) In contrast to WSOD methods, domain adaptation methods do rely on fully supervised source domain dataset. 
However, they do not assume any supervision on the target domain and are therefore not equipped to exploit such signal when available [37; 8; 22; 48]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 286, + 506, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 286, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 286, + 506, + 376 + ], + "type": "text", + "content": "WSOD with knowledge transfer Our approach belongs to the class of weakly supervised object detection models with knowledge transfer. These methods aim to transfer knowledge from a source domain, where full supervision is available, to a target domain where only weak labels are available. Existing work in this class of models only allows for limited type of supervision of the target domain. Most architectures only support a label indicating the presence or absence of a class of object in the image[14; 46; 43]. Inoue et al. [20] allows for class counts as weak supervision but unfortunately does not allow for new classes in the target domain. In contrast, ProbKT natively allows for class counts and new classes as well as other types of weak supervision." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 380, + 506, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 469 + ], + "type": "text", + "content": "Neural probabilistic logical reasoning Probabilistic logical reasoning combines logic and probability theory. Favored for its high-level reasoning abilities, it was introduced as an alternative way to deep learning in the quest for artificial intelligence [10]. Statistical artificial intelligence [32; 23] and probabilistic logic programming [11] are examples of areas relying on these premises. In a unification effort, researchers have proposed hybrid architectures, embedding both deep learning and logical reasoning components [38; 35]. 
Our work builds upon the recent advances in the field, where combinations of deep learning, logical, and probabilistic approaches were introduced [27], allowing high-level reasoning with uncertainty using differentiable neural network architectures." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 106, + 473, + 504, + 596 + ], + "blocks": [ + { + "bbox": [ + 106, + 473, + 504, + 596 + ], + "lines": [ + { + "bbox": [ + 106, + 473, + 504, + 596 + ], + "spans": [ + { + "bbox": [ + 106, + 473, + 504, + 596 + ], + "type": "table", + "html": "
MethodTypeAnnotated source dom.Weak supervisionNew classesImplementation
Li et al. [25]WSODXpresence/absenceMIL-based
Bilen and Vedaldi [6]WSODXpresence/absencespatial pyramid pooling layer
Song et al. [40]WSODXpresence/absenceMIL based
Zhou et al. [47]WSODXmixCAM-based
Bae et al. [3]WSODXmixCAM based
Kundu et al. [24]DAone-shotClass-Incremental DA
Saito et al. [37]DAXXStrong-Weak Distribution Alignment
Chen et al. [8]DAXXAdversarial training
Kim et al. [22]DAXXAdversarial training and Domain Diversification
Zhu et al. [48]DAXXselective region adaptation framework
Deselaers et al. [14]WSOD w/transferpresence/absenceCRF-based, iteratively
Zhong et al. [46]WSOD w/transferpresence/absenceMIL based, iteratively
Uijlings et al. [43]WSOD w/transferpresence/absenceMIL based, non iteratively
Inoue et al. [20]WSOD w/transferclass countsXDA + pseudolabeling, iteratively
ProbKT (ours)WSOD w/transferarbitraryProbabilistic logical reasoning, iteratively
", + "image_path": "ffed8d9d0b2a815dd61a3c3b9f359f8fe7ebf87864249d776cdd234705f0d080.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 604, + 504, + 628 + ], + "lines": [ + { + "bbox": [ + 104, + 604, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 504, + 628 + ], + "type": "text", + "content": "Table 1: Summary table of related works with weakly supervised object detection(WSOD), Domain Adaptation(DA) and weakly supervised knowledge transfer methods (WSOD w/ transfer)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 652, + 211, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 652, + 211, + 664 + ], + "spans": [ + { + "bbox": [ + 105, + 652, + 211, + 664 + ], + "type": "text", + "content": "3 METHODOLOGY" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 677, + 227, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 227, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 227, + 689 + ], + "type": "text", + "content": "3.1 PROBLEM STATEMENT" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "We consider the problem of weakly supervised knowledge transfer for object detection. Using a model trained on a richly annotated source domain, we aim at improving its performance on a less richly annotated target domain." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_s = \\{(I_s^i, b_s^i, y_s^i) : i = 1, \\dots, N_s)\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " be a dataset issued from the source domain and consisting of " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "N_s" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " images " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "I_s" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " along with their annotations. 
We write " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "b_s^i \\in \\mathbb{R}^{n_i \\times 4}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "y_s^i \\in \\{1, \\dots, K_s\\}^{n_i}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " for the box coordinates and class labels of objects in image " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "I_s^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "n_i" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " is the number of objects present in image " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "I_s^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "K_s" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " is the total number of object classes in the source domain. This represents the typical dataset required to train classical fully-supervised object detection architectures. 
The target dataset " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t = \\{(I_t^i, q_t^i) : i = 1, \\dots, N_t\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " contains " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "N_t" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " images from the target domain along with image-level annotations " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "q_t^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": ". These annotations are logical statements about the content of the image in terms of object classes and their location. Examples include the presence of different classes in each image (i.e., the classical assumption in weakly supervised object detection) but also extends to the counts of classes or a complex combination of counts of objects attributes (e.g., \"two red objects, and at least two bicycles\"). What is more, the logical statements " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "q_t^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " can include classes not already present in the source domain. This type of logical annotation is then strictly broader than the restrictive supervision usually assumed." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 504, + 266 + ], + "type": "text", + "content": "Based on the availability of a source dataset and a target dataset as described above, our goal is then to harness the available detailed information from the source domain to perform accurate object detection on the target domain. A graphical illustration of this process is given in Figure 1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 285, + 194, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 194, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 194, + 297 + ], + "type": "text", + "content": "3.2 BACKGROUND" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 309, + 227, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 309, + 227, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 227, + 320 + ], + "type": "text", + "content": "3.2.1 OBJECT DETECTION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": "Object detection aims at predicting the location and labels of objects in images. 
One then wishes to learn a parametric function " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "f_{\\theta}:\\mathcal{I}\\rightarrow \\{\\mathcal{B}\\times \\mathbb{R}^{K}\\}^{\\mathbb{Z}}" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "f_{\\theta}(I) = \\{(\\hat{b},\\hat{p}_y)\\}^{\\hat{n}} = \\{(\\hat{b}_i,\\hat{p}_{y,i}):i = 1,\\dots,\\hat{n}\\}" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": " such that the distance between predicted and true boxes and labels, " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "d(\\{(\\hat{b},\\hat{p}_y)\\}^{\\hat{n}},\\{(b,y)\\}^{n})" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": ", is minimum. Objects detection architecture would usually output box features proposals " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "\\{h_i:i = 1,\\dots,\\hat{n}\\}" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": " conditioned on which they would predict the probability vector of class labels " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "\\hat{p}_{y,i} = g_p(h_i)" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": " and the box location predictions " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "\\hat{b}_i = g_b(h_i)" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": " using shared parametric functions " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "g_{p}(\\cdot)" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", 
"content": " and " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "g_{b}(\\cdot)" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": ". For an object " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": ", we write the predicted probability of the object belonging to class " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "inline_equation", + "content": "\\hat{p}_{y,n}^{k}" + }, + { + "bbox": [ + 104, + 331, + 506, + 422 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 437, + 301, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 301, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 301, + 449 + ], + "type": "text", + "content": "3.2.2 PROBABILISTIC LOGICAL REASONING" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "text", + "content": "Probabilistic logical reasoning uses knowledge representation relying on probabilities that allow encoding uncertainty in knowledge. 
Such a knowledge is encoded in a probabilistic logical program " + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "text", + "content": " as a set of " + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "text", + "content": " probabilistic facts " + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "inline_equation", + "content": "U = \\{U_{1},\\dots,U_{N}\\}" + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "text", + "content": " logical rules " + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "inline_equation", + "content": "F = \\{f_{1},\\dots f_{M}\\}" + }, + { + "bbox": [ + 104, + 459, + 504, + 538 + ], + "type": "text", + "content": " connecting them. A simple example of probabilistic fact is \"Alice and Bob will each pass their exam with probability 0.5\" and an example of logical rule is \"if both Alice and Bob pass their exam, they will host a party\". Combining probabilistic facts and logical rules, one can then construct complex probabilistic knowledge representation, that can also be depicted as probabilistic graphical models." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": "Probabilistic logical programming allows to perform inference by computing the probability of a particular statement or query. 
For instance, one could query the probability that \"Alice and Bob will host a party\". This query is executed by summing over the probabilities of occurrence of the different worlds " + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "inline_equation", + "content": "w = \\{u_1, \\dots, u_N\\}" + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": " (i.e. individual realization of the set of probabilistic facts) that are compatible with the query " + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": ". The probability of a query " + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": " in a program " + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": " can then be inferred as " + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "inline_equation", + "content": "P_{\\mathcal{P}}(q) = \\sum_{w} P(w) \\cdot \\mathbb{I}[F(w) \\equiv q]" + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "inline_equation", + "content": "F(w) \\equiv q" + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": " stands for the fact that propagation of the realization " + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": " across the knowledge graph, according to the logical rules " + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "inline_equation", + "content": "F" + 
}, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": " leads to " + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 542, + 504, + 621 + ], + "type": "text", + "content": " being true." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "text", + "content": "Remarkably, recent advances in probabilistic programming have led to learnable probabilistic facts [27]. In particular, the probability of a fact can be generated by a neural network with learnable weights. Such a learnable probabilistic fact is then referred to as a neural predicate " + }, + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "inline_equation", + "content": "U^{\\theta}" + }, + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "text", + "content": ", where we make the dependence on the weights " + }, + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "text", + "content": " explicit. One can then train these weights to minimize a loss that depend on the probability of a query " + }, + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "inline_equation", + "content": "\\hat{\\theta} = \\arg \\min_{\\theta} \\mathcal{L}(P(q \\mid \\theta))" + }, + { + "bbox": [ + 104, + 624, + 504, + 684 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "Our approach builds upon this ability to learn neural predicates and uses DeepProbLog [27] as the probabilistic reasoning backbone. DeepProbLog is a neural probabilistic logic programming language that allows to conveniently perform inference and differentiation with neural predicates. We refer the reader to the excellent introduction of Manhaeve et al. [28] for further details about this framework." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 504, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 504, + 105 + ], + "type": "text", + "content": "3.3 PROBKT: WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH PROBABILISTIC LOGICAL REASONING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "text", + "content": 
"A graphical description of our approach is presented in Figure 2. Our framework starts from a pre-trained object detection model " + }, + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "text", + "content": " on the source domain. The backbone of this model is extracted and inserted into a new object detection model " + }, + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "inline_equation", + "content": "f_{\\theta}^{*}" + }, + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "text", + "content": " with new target box position predictors and box label classifiers. This new model is then used to predict box proposals along with the corresponding box features on target domain images " + }, + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "text", + "content": ". These box features are then fed to a new target box position predictor and box label classifier. The predictions of this classifier are considered neural predicates and are given to a probabilistic logical module. This module evaluates the probability of queries " + }, + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "inline_equation", + "content": "q_{t}" + }, + { + "bbox": [ + 104, + 114, + 506, + 214 + ], + "type": "text", + "content": ", the loss, and the corresponding gradient that can be backpropagated to the classifier and the backbone. 
As we want to maximize the probability of the queries being true, we use the following loss function:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 228, + 232, + 504, + 259 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 232, + 504, + 259 + ], + "spans": [ + { + "bbox": [ + 228, + 232, + 504, + 259 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\theta} = \\sum_ {\\left(I _ {t}, q _ {t}\\right) \\in \\mathcal {D} _ {t}} - \\log P _ {\\mathcal {P}} \\left(q _ {t} \\mid f _ {\\theta} ^ {*} \\left(I _ {t}\\right)\\right) \\tag {1}", + "image_path": "e51e773ecb59e8e62bb56f6831b1affbee791ce9d3b18e5fca573bcba0a9b3cf.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 269, + 504, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 504, + 304 + ], + "type": "text", + "content": "In theory, the backbone can be trained end to end with this procedure. Our experiments showed that only updating the box features classifiers resulted in more stability as also shown in previous works [46]. We then adopt here the same iterative relabeling strategy, as described next." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 133, + 315, + 480, + 495 + ], + "blocks": [ + { + "bbox": [ + 133, + 315, + 480, + 495 + ], + "lines": [ + { + "bbox": [ + 133, + 315, + 480, + 495 + ], + "spans": [ + { + "bbox": [ + 133, + 315, + 480, + 495 + ], + "type": "image", + "image_path": "dffc40d15684072de167b6480bb4e4eca8afe6c9bdae887f549c4e44330c8f86.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "lines": [ + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "text", + "content": "Figure 2: ProbKT. 
The pre-trained object detection backbone outputs the box features " + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "text", + "content": " for the detected objects. Box classifiers (red) and box position predictors (blue) then predict corresponding label predictions " + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "inline_equation", + "content": "\\hat{p}_y" + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "text", + "content": " and box position predictions " + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "inline_equation", + "content": "\\hat{b}" + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "text", + "content": " that are fed to the probabilistic reasoning layer. This layer computes the probability of the query along with the gradients with respect to " + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "inline_equation", + "content": "\\hat{p}_y" + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "inline_equation", + "content": "\\hat{b}" + }, + { + "bbox": [ + 104, + 505, + 506, + 565 + ], + "type": "text", + "content": " that can be backpropagated through the entire network." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 585, + 244, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 244, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 244, + 594 + ], + "type": "text", + "content": "3.3.1 ITERATIVE RELABELING" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 604, + 506, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 604, + 506, + 638 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 506, + 638 + ], + "type": "text", + "content": "The approach described above allows to fine-tune our model " + }, + { + "bbox": [ + 104, + 604, + 506, + 638 + ], + "type": "inline_equation", + "content": "f_{\\theta}^{*}" + }, + { + "bbox": [ + 104, + 604, + 506, + 638 + ], + "type": "text", + "content": " to the target domain. To further improve the performance, we propose an iterative relabeling strategy that consists in multiple steps: fine-tuning, re-labeling and re-training. A similar has also been proposed by Zhong et al. [46]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "text", + "content": "Fine-tuning. This step corresponds to training ProbKT on the weakly supervised labels, by minimizing the loss of Equation 1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 670, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 670, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 670, + 504, + 704 + ], + "type": "text", + "content": "Re-labeling. Once ProbKT has been trained, we can use its predictions to annotate images in the target domain. 
In practice, we only relabel images for which the model predictions comply with the available query labels in order to avoid too noisy labels." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "Re-training. The re-labeled target domain can be used to re-train the object detection backbone of ProbKTin a fully-supervised fashion." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": "This procedure can be repeated multiple times to improve the quality of the relabeling and the quantity of relabelled in the target domain dataset. A graphical representation of the relabeling pipeline is presented in Figure 3." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 126, + 506, + 183 + ], + "blocks": [ + { + "bbox": [ + 106, + 126, + 506, + 183 + ], + "lines": [ + { + "bbox": [ + 106, + 126, + 506, + 183 + ], + "spans": [ + { + "bbox": [ + 106, + 126, + 506, + 183 + ], + "type": "image", + "image_path": "0d147b1754e149afa89c63b8de727c6f155d030239363796f62a085bc8d10f91.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "lines": [ + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "type": "text", + "content": "Figure 3: Iterative relabeling. A full cycle is composed of a fine-tuning, a re-labeling and a re-training step. After one cycle, the fine-tuning step and/or re-labeling step can be iteratively repeated." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 232, + 373, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 232, + 373, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 373, + 243 + ], + "type": "text", + "content": "3.3.2 COMPUTATIONAL COMPLEXITY AND APPROXIMATIONS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 251, + 504, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 251, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 251, + 504, + 319 + ], + "type": "text", + "content": "The computational complexity of inference in probabilistic programming depends on the specific query " + }, + { + "bbox": [ + 104, + 251, + 504, + 319 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 251, + 504, + 319 + ], + "type": "text", + "content": " and several approximations have been proposed for improving the computation time [44]. 
We propose two approaches for reducing the computational cost adapted to object detection: (1) filtering the data samples before applying ProbKT (see Appendix Section C.1) or (2) when the supervision consists of the class labels counts, considering only the most probable world (ProbKT*) instead of all possible worlds." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 330, + 468, + 352 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 330, + 468, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 330, + 468, + 352 + ], + "type": "text", + "content": "3.3.3 PROBKT*: THE MOST PROBABLE WORLD AND CONNECTION TO HUNGARIAN MATCHING" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "content": "The probabilistic inference step requires a smart aggregation of all worlds compatible with the query " + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "content": ". Yet, in certain cases, one can reduce the computational cost by only considering the most probable world. Indeed, consider the case when the query consists of the list of different class labels in the images. For a number of boxes " + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "inline_equation", + "content": "\\hat{n}" + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "content": " proposed by the objects detection model, the query can be written as the set of labels " + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "inline_equation", + "content": "q = \\{y^i : i = 1, \\dots, \\hat{n}\\}" + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "content": ". 
If we further write " + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "inline_equation", + "content": "\\hat{p}_{y,n}^k" + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "content": " as the probability of the label of box " + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "content": " belonging to class " + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 360, + 506, + 427 + ], + "type": "text", + "content": " given by the model (as introduced in Section 3.2.1), we have:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 189, + 432, + 420, + 466 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 432, + 420, + 466 + ], + "spans": [ + { + "bbox": [ + 189, + 432, + 420, + 466 + ], + "type": "interline_equation", + "content": "P _ {\\mathcal {P}} (q) = \\sum_ {j = 1} ^ {\\hat {n}!} \\hat {p} _ {y, 0} ^ {\\sigma_ {j} (0)} \\cdot \\hat {p} _ {y, 1} ^ {\\sigma_ {j} (1)} \\cdot \\ldots \\cdot \\hat {p} _ {y, \\hat {n}} ^ {\\sigma_ {j} (\\hat {n})} = \\sum_ {j = 1} ^ {\\hat {n}!} \\prod_ {n} \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)}", + "image_path": "978dc2030c92a0be7a8b5748f7fe70078585fe87d23a870e2fbb76040bf0d096.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "inline_equation", + "content": "\\sigma_{j}" + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "content": " corresponds to the " + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "inline_equation", + "content": 
"j^{th}" + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "content": " permutation of the query vector " + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "content": ". To avoid the computation of each possible world contribution, one can only use the configuration with the largest contribution to " + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "inline_equation", + "content": "P_{\\mathcal{P}}(q)" + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "content": " and discard the other ones." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 510, + 381, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 381, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 381, + 523 + ], + "type": "text", + "content": "This possible world corresponds to the permutation " + }, + { + "bbox": [ + 104, + 510, + 381, + 523 + ], + "type": "inline_equation", + "content": "\\sigma^{*}" + }, + { + "bbox": [ + 104, + 510, + 381, + 523 + ], + "type": "text", + "content": " that satisfies:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 528, + 469, + 553 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 528, + 469, + 553 + ], + "spans": [ + { + "bbox": [ + 140, + 528, + 469, + 553 + ], + "type": "interline_equation", + "content": "\\sigma^ {*} = \\underset {\\sigma} {\\arg \\max} \\log (\\prod_ {n} \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)}) = \\underset {\\sigma} {\\arg \\max} \\sum_ {n} \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)} = \\underset {\\sigma} {\\arg \\min} \\sum_ {n} (1 - \\hat {p} _ {y, n} ^ {\\sigma_ {j} (n)}).", + "image_path": "19c6371d66873afdc2e9d9de86283b0a74e5fc63938efa30b71ba0c154d9c2a6.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 557, + 504, + 617 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 504, + 617 + ], + "type": "text", + "content": "Remarkably, this corresponds to the solution of the best alignment using the Hungarian matching algorithm with cost " + }, + { + "bbox": [ + 104, + 557, + 504, + 617 + ], + "type": "inline_equation", + "content": "c(n) = (1 - \\hat{p}_{y,n}^{\\sigma_j(n)})" + }, + { + "bbox": [ + 104, + 557, + 504, + 617 + ], + "type": "text", + "content": ", as used, among others, in DETR [7]. Thus, when the query is the set of class labels, the most plausible world can thus be inferred with the Hungarian matching algorithm. In Appendix C.2, we also show that the gradient of ProbKT can be interpreted as a probability weighted extension of the gradient resulting from the Hungarian matching." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 632, + 201, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 632, + 201, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 201, + 643 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 656, + 177, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 177, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 177, + 667 + ], + "type": "text", + "content": "4.1 DATASETS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "We evaluate our approach on three different datasets: (1) a CLEVR-mini dataset, (2) a Molecules dataset with images of chemical compounds, and (3) an MNIST-based object detection dataset. 
For each dataset, three subsets, corresponding to different domains, are used: (1) a source domain, (2) a target domain, and (3) an out-of-distribution domain (OOD). The source domain is the richly annotated domain that was used to pre-train the object detection model. The target domain is" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 150 + ], + "type": "text", + "content": "the domain of interest but with image-level annotations only. Lastly, the OOD domain contains images from a different distribution than the source and target domains and is used to study the generalizability of the models. Source and target domains are split into 5 folds of train and validation sets and an independent test set. We focused our experiments on the small sample regime (range 1k-2k numbers of samples) both for the source as the target domain. More details on each dataset can be found in Appendix B." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 162, + 170, + 173 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 162, + 170, + 173 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 170, + 173 + ], + "type": "text", + "content": "4.2 MODELS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 251 + ], + "type": "text", + "content": "In the experiments, we apply our method ProbKT on two different pre-trained object detection backbone models: (1) DETR [7] and (2) FasterRCNN [34]. Both are pre-trained on the COCO dataset [26]. We also evaluate an Hungarian-algorithm approximation (ProbKT*) of our method when the weak supervision allows it. For sake of conciseness, we omit the results of ProbKT* here but they can be found in Appendix D. The details of the training procedures, as well as the hyper-parameters used for the different models and the different datasets are summarized in Table 4 in Appendix A." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 261, + 223, + 271 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 261, + 223, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 223, + 271 + ], + "type": "text", + "content": "4.2.1 BASELINE MODELS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 279, + 504, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 279, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 279, + 504, + 315 + ], + "type": "text", + "content": "As shown in Section 2, all available approaches for weakly supervised object detection are very restrictive in terms of the supervision signal they support. Our main comparison partner is the state of the art WSOD-transfer method [46]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 318, + 506, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 318, + 506, + 385 + ], + "spans": [ + { + "bbox": [ + 104, + 318, + 506, + 385 + ], + "type": "text", + "content": "Additionally, we compare our approach against a Resnet50 [18] backbone pre-trained on ImageNet [12]. Fine-tuning is performed by adding an extra multitask regression layer that is trained to predict the individual counts of the objects in the image as in Xue et al. [45]. This architecture naturally relies only on label counts in the target images for fine-tuning. We then predict box predictions using class activation maps as in Bae et al. [3] to compare its performance on object localization. We call this approach Resnet50-CAM." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 390, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 390, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 390, + 504, + 425 + ], + "type": "text", + "content": "When the supervision signal allows it, we also compare with a DETR model trained end-to-end jointly on target and source domains, masking the box costs in the matching cost of the Hungarian algorithm for image-level annotated samples. We call this approach DETR-joint." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 106, + 433, + 504, + 617 + ], + "blocks": [ + { + "bbox": [ + 106, + 433, + 504, + 617 + ], + "lines": [ + { + "bbox": [ + 106, + 433, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 106, + 433, + 504, + 617 + ], + "type": "table", + "html": "
ModelData DomainCLEVR count acc.CLEVR mAP (mAP@IoU=0.5)Mol. count. accMol. mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.97 ± 0.0050.036 ± 0.014 (0.200 ± 0.071)0.978 ± 0.0040.0 ± 0.0 (0 ± 0)
Resnet50-CAMOOD0.831 ± 0.0160.029 ± 0.010 (0.153 ± 0.044)0.0 ± 0.0n/a*
Resnet50-CAMsource domain0.993 ± 0.0030.035 ± 0.019 (0.178 ± 0.084)0.828 ± 0.0210.0 ± 0.0 (0 ± 0)
WSOD-transfertarget domain0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)0.001 ± 0.00.018 ± 0.004 (0.061 ± 0.011)
WSOD-transferOOD0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)0.003 ± 0.002n/a*
WSOD-transfersource domain0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)0.0 ± 0.00.021 ± 0.003 (0.069 ± 0.009)
DETR-jointtarget domain0.159 ± 0.1330.579 ± 0.012 (0.684 ± 0.019)0.357 ± 0.1960.197 ± 0.055 (0.481 ± 0.071)
DETR-jointOOD0.084 ± 0.0390.534 ± 0.012 (0.66 ± 0.012)0.024 ± 0.021n/a*
DETR-jointsource dom.0.923 ± 0.0490.908 ± 0.017 (0.992 ± 0.001)0.232 ± 0.1270.23 ± 0.063 (0.565 ± 0.08)
RCNN (pre-trained)target domain0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)0.592 ± 0.0070.568 ± 0.005 (0.785 ± 0.004)
RCNN (pre-trained)OOD0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)0.348 ± 0.036n/a*
RCNN (pre-trained)source domain0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)0.948 ± 0.0040.737 ± 0.005 (0.979 ± 0.0)
DETR (pre-trained)target domain0.0 ± 0.00.498 ± 0.019 (0.533 ± 0.024)0.464 ± 0.0330.314 ± 0.006 (0.542 ± 0.006)
DETR (pre-trained)OOD0.0 ± 0.00.477 ± 0.013 (0.531 ± 0.021)0.002 ± 0.001n/a*
DETR (pre-trained)source domain0.97 ± 0.0090.945 ± 0.009 (0.992 ± 0.001)0.581 ± 0.0220.409 ± 0.005 (0.722 ± 0.004)
ProbKT (DETR)target domain0.946 ± 0.0140.803 ± 0.011 (0.989 ± 0.006)0.508 ± 0.0270.204 ± 0.02 (0.507 ± 0.014)
ProbKT (DETR)OOD0.726 ± 0.0350.715 ± 0.006 (0.974 ± 0.006)0.004 ± 0.003n/a*
ProbKT (DETR)source domain0.987 ± 0.0030.948 ± 0.005 (0.995 ± 0.001)0.549 ± 0.0260.38 ± 0.013 (0.713 ± 0.006)
ProbKT (RCNN)target domain0.975 ± 0.0030.856 ± 0.039 (0.993 ± 0.001)0.942 ± 0.0090.289 ± 0.041 (0.829 ± 0.054)
ProbKT (RCNN)OOD0.89 ± 0.0220.833 ± 0.042 (0.991 ± 0.001)0.603 ± 0.037n/a*
ProbKT (RCNN)source domain0.995 ± 0.0020.941 ± 0.041 (0.998 ± 0.001)0.96 ± 0.0020.666 ± 0.005 (0.978 ± 0.002)
", + "image_path": "b92c915e5e83b9e7260a3908fa40d0cde20e22714dfe2355770a2bd6ef9f706a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "content": "Table 2: Results of the experiments for the datasets: CLEVR-mini and Molecules. Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution. *: OOD test set of Molecules dataset has no bounding box labels." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 678, + 230, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 230, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 230, + 690 + ], + "type": "text", + "content": "4.3 EVALUATION METRICS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "content": "We evaluate the performance of the models on the different datasets based on two criteria: the count accuracy and the objects localization performance. The count accuracy measures the ratio of correct images where all individual counts of (all detected) objects are correct. 
To evaluate how well the" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "model is performing in localizing the different objects in the image we report the mean average precision (mAP) performance, a widely used metric for evaluating object detection models." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 423, + 132 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 423, + 132 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 423, + 132 + ], + "type": "text", + "content": "4.4 WEAKLY SUPERVISED KNOWLEDGE TRANSFER WITH CLASS COUNTS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "spans": [ + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "text", + "content": "We first investigate the performance of ProbKT when the weakly supervision consists of class counts only. 
The query " + }, + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "text", + "content": " for each image then consists of the number of objects from each class in the image. We evaluate the models on the CLEVR-mini and Molecules datasets. For the Molecules dataset, the query for an image containing 6 carbon atoms (C), 6 oxygen atoms (O) and 12 hydrogen atoms (H) would result in the following query: " + }, + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "inline_equation", + "content": "q = ([C,O,H],[6,6,12])" + }, + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "text", + "content": ". These weak labels in the case of the Molecules dataset are widely and easily available in the form of the chemical formula of the molecule on the image (e.g " + }, + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "inline_equation", + "content": "C_6H_{12}O_6" + }, + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "text", + "content": "). The recognition of atomic level entities on images of molecules is a challenge in the field of Optical Chemical Structure Recognition (OCSR) [9; 33; 29; 19]. For the CLEVR-mini dataset, the query for an example image containing 2 spheres, 1 cylinder and 3 cubes would be " + }, + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "inline_equation", + "content": "q = ([\\mathrm{Cube},\\mathrm{Cylinder},\\mathrm{Sphere}],[3,1,2])" + }, + { + "bbox": [ + 104, + 142, + 506, + 263 + ], + "type": "text", + "content": ". Formal descriptions of the queries for each task are presented in Appendix E." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 269, + 506, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 506, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 506, + 413 + ], + "type": "text", + "content": "Results of the experiments are summarized in Table 2. We observe on both datasets that ProbKT is able to transfer knowledge from the source domain to the target domain and improve count accuracy on the target domain and in most cases also on the source domain. The count accuracy increases on both the target domain and on OOD, suggesting better generalization performance. This is in contrast with Resnet50-CAM which performs well on the target domain of the Molecules dataset but fails on OOD. We also note a significant improvement in object localization (mAP) for ProbKT on the CLEVR-mini dataset. However, fine-tuning seems detrimental for mAP on the Molecules dataset. This can be explained by the very small bounding boxes in the Molecules dataset. We therefore also report the mAP@IoU=0.5 where we observe some increase in performance after fine-tuning. Lastly, we observe that our approach outperforms WSOD-transfer on all metrics for both datasets. WSOD-transfer performs well on CLEVR-mini but fails for the Molecules dataset. This can be explained by the fact that this method only supports class indicators (whether a class is present in the image), which is particularly detrimental in molecules images containing a lot of objects." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 428, + 293, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 428, + 293, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 293, + 439 + ], + "type": "text", + "content": "4.5 OTHER TYPES OF WEAK SUPERVISION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 449, + 206, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 206, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 206, + 460 + ], + "type": "text", + "content": "4.5.1 CLASS RANGES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 469, + 506, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 506, + 570 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 506, + 570 + ], + "type": "text", + "content": "The annotation of images is a tedious task, which limits the availability of fully annotated datasets. When the number of objects on an image is large, counting the exact number of objects of a particular class becomes too time-consuming. A typical annotation in this case consists oof class ranges where, instead of exact class counts, an interval is given for the count. For example an image from the CLEVR-mini dataset with more than 4 cubes, exactly 4 cylinders and less than 4 spheres would result in the following query: " + }, + { + "bbox": [ + 104, + 469, + 506, + 570 + ], + "type": "inline_equation", + "content": "q = ([\\text{cube}, \\text{cylinder}, \\text{sphere}], [[4, \\infty[, [4, 5[, [0, 4[}})" + }, + { + "bbox": [ + 104, + 469, + 506, + 570 + ], + "type": "text", + "content": ". We evaluate this experimental setup and report results in Table 3. We observe that ProbKT performs significantly better than WSOD-transfer on count accuracy, which still uses only presence/absence labels. 
We note that Resnet50-CAM is unable to use this type of supervision and is thus reported as " + }, + { + "bbox": [ + 104, + 469, + 506, + 570 + ], + "type": "inline_equation", + "content": "n / a" + }, + { + "bbox": [ + 104, + 469, + 506, + 570 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 106, + 582, + 504, + 680 + ], + "blocks": [ + { + "bbox": [ + 106, + 582, + 504, + 680 + ], + "lines": [ + { + "bbox": [ + 106, + 582, + 504, + 680 + ], + "spans": [ + { + "bbox": [ + 106, + 582, + 504, + 680 + ], + "type": "table", + "html": "
ModelData DomainMNIST count acc.MNIST sum acc.MNIST mAP (mAP@IoU=0.5)CLEVR* count acc.CLEVR* mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.044 ± 0.0410.506 ± 0.0630.003 ± 0.003(0.014 ± 0.011)n/an/a
Resnet50-CAMOOD0.01 ± 0.0090.015 ± 0.0040.003 ± 0.002(0.011 ± 0.007)n/an/a
Resnet50-CAMsource domain0.127 ± 0.1320.649 ± 0.1080.005 ± 0.004(0.028 ± 0.018)n/an/a
WSOD-transfertarget domainn/an/an/a0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)
WSOD-transferOODn/an/an/a0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)
WSOD-transfersource domainn/an/an/a0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)
RCNN (pre-trained)target domain0.292 ± 0.0050.298 ± 0.0050.632 ± 0.014 (0.685 ± 0.002)0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)
RCNN (pre-trained)OOD0.205 ± 0.0040.212 ± 0.0040.631 ± 0.013 (0.683 ± 0.002)0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)
RCNN (pre-trained)source domain0.961 ± 0.0080.961 ± 0.0080.917 ± 0.021 (0.988 ± 0.002)0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)
ProbKT (RCNN)target domain0.902 ± 0.0050.903 ± 0.0050.786 ± 0.021 (0.974 ± 0.001)0.971 ± 0.0060.838 ± 0.034 (0.993 ± 0.001)
ProbKT (RCNN)OOD0.863 ± 0.0080.865 ± 0.0080.778 ± 0.021 (0.97 ± 0.001)0.884 ± 0.010.812 ± 0.036 (0.991 ± 0.001)
ProbKT (RCNN)source domain0.967 ± 0.0040.967 ± 0.0040.873 ± 0.016 (0.989 ± 0.001)0.994 ± 0.0010.922 ± 0.035 (0.998 ± 0.001)
", + "image_path": "9d4fed505265ed996e65eb8ef74e410c001db88ab11fd67c0481eca4169f992a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 686, + 504, + 720 + ], + "lines": [ + { + "bbox": [ + 104, + 686, + 504, + 720 + ], + "spans": [ + { + "bbox": [ + 104, + 686, + 504, + 720 + ], + "type": "text", + "content": "Table 3: Results of the experiments on the MNIST object detection dataset and on CLEVR* dataset (*CLEVR uses ranges of class counts as labels instead of exact class counts). Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 224, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 224, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 224, + 94 + ], + "type": "text", + "content": "4.5.2 COMPLEX QUERIES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "text", + "content": "More complex types of weak supervision 
than the ones considered above are also possible. To illustrate the capabilities of our approach, we build an MNIST object detection dataset where images show multiple digits as objects. Examples images are available in Appendix B. The weak supervision is here the sum of all digits in the image: " + }, + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "inline_equation", + "content": "q = \\mathrm{SUM}(\\mathrm{digits})" + }, + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "text", + "content": ". Our ProbKT can seamlessly integrate this type of supervision as shown in Table 3. As all other baselines are unable process this type of supervision, we compare against a pre-trained RCNN and a variation of Resnet50-CAM where we add an extra neural network layer that sums the individual counts to give the resulting sum. We report count accuracy, mAP and sum accuracy. The sum accuracy measures the ratio of correct images where the predicted sum (instead of the label of the digits) is correct. Details about the results on extra experiments with DETR as backbone using complex types of weak supervision can be found in Appendix D." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 236, + 217, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 217, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 217, + 247 + ], + "type": "text", + "content": "4.6 ABLATION STUDIES" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 266, + 225, + 350 + ], + "blocks": [ + { + "bbox": [ + 106, + 266, + 225, + 350 + ], + "lines": [ + { + "bbox": [ + 106, + 266, + 225, + 350 + ], + "spans": [ + { + "bbox": [ + 106, + 266, + 225, + 350 + ], + "type": "image", + "image_path": "0e773b45cbf7640f37656d4e8b4687dcfb7363cfd1d1ec58084cdbf33c00298d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 113, + 354, + 227, + 365 + ], + "lines": [ + { + "bbox": [ + 113, + 354, + 227, + 365 + ], + "spans": [ + { + "bbox": [ + 113, + 354, + 227, + 365 + ], + "type": "text", + "content": "(a) CLEVR iterative relabeling" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 236, + 264, + 358, + 350 + ], + "blocks": [ + { + "bbox": [ + 236, + 264, + 358, + 350 + ], + "lines": [ + { + "bbox": [ + 236, + 264, + 358, + 350 + ], + "spans": [ + { + "bbox": [ + 236, + 264, + 358, + 350 + ], + "type": "image", + "image_path": "d17235340db1e49a74095c49b3c4b80909d704f18f417672a46e1577bd8f7bad.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 371, + 504, + 404 + ], + "lines": [ + { + "bbox": [ + 104, + 371, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 504, + 404 + ], + "type": "text", + "content": "Figure 4: Iterative relabeling performance for the different datasets. Iteration 0: pretrained on source domain. Iteration 1: fine-tuned. Iteration 2: re-labeled and re-trained. Iteration 3: relabeled and re-trained. Iteration 4: relabeled and re-trained." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 371, + 264, + 493, + 350 + ], + "blocks": [ + { + "bbox": [ + 242, + 354, + 364, + 365 + ], + "lines": [ + { + "bbox": [ + 242, + 354, + 364, + 365 + ], + "spans": [ + { + "bbox": [ + 242, + 354, + 364, + 365 + ], + "type": "text", + "content": "(b) Molecules iterative relabeling" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 371, + 264, + 493, + 350 + ], + "lines": [ + { + "bbox": [ + 371, + 264, + 493, + 350 + ], + "spans": [ + { + "bbox": [ + 371, + 264, + 493, + 350 + ], + "type": "image", + "image_path": "01b3ce7dde365f828fc3b5f7694aacef5d650d5f027b39ff3eafe00a70f51d7e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 382, + 354, + 493, + 365 + ], + "lines": [ + { + "bbox": [ + 382, + 354, + 493, + 365 + ], + "spans": [ + { + "bbox": [ + 382, + 354, + 493, + 365 + ], + "type": "text", + "content": "(c) MNIST iterative relabeling" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 426, + 504, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 492 + ], + "type": "text", + "content": "Iterative relabeling. In Figure 4, we plot the evolution of the performance on the test sets after multiple rounds of fine-tuning and re-labeling, as detailed in Section 3.3.1. The final performance reported in the results tables is selected based on best relabeling iteration on the validation dataset. We observe that iterative relabeling after fine-tuning can improve performance significantly. Nevertheless, the benefit of iterative relabeling is less pronounced for DETR on the Molecules dataset. 
We impute it to the fact that the fine-tuned DETR model is less accurate on this dataset." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 497, + 222, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 497, + 222, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 222, + 509 + ], + "type": "text", + "content": "Object detection backbone" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 514, + 506, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 570 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 570 + ], + "type": "text", + "content": "Our method can seamlessly accommodate different object detection backbones. In Table 2, we present the results for our method with a DETR[7] and a FasterRCNN[34] backbone. We observe that FasterRCNN is typically performing better. In particular, the DETR backbone performs poorly on the Molecules dataset. This could be due to the small objects in the Molecules dataset. Indeed, Carion et al. [7] recommend to use DETR-DC5 or DETR-DC5-R101 for small objects instead." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 586, + 293, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 293, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 293, + 597 + ], + "type": "text", + "content": "5 CONCLUSIONS AND DISCUSSION" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": "Objects detection models are a key component of machine learning deployment in the real world. However, training such models usually requires large amounts of richly annotated images that are often prohibitive for many applications. 
In this work, we proposed a novel approach to train object detection models by leveraging richly annotated datasets from other domains and allowing arbitrary types of weak supervision on the target domain. Our architecture relies on a probabilistic logical programming engine that efficiently blends the power of symbolic reasoning and deep learning architecture. As such, our model also inherits the current limitations from the probabilistic reasoning implementations, such as higher computational complexity. We proposed several approaches to speed-up the inference process significantly and our work will directly benefit from further advances in this field. Lastly, the versatility of probabilistic programming could help support other related tasks in the future, such as image to graph translation." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "Reproducibility Statement Details for reproducing all experiments shown in this work are available in Appendix E. More details on the datasets used in the experiments can be found in Appendix B." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 117, + 201, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 117, + 201, + 128 + ], + "spans": [ + { + "bbox": [ + 105, + 117, + 201, + 128 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 135, + 506, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 135, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 135, + 506, + 268 + ], + "type": "text", + "content": "AA, MO and YM are funded by (1) Research Council KU Leuven: Symbiosis 4 (C14/22/125), Symbiosis3 (C14/18/092); (2) Federated cloud-based Artificial Intelligence-driven platform for liquid biopsy analyses (C3/20/100); (3) CELSA - Active Learning (CELSA/21/019); (4) European Union's Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No. 956832; (5) Flemish Government (FWO: SBO (S003422N), Elixir Belgium (I002819N), SB and Postdoctoral grants: S003422N, 1SB2721N, 1S98819N, 12Y5623N) and (6) VLAIO PM: Augmenting Therapeutic Effectiveness through Novel Analytics (HBC.2019.2528); (7) YM, AA, EDB, and MO are affiliated to Leuven.AI and received funding from the Flemish Government (AI Research Program). EDB is funded by a FWO-SB grant (S98819N). Computational resources and services used in this work were partly provided by the VSC (Flemish Supercomputer Center), funded by the Research Foundation - Flanders (FWO) and the Flemish Government - department EWI." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 284, + 176, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 284, + 176, + 296 + ], + "spans": [ + { + "bbox": [ + 106, + 284, + 176, + 296 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 302, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 302, + 505, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 302, + 505, + 325 + ], + "spans": [ + { + "bbox": [ + 111, + 302, + 505, + 325 + ], + "type": "text", + "content": "[1] Mnist object detection dataset. URL https://github.com/hukkelas/MNIST-ObjectDetection. accessed on 01.02.2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 331, + 505, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 331, + 505, + 354 + ], + "spans": [ + { + "bbox": [ + 111, + 331, + 505, + 354 + ], + "type": "text", + "content": "[2] Rdkit: Open-source cheminformatics. URL https://www.rdkit.org. accessed on 01.02.2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 360, + 506, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 360, + 506, + 394 + ], + "spans": [ + { + "bbox": [ + 111, + 360, + 506, + 394 + ], + "type": "text", + "content": "[3] Wonho Bae, Junhyug Noh, and Gunhee Kim. Rethinking class activation mapping for weakly supervised object localization. In European Conference on Computer Vision, pages 618-634. Springer, 2020." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 399, + 506, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 399, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 111, + 399, + 506, + 456 + ], + "type": "text", + "content": "[4] Aseem Behl, Omid Hosseini Jafari, Siva Karthik Mustikovela, Hassan Abu Alhaija, Carsten Rother, and Andreas Geiger. Bounding boxes, segmentations and object coordinates: How important is recognition for 3d scene flow estimation in autonomous driving scenarios? In Proceedings of the IEEE International Conference on Computer Vision, pages 2574-2583, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 461, + 507, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 461, + 507, + 485 + ], + "spans": [ + { + "bbox": [ + 111, + 461, + 507, + 485 + ], + "type": "text", + "content": "[5] Lukas Biewald. Experiment tracking with weights and biases, 2020. URL https://www.wandb.com/. Software available from wandb.com." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 491, + 506, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 491, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 111, + 491, + 506, + 514 + ], + "type": "text", + "content": "[6] Hakan Bilen and Andrea Vedaldi. Weakly supervised deep detection networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2846-2854, 2016." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 520, + 504, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 520, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 111, + 520, + 504, + 555 + ], + "type": "text", + "content": "[7] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. 
Springer, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 560, + 504, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 560, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 111, + 560, + 504, + 594 + ], + "type": "text", + "content": "[8] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain adaptive faster r-cnn for object detection in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3339-3348, 2018." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 600, + 506, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 600, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 111, + 600, + 506, + 633 + ], + "type": "text", + "content": "[9] Djork-Arné Clevert, Tuan Le, Robin Winter, and Floriane Montanari. Img2mol-accurate smiles recognition from molecular graphical depictions. Chemical science, 12(42):14174-14181, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 639, + 504, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 639, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 639, + 504, + 663 + ], + "type": "text", + "content": "[10] Luc De Raedt and Kristian Kersting. Probabilistic logic learning. ACM SIGKDD Explorations Newsletter, 5(1):31-48, 2003." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 670, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 670, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 106, + 670, + 504, + 692 + ], + "type": "text", + "content": "[11] Luc De Raedt and Angelika Kimmig. Probabilistic (logic) programming concepts. Machine Learning, 100(1):5-47, 2015." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 698, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 698, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 506, + 732 + ], + "type": "text", + "content": "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + "content": "[13] Li Deng. The mnist database of handwritten digit images for machine learning research [best of the web]. IEEE signal processing magazine, 29(6):141-142, 2012." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 112, + 506, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 112, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 506, + 146 + ], + "type": "text", + "content": "[14] Thomas Deselaers, Bogdan Alexe, and Vittorio Ferrari. Weakly supervised localization and learning with generic knowledge. International journal of computer vision, 100(3):275-293, 2012." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 504, + 186 + ], + "type": "text", + "content": "[15] M. Everingham, L. Van Gool, C. K. I. Williams, J. Winn, and A. Zisserman. The Pascal visual object classes (voc) challenge. International Journal of Computer Vision, 88(2):303-338, June 2010." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 194, + 504, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 504, + 228 + ], + "type": "text", + "content": "[16] Eleonora Giunchiglia, Mihaela Cătălina Stoian, Salman Khan, Fabio Cuzzolin, and Thomas Lukasiewicz. Road-r: The autonomous driving dataset with logical requirements. arXiv preprint arXiv:2210.01597, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 235, + 504, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 235, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 235, + 504, + 258 + ], + "type": "text", + "content": "[17] Ibtihaal M Hameed, Sadiq H Abdulhussain, and Basheera M Mahmmod. Content-based image retrieval: A review of recent trends. *Cogent Engineering*, 8(1):1927469, 2021." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 265, + 505, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 265, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 265, + 505, + 300 + ], + "type": "text", + "content": "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 306, + 506, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 306, + 506, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 506, + 341 + ], + "type": "text", + "content": "[19] Rodrigo Hormazabal, Changyoung Park, Soonyoung Lee, Sehui Han, Yeonsik Jo, Jaewan Lee, Ahra Jo, Seung Hwan Kim, Jaegul Choo, Moontae Lee, et al. Cede: A collection of expert-curated datasets with atom-level entity annotations for optical chemical structure recognition." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 347, + 506, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 347, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 105, + 347, + 506, + 382 + ], + "type": "text", + "content": "[20] Naoto Inoue, Ryosuke Furuta, Toshihiko Yamasaki, and Kiyoharu Aizawa. Cross-domain weakly-supervised object detection through progressive domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5001-5009, 2018." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 388, + 506, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 388, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 506, + 434 + ], + "type": "text", + "content": "[21] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. 
Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 440, + 504, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 440, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 504, + 485 + ], + "type": "text", + "content": "[22] Taekyung Kim, Minki Jeong, Seunghyeon Kim, Seokeon Choi, and Changick Kim. Diversify and match: A domain adaptive representation learning paradigm for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12456-12465, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 492, + 506, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 506, + 528 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 506, + 528 + ], + "type": "text", + "content": "[23] Daphne Koller, Nir Friedman, Sašo Džeroski, Charles Sutton, Andrew McCallum, Avi Pfeffer, Pieter Abbeel, Ming-Fai Wong, Chris Meek, Jennifer Neville, et al. Introduction to statistical relational learning. MIT press, 2007." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 533, + 504, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 533, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 504, + 568 + ], + "type": "text", + "content": "[24] Jogendra Nath Kundu, Rahul Mysore Venkatesh, Naveen Venkat, Ambareesh Revanur, and R Venkatesh Babu. Class-incremental domain adaptation. In European Conference on Computer Vision, pages 53-69. Springer, 2020." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 574, + 504, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 574, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 504, + 609 + ], + "type": "text", + "content": "[25] Dong Li, Jia-Bin Huang, Yali Li, Shengjin Wang, and Ming-Hsuan Yang. Weakly supervised object localization with progressive domain adaptation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3512-3520, 2016." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 616, + 504, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 504, + 651 + ], + "type": "text", + "content": "[26] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 656, + 506, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 506, + 691 + ], + "type": "text", + "content": "[27] Robin Manhaeve, Sebastijan Dumancic, Angelika Kimmig, Thomas Demeester, and Luc De Raedt. Deepproblog: Neural probabilistic logic programming. Advances in Neural Information Processing Systems, 31, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 697, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 506, + 732 + ], + "type": "text", + "content": "[28] Robin Manhaeve, Sebastijan Dumančić, Angelika Kimmig, Thomas Demeester, and Luc De Raedt. Neural probabilistic logic programming in deepproblog. 
Artificial Intelligence, 298: 103504, 2021." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "[29] Martijn Oldenhof, Adam Arany, Yves Moreau, and Jaak Simm. Chemographer: optical graph recognition of chemical compounds by deep learning. Journal of chemical information and modeling, 60(10):4506-4517, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 124, + 504, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 124, + 504, + 158 + ], + "spans": [ + { + "bbox": [ + 107, + 124, + 504, + 158 + ], + "type": "text", + "content": "[30] Martijn Oldenhof, Adam Arany, Yves Moreau, and Jaak Simm. Self-labeling of fully mediating representations by graph alignment. In Benelux Conference on Artificial Intelligence, pages 46-65. Springer, 2021." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 167, + 504, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 167, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 106, + 167, + 504, + 190 + ], + "type": "text", + "content": "[31] Martijn Oldenhof, Ádám Arany, Yves Moreau, and Edward De Brouwer. Updating object detection models with probabilistic programming. 2022. ICML workshop - UpML." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 198, + 504, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 198, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 107, + 198, + 504, + 232 + ], + "type": "text", + "content": "[32] Luc De Raedt, Kristian Kersting, Siraam Natarajan, and David Poole. Statistical relational artificial intelligence: Logic, probability, and computation. Synthesis lectures on artificial intelligence and machine learning, 10(2):1-189, 2016." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 239, + 504, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 239, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 107, + 239, + 504, + 262 + ], + "type": "text", + "content": "[33] Kohulan Rajan, Achim Zielesny, and Christoph Steinbeck. Decimer: towards deep learning for chemical image recognition. Journal of Cheminformatics, 12(1):1-9, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 270, + 504, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 270, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 107, + 270, + 504, + 304 + ], + "type": "text", + "content": "[34] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 312, + 504, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 312, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 107, + 312, + 504, + 335 + ], + "type": "text", + "content": "[35] Tim Rocktäschel and Sebastian Riedel. End-to-end differentiable proving. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 343, + 506, + 377 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 343, + 506, + 377 + ], + "spans": [ + { + "bbox": [ + 107, + 343, + 506, + 377 + ], + "type": "text", + "content": "[36] Noureddin M Sadawi, Alan P Sexton, and Volker Sorge. Chemical structure recognition: a rule-based approach. In Document Recognition and Retrieval XIX, volume 8297, page 82970E. International Society for Optics and Photonics, 2012." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 384, + 504, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 384, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 107, + 384, + 504, + 419 + ], + "type": "text", + "content": "[37] Kuniaki Saito, Yoshitaka Ushiku, Tatsuya Harada, and Kate Saenko. Strong-weak distribution alignment for adaptive object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6956–6965, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 426, + 506, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 426, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 107, + 426, + 506, + 461 + ], + "type": "text", + "content": "[38] Adam Santoro, David Raposo, David G Barrett, Mateusz Malinowski, Razvan Pascanu, Peter Battaglia, and Timothy Lillicrap. A simple neural network module for relational reasoning. Advances in neural information processing systems, 30, 2017." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 469, + 506, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 469, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 107, + 469, + 506, + 503 + ], + "type": "text", + "content": "[39] Feifei Shao, Long Chen, Jian Shao, Wei Ji, Shaoning Xiao, Lu Ye, Yueting Zhuang, and Jun Xiao. Deep learning for weakly-supervised object detection and localization: A survey. Neurocomputing, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 510, + 504, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 510, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 107, + 510, + 504, + 544 + ], + "type": "text", + "content": "[40] Hyun Oh Song, Ross Girshick, Stefanie Jegelka, Julien Mairal, Zaid Harchaoui, and Trevor Darrell. On learning to localize objects with minimal supervision. In International Conference on Machine Learning, pages 1611-1619. PMLR, 2014." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 552, + 504, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 552, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 107, + 552, + 504, + 575 + ], + "type": "text", + "content": "[41] Leon Sterling and Ehud Y Shapiro. The art of Prolog: advanced programming techniques. MIT press, 1994." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 583, + 504, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 583, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 107, + 583, + 504, + 617 + ], + "type": "text", + "content": "[42] Matteo Tomei, Marcella Cornia, Lorenzo Baraldi, and Rita Cucchiara. Art2real: Unfolding the reality of artworks via semantically-aware image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5849-5859, 2019." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 625, + 504, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 625, + 504, + 659 + ], + "spans": [ + { + "bbox": [ + 107, + 625, + 504, + 659 + ], + "type": "text", + "content": "[43] Jasper Uijlings, Stefan Popov, and Vittorio Ferrari. Revisiting knowledge transfer for training object class detectors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1101-1110, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 667, + 504, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 667, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 107, + 667, + 504, + 690 + ], + "type": "text", + "content": "[44] Thomas Winters, Giuseppe Marra, Robin Manhaeve, and Luc De Raedt. Deepstochlog: Neural stochastic logic programming. arXiv preprint arXiv:2106.12574, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 698, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 698, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 698, + 506, + 732 + ], + "type": "text", + "content": "[45] Yao Xue, Nilanjan Ray, Judith Hugh, and Gilbert Bigras. Cell counting by regression using convolutional neural network. In European Conference on Computer Vision, pages 274-290. Springer, 2016." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 198 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 117 + ], + "type": "text", + "content": "[46] Yuanyi Zhong, Jianfeng Wang, Jian Peng, and Lei Zhang. Boosting weakly supervised object detection with progressive knowledge transfer. In European conference on computer vision, pages 615-631. Springer, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "[47] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2921–2929, 2016." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "text", + "content": "[48] Xinge Zhu, Jiangmiao Pang, Ceyuan Yang, Jianping Shi, and Dahua Lin. Adapting object detectors via selective cross-domain alignment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 687-696, 2019." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 227, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 227, + 92 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 227, + 92 + ], + "type": "text", + "content": "A TRAINING DETAILS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 106, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 106, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 107, + 106, + 504, + 139 + ], + "type": "text", + "content": "For the hyper-parameters the idea was to stay as close as possible to the defaults of the pre-trained standard models although some lightweight tuning was done. 
In Table 4 a summary is given for the hyper-parameters used for the different models." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 150, + 504, + 254 + ], + "blocks": [ + { + "bbox": [ + 106, + 150, + 504, + 254 + ], + "lines": [ + { + "bbox": [ + 106, + 150, + 504, + 254 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 504, + 254 + ], + "type": "table", + "html": "
Modeldatasetepochslrlr_step_sizelr-gammamomentumbatch sizeweight decayoptimizer
DETR pre-train (retrain)CLEVRmax 1000.00017 (7-8)0.180.0001AdamW
DETR pre-train (retrain)Mols.max 1000.000120 (20)0.180.0001AdamW
DETR pre-train (retrain)MNISTmax 1000.000115-20 (20)0.180.0001AdamW
RCNN pre-train (retrain)all datasetsmax 300.0055 (5)0.10.910.0005SGD
RCNN Finetuneall datasetsmax 200.00116Adam
DETR FinetuneCLEVR/Molsmax 200.00116Adam
DETR FinetuneMNISTmax 200.0116Adam
DETR Finetune*CLEVR/Molsmax 1000.002200.180.0001AdamW
RCNN Finetune*CLEVRmax 200.00115Adam
RCNN Finetune*Molsmax 200.0000115Adam
DETR masked box lossCLEVR/Molsmax 1000.000170.180.0001AdamW
Resnet50-CAM modelsall datasetsmax 5000.00132Adam
", + "image_path": "1a7855b1571a57b9d911c5fb2fe516ac9cfcedac9e5308709342352c503fbf62.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 261, + 504, + 295 + ], + "lines": [ + { + "bbox": [ + 107, + 261, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 107, + 261, + 504, + 295 + ], + "type": "text", + "content": "Table 4: Overview of hyperparameters for the different models, most hyperparamaters are left default from standard models. Tuning was mostly done on learning rate and learning rate scheduling. For every fold/dataset the best epoch/lr/lr_step_size model is used based on validation data." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 107, + 317, + 180, + 329 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 317, + 180, + 329 + ], + "spans": [ + { + "bbox": [ + 107, + 317, + 180, + 329 + ], + "type": "text", + "content": "B DATASETS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 342, + 504, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 342, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 107, + 342, + 504, + 407 + ], + "type": "text", + "content": "We evaluate our approach on three different datasets: (1) a CLEVR-mini dataset, (2) a Molecules data set with images of chemical compounds, and (3) an MNIST-based object detection dataset. For each dataset, three subsets, corresponding to different domains, are used: (1) a source domain, (2) a target domain, and (3) an out-of-distribution domain (OOD). Source and target domains are split into 5 folds of train and validation sets and an independent test set. Sizes of the different splits per dataset are summarized in Table 5." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 179, + 79, + 430, + 222 + ], + "blocks": [ + { + "bbox": [ + 179, + 79, + 430, + 222 + ], + "lines": [ + { + "bbox": [ + 179, + 79, + 430, + 222 + ], + "spans": [ + { + "bbox": [ + 179, + 79, + 430, + 222 + ], + "type": "table", + "html": "
DatasetTypeSplitSize (number of samples)
MNIST object detectionSourcetrain700
MNIST object detectionSourcevalidation300
MNIST object detectionSourcetest1000
MNIST object detectionTargettrain700
MNIST object detectionTargetvalidation300
MNIST object detectionTargettest1000
MNIST object detectionOODtest1000
MoleculesSourcetrain1400
MoleculesSourcevalidation600
MoleculesSourcetest1000
MoleculesTargettrain1400
MoleculesTargetvalidation600
MoleculesTargettest1000
MoleculesOODtest1000
", + "image_path": "4654b6cb5b7c0ac89ad6173ef4a47655bb993f5dafcf0132b302ac68988e8906.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 229, + 492, + 242 + ], + "lines": [ + { + "bbox": [ + 118, + 229, + 492, + 242 + ], + "spans": [ + { + "bbox": [ + 118, + 229, + 492, + 242 + ], + "type": "text", + "content": "Table 5: Dataset sizes for the different splits. For train and validations splits 5 folds are used." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 263, + 242, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 263, + 242, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 242, + 274 + ], + "type": "text", + "content": "B.0.1 CLEVR-MINI DATASET" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 282, + 506, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 506, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 506, + 448 + ], + "type": "text", + "content": "The CLEVR-mini dataset for our experiments is a selection of samples from the CLEVR dataset [21]. The different types available in the CLEVR dataset are combinations of shapes (cube, sphere, and cylinder), materials (metal and rubber), and sizes (large and small). Colors are ignored as the images are first converted to grayscale before feeding them to the models. For the richly annotated source domain, we randomly select images with only sphere or cylinder-shaped objects (no cubes) and with a maximum of four objects per image and a minimum of three objects. For the weakly annotated target domain we experiment with two type of annotations. Firstly we experiment when we have the class counts of objects in the image available. Secondly, instead of the exact counts of classes in the image the annotations only specify if there is exactly one object class in the image or multiple. 
The advantage of this kind of labeling is that the annotator does not need to count the objects and instead just make a distinction of only one object class in image or multiple. The images in the target domain can contain all combinations of object types (including cube-shaped objects) and allow a minimum of five objects per image and a maximum of six objects per image. For the OOD dataset we also select images with all possible combinations of object types, always with 10 objects per image. Some example images from the CLEVR-mini dataset can be found in Figure 1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 460, + 236, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 236, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 236, + 471 + ], + "type": "text", + "content": "B.0.2 MOLECULES DATASET" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 479, + 504, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 504, + 590 + ], + "type": "text", + "content": "The Molecules dataset contains images depicting chemical compounds. For the richly annotated source domain, a procedure similar as described in Oldenhof et al. [29, 30] was executed using an RDKit [2] fork for generating the bounding box labels for the individual atoms present in the images. In the source domain, we allow the following atom types: carbon (C), hydrogen (H), oxygen (O), and nitrogen (N). In the weakly annotated target domain, we only have the counts of the atoms present which translates to the chemical formula of the molecule in the image (" + }, + { + "bbox": [ + 104, + 479, + 504, + 590 + ], + "type": "inline_equation", + "content": "e.g. C_6H_{12}O_6" + }, + { + "bbox": [ + 104, + 479, + 504, + 590 + ], + "type": "text", + "content": "). 
The same classes from the source domain (C, H, O, and N) are also present in the target domain as well as an extra atom type: sulfur (S). The OOD test dataset consists of 1000 images from the external UoB dataset [36] where chemical compounds containing only the atom types present in the target domain (C, H, O, N, and S). Some example images from the Molecules dataset are visualized in Figure 5." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 602, + 302, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 602, + 302, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 602, + 302, + 613 + ], + "type": "text", + "content": "B.0.3 MNIST OBJECT DETECTION DATASET" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 621, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 504, + 732 + ], + "type": "text", + "content": "The MNIST object detection dataset is generated [1] using the original MNIST dataset [13]. Each image consists of three MNIST digits randomly positioned in the image. The MNIST object detection dataset allows experimenting with a more arbitrary type of weak supervision. Each object in this dataset represents a digit that can be aggregated. This allows to label an image with only the sum of all digits in the image instead of the class counts of the objects. For the richly annotated source domain digits 7, 8, and 9 are left out. The weakly annotated target domain has all possible digit classes (0-9). The labels of the target domain only contain the sum of all digits. For the OOD test dataset, images are used that contain maximum of four MNIST digits, instead of three digits as in the other domains. Some example images from the MNIST object detection dataset are visualized in Figure 6." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 79, + 503, + 239 + ], + "blocks": [ + { + "bbox": [ + 110, + 79, + 503, + 239 + ], + "lines": [ + { + "bbox": [ + 110, + 79, + 503, + 239 + ], + "spans": [ + { + "bbox": [ + 110, + 79, + 503, + 239 + ], + "type": "image", + "image_path": "8322a53663ffea603503373d41cb97feee570947a49aa14c27fc4c4d869989ed.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 245, + 506, + 313 + ], + "lines": [ + { + "bbox": [ + 104, + 245, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 245, + 506, + 313 + ], + "type": "text", + "content": "Figure 5: Weakly supervised knowledge transfer with probabilistic logical reasoning (ProbKT). On the left we have source domain where a model can be trained using bounding box information labels, positions) but only on a limited set of atom types (C,H,O,N). In the middle we can see that the pre-trained model is not able to recognize the sulfur (S) from target domain correctly. On the right we see that the model is able to adapt to target domain after probabilistic reasoning using weak labels (e.g. counts of objects on image) and is able to recognize the sulfur (S)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 109, + 346, + 503, + 507 + ], + "blocks": [ + { + "bbox": [ + 109, + 346, + 503, + 507 + ], + "lines": [ + { + "bbox": [ + 109, + 346, + 503, + 507 + ], + "spans": [ + { + "bbox": [ + 109, + 346, + 503, + 507 + ], + "type": "image", + "image_path": "a0b861faf5e883198d7f1afee3d04067d4e837bfd41f6b562175b9ecc4cec360.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 513, + 506, + 581 + ], + "lines": [ + { + "bbox": [ + 104, + 513, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 506, + 581 + ], + "type": "text", + "content": "Figure 6: Weakly supervised knowledge transfer with probabilistic logical reasoning (ProbKT). On the left we have source domain where a model can be trained using bounding box information labels, positions) but only on a limited set of digits (0, 1, 2, 3, 4, 5, 6). In the middle we can see that the pre-trained model is not able to recognize the digit eight (8) from target domain correctly. On the right we see that the model is able to adapt to target domain after probabilistic reasoning using weak labels (e.g. sum of digits on image) and is able to recognize the digit eight (8)." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 608, + 398, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 608, + 398, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 398, + 620 + ], + "type": "text", + "content": "C PROBKT AND PROBKT* SUPPLEMENTARY DETAILS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 636, + 222, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 636, + 222, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 222, + 647 + ], + "type": "text", + "content": "C.1 FILTERING SAMPLES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 659, + 506, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 506, + 693 + ], + "type": "text", + "content": "The computation complexity of inference in the probabilistic programming module grows with the number of possible worlds. In turn, the number of possible worlds grows with the number of probabilistic facts " + }, + { + "bbox": [ + 104, + 659, + 506, + 693 + ], + "type": "inline_equation", + "content": "\\hat{n}" + }, + { + "bbox": [ + 104, + 659, + 506, + 693 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "content": "One avenue to reduce the computational cost of the inference step is then to artificially reduce the number of probabilistic facts in each image. 
Let " + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\{\\hat{p}_{y,n} : n = 1, \\dots, \\hat{n}\\}" + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "content": " the corresponding inference query. We compute the filtered set of probabilistic facts " + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\bar{p}_{y,n}" + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "content": " by setting" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 194, + 97, + 504, + 139 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 97, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 194, + 97, + 504, + 139 + ], + "type": "interline_equation", + "content": "\\bar {p} _ {y, n} ^ {k} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\hat {p} _ {y, n} ^ {k} \\geq \\delta \\\\ 0 & \\text {i f} \\exists k ^ {\\prime} \\text {s . 
t .} \\hat {p} _ {y, n} ^ {k ^ {\\prime}} \\geq \\delta \\\\ \\hat {p} _ {y, n} ^ {k} & \\text {o t h e r w i s e .} \\end{array} \\quad \\text {a n d} \\quad \\hat {p} _ {y, n} ^ {k} < \\delta \\right. \\tag {2}", + "image_path": "5651d0cbce1e0e37f9706368f6255491a87e03e24798dbbe4466d25da39d11f4.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "text", + "content": "The parameter " + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "inline_equation", + "content": "\\delta \\in [0,1]" + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "text", + "content": " is a threshold at which we consider the probabilistic fact as certain. A probability of 1 or 0 effectively discards the probabilistic fact " + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "inline_equation", + "content": "\\bar{p}_{y,n}" + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "text", + "content": " from the inference procedure. However, we also have to update the inference query " + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "text", + "content": " to reflect this filtration. We write " + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "inline_equation", + "content": "\\bar{q}" + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "text", + "content": " the filtered query " + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 148, + 506, + 194 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "text", + "content": "Example To illustrate this filtration strategy let's consider an MNIST image with 3 digits in the image: " + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "inline_equation", + "content": "\\{3,4,7\\}" + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "text", + "content": ". The query " + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "text", + "content": " corresponds to the class labels in the images. That is " + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "inline_equation", + "content": "q = \\{3,4,7\\}" + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "text", + "content": ". The object detection backbones outputs 3 box features with corresponding probabilities " + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "inline_equation", + "content": "\\{\\hat{p}_{y,0},\\hat{p}_{y,1},\\hat{p}_{y,2},\\}" + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "text", + "content": ". Now let e.g. " + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "inline_equation", + "content": "\\hat{p}_{y,1}^3 = 0.99" + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "text", + "content": ". We can filter out " + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "inline_equation", + "content": "\\hat{p}_{y,1}" + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "text", + "content": " (i.e. 
the prediction for a digit 3 is certain), and compute the filtered query " + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "inline_equation", + "content": "\\bar{q} = \\{4,7\\}" + }, + { + "bbox": [ + 104, + 204, + 506, + 264 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "type": "text", + "content": "Remark Equation 2 suggests a filtering based on the output probabilities only. However, one can also use information about the query for the filtration. For instance, one would only filter out a probabilistic fact if it is consistent with the query " + }, + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "type": "text", + "content": ". In the example above, it would be wiser not to filter out e.g. " + }, + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "type": "inline_equation", + "content": "\\hat{p}_{y,1}^{9} = 0.99" + }, + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "type": "text", + "content": " as no images are supposedly present in the image. One should then ideally propagate this probabilistic fact to the inference module such as to update the weights of the backbone and learn from this error." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 354, + 269, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 269, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 269, + 365 + ], + "type": "text", + "content": "C.2 GRADIENT OF THE LIKELIHOOD" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 374, + 299, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 299, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 299, + 387 + ], + "type": "text", + "content": "The ProbKT likelihood has the following form:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 248, + 404, + 361, + 433 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 404, + 361, + 433 + ], + "spans": [ + { + "bbox": [ + 248, + 404, + 361, + 433 + ], + "type": "interline_equation", + "content": "P _ {\\mathcal {P}} (q) = \\sum_ {\\alpha \\in E _ {q}} \\prod_ {i} \\prod_ {j} \\hat {p} _ {i j} ^ {\\alpha_ {i j}},", + "image_path": "4c50d9170530575173e69c7de9dcae8d39a7abd906b87352280200c5204c89dd.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 442, + 345, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 442, + 345, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 442, + 345, + 455 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 442, + 345, + 455 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 442, + 345, + 455 + ], + "type": "text", + "content": " is a \"possible world\" matrix of indicator variables:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 231, + 474, + 365, + 500 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 474, + 365, + 500 + ], + "spans": [ + { + "bbox": [ + 231, + 474, + 365, + 500 + ], + "type": "interline_equation", + "content": "\\alpha_ {i j} = \\left\\{ \\begin{array}{l l} 1 & 
\\text{object } i \\text{ is of class } j \\\\ 0 & \\text{otherwise,}
The gradient of the likelihood has the following form:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 245, + 536, + 365, + 566 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 536, + 365, + 566 + ], + "spans": [ + { + "bbox": [ + 245, + 536, + 365, + 566 + ], + "type": "interline_equation", + "content": "\\frac {\\partial P _ {\\mathcal {P}} (q)}{\\partial \\theta} = \\sum_ {i} \\sum_ {j} \\frac {\\partial p _ {i j}}{\\partial \\theta} C _ {i j},", + "image_path": "9b65235aa9a62381df153d8e66192e999ff167b933c982d1b0a86100b72cac1b.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 570, + 233, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 570, + 233, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 570, + 233, + 582 + ], + "type": "text", + "content": "where the weight has the form:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 189, + 586, + 419, + 614 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 586, + 419, + 614 + ], + "spans": [ + { + "bbox": [ + 189, + 586, + 419, + 614 + ], + "type": "interline_equation", + "content": "C _ {i j} = P (E | O _ {i} = j) = \\sum_ {\\alpha \\in E | O _ {i = j}} \\prod_ {i ^ {\\prime}} \\prod_ {j ^ {\\prime}} I _ {(i \\neq i ^ {\\prime} \\lor j \\neq j ^ {\\prime})} p _ {i j} ^ {\\alpha_ {i j}}", + "image_path": "6396a297df279e83c745080df3e6169d2e5e03326ca1a38fe94942a4c4f6507f.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 624, + 506, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 506, + 670 + ], + "type": "text", + "content": "In case of the Hungarian matching the most probable possible word is selected, which corresponds to setting the conditional probability " + }, + { + "bbox": [ + 104, + 624, + 506, + 670 + ], + "type": "inline_equation", + "content": "P(E|O_{i} = 
j)" + }, + { + "bbox": [ + 104, + 624, + 506, + 670 + ], + "type": "text", + "content": " to 1 if object " + }, + { + "bbox": [ + 104, + 624, + 506, + 670 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 624, + 506, + 670 + ], + "type": "text", + "content": " is paired with label " + }, + { + "bbox": [ + 104, + 624, + 506, + 670 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 624, + 506, + 670 + ], + "type": "text", + "content": " and 0 otherwise. The ProbKT gradient can be interpreted as a probability weighted extension of the gradient resulting from the Hungarian matching." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 685, + 204, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 685, + 204, + 696 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 204, + 696 + ], + "type": "text", + "content": "D FULL RESULTS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "In Table 6, we present the full results for the MNIST experiment. 
We report the count accuracy (i.e., correct identification of the digits in the image), sum accuracy (i.e., correct estimation of the sum of" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 80, + 504, + 256 + ], + "blocks": [ + { + "bbox": [ + 106, + 80, + 504, + 256 + ], + "lines": [ + { + "bbox": [ + 106, + 80, + 504, + 256 + ], + "spans": [ + { + "bbox": [ + 106, + 80, + 504, + 256 + ], + "type": "table", + "html": "
ModelTypemnist count acc.mnist sum acc.mnist mAP (mAP@IoU=0.5)
Resnet50-CAM (baseline)In-distribution0.044 ± 0.0410.506 ± 0.0630.003 ± 0.003(0.014 ± 0.011)
Resnet50-CAM (baseline)OOD0.01 ± 0.0090.015 ± 0.0040.003 ± 0.002(0.011 ± 0.007)
Resnet50-CAM (baseline)Source Domain0.127 ± 0.1320.649 ± 0.1080.005 ± 0.004(0.028 ± 0.018)
DETR (Pre-trained)In-distribution0.26 ± 0.0120.262 ± 0.010.518 ± 0.014 (0.637 ± 0.017)
DETR (Pre-trained)OOD0.173 ± 0.010.177 ± 0.0090.51 ± 0.012 (0.632 ± 0.015)
DETR (Pre-trained)Source Domain0.859 ± 0.0310.86 ± 0.0310.781 ± 0.009 (0.957 ± 0.008)
DETR (ProbKT)In-distribution0.662 ± 0.0640.664 ± 0.0650.615 ± 0.025 (0.856 ± 0.037)
DETR (ProbKT)OOD0.532 ± 0.0830.533 ± 0.0820.591 ± 0.03 (0.845 ± 0.038)
DETR (ProbKT)source domain0.878 ± 0.0230.879 ± 0.0230.737 ± 0.014 (0.952 ± 0.009)
RCNN (Pre-trained)In-distribution0.292 ± 0.0050.298 ± 0.0050.632 ± 0.014 (0.685 ± 0.002)
RCNN (Pre-trained)OOD0.205 ± 0.0040.212 ± 0.0040.631 ± 0.013 (0.683 ± 0.002)
RCNN (Pre-trained)source domain0.961 ± 0.0080.961 ± 0.0080.917 ± 0.021 (0.988 ± 0.002)
RCNN (ProbKT)In-distribution0.902 ± 0.0050.903 ± 0.0050.786 ± 0.021 (0.974 ± 0.001)
RCNN (ProbKT)OOD0.863 ± 0.0080.865 ± 0.0080.778 ± 0.021 (0.97 ± 0.001)
RCNN (ProbKT)source domain0.967 ± 0.0040.967 ± 0.0040.873 ± 0.016 (0.989 ± 0.001)
", + "image_path": "6f21851f82f09915b75762994ae1f994ccc2b3a5abbe9d170aebc51cf339c8c9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 338, + 504, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 404 + ], + "type": "text", + "content": "the digits in the image) and the mean average precision (mAP) (i.e. a common object detection metric that reflects the ability to predict the positions and labels of the objects). We observe that the Resnet baseline performs poorly, lacking the necessary logic to process this dataset. We used both DETR and RCNN as object detection backbones in our experiments, showing high test accuracies when fine-tuned with our approach. As the results suggest, RCNN backbones lead to better performance than the DETR backbone." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 106, + 443, + 504, + 679 + ], + "blocks": [ + { + "bbox": [ + 104, + 264, + 504, + 286 + ], + "lines": [ + { + "bbox": [ + 104, + 264, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 504, + 286 + ], + "type": "text", + "content": "Table 6: Results of the SUM experiments on the MNIST object detection dataset. Reported test accuracies over the 5 folds." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 443, + 504, + 679 + ], + "lines": [ + { + "bbox": [ + 106, + 443, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 106, + 443, + 504, + 679 + ], + "type": "table", + "html": "
ModelData DomainCLEVR count acc.CLEVR mAP (mAP@IoU=0.5)Mol. count. accMol. mAP (mAP@IoU=0.5)
Resnet50-CAMtarget domain0.97 ± 0.0050.036 ± 0.014 (0.200 ± 0.071)0.978 ± 0.0040.0 ± 0.0 (0 ± 0)
Resnet50-CAMOOD0.831 ± 0.0160.029 ± 0.010 (0.153 ± 0.044)0.0 ± 0.0n/a1
Resnet50-CAMsource domain0.993 ± 0.0030.035 ± 0.019 (0.178 ± 0.084)0.828 ± 0.0210.0 ± 0.0 (0 ± 0)
WSOD-transfertarget domain0.944 ± 0.0040.844 ± 0.005 (0.988 ± 0.001)0.001 ± 0.00.018 ± 0.004 (0.061 ± 0.011)
WSOD-transferOOD0.73 ± 0.0110.79 ± 0.005 (0.969 ± 0.001)0.003 ± 0.002n/a1
WSOD-transfersource domain0.989 ± 0.0010.926 ± 0.001 (0.995 ± 0.0)0.0 ± 0.00.021 ± 0.003 (0.069 ± 0.009)
DETR-jointtarget domain0.159 ± 0.1330.579 ± 0.012 (0.684 ± 0.019)0.357 ± 0.1960.197 ± 0.055 (0.481 ± 0.071)
DETR-jointOOD0.084 ± 0.0390.534 ± 0.012 (0.66 ± 0.012)0.024 ± 0.021n/a1
DETR-jointsource dom.0.923 ± 0.0490.908 ± 0.017 (0.992 ± 0.001)0.232 ± 0.1270.23 ± 0.063 (0.565 ± 0.08)
DETR (Pre-trained)target domain0.0 ± 0.00.498 ± 0.019 (0.533 ± 0.024)0.464 ± 0.0330.314 ± 0.006 (0.542 ± 0.006)
DETR (Pre-trained)OOD0.0 ± 0.00.477 ± 0.013 (0.531 ± 0.021)0.002 ± 0.001n/a1
DETR (Pre-trained)source domain0.97 ± 0.0090.945 ± 0.009 (0.992 ± 0.001)0.581 ± 0.0220.409 ± 0.005 (0.722 ± 0.004)
ProbKT*(DETR)target domain0.949 ± 0.0050.728 ± 0.014 (0.99 ± 0.003)0.589 ± 0.0420.373 ± 0.02 (0.669 ± 0.045)
ProbKT*(DETR)OOD0.741 ± 0.0380.606 ± 0.017 (0.977 ± 0.004)0.008 ± 0.008n/a1
ProbKT*(DETR)source domain0.985 ± 0.0040.937 ± 0.006 (0.995 ± 0.001)0.275 ± 0.0660.371 ± 0.021 (0.649 ± 0.041)
ProbKT(DETR)target domain0.946 ± 0.0140.803 ± 0.011 (0.989 ± 0.006)0.508 ± 0.0270.204 ± 0.02 (0.507 ± 0.014)
ProbKT(DETR)OOD0.726 ± 0.0350.715 ± 0.006 (0.974 ± 0.006)0.004 ± 0.003n/a1
ProbKT(DETR)source domain0.987 ± 0.0030.948 ± 0.005 (0.995 ± 0.001)0.549 ± 0.0260.38 ± 0.013 (0.713 ± 0.006)
RCNN (pre-trained)target domain0.0 ± 0.00.586 ± 0.014 (0.598 ± 0.013)0.592 ± 0.0070.568 ± 0.005 (0.785 ± 0.004)
RCNN (pre-trained)OOD0.0 ± 0.00.582 ± 0.012 (0.603 ± 0.011)0.348 ± 0.036n/a1
RCNN (pre-trained)source domain0.988 ± 0.0020.984 ± 0.01 (0.996 ± 0.0)0.948 ± 0.0040.737 ± 0.005 (0.979 ± 0.0)
ProbKT*(RCNN)target domain0.974 ± 0.0040.855 ± 0.025 (0.994 ± 0.001)0.945 ± 0.0060.24 ± 0.042 (0.788 ± 0.073)
ProbKT*(RCNN)OOD0.901 ± 0.0170.827 ± 0.022 (0.991 ± 0.001)0.592 ± 0.032n/a1
ProbKT*(RCNN)source domain0.993 ± 0.0020.95 ± 0.021 (0.998 ± 0.0)0.96 ± 0.0030.655 ± 0.01 (0.974 ± 0.004)
ProbKT(RCNN)target domain0.975 ± 0.0030.856 ± 0.039 (0.993 ± 0.001)0.942 ± 0.0090.289 ± 0.041 (0.829 ± 0.054)
ProbKT(RCNN)OOD0.89 ± 0.0220.833 ± 0.042 (0.991 ± 0.001)0.603 ± 0.037n/a1
ProbKT(RCNN)source domain0.995 ± 0.0020.941 ± 0.041 (0.998 ± 0.001)0.96 ± 0.0020.666 ± 0.005 (0.978 ± 0.002)
", + "image_path": "a017321583d572f434443597395419482dc156f25ba94864aaa651942cc50b19.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 686, + 504, + 709 + ], + "lines": [ + { + "bbox": [ + 104, + 686, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 104, + 686, + 504, + 709 + ], + "type": "text", + "content": "Table 7: Results of the experiments for the datasets: CLEVR-mini and Molecules. Reported test accuracies over the 5 folds. Best method is in bold for each metric and data distribution." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 122, + 717, + 353, + 729 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 717, + 353, + 729 + ], + "spans": [ + { + "bbox": [ + 122, + 717, + 353, + 729 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 122, + 717, + 353, + 729 + ], + "type": "text", + "content": "OOD test set of Molecules dataset has no bounding box labels." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 282, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 282, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 282, + 94 + ], + "type": "text", + "content": "E SOURCE CODE AND DATASETS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 150 + ], + "type": "text", + "content": "The source code and basic instructions are available on https://github.com/molden/ProbKT. The source code integrates features from the Weights & Biases (WandB) platform [5]. Basic features are supported without the need for an account on WandB but to make full use of all features we recommend to create an account." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 156, + 244, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 244, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 244, + 167 + ], + "type": "text", + "content": "Datasets can be downloaded here:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 176, + 504, + 228 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 132, + 176, + 487, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 176, + 487, + 188 + ], + "spans": [ + { + "bbox": [ + 132, + 176, + 487, + 188 + ], + "type": "text", + "content": "- CLEVR-mini dataset https://figshare.com/s/db012765e5a38e14ef9c" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 191, + 475, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 191, + 475, + 202 + ], + "spans": [ + { + "bbox": [ + 132, + 191, + 475, + 202 + ], + "type": "text", + "content": "- Molecules dataset https://figshare.com/s/3dc3508d39bf4cff8c7f" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 206, + 504, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 206, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 132, + 206, + 504, + 228 + ], + "type": "text", + "content": "- MNIST object detection dataset https://figshare.com/s/c760de026f000524db5a" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 238, + 506, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 238, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 238, + 506, + 262 + ], + "type": "text", + "content": "ProbLog script used in the ProbKT Probabilistic logical reasoning framework for counting of objects on an image (as on CLEVR-mini dataset):" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 105, + 270, + 526, + 482 + ], + "blocks": [ + { + "bbox": [ + 
105, + 270, + 526, + 482 + ], + "lines": [ + { + "bbox": [ + 105, + 270, + 526, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 526, + 482 + ], + "type": "text", + "content": ":- use_module library(lists)). \nnn(mnist_net,[X],Y,[0,1,2,3,4,5,6,7,8,9,10,11]) :: digit(X,Y). \ncount([],X,0). \ncount([X|T],X,Y):- count(T,X,Z), Y is 1+Z. \ncount([X1|T],X,Z):- X1\\=X,count(T,X,Z). \ncountall(List,X,C) :- sort(List,List1), member(X,List1), count(List,X,C). \nroll([],L,L). \nroll([H|T],A,L):- roll(T,[Y|A],L), digit(H,Y). \ncountpart(List,[],[]) \ncountpart(List,[H|T],[F|L]):- countall(List,H,F), countpart(List,T,L). \ncount.objects(X,L,C):- roll(X,[],Result), countpart(Result,L,C)." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "prolog" + }, + { + "bbox": [ + 104, + 490, + 506, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 506, + 524 + ], + "type": "text", + "content": "The query " + }, + { + "bbox": [ + 104, + 490, + 506, + 524 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 490, + 506, + 524 + ], + "type": "text", + "content": " in the case of class counts would be count.objects(X, L, C). For example an image " + }, + { + "bbox": [ + 104, + 490, + 506, + 524 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 490, + 506, + 524 + ], + "type": "text", + "content": " with 1 small metal cube and 3 large rubber cylinders would result in the following query: count.objects(X, [small métal Cube, large rubber_cylinder], [1, 3])." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 529, + 506, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 552 + ], + "type": "text", + "content": "ProbLog script used in the ProbKT Probabilistic logical reasoning framework for aggregating the digits on an image:" + } + ] + } + ], + "index": 11 + }, + { + "type": "code", + "bbox": [ + 105, + 561, + 443, + 673 + ], + "blocks": [ + { + "bbox": [ + 105, + 561, + 443, + 673 + ], + "lines": [ + { + "bbox": [ + 105, + 561, + 443, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 443, + 673 + ], + "type": "text", + "content": ": - use_module library(lists)). \nnn (mnist_net, [X], Y, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) :: digit(X,Y). \nsum([[], 0). \nsum([X|T], Y) : - sum(T,Z), Y is X+Z. \nroll([[], L, L). \nroll([H|T], A, L) : - roll(T, [Y|A], L), digit(H,Y). \nsum_digits(X,Y) : - roll(X, [], Result), sum(Result,Y)." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "code", + "guess_lang": "prolog" + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "text", + "content": "The query " + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "text", + "content": " in the case of sum of digits would be sum_digits(X, Y). For example an image " + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "text", + "content": " with as sum of digits 12 would result in the following query: sum_digits(X, 12)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": "ProbLog script used in the ProbKT Probabilistic logical reasoning framework for taking into account non-exact counts on images" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 104, + 82, + 478, + 457 + ], + "blocks": [ + { + "bbox": [ + 104, + 82, + 478, + 457 + ], + "lines": [ + { + "bbox": [ + 104, + 82, + 478, + 457 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 478, + 457 + ], + "type": "text", + "content": "-- useModule library(lists)) ;\ninn (mnist_net, [X], Y, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) :: digit(X,Y) . . . 
..\ncount([ ], X, 0 ) ;\ncount([X|T], X,Y): - count(T,X,Z), Y is 1+Z;\ncount([X|T], X,Z): - X1 " + }, + { + "bbox": [ + 104, + 82, + 478, + 457 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 104, + 82, + 478, + 457 + ], + "type": "text", + "content": " X, count(T,X,Z);\ncount(, X, 0 ) ;\ncount([X|T], X,Y): - count(T,X,Z), Y is 1+Z;\ncount(, X|T], X,Z): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], X,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|T], x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X |7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(,X|7),x,C,A):- count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,A): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), " + }, + { + "bbox": [ + 104, + 82, + 478, + 457 + ], + "type": "inline_equation", + "content": "x\\text{、} C\\text{、} B\\text{、}" + }, + { + "bbox": [ + 104, + 82, + 478, + 457 
+ ], + "type": "text", + "content": " ): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, X|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount (, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z)\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,X,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - count(T,x,Z);\ncount(, x|7), x,C,B): - call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7), x.C,B); -- call T;\ncount(, x|7),\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- Call\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- call T;\n\t\t-- called\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- 
call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- called\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T;\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- call T\n\t\t\t-- called\n\t\t\t-- call T\n\t\t\t-- called\n\t\t\t-- called\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S])\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S])\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S])\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S]\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S]\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S]\n\t\t\t- roll(x,[Y],[A],[L],[C],[S] -- roll(x,[Y],[A],[L],[C],[S]\n\t\t\t- roll(x,[Y],[A],[-X] -- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[y,-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y],[-X] [- roll(x,[Y变压] [- roll(x,[Y变压] [- roll(x,[Y变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- 
roll(x变压] [- roll(x变压] [- roll(x变压] [- roll(x变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [- rollx变压] [-" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 104, + 465, + 504, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 465, + 504, + 511 + ], + "spans": [ + { + "bbox": [ + 104, + 465, + 504, + 511 + ], + "type": "text", + "content": "The query " + }, + { + "bbox": [ + 104, + 465, + 504, + 511 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 465, + 504, + 511 + ], + "type": "text", + "content": " in the case of non exact counts of objects would be range_countobjects(X,L,C,S). For example an image " + }, + { + "bbox": [ + 104, + 465, + 504, + 511 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 465, + 504, + 511 + ], + "type": "text", + "content": " with exactly one metal small cube and multiple rubber large spheres would result in the following query: range_countobjects(X,[s_metal Cube,l_rubber Sphere],[1,1],[0,1])." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 522, + 323, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 522, + 323, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 323, + 534 + ], + "type": "text", + "content": "E.1 INFERENCE EXAMPLE FOR MNIST DATASET" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 543, + 506, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 543, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 543, + 506, + 567 + ], + "type": "text", + "content": "To illustrate the inference process let us follow the evaluation of the clause sum([x1, x2], 8), what can result from query sum>digits(X, 8) in case of two visible digit in the image X." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 571, + 296, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 296, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 296, + 583 + ], + "type": "text", + "content": "This clause is true if and only if " + }, + { + "bbox": [ + 105, + 571, + 296, + 583 + ], + "type": "inline_equation", + "content": "X_{1} + X_{2} = 8" + }, + { + "bbox": [ + 105, + 571, + 296, + 583 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 588, + 504, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 588, + 504, + 610 + ], + "spans": [ + { + "bbox": [ + 104, + 588, + 504, + 610 + ], + "type": "text", + "content": "In case of MNIST digits " + }, + { + "bbox": [ + 104, + 588, + 504, + 610 + ], + "type": "inline_equation", + "content": "\\{(0,1,\\dots ,9)\\}" + }, + { + "bbox": [ + 104, + 588, + 504, + 610 + ], + "type": "text", + "content": " enumerating the possible worlds would give the following set:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 239, + 614, + 504, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 614, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 239, + 614, + 504, + 628 + ], + "type": "interline_equation", + "content": "\\{(0, 8), (1, 7), (2, 6), \\dots , (8, 0) \\} \\tag {3}", + "image_path": "3ea9fd70b06049f40bf687021cd39b90d4771e7e4f1340d2405e63dd56b965de.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 638, + 350, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 638, + 350, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 350, + 650 + ], + "type": "text", + "content": "After summing the probability of all possible worlds we get:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 210, + 654, + 504, + 667 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 210, + 654, + 504, + 667 + ], + "spans": [ + { + "bbox": [ + 210, + 654, + 504, + 667 + ], + "type": "interline_equation", + "content": "p _ {1} (0) p _ {2} (8) + p _ {1} (1) p _ {2} (7) + \\dots + p _ {1} (0) p _ {2} (8), \\tag {4}", + "image_path": "0ad8a02aa939bbf51b48b771a8aa8426f2be4bec9104d11815809f2f23f99f36.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "inline_equation", + "content": "p_1" + }, + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "inline_equation", + "content": "p_2" + }, + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "text", + "content": " are the distribution of random variable " + }, + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "inline_equation", + "content": "X_1" + }, + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "inline_equation", + "content": "X_2" + }, + { + "bbox": [ + 104, + 670, + 429, + 684 + ], + "type": "text", + "content": " respectively." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 688, + 193, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 688, + 193, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 193, + 700 + ], + "type": "text", + "content": "Or in a general form:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 232, + 703, + 504, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 703, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 232, + 703, + 504, + 730 + ], + "type": "interline_equation", + "content": "p _ {Y} (Y) = \\sum_ {X _ {1}} p _ {1} \\left(X _ {1}\\right) p _ {2} \\left(Y - X _ {1}\\right). \\tag {5}", + "image_path": "274e607c8745a43427c4fdf09e943d4ec80127d9ad91c9a92d9eb3d8dcfe9a8d.jpg" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 82, + 504, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 504, + 116 + ], + "type": "text", + "content": "As expected the distribution of the sum is the convolution of the distributions of the two terms. This observation trivially generalizes to more than two terms. 
The cost function corresponding to the maximum likelihood estimation is the negative log-likelihood " + }, + { + "bbox": [ + 107, + 82, + 504, + 116 + ], + "type": "inline_equation", + "content": "-\\log (p_{Y}(Y))" + }, + { + "bbox": [ + 107, + 82, + 504, + 116 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_content_list.json b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..ff04f332a99fce81a40241bb204a893cdd3f094a --- /dev/null +++ b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_content_list.json @@ -0,0 +1,2229 @@ +[ + { + "type": "text", + "text": "WEAKLY-SUPERVISED HOI DETECTION VIA PRIOR-GUIDED BI-LEVEL REPRESENTATION LEARNING", + "text_level": 1, + "bbox": [ + 171, + 99, + 828, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Bo Wan $^{1,*}$ , Yongfei Liu $^{2*}$ , Desen Zhou $^{2}$ , Tinne 
Tuytelaars $^{1}$ , Xuming He $^{2,3}$", + "bbox": [ + 179, + 167, + 705, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ KU Leuven, Leuven, Belgium; $^{2}$ ShanghaiTech University, Shanghai, China \n $^{3}$ Shanghai Engineering Research Center of Intelligent Vision and Imaging {bwan, tinne.tuytelaars}@esat.kuleuven.be {liuyf3,zhouds,hexm}@shanghaitech.edu.cn", + "bbox": [ + 183, + 185, + 723, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 260, + 547, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human object interaction (HOI) detection plays a crucial role in human-centric scene understanding and serves as a fundamental building-block for many vision tasks. One generalizable and scalable strategy for HOI detection is to use weak supervision, learning from image-level annotations only. This is inherently challenging due to ambiguous human-object associations, large search space of detecting HOIs and highly noisy training signal. A promising strategy to address those challenges is to exploit knowledge from large-scale pretrained models (e.g., CLIP), but a direct knowledge distillation strategy (Liao et al., 2022) does not perform well on the weakly-supervised setting. In contrast, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge at both image level and HOI instance level, and adopt a self-taught mechanism to prune incorrect human-object associations. 
Experimental results on HICO-DET and V-COCO show that our method outperforms the previous works by a sizable margin, showing the efficacy of our HOI representation.", + "bbox": [ + 228, + 282, + 769, + 477 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 497, + 336, + 512 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human object interaction detection aims to simultaneously localize the human-object regions in an image and to classify their interactions, which serves as a fundamental building-block in a wide range of tasks in human-centric artificial intelligence, such as human activity recognition (Heilbron et al., 2015; Tina et al., 2021), human motion tracking (Wafae et al., 2019; Nishimura et al., 2021) and anomalous behavior detection (Liu et al., 2018; Pang et al., 2020).", + "bbox": [ + 169, + 522, + 826, + 593 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Usually, HOI detection adopts a supervised learning paradigm (Gupta & Malik, 2015; Chao et al., 2018; Wan et al., 2019; Gao et al., 2020; Zhang et al., 2021c). This requires detailed annotations (i.e. human and object bounding boxes and their interaction types) in the training stage. However, such HOI annotations are expensive to collect and prone to labeling errors. In contrast, it is much easier to acquire image-level descriptions of target scenes. Consequently, a more scalable strategy for HOI detection is to learn from weak annotations at the image level, known as weakly-supervised HOI detection (Zhang et al., 2017). 
Learning under such weak supervision is particularly challenging mainly due to the lack of accurate visual-semantic associations, large search space of detecting HOIs and highly noisy training signal from only image level supervision.", + "bbox": [ + 169, + 599, + 826, + 726 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Most existing works (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) attempt to tackle the weakly-supervised HOI detection in a Multiple Instance Learning (MIL) framework (Ilse et al., 2018). They first utilize an object detector to generate human-object proposals and then train an interaction classifier with image-level labels as supervision. Despite promising results, these methods suffer from several weaknesses when coping with diverse and fine-grained HOIs. Firstly, they usually rely on visual representations derived from the external object detector, which mainly focus on the semantic concepts of the objects in the scene and hence are insufficient for capturing the concept of fine-grained interactions. Secondly, as the image-level supervision tends to ignore the imbalance in HOI classes, their representation learning is more susceptible to the dataset bias and dominated by frequent interaction classes. Finally, these methods learn the HOI concepts from a candidate set generated by pairing up all the human and object proposals, which is highly noisy and often leads to erroneous human-object associations for many interaction classes.", + "bbox": [ + 169, + 731, + 826, + 898 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal Contribution. 
Code is available at https://github.com/bobwan1995/Weakly-HOI.", + "bbox": [ + 189, + 909, + 712, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address the aforementioned limitations, we introduce a new weakly-supervised HOI detection strategy. It aims to incorporate the prior knowledge from pretrained foundation models to facilitate the HOI learning. In particular, we propose to integrate CLIP (Radford et al., 2021b), a large-scale vision-language pretrained model. This allows us to exploit the strong generalization capability of the CLIP representation for learning a better HOI representation under weak supervision. Compared to the representations learned by the object detector, the CLIP representations are inherently less object-centric, hence more likely to incorporate also aspects about the human-object interaction, as evidenced by Appendix A. Although a few works have successfully exploited CLIP for supervised HOI detection in the past, experimentally we find they do not perform well in the more challenging weakly-supervised setting (c.f. Appendix.B). We hypothesize this is because they only transfer knowledge at image level, and fail without supervision at the level of human-object pairs.", + "bbox": [ + 169, + 103, + 826, + 257 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge of HOIs at two different levels. First, at the image level, we utilize the visual and linguistic embeddings of the CLIP model to build a global HOI knowledge bank and generate image-level HOI predictions. In addition, for each human-object pair, we enrich the region-based HOI features by the HOI representations in the knowledge bank via a novel attention mechanism. 
Such a bi-level framework enables us to exploit the image-level supervision more effectively through the shared HOI knowledge bank, and to enhance the interaction feature learning by introducing the visual and text representations of the CLIP model.", + "bbox": [ + 169, + 263, + 826, + 376 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We instantiate our bi-level knowledge integration strategy as a modular deep neural network with a global and local branch. Given the human-object proposals generated by an off-the-shelf object detector, the global branch starts with a backbone network to compute image feature maps, which are used by a subsequent HOI recognition network to predict the image-wise HOI scores. The local branch builds a knowledge transfer network to extract the human-object features and augment them with the CLIP-guided knowledge bank, followed by a pairwise classification network to compute their relatedness and interaction scores1. The relatedness scores are used to prune incorrect human-object associations, which mitigates the issue of noisy proposals. Finally, the outputs of the two branches are fused to generate the final HOI scores.", + "bbox": [ + 169, + 381, + 826, + 507 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To train our HOI detection network with image-level annotations, we first initialize the backbone network and the HOI knowledge bank from the CLIP encoders, and then train the entire model in an end-to-end manner. 
In particular, we devise a novel multi-task weak supervision loss consisting of three terms: 1) an image-level HOI classification loss for the global branch; 2) an MIL-like loss for the interaction scores predicted by the local branch, which is defined on the aggregate of all the human-object pair predictions; 3) a self-taught classification loss for the relatedness of each human-object pair, which uses the interaction scores from the model itself as supervision.", + "bbox": [ + 169, + 513, + 826, + 612 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We validate our methods on two public benchmarks: HICO-DET (Chao et al., 2018) and V-COCO (Gupta & Malik, 2015). The empirical results and ablative studies show our method consistently achieves state-of-the-art performance on all benchmarks. In summary, our contributions are three-fold: (i) We exploit the CLIP knowledge to build a prior-enriched HOI representation, which is more robust for detecting fine-grained interaction types and under imbalanced data distributions. (ii) We develop a self-taught relatedness classification loss to alleviate the problem of mis-association between human-object pairs. (iii) Our approach achieves state-of-the-art performance on the weakly-supervised HOI detection task on both benchmarks.", + "bbox": [ + 169, + 618, + 828, + 729 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORKS", + "text_level": 1, + "bbox": [ + 171, + 739, + 352, + 755 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "HOI detection: Most works on supervised HOI detection can be categorized in two groups: two-stage and one-stage HOI detection. Two-stage methods first generate a set of human-object proposals with an external object detector, then classify their interactions. 
They mainly focus on exploring additional human pose information (Wan et al., 2019; Li et al., 2020a; Gupta et al., 2019), pairwise relatedness (Li et al., 2019a; Zhou et al., 2020) or modeling relations between object and human (Gao et al., 2020; Zhang et al., 2021c; Ulutan et al., 2020; Zhou & Chi, 2019), to enhance the HOI representations. One-stage methods predict human & object locations and their interaction types simultaneously in an end-to-end manner, which are currently dominated by transformer-based architectures (Carion et al., 2020; Kim et al., 2022; Dong et al., 2022; Zhang et al., 2021a;b).", + "bbox": [ + 169, + 760, + 828, + 887 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "${}^{1}$ Relatedness indicates whether a human-object pair has a relation, and interaction scores are multi-label scores on the interaction space.", + "bbox": [ + 169, + 897, + 823, + 924 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e651bacab4fc1cd655ae1937f2758bab90cffc62a4962ef321be81b8fb18d4d7.jpg", + "image_caption": [ + "Figure 1: Model Overview: There are four modules in our network: a backbone Network, an HOI recognition network, a knowledge transfer network and a pairwise classification network." + ], + "image_footnote": [], + "bbox": [ + 204, + 102, + 789, + 325 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Supervised methods show superior performance, but require labor-intensive HOI annotations that are infeasible to obtain in many scenarios. 
Thus, in this work we focus on HOI detection under weak supervision.", + "bbox": [ + 169, + 372, + 823, + 415 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Weakly-supervised HOI detection: Weakly-supervised HOI detection aims to learn instance-level HOIs with only image-level annotations. (Prest et al., 2011) learns a set of binary action classifiers based on detected human-object pairs, where human proposal is obtained from a part-based human detector and object is derived from the relative position with respect to the human. PPR-FCN (Zhang et al., 2017) employs a parallel FCN to perform pair selection and classification. Explainable-HOI (Baldassarre et al., 2020) adopts graph nets to capture relations for better image-level HOI recognition, and uses backward explanation for instance-level HOI detection. MX-HOI (Kumaraswamy et al., 2021) proposes a momentum-independent learning strategy to utilize strong & weak labels simultaneously. AlignFormer (Kilickaya & Smeulders, 2021) proposes an align layer in transformer framework, which utilizes geometric & visual priors to generate pseudo alignments for training. Those methods focus on learning HOIs with advanced network structures or better pseudo alignments. However, they still suffer from noisy human-object associations and ambiguous interaction types. To address those challenges, we exploit prior knowledge from CLIP to build a discriminative HOI representations.", + "bbox": [ + 169, + 422, + 826, + 618 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Knowledge exploitation of pretrained V&L models: Recently, CLIP (Radford et al., 2021a) model has demonstrated strong generalization to various downstream tasks (Ghiasi et al., 2021; Du et al., 2022; Gu et al., 2021). 
Some works also explore CLIP knowledge in supervised HOI detection, e.g., CATN (Dong et al., 2022) initializes the object query with category-aware semantic information from CLIP text encoder, and GEN-VLTK (Liao et al., 2022) employs image feature distillation and classifier initialization with HOI prompts. However, they only exploit CLIP knowledge at a coarse level and require detailed annotations of human-object pairs. It is non-trivial to extend such strategies to the weak supervision paradigm due to highly noisy training signals. In our work, we build a deep connection between CLIP and HOI representation by incorporating the prior knowledge of HOIs at both image and HOI instance levels.", + "bbox": [ + 169, + 626, + 826, + 767 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 METHOD", + "text_level": 1, + "bbox": [ + 171, + 780, + 282, + 794 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 PROBLEM SETUP AND METHOD OVERVIEW", + "text_level": 1, + "bbox": [ + 171, + 804, + 509, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Problem setup Given an input image $I$ , the task of weakly-supervised HOI detection aims to localize and recognize the human-object interactions, while only the corresponding image-level HOI categories are available for training. Formally, we aim to learn a HOI detector $\\mathcal{M}$ , which takes an image $I$ as input and generates a set of tuples $\\mathcal{O} = \\{(\\mathbf{x}_h,\\mathbf{x}_o,c_o,a_{h,o},R_{h,o}^a)\\}$ , i.e., $\\mathcal{O} = \\mathcal{M}(I)$ . Here each tuple indicates a HOI instance, in which $\\mathbf{x}_h,\\mathbf{x}_o\\in \\mathbb{R}^4$ represent human and object bounding boxes, $c_{o}\\in \\{1,\\dots,C\\}$ is the object category, $a_{h,o}\\in \\{1,\\dots,A\\}$ denotes the interaction class associated with $\\mathbf{x}_h$ and $\\mathbf{x}_o$ , and $R_{h,o}^{a}\\in \\mathbb{R}$ is the HOI score. 
For the weakly-supervised setting,", + "bbox": [ + 169, + 823, + 826, + 926 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "each training image is annotated with a set of HOI categories $\\mathcal{R} = \\{r^{*}\\}$ at the image level only, where $r^{*} \\in \\{1, \\dots, N\\}$ is an index to a combination of ground-truth object category $c^{*}$ and interaction category $a^{*}$ , and $N$ denotes the number of all possible HOI combinations defined on the dataset.", + "bbox": [ + 169, + 103, + 826, + 147 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Method Overview As we lack supervision for the HOI locations, we adopt a typical hypothesize-and-recognize strategy (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) for HOI detection: first we generate a set of human and object proposals with an off-the-shelf object detector (Ren et al., 2015) and then predict the interaction class for all human-object combinations.", + "bbox": [ + 169, + 148, + 826, + 207 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unlike other methods, we do not re-use the feature maps of the object or human detector - we only keep the bounding boxes. Instead, we learn a new representation optimized for the HOI task. This is challenging under the weak setting as the model learning is noisy, but feasible by leveraging the rich semantic knowledge from a pretrained large-scale multimodal model, like CLIP. 
However, the naive knowledge integration strategies for supervised setting fail when directly applied in the weak setting, as evidenced by our experiments in Appendix.B", + "bbox": [ + 169, + 212, + 826, + 297 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our framework adopts two philosophies to address the challenges in the weakly-supervised HOI task: the first is to integrate the prior knowledge into discriminative representation learning, and the second is to suppress noise in learning. For the first philosophy, we utilize the prior knowledge from CLIP to guide the representation learning in both global image-level and fine-grained human-object pairs, which is instantiated by a bi-level knowledge integration strategy. For the second philosophy, we adopt an effective self-taught learning mechanism to suppress the irrelevant pairs.", + "bbox": [ + 169, + 303, + 826, + 388 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We instantiate the bi-level knowledge integration strategy with a two-branch deep network. Our detection pipeline starts with a set of human proposals with detection scores $\\{(\\mathbf{x}_h, s_h)\\}$ , and object proposals with their categories and detection scores $\\{(\\mathbf{x}_o, c_o, s_o)\\}$ . Then, the global branch performs image-level HOI recognition by utilizing a CLIP-initialized HOI knowledge bank as a classifier. This allows us to exploit both visual and text encoders from CLIP to generate better HOI representations. 
In parallel, for each human-object pair $(\\mathbf{x}_h, \\mathbf{x}_o)$ , the local branch explicitly augments the pairwise HOI features with the HOI knowledge bank to then identify their relatedness and interaction classes.", + "bbox": [ + 169, + 393, + 826, + 492 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To train our model, we use a multi-task loss, which incorporates a HOI recognition loss defined on image-wise HOIs for the visual encoder and knowledge bank finetuning, and a self-taught relatedness classification for suppressing the background human-object associations, on top of the standard MIL-based loss. We first present model details in Sec.3.2, followed by the training strategy in Sec.3.3.", + "bbox": [ + 169, + 498, + 826, + 555 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 MODEL DESIGN", + "text_level": 1, + "bbox": [ + 171, + 559, + 328, + 573 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Now we introduce our bi-level knowledge integration strategy, where the aim is to exploit CLIP textual embeddings of HOI labels as a HOI knowledge bank for the HOI representation learning, and to transfer such knowledge both at image level as well as at the level of human-object pairs for interaction predictions. Specifically, as shown in Fig. 1, our network consists of a global branch and a local branch. The global branch includes a backbone network (Sec.3.2.1) that extracts image features, and a HOI recognition network (Sec.3.2.2) that uses a HOI knowledge bank based on CLIP to predict image-level HOI scores. For each human-object proposal generated by an off-the-shelf object detector, the local branch employs a knowledge transfer network (Sec.3.2.3) to compute its feature representation with enhancement from the HOI knowledge bank, and a pairwise classification network (Sec.3.2.4) to compute their relatedness and interaction scores. 
Finally, we generate the final HOI detection scores by combining global HOI scores with local predictions (Sec. 3.2.5).", + "bbox": [ + 169, + 578, + 826, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "HOI Knowledge Bank Generation CLIP builds a powerful vision-language model by pretraining on large-scale image-text pairs. It consists of a visual encoder $\\mathcal{F}_V$ and textual encoder $\\mathcal{F}_T$ , mapping both visual and textual inputs to a shared latent space. Here, we exploit CLIP to generate a HOI knowledge bank. We take a similar prompt strategy as in CLIP, adopting a common template 'a person {verb} a/an {object}' to convert HOI labels into text prompts (e.g., converting 'drive car' to 'a person driving a car'). Then we input the sentences into the CLIP textual encoder $\\mathcal{F}_T$ to initialize the HOI knowledge bank $\\mathcal{W}_T \\in \\mathbb{R}^{N \\cdot D}$ , with $D$ denoting the feature dimension. One can think of $\\mathcal{W}_T$ as a set of 'prototypes' in feature space, one for each HOI in the dataset.", + "bbox": [ + 169, + 738, + 826, + 851 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 GLOBAL BRANCH: BACKBONE NETWORK", + "text_level": 1, + "bbox": [ + 171, + 861, + 519, + 875 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To incorporate CLIP for feature extraction, we initialize the backbone network (e.g., a ResNet-101 (He et al., 2016)) with CLIP's visual encoder $\\mathcal{F}_V$ to generate a feature map $\\Gamma$ for the input image $I$ . 
We further compute a global feature vector $v_{g} \\in \\mathbb{R}^{D}$ with self-attention operation (Radford et al., 2021b).", + "bbox": [ + 169, + 881, + 826, + 926 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f3349fb8251160198ddf86acc80f35bdf49add88f16e617e0264756fb346c105.jpg", + "image_caption": [ + "(a) knowledge transfer network" + ], + "image_footnote": [], + "bbox": [ + 222, + 103, + 506, + 233 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/43ccc2a33ded6bf714fc9c749ae5107721836e3e4420f610ab7a8b7c1b4c5370.jpg", + "image_caption": [ + "(b) pseudo relatedness label generation", + "Figure 2: The knowledge transfer network explicitly transfers the discriminative relation-level semantic knowledge derived from CLIP to the pairwise HOI representations. Pseudo relatedness label generation uses the pairwise interaction scores to generate the pseudo association labels for self-taught relatedness classification" + ], + "image_footnote": [], + "bbox": [ + 509, + 103, + 764, + 233 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.2 GLOBAL BRANCH: HOI RECOGNITION NETWORK", + "text_level": 1, + "bbox": [ + 171, + 309, + 575, + 321 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We perform an image-wise HOI recognition task with the HOI knowledge bank $\\mathcal{W}_T$ . We obtain global HOI scores $s_g \\in \\mathbb{R}^N$ by computing the inner product between the image feature $v_g$ and the knowledge bank $\\mathcal{W}_T$ : $s_g = \\mathcal{W}_T \\times v_g$ , where $\\times$ is matrix multiplication. 
This has the effect of adapting the visual encoder and knowledge bank parameters to the HOI recognition task, fully taking advantage of the knowledge from CLIP.", + "bbox": [ + 169, + 330, + 823, + 401 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.3 LOCAL BRANCH: KNOWLEDGE TRANSFER NETWORK", + "text_level": 1, + "bbox": [ + 171, + 414, + 599, + 426 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given the CLIP-initialized visual encoder, a standard HOI representation can be formed by concatenating the human and object appearance features along with their spatial encoding. However, even after the finetuning as described above, such a representation still mainly focuses on object-level semantic cues rather than relation-level concepts. In this module, we explicitly exploit the HOI knowledge bank $\\mathcal{W}_T$ to learn a local relation-specific HOI representation. To achieve this, we propose an attention-based architecture as shown in Fig.2(a).", + "bbox": [ + 169, + 434, + 823, + 518 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, for each human proposal $\\mathbf{x}_h$ and object proposal $\\mathbf{x}_o$ , we use RoI-Align (He et al., 2017) to crop the feature maps from $\\Gamma$ followed by a self-attention operation to compute their appearance features $v_h, v_o \\in \\mathbb{R}^D$ . Then we compute a spatial feature $v_{sp}$ by encoding the relative positions of their bounding boxes $(\\mathbf{x}_h, \\mathbf{x}_o)^2$ . 
The holistic HOI representation $v_p \\in \\mathbb{R}^D$ is an embedding of the human and object appearance features and their spatial feature, i.e., $v_p = \\mathcal{F}_E([v_h; v_o; v_{sp}])$ , where $[\\cdot]$ is the concatenation operation and $\\mathcal{F}_E$ is a multi-layer perceptron (MLP).", + "bbox": [ + 169, + 525, + 823, + 611 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To enhance relation-level concepts, we further compute its union region $\\mathbf{x}_u\\in \\mathbb{R}^4$ (see Fig. 2a) and extract the corresponding appearance feature $v_{u}\\in \\mathbb{R}^{D}$ via RoI-align over the feature map $\\Gamma$ . The union region is important as it encodes relational context cues, but it potentially also contains a large amount of background that is noisy for model learning. We thus devise an attention module that is similar in design to the HOI recognition network, but uses the union feature $v_{u}$ as query to extract a meta-embedding $v_{meta}\\in \\mathbb{R}^{D}$ from the HOI knowledge bank $\\mathcal{W}_T$ . The final HOI representation $\\hat{v}_p\\in \\mathbb{R}^D$ is built by fusing the holistic representation $v_{p}$ and $v_{meta}$ with a MLP $\\mathcal{F}_K$ .", + "bbox": [ + 169, + 617, + 823, + 715 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha = \\operatorname {S o f t m a x} \\left(\\mathcal {W} _ {T} \\times v _ {u}\\right); \\quad v _ {\\text {m e t a}} = \\alpha^ {\\intercal} \\times \\mathcal {W} _ {T}; \\quad \\hat {v} _ {p} = \\mathcal {F} _ {K} \\left(v _ {p} + v _ {\\text {m e t a}}\\right). \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 243, + 724, + 823, + 741 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here $\\alpha \\in \\mathbb{R}^N$ is the normalized attention weight and $\\tau$ is the transpose operation. 
$v_{meta}$ encodes a discriminative representation from CLIP and facilitates feature sharing between HOI classes.", + "bbox": [ + 169, + 750, + 823, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.4 LOCAL BRANCH: PAIRWISE CLASSIFICATION NETWORK", + "text_level": 1, + "bbox": [ + 171, + 791, + 617, + 805 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given the relation-aware HOI representation $\\hat{v}_p$ , our final module performs a coarse-level classification on human-object association and a fine-level classification for interaction recognition. Specifically, we use two MLPs $\\mathcal{F}_P$ and $\\mathcal{F}_B$ to predict the interaction scores $s_p \\in \\mathbb{R}^A$ and the relatedness score $s_b \\in \\mathbb{R}$ for each human-object pair:", + "bbox": [ + 169, + 816, + 826, + 873 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ns _ {p} = \\mathcal {F} _ {P} (\\hat {v} _ {p}); \\quad s _ {b} = \\mathcal {F} _ {B} (\\hat {v} _ {p}) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 881, + 823, + 898 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "2For details c.f. the appendix C", + "bbox": [ + 191, + 909, + 382, + 922 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To train the model under weak supervision (see Sec. 3.3), we further aggregate the pairwise interaction scores into image-level interaction scores. Assume we have $M$ pairs of human-object proposals for a given image, and denote the interaction scores for the $m$ -th pair as $s_p^m$ . 
We first concatenate all the interaction scores to compose a bag $S = [s_p^1; \\ldots; s_p^M] \\in \\mathbb{R}^{M \\cdot A}$ , then we maximize over all pairs to obtain the image-wise interaction scores: $\\tilde{s}_p = \\max_m S$ .", + "bbox": [ + 169, + 103, + 826, + 184 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2.5 MODEL INFERENCE", + "text_level": 1, + "bbox": [ + 171, + 186, + 369, + 200 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "During model inference, we do not use the local interaction scores $s_p$ directly. Instead, we normalize $S$ with a Softmax operation defined on all pairs: $\\bar{S} = \\text{Softmax}(S)$ , and then compute the normalized", + "bbox": [ + 169, + 207, + 826, + 246 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "pairwise interaction scores $e_p = \\sigma(\\tilde{s}_p) \\cdot \\bar{s}_p$ , where $\\bar{s}_p$ is a row from $\\bar{S}$ and $\\sigma$ is the Sigmoid function. This has the effect of measuring the contribution of a given pair, in case multiple pairs in an image share the same interaction.", + "bbox": [ + 169, + 246, + 826, + 287 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The final interaction score $s_{h,o}^{a}$ for human-object pair $(\\mathbf{x}_h,\\mathbf{x}_o)$ combines multiple scores, including the global HOI scores $s_g$ , the normalized pairwise interaction scores $e_p$ , and the relatedness score $s_b$ . 
The overall HOI score $R_{h,o}^{a}$ is a combination of the interaction score and the object detection scores.", + "bbox": [ + 169, + 295, + 826, + 339 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\ns _ {h, o} ^ {a} = \\sigma \\left(s _ {g} ^ {a, c _ {o}}\\right) \\cdot e _ {p} ^ {a} \\cdot \\sigma \\left(s _ {b}\\right); \\quad R _ {h, o} ^ {a} = \\left(s _ {h} \\cdot s _ {o}\\right) ^ {\\gamma} \\cdot s _ {h, o} ^ {a} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 347, + 825, + 367 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $s_g^{a,c_o}$ is the HOI score corresponding to $a$ -th interaction and $c_o$ -th object category in $s_g$ , $e_p^a$ is the score of $a$ -th interaction in $e_p$ , and $\\gamma$ is a hyper-parameter to balance the scores (Zhang et al., 2021c; Li et al., 2019b).", + "bbox": [ + 169, + 368, + 826, + 411 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 LEARNING WITH WEAK SUPERVISION", + "text_level": 1, + "bbox": [ + 171, + 417, + 480, + 431 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To train our deep network in a weakly supervised setting, we use a multi-task loss defined on three different levels. Specifically, our overall loss function $\\mathcal{L}$ consists of three terms: i) an image-wise HOI recognition loss $\\mathcal{L}_g$ to adapt CLIP features to the task of human-object interaction detection; ii) a pairwise interaction classification loss $\\mathcal{L}_p$ to guide the knowledge transfer towards fine-grained relation-aware representations; and iii) a self-taught relatedness classification loss $\\mathcal{L}_b$ to prune non-interacting human-object combinations. 
Formally, the overall loss is written as:", + "bbox": [ + 169, + 441, + 826, + 526 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {g} + \\mathcal {L} _ {p} + \\mathcal {L} _ {b} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 429, + 532, + 825, + 549 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Image-wise HOI recognition loss $\\mathcal{L}_g$ : Given the HOI scores $s_g$ and ground-truth HOI categories $\\mathcal{R}$ , $\\mathcal{L}_g$ is a standard binary cross-entropy loss for multi-label classification: $\\mathcal{L}_g = L_{BCE}(s_g, \\mathcal{R})$ .", + "bbox": [ + 169, + 549, + 823, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Pairwise interaction classification loss $\\mathcal{L}_p$ : We adopt a MIL strategy that first aggregates the pairwise interaction scores and supervises this with image-level interaction labels as $\\mathcal{A} = \\{a^*\\}$ . Given the image-wise interaction scores $\\tilde{s}_p$ , $\\mathcal{L}_p$ is a standard binary cross-entropy loss for multi-label classification as: $\\mathcal{L}_p = L_{BCE}(\\tilde{s}_p, \\mathcal{A})$ .", + "bbox": [ + 169, + 585, + 826, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Self-taught relatedness classification loss $\\mathcal{L}_b$ : As human-object associations are not annotated, we devise a novel pseudo relatedness label generation mechanism for training a self-taught binary classifier to identify valid human-object associations. Specifically, we observe that the human-object pairs with confident interaction scores are often associated after a short period of initial training without self-taught classification loss. 
Motivated by this, we use the interaction scores $s_p$ from the model under training to supervise the relatedness classification.", + "bbox": [ + 169, + 648, + 826, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Concretely, we generate pseudo labels $\\mathcal{B} = \\{b_1,\\dots,b_M\\}$ for all human-object pairs in an image, where $b_{m}\\in \\{0,1\\}$ indicates the relatedness for the $m$ -th combination. To this end, as illustrated in Fig.2(b), we first propose a binary mask $Z\\in \\{0,1\\}^{M\\cdot A}$ for all interaction scores $S$ with respect to the ground-truth object categories $\\mathcal{C} = \\{c^*\\}$ . For each human-object pair where the object label $c_{o}$ is included in $\\mathcal{C}$ , we consider it as a potential interactive combination and thus assign the corresponding row in $Z$ as 1, and other rows as 0. For the latter, we also immediately set $b_{m} = 0$ . Then we generate pairwise scores $t^a\\in \\mathbb{R}^M$ for each ground-truth interaction $a^*$ by selecting the corresponding row from $S\\odot Z$ . The pseudo label for the pair with the highest score is assigned as 1, i.e., $m_a = \\arg \\max_{m}t^a$ and $b_{m_a} = 1$ . We only select one positive pair3 for each $a^*$ . 
Finally, $\\mathcal{L}_b$ is defined as a binary cross-entropy loss: $\\mathcal{L}_b = \\sum_m L_{BCE}(s_b^m,b_m)$ , where $s_b^m$ is the relatedness score for the $m$ -th pair.", + "bbox": [ + 169, + 739, + 828, + 909 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3We also explore top-K selection in Appendix F", + "bbox": [ + 191, + 909, + 478, + 924 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/1c8c230f9f7169d54dff5b1d6823b1233510ef4f79100b811857bb40bd219d38.jpg", + "table_caption": [ + "Table 1: mAP comparison on HICO-DET and V-COCO test set. - denotes the results are not available. * stands for the method we re-evaluate with the correct evaluation protocol (see Appendix.I for details) and †means our re-implementation. For V-COCO, all object detectors are pretrained on MSCOCO dataset by default, and details about the evaluation metrics APS1&2 c.f. Appendix H. IN-1K denotes ImageNet with 1000 classes." + ], + "table_footnote": [], + "table_body": "
MethodsBackboneDetectorHICO-DET (%)V-COCO (%)
FullRareNon-RareAProleAProle
supervised
iCAN (Gao et al., 2018)RN50 (IN-1K&COCO)FRCNN (COCO)14.8410.4516.1545.3052.40
PMFNet (Wan et al., 2019)RN50-FPN (IN-1K&COCO)FRCNN (COCO)17.4615.5618.0052.00-
TIN (Li et al., 2019b)RN50-FPN (IN-1K&COCO)FRCNN (COCO)17.2213.5118.3247.8054.20
DJ-RN (Li et al., 2020a)RN50 (IN-1K&COCO)FRCNN (COCO)21.3418.5321.1853.3060.30
IDN (Li et al., 2020b)RN50 (IN-1K&COCO)FRCNN (HICO-DET)26.2922.6127.3953.3060.30
SCG (Zhang et al., 2021c)RN50-FPN (IN-1K&HICO-DET)FRCNN (HICO-DET)31.3324.7233.3154.2060.90
HOTR (Kim et al., 2021)RN50+Transformer (IN-1K&COCO)DETR (HICO-DET)25.1017.3427.4255.2064.40
QPIC (Tamura et al., 2021)RN101+Transformer (IN-1K&COCO)DETR (COCO)29.9023.9231.6958.3060.70
CATN (Dong et al., 2022)RN50+Transformer (IN-1K&HICO-DET&COCO)DETR (HICO-DET)31.8625.1533.8460.10-
MSTR (Kim et al., 2022)RN50 + Transformer (IN-1K&COCO)DETR(HICO-DET)31.1725.3133.9262.0065.20
DisTr (Zhou et al., 2022)RN50+Transformer (IN-1K&COCO)DETR (HICO-DET)31.7527.4533.0366.2068.50
SSRT (Iftekhar et al., 2022)R101+Transformer (IN-1K&COCO)DETR (COCO)31.3424.3133.3265.0067.10
GEN-VLKT (Liao et al., 2022)RN101+Transformer (IN-1K&HICO-DET)DETR (HICO-DET)34.9531.1836.0863.5865.93
between supervised & weakly-supervised setting, learning with image-level HOIs and box annotations
AlignFormer (Kilickaya & Smeulders, 2021)RN101+Transformer (IN-1K&HICO-DET)DETR (HICO-DET)20.8518.2321.6415.8216.34
weakly-supervised
Explanation-HOI* (Baldassarre et al., 2020)ResNeXt101 (IN-1K&COCO)FRCNN (COCO)10.638.7111.20--
MX-HOI (Kumaraswamy et al., 2021)RN101 (IN-1K&COCO)FRCNN (COCO)16.1412.0617.50--
PPR-FCN† (Zhang et al., 2017)RN50 (CLIP dataset)FRCNN (COCO)17.5515.6918.41--
oursRN50 (CLIP dataset)FRCNN (COCO)22.8922.4123.0342.9748.06
oursRN101 (CLIP dataset)FRCNN (COCO)25.7024.5226.0544.7449.97
", + "bbox": [ + 173, + 164, + 823, + 353 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 368, + 328, + 382 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 EXPERIMENTAL SETUP", + "text_level": 1, + "bbox": [ + 171, + 395, + 375, + 409 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Datasets: We benchmark our model on two public datasets: HICO-DET and V-COCO. HICO-DET consists of 47776 images (38118 for training and 9658 for test). It has $N = 600$ HOI categories, which are composed of $C = 80$ common objects (the same as MSCOCO (Lin et al., 2014)) and $A = 117$ unique interaction categories. V-COCO is a subset of MSCOCO, consisting of 2533 images for training, 2867 for validation and 4946 for test. It has 16199 human instances, each annotated with binary labels for $A = 26$ interaction categories.", + "bbox": [ + 169, + 417, + 826, + 503 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation Metric: Following (Chao et al., 2015), we use mean average precision (mAP) to evaluate HOI detection performance. A human-object pair is considered as positive when both predicted human and object boxes have at least 0.5 IoU with their ground-truth boxes, and the HOI class is classified correctly.", + "bbox": [ + 169, + 508, + 823, + 566 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 575, + 408, + 589 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We use an off-the-shelf Faster R-CNN (Ren et al., 2015) pretrained on MSCOCO to generate at most 100 object candidates for each image. For V-COCO, it is worth noting that we train the object detector by removing the images in MSCOCO that overlap with V-COCO to prevent information leakage. 
The backbone network is initialized with the visual encoder from CLIP-RN101 model and the feature dimension $D = 1024$ .", + "bbox": [ + 169, + 595, + 823, + 666 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For model learning, we set the detection score weight $\\gamma = 2.8$ as default by following previous works (Zhang et al., 2021c; Li et al., 2019b), then optimize the entire network with AdamW and an initial learning rate of 1e-5 for backbone parameters and 1e-4 for others. We detach the parameters of the knowledge bank on the local branch for better model learning. We train up to 60K iterations with batch-size 24 in each on 4 NVIDIA 2080TI GPUs, and decay the learning rate by 10 times in 12K and 24K iteration.", + "bbox": [ + 169, + 672, + 826, + 756 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 QUANTITATIVE RESULTS", + "text_level": 1, + "bbox": [ + 171, + 760, + 388, + 773 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For HICO-DET (Tab.1), our approach outperforms the previous state of the arts on the weakly supervised setting by a clear margin, achieving 22.89 mAP with ResNet-50 and 25.70 mAP with ResNet-101 as backbone. For a fair comparison, we also re-implement PPR-FCN with CLIP visual encoder. The results show that we still outperform PPR-FCN by a sizeable margin, which validates the superiority of our framework. Besides, we even perform comparably with HOTR and IDN under an inferior experimental setting where HOTR adopts a more advanced transformer encoder-decoder architecture, and both methods are trained with strong supervision. Furthermore, the mAP gap between Rare (training annotations $< 10$ ) and Non-rare HOI classes in our results is much smaller than other methods, demonstrating the superior generalization capability of our HOI representation for solving the long-tailed distribution issue. 
In detail, we achieve a 0.62 mAP gap with ResNet-50", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/597049b964652d7583c17bbd2d3f5428634b215445100d84dfdc4765fda6c5f7.jpg", + "table_caption": [ + "Table 2: Ablation study on HICO-DET dataset. \"RN50-FPN(COCO)\" denotes the backbone initialized with Faster R-CNN parameters pretrained on MSCOCO dataset while \"CLIP RN50\" stands for the backbone initialized with CLIP visual encoder. Besides, we construct the knowledge bank $\\mathcal{W}_T$ with random initialization, or computing HOI prompts by RoBERTa or CLIP text transformer." + ], + "table_footnote": [], + "table_body": "
MethodsParameter initializationCLIP KnowledgemAP (%)
Backboneknowledge bankHOI recognitionKTNscore fusionSRCFullRareNon-Rare
baselineCLIP RN50-----19.5216.5820.40
Exp 1CLIP RN50CLIP Text---20.3118.3420.90
Exp 2CLIP RN50CLIP Text✓ (freeze WT)---20.0918.2320.64
Exp 3CLIP RN50CLIP Text--20.8618.4021.60
Exp 4CLIP RN50CLIP Text-22.4020.7022.90
Exp 5CLIP RN50----19.8817.4520.61
Exp 6CLIP RN50CLIP Text--20.7519.3821.16
Exp 7CLIP RN50CLIP Text-21.5320.0521.97
oursCLIP RN50CLIP Text22.8922.4123.03
Exp 8RN50-FPN (COCO)-----19.4416.2020.41
Exp 9RN50-FPN (COCO)random19.6115.5720.82
Exp 10RN50-FPN (COCO)RoBERTa20.4516.4621.65
", + "bbox": [ + 205, + 164, + 792, + 297 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and 1.53 with ResNet-101 backbone, which is much smaller than AlignFormer (3.14) and PPR-FCN (2.64), and supervised methods SSRT (9.01) and GEN-VLKT (4.9).", + "bbox": [ + 169, + 324, + 823, + 354 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For V-COCO dataset, we report the performance of $\\mathrm{AP}_{role}$ in both scenario1 and scenario2 for a complete comparison, which are 42.97 / 48.06 $\\mathrm{AP}_{role}$ with ResNet-50 and 44.74 / 49.97 $\\mathrm{AP}_{role}$ with ResNet-101 as backbone. As shown in Tab.1, our model achieves significant improvement compared with AlignFormer, and even is comparable with supervised methods TIN and iCAN.", + "bbox": [ + 169, + 359, + 826, + 417 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 ABLATION STUDY", + "text_level": 1, + "bbox": [ + 171, + 421, + 344, + 434 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we mainly validate the effectiveness of each component with detailed ablation studies on HICO-DET dataset. We use ResNet-50 as the backbone network to reduce experimental costs.", + "bbox": [ + 169, + 440, + 823, + 470 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Baseline: The baseline adopts the visual encoder from CLIP-RN50 to generate the vanilla HOI representation $v_{p}$ , which is directly used to predict the interaction scores $s_{p}$ . Only pairwise interaction classification loss $\\mathcal{L}_{p}$ is used for model learning.", + "bbox": [ + 169, + 479, + 825, + 523 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "HOI recognition: We augment the baseline with a HOI recognition network and observe the full mAP improves from 19.52 to 20.31, as reported in Exp 1 of Tab. 2. 
It suggests that the learnable knowledge bank $\\mathcal{W}_T$ serves as a powerful classifier to perform image-level HOI recognition and update the visual encoder for better HOI representation. We visualize the learned parameters of knowledge bank in Appendix D to demonstrate its effectiveness. Furthermore, as in Exp 2, the performance slightly decreases from 20.31 to 20.09 when we freeze the training of the knowledge bank, indicating that joint learning of visual features and the knowledge bank is more appropriate for HOI detection.", + "bbox": [ + 169, + 532, + 826, + 643 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Knowledge Transfer Network (KTN): KTN explicitly transfers the CLIP meta-knowledge to pairwise HOI features. As a result, it contributes 0.55 Full mAP improvement (Exp 3 v.s. Exp 1) and most of the performance gains come from Non-rare classes. This result shows KTN is capable of extracting discriminative features from the relational knowledge bank to our HOI representation. We also study the effectiveness of the attention mechanism of KTN in Appendix E.", + "bbox": [ + 169, + 654, + 826, + 726 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Score fusion: In Tab. 2, we largely improve the Full mAP from 20.86 (Exp 3) to 22.40 (Exp 4) by fusing the global HOI scores $s_g$ to pairwise interaction score $s_p$ . As the HOI recognition network seamlessly inherits the visual-linguistic features from CLIP and directly adopts image labels as supervision, the global interaction scores are pretty accurate and largely enhance the pairwise scores, demonstrating its strong capabilities to cope with long-tailed and fine-grained HOI recognition.", + "bbox": [ + 169, + 734, + 826, + 806 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Self-taught Relatedness Classification (SRC): Self-taught classification aims to identify the relatedness between human and objects. 
The improvements from Exp 4 to ours show the effectiveness of our self-taught strategy, which is capable of figuring out the irrelevant human-object pairs and suppressing their interaction scores during inference.", + "bbox": [ + 169, + 815, + 825, + 872 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Combining KTN & SRC: The ablation results of Exp 5-7 in Tab. 2 show the KTN and SRC are able to facilitate each other. In detail, the SRC obtains 0.49 Full mAP improvement when the KTN is introduced (ours v.s. Exp 4), which is only 0.36 without KTN (Exp 5 v.s. baseline). Similarly,", + "bbox": [ + 169, + 881, + 826, + 926 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3c4dc5eb76fc4028aefbf775852ee762e247e291481c548a837a8639ba17ecdc.jpg", + "image_caption": [ + "(a)", + "wash_motorcycle \nours: 0.18, 0.355 \nbaseline: 0.0189" + ], + "image_footnote": [], + "bbox": [ + 174, + 114, + 336, + 199 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/64d18385f4e7e147826a9c9ded9896f1574f4ad51300b8e50043ecca7c12edd0.jpg", + "image_caption": [ + "hold_horse:0.062,0.397,0.998 ride_horse:0.405,0.966,0.998" + ], + "image_footnote": [], + "bbox": [ + 339, + 101, + 498, + 200 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9df9bcc5f1461c0846c4218ef4173e3faefd7d6edad70696dd7e72628870ebc0.jpg", + "image_caption": [ + "(c)", + "sit_on_motorcycle: 0.515, 0.033, 0.950" + ], + "image_footnote": [], + "bbox": [ + 501, + 112, + 658, + 200 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/3d058fc7e928eaacd2f473320fb34d4c09870479628f97693de6ed7388ecbfea.jpg", + "image_caption": [ + "(d)", + "sit_at_dining_table: 0.006, 0.993, 0.079 
\nsit_at_dining_table: 0.232, 0.993, 0.994" + ], + "image_footnote": [], + "bbox": [ + 663, + 112, + 821, + 200 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/31a3d121ff52f3113f31974f177930970ca7ec73ecd4e03889b2f475115f9c2c.jpg", + "image_caption": [ + "paint_fire_hydrant: \nours: 0.203, 0.505, 0.955 \nbaseline: 0.0027", + "Figure 3: Visualization of HOI detection results on HICO-DET test set. Red scores denote the negative HOI predictions. We mainly demonstrate the model's capabilities on four aspects: (a) coping with imbalanced HOI distribution; (b) distinguishing subtle differences among interaction types; (c) suppressing background HOI classes, and (d) pruning irrelevant human-object associations. The numbers reported are normalized pairwise interaction score, global HOI score and relatedness score." + ], + "image_footnote": [], + "bbox": [ + 176, + 224, + 336, + 311 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/192708158f64b8c7105ece6aeedbf1e1b24fd0ec539a1403e13410348bb7f329.jpg", + "image_caption": [ + "repair truck: 0.23, 0.055, 0.979 \ninspect truck: 0.48, 0.138, 0.979" + ], + "image_footnote": [], + "bbox": [ + 339, + 224, + 500, + 311 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/5579212847887188404267a0686fd5fb59b35064bcc9bf3ca9fa886fb6aa1cfd.jpg", + "image_caption": [ + "stand_on_skateboard: 0.009, 0.001, 0.98" + ], + "image_footnote": [], + "bbox": [ + 501, + 223, + 660, + 311 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/acddb4ab24f2368acd17d4d546335723573c1d65e5ad4a799246606a28382d6a.jpg", + "image_caption": [ + "hold_kite:0.039,0.892,0.238 hold_kite:0.478,0.892,0.995" + ], + "image_footnote": [], + "bbox": [ + 663, + 224, + 821, + 311 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "the KTN contributes 0.78 Full mAP improvement with SRC (Exp 7 v.s. Exp 6), which is only 0.55 without SRC (Exp 3 v.s. 
Exp 1).", + "bbox": [ + 169, + 428, + 823, + 458 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Parameter initialization: Our visual encoder and knowledge bank are both initialized from CLIP. We also explore different parameter initialization strategy in Exp 8-10. Specifically, we initialize the visual encoder with a ResNet50-FPN pretrained on COCO detection task for the baseline (Exp 8), and the knowledge bank with random parameters (Exp 9) or embeddings of HOI labels from RoBERTa model (Exp 10) for the final model. We observe severe drops with all these initialization methods compared with ours, demonstrating the effectiveness and generalization ability of CLIP model. It is worth noting that the mAP of Rare classes decreases from 16.20 in Exp 8 to 15.57 in Exp 9, which suggests the randomly initialized knowledge bank even aggravates the imbalance issue in final model.", + "bbox": [ + 169, + 469, + 826, + 582 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.5 QUALITATIVE RESULTS", + "text_level": 1, + "bbox": [ + 171, + 599, + 377, + 614 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We show some qualitative results of our method in Fig.3. For each HOI prediction, we report (i) normalized pairwise interaction score, (ii) global HOI score and (iii) relatedness score for ours, and only pairwise interaction score for baseline. In Fig.3(a), ours interaction scores are more confident than baseline in Rare HOI classes, demonstrating the generalization ability of our CLIP-guided HOI representation. Besides, when incorporating relational knowledge bank into pairwise HOI representation, our method is capable of distinguishing the subtle differences among similar HOIs in Fig.3(b) (e.g., repair_truck:0.23 v.s. inspect_truck:0.48 in the bottom figure). 
Moreover, in Fig.3(c), the global branch suppresses background HOIs by predicting low global scores for them (e.g., the global HOI score is 0.033 for sit_on_motorcycle while the ground-truth is sit_on_bicycle). Finally, in Fig.3(d), our self-taught relatedness classification strategy shows strong capability at recognizing the ambiguous human-object associations (e.g., 0.079 v.s. 0.994 in the upper figure).", + "bbox": [ + 169, + 623, + 826, + 777 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 795, + 318, + 810 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we propose a bi-level knowledge integration strategy that incorporates the prior knowledge from CLIP for weakly-supervised HOI detection. Specifically, we exploit CLIP textual embeddings of HOI labels as a relational knowledge bank, which is adopted to enhance the HOI representation with an image-wise HOI recognition network and a pairwise knowledge transfer network. We further propose the addition of a self-taught binary pairwise relatedness classification loss to overcome ambiguous human-object association. 
Finally, our approach achieves the new state of the art on both HICO-DET and V-COCO benchmarks under the weakly supervised setting.", + "bbox": [ + 169, + 825, + 823, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENT", + "text_level": 1, + "bbox": [ + 171, + 103, + 359, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We acknowledge funding from Flemish Government under the Onderzoeksprogramma Artificiele Intelligentie (AI) Vlaanderen programme, Shanghai Science and Technology Program 21010502700 and Shanghai Frontiers Science Center of Human-centered Artificial Intelligence.", + "bbox": [ + 171, + 133, + 826, + 176 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ETHICS STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 196, + 346, + 212 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Hereby, we consciously assure that our study is original work which has not been previously published elsewhere, and is not currently being considered for publication elsewhere. We do not have ethics risks as mentioned in the author guidelines.", + "bbox": [ + 171, + 227, + 823, + 270 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REPRODUCIBILITY STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 290, + 439, + 306 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We use publicly available benchmarks, HICO-DET and V-COCO, to validate our method. 
Code is available at https://github.com/bobwan1995/Weakly-HOI.", + "bbox": [ + 171, + 321, + 823, + 351 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 369, + 287, + 386 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Federico Baldassarre, Kevin Smith, Josephine Sullivan, and Hossein Azizpour. Explanation-based weakly-supervised learning of visual relations with graph networks. In ECCV, 2020.", + "Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020.", + "Yu-Wei Chao, Zhan Wang, Yugeng He, Jiaxuan Wang, and Jia Deng. HICO: A benchmark for recognizing human-object interactions in images. In ICCV, 2015.", + "Yu-Wei Chao, Yunfan Liu, Xieyang Liu, Huayi Zeng, and Jia Deng. Learning to detect human-object interactions. In WACV, 2018.", + "Leizhen Dong, Zhimin Li, Kunlun Xu, Zhijun Zhang, Luxin Yan, Sheng Zhong, and Xu Zou. Category-aware transformer network for better human-object interaction detection. arXiv preprint arXiv:2204.04911, 2022.", + "Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. arXiv preprint arXiv:2203.14940, 2022.", + "Chen Gao, Yuliang Zou, and Jia-Bin Huang. ican: Instance-centric attention network for human-object interaction detection. In BMVC, 2018.", + "Chen Gao, Jiarui Xu, Yuliang Zou, and Jia-Bin Huang. Drg: Dual relation graph for human-object interaction detection. In ECCV, 2020.", + "Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Open-vocabulary image segmentation. arXiv preprint arXiv:2112.12143, 2021.", + "Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2021.", + "Saurabh Gupta and Jitendra Malik. 
Visual semantic role labeling. arXiv preprint arXiv:1505.04474, 2015.", + "Tanmay Gupta, Alexander Schwing, and Derek Hoiem. No-frills human-object interaction detection: Factorization, layout encodings, and training techniques. In ICCV, 2019.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016.", + "Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In ICCV2017, 2017." + ], + "bbox": [ + 173, + 393, + 826, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 961-970, 2015.", + "ASM Iftekhar, Hao Chen, Kaustav Kundu, Xinyu Li, Joseph Tighe, and Davide Modolo. What to look at and where: Semantic and spatial refined transformer for detecting human-object interactions. arXiv preprint arXiv:2204.00746, 2022.", + "Maximilian Ilse, Jakub Tomczak, and Max Welling. Attention-based deep multiple instance learning. In ICML, pp. 2127-2136, 2018.", + "Mert Kilickaya and Arnold Smeulders. Human-object interaction detection via weak supervision. arXiv preprint arXiv:2112.00492, 2021.", + "Bumsoo Kim, Junhyun Lee, Jaewoo Kang, Eun-Sol Kim, and Hyunwoo J. Kim. Hotr: End-to-end human-object interaction detection with transformers. In CVPR, 2021.", + "Bumsoo Kim, Jonghwan Mun, Kyoung-Woon On, Minchul Shin, Junhyun Lee, and Eun-Sol Kim. Mstr: Multi-scale transformer for end-to-end human-object interaction detection. 
arXiv preprint arXiv:2203.14709, 2022.", + "Suresh Kirthi Kumaraswamy, Miaojing Shi, and Ewa Kijak. Detecting human-object interaction with mixed supervision. In WACV, 2021.", + "Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yan-Feng Wang, and Cewu Lu. Transferable interactiveness prior for human-object interaction detection. In CVPR, 2019a.", + "Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yanfeng Wang, and Cewu Lu. Transferable interactiveness knowledge for human-object interaction detection. In CVPR, 2019b.", + "Yong-Lu Li, Xinpeng Liu, Han Lu, Shiyi Wang, Junqi Liu, Jiefeng Li, and Cewu Lu. Detailed 2d-3d joint representation for human-object interaction. In CVPR, 2020a.", + "Yong-Lu Li, Xinpeng Liu, Xiaoqian Wu, Yizhuo Li, and Cewu Lu. Hoi analysis: Integrating and decomposing human-object interaction. In NeurIPS, 2020b.", + "Yue Liao, Aixi Zhang, Miao Lu, Yongliang Wang, Xiaobo Li, and Si Liu. Gen-vlkt: Simplify association and enhance interaction understanding for hoi detection. arXiv preprint arXiv:2203.13954, 2022.", + "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014.", + "Wen Liu, Weixin Luo, Dongze Lian, and Shenghua Gao. Future frame prediction for anomaly detection - a new baseline. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018.", + "Hitoshi Nishimura, Satoshi Komorita, Yasutomo Kawanishi, and Hiroshi Murase. Sdof-tracker: Fast and accurate multiple human tracking by skipped-detection and optical-flow. arXiv preprint arXiv:2106.14259, 2021.", + "Guansong Pang, Cheng Yan, Chunhua Shen, van den Hengel Anton, and Xiao Bai. Self-trained deep ordinal regression for end-to-end video anomaly detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020.", + "Alessandro Prest, Cordelia Schmid, and Vittorio Ferrari. 
Weakly supervised learning of interactions between humans and objects. IEEE TPAMI, 2011.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021a." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021b.", + "Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. arXiv preprint arXiv:1506.01497, 2015.", + "Masato Tamura, Hiroki Ohashi, and Tomoaki Yoshinaga. Qpic: Query-based pairwise human-object interaction detection with image-wide contextual information. In CVPR, 2021.", + "Tina, Anmol Kumar Sharma, Siddharth Tomar, and Kapil Gupta. Various approaches of human activity recognition: A review. In International Conference on Computing Methodologies and Communication(ICCMC), 2021.", + "Oytun Ulutan, A S M Iftekhar, and B. S. Manjunath. Vsgnet: Spatial attention network for detecting human object interactions using graph convolutions. In CVPR, 2020.", + "Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 2008. URL http://jmlr.org/papers/v9/vandermaaten08a.html.", + "Mrabti Wafae, Baibai Kaoutar, Bellach Benaissa, Oulad Haj Thami Rachid, and Tairi Hamid. 
Human motion tracking: A comparative study. Procedia Computer Science, 148:145-153, 2019.", + "Bo Wan, Desen Zhou, Yongfei Liu, Rongjie Li, and Xuming He. Pose-aware multi-level feature network for human object interaction detection. In ICCV, 2019.", + "Aixi Zhang, Yue Liao, Si Liu, Miao Lu, Yongliang Wang, Chen Gao, and Xiaobo Li. Mining the benefits of two-stage and one-stage hoi detection. NeurIPS, 2021a.", + "Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Efficient two-stage detection of human-object interactions with a novel unary-pairwise transformer. arXiv preprint arXiv:2112.01838, 2021b.", + "Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Spatially conditioned graphs for detecting human-object interactions. In ICCV, 2021c.", + "Hanwang Zhang, Zawlin Kyaw, Jinyang Yu, and Shih-Fu Chang. Ppr-fcn: Weakly supervised visual relation detection via parallel pairwise r-fcn. In ICCV, 2017.", + "Desen Zhou, Zhichao Liu, Jian Wang, Leshan Wang, Tao Hu, Errui Ding, and Jingdong Wang. Human-object interaction detection via disentangled transformer. arXiv preprint arXiv:2204.09290, 2022.", + "Penghao Zhou and Mingmin Chi. Relation parsing neural network for human-object interaction detection. In ICCV, 2019.", + "Tianfei Zhou, Wenguan Wang, Siyuan Qi, Haibin Ling, and Jianbing Shen. Cascaded human-object interaction recognition. In CVPR, 2020." 
+ ], + "bbox": [ + 171, + 102, + 826, + 715 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "APPENDIX", + "text_level": 1, + "bbox": [ + 171, + 102, + 266, + 118 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this appendix, we first describe the spatial feature generation, and then supplement more experimental results of different CLIP knowledge integration strategies for weakly-supervised HOI detection. For Explanation-HOI (Baldassarre et al., 2020), we further clarify the difference between their mAP evaluation protocol and the standard one. Finally, we demonstrate the limitations, potential negative societal impacts as well as the result error bars of our method.", + "bbox": [ + 169, + 133, + 826, + 205 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A THE ADVANTAGE OF OUR HOI REPRESENTATION", + "text_level": 1, + "bbox": [ + 171, + 226, + 624, + 242 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To verify the improvement obtained with our CLIP-based HOI representation, we visualize the HOI representation $\\hat{v}_p$ in feature space with t-SNE(van der Maaten & Hinton, 2008). For clarity, we randomly sample 80 HOI categories, and collect 50 samples for each category. For comparison, we also demonstrate the object-based HOI representation derived from 'Exp 9' in Tab.2 (i.e., the model without CLIP knowledge and using a random knowledge bank). As shown in Fig.4, we observe that CLIP-based HOI representations for different HOI categories are diverse and well separated in feature space, which is better for HOI detection. In contrast, the object-based representations are not well separated in feature space (see the red box region in Fig.4b). 
Besides, the experimental results in the ablation study (ours v.s. 'Exp 9') also validate the advantage of CLIP-based HOI representation, improving full mAP from 19.61 to 22.89.", + "bbox": [ + 169, + 258, + 826, + 398 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B ABLATION ON CLIP KNOWLEDGE INTEGRATION", + "text_level": 1, + "bbox": [ + 171, + 419, + 617, + 435 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To further demonstrate the superiority of our CLIP knowledge integration strategy, we study several proven techniques for CLIP knowledge transfer in Tab. 3. In $Abl1$ , for each human-object pair, we directly infer the HOI scores with CLIP by computing the cross-modal similarities between their visual union region and the HOI prompts. Without introducing any HOI priors, the promising results indicate the powerful generalization ability of CLIP and motivate the design of incorporating CLIP knowledge for weakly-supervised HOI detection. In $Abl2$ , we duplicate the experiment setting and results from $Exp8$ in Tab. 2 of the main paper. It is a simplified baseline model but initializes the visual encoder with a ResNet50-FPN pretrained on COCO detection task. Then we introduce three different CLIP knowledge transfer strategies (Abl 3-4 and ours) based on $Abl2$ .", + "bbox": [ + 169, + 450, + 826, + 577 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In Abl 3, we directly enhance baseline scores in Abl 2 with the CLIP similarity scores in Abl 1 on the inference stage. Without bells and whistles, we obtain 1.12 gain in Full mAP.", + "bbox": [ + 169, + 583, + 823, + 612 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Furthermore, in Abl 4, we adopt a similar knowledge transfer strategy as GEN-VLKT (Liao et al., 2022), where we initialize the HOI classifier $\\mathcal{F}_P$ with HOI prompt and regularize the global HOI representation with CLIP image feature $v_{g}$ . 
In detail, we first compute the global HOI representation $v_{mean}$ with mean pooling on all pairwise HOI representations, i.e., $v_{mean} = MeanPool(\\{v_p^m\\}_{m=1}^M)$ . Here $v_p^m$ is the holistic HOI representation (c.f. Sec. 3.2.3 in the main paper) for $m$ -th human-object pair. Then we develop an additional $L2$ loss $\\mathcal{L}_{reg}$ to transfer the knowledge from CLIP to HOI representations: $\\mathcal{L}_{reg} = L2(v_{mean}, v_g)$ . The performance even decreases slightly from 19.44 to 19.39, which might be caused by the incompatibility of parameters between backbone network (ResNet50-FPN pretrained on COCO) and $\\mathcal{F}_P$ (HOI prompt embeddings from CLIP). When directly applying the knowledge transfer strategy of GEN-VLKT to a weakly-supervised setting, it is difficult to map the unmatched HOI representation and classification weights to a joint space as the supervisory signals are noisy.", + "bbox": [ + 169, + 618, + 828, + 792 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Finally, our approach achieves the best performance compared with other strategies, demonstrating the effectiveness of our bi-level knowledge integration strategy.", + "bbox": [ + 169, + 799, + 823, + 828 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C SPATIAL FEATURE GENERATION", + "text_level": 1, + "bbox": [ + 171, + 849, + 478, + 864 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Following (Zhang et al., 2021c), we generate the spatial feature $v_{sp} \\in \\mathbb{R}^{D}$ for each pair of human-object proposals $(\\mathbf{x}_h, \\mathbf{x}_o)$ . 
Specifically, we first compute the bounding boxes information for $\\mathbf{x}_h$ and $\\mathbf{x}_o$ separately, including their center coordinates, widths, heights, aspect ratios and areas, all", + "bbox": [ + 169, + 881, + 826, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/ec0d2877c25887ff4609d620312019b28f059c4f6a7a3f591043ace767320b71.jpg", + "image_caption": [ + "Figure 4: The t-SNE visualization of CLIP-based HOI representation and object-based HOI representation." + ], + "image_footnote": [], + "bbox": [ + 174, + 102, + 496, + 296 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/9c33688555a1473b9211dc245403d73ceed296cf44e1d2b567f377ee0add3094.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 102, + 821, + 296 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/fe363dacb48d8989d586e83fb6f3326ad7831f10cb09fb80f80800b75a732ddf.jpg", + "table_caption": [ + "Table 3: Ablation of different CLIP knowledge integration strategies on HICO-DET dataset." + ], + "table_footnote": [], + "table_body": "
MethodsExperimental settingmAP (%)
FullRareNon-Rare
Abl 1CLIP inference score11.8413.7211.27
Abl 2RN50-FPN (COCO) + FP random init.19.4416.2020.41
Abl 3RN50-FPN (COCO) + FP random init. + CLIP inference score20.5618.1921.27
Abl 4RN50-FPN (COCO) + FP HOI prompt init. + CLIP visual regularization19.3915.1220.66
oursCLIP RN50 + HOI recognition + KTN + self-taught relatedness cls.22.8922.4123.03
", + "bbox": [ + 238, + 387, + 756, + 457 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "normalized by the corresponding dimension of the image. We also encode their relative spatial relations by estimating the intersection over union (IoU), a ratio of the area of $\\mathbf{x}_h$ and $\\mathbf{x}_o$ , a directional encoding and the distance between center coordinates of $\\mathbf{x}_h$ and $\\mathbf{x}_o$ . We concatenate all the above-mentioned preliminary spatial cues and obtain a spatial encoding $\\mathbf{p} \\in \\mathbb{R}_{+}^{18}$ . To encode the second and higher order combinations of different terms, the spatial encoding is concatenated with its logarithm and then embedded to $v_{sp}$ : $v_{sp} = \\mathcal{F}_{sp}([p; \\log(p + \\epsilon)])$ . Where $\\epsilon > 0$ is a small constant to guarantee the numerical stability, and $\\mathcal{F}_{sp}$ is a multi-layer fully connected network.", + "bbox": [ + 169, + 492, + 826, + 590 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "D VISUALIZATION OF HOI KNOWLEDGE BANK $\\mathcal{W}_T$", + "text_level": 1, + "bbox": [ + 171, + 621, + 619, + 638 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To further understand $\\mathcal{W}_T$ , we visualize the knowledge bank features initialized by CLIP (Fig.5(a)) and learned from scratch (Fig.5(b)) in feature space by t-SNE. It is worth noting that the knowledge bank learned from scratch is derived from 'Exp 9' in Tab.2. As shown in Fig.5, we observe that the knowledge features of HOI classes initialized with CLIP are more discriminative than random initialized, and show a better clustering result (e.g. 
the HOI classes in red box regions).", + "bbox": [ + 169, + 659, + 826, + 731 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "E DIFFERENT DESIGNS OF KTN", + "text_level": 1, + "bbox": [ + 171, + 760, + 455, + 776 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To further validate the effectiveness of our attention mechanism in KTN, we compare our design with some variants in Tab. 4. First of all, we directly encode the relation-level features within the union region to enhance the pairwise representation rather than the external knowledge bank. As a result, the mAP even decreases a little bit from 20.75 (Exp 6) to 20.69 (Exp 11). The potential reason is that the union region contains more ambiguous visual relations and background clutters, which are difficult to learn in a weak setting. Besides, we also explore different normalization strategies in KTN. The results in Tab. 4 demonstrate that Softmax operation (ours) performs better than uniform attention (Exp 12) or Sigmoid operation (Exp 13), indicating our attention mechanism is non-trivial and more effective on aggregating the relational cues from HOI knowledge bank.", + "bbox": [ + 169, + 797, + 823, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/2e8f35e8efe169ddf8e8f37e0ef00a2cb7d7406a4aefb85cf71fe3778f5da5f5.jpg", + "image_caption": [ + "Figure 5: The t-SNE visualization of knowledge bank $\\mathcal{W}_T$ . (a) is the knowledge bank distribution in feature space based on our CLIP-based HOI representation while (b) is the knowledge bank learned from scratch (the model in Tab.2-Exp 9) based on object-based HOI representation." 
+ ], + "image_footnote": [], + "bbox": [ + 174, + 103, + 821, + 488 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/7bd051eb2407505f5d0e91ad521730a1e01709b3b353c897c1f3b46d360b5784.jpg", + "table_caption": [ + "Table 4: Different network design of Knowledge Transfer Network (KTN)." + ], + "table_footnote": [], + "table_body": "
MethodsParameter initializationCLIP KnowledgemAP (%)
Backboneknowledge bankHOI recognitionKTNscore fusionSRCFullRareNon-Rare
Exp 11CLIP RN50CLIP Text✓ (union)-20.6919.5521.04
Exp 12CLIP RN50CLIP Text✓ (uniform)-21.1419.8221.53
Exp 13CLIP RN50CLIP Text✓ (sigmoid)-21.2819.2721.88
oursCLIP RN50CLIP Text-21.5320.0521.97
", + "bbox": [ + 205, + 583, + 790, + 642 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "F TOP-K POSITIVE PAIR SELECTION FOR SRC", + "text_level": 1, + "bbox": [ + 171, + 659, + 571, + 674 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this section we show the results of selecting top-2 and top-5 pairs as positive in Tab. 5. We notice that there is a small performance drop, which is likely to be caused by mislabeling more negative pairs as positive, resulting in model learning with more noise.", + "bbox": [ + 169, + 691, + 823, + 733 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "G THE PROMPT GENERATION FOR V-COCO", + "text_level": 1, + "bbox": [ + 171, + 753, + 558, + 768 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For the V-COCO dataset, each action has two different semantic roles ('instrument' and 'object') for different objects, like 'cut cake' and 'cut with knife'. We use two different prompt templates to convert a HOI label to a language sentence. For the former one, we take template \"a person verb a/an object\", and use \"a person verb with object\" for the latter.", + "bbox": [ + 169, + 786, + 823, + 842 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "H EVALUATION METRIC FOR V-COCO", + "text_level": 1, + "bbox": [ + 171, + 863, + 516, + 878 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "V-COCO dataset has two scenarios for role AP evaluation. In Tab. 1, APS1&2 refer to 'Average Precision in scenario 1&2'. 
V-COCO dataset has two different annotations for HOIs: the first is a", + "bbox": [ + 169, + 895, + 823, + 922 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/ea948175792d2ed151c0ca1aee49e56a4e3c9c26bc08bbdd021b1dbab82c8ad6.jpg", + "table_caption": [ + "Table 5: Ablation of top-K positive pair selection for SRC on HICO-DET dataset." + ], + "table_footnote": [], + "table_body": "
MethodsmAP (%)
FullRareNon-Rare
Top-522.4521.6122.70
Top-222.4921.8322.69
ours (Top-1)22.8922.4123.03
", + "bbox": [ + 385, + 126, + 612, + 186 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/5ea9aca00229807cfcbed7f05dd2e53f142325eae021f722c0c643c640bf02ee.jpg", + "image_caption": [ + "(a) Evaluation protocol in Explanation-HOI" + ], + "image_footnote": [], + "bbox": [ + 176, + 200, + 506, + 429 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b1ffa02461492b359ad767aae489aa3222ac27a003e81f6fd5b1f59f8392a3fe.jpg", + "image_caption": [ + "(b) The correct evaluation protocol", + "Figure 6: The screenshot of the evaluation code in Explanation-HOI. (a) is the original code while (b) is the correct one based on the standard evaluation code. We use red rectangle boxes to highlight the most important differences" + ], + "image_footnote": [], + "bbox": [ + 506, + 200, + 820, + 429 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "full label of (human location, interaction type, object location, object type), and the second misses target object (also denoted as 'role' in the original paper (Gupta & Malik, 2015)) annotations, and the label only includes (human location, interaction type). For the second case, there are two different evaluation protocols (scenarios) when taking a prediction as correct $^4$ : In scenario 1, it requires the interaction is correct & the overlap between the human boxes is $> 0.5$ & the corresponding role is empty, which is more restricted; in scenario 2, it only requires the interaction is correct & the overlap between the person boxes is $> 0.5$ .", + "bbox": [ + 169, + 527, + 823, + 625 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "I EVALUATION OF EXPLANATION-HOI", + "text_level": 1, + "bbox": [ + 171, + 646, + 509, + 662 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The Explanation-HOI (Baldassarre et al., 2020) has a misunderstanding of mAP evaluation protocol. 
As shown in Fig.6(a) L200-L205, the Explanation-HOI only takes some specific predicted HOIs into the evaluation process, which has the same HOI labels as groundtruth HOIs. Thus, they ignore lots of false-positive HOI predictions when calculating mAP, leading to an untrustable high mAP score (reported in their original paper). In Fig.6(b) L204-L208, we evaluate all predicted HOIs, which is the same as the standard evaluation protocol proposed in HICO-DET (Chao et al., 2015). The correct results have already been reported in Tab.1 in the main paper.", + "bbox": [ + 169, + 678, + 823, + 777 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "J LIMITATIONS", + "text_level": 1, + "bbox": [ + 171, + 796, + 315, + 811 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As described in Sec. 3.1, we adopt an external object detector to generate human-object proposals and then recognize their interactions. Consequently, our method is faced with two limitations brought by erroneous object detection results. Firstly, the positive human-object pairs are not recalled if the human or object proposals are not detected. 
Secondly, the proposals are kept fixed during learning, which leads to the problem of inaccurate localization and object types.", + "bbox": [ + 169, + 828, + 823, + 898 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "4https://github.com/s-gupta/v-coco", + "bbox": [ + 191, + 909, + 401, + 924 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "K RISK OF USING CLIP", + "text_level": 1, + "bbox": [ + 171, + 102, + 390, + 118 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For all the methods that adopt CLIP in their model design, there is a potential risk of data leakage as CLIP has seen quite a lot of data during pretraining. For HOI detection task, we cannot get access to CLIP dataset and do not know the exact overlap between CLIP and HOI benchmarks (i.e., HICO-DET and V-COCO), we carefully read Sec. 5 (Data Overlap Analysis) of the CLIP paper (Radford et al., 2021b), including an analysis of the overlap between its dataset with 35 popular datasets (HICO-DET and V-COCO are not included). It shows the overlap is small (median is $2.2\\%$ and average is $3.2\\%$ ) and the influence is limited (\"overall accuracy is rarely shifted by more than $0.1\\%$ with only 7 datasets above this threshold\"). Besides, the training text accompanying an image in the CLIP dataset is often not related to the HOI annotations. 
Thus, we think the risk is limited.", + "bbox": [ + 169, + 133, + 826, + 258 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "L LICENSE", + "text_level": 1, + "bbox": [ + 171, + 279, + 284, + 294 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The licenses of the assets used in our work are listed below, including open-sourced CLIP model, HICO-DET dataset, and V-COCO dataset. As for HICO-DET, we cannot find its license in the paper and the official project page. Thus we provide the official project page instead here for clarity.", + "bbox": [ + 169, + 310, + 828, + 354 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. CLIP: https://github.com/openai/CLIP MIT License", + "2. VCOCO: https://github.com/s-gupta/v-coco/MIT License", + "3. HICO-DET: http://www-personal.umich.edu/ ywchao/hico/ No license" + ], + "bbox": [ + 210, + 364, + 694, + 416 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_model.json b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5c19f2d4a1acf2dab270112b4469b2c35e716a9e --- /dev/null +++ b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_model.json @@ -0,0 +1,2907 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": 
[ + 0.172, + 0.1, + 0.83, + 0.147 + ], + "angle": 0, + "content": "WEAKLY-SUPERVISED HOI DETECTION VIA PRIOR-GUIDED BI-LEVEL REPRESENTATION LEARNING" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.169, + 0.706, + 0.185 + ], + "angle": 0, + "content": "Bo Wan \\(^{1,*}\\), Yongfei Liu \\(^{2*}\\), Desen Zhou \\(^{2}\\), Tinne Tuytelaars \\(^{1}\\), Xuming He \\(^{2,3}\\)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.186, + 0.725, + 0.244 + ], + "angle": 0, + "content": "\\(^{1}\\) KU Leuven, Leuven, Belgium; \\(^{2}\\) ShanghaiTech University, Shanghai, China \n\\(^{3}\\) Shanghai Engineering Research Center of Intelligent Vision and Imaging {bwan, tinne.tuytelaars}@esat.kuleuven.be {liuyf3,zhouds,hexm}@shanghaitech.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.261, + 0.548, + 0.275 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.283, + 0.771, + 0.478 + ], + "angle": 0, + "content": "Human object interaction (HOI) detection plays a crucial role in human-centric scene understanding and serves as a fundamental building-block for many vision tasks. One generalizable and scalable strategy for HOI detection is to use weak supervision, learning from image-level annotations only. This is inherently challenging due to ambiguous human-object associations, large search space of detecting HOIs and highly noisy training signal. A promising strategy to address those challenges is to exploit knowledge from large-scale pretrained models (e.g., CLIP), but a direct knowledge distillation strategy (Liao et al., 2022) does not perform well on the weakly-supervised setting. In contrast, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge at both image level and HOI instance level, and adopt a self-taught mechanism to prune incorrect human-object associations. 
Experimental results on HICO-DET and V-COCO show that our method outperforms the previous works by a sizable margin, showing the efficacy of our HOI representation." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.498, + 0.338, + 0.513 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.523, + 0.828, + 0.594 + ], + "angle": 0, + "content": "Human object interaction detection aims to simultaneously localize the human-object regions in an image and to classify their interactions, which serves as a fundamental building-block in a wide range of tasks in human-centric artificial intelligence, such as human activity recognition (Heilbron et al., 2015; Tina et al., 2021), human motion tracking (Wafae et al., 2019; Nishimura et al., 2021) and anomalous behavior detection (Liu et al., 2018; Pang et al., 2020)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.6, + 0.828, + 0.727 + ], + "angle": 0, + "content": "Usually, HOI detection adopts a supervised learning paradigm (Gupta & Malik, 2015; Chao et al., 2018; Wan et al., 2019; Gao et al., 2020; Zhang et al., 2021c). This requires detailed annotations (i.e. human and object bounding boxes and their interaction types) in the training stage. However, such HOI annotations are expensive to collect and prone to labeling errors. In contrast, it is much easier to acquire image-level descriptions of target scenes. Consequently, a more scalable strategy for HOI detection is to learn from weak annotations at the image level, known as weakly-supervised HOI detection (Zhang et al., 2017). Learning under such weak supervision is particularly challenging mainly due to the lack of accurate visual-semantic associations, large search space of detecting HOIs and highly noisy training signal from only image level supervision." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.732, + 0.828, + 0.9 + ], + "angle": 0, + "content": "Most existing works (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) attempt to tackle the weakly-supervised HOI detection in a Multiple Instance Learning (MIL) framework (Ilse et al., 2018). They first utilize an object detector to generate human-object proposals and then train an interaction classifier with image-level labels as supervision. Despite promising results, these methods suffer from several weaknesses when coping with diverse and fine-grained HOIs. Firstly, they usually rely on visual representations derived from the external object detector, which mainly focus on the semantic concepts of the objects in the scene and hence are insufficient for capturing the concept of fine-grained interactions. Secondly, as the image-level supervision tends to ignore the imbalance in HOI classes, their representation learning is more susceptible to the dataset bias and dominated by frequent interaction classes. Finally, these methods learn the HOI concepts from a candidate set generated by pairing up all the human and object proposals, which is highly noisy and often leads to erroneous human-object associations for many interaction classes." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.714, + 0.925 + ], + "angle": 0, + "content": "*Equal Contribution. Code is available at https://github.com/bobwan1995/Weakly-HOI." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.258 + ], + "angle": 0, + "content": "To address the aforementioned limitations, we introduce a new weakly-supervised HOI detection strategy. 
It aims to incorporate the prior knowledge from pretrained foundation models to facilitate the HOI learning. In particular, we propose to integrate CLIP (Radford et al., 2021b), a large-scale vision-language pretrained model. This allows us to exploit the strong generalization capability of the CLIP representation for learning a better HOI representation under weak supervision. Compared to the representations learned by the object detector, the CLIP representations are inherently less object-centric, hence more likely to incorporate also aspects about the human-object interaction, as evidenced by Appendix A. Although a few works have successfully exploited CLIP for supervised HOI detection in the past, experimentally we find they do not perform well in the more challenging weakly-supervised setting (c.f. Appendix.B). We hypothesize this is because they only transfer knowledge at image level, and fail without supervision at the level of human-object pairs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.264, + 0.828, + 0.377 + ], + "angle": 0, + "content": "To this end, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge of HOIs at two different levels. First, at the image level, we utilize the visual and linguistic embeddings of the CLIP model to build a global HOI knowledge bank and generate image-level HOI predictions. In addition, for each human-object pair, we enrich the region-based HOI features by the HOI representations in the knowledge bank via a novel attention mechanism. Such a bi-level framework enables us to exploit the image-level supervision more effectively through the shared HOI knowledge bank, and to enhance the interaction feature learning by introducing the visual and text representations of the CLIP model." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.382, + 0.827, + 0.508 + ], + "angle": 0, + "content": "We instantiate our bi-level knowledge integration strategy as a modular deep neural network with a global and local branch. Given the human-object proposals generated by an off-the-shelf object detector, the global branch starts with a backbone network to compute image feature maps, which are used by a subsequent HOI recognition network to predict the image-wise HOI scores. The local branch builds a knowledge transfer network to extract the human-object features and augment them with the CLIP-guided knowledge bank, followed by a pairwise classification network to compute their relatedness and interaction scores1. The relatedness scores are used to prune incorrect human-object associations, which mitigates the issue of noisy proposals. Finally, the outputs of the two branches are fused to generate the final HOI scores." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.514, + 0.827, + 0.613 + ], + "angle": 0, + "content": "To train our HOI detection network with image-level annotations, we first initialize the backbone network and the HOI knowledge bank from the CLIP encoders, and then train the entire model in an end-to-end manner. In particular, we devise a novel multi-task weak supervision loss consisting of three terms: 1) an image-level HOI classification loss for the global branch; 2) an MIL-like loss for the interaction scores predicted by the local branch, which is defined on the aggregate of all the human-object pair predictions; 3) a self-taught classification loss for the relatedness of each human-object pair, which uses the interaction scores from the model itself as supervision." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.619, + 0.829, + 0.731 + ], + "angle": 0, + "content": "We validate our methods on two public benchmarks: HICO-DET (Chao et al., 2018) and V-COCO (Gupta & Malik, 2015). 
The empirical results and ablative studies show our method consistently achieves state-of-the-art performance on all benchmarks. In summary, our contributions are three-fold: (i) We exploit the CLIP knowledge to build a prior-enriched HOI representation, which is more robust for detecting fine-grained interaction types and under imbalanced data distributions. (ii) We develop a self-taught relatedness classification loss to alleviate the problem of mis-association between human-object pairs. (iii) Our approach achieves state-of-the-art performance on the weakly-supervised HOI detection task on both benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.741, + 0.353, + 0.756 + ], + "angle": 0, + "content": "2 RELATED WORKS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.761, + 0.829, + 0.888 + ], + "angle": 0, + "content": "HOI detection: Most works on supervised HOI detection can be categorized in two groups: two-stage and one-stage HOI detection. Two-stage methods first generate a set of human-object proposals with an external object detector, then classify their interactions. They mainly focus on exploring additional human pose information (Wan et al., 2019; Li et al., 2020a; Gupta et al., 2019), pairwise relatedness (Li et al., 2019a; Zhou et al., 2020) or modeling relations between object and human (Gao et al., 2020; Zhang et al., 2021c; Ulutan et al., 2020; Zhou & Chi, 2019), to enhance the HOI representations. One-stage methods predict human & object locations and their interaction types simultaneously in an end-to-end manner, which are currently dominated by transformer-based architectures (Carion et al., 2020; Kim et al., 2022; Dong et al., 2022; Zhang et al., 2021a;b)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.898, + 0.825, + 0.925 + ], + "angle": 0, + "content": "\\( {}^{1} \\) Relatedness indicates whether a human-object pair has a relation, and interaction scores are multi-label scores on the interaction space." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.205, + 0.103, + 0.79, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.331, + 0.825, + 0.358 + ], + "angle": 0, + "content": "Figure 1: Model Overview: There are four modules in our network: a backbone Network, an HOI recognition network, a knowledge transfer network and a pairwise classification network." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.825, + 0.416 + ], + "angle": 0, + "content": "Supervised methods show superior performance, but require labor-intensive HOI annotations that are infeasible to obtain in many scenarios. Thus, in this work we focus on HOI detection under weak supervision." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.424, + 0.827, + 0.619 + ], + "angle": 0, + "content": "Weakly-supervised HOI detection: Weakly-supervised HOI detection aims to learn instance-level HOIs with only image-level annotations. (Prest et al., 2011) learns a set of binary action classifiers based on detected human-object pairs, where human proposal is obtained from a part-based human detector and object is derived from the relative position with respect to the human. PPR-FCN (Zhang et al., 2017) employs a parallel FCN to perform pair selection and classification. Explainable-HOI (Baldassarre et al., 2020) adopts graph nets to capture relations for better image-level HOI recognition, and uses backward explanation for instance-level HOI detection. MX-HOI (Kumaraswamy et al., 2021) proposes a momentum-independent learning strategy to utilize strong & weak labels simultaneously. 
AlignFormer (Kilickaya & Smeulders, 2021) proposes an align layer in transformer framework, which utilizes geometric & visual priors to generate pseudo alignments for training. Those methods focus on learning HOIs with advanced network structures or better pseudo alignments. However, they still suffer from noisy human-object associations and ambiguous interaction types. To address those challenges, we exploit prior knowledge from CLIP to build discriminative HOI representations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.827, + 0.768 + ], + "angle": 0, + "content": "Knowledge exploitation of pretrained V&L models: Recently, CLIP (Radford et al., 2021a) model has demonstrated strong generalization to various downstream tasks (Ghiasi et al., 2021; Du et al., 2022; Gu et al., 2021). Some works also explore CLIP knowledge in supervised HOI detection, e.g., CATN (Dong et al., 2022) initializes the object query with category-aware semantic information from CLIP text encoder, and GEN-VLKT (Liao et al., 2022) employs image feature distillation and classifier initialization with HOI prompts. However, they only exploit CLIP knowledge at a coarse level and require detailed annotations of human-object pairs. It is non-trivial to extend such strategies to the weak supervision paradigm due to highly noisy training signals. In our work, we build a deep connection between CLIP and HOI representation by incorporating the prior knowledge of HOIs at both image and HOI instance levels." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.781, + 0.283, + 0.795 + ], + "angle": 0, + "content": "3 METHOD" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.805, + 0.51, + 0.818 + ], + "angle": 0, + "content": "3.1 PROBLEM SETUP AND METHOD OVERVIEW" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.824, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Problem setup Given an input image \\(I\\), the task of weakly-supervised HOI detection aims to localize and recognize the human-object interactions, while only the corresponding image-level HOI categories are available for training. Formally, we aim to learn a HOI detector \\(\\mathcal{M}\\), which takes an image \\(I\\) as input and generates a set of tuples \\(\\mathcal{O} = \\{(\\mathbf{x}_h,\\mathbf{x}_o,c_o,a_{h,o},R_{h,o}^a)\\}\\), i.e., \\(\\mathcal{O} = \\mathcal{M}(I)\\). Here each tuple indicates a HOI instance, in which \\(\\mathbf{x}_h,\\mathbf{x}_o\\in \\mathbb{R}^4\\) represent human and object bounding boxes, \\(c_{o}\\in \\{1,\\dots,C\\}\\) is the object category, \\(a_{h,o}\\in \\{1,\\dots,A\\}\\) denotes the interaction class associated with \\(\\mathbf{x}_h\\) and \\(\\mathbf{x}_o\\), and \\(R_{h,o}^{a}\\in \\mathbb{R}\\) is the HOI score. 
For the weakly-supervised setting," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.148 + ], + "angle": 0, + "content": "each training image is annotated with a set of HOI categories \\(\\mathcal{R} = \\{r^{*}\\}\\) at the image level only, where \\(r^{*} \\in \\{1, \\dots, N\\}\\) is an index to a combination of ground-truth object category \\(c^{*}\\) and interaction category \\(a^{*}\\), and \\(N\\) denotes the number of all possible HOI combinations defined on the dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.15, + 0.828, + 0.208 + ], + "angle": 0, + "content": "Method Overview As we lack supervision for the HOI locations, we adopt a typical hypothesize-and-recognize strategy (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) for HOI detection: first we generate a set of human and object proposals with an off-the-shelf object detector (Ren et al., 2015) and then predict the interaction class for all human-object combinations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.213, + 0.828, + 0.298 + ], + "angle": 0, + "content": "Unlike other methods, we do not re-use the feature maps of the object or human detector - we only keep the bounding boxes. Instead, we learn a new representation optimized for the HOI task. This is challenging under the weak setting as the model learning is noisy, but feasible by leveraging the rich semantic knowledge from a pretrained large-scale multimodal model, like CLIP. 
However, the naive knowledge integration strategies for supervised setting fail when directly applied in the weak setting, as evidenced by our experiments in Appendix.B" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.304, + 0.828, + 0.389 + ], + "angle": 0, + "content": "Our framework adopts two philosophies to address the challenges in the weakly-supervised HOI task: the first is to integrate the prior knowledge into discriminative representation learning, and the second is to suppress noise in learning. For the first philosophy, we utilize the prior knowledge from CLIP to guide the representation learning in both global image-level and fine-grained human-object pairs, which is instantiated by a bi-level knowledge integration strategy. For the second philosophy, we adopt an effective self-taught learning mechanism to suppress the irrelevant pairs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.394, + 0.828, + 0.493 + ], + "angle": 0, + "content": "We instantiate the bi-level knowledge integration strategy with a two-branch deep network. Our detection pipeline starts with a set of human proposals with detection scores \\(\\{(\\mathbf{x}_h, s_h)\\}\\), and object proposals with their categories and detection scores \\(\\{(\\mathbf{x}_o, c_o, s_o)\\}\\). Then, the global branch performs image-level HOI recognition by utilizing a CLIP-initialized HOI knowledge bank as a classifier. This allows us to exploit both visual and text encoders from CLIP to generate better HOI representations. In parallel, for each human-object pair \\((\\mathbf{x}_h, \\mathbf{x}_o)\\), the local branch explicitly augments the pairwise HOI features with the HOI knowledge bank to then identify their relatedness and interaction classes." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.499, + 0.828, + 0.556 + ], + "angle": 0, + "content": "To train our model, we use a multi-task loss, which incorporates a HOI recognition loss defined on image-wise HOIs for the visual encoder and knowledge bank finetuning, and a self-taught relatedness classification for suppressing the background human-object associations, on top of the standard MIL-based loss. We first present model details in Sec.3.2, followed by the training strategy in Sec.3.3." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.56, + 0.33, + 0.574 + ], + "angle": 0, + "content": "3.2 MODEL DESIGN" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.579, + 0.828, + 0.734 + ], + "angle": 0, + "content": "Now we introduce our bi-level knowledge integration strategy, where the aim is to exploit CLIP textual embeddings of HOI labels as a HOI knowledge bank for the HOI representation learning, and to transfer such knowledge both at image level as well as at the level of human-object pairs for interaction predictions. Specifically, as shown in Fig. 1, our network consists of a global branch and a local branch. The global branch includes a backbone network (Sec.3.2.1) that extracts image features, and a HOI recognition network (Sec.3.2.2) that uses a HOI knowledge bank based on CLIP to predict image-level HOI scores. For each human-object proposal generated by an off-the-shelf object detector, the local branch employs a knowledge transfer network (Sec.3.2.3) to compute its feature representation with enhancement from the HOI knowledge bank, and a pairwise classification network (Sec.3.2.4) to compute their relatedness and interaction scores. Finally, we generate the final HOI detection scores by combining global HOI scores with local predictions (Sec. 3.2.5)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.739, + 0.828, + 0.852 + ], + "angle": 0, + "content": "HOI Knowledge Bank Generation CLIP builds a powerful vision-language model by pretraining on large-scale image-text pairs. It consists of a visual encoder \\(\\mathcal{F}_V\\) and textual encoder \\(\\mathcal{F}_T\\), mapping both visual and textual inputs to a shared latent space. Here, we exploit CLIP to generate a HOI knowledge bank. We take a similar prompt strategy as in CLIP, adopting a common template 'a person {verb} a/an {object}' to convert HOI labels into text prompts (e.g., converting 'drive car' to 'a person driving a car'). Then we input the sentences into the CLIP textual encoder \\(\\mathcal{F}_T\\) to initialize the HOI knowledge bank \\(\\mathcal{W}_T \\in \\mathbb{R}^{N \\cdot D}\\), with \\(D\\) denoting the feature dimension. One can think of \\(\\mathcal{W}_T\\) as a set of 'prototypes' in feature space, one for each HOI in the dataset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.862, + 0.52, + 0.875 + ], + "angle": 0, + "content": "3.2.1 GLOBAL BRANCH: BACKBONE NETWORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.828, + 0.927 + ], + "angle": 0, + "content": "To incorporate CLIP for feature extraction, we initialize the backbone network (e.g., a ResNet-101 (He et al., 2016)) with CLIP's visual encoder \\(\\mathcal{F}_V\\) to generate a feature map \\(\\Gamma\\) for the input image \\(I\\). We further compute a global feature vector \\(v_{g} \\in \\mathbb{R}^{D}\\) with self-attention operation (Radford et al., 2021b)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.104, + 0.508, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.279, + 0.235, + 0.464, + 0.248 + ], + "angle": 0, + "content": "(a) knowledge transfer network" + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.104, + 0.765, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.521, + 0.235, + 0.756, + 0.248 + ], + "angle": 0, + "content": "(b) pseudo relatedness label generation" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.255, + 0.825, + 0.294 + ], + "angle": 0, + "content": "Figure 2: The knowledge transfer network explicitly transfers the discriminative relation-level semantic knowledge derived from CLIP to the pairwise HOI representations. Pseudo relatedness label generation uses the pairwise interaction scores to generate the pseudo association labels for self-taught relatedness classification" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.31, + 0.576, + 0.323 + ], + "angle": 0, + "content": "3.2.2 GLOBAL BRANCH: HOI RECOGNITION NETWORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.331, + 0.825, + 0.402 + ], + "angle": 0, + "content": "We perform an image-wise HOI recognition task with the HOI knowledge bank \\(\\mathcal{W}_T\\). We obtain global HOI scores \\(s_g \\in \\mathbb{R}^N\\) by computing the inner product between the image feature \\(v_g\\) and the knowledge bank \\(\\mathcal{W}_T\\): \\(s_g = \\mathcal{W}_T \\times v_g\\), where \\(\\times\\) is matrix multiplication. 
This has the effect of adapting the visual encoder and knowledge bank parameters to the HOI recognition task, fully taking advantage of the knowledge from CLIP." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.415, + 0.601, + 0.428 + ], + "angle": 0, + "content": "3.2.3 LOCAL BRANCH: KNOWLEDGE TRANSFER NETWORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.435, + 0.825, + 0.52 + ], + "angle": 0, + "content": "Given the CLIP-initialized visual encoder, a standard HOI representation can be formed by concatenating the human and object appearance features along with their spatial encoding. However, even after the finetuning as described above, such a representation still mainly focuses on object-level semantic cues rather than relation-level concepts. In this module, we explicitly exploit the HOI knowledge bank \\(\\mathcal{W}_T\\) to learn a local relation-specific HOI representation. To achieve this, we propose an attention-based architecture as shown in Fig.2(a)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.526, + 0.825, + 0.612 + ], + "angle": 0, + "content": "Specifically, for each human proposal \\(\\mathbf{x}_h\\) and object proposal \\(\\mathbf{x}_o\\), we use RoI-Align (He et al., 2017) to crop the feature maps from \\(\\Gamma\\) followed by a self-attention operation to compute their appearance features \\(v_h, v_o \\in \\mathbb{R}^D\\). Then we compute a spatial feature \\(v_{sp}\\) by encoding the relative positions of their bounding boxes \\((\\mathbf{x}_h, \\mathbf{x}_o)^2\\). The holistic HOI representation \\(v_p \\in \\mathbb{R}^D\\) is an embedding of the human and object appearance features and their spatial feature, i.e., \\(v_p = \\mathcal{F}_E([v_h; v_o; v_{sp}])\\), where \\([\\cdot]\\) is the concatenation operation and \\(\\mathcal{F}_E\\) is a multi-layer perceptron (MLP)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.618, + 0.825, + 0.717 + ], + "angle": 0, + "content": "To enhance relation-level concepts, we further compute its union region \\(\\mathbf{x}_u\\in \\mathbb{R}^4\\) (see Fig. 2a) and extract the corresponding appearance feature \\(v_{u}\\in \\mathbb{R}^{D}\\) via RoI-align over the feature map \\(\\Gamma\\). The union region is important as it encodes relational context cues, but it potentially also contains a large amount of background that is noisy for model learning. We thus devise an attention module that is similar in design to the HOI recognition network, but uses the union feature \\(v_{u}\\) as query to extract a meta-embedding \\(v_{meta}\\in \\mathbb{R}^{D}\\) from the HOI knowledge bank \\(\\mathcal{W}_T\\). The final HOI representation \\(\\hat{v}_p\\in \\mathbb{R}^D\\) is built by fusing the holistic representation \\(v_{p}\\) and \\(v_{meta}\\) with a MLP \\(\\mathcal{F}_K\\)." + }, + { + "type": "equation", + "bbox": [ + 0.245, + 0.725, + 0.825, + 0.742 + ], + "angle": 0, + "content": "\\[\n\\alpha = \\operatorname {S o f t m a x} \\left(\\mathcal {W} _ {T} \\times v _ {u}\\right); \\quad v _ {\\text {m e t a}} = \\alpha^ {\\intercal} \\times \\mathcal {W} _ {T}; \\quad \\hat {v} _ {p} = \\mathcal {F} _ {K} \\left(v _ {p} + v _ {\\text {m e t a}}\\right). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.751, + 0.825, + 0.779 + ], + "angle": 0, + "content": "Here \\(\\alpha \\in \\mathbb{R}^N\\) is the normalized attention weight and \\(\\intercal\\) is the transpose operation. \\(v_{meta}\\) encodes a discriminative representation from CLIP and facilitates feature sharing between HOI classes." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.792, + 0.619, + 0.806 + ], + "angle": 0, + "content": "3.2.4 LOCAL BRANCH: PAIRWISE CLASSIFICATION NETWORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.817, + 0.827, + 0.874 + ], + "angle": 0, + "content": "Given the relation-aware HOI representation \\(\\hat{v}_p\\), our final module performs a coarse-level classification on human-object association and a fine-level classification for interaction recognition. Specifically, we use two MLPs \\(\\mathcal{F}_P\\) and \\(\\mathcal{F}_B\\) to predict the interaction scores \\(s_p \\in \\mathbb{R}^A\\) and the relatedness score \\(s_b \\in \\mathbb{R}\\) for each human-object pair:" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.882, + 0.825, + 0.899 + ], + "angle": 0, + "content": "\\[\ns _ {p} = \\mathcal {F} _ {P} (\\hat {v} _ {p}); \\quad s _ {b} = \\mathcal {F} _ {B} (\\hat {v} _ {p}) \\tag {2}\n\\]" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.383, + 0.924 + ], + "angle": 0, + "content": "2For details c.f. the appendix C" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.185 + ], + "angle": 0, + "content": "To train the model under weak supervision (see Sec. 3.3), we further aggregate the pairwise interaction scores into image-level interaction scores. Assume we have \\(M\\) pairs of human-object proposals for a given image, and denote the interaction scores for the \\(m\\)-th pair as \\(s_p^m\\). 
We first concatenate all the interaction scores to compose a bag \\(S = [s_p^1; \\ldots; s_p^M] \\in \\mathbb{R}^{M \\cdot A}\\), then we maximize over all pairs to obtain the image-wise interaction scores: \\(\\tilde{s}_p = \\max_m S\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.187, + 0.37, + 0.202 + ], + "angle": 0, + "content": "3.2.5 MODEL INFERENCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.208, + 0.827, + 0.247 + ], + "angle": 0, + "content": "During model inference, we do not use the local interaction scores \\( s_p \\) directly. Instead, we normalize \\( S \\) with a Softmax operation defined on all pairs: \\( \\bar{S} = \\text{Softmax}(S) \\), and then compute the normalized" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.247, + 0.827, + 0.289 + ], + "angle": 0, + "content": "pairwise interaction scores \\( e_p = \\sigma(\\tilde{s}_p) \\cdot \\bar{s}_p \\), where \\( \\bar{s}_p \\) is a row from \\( \\bar{S} \\) and \\( \\sigma \\) is Sigmoid function. This has the effect of measuring the contribution of a given pair, in case of multiple pairs in an image share the same interaction." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.296, + 0.828, + 0.34 + ], + "angle": 0, + "content": "The final interaction score \\( s_{h,o}^{a} \\) for human-object pair \\( (\\mathbf{x}_h,\\mathbf{x}_o) \\) combines multiple scores, including the global HOI scores \\( s_g \\), the normalized pairwise interaction scores \\( e_p \\), and the relatedness score \\( s_b \\). The overall HOI score \\( R_{h,o}^{a} \\) is a combination of the interaction score and the object detection scores." 
+ }, + { + "type": "equation", + "bbox": [ + 0.317, + 0.348, + 0.826, + 0.368 + ], + "angle": 0, + "content": "\\[\ns _ {h, o} ^ {a} = \\sigma \\left(s _ {g} ^ {a, c _ {o}}\\right) \\cdot e _ {p} ^ {a} \\cdot \\sigma \\left(s _ {b}\\right); \\quad R _ {h, o} ^ {a} = \\left(s _ {h} \\cdot s _ {o}\\right) ^ {\\gamma} \\cdot s _ {h, o} ^ {a} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.369, + 0.828, + 0.412 + ], + "angle": 0, + "content": "where \\( s_g^{a,c_o} \\) is the HOI score corresponding to \\( a \\)-th interaction and \\( c_o \\)-th object category in \\( s_g \\), \\( e_p^a \\) is the score of \\( a \\)-th interaction in \\( e_p \\), and \\( \\gamma \\) is a hyper-parameter to balance the scores (Zhang et al., 2021c; Li et al., 2019b)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.419, + 0.482, + 0.433 + ], + "angle": 0, + "content": "3.3 LEARNING WITH WEAK SUPERVISION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.442, + 0.827, + 0.527 + ], + "angle": 0, + "content": "To train our deep network in a weakly supervised setting, we use a multi-task loss defined on three different levels. Specifically, our overall loss function \\(\\mathcal{L}\\) consists of three terms: i) an image-wise HOI recognition loss \\(\\mathcal{L}_g\\) to adapt CLIP features to the task of human-object interaction detection; ii) a pairwise interaction classification loss \\(\\mathcal{L}_p\\) to guide the knowledge transfer towards fine-grained relation-aware representations; and iii) a self-taught relatedness classification loss \\(\\mathcal{L}_b\\) to prune non-interacting human-object combinations. 
Formally, the overall loss is written as:" + }, + { + "type": "equation", + "bbox": [ + 0.431, + 0.534, + 0.826, + 0.55 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {g} + \\mathcal {L} _ {p} + \\mathcal {L} _ {b} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.55, + 0.825, + 0.581 + ], + "angle": 0, + "content": "Image-wise HOI recognition loss \\(\\mathcal{L}_g\\): Given the HOI scores \\(s_g\\) and ground-truth HOI categories \\(\\mathcal{R}\\), \\(\\mathcal{L}_g\\) is a standard binary cross-entropy loss for multi-label classification: \\(\\mathcal{L}_g = L_{BCE}(s_g, \\mathcal{R})\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.586, + 0.827, + 0.645 + ], + "angle": 0, + "content": "Pairwise interaction classification loss \\(\\mathcal{L}_p\\): We adopt a MIL strategy that first aggregates the pairwise interaction scores and supervises this with image-level interaction labels as \\(\\mathcal{A} = \\{a^*\\}\\). Given the image-wise interaction scores \\(\\tilde{s}_p\\), \\(\\mathcal{L}_p\\) is a standard binary cross-entropy loss for multi-label classification as: \\(\\mathcal{L}_p = L_{BCE}(\\tilde{s}_p, \\mathcal{A})\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.65, + 0.828, + 0.735 + ], + "angle": 0, + "content": "Self-taught relatedness classification loss \\(\\mathcal{L}_b\\): As human-object associations are not annotated, we devise a novel pseudo relatedness label generation mechanism for training a self-taught binary classifier to identify valid human-object associations. Specifically, we observe that the human-object pairs with confident interaction scores are often associated after a short period of initial training without self-taught classification loss. Motivated by this, we use the interaction scores \\(s_p\\) from the model under training to supervise the relatedness classification." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.74, + 0.829, + 0.91 + ], + "angle": 0, + "content": "Concretely, we generate pseudo labels \\(\\mathcal{B} = \\{b_1,\\dots,b_M\\}\\) for all human-object pairs in an image, where \\(b_{m}\\in \\{0,1\\}\\) indicates the relatedness for the \\(m\\) -th combination. To this end, as illustrated in Fig.2(b), we first propose a binary mask \\(Z\\in \\{0,1\\}^{M\\cdot A}\\) for all interaction scores \\(S\\) with respect to the ground-truth object categories \\(\\mathcal{C} = \\{c^*\\}\\). For each human-object pair where the object label \\(c_{o}\\) is included in \\(\\mathcal{C}\\), we consider it as a potential interactive combination and thus assign the corresponding row in \\(Z\\) as 1, and other rows as 0. For the latter, we also immediately set \\(b_{m} = 0\\). Then we generate pairwise scores \\(t^a\\in \\mathbb{R}^M\\) for each ground-truth interaction \\(a^*\\) by selecting the corresponding row from \\(S\\odot Z\\). The pseudo label for the pair with the highest score is assigned as 1, i.e., \\(m_a = \\arg \\max_{m}t^a\\) and \\(b_{m_a} = 1\\). We only select one positive pair3 for each \\(a^*\\). Finally, \\(\\mathcal{L}_b\\) is defined as a binary cross-entropy loss: \\(\\mathcal{L}_b = \\sum_m L_{BCE}(s_b^m,b_m)\\), where \\(s_b^m\\) is the relatedness score for the \\(m\\) -th pair." + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.91, + 0.48, + 0.925 + ], + "angle": 0, + "content": "3We also explore top-K selection in Appendix F" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.825, + 0.165 + ], + "angle": 0, + "content": "Table 1: mAP comparison on HICO-DET and V-COCO test set. 
- denotes the results are not available. * stands for the method we re-evaluate with the correct evaluation protocol (see Appendix.I for details) and †means our re-implementation. For V-COCO, all object detectors are pretrained on MSCOCO dataset by default, and details about the evaluation metrics APS1&2 c.f. Appendix H. IN-1K denotes ImageNet with 1000 classes." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.165, + 0.825, + 0.354 + ], + "angle": 0, + "content": "
MethodsBackboneDetectorHICO-DET (%)V-COCO (%)
FullRareNon-RareAProleAProle
supervised
iCAN (Gao et al., 2018)RN50 (IN-1K&COCO)FRCNN (COCO)14.8410.4516.1545.3052.40
PMFNet (Wan et al., 2019)RN50-FPN (IN-1K&COCO)FRCNN (COCO)17.4615.5618.0052.00-
TIN (Li et al., 2019b)RN50-FPN (IN-1K&COCO)FRCNN (COCO)17.2213.5118.3247.8054.20
DJ-RN (Li et al., 2020a)RN50 (IN-1K&COCO)FRCNN (COCO)21.3418.5321.1853.3060.30
IDN (Li et al., 2020b)RN50 (IN-1K&COCO)FRCNN (HICO-DET)26.2922.6127.3953.3060.30
SCG (Zhang et al., 2021c)RN50-FPN (IN-1K&HICO-DET)FRCNN (HICO-DET)31.3324.7233.3154.2060.90
HOTR (Kim et al., 2021)RN50+Transformer (IN-1K&COCO)DETR (HICO-DET)25.1017.3427.4255.2064.40
QPIC (Tamura et al., 2021)RN101+Transformer (IN-1K&COCO)DETR (COCO)29.9023.9231.6958.3060.70
CATN (Dong et al., 2022)RN50+Transformer (IN-1K&HICO-DET&COCO)DETR (HICO-DET)31.8625.1533.8460.10-
MSTR (Kim et al., 2022)RN50 + Transformer (IN-1K&COCO)DETR(HICO-DET)31.1725.3133.9262.0065.20
DisTr (Zhou et al., 2022)RN50+Transformer (IN-1K&COCO)DETR (HICO-DET)31.7527.4533.0366.2068.50
SSRT (Iftekhar et al., 2022)R101+Transformer (IN-1K&COCO)DETR (COCO)31.3424.3133.3265.0067.10
GEN-VLKT (Liao et al., 2022)RN101+Transformer (IN-1K&HICO-DET)DETR (HICO-DET)34.9531.1836.0863.5865.93
between supervised & weakly-supervised setting, learning with image-level HOIs and box annotations
AlignFormer (Kilickaya & Smeulders, 2021)RN101+Transformer (IN-1K&HICO-DET)DETR (HICO-DET)20.8518.2321.6415.8216.34
weakly-supervised
Explanation-HOI* (Baldassarre et al., 2020)ResNeXt101 (IN-1K&COCO)FRCNN (COCO)10.638.7111.20--
MX-HOI (Kumaraswamy et al., 2021)RN101 (IN-1K&COCO)FRCNN (COCO)16.1412.0617.50--
PPR-FCN† (Zhang et al., 2017)RN50 (CLIP dataset)FRCNN (COCO)17.5515.6918.41--
oursRN50 (CLIP dataset)FRCNN (COCO)22.8922.4123.0342.9748.06
oursRN101 (CLIP dataset)FRCNN (COCO)25.7024.5226.0544.7449.97
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.369, + 0.329, + 0.383 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.396, + 0.377, + 0.41 + ], + "angle": 0, + "content": "4.1 EXPERIMENTAL SETUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.419, + 0.827, + 0.504 + ], + "angle": 0, + "content": "Datasets: We benchmark our model on two public datasets: HICO-DET and V-COCO. HICO-DET consists of 47776 images (38118 for training and 9658 for test). It has \\( N = 600 \\) HOI categories, which are composed of \\( C = 80 \\) common objects (the same as MSCOCO (Lin et al., 2014)) and \\( A = 117 \\) unique interaction categories. V-COCO is a subset of MSCOCO, consisting of 2533 images for training, 2867 for validation and 4946 for test. It has 16199 human instances, each annotated with binary labels for \\( A = 26 \\) interaction categories." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.51, + 0.825, + 0.567 + ], + "angle": 0, + "content": "Evaluation Metric: Following (Chao et al., 2015), we use mean average precision (mAP) to evaluate HOI detection performance. A human-object pair is considered as positive when both predicted human and object boxes have at least 0.5 IoU with their ground-truth boxes, and the HOI class is classified correctly." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.577, + 0.409, + 0.59 + ], + "angle": 0, + "content": "4.2 IMPLEMENTATION DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.597, + 0.825, + 0.667 + ], + "angle": 0, + "content": "We use an off-the-shelf Faster R-CNN (Ren et al., 2015) pretrained on MSCOCO to generate at most 100 object candidates for each image. For V-COCO, it is worth noting that we train the object detector by removing the images in MSCOCO that overlap with V-COCO to prevent information leakage. The backbone network is initialized with the visual encoder from CLIP-RN101 model and the feature dimension \\( D = 1024 \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.674, + 0.827, + 0.757 + ], + "angle": 0, + "content": "For model learning, we set the detection score weight \\(\\gamma = 2.8\\) as default by following previous works (Zhang et al., 2021c; Li et al., 2019b), then optimize the entire network with AdamW and an initial learning rate of 1e-5 for backbone parameters and 1e-4 for others. We detach the parameters of the knowledge bank on the local branch for better model learning. We train up to 60K iterations with batch-size 24 in each on 4 NVIDIA 2080TI GPUs, and decay the learning rate by 10 times in 12K and 24K iteration." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.761, + 0.39, + 0.775 + ], + "angle": 0, + "content": "4.3 QUANTITATIVE RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.827, + 0.926 + ], + "angle": 0, + "content": "For HICO-DET (Tab.1), our approach outperforms the previous state of the arts on the weakly supervised setting by a clear margin, achieving 22.89 mAP with ResNet-50 and 25.70 mAP with ResNet-101 as backbone. For a fair comparison, we also re-implement PPR-FCN with CLIP visual encoder. The results show that we still outperform PPR-FCN by a sizeable margin, which validates the superiority of our framework. Besides, we even perform comparably with HOTR and IDN under an inferior experimental setting where HOTR adopts a more advanced transformer encoder-decoder architecture, and both methods are trained with strong supervision. Furthermore, the mAP gap between Rare (training annotations \\(< 10\\)) and Non-rare HOI classes in our results is much smaller than other methods, demonstrating the superior generalization capability of our HOI representation for solving the long-tailed distribution issue. 
In detail, we achieve a 0.62 mAP gap with ResNet-50" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.827, + 0.165 + ], + "angle": 0, + "content": "Table 2: Ablation study on HICO-DET dataset. \"RN50-FPN(COCO)\" denotes the backbone initialized with Faster R-CNN parameters pretrained on MSCOCO dataset while \"CLIP RN50\" stands for the backbone initialized with CLIP visual encoder. Besides, we construct the knowledge bank \\(\\mathcal{W}_T\\) with random initialization, or computing HOI prompts by RoBERTa or CLIP text transformer." + }, + { + "type": "table", + "bbox": [ + 0.207, + 0.165, + 0.794, + 0.299 + ], + "angle": 0, + "content": "
MethodsParameter initializationCLIP KnowledgemAP (%)
Backboneknowledge bankHOI recognitionKTNscore fusionSRCFullRareNon-Rare
baselineCLIP RN50-----19.5216.5820.40
Exp 1CLIP RN50CLIP Text---20.3118.3420.90
Exp 2CLIP RN50CLIP Text✓ (freeze WT)---20.0918.2320.64
Exp 3CLIP RN50CLIP Text--20.8618.4021.60
Exp 4CLIP RN50CLIP Text-22.4020.7022.90
Exp 5CLIP RN50----19.8817.4520.61
Exp 6CLIP RN50CLIP Text--20.7519.3821.16
Exp 7CLIP RN50CLIP Text-21.5320.0521.97
oursCLIP RN50CLIP Text22.8922.4123.03
Exp 8RN50-FPN (COCO)-----19.4416.2020.41
Exp 9RN50-FPN (COCO)random19.6115.5720.82
Exp 10RN50-FPN (COCO)RoBERTa20.4516.4621.65
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.325, + 0.825, + 0.355 + ], + "angle": 0, + "content": "and 1.53 with ResNet-101 backbone, which is much smaller than AlignFormer (3.14) and PPR-FCN (2.64), and supervised methods SSRT (9.01) and GEN-VLKT (4.9)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.361, + 0.827, + 0.418 + ], + "angle": 0, + "content": "For V-COCO dataset, we report the performance of \\(\\mathrm{AP}_{role}\\) in both scenario1 and scenario2 for a complete comparison, which are 42.97 / 48.06 \\(\\mathrm{AP}_{role}\\) with ResNet-50 and 44.74 / 49.97 \\(\\mathrm{AP}_{role}\\) with ResNet-101 as backbone. As shown in Tab.1, our model achieves significant improvement compared with AlignFormer, and even is comparable with supervised methods TIN and iCAN." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.422, + 0.345, + 0.435 + ], + "angle": 0, + "content": "4.4 ABLATION STUDY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.441, + 0.825, + 0.471 + ], + "angle": 0, + "content": "In this section, we mainly validate the effectiveness of each component with detailed ablation studies on HICO-DET dataset. We use ResNet-50 as the backbone network to reduce experimental costs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.48, + 0.826, + 0.525 + ], + "angle": 0, + "content": "**Baseline:** The baseline adopts the visual encoder from CLIP-RN50 to generate the vanilla HOI representation \\( v_{p} \\), which is directly used to predict the interaction scores \\( s_{p} \\). Only pairwise interaction classification loss \\( \\mathcal{L}_{p} \\) is used for model learning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.533, + 0.827, + 0.645 + ], + "angle": 0, + "content": "HOI recognition: We augment the baseline with a HOI recognition network and observe the full mAP improves from 19.52 to 20.31, as reported in Exp 1 of Tab. 2. 
It suggests that the learnable knowledge bank \\(\\mathcal{W}_T\\) serves as a powerful classifier to perform image-level HOI recognition and update the visual encoder for better HOI representation. We visualize the learned parameters of knowledge bank in Appendix D to demonstrate its effectiveness. Furthermore, as in Exp 2, the performance slightly decreases from 20.31 to 20.09 when we freeze the training of the knowledge bank, indicating that joint learning of visual features and the knowledge bank is more appropriate for HOI detection." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.655, + 0.827, + 0.727 + ], + "angle": 0, + "content": "Knowledge Transfer Network (KTN): KTN explicitly transfers the CLIP meta-knowledge to pairwise HOI features. As a result, it contributes 0.55 Full mAP improvement (Exp 3 v.s. Exp 1) and most of the performance gains come from Non-rare classes. This result shows KTN is capable of extracting discriminative features from the relational knowledge bank to our HOI representation. We also study the effectiveness of the attention mechanism of KTN in Appendix E." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.827, + 0.807 + ], + "angle": 0, + "content": "Score fusion: In Tab. 2, we largely improve the Full mAP from 20.86 (Exp 3) to 22.40 (Exp 4) by fusing the global HOI scores \\( s_g \\) to pairwise interaction score \\( s_p \\). As the HOI recognition network seamlessly inherits the visual-linguistic features from CLIP and directly adopts image labels as supervision, the global interaction scores are pretty accurate and largely enhance the pairwise scores, demonstrating its strong capabilities to cope with long-tailed and fine-grained HOI recognition." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.816, + 0.826, + 0.873 + ], + "angle": 0, + "content": "Self-taught Relatedness Classification (SRC): Self-taught classification aims to identify the relatedness between human and objects. 
The improvements from Exp 4 to ours show the effectiveness of our self-taught strategy, which is capable of figuring out the irrelevant human-object pairs and suppressing their interaction scores during inference." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Combining KTN & SRC: The ablation results of Exp 5-7 in Tab. 2 show the KTN and SRC are able to facilitate each other. In detail, the SRC obtains 0.49 Full mAP improvement when the KTN is introduced (ours v.s. Exp 4), which is only 0.36 without KTN (Exp 5 v.s. baseline). Similarly," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image_caption", + "bbox": [ + 0.249, + 0.103, + 0.267, + 0.113 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.115, + 0.337, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.201, + 0.245, + 0.224 + ], + "angle": 0, + "content": "wash_motorcycle \nours: 0.18, 0.355 \nbaseline: 0.0189" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.102, + 0.499, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.343, + 0.203, + 0.466, + 0.22 + ], + "angle": 0, + "content": "hold_horse:0.062,0.397,0.998 ride_horse:0.405,0.966,0.998" + }, + { + "type": "image_caption", + "bbox": [ + 0.575, + 0.102, + 0.591, + 0.113 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.113, + 0.66, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.203, + 0.655, + 0.212 + ], + "angle": 0, + "content": "sit_on_motorcycle: 0.515, 0.033, 0.950" + }, + { + "type": "image_caption", + 
"bbox": [ + 0.734, + 0.103, + 0.75, + 0.113 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.113, + 0.822, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.664, + 0.202, + 0.818, + 0.22 + ], + "angle": 0, + "content": "sit_at_dining_table: 0.006, 0.993, 0.079 \nsit_at_dining_table: 0.232, 0.993, 0.994" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.225, + 0.337, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.18, + 0.313, + 0.278, + 0.338 + ], + "angle": 0, + "content": "paint_fire_hydrant: \nours: 0.203, 0.505, 0.955 \nbaseline: 0.0027" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.225, + 0.501, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.341, + 0.315, + 0.469, + 0.333 + ], + "angle": 0, + "content": "repair truck: 0.23, 0.055, 0.979 \ninspect truck: 0.48, 0.138, 0.979" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.224, + 0.661, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.318, + 0.661, + 0.327 + ], + "angle": 0, + "content": "stand_on_skateboard: 0.009, 0.001, 0.98" + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.225, + 0.822, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.668, + 0.316, + 0.784, + 0.334 + ], + "angle": 0, + "content": "hold_kite:0.039,0.892,0.238 hold_kite:0.478,0.892,0.995" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.345, + 0.825, + 0.41 + ], + "angle": 0, + "content": "Figure 3: Visualization of HOI detection results on HICO-DET test set. Red scores denote the negative HOI predictions. 
We mainly demonstrate the model's capabilities on four aspects: (a) coping with imbalanced HOI distribution; (b) distinguishing subtle differences among interaction types; (c) suppressing background HOI classes, and (d) pruning irrelevant human-object associations. The numbers reported are normalized pairwise interaction score, global HOI score and relatedness score." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.429, + 0.825, + 0.459 + ], + "angle": 0, + "content": "the KTN contributes 0.78 Full mAP improvement with SRC (Exp 7 v.s. Exp 6), which is only 0.55 without SRC (Exp 3 v.s. Exp 1)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.47, + 0.827, + 0.583 + ], + "angle": 0, + "content": "Parameter initialization: Our visual encoder and knowledge bank are both initialized from CLIP. We also explore different parameter initialization strategy in Exp 8-10. Specifically, we initialize the visual encoder with a ResNet50-FPN pretrained on COCO detection task for the baseline (Exp 8), and the knowledge bank with random parameters (Exp 9) or embeddings of HOI labels from RoBERTa model (Exp 10) for the final model. We observe severe drops with all these initialization methods compared with ours, demonstrating the effectiveness and generalization ability of CLIP model. It is worth noting that the mAP of Rare classes decreases from 16.20 in Exp 8 to 15.57 in Exp 9, which suggests the randomly initialized knowledge bank even aggravates the imbalance issue in final model." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.601, + 0.379, + 0.615 + ], + "angle": 0, + "content": "4.5 QUALITATIVE RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.624, + 0.827, + 0.779 + ], + "angle": 0, + "content": "We show some qualitative results of our method in Fig.3. For each HOI prediction, we report (i) normalized pairwise interaction score, (ii) global HOI score and (iii) relatedness score for ours, and only pairwise interaction score for baseline. 
In Fig.3(a), our interaction scores are more confident than baseline in Rare HOI classes, demonstrating the generalization ability of our CLIP-guided HOI representation. Besides, when incorporating relational knowledge bank into pairwise HOI representation, our method is capable of distinguishing the subtle differences among similar HOIs in Fig.3(b) (e.g., repair_truck:0.23 v.s. inspect_truck:0.48 in the bottom figure). Moreover, in Fig.3(c), the global branch suppresses background HOIs by predicting low global scores for them (e.g., the global HOI score is 0.033 for sit_on_motorcycle while the ground-truth is sit_on_bicycle). Finally, in Fig.3(d), our self-taught relatedness classification strategy shows strong capability at recognizing the ambiguous human-object associations (e.g., 0.079 v.s. 0.994 in the upper figure)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.796, + 0.32, + 0.811 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In this paper, we propose a bi-level knowledge integration strategy that incorporates the prior knowledge from CLIP for weakly-supervised HOI detection. Specifically, we exploit CLIP textual embeddings of HOI labels as a relational knowledge bank, which is adopted to enhance the HOI representation with an image-wise HOI recognition network and a pairwise knowledge transfer network. We further propose the addition of a self-taught binary pairwise relatedness classification loss to overcome ambiguous human-object association. Finally, our approach achieves the new state of the art on both HICO-DET and V-COCO benchmarks under the weakly supervised setting." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.36, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENT" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.134, + 0.827, + 0.178 + ], + "angle": 0, + "content": "We acknowledge funding from Flemish Government under the Onderzoeksprogramma Artificiele Intelligentie (AI) Vlaanderen programme, Shanghai Science and Technology Program 21010502700 and Shanghai Frontiers Science Center of Human-centered Artificial Intelligence." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.197, + 0.347, + 0.213 + ], + "angle": 0, + "content": "ETHICS STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.228, + 0.825, + 0.271 + ], + "angle": 0, + "content": "Hereby, we consciously assure that our study is original work which has not been previously published elsewhere, and is not currently being considered for publication elsewhere. We do not have ethics risks as mentioned in the author guidelines." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.291, + 0.44, + 0.307 + ], + "angle": 0, + "content": "REPRODUCIBILITY STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.322, + 0.825, + 0.352 + ], + "angle": 0, + "content": "We use publicly available benchmarks, HICO-DET and V-COCO, to validate our method. Code is available at https://github.com/bobwan1995/Weakly-HOI." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.371, + 0.289, + 0.387 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.394, + 0.826, + 0.424 + ], + "angle": 0, + "content": "Federico Baldassarre, Kevin Smith, Josephine Sullivan, and Hossein Azizpour. 
Explanation-based weakly-supervised learning of visual relations with graph networks. In ECCV, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.431, + 0.826, + 0.462 + ], + "angle": 0, + "content": "Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.469, + 0.827, + 0.499 + ], + "angle": 0, + "content": "Yu-Wei Chao, Zhan Wang, Yugeng He, Jiaxuan Wang, and Jia Deng. HICO: A benchmark for recognizing human-object interactions in images. In ICCV, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.506, + 0.827, + 0.535 + ], + "angle": 0, + "content": "Yu-Wei Chao, Yunfan Liu, Xieyang Liu, Huayi Zeng, and Jia Deng. Learning to detect human-object interactions. In WACV, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.544, + 0.827, + 0.587 + ], + "angle": 0, + "content": "Leizhen Dong, Zhimin Li, Kunlun Xu, Zhijun Zhang, Luxin Yan, Sheng Zhong, and Xu Zou. Category-aware transformer network for better human-object interaction detection. arXiv preprint arXiv:2204.04911, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.595, + 0.827, + 0.638 + ], + "angle": 0, + "content": "Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. arXiv preprint arXiv:2203.14940, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.647, + 0.825, + 0.676 + ], + "angle": 0, + "content": "Chen Gao, Yuliang Zou, and Jia-Bin Huang. ican: Instance-centric attention network for human-object interaction detection. In BMVC, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.684, + 0.825, + 0.713 + ], + "angle": 0, + "content": "Chen Gao, Jiarui Xu, Yuliang Zou, and Jia-Bin Huang. Drg: Dual relation graph for human-object interaction detection. 
In ECCV, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.721, + 0.825, + 0.751 + ], + "angle": 0, + "content": "Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Open-vocabulary image segmentation. arXiv preprint arXiv:2112.12143, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.759, + 0.825, + 0.79 + ], + "angle": 0, + "content": "Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.797, + 0.827, + 0.825 + ], + "angle": 0, + "content": "Saurabh Gupta and Jitendra Malik. Visual semantic role labeling. arXiv preprint arXiv:1505.04474, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.834, + 0.825, + 0.864 + ], + "angle": 0, + "content": "Tanmay Gupta, Alexander Schwing, and Derek Hoiem. No-frills human-object interaction detection: Factorization, layout encodings, and training techniques. In ICCV, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.872, + 0.825, + 0.902 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.909, + 0.82, + 0.925 + ], + "angle": 0, + "content": "Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In ICCV2017, 2017." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.394, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.148 + ], + "angle": 0, + "content": "Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 961-970, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.827, + 0.2 + ], + "angle": 0, + "content": "ASM Iftekhar, Hao Chen, Kaustav Kundu, Xinyu Li, Joseph Tighe, and Davide Modolo. What to look at and where: Semantic and spatial refined transformer for detecting human-object interactions. arXiv preprint arXiv:2204.00746, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.207, + 0.827, + 0.239 + ], + "angle": 0, + "content": "Maximilian Ilse, Jakub Tomczak, and Max Welling. Attention-based deep multiple instance learning. In ICML, pp. 2127-2136, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.827, + 0.276 + ], + "angle": 0, + "content": "Mert Kilickaya and Arnold Smeulders. Human-object interaction detection via weak supervision. arXiv preprint arXiv:2112.00492, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.285, + 0.825, + 0.316 + ], + "angle": 0, + "content": "Bumsoo Kim, Junhyun Lee, Jaewoo Kang, Eun-Sol Kim, and Hyunwoo J. Kim. Hotr: End-to-end human-object interaction detection with transformers. In CVPR, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.324, + 0.827, + 0.366 + ], + "angle": 0, + "content": "Bumsoo Kim, Jonghwan Mun, Kyoung-Woon On, Minchul Shin, Junhyun Lee, and Eun-Sol Kim. Mstr: Multi-scale transformer for end-to-end human-object interaction detection. arXiv preprint arXiv:2203.14709, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.375, + 0.825, + 0.405 + ], + "angle": 0, + "content": "Suresh Kirthi Kumaraswamy, Miaojing Shi, and Ewa Kijak. Detecting human-object interaction with mixed supervision. In WACV, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.414, + 0.827, + 0.457 + ], + "angle": 0, + "content": "Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yan-Feng Wang, and Cewu Lu. Transferable interactiveness prior for human-object interaction detection. In CVPR, 2019a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.466, + 0.827, + 0.51 + ], + "angle": 0, + "content": "Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yanfeng Wang, and Cewu Lu. Transferable interactiveness knowledge for human-object interaction detection. In CVPR, 2019b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.518, + 0.825, + 0.549 + ], + "angle": 0, + "content": "Yong-Lu Li, Xinpeng Liu, Han Lu, Shiyi Wang, Junqi Liu, Jiefeng Li, and Cewu Lu. Detailed 2d-3d joint representation for human-object interaction. In CVPR, 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.557, + 0.825, + 0.587 + ], + "angle": 0, + "content": "Yong-Lu Li, Xinpeng Liu, Xiaogqian Wu, Yizhuo Li, and Cewu Lu. Hoi analysis: Integrating and decomposing human-object interaction. In NeurIPS, 2020b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.595, + 0.827, + 0.638 + ], + "angle": 0, + "content": "Yue Liao, Aixi Zhang, Miao Lu, Yongliang Wang, Xiaobo Li, and Si Liu. Gen-vlkt: Simplify association and enhance interaction understanding for hoi detection. 
arXiv preprint arXiv:2203.13954, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.647, + 0.825, + 0.678 + ], + "angle": 0, + "content": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.686, + 0.825, + 0.73 + ], + "angle": 0, + "content": "Wen Liu, Weixin Luo, Dongze Lian, and Shenghua Gao Gao. Future frame prediction for anomaly detection - a new baseline. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.738, + 0.825, + 0.781 + ], + "angle": 0, + "content": "Hitoshi Nishimura, Satoshi Komorita, Yasutomo Kawanishi, and Hiroshi Murase. Sdof-tracker: Fast and accurate multiple human tracking by skipped-detection and optical-flow. arXiv preprint arXiv:2106.14259, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.79, + 0.825, + 0.835 + ], + "angle": 0, + "content": "Guansong Pang, Cheng Yan, Chunhua Shen, van den Hengel Anton, and Xiao Bai. Self-trained deep ordinal regression for end-to-end video anomaly detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.843, + 0.825, + 0.873 + ], + "angle": 0, + "content": "Alessandro Prest, Cordelia Schmid, and Vittorio Ferrari. Weakly supervised learning of interactions between humans and objects. IEEE TPAMI, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.881, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021a." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.148 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.185 + ], + "angle": 0, + "content": "Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. arXiv preprint arXiv:1506.01497, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.193, + 0.825, + 0.223 + ], + "angle": 0, + "content": "Masato Tamura, Hiroki Ohashi, and Tomoaki Yoshinaga. Qpic: Query-based pairwise human-object interaction detection with image-wide contextual information. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.231, + 0.826, + 0.274 + ], + "angle": 0, + "content": "Tina, Anmol Kumar Sharma, Siddharth Tomar, and Kapil Gupta. Various approaches of human activity recognition: A review. In International Conference on Computing Methodologies and Communication(ICCMC), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.282, + 0.825, + 0.311 + ], + "angle": 0, + "content": "Oytun Ulutan, A S M Iftekhar, and B. S. Manjunath. Vsgnet: Spatial attention network for detecting human object interactions using graph convolutions. In CVPR, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.32, + 0.825, + 0.35 + ], + "angle": 0, + "content": "Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 2008. URL http://jmlr.org/papers/v9/vandermaaten08a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.357, + 0.825, + 0.388 + ], + "angle": 0, + "content": "Mrabti Wafae, Baibai Kaoutar, Bellach Benaissa, Oulad Haj Thami Rachid, and Tairi Hamid. Human motion tracking: A comparative study. Procedia Computer Science, 148:145-153, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.395, + 0.825, + 0.425 + ], + "angle": 0, + "content": "Bo Wan, Desen Zhou, Yongfei Liu, Rongjie Li, and Xuming He. Pose-aware multi-level feature network for human object interaction detection. In ICCV, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.433, + 0.825, + 0.463 + ], + "angle": 0, + "content": "Aixi Zhang, Yue Liao, Si Liu, Miao Lu, Yongliang Wang, Chen Gao, and Xiaobo Li. Mining the benefits of two-stage and one-stage hoi detection. NeurIPS, 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.471, + 0.827, + 0.513 + ], + "angle": 0, + "content": "Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Efficient two-stage detection of human-object interactions with a novel unary-pairwise transformer. arXiv preprint arXiv:2112.01838, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.523, + 0.825, + 0.552 + ], + "angle": 0, + "content": "Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Spatially conditioned graphs for detecting human-object interactions. In ICCV, 2021c." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.56, + 0.825, + 0.59 + ], + "angle": 0, + "content": "Hanwang Zhang, Zawlin Kyaw, Jinyang Yu, and Shih-Fu Chang. Ppr-fcn: Weakly supervised visual relation detection via parallel pairwise r-fcn. In ICCV, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.598, + 0.827, + 0.64 + ], + "angle": 0, + "content": "Desen Zhou, Zhichao Liu, Jian Wang, Leshan Wang, Tao Hu, Errui Ding, and Jingdong Wang. Human-object interaction detection via disentangled transformer. arXiv preprint arXiv:2204.09290, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.649, + 0.825, + 0.679 + ], + "angle": 0, + "content": "Penghao Zhou and Mingmin Chi. Relation parsing neural network for human-object interaction detection. In ICCV, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.687, + 0.825, + 0.717 + ], + "angle": 0, + "content": "Tianfei Zhou, Wenguan Wang, Siyuan Qi, Haibin Ling, and Jianbing Shen. Cascaded human-object interaction recognition. In CVPR, 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.717 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.267, + 0.119 + ], + "angle": 0, + "content": "APPENDIX" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.828, + 0.206 + ], + "angle": 0, + "content": "In this appendix, we first describe the spatial feature generation, and then supplement more experimental results of different CLIP knowledge integration strategies for weakly-supervised HOI detection. For Explanation-HOI (Baldassarre et al., 2020), we further clarify the difference between their mAP evaluation protocol and the standard one. Finally, we demonstrate the limitations, potential negative societal impacts as well as the result error bars of our method." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.227, + 0.625, + 0.243 + ], + "angle": 0, + "content": "A THE ADVANTAGE OF OUR HOI REPRESENTATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.259, + 0.828, + 0.4 + ], + "angle": 0, + "content": "To verify the improvement obtained with our CLIP-based HOI representation, we visualize the HOI representation \\(\\hat{v}_p\\) in feature space with t-SNE(van der Maaten & Hinton, 2008). For clarity, we randomly sample 80 HOI categories, and collect 50 samples for each category. For comparison, we also demonstrate the object-based HOI representation derived from 'Exp 9' in Tab.2 (i.e., the model without CLIP knowledge and using a random knowledge bank). As shown in Fig.4, we observe that CLIP-based HOI representations for different HOI categories are diverse and well separated in feature space, which is better for HOI detection. In contrast, the object-based representations are not well separated in feature space (see the red box region in Fig.4b). Besides, the experimental results in the ablation study (ours v.s. 'Exp 9') also validate the advantage of CLIP-based HOI representation, improving full mAP from 19.61 to 22.89." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.42, + 0.618, + 0.436 + ], + "angle": 0, + "content": "B ABLATION ON CLIP KNOWLEDGE INTEGRATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.452, + 0.827, + 0.578 + ], + "angle": 0, + "content": "To further demonstrate the superiority of our CLIP knowledge integration strategy, we study several proven techniques for CLIP knowledge transfer in Tab. 3. In \\( Abl1 \\), for each human-object pair, we directly infer the HOI scores with CLIP by computing the cross-modal similarities between their visual union region and the HOI prompts. Without introducing any HOI priors, the promising results indicate the powerful generalization ability of CLIP and motivate the design of incorporating CLIP knowledge for weakly-supervised HOI detection. 
In \\( Abl2 \\), we duplicate the experiment setting and results from \\( Exp8 \\) in Tab. 2 of the main paper. It is a simplified baseline model but initializes the visual encoder with a ResNet50-FPN pretrained on COCO detection task. Then we introduce three different CLIP knowledge transfer strategies (Abl 3-4 and ours) based on \\( Abl2 \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.584, + 0.825, + 0.613 + ], + "angle": 0, + "content": "In Abl 3, we directly enhance baseline scores in Abl 2 with the CLIP similarity scores in Abl 1 on the inference stage. Without bells and whistles, we obtain 1.12 gain in Full mAP." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.619, + 0.829, + 0.794 + ], + "angle": 0, + "content": "Furthermore, in Abl 4, we adopt a similar knowledge transfer strategy as GEN-VLKT (Liao et al., 2022), where we initialize the HOI classifier \\(\\mathcal{F}_P\\) with HOI prompt and regularize the global HOI representation with CLIP image feature \\(v_{g}\\). In detail, we first compute the global HOI representation \\(v_{mean}\\) with mean pooling on all pairwise HOI representations, i.e., \\(v_{mean} = MeanPool(\\{v_p^m\\}_{m=1}^M)\\). Here \\(v_p^m\\) is the holistic HOI representation (c.f. Sec. 3.2.3 in the main paper) for \\(m\\)-th human-object pair. Then we develop an additional \\(L2\\) loss \\(\\mathcal{L}_{reg}\\) to transfer the knowledge from CLIP to HOI representations: \\(\\mathcal{L}_{reg} = L2(v_{mean}, v_g)\\). The performance even decreases slightly from 19.44 to 19.39, which might be caused by the incompatibility of parameters between backbone network (ResNet50-FPN pretrained on COCO) and \\(\\mathcal{F}_P\\) (HOI prompt embeddings from CLIP). When directly applying the knowledge transfer strategy of GEN-VLKT to a weakly-supervised setting, it is difficult to map the unmatched HOI representation and classification weights to a joint space as the supervisory signals are noisy." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.8, + 0.825, + 0.829 + ], + "angle": 0, + "content": "Finally, our approach achieves the best performance compared with other strategies, demonstrating the effectiveness of our bi-level knowledge integration strategy." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.85, + 0.48, + 0.866 + ], + "angle": 0, + "content": "C SPATIAL FEATURE GENERATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Following (Zhang et al., 2021c), we generate the spatial feature \\( v_{sp} \\in \\mathbb{R}^{D} \\) for each pair of human-object proposals \\( (\\mathbf{x}_h, \\mathbf{x}_o) \\). Specifically, we first compute the bounding boxes information for \\( \\mathbf{x}_h \\) and \\( \\mathbf{x}_o \\) separately, including their center coordinates, widths, heights, aspect ratios and areas, all" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.103, + 0.497, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.103, + 0.822, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.314, + 0.828, + 0.341 + ], + "angle": 0, + "content": "Figure 4: The t-SNE visualization of CLIP-based HOI representation and object-based HOI representation." + }, + { + "type": "table_caption", + "bbox": [ + 0.223, + 0.374, + 0.773, + 0.388 + ], + "angle": 0, + "content": "Table 3: Ablation of different CLIP knowledge integration strategies on HICO-DET dataset." + }, + { + "type": "table", + "bbox": [ + 0.239, + 0.388, + 0.758, + 0.458 + ], + "angle": 0, + "content": "
MethodsExperimental settingmAP (%)
FullRareNon-Rare
Abl 1CLIP inference score11.8413.7211.27
Abl 2RN50-FPN (COCO) + FP random init.19.4416.2020.41
Abl 3RN50-FPN (COCO) + FP random init. + CLIP inference score20.5618.1921.27
Abl 4RN50-FPN (COCO) + FP HOI prompt init. + CLIP visual regularization19.3915.1220.66
oursCLIP RN50 + HOI recognition + KTN + self-taught relatedness cls.22.8922.4123.03
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.493, + 0.828, + 0.592 + ], + "angle": 0, + "content": "normalized by the corresponding dimension of the image. We also encode their relative spatial relations by estimating the intersection over union (IoU), a ratio of the area of \\(\\mathbf{x}_h\\) and \\(\\mathbf{x}_o\\), a directional encoding and the distance between center coordinates of \\(\\mathbf{x}_h\\) and \\(\\mathbf{x}_o\\). We concatenate all the above-mentioned preliminary spatial cues and obtain a spatial encoding \\(\\mathbf{p} \\in \\mathbb{R}_{+}^{18}\\). To encode the second and higher order combinations of different terms, the spatial encoding is concatenated with its logarithm and then embedded to \\(v_{sp}\\): \\(v_{sp} = \\mathcal{F}_{sp}([p; \\log(p + \\epsilon)])\\). Where \\(\\epsilon > 0\\) is a small constant to guarantee the numerical stability, and \\(\\mathcal{F}_{sp}\\) is a multi-layer fully connected network." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.622, + 0.62, + 0.639 + ], + "angle": 0, + "content": "D VISUALIZATION OF HOI KNOWLEDGE BANK \\(\\mathcal{W}_T\\)" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.66, + 0.827, + 0.732 + ], + "angle": 0, + "content": "To further understand \\(\\mathcal{W}_T\\), we visualize the knowledge bank features initialized by CLIP (Fig.5(a)) and learned from scratch (Fig.5(b)) in feature space by t-SNE. It is worth noting that the knowledge bank learned from scratch is derived from 'Exp 9' in Tab.2. As shown in Fig.5, we observe that the knowledge features of HOI classes initialized with CLIP are more discriminative than random initialized, and show a better clustering result (e.g. the HOI classes in red box regions)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.761, + 0.456, + 0.777 + ], + "angle": 0, + "content": "E DIFFERENT DESIGNS OF KTN" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.799, + 0.825, + 0.926 + ], + "angle": 0, + "content": "To further validate the effectiveness of our attention mechanism in KTN, we compare our design with some variants in Tab. 4. First of all, we directly encode the relation-level features within the union region to enhance the pairwise representation rather than the external knowledge bank. As a result, the mAP even decreases a little bit from 20.75 (Exp 6) to 20.69 (Exp 11). The potential reason is that the union region contains more ambiguous visual relations and background clutters, which are difficult to learn in a weak setting. Besides, we also explore different normalization strategies in KTN. The results in Tab. 4 demonstrate that Softmax operation (ours) performs better than uniform attention (Exp 12) or Sigmoid operation (Exp 13), indicating our attention mechanism is non-trivial and more effective on aggregating the relational cues from HOI knowledge bank." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.104, + 0.822, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.502, + 0.825, + 0.545 + ], + "angle": 0, + "content": "Figure 5: The t-SNE visualization of knowledge bank \\(\\mathcal{W}_T\\). (a) is the knowledge bank distribution in feature space based on our CLIP-based HOI representation while (b) is the knowledge bank learned from scratch (the model in Tab.2-Exp 9) based on object-based HOI representation." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.273, + 0.571, + 0.723, + 0.584 + ], + "angle": 0, + "content": "Table 4: Different network design of Knowledge Transfer Network (KTN)." + }, + { + "type": "table", + "bbox": [ + 0.207, + 0.584, + 0.791, + 0.643 + ], + "angle": 0, + "content": "
MethodsParameter initializationCLIP KnowledgemAP (%)
Backboneknowledge bankHOI recognitionKTNscore fusionSRCFullRareNon-Rare
Exp 11CLIP RN50CLIP Text✓ (union)-20.6919.5521.04
Exp 12CLIP RN50CLIP Text✓ (uniform)-21.1419.8221.53
Exp 13CLIP RN50CLIP Text✓ (sigmoid)-21.2819.2721.88
oursCLIP RN50CLIP Text-21.5320.0521.97
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.66, + 0.573, + 0.675 + ], + "angle": 0, + "content": "F TOP-K POSITIVE PAIR SELECTION FOR SRC" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.692, + 0.825, + 0.734 + ], + "angle": 0, + "content": "In this section we show the results of selecting top-2 and top-5 pairs as positive in Tab. 5. We notice that there is a small performance drop, which is likely to be caused by mislabeling more negative pairs as positive, resulting in model learning with more noise." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.755, + 0.56, + 0.77 + ], + "angle": 0, + "content": "G THE PROMPT GENERATION FOR V-COCO" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.787, + 0.825, + 0.843 + ], + "angle": 0, + "content": "For the V-COCO dataset, each action has two different semantic roles ('instrument' and 'object') for different objects, like 'cut cake' and 'cut with knife'. We use two different prompt templates to convert a HOI label to a language sentence. For the former one, we take template \"a person verb a/an object\", and use \"a person verb with object\" for the latter." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.864, + 0.517, + 0.879 + ], + "angle": 0, + "content": "H EVALUATION METRIC FOR V-COCO" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.924 + ], + "angle": 0, + "content": "V-COCO dataset has two scenarios for role AP evaluation. In Tab. 1, APS1&2 refer to 'Average Precision in scenario 1&2'. 
V-COCO dataset has two different annotations for HOIs: the first is a" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.253, + 0.113, + 0.744, + 0.127 + ], + "angle": 0, + "content": "Table 5: Ablation of top-K positive pair selection for SRC on HICO-DET dataset." + }, + { + "type": "table", + "bbox": [ + 0.386, + 0.127, + 0.613, + 0.187 + ], + "angle": 0, + "content": "
MethodsmAP (%)
FullRareNon-Rare
Top-522.4521.6122.70
Top-222.4921.8322.69
ours (Top-1)22.8922.4123.03
" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.201, + 0.508, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.433, + 0.473, + 0.445 + ], + "angle": 0, + "content": "(a) Evaluation protocol in Explanation-HOI" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.201, + 0.821, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.566, + 0.432, + 0.774, + 0.444 + ], + "angle": 0, + "content": "(b) The correct evaluation protocol" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.459, + 0.825, + 0.503 + ], + "angle": 0, + "content": "Figure 6: The screenshot of the evaluation code in Explanation-HOI. (a) is the original code while (b) is the correct one based on the standard evaluation code. We use red rectangle boxes to highlight the most important differences" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.528, + 0.825, + 0.626 + ], + "angle": 0, + "content": "full label of (human location, interaction type, object location, object type), and the second misses target object (also denoted as 'role' in the original paper (Gupta & Malik, 2015)) annotations, and the label only includes (human location, interaction type). For the second case, there are two different evaluation protocols (scenarios) when taking a prediction as correct \\( ^4 \\): In scenario 1, it requires the interaction is correct & the overlap between the human boxes is \\( > 0.5 \\) & the corresponding role is empty, which is more restricted; in scenario 2, it only requires the interaction is correct & the overlap between the person boxes is \\( > 0.5 \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.647, + 0.51, + 0.663 + ], + "angle": 0, + "content": "I EVALUATION OF EXPLANATION-HOI" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.679, + 0.825, + 0.779 + ], + "angle": 0, + "content": "The Explanation-HOI (Baldassarre et al., 2020) has a misunderstanding of mAP evaluation protocol. As shown in Fig.6(a) L200-L205, the Explanation-HOI only takes some specific predicted HOIs into the evaluation process, which has the same HOI labels as groundtruth HOIs. Thus, they ignore lots of false-positive HOI predictions when calculating mAP, leading to an untrustable high mAP score (reported in their original paper). In Fig.6(b) L204-L208, we evaluate all predicted HOIs, which is the same as the standard evaluation protocol proposed in HICO-DET (Chao et al., 2015). The correct results have already been reported in Tab.1 in the main paper." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.797, + 0.316, + 0.813 + ], + "angle": 0, + "content": "J LIMITATIONS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.829, + 0.825, + 0.9 + ], + "angle": 0, + "content": "As described in Sec. 3.1, we adopt an external object detector to generate human-object proposals and then recognize their interactions. Consequently, our method is faced with two limitations brought by erroneous object detection results. Firstly, the positive human-object pairs are not recalled if the human or object proposals are not detected. Secondly, the proposals are kept fixed during learning, which leads to the problem of inaccurate localization and object types." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.402, + 0.925 + ], + "angle": 0, + "content": "4https://github.com/s-gupta/v-coco" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.391, + 0.119 + ], + "angle": 0, + "content": "K RISK OF USING CLIP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.828, + 0.26 + ], + "angle": 0, + "content": "For all the methods that adopt CLIP in their model design, there is a potential risk of data leakage as CLIP has seen quite a lot of data during pretraining. For HOI detection task, we cannot get access to CLIP dataset and do not know the exact overlap between CLIP and HOI benchmarks (i.e., HICO-DET and V-COCO), we carefully read Sec. 5 (Data Overlap Analysis) of the CLIP paper (Radford et al., 2021b), including an analysis of the overlap between its dataset with 35 popular datasets (HICO-DET and V-COCO are not included). It shows the overlap is small (median is \\(2.2\\%\\) and average is \\(3.2\\%\\)) and the influence is limited (\"overall accuracy is rarely shifted by more than \\(0.1\\%\\) with only 7 datasets above this threshold\"). Besides, the training text accompanying an image in the CLIP dataset is often not related to the HOI annotations. Thus, we think the risk is limited." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.28, + 0.285, + 0.295 + ], + "angle": 0, + "content": "L LICENSE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.311, + 0.829, + 0.355 + ], + "angle": 0, + "content": "The licenses of the assets used in our work are listed below, including open-sourced CLIP model, HICO-DET dataset, and V-COCO dataset. 
As for HICO-DET, we cannot find its license in the paper and the official project page. Thus we provide the official project page instead here for clarity." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.365, + 0.575, + 0.379 + ], + "angle": 0, + "content": "1. CLIP: https://github.com/openai/CLIP MIT License" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.384, + 0.613, + 0.398 + ], + "angle": 0, + "content": "2. VCOCO: https://github.com/s-gupta/v-coco/MIT License" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.403, + 0.695, + 0.417 + ], + "angle": 0, + "content": "3. HICO-DET: http://www-personal.umich.edu/ ywchao/hico/ No license" + }, + { + "type": "list", + "bbox": [ + 0.211, + 0.365, + 0.695, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ] +] \ No newline at end of file diff --git a/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_origin.pdf b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..dd950aafc487a153fa82de147c9c9203a00804e5 --- /dev/null +++ b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/b2c89086-3efa-4d35-8fb8-fa570d2c2733_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ac80ab06bfd4465efd7b7d6d5fd4aa6ece75f756e605c06841275ac879ec820 +size 13311161 diff --git a/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/full.md b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5bdbf36a26e9da974e9a8584e00015340f787442 --- /dev/null +++ b/2023/Weakly-supervised HOI Detection 
via Prior-guided Bi-level Representation Learning/full.md @@ -0,0 +1,385 @@ +# WEAKLY-SUPERVISED HOI DETECTION VIA PRIOR-GUIDED BI-LEVEL REPRESENTATION LEARNING + +Bo Wan $^{1,*}$ , Yongfei Liu $^{2*}$ , Desen Zhou $^{2}$ , Tinne Tuytelaars $^{1}$ , Xuming He $^{2,3}$ + +$^{1}$ KU Leuven, Leuven, Belgium; $^{2}$ ShanghaiTech University, Shanghai, China + $^{3}$ Shanghai Engineering Research Center of Intelligent Vision and Imaging {bwan, tinne.tuytelaars}@esat.kuleuven.be {liuyf3,zhouds,hexm}@shanghaitech.edu.cn + +# ABSTRACT + +Human object interaction (HOI) detection plays a crucial role in human-centric scene understanding and serves as a fundamental building-block for many vision tasks. One generalizable and scalable strategy for HOI detection is to use weak supervision, learning from image-level annotations only. This is inherently challenging due to ambiguous human-object associations, large search space of detecting HOIs and highly noisy training signal. A promising strategy to address those challenges is to exploit knowledge from large-scale pretrained models (e.g., CLIP), but a direct knowledge distillation strategy (Liao et al., 2022) does not perform well on the weakly-supervised setting. In contrast, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge at both image level and HOI instance level, and adopt a self-taught mechanism to prune incorrect human-object associations. Experimental results on HICO-DET and V-COCO show that our method outperforms the previous works by a sizable margin, showing the efficacy of our HOI representation. 
+ +# 1 INTRODUCTION + +Human object interaction detection aims to simultaneously localize the human-object regions in an image and to classify their interactions, which serves as a fundamental building-block in a wide range of tasks in human-centric artificial intelligence, such as human activity recognition (Heilbron et al., 2015; Tina et al., 2021), human motion tracking (Wafae et al., 2019; Nishimura et al., 2021) and anomalous behavior detection (Liu et al., 2018; Pang et al., 2020). + +Usually, HOI detection adopts a supervised learning paradigm (Gupta & Malik, 2015; Chao et al., 2018; Wan et al., 2019; Gao et al., 2020; Zhang et al., 2021c). This requires detailed annotations (i.e. human and object bounding boxes and their interaction types) in the training stage. However, such HOI annotations are expensive to collect and prone to labeling errors. In contrast, it is much easier to acquire image-level descriptions of target scenes. Consequently, a more scalable strategy for HOI detection is to learn from weak annotations at the image level, known as weakly-supervised HOI detection (Zhang et al., 2017). Learning under such weak supervision is particularly challenging mainly due to the lack of accurate visual-semantic associations, large search space of detecting HOIs and highly noisy training signal from only image level supervision. + +Most existing works (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) attempt to tackle the weakly-supervised HOI detection in a Multiple Instance Learning (MIL) framework (Ilse et al., 2018). They first utilize an object detector to generate human-object proposals and then train an interaction classifier with image-level labels as supervision. Despite promising results, these methods suffer from several weaknesses when coping with diverse and fine-grained HOIs. 
Firstly, they usually rely on visual representations derived from the external object detector, which mainly focus on the semantic concepts of the objects in the scene and hence are insufficient for capturing the concept of fine-grained interactions. Secondly, as the image-level supervision tends to ignore the imbalance in HOI classes, their representation learning is more susceptible to the dataset bias and dominated by frequent interaction classes. Finally, these methods learn the HOI concepts from a candidate set generated by pairing up all the human and object proposals, which is highly noisy and often leads to erroneous human-object associations for many interaction classes. + +To address the aforementioned limitations, we introduce a new weakly-supervised HOI detection strategy. It aims to incorporate the prior knowledge from pretrained foundation models to facilitate the HOI learning. In particular, we propose to integrate CLIP (Radford et al., 2021b), a large-scale vision-language pretrained model. This allows us to exploit the strong generalization capability of the CLIP representation for learning a better HOI representation under weak supervision. Compared to the representations learned by the object detector, the CLIP representations are inherently less object-centric, hence more likely to incorporate also aspects about the human-object interaction, as evidenced by Appendix A. Although a few works have successfully exploited CLIP for supervised HOI detection in the past, experimentally we find they do not perform well in the more challenging weakly-supervised setting (c.f. Appendix.B). We hypothesize this is because they only transfer knowledge at image level, and fail without supervision at the level of human-object pairs. + +To this end, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge of HOIs at two different levels. 
First, at the image level, we utilize the visual and linguistic embeddings of the CLIP model to build a global HOI knowledge bank and generate image-level HOI predictions. In addition, for each human-object pair, we enrich the region-based HOI features by the HOI representations in the knowledge bank via a novel attention mechanism. Such a bi-level framework enables us to exploit the image-level supervision more effectively through the shared HOI knowledge bank, and to enhance the interaction feature learning by introducing the visual and text representations of the CLIP model. + +We instantiate our bi-level knowledge integration strategy as a modular deep neural network with a global and local branch. Given the human-object proposals generated by an off-the-shelf object detector, the global branch starts with a backbone network to compute image feature maps, which are used by a subsequent HOI recognition network to predict the image-wise HOI scores. The local branch builds a knowledge transfer network to extract the human-object features and augment them with the CLIP-guided knowledge bank, followed by a pairwise classification network to compute their relatedness and interaction scores1. The relatedness scores are used to prune incorrect human-object associations, which mitigates the issue of noisy proposals. Finally, the outputs of the two branches are fused to generate the final HOI scores. + +To train our HOI detection network with image-level annotations, we first initialize the backbone network and the HOI knowledge bank from the CLIP encoders, and then train the entire model in an end-to-end manner. 
In particular, we devise a novel multi-task weak supervision loss consisting of three terms: 1) an image-level HOI classification loss for the global branch; 2) an MIL-like loss for the interaction scores predicted by the local branch, which is defined on the aggregate of all the human-object pair predictions; 3) a self-taught classification loss for the relatedness of each human-object pair, which uses the interaction scores from the model itself as supervision. + +We validate our methods on two public benchmarks: HICO-DET (Chao et al., 2018) and V-COCO (Gupta & Malik, 2015). The empirical results and ablative studies show our method consistently achieves state-of-the-art performance on all benchmarks. In summary, our contributions are three-fold: (i) We exploit the CLIP knowledge to build a prior-enriched HOI representation, which is more robust for detecting fine-grained interaction types and under imbalanced data distributions. (ii) We develop a self-taught relatedness classification loss to alleviate the problem of mis-association between human-object pairs. (iii) Our approach achieves state-of-the-art performance on the weakly-supervised HOI detection task on both benchmarks. + +# 2 RELATED WORKS + +HOI detection: Most works on supervised HOI detection can be categorized in two groups: two-stage and one-stage HOI detection. Two-stage methods first generate a set of human-object proposals with an external object detector, then classify their interactions. They mainly focus on exploring additional human pose information (Wan et al., 2019; Li et al., 2020a; Gupta et al., 2019), pairwise relatedness (Li et al., 2019a; Zhou et al., 2020) or modeling relations between object and human (Gao et al., 2020; Zhang et al., 2021c; Ulutan et al., 2020; Zhou & Chi, 2019), to enhance the HOI representations. 
One-stage methods predict human & object locations and their interaction types simultaneously in an end-to-end manner, which are currently dominated by transformer-based architectures (Carion et al., 2020; Kim et al., 2022; Dong et al., 2022; Zhang et al., 2021a;b). + +![](images/e651bacab4fc1cd655ae1937f2758bab90cffc62a4962ef321be81b8fb18d4d7.jpg) +Figure 1: Model Overview: There are four modules in our network: a backbone Network, an HOI recognition network, a knowledge transfer network and a pairwise classification network. + +Supervised methods show superior performance, but require labor-intensive HOI annotations that are infeasible to obtain in many scenarios. Thus, in this work we focus on HOI detection under weak supervision. + +Weakly-supervised HOI detection: Weakly-supervised HOI detection aims to learn instance-level HOIs with only image-level annotations. (Prest et al., 2011) learns a set of binary action classifiers based on detected human-object pairs, where human proposal is obtained from a part-based human detector and object is derived from the relative position with respect to the human. PPR-FCN (Zhang et al., 2017) employs a parallel FCN to perform pair selection and classification. Explainable-HOI (Baldassarre et al., 2020) adopts graph nets to capture relations for better image-level HOI recognition, and uses backward explanation for instance-level HOI detection. MX-HOI (Kumaraswamy et al., 2021) proposes a momentum-independent learning strategy to utilize strong & weak labels simultaneously. AlignFormer (Kilickaya & Smeulders, 2021) proposes an align layer in transformer framework, which utilizes geometric & visual priors to generate pseudo alignments for training. Those methods focus on learning HOIs with advanced network structures or better pseudo alignments. However, they still suffer from noisy human-object associations and ambiguous interaction types. 
To address those challenges, we exploit prior knowledge from CLIP to build discriminative HOI representations. + +Knowledge exploitation of pretrained V&L models: Recently, CLIP (Radford et al., 2021a) model has demonstrated strong generalization to various downstream tasks (Ghiasi et al., 2021; Du et al., 2022; Gu et al., 2021). Some works also explore CLIP knowledge in supervised HOI detection, e.g., CATN (Dong et al., 2022) initializes the object query with category-aware semantic information from CLIP text encoder, and GEN-VLKT (Liao et al., 2022) employs image feature distillation and classifier initialization with HOI prompts. However, they only exploit CLIP knowledge at a coarse level and require detailed annotations of human-object pairs. It is non-trivial to extend such strategies to the weak supervision paradigm due to highly noisy training signals. In our work, we build a deep connection between CLIP and HOI representation by incorporating the prior knowledge of HOIs at both image and HOI instance levels. + +# 3 METHOD + +# 3.1 PROBLEM SETUP AND METHOD OVERVIEW + +Problem setup Given an input image $I$ , the task of weakly-supervised HOI detection aims to localize and recognize the human-object interactions, while only the corresponding image-level HOI categories are available for training. Formally, we aim to learn a HOI detector $\mathcal{M}$ , which takes an image $I$ as input and generates a set of tuples $\mathcal{O} = \{(\mathbf{x}_h,\mathbf{x}_o,c_o,a_{h,o},R_{h,o}^a)\}$ , i.e., $\mathcal{O} = \mathcal{M}(I)$ . Here each tuple indicates a HOI instance, in which $\mathbf{x}_h,\mathbf{x}_o\in \mathbb{R}^4$ represent human and object bounding boxes, $c_{o}\in \{1,\dots,C\}$ is the object category, $a_{h,o}\in \{1,\dots,A\}$ denotes the interaction class associated with $\mathbf{x}_h$ and $\mathbf{x}_o$ , and $R_{h,o}^{a}\in \mathbb{R}$ is the HOI score. 
For the weakly-supervised setting, + +each training image is annotated with a set of HOI categories $\mathcal{R} = \{r^{*}\}$ at the image level only, where $r^{*} \in \{1, \dots, N\}$ is an index to a combination of ground-truth object category $c^{*}$ and interaction category $a^{*}$ , and $N$ denotes the number of all possible HOI combinations defined on the dataset. + +Method Overview As we lack supervision for the HOI locations, we adopt a typical hypothesize-and-recognize strategy (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) for HOI detection: first we generate a set of human and object proposals with an off-the-shelf object detector (Ren et al., 2015) and then predict the interaction class for all human-object combinations. + +Unlike other methods, we do not re-use the feature maps of the object or human detector - we only keep the bounding boxes. Instead, we learn a new representation optimized for the HOI task. This is challenging under the weak setting as the model learning is noisy, but feasible by leveraging the rich semantic knowledge from a pretrained large-scale multimodal model, like CLIP. However, the naive knowledge integration strategies for supervised setting fail when directly applied in the weak setting, as evidenced by our experiments in Appendix.B + +Our framework adopts two philosophies to address the challenges in the weakly-supervised HOI task: the first is to integrate the prior knowledge into discriminative representation learning, and the second is to suppress noise in learning. For the first philosophy, we utilize the prior knowledge from CLIP to guide the representation learning in both global image-level and fine-grained human-object pairs, which is instantiated by a bi-level knowledge integration strategy. For the second philosophy, we adopt an effective self-taught learning mechanism to suppress the irrelevant pairs. 
+ +We instantiate the bi-level knowledge integration strategy with a two-branch deep network. Our detection pipeline starts with a set of human proposals with detection scores $\{(\mathbf{x}_h, s_h)\}$ , and object proposals with their categories and detection scores $\{(\mathbf{x}_o, c_o, s_o)\}$ . Then, the global branch performs image-level HOI recognition by utilizing a CLIP-initialized HOI knowledge bank as a classifier. This allows us to exploit both visual and text encoders from CLIP to generate better HOI representations. In parallel, for each human-object pair $(\mathbf{x}_h, \mathbf{x}_o)$ , the local branch explicitly augments the pairwise HOI features with the HOI knowledge bank to then identify their relatedness and interaction classes. + +To train our model, we use a multi-task loss, which incorporates a HOI recognition loss defined on image-wise HOIs for the visual encoder and knowledge bank finetuning, and a self-taught relatedness classification for suppressing the background human-object associations, on top of the standard MIL-based loss. We first present model details in Sec.3.2, followed by the training strategy in Sec.3.3. + +# 3.2 MODEL DESIGN + +Now we introduce our bi-level knowledge integration strategy, where the aim is to exploit CLIP textual embeddings of HOI labels as a HOI knowledge bank for the HOI representation learning, and to transfer such knowledge both at image level as well as at the level of human-object pairs for interaction predictions. Specifically, as shown in Fig. 1, our network consists of a global branch and a local branch. The global branch includes a backbone network (Sec.3.2.1) that extracts image features, and a HOI recognition network (Sec.3.2.2) that uses a HOI knowledge bank based on CLIP to predict image-level HOI scores. 
For each human-object proposal generated by an off-the-shelf object detector, the local branch employs a knowledge transfer network (Sec.3.2.3) to compute its feature representation with enhancement from the HOI knowledge bank, and a pairwise classification network (Sec.3.2.4) to compute their relatedness and interaction scores. Finally, we generate the final HOI detection scores by combining global HOI scores with local predictions (Sec. 3.2.5). + +HOI Knowledge Bank Generation CLIP builds a powerful vision-language model by pretraining on large-scale image-text pairs. It consists of a visual encoder $\mathcal{F}_V$ and textual encoder $\mathcal{F}_T$ , mapping both visual and textual inputs to a shared latent space. Here, we exploit CLIP to generate a HOI knowledge bank. We take a similar prompt strategy as in CLIP, adopting a common template 'a person {verb} a/an {object}' to convert HOI labels into text prompts (e.g., converting 'drive car' to 'a person driving a car'). Then we input the sentences into the CLIP textual encoder $\mathcal{F}_T$ to initialize the HOI knowledge bank $\mathcal{W}_T \in \mathbb{R}^{N \cdot D}$ , with $D$ denoting the feature dimension. One can think of $\mathcal{W}_T$ as a set of 'prototypes' in feature space, one for each HOI in the dataset. + +# 3.2.1 GLOBAL BRANCH: BACKBONE NETWORK + +To incorporate CLIP for feature extraction, we initialize the backbone network (e.g., a ResNet-101 (He et al., 2016)) with CLIP's visual encoder $\mathcal{F}_V$ to generate a feature map $\Gamma$ for the input image $I$ . We further compute a global feature vector $v_{g} \in \mathbb{R}^{D}$ with self-attention operation (Radford et al., 2021b). 
+ +![](images/f3349fb8251160198ddf86acc80f35bdf49add88f16e617e0264756fb346c105.jpg) +(a) knowledge transfer network + +![](images/43ccc2a33ded6bf714fc9c749ae5107721836e3e4420f610ab7a8b7c1b4c5370.jpg) +(b) pseudo relatedness label generation +Figure 2: The knowledge transfer network explicitly transfers the discriminative relation-level semantic knowledge derived from CLIP to the pairwise HOI representations. Pseudo relatedness label generation uses the pairwise interaction scores to generate the pseudo association labels for self-taught relatedness classification + +# 3.2.2 GLOBAL BRANCH: HOI RECOGNITION NETWORK + +We perform an image-wise HOI recognition task with the HOI knowledge bank $\mathcal{W}_T$ . We obtain global HOI scores $s_g \in \mathbb{R}^N$ by computing the inner product between the image feature $v_g$ and the knowledge bank $\mathcal{W}_T$ : $s_g = \mathcal{W}_T \times v_g$ , where $\times$ is matrix multiplication. This has the effect of adapting the visual encoder and knowledge bank parameters to the HOI recognition task, fully taking advantage of the knowledge from CLIP. + +# 3.2.3 LOCAL BRANCH: KNOWLEDGE TRANSFER NETWORK + +Given the CLIP-initialized visual encoder, a standard HOI representation can be formed by concatenating the human and object appearance features along with their spatial encoding. However, even after the finetuning as described above, such a representation still mainly focuses on object-level semantic cues rather than relation-level concepts. In this module, we explicitly exploit the HOI knowledge bank $\mathcal{W}_T$ to learn a local relation-specific HOI representation. To achieve this, we propose an attention-based architecture as shown in Fig.2(a). + +Specifically, for each human proposal $\mathbf{x}_h$ and object proposal $\mathbf{x}_o$ , we use RoI-Align (He et al., 2017) to crop the feature maps from $\Gamma$ followed by a self-attention operation to compute their appearance features $v_h, v_o \in \mathbb{R}^D$ . 
Then we compute a spatial feature $v_{sp}$ by encoding the relative positions of their bounding boxes $(\mathbf{x}_h, \mathbf{x}_o)^2$ . The holistic HOI representation $v_p \in \mathbb{R}^D$ is an embedding of the human and object appearance features and their spatial feature, i.e., $v_p = \mathcal{F}_E([v_h; v_o; v_{sp}])$ , where $[\cdot]$ is the concatenation operation and $\mathcal{F}_E$ is a multi-layer perceptron (MLP). + +To enhance relation-level concepts, we further compute its union region $\mathbf{x}_u\in \mathbb{R}^4$ (see Fig. 2a) and extract the corresponding appearance feature $v_{u}\in \mathbb{R}^{D}$ via RoI-align over the feature map $\Gamma$ . The union region is important as it encodes relational context cues, but it potentially also contains a large amount of background that is noisy for model learning. We thus devise an attention module that is similar in design to the HOI recognition network, but uses the union feature $v_{u}$ as query to extract a meta-embedding $v_{meta}\in \mathbb{R}^{D}$ from the HOI knowledge bank $\mathcal{W}_T$ . The final HOI representation $\hat{v}_p\in \mathbb{R}^D$ is built by fusing the holistic representation $v_{p}$ and $v_{meta}$ with a MLP $\mathcal{F}_K$ . + +$$ +\alpha = \operatorname {S o f t m a x} \left(\mathcal {W} _ {T} \times v _ {u}\right); \quad v _ {\text {m e t a}} = \alpha^ {\intercal} \times \mathcal {W} _ {T}; \quad \hat {v} _ {p} = \mathcal {F} _ {K} \left(v _ {p} + v _ {\text {m e t a}}\right). \tag {1} +$$ + +Here $\alpha \in \mathbb{R}^N$ is the normalized attention weight and $\tau$ is the transpose operation. $v_{meta}$ encodes a discriminative representation from CLIP and facilitates feature sharing between HOI classes. + +# 3.2.4 LOCAL BRANCH: PAIRWISE CLASSIFICATION NETWORK + +Given the relation-aware HOI representation $\hat{v}_p$ , our final module performs a coarse-level classification on human-object association and a fine-level classification for interaction recognition. 
Specifically, we use two MLPs $\mathcal{F}_P$ and $\mathcal{F}_B$ to predict the interaction scores $s_p \in \mathbb{R}^A$ and the relatedness score $s_b \in \mathbb{R}$ for each human-object pair: + +$$ +s _ {p} = \mathcal {F} _ {P} (\hat {v} _ {p}); \quad s _ {b} = \mathcal {F} _ {B} (\hat {v} _ {p}) \tag {2} +$$ + +To train the model under weak supervision (see Sec. 3.3), we further aggregate the pairwise interaction scores into image-level interaction scores. Assume we have $M$ pairs of human-object proposals for a given image, and denote the interaction scores for the $m$ -th pair as $s_p^m$ . We first concatenate all the interaction scores to compose a bag $S = [s_p^1; \ldots; s_p^M] \in \mathbb{R}^{M \cdot A}$ , then we maximize over all pairs to obtain the image-wise interaction scores: $\tilde{s}_p = \max_m S$ . + +# 3.2.5 MODEL INFERENCE + +During model inference, we do not use the local interaction scores $s_p$ directly. Instead, we normalize $S$ with a Softmax operation defined on all pairs: $\bar{S} = \text{Softmax}(S)$ , and then compute the normalized + +pairwise interaction scores $e_p = \sigma(\tilde{s}_p) \cdot \bar{s}_p$ , where $\bar{s}_p$ is a row from $\bar{S}$ and $\sigma$ is Sigmoid function. This has the effect of measuring the contribution of a given pair, in case of multiple pairs in an image share the same interaction. + +The final interaction score $s_{h,o}^{a}$ for human-object pair $(\mathbf{x}_h,\mathbf{x}_o)$ combines multiple scores, including the global HOI scores $s_g$ , the normalized pairwise interaction scores $e_p$ , and the relatedness score $s_b$ . The overall HOI score $R_{h,o}^{a}$ is a combination of the interaction score and the object detection scores. 
+ +$$ +s _ {h, o} ^ {a} = \sigma \left(s _ {g} ^ {a, c _ {o}}\right) \cdot e _ {p} ^ {a} \cdot \sigma \left(s _ {b}\right); \quad R _ {h, o} ^ {a} = \left(s _ {h} \cdot s _ {o}\right) ^ {\gamma} \cdot s _ {h, o} ^ {a} \tag {3} +$$ + +where $s_g^{a,c_o}$ is the HOI score corresponding to $a$ -th interaction and $c_o$ -th object category in $s_g$ , $e_p^a$ is the score of $a$ -th interaction in $e_p$ , and $\gamma$ is a hyper-parameter to balance the scores (Zhang et al., 2021c; Li et al., 2019b). + +# 3.3 LEARNING WITH WEAK SUPERVISION + +To train our deep network in a weakly supervised setting, we use a multi-task loss defined on three different levels. Specifically, our overall loss function $\mathcal{L}$ consists of three terms: i) an image-wise HOI recognition loss $\mathcal{L}_g$ to adapt CLIP features to the task of human-object interaction detection; ii) a pairwise interaction classification loss $\mathcal{L}_p$ to guide the knowledge transfer towards fine-grained relation-aware representations; and iii) a self-taught relatedness classification loss $\mathcal{L}_b$ to prune non-interacting human-object combinations. Formally, the overall loss is written as: + +$$ +\mathcal {L} = \mathcal {L} _ {g} + \mathcal {L} _ {p} + \mathcal {L} _ {b} \tag {4} +$$ + +Image-wise HOI recognition loss $\mathcal{L}_g$ : Given the HOI scores $s_g$ and ground-truth HOI categories $\mathcal{R}$ , $\mathcal{L}_g$ is a standard binary cross-entropy loss for multi-label classification: $\mathcal{L}_g = L_{BCE}(s_g, \mathcal{R})$ . + +Pairwise interaction classification loss $\mathcal{L}_p$ : We adopt a MIL strategy that first aggregates the pairwise interaction scores and supervises this with image-level interaction labels as $\mathcal{A} = \{a^*\}$ . Given the image-wise interaction scores $\tilde{s}_p$ , $\mathcal{L}_p$ is a standard binary cross-entropy loss for multi-label classification as: $\mathcal{L}_p = L_{BCE}(\tilde{s}_p, \mathcal{A})$ . 
+ +Self-taught relatedness classification loss $\mathcal{L}_b$ : As human-object associations are not annotated, we devise a novel pseudo relatedness label generation mechanism for training a self-taught binary classifier to identify valid human-object associations. Specifically, we observe that the human-object pairs with confident interaction scores are often associated after a short period of initial training without self-taught classification loss. Motivated by this, we use the interaction scores $s_p$ from the model under training to supervise the relatedness classification. + +Concretely, we generate pseudo labels $\mathcal{B} = \{b_1,\dots,b_M\}$ for all human-object pairs in an image, where $b_{m}\in \{0,1\}$ indicates the relatedness for the $m$ -th combination. To this end, as illustrated in Fig.2(b), we first propose a binary mask $Z\in \{0,1\}^{M\cdot A}$ for all interaction scores $S$ with respect to the ground-truth object categories $\mathcal{C} = \{c^*\}$ . For each human-object pair where the object label $c_{o}$ is included in $\mathcal{C}$ , we consider it as a potential interactive combination and thus assign the corresponding row in $Z$ as 1, and other rows as 0. For the latter, we also immediately set $b_{m} = 0$ . Then we generate pairwise scores $t^a\in \mathbb{R}^M$ for each ground-truth interaction $a^*$ by selecting the corresponding row from $S\odot Z$ . The pseudo label for the pair with the highest score is assigned as 1, i.e., $m_a = \arg \max_{m}t^a$ and $b_{m_a} = 1$ . We only select one positive pair3 for each $a^*$ . Finally, $\mathcal{L}_b$ is defined as a binary cross-entropy loss: $\mathcal{L}_b = \sum_m L_{BCE}(s_b^m,b_m)$ , where $s_b^m$ is the relatedness score for the $m$ -th pair. + +3We also explore top-K selection in Appendix F + +Table 1: mAP comparison on HICO-DET and V-COCO test set. - denotes the results are not available. 
* stands for the method we re-evaluate with the correct evaluation protocol (see Appendix.I for details) and †means our re-implementation. For V-COCO, all object detectors are pretrained on MSCOCO dataset by default, and details about the evaluation metrics APS1&2 c.f. Appendix H. IN-1K denotes ImageNet with 1000 classes. + +
MethodsBackboneDetectorHICO-DET (%)V-COCO (%)
FullRareNon-RareAProleAProle
supervised
iCAN (Gao et al., 2018)RN50 (IN-1K&COCO)FRCNN (COCO)14.8410.4516.1545.3052.40
PMFNet (Wan et al., 2019)RN50-FPN (IN-1K&COCO)FRCNN (COCO)17.4615.5618.0052.00-
TIN (Li et al., 2019b)RN50-FPN (IN-1K&COCO)FRCNN (COCO)17.2213.5118.3247.8054.20
DJ-RN (Li et al., 2020a)RN50 (IN-1K&COCO)FRCNN (COCO)21.3418.5321.1853.3060.30
IDN (Li et al., 2020b)RN50 (IN-1K&COCO)FRCNN (HICO-DET)26.2922.6127.3953.3060.30
SCG (Zhang et al., 2021c)RN50-FPN (IN-1K&HICO-DET)FRCNN (HICO-DET)31.3324.7233.3154.2060.90
HOTR (Kim et al., 2021)RN50+Transformer (IN-1K&COCO)DETR (HICO-DET)25.1017.3427.4255.2064.40
QPIC (Tamura et al., 2021)RN101+Transformer (IN-1K&COCO)DETR (COCO)29.9023.9231.6958.3060.70
CATN (Dong et al., 2022)RN50+Transformer (IN-1K&HICO-DET&COCO)DETR (HICO-DET)31.8625.1533.8460.10-
MSTR (Kim et al., 2022)RN50 + Transformer (IN-1K&COCO)DETR(HICO-DET)31.1725.3133.9262.0065.20
DisTr (Zhou et al., 2022)RN50+Transformer (IN-1K&COCO)DETR (HICO-DET)31.7527.4533.0366.2068.50
SSRT (Iftekhar et al., 2022)R101+Transformer (IN-1K&COCO)DETR (COCO)31.3424.3133.3265.0067.10
GEN-VLKT (Liao et al., 2022)RN101+Transformer (IN-1K&HICO-DET)DETR (HICO-DET)34.9531.1836.0863.5865.93
between supervised & weakly-supervised setting, learning with image-level HOIs and box annotations
AlignFormer (Kilickaya & Smeulders, 2021)RN101+Transformer (IN-1K&HICO-DET)DETR (HICO-DET)20.8518.2321.6415.8216.34
weakly-supervised
Explanation-HOI* (Baldassarre et al., 2020)ResNeXt101 (IN-1K&COCO)FRCNN (COCO)10.638.7111.20--
MX-HOI (Kumaraswamy et al., 2021)RN101 (IN-1K&COCO)FRCNN (COCO)16.1412.0617.50--
PPR-FCN† (Zhang et al., 2017)RN50 (CLIP dataset)FRCNN (COCO)17.5515.6918.41--
oursRN50 (CLIP dataset)FRCNN (COCO)22.8922.4123.0342.9748.06
oursRN101 (CLIP dataset)FRCNN (COCO)25.7024.5226.0544.7449.97
+ +# 4 EXPERIMENTS + +# 4.1 EXPERIMENTAL SETUP + +Datasets: We benchmark our model on two public datasets: HICO-DET and V-COCO. HICO-DET consists of 47776 images (38118 for training and 9658 for test). It has $N = 600$ HOI categories, which are composed of $C = 80$ common objects (the same as MSCOCO (Lin et al., 2014)) and $A = 117$ unique interaction categories. V-COCO is a subset of MSCOCO, consisting of 2533 images for training, 2867 for validation and 4946 for test. It has 16199 human instances, each annotated with binary labels for $A = 26$ interaction categories. + +Evaluation Metric: Following (Chao et al., 2015), we use mean average precision (mAP) to evaluate HOI detection performance. A human-object pair is considered as positive when both predicted human and object boxes have at least 0.5 IoU with their ground-truth boxes, and the HOI class is classified correctly. + +# 4.2 IMPLEMENTATION DETAILS + +We use an off-the-shelf Faster R-CNN (Ren et al., 2015) pretrained on MSCOCO to generate at most 100 object candidates for each image. For V-COCO, it is worth noting that we train the object detector by removing the images in MSCOCO that overlap with V-COCO to prevent information leakage. The backbone network is initialized with the visual encoder from CLIP-RN101 model and the feature dimension $D = 1024$ . + +For model learning, we set the detection score weight $\gamma = 2.8$ as default by following previous works (Zhang et al., 2021c; Li et al., 2019b), then optimize the entire network with AdamW and an initial learning rate of 1e-5 for backbone parameters and 1e-4 for others. We detach the parameters of the knowledge bank on the local branch for better model learning. We train up to 60K iterations with batch-size 24 in each on 4 NVIDIA 2080TI GPUs, and decay the learning rate by 10 times in 12K and 24K iteration. 
+ +# 4.3 QUANTITATIVE RESULTS + +For HICO-DET (Tab.1), our approach outperforms the previous state of the arts on the weakly supervised setting by a clear margin, achieving 22.89 mAP with ResNet-50 and 25.70 mAP with ResNet-101 as backbone. For a fair comparison, we also re-implement PPR-FCN with CLIP visual encoder. The results show that we still outperform PPR-FCN by a sizeable margin, which validates the superiority of our framework. Besides, we even perform comparably with HOTR and IDN under an inferior experimental setting where HOTR adopts a more advanced transformer encoder-decoder architecture, and both methods are trained with strong supervision. Furthermore, the mAP gap between Rare (training annotations $< 10$ ) and Non-rare HOI classes in our results is much smaller than other methods, demonstrating the superior generalization capability of our HOI representation for solving the long-tailed distribution issue. In detail, we achieve a 0.62 mAP gap with ResNet-50 + +Table 2: Ablation study on HICO-DET dataset. "RN50-FPN(COCO)" denotes the backbone initialized with Faster R-CNN parameters pretrained on MSCOCO dataset while "CLIP RN50" stands for the backbone initialized with CLIP visual encoder. Besides, we construct the knowledge bank $\mathcal{W}_T$ with random initialization, or computing HOI prompts by RoBERTa or CLIP text transformer. + +
MethodsParameter initializationCLIP KnowledgemAP (%)
Backboneknowledge bankHOI recognitionKTNscore fusionSRCFullRareNon-Rare
baselineCLIP RN50-----19.5216.5820.40
Exp 1CLIP RN50CLIP Text---20.3118.3420.90
Exp 2CLIP RN50CLIP Text✓ (freeze WT)---20.0918.2320.64
Exp 3CLIP RN50CLIP Text--20.8618.4021.60
Exp 4CLIP RN50CLIP Text-22.4020.7022.90
Exp 5CLIP RN50----19.8817.4520.61
Exp 6CLIP RN50CLIP Text--20.7519.3821.16
Exp 7CLIP RN50CLIP Text-21.5320.0521.97
oursCLIP RN50CLIP Text22.8922.4123.03
Exp 8RN50-FPN (COCO)-----19.4416.2020.41
Exp 9RN50-FPN (COCO)random19.6115.5720.82
Exp 10RN50-FPN (COCO)RoBERTa20.4516.4621.65
+ +and 1.53 with ResNet-101 backbone, which is much smaller than AlignFormer (3.14) and PPR-FCN (2.64), and supervised methods SSRT (9.01) and GEN-VLKT (4.9). + +For V-COCO dataset, we report the performance of $\mathrm{AP}_{role}$ in both scenario1 and scenario2 for a complete comparison, which are 42.97 / 48.06 $\mathrm{AP}_{role}$ with ResNet-50 and 44.74 / 49.97 $\mathrm{AP}_{role}$ with ResNet-101 as backbone. As shown in Tab.1, our model achieves significant improvement compared with AlignFormer, and even is comparable with supervised methods TIN and iCAN. + +# 4.4 ABLATION STUDY + +In this section, we mainly validate the effectiveness of each component with detailed ablation studies on HICO-DET dataset. We use ResNet-50 as the backbone network to reduce experimental costs. + +**Baseline:** The baseline adopts the visual encoder from CLIP-RN50 to generate the vanilla HOI representation $v_{p}$ , which is directly used to predict the interaction scores $s_{p}$ . Only pairwise interaction classification loss $\mathcal{L}_{p}$ is used for model learning. + +HOI recognition: We augment the baseline with a HOI recognition network and observe the full mAP improves from 19.52 to 20.41, as reported in Exp 1 of Tab. 2. It suggests that the learnable knowledge bank $\mathcal{W}_T$ serves as a powerful classifier to perform image-level HOI recognition and update the visual encoder for better HOI representation. We visualize the learned parameters of knowledge bank in Appendix D to demonstrate its effectiveness. Furthermore, as in Exp 2, the performance slightly decreases from 20.31 to 20.09 when we freeze the training of the knowledge bank, indicating that joint learning of visual features and the knowledge bank is more appropriate for HOI detection. + +Knowledge Transfer Network (KTN): KTN explicitly transfers the CLIP meta-knowledge to pairwise HOI features. As a result, it contributes 0.55 Full mAP improvement (Exp 3 v.s. 
Exp 1) and most of the performance gains come from Non-rare classes. This result shows KTN is capable of extracting discriminative features from the relational knowledge bank to our HOI representation. We also study the effectiveness of the attention mechanism of KTN in Appendix E. + +Score fusion: In Tab. 2, we largely improve the Full mAP from 20.86 (Exp 3) to 22.40 (Exp 4) by fusing the global HOI scores $s_g$ to pairwise interaction score $s_p$ . As the HOI recognition network seamlessly inherits the visual-linguistic features from CLIP and directly adopts image labels as supervision, the global interaction scores are pretty accurate and largely enhance the pairwise scores, demonstrating its strong capabilities to cope with long-tailed and fine-grained HOI recognition. + +Self-taught Relatedness Classification (SRC): Self-taught classification aims to identify the relatedness between human and objects. The improvements from Exp 4 to ours show the effectiveness of our self-taught strategy, which is capable of figuring out the irrelevant human-object pairs and suppressing their interaction scores during inference. + +Combining KTN & SRC: The ablation results of Exp 5-7 in Tab. 2 show the KTN and SRC are able to facilitate each other. In detail, the SRC obtains 0.49 Full mAP improvement when the KTN is introduced (ours v.s. Exp 4), which is only 0.36 without KTN (Exp 5 v.s. baseline). 
Similarly, + +![](images/3c4dc5eb76fc4028aefbf775852ee762e247e291481c548a837a8639ba17ecdc.jpg) +(a) +wash_motorcycle +ours: 0.18, 0.355 +baseline: 0.0189 + +![](images/64d18385f4e7e147826a9c9ded9896f1574f4ad51300b8e50043ecca7c12edd0.jpg) +hold_horse:0.062,0.397,0.998 ride_horse:0.405,0.966,0.998 + +![](images/9df9bcc5f1461c0846c4218ef4173e3faefd7d6edad70696dd7e72628870ebc0.jpg) +(c) +sit_on_motorcycle: 0.515, 0.033, 0.950 + +![](images/3d058fc7e928eaacd2f473320fb34d4c09870479628f97693de6ed7388ecbfea.jpg) +(d) +sit_at_dining_table: 0.006, 0.993, 0.079 +sit_at_dining_table: 0.232, 0.993, 0.994 + +![](images/31a3d121ff52f3113f31974f177930970ca7ec73ecd4e03889b2f475115f9c2c.jpg) +paint_fire_hydrant: +ours: 0.203, 0.505, 0.955 +baseline: 0.0027 +Figure 3: Visualization of HOI detection results on HICO-DET test set. Red scores denote the negative HOI predictions. We mainly demonstrate the model's capabilities on four aspects: (a) coping with imbalanced HOI distribution; (b) distinguishing subtle differences among interaction types; (c) suppressing background HOI classes, and (d) pruning irrelevant human-object associations. The numbers reported are normalized pairwise interaction score, global HOI score and relatedness score. + +![](images/192708158f64b8c7105ece6aeedbf1e1b24fd0ec539a1403e13410348bb7f329.jpg) +repair truck: 0.23, 0.055, 0.979 +inspect truck: 0.48, 0.138, 0.979 + +![](images/5579212847887188404267a0686fd5fb59b35064bcc9bf3ca9fa886fb6aa1cfd.jpg) +stand_on_skateboard: 0.009, 0.001, 0.98 + +![](images/acddb4ab24f2368acd17d4d546335723573c1d65e5ad4a799246606a28382d6a.jpg) +hold_kite:0.039,0.892,0.238 hold_kite:0.478,0.892,0.995 + +the KTN contributes 0.78 Full mAP improvement with SRC (Exp 7 v.s. Exp 6), which is only 0.55 without SRC (Exp 3 v.s. Exp 1). + +Parameter initialization: Our visual encoder and knowledge bank are both initialized from CLIP. We also explore different parameter initialization strategy in Exp 8-10. 
Specifically, we initialize the visual encoder with a ResNet50-FPN pretrained on COCO detection task for the baseline (Exp 8), and the knowledge bank with random parameters (Exp 9) or embeddings of HOI labels from RoBERTa model (Exp 10) for the final model. We observe severe drops with all these initialization methods compared with ours, demonstrating the effectiveness and generalization ability of CLIP model. It is worth noting that the mAP of Rare classes decreases from 16.20 in Exp 8 to 15.57 in Exp 9, which suggests the randomly initialized knowledge bank even aggravates the imbalance issue in final model. + +# 4.5 QUALITATIVE RESULTS + +We show some qualitative results of our method in Fig.3. For each HOI prediction, we report (i) normalized pairwise interaction score, (ii) global HOI score and (iii) relatedness score for ours, and only pairwise interaction score for baseline. In Fig.3(a), ours interaction scores are more confident than baseline in Rare HOI classes, demonstrating the generalization ability of our CLIP-guided HOI representation. Besides, when incorporating relational knowledge bank into pairwise HOI representation, our method is capable of distinguishing the subtle differences among similar HOIs in Fig.3(b) (e.g., repair_truck:0.23 v.s. inspect_truck:0.48 in the bottom figure). Moreover, in Fig.3(c), the global branch suppresses background HOIs by predicting low global scores for them (e.g., the global HOI score is 0.033 for sit_onmotorcycle while the ground-truth is sit_on_bicycle). Finally, in Fig.3(d), our self-taught relatedness classification strategy shows strong capability at recognizing the ambiguous human-object associations (e.g., 0.079 v.s. 0.994 in the upper figure). + +# 5 CONCLUSION + +In this paper, we propose a bi-level knowledge integration strategy that incorporates the prior knowledge from CLIP for weakly-supervised HOI detection. 
Specifically, we exploit CLIP textual embeddings of HOI labels as a relational knowledge bank, which is adopted to enhance the HOI representation with an image-wise HOI recognition network and a pairwise knowledge transfer network. We further propose the addition of a self-taught binary pairwise relatedness classification loss to overcome ambiguous human-object association. Finally, our approach achieves the new state of the art on both HICO-DET and V-COCO benchmarks under the weakly supervised setting. + +# ACKNOWLEDGEMENT + +We acknowledge funding from Flemish Government under the Onderzoeksprogramma Artificiele Intelligentie (AI) Vlaanderen programme, Shanghai Science and Technology Program 21010502700 and Shanghai Frontiers Science Center of Human-centered Artificial Intelligence. + +# ETHICS STATEMENT + +Hereby, we consciously assure that our study is original work which has not been previously published elsewhere, and is not currently being considered for publication elsewhere. We do not have ethics risks as mentioned in the author guidelines. + +# REPRODUCIBILITY STATEMENT + +We use publicly available benchmarks, HICO-DET and V-COCO, to validate our method. Code is available at https://github.com/bobwan1995/Weakly-HOI. + +# REFERENCES + +Federico Baldassarre, Kevin Smith, Josephine Sullivan, and Hossein Azizpour. Explanation-based weakly-supervised learning of visual relations with graph networks. In ECCV, 2020. +Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020. +Yu-Wei Chao, Zhan Wang, Yugeng He, Jiaxuan Wang, and Jia Deng. HICO: A benchmark for recognizing human-object interactions in images. In ICCV, 2015. +Yu-Wei Chao, Yunfan Liu, Xieyang Liu, Huayi Zeng, and Jia Deng. Learning to detect human-object interactions. In WACV, 2018. +Leizhen Dong, Zhimin Li, Kunlun Xu, Zhijun Zhang, Luxin Yan, Sheng Zhong, and Xu Zou. 
Category-aware transformer network for better human-object interaction detection. arXiv preprint arXiv:2204.04911, 2022. +Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. arXiv preprint arXiv:2203.14940, 2022. +Chen Gao, Yuliang Zou, and Jia-Bin Huang. ican: Instance-centric attention network for human-object interaction detection. In BMVC, 2018. +Chen Gao, Jiarui Xu, Yuliang Zou, and Jia-Bin Huang. Drg: Dual relation graph for human-object interaction detection. In ECCV, 2020. +Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Open-vocabulary image segmentation. arXiv preprint arXiv:2112.12143, 2021. +Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2021. +Saurabh Gupta and Jitendra Malik. Visual semantic role labeling. arXiv preprint arXiv:1505.04474, 2015. +Tanmay Gupta, Alexander Schwing, and Derek Hoiem. No-frills human-object interaction detection: Factorization, layout encodings, and training techniques. In ICCV, 2019. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. +Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In ICCV2017, 2017. + +Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 961-970, 2015. +ASM Iftekhar, Hao Chen, Kaustav Kundu, Xinyu Li, Joseph Tighe, and Davide Modolo. What to look at and where: Semantic and spatial refined transformer for detecting human-object interactions. arXiv preprint arXiv:2204.00746, 2022. +Maximilian Ilse, Jakub Tomczak, and Max Welling. Attention-based deep multiple instance learning. In ICML, pp. 2127-2136, 2018. 
+Mert Kilickaya and Arnold Smeulders. Human-object interaction detection via weak supervision. arXiv preprint arXiv:2112.00492, 2021. +Bumsoo Kim, Junhyun Lee, Jaewoo Kang, Eun-Sol Kim, and Hyunwoo J. Kim. Hotr: End-to-end human-object interaction detection with transformers. In CVPR, 2021. +Bumsoo Kim, Jonghwan Mun, Kyoung-Woon On, Minchul Shin, Junhyun Lee, and Eun-Sol Kim. Mstr: Multi-scale transformer for end-to-end human-object interaction detection. arXiv preprint arXiv:2203.14709, 2022. +Suresh Kirthi Kumaraswamy, Miaojing Shi, and Ewa Kijak. Detecting human-object interaction with mixed supervision. In WACV, 2021. +Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yan-Feng Wang, and Cewu Lu. Transferable interactiveness prior for human-object interaction detection. In CVPR, 2019a. +Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yanfeng Wang, and Cewu Lu. Transferable interactiveness knowledge for human-object interaction detection. In CVPR, 2019b. +Yong-Lu Li, Xinpeng Liu, Han Lu, Shiyi Wang, Junqi Liu, Jiefeng Li, and Cewu Lu. Detailed 2d-3d joint representation for human-object interaction. In CVPR, 2020a. +Yong-Lu Li, Xinpeng Liu, Xiaoqian Wu, Yizhuo Li, and Cewu Lu. Hoi analysis: Integrating and decomposing human-object interaction. In NeurIPS, 2020b. +Yue Liao, Aixi Zhang, Miao Lu, Yongliang Wang, Xiaobo Li, and Si Liu. Gen-vlkt: Simplify association and enhance interaction understanding for hoi detection. arXiv preprint arXiv:2203.13954, 2022. +Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. +Wen Liu, Weixin Luo, Dongze Lian, and Shenghua Gao. Future frame prediction for anomaly detection - a new baseline. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. +Hitoshi Nishimura, Satoshi Komorita, Yasutomo Kawanishi, and Hiroshi Murase. 
Sdof-tracker: Fast and accurate multiple human tracking by skipped-detection and optical-flow. arXiv preprint arXiv:2106.14259, 2021. +Guansong Pang, Cheng Yan, Chunhua Shen, van den Hengel Anton, and Xiao Bai. Self-trained deep ordinal regression for end-to-end video anomaly detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020. +Alessandro Prest, Cordelia Schmid, and Vittorio Ferrari. Weakly supervised learning of interactions between humans and objects. IEEE TPAMI, 2011. +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021a. + +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021b. +Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. arXiv preprint arXiv:1506.01497, 2015. +Masato Tamura, Hiroki Ohashi, and Tomoaki Yoshinaga. Qpic: Query-based pairwise human-object interaction detection with image-wide contextual information. In CVPR, 2021. +Tina, Anmol Kumar Sharma, Siddharth Tomar, and Kapil Gupta. Various approaches of human activity recognition: A review. In International Conference on Computing Methodologies and Communication(ICCMC), 2021. +Oytun Ulutan, A S M Iftekhar, and B. S. Manjunath. Vsgnet: Spatial attention network for detecting human object interactions using graph convolutions. In CVPR, 2020. +Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 2008. URL http://jmlr.org/papers/v9/vandermaaten08a.html. +Mrabti Wafae, Baibai Kaoutar, Bellach Benaissa, Oulad Haj Thami Rachid, and Tairi Hamid. 
Human motion tracking: A comparative study. Procedia Computer Science, 148:145-153, 2019. +Bo Wan, Desen Zhou, Yongfei Liu, Rongjie Li, and Xuming He. Pose-aware multi-level feature network for human object interaction detection. In ICCV, 2019. +Aixi Zhang, Yue Liao, Si Liu, Miao Lu, Yongliang Wang, Chen Gao, and Xiaobo Li. Mining the benefits of two-stage and one-stage hoi detection. NeurIPS, 2021a. +Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Efficient two-stage detection of human-object interactions with a novel unary-pairwise transformer. arXiv preprint arXiv:2112.01838, 2021b. +Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Spatially conditioned graphs for detecting human-object interactions. In ICCV, 2021c. +Hanwang Zhang, Zawlin Kyaw, Jinyang Yu, and Shih-Fu Chang. Ppr-fcn: Weakly supervised visual relation detection via parallel pairwise r-fcn. In ICCV, 2017. +Desen Zhou, Zhichao Liu, Jian Wang, Leshan Wang, Tao Hu, Errui Ding, and Jingdong Wang. Human-object interaction detection via disentangled transformer. arXiv preprint arXiv:2204.09290, 2022. +Penghao Zhou and Mingmin Chi. Relation parsing neural network for human-object interaction detection. In ICCV, 2019. +Tianfei Zhou, Wenguan Wang, Siyuan Qi, Haibin Ling, and Jianbing Shen. Cascaded human-object interaction recognition. In CVPR, 2020. + +# APPENDIX + +In this appendix, we first describe the spatial feature generation, and then supplement more experimental results of different CLIP knowledge integration strategies for weakly-supervised HOI detection. For Explanation-HOI (Baldassarre et al., 2020), we further clarify the difference between their mAP evaluation protocol and the standard one. Finally, we demonstrate the limitations, potential negative societal impacts as well as the result error bars of our method. 
+ +# A THE ADVANTAGE OF OUR HOI REPRESENTATION + +To verify the improvement obtained with our CLIP-based HOI representation, we visualize the HOI representation $\hat{v}_p$ in feature space with t-SNE (van der Maaten & Hinton, 2008). For clarity, we randomly sample 80 HOI categories, and collect 50 samples for each category. For comparison, we also demonstrate the object-based HOI representation derived from 'Exp 9' in Tab.2 (i.e., the model without CLIP knowledge and using a random knowledge bank). As shown in Fig.4, we observe that CLIP-based HOI representations for different HOI categories are diverse and well separated in feature space, which is better for HOI detection. In contrast, the object-based representations are not well separated in feature space (see the red box region in Fig.4b). Besides, the experimental results in the ablation study (ours vs. 'Exp 9') also validate the advantage of CLIP-based HOI representation, improving full mAP from 19.61 to 22.89. + +# B ABLATION ON CLIP KNOWLEDGE INTEGRATION + +To further demonstrate the superiority of our CLIP knowledge integration strategy, we study several proven techniques for CLIP knowledge transfer in Tab. 3. In $Abl1$ , for each human-object pair, we directly infer the HOI scores with CLIP by computing the cross-modal similarities between their visual union region and the HOI prompts. Without introducing any HOI priors, the promising results indicate the powerful generalization ability of CLIP and motivate the design of incorporating CLIP knowledge for weakly-supervised HOI detection. In $Abl2$ , we duplicate the experiment setting and results from $Exp8$ in Tab. 2 of the main paper. It is a simplified baseline model but initializes the visual encoder with a ResNet50-FPN pretrained on the COCO detection task. Then we introduce three different CLIP knowledge transfer strategies (Abl 3-4 and ours) based on $Abl2$ . 
+ +In Abl 3, we directly enhance baseline scores in Abl 2 with the CLIP similarity scores in Abl 1 on the inference stage. Without bells and whistles, we obtain 1.12 gain in Full mAP. + +Furthermore, in Abl 4, we adopt a similar knowledge transfer strategy as GEN-VLKT (Liao et al., 2022), where we initialize the HOI classifier $\mathcal{F}_P$ with HOI prompt and regularize the global HOI representation with CLIP image feature $v_{g}$ . In detail, we first compute the global HOI representation $v_{mean}$ with mean pooling on all pairwise HOI representations, i.e., $v_{mean} = MeanPool(\{v_p^m\}_{m=1}^M)$ . Here $v_p^m$ is the holistic HOI representation (c.f. Sec. 3.2.3 in the main paper) for $m$ -th human-object pair. Then we develop an additional $L2$ loss $\mathcal{L}_{reg}$ to transfer the knowledge from CLIP to HOI representations: $\mathcal{L}_{reg} = L2(v_{mean}, v_g)$ . The performance even decreases slightly from 19.44 to 19.39, which might be caused by the incompatibility of parameters between backbone network (ResNet50-FPN pretrained on COCO) and $\mathcal{F}_P$ (HOI prompt embeddings from CLIP). When directly applying the knowledge transfer strategy of GEN-VLKT to a weakly-supervised setting, it is difficult to map the unmatched HOI representation and classification weights to a joint space as the supervisory signals are noisy. + +Finally, our approach achieves the best performance compared with other strategies, demonstrating the effectiveness of our bi-level knowledge integration strategy. + +# C SPATIAL FEATURE GENERATION + +Following (Zhang et al., 2021c), we generate the spatial feature $v_{sp} \in \mathbb{R}^{D}$ for each pair of human-object proposals $(\mathbf{x}_h, \mathbf{x}_o)$ . 
Specifically, we first compute the bounding boxes information for $\mathbf{x}_h$ and $\mathbf{x}_o$ separately, including their center coordinates, widths, heights, aspect ratios and areas, all + +![](images/ec0d2877c25887ff4609d620312019b28f059c4f6a7a3f591043ace767320b71.jpg) +Figure 4: The t-SNE visualization of CLIP-based HOI representation and object-based HOI representation. + +![](images/9c33688555a1473b9211dc245403d73ceed296cf44e1d2b567f377ee0add3094.jpg) + +Table 3: Ablation of different CLIP knowledge integration strategies on HICO-DET dataset. + +
MethodsExperimental settingmAP (%)
FullRareNon-Rare
Abl 1CLIP inference score11.8413.7211.27
Abl 2RN50-FPN (COCO) + FP random init.19.4416.2020.41
Abl 3RN50-FPN (COCO) + FP random init. + CLIP inference score20.5618.1921.27
Abl 4RN50-FPN (COCO) + FP HOI prompt init. + CLIP visual regularization19.3915.1220.66
oursCLIP RN50 + HOI recognition + KTN + self-taught relatedness cls.22.8922.4123.03
+ +normalized by the corresponding dimension of the image. We also encode their relative spatial relations by estimating the intersection over union (IoU), a ratio of the area of $\mathbf{x}_h$ and $\mathbf{x}_o$ , a directional encoding and the distance between center coordinates of $\mathbf{x}_h$ and $\mathbf{x}_o$ . We concatenate all the above-mentioned preliminary spatial cues and obtain a spatial encoding $\mathbf{p} \in \mathbb{R}_{+}^{18}$ . To encode the second and higher order combinations of different terms, the spatial encoding is concatenated with its logarithm and then embedded to $v_{sp}$ : $v_{sp} = \mathcal{F}_{sp}([p; \log(p + \epsilon)])$ . Where $\epsilon > 0$ is a small constant to guarantee the numerical stability, and $\mathcal{F}_{sp}$ is a multi-layer fully connected network. + +# D VISUALIZATION OF HOI KNOWLEDGE BANK $\mathcal{W}_T$ + +To further understand $\mathcal{W}_T$ , we visualize the knowledge bank features initialized by CLIP (Fig.5(a)) and learned from scratch (Fig.5(b)) in feature space by t-SNE. It is worth noting that the knowledge bank learned from scratch is derived from 'Exp 9' in Tab.2. As shown in Fig.5, we observe that the knowledge features of HOI classes initialized with CLIP are more discriminative than random initialized, and show a better clustering result (e.g. the HOI classes in red box regions). + +# E DIFFERENT DESIGNS OF KTN + +To further validate the effectiveness of our attention mechanism in KTN, we compare our design with some variants in Tab. 4. First of all, we directly encode the relation-level features within the union region to enhance the pairwise representation rather than the external knowledge bank. As a result, the mAP even decreases a little bit from 20.75 (Exp 6) to 20.69 (Exp 11). The potential reason is that the union region contains more ambiguous visual relations and background clutters, which are difficult to learn in a weak setting. 
Besides, we also explore different normalization strategies in KTN. The results in Tab. 4 demonstrate that Softmax operation (ours) performs better than uniform attention (Exp 12) or Sigmoid operation (Exp 13), indicating our attention mechanism is non-trivial and more effective on aggregating the relational cues from HOI knowledge bank. + +![](images/2e8f35e8efe169ddf8e8f37e0ef00a2cb7d7406a4aefb85cf71fe3778f5da5f5.jpg) +Figure 5: The t-SNE visualization of knowledge bank $\mathcal{W}_T$ . (a) is the knowledge bank distribution in feature space based on our CLIP-based HOI representation while (b) is the knowledge bank learned from scratch (the model in Tab.2-Exp 9) based on object-based HOI representation. + +Table 4: Different network design of Knowledge Transfer Network (KTN). + +
MethodsParameter initializationCLIP KnowledgemAP (%)
Backboneknowledge bankHOI recognitionKTNscore fusionSRCFullRareNon-Rare
Exp 11CLIP RN50CLIP Text✓ (union)-20.6919.5521.04
Exp 12CLIP RN50CLIP Text✓ (uniform)-21.1419.8221.53
Exp 13CLIP RN50CLIP Text✓ (sigmoid)-21.2819.2721.88
oursCLIP RN50CLIP Text-21.5320.0521.97
+ +# F TOP-K POSITIVE PAIR SELECTION FOR SRC + +In this section we show the results of selecting top-2 and top-5 pairs as positive in Tab. 5. We notice that there is a small performance drop, which is likely to be caused by mislabeling more negative pairs as positive, resulting in model learning with more noise. + +# G THE PROMPT GENERATION FOR V-COCO + +For the V-COCO dataset, each action has two different semantic roles ('instrument' and 'object') for different objects, like 'cut cake' and 'cut with knife'. We use two different prompt templates to convert a HOI label to a language sentence. For the former one, we take template "a person verb a/an object", and use "a person verb with object" for the latter. + +# H EVALUATION METRIC FOR V-COCO + +V-COCO dataset has two scenarios for role AP evaluation. In Tab. 1, APS1&2 refer to 'Average Precision in scenario 1&2'. V-COCO dataset has two different annotations for HOIs: the first is a + +Table 5: Ablation of top-K positive pair selection for SRC on HICO-DET dataset. + +
MethodsmAP (%)
FullRareNon-Rare
Top-522.4521.6122.70
Top-222.4921.8322.69
ours (Top-1)22.8922.4123.03
+ +![](images/5ea9aca00229807cfcbed7f05dd2e53f142325eae021f722c0c643c640bf02ee.jpg) +(a) Evaluation protocol in Explanation-HOI + +![](images/b1ffa02461492b359ad767aae489aa3222ac27a003e81f6fd5b1f59f8392a3fe.jpg) +(b) The correct evaluation protocol +Figure 6: The screenshot of the evaluation code in Explanation-HOI. (a) is the original code while (b) is the correct one based on the standard evaluation code. We use red rectangle boxes to highlight the most important differences + +full label of (human location, interaction type, object location, object type), and the second misses target object (also denoted as 'role' in the original paper (Gupta & Malik, 2015)) annotations, and the label only includes (human location, interaction type). For the second case, there are two different evaluation protocols (scenarios) when taking a prediction as correct $^4$ : In scenario 1, it requires the interaction is correct & the overlap between the human boxes is $> 0.5$ & the corresponding role is empty, which is more restricted; in scenario 2, it only requires the interaction is correct & the overlap between the person boxes is $> 0.5$ . + +# I EVALUATION OF EXPLANATION-HOI + +The Explanation-HOI (Baldassarre et al., 2020) has a misunderstanding of mAP evaluation protocol. As shown in Fig.6(a) L200-L205, the Explanation-HOI only takes some specific predicted HOIs into the evaluation process, which has the same HOI labels as groundtruth HOIs. Thus, they ignore lots of false-positive HOI predictions when calculating mAP, leading to an untrustable high mAP score (reported in their original paper). In Fig.6(b) L204-L208, we evaluate all predicted HOIs, which is the same as the standard evaluation protocol proposed in HICO-DET (Chao et al., 2015). The correct results have already been reported in Tab.1 in the main paper. + +# J LIMITATIONS + +As described in Sec. 3.1, we adopt an external object detector to generate human-object proposals and then recognize their interactions. 
Consequently, our method is faced with two limitations brought by erroneous object detection results. Firstly, the positive human-object pairs are not recalled if the human or object proposals are not detected. Secondly, the proposals are kept fixed during learning, which leads to the problem of inaccurate localization and object types. + +# K RISK OF USING CLIP + +For all the methods that adopt CLIP in their model design, there is a potential risk of data leakage as CLIP has seen quite a lot of data during pretraining. For the HOI detection task, since we cannot get access to the CLIP dataset and do not know the exact overlap between CLIP and HOI benchmarks (i.e., HICO-DET and V-COCO), we carefully read Sec. 5 (Data Overlap Analysis) of the CLIP paper (Radford et al., 2021b), including an analysis of the overlap between its dataset and 35 popular datasets (HICO-DET and V-COCO are not included). It shows the overlap is small (median is $2.2\%$ and average is $3.2\%$ ) and the influence is limited ("overall accuracy is rarely shifted by more than $0.1\%$ with only 7 datasets above this threshold"). Besides, the training text accompanying an image in the CLIP dataset is often not related to the HOI annotations. Thus, we think the risk is limited. + +# L LICENSE + +The licenses of the assets used in our work are listed below, including open-sourced CLIP model, HICO-DET dataset, and V-COCO dataset. As for HICO-DET, we cannot find its license in the paper and the official project page. Thus we provide the official project page instead here for clarity. + +1. CLIP: https://github.com/openai/CLIP MIT License +2. VCOCO: https://github.com/s-gupta/v-coco/ MIT License +3. 
HICO-DET: http://www-personal.umich.edu/ ywchao/hico/ No license \ No newline at end of file diff --git a/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/images.zip b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..a3f8a1ea8cd50f977c21ea139d732aa93a22f5f7 --- /dev/null +++ b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8743b72b5330c10a748f4d39b6b85416151fdcd6e6429b35955fd049a880da53 +size 769759 diff --git a/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/layout.json b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..52a2aadfabafdde86977ec97050791da52825d1e --- /dev/null +++ b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/layout.json @@ -0,0 +1,12731 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 507, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 507, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 507, + 116 + ], + "type": "text", + "content": "WEAKLY-SUPERVISED HOI DETECTION VIA PRIOR-GUIDED BI-LEVEL REPRESENTATION LEARNING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "spans": [ + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": "Bo Wan " + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{1,*}" + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": ", Yongfei Liu " + }, + { + 
"bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": ", Desen Zhou " + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": ", Tinne Tuytelaars " + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": ", Xuming He " + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{2,3}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "spans": [ + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "text", + "content": " KU Leuven, Leuven, Belgium; " + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "text", + "content": " ShanghaiTech University, Shanghai, China \n" + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "text", + "content": " Shanghai Engineering Research Center of Intelligent Vision and Imaging {bwan, tinne.tuytelaars}@esat.kuleuven.be {liuyf3,zhouds,hexm}@shanghaitech.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 276, + 206, + 335, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 206, + 335, + 217 + ], + "spans": [ + { + "bbox": [ + 276, + 206, + 335, + 217 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 4 + }, + 
{ + "bbox": [ + 140, + 224, + 471, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 224, + 471, + 378 + ], + "spans": [ + { + "bbox": [ + 140, + 224, + 471, + 378 + ], + "type": "text", + "content": "Human object interaction (HOI) detection plays a crucial role in human-centric scene understanding and serves as a fundamental building-block for many vision tasks. One generalizable and scalable strategy for HOI detection is to use weak supervision, learning from image-level annotations only. This is inherently challenging due to ambiguous human-object associations, large search space of detecting HOIs and highly noisy training signal. A promising strategy to address those challenges is to exploit knowledge from large-scale pretrained models (e.g., CLIP), but a direct knowledge distillation strategy (Liao et al., 2022) does not perform well on the weakly-supervised setting. In contrast, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge at both image level and HOI instance level, and adopt a self-taught mechanism to prune incorrect human-object associations. Experimental results on HICO-DET and V-COCO show that our method outperforms the previous works by a sizable margin, showing the efficacy of our HOI representation." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 394, + 206, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 394, + 206, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 394, + 206, + 406 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 414, + 506, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 414, + 506, + 470 + ], + "spans": [ + { + "bbox": [ + 104, + 414, + 506, + 470 + ], + "type": "text", + "content": "Human object interaction detection aims to simultaneously localize the human-object regions in an image and to classify their interactions, which serves as a fundamental building-block in a wide range of tasks in human-centric artificial intelligence, such as human activity recognition (Heilbron et al., 2015; Tina et al., 2021), human motion tracking (Wafae et al., 2019; Nishimura et al., 2021) and anomalous behavior detection (Liu et al., 2018; Pang et al., 2020)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "text", + "content": "Usually, HOI detection adopts a supervised learning paradigm (Gupta & Malik, 2015; Chao et al., 2018; Wan et al., 2019; Gao et al., 2020; Zhang et al., 2021c). This requires detailed annotations (i.e. human and object bounding boxes and their interaction types) in the training stage. However, such HOI annotations are expensive to collect and prone to labeling errors. In contrast, it is much easier to acquire image-level descriptions of target scenes. Consequently, a more scalable strategy for HOI detection is to learn from weak annotations at the image level, known as weakly-supervised HOI detection (Zhang et al., 2017). 
Learning under such weak supervision is particularly challenging mainly due to the lack of accurate visual-semantic associations, large search space of detecting HOIs and highly noisy training signal from only image level supervision." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 579, + 506, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 712 + ], + "type": "text", + "content": "Most existing works (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) attempt to tackle the weakly-supervised HOI detection in a Multiple Instance Learning (MIL) framework (Ilse et al., 2018). They first utilize an object detector to generate human-object proposals and then train an interaction classifier with image-level labels as supervision. Despite promising results, these methods suffer from several weaknesses when coping with diverse and fine-grained HOIs. Firstly, they usually rely on visual representations derived from the external object detector, which mainly focus on the semantic concepts of the objects in the scene and hence are insufficient for capturing the concept of fine-grained interactions. Secondly, as the image-level supervision tends to ignore the imbalance in HOI classes, their representation learning is more susceptible to the dataset bias and dominated by frequent interaction classes. Finally, these methods learn the HOI concepts from a candidate set generated by pairing up all the human and object proposals, which is highly noisy and often leads to erroneous human-object associations for many interaction classes." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 436, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 436, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 436, + 732 + ], + "type": "text", + "content": "*Equal Contribution. Code is available at https://github.com/bobwan1995/Weakly-HOI." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": "To address the aforementioned limitations, we introduce a new weakly-supervised HOI detection strategy. It aims to incorporate the prior knowledge from pretrained foundation models to facilitate the HOI learning. In particular, we propose to integrate CLIP (Radford et al., 2021b), a large-scale vision-language pretrained model. This allows us to exploit the strong generalization capability of the CLIP representation for learning a better HOI representation under weak supervision. 
Compared to the representations learned by the object detector, the CLIP representations are inherently less object-centric, hence more likely to incorporate also aspects about the human-object interaction, as evidenced by Appendix A. Although a few works have successfully exploited CLIP for supervised HOI detection in the past, experimentally we find they do not perform well in the more challenging weakly-supervised setting (c.f. Appendix.B). We hypothesize this is because they only transfer knowledge at image level, and fail without supervision at the level of human-object pairs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "content": "To this end, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge of HOIs at two different levels. First, at the image level, we utilize the visual and linguistic embeddings of the CLIP model to build a global HOI knowledge bank and generate image-level HOI predictions. In addition, for each human-object pair, we enrich the region-based HOI features by the HOI representations in the knowledge bank via a novel attention mechanism. Such a bi-level framework enables us to exploit the image-level supervision more effectively through the shared HOI knowledge bank, and to enhance the interaction feature learning by introducing the visual and text representations of the CLIP model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 302, + 506, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 302, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 506, + 402 + ], + "type": "text", + "content": "We instantiate our bi-level knowledge integration strategy as a modular deep neural network with a global and local branch. 
Given the human-object proposals generated by an off-the-shelf object detector, the global branch starts with a backbone network to compute image feature maps, which are used by a subsequent HOI recognition network to predict the image-wise HOI scores. The local branch builds a knowledge transfer network to extract the human-object features and augment them with the CLIP-guided knowledge bank, followed by a pairwise classification network to compute their relatedness and interaction scores1. The relatedness scores are used to prune incorrect human-object associations, which mitigates the issue of noisy proposals. Finally, the outputs of the two branches are fused to generate the final HOI scores." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 407, + 506, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 506, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 506, + 485 + ], + "type": "text", + "content": "To train our HOI detection network with image-level annotations, we first initialize the backbone network and the HOI knowledge bank from the CLIP encoders, and then train the entire model in an end-to-end manner. In particular, we devise a novel multi-task weak supervision loss consisting of three terms: 1) an image-level HOI classification loss for the global branch; 2) an MIL-like loss for the interaction scores predicted by the local branch, which is defined on the aggregate of all the human-object pair predictions; 3) a self-taught classification loss for the relatedness of each human-object pair, which uses the interaction scores from the model itself as supervision." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 490, + 507, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 507, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 507, + 578 + ], + "type": "text", + "content": "We validate our methods on two public benchmarks: HICO-DET (Chao et al., 2018) and V-COCO (Gupta & Malik, 2015). The empirical results and ablative studies show our method consistently achieves state-of-the-art performance on all benchmarks. In summary, our contributions are three-fold: (i) We exploit the CLIP knowledge to build a prior-enriched HOI representation, which is more robust for detecting fine-grained interaction types and under imbalanced data distributions. (ii) We develop a self-taught relatedness classification loss to alleviate the problem of mis-association between human-object pairs. (iii) Our approach achieves state-of-the-art performance on the weakly-supervised HOI detection task on both benchmarks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 586, + 216, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 216, + 598 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 216, + 598 + ], + "type": "text", + "content": "2 RELATED WORKS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 602, + 507, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 602, + 507, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 602, + 507, + 703 + ], + "type": "text", + "content": "HOI detection: Most works on supervised HOI detection can be categorized in two groups: two-stage and one-stage HOI detection. Two-stage methods first generate a set of human-object proposals with an external object detector, then classify their interactions. 
They mainly focus on exploring additional human pose information (Wan et al., 2019; Li et al., 2020a; Gupta et al., 2019), pairwise relatedness (Li et al., 2019a; Zhou et al., 2020) or modeling relations between object and human (Gao et al., 2020; Zhang et al., 2021c; Ulutan et al., 2020; Zhou & Chi, 2019), to enhance the HOI representations. One-stage methods predict human & object locations and their interaction types simultaneously in an end-to-end manner, which are currently dominated by transformer-based architectures (Carion et al., 2020; Kim et al., 2022; Dong et al., 2022; Zhang et al., 2021a;b)." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "inline_equation", + "content": "{}^{1}" + }, + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "text", + "content": " Relatedness indicates whether a human-object pair has a relation, and interaction scores are multi-label scores on the interaction space." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 125, + 81, + 483, + 258 + ], + "blocks": [ + { + "bbox": [ + 125, + 81, + 483, + 258 + ], + "lines": [ + { + "bbox": [ + 125, + 81, + 483, + 258 + ], + "spans": [ + { + "bbox": [ + 125, + 81, + 483, + 258 + ], + "type": "image", + "image_path": "e651bacab4fc1cd655ae1937f2758bab90cffc62a4962ef321be81b8fb18d4d7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 262, + 504, + 283 + ], + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 283 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 283 + ], + "type": "text", + "content": "Figure 1: Model Overview: There are four modules in our network: a backbone Network, an HOI recognition network, a knowledge transfer network and a pairwise classification network." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 295, + 504, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 329 + ], + "type": "text", + "content": "Supervised methods show superior performance, but require labor-intensive HOI annotations that are infeasible to obtain in many scenarios. Thus, in this work we focus on HOI detection under weak supervision." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 335, + 506, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 335, + 506, + 490 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 506, + 490 + ], + "type": "text", + "content": "Weakly-supervised HOI detection: Weakly-supervised HOI detection aims to learn instance-level HOIs with only image-level annotations. (Prest et al., 2011) learns a set of binary action classifiers based on detected human-object pairs, where human proposal is obtained from a part-based human detector and object is derived from the relative position with respect to the human. PPR-FCN (Zhang et al., 2017) employs a parallel FCN to perform pair selection and classification. Explainable-HOI (Baldassarre et al., 2020) adopts graph nets to capture relations for better image-level HOI recognition, and uses backward explanation for instance-level HOI detection. MX-HOI (Kumaraswamy et al., 2021) proposes a momentum-independent learning strategy to utilize strong & weak labels simultaneously. AlignFormer (Kilickaya & Smeulders, 2021) proposes an align layer in transformer framework, which utilizes geometric & visual priors to generate pseudo alignments for training. Those methods focus on learning HOIs with advanced network structures or better pseudo alignments. However, they still suffer from noisy human-object associations and ambiguous interaction types. To address those challenges, we exploit prior knowledge from CLIP to build a discriminative HOI representations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 496, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 506, + 608 + ], + "type": "text", + "content": "Knowledge exploitation of pretrained V&L models: Recently, CLIP (Radford et al., 2021a) model has demonstrated strong generalization to various downstream tasks (Ghiasi et al., 2021; Du et al., 2022; Gu et al., 2021). Some works also explore CLIP knowledge in supervised HOI detection, e.g., CATN (Dong et al., 2022) initializes the object query with category-aware semantic information from CLIP text encoder, and GEN-VLTK (Liao et al., 2022) employs image feature distillation and classifier initialization with HOI prompts. However, they only exploit CLIP knowledge at a coarse level and require detailed annotations of human-object pairs. It is non-trivial to extend such strategies to the weak supervision paradigm due to highly noisy training signals. In our work, we build a deep connection between CLIP and HOI representation by incorporating the prior knowledge of HOIs at both image and HOI instance levels." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 618, + 173, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 618, + 173, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 173, + 629 + ], + "type": "text", + "content": "3 METHOD" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 637, + 312, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 637, + 312, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 312, + 647 + ], + "type": "text", + "content": "3.1 PROBLEM SETUP AND METHOD OVERVIEW" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": "Problem setup Given an input image " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ", the task of weakly-supervised HOI detection aims to localize and recognize the human-object interactions, while only the corresponding image-level HOI categories are available for training. 
Formally, we aim to learn a HOI detector " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ", which takes an image " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " as input and generates a set of tuples " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{O} = \\{(\\mathbf{x}_h,\\mathbf{x}_o,c_o,a_{h,o},R_{h,o}^a)\\}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{O} = \\mathcal{M}(I)" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ". Here each tuple indicates a HOI instance, in which " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h,\\mathbf{x}_o\\in \\mathbb{R}^4" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " represent human and object bounding boxes, " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "c_{o}\\in \\{1,\\dots,C\\}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " is the object category, " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "a_{h,o}\\in \\{1,\\dots,A\\}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " denotes the interaction class associated with " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " and " + }, 
+ { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "R_{h,o}^{a}\\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " is the HOI score. For the weakly-supervised setting," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": "each training image is annotated with a set of HOI categories " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "\\mathcal{R} = \\{r^{*}\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": " at the image level only, where " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "r^{*} \\in \\{1, \\dots, N\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": " is an index to a combination of ground-truth object 
category " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "c^{*}" + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": " and interaction category " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "a^{*}" + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": " denotes the number of all possible HOI combinations defined on the dataset." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 118, + 506, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 118, + 506, + 164 + ], + "spans": [ + { + "bbox": [ + 104, + 118, + 506, + 164 + ], + "type": "text", + "content": "Method Overview As we lack supervision for the HOI locations, we adopt a typical hypothesize-and-recognize strategy (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) for HOI detection: first we generate a set of human and object proposals with an off-the-shelf object detector (Ren et al., 2015) and then predict the interaction class for all human-object combinations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 168, + 506, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 168, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 506, + 236 + ], + "type": "text", + "content": "Unlike other methods, we do not re-use the feature maps of the object or human detector - we only keep the bounding boxes. Instead, we learn a new representation optimized for the HOI task. This is challenging under the weak setting as the model learning is noisy, but feasible by leveraging the rich semantic knowledge from a pretrained large-scale multimodal model, like CLIP. 
However, the naive knowledge integration strategies for supervised setting fail when directly applied in the weak setting, as evidenced by our experiments in Appendix.B" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 240, + 506, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 240, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 506, + 308 + ], + "type": "text", + "content": "Our framework adopts two philosophies to address the challenges in the weakly-supervised HOI task: the first is to integrate the prior knowledge into discriminative representation learning, and the second is to suppress noise in learning. For the first philosophy, we utilize the prior knowledge from CLIP to guide the representation learning in both global image-level and fine-grained human-object pairs, which is instantiated by a bi-level knowledge integration strategy. For the second philosophy, we adopt an effective self-taught learning mechanism to suppress the irrelevant pairs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "content": "We instantiate the bi-level knowledge integration strategy with a two-branch deep network. Our detection pipeline starts with a set of human proposals with detection scores " + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "inline_equation", + "content": "\\{(\\mathbf{x}_h, s_h)\\}" + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "content": ", and object proposals with their categories and detection scores " + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "inline_equation", + "content": "\\{(\\mathbf{x}_o, c_o, s_o)\\}" + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "content": ". 
Then, the global branch performs image-level HOI recognition by utilizing a CLIP-initialized HOI knowledge bank as a classifier. This allows us to exploit both visual and text encoders from CLIP to generate better HOI representations. In parallel, for each human-object pair " + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_h, \\mathbf{x}_o)" + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "content": ", the local branch explicitly augments the pairwise HOI features with the HOI knowledge bank to then identify their relatedness and interaction classes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 395, + 506, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 395, + 506, + 440 + ], + "spans": [ + { + "bbox": [ + 104, + 395, + 506, + 440 + ], + "type": "text", + "content": "To train our model, we use a multi-task loss, which incorporates a HOI recognition loss defined on image-wise HOIs for the visual encoder and knowledge bank finetuning, and a self-taught relatedness classification for suppressing the background human-object associations, on top of the standard MIL-based loss. We first present model details in Sec.3.2, followed by the training strategy in Sec.3.3." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 443, + 201, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 443, + 201, + 454 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 201, + 454 + ], + "type": "text", + "content": "3.2 MODEL DESIGN" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 458, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 458, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 458, + 506, + 581 + ], + "type": "text", + "content": "Now we introduce our bi-level knowledge integration strategy, where the aim is to exploit CLIP textual embeddings of HOI labels as a HOI knowledge bank for the HOI representation learning, and to transfer such knowledge both at image level as well as at the level of human-object pairs for interaction predictions. Specifically, as shown in Fig. 1, our network consists of a global branch and a local branch. The global branch includes a backbone network (Sec.3.2.1) that extracts image features, and a HOI recognition network (Sec.3.2.2) that uses a HOI knowledge bank based on CLIP to predict image-level HOI scores. For each human-object proposal generated by an off-the-shelf object detector, the local branch employs a knowledge transfer network (Sec.3.2.3) to compute its feature representation with enhancement from the HOI knowledge bank, and a pairwise classification network (Sec.3.2.4) to compute their relatedness and interaction scores. Finally, we generate the final HOI detection scores by combining global HOI scores with local predictions (Sec. 3.2.5)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": "HOI Knowledge Bank Generation CLIP builds a powerful vision-language model by pretraining on large-scale image-text pairs. It consists of a visual encoder " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_V" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": " and textual encoder " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_T" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": ", mapping both visual and textual inputs to a shared latent space. Here, we exploit CLIP to generate a HOI knowledge bank. We take a similar prompt strategy as in CLIP, adopting a common template 'a person {verb} a/an {object}' to convert HOI labels into text prompts (e.g., converting 'drive car' to 'a person driving a car'). Then we input the sentences into the CLIP textual encoder " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_T" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": " to initialize the HOI knowledge bank " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T \\in \\mathbb{R}^{N \\cdot D}" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": " denoting the feature dimension. 
One can think of " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": " as a set of 'prototypes' in feature space, one for each HOI in the dataset." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 682, + 318, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 682, + 318, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 682, + 318, + 693 + ], + "type": "text", + "content": "3.2.1 GLOBAL BRANCH: BACKBONE NETWORK" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": "To incorporate CLIP for feature extraction, we initialize the backbone network (e.g., a ResNet-101 (He et al., 2016)) with CLIP's visual encoder " + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_V" + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": " to generate a feature map " + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": " for the input image " + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": ". We further compute a global feature vector " + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "v_{g} \\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": " with self-attention operation (Radford et al., 2021b)." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 82, + 310, + 185 + ], + "blocks": [ + { + "bbox": [ + 136, + 82, + 310, + 185 + ], + "lines": [ + { + "bbox": [ + 136, + 82, + 310, + 185 + ], + "spans": [ + { + "bbox": [ + 136, + 82, + 310, + 185 + ], + "type": "image", + "image_path": "f3349fb8251160198ddf86acc80f35bdf49add88f16e617e0264756fb346c105.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 170, + 186, + 283, + 196 + ], + "lines": [ + { + "bbox": [ + 170, + 186, + 283, + 196 + ], + "spans": [ + { + "bbox": [ + 170, + 186, + 283, + 196 + ], + "type": "text", + "content": "(a) knowledge transfer network" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 312, + 82, + 468, + 185 + ], + "blocks": [ + { + "bbox": [ + 312, + 82, + 468, + 185 + ], + "lines": [ + { + "bbox": [ + 312, + 82, + 468, + 185 + ], + "spans": [ + { + "bbox": [ + 312, + 82, + 468, + 185 + ], + "type": "image", + "image_path": "43ccc2a33ded6bf714fc9c749ae5107721836e3e4420f610ab7a8b7c1b4c5370.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 186, + 462, + 196 + ], + "lines": [ + { + "bbox": [ 
+ 318, + 186, + 462, + 196 + ], + "spans": [ + { + "bbox": [ + 318, + 186, + 462, + 196 + ], + "type": "text", + "content": "(b) pseudo relatedness label generation" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 201, + 504, + 232 + ], + "lines": [ + { + "bbox": [ + 104, + 201, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 504, + 232 + ], + "type": "text", + "content": "Figure 2: The knowledge transfer network explicitly transfers the discriminative relation-level semantic knowledge derived from CLIP to the pairwise HOI representations. Pseudo relatedness label generation uses the pairwise interaction scores to generate the pseudo association labels for self-taught relatedness classification" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 245, + 352, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 245, + 352, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 352, + 255 + ], + "type": "text", + "content": "3.2.2 GLOBAL BRANCH: HOI RECOGNITION NETWORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": "We perform an image-wise HOI recognition task with the HOI knowledge bank " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": ". 
We obtain global HOI scores " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "s_g \\in \\mathbb{R}^N" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": " by computing the inner product between the image feature " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "v_g" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": " and the knowledge bank " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "s_g = \\mathcal{W}_T \\times v_g" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": " is matrix multiplication. This has the effect of adapting the visual encoder and knowledge bank parameters to the HOI recognition task, fully taking advantage of the knowledge from CLIP." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 328, + 367, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 328, + 367, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 367, + 338 + ], + "type": "text", + "content": "3.2.3 LOCAL BRANCH: KNOWLEDGE TRANSFER NETWORK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "type": "text", + "content": "Given the CLIP-initialized visual encoder, a standard HOI representation can be formed by concatenating the human and object appearance features along with their spatial encoding. However, even after the finetuning as described above, such a representation still mainly focuses on object-level semantic cues rather than relation-level concepts. In this module, we explicitly exploit the HOI knowledge bank " + }, + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "type": "text", + "content": " to learn a local relation-specific HOI representation. To achieve this, we propose an attention-based architecture as shown in Fig.2(a)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": "Specifically, for each human proposal " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " and object proposal " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": ", we use RoI-Align (He et al., 2017) to crop the feature maps from " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " followed by a self-attention operation to compute their appearance features " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "v_h, v_o \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": ". Then we compute a spatial feature " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "v_{sp}" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " by encoding the relative positions of their bounding boxes " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_h, \\mathbf{x}_o)^2" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": ". 
The holistic HOI representation " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "v_p \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " is an embedding of the human and object appearance features and their spatial feature, i.e., " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "v_p = \\mathcal{F}_E([v_h; v_o; v_{sp}])" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "[\\cdot]" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " is the concatenation operation and " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_E" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " is a multi-layer perceptron (MLP)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": "To enhance relation-level concepts, we further compute its union region " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_u\\in \\mathbb{R}^4" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " (see Fig. 
2a) and extract the corresponding appearance feature " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{u}\\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " via RoI-align over the feature map " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": ". The union region is important as it encodes relational context cues, but it potentially also contains a large amount of background that is noisy for model learning. We thus devise an attention module that is similar in design to the HOI recognition network, but uses the union feature " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{u}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " as query to extract a meta-embedding " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{meta}\\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " from the HOI knowledge bank " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": ". 
The final HOI representation " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\hat{v}_p\\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " is built by fusing the holistic representation " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{p}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{meta}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " with a MLP " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_K" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 149, + 574, + 504, + 587 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 574, + 504, + 587 + ], + "spans": [ + { + "bbox": [ + 149, + 574, + 504, + 587 + ], + "type": "interline_equation", + "content": "\\alpha = \\operatorname {S o f t m a x} \\left(\\mathcal {W} _ {T} \\times v _ {u}\\right); \\quad v _ {\\text {m e t a}} = \\alpha^ {\\intercal} \\times \\mathcal {W} _ {T}; \\quad \\hat {v} _ {p} = \\mathcal {F} _ {K} \\left(v _ {p} + v _ {\\text {m e t a}}\\right). 
\tag {1}", + "image_path": "386c883a42547a86b1a4c6887dbfad85b678c63b27f4db5d235c9739282b40c4.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "inline_equation", + "content": "\\alpha \\in \\mathbb{R}^N" + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "content": " is the normalized attention weight and " + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "inline_equation", + "content": "\\intercal" + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "content": " is the transpose operation. " + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "inline_equation", + "content": "v_{meta}" + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "content": " encodes a discriminative representation from CLIP and facilitates feature sharing between HOI classes." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 627, + 378, + 638 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 378, + 638 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 378, + 638 + ], + "type": "text", + "content": "3.2.4 LOCAL BRANCH: PAIRWISE CLASSIFICATION NETWORK" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": "Given the relation-aware HOI representation " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "\\hat{v}_p" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": ", our final module performs a coarse-level classification on human-object association and a fine-level classification for interaction recognition. Specifically, we use two MLPs " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_P" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_B" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": " to predict the interaction scores " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "s_p \\in \\mathbb{R}^A" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": " and the relatedness score " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "s_b \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": " for each human-object pair:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 242, + 698, + 504, + 712 + 
], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 698, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 242, + 698, + 504, + 712 + ], + "type": "interline_equation", + "content": "s _ {p} = \\mathcal {F} _ {P} (\\hat {v} _ {p}); \\quad s _ {b} = \\mathcal {F} _ {B} (\\hat {v} _ {p}) \\tag {2}", + "image_path": "6fe16a547ea607e052aab41a132d99bad327245c2aaf962bdc800fb9fcee83ba.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 234, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 234, + 731 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 234, + 731 + ], + "type": "text", + "content": "2For details c.f. the appendix C" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": "To train the model under weak supervision (see Sec. 3.3), we further aggregate the pairwise interaction scores into image-level interaction scores. 
Assume we have " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": " pairs of human-object proposals for a given image, and denote the interaction scores for the " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": "-th pair as " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "s_p^m" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": ". We first concatenate all the interaction scores to compose a bag " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "S = [s_p^1; \\ldots; s_p^M] \\in \\mathbb{R}^{M \\cdot A}" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": ", then we maximize over all pairs to obtain the image-wise interaction scores: " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "\\tilde{s}_p = \\max_m S" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 148, + 226, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 148, + 226, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 226, + 159 + ], + "type": "text", + "content": "3.2.5 MODEL INFERENCE" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "spans": [ + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "content": "During model inference, we do not use the local interaction scores " + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "inline_equation", + "content": "s_p" + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "content": " directly. Instead, we normalize " + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "content": " with a Softmax operation defined on all pairs: " + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "inline_equation", + "content": "\\bar{S} = \\text{Softmax}(S)" + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "content": ", and then compute the normalized" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "content": "pairwise interaction scores " + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "inline_equation", + "content": "e_p = \\sigma(\\tilde{s}_p) \\cdot \\bar{s}_p" + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\bar{s}_p" + }, + { + "bbox": [ 
+ 104, + 195, + 506, + 228 + ], + "type": "text", + "content": " is a row from " + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\bar{S}" + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "content": " is the Sigmoid function. This has the effect of measuring the contribution of a given pair in case multiple pairs in an image share the same interaction." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": "The final interaction score " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "s_{h,o}^{a}" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": " for human-object pair " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_h,\\mathbf{x}_o)" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": " combines multiple scores, including the global HOI scores " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "s_g" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": ", the normalized pairwise interaction scores " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "e_p" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": ", and the relatedness score " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "s_b" + }, + { + "bbox": [ + 104, + 
234, + 506, + 269 + ], + "type": "text", + "content": ". The overall HOI score " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "R_{h,o}^{a}" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": " is a combination of the interaction score and the object detection scores." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 194, + 275, + 505, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 275, + 505, + 291 + ], + "spans": [ + { + "bbox": [ + 194, + 275, + 505, + 291 + ], + "type": "interline_equation", + "content": "s _ {h, o} ^ {a} = \\sigma \\left(s _ {g} ^ {a, c _ {o}}\\right) \\cdot e _ {p} ^ {a} \\cdot \\sigma \\left(s _ {b}\\right); \\quad R _ {h, o} ^ {a} = \\left(s _ {h} \\cdot s _ {o}\\right) ^ {\\gamma} \\cdot s _ {h, o} ^ {a} \\tag {3}", + "image_path": "439dd52e29b854e3e7fa8bbfa471200fe572ae8f1a5804429cf1dc051c40e3c9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "s_g^{a,c_o}" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": " is the HOI score corresponding to " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": "-th interaction and " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "c_o" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": "-th object category in " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "s_g" 
+ }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "e_p^a" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": " is the score of " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": "-th interaction in " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "e_p" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": " is a hyper-parameter to balance the scores (Zhang et al., 2021c; Li et al., 2019b)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 331, + 294, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 294, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 294, + 342 + ], + "type": "text", + "content": "3.3 LEARNING WITH WEAK SUPERVISION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": "To train our deep network in a weakly supervised setting, we use a multi-task loss defined on three different levels. 
Specifically, our overall loss function " + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": " consists of three terms: i) an image-wise HOI recognition loss " + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_g" + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": " to adapt CLIP features to the task of human-object interaction detection; ii) a pairwise interaction classification loss " + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_p" + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": " to guide the knowledge transfer towards fine-grained relation-aware representations; and iii) a self-taught relatedness classification loss " + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_b" + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": " to prune non-interacting human-object combinations. 
Formally, the overall loss is written as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 263, + 422, + 505, + 435 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 422, + 505, + 435 + ], + "spans": [ + { + "bbox": [ + 263, + 422, + 505, + 435 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {g} + \\mathcal {L} _ {p} + \\mathcal {L} _ {b} \\tag {4}", + "image_path": "17ee4c4721f88734b1729db504ae9b2c78e22b394949c4d46d608228666095c8.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": "Image-wise HOI recognition loss " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_g" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": ": Given the HOI scores " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "s_g" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": " and ground-truth HOI categories " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_g" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": " is a standard binary cross-entropy loss for multi-label classification: " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_g = L_{BCE}(s_g, \\mathcal{R})" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": "Pairwise interaction classification loss " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_p" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": ": We adopt a MIL strategy that first aggregates the pairwise interaction scores and supervises this with image-level interaction labels as " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = \\{a^*\\}" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": ". Given the image-wise interaction scores " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\tilde{s}_p" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_p" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": " is a standard binary cross-entropy loss for multi-label classification as: " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_p = L_{BCE}(\\tilde{s}_p, \\mathcal{A})" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "content": "Self-taught relatedness classification loss " + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_b" + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "content": ": As human-object associations are not annotated, we devise a novel pseudo relatedness label generation mechanism for training a self-taught binary classifier to identify valid human-object associations. Specifically, we observe that the human-object pairs with confident interaction scores are often associated after a short period of initial training without self-taught classification loss. Motivated by this, we use the interaction scores " + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "inline_equation", + "content": "s_p" + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "content": " from the model under training to supervise the relatedness classification." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": "Concretely, we generate pseudo labels " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{B} = \\{b_1,\\dots,b_M\\}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " for all human-object pairs in an image, where " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "b_{m}\\in \\{0,1\\}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " indicates the relatedness for the " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " -th combination. To this end, as illustrated in Fig.2(b), we first propose a binary mask " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "Z\\in \\{0,1\\}^{M\\cdot A}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " for all interaction scores " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " with respect to the ground-truth object categories " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{C} = \\{c^*\\}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". 
For each human-object pair where the object label " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "c_{o}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " is included in " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ", we consider it as a potential interactive combination and thus assign the corresponding row in " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " as 1, and other rows as 0. For the latter, we also immediately set " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "b_{m} = 0" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". Then we generate pairwise scores " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "t^a\\in \\mathbb{R}^M" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " for each ground-truth interaction " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "a^*" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " by selecting the corresponding row from " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "S\\odot Z" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". 
The pseudo label for the pair with the highest score is assigned as 1, i.e., " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "m_a = \\arg \\max_{m}t^a" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "b_{m_a} = 1" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". We only select one positive pair3 for each " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "a^*" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". Finally, " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_b" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " is defined as a binary cross-entropy loss: " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_b = \\sum_m L_{BCE}(s_b^m,b_m)" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "s_b^m" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " is the relatedness score for the " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " -th pair." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 117, + 720, + 293, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 293, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 293, + 732 + ], + "type": "text", + "content": "3We also explore top-K selection in Appendix F" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 130, + 504, + 280 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 130 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 130 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 130 + ], + "type": "text", + "content": "Table 1: mAP comparison on HICO-DET and V-COCO test set. - denotes the results are not available. * stands for the method we re-evaluate with the correct evaluation protocol (see Appendix.I for details) and †means our re-implementation. For V-COCO, all object detectors are pretrained on MSCOCO dataset by default, and details about the evaluation metrics APS1&2 c.f. Appendix H. IN-1K denotes ImageNet with 1000 classes." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 130, + 504, + 280 + ], + "lines": [ + { + "bbox": [ + 106, + 130, + 504, + 280 + ], + "spans": [ + { + "bbox": [ + 106, + 130, + 504, + 280 + ], + "type": "table", + "html": "
MethodsBackboneDetectorHICO-DET (%)V-COCO (%)
FullRareNon-RareAProleAProle
supervised
iCAN (Gao et al., 2018)RN50 (IN-1K&COCO)FRCNN (COCO)14.8410.4516.1545.3052.40
PMFNet (Wan et al., 2019)RN50-FPN (IN-1K&COCO)FRCNN (COCO)17.4615.5618.0052.00-
TIN (Li et al., 2019b)RN50-FPN (IN-1K&COCO)FRCNN (COCO)17.2213.5118.3247.8054.20
DJ-RN (Li et al., 2020a)RN50 (IN-1K&COCO)FRCNN (COCO)21.3418.5321.1853.3060.30
IDN (Li et al., 2020b)RN50 (IN-1K&COCO)FRCNN (HICO-DET)26.2922.6127.3953.3060.30
SCG (Zhang et al., 2021c)RN50-FPN (IN-1K&HICO-DET)FRCNN (HICO-DET)31.3324.7233.3154.2060.90
HOTR (Kim et al., 2021)RN50+Transformer (IN-1K&COCO)DETR (HICO-DET)25.1017.3427.4255.2064.40
QPIC (Tamura et al., 2021)RN101+Transformer (IN-1K&COCO)DETR (COCO)29.9023.9231.6958.3060.70
CATN (Dong et al., 2022)RN50+Transformer (IN-1K&HICO-DET&COCO)DETR (HICO-DET)31.8625.1533.8460.10-
MSTR (Kim et al., 2022)RN50 + Transformer (IN-1K&COCO)DETR(HICO-DET)31.1725.3133.9262.0065.20
DisTr (Zhou et al., 2022)RN50+Transformer (IN-1K&COCO)DETR (HICO-DET)31.7527.4533.0366.2068.50
SSRT (Iftekhar et al., 2022)R101+Transformer (IN-1K&COCO)DETR (COCO)31.3424.3133.3265.0067.10
GEN-VLKT (Liao et al., 2022)RN101+Transformer (IN-1K&HICO-DET)DETR (HICO-DET)34.9531.1836.0863.5865.93
between supervised & weakly-supervised setting, learning with image-level HOIs and box annotations
AlignFormer (Kilickaya & Smeulders, 2021)RN101+Transformer (IN-1K&HICO-DET)DETR (HICO-DET)20.8518.2321.6415.8216.34
weakly-supervised
Explanation-HOI* (Baldassarre et al., 2020)ResNeXt101 (IN-1K&COCO)FRCNN (COCO)10.638.7111.20--
MX-HOI (Kumaraswamy et al., 2021)RN101 (IN-1K&COCO)FRCNN (COCO)16.1412.0617.50--
PPR-FCN† (Zhang et al., 2017)RN50 (CLIP dataset)FRCNN (COCO)17.5515.6918.41--
oursRN50 (CLIP dataset)FRCNN (COCO)22.8922.4123.0342.9748.06
oursRN101 (CLIP dataset)FRCNN (COCO)25.7024.5226.0544.7449.97
", + "image_path": "1c8c230f9f7169d54dff5b1d6823b1233510ef4f79100b811857bb40bd219d38.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 292, + 201, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 292, + 201, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 292, + 201, + 303 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 313, + 230, + 324 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 313, + 230, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 230, + 324 + ], + "type": "text", + "content": "4.1 EXPERIMENTAL SETUP" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": "Datasets: We benchmark our model on two public datasets: HICO-DET and V-COCO. HICO-DET consists of 47776 images (38118 for training and 9658 for test). It has " + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "inline_equation", + "content": "N = 600" + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": " HOI categories, which are composed of " + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "inline_equation", + "content": "C = 80" + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": " common objects (the same as MSCOCO (Lin et al., 2014)) and " + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "inline_equation", + "content": "A = 117" + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": " unique interaction categories. V-COCO is a subset of MSCOCO, consisting of 2533 images for training, 2867 for validation and 4946 for test. 
It has 16199 human instances, each annotated with binary labels for " + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "inline_equation", + "content": "A = 26" + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": " interaction categories." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 403, + 504, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 449 + ], + "type": "text", + "content": "Evaluation Metric: Following (Chao et al., 2015), we use mean average precision (mAP) to evaluate HOI detection performance. A human-object pair is considered as positive when both predicted human and object boxes have at least 0.5 IoU with their ground-truth boxes, and the HOI class is classified correctly." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 456, + 250, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 250, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 250, + 467 + ], + "type": "text", + "content": "4.2 IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "text", + "content": "We use an off-the-shelf Faster R-CNN (Ren et al., 2015) pretrained on MSCOCO to generate at most 100 object candidates for each image. For V-COCO, it is worth noting that we train the object detector by removing the images in MSCOCO that overlap with V-COCO to prevent information leakage. 
The backbone network is initialized with the visual encoder from CLIP-RN101 model and the feature dimension " + }, + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "inline_equation", + "content": "D = 1024" + }, + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "type": "text", + "content": "For model learning, we set the detection score weight " + }, + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "type": "inline_equation", + "content": "\\gamma = 2.8" + }, + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "type": "text", + "content": " as default by following previous works (Zhang et al., 2021c; Li et al., 2019b), then optimize the entire network with AdamW and an initial learning rate of 1e-5 for backbone parameters and 1e-4 for others. We detach the parameters of the knowledge bank on the local branch for better model learning. We train up to 60K iterations with batch-size 24 in each on 4 NVIDIA 2080TI GPUs, and decay the learning rate by 10 times in 12K and 24K iteration." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 602, + 238, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 602, + 238, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 602, + 238, + 613 + ], + "type": "text", + "content": "4.3 QUANTITATIVE RESULTS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "For HICO-DET (Tab.1), our approach outperforms the previous state of the arts on the weakly supervised setting by a clear margin, achieving 22.89 mAP with ResNet-50 and 25.70 mAP with ResNet-101 as backbone. For a fair comparison, we also re-implement PPR-FCN with CLIP visual encoder. The results show that we still outperform PPR-FCN by a sizeable margin, which validates the superiority of our framework. Besides, we even perform comparably with HOTR and IDN under an inferior experimental setting where HOTR adopts a more advanced transformer encoder-decoder architecture, and both methods are trained with strong supervision. Furthermore, the mAP gap between Rare (training annotations " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "< 10" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ") and Non-rare HOI classes in our results is much smaller than other methods, demonstrating the superior generalization capability of our HOI representation for solving the long-tailed distribution issue. 
In detail, we achieve a 0.62 mAP gap with ResNet-50" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 126, + 130, + 485, + 236 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "type": "text", + "content": "Table 2: Ablation study on HICO-DET dataset. \"RN50-FPN(COCO)\" denotes the backbone initialized with Faster R-CNN parameters pretrained on MSCOCO dataset while \"CLIP RN50\" stands for the backbone initialized with CLIP visual encoder. Besides, we construct the knowledge bank " + }, + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "type": "text", + "content": " with random initialization, or computing HOI prompts by RoBERTa or CLIP text transformer." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 126, + 130, + 485, + 236 + ], + "lines": [ + { + "bbox": [ + 126, + 130, + 485, + 236 + ], + "spans": [ + { + "bbox": [ + 126, + 130, + 485, + 236 + ], + "type": "table", + "html": "
MethodsParameter initializationCLIP KnowledgemAP (%)
Backboneknowledge bankHOI recognitionKTNscore fusionSRCFullRareNon-Rare
baselineCLIP RN50-----19.5216.5820.40
Exp 1CLIP RN50CLIP Text---20.3118.3420.90
Exp 2CLIP RN50CLIP Text✓ (freeze WT)---20.0918.2320.64
Exp 3CLIP RN50CLIP Text--20.8618.4021.60
Exp 4CLIP RN50CLIP Text-22.4020.7022.90
Exp 5CLIP RN50----19.8817.4520.61
Exp 6CLIP RN50CLIP Text--20.7519.3821.16
Exp 7CLIP RN50CLIP Text-21.5320.0521.97
oursCLIP RN50CLIP Text22.8922.4123.03
Exp 8RN50-FPN (COCO)-----19.4416.2020.41
Exp 9RN50-FPN (COCO)random19.6115.5720.82
Exp 10RN50-FPN (COCO)RoBERTa20.4516.4621.65
", + "image_path": "597049b964652d7583c17bbd2d3f5428634b215445100d84dfdc4765fda6c5f7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 257, + 504, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 257, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 504, + 281 + ], + "type": "text", + "content": "and 1.53 with ResNet-101 backbone, which is much smaller than AlignFormer (3.14) and PPR-FCN (2.64), and supervised methods SSRT (9.01) and GEN-VLKT (4.9)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "content": "For V-COCO dataset, we report the performance of " + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{role}" + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "content": " in both scenario1 and scenario2 for a complete comparison, which are 42.97 / 48.06 " + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{role}" + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "content": " with ResNet-50 and 44.74 / 49.97 " + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{role}" + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "content": " with ResNet-101 as backbone. As shown in Tab.1, our model achieves significant improvement compared with AlignFormer, and even is comparable with supervised methods TIN and iCAN." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 334, + 211, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 334, + 211, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 211, + 344 + ], + "type": "text", + "content": "4.4 ABLATION STUDY" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 349, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 349, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 504, + 373 + ], + "type": "text", + "content": "In this section, we mainly validate the effectiveness of each component with detailed ablation studies on HICO-DET dataset. We use ResNet-50 as the backbone network to reduce experimental costs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "content": "**Baseline:** The baseline adopts the visual encoder from CLIP-RN50 to generate the vanilla HOI representation " + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "inline_equation", + "content": "v_{p}" + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "content": ", which is directly used to predict the interaction scores " + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "inline_equation", + "content": "s_{p}" + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "content": ". Only pairwise interaction classification loss " + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{p}" + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "content": " is used for model learning." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "text", + "content": "HOI recognition: We augment the baseline with a HOI recognition network and observe the full mAP improves from 19.52 to 20.41, as reported in Exp 1 of Tab. 2. It suggests that the learnable knowledge bank " + }, + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "text", + "content": " serves as a powerful classifier to perform image-level HOI recognition and update the visual encoder for better HOI representation. We visualize the learned parameters of knowledge bank in Appendix D to demonstrate its effectiveness. Furthermore, as in Exp 2, the performance slightly decreases from 20.31 to 20.09 when we freeze the training of the knowledge bank, indicating that joint learning of visual features and the knowledge bank is more appropriate for HOI detection." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 518, + 506, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 518, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 518, + 506, + 575 + ], + "type": "text", + "content": "Knowledge Transfer Network (KTN): KTN explicitly transfers the CLIP meta-knowledge to pairwise HOI features. As a result, it contributes 0.55 Full mAP improvement (Exp 3 v.s. Exp 1) and most of the performance gains come from Non-rare classes. This result shows KTN is capable of extracting discriminative features from the relational knowledge bank to our HOI representation. We also study the effectiveness of the attention mechanism of KTN in Appendix E." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": "Score fusion: In Tab. 2, we largely improve the Full mAP from 20.86 (Exp 3) to 22.40 (Exp 4) by fusing the global HOI scores " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "s_g" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": " to pairwise interaction score " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "s_p" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": ". As the HOI recognition network seamlessly inherits the visual-linguistic features from CLIP and directly adopts image labels as supervision, the global interaction scores are pretty accurate and largely enhance the pairwise scores, demonstrating its strong capabilities to cope with long-tailed and fine-grained HOI recognition." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 646, + 505, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 646, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 646, + 505, + 691 + ], + "type": "text", + "content": "Self-taught Relatedness Classification (SRC): Self-taught classification aims to identify the relatedness between human and objects. The improvements from Exp 4 to ours show the effectiveness of our self-taught strategy, which is capable of figuring out the irrelevant human-object pairs and suppressing their interaction scores during inference." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": "Combining KTN & SRC: The ablation results of Exp 5-7 in Tab. 2 show the KTN and SRC are able to facilitate each other. In detail, the SRC obtains 0.49 Full mAP improvement when the KTN is introduced (ours v.s. Exp 4), which is only 0.36 without KTN (Exp 5 v.s. baseline). Similarly," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 91, + 206, + 158 + ], + "blocks": [ + { + "bbox": [ + 152, + 81, + 163, + 89 + ], + "lines": [ + { + "bbox": [ + 152, + 81, + 163, + 89 + ], + "spans": [ + { + "bbox": [ + 152, + 81, + 163, + 89 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 91, + 206, + 158 + ], + "lines": [ + { + "bbox": [ + 107, + 91, + 206, + 158 + ], + "spans": [ + { + "bbox": [ + 107, + 91, + 206, + 158 + ], + "type": "image", + "image_path": "3c4dc5eb76fc4028aefbf775852ee762e247e291481c548a837a8639ba17ecdc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { 
+ "bbox": [ + 109, + 159, + 149, + 177 + ], + "lines": [ + { + "bbox": [ + 109, + 159, + 149, + 177 + ], + "spans": [ + { + "bbox": [ + 109, + 159, + 149, + 177 + ], + "type": "text", + "content": "wash_motorcycle \nours: 0.18, 0.355 \nbaseline: 0.0189" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 208, + 80, + 305, + 159 + ], + "blocks": [ + { + "bbox": [ + 208, + 80, + 305, + 159 + ], + "lines": [ + { + "bbox": [ + 208, + 80, + 305, + 159 + ], + "spans": [ + { + "bbox": [ + 208, + 80, + 305, + 159 + ], + "type": "image", + "image_path": "64d18385f4e7e147826a9c9ded9896f1574f4ad51300b8e50043ecca7c12edd0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 160, + 285, + 174 + ], + "lines": [ + { + "bbox": [ + 209, + 160, + 285, + 174 + ], + "spans": [ + { + "bbox": [ + 209, + 160, + 285, + 174 + ], + "type": "text", + "content": "hold_horse:0.062,0.397,0.998 ride_horse:0.405,0.966,0.998" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 307, + 89, + 403, + 159 + ], + "blocks": [ + { + "bbox": [ + 351, + 80, + 361, + 89 + ], + "lines": [ + { + "bbox": [ + 351, + 80, + 361, + 89 + ], + "spans": [ + { + "bbox": [ + 351, + 80, + 361, + 89 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 89, + 403, + 159 + ], + "lines": [ + { + "bbox": [ + 307, + 89, + 403, + 159 + ], + "spans": [ + { + "bbox": [ + 307, + 89, + 403, + 159 + ], + "type": "image", + "image_path": "9df9bcc5f1461c0846c4218ef4173e3faefd7d6edad70696dd7e72628870ebc0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 160, + 400, + 167 + ], + "lines": [ + { + "bbox": [ + 307, + 160, + 400, + 167 + ], + "spans": [ + { + "bbox": [ + 307, + 160, + 400, 
+ 167 + ], + "type": "text", + "content": "sit_on_motorcycle: 0.515, 0.033, 0.950" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 406, + 89, + 503, + 159 + ], + "blocks": [ + { + "bbox": [ + 449, + 81, + 459, + 89 + ], + "lines": [ + { + "bbox": [ + 449, + 81, + 459, + 89 + ], + "spans": [ + { + "bbox": [ + 449, + 81, + 459, + 89 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 406, + 89, + 503, + 159 + ], + "lines": [ + { + "bbox": [ + 406, + 89, + 503, + 159 + ], + "spans": [ + { + "bbox": [ + 406, + 89, + 503, + 159 + ], + "type": "image", + "image_path": "3d058fc7e928eaacd2f473320fb34d4c09870479628f97693de6ed7388ecbfea.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 159, + 500, + 174 + ], + "lines": [ + { + "bbox": [ + 406, + 159, + 500, + 174 + ], + "spans": [ + { + "bbox": [ + 406, + 159, + 500, + 174 + ], + "type": "text", + "content": "sit_at_dining_table: 0.006, 0.993, 0.079 \nsit_at_dining_table: 0.232, 0.993, 0.994" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 108, + 178, + 206, + 247 + ], + "blocks": [ + { + "bbox": [ + 108, + 178, + 206, + 247 + ], + "lines": [ + { + "bbox": [ + 108, + 178, + 206, + 247 + ], + "spans": [ + { + "bbox": [ + 108, + 178, + 206, + 247 + ], + "type": "image", + "image_path": "31a3d121ff52f3113f31974f177930970ca7ec73ecd4e03889b2f475115f9c2c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 247, + 170, + 267 + ], + "lines": [ + { + "bbox": [ + 110, + 247, + 170, + 267 + ], + "spans": [ + { + "bbox": [ + 110, + 247, + 170, + 267 + ], + "type": "text", + "content": "paint_fire_hydrant: \nours: 0.203, 0.505, 0.955 \nbaseline: 0.0027" + } + ] + } + ], + 
"index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 273, + 504, + 324 + ], + "lines": [ + { + "bbox": [ + 104, + 273, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 504, + 324 + ], + "type": "text", + "content": "Figure 3: Visualization of HOI detection results on HICO-DET test set. Red scores denote the negative HOI predictions. We mainly demonstrate the model's capabilities on four aspects: (a) coping with imbalanced HOI distribution; (b) distinguishing subtle differences among interaction types; (c) suppressing background HOI classes, and (d) pruning irrelevant human-object associations. The numbers reported are normalized pairwise interaction score, global HOI score and relatedness score." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 208, + 178, + 306, + 247 + ], + "blocks": [ + { + "bbox": [ + 208, + 178, + 306, + 247 + ], + "lines": [ + { + "bbox": [ + 208, + 178, + 306, + 247 + ], + "spans": [ + { + "bbox": [ + 208, + 178, + 306, + 247 + ], + "type": "image", + "image_path": "192708158f64b8c7105ece6aeedbf1e1b24fd0ec539a1403e13410348bb7f329.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 208, + 249, + 287, + 263 + ], + "lines": [ + { + "bbox": [ + 208, + 249, + 287, + 263 + ], + "spans": [ + { + "bbox": [ + 208, + 249, + 287, + 263 + ], + "type": "text", + "content": "repair truck: 0.23, 0.055, 0.979 \ninspect truck: 0.48, 0.138, 0.979" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 307, + 177, + 404, + 247 + ], + "blocks": [ + { + "bbox": [ + 307, + 177, + 404, + 247 + ], + "lines": [ + { + "bbox": [ + 307, + 177, + 404, + 247 + ], + "spans": [ + { + "bbox": [ + 307, + 177, + 404, + 247 + ], + "type": "image", + "image_path": 
"5579212847887188404267a0686fd5fb59b35064bcc9bf3ca9fa886fb6aa1cfd.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 251, + 404, + 258 + ], + "lines": [ + { + "bbox": [ + 307, + 251, + 404, + 258 + ], + "spans": [ + { + "bbox": [ + 307, + 251, + 404, + 258 + ], + "type": "text", + "content": "stand_on_skateboard: 0.009, 0.001, 0.98" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 406, + 178, + 503, + 247 + ], + "blocks": [ + { + "bbox": [ + 406, + 178, + 503, + 247 + ], + "lines": [ + { + "bbox": [ + 406, + 178, + 503, + 247 + ], + "spans": [ + { + "bbox": [ + 406, + 178, + 503, + 247 + ], + "type": "image", + "image_path": "acddb4ab24f2368acd17d4d546335723573c1d65e5ad4a799246606a28382d6a.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 408, + 250, + 479, + 264 + ], + "lines": [ + { + "bbox": [ + 408, + 250, + 479, + 264 + ], + "spans": [ + { + "bbox": [ + 408, + 250, + 479, + 264 + ], + "type": "text", + "content": "hold_kite:0.039,0.892,0.238 hold_kite:0.478,0.892,0.995" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 339, + 504, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 339, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 504, + 363 + ], + "type": "text", + "content": "the KTN contributes 0.78 Full mAP improvement with SRC (Exp 7 v.s. Exp 6), which is only 0.55 without SRC (Exp 3 v.s. Exp 1)." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 372, + 506, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 506, + 461 + ], + "type": "text", + "content": "Parameter initialization: Our visual encoder and knowledge bank are both initialized from CLIP. We also explore different parameter initialization strategy in Exp 8-10. Specifically, we initialize the visual encoder with a ResNet50-FPN pretrained on COCO detection task for the baseline (Exp 8), and the knowledge bank with random parameters (Exp 9) or embeddings of HOI labels from RoBERTa model (Exp 10) for the final model. We observe severe drops with all these initialization methods compared with ours, demonstrating the effectiveness and generalization ability of CLIP model. It is worth noting that the mAP of Rare classes decreases from 16.20 in Exp 8 to 15.57 in Exp 9, which suggests the randomly initialized knowledge bank even aggravates the imbalance issue in final model." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 475, + 231, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 231, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 231, + 487 + ], + "type": "text", + "content": "4.5 QUALITATIVE RESULTS" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 494, + 506, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 494, + 506, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 494, + 506, + 616 + ], + "type": "text", + "content": "We show some qualitative results of our method in Fig.3. For each HOI prediction, we report (i) normalized pairwise interaction score, (ii) global HOI score and (iii) relatedness score for ours, and only pairwise interaction score for baseline. 
In Fig.3(a), ours interaction scores are more confident than baseline in Rare HOI classes, demonstrating the generalization ability of our CLIP-guided HOI representation. Besides, when incorporating relational knowledge bank into pairwise HOI representation, our method is capable of distinguishing the subtle differences among similar HOIs in Fig.3(b) (e.g., repair_truck:0.23 v.s. inspect_truck:0.48 in the bottom figure). Moreover, in Fig.3(c), the global branch suppresses background HOIs by predicting low global scores for them (e.g., the global HOI score is 0.033 for sit_onmotorcycle while the ground-truth is sit_on_bicycle). Finally, in Fig.3(d), our self-taught relatedness classification strategy shows strong capability at recognizing the ambiguous human-object associations (e.g., 0.079 v.s. 0.994 in the upper figure)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 630, + 195, + 642 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 630, + 195, + 642 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 195, + 642 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": "In this paper, we propose a bi-level knowledge integration strategy that incorporates the prior knowledge from CLIP for weakly-supervised HOI detection. Specifically, we exploit CLIP textual embeddings of HOI labels as a relational knowledge bank, which is adopted to enhance the HOI representation with an image-wise HOI recognition network and a pairwise knowledge transfer network. We further propose the addition of a self-taught binary pairwise relatedness classification loss to overcome ambiguous human-object association. 
Finally, our approach achieves the new state of the art on both HICO-DET and V-COCO benchmarks under the weakly supervised setting." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 220, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 220, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 220, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 506, + 140 + ], + "type": "text", + "content": "We acknowledge funding from Flemish Government under the Onderzoeksprogramma Artificiele Intelligentie (AI) Vlaanderen programme, Shanghai Science and Technology Program 21010502700 and Shanghai Frontiers Science Center of Human-centered Artificial Intelligence." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 156, + 212, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 212, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 212, + 168 + ], + "type": "text", + "content": "ETHICS STATEMENT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 180, + 504, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 180, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 504, + 214 + ], + "type": "text", + "content": "Hereby, we consciously assure that our study is original work which has not been previously published elsewhere, and is not currently being considered for publication elsewhere. We do not have ethics risks as mentioned in the author guidelines." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 230, + 269, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 230, + 269, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 230, + 269, + 243 + ], + "type": "text", + "content": "REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 255, + 504, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 504, + 278 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 504, + 278 + ], + "type": "text", + "content": "We use publicly available benchmarks, HICO-DET and V-COCO, to validate our method. Code is available at https://github.com/bobwan1995/Weakly-HOI." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 293, + 176, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 176, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 176, + 306 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 312, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 106, + 312, + 505, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 312, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 312, + 505, + 335 + ], + "type": "text", + "content": "Federico Baldassarre, Kevin Smith, Josephine Sullivan, and Hossein Azizpour. Explanation-based weakly-supervised learning of visual relations with graph networks. In ECCV, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 341, + 505, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 341, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 107, + 341, + 505, + 365 + ], + "type": "text", + "content": "Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 371, + 506, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 371, + 506, + 395 + ], + "spans": [ + { + "bbox": [ + 107, + 371, + 506, + 395 + ], + "type": "text", + "content": "Yu-Wei Chao, Zhan Wang, Yugeng He, Jiaxuan Wang, and Jia Deng. HICO: A benchmark for recognizing human-object interactions in images. In ICCV, 2015." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 400, + 506, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 400, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 107, + 400, + 506, + 423 + ], + "type": "text", + "content": "Yu-Wei Chao, Yunfan Liu, Xieyang Liu, Huayi Zeng, and Jia Deng. Learning to detect human-object interactions. In WACV, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 430, + 506, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 430, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 106, + 430, + 506, + 464 + ], + "type": "text", + "content": "Leizhen Dong, Zhimin Li, Kunlun Xu, Zhijun Zhang, Luxin Yan, Sheng Zhong, and Xu Zou. Category-aware transformer network for better human-object interaction detection. arXiv preprint arXiv:2204.04911, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "text", + "content": "Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. arXiv preprint arXiv:2203.14940, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 512, + 504, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 512, + 504, + 535 + ], + "spans": [ + { + "bbox": [ + 107, + 512, + 504, + 535 + ], + "type": "text", + "content": "Chen Gao, Yuliang Zou, and Jia-Bin Huang. ican: Instance-centric attention network for human-object interaction detection. In BMVC, 2018." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 541, + 504, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 541, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 107, + 541, + 504, + 564 + ], + "type": "text", + "content": "Chen Gao, Jiarui Xu, Yuliang Zou, and Jia-Bin Huang. Drg: Dual relation graph for human-object interaction detection. In ECCV, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 571, + 504, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 571, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 107, + 571, + 504, + 594 + ], + "type": "text", + "content": "Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Open-vocabulary image segmentation. arXiv preprint arXiv:2112.12143, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 601, + 504, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 601, + 504, + 625 + ], + "spans": [ + { + "bbox": [ + 107, + 601, + 504, + 625 + ], + "type": "text", + "content": "Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 631, + 506, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 631, + 506, + 653 + ], + "spans": [ + { + "bbox": [ + 107, + 631, + 506, + 653 + ], + "type": "text", + "content": "Saurabh Gupta and Jitendra Malik. Visual semantic role labeling. arXiv preprint arXiv:1505.04474, 2015." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 660, + 504, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 660, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 107, + 660, + 504, + 684 + ], + "type": "text", + "content": "Tanmay Gupta, Alexander Schwing, and Derek Hoiem. 
No-frills human-object interaction detection: Factorization, layout encodings, and training techniques. In ICCV, 2019." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 690, + 504, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 690, + 504, + 714 + ], + "spans": [ + { + "bbox": [ + 107, + 690, + 504, + 714 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 107, + 719, + 501, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 719, + 501, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 719, + 501, + 732 + ], + "type": "text", + "content": "Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In ICCV2017, 2017." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "text", + "content": 
"Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 961-970, 2015." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 158 + ], + "type": "text", + "content": "ASM Iftekhar, Hao Chen, Kaustav Kundu, Xinyu Li, Joseph Tighe, and Davide Modolo. What to look at and where: Semantic and spatial refined transformer for detecting human-object interactions. arXiv preprint arXiv:2204.00746, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 163, + 506, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 163, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 107, + 163, + 506, + 189 + ], + "type": "text", + "content": "Maximilian Ilse, Jakub Tomczak, and Max Welling. Attention-based deep multiple instance learning. In ICML, pp. 2127-2136, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 194, + 506, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 506, + 218 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 506, + 218 + ], + "type": "text", + "content": "Mert Kilickaya and Arnold Smeulders. Human-object interaction detection via weak supervision. arXiv preprint arXiv:2112.00492, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "text", + "content": "Bumsoo Kim, Junhyun Lee, Jaewoo Kang, Eun-Sol Kim, and Hyunwoo J. Kim. Hotr: End-to-end human-object interaction detection with transformers. 
In CVPR, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 256, + 506, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 256, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 107, + 256, + 506, + 289 + ], + "type": "text", + "content": "Bumsoo Kim, Jonghwan Mun, Kyoung-Woon On, Minchul Shin, Junhyun Lee, and Eun-Sol Kim. Mstr: Multi-scale transformer for end-to-end human-object interaction detection. arXiv preprint arXiv:2203.14709, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 297, + 504, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 297, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 107, + 297, + 504, + 320 + ], + "type": "text", + "content": "Suresh Kirthi Kumaraswamy, Miaojing Shi, and Ewa Kijak. Detecting human-object interaction with mixed supervision. In WACV, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 327, + 506, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 327, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 506, + 361 + ], + "type": "text", + "content": "Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yan-Feng Wang, and Cewu Lu. Transferable interactiveness prior for human-object interaction detection. In CVPR, 2019a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 369, + 506, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 369, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 107, + 369, + 506, + 403 + ], + "type": "text", + "content": "Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yanfeng Wang, and Cewu Lu. Transferable interactiveness knowledge for human-object interaction detection. In CVPR, 2019b." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 410, + 504, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 410, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 107, + 410, + 504, + 434 + ], + "type": "text", + "content": "Yong-Lu Li, Xinpeng Liu, Han Lu, Shiyi Wang, Junqi Liu, Jiefeng Li, and Cewu Lu. Detailed 2d-3d joint representation for human-object interaction. In CVPR, 2020a." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 441, + 504, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 441, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 107, + 441, + 504, + 464 + ], + "type": "text", + "content": "Yong-Lu Li, Xinpeng Liu, Xiaogqian Wu, Yizhuo Li, and Cewu Lu. Hoi analysis: Integrating and decomposing human-object interaction. In NeurIPS, 2020b." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "text", + "content": "Yue Liao, Aixi Zhang, Miao Lu, Yongliang Wang, Xiaobo Li, and Si Liu. Gen-vlkt: Simplify association and enhance interaction understanding for hoi detection. arXiv preprint arXiv:2203.13954, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 512, + 504, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 512, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 107, + 512, + 504, + 536 + ], + "type": "text", + "content": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 543, + 504, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 543, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 107, + 543, + 504, + 578 + ], + "type": "text", + "content": "Wen Liu, Weixin Luo, Dongze Lian, and Shenghua Gao Gao. Future frame prediction for anomaly detection - a new baseline. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 584, + 504, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 584, + 504, + 618 + ], + "spans": [ + { + "bbox": [ + 107, + 584, + 504, + 618 + ], + "type": "text", + "content": "Hitoshi Nishimura, Satoshi Komorita, Yasutomo Kawanishi, and Hiroshi Murase. Sdof-tracker: Fast and accurate multiple human tracking by skipped-detection and optical-flow. arXiv preprint arXiv:2106.14259, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 625, + 504, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 625, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 107, + 625, + 504, + 661 + ], + "type": "text", + "content": "Guansong Pang, Cheng Yan, Chunhua Shen, van den Hengel Anton, and Xiao Bai. Self-trained deep ordinal regression for end-to-end video anomaly detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 667, + 504, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 667, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 107, + 667, + 504, + 691 + ], + "type": "text", + "content": "Alessandro Prest, Cordelia Schmid, and Vittorio Ferrari. Weakly supervised learning of interactions between humans and objects. IEEE TPAMI, 2011." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 697, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 697, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 697, + 504, + 732 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021a." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 567 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021b." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "type": "text", + "content": "Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. arXiv preprint arXiv:1506.01497, 2015." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 152, + 504, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 152, + 504, + 176 + ], + "spans": [ + { + "bbox": [ + 107, + 152, + 504, + 176 + ], + "type": "text", + "content": "Masato Tamura, Hiroki Ohashi, and Tomoaki Yoshinaga. Qpic: Query-based pairwise human-object interaction detection with image-wide contextual information. In CVPR, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 182, + 505, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 182, + 505, + 217 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 505, + 217 + ], + "type": "text", + "content": "Tina, Anmol Kumar Sharma, Siddharth Tomar, and Kapil Gupta. Various approaches of human activity recognition: A review. In International Conference on Computing Methodologies and Communication(ICCMC), 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 504, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 504, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 504, + 246 + ], + "type": "text", + "content": "Oytun Ulutan, A S M Iftekhar, and B. S. Manjunath. Vsgnet: Spatial attention network for detecting human object interactions using graph convolutions. In CVPR, 2020." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 253, + 504, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 253, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 107, + 253, + 504, + 277 + ], + "type": "text", + "content": "Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 2008. URL http://jmlr.org/papers/v9/vandermaaten08a.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 282, + 504, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 282, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 107, + 282, + 504, + 307 + ], + "type": "text", + "content": "Mrabti Wafae, Baibai Kaoutar, Bellach Benaissa, Oulad Haj Thami Rachid, and Tairi Hamid. Human motion tracking: A comparative study. Procedia Computer Science, 148:145-153, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 312, + 504, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 312, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 107, + 312, + 504, + 336 + ], + "type": "text", + "content": "Bo Wan, Desen Zhou, Yongfei Liu, Rongjie Li, and Xuming He. Pose-aware multi-level feature network for human object interaction detection. In ICCV, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 342, + 504, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 342, + 504, + 366 + ], + "spans": [ + { + "bbox": [ + 107, + 342, + 504, + 366 + ], + "type": "text", + "content": "Aixi Zhang, Yue Liao, Si Liu, Miao Lu, Yongliang Wang, Chen Gao, and Xiaobo Li. Mining the benefits of two-stage and one-stage hoi detection. NeuIPS, 2021a." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 373, + 506, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 373, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 107, + 373, + 506, + 406 + ], + "type": "text", + "content": "Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Efficient two-stage detection of human-object interactions with a novel unary-pairwise transformer. arXiv preprint arXiv:2112.01838, 2021b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 414, + 504, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 414, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 107, + 414, + 504, + 437 + ], + "type": "text", + "content": "Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Spatially conditioned graphs for detecting human-object interactions. In ICCV, 2021c." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 443, + 504, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 443, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 107, + 443, + 504, + 467 + ], + "type": "text", + "content": "Hanwang Zhang, Zawlin Kyaw, Jinyang Yu, and Shih-Fu Chang. Ppr-fcn: Weakly supervised visual relation detection via parallel pairwise r-fcn. In ICCV, 2017." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 473, + 506, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 473, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 107, + 473, + 506, + 506 + ], + "type": "text", + "content": "Desen Zhou, Zhichao Liu, Jian Wang, Leshan Wang, Tao Hu, Errui Ding, and Jingdong Wang. Human-object interaction detection via disentangled transformer. arXiv preprint arXiv:2204.09290, 2022." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 514, + 504, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 514, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 107, + 514, + 504, + 537 + ], + "type": "text", + "content": "Penghao Zhou and Mingmin Chi. Relation parsing neural network for human-object interaction detection. In ICCV, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 544, + 504, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 544, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 107, + 544, + 504, + 567 + ], + "type": "text", + "content": "Tianfei Zhou, Wenguan Wang, Siyuan Qi, Haibin Ling, and Jianbing Shen. Cascaded human-object interaction recognition. In CVPR, 2020." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 163, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 163, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 163, + 94 + ], + "type": "text", + "content": "APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 163 + ], + "spans": 
[ + { + "bbox": [ + 104, + 106, + 506, + 163 + ], + "type": "text", + "content": "In this appendix, we first describe the spatial feature generation, and then supplement more experimental results of different CLIP knowledge integration strategies for weakly-supervised HOI detection. For Explanation-HOI (Baldassarre et al., 2020), we further clarify the difference between their mAP evaluation protocol and the standard one. Finally, we demonstrate the limitations, potential negative societal impacts as well as the result error bars of our method." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 179, + 382, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 179, + 382, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 382, + 192 + ], + "type": "text", + "content": "A THE ADVANTAGE OF OUR HOI REPRESENTATION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "type": "text", + "content": "To verify the improvement obtained with our CLIP-based HOI representation, we visualize the HOI representation " + }, + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "type": "inline_equation", + "content": "\\hat{v}_p" + }, + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "type": "text", + "content": " in feature space with t-SNE(van der Maaten & Hinton, 2008). For clarity, we randomly sample 80 HOI categories, and collect 50 samples for each category. For comparison, we also demonstrate the object-based HOI representation derived from 'Exp 9' in Tab.2 (i.e., the model without CLIP knowledge and using a random knowledge bank). As shown in Fig.4, we observe that CLIP-based HOI representations for different HOI categories are diverse and well separated in feature space, which is better for HOI detection. 
In contrast, the object-based representations are not well separated in feature space (see the red box region in Fig.4b). Besides, the experimental results in the ablation study (ours v.s. 'Exp 9') also validate the advantage of CLIP-based HOI representation, improving full mAP from 19.61 to 22.89." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 332, + 378, + 345 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 332, + 378, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 332, + 378, + 345 + ], + "type": "text", + "content": "B ABLATION ON CLIP KNOWLEDGE INTEGRATION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": "To further demonstrate the superiority of our CLIP knowledge integration strategy, we study several proven techniques for CLIP knowledge transfer in Tab. 3. In " + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "inline_equation", + "content": "Abl1" + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": ", for each human-object pair, we directly infer the HOI scores with CLIP by computing the cross-modal similarities between their visual union region and the HOI prompts. Without introducing any HOI priors, the promising results indicate the powerful generalization ability of CLIP and motivate the design of incorporating CLIP knowledge for weakly-supervised HOI detection. 
In " + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "inline_equation", + "content": "Abl2" + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": ", we duplicate the experiment setting and results from " + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "inline_equation", + "content": "Exp8" + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": " in Tab. 2 of the main paper. It is a simplified baseline model but initializes the visual encoder with a ResNet50-FPN pretrained on COCO detection task. Then we introduce three different CLIP knowledge transfer strategies (Abl 3-4 and ours) based on " + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "inline_equation", + "content": "Abl2" + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 462, + 504, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 504, + 485 + ], + "type": "text", + "content": "In Abl 3, we directly enhance baseline scores in Abl 2 with the CLIP similarity scores in Abl 1 on the inference stage. Without bells and whistles, we obtain 1.12 gain in Full mAP." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": "Furthermore, in Abl 4, we adopt a similar knowledge transfer strategy as GEN-VLKT (Liao et al., 2022), where we initialize the HOI classifier " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_P" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " with HOI prompt and regularize the global HOI representation with CLIP image feature " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "v_{g}" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": ". In detail, we first compute the global HOI representation " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "v_{mean}" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " with mean pooling on all pairwise HOI representations, i.e., " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "v_{mean} = MeanPool(\\{v_p^m\\}_{m=1}^M)" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": ". Here " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "v_p^m" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " is the holistic HOI representation (c.f. Sec. 3.2.3 in the main paper) for " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": "-th human-object pair. 
Then we develop an additional " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "L2" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " loss " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg}" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " to transfer the knowledge from CLIP to HOI representations: " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg} = L2(v_{mean}, v_g)" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": ". The performance even decreases slightly from 19.44 to 19.39, which might be caused by the incompatibility of parameters between backbone network (ResNet50-FPN pretrained on COCO) and " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_P" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " (HOI prompt embeddings from CLIP). When directly applying the knowledge transfer strategy of GEN-VLKT to a weakly-supervised setting, it is difficult to map the unmatched HOI representation and classification weights to a joint space as the supervisory signals are noisy." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 633, + 504, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 633, + 504, + 656 + ], + "spans": [ + { + "bbox": [ + 104, + 633, + 504, + 656 + ], + "type": "text", + "content": "Finally, our approach achieves the best performance compared with other strategies, demonstrating the effectiveness of our bi-level knowledge integration strategy." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 673, + 293, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 673, + 293, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 673, + 293, + 685 + ], + "type": "text", + "content": "C SPATIAL FEATURE GENERATION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": "Following (Zhang et al., 2021c), we generate the spatial feature " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_{sp} \\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": " for each pair of human-object proposals " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_h, \\mathbf{x}_o)" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": ". 
Specifically, we first compute the bounding boxes information for " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": " separately, including their center coordinates, widths, heights, aspect ratios and areas, all" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 304, + 235 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 304, + 235 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 304, + 235 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 304, + 235 + ], + "type": "image", + "image_path": "ec0d2877c25887ff4609d620312019b28f059c4f6a7a3f591043ace767320b71.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 248, + 506, + 270 + ], + "lines": [ + { + "bbox": [ + 104, + 248, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 506, + 270 + ], + "type": "text", + "content": "Figure 4: The t-SNE visualization of CLIP-based HOI representation 
and object-based HOI representation." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 306, + 81, + 503, + 235 + ], + "blocks": [ + { + "bbox": [ + 306, + 81, + 503, + 235 + ], + "lines": [ + { + "bbox": [ + 306, + 81, + 503, + 235 + ], + "spans": [ + { + "bbox": [ + 306, + 81, + 503, + 235 + ], + "type": "image", + "image_path": "9c33688555a1473b9211dc245403d73ceed296cf44e1d2b567f377ee0add3094.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 146, + 307, + 463, + 362 + ], + "blocks": [ + { + "bbox": [ + 136, + 296, + 473, + 307 + ], + "lines": [ + { + "bbox": [ + 136, + 296, + 473, + 307 + ], + "spans": [ + { + "bbox": [ + 136, + 296, + 473, + 307 + ], + "type": "text", + "content": "Table 3: Ablation of different CLIP knowledge integration strategies on HICO-DET dataset." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 146, + 307, + 463, + 362 + ], + "lines": [ + { + "bbox": [ + 146, + 307, + 463, + 362 + ], + "spans": [ + { + "bbox": [ + 146, + 307, + 463, + 362 + ], + "type": "table", + "html": "
MethodsExperimental settingmAP (%)
FullRareNon-Rare
Abl 1CLIP inference score11.8413.7211.27
Abl 2RN50-FPN (COCO) + FP random init.19.4416.2020.41
Abl 3RN50-FPN (COCO) + FP random init. + CLIP inference score20.5618.1921.27
Abl 4RN50-FPN (COCO) + FP HOI prompt init. + CLIP visual regularization19.3915.1220.66
oursCLIP RN50 + HOI recognition + KTN + self-taught relatedness cls.22.8922.4123.03
", + "image_path": "fe363dacb48d8989d586e83fb6f3326ad7831f10cb09fb80f80800b75a732ddf.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": "normalized by the corresponding dimension of the image. We also encode their relative spatial relations by estimating the intersection over union (IoU), a ratio of the area of " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ", a directional encoding and the distance between center coordinates of " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ". We concatenate all the above-mentioned preliminary spatial cues and obtain a spatial encoding " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{p} \\in \\mathbb{R}_{+}^{18}" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ". 
To encode the second and higher order combinations of different terms, the spatial encoding is concatenated with its logarithm and then embedded to " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "v_{sp}" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "v_{sp} = \\mathcal{F}_{sp}([p; \\log(p + \\epsilon)])" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ". Where " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\epsilon > 0" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": " is a small constant to guarantee the numerical stability, and " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{sp}" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": " is a multi-layer fully connected network." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 492, + 379, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 379, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 379, + 506 + ], + "type": "text", + "content": "D VISUALIZATION OF HOI KNOWLEDGE BANK " + }, + { + "bbox": [ + 105, + 492, + 379, + 506 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "type": "text", + "content": "To further understand " + }, + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "type": "text", + "content": ", we visualize the knowledge bank features initialized by CLIP (Fig.5(a)) and learned from scratch (Fig.5(b)) in feature space by t-SNE. It is worth noting that the knowledge bank learned from scratch is derived from 'Exp 9' in Tab.2. As shown in Fig.5, we observe that the knowledge features of HOI classes initialized with CLIP are more discriminative than random initialized, and show a better clustering result (e.g. the HOI classes in red box regions)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 602, + 279, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 602, + 279, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 602, + 279, + 615 + ], + "type": "text", + "content": "E DIFFERENT DESIGNS OF KTN" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "text", + "content": "To further validate the effectiveness of our attention mechanism in KTN, we compare our design with some variants in Tab. 4. First of all, we directly encode the relation-level features within the union region to enhance the pairwise representation rather than the external knowledge bank. As a result, the mAP even decreases a little bit from 20.75 (Exp 6) to 20.69 (Exp 11). The potential reason is that the union region contains more ambiguous visual relations and background clutters, which are difficult to learn in a weak setting. Besides, we also explore different normalization strategies in KTN. The results in Tab. 4 demonstrate that Softmax operation (ours) performs better than uniform attention (Exp 12) or Sigmoid operation (Exp 13), indicating our attention mechanism is non-trivial and more effective on aggregating the relational cues from HOI knowledge bank." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 82, + 503, + 387 + ], + "blocks": [ + { + "bbox": [ + 107, + 82, + 503, + 387 + ], + "lines": [ + { + "bbox": [ + 107, + 82, + 503, + 387 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 503, + 387 + ], + "type": "image", + "image_path": "2e8f35e8efe169ddf8e8f37e0ef00a2cb7d7406a4aefb85cf71fe3778f5da5f5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "type": "text", + "content": "Figure 5: The t-SNE visualization of knowledge bank " + }, + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "type": "text", + "content": ". (a) is the knowledge bank distribution in feature space based on our CLIP-based HOI representation while (b) is the knowledge bank learned from scratch (the model in Tab.2-Exp 9) based on object-based HOI representation." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 126, + 462, + 484, + 509 + ], + "blocks": [ + { + "bbox": [ + 167, + 452, + 442, + 462 + ], + "lines": [ + { + "bbox": [ + 167, + 452, + 442, + 462 + ], + "spans": [ + { + "bbox": [ + 167, + 452, + 442, + 462 + ], + "type": "text", + "content": "Table 4: Different network design of Knowledge Transfer Network (KTN)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 126, + 462, + 484, + 509 + ], + "lines": [ + { + "bbox": [ + 126, + 462, + 484, + 509 + ], + "spans": [ + { + "bbox": [ + 126, + 462, + 484, + 509 + ], + "type": "table", + "html": "
MethodsParameter initializationCLIP KnowledgemAP (%)
Backboneknowledge bankHOI recognitionKTNscore fusionSRCFullRareNon-Rare
Exp 11CLIP RN50CLIP Text✓ (union)-20.6919.5521.04
Exp 12CLIP RN50CLIP Text✓ (uniform)-21.1419.8221.53
Exp 13CLIP RN50CLIP Text✓ (sigmoid)-21.2819.2721.88
oursCLIP RN50CLIP Text-21.5320.0521.97
", + "image_path": "7bd051eb2407505f5d0e91ad521730a1e01709b3b353c897c1f3b46d360b5784.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 522, + 350, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 522, + 350, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 350, + 534 + ], + "type": "text", + "content": "F TOP-K POSITIVE PAIR SELECTION FOR SRC" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 548, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 581 + ], + "type": "text", + "content": "In this section we show the results of selecting top-2 and top-5 pairs as positive in Tab. 5. We notice that there is a small performance drop, which is likely to be caused by mislabeling more negative pairs as positive, resulting in model learning with more noise." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 597, + 342, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 342, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 342, + 609 + ], + "type": "text", + "content": "G THE PROMPT GENERATION FOR V-COCO" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 623, + 504, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 623, + 504, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 504, + 667 + ], + "type": "text", + "content": "For the V-COCO dataset, each action has two different semantic roles ('instrument' and 'object') for different objects, like 'cut cake' and 'cut with knife'. We use two different prompt templates to convert a HOI label to a language sentence. For the former one, we take template \"a person verb a/an object\", and use \"a person verb with object\" for the latter." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 684, + 316, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 684, + 316, + 696 + ], + "spans": [ + { + "bbox": [ + 105, + 684, + 316, + 696 + ], + "type": "text", + "content": "H EVALUATION METRIC FOR V-COCO" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "type": "text", + "content": "V-COCO dataset has two scenarios for role AP evaluation. In Tab. 1, APS1&2 refer to 'Average Precision in scenario 1&2'. V-COCO dataset has two different annotations for HOIs: the first is a" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 236, + 100, + 375, + 148 + ], + "blocks": [ + { + "bbox": [ + 154, + 89, + 455, + 100 + ], + "lines": [ + { + "bbox": [ + 154, + 89, + 455, + 100 + ], + "spans": [ + { + "bbox": [ + 154, + 89, + 455, + 100 + ], + "type": "text", + "content": "Table 5: Ablation of top-K positive pair selection for SRC on HICO-DET dataset." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 236, + 100, + 375, + 148 + ], + "lines": [ + { + "bbox": [ + 236, + 100, + 375, + 148 + ], + "spans": [ + { + "bbox": [ + 236, + 100, + 375, + 148 + ], + "type": "table", + "html": "
MethodsmAP (%)
FullRareNon-Rare
Top-522.4521.6122.70
Top-222.4921.8322.69
ours (Top-1)22.8922.4123.03
", + "image_path": "ea948175792d2ed151c0ca1aee49e56a4e3c9c26bc08bbdd021b1dbab82c8ad6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 159, + 310, + 340 + ], + "blocks": [ + { + "bbox": [ + 108, + 159, + 310, + 340 + ], + "lines": [ + { + "bbox": [ + 108, + 159, + 310, + 340 + ], + "spans": [ + { + "bbox": [ + 108, + 159, + 310, + 340 + ], + "type": "image", + "image_path": "5ea9aca00229807cfcbed7f05dd2e53f142325eae021f722c0c643c640bf02ee.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 342, + 289, + 352 + ], + "lines": [ + { + "bbox": [ + 137, + 342, + 289, + 352 + ], + "spans": [ + { + "bbox": [ + 137, + 342, + 289, + 352 + ], + "type": "text", + "content": "(a) Evaluation protocol in Explanation-HOI" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 310, + 159, + 502, + 340 + ], + "blocks": [ + { + "bbox": [ + 310, + 159, + 502, + 340 + ], + "lines": [ + { + "bbox": [ + 310, + 159, + 502, + 340 + ], + "spans": [ + { + "bbox": [ + 310, + 159, + 502, + 340 + ], + "type": "image", + "image_path": "b1ffa02461492b359ad767aae489aa3222ac27a003e81f6fd5b1f59f8392a3fe.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 346, + 342, + 473, + 351 + ], + "lines": [ + { + "bbox": [ + 346, + 342, + 473, + 351 + ], + "spans": [ + { + "bbox": [ + 346, + 342, + 473, + 351 + ], + "type": "text", + "content": "(b) The correct evaluation protocol" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 363, + 504, + 398 + ], + "lines": [ + { + "bbox": [ + 104, + 363, + 504, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 504, + 398 + ], + "type": "text", + "content": "Figure 6: The screenshot of the evaluation code in Explanation-HOI. 
(a) is the original code while (b) is the correct one based on the standard evaluation code. We use red rectangle boxes to highlight the most important differences" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "content": "full label of (human location, interaction type, object location, object type), and the second misses target object (also denoted as 'role' in the original paper (Gupta & Malik, 2015)) annotations, and the label only includes (human location, interaction type). For the second case, there are two different evaluation protocols (scenarios) when taking a prediction as correct " + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "inline_equation", + "content": "^4" + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "content": ": In scenario 1, it requires the interaction is correct & the overlap between the human boxes is " + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "inline_equation", + "content": "> 0.5" + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "content": " & the corresponding role is empty, which is more restricted; in scenario 2, it only requires the interaction is correct & the overlap between the person boxes is " + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "inline_equation", + "content": "> 0.5" + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 512, + 312, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 312, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 312, + 525 + ], + "type": "text", + "content": "I EVALUATION OF EXPLANATION-HOI" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 537, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 504, + 616 + ], + "type": "text", + "content": "The Explanation-HOI (Baldassarre et al., 2020) has a misunderstanding of mAP evaluation protocol. As shown in Fig.6(a) L200-L205, the Explanation-HOI only takes some specific predicted HOIs into the evaluation process, which has the same HOI labels as groundtruth HOIs. Thus, they ignore lots of false-positive HOI predictions when calculating mAP, leading to an untrustable high mAP score (reported in their original paper). In Fig.6(b) L204-L208, we evaluate all predicted HOIs, which is the same as the standard evaluation protocol proposed in HICO-DET (Chao et al., 2015). The correct results have already been reported in Tab.1 in the main paper." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 631, + 193, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 631, + 193, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 193, + 643 + ], + "type": "text", + "content": "J LIMITATIONS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 656, + 504, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 504, + 712 + ], + "type": "text", + "content": "As described in Sec. 3.1, we adopt an external object detector to generate human-object proposals and then recognize their interactions. 
Consequently, our method is faced with two limitations brought by erroneous object detection results. Firstly, the positive human-object pairs are not recalled if the human or object proposals are not detected. Secondly, the proposals are kept fixed during learning, which leads to the problem of inaccurate localization and object types." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 246, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 246, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 246, + 732 + ], + "type": "text", + "content": "4https://github.com/s-gupta/v-coco" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 239, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 239, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 239, + 94 + ], + "type": "text", + "content": "K RISK OF USING CLIP" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "content": "For all the methods that adopt CLIP in their model design, there is a potential risk 
of data leakage as CLIP has seen quite a lot of data during pretraining. For HOI detection task, we cannot get access to CLIP dataset and do not know the exact overlap between CLIP and HOI benchmarks (i.e., HICO-DET and V-COCO), we carefully read Sec. 5 (Data Overlap Analysis) of the CLIP paper (Radford et al., 2021b), including an analysis of the overlap between its dataset with 35 popular datasets (HICO-DET and V-COCO are not included). It shows the overlap is small (median is " + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "inline_equation", + "content": "2.2\\%" + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "content": " and average is " + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "inline_equation", + "content": "3.2\\%" + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "content": ") and the influence is limited (\"overall accuracy is rarely shifted by more than " + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "inline_equation", + "content": "0.1\\%" + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "content": " with only 7 datasets above this threshold\"). Besides, the training text accompanying an image in the CLIP dataset is often not related to the HOI annotations. Thus, we think the risk is limited." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 221, + 174, + 233 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 221, + 174, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 221, + 174, + 233 + ], + "type": "text", + "content": "L LICENSE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 246, + 507, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 246, + 507, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 246, + 507, + 281 + ], + "type": "text", + "content": "The licenses of the assets used in our work are listed below, including open-sourced CLIP model, HICO-DET dataset, and V-COCO dataset. As for HICO-DET, we cannot find its license in the paper and the official project page. Thus we provide the official project page instead here for clarity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 129, + 289, + 425, + 330 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 129, + 289, + 351, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 289, + 351, + 300 + ], + "spans": [ + { + "bbox": [ + 129, + 289, + 351, + 300 + ], + "type": "text", + "content": "1. CLIP: https://github.com/openai/CLIP MIT License" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 129, + 304, + 375, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 304, + 375, + 315 + ], + "spans": [ + { + "bbox": [ + 129, + 304, + 375, + 315 + ], + "type": "text", + "content": "2. VCOCO: https://github.com/s-gupta/v-coco/MIT License" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 129, + 319, + 425, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 319, + 425, + 330 + ], + "spans": [ + { + "bbox": [ + 129, + 319, + 425, + 330 + ], + "type": "text", + "content": "3. 
HICO-DET: http://www-personal.umich.edu/ ywchao/hico/ No license" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Weighted Clock Logic Point Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_content_list.json b/2023/Weighted Clock Logic Point Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..da0d3687c07b5af6643ec6f71cb5d0ba5bd03789 --- /dev/null +++ b/2023/Weighted Clock Logic Point Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_content_list.json @@ -0,0 +1,3888 @@ +[ + { + "type": "text", + "text": "WEIGHTED CLOCK LOGIC POINT PROCESS", + "text_level": 1, + "bbox": [ + 171, + 99, + 692, + 122 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ruixuan Yan $^{1}$ , Yunshi Wen $^{1}$ , Debarun Bhattacharjya $^{2}$ , Ronny Luss $^{2}$ , Tengfei Ma $^{2}$ , Achille Fokoue $^{2}$ , and Agung Julius $^{1}$", + "bbox": [ + 227, + 148, + 771, + 181 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Rensselaer Polytechnic Institute \n $^{2}$ IBM T.J. 
Watson Research Center", + "bbox": [ + 380, + 191, + 614, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 246, + 547, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Datasets involving multivariate event streams are prevalent in numerous applications. We present a novel framework for modeling temporal point processes called clock logic neural networks (CLNN) which learn weighted clock logic (wCL) formulas as interpretable temporal rules by which some events promote or inhibit other events. Specifically, CLNN models temporal relations between events using conditional intensity rates informed by a set of wCL formulas, which are more expressive than related prior work. Unlike conventional approaches of searching for generative rules through expensive combinatorial optimization, we design smooth activation functions for components of wCL formulas that enable a continuous relaxation of the discrete search space and efficient learning of wCL formulas using gradient-based methods. Experiments on synthetic datasets manifest our model's ability to recover the ground-truth rules and improve computational efficiency. In addition, experiments on real-world datasets show that our models perform competitively when compared with state-of-the-art models.", + "bbox": [ + 228, + 277, + 769, + 474 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION AND RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 488, + 522, + 503 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multivariate event streams are emerging types of data that involve occurrences of different types of events in continuous time. Event streams are observed in a wide range of applications, including but not limited to finance (Bacry et al., 2015), politics (O'Brien, 2010), system maintenance (Gunawardana et al., 2011), healthcare (Weiss & Page, 2013), and social networks (Farajtabar et al., 2015). 
As opposed to time series data that typically comprises continuous-valued variables evolving in regular discrete time stamps, event streams involve events occurring irregularly and asynchronously in continuous time. Modeling the dynamics in event streams is important for a wide range of scientific and industrial processes, such as predicting the occurrence of events of interest or understanding why some deleterious events occur so as to possibly prevent their occurrence. A (multivariate) temporal point process (TPP) provides a formal mathematical framework for representing event streams, where a conditional intensity rate for each event measures its occurrence rate at any time given the historical events in the stream (Daley & Vere-Jones, 2003; Aalen et al., 2008).", + "bbox": [ + 169, + 520, + 826, + 688 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "There has been a proliferation of research around TPPs in recent years, particularly around the use of neural networks for modeling conditional intensity rates as a function of historical occurrences (Du et al., 2016; Mei & Eisner, 2017; Xiao et al., 2017; Xu et al., 2017; Gao et al., 2020; Zhang et al., 2020; Zuo et al., 2020). One stream of research studies graphical event models (GEMs) as a compact and interpretable graphical representation for TPPs, where the conditional intensity rate for any particular event depends only on the history of a subset of the events (Didelez, 2008; Gunawardana & Meek, 2016). While any TPP can be represented as a GEM, various models make assumptions about the parametric form of conditional intensity rates for the sake of learnability, for instance that rates are piece-wise constant with respect to occurrences within historical windows (Gunawardana et al., 2011; Bhattacharjya et al., 2018). 
Ordinal GEMs(OGEM) (Bhattacharjya et al., 2020; 2021) are a recent model from this family where a conditional intensity rate depends on the order in which parent events occur within the most recent historical time period.", + "bbox": [ + 169, + 694, + 826, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A temporal logic point process (TLPP) framework was proposed as an alternate way to lend some interpretability to TPPs by modeling intensity rates using temporal logic rules (Li et al., 2020). Although the initial work pre-specified temporal logic rules, recent work has introduced a temporal logic rule learner (TELLER) for automatically discovering rules (Li et al., 2021). There is however", + "bbox": [ + 169, + 867, + 826, + 926 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the issue of scalability since TELLER exploits an expensive branch-and-price algorithm to search for temporal logic rules in a discrete space. Another important limitation of this work is that TELLER's rules are not informative enough to explain how the interval length between ordered events impacts the conditional intensity rate. For instance, while predicting the occurrence of diabetes, the rule that \"insulin injection happens 20 minutes before eating meal\" is more informative and accurate in predicting \"blood glucose remains normal\" than the rule that \"insulin injection happens before eating meal\", as the latter rule cannot expose the interval between 'insulin injection' and 'eating meal'. 
To tackle the above limitations, we propose novel atomic predicates enriching the expressiveness of temporal logic rules as well as a differentiable framework to learn rules in an end-to-end manner.", + "bbox": [ + 169, + 103, + 826, + 229 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This work introduces a differentiable neuro-symbolic framework, clock logic neural network (CLNN), to model TPPs by learning weighted clock logic (wCL) formulas as explanations. Firstly, event streams are converted into continuous-time clock signals representing the time interval between the last occurrence of an event and the current time. Next, we propose a novel wCL to describe the underlying temporal relations with relative interval length, enabling the design of a CLNN to learn the generative mechanisms. Instead of searching for temporal logic rules in some vast discrete space, CLNN associates every neuron with an order representation or a logical operator and assigns weights to edges to reflect the importance of various inputs, which relaxes the search space to be continuous. Moreover, architecture weights are introduced into CLNN to make the formula structure search differentiable. wCL formula-informed intensity rates are carefully designed so that the parameters appearing in the rules can be learned through maximum likelihood estimation using gradient-based approaches. 
CLNN is tested on synthetic datasets to show that CLNN can recover the ground-truth rules as well as on real-world datasets to demonstrate its model-fitting performance.", + "bbox": [ + 169, + 234, + 826, + 417 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 PRELIMINARIES", + "text_level": 1, + "bbox": [ + 171, + 431, + 341, + 446 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 NOTATION & BACKGROUND", + "text_level": 1, + "bbox": [ + 171, + 455, + 411, + 469 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Let $\\mathcal{L}$ denote the set of event labels, and $M = |\\mathcal{L}|$ denote the number of event labels. An event stream is a sequence of events including time stamps, denoted as $\\mathcal{D} = \\{(l_1,t_1),(l_2,t_2),\\dots,(l_N,t_N)\\}$ , where $t_i\\in \\mathbb{R}^+$ denotes a time stamp between the beginning time $t_0 = 0$ and end time $t_{N + 1} = T$ , and $l_{i}\\in \\mathcal{L}$ is the event label that happens at $t_i$ . We refer to 'event label' and 'label' interchangeably. Every event label $l\\in \\mathcal{L}$ has an associated conditional intensity rate describing the occurrence rate of label $l$ at $t$ given the history up to $t$ . In multivariate temporal point processes, conditional intensity rates describe the dynamics of events. Let $\\mathcal{H}_t = \\{(l_i,t_i):t_i < t\\}$ denote the historical events up to time $t$ . The conditional intensity rate of event label $l$ is denoted as $\\lambda_l(t|\\mathcal{H}_t)$ . 
Specifically, $\\lambda_l(t|\\mathcal{H}_t)$ describes the expected number of occurrences of event label $l$ in an infinitesimal interval $[t,t + \\Delta t]$ given the history $\\mathcal{H}_t$ , i.e., $\\lambda_l(t|\\mathcal{H}_t) = \\lim_{\\Delta t\\to 0}(E[N_l(t + \\Delta t) - N_l(t)|\\mathcal{H}_t] / \\Delta t)$ , where $N_{l}(t)$ denotes the number of event label $l$ 's occurrences up to $t$ .", + "bbox": [ + 169, + 479, + 826, + 635 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Example 1 A running example of an event stream with 11 events of 4 labels is shown in Figure 1(a).", + "bbox": [ + 169, + 640, + 823, + 656 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/557ee827b58081cb6292fa7bca2267a38e9181cbaf851f056a773b70a640ed44.jpg", + "image_caption": [ + "(a)", + "(b)", + "Figure 1: (a): An event stream example with $N = 11$ events of $M = 4$ event labels over $T = 30$ days. (Integer-valued time stamps are utilized for easy interpretation, note that the proposed approach also works for $t_i \\in \\mathbb{R}$ ). (b): The overall workflow of the proposed method (POC: paired order cell, SOC: singleton order cell, AC: architecture cell, details presented in Section 2.2 to 3.3)." + ], + "image_footnote": [], + "bbox": [ + 178, + 660, + 834, + 713 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 ORDER REPRESENTATIONS FOR EVENT STREAMS", + "text_level": 1, + "bbox": [ + 169, + 800, + 560, + 814 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The overall workflow of the proposed framework is visualized as Figure 1(b). The raw event streams first go through a masking function to generate the masked event streams, which are then transformed into event clocks using a clocking function. The event clocks are given as inputs to the clock logic neural network (CLNN) to learn interpretable wCL formulas and the intensity rate of event occurrences. 
The following sections provide a detailed explanation for each module in Figure 1(b).", + "bbox": [ + 169, + 818, + 823, + 888 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We are interested in exploring the effect of temporal ordering between event labels and the occurrences of causal event labels in a historical window on the occurrence rate of a particular event label,", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where the generative mechanism is expressed as interpretable formulas. An event stream up to $t$ may include multiple occurrences of the same event label, thus a masking function is required to mask out duplicated event labels in the history for accessing the ordering information at any $t$ . Here we adopt a technique similar to Bhattacharjya et al. (2020) for extracting distinct event labels from $\\mathcal{H}_t$ .", + "bbox": [ + 169, + 104, + 823, + 161 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 1 (Masking Function) A masking function $\\Gamma(\\cdot)$ is a function that takes an event stream as input and returns a new event stream that is a subset of the input stream and contains no duplicated event labels. Mathematically, $\\Gamma(\\cdot)$ is applied to $\\mathcal{H}_t = \\{(l_i, t_i)\\}$ and converts it into a new stream $\\mathcal{H}_t' = \\{(l_j, t_j) \\in \\mathcal{H}_t : l_j \\neq l_{j'} \\text{ if } j \\neq j'\\}$ .", + "bbox": [ + 169, + 164, + 823, + 222 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We consider the following two masking functions as per Bhattacharjya et al. (2020) due to simplicity: 'first' masking and 'last' masking. The 'first' (resp. 
'last') masking function keeps the first (resp. last) occurrence of an event label in an event stream.", + "bbox": [ + 169, + 224, + 823, + 267 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Example 1 (cont.) Let $\\mathcal{H}_{13} = \\{(A,1),(B,3),(A,6),(D,8),(C,10),(D,12)\\}$ . The 'first' masking function converts it to $\\mathcal{H}_{13}' = \\{(A,1),(B,3),(D,8),(C,10)\\}$ , and the 'last' masking function converts it to $\\mathcal{H}_{13}' = \\{(B,3),(A,6),(C,10),(D,12)\\}$ .", + "bbox": [ + 169, + 271, + 823, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "With the masked event history $\\mathcal{H}_t^\\prime$ , we define two order representations for the order relationship between any two event labels and the occurrence of an event within a historical window of $t$ .", + "bbox": [ + 169, + 319, + 823, + 349 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 2 (Paired Order Representation (POR)) A paired order representation is defined as $[l_i, l_j] \\in [\\mathcal{L}]^2$ , where $[\\mathcal{L}]^2$ denotes two-element permutation of a subset of $\\mathcal{L}$ . A paired order representation for $\\mathcal{H}_t^\\prime$ can be obtained by arranging any two distinct labels in $\\mathcal{H}_t^\\prime$ in a sequential order.", + "bbox": [ + 169, + 352, + 823, + 396 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 3 (Singleton Order Representation (SOR)) A singleton order representation is denoted as $[l_j, \\underline{u}_{l_j}] \\in \\mathcal{L} \\times \\mathbb{R}_+$ , representing event label $l_j \\in \\mathcal{L}$ occurred within the past $\\underline{u}_{l_j}$ time units, where $\\underline{u}_{l_j}$ is a variable to learn through a process that will be explained in Section 3.3.", + "bbox": [ + 169, + 402, + 823, + 450 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Example 1 (cont.) 
With first masking, an example of paired order representation for $\\mathcal{H}_{13}^{\\prime}$ can be $[A,B]$ representing \"A happens before $B$ \" or $[B,C]$ representing \"B happens before $C$ \". The overall order representation for $\\mathcal{H}_{13}^{\\prime}$ is expressed as $[A,B,D,C]$ , which can be derived from the paired order representations: $[A,B],[B,D],[D,C]$ . A singleton order representation example of $\\mathcal{H}_{13}^{\\prime}$ can be expressed as $[B,10.5]$ , meaning $B$ happened in the past 10.5 days.", + "bbox": [ + 169, + 453, + 825, + 523 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 WEIGHTED CLOCK LOGIC FORMULA", + "text_level": 1, + "bbox": [ + 171, + 532, + 472, + 545 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To adapt $\\mathcal{H}_t^\\prime$ to continuous-time signals that can be described by logical statements, we extract clock signals from $\\mathcal{H}_t^\\prime$ to describe the time passed since the last occurrence of a label. A clocking function is introduced to convert $t_j$ into a clock signal $c_{j}$ denoting the time interval length between $t_j$ and $t$ .", + "bbox": [ + 169, + 551, + 823, + 595 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 4 (Clocking Function) A clocking function $\\Xi(\\cdot)$ converts $\\mathcal{H}_t^\\prime$ into a vector of clock signals as $\\mathcal{C}'(t) = [c_1(t), c_2(t), \\dots, c_M(t)]^T \\in \\mathbb{R}_+^M$ with $c_i(t)$ denoting the clock signal for event label $i \\in \\mathcal{L}$ , where $c_i(t)$ is computed as $c_i(t) = t - t_j$ if $(l_j, t_j) \\in \\mathcal{H}_t^\\prime$ and $l_j = i$ , and $c_i(t) = \\bar{Z}$ otherwise. Note that $\\bar{Z}$ is a user-defined, large positive number to indicate event label $i$ not happening in $\\mathcal{H}_t^\\prime$ .", + "bbox": [ + 169, + 597, + 823, + 657 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Example 1 (cont.) 
Taking the 'first' masked event stream $\\mathcal{H}_{13}^{\\prime} = \\{(A,1),(B,3),(D,8),(C,12)\\}$ as an example, the event clocks are extracted as $\\mathcal{C}'(13) = [12,10,1,5]^T$ .", + "bbox": [ + 169, + 664, + 823, + 694 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The event clocks can essentially provide the ordering between any two event labels in that the difference between any two event labels' clock signals reflects which event label happens first. As shown in the diabetes prediction example in the Introduction section, the time interval between ordering events is notably important in explaining and predicting an event label's occurrence. In contrast to (Li et al., 2020; 2021) which only learns the temporal ordering relation between event labels, we define a paired order predicate (POP) with a learnable parameter $\\underline{u}_{l_i l_j}$ to describe the time interval between two ordered event labels $l_i$ and $l_j$ and a singleton order predicate (SOP) with a learnable parameter $\\underline{u}_{l_j}$ to describe the occurrence of label $l_j$ within a historical window $\\underline{u}_{l_j}$ as follows.", + "bbox": [ + 169, + 696, + 823, + 813 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 5 (Paired Order Predicate) A POP describes the order between two labels $l_i, l_j \\in \\mathcal{L}, l_i \\neq l_j$ , denoted as $\\pi_{pop}^{l_i l_j} := g(c_{l_i}, c_{l_j}) = c_{l_i} - c_{l_j} > \\underline{u}_{l_i l_j}$ , where $\\underline{u}_{l_i l_j} \\in \\mathbb{R}$ is a parameter to learn. A positive $\\underline{u}_{l_i l_j}$ means $l_i$ happened before $l_j$ for at least $\\underline{u}_{l_i l_j}$ time units, and a negative $\\underline{u}_{l_i l_j}$ means $l_j$ happened before $l_i$ for at most $-\\underline{u}_{l_i l_j}$ time units. 
A POP is used in the POC of Figure 1(b).", + "bbox": [ + 169, + 816, + 823, + 883 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 6 (Singleton Order Predicate) An SOP describes a causal label $l_j \\in \\mathcal{L}$ occurring within the past $\\underline{u}_{l_j}$ time units, defined as $\\pi_{sop}^{l_j} := c_{l_j} - \\underline{u}_{l_j} < 0$ , where $\\underline{u}_{l_j} \\in \\mathbb{R}_+$ is a learnable parameter.", + "bbox": [ + 169, + 890, + 823, + 928 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Instead of taking a heuristic approach for some underlying combinatorial search problem for a given set of temporal predicates (Bhattacharjya et al., 2020; 2021; Li et al., 2021) to uncover the effective order relations, this work proposes a differentiable learning model to learn suitable singleton and paired order predicates among all the possible choices of order predicates through a gradient-based approach. The scheme of weighted signal temporal logic (wSTL) in Yan et al. (2021; 2022) is exploited to build weighted clock logic (wCL) formulas that are logical compositions of singleton and paired order predicates. 
The syntax of wCL is recursively defined as (Mehdipour et al., 2021):", + "bbox": [ + 169, + 103, + 826, + 203 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\phi := \\pi_ {p o p} ^ {l _ {i} l _ {j}} \\left| \\pi_ {s o p} ^ {l _ {j}} \\right| \\neg \\phi \\left| \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} \\dots \\wedge \\phi_ {k} ^ {w _ {k}} \\right| \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}} \\dots \\vee \\phi_ {k} ^ {w _ {k}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 276, + 207, + 823, + 227 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\phi_1, \\dots, \\phi_k$ are wCL formulas, $\\neg$ denotes negation, $\\land$ denotes logical conjunction, $\\lor$ denotes logical disjunction, $w_j \\geq 0, j = 1, \\dots, k$ denotes non-negative weights assigned to $\\phi_1, \\dots, \\phi_k$ in the conjunction and disjunction operations. A wCL formula can describe the characteristics of $\\mathcal{H}_t$ , thus the conditional intensity rate of event $l$ given $\\mathcal{H}_t$ can be equivalently denoted as $\\lambda_{l|\\phi}(t)$ .", + "bbox": [ + 169, + 231, + 823, + 289 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Remark 7 The syntax above means each wCL formula can be built by using predicates in $\\pi_{pop}^{l_i l_j}$ or $\\pi_{sop}^{l_j}$ and then by recursively applying the $\\neg$ or the $\\land$ or the $\\lor$ operations.", + "bbox": [ + 169, + 294, + 825, + 330 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Example 1 (cont.) A $wCL$ formula example is $\\phi = (c_A - c_B > 1)^1 \\wedge (c_C < 3)^{0.05}$ . The first and second clauses read \"A happened before $B$ for at least one day\" and \"C happened less than 3 days ago\", respectively. Note that $\\phi$ is satisfied by the event stream up to $t = 13$ in Figure 1(a). 
The two clauses have weights of 1 and 0.05, reflecting the first clause is more important than the second one.", + "bbox": [ + 169, + 339, + 825, + 397 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 WEIGHTED CLOCK LOGIC POINT PROCESSES", + "text_level": 1, + "bbox": [ + 171, + 409, + 584, + 424 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 TRUTH DEGREE OF WEIGHTED CLOCK LOGIC", + "text_level": 1, + "bbox": [ + 171, + 434, + 534, + 448 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To quantitatively measure the satisfaction degree of a wCL formula $\\phi$ over the event clocks $\\mathcal{C}'(t)$ , i.e., how well does $\\phi$ describe the underlying patterns of $\\mathcal{C}'(t)$ , we propose smooth activation functions (AFs) to compute the truth degree, denoted $p(\\mathcal{C}',\\phi,t)\\in [0,1]$ , defined as (Riegel et al., 2020):", + "bbox": [ + 169, + 455, + 823, + 500 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\mathcal {C} ^ {\\prime}, \\pi_ {p o p} ^ {l _ {i} l _ {j}}, t\\right) = \\operatorname {s i g m o i d} \\left(c _ {l _ {i}} (t) - c _ {l _ {j}} (t) - \\underline {{u}} _ {l _ {i} l _ {j}}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 500, + 823, + 520 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\mathcal {C} ^ {\\prime}, \\pi_ {s o p} ^ {l _ {j}}, t\\right) = \\operatorname {s i g m o i d} \\left(\\underline {{u}} _ {l _ {j}} - c _ {l _ {j}} (t)\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 521, + 823, + 541 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\mathcal {C} ^ {\\prime}, \\neg \\phi , t\\right) = 1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi , t\\right). 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 542, + 823, + 560 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In contrast to the combinatorial search of the temporal logic predicates in Li et al. (2021), the smooth design of AFs in (2) - (4) benefits the maximum likelihood estimation problem shown later in Section 3.6 by allowing it to learn the parameters in the POP and SOP through gradient-based methods. Next, we present the design of activation functions (AF) for the $\\wedge$ operator. Here we use a 2-ary conjunction operator to motivate the design. Let $p^{\\wedge} = p(\\mathcal{C}',\\phi_1^{w_1}\\wedge \\phi_2^{w_2},t)\\in [0,1]$ . Intuitively, $p^{\\wedge}$ is low when either input is low, and $p^{\\wedge}$ is high when both inputs are high. Here we adopt a similar idea to Sen et al. (2022) for capturing the low and high. A user-defined hyperparameter $\\alpha \\in [\\frac{1}{2},1]$ is introduced to aid the interpretability of low and high such that $p^{\\wedge}$ represents high if $p^{\\wedge}\\in [\\alpha ,1]$ and low if $p^{\\wedge}\\in [0,1 - \\alpha ]$ . Considering the importance weights, a low input with a zero weight should not impact the output, which implies $p^{\\wedge}$ should be low when both inputs are low. 
With these considerations, the AF for the $\\wedge$ operator is defined as follows: (See Appendix A for more details.)", + "bbox": [ + 169, + 565, + 825, + 719 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} \\dots \\wedge \\phi_ {k} ^ {w _ {k}}, t\\right) = f \\left(\\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 717, + 825, + 760 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s u b j e c t} \\quad \\beta - \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\geq \\alpha , \\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\leq 1 - \\alpha ,\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 762, + 764, + 806 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $f(z) = \\max \\{0, \\min \\{z, 1\\}\\}$ clamps the truth degree into [0,1], $w_{j} \\geq 0$ and $\\beta \\geq 0$ are parameters to learn. 
By De Morgan's law (Hurley, 2014), the AF for the $\\vee$ operator is defined as", + "bbox": [ + 169, + 811, + 823, + 840 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}} \\dots \\vee \\phi_ {k} ^ {w _ {k}}, t\\right) = f (1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\left(p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 840, + 825, + 882 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s u b j e c t} \\quad 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\geq \\alpha , 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\leq 1 - \\alpha .\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 883, + 790, + 928 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "An event stream with $M$ event labels would generate $\\mathrm{P}_M^2 = \\frac{M!}{(M - 2)!}$ paired order predicates and $M$ singleton order predicates. If a conjunction or disjunction operator takes these predicates as inputs, how it recognizes the effective order predicates in describing the event dynamics becomes a critical issue. By carefully designing the AFs in (5) - (6), the logical operators exhibit the following properties so as to recognize effective inputs. This is a critical advantage over Bhattacharjya et al. (2020; 2021); Li et al. (2021) in that it allows a differentiable search of the suitable predicates among all the possible choices of order predicates in an end-to-end manner. Here we illustrate the properties for $\\wedge$ with two inputs, which can be generalized to $k$ -ary inputs. 
(See Appendix B for more details.)", + "bbox": [ + 169, + 102, + 826, + 219 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Theorem 8 The $AF$ for the $\\wedge$ operator with two inputs exhibits the following properties.", + "bbox": [ + 171, + 224, + 754, + 239 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Nonimpact for zero weights: If $w_{j} = 0, j = 1,2$ , $p(\\mathcal{C}',\\phi_j,t)$ has no impact on $p(\\mathcal{C}',\\phi_1\\wedge \\phi_2,t)$ .", + "2) Impact ordering: If $p(\\mathcal{C}', \\phi_1, t) = p(\\mathcal{C}', \\phi_2, t)$ , and $w_1 \\geq w_2$ , then $\\frac{\\partial p(\\mathcal{C}', \\phi_1 \\wedge \\phi_2, t)}{\\partial p(\\mathcal{C}', \\phi_1, t)} \\geq \\frac{\\partial p(\\mathcal{C}', \\phi_1 \\wedge \\phi_2, t)}{\\partial p(\\mathcal{C}', \\phi_2, t)}$ .", + "3) Monotonicity: $f(\\beta - \\sum_{j=1}^{2} w_j (1 - p(\\mathcal{C}', \\phi_j, t))) \\leq f(\\beta - \\sum_{j=1}^{2} w_j (1 - (p(\\mathcal{C}', \\phi_j, t) + d))), d \\geq 0.$" + ], + "bbox": [ + 171, + 244, + 823, + 316 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9e0cac7601d681cedd8dc0d19c63518c0191e9bee81a8bb2a62181f80436ece0.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 207, + 335, + 562, + 549 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c640ccb66e96e9424797e3c3f14486a4b78a2595964840319ae75556b8aea159.jpg", + "image_caption": [ + "(b)", + "Figure 2: CLNN Structure. (a): Continuous relaxation of the search space using weights. 
(b): The learned discrete model structure for $\\phi = (\\pi_{pop}^{A,B}\\wedge \\pi_{pop}^{B,C})\\vee (\\pi_{sop}^{A})$" + ], + "image_footnote": [], + "bbox": [ + 571, + 334, + 782, + 547 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 LEARNING OF PAIRED ORDER REPRESENTATION", + "text_level": 1, + "bbox": [ + 171, + 619, + 552, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With the smooth AFs designed in (2) - (6), a neuro-symbolic model called clock logic neural network (CLNN) can be designed for any given wCL formula $\\phi$ , in which every neuron has a corresponding symbolic representation. A typical CLNN for $\\phi = (\\pi_{pop}^{A,B}\\wedge \\pi_{pop}^{B,C})\\vee (\\pi_{sop}^{A})$ is visualized as Fig. 2(b), which can be considered as the discrete structure obtained by learning the parameters of the model in Figure 2(a) and keeping the dominant components. Here $\\phi$ can be interpreted as “(A happens before $B$ for at least $\\underline{u}_{AB}$ time units or $B$ happens before $C$ for at least $\\underline{u}_{BC}$ time units) and $A$ happens within the past $\\underline{u}_A$ time units.” This part describes the continuous relaxation of the search space by designing a paired order cell, a singleton order cell, and an architecture cell for learning the paired order representation, singleton order representation and the formula structure.", + "bbox": [ + 169, + 646, + 823, + 772 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Paired Order Cell (POC). A POC is a directed acyclic graph (DAG) comprising two paired order predicate (POP) nodes and one logical node for the $\\wedge$ operator, shown as an orange block in Figure 2(a). 
The two POP nodes represent $\\pi_{pop}^{l_i,l_j}$ and $\\pi_{pop}^{l_j,l_i}$ sharing the same parameter $\\underline{u}_{l_i,l_j}$ , where $\\pi_{pop}^{l_i,l_j}$ denotes \" $l_i$ happened before $l_j$ for at least $\\underline{u}_{l_i,l_j}$ time units\" and $\\pi_{pop}^{l_j,l_i}$ denotes \" $l_j$ happened before $l_i$ for at least $\\underline{u}_{l_i,l_j}$ time units\". Each POP has an associated weight $w_{pop}^{l_i,l_j}$ or $w_{pop}^{l_j,l_i}$ to be learned, and the $\\wedge$ operator forces one of the two weight parameters to dominate the other one such that the learned POR is consistent with the event stream. For example, the POC in Figure 2(a) aims to learn the POR between $A$ and $B$ , whose discretized version would be either $\\pi_{pop}^{A,B}$ or $\\pi_{pop}^{B,A}$ . An event stream with $M$ event labels can generate $\\mathrm{P}_M^2 = \\frac{M!}{(M - 2)!}$ PORs between any two event", + "bbox": [ + 169, + 777, + 826, + 926 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "labels, resulting in $(\\mathrm{P}_M^2 / 2)$ POCs. Similar to learning the POR between any two events, the discrete order representations for the entire history $\\mathcal{H}_t$ can be learned using a POP selection node (as shown in Figure 2(a)) that takes the outputs of all the POCs as input and identifies the important PORs. The learning of the POCs essentially becomes learning the $w$ , $\\beta$ in (5) for the POCs and the POP selection node, as well as $\\underline{u}_{l_i l_j}$ in (2) for the POPs through back propagation. 
The discrete PORs can be acquired by keeping the top- $k$ strongest POCs and the dominant POPs.", + "bbox": [ + 169, + 103, + 826, + 191 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 LEARNING OF SINGLETON ORDER REPRESENTATION", + "text_level": 1, + "bbox": [ + 171, + 196, + 583, + 209 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Singleton Order Cell (SOC). The learning of SOR is accomplished by an SOC, which is displayed as a green block in Figure 2(a). An SOC is a DAG comprising $M$ singleton order predicate (SOP) nodes and one SOP selection node for the $\\wedge$ operator. An SOP node represents $\\pi_{sop}^{l_j}$ that takes $c_{l_j}(t)$ as input and returns the truth degree of $\\pi_{sop}^{l_j}$ over $c_{l_j}(t)$ . The SOP selection node has the same functionality as the POP selection node. The $\\wedge$ operator in the SOP selection node assigns a nonnegative weight to every SOP node and learns the importance weights $w$ and $\\beta$ to extract the dominant SORs affecting the conditional intensity rate the most. The learning of the SOC is thus learning the $w, \\beta$ in (5) for the SOP selection node and $\\underline{u}_{l_j}$ in (3) for the SOPs through back propagation. The discrete SORs can be determined by keeping the top- $k$ strongest SOPs.", + "bbox": [ + 169, + 214, + 826, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 LEARNING OF FORMULA STRUCTURE", + "text_level": 1, + "bbox": [ + 171, + 357, + 478, + 371 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Architecture Cell (AC). For a given set of PORs or SORs, their conjunction or disjunction will behave differently and have distinct meanings. 
For instance, given two causal formulas $\\phi_{1} = (c_{A} - c_{B} > 1)^{1}\\wedge (c_{C} < 5)^{1}$ and $\\phi_{2} = (c_{A} - c_{B} > 1)^{1}\\vee (c_{C} < 5)^{1}$ for the occurrence of event label $D$ , $\\phi_{1}$ means “(A happens before $B$ for at least 1 time unit) and (C happens within the past 5 time units) simultaneously will cause $D$ to happen”, whereas $\\phi_{2}$ means “(A happens before $B$ for at least 1 time unit) or (C happens within the past 5 time units) alternatively will cause $D$ to happen.” The afore-mentioned cells can learn the order representations. Nevertheless, whether their outputs should be connected by the $\\wedge$ or $\\vee$ operator needs to be determined. Here we consider the outputs of the POCs and the SOCs having two choices of being connected by a $\\wedge$ or $\\vee$ operator, each of which is associated with an architecture weight $\\alpha_{arc}^{\\wedge}$ or $\\alpha_{arc}^{\\vee}$ that enables continuous learning of the two choices; this is also called differentiable architecture search (Liu et al., 2019). An architecture cell is introduced for learning the model architecture, which comprises two logical nodes representing a $\\wedge$ operator and a $\\vee$ operator as well as a logical selection node (LSN), shown as the blue block in Figure 2(a). Let $\\pmb{p} = \\{p_1,\\dots,p_k\\}$ denote the set of inputs for each logical operator. Subsequently, the conjunction operator takes $\\pmb{p}$ as input and returns $p^{\\wedge} = f(\\beta^{\\wedge} - \\sum_{j = 1}^{k}w_{j}^{\\wedge}(1 - p_{j}))$ , and the disjunction operator takes $\\pmb{p}$ as input and returns $p^{\\vee} = f(1 - \\beta^{\\vee} + \\sum_{j = 1}^{k}w_{j}^{\\vee}p_{j})$ . 
The LSN represented by $\ominus$ takes $p^{\wedge}$ and $p^{\vee}$ as inputs and returns their weighted sum, where the weights are computed using the softmax of the architecture weights as shown below:", + "bbox": [ + 169, + 375, + 826, + 635 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\ominus} = p \\left(\\mathcal {C} ^ {\\prime}, \\phi , t\\right) = \\sum_ {m \\in \\{\\wedge , \\vee \\}} \\frac {e ^ {\\alpha_ {a r c} ^ {m}}}{\\sum_ {m ^ {\\prime} \\in \\{\\wedge , \\vee \\}} e ^ {\\alpha_ {a r c} ^ {m ^ {\\prime}}}} p ^ {m}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 635, + 823, + 674 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The task of architecture search then reduces to learning the architecture weights $\\alpha_{arc}^{\\wedge}$ , $\\alpha_{arc}^{\\vee}$ and the $w, \\beta$ in (5) - (6) for the two logical operators, which can be executed simultaneously while learning parameters in the POCs and SOCs. The outcome of the architecture search process is a discrete architecture obtained by retaining the logical operator with the strongest architecture weight.", + "bbox": [ + 169, + 674, + 826, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.5 WCL-INFORMED INTENSITY FUNCTION", + "text_level": 1, + "bbox": [ + 171, + 738, + 493, + 752 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The output of a CLNN is the truth degree of $\\phi$ over $\\mathcal{C}'$ at $t$ , which is incorporated into modeling the conditional intensity rates. The modeling process aims to discover the generative mechanism as wCL formulas for every $l \\in \\mathcal{L}$ . In other words, a larger value of $p(\\mathcal{C}', \\phi, t)$ should reflect that $\\phi$ has a greater impact on the occurrence of a particular label. 
For example, if the wCL formula for affecting the occurrence of event label $D$ is given as $\\phi = ((\\pi_{pop}^{A,B})^{w_1} \\wedge (\\pi_{sop}^{C})^{w_2})$ , it means if $\\phi$ is satisfied or the truth degree of $\\phi$ is high, then it has a strong impact on the occurrence of $D$ , where the impact can be promoting or inhibiting the occurrence of $D$ . In terms of the relation between the truth degree and the con-", + "bbox": [ + 169, + 756, + 612, + 909 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5d5b49c9b0aa3673d8a056f465562a03c01bddae53c13e45a86456e37664c6a3.jpg", + "image_caption": [ + "Figure 3: The overall learning framework for $n$ wCL formulas." + ], + "image_footnote": [], + "bbox": [ + 630, + 739, + 816, + 857 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ditional intensity rate, the higher the truth degree $p(\\mathcal{C}',\\phi ,t)$ , the greater its impact on $\\lambda_{D|\\phi}$ . Note", + "bbox": [ + 169, + 909, + 823, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "that the occurrence of one event label may depend on multiple wCL formulas. This work follows the assumption that the impact of multiple formulas are additive in predicting the intensity rate, similar to Li et al. (2020). 
To incorporate a set of wCL formulas $\\Phi = \\{\\phi_1,\\phi_2,\\dots,\\phi_n\\}$ into the modeling of the conditional intensity rate, we define a wCL formula-informed conditional intensity rate as:", + "bbox": [ + 169, + 103, + 826, + 160 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {l \\mid \\Phi} (t) = \\exp \\left(\\sum_ {i = 1} ^ {n} w _ {\\phi_ {i}} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {i}, t\\right) + \\rho\\right), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 162, + 823, + 200 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $w_{\\phi_i}$ is the weight of $\\phi_i$ , and $\\rho$ is a bias term that allows for spontaneous occurrence without the influence from $\\phi$ .", + "bbox": [ + 169, + 204, + 823, + 232 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.6 MAXIMUM LIKELIHOOD ESTIMATION", + "text_level": 1, + "bbox": [ + 171, + 251, + 478, + 265 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Suppose event stream $\\mathcal{D}$ contains $n_l$ occurrences of event $l$ , for which the occurrence time stamps are denoted as $t_{l_1}, t_{l_2}, \\ldots, t_{l_{n_l}}$ . Let $t_0 = 0$ , $t_{l_{n_l + 1}} = T$ . Based on the conditional intensity function in (8), the likelihood for label $l$ over the event stream is calculated as (Daley & Vere-Jones, 2003):", + "bbox": [ + 169, + 277, + 823, + 321 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nL _ {l} = \\prod_ {i = 0} ^ {n _ {l} - 1} \\left(\\exp \\left(- \\int_ {t _ {l _ {i}}} ^ {t _ {l _ {i + 1}}} \\lambda_ {l | \\Phi} (s) d s\\right) \\lambda_ {l | \\Phi} \\left(t _ {l _ {i + 1}}\\right)\\right) \\exp \\left(- \\int_ {t _ {l _ {n _ {l}}}} ^ {T} \\lambda_ {l | \\Phi} (s) d s\\right). 
\\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 330, + 825, + 375 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The corresponding log-likelihood for event label $l$ is expressed as $LL_{l} = (-\\int_{0}^{T}\\lambda_{l|\\Phi}(s)ds) + \\sum_{i = 1}^{n_{l}}[\\log (\\lambda_{l|\\Phi}(t_{l_{i}}))]$ . The total log-likelihood of all the events in $\\mathcal{D}$ is thus $LL_{\\mathcal{D}} = \\sum_{l\\in \\mathcal{L}}LL_{l}$ . During the training process, we train the model parameters for each event label separately. Specifically, the maximum likelihood estimation problem for event label $l$ can be formulated as follows:", + "bbox": [ + 169, + 383, + 823, + 443 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\min - L L _ {l} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 450, + 823, + 467 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\ns. t. \\quad \\forall \\phi \\in \\Phi , \\forall 1 \\leq k \\leq K _ {\\phi} ^ {\\wedge}, \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} (1 - \\alpha) \\geq \\alpha , \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} \\alpha \\leq 1 - \\alpha , \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 470, + 823, + 503 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\forall \\phi \\in \\Phi , \\forall 1 \\leq k ^ {\\prime} \\leq K _ {\\phi} ^ {\\vee}, 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} \\alpha \\geq \\alpha , 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} (1 - \\alpha) \\leq 1 - \\alpha , \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 507, + 823, + 541 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nw _ {i, k} \\geq 0, \\beta_ {k} \\geq 0, w _ {i, k ^ {\\prime}} \\geq 0, \\beta_ {k ^ {\\prime}} \\geq 0, \\underline {{u}} _ {l _ {j}} \\geq 0,\n$$\n", + "text_format": "latex", + "bbox": [ 
+ 184, + 542, + 491, + 563 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $K_{\\phi}^{\\wedge}$ (resp. $K_{\\phi}^{\\vee}$ ) is the number of $\\wedge$ (resp. $\\vee$ ) operators in $\\phi$ , $I_{k}$ (resp. $I_{k'}$ ) denotes the inputs to the $k$ -th $\\wedge$ (resp. $k'$ -th $\\vee$ ) operator. Please see Appendix A for more details about the above formulation. The overall learning framework is shown in Figure 3, in which the forward propagation computes $LL_{l}$ by using $n$ CLNNs; each learns a wCL formula $\\phi_{i}$ and the backward propagation updates the parameters in $n$ CLNNs using projected gradient descent.", + "bbox": [ + 169, + 570, + 823, + 643 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 652, + 328, + 667 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct several experiments on synthetic and real-world datasets to demonstrate the efficacy of our proposed model. Simultaneously, we compare with state-of-the-art (SOTA) models. The experiments are run using the AdamW optimizer in Pytorch (1.10.2) on a Windows 10 system desktop with a 16-core CPU (i7, 3.60GHz) and 32 GB RAM. Our code is available at https://ICLR-CLNN.", + "bbox": [ + 169, + 676, + 826, + 734 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 MODELS", + "text_level": 1, + "bbox": [ + 171, + 752, + 277, + 765 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Multivariate Hawkes Process (MHP) [(Bacry et al., 2017)]: A conventional multivariate Hawkes process utilizing an exponential kernel function to describe the conditional intensity rate, which involves a decay rate and an infectivity matrix characterizing the inter-dependence among events. 
This model is implemented in the tick $^{1}$ library, where the learning problem is posed as a convex quadratic programming problem with a fixed decay rate.", + "bbox": [ + 169, + 777, + 823, + 849 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Proximal Graphical Event Model (PGEM) [(Bhattacharjya et al., 2018)]: A type of GEM that models event data by considering whether a parent in some underlying graph happens in a proximal (recent) window.", + "bbox": [ + 169, + 854, + 823, + 897 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "$^{1}$ https://x-datainitiative.github.io/tick/modules/hawkes.html", + "bbox": [ + 191, + 909, + 545, + 922 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/df33c2f34507afad9f0414108959db34b01d63ce06b3dc21a0fec99af340f4d1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Ground truthφ1=(cA-cB>1)1∧(cA-cC>3)1
CLNN's rule(cA-cB>1.21)1.52 ∧ (cA-cC>3.00)1.41 ∧ (cA-cD>0.82)0.33 ∧ (cB-cC>4.33)0 ∧ (cB-cD>10.69)0 ∧ (cD-cC>-6.57)0.16
TELLER's ruleA before D, B before D, C before D, A before D and C before D
OGEM-tab's ruleExcitation: [B], [C,B], [B,C]; Inhibitory: [A], [C,A], [A,C]
", + "bbox": [ + 178, + 71, + 816, + 125 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ordinal Graphical Event Model (OGEM) [(Bhattacharjya et al., 2020; 2021)]: An ordinal GEM that models the impact of the order of events on the conditional intensity rate. OGEM-tab (resp. OGEM-tree) refers to an OGEM that adopts a tabular (resp. tree) representation of orders.", + "bbox": [ + 169, + 154, + 823, + 196 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Temporal Logic Rule Learner (TELLER)2 [(Li et al., 2021)]. This is a method to learn first-order temporal logic rules explaining the generative mechanism of TPPs. The rule discovery process is formulated as a maximum likelihood estimation problem solved by a branch-and-price algorithm.", + "bbox": [ + 169, + 198, + 823, + 239 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 SYNTHETIC DATASETS", + "text_level": 1, + "bbox": [ + 171, + 242, + 374, + 257 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The first part of this experiment demonstrates CLNN's capability of recovering ground-truth rules using three synthetic datasets generated by CLNN with pre-specified formula structure and parameters, including $\\underline{u}_{l_i l_j}$ in $\\pi_{pop}^{l_i, l_j}$ , as well as the importance weights $w$ and bias $\\beta$ in (5) for logical operators, and the $w_\\phi$ and $\\rho$ in (8) for the conditional intensity rate.", + "bbox": [ + 169, + 262, + 823, + 325 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Experimental Setting. Each synthetic dataset contains 1,000 event streams partitioned into three sets: training (70%), validation (15%), and test (15%). Every dataset is generated using a wCL formula with $w_{\\phi} = 3$ and $\\rho = -5$ . 
The truth value threshold is set as $\\alpha = 0.5$ , and the clock signal for representing an event not occurring in $\\mathcal{H}_t^\\prime$ is set as $\\bar{Z} = 1.5T_{\\mathrm{max}}$ , where $T_{\\mathrm{max}}$ is the maximal ending time among all the event streams. During the training process, we initialize the parameters using four approaches (see Appendix C.5 for more details) and report the best one, and CLNN aims to recover the manually set parameters.", + "bbox": [ + 169, + 330, + 826, + 430 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Results. The ground-truth rule $\\hat{\\phi}_1$ for generating the first synthetic dataset (Syn-1) with $\\mathcal{L} = \\{A, B, C, D\\}$ and the rules discovered by CLNN, TELLER, and OGEM-tab are summarized in Table 1. Results for the other synthetic datasets are presented in Appendix C. The rules are learned using the 'last' masking method, which was also used for data generation. The experimental results show an accurate recovery performance of CLNN in terms of order representation recovery and parameter identification. The unweighted version of the ground truth rule reads: \"If $A$ happens before $B$ for at least 1 time unit and $A$ happens before $C$ for at least 3 time units, then $D$ will happen\". The rule of TELLER only reflects the temporal relation between events $A, B, C$ and $D$ but is unable to capture the temporal relation between $A$ and $B$ or $A$ and $C$ , which does not match the ground-truth rule. In OGEM-tab's rule, $[l]$ denotes a single parent. We show the top 3 excitation and inhibitory rules from OGEM-tab, where excitation (resp. inhibitory) means $\\lambda_{l|\\Phi}$ is higher (resp. lower) than the $\\lambda_{l|\\Phi}$ with all $w_{\\phi_i} = 0$ . The excitation rules of OGEM-tab do not match the ground-truth rule. 
In contrast, the rule discovered by CLNN ( $\\phi_1$ ) assigns larger weights to the paired order predicates $\\pi_{pop}^{A,B} = (c_A - c_B > 1.21)$ and $\\pi_{pop}^{A,C} = (c_A - c_C > 3.00)$ and small weights to the other predicates, where the interval values of 1.21 and 3.00 are both learned. By ignoring the small weights, $\\phi_1$ can be interpreted as \"If $A$ happens before $B$ for at least 1.21 time units and $A$ happens before $C$ for at least 3.00 time units, then $D$ will happen\", meaning the paired order representations discovered by CLNN match well with the ground truth. Moreover, CLNN's rules are more expressive than TELLER and OGEM as it provides a detailed interval length between two ordered labels.", + "bbox": [ + 169, + 433, + 826, + 699 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To show the computational efficiency of our gradient-based learning, we compare the runtimes of CLNN and TELLER on the synthetic datasets in Table 2. Notably, CLNN not only recovers the correct order representations but also was two orders of magnitude faster on average (5.62 s vs 635.99", + "bbox": [ + 169, + 705, + 506, + 787 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "s). In addition, CLNN can learn more expressive order representations that describe both the order relation between two events and their interval length.", + "bbox": [ + 169, + 787, + 823, + 816 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e9a860f558716b4646ae4a52856ca856922009fa023c315fe39cdb81e7b962ad.jpg", + "table_caption": [ + "Table 1: Comparison of rule discovery for CLNN, TELLER, and OGEM-tab on the Syn-1 dataset." + ], + "table_footnote": [], + "table_body": "
wCL formulaφ1φ2φ3,1φ3,2Average
CLNN5.204.604.957.735.62
TELLER252.91286.83925.581078.66635.99
", + "bbox": [ + 521, + 714, + 821, + 753 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2: Runtime (s) for CLNN and TELLER on synthetic datasets.", + "bbox": [ + 517, + 758, + 823, + 785 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 REAL-WORLD DATASETS", + "text_level": 1, + "bbox": [ + 169, + 821, + 390, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "LinkedIn [(Xu et al., 2017)]. An event dataset related to job hopping records of 3,000 LinkedIn users in 82 IT companies. Each event stream records a user's check-in time stamps for different companies or the time stamps for role change within the same company. We filter the dataset to popular companies as per Bhattacharjya et al. (2020), resulting in 1,000 users.", + "bbox": [ + 169, + 840, + 825, + 898 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://github.com/FengMingquan-sjtu/Logic_Point_Processes_ICLR", + "bbox": [ + 189, + 909, + 604, + 924 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Mimic II [(Saeed et al., 2011)]. An event dataset concerning health records of patients from Intensive Care Unit (ICU) visits over 7 years. A patient's event stream records each visit's time stamp and the corresponding diagnosis. We filter out sequences with few visits, resulting in 650 patients.", + "bbox": [ + 169, + 103, + 823, + 148 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Stack Overflow [(Grant & Betts, 2013)]. An event dataset that is related to the badges awarded to users in the question-answering website, the Stack Overflow. Each user's event stream records the badges that he/she receives at various time stamps. 
We keep the event streams with one or more of 20 types of badges and sample 1,000 users from the dataset used in Du et al. (2016).", + "bbox": [ + 169, + 152, + 826, + 209 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Experimental Setup. Each dataset is partitioned into three sets: training (70%), validation (15%), and test (15%). For simplicity, $\\underline{u}_{l_i l_j}$ are set as 0 to study the ordering representations. The truth value threshold is $\\alpha = 0.5$ , and $\\bar{Z} = 1.5T_{\\mathrm{max}}$ , same as the setting for the synthetic datasets, and the number of subformulas is $n = 5$ , and the parameters are initialized as random numbers from a uniform distribution on [0, 1). CLNN is trained on the training set, and the validation set is utilized for model selection during training. Model fit is evaluated using log-likelihood on the test set.", + "bbox": [ + 169, + 215, + 826, + 305 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Results. We follow a similar trend to Bhattacharjya et al. (2018; 2020; 2021) to use the log-likelihood for evaluation of the model's performance. The log-likelihood on the real-world datasets is reported in Table 3, where $DR$ denotes the difference ratio – the difference between CLNN and the best SOTA divided by the absolute value of best SOTA. CLNN's result is chosen as the better one among the 'first' or the 'last' masking. Notably, CLNN outperforms the baseline models on the LinkedIn dataset (13.40% advantage) and achieves a competitive result on the MIMIC II dataset (1.63% loss only). It is observed that PGEM achieves a better result on the Stack Overflow dataset. In Stack Overflow, one type of badge can be awarded only when a user receives a particular badge multiple times, for example, the 'Epic' badge is awarded only when earning 200 daily reputations 50 times, depending on the 'Mortarboard' badge acquired while answering or asking questions. 
CLNN and OGEMs apply masking methods to the data, which may not capture the above dependence. In contrast, PGEM models data without masking, making it more suitable for this dataset.", + "bbox": [ + 169, + 309, + 826, + 477 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/7645a8b1809d9569cc95dc9917c1ec7a57d8d1454d47d8a71c7a8a00ac353d9e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetN (# events)M (labels)MHPPGEMOGEM-tabOGEM-treeTELLERCLNNDR
LinkedIn293210-1593-1462-1478-1418-1548-122813.40%
MIMIC II241915-567-500-474-429-645-436-1.63%
Stack Overflow7125420-52543-48323-49344-49192-71101-50981-5.50%
", + "bbox": [ + 197, + 479, + 799, + 542 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Case Study. The primary strength of CLNN over the SOTA models is that it can describe the generative mechanism as wCL formulas, being more expressive and potentially providing more detailed information. CLNN can be deployed as a valuable tool for assisting domain specialists in knowledge discovery from event data. Here we showcase the above strength of CLNN using an il", + "bbox": [ + 169, + 565, + 437, + 705 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "lustrative example. We select the experimental result on company $F$ of the LinkedIn dataset to demonstrate the expressivity of CLNN's rules, which are shown in Table 4. Here we specify the model to learn five formulas, four of which are inhibitory, and one exhibits excitation. One inhibitory formula has a weight of 0.05, thus not reported in Table 4. Each formula shows the dominant singleton or paired order predicates. Notably, CLNN learns expressive wCL formulas that describe how the logical composition of paired order predicates and(or) singleton order predicates affect a role change in the company $F$ . CLNN's rules are more expressive than TELLER and as expressive as OGEM-tab for describing the occurrence of a causal event within a specific historical window.", + "bbox": [ + 169, + 705, + 826, + 816 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/f50b05780e8b99fb1e3a2e1d3b84574f831871aaa555bf682d357de8e87f832f.jpg", + "table_caption": [ + "Table 3: Dataset information and log-likelihood for all models on the real-world datasets." + ], + "table_footnote": [], + "table_body": "
RulesEffect
CLNNφ1=(cD>cH)0.90 ∧ (cI>cJ)0.72Inhibitory
φ2=(cB<0.45)0.58 ∧ (cD<0.05)0.66Excitation
φ3=(cB>cF)0.50 ∧ (cI>cJ>cD)0.47Inhibitory
φ4=(cA<0.84)0.76 ∧ (cH<1.09)0.50Inhibitory
TELLER[A,F],[C,F],[E,F],[B,F],[D,F]Excitation
OGEM-tab[F],[F,A]Excitation
[A]Inhibitory
", + "bbox": [ + 455, + 573, + 818, + 666 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4: Formulas and their effect as learned by CLNN, TELLER and OGEM-tab on company $F$ of LinkedIn.", + "bbox": [ + 447, + 672, + 823, + 700 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 829, + 320, + 844 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we proposed a novel neuro-symbolic model, CLNN, to learn interpretable wCL formulas from multivariate event data. Experimental results using synthetic and real-world datasets demonstrate CLNN's expressiveness in recovering ground-truth rules in multivariate temporal point processes. Further, CLNN can be trained using gradient-based methods, which improve the learning speed compared to the SOTA.", + "bbox": [ + 169, + 854, + 825, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 ACKNOWLEDGEMENT", + "text_level": 1, + "bbox": [ + 171, + 102, + 390, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This research is sponsored by the Rensselaer-IBM AI Research Collaboration (http://airc.rpi.edu), part of the IBM AI Horizons Network; the National Science Foundation under Grant CMMI-1936578; and the Defense Advanced Research Projects Agency (DARPA) through Cooperative Agreement D20AC00004 awarded by the U.S. Department of the Interior (DOI), Interior Business Center. 
The content of the information does not necessarily reflect the position or the policy of the Government, and no official endorsement should be inferred.", + "bbox": [ + 169, + 133, + 826, + 218 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 239, + 287, + 253 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Odd Aalen, Ornulf Borgan, and Hakon Gjessing. Survival and Event History Analysis: A Process Point of View. Springer Science & Business Media, 2008.", + "Emmanuel Bacry, Iacopo Mastromatteo, and Jean-François Muzy. Hawkes processes in finance. Market Microstructure and Liquidity, 1(01):1550005, 2015.", + "Emmanuel Bacry, Martin Bompaire, Philip Deegan, Stéphane Gaiffas, and Søren V Poulsen. tick: A Python library for statistical learning, with an emphasis on Hawkes processes and time-dependent models. The Journal of Machine Learning Research, 18(1):7937-7941, 2017.", + "Debarun Bhattacharjya, Dharmashankar Subramanian, and Tian Gao. Proximal graphical event models. Advances in Neural Information Processing Systems (NeurIPS), 31:8147-8156, 2018.", + "Debarun Bhattacharjya, Tian Gao, and Dharmashankar Subramanian. Order-dependent event models for agent interactions. In Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI), pp. 1977-1983, 2020.", + "Debarun Bhattacharjya, Tian Gao, and Dharmashankar Subramanian. Ordinal historical dependence in graphical event models with tree representations. In Proceedings of the Conference on Artificial Intelligence (AAAI), pp. 6759-6767, 2021.", + "Yuanda Chen. Thinning algorithms for simulating point processes. Florida State University, Tallahassee, FL, 2016.", + "Daryl J Daley and David Vere-Jones. An Introduction to the Theory of Point Processes, Volume I: Elementary Theory and Methods. Springer, 2003.", + "Vanessa Didelez. 
Graphical models for marked point processes based on local independence. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 70(1):245-264, 2008.", + "Nan Du, Hanjun Dai, Rakshit Trivedi, Utkarsh Upadhyay, Manuel Gomez-Rodriguez, and Le Song. Recurrent marked temporal point processes: embedding event history to vector. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 1555-1564, 2016.", + "Mehrdad Farajtabar, Yichen Wang, Manuel Gomez Rodriguez, Shuang Li, Hongyuan Zha, and Le Song. COEVOLVE: A joint point process model for information diffusion and network coevolution. In Advances in Neural Information Processing Systems (NeurIPS), volume 28, pp. 1954-1962, 2015.", + "Tian Gao, Dharmashankar Subramanian, Karthikeyan Shanmugam, Debarun Bhattacharjya, and Nicholas Mattei. A multi-channel neural graphical event model with negative evidence. In Proceedings of the Conference on Artificial Intelligence (AAAI), pp. 3946-3953, 2020.", + "Scott Grant and Buddy Betts. Encouraging user behaviour with achievements: An empirical study. In Proceedings of the 10th Working Conference on Mining Software Repositories, MSR '13, pp. 65-68. IEEE Press, 2013.", + "Asela Gunawardana and Chris Meek. Universal models of multivariate temporal point processes. In Artificial Intelligence and Statistics, pp. 556-563. PMLR, 2016." + ], + "bbox": [ + 171, + 262, + 825, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Asela Gunawardana, Christopher Meek, and Puyang Xu. A model for temporal dependencies in event streams. Advances in Neural Information Processing Systems (NeurIPS), 24, 2011.", + "Patrick J Hurley. 
A Concise Introduction to Logic. Cengage Learning, 2014.", + "Shuang Li, Lu Wang, Ruizhi Zhang, Xiaofu Chang, Xuqin Liu, Yao Xie, Yuan Qi, and Le Song. Temporal logic point processes. In International Conference on Machine Learning, pp. 5990-6000. PMLR, 2020.", + "Shuang Li, Mingquan Feng, Lu Wang, Abdelmajid Essofi, Yufeng Cao, Junchi Yan, and Le Song. Explaining point processes by learning interpretable temporal logic rules. In International Conference on Learning Representations, 2021.", + "Hanxiao Liu, Karen Simonyan, and Yiming Yang. DARTS: Differentiable architecture search. In International Conference on Learning Representations, 2019.", + "Noushin Mehdipour, Cristian-Ioan Vasile, and Calin Belta. Specifying user preferences using weighted signal temporal logic. IEEE Control Systems Letters, 5(6):2006-2011, 2021.", + "Hongyuan Mei and Jason M Eisner. The neural Hawkes process: A neurally self-modulating multivariate point process. Advances in Neural Information Processing Systems (NeurIPS), 30:6757-6767, 2017.", + "Sean P O'Brien. Crisis early warning and decision support: Contemporary approaches and thoughts on future research. International Studies Review, 12(1):87-104, 2010.", + "Ryan Riegel, Alexander Gray, Francois Luus, Naweed Khan, Ndivhuwo Makondo, Ismail Yunus Akhalwaya, Haifeng Qian, Ronald Fagin, Francisco Barahona, Udit Sharma, et al. Logical neural networks. arXiv preprint arXiv:2006.13155, 2020.", + "Mohammed Saeed, Mauricio Villarroel, Andrew T Reisner, Gari Clifford, Li-Wei Lehman, George Moody, Thomas Heldt, Tin H Kyaw, Benjamin Moody, and Roger G Mark. Multiparameter intelligent monitoring in intensive care II (MIMIC-II): A public-access intensive care unit database. Critical Care Medicine, 39(5):952, 2011.", + "Prithviraj Sen, Bruno WSR de Carvalho, Ryan Riegel, and Alexander Gray. Neuro-symbolic inductive logic programming with logical neural networks. In Proceedings of the Conference on Artificial Intelligence (AAAI), volume 36, pp. 
8212-8219, 2022.", + "Jeremy C. Weiss and David Page. Forest-based point process for event prediction from electronic health records. In Machine Learning and Knowledge Discovery in Databases, pp. 547-562, 2013.", + "Shuai Xiao, Junchi Yan, Xiaokang Yang, Hongyuan Zha, and Stephen Chu. Modeling the intensity function of point process via recurrent neural networks. In Proceedings of the Conference on Artificial Intelligence (AAAI), volume 31, pp. 1597-1603, 2017.", + "Hongteng Xu, Dixin Luo, and Hongyuan Zha. Learning Hawkes processes from short doubly-censored event sequences. In International Conference on Machine Learning, pp. 3831-3840. PMLR, 2017.", + "Ruixuan Yan, Agung Julius, Maria Chang, Achille Fokoue, Tengfei Ma, and Rosario Uceda-Sosa. STONE: Signal temporal logic neural network for time series classification. In 2021 International Conference on Data Mining Workshops (ICDMW), pp. 778-787. IEEE, 2021.", + "Ruixuan Yan, Tengfei Ma, Achille Fokoue, Maria Chang, and Agung Julius. Neuro-symbolic models for interpretable time series classification using temporal logic description. In 2022 IEEE International Conference on Data Mining (ICDM), pp. 618-627, 2022. doi: 10.1109/ICDM54844.2022.00072.", + "Qiang Zhang, Aldo Lipani, Omer Kirnap, and Emine Yilmaz. Self-attentive Hawkes process. In International Conference on Machine Learning, pp. 11183-11193. PMLR, 2020.", + "Simiao Zuo, Haoming Jiang, Zichong Li, Tuo Zhao, and Hongyuan Zha. Transformer Hawkes process. In International Conference on Machine Learning, pp. 11692-11702. PMLR, 2020." 
+ ], + "bbox": [ + 171, + 102, + 825, + 925 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A FORMULATION OF LOGICAL CONSTRAINTS & OBJECTIVE FUNCTION", + "text_level": 1, + "bbox": [ + 171, + 102, + 785, + 118 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The optimization problem in (10) is formulated by maximizing the log-likelihood subject to the logical constraints for the $\\wedge$ and $\\vee$ operators. This section discusses the details of the formulation for the two logical constraints and how to formulate the optimization problem while considering the logical constraints. Without loss of generality, we illustrate the formulation of the constraints for the $\\wedge$ operator, and the constraints for $\\vee$ operator can be derived from the constraints for the $\\wedge$ operator using De Morgan's law.", + "bbox": [ + 169, + 133, + 826, + 218 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "- Logical constraints for $\\wedge$ operator.", + "text_level": 1, + "bbox": [ + 215, + 229, + 475, + 244 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Let $x, y \\in [0,1]$ denote the inputs of the $\\wedge$ operator, and $f(x,y)$ denote the quantitative satisfaction of $\\wedge$ . The conventional characteristic of the $\\wedge$ operator is illustrated as follows: 1) $f(x,y)$ is low when either input is low, and 2) $f(x,y)$ is high when both inputs are high. However, we associate each input with a nonnegative weight, implying the input with a zero weight should not affect the output. In other words, if a low input has a zero weight, it should not affect the output of $f(x,y)$ . 
Therefore, we require the $\\wedge$ operator to exhibit the following characteristics: 1) $f(x,y)$ is low when both inputs are low, and 2) $f(x,y)$ is high when both inputs are high. Here we introduce a user-defined hyperparameter $\\alpha \\in [\\frac{1}{2},1]$ to capture low vs. high: $x \\in [0,1 - \\alpha)$ represents low and $x \\in [\\alpha,1]$ represents high. According to the above characteristics, we have (Sen et al., 2022)", + "bbox": [ + 227, + 246, + 823, + 385 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nf (x, y) \\leq 1 - \\alpha , \\quad \\forall x, y \\in [ 0, 1 - \\alpha), \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 391, + 823, + 414 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nf (x, y) \\geq \\alpha , \\quad \\forall x, y \\in [ \\alpha , 1 ].\n$$\n", + "text_format": "latex", + "bbox": [ + 428, + 410, + 624, + 425 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Here we follow a specific choice of $f$ by using a triangular norm ( $t$ -norm) and define the quantitative satisfaction function of $\\wedge$ as (Riegel et al., 2020)", + "bbox": [ + 228, + 431, + 823, + 460 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}}, t\\right) = f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 294, + 465, + 823, + 508 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s u b j e c t} \\quad \\beta - \\sum_ {j = 1} ^ {2} w _ {j} (1 - \\alpha) \\geq \\alpha , \\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\alpha \\leq 1 - \\alpha , \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 513, + 823, + 555 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $f(z) = \\max \\{0, \\min \\{z, 
1\\}\\}$ is introduced to clamp the truth value into the range of [0, 1].", + "bbox": [ + 228, + 561, + 823, + 590 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "- Logical constraints for $\\vee$ operator.", + "text_level": 1, + "bbox": [ + 215, + 594, + 475, + 608 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "By using De Morgan's law, we could derive the quantitative satisfaction function and the logical constraints for the $\\lor$ operator with 2 inputs as follows:", + "bbox": [ + 227, + 609, + 823, + 638 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}}, t\\right) = f \\left(1 - \\beta + \\sum_ {j = 1} ^ {2} w _ {j} \\left(p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 645, + 823, + 686 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s u b j e c t} \\quad 1 - \\beta + \\sum_ {j = 1} ^ {2} w _ {j} \\alpha \\geq \\alpha , 1 - \\beta + \\sum_ {j = 1} ^ {2} w _ {j} (1 - \\alpha) \\leq 1 - \\alpha . \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 691, + 823, + 734 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Here we show the characteristics of the activation functions for the $\\wedge$ and $\\vee$ operators using Figure 4. Figure 4(a) shows the truth value of the $\\wedge$ operator with $\\alpha = 0.7$ . Figure 4(b) shows the truth value of the $\\wedge$ operator with $\\alpha = 0.9$ . It can be distinctly observed that $f(x,y)$ is close to 0 when both $x$ and $y$ are low, and $f(x,y)$ is close to 1 when both $x$ and $y$ are high. In addition, the unconstrained region for $\\alpha = 0.9$ is larger than the unconstrained region for $\\alpha = 0.7$ . Figure 4(c) shows the truth value of the $\\vee$ operator with $\\alpha = 0.7$ . 
It is obvious that $f(x,y)$ is close to 0 when both $x$ and $y$ are low, and $f(x,y)$ is close to 1 when both $x$ and $y$ are high.", + "bbox": [ + 169, + 744, + 823, + 843 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In general, we could extend the quantitative satisfaction for the $\\wedge$ and $\\vee$ operators in (14) - (17) to $k$ -ary conjunction and $k$ -ary disjunction. The $k$ -ary conjunction formulation is expressed as follows.", + "bbox": [ + 169, + 849, + 823, + 878 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} \\dots \\wedge \\phi_ {k} ^ {w _ {k}}, t\\right) = f \\left(\\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 883, + 823, + 928 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d11ca93f13aa2d576cabacd06a21df8f552d4545ee7577c50b2397da4f4f2f16.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 186, + 109, + 380, + 191 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/44228cc4cb20f4bb5ebcf91b59b11946430cf8359c5cdc1bfbbcccf7866d5bec.jpg", + "image_caption": [ + "(b)", + "Figure 4: Plot of truth degree for (a) CLNN- $\\wedge$ with $\\alpha = 0.7$ , (b) CLNN- $\\wedge$ with $\\alpha = 0.9$ , (c) CLNN- $\\vee$ with $\\alpha = 0.7$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 395, + 109, + 598, + 191 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1110140bea7cd3027c436d8e7f557f9ff7327030c05282067db9188a4a31941c.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 611, + 109, + 813, + 191 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s u b j e c t} \\quad \\beta - \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\geq \\alpha , \\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\leq 1 - \\alpha . \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 262, + 825, + 306 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The $k$ -ary disjunction formulation is expressed as follows.", + "bbox": [ + 171, + 320, + 555, + 335 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}} \\dots \\vee \\phi_ {k} ^ {w _ {k}}, t\\right) = f (1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\left(p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)), \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 343, + 825, + 386 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s u b j e c t} \\quad 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\geq \\alpha , 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\leq 1 - \\alpha . \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 388, + 823, + 431 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "With the above constraints, we can formulate the maximum likelihood estimation problem as", + "bbox": [ + 171, + 446, + 782, + 460 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\min - L L _ {l} \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 217, + 468, + 823, + 484 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\ns. t. 
\\quad \\forall \\phi \\in \\Phi , \\forall 1 \\leq k \\leq K _ {\\phi} ^ {\\wedge}, \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} (1 - \\alpha) \\geq \\alpha , \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} \\alpha \\leq 1 - \\alpha , \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 487, + 823, + 518 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\forall \\phi \\in \\Phi , \\forall 1 \\leq k ^ {\\prime} \\leq K _ {\\phi} ^ {\\vee}, 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} \\alpha \\geq \\alpha , 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} (1 - \\alpha) \\leq 1 - \\alpha . \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 523, + 823, + 571 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this paper, we set $\\alpha = 0.5$ , thus the constraints in (19) become", + "bbox": [ + 171, + 585, + 599, + 601 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 1} ^ {k} w _ {i} \\geq 2 \\beta - 1,\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 609, + 557, + 650 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 1} ^ {k} w _ {i} \\leq 2 \\beta - 1, \\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 654, + 823, + 694 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n2 \\beta - 1 \\geq 0,\n$$\n", + "text_format": "latex", + "bbox": [ + 473, + 696, + 557, + 710 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nw _ {i} \\geq 0.\n$$\n", + "text_format": "latex", + "bbox": [ + 503, + 715, + 557, + 729 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Reformulating the above constraints, we have", + "bbox": [ + 171, + 738, + 475, + 752 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 1} ^ {k} w _ {i} = 2 \\beta - 1, \\tag {26}\n$$\n", + "text_format": 
"latex", + "bbox": [ + 436, + 760, + 823, + 801 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\beta \\geq 0. 5,\n$$\n", + "text_format": "latex", + "bbox": [ + 470, + 804, + 532, + 819 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nw _ {i} \\geq 0. \\tag {27}\n$$\n", + "text_format": "latex", + "bbox": [ + 465, + 821, + 823, + 837 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The above constraints hold for each conjunction operator in $\\phi$ . Therefore, we can incorporate the constraints in (26) into the objective function, which becomes", + "bbox": [ + 169, + 844, + 823, + 873 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\min - L L _ {l} + \\sum_ {k = 1} ^ {K _ {\\phi} ^ {\\wedge}} \\left(\\sum_ {i \\in I _ {k}} w _ {i, k} - 2 \\beta_ {k} + 1\\right) ^ {2}, \\tag {28}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 881, + 823, + 926 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s u b j e c t} w _ {i, k} \\geq 0, \\beta_ {k} \\geq 0. 5, \\forall i \\in I _ {k}, \\forall 1 \\leq k \\leq K _ {\\phi} ^ {\\wedge}, \\forall \\phi \\in \\Phi . \\tag {29}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 103, + 823, + 119 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Similarly, we propose a set of logical constraints for the $\\lor$ operator as (21). 
If we set $\\alpha = 0.5$ , the constraints in (21) become", + "bbox": [ + 169, + 132, + 823, + 160 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 1} ^ {k} w _ {i} \\geq 2 \\beta - 1,\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 167, + 557, + 208 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 1} ^ {k} w _ {i} \\leq 2 \\beta - 1, \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 212, + 823, + 252 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n2 \\beta - 1 \\geq 0,\n$$\n", + "text_format": "latex", + "bbox": [ + 473, + 255, + 557, + 268 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nw _ {i} \\geq 0.\n$$\n", + "text_format": "latex", + "bbox": [ + 503, + 273, + 557, + 287 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Reformulating the above constraints, we have", + "bbox": [ + 171, + 294, + 475, + 309 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i _ {1}} ^ {k} w _ {i} = 2 \\beta - 1, \\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 316, + 823, + 357 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\beta \\geq 0. 5.\n$$\n", + "text_format": "latex", + "bbox": [ + 470, + 361, + 531, + 375 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nw _ {i} \\geq 0. \\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 465, + 378, + 823, + 393 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The above constraints hold for each disjunction operator in $\\phi$ . Therefore, we can incorporate the constraints in (31) into the objective function. 
The maximum likelihood estimation problem then becomes", + "bbox": [ + 169, + 398, + 823, + 440 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\min - L L _ {l} + \\sum_ {k = 1} ^ {K _ {\\phi} ^ {\\wedge}} \\left(\\sum_ {i \\in I _ {k}} w _ {i, k} - 2 \\beta_ {k} + 1\\right) ^ {2} + \\sum_ {k ^ {\\prime} = 1} ^ {K _ {\\phi} ^ {\\vee}} \\left(\\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} - 2 \\beta_ {k ^ {\\prime}} + 1\\right) ^ {2}, \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 446, + 823, + 492 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "subject to $w_{i,k}\\geq 0,\\beta_k\\geq 0.5,\\forall i\\in I_k,\\forall 1\\leq k\\leq K_\\phi^{\\wedge},\\forall \\phi \\in \\Phi ,$", + "bbox": [ + 225, + 496, + 656, + 512 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nw _ {i, k ^ {\\prime}} \\geq 0, \\beta_ {k ^ {\\prime}} \\geq 0. 5, \\forall i \\in I _ {k ^ {\\prime}}, \\forall 1 \\leq k ^ {\\prime} \\leq K _ {\\phi} ^ {\\vee}, \\forall \\phi \\in \\Phi .\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 515, + 674, + 532 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B PROOF OF THEOREM 8", + "text_level": 1, + "bbox": [ + 171, + 551, + 398, + 566 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The activation function designed for the $\\wedge$ operator satisfies the properties of nonimpact for zero weights, impact ordering, and monotonicity. 
Without loss of generality, we present the proof for the $\\wedge$ operator connecting two clauses, which can be generalized to the $\\wedge$ operator connecting $k$ -ary clauses.", + "bbox": [ + 169, + 582, + 823, + 637 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Proof 1 Here we present the proof for the activation function for the $\\wedge$ operator satisfying each property mentioned above.", + "bbox": [ + 169, + 651, + 823, + 680 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "- Nonimpact for zero weights.", + "bbox": [ + 215, + 691, + 423, + 705 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This means if $w_{j} = 0, j = 1,2$ , then $p(\\mathcal{C}',\\phi_j,t)$ should have no impact on $p(\\mathcal{C}',\\phi_1^{w_1}\\wedge \\phi_2^{w_2},t)$ . Without loss of generality, we suppose $w_{1} = 0$ , thus we have", + "bbox": [ + 228, + 712, + 823, + 742 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}}, t\\right) = f (\\beta - 0 \\cdot (1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1}, t\\right)) - w _ {2} \\cdot (1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {2}, t\\right))), \\tag {34} \\\\ = f \\left(\\beta - w _ {2} \\cdot \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {2}, t\\right)\\right)\\right), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 747, + 823, + 784 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "meaning $p(\\mathcal{C}',\\phi_1,t)$ has no impact on $p(\\mathcal{C}',\\phi_1^{w_1}\\wedge \\phi_2^{w_2},t)$", + "bbox": [ + 228, + 789, + 616, + 806 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "- Impact Ordering", + "text_level": 1, + "bbox": [ + 215, + 814, + 349, + 829 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This means the truth degree of subformula with higher weights has a greater impact on 
$p(\\mathcal{C}', \\phi_1^{w_1} \\wedge \\phi_2^{w_2}, t)$ . Mathematically, we need to prove that if $p(\\mathcal{C}', \\phi_1, t) = p(\\mathcal{C}', \\phi_2, t)$ and $w_1 \\geq w_2$ , then", + "bbox": [ + 228, + 835, + 825, + 878 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} \\geq \\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)}. \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 893, + 823, + 929 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As $f(x) = \\max \\{0, \\min \\{x, 1\\}\\}$ , we have", + "bbox": [ + 228, + 103, + 503, + 119 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d f}{d x} = \\left\\{ \\begin{array}{l l} 0, & \\text {i f} x < 0, \\\\ 1, & \\text {i f} 0 < x < 1, \\\\ 0, & \\text {i f} x > 1. \\end{array} \\right. 
\\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 434, + 128, + 823, + 178 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "If $\\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},t)) < 0$ or $\\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},k)) > 1$ , then we have", + "bbox": [ + 228, + 188, + 825, + 209 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} = \\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)} = 0. \\tag {37}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 215, + 825, + 251 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Also, if $0 < \\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},t)) < 1$ , then we have", + "bbox": [ + 228, + 266, + 645, + 286 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {j} , t\\right)\\right)\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} = w _ {1} \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {38}\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 294, + 825, + 338 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 228, + 347, + 261, + 359 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {j} , t\\right)\\right)\\right)}{\\partial p \\left(\\mathcal {C} ^ 
{\\prime} , \\phi_ {2} , t\\right)} = w _ {2} \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right). \\tag {39}\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 366, + 825, + 409 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "As $w_{1}\\geq w_{2}$ , the following holds:", + "bbox": [ + 228, + 417, + 455, + 433 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} \\geq \\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)}, \\tag {40}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 439, + 825, + 474 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "which proves the impact ordering property holds.", + "bbox": [ + 228, + 481, + 555, + 498 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "- Monotonicity.", + "text_level": 1, + "bbox": [ + 215, + 506, + 328, + 520 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This means $p(\\mathcal{C}', \\phi_1^{w_1} \\wedge \\phi_2^{w_2}, t)$ increases monotonically over $p(\\mathcal{C}', \\phi_j, t)$ , i.e.", + "bbox": [ + 228, + 526, + 736, + 544 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nf \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right) \\leq f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right) - d\\right)\\right) \\text {f o r} d \\geq 0. 
\\tag {41}\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 551, + 825, + 594 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "First, note that $\\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},t))$ can be rewritten as", + "bbox": [ + 228, + 611, + 669, + 631 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right) = \\beta - w _ {1} - w _ {2} + w _ {1} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1}, t\\right) + w _ {2} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {2}, t\\right). \\tag {42}\n$$\n", + "text_format": "latex", + "bbox": [ + 248, + 640, + 825, + 681 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This implies $f(\\beta - \\sum_{j=1}^{2} w_j (1 - p(\\mathcal{C}', \\phi_j, t)))$ is monotonically increasing over $p(\\mathcal{C}', \\phi_1, t)$ and $p(\\mathcal{C}', \\phi_2, t)$ . Also, from the proof of impact ordering we know $f(x) = \\max \\{0, \\min \\{x, 1\\}\\}$ is monotonically nondecreasing, we can show that", + "bbox": [ + 228, + 691, + 826, + 739 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nf \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right) \\leq f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right) - d\\right)\\right), d \\geq 0. \\tag {43}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 748, + 825, + 790 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Thus the property of monotonicity is satisfied.", + "bbox": [ + 228, + 799, + 532, + 814 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C EXPERIMENT RESULTS OF SYNTHETIC DATASETS", + "text_level": 1, + "bbox": [ + 171, + 835, + 622, + 849 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Dataset Generation. 
In the experiments on synthetic datasets, we manually generate 3 synthetic datasets considering different settings, where the details and results for the first synthetic dataset is reported in Section 4.2. Each setting considers a different order representation, different number of event labels or different intensity of causal event labels.", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/6615481611b2355c10d747efa0377f49ce26dac2c3075e65e67373d9e97fbe23.jpg", + "image_caption": [ + "Figure 5: Model structure of $\\hat{\\phi}_1$ for generating the first synthetic dataset." + ], + "image_footnote": [], + "bbox": [ + 320, + 103, + 671, + 314 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1 SYNTHETIC DATASET-1 (SYN-1).", + "text_level": 1, + "bbox": [ + 171, + 375, + 450, + 388 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Generation process. The first synthetic dataset contains 4 event labels: $A, B, C$ , and $D$ , where $D$ is the event for prediction, and $A, B, C$ are causal events. 
The wCL formula used to generate event $D$ in the first synthetic dataset is set as", + "bbox": [ + 169, + 398, + 826, + 441 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\phi} _ {1} = \\left(c _ {A} - c _ {B} > 1\\right) ^ {1} \\wedge \\left(c _ {A} - c _ {C} > 3\\right) ^ {1}, \\tag {44}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 449, + 823, + 467 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "whose unweighted version reads as \"If $A$ happens before $B$ for at least 1 time unit and $A$ happens before $C$ for at least 3 time units, then $D$ will happen.\"", + "bbox": [ + 169, + 472, + 823, + 502 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Here we consider event labels $A, B, C$ as free predicates, whose occurrences are generated by a homogeneous Poisson process. The homogeneous intensity rate for $A, B, C$ are set as $\\lambda_A = 0.2$ , $\\lambda_B = 0.2$ , and $\\lambda_C = 0.2$ . The algorithm used to generate instances of $A, B, C$ is described as Algorithm 1 (Chen, 2016).", + "bbox": [ + 169, + 507, + 825, + 564 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Simulation of a homogeneous Poisson process with intensity rate $\\lambda$ ." + ], + "code_body": "Input: Intensity rate $\\lambda$ , simulation horizon $T$ \nOutput: Occurrence time stamps $\\mathcal{T} = \\{t_k\\}$ \n1: Initialize $n = 0,t_0 = 0$ . \n2: while True do \n3: Generate $u\\sim$ uniform(0, 1); \n4: Let $w = -ln(u) / \\lambda$ . \n5: Set $t_{n + 1} = t_n + w$ . \n6: if $t_{n + 1} > T$ then \n7: return $\\mathcal{T} = \\{t_k\\}_{k = 1,2,\\dots,n}$ . \n8: else \n9: Set $n = n + 1$ . \n10: end if \n11: end while", + "bbox": [ + 173, + 595, + 823, + 806 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "With the above algorithm, we can generate the occurrences of event labels $A, B$ , and $C$ . 
Next, we build a CLNN for $\\hat{\\phi}_1 = (c_A - c_B > 1)^1 \\wedge (c_A - c_C > 3)^1$ to calculate the conditional intensity rate $\\lambda_{D|\\hat{\\phi}_1}$ , whose model structure is shown in Figure 5. After obtaining $\\lambda_{D|\\hat{\\phi}_1}(t)$ , we could use Algorithm 2 (Chen, 2016) to generate the occurrence of $D$ .", + "bbox": [ + 169, + 828, + 823, + 888 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Results. The rules learned by CLNN, TELLER, and OGEM-tab on the first synthetic dataset are presented in Table 5, where the paired order predicate among the two candidates with the highest", + "bbox": [ + 169, + 895, + 825, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 509, + 959 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2 Simulation of an inhomogeneous Poisson process with intensity rate $\\lambda(t)$ ." + ], + "code_body": "Input: intensity rate $\\lambda (t)$ , simulation horizon $T$ \nOutput: Occurrence time stamps $\\mathcal{T} = \\{t_k\\}$ 1: Initialize $n = m = 0,t_0 = s_0 = 0,\\bar{\\lambda} = \\sup_{0\\leq t\\leq T};\\lambda (t);$ 2: while $s_m < T$ do 3: Generate a uniform random variable $u\\sim \\mathrm{uniform}(0,1)$ 4: Let $w = -\\ln u / \\bar{\\lambda}$ . 5: Set $s_{m + 1} = s_m + w$ . 6: Generate $D\\sim \\mathrm{uniform}(0,1)$ . 7: if $D\\leq \\lambda (s_{m + 1})\\bar{\\lambda}$ then 8: $t_{n + 1} = s_{m + 1}$ . 9: $n = n + 1$ . \n10: end if \n11: $m = m + 1$ . 
\n12: if $t_n\\leq T$ then \n13: return $\\{t_k\\}_{k = 1,2,\\dots,n}$ \n14: else \n15: return $\\{t_k\\}_{k = 1,2,\\dots,n - 1}$ \n16: end if \n17: end while", + "bbox": [ + 173, + 121, + 591, + 412 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/1ecad1295859dbea184c98335fe73ae47c3179671a3dc4e984f7a1a0a1695ef8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetSyn-1
N (# events)N = 4, L = {A, B, C, D}
Ground truthφ1 = (cA - cB > 1)1 ∧ (cA - cC > 3)1
CLNN's rule(cA - cB > 1.21)1.52 ∧ (cA - cC > 3.00)1.41 ∧ (cA - cD > 0.82)0.33 ∧ (cB - cC > 4.33)0 ∧ (cB - cD > 10.69)0 ∧ (cD - cC > -6.57)0.16
TELLER's ruleA before D, B before D, C before D, A before D and C before D
OGEM-tab's ruleExcitation: [B], [C], [C, B], [B, C], [A, C, B], [A, B, C]Inhibitory: [A], [B, A], [B, A, C], [C, B, A], [A, B], [A, C], [B, C, A], [C, A, B], [C, A]
", + "bbox": [ + 232, + 428, + 761, + 606 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 5: Comparison of rule discovery for CLNN and TELLER on the Syn-1 dataset.", + "bbox": [ + 217, + 616, + 777, + 631 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "weight is presented. It can be clearly observed that by truncating the predicates with small weights, we could obtain the formula as", + "bbox": [ + 169, + 657, + 823, + 685 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_ {1} = \\left(c _ {A} - c _ {B} > 1. 2 1\\right) ^ {1. 5 2} \\wedge \\left(c _ {A} - c _ {C} > 3. 0 0\\right) ^ {1. 4 1}, \\tag {45}\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 693, + 823, + 710 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "which matches well with the ground-truth rule. However, TELLER cannot capture the paired order representation between $A$ and $B$ or $A$ and $C$ . OGEM-tab captures the order representation $[A, B]$ and $[A, C]$ as inhibitory causes, which contradicts the ground-truth rule.", + "bbox": [ + 169, + 718, + 825, + 762 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.2 SYNTHETIC DATASET-2 (SYN-2).", + "text_level": 1, + "bbox": [ + 171, + 777, + 449, + 792 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Generation Process. The second synthetic dataset contains 5 event labels: $A, B, C, D$ and $E$ , where $E$ is the event for prediction, and $A, B, C, D$ are causal events. The wCL formula used to generate the occurrence of event $E$ in the second synthetic dataset is set as", + "bbox": [ + 169, + 805, + 825, + 848 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\phi} _ {2} = \\left(c _ {A} - c _ {B} > 0. 5\\right) ^ {1} \\wedge \\left(c _ {A} - c _ {C} > 1. 
5\\right) ^ {1} \\wedge \\left(c _ {C} - c _ {D} > 2\\right) ^ {1}, \\tag {46}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 854, + 823, + 875 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "whose unweighted version reads as \"If $A$ happens before $B$ for at least 0.5 time units, $A$ happens before $C$ for at least 1.5 time units, and $C$ happens before $D$ for at least 2 time units, then $E$ will happen.\"", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/5c9eb772a4b858637daaf47666b26dd2ef522b35246e1a44e155613c66b870c4.jpg", + "image_caption": [ + "Figure 6: Model structure of $\\hat{\\phi}_2$ for generating the second synthetic dataset." + ], + "image_footnote": [], + "bbox": [ + 261, + 106, + 733, + 318 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The occurrence of events $A, B, C$ and $D$ are generated using Algorithm 1, in which $\\lambda_A = \\lambda_B = \\lambda_C = \\lambda_D = 0.2$ . After obtaining the occurrence of $A, B, C$ and $D$ , we simulate the generation of event label $E$ using Algorithm 2, in which the intensity rate $\\lambda_{E|\\hat{\\phi}_2}(t)$ is computed using the model shown in Figure 6.", + "bbox": [ + 169, + 373, + 823, + 434 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Results. The rules learned by CLNN, TELLER and OGEM-tab on the second synthetic dataset are presented in Table 6, where the paired order predicate with the highest weight is presented. 
It can be clearly observed that by truncating the predicates with small weights, CLNN learns a wCL formula as:", + "bbox": [ + 169, + 439, + 823, + 494 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_ {2} = \\left(c _ {A} - c _ {B} > 0. 7 7\\right) ^ {1. 2 7} \\wedge \\left(c _ {A} - c _ {C} > 2. 0 9\\right) ^ {1. 1 5} \\wedge \\left(c _ {C} - c _ {D} > 2. 6 0\\right) ^ {1. 0 6}, \\tag {47}\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 494, + 823, + 512 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "whose order representation match well with the ground-truth rule. Nevertheless, TELLER's rule only captures the ordering between $A$ , $B$ and $E$ , whereas the ordering between $A$ and $B$ or $B$ and $C$ or $C$ and $D$ are not learned. OGEM-tab's rules can only capture the relation between event label $D$ and event label $E$ can excite the occurrence of event label $E$ , whereas not able to capture the dependence of event label $E$ 's occurrence on the order relation between $A$ and $B$ or $B$ and $C$ or $C$ and $D$ .", + "bbox": [ + 169, + 516, + 826, + 599 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/e3d81b80ba2fe7d2f1245e05c07d7c1330d42f7a95cc7e95b47e339517f27422.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetSyn-2
N (# events)N=5, L={A,B,C,D,E}
Ground truthφ2=(cA-cB>0.5)1∧(cB-cC>1.5)1∧(cC-cD>2)1
CLNN's rule(cA-cB>0.77)1.27∧(cA-cC>2.09)1.15∧((cA-cD)>−5.00)0.25∧((cA-cE)>−2.74)0.09∧(cB-cC>−9.31)0.02∧(cB-cD>−8.54)0.08∧(cB-cE>2.07)0∧((cC-cD)>2.60)1.06∧((cC-cE)>−4.27)0.03∧((cD-cE)>1.17)0.07
TELLER's ruleA before E, B before E, A and B before E, A and C before E
OGEM-tab's ruleExcitation: [D], [D,E], [E], [E,D]Inhibitory: [D,A], [A], [A,D], [A,D,E], [E,D,A], [D,A,E], [A,E], [E,A], [D,E,A], [A,E,D], [E,A,D]
", + "bbox": [ + 232, + 609, + 761, + 816 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 6: Comparison of rule discovery for CLNN and TELLER on the Syn-2 dataset.", + "bbox": [ + 217, + 825, + 777, + 840 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.3 SYNTHETIC DATASET 3 (SYN-3).", + "text_level": 1, + "bbox": [ + 171, + 869, + 447, + 883 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The third synthetic dataset is generated using a more interesting scheme by combining the generation schemes of the first synthetic dataset and the second synthetic dataset. The third synthetic dataset", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/940514745458e4b682db12a6e7789c21a874a28f6e3a43dcfc1c2c79cb00a3da.jpg", + "image_caption": [ + "Figure 7: Model structure of $\\hat{\\phi}_{3,1}$ for generating the occurrence of $D$ in the Syn-3 dataset." + ], + "image_footnote": [], + "bbox": [ + 318, + 103, + 674, + 309 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/b081e37e363fe8a10159d63a78d5d0118d701b9c610e183152ab23a0ad8a4a74.jpg", + "image_caption": [ + "Figure 8: Model structure of $\\hat{\\phi}_{3,2}$ for generating the occurrence of $E$ in the Syn-3 dataset." + ], + "image_footnote": [], + "bbox": [ + 290, + 353, + 705, + 539 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "includes five event labels: $A, B, C, D$ and $E$ . Here we consider $A, B$ , and $C$ as the causal events for the occurrence of $D$ , and $A, B, C$ , and $D$ as the causal events for the occurrence of $E$ . 
The occurrence of events $A, B, C$ are generated using Algorithm 1, in which $\\lambda_{A} = 0.2$ , $\\lambda_{b} = 0.2$ , and $\\lambda_{c} = 0.2$ . The wCL formula used to generate the occurrence of event $D$ is set as", + "bbox": [ + 169, + 595, + 823, + 652 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\phi} _ {3, 1} = \\left(c _ {B} - c _ {A} > - 2\\right) ^ {1} \\wedge \\left(c _ {C} - c _ {A} > - 5\\right) ^ {1}, \\tag {48}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 657, + 823, + 678 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "whose unweighted version reads as \"If $A$ happens before $B$ for less than 2 time units, and $A$ happens before $C$ for less than 1 time unit, then $D$ will happen.\" The generation of $D$ 's occurrence follows Algorithm 2, where $\\lambda_{D|\\hat{\\phi}_{3,1}}(t)$ is computed using the model shown in Figure 7. We call the third synthetic dataset at this step as Syn-3.1.", + "bbox": [ + 169, + 683, + 823, + 743 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "After obtaining the occurrences of events $A, B, C$ , and $D$ , we could simulate the occurrence of $E$ using the following formula:", + "bbox": [ + 169, + 748, + 826, + 777 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\phi} _ {3, 2} = \\left(c _ {B} - c _ {A} > - 5\\right) ^ {1} \\wedge \\left(c _ {C} - c _ {B} > - 4\\right) ^ {1} \\wedge \\left(c _ {D} - c _ {C} > - 3\\right) ^ {1}. \\tag {49}\n$$\n", + "text_format": "latex", + "bbox": [ + 271, + 784, + 823, + 803 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Similarly, the generation of $E$ 's occurrence follows Algorithm 2, where the intensity rate $\\lambda_{E|\\hat{\\phi}_{3,2}}(t)$ is computed using the model shown in Figure 8. 
We call the third synthetic dataset at this step as Syn-3.2.", + "bbox": [ + 169, + 808, + 825, + 853 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Results.", + "text_level": 1, + "bbox": [ + 171, + 859, + 232, + 873 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The rules learned by CLNN, TELLER, and OGEM-tab on the cause of event $D$ in the third synthetic dataset are presented in Table 7, where the paired order predicate with the highest weight among the two candidates is reported. It can be clearly observed that by truncating the predicates with small", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "weights, CLNN learns a wCL formula as", + "bbox": [ + 171, + 103, + 444, + 118 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_ {3, 1} = \\left(c _ {B} - c _ {A} > - 1. 8 5\\right) ^ {1. 7 2} \\wedge \\left(c _ {C} - c _ {A} > - 3. 9 0\\right) ^ {1. 5 9}, \\tag {50}\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 125, + 823, + 142 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "whose order representation match well with the ground-truth rule. On the other hand, TELLER's rule only reveals the temporal relation between event labels $A$ , $B$ , $C$ and $D$ , but it does not capture the temporal relation between event labels $A$ and $B$ or $A$ and $C$ . In addition, we could observe that OGEM-tab does not capture that $C$ is a parent event of $D$ .", + "bbox": [ + 169, + 150, + 826, + 205 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/0ade2cd181c78f6e20129e109757d9589040feebc8fc3b63f08edf29c05fc84b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetSyn-3.1
N (# events)N = 5, L = {A, B, C, D, E}
Ground truth\\(\\hat{\\phi}_{3,1} = (c_B - c_A > -2)^1 \\wedge (c_C - c_A > -5)^1\\)
CLNN's rule\\((c_B - c_A > -1.85)^{1.72} \\wedge (c_C - c_A > -3.90)^{1.59} \\wedge ((c_D - c_A) > -16.25)^{0.33} \\wedge ((c_C - c_B) > -3.01)^0 \\wedge (c_D - c_B > -7.37)^{0.02} \\wedge (c_D - c_C > -7.55)^0\\)
TELLER's ruleA before D, B before D, C before D
OGEM-tab's ruleExcitation: [A], [A, B, D], [B, D, A], [D, A], [D, A, B], [B, A], [A, D], [D], [B, A, D], [D, B, A]Inhibitory: [A, B], [B, D], [B], [A, D, B], [D, B]
", + "bbox": [ + 187, + 219, + 807, + 369 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The rules learned by CLNN, TELLER, and GEM on the cause of event $E$ in the third synthetic dataset are presented in Table 8, in which the discrete wCL formula learned by CLNN is", + "bbox": [ + 169, + 409, + 823, + 439 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_ {3, 2} = \\left(c _ {B} - c _ {A} > - 3. 9 4\\right) ^ {1. 4 9} \\wedge \\left(c _ {C} - c _ {B} > - 3. 0 2\\right) ^ {2. 0 3} \\wedge \\left(\\left(c _ {D} - c _ {C}\\right) > - 2. 0 0\\right) ^ {1. 9 2}. \\tag {51}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 444, + 823, + 465 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "It is obvious that $\\phi_{3,2}$ is able to learn the temporal relation between $A$ and $B$ , $B$ and $C$ , and $C$ and $D$ . However, TELLER's rules only reflect the temporal relation between $A$ , $B$ , $C$ and $E$ , which cannot give the information about the temporal relation between $A$ and $B$ , or $B$ and $C$ , or $C$ and $D$ . OGEM-tab's rule indicates that it considers event labels $A$ , $D$ , $E$ as the parent events of $D$ , which does not match with the ground-truth parent set.", + "bbox": [ + 169, + 469, + 826, + 541 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/b73a935eff88ee2e71108810437f7f70abd97257512e582c4d2e9df8953f8845.jpg", + "table_caption": [ + "Table 7: Comparison of rule discovery of ${\\phi }_{3,1}$ for CLNN and TELLER on the Syn-3.1 dataset." + ], + "table_footnote": [], + "table_body": "
DatasetSyn-3.2
N (# events)N=5, L={A,B,C,D,E}
Ground truth\\(\\hat{\\phi}_{3,2}=(c_{B}-c_{A}> -5)^{1}\\wedge(c_{C}-c_{B}> -4)^{1}\\wedge(c_{D}-c_{C}> -3)^{1}\\)
CLNN's rule\\((c_{B}-c_{A}> -3.94)^{1.49}\\wedge(c_{C}-c_{A}> -9.12)^{0.25}\\wedge((c_{D}-c_{A})> -1.42)^{0.13}\\wedge((c_{E}-c_{A})> -3.88)^{0.15}\\wedge(c_{C}-c_{B}> -3.02)^{2.03}\\wedge(c_{D}-c_{B}> -6.27)^{0.02}\\wedge(c_{E}-c_{B}> -7.30)^{0.04}\\wedge((c_{D}- c_{C})> -2.00)^{1.92}\\wedge((c_{E}-c_{C})> -5.30)^{0.09}\\wedge((c_{E}-c_{D})> -1.57)^{0.01}\\)
TELLER's ruleA before E, B before E, C before E
OGEM-tab's ruleExcitation: [A,D], [D,A], [D,E], [E], [A,D, E], [D,E,A], [E,A], [A,E], [E,A,D], [A,E, D], [D,A,E], [E,D,A]Inhibitory: [A], [D], [E,D]
", + "bbox": [ + 254, + 551, + 738, + 773 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 8: Comparison of rule discovery of ${\\phi }_{3,2}$ for CLNN and TELLER on the Syn-3.2 dataset.", + "bbox": [ + 186, + 782, + 808, + 799 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C.4 QUANTITATIVE COMPARISON OF CLNN'S RULES WITH GROUND TRUTH", + "text_level": 1, + "bbox": [ + 169, + 824, + 723, + 839 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To quantitatively evaluate the difference between the ground-truth rules and the rules learned by CLNN, we adopt the Jaccard similarity score to assess the learned formulas against the ground truth. Let $\\mathcal{G}$ denote the set of paired ordering representations from the ground-truth rule, and $\\mathcal{C}$ denote the set of paired ordering representations from the learned rules, the Jaccard similarity score is calculated as $J = \\frac{|\\mathcal{C} \\cap \\mathcal{G}|}{|\\mathcal{C} \\cup \\mathcal{G}|}$ . 
For TELLER and OGEM-tab, the ordering representations are extracted", + "bbox": [ + 169, + 849, + 826, + 928 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/e778c73031d538bd262a2304c138c0361986948ec160d092a3b6c088d78183f6.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 196, + 108, + 493, + 276 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/96858fce8cd5a11bbd1adfbb7d71a973c286ee8decdbab1100967a9142becc93.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 503, + 107, + 803, + 273 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/a6c6bd22c64fc1f1fc65224884fde0d5cd2e30493bdc294bb501e98d1361ff80.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 196, + 301, + 488, + 465 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/1511e755f98306b72c0769194b5a539ac0f64fffcc66182d9520ef4267f0858f.jpg", + "image_caption": [ + "(d)", + "Figure 9: Comparison of ground-truth rules with CLNN's rules in terms of Jaccard similarity score for a) Syn-1, b) Syn-2, c) Syn-3.1, d) Syn-3.2." + ], + "image_footnote": [], + "bbox": [ + 503, + 303, + 794, + 467 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "from the excitation rules. The comparison of Jaccard similarity score for the synthetic datasets is shown in Figure 9, where the Jaccard similarity score of 0 is manually set to the minimum threshold 0.05 for clarity purposes. 
It is clearly observed that the Jaccard similarity scores for CLNN is higher than the ones for TELLER or OGEM, implying the rules discovered by CLNN are more consistent with the ground truth.", + "bbox": [ + 169, + 560, + 823, + 630 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.5 STABILITY ANALYSIS OF CLNN'S RULES WITH RESPECT TO INITIALIZATION", + "text_level": 1, + "bbox": [ + 169, + 652, + 748, + 667 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "To further validate the model's stability in learning wCL rules, different parameter initialization methods are carried out, including:", + "bbox": [ + 169, + 681, + 823, + 710 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. rand - parameter initialization as random numbers from a uniform distribution on the interval [0, 1);", + "2. randn - random numbers from a normal distribution with mean 0 and variance 1;", + "3. ones - constant values of 1;", + "4. xavier - random numbers from a uniform distribution on the interval $[-1/\\sqrt{n}, 1/\\sqrt{n}]$ , where $n$ is the dimension of the parameter." + ], + "bbox": [ + 207, + 724, + 823, + 840 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The rules learned by CLNN for the above parameter initializations are summarized in Table 9. By inspecting the rules for different initialization methods, it is clear that CLNN can still recover the correct paired order representations even if initializing the learning process from a different position. 
In the meantime, the logic formulas learned by CLNN are stable as the variance of learned parameters is relatively small.", + "bbox": [ + 169, + 854, + 825, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/720e0b182ecd75a826d23884634877955fe58becb261d25acc4e0e57933c455c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetInitializationRules
Syn - 1Ground truth\\(\\hat{\\phi}=(c_{A}-c_{B}>1)^{1}\\wedge(c_{A}-c_{C}>3)^{1}\\)
rand\\(\\phi=(c_{A}-c_{B}>1.21)^{1.52}\\wedge(c_{A}-c_{C}>3.00)^{1.41}\\)
randn\\(\\phi=(c_{A}-c_{B}>1.21)^{1.58}\\wedge(c_{A}-c_{C}>3.32)^{1.56}\\)
ones\\(\\phi=(c_{A}-c_{B}>1.17)^{1.59}\\wedge(c_{A}-c_{C}>3.14)^{1.32}\\)
xavier\\(\\phi=(c_{A}-c_{B}>1.12)^{1.45}\\wedge(c_{A}-c_{C}>3.20)^{1.33}\\)
Syn - 2Ground truth\\(\\hat{\\phi}=(c_{A}-c_{B}>0.5)^{1}\\wedge(c_{A}-c_{C}>1.5)^{1}\\wedge(c_{C}-c_{D}>2)^{1}\\)
rand\\(\\phi=(c_{A}-c_{B}>0.77)^{1.27}\\wedge(c_{A}-c_{C}>2.09)^{1.15}\\wedge((c_{C}-c_{D})>2.60)^{1.06}\\)
randn\\(\\phi=(c_{A}-c_{B}>0.80)^{1.97}\\wedge(c_{A}-c_{C}>1.92)^{1.62}\\wedge((c_{C}-c_{D})>1.74)^{1.45}\\)
ones\\(\\phi=(c_{A}-c_{B}>1.03)^{1.63}\\wedge(c_{A}-c_{C}>1.92)^{1.50}\\wedge((c_{C}-c_{D})>2.03)^{1.44}\\)
xavier\\(\\phi=(c_{A}-c_{B}>0.97)^{1.92}\\wedge(c_{A}-c_{C}>2.07)^{1.63}\\wedge((c_{C}-c_{D})>1.97)^{1.62}\\)
Syn - 3.1Ground truth\\(\\hat{\\phi}=(c_{B}-c_{A}>-2)^{1}\\wedge(c_{C}-c_{A}>-5)^{1}\\)
rand\\(\\phi=(c_{B}-c_{A}>-1.85)^{1.72}\\wedge(c_{C}-c_{A}>-3.90)^{1.59}\\)
randn\\(\\phi=(c_{B}-c_{A}>-1.98)^{1.51}\\wedge(c_{C}-c_{A}>-3.89)^{1.68}\\)
ones\\(\\phi_{3,1}=(c_{B}-c_{A}>-1.94)^{1.84}\\wedge(c_{C}-c_{A}>-3.68)^{2.33}\\)
xavier\\(\\phi_{3,1}=(c_{B}-c_{A}>-1.89)^{1.54}\\wedge(c_{C}-c_{A}>-3.92)^{1.62}\\)
Syn - 3.2Ground truth\\(\\hat{\\phi}=(c_{B}-c_{A}>-5)^{1}\\wedge(c_{C}-c_{B}>-4)^{1}\\wedge(c_{D}-c_{C}>-3)^{1}\\)
rand\\(\\phi=(c_{B}-c_{A}>-3.94)^{1.49}\\wedge(c_{C}-c_{B}>-3.02)^{2.03}\\wedge((c_{D}-c_{C})> -2.00)^{1.92}\\)
randn\\(\\phi=(c_{B}-c_{A}>-3.79)^{1.71}\\wedge(c_{C}-c_{B}>-3.04)^{1.89}\\wedge((c_{D}-c_{C})> -1.68)^{1.65}\\)
ones\\(\\phi=(c_{B}-c_{A}>-3.53)^{1.66}\\wedge(c_{C}-c_{B}>-3.09)^{1.88}\\wedge((c_{D}-c_{C})> -1.25)^{1.81}\\)
xavier\\(\\phi=(c_{B}-c_{A}>-3.71)^{1.53}\\wedge(c_{C}-c_{B}>-3.09)^{2.04}\\wedge((c_{D}-c_{C})> -1.86)^{1.73}\\)
", + "bbox": [ + 196, + 101, + 802, + 358 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C.6 ANALYSIS OF LOGICAL CONSTRAINTS ON THE LL", + "text_level": 1, + "bbox": [ + 171, + 417, + 568, + 431 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In this part, we investigate the effect of the interpretability using an experiment of the impact of logical constraints on the model's performance. The log-likelihood on the synthetic datasets for CLNN with and without logical constraints is summarized in Table 10. Table 10 demonstrates that the log-likelihood for CLNN with logical constraints is higher than the log-likelihood for CLNN without constraints, implying that interpretability (logical constraints) is helpful to improve the performance.", + "bbox": [ + 169, + 446, + 823, + 518 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/2658b04aa74bc189fbdd0573cbd758c6395726b3651feecc9a6fb422e3c6d043.jpg", + "table_caption": [ + "Table 9: Comparison of rules learned by CLNN for different parameter initialization methods." + ], + "table_footnote": [], + "table_body": "
DatasetCLNN with constraintsCLNN without constraints
Syn - 1-7821-8716
Syn - 2-6075-6942
Syn - 3.1-10898-11583
Syn - 3.2-10919-11230
", + "bbox": [ + 274, + 539, + 720, + 619 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 10: Comparison of LL for CLNN with and without logical constraints.", + "bbox": [ + 245, + 628, + 750, + 643 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "D EXPERIMENT RESULTS OF REAL-WORLD DATASETS", + "text_level": 1, + "bbox": [ + 171, + 689, + 643, + 705 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "D.1 LINKEDIN DATASET", + "text_level": 1, + "bbox": [ + 171, + 727, + 359, + 742 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The LinkedIn dataset is a collection of job hopping records between 82 IT companies of 3,000 LinkedIn users. Each event stream represents a user's check-in time stamps for different companies or role changes within the same company. Here we select 1000 users' event streams to compose the dataset by filtering out the event streams with uncommon companies, resulting in 10 event labels: $\\mathcal{L} = \\{A,B,C,D,E,F,G,H,I,J\\}$ . Here we set the number of formulas as 5, i.e., $\\Phi = \\{\\phi_1,\\phi_2,\\phi_3,\\phi_4,\\phi_5\\}$ , each of which embodies a model structure shown in Figure 2(a) and CLNN aims to learn the parameters for each formula. The weight parameters in the paired order cell or the singleton order cell are initialized as random variables following a Gaussian distribution, and the bias terms of conjunction or disjunction operators are initialized as 1. The architecture weights are initialized as random variables following a Gaussian distribution, and the formula impact weights and bias are initialized as Gaussian random variables. 
The detailed log-likelihood for each event label is summarized in Table 11.", + "bbox": [ + 169, + 757, + 826, + 924 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/69a871f954c73941778d038c5558feb755082b3add5ca333f770779f0c7a455c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Event LabelLog-likelihood
A-180.59
B-177.80
C-89.49
D-140.31
E-132.83
F-76.63
G-106.23
H-103.33
I-95.51
J-125.45
", + "bbox": [ + 387, + 101, + 607, + 268 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.2 MIMIC II DATASET", + "text_level": 1, + "bbox": [ + 171, + 321, + 359, + 335 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "MIMIC II dataset is obtained from the intensive care unit research database that consists of 25,328 intensity care unit stays. The records include laboratory data, therapeutic intervention profiles such as nursing progress notes, discharge summaries and others. Here we restrict the event types to the diagnosis of patients and filter out the shorter event sequences with few visits, ending up with 650 patients and 15 event labels: $\\mathcal{L} = \\{1,2,8,9,11,12,14,20,21,22,23,26,27,42,47\\}$ . Similar to the setting for the LinkedIn dataset, where the initialization of parameters follow the same setting as the LinkedIn dataset. The detailed log-likelihood for each event label is presented in Table 12.", + "bbox": [ + 169, + 348, + 826, + 446 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/4cd041f239d47902683a689de705b11ef3e36f99d74bc603b88b10e11d7f71f1.jpg", + "table_caption": [ + "Table 11: Log likelihood for each event label in the LinkedIn dataset." + ], + "table_footnote": [], + "table_body": "
Event LabelLog-likelihood
1-72.14
2-62.33
8-5.98
9-51.34
11-43.64
12-25.81
14-69.73
20-5.96
21-6.08
22-10.47
23-10.64
26-27.08
27-27.42
42-5.95
47-10.54
", + "bbox": [ + 385, + 460, + 607, + 698 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Table 12: Log likelihood for each event label in the MIMIC II dataset.", + "bbox": [ + 266, + 708, + 730, + 724 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.3 STACK OVERFLOW DATASET", + "text_level": 1, + "bbox": [ + 171, + 758, + 418, + 772 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Stack Overflow is a question-and-answer website spanning a wide range of domains. A badge rewarding scheme is exploited to encourage users to participate in the questioning and answering activities. The badge system of Stack Overflow comprises 81 types of non-topical badges, including the badges that can be awarded only once and the badges that can be awarded several times. The dataset in (Du et al., 2016) was obtained by first filtering out the badges that can be awarded only once, then restricting to the users who have acquired at least 40 badges from 2012-01-01 to 2014-01-01, from which the badges have been awarded more than 100 times are selected as the determinate dataset. Our dataset was acquired by retaining the event streams with one or more of the 20 types of specified badges and then randomly sampling 1000 users to obtain 1000 event streams. The detailed log-likelihood for each event label in the Stack Overflow dataset is summarized in Table 13.", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/745b4d12b05a442f60099c558989a23fad57a215fe56896144c6556a9a1740dd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Event LabelLog-likelihood
1-3791
2-1451
3-538
4-17656
5-3574
6-3559
7-1381
8-1330
9-10961
10-1105
11-189
12-2012
13-673
14-1340
15-406
16-117
17-186
18-330
19-282
20-100
", + "bbox": [ + 385, + 101, + 607, + 407 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/c8f7735f28aa8d4df118a71a63f8b9c9027bb154f4eceb79be2aeedbcbb5aaaf.jpg", + "table_caption": [ + "Table 13: Log likelihood for each event label in the Stack Overflow dataset." + ], + "table_footnote": [], + "table_body": "
DatasetCLNN with SOPCLNN without SOP
LinkedIn-1228-1344
MIMIC II-436-480
Stack Overflow-50981-51195
", + "bbox": [ + 292, + 445, + 702, + 530 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Table 14: Comparison of log-likelihood for CLNN with and without SOP on the real-world datasets.", + "bbox": [ + 171, + 540, + 823, + 555 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "D.4 ANALYSIS OF EXPRESSIVENESS ON MODEL'S PERFORMANCE", + "text_level": 1, + "bbox": [ + 171, + 580, + 648, + 595 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In this part, we conduct an experiment by training the CLNN without the singleton order cell (SOC) on real-world datasets to show the effectiveness of the singleton order predicates. The comparison of log-likelihood for CLNN with SOC and CLNN without SOC is summarized in Table 14. As evidenced by Table 14, the log-likelihood of CLNN with SOP is higher than the log-likelihood of CLNN without SOP, meaning enriching the expressiveness of wCL formulas can better explain the generative mechanism of events.", + "bbox": [ + 169, + 606, + 826, + 691 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 23 + } +] \ No newline at end of file diff --git a/2023/Weighted Clock Logic Point Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_model.json b/2023/Weighted Clock Logic Point Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4e3f015ae249b381b258ed9f881db9be41cdad38 --- /dev/null +++ b/2023/Weighted Clock Logic Point Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_model.json @@ -0,0 +1,4395 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.1, + 0.693, + 0.123 + ], + 
"angle": 0, + "content": "WEIGHTED CLOCK LOGIC POINT PROCESS" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.15, + 0.772, + 0.182 + ], + "angle": 0, + "content": "Ruixuan Yan\\(^{1}\\), Yunshi Wen\\(^{1}\\), Debarun Bhattacharjya\\(^{2}\\), Ronny Luss\\(^{2}\\), Tengfei Ma\\(^{2}\\), Achille Fokoue\\(^{2}\\), and Agung Julius\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.192, + 0.615, + 0.221 + ], + "angle": 0, + "content": "\\(^{1}\\)Rensselaer Polytechnic Institute \n\\(^{2}\\)IBM T.J. Watson Research Center" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.247, + 0.548, + 0.262 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.279, + 0.77, + 0.475 + ], + "angle": 0, + "content": "Datasets involving multivariate event streams are prevalent in numerous applications. We present a novel framework for modeling temporal point processes called clock logic neural networks (CLNN) which learn weighted clock logic (wCL) formulas as interpretable temporal rules by which some events promote or inhibit other events. Specifically, CLNN models temporal relations between events using conditional intensity rates informed by a set of wCL formulas, which are more expressive than related prior work. Unlike conventional approaches of searching for generative rules through expensive combinatorial optimization, we design smooth activation functions for components of wCL formulas that enable a continuous relaxation of the discrete search space and efficient learning of wCL formulas using gradient-based methods. Experiments on synthetic datasets manifest our model's ability to recover the ground-truth rules and improve computational efficiency. In addition, experiments on real-world datasets show that our models perform competitively when compared with state-of-the-art models." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.489, + 0.524, + 0.505 + ], + "angle": 0, + "content": "1 INTRODUCTION AND RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.521, + 0.828, + 0.689 + ], + "angle": 0, + "content": "Multivariate event streams are emerging types of data that involve occurrences of different types of events in continuous time. Event streams are observed in a wide range of applications, including but not limited to finance (Bacry et al., 2015), politics (O'Brien, 2010), system maintenance (Gunawardana et al., 2011), healthcare (Weiss & Page, 2013), and social networks (Farajtabar et al., 2015). As opposed to time series data that typically comprises continuous-valued variables evolving in regular discrete time stamps, event streams involve events occurring irregularly and asynchronously in continuous time. Modeling the dynamics in event streams is important for a wide range of scientific and industrial processes, such as predicting the occurrence of events of interest or understanding why some deleterious events occur so as to possibly prevent their occurrence. A (multivariate) temporal point process (TPP) provides a formal mathematical framework for representing event streams, where a conditional intensity rate for each event measures its occurrence rate at any time given the historical events in the stream (Daley & Vere-Jones, 2003; Aalen et al., 2008)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.695, + 0.827, + 0.864 + ], + "angle": 0, + "content": "There has been a proliferation of research around TPPs in recent years, particularly around the use of neural networks for modeling conditional intensity rates as a function of historical occurrences (Du et al., 2016; Mei & Eisner, 2017; Xiao et al., 2017; Xu et al., 2017; Gao et al., 2020; Zhang et al., 2020; Zuo et al., 2020). 
One stream of research studies graphical event models (GEMs) as a compact and interpretable graphical representation for TPPs, where the conditional intensity rate for any particular event depends only on the history of a subset of the events (Didelez, 2008; Gunawardana & Meek, 2016). While any TPP can be represented as a GEM, various models make assumptions about the parametric form of conditional intensity rates for the sake of learnability, for instance that rates are piece-wise constant with respect to occurrences within historical windows (Gunawardana et al., 2011; Bhattacharjya et al., 2018). Ordinal GEMs(OGEM) (Bhattacharjya et al., 2020; 2021) are a recent model from this family where a conditional intensity rate depends on the order in which parent events occur within the most recent historical time period." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.828, + 0.927 + ], + "angle": 0, + "content": "A temporal logic point process (TLPP) framework was proposed as an alternate way to lend some interpretability to TPPs by modeling intensity rates using temporal logic rules (Li et al., 2020). Although the initial work pre-specified temporal logic rules, recent work has introduced a temporal logic rule learner (TELLER) for automatically discovering rules (Li et al., 2021). There is however" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.231 + ], + "angle": 0, + "content": "the issue of scalability since TELLER exploits an expensive branch-and-price algorithm to search for temporal logic rules in a discrete space. 
Another important limitation of this work is that TELLER's rules are not informative enough to explain how the interval length between ordered events impacts the conditional intensity rate. For instance, while predicting the occurrence of diabetes, the rule that \"insulin injection happens 20 minutes before eating meal\" is more informative and accurate in predicting \"blood glucose remains normal\" than the rule that \"insulin injection happens before eating meal\", as the latter rule cannot expose the interval between 'insulin injection' and 'eating meal'. To tackle the above limitations, we propose novel atomic predicates enriching the expressiveness of temporal logic rules as well as a differentiable framework to learn rules in an end-to-end manner." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.236, + 0.828, + 0.419 + ], + "angle": 0, + "content": "This work introduces a differentiable neuro-symbolic framework, clock logic neural network (CLNN), to model TPPs by learning weighted clock logic (wCL) formulas as explanations. Firstly, event streams are converted into continuous-time clock signals representing the time interval between the last occurrence of an event and the current time. Next, we propose a novel wCL to describe the underlying temporal relations with relative interval length, enabling the design of a CLNN to learn the generative mechanisms. Instead of searching for temporal logic rules in some vast discrete space, CLNN associates every neuron with an order representation or a logical operator and assigns weights to edges to reflect the importance of various inputs, which relaxes the search space to be continuous. Moreover, architecture weights are introduced into CLNN to make the formula structure search differentiable. wCL formula-informed intensity rates are carefully designed so that the parameters appearing in the rules can be learned through maximum likelihood estimation using gradient-based approaches. 
CLNN is tested on synthetic datasets to show that CLNN can recover the ground-truth rules as well as on real-world datasets to demonstrate its model-fitting performance." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.432, + 0.342, + 0.447 + ], + "angle": 0, + "content": "2 PRELIMINARIES" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.456, + 0.412, + 0.47 + ], + "angle": 0, + "content": "2.1 NOTATION & BACKGROUND" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.481, + 0.827, + 0.636 + ], + "angle": 0, + "content": "Let \\(\\mathcal{L}\\) denote the set of event labels, and \\(M = |\\mathcal{L}|\\) denote the number of event labels. An event stream is a sequence of events including time stamps, denoted as \\(\\mathcal{D} = \\{(l_1,t_1),(l_2,t_2),\\dots,(l_N,t_N)\\}\\), where \\(t_i\\in \\mathbb{R}^+\\) denotes a time stamp between the beginning time \\(t_0 = 0\\) and end time \\(t_{N + 1} = T\\), and \\(l_{i}\\in \\mathcal{L}\\) is the event label that happens at \\(t_i\\). We refer to 'event label' and 'label' interchangeably. Every event label \\(l\\in \\mathcal{L}\\) has an associated conditional intensity rate describing the occurrence rate of label \\(l\\) at \\(t\\) given the history up to \\(t\\). In multivariate temporal point processes, conditional intensity rates describe the dynamics of events. Let \\(\\mathcal{H}_t = \\{(l_i,t_i):t_i < t\\}\\) denote the historical events up to time \\(t\\). The conditional intensity rate of event label \\(l\\) is denoted as \\(\\lambda_l(t|\\mathcal{H}_t)\\). Specifically, \\(\\lambda_l(t|\\mathcal{H}_t)\\) describes the expected number of occurrences of event label \\(l\\) in an infinitesimal interval \\([t,t + \\Delta t]\\) given the history \\(\\mathcal{H}_t\\), i.e., \\(\\lambda_l(t|\\mathcal{H}_t) = \\lim_{\\Delta t\\to 0}(E[N_l(t + \\Delta t) - N_l(t)|\\mathcal{H}_t] / \\Delta t)\\), where \\(N_{l}(t)\\) denotes the number of event label \\(l\\)'s occurrences up to \\(t\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.641, + 0.825, + 0.657 + ], + "angle": 0, + "content": "Example 1 A running example of an event stream with 11 events of 4 labels is shown in Figure 1(a)." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.661, + 0.835, + 0.714 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.718, + 0.333, + 0.73 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image_caption", + "bbox": [ + 0.645, + 0.717, + 0.665, + 0.73 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.739, + 0.825, + 0.797 + ], + "angle": 0, + "content": "Figure 1: (a): An event stream example with \\( N = 11 \\) events of \\( M = 4 \\) event labels over \\( T = 30 \\) days. (Integer-valued time stamps are utilized for easy interpretation, note that the proposed approach also works for \\( t_i \\in \\mathbb{R} \\)). (b): The overall workflow of the proposed method (POC: paired order cell, SOC: singleton order cell, AC: architecture cell, details presented in Section 2.2 to 3.3)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.801, + 0.561, + 0.815 + ], + "angle": 0, + "content": "2.2 ORDER REPRESENTATIONS FOR EVENT STREAMS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.819, + 0.825, + 0.89 + ], + "angle": 0, + "content": "The overall workflow of the proposed framework is visualized as Figure 1(b). The raw event streams first go through a masking function to generate the masked event streams, which are then transformed into event clocks using a clocking function. The event clocks are given as inputs to the clock logic neural network (CLNN) to learn interpretable wCL formulas and the intensity rate of event occurrences. The following sections provide a detailed explanation for each module in Figure 1(b)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We are interested in exploring the effect of temporal ordering between event labels and the occurrences of causal event labels in a historical window on the occurrence rate of a particular event label," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.105, + 0.825, + 0.162 + ], + "angle": 0, + "content": "where the generative mechanism is expressed as interpretable formulas. An event stream up to \\( t \\) may include multiple occurrences of the same event label, thus a masking function is required to mask out duplicated event labels in the history for accessing the ordering information at any \\( t \\). Here we adopt a technique similar to Bhattacharjya et al. (2020) for extracting distinct event labels from \\( \\mathcal{H}_t \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.165, + 0.825, + 0.223 + ], + "angle": 0, + "content": "Definition 1 (Masking Function) A masking function \\(\\Gamma(\\cdot)\\) is a function that takes an event stream as input and returns a new event stream that is a subset of the input stream and contains no duplicated event labels. Mathematically, \\(\\Gamma(\\cdot)\\) is applied to \\(\\mathcal{H}_t = \\{(l_i, t_i)\\}\\) and converts it into a new stream \\(\\mathcal{H}_t' = \\{(l_j, t_j) \\in \\mathcal{H}_t : l_j \\neq l_{j'} \\text{ if } j \\neq j'\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.226, + 0.825, + 0.268 + ], + "angle": 0, + "content": "We consider the following two masking functions as per Bhattacharjya et al. (2020) due to simplicity: 'first' masking and 'last' masking. The 'first' (resp. 
'last') masking function keeps the first (resp. last) occurrence of an event label in an event stream." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.272, + 0.825, + 0.317 + ], + "angle": 0, + "content": "Example 1 (cont.) Let \\(\\mathcal{H}_{13} = \\{(A,1),(B,3),(A,6),(D,8),(C,10),(D,12)\\}\\). The 'first' masking function converts it to \\(\\mathcal{H}_{13}' = \\{(A,1),(B,3),(D,8),(C,10)\\}\\), and the 'last' masking function converts it to \\(\\mathcal{H}_{13}' = \\{(B,3),(A,6),(C,10),(D,12)\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.32, + 0.825, + 0.35 + ], + "angle": 0, + "content": "With the masked event history \\(\\mathcal{H}_t^\\prime\\), we define two order representations for the order relationship between any two event labels and the occurrence of an event within a historical window of \\(t\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.353, + 0.825, + 0.397 + ], + "angle": 0, + "content": "Definition 2 (Paired Order Representation (POR)) A paired order representation is defined as \\([l_i, l_j] \\in [\\mathcal{L}]^2\\), where \\([\\mathcal{L}]^2\\) denotes two-element permutation of a subset of \\(\\mathcal{L}\\). A paired order representation for \\(\\mathcal{H}_t^\\prime\\) can be obtained by arranging any two distinct labels in \\(\\mathcal{H}_t^\\prime\\) in a sequential order." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.404, + 0.825, + 0.451 + ], + "angle": 0, + "content": "Definition 3 (Singleton Order Representation (SOR)) A singleton order representation is denoted as \\([l_j, \\underline{u}_{l_j}] \\in \\mathcal{L} \\times \\mathbb{R}_+\\), representing event label \\(l_j \\in \\mathcal{L}\\) occurred within the past \\(\\underline{u}_{l_j}\\) time units, where \\(\\underline{u}_{l_j}\\) is a variable to learn through a process that will be explained in Section 3.3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.454, + 0.826, + 0.525 + ], + "angle": 0, + "content": "Example 1 (cont.) 
With first masking, an example of paired order representation for \\(\\mathcal{H}_{13}^{\\prime}\\) can be \\([A,B]\\) representing \"A happens before \\(B\\) \" or \\([B,C]\\) representing \"B happens before \\(C\\)\". The overall order representation for \\(\\mathcal{H}_{13}^{\\prime}\\) is expressed as \\([A,B,D,C]\\), which can be derived from the paired order representations: \\([A,B],[B,D],[D,C]\\). A singleton order representation example of \\(\\mathcal{H}_{13}^{\\prime}\\) can be expressed as \\([B,10.5]\\), meaning \\(B\\) happened in the past 10.5 days." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.533, + 0.473, + 0.546 + ], + "angle": 0, + "content": "2.3 WEIGHTED CLOCK LOGIC FORMULA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.552, + 0.825, + 0.596 + ], + "angle": 0, + "content": "To adapt \\(\\mathcal{H}_t^\\prime\\) to continuous-time signals that can be described by logical statements, we extract clock signals from \\(\\mathcal{H}_t^\\prime\\) to describe the time passed since the last occurrence of a label. A clocking function is introduced to convert \\(t_j\\) into a clock signal \\(c_{j}\\) denoting the time interval length between \\(t_j\\) and \\(t\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.598, + 0.825, + 0.659 + ], + "angle": 0, + "content": "Definition 4 (Clocking Function) A clocking function \\(\\Xi(\\cdot)\\) converts \\(\\mathcal{H}_t^\\prime\\) into a vector of clock signals as \\(\\mathcal{C}'(t) = [c_1(t), c_2(t), \\dots, c_M(t)]^T \\in \\mathbb{R}_+^M\\) with \\(c_i(t)\\) denoting the clock signal for event label \\(i \\in \\mathcal{L}\\), where \\(c_i(t)\\) is computed as \\(c_i(t) = t - t_j\\) if \\((l_j, t_j) \\in \\mathcal{H}_t^\\prime\\) and \\(l_j = i\\), and \\(c_i(t) = \\bar{Z}\\) otherwise. Note that \\(\\bar{Z}\\) is a user-defined, large positive number to indicate event label \\(i\\) not happening in \\(\\mathcal{H}_t^\\prime\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.665, + 0.825, + 0.695 + ], + "angle": 0, + "content": "Example 1 (cont.) Taking the 'first' masked event stream \\(\\mathcal{H}_{13}^{\\prime} = \\{(A,1),(B,3),(D,8),(C,12)\\}\\) as an example, the event clocks are extracted as \\(\\mathcal{C}'(13) = [12,10,1,5]^T\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.698, + 0.825, + 0.814 + ], + "angle": 0, + "content": "The event clocks can essentially provide the ordering between any two event labels in that the difference between any two event labels' clock signals reflects which event label happens first. As shown in the diabetes prediction example in the Introduction section, the time interval between ordering events is notably important in explaining and predicting an event label's occurrence. In contrast to (Li et al., 2020; 2021) which only learns the temporal ordering relation between event labels, we define a paired order predicate (POP) with a learnable parameter \\(\\underline{u}_{l_i l_j}\\) to describe the time interval between two ordered event labels \\(l_i\\) and \\(l_j\\) and a singleton order predicate (SOP) with a learnable parameter \\(\\underline{u}_{l_j}\\) to describe the occurrence of label \\(l_j\\) within a historical window \\(\\underline{u}_{l_j}\\) as follows." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.817, + 0.825, + 0.884 + ], + "angle": 0, + "content": "Definition 5 (Paired Order Predicate) A POP describes the order between two labels \\( l_i, l_j \\in \\mathcal{L}, l_i \\neq l_j \\), denoted as \\( \\pi_{pop}^{l_i l_j} := g(c_{l_i}, c_{l_j}) = c_{l_i} - c_{l_j} > \\underline{u}_{l_i l_j} \\), where \\( \\underline{u}_{l_i l_j} \\in \\mathbb{R} \\) is a parameter to learn. 
A positive \\( \\underline{u}_{l_i l_j} \\) means \\( l_i \\) happened before \\( l_j \\) for at least \\( \\underline{u}_{l_i l_j} \\) time units, and a negative \\( \\underline{u}_{l_i l_j} \\) means \\( l_j \\) happened before \\( l_i \\) for at most \\( -\\underline{u}_{l_i l_j} \\) time units. A POP is used in the POC of Figure 1(b)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.891, + 0.825, + 0.929 + ], + "angle": 0, + "content": "Definition 6 (Singleton Order Predicate) An SOP describes a causal label \\( l_j \\in \\mathcal{L} \\) occurring within the past \\( \\underline{u}_{l_j} \\) time units, defined as \\( \\pi_{sop}^{l_j} := c_{l_j} - \\underline{u}_{l_j} < 0 \\), where \\( \\underline{u}_{l_j} \\in \\mathbb{R}_+ \\) is a learnable parameter." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.204 + ], + "angle": 0, + "content": "Instead of taking a heuristic approach for some underlying combinatorial search problem for a given set of temporal predicates (Bhattacharjya et al., 2020; 2021; Li et al., 2021) to uncover the effective order relations, this work proposes a differentiable learning model to learn suitable singleton and paired order predicates among all the possible choices of order predicates through a gradient-based approach. The scheme of weighted signal temporal logic (wSTL) in Yan et al. (2021; 2022) is exploited to build weighted clock logic (wCL) formulas that are logical compositions of singleton and paired order predicates. 
The syntax of wCL is recursively defined as (Mehdipour et al., 2021):" + }, + { + "type": "equation", + "bbox": [ + 0.277, + 0.208, + 0.825, + 0.228 + ], + "angle": 0, + "content": "\\[\n\\phi := \\pi_ {p o p} ^ {l _ {i} l _ {j}} \\left| \\pi_ {s o p} ^ {l _ {j}} \\right| \\neg \\phi \\left| \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} \\dots \\wedge \\phi_ {k} ^ {w _ {k}} \\right| \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}} \\dots \\vee \\phi_ {k} ^ {w _ {k}}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.232, + 0.825, + 0.29 + ], + "angle": 0, + "content": "where \\(\\phi_1, \\dots, \\phi_k\\) are wCL formulas, \\(\\neg\\) denotes negation, \\(\\land\\) denotes logical conjunction, \\(\\lor\\) denotes logical disjunction, \\(w_j \\geq 0, j = 1, \\dots, k\\) denotes non-negative weights assigned to \\(\\phi_1, \\dots, \\phi_k\\) in the conjunction and disjunction operations. A wCL formula can describe the characteristics of \\(\\mathcal{H}_t\\), thus the conditional intensity rate of event \\(l\\) given \\(\\mathcal{H}_t\\) can be equivalently denoted as \\(\\lambda_{l|\\phi}(t)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.295, + 0.826, + 0.332 + ], + "angle": 0, + "content": "Remark 7 The syntax above means each wCL formula can be built by using predicates in \\(\\pi_{pop}^{l_i l_j}\\) or \\(\\pi_{sop}^{l_j}\\) and then by recursively applying the \\(\\neg\\) or the \\(\\land\\) or the \\(\\lor\\) operations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.826, + 0.398 + ], + "angle": 0, + "content": "Example 1 (cont.) A \\( wCL \\) formula example is \\( \\phi = (c_A - c_B > 1)^1 \\wedge (c_C < 3)^{0.05} \\). The first and second clauses read \"A happened before \\( B \\) for at least one day\" and \"C happened less than 3 days ago\", respectively. Note that \\( \\phi \\) is satisfied by the event stream up to \\( t = 13 \\) in Figure 1(a). 
The two clauses have weights of 1 and 0.05, reflecting the first clause is more important than the second one." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.41, + 0.586, + 0.425 + ], + "angle": 0, + "content": "3 WEIGHTED CLOCK LOGIC POINT PROCESSES" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.435, + 0.535, + 0.449 + ], + "angle": 0, + "content": "3.1 TRUTH DEGREE OF WEIGHTED CLOCK LOGIC" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.456, + 0.825, + 0.5 + ], + "angle": 0, + "content": "To quantitatively measure the satisfaction degree of a wCL formula \\(\\phi\\) over the event clocks \\(\\mathcal{C}'(t)\\), i.e., how well does \\(\\phi\\) describe the underlying patterns of \\(\\mathcal{C}'(t)\\), we propose smooth activation functions (AFs) to compute the truth degree, denoted \\(p(\\mathcal{C}',\\phi,t)\\in [0,1]\\), defined as (Riegel et al., 2020):" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.5, + 0.825, + 0.521 + ], + "angle": 0, + "content": "\\[\np \\left(\\mathcal {C} ^ {\\prime}, \\pi_ {p o p} ^ {l _ {i} l _ {j}}, t\\right) = \\operatorname {s i g m o i d} \\left(c _ {l _ {i}} (t) - c _ {l _ {j}} (t) - \\underline {{u}} _ {l _ {i} l _ {j}}\\right), \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.522, + 0.825, + 0.542 + ], + "angle": 0, + "content": "\\[\np \\left(\\mathcal {C} ^ {\\prime}, \\pi_ {s o p} ^ {l _ {j}}, t\\right) = \\operatorname {s i g m o i d} \\left(\\underline {{u}} _ {l _ {j}} - c _ {l _ {j}} (t)\\right), \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.544, + 0.825, + 0.561 + ], + "angle": 0, + "content": "\\[\np \\left(\\mathcal {C} ^ {\\prime}, \\neg \\phi , t\\right) = 1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi , t\\right). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.566, + 0.826, + 0.72 + ], + "angle": 0, + "content": "In contrast to the combinatorial search of the temporal logic predicates in Li et al. 
(2021), the smooth design of AFs in (2) - (4) benefits the maximum likelihood estimation problem shown later in Section 3.6 by allowing it to learn the parameters in the POP and SOP through gradient-based methods. Next, we present the design of activation functions (AF) for the \\(\\wedge\\) operator. Here we use a 2-ary conjunction operator to motivate the design. Let \\(p^{\\wedge} = p(\\mathcal{C}',\\phi_1^{w_1}\\wedge \\phi_2^{w_2},t)\\in [0,1]\\). Intuitively, \\(p^{\\wedge}\\) is low when either input is low, and \\(p^{\\wedge}\\) is high when both inputs are high. Here we adopt a similar idea to Sen et al. (2022) for capturing the low and high. A user-defined hyperparameter \\(\\alpha \\in [\\frac{1}{2},1]\\) is introduced to aid the interpretability of low and high such that \\(p^{\\wedge}\\) represents high if \\(p^{\\wedge}\\in [\\alpha ,1]\\) and low if \\(p^{\\wedge}\\in [0,1 - \\alpha ]\\). Considering the importance weights, a low input with a zero weight should not impact the output, which implies \\(p^{\\wedge}\\) should be low when both inputs are low. 
With these considerations, the AF for the \\(\\wedge\\) operator is defined as follows: (See Appendix A for more details.)" + }, + { + "type": "equation", + "bbox": [ + 0.232, + 0.718, + 0.826, + 0.761 + ], + "angle": 0, + "content": "\\[\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} \\dots \\wedge \\phi_ {k} ^ {w _ {k}}, t\\right) = f \\left(\\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.363, + 0.763, + 0.765, + 0.807 + ], + "angle": 0, + "content": "\\[\n\\text{subject to} \\quad \\beta - \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\geq \\alpha , \\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\leq 1 - \\alpha ,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.812, + 0.825, + 0.841 + ], + "angle": 0, + "content": "where \\( f(z) = \\max \\{0, \\min \\{z, 1\\}\\} \\) clamps the truth degree into [0,1], \\( w_{j} \\geq 0 \\) and \\( \\beta \\geq 0 \\) are parameters to learn. 
By De Morgan's law (Hurley, 2014), the AF for the \\( \\vee \\) operator is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.841, + 0.826, + 0.883 + ], + "angle": 0, + "content": "\\[\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}} \\dots \\vee \\phi_ {k} ^ {w _ {k}}, t\\right) = f (1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\left(p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)), \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.335, + 0.885, + 0.792, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\text{subject to} \\quad 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\geq \\alpha , 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\leq 1 - \\alpha .\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.827, + 0.22 + ], + "angle": 0, + "content": "An event stream with \\( M \\) event labels would generate \\( \\mathrm{P}_M^2 = \\frac{M!}{(M - 2)!} \\) paired order predicates and \\( M \\) singleton order predicates. If a conjunction or disjunction operator takes these predicates as inputs, how it recognizes the effective order predicates in describing the event dynamics becomes a critical issue. By carefully designing the AFs in (5) - (6), the logical operators exhibit the following properties so as to recognize effective inputs. This is a critical advantage over Bhattacharjya et al. (2020; 2021); Li et al. (2021) in that it allows a differentiable search of the suitable predicates among all the possible choices of order predicates in an end-to-end manner. Here we illustrate the properties for \\( \\wedge \\) with two inputs, which can be generalized to \\( k \\)-ary inputs. 
(See Appendix B for more details.)" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.225, + 0.755, + 0.24 + ], + "angle": 0, + "content": "Theorem 8 The \\( AF \\) for the \\( \\wedge \\) operator with two inputs exhibits the following properties." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.245, + 0.82, + 0.263 + ], + "angle": 0, + "content": "1) Nonimpact for zero weights: If \\( w_{j} = 0, j = 1,2 \\), \\( p(\\mathcal{C}',\\phi_j,t) \\) has no impact on \\( p(\\mathcal{C}',\\phi_1\\wedge \\phi_2,t) \\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.268, + 0.825, + 0.291 + ], + "angle": 0, + "content": "2) Impact ordering: If \\( p(\\mathcal{C}', \\phi_1, t) = p(\\mathcal{C}', \\phi_2, t) \\), and \\( w_1 \\geq w_2 \\), then \\( \\frac{\\partial p(\\mathcal{C}', \\phi_1 \\wedge \\phi_2, t)}{\\partial p(\\mathcal{C}', \\phi_1, t)} \\geq \\frac{\\partial p(\\mathcal{C}', \\phi_1 \\wedge \\phi_2, t)}{\\partial p(\\mathcal{C}', \\phi_2, t)} \\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.296, + 0.825, + 0.318 + ], + "angle": 0, + "content": "3) Monotonicity: \\( f(\\beta - \\sum_{j=1}^{2} w_j (1 - p(\\mathcal{C}', \\phi_j, t))) \\leq f(\\beta - \\sum_{j=1}^{2} w_j (1 - (p(\\mathcal{C}', \\phi_j, t) + d))), d \\geq 0. \\)" + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.245, + 0.825, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.208, + 0.336, + 0.563, + 0.55 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.373, + 0.555, + 0.392, + 0.567 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.335, + 0.784, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.665, + 0.554, + 0.685, + 0.567 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.575, + 0.825, + 0.607 + ], + "angle": 0, + "content": "Figure 2: CLNN Structure. 
(a): Continuous relaxation of the search space using weights. (b): The learned discrete model structure for \\(\\phi = (\\pi_{pop}^{A,B}\\wedge \\pi_{pop}^{B,C})\\vee (\\pi_{sop}^{A})\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.621, + 0.553, + 0.634 + ], + "angle": 0, + "content": "3.2 LEARNING OF PAIRED ORDER REPRESENTATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.647, + 0.825, + 0.773 + ], + "angle": 0, + "content": "With the smooth AFs designed in (2) - (6), a neuro-symbolic model called clock logic neural network (CLNN) can be designed for any given wCL formula \\(\\phi\\), in which every neuron has a corresponding symbolic representation. A typical CLNN for \\(\\phi = (\\pi_{pop}^{A,B}\\wedge \\pi_{pop}^{B,C})\\vee (\\pi_{sop}^{A})\\) is visualized as Fig. 2(b), which can be considered as the discrete structure obtained by learning the parameters of the model in Figure 2(a) and keeping the dominant components. Here \\(\\phi\\) can be interpreted as “(A happens before \\(B\\) for at least \\(\\underline{u}_{AB}\\) time units or \\(B\\) happens before \\(C\\) for at least \\(\\underline{u}_{BC}\\) time units) and \\(A\\) happens within the past \\(\\underline{u}_A\\) time units.” This part describes the continuous relaxation of the search space by designing a paired order cell, a singleton order cell, and an architecture cell for learning the paired order representation, singleton order representation and the formula structure." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.779, + 0.827, + 0.928 + ], + "angle": 0, + "content": "Paired Order Cell (POC). A POC is a directed acyclic graph (DAG) comprising two paired order predicate (POP) nodes and one logical node for the \\(\\wedge\\) operator, shown as an orange block in Figure 2(a). 
The two POP nodes represent \\(\\pi_{pop}^{l_i,l_j}\\) and \\(\\pi_{pop}^{l_j,l_i}\\) sharing the same parameter \\(\\underline{u}_{l_i,l_j}\\), where \\(\\pi_{pop}^{l_i,l_j}\\) denotes \"\\(l_i\\) happened before \\(l_j\\) for at least \\(\\underline{u}_{l_i,l_j}\\) time units\" and \\(\\pi_{pop}^{l_j,l_i}\\) denotes \"\\(l_j\\) happened before \\(l_i\\) for at least \\(\\underline{u}_{l_i,l_j}\\) time units\". Each POP has an associated weight \\(w_{pop}^{l_i,l_j}\\) or \\(w_{pop}^{l_j,l_i}\\) to be learned, and the \\(\\wedge\\) operator forces one of the two weight parameters to dominate the other one such that the learned POR is consistent with the event stream. For example, the POC in Figure 2(a) aims to learn the POR between \\(A\\) and \\(B\\), whose discretized version would be either \\(\\pi_{pop}^{A,B}\\) or \\(\\pi_{pop}^{B,A}\\). An event stream with \\(M\\) event labels can generate \\(\\mathrm{P}_M^2 = \\frac{M!}{(M - 2)!}\\) PORs between any two event" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.192 + ], + "angle": 0, + "content": "labels, resulting in \\((\\mathrm{P}_M^2 / 2)\\) POCs. Similar to learning the POR between any two events, the discrete order representations for the entire history \\(\\mathcal{H}_t\\) can be learned using a POP selection node (as shown in Figure 2(a)) that takes the outputs of all the POCs as input and identifies the important PORs. The learning of the POCs essentially becomes learning the \\(w\\), \\(\\beta\\) in (5) for the POCs and the POP selection node, as well as \\(\\underline{u}_{l_i l_j}\\) in (2) for the POPs through back propagation. 
The discrete PORs can be acquired by keeping the top-\\(k\\) strongest POCs and the dominant POPs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.197, + 0.584, + 0.21 + ], + "angle": 0, + "content": "3.3 LEARNING OF SINGLETON ORDER REPRESENTATION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.215, + 0.828, + 0.351 + ], + "angle": 0, + "content": "Singleton Order Cell (SOC). The learning of SOR is accomplished by an SOC, which is displayed as a green block in Figure 2(a). An SOC is a DAG comprising \\( M \\) singleton order predicate (SOP) nodes and one SOP selection node for the \\( \\wedge \\) operator. An SOP node represents \\( \\pi_{sop}^{l_j} \\) that takes \\( c_{l_j}(t) \\) as input and returns the truth degree of \\( \\pi_{sop}^{l_j} \\) over \\( c_{l_j}(t) \\). The SOP selection node has the same functionality as the POP selection node. The \\( \\wedge \\) operator in the SOP selection node assigns a nonnegative weight to every SOP node and learns the importance weights \\( w \\) and \\( \\beta \\) to extract the dominant SORs affecting the conditional intensity rate the most. The learning of the SOC is thus learning the \\( w, \\beta \\) in (5) for the SOP selection node and \\( \\underline{u}_{l_j} \\) in (3) for the SOPs through back propagation. The discrete SORs can be determined by keeping the top- \\( k \\) strongest SOPs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.358, + 0.479, + 0.372 + ], + "angle": 0, + "content": "3.4 LEARNING OF FORMULA STRUCTURE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.376, + 0.828, + 0.636 + ], + "angle": 0, + "content": "Architecture Cell (AC). For a given set of PORs or SORs, their conjunction or disjunction will behave differently and have distinct meanings. 
For instance, given two causal formulas \\(\\phi_{1} = (c_{A} - c_{B} > 1)^{1}\\wedge (c_{C} < 5)^{1}\\) and \\(\\phi_{2} = (c_{A} - c_{B} > 1)^{1}\\vee (c_{C} < 5)^{1}\\) for the occurrence of event label \\(D\\), \\(\\phi_{1}\\) means “(A happens before \\(B\\) for at least 1 time unit) and (C happens within the past 5 time units) simultaneously will cause \\(D\\) to happen”, whereas \\(\\phi_{2}\\) means “(A happens before \\(B\\) for at least 1 time unit) or (C happens within the past 5 time units) alternatively will cause \\(D\\) to happen.” The afore-mentioned cells can learn the order representations. Nevertheless, whether their outputs should be connected by the \\(\\wedge\\) or \\(\\vee\\) operator needs to be determined. Here we consider the outputs of the POCs and the SOCs having two choices of being connected by a \\(\\wedge\\) or \\(\\vee\\) operator, each of which is associated with an architecture weight \\(\\alpha_{arc}^{\\wedge}\\) or \\(\\alpha_{arc}^{\\vee}\\) that enables continuous learning of the two choices; this is also called differentiable architecture search (Liu et al., 2019). An architecture cell is introduced for learning the model architecture, which comprises two logical nodes representing a \\(\\wedge\\) operator and a \\(\\vee\\) operator as well as a logical selection node (LSN), shown as the blue block in Figure 2(a). Let \\(\\pmb{p} = \\{p_1,\\dots,p_k\\}\\) denote the set of inputs for each logical operator. Subsequently, the conjunction operator takes \\(\\pmb{p}\\) as input and returns \\(p^{\\wedge} = f(\\beta^{\\wedge} - \\sum_{j = 1}^{k}w_{j}^{\\wedge}(1 - p_{j}))\\), and the disjunction operator takes \\(\\pmb{p}\\) as input and returns \\(p^{\\vee} = f(1 - \\beta^{\\vee} + \\sum_{j = 1}^{k}w_{j}^{\\vee}p_{j})\\). 
"\\[\np _ {\\ominus} = p \\left(\\mathcal {C} ^ {\\prime}, \\phi , t\\right) = \\sum_ {m \\in \\{\\wedge , \\vee \\}} \\frac {e ^ {\\alpha_ {a r c} ^ {m}}}{\\sum_ {m ^ {\\prime} \\in \\{\\wedge , \\vee \\}} e ^ {\\alpha_ {a r c} ^ {m ^ {\\prime}}}} p ^ {m}. \\tag {7}\n\\]"
For example, if the wCL formula for affecting the occurrence of event label \\(D\\) is given as \\(\\phi = ((\\pi_{pop}^{A,B})^{w_1} \\wedge (\\pi_{sop}^{C})^{w_2})\\), it means if \\(\\phi\\) is satisfied or the truth degree of \\(\\phi\\) is high, then it has a strong impact on the occurrence of \\(D\\), where the impact can be promoting or inhibiting the occurrence of \\(D\\). In terms of the relation between the truth degree and the con-" + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.741, + 0.817, + 0.858 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.622, + 0.865, + 0.825, + 0.89 + ], + "angle": 0, + "content": "Figure 3: The overall learning framework for \\( n \\) wCL formulas." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.91, + 0.825, + 0.926 + ], + "angle": 0, + "content": "ditional intensity rate, the higher the truth degree \\(p(\\mathcal{C}',\\phi ,t)\\) , the greater its impact on \\(\\lambda_{D|\\phi}\\) . Note" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.161 + ], + "angle": 0, + "content": "that the occurrence of one event label may depend on multiple wCL formulas. This work follows the assumption that the impact of multiple formulas are additive in predicting the intensity rate, similar to Li et al. (2020). 
To incorporate a set of wCL formulas \\(\\Phi = \\{\\phi_1,\\phi_2,\\dots,\\phi_n\\}\\) into the modeling of the conditional intensity rate, we define a wCL formula-informed conditional intensity rate as:" + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.164, + 0.825, + 0.202 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {l \\mid \\Phi} (t) = \\exp \\left(\\sum_ {i = 1} ^ {n} w _ {\\phi_ {i}} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {i}, t\\right) + \\rho\\right), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.205, + 0.825, + 0.233 + ], + "angle": 0, + "content": "where \\( w_{\\phi_i} \\) is the weight of \\( \\phi_i \\), and \\( \\rho \\) is a bias term that allows for spontaneous occurrence without the influence from \\( \\phi \\)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.252, + 0.479, + 0.266 + ], + "angle": 0, + "content": "3.6 MAXIMUM LIKELIHOOD ESTIMATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.279, + 0.825, + 0.323 + ], + "angle": 0, + "content": "Suppose event stream \\(\\mathcal{D}\\) contains \\(n_l\\) occurrences of event \\(l\\), for which the occurrence time stamps are denoted as \\(t_{l_1}, t_{l_2}, \\ldots, t_{l_{n_l}}\\). Let \\(t_0 = 0\\), \\(t_{l_{n_l + 1}} = T\\). Based on the conditional intensity function in (8), the likelihood for label \\(l\\) over the event stream is calculated as (Daley & Vere-Jones, 2003):" + }, + { + "type": "equation", + "bbox": [ + 0.231, + 0.331, + 0.826, + 0.375 + ], + "angle": 0, + "content": "\\[\nL _ {l} = \\prod_ {i = 0} ^ {n _ {l} - 1} \\left(\\exp \\left(- \\int_ {t _ {l _ {i}}} ^ {t _ {l _ {i + 1}}} \\lambda_ {l | \\Phi} (s) d s\\right) \\lambda_ {l | \\Phi} \\left(t _ {l _ {i + 1}}\\right)\\right) \\exp \\left(- \\int_ {t _ {l _ {n _ {l}}}} ^ {T} \\lambda_ {l | \\Phi} (s) d s\\right). 
\\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.385, + 0.825, + 0.444 + ], + "angle": 0, + "content": "The corresponding log-likelihood for event label \\( l \\) is expressed as \\( LL_{l} = (-\\int_{0}^{T}\\lambda_{l|\\Phi}(s)ds) + \\sum_{i = 1}^{n_{l}}[\\log (\\lambda_{l|\\Phi}(t_{l_{i}}))] \\). The total log-likelihood of all the events in \\( \\mathcal{D} \\) is thus \\( LL_{\\mathcal{D}} = \\sum_{l\\in \\mathcal{L}}LL_{l} \\). During the training process, we train the model parameters for each event label separately. Specifically, the maximum likelihood estimation problem for event label \\( l \\) can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.186, + 0.452, + 0.825, + 0.468 + ], + "angle": 0, + "content": "\\[\n\\min - L L _ {l} \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.472, + 0.825, + 0.505 + ], + "angle": 0, + "content": "\\[\ns. t. \\quad \\forall \\phi \\in \\Phi , \\forall 1 \\leq k \\leq K _ {\\phi} ^ {\\wedge}, \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} (1 - \\alpha) \\geq \\alpha , \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} \\alpha \\leq 1 - \\alpha , \\tag {11}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.183, + 0.508, + 0.825, + 0.542 + ], + "angle": 0, + "content": "\\[\n\\forall \\phi \\in \\Phi , \\forall 1 \\leq k ^ {\\prime} \\leq K _ {\\phi} ^ {\\vee}, 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} \\alpha \\geq \\alpha , 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} (1 - \\alpha) \\leq 1 - \\alpha , \\tag {12}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.544, + 0.492, + 0.564 + ], + "angle": 0, + "content": "\\[\nw _ {i, k} \\geq 0, \\beta_ {k} \\geq 0, w _ {i, k ^ {\\prime}} \\geq 0, \\beta_ {k ^ {\\prime}} \\geq 0, \\underline {{u}} _ {l _ {j}} \\geq 0,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.571, + 0.825, + 0.645 + ], + 
"angle": 0, + "content": "where \\( K_{\\phi}^{\\wedge} \\) (resp. \\( K_{\\phi}^{\\vee} \\)) is the number of \\( \\wedge \\) (resp. \\( \\vee \\)) operators in \\( \\phi \\), \\( I_{k} \\) (resp. \\( I_{k'} \\)) denotes the inputs to the \\( k \\)-th \\( \\wedge \\) (resp. \\( k' \\)-th \\( \\vee \\)) operator. Please see Appendix A for more details about the above formulation. The overall learning framework is shown in Figure 3, in which the forward propagation computes \\( LL_{l} \\) by using \\( n \\) CLNNs; each learns a wCL formula \\( \\phi_{i} \\) and the backward propagation updates the parameters in \\( n \\) CLNNs using projected gradient descent." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.653, + 0.329, + 0.668 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.678, + 0.827, + 0.735 + ], + "angle": 0, + "content": "We conduct several experiments on synthetic and real-world datasets to demonstrate the efficacy of our proposed model. Simultaneously, we compare with state-of-the-art (SOTA) models. The experiments are run using the AdamW optimizer in Pytorch (1.10.2) on a Windows 10 system desktop with a 16-core CPU (i7, 3.60GHz) and 32 GB RAM. Our code is available at https://ICLR-CLNN." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.753, + 0.279, + 0.766 + ], + "angle": 0, + "content": "4.1 MODELS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.779, + 0.825, + 0.85 + ], + "angle": 0, + "content": "Multivariate Hawkes Process (MHP) [(Bacry et al., 2017)]: A conventional multivariate Hawkes process utilizing an exponential kernel function to describe the conditional intensity rate, which involves a decay rate and an infectivity matrix characterizing the inter-dependence among events. This model is implemented in the tick\\(^{1}\\) library, where the learning problem is posed as a convex quadratic programming problem with a fixed decay rate." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.825, + 0.898 + ], + "angle": 0, + "content": "Proximal Graphical Event Model (PGEM) [(Bhattacharjya et al., 2018)]: A type of GEM that models event data by considering whether a parent in some underlying graph happens in a proximal (recent) window." + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.547, + 0.924 + ], + "angle": 0, + "content": "\\(^{1}\\)https://x-datainitiative.github.io/tick/modules/hawkes.html" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.073, + 0.818, + 0.125 + ], + "angle": 0, + "content": "
Ground truthφ1=(cA-cB>1)1∧(cA-cC>3)1
CLNN's rule(cA-cB>1.21)1.52 ∧ (cA-cC>3.00)1.41 ∧ (cA-cD>0.82)0.33 ∧ (cB-cC>4.33)0 ∧ (cB-cD>10.69)0 ∧ (cD-cC>-6.57)0.16
TELLER's ruleA before D, B before D, C before D, A before D and C before D
OGEM-tab's ruleExcitation: [B], [C,B], [B,C]; Inhibitory: [A], [C,A], [A,C]
" + }, + { + "type": "table_caption", + "bbox": [ + 0.203, + 0.133, + 0.794, + 0.149 + ], + "angle": 0, + "content": "Table 1: Comparison of rule discovery for CLNN, TELLER, and OGEM-tab on the Syn-1 dataset." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.155, + 0.825, + 0.198 + ], + "angle": 0, + "content": "Ordinal Graphical Event Model (OGEM) [(Bhattacharjya et al., 2020; 2021)]: An ordinal GEM that models the impact of the order of events on the conditional intensity rate. OGEM-tab (resp. OGEM-tree) refers to an OGEM that adopts a tabular (resp. tree) representation of orders." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.199, + 0.825, + 0.24 + ], + "angle": 0, + "content": "Temporal Logic Rule Learner (TELLER)2 [(Li et al., 2021)]. This is a method to learn first-order temporal logic rules explaining the generative mechanism of TPPs. The rule discovery process is formulated as a maximum likelihood estimation problem solved by a branch-and-price algorithm." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.243, + 0.375, + 0.258 + ], + "angle": 0, + "content": "4.2 SYNTHETIC DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.263, + 0.825, + 0.326 + ], + "angle": 0, + "content": "The first part of this experiment demonstrates CLNN's capability of recovering ground-truth rules using three synthetic datasets generated by CLNN with pre-specified formula structure and parameters, including \\(\\underline{u}_{l_i l_j}\\) in \\(\\pi_{pop}^{l_i, l_j}\\), as well as the importance weights \\(w\\) and bias \\(\\beta\\) in (5) for logical operators, and the \\(w_\\phi\\) and \\(\\rho\\) in (8) for the conditional intensity rate." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.331, + 0.827, + 0.431 + ], + "angle": 0, + "content": "Experimental Setting. Each synthetic dataset contains 1,000 event streams partitioned into three sets: training (70%), validation (15%), and test (15%). 
Every dataset is generated using a wCL formula with \\(w_{\\phi} = 3\\) and \\(\\rho = -5\\). The truth value threshold is set as \\(\\alpha = 0.5\\), and the clock signal for representing an event not occurring in \\(\\mathcal{H}_t^\\prime\\) is set as \\(\\bar{Z} = 1.5T_{\\mathrm{max}}\\), where \\(T_{\\mathrm{max}}\\) is the maximal ending time among all the event streams. During the training process, we initialize the parameters using four approaches (see Appendix C.5 for more details) and report the best one, and CLNN aims to recover the manually set parameters." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.434, + 0.827, + 0.7 + ], + "angle": 0, + "content": "Results. The ground-truth rule \\(\\hat{\\phi}_1\\) for generating the first synthetic dataset (Syn-1) with \\(\\mathcal{L} = \\{A, B, C, D\\}\\) and the rules discovered by CLNN, TELLER, and OGEM-tab are summarized in Table 1. Results for the other synthetic datasets are presented in Appendix C. The rules are learned using the 'last' masking method, which was also used for data generation. The experimental results show an accurate recovery performance of CLNN in terms of order representation recovery and parameter identification. The unweighted version of the ground truth rule reads: \"If \\(A\\) happens before \\(B\\) for at least 1 time unit and \\(A\\) happens before \\(C\\) for at least 3 time units, then \\(D\\) will happen\". The rule of TELLER only reflects the temporal relation between events \\(A, B, C\\) and \\(D\\) but is unable to capture the temporal relation between \\(A\\) and \\(B\\) or \\(A\\) and \\(C\\), which does not match the ground-truth rule. In OGEM-tab's rule, \\([l]\\) denotes a single parent. We show the top 3 excitation and inhibitory rules from OGEM-tab, where excitation (resp. inhibitory) means \\(\\lambda_{l|\\Phi}\\) is higher (resp. lower) than the \\(\\lambda_{l|\\Phi}\\) with all \\(w_{\\phi_i} = 0\\). 
The excitation rules of OGEM-tab do not match the ground-truth rule. In contrast, the rule discovered by CLNN (\\(\\phi_1\\)) assigns larger weights to the paired order predicates \\(\\pi_{pop}^{A,B} = (c_A - c_B > 1.21)\\) and \\(\\pi_{pop}^{A,C} = (c_A - c_C > 3.00)\\) and small weights to the other predicates, where the interval values of 1.21 and 3.00 are both learned. By ignoring the small weights, \\(\\phi_1\\) can be interpreted as \"If \\(A\\) happens before \\(B\\) for at least 1.21 time units and \\(A\\) happens before \\(C\\) for at least 3.00 time units, then \\(D\\) will happen\", meaning the paired order representations discovered by CLNN match well with the ground truth. Moreover, CLNN's rules are more expressive than TELLER and OGEM as it provides a detailed interval length between two ordered labels." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.706, + 0.508, + 0.789 + ], + "angle": 0, + "content": "To show the computational efficiency of our gradient-based learning, we compare the runtimes of CLNN and TELLER on the synthetic datasets in Table 2. Notably, CLNN not only recovers the correct order representations but also was two orders of magnitude faster on average (5.62 s vs 635.99" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.789, + 0.825, + 0.817 + ], + "angle": 0, + "content": "s). In addition, CLNN can learn more expressive order representations that describe both the order relation between two events and their interval length." + }, + { + "type": "table", + "bbox": [ + 0.522, + 0.715, + 0.822, + 0.755 + ], + "angle": 0, + "content": "
wCL formulaφ1φ2φ3,1φ3,2Average
CLNN5.204.604.957.735.62
TELLER252.91286.83925.581078.66635.99
job hopping records of 3,000 LinkedIn users in 82 IT companies.
Each user's event stream records the badges that he/she receives at various time stamps. We keep the event streams with one or more of 20 types of badges and sample 1,000 users from the dataset used in Du et al. (2016)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.216, + 0.828, + 0.306 + ], + "angle": 0, + "content": "Experimental Setup. Each dataset is partitioned into three sets: training (70%), validation (15%), and test (15%). For simplicity, \\( \\underline{u}_{l_i l_j} \\) are set as 0 to study the ordering representations. The truth value threshold is \\( \\alpha = 0.5 \\), and \\( \\bar{Z} = 1.5T_{\\mathrm{max}} \\), same as the setting for the synthetic datasets, and the number of subformulas is \\( n = 5 \\), and the parameters are initialized as random numbers from a uniform distribution on [0, 1). CLNN is trained on the training set, and the validation set is utilized for model selection during training. Model fit is evaluated using log-likelihood on the test set." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.31, + 0.827, + 0.478 + ], + "angle": 0, + "content": "Results. We follow a similar trend to Bhattacharjya et al. (2018; 2020; 2021) to use the log-likelihood for evaluation of the model's performance. The log-likelihood on the real-world datasets is reported in Table 3, where \\( DR \\) denotes the difference ratio – the difference between CLNN and the best SOTA divided by the absolute value of best SOTA. CLNN's result is chosen as the better one among the 'first' or the 'last' masking. Notably, CLNN outperforms the baseline models on the LinkedIn dataset (13.40% advantage) and achieves a competitive result on the MIMIC II dataset (1.63% loss only). It is observed that PGEM achieves a better result on the Stack Overflow dataset. 
In Stack Overflow, one type of badge can be awarded only when a user receives a particular badge multiple times, for example, the 'Epic' badge is awarded only when earning 200 daily reputations 50 times, depending on the 'Mortarboard' badge acquired while answering or asking questions. CLNN and OGEMs apply masking methods to the data, which may not capture the above dependence. In contrast, PGEM models data without masking, making it more suitable for this dataset." + }, + { + "type": "table", + "bbox": [ + 0.199, + 0.481, + 0.8, + 0.543 + ], + "angle": 0, + "content": "
DatasetN (# events)M (labels)MHPPGEMOGEM-tabOGEM-treeTELLERCLNNDR
LinkedIn293210-1593-1462-1478-1418-1548-122813.40%
MIMIC II241915-567-500-474-429-645-436-1.63%
Stack Overflow7125420-52543-48323-49344-49192-71101-50981-5.50%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.204, + 0.548, + 0.793, + 0.563 + ], + "angle": 0, + "content": "Table 3: Dataset information and log-likelihood for all models on the real-world datasets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.566, + 0.439, + 0.706 + ], + "angle": 0, + "content": "Case Study. The primary strength of CLNN over the SOTA models is that it can describe the generative mechanism as wCL formulas, being more expressive and potentially providing more detailed information. CLNN can be deployed as a valuable tool for assisting domain specialists in knowledge discovery from event data. Here we showcase the above strength of CLNN using an il" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.706, + 0.827, + 0.817 + ], + "angle": 0, + "content": "lustrative example. We select the experimental result on company \\( F \\) of the LinkedIn dataset to demonstrate the expressivity of CLNN's rules, which are shown in Table 4. Here we specify the model to learn five formulas, four of which are inhibitory, and one exhibits excitation. One inhibitory formula has a weight of 0.05, thus not reported in Table 4. Each formula shows the dominant singleton or paired order predicates. Notably, CLNN learns expressive wCL formulas that describe how the logical composition of paired order predicates and(or) singleton order predicates affect a role change in the company \\( F \\). CLNN's rules are more expressive than TELLER and as expressive as OGEM-tab for describing the occurrence of a causal event within a specific historical window." + }, + { + "type": "table", + "bbox": [ + 0.456, + 0.574, + 0.819, + 0.667 + ], + "angle": 0, + "content": "
RulesEffect
CLNNφ1=(CD>cH)0.90 ∧ (CI>cJ)0.72Inhibitory
φ2=(CB<0.45)0.58 ∧ (CD<0.05)0.66
φ3=(CB>cF)0.50 ∧ (CI>cJ>cD)0.47Inhibitory
φ4=(CA<0.84)0.76 ∧ (CH<1.09)0.50Inhibitory
TELLER[A,F],[C,F],[E,F],[B,F],[D,F]Excitation
OGEM-tab[F],[F,A]Excitation
[A]Inhibitory
" + }, + { + "type": "table_caption", + "bbox": [ + 0.448, + 0.674, + 0.825, + 0.701 + ], + "angle": 0, + "content": "Table 4: Formulas and their effect as learned by CLNN, TELLER and OGEM-tab on company \\( F \\) of LinkedIn." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.83, + 0.321, + 0.845 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.826, + 0.926 + ], + "angle": 0, + "content": "In this paper, we proposed a novel neuro-symbolic model, CLNN, to learn interpretable wCL formulas from multivariate event data. Experimental results using synthetic and real-world datasets demonstrate CLNN's expressiveness in recovering ground-truth rules in multivariate temporal point processes. Further, CLNN can be trained using gradient-based methods, which improve the learning speed compared to the SOTA." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.391, + 0.119 + ], + "angle": 0, + "content": "6 ACKNOWLEDGEMENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.828, + 0.219 + ], + "angle": 0, + "content": "This research is sponsored by the Rensselaer-IBM AI Research Collaboration (http://airc.rpi.edu), part of the IBM AI Horizons Network; the National Science Foundation under Grant CMMI-1936578; and the Defense Advanced Research Projects Agency (DARPA) through Cooperative Agreement D20AC00004 awarded by the U.S. Department of the Interior (DOI), Interior Business Center. The content of the information does not necessarily reflect the position or the policy of the Government, and no official endorsement should be inferred." 
Emmanuel Bacry, Iacopo Mastromatteo, and Jean-François Muzy. Hawkes processes in finance. Market Microstructure and Liquidity, 1(01):1550005, 2015.
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.539, + 0.824, + 0.568 + ], + "angle": 0, + "content": "Yuanda Chen. Thinning algorithms for simulating point processes. Florida State University, Tallahassee, FL, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.578, + 0.824, + 0.608 + ], + "angle": 0, + "content": "Daryl J Daley and David Vere-Jones. An Introduction to the Theory of Point Processes, Volume I: Elementary Theory and Methods. Springer, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.618, + 0.824, + 0.647 + ], + "angle": 0, + "content": "Vanessa Didelez. Graphical models for marked point processes based on local independence. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 70(1):245-264, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.657, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Nan Du, Hanjun Dai, Rakshit Trivedi, Utkarsh Upadhyay, Manuel Gomez-Rodriguez, and Le Song. Recurrent marked temporal point processes: embedding event history to vector. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 1555-1564, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.723, + 0.825, + 0.78 + ], + "angle": 0, + "content": "Mehrdad Farajtabar, Yichen Wang, Manuel Gomez Rodriguez, Shuang Li, Hongyuan Zha, and Le Song. COEVOLVE: A joint point process model for information diffusion and network coevolution. In Advances in Neural Information Processing Systems (NeurIPS), volume 28, pp. 1954-1962, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.79, + 0.825, + 0.834 + ], + "angle": 0, + "content": "Tian Gao, Dharmashankar Subramanian, Karthikeyan Shanmugam, Debarun Bhattacharjya, and Nicholas Mattei. A multi-channel neural graphical event model with negative evidence. In Proceedings of the Conference on Artificial Intelligence (AAAI), pp. 3946-3953, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.844, + 0.824, + 0.885 + ], + "angle": 0, + "content": "Scott Grant and Buddy Betts. Encouraging user behaviour with achievements: An empirical study. In Proceedings of the 10th Working Conference on Mining Software Repositories, MSR '13, pp. 65-68. IEEE Press, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.896, + 0.824, + 0.925 + ], + "angle": 0, + "content": "Asela Gunawardana and Chris Meek. Universal models of multivariate temporal point processes. In Artificial Intelligence and Statistics, pp. 556-563. PMLR, 2016." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.263, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Asela Gunawardana, Christopher Meek, and Puyang Xu. A model for temporal dependencies in event streams. Advances in Neural Information Processing Systems (NeurIPS), 24, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.141, + 0.675, + 0.158 + ], + "angle": 0, + "content": "Patrick J Hurley. A Concise Introduction to Logic. Cengage Learning, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.164, + 0.825, + 0.209 + ], + "angle": 0, + "content": "Shuang Li, Lu Wang, Ruizhi Zhang, Xiaofu Chang, Xuqin Liu, Yao Xie, Yuan Qi, and Le Song. Temporal logic point processes. In International Conference on Machine Learning, pp. 5990-6000. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.215, + 0.825, + 0.261 + ], + "angle": 0, + "content": "Shuang Li, Mingquan Feng, Lu Wang, Abdelmajid Essofi, Yufeng Cao, Junchi Yan, and Le Song. 
Explaining point processes by learning interpretable temporal logic rules. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.267, + 0.825, + 0.298 + ], + "angle": 0, + "content": "Hanxiao Liu, Karen Simonyan, and Yiming Yang. DARTS: Differentiable architecture search. In International Conference on Learning Representations, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.305, + 0.825, + 0.336 + ], + "angle": 0, + "content": "Noushin Mehdipour, Cristian-Ioan Vasile, and Calin Belta. Specifying user preferences using weighted signal temporal logic. IEEE Control Systems Letters, 5(6):2006-2011, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.343, + 0.825, + 0.386 + ], + "angle": 0, + "content": "Hongyuan Mei and Jason M Eisner. The neural Hawkes process: A neurally self-modulating multivariate point process. Advances in Neural Information Processing Systems (NeurIPS), 30:6757-6767, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.394, + 0.825, + 0.425 + ], + "angle": 0, + "content": "Sean P O'Brien. Crisis early warning and decision support: Contemporary approaches and thoughts on future research. International Studies Review, 12(1):87-104, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.432, + 0.825, + 0.476 + ], + "angle": 0, + "content": "Ryan Riegel, Alexander Gray, Francois Luus, Naweed Khan, Ndivhuwo Makondo, Ismail Yunus Akhalwaya, Haifeng Qian, Ronald Fagin, Francisco Barahona, Udit Sharma, et al. Logical neural networks. arXiv preprint arXiv:2006.13155, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.483, + 0.825, + 0.541 + ], + "angle": 0, + "content": "Mohammed Saeed, Mauricio Villarroel, Andrew T Reisner, Gari Clifford, Li-Wei Lehman, George Moody, Thomas Heldt, Tin H Kyaw, Benjamin Moody, and Roger G Mark. 
Multiparameter intelligent monitoring in intensive care II (MIMIC-II): A public-access intensive care unit database. Critical Care Medicine, 39(5):952, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.549, + 0.825, + 0.593 + ], + "angle": 0, + "content": "Prithviraj Sen, Bruno WSR de Carvalho, Ryan Riegel, and Alexander Gray. Neuro-symbolic inductive logic programming with logical neural networks. In Proceedings of the Conference on Artificial Intelligence (AAAI), volume 36, pp. 8212-8219, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.601, + 0.825, + 0.631 + ], + "angle": 0, + "content": "Jeremy C. Weiss and David Page. Forest-based point process for event prediction from electronic health records. In Machine Learning and Knowledge Discovery in Databases, pp. 547-562, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.638, + 0.825, + 0.682 + ], + "angle": 0, + "content": "Shuai Xiao, Junchi Yan, Xiaokang Yang, Hongyuan Zha, and Stephen Chu. Modeling the intensity function of point process via recurrent neural networks. In Proceedings of the Conference on Artificial Intelligence (AAAI), volume 31, pp. 1597-1603, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.689, + 0.825, + 0.733 + ], + "angle": 0, + "content": "Hongteng Xu, Dixin Luo, and Hongyuan Zha. Learning Hawkes processes from short doubly-censored event sequences. In International Conference on Machine Learning, pp. 3831-3840. PMLR, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.741, + 0.825, + 0.785 + ], + "angle": 0, + "content": "Ruixuan Yan, Agung Julius, Maria Chang, Achille Fokoue, Tengfei Ma, and Rosario Uceda-Sosa. STONE: Signal temporal logic neural network for time series classification. In 2021 International Conference on Data Mining Workshops (ICDMW), pp. 778-787. IEEE, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.792, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Ruixuan Yan, Tengfei Ma, Achille Fokoue, Maria Chang, and Agung Julius. Neuro-symbolic models for interpretable time series classification using temporal logic description. In 2022 IEEE International Conference on Data Mining (ICDM), pp. 618-627, 2022. doi: 10.1109/ICDM54844.2022.00072." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.857, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Qiang Zhang, Aldo Lipani, Omer Kirnap, and Emine Yilmaz. Self-attentive Hawkes process. In International Conference on Machine Learning, pp. 11183-11193. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Simiao Zuo, Haoming Jiang, Zichong Li, Tuo Zhao, and Hongyuan Zha. Transformer Hawkes process. In International Conference on Machine Learning, pp. 11692-11702. PMLR, 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.787, + 0.119 + ], + "angle": 0, + "content": "A FORMULATION OF LOGICAL CONSTRAINTS & OBJECTIVE FUNCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.219 + ], + "angle": 0, + "content": "The optimization problem in (10) is formulated by maximizing the log-likelihood subject to the logical constraints for the \\(\\wedge\\) and \\(\\vee\\) operators. This section discusses the details of the formulation for the two logical constraints and how to formulate the optimization problem while considering the logical constraints. 
Without loss of generality, we illustrate the formulation of the constraints for the \\(\\wedge\\) operator, and the constraints for \\(\\vee\\) operator can be derived from the constraints for the \\(\\wedge\\) operator using De Morgan's law." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.231, + 0.476, + 0.245 + ], + "angle": 0, + "content": "- Logical constraints for \\(\\wedge\\) operator." + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.247, + 0.825, + 0.386 + ], + "angle": 0, + "content": "Let \\( x, y \\in [0,1] \\) denote the inputs of the \\( \\wedge \\) operator, and \\( f(x,y) \\) denote the quantitative satisfaction of \\( \\wedge \\). The conventional characteristic of the \\( \\wedge \\) operator is illustrated as follows: 1) \\( f(x,y) \\) is low when either input is low, and 2) \\( f(x,y) \\) is high when both inputs are high. However, we associate each input with a nonnegative weight, implying the input with a zero weight should not affect the output. In other words, if a low input has a zero weight, it should not affect the output of \\( f(x,y) \\). Therefore, we require the \\( \\wedge \\) operator to exhibit the following characteristics: 1) \\( f(x,y) \\) is low when both inputs are low, and 2) \\( f(x,y) \\) is high when both inputs are high. Here we introduce a user-defined hyperparameter \\( \\alpha \\in [\\frac{1}{2},1] \\) to capture low vs. high: \\( x \\in [0,1 - \\alpha) \\) represents low and \\( x \\in [\\alpha,1] \\) represents high. 
According to the above characteristics, we have (Sen et al., 2022)" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.392, + 0.825, + 0.415 + ], + "angle": 0, + "content": "\\[\nf (x, y) \\leq 1 - \\alpha , \\quad \\forall x, y \\in [ 0, 1 - \\alpha), \\tag {13}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.429, + 0.411, + 0.625, + 0.426 + ], + "angle": 0, + "content": "\\[\nf (x, y) \\geq \\alpha , \\quad \\forall x, y \\in [ \\alpha , 1 ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.432, + 0.825, + 0.461 + ], + "angle": 0, + "content": "Here we follow a specific choice of \\( f \\) by using a triangular norm (\\( t \\)-norm) and define the quantitative satisfaction function of \\( \\wedge \\) as (Riegel et al., 2020)" + }, + { + "type": "equation", + "bbox": [ + 0.295, + 0.467, + 0.825, + 0.509 + ], + "angle": 0, + "content": "\\[\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}}, t\\right) = f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {14}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.514, + 0.825, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\text {s u b j e c t} \\quad \\beta - \\sum_ {j = 1} ^ {2} w _ {j} (1 - \\alpha) \\geq \\alpha , \\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\alpha \\leq 1 - \\alpha , \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.562, + 0.825, + 0.592 + ], + "angle": 0, + "content": "where \\( f(z) = \\max \\{0, \\min \\{z, 1\\}\\} \\) is introduced to clamp the truth value into the range of [0, 1]." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.595, + 0.476, + 0.609 + ], + "angle": 0, + "content": "- Logical constraints for \\(\\vee\\) operator." 
+ }, + { + "type": "text", + "bbox": [ + 0.228, + 0.611, + 0.825, + 0.64 + ], + "angle": 0, + "content": "By using De Morgan's law, we could derive the quantitative satisfaction function and the logical constraints for the \\(\\lor\\) operator with 2 inputs as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.253, + 0.646, + 0.825, + 0.688 + ], + "angle": 0, + "content": "\\[\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}}, t\\right) = f \\left(1 - \\beta + \\sum_ {j = 1} ^ {2} w _ {j} \\left(p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {16}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.692, + 0.825, + 0.735 + ], + "angle": 0, + "content": "\\[\n\\text {s u b j e c t} \\quad 1 - \\beta + \\sum_ {j = 1} ^ {2} w _ {j} \\alpha \\geq \\alpha , 1 - \\beta + \\sum_ {j = 1} ^ {2} w _ {j} (1 - \\alpha) \\leq 1 - \\alpha . \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.745, + 0.825, + 0.844 + ], + "angle": 0, + "content": "Here we show the characteristics of the activation functions for the \\(\\wedge\\) and \\(\\vee\\) operators using Figure 4. Figure 4(a) shows the truth value of the \\(\\wedge\\) operator with \\(\\alpha = 0.7\\). Figure 4(b) shows the truth value of the \\(\\wedge\\) operator with \\(\\alpha = 0.9\\). It can be distinctly observed that \\(f(x,y)\\) is close to 0 when both \\(x\\) and \\(y\\) are low, and \\(f(x,y)\\) is close to 1 when both \\(x\\) and \\(y\\) are high. In addition, the unconstrained region for \\(\\alpha = 0.9\\) is larger than the unconstrained region for \\(\\alpha = 0.7\\). Figure 4(c) shows the truth value of the \\(\\vee\\) operator with \\(\\alpha = 0.7\\). It is obvious that \\(f(x,y)\\) is close to 0 when both \\(x\\) and \\(y\\) are low, and \\(f(x,y)\\) is close to 1 when both \\(x\\) and \\(y\\) are high." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.85, + 0.825, + 0.879 + ], + "angle": 0, + "content": "In general, we could extend the quantitative satisfaction for the \\(\\wedge\\) and \\(\\vee\\) operators in (14) - (17) to \\(k\\)-ary conjunction and \\(k\\)-ary disjunction. The \\(k\\)-ary conjunction formulation is expressed as follows." + }, + { + "type": "equation", + "bbox": [ + 0.232, + 0.885, + 0.825, + 0.929 + ], + "angle": 0, + "content": "\\[\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} \\dots \\wedge \\phi_ {k} ^ {w _ {k}}, t\\right) = f \\left(\\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {18}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.187, + 0.111, + 0.382, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.27, + 0.196, + 0.29, + 0.208 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.111, + 0.599, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.485, + 0.195, + 0.505, + 0.208 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.612, + 0.111, + 0.815, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.7, + 0.195, + 0.72, + 0.209 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.224, + 0.825, + 0.253 + ], + "angle": 0, + "content": "Figure 4: Plot of truth degree for (a) CLNN- \\(\\wedge\\) with \\(\\alpha = 0.7\\), (b) CLNN- \\(\\wedge\\) with \\(\\alpha = 0.9\\), (c) CLNN- 
\\(\\vee\\) with \\(\\alpha = 0.7\\)." + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.263, + 0.826, + 0.307 + ], + "angle": 0, + "content": "\\[\n\\text {s u b j e c t} \\quad \\beta - \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\geq \\alpha , \\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\leq 1 - \\alpha . \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.321, + 0.557, + 0.336 + ], + "angle": 0, + "content": "The \\(k\\)-ary disjunction formulation is expressed as follows." + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.344, + 0.826, + 0.387 + ], + "angle": 0, + "content": "\\[\np \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}} \\dots \\vee \\phi_ {k} ^ {w _ {k}}, t\\right) = f (1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\left(p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)), \\tag {20}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.39, + 0.825, + 0.433 + ], + "angle": 0, + "content": "\\[\n\\text {s u b j e c t} \\quad 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\geq \\alpha , 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\leq 1 - \\alpha . \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.447, + 0.784, + 0.462 + ], + "angle": 0, + "content": "With the above constraints, we can formulate the maximum likelihood estimation problem as" + }, + { + "type": "equation", + "bbox": [ + 0.218, + 0.469, + 0.825, + 0.485 + ], + "angle": 0, + "content": "\\[\n\\min - L L _ {l} \\tag {22}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.177, + 0.488, + 0.825, + 0.52 + ], + "angle": 0, + "content": "\\[\ns. t. 
\\quad \\forall \\phi \\in \\Phi , \\forall 1 \\leq k \\leq K _ {\\phi} ^ {\\wedge}, \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} (1 - \\alpha) \\geq \\alpha , \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} \\alpha \\leq 1 - \\alpha , \\tag {23}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.524, + 0.825, + 0.572 + ], + "angle": 0, + "content": "\\[\n\\forall \\phi \\in \\Phi , \\forall 1 \\leq k ^ {\\prime} \\leq K _ {\\phi} ^ {\\vee}, 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} \\alpha \\geq \\alpha , 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} (1 - \\alpha) \\leq 1 - \\alpha . \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.587, + 0.601, + 0.602 + ], + "angle": 0, + "content": "In this paper, we set \\(\\alpha = 0.5\\), thus the constraints in (19) become" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.61, + 0.558, + 0.651 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 1} ^ {k} w _ {i} \\geq 2 \\beta - 1,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.655, + 0.825, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 1} ^ {k} w _ {i} \\leq 2 \\beta - 1, \\tag {25}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.474, + 0.698, + 0.558, + 0.712 + ], + "angle": 0, + "content": "\\[\n2 \\beta - 1 \\geq 0,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.504, + 0.716, + 0.558, + 0.73 + ], + "angle": 0, + "content": "\\[\nw _ {i} \\geq 0.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.739, + 0.476, + 0.753 + ], + "angle": 0, + "content": "Reformulating the above constraints, we have" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.761, + 0.825, + 0.802 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 1} ^ {k} w _ {i} = 2 \\beta - 1, \\tag {26}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.472, + 0.805, + 0.533, + 0.82 + ], + "angle": 0, + "content": "\\[\n\\beta \\geq 0. 
5,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.466, + 0.823, + 0.825, + 0.838 + ], + "angle": 0, + "content": "\\[\nw _ {i} \\geq 0. \\tag {27}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.845, + 0.825, + 0.874 + ], + "angle": 0, + "content": "The above constraints hold for each conjunction operator in \\(\\phi\\). Therefore, we can incorporate the constraints in (26) into the objective function, which becomes" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.882, + 0.825, + 0.928 + ], + "angle": 0, + "content": "\\[\n\\min - L L _ {l} + \\sum_ {k = 1} ^ {K _ {\\phi} ^ {\\wedge}} \\left(\\sum_ {i \\in I _ {k}} w _ {i, k} - 2 \\beta_ {k} + 1\\right) ^ {2}, \\tag {28}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.283, + 0.104, + 0.825, + 0.121 + ], + "angle": 0, + "content": "\\[\n\\text {s u b j e c t} w _ {i, k} \\geq 0, \\beta_ {k} \\geq 0. 5, \\forall i \\in I _ {k}, \\forall 1 \\leq k \\leq K _ {\\phi} ^ {\\wedge}, \\forall \\phi \\in \\Phi . \\tag {29}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.133, + 0.825, + 0.161 + ], + "angle": 0, + "content": "Similarly, we propose a set of logical constraints for the \\(\\lor\\) operator as (21). 
If we set \\(\\alpha = 0.5\\), the constraints in (21) become" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.168, + 0.558, + 0.209 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 1} ^ {k} w _ {i} \\geq 2 \\beta - 1,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.213, + 0.824, + 0.253 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 1} ^ {k} w _ {i} \\leq 2 \\beta - 1, \\tag {30}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.474, + 0.256, + 0.558, + 0.27 + ], + "angle": 0, + "content": "\\[\n2 \\beta - 1 \\geq 0,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.504, + 0.274, + 0.558, + 0.288 + ], + "angle": 0, + "content": "\\[\nw _ {i} \\geq 0.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.295, + 0.477, + 0.31 + ], + "angle": 0, + "content": "Reformulating the above constraints, we have" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.317, + 0.824, + 0.358 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i _ {1}} ^ {k} w _ {i} = 2 \\beta - 1, \\tag {31}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.472, + 0.362, + 0.532, + 0.376 + ], + "angle": 0, + "content": "\\[\n\\beta \\geq 0. 5.\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.466, + 0.379, + 0.824, + 0.394 + ], + "angle": 0, + "content": "\\[\nw _ {i} \\geq 0. \\tag {32}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.4, + 0.825, + 0.441 + ], + "angle": 0, + "content": "The above constraints hold for each disjunction operator in \\(\\phi\\). Therefore, we can incorporate the constraints in (31) into the objective function. 
The maximum likelihood estimation problem then becomes" + }, + { + "type": "equation", + "bbox": [ + 0.262, + 0.447, + 0.825, + 0.493 + ], + "angle": 0, + "content": "\\[\n\\min - L L _ {l} + \\sum_ {k = 1} ^ {K _ {\\phi} ^ {\\wedge}} \\left(\\sum_ {i \\in I _ {k}} w _ {i, k} - 2 \\beta_ {k} + 1\\right) ^ {2} + \\sum_ {k ^ {\\prime} = 1} ^ {K _ {\\phi} ^ {\\vee}} \\left(\\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} - 2 \\beta_ {k ^ {\\prime}} + 1\\right) ^ {2}, \\tag {33}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.497, + 0.658, + 0.513 + ], + "angle": 0, + "content": "subject to \\(w_{i,k}\\geq 0,\\beta_k\\geq 0.5,\\forall i\\in I_k,\\forall 1\\leq k\\leq K_\\phi^{\\wedge},\\forall \\phi \\in \\Phi ,\\)" + }, + { + "type": "equation", + "bbox": [ + 0.307, + 0.516, + 0.675, + 0.534 + ], + "angle": 0, + "content": "\\[\nw _ {i, k ^ {\\prime}} \\geq 0, \\beta_ {k ^ {\\prime}} \\geq 0. 5, \\forall i \\in I _ {k ^ {\\prime}}, \\forall 1 \\leq k ^ {\\prime} \\leq K _ {\\phi} ^ {\\vee}, \\forall \\phi \\in \\Phi .\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.552, + 0.4, + 0.567 + ], + "angle": 0, + "content": "B PROOF OF THEOREM 8" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.583, + 0.825, + 0.638 + ], + "angle": 0, + "content": "The activation function designed for the \\(\\wedge\\) operator satisfies the properties of nonimpact for zero weights, impact ordering, and monotonicity. Without loss of generality, we present the proof for the \\(\\wedge\\) operator connecting two clauses, which can be generalized to the \\(\\wedge\\) operator connecting \\(k\\)-ary clauses." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.652, + 0.825, + 0.681 + ], + "angle": 0, + "content": "Proof 1 Here we present the proof for the activation function for the \\(\\wedge\\) operator satisfying each property mentioned above." 
+ }, + { + "type": "text", + "bbox": [ + 0.216, + 0.692, + 0.424, + 0.707 + ], + "angle": 0, + "content": "- Nonimpact for zero weights." + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.713, + 0.825, + 0.743 + ], + "angle": 0, + "content": "This means if \\( w_{j} = 0, j = 1,2 \\), then \\( p(\\mathcal{C}',\\phi_j,t) \\) should have no impact on \\( p(\\mathcal{C}',\\phi_1^{w_1}\\wedge \\phi_2^{w_2},t) \\). Without loss of generality, we suppose \\( w_{1} = 0 \\), thus we have" + }, + { + "type": "equation", + "bbox": [ + 0.26, + 0.748, + 0.825, + 0.785 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}}, t\\right) = f (\\beta - 0 \\cdot (1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1}, t\\right)) - w _ {2} \\cdot (1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {2}, t\\right))), \\tag {34} \\\\ = f \\left(\\beta - w _ {2} \\cdot \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {2}, t\\right)\\right)\\right), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.79, + 0.617, + 0.807 + ], + "angle": 0, + "content": "meaning \\(p(\\mathcal{C}',\\phi_1,t)\\) has no impact on \\(p(\\mathcal{C}',\\phi_1^{w_1}\\wedge \\phi_2^{w_2},t)\\)" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.815, + 0.35, + 0.83 + ], + "angle": 0, + "content": "- Impact Ordering" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.836, + 0.826, + 0.879 + ], + "angle": 0, + "content": "This means the truth degree of subformula with higher weights has a greater impact on \\( p(\\mathcal{C}', \\phi_1^{w_1} \\wedge \\phi_2^{w_2}, t) \\). 
Mathematically, we need to prove that if \\( p(\\mathcal{C}', \\phi_1, t) = p(\\mathcal{C}', \\phi_2, t) \\) and \\( w_1 \\geq w_2 \\), then" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.895, + 0.825, + 0.93 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} \\geq \\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)}. \\tag {35}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.104, + 0.504, + 0.12 + ], + "angle": 0, + "content": "As \\( f(x) = \\max \\{0, \\min \\{x, 1\\}\\} \\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.436, + 0.13, + 0.825, + 0.179 + ], + "angle": 0, + "content": "\\[\n\\frac {d f}{d x} = \\left\\{ \\begin{array}{l l} 0, & \\text {i f} x < 0, \\\\ 1, & \\text {i f} 0 < x < 1, \\\\ 0, & \\text {i f} x > 1. \\end{array} \\right. 
\\tag {36}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.189, + 0.826, + 0.21 + ], + "angle": 0, + "content": "If \\(\\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},t)) < 0\\) or \\(\\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},k)) > 1\\) , then we have" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.217, + 0.826, + 0.252 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} = \\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)} = 0. \\tag {37}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.267, + 0.646, + 0.287 + ], + "angle": 0, + "content": "Also, if \\(0 < \\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},t)) < 1\\), then we have" + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.295, + 0.826, + 0.339 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {j} , t\\right)\\right)\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} = w _ {1} \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {38}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.348, + 0.262, + 0.36 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.367, + 0.826, + 0.41 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {j} , t\\right)\\right)\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)} 
= w _ {2} \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right). \\tag {39}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.419, + 0.457, + 0.434 + ], + "angle": 0, + "content": "As \\(w_{1}\\geq w_{2}\\) , the following holds:" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.44, + 0.826, + 0.475 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} \\geq \\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)}, \\tag {40}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.482, + 0.557, + 0.499 + ], + "angle": 0, + "content": "which proves the impact ordering property holds." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.507, + 0.329, + 0.521 + ], + "angle": 0, + "content": "- Monotonicity." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.527, + 0.738, + 0.545 + ], + "angle": 0, + "content": "This means \\(p(\\mathcal{C}', \\phi_1^{w_1} \\wedge \\phi_2^{w_2}, t)\\) increases monotonically over \\(p(\\mathcal{C}', \\phi_j, t)\\), i.e." + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.552, + 0.826, + 0.595 + ], + "angle": 0, + "content": "\\[\nf \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right) \\leq f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right) - d\\right)\\right) \\text {f o r} d \\geq 0. 
\\tag {41}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.612, + 0.671, + 0.632 + ], + "angle": 0, + "content": "First, note that \\(\\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},t))\\) can be rewritten as" + }, + { + "type": "equation", + "bbox": [ + 0.249, + 0.641, + 0.826, + 0.683 + ], + "angle": 0, + "content": "\\[\n\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right) = \\beta - w _ {1} - w _ {2} + w _ {1} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1}, t\\right) + w _ {2} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {2}, t\\right). \\tag {42}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.693, + 0.827, + 0.741 + ], + "angle": 0, + "content": "This implies \\( f(\\beta - \\sum_{j=1}^{2} w_j (1 - p(\\mathcal{C}', \\phi_j, t))) \\) is monotonically increasing over \\( p(\\mathcal{C}', \\phi_1, t) \\) and \\( p(\\mathcal{C}', \\phi_2, t) \\). Also, from the proof of impact ordering we know \\( f(x) = \\max \\{0, \\min \\{x, 1\\}\\} \\) is monotonically nondecreasing, we can show that" + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.749, + 0.826, + 0.791 + ], + "angle": 0, + "content": "\\[\nf \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right) \\leq f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right) - d\\right)\\right), d \\geq 0. \\tag {43}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.8, + 0.534, + 0.815 + ], + "angle": 0, + "content": "Thus the property of monotonicity is satisfied." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.836, + 0.624, + 0.851 + ], + "angle": 0, + "content": "C EXPERIMENT RESULTS OF SYNTHETIC DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Dataset Generation. 
In the experiments on synthetic datasets, we manually generate 3 synthetic datasets considering different settings, where the details and results for the first synthetic dataset is reported in Section 4.2. Each setting considers a different order representation, different number of event labels or different intensity of causal event labels." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.104, + 0.672, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.259, + 0.334, + 0.738, + 0.351 + ], + "angle": 0, + "content": "Figure 5: Model structure of \\(\\hat{\\phi}_1\\) for generating the first synthetic dataset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.375, + 0.452, + 0.39 + ], + "angle": 0, + "content": "C.1 SYNTHETIC DATASET-1 (SYN-1)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.4, + 0.827, + 0.443 + ], + "angle": 0, + "content": "Generation process. The first synthetic dataset contains 4 event labels: \\( A, B, C \\), and \\( D \\), where \\( D \\) is the event for prediction, and \\( A, B, C \\) are causal events. 
The wCL formula used to generate event \\( D \\) in the first synthetic dataset is set as" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.45, + 0.825, + 0.468 + ], + "angle": 0, + "content": "\\[\n\\hat {\\phi} _ {1} = \\left(c _ {A} - c _ {B} > 1\\right) ^ {1} \\wedge \\left(c _ {A} - c _ {C} > 3\\right) ^ {1}, \\tag {44}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.473, + 0.825, + 0.503 + ], + "angle": 0, + "content": "whose unweighted version reads as \"If \\( A \\) happens before \\( B \\) for at least 1 time unit and \\( A \\) happens before \\( C \\) for at least 3 time units, then \\( D \\) will happen.\"" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.508, + 0.826, + 0.565 + ], + "angle": 0, + "content": "Here we consider event labels \\( A, B, C \\) as free predicates, whose occurrences are generated by a homogeneous Poisson process. The homogeneous intensity rate for \\( A, B, C \\) are set as \\( \\lambda_A = 0.2 \\), \\( \\lambda_B = 0.2 \\), and \\( \\lambda_C = 0.2 \\). The algorithm used to generate instances of \\( A, B, C \\) is described as Algorithm 1 (Chen, 2016)." + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.579, + 0.707, + 0.594 + ], + "angle": 0, + "content": "Algorithm 1 Simulation of a homogeneous Poisson process with intensity rate \\(\\lambda\\)." + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.596, + 0.825, + 0.808 + ], + "angle": 0, + "content": "Input: Intensity rate \\(\\lambda\\) , simulation horizon \\(T\\) \nOutput: Occurrence time stamps \\(\\mathcal{T} = \\{t_k\\}\\) \n1: Initialize \\(n = 0,t_0 = 0\\) . \n2: while True do \n3: Generate \\(u\\sim\\) uniform(0, 1); \n4: Let \\(w = -ln(u) / \\lambda\\) . \n5: Set \\(t_{n + 1} = t_n + w\\) . \n6: if \\(t_{n + 1} > T\\) then \n7: return \\(\\mathcal{T} = \\{t_k\\}_{k = 1,2,\\dots,n}\\) . \n8: else \n9: Set \\(n = n + 1\\) . 
\n10: end if \n11: end while" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.825, + 0.89 + ], + "angle": 0, + "content": "With the above algorithm, we can generate the occurrences of event labels \\( A, B \\), and \\( C \\). Next, we build a CLNN for \\( \\hat{\\phi}_1 = (c_A - c_B > 1)^1 \\wedge (c_A - c_C > 3)^1 \\) to calculate the conditional intensity rate \\( \\lambda_{D|\\hat{\\phi}_1} \\), whose model structure is shown in Figure 5. After obtaining \\( \\lambda_{D|\\hat{\\phi}_1}(t) \\), we could use Algorithm 2 (Chen, 2016) to generate the occurrence of \\( D \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Results. The rules learned by CLNN, TELLER, and OGEM-tab on the first synthetic dataset are presented in Table 5, where the paired order predicate among the two candidates with the highest" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.51, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.104, + 0.747, + 0.12 + ], + "angle": 0, + "content": "Algorithm 2 Simulation of an inhomogeneous Poisson process with intensity rate \\(\\lambda(t)\\)." + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.122, + 0.592, + 0.414 + ], + "angle": 0, + "content": "Input: intensity rate \\(\\lambda (t)\\) , simulation horizon \\(T\\) \nOutput: Occurrence time stamps \\(\\mathcal{T} = \\{t_k\\}\\) 1: Initialize \\(n = m = 0,t_0 = s_0 = 0,\\bar{\\lambda} = \\sup_{0\\leq t\\leq T};\\lambda (t);\\) 2: while \\(s_m < T\\) do 3: Generate a uniform random variable \\(u\\sim \\mathrm{uniform}(0,1)\\) 4: Let \\(w = -\\ln u / \\bar{\\lambda}\\) . 5: Set \\(s_{m + 1} = s_m + w\\) . 6: Generate \\(D\\sim \\mathrm{uniform}(0,1)\\) . 
7: if \\(D\\leq \\lambda (s_{m + 1})\\bar{\\lambda}\\) then 8: \\(t_{n + 1} = s_{m + 1}\\) . 9: \\(n = n + 1\\) . \n10: end if \n11: \\(m = m + 1\\) . \n12: if \\(t_n\\leq T\\) then \n13: return \\(\\{t_k\\}_{k = 1,2,\\dots,n}\\) \n14: else \n15: return \\(\\{t_k\\}_{k = 1,2,\\dots,n - 1}\\) \n16: end if \n17: end while" + }, + { + "type": "table", + "bbox": [ + 0.233, + 0.429, + 0.763, + 0.607 + ], + "angle": 0, + "content": "
DatasetSyn-1
N (# events)N = 4, L = {A, B, C, D}
Ground truthφ1 = (cA - cB > 1)1 ∧ (cA - cC > 3)1
CLNN's rule(cA - cB > 1.21)1.52 ∧ (cA - cC > 3.00)1.41 ∧ (cA - cD > 0.82)0.33 ∧ (cB - cC > 4.33)0 ∧ (cB - cD > 10.69)0 ∧ (cD - cC > -6.57)0.16
TELLER's ruleA before D, B before D, C before D, A before D and C before D
OGEM-tab's ruleExcitation: [B], [C], [C, B], [B, C], [A, C, B], [A, B, C]Inhibitory: [A], [B, A], [B, A, C], [C, B, A], [A, B], [A, C], [B, C, A], [C, A, B], [C, A]
" + }, + { + "type": "table_caption", + "bbox": [ + 0.218, + 0.617, + 0.779, + 0.632 + ], + "angle": 0, + "content": "Table 5: Comparison of rule discovery for CLNN and TELLER on the Syn-1 dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.659, + 0.825, + 0.686 + ], + "angle": 0, + "content": "weight is presented. It can be clearly observed that by truncating the predicates with small weights, we could obtain the formula as" + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.694, + 0.825, + 0.712 + ], + "angle": 0, + "content": "\\[\n\\phi_ {1} = \\left(c _ {A} - c _ {B} > 1. 2 1\\right) ^ {1. 5 2} \\wedge \\left(c _ {A} - c _ {C} > 3. 0 0\\right) ^ {1. 4 1}, \\tag {45}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.719, + 0.826, + 0.763 + ], + "angle": 0, + "content": "which matches well with the ground-truth rule. However, TELLER cannot capture the paired order representation between \\( A \\) and \\( B \\) or \\( A \\) and \\( C \\). OGEM-tab captures the order representation \\( [A, B] \\) and \\( [A, C] \\) as inhibitory causes, which contradicts the ground-truth rule." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.779, + 0.45, + 0.794 + ], + "angle": 0, + "content": "C.2 SYNTHETIC DATASET-2 (SYN-2)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.826, + 0.849 + ], + "angle": 0, + "content": "Generation Process. The second synthetic dataset contains 5 event labels: \\( A, B, C, D \\) and \\( E \\), where \\( E \\) is the event for prediction, and \\( A, B, C, D \\) are causal events. The wCL formula used to generate the occurrence of event \\( E \\) in the second synthetic dataset is set as" + }, + { + "type": "equation", + "bbox": [ + 0.283, + 0.856, + 0.825, + 0.875 + ], + "angle": 0, + "content": "\\[\n\\hat {\\phi} _ {2} = \\left(c _ {A} - c _ {B} > 0. 5\\right) ^ {1} \\wedge \\left(c _ {A} - c _ {C} > 1. 
5\\right) ^ {1} \\wedge \\left(c _ {C} - c _ {D} > 2\\right) ^ {1}, \\tag {46}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "whose unweighted version reads as \"If \\( A \\) happens before \\( B \\) for at least 0.5 time units, \\( A \\) happens before \\( C \\) for at least 1.5 time units, and \\( C \\) happens before \\( D \\) for at least 2 time units, then \\( E \\) will happen.\"" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.107, + 0.734, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.249, + 0.332, + 0.747, + 0.35 + ], + "angle": 0, + "content": "Figure 6: Model structure of \\(\\hat{\\phi}_2\\) for generating the second synthetic dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.374, + 0.825, + 0.435 + ], + "angle": 0, + "content": "The occurrence of events \\( A, B, C \\) and \\( D \\) are generated using Algorithm 1, in which \\( \\lambda_A = \\lambda_B = \\lambda_C = \\lambda_D = 0.2 \\). After obtaining the occurrence of \\( A, B, C \\) and \\( D \\), we simulate the generation of event label \\( E \\) using Algorithm 2, in which the intensity rate \\( \\lambda_{E|\\hat{\\phi}_2}(t) \\) is computed using the model shown in Figure 6." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.44, + 0.825, + 0.495 + ], + "angle": 0, + "content": "Results. The rules learned by CLNN, TELLER and OGEM-tab on the second synthetic dataset are presented in Table 6, where the paired order predicate with the highest weight is presented. 
It can be clearly observed that by truncating the predicates with small weights, CLNN learns a wCL formula as:" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.495, + 0.825, + 0.513 + ], + "angle": 0, + "content": "\\[\n\\phi_ {2} = \\left(c _ {A} - c _ {B} > 0. 7 7\\right) ^ {1. 2 7} \\wedge \\left(c _ {A} - c _ {C} > 2. 0 9\\right) ^ {1. 1 5} \\wedge \\left(c _ {C} - c _ {D} > 2. 6 0\\right) ^ {1. 0 6}, \\tag {47}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.517, + 0.827, + 0.6 + ], + "angle": 0, + "content": "whose order representation match well with the ground-truth rule. Nevertheless, TELLER's rule only captures the ordering between \\( A \\), \\( B \\) and \\( E \\), whereas the ordering between \\( A \\) and \\( B \\) or \\( B \\) and \\( C \\) or \\( C \\) and \\( D \\) are not learned. OGEM-tab's rules can only capture the relation between event label \\( D \\) and event label \\( E \\) can excite the occurrence of event label \\( E \\), whereas not able to capture the dependence of event label \\( E \\)'s occurrence on the order relation between \\( A \\) and \\( B \\) or \\( B \\) and \\( C \\) or \\( C \\) and \\( D \\)." + }, + { + "type": "table", + "bbox": [ + 0.233, + 0.611, + 0.763, + 0.817 + ], + "angle": 0, + "content": "
DatasetSyn-2
N (# events)N=5, L={A,B,C,D,E}
Ground truthφ2=(cA-cB>0.5)1∧(cB-cC>1.5)1∧(cC-cD>2)1
CLNN's rule(cA-cB>0.77)1.27∧(cA-cC>2.09)1.15∧((cA-cD)>−5.00)0.25∧((cA-cE)>−2.74)0.09∧(cB-cC>−9.31)0.02∧(cB-cD>−8.54)0.08∧(cB-cE>2.07)0∧((cC-cD)>2.60)1.06∧((cC-cE)>−4.27)0.03∧((cD-cE)>1.17)0.07
TELLER's ruleA before E, B before E, A and B before E, A and C before E
OGEM-tab's ruleExcitation: [D], [D,E], [E], [E,D]Inhibitory: [D,A], [A], [A,D], [A,D,E], [E,D,A], [D,A,E], [A,E], [E,A], [D,E,A], [A,E,D], [E,A,D]
" + }, + { + "type": "table_caption", + "bbox": [ + 0.218, + 0.826, + 0.779, + 0.842 + ], + "angle": 0, + "content": "Table 6: Comparison of rule discovery for CLNN and TELLER on the Syn-2 dataset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.449, + 0.884 + ], + "angle": 0, + "content": "C.3 SYNTHETIC DATASET 3 (SYN-3)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "The third synthetic dataset is generated using a more interesting scheme by combining the generation schemes of the first synthetic dataset and the second synthetic dataset. The third synthetic dataset" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.319, + 0.104, + 0.675, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.203, + 0.325, + 0.794, + 0.341 + ], + "angle": 0, + "content": "Figure 7: Model structure of \\(\\hat{\\phi}_{3,1}\\) for generating the occurrence of \\(D\\) in the Syn-3 dataset." + }, + { + "type": "image", + "bbox": [ + 0.292, + 0.354, + 0.706, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.203, + 0.555, + 0.794, + 0.571 + ], + "angle": 0, + "content": "Figure 8: Model structure of \\(\\hat{\\phi}_{3,2}\\) for generating the occurrence of \\(E\\) in the Syn-3 dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.596, + 0.825, + 0.653 + ], + "angle": 0, + "content": "includes five event labels: \\( A, B, C, D \\) and \\( E \\). Here we consider \\( A, B \\), and \\( C \\) as the causal events for the occurrence of \\( D \\), and \\( A, B, C \\), and \\( D \\) as the causal events for the occurrence of \\( E \\). 
The occurrence of events \\( A, B, C \\) are generated using Algorithm 1, in which \\( \\lambda_{A} = 0.2 \\), \\( \\lambda_{B} = 0.2 \\), and \\( \\lambda_{C} = 0.2 \\). The wCL formula used to generate the occurrence of event \\( D \\) is set as" + }, + { + "type": "equation", + "bbox": [ + 0.341, + 0.659, + 0.825, + 0.679 + ], + "angle": 0, + "content": "\\[\n\\hat {\\phi} _ {3, 1} = \\left(c _ {B} - c _ {A} > - 2\\right) ^ {1} \\wedge \\left(c _ {C} - c _ {A} > - 5\\right) ^ {1}, \\tag {48}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.825, + 0.744 + ], + "angle": 0, + "content": "whose unweighted version reads as \"If \\( A \\) happens before \\( B \\) for less than 2 time units, and \\( A \\) happens before \\( C \\) for less than 5 time units, then \\( D \\) will happen.\" The generation of \\( D \\)'s occurrence follows Algorithm 2, where \\( \\lambda_{D|\\hat{\\phi}_{3,1}}(t) \\) is computed using the model shown in Figure 7. We call the third synthetic dataset at this step as Syn-3.1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.749, + 0.827, + 0.779 + ], + "angle": 0, + "content": "After obtaining the occurrences of events \\( A, B, C \\), and \\( D \\), we could simulate the occurrence of \\( E \\) using the following formula:" + }, + { + "type": "equation", + "bbox": [ + 0.272, + 0.785, + 0.825, + 0.804 + ], + "angle": 0, + "content": "\\[\n\\hat {\\phi} _ {3, 2} = \\left(c _ {B} - c _ {A} > - 5\\right) ^ {1} \\wedge \\left(c _ {C} - c _ {B} > - 4\\right) ^ {1} \\wedge \\left(c _ {D} - c _ {C} > - 3\\right) ^ {1}. \\tag {49}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.809, + 0.826, + 0.854 + ], + "angle": 0, + "content": "Similarly, the generation of \\( E \\)'s occurrence follows Algorithm 2, where the intensity rate \\( \\lambda_{E|\\hat{\\phi}_{3,2}}(t) \\) is computed using the model shown in Figure 8. We call the third synthetic dataset at this step as Syn-3.2." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.861, + 0.233, + 0.874 + ], + "angle": 0, + "content": "Results." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "The rules learned by CLNN, TELLER, and OGEM-tab on the cause of event \\( D \\) in the third synthetic dataset are presented in Table 7, where the paired order predicate with the highest weight among the two candidates is reported. It can be clearly observed that by truncating the predicates with small" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.445, + 0.119 + ], + "angle": 0, + "content": "weights, CLNN learns a wCL formula as" + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.126, + 0.825, + 0.143 + ], + "angle": 0, + "content": "\\[\n\\phi_ {3, 1} = \\left(c _ {B} - c _ {A} > - 1. 8 5\\right) ^ {1. 7 2} \\wedge \\left(c _ {C} - c _ {A} > - 3. 9 0\\right) ^ {1. 5 9}, \\tag {50}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.151, + 0.827, + 0.207 + ], + "angle": 0, + "content": "whose order representation match well with the ground-truth rule. On the other hand, TELLER's rule only reveals the temporal relation between event labels \\( A \\), \\( B \\), \\( C \\) and \\( D \\), but it does not capture the temporal relation between event labels \\( A \\) and \\( B \\) or \\( A \\) and \\( C \\). In addition, we could observe that OGEM-tab does not capture that \\( C \\) is a parent event of \\( D \\)." + }, + { + "type": "table", + "bbox": [ + 0.188, + 0.22, + 0.808, + 0.37 + ], + "angle": 0, + "content": "
DatasetSyn-3.1
N (# events)N = 5, L = {A, B, C, D, E}
Ground truth\\(\\hat{\\phi}_{3,1} = (c_B - c_A > -2)^1 \\wedge (c_C - c_A > -5)^1\\)
CLNN's rule\\((c_B - c_A > -1.85)^{1.72} \\wedge (c_C - c_A > -3.90)^{1.59} \\wedge ((c_D - c_A) > -16.25)^{0.33} \\wedge ((c_C - c_B) > -3.01)^0 \\wedge (c_D - c_B > -7.37)^{0.02} \\wedge (c_D - c_C > -7.55)^0\\)
TELLER's ruleA before D, B before D, C before D
OGEM-tab's ruleExcitation: [A], [A, B, D], [B, D, A], [D, A], [D, A, B], [B, A], [A, D], [D], [B, A, D], [D, B, A]Inhibitory: [A, B], [B, D], [B], [A, D, B], [D, B]
" + }, + { + "type": "table_caption", + "bbox": [ + 0.187, + 0.38, + 0.809, + 0.396 + ], + "angle": 0, + "content": "Table 7: Comparison of rule discovery of \\( {\\phi }_{3,1} \\) for CLNN and TELLER on the Syn-3.1 dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.41, + 0.825, + 0.44 + ], + "angle": 0, + "content": "The rules learned by CLNN, TELLER, and GEM on the cause of event \\( E \\) in the third synthetic dataset are presented in Table 8, in which the discrete wCL formula learned by CLNN is" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.445, + 0.825, + 0.466 + ], + "angle": 0, + "content": "\\[\n\\phi_ {3, 2} = \\left(c _ {B} - c _ {A} > - 3. 9 4\\right) ^ {1. 4 9} \\wedge \\left(c _ {C} - c _ {B} > - 3. 0 2\\right) ^ {2. 0 3} \\wedge \\left(\\left(c _ {D} - c _ {C}\\right) > - 2. 0 0\\right) ^ {1. 9 2}. \\tag {51}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.47, + 0.827, + 0.542 + ], + "angle": 0, + "content": "It is obvious that \\(\\phi_{3,2}\\) is able to learn the temporal relation between \\(A\\) and \\(B\\), \\(B\\) and \\(C\\), and \\(C\\) and \\(D\\). However, TELLER's rules only reflect the temporal relation between \\(A\\), \\(B\\), \\(C\\) and \\(E\\), which cannot give the information about the temporal relation between \\(A\\) and \\(B\\), or \\(B\\) and \\(C\\), or \\(C\\) and \\(D\\). OGEM-tab's rule indicates that it considers event labels \\(A\\), \\(D\\), \\(E\\) as the parent events of \\(D\\), which does not match with the ground-truth parent set." + }, + { + "type": "table", + "bbox": [ + 0.256, + 0.553, + 0.74, + 0.774 + ], + "angle": 0, + "content": "
DatasetSyn-3.2
N (# events)N=5, L={A,B,C,D,E}
Ground truth\\(\\hat{\\phi}_{3,2}=(c_{B}-c_{A}> -5)^{1}\\wedge(c_{C}-c_{B}> -4)^{1}\\wedge(c_{D}-c_{C}> -3)^{1}\\)
CLNN's rule\\((c_{B}-c_{A}> -3.94)^{1.49}\\wedge(c_{C}-c_{A}> -9.12)^{0.25}\\wedge((c_{D}-c_{A})> -1.42)^{0.13}\\wedge((c_{E}-c_{A})> -3.88)^{0.15}\\wedge(c_{C}-c_{B}> -3.02)^{2.03}\\wedge(c_{D}-c_{B}> -6.27)^{0.02}\\wedge(c_{E}-c_{B}> -7.30)^{0.04}\\wedge((c_{D}- c_{C})> -2.00)^{1.92}\\wedge((c_{E}-c_{C})> -5.30)^{0.09}\\wedge((c_{E}-c_{D})> -1.57)^{0.01}\\)
TELLER's ruleA before E, B before E, C before E
OGEM-tab's ruleExcitation: [A,D], [D,A], [D,E], [E], [A,D, E], [D,E,A], [E,A], [A,E], [E,A,D], [A,E, D], [D,A,E], [E,D,A]Inhibitory: [A], [D], [E,D]
" + }, + { + "type": "table_caption", + "bbox": [ + 0.187, + 0.784, + 0.809, + 0.8 + ], + "angle": 0, + "content": "Table 8: Comparison of rule discovery of \\( {\\phi }_{3,2} \\) for CLNN and TELLER on the Syn-3.2 dataset." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.825, + 0.724, + 0.84 + ], + "angle": 0, + "content": "C.4 QUANTITATIVE COMPARISON OF CLNN'S RULES WITH GROUND TRUTH" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.851, + 0.827, + 0.929 + ], + "angle": 0, + "content": "To quantitatively evaluate the difference between the ground-truth rules and the rules learned by CLNN, we adopt the Jaccard similarity score to assess the learned formulas against the ground truth. Let \\(\\mathcal{G}\\) denote the set of paired ordering representations from the ground-truth rule, and \\(\\mathcal{C}\\) denote the set of paired ordering representations from the learned rules, the Jaccard similarity score is calculated as \\(J = \\frac{|\\mathcal{C} \\cap \\mathcal{G}|}{|\\mathcal{C} \\cup \\mathcal{G}|}\\). 
For TELLER and OGEM-tab, the ordering representations are extracted" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.109, + 0.495, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.334, + 0.279, + 0.353, + 0.292 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.108, + 0.804, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.64, + 0.279, + 0.66, + 0.291 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.302, + 0.49, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.333, + 0.473, + 0.351, + 0.485 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.304, + 0.795, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.638, + 0.472, + 0.657, + 0.484 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.501, + 0.825, + 0.531 + ], + "angle": 0, + "content": "Figure 9: Comparison of ground-truth rules with CLNN's rules in terms of Jaccard similarity score for a) Syn-1, b) Syn-2, c) Syn-3.1, d) Syn-3.2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.561, + 0.825, + 0.631 + ], + "angle": 0, + "content": "from the excitation rules. The comparison of Jaccard similarity score for the synthetic datasets is shown in Figure 9, where the Jaccard similarity score of 0 is manually set to the minimum threshold 0.05 for clarity purposes. 
It is clearly observed that the Jaccard similarity scores for CLNN are higher than the ones for TELLER or OGEM, implying the rules discovered by CLNN are more consistent with the ground truth." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.654, + 0.749, + 0.669 + ], + "angle": 0, + "content": "C.5 STABILITY ANALYSIS OF CLNN'S RULES WITH RESPECT TO INITIALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.682, + 0.825, + 0.711 + ], + "angle": 0, + "content": "To further validate the model's stability in learning wCL rules, different parameter initialization methods are carried out, including:" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.725, + 0.825, + 0.754 + ], + "angle": 0, + "content": "1. rand - parameter initialization as random numbers from a uniform distribution on the interval [0, 1);" + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.763, + 0.765, + 0.777 + ], + "angle": 0, + "content": "2. randn - random numbers from a normal distribution with mean 0 and variance 1;" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.788, + 0.416, + 0.802 + ], + "angle": 0, + "content": "3. ones - constant values of 1;" + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.811, + 0.825, + 0.841 + ], + "angle": 0, + "content": "4. xavier - random numbers from a uniform distribution on the interval \\([-1/\\sqrt{n}, 1/\\sqrt{n}]\\), where \\(n\\) is the dimension of the parameter." + }, + { + "type": "list", + "bbox": [ + 0.209, + 0.725, + 0.825, + 0.841 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.826, + 0.926 + ], + "angle": 0, + "content": "The rules learned by CLNN for the above parameter initializations are summarized in Table 9. By inspecting the rules for different initialization methods, it is clear that CLNN can still recover the correct paired order representations even if initializing the learning process from a different position. 
In the meantime, the logic formulas learned by CLNN are stable as the variance of learned parameters is relatively small." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.197, + 0.102, + 0.803, + 0.359 + ], + "angle": 0, + "content": "
DatasetInitializationRules
Syn - 1Ground truth\\(\\hat{\\phi}=(c_{A}-c_{B}>1)^{1}\\wedge(c_{A}-c_{C}>3)^{1}\\)
rand\\(\\phi=(c_{A}-c_{B}>1.21)^{1.52}\\wedge(c_{A}-c_{C}>3.00)^{1.41}\\)
randn\\(\\phi=(c_{A}-c_{B}>1.21)^{1.58}\\wedge(c_{A}-c_{C}>3.32)^{1.56}\\)
ones\\(\\phi=(c_{A}-c_{B}>1.17)^{1.59}\\wedge(c_{A}-c_{C}>3.14)^{1.32}\\)
xavier\\(\\phi=(c_{A}-c_{B}>1.12)^{1.45}\\wedge(c_{A}-c_{C}>3.20)^{1.33}\\)
Syn - 2Ground truth\\(\\hat{\\phi}=(c_{A}-c_{B}>0.5)^{1}\\wedge(c_{A}-c_{C}>1.5)^{1}\\wedge(c_{C}-c_{D}>2)^{1}\\)
rand\\(\\phi=(c_{A}-c_{B}>0.77)^{1.27}\\wedge(c_{A}-c_{C}>2.09)^{1.15}\\wedge((c_{C}-c_{D})>2.60)^{1.06}\\)
randn\\(\\phi=(c_{A}-c_{B}>0.80)^{1.97}\\wedge(c_{A}-c_{C}>1.92)^{1.62}\\wedge((c_{C}-c_{D})>1.74)^{1.45}\\)
ones\\(\\phi=(c_{A}-c_{B}>1.03)^{1.63}\\wedge(c_{A}-c_{C}>1.92)^{1.50}\\wedge((c_{C}-c_{D})>2.03)^{1.44}\\)
xavier\\(\\phi=(c_{A}-c_{B}>0.97)^{1.92}\\wedge(c_{A}-c_{C}>2.07)^{1.63}\\wedge((c_{C}-c_{D})>1.97)^{1.62}\\)
Syn - 3.1Ground truth\\(\\hat{\\phi}=(c_{B}-c_{A}>-2)^{1}\\wedge(c_{C}-c_{A}>-5)^{1}\\)
rand\\(\\phi=(c_{B}-c_{A}>-1.85)^{1.72}\\wedge(c_{C}-c_{A}>-3.90)^{1.59}\\)
randn\\(\\phi=(c_{B}-c_{A}>-1.98)^{1.51}\\wedge(c_{C}-c_{A}>-3.89)^{1.68}\\)
ones\\(\\phi_{3,1}=(c_{B}-c_{A}>-1.94)^{1.84}\\wedge(c_{C}-c_{A}>-3.68)^{2.33}\\)
xavier\\(\\phi_{3,1}=(c_{B}-c_{A}>-1.89)^{1.54}\\wedge(c_{C}-c_{A}>-3.92)^{1.62}\\)
Syn - 3.2Ground truth\\(\\hat{\\phi}=(c_{B}-c_{A}>-5)^{1}\\wedge(c_{C}-c_{B}>-4)^{1}\\wedge(c_{D}-c_{C}>-3)^{1}\\)
rand\\(\\phi=(c_{B}-c_{A}>-3.94)^{1.49}\\wedge(c_{C}-c_{B}>-3.02)^{2.03}\\wedge((c_{D}-c_{C})> -2.00)^{1.92}\\)
randn\\(\\phi=(c_{B}-c_{A}>-3.79)^{1.71}\\wedge(c_{C}-c_{B}>-3.04)^{1.89}\\wedge((c_{D}-c_{C})> -1.68)^{1.65}\\)
ones\\(\\phi=(c_{B}-c_{A}>-3.53)^{1.66}\\wedge(c_{C}-c_{B}>-3.09)^{1.88}\\wedge((c_{D}-c_{C})> -1.25)^{1.81}\\)
xavier\\(\\phi=(c_{B}-c_{A}>-3.71)^{1.53}\\wedge(c_{C}-c_{B}>-3.09)^{2.04}\\wedge((c_{D}-c_{C})> -1.86)^{1.73}\\)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.189, + 0.369, + 0.807, + 0.384 + ], + "angle": 0, + "content": "Table 9: Comparison of rules learned by CLNN for different parameter initialization methods." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.419, + 0.57, + 0.433 + ], + "angle": 0, + "content": "C.6 ANALYSIS OF LOGICAL CONSTRAINTS ON THE LL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.448, + 0.825, + 0.52 + ], + "angle": 0, + "content": "In this part, we investigate the effect of the interpretability using an experiment of the impact of logical constraints on the model's performance. The log-likelihood on the synthetic datasets for CLNN with and without logical constraints is summarized in Table 10. Table 10 demonstrates that the log-likelihood for CLNN with logical constraints is higher than the log-likelihood for CLNN without constraints, implying that interpretability (logical constraints) is helpful to improve the performance." + }, + { + "type": "table", + "bbox": [ + 0.275, + 0.54, + 0.722, + 0.62 + ], + "angle": 0, + "content": "
DatasetCLNN with constraintsCLNN without constraints
Syn - 1-7821-8716
Syn - 2-6075-6942
Syn - 3.1-10898-11583
Syn - 3.2-10919-11230
" + }, + { + "type": "table_caption", + "bbox": [ + 0.246, + 0.629, + 0.751, + 0.645 + ], + "angle": 0, + "content": "Table 10: Comparison of LL for CLNN with and without logical constraints." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.69, + 0.644, + 0.707 + ], + "angle": 0, + "content": "D EXPERIMENT RESULTS OF REAL-WORLD DATASETS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.728, + 0.36, + 0.743 + ], + "angle": 0, + "content": "D.1 LINKEDIN DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.758, + 0.827, + 0.925 + ], + "angle": 0, + "content": "The LinkedIn dataset is a collection of job hopping records between 82 IT companies of 3,000 LinkedIn users. Each event stream represents a user's check-in time stamps for different companies or role changes within the same company. Here we select 1000 users' event streams to compose the dataset by filtering out the event streams with uncommon companies, resulting in 10 event labels: \\(\\mathcal{L} = \\{A,B,C,D,E,F,G,H,I,J\\}\\). Here we set the number of formulas as 5, i.e., \\(\\Phi = \\{\\phi_1,\\phi_2,\\phi_3,\\phi_4,\\phi_5\\}\\), each of which embodies a model structure shown in Figure 2(a) and CLNN aims to learn the parameters for each formula. The weight parameters in the paired order cell or the singleton order cell are initialized as random variables following a Gaussian distribution, and the bias terms of conjunction or disjunction operators are initialized as 1. The architecture weights are initialized as random variables following a Gaussian distribution, and the formula impact weights and bias are initialized as Gaussian random variables. The detailed log-likelihood for each event label is summarized in Table 11." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.388, + 0.102, + 0.609, + 0.27 + ], + "angle": 0, + "content": "
Event LabelLog-likelihood
A-180.59
B-177.80
C-89.49
D-140.31
E-132.83
F-76.63
G-106.23
H-103.33
I-95.51
J-125.45
" + }, + { + "type": "table_caption", + "bbox": [ + 0.27, + 0.28, + 0.726, + 0.296 + ], + "angle": 0, + "content": "Table 11: Log likelihood for each event label in the LinkedIn dataset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.322, + 0.36, + 0.336 + ], + "angle": 0, + "content": "D.2 MIMIC II DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.349, + 0.827, + 0.448 + ], + "angle": 0, + "content": "MIMIC II dataset is obtained from the intensive care unit research database that consists of 25,328 intensity care unit stays. The records include laboratory data, therapeutic intervention profiles such as nursing progress notes, discharge summaries and others. Here we restrict the event types to the diagnosis of patients and filter out the shorter event sequences with few visits, ending up with 650 patients and 15 event labels: \\(\\mathcal{L} = \\{1,2,8,9,11,12,14,20,21,22,23,26,27,42,47\\}\\). Similar to the setting for the LinkedIn dataset, where the initialization of parameters follow the same setting as the LinkedIn dataset. The detailed log-likelihood for each event label is presented in Table 12." + }, + { + "type": "table", + "bbox": [ + 0.386, + 0.461, + 0.609, + 0.699 + ], + "angle": 0, + "content": "
Event LabelLog-likelihood
1-72.14
2-62.33
8-5.98
9-51.34
11-43.64
12-25.81
14-69.73
20-5.96
21-6.08
22-10.47
23-10.64
26-27.08
27-27.42
42-5.95
47-10.54
" + }, + { + "type": "table_caption", + "bbox": [ + 0.267, + 0.709, + 0.731, + 0.725 + ], + "angle": 0, + "content": "Table 12: Log likelihood for each event label in the MIMIC II dataset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.759, + 0.419, + 0.773 + ], + "angle": 0, + "content": "D.3 STACK OVERFLOW DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Stack Overflow is a question-and-answer website spanning a wide range of domains. A badge rewarding scheme is exploited to encourage users to participate in the questioning and answering activities. The badge system of Stack Overflow comprises 81 types of non-topical badges, including the badges that can be awarded only once and the badges that can be awarded several times. The dataset in (Du et al., 2016) was obtained by first filtering out the badges that can be awarded only once, then restricting to the users who have acquired at least 40 badges from 2012-01-01 to 2014-01-01, from which the badges have been awarded more than 100 times are selected as the determinate dataset. Our dataset was acquired by retaining the event streams with one or more of the 20 types of specified badges and then randomly sampling 1000 users to obtain 1000 event streams. The detailed log-likelihood for each event label in the Stack Overflow dataset is summarized in Table 13." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.387, + 0.102, + 0.609, + 0.408 + ], + "angle": 0, + "content": "
Event LabelLog-likelihood
1-3791
2-1451
3-538
4-17656
5-3574
6-3559
7-1381
8-1330
9-10961
10-1105
11-189
12-2012
13-673
14-1340
15-406
16-117
17-186
18-330
19-282
20-100
" + }, + { + "type": "table_caption", + "bbox": [ + 0.25, + 0.419, + 0.747, + 0.435 + ], + "angle": 0, + "content": "Table 13: Log likelihood for each event label in the Stack Overflow dataset." + }, + { + "type": "table", + "bbox": [ + 0.294, + 0.446, + 0.703, + 0.531 + ], + "angle": 0, + "content": "
DatasetCLNN with SOPCLNN without SOP
LinkedIn-1228-1344
MIMIC II-436-480
Stack Overflow-50981-51195
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.541, + 0.825, + 0.556 + ], + "angle": 0, + "content": "Table 14: Comparison of log-likelihood for CLNN with and without SOP on the real-world datasets." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.581, + 0.649, + 0.596 + ], + "angle": 0, + "content": "D.4 ANALYSIS OF EXPRESSIVENESS ON MODEL'S PERFORMANCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.607, + 0.827, + 0.692 + ], + "angle": 0, + "content": "In this part, we conduct an experiment by training the CLNN without the singleton order cell (SOC) on real-world datasets to show the effectiveness of the singleton order predicates. The comparison of log-likelihood for CLNN with SOC and CLNN without SOC is summarized in Table 14. As evidenced by Table 14, the log-likelihood of CLNN with SOP is higher than the log-likelihood of CLNN without SOP, meaning enriching the expressiveness of wCL formulas can better explain the generative mechanism of events." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "24" + } + ] +] \ No newline at end of file diff --git a/2023/Weighted Clock Logic Point Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_origin.pdf b/2023/Weighted Clock Logic Point Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3a9c3077ceb8a2c6c06c267d77cd7d84c8d9b9b8 --- /dev/null +++ b/2023/Weighted Clock Logic Point Process/3eef33de-4305-442c-87ae-f007ec3ea0e2_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c36453a5123425f0cff87356182e77a2ed246cb3d6c688bb81de41a770e04a97 +size 1093684 diff --git a/2023/Weighted Clock Logic Point Process/full.md b/2023/Weighted Clock Logic Point Process/full.md new file mode 100644 index 0000000000000000000000000000000000000000..9eab178eed5253387b31a443f9477f4ee94895d1 --- /dev/null +++ b/2023/Weighted Clock Logic Point Process/full.md @@ 
-0,0 +1,774 @@ +# WEIGHTED CLOCK LOGIC POINT PROCESS + +Ruixuan Yan $^{1}$ , Yunshi Wen $^{1}$ , Debarun Bhattacharjya $^{2}$ , Ronny Luss $^{2}$ , Tengfei Ma $^{2}$ , Achille Fokoue $^{2}$ , and Agung Julius $^{1}$ + +$^{1}$ Rensselaer Polytechnic Institute + $^{2}$ IBM T.J. Watson Research Center + +# ABSTRACT + +Datasets involving multivariate event streams are prevalent in numerous applications. We present a novel framework for modeling temporal point processes called clock logic neural networks (CLNN) which learn weighted clock logic (wCL) formulas as interpretable temporal rules by which some events promote or inhibit other events. Specifically, CLNN models temporal relations between events using conditional intensity rates informed by a set of wCL formulas, which are more expressive than related prior work. Unlike conventional approaches of searching for generative rules through expensive combinatorial optimization, we design smooth activation functions for components of wCL formulas that enable a continuous relaxation of the discrete search space and efficient learning of wCL formulas using gradient-based methods. Experiments on synthetic datasets manifest our model's ability to recover the ground-truth rules and improve computational efficiency. In addition, experiments on real-world datasets show that our models perform competitively when compared with state-of-the-art models. + +# 1 INTRODUCTION AND RELATED WORK + +Multivariate event streams are emerging types of data that involve occurrences of different types of events in continuous time. Event streams are observed in a wide range of applications, including but not limited to finance (Bacry et al., 2015), politics (O'Brien, 2010), system maintenance (Gunawardana et al., 2011), healthcare (Weiss & Page, 2013), and social networks (Farajtabar et al., 2015). 
As opposed to time series data that typically comprises continuous-valued variables evolving in regular discrete time stamps, event streams involve events occurring irregularly and asynchronously in continuous time. Modeling the dynamics in event streams is important for a wide range of scientific and industrial processes, such as predicting the occurrence of events of interest or understanding why some deleterious events occur so as to possibly prevent their occurrence. A (multivariate) temporal point process (TPP) provides a formal mathematical framework for representing event streams, where a conditional intensity rate for each event measures its occurrence rate at any time given the historical events in the stream (Daley & Vere-Jones, 2003; Aalen et al., 2008). + +There has been a proliferation of research around TPPs in recent years, particularly around the use of neural networks for modeling conditional intensity rates as a function of historical occurrences (Du et al., 2016; Mei & Eisner, 2017; Xiao et al., 2017; Xu et al., 2017; Gao et al., 2020; Zhang et al., 2020; Zuo et al., 2020). One stream of research studies graphical event models (GEMs) as a compact and interpretable graphical representation for TPPs, where the conditional intensity rate for any particular event depends only on the history of a subset of the events (Didelez, 2008; Gunawardana & Meek, 2016). While any TPP can be represented as a GEM, various models make assumptions about the parametric form of conditional intensity rates for the sake of learnability, for instance that rates are piece-wise constant with respect to occurrences within historical windows (Gunawardana et al., 2011; Bhattacharjya et al., 2018). Ordinal GEMs(OGEM) (Bhattacharjya et al., 2020; 2021) are a recent model from this family where a conditional intensity rate depends on the order in which parent events occur within the most recent historical time period. 
+ +A temporal logic point process (TLPP) framework was proposed as an alternate way to lend some interpretability to TPPs by modeling intensity rates using temporal logic rules (Li et al., 2020). Although the initial work pre-specified temporal logic rules, recent work has introduced a temporal logic rule learner (TELLER) for automatically discovering rules (Li et al., 2021). There is however + +the issue of scalability since TELLER exploits an expensive branch-and-price algorithm to search for temporal logic rules in a discrete space. Another important limitation of this work is that TELLER's rules are not informative enough to explain how the interval length between ordered events impacts the conditional intensity rate. For instance, while predicting the occurrence of diabetes, the rule that "insulin injection happens 20 minutes before eating meal" is more informative and accurate in predicting "blood glucose remains normal" than the rule that "insulin injection happens before eating meal", as the latter rule cannot expose the interval between 'insulin injection' and 'eating meal'. To tackle the above limitations, we propose novel atomic predicates enriching the expressiveness of temporal logic rules as well as a differentiable framework to learn rules in an end-to-end manner. + +This work introduces a differentiable neuro-symbolic framework, clock logic neural network (CLNN), to model TPPs by learning weighted clock logic (wCL) formulas as explanations. Firstly, event streams are converted into continuous-time clock signals representing the time interval between the last occurrence of an event and the current time. Next, we propose a novel wCL to describe the underlying temporal relations with relative interval length, enabling the design of a CLNN to learn the generative mechanisms. 
Instead of searching for temporal logic rules in some vast discrete space, CLNN associates every neuron with an order representation or a logical operator and assigns weights to edges to reflect the importance of various inputs, which relaxes the search space to be continuous. Moreover, architecture weights are introduced into CLNN to make the formula structure search differentiable. wCL formula-informed intensity rates are carefully designed so that the parameters appearing in the rules can be learned through maximum likelihood estimation using gradient-based approaches. CLNN is tested on synthetic datasets to show that CLNN can recover the ground-truth rules as well as on real-world datasets to demonstrate its model-fitting performance. + +# 2 PRELIMINARIES + +# 2.1 NOTATION & BACKGROUND + +Let $\mathcal{L}$ denote the set of event labels, and $M = |\mathcal{L}|$ denote the number of event labels. An event stream is a sequence of events including time stamps, denoted as $\mathcal{D} = \{(l_1,t_1),(l_2,t_2),\dots,(l_N,t_N)\}$ , where $t_i\in \mathbb{R}^+$ denotes a time stamp between the beginning time $t_0 = 0$ and end time $t_{N + 1} = T$ , and $l_{i}\in \mathcal{L}$ is the event label that happens at $t_i$ . We refer to 'event label' and 'label' interchangeably. Every event label $l\in \mathcal{L}$ has an associated conditional intensity rate describing the occurrence rate of label $l$ at $t$ given the history up to $t$ . In multivariate temporal point processes, conditional intensity rates describe the dynamics of events. Let $\mathcal{H}_t = \{(l_i,t_i):t_i < t\}$ denote the historical events up to time $t$ . The conditional intensity rate of event label $l$ is denoted as $\lambda_l(t|\mathcal{H}_t)$ . 
Specifically, $\lambda_l(t|\mathcal{H}_t)$ describes the expected number of occurrences of event label $l$ in an infinitesimal interval $[t,t + \Delta t]$ given the history $\mathcal{H}_t$ , i.e., $\lambda_l(t|\mathcal{H}_t) = \lim_{\Delta t\to 0}(E[N_l(t + \Delta t) - N_l(t)|\mathcal{H}_t] / \Delta t)$ , where $N_{l}(t)$ denotes the number of event label $l$ 's occurrences up to $t$ . + +Example 1 A running example of an event stream with 11 events of 4 labels is shown in Figure 1(a). + +![](images/557ee827b58081cb6292fa7bca2267a38e9181cbaf851f056a773b70a640ed44.jpg) +(a) +(b) +Figure 1: (a): An event stream example with $N = 11$ events of $M = 4$ event labels over $T = 30$ days. (Integer-valued time stamps are utilized for easy interpretation, note that the proposed approach also works for $t_i \in \mathbb{R}$ ). (b): The overall workflow of the proposed method (POC: paired order cell, SOC: singleton order cell, AC: architecture cell, details presented in Section 2.2 to 3.3). + +# 2.2 ORDER REPRESENTATIONS FOR EVENT STREAMS + +The overall workflow of the proposed framework is visualized as Figure 1(b). The raw event streams first go through a masking function to generate the masked event streams, which are then transformed into event clocks using a clocking function. The event clocks are given as inputs to the clock logic neural network (CLNN) to learn interpretable wCL formulas and the intensity rate of event occurrences. The following sections provide a detailed explanation for each module in Figure 1(b). + +We are interested in exploring the effect of temporal ordering between event labels and the occurrences of causal event labels in a historical window on the occurrence rate of a particular event label, + +where the generative mechanism is expressed as interpretable formulas. 
An event stream up to $t$ may include multiple occurrences of the same event label, thus a masking function is required to mask out duplicated event labels in the history for accessing the ordering information at any $t$ . Here we adopt a technique similar to Bhattacharjya et al. (2020) for extracting distinct event labels from $\mathcal{H}_t$ . + +Definition 1 (Masking Function) A masking function $\Gamma(\cdot)$ is a function that takes an event stream as input and returns a new event stream that is a subset of the input stream and contains no duplicated event labels. Mathematically, $\Gamma(\cdot)$ is applied to $\mathcal{H}_t = \{(l_i, t_i)\}$ and converts it into a new stream $\mathcal{H}_t' = \{(l_j, t_j) \in \mathcal{H}_t : l_j \neq l_{j'} \text{ if } j \neq j'\}$ . + +We consider the following two masking functions as per Bhattacharjya et al. (2020) due to simplicity: 'first' masking and 'last' masking. The 'first' (resp. 'last') masking function keeps the first (resp. last) occurrence of an event label in an event stream. + +Example 1 (cont.) Let $\mathcal{H}_{13} = \{(A,1),(B,3),(A,6),(D,8),(C,10),(D,12)\}$ . The 'first' masking function converts it to $\mathcal{H}_{13}' = \{(A,1),(B,3),(D,8),(C,10)\}$ , and the 'last' masking function converts it to $\mathcal{H}_{13}' = \{(B,3),(A,6),(C,10),(D,12)\}$ . + +With the masked event history $\mathcal{H}_t^\prime$ , we define two order representations for the order relationship between any two event labels and the occurrence of an event within a historical window of $t$ . + +Definition 2 (Paired Order Representation (POR)) A paired order representation is defined as $[l_i, l_j] \in [\mathcal{L}]^2$ , where $[\mathcal{L}]^2$ denotes two-element permutation of a subset of $\mathcal{L}$ . A paired order representation for $\mathcal{H}_t^\prime$ can be obtained by arranging any two distinct labels in $\mathcal{H}_t^\prime$ in a sequential order. 
+ +Definition 3 (Singleton Order Representation (SOR)) A singleton order representation is denoted as $[l_j, \underline{u}_{l_j}] \in \mathcal{L} \times \mathbb{R}_+$ , representing event label $l_j \in \mathcal{L}$ occurred within the past $\underline{u}_{l_j}$ time units, where $\underline{u}_{l_j}$ is a variable to learn through a process that will be explained in Section 3.3. + +Example 1 (cont.) With first masking, an example of paired order representation for $\mathcal{H}_{13}^{\prime}$ can be $[A,B]$ representing "A happens before $B$ " or $[B,C]$ representing "B happens before $C$ ". The overall order representation for $\mathcal{H}_{13}^{\prime}$ is expressed as $[A,B,D,C]$ , which can be derived from the paired order representations: $[A,B],[B,D],[D,C]$ . A singleton order representation example of $\mathcal{H}_{13}^{\prime}$ can be expressed as $[B,10.5]$ , meaning $B$ happened in the past 10.5 days. + +# 2.3 WEIGHTED CLOCK LOGIC FORMULA + +To adapt $\mathcal{H}_t^\prime$ to continuous-time signals that can be described by logical statements, we extract clock signals from $\mathcal{H}_t^\prime$ to describe the time passed since the last occurrence of a label. A clocking function is introduced to convert $t_j$ into a clock signal $c_{j}$ denoting the time interval length between $t_j$ and $t$ . + +Definition 4 (Clocking Function) A clocking function $\Xi(\cdot)$ converts $\mathcal{H}_t^\prime$ into a vector of clock signals as $\mathcal{C}'(t) = [c_1(t), c_2(t), \dots, c_M(t)]^T \in \mathbb{R}_+^M$ with $c_i(t)$ denoting the clock signal for event label $i \in \mathcal{L}$ , where $c_i(t)$ is computed as $c_i(t) = t - t_j$ if $(l_j, t_j) \in \mathcal{H}_t^\prime$ and $l_j = i$ , and $c_i(t) = \bar{Z}$ otherwise. Note that $\bar{Z}$ is a user-defined, large positive number to indicate event label $i$ not happening in $\mathcal{H}_t^\prime$ . + +Example 1 (cont.) 
Taking the 'first' masked event stream $\mathcal{H}_{13}^{\prime} = \{(A,1),(B,3),(D,8),(C,12)\}$ as an example, the event clocks are extracted as $\mathcal{C}'(13) = [12,10,1,5]^T$ . + +The event clocks can essentially provide the ordering between any two event labels in that the difference between any two event labels' clock signals reflects which event label happens first. As shown in the diabetes prediction example in the Introduction section, the time interval between ordering events is notably important in explaining and predicting an event label's occurrence. In contrast to (Li et al., 2020; 2021) which only learns the temporal ordering relation between event labels, we define a paired order predicate (POP) with a learnable parameter $\underline{u}_{l_i l_j}$ to describe the time interval between two ordered event labels $l_i$ and $l_j$ and a singleton order predicate (SOP) with a learnable parameter $\underline{u}_{l_j}$ to describe the occurrence of label $l_j$ within a historical window $\underline{u}_{l_j}$ as follows. + +Definition 5 (Paired Order Predicate) A POP describes the order between two labels $l_i, l_j \in \mathcal{L}, l_i \neq l_j$ , denoted as $\pi_{pop}^{l_i l_j} := g(c_{l_i}, c_{l_j}) = c_{l_i} - c_{l_j} > \underline{u}_{l_i l_j}$ , where $\underline{u}_{l_i l_j} \in \mathbb{R}$ is a parameter to learn. A positive $\underline{u}_{l_i l_j}$ means $l_i$ happened before $l_j$ for at least $\underline{u}_{l_i l_j}$ time units, and a negative $\underline{u}_{l_i l_j}$ means $l_j$ happened before $l_i$ for at most $-\underline{u}_{l_i l_j}$ time units. A POP is used in the POC of Figure 1(b). + +Definition 6 (Singleton Order Predicate) An SOP describes a causal label $l_j \in \mathcal{L}$ occurring within the past $\underline{u}_{l_j}$ time units, defined as $\pi_{sop}^{l_j} := c_{l_j} - \underline{u}_{l_j} < 0$ , where $\underline{u}_{l_j} \in \mathbb{R}_+$ is a learnable parameter. 
+ +Instead of taking a heuristic approach for some underlying combinatorial search problem for a given set of temporal predicates (Bhattacharjya et al., 2020; 2021; Li et al., 2021) to uncover the effective order relations, this work proposes a differentiable learning model to learn suitable singleton and paired order predicates among all the possible choices of order predicates through a gradient-based approach. The scheme of weighted signal temporal logic (wSTL) in Yan et al. (2021; 2022) is exploited to build weighted clock logic (wCL) formulas that are logical compositions of singleton and paired order predicates. The syntax of wCL is recursively defined as (Mehdipour et al., 2021): + +$$ +\phi := \pi_ {p o p} ^ {l _ {i} l _ {j}} \left| \pi_ {s o p} ^ {l _ {j}} \right| \neg \phi \left| \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}} \dots \wedge \phi_ {k} ^ {w _ {k}} \right| \phi_ {1} ^ {w _ {1}} \vee \phi_ {2} ^ {w _ {2}} \dots \vee \phi_ {k} ^ {w _ {k}}, \tag {1} +$$ + +where $\phi_1, \dots, \phi_k$ are wCL formulas, $\neg$ denotes negation, $\land$ denotes logical conjunction, $\lor$ denotes logical disjunction, $w_j \geq 0, j = 1, \dots, k$ denotes non-negative weights assigned to $\phi_1, \dots, \phi_k$ in the conjunction and disjunction operations. A wCL formula can describe the characteristics of $\mathcal{H}_t$ , thus the conditional intensity rate of event $l$ given $\mathcal{H}_t$ can be equivalently denoted as $\lambda_{l|\phi}(t)$ . + +Remark 7 The syntax above means each wCL formula can be built by using predicates in $\pi_{pop}^{l_i l_j}$ or $\pi_{sop}^{l_j}$ and then by recursively applying the $\neg$ or the $\land$ or the $\lor$ operations. + +Example 1 (cont.) A $wCL$ formula example is $\phi = (c_A - c_B > 1)^1 \wedge (c_C < 3)^{0.05}$ . The first and second clauses read "A happened before $B$ for at least one day" and "C happened less than 3 days ago", respectively. 
Note that $\phi$ is satisfied by the event stream up to $t = 13$ in Figure 1(a). The two clauses have weights of 1 and 0.05, reflecting the first clause is more important than the second one. + +# 3 WEIGHTED CLOCK LOGIC POINT PROCESSES + +# 3.1 TRUTH DEGREE OF WEIGHTED CLOCK LOGIC + +To quantitatively measure the satisfaction degree of a wCL formula $\phi$ over the event clocks $\mathcal{C}'(t)$ , i.e., how well does $\phi$ describe the underlying patterns of $\mathcal{C}'(t)$ , we propose smooth activation functions (AFs) to compute the truth degree, denoted $p(\mathcal{C}',\phi,t)\in [0,1]$ , defined as (Riegel et al., 2020): + +$$ +p\left(\mathcal{C}^{\prime}, \pi_{pop}^{l_{i} l_{j}}, t\right) = \operatorname{sigmoid}\left(c_{l_{i}}(t) - c_{l_{j}}(t) - \underline{u}_{l_{i} l_{j}}\right), \tag{2} +$$ + +$$ +p\left(\mathcal{C}^{\prime}, \pi_{sop}^{l_{j}}, t\right) = \operatorname{sigmoid}\left(\underline{u}_{l_{j}} - c_{l_{j}}(t)\right), \tag{3} +$$ + +$$ +p\left(\mathcal{C}^{\prime}, \neg\phi, t\right) = 1 - p\left(\mathcal{C}^{\prime}, \phi, t\right). \tag{4} +$$ + +In contrast to the combinatorial search of the temporal logic predicates in Li et al. (2021), the smooth design of AFs in (2) - (4) benefits the maximum likelihood estimation problem shown later in Section 3.6 by allowing it to learn the parameters in the POP and SOP through gradient-based methods. Next, we present the design of activation functions (AF) for the $\wedge$ operator. Here we use a 2-ary conjunction operator to motivate the design. Let $p^{\wedge} = p(\mathcal{C}',\phi_1^{w_1}\wedge \phi_2^{w_2},t)\in [0,1]$ . Intuitively, $p^{\wedge}$ is low when either input is low, and $p^{\wedge}$ is high when both inputs are high. Here we adopt a similar idea to Sen et al. (2022) for capturing the low and high.
A user-defined hyperparameter $\alpha \in [\frac{1}{2},1]$ is introduced to aid the interpretability of low and high such that $p^{\wedge}$ represents high if $p^{\wedge}\in [\alpha ,1]$ and low if $p^{\wedge}\in [0,1 - \alpha ]$ . Considering the importance weights, a low input with a zero weight should not impact the output, which implies $p^{\wedge}$ should be low when both inputs are low. With these considerations, the AF for the $\wedge$ operator is defined as follows: (See Appendix A for more details.) + +$$ +p\left(\mathcal{C}^{\prime}, \phi_{1}^{w_{1}} \wedge \phi_{2}^{w_{2}} \dots \wedge \phi_{k}^{w_{k}}, t\right) = f\left(\beta - \sum_{j=1}^{k} w_{j}\left(1 - p\left(\mathcal{C}^{\prime}, \phi_{j}, t\right)\right)\right), \tag{5} +$$ + +$$ +\text{subject to} \quad \beta - \sum_{j=1}^{k} w_{j}(1 - \alpha) \geq \alpha, \quad \beta - \sum_{j=1}^{k} w_{j}\alpha \leq 1 - \alpha, +$$ + +where $f(z) = \max \{0, \min \{z, 1\}\}$ clamps the truth degree into [0,1], $w_{j} \geq 0$ and $\beta \geq 0$ are parameters to learn. By De Morgan's law (Hurley, 2014), the AF for the $\vee$ operator is defined as + +$$ +p\left(\mathcal{C}^{\prime}, \phi_{1}^{w_{1}} \vee \phi_{2}^{w_{2}} \dots \vee \phi_{k}^{w_{k}}, t\right) = f\left(1 - \beta + \sum_{j=1}^{k} w_{j}\, p\left(\mathcal{C}^{\prime}, \phi_{j}, t\right)\right), \tag{6} +$$ + +$$ +\text{subject to} \quad 1 - \beta + \sum_{j=1}^{k} w_{j}\alpha \geq \alpha, \quad 1 - \beta + \sum_{j=1}^{k} w_{j}(1 - \alpha) \leq 1 - \alpha. +$$ + +An event stream with $M$ event labels would generate $\mathrm{P}_M^2 = \frac{M!}{(M - 2)!}$ paired order predicates and $M$ singleton order predicates. If a conjunction or disjunction operator takes these predicates as inputs, how it recognizes the effective order predicates in describing the event dynamics becomes a critical issue.
By carefully designing the AFs in (5) - (6), the logical operators exhibit the following properties so as to recognize effective inputs. This is a critical advantage over Bhattacharjya et al. (2020; 2021); Li et al. (2021) in that it allows a differentiable search of the suitable predicates among all the possible choices of order predicates in an end-to-end manner. Here we illustrate the properties for $\wedge$ with two inputs, which can be generalized to $k$ -ary inputs. (See Appendix B for more details.) + +Theorem 8 The $AF$ for the $\wedge$ operator with two inputs exhibits the following properties. + +1) Nonimpact for zero weights: If $w_{j} = 0, j = 1,2$ , $p(\mathcal{C}',\phi_j,t)$ has no impact on $p(\mathcal{C}',\phi_1\wedge \phi_2,t)$ . +2) Impact ordering: If $p(\mathcal{C}', \phi_1, t) = p(\mathcal{C}', \phi_2, t)$ , and $w_1 \geq w_2$ , then $\frac{\partial p(\mathcal{C}', \phi_1 \wedge \phi_2, t)}{\partial p(\mathcal{C}', \phi_1, t)} \geq \frac{\partial p(\mathcal{C}', \phi_1 \wedge \phi_2, t)}{\partial p(\mathcal{C}', \phi_2, t)}$ . +3) Monotonicity: $f(\beta - \sum_{j=1}^{2} w_j (1 - p(\mathcal{C}', \phi_j, t))) \leq f(\beta - \sum_{j=1}^{2} w_j (1 - (p(\mathcal{C}', \phi_j, t) + d))), d \geq 0.$ + +![](images/9e0cac7601d681cedd8dc0d19c63518c0191e9bee81a8bb2a62181f80436ece0.jpg) +(a) + +![](images/c640ccb66e96e9424797e3c3f14486a4b78a2595964840319ae75556b8aea159.jpg) +(b) +Figure 2: CLNN Structure. (a): Continuous relaxation of the search space using weights. (b): The learned discrete model structure for $\phi = (\pi_{pop}^{A,B}\wedge \pi_{pop}^{B,C})\vee (\pi_{sop}^{A})$ + +# 3.2 LEARNING OF PAIRED ORDER REPRESENTATION + +With the smooth AFs designed in (2) - (6), a neuro-symbolic model called clock logic neural network (CLNN) can be designed for any given wCL formula $\phi$ , in which every neuron has a corresponding symbolic representation. 
A typical CLNN for $\phi = (\pi_{pop}^{A,B}\wedge \pi_{pop}^{B,C})\vee (\pi_{sop}^{A})$ is visualized as Fig. 2(b), which can be considered as the discrete structure obtained by learning the parameters of the model in Figure 2(a) and keeping the dominant components. Here $\phi$ can be interpreted as “(A happens before $B$ for at least $\underline{u}_{AB}$ time units or $B$ happens before $C$ for at least $\underline{u}_{BC}$ time units) and $A$ happens within the past $\underline{u}_A$ time units.” This part describes the continuous relaxation of the search space by designing a paired order cell, a singleton order cell, and an architecture cell for learning the paired order representation, singleton order representation and the formula structure. + +Paired Order Cell (POC). A POC is a directed acyclic graph (DAG) comprising two paired order predicate (POP) nodes and one logical node for the $\wedge$ operator, shown as an orange block in Figure 2(a). The two POP nodes represent $\pi_{pop}^{l_i,l_j}$ and $\pi_{pop}^{l_j,l_i}$ sharing the same parameter $\underline{u}_{l_i,l_j}$ , where $\pi_{pop}^{l_i,l_j}$ denotes " $l_i$ happened before $l_j$ for at least $\underline{u}_{l_i,l_j}$ time units" and $\pi_{pop}^{l_j,l_i}$ denotes " $l_j$ happened before $l_i$ for at least $\underline{u}_{l_i,l_j}$ time units". Each POP has an associated weight $w_{pop}^{l_i,l_j}$ or $w_{pop}^{l_j,l_i}$ to be learned, and the $\wedge$ operator forces one of the two weight parameters to dominate the other one such that the learned POR is consistent with the event stream. For example, the POC in Figure 2(a) aims to learn the POR between $A$ and $B$ , whose discretized version would be either $\pi_{pop}^{A,B}$ or $\pi_{pop}^{B,A}$ . An event stream with $M$ event labels can generate $\mathrm{P}_M^2 = \frac{M!}{(M - 2)!}$ PORs between any two event + +labels, resulting in $(\mathrm{P}_M^2 / 2)$ POCs. 
Similar to learning the POR between any two events, the discrete order representations for the entire history $\mathcal{H}_t$ can be learned using a POP selection node (as shown in Figure 2(a)) that takes the outputs of all the POCs as input and identifies the important PORs. The learning of the POCs essentially becomes learning the $w$ , $\beta$ in (5) for the POCs and the POP selection node, as well as $\underline{u}_{l_i l_j}$ in (2) for the POPs through back propagation. The discrete PORs can be acquired by keeping the top- $k$ strongest POCs and the dominant POPs. + +# 3.3 LEARNING OF SINGLETON ORDER REPRESENTATION + +Singleton Order Cell (SOC). The learning of SOR is accomplished by an SOC, which is displayed as a green block in Figure 2(a). An SOC is a DAG comprising $M$ singleton order predicate (SOP) nodes and one SOP selection node for the $\wedge$ operator. An SOP node represents $\pi_{sop}^{l_j}$ that takes $c_{l_j}(t)$ as input and returns the truth degree of $\pi_{sop}^{l_j}$ over $c_{l_j}(t)$ . The SOP selection node has the same functionality as the POP selection node. The $\wedge$ operator in the SOP selection node assigns a nonnegative weight to every SOP node and learns the importance weights $w$ and $\beta$ to extract the dominant SORs affecting the conditional intensity rate the most. The learning of the SOC is thus learning the $w, \beta$ in (5) for the SOP selection node and $\underline{u}_{l_j}$ in (3) for the SOPs through back propagation. The discrete SORs can be determined by keeping the top- $k$ strongest SOPs. + +# 3.4 LEARNING OF FORMULA STRUCTURE + +Architecture Cell (AC). For a given set of PORs or SORs, their conjunction or disjunction will behave differently and have distinct meanings. 
For instance, given two causal formulas $\phi_{1} = (c_{A} - c_{B} > 1)^{1}\wedge (c_{C} < 5)^{1}$ and $\phi_{2} = (c_{A} - c_{B} > 1)^{1}\vee (c_{C} < 5)^{1}$ for the occurrence of event label $D$ , $\phi_{1}$ means “(A happens before $B$ for at least 1 time unit) and (C happens within the past 5 time units) simultaneously will cause $D$ to happen”, whereas $\phi_{2}$ means “(A happens before $B$ for at least 1 time unit) or (C happens within the past 5 time units) alternatively will cause $D$ to happen.” The afore-mentioned cells can learn the order representations. Nevertheless, whether their outputs should be connected by the $\wedge$ or $\vee$ operator needs to be determined. Here we consider the outputs of the POCs and the SOCs having two choices of being connected by a $\wedge$ or $\vee$ operator, each of which is associated with an architecture weight $\alpha_{arc}^{\wedge}$ or $\alpha_{arc}^{\vee}$ that enables continuous learning of the two choices; this is also called differentiable architecture search (Liu et al., 2019). An architecture cell is introduced for learning the model architecture, which comprises two logical nodes representing a $\wedge$ operator and a $\vee$ operator as well as a logical selection node (LSN), shown as the blue block in Figure 2(a). Let $\pmb{p} = \{p_1,\dots,p_k\}$ denote the set of inputs for each logical operator. Subsequently, the conjunction operator takes $\pmb{p}$ as input and returns $p^{\wedge} = f(\beta^{\wedge} - \sum_{j = 1}^{k}w_{j}^{\wedge}(1 - p_{j}))$ , and the disjunction operator takes $\pmb{p}$ as input and returns $p^{\vee} = f(1 - \beta^{\vee} + \sum_{j = 1}^{k}w_{j}^{\vee}p_{j})$ . 
The LSN represented by $\ominus$ takes $p^{\wedge}$ and $p^{\vee}$ as inputs and returns their weighted sum, where the weights are computed using the softmax of the architecture weights as shown below: + +$$ +p_{\ominus} = p\left(\mathcal{C}^{\prime}, \phi, t\right) = \sum_{m \in \{\wedge, \vee\}} \frac{e^{\alpha_{arc}^{m}}}{\sum_{m^{\prime} \in \{\wedge, \vee\}} e^{\alpha_{arc}^{m^{\prime}}}} p^{m}. \tag{7} +$$ + +The task of architecture search then reduces to learning the architecture weights $\alpha_{arc}^{\wedge}$ , $\alpha_{arc}^{\vee}$ and the $w, \beta$ in (5) - (6) for the two logical operators, which can be executed simultaneously while learning parameters in the POCs and SOCs. The outcome of the architecture search process is a discrete architecture obtained by retaining the logical operator with the strongest architecture weight. + +# 3.5 WCL-INFORMED INTENSITY FUNCTION + +The output of a CLNN is the truth degree of $\phi$ over $\mathcal{C}'$ at $t$ , which is incorporated into modeling the conditional intensity rates. The modeling process aims to discover the generative mechanism as wCL formulas for every $l \in \mathcal{L}$ . In other words, a larger value of $p(\mathcal{C}', \phi, t)$ should reflect that $\phi$ has a greater impact on the occurrence of a particular label. For example, if the wCL formula for affecting the occurrence of event label $D$ is given as $\phi = ((\pi_{pop}^{A,B})^{w_1} \wedge (\pi_{sop}^{C})^{w_2})$ , it means if $\phi$ is satisfied or the truth degree of $\phi$ is high, then it has a strong impact on the occurrence of $D$ , where the impact can be promoting or inhibiting the occurrence of $D$ . In terms of the relation between the truth degree and the con- + +![](images/5d5b49c9b0aa3673d8a056f465562a03c01bddae53c13e45a86456e37664c6a3.jpg) +Figure 3: The overall learning framework for $n$ wCL formulas.
+ +ditional intensity rate, the higher the truth degree $p(\mathcal{C}',\phi ,t)$ , the greater its impact on $\lambda_{D|\phi}$ . Note + +that the occurrence of one event label may depend on multiple wCL formulas. This work follows the assumption that the impact of multiple formulas are additive in predicting the intensity rate, similar to Li et al. (2020). To incorporate a set of wCL formulas $\Phi = \{\phi_1,\phi_2,\dots,\phi_n\}$ into the modeling of the conditional intensity rate, we define a wCL formula-informed conditional intensity rate as: + +$$ +\lambda_ {l \mid \Phi} (t) = \exp \left(\sum_ {i = 1} ^ {n} w _ {\phi_ {i}} p \left(\mathcal {C} ^ {\prime}, \phi_ {i}, t\right) + \rho\right), \tag {8} +$$ + +where $w_{\phi_i}$ is the weight of $\phi_i$ , and $\rho$ is a bias term that allows for spontaneous occurrence without the influence from $\phi$ . + +# 3.6 MAXIMUM LIKELIHOOD ESTIMATION + +Suppose event stream $\mathcal{D}$ contains $n_l$ occurrences of event $l$ , for which the occurrence time stamps are denoted as $t_{l_1}, t_{l_2}, \ldots, t_{l_{n_l}}$ . Let $t_0 = 0$ , $t_{l_{n_l + 1}} = T$ . Based on the conditional intensity function in (8), the likelihood for label $l$ over the event stream is calculated as (Daley & Vere-Jones, 2003): + +$$ +L _ {l} = \prod_ {i = 0} ^ {n _ {l} - 1} \left(\exp \left(- \int_ {t _ {l _ {i}}} ^ {t _ {l _ {i + 1}}} \lambda_ {l | \Phi} (s) d s\right) \lambda_ {l | \Phi} \left(t _ {l _ {i + 1}}\right)\right) \exp \left(- \int_ {t _ {l _ {n _ {l}}}} ^ {T} \lambda_ {l | \Phi} (s) d s\right). \tag {9} +$$ + +The corresponding log-likelihood for event label $l$ is expressed as $LL_{l} = (-\int_{0}^{T}\lambda_{l|\Phi}(s)ds) + \sum_{i = 1}^{n_{l}}[\log (\lambda_{l|\Phi}(t_{l_{i}}))]$ . The total log-likelihood of all the events in $\mathcal{D}$ is thus $LL_{\mathcal{D}} = \sum_{l\in \mathcal{L}}LL_{l}$ . During the training process, we train the model parameters for each event label separately. 
Specifically, the maximum likelihood estimation problem for event label $l$ can be formulated as follows: + +$$ +\min - L L _ {l} \tag {10} +$$ + +$$ +s. t. \quad \forall \phi \in \Phi , \forall 1 \leq k \leq K _ {\phi} ^ {\wedge}, \beta_ {k} - \sum_ {i \in I _ {k}} w _ {i, k} (1 - \alpha) \geq \alpha , \beta_ {k} - \sum_ {i \in I _ {k}} w _ {i, k} \alpha \leq 1 - \alpha , \tag {11} +$$ + +$$ +\forall \phi \in \Phi , \forall 1 \leq k ^ {\prime} \leq K _ {\phi} ^ {\vee}, 1 - \beta_ {k ^ {\prime}} + \sum_ {i \in I _ {k ^ {\prime}}} w _ {i, k ^ {\prime}} \alpha \geq \alpha , 1 - \beta_ {k ^ {\prime}} + \sum_ {i \in I _ {k ^ {\prime}}} w _ {i, k ^ {\prime}} (1 - \alpha) \leq 1 - \alpha , \tag {12} +$$ + +$$ +w _ {i, k} \geq 0, \beta_ {k} \geq 0, w _ {i, k ^ {\prime}} \geq 0, \beta_ {k ^ {\prime}} \geq 0, \underline {{u}} _ {l _ {j}} \geq 0, +$$ + +where $K_{\phi}^{\wedge}$ (resp. $K_{\phi}^{\vee}$ ) is the number of $\wedge$ (resp. $\vee$ ) operators in $\phi$ , $I_{k}$ (resp. $I_{k'}$ ) denotes the inputs to the $k$ -th $\wedge$ (resp. $k'$ -th $\vee$ ) operator. Please see Appendix A for more details about the above formulation. The overall learning framework is shown in Figure 3, in which the forward propagation computes $LL_{l}$ by using $n$ CLNNs; each learns a wCL formula $\phi_{i}$ and the backward propagation updates the parameters in $n$ CLNNs using projected gradient descent. + +# 4 EXPERIMENTS + +We conduct several experiments on synthetic and real-world datasets to demonstrate the efficacy of our proposed model. Simultaneously, we compare with state-of-the-art (SOTA) models. The experiments are run using the AdamW optimizer in Pytorch (1.10.2) on a Windows 10 system desktop with a 16-core CPU (i7, 3.60GHz) and 32 GB RAM. Our code is available at https://ICLR-CLNN. 
+ +# 4.1 MODELS + +Multivariate Hawkes Process (MHP) [(Bacry et al., 2017)]: A conventional multivariate Hawkes process utilizing an exponential kernel function to describe the conditional intensity rate, which involves a decay rate and an infectivity matrix characterizing the inter-dependence among events. This model is implemented in the tick $^{1}$ library, where the learning problem is posed as a convex quadratic programming problem with a fixed decay rate. + +Proximal Graphical Event Model (PGEM) [(Bhattacharjya et al., 2018)]: A type of GEM that models event data by considering whether a parent in some underlying graph happens in a proximal (recent) window. + +
Ground truthφ1=(cA-cB>1)1∧(cA-cC>3)1
CLNN's rule(cA-cB>1.21)1.52 ∧ (cA-cC>3.00)1.41 ∧ (cA-cD>0.82)0.33 ∧ (cB-cC>4.33)0 ∧ (cB-cD>10.69)0 ∧ (cD-cC>-6.57)0.16
TELLER's ruleA before D, B before D, C before D, A before D and C before D
OGEM-tab's ruleExcitation: [B], [C,B], [B,C]; Inhibitory: [A], [C,A], [A,C]
+ +Ordinal Graphical Event Model (OGEM) [(Bhattacharjya et al., 2020; 2021)]: An ordinal GEM that models the impact of the order of events on the conditional intensity rate. OGEM-tab (resp. OGEM-tree) refers to an OGEM that adopts a tabular (resp. tree) representation of orders. + +Temporal Logic Rule Learner (TELLER)2 [(Li et al., 2021)]. This is a method to learn first-order temporal logic rules explaining the generative mechanism of TPPs. The rule discovery process is formulated as a maximum likelihood estimation problem solved by a branch-and-price algorithm. + +# 4.2 SYNTHETIC DATASETS + +The first part of this experiment demonstrates CLNN's capability of recovering ground-truth rules using three synthetic datasets generated by CLNN with pre-specified formula structure and parameters, including $\underline{u}_{l_i l_j}$ in $\pi_{pop}^{l_i, l_j}$ , as well as the importance weights $w$ and bias $\beta$ in (5) for logical operators, and the $w_\phi$ and $\rho$ in (8) for the conditional intensity rate. + +Experimental Setting. Each synthetic dataset contains 1,000 event streams partitioned into three sets: training (70%), validation (15%), and test (15%). Every dataset is generated using a wCL formula with $w_{\phi} = 3$ and $\rho = -5$ . The truth value threshold is set as $\alpha = 0.5$ , and the clock signal for representing an event not occurring in $\mathcal{H}_t^\prime$ is set as $\bar{Z} = 1.5T_{\mathrm{max}}$ , where $T_{\mathrm{max}}$ is the maximal ending time among all the event streams. During the training process, we initialize the parameters using four approaches (see Appendix C.5 for more details) and report the best one, and CLNN aims to recover the manually set parameters. + +Results. The ground-truth rule $\hat{\phi}_1$ for generating the first synthetic dataset (Syn-1) with $\mathcal{L} = \{A, B, C, D\}$ and the rules discovered by CLNN, TELLER, and OGEM-tab are summarized in Table 1. 
Results for the other synthetic datasets are presented in Appendix C. The rules are learned using the 'last' masking method, which was also used for data generation. The experimental results show an accurate recovery performance of CLNN in terms of order representation recovery and parameter identification. The unweighted version of the ground truth rule reads: "If $A$ happens before $B$ for at least 1 time unit and $A$ happens before $C$ for at least 3 time units, then $D$ will happen". The rule of TELLER only reflects the temporal relation between events $A, B, C$ and $D$ but is unable to capture the temporal relation between $A$ and $B$ or $A$ and $C$ , which does not match the ground-truth rule. In OGEM-tab's rule, $[l]$ denotes a single parent. We show the top 3 excitation and inhibitory rules from OGEM-tab, where excitation (resp. inhibitory) means $\lambda_{l|\Phi}$ is higher (resp. lower) than the $\lambda_{l|\Phi}$ with all $w_{\phi_i} = 0$ . The excitation rules of OGEM-tab do not match the ground-truth rule. In contrast, the rule discovered by CLNN ( $\phi_1$ ) assigns larger weights to the paired order predicates $\pi_{pop}^{A,B} = (c_A - c_B > 1.21)$ and $\pi_{pop}^{A,C} = (c_A - c_C > 3.00)$ and small weights to the other predicates, where the interval values of 1.21 and 3.00 are both learned. By ignoring the small weights, $\phi_1$ can be interpreted as "If $A$ happens before $B$ for at least 1.21 time units and $A$ happens before $C$ for at least 3.00 time units, then $D$ will happen", meaning the paired order representations discovered by CLNN match well with the ground truth. Moreover, CLNN's rules are more expressive than TELLER and OGEM as it provides a detailed interval length between two ordered labels. + +To show the computational efficiency of our gradient-based learning, we compare the runtimes of CLNN and TELLER on the synthetic datasets in Table 2. 
Notably, CLNN not only recovers the correct order representations but also is two orders of magnitude faster on average (5.62 s vs 635.99 s). + +In addition, CLNN can learn more expressive order representations that describe both the order relation between two events and their interval length. + +Table 1: Comparison of rule discovery for CLNN, TELLER, and OGEM-tab on the Syn-1 dataset. + +
wCL formulaφ1φ2φ3,1φ3,2Average
CLNN5.204.604.957.735.62
TELLER252.91286.83925.581078.66635.99
+ +Table 2: Runtime (s) for CLNN and TELLER on synthetic datasets. + +# 4.3 REAL-WORLD DATASETS + +LinkedIn [(Xu et al., 2017)]. An event dataset related to job hopping records of 3,000 LinkedIn users in 82 IT companies. Each event stream records a user's check-in time stamps for different companies or the time stamps for role change within the same company. We filter the dataset to popular companies as per Bhattacharjya et al. (2020), resulting in 1,000 users. + +MIMIC II [(Saeed et al., 2011)]. An event dataset concerning health records of patients from Intensive Care Unit (ICU) visits over 7 years. A patient's event stream records each visit's time stamp and the corresponding diagnosis. We filter out sequences with few visits, resulting in 650 patients. + +Stack Overflow [(Grant & Betts, 2013)]. An event dataset that is related to the badges awarded to users in the question-answering website Stack Overflow. Each user's event stream records the badges that he/she receives at various time stamps. We keep the event streams with one or more of 20 types of badges and sample 1,000 users from the dataset used in Du et al. (2016). + +Experimental Setup. Each dataset is partitioned into three sets: training (70%), validation (15%), and test (15%). For simplicity, $\underline{u}_{l_i l_j}$ are set as 0 to study the ordering representations. The truth value threshold is $\alpha = 0.5$ , and $\bar{Z} = 1.5T_{\mathrm{max}}$ , same as the setting for the synthetic datasets, and the number of subformulas is $n = 5$ , and the parameters are initialized as random numbers from a uniform distribution on [0, 1). CLNN is trained on the training set, and the validation set is utilized for model selection during training. Model fit is evaluated using log-likelihood on the test set. + +Results. We follow a similar trend to Bhattacharjya et al. (2018; 2020; 2021) to use the log-likelihood for evaluation of the model's performance. 
The log-likelihood on the real-world datasets is reported in Table 3, where $DR$ denotes the difference ratio – the difference between CLNN and the best SOTA divided by the absolute value of best SOTA. CLNN's result is chosen as the better one among the 'first' or the 'last' masking. Notably, CLNN outperforms the baseline models on the LinkedIn dataset (13.40% advantage) and achieves a competitive result on the MIMIC II dataset (1.63% loss only). It is observed that PGEM achieves a better result on the Stack Overflow dataset. In Stack Overflow, one type of badge can be awarded only when a user receives a particular badge multiple times, for example, the 'Epic' badge is awarded only when earning 200 daily reputations 50 times, depending on the 'Mortarboard' badge acquired while answering or asking questions. CLNN and OGEMs apply masking methods to the data, which may not capture the above dependence. In contrast, PGEM models data without masking, making it more suitable for this dataset. + +
DatasetN (# events)M (labels)MHPPGEMOGEM-tabOGEM-treeTELLERCLNNDR
LinkedIn293210-1593-1462-1478-1418-1548-122813.40%
MIMIC II241915-567-500-474-429-645-436-1.63%
Stack Overflow7125420-52543-48323-49344-49192-71101-50981-5.50%
+ +Case Study. The primary strength of CLNN over the SOTA models is that it can describe the generative mechanism as wCL formulas, being more expressive and potentially providing more detailed information. CLNN can be deployed as a valuable tool for assisting domain specialists in knowledge discovery from event data. Here we showcase the above strength of CLNN using an il + +lustrative example. We select the experimental result on company $F$ of the LinkedIn dataset to demonstrate the expressivity of CLNN's rules, which are shown in Table 4. Here we specify the model to learn five formulas, four of which are inhibitory, and one exhibits excitation. One inhibitory formula has a weight of 0.05, thus not reported in Table 4. Each formula shows the dominant singleton or paired order predicates. Notably, CLNN learns expressive wCL formulas that describe how the logical composition of paired order predicates and(or) singleton order predicates affect a role change in the company $F$ . CLNN's rules are more expressive than TELLER and as expressive as OGEM-tab for describing the occurrence of a causal event within a specific historical window. + +Table 3: Dataset information and log-likelihood for all models on the real-world datasets. + +
RulesEffect
CLNNφ1=(cD>cH)0.90 ∧ (cI>cJ)0.72Inhibitory
φ2=(cB<0.45)0.58 ∧ (cD<0.05)0.66Excitation
φ3=(cB>cF)0.50 ∧ (cI>cJ>cD)0.47Inhibitory
φ4=(cA<0.84)0.76 ∧ (cH<1.09)0.50Inhibitory
TELLER[A,F],[C,F],[E,F],[B,F],[D,F]Excitation
OGEM-tab[F],[F,A]Excitation
[A]Inhibitory
+ +Table 4: Formulas and their effect as learned by CLNN, TELLER and OGEM-tab on company $F$ of LinkedIn. + +# 5 CONCLUSION + +In this paper, we proposed a novel neuro-symbolic model, CLNN, to learn interpretable wCL formulas from multivariate event data. Experimental results using synthetic and real-world datasets demonstrate CLNN's expressiveness in recovering ground-truth rules in multivariate temporal point processes. Further, CLNN can be trained using gradient-based methods, which improve the learning speed compared to the SOTA. + +# 6 ACKNOWLEDGEMENT + +This research is sponsored by the Rensselaer-IBM AI Research Collaboration (http://airc.rpi.edu), part of the IBM AI Horizons Network; the National Science Foundation under Grant CMMI-1936578; and the Defense Advanced Research Projects Agency (DARPA) through Cooperative Agreement D20AC00004 awarded by the U.S. Department of the Interior (DOI), Interior Business Center. The content of the information does not necessarily reflect the position or the policy of the Government, and no official endorsement should be inferred. + +# REFERENCES + +Odd Aalen, Ornulf Borgan, and Hakon Gjessing. Survival and Event History Analysis: A Process Point of View. Springer Science & Business Media, 2008. +Emmanuel Bacry, Iacopo Mastromatteo, and Jean-François Muzy. Hawkes processes in finance. Market Microstructure and Liquidity, 1(01):1550005, 2015. +Emmanuel Bacry, Martin Bompaire, Philip Deegan, Stéphane Gaiffas, and Søren V Poulsen. tick: A Python library for statistical learning, with an emphasis on Hawkes processes and time-dependent models. The Journal of Machine Learning Research, 18(1):7937-7941, 2017. +Debarun Bhattacharjya, Dharmashankar Subramanian, and Tian Gao. Proximal graphical event models. Advances in Neural Information Processing Systems (NeurIPS), 31:8147-8156, 2018. +Debarun Bhattacharjya, Tian Gao, and Dharmashankar Subramanian. Order-dependent event models for agent interactions. 
In Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI), pp. 1977-1983, 2020. +Debarun Bhattacharjya, Tian Gao, and Dharmashankar Subramanian. Ordinal historical dependence in graphical event models with tree representations. In Proceedings of the Conference on Artificial Intelligence (AAAI), pp. 6759-6767, 2021. +Yuanda Chen. Thinning algorithms for simulating point processes. Florida State University, Tallahassee, FL, 2016. +Daryl J Daley and David Vere-Jones. An Introduction to the Theory of Point Processes, Volume I: Elementary Theory and Methods. Springer, 2003. +Vanessa Didelez. Graphical models for marked point processes based on local independence. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 70(1):245-264, 2008. +Nan Du, Hanjun Dai, Rakshit Trivedi, Utkarsh Upadhyay, Manuel Gomez-Rodriguez, and Le Song. Recurrent marked temporal point processes: embedding event history to vector. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 1555-1564, 2016. +Mehrdad Farajtabar, Yichen Wang, Manuel Gomez Rodriguez, Shuang Li, Hongyuan Zha, and Le Song. COEVOLVE: A joint point process model for information diffusion and network coevolution. In Advances in Neural Information Processing Systems (NeurIPS), volume 28, pp. 1954-1962, 2015. +Tian Gao, Dharmashankar Subramanian, Karthikeyan Shanmugam, Debarun Bhattacharjya, and Nicholas Mattei. A multi-channel neural graphical event model with negative evidence. In Proceedings of the Conference on Artificial Intelligence (AAAI), pp. 3946-3953, 2020. +Scott Grant and Buddy Betts. Encouraging user behaviour with achievements: An empirical study. In Proceedings of the 10th Working Conference on Mining Software Repositories, MSR '13, pp. 65-68. IEEE Press, 2013. +Asela Gunawardana and Chris Meek. Universal models of multivariate temporal point processes. In Artificial Intelligence and Statistics, pp. 556-563. 
PMLR, 2016. + +Asela Gunawardana, Christopher Meek, and Puyang Xu. A model for temporal dependencies in event streams. Advances in Neural Information Processing Systems (NeurIPS), 24, 2011. +Patrick J Hurley. A Concise Introduction to Logic. Cengage Learning, 2014. +Shuang Li, Lu Wang, Ruizhi Zhang, Xiaofu Chang, Xuqin Liu, Yao Xie, Yuan Qi, and Le Song. Temporal logic point processes. In International Conference on Machine Learning, pp. 5990-6000. PMLR, 2020. +Shuang Li, Mingquan Feng, Lu Wang, Abdelmajid Essofi, Yufeng Cao, Junchi Yan, and Le Song. Explaining point processes by learning interpretable temporal logic rules. In International Conference on Learning Representations, 2021. +Hanxiao Liu, Karen Simonyan, and Yiming Yang. DARTS: Differentiable architecture search. In International Conference on Learning Representations, 2019. +Noushin Mehdipour, Cristian-Ioan Vasile, and Calin Belta. Specifying user preferences using weighted signal temporal logic. IEEE Control Systems Letters, 5(6):2006-2011, 2021. +Hongyuan Mei and Jason M Eisner. The neural Hawkes process: A neurally self-modulating multivariate point process. Advances in Neural Information Processing Systems (NeurIPS), 30:6757-6767, 2017. +Sean P O'Brien. Crisis early warning and decision support: Contemporary approaches and thoughts on future research. International Studies Review, 12(1):87-104, 2010. +Ryan Riegel, Alexander Gray, Francois Luus, Naweed Khan, Ndivhuwo Makondo, Ismail Yunus Akhalwaya, Haifeng Qian, Ronald Fagin, Francisco Barahona, Udit Sharma, et al. Logical neural networks. arXiv preprint arXiv:2006.13155, 2020. +Mohammed Saeed, Mauricio Villarroel, Andrew T Reisner, Gari Clifford, Li-Wei Lehman, George Moody, Thomas Heldt, Tin H Kyaw, Benjamin Moody, and Roger G Mark. Multiparameter intelligent monitoring in intensive care II (MIMIC-II): A public-access intensive care unit database. Critical Care Medicine, 39(5):952, 2011. 
+Prithviraj Sen, Bruno WSR de Carvalho, Ryan Riegel, and Alexander Gray. Neuro-symbolic inductive logic programming with logical neural networks. In Proceedings of the Conference on Artificial Intelligence (AAAI), volume 36, pp. 8212-8219, 2022. +Jeremy C. Weiss and David Page. Forest-based point process for event prediction from electronic health records. In Machine Learning and Knowledge Discovery in Databases, pp. 547-562, 2013. +Shuai Xiao, Junchi Yan, Xiaokang Yang, Hongyuan Zha, and Stephen Chu. Modeling the intensity function of point process via recurrent neural networks. In Proceedings of the Conference on Artificial Intelligence (AAAI), volume 31, pp. 1597-1603, 2017. +Hongteng Xu, Dixin Luo, and Hongyuan Zha. Learning Hawkes processes from short doubly-censored event sequences. In International Conference on Machine Learning, pp. 3831-3840. PMLR, 2017. +Ruixuan Yan, Agung Julius, Maria Chang, Achille Fokoue, Tengfei Ma, and Rosario Uceda-Sosa. STONE: Signal temporal logic neural network for time series classification. In 2021 International Conference on Data Mining Workshops (ICDMW), pp. 778-787. IEEE, 2021. +Ruixuan Yan, Tengfei Ma, Achille Fokoue, Maria Chang, and Agung Julius. Neuro-symbolic models for interpretable time series classification using temporal logic description. In 2022 IEEE International Conference on Data Mining (ICDM), pp. 618-627, 2022. doi: 10.1109/ICDM54844.2022.00072. +Qiang Zhang, Aldo Lipani, Omer Kirnap, and Emine Yilmaz. Self-attentive Hawkes process. In International Conference on Machine Learning, pp. 11183-11193. PMLR, 2020. +Simiao Zuo, Haoming Jiang, Zichong Li, Tuo Zhao, and Hongyuan Zha. Transformer Hawkes process. In International Conference on Machine Learning, pp. 11692-11702. PMLR, 2020. + +# A FORMULATION OF LOGICAL CONSTRAINTS & OBJECTIVE FUNCTION + +The optimization problem in (10) is formulated by maximizing the log-likelihood subject to the logical constraints for the $\wedge$ and $\vee$ operators. 
This section discusses the details of the formulation for the two logical constraints and how to formulate the optimization problem while considering the logical constraints. Without loss of generality, we illustrate the formulation of the constraints for the $\wedge$ operator, and the constraints for $\vee$ operator can be derived from the constraints for the $\wedge$ operator using De Morgan's law. + +# - Logical constraints for $\wedge$ operator. + +Let $x, y \in [0,1]$ denote the inputs of the $\wedge$ operator, and $f(x,y)$ denote the quantitative satisfaction of $\wedge$ . The conventional characteristic of the $\wedge$ operator is illustrated as follows: 1) $f(x,y)$ is low when either input is low, and 2) $f(x,y)$ is high when both inputs are high. However, we associate each input with a nonnegative weight, implying the input with a zero weight should not affect the output. In other words, if a low input has a zero weight, it should not affect the output of $f(x,y)$ . Therefore, we require the $\wedge$ operator to exhibit the following characteristics: 1) $f(x,y)$ is low when both inputs are low, and 2) $f(x,y)$ is high when both inputs are high. Here we introduce a user-defined hyperparameter $\alpha \in [\frac{1}{2},1]$ to capture low vs. high: $x \in [0,1 - \alpha)$ represents low and $x \in [\alpha,1]$ represents high. According to the above characteristics, we have (Sen et al., 2022) + +$$ +f (x, y) \leq 1 - \alpha , \quad \forall x, y \in [ 0, 1 - \alpha), \tag {13} +$$ + +$$ +f (x, y) \geq \alpha , \quad \forall x, y \in [ \alpha , 1 ]. 
+$$ + +Here we follow a specific choice of $f$ by using a triangular norm ( $t$ -norm) and define the quantitative satisfaction function of $\wedge$ as (Riegel et al., 2020) + +$$ +p \left(\mathcal {C} ^ {\prime}, \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}}, t\right) = f \left(\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right)\right)\right), \tag {14} +$$ + +$$ +\text {s u b j e c t} \quad \beta - \sum_ {j = 1} ^ {2} w _ {j} (1 - \alpha) \geq \alpha , \beta - \sum_ {j = 1} ^ {2} w _ {j} \alpha \leq 1 - \alpha , \tag {15} +$$ + +where $f(z) = \max \{0, \min \{z, 1\}\}$ is introduced to clamp the truth value into the range of [0, 1]. + +# - Logical constraints for $\vee$ operator. + +By using De Morgan's law, we could derive the quantitative satisfaction function and the logical constraints for the $\lor$ operator with 2 inputs as follows: + +$$ +p \left(\mathcal {C} ^ {\prime}, \phi_ {1} ^ {w _ {1}} \vee \phi_ {2} ^ {w _ {2}}, t\right) = f \left(1 - \beta + \sum_ {j = 1} ^ {2} w _ {j} \left(p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right)\right)\right), \tag {16} +$$ + +$$ +\text {s u b j e c t} \quad 1 - \beta + \sum_ {j = 1} ^ {2} w _ {j} \alpha \geq \alpha , 1 - \beta + \sum_ {j = 1} ^ {2} w _ {j} (1 - \alpha) \leq 1 - \alpha . \tag {17} +$$ + +Here we show the characteristics of the activation functions for the $\wedge$ and $\vee$ operators using Figure 4. Figure 4(a) shows the truth value of the $\wedge$ operator with $\alpha = 0.7$ . Figure 4(b) shows the truth value of the $\wedge$ operator with $\alpha = 0.9$ . It can be distinctly observed that $f(x,y)$ is close to 0 when both $x$ and $y$ are low, and $f(x,y)$ is close to 1 when both $x$ and $y$ are high. In addition, the unconstrained region for $\alpha = 0.9$ is larger than the unconstrained region for $\alpha = 0.7$ . Figure 4(c) shows the truth value of the $\vee$ operator with $\alpha = 0.7$ . 
It is obvious that $f(x,y)$ is close to 0 when both $x$ and $y$ are low, and $f(x,y)$ is close to 1 when both $x$ and $y$ are high. + +In general, we could extend the quantitative satisfaction for the $\wedge$ and $\vee$ operators in (14) - (17) to $k$ -ary conjunction and $k$ -ary disjunction. The $k$ -ary conjunction formulation is expressed as follows. + +$$ +p \left(\mathcal {C} ^ {\prime}, \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}} \dots \wedge \phi_ {k} ^ {w _ {k}}, t\right) = f \left(\beta - \sum_ {j = 1} ^ {k} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right)\right)\right), \tag {18} +$$ + +![](images/d11ca93f13aa2d576cabacd06a21df8f552d4545ee7577c50b2397da4f4f2f16.jpg) +(a) + +![](images/44228cc4cb20f4bb5ebcf91b59b11946430cf8359c5cdc1bfbbcccf7866d5bec.jpg) +(b) +Figure 4: Plot of truth degree for (a) CLNN- $\wedge$ with $\alpha = 0.7$ , (b) CLNN- $\wedge$ with $\alpha = 0.9$ , (c) CLNN- $\vee$ with $\alpha = 0.7$ . + +![](images/1110140bea7cd3027c436d8e7f557f9ff7327030c05282067db9188a4a31941c.jpg) +(c) + +$$ +\text {s u b j e c t} \quad \beta - \sum_ {j = 1} ^ {k} w _ {j} (1 - \alpha) \geq \alpha , \beta - \sum_ {j = 1} ^ {k} w _ {j} \alpha \leq 1 - \alpha . \tag {19} +$$ + +The $k$ -ary disjunction formulation is expressed as follows. + +$$ +p \left(\mathcal {C} ^ {\prime}, \phi_ {1} ^ {w _ {1}} \vee \phi_ {2} ^ {w _ {2}} \dots \vee \phi_ {k} ^ {w _ {k}}, t\right) = f (1 - \beta + \sum_ {j = 1} ^ {k} w _ {j} \left(p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right)\right)), \tag {20} +$$ + +$$ +\text {s u b j e c t} \quad 1 - \beta + \sum_ {j = 1} ^ {k} w _ {j} \alpha \geq \alpha , 1 - \beta + \sum_ {j = 1} ^ {k} w _ {j} (1 - \alpha) \leq 1 - \alpha . \tag {21} +$$ + +With the above constraints, we can formulate the maximum likelihood estimation problem as + +$$ +\min - L L _ {l} \tag {22} +$$ + +$$ +s. t. 
\quad \forall \phi \in \Phi , \forall 1 \leq k \leq K _ {\phi} ^ {\wedge}, \beta_ {k} - \sum_ {i \in I _ {k}} w _ {i, k} (1 - \alpha) \geq \alpha , \beta_ {k} - \sum_ {i \in I _ {k}} w _ {i, k} \alpha \leq 1 - \alpha , \tag {23} +$$ + +$$ +\forall \phi \in \Phi , \forall 1 \leq k ^ {\prime} \leq K _ {\phi} ^ {\vee}, 1 - \beta_ {k ^ {\prime}} + \sum_ {i \in I _ {k ^ {\prime}}} w _ {i, k ^ {\prime}} \alpha \geq \alpha , 1 - \beta_ {k ^ {\prime}} + \sum_ {i \in I _ {k ^ {\prime}}} w _ {i, k ^ {\prime}} (1 - \alpha) \leq 1 - \alpha . \tag {24} +$$ + +In this paper, we set $\alpha = 0.5$ , thus the constraints in (19) become + +$$ +\sum_ {i = 1} ^ {k} w _ {i} \geq 2 \beta - 1, +$$ + +$$ +\sum_ {i = 1} ^ {k} w _ {i} \leq 2 \beta - 1, \tag {25} +$$ + +$$ +2 \beta - 1 \geq 0, +$$ + +$$ +w _ {i} \geq 0. +$$ + +Reformulating the above constraints, we have + +$$ +\sum_ {i = 1} ^ {k} w _ {i} = 2 \beta - 1, \tag {26} +$$ + +$$ +\beta \geq 0. 5, +$$ + +$$ +w _ {i} \geq 0. \tag {27} +$$ + +The above constraints hold for each conjunction operator in $\phi$ . Therefore, we can incorporate the constraints in (26) into the objective function, which becomes + +$$ +\min - L L _ {l} + \sum_ {k = 1} ^ {K _ {\phi} ^ {\wedge}} \left(\sum_ {i \in I _ {k}} w _ {i, k} - 2 \beta_ {k} + 1\right) ^ {2}, \tag {28} +$$ + +$$ +\text {s u b j e c t} w _ {i, k} \geq 0, \beta_ {k} \geq 0. 5, \forall i \in I _ {k}, \forall 1 \leq k \leq K _ {\phi} ^ {\wedge}, \forall \phi \in \Phi . \tag {29} +$$ + +Similarly, we propose a set of logical constraints for the $\lor$ operator as (21). If we set $\alpha = 0.5$ , the constraints in (21) become + +$$ +\sum_ {i = 1} ^ {k} w _ {i} \geq 2 \beta - 1, +$$ + +$$ +\sum_ {i = 1} ^ {k} w _ {i} \leq 2 \beta - 1, \tag {30} +$$ + +$$ +2 \beta - 1 \geq 0, +$$ + +$$ +w _ {i} \geq 0. +$$ + +Reformulating the above constraints, we have + +$$ +\sum_ {i _ {1}} ^ {k} w _ {i} = 2 \beta - 1, \tag {31} +$$ + +$$ +\beta \geq 0. 5. +$$ + +$$ +w _ {i} \geq 0. 
\tag {32} +$$ + +The above constraints hold for each disjunction operator in $\phi$ . Therefore, we can incorporate the constraints in (31) into the objective function. The maximum likelihood estimation problem then becomes + +$$ +\min - L L _ {l} + \sum_ {k = 1} ^ {K _ {\phi} ^ {\wedge}} \left(\sum_ {i \in I _ {k}} w _ {i, k} - 2 \beta_ {k} + 1\right) ^ {2} + \sum_ {k ^ {\prime} = 1} ^ {K _ {\phi} ^ {\vee}} \left(\sum_ {i \in I _ {k ^ {\prime}}} w _ {i, k ^ {\prime}} - 2 \beta_ {k ^ {\prime}} + 1\right) ^ {2}, \tag {33} +$$ + +subject to $w_{i,k}\geq 0,\beta_k\geq 0.5,\forall i\in I_k,\forall 1\leq k\leq K_\phi^{\wedge},\forall \phi \in \Phi ,$ + +$$ +w _ {i, k ^ {\prime}} \geq 0, \beta_ {k ^ {\prime}} \geq 0. 5, \forall i \in I _ {k ^ {\prime}}, \forall 1 \leq k ^ {\prime} \leq K _ {\phi} ^ {\vee}, \forall \phi \in \Phi . +$$ + +# B PROOF OF THEOREM 8 + +The activation function designed for the $\wedge$ operator satisfies the properties of nonimpact for zero weights, impact ordering, and monotonicity. Without loss of generality, we present the proof for the $\wedge$ operator connecting two clauses, which can be generalized to the $\wedge$ operator connecting $k$ -ary clauses. + +Proof 1 Here we present the proof for the activation function for the $\wedge$ operator satisfying each property mentioned above. + +- Nonimpact for zero weights. + +This means if $w_{j} = 0, j = 1,2$ , then $p(\mathcal{C}',\phi_j,t)$ should have no impact on $p(\mathcal{C}',\phi_1^{w_1}\wedge \phi_2^{w_2},t)$ . 
Without loss of generality, we suppose $w_{1} = 0$ , thus we have + +$$ +\begin{array}{l} p \left(\mathcal {C} ^ {\prime}, \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}}, t\right) = f (\beta - 0 \cdot (1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {1}, t\right)) - w _ {2} \cdot (1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {2}, t\right))), \tag {34} \\ = f \left(\beta - w _ {2} \cdot \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {2}, t\right)\right)\right), \\ \end{array} +$$ + +meaning $p(\mathcal{C}',\phi_1,t)$ has no impact on $p(\mathcal{C}',\phi_1^{w_1}\wedge \phi_2^{w_2},t)$ + +# - Impact Ordering + +This means the truth degree of subformula with higher weights has a greater impact on $p(\mathcal{C}', \phi_1^{w_1} \wedge \phi_2^{w_2}, t)$ . Mathematically, we need to prove that if $p(\mathcal{C}', \phi_1, t) = p(\mathcal{C}', \phi_2, t)$ and $w_1 \geq w_2$ , then + +$$ +\frac {\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}} , t\right)}{\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} , t\right)} \geq \frac {\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}} , t\right)}{\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {2} , t\right)}. \tag {35} +$$ + +As $f(x) = \max \{0, \min \{x, 1\}\}$ , we have + +$$ +\frac {d f}{d x} = \left\{ \begin{array}{l l} 0, & \text {i f} x < 0, \\ 1, & \text {i f} 0 < x < 1, \\ 0, & \text {i f} x > 1. \end{array} \right. 
\tag {36} +$$ + +If $\beta -\sum_{j = 1}^{2}w_{j}(1 - p(\mathcal{C}^{\prime},\phi_{j},t)) < 0$ or $\beta -\sum_{j = 1}^{2}w_{j}(1 - p(\mathcal{C}^{\prime},\phi_{j},k)) > 1$ , then we have + +$$ +\frac {\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}} , t\right)}{\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} , t\right)} = \frac {\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}} , t\right)}{\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {2} , t\right)} = 0. \tag {37} +$$ + +Also, if $0 < \beta -\sum_{j = 1}^{2}w_{j}(1 - p(\mathcal{C}^{\prime},\phi_{j},t)) < 1$ , then we have + +$$ +\frac {\partial \left(\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime} , \phi_ {j} , t\right)\right)\right)}{\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} , t\right)} = w _ {1} \left(\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right)\right)\right), \tag {38} +$$ + +and + +$$ +\frac {\partial \left(\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime} , \phi_ {j} , t\right)\right)\right)}{\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {2} , t\right)} = w _ {2} \left(\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right)\right)\right). \tag {39} +$$ + +As $w_{1}\geq w_{2}$ , the following holds: + +$$ +\frac {\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}} , t\right)}{\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} , t\right)} \geq \frac {\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {1} ^ {w _ {1}} \wedge \phi_ {2} ^ {w _ {2}} , t\right)}{\partial p \left(\mathcal {C} ^ {\prime} , \phi_ {2} , t\right)}, \tag {40} +$$ + +which proves the impact ordering property holds. + +# - Monotonicity. 
+ +This means $p(\mathcal{C}', \phi_1^{w_1} \wedge \phi_2^{w_2}, t)$ increases monotonically over $p(\mathcal{C}', \phi_j, t)$ , i.e. + +$$ +f \left(\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right)\right)\right) \leq f \left(\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right) - d\right)\right) \text {f o r} d \geq 0. \tag {41} +$$ + +First, note that $\beta -\sum_{j = 1}^{2}w_{j}(1 - p(\mathcal{C}^{\prime},\phi_{j},t))$ can be rewritten as + +$$ +\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right)\right) = \beta - w _ {1} - w _ {2} + w _ {1} p \left(\mathcal {C} ^ {\prime}, \phi_ {1}, t\right) + w _ {2} p \left(\mathcal {C} ^ {\prime}, \phi_ {2}, t\right). \tag {42} +$$ + +This implies $f(\beta - \sum_{j=1}^{2} w_j (1 - p(\mathcal{C}', \phi_j, t)))$ is monotonically increasing over $p(\mathcal{C}', \phi_1, t)$ and $p(\mathcal{C}', \phi_2, t)$ . Also, from the proof of impact ordering we know $f(x) = \max \{0, \min \{x, 1\}\}$ is monotonically nondecreasing, we can show that + +$$ +f \left(\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right)\right)\right) \leq f \left(\beta - \sum_ {j = 1} ^ {2} w _ {j} \left(1 - p \left(\mathcal {C} ^ {\prime}, \phi_ {j}, t\right) - d\right)\right), d \geq 0. \tag {43} +$$ + +Thus the property of monotonicity is satisfied. + +# C EXPERIMENT RESULTS OF SYNTHETIC DATASETS + +Dataset Generation. In the experiments on synthetic datasets, we manually generate 3 synthetic datasets considering different settings, where the details and results for the first synthetic dataset is reported in Section 4.2. Each setting considers a different order representation, different number of event labels or different intensity of causal event labels. 
+ +![](images/6615481611b2355c10d747efa0377f49ce26dac2c3075e65e67373d9e97fbe23.jpg) +Figure 5: Model structure of $\hat{\phi}_1$ for generating the first synthetic dataset. + +# C.1 SYNTHETIC DATASET-1 (SYN-1). + +Generation process. The first synthetic dataset contains 4 event labels: $A, B, C$ , and $D$ , where $D$ is the event for prediction, and $A, B, C$ are causal events. The wCL formula used to generate event $D$ in the first synthetic dataset is set as + +$$ +\hat {\phi} _ {1} = \left(c _ {A} - c _ {B} > 1\right) ^ {1} \wedge \left(c _ {A} - c _ {C} > 3\right) ^ {1}, \tag {44} +$$ + +whose unweighted version reads as "If $A$ happens before $B$ for at least 1 time unit and $A$ happens before $C$ for at least 3 time units, then $D$ will happen." + +Here we consider event labels $A, B, C$ as free predicates, whose occurrences are generated by a homogeneous Poisson process. The homogeneous intensity rate for $A, B, C$ are set as $\lambda_A = 0.2$ , $\lambda_B = 0.2$ , and $\lambda_C = 0.2$ . The algorithm used to generate instances of $A, B, C$ is described as Algorithm 1 (Chen, 2016). + +Algorithm 1 Simulation of a homogeneous Poisson process with intensity rate $\lambda$ . +Input: Intensity rate $\lambda$ , simulation horizon $T$ +Output: Occurrence time stamps $\mathcal{T} = \{t_k\}$ +1: Initialize $n = 0,t_0 = 0$ . +2: while True do +3: Generate $u\sim$ uniform(0, 1); +4: Let $w = -ln(u) / \lambda$ . +5: Set $t_{n + 1} = t_n + w$ . +6: if $t_{n + 1} > T$ then +7: return $\mathcal{T} = \{t_k\}_{k = 1,2,\dots,n}$ . +8: else +9: Set $n = n + 1$ . +10: end if +11: end while + +With the above algorithm, we can generate the occurrences of event labels $A, B$ , and $C$ . Next, we build a CLNN for $\hat{\phi}_1 = (c_A - c_B > 1)^1 \wedge (c_A - c_C > 3)^1$ to calculate the conditional intensity rate $\lambda_{D|\hat{\phi}_1}$ , whose model structure is shown in Figure 5. 
After obtaining $\lambda_{D|\hat{\phi}_1}(t)$ , we could use Algorithm 2 (Chen, 2016) to generate the occurrence of $D$ .
+
+Results. The rules learned by CLNN, TELLER, and OGEM-tab on the first synthetic dataset are presented in Table 5, where the paired order predicate among the two candidates with the highest
+
+Algorithm 2 Simulation of an inhomogeneous Poisson process with intensity rate $\lambda(t)$ .
+Input: intensity rate $\lambda (t)$ , simulation horizon $T$
+Output: Occurrence time stamps $\mathcal{T} = \{t_k\}$
+1: Initialize $n = m = 0, t_0 = s_0 = 0, \bar{\lambda} = \sup_{0\leq t\leq T}\lambda(t)$.
+2: while $s_m < T$ do
+3: Generate a uniform random variable $u\sim \mathrm{uniform}(0,1)$.
+4: Let $w = -\ln u / \bar{\lambda}$ .
+5: Set $s_{m + 1} = s_m + w$ .
+6: Generate $D\sim \mathrm{uniform}(0,1)$ .
+7: if $D\leq \lambda (s_{m + 1}) / \bar{\lambda}$ then
+8: $t_{n + 1} = s_{m + 1}$ .
+9: $n = n + 1$ .
+10: end if
+11: $m = m + 1$ .
+12: if $t_n\leq T$ then
+13: return $\{t_k\}_{k = 1,2,\dots,n}$
+14: else
+15: return $\{t_k\}_{k = 1,2,\dots,n - 1}$
+16: end if
+17: end while
+
+
DatasetSyn-1
N (# events)N = 4, L = {A, B, C, D}
Ground truthφ1 = (cA - cB > 1)1 ∧ (cA - cC > 3)1
CLNN's rule(cA - cB > 1.21)1.52 ∧ (cA - cC > 3.00)1.41 ∧ (cA - cD > 0.82)0.33 ∧ (cB - cC > 4.33)0 ∧ (cB - cD > 10.69)0 ∧ (cD - cC > -6.57)0.16
TELLER's ruleA before D, B before D, C before D, A before D and C before D
OGEM-tab's ruleExcitation: [B], [C], [C, B], [B, C], [A, C, B], [A, B, C]Inhibitory: [A], [B, A], [B, A, C], [C, B, A], [A, B], [A, C], [B, C, A], [C, A, B], [C, A]
+ +Table 5: Comparison of rule discovery for CLNN and TELLER on the Syn-1 dataset. + +weight is presented. It can be clearly observed that by truncating the predicates with small weights, we could obtain the formula as + +$$ +\phi_ {1} = \left(c _ {A} - c _ {B} > 1. 2 1\right) ^ {1. 5 2} \wedge \left(c _ {A} - c _ {C} > 3. 0 0\right) ^ {1. 4 1}, \tag {45} +$$ + +which matches well with the ground-truth rule. However, TELLER cannot capture the paired order representation between $A$ and $B$ or $A$ and $C$ . OGEM-tab captures the order representation $[A, B]$ and $[A, C]$ as inhibitory causes, which contradicts the ground-truth rule. + +# C.2 SYNTHETIC DATASET-2 (SYN-2). + +Generation Process. The second synthetic dataset contains 5 event labels: $A, B, C, D$ and $E$ , where $E$ is the event for prediction, and $A, B, C, D$ are causal events. The wCL formula used to generate the occurrence of event $E$ in the second synthetic dataset is set as + +$$ +\hat {\phi} _ {2} = \left(c _ {A} - c _ {B} > 0. 5\right) ^ {1} \wedge \left(c _ {A} - c _ {C} > 1. 5\right) ^ {1} \wedge \left(c _ {C} - c _ {D} > 2\right) ^ {1}, \tag {46} +$$ + +whose unweighted version reads as "If $A$ happens before $B$ for at least 0.5 time units, $A$ happens before $C$ for at least 1.5 time units, and $C$ happens before $D$ for at least 2 time units, then $E$ will happen." + +![](images/5c9eb772a4b858637daaf47666b26dd2ef522b35246e1a44e155613c66b870c4.jpg) +Figure 6: Model structure of $\hat{\phi}_2$ for generating the second synthetic dataset. + +The occurrence of events $A, B, C$ and $D$ are generated using Algorithm 1, in which $\lambda_A = \lambda_B = \lambda_C = \lambda_D = 0.2$ . After obtaining the occurrence of $A, B, C$ and $D$ , we simulate the generation of event label $E$ using Algorithm 2, in which the intensity rate $\lambda_{E|\hat{\phi}_2}(t)$ is computed using the model shown in Figure 6. + +Results. 
The rules learned by CLNN, TELLER and OGEM-tab on the second synthetic dataset are presented in Table 6, where the paired order predicate with the highest weight is presented. It can be clearly observed that by truncating the predicates with small weights, CLNN learns a wCL formula as:
+
+$$
+\phi_{2} = \left(c_{A} - c_{B} > 0.77\right)^{1.27} \wedge \left(c_{A} - c_{C} > 2.09\right)^{1.15} \wedge \left(c_{C} - c_{D} > 2.60\right)^{1.06}, \tag{47}
+$$
+
+whose order representation matches well with the ground-truth rule. Nevertheless, TELLER's rule only captures the ordering between $A$ , $B$ , $C$ and $E$ , whereas the orderings between $A$ and $B$ , $A$ and $C$ , and $C$ and $D$ are not learned. OGEM-tab's rules can only capture that event label $D$ can excite the occurrence of event label $E$ , whereas they are not able to capture the dependence of event label $E$ 's occurrence on the order relations between $A$ and $B$ , $A$ and $C$ , and $C$ and $D$ .
+
+
DatasetSyn-2
N (# events)N=5, L={A,B,C,D,E}
Ground truthφ2=(cA-cB>0.5)1∧(cA-cC>1.5)1∧(cC-cD>2)1
CLNN's rule(cA-cB>0.77)1.27∧(cA-cC>2.09)1.15∧((cA-cD)>−5.00)0.25∧((cA-cE)>−2.74)0.09∧(cB-cC>−9.31)0.02∧(cB-cD>−8.54)0.08∧(cB-cE>2.07)0∧((cC-cD)>2.60)1.06∧((cC-cE)>−4.27)0.03∧((cD-cE)>1.17)0.07
TELLER's ruleA before E, B before E, A and B before E, A and C before E
OGEM-tab's ruleExcitation: [D], [D,E], [E], [E,D]Inhibitory: [D,A], [A], [A,D], [A,D,E], [E,D,A], [D,A,E], [A,E], [E,A], [D,E,A], [A,E,D], [E,A,D]

+
+Table 6: Comparison of rule discovery for CLNN and TELLER on the Syn-2 dataset.
+
+# C.3 SYNTHETIC DATASET 3 (SYN-3).
+
+The third synthetic dataset is generated using a more interesting scheme by combining the generation schemes of the first synthetic dataset and the second synthetic dataset. The third synthetic dataset
+
+![](images/940514745458e4b682db12a6e7789c21a874a28f6e3a43dcfc1c2c79cb00a3da.jpg)
+Figure 7: Model structure of $\hat{\phi}_{3,1}$ for generating the occurrence of $D$ in the Syn-3 dataset.
+
+![](images/b081e37e363fe8a10159d63a78d5d0118d701b9c610e183152ab23a0ad8a4a74.jpg)
+Figure 8: Model structure of $\hat{\phi}_{3,2}$ for generating the occurrence of $E$ in the Syn-3 dataset.
+
+includes five event labels: $A, B, C, D$ and $E$ . Here we consider $A, B$ , and $C$ as the causal events for the occurrence of $D$ , and $A, B, C$ , and $D$ as the causal events for the occurrence of $E$ . The occurrence of events $A, B, C$ are generated using Algorithm 1, in which $\lambda_{A} = 0.2$ , $\lambda_{B} = 0.2$ , and $\lambda_{C} = 0.2$ . The wCL formula used to generate the occurrence of event $D$ is set as
+
+$$
+\hat {\phi} _ {3, 1} = \left(c _ {B} - c _ {A} > - 2\right) ^ {1} \wedge \left(c _ {C} - c _ {A} > - 5\right) ^ {1}, \tag {48}
+$$
+
+whose unweighted version reads as "If $A$ happens before $B$ for less than 2 time units, and $A$ happens before $C$ for less than 5 time units, then $D$ will happen." The generation of $D$ 's occurrence follows Algorithm 2, where $\lambda_{D|\hat{\phi}_{3,1}}(t)$ is computed using the model shown in Figure 7. We call the third synthetic dataset at this step as Syn-3.1.
+
+After obtaining the occurrences of events $A, B, C$ , and $D$ , we could simulate the occurrence of $E$ using the following formula:
+
+$$
+\hat {\phi} _ {3, 2} = \left(c _ {B} - c _ {A} > - 5\right) ^ {1} \wedge \left(c _ {C} - c _ {B} > - 4\right) ^ {1} \wedge \left(c _ {D} - c _ {C} > - 3\right) ^ {1}. 
\tag {49} +$$ + +Similarly, the generation of $E$ 's occurrence follows Algorithm 2, where the intensity rate $\lambda_{E|\hat{\phi}_{3,2}}(t)$ is computed using the model shown in Figure 8. We call the third synthetic dataset at this step as Syn-3.2. + +# Results. + +The rules learned by CLNN, TELLER, and OGEM-tab on the cause of event $D$ in the third synthetic dataset are presented in Table 7, where the paired order predicate with the highest weight among the two candidates is reported. It can be clearly observed that by truncating the predicates with small + +weights, CLNN learns a wCL formula as + +$$ +\phi_ {3, 1} = \left(c _ {B} - c _ {A} > - 1. 8 5\right) ^ {1. 7 2} \wedge \left(c _ {C} - c _ {A} > - 3. 9 0\right) ^ {1. 5 9}, \tag {50} +$$ + +whose order representation match well with the ground-truth rule. On the other hand, TELLER's rule only reveals the temporal relation between event labels $A$ , $B$ , $C$ and $D$ , but it does not capture the temporal relation between event labels $A$ and $B$ or $A$ and $C$ . In addition, we could observe that OGEM-tab does not capture that $C$ is a parent event of $D$ . + +
DatasetSyn-3.1
N (# events)N = 5, L = {A, B, C, D, E}
Ground truth\(\hat{\phi}_{3,1} = (c_B - c_A > -2)^1 \wedge (c_C - c_A > -5)^1\)
CLNN's rule\((c_B - c_A > -1.85)^{1.72} \wedge (c_C - c_A > -3.90)^{1.59} \wedge ((c_D - c_A) > -16.25)^{0.33} \wedge ((c_C - c_B) > -3.01)^0 \wedge (c_D - c_B > -7.37)^{0.02} \wedge (c_D - c_C > -7.55)^0\)
TELLER's ruleA before D, B before D, C before D
OGEM-tab's ruleExcitation: [A], [A, B, D], [B, D, A], [D, A], [D, A, B], [B, A], [A, D], [D], [B, A, D], [D, B, A]Inhibitory: [A, B], [B, D], [B], [A, D, B], [D, B]

+
+The rules learned by CLNN, TELLER, and OGEM-tab on the cause of event $E$ in the third synthetic dataset are presented in Table 8, in which the discrete wCL formula learned by CLNN is
+
+$$
+\phi_ {3, 2} = \left(c _ {B} - c _ {A} > - 3. 9 4\right) ^ {1. 4 9} \wedge \left(c _ {C} - c _ {B} > - 3. 0 2\right) ^ {2. 0 3} \wedge \left(\left(c _ {D} - c _ {C}\right) > - 2. 0 0\right) ^ {1. 9 2}. \tag {51}
+$$
+
+It is obvious that $\phi_{3,2}$ is able to learn the temporal relation between $A$ and $B$ , $B$ and $C$ , and $C$ and $D$ . However, TELLER's rules only reflect the temporal relation between $A$ , $B$ , $C$ and $E$ , which cannot give the information about the temporal relation between $A$ and $B$ , or $B$ and $C$ , or $C$ and $D$ . OGEM-tab's rule indicates that it considers event labels $A$ , $D$ , $E$ as the parent events of $E$ , which does not match with the ground-truth parent set.
+
+Table 7: Comparison of rule discovery of ${\phi }_{3,1}$ for CLNN and TELLER on the Syn-3.1 dataset.
+
+
DatasetSyn-3.2
N (# events)N=5, L={A,B,C,D,E}
Ground truth\(\hat{\phi}_{3,2}=(c_{B}-c_{A}> -5)^{1}\wedge(c_{C}-c_{B}> -4)^{1}\wedge(c_{D}-c_{C}> -3)^{1}\)
CLNN's rule\((c_{B}-c_{A}> -3.94)^{1.49}\wedge(c_{C}-c_{A}> -9.12)^{0.25}\wedge((c_{D}-c_{A})> -1.42)^{0.13}\wedge((c_{E}-c_{A})> -3.88)^{0.15}\wedge(c_{C}-c_{B}> -3.02)^{2.03}\wedge(c_{D}-c_{B}> -6.27)^{0.02}\wedge(c_{E}-c_{B}> -7.30)^{0.04}\wedge((c_{D}- c_{C})> -2.00)^{1.92}\wedge((c_{E}-c_{C})> -5.30)^{0.09}\wedge((c_{E}-c_{D})> -1.57)^{0.01}\)
TELLER's ruleA before E, B before E, C before E
OGEM-tab's ruleExcitation: [A,D], [D,A], [D,E], [E], [A,D, E], [D,E,A], [E,A], [A,E], [E,A,D], [A,E, D], [D,A,E], [E,D,A]Inhibitory: [A], [D], [E,D]
+ +Table 8: Comparison of rule discovery of ${\phi }_{3,2}$ for CLNN and TELLER on the Syn-3.2 dataset. + +# C.4 QUANTITATIVE COMPARISON OF CLNN'S RULES WITH GROUND TRUTH + +To quantitatively evaluate the difference between the ground-truth rules and the rules learned by CLNN, we adopt the Jaccard similarity score to assess the learned formulas against the ground truth. Let $\mathcal{G}$ denote the set of paired ordering representations from the ground-truth rule, and $\mathcal{C}$ denote the set of paired ordering representations from the learned rules, the Jaccard similarity score is calculated as $J = \frac{|\mathcal{C} \cap \mathcal{G}|}{|\mathcal{C} \cup \mathcal{G}|}$ . For TELLER and OGEM-tab, the ordering representations are extracted + +![](images/e778c73031d538bd262a2304c138c0361986948ec160d092a3b6c088d78183f6.jpg) +(a) + +![](images/96858fce8cd5a11bbd1adfbb7d71a973c286ee8decdbab1100967a9142becc93.jpg) +(b) + +![](images/a6c6bd22c64fc1f1fc65224884fde0d5cd2e30493bdc294bb501e98d1361ff80.jpg) +(c) + +![](images/1511e755f98306b72c0769194b5a539ac0f64fffcc66182d9520ef4267f0858f.jpg) +(d) +Figure 9: Comparison of ground-truth rules with CLNN's rules in terms of Jaccard similarity score for a) Syn-1, b) Syn-2, c) Syn-3.1, d) Syn-3.2. + +from the excitation rules. The comparison of Jaccard similarity score for the synthetic datasets is shown in Figure 9, where the Jaccard similarity score of 0 is manually set to the minimum threshold 0.05 for clarity purposes. It is clearly observed that the Jaccard similarity scores for CLNN is higher than the ones for TELLER or OGEM, implying the rules discovered by CLNN are more consistent with the ground truth. + +# C.5 STABILITY ANALYSIS OF CLNN'S RULES WITH RESPECT TO INITIALIZATION + +To further validate the model's stability in learning wCL rules, different parameter initialization methods are carried out, including: + +1. 
rand - parameter initialization as random numbers from a uniform distribution on the interval [0, 1); +2. randn - random numbers from a normal distribution with mean 0 and variance 1; +3. ones - constant values of 1; +4. xavier - random numbers from a uniform distribution on the interval $[-1/\sqrt{n}, 1/\sqrt{n}]$ , where $n$ is the dimension of the parameter. + +The rules learned by CLNN for the above parameter initializations are summarized in Table 9. By inspecting the rules for different initialization methods, it is clear that CLNN can still recover the correct paired order representations even if initializing the learning process from a different position. In the meantime, the logic formulas learned by CLNN are stable as the variance of learned parameters is relatively small. + +
DatasetInitializationRules
Syn - 1Ground truth\(\hat{\phi}=(c_{A}-c_{B}>1)^{1}\wedge(c_{A}-c_{C}>3)^{1}\)
rand\(\phi=(c_{A}-c_{B}>1.21)^{1.52}\wedge(c_{A}-c_{C}>3.00)^{1.41}\)
randn\(\phi=(c_{A}-c_{B}>1.21)^{1.58}\wedge(c_{A}-c_{C}>3.32)^{1.56}\)
ones\(\phi=(c_{A}-c_{B}>1.17)^{1.59}\wedge(c_{A}-c_{C}>3.14)^{1.32}\)
xavier\(\phi=(c_{A}-c_{B}>1.12)^{1.45}\wedge(c_{A}-c_{C}>3.20)^{1.33}\)
Syn - 2Ground truth\(\hat{\phi}=(c_{A}-c_{B}>0.5)^{1}\wedge(c_{A}-c_{C}>1.5)^{1}\wedge(c_{C}-c_{D}>2)^{1}\)
rand\(\phi=(c_{A}-c_{B}>0.77)^{1.27}\wedge(c_{A}-c_{C}>2.09)^{1.15}\wedge((c_{C}-c_{D})>2.60)^{1.06}\)
randn\(\phi=(c_{A}-c_{B}>0.80)^{1.97}\wedge(c_{A}-c_{C}>1.92)^{1.62}\wedge((c_{C}-c_{D})>1.74)^{1.45}\)
ones\(\phi=(c_{A}-c_{B}>1.03)^{1.63}\wedge(c_{A}-c_{C}>1.92)^{1.50}\wedge((c_{C}-c_{D})>2.03)^{1.44}\)
xavier\(\phi=(c_{A}-c_{B}>0.97)^{1.92}\wedge(c_{A}-c_{C}>2.07)^{1.63}\wedge((c_{C}-c_{D})>1.97)^{1.62}\)
Syn - 3.1Ground truth\(\hat{\phi}=(c_{B}-c_{A}>-2)^{1}\wedge(c_{C}-c_{A}>-5)^{1}\)
rand\(\phi=(c_{B}-c_{A}>-1.85)^{1.72}\wedge(c_{C}-c_{A}>-3.90)^{1.59}\)
randn\(\phi=(c_{B}-c_{A}>-1.98)^{1.51}\wedge(c_{C}-c_{A}>-3.89)^{1.68}\)
ones\(\phi_{3,1}=(c_{B}-c_{A}>-1.94)^{1.84}\wedge(c_{C}-c_{A}>-3.68)^{2.33}\)
xavier\(\phi_{3,1}=(c_{B}-c_{A}>-1.89)^{1.54}\wedge(c_{C}-c_{A}>-3.92)^{1.62}\)
Syn - 3.2Ground truth\(\hat{\phi}=(c_{B}-c_{A}>-5)^{1}\wedge(c_{C}-c_{B}>-4)^{1}\wedge(c_{D}-c_{C}>-3)^{1}\)
rand\(\phi=(c_{B}-c_{A}>-3.94)^{1.49}\wedge(c_{C}-c_{B}>-3.02)^{2.03}\wedge((c_{D}-c_{C})> -2.00)^{1.92}\)
randn\(\phi=(c_{B}-c_{A}>-3.79)^{1.71}\wedge(c_{C}-c_{B}>-3.04)^{1.89}\wedge((c_{D}-c_{C})> -1.68)^{1.65}\)
ones\(\phi=(c_{B}-c_{A}>-3.53)^{1.66}\wedge(c_{C}-c_{B}>-3.09)^{1.88}\wedge((c_{D}-c_{C})> -1.25)^{1.81}\)
xavier\(\phi=(c_{B}-c_{A}>-3.71)^{1.53}\wedge(c_{C}-c_{B}>-3.09)^{2.04}\wedge((c_{D}-c_{C})> -1.86)^{1.73}\)
+ +# C.6 ANALYSIS OF LOGICAL CONSTRAINTS ON THE LL + +In this part, we investigate the effect of the interpretability using an experiment of the impact of logical constraints on the model's performance. The log-likelihood on the synthetic datasets for CLNN with and without logical constraints is summarized in Table 10. Table 10 demonstrates that the log-likelihood for CLNN with logical constraints is higher than the log-likelihood for CLNN without constraints, implying that interpretability (logical constraints) is helpful to improve the performance. + +Table 9: Comparison of rules learned by CLNN for different parameter initialization methods. + +
DatasetCLNN with constraintsCLNN without constraints
Syn - 1-7821-8716
Syn - 2-6075-6942
Syn - 3.1-10898-11583
Syn - 3.2-10919-11230
+ +Table 10: Comparison of LL for CLNN with and without logical constraints. + +# D EXPERIMENT RESULTS OF REAL-WORLD DATASETS + +# D.1 LINKEDIN DATASET + +The LinkedIn dataset is a collection of job hopping records between 82 IT companies of 3,000 LinkedIn users. Each event stream represents a user's check-in time stamps for different companies or role changes within the same company. Here we select 1000 users' event streams to compose the dataset by filtering out the event streams with uncommon companies, resulting in 10 event labels: $\mathcal{L} = \{A,B,C,D,E,F,G,H,I,J\}$ . Here we set the number of formulas as 5, i.e., $\Phi = \{\phi_1,\phi_2,\phi_3,\phi_4,\phi_5\}$ , each of which embodies a model structure shown in Figure 2(a) and CLNN aims to learn the parameters for each formula. The weight parameters in the paired order cell or the singleton order cell are initialized as random variables following a Gaussian distribution, and the bias terms of conjunction or disjunction operators are initialized as 1. The architecture weights are initialized as random variables following a Gaussian distribution, and the formula impact weights and bias are initialized as Gaussian random variables. The detailed log-likelihood for each event label is summarized in Table 11. + +
Event LabelLog-likelihood
A-180.59
B-177.80
C-89.49
D-140.31
E-132.83
F-76.63
G-106.23
H-103.33
I-95.51
J-125.45
+ +# D.2 MIMIC II DATASET + +MIMIC II dataset is obtained from the intensive care unit research database that consists of 25,328 intensity care unit stays. The records include laboratory data, therapeutic intervention profiles such as nursing progress notes, discharge summaries and others. Here we restrict the event types to the diagnosis of patients and filter out the shorter event sequences with few visits, ending up with 650 patients and 15 event labels: $\mathcal{L} = \{1,2,8,9,11,12,14,20,21,22,23,26,27,42,47\}$ . Similar to the setting for the LinkedIn dataset, where the initialization of parameters follow the same setting as the LinkedIn dataset. The detailed log-likelihood for each event label is presented in Table 12. + +Table 11: Log likelihood for each event label in the LinkedIn dataset. + +
Event LabelLog-likelihood
1-72.14
2-62.33
8-5.98
9-51.34
11-43.64
12-25.81
14-69.73
20-5.96
21-6.08
22-10.47
23-10.64
26-27.08
27-27.42
42-5.95
47-10.54
+ +Table 12: Log likelihood for each event label in the MIMIC II dataset. + +# D.3 STACK OVERFLOW DATASET + +Stack Overflow is a question-and-answer website spanning a wide range of domains. A badge rewarding scheme is exploited to encourage users to participate in the questioning and answering activities. The badge system of Stack Overflow comprises 81 types of non-topical badges, including the badges that can be awarded only once and the badges that can be awarded several times. The dataset in (Du et al., 2016) was obtained by first filtering out the badges that can be awarded only once, then restricting to the users who have acquired at least 40 badges from 2012-01-01 to 2014-01-01, from which the badges have been awarded more than 100 times are selected as the determinate dataset. Our dataset was acquired by retaining the event streams with one or more of the 20 types of specified badges and then randomly sampling 1000 users to obtain 1000 event streams. The detailed log-likelihood for each event label in the Stack Overflow dataset is summarized in Table 13. + +
Event LabelLog-likelihood
1-3791
2-1451
3-538
4-17656
5-3574
6-3559
7-1381
8-1330
9-10961
10-1105
11-189
12-2012
13-673
14-1340
15-406
16-117
17-186
18-330
19-282
20-100
+ +Table 13: Log likelihood for each event label in the Stack Overflow dataset. + +
DatasetCLNN with SOPCLNN without SOP
LinkedIn-1228-1344
MIMIC II-436-480
Stack Overflow-50981-51195
+ +Table 14: Comparison of log-likelihood for CLNN with and without SOP on the real-world datasets. + +# D.4 ANALYSIS OF EXPRESSIVENESS ON MODEL'S PERFORMANCE + +In this part, we conduct an experiment by training the CLNN without the singleton order cell (SOC) on real-world datasets to show the effectiveness of the singleton order predicates. The comparison of log-likelihood for CLNN with SOC and CLNN without SOC is summarized in Table 14. As evidenced by Table 14, the log-likelihood of CLNN with SOP is higher than the log-likelihood of CLNN without SOP, meaning enriching the expressiveness of wCL formulas can better explain the generative mechanism of events. \ No newline at end of file diff --git a/2023/Weighted Clock Logic Point Process/images.zip b/2023/Weighted Clock Logic Point Process/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ac2453df1ac27e6bf163ed73caa1e0b6cdc705fb --- /dev/null +++ b/2023/Weighted Clock Logic Point Process/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:128a625c4cda5aebcae955ed4f8baad439f863448f8b3688974e7c6247994986 +size 1279444 diff --git a/2023/Weighted Clock Logic Point Process/layout.json b/2023/Weighted Clock Logic Point Process/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e7d23fcc91026a44fb715cc2b68c9478043f2c26 --- /dev/null +++ b/2023/Weighted Clock Logic Point Process/layout.json @@ -0,0 +1,25561 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 424, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 424, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 424, + 97 + ], + "type": "text", + "content": "WEIGHTED CLOCK LOGIC POINT PROCESS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "spans": [ + { + "bbox": [ + 139, + 118, + 472, 
+ 144 + ], + "type": "text", + "content": "Ruixuan Yan" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "text", + "content": ", Yunshi Wen" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "text", + "content": ", Debarun Bhattacharjya" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "text", + "content": ", Ronny Luss" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "text", + "content": ", Tengfei Ma" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "text", + "content": ", Achille Fokoue" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "text", + "content": ", and Agung Julius" + }, + { + "bbox": [ + 139, + 118, + 472, + 144 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 233, + 152, + 376, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 152, + 376, + 175 + ], + "spans": [ + { + "bbox": [ + 233, + 152, + 376, + 175 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 233, + 152, + 376, + 175 + ], + "type": "text", + "content": "Rensselaer Polytechnic Institute \n" + }, + { + "bbox": [ + 233, + 152, + 376, + 175 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 233, + 152, + 376, + 175 + ], + "type": "text", + "content": "IBM T.J. 
Watson Research Center" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 276, + 195, + 335, + 207 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 195, + 335, + 207 + ], + "spans": [ + { + "bbox": [ + 276, + 195, + 335, + 207 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 220, + 471, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 220, + 471, + 376 + ], + "spans": [ + { + "bbox": [ + 140, + 220, + 471, + 376 + ], + "type": "text", + "content": "Datasets involving multivariate event streams are prevalent in numerous applications. We present a novel framework for modeling temporal point processes called clock logic neural networks (CLNN) which learn weighted clock logic (wCL) formulas as interpretable temporal rules by which some events promote or inhibit other events. Specifically, CLNN models temporal relations between events using conditional intensity rates informed by a set of wCL formulas, which are more expressive than related prior work. Unlike conventional approaches of searching for generative rules through expensive combinatorial optimization, we design smooth activation functions for components of wCL formulas that enable a continuous relaxation of the discrete search space and efficient learning of wCL formulas using gradient-based methods. Experiments on synthetic datasets manifest our model's ability to recover the ground-truth rules and improve computational efficiency. In addition, experiments on real-world datasets show that our models perform competitively when compared with state-of-the-art models." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 387, + 320, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 387, + 320, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 320, + 399 + ], + "type": "text", + "content": "1 INTRODUCTION AND RELATED WORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 412, + 506, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 412, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 412, + 506, + 545 + ], + "type": "text", + "content": "Multivariate event streams are emerging types of data that involve occurrences of different types of events in continuous time. Event streams are observed in a wide range of applications, including but not limited to finance (Bacry et al., 2015), politics (O'Brien, 2010), system maintenance (Gunawardana et al., 2011), healthcare (Weiss & Page, 2013), and social networks (Farajtabar et al., 2015). As opposed to time series data that typically comprises continuous-valued variables evolving in regular discrete time stamps, event streams involve events occurring irregularly and asynchronously in continuous time. Modeling the dynamics in event streams is important for a wide range of scientific and industrial processes, such as predicting the occurrence of events of interest or understanding why some deleterious events occur so as to possibly prevent their occurrence. A (multivariate) temporal point process (TPP) provides a formal mathematical framework for representing event streams, where a conditional intensity rate for each event measures its occurrence rate at any time given the historical events in the stream (Daley & Vere-Jones, 2003; Aalen et al., 2008)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 550, + 506, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 506, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 506, + 684 + ], + "type": "text", + "content": "There has been a proliferation of research around TPPs in recent years, particularly around the use of neural networks for modeling conditional intensity rates as a function of historical occurrences (Du et al., 2016; Mei & Eisner, 2017; Xiao et al., 2017; Xu et al., 2017; Gao et al., 2020; Zhang et al., 2020; Zuo et al., 2020). One stream of research studies graphical event models (GEMs) as a compact and interpretable graphical representation for TPPs, where the conditional intensity rate for any particular event depends only on the history of a subset of the events (Didelez, 2008; Gunawardana & Meek, 2016). While any TPP can be represented as a GEM, various models make assumptions about the parametric form of conditional intensity rates for the sake of learnability, for instance that rates are piece-wise constant with respect to occurrences within historical windows (Gunawardana et al., 2011; Bhattacharjya et al., 2018). Ordinal GEMs(OGEM) (Bhattacharjya et al., 2020; 2021) are a recent model from this family where a conditional intensity rate depends on the order in which parent events occur within the most recent historical time period." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "type": "text", + "content": "A temporal logic point process (TLPP) framework was proposed as an alternate way to lend some interpretability to TPPs by modeling intensity rates using temporal logic rules (Li et al., 2020). 
Although the initial work pre-specified temporal logic rules, recent work has introduced a temporal logic rule learner (TELLER) for automatically discovering rules (Li et al., 2021). There is however" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "content": "the issue of scalability since TELLER exploits an expensive branch-and-price algorithm to search for temporal logic rules in a discrete space. Another important limitation of this work is that TELLER's rules are not informative enough to explain how the interval length between ordered events impacts the conditional intensity rate. For instance, while predicting the occurrence of diabetes, the rule that \"insulin injection happens 20 minutes before eating meal\" is more informative and accurate in predicting \"blood glucose remains normal\" than the rule that \"insulin injection happens before eating meal\", as the latter rule cannot expose the interval between 'insulin injection' and 'eating meal'. 
To tackle the above limitations, we propose novel atomic predicates enriching the expressiveness of temporal logic rules as well as a differentiable framework to learn rules in an end-to-end manner." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 186, + 506, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 506, + 331 + ], + "type": "text", + "content": "This work introduces a differentiable neuro-symbolic framework, clock logic neural network (CLNN), to model TPPs by learning weighted clock logic (wCL) formulas as explanations. Firstly, event streams are converted into continuous-time clock signals representing the time interval between the last occurrence of an event and the current time. Next, we propose a novel wCL to describe the underlying temporal relations with relative interval length, enabling the design of a CLNN to learn the generative mechanisms. Instead of searching for temporal logic rules in some vast discrete space, CLNN associates every neuron with an order representation or a logical operator and assigns weights to edges to reflect the importance of various inputs, which relaxes the search space to be continuous. Moreover, architecture weights are introduced into CLNN to make the formula structure search differentiable. wCL formula-informed intensity rates are carefully designed so that the parameters appearing in the rules can be learned through maximum likelihood estimation using gradient-based approaches. CLNN is tested on synthetic datasets to show that CLNN can recover the ground-truth rules as well as on real-world datasets to demonstrate its model-fitting performance." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 342, + 209, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 209, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 209, + 354 + ], + "type": "text", + "content": "2 PRELIMINARIES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 361, + 252, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 252, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 252, + 372 + ], + "type": "text", + "content": "2.1 NOTATION & BACKGROUND" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " denote the set of event labels, and " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "M = |\\mathcal{L}|" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " denote the number of event labels. 
An event stream is a sequence of events including time stamps, denoted as " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{(l_1,t_1),(l_2,t_2),\\dots,(l_N,t_N)\\}" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "t_i\\in \\mathbb{R}^+" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " denotes a time stamp between the beginning time " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "t_0 = 0" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " and end time " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "t_{N + 1} = T" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "l_{i}\\in \\mathcal{L}" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " is the event label that happens at " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": ". We refer to 'event label' and 'label' interchangeably. 
Every event label " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "l\\in \\mathcal{L}" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " has an associated conditional intensity rate describing the occurrence rate of label " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " given the history up to " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": ". In multivariate temporal point processes, conditional intensity rates describe the dynamics of events. Let " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t = \\{(l_i,t_i):t_i < t\\}" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " denote the historical events up to time " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": ". The conditional intensity rate of event label " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " is denoted as " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\lambda_l(t|\\mathcal{H}_t)" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": ". 
Specifically, " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\lambda_l(t|\\mathcal{H}_t)" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " describes the expected number of occurrences of event label " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " in an infinitesimal interval " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "[t,t + \\Delta t]" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " given the history " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\lambda_l(t|\\mathcal{H}_t) = \\lim_{\\Delta t\\to 0}(E[N_l(t + \\Delta t) - N_l(t)|\\mathcal{H}_t] / \\Delta t)" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "N_{l}(t)" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": " denotes the number of event label " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": "'s occurrences up to " + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 380, + 506, + 503 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 507, + 504, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 507, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 507, + 504, + 520 + ], + "type": "text", + "content": "Example 1 A running example of an event stream with 11 events of 4 labels is shown in Figure 1(a)." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 523, + 511, + 565 + ], + "blocks": [ + { + "bbox": [ + 109, + 523, + 511, + 565 + ], + "lines": [ + { + "bbox": [ + 109, + 523, + 511, + 565 + ], + "spans": [ + { + "bbox": [ + 109, + 523, + 511, + 565 + ], + "type": "image", + "image_path": "557ee827b58081cb6292fa7bca2267a38e9181cbaf851f056a773b70a640ed44.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 568, + 203, + 578 + ], + "lines": [ + { + "bbox": [ + 190, + 568, + 203, + 578 + ], + "spans": [ + { + "bbox": [ + 190, + 568, + 203, + 578 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 394, + 567, + 406, + 578 + ], + "lines": [ + { + "bbox": [ + 394, + 567, + 406, + 578 + ], + "spans": [ + { + "bbox": [ + 394, + 567, + 406, + 578 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "lines": [ + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "type": "text", + "content": "Figure 1: (a): An event stream example with " + }, + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "type": "inline_equation", + "content": "N = 11" + }, + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "type": "text", + "content": " events of " + }, + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "type": "inline_equation", + "content": "M = 4" + }, + { + "bbox": [ + 104, + 585, + 504, + 
631 + ], + "type": "text", + "content": " event labels over " + }, + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "type": "inline_equation", + "content": "T = 30" + }, + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "type": "text", + "content": " days. (Integer-valued time stamps are utilized for easy interpretation, note that the proposed approach also works for " + }, + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "type": "inline_equation", + "content": "t_i \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 585, + 504, + 631 + ], + "type": "text", + "content": "). (b): The overall workflow of the proposed method (POC: paired order cell, SOC: singleton order cell, AC: architecture cell, details presented in Section 2.2 to 3.3)." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 634, + 343, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 343, + 645 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 343, + 645 + ], + "type": "text", + "content": "2.2 ORDER REPRESENTATIONS FOR EVENT STREAMS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "text", + "content": "The overall workflow of the proposed framework is visualized as Figure 1(b). The raw event streams first go through a masking function to generate the masked event streams, which are then transformed into event clocks using a clocking function. The event clocks are given as inputs to the clock logic neural network (CLNN) to learn interpretable wCL formulas and the intensity rate of event occurrences. The following sections provide a detailed explanation for each module in Figure 1(b)." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "We are interested in exploring the effect of temporal ordering between event labels and the occurrences of causal event labels in a historical window on the occurrence rate of a particular event label," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 83, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 83, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 83, + 504, + 128 + ], + "type": "text", + "content": "where the generative mechanism is expressed as interpretable formulas. 
An event stream up to " + }, + { + "bbox": [ + 104, + 83, + 504, + 128 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 83, + 504, + 128 + ], + "type": "text", + "content": " may include multiple occurrences of the same event label, thus a masking function is required to mask out duplicated event labels in the history for accessing the ordering information at any " + }, + { + "bbox": [ + 104, + 83, + 504, + 128 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 83, + 504, + 128 + ], + "type": "text", + "content": ". Here we adopt a technique similar to Bhattacharjya et al. (2020) for extracting distinct event labels from " + }, + { + "bbox": [ + 104, + 83, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t" + }, + { + "bbox": [ + 104, + 83, + 504, + 128 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "spans": [ + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "text", + "content": "Definition 1 (Masking Function) A masking function " + }, + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\Gamma(\\cdot)" + }, + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "text", + "content": " is a function that takes an event stream as input and returns a new event stream that is a subset of the input stream and contains no duplicated event labels. 
Mathematically, " + }, + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\Gamma(\\cdot)" + }, + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "text", + "content": " is applied to " + }, + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t = \\{(l_i, t_i)\\}" + }, + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "text", + "content": " and converts it into a new stream " + }, + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t' = \\{(l_j, t_j) \\in \\mathcal{H}_t : l_j \\neq l_{j'} \\text{ if } j \\neq j'\\}" + }, + { + "bbox": [ + 104, + 130, + 504, + 176 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "type": "text", + "content": "We consider the following two masking functions as per Bhattacharjya et al. (2020) due to simplicity: 'first' masking and 'last' masking. The 'first' (resp. 'last') masking function keeps the first (resp. last) occurrence of an event label in an event stream." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 215, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 215, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 215, + 504, + 251 + ], + "type": "text", + "content": "Example 1 (cont.) Let " + }, + { + "bbox": [ + 104, + 215, + 504, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{13} = \\{(A,1),(B,3),(A,6),(D,8),(C,10),(D,12)\\}" + }, + { + "bbox": [ + 104, + 215, + 504, + 251 + ], + "type": "text", + "content": ". 
The 'first' masking function converts it to " + }, + { + "bbox": [ + 104, + 215, + 504, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{13}' = \\{(A,1),(B,3),(D,8),(C,10)\\}" + }, + { + "bbox": [ + 104, + 215, + 504, + 251 + ], + "type": "text", + "content": ", and the 'last' masking function converts it to " + }, + { + "bbox": [ + 104, + 215, + 504, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{13}' = \\{(B,3),(A,6),(C,10),(D,12)\\}" + }, + { + "bbox": [ + 104, + 215, + 504, + 251 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 253, + 504, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 253, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 253, + 504, + 277 + ], + "type": "text", + "content": "With the masked event history " + }, + { + "bbox": [ + 104, + 253, + 504, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t^\\prime" + }, + { + "bbox": [ + 104, + 253, + 504, + 277 + ], + "type": "text", + "content": ", we define two order representations for the order relationship between any two event labels and the occurrence of an event within a historical window of " + }, + { + "bbox": [ + 104, + 253, + 504, + 277 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 253, + 504, + 277 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "spans": [ + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "text", + "content": "Definition 2 (Paired Order Representation (POR)) A paired order representation is defined as " + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "inline_equation", + "content": "[l_i, l_j] \\in [\\mathcal{L}]^2" + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "inline_equation", + "content": "[\\mathcal{L}]^2" + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "text", + "content": " denotes two-element permutation of a subset of " + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "text", + "content": ". A paired order representation for " + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t^\\prime" + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "text", + "content": " can be obtained by arranging any two distinct labels in " + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t^\\prime" + }, + { + "bbox": [ + 104, + 279, + 504, + 314 + ], + "type": "text", + "content": " in a sequential order." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "text", + "content": "Definition 3 (Singleton Order Representation (SOR)) A singleton order representation is denoted as " + }, + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "inline_equation", + "content": "[l_j, \\underline{u}_{l_j}] \\in \\mathcal{L} \\times \\mathbb{R}_+" + }, + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "text", + "content": ", representing event label " + }, + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "inline_equation", + "content": "l_j \\in \\mathcal{L}" + }, + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "text", + "content": " occurred within the past " + }, + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_j}" + }, + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "text", + "content": " time units, where " + }, + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_j}" + }, + { + "bbox": [ + 104, + 319, + 504, + 357 + ], + "type": "text", + "content": " is a variable to learn through a process that will be explained in Section 3.3." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": "Example 1 (cont.) 
With first masking, an example of paired order representation for " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{13}^{\\prime}" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": " can be " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "[A,B]" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": " representing \"A happens before " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": " \" or " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "[B,C]" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": " representing \"B happens before " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": "\". The overall order representation for " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{13}^{\\prime}" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": " is expressed as " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "[A,B,D,C]" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": ", which can be derived from the paired order representations: " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "[A,B],[B,D],[D,C]" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": ". 
A singleton order representation example of " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{13}^{\\prime}" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": " can be expressed as " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "[B,10.5]" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": ", meaning " + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 359, + 505, + 415 + ], + "type": "text", + "content": " happened in the past 10.5 days." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 422, + 289, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 289, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 289, + 432 + ], + "type": "text", + "content": "2.3 WEIGHTED CLOCK LOGIC FORMULA" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "content": "To adapt " + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t^\\prime" + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "content": " to continuous-time signals that can be described by logical statements, we extract clock signals from " + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t^\\prime" + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "content": " to describe the time passed since the last occurrence of a label. 
A clocking function is introduced to convert " + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "inline_equation", + "content": "t_j" + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "content": " into a clock signal " + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "inline_equation", + "content": "c_{j}" + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "content": " denoting the time interval length between " + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "inline_equation", + "content": "t_j" + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": "Definition 4 (Clocking Function) A clocking function " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "\\Xi(\\cdot)" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " converts " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t^\\prime" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " into a vector of clock signals as " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "\\mathcal{C}'(t) = [c_1(t), c_2(t), \\dots, c_M(t)]^T \\in \\mathbb{R}_+^M" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + 
"content": "c_i(t)" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " denoting the clock signal for event label " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "i \\in \\mathcal{L}" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "c_i(t)" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " is computed as " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "c_i(t) = t - t_j" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "(l_j, t_j) \\in \\mathcal{H}_t^\\prime" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "l_j = i" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "c_i(t) = \\bar{Z}" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " otherwise. 
Note that " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "\\bar{Z}" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " is a user-defined, large positive number to indicate event label " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": " not happening in " + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t^\\prime" + }, + { + "bbox": [ + 104, + 473, + 504, + 521 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 526, + 504, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 526, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 104, + 526, + 504, + 550 + ], + "type": "text", + "content": "Example 1 (cont.) Taking the 'first' masked event stream " + }, + { + "bbox": [ + 104, + 526, + 504, + 550 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{13}^{\\prime} = \\{(A,1),(B,3),(D,8),(C,12)\\}" + }, + { + "bbox": [ + 104, + 526, + 504, + 550 + ], + "type": "text", + "content": " as an example, the event clocks are extracted as " + }, + { + "bbox": [ + 104, + 526, + 504, + 550 + ], + "type": "inline_equation", + "content": "\\mathcal{C}'(13) = [12,10,1,5]^T" + }, + { + "bbox": [ + 104, + 526, + 504, + 550 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "text", + "content": "The event clocks can essentially provide the ordering between any two event labels in that the difference between any two event labels' clock signals reflects which event label happens first. 
As shown in the diabetes prediction example in the Introduction section, the time interval between ordering events is notably important in explaining and predicting an event label's occurrence. In contrast to (Li et al., 2020; 2021) which only learns the temporal ordering relation between event labels, we define a paired order predicate (POP) with a learnable parameter " + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i l_j}" + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "text", + "content": " to describe the time interval between two ordered event labels " + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "inline_equation", + "content": "l_i" + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "inline_equation", + "content": "l_j" + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "text", + "content": " and a singleton order predicate (SOP) with a learnable parameter " + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_j}" + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "text", + "content": " to describe the occurrence of label " + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "inline_equation", + "content": "l_j" + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "text", + "content": " within a historical window " + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_j}" + }, + { + "bbox": [ + 104, + 552, + 504, + 644 + ], + "type": "text", + "content": " as follows." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": "Definition 5 (Paired Order Predicate) A POP describes the order between two labels " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "l_i, l_j \\in \\mathcal{L}, l_i \\neq l_j" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{l_i l_j} := g(c_{l_i}, c_{l_j}) = c_{l_i} - c_{l_j} > \\underline{u}_{l_i l_j}" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i l_j} \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": " is a parameter to learn. 
A positive " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i l_j}" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": " means " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "l_i" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": " happened before " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "l_j" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": " for at least " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i l_j}" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": " time units, and a negative " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i l_j}" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": " means " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "l_j" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": " happened before " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "l_i" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": " for at most " + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "inline_equation", + "content": "-\\underline{u}_{l_i l_j}" + }, + { + "bbox": [ + 104, + 647, + 504, + 700 + ], + "type": "text", + "content": " time units. A POP is used in the POC of Figure 1(b)." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "spans": [ + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "text", + "content": "Definition 6 (Singleton Order Predicate) An SOP describes a causal label " + }, + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "inline_equation", + "content": "l_j \\in \\mathcal{L}" + }, + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "text", + "content": " occurring within the past " + }, + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_j}" + }, + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "text", + "content": " time units, defined as " + }, + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "inline_equation", + "content": "\\pi_{sop}^{l_j} := c_{l_j} - \\underline{u}_{l_j} < 0" + }, + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_j} \\in \\mathbb{R}_+" + }, + { + "bbox": [ + 104, + 705, + 504, + 735 + ], + "type": "text", + "content": " is a learnable parameter." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": "Instead of taking a heuristic approach for some underlying combinatorial search problem for a given set of temporal predicates (Bhattacharjya et al., 2020; 2021; Li et al., 2021) to uncover the effective order relations, this work proposes a differentiable learning model to learn suitable singleton and paired order predicates among all the possible choices of order predicates through a gradient-based approach. The scheme of weighted signal temporal logic (wSTL) in Yan et al. (2021; 2022) is exploited to build weighted clock logic (wCL) formulas that are logical compositions of singleton and paired order predicates. 
The syntax of wCL is recursively defined as (Mehdipour et al., 2021):" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 169, + 164, + 504, + 180 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 164, + 504, + 180 + ], + "spans": [ + { + "bbox": [ + 169, + 164, + 504, + 180 + ], + "type": "interline_equation", + "content": "\\phi := \\pi_ {p o p} ^ {l _ {i} l _ {j}} \\left| \\pi_ {s o p} ^ {l _ {j}} \\right| \\neg \\phi \\left| \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} \\dots \\wedge \\phi_ {k} ^ {w _ {k}} \\right| \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}} \\dots \\vee \\phi_ {k} ^ {w _ {k}}, \\tag {1}", + "image_path": "9ea7b07b59d5b8ab70449d8c71489d29639caf822cfac5db875da2a9d4455a6c.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\phi_1, \\dots, \\phi_k" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": " are wCL formulas, " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\neg" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": " denotes negation, " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\land" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": " denotes logical conjunction, " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\lor" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": " denotes logical disjunction, " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", 
+ "content": "w_j \\geq 0, j = 1, \\dots, k" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": " denotes non-negative weights assigned to " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\phi_1, \\dots, \\phi_k" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": " in the conjunction and disjunction operations. A wCL formula can describe the characteristics of " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": ", thus the conditional intensity rate of event " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": " given " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": " can be equivalently denoted as " + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\lambda_{l|\\phi}(t)" + }, + { + "bbox": [ + 104, + 183, + 504, + 229 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "text", + "content": "Remark 7 The syntax above means each wCL formula can be built by using predicates in " + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{l_i l_j}" + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "inline_equation", + "content": "\\pi_{sop}^{l_j}" + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "text", + "content": " and then by recursively applying the " + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "inline_equation", + "content": "\\neg" + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "text", + "content": " or the " + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "inline_equation", + "content": "\\land" + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "text", + "content": " or the " + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "inline_equation", + "content": "\\lor" + }, + { + "bbox": [ + 104, + 233, + 505, + 262 + ], + "type": "text", + "content": " operations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "text", + "content": "Example 1 (cont.) 
A " + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "inline_equation", + "content": "wCL" + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "text", + "content": " formula example is " + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "inline_equation", + "content": "\\phi = (c_A - c_B > 1)^1 \\wedge (c_C < 3)^{0.05}" + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "text", + "content": ". The first and second clauses read \"A happened before " + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "text", + "content": " for at least one day\" and \"C happened less than 3 days ago\", respectively. Note that " + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "text", + "content": " is satisfied by the event stream up to " + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "inline_equation", + "content": "t = 13" + }, + { + "bbox": [ + 104, + 269, + 505, + 315 + ], + "type": "text", + "content": " in Figure 1(a). The two clauses have weights of 1 and 0.05, reflecting the first clause is more important than the second one." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 324, + 358, + 336 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 324, + 358, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 358, + 336 + ], + "type": "text", + "content": "3 WEIGHTED CLOCK LOGIC POINT PROCESSES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 344, + 327, + 355 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 344, + 327, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 327, + 355 + ], + "type": "text", + "content": "3.1 TRUTH DEGREE OF WEIGHTED CLOCK LOGIC" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "text", + "content": "To quantitatively measure the satisfaction degree of a wCL formula " + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "text", + "content": " over the event clocks " + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "inline_equation", + "content": "\\mathcal{C}'(t)" + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "text", + "content": ", i.e., how well does " + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "text", + "content": " describe the underlying patterns of " + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "inline_equation", + "content": "\\mathcal{C}'(t)" + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "text", + "content": ", we propose smooth activation functions (AFs) to compute the truth degree, denoted " + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "inline_equation", + 
"content": "p(\\mathcal{C}',\\phi,t)\\in [0,1]" + }, + { + "bbox": [ + 104, + 361, + 504, + 396 + ], + "type": "text", + "content": ", defined as (Riegel et al., 2020):" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 202, + 396, + 504, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 396, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 202, + 396, + 504, + 412 + ], + "type": "interline_equation", + "content": "p \\left(\\mathcal {C} ^ {\\prime}, \\pi_ {p o p} ^ {l _ {i} l _ {j}}, t\\right) = \\operatorname {s i g m o i d} \\left(c _ {l _ {i}} (t) - c _ {l _ {j}} (t) - \\underline {{u}} _ {l _ {i} l _ {j}}\\right), \\tag {2}", + "image_path": "0f54c9dab632661a6cd6aac46ffefa37d76ac8a519beabf5360d9dc30ced3062.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 203, + 413, + 504, + 429 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 413, + 504, + 429 + ], + "spans": [ + { + "bbox": [ + 203, + 413, + 504, + 429 + ], + "type": "interline_equation", + "content": "p \\left(\\mathcal {C} ^ {\\prime}, \\pi_ {s o p} ^ {l _ {j}}, t\\right) = \\operatorname {s i g m o i d} \\left(\\underline {{u}} _ {l _ {j}} - c _ {l _ {j}} (t)\\right), \\tag {3}", + "image_path": "bc5283eab2f2576c9d29128999c740c079cc22a7d33ed6e3a628f2432ef255a8.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 209, + 430, + 504, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 430, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 209, + 430, + 504, + 444 + ], + "type": "interline_equation", + "content": "p \\left(\\mathcal {C} ^ {\\prime}, \\neg \\phi , t\\right) = 1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi , t\\right). 
\\tag {4}", + "image_path": "aa902b50a2f12f0686b161aa70b494ca4b3b4bf8c40afcdf6d1b22c8ecc0014e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": "In contrast to the combinatorial search of the temporal logic predicates in Li et al. (2021), the smooth design of AFs in (2) - (4) benefits the maximum likelihood estimation problem shown later in Section 3.6 by allowing it to learn the parameters in the POP and SOP through gradient-based methods. Next, we present the design of activation functions (AF) for the " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": " operator. Here we use a 2-ary conjunction operator to motivate the design. Let " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "p^{\\wedge} = p(\\mathcal{C}',\\phi_1^{w_1}\\wedge \\phi_2^{w_2},t)\\in [0,1]" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": ". Intuitively, " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "p^{\\wedge}" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": " is low when either input is low, and " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "p^{\\wedge}" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": " is high when both inputs are high. Here we adopt a similar idea to Sen et al. (2022) for capturing the low and high. 
A user-defined hyperparameter " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "\\alpha \\in [\\frac{1}{2},1]" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": " is introduced to aid the interpretability of low and high such that " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "p^{\\wedge}" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": " represents high if " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "p^{\\wedge}\\in [\\alpha ,1]" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": " and low if " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "p^{\\wedge}\\in [0,1 - \\alpha ]" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": ". Considering the importance weights, a low input with a zero weight should not impact the output, which implies " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "p^{\\wedge}" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": " should be low when both inputs are low. 
With these considerations, the AF for the " + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 448, + 505, + 570 + ], + "type": "text", + "content": " operator is defined as follows: (See Appendix A for more details.)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 568, + 505, + 602 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 568, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 141, + 568, + 505, + 602 + ], + "type": "interline_equation", + "content": "p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} \\dots \\wedge \\phi_ {k} ^ {w _ {k}}, t\\right) = f \\left(\\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {5}", + "image_path": "4c81a34794e0943f520fcffeed5110404a8b82e4acf8474669ff65c7a01ce240.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 222, + 604, + 468, + 639 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 604, + 468, + 639 + ], + "spans": [ + { + "bbox": [ + 222, + 604, + 468, + 639 + ], + "type": "interline_equation", + "content": "\\text {s u b j e c t} \\quad \\beta - \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\geq \\alpha , \\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\leq 1 - \\alpha ,", + "image_path": "e3e787c8eabe635084f591ee86a976f9978fa2fba5a6dd055bb7a7041502a69a.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "inline_equation", + "content": "f(z) = \\max \\{0, \\min \\{z, 1\\}\\}" + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + 
"type": "text", + "content": " clamps the truth degree into [0,1], " + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "inline_equation", + "content": "w_{j} \\geq 0" + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "inline_equation", + "content": "\\beta \\geq 0" + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "text", + "content": " are parameters to learn. By De Morgan's law (Hurley, 2014), the AF for the " + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 643, + 504, + 666 + ], + "type": "text", + "content": " operator is defined as" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 124, + 666, + 505, + 699 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 666, + 505, + 699 + ], + "spans": [ + { + "bbox": [ + 124, + 666, + 505, + 699 + ], + "type": "interline_equation", + "content": "p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}} \\dots \\vee \\phi_ {k} ^ {w _ {k}}, t\\right) = f (1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\left(p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)), \\tag {6}", + "image_path": "e994dfb46d1c306b548d6aecc7192afa272ddc05909f2796cf8ddc033a3bfa0e.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 205, + 700, + 484, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 700, + 484, + 735 + ], + "spans": [ + { + "bbox": [ + 205, + 700, + 484, + 735 + ], + "type": "interline_equation", + "content": "\\text {s u b j e c t} \\quad 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\geq \\alpha , 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\leq 1 - \\alpha .", + "image_path": "c8e7c1d751fbd5ea84d2310d9e2f43f56b7c8c2626ee6840ccccc2e00ac8f25d.jpg" + } + ] + } + ], + 
"index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "text", + "content": "An event stream with " + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "text", + "content": " event labels would generate " + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "inline_equation", + "content": "\\mathrm{P}_M^2 = \\frac{M!}{(M - 2)!}" + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "text", + "content": " paired order predicates and " + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "text", + "content": " singleton order predicates. If a conjunction or disjunction operator takes these predicates as inputs, how it recognizes the effective order predicates in describing the event dynamics becomes a critical issue. By carefully designing the AFs in (5) - (6), the logical operators exhibit the following properties so as to recognize effective inputs. 
This is a critical advantage over Bhattacharjya et al. (2020; 2021); Li et al. (2021) in that it allows a differentiable search of the suitable predicates among all the possible choices of order predicates in an end-to-end manner. Here we illustrate the properties for " + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "text", + "content": " with two inputs, which can be generalized to " + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 81, + 506, + 174 + ], + "type": "text", + "content": "-ary inputs. (See Appendix B for more details.)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 178, + 462, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 178, + 462, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 178, + 462, + 190 + ], + "type": "text", + "content": "Theorem 8 The " + }, + { + "bbox": [ + 105, + 178, + 462, + 190 + ], + "type": "inline_equation", + "content": "AF" + }, + { + "bbox": [ + 105, + 178, + 462, + 190 + ], + "type": "text", + "content": " for the " + }, + { + "bbox": [ + 105, + 178, + 462, + 190 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 105, + 178, + 462, + 190 + ], + "type": "text", + "content": " operator with two inputs exhibits the following properties." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 194, + 504, + 251 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 105, + 194, + 501, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 501, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 501, + 208 + ], + "type": "text", + "content": "1) Nonimpact for zero weights: If " + }, + { + "bbox": [ + 105, + 194, + 501, + 208 + ], + "type": "inline_equation", + "content": "w_{j} = 0, j = 1,2" + }, + { + "bbox": [ + 105, + 194, + 501, + 208 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 105, + 194, + 501, + 208 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}',\\phi_j,t)" + }, + { + "bbox": [ + 105, + 194, + 501, + 208 + ], + "type": "text", + "content": " has no impact on " + }, + { + "bbox": [ + 105, + 194, + 501, + 208 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}',\\phi_1\\wedge \\phi_2,t)" + }, + { + "bbox": [ + 105, + 194, + 501, + 208 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 212, + 504, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 212, + 504, + 230 + ], + "spans": [ + { + "bbox": [ + 105, + 212, + 504, + 230 + ], + "type": "text", + "content": "2) Impact ordering: If " + }, + { + "bbox": [ + 105, + 212, + 504, + 230 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}', \\phi_1, t) = p(\\mathcal{C}', \\phi_2, t)" + }, + { + "bbox": [ + 105, + 212, + 504, + 230 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 105, + 212, + 504, + 230 + ], + "type": "inline_equation", + "content": "w_1 \\geq w_2" + }, + { + "bbox": [ + 105, + 212, + 504, + 230 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 105, + 212, + 504, + 230 + ], + "type": "inline_equation", + "content": "\\frac{\\partial p(\\mathcal{C}', \\phi_1 \\wedge \\phi_2, t)}{\\partial p(\\mathcal{C}', \\phi_1, t)} \\geq \\frac{\\partial p(\\mathcal{C}', \\phi_1 \\wedge \\phi_2, t)}{\\partial p(\\mathcal{C}', \\phi_2, t)}" + }, + { + "bbox": [ + 105, + 212, + 504, + 230 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 234, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 234, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 504, + 251 + ], + "type": "text", + "content": "3) Monotonicity: " + }, + { + "bbox": [ + 105, + 234, + 504, + 251 + ], + "type": "inline_equation", + "content": "f(\\beta - \\sum_{j=1}^{2} w_j (1 - p(\\mathcal{C}', \\phi_j, t))) \\leq f(\\beta - \\sum_{j=1}^{2} w_j (1 - (p(\\mathcal{C}', \\phi_j, t) + d))), d \\geq 0." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 127, + 266, + 344, + 435 + ], + "blocks": [ + { + "bbox": [ + 127, + 266, + 344, + 435 + ], + "lines": [ + { + "bbox": [ + 127, + 266, + 344, + 435 + ], + "spans": [ + { + "bbox": [ + 127, + 266, + 344, + 435 + ], + "type": "image", + "image_path": "9e0cac7601d681cedd8dc0d19c63518c0191e9bee81a8bb2a62181f80436ece0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 228, + 439, + 239, + 449 + ], + "lines": [ + { + "bbox": [ + 228, + 439, + 239, + 449 + ], + "spans": [ + { + "bbox": [ + 228, + 439, + 239, + 449 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 350, + 265, + 479, + 434 + ], + "blocks": [ + { + "bbox": [ + 350, + 265, + 479, + 434 + ], + "lines": [ + { + "bbox": [ + 350, + 265, + 479, + 434 + ], + "spans": [ + { + "bbox": [ + 350, + 265, + 479, + 434 + ], + "type": "image", + "image_path": "c640ccb66e96e9424797e3c3f14486a4b78a2595964840319ae75556b8aea159.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 438, + 419, + 449 + ], + "lines": [ + { + "bbox": [ + 406, + 438, + 419, + 449 + ], + "spans": [ + { + "bbox": [ + 406, + 438, + 419, + 449 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 455, + 504, + 480 + ], + "lines": [ + { + "bbox": [ + 104, + 455, + 504, + 480 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 504, + 480 + ], + "type": "text", + "content": "Figure 2: CLNN Structure. (a): Continuous relaxation of the search space using weights. 
(b): The learned discrete model structure for " + }, + { + "bbox": [ + 104, + 455, + 504, + 480 + ], + "type": "inline_equation", + "content": "\\phi = (\\pi_{pop}^{A,B}\\wedge \\pi_{pop}^{B,C})\\vee (\\pi_{sop}^{A})" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 491, + 338, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 338, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 338, + 502 + ], + "type": "text", + "content": "3.2 LEARNING OF PAIRED ORDER REPRESENTATION" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": "With the smooth AFs designed in (2) - (6), a neuro-symbolic model called clock logic neural network (CLNN) can be designed for any given wCL formula " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": ", in which every neuron has a corresponding symbolic representation. A typical CLNN for " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\phi = (\\pi_{pop}^{A,B}\\wedge \\pi_{pop}^{B,C})\\vee (\\pi_{sop}^{A})" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": " is visualized as Fig. 2(b), which can be considered as the discrete structure obtained by learning the parameters of the model in Figure 2(a) and keeping the dominant components. 
Here " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": " can be interpreted as “(A happens before " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": " for at least " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\underline{u}_{AB}" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": " time units or " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": " for at least " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\underline{u}_{BC}" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": " time units) and " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": " happens within the past " + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\underline{u}_A" + }, + { + "bbox": [ + 104, + 512, + 504, + 612 + ], + "type": "text", + "content": " time units.” This part describes the continuous relaxation of the search space by designing a paired order cell, a singleton order cell, and an architecture cell for learning the paired order representation, singleton order representation and the formula structure." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": "Paired Order Cell (POC). A POC is a directed acyclic graph (DAG) comprising two paired order predicate (POP) nodes and one logical node for the " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " operator, shown as an orange block in Figure 2(a). The two POP nodes represent " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{l_i,l_j}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{l_j,l_i}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " sharing the same parameter " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i,l_j}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{l_i,l_j}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " denotes \"" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "l_i" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " happened before " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "l_j" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " for at least " + }, + { + "bbox": [ + 104, 
+ 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i,l_j}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " time units\" and " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{l_j,l_i}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " denotes \"" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "l_j" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " happened before " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "l_i" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " for at least " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i,l_j}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " time units\". Each POP has an associated weight " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "w_{pop}^{l_i,l_j}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "w_{pop}^{l_j,l_i}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " to be learned, and the " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " operator forces one of the two weight parameters to dominate the other one such that the learned POR is consistent with the event stream. 
For example, the POC in Figure 2(a) aims to learn the POR between " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": ", whose discretized version would be either " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{A,B}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{B,A}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": ". An event stream with " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " event labels can generate " + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathrm{P}_M^2 = \\frac{M!}{(M - 2)!}" + }, + { + "bbox": [ + 104, + 616, + 506, + 734 + ], + "type": "text", + "content": " PORs between any two event" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" 
+ } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "text", + "content": "labels, resulting in " + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "inline_equation", + "content": "(\\mathrm{P}_M^2 / 2)" + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "text", + "content": " POCs. Similar to learning the POR between any two events, the discrete order representations for the entire history " + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t" + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "text", + "content": " can be learned using a POP selection node (as shown in Figure 2(a)) that takes the outputs of all the POCs as input and identifies the important PORs. The learning of the POCs essentially becomes learning the " + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "text", + "content": " in (5) for the POCs and the POP selection node, as well as " + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i l_j}" + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "text", + "content": " in (2) for the POPs through back propagation. 
The discrete PORs can be acquired by keeping the top-" + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 82, + 506, + 152 + ], + "type": "text", + "content": " strongest POCs and the dominant POPs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 156, + 357, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 357, + 166 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 357, + 166 + ], + "type": "text", + "content": "3.3 LEARNING OF SINGLETON ORDER REPRESENTATION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": "Singleton Order Cell (SOC). The learning of SOR is accomplished by an SOC, which is displayed as a green block in Figure 2(a). An SOC is a DAG comprising " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " singleton order predicate (SOP) nodes and one SOP selection node for the " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " operator. 
An SOP node represents " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\pi_{sop}^{l_j}" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " that takes " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "c_{l_j}(t)" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " as input and returns the truth degree of " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\pi_{sop}^{l_j}" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "c_{l_j}(t)" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": ". The SOP selection node has the same functionality as the POP selection node. The " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " operator in the SOP selection node assigns a nonnegative weight to every SOP node and learns the importance weights " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " to extract the dominant SORs affecting the conditional intensity rate the most. 
The learning of the SOC is thus learning the " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "w, \\beta" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " in (5) for the SOP selection node and " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_j}" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " in (3) for the SOPs through back propagation. The discrete SORs can be determined by keeping the top- " + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 170, + 506, + 277 + ], + "type": "text", + "content": " strongest SOPs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 283, + 293, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 283, + 293, + 294 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 293, + 294 + ], + "type": "text", + "content": "3.4 LEARNING OF FORMULA STRUCTURE" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": "Architecture Cell (AC). For a given set of PORs or SORs, their conjunction or disjunction will behave differently and have distinct meanings. 
For instance, given two causal formulas " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\phi_{1} = (c_{A} - c_{B} > 1)^{1}\\wedge (c_{C} < 5)^{1}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\phi_{2} = (c_{A} - c_{B} > 1)^{1}\\vee (c_{C} < 5)^{1}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " for the occurrence of event label " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\phi_{1}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " means “(A happens before " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " for at least 1 time unit) and (C happens within the past 5 time units) simultaneously will cause " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " to happen”, whereas " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\phi_{2}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " means “(A happens before " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " for at least 1 time unit) or (C happens within the past 5 time units) alternatively will cause " + }, + { + "bbox": [ + 104, + 
297, + 506, + 503 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " to happen.” The afore-mentioned cells can learn the order representations. Nevertheless, whether their outputs should be connected by the " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " operator needs to be determined. Here we consider the outputs of the POCs and the SOCs having two choices of being connected by a " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " operator, each of which is associated with an architecture weight " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\alpha_{arc}^{\\wedge}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\alpha_{arc}^{\\vee}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " that enables continuous learning of the two choices; this is also called differentiable architecture search (Liu et al., 2019). 
An architecture cell is introduced for learning the model architecture, which comprises two logical nodes representing a " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " operator and a " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " operator as well as a logical selection node (LSN), shown as the blue block in Figure 2(a). Let " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\pmb{p} = \\{p_1,\\dots,p_k\\}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " denote the set of inputs for each logical operator. Subsequently, the conjunction operator takes " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " as input and returns " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "p^{\\wedge} = f(\\beta^{\\wedge} - \\sum_{j = 1}^{k}w_{j}^{\\wedge}(1 - p_{j}))" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": ", and the disjunction operator takes " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " as input and returns " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "p^{\\vee} = f(1 - \\beta^{\\vee} + \\sum_{j = 1}^{k}w_{j}^{\\vee}p_{j})" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": ". 
The LSN represented by " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "\\ominus" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " takes " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "p^{\\wedge}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "inline_equation", + "content": "p^{\\vee}" + }, + { + "bbox": [ + 104, + 297, + 506, + 503 + ], + "type": "text", + "content": " as inputs and returns their weighted sum, where the weights are computed using the softmax of the architecture weights as shown below:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 202, + 503, + 504, + 534 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 503, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 202, + 503, + 504, + 534 + ], + "type": "interline_equation", + "content": "p _ {\\ominus} = p \\left(\\mathcal {C} ^ {\\prime}, \\phi , t\\right) = \\sum_ {m \\in \\{\\wedge , \\vee \\}} \\frac {e ^ {\\alpha_ {a r c} ^ {m}}}{\\sum_ {m ^ {\\prime} \\in \\{\\wedge , \\vee \\} e ^ {\\alpha_ {a r c} ^ {m ^ {\\prime}}}}} p ^ {m}. 
\\tag {7}", + "image_path": "236e1327a320d1102c24cdc041a177ec5a3003e1afd4e9d23058d5eb664f1ad4.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 534, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 506, + 582 + ], + "type": "text", + "content": "The task of architecture search then reduces to learning the architecture weights " + }, + { + "bbox": [ + 104, + 534, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\alpha_{arc}^{\\wedge}" + }, + { + "bbox": [ + 104, + 534, + 506, + 582 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 534, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\alpha_{arc}^{\\vee}" + }, + { + "bbox": [ + 104, + 534, + 506, + 582 + ], + "type": "text", + "content": " and the " + }, + { + "bbox": [ + 104, + 534, + 506, + 582 + ], + "type": "inline_equation", + "content": "w, \\beta" + }, + { + "bbox": [ + 104, + 534, + 506, + 582 + ], + "type": "text", + "content": " in (5) - (6) for the two logical operators, which can be executed simultaneously while learning parameters in the POCs and SOCs. The outcome of the architecture search process is a discrete architecture obtained by retaining the logical operator with the strongest architecture weight." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 585, + 302, + 596 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 302, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 302, + 596 + ], + "type": "text", + "content": "3.5 WCL-INFORMED INTENSITY FUNCTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": "The output of a CLNN is the truth degree of " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{C}'" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": ", which is incorporated into modeling the conditional intensity rates. The modeling process aims to discover the generative mechanism as wCL formulas for every " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "l \\in \\mathcal{L}" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": ". 
In other words, a larger value of " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}', \\phi, t)" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": " should reflect that " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": " has a greater impact on the occurrence of a particular label. For example, if the wCL formula for affecting the occurrence of event label " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": " is given as " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "\\phi = ((\\pi_{pop}^{A,B})^{w_1} \\wedge (\\pi_{sop}^{C})^{w_2})" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": ", it means if " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": " is satisfied or the truth degree of " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": " is high, then it has a strong impact on the occurrence of " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": ", where the impact can be promoting or inhibiting the occurrence of " + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 599, + 375, + 720 + ], + "type": "text", + "content": ". 
In terms of the relation between the truth degree and the con-" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 386, + 586, + 500, + 679 + ], + "blocks": [ + { + "bbox": [ + 386, + 586, + 500, + 679 + ], + "lines": [ + { + "bbox": [ + 386, + 586, + 500, + 679 + ], + "spans": [ + { + "bbox": [ + 386, + 586, + 500, + 679 + ], + "type": "image", + "image_path": "5d5b49c9b0aa3673d8a056f465562a03c01bddae53c13e45a86456e37664c6a3.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 685, + 504, + 704 + ], + "lines": [ + { + "bbox": [ + 380, + 685, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 380, + 685, + 504, + 704 + ], + "type": "text", + "content": "Figure 3: The overall learning framework for " + }, + { + "bbox": [ + 380, + 685, + 504, + 704 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 380, + 685, + 504, + 704 + ], + "type": "text", + "content": " wCL formulas." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "content": "ditional intensity rate, the higher the truth degree " + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}',\\phi ,t)" + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "content": " , the greater its impact on " + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\lambda_{D|\\phi}" + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "content": " . 
Note" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "content": "that the occurrence of one event label may depend on multiple wCL formulas. This work follows the assumption that the impact of multiple formulas are additive in predicting the intensity rate, similar to Li et al. (2020). 
To incorporate a set of wCL formulas " + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "inline_equation", + "content": "\\Phi = \\{\\phi_1,\\phi_2,\\dots,\\phi_n\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "content": " into the modeling of the conditional intensity rate, we define a wCL formula-informed conditional intensity rate as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 223, + 129, + 504, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 129, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 223, + 129, + 504, + 159 + ], + "type": "interline_equation", + "content": "\\lambda_ {l \\mid \\Phi} (t) = \\exp \\left(\\sum_ {i = 1} ^ {n} w _ {\\phi_ {i}} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {i}, t\\right) + \\rho\\right), \\tag {8}", + "image_path": "514ee6088c1895a7315ab78f0eeb240e20f20b4cd590c023e3a72a1abccd98ca.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "inline_equation", + "content": "w_{\\phi_i}" + }, + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "text", + "content": " is the weight of " + }, + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "inline_equation", + "content": "\\phi_i" + }, + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "text", + "content": " is a bias term that allows for spontaneous occurrence without the influence from " + }, + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "inline_equation", + "content": 
"\\phi" + }, + { + "bbox": [ + 104, + 162, + 504, + 184 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 199, + 293, + 210 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 199, + 293, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 199, + 293, + 210 + ], + "type": "text", + "content": "3.6 MAXIMUM LIKELIHOOD ESTIMATION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": "Suppose event stream " + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": " contains " + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "inline_equation", + "content": "n_l" + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": " occurrences of event " + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": ", for which the occurrence time stamps are denoted as " + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "inline_equation", + "content": "t_{l_1}, t_{l_2}, \\ldots, t_{l_{n_l}}" + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "inline_equation", + "content": "t_0 = 0" + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "inline_equation", + "content": "t_{l_{n_l + 1}} = T" + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": ". 
Based on the conditional intensity function in (8), the likelihood for label " + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": " over the event stream is calculated as (Daley & Vere-Jones, 2003):" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 262, + 505, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 262, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 141, + 262, + 505, + 297 + ], + "type": "interline_equation", + "content": "L _ {l} = \\prod_ {i = 0} ^ {n _ {l} - 1} \\left(\\exp \\left(- \\int_ {t _ {l _ {i}}} ^ {t _ {l _ {i + 1}}} \\lambda_ {l | \\Phi} (s) d s\\right) \\lambda_ {l | \\Phi} \\left(t _ {l _ {i + 1}}\\right)\\right) \\exp \\left(- \\int_ {t _ {l _ {n _ {l}}}} ^ {T} \\lambda_ {l | \\Phi} (s) d s\\right). \\tag {9}", + "image_path": "803d5dcd7ac30236246f5e7f0c2ad3d01d414a5bec6a8d00efb1f02461ff63a3.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "text", + "content": "The corresponding log-likelihood for event label " + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "text", + "content": " is expressed as " + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "inline_equation", + "content": "LL_{l} = (-\\int_{0}^{T}\\lambda_{l|\\Phi}(s)ds) + \\sum_{i = 1}^{n_{l}}[\\log (\\lambda_{l|\\Phi}(t_{l_{i}}))]" + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "text", + "content": ". 
The total log-likelihood of all the events in " + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "text", + "content": " is thus " + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "inline_equation", + "content": "LL_{\\mathcal{D}} = \\sum_{l\\in \\mathcal{L}}LL_{l}" + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "text", + "content": ". During the training process, we train the model parameters for each event label separately. Specifically, the maximum likelihood estimation problem for event label " + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 304, + 504, + 351 + ], + "type": "text", + "content": " can be formulated as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 357, + 504, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 357, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 113, + 357, + 504, + 370 + ], + "type": "interline_equation", + "content": "\\min - L L _ {l} \\tag {10}", + "image_path": "b0cf7cd41f0e8534cb66e4247753aa70b39e578cdf10e186c0401321529f76c1.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 373, + 504, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 373, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 113, + 373, + 504, + 399 + ], + "type": "interline_equation", + "content": "s. t. 
\\quad \\forall \\phi \\in \\Phi , \\forall 1 \\leq k \\leq K _ {\\phi} ^ {\\wedge}, \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} (1 - \\alpha) \\geq \\alpha , \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} \\alpha \\leq 1 - \\alpha , \\tag {11}", + "image_path": "b03645b0632ea424efcb8aade0e0d893e80478d2c407d862261c95177d130340.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 402, + 504, + 429 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 402, + 504, + 429 + ], + "spans": [ + { + "bbox": [ + 111, + 402, + 504, + 429 + ], + "type": "interline_equation", + "content": "\\forall \\phi \\in \\Phi , \\forall 1 \\leq k ^ {\\prime} \\leq K _ {\\phi} ^ {\\vee}, 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} \\alpha \\geq \\alpha , 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} (1 - \\alpha) \\leq 1 - \\alpha , \\tag {12}", + "image_path": "21603a37e696e0e6c5983eada3c97cdf9daefb3a261ce92d879a266c707fdff4.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 430, + 301, + 446 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 430, + 301, + 446 + ], + "spans": [ + { + "bbox": [ + 113, + 430, + 301, + 446 + ], + "type": "interline_equation", + "content": "w _ {i, k} \\geq 0, \\beta_ {k} \\geq 0, w _ {i, k ^ {\\prime}} \\geq 0, \\beta_ {k ^ {\\prime}} \\geq 0, \\underline {{u}} _ {l _ {j}} \\geq 0,", + "image_path": "17db73b5c6998f6bafec5eb833c0464378c9814e120a8f117fed2155e68ce2ed.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "K_{\\phi}^{\\wedge}" + }, + { + 
"bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": " (resp. " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "K_{\\phi}^{\\vee}" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": ") is the number of " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": " (resp. " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": ") operators in " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "I_{k}" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": " (resp. " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "I_{k'}" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": ") denotes the inputs to the " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": "-th " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": " (resp. 
" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "k'" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": "-th " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": ") operator. Please see Appendix A for more details about the above formulation. The overall learning framework is shown in Figure 3, in which the forward propagation computes " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "LL_{l}" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": " by using " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": " CLNNs; each learns a wCL formula " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "\\phi_{i}" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": " and the backward propagation updates the parameters in " + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 452, + 504, + 510 + ], + "type": "text", + "content": " CLNNs using projected gradient descent." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 517, + 201, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 517, + 201, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 201, + 529 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 536, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 582 + ], + "type": "text", + "content": "We conduct several experiments on synthetic and real-world datasets to demonstrate the efficacy of our proposed model. Simultaneously, we compare with state-of-the-art (SOTA) models. The experiments are run using the AdamW optimizer in Pytorch (1.10.2) on a Windows 10 system desktop with a 16-core CPU (i7, 3.60GHz) and 32 GB RAM. Our code is available at https://ICLR-CLNN." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 596, + 170, + 606 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 170, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 170, + 606 + ], + "type": "text", + "content": "4.1 MODELS" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 616, + 504, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 673 + ], + "type": "text", + "content": "Multivariate Hawkes Process (MHP) [(Bacry et al., 2017)]: A conventional multivariate Hawkes process utilizing an exponential kernel function to describe the conditional intensity rate, which involves a decay rate and an infectivity matrix characterizing the inter-dependence among events. 
This model is implemented in the tick" + }, + { + "bbox": [ + 104, + 616, + 504, + 673 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 104, + 616, + 504, + 673 + ], + "type": "text", + "content": " library, where the learning problem is posed as a convex quadratic programming problem with a fixed decay rate." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 677, + 504, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 711 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 711 + ], + "type": "text", + "content": "Proximal Graphical Event Model (PGEM) [(Bhattacharjya et al., 2018)]: A type of GEM that models event data by considering whether a parent in some underlying graph happens in a proximal (recent) window." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 334, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 334, + 731 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 334, + 731 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 117, + 720, + 334, + 731 + ], + "type": "text", + "content": "https://x-datainitiative.github.io/tick/modules/hawkes.html" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + 
"type": "table", + "bbox": [ + 109, + 57, + 500, + 99 + ], + "blocks": [ + { + "bbox": [ + 109, + 57, + 500, + 99 + ], + "lines": [ + { + "bbox": [ + 109, + 57, + 500, + 99 + ], + "spans": [ + { + "bbox": [ + 109, + 57, + 500, + 99 + ], + "type": "table", + "html": "
Ground truthφ1=(cA-cB>1)1∧(cA-cC>3)1
CLNN's rule(cA-cB>1.21)1.52 ∧ (cA-cC>3.00)1.41 ∧ (cA-cD>0.82)0.33 ∧ (cB-cC>4.33)0 ∧ (cB-cD>10.69)0 ∧ (cD-cC>-6.57)0.16
TELLER's ruleA before D, B before D, C before D, A before D and C before D
OGEM-tab's ruleExcitation: [B], [C,B], [B,C]; Inhibitory: [A], [C,A], [A,C]
", + "image_path": "df33c2f34507afad9f0414108959db34b01d63ce06b3dc21a0fec99af340f4d1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 122, + 504, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 122, + 504, + 156 + ], + "spans": [ + { + "bbox": [ + 104, + 122, + 504, + 156 + ], + "type": "text", + "content": "Ordinal Graphical Event Model (OGEM) [(Bhattacharjya et al., 2020; 2021)]: An ordinal GEM that models the impact of the order of events on the conditional intensity rate. OGEM-tab (resp. OGEM-tree) refers to an OGEM that adopts a tabular (resp. tree) representation of orders." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 157, + 504, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 157, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 104, + 157, + 504, + 190 + ], + "type": "text", + "content": "Temporal Logic Rule Learner (TELLER)2 [(Li et al., 2021)]. This is a method to learn first-order temporal logic rules explaining the generative mechanism of TPPs. The rule discovery process is formulated as a maximum likelihood estimation problem solved by a branch-and-price algorithm." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 192, + 229, + 204 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 192, + 229, + 204 + ], + "spans": [ + { + "bbox": [ + 105, + 192, + 229, + 204 + ], + "type": "text", + "content": "4.2 SYNTHETIC DATASETS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "text", + "content": "The first part of this experiment demonstrates CLNN's capability of recovering ground-truth rules using three synthetic datasets generated by CLNN with pre-specified formula structure and parameters, including " + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i l_j}" + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{l_i, l_j}" + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "text", + "content": ", as well as the importance weights " + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "text", + "content": " and bias " + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "text", + "content": " in (5) for logical operators, and the " + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "inline_equation", + "content": "w_\\phi" + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 208, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 208, + 504, + 
258 + ], + "type": "text", + "content": " in (8) for the conditional intensity rate." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "text", + "content": "Experimental Setting. Each synthetic dataset contains 1,000 event streams partitioned into three sets: training (70%), validation (15%), and test (15%). Every dataset is generated using a wCL formula with " + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "inline_equation", + "content": "w_{\\phi} = 3" + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "inline_equation", + "content": "\\rho = -5" + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "text", + "content": ". The truth value threshold is set as " + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "inline_equation", + "content": "\\alpha = 0.5" + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "text", + "content": ", and the clock signal for representing an event not occurring in " + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t^\\prime" + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "text", + "content": " is set as " + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "inline_equation", + "content": "\\bar{Z} = 1.5T_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 262, + 506, + 341 + ], + "type": "text", + "content": " is the maximal ending time among all the event streams. 
During the training process, we initialize the parameters using four approaches (see Appendix C.5 for more details) and report the best one, and CLNN aims to recover the manually set parameters." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": "Results. The ground-truth rule " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}_1" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " for generating the first synthetic dataset (Syn-1) with " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "\\mathcal{L} = \\{A, B, C, D\\}" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " and the rules discovered by CLNN, TELLER, and OGEM-tab are summarized in Table 1. Results for the other synthetic datasets are presented in Appendix C. The rules are learned using the 'last' masking method, which was also used for data generation. The experimental results show an accurate recovery performance of CLNN in terms of order representation recovery and parameter identification. 
The unweighted version of the ground truth rule reads: \"If " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " for at least 1 time unit and " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " for at least 3 time units, then " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " will happen\". 
The rule of TELLER only reflects the temporal relation between events " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " but is unable to capture the temporal relation between " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": ", which does not match the ground-truth rule. In OGEM-tab's rule, " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "[l]" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " denotes a single parent. We show the top 3 excitation and inhibitory rules from OGEM-tab, where excitation (resp. inhibitory) means " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "\\lambda_{l|\\Phi}" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " is higher (resp. 
lower) than the " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "\\lambda_{l|\\Phi}" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " with all " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "w_{\\phi_i} = 0" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": ". The excitation rules of OGEM-tab do not match the ground-truth rule. In contrast, the rule discovered by CLNN (" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "\\phi_1" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": ") assigns larger weights to the paired order predicates " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{A,B} = (c_A - c_B > 1.21)" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "\\pi_{pop}^{A,C} = (c_A - c_C > 3.00)" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " and small weights to the other predicates, where the interval values of 1.21 and 3.00 are both learned. 
By ignoring the small weights, " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "\\phi_1" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " can be interpreted as \"If " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " for at least 1.21 time units and " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " for at least 3.00 time units, then " + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 343, + 506, + 554 + ], + "type": "text", + "content": " will happen\", meaning the paired order representations discovered by CLNN match well with the ground truth. Moreover, CLNN's rules are more expressive than TELLER and OGEM as it provides a detailed interval length between two ordered labels." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 559, + 310, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 310, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 310, + 624 + ], + "type": "text", + "content": "To show the computational efficiency of our gradient-based learning, we compare the runtimes of CLNN and TELLER on the synthetic datasets in Table 2. 
Notably, CLNN not only recovers the correct order representations but also was two orders of magnitude faster on average (5.62 s vs 635.99" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 624, + 504, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 647 + ], + "type": "text", + "content": "s). In addition, CLNN can learn more expressive order representations that describe both the order relation between two events and their interval length." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 319, + 566, + 503, + 597 + ], + "blocks": [ + { + "bbox": [ + 124, + 105, + 485, + 118 + ], + "lines": [ + { + "bbox": [ + 124, + 105, + 485, + 118 + ], + "spans": [ + { + "bbox": [ + 124, + 105, + 485, + 118 + ], + "type": "text", + "content": "Table 1: Comparison of rule discovery for CLNN, TELLER, and OGEM-tab on the Syn-1 dataset." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 319, + 566, + 503, + 597 + ], + "lines": [ + { + "bbox": [ + 319, + 566, + 503, + 597 + ], + "spans": [ + { + "bbox": [ + 319, + 566, + 503, + 597 + ], + "type": "table", + "html": "
wCL formulaφ1φ2φ3,1φ3,2Average
CLNN5.204.604.957.735.62
TELLER252.91286.83925.581078.66635.99
", + "image_path": "e9a860f558716b4646ae4a52856ca856922009fa023c315fe39cdb81e7b962ad.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 601, + 504, + 622 + ], + "lines": [ + { + "bbox": [ + 317, + 601, + 504, + 622 + ], + "spans": [ + { + "bbox": [ + 317, + 601, + 504, + 622 + ], + "type": "text", + "content": "Table 2: Runtime (s) for CLNN and TELLER on synthetic datasets." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 651, + 239, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 651, + 239, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 651, + 239, + 662 + ], + "type": "text", + "content": "4.3 REAL-WORLD DATASETS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 666, + 505, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 505, + 712 + ], + "type": "text", + "content": "LinkedIn [(Xu et al., 2017)]. An event dataset related to job hopping records of 3,000 LinkedInIn users in 82 IT companies. Each event stream records a user's check-in time stamps for different companies or the time stamps for role change within the same company. We filter the dataset to popular companies as per Bhattacharjya et al. (2020), resulting in 1,000 users." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 370, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 370, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 370, + 732 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 116, + 720, + 370, + 732 + ], + "type": "text", + "content": "https://github.com/FengMingquan-sjtu/Logic_Point_Processes_ICLR" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "Mimic II [(Saeed et al., 2011)]. An event dataset concerning health records of patients from Intensive Care Unit (ICU) visits over 7 years. A patient's event stream records each visit's time stamp and the corresponding diagnosis. We filter out sequences with few visits, resulting in 650 patients." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 506, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 506, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 506, + 166 + ], + "type": "text", + "content": "Stack Overflow [(Grant & Betts, 2013)]. An event dataset that is related to the badges awarded to users in the question-answering website, the Stack Overflow. Each user's event stream records the badges that he/she receives at various time stamps. We keep the event streams with one or more of 20 types of badges and sample 1,000 users from the dataset used in Du et al. (2016)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "text", + "content": "Experimental Setup. Each dataset is partitioned into three sets: training (70%), validation (15%), and test (15%). For simplicity, " + }, + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "inline_equation", + "content": "\\underline{u}_{l_i l_j}" + }, + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "text", + "content": " are set as 0 to study the ordering representations. 
The truth value threshold is " + }, + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "inline_equation", + "content": "\\alpha = 0.5" + }, + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "inline_equation", + "content": "\\bar{Z} = 1.5T_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "text", + "content": ", same as the setting for the synthetic datasets, and the number of subformulas is " + }, + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "inline_equation", + "content": "n = 5" + }, + { + "bbox": [ + 104, + 171, + 506, + 242 + ], + "type": "text", + "content": ", and the parameters are initialized as random numbers from a uniform distribution on [0, 1). CLNN is trained on the training set, and the validation set is utilized for model selection during training. Model fit is evaluated using log-likelihood on the test set." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 245, + 506, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 245, + 506, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 245, + 506, + 378 + ], + "type": "text", + "content": "Results. We follow a similar trend to Bhattacharjya et al. (2018; 2020; 2021) to use the log-likelihood for evaluation of the model's performance. The log-likelihood on the real-world datasets is reported in Table 3, where " + }, + { + "bbox": [ + 104, + 245, + 506, + 378 + ], + "type": "inline_equation", + "content": "DR" + }, + { + "bbox": [ + 104, + 245, + 506, + 378 + ], + "type": "text", + "content": " denotes the difference ratio – the difference between CLNN and the best SOTA divided by the absolute value of best SOTA. CLNN's result is chosen as the better one among the 'first' or the 'last' masking. 
Notably, CLNN outperforms the baseline models on the LinkedIn dataset (13.40% advantage) and achieves a competitive result on the MIMIC II dataset (1.63% loss only). It is observed that PGEM achieves a better result on the Stack Overflow dataset. In Stack Overflow, one type of badge can be awarded only when a user receives a particular badge multiple times, for example, the 'Epic' badge is awarded only when earning 200 daily reputations 50 times, depending on the 'Mortarboard' badge acquired while answering or asking questions. CLNN and OGEMs apply masking methods to the data, which may not capture the above dependence. In contrast, PGEM models data without masking, making it more suitable for this dataset." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 121, + 380, + 489, + 430 + ], + "blocks": [ + { + "bbox": [ + 121, + 380, + 489, + 430 + ], + "lines": [ + { + "bbox": [ + 121, + 380, + 489, + 430 + ], + "spans": [ + { + "bbox": [ + 121, + 380, + 489, + 430 + ], + "type": "table", + "html": "
DatasetN (# events)M (labels)MHPPGEMOGEM-tabOGEM-treeTELLERCLNNDR
LinkedIn293210-1593-1462-1478-1418-1548-122813.40%
MIMIC II241915-567-500-474-429-645-436-1.63%
Stack Overflow7125420-52543-48323-49344-49192-71101-50981-5.50%
", + "image_path": "7645a8b1809d9569cc95dc9917c1ec7a57d8d1454d47d8a71c7a8a00ac353d9e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 448, + 268, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 268, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 268, + 559 + ], + "type": "text", + "content": "Case Study. The primary strength of CLNN over the SOTA models is that it can describe the generative mechanism as wCL formulas, being more expressive and potentially providing more detailed information. CLNN can be deployed as a valuable tool for assisting domain specialists in knowledge discovery from event data. Here we showcase the above strength of CLNN using an il" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 559, + 506, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 506, + 647 + ], + "type": "text", + "content": "lustrative example. We select the experimental result on company " + }, + { + "bbox": [ + 104, + 559, + 506, + 647 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 559, + 506, + 647 + ], + "type": "text", + "content": " of the LinkedIn dataset to demonstrate the expressivity of CLNN's rules, which are shown in Table 4. Here we specify the model to learn five formulas, four of which are inhibitory, and one exhibits excitation. One inhibitory formula has a weight of 0.05, thus not reported in Table 4. Each formula shows the dominant singleton or paired order predicates. 
Notably, CLNN learns expressive wCL formulas that describe how the logical composition of paired order predicates and(or) singleton order predicates affect a role change in the company " + }, + { + "bbox": [ + 104, + 559, + 506, + 647 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 559, + 506, + 647 + ], + "type": "text", + "content": ". CLNN's rules are more expressive than TELLER and as expressive as OGEM-tab for describing the occurrence of a causal event within a specific historical window." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 279, + 454, + 501, + 528 + ], + "blocks": [ + { + "bbox": [ + 124, + 434, + 485, + 445 + ], + "lines": [ + { + "bbox": [ + 124, + 434, + 485, + 445 + ], + "spans": [ + { + "bbox": [ + 124, + 434, + 485, + 445 + ], + "type": "text", + "content": "Table 3: Dataset information and log-likelihood for all models on the real-world datasets." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 279, + 454, + 501, + 528 + ], + "lines": [ + { + "bbox": [ + 279, + 454, + 501, + 528 + ], + "spans": [ + { + "bbox": [ + 279, + 454, + 501, + 528 + ], + "type": "table", + "html": "
RulesEffect
CLNNφ1=(CD>cH)0.90 ∧ (CI>cJ)0.72Inhibitory
φ2=((CB<0.45)0.58 ∧ (CD<0.05)0.66Excitation
φ3=(CB>cF)0.50 ∧ (CI>cJ>cD)0.47Inhibitory
φ4=(CA<0.84)0.76 ∧ (CH<1.09)0.50Inhibitory
TELLER[A,F],[C,F],[E,F],[B,F],[D,F]Excitation
OGEM-tab[F],[F,A]Excitation
[A]Inhibitory
", + "image_path": "f50b05780e8b99fb1e3a2e1d3b84574f831871aaa555bf682d357de8e87f832f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 274, + 533, + 504, + 555 + ], + "lines": [ + { + "bbox": [ + 274, + 533, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 274, + 533, + 504, + 555 + ], + "type": "text", + "content": "Table 4: Formulas and their effect as learned by CLNN, TELLER and OGEM-tab on company " + }, + { + "bbox": [ + 274, + 533, + 504, + 555 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 274, + 533, + 504, + 555 + ], + "type": "text", + "content": " of LinkedIn." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 657, + 196, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 196, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 196, + 669 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": "In this paper, we proposed a novel neuro-symbolic model, CLNN, to learn interpretable wCL formulas from multivariate event data. Experimental results using synthetic and real-world datasets demonstrate CLNN's expressiveness in recovering ground-truth rules in multivariate temporal point processes. Further, CLNN can be trained using gradient-based methods, which improve the learning speed compared to the SOTA." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 239, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 239, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 239, + 94 + ], + "type": "text", + "content": "6 ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "content": "This research is sponsored by the Rensselaer-IBM AI Research Collaboration (http://airc.rpi.edu), part of the IBM AI Horizons Network; the National Science Foundation under Grant CMMI-1936578; and the Defense Advanced Research Projects Agency (DARPA) through Cooperative Agreement D20AC00004 awarded by the U.S. Department of the Interior (DOI), Interior Business Center. The content of the information does not necessarily reflect the position or the policy of the Government, and no official endorsement should be inferred." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 190, + 176, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 190, + 176, + 201 + ], + "spans": [ + { + "bbox": [ + 106, + 190, + 176, + 201 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 208, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 208, + 505, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 208, + 505, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 505, + 232 + ], + "type": "text", + "content": "Odd Aalen, Ornulf Borgan, and Hakon Gjessing. Survival and Event History Analysis: A Process Point of View. Springer Science & Business Media, 2008." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 239, + 504, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 239, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 107, + 239, + 504, + 262 + ], + "type": "text", + "content": "Emmanuel Bacre, Iacopo Mastromatteo, and Jean-François Muzy. Hawkes processes in finance. Market Microstructure and Liquidity, 1(01):1550005, 2015." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 270, + 504, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 270, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 106, + 270, + 504, + 304 + ], + "type": "text", + "content": "Emmanuel Bacry, Martin Bompaire, Philip Deegan, Stéphane Gaiffas, and Søren V Poulsen. tick: A Python library for statistical learning, with an emphasis on Hawkes processes and time-dependent models. The Journal of Machine Learning Research, 18(1):7937-7941, 2017." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 312, + 504, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 312, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 107, + 312, + 504, + 336 + ], + "type": "text", + "content": "Debarun Bhattacharjya, Dharmashankar Subramanian, and Tian Gao. Proximal graphical event models. Advances in Neural Information Processing Systems (NeurIPS), 31:8147-8156, 2018." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 343, + 505, + 377 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 343, + 505, + 377 + ], + "spans": [ + { + "bbox": [ + 107, + 343, + 505, + 377 + ], + "type": "text", + "content": "Debarun Bhattacharjya, Tian Gao, and Dharmashankar Subramanian. Order-dependent event models for agent interactions. In Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI), pp. 1977-1983, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 384, + 505, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 384, + 505, + 419 + ], + "spans": [ + { + "bbox": [ + 107, + 384, + 505, + 419 + ], + "type": "text", + "content": "Debarun Bhattacharjya, Tian Gao, and Dharmashankar Subramanian. Ordinal historical dependence in graphical event models with tree representations. In Proceedings of the Conference on Artificial Intelligence (AAAI), pp. 6759-6767, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 426, + 504, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 426, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 107, + 426, + 504, + 449 + ], + "type": "text", + "content": "Yuanda Chen. Thinning algorithms for simulating point processes. Florida State University, Tallahassee, FL, 2016." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 457, + 504, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 457, + 504, + 481 + ], + "spans": [ + { + "bbox": [ + 107, + 457, + 504, + 481 + ], + "type": "text", + "content": "Daryl J Daley and David Vere-Jones. An Introduction to the Theory of Point Processes, Volume I: Elementary Theory and Methods. Springer, 2003." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 489, + 504, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 489, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 107, + 489, + 504, + 512 + ], + "type": "text", + "content": "Vanessa Didelez. Graphical models for marked point processes based on local independence. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 70(1):245-264, 2008." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 520, + 504, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 520, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 106, + 520, + 504, + 565 + ], + "type": "text", + "content": "Nan Du, Hanjun Dai, Rakshit Trivedi, Utkarsh Upadhyay, Manuel Gomez-Rodriguez, and Le Song. Recurrent marked temporal point processes: embedding event history to vector. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 1555-1564, 2016." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 572, + 504, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 572, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 106, + 572, + 504, + 617 + ], + "type": "text", + "content": "Mehrdad Farajtabar, Yichen Wang, Manuel Gomez Rodriguez, Shuang Li, Hongyuan Zha, and Le Song. COEVOLVE: A joint point process model for information diffusion and network coevolution. In Advances in Neural Information Processing Systems (NeurIPS), volume 28, pp. 
1954-1962, 2015." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 625, + 504, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 625, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 107, + 625, + 504, + 660 + ], + "type": "text", + "content": "Tian Gao, Dharmashankar Subramanian, Karthikeyan Shanmugam, Debarun Bhattacharjya, and Nicholas Mattei. A multi-channel neural graphical event model with negative evidence. In Proceedings of the Conference on Artificial Intelligence (AAAI), pp. 3946-3953, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 668, + 504, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 668, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 107, + 668, + 504, + 700 + ], + "type": "text", + "content": "Scott Grant and Buddy Betts. Encouraging user behaviour with achievements: An empirical study. In Proceedings of the 10th Working Conference on Mining Software Repositories, MSR '13, pp. 65-68. IEEE Press, 2013." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 709, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 709, + 504, + 732 + ], + "type": "text", + "content": "Asela Gunawardana and Chris Meek. Universal models of multivariate temporal point processes. In Artificial Intelligence and Statistics, pp. 556-563. PMLR, 2016." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 733 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "type": "text", + "content": "Asela Gunawardana, Christopher Meek, and Puyang Xu. A model for temporal dependencies in event streams. Advances in Neural Information Processing Systems (NeurIPS), 24, 2011." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 111, + 413, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 111, + 413, + 125 + ], + "spans": [ + { + "bbox": [ + 107, + 111, + 413, + 125 + ], + "type": "text", + "content": "Patrick J Hurley. A Concise Introduction to Logic. Cengage Learning, 2014." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 129, + 504, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 129, + 504, + 165 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 504, + 165 + ], + "type": "text", + "content": "Shuang Li, Lu Wang, Ruizhi Zhang, Xiaofu Chang, Xuqin Liu, Yao Xie, Yuan Qi, and Le Song. Temporal logic point processes. In International Conference on Machine Learning, pp. 5990-6000. PMLR, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 170, + 504, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 170, + 504, + 206 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 504, + 206 + ], + "type": "text", + "content": "Shuang Li, Mingquan Feng, Lu Wang, Abdelmajid Essofi, Yufeng Cao, Junchi Yan, and Le Song. Explaining point processes by learning interpretable temporal logic rules. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 211, + 504, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 211, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 211, + 504, + 236 + ], + "type": "text", + "content": "Hanxiao Liu, Karen Simonyan, and Yiming Yang. DARTS: Differentiable architecture search. In International Conference on Learning Representations, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 241, + 504, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 241, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 504, + 266 + ], + "type": "text", + "content": "Noushin Mehdipour, Cristian-Ioan Vasile, and Calin Belta. Specifying user preferences using weighted signal temporal logic. IEEE Control Systems Letters, 5(6):2006-2011, 2021." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 271, + 504, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 271, + 504, + 305 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 504, + 305 + ], + "type": "text", + "content": "Hongyuan Mei and Jason M Eisner. The neural Hawkes process: A neurally self-modulating multivariate point process. Advances in Neural Information Processing Systems (NeurIPS), 30:6757-6767, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 312, + 504, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 312, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 504, + 336 + ], + "type": "text", + "content": "Sean P O'Brien. Crisis early warning and decision support: Contemporary approaches and thoughts on future research. International Studies Review, 12(1):87-104, 2010." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 342, + 504, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 504, + 376 + ], + "type": "text", + "content": "Ryan Riegel, Alexander Gray, Francois Luus, Naweed Khan, Ndivhuwo Makondo, Ismail Yunus Akhalwaya, Haifeng Qian, Ronald Fagin, Francisco Barahona, Udit Sharma, et al. Logical neural networks. arXiv preprint arXiv:2006.13155, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 382, + 504, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 382, + 504, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 382, + 504, + 428 + ], + "type": "text", + "content": "Mohammed Saeed, Mauricio Villarroel, Andrew T Reisner, Gari Clifford, Li-Wei Lehman, George Moody, Thomas Heldt, Tin H Kyaw, Benjamin Moody, and Roger G Mark. Multiparameter intelligent monitoring in intensive care II (MIMIC-II): A public-access intensive care unit database. 
Critical Care Medicine, 39(5):952, 2011." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 434, + 504, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 434, + 504, + 469 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 504, + 469 + ], + "type": "text", + "content": "Prithviraj Sen, Bruno WSR de Carvalho, Ryan Riegel, and Alexander Gray. Neuro-symbolic inductive logic programming with logical neural networks. In Proceedings of the Conference on Artificial Intelligence (AAAI), volume 36, pp. 8212-8219, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 475, + 504, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 504, + 499 + ], + "type": "text", + "content": "Jeremy C. Weiss and David Page. Forest-based point process for event prediction from electronic health records. In Machine Learning and Knowledge Discovery in Databases, pp. 547-562, 2013." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 505, + 504, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 505, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 504, + 540 + ], + "type": "text", + "content": "Shuai Xiao, Junchi Yan, Xiaokang Yang, Hongyuan Zha, and Stephen Chu. Modeling the intensity function of point process via recurrent neural networks. In Proceedings of the Conference on Artificial Intelligence (AAAI), volume 31, pp. 1597-1603, 2017." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 545, + 504, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 545, + 504, + 580 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 504, + 580 + ], + "type": "text", + "content": "Hongteng Xu, Dixin Luo, and Hongyuan Zha. Learning Hawkes processes from short doubly-censored event sequences. In International Conference on Machine Learning, pp. 
3831-3840. PMLR, 2017." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 586, + 504, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 504, + 621 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 504, + 621 + ], + "type": "text", + "content": "Ruixuan Yan, Agung Julius, Maria Chang, Achille Fokoue, Tengfei Ma, and Rosario Uceda-Sosa. STONE: Signal temporal logic neural network for time series classification. In 2021 International Conference on Data Mining Workshops (ICDMW), pp. 778-787. IEEE, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 627, + 504, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 504, + 672 + ], + "type": "text", + "content": "Ruixuan Yan, Tengfei Ma, Achille Fokoue, Maria Chang, and Agung Julius. Neuro-symbolic models for interpretable time series classification using temporal logic description. In 2022 IEEE International Conference on Data Mining (ICDM), pp. 618-627, 2022. doi: 10.1109/ICDM54844.2022.00072." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 678, + 504, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 504, + 703 + ], + "type": "text", + "content": "Qiang Zhang, Aldo Lipani, Omer Kirnap, and Emine Yilmaz. Self-attentive Hawkes process. In International Conference on Machine Learning, pp. 11183-11193. PMLR, 2020." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 708, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 733 + ], + "type": "text", + "content": "Simiao Zuo, Haoming Jiang, Zichong Li, Tuo Zhao, and Hongyuan Zha. Transformer Hawkes process. In International Conference on Machine Learning, pp. 11692-11702. 
PMLR, 2020." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 481, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 481, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 481, + 94 + ], + "type": "text", + "content": "A FORMULATION OF LOGICAL CONSTRAINTS & OBJECTIVE FUNCTION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "content": "The optimization problem in (10) is formulated by maximizing the log-likelihood subject to the logical constraints for the " + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "content": " operators. 
This section discusses the details of the formulation for the two logical constraints and how to formulate the optimization problem while considering the logical constraints. Without loss of generality, we illustrate the formulation of the constraints for the " + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "content": " operator, and the constraints for " + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "content": " operator can be derived from the constraints for the " + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 106, + 506, + 173 + ], + "type": "text", + "content": " operator using De Morgan's law." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 182, + 291, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 182, + 291, + 194 + ], + "spans": [ + { + "bbox": [ + 132, + 182, + 291, + 194 + ], + "type": "text", + "content": "- Logical constraints for " + }, + { + "bbox": [ + 132, + 182, + 291, + 194 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 132, + 182, + 291, + 194 + ], + "type": "text", + "content": " operator." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "spans": [ + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "x, y \\in [0,1]" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " denote the inputs of the " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " operator, and " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " denote the quantitative satisfaction of " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": ". The conventional characteristic of the " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " operator is illustrated as follows: 1) " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " is low when either input is low, and 2) " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " is high when both inputs are high. However, we associate each input with a nonnegative weight, implying the input with a zero weight should not affect the output. 
In other words, if a low input has a zero weight, it should not affect the output of " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": ". Therefore, we require the " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " operator to exhibit the following characteristics: 1) " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " is low when both inputs are low, and 2) " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " is high when both inputs are high. Here we introduce a user-defined hyperparameter " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "\\alpha \\in [\\frac{1}{2},1]" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " to capture low vs. high: " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "x \\in [0,1 - \\alpha)" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " represents low and " + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "inline_equation", + "content": "x \\in [\\alpha,1]" + }, + { + "bbox": [ + 139, + 195, + 504, + 305 + ], + "type": "text", + "content": " represents high. 
According to the above characteristics, we have (Sen et al., 2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 244, + 310, + 504, + 328 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 310, + 504, + 328 + ], + "spans": [ + { + "bbox": [ + 244, + 310, + 504, + 328 + ], + "type": "interline_equation", + "content": "f (x, y) \\leq 1 - \\alpha , \\quad \\forall x, y \\in [ 0, 1 - \\alpha), \\tag {13}", + "image_path": "e2a16ba412bb4a90d03ed87581c84d8ca513c43239bbce9f47a1c67dbd62c836.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 262, + 325, + 382, + 337 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 325, + 382, + 337 + ], + "spans": [ + { + "bbox": [ + 262, + 325, + 382, + 337 + ], + "type": "interline_equation", + "content": "f (x, y) \\geq \\alpha , \\quad \\forall x, y \\in [ \\alpha , 1 ].", + "image_path": "93742ef8e9e02cdebaad230ec85409401eb7d35e7d4162c0ec85b0dab6f91f79.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 342, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 342, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 140, + 342, + 504, + 365 + ], + "type": "text", + "content": "Here we follow a specific choice of " + }, + { + "bbox": [ + 140, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 140, + 342, + 504, + 365 + ], + "type": "text", + "content": " by using a triangular norm (" + }, + { + "bbox": [ + 140, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 140, + 342, + 504, + 365 + ], + "type": "text", + "content": "-norm) and define the quantitative satisfaction function of " + }, + { + "bbox": [ + 140, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 140, + 342, + 504, + 365 + ], + "type": "text", + "content": " as (Riegel et al., 2020)" + } + ] + } + ], + 
"index": 7 + }, + { + "bbox": [ + 180, + 369, + 504, + 403 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 369, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 180, + 369, + 504, + 403 + ], + "type": "interline_equation", + "content": "p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}}, t\\right) = f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {14}", + "image_path": "bb0e6357191bfd10902b6be5fff7bc60cf2856b43cf1f5dc5c670ff356ef6d3b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 220, + 407, + 504, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 407, + 504, + 440 + ], + "spans": [ + { + "bbox": [ + 220, + 407, + 504, + 440 + ], + "type": "interline_equation", + "content": "\\text {s u b j e c t} \\quad \\beta - \\sum_ {j = 1} ^ {2} w _ {j} (1 - \\alpha) \\geq \\alpha , \\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\alpha \\leq 1 - \\alpha , \\tag {15}", + "image_path": "4875f14214dc39dbf293489cc82a44733f7cf54a2305837c2050817239e95afc.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 140, + 445, + 504, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 445, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 140, + 445, + 504, + 468 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 140, + 445, + 504, + 468 + ], + "type": "inline_equation", + "content": "f(z) = \\max \\{0, \\min \\{z, 1\\}\\}" + }, + { + "bbox": [ + 140, + 445, + 504, + 468 + ], + "type": "text", + "content": " is introduced to clamp the truth value into the range of [0, 1]." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 471, + 291, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 471, + 291, + 482 + ], + "spans": [ + { + "bbox": [ + 132, + 471, + 291, + 482 + ], + "type": "text", + "content": "- Logical constraints for " + }, + { + "bbox": [ + 132, + 471, + 291, + 482 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 132, + 471, + 291, + 482 + ], + "type": "text", + "content": " operator." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 139, + 483, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 483, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 139, + 483, + 504, + 506 + ], + "type": "text", + "content": "By using De Morgan's law, we could derive the quantitative satisfaction function and the logical constraints for the " + }, + { + "bbox": [ + 139, + 483, + 504, + 506 + ], + "type": "inline_equation", + "content": "\\lor" + }, + { + "bbox": [ + 139, + 483, + 504, + 506 + ], + "type": "text", + "content": " operator with 2 inputs as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 154, + 511, + 504, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 511, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 154, + 511, + 504, + 544 + ], + "type": "interline_equation", + "content": "p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}}, t\\right) = f \\left(1 - \\beta + \\sum_ {j = 1} ^ {2} w _ {j} \\left(p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {16}", + "image_path": "61b7697f52052be7e0ce8f34bd72202d27ec37f3c3011144e8177a9439c5c218.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 194, + 548, + 504, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 548, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 194, + 548, + 504, + 582 + ], + 
"type": "interline_equation", + "content": "\\text {s u b j e c t} \\quad 1 - \\beta + \\sum_ {j = 1} ^ {2} w _ {j} \\alpha \\geq \\alpha , 1 - \\beta + \\sum_ {j = 1} ^ {2} w _ {j} (1 - \\alpha) \\leq 1 - \\alpha . \\tag {17}", + "image_path": "8cbf23b896cb783e6e706bc67b79c4ce40d56f2477ccc1c14b2f4ae36cb22d16.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": "Here we show the characteristics of the activation functions for the " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " operators using Figure 4. Figure 4(a) shows the truth value of the " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " operator with " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\alpha = 0.7" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": ". Figure 4(b) shows the truth value of the " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " operator with " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\alpha = 0.9" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": ". 
It can be distinctly observed that " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " is close to 0 when both " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " are low, and " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " is close to 1 when both " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " are high. In addition, the unconstrained region for " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\alpha = 0.9" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " is larger than the unconstrained region for " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\alpha = 0.7" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": ". 
Figure 4(c) shows the truth value of the " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " operator with " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "\\alpha = 0.7" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": ". It is obvious that " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " is close to 0 when both " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " are low, and " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "f(x,y)" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " is close to 1 when both " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 590, + 504, + 668 + ], + "type": "text", + "content": " are high." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "text", + "content": "In general, we could extend the quantitative satisfaction for the " + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\vee" + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "text", + "content": " operators in (14) - (17) to " + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "text", + "content": "-ary conjunction and " + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "text", + "content": "-ary disjunction. The " + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "text", + "content": "-ary conjunction formulation is expressed as follows." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 700, + 504, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 700, + 504, + 735 + ], + "spans": [ + { + "bbox": [ + 141, + 700, + 504, + 735 + ], + "type": "interline_equation", + "content": "p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} \\dots \\wedge \\phi_ {k} ^ {w _ {k}}, t\\right) = f \\left(\\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {18}", + "image_path": "f335c9e3e8bc45846cb4ca61b2df3f8f1641f48fe240d8be6c274727e52fa395.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 114, + 87, + 233, + 152 + ], + "blocks": [ + { + "bbox": [ + 114, + 87, + 233, + 152 + ], + "lines": [ + { + "bbox": [ + 114, + 87, + 233, + 152 + ], + "spans": [ + { + "bbox": [ + 114, + 87, + 233, + 152 + ], + "type": "image", + "image_path": "d11ca93f13aa2d576cabacd06a21df8f552d4545ee7577c50b2397da4f4f2f16.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 165, + 155, + 177, + 164 + ], + "lines": [ + { + "bbox": [ + 165, + 155, + 177, + 164 + ], + "spans": [ + { + "bbox": [ + 165, + 155, + 177, + 
164 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 87, + 366, + 152 + ], + "blocks": [ + { + "bbox": [ + 242, + 87, + 366, + 152 + ], + "lines": [ + { + "bbox": [ + 242, + 87, + 366, + 152 + ], + "spans": [ + { + "bbox": [ + 242, + 87, + 366, + 152 + ], + "type": "image", + "image_path": "44228cc4cb20f4bb5ebcf91b59b11946430cf8359c5cdc1bfbbcccf7866d5bec.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 296, + 154, + 309, + 164 + ], + "lines": [ + { + "bbox": [ + 296, + 154, + 309, + 164 + ], + "spans": [ + { + "bbox": [ + 296, + 154, + 309, + 164 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "lines": [ + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": "Figure 4: Plot of truth degree for (a) CLNN- " + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "inline_equation", + "content": "\\alpha = 0.7" + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": ", (b) CLNN- " + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "inline_equation", + "content": "\\alpha = 0.9" + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": ", (c) CLNN- " + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "inline_equation", 
+ "content": "\\vee" + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "inline_equation", + "content": "\\alpha = 0.7" + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 374, + 87, + 498, + 152 + ], + "blocks": [ + { + "bbox": [ + 374, + 87, + 498, + 152 + ], + "lines": [ + { + "bbox": [ + 374, + 87, + 498, + 152 + ], + "spans": [ + { + "bbox": [ + 374, + 87, + 498, + 152 + ], + "type": "image", + "image_path": "1110140bea7cd3027c436d8e7f557f9ff7327030c05282067db9188a4a31941c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 428, + 154, + 440, + 165 + ], + "lines": [ + { + "bbox": [ + 428, + 154, + 440, + 165 + ], + "spans": [ + { + "bbox": [ + 428, + 154, + 440, + 165 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 221, + 208, + 505, + 243 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 208, + 505, + 243 + ], + "spans": [ + { + "bbox": [ + 221, + 208, + 505, + 243 + ], + "type": "interline_equation", + "content": "\\text {s u b j e c t} \\quad \\beta - \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\geq \\alpha , \\beta - \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\leq 1 - \\alpha . 
\\tag {19}", + "image_path": "572b2765660fe83b77600632c030bc633a00c0045678b17d3a62fd09eb039d45.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 254, + 340, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 254, + 340, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 254, + 340, + 266 + ], + "type": "text", + "content": "The " + }, + { + "bbox": [ + 105, + 254, + 340, + 266 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 105, + 254, + 340, + 266 + ], + "type": "text", + "content": "-ary disjunction formulation is expressed as follows." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 272, + 505, + 306 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 272, + 505, + 306 + ], + "spans": [ + { + "bbox": [ + 116, + 272, + 505, + 306 + ], + "type": "interline_equation", + "content": "p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\vee \\phi_ {2} ^ {w _ {2}} \\dots \\vee \\phi_ {k} ^ {w _ {k}}, t\\right) = f (1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\left(p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)), \\tag {20}", + "image_path": "5d4b192d62bfe90dae84e681ddbc4a93e68814d21b6816499bbf336fdb97d52d.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 196, + 308, + 504, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 308, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 196, + 308, + 504, + 342 + ], + "type": "interline_equation", + "content": "\\text {s u b j e c t} \\quad 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} \\alpha \\geq \\alpha , 1 - \\beta + \\sum_ {j = 1} ^ {k} w _ {j} (1 - \\alpha) \\leq 1 - \\alpha . 
\\tag {21}", + "image_path": "84c4b781ffbe08a290086b3d325dc760c589d40013aea57595194d381721a2bd.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 354, + 479, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 479, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 479, + 365 + ], + "type": "text", + "content": "With the above constraints, we can formulate the maximum likelihood estimation problem as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 133, + 371, + 504, + 384 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 371, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 133, + 371, + 504, + 384 + ], + "type": "interline_equation", + "content": "\\min - L L _ {l} \\tag {22}", + "image_path": "1e7b8606082b456d4b3de4d24d353221517f8426352ff8d067c5d98ca8a2e0c1.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 386, + 504, + 411 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 386, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 108, + 386, + 504, + 411 + ], + "type": "interline_equation", + "content": "s. t. 
\\quad \\forall \\phi \\in \\Phi , \\forall 1 \\leq k \\leq K _ {\\phi} ^ {\\wedge}, \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} (1 - \\alpha) \\geq \\alpha , \\beta_ {k} - \\sum_ {i \\in I _ {k}} w _ {i, k} \\alpha \\leq 1 - \\alpha , \\tag {23}", + "image_path": "c76692d57c1772bd4e8d51c854741afe642f90595a70751c93c616d5a141c3f0.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 415, + 504, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 415, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 130, + 415, + 504, + 453 + ], + "type": "interline_equation", + "content": "\\forall \\phi \\in \\Phi , \\forall 1 \\leq k ^ {\\prime} \\leq K _ {\\phi} ^ {\\vee}, 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} \\alpha \\geq \\alpha , 1 - \\beta_ {k ^ {\\prime}} + \\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} (1 - \\alpha) \\leq 1 - \\alpha . \\tag {24}", + "image_path": "e5e730d8fa1ef6b66bf7e36fd99098734a3d0afab2008382aa2bb368076c1742.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 464, + 367, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 367, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 367, + 476 + ], + "type": "text", + "content": "In this paper, we set " + }, + { + "bbox": [ + 105, + 464, + 367, + 476 + ], + "type": "inline_equation", + "content": "\\alpha = 0.5" + }, + { + "bbox": [ + 105, + 464, + 367, + 476 + ], + "type": "text", + "content": ", thus the constraints in (19) become" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 267, + 483, + 341, + 515 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 483, + 341, + 515 + ], + "spans": [ + { + "bbox": [ + 267, + 483, + 341, + 515 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 1} ^ {k} w _ {i} \\geq 2 \\beta - 1,", + "image_path": 
"ccfeeaa1ce50e9d37c0fc16433881031c165fe025978a92e9f4aeabcc5ac83c8.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 267, + 518, + 504, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 518, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 267, + 518, + 504, + 550 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 1} ^ {k} w _ {i} \\leq 2 \\beta - 1, \\tag {25}", + "image_path": "4a9684772514f3f9a8ae51a6191bdfd082d7d5e9e553018cdad1003bdf49e7b2.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 290, + 552, + 341, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 552, + 341, + 563 + ], + "spans": [ + { + "bbox": [ + 290, + 552, + 341, + 563 + ], + "type": "interline_equation", + "content": "2 \\beta - 1 \\geq 0,", + "image_path": "581a0776617a6a05077e655e9712e42f4a1f29215ce3acc06cca9bb3cf0f0f9e.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 567, + 341, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 567, + 341, + 578 + ], + "spans": [ + { + "bbox": [ + 308, + 567, + 341, + 578 + ], + "type": "interline_equation", + "content": "w _ {i} \\geq 0.", + "image_path": "519bb43541d9798ff615b988df1a08044dc588321c9c036aa3d1f460fafd3ce0.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 585, + 291, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 291, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 291, + 596 + ], + "type": "text", + "content": "Reformulating the above constraints, we have" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 267, + 602, + 504, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 602, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 267, + 602, + 504, + 635 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 1} ^ {k} w _ {i} = 2 \\beta - 1, \\tag 
{26}", + "image_path": "dca94c0214e4d975542b98a69346b0bd0d13c64b81b6cc21e8fc7fbec3d09928.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 288, + 637, + 326, + 649 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 288, + 637, + 326, + 649 + ], + "spans": [ + { + "bbox": [ + 288, + 637, + 326, + 649 + ], + "type": "interline_equation", + "content": "\\beta \\geq 0. 5,", + "image_path": "5d3e9a94afd3cfaafccf34dbfb8a2ad4d7046f9f8fa3a94f84313a2568db6e12.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 285, + 651, + 504, + 663 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 651, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 285, + 651, + 504, + 663 + ], + "type": "interline_equation", + "content": "w _ {i} \\geq 0. \\tag {27}", + "image_path": "104b62554f2a20aed353147a109201d406983af502fdd7aa0b06c193527f4695.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 669, + 504, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 669, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 104, + 669, + 504, + 692 + ], + "type": "text", + "content": "The above constraints hold for each conjunction operator in " + }, + { + "bbox": [ + 104, + 669, + 504, + 692 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 669, + 504, + 692 + ], + "type": "text", + "content": ". 
Therefore, we can incorporate the constraints in (26) into the objective function, which becomes" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 194, + 698, + 504, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 698, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 194, + 698, + 504, + 734 + ], + "type": "interline_equation", + "content": "\\min - L L _ {l} + \\sum_ {k = 1} ^ {K _ {\\phi} ^ {\\wedge}} \\left(\\sum_ {i \\in I _ {k}} w _ {i, k} - 2 \\beta_ {k} + 1\\right) ^ {2}, \\tag {28}", + "image_path": "7d0e8fc4bad788e316b15b33da442af16d87f1ce543761bc9800018cb283fded.jpg" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 173, + 82, + 504, + 95 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 82, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 173, + 82, + 504, + 95 + ], + "type": "interline_equation", + "content": "\\text {s u b j e c t} w _ {i, k} \\geq 0, \\beta_ {k} \\geq 0. 5, \\forall i \\in I _ {k}, \\forall 1 \\leq k \\leq K _ {\\phi} ^ {\\wedge}, \\forall \\phi \\in \\Phi . 
\\tag {29}", + "image_path": "a403ca58f398d1f326ded1f78afdde175ddc35b17a816824b4a8b1e17c47b50f.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 105, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 105, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 105, + 504, + 127 + ], + "type": "text", + "content": "Similarly, we propose a set of logical constraints for the " + }, + { + "bbox": [ + 104, + 105, + 504, + 127 + ], + "type": "inline_equation", + "content": "\\lor" + }, + { + "bbox": [ + 104, + 105, + 504, + 127 + ], + "type": "text", + "content": " operator as (21). If we set " + }, + { + "bbox": [ + 104, + 105, + 504, + 127 + ], + "type": "inline_equation", + "content": "\\alpha = 0.5" + }, + { + "bbox": [ + 104, + 105, + 504, + 127 + ], + "type": "text", + "content": ", the constraints in (21) become" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 267, + 133, + 341, + 165 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 133, + 341, + 165 + ], + "spans": [ + { + "bbox": [ + 267, + 133, + 341, + 165 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 1} ^ {k} w _ {i} \\geq 2 \\beta - 1,", + "image_path": "d264510c26503b603c47a4360731af577d54c22bb6f647acc9f6ba5c37057802.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 267, + 168, + 504, + 200 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 168, + 504, + 200 + ], + "spans": [ + { + "bbox": [ + 267, + 168, + 504, + 200 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 1} ^ {k} w _ {i} \\leq 2 \\beta - 1, \\tag {30}", + "image_path": "20395ccc2428e5c90d772566810474fb9f5f2d8d3ef1ec6c1260a190bde7185f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 290, + 202, + 341, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 202, + 341, + 213 + ], + "spans": [ + { + "bbox": [ + 290, + 202, + 341, 
+ 213 + ], + "type": "interline_equation", + "content": "2 \\beta - 1 \\geq 0,", + "image_path": "37058c181670c0b8cef9b7539b3325c7a78801749bb587e2f389da24ed556e6a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 217, + 341, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 217, + 341, + 228 + ], + "spans": [ + { + "bbox": [ + 308, + 217, + 341, + 228 + ], + "type": "interline_equation", + "content": "w _ {i} \\geq 0.", + "image_path": "2d844268a6c132efda695c0e4229e0155516f22ff07d38147712af1840700d9b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 233, + 291, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 233, + 291, + 245 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 291, + 245 + ], + "type": "text", + "content": "Reformulating the above constraints, we have" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 267, + 251, + 504, + 283 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 251, + 504, + 283 + ], + "spans": [ + { + "bbox": [ + 267, + 251, + 504, + 283 + ], + "type": "interline_equation", + "content": "\\sum_ {i _ {1}} ^ {k} w _ {i} = 2 \\beta - 1, \\tag {31}", + "image_path": "e48fb1e0760d1512f0fff8492f231ac2d5e993e88c302b4fc2fd0ce9f34fd442.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 288, + 286, + 325, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 288, + 286, + 325, + 297 + ], + "spans": [ + { + "bbox": [ + 288, + 286, + 325, + 297 + ], + "type": "interline_equation", + "content": "\\beta \\geq 0. 
5.", + "image_path": "f27a34e476f5545f3852eaef143216c1be10d6da6fe07f0befb1657efe182686.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 285, + 300, + 504, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 300, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 285, + 300, + 504, + 312 + ], + "type": "interline_equation", + "content": "w _ {i} \\geq 0. \\tag {32}", + "image_path": "b4afed3571cee0a8c35616aee7684cce8f17f25ed6e07f4a6378f354efa64187.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 316, + 504, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 349 + ], + "type": "text", + "content": "The above constraints hold for each disjunction operator in " + }, + { + "bbox": [ + 104, + 316, + 504, + 349 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 316, + 504, + 349 + ], + "type": "text", + "content": ". Therefore, we can incorporate the constraints in (31) into the objective function. 
The maximum likelihood estimation problem then becomes" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 160, + 354, + 504, + 390 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 354, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 160, + 354, + 504, + 390 + ], + "type": "interline_equation", + "content": "\\min - L L _ {l} + \\sum_ {k = 1} ^ {K _ {\\phi} ^ {\\wedge}} \\left(\\sum_ {i \\in I _ {k}} w _ {i, k} - 2 \\beta_ {k} + 1\\right) ^ {2} + \\sum_ {k ^ {\\prime} = 1} ^ {K _ {\\phi} ^ {\\vee}} \\left(\\sum_ {i \\in I _ {k ^ {\\prime}}} w _ {i, k ^ {\\prime}} - 2 \\beta_ {k ^ {\\prime}} + 1\\right) ^ {2}, \\tag {33}", + "image_path": "a47060c897712a4224f6f9a8e31b17501e73184e2476c6e80dd9d2c6a8e3c633.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 393, + 402, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 393, + 402, + 406 + ], + "spans": [ + { + "bbox": [ + 138, + 393, + 402, + 406 + ], + "type": "text", + "content": "subject to " + }, + { + "bbox": [ + 138, + 393, + 402, + 406 + ], + "type": "inline_equation", + "content": "w_{i,k}\\geq 0,\\beta_k\\geq 0.5,\\forall i\\in I_k,\\forall 1\\leq k\\leq K_\\phi^{\\wedge},\\forall \\phi \\in \\Phi ," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 187, + 408, + 413, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 408, + 413, + 422 + ], + "spans": [ + { + "bbox": [ + 187, + 408, + 413, + 422 + ], + "type": "interline_equation", + "content": "w _ {i, k ^ {\\prime}} \\geq 0, \\beta_ {k ^ {\\prime}} \\geq 0. 
5, \\forall i \\in I _ {k ^ {\\prime}}, \\forall 1 \\leq k ^ {\\prime} \\leq K _ {\\phi} ^ {\\vee}, \\forall \\phi \\in \\Phi .", + "image_path": "e93166de61848b5b50eb16d52b5f5d46ad4159f2a012b7774b239d59f66a1793.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 437, + 244, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 244, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 244, + 449 + ], + "type": "text", + "content": "B PROOF OF THEOREM 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "text", + "content": "The activation function designed for the " + }, + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "text", + "content": " operator satisfies the properties of nonimpact for zero weights, impact ordering, and monotonicity. Without loss of generality, we present the proof for the " + }, + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "text", + "content": " operator connecting two clauses, which can be generalized to the " + }, + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "text", + "content": " operator connecting " + }, + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 461, + 504, + 505 + ], + "type": "text", + "content": "-ary clauses." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 516, + 504, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 504, + 539 + ], + "type": "text", + "content": "Proof 1 Here we present the proof for the activation function for the " + }, + { + "bbox": [ + 104, + 516, + 504, + 539 + ], + "type": "inline_equation", + "content": "\\wedge" + }, + { + "bbox": [ + 104, + 516, + 504, + 539 + ], + "type": "text", + "content": " operator satisfying each property mentioned above." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 548, + 259, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 548, + 259, + 559 + ], + "spans": [ + { + "bbox": [ + 132, + 548, + 259, + 559 + ], + "type": "text", + "content": "- Nonimpact for zero weights." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "spans": [ + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "text", + "content": "This means if " + }, + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "inline_equation", + "content": "w_{j} = 0, j = 1,2" + }, + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}',\\phi_j,t)" + }, + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "text", + "content": " should have no impact on " + }, + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}',\\phi_1^{w_1}\\wedge \\phi_2^{w_2},t)" + }, + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "text", + "content": ". 
Without loss of generality, we suppose " + }, + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "inline_equation", + "content": "w_{1} = 0" + }, + { + "bbox": [ + 140, + 564, + 504, + 588 + ], + "type": "text", + "content": ", thus we have" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 159, + 592, + 504, + 621 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 592, + 504, + 621 + ], + "spans": [ + { + "bbox": [ + 159, + 592, + 504, + 621 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}}, t\\right) = f (\\beta - 0 \\cdot (1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1}, t\\right)) - w _ {2} \\cdot (1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {2}, t\\right))), \\tag {34} \\\\ = f \\left(\\beta - w _ {2} \\cdot \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {2}, t\\right)\\right)\\right), \\\\ \\end{array}", + "image_path": "89775e2612ff9978b5d58138a1a9cc24a23f81b00b7be0c1ce27494a511cbbb6.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 140, + 625, + 377, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 625, + 377, + 639 + ], + "spans": [ + { + "bbox": [ + 140, + 625, + 377, + 639 + ], + "type": "text", + "content": "meaning " + }, + { + "bbox": [ + 140, + 625, + 377, + 639 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}',\\phi_1,t)" + }, + { + "bbox": [ + 140, + 625, + 377, + 639 + ], + "type": "text", + "content": " has no impact on " + }, + { + "bbox": [ + 140, + 625, + 377, + 639 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}',\\phi_1^{w_1}\\wedge \\phi_2^{w_2},t)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 645, + 214, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 645, + 214, + 657 + ], + "spans": [ + { + "bbox": [ + 132, + 645, + 214, + 657 + ], + "type": "text", + 
"content": "- Impact Ordering" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 140, + 662, + 505, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 662, + 505, + 696 + ], + "spans": [ + { + "bbox": [ + 140, + 662, + 505, + 696 + ], + "type": "text", + "content": "This means the truth degree of subformula with higher weights has a greater impact on " + }, + { + "bbox": [ + 140, + 662, + 505, + 696 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}', \\phi_1^{w_1} \\wedge \\phi_2^{w_2}, t)" + }, + { + "bbox": [ + 140, + 662, + 505, + 696 + ], + "type": "text", + "content": ". Mathematically, we need to prove that if " + }, + { + "bbox": [ + 140, + 662, + 505, + 696 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}', \\phi_1, t) = p(\\mathcal{C}', \\phi_2, t)" + }, + { + "bbox": [ + 140, + 662, + 505, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 140, + 662, + 505, + 696 + ], + "type": "inline_equation", + "content": "w_1 \\geq w_2" + }, + { + "bbox": [ + 140, + 662, + 505, + 696 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 228, + 708, + 504, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 708, + 504, + 736 + ], + "spans": [ + { + "bbox": [ + 228, + 708, + 504, + 736 + ], + "type": "interline_equation", + "content": "\\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} \\geq \\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)}. 
\\tag {35}", + "image_path": "62bc4194028d2ee2e13aca58b8be3cd505f7b81222255b0de9fcf3e21895fcda.jpg" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 140, + 82, + 308, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 82, + 308, + 95 + ], + "spans": [ + { + "bbox": [ + 140, + 82, + 308, + 95 + ], + "type": "text", + "content": "As " + }, + { + "bbox": [ + 140, + 82, + 308, + 95 + ], + "type": "inline_equation", + "content": "f(x) = \\max \\{0, \\min \\{x, 1\\}\\}" + }, + { + "bbox": [ + 140, + 82, + 308, + 95 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 266, + 102, + 504, + 141 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 102, + 504, + 141 + ], + "spans": [ + { + "bbox": [ + 266, + 102, + 504, + 141 + ], + "type": "interline_equation", + "content": "\\frac {d f}{d x} = \\left\\{ \\begin{array}{l l} 0, & \\text {i f} x < 0, \\\\ 1, & \\text {i f} 0 < x < 1, \\\\ 0, & \\text {i f} x > 1. \\end{array} \\right. 
\\tag {36}", + "image_path": "8934ed7e46def8e3f02c395aa04567e35b10dc3b9dee4f590ad8bcb7ae9e6b7c.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 149, + 505, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 149, + 505, + 166 + ], + "spans": [ + { + "bbox": [ + 140, + 149, + 505, + 166 + ], + "type": "text", + "content": "If " + }, + { + "bbox": [ + 140, + 149, + 505, + 166 + ], + "type": "inline_equation", + "content": "\\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},t)) < 0" + }, + { + "bbox": [ + 140, + 149, + 505, + 166 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 140, + 149, + 505, + 166 + ], + "type": "inline_equation", + "content": "\\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},k)) > 1" + }, + { + "bbox": [ + 140, + 149, + 505, + 166 + ], + "type": "text", + "content": " , then we have" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 219, + 171, + 505, + 199 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 171, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 219, + 171, + 505, + 199 + ], + "type": "interline_equation", + "content": "\\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} = \\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)} = 0. 
\\tag {37}", + "image_path": "0ba02dcb659a8cf71d7c84111e54cad8022996fb7ca5a3feb27bd0c83a327c04.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 211, + 395, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 211, + 395, + 227 + ], + "spans": [ + { + "bbox": [ + 140, + 211, + 395, + 227 + ], + "type": "text", + "content": "Also, if " + }, + { + "bbox": [ + 140, + 211, + 395, + 227 + ], + "type": "inline_equation", + "content": "0 < \\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},t)) < 1" + }, + { + "bbox": [ + 140, + 211, + 395, + 227 + ], + "type": "text", + "content": ", then we have" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 176, + 233, + 505, + 268 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 233, + 505, + 268 + ], + "spans": [ + { + "bbox": [ + 176, + 233, + 505, + 268 + ], + "type": "interline_equation", + "content": "\\frac {\\partial \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {j} , t\\right)\\right)\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} = w _ {1} \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right), \\tag {38}", + "image_path": "0617870a5727fc0e874417c051f81f50b6a90599bf637eec665751b8bd87d12a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 275, + 160, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 275, + 160, + 285 + ], + "spans": [ + { + "bbox": [ + 140, + 275, + 160, + 285 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 176, + 290, + 505, + 324 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 290, + 505, + 324 + ], + "spans": [ + { + "bbox": [ + 176, + 290, + 505, + 324 + ], + "type": "interline_equation", + "content": "\\frac 
{\\partial \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {j} , t\\right)\\right)\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)} = w _ {2} \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right). \\tag {39}", + "image_path": "2fc6518e76668676e12c8e1bbb84b2d3a4c2f69f216f4d794b2d6f64a4ca5a4f.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 331, + 279, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 331, + 279, + 343 + ], + "spans": [ + { + "bbox": [ + 140, + 331, + 279, + 343 + ], + "type": "text", + "content": "As " + }, + { + "bbox": [ + 140, + 331, + 279, + 343 + ], + "type": "inline_equation", + "content": "w_{1}\\geq w_{2}" + }, + { + "bbox": [ + 140, + 331, + 279, + 343 + ], + "type": "text", + "content": " , the following holds:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 228, + 348, + 505, + 376 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 348, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 228, + 348, + 505, + 376 + ], + "type": "interline_equation", + "content": "\\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} , t\\right)} \\geq \\frac {\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {1} ^ {w _ {1}} \\wedge \\phi_ {2} ^ {w _ {2}} , t\\right)}{\\partial p \\left(\\mathcal {C} ^ {\\prime} , \\phi_ {2} , t\\right)}, \\tag {40}", + "image_path": "117f6f44c0193afde420298ce46902b102f4c58242649dc2514a3160dd2bf0aa.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 381, + 340, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 381, + 340, + 395 + ], + "spans": [ + { + "bbox": [ + 140, + 381, + 340, + 395 + ], + "type": "text", 
+ "content": "which proves the impact ordering property holds." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 401, + 201, + 412 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 401, + 201, + 412 + ], + "spans": [ + { + "bbox": [ + 132, + 401, + 201, + 412 + ], + "type": "text", + "content": "- Monotonicity." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 140, + 417, + 451, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 417, + 451, + 431 + ], + "spans": [ + { + "bbox": [ + 140, + 417, + 451, + 431 + ], + "type": "text", + "content": "This means " + }, + { + "bbox": [ + 140, + 417, + 451, + 431 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}', \\phi_1^{w_1} \\wedge \\phi_2^{w_2}, t)" + }, + { + "bbox": [ + 140, + 417, + 451, + 431 + ], + "type": "text", + "content": " increases monotonically over " + }, + { + "bbox": [ + 140, + 417, + 451, + 431 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}', \\phi_j, t)" + }, + { + "bbox": [ + 140, + 417, + 451, + 431 + ], + "type": "text", + "content": ", i.e." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 147, + 437, + 505, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 437, + 505, + 471 + ], + "spans": [ + { + "bbox": [ + 147, + 437, + 505, + 471 + ], + "type": "interline_equation", + "content": "f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right) \\leq f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right) - d\\right)\\right) \\text {f o r} d \\geq 0. 
\\tag {41}", + "image_path": "c77d731be1b74a627d2e4c50f314ea93fcc273f2a544a86b7e2d3737e8e14c2b.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 140, + 484, + 410, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 484, + 410, + 500 + ], + "spans": [ + { + "bbox": [ + 140, + 484, + 410, + 500 + ], + "type": "text", + "content": "First, note that " + }, + { + "bbox": [ + 140, + 484, + 410, + 500 + ], + "type": "inline_equation", + "content": "\\beta -\\sum_{j = 1}^{2}w_{j}(1 - p(\\mathcal{C}^{\\prime},\\phi_{j},t))" + }, + { + "bbox": [ + 140, + 484, + 410, + 500 + ], + "type": "text", + "content": " can be rewritten as" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 152, + 507, + 505, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 507, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 152, + 507, + 505, + 540 + ], + "type": "interline_equation", + "content": "\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right) = \\beta - w _ {1} - w _ {2} + w _ {1} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {1}, t\\right) + w _ {2} p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {2}, t\\right). 
\\tag {42}", + "image_path": "4dcaaeec91b8b1cab4ae240eda936a9cd5c2585a0150d6b2860e13d4964b6430.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "text", + "content": "This implies " + }, + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "inline_equation", + "content": "f(\\beta - \\sum_{j=1}^{2} w_j (1 - p(\\mathcal{C}', \\phi_j, t)))" + }, + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "text", + "content": " is monotonically increasing over " + }, + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}', \\phi_1, t)" + }, + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "inline_equation", + "content": "p(\\mathcal{C}', \\phi_2, t)" + }, + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "text", + "content": ". 
Also, from the proof of impact ordering we know " + }, + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "inline_equation", + "content": "f(x) = \\max \\{0, \\min \\{x, 1\\}\\}" + }, + { + "bbox": [ + 140, + 548, + 506, + 586 + ], + "type": "text", + "content": " is monotonically nondecreasing, we can show that" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 153, + 593, + 505, + 626 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 593, + 505, + 626 + ], + "spans": [ + { + "bbox": [ + 153, + 593, + 505, + 626 + ], + "type": "interline_equation", + "content": "f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right)\\right)\\right) \\leq f \\left(\\beta - \\sum_ {j = 1} ^ {2} w _ {j} \\left(1 - p \\left(\\mathcal {C} ^ {\\prime}, \\phi_ {j}, t\\right) - d\\right)\\right), d \\geq 0. \\tag {43}", + "image_path": "2949893612ccfacf2515b2fec9c3a6c47b020c19fe85762115bba84a7f64af3a.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 140, + 633, + 326, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 633, + 326, + 645 + ], + "spans": [ + { + "bbox": [ + 140, + 633, + 326, + 645 + ], + "type": "text", + "content": "Thus the property of monotonicity is satisfied." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 662, + 381, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 662, + 381, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 381, + 673 + ], + "type": "text", + "content": "C EXPERIMENT RESULTS OF SYNTHETIC DATASETS" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Dataset Generation. 
In the experiments on synthetic datasets, we manually generate 3 synthetic datasets considering different settings, where the details and results for the first synthetic dataset is reported in Section 4.2. Each setting considers a different order representation, different number of event labels or different intensity of causal event labels." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 196, + 82, + 411, + 249 + ], + "blocks": [ + { + "bbox": [ + 196, + 82, + 411, + 249 + ], + "lines": [ + { + "bbox": [ + 196, + 82, + 411, + 249 + ], + "spans": [ + { + "bbox": [ + 196, + 82, + 411, + 249 + ], + "type": "image", + "image_path": "6615481611b2355c10d747efa0377f49ce26dac2c3075e65e67373d9e97fbe23.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 158, + 264, + 451, + 277 + ], + "lines": [ + { + "bbox": [ + 158, + 264, + 451, + 277 + ], + "spans": [ + { + "bbox": [ + 158, + 264, + 451, + 277 + ], + "type": "text", + "content": "Figure 5: Model structure of " + }, + { + "bbox": [ + 158, + 264, + 451, + 277 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}_1" + }, + { + "bbox": [ + 158, + 264, + 451, + 277 + ], + "type": "text", + "content": " for generating the first synthetic dataset." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 297, + 276, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 276, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 276, + 308 + ], + "type": "text", + "content": "C.1 SYNTHETIC DATASET-1 (SYN-1)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "text", + "content": "Generation process. The first synthetic dataset contains 4 event labels: " + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "text", + "content": " is the event for prediction, and " + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "text", + "content": " are causal events. 
The wCL formula used to generate event " + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 316, + 506, + 350 + ], + "type": "text", + "content": " in the first synthetic dataset is set as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 219, + 356, + 504, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 356, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 219, + 356, + 504, + 370 + ], + "type": "interline_equation", + "content": "\\hat {\\phi} _ {1} = \\left(c _ {A} - c _ {B} > 1\\right) ^ {1} \\wedge \\left(c _ {A} - c _ {C} > 3\\right) ^ {1}, \\tag {44}", + "image_path": "560de73d2b36c7af7e239234b9832137dfb5e27474b54de8aa41965e413b0bec.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "text", + "content": "whose unweighted version reads as \"If " + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "text", + "content": " for at least 1 time unit and " + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "text", + "content": " for at least 3 time units, then " + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": 
"inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 374, + 504, + 398 + ], + "type": "text", + "content": " will happen.\"" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "text", + "content": "Here we consider event labels " + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "text", + "content": " as free predicates, whose occurrences are generated by a homogeneous Poisson process. The homogeneous intensity rate for " + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "text", + "content": " are set as " + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "inline_equation", + "content": "\\lambda_A = 0.2" + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "inline_equation", + "content": "\\lambda_B = 0.2" + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "inline_equation", + "content": "\\lambda_C = 0.2" + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "text", + "content": ". The algorithm used to generate instances of " + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 402, + 505, + 447 + ], + "type": "text", + "content": " is described as Algorithm 1 (Chen, 2016)." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "code", + "bbox": [ + 106, + 472, + 504, + 639 + ], + "blocks": [ + { + "bbox": [ + 106, + 458, + 432, + 470 + ], + "lines": [ + { + "bbox": [ + 106, + 458, + 432, + 470 + ], + "spans": [ + { + "bbox": [ + 106, + 458, + 432, + 470 + ], + "type": "text", + "content": "Algorithm 1 Simulation of a homogeneous Poisson process with intensity rate " + }, + { + "bbox": [ + 106, + 458, + 432, + 470 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 106, + 458, + 432, + 470 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "lines": [ + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "spans": [ + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": "Input: Intensity rate " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " , simulation horizon " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " \nOutput: Occurrence time stamps " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = \\{t_k\\}" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " \n1: Initialize " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "n = 0,t_0 = 0" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " . 
\n2: while True do \n3: Generate " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "u\\sim" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " uniform(0, 1); \n4: Let " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "w = -ln(u) / \\lambda" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " . \n5: Set " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "t_{n + 1} = t_n + w" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " . \n6: if " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "t_{n + 1} > T" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " then \n7: return " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = \\{t_k\\}_{k = 1,2,\\dots,n}" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " . \n8: else \n9: Set " + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "inline_equation", + "content": "n = n + 1" + }, + { + "bbox": [ + 106, + 472, + 504, + 639 + ], + "type": "text", + "content": " . 
\n10: end if \n11: end while" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "algorithm" + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "text", + "content": "With the above algorithm, we can generate the occurrences of event labels " + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "inline_equation", + "content": "A, B" + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "text", + "content": ". Next, we build a CLNN for " + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}_1 = (c_A - c_B > 1)^1 \\wedge (c_A - c_C > 3)^1" + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "text", + "content": " to calculate the conditional intensity rate " + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "inline_equation", + "content": "\\lambda_{D|\\hat{\\phi}_1}" + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "text", + "content": ", whose model structure is shown in Figure 5. After obtaining " + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "inline_equation", + "content": "\\lambda_{D|\\hat{\\phi}_1}(t)" + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "text", + "content": ", we could use Algorithm 2 (Chen, 2016) to generate the occurrence of " + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 656, + 504, + 704 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": "Results. The rules learned by CLNN, TELLER, and OGEM-tab on the first synthetic dataset are presented in Table 5, where the paired order predicate among the two candidates with the highest" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 312, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 96, + 362, + 327 + ], + "blocks": [ + { + "bbox": [ + 106, + 82, + 457, + 95 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 457, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 457, + 95 + ], + "type": "text", + "content": "Algorithm 2 Simulation of an inhomogeneous Poisson process with intensity rate " + }, + { + "bbox": [ + 106, + 82, + 457, + 95 + ], + "type": "inline_equation", + "content": "\\lambda(t)" + }, + { + "bbox": [ + 106, + 82, + 457, + 95 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "lines": [ + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "spans": [ + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": "Input: intensity rate " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "\\lambda (t)" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " , simulation horizon " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " \nOutput: Occurrence time stamps " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = \\{t_k\\}" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " 1: Initialize " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "n = m = 0,t_0 = s_0 = 0,\\bar{\\lambda} = \\sup_{0\\leq t\\leq T};\\lambda (t);" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " 2: while " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "s_m < T" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " do 3: Generate a uniform random variable " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "u\\sim \\mathrm{uniform}(0,1)" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " 4: Let " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "w = -\\ln u / \\bar{\\lambda}" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " . 
5: Set " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "s_{m + 1} = s_m + w" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " . 6: Generate " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "D\\sim \\mathrm{uniform}(0,1)" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " . 7: if " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "D\\leq \\lambda (s_{m + 1})\\bar{\\lambda}" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " then 8: " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "t_{n + 1} = s_{m + 1}" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " . 9: " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "n = n + 1" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " . \n10: end if \n11: " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "m = m + 1" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " . 
\n12: if " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "t_n\\leq T" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " then \n13: return " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "\\{t_k\\}_{k = 1,2,\\dots,n}" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " \n14: else \n15: return " + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "inline_equation", + "content": "\\{t_k\\}_{k = 1,2,\\dots,n - 1}" + }, + { + "bbox": [ + 106, + 96, + 362, + 327 + ], + "type": "text", + "content": " \n16: end if \n17: end while" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "algorithm" + }, + { + "type": "table", + "bbox": [ + 142, + 339, + 466, + 480 + ], + "blocks": [ + { + "bbox": [ + 142, + 339, + 466, + 480 + ], + "lines": [ + { + "bbox": [ + 142, + 339, + 466, + 480 + ], + "spans": [ + { + "bbox": [ + 142, + 339, + 466, + 480 + ], + "type": "table", + "html": "
DatasetSyn-1
N (# events)N = 4, L = {A, B, C, D}
Ground truthφ1 = (cA - cB > 1)1 ∧ (cA - cC > 3)1
CLNN's rule(cA - cB > 1.21)1.52 ∧ (cA - cC > 3.00)1.41 ∧ (cA - cD > 0.82)0.33 ∧ (cB - cC > 4.33)0 ∧ (cB - cD > 10.69)0 ∧ (cD - cC > -6.57)0.16
TELLER's ruleA before D, B before D, C before D, A before D and C before D
OGEM-tab's ruleExcitation: [B], [C], [C, B], [B, C], [A, C, B], [A, B, C]Inhibitory: [A], [B, A], [B, A, C], [C, B, A], [A, B], [A, C], [B, C, A], [C, A, B], [C, A]
", + "image_path": "1ecad1295859dbea184c98335fe73ae47c3179671a3dc4e984f7a1a0a1695ef8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 488, + 476, + 500 + ], + "lines": [ + { + "bbox": [ + 133, + 488, + 476, + 500 + ], + "spans": [ + { + "bbox": [ + 133, + 488, + 476, + 500 + ], + "type": "text", + "content": "Table 5: Comparison of rule discovery for CLNN and TELLER on the Syn-1 dataset." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 521, + 504, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 521, + 504, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 504, + 543 + ], + "type": "text", + "content": "weight is presented. It can be clearly observed that by truncating the predicates with small weights, we could obtain the formula as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 196, + 549, + 504, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 549, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 196, + 549, + 504, + 563 + ], + "type": "interline_equation", + "content": "\\phi_ {1} = \\left(c _ {A} - c _ {B} > 1. 2 1\\right) ^ {1. 5 2} \\wedge \\left(c _ {A} - c _ {C} > 3. 0 0\\right) ^ {1. 4 1}, \\tag {45}", + "image_path": "c34dd71bc06ac3a3b174b61cff263b2b9a662c462fca2dfbde0f33eab1fc76ac.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "text", + "content": "which matches well with the ground-truth rule. 
However, TELLER cannot capture the paired order representation between " + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "text", + "content": ". OGEM-tab captures the order representation " + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "inline_equation", + "content": "[A, B]" + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "inline_equation", + "content": "[A, C]" + }, + { + "bbox": [ + 104, + 569, + 505, + 604 + ], + "type": "text", + "content": " as inhibitory causes, which contradicts the ground-truth rule." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 616, + 275, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 275, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 275, + 628 + ], + "type": "text", + "content": "C.2 SYNTHETIC DATASET-2 (SYN-2)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "text", + "content": "Generation Process. 
The second synthetic dataset contains 5 event labels: " + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "inline_equation", + "content": "A, B, C, D" + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "text", + "content": " is the event for prediction, and " + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "inline_equation", + "content": "A, B, C, D" + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "text", + "content": " are causal events. The wCL formula used to generate the occurrence of event " + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 638, + 505, + 672 + ], + "type": "text", + "content": " in the second synthetic dataset is set as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 173, + 677, + 504, + 693 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 677, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 173, + 677, + 504, + 693 + ], + "type": "interline_equation", + "content": "\\hat {\\phi} _ {2} = \\left(c _ {A} - c _ {B} > 0. 5\\right) ^ {1} \\wedge \\left(c _ {A} - c _ {C} > 1. 
5\\right) ^ {1} \\wedge \\left(c _ {C} - c _ {D} > 2\\right) ^ {1}, \\tag {46}", + "image_path": "08dac4113ede2c2c0388c32ae1cc6117eb089953a93f8265e4df9cc762241f4e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "whose unweighted version reads as \"If " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " for at least 0.5 time units, " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " for at least 1.5 time units, and " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " for at least 2 time units, then " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " will happen.\"" + } + ] + } + ], + "index": 11 + } + ], + 
"discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 160, + 84, + 449, + 252 + ], + "blocks": [ + { + "bbox": [ + 160, + 84, + 449, + 252 + ], + "lines": [ + { + "bbox": [ + 160, + 84, + 449, + 252 + ], + "spans": [ + { + "bbox": [ + 160, + 84, + 449, + 252 + ], + "type": "image", + "image_path": "5c9eb772a4b858637daaf47666b26dd2ef522b35246e1a44e155613c66b870c4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 152, + 262, + 457, + 277 + ], + "lines": [ + { + "bbox": [ + 152, + 262, + 457, + 277 + ], + "spans": [ + { + "bbox": [ + 152, + 262, + 457, + 277 + ], + "type": "text", + "content": "Figure 6: Model structure of " + }, + { + "bbox": [ + 152, + 262, + 457, + 277 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}_2" + }, + { + "bbox": [ + 152, + 262, + 457, + 277 + ], + "type": "text", + "content": " for generating the second synthetic dataset." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "text", + "content": "The occurrence of events " + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "text", + "content": " are generated using Algorithm 1, in which " + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "inline_equation", + "content": "\\lambda_A = \\lambda_B = \\lambda_C = \\lambda_D = 0.2" + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "text", + "content": ". After obtaining the occurrence of " + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "text", + "content": ", we simulate the generation of event label " + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "text", + "content": " using Algorithm 2, in which the intensity rate " + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "inline_equation", + "content": "\\lambda_{E|\\hat{\\phi}_2}(t)" + }, + { + "bbox": [ + 104, + 296, + 504, + 344 + ], + "type": "text", + "content": " is computed using the model shown in Figure 6." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": "Results. The rules learned by CLNN, TELLER and OGEM-tab on the second synthetic dataset are presented in Table 6, where the paired order predicate with the highest weight is presented. It can be clearly observed that by truncating the predicates with small weights, CLNN learns a wCL formula as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 146, + 392, + 504, + 406 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 392, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 146, + 392, + 504, + 406 + ], + "type": "interline_equation", + "content": "\\phi_ {2} = \\left(c _ {A} - c _ {B} > 0. 7 7\\right) ^ {1. 2 7} \\wedge \\left(c _ {A} - c _ {C} > 2. 0 9\\right) ^ {1. 1 5} \\wedge \\left(c _ {C} - c _ {D} > 2. 6 0\\right) ^ {1. 0 6}, \\tag {47}", + "image_path": "570e9517a670a10e55ecf1ebfd16b9ef42cd296d86b8bd4efbbe9ef57b292658.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": "whose order representation match well with the ground-truth rule. 
Nevertheless, TELLER's rule only captures the ordering between " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": ", whereas the ordering between " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " are not learned. 
OGEM-tab's rules can only capture the relation between event label " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " and event label " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " can excite the occurrence of event label " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": ", whereas not able to capture the dependence of event label " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": "'s occurrence on the order relation between " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": 
"inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 409, + 506, + 475 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 142, + 483, + 466, + 647 + ], + "blocks": [ + { + "bbox": [ + 142, + 483, + 466, + 647 + ], + "lines": [ + { + "bbox": [ + 142, + 483, + 466, + 647 + ], + "spans": [ + { + "bbox": [ + 142, + 483, + 466, + 647 + ], + "type": "table", + "html": "
DatasetSyn-2
N (# events)N=5, L={A,B,C,D,E}
Ground truthφ2=(cA-cB>0.5)1∧(cB-cC>1.5)1∧(cC-cD>2)1
CLNN's rule(cA-cB>0.77)1.27∧(cA-cC>2.09)1.15∧((cA-cD)>−5.00)0.25∧((cA-cE)>−2.74)0.09∧(cB-cC>−9.31)0.02∧(cB-cD>−8.54)0.08∧(cB-cE>2.07)0∧((cC-cD)>2.60)1.06∧((cC-cE)>−4.27)0.03∧((cD-cE)>1.17)0.07
TELLER's ruleA before E, B before E, A and B before E, A and C before E
OGEM-tab's ruleExcitation: [D], [D,E], [E], [E,D]Inhibitory: [D,A], [A], [A,D], [A,D,E], [E,D,A], [D,A,E], [A,E], [E,A], [D,E,A], [A,E,D], [E,A,D]
", + "image_path": "e3d81b80ba2fe7d2f1245e05c07d7c1330d42f7a95cc7e95b47e339517f27422.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 133, + 654, + 476, + 666 + ], + "lines": [ + { + "bbox": [ + 133, + 654, + 476, + 666 + ], + "spans": [ + { + "bbox": [ + 133, + 654, + 476, + 666 + ], + "type": "text", + "content": "Table 6: Comparison of rule discovery for CLNN and TELLER on the Syn-2 dataset." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 689, + 274, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 274, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 274, + 700 + ], + "type": "text", + "content": "C.3 SYNTHETIC DATASET 3 (SYN-3)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "The third synthetic dataset is generated using a more interesting scheme by combining the generation schemes of the first synthetic dataset and the second synthetic dataset. 
The third synthetic dataset" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 195, + 82, + 413, + 245 + ], + "blocks": [ + { + "bbox": [ + 195, + 82, + 413, + 245 + ], + "lines": [ + { + "bbox": [ + 195, + 82, + 413, + 245 + ], + "spans": [ + { + "bbox": [ + 195, + 82, + 413, + 245 + ], + "type": "image", + "image_path": "940514745458e4b682db12a6e7789c21a874a28f6e3a43dcfc1c2c79cb00a3da.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 124, + 257, + 485, + 270 + ], + "lines": [ + { + "bbox": [ + 124, + 257, + 485, + 270 + ], + "spans": [ + { + "bbox": [ + 124, + 257, + 485, + 270 + ], + "type": "text", + "content": "Figure 7: Model structure of " + }, + { + "bbox": [ + 124, + 257, + 485, + 270 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}_{3,1}" + }, + { + "bbox": [ + 124, + 257, + 485, + 270 + ], + "type": "text", + "content": " for generating the occurrence of " + }, + { + "bbox": [ + 124, + 257, + 485, + 270 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 124, + 257, + 485, + 270 + ], + "type": "text", + "content": " in the Syn-3 dataset." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 178, + 280, + 432, + 427 + ], + "blocks": [ + { + "bbox": [ + 178, + 280, + 432, + 427 + ], + "lines": [ + { + "bbox": [ + 178, + 280, + 432, + 427 + ], + "spans": [ + { + "bbox": [ + 178, + 280, + 432, + 427 + ], + "type": "image", + "image_path": "b081e37e363fe8a10159d63a78d5d0118d701b9c610e183152ab23a0ad8a4a74.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 124, + 439, + 485, + 452 + ], + "lines": [ + { + "bbox": [ + 124, + 439, + 485, + 452 + ], + "spans": [ + { + "bbox": [ + 124, + 439, + 485, + 452 + ], + "type": "text", + "content": "Figure 8: Model structure of " + }, + { + "bbox": [ + 124, + 439, + 485, + 452 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}_{3,2}" + }, + { + "bbox": [ + 124, + 439, + 485, + 452 + ], + "type": "text", + "content": " for generating the occurrence of " + }, + { + "bbox": [ + 124, + 439, + 485, + 452 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 124, + 439, + 485, + 452 + ], + "type": "text", + "content": " in the Syn-3 dataset." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": "includes five event labels: " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "A, B, C, D" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": ". 
Here we consider " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "A, B" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": " as the causal events for the occurrence of " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": " as the causal events for the occurrence of " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": ". 
The occurrence of events " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": " are generated using Algorithm 1, in which " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\lambda_{A} = 0.2" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\lambda_{b} = 0.2" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\lambda_{c} = 0.2" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": ". The wCL formula used to generate the occurrence of event " + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 472, + 504, + 517 + ], + "type": "text", + "content": " is set as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 208, + 521, + 504, + 537 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 521, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 208, + 521, + 504, + 537 + ], + "type": "interline_equation", + "content": "\\hat {\\phi} _ {3, 1} = \\left(c _ {B} - c _ {A} > - 2\\right) ^ {1} \\wedge \\left(c _ {C} - c _ {A} > - 5\\right) ^ {1}, \\tag {48}", + "image_path": "e13bbde21d10390c5abc62017f7add9554ca05b250569d966154e2f626f53d15.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "text", + "content": "whose unweighted version reads as \"If " + }, + { + "bbox": [ + 104, + 541, 
+ 504, + 589 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "text", + "content": " for less than 2 time units, and " + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "text", + "content": " happens before " + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "text", + "content": " for less than 1 time unit, then " + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "text", + "content": " will happen.\" The generation of " + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "text", + "content": "'s occurrence follows Algorithm 2, where " + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\lambda_{D|\\hat{\\phi}_{3,1}}(t)" + }, + { + "bbox": [ + 104, + 541, + 504, + 589 + ], + "type": "text", + "content": " is computed using the model shown in Figure 7. We call the third synthetic dataset at this step as Syn-3.1." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 593, + 506, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 593, + 506, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 593, + 506, + 616 + ], + "type": "text", + "content": "After obtaining the occurrences of events " + }, + { + "bbox": [ + 104, + 593, + 506, + 616 + ], + "type": "inline_equation", + "content": "A, B, C" + }, + { + "bbox": [ + 104, + 593, + 506, + 616 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 593, + 506, + 616 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 593, + 506, + 616 + ], + "type": "text", + "content": ", we could simulate the occurrence of " + }, + { + "bbox": [ + 104, + 593, + 506, + 616 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 593, + 506, + 616 + ], + "type": "text", + "content": " using the following formula:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 166, + 621, + 504, + 636 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 621, + 504, + 636 + ], + "spans": [ + { + "bbox": [ + 166, + 621, + 504, + 636 + ], + "type": "interline_equation", + "content": "\\hat {\\phi} _ {3, 2} = \\left(c _ {B} - c _ {A} > - 5\\right) ^ {1} \\wedge \\left(c _ {C} - c _ {B} > - 4\\right) ^ {1} \\wedge \\left(c _ {D} - c _ {C} > - 3\\right) ^ {1}. 
\\tag {49}", + "image_path": "21c1df0af86aecec7c39a2d1adf13cec67d4629141ec47e07cdbed010a0d0277.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 640, + 505, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 640, + 505, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 640, + 505, + 676 + ], + "type": "text", + "content": "Similarly, the generation of " + }, + { + "bbox": [ + 104, + 640, + 505, + 676 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 640, + 505, + 676 + ], + "type": "text", + "content": "'s occurrence follows Algorithm 2, where the intensity rate " + }, + { + "bbox": [ + 104, + 640, + 505, + 676 + ], + "type": "inline_equation", + "content": "\\lambda_{E|\\hat{\\phi}_{3,2}}(t)" + }, + { + "bbox": [ + 104, + 640, + 505, + 676 + ], + "type": "text", + "content": " is computed using the model shown in Figure 8. We call the third synthetic dataset at this step as Syn-3.2." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 681, + 142, + 692 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 142, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 142, + 692 + ], + "type": "text", + "content": "Results." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "The rules learned by CLNN, TELLER, and OGEM-tab on the cause of event " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " in the third synthetic dataset are presented in Table 7, where the paired order predicate with the highest weight among the two candidates is reported. 
It can be clearly observed that by truncating the predicates with small" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 272, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 272, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 272, + 94 + ], + "type": "text", + "content": "weights, CLNN learns a wCL formula as" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 185, + 99, + 504, + 113 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 99, + 504, + 113 + ], + "spans": [ + { + "bbox": [ + 185, + 99, + 504, + 113 + ], + "type": "interline_equation", + "content": "\\phi_ {3, 1} = \\left(c _ {B} - c _ {A} > - 1. 8 5\\right) ^ {1. 7 2} \\wedge \\left(c _ {C} - c _ {A} > - 3. 9 0\\right) ^ {1. 5 9}, \\tag {50}", + "image_path": "588c5ca46288fa6c0234ef6aade9513e4688246cb74b0c239acf91ff54c44a74.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": "whose order representation match well with the ground-truth rule. 
On the other hand, TELLER's rule only reveals the temporal relation between event labels " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": ", but it does not capture the temporal relation between event labels " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": ". 
In addition, we could observe that OGEM-tab does not capture that " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": " is a parent event of " + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 119, + 506, + 163 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 115, + 174, + 494, + 293 + ], + "blocks": [ + { + "bbox": [ + 115, + 174, + 494, + 293 + ], + "lines": [ + { + "bbox": [ + 115, + 174, + 494, + 293 + ], + "spans": [ + { + "bbox": [ + 115, + 174, + 494, + 293 + ], + "type": "table", + "html": "
DatasetSyn-3.1
N (# events)N = 5, L = {A, B, C, D, E}
Ground truth\\(\\hat{\\phi}_{3,1} = (c_B - c_A > -2)^1 \\wedge (c_C - c_A > -5)^1\\)
CLNN's rule\\((c_B - c_A > -1.85)^{1.72} \\wedge (c_C - c_A > -3.90)^{1.59} \\wedge ((c_D - c_A) > -16.25)^{0.33} \\wedge ((c_C - c_B) > -3.01)^0 \\wedge (c_D - c_B > -7.37)^{0.02} \\wedge (c_D - c_C > -7.55)^0\\)
TELLER's ruleA before D, B before D, C before D
OGEM-tab's ruleExcitation: [A], [A, B, D], [B, D, A], [D, A], [D, A, B], [B, A], [A, D], [D], [B, A, D], [D, B, A]Inhibitory: [A, B], [B, D], [B], [A, D, B], [D, B]
", + "image_path": "0ade2cd181c78f6e20129e109757d9589040feebc8fc3b63f08edf29c05fc84b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 324, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 324, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 324, + 504, + 348 + ], + "type": "text", + "content": "The rules learned by CLNN, TELLER, and GEM on the cause of event " + }, + { + "bbox": [ + 104, + 324, + 504, + 348 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 324, + 504, + 348 + ], + "type": "text", + "content": " in the third synthetic dataset are presented in Table 8, in which the discrete wCL formula learned by CLNN is" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 119, + 352, + 504, + 369 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 352, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 119, + 352, + 504, + 369 + ], + "type": "interline_equation", + "content": "\\phi_ {3, 2} = \\left(c _ {B} - c _ {A} > - 3. 9 4\\right) ^ {1. 4 9} \\wedge \\left(c _ {C} - c _ {B} > - 3. 0 2\\right) ^ {2. 0 3} \\wedge \\left(\\left(c _ {D} - c _ {C}\\right) > - 2. 0 0\\right) ^ {1. 9 2}. 
\\tag {51}", + "image_path": "fe625186602a38393c63c5f6d77a70f673e024c67ab771ad5914a7464f12c78c.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": "It is obvious that " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "\\phi_{3,2}" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": " is able to learn the temporal relation between " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ". 
However, TELLER's rules only reflect the temporal relation between " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", which cannot give the information about the temporal relation between " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", or " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", or " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + 
], + "type": "text", + "content": ". OGEM-tab's rule indicates that it considers event labels " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": " as the parent events of " + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 372, + 506, + 429 + ], + "type": "text", + "content": ", which does not match with the ground-truth parent set." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 156, + 437, + 452, + 613 + ], + "blocks": [ + { + "bbox": [ + 114, + 300, + 495, + 313 + ], + "lines": [ + { + "bbox": [ + 114, + 300, + 495, + 313 + ], + "spans": [ + { + "bbox": [ + 114, + 300, + 495, + 313 + ], + "type": "text", + "content": "Table 7: Comparison of rule discovery of " + }, + { + "bbox": [ + 114, + 300, + 495, + 313 + ], + "type": "inline_equation", + "content": "{\\phi }_{3,1}" + }, + { + "bbox": [ + 114, + 300, + 495, + 313 + ], + "type": "text", + "content": " for CLNN and TELLER on the Syn-3.1 dataset." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 156, + 437, + 452, + 613 + ], + "lines": [ + { + "bbox": [ + 156, + 437, + 452, + 613 + ], + "spans": [ + { + "bbox": [ + 156, + 437, + 452, + 613 + ], + "type": "table", + "html": "
DatasetSyn-3.2
N (# events)N=5, L={A,B,C,D,E}
Ground truth\\(\\hat{\\phi}_{3,2}=(c_{B}-c_{A}> -5)^{1}\\wedge(c_{C}-c_{B}> -4)^{1}\\wedge(c_{D}-c_{C}> -3)^{1}\\)
CLNN's rule\\((c_{B}-c_{A}> -3.94)^{1.49}\\wedge(c_{C}-c_{A}> -9.12)^{0.25}\\wedge((c_{D}-c_{A})> -1.42)^{0.13}\\wedge((c_{E}-c_{A})> -3.88)^{0.15}\\wedge(c_{C}-c_{B}> -3.02)^{2.03}\\wedge(c_{D}-c_{B}> -6.27)^{0.02}\\wedge(c_{E}-c_{B}> -7.30)^{0.04}\\wedge((c_{D}- c_{C})> -2.00)^{1.92}\\wedge((c_{E}-c_{C})> -5.30)^{0.09}\\wedge((c_{E}-c_{D})> -1.57)^{0.01}\\)
TELLER's ruleA before E, B before E, C before E
OGEM-tab's ruleExcitation: [A,D], [D,A], [D,E], [E], [A,D, E], [D,E,A], [E,A], [A,E], [E,A,D], [A,E, D], [D,A,E], [E,D,A]Inhibitory: [A], [D], [E,D]
", + "image_path": "b73a935eff88ee2e71108810437f7f70abd97257512e582c4d2e9df8953f8845.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 620, + 495, + 633 + ], + "lines": [ + { + "bbox": [ + 114, + 620, + 495, + 633 + ], + "spans": [ + { + "bbox": [ + 114, + 620, + 495, + 633 + ], + "type": "text", + "content": "Table 8: Comparison of rule discovery of " + }, + { + "bbox": [ + 114, + 620, + 495, + 633 + ], + "type": "inline_equation", + "content": "{\\phi }_{3,2}" + }, + { + "bbox": [ + 114, + 620, + 495, + 633 + ], + "type": "text", + "content": " for CLNN and TELLER on the Syn-3.2 dataset." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 653, + 443, + 665 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 653, + 443, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 653, + 443, + 665 + ], + "type": "text", + "content": "C.4 QUANTITATIVE COMPARISON OF CLNN'S RULES WITH GROUND TRUTH" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 673, + 506, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 673, + 506, + 735 + ], + "spans": [ + { + "bbox": [ + 104, + 673, + 506, + 735 + ], + "type": "text", + "content": "To quantitatively evaluate the difference between the ground-truth rules and the rules learned by CLNN, we adopt the Jaccard similarity score to assess the learned formulas against the ground truth. 
Let " + }, + { + "bbox": [ + 104, + 673, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 104, + 673, + 506, + 735 + ], + "type": "text", + "content": " denote the set of paired ordering representations from the ground-truth rule, and " + }, + { + "bbox": [ + 104, + 673, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 673, + 506, + 735 + ], + "type": "text", + "content": " denote the set of paired ordering representations from the learned rules, the Jaccard similarity score is calculated as " + }, + { + "bbox": [ + 104, + 673, + 506, + 735 + ], + "type": "inline_equation", + "content": "J = \\frac{|\\mathcal{C} \\cap \\mathcal{G}|}{|\\mathcal{C} \\cup \\mathcal{G}|}" + }, + { + "bbox": [ + 104, + 673, + 506, + 735 + ], + "type": "text", + "content": ". For TELLER and OGEM-tab, the ordering representations are extracted" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 120, + 86, + 302, + 219 + ], + "blocks": [ + { + "bbox": [ + 120, + 86, + 302, + 219 + ], + "lines": [ + { + "bbox": [ + 120, + 86, + 302, + 219 + ], + "spans": [ + { + "bbox": [ + 120, + 86, + 302, + 219 + ], + "type": "image", + "image_path": 
"e778c73031d538bd262a2304c138c0361986948ec160d092a3b6c088d78183f6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 204, + 220, + 216, + 231 + ], + "lines": [ + { + "bbox": [ + 204, + 220, + 216, + 231 + ], + "spans": [ + { + "bbox": [ + 204, + 220, + 216, + 231 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 308, + 85, + 492, + 217 + ], + "blocks": [ + { + "bbox": [ + 308, + 85, + 492, + 217 + ], + "lines": [ + { + "bbox": [ + 308, + 85, + 492, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 85, + 492, + 217 + ], + "type": "image", + "image_path": "96858fce8cd5a11bbd1adfbb7d71a973c286ee8decdbab1100967a9142becc93.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 391, + 220, + 403, + 230 + ], + "lines": [ + { + "bbox": [ + 391, + 220, + 403, + 230 + ], + "spans": [ + { + "bbox": [ + 391, + 220, + 403, + 230 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 120, + 239, + 299, + 369 + ], + "blocks": [ + { + "bbox": [ + 120, + 239, + 299, + 369 + ], + "lines": [ + { + "bbox": [ + 120, + 239, + 299, + 369 + ], + "spans": [ + { + "bbox": [ + 120, + 239, + 299, + 369 + ], + "type": "image", + "image_path": "a6c6bd22c64fc1f1fc65224884fde0d5cd2e30493bdc294bb501e98d1361ff80.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 203, + 374, + 214, + 384 + ], + "lines": [ + { + "bbox": [ + 203, + 374, + 214, + 384 + ], + "spans": [ + { + "bbox": [ + 203, + 374, + 214, + 384 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 308, + 240, + 486, + 370 + ], + "blocks": [ + { 
+ "bbox": [ + 308, + 240, + 486, + 370 + ], + "lines": [ + { + "bbox": [ + 308, + 240, + 486, + 370 + ], + "spans": [ + { + "bbox": [ + 308, + 240, + 486, + 370 + ], + "type": "image", + "image_path": "1511e755f98306b72c0769194b5a539ac0f64fffcc66182d9520ef4267f0858f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 390, + 373, + 402, + 383 + ], + "lines": [ + { + "bbox": [ + 390, + 373, + 402, + 383 + ], + "spans": [ + { + "bbox": [ + 390, + 373, + 402, + 383 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 396, + 504, + 420 + ], + "lines": [ + { + "bbox": [ + 104, + 396, + 504, + 420 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 504, + 420 + ], + "type": "text", + "content": "Figure 9: Comparison of ground-truth rules with CLNN's rules in terms of Jaccard similarity score for a) Syn-1, b) Syn-2, c) Syn-3.1, d) Syn-3.2." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 444, + 504, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 444, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 444, + 504, + 499 + ], + "type": "text", + "content": "from the excitation rules. The comparison of Jaccard similarity score for the synthetic datasets is shown in Figure 9, where the Jaccard similarity score of 0 is manually set to the minimum threshold 0.05 for clarity purposes. It is clearly observed that the Jaccard similarity scores for CLNN is higher than the ones for TELLER or OGEM, implying the rules discovered by CLNN are more consistent with the ground truth." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 517, + 458, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 458, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 458, + 529 + ], + "type": "text", + "content": "C.5 STABILITY ANALYSIS OF CLNN'S RULES WITH RESPECT TO INITIALIZATION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "type": "text", + "content": "To further validate the model's stability in learning wCL rules, different parameter initialization methods are carried out, including:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 127, + 574, + 504, + 666 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 129, + 574, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 574, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 129, + 574, + 504, + 597 + ], + "type": "text", + "content": "1. rand - parameter initialization as random numbers from a uniform distribution on the interval [0, 1);" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 127, + 604, + 468, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 604, + 468, + 615 + ], + "spans": [ + { + "bbox": [ + 127, + 604, + 468, + 615 + ], + "type": "text", + "content": "2. randn - random numbers from a normal distribution with mean 0 and variance 1;" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 624, + 254, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 624, + 254, + 635 + ], + "spans": [ + { + "bbox": [ + 129, + 624, + 254, + 635 + ], + "type": "text", + "content": "3. 
ones - constant values of 1;" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 127, + 642, + 504, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 642, + 504, + 666 + ], + "spans": [ + { + "bbox": [ + 127, + 642, + 504, + 666 + ], + "type": "text", + "content": "4. xavier - random numbers from a uniform distribution on the interval " + }, + { + "bbox": [ + 127, + 642, + 504, + 666 + ], + "type": "inline_equation", + "content": "[-1/\\sqrt{n}, 1/\\sqrt{n}]" + }, + { + "bbox": [ + 127, + 642, + 504, + 666 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 127, + 642, + 504, + 666 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 127, + 642, + 504, + 666 + ], + "type": "text", + "content": " is the dimension of the parameter." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": "The rules learned by CLNN for the above parameter initializations are summarized in Table 9. By inspecting the rules for different initialization methods, it is clear that CLNN can still recover the correct paired order representations even if initializing the learning process from a different position. In the meantime, the logic formulas learned by CLNN are stable as the variance of learned parameters is relatively small." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 120, + 80, + 491, + 284 + ], + "blocks": [ + { + "bbox": [ + 120, + 80, + 491, + 284 + ], + "lines": [ + { + "bbox": [ + 120, + 80, + 491, + 284 + ], + "spans": [ + { + "bbox": [ + 120, + 80, + 491, + 284 + ], + "type": "table", + "html": "
DatasetInitializationRules
Syn - 1Ground truth\\(\\hat{\\phi}=(c_{A}-c_{B}>1)^{1}\\wedge(c_{A}-c_{C}>3)^{1}\\)
rand\\(\\phi=(c_{A}-c_{B}>1.21)^{1.52}\\wedge(c_{A}-c_{C}>3.00)^{1.41}\\)
randn\\(\\phi=(c_{A}-c_{B}>1.21)^{1.58}\\wedge(c_{A}-c_{C}>3.32)^{1.56}\\)
ones\\(\\phi=(c_{A}-c_{B}>1.17)^{1.59}\\wedge(c_{A}-c_{C}>3.14)^{1.32}\\)
xavier\\(\\phi=(c_{A}-c_{B}>1.12)^{1.45}\\wedge(c_{A}-c_{C}>3.20)^{1.33}\\)
Syn - 2Ground truth\\(\\hat{\\phi}=(c_{A}-c_{B}>0.5)^{1}\\wedge(c_{A}-c_{C}>1.5)^{1}\\wedge(c_{C}-c_{D}>2)^{1}\\)
rand\\(\\phi=(c_{A}-c_{B}>0.77)^{1.27}\\wedge(c_{A}-c_{C}>2.09)^{1.15}\\wedge((c_{C}-c_{D})>2.60)^{1.06}\\)
randn\\(\\phi=(c_{A}-c_{B}>0.80)^{1.97}\\wedge(c_{A}-c_{C}>1.92)^{1.62}\\wedge((c_{C}-c_{D})>1.74)^{1.45}\\)
ones\\(\\phi=(c_{A}-c_{B}>1.03)^{1.63}\\wedge(c_{A}-c_{C}>1.92)^{1.50}\\wedge((c_{C}-c_{D})>2.03)^{1.44}\\)
xavier\\(\\phi=(c_{A}-c_{B}>0.97)^{1.92}\\wedge(c_{A}-c_{C}>2.07)^{1.63}\\wedge((c_{C}-c_{D})>1.97)^{1.62}\\)
Syn - 3.1Ground truth\\(\\hat{\\phi}=(c_{B}-c_{A}>-2)^{1}\\wedge(c_{C}-c_{A}>-5)^{1}\\)
rand\\(\\phi=(c_{B}-c_{A}>-1.85)^{1.72}\\wedge(c_{C}-c_{A}>-3.90)^{1.59}\\)
randn\\(\\phi=(c_{B}-c_{A}>-1.98)^{1.51}\\wedge(c_{C}-c_{A}>-3.89)^{1.68}\\)
ones\\(\\phi_{3,1}=(c_{B}-c_{A}>-1.94)^{1.84}\\wedge(c_{C}-c_{A}>-3.68)^{2.33}\\)
xavier\\(\\phi_{3,1}=(c_{B}-c_{A}>-1.89)^{1.54}\\wedge(c_{C}-c_{A}>-3.92)^{1.62}\\)
Syn - 3.2Ground truth\\(\\hat{\\phi}=(c_{B}-c_{A}>-5)^{1}\\wedge(c_{C}-c_{B}>-4)^{1}\\wedge(c_{D}-c_{C}>-3)^{1}\\)
rand\\(\\phi=(c_{B}-c_{A}>-3.94)^{1.49}\\wedge(c_{C}-c_{B}>-3.02)^{2.03}\\wedge((c_{D}-c_{C})> -2.00)^{1.92}\\)
randn\\(\\phi=(c_{B}-c_{A}>-3.79)^{1.71}\\wedge(c_{C}-c_{B}>-3.04)^{1.89}\\wedge((c_{D}-c_{C})> -1.68)^{1.65}\\)
ones\\(\\phi=(c_{B}-c_{A}>-3.53)^{1.66}\\wedge(c_{C}-c_{B}>-3.09)^{1.88}\\wedge((c_{D}-c_{C})> -1.25)^{1.81}\\)
xavier\\(\\phi=(c_{B}-c_{A}>-3.71)^{1.53}\\wedge(c_{C}-c_{B}>-3.09)^{2.04}\\wedge((c_{D}-c_{C})> -1.86)^{1.73}\\)
", + "image_path": "720e0b182ecd75a826d23884634877955fe58becb261d25acc4e0e57933c455c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 331, + 348, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 348, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 348, + 342 + ], + "type": "text", + "content": "C.6 ANALYSIS OF LOGICAL CONSTRAINTS ON THE LL" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 354, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 504, + 411 + ], + "type": "text", + "content": "In this part, we investigate the effect of the interpretability using an experiment of the impact of logical constraints on the model's performance. The log-likelihood on the synthetic datasets for CLNN with and without logical constraints is summarized in Table 10. Table 10 demonstrates that the log-likelihood for CLNN with logical constraints is higher than the log-likelihood for CLNN without constraints, implying that interpretability (logical constraints) is helpful to improve the performance." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 168, + 427, + 441, + 491 + ], + "blocks": [ + { + "bbox": [ + 115, + 292, + 493, + 304 + ], + "lines": [ + { + "bbox": [ + 115, + 292, + 493, + 304 + ], + "spans": [ + { + "bbox": [ + 115, + 292, + 493, + 304 + ], + "type": "text", + "content": "Table 9: Comparison of rules learned by CLNN for different parameter initialization methods." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 168, + 427, + 441, + 491 + ], + "lines": [ + { + "bbox": [ + 168, + 427, + 441, + 491 + ], + "spans": [ + { + "bbox": [ + 168, + 427, + 441, + 491 + ], + "type": "table", + "html": "
DatasetCLNN with constraintsCLNN without constraints
Syn - 1-7821-8716
Syn - 2-6075-6942
Syn - 3.1-10898-11583
Syn - 3.2-10919-11230
", + "image_path": "2658b04aa74bc189fbdd0573cbd758c6395726b3651feecc9a6fb422e3c6d043.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 150, + 498, + 459, + 510 + ], + "lines": [ + { + "bbox": [ + 150, + 498, + 459, + 510 + ], + "spans": [ + { + "bbox": [ + 150, + 498, + 459, + 510 + ], + "type": "text", + "content": "Table 10: Comparison of LL for CLNN with and without logical constraints." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 546, + 394, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 546, + 394, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 394, + 559 + ], + "type": "text", + "content": "D EXPERIMENT RESULTS OF REAL-WORLD DATASETS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 576, + 220, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 220, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 220, + 588 + ], + "type": "text", + "content": "D.1 LINKEDIN DATASET" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": "The LinkedIn dataset is a collection of job hopping records between 82 IT companies of 3,000 LinkedIn users. Each event stream represents a user's check-in time stamps for different companies or role changes within the same company. Here we select 1000 users' event streams to compose the dataset by filtering out the event streams with uncommon companies, resulting in 10 event labels: " + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{L} = \\{A,B,C,D,E,F,G,H,I,J\\}" + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": ". 
Here we set the number of formulas as 5, i.e., " + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\Phi = \\{\\phi_1,\\phi_2,\\phi_3,\\phi_4,\\phi_5\\}" + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": ", each of which embodies a model structure shown in Figure 2(a) and CLNN aims to learn the parameters for each formula. The weight parameters in the paired order cell or the singleton order cell are initialized as random variables following a Gaussian distribution, and the bias terms of conjunction or disjunction operators are initialized as 1. The architecture weights are initialized as random variables following a Gaussian distribution, and the formula impact weights and bias are initialized as Gaussian random variables. The detailed log-likelihood for each event label is summarized in Table 11." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 237, + 80, + 372, + 213 + ], + "blocks": [ + { + "bbox": [ + 237, + 80, + 372, + 213 + ], + "lines": [ + { + "bbox": [ + 237, + 80, + 372, + 213 + ], + "spans": [ + { + "bbox": [ + 237, + 80, + 372, + 213 + ], + "type": "table", + "html": "
Event LabelLog-likelihood
A-180.59
B-177.80
C-89.49
D-140.31
E-132.83
F-76.63
G-106.23
H-103.33
I-95.51
J-125.45
", + "image_path": "69a871f954c73941778d038c5558feb755082b3add5ca333f770779f0c7a455c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 255, + 220, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 220, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 220, + 266 + ], + "type": "text", + "content": "D.2 MIMIC II DATASET" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 276, + 506, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 276, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 276, + 506, + 354 + ], + "type": "text", + "content": "MIMIC II dataset is obtained from the intensive care unit research database that consists of 25,328 intensity care unit stays. The records include laboratory data, therapeutic intervention profiles such as nursing progress notes, discharge summaries and others. Here we restrict the event types to the diagnosis of patients and filter out the shorter event sequences with few visits, ending up with 650 patients and 15 event labels: " + }, + { + "bbox": [ + 104, + 276, + 506, + 354 + ], + "type": "inline_equation", + "content": "\\mathcal{L} = \\{1,2,8,9,11,12,14,20,21,22,23,26,27,42,47\\}" + }, + { + "bbox": [ + 104, + 276, + 506, + 354 + ], + "type": "text", + "content": ". Similar to the setting for the LinkedIn dataset, where the initialization of parameters follow the same setting as the LinkedIn dataset. The detailed log-likelihood for each event label is presented in Table 12." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 236, + 365, + 372, + 553 + ], + "blocks": [ + { + "bbox": [ + 165, + 221, + 444, + 234 + ], + "lines": [ + { + "bbox": [ + 165, + 221, + 444, + 234 + ], + "spans": [ + { + "bbox": [ + 165, + 221, + 444, + 234 + ], + "type": "text", + "content": "Table 11: Log likelihood for each event label in the LinkedIn dataset." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 236, + 365, + 372, + 553 + ], + "lines": [ + { + "bbox": [ + 236, + 365, + 372, + 553 + ], + "spans": [ + { + "bbox": [ + 236, + 365, + 372, + 553 + ], + "type": "table", + "html": "
Event LabelLog-likelihood
1-72.14
2-62.33
8-5.98
9-51.34
11-43.64
12-25.81
14-69.73
20-5.96
21-6.08
22-10.47
23-10.64
26-27.08
27-27.42
42-5.95
47-10.54
", + "image_path": "4cd041f239d47902683a689de705b11ef3e36f99d74bc603b88b10e11d7f71f1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 163, + 561, + 447, + 574 + ], + "lines": [ + { + "bbox": [ + 163, + 561, + 447, + 574 + ], + "spans": [ + { + "bbox": [ + 163, + 561, + 447, + 574 + ], + "type": "text", + "content": "Table 12: Log likelihood for each event label in the MIMIC II dataset." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 601, + 256, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 256, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 256, + 612 + ], + "type": "text", + "content": "D.3 STACK OVERFLOW DATASET" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "Stack Overflow is a question-and-answer website spanning a wide range of domains. A badge rewarding scheme is exploited to encourage users to participate in the questioning and answering activities. The badge system of Stack Overflow comprises 81 types of non-topical badges, including the badges that can be awarded only once and the badges that can be awarded several times. The dataset in (Du et al., 2016) was obtained by first filtering out the badges that can be awarded only once, then restricting to the users who have acquired at least 40 badges from 2012-01-01 to 2014-01-01, from which the badges have been awarded more than 100 times are selected as the determinate dataset. Our dataset was acquired by retaining the event streams with one or more of the 20 types of specified badges and then randomly sampling 1000 users to obtain 1000 event streams. 
The detailed log-likelihood for each event label in the Stack Overflow dataset is summarized in Table 13." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 236, + 80, + 372, + 323 + ], + "blocks": [ + { + "bbox": [ + 236, + 80, + 372, + 323 + ], + "lines": [ + { + "bbox": [ + 236, + 80, + 372, + 323 + ], + "spans": [ + { + "bbox": [ + 236, + 80, + 372, + 323 + ], + "type": "table", + "html": "
Event LabelLog-likelihood
1-3791
2-1451
3-538
4-17656
5-3574
6-3559
7-1381
8-1330
9-10961
10-1105
11-189
12-2012
13-673
14-1340
15-406
16-117
17-186
18-330
19-282
20-100
", + "image_path": "745b4d12b05a442f60099c558989a23fad57a215fe56896144c6556a9a1740dd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 179, + 353, + 430, + 420 + ], + "blocks": [ + { + "bbox": [ + 153, + 331, + 457, + 344 + ], + "lines": [ + { + "bbox": [ + 153, + 331, + 457, + 344 + ], + "spans": [ + { + "bbox": [ + 153, + 331, + 457, + 344 + ], + "type": "text", + "content": "Table 13: Log likelihood for each event label in the Stack Overflow dataset." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 179, + 353, + 430, + 420 + ], + "lines": [ + { + "bbox": [ + 179, + 353, + 430, + 420 + ], + "spans": [ + { + "bbox": [ + 179, + 353, + 430, + 420 + ], + "type": "table", + "html": "
DatasetCLNN with SOPCLNN without SOP
LinkedIn-1228-1344
MIMIC II-436-480
Stack Overflow-50981-51195
", + "image_path": "c8f7735f28aa8d4df118a71a63f8b9c9027bb154f4eceb79be2aeedbcbb5aaaf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 428, + 504, + 440 + ], + "lines": [ + { + "bbox": [ + 105, + 428, + 504, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 504, + 440 + ], + "type": "text", + "content": "Table 14: Comparison of log-likelihood for CLNN with and without SOP on the real-world datasets." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 460, + 397, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 397, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 397, + 472 + ], + "type": "text", + "content": "D.4 ANALYSIS OF EXPRESSIVENESS ON MODEL'S PERFORMANCE" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 480, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 480, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 480, + 506, + 548 + ], + "type": "text", + "content": "In this part, we conduct an experiment by training the CLNN without the singleton order cell (SOC) on real-world datasets to show the effectiveness of the singleton order predicates. The comparison of log-likelihood for CLNN with SOC and CLNN without SOC is summarized in Table 14. As evidenced by Table 14, the log-likelihood of CLNN with SOP is higher than the log-likelihood of CLNN without SOP, meaning enriching the expressiveness of wCL formulas can better explain the generative mechanism of events." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Weighted Ensemble Self-Supervised Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_content_list.json b/2023/Weighted Ensemble Self-Supervised Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..668e2bffc15b2f4cd6eb273b5265b367aa5b6122 --- /dev/null +++ b/2023/Weighted Ensemble Self-Supervised Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_content_list.json @@ -0,0 +1,3716 @@ +[ + { + "type": "text", + "text": "WEIGHTED ENSEMBLE SELF-SUPERVISED LEARNING", + "text_level": 1, + "bbox": [ + 171, + 85, + 818, + 108 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yangjun Ruan*† Saurabh Singh Warren Morningstar Alexander A. Alemi \nSergey Ioffe Ian Fischer† Joshua V. Dillon† \nGoogle Research", + "bbox": [ + 197, + 119, + 799, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 204, + 547, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ensembling has proven to be a powerful technique for boosting model performance, uncertainty estimation, and robustness in supervised learning. 
Advances in self-supervised learning (SSL) enable leveraging large unlabeled corpora for state-of-the-art few-shot and supervised learning performance. In this paper, we explore how ensemble methods can improve recent SSL techniques by developing a framework that permits data-dependent weighted cross-entropy losses. We refrain from ensembling the representation backbone; this choice yields an efficient ensemble method that incurs a small training cost and requires no architectural changes or computational overhead to downstream evaluation. The effectiveness of our method is demonstrated with two state-of-the-art SSL methods, DINO (Caron et al., 2021) and MSN (Assran et al., 2022). Our method outperforms both in multiple evaluation metrics on ImageNet-1K, particularly in the few-shot setting. We explore several weighting schemes and find that those which increase the diversity of ensemble heads lead to better downstream evaluation results. Thorough experiments yield improved prior art baselines which our method still surpasses; e.g., our overall improvement with MSN ViT-B/16 is 3.9 p.p. for 1-shot learning.", + "bbox": [ + 228, + 224, + 769, + 450 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 463, + 336, + 478 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The promise of self-supervised learning (SSL) is to extract information from unlabeled data and leverage this information in downstream tasks (He et al., 2020; Caron et al., 2021); e.g., semi-supervised learning (Chen et al., 2020a,b), robust learning (Radford et al., 2021; Ruan et al., 2022; Lee et al., 2021), few-shot learning (Assran et al., 2022), and supervised learning (Tomasev et al., 2022). These successes have encouraged increasingly advanced SSL techniques", + "bbox": [ + 169, + 486, + 486, + 625 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "(e.g., Grill et al., 2020; Zbontar et al., 2021; He et al., 2022). 
Perhaps surprisingly however, a simple and otherwise common idea has received limited consideration: ensembling.", + "bbox": [ + 169, + 625, + 823, + 654 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ensembling combines predictions from multiple trained models and has proven effective at improving model accuracy (Hansen & Salamon, 1990; Perrone & Cooper, 1992) and capturing predictive uncertainty in supervised learning (Lakshminarayanan et al., 2017; Ovadia et al., 2019). Ensembling in the SSL regime is nuanced, however; since the goal is to learn useful representations from unlabeled data, it is less obvious where and how to ensemble. We explore these questions in this work.", + "bbox": [ + 169, + 659, + 823, + 731 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We develop an efficient ensemble method tailored for SSL that replicates the non-representation parts (e.g., projection heads) of the SSL model. In contrast with traditional \"post-training\" ensembling, our ensembles are only used during training to facilitate the learning of a single representation encoder, which yields no extra cost in downstream evaluation. We further present a family of weighted cross-entropy losses to effectively train the ensembles. The key component of our losses is the introduction of data-dependant importance weights for ensemble members. We empirically compare different choices from our framework and find that the choice of weighting schemes critically impacts ensemble diversity, and that greater ensemble diversity correlates with improved downstream performance. Our method is potentially applicable to many SSL methods; we focus on DINO (Caron et al., 2021) and MSN (Assran et al., 2022) to demonstrate its effectiveness. Fig. 
1 shows DINO improvements from using our ensembling and weighted cross-entropy loss.", + "bbox": [ + 169, + 736, + 826, + 890 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*University of Toronto & Vector Institute. Work done as a student researcher at Google.", + "bbox": [ + 189, + 896, + 712, + 910 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{\\dagger}$ Correspondence to yjruan@cs.toronto.edu, {iansf, jvdillon} @ google.com.", + "bbox": [ + 192, + 910, + 750, + 922 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In summary, our core contributions are to:", + "bbox": [ + 171, + 104, + 450, + 118 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Develop a downstream-efficient ensemble method suitable for many SSL techniques (Sec. 3.1).", + "- Characterize an ensemble loss family of weighted cross-entropy objectives (Sec. 3.2).", + "- Conduct extensive ablation studies that improve the prior art baselines by up to 6.3 p.p. (Sec. 5.1).", + "- Further improve those baselines with ensembling (e.g., up to 5.5 p.p. gain for 1-shot) (Table 2)." + ], + "bbox": [ + 179, + 125, + 825, + 189 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 BACKGROUND", + "text_level": 1, + "bbox": [ + 171, + 208, + 328, + 223 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we frame SSL methods from the perspective of maximum likelihood estimation (MLE) and use this as the notational basis to describe the state-of-the-art clustering-based SSL methods as well as derive their ensembled variants in Sec. 
3.", + "bbox": [ + 169, + 238, + 826, + 280 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "From Maximum Likelihood to SSL Denote unnormalized KL divergence (Dikmen et al., 2014) between non-negative integrable functions $p, q$ by $\\mathsf{K}[p(X), q(X)] = \\mathsf{H}^{\\times}[p(X), q(X)] - \\mathsf{H}[p(X)]$ , where $\\mathsf{H}^{\\times}[p(X), q(X)] = -\\int_{\\mathcal{X}} p(x) \\log q(x) \\, \\mathrm{d}x + \\int_{\\mathcal{X}} q(x) \\, \\mathrm{d}x - 1$ is the unnormalized cross-entropy (with $0 \\log 0 = 0$ ) and $\\mathsf{H}[p(X)] = \\mathsf{H}^{\\times}[p(X), p(X)]$ . These quantities simplify to their usual definitions when $p, q$ are normalized, but critically they enable flexible weighting of distributions for the derivation of our weighted ensemble losses in Sec. 3.2.", + "bbox": [ + 169, + 295, + 826, + 383 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Let $\\nu(X, Y) = \\nu(X)\\nu(Y|X)$ be a natural distribution of input/target pairs over the space $\\mathcal{X} \\times \\mathcal{Y}$ and $s(Y|\\theta, X)$ be a predictive model of target given the input parameterized by $\\theta \\in \\mathcal{T}$ . Supervised maximum likelihood seeks the minimum expected conditional population risk with respect to $\\theta$ ,", + "bbox": [ + 169, + 390, + 823, + 434 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathsf {E} _ {\\nu (X)} \\mathsf {K} [ \\nu (Y | X), s (Y | \\theta , X) ] = \\mathsf {E} _ {\\nu (X)} \\mathsf {H} ^ {\\times} [ \\nu (Y | X), s (Y | \\theta , X) ] - \\mathsf {E} _ {\\nu (X)} \\mathsf {H} [ \\nu (Y | X) ]. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 439, + 823, + 455 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Henceforth omit $\\mathsf{E}_{\\nu(X)} \\mathsf{H}[\\nu(Y|X)]$ since it is constant in $\\theta$ . Since $\\nu(X, Y)$ is unknown, a finite sample approximation is often employed. Denote a size- $n$ i.i.d. 
training set by $\\mathcal{D}_n = \\{x_i\\}_{i \\in [n]} \\sim \\nu^{\\otimes n}$ and empirical distribution by $\\hat{\\nu}(X, Y) = \\frac{1}{n} \\sum_{x \\in \\mathcal{D}_n, y \\sim \\nu(Y|x)} \\delta(X - x) \\delta(Y - y)$ where $\\delta: \\mathbb{R} \\to \\{0, 1\\}$ is 1 when $x = 0$ and 0 otherwise. The sample risk is thus $-\\frac{1}{n} \\sum_{x \\in \\mathcal{D}_n} \\mathsf{H}^\\times[\\hat{\\nu}(Y|x), s(Y|\\theta, x)]$ .", + "bbox": [ + 169, + 469, + 823, + 537 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In SSL, we interpret $\\nu(Y|x)$ as being the oracle teacher under a presumption of how the representations will be evaluated on a downstream task. This assumption is similar to that made in Arora et al. (2019); Nozawa et al. (2020). We also assume $\\hat{\\nu}(Y|X)$ is inaccessible and/or unreliable. Under this view, some SSL techniques substitute $\\hat{\\nu}(Y|x)$ for a weakly learned target or \"teacher\", $t(Y|x)$ . We don't generally expect $t(Y|x)$ to recover $\\nu(Y|x)$ ; we only assume that an optimal teacher exists and it is $\\nu(Y|x)$ . 
With the teacher providing the targets, the loss becomes $-\\frac{1}{n}\\sum_{x\\in\\mathcal{D}_n}\\mathsf{H}^\\times[t(Y|x), s(Y|\\theta, x)]$ .", + "bbox": [ + 169, + 542, + 826, + 647 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Teacher and student in clustering SSL methods Clustering SSL methods such as SWaV (Caron et al., 2020), DINO (Caron et al., 2021), and MSN (Assran et al., 2022) employ a student model characterized by proximity between learned codebook entries and a data-dependent code,", + "bbox": [ + 169, + 657, + 823, + 700 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\ns (Y | \\theta , x) = \\operatorname {s o f t m a x} \\left(\\left\\{\\frac {1}{\\tau} \\frac {\\left(h _ {\\psi} \\circ r _ {\\omega}\\right) (x) \\cdot \\mu_ {y}}{\\| \\left(h _ {\\psi} \\circ r _ {\\omega}\\right) (x) \\| _ {2} \\| \\mu_ {y} \\| _ {2}}: y \\in [ c ] \\right\\}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 705, + 823, + 739 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\theta = \\{\\omega , \\psi , \\left\\{\\mu_ {y} \\right\\} _ {y \\in [ c ]} \\} \\in \\mathcal {T}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 741, + 823, + 758 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where the encoder $r_{\\omega}:\\mathcal{X}\\to \\mathcal{Z}$ produces the representations used for downstream tasks, and the projection head $h_\\psi :\\mathcal{Z}\\rightarrow \\mathbb{R}^d$ and codebook entries $\\{\\mu_y\\}_{y\\in \\mathcal{Y}}\\in \\mathbb{R}^d$ characterize the SSL loss. Eq. (2) can be viewed as \"soft clustering\", where the input is assigned to those centroids that are closer to the projection head's output. The projection head and codebook are used during training but thrown away for evaluation, which is empirically found vital for downstream tasks (Chen et al., 2020a;b). 
Hyperparameters $\\tau \\in \\mathbb{R}_{>0},c\\in \\mathbb{Z}_{>0}$ represent temperature and codebook size. The teacher is defined as $t(Y|x) = s(Y|\\mathrm{stopgrad}(g(\\theta)),x)$ where $g:\\mathcal{T}\\to \\mathcal{T}$ . Commonly $g(\\theta)$ is an exponential moving average of gradient descent iterates and the teacher uses a lower temperature than the student.", + "bbox": [ + 169, + 763, + 826, + 875 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To capture desirable invariances and prevent degeneracy, data augmentation and regularization (e.g., Sinkhorn-Knopp normalization (Caron et al., 2020), mean entropy maximization (Assran et al., 2022)) are essential. As these are not directly relevant to our method, we omit them for brevity.", + "bbox": [ + 169, + 881, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 METHOD", + "text_level": 1, + "bbox": [ + 171, + 102, + 284, + 118 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Ensembling is a technique that combines models to boost performance, and has been especially successful in supervised learning. We are interested in ensembling methods that carry over this success to SSL approaches. However, SSL has key differences, such as throw-away \"projection heads\", from supervised learning that result in a multitude of possibilities for how to ensemble. With this in mind, we propose first where to ensemble, and then how to ensemble. 
Those proposals result in an efficient \"peri-training\" ensembling technique specifically tailored for SSL and a family of weighted ensemble objectives; we subsequently suggest different ways to select the weights.", + "bbox": [ + 169, + 135, + 826, + 233 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 WHERE TO ENSEMBLE?", + "text_level": 1, + "bbox": [ + 171, + 253, + 380, + 268 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Denote the teacher/student ensembles by $\\{t_i(Y|x)\\}_{i\\in [m]}$ and $\\{s(Y|\\theta_j,x)\\}_{j\\in [m]}$ and define each as in Sec. 2; parameters $\\theta = \\{\\theta_{j}\\}_{j\\in [m]}\\in \\mathcal{T}^{m}$ are independently initialized, all students use one temperature and all teachers another. We asymmetrically denote $t_i(Y|x)$ and $s(Y|\\theta_j,x)$ to emphasize that teachers' gradients are zero and that the students are distinct solely by way of $\\theta_{i}\\neq \\theta_{j}$ . Studying heterogeneous architectures and/or different teacher parameterizations is left for future work.", + "bbox": [ + 169, + 281, + 517, + 424 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recall that $\\theta_{j}$ parameterizes the encoder, projection head, and codebook parameters: $\\theta_{j} = (\\omega_{j},\\psi_{j},\\{\\mu_{jy}\\}_{y\\in \\mathcal{Y}})$ . We further restrict $\\mathcal{T}^m$ such that $\\omega_{i} = \\omega_{j}$ , i.e., we limit our consideration to ensembles of projection heads $h_{\\psi_j}$ and/or codebooks $\\mu_{j}$ but not encoders $r_{\\omega_j}$ . This choice makes our ensemble method inherently different from traditional supervised ensembling or encoder $r_{\\omega}$ ensembling: the ensembled parts are not used for evaluation but", + "bbox": [ + 169, + 431, + 519, + 556 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "for improving the learning of non-enssembled representation encoder during training, thus it requires no change of downstream evaluation or computational cost. 
Ensembling of $r_{\\omega}$ is left for future work.", + "bbox": [ + 169, + 556, + 826, + 585 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1a32ce26502f3e27b447c32b32c0018e24f6fc95bb0f146fdb5c4659fa233a6f.jpg", + "image_caption": [ + "Figure 2: Overview of $(h_{\\psi},\\mu)$ -ensemble. Two augmented inputs are encoded by the teacher/student into representations, and then processed by an ensemble of heads. The loss for each head is weighted and summed into the final loss. Strike-through edges indicate stop-gradients. See Appx. A for pseudocode." + ], + "image_footnote": [], + "bbox": [ + 526, + 257, + 830, + 441 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 HOW TO ENSEMBLE?", + "text_level": 1, + "bbox": [ + 171, + 604, + 362, + 619 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We would like to extend the loss to support an ensemble of teacher/student pairs while respecting the MLE intuition of the loss as in Sec. 2. Additionally, we want to facilitate data-dependent importance weights, thus enabling preferential treatment of some teacher/student pairs. We therefore propose a weighted average (unnormized) cross-entropy loss,", + "bbox": [ + 169, + 632, + 826, + 689 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 699, + 823, + 738 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\text {w h e r e} w _ {i j y} = \\operatorname {s o f t m a x} \\left(\\left\\{\\frac {1}{\\gamma} f _ {i j y} (\\operatorname {s t o p g r a d} (\\theta), x): i, j \\in [ m ] \\right\\}\\right). 
\\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 739, + 823, + 768 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The notation $w_{ijY} \\odot t_i(Y|x)$ denotes a Hadamard product; i.e., the product of event-specific weights and probabilities for each $y \\in \\mathcal{V}$ . The hyperparameter $\\gamma$ is the temperature. The function $f_{ijy}$ is defined for brevity and discussed in the following section.", + "bbox": [ + 169, + 777, + 823, + 821 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This objective admits generality and flexibility for introducing various weighting schemes, as it supports potential interactions between all teacher/student pairs and allows the weights to be both model- and data-dependent. Up to a constant independent of $\\theta$ , it is an importance weighted average of (unnormized) KL divergences between each teacher and each student; i.e., a mixture of MLE-like objectives. We stop the gradient of $w_{ijy}$ to $\\theta$ in order to keep the overall gradient a weighted average of students' log-likelihood gradients, similar to Eq. (1). We also normalize the weights such that each data point equally contributes to the loss.", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3 HOW TO WEIGHT?", + "text_level": 1, + "bbox": [ + 171, + 103, + 344, + 118 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we present several instantiations of our losses with different weighting schemes. We empirically show in Sec. 5 that the particular choice of weighting scheme is critical for the representation performance and the induced diversity of $(h_{\\psi},\\mu)$ -ensembles. 
For simplicity we assume $\\gamma = 1$ in this section. We indicate with $\\Longleftrightarrow$ that a loss has the same arg min as Eq. (4). For additional analysis and discussion, see Appx. D.", + "bbox": [ + 169, + 128, + 826, + 200 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Uniform weighting (UNIF) The simplest strategy is to treat different teacher/student pairs independently and average each with uniform weighting; i.e.,", + "bbox": [ + 169, + 213, + 826, + 244 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nf _ {i j y} = \\log \\delta (i - j) \\Longleftrightarrow \\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m} \\sum_ {i \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right] \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 233, + 250, + 825, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This strategy introduces uniform weights $w_{i} = \\frac{1}{m}$ over ensemble elements. The role of $\\log \\delta (i - j)$ (here and elsewhere) is to sub-select corresponding teacher/student pairs rather than all $m^2$ pairs.", + "bbox": [ + 169, + 295, + 826, + 328 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Probability weighting (PROB) An alternative to using the average cross-entropy loss (UNIF) is to compute the cross-entropy loss of the average predictions whose gradient is weighted by $w_{ijy}$ (see Appx. D.1). 
At $\\gamma = 1$ , those gradient weights simplify into an average over the student probabilities:", + "bbox": [ + 169, + 340, + 825, + 383 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nf _ {i j y} = \\log s (y | \\theta_ {j}, x) \\iff \\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\mathsf {H} ^ {\\times} \\left[ \\frac {1}{m} \\sum_ {i \\in [ m ]} t _ {i} (Y | x), \\frac {1}{m} \\sum_ {j \\in [ m ]} s (Y | \\theta_ {j}, x) \\right] \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 390, + 825, + 440 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Averaging the predictive distributions introduces correspondence between codes from different heads; thus different heads are no longer independent but instead cooperate to match the student to the teachers. The loss favors student heads with more confident predictions (i.e., larger $s(y|\\theta_j, x)$ ). Further motivation for averaging predictions comes from multi-sample losses studied in Morningstar et al. (2022). Note that the joint convexity of (unnormized) KL divergence implies that this loss is upper bounded by the UNIF loss up to some constant in $\\theta$ (see Appx. D).", + "bbox": [ + 169, + 444, + 826, + 529 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Although the PROB strategy favors confident student predictions, the weights change as a function of $y \\in \\mathcal{V}$ . This may be in conflict with our intuition that SSL is like maximum likelihood (Sec. 
\\operatorname {s o f t m a x} _ {i} \\left(\\left\\{- \\frac {1}{\\gamma} \\mathrm {H} \\left[ t _ {i ^ {\\prime}} (Y | x) \\right] : i ^ {\\prime} \\in [ m ] \\right\\}\\right) \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right] \\tag {9}
f _ {i j y} = - \\mathrm {H} [ s (Y | \\theta_ {j}, x) ] \\quad (\\text {F a v o r s l o w - e n t r o p y s t u d e n t s}) \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 891, + 823, + 907 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nf _ {i j y} = \\mathsf {K} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] \\quad (\\text {F a v o r s d i s a g r e e i n g t e a c h e r / s t u d e n t p a i r s}) \\tag {13}
\\frac {1}{2} \\log \\left(\\operatorname {V a r} _ {t _ {i} (Y | x)} [ Y ] + \\epsilon\\right) \\quad \\text {(F a v o r s l o w v a r i a n c e t e a c h e r s ; e . g . ,} \\epsilon = \\frac {1}{1 2}) \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 102, + 823, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that \"aligned\" versions of all schemes are possible by using $f_{ijy} + \\log \\delta (i - j)$ . We did early experiments exploring Eqs. (11) and (12), but the results were inferior and are largely omitted below.", + "bbox": [ + 169, + 123, + 828, + 154 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 172, + 346, + 186 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Self-supervised learning Recent work on self-supervised learning (SSL) focuses on discriminative or generative approaches. Most discriminative approaches seek to learn augmentation-invariant representations by enforcing the similarity between augmented pairs of the same image while utilizing different techniques to avoid collapse. Contrastive methods (Chen et al., 2020a; He et al., 2020; Wu et al., 2018; Hjelm et al., 2018; Bachman et al., 2019; Tian et al., 2020) use a large number of negative samples with a noise-contrastive objective (Gutmann & Hyvarinen, 2010; Oord et al., 2018). A large body of followup work eliminates the necessity of explicit negative samples with various techniques, including clustering assignment constraints (Caron et al., 2018; 2020; 2021; Asano et al., 2019), bootstrapping (Grill et al., 2020) or self-distillation (Caron et al., 2021) inspired by mean teacher (Tarvainen & Valpola, 2017), asymmetric architecture design (Grill et al., 2020; Chen & He, 2021), or redundancy reduction (Zbontar et al., 2021; Bardes et al., 2021). 
Recent generative approaches that use masked image modeling as the pretraining task (Dosovitskiy et al., 2020; Bao et al., 2021; He et al., 2022; Zhou et al., 2022; Xie et al., 2022) have achieved competitive finetuning performance. Our method may be applicable to all of the above methods that have some sort of \"projection head\", such as most of the discriminative approaches.", + "bbox": [ + 169, + 203, + 826, + 414 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Ensemble methods Ensembling has been extensively studied for improving model performance (Hansen & Salamon, 1990; Perrone & Cooper, 1992; Dietterich, 2000) and uncertainty estimation (Lakshminarayanan et al., 2017; Ovadia et al., 2019) in supervised learning and semi-supervised learning (Laine & Aila, 2016). A major research direction is to train efficient ensembles with partial parameter sharing (Lee et al., 2015; Wen et al., 2020; Dusenberry et al., 2020; Havasi et al., 2020) or intermediate checkpointing (Huang et al., 2017; Garipov et al., 2018). Our method also shares the encoder parameters across ensembles, which is closely related to multi-headed networks (Lee et al., 2015; Tran et al., 2020). Ensemble methods for SSL are less explored. Some recent work studies ensembles of supervised models adapted from pretrained SSL models. Gontijo-Lopes et al. (2022) conduct an empirical study of ensembles adapted from different SSL models and find that higher divergence in SSL methods leads to less correlated errors and better performance. Wortsman et al. (2022) ensemble multiple finetuned models adapted from the same SSL model by averaging their weights, which boosts the performance without any inference cost. 
Our method differs from them in that it (1) applies to the SSL training stage to directly improve representation quality, rather than aggregates multiple models in the post-training/finetuning stage; (2) introduces little training cost and no evaluation cost; and (3) is complementary to these post-training/finetuning ensembling methods.", + "bbox": [ + 169, + 426, + 828, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 667, + 328, + 684 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We carefully study the impact of $(h_{\\psi},\\mu)$ -ensembles and our selected weighted ensemble losses (UNIF, PROB, and ENT) on smaller DINO models in Sec. 5.1. Using what we learned in those experiments, in Sec. 5.2 we present new state-of-the-art results on ImageNet-1K on various metrics for multiple model sizes by ensembling both DINO- and MSN-based models. Finally, we explore ensemble evaluations in the transfer learning setting in Sec. 5.3. Additional experimental details and results are in Appx. B and Appx. C, respectively.", + "bbox": [ + 169, + 700, + 823, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Experimental setup We assessed the effectiveness of our method with two SSL methods: DINO (Caron et al., 2021) and MSN (Assran et al., 2022). In order to ensure that we are comparing against strong baselines, we consider three different classes of baselines: (1) evaluation numbers reported in the original works (Caron et al. (2021), Assran et al. (2022), and Zhou et al. (2022) for an additional baseline iBOT); (2) evaluation of our implementation using the hyperparameters reported in the original works (DINO only, for space reasons) to validate our implementation; and (3) evaluation of our implementation using the best hyperparameters that we found by tuning the baselines (DINO and MSN) for fair comparisons. 
over the non-ensembled baseline, while UNIF leads to no gains.
HowWhere# of Labels Per Class
Proj. hψCode. μ15~13 (1%)Full
Base40.6 ± 0.257.9 ± 0.363.4 ± 0.274.4 ± 0.1
UNIF40.4 ± 0.457.6 ± 0.363.3 ± 0.374.5 ± 0.2
PROB39.8 ± 0.5 ↓ 0.957.4 ± 0.4 ↓ 0.563.0 ± 0.4 ↓ 0.474.8 ± 0.1 ↑ 0.4
PROB41.9 ± 0.3 ↑ 1.359.6 ± 0.4 ↑ 1.765.1 ± 0.3 ↑ 1.775.4 ± 0.1 ↑ 1.0
ENT-ST40.0 ± 0.5 ↓ 0.657.3 ± 0.5 ↓ 0.662.7 ± 0.5 ↓ 0.774.0 ± 0.4 ↓ 0.4
ENT40.8 ± 0.458.0 ± 0.463.5 ± 0.474.5 ± 0.3
ENT43.0 ± 0.6 ↑ 2.459.7 ± 0.7 ↑ 1.864.8 ± 0.5 ↑ 1.475.1 ± 0.4 ↑ 0.7
ENT44.0 ± 0.2 ↑ 3.460.5 ± 0.3 ↑ 2.665.5 ± 0.1 ↑ 2.275.3 ± 0.1 ↑ 0.9
", + "bbox": [ + 174, + 183, + 823, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "we label DINO* and MSN*, and we use them as the base models for our experiments with $(h_{\\psi}, \\mu)$ -ensembles and weighted ensemble losses. Appx. B.2.1 describes the details for getting such strong performance for DINO* and MSN*. In particular, we find that the projection head has a crucial impact on label efficiency of representations and using a smaller head (3-layer MLP with hidden size 1024) significantly improves few-shot evaluation performance (see Appx. C.2).", + "bbox": [ + 169, + 359, + 826, + 431 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation metrics We compared models trained with and without our $(h_{\\psi},\\mu)$ -ensembles by measuring various evaluation metrics on ImageNet-1K (Deng et al., 2009). The evaluation metrics reflect the decodability and the label efficiency of learned representations. We measured the decodability with respect to both the linear classifier following the common linear evaluation protocol and the $k$ -NN classifier following Caron et al. (2021). We measured the label efficiency by evaluating the linear evaluation performance in few-shot settings, including $1\\%$ ( $\\sim 13$ -shots) labeled data evaluation (Chen et al., 2020a) and 1-/2-/5-shot evaluations (Assran et al., 2022). All evaluations used frozen representations of the teacher encoder - we did not fine tune the models. See Appx. B.3 for details.", + "bbox": [ + 169, + 445, + 826, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 EMPIRICAL STUDY OF $(h_{\\psi},\\mu)$ -ENSEMBLES", + "text_level": 1, + "bbox": [ + 171, + 574, + 511, + 589 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1 compares different strategies for where and how to ensemble. Fig. 4 compares the impact of the weighted ensemble loss on $(h_{\\psi},\\mu)$ -ensemble diversity. Fig. 
3 shows the effect of increasing the number of ensembles, adjusting the temperature $\\gamma$ , and increasing baseline projection head parameters. In these experiments, we used DINO* with ViT-S/16 trained for 300 epochs as the base model. We compared different ensemble methods applied to the base model with $m = 16$ heads which we found to work the best. For the ENT strategy in Table 1, the entropy weighting temperature $\\gamma$ is set to $0.05\\times \\log (c)$ by default which is selected from $\\{0.0125,0.025,0.05,0.1,0.2\\} \\times \\log (c)$ where the scale $\\log (c)$ gives the maximum entropy of the codebook size $c$ . For PROB, we keep $\\gamma = 1$ .", + "bbox": [ + 169, + 599, + 828, + 713 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Where to ensemble We study the where question by ensembling either the projection head $h_{\\psi}$ , the codebook $\\mu$ , or both with the ENT and the PROB ensemble strategies, as shown in Table 1. We find that ensembling both $h_{\\psi}$ and $\\mu$ provides the largest gains for both losses, probably due to the increased flexibility for learning a diverse ensemble. Interestingly, only ensembling $h_{\\psi}$ also works well for the ENT strategy.", + "bbox": [ + 169, + 726, + 826, + 799 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "How to ensemble We study the how question by considering four different loss variants: UNIF, PROB, ENT, and the variant of ENT with student entropy weighting. We find that when we ensemble both the projection head $h_{\\psi}$ and the codebook $\\mu$ , the ENT ensemble strategy leads to the most significant gains (e.g., 3.4 p.p. gains for 1-shot and 0.9 p.p. gains for full-data). The PROB strategy also consistently improves the performance with a slightly larger gain (1 p.p.) in full-data evaluation. In contrast, we see no gains for the UNIF strategy over the baseline. We also study a variant of ENT that uses the student entropy (i.e., Eq. 
(12) with the log $\\delta(i - j)$ term) for the importance weights (denoted as ENT-ST). ENT-ST performs much worse than ENT and is even worse than the baseline.", + "bbox": [ + 169, + 811, + 828, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7d60d8124efc22ad555d6fbbdbc43aab642f5688816526f2126d36420340702c.jpg", + "image_caption": [ + "(a) Scaling of $(h_{\\psi},\\mu)$ -ensembles." + ], + "image_footnote": [], + "bbox": [ + 181, + 102, + 385, + 213 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/79a22c445327a2374c4567b01548ac9e1cde74ecbd0a82714617a48008948f91.jpg", + "image_caption": [ + "Figure 3: Empirical study of $(h_{\\psi},\\mu)$ -ensembles. (a) The gains of $(h_{\\psi},\\mu)$ -ensembles start to diminish above 16 heads. (b) The temperature for entropy weighting has a larger impact on few-shot performance. 16 heads are used and $\\gamma$ is scaled by $\\log(c)$ . (c) Our $(h_{\\psi},\\mu)$ -ensembles outperform all non-ensembleed baselines when controlling for number of parameters. A too powerful non-ensemble projection head significantly harms accuracy. $1\\%$ data evaluation is shown. Also see Fig. 5." + ], + "image_footnote": [], + "bbox": [ + 395, + 102, + 598, + 213 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f0d9635be7f5c83827576c16d79bb2fa15568867fec3b8742609cc81341a2443.jpg", + "image_caption": [ + "(b) Effect of ENT temperature $\\gamma$", + "(c) Comparing different heads." 
+ ], + "image_footnote": [], + "bbox": [ + 609, + 102, + 816, + 213 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conjecture that this is because the student predictions typically have a larger variance than teacher predictions (Wang et al., 2022) especially when multi-crop augmentation (Caron et al., 2020; 2021) is applied to the student. Similar experiments on Eq. (11) and/or $\\gamma = 0$ variants of PROB also resulted in inferior performance (see Table 12).", + "bbox": [ + 169, + 330, + 823, + 387 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Analysis of $(h_{\\psi}, \\mu)$ -ensemble diversity The previous experiments showed that the choice of ensemble weighting strategy has a large impact on performance. We hypothesize that this choice substantially impacts the diversity of the codebook ensembles. Since the codes in different heads may not be aligned, we align them by the similarity of their code assignment probabilities across different input images, which measures how the codes are effectively used to 'cluster' the data. See Appx. C.4 for detailed explanations and results. In Fig. 4, we visualize the decay patterns of the similarity score between aligned codes (1.0 means the most similar) in a random pair of heads for each weighting strategy. ENT decays the fastest and UNIF decays the slowest, indicating that ENT learns the most diverse codebooks while UNIF is least diverse. This shows a positive correlation between the diversity of $(h_{\\psi}, \\mu)$ -ensembles and the empirical", + "bbox": [ + 169, + 402, + 583, + 611 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "performance of the ensemble strategies from Table 1. Finally, for UNIF, we find that different heads tend to learn the same semantic mappings even when randomly initialized; i.e., the code assignments in different heads become homogeneous up to permutation. See Fig. 
8 for a visualization.", + "bbox": [ + 169, + 611, + 823, + 654 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7a994fe5319f23357708dff945fb9964c4b86408e92bf1413733afc789d9d957.jpg", + "image_caption": [ + "Figure 4: Visualization of code similarity. ENT learns the most diverse $(h_{\\psi},\\mu)$ -ensembles reflected by the fastest decay of similarity scores between aligned codes in different heads. UNIF has low diversity between heads." + ], + "image_footnote": [], + "bbox": [ + 593, + 405, + 815, + 496 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Number of $(h_{\\psi},\\mu)$ -ensembles We study the effect of increasing the number of $(h_{\\psi},\\mu)$ -ensembles $m$ for ENT in Fig. 3a. Having more $(h_{\\psi},\\mu)$ -ensembles boosts the performance until $m = 16$ . Interestingly, using as few as $m = 2$ heads already significantly improves over the baseline.", + "bbox": [ + 169, + 667, + 826, + 712 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effect of ENT temperature $\\gamma$ Fig. 3b studies the effect of entropy weighting temperature $\\gamma$ for different evaluation metrics. We observe that $\\gamma$ has a relatively larger impact on few-shot evaluation performance. $\\gamma$ should be neither too high nor too low: a high temperature leads to under-specialization (i.e. less diversity) of heads similar to UNIF ( $\\gamma \\rightarrow \\infty$ ) and a low temperature may otherwise lead to over-specialization (i.e., only a single head is used for each input).", + "bbox": [ + 169, + 726, + 823, + 797 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparison of different projection heads Our method linearly increases projection head parameters, thus a natural question is: Is the gain of $(h_{\\psi},\\mu)$ -ensembles due to the increased power (or number of parameters) in projection heads? We answer this question with an empirical study of non-ensembled projection heads. 
Specifically, we studied non-ensembled $h_{\\psi}$ with (depth, width) searched over $\\{2,3,4\\} \\times \\{512,1024,2048,4096\\}$ and measured the linear evaluation performance with different amounts of labeled data. In Fig. 3c, we plot the $1\\%$ data evaluation result with respect to the number of parameters of the projection head both for ensembled and non-ensembled baselines. See Appx. C.2 for detailed analysis and extra results for other metrics. Our key findings are:", + "bbox": [ + 169, + 811, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Effectiveness of ensemble heads for DINO*/MSN* with different ViT models. Our ensemble heads consistently improve all downstream evaluation metrics on ImageNet-1K and achieve a new state-of-the-art for few-shot evaluations. For ViT-S/16, we report linear evaluation results probed from the last layer (left) and from the last 4 layers (right, following DINO). †We evaluated the few-shot settings using DINO's publicly-available pretrained weights in the cases those results were not reported in Caron et al. (2021). ‡MSN ViT-B/16 and ViT-B/8 are both trained for 600 epochs in Assran et al. (2022), whereas our models are trained for only 400, 300 epochs, respectively. For each architecture, we highlight the best DINO baseline and weighted ensemble in blue. For MSN, the corresponding highlights are yellow. The best results for each architecture and metric are bolded.", + "bbox": [ + 169, + 79, + 826, + 212 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/826bd0d1c3be03d85518b0f12bc7dced251218aae7e8ad7d1b39ff9fc91ddb2b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodFew-shotFull-data
125~13 (1%)k-NNLinear
ViT-S/16, 800 epochs
iBOT40.4 ± 0.550.8 ± 0.859.9 ± 0.265.975.2- / 77.9
DINO38.9 ± 0.448.9 ± 0.358.5 ± 0.164.574.576.1 / 77.0
DINO (Repro)39.1 ± 0.349.1 ± 0.558.6 ± 0.264.774.375.8 / 76.9
DINO* (Retuned)44.6 ± 0.253.6 ± 0.361.1 ± 0.266.274.175.8 / 76.9
MSN47.1 ± 0.155.8 ± 0.662.8 ± 0.367.2-- / 76.9
MSN* (Retuned)47.4 ± 0.156.3 ± 0.462.8 ± 0.267.173.375.6 / 76.6
DINO*-PROB (16)45.2 ± 0.454.9 ± 0.462.5 ± 0.267.375.176.5 / 77.6
DINO*-ENT (4)46.3 ± 0.155.5 ± 0.663.0 ± 0.367.574.876.2 / 77.2
DINO*-ENT (16)47.6 ± 0.1 ↑ 3.056.8 ± 0.564.0 ± 0.268.3 ↑ 2.175.376.8 / 77.7 ↑ 0.8
MSN*-ENT (2)48.8 ± 0.257.5 ± 0.564.0 ± 0.267.974.676.0 / 76.9
MSN*-ENT (8)50.1 ± 0.1 ↑ 2.758.9 ± 0.665.1 ± 0.368.7 ↑ 1.675.276.4 / 77.4 ↑ 0.8
ViT-B/16, 400 epochs
iBOT46.1 ± 0.356.2 ± 0.764.7 ± 0.369.777.179.5
DINO†43.0 ± 0.252.7 ± 0.561.8 ± 0.267.476.178.2
DINO* (Retuned)49.3 ± 0.158.1 ± 0.565.0 ± 0.369.176.078.5
MSN‡49.8 ± 0.258.9 ± 0.465.5 ± 0.3---
MSN* (Retuned)50.7 ± 0.159.2 ± 0.465.9 ± 0.269.774.778.1
DINO*-ENT (16)52.8 ± 0.1 ↑ 3.561.5 ± 0.467.6 ± 0.371.1 ↑ 2.077.179.1 ↑ 0.6
MSN*-ENT (8)53.7 ± 0.2 ↑ 3.062.4 ± 0.668.3 ± 0.271.5 ↑ 1.877.278.9 ↑ 0.8
ViT-B/8, 300 epochs
DINO†47.5 ± 0.257.3 ± 0.565.4 ± 0.370.377.480.1
DINO* (Retuned)49.5 ± 0.558.6 ± 0.665.9 ± 0.370.777.180.2
MSN‡55.1 ± 0.164.9 ± 0.771.6 ± 0.3---
MSN* (Retuned)51.9 ± 0.361.1 ± 0.467.7 ± 0.371.775.780.3
DINO*-ENT (16)55.0 ± 0.4 ↑ 5.563.4 ± 0.669.5 ± 0.373.4 ↑ 2.778.681.0 ↑ 0.8
MSN*-ENT (8)55.6 ± 0.2 ↑ 3.764.5 ± 0.570.3 ± 0.273.4 ↑ 1.778.980.8 ↑ 0.5
", + "bbox": [ + 173, + 218, + 823, + 636 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A too powerful non-enssembled $h_{\\psi}$ significantly hurts the label efficiency of learned representations. This result is similar to Chen et al. (2020b), which found that probing from intermediate layers of projection heads (which can be viewed as using a shallower head) could improve semi-supervised learning ( $1\\% - 10\\%$ labeled data) results.", + "- The default head (3/2048, denoted as 'Default') used in recent SSL methods (SimCLRv2, DINO, MSN, etc.) does not perform as well in few-shot evaluations, probably because it is selected by looking at full-data evaluation metrics. In contrast, our baseline (3/1024, denoted as 'Our baseline') significantly improves few-shot evaluation performance.", + "- Our $(h_{\\psi}, \\mu)$ -ensembles outperform all non-enssembled baselines and lead to consistent improvements in all evaluation metrics, despite the increase of parameters." + ], + "bbox": [ + 179, + 652, + 826, + 797 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 IMPROVING SOTA RESULTS WITH ENSEMBLEING", + "text_level": 1, + "bbox": [ + 171, + 814, + 555, + 828 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Next we apply $(h_{\\psi},\\mu)$ -ensembles to DINO* and MSN* and compare with the state-of-the-art results. We experimented with model architectures ViT-S/16, ViT-B/16, ViT-B/8 trained for 800, 400, 300 epochs respectively following Caron et al. (2021). We include both the published results and our returned versions to ensure strong baselines. For clarity, we denote our method as “{baseline}-{ensemble strategy} (# of heads)”, e.g., DINO*-ENT (4). We tuned both baselines and our methods for all architectures. We report the best hyperparameters for all models in Appx. 
over their non-ensembled counterparts.
Food101CIFAR10CIFAR100SUN397CarsDTDPetsCaltech-101FlowersAvg.
DINO*78.493.881.066.166.774.692.094.994.482.43
DINO*-ENT (16)79.193.881.466.566.874.992.894.693.982.64
MSN*77.793.179.864.663.372.292.494.792.781.17
MSN*-ENT (8)78.493.981.165.268.073.293.195.492.882.34
", + "bbox": [ + 174, + 247, + 823, + 321 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Transfer learning In Table 3, we compare the transfer learning performance of $(h_{\\psi}, \\mu)$ -ensembles and non-ensembed baselines. We used ViT-S-16 models trained on ImageNet-1K for 800 epochs and evaluated on 9 natural downstream datasets from Chen et al. (2020a) with linear evaluation (details in Appx. B.3). $(h_{\\psi}, \\mu)$ -ensembles lead to consistent improvements in transfer performance for $\\mathrm{MSN}^*$ and comparable results for DINO*.", + "bbox": [ + 169, + 323, + 823, + 393 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Training overhead In Table 4, we benchmark the computational overhead of $(h_{\\psi}, \\mu)$ -ensembles at training time. We used a medium sized model, DINO* with ViT-B/16, trained with the same setting used in all of our experiments. We benchmarked the wall-clock time and peak memory on 128 TPUv3 cores. $(h_{\\psi}, \\mu)$ -ensembling is relatively cheap in terms of training cost because the ensembled parts typically account for a small portion of total computation, especially when the backbone encoder is more computationally expensive (e.g., ViT-B/8).", + "bbox": [ + 169, + 400, + 586, + 526 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Again, we emphasize that there is no evaluation overhead when $(h_{\\psi},\\mu)$ -ensembles are removed.", + "bbox": [ + 171, + 526, + 803, + 541 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/04e9dba9ecc945cde833cf63b0ef7a8ad64ecb50213bf25b413a96769eacacab.jpg", + "table_caption": [ + "Table 4: Training overhead. Wall-clock time and peak memory per core for training with different numbers of ensembles." + ], + "table_footnote": [], + "table_body": "
mWall TimePeak Memory
15.81h5.25G
45.91h5.40G
166.34h5.89G
", + "bbox": [ + 596, + 457, + 821, + 525 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 CONCLUSION & DISCUSSION", + "text_level": 1, + "bbox": [ + 171, + 559, + 450, + 574 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We introduced an efficient ensemble method for SSL where multiple projection heads are ensembled to effectively improve representation learning. We showed that with carefully designed ensemble losses that induce diversity over ensemble heads, our method significantly improves recent state-of-the-art SSL methods in various evaluation metrics, particularly for few-shot evaluation. Although ensembling is a well-known technique for improving evaluation performance of a single model, we demonstrated that, for models with throw-away parts such as the projection heads in SSL, ensembling these parts can improve the learning of the non-ensemble representation encoder and also achieve significant gains in downstream evaluation without introducing extra evaluation cost.", + "bbox": [ + 169, + 589, + 826, + 702 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our ensemble method is applicable to many SSL methods beyond the two we explored. For example, one may consider generalization to BYOL (Grill et al., 2020) or SimSiam (Chen & He, 2021) that ensembles projection and/or prediction heads, or MAE (He et al., 2022) that ensembles the decoders (which introduces more training cost though). Our weighted ensemble losses can also be applied as long as the original loss can be reformulated as MLE for some $t$ , $s$ , and $Y$ , e.g., the MSE loss in these methods is MLE under multivariate normal distributions. We hope our results and insights will motivate more future work for extending our method or exploring more ensemble techniques for SSL.", + "bbox": [ + 169, + 708, + 826, + 806 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In future work, we also hope to remove three limitations of our setting. 
helpful discussions and encouragement.", + "bbox": [ + 171, + 127, + 826, + 171 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REPRODUCIBILITY STATEMENT
B.2.2) used in our experiments for reproducing our results in Table 2.", + "bbox": [ + 171, + 208, + 826, + 266 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 279, + 269, + 292 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "TensorFlow Datasets, a collection of ready-to-use datasets. https://www.tensorflow.org/datasets.", + "Sanjeev Arora, Hrishikesh Khandeparkar, Mikhail Khodak, Orestis Plevrakis, and Nikunj Saunshi. A theoretical analysis of contrastive unsupervised representation learning. arXiv preprint arXiv:1902.09229, 2019.", + "Yuki Markus Asano, Christian Rupprecht, and Andrea Vedaldi. Self-labelling via simultaneous clustering and representation learning. arXiv preprint arXiv:1911.05371, 2019.", + "Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, and Nicolas Ballas. Masked siamese networks for label-efficient learning. arXiv preprint arXiv:2204.07141, 2022.", + "Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. Advances in neural information processing systems, 32, 2019.", + "Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021.", + "Adrien Bardes, Jean Ponce, and Yann LeCun. Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906, 2021.", + "Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, pp. 446-461. Springer, 2014.", + "Yuri Burda, Roger B Grosse, and Ruslan Salakhutdinov. Importance weighted autoencoders. In ICLR, 2016.", + "Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. 
In Proceedings of the European conference on computer vision (ECCV), pp. 132-149, 2018.", + "Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. Advances in Neural Information Processing Systems, 33:9912-9924, 2020.", + "Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9650-9660, 2021.", + "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pp. 1597-1607. PMLR, 2020a.", + "Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey E Hinton. Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems, 33:22243-22255, 2020b." + ], + "bbox": [ + 171, + 303, + 826, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 508, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15750-15758, 2021.", + "Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3606-3613, 2014.", + "Thomas M Cover and Joy A Thomas. Elements of Information Theory. John Wiley & Sons, 1999.", + "Marco Cuturi. 
Sinkhorn distances: Lightspeed computation of optimal transport. Advances in neural information processing systems, 26, 2013.", + "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009.", + "Thomas G Dietterich. Ensemble methods in machine learning. In International workshop on multiple classifier systems, pp. 1-15. Springer, 2000.", + "Onur Dikmen, Zhirong Yang, and Erkki Oja. Learning the information divergence. IEEE transactions on pattern analysis and machine intelligence, 37(7):1442-1454, 2014.", + "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020.", + "Sever S Dragomir. A generalization of $f$ -divergence measure to convex functions defined on linear spaces. Communications in Mathematical Analysis, 15(2):1-14, 2013.", + "Michael Dusenberry, Ghassen Jerfel, Yeming Wen, Yian Ma, Jasper Snoek, Katherine Heller, Balaji Lakshminarayanan, and Dustin Tran. Efficient and scalable bayesian neural nets with rank-1 factors. In International conference on machine learning, pp. 2782-2792. PMLR, 2020.", + "Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In 2004 conference on computer vision and pattern recognition workshop, pp. 178-178. IEEE, 2004.", + "Timur Garipov, Pavel Izmailov, Dmitrii Podoprikhin, Dmitry P Vetrov, and Andrew G Wilson. Loss surfaces, mode connectivity, and fast ensembling of dnns. Advances in neural information processing systems, 31, 2018.", + "Raphael Gontijo-Lopes, Yann Dauphin, and Ekin Dogus Cubuk. 
No one representation to rule them all: Overlapping features of training methods. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=BK-4qbGgIE3.", + "Jean-Bastien Grill, Florian Strub, Florent Alché, Coretin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. Advances in neural information processing systems, 33:21271-21284, 2020.", + "Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pp. 297-304. JMLR Workshop and Conference Proceedings, 2010.", + "Abner Guzman-Rivera, Dhruv Batra, and Pushmeet Kohli. Multiple choice learning: Learning to produce multiple structured outputs. Advances in neural information processing systems, 25, 2012.", + "Lars Kai Hansen and Peter Salamon. Neural network ensembles. IEEE transactions on pattern analysis and machine intelligence, 12(10):993-1001, 1990.", + "Marton Havasi, Rodolphe Jenatton, Stanislav Fort, Jeremiah Zhe Liu, Jasper Snoek, Balaji Lakshminarayanan, Andrew M Dai, and Dustin Tran. Training independent subnetworks for robust prediction. arXiv preprint arXiv:2010.06610, 2020." + ], + "bbox": [ + 171, + 102, + 828, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 9729-9738, 2020.", + "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16000-16009, 2022.", + "R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670, 2018.", + "Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pp. 646-661. Springer, 2016.", + "Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E Hopcroft, and Kilian Q Weinberger. Snapshot ensembles: Train 1, get m for free. arXiv preprint arXiv:1704.00109, 2017.", + "Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pp. 554-561, 2013.", + "Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.", + "Harold W Kuhn. The hungarian method for the assignment problem. *Naval research logistics quarterly*, 2(1-2):83-97, 1955.", + "Samuli Laine and Timo Aila. Temporal ensembling for semi-supervised learning. arXiv preprint arXiv:1610.02242, 2016.", + "Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. Advances in neural information processing systems, 30, 2017.", + "Kuang-Huei Lee, Anurag Arnab, Sergio Guadarrama, John Canny, and Ian Fischer. Compressive visual representations. 
Advances in Neural Information Processing Systems, 34:19538-19552, 2021.", + "Stefan Lee, Senthil Purushwalkam, Michael Cogswell, David Crandall, and Dhruv Batra. Why m heads are better than one: Training a diverse ensemble of deep networks. arXiv preprint arXiv:1511.06314, 2015.", + "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2018.", + "Warren R Morningstar, Alex Alemi, and Joshua V Dillon. Pacm-bayes: Narrowing the empirical risk gap in the misspecified bayesian regime. In International Conference on Artificial Intelligence and Statistics, pp. 8270-8298. PMLR, 2022.", + "Wai Ho Mow. A tight upper bound on discrete entropy. IEEE Transactions on Information Theory, 44(2):775-778, 1998.", + "Yurii Nesterov. A method for solving the convex programming problem with convergence rate $o(1 / k^2)$ . Proceedings of the USSR Academy of Sciences, 269:543-547, 1983.", + "Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722-729. IEEE, 2008.", + "Kento Nozawa, Pascal Germain, and Benjamin Guedj. Pac-bayesian contrastive unsupervised representation learning. In Jonas Peters and David Sontag (eds.), Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI), volume 124 of Proceedings of Machine Learning Research, pp. 21-30. PMLR, 03-06 Aug 2020. URL https://proceedings.mlr.press/v124/nozawa20a.html." 
+ ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018.", + "Yaniv Ovadia, Emily Fertig, Jie Ren, Zachary Nado, David Sculley, Sebastian Nowozin, Joshua Dillon, Balaji Lakshminarayanan, and Jasper Snoek. Can you trust your model's uncertainty? evaluating predictive uncertainty under dataset shift. Advances in neural information processing systems, 32, 2019.", + "Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In 2012 IEEE conference on computer vision and pattern recognition, pp. 3498-3505. IEEE, 2012.", + "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011.", + "Michael P Perrone and Leon N Cooper. When networks disagree: Ensemble methods for hybrid neural networks. Technical report, Brown Univ Providence Ri Inst for Brain and Neural Systems, 1992.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pp. 8748-8763. PMLR, 2021.", + "Yangjun Ruan, Yann Dubois, and Chris J. Maddison. Optimal representations for covariate shift. 
In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=Rf58LPCwJj0.", + "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958, 2014.", + "Ilya Sutskever, James Martens, George Dahl, and Geoffrey Hinton. On the importance of initialization and momentum in deep learning. In International conference on machine learning, pp. 1139-1147. PMLR, 2013.", + "Antti Tarvainen and Harri Valpola. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. Advances in neural information processing systems, 30, 2017.", + "Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. In European conference on computer vision, pp. 776-794. Springer, 2020.", + "Nenad Tomasev, Ioana Bica, Brian McWilliams, Lars Buesing, Razvan Pascanu, Charles Blundell, and Jovana Mitrovic. Pushing the limits of self-supervised resnets: Can we outperform supervised learning without labels onImagenet? arXiv preprint arXiv:2201.05119, 2022.", + "Linh Tran, Bastiaan S Veeling, Kevin Roth, Jakub Swiatkowski, Joshua V Dillon, Jasper Snoek, Stephan Mandt, Tim Salimans, Sebastian Nowozin, and Rodolphe Jenatton. Hydra: Preserving ensemble diversity for model distillation. arXiv preprint arXiv:2001.04694, 2020.", + "Xiao Wang, Haoqi Fan, Yuandong Tian, Daisuke Kihara, and Xinlei Chen. On the importance of asymmetry for siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16570-16579, 2022.", + "Yeming Wen, Dustin Tran, and Jimmy Ba. Batchsemble: an alternative approach to efficient ensemble and lifelong learning. 
arXiv preprint arXiv:2002.06715, 2020.", + "Mitchell Wortsman, Gabriel Ilharco, Samir Ya Gadre, Rebecca Roelofs, Raphael Gontijo-Lopes, Ari S Morcos, Hongseok Namkoong, Ali Farhadi, Yair Carmon, Simon Kornblith, et al. Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time. In International Conference on Machine Learning, pp. 23965-23998. PMLR, 2022." + ], + "bbox": [ + 171, + 103, + 828, + 922 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3733-3742, 2018.", + "Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene recognition from abbey to zoo. In 2010 IEEE computer society conference on computer vision and pattern recognition, pp. 3485-3492. IEEE, 2010.", + "Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9653-9663, 2022.", + "Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stephane Deny. Barlow twins: Self-supervised learning via redundancy reduction. In International Conference on Machine Learning, pp. 12310-12320. PMLR, 2021.", + "Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. Image BERT pre-training with online tokenizer. In International Conference on Learning Representations, 2022. 
URL https://openreview.net/forum?id=ydopy-e6Dg." + ], + "bbox": [ + 171, + 102, + 828, + 353 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A PSEUDOCODE", + "text_level": 1, + "bbox": [ + 171, + 102, + 328, + 118 + ], + "page_idx": 14 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 1: Pseudocode for computing ensemble loss" + ], + "code_body": "b, n, c:, batch size, number of ensemble heads, codebook size \n# log_ps, log_ct: student, teacher log probabilities with n ensembles \n# strategy: ensemble loss average strategy \n# tau_ent: temperature for entropy weighting \ndef ensemble_loss(log_ps, log_ct, strategy, tau_ent): \n b, n, c = log_ct.shape # axis 1 corresponds to ensemble \n log_ct = stop_grad(log_ct) # stop gradient for teacher \n if strategy == \"Unif\": \n loss = - (exp(log_ct) * log_ps).sum(axis=-1) \n loss = loss.mean(axis=1) # average over ensembles \n elif strategy == \"Prob\": \n log_mean_ct = logsumexp(log_ct, axis=1, b=1/n) # mean teacher \n log_mean_ps = logsumexp(log_ps, axis=1, b=1/n) # mean student \n loss = - (exp(log_mean_ct) * log_mean_ps).sum(axis=-1) \n elif strategy == \"Ent\": \n ent = - (exp(log_ct) * log_ct).sum(axis=-1) # teacher entropy \n weight = softmax(-ent/tau_ent, axis=1) # entropy weights \n loss = - (exp(log_ct) * log_ps).sum(axis=-1) \n loss = (loss * weight).sum(axis=1) # entropy weighted average \nreturn loss.mean() # average over samples", + "guess_lang": "python", + "bbox": [ + 171, + 181, + 656, + 415 + ], + "page_idx": 14 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 2: Pseudocode for ensemble heads with simplified DINO" + ], + "code_body": "# n, c, eta: number of ensemble heads, 
codebook size, momentum update rate\n# fs, ft: student, teacher encoders\n# hs_ens, ht_ens: student, teacher projection heads with n ensembles, list with length n\n# mus_ens, mut_ens: student, teacher codebooks with n ensembles, list with length n\n# taus, taut: student, teacher temperatures\n# strategy: ensemble loss average strategy\n# tau_ent: temperature for entropy weighting\nfor x in dataloder: # load a batch with b samples\nxs, xt = augs(x), augt(x) # random augmentations\nzs, zt = fs(xs), ft(xt) # representations, (b, l)\n# all following computation can be parallelized with batch computation\nlog_ps, log_ct = [], []\nfor j in range(n):\nhs_j, ht_j = hs_ens[j], ht_ens[j] # j-th projection head\nmus_j, mut_j = mus_ens[j], mut_ens[j] # j-th codebook, (d, c)\nes_j, et_j = hs_j(zs), ht_j(zt) # j-th embedding, (b, d)\nrs_j = (es_j @ mus_j) / (es_j(norm(axis=1, keepdims=True) * mus_j(norm(axis=0, keepdims=True)) / taus # student logits, (b, c)\nrt_j = (et_j @ mut_j) / (et_j(norm(axis=1, keepdims=True) * mut_j(norm(axis=0, keepdims=True)) / taut # teacher logits, (b, c)\nlog_ps_j = logsoftmax(rs_j, axis=-1) # (b, c)\nlog_rt_j = logsoftmax(rt_j, axis=-1) # (b, c)\nlog_rt_j = renorm(log_rt_j) # adjust teacher predictions with centering or sinkhorn,\nomitted here for simplicity\nlog_ps.append(log_ps_j)\nlog_rt.append(log_rt_j)\nlog_ps = stack(log_ps_j, axis=1) # stacked student log probabilities, (b, n, c)\nlog_rt = stack(log_rt_j, axis=1) # stacked teacher log probabilities, (b, n, c)\nloss = ensemble_loss(log_ps, log_rt, strategy=strategy) # compute ensemble loss\nloss.backup() # back-propagate\nsgd_update(fs, hs_ens, mus_ens) # apply gradient decent update for student\nema_update(ft, ht_ens, mut_ens, rate=eta) # apply momentum update for teacher", + "guess_lang": "julia", + "bbox": [ + 171, + 483, + 802, + 898 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + 
"page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B EXPERIMENTAL DETAILS", + "text_level": 1, + "bbox": [ + 171, + 102, + 419, + 118 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In this section, we provide details for our experiments. In Appx. B.1, we describe how we reproduced and improved the baseline DINO/MSN models. We give the implementation details for SSL training and evaluation in Appx. B.2 and Appx. B.3 respectively. All the hyper-parameters used in our experiments are in Appx. B.2.2.", + "bbox": [ + 169, + 133, + 826, + 191 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.1 REPRODUCING & IMPROVING BASELINES", + "text_level": 1, + "bbox": [ + 171, + 205, + 506, + 220 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We carefully reproduced and further improved baseline methods (denoted as DINO* and MSN* respectively) with an extensive study and hyperparameter search (see Appx. B.1). In particular, we systematically study the projection head design (which we found is crucial for few-shot evaluation performance (Appx. C.2)) and different techniques for avoiding collapse used in both methods (Appx. C.1). DINO* performs significantly better than DINO on few-shot evaluation (e.g., $2\\sim 6$ percentage point (p.p.) gains for 1 shot) and maintains the full-data evaluation performance. The main adjustments of DINO* are: (i) A 3-layer projection head with a hidden dimension of 1024 (instead of 2048); (ii) Sinkhorn-Knopp (SK) normalization (instead of centering) is applied to teacher predictions, combined with a smaller teacher temperature $\\tau = 0.025$ and codebook size $c = 1024$ or 4096. MSN* uses the same projection head as DINO* and applies ME-MAX regularization without SK normalization (which is applied in MSN by default). 
Further details for DINO and MSN can be found below.", + "bbox": [ + 169, + 231, + 826, + 398 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.1.1 DINO", + "text_level": 1, + "bbox": [ + 171, + 412, + 276, + 428 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/b2a2e41958ede2350a5e360b42fe559d83ce2c86ef637dd7610ae3b5a081277a.jpg", + "table_caption": [ + "Table 5: Reproducing & Improving DINO. Our reproduce results match the public numbers. We further improve the DINO baseline (DINO*) by studying projection heads and collapse-avoiding techniques. The evaluation results of DINO/DINO* ViT-S/16 trained with 800 epochs are reported." + ], + "table_footnote": [], + "table_body": "
Few-shotFull-data
125~13 (1%)k-NNLinear
DINO (Caron et al., 2021)38.9 ± 0.448.9 ± 0.358.5 ± 0.164.574.576.1 / 77.0
DINO (Ours reproduced)39.1 ± 0.349.1 ± 0.558.6 ± 0.264.774.375.8 / 76.9
DINO* (Retuned)44.6 ± 0.253.6 ± 0.361.1 ± 0.266.274.175.8 / 76.9
", + "bbox": [ + 178, + 506, + 820, + 592 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Reproducing DINO We carefully reproduced DINO with JAX following the official DINO implementation1. In Table 5, we report the evaluation results of DINO using ViT-S trained with 800 epochs following the exact training configuration for ViT-S/16 in the official DINO code. The official results of full-data evaluation and $1\\%$ data evaluation are from Caron et al. (2021), the other few-shot evaluation results are evaluated by Assran et al. (2022) and also validated by us. Note that for consistency of full-data linear evaluation, we report the results with both the [CLS] token representations of the last layer and the concatenation of the [CLS] token representations from the last 4 layers following Caron et al. (2021). For 1-/2-/5-shots evaluation results, we report the mean accuracy and standard deviation across 3 random splits of the data following Assran et al. (2022). As shown in Table 5, our reproduced results are all comparable with the published numbers which validates the implementation of our training and evaluation pipelines.", + "bbox": [ + 169, + 604, + 826, + 760 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Improving DINO We improved the DINO baseline with a systematic empirical study of some important components. We first empirically compared different techniques for avoiding collapse (see Appx. C.1) and find that Sinkhorn-Knopp (SK) normalization is a more effective and also simpler technique for encouraging codebook usage than the centering operation used in DINO. We thus applied SK normalization, which enabled us to use a smaller teacher temperature $\\tau = 0.025$ (instead of $\\tau = 0.07$ ) and a much smaller codebook size $c = 1024$ or 4096 (instead of 65536). These modifications lead to similar performance as DINO with a much smaller codebook (up to 1M parameters, compared to 16M parameters for DINO). 
Next we empirically studied the effect of projection heads for different evaluation metrics (see Appx. C.2), and found that the design of", + "bbox": [ + 169, + 773, + 826, + 902 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "https://github.com/facebookresearch/dino", + "bbox": [ + 191, + 909, + 555, + 924 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "projection heads is crucial for few-shot evaluation metrics and an too power powerful projection head (e.g., the 3-layer MLP with a hidden dimension of 2048 used in DINO/MSN/etc.) could significantly hurt the few-shot performance. With an empirically study of projection head architectures, we found that a simply reducing the hidden dimension to 1024 could significantly improves the few-shot evaluation performance while maintaining full-data evaluation performance. The improved results of DINO* are shown in Table 5.", + "bbox": [ + 169, + 103, + 826, + 188 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.1.2 MSN", + "text_level": 1, + "bbox": [ + 171, + 202, + 272, + 215 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/ef482eb8da3b2eef6dfd5d4b3c41b41b8cbb4b20575f8fc58f977eaa82c18716.jpg", + "table_caption": [ + "Table 6: Reproducing & improving MSN. We implement $\\mathsf{MSN^{*}}$ by adding ME-MAX regularization and masking to DINO*, which surpasses public MSN results. The evaluation results of MSN/MSN* ViT-S/16 trained with 800 epochs are reported." + ], + "table_footnote": [], + "table_body": "
Few-shotFull-data
125~13 (1%)k-NNLinear
MSN (Assran et al., 2022)47.1 ± 0.155.8 ± 0.662.8 ± 0.367.2-- / 76.9
MSN (Repro)39.1 ± 0.349.2 ± 0.358.4 ± 0.164.372.874.7 / 75.5
MSN* (Retuned)47.4 ± 0.156.3 ± 0.462.8 ± 0.267.173.375.6 / 76.6
", + "bbox": [ + 176, + 292, + 820, + 380 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We carefully implemented MSN by adding its main components, i.e., ME-MAX regularization and masking, to the DINO implementation (denoted as $\\mathrm{MSN}^*$ ), which surpassed public results as shown in Table 6. Note that the implementation of $\\mathrm{MSN}^*$ does not exactly match the public implementation in the public MSN code $^2$ , where the main differences are:", + "bbox": [ + 169, + 383, + 823, + 441 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- MSN applies ME-MAX with Sinkhorn-Knopp normalization by default (as in the released training configuration), which we empirically find does not work very well (see Table 9). $\\mathrm{MSN}^*$ does not apply SK normalization and tunes the regularization strength for ME-MAX.", + "- Some differences in implementation details, e.g., schedules for learning rate/weight decay, batch normalization in projection heads, specific data augmentations, etc. $\\mathrm{MSN}^*$ uses the exact same setup as DINO\\* which follows original DINO implementation." + ], + "bbox": [ + 212, + 452, + 826, + 542 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We initially tried to exactly reproduce the original MSN following the public MSN code, but the results are much below the public ones, as shown in Table 6. Incorporating the two differences above bridges the gap and makes $\\mathrm{MSN}^*$ surpass the public results.", + "bbox": [ + 169, + 553, + 823, + 595 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.2 PRETRAINING DETAILS", + "text_level": 1, + "bbox": [ + 171, + 612, + 380, + 626 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this subsection, we provide the general implementation details in Appx. B.2.1 and specific hyperparameters in Appx. B.2.2 in Appx. 
B.2.2 for reproducibility.", + "bbox": [ + 169, + 638, + 826, + 667 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.2.1 IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 681, + 423, + 695 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "**Common setup** We experimented with DINO (Caron et al., 2021) and MSN (Assran et al., 2022) models on ImageNet ILSVRC-2012 dataset (Deng et al., 2009). We mainly followed the training setup in Caron et al. (2021). In particular, all models were trained with AdamW optimizer (Loshchilov & Hutter, 2018) and a batch size of 1024. The learning rate was linearly warmuped to 0.002 $(= 0.001 \\times \\text{batch size} / 512)$ and followed a cosine decay schedule. The weight decay followed a cosine schedule from 0.04 to 0.4. The momentum rate for the teacher was increased from 0.996 to 1 with a cosine schedule following BYOL (Grill et al., 2020). A stochastic depth (Huang et al., 2016) of 0.1 was applied without dropout (Srivastava et al., 2014). The student temperature $\\tau$ is set to 0.1. As with DINO, we used the data augmentations of BYOL and multi-crop augmentation of SWAV (Caron et al., 2020). In particular, 2 global views with a $224 \\times 224$ resolution and crop area range [0.25, 1.0] were generated for the teacher and student, and another 10 local views with $96 \\times 96$ resolution and crop area range [0.08, 0.25] were used as extra augmented inputs for the student. 
For MSN, we used the exact same setup and incorporated its major component: 1) mean entropy maximization (ME-MAX) regularization; 2) masking as an extra augmentation applied to the student global view.", + "bbox": [ + 169, + 705, + 826, + 902 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://github.com/facebookresearch/msn", + "bbox": [ + 189, + 909, + 547, + 924 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Main modifications We retuned the baselines (DINO* and MSN*) as detailed in Appx. B.1, and the main adjustments are as followed. We used a 3-layer projection head with a hidden dimension of 1024. The output embedding (i.e., $(h_{\\psi} \\circ r_{\\omega})(x)$ ) and the codes (i.e., $\\mu$ ) both have a dimension of 256 and are $L_{2}$ normalized. For DINO*, Sinkhorn-Knopp (SK) normalization was applied to teacher predictions. For MSN*, ME-MAX was used without SK normalization and the regularization strength was tuned over $\\{3, 4, 5\\}$ . For all models, we used teacher temperature $\\tau = 0.025$ which was linearly decayed from 0.05 for the first 30 epochs. The codebook size $c$ was selected over $\\{1024, 4096\\}$ for all models, and typically $c = 4096$ was selected for baseline methods and $c = 1024$ was selected for ours. 
For our $(h_{\\psi}, \\mu)$ -ensembles with ENT, entropy weighting temperature $\\gamma$ is linearly decayed from 0.5 to the specified value.", + "bbox": [ + 169, + 103, + 826, + 243 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B.2.2 HYPER-PARAMETERS", + "text_level": 1, + "bbox": [ + 171, + 257, + 382, + 272 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We report the hyperparameters for training our models for reproducibility:", + "bbox": [ + 169, + 281, + 661, + 297 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/0ce535b282b281cff1393e8223140d3e8be97283eef8434f1fc586abc05a4ea3.jpg", + "table_caption": [ + "Table 7: Hyper-parameters for training the DINO* model." + ], + "table_footnote": [], + "table_body": "
Hyper-parameterViT-S/16ViT-B/16ViT-B/8
DINO*DINO*-PROB (16)DINO*-ENT (4/16)DINO*DINO*-ENT (16)DINO*DINO*-ENT (16)
training epoch800400300
batch size102410241024
learning rate2e-32e-32e-3
warmup epoch103010
min lr1e-51e-54e-5
weight decay0.04 → 0.40.04 → 0.40.04 → 0.4
stochastic depth0.10.10.1
gradient clip3.01.03.0
momentum0.996 → 1.00.996 → 1.00.996 → 1.0
# of multi-crops101010
masking ratio---
proj. layer333
proj. hidden dim102410241024
emb. dim d256256256
rep. dim384768768
codebook size c4096102410244096102440961024
student temp.0.10.10.1
teacher temp.0.0250.0250.025
te. temp. decay epoch303030
centerxxx
SK norm
ME-MAX weight---
ent. weight temp. γ--0.05-0.05-0.06
γ init.--0.5-0.5-0.5
γ decay epoch--30-30-30
", + "bbox": [ + 173, + 342, + 823, + 636 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B.3 EVALUATION PROTOCALS", + "text_level": 1, + "bbox": [ + 171, + 651, + 398, + 666 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Few-shot linear evaluation We followed the few-shot evaluation protocol in Assran et al. (2022). Specifically, we used the 1-/2-/5-shot ImageNet dataset splits3 in Assran et al. (2022) and $1\\%$ ( $\\sim 13$ -shot) ImageNet dataset splits4. For given labelled images, we took a single central crop of size $224 \\times 224$ without additional data augmentations, and extracted the output [CLS] token representations from the frozen pretrained model. Then we trained a linear classifier with multi-class logistic regression on top of the extracted representations. We used the scikit-learn package (Pedregosa et al., 2011) for the logistic regression classifier. For all few-shot evaluations, we searched the $\\mathrm{L}_2$ regularization strength over $\\{1\\mathrm{e}-4, 3\\mathrm{e}-4, 1\\mathrm{e}-3, 3\\mathrm{e}-3, 1\\mathrm{e}-2, 3\\mathrm{e}-2, 1\\mathrm{e}-1, 3\\mathrm{e}-1, 1, 3, 10\\}$ .", + "bbox": [ + 169, + 676, + 826, + 789 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Full-data linear evaluation We followed the linear evaluation protocol in (Caron et al., 2021). Specifically, we trained a linear classifier on top of the representations extracted from the frozen pretrained model. The linear classifier is optimized by SGD with Nesterov momentum (Nesterov, 1983; Sutskever et al., 2013) of 0.9 and a batch size of 4096 for 100 epochs on the whole ImageNet dataset, following a cosine learning rate decay schedule. 
We did not apply any weight decay.", + "bbox": [ + 169, + 803, + 828, + 875 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "3Publicly available at https://github.com/facebookresearch/msn", + "bbox": [ + 192, + 883, + 669, + 897 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "4Publicly available at https://github.com/google-research/simclr/tree/master/imagenet_subsets", + "bbox": [ + 173, + 898, + 825, + 922 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/f37856b7343d311e5439e11a81d14b5ed3e4b0d25d2ee0e7220e7b47751129d5.jpg", + "table_caption": [ + "Table 8: Hyper-parameters for training the ${\\mathrm{{MSN}}}^{ * }$ model." + ], + "table_footnote": [], + "table_body": "
Hyper-parameterViT-S/16ViT-B/16ViT-B/8
DINO*MSN*-ENT (2/8)MSN*MSN*-ENT (8)MSN*MSN*-ENT (8)
training epoch800400300
batch size102410241024
learning rate2e-32e-32e-3
warmup epoch203020
min lr1e-54e-54e-5
weight decay0.04 → 0.40.04 → 0.40.04 → 0.4
stochastic depth0.10.10.1
gradient clip1.01.01.0
momentum0.996 → 1.00.996 → 1.00.996 → 1.0
# of multi-crops101010
masking ratio0.20.20.15
proj. layer333
proj. hidden dim102410241024
emb. dim d256256256
rep. dim384768768
codebook size c409610244096102440961024
student temp.0.10.10.1
teacher temp.0.0250.0250.025
te. temp. decay epoch303030
centerXXX
SK normXXX
ME-MAX weight4.04.04.0
ent. weight temp. γ-0.01-0.005-0.01
γ init.-0.5-0.5-0.5
γ decay epoch-30-30-30
", + "bbox": [ + 215, + 132, + 784, + 454 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "During training, we only applied basic data augmentations including random resized crops of size $224 \\times 224$ and horizontal flips. During testing, we took a single central crop of the same size. For ViT-S/16, Caron et al. (2021) found that concatenating the [CLS] token representations from the last $l$ (specifically, $l = 4$ ) layers (c.f. Appendix F.2 in Caron et al. (2021)) improved the results by about 1 p.p. We followed the same procedure, but reported linear evaluation results with both $l = 1$ and $l = 4$ in Table 2 for consistency. In our empirical study with ViT-S/16, we used the result with $l = 1$ . For larger models (e.g., ViT-B/16), we followed Caron et al. (2021); Zhou et al. (2022) to use the concatenation of the [CLS] token representation and the average-pooled patch tokens from the last $l = 1$ layer for linear evaluation. For all linear evaluations, we searched the base learning rate over $\\{4.8\\mathrm{e} - 3, 1.6\\mathrm{e} - 2, 4.8\\mathrm{e} - 2, 1.6\\mathrm{e} - 1, 4.8\\mathrm{e} - 1, 1.6\\}$ .", + "bbox": [ + 169, + 474, + 826, + 613 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Full-data $k$ -NN evaluation We followed the $k$ -NN evaluation protocol in Caron et al. (2021); Wu et al. (2018). Specifically, for each image in the given dataset, we took a single central crop of size $224 \\times 224$ without additional data augmentations, and extracted the output [CLS] token representations from the frozen pretrained model. The extracted representations are used for a weighted $k$ -Nearest-Neighbor classifier. In particular, denote the stored training representations and labels as $\\mathcal{D} = \\{(z_i, y_i)\\}_{i=1}^N$ . For a test image with extracted representation $z$ , denote the set of its top $k$ -NN training samples as $\\mathcal{D}_k[z] \\subseteq \\mathcal{D}$ and $|\\mathcal{D}_k[z]| = k$ . 
The $k$ -NN set $\\mathcal{D}_k[z]$ is used to make the prediction for the test image with a weighted vote, i.e., $\\hat{y} = \\arg \\max_y \\left( \\sum_{(z_j, y_j) \\in \\mathcal{D}_k[z]} \\alpha_j \\mathbf{1}_{y=y_j} \\right)$ , where $\\mathbf{1}_{y=y_j}$ is the one-hot vector corresponding to label $y_j$ and $\\alpha_j$ is the weight induced by the cosine similarity between $z$ and $z_j$ , i.e., $\\alpha_j = \\exp \\left( \\frac{1}{\\tau'} \\frac{z^\\top z_j}{||z|| \\|z_j||} \\right)$ . We set $\\tau' = 0.07$ without tuning as in Caron et al. (2021); Wu et al. (2018). For all $k$ -NN evaluations, we searched $k$ over $\\{5, 10, 20, 50, 100\\}$ and found that $k = 10$ or $k = 20$ was consistently the best.", + "bbox": [ + 169, + 632, + 826, + 821 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Transfer evaluation via linear probing We mainly followed the transfer evaluation protocol in (Grill et al., 2020; Chen et al., 2020a). In particular, we used 9 of their 13 datasets that are available in tensorflow-datasets (tfd), namely Food-101 (Bossard et al., 2014), CIFAR10 (Krizhevsky et al., 2009), CIFAR100 (Krizhevsky et al., 2009), SUN397 scene dataset (Xiao et al., 2010), Stanford Cars (Krause et al., 2013), Describable Textures Dataset (Cimpoi et al., 2014, DTD), Oxford-IIIT Pets (Parkhi et al., 2012), Caltech-101 (Fei-Fei et al., 2004), Oxford 102 Flowers (Nilsback & Zisserman,", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/9a27ed9b5ae01b37badb6bf57d8bd17d7e8524fdbd48e5d2b36164ee73106be3.jpg", + "table_caption": [ + "Table 9: Empirical study of different techniques for avoiding collapse. 
Using Sinkhorn-Knopp normalization instead of centering for DINO leads to improved performance, and matches the original DINO even with a much smaller codebook. The ME-MAX regularization of MSN is very effective and leads to significant improvement for few-shot evaluations." + ], + "table_footnote": [], + "table_body": "
TechniqueFew-shotFull-data
CenterSinkhornME-MAX125~13 (1%)k-NNLinear
DINO37.8 ± 0.447.4 ± 0.356.9 ± 0.463.072.474.9
39.1 ± 0.349.4 ± 0.358.7 ± 0.264.874.176.0
MSN36.0 ± 0.446.6 ± 0.656.5 ± 0.263.273.275.2
43.9 ± 0.253.0 ± 0.361.1 ± 0.266.074.075.8
", + "bbox": [ + 176, + 176, + 820, + 271 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/60b12267d8f6efabcdcf0af59665cc9d258e1f1c0f4fded2ce22f5096265c227.jpg", + "table_caption": [ + "Table 10: ME-MAX regularization is sensitive to hyper-parameters." + ], + "table_footnote": [], + "table_body": "
WeightFew-shotFull-data
125~13 (1%)KNNLinear
1.037.6 ± 0.248.0 ± 0.457.7 ± 0.264.073.575.6
3.043.9 ± 0.253.0 ± 0.361.1 ± 0.266.074.075.8
5.043.6 ± 0.252.6 ± 0.460.4 ± 0.165.573.975.6
", + "bbox": [ + 246, + 308, + 750, + 393 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "2008). Following their evaluation metrics, we reported mean per-class accuracy for Oxford-IIIT Pets, Caltech-101, and Oxford 102 Flowers datasets and reported top-1 accuracy for other datasets. We transferred the models pretrained on ImageNet (Deng et al., 2009) to these datasets by training a linear classifier on top of frozen representations. In particular, we resized given images to $256 \\times 256$ and took a single central crop of size $224 \\times 224$ without additional data augmentations. We extracted the output [CLS] token representations from the frozen pretrained model. Then we trained a linear classifier with multi-class logistic regression on top of the extracted representations. We used the scikit-learn package (Pedregosa et al., 2011) for the logistic regression classifier. For all transfer evaluations, we searched the $\\mathbf{L}_2$ regularization strength over $\\{1e - 6, 1e - 5, 1e - 4, 3e - 4, 1e - 3, 3e - 3, 1e - 2, 3e - 2, 1e - 1, 3, 1e, 3e, 1e2, 1e3, 1e4, 1e5\\}$ .", + "bbox": [ + 169, + 411, + 826, + 551 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C ADDITIONAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 571, + 400, + 585 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C.1 EMPIRICAL STUDY OF TECHNIQUES FOR AVOIDING COLLAPSE", + "text_level": 1, + "bbox": [ + 171, + 604, + 645, + 619 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Most self-supervised learning methods utilize some techniques to avoid collapse of representations with, e.g., contrastive loss (Chen et al., 2020a; He et al., 2020), batch normalization (Grill et al., 2020), asymmetric architecture design with a predictor (Grill et al., 2020; Chen & He, 2021), etc. In DINO and MSN, a learnable codebook is used for the learning objective and different techniques are applied to encourage the effective codebook usage. 
There are two potential cases of collapse (as discussed in Caron et al. (2021)):", + "bbox": [ + 169, + 630, + 823, + 713 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Dominating codes. This is the case of \"winner-take-all\": only a small portion of codes are being predicted while others are inactive. Typical solutions for avoiding this include applying Sinkhorn-Knopp normalization (Cuturei, 2013) as in SWaV (Caron et al., 2020), centering teacher logits as in DINO (Caron et al., 2021), and applying mean-entropy maximization regularization (ME-MAX) as in MSN (Assran et al., 2022). Note that in MSN, ME-MAX is combined with Sinkhorn-Knopp normalization by default.", + "- Uniform codes. This is the case where all codes are treated equally and the predictions reduce to be uniform over codes. A simple and effective solution is to applying sharpening, i.e., using a lower temperature for computing the teacher prediction." + ], + "bbox": [ + 212, + 726, + 823, + 857 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We systematically study different techniques in a unified setup. In particular, we used DINO with the ViT-S backbone, a 3-layer MLP projection head with hidden dimension 2048, and a codebook of size 4096 and dimension 256. We applied different techniques to DINO and searched the teacher temperature in $\\{0.0125, 0.025, 0.05\\}$ for each. 
For ME-MAX, we searched regularization weight in", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/7aa3a2618ab4f81946ae6bb6a3ca92ed6ddae7efb732ca1e8841212ceb669cc1.jpg", + "image_caption": [ + "(a) Merged" + ], + "image_footnote": [], + "bbox": [ + 205, + 104, + 493, + 255 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/eacebcf1752170d31391abb882a7366a3ddf70fc8efbfdda8a126124a1dd4a4e.jpg", + "image_caption": [ + "(b) 1-shot" + ], + "image_footnote": [], + "bbox": [ + 503, + 103, + 790, + 255 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/17820ffe48f4d9f4a2a9c91f2bb0d87fd05d0efafd855e0c3791dd310c6138e9.jpg", + "image_caption": [ + "(c) $1\\%$ -data" + ], + "image_footnote": [], + "bbox": [ + 197, + 281, + 488, + 431 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/93cf075d1b3664c9ea0e84057da987b3d78503601684857ad97b19dc53f52ece.jpg", + "image_caption": [ + "(d) Full-data", + "Figure 5: Effect of projection heads for different evaluation metrics. We compare non-ensemble projection heads with different depths and widths as well as our $(h_{\\psi},\\mu)$ -ensembles, and evaluate linear evaluation performance with different amount of labeled data. (a) shows the comparison of normalized metrics for non-ensembles. (b)-(d) compares non-ensemble and $(h_{\\psi},\\mu)$ -ensembles by unnormalized metrics. 'Default' denotes the default projection heads used in many SSL methods. See analysis in Appx. C.2 for details." + ], + "image_footnote": [], + "bbox": [ + 514, + 281, + 799, + 431 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "$\\{1.0, 3.0, 5.0\\}$ . 
For ME-MAX combined with Sinkhorn, we followed Assran et al. (2022) and used default regularization weight of 1.0. The results are in Table 10. We observed that:", + "bbox": [ + 171, + 577, + 823, + 607 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- DINO's centering operation is not as strong as other techniques, and it favours a larger teacher temperature (e.g., 0.05). It does not work well when the codebook size (4096) is not as large as the one used in the original DINO model (65536). Switching to use Sinkhorn-Knopp normalization leads to much more improved performance, and matches the performance of original DINO (Table 5) with a much smaller codebook.", + "- MSN's ME-MAX regularization is very effective, and leads to significant improvements over others. We also found it is sensitive to the regularization weight and teacher temperature (c.f. Table 10). However, we observed that combining ME-MAX with Sinkhorn does not work well without tuning the regularization weight (which is recommended by Assran et al. (2022))." + ], + "bbox": [ + 212, + 618, + 826, + 762 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.2 EMPIRICAL STUDY OF PROJECTION HEADS", + "text_level": 1, + "bbox": [ + 171, + 779, + 517, + 792 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In this subsection, we systematically study the effect of projection heads for different evaluation metrics. In particular, we used DINO* ViT-S/16 as the base model and used different projection heads with (depth, width) searched over $\\{2,3,4\\} \\times \\{512,1024,2048,4096\\}$ . All models are trained with 300 epochs using exact the same set of hyper-parameters. We measured the linear evaluation performance with different amount of labeled data (i.e., full-data, $1\\%$ data, 1-shot).", + "bbox": [ + 169, + 805, + 823, + 875 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In Fig. 
5a, we plot different evaluation metrics (normalized respectively by the best of each) versus the number of projection head parameters. In Figs. 5b to 5d, we plot each unnormalized evaluation metric respectively for different heads as well as our $(h_{\\psi}, \\mu)$ -ensembles. Our key findings are:", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/dbf4514b134bb8ce72a6eac34d5f024be139ceac375ac2bd229901674992e7ca.jpg", + "image_caption": [ + "Figure 6: Effect of teacher temperature for non-ensemble DINO*. DINO* with a lower temperature can achieve better few-shot performance, but still under-performs our ensemble method (DINO*-ENT with 16 heads, orange lines). DINO* ViT-S/16 trained for 300 epochs is used and $\\tau = 0.025$ is used for DINO*-ENT." + ], + "image_footnote": [], + "bbox": [ + 341, + 103, + 651, + 270 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The projection head has a relatively larger impact on few-shot evaluation metrics, as reflected by the relative magnitudes of different metrics in Fig. 5a. An too powerful non-ensemble projection head significantly hurts the label efficiency of learned representations, reflected by a much larger drop in few-shot evaluation performance (up to 18 p.p. for 1-shot, 9 p.p. for $1\\%$ data). This result is also partially observed in Chen et al. (2020b), where they found that probing from intermediate layers of projection heads (which can be viewed as using a shallower head) could improve the semi-supervised learning ( $1\\% - 10\\%$ ) results.", + "- The optimal projection head for different metrics can differ a lot. 
A weaker head improves label efficiency (few-shot performance), while a stronger (but not too strong) head improves linear decodability. As a result, the default projection head (3/2048) that is widely used in SimCLR v2 (Chen et al., 2020b), DINO (Caron et al., 2021), iBOT (Zhou et al., 2022), MSN (Assran et al., 2022), etc., does not perform well in few-shot evaluations (as shown by the green cross denoted as 'Default'), probably because it is selected by full-data evaluation metrics.", + "- There exist some projection heads that performs decently well on all evaluation metrics, e.g., the baseline model (3/1024) used in our experiments (pink star denoted as 'Our base').", + "- Compared to naively tuning projection head architectures, our $(h_{\\psi}, \\mu)$ -ensembles (orange curves in Figs. 5b to 5d) consistently improve all metrics with different amount of labeled data, despite it also increases the number of parameters in projection heads. Our $(h_{\\psi}, \\mu)$ -ensembles outperform all non-ensembles, which also include the counterparts of probing from intermediate layers from the a deeper head (i.e., shallower heads)." + ], + "bbox": [ + 212, + 371, + 823, + 683 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C.3 EMPIRICAL STUDY OF $(h_{\\psi},\\mu)$ -ENSEMBLES", + "text_level": 1, + "bbox": [ + 171, + 703, + 514, + 718 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Are the gains of ENT purely from sharper teacher predictions? Our ENT strategy assigns higher weights to the heads that predict with lower entropies, thus effectively uses sharper teacher predictions as the targets. One may be curious about how this effect accounts for the gains of the ENT strategy. We empirically answer this question by studying the non-ensemble baseline that uses a sharper teacher predictions in a data-independent manner (in contrast to ENT, which uses data-dependent entropy weights). 
Specifically, we compare the non-ensemble DINO* that use different teacher temperature $\\tau \\in \\{0.005, 0.01, 0.025, 0.05\\}$ and also our DINO*-ENT (16) with $\\tau = 0.025$ , as shown in Fig. 6. We find that the teacher temperature has a big impact on evaluation results especially for few-shot evaluation. Compared to our default baseline that uses $\\tau = 0.025$ , a lower temperature (e.g., $\\tau = 0.01$ ) can indeed improve the 1-shot performance (at the cost of worse full-data performance). However, an too low temperature ( $\\tau = 0.005$ ) will hurt the performance. Our DINO*-ENT (16) consistently outperform all the baselines, which implies the importance of selecting sharper teacher predictions in a data-dependent manner.", + "bbox": [ + 169, + 729, + 826, + 910 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/2a91811cb30ca0ac1f030c675ed7dcecd63230129dcea028d7b0fad6c6e8d1d3.jpg", + "table_caption": [ + "Table 11: Full table of Table 1 including all metrics for comparing different ensemble strategies. ENT and PROB significantly improves over the non-ensemble baseline, while UNIF leads to no gains. Ensembling the whole projection head works the best. All models are DINO* ViT-S/16 trained for 300 epochs. The means and standard deviations over 3 initialization seeds for all evaluation results are reported." + ], + "table_footnote": [], + "table_body": "
HowWhereFew-shotFull-data
Proj. HeadCodebook125~13 (1%)k-NNLinear
Base40.6 ± 0.249.8 ± 0.257.9 ± 0.363.4 ± 0.272.3 ± 0.174.4 ± 0.1
UNIF40.4 ± 0.449.5 ± 0.457.6 ± 0.363.3 ± 0.372.2 ± 0.274.5 ± 0.2
PROB39.7 ± 0.549.0 ± 0.557.4 ± 0.463.0 ± 0.472.8 ± 0.274.8 ± 0.1
PROB41.9 ± 0.351.5 ± 0.559.6 ± 0.465.1 ± 0.373.7 ± 0.375.4 ± 0.1
ENT40.6 ± 0.449.5 ± 0.658.0 ± 0.463.5 ± 0.472.1 ± 0.374.5 ± 0.3
ENT43.0 ± 0.652.2 ± 0.859.7 ± 0.764.8 ± 0.572.9 ± 0.675.1 ± 0.4
ENT44.0 ± 0.253.0 ± 0.560.5 ± 0.365.5 ± 0.173.2 ± 0.175.3 ± 0.1
ENT-ST40.0 ± 0.539.2 ± 0.657.3 ± 0.562.7 ± 0.571.9 ± 0.474.0 ± 0.4
", + "bbox": [ + 174, + 189, + 823, + 345 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/10140e926b5dcade496087f3088fda8e813b6a2202ec94f8156c76ad55529556.jpg", + "table_caption": [ + "Table 12: Comparison of different varants of PROB. The PROB strategy used in our experiments performs the best. ' -' in the table denotes training divergence for PROB-MAX. The experimental setup is the same as Table 11." + ], + "table_footnote": [], + "table_body": "
HowWhereFew-shotFull-data
Weight byTemp. γ125~13 (1%)k-NNLinear
Base40.6 ± 0.249.8 ± 0.257.9 ± 0.363.4 ± 0.272.3 ± 0.174.4 ± 0.1
PROBstudent141.9 ± 0.351.5 ± 0.559.6 ± 0.465.1 ± 0.373.7 ± 0.375.4 ± 0.1
PROB-TEteacher141.5 ± 0.250.4 ± 0.358.3 ± 0.363.7 ± 0.172.3 ± 0.274.6 ± 0.1
PROB-MAXstudent0------
PROB-MAX-TEteacher041.4 ± 0.250.3 ± 0.358.1 ± 0.363.6 ± 0.272.3 ± 0.274.5 ± 0.2
", + "bbox": [ + 174, + 412, + 821, + 512 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Comparison of different ensemble strategies and variants We present the full table of Table 1 that includes all the metrics in Table 11. The same observation holds for all metrics.", + "bbox": [ + 169, + 532, + 823, + 560 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "For all previous studies, we considered a specific instantiation of PROB strategy, i.e., weight by student predicted probabilities $f_{ijy} = \\log s(y|\\theta_j,x)$ and $\\gamma = 1$ , which has a nice interpretation of model average (see Sec. 3.3). We also studied different variants of the PROB strategy (see Appx. D.1),", + "bbox": [ + 169, + 566, + 826, + 609 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "PROB-TE: weight by teacher $f_{ijy} = \\log t_i(y|x)$ and $\\gamma = 1$", + "PROB-MAX: weight by student $f_{ijy} = \\log s_j(y|x)$ and $\\gamma \\rightarrow 0$", + "PROB-MAX-TE: weight by teacher $f_{ijy} = \\log t_i(y|x)$ and $\\gamma \\to 0$" + ], + "bbox": [ + 212, + 622, + 663, + 681 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Table 12 compares the downstream performance for all the variants. We find that the our PROB (used in our empirical studies) performs better than other variants. Interestingly, weighting by the teacher (PROB-TE) performs worse than PROB. We conjecture that this is because the important weights turn out to give a weighted average of teacher predictions as the surrogate target that is shared across all students (like PROB) but does not give effective preferential treatment across students which are directly optimized (unlike PROB-TE). Furthermore, PROB-MAX which sharpens the importance weights leads to training divergence. This is probably because the student predictions have higher variance based on which sharp weights lead to unstable training. 
In contrast, PROB-MAX-TE which uses the (lower-variance) teacher gives reasonable results and comparable to PROB-TE.", + "bbox": [ + 169, + 691, + 826, + 819 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Number of ensembles for $\\mathbf{MSN}^*$ In Fig. 7a, we study the effect of increasing the number of $(h_{\\psi},\\mu)$ -ensembles for $\\mathbf{MSN}^*$ -ENT with ViT-S/16 trained for 800 epochs. The scaling trend is similar to DINO\\*-ENT (Fig. 3a) and the gains start to diminish when the number of heads increases above 8.", + "bbox": [ + 169, + 835, + 826, + 878 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Effect of ENT temperature $\\gamma$ for $\\mathbf{MSN}^*$ Fig. 7b studies the effect of entropy weighting temperature $\\gamma$ for $\\mathbf{MSN}^*$ -ENT. We observed that $\\mathbf{MSN}^*$ is more robust to small temperatures, and the", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/490cb30743786891a399ad47d2e7d5d357f8e031ff6e40f82d53e6a5347c2e08.jpg", + "image_caption": [ + "(a) Scaling of ensembles" + ], + "image_footnote": [], + "bbox": [ + 207, + 104, + 488, + 257 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/0a9b966552940abc5d3f63d5e0d2d091adf2b746587f3106e8aadebcd57a9fd5.jpg", + "image_caption": [ + "(b) Effect of ENT temperature $\\gamma$", + "Figure 7: Empirical study for $\\mathbf{MSN}^*$ -ENT. (a) The gains by increasing the number of $(h_{\\psi},\\mu)$ ensembles start to diminish when it is over 8 heads. (b) $\\mathbf{MSN}^*$ prefers smaller temperature for entropy weighting than DINO*." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 102, + 782, + 258 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "best $\\gamma = 0.01$ is smaller than that of DINO\\* $(\\gamma = 0.05)$ . When the temperature is too high, the performance drops as a result of under-specialization (i.e., less diversity) as with DINO\\*.", + "bbox": [ + 169, + 414, + 823, + 445 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C.4 ANALYZING $(h_{\\psi},\\mu)$ -ENSEMBLE DIVERSITY", + "text_level": 1, + "bbox": [ + 171, + 518, + 517, + 534 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Visualizing $(h_{\\psi},\\mu)$ -ensemble similarity We analyze the diversity between different heads by visualizing the similarity matrix between their codes. Directly measuring the similarity between codes in two heads could not work, because 1) they may live in different subspaces because of the ensembled projection heads; 2) they may not align in the natural order but in a permuted order.", + "bbox": [ + 169, + 566, + 823, + 625 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Therefore, we seek to align codes between different heads by how they are effectively used to 'cluster' the data. In particular, we use a set of randomly sampled inputs $\\{x^i\\}_{i\\in [b]}$ of size $b = 51200$ to obtain an empirical code assignment matrix $A^{j}\\in \\mathbb{R}^{b\\times c}$ for each $(h_{\\psi},\\mu)$ -ensemble $j\\in [m]$ , where the $i$ -th row of $A^j$ corresponds to the teacher predictions $t_j(Y|x^i)$ . For the $k$ -th code in the head $j$ , we extract the $k$ -th column from $A^j$ (i.e., its empirical assignment) as its embedding. For two codes, we measure their similarity by the cosine similarity between their embeddings. For a pair of heads $j$ and $j'$ , we align their codes using the Hungarian algorithm (Kuhn, 1955) to maximize the sum of cosine similarity. 
After that, we plot the similarity matrix which is aligned and reordered by the similarity value on the diagonal (in an descending order). Note that it is not necessary to do the alignment procedure for the PROB strategy since it is naturally aligned because of the direct distribution averaging over $(h_{\\psi},\\mu)$ -ensembles, but we did for fair comparison with other strategies.", + "bbox": [ + 169, + 630, + 826, + 792 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We applied the same procedure for different ensemble weighting strategies using DINO* with 4 $(h_{\\psi},\\mu)$ -ensembles. We randomly picked a pair of heads and visualize the similarity matrix before (top row) and after (bottom row) the alignment-reordering setup in Fig. 8. We found that before the alignment procedure, the similarity matrix of the PROB strategy already mostly aligns because it explicitly introduces code correspondence between different heads. Furthermore, by analyzing the similarity decay pattern on the diagonal, it is clear that ENT learns the most diverse $(h_{\\psi},\\mu)$ -ensembles while UNIF learns the least ones, which may explain the difference of their empirical performance. For completeness, we also include the visualization of aligned similarity matrices for all pairs of heads in Figs. 
9 to 11, the observations are the same.", + "bbox": [ + 169, + 797, + 826, + 925 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/93d84602fa00e114dd3c0daeb60a27b653229fa1fbb04e8c93b8bae3163f38e4.jpg", + "image_caption": [ + "(a) UNIF" + ], + "image_footnote": [], + "bbox": [ + 217, + 117, + 379, + 371 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/69e3ff8a8e40a87c085722110883fb610a78890e858601ad95efdd9cf61d3d2a.jpg", + "image_caption": [ + "(b) PROB" + ], + "image_footnote": [], + "bbox": [ + 401, + 117, + 563, + 371 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/7634234890279c11db0dfd9b0cb24cd3d4b24177529df99d2e1436bfc66bea1b.jpg", + "image_caption": [ + "(c)ENT" + ], + "image_footnote": [], + "bbox": [ + 588, + 118, + 781, + 373 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/e05471b9f5e868d1dffbd8e5bcea52e9435ba7d5d8be5b8126734318036c4abf.jpg", + "image_caption": [ + "Figure 8: Visualization of $(h_{\\psi},\\mu)$ -ensemble diversity. ENT learns the most diverse $(h_{\\psi},\\mu)$ -ensembles while UNIF learns the least ones. We visualize the code similarity matrix between a pair of randomly selected projection heads. Top row shows the original similarity matrix (i.e., in natural order) and the bottom row shows the aligned similarity matrix which aligns codes by empirical assignment probabilities. DINO* ViT-S/16 with 4 heads is used. Best viewed in color." 
+ ], + "image_footnote": [], + "bbox": [ + 232, + 506, + 401, + 681 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/4fcb7930e54c036bf09639468394c07434ebea281fd08eeee5a751eb6cbb0c30.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 408, + 507, + 576, + 680 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/be1db7c343bf205e685344364e792bd47d8b3209eaab5706fca18ff963715821.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 588, + 507, + 790, + 680 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/b26d4702e63657ab529e61b968bf21c875cc03721b6843d0481d2a8fd9ae83f9.jpg", + "image_caption": [ + "Figure 9: Visualization of $(h_{\\psi},\\mu)$ -ensemble diversity between all pairs of heads for DINO*UNIF. The UNIF strategy does not learn diverse $(h_{\\psi},\\mu)$ -ensembles. DINO* with ViT-S/16 and 4 heads is used. Best viewed in color." + ], + "image_footnote": [], + "bbox": [ + 233, + 688, + 398, + 816 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/d0eeef05fd5b7cbc2af0e80f19c6c5bc29c4d802d0008e6b11d345448f6f5d6a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 410, + 688, + 576, + 816 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/800c2c5d5ceee656fe26bbec9d5a4eec6a5751cf5cc5f19a9e7a56734071c9b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 588, + 688, + 753, + 816 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/272be9539ff5f046a8c9e23b472a43e11da8447364537fc81791658061a50b93.jpg", + "image_caption": [ + "Head 2-Head 3" + ], + "image_footnote": [], + "bbox": [ + 233, + 142, + 401, + 294 + ], + "page_idx": 25 + }, + 
{ + "type": "image", + "img_path": "images/be6fa8e76ae37017b8a194d4f64f1ae0bc32fecf97ff2739cc8970c7f355fe34.jpg", + "image_caption": [ + "Head 2 - Head 4" + ], + "image_footnote": [], + "bbox": [ + 410, + 142, + 576, + 294 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/01b47cb375d674dc6f01d1367efc025e231692d883ac3813544199c4fb5ff68f.jpg", + "image_caption": [ + "Head 3 - Head 4" + ], + "image_footnote": [], + "bbox": [ + 588, + 142, + 790, + 295 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/ae5e040b53d11107d21ac0a879235838cf924b9ab2b0f5caa0a1a792481f6568.jpg", + "image_caption": [ + "Figure 10: Visualization of $(h_{\\psi},\\mu)$ -ensemble diversity between all pairs of heads for DINO\\*PROB. The PROB strategy learns more diverse $(h_{\\psi},\\mu)$ -ensembles than UNIF. DINO\\* with ViT-S/16 and 4 heads is used. Best viewed in color." + ], + "image_footnote": [], + "bbox": [ + 233, + 324, + 400, + 452 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/1375ae686351af0267608f622f6a018b95da8a0139150e0e7f5197326005e972.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 410, + 324, + 576, + 452 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/3ebf637ad44c2d4a824c4cab904b9560dcb9dba73522fc73344666c0257ba54f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 588, + 324, + 754, + 452 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/151f38dca80bdbc22fd11b7f1783b360b52b7abe6e22abdf1102b673eb0e0f96.jpg", + "image_caption": [ + "Head 2 - Head 3" + ], + "image_footnote": [], + "bbox": [ + 232, + 541, + 400, + 694 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/08efe47fa419970fcf1c422d4cc1d375935b1f0185e4068c9e706deec3c3fc1f.jpg", + "image_caption": [ + "Head 2 - Head 4" + ], + "image_footnote": [], + "bbox": [ + 410, + 542, + 576, + 694 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": 
"images/b36c8b5148138c8101d06cc95e49969bf5472dfab11ef4555ab5f67274ad87ce.jpg", + "image_caption": [ + "Head 3 - Head 4" + ], + "image_footnote": [], + "bbox": [ + 588, + 542, + 790, + 695 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/99012c1cf59f4ca983add61d184cc261235a1f27e6126857cea6e0510e9fe7e8.jpg", + "image_caption": [ + "Figure 11: Visualization of $(h_{\\psi},\\mu)$ -ensemble diversity between all pairs of heads for DINO*-ENT. The ENT strategy learns the most diverse $(h_{\\psi},\\mu)$ -ensembles. DINO* with ViT-S/16 and 4 heads is used. Best viewed in color." + ], + "image_footnote": [], + "bbox": [ + 233, + 724, + 400, + 853 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/bec4ce2bf9a81d5b0de88b89f5799727a134240fd9cf66377c3e800caf989a13.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 410, + 724, + 576, + 853 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/4151f52eebf1277b2204f9bf6d0a923d161832e267af427000acb645d8f04a57.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 588, + 724, + 754, + 853 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "D ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 102, + 297, + 118 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "D.1 DERIVATIONS", + "text_level": 1, + "bbox": [ + 171, + 135, + 316, + 148 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "In this subsection, we provide derivations for some non-trivial losses that we explore within our framework.", + "bbox": [ + 169, + 161, + 823, + 189 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Recall that our weighted cross-entropy loss is of the form,", + "bbox": [ + 
171, + 196, + 555, + 212 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {n} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} H ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] (15) \\\\ = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \\log s (y | \\theta_ {j}, x) (16) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 219, + 823, + 297 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\nw _ {i j y} = \\operatorname {s o f t m a x} \\left(\\left\\{\\frac {1}{\\gamma} f _ {i j y} (\\operatorname {s t o p g r a d} (\\theta), x): i, j \\in [ m ] \\right\\}\\right). \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 300, + 823, + 327 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Furthermore, observe that,", + "bbox": [ + 171, + 340, + 351, + 354 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta} \\sum_ {i, j \\in [ m ]} \\mathsf {H} ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] = \\sum_ {i, j \\in [ m ]} \\int_ {\\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \\nabla_ {\\theta} \\log s (y | \\theta_ {j}, x) \\mathrm {d} y. \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 362, + 823, + 400 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "This indicates that the proposed weighted ensemble SSL loss is simply a reweighted log-likelihood loss. We use this fact in our derivation of probability weighting (PROB) loss.", + "bbox": [ + 169, + 407, + 823, + 436 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Uniform weighting (UNIF) Our UNIF strategy in Eq. 
(6) uses $f_{ijy} = \\log \\delta (i - j)$ which gives $w_{ijy} = \\frac{1}{m}\\delta (i - j)$ (for any choice of $\\gamma$ ), thus the loss,", + "bbox": [ + 169, + 450, + 823, + 483 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} \\frac {1}{m} \\delta (i - j) t _ {i} (y | x) \\log s (y | \\theta_ {j}, x) (19) \\\\ = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m} \\sum_ {i \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right] (20) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 489, + 823, + 569 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "This loss assigns equal weights to $m$ pairs of paired student/teacher.", + "bbox": [ + 169, + 575, + 642, + 590 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "A straightforward generalization is to assign equal weights to all possible pairs $(m^2)$ of student/teacher with $f_{ijy} = 0$ and $w_{ijy} = \\frac{1}{m^2}$ , which gives the UNIF-ALL loss,", + "bbox": [ + 169, + 597, + 826, + 628 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\text {U N I F - A L L}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m ^ {2}} \\sum_ {i, j \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right], \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 633, + 823, + 672 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Probability weighting (PROB) Recall our PROB loss in Eq. 
(7) has the form,", + "bbox": [ + 171, + 686, + 694, + 703 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\mathrm {H} ^ {\\times} \\left[ \\frac {1}{m} \\sum_ {i \\in [ m ]} t _ {i} (Y | x), \\frac {1}{m} \\sum_ {j \\in [ m ]} s (Y | \\theta_ {j}, x) \\right]. \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 279, + 710, + 823, + 758 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "We derive its equivalence with our general loss with $f_{ijy} = \\log s(y|\\theta_j,x)$ and $\\gamma = 1$ in terms of the gradients,", + "bbox": [ + 169, + 766, + 823, + 795 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\nabla_ {\\theta} \\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\log \\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j}, x) \\mathrm {d} y (23) \\\\ = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\nabla_ {\\theta} \\log \\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j}, x) d y (24) \\\\ = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\frac {\\frac {1}{m} \\sum_ {j \\in [ m ]} \\nabla_ {\\theta} s (y | \\theta_ {j} , x)}{\\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j} , x)} d y (25) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 248, + 801, + 823, + 926 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\frac {\\frac {1}{m} \\sum_ {j \\in 
[ m ]} s (y | \\theta_ {j} , x) \\nabla_ {\\theta} \\log s (y | \\theta_ {j} , x)}{\\frac {1}{m} \\sum_ {j ^ {\\prime} \\in [ m ]} s (y | \\theta_ {j ^ {\\prime}} , x)} d y (26) \\\\ = \\frac {1}{m} \\sum_ {i, j \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\frac {s (y | \\theta_ {j} , x)}{\\sum_ {j ^ {\\prime} \\in [ m ]} s (y | \\theta_ {j ^ {\\prime}} , x)} \\nabla_ {\\theta} \\log s (y | \\theta_ {j}, x) d y (27) \\\\ = \\nabla_ {\\theta} \\frac {1}{m} \\sum_ {i, j \\in [ m ]} \\mathsf {H} ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] (28) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 99, + 823, + 224 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "where $w_{ijy} = \\frac{s(y|\\theta_j,x)}{\\sum_{j'\\in[m]}s(y|\\theta_{j'},x)}$ (or equivalently, $f_{ijy} = \\log s(y|\\theta_j,x)$ and $\\gamma = 1$ ). The last equality is because $w_{ijy}$ is stopped gradient with respect to $\\theta$ . This is the same analysis as done in Burda et al. (2016). The above formulation establishes the equivalence of gradients between two losses, which implies the same behavior (e.g., optimum) using gradient-based optimization, as the common practice of deep learning.", + "bbox": [ + 169, + 237, + 826, + 316 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We also generalize this loss to some variants which we explore in Table 12. 
A \"dual\" variant is to use teacher predictions $f_{ijy} = \\log t_i(y|x)$ instead of student ones; this implies $w_{ijy} = \\frac{t_i(y|x)}{\\sum_{i' \\in [m]} t_{i'}(y|x)}$ and the PROB-TE loss,", + "bbox": [ + 169, + 321, + 823, + 371 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B - T E}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} \\frac {t _ {i} (y | x)}{\\sum_ {i ^ {\\prime} \\in [ m ]} t _ {i ^ {\\prime}} (y | x)} t _ {i} (y | x) \\log s (y | \\theta_ {j}, x). \\tag {29}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 378, + 823, + 419 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Note that this simply reduces to use a weighted teacher predictions $\\frac{t_i(y|x)}{\\sum_{i' \\in [m]} t_{i'}(y|x)} t_i(y|x)$ as the surrogate target that is shared across all students.", + "bbox": [ + 169, + 429, + 823, + 465 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Another generalization is to use \"hard\" weighting, i.e., $\\gamma \\rightarrow 0$ , which gives the PROB-MAX loss that only assigns weight to the most confident student,", + "bbox": [ + 169, + 470, + 823, + 501 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B - M A X}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \\log s (y | \\theta_ {j}, x) \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 258, + 508, + 823, + 547 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nw _ {i j y} = \\delta \\left(i - i ^ {*}\\right) \\delta \\left(j - j ^ {*}\\right), \\quad \\left(i ^ {*}, j ^ {*}\\right) = \\arg \\max _ {i j} f _ {i j y}, \\forall y \\in \\mathcal {Y}. 
\\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 549, + 823, + 573 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "This loss reduces to a generalization of multiple choice learning (Guzman-Rivera et al., 2012) used in multi-headed networks (Lee et al., 2015) in our ensemble SSL setup. Similarly we can also derive the dual variant of it that uses the teacher predictions, which is omitted here for brevity.", + "bbox": [ + 169, + 582, + 823, + 625 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Entropy weighting (ENT) The derivation of ENT loss in Eq. (9) is similar to the UNIF loss but applies entropy weights. Recall that we use $f_{ijy} = -\\mathsf{H}[t_i(Y|x)] + \\log \\delta (i - j)$ , which gives $w_{ijy} = \\mathrm{softmax}_i(\\{-\\frac{1}{\\gamma}\\mathsf{H}[t_{i'}(Y|x)]:i'\\in [m]\\})$ and,", + "bbox": [ + 169, + 641, + 823, + 688 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\mathrm {E N T}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i \\in [ m ]} \\operatorname {s o f t m a x} _ {i} \\left(\\left\\{- \\frac {1}{\\gamma} \\mathrm {H} \\left[ t _ {i ^ {\\prime}} (Y | x) \\right]: i ^ {\\prime} \\in [ m ] \\right\\}\\right) \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right]. 
\\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 696, + 823, + 736 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "One can also generalize it to its dual variant which uses the student entropies, i.e., $f_{ijy} = -\\mathsf{H}[s(Y|\\theta_j,x)] + \\log \\delta (i - j)$ , which gives the ENT-ST loss,", + "bbox": [ + 169, + 744, + 823, + 776 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\mathrm {E N T - S T}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i \\in [ m ]} \\operatorname {s o f t m a x} _ {i} \\left(\\left\\{- \\frac {1}{\\gamma} \\mathrm {H} [ s (Y | \\theta_ {i ^ {\\prime}}, x) ]: i ^ {\\prime} \\in [ m ] \\right\\}\\right) \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right]. \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 782, + 823, + 835 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "D.2 RELATING SOME LOSSES", + "text_level": 1, + "bbox": [ + 171, + 854, + 390, + 869 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Here, we relate some losses derived above. Specifically, we relate the uniform weighting (UNIF, UNIF-ALL) and probability weighting (PROB) in Appx. D.2.1, and relate entropy weighting (ENT) and variance weighting in Appx. 
D.2.2.", + "bbox": [ + 169, + 881, + 826, + 925 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "D.2.1 UNIFORM & PROBABILITY WEIGHTING", + "text_level": 1, + "bbox": [ + 171, + 103, + 508, + 118 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We first establish the relation between UNIF and PROB using the joint convexity of unnormalized KL divergence and the fact that our weighted cross-entropy loss is a weighted unnormalized KL divergence up to some constant in $\\theta$ . In particular, the joint convexity of unnormalized KL divergence can be shown by combining the facts that Csiszár $f$ -divergences are jointly convex (Proposition 1 in Dragomir (2013)) and unnormalized KL divergence corresponds to the convex generator, $f(u) = u\\log u - u + 1$ , as required by the proposition.", + "bbox": [ + 169, + 127, + 826, + 212 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "First, our weighted cross-entropy loss is unnormalized KL divergence up to some constant in $\\theta$ :", + "bbox": [ + 169, + 218, + 799, + 233 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m} \\sum_ {i \\in [ m ]} \\mathrm {K} \\left[ t _ {i} (Y | x), s \\left(Y \\mid \\theta_ {i}, x\\right) \\right] + \\text {c o n s t a n t} \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 253, + 239, + 823, + 277 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} K \\left[ \\frac {1}{m} \\sum_ {i \\in [ m ]} t _ {i} (Y | x), \\frac {1}{m} \\sum_ {j \\in [ m 
]} s (Y | \\theta_ {j}, x) \\right] + \\text {c o n s t a n t} \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 253, + 281, + 823, + 330 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Therefore, the joint convexity of (unnormalized) KL divergence directly implies an ordering of the loss up to some constant in $\\theta$ , i.e.,", + "bbox": [ + 169, + 335, + 823, + 364 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} \\leq \\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} \\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 446, + 371, + 823, + 388 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Furthermore, we can also relate PROB and UNIF-ALL using the fact that the (unnormalized) cross-entropy $\\mathsf{H}^{\\times}[p(X), q(X)]$ is linear in the first argument $p$ but convex in the second argument $q$ , which implies,", + "bbox": [ + 169, + 396, + 826, + 438 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {n} ^ {\\text {P R O B}} \\leq \\mathcal {L} _ {n} ^ {\\text {U N I F - A L L}} \\tag {37}\n$$\n", + "text_format": "latex", + "bbox": [ + 433, + 445, + 823, + 464 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "D.2.2 ENTROPY & VARIANCE WEIGHTING", + "text_level": 1, + "bbox": [ + 171, + 477, + 485, + 491 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Suppose $p(X)$ is a discrete distribution (normalized) on $\\mathcal{X} = [c]$ . 
It can be shown that,", + "bbox": [ + 169, + 501, + 740, + 518 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\mathsf {H} [ p (X) ] \\leq \\frac {1}{2} \\log \\left(\\operatorname {V a r} _ {p} [ X ] + \\frac {1}{1 2}\\right) + \\frac {1}{2} \\log (2 \\pi e) \\tag {38}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 523, + 823, + 542 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "where $\\operatorname{Var}_p[X] = \\sum_{x \\in [c]} p(x)(x - \\mu)^2$ and $\\mu = \\mathsf{E}_p[X] = \\sum_{x \\in [c]} p(x)x$ (Theorem 9.7.1, Cover & Thomas (1999)). Note, a tighter bound (Mow, 1998) also exists but it places stronger restrictions on $p$ . This relationship suggests that choosing weights proportional to $\\exp(-\\mathsf{H}[t_i(Y|x)])$ (as in ENT) is potentially related to choosing weights proportional to weighting by variance $(\\operatorname{Var}_{t_i(Y|x)}[Y] + \\epsilon)^{-1/2}$ where $(\\epsilon = \\frac{1}{12})$ .", + "bbox": [ + 169, + 549, + 826, + 630 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 28 + } +] \ No newline at end of file diff --git a/2023/Weighted Ensemble Self-Supervised Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_model.json b/2023/Weighted Ensemble Self-Supervised Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_model.json new file mode 100644 index 0000000000000000000000000000000000000000..610120ef243c1a14eeb0d407aa00da6d4b2fa8bc --- /dev/null +++ b/2023/Weighted Ensemble Self-Supervised Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_model.json @@ -0,0 +1,4878 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.086, + 
0.819, + 0.109 + ], + "angle": 0, + "content": "WEIGHTED ENSEMBLE SELF-SUPERVISED LEARNING" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.121, + 0.8, + 0.185 + ], + "angle": 0, + "content": "Yangjun Ruan*† Saurabh Singh Warren Morningstar Alexander A. Alemi \nSergey Ioffe Ian Fischer† Joshua V. Dillon† \nGoogle Research" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.205, + 0.548, + 0.22 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.226, + 0.77, + 0.451 + ], + "angle": 0, + "content": "Ensembling has proven to be a powerful technique for boosting model performance, uncertainty estimation, and robustness in supervised learning. Advances in self-supervised learning (SSL) enable leveraging large unlabeled corpora for state-of-the-art few-shot and supervised learning performance. In this paper, we explore how ensemble methods can improve recent SSL techniques by developing a framework that permits data-dependent weighted cross-entropy losses. We refrain from ensembling the representation backbone; this choice yields an efficient ensemble method that incurs a small training cost and requires no architectural changes or computational overhead to downstream evaluation. The effectiveness of our method is demonstrated with two state-of-the-art SSL methods, DINO (Caron et al., 2021) and MSN (Assran et al., 2022). Our method outperforms both in multiple evaluation metrics on ImageNet-1K, particularly in the few-shot setting. We explore several weighting schemes and find that those which increase the diversity of ensemble heads lead to better downstream evaluation results. Thorough experiments yield improved prior art baselines which our method still surpasses; e.g., our overall improvement with MSN ViT-B/16 is 3.9 p.p. for 1-shot learning." 
+ }, + { + "type": "title", + "bbox": [ + 0.174, + 0.464, + 0.338, + 0.479 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.487, + 0.487, + 0.626 + ], + "angle": 0, + "content": "The promise of self-supervised learning (SSL) is to extract information from unlabeled data and leverage this information in downstream tasks (He et al., 2020; Caron et al., 2021); e.g., semi-supervised learning (Chen et al., 2020a,b), robust learning (Radford et al., 2021; Ruan et al., 2022; Lee et al., 2021), few-shot learning (Assran et al., 2022), and supervised learning (Tomasev et al., 2022). These successes have encouraged increasingly advanced SSL techniques" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.626, + 0.825, + 0.655 + ], + "angle": 0, + "content": "(e.g., Grill et al., 2020; Zbontar et al., 2021; He et al., 2022). Perhaps surprisingly however, a simple and otherwise common idea has received limited consideration: ensembling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.66, + 0.825, + 0.732 + ], + "angle": 0, + "content": "Ensembling combines predictions from multiple trained models and has proven effective at improving model accuracy (Hansen & Salamon, 1990; Perrone & Cooper, 1992) and capturing predictive uncertainty in supervised learning (Lakshminarayanan et al., 2017; Ovadia et al., 2019). Ensembling in the SSL regime is nuanced, however; since the goal is to learn useful representations from unlabeled data, it is less obvious where and how to ensemble. We explore these questions in this work." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.737, + 0.827, + 0.891 + ], + "angle": 0, + "content": "We develop an efficient ensemble method tailored for SSL that replicates the non-representation parts (e.g., projection heads) of the SSL model. 
In contrast with traditional \"post-training\" ensembling, our ensembles are only used during training to facilitate the learning of a single representation encoder, which yields no extra cost in downstream evaluation. We further present a family of weighted cross-entropy losses to effectively train the ensembles. The key component of our losses is the introduction of data-dependant importance weights for ensemble members. We empirically compare different choices from our framework and find that the choice of weighting schemes critically impacts ensemble diversity, and that greater ensemble diversity correlates with improved downstream performance. Our method is potentially applicable to many SSL methods; we focus on DINO (Caron et al., 2021) and MSN (Assran et al., 2022) to demonstrate its effectiveness. Fig. 1 shows DINO improvements from using our ensembling and weighted cross-entropy loss." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.897, + 0.714, + 0.911 + ], + "angle": 0, + "content": "*University of Toronto & Vector Institute. Work done as a student researcher at Google." + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.911, + 0.75, + 0.924 + ], + "angle": 0, + "content": "\\(^{\\dagger}\\)Correspondence to yjruan@cs.toronto.edu, {iansf, jvdillon} @ google.com." 
+ }, + { + "type": "list", + "bbox": [ + 0.191, + 0.897, + 0.75, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.452, + 0.119 + ], + "angle": 0, + "content": "In summary, our core contributions are to:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.125, + 0.822, + 0.141 + ], + "angle": 0, + "content": "- Develop a downstream-efficient ensemble method suitable for many SSL techniques (Sec. 3.1)." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.142, + 0.76, + 0.157 + ], + "angle": 0, + "content": "- Characterize an ensemble loss family of weighted cross-entropy objectives (Sec. 3.2)." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.158, + 0.826, + 0.173 + ], + "angle": 0, + "content": "- Conduct extensive ablation studies that improve the prior art baselines by up to 6.3 p.p. (Sec. 5.1)." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.174, + 0.82, + 0.19 + ], + "angle": 0, + "content": "- Further improve those baselines with ensembling (e.g., up to 5.5 p.p. gain for 1-shot) (Table 2)." + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.125, + 0.826, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.209, + 0.33, + 0.224 + ], + "angle": 0, + "content": "2 BACKGROUND" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.239, + 0.827, + 0.281 + ], + "angle": 0, + "content": "In this section, we frame SSL methods from the perspective of maximum likelihood estimation (MLE) and use this as the notational basis to describe the state-of-the-art clustering-based SSL methods as well as derive their ensembled variants in Sec. 3." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.296, + 0.827, + 0.385 + ], + "angle": 0, + "content": "From Maximum Likelihood to SSL Denote unnormalized KL divergence (Dikmen et al., 2014) between non-negative integrable functions \\( p, q \\) by \\( \\mathsf{K}[p(X), q(X)] = \\mathsf{H}^{\\times}[p(X), q(X)] - \\mathsf{H}[p(X)] \\), where \\( \\mathsf{H}^{\\times}[p(X), q(X)] = -\\int_{\\mathcal{X}} p(x) \\log q(x) \\, \\mathrm{d}x + \\int_{\\mathcal{X}} q(x) \\, \\mathrm{d}x - 1 \\) is the unnormalized cross-entropy (with \\( 0 \\log 0 = 0 \\)) and \\( \\mathsf{H}[p(X)] = \\mathsf{H}^{\\times}[p(X), p(X)] \\). These quantities simplify to their usual definitions when \\( p, q \\) are normalized, but critically they enable flexible weighting of distributions for the derivation of our weighted ensemble losses in Sec. 3.2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.391, + 0.825, + 0.435 + ], + "angle": 0, + "content": "Let \\(\\nu(X, Y) = \\nu(X)\\nu(Y|X)\\) be a natural distribution of input/target pairs over the space \\(\\mathcal{X} \\times \\mathcal{Y}\\) and \\(s(Y|\\theta, X)\\) be a predictive model of target given the input parameterized by \\(\\theta \\in \\mathcal{T}\\). Supervised maximum likelihood seeks the minimum expected conditional population risk with respect to \\(\\theta\\)," + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.44, + 0.825, + 0.457 + ], + "angle": 0, + "content": "\\[\n\\mathsf {E} _ {\\nu (X)} \\mathsf {K} [ \\nu (Y | X), s (Y | \\theta , X) ] = \\mathsf {E} _ {\\nu (X)} \\mathsf {H} ^ {\\times} [ \\nu (Y | X), s (Y | \\theta , X) ] - \\mathsf {E} _ {\\nu (X)} \\mathsf {H} [ \\nu (Y | X) ]. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.47, + 0.825, + 0.539 + ], + "angle": 0, + "content": "Henceforth omit \\(\\mathsf{E}_{\\nu(X)} \\mathsf{H}[\\nu(Y|X)]\\) since it is constant in \\(\\theta\\). Since \\(\\nu(X, Y)\\) is unknown, a finite sample approximation is often employed. 
Denote a size-\\(n\\) i.i.d. training set by \\(\\mathcal{D}_n = \\{x_i\\}_{i \\in [n]} \\sim \\nu^{\\otimes n}\\) and empirical distribution by \\(\\hat{\\nu}(X, Y) = \\frac{1}{n} \\sum_{x \\in \\mathcal{D}_n, y \\sim \\nu(Y|x)} \\delta(X - x) \\delta(Y - y)\\) where \\(\\delta: \\mathbb{R} \\to \\{0, 1\\}\\) is 1 when \\(x = 0\\) and 0 otherwise. The sample risk is thus \\(-\\frac{1}{n} \\sum_{x \\in \\mathcal{D}_n} \\mathsf{H}^\\times[\\hat{\\nu}(Y|x), s(Y|\\theta, x)]\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.544, + 0.827, + 0.648 + ], + "angle": 0, + "content": "In SSL, we interpret \\(\\nu(Y|x)\\) as being the oracle teacher under a presumption of how the representations will be evaluated on a downstream task. This assumption is similar to that made in Arora et al. (2019); Nozawa et al. (2020). We also assume \\(\\hat{\\nu}(Y|X)\\) is inaccessible and/or unreliable. Under this view, some SSL techniques substitute \\(\\hat{\\nu}(Y|x)\\) for a weakly learned target or \"teacher\", \\(t(Y|x)\\). We don't generally expect \\(t(Y|x)\\) to recover \\(\\nu(Y|x)\\); we only assume that an optimal teacher exists and it is \\(\\nu(Y|x)\\). With the teacher providing the targets, the loss becomes \\(-\\frac{1}{n}\\sum_{x\\in\\mathcal{D}_n}\\mathsf{H}^\\times[t(Y|x), s(Y|\\theta, x)]\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.658, + 0.825, + 0.702 + ], + "angle": 0, + "content": "Teacher and student in clustering SSL methods Clustering SSL methods such as SWaV (Caron et al., 2020), DINO (Caron et al., 2021), and MSN (Assran et al., 2022) employ a student model characterized by proximity between learned codebook entries and a data-dependent code," + }, + { + "type": "equation", + "bbox": [ + 0.287, + 0.706, + 0.825, + 0.741 + ], + "angle": 0, + "content": "\\[\ns (Y | \\theta , x) = \\operatorname {s o f t m a x} \\left(\\left\\{\\frac {1}{\\tau} \\frac {\\left(h _ {\\psi} \\circ r _ {\\omega}\\right) (x) \\cdot \\mu_ {y}}{\\| \\left(h _ {\\psi} \\circ r _ {\\omega}\\right) (x) \\| _ {2} \\| \\mu_ {y} \\| _ {2}}: y \\in [ c ] \\right\\}\\right) \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.742, + 0.825, + 0.76 + ], + "angle": 0, + "content": "\\[\n\\theta = \\{\\omega , \\psi , \\left\\{\\mu_ {y} \\right\\} _ {y \\in [ c ]} \\} \\in \\mathcal {T}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.827, + 0.876 + ], + "angle": 0, + "content": "where the encoder \\(r_{\\omega}:\\mathcal{X}\\to \\mathcal{Z}\\) produces the representations used for downstream tasks, and the projection head \\(h_\\psi :\\mathcal{Z}\\rightarrow \\mathbb{R}^d\\) and codebook entries \\(\\{\\mu_y\\}_{y\\in \\mathcal{Y}}\\in \\mathbb{R}^d\\) characterize the SSL loss. Eq. (2) can be viewed as \"soft clustering\", where the input is assigned to those centroids that are closer to the projection head's output. The projection head and codebook are used during training but thrown away for evaluation, which is empirically found vital for downstream tasks (Chen et al., 2020a;b). Hyperparameters \\(\\tau \\in \\mathbb{R}_{>0},c\\in \\mathbb{Z}_{>0}\\) represent temperature and codebook size. The teacher is defined as \\(t(Y|x) = s(Y|\\mathrm{stopgrad}(g(\\theta)),x)\\) where \\(g:\\mathcal{T}\\to \\mathcal{T}\\). 
Commonly \\(g(\\theta)\\) is an exponential moving average of gradient descent iterates and the teacher uses a lower temperature than the student." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.827, + 0.926 + ], + "angle": 0, + "content": "To capture desirable invariances and prevent degeneracy, data augmentation and regularization (e.g., Sinkhorn-Knopp normalization (Caron et al., 2020), mean entropy maximization (Assran et al., 2022)) are essential. As these are not directly relevant to our method, we omit them for brevity." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.285, + 0.119 + ], + "angle": 0, + "content": "3 METHOD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.136, + 0.827, + 0.234 + ], + "angle": 0, + "content": "Ensembling is a technique that combines models to boost performance, and has been especially successful in supervised learning. We are interested in ensembling methods that carry over this success to SSL approaches. However, SSL has key differences, such as throw-away \"projection heads\", from supervised learning that result in a multitude of possibilities for how to ensemble. With this in mind, we propose first where to ensemble, and then how to ensemble. Those proposals result in an efficient \"peri-training\" ensembling technique specifically tailored for SSL and a family of weighted ensemble objectives; we subsequently suggest different ways to select the weights." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.255, + 0.382, + 0.269 + ], + "angle": 0, + "content": "3.1 WHERE TO ENSEMBLE?" 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.282, + 0.518, + 0.425 + ], + "angle": 0, + "content": "Denote the teacher/student ensembles by \\(\\{t_i(Y|x)\\}_{i\\in [m]}\\) and \\(\\{s(Y|\\theta_j,x)\\}_{j\\in [m]}\\) and define each as in Sec. 2; parameters \\(\\theta = \\{\\theta_{j}\\}_{j\\in [m]}\\in \\mathcal{T}^{m}\\) are independently initialized, all students use one temperature and all teachers another. We asymmetrically denote \\(t_i(Y|x)\\) and \\(s(Y|\\theta_j,x)\\) to emphasize that teachers' gradients are zero and that the students are distinct solely by way of \\(\\theta_{i}\\neq \\theta_{j}\\). Studying heterogeneous architectures and/or different teacher parameterizations is left for future work." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.432, + 0.52, + 0.557 + ], + "angle": 0, + "content": "Recall that \\(\\theta_{j}\\) parameterizes the encoder, projection head, and codebook parameters: \\(\\theta_{j} = (\\omega_{j},\\psi_{j},\\{\\mu_{jy}\\}_{y\\in \\mathcal{Y}})\\). We further restrict \\(\\mathcal{T}^m\\) such that \\(\\omega_{i} = \\omega_{j}\\), i.e., we limit our consideration to ensembles of projection heads \\(h_{\\psi_j}\\) and/or codebooks \\(\\mu_{j}\\) but not encoders \\(r_{\\omega_j}\\). This choice makes our ensemble method inherently different from traditional supervised ensembling or encoder \\(r_{\\omega}\\) ensembling: the ensembled parts are not used for evaluation but" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.557, + 0.827, + 0.587 + ], + "angle": 0, + "content": "for improving the learning of the non-ensembled representation encoder during training, thus it requires no change of downstream evaluation or computational cost. Ensembling of \\( r_{\\omega} \\) is left for future work." 
+ }, + { + "type": "image", + "bbox": [ + 0.527, + 0.258, + 0.831, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.452, + 0.828, + 0.552 + ], + "angle": 0, + "content": "Figure 2: Overview of \\((h_{\\psi},\\mu)\\)-ensemble. Two augmented inputs are encoded by the teacher/student into representations, and then processed by an ensemble of heads. The loss for each head is weighted and summed into the final loss. Strike-through edges indicate stop-gradients. See Appx. A for pseudocode." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.606, + 0.364, + 0.62 + ], + "angle": 0, + "content": "3.2 HOW TO ENSEMBLE?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.633, + 0.827, + 0.69 + ], + "angle": 0, + "content": "We would like to extend the loss to support an ensemble of teacher/student pairs while respecting the MLE intuition of the loss as in Sec. 2. Additionally, we want to facilitate data-dependent importance weights, thus enabling preferential treatment of some teacher/student pairs. We therefore propose a weighted average (unnormalized) cross-entropy loss," + }, + { + "type": "equation", + "bbox": [ + 0.319, + 0.7, + 0.825, + 0.739 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.741, + 0.825, + 0.769 + ], + "angle": 0, + "content": "\\[\n\\text {where } w _ {i j y} = \\operatorname {s o f t m a x} \\left(\\left\\{\\frac {1}{\\gamma} f _ {i j y} (\\operatorname {s t o p g r a d} (\\theta), x): i, j \\in [ m ] \\right\\}\\right). 
\\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.778, + 0.825, + 0.822 + ], + "angle": 0, + "content": "The notation \\( w_{ijY} \\odot t_i(Y|x) \\) denotes a Hadamard product; i.e., the product of event-specific weights and probabilities for each \\( y \\in \\mathcal{Y} \\). The hyperparameter \\( \\gamma \\) is the temperature. The function \\( f_{ijy} \\) is defined for brevity and discussed in the following section." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "This objective admits generality and flexibility for introducing various weighting schemes, as it supports potential interactions between all teacher/student pairs and allows the weights to be both model- and data-dependent. Up to a constant independent of \\(\\theta\\), it is an importance weighted average of (unnormalized) KL divergences between each teacher and each student; i.e., a mixture of MLE-like objectives. We stop the gradient of \\(w_{ijy}\\) to \\(\\theta\\) in order to keep the overall gradient a weighted average of students' log-likelihood gradients, similar to Eq. (1). We also normalize the weights such that each data point equally contributes to the loss." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.345, + 0.119 + ], + "angle": 0, + "content": "3.3 HOW TO WEIGHT?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.828, + 0.201 + ], + "angle": 0, + "content": "In this section, we present several instantiations of our losses with different weighting schemes. We empirically show in Sec. 
5 that the particular choice of weighting scheme is critical for the representation performance and the induced diversity of \\((h_{\\psi},\\mu)\\)-ensembles. For simplicity we assume \\(\\gamma = 1\\) in this section. We indicate with \\(\\Longleftrightarrow\\) that a loss has the same arg min as Eq. (4). For additional analysis and discussion, see Appx. D." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.214, + 0.827, + 0.245 + ], + "angle": 0, + "content": "Uniform weighting (UNIF) The simplest strategy is to treat different teacher/student pairs independently and average each with uniform weighting; i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.235, + 0.25, + 0.826, + 0.288 + ], + "angle": 0, + "content": "\\[\nf _ {i j y} = \\log \\delta (i - j) \\Longleftrightarrow \\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m} \\sum_ {i \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right] \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.296, + 0.827, + 0.329 + ], + "angle": 0, + "content": "This strategy introduces uniform weights \\( w_{i} = \\frac{1}{m} \\) over ensemble elements. The role of \\( \\log \\delta (i - j) \\) (here and elsewhere) is to sub-select corresponding teacher/student pairs rather than all \\( m^2 \\) pairs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.342, + 0.826, + 0.385 + ], + "angle": 0, + "content": "Probability weighting (PROB) An alternative to using the average cross-entropy loss (UNIF) is to compute the cross-entropy loss of the average predictions whose gradient is weighted by \\( w_{ijy} \\) (see Appx. D.1). 
At \\( \\gamma = 1 \\), those gradient weights simplify into an average over the student probabilities:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.391, + 0.826, + 0.441 + ], + "angle": 0, + "content": "\\[\nf _ {i j y} = \\log s (y | \\theta_ {j}, x) \\iff \\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\mathsf {H} ^ {\\times} \\left[ \\frac {1}{m} \\sum_ {i \\in [ m ]} t _ {i} (Y | x), \\frac {1}{m} \\sum_ {j \\in [ m ]} s (Y | \\theta_ {j}, x) \\right] \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.445, + 0.827, + 0.53 + ], + "angle": 0, + "content": "Averaging the predictive distributions introduces correspondence between codes from different heads; thus different heads are no longer independent but instead cooperate to match the student to the teachers. The loss favors student heads with more confident predictions (i.e., larger \\( s(y|\\theta_j, x) \\)). Further motivation for averaging predictions comes from multi-sample losses studied in Morningstar et al. (2022). Note that the joint convexity of (unnormalized) KL divergence implies that this loss is upper bounded by the UNIF loss up to some constant in \\( \\theta \\) (see Appx. D)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.535, + 0.827, + 0.579 + ], + "angle": 0, + "content": "Although the PROB strategy favors confident student predictions, the weights change as a function of \\( y \\in \\mathcal{Y} \\). This may be in conflict with our intuition that SSL is like maximum likelihood (Sec. 2), since under that view, the teacher is responsible for weighting outcomes." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.593, + 0.825, + 0.622 + ], + "angle": 0, + "content": "Entropy weighting (ENT) Another way to favor heads with more confident predictions is to directly weight by their predictive entropies; i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.223, + 0.628, + 0.825, + 0.645 + ], + "angle": 0, + "content": "\\[\nf _ {i j y} = - \\mathrm {H} [ t _ {i} (Y | x) ] + \\log \\delta (i - j) \\Longleftrightarrow \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.647, + 0.825, + 0.684 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\mathrm {E N T}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i \\in [ m ]} \\operatorname {s o f t m a x} _ {i} \\left(\\left\\{- \\frac {1}{\\gamma} \\mathrm {H} \\left[ t _ {i ^ {\\prime}} (Y | x) \\right] : i ^ {\\prime} \\in [ m ] \\right\\}\\right) \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right] \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.692, + 0.827, + 0.807 + ], + "angle": 0, + "content": "where the weight \\( w_{i} = \\mathrm{softmax}_{i}\\left(\\{-\\frac{1}{\\gamma}\\mathsf{H}[t_{i'}(Y|x)]:i'\\in [m]\\}\\right) \\) is inversely correlated with the entropy of teacher predictions. In other words, the head whose teacher has a lower entropy (i.e., higher confidence about its prediction) is given a larger importance weight for learning the representation. Like PROB, this strategy encourages \"data specialists\" by emphasizing strongly opinionated teacher heads for different inputs. Like UNIF, different heads are treated more independently (than PROB), since interaction between different heads is introduced only through the weight computation. By preferring low-entropy teachers we also favor low variance teachers; this aligns with the intuition that using a lower-variance teacher benefits representation quality (Wang et al., 2022)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.821, + 0.825, + 0.851 + ], + "angle": 0, + "content": "Countless other weighting schemes It is impossible to fully explore the space of weightings; the following might also be interesting to study in detail but were omitted due to resource constraints." + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.858, + 0.825, + 0.873 + ], + "angle": 0, + "content": "\\[\nf _ {i j y} = 0 \\quad \\text {(Favors all pairs of teachers/students equally)} \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.875, + 0.825, + 0.89 + ], + "angle": 0, + "content": "\\[\nf _ {i j y} = \\log t _ {i} (y | x) \\quad (\\text {Favors opinionated teachers}) \\tag {11}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.892, + 0.825, + 0.908 + ], + "angle": 0, + "content": "\\[\nf _ {i j y} = - \\mathrm {H} [ s (Y | \\theta_ {j}, x) ] \\quad (\\text {Favors low-entropy students}) \\tag {12}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.91, + 0.825, + 0.926 + ], + "angle": 0, + "content": "\\[\nf _ {i j y} = \\mathsf {K} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] \\quad (\\text {Favors disagreeing teacher/student pairs}) \\tag {13}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.103, + 0.825, + 0.121 + ], + "angle": 0, + "content": "\\[\nf _ {i j y} = - \\frac {1}{2} \\log \\left(\\operatorname {V a r} _ {t _ {i} (Y | x)} [ Y ] + \\epsilon\\right) \\quad \\text {(Favors low variance teachers; e.g.
,} \\epsilon = \\frac {1}{1 2}) \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.124, + 0.829, + 0.155 + ], + "angle": 0, + "content": "Note that \"aligned\" versions of all schemes are possible by using \\( f_{ijy} + \\log \\delta (i - j) \\). We did early experiments exploring Eqs. (11) and (12), but the results were inferior and are largely omitted below." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.173, + 0.347, + 0.188 + ], + "angle": 0, + "content": "4 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.204, + 0.828, + 0.415 + ], + "angle": 0, + "content": "Self-supervised learning Recent work on self-supervised learning (SSL) focuses on discriminative or generative approaches. Most discriminative approaches seek to learn augmentation-invariant representations by enforcing the similarity between augmented pairs of the same image while utilizing different techniques to avoid collapse. Contrastive methods (Chen et al., 2020a; He et al., 2020; Wu et al., 2018; Hjelm et al., 2018; Bachman et al., 2019; Tian et al., 2020) use a large number of negative samples with a noise-contrastive objective (Gutmann & Hyvarinen, 2010; Oord et al., 2018). A large body of followup work eliminates the necessity of explicit negative samples with various techniques, including clustering assignment constraints (Caron et al., 2018; 2020; 2021; Asano et al., 2019), bootstrapping (Grill et al., 2020) or self-distillation (Caron et al., 2021) inspired by mean teacher (Tarvainen & Valpola, 2017), asymmetric architecture design (Grill et al., 2020; Chen & He, 2021), or redundancy reduction (Zbontar et al., 2021; Bardes et al., 2021). Recent generative approaches that use masked image modeling as the pretraining task (Dosovitskiy et al., 2020; Bao et al., 2021; He et al., 2022; Zhou et al., 2022; Xie et al., 2022) have achieved competitive finetuning performance. 
Our method may be applicable to all of the above methods that have some sort of \"projection head\", such as most of the discriminative approaches." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.427, + 0.829, + 0.651 + ], + "angle": 0, + "content": "Ensemble methods Ensembling has been extensively studied for improving model performance (Hansen & Salamon, 1990; Perrone & Cooper, 1992; Dietterich, 2000) and uncertainty estimation (Lakshminarayanan et al., 2017; Ovadia et al., 2019) in supervised learning and semi-supervised learning (Laine & Aila, 2016). A major research direction is to train efficient ensembles with partial parameter sharing (Lee et al., 2015; Wen et al., 2020; Dusenberry et al., 2020; Havasi et al., 2020) or intermediate checkpointing (Huang et al., 2017; Garipov et al., 2018). Our method also shares the encoder parameters across ensembles, which is closely related to multi-headed networks (Lee et al., 2015; Tran et al., 2020). Ensemble methods for SSL are less explored. Some recent work studies ensembles of supervised models adapted from pretrained SSL models. Gontijo-Lopes et al. (2022) conduct an empirical study of ensembles adapted from different SSL models and find that higher divergence in SSL methods leads to less correlated errors and better performance. Wortsman et al. (2022) ensemble multiple finetuned models adapted from the same SSL model by averaging their weights, which boosts the performance without any inference cost. Our method differs from them in that it (1) applies to the SSL training stage to directly improve representation quality, rather than aggregates multiple models in the post-training/finetuning stage; (2) introduces little training cost and no evaluation cost; and (3) is complementary to these post-training/finetuning ensembling methods." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.669, + 0.33, + 0.685 + ], + "angle": 0, + "content": "5 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.701, + 0.825, + 0.788 + ], + "angle": 0, + "content": "We carefully study the impact of \\((h_{\\psi},\\mu)\\)-ensembles and our selected weighted ensemble losses (UNIF, PROB, and ENT) on smaller DINO models in Sec. 5.1. Using what we learned in those experiments, in Sec. 5.2 we present new state-of-the-art results on ImageNet-1K on various metrics for multiple model sizes by ensembling both DINO- and MSN-based models. Finally, we explore ensemble evaluations in the transfer learning setting in Sec. 5.3. Additional experimental details and results are in Appx. B and Appx. C, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.829, + 0.927 + ], + "angle": 0, + "content": "Experimental setup We assessed the effectiveness of our method with two SSL methods: DINO (Caron et al., 2021) and MSN (Assran et al., 2022). In order to ensure that we are comparing against strong baselines, we consider three different classes of baselines: (1) evaluation numbers reported in the original works (Caron et al. (2021), Assran et al. (2022), and Zhou et al. (2022) for an additional baseline iBOT); (2) evaluation of our implementation using the hyperparameters reported in the original works (DINO only, for space reasons) to validate our implementation; and (3) evaluation of our implementation using the best hyperparameters that we found by tuning the baselines (DINO and MSN) for fair comparisons. In almost all models and evaluations, our retuned baselines give nontrivial performance improvements on top of previously reported numbers. 
These type (3) baselines" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.827, + 0.173 + ], + "angle": 0, + "content": "Table 1: Comparison of different ensemble strategies. ENT and PROB significantly improve over the non-ensembleed baseline, while UNIF leads to no gains. Ensembling both the projection head and the codebook works the best. All models are DINO* ViT-S/16 trained for 300 epochs. Averages and standard deviations are over 3 initialization seeds. The linear evaluation results on ImageNet-1K with different amounts of labeled data are reported here (see Table 11 in Appx. C.3 for all metrics)." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.184, + 0.825, + 0.35 + ], + "angle": 0, + "content": "
HowWhere# of Labels Per Class
Proj. hψCode. μ15~13 (1%)Full
Base40.6 ± 0.257.9 ± 0.363.4 ± 0.274.4 ± 0.1
UNIF40.4 ± 0.457.6 ± 0.363.3 ± 0.374.5 ± 0.2
PROB39.8 ± 0.5 ↓ 0.957.4 ± 0.4 ↓ 0.563.0 ± 0.4 ↓ 0.474.8 ± 0.1 ↑ 0.4
PROB41.9 ± 0.3 ↑ 1.359.6 ± 0.4 ↑ 1.765.1 ± 0.3 ↑ 1.775.4 ± 0.1 ↑ 1.0
ENT-ST40.0 ± 0.5 ↓ 0.657.3 ± 0.5 ↓ 0.662.7 ± 0.5 ↓ 0.774.0 ± 0.4 ↓ 0.4
ENT40.8 ± 0.458.0 ± 0.463.5 ± 0.474.5 ± 0.3
ENT43.0 ± 0.6 ↑ 2.459.7 ± 0.7 ↑ 1.864.8 ± 0.5 ↑ 1.475.1 ± 0.4 ↑ 0.7
ENT44.0 ± 0.2 ↑ 3.460.5 ± 0.3 ↑ 2.665.5 ± 0.1 ↑ 2.275.3 ± 0.1 ↑ 0.9
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.36, + 0.827, + 0.432 + ], + "angle": 0, + "content": "we label DINO* and MSN*, and we use them as the base models for our experiments with \\((h_{\\psi}, \\mu)\\)-ensembles and weighted ensemble losses. Appx. B.2.1 describes the details for getting such strong performance for DINO* and MSN*. In particular, we find that the projection head has a crucial impact on label efficiency of representations and using a smaller head (3-layer MLP with hidden size 1024) significantly improves few-shot evaluation performance (see Appx. C.2)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.446, + 0.828, + 0.558 + ], + "angle": 0, + "content": "Evaluation metrics We compared models trained with and without our \\((h_{\\psi},\\mu)\\)-ensembles by measuring various evaluation metrics on ImageNet-1K (Deng et al., 2009). The evaluation metrics reflect the decodability and the label efficiency of learned representations. We measured the decodability with respect to both the linear classifier following the common linear evaluation protocol and the \\(k\\)-NN classifier following Caron et al. (2021). We measured the label efficiency by evaluating the linear evaluation performance in few-shot settings, including \\(1\\%\\) (\\(\\sim 13\\)-shots) labeled data evaluation (Chen et al., 2020a) and 1-/2-/5-shot evaluations (Assran et al., 2022). All evaluations used frozen representations of the teacher encoder - we did not fine tune the models. See Appx. B.3 for details." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.575, + 0.513, + 0.59 + ], + "angle": 0, + "content": "5.1 EMPIRICAL STUDY OF \\((h_{\\psi},\\mu)\\)-ENSEMBLES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.6, + 0.829, + 0.714 + ], + "angle": 0, + "content": "Table 1 compares different strategies for where and how to ensemble. Fig. 4 compares the impact of the weighted ensemble loss on \\((h_{\\psi},\\mu)\\)-ensemble diversity. Fig. 
3 shows the effect of increasing the number of ensembles, adjusting the temperature \\(\\gamma\\), and increasing baseline projection head parameters. In these experiments, we used DINO* with ViT-S/16 trained for 300 epochs as the base model. We compared different ensemble methods applied to the base model with \\(m = 16\\) heads which we found to work the best. For the ENT strategy in Table 1, the entropy weighting temperature \\(\\gamma\\) is set to \\(0.05\\times \\log (c)\\) by default which is selected from \\(\\{0.0125,0.025,0.05,0.1,0.2\\} \\times \\log (c)\\) where the scale \\(\\log (c)\\) gives the maximum entropy of the codebook size \\(c\\). For PROB, we keep \\(\\gamma = 1\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.828, + 0.8 + ], + "angle": 0, + "content": "Where to ensemble We study the where question by ensembling either the projection head \\( h_{\\psi} \\), the codebook \\( \\mu \\), or both with the ENT and the PROB ensemble strategies, as shown in Table 1. We find that ensembling both \\( h_{\\psi} \\) and \\( \\mu \\) provides the largest gains for both losses, probably due to the increased flexibility for learning a diverse ensemble. Interestingly, only ensembling \\( h_{\\psi} \\) also works well for the ENT strategy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.829, + 0.926 + ], + "angle": 0, + "content": "How to ensemble We study the how question by considering four different loss variants: UNIF, PROB, ENT, and the variant of ENT with student entropy weighting. We find that when we ensemble both the projection head \\( h_{\\psi} \\) and the codebook \\( \\mu \\), the ENT ensemble strategy leads to the most significant gains (e.g., 3.4 p.p. gains for 1-shot and 0.9 p.p. gains for full-data). The PROB strategy also consistently improves the performance with a slightly larger gain (1 p.p.) in full-data evaluation. In contrast, we see no gains for the UNIF strategy over the baseline. 
We also study a variant of ENT that uses the student entropy (i.e., Eq. (12) with the log \\( \\delta(i - j) \\) term) for the importance weights (denoted as ENT-ST). ENT-ST performs much worse than ENT and is even worse than the baseline." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.103, + 0.386, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.183, + 0.222, + 0.385, + 0.236 + ], + "angle": 0, + "content": "(a) Scaling of \\((h_{\\psi},\\mu)\\)-ensembles." + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.103, + 0.599, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.222, + 0.596, + 0.237 + ], + "angle": 0, + "content": "(b) Effect of ENT temperature \\(\\gamma\\)" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.103, + 0.818, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.621, + 0.222, + 0.805, + 0.236 + ], + "angle": 0, + "content": "(c) Comparing different heads." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.247, + 0.825, + 0.318 + ], + "angle": 0, + "content": "Figure 3: Empirical study of \\((h_{\\psi},\\mu)\\)-ensembles. (a) The gains of \\((h_{\\psi},\\mu)\\)-ensembles start to diminish above 16 heads. (b) The temperature for entropy weighting has a larger impact on few-shot performance. 16 heads are used and \\(\\gamma\\) is scaled by \\(\\log(c)\\). (c) Our \\((h_{\\psi},\\mu)\\)-ensembles outperform all non-ensembleed baselines when controlling for number of parameters. A too powerful non-ensemble projection head significantly harms accuracy. \\(1\\%\\) data evaluation is shown. Also see Fig. 5." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.331, + 0.825, + 0.388 + ], + "angle": 0, + "content": "We conjecture that this is because the student predictions typically have a larger variance than teacher predictions (Wang et al., 2022) especially when multi-crop augmentation (Caron et al., 2020; 2021) is applied to the student. Similar experiments on Eq. (11) and/or \\(\\gamma = 0\\) variants of PROB also resulted in inferior performance (see Table 12)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.403, + 0.584, + 0.612 + ], + "angle": 0, + "content": "Analysis of \\((h_{\\psi}, \\mu)\\)-ensemble diversity The previous experiments showed that the choice of ensemble weighting strategy has a large impact on performance. We hypothesize that this choice substantially impacts the diversity of the codebook ensembles. Since the codes in different heads may not be aligned, we align them by the similarity of their code assignment probabilities across different input images, which measures how the codes are effectively used to 'cluster' the data. See Appx. C.4 for detailed explanations and results. In Fig. 4, we visualize the decay patterns of the similarity score between aligned codes (1.0 means the most similar) in a random pair of heads for each weighting strategy. ENT decays the fastest and UNIF decays the slowest, indicating that ENT learns the most diverse codebooks while UNIF is least diverse. This shows a positive correlation between the diversity of \\((h_{\\psi}, \\mu)\\)-ensembles and the empirical" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.612, + 0.825, + 0.655 + ], + "angle": 0, + "content": "performance of the ensemble strategies from Table 1. Finally, for UNIF, we find that different heads tend to learn the same semantic mappings even when randomly initialized; i.e., the code assignments in different heads become homogeneous up to permutation. See Fig. 8 for a visualization." 
+ }, + { + "type": "image", + "bbox": [ + 0.594, + 0.406, + 0.816, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.592, + 0.507, + 0.827, + 0.605 + ], + "angle": 0, + "content": "Figure 4: Visualization of code similarity. ENT learns the most diverse \\((h_{\\psi},\\mu)\\)-ensembles reflected by the fastest decay of similarity scores between aligned codes in different heads. UNIF has low diversity between heads." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.669, + 0.827, + 0.713 + ], + "angle": 0, + "content": "Number of \\((h_{\\psi},\\mu)\\)-ensembles We study the effect of increasing the number of \\((h_{\\psi},\\mu)\\)-ensembles \\(m\\) for ENT in Fig. 3a. Having more \\((h_{\\psi},\\mu)\\)-ensembles boosts the performance until \\(m = 16\\). Interestingly, using as few as \\(m = 2\\) heads already significantly improves over the baseline." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.825, + 0.798 + ], + "angle": 0, + "content": "Effect of ENT temperature \\(\\gamma\\) Fig. 3b studies the effect of entropy weighting temperature \\(\\gamma\\) for different evaluation metrics. We observe that \\(\\gamma\\) has a relatively larger impact on few-shot evaluation performance. \\(\\gamma\\) should be neither too high nor too low: a high temperature leads to under-specialization (i.e. less diversity) of heads similar to UNIF (\\(\\gamma \\rightarrow \\infty\\)) and a low temperature may otherwise lead to over-specialization (i.e., only a single head is used for each input)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Comparison of different projection heads Our method linearly increases projection head parameters, thus a natural question is: Is the gain of \\((h_{\\psi},\\mu)\\)-ensembles due to the increased power (or number of parameters) in projection heads? 
We answer this question with an empirical study of non-ensembled projection heads. Specifically, we studied non-ensembled \\(h_{\\psi}\\) with (depth, width) searched over \\(\\{2,3,4\\} \\times \\{512,1024,2048,4096\\}\\) and measured the linear evaluation performance with different amounts of labeled data. In Fig. 3c, we plot the \\(1\\%\\) data evaluation result with respect to the number of parameters of the projection head both for ensembled and non-ensembled baselines. See Appx. C.2 for detailed analysis and extra results for other metrics. Our key findings are:" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.08, + 0.827, + 0.213 + ], + "angle": 0, + "content": "Table 2: Effectiveness of ensemble heads for DINO*/MSN* with different ViT models. Our ensemble heads consistently improve all downstream evaluation metrics on ImageNet-1K and achieve a new state-of-the-art for few-shot evaluations. For ViT-S/16, we report linear evaluation results probed from the last layer (left) and from the last 4 layers (right, following DINO). †We evaluated the few-shot settings using DINO's publicly-available pretrained weights in the cases those results were not reported in Caron et al. (2021). ‡MSN ViT-B/16 and ViT-B/8 are both trained for 600 epochs in Assran et al. (2022), whereas our models are trained for only 400, 300 epochs, respectively. For each architecture, we highlight the best DINO baseline and weighted ensemble in blue. For MSN, the corresponding highlights are yellow. The best results for each architecture and metric are bolded." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.219, + 0.824, + 0.637 + ], + "angle": 0, + "content": "
MethodFew-shotFull-data
125~13 (1%)k-NNLinear
ViT-S/16, 800 epochs
iBOT40.4 ± 0.550.8 ± 0.859.9 ± 0.265.975.2- / 77.9
DINO38.9 ± 0.448.9 ± 0.358.5 ± 0.164.574.576.1 / 77.0
DINO (Repro)39.1 ± 0.349.1 ± 0.558.6 ± 0.264.774.375.8 / 76.9
DINO* (Retuned)44.6 ± 0.253.6 ± 0.361.1 ± 0.266.274.175.8 / 76.9
MSN47.1 ± 0.155.8 ± 0.662.8 ± 0.367.2-- / 76.9
MSN* (Retuned)47.4 ± 0.156.3 ± 0.462.8 ± 0.267.173.375.6 / 76.6
DINO*-PROB (16)45.2 ± 0.454.9 ± 0.462.5 ± 0.267.375.176.5 / 77.6
DINO*-ENT (4)46.3 ± 0.155.5 ± 0.663.0 ± 0.367.574.876.2 / 77.2
DINO*-ENT (16)47.6 ± 0.1 ↑ 3.056.8 ± 0.564.0 ± 0.268.3 ↑ 2.175.376.8 / 77.7 ↑ 0.8
MSN*-ENT (2)48.8 ± 0.257.5 ± 0.564.0 ± 0.267.974.676.0 / 76.9
MSN*-ENT (8)50.1 ± 0.1 ↑ 2.758.9 ± 0.665.1 ± 0.368.7 ↑ 1.675.276.4 / 77.4 ↑ 0.8
ViT-B/16, 400 epochs
iBOT46.1 ± 0.356.2 ± 0.764.7 ± 0.369.777.179.5
DINO†43.0 ± 0.252.7 ± 0.561.8 ± 0.267.476.178.2
DINO* (Retuned)49.3 ± 0.158.1 ± 0.565.0 ± 0.369.176.078.5
MSN‡49.8 ± 0.258.9 ± 0.465.5 ± 0.3---
MSN* (Retuned)50.7 ± 0.159.2 ± 0.465.9 ± 0.269.774.778.1
DINO*-ENT (16)52.8 ± 0.1 ↑ 3.561.5 ± 0.467.6 ± 0.371.1 ↑ 2.077.179.1 ↑ 0.6
MSN*-ENT (8)53.7 ± 0.2 ↑ 3.062.4 ± 0.668.3 ± 0.271.5 ↑ 1.877.278.9 ↑ 0.8
ViT-B/8, 300 epochs
DINO†47.5 ± 0.257.3 ± 0.565.4 ± 0.370.377.480.1
DINO* (Retuned)49.5 ± 0.558.6 ± 0.665.9 ± 0.370.777.180.2
MSN‡55.1 ± 0.164.9 ± 0.771.6 ± 0.3---
MSN* (Retuned)51.9 ± 0.361.1 ± 0.467.7 ± 0.371.775.780.3
DINO*-ENT (16)55.0 ± 0.4 ↑ 5.563.4 ± 0.669.5 ± 0.373.4 ↑ 2.778.681.0 ↑ 0.8
MSN*-ENT (8)55.6 ± 0.2 ↑ 3.764.5 ± 0.570.3 ± 0.273.4 ↑ 1.778.980.8 ↑ 0.5
" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.653, + 0.827, + 0.709 + ], + "angle": 0, + "content": "- A too powerful non-enssembled \\( h_{\\psi} \\) significantly hurts the label efficiency of learned representations. This result is similar to Chen et al. (2020b), which found that probing from intermediate layers of projection heads (which can be viewed as using a shallower head) could improve semi-supervised learning (\\( 1\\% - 10\\% \\) labeled data) results." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.711, + 0.827, + 0.768 + ], + "angle": 0, + "content": "- The default head (3/2048, denoted as 'Default') used in recent SSL methods (SimCLRv2, DINO, MSN, etc.) does not perform as well in few-shot evaluations, probably because it is selected by looking at full-data evaluation metrics. In contrast, our baseline (3/1024, denoted as 'Our baseline') significantly improves few-shot evaluation performance." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.77, + 0.827, + 0.798 + ], + "angle": 0, + "content": "- Our \\((h_{\\psi}, \\mu)\\)-ensembles outperform all non-enssembled baselines and lead to consistent improvements in all evaluation metrics, despite the increase of parameters." + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.653, + 0.827, + 0.798 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.815, + 0.557, + 0.829 + ], + "angle": 0, + "content": "5.2 IMPROVING SOTA RESULTS WITH ENSEMBLEING" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Next we apply \\((h_{\\psi},\\mu)\\)-ensembles to DINO* and MSN* and compare with the state-of-the-art results. We experimented with model architectures ViT-S/16, ViT-B/16, ViT-B/8 trained for 800, 400, 300 epochs respectively following Caron et al. (2021). We include both the published results and our returned versions to ensure strong baselines. 
For clarity, we denote our method as “{baseline}-{ensemble strategy} (# of heads)”, e.g., DINO*-ENT (4). We tuned both baselines and our methods for all architectures. We report the best hyperparameters for all models in Appx. B.2.2." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.177 + ], + "angle": 0, + "content": "Table 2 compares the results of \\((h_{\\psi},\\mu)\\)-ensembles and baselines. We find that \\((h_{\\psi},\\mu)\\)-ensembles with ENT consistently improve all evaluation metrics (full-data, few-shot) across both SSL methods (DINO*, MSN*) and all architectures (ViT-S/16, ViT-B/16, ViT-B/8) over their non-ensembld counterparts. The gains in few-shot evaluation are particularly substantial, providing a new state-of-the-art for ImageNet-1K evaluation from ImageNet pretraining." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.19, + 0.536, + 0.207 + ], + "angle": 0, + "content": "5.3 MORE EVALUATIONS FOR \\((h_{\\psi},\\mu)\\) -ENSEMBLES" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.215, + 0.825, + 0.245 + ], + "angle": 0, + "content": "Table 3: Comparison of transfer performance. ViT-S/16 is used. Our ensemble heads lead to consistent improvements for \\(\\mathrm{MSN^{*}}\\) and comparable results for DINO*." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.248, + 0.825, + 0.322 + ], + "angle": 0, + "content": "
Food101CIFAR10CIFAR100SUN397CarsDTDPetsCaltech-101FlowersAvg.
DINO*78.493.881.066.166.774.692.094.994.482.43
DINO*-ENT (16)79.193.881.466.566.874.992.894.693.982.64
MSN*77.793.179.864.663.372.292.494.792.781.17
MSN*-ENT (8)78.493.981.165.268.073.293.195.492.882.34
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.324, + 0.825, + 0.395 + ], + "angle": 0, + "content": "Transfer learning In Table 3, we compare the transfer learning performance of \\((h_{\\psi}, \\mu)\\)-ensembles and non-ensembed baselines. We used ViT-S-16 models trained on ImageNet-1K for 800 epochs and evaluated on 9 natural downstream datasets from Chen et al. (2020a) with linear evaluation (details in Appx. B.3). \\((h_{\\psi}, \\mu)\\)-ensembles lead to consistent improvements in transfer performance for \\(\\mathrm{MSN}^*\\) and comparable results for DINO*." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.401, + 0.587, + 0.527 + ], + "angle": 0, + "content": "Training overhead In Table 4, we benchmark the computational overhead of \\((h_{\\psi}, \\mu)\\)-ensembles at training time. We used a medium sized model, DINO* with ViT-B/16, trained with the same setting used in all of our experiments. We benchmarked the wall-clock time and peak memory on 128 TPUv3 cores. \\((h_{\\psi}, \\mu)\\)-ensembling is relatively cheap in terms of training cost because the ensembled parts typically account for a small portion of total computation, especially when the backbone encoder is more computationally expensive (e.g., ViT-B/8)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.527, + 0.805, + 0.542 + ], + "angle": 0, + "content": "Again, we emphasize that there is no evaluation overhead when \\((h_{\\psi},\\mu)\\)-ensembles are removed." + }, + { + "type": "table_caption", + "bbox": [ + 0.593, + 0.398, + 0.827, + 0.454 + ], + "angle": 0, + "content": "Table 4: Training overhead. Wall-clock time and peak memory per core for training with different numbers of ensembles." + }, + { + "type": "table", + "bbox": [ + 0.597, + 0.458, + 0.822, + 0.526 + ], + "angle": 0, + "content": "
mWall TimePeak Memory
15.81h5.25G
45.91h5.40G
166.34h5.89G
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.56, + 0.452, + 0.575 + ], + "angle": 0, + "content": "6 CONCLUSION & DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.59, + 0.827, + 0.703 + ], + "angle": 0, + "content": "We introduced an efficient ensemble method for SSL where multiple projection heads are ensembled to effectively improve representation learning. We showed that with carefully designed ensemble losses that induce diversity over ensemble heads, our method significantly improves recent state-of-the-art SSL methods in various evaluation metrics, particularly for few-shot evaluation. Although ensembling is a well-known technique for improving evaluation performance of a single model, we demonstrated that, for models with throw-away parts such as the projection heads in SSL, ensembling these parts can improve the learning of the non-ensemble representation encoder and also achieve significant gains in downstream evaluation without introducing extra evaluation cost." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.709, + 0.827, + 0.807 + ], + "angle": 0, + "content": "Our ensemble method is applicable to many SSL methods beyond the two we explored. For example, one may consider generalization to BYOL (Grill et al., 2020) or SimSiam (Chen & He, 2021) that ensembles projection and/or prediction heads, or MAE (He et al., 2022) that ensembles the decoders (which introduces more training cost though). Our weighted ensemble losses can also be applied as long as the original loss can be reformulated as MLE for some \\( t \\), \\( s \\), and \\( Y \\), e.g., the MSE loss in these methods is MLE under multivariate normal distributions. We hope our results and insights will motivate more future work for extending our method or exploring more ensemble techniques for SSL." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.813, + 0.827, + 0.926 + ], + "angle": 0, + "content": "In future work, we also hope to remove three limitations of our setting. First, considering ensembling strategies that include the representation encoder, \\( r_{\\omega} \\), may lead to further improvements in the performance of weighted ensemble SSL, at the cost of increased computation requirements during both training and evaluation. Second, considering heterogeneous architectures in the ensemble may further improve the learned representations (e.g., mixing Transformers with ResNets), whether the heterogeneity is in \\( r_{\\omega}, h_{\\psi} \\), or both. Third, considering other possibilities for \\( f_{ijy} \\) may also reveal performance gains and improve our understanding of the critical aspects that lead to good SSL representations, similar to what we learned about the importance of ensemble diversity." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.105, + 0.33, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.128, + 0.827, + 0.172 + ], + "angle": 0, + "content": "We would like to thank Mathilde Caron and Mahmoud Assran for their extensive help in reproducing DINO and MSN baselines. We would also like to thank Ting Chen and Yann Dubois for their helpful discussions and encouragements." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.185, + 0.406, + 0.2 + ], + "angle": 0, + "content": "REPRODUCIBILITY STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.209, + 0.828, + 0.267 + ], + "angle": 0, + "content": "We include detailed derivations for all our proposed losses in Appx. D. 
We report experimental details in Appx. B, including the implementation details for reproducing the baselines (Appx. B.1), training and evaluating our methods (Appx. B.2.1), and all hyper-parameters (Appx. B.2.2) used in our experiments for reproducing our results in Table 2." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.28, + 0.27, + 0.294 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.304, + 0.826, + 0.332 + ], + "angle": 0, + "content": "TensorFlow Datasets, a collection of ready-to-use datasets. https://www.tensorflow.org/datasets." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.34, + 0.827, + 0.383 + ], + "angle": 0, + "content": "Sanjeev Arora, Hrishikesh Khandeparkar, Mikhail Khodak, Orestis Plevrakis, and Nikunj Saunshi. A theoretical analysis of contrastive unsupervised representation learning. arXiv preprint arXiv:1902.09229, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.391, + 0.825, + 0.421 + ], + "angle": 0, + "content": "Yuki Markus Asano, Christian Rupprecht, and Andrea Vedaldi. Self-labelling via simultaneous clustering and representation learning. arXiv preprint arXiv:1911.05371, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.428, + 0.827, + 0.471 + ], + "angle": 0, + "content": "Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, and Nicolas Ballas. Masked siamese networks for label-efficient learning. arXiv preprint arXiv:2204.07141, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.479, + 0.825, + 0.51 + ], + "angle": 0, + "content": "Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. Advances in neural information processing systems, 32, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.516, + 0.825, + 0.546 + ], + "angle": 0, + "content": "Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.553, + 0.825, + 0.584 + ], + "angle": 0, + "content": "Adrien Bardes, Jean Ponce, and Yann LeCun. Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.591, + 0.827, + 0.633 + ], + "angle": 0, + "content": "Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, pp. 446-461. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.641, + 0.825, + 0.671 + ], + "angle": 0, + "content": "Yuri Burda, Roger B Grosse, and Ruslan Salakhutdinov. Importance weighted autoencoders. In ICLR, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.678, + 0.827, + 0.722 + ], + "angle": 0, + "content": "Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In Proceedings of the European conference on computer vision (ECCV), pp. 132-149, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.729, + 0.827, + 0.773 + ], + "angle": 0, + "content": "Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. Advances in Neural Information Processing Systems, 33:9912-9924, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.78, + 0.825, + 0.824 + ], + "angle": 0, + "content": "Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9650-9660, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.831, + 0.827, + 0.874 + ], + "angle": 0, + "content": "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pp. 1597-1607. PMLR, 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.881, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey E Hinton. Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems, 33:22243-22255, 2020b." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.304, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.135 + ], + "angle": 0, + "content": "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15750-15758, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.14, + 0.829, + 0.185 + ], + "angle": 0, + "content": "Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3606-3613, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.19, + 0.822, + 0.208 + ], + "angle": 0, + "content": "Thomas M Cover and Joy A Thomas. Elements of Information Theory. John Wiley & Sons, 1999." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.214, + 0.826, + 0.245 + ], + "angle": 0, + "content": "Marco Cuturi. Sinkhorn distances: Lightspeed computation of optimal transport. Advances in neural information processing systems, 26, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.25, + 0.829, + 0.295 + ], + "angle": 0, + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.301, + 0.826, + 0.333 + ], + "angle": 0, + "content": "Thomas G Dietterich. Ensemble methods in machine learning. In International workshop on multiple classifier systems, pp. 1-15. Springer, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.338, + 0.826, + 0.369 + ], + "angle": 0, + "content": "Onur Dikmen, Zhirong Yang, and Erkki Oja. Learning the information divergence. IEEE transactions on pattern analysis and machine intelligence, 37(7):1442-1454, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.375, + 0.826, + 0.433 + ], + "angle": 0, + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.44, + 0.826, + 0.471 + ], + "angle": 0, + "content": "Sever S Dragomir. A generalization of \\( f \\)-divergence measure to convex functions defined on linear spaces. Communications in Mathematical Analysis, 15(2):1-14, 2013." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.476, + 0.826, + 0.521 + ], + "angle": 0, + "content": "Michael Dusenberry, Ghassen Jerfel, Yeming Wen, Yian Ma, Jasper Snoek, Katherine Heller, Balaji Lakshminarayanan, and Dustin Tran. Efficient and scalable bayesian neural nets with rank-1 factors. In International conference on machine learning, pp. 2782-2792. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.527, + 0.826, + 0.571 + ], + "angle": 0, + "content": "Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In 2004 conference on computer vision and pattern recognition workshop, pp. 178-178. IEEE, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.578, + 0.826, + 0.622 + ], + "angle": 0, + "content": "Timur Garipov, Pavel Izmailov, Dmitrii Podoprikhin, Dmitry P Vetrov, and Andrew G Wilson. Loss surfaces, mode connectivity, and fast ensembling of dnns. Advances in neural information processing systems, 31, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.628, + 0.826, + 0.673 + ], + "angle": 0, + "content": "Raphael Gontijo-Lopes, Yann Dauphin, and Ekin Dogus Cubuk. No one representation to rule them all: Overlapping features of training methods. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=BK-4qbGgIE3." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.679, + 0.826, + 0.737 + ], + "angle": 0, + "content": "Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. Advances in neural information processing systems, 33:21271-21284, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.744, + 0.826, + 0.8 + ], + "angle": 0, + "content": "Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pp. 297-304. JMLR Workshop and Conference Proceedings, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.808, + 0.826, + 0.839 + ], + "angle": 0, + "content": "Abner Guzman-Rivera, Dhruv Batra, and Pushmeet Kohli. Multiple choice learning: Learning to produce multiple structured outputs. Advances in neural information processing systems, 25, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.846, + 0.826, + 0.875 + ], + "angle": 0, + "content": "Lars Kai Hansen and Peter Salamon. Neural network ensembles. IEEE transactions on pattern analysis and machine intelligence, 12(10):993-1001, 1990." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Marton Havasi, Rodolphe Jenatton, Stanislav Fort, Jeremiah Zhe Liu, Jasper Snoek, Balaji Lakshminarayanan, Andrew M Dai, and Dustin Tran. Training independent subnetworks for robust prediction. arXiv preprint arXiv:2010.06610, 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 9729-9738, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.825, + 0.2 + ], + "angle": 0, + "content": "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16000-16009, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.825, + 0.251 + ], + "angle": 0, + "content": "R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.258, + 0.825, + 0.288 + ], + "angle": 0, + "content": "Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pp. 646-661. Springer, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.295, + 0.827, + 0.326 + ], + "angle": 0, + "content": "Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E Hopcroft, and Kilian Q Weinberger. Snapshot ensembles: Train 1, get m for free. arXiv preprint arXiv:1704.00109, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.333, + 0.827, + 0.377 + ], + "angle": 0, + "content": "Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pp. 554-561, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.384, + 0.827, + 0.402 + ], + "angle": 0, + "content": "Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.409, + 0.825, + 0.439 + ], + "angle": 0, + "content": "Harold W Kuhn. The hungarian method for the assignment problem. *Naval research logistics quarterly*, 2(1-2):83-97, 1955." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.446, + 0.825, + 0.475 + ], + "angle": 0, + "content": "Samuli Laine and Timo Aila. Temporal ensembling for semi-supervised learning. arXiv preprint arXiv:1610.02242, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.483, + 0.827, + 0.527 + ], + "angle": 0, + "content": "Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.535, + 0.827, + 0.578 + ], + "angle": 0, + "content": "Kuang-Huei Lee, Anurag Arnab, Sergio Guadarrama, John Canny, and Ian Fischer. Compressive visual representations. Advances in Neural Information Processing Systems, 34:19538-19552, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.586, + 0.825, + 0.63 + ], + "angle": 0, + "content": "Stefan Lee, Senthil Purushwalkam, Michael Cogswell, David Crandall, and Dhruv Batra. Why m heads are better than one: Training a diverse ensemble of deep networks. arXiv preprint arXiv:1511.06314, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.638, + 0.827, + 0.668 + ], + "angle": 0, + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.676, + 0.825, + 0.72 + ], + "angle": 0, + "content": "Warren R Morningstar, Alex Alemi, and Joshua V Dillon. Pacm-bayes: Narrowing the empirical risk gap in the misspecified bayesian regime. In International Conference on Artificial Intelligence and Statistics, pp. 8270-8298. PMLR, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.727, + 0.827, + 0.757 + ], + "angle": 0, + "content": "Wai Ho Mow. A tight upper bound on discrete entropy. IEEE Transactions on Information Theory, 44(2):775-778, 1998." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.765, + 0.825, + 0.795 + ], + "angle": 0, + "content": "Yurii Nesterov. A method for solving the convex programming problem with convergence rate \\( o(1 / k^2) \\). Proceedings of the USSR Academy of Sciences, 269:543-547, 1983." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.802, + 0.827, + 0.846 + ], + "angle": 0, + "content": "Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722-729. IEEE, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.854, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Kento Nozawa, Pascal Germain, and Benjamin Guedj. Pac-bayesian contrastive unsupervised representation learning. In Jonas Peters and David Sontag (eds.), Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI), volume 124 of Proceedings of Machine Learning Research, pp. 21-30. PMLR, 03-06 Aug 2020. URL https://proceedings.mlr.press/v124/nozawa20a.html." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.104, + 0.826, + 0.133 + ], + "angle": 0, + "content": "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.827, + 0.198 + ], + "angle": 0, + "content": "Yaniv Ovadia, Emily Fertig, Jie Ren, Zachary Nado, David Sculley, Sebastian Nowozin, Joshua Dillon, Balaji Lakshminarayanan, and Jasper Snoek. Can you trust your model's uncertainty? evaluating predictive uncertainty under dataset shift. Advances in neural information processing systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.205, + 0.825, + 0.235 + ], + "angle": 0, + "content": "Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In 2012 IEEE conference on computer vision and pattern recognition, pp. 3498-3505. IEEE, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.242, + 0.827, + 0.299 + ], + "angle": 0, + "content": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.307, + 0.827, + 0.349 + ], + "angle": 0, + "content": "Michael P Perrone and Leon N Cooper. When networks disagree: Ensemble methods for hybrid neural networks. Technical report, Brown Univ Providence Ri Inst for Brain and Neural Systems, 1992." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.359, + 0.827, + 0.415 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pp. 8748-8763. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.424, + 0.829, + 0.466 + ], + "angle": 0, + "content": "Yangjun Ruan, Yann Dubois, and Chris J. Maddison. 
Optimal representations for covariate shift. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=Rf58LPCwJj0." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.474, + 0.827, + 0.517 + ], + "angle": 0, + "content": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.525, + 0.827, + 0.568 + ], + "angle": 0, + "content": "Ilya Sutskever, James Martens, George Dahl, and Geoffrey Hinton. On the importance of initialization and momentum in deep learning. In International conference on machine learning, pp. 1139-1147. PMLR, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.577, + 0.825, + 0.619 + ], + "angle": 0, + "content": "Antti Tarvainen and Harri Valpola. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.627, + 0.825, + 0.657 + ], + "angle": 0, + "content": "Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. In European conference on computer vision, pp. 776-794. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.665, + 0.827, + 0.708 + ], + "angle": 0, + "content": "Nenad Tomasev, Ioana Bica, Brian McWilliams, Lars Buesing, Razvan Pascanu, Charles Blundell, and Jovana Mitrovic. Pushing the limits of self-supervised resnets: Can we outperform supervised learning without labels onImagenet? arXiv preprint arXiv:2201.05119, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.715, + 0.827, + 0.758 + ], + "angle": 0, + "content": "Linh Tran, Bastiaan S Veeling, Kevin Roth, Jakub Swiatkowski, Joshua V Dillon, Jasper Snoek, Stephan Mandt, Tim Salimans, Sebastian Nowozin, and Rodolphe Jenatton. Hydra: Preserving ensemble diversity for model distillation. arXiv preprint arXiv:2001.04694, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.766, + 0.825, + 0.809 + ], + "angle": 0, + "content": "Xiao Wang, Haoqi Fan, Yuandong Tian, Daisuke Kihara, and Xinlei Chen. On the importance of asymmetry for siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16570-16579, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.817, + 0.825, + 0.846 + ], + "angle": 0, + "content": "Yeming Wen, Dustin Tran, and Jimmy Ba. Batchsemble: an alternative approach to efficient ensemble and lifelong learning. arXiv preprint arXiv:2002.06715, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.855, + 0.827, + 0.923 + ], + "angle": 0, + "content": "Mitchell Wortsman, Gabriel Ilharco, Samir Ya Gadre, Rebecca Roelofs, Raphael Gontijo-Lopes, Ari S Morcos, Hongseok Namkoong, Ali Farhadi, Yair Carmon, Simon Kornblith, et al. Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time. In International Conference on Machine Learning, pp. 23965-23998. PMLR, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.829, + 0.923 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.148 + ], + "angle": 0, + "content": "Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3733-3742, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.829, + 0.2 + ], + "angle": 0, + "content": "Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene recognition from abbey to zoo. In 2010 IEEE computer society conference on computer vision and pattern recognition, pp. 3485-3492. IEEE, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.829, + 0.251 + ], + "angle": 0, + "content": "Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9653-9663, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.258, + 0.829, + 0.301 + ], + "angle": 0, + "content": "Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stephane Deny. Barlow twins: Self-supervised learning via redundancy reduction. In International Conference on Machine Learning, pp. 12310-12320. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.31, + 0.829, + 0.354 + ], + "angle": 0, + "content": "Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. 
Image BERT pre-training with online tokenizer. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=ydopy-e6Dg." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.329, + 0.119 + ], + "angle": 0, + "content": "A PSEUDOCODE" + }, + { + "type": "code_caption", + "bbox": [ + 0.173, + 0.164, + 0.549, + 0.18 + ], + "angle": 0, + "content": "Algorithm 1: Pseudocode for computing ensemble loss" + }, + { + "type": "code", + "bbox": [ + 0.172, + 0.182, + 0.657, + 0.416 + ], + "angle": 0, + "content": "b, n, c:, batch size, number of ensemble heads, codebook size \n# log_ps, log_ct: student, teacher log probabilities with n ensembles \n# strategy: ensemble loss average strategy \n# tau_ent: temperature for entropy weighting \ndef ensemble_loss(log_ps, log_ct, strategy, tau_ent): \n b, n, c = log_ct.shape # axis 1 corresponds to ensemble \n log_ct = stop_grad(log_ct) # stop gradient for teacher \n if strategy == \"Unif\": \n loss = - (exp(log_ct) * log_ps).sum(axis=-1) \n loss = loss.mean(axis=1) # average over ensembles \n elif strategy == \"Prob\": \n log_mean_ct = logsumexp(log_ct, axis=1, b=1/n) # mean teacher \n log_mean_ps = logsumexp(log_ps, axis=1, b=1/n) # mean student \n loss = - (exp(log_mean_ct) * log_mean_ps).sum(axis=-1) \n elif strategy == \"Ent\": \n ent = - (exp(log_ct) * log_ct).sum(axis=-1) # teacher entropy \n weight = softmax(-ent/tau_ent, axis=1) # entropy weights \n loss = - (exp(log_ct) * log_ps).sum(axis=-1) \n loss = (loss * weight).sum(axis=1) # entropy weighted average \nreturn loss.mean() # average over 
samples" + }, + { + "type": "code_caption", + "bbox": [ + 0.173, + 0.467, + 0.632, + 0.482 + ], + "angle": 0, + "content": "Algorithm 2: Pseudocode for ensemble heads with simplified DINO" + }, + { + "type": "code", + "bbox": [ + 0.172, + 0.484, + 0.803, + 0.9 + ], + "angle": 0, + "content": "# n, c, eta: number of ensemble heads, codebook size, momentum update rate\n# fs, ft: student, teacher encoders\n# hs_ens, ht_ens: student, teacher projection heads with n ensembles, list with length n\n# mus_ens, mut_ens: student, teacher codebooks with n ensembles, list with length n\n# taus, taut: student, teacher temperatures\n# strategy: ensemble loss average strategy\n# tau_ent: temperature for entropy weighting\nfor x in dataloder: # load a batch with b samples\nxs, xt = augs(x), augt(x) # random augmentations\nzs, zt = fs(xs), ft(xt) # representations, (b, l)\n# all following computation can be parallelized with batch computation\nlog_ps, log_ct = [], []\nfor j in range(n):\nhs_j, ht_j = hs_ens[j], ht_ens[j] # j-th projection head\nmus_j, mut_j = mus_ens[j], mut_ens[j] # j-th codebook, (d, c)\nes_j, et_j = hs_j(zs), ht_j(zt) # j-th embedding, (b, d)\nrs_j = (es_j @ mus_j) / (es_j(norm(axis=1, keepdims=True) * mus_j(norm(axis=0, keepdims=True)) / taus # student logits, (b, c)\nrt_j = (et_j @ mut_j) / (et_j(norm(axis=1, keepdims=True) * mut_j(norm(axis=0, keepdims=True)) / taut # teacher logits, (b, c)\nlog_ps_j = logsoftmax(rs_j, axis=-1) # (b, c)\nlog_rt_j = logsoftmax(rt_j, axis=-1) # (b, c)\nlog_rt_j = renorm(log_rt_j) # adjust teacher predictions with centering or sinkhorn,\nomitted here for simplicity\nlog_ps.append(log_ps_j)\nlog_rt.append(log_rt_j)\nlog_ps = stack(log_ps_j, axis=1) # stacked student log probabilities, (b, n, c)\nlog_rt = stack(log_rt_j, axis=1) # stacked teacher log probabilities, (b, n, c)\nloss = ensemble_loss(log_ps, log_rt, strategy=strategy) # compute ensemble loss\nloss.backup() # back-propagate\nsgd_update(fs, hs_ens, mus_ens) # apply 
gradient decent update for student\nema_update(ft, ht_ens, mut_ens, rate=eta) # apply momentum update for teacher" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.421, + 0.119 + ], + "angle": 0, + "content": "B EXPERIMENTAL DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.192 + ], + "angle": 0, + "content": "In this section, we provide details for our experiments. In Appx. B.1, we describe how we reproduced and improved the baseline DINO/MSN models. We give the implementation details for SSL training and evaluation in Appx. B.2 and Appx. B.3 respectively. All the hyper-parameters used in our experiments are in Appx. B.2.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.207, + 0.508, + 0.221 + ], + "angle": 0, + "content": "B.1 REPRODUCING & IMPROVING BASELINES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.232, + 0.827, + 0.4 + ], + "angle": 0, + "content": "We carefully reproduced and further improved baseline methods (denoted as DINO* and MSN* respectively) with an extensive study and hyperparameter search (see Appx. B.1). In particular, we systematically study the projection head design (which we found is crucial for few-shot evaluation performance (Appx. C.2)) and different techniques for avoiding collapse used in both methods (Appx. C.1). DINO* performs significantly better than DINO on few-shot evaluation (e.g., \\(2\\sim 6\\) percentage point (p.p.) gains for 1 shot) and maintains the full-data evaluation performance. 
The main adjustments of DINO* are: (i) A 3-layer projection head with a hidden dimension of 1024 (instead of 2048); (ii) Sinkhorn-Knopp (SK) normalization (instead of centering) is applied to teacher predictions, combined with a smaller teacher temperature \\(\\tau = 0.025\\) and codebook size \\(c = 1024\\) or 4096. MSN* uses the same projection head as DINO* and applies ME-MAX regularization without SK normalization (which is applied in MSN by default). Further details for DINO and MSN can be found below." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.414, + 0.277, + 0.429 + ], + "angle": 0, + "content": "B.1.1 DINO" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.447, + 0.825, + 0.492 + ], + "angle": 0, + "content": "Table 5: Reproducing & Improving DINO. Our reproduce results match the public numbers. We further improve the DINO baseline (DINO*) by studying projection heads and collapse-avoiding techniques. The evaluation results of DINO/DINO* ViT-S/16 trained with 800 epochs are reported." + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.507, + 0.821, + 0.593 + ], + "angle": 0, + "content": "
Few-shotFull-data
125~13 (1%)k-NNLinear
DINO (Caron et al., 2021)38.9 ± 0.448.9 ± 0.358.5 ± 0.164.574.576.1 / 77.0
DINO (Ours reproduced)39.1 ± 0.349.1 ± 0.558.6 ± 0.264.774.375.8 / 76.9
DINO* (Retuned)44.6 ± 0.253.6 ± 0.361.1 ± 0.266.274.175.8 / 76.9
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.606, + 0.827, + 0.761 + ], + "angle": 0, + "content": "Reproducing DINO We carefully reproduced DINO with JAX following the official DINO implementation1. In Table 5, we report the evaluation results of DINO using ViT-S trained with 800 epochs following the exact training configuration for ViT-S/16 in the official DINO code. The official results of full-data evaluation and \\(1\\%\\) data evaluation are from Caron et al. (2021), the other few-shot evaluation results are evaluated by Assran et al. (2022) and also validated by us. Note that for consistency of full-data linear evaluation, we report the results with both the [CLS] token representations of the last layer and the concatenation of the [CLS] token representations from the last 4 layers following Caron et al. (2021). For 1-/2-/5-shots evaluation results, we report the mean accuracy and standard deviation across 3 random splits of the data following Assran et al. (2022). As shown in Table 5, our reproduced results are all comparable with the published numbers which validates the implementation of our training and evaluation pipelines." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.775, + 0.827, + 0.903 + ], + "angle": 0, + "content": "Improving DINO We improved the DINO baseline with a systematic empirical study of some important components. We first empirically compared different techniques for avoiding collapse (see Appx. C.1) and find that Sinkhorn-Knopp (SK) normalization is a more effective and also simpler technique for encouraging codebook usage than the centering operation used in DINO. We thus applied SK normalization, which enabled us to use a smaller teacher temperature \\(\\tau = 0.025\\) (instead of \\(\\tau = 0.07\\)) and a much smaller codebook size \\(c = 1024\\) or 4096 (instead of 65536). These modifications lead to similar performance as DINO with a much smaller codebook (up to 1M parameters, compared to 16M parameters for DINO). 
Next we empirically studied the effect of projection heads for different evaluation metrics (see Appx. C.2), and found that the design of" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.556, + 0.925 + ], + "angle": 0, + "content": "https://github.com/facebookresearch/dino" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.189 + ], + "angle": 0, + "content": "projection heads is crucial for few-shot evaluation metrics and an too power powerful projection head (e.g., the 3-layer MLP with a hidden dimension of 2048 used in DINO/MSN/etc.) could significantly hurt the few-shot performance. With an empirically study of projection head architectures, we found that a simply reducing the hidden dimension to 1024 could significantly improves the few-shot evaluation performance while maintaining full-data evaluation performance. The improved results of DINO* are shown in Table 5." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.203, + 0.273, + 0.216 + ], + "angle": 0, + "content": "B.1.2 MSN" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.234, + 0.825, + 0.279 + ], + "angle": 0, + "content": "Table 6: Reproducing & improving MSN. We implement \\(\\mathsf{MSN^{*}}\\) by adding ME-MAX regularization and masking to DINO*, which surpasses public MSN results. The evaluation results of MSN/MSN* ViT-S/16 trained with 800 epochs are reported." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.294, + 0.821, + 0.381 + ], + "angle": 0, + "content": "
Few-shotFull-data
125~13 (1%)k-NNLinear
MSN (Assran et al., 2022)47.1 ± 0.155.8 ± 0.662.8 ± 0.367.2-- / 76.9
MSN (Repro)39.1 ± 0.349.2 ± 0.358.4 ± 0.164.372.874.7 / 75.5
MSN* (Retuned)47.4 ± 0.156.3 ± 0.462.8 ± 0.267.173.375.6 / 76.6
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.385, + 0.825, + 0.442 + ], + "angle": 0, + "content": "We carefully implemented MSN by adding its main components, i.e., ME-MAX regularization and masking, to the DINO implementation (denoted as \\(\\mathrm{MSN}^*\\)), which surpassed public results as shown in Table 6. Note that the implementation of \\(\\mathrm{MSN}^*\\) does not exactly match the public implementation in the public MSN code\\(^2\\), where the main differences are:" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.453, + 0.827, + 0.496 + ], + "angle": 0, + "content": "- MSN applies ME-MAX with Sinkhorn-Knopp normalization by default (as in the released training configuration), which we empirically find does not work very well (see Table 9). \\(\\mathrm{MSN}^*\\) does not apply SK normalization and tunes the regularization strength for ME-MAX." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.5, + 0.827, + 0.543 + ], + "angle": 0, + "content": "- Some differences in implementation details, e.g., schedules for learning rate/weight decay, batch normalization in projection heads, specific data augmentations, etc. \\(\\mathrm{MSN}^*\\) uses the exact same setup as DINO\\* which follows original DINO implementation." + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.453, + 0.827, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.554, + 0.825, + 0.597 + ], + "angle": 0, + "content": "We initially tried to exactly reproduce the original MSN following the public MSN code, but the results are much below the public ones, as shown in Table 6. Incorporating the two differences above bridges the gap and makes \\(\\mathrm{MSN}^*\\) surpass the public results." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.613, + 0.381, + 0.627 + ], + "angle": 0, + "content": "B.2 PRETRAINING DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.639, + 0.827, + 0.669 + ], + "angle": 0, + "content": "In this subsection, we provide the general implementation details in Appx. B.2.1 and specific hyperparameters in Appx. B.2.2 in Appx. B.2.2 for reproducibility." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.682, + 0.424, + 0.696 + ], + "angle": 0, + "content": "B.2.1 IMPLEMENTATION DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.706, + 0.827, + 0.903 + ], + "angle": 0, + "content": "**Common setup** We experimented with DINO (Caron et al., 2021) and MSN (Assran et al., 2022) models on ImageNet ILSVRC-2012 dataset (Deng et al., 2009). We mainly followed the training setup in Caron et al. (2021). In particular, all models were trained with AdamW optimizer (Loshchilov & Hutter, 2018) and a batch size of 1024. The learning rate was linearly warmuped to 0.002 \\((= 0.001 \\times \\text{batch size} / 512)\\) and followed a cosine decay schedule. The weight decay followed a cosine schedule from 0.04 to 0.4. The momentum rate for the teacher was increased from 0.996 to 1 with a cosine schedule following BYOL (Grill et al., 2020). A stochastic depth (Huang et al., 2016) of 0.1 was applied without dropout (Srivastava et al., 2014). The student temperature \\(\\tau\\) is set to 0.1. As with DINO, we used the data augmentations of BYOL and multi-crop augmentation of SWAV (Caron et al., 2020). In particular, 2 global views with a \\(224 \\times 224\\) resolution and crop area range [0.25, 1.0] were generated for the teacher and student, and another 10 local views with \\(96 \\times 96\\) resolution and crop area range [0.08, 0.25] were used as extra augmented inputs for the student. 
For MSN, we used the exact same setup and incorporated its major component: 1) mean entropy maximization (ME-MAX) regularization; 2) masking as an extra augmentation applied to the student global view." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.548, + 0.925 + ], + "angle": 0, + "content": "\\(^{2}\\)https://github.com/facebookresearch/msn" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.244 + ], + "angle": 0, + "content": "Main modifications We retuned the baselines (DINO* and MSN*) as detailed in Appx. B.1, and the main adjustments are as followed. We used a 3-layer projection head with a hidden dimension of 1024. The output embedding (i.e., \\((h_{\\psi} \\circ r_{\\omega})(x)\\)) and the codes (i.e., \\(\\mu\\)) both have a dimension of 256 and are \\(L_{2}\\) normalized. For DINO*, Sinkhorn-Knopp (SK) normalization was applied to teacher predictions. For MSN*, ME-MAX was used without SK normalization and the regularization strength was tuned over \\(\\{3, 4, 5\\}\\). For all models, we used teacher temperature \\(\\tau = 0.025\\) which was linearly decayed from 0.05 for the first 30 epochs. The codebook size \\(c\\) was selected over \\(\\{1024, 4096\\}\\) for all models, and typically \\(c = 4096\\) was selected for baseline methods and \\(c = 1024\\) was selected for ours. For our \\((h_{\\psi}, \\mu)\\)-ensembles with ENT, entropy weighting temperature \\(\\gamma\\) is linearly decayed from 0.5 to the specified value." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.258, + 0.383, + 0.273 + ], + "angle": 0, + "content": "B.2.2 HYPER-PARAMETERS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.282, + 0.662, + 0.299 + ], + "angle": 0, + "content": "We report the hyperparameters for training our models for reproducibility:" + }, + { + "type": "table_caption", + "bbox": [ + 0.306, + 0.31, + 0.691, + 0.326 + ], + "angle": 0, + "content": "Table 7: Hyper-parameters for training the DINO* model." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.343, + 0.825, + 0.637 + ], + "angle": 0, + "content": "
Hyper-parameterViT-S/16ViT-B/16ViT-B/8
DINO*DINO*-PROB (16)DINO*-ENT (4/16)DINO*DINO*-ENT (16)DINO*DINO*-ENT (16)
training epoch800400300
batch size102410241024
learning rate2e-32e-32e-3
warmup epoch103010
min lr1e-51e-54e-5
weight decay0.04 → 0.40.04 → 0.40.04 → 0.4
stochastic depth0.10.10.1
gradient clip3.01.03.0
momentum0.996 → 1.00.996 → 1.00.996 → 1.0
# of multi-crops101010
masking ratio---
proj. layer333
proj. hidden dim102410241024
emb. dim d256256256
rep. dim384768768
codebook size c4096102410244096102440961024
student temp.0.10.10.1
teacher temp.0.0250.0250.025
te. temp. decay epoch303030
centerxxx
SK norm
ME-MAX weight---
ent. weight temp. γ--0.05-0.05-0.06
γ init.--0.5-0.5-0.5
γ decay epoch--30-30-30
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.652, + 0.399, + 0.667 + ], + "angle": 0, + "content": "B.3 EVALUATION PROTOCALS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.677, + 0.828, + 0.79 + ], + "angle": 0, + "content": "Few-shot linear evaluation We followed the few-shot evaluation protocol in Assran et al. (2022). Specifically, we used the 1-/2-/5-shot ImageNet dataset splits3 in Assran et al. (2022) and \\(1\\%\\) (\\(\\sim 13\\)-shot) ImageNet dataset splits4. For given labelled images, we took a single central crop of size \\(224 \\times 224\\) without additional data augmentations, and extracted the output [CLS] token representations from the frozen pretrained model. Then we trained a linear classifier with multi-class logistic regression on top of the extracted representations. We used the scikit-learn package (Pedregosa et al., 2011) for the logistic regression classifier. For all few-shot evaluations, we searched the \\(\\mathrm{L}_2\\) regularization strength over \\(\\{1\\mathrm{e}-4, 3\\mathrm{e}-4, 1\\mathrm{e}-3, 3\\mathrm{e}-3, 1\\mathrm{e}-2, 3\\mathrm{e}-2, 1\\mathrm{e}-1, 3\\mathrm{e}-1, 1, 3, 10\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.804, + 0.829, + 0.875 + ], + "angle": 0, + "content": "Full-data linear evaluation We followed the linear evaluation protocol in (Caron et al., 2021). Specifically, we trained a linear classifier on top of the representations extracted from the frozen pretrained model. The linear classifier is optimized by SGD with Nesterov momentum (Nesterov, 1983; Sutskever et al., 2013) of 0.9 and a batch size of 4096 for 100 epochs on the whole ImageNet dataset, following a cosine learning rate decay schedule. We did not apply any weight decay." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.884, + 0.671, + 0.898 + ], + "angle": 0, + "content": "3Publicly available at https://github.com/facebookresearch/msn" + }, + { + "type": "page_footnote", + "bbox": [ + 0.174, + 0.899, + 0.826, + 0.924 + ], + "angle": 0, + "content": "4Publicly available at https://github.com/google-research/simclr/tree/master/imagenet_subsets" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.884, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.31, + 0.102, + 0.687, + 0.117 + ], + "angle": 0, + "content": "Table 8: Hyper-parameters for training the \\( {\\mathrm{{MSN}}}^{ * } \\) model." + }, + { + "type": "table", + "bbox": [ + 0.216, + 0.133, + 0.785, + 0.455 + ], + "angle": 0, + "content": "
Hyper-parameterViT-S/16ViT-B/16ViT-B/8
DINO*MSN*-ENT (2/8)MSN*MSN*-ENT (8)MSN*MSN*-ENT (8)
training epoch800400300
batch size102410241024
learning rate2e-32e-32e-3
warmup epoch203020
min lr1e-54e-54e-5
weight decay0.04 → 0.40.04 → 0.40.04 → 0.4
stochastic depth0.10.10.1
gradient clip1.01.01.0
momentum0.996 → 1.00.996 → 1.00.996 → 1.0
# of multi-crops101010
masking ratio0.20.20.15
proj. layer333
proj. hidden dim102410241024
emb. dim d256256256
rep. dim384768768
codebook size c409610244096102440961024
student temp.0.10.10.1
teacher temp.0.0250.0250.025
te. temp. decay epoch303030
centerXXX
SK normXXX
ME-MAX weight4.04.04.0
ent. weight temp. γ-0.01-0.005-0.01
γ init.-0.5-0.5-0.5
γ decay epoch-30-30-30
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.475, + 0.828, + 0.614 + ], + "angle": 0, + "content": "During training, we only applied basic data augmentations including random resized crops of size \\(224 \\times 224\\) and horizontal flips. During testing, we took a single central crop of the same size. For ViT-S/16, Caron et al. (2021) found that concatenating the [CLS] token representations from the last \\(l\\) (specifically, \\(l = 4\\)) layers (c.f. Appendix F.2 in Caron et al. (2021)) improved the results by about 1 p.p. We followed the same procedure, but reported linear evaluation results with both \\(l = 1\\) and \\(l = 4\\) in Table 2 for consistency. In our empirical study with ViT-S/16, we used the result with \\(l = 1\\). For larger models (e.g., ViT-B/16), we followed Caron et al. (2021); Zhou et al. (2022) to use the concatenation of the [CLS] token representation and the average-pooled patch tokens from the last \\(l = 1\\) layer for linear evaluation. For all linear evaluations, we searched the base learning rate over \\(\\{4.8\\mathrm{e} - 3, 1.6\\mathrm{e} - 2, 4.8\\mathrm{e} - 2, 1.6\\mathrm{e} - 1, 4.8\\mathrm{e} - 1, 1.6\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.633, + 0.828, + 0.822 + ], + "angle": 0, + "content": "Full-data \\(k\\)-NN evaluation We followed the \\(k\\)-NN evaluation protocol in Caron et al. (2021); Wu et al. (2018). Specifically, for each image in the given dataset, we took a single central crop of size \\(224 \\times 224\\) without additional data augmentations, and extracted the output [CLS] token representations from the frozen pretrained model. The extracted representations are used for a weighted \\(k\\)-Nearest-Neighbor classifier. In particular, denote the stored training representations and labels as \\(\\mathcal{D} = \\{(z_i, y_i)\\}_{i=1}^N\\). 
For a test image with extracted representation \\(z\\), denote the set of its top \\(k\\)-NN training samples as \\(\\mathcal{D}_k[z] \\subseteq \\mathcal{D}\\) and \\(|\\mathcal{D}_k[z]| = k\\). The \\(k\\)-NN set \\(\\mathcal{D}_k[z]\\) is used to make the prediction for the test image with a weighted vote, i.e., \\(\\hat{y} = \\arg \\max_y \\left( \\sum_{(z_j, y_j) \\in \\mathcal{D}_k[z]} \\alpha_j \\mathbf{1}_{y=y_j} \\right)\\), where \\(\\mathbf{1}_{y=y_j}\\) is the one-hot vector corresponding to label \\(y_j\\) and \\(\\alpha_j\\) is the weight induced by the cosine similarity between \\(z\\) and \\(z_j\\), i.e., \\(\\alpha_j = \\exp \\left( \\frac{1}{\\tau'} \\frac{z^\\top z_j}{||z|| \\|z_j||} \\right)\\). We set \\(\\tau' = 0.07\\) without tuning as in Caron et al. (2021); Wu et al. (2018). For all \\(k\\)-NN evaluations, we searched \\(k\\) over \\(\\{5, 10, 20, 50, 100\\}\\) and found that \\(k = 10\\) or \\(k = 20\\) was consistently the best." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Transfer evaluation via linear probing We mainly followed the transfer evaluation protocol in (Grill et al., 2020; Chen et al., 2020a). 
In particular, we used 9 of their 13 datasets that are available in tensorflow-datasets (tfd), namely Food-101 (Bossard et al., 2014), CIFAR10 (Krizhevsky et al., 2009), CIFAR100 (Krizhevsky et al., 2009), SUN397 scene dataset (Xiao et al., 2010), Stanford Cars (Krause et al., 2013), Describable Textures Dataset (Cimpoi et al., 2014, DTD), Oxford-IIIT Pets (Parkhi et al., 2012), Caltech-101 (Fei-Fei et al., 2004), Oxford 102 Flowers (Nilsback & Zisserman," + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.825, + 0.158 + ], + "angle": 0, + "content": "Table 9: Empirical study of different techniques for avoiding collapse. Using Sinkhorn-Knopp normalization instead of centering for DINO leads to improved performance, and matches the original DINO even with a much smaller codebook. The ME-MAX regularization of MSN is very effective and leads to significant improvement for few-shot evaluations." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.177, + 0.821, + 0.272 + ], + "angle": 0, + "content": "
TechniqueFew-shotFull-data
CenterSinkhornME-MAX125~13 (1%)k-NNLinear
DINO37.8 ± 0.447.4 ± 0.356.9 ± 0.463.072.474.9
39.1 ± 0.349.4 ± 0.358.7 ± 0.264.874.176.0
MSN36.0 ± 0.446.6 ± 0.656.5 ± 0.263.273.275.2
43.9 ± 0.253.0 ± 0.361.1 ± 0.266.074.075.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.275, + 0.277, + 0.722, + 0.293 + ], + "angle": 0, + "content": "Table 10: ME-MAX regularization is sensitive to hyper-parameters." + }, + { + "type": "table", + "bbox": [ + 0.248, + 0.309, + 0.751, + 0.395 + ], + "angle": 0, + "content": "
WeightFew-shotFull-data
125~13 (1%)k-NNLinear
1.037.6 ± 0.248.0 ± 0.457.7 ± 0.264.073.575.6
3.043.9 ± 0.253.0 ± 0.361.1 ± 0.266.074.075.8
5.043.6 ± 0.252.6 ± 0.460.4 ± 0.165.573.975.6
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.412, + 0.827, + 0.552 + ], + "angle": 0, + "content": "2008). Following their evaluation metrics, we reported mean per-class accuracy for Oxford-IIIT Pets, Caltech-101, and Oxford 102 Flowers datasets and reported top-1 accuracy for other datasets. We transferred the models pretrained on ImageNet (Deng et al., 2009) to these datasets by training a linear classifier on top of frozen representations. In particular, we resized given images to \\(256 \\times 256\\) and took a single central crop of size \\(224 \\times 224\\) without additional data augmentations. We extracted the output [CLS] token representations from the frozen pretrained model. Then we trained a linear classifier with multi-class logistic regression on top of the extracted representations. We used the scikit-learn package (Pedregosa et al., 2011) for the logistic regression classifier. For all transfer evaluations, we searched the \\(\\mathbf{L}_2\\) regularization strength over \\(\\{1e - 6, 1e - 5, 1e - 4, 3e - 4, 1e - 3, 3e - 3, 1e - 2, 3e - 2, 1e - 1, 3, 1e, 3e, 1e2, 1e3, 1e4, 1e5\\}\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.572, + 0.401, + 0.587 + ], + "angle": 0, + "content": "C ADDITIONAL RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.606, + 0.647, + 0.62 + ], + "angle": 0, + "content": "C.1 EMPIRICAL STUDY OF TECHNIQUES FOR AVOIDING COLLAPSE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.631, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Most self-supervised learning methods utilize some techniques to avoid collapse of representations with, e.g., contrastive loss (Chen et al., 2020a; He et al., 2020), batch normalization (Grill et al., 2020), asymmetric architecture design with a predictor (Grill et al., 2020; Chen & He, 2021), etc. In DINO and MSN, a learnable codebook is used for the learning objective and different techniques are applied to encourage the effective codebook usage. 
There are two potential cases of collapse (as discussed in Caron et al. (2021)):" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.727, + 0.825, + 0.81 + ], + "angle": 0, + "content": "- Dominating codes. This is the case of \"winner-take-all\": only a small portion of codes are being predicted while others are inactive. Typical solutions for avoiding this include applying Sinkhorn-Knopp normalization (Cuturei, 2013) as in SWaV (Caron et al., 2020), centering teacher logits as in DINO (Caron et al., 2021), and applying mean-entropy maximization regularization (ME-MAX) as in MSN (Assran et al., 2022). Note that in MSN, ME-MAX is combined with Sinkhorn-Knopp normalization by default." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.814, + 0.825, + 0.858 + ], + "angle": 0, + "content": "- Uniform codes. This is the case where all codes are treated equally and the predictions reduce to be uniform over codes. A simple and effective solution is to applying sharpening, i.e., using a lower temperature for computing the teacher prediction." + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.727, + 0.825, + 0.858 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We systematically study different techniques in a unified setup. In particular, we used DINO with the ViT-S backbone, a 3-layer MLP projection head with hidden dimension 2048, and a codebook of size 4096 and dimension 256. We applied different techniques to DINO and searched the teacher temperature in \\(\\{0.0125, 0.025, 0.05\\}\\) for each. 
For ME-MAX, we searched regularization weight in" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.105, + 0.495, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.265, + 0.386, + 0.279 + ], + "angle": 0, + "content": "(a) Merged" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.104, + 0.791, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.617, + 0.265, + 0.677, + 0.278 + ], + "angle": 0, + "content": "(b) 1-shot" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.282, + 0.49, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.305, + 0.442, + 0.378, + 0.456 + ], + "angle": 0, + "content": "(c) \\(1\\%\\) -data" + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.282, + 0.8, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.619, + 0.442, + 0.696, + 0.456 + ], + "angle": 0, + "content": "(d) Full-data" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.467, + 0.828, + 0.553 + ], + "angle": 0, + "content": "Figure 5: Effect of projection heads for different evaluation metrics. We compare non-ensemble projection heads with different depths and widths as well as our \\((h_{\\psi},\\mu)\\)-ensembles, and evaluate linear evaluation performance with different amount of labeled data. (a) shows the comparison of normalized metrics for non-ensembles. (b)-(d) compares non-ensemble and \\((h_{\\psi},\\mu)\\)-ensembles by unnormalized metrics. 'Default' denotes the default projection heads used in many SSL methods. See analysis in Appx. C.2 for details." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.578, + 0.825, + 0.608 + ], + "angle": 0, + "content": "\\(\\{1.0, 3.0, 5.0\\}\\). For ME-MAX combined with Sinkhorn, we followed Assran et al. (2022) and used default regularization weight of 1.0. The results are in Table 10. We observed that:" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.619, + 0.825, + 0.689 + ], + "angle": 0, + "content": "- DINO's centering operation is not as strong as other techniques, and it favours a larger teacher temperature (e.g., 0.05). It does not work well when the codebook size (4096) is not as large as the one used in the original DINO model (65536). Switching to use Sinkhorn-Knopp normalization leads to much more improved performance, and matches the performance of original DINO (Table 5) with a much smaller codebook." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.693, + 0.827, + 0.763 + ], + "angle": 0, + "content": "- MSN's ME-MAX regularization is very effective, and leads to significant improvements over others. We also found it is sensitive to the regularization weight and teacher temperature (c.f. Table 10). However, we observed that combining ME-MAX with Sinkhorn does not work well without tuning the regularization weight (which is recommended by Assran et al. (2022))." + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.619, + 0.827, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.78, + 0.519, + 0.794 + ], + "angle": 0, + "content": "C.2 EMPIRICAL STUDY OF PROJECTION HEADS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.825, + 0.876 + ], + "angle": 0, + "content": "In this subsection, we systematically study the effect of projection heads for different evaluation metrics. In particular, we used DINO* ViT-S/16 as the base model and used different projection heads with (depth, width) searched over \\(\\{2,3,4\\} \\times \\{512,1024,2048,4096\\}\\). 
All models are trained with 300 epochs using exact the same set of hyper-parameters. We measured the linear evaluation performance with different amount of labeled data (i.e., full-data, \\(1\\%\\) data, 1-shot)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In Fig. 5a, we plot different evaluation metrics (normalized respectively by the best of each) versus the number of projection head parameters. In Figs. 5b to 5d, we plot each unnormalized evaluation metric respectively for different heads as well as our \\((h_{\\psi}, \\mu)\\)-ensembles. Our key findings are:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.104, + 0.652, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.286, + 0.825, + 0.342 + ], + "angle": 0, + "content": "Figure 6: Effect of teacher temperature for non-ensemble DINO*. DINO* with a lower temperature can achieve better few-shot performance, but still under-performs our ensemble method (DINO*-ENT with 16 heads, orange lines). DINO* ViT-S/16 trained for 300 epochs is used and \\(\\tau = 0.025\\) is used for DINO*-ENT." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.372, + 0.825, + 0.469 + ], + "angle": 0, + "content": "- The projection head has a relatively larger impact on few-shot evaluation metrics, as reflected by the relative magnitudes of different metrics in Fig. 5a. An too powerful non-ensemble projection head significantly hurts the label efficiency of learned representations, reflected by a much larger drop in few-shot evaluation performance (up to 18 p.p. for 1-shot, 9 p.p. for \\(1\\%\\) data). 
This result is also partially observed in Chen et al. (2020b), where they found that probing from intermediate layers of projection heads (which can be viewed as using a shallower head) could improve the semi-supervised learning (\\(1\\% - 10\\%\\)) results." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.476, + 0.825, + 0.572 + ], + "angle": 0, + "content": "- The optimal projection head for different metrics can differ a lot. A weaker head improves label efficiency (few-shot performance), while a stronger (but not too strong) head improves linear decodability. As a result, the default projection head (3/2048) that is widely used in SimCLR v2 (Chen et al., 2020b), DINO (Caron et al., 2021), iBOT (Zhou et al., 2022), MSN (Assran et al., 2022), etc., does not perform well in few-shot evaluations (as shown by the green cross denoted as 'Default'), probably because it is selected by full-data evaluation metrics." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.581, + 0.825, + 0.607 + ], + "angle": 0, + "content": "- There exist some projection heads that performs decently well on all evaluation metrics, e.g., the baseline model (3/1024) used in our experiments (pink star denoted as 'Our base')." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.615, + 0.825, + 0.684 + ], + "angle": 0, + "content": "- Compared to naively tuning projection head architectures, our \\((h_{\\psi}, \\mu)\\)-ensembles (orange curves in Figs. 5b to 5d) consistently improve all metrics with different amount of labeled data, despite it also increases the number of parameters in projection heads. Our \\((h_{\\psi}, \\mu)\\)-ensembles outperform all non-ensembles, which also include the counterparts of probing from intermediate layers from the a deeper head (i.e., shallower heads)." 
+ }, + { + "type": "list", + "bbox": [ + 0.214, + 0.372, + 0.825, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.704, + 0.515, + 0.719 + ], + "angle": 0, + "content": "C.3 EMPIRICAL STUDY OF \\((h_{\\psi},\\mu)\\)-ENSEMBLES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.827, + 0.911 + ], + "angle": 0, + "content": "Are the gains of ENT purely from sharper teacher predictions? Our ENT strategy assigns higher weights to the heads that predict with lower entropies, thus effectively uses sharper teacher predictions as the targets. One may be curious about how this effect accounts for the gains of the ENT strategy. We empirically answer this question by studying the non-ensemble baseline that uses a sharper teacher predictions in a data-independent manner (in contrast to ENT, which uses data-dependent entropy weights). Specifically, we compare the non-ensemble DINO* that use different teacher temperature \\(\\tau \\in \\{0.005, 0.01, 0.025, 0.05\\}\\) and also our DINO*-ENT (16) with \\(\\tau = 0.025\\), as shown in Fig. 6. We find that the teacher temperature has a big impact on evaluation results especially for few-shot evaluation. Compared to our default baseline that uses \\(\\tau = 0.025\\), a lower temperature (e.g., \\(\\tau = 0.01\\)) can indeed improve the 1-shot performance (at the cost of worse full-data performance). However, an too low temperature (\\(\\tau = 0.005\\)) will hurt the performance. Our DINO*-ENT (16) consistently outperform all the baselines, which implies the importance of selecting sharper teacher predictions in a data-dependent manner." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.828, + 0.173 + ], + "angle": 0, + "content": "Table 11: Full table of Table 1 including all metrics for comparing different ensemble strategies. ENT and PROB significantly improves over the non-ensemble baseline, while UNIF leads to no gains. Ensembling the whole projection head works the best. All models are DINO* ViT-S/16 trained for 300 epochs. The means and standard deviations over 3 initialization seeds for all evaluation results are reported." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.19, + 0.825, + 0.346 + ], + "angle": 0, + "content": "
HowWhereFew-shotFull-data
Proj. HeadCodebook125~13 (1%)k-NNLinear
Base40.6 ± 0.249.8 ± 0.257.9 ± 0.363.4 ± 0.272.3 ± 0.174.4 ± 0.1
UNIF40.4 ± 0.449.5 ± 0.457.6 ± 0.363.3 ± 0.372.2 ± 0.274.5 ± 0.2
PROB39.7 ± 0.549.0 ± 0.557.4 ± 0.463.0 ± 0.472.8 ± 0.274.8 ± 0.1
PROB41.9 ± 0.351.5 ± 0.559.6 ± 0.465.1 ± 0.373.7 ± 0.375.4 ± 0.1
ENT40.6 ± 0.449.5 ± 0.658.0 ± 0.463.5 ± 0.472.1 ± 0.374.5 ± 0.3
ENT43.0 ± 0.652.2 ± 0.859.7 ± 0.764.8 ± 0.572.9 ± 0.675.1 ± 0.4
ENT44.0 ± 0.253.0 ± 0.560.5 ± 0.365.5 ± 0.173.2 ± 0.175.3 ± 0.1
ENT-ST40.0 ± 0.549.2 ± 0.657.3 ± 0.562.7 ± 0.571.9 ± 0.474.0 ± 0.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.351, + 0.825, + 0.395 + ], + "angle": 0, + "content": "Table 12: Comparison of different varants of PROB. The PROB strategy used in our experiments performs the best. ' -' in the table denotes training divergence for PROB-MAX. The experimental setup is the same as Table 11." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.413, + 0.822, + 0.513 + ], + "angle": 0, + "content": "
HowWhereFew-shotFull-data
Weight byTemp. γ125~13 (1%)k-NNLinear
Base40.6 ± 0.249.8 ± 0.257.9 ± 0.363.4 ± 0.272.3 ± 0.174.4 ± 0.1
PROBstudent141.9 ± 0.351.5 ± 0.559.6 ± 0.465.1 ± 0.373.7 ± 0.375.4 ± 0.1
PROB-TEteacher141.5 ± 0.250.4 ± 0.358.3 ± 0.363.7 ± 0.172.3 ± 0.274.6 ± 0.1
PROB-MAXstudent0------
PROB-MAX-TEteacher041.4 ± 0.250.3 ± 0.358.1 ± 0.363.6 ± 0.272.3 ± 0.274.5 ± 0.2
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.533, + 0.825, + 0.561 + ], + "angle": 0, + "content": "Comparison of different ensemble strategies and variants We present the full table of Table 1 that includes all the metrics in Table 11. The same observation holds for all metrics." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.568, + 0.827, + 0.611 + ], + "angle": 0, + "content": "For all previous studies, we considered a specific instantiation of PROB strategy, i.e., weight by student predicted probabilities \\( f_{ijy} = \\log s(y|\\theta_j,x) \\) and \\( \\gamma = 1 \\), which has a nice interpretation of model average (see Sec. 3.3). We also studied different variants of the PROB strategy (see Appx. D.1)," + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.623, + 0.625, + 0.64 + ], + "angle": 0, + "content": "PROB-TE: weight by teacher \\( f_{ijy} = \\log t_i(y|x) \\) and \\( \\gamma = 1 \\)" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.648, + 0.661 + ], + "angle": 0, + "content": "PROB-MAX: weight by student \\( f_{ijy} = \\log s_j(y|x) \\) and \\( \\gamma \\rightarrow 0 \\)" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.665, + 0.664, + 0.682 + ], + "angle": 0, + "content": "PROB-MAX-TE: weight by teacher \\( f_{ijy} = \\log t_i(y|x) \\) and \\( \\gamma \\to 0 \\)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.623, + 0.664, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.693, + 0.827, + 0.82 + ], + "angle": 0, + "content": "Table 12 compares the downstream performance for all the variants. We find that the our PROB (used in our empirical studies) performs better than other variants. Interestingly, weighting by the teacher (PROB-TE) performs worse than PROB. 
We conjecture that this is because the important weights turn out to give a weighted average of teacher predictions as the surrogate target that is shared across all students (like PROB) but does not give effective preferential treatment across students which are directly optimized (unlike PROB-TE). Furthermore, PROB-MAX which sharpens the importance weights leads to training divergence. This is probably because the student predictions have higher variance based on which sharp weights lead to unstable training. In contrast, PROB-MAX-TE which uses the (lower-variance) teacher gives reasonable results and comparable to PROB-TE." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.836, + 0.828, + 0.88 + ], + "angle": 0, + "content": "Number of ensembles for \\(\\mathbf{MSN}^*\\) In Fig. 7a, we study the effect of increasing the number of \\((h_{\\psi},\\mu)\\)-ensembles for \\(\\mathbf{MSN}^*\\)-ENT with ViT-S/16 trained for 800 epochs. The scaling trend is similar to DINO\\*-ENT (Fig. 3a) and the gains start to diminish when the number of heads increases above 8." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Effect of ENT temperature \\(\\gamma\\) for \\(\\mathbf{MSN}^*\\) Fig. 7b studies the effect of entropy weighting temperature \\(\\gamma\\) for \\(\\mathbf{MSN}^*\\)-ENT. 
We observed that \\(\\mathbf{MSN}^*\\) is more robust to small temperatures, and the" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.208, + 0.105, + 0.49, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.277, + 0.267, + 0.426, + 0.281 + ], + "angle": 0, + "content": "(a) Scaling of ensembles" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.103, + 0.784, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.267, + 0.742, + 0.281 + ], + "angle": 0, + "content": "(b) Effect of ENT temperature \\(\\gamma\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.292, + 0.828, + 0.336 + ], + "angle": 0, + "content": "Figure 7: Empirical study for \\(\\mathbf{MSN}^*\\) -ENT. (a) The gains by increasing the number of \\((h_{\\psi},\\mu)\\) ensembles start to diminish when it is over 8 heads. (b) \\(\\mathbf{MSN}^*\\) prefers smaller temperature for entropy weighting than DINO*." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.415, + 0.825, + 0.446 + ], + "angle": 0, + "content": "best \\(\\gamma = 0.01\\) is smaller than that of DINO\\* \\((\\gamma = 0.05)\\). When the temperature is too high, the performance drops as a result of under-specialization (i.e., less diversity) as with DINO\\*." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.52, + 0.519, + 0.535 + ], + "angle": 0, + "content": "C.4 ANALYZING \\((h_{\\psi},\\mu)\\) -ENSEMBLE DIVERSITY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.568, + 0.825, + 0.626 + ], + "angle": 0, + "content": "Visualizing \\((h_{\\psi},\\mu)\\)-ensemble similarity We analyze the diversity between different heads by visualizing the similarity matrix between their codes. Directly measuring the similarity between codes in two heads could not work, because 1) they may live in different subspaces because of the ensembled projection heads; 2) they may not align in the natural order but in a permuted order." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.631, + 0.827, + 0.794 + ], + "angle": 0, + "content": "Therefore, we seek to align codes between different heads by how they are effectively used to 'cluster' the data. In particular, we use a set of randomly sampled inputs \\(\\{x^i\\}_{i\\in [b]}\\) of size \\(b = 51200\\) to obtain an empirical code assignment matrix \\(A^{j}\\in \\mathbb{R}^{b\\times c}\\) for each \\((h_{\\psi},\\mu)\\)-ensemble \\(j\\in [m]\\), where the \\(i\\)-th row of \\(A^j\\) corresponds to the teacher predictions \\(t_j(Y|x^i)\\). For the \\(k\\)-th code in the head \\(j\\), we extract the \\(k\\)-th column from \\(A^j\\) (i.e., its empirical assignment) as its embedding. For two codes, we measure their similarity by the cosine similarity between their embeddings. For a pair of heads \\(j\\) and \\(j'\\), we align their codes using the Hungarian algorithm (Kuhn, 1955) to maximize the sum of cosine similarity. After that, we plot the similarity matrix which is aligned and reordered by the similarity value on the diagonal (in an descending order). 
Note that it is not necessary to do the alignment procedure for the PROB strategy since it is naturally aligned because of the direct distribution averaging over \\((h_{\\psi},\\mu)\\)-ensembles, but we did for fair comparison with other strategies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.827, + 0.926 + ], + "angle": 0, + "content": "We applied the same procedure for different ensemble weighting strategies using DINO* with 4 \\((h_{\\psi},\\mu)\\)-ensembles. We randomly picked a pair of heads and visualize the similarity matrix before (top row) and after (bottom row) the alignment-reordering setup in Fig. 8. We found that before the alignment procedure, the similarity matrix of the PROB strategy already mostly aligns because it explicitly introduces code correspondence between different heads. Furthermore, by analyzing the similarity decay pattern on the diagonal, it is clear that ENT learns the most diverse \\((h_{\\psi},\\mu)\\)-ensembles while UNIF learns the least ones, which may explain the difference of their empirical performance. For completeness, we also include the visualization of aligned similarity matrices for all pairs of heads in Figs. 9 to 11, the observations are the same." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.118, + 0.38, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.27, + 0.383, + 0.326, + 0.397 + ], + "angle": 0, + "content": "(a) UNIF" + }, + { + "type": "image", + "bbox": [ + 0.403, + 0.118, + 0.565, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.455, + 0.383, + 0.514, + 0.397 + ], + "angle": 0, + "content": "(b) PROB" + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.119, + 0.782, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.66, + 0.383, + 0.71, + 0.397 + ], + "angle": 0, + "content": "(c)ENT" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.408, + 0.828, + 0.48 + ], + "angle": 0, + "content": "Figure 8: Visualization of \\((h_{\\psi},\\mu)\\)-ensemble diversity. ENT learns the most diverse \\((h_{\\psi},\\mu)\\)-ensembles while UNIF learns the least ones. We visualize the code similarity matrix between a pair of randomly selected projection heads. Top row shows the original similarity matrix (i.e., in natural order) and the bottom row shows the aligned similarity matrix which aligns codes by empirical assignment probabilities. DINO* ViT-S/16 with 4 heads is used. Best viewed in color." 
+ }, + { + "type": "image", + "bbox": [ + 0.233, + 0.507, + 0.402, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.41, + 0.508, + 0.578, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.508, + 0.792, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.689, + 0.4, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.689, + 0.577, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.689, + 0.754, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.836, + 0.828, + 0.879 + ], + "angle": 0, + "content": "Figure 9: Visualization of \\((h_{\\psi},\\mu)\\)-ensemble diversity between all pairs of heads for DINO*UNIF. The UNIF strategy does not learn diverse \\((h_{\\psi},\\mu)\\)-ensembles. DINO* with ViT-S/16 and 4 heads is used. Best viewed in color." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.143, + 0.402, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.304, + 0.364, + 0.316 + ], + "angle": 0, + "content": "Head 2-Head 3" + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.143, + 0.578, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.446, + 0.304, + 0.542, + 0.316 + ], + "angle": 0, + "content": "Head 2 - Head 4" + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.143, + 0.792, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.624, + 0.304, + 0.719, + 0.316 + ], + "angle": 0, + "content": "Head 3 - Head 4" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.325, + 0.401, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.325, + 0.578, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.325, + 0.756, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.472, + 0.828, + 0.515 + ], + "angle": 0, + "content": "Figure 10: Visualization of \\((h_{\\psi},\\mu)\\)-ensemble diversity between all pairs of heads for DINO\\*PROB. The PROB strategy learns more diverse \\((h_{\\psi},\\mu)\\)-ensembles than UNIF. DINO\\* with ViT-S/16 and 4 heads is used. Best viewed in color." 
+ }, + { + "type": "image", + "bbox": [ + 0.233, + 0.542, + 0.401, + 0.695 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.704, + 0.363, + 0.716 + ], + "angle": 0, + "content": "Head 2 - Head 3" + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.543, + 0.578, + 0.695 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.447, + 0.704, + 0.541, + 0.716 + ], + "angle": 0, + "content": "Head 2 - Head 4" + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.543, + 0.792, + 0.696 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.624, + 0.704, + 0.718, + 0.716 + ], + "angle": 0, + "content": "Head 3 - Head 4" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.726, + 0.401, + 0.854 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.726, + 0.578, + 0.854 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.726, + 0.756, + 0.854 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.872, + 0.828, + 0.915 + ], + "angle": 0, + "content": "Figure 11: Visualization of \\((h_{\\psi},\\mu)\\)-ensemble diversity between all pairs of heads for DINO*-ENT. The ENT strategy learns the most diverse \\((h_{\\psi},\\mu)\\)-ensembles. DINO* with ViT-S/16 and 4 heads is used. Best viewed in color." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.298, + 0.119 + ], + "angle": 0, + "content": "D ANALYSIS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.136, + 0.317, + 0.15 + ], + "angle": 0, + "content": "D.1 DERIVATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.162, + 0.825, + 0.19 + ], + "angle": 0, + "content": "In this subsection, we provide derivations for some non-trivial losses that we explore within our framework." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.198, + 0.557, + 0.213 + ], + "angle": 0, + "content": "Recall that our weighted cross-entropy loss is of the form," + }, + { + "type": "equation", + "bbox": [ + 0.323, + 0.22, + 0.825, + 0.298 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {n} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} H ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] (15) \\\\ = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \\log s (y | \\theta_ {j}, x) (16) \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.301, + 0.825, + 0.328 + ], + "angle": 0, + "content": "\\[\nw _ {i j y} = \\operatorname {s o f t m a x} \\left(\\left\\{\\frac {1}{\\gamma} f _ {i j y} (\\operatorname {s t o p g r a d} (\\theta), x): i, j \\in [ m ] \\right\\}\\right). 
\\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.341, + 0.352, + 0.355 + ], + "angle": 0, + "content": "Furthermore, observe that," + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.363, + 0.825, + 0.401 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta} \\sum_ {i, j \\in [ m ]} \\mathsf {H} ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] = \\sum_ {i, j \\in [ m ]} \\int_ {\\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \\nabla_ {\\theta} \\log s (y | \\theta_ {j}, x) \\mathrm {d} y. \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.408, + 0.825, + 0.438 + ], + "angle": 0, + "content": "This indicates that the proposed weighted ensemble SSL loss is simply a reweighted log-likelihood loss. We use this fact in our derivation of probability weighting (PROB) loss." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.451, + 0.825, + 0.484 + ], + "angle": 0, + "content": "Uniform weighting (UNIF) Our UNIF strategy in Eq. (6) uses \\( f_{ijy} = \\log \\delta (i - j) \\) which gives \\( w_{ijy} = \\frac{1}{m}\\delta (i - j) \\) (for any choice of \\( \\gamma \\)), thus the loss," + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.49, + 0.825, + 0.57 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} \\frac {1}{m} \\delta (i - j) t _ {i} (y | x) \\log s (y | \\theta_ {j}, x) (19) \\\\ = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m} \\sum_ {i \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right] (20) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.577, + 0.643, + 0.592 + ], + "angle": 0, + "content": "This loss assigns equal weights to \\( m \\) pairs of pairwised student/teacher." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.598, + 0.827, + 0.629 + ], + "angle": 0, + "content": "A straightforward generalization is to assign equal weights to all possible pairs \\((m^2)\\) of student/teacher with \\(f_{ijy} = 0\\) and \\(w_{ijy} = \\frac{1}{m^2}\\), which gives the UNIF-ALL loss," + }, + { + "type": "equation", + "bbox": [ + 0.296, + 0.635, + 0.825, + 0.673 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\text {U N I F - A L L}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m ^ {2}} \\sum_ {i, j \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right], \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.688, + 0.696, + 0.704 + ], + "angle": 0, + "content": "Probability weighting (PROB) Recall our PROB loss in Eq. (7) has the form," + }, + { + "type": "equation", + "bbox": [ + 0.281, + 0.711, + 0.825, + 0.76 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\mathrm {H} ^ {\\times} \\left[ \\frac {1}{m} \\sum_ {i \\in [ m ]} t _ {i} (Y | x), \\frac {1}{m} \\sum_ {j \\in [ m ]} s (Y | \\theta_ {j}, x) \\right]. 
\\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.767, + 0.825, + 0.796 + ], + "angle": 0, + "content": "We derive its equivalence with our general loss with \\( f_{ijy} = \\log s(y|\\theta_j,x) \\) and \\( \\gamma = 1 \\) in terms of the gradients," + }, + { + "type": "equation", + "bbox": [ + 0.249, + 0.803, + 0.825, + 0.927 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\nabla_ {\\theta} \\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\log \\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j}, x) \\mathrm {d} y (23) \\\\ = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\nabla_ {\\theta} \\log \\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j}, x) d y (24) \\\\ = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\frac {\\frac {1}{m} \\sum_ {j \\in [ m ]} \\nabla_ {\\theta} s (y | \\theta_ {j} , x)}{\\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j} , x)} d y (25) \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.101, + 0.825, + 0.226 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\frac {\\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j} , x) \\nabla_ {\\theta} \\log s (y | \\theta_ {j} , x)}{\\frac {1}{m} \\sum_ {j ^ {\\prime} \\in [ m ]} s (y | \\theta_ {j ^ {\\prime}} , x)} d y (26) \\\\ = \\frac {1}{m} \\sum_ {i, j \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\frac {s (y | \\theta_ {j} , x)}{\\sum_ {j ^ {\\prime} \\in [ m ]} s (y | \\theta_ {j ^ {\\prime}} , x)} \\nabla_ {\\theta} 
\\log s (y | \\theta_ {j}, x) d y (27) \\\\ = \\nabla_ {\\theta} \\frac {1}{m} \\sum_ {i, j \\in [ m ]} \\mathsf {H} ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] (28) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.238, + 0.827, + 0.317 + ], + "angle": 0, + "content": "where \\( w_{ijy} = \\frac{s(y|\\theta_j,x)}{\\sum_{j'\\in[m]}s(y|\\theta_{j'}x)} \\) (or equivalently, \\( f_{ijy} = \\log s(y|\\theta_j,x) \\) and \\( \\gamma = 1 \\)). The last equality is because \\( w_{ijy} \\) is stopped gradient with respect to \\( \\theta \\). This is the same analysis as done in Burda et al. (2016). The above formation establishes the equivalence of gradients between two losses, which implies the same behavior (e.g., optimum) using gradient-based optimization, as the common practice of deep learning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.322, + 0.825, + 0.372 + ], + "angle": 0, + "content": "We also generalize this loss to some variants which we explore in Table 12. A \"dual\" variant is to use teacher predictions \\( f_{ijy} = \\log t_i(y|x) \\) instead of student ones; this implies \\( w_{ijy} = \\frac{t_i(y|x)}{\\sum_{i' \\in [m]} t_{i'}(y|x)} \\) and the PROB-TE loss," + }, + { + "type": "equation", + "bbox": [ + 0.256, + 0.38, + 0.825, + 0.42 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B - T E}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} \\frac {t _ {i} (y | x)}{\\sum_ {i ^ {\\prime} \\in [ m ]} t _ {i ^ {\\prime}} (y | x)} t _ {i} (y | x) \\log s (y | \\theta_ {j}, x). 
\\tag {29}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.825, + 0.466 + ], + "angle": 0, + "content": "Note that this simply reduces to use a weighted teacher predictions \\(\\frac{t_i(y|x)}{\\sum_{i' \\in [m]} t_{i'}(y|x)} t_i(y|x)\\) as the surrogate target that is shared across all students." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.472, + 0.825, + 0.502 + ], + "angle": 0, + "content": "Another generalization is to use \"hard\" weighting, i.e., \\(\\gamma \\rightarrow 0\\), which gives the PROB-MAX loss that only assigns weight to the most confident student," + }, + { + "type": "equation", + "bbox": [ + 0.259, + 0.51, + 0.825, + 0.548 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B - M A X}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \\log s (y | \\theta_ {j}, x) \\tag {30}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.262, + 0.55, + 0.825, + 0.574 + ], + "angle": 0, + "content": "\\[\nw _ {i j y} = \\delta \\left(i - i ^ {*}\\right) \\delta \\left(j - j ^ {*}\\right), \\quad \\left(i ^ {*}, j ^ {*}\\right) = \\arg \\max _ {i j} f _ {i j y}, \\forall y \\in \\mathcal {Y}. \\tag {31}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.583, + 0.825, + 0.626 + ], + "angle": 0, + "content": "This loss reduces to a generalization of multiple choice learning (Guzman-Rivera et al., 2012) used in multi-headed networks (Lee et al., 2015) in our ensemble SSL setup. Similarly we can also derive the dual variant of it that uses the teacher predictions, which is omitted here for brevity." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.642, + 0.825, + 0.689 + ], + "angle": 0, + "content": "Entropy weighting (ENT) The derivation of ENT loss in Eq. (9) is similar to the UNIF loss but applies an entropy weights. 
Recall that we use \\( f_{ijy} = -\\mathsf{H}[t_i(Y|x)] + \\log \\delta (i - j) \\), which gives \\( w_{ijy} = \\mathrm{softmax}_i(\\{-\\frac{1}{\\gamma}\\mathsf{H}[t_{i'}(Y|x)]:i'\\in [m]\\}) \\) and," + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.698, + 0.825, + 0.737 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\mathrm {E N T}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i \\in [ m ]} \\operatorname {s o f t m a x} _ {i} \\left(\\left\\{- \\frac {1}{\\gamma} \\mathrm {H} \\left[ t _ {i ^ {\\prime}} (Y | x) \\right]: i ^ {\\prime} \\in [ m ] \\right\\}\\right) \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right]. \\tag {32}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.745, + 0.825, + 0.777 + ], + "angle": 0, + "content": "One can also generalize it to its dual variant which uses the student entropies, i.e., \\( f_{ijy} = -\\mathsf{H}[s(Y|\\theta_j,x)] + \\log \\delta (i - j) \\), which gives the ENT-ST loss," + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.784, + 0.825, + 0.837 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\mathrm {E N T - S T}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i \\in [ m ]} \\operatorname {s o f t m a x} _ {i} \\left(\\left\\{- \\frac {1}{\\gamma} \\mathrm {H} [ s (Y | \\theta_ {i ^ {\\prime}}, x) ]: i ^ {\\prime} \\in [ m ] \\right\\}\\right) \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right]. \\tag {33}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.856, + 0.392, + 0.87 + ], + "angle": 0, + "content": "D.2 RELATING SOME LOSSES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Here, we relate some losses derived above. Specifically, we relate the uniform weighting (UNIF, UNIF-ALL) and probability weighting (PROB) in Appx. 
D.2.1, and relate entropy weighting (ENT) and variance weighting in Appx. D.2.2." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.509, + 0.119 + ], + "angle": 0, + "content": "D.2.1 UNIFORM & PROBABILITY WEIGHTING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.128, + 0.827, + 0.213 + ], + "angle": 0, + "content": "We first establish the relation between UNIF and PROB using the joint convexity of unnormalized KL divergence and the fact that our weighted cross-entropy loss is a weighted unnormalized KL divergence up to some constant in \\(\\theta\\). In particular, the joint convexity of unnormalized KL divergence can be shown by combining the facts that Csiszár \\(f\\)-divergences are jointly convex (Proposition 1 in Dragomir (2013)) and unnormalized KL divergence corresponds to the convex generator, \\(f(u) = u\\log u - u + 1\\), as required by the proposition." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.219, + 0.8, + 0.234 + ], + "angle": 0, + "content": "First, our weighted cross-entropy loss is unnormalized KL divergence up to some constant in \\(\\theta\\):" + }, + { + "type": "equation", + "bbox": [ + 0.254, + 0.24, + 0.825, + 0.279 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m} \\sum_ {i \\in [ m ]} \\mathrm {K} \\left[ t _ {i} (Y | x), s \\left(Y \\mid \\theta_ {i}, x\\right) \\right] + \\text {c o n s t a n t} \\tag {34}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.254, + 0.282, + 0.825, + 0.331 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} K \\left[ \\frac {1}{m} \\sum_ {i \\in [ m ]} t _ {i} (Y | x), \\frac {1}{m} \\sum_ {j \\in [ m ]} s (Y | \\theta_ {j}, x) \\right] + \\text {c o n s t a n t} \\tag {35}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.337, + 0.825, + 0.366 + ], + "angle": 0, + "content": "Therefore, the joint convexity of (unnormized) KL divergence directly implies an ordering of the loss up to some constant in \\(\\theta\\), i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.447, + 0.372, + 0.825, + 0.39 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} \\leq \\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} \\tag {36}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.827, + 0.439 + ], + "angle": 0, + "content": "Furthermore, we can also relate PROB and UNIF-ALL using the fact that the (unnormized) cross-entropy \\(\\mathsf{H}^{\\times}[p(X), q(X)]\\) is linear in the first argument \\(p\\) but convex in the second argument \\(q\\), which implies," + }, + { + "type": "equation", + "bbox": [ + 0.434, + 0.446, + 0.825, + 0.465 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {n} ^ {\\text {P R O B}} \\leq \\mathcal {L} 
_ {n} ^ {\\text {U N I F - A L L}} \\tag {37}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.478, + 0.486, + 0.492 + ], + "angle": 0, + "content": "D.2.2 ENTROPY & VARIANCE WEIGHTING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.502, + 0.741, + 0.519 + ], + "angle": 0, + "content": "Suppose \\(p(X)\\) is a discrete distribution (normalized) on \\(\\mathcal{X} = [c]\\). It can be shown that," + }, + { + "type": "equation", + "bbox": [ + 0.336, + 0.524, + 0.825, + 0.543 + ], + "angle": 0, + "content": "\\[\n\\mathsf {H} [ p (X) ] \\leq \\frac {1}{2} \\log \\left(\\operatorname {V a r} _ {p} [ X ] + \\frac {1}{1 2}\\right) + \\frac {1}{2} \\log (2 \\pi e) \\tag {38}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.55, + 0.827, + 0.631 + ], + "angle": 0, + "content": "where \\(\\operatorname{Var}_p[X] = \\sum_{x \\in [c]} p(x)(x - \\mu)^2\\) and \\(\\mu = \\mathsf{E}_p[X] = \\sum_{x \\in [c]} p(x)x\\) (Theorem 9.7.1, Cover & Thomas (1999)). Note, a tighter bound (Mow, 1998) also exists but it places stronger restrictions on \\(p\\). This relationship suggests that choosing weights proportional to \\(\\exp(-\\mathsf{H}[t_i(Y|x)])\\) (as in ENT) is potentially related to choosing weights proportional to weighting by variance \\((\\operatorname{Var}_{t_i(Y|x)}[Y] + \\epsilon)^{-1/2}\\) where \\((\\epsilon = \\frac{1}{12})\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "29" + } + ] +] \ No newline at end of file diff --git a/2023/Weighted Ensemble Self-Supervised Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_origin.pdf b/2023/Weighted Ensemble Self-Supervised Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e53f1b6dfc2b6fca10b5fb40fe202b439db7d69e --- /dev/null +++ b/2023/Weighted Ensemble Self-Supervised Learning/0c863f59-c784-4516-9026-d5e5e7ae916e_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15ea395d8c0e0c58246b2ead9ac69d84ab9597662eb36d3e3fae8e29d55b7e94 +size 777640 diff --git a/2023/Weighted Ensemble Self-Supervised Learning/full.md b/2023/Weighted Ensemble Self-Supervised Learning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1fb9f918dca1831e140460852cbccd47d17fa203 --- /dev/null +++ b/2023/Weighted Ensemble Self-Supervised Learning/full.md @@ -0,0 +1,728 @@ +# WEIGHTED ENSEMBLE SELF-SUPERVISED LEARNING + +Yangjun Ruan*† Saurabh Singh Warren Morningstar Alexander A. Alemi +Sergey Ioffe Ian Fischer† Joshua V. Dillon† +Google Research + +# ABSTRACT + +Ensembling has proven to be a powerful technique for boosting model performance, uncertainty estimation, and robustness in supervised learning. Advances in self-supervised learning (SSL) enable leveraging large unlabeled corpora for state-of-the-art few-shot and supervised learning performance. In this paper, we explore how ensemble methods can improve recent SSL techniques by developing a framework that permits data-dependent weighted cross-entropy losses. We refrain from ensembling the representation backbone; this choice yields an efficient ensemble method that incurs a small training cost and requires no architectural changes or computational overhead to downstream evaluation. 
The effectiveness of our method is demonstrated with two state-of-the-art SSL methods, DINO (Caron et al., 2021) and MSN (Assran et al., 2022). Our method outperforms both in multiple evaluation metrics on ImageNet-1K, particularly in the few-shot setting. We explore several weighting schemes and find that those which increase the diversity of ensemble heads lead to better downstream evaluation results. Thorough experiments yield improved prior art baselines which our method still surpasses; e.g., our overall improvement with MSN ViT-B/16 is 3.9 p.p. for 1-shot learning. + +# 1 INTRODUCTION + +The promise of self-supervised learning (SSL) is to extract information from unlabeled data and leverage this information in downstream tasks (He et al., 2020; Caron et al., 2021); e.g., semi-supervised learning (Chen et al., 2020a,b), robust learning (Radford et al., 2021; Ruan et al., 2022; Lee et al., 2021), few-shot learning (Assran et al., 2022), and supervised learning (Tomasev et al., 2022). These successes have encouraged increasingly advanced SSL techniques + +(e.g., Grill et al., 2020; Zbontar et al., 2021; He et al., 2022). Perhaps surprisingly however, a simple and otherwise common idea has received limited consideration: ensembling. + +Ensembling combines predictions from multiple trained models and has proven effective at improving model accuracy (Hansen & Salamon, 1990; Perrone & Cooper, 1992) and capturing predictive uncertainty in supervised learning (Lakshminarayanan et al., 2017; Ovadia et al., 2019). Ensembling in the SSL regime is nuanced, however; since the goal is to learn useful representations from unlabeled data, it is less obvious where and how to ensemble. We explore these questions in this work. + +We develop an efficient ensemble method tailored for SSL that replicates the non-representation parts (e.g., projection heads) of the SSL model. 
In contrast with traditional "post-training" ensembling, our ensembles are only used during training to facilitate the learning of a single representation encoder, which yields no extra cost in downstream evaluation. We further present a family of weighted cross-entropy losses to effectively train the ensembles. The key component of our losses is the introduction of data-dependant importance weights for ensemble members. We empirically compare different choices from our framework and find that the choice of weighting schemes critically impacts ensemble diversity, and that greater ensemble diversity correlates with improved downstream performance. Our method is potentially applicable to many SSL methods; we focus on DINO (Caron et al., 2021) and MSN (Assran et al., 2022) to demonstrate its effectiveness. Fig. 1 shows DINO improvements from using our ensembling and weighted cross-entropy loss. + +In summary, our core contributions are to: + +- Develop a downstream-efficient ensemble method suitable for many SSL techniques (Sec. 3.1). +- Characterize an ensemble loss family of weighted cross-entropy objectives (Sec. 3.2). +- Conduct extensive ablation studies that improve the prior art baselines by up to 6.3 p.p. (Sec. 5.1). +- Further improve those baselines with ensembling (e.g., up to 5.5 p.p. gain for 1-shot) (Table 2). + +# 2 BACKGROUND + +In this section, we frame SSL methods from the perspective of maximum likelihood estimation (MLE) and use this as the notational basis to describe the state-of-the-art clustering-based SSL methods as well as derive their ensembled variants in Sec. 3. 
+ +From Maximum Likelihood to SSL Denote unnormalized KL divergence (Dikmen et al., 2014) between non-negative integrable functions $p, q$ by $\mathsf{K}[p(X), q(X)] = \mathsf{H}^{\times}[p(X), q(X)] - \mathsf{H}[p(X)]$ , where $\mathsf{H}^{\times}[p(X), q(X)] = -\int_{\mathcal{X}} p(x) \log q(x) \, \mathrm{d}x + \int_{\mathcal{X}} q(x) \, \mathrm{d}x - 1$ is the unnormalized cross-entropy (with $0 \log 0 = 0$ ) and $\mathsf{H}[p(X)] = \mathsf{H}^{\times}[p(X), p(X)]$ . These quantities simplify to their usual definitions when $p, q$ are normalized, but critically they enable flexible weighting of distributions for the derivation of our weighted ensemble losses in Sec. 3.2. + +Let $\nu(X, Y) = \nu(X)\nu(Y|X)$ be a natural distribution of input/target pairs over the space $\mathcal{X} \times \mathcal{Y}$ and $s(Y|\theta, X)$ be a predictive model of target given the input parameterized by $\theta \in \mathcal{T}$ . Supervised maximum likelihood seeks the minimum expected conditional population risk with respect to $\theta$ , + +$$ +\mathsf {E} _ {\nu (X)} \mathsf {K} [ \nu (Y | X), s (Y | \theta , X) ] = \mathsf {E} _ {\nu (X)} \mathsf {H} ^ {\times} [ \nu (Y | X), s (Y | \theta , X) ] - \mathsf {E} _ {\nu (X)} \mathsf {H} [ \nu (Y | X) ]. \tag {1} +$$ + +Henceforth omit $\mathsf{E}_{\nu(X)} \mathsf{H}[\nu(Y|X)]$ since it is constant in $\theta$ . Since $\nu(X, Y)$ is unknown, a finite sample approximation is often employed. Denote a size- $n$ i.i.d. training set by $\mathcal{D}_n = \{x_i\}_{i \in [n]} \sim \nu^{\otimes n}$ and empirical distribution by $\hat{\nu}(X, Y) = \frac{1}{n} \sum_{x \in \mathcal{D}_n, y \sim \nu(Y|x)} \delta(X - x) \delta(Y - y)$ where $\delta: \mathbb{R} \to \{0, 1\}$ is 1 when $x = 0$ and 0 otherwise. The sample risk is thus $-\frac{1}{n} \sum_{x \in \mathcal{D}_n} \mathsf{H}^\times[\hat{\nu}(Y|x), s(Y|\theta, x)]$ . 
+ +In SSL, we interpret $\nu(Y|x)$ as being the oracle teacher under a presumption of how the representations will be evaluated on a downstream task. This assumption is similar to that made in Arora et al. (2019); Nozawa et al. (2020). We also assume $\hat{\nu}(Y|X)$ is inaccessible and/or unreliable. Under this view, some SSL techniques substitute $\hat{\nu}(Y|x)$ for a weakly learned target or "teacher", $t(Y|x)$ . We don't generally expect $t(Y|x)$ to recover $\nu(Y|x)$ ; we only assume that an optimal teacher exists and it is $\nu(Y|x)$ . With the teacher providing the targets, the loss becomes $-\frac{1}{n}\sum_{x\in\mathcal{D}_n}\mathsf{H}^\times[t(Y|x), s(Y|\theta, x)]$ . + +Teacher and student in clustering SSL methods Clustering SSL methods such as SWaV (Caron et al., 2020), DINO (Caron et al., 2021), and MSN (Assran et al., 2022) employ a student model characterized by proximity between learned codebook entries and a data-dependent code, + +$$ +s (Y | \theta , x) = \operatorname {s o f t m a x} \left(\left\{\frac {1}{\tau} \frac {\left(h _ {\psi} \circ r _ {\omega}\right) (x) \cdot \mu_ {y}}{\| \left(h _ {\psi} \circ r _ {\omega}\right) (x) \| _ {2} \| \mu_ {y} \| _ {2}}: y \in [ c ] \right\}\right) \tag {2} +$$ + +$$ +\theta = \{\omega , \psi , \left\{\mu_ {y} \right\} _ {y \in [ c ]} \} \in \mathcal {T}, \tag {3} +$$ + +where the encoder $r_{\omega}:\mathcal{X}\to \mathcal{Z}$ produces the representations used for downstream tasks, and the projection head $h_\psi :\mathcal{Z}\rightarrow \mathbb{R}^d$ and codebook entries $\{\mu_y\}_{y\in \mathcal{Y}}\in \mathbb{R}^d$ characterize the SSL loss. Eq. (2) can be viewed as "soft clustering", where the input is assigned to those centroids that are closer to the projection head's output. The projection head and codebook are used during training but thrown away for evaluation, which is empirically found vital for downstream tasks (Chen et al., 2020a;b). 
Hyperparameters $\tau \in \mathbb{R}_{>0},c\in \mathbb{Z}_{>0}$ represent temperature and codebook size. The teacher is defined as $t(Y|x) = s(Y|\mathrm{stopgrad}(g(\theta)),x)$ where $g:\mathcal{T}\to \mathcal{T}$ . Commonly $g(\theta)$ is an exponential moving average of gradient descent iterates and the teacher uses a lower temperature than the student. + +To capture desirable invariances and prevent degeneracy, data augmentation and regularization (e.g., Sinkhorn-Knopp normalization (Caron et al., 2020), mean entropy maximization (Assran et al., 2022)) are essential. As these are not directly relevant to our method, we omit them for brevity. + +# 3 METHOD + +Ensembling is a technique that combines models to boost performance, and has been especially successful in supervised learning. We are interested in ensembling methods that carry over this success to SSL approaches. However, SSL has key differences, such as throw-away "projection heads", from supervised learning that result in a multitude of possibilities for how to ensemble. With this in mind, we propose first where to ensemble, and then how to ensemble. Those proposals result in an efficient "peri-training" ensembling technique specifically tailored for SSL and a family of weighted ensemble objectives; we subsequently suggest different ways to select the weights. + +# 3.1 WHERE TO ENSEMBLE? + +Denote the teacher/student ensembles by $\{t_i(Y|x)\}_{i\in [m]}$ and $\{s(Y|\theta_j,x)\}_{j\in [m]}$ and define each as in Sec. 2; parameters $\theta = \{\theta_{j}\}_{j\in [m]}\in \mathcal{T}^{m}$ are independently initialized, all students use one temperature and all teachers another. We asymmetrically denote $t_i(Y|x)$ and $s(Y|\theta_j,x)$ to emphasize that teachers' gradients are zero and that the students are distinct solely by way of $\theta_{i}\neq \theta_{j}$ . Studying heterogeneous architectures and/or different teacher parameterizations is left for future work. 
+ +Recall that $\theta_{j}$ parameterizes the encoder, projection head, and codebook parameters: $\theta_{j} = (\omega_{j},\psi_{j},\{\mu_{jy}\}_{y\in \mathcal{Y}})$ . We further restrict $\mathcal{T}^m$ such that $\omega_{i} = \omega_{j}$ , i.e., we limit our consideration to ensembles of projection heads $h_{\psi_j}$ and/or codebooks $\mu_{j}$ but not encoders $r_{\omega_j}$ . This choice makes our ensemble method inherently different from traditional supervised ensembling or encoder $r_{\omega}$ ensembling: the ensembled parts are not used for evaluation but + +for improving the learning of non-enssembled representation encoder during training, thus it requires no change of downstream evaluation or computational cost. Ensembling of $r_{\omega}$ is left for future work. + +![](images/1a32ce26502f3e27b447c32b32c0018e24f6fc95bb0f146fdb5c4659fa233a6f.jpg) +Figure 2: Overview of $(h_{\psi},\mu)$ -ensemble. Two augmented inputs are encoded by the teacher/student into representations, and then processed by an ensemble of heads. The loss for each head is weighted and summed into the final loss. Strike-through edges indicate stop-gradients. See Appx. A for pseudocode. + +# 3.2 HOW TO ENSEMBLE? + +We would like to extend the loss to support an ensemble of teacher/student pairs while respecting the MLE intuition of the loss as in Sec. 2. Additionally, we want to facilitate data-dependent importance weights, thus enabling preferential treatment of some teacher/student pairs. We therefore propose a weighted average (unnormized) cross-entropy loss, + +$$ +\mathcal {L} _ {n} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \sum_ {i, j \in [ m ]} \mathrm {H} ^ {\times} \left[ w _ {i j Y} \odot t _ {i} (Y | x), s (Y | \theta_ {j}, x) \right] \tag {4} +$$ + +$$ +\text {w h e r e} w _ {i j y} = \operatorname {s o f t m a x} \left(\left\{\frac {1}{\gamma} f _ {i j y} (\operatorname {s t o p g r a d} (\theta), x): i, j \in [ m ] \right\}\right). 
\tag {5} +$$ + +The notation $w_{ijY} \odot t_i(Y|x)$ denotes a Hadamard product; i.e., the product of event-specific weights and probabilities for each $y \in \mathcal{V}$ . The hyperparameter $\gamma$ is the temperature. The function $f_{ijy}$ is defined for brevity and discussed in the following section. + +This objective admits generality and flexibility for introducing various weighting schemes, as it supports potential interactions between all teacher/student pairs and allows the weights to be both model- and data-dependent. Up to a constant independent of $\theta$ , it is an importance weighted average of (unnormized) KL divergences between each teacher and each student; i.e., a mixture of MLE-like objectives. We stop the gradient of $w_{ijy}$ to $\theta$ in order to keep the overall gradient a weighted average of students' log-likelihood gradients, similar to Eq. (1). We also normalize the weights such that each data point equally contributes to the loss. + +# 3.3 HOW TO WEIGHT? + +In this section, we present several instantiations of our losses with different weighting schemes. We empirically show in Sec. 5 that the particular choice of weighting scheme is critical for the representation performance and the induced diversity of $(h_{\psi},\mu)$ -ensembles. For simplicity we assume $\gamma = 1$ in this section. We indicate with $\Longleftrightarrow$ that a loss has the same arg min as Eq. (4). For additional analysis and discussion, see Appx. D. 
+ +Uniform weighting (UNIF) The simplest strategy is to treat different teacher/student pairs independently and average each with uniform weighting; i.e., + +$$ +f _ {i j y} = \log \delta (i - j) \Longleftrightarrow \mathcal {L} _ {n} ^ {\mathrm {U N I F}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \frac {1}{m} \sum_ {i \in [ m ]} \mathrm {H} ^ {\times} \left[ t _ {i} (Y | x), s (Y | \theta_ {i}, x) \right] \tag {6} +$$ + +This strategy introduces uniform weights $w_{i} = \frac{1}{m}$ over ensemble elements. The role of $\log \delta (i - j)$ (here and elsewhere) is to sub-select corresponding teacher/student pairs rather than all $m^2$ pairs. + +Probability weighting (PROB) An alternative to using the average cross-entropy loss (UNIF) is to compute the cross-entropy loss of the average predictions whose gradient is weighted by $w_{ijy}$ (see Appx. D.1). At $\gamma = 1$ , those gradient weights simplify into an average over the student probabilities: + +$$ +f _ {i j y} = \log s (y | \theta_ {j}, x) \iff \mathcal {L} _ {n} ^ {\mathrm {P R O B}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \mathsf {H} ^ {\times} \left[ \frac {1}{m} \sum_ {i \in [ m ]} t _ {i} (Y | x), \frac {1}{m} \sum_ {j \in [ m ]} s (Y | \theta_ {j}, x) \right] \tag {7} +$$ + +Averaging the predictive distributions introduces correspondence between codes from different heads; thus different heads are no longer independent but instead cooperate to match the student to the teachers. The loss favors student heads with more confident predictions (i.e., larger $s(y|\theta_j, x)$ ). Further motivation for averaging predictions comes from multi-sample losses studied in Morningstar et al. (2022). Note that the joint convexity of (unnormized) KL divergence implies that this loss is upper bounded by the UNIF loss up to some constant in $\theta$ (see Appx. D). + +Although the PROB strategy favors confident student predictions, the weights change as a function of $y \in \mathcal{V}$ . 
This may be in conflict with our intuition that SSL is like maximum likelihood (Sec. 2), since under that view, the teacher is responsible for weighting outcomes. + +Entropy weighting (ENT) Another way to favor heads with more confident predictions is to directly weight by their predictive entropies; i.e., + +$$ +f _ {i j y} = - \mathrm {H} [ t _ {i} (Y | x) ] + \log \delta (i - j) \Longleftrightarrow \tag {8} +$$ + +$$ +\mathcal {L} _ {n} ^ {\mathrm {E N T}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \sum_ {i \in [ m ]} \operatorname {s o f t m a x} _ {i} \left(\left\{- \frac {1}{\gamma} \mathrm {H} \left[ t _ {i ^ {\prime}} (Y | x) : i ^ {\prime} \in [ m ] \right\}\right) \mathrm {H} ^ {\times} \left[ t _ {i} (Y | x), s (Y | \theta_ {i}, x) \right]\right) \tag {9} +$$ + +where the weight $w_{i} = \mathrm{softmax}_{i}\left(\{-\frac{1}{\gamma}\mathsf{H}[t_{i'}(Y|x)]:i'\in [m]\}\right)$ is inversely correlated with the entropy of teacher predictions. In other words, the head whose teacher has a lower entropy (i.e., higher confidence about its prediction) is given a larger importance weight for learning the representation. Like PROB, this strategy encourages "data specialists" by emphasizing strongly opinionated teacher heads for different inputs. Like UNIF, different heads are treated more independent (than PROB), since interaction between different heads is introduced only through the weight computation. By preferring low-entropy teachers we also favor low variance teachers; this aligns with the intuition that using a lower-variance teacher benefits representation quality (Wang et al., 2022). + +Countless other weighting schemes It is impossible to fully explore the space of weightings; the following might also be interesting to study in detail but were omitted due to resource constraints. 
+ +$$ +f _ {i j y} = 0 \quad \text {(F a v o r s a l l p a i r s o f t e a c h e r s / s t u d e n t s e q u a l l y)} \tag {10} +$$ + +$$ +f _ {i j y} = \log t _ {i} (y | x) \quad (\text {F a v o r s o p i n i o n a t e d t e a c h e r s}) \tag {11} +$$ + +$$ +f _ {i j y} = - \mathrm {H} [ s (Y | \theta_ {j}, x) ] \quad (\text {F a v o r s l o w - e n t r o p y s t u d e n t s}) \tag {12} +$$ + +$$ +f _ {i j y} = \mathsf {K} \left[ t _ {i} (Y | x), s (Y | \theta_ {j}, x) \right] \quad (\text {F a v o r s d i s a g r e e i n g t e a c h e r / s t u d e n t p a i r s}) \tag {13} +$$ + +$$ +f _ {i j y} = - \frac {1}{2} \log \left(\operatorname {V a r} _ {t _ {i} (Y | x)} [ Y ] + \epsilon\right) \quad \text {(F a v o r s l o w v a r i a n c e t e a c h e r s ; e . g . ,} \epsilon = \frac {1}{1 2}) \tag {14} +$$ + +Note that "aligned" versions of all schemes are possible by using $f_{ijy} + \log \delta (i - j)$ . We did early experiments exploring Eqs. (11) and (12), but the results were inferior and are largely omitted below. + +# 4 RELATED WORK + +Self-supervised learning Recent work on self-supervised learning (SSL) focuses on discriminative or generative approaches. Most discriminative approaches seek to learn augmentation-invariant representations by enforcing the similarity between augmented pairs of the same image while utilizing different techniques to avoid collapse. Contrastive methods (Chen et al., 2020a; He et al., 2020; Wu et al., 2018; Hjelm et al., 2018; Bachman et al., 2019; Tian et al., 2020) use a large number of negative samples with a noise-contrastive objective (Gutmann & Hyvarinen, 2010; Oord et al., 2018). 
A large body of followup work eliminates the necessity of explicit negative samples with various techniques, including clustering assignment constraints (Caron et al., 2018; 2020; 2021; Asano et al., 2019), bootstrapping (Grill et al., 2020) or self-distillation (Caron et al., 2021) inspired by mean teacher (Tarvainen & Valpola, 2017), asymmetric architecture design (Grill et al., 2020; Chen & He, 2021), or redundancy reduction (Zbontar et al., 2021; Bardes et al., 2021). Recent generative approaches that use masked image modeling as the pretraining task (Dosovitskiy et al., 2020; Bao et al., 2021; He et al., 2022; Zhou et al., 2022; Xie et al., 2022) have achieved competitive finetuning performance. Our method may be applicable to all of the above methods that have some sort of "projection head", such as most of the discriminative approaches. + +Ensemble methods Ensembling has been extensively studied for improving model performance (Hansen & Salamon, 1990; Perrone & Cooper, 1992; Dietterich, 2000) and uncertainty estimation (Lakshminarayanan et al., 2017; Ovadia et al., 2019) in supervised learning and semi-supervised learning (Laine & Aila, 2016). A major research direction is to train efficient ensembles with partial parameter sharing (Lee et al., 2015; Wen et al., 2020; Dusenberry et al., 2020; Havasi et al., 2020) or intermediate checkpointing (Huang et al., 2017; Garipov et al., 2018). Our method also shares the encoder parameters across ensembles, which is closely related to multi-headed networks (Lee et al., 2015; Tran et al., 2020). Ensemble methods for SSL are less explored. Some recent work studies ensembles of supervised models adapted from pretrained SSL models. Gontijo-Lopes et al. (2022) conduct an empirical study of ensembles adapted from different SSL models and find that higher divergence in SSL methods leads to less correlated errors and better performance. Wortsman et al. 
(2022) ensemble multiple finetuned models adapted from the same SSL model by averaging their weights, which boosts the performance without any inference cost. Our method differs from them in that it (1) applies to the SSL training stage to directly improve representation quality, rather than aggregates multiple models in the post-training/finetuning stage; (2) introduces little training cost and no evaluation cost; and (3) is complementary to these post-training/finetuning ensembling methods. + +# 5 EXPERIMENTS + +We carefully study the impact of $(h_{\psi},\mu)$ -ensembles and our selected weighted ensemble losses (UNIF, PROB, and ENT) on smaller DINO models in Sec. 5.1. Using what we learned in those experiments, in Sec. 5.2 we present new state-of-the-art results on ImageNet-1K on various metrics for multiple model sizes by ensembling both DINO- and MSN-based models. Finally, we explore ensemble evaluations in the transfer learning setting in Sec. 5.3. Additional experimental details and results are in Appx. B and Appx. C, respectively. + +Experimental setup We assessed the effectiveness of our method with two SSL methods: DINO (Caron et al., 2021) and MSN (Assran et al., 2022). In order to ensure that we are comparing against strong baselines, we consider three different classes of baselines: (1) evaluation numbers reported in the original works (Caron et al. (2021), Assran et al. (2022), and Zhou et al. (2022) for an additional baseline iBOT); (2) evaluation of our implementation using the hyperparameters reported in the original works (DINO only, for space reasons) to validate our implementation; and (3) evaluation of our implementation using the best hyperparameters that we found by tuning the baselines (DINO and MSN) for fair comparisons. In almost all models and evaluations, our retuned baselines give nontrivial performance improvements on top of previously reported numbers. These type (3) baselines + +Table 1: Comparison of different ensemble strategies. 
ENT and PROB significantly improve over the non-ensembled baseline, while UNIF leads to no gains. Ensembling both the projection head and the codebook works the best. All models are DINO* ViT-S/16 trained for 300 epochs. Averages and standard deviations are over 3 initialization seeds. The linear evaluation results on ImageNet-1K with different amounts of labeled data are reported here (see Table 11 in Appx. C.3 for all metrics). + +
HowWhere# of Labels Per Class
Proj. hψCode. μ15~13 (1%)Full
Base40.6 ± 0.257.9 ± 0.363.4 ± 0.274.4 ± 0.1
UNIF40.4 ± 0.457.6 ± 0.363.3 ± 0.374.5 ± 0.2
PROB39.8 ± 0.5 ↓ 0.957.4 ± 0.4 ↓ 0.563.0 ± 0.4 ↓ 0.474.8 ± 0.1 ↑ 0.4
PROB41.9 ± 0.3 ↑ 1.359.6 ± 0.4 ↑ 1.765.1 ± 0.3 ↑ 1.775.4 ± 0.1 ↑ 1.0
ENT-ST40.0 ± 0.5 ↓ 0.657.3 ± 0.5 ↓ 0.662.7 ± 0.5 ↓ 0.774.0 ± 0.4 ↓ 0.4
ENT40.8 ± 0.458.0 ± 0.463.5 ± 0.474.5 ± 0.3
ENT43.0 ± 0.6 ↑ 2.459.7 ± 0.7 ↑ 1.864.8 ± 0.5 ↑ 1.475.1 ± 0.4 ↑ 0.7
ENT44.0 ± 0.2 ↑ 3.460.5 ± 0.3 ↑ 2.665.5 ± 0.1 ↑ 2.275.3 ± 0.1 ↑ 0.9
+ +we label DINO* and MSN*, and we use them as the base models for our experiments with $(h_{\psi}, \mu)$ -ensembles and weighted ensemble losses. Appx. B.2.1 describes the details for getting such strong performance for DINO* and MSN*. In particular, we find that the projection head has a crucial impact on label efficiency of representations and using a smaller head (3-layer MLP with hidden size 1024) significantly improves few-shot evaluation performance (see Appx. C.2). + +Evaluation metrics We compared models trained with and without our $(h_{\psi},\mu)$ -ensembles by measuring various evaluation metrics on ImageNet-1K (Deng et al., 2009). The evaluation metrics reflect the decodability and the label efficiency of learned representations. We measured the decodability with respect to both the linear classifier following the common linear evaluation protocol and the $k$ -NN classifier following Caron et al. (2021). We measured the label efficiency by evaluating the linear evaluation performance in few-shot settings, including $1\%$ ( $\sim 13$ -shots) labeled data evaluation (Chen et al., 2020a) and 1-/2-/5-shot evaluations (Assran et al., 2022). All evaluations used frozen representations of the teacher encoder - we did not fine tune the models. See Appx. B.3 for details. + +# 5.1 EMPIRICAL STUDY OF $(h_{\psi},\mu)$ -ENSEMBLES + +Table 1 compares different strategies for where and how to ensemble. Fig. 4 compares the impact of the weighted ensemble loss on $(h_{\psi},\mu)$ -ensemble diversity. Fig. 3 shows the effect of increasing the number of ensembles, adjusting the temperature $\gamma$ , and increasing baseline projection head parameters. In these experiments, we used DINO* with ViT-S/16 trained for 300 epochs as the base model. We compared different ensemble methods applied to the base model with $m = 16$ heads which we found to work the best. 
For the ENT strategy in Table 1, the entropy weighting temperature $\gamma$ is set to $0.05\times \log (c)$ by default which is selected from $\{0.0125,0.025,0.05,0.1,0.2\} \times \log (c)$ where the scale $\log (c)$ gives the maximum entropy of the codebook size $c$ . For PROB, we keep $\gamma = 1$ . + +Where to ensemble We study the where question by ensembling either the projection head $h_{\psi}$ , the codebook $\mu$ , or both with the ENT and the PROB ensemble strategies, as shown in Table 1. We find that ensembling both $h_{\psi}$ and $\mu$ provides the largest gains for both losses, probably due to the increased flexibility for learning a diverse ensemble. Interestingly, only ensembling $h_{\psi}$ also works well for the ENT strategy. + +How to ensemble We study the how question by considering four different loss variants: UNIF, PROB, ENT, and the variant of ENT with student entropy weighting. We find that when we ensemble both the projection head $h_{\psi}$ and the codebook $\mu$ , the ENT ensemble strategy leads to the most significant gains (e.g., 3.4 p.p. gains for 1-shot and 0.9 p.p. gains for full-data). The PROB strategy also consistently improves the performance with a slightly larger gain (1 p.p.) in full-data evaluation. In contrast, we see no gains for the UNIF strategy over the baseline. We also study a variant of ENT that uses the student entropy (i.e., Eq. (12) with the log $\delta(i - j)$ term) for the importance weights (denoted as ENT-ST). ENT-ST performs much worse than ENT and is even worse than the baseline. + +![](images/7d60d8124efc22ad555d6fbbdbc43aab642f5688816526f2126d36420340702c.jpg) +(a) Scaling of $(h_{\psi},\mu)$ -ensembles. + +![](images/79a22c445327a2374c4567b01548ac9e1cde74ecbd0a82714617a48008948f91.jpg) +Figure 3: Empirical study of $(h_{\psi},\mu)$ -ensembles. (a) The gains of $(h_{\psi},\mu)$ -ensembles start to diminish above 16 heads. (b) The temperature for entropy weighting has a larger impact on few-shot performance. 
16 heads are used and $\gamma$ is scaled by $\log(c)$ . (c) Our $(h_{\psi},\mu)$ -ensembles outperform all non-ensembled baselines when controlling for number of parameters. A too powerful non-ensemble projection head significantly harms accuracy. $1\%$ data evaluation is shown. Also see Fig. 5. + +![](images/f0d9635be7f5c83827576c16d79bb2fa15568867fec3b8742609cc81341a2443.jpg) +(b) Effect of ENT temperature $\gamma$ +(c) Comparing different heads. + +We conjecture that this is because the student predictions typically have a larger variance than teacher predictions (Wang et al., 2022) especially when multi-crop augmentation (Caron et al., 2020; 2021) is applied to the student. Similar experiments on Eq. (11) and/or $\gamma = 0$ variants of PROB also resulted in inferior performance (see Table 12). + +Analysis of $(h_{\psi}, \mu)$ -ensemble diversity The previous experiments showed that the choice of ensemble weighting strategy has a large impact on performance. We hypothesize that this choice substantially impacts the diversity of the codebook ensembles. Since the codes in different heads may not be aligned, we align them by the similarity of their code assignment probabilities across different input images, which measures how the codes are effectively used to 'cluster' the data. See Appx. C.4 for detailed explanations and results. In Fig. 4, we visualize the decay patterns of the similarity score between aligned codes (1.0 means the most similar) in a random pair of heads for each weighting strategy. ENT decays the fastest and UNIF decays the slowest, indicating that ENT learns the most diverse codebooks while UNIF is least diverse. This shows a positive correlation between the diversity of $(h_{\psi}, \mu)$ -ensembles and the empirical + +performance of the ensemble strategies from Table 1. 
Finally, for UNIF, we find that different heads tend to learn the same semantic mappings even when randomly initialized; i.e., the code assignments in different heads become homogeneous up to permutation. See Fig. 8 for a visualization. + +![](images/7a994fe5319f23357708dff945fb9964c4b86408e92bf1413733afc789d9d957.jpg) +Figure 4: Visualization of code similarity. ENT learns the most diverse $(h_{\psi},\mu)$ -ensembles reflected by the fastest decay of similarity scores between aligned codes in different heads. UNIF has low diversity between heads. + +Number of $(h_{\psi},\mu)$ -ensembles We study the effect of increasing the number of $(h_{\psi},\mu)$ -ensembles $m$ for ENT in Fig. 3a. Having more $(h_{\psi},\mu)$ -ensembles boosts the performance until $m = 16$ . Interestingly, using as few as $m = 2$ heads already significantly improves over the baseline. + +Effect of ENT temperature $\gamma$ Fig. 3b studies the effect of entropy weighting temperature $\gamma$ for different evaluation metrics. We observe that $\gamma$ has a relatively larger impact on few-shot evaluation performance. $\gamma$ should be neither too high nor too low: a high temperature leads to under-specialization (i.e. less diversity) of heads similar to UNIF ( $\gamma \rightarrow \infty$ ) and a low temperature may otherwise lead to over-specialization (i.e., only a single head is used for each input). + +Comparison of different projection heads Our method linearly increases projection head parameters, thus a natural question is: Is the gain of $(h_{\psi},\mu)$ -ensembles due to the increased power (or number of parameters) in projection heads? We answer this question with an empirical study of non-ensembled projection heads. Specifically, we studied non-ensembled $h_{\psi}$ with (depth, width) searched over $\{2,3,4\} \times \{512,1024,2048,4096\}$ and measured the linear evaluation performance with different amounts of labeled data. In Fig. 
3c, we plot the $1\%$ data evaluation result with respect to the number of parameters of the projection head both for ensembled and non-ensembled baselines. See Appx. C.2 for detailed analysis and extra results for other metrics. Our key findings are: + +Table 2: Effectiveness of ensemble heads for DINO*/MSN* with different ViT models. Our ensemble heads consistently improve all downstream evaluation metrics on ImageNet-1K and achieve a new state-of-the-art for few-shot evaluations. For ViT-S/16, we report linear evaluation results probed from the last layer (left) and from the last 4 layers (right, following DINO). †We evaluated the few-shot settings using DINO's publicly-available pretrained weights in the cases those results were not reported in Caron et al. (2021). ‡MSN ViT-B/16 and ViT-B/8 are both trained for 600 epochs in Assran et al. (2022), whereas our models are trained for only 400, 300 epochs, respectively. For each architecture, we highlight the best DINO baseline and weighted ensemble in blue. For MSN, the corresponding highlights are yellow. The best results for each architecture and metric are bolded. + +
MethodFew-shotFull-data
125~13 (1%)k-NNLinear
ViT-S/16, 800 epochs
iBOT40.4 ± 0.550.8 ± 0.859.9 ± 0.265.975.2- / 77.9
DINO38.9 ± 0.448.9 ± 0.358.5 ± 0.164.574.576.1 / 77.0
DINO (Repro)39.1 ± 0.349.1 ± 0.558.6 ± 0.264.774.375.8 / 76.9
DINO* (Retuned)44.6 ± 0.253.6 ± 0.361.1 ± 0.266.274.175.8 / 76.9
MSN47.1 ± 0.155.8 ± 0.662.8 ± 0.367.2-- / 76.9
MSN* (Retuned)47.4 ± 0.156.3 ± 0.462.8 ± 0.267.173.375.6 / 76.6
DINO*-PROB (16)45.2 ± 0.454.9 ± 0.462.5 ± 0.267.375.176.5 / 77.6
DINO*-ENT (4)46.3 ± 0.155.5 ± 0.663.0 ± 0.367.574.876.2 / 77.2
DINO*-ENT (16)47.6 ± 0.1 ↑ 3.056.8 ± 0.564.0 ± 0.268.3 ↑ 2.175.376.8 / 77.7 ↑ 0.8
MSN*-ENT (2)48.8 ± 0.257.5 ± 0.564.0 ± 0.267.974.676.0 / 76.9
MSN*-ENT (8)50.1 ± 0.1 ↑ 2.758.9 ± 0.665.1 ± 0.368.7 ↑ 1.675.276.4 / 77.4 ↑ 0.8
ViT-B/16, 400 epochs
iBOT46.1 ± 0.356.2 ± 0.764.7 ± 0.369.777.179.5
DINO†43.0 ± 0.252.7 ± 0.561.8 ± 0.267.476.178.2
DINO* (Retuned)49.3 ± 0.158.1 ± 0.565.0 ± 0.369.176.078.5
MSN‡49.8 ± 0.258.9 ± 0.465.5 ± 0.3---
MSN* (Retuned)50.7 ± 0.159.2 ± 0.465.9 ± 0.269.774.778.1
DINO*-ENT (16)52.8 ± 0.1 ↑ 3.561.5 ± 0.467.6 ± 0.371.1 ↑ 2.077.179.1 ↑ 0.6
MSN*-ENT (8)53.7 ± 0.2 ↑ 3.062.4 ± 0.668.3 ± 0.271.5 ↑ 1.877.278.9 ↑ 0.8
ViT-B/8, 300 epochs
DINO†47.5 ± 0.257.3 ± 0.565.4 ± 0.370.377.480.1
DINO* (Retuned)49.5 ± 0.558.6 ± 0.665.9 ± 0.370.777.180.2
MSN‡55.1 ± 0.164.9 ± 0.771.6 ± 0.3---
MSN* (Retuned)51.9 ± 0.361.1 ± 0.467.7 ± 0.371.775.780.3
DINO*-ENT (16)55.0 ± 0.4 ↑ 5.563.4 ± 0.669.5 ± 0.373.4 ↑ 2.778.681.0 ↑ 0.8
MSN*-ENT (8)55.6 ± 0.2 ↑ 3.764.5 ± 0.570.3 ± 0.273.4 ↑ 1.778.980.8 ↑ 0.5
 + +- A too powerful non-ensembled $h_{\psi}$ significantly hurts the label efficiency of learned representations. This result is similar to Chen et al. (2020b), which found that probing from intermediate layers of projection heads (which can be viewed as using a shallower head) could improve semi-supervised learning ( $1\% - 10\%$ labeled data) results. +- The default head (3/2048, denoted as 'Default') used in recent SSL methods (SimCLRv2, DINO, MSN, etc.) does not perform as well in few-shot evaluations, probably because it is selected by looking at full-data evaluation metrics. In contrast, our baseline (3/1024, denoted as 'Our baseline') significantly improves few-shot evaluation performance. +- Our $(h_{\psi}, \mu)$ -ensembles outperform all non-ensembled baselines and lead to consistent improvements in all evaluation metrics, despite the increase of parameters. + +# 5.2 IMPROVING SOTA RESULTS WITH ENSEMBLING + +Next we apply $(h_{\psi},\mu)$ -ensembles to DINO* and MSN* and compare with the state-of-the-art results. We experimented with model architectures ViT-S/16, ViT-B/16, ViT-B/8 trained for 800, 400, 300 epochs respectively following Caron et al. (2021). We include both the published results and our retuned versions to ensure strong baselines. For clarity, we denote our method as “{baseline}-{ensemble strategy} (# of heads)”, e.g., DINO*-ENT (4). We tuned both baselines and our methods for all architectures. We report the best hyperparameters for all models in Appx. B.2.2. + +Table 2 compares the results of $(h_{\psi},\mu)$ -ensembles and baselines. We find that $(h_{\psi},\mu)$ -ensembles with ENT consistently improve all evaluation metrics (full-data, few-shot) across both SSL methods (DINO*, MSN*) and all architectures (ViT-S/16, ViT-B/16, ViT-B/8) over their non-ensembled counterparts. The gains in few-shot evaluation are particularly substantial, providing a new state-of-the-art for ImageNet-1K evaluation from ImageNet pretraining. 
+ +# 5.3 MORE EVALUATIONS FOR $(h_{\psi},\mu)$ -ENSEMBLES + +Table 3: Comparison of transfer performance. ViT-S/16 is used. Our ensemble heads lead to consistent improvements for $\mathrm{MSN^{*}}$ and comparable results for DINO*. + +
Food101CIFAR10CIFAR100SUN397CarsDTDPetsCaltech-101FlowersAvg.
DINO*78.493.881.066.166.774.692.094.994.482.43
DINO*-ENT (16)79.193.881.466.566.874.992.894.693.982.64
MSN*77.793.179.864.663.372.292.494.792.781.17
MSN*-ENT (8)78.493.981.165.268.073.293.195.492.882.34
+ +Transfer learning In Table 3, we compare the transfer learning performance of $(h_{\psi}, \mu)$ -ensembles and non-ensembed baselines. We used ViT-S-16 models trained on ImageNet-1K for 800 epochs and evaluated on 9 natural downstream datasets from Chen et al. (2020a) with linear evaluation (details in Appx. B.3). $(h_{\psi}, \mu)$ -ensembles lead to consistent improvements in transfer performance for $\mathrm{MSN}^*$ and comparable results for DINO*. + +Training overhead In Table 4, we benchmark the computational overhead of $(h_{\psi}, \mu)$ -ensembles at training time. We used a medium sized model, DINO* with ViT-B/16, trained with the same setting used in all of our experiments. We benchmarked the wall-clock time and peak memory on 128 TPUv3 cores. $(h_{\psi}, \mu)$ -ensembling is relatively cheap in terms of training cost because the ensembled parts typically account for a small portion of total computation, especially when the backbone encoder is more computationally expensive (e.g., ViT-B/8). + +Again, we emphasize that there is no evaluation overhead when $(h_{\psi},\mu)$ -ensembles are removed. + +Table 4: Training overhead. Wall-clock time and peak memory per core for training with different numbers of ensembles. + +
mWall TimePeak Memory
15.81h5.25G
45.91h5.40G
166.34h5.89G
+ +# 6 CONCLUSION & DISCUSSION + +We introduced an efficient ensemble method for SSL where multiple projection heads are ensembled to effectively improve representation learning. We showed that with carefully designed ensemble losses that induce diversity over ensemble heads, our method significantly improves recent state-of-the-art SSL methods in various evaluation metrics, particularly for few-shot evaluation. Although ensembling is a well-known technique for improving evaluation performance of a single model, we demonstrated that, for models with throw-away parts such as the projection heads in SSL, ensembling these parts can improve the learning of the non-ensemble representation encoder and also achieve significant gains in downstream evaluation without introducing extra evaluation cost. + +Our ensemble method is applicable to many SSL methods beyond the two we explored. For example, one may consider generalization to BYOL (Grill et al., 2020) or SimSiam (Chen & He, 2021) that ensembles projection and/or prediction heads, or MAE (He et al., 2022) that ensembles the decoders (which introduces more training cost though). Our weighted ensemble losses can also be applied as long as the original loss can be reformulated as MLE for some $t$ , $s$ , and $Y$ , e.g., the MSE loss in these methods is MLE under multivariate normal distributions. We hope our results and insights will motivate more future work for extending our method or exploring more ensemble techniques for SSL. + +In future work, we also hope to remove three limitations of our setting. First, considering ensembling strategies that include the representation encoder, $r_{\omega}$ , may lead to further improvements in the performance of weighted ensemble SSL, at the cost of increased computation requirements during both training and evaluation. 
Second, considering heterogeneous architectures in the ensemble may further improve the learned representations (e.g., mixing Transformers with ResNets), whether the heterogeneity is in $r_{\omega}, h_{\psi}$ , or both. Third, considering other possibilities for $f_{ijy}$ may also reveal performance gains and improve our understanding of the critical aspects that lead to good SSL representations, similar to what we learned about the importance of ensemble diversity. + +# ACKNOWLEDGMENTS + +We would like to thank Mathilde Caron and Mahmoud Assran for their extensive help in reproducing DINO and MSN baselines. We would also like to thank Ting Chen and Yann Dubois for their helpful discussions and encouragements. + +# REPRODUCIBILITY STATEMENT + +We include detailed derivations for all our proposed losses in Appx. D. We report experimental details in Appx. B, including the implementation details for reproducing the baselines (Appx. B.1), training and evaluating our methods (Appx. B.2.1), and all hyper-parameters (Appx. B.2.2) used in our experiments for reproducing our results in Table 2. + +# REFERENCES + +TensorFlow Datasets, a collection of ready-to-use datasets. https://www.tensorflow.org/datasets. +Sanjeev Arora, Hrishikesh Khandeparkar, Mikhail Khodak, Orestis Plevrakis, and Nikunj Saunshi. A theoretical analysis of contrastive unsupervised representation learning. arXiv preprint arXiv:1902.09229, 2019. +Yuki Markus Asano, Christian Rupprecht, and Andrea Vedaldi. Self-labelling via simultaneous clustering and representation learning. arXiv preprint arXiv:1911.05371, 2019. +Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, and Nicolas Ballas. Masked siamese networks for label-efficient learning. arXiv preprint arXiv:2204.07141, 2022. +Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. 
Advances in neural information processing systems, 32, 2019. +Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021. +Adrien Bardes, Jean Ponce, and Yann LeCun. Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906, 2021. +Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, pp. 446-461. Springer, 2014. +Yuri Burda, Roger B Grosse, and Ruslan Salakhutdinov. Importance weighted autoencoders. In ICLR, 2016. +Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In Proceedings of the European conference on computer vision (ECCV), pp. 132-149, 2018. +Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. Advances in Neural Information Processing Systems, 33:9912-9924, 2020. +Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9650-9660, 2021. +Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pp. 1597-1607. PMLR, 2020a. +Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey E Hinton. Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems, 33:22243-22255, 2020b. + +Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 
15750-15758, 2021. +Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3606-3613, 2014. +Thomas M Cover and Joy A Thomas. Elements of Information Theory. John Wiley & Sons, 1999. +Marco Cuturi. Sinkhorn distances: Lightspeed computation of optimal transport. Advances in neural information processing systems, 26, 2013. +Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009. +Thomas G Dietterich. Ensemble methods in machine learning. In International workshop on multiple classifier systems, pp. 1-15. Springer, 2000. +Onur Dikmen, Zhirong Yang, and Erkki Oja. Learning the information divergence. IEEE transactions on pattern analysis and machine intelligence, 37(7):1442-1454, 2014. +Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. +Sever S Dragomir. A generalization of $f$ -divergence measure to convex functions defined on linear spaces. Communications in Mathematical Analysis, 15(2):1-14, 2013. +Michael Dusenberry, Ghassen Jerfel, Yeming Wen, Yian Ma, Jasper Snoek, Katherine Heller, Balaji Lakshminarayanan, and Dustin Tran. Efficient and scalable bayesian neural nets with rank-1 factors. In International conference on machine learning, pp. 2782-2792. PMLR, 2020. +Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. 
In 2004 conference on computer vision and pattern recognition workshop, pp. 178-178. IEEE, 2004. +Timur Garipov, Pavel Izmailov, Dmitrii Podoprikhin, Dmitry P Vetrov, and Andrew G Wilson. Loss surfaces, mode connectivity, and fast ensembling of dnns. Advances in neural information processing systems, 31, 2018. +Raphael Gontijo-Lopes, Yann Dauphin, and Ekin Dogus Cubuk. No one representation to rule them all: Overlapping features of training methods. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=BK-4qbGgIE3. +Jean-Bastien Grill, Florian Strub, Florent Alché, Coretin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. Advances in neural information processing systems, 33:21271-21284, 2020. +Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pp. 297-304. JMLR Workshop and Conference Proceedings, 2010. +Abner Guzman-Rivera, Dhruv Batra, and Pushmeet Kohli. Multiple choice learning: Learning to produce multiple structured outputs. Advances in neural information processing systems, 25, 2012. +Lars Kai Hansen and Peter Salamon. Neural network ensembles. IEEE transactions on pattern analysis and machine intelligence, 12(10):993-1001, 1990. +Marton Havasi, Rodolphe Jenatton, Stanislav Fort, Jeremiah Zhe Liu, Jasper Snoek, Balaji Lakshminarayanan, Andrew M Dai, and Dustin Tran. Training independent subnetworks for robust prediction. arXiv preprint arXiv:2010.06610, 2020. + +Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 
9729-9738, 2020. +Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16000-16009, 2022. +R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670, 2018. +Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pp. 646-661. Springer, 2016. +Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E Hopcroft, and Kilian Q Weinberger. Snapshot ensembles: Train 1, get m for free. arXiv preprint arXiv:1704.00109, 2017. +Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pp. 554-561, 2013. +Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. +Harold W Kuhn. The hungarian method for the assignment problem. *Naval research logistics quarterly*, 2(1-2):83-97, 1955. +Samuli Laine and Timo Aila. Temporal ensembling for semi-supervised learning. arXiv preprint arXiv:1610.02242, 2016. +Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. Advances in neural information processing systems, 30, 2017. +Kuang-Huei Lee, Anurag Arnab, Sergio Guadarrama, John Canny, and Ian Fischer. Compressive visual representations. Advances in Neural Information Processing Systems, 34:19538-19552, 2021. +Stefan Lee, Senthil Purushwalkam, Michael Cogswell, David Crandall, and Dhruv Batra. Why m heads are better than one: Training a diverse ensemble of deep networks. 
arXiv preprint arXiv:1511.06314, 2015. +Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2018. +Warren R Morningstar, Alex Alemi, and Joshua V Dillon. Pacm-bayes: Narrowing the empirical risk gap in the misspecified bayesian regime. In International Conference on Artificial Intelligence and Statistics, pp. 8270-8298. PMLR, 2022. +Wai Ho Mow. A tight upper bound on discrete entropy. IEEE Transactions on Information Theory, 44(2):775-778, 1998. +Yurii Nesterov. A method for solving the convex programming problem with convergence rate $o(1 / k^2)$ . Proceedings of the USSR Academy of Sciences, 269:543-547, 1983. +Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722-729. IEEE, 2008. +Kento Nozawa, Pascal Germain, and Benjamin Guedj. Pac-bayesian contrastive unsupervised representation learning. In Jonas Peters and David Sontag (eds.), Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI), volume 124 of Proceedings of Machine Learning Research, pp. 21-30. PMLR, 03-06 Aug 2020. URL https://proceedings.mlr.press/v124/nozawa20a.html. + +Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. +Yaniv Ovadia, Emily Fertig, Jie Ren, Zachary Nado, David Sculley, Sebastian Nowozin, Joshua Dillon, Balaji Lakshminarayanan, and Jasper Snoek. Can you trust your model's uncertainty? evaluating predictive uncertainty under dataset shift. Advances in neural information processing systems, 32, 2019. +Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In 2012 IEEE conference on computer vision and pattern recognition, pp. 3498-3505. IEEE, 2012. +F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. 
Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011. +Michael P Perrone and Leon N Cooper. When networks disagree: Ensemble methods for hybrid neural networks. Technical report, Brown Univ Providence Ri Inst for Brain and Neural Systems, 1992. +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pp. 8748-8763. PMLR, 2021. +Yangjun Ruan, Yann Dubois, and Chris J. Maddison. Optimal representations for covariate shift. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=Rf58LPCwJj0. +Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958, 2014. +Ilya Sutskever, James Martens, George Dahl, and Geoffrey Hinton. On the importance of initialization and momentum in deep learning. In International conference on machine learning, pp. 1139-1147. PMLR, 2013. +Antti Tarvainen and Harri Valpola. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. Advances in neural information processing systems, 30, 2017. +Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. In European conference on computer vision, pp. 776-794. Springer, 2020. +Nenad Tomasev, Ioana Bica, Brian McWilliams, Lars Buesing, Razvan Pascanu, Charles Blundell, and Jovana Mitrovic. Pushing the limits of self-supervised resnets: Can we outperform supervised learning without labels onImagenet? 
arXiv preprint arXiv:2201.05119, 2022. +Linh Tran, Bastiaan S Veeling, Kevin Roth, Jakub Swiatkowski, Joshua V Dillon, Jasper Snoek, Stephan Mandt, Tim Salimans, Sebastian Nowozin, and Rodolphe Jenatton. Hydra: Preserving ensemble diversity for model distillation. arXiv preprint arXiv:2001.04694, 2020. +Xiao Wang, Haoqi Fan, Yuandong Tian, Daisuke Kihara, and Xinlei Chen. On the importance of asymmetry for siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16570-16579, 2022. +Yeming Wen, Dustin Tran, and Jimmy Ba. Batchsemble: an alternative approach to efficient ensemble and lifelong learning. arXiv preprint arXiv:2002.06715, 2020. +Mitchell Wortsman, Gabriel Ilharco, Samir Ya Gadre, Rebecca Roelofs, Raphael Gontijo-Lopes, Ari S Morcos, Hongseok Namkoong, Ali Farhadi, Yair Carmon, Simon Kornblith, et al. Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time. In International Conference on Machine Learning, pp. 23965-23998. PMLR, 2022. + +Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3733-3742, 2018. +Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene recognition from abbey to zoo. In 2010 IEEE computer society conference on computer vision and pattern recognition, pp. 3485-3492. IEEE, 2010. +Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9653-9663, 2022. +Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stephane Deny. Barlow twins: Self-supervised learning via redundancy reduction. 
In International Conference on Machine Learning, pp. 12310-12320. PMLR, 2021. +Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. Image BERT pre-training with online tokenizer. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=ydopy-e6Dg. + +# A PSEUDOCODE + +Algorithm 1: Pseudocode for computing ensemble loss +```python +# b, n, c: batch size, number of ensemble heads, codebook size +# log_ps, log_ct: student, teacher log probabilities with n ensembles +# strategy: ensemble loss average strategy +# tau_ent: temperature for entropy weighting +def ensemble_loss(log_ps, log_ct, strategy, tau_ent): + b, n, c = log_ct.shape # axis 1 corresponds to ensemble + log_ct = stop_grad(log_ct) # stop gradient for teacher + if strategy == "Unif": + loss = - (exp(log_ct) * log_ps).sum(axis=-1) + loss = loss.mean(axis=1) # average over ensembles + elif strategy == "Prob": + log_mean_ct = logsumexp(log_ct, axis=1, b=1/n) # mean teacher + log_mean_ps = logsumexp(log_ps, axis=1, b=1/n) # mean student + loss = - (exp(log_mean_ct) * log_mean_ps).sum(axis=-1) + elif strategy == "Ent": + ent = - (exp(log_ct) * log_ct).sum(axis=-1) # teacher entropy + weight = softmax(-ent/tau_ent, axis=1) # entropy weights + loss = - (exp(log_ct) * log_ps).sum(axis=-1) + loss = (loss * weight).sum(axis=1) # entropy weighted average + return loss.mean() # average over samples +``` + +Algorithm 2: Pseudocode for ensemble heads with simplified DINO +```python +# n, c, eta: number of ensemble heads, codebook size, momentum update rate +# fs, ft: student, teacher encoders +# hs_ens, ht_ens: student, teacher projection heads with n ensembles, list with length n +# mus_ens, mut_ens: student, teacher codebooks with n ensembles, list with length n +# taus, taut: student, teacher temperatures +# strategy: ensemble loss average strategy +# tau_ent: temperature for entropy weighting +for x in dataloader: # load a batch with b samples +xs, 
xt = augs(x), augt(x) # random augmentations +zs, zt = fs(xs), ft(xt) # representations, (b, l) +# all following computation can be parallelized with batch computation +log_ps, log_rt = [], [] +for j in range(n): +hs_j, ht_j = hs_ens[j], ht_ens[j] # j-th projection head +mus_j, mut_j = mus_ens[j], mut_ens[j] # j-th codebook, (d, c) +es_j, et_j = hs_j(zs), ht_j(zt) # j-th embedding, (b, d) +rs_j = (es_j @ mus_j) / (es_j.norm(axis=1, keepdims=True) * mus_j.norm(axis=0, keepdims=True)) / taus # student logits, (b, c) +rt_j = (et_j @ mut_j) / (et_j.norm(axis=1, keepdims=True) * mut_j.norm(axis=0, keepdims=True)) / taut # teacher logits, (b, c) +log_ps_j = logsoftmax(rs_j, axis=-1) # (b, c) +log_rt_j = logsoftmax(rt_j, axis=-1) # (b, c) +log_rt_j = renorm(log_rt_j) # adjust teacher predictions with centering or sinkhorn, omitted here for simplicity +log_ps.append(log_ps_j) +log_rt.append(log_rt_j) +log_ps = stack(log_ps, axis=1) # stacked student log probabilities, (b, n, c) +log_rt = stack(log_rt, axis=1) # stacked teacher log probabilities, (b, n, c) +loss = ensemble_loss(log_ps, log_rt, strategy=strategy, tau_ent=tau_ent) # compute ensemble loss +loss.backward() # back-propagate +sgd_update(fs, hs_ens, mus_ens) # apply gradient descent update for student +ema_update(ft, ht_ens, mut_ens, rate=eta) # apply momentum update for teacher +``` + +# B EXPERIMENTAL DETAILS + +In this section, we provide details for our experiments. In Appx. B.1, we describe how we reproduced and improved the baseline DINO/MSN models. We give the implementation details for SSL training and evaluation in Appx. B.2 and Appx. B.3 respectively. All the hyper-parameters used in our experiments are in Appx. B.2.2. + +# B.1 REPRODUCING & IMPROVING BASELINES + +We carefully reproduced and further improved baseline methods (denoted as DINO* and MSN* respectively) with an extensive study and hyperparameter search (see Appx. B.1). 
In particular, we systematically study the projection head design (which we found is crucial for few-shot evaluation performance (Appx. C.2)) and different techniques for avoiding collapse used in both methods (Appx. C.1). DINO* performs significantly better than DINO on few-shot evaluation (e.g., $2\sim 6$ percentage point (p.p.) gains for 1 shot) and maintains the full-data evaluation performance. The main adjustments of DINO* are: (i) A 3-layer projection head with a hidden dimension of 1024 (instead of 2048); (ii) Sinkhorn-Knopp (SK) normalization (instead of centering) is applied to teacher predictions, combined with a smaller teacher temperature $\tau = 0.025$ and codebook size $c = 1024$ or 4096. MSN* uses the same projection head as DINO* and applies ME-MAX regularization without SK normalization (which is applied in MSN by default). Further details for DINO and MSN can be found below. + +# B.1.1 DINO + +Table 5: Reproducing & Improving DINO. Our reproduce results match the public numbers. We further improve the DINO baseline (DINO*) by studying projection heads and collapse-avoiding techniques. The evaluation results of DINO/DINO* ViT-S/16 trained with 800 epochs are reported. + +
Few-shotFull-data
125~13 (1%)k-NNLinear
DINO (Caron et al., 2021)38.9 ± 0.448.9 ± 0.358.5 ± 0.164.574.576.1 / 77.0
DINO (Ours reproduced)39.1 ± 0.349.1 ± 0.558.6 ± 0.264.774.375.8 / 76.9
DINO* (Retuned)44.6 ± 0.253.6 ± 0.361.1 ± 0.266.274.175.8 / 76.9
+ +Reproducing DINO We carefully reproduced DINO with JAX following the official DINO implementation1. In Table 5, we report the evaluation results of DINO using ViT-S trained with 800 epochs following the exact training configuration for ViT-S/16 in the official DINO code. The official results of full-data evaluation and $1\%$ data evaluation are from Caron et al. (2021), the other few-shot evaluation results are evaluated by Assran et al. (2022) and also validated by us. Note that for consistency of full-data linear evaluation, we report the results with both the [CLS] token representations of the last layer and the concatenation of the [CLS] token representations from the last 4 layers following Caron et al. (2021). For 1-/2-/5-shots evaluation results, we report the mean accuracy and standard deviation across 3 random splits of the data following Assran et al. (2022). As shown in Table 5, our reproduced results are all comparable with the published numbers which validates the implementation of our training and evaluation pipelines. + +Improving DINO We improved the DINO baseline with a systematic empirical study of some important components. We first empirically compared different techniques for avoiding collapse (see Appx. C.1) and find that Sinkhorn-Knopp (SK) normalization is a more effective and also simpler technique for encouraging codebook usage than the centering operation used in DINO. We thus applied SK normalization, which enabled us to use a smaller teacher temperature $\tau = 0.025$ (instead of $\tau = 0.07$ ) and a much smaller codebook size $c = 1024$ or 4096 (instead of 65536). These modifications lead to similar performance as DINO with a much smaller codebook (up to 1M parameters, compared to 16M parameters for DINO). Next we empirically studied the effect of projection heads for different evaluation metrics (see Appx. 
C.2), and found that the design of + +projection heads is crucial for few-shot evaluation metrics and a too powerful projection head (e.g., the 3-layer MLP with a hidden dimension of 2048 used in DINO/MSN/etc.) could significantly hurt the few-shot performance. With an empirical study of projection head architectures, we found that simply reducing the hidden dimension to 1024 could significantly improve the few-shot evaluation performance while maintaining full-data evaluation performance. The improved results of DINO* are shown in Table 5. + +# B.1.2 MSN + +Table 6: Reproducing & improving MSN. We implement $\mathsf{MSN^{*}}$ by adding ME-MAX regularization and masking to DINO*, which surpasses public MSN results. The evaluation results of MSN/MSN* ViT-S/16 trained with 800 epochs are reported. + +
Few-shotFull-data
125~13 (1%)k-NNLinear
MSN (Assran et al., 2022)47.1 ± 0.155.8 ± 0.662.8 ± 0.367.2-- / 76.9
MSN (Repro)39.1 ± 0.349.2 ± 0.358.4 ± 0.164.372.874.7 / 75.5
MSN* (Retuned)47.4 ± 0.156.3 ± 0.462.8 ± 0.267.173.375.6 / 76.6
+ +We carefully implemented MSN by adding its main components, i.e., ME-MAX regularization and masking, to the DINO implementation (denoted as $\mathrm{MSN}^*$ ), which surpassed public results as shown in Table 6. Note that the implementation of $\mathrm{MSN}^*$ does not exactly match the public implementation in the public MSN code $^2$ , where the main differences are: + +- MSN applies ME-MAX with Sinkhorn-Knopp normalization by default (as in the released training configuration), which we empirically find does not work very well (see Table 9). $\mathrm{MSN}^*$ does not apply SK normalization and tunes the regularization strength for ME-MAX. +- Some differences in implementation details, e.g., schedules for learning rate/weight decay, batch normalization in projection heads, specific data augmentations, etc. $\mathrm{MSN}^*$ uses the exact same setup as DINO\* which follows the original DINO implementation. + +We initially tried to exactly reproduce the original MSN following the public MSN code, but the results are much below the public ones, as shown in Table 6. Incorporating the two differences above bridges the gap and makes $\mathrm{MSN}^*$ surpass the public results. + +# B.2 PRETRAINING DETAILS + +In this subsection, we provide the general implementation details in Appx. B.2.1 and specific hyperparameters in Appx. B.2.2 for reproducibility. + +# B.2.1 IMPLEMENTATION DETAILS + +**Common setup** We experimented with DINO (Caron et al., 2021) and MSN (Assran et al., 2022) models on ImageNet ILSVRC-2012 dataset (Deng et al., 2009). We mainly followed the training setup in Caron et al. (2021). In particular, all models were trained with AdamW optimizer (Loshchilov & Hutter, 2018) and a batch size of 1024. The learning rate was linearly warmed up to 0.002 $(= 0.001 \times \text{batch size} / 512)$ and followed a cosine decay schedule. The weight decay followed a cosine schedule from 0.04 to 0.4. 
The momentum rate for the teacher was increased from 0.996 to 1 with a cosine schedule following BYOL (Grill et al., 2020). A stochastic depth (Huang et al., 2016) of 0.1 was applied without dropout (Srivastava et al., 2014). The student temperature $\tau$ is set to 0.1. As with DINO, we used the data augmentations of BYOL and multi-crop augmentation of SwAV (Caron et al., 2020). In particular, 2 global views with a $224 \times 224$ resolution and crop area range [0.25, 1.0] were generated for the teacher and student, and another 10 local views with $96 \times 96$ resolution and crop area range [0.08, 0.25] were used as extra augmented inputs for the student. For MSN, we used the exact same setup and incorporated its major components: 1) mean entropy maximization (ME-MAX) regularization; 2) masking as an extra augmentation applied to the student global view. + +Main modifications We retuned the baselines (DINO* and MSN*) as detailed in Appx. B.1, and the main adjustments are as follows. We used a 3-layer projection head with a hidden dimension of 1024. The output embedding (i.e., $(h_{\psi} \circ r_{\omega})(x)$ ) and the codes (i.e., $\mu$ ) both have a dimension of 256 and are $L_{2}$ normalized. For DINO*, Sinkhorn-Knopp (SK) normalization was applied to teacher predictions. For MSN*, ME-MAX was used without SK normalization and the regularization strength was tuned over $\{3, 4, 5\}$ . For all models, we used teacher temperature $\tau = 0.025$ which was linearly decayed from 0.05 for the first 30 epochs. The codebook size $c$ was selected over $\{1024, 4096\}$ for all models, and typically $c = 4096$ was selected for baseline methods and $c = 1024$ was selected for ours. For our $(h_{\psi}, \mu)$ -ensembles with ENT, entropy weighting temperature $\gamma$ is linearly decayed from 0.5 to the specified value. 
+ +# B.2.2 HYPER-PARAMETERS + +We report the hyperparameters for training our models for reproducibility: + +Table 7: Hyper-parameters for training the DINO* model. + +
Hyper-parameterViT-S/16ViT-B/16ViT-B/8
DINO*DINO*-PROB (16)DINO*-ENT (4/16)DINO*DINO*-ENT (16)DINO*DINO*-ENT (16)
training epoch800400300
batch size102410241024
learning rate2e-32e-32e-3
warmup epoch103010
min lr1e-51e-54e-5
weight decay0.04 → 0.40.04 → 0.40.04 → 0.4
stochastic depth0.10.10.1
gradient clip3.01.03.0
momentum0.996 → 1.00.996 → 1.00.996 → 1.0
# of multi-crops101010
masking ratio---
proj. layer333
proj. hidden dim102410241024
emb. dim d256256256
rep. dim384768768
codebook size c4096102410244096102440961024
student temp.0.10.10.1
teacher temp.0.0250.0250.025
te. temp. decay epoch303030
centerxxx
SK norm
ME-MAX weight---
ent. weight temp. γ--0.05-0.05-0.06
γ init.--0.5-0.5-0.5
γ decay epoch--30-30-30
+ +# B.3 EVALUATION PROTOCOLS + +Few-shot linear evaluation We followed the few-shot evaluation protocol in Assran et al. (2022). Specifically, we used the 1-/2-/5-shot ImageNet dataset splits3 in Assran et al. (2022) and $1\%$ ( $\sim 13$ -shot) ImageNet dataset splits4. For given labelled images, we took a single central crop of size $224 \times 224$ without additional data augmentations, and extracted the output [CLS] token representations from the frozen pretrained model. Then we trained a linear classifier with multi-class logistic regression on top of the extracted representations. We used the scikit-learn package (Pedregosa et al., 2011) for the logistic regression classifier. For all few-shot evaluations, we searched the $\mathrm{L}_2$ regularization strength over $\{1\mathrm{e}-4, 3\mathrm{e}-4, 1\mathrm{e}-3, 3\mathrm{e}-3, 1\mathrm{e}-2, 3\mathrm{e}-2, 1\mathrm{e}-1, 3\mathrm{e}-1, 1, 3, 10\}$ . + +Full-data linear evaluation We followed the linear evaluation protocol in Caron et al. (2021). Specifically, we trained a linear classifier on top of the representations extracted from the frozen pretrained model. The linear classifier is optimized by SGD with Nesterov momentum (Nesterov, 1983; Sutskever et al., 2013) of 0.9 and a batch size of 4096 for 100 epochs on the whole ImageNet dataset, following a cosine learning rate decay schedule. We did not apply any weight decay. + +Table 8: Hyper-parameters for training the ${\mathrm{{MSN}}}^{ * }$ model. + +
Hyper-parameterViT-S/16ViT-B/16ViT-B/8
DINO*MSN*-ENT (2/8)MSN*MSN*-ENT (8)MSN*MSN*-ENT (8)
training epoch800400300
batch size102410241024
learning rate2e-32e-32e-3
warmup epoch203020
min lr1e-54e-54e-5
weight decay0.04 → 0.40.04 → 0.40.04 → 0.4
stochastic depth0.10.10.1
gradient clip1.01.01.0
momentum0.996 → 1.00.996 → 1.00.996 → 1.0
# of multi-crops101010
masking ratio0.20.20.15
proj. layer333
proj. hidden dim102410241024
emb. dim d256256256
rep. dim384768768
codebook size c409610244096102440961024
student temp.0.10.10.1
teacher temp.0.0250.0250.025
te. temp. decay epoch303030
centerXXX
SK normXXX
ME-MAX weight4.04.04.0
ent. weight temp. γ-0.01-0.005-0.01
γ init.-0.5-0.5-0.5
γ decay epoch-30-30-30
+ +During training, we only applied basic data augmentations including random resized crops of size $224 \times 224$ and horizontal flips. During testing, we took a single central crop of the same size. For ViT-S/16, Caron et al. (2021) found that concatenating the [CLS] token representations from the last $l$ (specifically, $l = 4$ ) layers (c.f. Appendix F.2 in Caron et al. (2021)) improved the results by about 1 p.p. We followed the same procedure, but reported linear evaluation results with both $l = 1$ and $l = 4$ in Table 2 for consistency. In our empirical study with ViT-S/16, we used the result with $l = 1$ . For larger models (e.g., ViT-B/16), we followed Caron et al. (2021); Zhou et al. (2022) to use the concatenation of the [CLS] token representation and the average-pooled patch tokens from the last $l = 1$ layer for linear evaluation. For all linear evaluations, we searched the base learning rate over $\{4.8\mathrm{e} - 3, 1.6\mathrm{e} - 2, 4.8\mathrm{e} - 2, 1.6\mathrm{e} - 1, 4.8\mathrm{e} - 1, 1.6\}$ . + +Full-data $k$ -NN evaluation We followed the $k$ -NN evaluation protocol in Caron et al. (2021); Wu et al. (2018). Specifically, for each image in the given dataset, we took a single central crop of size $224 \times 224$ without additional data augmentations, and extracted the output [CLS] token representations from the frozen pretrained model. The extracted representations are used for a weighted $k$ -Nearest-Neighbor classifier. In particular, denote the stored training representations and labels as $\mathcal{D} = \{(z_i, y_i)\}_{i=1}^N$ . For a test image with extracted representation $z$ , denote the set of its top $k$ -NN training samples as $\mathcal{D}_k[z] \subseteq \mathcal{D}$ and $|\mathcal{D}_k[z]| = k$ . 
The $k$ -NN set $\mathcal{D}_k[z]$ is used to make the prediction for the test image with a weighted vote, i.e., $\hat{y} = \arg \max_y \left( \sum_{(z_j, y_j) \in \mathcal{D}_k[z]} \alpha_j \mathbf{1}_{y=y_j} \right)$ , where $\mathbf{1}_{y=y_j}$ is the one-hot vector corresponding to label $y_j$ and $\alpha_j$ is the weight induced by the cosine similarity between $z$ and $z_j$ , i.e., $\alpha_j = \exp \left( \frac{1}{\tau'} \frac{z^\top z_j}{||z|| \|z_j||} \right)$ . We set $\tau' = 0.07$ without tuning as in Caron et al. (2021); Wu et al. (2018). For all $k$ -NN evaluations, we searched $k$ over $\{5, 10, 20, 50, 100\}$ and found that $k = 10$ or $k = 20$ was consistently the best. + +Transfer evaluation via linear probing We mainly followed the transfer evaluation protocol in (Grill et al., 2020; Chen et al., 2020a). In particular, we used 9 of their 13 datasets that are available in tensorflow-datasets (tfd), namely Food-101 (Bossard et al., 2014), CIFAR10 (Krizhevsky et al., 2009), CIFAR100 (Krizhevsky et al., 2009), SUN397 scene dataset (Xiao et al., 2010), Stanford Cars (Krause et al., 2013), Describable Textures Dataset (Cimpoi et al., 2014, DTD), Oxford-IIIT Pets (Parkhi et al., 2012), Caltech-101 (Fei-Fei et al., 2004), Oxford 102 Flowers (Nilsback & Zisserman, + +Table 9: Empirical study of different techniques for avoiding collapse. Using Sinkhorn-Knopp normalization instead of centering for DINO leads to improved performance, and matches the original DINO even with a much smaller codebook. The ME-MAX regularization of MSN is very effective and leads to significant improvement for few-shot evaluations. + +
TechniqueFew-shotFull-data
CenterSinkhornME-MAX125~13 (1%)k-NNLinear
DINO37.8 ± 0.447.4 ± 0.356.9 ± 0.463.072.474.9
39.1 ± 0.349.4 ± 0.358.7 ± 0.264.874.176.0
MSN36.0 ± 0.446.6 ± 0.656.5 ± 0.263.273.275.2
43.9 ± 0.253.0 ± 0.361.1 ± 0.266.074.075.8
+ +Table 10: ME-MAX regularization is sensitive to hyper-parameters. + +
WeightFew-shotFull-data
125~13 (1%)KNNLinear
1.037.6 ± 0.248.0 ± 0.457.7 ± 0.264.073.575.6
3.043.9 ± 0.253.0 ± 0.361.1 ± 0.266.074.075.8
5.043.6 ± 0.252.6 ± 0.460.4 ± 0.165.573.975.6
+ +2008). Following their evaluation metrics, we reported mean per-class accuracy for Oxford-IIIT Pets, Caltech-101, and Oxford 102 Flowers datasets and reported top-1 accuracy for other datasets. We transferred the models pretrained on ImageNet (Deng et al., 2009) to these datasets by training a linear classifier on top of frozen representations. In particular, we resized given images to $256 \times 256$ and took a single central crop of size $224 \times 224$ without additional data augmentations. We extracted the output [CLS] token representations from the frozen pretrained model. Then we trained a linear classifier with multi-class logistic regression on top of the extracted representations. We used the scikit-learn package (Pedregosa et al., 2011) for the logistic regression classifier. For all transfer evaluations, we searched the $\mathbf{L}_2$ regularization strength over $\{1e - 6, 1e - 5, 1e - 4, 3e - 4, 1e - 3, 3e - 3, 1e - 2, 3e - 2, 1e - 1, 3, 1e, 3e, 1e2, 1e3, 1e4, 1e5\}$ . + +# C ADDITIONAL RESULTS + +# C.1 EMPIRICAL STUDY OF TECHNIQUES FOR AVOIDING COLLAPSE + +Most self-supervised learning methods utilize some techniques to avoid collapse of representations with, e.g., contrastive loss (Chen et al., 2020a; He et al., 2020), batch normalization (Grill et al., 2020), asymmetric architecture design with a predictor (Grill et al., 2020; Chen & He, 2021), etc. In DINO and MSN, a learnable codebook is used for the learning objective and different techniques are applied to encourage the effective codebook usage. There are two potential cases of collapse (as discussed in Caron et al. (2021)): + +- Dominating codes. This is the case of "winner-take-all": only a small portion of codes are being predicted while others are inactive. 
Typical solutions for avoiding this include applying Sinkhorn-Knopp normalization (Cuturi, 2013) as in SwAV (Caron et al., 2020), centering teacher logits as in DINO (Caron et al., 2021), and applying mean-entropy maximization regularization (ME-MAX) as in MSN (Assran et al., 2022). Note that in MSN, ME-MAX is combined with Sinkhorn-Knopp normalization by default. +- Uniform codes. This is the case where all codes are treated equally and the predictions reduce to be uniform over codes. A simple and effective solution is to apply sharpening, i.e., using a lower temperature for computing the teacher prediction. + +We systematically study different techniques in a unified setup. In particular, we used DINO with the ViT-S backbone, a 3-layer MLP projection head with hidden dimension 2048, and a codebook of size 4096 and dimension 256. We applied different techniques to DINO and searched the teacher temperature in $\{0.0125, 0.025, 0.05\}$ for each. For ME-MAX, we searched regularization weight in + +![](images/7aa3a2618ab4f81946ae6bb6a3ca92ed6ddae7efb732ca1e8841212ceb669cc1.jpg) +(a) Merged + +![](images/eacebcf1752170d31391abb882a7366a3ddf70fc8efbfdda8a126124a1dd4a4e.jpg) +(b) 1-shot + +![](images/17820ffe48f4d9f4a2a9c91f2bb0d87fd05d0efafd855e0c3791dd310c6138e9.jpg) +(c) $1\%$ -data + +![](images/93cf075d1b3664c9ea0e84057da987b3d78503601684857ad97b19dc53f52ece.jpg) +(d) Full-data +Figure 5: Effect of projection heads for different evaluation metrics. We compare non-ensemble projection heads with different depths and widths as well as our $(h_{\psi},\mu)$ -ensembles, and evaluate linear evaluation performance with different amounts of labeled data. (a) shows the comparison of normalized metrics for non-ensembles. (b)-(d) compare non-ensemble and $(h_{\psi},\mu)$ -ensembles by unnormalized metrics. 'Default' denotes the default projection heads used in many SSL methods. See analysis in Appx. C.2 for details. + +
For ME-MAX combined with Sinkhorn, we followed Assran et al. (2022) and used default regularization weight of 1.0. The results are in Table 10. We observed that: + +- DINO's centering operation is not as strong as other techniques, and it favours a larger teacher temperature (e.g., 0.05). It does not work well when the codebook size (4096) is not as large as the one used in the original DINO model (65536). Switching to use Sinkhorn-Knopp normalization leads to much more improved performance, and matches the performance of original DINO (Table 5) with a much smaller codebook. +- MSN's ME-MAX regularization is very effective, and leads to significant improvements over others. We also found it is sensitive to the regularization weight and teacher temperature (c.f. Table 10). However, we observed that combining ME-MAX with Sinkhorn does not work well without tuning the regularization weight (which is recommended by Assran et al. (2022)). + +# C.2 EMPIRICAL STUDY OF PROJECTION HEADS + +In this subsection, we systematically study the effect of projection heads for different evaluation metrics. In particular, we used DINO* ViT-S/16 as the base model and used different projection heads with (depth, width) searched over $\{2,3,4\} \times \{512,1024,2048,4096\}$ . All models are trained with 300 epochs using exact the same set of hyper-parameters. We measured the linear evaluation performance with different amount of labeled data (i.e., full-data, $1\%$ data, 1-shot). + +In Fig. 5a, we plot different evaluation metrics (normalized respectively by the best of each) versus the number of projection head parameters. In Figs. 5b to 5d, we plot each unnormalized evaluation metric respectively for different heads as well as our $(h_{\psi}, \mu)$ -ensembles. Our key findings are: + +![](images/dbf4514b134bb8ce72a6eac34d5f024be139ceac375ac2bd229901674992e7ca.jpg) +Figure 6: Effect of teacher temperature for non-ensemble DINO*. 
DINO* with a lower temperature can achieve better few-shot performance, but still under-performs our ensemble method (DINO*-ENT with 16 heads, orange lines). DINO* ViT-S/16 trained for 300 epochs is used and $\tau = 0.025$ is used for DINO*-ENT. + +- The projection head has a relatively larger impact on few-shot evaluation metrics, as reflected by the relative magnitudes of different metrics in Fig. 5a. A too powerful non-ensemble projection head significantly hurts the label efficiency of learned representations, reflected by a much larger drop in few-shot evaluation performance (up to 18 p.p. for 1-shot, 9 p.p. for $1\%$ data). This result is also partially observed in Chen et al. (2020b), where they found that probing from intermediate layers of projection heads (which can be viewed as using a shallower head) could improve the semi-supervised learning ( $1\% - 10\%$ ) results. +- The optimal projection head for different metrics can differ a lot. A weaker head improves label efficiency (few-shot performance), while a stronger (but not too strong) head improves linear decodability. As a result, the default projection head (3/2048) that is widely used in SimCLR v2 (Chen et al., 2020b), DINO (Caron et al., 2021), iBOT (Zhou et al., 2022), MSN (Assran et al., 2022), etc., does not perform well in few-shot evaluations (as shown by the green cross denoted as 'Default'), probably because it is selected by full-data evaluation metrics. +- There exist some projection heads that perform decently well on all evaluation metrics, e.g., the baseline model (3/1024) used in our experiments (pink star denoted as 'Our base'). +- Compared to naively tuning projection head architectures, our $(h_{\psi}, \mu)$ -ensembles (orange curves in Figs. 5b to 5d) consistently improve all metrics with different amounts of labeled data, even though it also increases the number of parameters in projection heads. 
Our $(h_{\psi}, \mu)$ -ensembles outperform all non-ensembles, which also include the counterparts of probing from intermediate layers from a deeper head (i.e., shallower heads). + +# C.3 EMPIRICAL STUDY OF $(h_{\psi},\mu)$ -ENSEMBLES + +Are the gains of ENT purely from sharper teacher predictions? Our ENT strategy assigns higher weights to the heads that predict with lower entropies, thus effectively uses sharper teacher predictions as the targets. One may be curious about how this effect accounts for the gains of the ENT strategy. We empirically answer this question by studying the non-ensemble baseline that uses sharper teacher predictions in a data-independent manner (in contrast to ENT, which uses data-dependent entropy weights). Specifically, we compare the non-ensemble DINO* that uses different teacher temperatures $\tau \in \{0.005, 0.01, 0.025, 0.05\}$ and also our DINO*-ENT (16) with $\tau = 0.025$ , as shown in Fig. 6. We find that the teacher temperature has a big impact on evaluation results especially for few-shot evaluation. Compared to our default baseline that uses $\tau = 0.025$ , a lower temperature (e.g., $\tau = 0.01$ ) can indeed improve the 1-shot performance (at the cost of worse full-data performance). However, a too low temperature ( $\tau = 0.005$ ) will hurt the performance. Our DINO*-ENT (16) consistently outperforms all the baselines, which implies the importance of selecting sharper teacher predictions in a data-dependent manner. 
HowWhereFew-shotFull-data
Proj. HeadCodebook125~13 (1%)k-NNLinear
Base40.6 ± 0.249.8 ± 0.257.9 ± 0.363.4 ± 0.272.3 ± 0.174.4 ± 0.1
UNIF40.4 ± 0.449.5 ± 0.457.6 ± 0.363.3 ± 0.372.2 ± 0.274.5 ± 0.2
PROB39.7 ± 0.549.0 ± 0.557.4 ± 0.463.0 ± 0.472.8 ± 0.274.8 ± 0.1
PROB41.9 ± 0.351.5 ± 0.559.6 ± 0.465.1 ± 0.373.7 ± 0.375.4 ± 0.1
ENT40.6 ± 0.449.5 ± 0.658.0 ± 0.463.5 ± 0.472.1 ± 0.374.5 ± 0.3
ENT43.0 ± 0.652.2 ± 0.859.7 ± 0.764.8 ± 0.572.9 ± 0.675.1 ± 0.4
ENT44.0 ± 0.253.0 ± 0.560.5 ± 0.365.5 ± 0.173.2 ± 0.175.3 ± 0.1
ENT-ST40.0 ± 0.539.2 ± 0.657.3 ± 0.562.7 ± 0.571.9 ± 0.474.0 ± 0.4
+ +Table 12: Comparison of different variants of PROB. The PROB strategy used in our experiments performs the best. ' -' in the table denotes training divergence for PROB-MAX. The experimental setup is the same as Table 11. + +
HowWhereFew-shotFull-data
Weight byTemp. γ125~13 (1%)k-NNLinear
Base40.6 ± 0.249.8 ± 0.257.9 ± 0.363.4 ± 0.272.3 ± 0.174.4 ± 0.1
PROBstudent141.9 ± 0.351.5 ± 0.559.6 ± 0.465.1 ± 0.373.7 ± 0.375.4 ± 0.1
PROB-TEteacher141.5 ± 0.250.4 ± 0.358.3 ± 0.363.7 ± 0.172.3 ± 0.274.6 ± 0.1
PROB-MAXstudent0------
PROB-MAX-TEteacher041.4 ± 0.250.3 ± 0.358.1 ± 0.363.6 ± 0.272.3 ± 0.274.5 ± 0.2
+ +Comparison of different ensemble strategies and variants We present the full table of Table 1 that includes all the metrics in Table 11. The same observation holds for all metrics. + +For all previous studies, we considered a specific instantiation of PROB strategy, i.e., weight by student predicted probabilities $f_{ijy} = \log s(y|\theta_j,x)$ and $\gamma = 1$ , which has a nice interpretation of model average (see Sec. 3.3). We also studied different variants of the PROB strategy (see Appx. D.1), + +PROB-TE: weight by teacher $f_{ijy} = \log t_i(y|x)$ and $\gamma = 1$ +PROB-MAX: weight by student $f_{ijy} = \log s_j(y|x)$ and $\gamma \rightarrow 0$ +PROB-MAX-TE: weight by teacher $f_{ijy} = \log t_i(y|x)$ and $\gamma \to 0$ + +Table 12 compares the downstream performance for all the variants. We find that our PROB (used in our empirical studies) performs better than other variants. Interestingly, weighting by the teacher (PROB-TE) performs worse than PROB. We conjecture that this is because the importance weights turn out to give a weighted average of teacher predictions as the surrogate target that is shared across all students (like PROB) but does not give effective preferential treatment across students which are directly optimized (unlike PROB-TE). Furthermore, PROB-MAX which sharpens the importance weights leads to training divergence. This is probably because the student predictions have higher variance based on which sharp weights lead to unstable training. In contrast, PROB-MAX-TE which uses the (lower-variance) teacher gives reasonable results comparable to PROB-TE. + +Number of ensembles for $\mathbf{MSN}^*$ In Fig. 7a, we study the effect of increasing the number of $(h_{\psi},\mu)$ -ensembles for $\mathbf{MSN}^*$ -ENT with ViT-S/16 trained for 800 epochs. The scaling trend is similar to DINO\*-ENT (Fig. 3a) and the gains start to diminish when the number of heads increases above 8. 
+ +Effect of ENT temperature $\gamma$ for $\mathbf{MSN}^*$ Fig. 7b studies the effect of entropy weighting temperature $\gamma$ for $\mathbf{MSN}^*$ -ENT. We observed that $\mathbf{MSN}^*$ is more robust to small temperatures, and the + +![](images/490cb30743786891a399ad47d2e7d5d357f8e031ff6e40f82d53e6a5347c2e08.jpg) +(a) Scaling of ensembles + +![](images/0a9b966552940abc5d3f63d5e0d2d091adf2b746587f3106e8aadebcd57a9fd5.jpg) +(b) Effect of ENT temperature $\gamma$ +Figure 7: Empirical study for $\mathbf{MSN}^*$ -ENT. (a) The gains by increasing the number of $(h_{\psi},\mu)$ ensembles start to diminish when it is over 8 heads. (b) $\mathbf{MSN}^*$ prefers smaller temperature for entropy weighting than DINO*. + +best $\gamma = 0.01$ is smaller than that of DINO\* $(\gamma = 0.05)$ . When the temperature is too high, the performance drops as a result of under-specialization (i.e., less diversity) as with DINO\*. + +# C.4 ANALYZING $(h_{\psi},\mu)$ -ENSEMBLE DIVERSITY + +Visualizing $(h_{\psi},\mu)$ -ensemble similarity We analyze the diversity between different heads by visualizing the similarity matrix between their codes. Directly measuring the similarity between codes in two heads could not work, because 1) they may live in different subspaces because of the ensembled projection heads; 2) they may not align in the natural order but in a permuted order. + +Therefore, we seek to align codes between different heads by how they are effectively used to 'cluster' the data. In particular, we use a set of randomly sampled inputs $\{x^i\}_{i\in [b]}$ of size $b = 51200$ to obtain an empirical code assignment matrix $A^{j}\in \mathbb{R}^{b\times c}$ for each $(h_{\psi},\mu)$ -ensemble $j\in [m]$ , where the $i$ -th row of $A^j$ corresponds to the teacher predictions $t_j(Y|x^i)$ . For the $k$ -th code in the head $j$ , we extract the $k$ -th column from $A^j$ (i.e., its empirical assignment) as its embedding. 
For two codes, we measure their similarity by the cosine similarity between their embeddings. For a pair of heads $j$ and $j'$ , we align their codes using the Hungarian algorithm (Kuhn, 1955) to maximize the sum of cosine similarity. After that, we plot the similarity matrix which is aligned and reordered by the similarity value on the diagonal (in an descending order). Note that it is not necessary to do the alignment procedure for the PROB strategy since it is naturally aligned because of the direct distribution averaging over $(h_{\psi},\mu)$ -ensembles, but we did for fair comparison with other strategies. + +We applied the same procedure for different ensemble weighting strategies using DINO* with 4 $(h_{\psi},\mu)$ -ensembles. We randomly picked a pair of heads and visualize the similarity matrix before (top row) and after (bottom row) the alignment-reordering setup in Fig. 8. We found that before the alignment procedure, the similarity matrix of the PROB strategy already mostly aligns because it explicitly introduces code correspondence between different heads. Furthermore, by analyzing the similarity decay pattern on the diagonal, it is clear that ENT learns the most diverse $(h_{\psi},\mu)$ -ensembles while UNIF learns the least ones, which may explain the difference of their empirical performance. For completeness, we also include the visualization of aligned similarity matrices for all pairs of heads in Figs. 9 to 11, the observations are the same. + +![](images/93d84602fa00e114dd3c0daeb60a27b653229fa1fbb04e8c93b8bae3163f38e4.jpg) +(a) UNIF + +![](images/69e3ff8a8e40a87c085722110883fb610a78890e858601ad95efdd9cf61d3d2a.jpg) +(b) PROB + +![](images/7634234890279c11db0dfd9b0cb24cd3d4b24177529df99d2e1436bfc66bea1b.jpg) +(c)ENT + +![](images/e05471b9f5e868d1dffbd8e5bcea52e9435ba7d5d8be5b8126734318036c4abf.jpg) +Figure 8: Visualization of $(h_{\psi},\mu)$ -ensemble diversity. 
ENT learns the most diverse $(h_{\psi},\mu)$ -ensembles while UNIF learns the least ones. We visualize the code similarity matrix between a pair of randomly selected projection heads. Top row shows the original similarity matrix (i.e., in natural order) and the bottom row shows the aligned similarity matrix which aligns codes by empirical assignment probabilities. DINO* ViT-S/16 with 4 heads is used. Best viewed in color. + +![](images/4fcb7930e54c036bf09639468394c07434ebea281fd08eeee5a751eb6cbb0c30.jpg) + +![](images/be1db7c343bf205e685344364e792bd47d8b3209eaab5706fca18ff963715821.jpg) + +![](images/b26d4702e63657ab529e61b968bf21c875cc03721b6843d0481d2a8fd9ae83f9.jpg) +Figure 9: Visualization of $(h_{\psi},\mu)$ -ensemble diversity between all pairs of heads for DINO*UNIF. The UNIF strategy does not learn diverse $(h_{\psi},\mu)$ -ensembles. DINO* with ViT-S/16 and 4 heads is used. Best viewed in color. + +![](images/d0eeef05fd5b7cbc2af0e80f19c6c5bc29c4d802d0008e6b11d345448f6f5d6a.jpg) + +![](images/800c2c5d5ceee656fe26bbec9d5a4eec6a5751cf5cc5f19a9e7a56734071c9b8.jpg) + +![](images/272be9539ff5f046a8c9e23b472a43e11da8447364537fc81791658061a50b93.jpg) +Head 2-Head 3 + +![](images/be6fa8e76ae37017b8a194d4f64f1ae0bc32fecf97ff2739cc8970c7f355fe34.jpg) +Head 2 - Head 4 + +![](images/01b47cb375d674dc6f01d1367efc025e231692d883ac3813544199c4fb5ff68f.jpg) +Head 3 - Head 4 + +![](images/ae5e040b53d11107d21ac0a879235838cf924b9ab2b0f5caa0a1a792481f6568.jpg) +Figure 10: Visualization of $(h_{\psi},\mu)$ -ensemble diversity between all pairs of heads for DINO\*PROB. The PROB strategy learns more diverse $(h_{\psi},\mu)$ -ensembles than UNIF. DINO\* with ViT-S/16 and 4 heads is used. Best viewed in color. 
+ +![](images/1375ae686351af0267608f622f6a018b95da8a0139150e0e7f5197326005e972.jpg) + +![](images/3ebf637ad44c2d4a824c4cab904b9560dcb9dba73522fc73344666c0257ba54f.jpg) + +![](images/151f38dca80bdbc22fd11b7f1783b360b52b7abe6e22abdf1102b673eb0e0f96.jpg) +Head 2 - Head 3 + +![](images/08efe47fa419970fcf1c422d4cc1d375935b1f0185e4068c9e706deec3c3fc1f.jpg) +Head 2 - Head 4 + +![](images/b36c8b5148138c8101d06cc95e49969bf5472dfab11ef4555ab5f67274ad87ce.jpg) +Head 3 - Head 4 + +![](images/99012c1cf59f4ca983add61d184cc261235a1f27e6126857cea6e0510e9fe7e8.jpg) +Figure 11: Visualization of $(h_{\psi},\mu)$ -ensemble diversity between all pairs of heads for DINO*-ENT. The ENT strategy learns the most diverse $(h_{\psi},\mu)$ -ensembles. DINO* with ViT-S/16 and 4 heads is used. Best viewed in color. + +![](images/bec4ce2bf9a81d5b0de88b89f5799727a134240fd9cf66377c3e800caf989a13.jpg) + +![](images/4151f52eebf1277b2204f9bf6d0a923d161832e267af427000acb645d8f04a57.jpg) + +# D ANALYSIS + +# D.1 DERIVATIONS + +In this subsection, we provide derivations for some non-trivial losses that we explore within our framework. + +Recall that our weighted cross-entropy loss is of the form, + +$$ +\begin{array}{l} \mathcal {L} _ {n} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \sum_ {i, j \in [ m ]} H ^ {\times} \left[ w _ {i j Y} \odot t _ {i} (Y | x), s (Y | \theta_ {j}, x) \right] (15) \\ = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \sum_ {i, j \in [ m ]} \sum_ {y \in \mathcal {Y}} w _ {i j y} t _ {i} (y | x) \log s (y | \theta_ {j}, x) (16) \\ \end{array} +$$ + +$$ +w _ {i j y} = \operatorname {s o f t m a x} \left(\left\{\frac {1}{\gamma} f _ {i j y} (\operatorname {s t o p g r a d} (\theta), x): i, j \in [ m ] \right\}\right). 
\tag {17} +$$ + +Furthermore, observe that, + +$$ +\nabla_ {\theta} \sum_ {i, j \in [ m ]} \mathsf {H} ^ {\times} \left[ w _ {i j Y} \odot t _ {i} (Y | x), s (Y | \theta_ {j}, x) \right] = \sum_ {i, j \in [ m ]} \int_ {\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \nabla_ {\theta} \log s (y | \theta_ {j}, x) \mathrm {d} y. \tag {18} +$$ + +This indicates that the proposed weighted ensemble SSL loss is simply a reweighted log-likelihood loss. We use this fact in our derivation of probability weighting (PROB) loss. + +Uniform weighting (UNIF) Our UNIF strategy in Eq. (6) uses $f_{ijy} = \log \delta (i - j)$ which gives $w_{ijy} = \frac{1}{m}\delta (i - j)$ (for any choice of $\gamma$ ), thus the loss, + +$$ +\begin{array}{l} \mathcal {L} _ {n} ^ {\mathrm {U N I F}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \sum_ {i, j \in [ m ]} \sum_ {y \in \mathcal {Y}} \frac {1}{m} \delta (i - j) t _ {i} (y | x) \log s (y | \theta_ {j}, x) (19) \\ = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \frac {1}{m} \sum_ {i \in [ m ]} \mathrm {H} ^ {\times} \left[ t _ {i} (Y | x), s (Y | \theta_ {i}, x) \right] (20) \\ \end{array} +$$ + +This loss assigns equal weights to the $m$ paired student/teacher pairs. + +A straightforward generalization is to assign equal weights to all possible pairs $(m^2)$ of student/teacher with $f_{ijy} = 0$ and $w_{ijy} = \frac{1}{m^2}$ , which gives the UNIF-ALL loss, + +$$ +\mathcal {L} _ {n} ^ {\text {U N I F - A L L}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \frac {1}{m ^ {2}} \sum_ {i, j \in [ m ]} \mathrm {H} ^ {\times} \left[ t _ {i} (Y | x), s (Y | \theta_ {j}, x) \right], \tag {21} +$$ + +Probability weighting (PROB) Recall our PROB loss in Eq. (7) has the form, + +$$ +\mathcal {L} _ {n} ^ {\mathrm {P R O B}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \mathrm {H} ^ {\times} \left[ \frac {1}{m} \sum_ {i \in [ m ]} t _ {i} (Y | x), \frac {1}{m} \sum_ {j \in [ m ]} s (Y | \theta_ {j}, x) \right]. 
\tag {22} +$$ + +We derive its equivalence with our general loss with $f_{ijy} = \log s(y|\theta_j,x)$ and $\gamma = 1$ in terms of the gradients, + +$$ +\begin{array}{l} \nabla_ {\theta} \mathcal {L} _ {n} ^ {\mathrm {P R O B}} (\theta) = \nabla_ {\theta} \frac {1}{m} \sum_ {i \in [ m ]} \int_ {\mathcal {Y}} t _ {i} (y | x) \log \frac {1}{m} \sum_ {j \in [ m ]} s (y | \theta_ {j}, x) \mathrm {d} y (23) \\ = \frac {1}{m} \sum_ {i \in [ m ]} \int_ {\mathcal {Y}} t _ {i} (y | x) \nabla_ {\theta} \log \frac {1}{m} \sum_ {j \in [ m ]} s (y | \theta_ {j}, x) d y (24) \\ = \frac {1}{m} \sum_ {i \in [ m ]} \int_ {\mathcal {Y}} t _ {i} (y | x) \frac {\frac {1}{m} \sum_ {j \in [ m ]} \nabla_ {\theta} s (y | \theta_ {j} , x)}{\frac {1}{m} \sum_ {j \in [ m ]} s (y | \theta_ {j} , x)} d y (25) \\ \end{array} +$$ + +$$ +\begin{array}{l} = \frac {1}{m} \sum_ {i \in [ m ]} \int_ {\mathcal {Y}} t _ {i} (y | x) \frac {\frac {1}{m} \sum_ {j \in [ m ]} s (y | \theta_ {j} , x) \nabla_ {\theta} \log s (y | \theta_ {j} , x)}{\frac {1}{m} \sum_ {j ^ {\prime} \in [ m ]} s (y | \theta_ {j ^ {\prime}} , x)} d y (26) \\ = \frac {1}{m} \sum_ {i, j \in [ m ]} \int_ {\mathcal {Y}} t _ {i} (y | x) \frac {s (y | \theta_ {j} , x)}{\sum_ {j ^ {\prime} \in [ m ]} s (y | \theta_ {j ^ {\prime}} , x)} \nabla_ {\theta} \log s (y | \theta_ {j}, x) d y (27) \\ = \nabla_ {\theta} \frac {1}{m} \sum_ {i, j \in [ m ]} \mathsf {H} ^ {\times} \left[ w _ {i j Y} \odot t _ {i} (Y | x), s (Y | \theta_ {j}, x) \right] (28) \\ \end{array} +$$ + +where $w_{ijy} = \frac{s(y|\theta_j,x)}{\sum_{j'\in[m]}s(y|\theta_{j'},x)}$ (or equivalently, $f_{ijy} = \log s(y|\theta_j,x)$ and $\gamma = 1$ ). The last equality is because $w_{ijy}$ is stopped gradient with respect to $\theta$ . This is the same analysis as done in Burda et al. (2016). The above formulation establishes the equivalence of gradients between two losses, which implies the same behavior (e.g., optimum) using gradient-based optimization, as the common practice of deep learning. 
+ +We also generalize this loss to some variants which we explore in Table 12. A "dual" variant is to use teacher predictions $f_{ijy} = \log t_i(y|x)$ instead of student ones; this implies $w_{ijy} = \frac{t_i(y|x)}{\sum_{i' \in [m]} t_{i'}(y|x)}$ and the PROB-TE loss, + +$$ +\mathcal {L} _ {n} ^ {\mathrm {P R O B - T E}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \sum_ {i, j \in [ m ]} \sum_ {y \in \mathcal {Y}} \frac {t _ {i} (y | x)}{\sum_ {i ^ {\prime} \in [ m ]} t _ {i ^ {\prime}} (y | x)} t _ {i} (y | x) \log s (y | \theta_ {j}, x). \tag {29} +$$ + +Note that this simply reduces to using weighted teacher predictions $\frac{t_i(y|x)}{\sum_{i' \in [m]} t_{i'}(y|x)} t_i(y|x)$ as the surrogate target that is shared across all students. + +Another generalization is to use "hard" weighting, i.e., $\gamma \rightarrow 0$ , which gives the PROB-MAX loss that only assigns weight to the most confident student, + +$$ +\mathcal {L} _ {n} ^ {\mathrm {P R O B - M A X}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \sum_ {i, j \in [ m ]} \sum_ {y \in \mathcal {Y}} w _ {i j y} t _ {i} (y | x) \log s (y | \theta_ {j}, x) \tag {30} +$$ + +$$ +w _ {i j y} = \delta \left(i - i ^ {*}\right) \delta \left(j - j ^ {*}\right), \quad \left(i ^ {*}, j ^ {*}\right) = \arg \max _ {i j} f _ {i j y}, \forall y \in \mathcal {Y}. \tag {31} +$$ + +This loss reduces to a generalization of multiple choice learning (Guzman-Rivera et al., 2012) used in multi-headed networks (Lee et al., 2015) in our ensemble SSL setup. Similarly, we can also derive the dual variant of it that uses the teacher predictions, which is omitted here for brevity. + +Entropy weighting (ENT) The derivation of ENT loss in Eq. (9) is similar to the UNIF loss but applies entropy weights. 
Recall that we use $f_{ijy} = -\mathsf{H}[t_i(Y|x)] + \log \delta (i - j)$ , which gives $w_{ijy} = \mathrm{softmax}_i(\{-\frac{1}{\gamma}\mathsf{H}[t_{i'}(Y|x)]:i'\in [m]\})$ and, + +$$ +\mathcal {L} _ {n} ^ {\mathrm {E N T}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \sum_ {i \in [ m ]} \operatorname {s o f t m a x} _ {i} \left(\left\{- \frac {1}{\gamma} \mathrm {H} \left[ t _ {i ^ {\prime}} (Y | x) \right]: i ^ {\prime} \in [ m ] \right\}\right) \mathrm {H} ^ {\times} \left[ t _ {i} (Y | x), s (Y | \theta_ {i}, x) \right]. \tag {32} +$$ + +One can also generalize it to its dual variant which uses the student entropies, i.e., $f_{ijy} = -\mathsf{H}[s(Y|\theta_j,x)] + \log \delta (i - j)$ , which gives the ENT-ST loss, + +$$ +\mathcal {L} _ {n} ^ {\mathrm {E N T - S T}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \sum_ {i \in [ m ]} \operatorname {s o f t m a x} _ {i} \left(\left\{- \frac {1}{\gamma} \mathrm {H} [ s (Y | \theta_ {i ^ {\prime}}, x) ]: i ^ {\prime} \in [ m ] \right\}\right) \mathrm {H} ^ {\times} \left[ t _ {i} (Y | x), s (Y | \theta_ {i}, x) \right]. \tag {33} +$$ + +# D.2 RELATING SOME LOSSES + +Here, we relate some losses derived above. Specifically, we relate the uniform weighting (UNIF, UNIF-ALL) and probability weighting (PROB) in Appx. D.2.1, and relate entropy weighting (ENT) and variance weighting in Appx. D.2.2. + +# D.2.1 UNIFORM & PROBABILITY WEIGHTING + +We first establish the relation between UNIF and PROB using the joint convexity of unnormalized KL divergence and the fact that our weighted cross-entropy loss is a weighted unnormalized KL divergence up to some constant in $\theta$ . In particular, the joint convexity of unnormalized KL divergence can be shown by combining the facts that Csiszár $f$ -divergences are jointly convex (Proposition 1 in Dragomir (2013)) and unnormalized KL divergence corresponds to the convex generator, $f(u) = u\log u - u + 1$ , as required by the proposition. 
+ +First, our weighted cross-entropy loss is unnormalized KL divergence up to some constant in $\theta$ : + +$$ +\mathcal {L} _ {n} ^ {\mathrm {U N I F}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \frac {1}{m} \sum_ {i \in [ m ]} \mathrm {K} \left[ t _ {i} (Y | x), s \left(Y \mid \theta_ {i}, x\right) \right] + \text {c o n s t a n t} \tag {34} +$$ + +$$ +\mathcal {L} _ {n} ^ {\mathrm {P R O B}} (\theta) = \frac {1}{n} \sum_ {x \in \mathcal {D} _ {n}} \mathrm {K} \left[ \frac {1}{m} \sum_ {i \in [ m ]} t _ {i} (Y | x), \frac {1}{m} \sum_ {j \in [ m ]} s (Y | \theta_ {j}, x) \right] + \text {c o n s t a n t} \tag {35} +$$ + +Therefore, the joint convexity of (unnormalized) KL divergence directly implies an ordering of the loss up to some constant in $\theta$ , i.e., + +$$ +\mathcal {L} _ {n} ^ {\mathrm {P R O B}} \leq \mathcal {L} _ {n} ^ {\mathrm {U N I F}} \tag {36} +$$ + +Furthermore, we can also relate PROB and UNIF-ALL using the fact that the (unnormalized) cross-entropy $\mathsf{H}^{\times}[p(X), q(X)]$ is linear in the first argument $p$ but convex in the second argument $q$ , which implies, + +$$ +\mathcal {L} _ {n} ^ {\text {P R O B}} \leq \mathcal {L} _ {n} ^ {\text {U N I F - A L L}} \tag {37} +$$ + +# D.2.2 ENTROPY & VARIANCE WEIGHTING + +Suppose $p(X)$ is a discrete distribution (normalized) on $\mathcal{X} = [c]$ . It can be shown that, + +$$ +\mathsf {H} [ p (X) ] \leq \frac {1}{2} \log \left(\operatorname {V a r} _ {p} [ X ] + \frac {1}{1 2}\right) + \frac {1}{2} \log (2 \pi e) \tag {38} +$$ + +where $\operatorname{Var}_p[X] = \sum_{x \in [c]} p(x)(x - \mu)^2$ and $\mu = \mathsf{E}_p[X] = \sum_{x \in [c]} p(x)x$ (Theorem 9.7.1, Cover & Thomas (1999)). Note, a tighter bound (Mow, 1998) also exists but it places stronger restrictions on $p$ . 
This relationship suggests that choosing weights proportional to $\exp(-\mathsf{H}[t_i(Y|x)])$ (as in ENT) is potentially related to choosing weights proportional to weighting by variance $(\operatorname{Var}_{t_i(Y|x)}[Y] + \epsilon)^{-1/2}$ where $(\epsilon = \frac{1}{12})$ . \ No newline at end of file diff --git a/2023/Weighted Ensemble Self-Supervised Learning/images.zip b/2023/Weighted Ensemble Self-Supervised Learning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..c1f0f0cfc428cdf1958733fb4b51f1d4176c7627 --- /dev/null +++ b/2023/Weighted Ensemble Self-Supervised Learning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:435f89ba8b76bfc1760754fb122b03fbda95751a5f8873aae4e7d778f1b46491 +size 1241569 diff --git a/2023/Weighted Ensemble Self-Supervised Learning/layout.json b/2023/Weighted Ensemble Self-Supervised Learning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..509dd79a8de18542e11f695cb15dc525e6b2e2d3 --- /dev/null +++ b/2023/Weighted Ensemble Self-Supervised Learning/layout.json @@ -0,0 +1,21457 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 68, + 501, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 68, + 501, + 86 + ], + "spans": [ + { + "bbox": [ + 105, + 68, + 501, + 86 + ], + "type": "text", + "content": "WEIGHTED ENSEMBLE SELF-SUPERVISED LEARNING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 95, + 489, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 95, + 489, + 146 + ], + "spans": [ + { + "bbox": [ + 121, + 95, + 489, + 146 + ], + "type": "text", + "content": "Yangjun Ruan*† Saurabh Singh Warren Morningstar Alexander A. Alemi \nSergey Ioffe Ian Fischer† Joshua V. 
Dillon† \nGoogle Research" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 276, + 162, + 335, + 174 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 162, + 335, + 174 + ], + "spans": [ + { + "bbox": [ + 276, + 162, + 335, + 174 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 178, + 471, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 178, + 471, + 357 + ], + "spans": [ + { + "bbox": [ + 140, + 178, + 471, + 357 + ], + "type": "text", + "content": "Ensembling has proven to be a powerful technique for boosting model performance, uncertainty estimation, and robustness in supervised learning. Advances in self-supervised learning (SSL) enable leveraging large unlabeled corpora for state-of-the-art few-shot and supervised learning performance. In this paper, we explore how ensemble methods can improve recent SSL techniques by developing a framework that permits data-dependent weighted cross-entropy losses. We refrain from ensembling the representation backbone; this choice yields an efficient ensemble method that incurs a small training cost and requires no architectural changes or computational overhead to downstream evaluation. The effectiveness of our method is demonstrated with two state-of-the-art SSL methods, DINO (Caron et al., 2021) and MSN (Assran et al., 2022). Our method outperforms both in multiple evaluation metrics on ImageNet-1K, particularly in the few-shot setting. We explore several weighting schemes and find that those which increase the diversity of ensemble heads lead to better downstream evaluation results. Thorough experiments yield improved prior art baselines which our method still surpasses; e.g., our overall improvement with MSN ViT-B/16 is 3.9 p.p. for 1-shot learning." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 367, + 206, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 367, + 206, + 379 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 206, + 379 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 385, + 298, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 298, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 298, + 495 + ], + "type": "text", + "content": "The promise of self-supervised learning (SSL) is to extract information from unlabeled data and leverage this information in downstream tasks (He et al., 2020; Caron et al., 2021); e.g., semi-supervised learning (Chen et al., 2020a,b), robust learning (Radford et al., 2021; Ruan et al., 2022; Lee et al., 2021), few-shot learning (Assran et al., 2022), and supervised learning (Tomasev et al., 2022). These successes have encouraged increasingly advanced SSL techniques" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 495, + 504, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 504, + 518 + ], + "type": "text", + "content": "(e.g., Grill et al., 2020; Zbontar et al., 2021; He et al., 2022). Perhaps surprisingly however, a simple and otherwise common idea has received limited consideration: ensembling." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 522, + 504, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 522, + 504, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 504, + 579 + ], + "type": "text", + "content": "Ensembling combines predictions from multiple trained models and has proven effective at improving model accuracy (Hansen & Salamon, 1990; Perrone & Cooper, 1992) and capturing predictive uncertainty in supervised learning (Lakshminarayanan et al., 2017; Ovadia et al., 2019). Ensembling in the SSL regime is nuanced, however; since the goal is to learn useful representations from unlabeled data, it is less obvious where and how to ensemble. We explore these questions in this work." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 583, + 506, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 583, + 506, + 705 + ], + "spans": [ + { + "bbox": [ + 104, + 583, + 506, + 705 + ], + "type": "text", + "content": "We develop an efficient ensemble method tailored for SSL that replicates the non-representation parts (e.g., projection heads) of the SSL model. In contrast with traditional \"post-training\" ensembling, our ensembles are only used during training to facilitate the learning of a single representation encoder, which yields no extra cost in downstream evaluation. We further present a family of weighted cross-entropy losses to effectively train the ensembles. The key component of our losses is the introduction of data-dependant importance weights for ensemble members. We empirically compare different choices from our framework and find that the choice of weighting schemes critically impacts ensemble diversity, and that greater ensemble diversity correlates with improved downstream performance. Our method is potentially applicable to many SSL methods; we focus on DINO (Caron et al., 2021) and MSN (Assran et al., 2022) to demonstrate its effectiveness. Fig. 
1 shows DINO improvements from using our ensembling and weighted cross-entropy loss." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 710, + 436, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 710, + 436, + 721 + ], + "spans": [ + { + "bbox": [ + 116, + 710, + 436, + 721 + ], + "type": "text", + "content": "*University of Toronto & Vector Institute. Work done as a student researcher at Google." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 118, + 721, + 459, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 721, + 459, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 721, + 459, + 731 + ], + "type": "inline_equation", + "content": "^{\\dagger}" + }, + { + "bbox": [ + 118, + 721, + 459, + 731 + ], + "type": "text", + "content": "Correspondence to yjruan@cs.toronto.edu, {iansf, jvdillon} @ google.com." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 276, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 276, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 276, + 94 + ], + "type": "text", + "content": "In summary, our core contributions are to:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 99, + 505, + 150 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 110, + 99, + 503, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 99, + 503, + 111 + ], + "spans": [ + { + "bbox": [ + 110, + 99, + 503, + 111 + ], + "type": "text", + "content": "- Develop a downstream-efficient ensemble method suitable for many SSL techniques (Sec. 3.1)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 112, + 465, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 112, + 465, + 124 + ], + "spans": [ + { + "bbox": [ + 110, + 112, + 465, + 124 + ], + "type": "text", + "content": "- Characterize an ensemble loss family of weighted cross-entropy objectives (Sec. 3.2)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 125, + 505, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 125, + 505, + 137 + ], + "spans": [ + { + "bbox": [ + 110, + 125, + 505, + 137 + ], + "type": "text", + "content": "- Conduct extensive ablation studies that improve the prior art baselines by up to 6.3 p.p. (Sec. 5.1)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 137, + 501, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 137, + 501, + 150 + ], + "spans": [ + { + "bbox": [ + 110, + 137, + 501, + 150 + ], + "type": "text", + "content": "- Further improve those baselines with ensembling (e.g., up to 5.5 p.p. gain for 1-shot) (Table 2)." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 165, + 201, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 165, + 201, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 201, + 177 + ], + "type": "text", + "content": "2 BACKGROUND" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 189, + 506, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 189, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 189, + 506, + 222 + ], + "type": "text", + "content": "In this section, we frame SSL methods from the perspective of maximum likelihood estimation (MLE) and use this as the notational basis to describe the state-of-the-art clustering-based SSL methods as well as derive their ensembled variants in Sec. 3." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "text", + "content": "From Maximum Likelihood to SSL Denote unnormalized KL divergence (Dikmen et al., 2014) between non-negative integrable functions " + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "inline_equation", + "content": "p, q" + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\mathsf{K}[p(X), q(X)] = \\mathsf{H}^{\\times}[p(X), q(X)] - \\mathsf{H}[p(X)]" + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\mathsf{H}^{\\times}[p(X), q(X)] = -\\int_{\\mathcal{X}} p(x) \\log q(x) \\, \\mathrm{d}x + \\int_{\\mathcal{X}} q(x) \\, \\mathrm{d}x - 1" + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "text", + "content": " is the unnormalized cross-entropy (with " + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "inline_equation", + "content": "0 \\log 0 = 0" + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\mathsf{H}[p(X)] = \\mathsf{H}^{\\times}[p(X), p(X)]" + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "text", + "content": ". 
These quantities simplify to their usual definitions when " + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "inline_equation", + "content": "p, q" + }, + { + "bbox": [ + 104, + 234, + 506, + 304 + ], + "type": "text", + "content": " are normalized, but critically they enable flexible weighting of distributions for the derivation of our weighted ensemble losses in Sec. 3.2." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "inline_equation", + "content": "\\nu(X, Y) = \\nu(X)\\nu(Y|X)" + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "text", + "content": " be a natural distribution of input/target pairs over the space " + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "inline_equation", + "content": "\\mathcal{X} \\times \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "inline_equation", + "content": "s(Y|\\theta, X)" + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "text", + "content": " be a predictive model of target given the input parameterized by " + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "inline_equation", + "content": "\\theta \\in \\mathcal{T}" + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "text", + "content": ". 
Supervised maximum likelihood seeks the minimum expected conditional population risk with respect to " + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 309, + 504, + 344 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 348, + 504, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 348, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 132, + 348, + 504, + 361 + ], + "type": "interline_equation", + "content": "\\mathsf {E} _ {\\nu (X)} \\mathsf {K} [ \\nu (Y | X), s (Y | \\theta , X) ] = \\mathsf {E} _ {\\nu (X)} \\mathsf {H} ^ {\\times} [ \\nu (Y | X), s (Y | \\theta , X) ] - \\mathsf {E} _ {\\nu (X)} \\mathsf {H} [ \\nu (Y | X) ]. \\tag {1}", + "image_path": "6c641804918299210a7a9af991a882bbd584c47a399f10783ce9600f773cb8f2.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": "Henceforth omit " + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\mathsf{E}_{\\nu(X)} \\mathsf{H}[\\nu(Y|X)]" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": " since it is constant in " + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\nu(X, Y)" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": " is unknown, a finite sample approximation is often employed. 
Denote a size-" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": " i.i.d. training set by " + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_n = \\{x_i\\}_{i \\in [n]} \\sim \\nu^{\\otimes n}" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": " and empirical distribution by " + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\hat{\\nu}(X, Y) = \\frac{1}{n} \\sum_{x \\in \\mathcal{D}_n, y \\sim \\nu(Y|x)} \\delta(X - x) \\delta(Y - y)" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\delta: \\mathbb{R} \\to \\{0, 1\\}" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": " is 1 when " + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "inline_equation", + "content": "x = 0" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": " and 0 otherwise. The sample risk is thus " + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "inline_equation", + "content": "-\\frac{1}{n} \\sum_{x \\in \\mathcal{D}_n} \\mathsf{H}^\\times[\\hat{\\nu}(Y|x), s(Y|\\theta, x)]" + }, + { + "bbox": [ + 104, + 372, + 504, + 426 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "content": "In SSL, we interpret " + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\nu(Y|x)" + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "content": " as being the oracle teacher under a presumption of how the representations will be evaluated on a downstream task. This assumption is similar to that made in Arora et al. (2019); Nozawa et al. (2020). We also assume " + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\hat{\\nu}(Y|X)" + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "content": " is inaccessible and/or unreliable. Under this view, some SSL techniques substitute " + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\hat{\\nu}(Y|x)" + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "content": " for a weakly learned target or \"teacher\", " + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "inline_equation", + "content": "t(Y|x)" + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "content": ". 
We don't generally expect " + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "inline_equation", + "content": "t(Y|x)" + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "content": " to recover " + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\nu(Y|x)" + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "content": "; we only assume that an optimal teacher exists and it is " + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "inline_equation", + "content": "\\nu(Y|x)" + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "content": ". With the teacher providing the targets, the loss becomes " + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "inline_equation", + "content": "-\\frac{1}{n}\\sum_{x\\in\\mathcal{D}_n}\\mathsf{H}^\\times[t(Y|x), s(Y|\\theta, x)]" + }, + { + "bbox": [ + 104, + 430, + 506, + 513 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 521, + 504, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 521, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 504, + 555 + ], + "type": "text", + "content": "Teacher and student in clustering SSL methods Clustering SSL methods such as SWaV (Caron et al., 2020), DINO (Caron et al., 2021), and MSN (Assran et al., 2022) employ a student model characterized by proximity between learned codebook entries and a data-dependent code," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 175, + 559, + 504, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 559, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 175, + 559, + 504, + 586 + ], + "type": "interline_equation", + "content": "s (Y | \\theta , x) = \\operatorname {s o f t m a x} \\left(\\left\\{\\frac {1}{\\tau} \\frac {\\left(h _ {\\psi} \\circ r _ {\\omega}\\right) (x) \\cdot \\mu_ {y}}{\\| \\left(h _ {\\psi} \\circ r _ {\\omega}\\right) (x) \\| _ {2} \\| \\mu_ {y} \\| _ {2}}: y \\in [ c ] \\right\\}\\right) \\tag {2}", + "image_path": "6e35e65700cfc187bb29020d6d1e0f6437ac9d2af0b895ec41c69d0930a17b3b.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 209, + 587, + 504, + 601 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 587, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 209, + 587, + 504, + 601 + ], + "type": "interline_equation", + "content": "\\theta = \\{\\omega , \\psi , \\left\\{\\mu_ {y} \\right\\} _ {y \\in [ c ]} \\} \\in \\mathcal {T}, \\tag {3}", + "image_path": "cdc594e740c54db61e436a3bb6a0f94af1b9c720c392a696e23f84464c51fef6.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "text", + "content": 
"where the encoder " + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "inline_equation", + "content": "r_{\\omega}:\\mathcal{X}\\to \\mathcal{Z}" + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "text", + "content": " produces the representations used for downstream tasks, and the projection head " + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "inline_equation", + "content": "h_\\psi :\\mathcal{Z}\\rightarrow \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "text", + "content": " and codebook entries " + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "inline_equation", + "content": "\\{\\mu_y\\}_{y\\in \\mathcal{Y}}\\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "text", + "content": " characterize the SSL loss. Eq. (2) can be viewed as \"soft clustering\", where the input is assigned to those centroids that are closer to the projection head's output. The projection head and codebook are used during training but thrown away for evaluation, which is empirically found vital for downstream tasks (Chen et al., 2020a;b). Hyperparameters " + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "inline_equation", + "content": "\\tau \\in \\mathbb{R}_{>0},c\\in \\mathbb{Z}_{>0}" + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "text", + "content": " represent temperature and codebook size. The teacher is defined as " + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "inline_equation", + "content": "t(Y|x) = s(Y|\\mathrm{stopgrad}(g(\\theta)),x)" + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "inline_equation", + "content": "g:\\mathcal{T}\\to \\mathcal{T}" + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "text", + "content": ". 
Commonly " + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "inline_equation", + "content": "g(\\theta)" + }, + { + "bbox": [ + 104, + 605, + 506, + 693 + ], + "type": "text", + "content": " is an exponential moving average of gradient descent iterates and the teacher uses a lower temperature than the student." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": "To capture desirable invariances and prevent degeneracy, data augmentation and regularization (e.g., Sinkhorn-Knopp normalization (Caron et al., 2020), mean entropy maximization (Assran et al., 2022)) are essential. As these are not directly relevant to our method, we omit them for brevity." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 174, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 174, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 174, + 94 + ], + "type": "text", + "content": "3 METHOD" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 107, + 506, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { 
+ "bbox": [ + 104, + 107, + 506, + 185 + ], + "spans": [ + { + "bbox": [ + 104, + 107, + 506, + 185 + ], + "type": "text", + "content": "Ensembling is a technique that combines models to boost performance, and has been especially successful in supervised learning. We are interested in ensembling methods that carry over this success to SSL approaches. However, SSL has key differences, such as throw-away \"projection heads\", from supervised learning that result in a multitude of possibilities for how to ensemble. With this in mind, we propose first where to ensemble, and then how to ensemble. Those proposals result in an efficient \"peri-training\" ensembling technique specifically tailored for SSL and a family of weighted ensemble objectives; we subsequently suggest different ways to select the weights." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 201, + 233, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 233, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 233, + 213 + ], + "type": "text", + "content": "3.1 WHERE TO ENSEMBLE?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "text", + "content": "Denote the teacher/student ensembles by " + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "inline_equation", + "content": "\\{t_i(Y|x)\\}_{i\\in [m]}" + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "inline_equation", + "content": "\\{s(Y|\\theta_j,x)\\}_{j\\in [m]}" + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "text", + "content": " and define each as in Sec. 
2; parameters " + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "inline_equation", + "content": "\\theta = \\{\\theta_{j}\\}_{j\\in [m]}\\in \\mathcal{T}^{m}" + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "text", + "content": " are independently initialized, all students use one temperature and all teachers another. We asymmetrically denote " + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "inline_equation", + "content": "t_i(Y|x)" + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "inline_equation", + "content": "s(Y|\\theta_j,x)" + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "text", + "content": " to emphasize that teachers' gradients are zero and that the students are distinct solely by way of " + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "inline_equation", + "content": "\\theta_{i}\\neq \\theta_{j}" + }, + { + "bbox": [ + 104, + 223, + 317, + 336 + ], + "type": "text", + "content": ". Studying heterogeneous architectures and/or different teacher parameterizations is left for future work." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "content": "Recall that " + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "inline_equation", + "content": "\\theta_{j}" + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "content": " parameterizes the encoder, projection head, and codebook parameters: " + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "inline_equation", + "content": "\\theta_{j} = (\\omega_{j},\\psi_{j},\\{\\mu_{jy}\\}_{y\\in \\mathcal{Y}})" + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "content": ". We further restrict " + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^m" + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "inline_equation", + "content": "\\omega_{i} = \\omega_{j}" + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "content": ", i.e., we limit our consideration to ensembles of projection heads " + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "inline_equation", + "content": "h_{\\psi_j}" + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "content": " and/or codebooks " + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "inline_equation", + "content": "\\mu_{j}" + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "content": " but not encoders " + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "inline_equation", + "content": "r_{\\omega_j}" + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "content": ". 
This choice makes our ensemble method inherently different from traditional supervised ensembling or encoder " + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "inline_equation", + "content": "r_{\\omega}" + }, + { + "bbox": [ + 104, + 342, + 318, + 441 + ], + "type": "text", + "content": " ensembling: the ensembled parts are not used for evaluation but" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 441, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 506, + 464 + ], + "type": "text", + "content": "for improving the learning of non-enssembled representation encoder during training, thus it requires no change of downstream evaluation or computational cost. Ensembling of " + }, + { + "bbox": [ + 104, + 441, + 506, + 464 + ], + "type": "inline_equation", + "content": "r_{\\omega}" + }, + { + "bbox": [ + 104, + 441, + 506, + 464 + ], + "type": "text", + "content": " is left for future work." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 322, + 204, + 508, + 350 + ], + "blocks": [ + { + "bbox": [ + 322, + 204, + 508, + 350 + ], + "lines": [ + { + "bbox": [ + 322, + 204, + 508, + 350 + ], + "spans": [ + { + "bbox": [ + 322, + 204, + 508, + 350 + ], + "type": "image", + "image_path": "1a32ce26502f3e27b447c32b32c0018e24f6fc95bb0f146fdb5c4659fa233a6f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 322, + 357, + 506, + 437 + ], + "lines": [ + { + "bbox": [ + 322, + 357, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 322, + 357, + 506, + 437 + ], + "type": "text", + "content": "Figure 2: Overview of " + }, + { + "bbox": [ + 322, + 357, + 506, + 437 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 322, + 357, + 506, + 437 + ], + "type": "text", + "content": "-ensemble. 
Two augmented inputs are encoded by the teacher/student into representations, and then processed by an ensemble of heads. The loss for each head is weighted and summed into the final loss. Strike-through edges indicate stop-gradients. See Appx. A for pseudocode." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 479, + 222, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 479, + 222, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 222, + 491 + ], + "type": "text", + "content": "3.2 HOW TO ENSEMBLE?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 501, + 506, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 501, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 506, + 546 + ], + "type": "text", + "content": "We would like to extend the loss to support an ensemble of teacher/student pairs while respecting the MLE intuition of the loss as in Sec. 2. Additionally, we want to facilitate data-dependent importance weights, thus enabling preferential treatment of some teacher/student pairs. 
We therefore propose a weighted average (unnormized) cross-entropy loss," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 195, + 554, + 504, + 585 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 554, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 195, + 554, + 504, + 585 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] \\tag {4}", + "image_path": "94121557a8a8c811380f941c5abb1779209decf2c28c6b44763bdb4514ff490e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 168, + 586, + 504, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 586, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 168, + 586, + 504, + 609 + ], + "type": "interline_equation", + "content": "\\text {w h e r e} w _ {i j y} = \\operatorname {s o f t m a x} \\left(\\left\\{\\frac {1}{\\gamma} f _ {i j y} (\\operatorname {s t o p g r a d} (\\theta), x): i, j \\in [ m ] \\right\\}\\right). 
\\tag {5}", + "image_path": "087d7642c18cdf0d271df977caa3d94de8e78d78055c9015010c342242de837e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "text", + "content": "The notation " + }, + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "inline_equation", + "content": "w_{ijY} \\odot t_i(Y|x)" + }, + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "text", + "content": " denotes a Hadamard product; i.e., the product of event-specific weights and probabilities for each " + }, + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "inline_equation", + "content": "y \\in \\mathcal{V}" + }, + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "text", + "content": ". The hyperparameter " + }, + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "text", + "content": " is the temperature. The function " + }, + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "inline_equation", + "content": "f_{ijy}" + }, + { + "bbox": [ + 104, + 616, + 504, + 651 + ], + "type": "text", + "content": " is defined for brevity and discussed in the following section." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "This objective admits generality and flexibility for introducing various weighting schemes, as it supports potential interactions between all teacher/student pairs and allows the weights to be both model- and data-dependent. 
Up to a constant independent of " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": ", it is an importance weighted average of (unnormized) KL divergences between each teacher and each student; i.e., a mixture of MLE-like objectives. We stop the gradient of " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "w_{ijy}" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " in order to keep the overall gradient a weighted average of students' log-likelihood gradients, similar to Eq. (1). We also normalize the weights such that each data point equally contributes to the loss." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 211, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 211, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 211, + 94 + ], + "type": "text", + "content": "3.3 HOW TO WEIGHT?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 159 + ], + "type": "text", + "content": "In this section, we present several instantiations of our losses with different weighting schemes. We empirically show in Sec. 5 that the particular choice of weighting scheme is critical for the representation performance and the induced diversity of " + }, + { + "bbox": [ + 104, + 102, + 506, + 159 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 102, + 506, + 159 + ], + "type": "text", + "content": "-ensembles. For simplicity we assume " + }, + { + "bbox": [ + 104, + 102, + 506, + 159 + ], + "type": "inline_equation", + "content": "\\gamma = 1" + }, + { + "bbox": [ + 104, + 102, + 506, + 159 + ], + "type": "text", + "content": " in this section. We indicate with " + }, + { + "bbox": [ + 104, + 102, + 506, + 159 + ], + "type": "inline_equation", + "content": "\\Longleftrightarrow" + }, + { + "bbox": [ + 104, + 102, + 506, + 159 + ], + "type": "text", + "content": " that a loss has the same arg min as Eq. (4). For additional analysis and discussion, see Appx. D." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 169, + 506, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 169, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 169, + 506, + 194 + ], + "type": "text", + "content": "Uniform weighting (UNIF) The simplest strategy is to treat different teacher/student pairs independently and average each with uniform weighting; i.e.," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 198, + 505, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 198, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 143, + 198, + 505, + 228 + ], + "type": "interline_equation", + "content": "f _ {i j y} = \\log \\delta (i - j) \\Longleftrightarrow \\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m} \\sum_ {i \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right] \\tag {6}", + "image_path": "2a3f9d649a7e19740b115addff3277875ce27523629f74fe044736fef0834577.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 234, + 506, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 506, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 506, + 260 + ], + "type": "text", + "content": "This strategy introduces uniform weights " + }, + { + "bbox": [ + 104, + 234, + 506, + 260 + ], + "type": "inline_equation", + "content": "w_{i} = \\frac{1}{m}" + }, + { + "bbox": [ + 104, + 234, + 506, + 260 + ], + "type": "text", + "content": " over ensemble elements. 
The role of " + }, + { + "bbox": [ + 104, + 234, + 506, + 260 + ], + "type": "inline_equation", + "content": "\\log \\delta (i - j)" + }, + { + "bbox": [ + 104, + 234, + 506, + 260 + ], + "type": "text", + "content": " (here and elsewhere) is to sub-select corresponding teacher/student pairs rather than all " + }, + { + "bbox": [ + 104, + 234, + 506, + 260 + ], + "type": "inline_equation", + "content": "m^2" + }, + { + "bbox": [ + 104, + 234, + 506, + 260 + ], + "type": "text", + "content": " pairs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "type": "text", + "content": "Probability weighting (PROB) An alternative to using the average cross-entropy loss (UNIF) is to compute the cross-entropy loss of the average predictions whose gradient is weighted by " + }, + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "type": "inline_equation", + "content": "w_{ijy}" + }, + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "type": "text", + "content": " (see Appx. D.1). 
At " + }, + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "type": "inline_equation", + "content": "\\gamma = 1" + }, + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "type": "text", + "content": ", those gradient weights simplify into an average over the student probabilities:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 309, + 505, + 349 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 309, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 111, + 309, + 505, + 349 + ], + "type": "interline_equation", + "content": "f _ {i j y} = \\log s (y | \\theta_ {j}, x) \\iff \\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\mathsf {H} ^ {\\times} \\left[ \\frac {1}{m} \\sum_ {i \\in [ m ]} t _ {i} (Y | x), \\frac {1}{m} \\sum_ {j \\in [ m ]} s (Y | \\theta_ {j}, x) \\right] \\tag {7}", + "image_path": "bec58df67e15c99412f65c1ca799b6e2da8e1c885c3bf737ecc3915e34ec0d24.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 352, + 506, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 506, + 419 + ], + "type": "text", + "content": "Averaging the predictive distributions introduces correspondence between codes from different heads; thus different heads are no longer independent but instead cooperate to match the student to the teachers. The loss favors student heads with more confident predictions (i.e., larger " + }, + { + "bbox": [ + 104, + 352, + 506, + 419 + ], + "type": "inline_equation", + "content": "s(y|\\theta_j, x)" + }, + { + "bbox": [ + 104, + 352, + 506, + 419 + ], + "type": "text", + "content": "). Further motivation for averaging predictions comes from multi-sample losses studied in Morningstar et al. (2022). 
Note that the joint convexity of (unnormalized) KL divergence implies that this loss is upper bounded by the UNIF loss up to some constant in " + }, + { + "bbox": [ + 104, + 352, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 352, + 506, + 419 + ], + "type": "text", + "content": " (see Appx. D)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 423, + 506, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 423, + 506, + 458 + ], + "spans": [ + { + "bbox": [ + 104, + 423, + 506, + 458 + ], + "type": "text", + "content": "Although the PROB strategy favors confident student predictions, the weights change as a function of " + }, + { + "bbox": [ + 104, + 423, + 506, + 458 + ], + "type": "inline_equation", + "content": "y \\in \\mathcal{V}" + }, + { + "bbox": [ + 104, + 423, + 506, + 458 + ], + "type": "text", + "content": ". This may be in conflict with our intuition that SSL is like maximum likelihood (Sec. 2), since under that view, the teacher is responsible for weighting outcomes." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "content": "Entropy weighting (ENT) Another way to favor heads with more confident predictions is to directly weight by their predictive entropies; i.e.," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 136, + 497, + 504, + 510 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 497, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 136, + 497, + 504, + 510 + ], + "type": "interline_equation", + "content": "f _ {i j y} = - \\mathrm {H} [ t _ {i} (Y | x) ] + \\log \\delta (i - j) \\Longleftrightarrow \\tag {8}", + "image_path": "283805d1f3d44cc872a1bf1423900f11dbb3903ef735cd4f648e15e502b87658.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 119, + 512, + 504, + 541 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 512, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 119, + 512, + 504, + 541 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\mathrm {E N T}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i \\in [ m ]} \\operatorname {s o f t m a x} _ {i} \\left(\\left\\{- \\frac {1}{\\gamma} \\mathrm {H} \\left[ t _ {i ^ {\\prime}} (Y | x) \\right] : i ^ {\\prime} \\in [ m ] \\right\\}\\right) \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right] \\tag {9}", + "image_path": "4d0af2602010ef5a92d1b7a46911659ccd8e661672663af01498e55be239df54.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 548, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 506, + 639 + ], + "type": "text", + "content": "where the weight " + }, + { + "bbox": [ + 
104, + 548, + 506, + 639 + ], + "type": "inline_equation", + "content": "w_{i} = \\mathrm{softmax}_{i}\\left(\\{-\\frac{1}{\\gamma}\\mathsf{H}[t_{i'}(Y|x)]:i'\\in [m]\\}\\right)" + }, + { + "bbox": [ + 104, + 548, + 506, + 639 + ], + "type": "text", + "content": " is inversely correlated with the entropy of teacher predictions. In other words, the head whose teacher has a lower entropy (i.e., higher confidence about its prediction) is given a larger importance weight for learning the representation. Like PROB, this strategy encourages \"data specialists\" by emphasizing strongly opinionated teacher heads for different inputs. Like UNIF, different heads are treated more independently (than PROB), since interaction between different heads is introduced only through the weight computation. By preferring low-entropy teachers we also favor low variance teachers; this aligns with the intuition that using a lower-variance teacher benefits representation quality (Wang et al., 2022)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 650, + 504, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 650, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 650, + 504, + 673 + ], + "type": "text", + "content": "Countless other weighting schemes It is impossible to fully explore the space of weightings; the following might also be interesting to study in detail but were omitted due to resource constraints." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 127, + 679, + 504, + 691 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 679, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 127, + 679, + 504, + 691 + ], + "type": "interline_equation", + "content": "f _ {i j y} = 0 \\quad \\text {(F a v o r s a l l p a i r s o f t e a c h e r s / s t u d e n t s e q u a l l y)} \\tag {10}", + "image_path": "a5b0dd2ee85a1b798d613b9610b31d298bef0c9c0c956b830a114b950e356b98.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 127, + 693, + 504, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 693, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 127, + 693, + 504, + 704 + ], + "type": "interline_equation", + "content": "f _ {i j y} = \\log t _ {i} (y | x) \\quad (\\text {F a v o r s o p i n i o n a t e d t e a c h e r s}) \\tag {11}", + "image_path": "e4d83453d06cd40931f4fda8f6e5b62177e58d6251631a40bae572749f229fd7.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 127, + 706, + 504, + 719 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 706, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 127, + 706, + 504, + 719 + ], + "type": "interline_equation", + "content": "f _ {i j y} = - \\mathrm {H} [ s (Y | \\theta_ {j}, x) ] \\quad (\\text {F a v o r s l o w - e n t r o p y s t u d e n t s}) \\tag {12}", + "image_path": "aa7d72f3c35764f959195a2200736edc2eea074bc21667cbf5863faaaff899a5.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 127, + 720, + 504, + 733 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 720, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 127, + 720, + 504, + 733 + ], + "type": "interline_equation", + "content": "f _ {i j y} = \\mathsf {K} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] \\quad (\\text {F a v o r s d i s a g r e e i n g t e a c h e r / s t u d e n t p a 
i r s}) \\tag {13}", + "image_path": "9473a82d3d8c75cc19da308beec3849d4fb7de36e420cf9b3ff30853d96098dd.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 125, + 81, + 504, + 95 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 81, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 125, + 81, + 504, + 95 + ], + "type": "interline_equation", + "content": "f _ {i j y} = - \\frac {1}{2} \\log \\left(\\operatorname {V a r} _ {t _ {i} (Y | x)} [ Y ] + \\epsilon\\right) \\quad \\text {(F a v o r s l o w v a r i a n c e t e a c h e r s ; e . g . ,} \\epsilon = \\frac {1}{1 2}) \\tag {14}", + "image_path": "fb2a31360be1bcc070ffcda6e510220476d73c24972894cd0826998ed24f0938.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 98, + 507, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 98, + 507, + 122 + ], + "spans": [ + { + "bbox": [ + 104, + 98, + 507, + 122 + ], + "type": "text", + "content": "Note that \"aligned\" versions of all schemes are possible by using " + }, + { + "bbox": [ + 104, + 98, + 507, + 122 + ], + "type": "inline_equation", + "content": "f_{ijy} + \\log \\delta (i - j)" + }, + { + "bbox": [ + 104, + 98, + 507, + 122 + ], + "type": "text", + "content": ". 
We did early experiments exploring Eqs. (11) and (12), but the results were inferior and are largely omitted below." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 137, + 212, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 137, + 212, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 212, + 148 + ], + "type": "text", + "content": "4 RELATED WORK" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 161, + 506, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 161, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 104, + 161, + 506, + 328 + ], + "type": "text", + "content": "Self-supervised learning Recent work on self-supervised learning (SSL) focuses on discriminative or generative approaches. Most discriminative approaches seek to learn augmentation-invariant representations by enforcing the similarity between augmented pairs of the same image while utilizing different techniques to avoid collapse. Contrastive methods (Chen et al., 2020a; He et al., 2020; Wu et al., 2018; Hjelm et al., 2018; Bachman et al., 2019; Tian et al., 2020) use a large number of negative samples with a noise-contrastive objective (Gutmann & Hyvarinen, 2010; Oord et al., 2018). A large body of followup work eliminates the necessity of explicit negative samples with various techniques, including clustering assignment constraints (Caron et al., 2018; 2020; 2021; Asano et al., 2019), bootstrapping (Grill et al., 2020) or self-distillation (Caron et al., 2021) inspired by mean teacher (Tarvainen & Valpola, 2017), asymmetric architecture design (Grill et al., 2020; Chen & He, 2021), or redundancy reduction (Zbontar et al., 2021; Bardes et al., 2021). Recent generative approaches that use masked image modeling as the pretraining task (Dosovitskiy et al., 2020; Bao et al., 2021; He et al., 2022; Zhou et al., 2022; Xie et al., 2022) have achieved competitive finetuning performance. 
Our method may be applicable to all of the above methods that have some sort of \"projection head\", such as most of the discriminative approaches." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 338, + 507, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 507, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 507, + 515 + ], + "type": "text", + "content": "Ensemble methods Ensembling has been extensively studied for improving model performance (Hansen & Salamon, 1990; Perrone & Cooper, 1992; Dietterich, 2000) and uncertainty estimation (Lakshminarayanan et al., 2017; Ovadia et al., 2019) in supervised learning and semi-supervised learning (Laine & Aila, 2016). A major research direction is to train efficient ensembles with partial parameter sharing (Lee et al., 2015; Wen et al., 2020; Dusenberry et al., 2020; Havasi et al., 2020) or intermediate checkpointing (Huang et al., 2017; Garipov et al., 2018). Our method also shares the encoder parameters across ensembles, which is closely related to multi-headed networks (Lee et al., 2015; Tran et al., 2020). Ensemble methods for SSL are less explored. Some recent work studies ensembles of supervised models adapted from pretrained SSL models. Gontijo-Lopes et al. (2022) conduct an empirical study of ensembles adapted from different SSL models and find that higher divergence in SSL methods leads to less correlated errors and better performance. Wortsman et al. (2022) ensemble multiple finetuned models adapted from the same SSL model by averaging their weights, which boosts the performance without any inference cost. Our method differs from them in that it (1) applies to the SSL training stage to directly improve representation quality, rather than aggregates multiple models in the post-training/finetuning stage; (2) introduces little training cost and no evaluation cost; and (3) is complementary to these post-training/finetuning ensembling methods." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 529, + 201, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 201, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 201, + 542 + ], + "type": "text", + "content": "5 EXPERIMENTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "content": "We carefully study the impact of " + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 555, + 504, + 624 + ], + "type": "text", + "content": "-ensembles and our selected weighted ensemble losses (UNIF, PROB, and ENT) on smaller DINO models in Sec. 5.1. Using what we learned in those experiments, in Sec. 5.2 we present new state-of-the-art results on ImageNet-1K on various metrics for multiple model sizes by ensembling both DINO- and MSN-based models. Finally, we explore ensemble evaluations in the transfer learning setting in Sec. 5.3. Additional experimental details and results are in Appx. B and Appx. C, respectively." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 632, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 507, + 734 + ], + "type": "text", + "content": "Experimental setup We assessed the effectiveness of our method with two SSL methods: DINO (Caron et al., 2021) and MSN (Assran et al., 2022). In order to ensure that we are comparing against strong baselines, we consider three different classes of baselines: (1) evaluation numbers reported in the original works (Caron et al. (2021), Assran et al. (2022), and Zhou et al. 
(2022) for an additional baseline iBOT); (2) evaluation of our implementation using the hyperparameters reported in the original works (DINO only, for space reasons) to validate our implementation; and (3) evaluation of our implementation using the best hyperparameters that we found by tuning the baselines (DINO and MSN) for fair comparisons. In almost all models and evaluations, our retuned baselines give nontrivial performance improvements on top of previously reported numbers. These type (3) baselines" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 145, + 504, + 277 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "type": "text", + "content": "Table 1: Comparison of different ensemble strategies. ENT and PROB significantly improve over the non-ensembled baseline, while UNIF leads to no gains. Ensembling both the projection head and the codebook works the best. All models are DINO* ViT-S/16 trained for 300 epochs. Averages and standard deviations are over 3 initialization seeds. The linear evaluation results on ImageNet-1K with different amounts of labeled data are reported here (see Table 11 in Appx. 
C.3 for all metrics)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 145, + 504, + 277 + ], + "lines": [ + { + "bbox": [ + 107, + 145, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 107, + 145, + 504, + 277 + ], + "type": "table", + "html": "
HowWhere# of Labels Per Class
Proj. hψCode. μ15~13 (1%)Full
Base40.6 ± 0.257.9 ± 0.363.4 ± 0.274.4 ± 0.1
UNIF40.4 ± 0.457.6 ± 0.363.3 ± 0.374.5 ± 0.2
PROB39.8 ± 0.5 ↓ 0.957.4 ± 0.4 ↓ 0.563.0 ± 0.4 ↓ 0.474.8 ± 0.1 ↑ 0.4
PROB41.9 ± 0.3 ↑ 1.359.6 ± 0.4 ↑ 1.765.1 ± 0.3 ↑ 1.775.4 ± 0.1 ↑ 1.0
ENT-ST40.0 ± 0.5 ↓ 0.657.3 ± 0.5 ↓ 0.662.7 ± 0.5 ↓ 0.774.0 ± 0.4 ↓ 0.4
ENT40.8 ± 0.458.0 ± 0.463.5 ± 0.474.5 ± 0.3
ENT43.0 ± 0.6 ↑ 2.459.7 ± 0.7 ↑ 1.864.8 ± 0.5 ↑ 1.475.1 ± 0.4 ↑ 0.7
ENT44.0 ± 0.2 ↑ 3.460.5 ± 0.3 ↑ 2.665.5 ± 0.1 ↑ 2.275.3 ± 0.1 ↑ 0.9
", + "image_path": "6447524140b7634dc92578536ae658f8f0846bd5737531fcd84c0dea6796f34b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 285, + 506, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 506, + 342 + ], + "type": "text", + "content": "we label DINO* and MSN*, and we use them as the base models for our experiments with " + }, + { + "bbox": [ + 104, + 285, + 506, + 342 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 104, + 285, + 506, + 342 + ], + "type": "text", + "content": "-ensembles and weighted ensemble losses. Appx. B.2.1 describes the details for getting such strong performance for DINO* and MSN*. In particular, we find that the projection head has a crucial impact on label efficiency of representations and using a smaller head (3-layer MLP with hidden size 1024) significantly improves few-shot evaluation performance (see Appx. C.2)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "text", + "content": "Evaluation metrics We compared models trained with and without our " + }, + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "text", + "content": "-ensembles by measuring various evaluation metrics on ImageNet-1K (Deng et al., 2009). The evaluation metrics reflect the decodability and the label efficiency of learned representations. 
We measured the decodability with respect to both the linear classifier following the common linear evaluation protocol and the " + }, + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "text", + "content": "-NN classifier following Caron et al. (2021). We measured the label efficiency by evaluating the linear evaluation performance in few-shot settings, including " + }, + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "inline_equation", + "content": "\\sim 13" + }, + { + "bbox": [ + 104, + 353, + 506, + 441 + ], + "type": "text", + "content": "-shots) labeled data evaluation (Chen et al., 2020a) and 1-/2-/5-shot evaluations (Assran et al., 2022). All evaluations used frozen representations of the teacher encoder - we did not fine tune the models. See Appx. B.3 for details." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 455, + 313, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 455, + 313, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 455, + 313, + 467 + ], + "type": "text", + "content": "5.1 EMPIRICAL STUDY OF " + }, + { + "bbox": [ + 105, + 455, + 313, + 467 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 105, + 455, + 313, + 467 + ], + "type": "text", + "content": "-ENSEMBLES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": "Table 1 compares different strategies for where and how to ensemble. Fig. 
4 compares the impact of the weighted ensemble loss on " + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": "-ensemble diversity. Fig. 3 shows the effect of increasing the number of ensembles, adjusting the temperature " + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": ", and increasing baseline projection head parameters. In these experiments, we used DINO* with ViT-S/16 trained for 300 epochs as the base model. We compared different ensemble methods applied to the base model with " + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "inline_equation", + "content": "m = 16" + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": " heads which we found to work the best. 
For the ENT strategy in Table 1, the entropy weighting temperature " + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": " is set to " + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "inline_equation", + "content": "0.05\\times \\log (c)" + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": " by default which is selected from " + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "inline_equation", + "content": "\\{0.0125,0.025,0.05,0.1,0.2\\} \\times \\log (c)" + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": " where the scale " + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "inline_equation", + "content": "\\log (c)" + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": " gives the maximum entropy of the codebook size " + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": ". For PROB, we keep " + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "inline_equation", + "content": "\\gamma = 1" + }, + { + "bbox": [ + 104, + 475, + 507, + 565 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "text", + "content": "Where to ensemble We study the where question by ensembling either the projection head " + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "inline_equation", + "content": "h_{\\psi}" + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "text", + "content": ", the codebook " + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "text", + "content": ", or both with the ENT and the PROB ensemble strategies, as shown in Table 1. We find that ensembling both " + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "inline_equation", + "content": "h_{\\psi}" + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "text", + "content": " provides the largest gains for both losses, probably due to the increased flexibility for learning a diverse ensemble. Interestingly, only ensembling " + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "inline_equation", + "content": "h_{\\psi}" + }, + { + "bbox": [ + 104, + 575, + 506, + 633 + ], + "type": "text", + "content": " also works well for the ENT strategy." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "content": "How to ensemble We study the how question by considering four different loss variants: UNIF, PROB, ENT, and the variant of ENT with student entropy weighting. We find that when we ensemble both the projection head " + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "inline_equation", + "content": "h_{\\psi}" + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "content": " and the codebook " + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "content": ", the ENT ensemble strategy leads to the most significant gains (e.g., 3.4 p.p. gains for 1-shot and 0.9 p.p. gains for full-data). The PROB strategy also consistently improves the performance with a slightly larger gain (1 p.p.) in full-data evaluation. In contrast, we see no gains for the UNIF strategy over the baseline. We also study a variant of ENT that uses the student entropy (i.e., Eq. (12) with the log " + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\delta(i - j)" + }, + { + "bbox": [ + 104, + 643, + 507, + 733 + ], + "type": "text", + "content": " term) for the importance weights (denoted as ENT-ST). ENT-ST performs much worse than ENT and is even worse than the baseline." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 81, + 236, + 169 + ], + "blocks": [ + { + "bbox": [ + 111, + 81, + 236, + 169 + ], + "lines": [ + { + "bbox": [ + 111, + 81, + 236, + 169 + ], + "spans": [ + { + "bbox": [ + 111, + 81, + 236, + 169 + ], + "type": "image", + "image_path": "7d60d8124efc22ad555d6fbbdbc43aab642f5688816526f2126d36420340702c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 175, + 235, + 186 + ], + "lines": [ + { + "bbox": [ + 111, + 175, + 235, + 186 + ], + "spans": [ + { + "bbox": [ + 111, + 175, + 235, + 186 + ], + "type": "text", + "content": "(a) Scaling of " + }, + { + "bbox": [ + 111, + 175, + 235, + 186 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 111, + 175, + 235, + 186 + ], + "type": "text", + "content": "-ensembles." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 81, + 366, + 169 + ], + "blocks": [ + { + "bbox": [ + 242, + 81, + 366, + 169 + ], + "lines": [ + { + "bbox": [ + 242, + 81, + 366, + 169 + ], + "spans": [ + { + "bbox": [ + 242, + 81, + 366, + 169 + ], + "type": "image", + "image_path": "79a22c445327a2374c4567b01548ac9e1cde74ecbd0a82714617a48008948f91.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "text", + "content": "Figure 3: Empirical study of " + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "text", + "content": "-ensembles. (a) The gains of " + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "text", + "content": "-ensembles start to diminish above 16 heads. (b) The temperature for entropy weighting has a larger impact on few-shot performance. 16 heads are used and " + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "text", + "content": " is scaled by " + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "inline_equation", + "content": "\\log(c)" + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "text", + "content": ". 
(c) Our " + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "text", + "content": "-ensembles outperform all non-ensembled baselines when controlling for number of parameters. A too powerful non-ensemble projection head significantly harms accuracy. " + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 195, + 504, + 251 + ], + "type": "text", + "content": " data evaluation is shown. Also see Fig. 5." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 373, + 81, + 500, + 169 + ], + "blocks": [ + { + "bbox": [ + 246, + 175, + 364, + 187 + ], + "lines": [ + { + "bbox": [ + 246, + 175, + 364, + 187 + ], + "spans": [ + { + "bbox": [ + 246, + 175, + 364, + 187 + ], + "type": "text", + "content": "(b) Effect of ENT temperature " + }, + { + "bbox": [ + 246, + 175, + 364, + 187 + ], + "type": "inline_equation", + "content": "\\gamma" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 373, + 81, + 500, + 169 + ], + "lines": [ + { + "bbox": [ + 373, + 81, + 500, + 169 + ], + "spans": [ + { + "bbox": [ + 373, + 81, + 500, + 169 + ], + "type": "image", + "image_path": "f0d9635be7f5c83827576c16d79bb2fa15568867fec3b8742609cc81341a2443.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 175, + 492, + 186 + ], + "lines": [ + { + "bbox": [ + 380, + 175, + 492, + 186 + ], + "spans": [ + { + "bbox": [ + 380, + 175, + 492, + 186 + ], + "type": "text", + "content": "(c) Comparing different heads." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "type": "text", + "content": "We conjecture that this is because the student predictions typically have a larger variance than teacher predictions (Wang et al., 2022) especially when multi-crop augmentation (Caron et al., 2020; 2021) is applied to the student. Similar experiments on Eq. (11) and/or " + }, + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "type": "inline_equation", + "content": "\\gamma = 0" + }, + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "type": "text", + "content": " variants of PROB also resulted in inferior performance (see Table 12)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 319, + 357, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 357, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 357, + 484 + ], + "type": "text", + "content": "Analysis of " + }, + { + "bbox": [ + 104, + 319, + 357, + 484 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 104, + 319, + 357, + 484 + ], + "type": "text", + "content": "-ensemble diversity The previous experiments showed that the choice of ensemble weighting strategy has a large impact on performance. We hypothesize that this choice substantially impacts the diversity of the codebook ensembles. Since the codes in different heads may not be aligned, we align them by the similarity of their code assignment probabilities across different input images, which measures how the codes are effectively used to 'cluster' the data. See Appx. C.4 for detailed explanations and results. In Fig. 
4, we visualize the decay patterns of the similarity score between aligned codes (1.0 means the most similar) in a random pair of heads for each weighting strategy. ENT decays the fastest and UNIF decays the slowest, indicating that ENT learns the most diverse codebooks while UNIF is least diverse. This shows a positive correlation between the diversity of " + }, + { + "bbox": [ + 104, + 319, + 357, + 484 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 104, + 319, + 357, + 484 + ], + "type": "text", + "content": "-ensembles and the empirical" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 484, + 504, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 504, + 518 + ], + "type": "text", + "content": "performance of the ensemble strategies from Table 1. Finally, for UNIF, we find that different heads tend to learn the same semantic mappings even when randomly initialized; i.e., the code assignments in different heads become homogeneous up to permutation. See Fig. 8 for a visualization." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 363, + 321, + 499, + 393 + ], + "blocks": [ + { + "bbox": [ + 363, + 321, + 499, + 393 + ], + "lines": [ + { + "bbox": [ + 363, + 321, + 499, + 393 + ], + "spans": [ + { + "bbox": [ + 363, + 321, + 499, + 393 + ], + "type": "image", + "image_path": "7a994fe5319f23357708dff945fb9964c4b86408e92bf1413733afc789d9d957.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 401, + 506, + 479 + ], + "lines": [ + { + "bbox": [ + 362, + 401, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 362, + 401, + 506, + 479 + ], + "type": "text", + "content": "Figure 4: Visualization of code similarity. 
ENT learns the most diverse " + }, + { + "bbox": [ + 362, + 401, + 506, + 479 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 362, + 401, + 506, + 479 + ], + "type": "text", + "content": "-ensembles reflected by the fastest decay of similarity scores between aligned codes in different heads. UNIF has low diversity between heads." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "text", + "content": "Number of " + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "text", + "content": "-ensembles We study the effect of increasing the number of " + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "text", + "content": "-ensembles " + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "text", + "content": " for ENT in Fig. 3a. Having more " + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "text", + "content": "-ensembles boosts the performance until " + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "inline_equation", + "content": "m = 16" + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "text", + "content": ". 
Interestingly, using as few as " + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "inline_equation", + "content": "m = 2" + }, + { + "bbox": [ + 104, + 529, + 506, + 564 + ], + "type": "text", + "content": " heads already significantly improves over the baseline." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": "Effect of ENT temperature " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " Fig. 3b studies the effect of entropy weighting temperature " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " for different evaluation metrics. We observe that " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " has a relatively larger impact on few-shot evaluation performance. " + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": " should be neither too high nor too low: a high temperature leads to under-specialization (i.e. less diversity) of heads similar to UNIF (" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "inline_equation", + "content": "\\gamma \\rightarrow \\infty" + }, + { + "bbox": [ + 104, + 575, + 504, + 632 + ], + "type": "text", + "content": ") and a low temperature may otherwise lead to over-specialization (i.e., only a single head is used for each input)." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "Comparison of different projection heads Our method linearly increases projection head parameters, thus a natural question is: Is the gain of " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "-ensembles due to the increased power (or number of parameters) in projection heads? We answer this question with an empirical study of non-ensembled projection heads. Specifically, we studied non-ensembled " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "h_{\\psi}" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": " with (depth, width) searched over " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\{2,3,4\\} \\times \\{512,1024,2048,4096\\}" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": " and measured the linear evaluation performance with different amounts of labeled data. In Fig. 3c, we plot the " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": " data evaluation result with respect to the number of parameters of the projection head both for ensembled and non-ensembled baselines. See Appx. C.2 for detailed analysis and extra results for other metrics. 
Our key findings are:" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 63, + 506, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 63, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 104, + 63, + 506, + 168 + ], + "type": "text", + "content": "Table 2: Effectiveness of ensemble heads for DINO*/MSN* with different ViT models. Our ensemble heads consistently improve all downstream evaluation metrics on ImageNet-1K and achieve a new state-of-the-art for few-shot evaluations. For ViT-S/16, we report linear evaluation results probed from the last layer (left) and from the last 4 layers (right, following DINO). †We evaluated the few-shot settings using DINO's publicly-available pretrained weights in the cases those results were not reported in Caron et al. (2021). ‡MSN ViT-B/16 and ViT-B/8 are both trained for 600 epochs in Assran et al. (2022), whereas our models are trained for only 400, 300 epochs, respectively. For each architecture, we highlight the best DINO baseline and weighted ensemble in blue. For MSN, the corresponding highlights are yellow. The best results for each architecture and metric are bolded." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 173, + 504, + 504 + ], + "blocks": [ + { + "bbox": [ + 106, + 173, + 504, + 504 + ], + "lines": [ + { + "bbox": [ + 106, + 173, + 504, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 173, + 504, + 504 + ], + "type": "table", + "html": "
MethodFew-shotFull-data
125~13 (1%)k-NNLinear
ViT-S/16, 800 epochs
iBOT40.4 ± 0.550.8 ± 0.859.9 ± 0.265.975.2- / 77.9
DINO38.9 ± 0.448.9 ± 0.358.5 ± 0.164.574.576.1 / 77.0
DINO (Repro)39.1 ± 0.349.1 ± 0.558.6 ± 0.264.774.375.8 / 76.9
DINO* (Retuned)44.6 ± 0.253.6 ± 0.361.1 ± 0.266.274.175.8 / 76.9
MSN47.1 ± 0.155.8 ± 0.662.8 ± 0.367.2-- / 76.9
MSN* (Retuned)47.4 ± 0.156.3 ± 0.462.8 ± 0.267.173.375.6 / 76.6
DINO*-PROB (16)45.2 ± 0.454.9 ± 0.462.5 ± 0.267.375.176.5 / 77.6
DINO*-ENT (4)46.3 ± 0.155.5 ± 0.663.0 ± 0.367.574.876.2 / 77.2
DINO*-ENT (16)47.6 ± 0.1 ↑ 3.056.8 ± 0.564.0 ± 0.268.3 ↑ 2.175.376.8 / 77.7 ↑ 0.8
MSN*-ENT (2)48.8 ± 0.257.5 ± 0.564.0 ± 0.267.974.676.0 / 76.9
MSN*-ENT (8)50.1 ± 0.1 ↑ 2.758.9 ± 0.665.1 ± 0.368.7 ↑ 1.675.276.4 / 77.4 ↑ 0.8
ViT-B/16, 400 epochs
iBOT46.1 ± 0.356.2 ± 0.764.7 ± 0.369.777.179.5
DINO†43.0 ± 0.252.7 ± 0.561.8 ± 0.267.476.178.2
DINO* (Retuned)49.3 ± 0.158.1 ± 0.565.0 ± 0.369.176.078.5
MSN‡49.8 ± 0.258.9 ± 0.465.5 ± 0.3---
MSN* (Retuned)50.7 ± 0.159.2 ± 0.465.9 ± 0.269.774.778.1
DINO*-ENT (16)52.8 ± 0.1 ↑ 3.561.5 ± 0.467.6 ± 0.371.1 ↑ 2.077.179.1 ↑ 0.6
MSN*-ENT (8)53.7 ± 0.2 ↑ 3.062.4 ± 0.668.3 ± 0.271.5 ↑ 1.877.278.9 ↑ 0.8
ViT-B/8, 300 epochs
DINO†47.5 ± 0.257.3 ± 0.565.4 ± 0.370.377.480.1
DINO* (Retuned)49.5 ± 0.558.6 ± 0.665.9 ± 0.370.777.180.2
MSN‡55.1 ± 0.164.9 ± 0.771.6 ± 0.3---
MSN* (Retuned)51.9 ± 0.361.1 ± 0.467.7 ± 0.371.775.780.3
DINO*-ENT (16)55.0 ± 0.4 ↑ 5.563.4 ± 0.669.5 ± 0.373.4 ↑ 2.778.681.0 ↑ 0.8
MSN*-ENT (8)55.6 ± 0.2 ↑ 3.764.5 ± 0.570.3 ± 0.273.4 ↑ 1.778.980.8 ↑ 0.5
", + "image_path": "826bd0d1c3be03d85518b0f12bc7dced251218aae7e8ad7d1b39ff9fc91ddb2b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 517, + 506, + 632 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 110, + 517, + 506, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 517, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 110, + 517, + 506, + 561 + ], + "type": "text", + "content": "- A too powerful non-enssembled " + }, + { + "bbox": [ + 110, + 517, + 506, + 561 + ], + "type": "inline_equation", + "content": "h_{\\psi}" + }, + { + "bbox": [ + 110, + 517, + 506, + 561 + ], + "type": "text", + "content": " significantly hurts the label efficiency of learned representations. This result is similar to Chen et al. (2020b), which found that probing from intermediate layers of projection heads (which can be viewed as using a shallower head) could improve semi-supervised learning (" + }, + { + "bbox": [ + 110, + 517, + 506, + 561 + ], + "type": "inline_equation", + "content": "1\\% - 10\\%" + }, + { + "bbox": [ + 110, + 517, + 506, + 561 + ], + "type": "text", + "content": " labeled data) results." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 563, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 563, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 110, + 563, + 506, + 608 + ], + "type": "text", + "content": "- The default head (3/2048, denoted as 'Default') used in recent SSL methods (SimCLRv2, DINO, MSN, etc.) does not perform as well in few-shot evaluations, probably because it is selected by looking at full-data evaluation metrics. In contrast, our baseline (3/1024, denoted as 'Our baseline') significantly improves few-shot evaluation performance." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 609, + 506, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 609, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 110, + 609, + 506, + 632 + ], + "type": "text", + "content": "- Our " + }, + { + "bbox": [ + 110, + 609, + 506, + 632 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 110, + 609, + 506, + 632 + ], + "type": "text", + "content": "-ensembles outperform all non-enssembled baselines and lead to consistent improvements in all evaluation metrics, despite the increase of parameters." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 645, + 340, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 645, + 340, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 340, + 656 + ], + "type": "text", + "content": "5.2 IMPROVING SOTA RESULTS WITH ENSEMBLEING" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "Next we apply " + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "-ensembles to DINO* and MSN* and compare with the state-of-the-art results. We experimented with model architectures ViT-S/16, ViT-B/16, ViT-B/8 trained for 800, 400, 300 epochs respectively following Caron et al. (2021). We include both the published results and our returned versions to ensure strong baselines. For clarity, we denote our method as “{baseline}-{ensemble strategy} (# of heads)”, e.g., DINO*-ENT (4). We tuned both baselines and our methods for all architectures. We report the best hyperparameters for all models in Appx. 
B.2.2." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": "Table 2 compares the results of " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": "-ensembles and baselines. We find that " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": "-ensembles with ENT consistently improve all evaluation metrics (full-data, few-shot) across both SSL methods (DINO*, MSN*) and all architectures (ViT-S/16, ViT-B/16, ViT-B/8) over their non-ensembld counterparts. The gains in few-shot evaluation are particularly substantial, providing a new state-of-the-art for ImageNet-1K evaluation from ImageNet pretraining." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 150, + 328, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 150, + 328, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 328, + 163 + ], + "type": "text", + "content": "5.3 MORE EVALUATIONS FOR " + }, + { + "bbox": [ + 105, + 150, + 328, + 163 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 105, + 150, + 328, + 163 + ], + "type": "text", + "content": " -ENSEMBLES" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 196, + 504, + 255 + ], + "blocks": [ + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "lines": [ + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "type": "text", + "content": "Table 3: Comparison of transfer performance. ViT-S/16 is used. Our ensemble heads lead to consistent improvements for " + }, + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "type": "inline_equation", + "content": "\\mathrm{MSN^{*}}" + }, + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "type": "text", + "content": " and comparable results for DINO*." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 196, + 504, + 255 + ], + "lines": [ + { + "bbox": [ + 107, + 196, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 107, + 196, + 504, + 255 + ], + "type": "table", + "html": "
Food101CIFAR10CIFAR100SUN397CarsDTDPetsCaltech-101FlowersAvg.
DINO*78.493.881.066.166.774.692.094.994.482.43
DINO*-ENT (16)79.193.881.466.566.874.992.894.693.982.64
MSN*77.793.179.864.663.372.292.494.792.781.17
MSN*-ENT (8)78.493.981.165.268.073.293.195.492.882.34
", + "image_path": "cdbaf3dca865c69b60f19eff8a4d8982561ce030cfab7252d09568ff127d58c0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 256, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 256, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 256, + 504, + 312 + ], + "type": "text", + "content": "Transfer learning In Table 3, we compare the transfer learning performance of " + }, + { + "bbox": [ + 104, + 256, + 504, + 312 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 104, + 256, + 504, + 312 + ], + "type": "text", + "content": "-ensembles and non-ensembed baselines. We used ViT-S-16 models trained on ImageNet-1K for 800 epochs and evaluated on 9 natural downstream datasets from Chen et al. (2020a) with linear evaluation (details in Appx. B.3). " + }, + { + "bbox": [ + 104, + 256, + 504, + 312 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 104, + 256, + 504, + 312 + ], + "type": "text", + "content": "-ensembles lead to consistent improvements in transfer performance for " + }, + { + "bbox": [ + 104, + 256, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\mathrm{MSN}^*" + }, + { + "bbox": [ + 104, + 256, + 504, + 312 + ], + "type": "text", + "content": " and comparable results for DINO*." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 317, + 359, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 317, + 359, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 317, + 359, + 417 + ], + "type": "text", + "content": "Training overhead In Table 4, we benchmark the computational overhead of " + }, + { + "bbox": [ + 104, + 317, + 359, + 417 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 104, + 317, + 359, + 417 + ], + "type": "text", + "content": "-ensembles at training time. We used a medium sized model, DINO* with ViT-B/16, trained with the same setting used in all of our experiments. We benchmarked the wall-clock time and peak memory on 128 TPUv3 cores. " + }, + { + "bbox": [ + 104, + 317, + 359, + 417 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 104, + 317, + 359, + 417 + ], + "type": "text", + "content": "-ensembling is relatively cheap in terms of training cost because the ensembled parts typically account for a small portion of total computation, especially when the backbone encoder is more computationally expensive (e.g., ViT-B/8)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 417, + 492, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 417, + 492, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 492, + 429 + ], + "type": "text", + "content": "Again, we emphasize that there is no evaluation overhead when " + }, + { + "bbox": [ + 105, + 417, + 492, + 429 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 105, + 417, + 492, + 429 + ], + "type": "text", + "content": "-ensembles are removed." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 365, + 362, + 503, + 416 + ], + "blocks": [ + { + "bbox": [ + 362, + 315, + 506, + 359 + ], + "lines": [ + { + "bbox": [ + 362, + 315, + 506, + 359 + ], + "spans": [ + { + "bbox": [ + 362, + 315, + 506, + 359 + ], + "type": "text", + "content": "Table 4: Training overhead. Wall-clock time and peak memory per core for training with different numbers of ensembles." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 365, + 362, + 503, + 416 + ], + "lines": [ + { + "bbox": [ + 365, + 362, + 503, + 416 + ], + "spans": [ + { + "bbox": [ + 365, + 362, + 503, + 416 + ], + "type": "table", + "html": "
mWall TimePeak Memory
15.81h5.25G
45.91h5.40G
166.34h5.89G
", + "image_path": "04e9dba9ecc945cde833cf63b0ef7a8ad64ecb50213bf25b413a96769eacacab.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 443, + 276, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 443, + 276, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 276, + 455 + ], + "type": "text", + "content": "6 CONCLUSION & DISCUSSION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 467, + 506, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 467, + 506, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 467, + 506, + 556 + ], + "type": "text", + "content": "We introduced an efficient ensemble method for SSL where multiple projection heads are ensembled to effectively improve representation learning. We showed that with carefully designed ensemble losses that induce diversity over ensemble heads, our method significantly improves recent state-of-the-art SSL methods in various evaluation metrics, particularly for few-shot evaluation. Although ensembling is a well-known technique for improving evaluation performance of a single model, we demonstrated that, for models with throw-away parts such as the projection heads in SSL, ensembling these parts can improve the learning of the non-ensemble representation encoder and also achieve significant gains in downstream evaluation without introducing extra evaluation cost." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 561, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 506, + 639 + ], + "type": "text", + "content": "Our ensemble method is applicable to many SSL methods beyond the two we explored. 
For example, one may consider generalization to BYOL (Grill et al., 2020) or SimSiam (Chen & He, 2021) that ensembles projection and/or prediction heads, or MAE (He et al., 2022) that ensembles the decoders (which introduces more training cost though). Our weighted ensemble losses can also be applied as long as the original loss can be reformulated as MLE for some " + }, + { + "bbox": [ + 104, + 561, + 506, + 639 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 561, + 506, + 639 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 561, + 506, + 639 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 561, + 506, + 639 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 561, + 506, + 639 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 104, + 561, + 506, + 639 + ], + "type": "text", + "content": ", e.g., the MSE loss in these methods is MLE under multivariate normal distributions. We hope our results and insights will motivate more future work for extending our method or exploring more ensemble techniques for SSL." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "In future work, we also hope to remove three limitations of our setting. First, considering ensembling strategies that include the representation encoder, " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "r_{\\omega}" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": ", may lead to further improvements in the performance of weighted ensemble SSL, at the cost of increased computation requirements during both training and evaluation. 
Second, considering heterogenous architectures in the ensemble may further improve the learned representations (e.g., mixing Transformers with ResNets), whether the heterogeneity is in " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "r_{\\omega}, h_{\\psi}" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": ", or both. Third, considering other possibilities for " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "f_{ijy}" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": " may also reveal performance gains and improve our understanding of the critical aspects that lead to good SSL representations, similar to what we learned about the importance of ensemble diversity." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 101, + 506, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 101, + 506, + 136 + ], + 
"spans": [ + { + "bbox": [ + 105, + 101, + 506, + 136 + ], + "type": "text", + "content": "We would like to thank Mathilde Caron and Mahmoud Assran for their extensive help in reproducing DINO and MSN baselines. We would also like to thank Ting Chen and Yann Dubois for their helpful discussions and encouragements." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 146, + 248, + 158 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 146, + 248, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 146, + 248, + 158 + ], + "type": "text", + "content": "REPRODUCIBITLITY STATEMENT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 165, + 506, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 165, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 506, + 211 + ], + "type": "text", + "content": "We include detailed derivations for all our proposed losses in Appx. D. We report experimental details in Appx. B, including the implementation details for reproducing the baselines (Appx. B.1), training and evaluating our methods (Appx. B.2.1), and all hyper-parameters (Appx. B.2.2) used in our experiments for reproducing our results in Table 2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 221, + 165, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 221, + 165, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 221, + 165, + 232 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 240, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 240, + 505, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 240, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 505, + 262 + ], + "type": "text", + "content": "TensorFlow Datasets, a collection of ready-to-use datasets. https://www.tensorflow.org/datasets." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 269, + 506, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 269, + 506, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 506, + 303 + ], + "type": "text", + "content": "Sanjeev Arora, Hrishikesh Khandeparkar, Mikhail Khodak, Orestis Plevrakis, and Nikunj Saunshi. A theoretical analysis of contrastive unsupervised representation learning. arXiv preprint arXiv:1902.09229, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 309, + 504, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 309, + 504, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 504, + 333 + ], + "type": "text", + "content": "Yuki Markus Asano, Christian Rupprecht, and Andrea Vedaldi. Self-labelling via simultaneous clustering and representation learning. arXiv preprint arXiv:1911.05371, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 338, + 506, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 338, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 506, + 373 + ], + "type": "text", + "content": "Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, and Nicolas Ballas. Masked siamese networks for label-efficient learning. arXiv preprint arXiv:2204.07141, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 379, + 504, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 379, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 504, + 403 + ], + "type": "text", + "content": "Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. Advances in neural information processing systems, 32, 2019." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 408, + 504, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 408, + 504, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 504, + 432 + ], + "type": "text", + "content": "Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 437, + 504, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 504, + 462 + ], + "type": "text", + "content": "Adrien Bardes, Jean Ponce, and Yann LeCun. Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 468, + 506, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 506, + 501 + ], + "type": "text", + "content": "Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, pp. 446-461. Springer, 2014." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 507, + 504, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 507, + 504, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 507, + 504, + 531 + ], + "type": "text", + "content": "Yuri Burda, Roger B Grosse, and Ruslan Salakhutdinov. Importance weighted autoencoders. In ICLR, 2016." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 536, + 506, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 536, + 506, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 506, + 571 + ], + "type": "text", + "content": "Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In Proceedings of the European conference on computer vision (ECCV), pp. 132-149, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 577, + 506, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 577, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 506, + 612 + ], + "type": "text", + "content": "Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. Advances in Neural Information Processing Systems, 33:9912-9924, 2020." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 617, + 504, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 617, + 504, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 504, + 652 + ], + "type": "text", + "content": "Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9650-9660, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 658, + 506, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 506, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 506, + 692 + ], + "type": "text", + "content": "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. 
In International conference on machine learning, pp. 1597-1607. PMLR, 2020a." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 697, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 504, + 732 + ], + "type": "text", + "content": "Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey E Hinton. Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems, 33:22243-22255, 2020b." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 106 + ], + "type": "text", + "content": "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15750-15758, 2021." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 110, + 507, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 110, + 507, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 507, + 146 + ], + "type": "text", + "content": "Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3606-3613, 2014." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 150, + 503, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 150, + 503, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 503, + 164 + ], + "type": "text", + "content": "Thomas M Cover and Joy A Thomas. Elements of Information Theory. John Wiley & Sons, 1999." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 169, + 505, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 169, + 505, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 169, + 505, + 194 + ], + "type": "text", + "content": "Marco Cuturi. Sinkhorn distances: Lightspeed computation of optimal transport. Advances in neural information processing systems, 26, 2013." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 198, + 507, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 198, + 507, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 507, + 233 + ], + "type": "text", + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 238, + 505, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 238, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 505, + 263 + ], + "type": "text", + "content": "Thomas G Dietterich. Ensemble methods in machine learning. In International workshop on multiple classifier systems, pp. 1-15. Springer, 2000." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 267, + 505, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 267, + 505, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 267, + 505, + 292 + ], + "type": "text", + "content": "Onur Dikmen, Zhirong Yang, and Erkki Oja. Learning the information divergence. IEEE transactions on pattern analysis and machine intelligence, 37(7):1442-1454, 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 297, + 505, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 505, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 505, + 342 + ], + "type": "text", + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 348, + 505, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 348, + 505, + 373 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 505, + 373 + ], + "type": "text", + "content": "Sever S Dragomir. 
A generalization of " + }, + { + "bbox": [ + 105, + 348, + 505, + 373 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 105, + 348, + 505, + 373 + ], + "type": "text", + "content": "-divergence measure to convex functions defined on linear spaces. Communications in Mathematical Analysis, 15(2):1-14, 2013." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 376, + 505, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 376, + 505, + 412 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 505, + 412 + ], + "type": "text", + "content": "Michael Dusenberry, Ghassen Jerfel, Yeming Wen, Yian Ma, Jasper Snoek, Katherine Heller, Balaji Lakshminarayanan, and Dustin Tran. Efficient and scalable bayesian neural nets with rank-1 factors. In International conference on machine learning, pp. 2782-2792. PMLR, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 417, + 505, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 417, + 505, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 505, + 452 + ], + "type": "text", + "content": "Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In 2004 conference on computer vision and pattern recognition workshop, pp. 178-178. IEEE, 2004." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 457, + 505, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 505, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 505, + 492 + ], + "type": "text", + "content": "Timur Garipov, Pavel Izmailov, Dmitrii Podoprikhin, Dmitry P Vetrov, and Andrew G Wilson. Loss surfaces, mode connectivity, and fast ensembling of dnns. Advances in neural information processing systems, 31, 2018." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 497, + 505, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 497, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 505, + 533 + ], + "type": "text", + "content": "Raphael Gontijo-Lopes, Yann Dauphin, and Ekin Dogus Cubuk. No one representation to rule them all: Overlapping features of training methods. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=BK-4qbGgIE3." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 537, + 505, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 537, + 505, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 537, + 505, + 583 + ], + "type": "text", + "content": "Jean-Bastien Grill, Florian Strub, Florent Alché, Coretin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. Advances in neural information processing systems, 33:21271-21284, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 589, + 505, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 589, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 589, + 505, + 633 + ], + "type": "text", + "content": "Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pp. 297-304. JMLR Workshop and Conference Proceedings, 2010." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 639, + 505, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 505, + 664 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 505, + 664 + ], + "type": "text", + "content": "Abner Guzman-Rivera, Dhruv Batra, and Pushmeet Kohli. Multiple choice learning: Learning to produce multiple structured outputs. Advances in neural information processing systems, 25, 2012." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 670, + 505, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 670, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 670, + 505, + 693 + ], + "type": "text", + "content": "Lars Kai Hansen and Peter Salamon. Neural network ensembles. IEEE transactions on pattern analysis and machine intelligence, 12(10):993-1001, 1990." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "text", + "content": "Marton Havasi, Rodolphe Jenatton, Stanislav Fort, Jeremiah Zhe Liu, Jasper Snoek, Balaji Lakshminarayanan, Andrew M Dai, and Dustin Tran. Training independent subnetworks for robust prediction. arXiv preprint arXiv:2010.06610, 2020." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 9729-9738, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 504, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 504, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 504, + 158 + ], + "type": "text", + "content": "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16000-16009, 2022." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "text", + "content": "R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 204, + 504, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 504, + 228 + ], + "type": "text", + "content": "Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pp. 646-661. Springer, 2016." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 233, + 506, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 233, + 506, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 506, + 258 + ], + "type": "text", + "content": "Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E Hopcroft, and Kilian Q Weinberger. Snapshot ensembles: Train 1, get m for free. arXiv preprint arXiv:1704.00109, 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 263, + 506, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 263, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 506, + 298 + ], + "type": "text", + "content": "Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pp. 554-561, 2013." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "type": "text", + "content": "Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 323, + 504, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 504, + 347 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 504, + 347 + ], + "type": "text", + "content": "Harold W Kuhn. The hungarian method for the assignment problem. *Naval research logistics quarterly*, 2(1-2):83-97, 1955." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 353, + 504, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 353, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 504, + 376 + ], + "type": "text", + "content": "Samuli Laine and Timo Aila. Temporal ensembling for semi-supervised learning. arXiv preprint arXiv:1610.02242, 2016." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 382, + 506, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 382, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 382, + 506, + 417 + ], + "type": "text", + "content": "Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 423, + 506, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 423, + 506, + 457 + ], + "spans": [ + { + "bbox": [ + 105, + 423, + 506, + 457 + ], + "type": "text", + "content": "Kuang-Huei Lee, Anurag Arnab, Sergio Guadarrama, John Canny, and Ian Fischer. 
Compressive visual representations. Advances in Neural Information Processing Systems, 34:19538-19552, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 464, + 504, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 504, + 498 + ], + "type": "text", + "content": "Stefan Lee, Senthil Purushwalkam, Michael Cogswell, David Crandall, and Dhruv Batra. Why m heads are better than one: Training a diverse ensemble of deep networks. arXiv preprint arXiv:1511.06314, 2015." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 505, + 506, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 505, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 506, + 529 + ], + "type": "text", + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2018." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 535, + 504, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 535, + 504, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 535, + 504, + 570 + ], + "type": "text", + "content": "Warren R Morningstar, Alex Alemi, and Joshua V Dillon. Pacm-bayes: Narrowing the empirical risk gap in the misspecified bayesian regime. In International Conference on Artificial Intelligence and Statistics, pp. 8270-8298. PMLR, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 575, + 506, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 575, + 506, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 506, + 599 + ], + "type": "text", + "content": "Wai Ho Mow. A tight upper bound on discrete entropy. IEEE Transactions on Information Theory, 44(2):775-778, 1998." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 605, + 504, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 605, + 504, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 504, + 629 + ], + "type": "text", + "content": "Yurii Nesterov. A method for solving the convex programming problem with convergence rate " + }, + { + "bbox": [ + 105, + 605, + 504, + 629 + ], + "type": "inline_equation", + "content": "o(1 / k^2)" + }, + { + "bbox": [ + 105, + 605, + 504, + 629 + ], + "type": "text", + "content": ". Proceedings of the USSR Academy of Sciences, 269:543-547, 1983." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 635, + 506, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 506, + 670 + ], + "type": "text", + "content": "Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722-729. IEEE, 2008." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 676, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 676, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 506, + 732 + ], + "type": "text", + "content": "Kento Nozawa, Pascal Germain, and Benjamin Guedj. Pac-bayesian contrastive unsupervised representation learning. In Jonas Peters and David Sontag (eds.), Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI), volume 124 of Proceedings of Machine Learning Research, pp. 21-30. PMLR, 03-06 Aug 2020. URL https://proceedings.mlr.press/v124/nozawa20a.html." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 507, + 731 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 82, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 505, + 105 + ], + "type": "text", + "content": "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 506, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 506, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 506, + 156 + ], + "type": "text", + "content": "Yaniv Ovadia, Emily Fertig, Jie Ren, Zachary Nado, David Sculley, Sebastian Nowozin, Joshua Dillon, Balaji Lakshminarayanan, and Jasper Snoek. Can you trust your model's uncertainty? evaluating predictive uncertainty under dataset shift. Advances in neural information processing systems, 32, 2019." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 162, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 162, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 107, + 162, + 504, + 186 + ], + "type": "text", + "content": "Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In 2012 IEEE conference on computer vision and pattern recognition, pp. 3498-3505. IEEE, 2012." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 191, + 506, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 191, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 106, + 191, + 506, + 236 + ], + "type": "text", + "content": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 243, + 506, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 243, + 506, + 276 + ], + "spans": [ + { + "bbox": [ + 106, + 243, + 506, + 276 + ], + "type": "text", + "content": "Michael P Perrone and Leon N Cooper. When networks disagree: Ensemble methods for hybrid neural networks. Technical report, Brown Univ Providence Ri Inst for Brain and Neural Systems, 1992." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 284, + 506, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 284, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 107, + 284, + 506, + 328 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. 
In International Conference on Machine Learning, pp. 8748-8763. PMLR, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 335, + 507, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 335, + 507, + 369 + ], + "spans": [ + { + "bbox": [ + 107, + 335, + 507, + 369 + ], + "type": "text", + "content": "Yangjun Ruan, Yann Dubois, and Chris J. Maddison. Optimal representations for covariate shift. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=Rf58LPCwJj0." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 375, + 506, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 375, + 506, + 409 + ], + "spans": [ + { + "bbox": [ + 107, + 375, + 506, + 409 + ], + "type": "text", + "content": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958, 2014." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 415, + 506, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 415, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 107, + 415, + 506, + 449 + ], + "type": "text", + "content": "Ilya Sutskever, James Martens, George Dahl, and Geoffrey Hinton. On the importance of initialization and momentum in deep learning. In International conference on machine learning, pp. 1139-1147. PMLR, 2013." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 456, + 504, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 456, + 504, + 490 + ], + "spans": [ + { + "bbox": [ + 107, + 456, + 504, + 490 + ], + "type": "text", + "content": "Antti Tarvainen and Harri Valpola. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. 
Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 496, + 504, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 496, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 107, + 496, + 504, + 520 + ], + "type": "text", + "content": "Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. In European conference on computer vision, pp. 776-794. Springer, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 526, + 506, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 526, + 506, + 560 + ], + "spans": [ + { + "bbox": [ + 107, + 526, + 506, + 560 + ], + "type": "text", + "content": "Nenad Tomasev, Ioana Bica, Brian McWilliams, Lars Buesing, Razvan Pascanu, Charles Blundell, and Jovana Mitrovic. Pushing the limits of self-supervised resnets: Can we outperform supervised learning without labels onImagenet? arXiv preprint arXiv:2201.05119, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 566, + 506, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 566, + 506, + 600 + ], + "spans": [ + { + "bbox": [ + 107, + 566, + 506, + 600 + ], + "type": "text", + "content": "Linh Tran, Bastiaan S Veeling, Kevin Roth, Jakub Swiatkowski, Joshua V Dillon, Jasper Snoek, Stephan Mandt, Tim Salimans, Sebastian Nowozin, and Rodolphe Jenatton. Hydra: Preserving ensemble diversity for model distillation. arXiv preprint arXiv:2001.04694, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 606, + 504, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 606, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 107, + 606, + 504, + 640 + ], + "type": "text", + "content": "Xiao Wang, Haoqi Fan, Yuandong Tian, Daisuke Kihara, and Xinlei Chen. On the importance of asymmetry for siamese representation learning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16570-16579, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 647, + 504, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 647, + 504, + 670 + ], + "spans": [ + { + "bbox": [ + 107, + 647, + 504, + 670 + ], + "type": "text", + "content": "Yeming Wen, Dustin Tran, and Jimmy Ba. Batchsemble: an alternative approach to efficient ensemble and lifelong learning. arXiv preprint arXiv:2002.06715, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 677, + 506, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 677, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 677, + 506, + 731 + ], + "type": "text", + "content": "Mitchell Wortsman, Gabriel Ilharco, Samir Ya Gadre, Rebecca Roelofs, Raphael Gontijo-Lopes, Ari S Morcos, Hongseok Namkoong, Ali Farhadi, Yair Carmon, Simon Kornblith, et al. Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time. In International Conference on Machine Learning, pp. 23965-23998. PMLR, 2022." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 280 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "type": "text", + "content": "Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3733-3742, 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 507, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 507, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 507, + 158 + ], + "type": "text", + "content": "Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene recognition from abbey to zoo. In 2010 IEEE computer society conference on computer vision and pattern recognition, pp. 3485-3492. IEEE, 2010." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 507, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 507, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 507, + 198 + ], + "type": "text", + "content": "Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9653-9663, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 204, + 507, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 507, + 238 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 507, + 238 + ], + "type": "text", + "content": "Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stephane Deny. Barlow twins: Self-supervised learning via redundancy reduction. In International Conference on Machine Learning, pp. 12310-12320. PMLR, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 245, + 507, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 245, + 507, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 507, + 280 + ], + "type": "text", + "content": "Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. Image BERT pre-training with online tokenizer. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=ydopy-e6Dg." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 201, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 201, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 201, + 94 + ], + "type": "text", + "content": "A PSEUDOCODE" + } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 105, + 144, + 402, + 329 + ], + "blocks": [ + { + "bbox": [ + 105, + 129, + 335, + 142 + ], + "lines": [ + { + "bbox": [ + 105, + 129, + 335, + 142 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 335, + 142 + ], + "type": "text", + "content": "Algorithm 1: Pseudocode for computing ensemble loss" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 105, + 144, + 402, + 329 + ], + "lines": [ + { + "bbox": [ + 105, + 144, + 402, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 402, + 329 + ], + "type": "text", + "content": "b, n, c:, batch size, number of ensemble heads, codebook size \n# log_ps, log_ct: student, teacher log probabilities with n ensembles \n# strategy: ensemble loss average strategy \n# tau_ent: temperature for entropy weighting \ndef ensemble_loss(log_ps, log_ct, strategy, tau_ent): \n b, n, c = log_ct.shape # axis 1 corresponds to 
ensemble \n log_ct = stop_grad(log_ct) # stop gradient for teacher \n if strategy == \"Unif\": \n loss = - (exp(log_ct) * log_ps).sum(axis=-1) \n loss = loss.mean(axis=1) # average over ensembles \n elif strategy == \"Prob\": \n log_mean_ct = logsumexp(log_ct, axis=1, b=1/n) # mean teacher \n log_mean_ps = logsumexp(log_ps, axis=1, b=1/n) # mean student \n loss = - (exp(log_mean_ct) * log_mean_ps).sum(axis=-1) \n elif strategy == \"Ent\": \n ent = - (exp(log_ct) * log_ct).sum(axis=-1) # teacher entropy \n weight = softmax(-ent/tau_ent, axis=1) # entropy weights \n loss = - (exp(log_ct) * log_ps).sum(axis=-1) \n loss = (loss * weight).sum(axis=1) # entropy weighted average \nreturn loss.mean() # average over samples" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "python" + }, + { + "type": "code", + "bbox": [ + 105, + 383, + 491, + 712 + ], + "blocks": [ + { + "bbox": [ + 105, + 369, + 386, + 381 + ], + "lines": [ + { + "bbox": [ + 105, + 369, + 386, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 386, + 381 + ], + "type": "text", + "content": "Algorithm 2: Pseudocode for ensemble heads with simplified DINO" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 105, + 383, + 491, + 712 + ], + "lines": [ + { + "bbox": [ + 105, + 383, + 491, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 383, + 491, + 712 + ], + "type": "text", + "content": "# n, c, eta: number of ensemble heads, codebook size, momentum update rate\n# fs, ft: student, teacher encoders\n# hs_ens, ht_ens: student, teacher projection heads with n ensembles, list with length n\n# mus_ens, mut_ens: student, teacher codebooks with n ensembles, list with length n\n# taus, taut: student, teacher temperatures\n# strategy: ensemble loss average strategy\n# tau_ent: temperature for entropy weighting\nfor x in dataloder: # load a batch with b samples\nxs, xt = augs(x), augt(x) # 
random augmentations\nzs, zt = fs(xs), ft(xt) # representations, (b, l)\n# all following computation can be parallelized with batch computation\nlog_ps, log_ct = [], []\nfor j in range(n):\nhs_j, ht_j = hs_ens[j], ht_ens[j] # j-th projection head\nmus_j, mut_j = mus_ens[j], mut_ens[j] # j-th codebook, (d, c)\nes_j, et_j = hs_j(zs), ht_j(zt) # j-th embedding, (b, d)\nrs_j = (es_j @ mus_j) / (es_j(norm(axis=1, keepdims=True) * mus_j(norm(axis=0, keepdims=True)) / taus # student logits, (b, c)\nrt_j = (et_j @ mut_j) / (et_j(norm(axis=1, keepdims=True) * mut_j(norm(axis=0, keepdims=True)) / taut # teacher logits, (b, c)\nlog_ps_j = logsoftmax(rs_j, axis=-1) # (b, c)\nlog_rt_j = logsoftmax(rt_j, axis=-1) # (b, c)\nlog_rt_j = renorm(log_rt_j) # adjust teacher predictions with centering or sinkhorn,\nomitted here for simplicity\nlog_ps.append(log_ps_j)\nlog_rt.append(log_rt_j)\nlog_ps = stack(log_ps_j, axis=1) # stacked student log probabilities, (b, n, c)\nlog_rt = stack(log_rt_j, axis=1) # stacked teacher log probabilities, (b, n, c)\nloss = ensemble_loss(log_ps, log_rt, strategy=strategy) # compute ensemble loss\nloss.backup() # back-propagate\nsgd_update(fs, hs_ens, mus_ens) # apply gradient decent update for student\nema_update(ft, ht_ens, mut_ens, rate=eta) # apply momentum update for teacher" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "julia" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + 
], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 257, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 257, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 257, + 94 + ], + "type": "text", + "content": "B EXPERIMENTAL DETAILS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 152 + ], + "type": "text", + "content": "In this section, we provide details for our experiments. In Appx. B.1, we describe how we reproduced and improved the baseline DINO/MSN models. We give the implementation details for SSL training and evaluation in Appx. B.2 and Appx. B.3 respectively. All the hyper-parameters used in our experiments are in Appx. B.2.2." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 310, + 175 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 310, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 310, + 175 + ], + "type": "text", + "content": "B.1 REPRODUCING & IMPROVING BASELINES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 183, + 506, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 183, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 183, + 506, + 316 + ], + "type": "text", + "content": "We carefully reproduced and further improved baseline methods (denoted as DINO* and MSN* respectively) with an extensive study and hyperparameter search (see Appx. B.1). In particular, we systematically study the projection head design (which we found is crucial for few-shot evaluation performance (Appx. C.2)) and different techniques for avoiding collapse used in both methods (Appx. C.1). 
DINO* performs significantly better than DINO on few-shot evaluation (e.g., " + }, + { + "bbox": [ + 104, + 183, + 506, + 316 + ], + "type": "inline_equation", + "content": "2\\sim 6" + }, + { + "bbox": [ + 104, + 183, + 506, + 316 + ], + "type": "text", + "content": " percentage point (p.p.) gains for 1 shot) and maintains the full-data evaluation performance. The main adjustments of DINO* are: (i) A 3-layer projection head with a hidden dimension of 1024 (instead of 2048); (ii) Sinkhorn-Knopp (SK) normalization (instead of centering) is applied to teacher predictions, combined with a smaller teacher temperature " + }, + { + "bbox": [ + 104, + 183, + 506, + 316 + ], + "type": "inline_equation", + "content": "\\tau = 0.025" + }, + { + "bbox": [ + 104, + 183, + 506, + 316 + ], + "type": "text", + "content": " and codebook size " + }, + { + "bbox": [ + 104, + 183, + 506, + 316 + ], + "type": "inline_equation", + "content": "c = 1024" + }, + { + "bbox": [ + 104, + 183, + 506, + 316 + ], + "type": "text", + "content": " or 4096. MSN* uses the same projection head as DINO* and applies ME-MAX regularization without SK normalization (which is applied in MSN by default). Further details for DINO and MSN can be found below." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 327, + 169, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 169, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 169, + 339 + ], + "type": "text", + "content": "B.1.1 DINO" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 109, + 401, + 502, + 469 + ], + "blocks": [ + { + "bbox": [ + 104, + 354, + 504, + 389 + ], + "lines": [ + { + "bbox": [ + 104, + 354, + 504, + 389 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 504, + 389 + ], + "type": "text", + "content": "Table 5: Reproducing & Improving DINO. Our reproduce results match the public numbers. 
We further improve the DINO baseline (DINO*) by studying projection heads and collapse-avoiding techniques. The evaluation results of DINO/DINO* ViT-S/16 trained with 800 epochs are reported." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 109, + 401, + 502, + 469 + ], + "lines": [ + { + "bbox": [ + 109, + 401, + 502, + 469 + ], + "spans": [ + { + "bbox": [ + 109, + 401, + 502, + 469 + ], + "type": "table", + "html": "
Few-shotFull-data
125~13 (1%)k-NNLinear
DINO (Caron et al., 2021)38.9 ± 0.448.9 ± 0.358.5 ± 0.164.574.576.1 / 77.0
DINO (Ours reproduced)39.1 ± 0.349.1 ± 0.558.6 ± 0.264.774.375.8 / 76.9
DINO* (Retuned)44.6 ± 0.253.6 ± 0.361.1 ± 0.266.274.175.8 / 76.9
", + "image_path": "b2a2e41958ede2350a5e360b42fe559d83ce2c86ef637dd7610ae3b5a081277a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 479, + 506, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 506, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 506, + 602 + ], + "type": "text", + "content": "Reproducing DINO We carefully reproduced DINO with JAX following the official DINO implementation1. In Table 5, we report the evaluation results of DINO using ViT-S trained with 800 epochs following the exact training configuration for ViT-S/16 in the official DINO code. The official results of full-data evaluation and " + }, + { + "bbox": [ + 104, + 479, + 506, + 602 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 479, + 506, + 602 + ], + "type": "text", + "content": " data evaluation are from Caron et al. (2021), the other few-shot evaluation results are evaluated by Assran et al. (2022) and also validated by us. Note that for consistency of full-data linear evaluation, we report the results with both the [CLS] token representations of the last layer and the concatenation of the [CLS] token representations from the last 4 layers following Caron et al. (2021). For 1-/2-/5-shots evaluation results, we report the mean accuracy and standard deviation across 3 random splits of the data following Assran et al. (2022). As shown in Table 5, our reproduced results are all comparable with the published numbers which validates the implementation of our training and evaluation pipelines." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": "Improving DINO We improved the DINO baseline with a systematic empirical study of some important components. We first empirically compared different techniques for avoiding collapse (see Appx. C.1) and find that Sinkhorn-Knopp (SK) normalization is a more effective and also simpler technique for encouraging codebook usage than the centering operation used in DINO. We thus applied SK normalization, which enabled us to use a smaller teacher temperature " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "\\tau = 0.025" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " (instead of " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "\\tau = 0.07" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": ") and a much smaller codebook size " + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "inline_equation", + "content": "c = 1024" + }, + { + "bbox": [ + 104, + 613, + 506, + 715 + ], + "type": "text", + "content": " or 4096 (instead of 65536). These modifications lead to similar performance as DINO with a much smaller codebook (up to 1M parameters, compared to 16M parameters for DINO). Next we empirically studied the effect of projection heads for different evaluation metrics (see Appx. 
C.2), and found that the design of" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 340, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 340, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 340, + 732 + ], + "type": "text", + "content": "https://github.com/facebookresearch/dino" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": "projection heads is crucial for few-shot evaluation metrics and an too power powerful projection head (e.g., the 3-layer MLP with a hidden dimension of 2048 used in DINO/MSN/etc.) could significantly hurt the few-shot performance. With an empirically study of projection head architectures, we found that a simply reducing the hidden dimension to 1024 could significantly improves the few-shot evaluation performance while maintaining full-data evaluation performance. The improved results of DINO* are shown in Table 5." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 160, + 167, + 171 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 160, + 167, + 171 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 167, + 171 + ], + "type": "text", + "content": "B.1.2 MSN" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 108, + 232, + 502, + 301 + ], + "blocks": [ + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "lines": [ + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "text", + "content": "Table 6: Reproducing & improving MSN. We implement " + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "inline_equation", + "content": "\\mathsf{MSN^{*}}" + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "text", + "content": " by adding ME-MAX regularization and masking to DINO*, which surpasses public MSN results. The evaluation results of MSN/MSN* ViT-S/16 trained with 800 epochs are reported." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 232, + 502, + 301 + ], + "lines": [ + { + "bbox": [ + 108, + 232, + 502, + 301 + ], + "spans": [ + { + "bbox": [ + 108, + 232, + 502, + 301 + ], + "type": "table", + "html": "
Few-shotFull-data
125~13 (1%)k-NNLinear
MSN (Assran et al., 2022)47.1 ± 0.155.8 ± 0.662.8 ± 0.367.2-- / 76.9
MSN (Repro)39.1 ± 0.349.2 ± 0.358.4 ± 0.164.372.874.7 / 75.5
MSN* (Retuned)47.4 ± 0.156.3 ± 0.462.8 ± 0.267.173.375.6 / 76.6
", + "image_path": "ef482eb8da3b2eef6dfd5d4b3c41b41b8cbb4b20575f8fc58f977eaa82c18716.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 304, + 504, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 350 + ], + "type": "text", + "content": "We carefully implemented MSN by adding its main components, i.e., ME-MAX regularization and masking, to the DINO implementation (denoted as " + }, + { + "bbox": [ + 104, + 304, + 504, + 350 + ], + "type": "inline_equation", + "content": "\\mathrm{MSN}^*" + }, + { + "bbox": [ + 104, + 304, + 504, + 350 + ], + "type": "text", + "content": "), which surpassed public results as shown in Table 6. Note that the implementation of " + }, + { + "bbox": [ + 104, + 304, + 504, + 350 + ], + "type": "inline_equation", + "content": "\\mathrm{MSN}^*" + }, + { + "bbox": [ + 104, + 304, + 504, + 350 + ], + "type": "text", + "content": " does not exactly match the public implementation in the public MSN code" + }, + { + "bbox": [ + 104, + 304, + 504, + 350 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 104, + 304, + 504, + 350 + ], + "type": "text", + "content": ", where the main differences are:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 358, + 506, + 430 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 130, + 358, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 358, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 130, + 358, + 506, + 392 + ], + "type": "text", + "content": "- MSN applies ME-MAX with Sinkhorn-Knopp normalization by default (as in the released training configuration), which we empirically find does not work very well (see Table 9). 
" + }, + { + "bbox": [ + 130, + 358, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\mathrm{MSN}^*" + }, + { + "bbox": [ + 130, + 358, + 506, + 392 + ], + "type": "text", + "content": " does not apply SK normalization and tunes the regularization strength for ME-MAX." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 396, + 506, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 396, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 130, + 396, + 506, + 430 + ], + "type": "text", + "content": "- Some differences in implementation details, e.g., schedules for learning rate/weight decay, batch normalization in projection heads, specific data augmentations, etc. " + }, + { + "bbox": [ + 130, + 396, + 506, + 430 + ], + "type": "inline_equation", + "content": "\\mathrm{MSN}^*" + }, + { + "bbox": [ + 130, + 396, + 506, + 430 + ], + "type": "text", + "content": " uses the exact same setup as DINO\\* which follows original DINO implementation." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 438, + 504, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 438, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 504, + 472 + ], + "type": "text", + "content": "We initially tried to exactly reproduce the original MSN following the public MSN code, but the results are much below the public ones, as shown in Table 6. Incorporating the two differences above bridges the gap and makes " + }, + { + "bbox": [ + 104, + 438, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\mathrm{MSN}^*" + }, + { + "bbox": [ + 104, + 438, + 504, + 472 + ], + "type": "text", + "content": " surpass the public results." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 485, + 233, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 485, + 233, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 485, + 233, + 496 + ], + "type": "text", + "content": "B.2 PRETRAINING DETAILS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 506, + 506, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 506, + 529 + ], + "type": "text", + "content": "In this subsection, we provide the general implementation details in Appx. B.2.1 and specific hyperparameters in Appx. B.2.2 in Appx. B.2.2 for reproducibility." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 540, + 259, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 540, + 259, + 551 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 259, + 551 + ], + "type": "text", + "content": "B.2.1 IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "text", + "content": "**Common setup** We experimented with DINO (Caron et al., 2021) and MSN (Assran et al., 2022) models on ImageNet ILSVRC-2012 dataset (Deng et al., 2009). We mainly followed the training setup in Caron et al. (2021). In particular, all models were trained with AdamW optimizer (Loshchilov & Hutter, 2018) and a batch size of 1024. The learning rate was linearly warmuped to 0.002 " + }, + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "inline_equation", + "content": "(= 0.001 \\times \\text{batch size} / 512)" + }, + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "text", + "content": " and followed a cosine decay schedule. 
The weight decay followed a cosine schedule from 0.04 to 0.4. The momentum rate for the teacher was increased from 0.996 to 1 with a cosine schedule following BYOL (Grill et al., 2020). A stochastic depth (Huang et al., 2016) of 0.1 was applied without dropout (Srivastava et al., 2014). The student temperature " + }, + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "text", + "content": " is set to 0.1. As with DINO, we used the data augmentations of BYOL and multi-crop augmentation of SWAV (Caron et al., 2020). In particular, 2 global views with a " + }, + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "text", + "content": " resolution and crop area range [0.25, 1.0] were generated for the teacher and student, and another 10 local views with " + }, + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "inline_equation", + "content": "96 \\times 96" + }, + { + "bbox": [ + 104, + 559, + 506, + 715 + ], + "type": "text", + "content": " resolution and crop area range [0.08, 0.25] were used as extra augmented inputs for the student. For MSN, we used the exact same setup and incorporated its major component: 1) mean entropy maximization (ME-MAX) regularization; 2) masking as an extra augmentation applied to the student global view." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 335, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 335, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 335, + 732 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 116, + 720, + 335, + 732 + ], + "type": "text", + "content": "https://github.com/facebookresearch/msn" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": "Main modifications We retuned the baselines (DINO* and MSN*) as detailed in Appx. B.1, and the main adjustments are as followed. We used a 3-layer projection head with a hidden dimension of 1024. 
The output embedding (i.e., " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "(h_{\\psi} \\circ r_{\\omega})(x)" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": ") and the codes (i.e., " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": ") both have a dimension of 256 and are " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " normalized. For DINO*, Sinkhorn-Knopp (SK) normalization was applied to teacher predictions. For MSN*, ME-MAX was used without SK normalization and the regularization strength was tuned over " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\{3, 4, 5\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": ". For all models, we used teacher temperature " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\tau = 0.025" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " which was linearly decayed from 0.05 for the first 30 epochs. 
The codebook size " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " was selected over " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\{1024, 4096\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " for all models, and typically " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "c = 4096" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " was selected for baseline methods and " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "c = 1024" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " was selected for ours. For our " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": "-ensembles with ENT, entropy weighting temperature " + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": " is linearly decayed from 0.5 to the specified value." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 204, + 234, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 234, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 234, + 216 + ], + "type": "text", + "content": "B.2.2 HYPER-PARAMETERS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 223, + 405, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 223, + 405, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 405, + 236 + ], + "type": "text", + "content": "We report the hyperparameters for training our models for reproducibility:" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 106, + 271, + 504, + 504 + ], + "blocks": [ + { + "bbox": [ + 187, + 245, + 422, + 258 + ], + "lines": [ + { + "bbox": [ + 187, + 245, + 422, + 258 + ], + "spans": [ + { + "bbox": [ + 187, + 245, + 422, + 258 + ], + "type": "text", + "content": "Table 7: Hyper-parameters for training the DINO* model." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 271, + 504, + 504 + ], + "lines": [ + { + "bbox": [ + 106, + 271, + 504, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 271, + 504, + 504 + ], + "type": "table", + "html": "
Hyper-parameterViT-S/16ViT-B/16ViT-B/8
DINO*DINO*-PROB (16)DINO*-ENT (4/16)DINO*DINO*-ENT (16)DINO*DINO*-ENT (16)
training epoch800400300
batch size102410241024
learning rate2e-32e-32e-3
warmup epoch103010
min lr1e-51e-54e-5
weight decay0.04 → 0.40.04 → 0.40.04 → 0.4
stochastic depth0.10.10.1
gradient clip3.01.03.0
momentum0.996 → 1.00.996 → 1.00.996 → 1.0
# of multi-crops101010
masking ratio---
proj. layer333
proj. hidden dim102410241024
emb. dim d256256256
rep. dim384768768
codebook size c4096102410244096102440961024
student temp.0.10.10.1
teacher temp.0.0250.0250.025
te. temp. decay epoch303030
centerxxx
SK norm
ME-MAX weight---
ent. weight temp. γ--0.05-0.05-0.06
γ init.--0.5-0.5-0.5
γ decay epoch--30-30-30
", + "image_path": "0ce535b282b281cff1393e8223140d3e8be97283eef8434f1fc586abc05a4ea3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 516, + 244, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 516, + 244, + 528 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 244, + 528 + ], + "type": "text", + "content": "B.3 EVALUATION PROTOCALS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "text", + "content": "Few-shot linear evaluation We followed the few-shot evaluation protocol in Assran et al. (2022). Specifically, we used the 1-/2-/5-shot ImageNet dataset splits3 in Assran et al. (2022) and " + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "inline_equation", + "content": "\\sim 13" + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "text", + "content": "-shot) ImageNet dataset splits4. For given labelled images, we took a single central crop of size " + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "text", + "content": " without additional data augmentations, and extracted the output [CLS] token representations from the frozen pretrained model. Then we trained a linear classifier with multi-class logistic regression on top of the extracted representations. We used the scikit-learn package (Pedregosa et al., 2011) for the logistic regression classifier. 
For all few-shot evaluations, we searched the " + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "inline_equation", + "content": "\\mathrm{L}_2" + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "text", + "content": " regularization strength over " + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "inline_equation", + "content": "\\{1\\mathrm{e}-4, 3\\mathrm{e}-4, 1\\mathrm{e}-3, 3\\mathrm{e}-3, 1\\mathrm{e}-2, 3\\mathrm{e}-2, 1\\mathrm{e}-1, 3\\mathrm{e}-1, 1, 3, 10\\}" + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 636, + 507, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 636, + 507, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 636, + 507, + 693 + ], + "type": "text", + "content": "Full-data linear evaluation We followed the linear evaluation protocol in (Caron et al., 2021). Specifically, we trained a linear classifier on top of the representations extracted from the frozen pretrained model. The linear classifier is optimized by SGD with Nesterov momentum (Nesterov, 1983; Sutskever et al., 2013) of 0.9 and a batch size of 4096 for 100 epochs on the whole ImageNet dataset, following a cosine learning rate decay schedule. We did not apply any weight decay." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 700, + 410, + 711 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 700, + 410, + 711 + ], + "spans": [ + { + "bbox": [ + 118, + 700, + 410, + 711 + ], + "type": "text", + "content": "3Publicly available at https://github.com/facebookresearch/msn" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 712, + 505, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 712, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 106, + 712, + 505, + 731 + ], + "type": "text", + "content": "4Publicly available at https://github.com/google-research/simclr/tree/master/imagenet_subsets" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 132, + 105, + 480, + 360 + ], + "blocks": [ + { + "bbox": [ + 189, + 80, + 420, + 92 + ], + "lines": [ + { + "bbox": [ + 189, + 80, + 420, + 92 + ], + "spans": [ + { + "bbox": [ + 189, + 80, + 420, + 92 + ], + "type": "text", + "content": "Table 8: Hyper-parameters for training the " + }, + { + "bbox": [ + 189, + 80, + 420, + 92 + ], + "type": "inline_equation", + "content": "{\\mathrm{{MSN}}}^{ * }" + }, + { + "bbox": [ + 189, + 80, + 420, + 92 + ], + "type": "text", + "content": " model." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 132, + 105, + 480, + 360 + ], + "lines": [ + { + "bbox": [ + 132, + 105, + 480, + 360 + ], + "spans": [ + { + "bbox": [ + 132, + 105, + 480, + 360 + ], + "type": "table", + "html": "
Hyper-parameterViT-S/16ViT-B/16ViT-B/8
DINO*MSN*-ENT (2/8)MSN*MSN*-ENT (8)MSN*MSN*-ENT (8)
training epoch800400300
batch size102410241024
learning rate2e-32e-32e-3
warmup epoch203020
min lr1e-54e-54e-5
weight decay0.04 → 0.40.04 → 0.40.04 → 0.4
stochastic depth0.10.10.1
gradient clip1.01.01.0
momentum0.996 → 1.00.996 → 1.00.996 → 1.0
# of multi-crops101010
masking ratio0.20.20.15
proj. layer333
proj. hidden dim102410241024
emb. dim d256256256
rep. dim384768768
codebook size c409610244096102440961024
student temp.0.10.10.1
teacher temp.0.0250.0250.025
te. temp. decay epoch303030
centerXXX
SK normXXX
ME-MAX weight4.04.04.0
ent. weight temp. γ-0.01-0.005-0.01
γ init.-0.5-0.5-0.5
γ decay epoch-30-30-30
", + "image_path": "f37856b7343d311e5439e11a81d14b5ed3e4b0d25d2ee0e7220e7b47751129d5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "content": "During training, we only applied basic data augmentations including random resized crops of size " + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "content": " and horizontal flips. During testing, we took a single central crop of the same size. For ViT-S/16, Caron et al. (2021) found that concatenating the [CLS] token representations from the last " + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "content": " (specifically, " + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "inline_equation", + "content": "l = 4" + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "content": ") layers (c.f. Appendix F.2 in Caron et al. (2021)) improved the results by about 1 p.p. We followed the same procedure, but reported linear evaluation results with both " + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "inline_equation", + "content": "l = 1" + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "inline_equation", + "content": "l = 4" + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "content": " in Table 2 for consistency. 
In our empirical study with ViT-S/16, we used the result with " + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "inline_equation", + "content": "l = 1" + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "content": ". For larger models (e.g., ViT-B/16), we followed Caron et al. (2021); Zhou et al. (2022) to use the concatenation of the [CLS] token representation and the average-pooled patch tokens from the last " + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "inline_equation", + "content": "l = 1" + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "content": " layer for linear evaluation. For all linear evaluations, we searched the base learning rate over " + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "inline_equation", + "content": "\\{4.8\\mathrm{e} - 3, 1.6\\mathrm{e} - 2, 4.8\\mathrm{e} - 2, 1.6\\mathrm{e} - 1, 4.8\\mathrm{e} - 1, 1.6\\}" + }, + { + "bbox": [ + 104, + 376, + 506, + 486 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": "Full-data " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": "-NN evaluation We followed the " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": "-NN evaluation protocol in Caron et al. (2021); Wu et al. (2018). 
Specifically, for each image in the given dataset, we took a single central crop of size " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " without additional data augmentations, and extracted the output [CLS] token representations from the frozen pretrained model. The extracted representations are used for a weighted " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": "-Nearest-Neighbor classifier. In particular, denote the stored training representations and labels as " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{(z_i, y_i)\\}_{i=1}^N" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": ". For a test image with extracted representation " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": ", denote the set of its top " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": "-NN training samples as " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_k[z] \\subseteq \\mathcal{D}" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "|\\mathcal{D}_k[z]| = k" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": ". 
The " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": "-NN set " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_k[z]" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " is used to make the prediction for the test image with a weighted vote, i.e., " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\hat{y} = \\arg \\max_y \\left( \\sum_{(z_j, y_j) \\in \\mathcal{D}_k[z]} \\alpha_j \\mathbf{1}_{y=y_j} \\right)" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_{y=y_j}" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " is the one-hot vector corresponding to label " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "y_j" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\alpha_j" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " is the weight induced by the cosine similarity between " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "z_j" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\alpha_j = \\exp \\left( 
\\frac{1}{\\tau'} \\frac{z^\\top z_j}{||z|| \\|z_j||} \\right)" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": ". We set " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\tau' = 0.07" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " without tuning as in Caron et al. (2021); Wu et al. (2018). For all " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": "-NN evaluations, we searched " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\{5, 10, 20, 50, 100\\}" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " and found that " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "k = 10" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "inline_equation", + "content": "k = 20" + }, + { + "bbox": [ + 104, + 501, + 506, + 651 + ], + "type": "text", + "content": " was consistently the best." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "Transfer evaluation via linear probing We mainly followed the transfer evaluation protocol in (Grill et al., 2020; Chen et al., 2020a). 
In particular, we used 9 of their 13 datasets that are available in tensorflow-datasets (tfd), namely Food-101 (Bossard et al., 2014), CIFAR10 (Krizhevsky et al., 2009), CIFAR100 (Krizhevsky et al., 2009), SUN397 scene dataset (Xiao et al., 2010), Stanford Cars (Krause et al., 2013), Describable Textures Dataset (Cimpoi et al., 2014, DTD), Oxford-IIIT Pets (Parkhi et al., 2012), Caltech-101 (Fei-Fei et al., 2004), Oxford 102 Flowers (Nilsback & Zisserman," + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 140, + 502, + 215 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 504, + 125 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 504, + 125 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 504, + 125 + ], + "type": "text", + "content": "Table 9: Empirical study of different techniques for avoiding collapse. Using Sinkhorn-Knopp normalization instead of centering for DINO leads to improved performance, and matches the original DINO even with a much smaller codebook. The ME-MAX regularization of MSN is very effective and leads to significant improvement for few-shot evaluations." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 140, + 502, + 215 + ], + "lines": [ + { + "bbox": [ + 108, + 140, + 502, + 215 + ], + "spans": [ + { + "bbox": [ + 108, + 140, + 502, + 215 + ], + "type": "table", + "html": "
TechniqueFew-shotFull-data
CenterSinkhornME-MAX125~13 (1%)k-NNLinear
DINO37.8 ± 0.447.4 ± 0.356.9 ± 0.463.072.474.9
39.1 ± 0.349.4 ± 0.358.7 ± 0.264.874.176.0
MSN36.0 ± 0.446.6 ± 0.656.5 ± 0.263.273.275.2
43.9 ± 0.253.0 ± 0.361.1 ± 0.266.074.075.8
", + "image_path": "9a27ed9b5ae01b37badb6bf57d8bd17d7e8524fdbd48e5d2b36164ee73106be3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 151, + 244, + 459, + 312 + ], + "blocks": [ + { + "bbox": [ + 168, + 219, + 441, + 232 + ], + "lines": [ + { + "bbox": [ + 168, + 219, + 441, + 232 + ], + "spans": [ + { + "bbox": [ + 168, + 219, + 441, + 232 + ], + "type": "text", + "content": "Table 10: ME-MAX regularization is sensitive to hyper-parameters." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 151, + 244, + 459, + 312 + ], + "lines": [ + { + "bbox": [ + 151, + 244, + 459, + 312 + ], + "spans": [ + { + "bbox": [ + 151, + 244, + 459, + 312 + ], + "type": "table", + "html": "
WeightFew-shotFull-data
125~13 (1%)KNNLinear
1.037.6 ± 0.248.0 ± 0.457.7 ± 0.264.073.575.6
3.043.9 ± 0.253.0 ± 0.361.1 ± 0.266.074.075.8
5.043.6 ± 0.252.6 ± 0.460.4 ± 0.165.573.975.6
", + "image_path": "60b12267d8f6efabcdcf0af59665cc9d258e1f1c0f4fded2ce22f5096265c227.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "text", + "content": "2008). Following their evaluation metrics, we reported mean per-class accuracy for Oxford-IIIT Pets, Caltech-101, and Oxford 102 Flowers datasets and reported top-1 accuracy for other datasets. We transferred the models pretrained on ImageNet (Deng et al., 2009) to these datasets by training a linear classifier on top of frozen representations. In particular, we resized given images to " + }, + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "text", + "content": " and took a single central crop of size " + }, + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "text", + "content": " without additional data augmentations. We extracted the output [CLS] token representations from the frozen pretrained model. Then we trained a linear classifier with multi-class logistic regression on top of the extracted representations. We used the scikit-learn package (Pedregosa et al., 2011) for the logistic regression classifier. 
For all transfer evaluations, we searched the " + }, + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "inline_equation", + "content": "\\mathbf{L}_2" + }, + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "text", + "content": " regularization strength over " + }, + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "inline_equation", + "content": "\\{1e - 6, 1e - 5, 1e - 4, 3e - 4, 1e - 3, 3e - 3, 1e - 2, 3e - 2, 1e - 1, 3, 1e, 3e, 1e2, 1e3, 1e4, 1e5\\}" + }, + { + "bbox": [ + 104, + 326, + 506, + 437 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 453, + 245, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 245, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 245, + 464 + ], + "type": "text", + "content": "C ADDITIONAL RESULTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 479, + 395, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 479, + 395, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 395, + 491 + ], + "type": "text", + "content": "C.1 EMPIRICAL STUDY OF TECHNIQUES FOR AVOIDING COLLAPSE" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 499, + 504, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 499, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 499, + 504, + 565 + ], + "type": "text", + "content": "Most self-supervised learning methods utilize some techniques to avoid collapse of representations with, e.g., contrastive loss (Chen et al., 2020a; He et al., 2020), batch normalization (Grill et al., 2020), asymmetric architecture design with a predictor (Grill et al., 2020; Chen & He, 2021), etc. In DINO and MSN, a learnable codebook is used for the learning objective and different techniques are applied to encourage the effective codebook usage. There are two potential cases of collapse (as discussed in Caron et al. 
(2021)):" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 575, + 504, + 679 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 130, + 575, + 504, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 575, + 504, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 575, + 504, + 641 + ], + "type": "text", + "content": "- Dominating codes. This is the case of \"winner-take-all\": only a small portion of codes are being predicted while others are inactive. Typical solutions for avoiding this include applying Sinkhorn-Knopp normalization (Cuturei, 2013) as in SWaV (Caron et al., 2020), centering teacher logits as in DINO (Caron et al., 2021), and applying mean-entropy maximization regularization (ME-MAX) as in MSN (Assran et al., 2022). Note that in MSN, ME-MAX is combined with Sinkhorn-Knopp normalization by default." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 644, + 504, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 644, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 130, + 644, + 504, + 679 + ], + "type": "text", + "content": "- Uniform codes. This is the case where all codes are treated equally and the predictions reduce to be uniform over codes. A simple and effective solution is to applying sharpening, i.e., using a lower temperature for computing the teacher prediction." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "We systematically study different techniques in a unified setup. In particular, we used DINO with the ViT-S backbone, a 3-layer MLP projection head with hidden dimension 2048, and a codebook of size 4096 and dimension 256. 
We applied different techniques to DINO and searched the teacher temperature in " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\{0.0125, 0.025, 0.05\\}" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " for each. For ME-MAX, we searched regularization weight in" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 83, + 302, + 202 + ], + "blocks": [ + { + "bbox": [ + 126, + 83, + 302, + 202 + ], + "lines": [ + { + "bbox": [ + 126, + 83, + 302, + 202 + ], + "spans": [ + { + "bbox": [ + 126, + 83, + 302, + 202 + ], + "type": "image", + "image_path": "7aa3a2618ab4f81946ae6bb6a3ca92ed6ddae7efb732ca1e8841212ceb669cc1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 194, + 209, + 236, + 220 + ], + "lines": [ + { + "bbox": [ + 194, + 209, + 236, + 220 + ], + "spans": [ + { + "bbox": [ + 194, + 209, + 236, + 220 + ], + "type": "text", + "content": "(a) Merged" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 308, + 82, + 484, + 202 + ], + "blocks": [ + { + "bbox": [ + 308, + 82, + 484, + 202 + ], + "lines": [ + { + "bbox": [ + 308, + 82, + 
484, + 202 + ], + "spans": [ + { + "bbox": [ + 308, + 82, + 484, + 202 + ], + "type": "image", + "image_path": "eacebcf1752170d31391abb882a7366a3ddf70fc8efbfdda8a126124a1dd4a4e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 377, + 209, + 414, + 220 + ], + "lines": [ + { + "bbox": [ + 377, + 209, + 414, + 220 + ], + "spans": [ + { + "bbox": [ + 377, + 209, + 414, + 220 + ], + "type": "text", + "content": "(b) 1-shot" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 121, + 223, + 299, + 342 + ], + "blocks": [ + { + "bbox": [ + 121, + 223, + 299, + 342 + ], + "lines": [ + { + "bbox": [ + 121, + 223, + 299, + 342 + ], + "spans": [ + { + "bbox": [ + 121, + 223, + 299, + 342 + ], + "type": "image", + "image_path": "17820ffe48f4d9f4a2a9c91f2bb0d87fd05d0efafd855e0c3791dd310c6138e9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 186, + 350, + 231, + 361 + ], + "lines": [ + { + "bbox": [ + 186, + 350, + 231, + 361 + ], + "spans": [ + { + "bbox": [ + 186, + 350, + 231, + 361 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 186, + 350, + 231, + 361 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 186, + 350, + 231, + 361 + ], + "type": "text", + "content": " -data" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 315, + 223, + 489, + 342 + ], + "blocks": [ + { + "bbox": [ + 315, + 223, + 489, + 342 + ], + "lines": [ + { + "bbox": [ + 315, + 223, + 489, + 342 + ], + "spans": [ + { + "bbox": [ + 315, + 223, + 489, + 342 + ], + "type": "image", + "image_path": "93cf075d1b3664c9ea0e84057da987b3d78503601684857ad97b19dc53f52ece.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 350, + 425, + 361 + ], + "lines": [ + { + 
"bbox": [ + 378, + 350, + 425, + 361 + ], + "spans": [ + { + "bbox": [ + 378, + 350, + 425, + 361 + ], + "type": "text", + "content": "(d) Full-data" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 369, + 506, + 437 + ], + "lines": [ + { + "bbox": [ + 104, + 369, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 506, + 437 + ], + "type": "text", + "content": "Figure 5: Effect of projection heads for different evaluation metrics. We compare non-ensemble projection heads with different depths and widths as well as our " + }, + { + "bbox": [ + 104, + 369, + 506, + 437 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 369, + 506, + 437 + ], + "type": "text", + "content": "-ensembles, and evaluate linear evaluation performance with different amount of labeled data. (a) shows the comparison of normalized metrics for non-ensembles. (b)-(d) compares non-ensemble and " + }, + { + "bbox": [ + 104, + 369, + 506, + 437 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 369, + 506, + 437 + ], + "type": "text", + "content": "-ensembles by unnormalized metrics. 'Default' denotes the default projection heads used in many SSL methods. See analysis in Appx. C.2 for details." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 457, + 504, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 504, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 504, + 481 + ], + "type": "inline_equation", + "content": "\\{1.0, 3.0, 5.0\\}" + }, + { + "bbox": [ + 105, + 457, + 504, + 481 + ], + "type": "text", + "content": ". For ME-MAX combined with Sinkhorn, we followed Assran et al. (2022) and used default regularization weight of 1.0. The results are in Table 10. 
We observed that:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 490, + 506, + 604 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 130, + 490, + 504, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 490, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 130, + 490, + 504, + 545 + ], + "type": "text", + "content": "- DINO's centering operation is not as strong as other techniques, and it favours a larger teacher temperature (e.g., 0.05). It does not work well when the codebook size (4096) is not as large as the one used in the original DINO model (65536). Switching to use Sinkhorn-Knopp normalization leads to much more improved performance, and matches the performance of original DINO (Table 5) with a much smaller codebook." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 548, + 506, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 548, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 130, + 548, + 506, + 604 + ], + "type": "text", + "content": "- MSN's ME-MAX regularization is very effective, and leads to significant improvements over others. We also found it is sensitive to the regularization weight and teacher temperature (c.f. Table 10). However, we observed that combining ME-MAX with Sinkhorn does not work well without tuning the regularization weight (which is recommended by Assran et al. (2022))." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 617, + 317, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 617, + 317, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 317, + 628 + ], + "type": "text", + "content": "C.2 EMPIRICAL STUDY OF PROJECTION HEADS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 638, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 504, + 693 + ], + "type": "text", + "content": "In this subsection, we systematically study the effect of projection heads for different evaluation metrics. In particular, we used DINO* ViT-S/16 as the base model and used different projection heads with (depth, width) searched over " + }, + { + "bbox": [ + 104, + 638, + 504, + 693 + ], + "type": "inline_equation", + "content": "\\{2,3,4\\} \\times \\{512,1024,2048,4096\\}" + }, + { + "bbox": [ + 104, + 638, + 504, + 693 + ], + "type": "text", + "content": ". All models are trained with 300 epochs using exact the same set of hyper-parameters. We measured the linear evaluation performance with different amount of labeled data (i.e., full-data, " + }, + { + "bbox": [ + 104, + 638, + 504, + 693 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 638, + 504, + 693 + ], + "type": "text", + "content": " data, 1-shot)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "In Fig. 5a, we plot different evaluation metrics (normalized respectively by the best of each) versus the number of projection head parameters. In Figs. 
5b to 5d, we plot each unnormalized evaluation metric respectively for different heads as well as our " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "-ensembles. Our key findings are:" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 209, + 82, + 399, + 214 + ], + "blocks": [ + { + "bbox": [ + 209, + 82, + 399, + 214 + ], + "lines": [ + { + "bbox": [ + 209, + 82, + 399, + 214 + ], + "spans": [ + { + "bbox": [ + 209, + 82, + 399, + 214 + ], + "type": "image", + "image_path": "dbf4514b134bb8ce72a6eac34d5f024be139ceac375ac2bd229901674992e7ca.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 226, + 504, + 270 + ], + "lines": [ + { + "bbox": [ + 104, + 226, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 226, + 504, + 270 + ], + "type": "text", + "content": "Figure 6: Effect of teacher temperature for non-ensemble DINO*. DINO* with a lower temperature can achieve better few-shot performance, but still under-performs our ensemble method (DINO*-ENT with 16 heads, orange lines). 
DINO* ViT-S/16 trained for 300 epochs is used and " + }, + { + "bbox": [ + 104, + 226, + 504, + 270 + ], + "type": "inline_equation", + "content": "\\tau = 0.025" + }, + { + "bbox": [ + 104, + 226, + 504, + 270 + ], + "type": "text", + "content": " is used for DINO*-ENT." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 130, + 294, + 504, + 541 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 130, + 294, + 504, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 294, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 130, + 294, + 504, + 371 + ], + "type": "text", + "content": "- The projection head has a relatively larger impact on few-shot evaluation metrics, as reflected by the relative magnitudes of different metrics in Fig. 5a. An too powerful non-ensemble projection head significantly hurts the label efficiency of learned representations, reflected by a much larger drop in few-shot evaluation performance (up to 18 p.p. for 1-shot, 9 p.p. for " + }, + { + "bbox": [ + 130, + 294, + 504, + 371 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 130, + 294, + 504, + 371 + ], + "type": "text", + "content": " data). This result is also partially observed in Chen et al. (2020b), where they found that probing from intermediate layers of projection heads (which can be viewed as using a shallower head) could improve the semi-supervised learning (" + }, + { + "bbox": [ + 130, + 294, + 504, + 371 + ], + "type": "inline_equation", + "content": "1\\% - 10\\%" + }, + { + "bbox": [ + 130, + 294, + 504, + 371 + ], + "type": "text", + "content": ") results." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 376, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 376, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 130, + 376, + 504, + 453 + ], + "type": "text", + "content": "- The optimal projection head for different metrics can differ a lot. A weaker head improves label efficiency (few-shot performance), while a stronger (but not too strong) head improves linear decodability. As a result, the default projection head (3/2048) that is widely used in SimCLR v2 (Chen et al., 2020b), DINO (Caron et al., 2021), iBOT (Zhou et al., 2022), MSN (Assran et al., 2022), etc., does not perform well in few-shot evaluations (as shown by the green cross denoted as 'Default'), probably because it is selected by full-data evaluation metrics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 460, + 504, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 460, + 504, + 480 + ], + "spans": [ + { + "bbox": [ + 130, + 460, + 504, + 480 + ], + "type": "text", + "content": "- There exist some projection heads that performs decently well on all evaluation metrics, e.g., the baseline model (3/1024) used in our experiments (pink star denoted as 'Our base')." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 487, + 504, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 487, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 130, + 487, + 504, + 541 + ], + "type": "text", + "content": "- Compared to naively tuning projection head architectures, our " + }, + { + "bbox": [ + 130, + 487, + 504, + 541 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 130, + 487, + 504, + 541 + ], + "type": "text", + "content": "-ensembles (orange curves in Figs. 
5b to 5d) consistently improve all metrics with different amount of labeled data, despite it also increases the number of parameters in projection heads. Our " + }, + { + "bbox": [ + 130, + 487, + 504, + 541 + ], + "type": "inline_equation", + "content": "(h_{\\psi}, \\mu)" + }, + { + "bbox": [ + 130, + 487, + 504, + 541 + ], + "type": "text", + "content": "-ensembles outperform all non-ensembles, which also include the counterparts of probing from intermediate layers from the a deeper head (i.e., shallower heads)." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 557, + 315, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 557, + 315, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 557, + 315, + 569 + ], + "type": "text", + "content": "C.3 EMPIRICAL STUDY OF " + }, + { + "bbox": [ + 105, + 557, + 315, + 569 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 105, + 557, + 315, + 569 + ], + "type": "text", + "content": "-ENSEMBLES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "text", + "content": "Are the gains of ENT purely from sharper teacher predictions? Our ENT strategy assigns higher weights to the heads that predict with lower entropies, thus effectively uses sharper teacher predictions as the targets. One may be curious about how this effect accounts for the gains of the ENT strategy. We empirically answer this question by studying the non-ensemble baseline that uses a sharper teacher predictions in a data-independent manner (in contrast to ENT, which uses data-dependent entropy weights). 
Specifically, we compare the non-ensemble DINO* that use different teacher temperature " + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "inline_equation", + "content": "\\tau \\in \\{0.005, 0.01, 0.025, 0.05\\}" + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "text", + "content": " and also our DINO*-ENT (16) with " + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "inline_equation", + "content": "\\tau = 0.025" + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "text", + "content": ", as shown in Fig. 6. We find that the teacher temperature has a big impact on evaluation results especially for few-shot evaluation. Compared to our default baseline that uses " + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "inline_equation", + "content": "\\tau = 0.025" + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "text", + "content": ", a lower temperature (e.g., " + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "inline_equation", + "content": "\\tau = 0.01" + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "text", + "content": ") can indeed improve the 1-shot performance (at the cost of worse full-data performance). However, an too low temperature (" + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "inline_equation", + "content": "\\tau = 0.005" + }, + { + "bbox": [ + 104, + 578, + 506, + 721 + ], + "type": "text", + "content": ") will hurt the performance. Our DINO*-ENT (16) consistently outperform all the baselines, which implies the importance of selecting sharper teacher predictions in a data-dependent manner." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 150, + 504, + 274 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "type": "text", + "content": "Table 11: Full table of Table 1 including all metrics for comparing different ensemble strategies. ENT and PROB significantly improves over the non-ensemble baseline, while UNIF leads to no gains. Ensembling the whole projection head works the best. All models are DINO* ViT-S/16 trained for 300 epochs. The means and standard deviations over 3 initialization seeds for all evaluation results are reported." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 150, + 504, + 274 + ], + "lines": [ + { + "bbox": [ + 107, + 150, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 107, + 150, + 504, + 274 + ], + "type": "table", + "html": "
HowWhereFew-shotFull-data
Proj. HeadCodebook125~13 (1%)k-NNLinear
Base40.6 ± 0.249.8 ± 0.257.9 ± 0.363.4 ± 0.272.3 ± 0.174.4 ± 0.1
UNIF40.4 ± 0.449.5 ± 0.457.6 ± 0.363.3 ± 0.372.2 ± 0.274.5 ± 0.2
PROB39.7 ± 0.549.0 ± 0.557.4 ± 0.463.0 ± 0.472.8 ± 0.274.8 ± 0.1
PROB41.9 ± 0.351.5 ± 0.559.6 ± 0.465.1 ± 0.373.7 ± 0.375.4 ± 0.1
ENT40.6 ± 0.449.5 ± 0.658.0 ± 0.463.5 ± 0.472.1 ± 0.374.5 ± 0.3
ENT43.0 ± 0.652.2 ± 0.859.7 ± 0.764.8 ± 0.572.9 ± 0.675.1 ± 0.4
ENT44.0 ± 0.253.0 ± 0.560.5 ± 0.365.5 ± 0.173.2 ± 0.175.3 ± 0.1
ENT-ST40.0 ± 0.539.2 ± 0.657.3 ± 0.562.7 ± 0.571.9 ± 0.474.0 ± 0.4
", + "image_path": "2a91811cb30ca0ac1f030c675ed7dcecd63230129dcea028d7b0fad6c6e8d1d3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 327, + 503, + 406 + ], + "blocks": [ + { + "bbox": [ + 104, + 277, + 504, + 312 + ], + "lines": [ + { + "bbox": [ + 104, + 277, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 504, + 312 + ], + "type": "text", + "content": "Table 12: Comparison of different varants of PROB. The PROB strategy used in our experiments performs the best. ' -' in the table denotes training divergence for PROB-MAX. The experimental setup is the same as Table 11." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 327, + 503, + 406 + ], + "lines": [ + { + "bbox": [ + 107, + 327, + 503, + 406 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 503, + 406 + ], + "type": "table", + "html": "
HowWhereFew-shotFull-data
Weight byTemp. γ125~13 (1%)k-NNLinear
Base40.6 ± 0.249.8 ± 0.257.9 ± 0.363.4 ± 0.272.3 ± 0.174.4 ± 0.1
PROBstudent141.9 ± 0.351.5 ± 0.559.6 ± 0.465.1 ± 0.373.7 ± 0.375.4 ± 0.1
PROB-TEteacher141.5 ± 0.250.4 ± 0.358.3 ± 0.363.7 ± 0.172.3 ± 0.274.6 ± 0.1
PROB-MAXstudent0------
PROB-MAX-TEteacher041.4 ± 0.250.3 ± 0.358.1 ± 0.363.6 ± 0.272.3 ± 0.274.5 ± 0.2
", + "image_path": "10140e926b5dcade496087f3088fda8e813b6a2202ec94f8156c76ad55529556.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 422, + 504, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 444 + ], + "type": "text", + "content": "Comparison of different ensemble strategies and variants We present the full table of Table 1 that includes all the metrics in Table 11. The same observation holds for all metrics." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 449, + 506, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 506, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 506, + 483 + ], + "type": "text", + "content": "For all previous studies, we considered a specific instantiation of PROB strategy, i.e., weight by student predicted probabilities " + }, + { + "bbox": [ + 104, + 449, + 506, + 483 + ], + "type": "inline_equation", + "content": "f_{ijy} = \\log s(y|\\theta_j,x)" + }, + { + "bbox": [ + 104, + 449, + 506, + 483 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 449, + 506, + 483 + ], + "type": "inline_equation", + "content": "\\gamma = 1" + }, + { + "bbox": [ + 104, + 449, + 506, + 483 + ], + "type": "text", + "content": ", which has a nice interpretation of model average (see Sec. 3.3). We also studied different variants of the PROB strategy (see Appx. 
D.1)," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 493, + 406, + 540 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 130, + 493, + 382, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 493, + 382, + 506 + ], + "spans": [ + { + "bbox": [ + 130, + 493, + 382, + 506 + ], + "type": "text", + "content": "PROB-TE: weight by teacher " + }, + { + "bbox": [ + 130, + 493, + 382, + 506 + ], + "type": "inline_equation", + "content": "f_{ijy} = \\log t_i(y|x)" + }, + { + "bbox": [ + 130, + 493, + 382, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 493, + 382, + 506 + ], + "type": "inline_equation", + "content": "\\gamma = 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 510, + 396, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 396, + 523 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 396, + 523 + ], + "type": "text", + "content": "PROB-MAX: weight by student " + }, + { + "bbox": [ + 130, + 510, + 396, + 523 + ], + "type": "inline_equation", + "content": "f_{ijy} = \\log s_j(y|x)" + }, + { + "bbox": [ + 130, + 510, + 396, + 523 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 510, + 396, + 523 + ], + "type": "inline_equation", + "content": "\\gamma \\rightarrow 0" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 526, + 406, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 526, + 406, + 540 + ], + "spans": [ + { + "bbox": [ + 130, + 526, + 406, + 540 + ], + "type": "text", + "content": "PROB-MAX-TE: weight by teacher " + }, + { + "bbox": [ + 130, + 526, + 406, + 540 + ], + "type": "inline_equation", + "content": "f_{ijy} = \\log t_i(y|x)" + }, + { + "bbox": [ + 130, + 526, + 406, + 540 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 526, + 406, + 540 + ], + "type": "inline_equation", + "content": "\\gamma \\to 0" + 
} + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 548, + 506, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 506, + 649 + ], + "type": "text", + "content": "Table 12 compares the downstream performance for all the variants. We find that the our PROB (used in our empirical studies) performs better than other variants. Interestingly, weighting by the teacher (PROB-TE) performs worse than PROB. We conjecture that this is because the important weights turn out to give a weighted average of teacher predictions as the surrogate target that is shared across all students (like PROB) but does not give effective preferential treatment across students which are directly optimized (unlike PROB-TE). Furthermore, PROB-MAX which sharpens the importance weights leads to training divergence. This is probably because the student predictions have higher variance based on which sharp weights lead to unstable training. In contrast, PROB-MAX-TE which uses the (lower-variance) teacher gives reasonable results and comparable to PROB-TE." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": "Number of ensembles for " + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "inline_equation", + "content": "\\mathbf{MSN}^*" + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": " In Fig. 
7a, we study the effect of increasing the number of " + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": "-ensembles for " + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "inline_equation", + "content": "\\mathbf{MSN}^*" + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": "-ENT with ViT-S/16 trained for 800 epochs. The scaling trend is similar to DINO\\*-ENT (Fig. 3a) and the gains start to diminish when the number of heads increases above 8." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "Effect of ENT temperature " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{MSN}^*" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " Fig. 7b studies the effect of entropy weighting temperature " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{MSN}^*" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "-ENT. 
We observed that " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{MSN}^*" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " is more robust to small temperatures, and the" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 127, + 83, + 299, + 204 + ], + "blocks": [ + { + "bbox": [ + 127, + 83, + 299, + 204 + ], + "lines": [ + { + "bbox": [ + 127, + 83, + 299, + 204 + ], + "spans": [ + { + "bbox": [ + 127, + 83, + 299, + 204 + ], + "type": "image", + "image_path": "490cb30743786891a399ad47d2e7d5d357f8e031ff6e40f82d53e6a5347c2e08.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 169, + 211, + 260, + 222 + ], + "lines": [ + { + "bbox": [ + 169, + 211, + 260, + 222 + ], + "spans": [ + { + "bbox": [ + 169, + 211, + 260, + 222 + ], + "type": "text", + "content": "(a) Scaling of ensembles" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 81, + 479, + 205 + ], + "blocks": [ + { + "bbox": [ + 307, + 81, + 479, + 205 + ], + "lines": [ + { + "bbox": [ + 307, + 81, + 479, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 81, + 479, + 205 
+ ], + "type": "image", + "image_path": "0a9b966552940abc5d3f63d5e0d2d091adf2b746587f3106e8aadebcd57a9fd5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 211, + 454, + 222 + ], + "lines": [ + { + "bbox": [ + 336, + 211, + 454, + 222 + ], + "spans": [ + { + "bbox": [ + 336, + 211, + 454, + 222 + ], + "type": "text", + "content": "(b) Effect of ENT temperature " + }, + { + "bbox": [ + 336, + 211, + 454, + 222 + ], + "type": "inline_equation", + "content": "\\gamma" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 231, + 506, + 266 + ], + "lines": [ + { + "bbox": [ + 104, + 231, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 231, + 506, + 266 + ], + "type": "text", + "content": "Figure 7: Empirical study for " + }, + { + "bbox": [ + 104, + 231, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{MSN}^*" + }, + { + "bbox": [ + 104, + 231, + 506, + 266 + ], + "type": "text", + "content": " -ENT. (a) The gains by increasing the number of " + }, + { + "bbox": [ + 104, + 231, + 506, + 266 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 231, + 506, + 266 + ], + "type": "text", + "content": " ensembles start to diminish when it is over 8 heads. (b) " + }, + { + "bbox": [ + 104, + 231, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{MSN}^*" + }, + { + "bbox": [ + 104, + 231, + 506, + 266 + ], + "type": "text", + "content": " prefers smaller temperature for entropy weighting than DINO*." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 328, + 504, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 328, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 504, + 353 + ], + "type": "text", + "content": "best " + }, + { + "bbox": [ + 104, + 328, + 504, + 353 + ], + "type": "inline_equation", + "content": "\\gamma = 0.01" + }, + { + "bbox": [ + 104, + 328, + 504, + 353 + ], + "type": "text", + "content": " is smaller than that of DINO\\* " + }, + { + "bbox": [ + 104, + 328, + 504, + 353 + ], + "type": "inline_equation", + "content": "(\\gamma = 0.05)" + }, + { + "bbox": [ + 104, + 328, + 504, + 353 + ], + "type": "text", + "content": ". When the temperature is too high, the performance drops as a result of under-specialization (i.e., less diversity) as with DINO\\*." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 411, + 317, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 317, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 317, + 423 + ], + "type": "text", + "content": "C.4 ANALYZING " + }, + { + "bbox": [ + 105, + 411, + 317, + 423 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 105, + 411, + 317, + 423 + ], + "type": "text", + "content": " -ENSEMBLE DIVERSITY" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 449, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 504, + 495 + ], + "type": "text", + "content": "Visualizing " + }, + { + "bbox": [ + 104, + 449, + 504, + 495 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 449, + 504, + 495 + ], + "type": "text", + "content": "-ensemble similarity We analyze the diversity between different heads by visualizing the similarity 
matrix between their codes. Directly measuring the similarity between codes in two heads could not work, because 1) they may live in different subspaces because of the ensembled projection heads; 2) they may not align in the natural order but in a permuted order." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": "Therefore, we seek to align codes between different heads by how they are effectively used to 'cluster' the data. In particular, we use a set of randomly sampled inputs " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "\\{x^i\\}_{i\\in [b]}" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "b = 51200" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": " to obtain an empirical code assignment matrix " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "A^{j}\\in \\mathbb{R}^{b\\times c}" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": " for each " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": "-ensemble " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "j\\in [m]" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": ", where the " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": "-th row 
of " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "A^j" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": " corresponds to the teacher predictions " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "t_j(Y|x^i)" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": ". For the " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": "-th code in the head " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": ", we extract the " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": "-th column from " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "A^j" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": " (i.e., its empirical assignment) as its embedding. For two codes, we measure their similarity by the cosine similarity between their embeddings. For a pair of heads " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "j'" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": ", we align their codes using the Hungarian algorithm (Kuhn, 1955) to maximize the sum of cosine similarity. 
After that, we plot the similarity matrix which is aligned and reordered by the similarity value on the diagonal (in an descending order). Note that it is not necessary to do the alignment procedure for the PROB strategy since it is naturally aligned because of the direct distribution averaging over " + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 499, + 506, + 628 + ], + "type": "text", + "content": "-ensembles, but we did for fair comparison with other strategies." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "We applied the same procedure for different ensemble weighting strategies using DINO* with 4 " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "-ensembles. We randomly picked a pair of heads and visualize the similarity matrix before (top row) and after (bottom row) the alignment-reordering setup in Fig. 8. We found that before the alignment procedure, the similarity matrix of the PROB strategy already mostly aligns because it explicitly introduces code correspondence between different heads. Furthermore, by analyzing the similarity decay pattern on the diagonal, it is clear that ENT learns the most diverse " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "-ensembles while UNIF learns the least ones, which may explain the difference of their empirical performance. 
For completeness, we also include the visualization of aligned similarity matrices for all pairs of heads in Figs. 9 to 11, the observations are the same." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 93, + 232, + 294 + ], + "blocks": [ + { + "bbox": [ + 133, + 93, + 232, + 294 + ], + "lines": [ + { + "bbox": [ + 133, + 93, + 232, + 294 + ], + "spans": [ + { + "bbox": [ + 133, + 93, + 232, + 294 + ], + "type": "image", + "image_path": "93d84602fa00e114dd3c0daeb60a27b653229fa1fbb04e8c93b8bae3163f38e4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 165, + 303, + 199, + 314 + ], + "lines": [ + { + "bbox": [ + 165, + 303, + 199, + 314 + ], + "spans": [ + { + "bbox": [ + 165, + 303, + 199, + 314 + ], + "type": "text", + "content": "(a) UNIF" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 246, + 93, + 345, + 294 + ], + "blocks": [ + { + "bbox": [ + 246, + 93, + 345, + 294 + ], + "lines": [ + { + "bbox": [ + 246, + 93, + 345, + 294 + ], + "spans": [ + { + "bbox": [ + 246, + 93, + 345, + 294 + ], + "type": "image", + "image_path": "69e3ff8a8e40a87c085722110883fb610a78890e858601ad95efdd9cf61d3d2a.jpg" + } + ] + } + 
], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 278, + 303, + 314, + 314 + ], + "lines": [ + { + "bbox": [ + 278, + 303, + 314, + 314 + ], + "spans": [ + { + "bbox": [ + 278, + 303, + 314, + 314 + ], + "type": "text", + "content": "(b) PROB" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 360, + 94, + 478, + 296 + ], + "blocks": [ + { + "bbox": [ + 360, + 94, + 478, + 296 + ], + "lines": [ + { + "bbox": [ + 360, + 94, + 478, + 296 + ], + "spans": [ + { + "bbox": [ + 360, + 94, + 478, + 296 + ], + "type": "image", + "image_path": "7634234890279c11db0dfd9b0cb24cd3d4b24177529df99d2e1436bfc66bea1b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 303, + 434, + 314 + ], + "lines": [ + { + "bbox": [ + 403, + 303, + 434, + 314 + ], + "spans": [ + { + "bbox": [ + 403, + 303, + 434, + 314 + ], + "type": "text", + "content": "(c)ENT" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 142, + 401, + 246, + 540 + ], + "blocks": [ + { + "bbox": [ + 104, + 323, + 506, + 380 + ], + "lines": [ + { + "bbox": [ + 104, + 323, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 323, + 506, + 380 + ], + "type": "text", + "content": "Figure 8: Visualization of " + }, + { + "bbox": [ + 104, + 323, + 506, + 380 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 323, + 506, + 380 + ], + "type": "text", + "content": "-ensemble diversity. ENT learns the most diverse " + }, + { + "bbox": [ + 104, + 323, + 506, + 380 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 323, + 506, + 380 + ], + "type": "text", + "content": "-ensembles while UNIF learns the least ones. 
We visualize the code similarity matrix between a pair of randomly selected projection heads. Top row shows the original similarity matrix (i.e., in natural order) and the bottom row shows the aligned similarity matrix which aligns codes by empirical assignment probabilities. DINO* ViT-S/16 with 4 heads is used. Best viewed in color." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 142, + 401, + 246, + 540 + ], + "lines": [ + { + "bbox": [ + 142, + 401, + 246, + 540 + ], + "spans": [ + { + "bbox": [ + 142, + 401, + 246, + 540 + ], + "type": "image", + "image_path": "e05471b9f5e868d1dffbd8e5bcea52e9435ba7d5d8be5b8126734318036c4abf.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 250, + 402, + 353, + 539 + ], + "blocks": [ + { + "bbox": [ + 250, + 402, + 353, + 539 + ], + "lines": [ + { + "bbox": [ + 250, + 402, + 353, + 539 + ], + "spans": [ + { + "bbox": [ + 250, + 402, + 353, + 539 + ], + "type": "image", + "image_path": "4fcb7930e54c036bf09639468394c07434ebea281fd08eeee5a751eb6cbb0c30.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 360, + 402, + 484, + 539 + ], + "blocks": [ + { + "bbox": [ + 360, + 402, + 484, + 539 + ], + "lines": [ + { + "bbox": [ + 360, + 402, + 484, + 539 + ], + "spans": [ + { + "bbox": [ + 360, + 402, + 484, + 539 + ], + "type": "image", + "image_path": "be1db7c343bf205e685344364e792bd47d8b3209eaab5706fca18ff963715821.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 143, + 545, + 244, + 647 + ], + "blocks": [ + { + "bbox": [ + 143, + 545, + 244, + 647 + ], + "lines": [ + { + "bbox": [ + 143, + 545, + 244, + 647 + ], + "spans": [ + { + "bbox": [ + 143, + 545, + 244, + 647 + ], + "type": "image", + "image_path": 
"b26d4702e63657ab529e61b968bf21c875cc03721b6843d0481d2a8fd9ae83f9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "lines": [ + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": "Figure 9: Visualization of " + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": "-ensemble diversity between all pairs of heads for DINO*UNIF. The UNIF strategy does not learn diverse " + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 662, + 506, + 696 + ], + "type": "text", + "content": "-ensembles. DINO* with ViT-S/16 and 4 heads is used. Best viewed in color." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 251, + 545, + 353, + 647 + ], + "blocks": [ + { + "bbox": [ + 251, + 545, + 353, + 647 + ], + "lines": [ + { + "bbox": [ + 251, + 545, + 353, + 647 + ], + "spans": [ + { + "bbox": [ + 251, + 545, + 353, + 647 + ], + "type": "image", + "image_path": "d0eeef05fd5b7cbc2af0e80f19c6c5bc29c4d802d0008e6b11d345448f6f5d6a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 360, + 545, + 461, + 647 + ], + "blocks": [ + { + "bbox": [ + 360, + 545, + 461, + 647 + ], + "lines": [ + { + "bbox": [ + 360, + 545, + 461, + 647 + ], + "spans": [ + { + "bbox": [ + 360, + 545, + 461, + 647 + ], + "type": "image", + "image_path": "800c2c5d5ceee656fe26bbec9d5a4eec6a5751cf5cc5f19a9e7a56734071c9b8.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + 
"bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 143, + 113, + 246, + 233 + ], + "blocks": [ + { + "bbox": [ + 143, + 113, + 246, + 233 + ], + "lines": [ + { + "bbox": [ + 143, + 113, + 246, + 233 + ], + "spans": [ + { + "bbox": [ + 143, + 113, + 246, + 233 + ], + "type": "image", + "image_path": "272be9539ff5f046a8c9e23b472a43e11da8447364537fc81791658061a50b93.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 240, + 222, + 250 + ], + "lines": [ + { + "bbox": [ + 164, + 240, + 222, + 250 + ], + "spans": [ + { + "bbox": [ + 164, + 240, + 222, + 250 + ], + "type": "text", + "content": "Head 2-Head 3" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 251, + 113, + 353, + 233 + ], + "blocks": [ + { + "bbox": [ + 251, + 113, + 353, + 233 + ], + "lines": [ + { + "bbox": [ + 251, + 113, + 353, + 233 + ], + "spans": [ + { + "bbox": [ + 251, + 113, + 353, + 233 + ], + "type": "image", + "image_path": "be6fa8e76ae37017b8a194d4f64f1ae0bc32fecf97ff2739cc8970c7f355fe34.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 240, + 331, + 250 + ], + "lines": [ + { + "bbox": [ + 272, + 240, + 331, + 250 + ], + "spans": [ + { + "bbox": [ + 272, + 240, + 
331, + 250 + ], + "type": "text", + "content": "Head 2 - Head 4" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 360, + 113, + 484, + 234 + ], + "blocks": [ + { + "bbox": [ + 360, + 113, + 484, + 234 + ], + "lines": [ + { + "bbox": [ + 360, + 113, + 484, + 234 + ], + "spans": [ + { + "bbox": [ + 360, + 113, + 484, + 234 + ], + "type": "image", + "image_path": "01b47cb375d674dc6f01d1367efc025e231692d883ac3813544199c4fb5ff68f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 381, + 240, + 440, + 250 + ], + "lines": [ + { + "bbox": [ + 381, + 240, + 440, + 250 + ], + "spans": [ + { + "bbox": [ + 381, + 240, + 440, + 250 + ], + "type": "text", + "content": "Head 3 - Head 4" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 143, + 257, + 245, + 358 + ], + "blocks": [ + { + "bbox": [ + 143, + 257, + 245, + 358 + ], + "lines": [ + { + "bbox": [ + 143, + 257, + 245, + 358 + ], + "spans": [ + { + "bbox": [ + 143, + 257, + 245, + 358 + ], + "type": "image", + "image_path": "ae5e040b53d11107d21ac0a879235838cf924b9ab2b0f5caa0a1a792481f6568.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 373, + 506, + 407 + ], + "lines": [ + { + "bbox": [ + 104, + 373, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 506, + 407 + ], + "type": "text", + "content": "Figure 10: Visualization of " + }, + { + "bbox": [ + 104, + 373, + 506, + 407 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 373, + 506, + 407 + ], + "type": "text", + "content": "-ensemble diversity between all pairs of heads for DINO\\*PROB. 
The PROB strategy learns more diverse " + }, + { + "bbox": [ + 104, + 373, + 506, + 407 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 373, + 506, + 407 + ], + "type": "text", + "content": "-ensembles than UNIF. DINO\\* with ViT-S/16 and 4 heads is used. Best viewed in color." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 251, + 257, + 353, + 358 + ], + "blocks": [ + { + "bbox": [ + 251, + 257, + 353, + 358 + ], + "lines": [ + { + "bbox": [ + 251, + 257, + 353, + 358 + ], + "spans": [ + { + "bbox": [ + 251, + 257, + 353, + 358 + ], + "type": "image", + "image_path": "1375ae686351af0267608f622f6a018b95da8a0139150e0e7f5197326005e972.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 360, + 257, + 462, + 358 + ], + "blocks": [ + { + "bbox": [ + 360, + 257, + 462, + 358 + ], + "lines": [ + { + "bbox": [ + 360, + 257, + 462, + 358 + ], + "spans": [ + { + "bbox": [ + 360, + 257, + 462, + 358 + ], + "type": "image", + "image_path": "3ebf637ad44c2d4a824c4cab904b9560dcb9dba73522fc73344666c0257ba54f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 142, + 429, + 245, + 550 + ], + "blocks": [ + { + "bbox": [ + 142, + 429, + 245, + 550 + ], + "lines": [ + { + "bbox": [ + 142, + 429, + 245, + 550 + ], + "spans": [ + { + "bbox": [ + 142, + 429, + 245, + 550 + ], + "type": "image", + "image_path": "151f38dca80bdbc22fd11b7f1783b360b52b7abe6e22abdf1102b673eb0e0f96.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 557, + 222, + 567 + ], + "lines": [ + { + "bbox": [ + 164, + 557, + 222, + 567 + ], + "spans": [ + { + "bbox": [ + 164, + 557, + 222, + 567 + ], + "type": "text", + "content": "Head 2 - Head 3" + } + ] + } + ], + 
"index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 251, + 430, + 353, + 550 + ], + "blocks": [ + { + "bbox": [ + 251, + 430, + 353, + 550 + ], + "lines": [ + { + "bbox": [ + 251, + 430, + 353, + 550 + ], + "spans": [ + { + "bbox": [ + 251, + 430, + 353, + 550 + ], + "type": "image", + "image_path": "08efe47fa419970fcf1c422d4cc1d375935b1f0185e4068c9e706deec3c3fc1f.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 273, + 557, + 331, + 567 + ], + "lines": [ + { + "bbox": [ + 273, + 557, + 331, + 567 + ], + "spans": [ + { + "bbox": [ + 273, + 557, + 331, + 567 + ], + "type": "text", + "content": "Head 2 - Head 4" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 360, + 430, + 484, + 551 + ], + "blocks": [ + { + "bbox": [ + 360, + 430, + 484, + 551 + ], + "lines": [ + { + "bbox": [ + 360, + 430, + 484, + 551 + ], + "spans": [ + { + "bbox": [ + 360, + 430, + 484, + 551 + ], + "type": "image", + "image_path": "b36c8b5148138c8101d06cc95e49969bf5472dfab11ef4555ab5f67274ad87ce.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 381, + 557, + 439, + 567 + ], + "lines": [ + { + "bbox": [ + 381, + 557, + 439, + 567 + ], + "spans": [ + { + "bbox": [ + 381, + 557, + 439, + 567 + ], + "type": "text", + "content": "Head 3 - Head 4" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 143, + 574, + 245, + 676 + ], + "blocks": [ + { + "bbox": [ + 143, + 574, + 245, + 676 + ], + "lines": [ + { + "bbox": [ + 143, + 574, + 245, + 676 + ], + "spans": [ + { + "bbox": [ + 143, + 574, + 245, + 676 + ], + "type": "image", + "image_path": "99012c1cf59f4ca983add61d184cc261235a1f27e6126857cea6e0510e9fe7e8.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 104, + 690, + 506, + 724 + ], + "lines": [ + { + "bbox": [ + 104, + 690, + 506, + 724 + ], + "spans": [ + { + "bbox": [ + 104, + 690, + 506, + 724 + ], + "type": "text", + "content": "Figure 11: Visualization of " + }, + { + "bbox": [ + 104, + 690, + 506, + 724 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 690, + 506, + 724 + ], + "type": "text", + "content": "-ensemble diversity between all pairs of heads for DINO*-ENT. The ENT strategy learns the most diverse " + }, + { + "bbox": [ + 104, + 690, + 506, + 724 + ], + "type": "inline_equation", + "content": "(h_{\\psi},\\mu)" + }, + { + "bbox": [ + 104, + 690, + 506, + 724 + ], + "type": "text", + "content": "-ensembles. DINO* with ViT-S/16 and 4 heads is used. Best viewed in color." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 251, + 574, + 353, + 676 + ], + "blocks": [ + { + "bbox": [ + 251, + 574, + 353, + 676 + ], + "lines": [ + { + "bbox": [ + 251, + 574, + 353, + 676 + ], + "spans": [ + { + "bbox": [ + 251, + 574, + 353, + 676 + ], + "type": "image", + "image_path": "bec4ce2bf9a81d5b0de88b89f5799727a134240fd9cf66377c3e800caf989a13.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 360, + 574, + 462, + 676 + ], + "blocks": [ + { + "bbox": [ + 360, + 574, + 462, + 676 + ], + "lines": [ + { + "bbox": [ + 360, + 574, + 462, + 676 + ], + "spans": [ + { + "bbox": [ + 360, + 574, + 462, + 676 + ], + "type": "image", + "image_path": "4151f52eebf1277b2204f9bf6d0a923d161832e267af427000acb645d8f04a57.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 
+ ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 182, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 182, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 182, + 94 + ], + "type": "text", + "content": "D ANALYSIS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 107, + 194, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 107, + 194, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 107, + 194, + 118 + ], + "type": "text", + "content": "D.1 DERIVATIONS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 128, + 504, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 128, + 504, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 128, + 504, + 150 + ], + "type": "text", + "content": "In this subsection, we provide derivations for some non-trivial losses that we explore within our framework." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 156, + 340, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 340, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 340, + 168 + ], + "type": "text", + "content": "Recall that our weighted cross-entropy loss is of the form," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 197, + 174, + 504, + 236 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 174, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 197, + 174, + 504, + 236 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {n} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} H ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] (15) \\\\ = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \\log s (y | \\theta_ {j}, x) (16) \\\\ \\end{array}", + "image_path": "df04c8a64c0aeba76e5992c254b9ae667d330149a83bc3f2a5433ba1129c4027.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 168, + 238, + 504, + 259 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 238, + 504, + 259 + ], + "spans": [ + { + "bbox": [ + 168, + 238, + 504, + 259 + ], + "type": "interline_equation", + "content": "w _ {i j y} = \\operatorname {s o f t m a x} \\left(\\left\\{\\frac {1}{\\gamma} f _ {i j y} (\\operatorname {s t o p g r a d} (\\theta), x): i, j \\in [ m ] \\right\\}\\right). 
\\tag {17}", + "image_path": "bcbc22dec89755456abad632c35d0d42887c70f0b86da02ade0753825bc702b0.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 270, + 215, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 215, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 215, + 281 + ], + "type": "text", + "content": "Furthermore, observe that," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 117, + 287, + 504, + 317 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 287, + 504, + 317 + ], + "spans": [ + { + "bbox": [ + 117, + 287, + 504, + 317 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} \\sum_ {i, j \\in [ m ]} \\mathsf {H} ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] = \\sum_ {i, j \\in [ m ]} \\int_ {\\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \\nabla_ {\\theta} \\log s (y | \\theta_ {j}, x) \\mathrm {d} y. \\tag {18}", + "image_path": "b4a1925d631d4e243e1ba45032b50963297460913589259bae22419db2b381f2.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 323, + 504, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 323, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 323, + 504, + 346 + ], + "type": "text", + "content": "This indicates that the proposed weighted ensemble SSL loss is simply a reweighted log-likelihood loss. We use this fact in our derivation of probability weighting (PROB) loss." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 357, + 504, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 504, + 383 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 504, + 383 + ], + "type": "text", + "content": "Uniform weighting (UNIF) Our UNIF strategy in Eq. 
(6) uses " + }, + { + "bbox": [ + 104, + 357, + 504, + 383 + ], + "type": "inline_equation", + "content": "f_{ijy} = \\log \\delta (i - j)" + }, + { + "bbox": [ + 104, + 357, + 504, + 383 + ], + "type": "text", + "content": " which gives " + }, + { + "bbox": [ + 104, + 357, + 504, + 383 + ], + "type": "inline_equation", + "content": "w_{ijy} = \\frac{1}{m}\\delta (i - j)" + }, + { + "bbox": [ + 104, + 357, + 504, + 383 + ], + "type": "text", + "content": " (for any choice of " + }, + { + "bbox": [ + 104, + 357, + 504, + 383 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 357, + 504, + 383 + ], + "type": "text", + "content": "), thus the loss," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 176, + 388, + 504, + 451 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 388, + 504, + 451 + ], + "spans": [ + { + "bbox": [ + 176, + 388, + 504, + 451 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} \\frac {1}{m} \\delta (i - j) t _ {i} (y | x) \\log s (y | \\theta_ {j}, x) (19) \\\\ = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m} \\sum_ {i \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right] (20) \\\\ \\end{array}", + "image_path": "c3d26d2d3214e1d60e2c8163796a1320d44fa99231ef492ec0f9fb215bfa8d41.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 456, + 393, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 393, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 393, + 468 + ], + "type": "text", + "content": "This loss assigns equal weights to " + }, + { + "bbox": [ + 104, + 456, + 393, + 468 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 456, + 393, + 468 + ], + 
"type": "text", + "content": " pairs of pairwised student/teacher." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 473, + 506, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 473, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 473, + 506, + 498 + ], + "type": "text", + "content": "An straightforward generalization is to assign equal weights to all possible pairs " + }, + { + "bbox": [ + 104, + 473, + 506, + 498 + ], + "type": "inline_equation", + "content": "(m^2)" + }, + { + "bbox": [ + 104, + 473, + 506, + 498 + ], + "type": "text", + "content": " of student/teacher with " + }, + { + "bbox": [ + 104, + 473, + 506, + 498 + ], + "type": "inline_equation", + "content": "f_{ijy} = 0" + }, + { + "bbox": [ + 104, + 473, + 506, + 498 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 473, + 506, + 498 + ], + "type": "inline_equation", + "content": "w_{ijy} = \\frac{1}{m^2}" + }, + { + "bbox": [ + 104, + 473, + 506, + 498 + ], + "type": "text", + "content": ", which gives the UNIF-ALL loss," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 181, + 502, + 504, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 502, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 181, + 502, + 504, + 533 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\text {U N I F - A L L}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m ^ {2}} \\sum_ {i, j \\in [ m ]} \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right], \\tag {21}", + "image_path": "883f661a547ea7257918ad5e653de5ca813939eca91bb9068fc4b08363f9fbba.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 544, + 425, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 425, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 425, + 557 + ], + "type": "text", + "content": "Probability 
weighting (PROB) Recall our PROB loss in Eq. (7) has the form," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 171, + 563, + 504, + 601 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 563, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 171, + 563, + 504, + 601 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\mathrm {H} ^ {\\times} \\left[ \\frac {1}{m} \\sum_ {i \\in [ m ]} t _ {i} (Y | x), \\frac {1}{m} \\sum_ {j \\in [ m ]} s (Y | \\theta_ {j}, x) \\right]. \\tag {22}", + "image_path": "74666ea0932ed664cd342954ff757c336333a8256762dbedfdf1d5eaf8b344c5.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 607, + 504, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 607, + 504, + 630 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 504, + 630 + ], + "type": "text", + "content": "We derive its equivalence with our general loss with " + }, + { + "bbox": [ + 104, + 607, + 504, + 630 + ], + "type": "inline_equation", + "content": "f_{ijy} = \\log s(y|\\theta_j,x)" + }, + { + "bbox": [ + 104, + 607, + 504, + 630 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 607, + 504, + 630 + ], + "type": "inline_equation", + "content": "\\gamma = 1" + }, + { + "bbox": [ + 104, + 607, + 504, + 630 + ], + "type": "text", + "content": " in terms of the gradients," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 152, + 635, + 504, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 635, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 152, + 635, + 504, + 734 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\nabla_ {\\theta} \\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\log \\frac {1}{m} \\sum_ {j \\in [ m ]} 
s (y | \\theta_ {j}, x) \\mathrm {d} y (23) \\\\ = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\nabla_ {\\theta} \\log \\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j}, x) d y (24) \\\\ = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\frac {\\frac {1}{m} \\sum_ {j \\in [ m ]} \\nabla_ {\\theta} s (y | \\theta_ {j} , x)}{\\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j} , x)} d y (25) \\\\ \\end{array}", + "image_path": "a881c8a5ec70cf49ebd5af07bca5ea33555e582a5b8449da6dcefeacb0a39b7d.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 203, + 79, + 504, + 178 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 79, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 203, + 79, + 504, + 178 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} = \\frac {1}{m} \\sum_ {i \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\frac {\\frac {1}{m} \\sum_ {j \\in [ m ]} s (y | \\theta_ {j} , x) \\nabla_ {\\theta} \\log s (y | \\theta_ {j} , x)}{\\frac {1}{m} \\sum_ {j ^ {\\prime} \\in [ m ]} s (y | \\theta_ {j ^ {\\prime}} , x)} d y (26) \\\\ = \\frac {1}{m} \\sum_ {i, j \\in [ m ]} \\int_ {\\mathcal {Y}} t _ {i} (y | x) \\frac {s (y | \\theta_ {j} , 
x)}{\\sum_ {j ^ {\\prime} \\in [ m ]} s (y | \\theta_ {j ^ {\\prime}} , x)} \\nabla_ {\\theta} \\log s (y | \\theta_ {j}, x) d y (27) \\\\ = \\nabla_ {\\theta} \\frac {1}{m} \\sum_ {i, j \\in [ m ]} \\mathsf {H} ^ {\\times} \\left[ w _ {i j Y} \\odot t _ {i} (Y | x), s (Y | \\theta_ {j}, x) \\right] (28) \\\\ \\end{array}", + "image_path": "f94eeb9a2453e0465d1823fea498f92cf7c4e8b632af50d6fe2e21053560234a.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "inline_equation", + "content": "w_{ijy} = \\frac{s(y|\\theta_j,x)}{\\sum_{j'\\in[m]}s(y|\\theta_{j'}x)}" + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "text", + "content": " (or equivalently, " + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "inline_equation", + "content": "f_{ijy} = \\log s(y|\\theta_j,x)" + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\gamma = 1" + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "text", + "content": "). The last equality is because " + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "inline_equation", + "content": "w_{ijy}" + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "text", + "content": " is stopped gradient with respect to " + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 188, + 506, + 251 + ], + "type": "text", + "content": ". This is the same analysis as done in Burda et al. (2016). 
The above formation establishes the equivalence of gradients between two losses, which implies the same behavior (e.g., optimum) using gradient-based optimization, as the common practice of deep learning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 255, + 504, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 294 + ], + "type": "text", + "content": "We also generalize this loss to some variants which we explore in Table 12. A \"dual\" variant is to use teacher predictions " + }, + { + "bbox": [ + 104, + 255, + 504, + 294 + ], + "type": "inline_equation", + "content": "f_{ijy} = \\log t_i(y|x)" + }, + { + "bbox": [ + 104, + 255, + 504, + 294 + ], + "type": "text", + "content": " instead of student ones; this implies " + }, + { + "bbox": [ + 104, + 255, + 504, + 294 + ], + "type": "inline_equation", + "content": "w_{ijy} = \\frac{t_i(y|x)}{\\sum_{i' \\in [m]} t_{i'}(y|x)}" + }, + { + "bbox": [ + 104, + 255, + 504, + 294 + ], + "type": "text", + "content": " and the PROB-TE loss," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 156, + 300, + 504, + 332 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 300, + 504, + 332 + ], + "spans": [ + { + "bbox": [ + 156, + 300, + 504, + 332 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\mathrm {P R O B - T E}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} \\frac {t _ {i} (y | x)}{\\sum_ {i ^ {\\prime} \\in [ m ]} t _ {i ^ {\\prime}} (y | x)} t _ {i} (y | x) \\log s (y | \\theta_ {j}, x). 
\\tag {29}", + "image_path": "bf01620ed0ce2a99cb794df464cdf621d58742c507c162938e600eb4dc1bbea0.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 340, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 504, + 369 + ], + "type": "text", + "content": "Note that this simply reduces to use a weighted teacher predictions " + }, + { + "bbox": [ + 104, + 340, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\frac{t_i(y|x)}{\\sum_{i' \\in [m]} t_{i'}(y|x)} t_i(y|x)" + }, + { + "bbox": [ + 104, + 340, + 504, + 369 + ], + "type": "text", + "content": " as the surrogate target that is shared across all students." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "text", + "content": "Another generalization is to use \"hard\" weighting, i.e., " + }, + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "inline_equation", + "content": "\\gamma \\rightarrow 0" + }, + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "text", + "content": ", which gives the PROB-MAX loss that only assigns weight to the most confident student," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 158, + 403, + 504, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 403, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 158, + 403, + 504, + 434 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\mathrm {P R O B - M A X}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i, j \\in [ m ]} \\sum_ {y \\in \\mathcal {Y}} w _ {i j y} t _ {i} (y | x) \\log s (y | \\theta_ {j}, x) \\tag {30}", + "image_path": "1d10373a9689868e14732e3718b6282ec5d8ef123b75ea10a437d80d63a22507.jpg" + } + ] + 
} + ], + "index": 7 + }, + { + "bbox": [ + 160, + 435, + 504, + 454 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 435, + 504, + 454 + ], + "spans": [ + { + "bbox": [ + 160, + 435, + 504, + 454 + ], + "type": "interline_equation", + "content": "w _ {i j y} = \\delta \\left(i - i ^ {*}\\right) \\delta \\left(j - j ^ {*}\\right), \\quad \\left(i ^ {*}, j ^ {*}\\right) = \\arg \\max _ {i j} f _ {i j y}, \\forall y \\in \\mathcal {Y}. \\tag {31}", + "image_path": "c0e8f14a982bdd943d568bd0c940f7ef8c3b83c293c408e2e5757fd626f9f0d6.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 461, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 461, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 504, + 495 + ], + "type": "text", + "content": "This loss reduces to a generalization of multiple choice learning (Guzman-Rivera et al., 2012) used in multi-headed networks (Lee et al., 2015) in our ensemble SSL setup. Similarly we can also derive the dual variant of it that uses the teacher predictions, which is omitted here for brevity." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 508, + 504, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 508, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 508, + 504, + 545 + ], + "type": "text", + "content": "Entropy weighting (ENT) The derivation of ENT loss in Eq. (9) is similar to the UNIF loss but applies an entropy weights. 
Recall that we use " + }, + { + "bbox": [ + 104, + 508, + 504, + 545 + ], + "type": "inline_equation", + "content": "f_{ijy} = -\\mathsf{H}[t_i(Y|x)] + \\log \\delta (i - j)" + }, + { + "bbox": [ + 104, + 508, + 504, + 545 + ], + "type": "text", + "content": ", which gives " + }, + { + "bbox": [ + 104, + 508, + 504, + 545 + ], + "type": "inline_equation", + "content": "w_{ijy} = \\mathrm{softmax}_i(\\{-\\frac{1}{\\gamma}\\mathsf{H}[t_{i'}(Y|x)]:i'\\in [m]\\})" + }, + { + "bbox": [ + 104, + 508, + 504, + 545 + ], + "type": "text", + "content": " and," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 552, + 504, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 552, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 113, + 552, + 504, + 583 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\mathrm {E N T}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i \\in [ m ]} \\operatorname {s o f t m a x} _ {i} \\left(\\left\\{- \\frac {1}{\\gamma} \\mathrm {H} \\left[ t _ {i ^ {\\prime}} (Y | x) : i ^ {\\prime} \\in [ m ] \\right\\}\\right) \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right]\\right). 
\\tag {32}", + "image_path": "8633d4fa8b0ca6014a9763c5ed910c95ae54d2751aaba5cc6b506abda3cd305b.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 590, + 504, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 504, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 504, + 615 + ], + "type": "text", + "content": "One can also generalize it to its dual variant which uses the student entopies, i.e., " + }, + { + "bbox": [ + 104, + 590, + 504, + 615 + ], + "type": "inline_equation", + "content": "f_{ijy} = -\\mathsf{H}[s(Y|\\theta_j,x)] + \\log \\delta (i - j)" + }, + { + "bbox": [ + 104, + 590, + 504, + 615 + ], + "type": "text", + "content": ", which gives the ENT-ST loss," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 620, + 504, + 662 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 620, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 111, + 620, + 504, + 662 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\mathrm {E N T - S T}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\sum_ {i \\in [ m ]} \\operatorname {s o f t m a x} _ {i} \\left(\\left\\{- \\frac {1}{\\gamma} \\mathrm {H} [ s (Y | \\theta_ {i ^ {\\prime}}, x) ]: i ^ {\\prime} \\in [ m ] \\right\\}\\right) \\mathrm {H} ^ {\\times} \\left[ t _ {i} (Y | x), s (Y | \\theta_ {i}, x) \\right]. 
\\tag {33}", + "image_path": "36072b890579dd42958333fc4e11990091b8d57ece73e653d4a6ba2e314c7a39.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 677, + 239, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 239, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 239, + 689 + ], + "type": "text", + "content": "D.2 RELATING SOME LOSSES" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": "Here, we relate some losses derived above. Specifically, we relate the uniform weighting (UNIF, UNIF-ALL) and probability weighting (PROB) in Appx. D.2.1, and relate entropy weighting (ENT) and variance weighting in Appx. D.2.2." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 311, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 311, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 311, + 94 + ], + "type": "text", + "content": "D.2.1 UNIFORM & PROBABILITY WEIGHTING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 101, + 506, + 168 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 101, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 104, + 101, + 506, + 168 + ], + "type": "text", + "content": "We first establish the relation between UNIF and PROB using the joint convexity of unnormalized KL divergence and the fact that our weighted cross-entropy loss is a weighted unnormalized KL divergence up to some constant in " + }, + { + "bbox": [ + 104, + 101, + 506, + 168 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 101, + 506, + 168 + ], + "type": "text", + "content": ". In particular, the joint convexity of unnormalized KL divergence can be shown by combining the facts that Csiszár " + }, + { + "bbox": [ + 104, + 101, + 506, + 168 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 101, + 506, + 168 + ], + "type": "text", + "content": "-divergences are jointly convex (Proposition 1 in Dragomir (2013)) and unnormalized KL divergence corresponds to the convex generator, " + }, + { + "bbox": [ + 104, + 101, + 506, + 168 + ], + "type": "inline_equation", + "content": "f(u) = u\\log u - u + 1" + }, + { + "bbox": [ + 104, + 101, + 506, + 168 + ], + "type": "text", + "content": ", as required by the proposition." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 173, + 489, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 173, + 489, + 185 + ], + "spans": [ + { + "bbox": [ + 104, + 173, + 489, + 185 + ], + "type": "text", + "content": "First, our weighted cross-entropy loss is unnormalized KL divergence up to some constant in " + }, + { + "bbox": [ + 104, + 173, + 489, + 185 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 173, + 489, + 185 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 155, + 190, + 504, + 220 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 190, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 155, + 190, + 504, + 220 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} \\frac {1}{m} \\sum_ {i \\in [ m ]} \\mathrm {K} \\left[ t _ {i} (Y | x), s \\left(Y \\mid \\theta_ {i}, x\\right) \\right] + \\text {c o n s t a n t} \\tag {34}", + "image_path": "22d8e6f255374e3f1813c4b1394a2ca20498fe0dab831dd2ae452b82b3d9baab.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 155, + 223, + 504, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 223, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 155, + 223, + 504, + 262 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} (\\theta) = \\frac {1}{n} \\sum_ {x \\in \\mathcal {D} _ {n}} K \\left[ \\frac {1}{m} \\sum_ {i \\in [ m ]} t _ {i} (Y | x), \\frac {1}{m} \\sum_ {j \\in [ m ]} s (Y | \\theta_ {j}, x) \\right] + \\text {c o n s t a n t} \\tag {35}", + "image_path": "987560568239edd2f9ff0d12d041d285983a18e2714b24d25b31c8a6c0d22afd.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + 
{ + "bbox": [ + 104, + 266, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "type": "text", + "content": "Therefore, the joint convexity of (unnormized) KL divergence directly implies an ordering of the loss up to some constant in " + }, + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "type": "text", + "content": ", i.e.," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 273, + 294, + 504, + 308 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 273, + 294, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 273, + 294, + 504, + 308 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\mathrm {P R O B}} \\leq \\mathcal {L} _ {n} ^ {\\mathrm {U N I F}} \\tag {36}", + "image_path": "dea4103539725dcf2b78d359d63341d0601cc0b843aec1926f0f1e365216b5ae.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 314, + 506, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 506, + 347 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 506, + 347 + ], + "type": "text", + "content": "Furthermore, we can also relate PROB and UNIF-ALL using the fact that the (unnormized) cross-entropy " + }, + { + "bbox": [ + 104, + 314, + 506, + 347 + ], + "type": "inline_equation", + "content": "\\mathsf{H}^{\\times}[p(X), q(X)]" + }, + { + "bbox": [ + 104, + 314, + 506, + 347 + ], + "type": "text", + "content": " is linear in the first argument " + }, + { + "bbox": [ + 104, + 314, + 506, + 347 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 314, + 506, + 347 + ], + "type": "text", + "content": " but convex in the second argument " + }, + { + "bbox": [ + 104, + 314, + 506, + 347 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 314, + 506, + 347 + ], + "type": "text", + "content": ", which implies," 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 265, + 353, + 504, + 368 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 353, + 504, + 368 + ], + "spans": [ + { + "bbox": [ + 265, + 353, + 504, + 368 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {n} ^ {\\text {P R O B}} \\leq \\mathcal {L} _ {n} ^ {\\text {U N I F - A L L}} \\tag {37}", + "image_path": "40c1f06f6cb8136fd1b96a93530c5f88114226da212746cfc1e71ba7e123a1ca.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 378, + 297, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 378, + 297, + 389 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 297, + 389 + ], + "type": "text", + "content": "D.2.2 ENTROPY & VARIANCE WEIGHTING" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 397, + 453, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 453, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 453, + 411 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 104, + 397, + 453, + 411 + ], + "type": "inline_equation", + "content": "p(X)" + }, + { + "bbox": [ + 104, + 397, + 453, + 411 + ], + "type": "text", + "content": " is a discrete distribution (normalized) on " + }, + { + "bbox": [ + 104, + 397, + 453, + 411 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = [c]" + }, + { + "bbox": [ + 104, + 397, + 453, + 411 + ], + "type": "text", + "content": ". 
It can be shown that," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 205, + 415, + 504, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 415, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 205, + 415, + 504, + 430 + ], + "type": "interline_equation", + "content": "\\mathsf {H} [ p (X) ] \\leq \\frac {1}{2} \\log \\left(\\operatorname {V a r} _ {p} [ X ] + \\frac {1}{1 2}\\right) + \\frac {1}{2} \\log (2 \\pi e) \\tag {38}", + "image_path": "25614e7c479ac2433d5a9e25186b2a1a26a8e9bd789321bc40b42e5e6f4c839c.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "inline_equation", + "content": "\\operatorname{Var}_p[X] = \\sum_{x \\in [c]} p(x)(x - \\mu)^2" + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "inline_equation", + "content": "\\mu = \\mathsf{E}_p[X] = \\sum_{x \\in [c]} p(x)x" + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "text", + "content": " (Theorem 9.7.1, Cover & Thomas (1999)). Note, a tighter bound (Mow, 1998) also exists but it places stronger restrictions on " + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "text", + "content": ". 
This relationship suggests that choosing weights proportional to " + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "inline_equation", + "content": "\\exp(-\\mathsf{H}[t_i(Y|x)])" + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "text", + "content": " (as in ENT) is potentially related to choosing weights proportional to weighting by variance " + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "inline_equation", + "content": "(\\operatorname{Var}_{t_i(Y|x)}[Y] + \\epsilon)^{-1/2}" + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "inline_equation", + "content": "(\\epsilon = \\frac{1}{12})" + }, + { + "bbox": [ + 104, + 435, + 506, + 499 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_content_list.json b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet 
Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..59f53a6c432e786c723fd1a4ce2580504f99052a --- /dev/null +++ b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_content_list.json @@ -0,0 +1,2541 @@ +[ + { + "type": "text", + "text": "WHAT CAN WE LEARN FROM THE SELECTIVE PREDICTION AND UNCERTAINTY ESTIMATION PERFORMANCE OF 523 IMAGENET CLASSIFIERS?", + "text_level": 1, + "bbox": [ + 171, + 98, + 828, + 169 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ido Galil", + "bbox": [ + 184, + 191, + 253, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Technion", + "bbox": [ + 184, + 205, + 246, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "idogail.iq@gmail.com", + "bbox": [ + 184, + 220, + 370, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mohammed Dabbah", + "bbox": [ + 398, + 191, + 545, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Amazon", + "bbox": [ + 398, + 207, + 457, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "m.m.dabbah@gmail.com", + "bbox": [ + 398, + 220, + 576, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ran El-Yaniv", + "bbox": [ + 604, + 191, + 702, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Technion, Deci.AI", + "bbox": [ + 606, + 207, + 746, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "rani@cs.technion.ac.il", + "bbox": [ + 606, + 220, + 800, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 271, + 545, + 285 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "When deployed for risk-sensitive tasks, deep neural networks must include an uncertainty estimation mechanism. 
Here we examine the relationship between deep architectures and their respective training regimes, with their corresponding selective prediction and uncertainty estimation performance. We consider some of the most popular estimation performance metrics previously proposed including AUROC, ECE, AURC as well as coverage for selective accuracy constraint. We present a novel and comprehensive study of selective prediction and the uncertainty estimation performance of 523 existing pretrained deep ImageNet classifiers that are available in popular repositories. We identify numerous and previously unknown factors that affect uncertainty estimation and examine the relationships between the different metrics. We find that distillation-based training regimes consistently yield better uncertainty estimations than other training schemes such as vanilla training, pretraining on a larger dataset and adversarial training. Moreover, we find a subset of ViT models that outperform any other models in terms of uncertainty estimation performance. For example, we discovered an unprecedented $99\\%$ top-1 selective accuracy on ImageNet at $47\\%$ coverage (and $95\\%$ top-1 accuracy at $80\\%$ ) for a ViT model, whereas a competing EfficientNet-V2-XL cannot obtain these accuracy constraints at any level of coverage. Our companion paper, also published in ICLR 2023 (Galil et al., 2023), examines the performance of these classifiers in a class-out-of-distribution setting.", + "bbox": [ + 228, + 299, + 767, + 579 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 599, + 336, + 616 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The excellent performance of deep neural networks (DNNs) has been demonstrated in a range of applications, including computer vision, natural language understanding and audio processing. 
To deploy these models successfully, it is imperative that they provide an uncertainty quantification of their predictions, either via some kind of selective prediction or a probabilistic confidence score.", + "bbox": [ + 169, + 631, + 826, + 688 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Notwithstanding, what metric should we use to evaluate the uncertainty estimation performance? There are many and diverse ways so the answer to this question is not obvious, and to demonstrate the difficulty, consider the case of two classification models for the stock market that predict whether a stock's value is about to increase, decrease, or remain neutral (three-class classification). Suppose that model A has a $95\\%$ true accuracy, and generates a confidence score of 0.95 on every prediction (even on misclassified instances); model B has a $40\\%$ true accuracy, but always gives a confidence score of 0.6 on correct predictions, and 0.4 on incorrect ones. Model B can be utilized easily to generate perfect investment decisions. Using selective prediction (El-Yaniv & Wiener, 2010; Geifman & El-Yaniv, 2017), Model B will simply reject all investments on stocks whenever the confidence score is 0.4. While model A offers many more investment opportunities, each of its predictions carries a $5\\%$ risk of failure.", + "bbox": [ + 169, + 694, + 826, + 847 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Among the various metrics proposed for evaluating the performance of uncertainty estimation are: Area Under the Receiver Operating Characteristic (AUROC or AUC), Area Under the Risk-Coverage curve (AURC) (Geifman et al., 2018), selective risk or coverage for a selective accuracy constraint (SAC), Negative Log-likelihood (NLL), Expected Calibration Error (ECE), which is often used for evaluating a model's calibration (see Section 2) and Brier score (Brier, 1950). 
All these metrics", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/fa9b91586e5aca9d13d95ff3b56f34ea71d0af6bbb95e238639c29c5ecf07ebc.jpg", + "image_caption": [ + "Figure 1: A comparison of 523 models by their AUROC ( $\\times 100$ , higher is better) and -log(ECE) (higher is better) on ImageNet. Each marker's size is determined by the model's number of parameters. A full version graph is given in Figure 8. Distilled models are better than non-distilled ones. A subset of ViT models is superior to all other models for all aspects of uncertainty estimation (\"ViT\" in the legend, marked as a red triangle facing upwards); the performance of EfficientNet-V2 and GENet models is worse." + ], + "image_footnote": [], + "bbox": [ + 300, + 99, + 699, + 305 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "are well known and are often used for comparing the uncertainty estimation performance of models (Moon et al., 2020; Nado et al., 2021; Maddox et al., 2019; Lakshminarayanan et al., 2017). Somewhat surprisingly, NLL, Brier, AURC, and ECE all fail to reveal the uncertainty superiority of Model B in our investment example (see Appendix A for the calculations). Both AUROC and SAC, on the other hand, reveal the advantage of Model B perfectly (see Appendix A for details). It is not hard to construct counterexamples where these two metrics fails and others (e.g., ECE) succeed. To sum up this brief discussion, we believe that the ultimate suitability of a performance metric should be determined by its context. 
If there is no specific application in mind, there is a strong incentive to examine a variety of metrics, as we choose to do in this study.", + "bbox": [ + 169, + 426, + 823, + 551 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This study evaluates the ability of 523 models from the Torchvision and Timm repositories (Paszke et al., 2019; Wightman, 2019) to estimate uncertainty1. Our study identifies several major factors that affect confidence rankings, calibration, and selective prediction, and lead to numerous empirical contributions important to selective predictions and uncertainty estimation. While no new algorithm or method is introduced in our paper, our study generates many interesting conclusions that will help practitioners achieve more powerful uncertainty estimation. Moreover, the research questions that are uncovered by our empirical study shed light on uncertainty estimation, which may stimulate the development of new methods and techniques for improving uncertainty estimation. Among the most interesting conclusions our study elicits are:", + "bbox": [ + 169, + 558, + 823, + 683 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Knowledge distillation training improves estimation. Training regimes incorporating any kind of knowledge distillation (KD) (Hinton et al., 2015) lead to DNNs with improved uncertainty estimation performance evaluated by any metric, more than by using any other training tricks (such as pretraining on a larger dataset, adversarial training, etc.). In Galil et al. (2023) we find similar performance boosts for class-out-of-distribution (C-OOD) detection.", + "(2) Certain architectures are more inclined to perform better or worse at uncertainty estimation. 
Some architectures seem more inclined to perform well on all aspects of uncertainty estimation, e.g., a subset of vision transformers (ViTs) (Dosovitskiy et al., 2021) and the zero-shot language-vision CLIP model (Radford et al., 2021), while other architectures tend to perform worse, e.g., EfficientNet-V2 and GENet (Tan & Le, 2021; Lin et al., 2020). These results are visualized in Figure 1. In Galil et al. (2023) we find that ViTs and CLIPs are also powerful C-OOD detectors.", + "(3) Several training regimes result in a subset of ViTs that outperforms all other architectures and training regimes. These regimes include the original one from the paper introducing ViTs (Dosovitskiy et al., 2021; Steiner et al., 2022; Chen et al., 2022; Ridnik et al., 2021). These ViTs" + ], + "bbox": [ + 169, + 690, + 826, + 900 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Our code is available at https://github.com/IdoGalil/benchmarking-uncertainty-estimation-performance", + "bbox": [ + 191, + 909, + 810, + 922 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "achieve the best uncertainty estimation performance on any aspect measured, both in absolute terms and per-model size (# parameters, see Figures 9 and 10 in Appendix B).", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(4) Temperature scaling improves selective and ranking performance. 
The simple post-training calibration method of temperature scaling (Guo et al., 2017), which is known to improve ECE, for the most part also improves ranking (AUROC) and selective prediction—meaning not only does it calibrate the probabilistic estimation for each individual instance, but it also improves the partial order of all instances induced by those improved estimations, pushing instances more likely to be correct to have a higher confidence score than instances less likely to be correct (see Section 3).", + "(5) The correlations between AUROC, ECE, accuracy and the number of parameters are dependent on the architecture analyzed. Contrary to previous work by (Guo et al., 2017), we observe that while there is a strong correlation between accuracy/number of parameters and ECE or AUROC within each specific family of models of the same architecture, the correlation flips between a strong negative and a strong positive correlation depending on the type of architecture being observed. For example, as DLA (Yu et al., 2018) architectures increase in size and accuracy, their ECE deteriorates while their AUROC improves. The exact opposite, however, can be observed in XCiTs (Ali et al., 2021) as their ECE improves with size while their AUROC deteriorates (see Appendix L).", + "(6) The best model in terms of AUROC or SAC is not always the best in terms of calibration, as illustrated in Figure 1, and the trade-off should be considered when choosing a model based on its application." + ], + "bbox": [ + 169, + 138, + 826, + 391 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 HOW TO EVALUATE DEEP UNCERTAINTY ESTIMATION PERFORMANCE", + "text_level": 1, + "bbox": [ + 171, + 410, + 779, + 426 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Let $\\mathcal{X}$ be the input space and $\\mathcal{Y}$ be the label space. Let $P(\\mathcal{X},\\mathcal{Y})$ be an unknown distribution over $\\mathcal{X}\\times \\mathcal{Y}$ . 
A model $f$ is a prediction function $f:\mathcal{X}\to \mathcal{Y}$ , and its predicted label for an image $x$ is denoted by $\hat{y}_f(x)$ . The model's true risk w.r.t. $P$ is $R(f|P) = E_{P(\mathcal{X},\mathcal{Y})}[\ell (f(x),y)]$ , where $\ell :\mathcal{Y}\times \mathcal{Y}\rightarrow \mathbb{R}^{+}$ is a given loss function, for example, 0/1 loss for classification. Given a labeled set $S_{m} = \{(x_{i},y_{i})\}_{i = 1}^{m}\subseteq (\mathcal{X}\times \mathcal{Y})$ , sampled i.i.d. from $P(\mathcal{X},\mathcal{Y})$ , the empirical risk of model $f$ is $\hat{r} (f|S_m)\triangleq \frac{1}{m}\sum_{i = 1}^{m}\ell (f(x_i),y_i)$ . Following Geifman et al. (2018), for a given model $f$ we define a confidence score function $\kappa (x,\hat{y} |f)$ , where $x\in \mathcal{X}$ , and $\hat{y}\in \mathcal{Y}$ is the model's prediction for $x$ , as follows. The function $\kappa$ should quantify confidence in the prediction of $\hat{y}$ for the input $x$ , based on signals from model $f$ . This function should induce a partial order over instances in $\mathcal{X}$ .", + "bbox": [ + 169, + 441, + 826, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The most common and well-known $\kappa$ function for a classification model $f$ (with softmax at its last layer) is its softmax response values: $\kappa(x, \hat{y} | f) \triangleq f(x)_{\hat{y}}$ (Cordella et al., 1995; De Stefano et al., 2000). We chose to focus on studying uncertainty estimation performance using softmax response as the models' $\kappa$ function because of its extreme popularity, and its importance as a baseline due to its solid performance compared to other methods (Geifman & El-Yaniv, 2017; Geifman et al., 2018). While this is the main $\kappa$ we evaluate, we also test the popular uncertainty estimation technique of Monte Carlo dropout (MC dropout) (Gal & Ghahramani, 2016), which is motivated by Bayesian reasoning. 
Although these methods use the direct output from $f$ , $\kappa$ could be a different model unrelated to $f$ and unable to affect $f$ 's predictions. Note that to enable a probabilistic interpretation, $\kappa$ can only be calibrated if its values reside in $[0, 1]$ whereas for ranking and selective prediction any value in $\mathbb{R}$ can be used.", + "bbox": [ + 169, + 577, + 826, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A selective model $f$ (El-Yaniv & Wiener, 2010; Chow, 1957) uses a selection function $g: \mathcal{X} \to \{0,1\}$ to serve as a binary selector for $f$ , enabling it to abstain from giving predictions for certain inputs. $g$ can be defined by a threshold $\theta$ on the values of a $\kappa$ function such that $g_{\theta}(x|\kappa, f) = \mathbb{1}[\kappa(x, \hat{y}_f(x)|f) > \theta]$ . The performance of a selective model is measured using coverage and risk, where coverage, defined as $\phi(f, g) = E_P[g(x)]$ , is the probability mass of the non-rejected instances in $\mathcal{X}$ . The selective risk of the selective model $(f, g)$ is defined as $R(f, g) \triangleq \frac{E_P[\ell(f(x), y)\, g(x)]}{\phi(f, g)}$ . These quantities can be evaluated empirically over a finite labeled set $S_m$ , with the empirical coverage defined as $\hat{\phi}(f, g|S_m) = \frac{1}{m} \sum_{i=1}^{m} g(x_i)$ , and the empirical selective risk defined as $\hat{r}(f, g|S_m) \triangleq \frac{1}{m} \sum_{i=1}^{m} \frac{\ell(f(x_i), y_i) g(x_i)}{\hat{\phi}(f, g|S_m)}$ . Similarly, SAC is defined as the largest coverage available for a specific accuracy constraint. 
A way to visually inspect the behavior of a $\\kappa$ function for selective prediction can be done using the risk-coverage (RC) curve (El-Yaniv & Wiener, 2010)—a curve showing the selective risk as a function of coverage, measured on some chosen test set; see Figure 2 for an", + "bbox": [ + 169, + 738, + 826, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/3ed74c4b0f126392266bf292dc08034cc04d6d2875f51211b042029c66bc05df.jpg", + "image_caption": [ + "Figure 2: The RC curve made by a ResNet18 trained on CIFAR-10, measured on the test set. The risk is calculated using a 0/1 loss (meaning the model has about $95\\%$ accuracy for 1.0 coverage); the $\\kappa$ used was softmax-response. The value of the risk at each point of coverage corresponds to the selective risk of the model when rejecting inputs that are not covered at that coverage slice. e.g., the selective risk for coverage 0.8 is about $0.5\\%$ , meaning that an end user setting a matching threshold would enjoy a model accuracy of $99.5\\%$ on the $80\\%$ of images the model would not reject." + ], + "image_footnote": [], + "bbox": [ + 367, + 99, + 632, + 261 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "example. In general, though, two RC curves are not necessarily comparable if one does not fully dominate the other (Figure 3 shows an example of lack of dominance).", + "bbox": [ + 169, + 398, + 823, + 428 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The AURC and E-AURC metrics were defined by (Geifman et al., 2018) for quantifying the selective quality of $\\kappa$ functions via a single number, with AURC being defined as the area under the RC curve. 
AURC, however, is very sensitive to the model's accuracy, and in an attempt to mitigate this, E-AURC was suggested. The latter also suffers from sensitivity to accuracy, as we demonstrate in Appendix C. The advantage of scalar metrics such as the above is that they summarize the model's overall uncertainty estimation behavior by reducing it to a single scalar. When not carefully chosen, however, these reductions could result in a loss of vital information about the problem (recall the investment example from Section 1, which is also discussed in Appendix A: reducing an RC curve to an AURC does not show that Model B has an optimal 0 risk if the coverage is smaller than 0.4). Thus, the choice of the \"correct\" single scalar performance metric unfortunately must be task-specific. When comparing the uncertainty estimation performance of deep architectures that exhibit different accuracies, we find that AUROC and SAC can effectively \"normalize\" accuracy differences that plague the usefulness of other metrics (see Figure 3). This normalization is essential in our study where we compare uncertainty performance of hundreds of models that can greatly differ in their accuracies.", + "bbox": [ + 169, + 434, + 826, + 642 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For risk-sensitive deployment, let us consider the two models in Figure 3; EfficientNet-V2-XL (Tan & Le, 2021) and ViT-B/32-SAM (Chen et al., 2022). While the former model has better overall accuracy and AURC (metrics that could lead us to believe the model is best for our needs), it cannot guarantee a Top-1 ImageNet selective accuracy above $95\\%$ for any coverage. 
ViT-B/32-SAM, on the other hand, can provide accuracies above $95\\%$ for all coverages below $50\\%$ .", + "bbox": [ + 169, + 648, + 823, + 720 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In applications where risk (or coverage) constraints are dictated (Geifman & El-Yaniv, 2017), the most straightforward and natural metric is SAC (or selective risk), which directly measures the coverage (resp., risk) given at the required level of risk (resp., coverage) constraint. We demonstrate this in Appendix I, evaluating which models give the most coverage for an ambitious SAC of $99\\%$ . If instead a specific range of coverages is specified, we could measure the area under the RC curve for those coverages: $\\mathrm{AURC}_{\\mathcal{C}}(\\kappa, f|S_m) = \\frac{1}{|\\mathcal{C}|}\\sum_{c\\in \\mathcal{C}}\\hat{r} (f,g_c|S_m)$ , with $\\mathcal{C}$ being those required coverages.", + "bbox": [ + 169, + 726, + 823, + 820 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Often, these requirements are not known or can change as a result of changing circumstances or individual needs. Also, using metrics sensitive to accuracy such as AURC makes designing architectures and methods to improve $\\kappa$ very hard, since an improvement in these metrics could be attributed to either an increase in overall accuracy (if such occurred) or to a real improvement in the model's ranking performance. 
Lastly, some tasks might not allow the model to abstain from making predictions at all, but instead require interpretable and well-calibrated probabilities of correctness, which could be measured using ECE.", + "bbox": [ + 169, + 825, + 828, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/314ef4f65139873f967cdbaaab5f81a96139c78a6a5ff353109de204f28481d9.jpg", + "image_caption": [ + "Figure 3: A comparison of RC curves made by the best (ViT-L/16-384) and worst (EfficientNet-V2-XL) models we evaluated in terms of AUROC. Comparing ViT-B/32-SAM to EfficientNet-V2 exemplifies the fact that neither accuracy nor AURC reflect selective performance well enough." + ], + "image_footnote": [], + "bbox": [ + 269, + 101, + 723, + 348 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.1 MEASURING RANKING AND CALIBRATION", + "text_level": 1, + "bbox": [ + 171, + 428, + 508, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A $\\kappa$ function is not necessarily able to change the model's predictions. Therefore, it can improve the selective risk by ranking correct and incorrect predictions better, inducing a more accurate partial order over instances in $\\mathcal{X}$ . 
Thus, for every two random samples $(x_{1},y_{1}),(x_{2},y_{2})\\sim P(\\mathcal{X},\\mathcal{Y})$ and given that $\\ell (f(x_1),y_1) > \\ell (f(x_2),y_2)$ , the ranking performance of $\\kappa$ is defined as the probability that $\\kappa$ ranks $x_{2}$ higher than $x_{1}$ :", + "bbox": [ + 169, + 455, + 823, + 525 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\Pr \\left[ \\kappa \\left(x _ {1}, \\hat {y} \\mid f\\right) < \\kappa \\left(x _ {2}, \\hat {y} \\mid f\\right) \\mid \\ell \\left(f \\left(x _ {1}\\right), y _ {1}\\right) > \\ell \\left(f \\left(x _ {2}\\right), y _ {2}\\right) \\right] \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 534, + 823, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We discuss this definition in greater detail in Appendix D. The AUROC metric is often used in the field of machine learning. When the $0/1$ loss is in play, it is known that AUROC in fact equals the probability in Equation (1) (Fawcett, 2006) and thus is a proper metric to measure ranking in classification (AKA discrimination). AUROC is furthermore equivalent to Goodman and Kruskal's $\\gamma$ -correlation (Goodman & Kruskal, 1954), which for decades has been extensively used to measure ranking (known as \"resolution\") in the field of metacognition (Nelson, 1984). The precise relationship between $\\gamma$ -correlation and AUROC is $\\gamma = 2 \\cdot \\text{AUROC} - 1$ (Higham & Higham, 2018). We note also that both the $\\gamma$ -correlation and AUROC are nearly identical or closely related to various other correlations and metrics; $\\gamma$ -correlation (AUROC) becomes identical to Kendall's $\\tau$ (up to a linear transformation) in the absence of tied values. 
Both metrics are also closely related to rank-biserial correlation, the Gini coefficient (not to be confused with the measure from economics) and the Mann-Whitney $U$ test, hinting at their importance and usefulness in a variety of fields and settings. In Appendix E, we briefly compare the ranking performance of deep neural networks and humans in metacognitive research, and in Appendix F we address criticism of using AUROC to measure ranking.", + "bbox": [ + 169, + 558, + 826, + 770 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The most widely used metric for calibration is ECE (Naeini et al., 2015). For a finite test set of size $N$ , ECE is calculated by grouping all instances into $m$ interval bins (such that $m \ll N$ ), each of size $\frac{1}{m}$ (the confidence interval of bin $B_j$ is $\left(\frac{j-1}{m}, \frac{j}{m}\right]$ ). With $\mathrm{acc}(B_j)$ being the mean accuracy in bin $B_j$ and $\mathrm{conf}(B_j)$ being its mean confidence, ECE is defined as", + "bbox": [ + 169, + 773, + 823, + 833 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} E C E = \\sum_ {j = 1} ^ {m} \\frac {| B _ {j} |}{N} \\left| \\sum_ {i \\in B _ {j}} \\left( \\frac {\\mathbf {1} [ \\hat {y} _ {f} (x _ {i}) = y _ {i} ]}{| B _ {j} |} - \\frac {\\kappa (x _ {i} , \\hat {y} _ {f} (x _ {i}) | f)}{| B _ {j} |} \\right) \\right| \\\\ = \\sum_ {j = 1} ^ {m} \\frac {\\left| B _ {j} \\right|}{N} \\left| \\operatorname {a c c} \\left(B _ {j}\\right) - \\operatorname {c o n f} \\left(B _ {j}\\right) \\right| \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 842, + 692, + 928 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Since ECE is widely accepted we use it here to 
evaluate calibration, and follow (Guo et al., 2017) in setting the number of bins to $m = 15$ . Many alternatives to ECE exist, allowing an adaptive binning scheme, evaluating the calibration on the non-chosen labels as well, and other various methods (Nixon et al., 2019; Vaicenavicius et al., 2019; Zhao et al., 2020). Relevant to our objective is that by using binning, this metric is not affected by the overall accuracy as is the Brier score (mentioned in Section 1), for example.", + "bbox": [ + 169, + 103, + 826, + 188 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 PERFORMANCE ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 207, + 426, + 222 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e16ae8fc794f6b8b3e22af3848104c33e5b6c16d8f8eeae744aa372c7997e6e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 254, + 239, + 743, + 393 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1fcc5c597d87ea2c03dd757c10dd6ecb06163e6450ec487a274a811e9adb6b18.jpg", + "image_caption": [ + "(a)", + "(b)", + "Figure 4: A comparison of different methods and their improvement in terms of (a) AUROC and (b) ECE, relative to the same model's performance without employing the method. Markers above the x-axis represent models that benefited from the evaluated method, and vice versa. The numbers in the legend to the right of each method indicate the number of pairs compared. Temperature scaling can sometimes harm ECE, even though its purpose is to improve it." + ], + "image_footnote": [], + "bbox": [ + 267, + 412, + 728, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section we study the performance of 523 different models (available in timm 0.4.12 and torchvision 0.10). 
Note that all figures from our analysis are available as interactive plotly plots in the supplementary material, which provides information about every data point.", + "bbox": [ + 169, + 672, + 823, + 715 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "1) Among the training regimes evaluated, knowledge distillation improves performance the most. We evaluated several training regimes: (a) Training that involves KD in any form, including Touvron et al. (2021b), knapsack pruning with distillation (in which the teacher is the original unpruned model) (Aflalo et al., 2020) and a pretraining technique that employs distillation (Ridnik et al., 2021); (b) adversarial training (Xie et al., 2020a; Tramère et al., 2018); (c) pretraining on ImageNet21k (\"pure\", with no additions) (Tan & Le, 2021; Touvron et al., 2021a; 2022); and (d) various forms of weakly or semi-supervised learning (Mahajan et al., 2018; Yalniz et al., 2019; Xie et al., 2020b). To make a fair comparison, we only compare pairs of models such that both models have identical architectures and training regimes, with the exception of the method itself being evaluated (e.g., training with or without knowledge distillation). More information about each data point of comparison is available in the supplementary material. Note that the samples are of various sizes due to the different number of potential models available for each.", + "bbox": [ + 169, + 720, + 826, + 888 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Of the methods mentioned above, training methods incorporating distillation improve AUROC and ECE the most. 
For example, looking at Figure 4a, it is evident that distillation (purple box) almost", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e134eba07fd3104387ff4199406165c294cd546e1215ccc3c7efaa9c54c30250.jpg", + "image_caption": [ + "Figure 5: Comparing teacher models (yellow markers) to their KD students (represented by markers with thick borders and a dot). The performance of each model is measured in AUROC (higher is better) and -log(ECE) (higher is better)." + ], + "image_footnote": [], + "bbox": [ + 269, + 99, + 727, + 294 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "always improves AUROC, and moreover, its median improvement is the best of all techniques evaluated. The same observation can be made with regards to improving ECE; see Figure 4b. Distillation seems to greatly improve both metrics even when the teacher itself is much worse at both metrics. Figure 5 nicely shows this by comparing the teacher architecture and the students in each case. Additionally, in a pruning scenario that included distillation in which the original model was also the teacher (Aflalo et al., 2020), the pruned models outperformed their teachers. The fact that KD improves the model over its original form is surprising, and suggests that the distillation process itself helps uncertainty estimation. In Galil et al. (2023) we find that KD also improves C-OOD detection performance, measured by AUROC. We discuss these effects in greater detail in Appendix G.", + "bbox": [ + 169, + 375, + 826, + 502 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2) Temperature scaling greatly benefits AUROC and selective prediction. 
Evaluations of the simple post-training calibration method of temperature scaling (TS) (Guo et al., 2017), which is widely known to improve ECE without changing the model's accuracy, also revealed several interesting facts: (a) TS consistently and greatly improves AUROC and selective performance (see Figure 4a)—meaning not only does TS calibrate the probabilistic estimation for each individual instance, but it also improves the partial order of all instances induced by those improved estimations. While TS is well known and used for calibration, to the best of our knowledge, its benefits for selective prediction were previously unknown. (b) While TS is usually beneficial, it could harm some models (see Figures 4a and 4b). While it is surprising that TS—a calibration method—would harm ECE, this phenomenon is explained by the fact that TS optimizes NLL and not ECE (to avoid trivial solutions), and the two may sometimes misalign. (c) Models that benefit from TS in terms", + "bbox": [ + 169, + 507, + 826, + 660 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/442e9c2100ba0d0ccdbb6ae60acbe3de55ee92187c02b65bf18b506a5f49b3ae.jpg", + "image_caption": [ + "Figure 6: Out of 523 models evaluated, models that were assigned a temperature higher than 1 by the calibration process tended to degrade in AUROC performance rather than improve. Markers above the x-axis represent models that benefited from TS, and vice versa." + ], + "image_footnote": [], + "bbox": [ + 318, + 675, + 679, + 825 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "of AUROC tend to have been assigned a temperature smaller than 1 by the calibration process (see Figure 6). This, however, does not hold true for ECE (see Figure 14 in Appendix H). 
This example", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "also emphasizes the fact that improvements in terms of AUROC do not necessarily translate into improvements in ECE, and vice versa. (d) While all models usually improve with TS, the overall ranking of uncertainty performance between families tends to stay similar, with the worse (in terms of ECE and AUROC) models closing most of the gap between them and the mediocre ones (see Figure 13 in Appendix H).", + "bbox": [ + 169, + 103, + 823, + 174 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3) A subset of ViTs outperforms all other architectures in selective prediction, ranking and calibration, both in absolute terms and per-model size. Several training regimes (including the original regime from the paper introducing ViT) Dosovitskiy et al. (2021); Steiner et al. (2022); Chen et al. (2022); Ridnik et al. (2021) result in ViTs that outperform all other architectures and training regimes in terms of AUROC and ECE (see Figure 1; Figure 13 in Appendix H shows this is true even after using TS) as well as for the SAC of $99\\%$ we explored (see Figure 7 and Appendix I). These ViTs also outperform all other models in terms of C-OOD detection performance (Galil et al., 2023). 
Moreover, for any size, ViT models outperform their competition in all of these metrics (see Figures 9 and 10 in Appendix B and Figure 15 in Appendix I).", + "bbox": [ + 169, + 180, + 826, + 306 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/37fbae763d641ce784deabef302abec247b021eabe92d823ac12a20aa1e46db4.jpg", + "image_caption": [ + "Figure 7: Comparison of models by their overall accuracy and the coverage they are able to provide given a selective accuracy constraint of Top-1 $99\\%$ on ImageNet. A higher coverage is better. Only ViT models are able to provide coverage beyond $30\\%$ for this constraint. They provide more coverage than any other model compared to their accuracy or size. \"Various\" refers to all other models (out of the 523) that were not mentioned by name." + ], + "image_footnote": [], + "bbox": [ + 290, + 316, + 707, + 518 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Further research into other training regimes, however, reveals that not all training regimes result in superb performance (Touvron et al., 2021b; 2022; Singh et al., 2022; Paszke et al., 2019) (these ViTs are dubbed \"ViT* in the figures), even when a similar amount of data is introduced into the training and strong augmentations are used. In fact, the models trained by Chen et al. (2022) were not pretrained at all and yet reach superb ranking. Even the largest model introduced by Tran et al. (2022), which is a large modified ViT that was pretrained on JFT-4B (a dataset containing 4 billion images) with the aim of excelling in uncertainty estimation (and other areas), is outperformed by the best ViT we evaluated: Plex L achieves an AUROC of 87.7 (while its smaller versions, Plex M and Plex S, achieve an AUROC of 87.4 and 86.7, respectively), compared to 88.5 achieved by ViT-L/16-384 that has less parameters and was pretrained on ImageNet-21k. 
In total, 18 ViTs trained on ImageNet-21k outperform² Plex L, among which are two variations of small ViTs (each with 36 or 22 million parameters). In Appendix J we analyze the different hyperparameters and augmentations used for training the ViT models evaluated in this paper. Unfortunately, no clear conclusions emerge to explain the advantage of the successful training regimes. There is, however, ample evidence to show that advanced augmentations are unlikely to be part of such an explanation.", + "bbox": [ + 169, + 619, + 826, + 829 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The above facts suggest that the excellent performance exhibited by some ViTs cannot be attributed to the amount of data or to the augmentations used during training. These observations warrant", + "bbox": [ + 169, + 835, + 823, + 864 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "2The authors had not released clear results for Plex ECE performance on ImageNet, making a comparison of calibration difficult. The authors mentioned that the average ECE of Plex L in CIFAR-10, CIFAR-100 and ImageNet is slightly below 0.01. 
Our evaluations revealed six ViTs that achieved the same results, with the most calibrated model being ViT-T/16 with an ECE of 0.0054 on ImageNet.", + "bbox": [ + 169, + 871, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "additional research with the hope of either training more robust ViTs or transferring the unidentified ingredient of the successful subset of ViTs into other models.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4) Correlations between AUROC, ECE, accuracy and the model's size could either be positive or negative, and depend on the family of architectures evaluated. This observation contradicts previous smaller scale studies on calibration. While AUROC and ECE are (negatively) correlated (they have a Spearman correlation of -0.44, meaning that generally, as AUROC improves, so does ECE), their agreement on the best performing model depends greatly on the architectural family in question. For example, the Spearman correlation between the two metrics evaluated on 28 undistilled XCiTs is 0.76 (meaning ECE deteriorates as AUROC improves), while for the 33 ResNets (He et al., 2016) evaluated, the correlation is -0.74. Another general observation is that contrary to previous work by (Guo et al., 2017) concerning ECE, the correlations between ECE and the accuracy or the number of model parameters are nearly zero, although each family tends to have a strong correlation, either negative or positive. We include a family-based comparison in Appendix L for correlations between AUROC/ECE and accuracy, number of parameters and input size. 
These results suggest that while some architectures might utilize extra resources to achieve improved uncertainty estimation capabilities, other architectures do not and are even harmed in this respect.", + "bbox": [ + 169, + 138, + 826, + 333 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5) The zero-shot language-vision CLIP model is well-calibrated, with its best instance outperforming $96\\%$ of all other models. CLIP (Radford et al., 2021) enables zero-shot classification and demonstrates impressive performance. We find it is also inclined to be well-calibrated. See Appendix K for details about how we use CLIP. The most calibrated CLIP is based on ViT-B/32 with a linear-probe added to it, and obtains an ECE of $1.3\\%$ , which outperforms $96\\%$ of models evaluated. Moreover, for their size category, CLIP models tend to outperform their competition in calibration, with the exception of ViTs (see Figure 10 in Appendix B). While this trend is clear for zero-shot CLIPs, we note that some models' calibration performance deteriorates with the addition of a linear-probe. Further research is required to understand the ingredients of multimodal models' contribution to calibration, and to find ways to utilize them to get better calibrated models. For example, could a multimodal pretraining regime be used to get better calibrated models?", + "bbox": [ + 169, + 340, + 826, + 494 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6) MC dropout does not improve selective performance, in accordance with previous works. We evaluate the performance of MC dropout using predictive entropy as its confidence score and 30 dropout-enabled forward passes. We do not measure its effects on ECE since entropy scores do not reside in [0, 1]. Using MC dropout causes a consistent drop in both AUROC and selective performance compared with using the same models with softmax as the $\\kappa$ (see Appendix M and Figure 4a). 
MC dropout's underperformance was also previously observed in (Geifman & El-Yaniv, 2017). We note, however, that evaluations we have conducted in Galil et al. (2023) show that MC dropout performs well when dealing with C-OOD data.", + "bbox": [ + 169, + 500, + 826, + 612 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 CONCLUDING REMARKS", + "text_level": 1, + "bbox": [ + 171, + 633, + 413, + 648 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We presented a comprehensive study of the effectiveness of numerous DNN architectures (families) in providing reliable uncertainty estimation, including the impact of various techniques on improving such capabilities. Our study led to many new insights and perhaps the most important ones are: (1) architectures trained with distillation almost always improve their uncertainty estimation performance, (2) temperature scaling is very useful not only for calibration but also for ranking and selective prediction, and (3) no DNN (evaluated in this study) demonstrated an uncertainty estimation performance comparable—in any metric tested—to a subset of ViT models (see Section 3).", + "bbox": [ + 169, + 666, + 826, + 765 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our work leaves open many interesting avenues for future research and we would like to mention a few. Perhaps the most interesting question is why distillation is so beneficial in boosting uncertainty estimation. Next, is there an architectural secret in vision transformers (ViT) that enables their uncertainty estimation supremacy under certain training regimes? This issue is especially puzzling given the fact that comparable performance is not observed in many other supposedly similar transformer-based models that we tested. If the secret is not in the architecture, what is the mysterious ingredient of the subset of training regimes that produces such superb results, and how can it be used to train other models? 
Finally, can we create specialized training regimes (e.g., Geifman & El-Yaniv (2019)), specialized augmentations, special pretraining regimes (such as CLIP's multimodal training regime) or even specialized neural architecture search (NAS) strategies that can promote superior uncertainty estimation performance?", + "bbox": [ + 169, + 771, + 826, + 924 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 103, + 356, + 117 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "This research was partially supported by the Israel Science Foundation, grant No. 710/18.", + "We thank Prof. Rakefet Ackerman for her help with understanding how uncertainty estimation performance is evaluated for humans in the field of metacognition, and for her useful comments for Appendix E." + ], + "bbox": [ + 171, + 132, + 825, + 196 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 215, + 285, + 229 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Rakefet Ackerman, Avi Parush, Fareda Nassar, and Avraham Shtub. Metacognition and system usability: Incorporating metacognitive research paradigm into usability testing. Computers in Human Behavior, 54:101-113, January 2016. doi: 10.1016/j.chb.2015.07.041. URL https://doi.org/10.1016/j.chb.2015.07.041.", + "Rakefet Ackerman, Avigdor Gal, Tomer Sagi, and Roee Shraga. A cognitive model of human bias in matching. In PRICAI 2019: Trends in Artificial Intelligence, pp. 632-646. Springer International Publishing, 2019. doi: 10.1007/978-3-030-29908-8_50. 
URL https://doi.org/10.1007/978-3-030-29908-8_50.", + "Yonathan Aflalo, Asaf Noy, Ming Lin, Itamar Friedman, and Lihi Zelnik-Manor. Knapsack pruning with inner distillation. CoRR, abs/2002.08258, 2020. URL https://arxiv.org/abs/2002.08258.", + "Alaaeldin Ali, Hugo Touvron, Mathilde Caron, Piotr Bojanowski, Matthijs Douze, Armand Joulin, Ivan Laptev, Natalia Neverova, Gabriel Synnaeve, Jakob Verbeek, and Hervé Jégou. Xcit: Cross-covariance image transformers. In Marc'Aurelio Ranzato, Alina Beygelzimer, Yann N. Dauphin, Percy Liang, and Jennifer Wortman Vaughan (eds.), Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pp. 20014-20027, 2021. URL https://proceedings.neurips.cc/paper/2021/hash/a655fbe4b8d7439994aa37ddad80de56-Abstract.html.", + "Alexandra Basile, Maggie E. Toplak, and Brendan F. Andrade. Using metacognitive methods to examine emotion recognition in children with ADHD. Journal of Attention Disorders, 25(2): 245-257, November 2018. doi: 10.1177/1087054718808602. URL https://doi.org/10.1177/1087054718808602.", + "Glenn W. Brier. Verification of Forecasts Expressed in Terms of Probability. Monthly Weather Review, 78(1):1, January 1950. doi: 10.1175/1520-0493(1950)078<0001:VOFEIT>2.0.CO;2.", + "Xiangning Chen, Cho-Jui Hsieh, and Boqing Gong. When vision transformers outperform resnets without pre-training or strong data augmentations. In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. OpenReview.net, 2022. URL https://openreview.net/forum?id=LtKcMgGOeLt.", + "C. K. Chow. An optimum character recognition system using decision functions. IRE Transactions on Electronic Computers, EC-6(4):247-254, 1957. doi: 10.1109/TEC.1957.5222035.", + "L. P. Cordella, C. De Stefano, F. Tortorella, and M. Vento. A method for improving classification reliability of multilayer perceptrons. 
IEEE Transactions on Neural Networks, 6(5):1140-1147, 1995. doi: 10.1109/72.410358.", + "C. De Stefano, C. Sansone, and M. Vento. To reject or not to reject: that is the question-an answer in case of neural classifiers. IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews), 30(1):84-94, 2000. doi: 10.1109/5326.827457.", + "Yukun Ding, Jinglan Liu, Jinjun Xiong, and Yiyu Shi. Evaluation of neural network uncertainty estimation with application to resource-constrained platforms. CoRR, abs/1903.02050, 2019. URL http://arxiv.org/abs/1903.02050.", + "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=YicbFdNTTy." + ], + "bbox": [ + 173, + 238, + 826, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ran El-Yaniv and Yair Wiener. On the foundations of noise-free selective classification. Journal of Machine Learning Research, 11(5), 2010.", + "Tom Fawcett. An introduction to roc analysis. Pattern Recognition Letters, 27(8):861-874, 2006. ISSN 0167-8655. doi: https://doi.org/10.1016/j.patrec.2005.10.010. URL https://www.sciencedirect.com/science/article/pii/S016786550500303X. ROC Analysis in Pattern Recognition.", + "K. Fiedler, Rakefet Ackerman, and Chiara Scarampi. ! 
Metacognition: Monitoring and controlling one’s own knowledge, reasoning and decisions. 2019.", + "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=6Tm1mposlrM.", + "Jonathan Frankle and Michael Carbin. The lottery ticket hypothesis: Training pruned neural networks. CoRR, abs/1803.03635, 2018. URL http://arxiv.org/abs/1803.03635.", + "Yarin Gal and Zoubin Ghahramani. Dropout as a bayesian approximation: Representing model uncertainty in deep learning. In Maria-Florina Balcan and Kilian Q. Weinberger (eds.), Proceedings of the 33rd International Conference on Machine Learning, ICML 2016, New York City, NY, USA, June 19-24, 2016, volume 48 of JMLR Workshop and Conference Proceedings, pp. 1050-1059. JMLR.org, 2016. URL http://proceedings.mlr.press/v48/gal16.html.", + "Ido Galil, Mohammed Dabbah, and Ran El-Yaniv. A framework for benchmarking class-out-of-distribution detection and its application to imagenet. In International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=Iuubb9W6Jtk.", + "Yonatan Geifman and Ran El-Yaniv. Selective classification for deep neural networks. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 4878-4887, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/4a8423d5e91fda00bb7e46540e2b0cf1-Abstract.html.", + "Yonatan Geifman and Ran El-Yaniv. Selectivenet: A deep neural network with an integrated reject option. CoRR, abs/1901.09192, 2019. 
URL http://arxiv.org/abs/1901.09192.", + "Yonatan Geifman, Guy Uziel, and Ran El-Yaniv. Bias-reduced uncertainty estimation for deep neural classifiers. In International Conference on Learning Representations, 2018.", + "Leo A. Goodman and William H. Kruskal. Measures of association for cross classifications. Journal of the American Statistical Association, 49(268):732-764, December 1954. doi: 10.1080/01621459.1954.10501231. URL https://doi.org/10.1080/01621459.1954.10501231.", + "Thomas D. Griffin, Jennifer Wiley, and Keith W. Thiede. The effects of comprehension-test expectancies on metacomprehension accuracy. Journal of Experimental Psychology: Learning, Memory, and Cognition, 45(6):1066-1092, June 2019. doi: 10.1037/xlm0000634. URL https://doi.org/10.1037/xlm0000634.", + "Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. On calibration of modern neural networks. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pp. 1321-1330. PMLR, 2017. URL http://proceedings.mlr.press/v70/guo17a.html.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 770-778. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.90. URL https://doi.org/10.1109/CVPR.2016.90.", + "Philip A. Higham and D. Paul Higham. New improved gamma: Enhancing the accuracy of goodman-kruskal's gamma using ROC curves. Behavior Research Methods, 51(1):108-125, September 2018. doi: 10.3758/s13428-018-1125-5. URL https://doi.org/10.3758/s13428-018-1125-5." 
+ ], + "bbox": [ + 171, + 102, + 828, + 925 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network, 2015.", + "Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, and Hartwig Adam. Searching for mobilenetv3. CoRR, abs/1905.02244, 2019. URL http://arxiv.org/abs/1905.02244.", + "Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6402-6413, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/9ef2ed4b7fd2c810847ffa5fa85bce38-Abstract.html.", + "Ming Lin, Hesen Chen, Xiuyu Sun, Qi Qian, Hao Li, and Rong Jin. Neural architecture design for gpu-efficient networks, 2020.", + "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id=Bkg6RiCqY7.", + "Wesley J. Maddox, Pavel Izmailov, Timur Garipov, Dmitry P. Vetrov, and Andrew Gordon Wilson. A simple baseline for bayesian uncertainty in deep learning. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. 
Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 13132-13143, 2019. URL https://proceedings.neurips.cc/paper/2019/hash/118921efba23fc329e6560b27861f0c2-Abstract.html.", + "Dhruv Mahajan, Ross B. Girshick, Vignesh Ramanathan, Kaiming He, Manohar Paluri, Yixuan Li, Ashwin Bharambe, and Laurens van der Maaten. Exploring the limits of weakly supervised pretraining. In Vittorio Ferrari, Martial Hebert, Cristian Sminchisescu, and Yair Weiss (eds.), Computer Vision - ECCV 2018 - 15th European Conference, Munich, Germany, September 8-14, 2018, Proceedings, Part II, volume 11206 of Lecture Notes in Computer Science, pp. 185-201. Springer, 2018. doi: 10.1007/978-3-030-01216-8\\_12. URL https://doi.org/10.1007/978-3-030-01216-8_12.", + "Jooyoung Moon, Jihyo Kim, Younghak Shin, and Sangheum Hwang. Confidence-aware learning for deep neural networks. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 7034-7044. PMLR, 2020. URL http://proceedings.mlr.press/v119/moon20a.html.", + "Zachary Nado, Neil Band, Mark Collier, Josip Djolonga, Michael W. Dusenberry, Sebastian Farquhar, Angelos Filos, Marton Havasi, Rodolphe Jenatton, Ghassen Jerfel, Jeremiah Liu, Zelda Mariet, Jeremy Nixon, Shreyas Padhy, Jie Ren, Tim G. J. Rudner, Yeming Wen, Florian Wenzel, Kevin Murphy, D. Sculley, Balaji Lakshminarayanan, Jasper Snoek, Yarin Gal, and Dustin Tran. Uncertainty baselines: Benchmarks for uncertainty & robustness in deep learning. CoRR, abs/2106.04015, 2021. URL https://arxiv.org/abs/2106.04015.", + "Mahdi Pakdaman Naeini, Gregory F. Cooper, and Milos Hauskrecht. Obtaining well calibrated probabilities using bayesian binning. 
In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, AAAI'15, pp. 2901-2907. AAAI Press, 2015. ISBN 0262511290.", + "Niv Nayman, Yonathan Aflalo, Asaf Noy, and Lihi Zelnik. Hard constrained differentiable neural architecture search. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 7979-7990. PMLR, 2021. URL http://proceedings.mlr.press/v139/nayman21a.html.", + "Thomas O. Nelson. A comparison of current measures of the accuracy of feeling-of-knowing predictions. Psychological Bulletin, 95(1):109-133, 1984. doi: 10.1037/0033-2909.95.1.109. URL https://doi.org/10.1037/0033-2909.95.1.109." + ], + "bbox": [ + 171, + 102, + 828, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jeremy Nixon, Michael W. Dusenberry, Linchuan Zhang, Ghassen Jerfel, and Dustin Tran. Measuring calibration in deep learning. In IEEE Conference on Computer Vision and Pattern Recognition Workshops, CVPR Workshops 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 38-41. Computer Vision Foundation / IEEE, 2019. URL http://openaccess.thecvf.com/content_CVPRW_2019/html/Uncertainty_and_Robustness_in_Deep_Visual_Learning/Nixon_Measuring_Calibration_in_Deep_Learning_CVPRW_2019_paper.html.", + "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 
Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems 32, pp. 8024-8035. Curran Associates, Inc., 2019. URL http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8748-8763. PMLR, 2021. URL http://proceedings.mlr.press/v139/radford21a.html.", + "Tal Ridnik, Emanuel Ben Baruch, Asaf Noy, and Lihi Zelnik. Imagenet-21k pretraining for the masses. In Joaquin Vanschoren and Sai-Kit Yeung (eds.), Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual, 2021. URL https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/98f13708210194c475687be6106a3b84-Abstract-round1.html.", + "Mark Sandler, Andrew G. Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh Chen. Mobilenetv2: Inverted residuals and linear bottlenecks. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pp. 4510-4520. Computer Vision Foundation / IEEE Computer Society, 2018. doi: 10.1109/CVPR.2018.00474. URL http://openaccess.thecvf.com/content_cvpr_2018/html/Sandler_MobileNetV2_Inverted_Residuals_CVPR_2018_paper.html.", + "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. 
In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. URL http://arxiv.org/abs/1409.1556.", + "Mannat Singh, Laura Gustafson, Aaron Adcock, Vinicius de Freitas Reis, Bugra Gedik, Raj Prateek Kosaraju, Dhruv Mahajan, Ross B. Girshick, Piotr Dollár, and Laurens van der Maaten. Revisiting weakly supervised pre-training of visual perception models. CoRR, abs/2201.08371, 2022. URL https://arxiv.org/abs/2201.08371.", + "Andreas Peter Steiner, Alexander Kolesnikov, Xiaohua Zhai, Ross Wightman, Jakob Uszkoreit, and Lucas Beyer. How to train your vit? data, augmentation, and regularization in vision transformers. Transactions on Machine Learning Research, 2022. URL https://openreview.net/forum?id=4nPswr1KcP.", + "Mingxing Tan and Quoc V. Le. Efficientnetv2: Smaller models and faster training. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10096-10106. PMLR, 2021. URL http://proceedings.mlr.press/v139/tan21a.html.", + "Hugo Touvron, Piotr Bojanowski, Mathilde Caron, Matthieu Cord, Alaaeldin El-Nouby, Edouard Grave, Armand Joulin, Gabriel Synnaeve, Jakob Verbeek, and Hervé Jégou. Resmlp: Feedforward networks for image classification with data-efficient training. CoRR, abs/2105.03404, 2021a. URL https://arxiv.org/abs/2105.03404." 
+ ], + "bbox": [ + 174, + 102, + 826, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10347-10357. PMLR, 2021b. URL http://proceedings.mlr.press/v139/touvron21a.html.", + "Hugo Touvron, Matthieu Cord, and Herve Jégou. Deit III: revenge of the vit. CoRR, abs/2204.07118, 2022. doi: 10.48550/arXiv.2204.07118. URL https://doi.org/10.48550/arXiv.2204.07118.", + "Florian Tramèr, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. Ensemble adversarial training: Attacks and defenses. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=rkZvSe-RZ.", + "Dustin Tran, Jeremiah Liu, Michael W. Dusenberry, Du Phan, Mark Collier, Jie Ren, Kehang Han, Zi Wang, Zelda Mariet, Huiyi Hu, Neil Band, Tim G. J. Rudner, Karan Singhal, Zachary Nado, Joost van Amersfoort, Andreas Kirsch, Rodolphe Jenatton, Nithum Thain, Honglin Yuan, Kelly Buchanan, Kevin Murphy, D. Sculley, Yarin Gal, Zoubin Ghahramani, Jasper Snoek, and Balaji Lakshminarayanan. Plex: Towards reliability using pretrained large model extensions. CoRR, abs/2207.07411, 2022. doi: 10.48550/arXiv.2207.07411. URL https://doi.org/10.48550/arXiv.2207.07411.", + "Monika Undorf and Arndt Broder. Cue integration in metamemory judgements is strategic. 
Quarterly Journal of Experimental Psychology, 73(4):629-642, October 2019. doi: 10.1177/1747021819882308. URL https://doi.org/10.1177/1747021819882308.", + "Juozas Vaicenavicius, David Widmann, Carl R. Andersson, Fredrik Lindsten, Jacob Roll, and Thomas B. Schön. Evaluating model calibration in classification. In Kamalika Chaudhuri and Masashi Sugiyama (eds.), The 22nd International Conference on Artificial Intelligence and Statistics, AISTATS 2019, 16-18 April 2019, Naha, Okinawa, Japan, volume 89 of Proceedings of Machine Learning Research, pp. 3459-3467. PMLR, 2019. URL http://proceedings.mlr.press/v89/vaicenavicius19a.html.", + "Ross Wightman. Pytorch image models. https://github.com/rwightman/pytorch-image-models, 2019.", + "Cihang Xie, Mingxing Tan, Boqing Gong, Jiang Wang, Alan L. Yuille, and Quoc V. Le. Adversarial examples improve image recognition. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 816-825. Computer Vision Foundation / IEEE, 2020a. doi: 10.1109/CVPR42600.2020.00090. URL https://openaccess.thecvf.com/content_CVPR_2020/html/Xie_Adversarial_Examples_Improve_Image_Recognition_CVPR_2020_paper.html.", + "Qizhe Xie, Minh-Thang Luong, Eduard H. Hovy, and Quoc V. Le. Self-training with noisy student improves imagenet classification. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 10684-10695. Computer Vision Foundation / IEEE, 2020b. doi: 10.1109/CVPR42600.2020.01070. URL https://openaccess.thecvf.com/content_CVPR_2020/html/Xie_Self-Training_With_Noisy_Student_Improves_ImageNet_Classification_CVPR_2020_paper.html.", + "I. Zeki Yalniz, Hervé Jégou, Kan Chen, Manohar Paluri, and Dhruv Mahajan. Billion-scale semi-supervised learning for image classification, 2019.", + "Fisher Yu, Dequan Wang, Evan Shelhamer, and Trevor Darrell. Deep layer aggregation. 
In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2403-2412, 2018. doi: 10.1109/CVPR.2018.00255.", + "Shengjia Zhao, Tengyu Ma, and Stefano Ermon. Individual calibration with randomized forecasting. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 11387-11397. PMLR, 2020. URL http://proceedings.mlr.press/v119/zhao20e.html." + ], + "bbox": [ + 171, + 102, + 828, + 917 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A THE INVESTMENT EXAMPLE", + "text_level": 1, + "bbox": [ + 171, + 102, + 447, + 118 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Let us consider two classification models for the stock market that predict whether a stock's value is about to increase, decrease or remain neutral (three-class classification). Suppose that Model A has a $95\\%$ true accuracy, and generates a confidence score of 0.95 on any prediction (even on misclassified instances); Model B has a $40\\%$ true accuracy, but always gives a confidence score of 0.6 on correct predictions, and 0.4 on incorrect ones. We now try and evaluate these two models using the uncertainty metrics mentioned in Section 1 to see which can reveal Model B's superior uncertainty estimation performance. AURC will fail due to its sensitivity to accuracy (the AURC of Model B is 0.12, more than twice as bad as the AURC for Model A, which is 0.05). NLL will rank Model A four times higher (Model A's NLL is 0.23 and Model B's is 0.93). The Brier score would also much prefer Model A (giving it a score of 0.096 while giving Model B a score of 0.54). 
Evaluating the models' calibration with ECE will also not reveal Model B's advantages, since it is less calibrated than Model A, which has perfect calibration (Model A has an ECE of 0, and Model B has a worse ECE of 0.4).", + "bbox": [ + 169, + 133, + 826, + 301 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "AUROC, on the other hand, would give Model B a perfect score of 1 and a terrible score of 0.5 to Model A. The selective risk for Model B would be better for any coverage of stock predictions below $40\\%$ , and for any SAC above $95\\%$ the coverage for Model A would be 0, but 0.4 for Model B.", + "bbox": [ + 169, + 306, + 823, + 349 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Those two metrics are not perfect for any example. Let us instead compare two different models for the task of predicting the weather when we cannot abstain from making predictions. Accordingly, being required to provide an accurate probabilistic uncertainty estimation of the model's predictions, AUROC and selective risk would be meaningless (due to the model's inability to abstain in this task), but ECE or the Brier Score would better evaluate the performance the new task requires.", + "bbox": [ + 169, + 356, + 826, + 426 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B RANKING AND CALIBRATION VISUAL COMPARISON", + "text_level": 1, + "bbox": [ + 171, + 446, + 643, + 463 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A comparison of 523 models by their AUROC ( $\\times 100$ , higher is better) and -log(ECE) (higher is better) on ImageNet is visualized in Figure 8. An interactive version of this figure is provided as supplementary material. To compare models fairly by their size, we plot two graphs with the logarithm of the number of parameters as the X-axis, so that models sharing the same x value can be compared solely based on their y value. 
In Figure 9 we set the X axis to be AUROC (higher is better), and see ViTs outperform any other architecture with a comparable amount of parameters by a large margin. We can also observe that using distillation creates a consistent improvement in AUROC. In Figure 10 we set the X axis to be the negative logarithm of ECE (higher is better) and observe a very similar trend, with ViT outperforming its competition for any model size.", + "bbox": [ + 169, + 478, + 826, + 604 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C DEMONSTRATION OF E-AURC'S DEPENDENCE ON THE MODEL'S ACCURACY", + "text_level": 1, + "bbox": [ + 169, + 625, + 750, + 656 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Excess-AURC (E-AURC) was suggested by Geifman et al. (2018) as an alternative to AURC (explained in Section 2). To calculate E-AURC, two AURC scores need to be calculated: (1) $AURC(model)$ , the AURC value of the actual model and (2) $AURC(model^{*})$ , the AURC value of a hypothetical model with identical predicted labels as the first model, but that outputs confidence values that induce a perfect partial order on the instances in terms of their correctness. The latter means that all incorrectly predicted instances are assigned confidence values lower than the correctly predicted instances.", + "bbox": [ + 169, + 672, + 826, + 772 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E-AURC is then defined as $AURC(model) - AURC(model^{*})$ . 
In essence, this metric acknowledges that given a model's accuracy, the area of $AURC(model^{*})$ is always unavoidable no matter how good the partial order is, but anything above that could have been minimized if the $\\kappa$ function was better at ranking, assigning correct instances higher values than incorrect ones and inducing a better partial order over the instances.", + "bbox": [ + 169, + 777, + 823, + 848 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This metric indeed helps to reduce some of the sensitivity to accuracy suffered by AURC, and for the example presented in Section 1, E-AURC would have given a perfect score of 0 to the model inducing a perfect partial order by its confidence values (Model B). It is easy, however, to craft examples showing that E-AURC prefers models with higher accuracy, even if they have lower or equal capacity to rank.", + "bbox": [ + 169, + 854, + 825, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/9d9ebd2771f1ac371bbf660f35fae88018b600af2f4c4ec467ea5a18bc2e73a0.jpg", + "image_caption": [ + "Figure 8: A comparison of 523 models by their AUROC ( $\\times 100$ , higher is better) and log(ECE) (lower is better) on ImageNet. Each marker's size is determined by the model's number of parameters. Each dotted marker represents a distilled version of the original. An interactive version of this figure is provided as supplementary material." 
+ ], + "image_footnote": [], + "bbox": [ + 263, + 140, + 733, + 814 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 477, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3d1c595130db629b8f46da807f5c489fd04b7ff94c241895b7b8b54048b0b313.jpg", + "image_caption": [ + "Figure 9: A comparison of 523 models by their AUROC ( $\\times 100$ , higher is better) and log(number of model's parameters) on ImageNet. Each dotted marker represents a distilled version of the original." + ], + "image_footnote": [], + "bbox": [ + 240, + 99, + 763, + 367 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/2540814b7d1ea2012307b39fcf8425b3d6ec27ea7291a6317dfa00cd7f9805d4.jpg", + "image_caption": [ + "Figure 10: A comparison of 523 models by their -log(ECE) (higher is better) and log(number of model's parameters) on ImageNet. Each dotted marker represents a distilled version of the original." + ], + "image_footnote": [], + "bbox": [ + 204, + 420, + 792, + 670 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "To demonstrate this in a simple way, let us consider two models with a complete lack of capacity to rank correct and incorrect predictions correctly, always outputting the same confidence score. Model A has an accuracy of $20\\%$ (thus an error rate of $80\\%$ ), and Model B has an accuracy of $80\\%$ (and an error rate of $20\\%$ ). A good ranking metric should evaluate them equally (the same way E-AURC gives the same score for two models that rank perfectly regardless of their accuracy). In Figure 11 we plot their RC curves with dashed lines, which are both straight lines due to their lack of ranking ability. 
We can calculate both of these models' AURCs, $AURC(modelA) = 0.8$ , $AURC(modelB) = 0.2$ .", + "bbox": [ + 169, + 734, + 826, + 835 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The next thing to calculate is the best AURC values those models could have achieved given the same accuracy if they had a perfect partial order. We plot these hypothetical models' RC curves in Figure 11 as solid lines. Their selective risk remains 0 for every coverage below their total accuracy, since these hypothetical models assigned the highest confidence to all of their correct instances first. As the coverage increases and they have no more correct instances to select, they begin to give instances that are incorrect, and thus their selective risk deteriorates for higher coverages.", + "bbox": [ + 169, + 839, + 828, + 926 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/d0fefebcf81f0e5a828b47b663d84eb7f06149d59c176e24af2536dc97cb3db3.jpg", + "image_caption": [ + "Figure 11: The RC curves for Models A and B are plotted with dashed lines. The RC curves for the hypothetically optimal versions of Models A and B are plotted with solid lines." + ], + "image_footnote": [], + "bbox": [ + 303, + 101, + 697, + 378 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Calculating both of these hypothetical models' AURCs gives us the following: $AURC(modelA^{*}) = 0.482$ , $AURC(modelB^{*}) = 0.022$ . Subtracting our results we get: E-AURC(modelA) = 0.8 - 0.482 = 0.318, E-AURC(modelB) = 0.2 - 0.022 = 0.178. 
Hence, E-AURC prefers Model B over Model A, even though both do not discriminate at all between incorrect and correct instances.", + "bbox": [ + 169, + 452, + 823, + 510 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D MORE ON THE DEFINITION OF RANKING", + "text_level": 1, + "bbox": [ + 171, + 539, + 549, + 555 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Let us consider a finite set $S_{m} = \{(x_{i},y_{i})\}_{i = 1}^{m}\sim P_{X,Y}$ . We assume that there are no two identical values given by $\kappa$ on $S_{m}$ . Such an assumption is reasonable when choosing a continuous confidence signal.", + "bbox": [ + 169, + 575, + 823, + 619 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We further denote $c$ as the number of concordant pairs (i.e., pairs in $S_{m}$ that satisfy the condition $[\kappa(x_{i}, \hat{y} | f) < \kappa(x_{j}, \hat{y} | f) \cap \ell(f(x_{i}), y_{i}) > \ell(f(x_{j}), y_{j})]$ ) and $d$ as the number of discordant pairs (i.e., pairs in $S_{m}$ that satisfy the condition $[\kappa(x_{i}, \hat{y} | f) > \kappa(x_{j}, \hat{y} | f) \cap \ell(f(x_{i}), y_{i}) > \ell(f(x_{j}), y_{j})]$ ).", + "bbox": [ + 169, + 625, + 823, + 669 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We assume, for now, that there are no two identical values given by $\ell$ on $S_{m}$ . 
Accordingly, we can further develop Equation (1) from Section 2.1 using the definition of conditional probability,", + "bbox": [ + 169, + 674, + 823, + 705 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\Pr [ \kappa (x _ {i}, \hat {y} | f) < \kappa (x _ {j}, \hat {y} | f) | \ell (f (x _ {i}), y _ {i}) > \ell (f (x _ {j}), y _ {j}) ] =\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 738, + 696, + 753 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\frac {\mathbf {P r} [ \kappa (x _ {i} , \hat {y} | f) < \kappa (x _ {j} , \hat {y} | f) \cap \ell (f (x _ {i}) , y _ {i}) > \ell (f (x _ {j}) , y _ {j}) ]}{\mathbf {P r} [ \ell (f (x _ {i}) , y _ {i}) > \ell (f (x _ {j}) , y _ {j}) ]},\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 756, + 694, + 791 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "which can be approximated empirically, using the maximum likelihood estimator, as", + "bbox": [ + 169, + 805, + 692, + 821 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\frac {c}{\binom {m} {2}}. 
\\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 480, + 854, + 823, + 887 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We note that the last equation is identical to Kendall's $\\tau$ up to a linear transformation, which equals", + "bbox": [ + 169, + 909, + 823, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {c - d}{\\binom {m} {2}} = \\frac {c - d + c - c}{\\binom {m} {2}} \\\\ = \\frac {2 c - (c + d)}{\\binom {m} {2}} = \\frac {2 c}{\\binom {m} {2}} - \\frac {c + d}{\\binom {m} {2}} = \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 123, + 614, + 193 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n2 \\cdot \\frac {c}{\\binom {m} {2}} - 1 = 2 \\cdot [ \\text {E q u a t i o n} 2 ] - 1.\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 196, + 614, + 224 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Otherwise, if the loss assigns two identical values to a pair of points in $S_{m}$ , but $\\kappa$ does not, then we get:", + "bbox": [ + 169, + 232, + 823, + 260 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {c}{c + d}. 
\\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 473, + 285, + 823, + 311 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "which is identical to Goodman & Kruskal's $\\gamma$ -correlation up to a linear transformation", + "bbox": [ + 169, + 318, + 740, + 333 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {c - d}{c + d} = \\frac {c - d + c - c}{c + d} = \\frac {2 c - (c + d)}{c + d} = \\\\ \\frac {2 c}{c + d} - \\frac {c + d}{c + d} = 2 \\cdot [ \\text {E q u a t i o n} 3 ] - 1. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 339, + 640, + 402 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D.1 INEQUALITIES OF THE DEFINITION", + "text_level": 1, + "bbox": [ + 171, + 417, + 457, + 431 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "One might wonder why Equation (1) should have strict inequalities rather than non-strict ones to define ranking. As we discuss below, this would damage the definition:", + "bbox": [ + 169, + 444, + 823, + 472 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "(1) If the losses had a non-strict inequality:", + "bbox": [ + 169, + 479, + 455, + 494 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\Pr [ \\kappa (x _ {1}, \\hat {y} | f) < \\kappa (x _ {2}, \\hat {y} | f) | \\ell (f (x _ {1}), y _ {1}) \\geq \\ell (f (x _ {2}), y _ {2}) ]\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 501, + 687, + 518 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Consequently, in the case of classification, for example, this probability would increase for any pairs consisting of correct instances with different confidences. 
This would yield no benefit in ranking between incorrect and correct instances and motivates giving different confidence values for instances with the same loss—a fact that would not truly add any value.", + "bbox": [ + 169, + 527, + 823, + 583 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "(2) If the $\\kappa$ values had a non-strict inequality:", + "bbox": [ + 169, + 590, + 472, + 604 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\Pr [ \\kappa (x _ {1}, \\hat {y} | f) \\leq \\kappa (x _ {2}, \\hat {y} | f) | \\ell (f (x _ {1}), y _ {1}) > \\ell (f (x _ {2}), y _ {2}) ].\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 612, + 689, + 630 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This probability would increase for any pair $(x_{1}, x_{2})$ such that $\\kappa(x_{1}, \\hat{y} | f) = \\kappa(x_{2}, \\hat{y} | f)$ and $\\ell(f(x_{1})) > \\ell(f(x_{2}))$ , although $\\kappa$ should have ranked $x_{1}$ with a lower value. Furthermore, if a $\\kappa$ function were to assign the same confidence score to all $x \\in \\mathcal{X}$ , then when there are no two identical values of losses, the definition's probability would be 1; otherwise, the more different values for losses there are, the larger the probability would grow. 
In classification with a $0/1$ loss, for example, assigning the same confidence score to all instances would result in the probability being Accuracy $(f) \\cdot (1 - Accuracy(f))$ , which is largest when Accuracy $(f) = 0.5$ .", + "bbox": [ + 169, + 638, + 825, + 737 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E RANKING CAPACITY COMPARISON BETWEEN HUMANS AND NEURAL NETWORKS", + "text_level": 1, + "bbox": [ + 169, + 756, + 774, + 789 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In the field of metacognition, interestingly, the predictive value of confidence is evaluated by two different aspects: by its ability to discriminate between correct and incorrect predictions (also known as resolution in metacognition or ranking in our context) and by its ability to give well-calibrated confidence estimations, not being over- or under-confident (Fiedler et al., 2019). These two aspects correspond perfectly with much of the research done in the deep learning field, with the nearly matching metric to AUROC of $\\gamma$ -correlation (see Section 2).", + "bbox": [ + 169, + 805, + 823, + 888 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This allows us to compare how well humans rank predictions in various tasks versus how well models rank their own in others. Human AUROC measurements in various tasks (translated from", + "bbox": [ + 169, + 895, + 823, + 922 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "$\\gamma$ -correlation) tend to range from 0.6 to 0.75 (Undorf & Broder, 2019; Basile et al., 2018; Ackerman et al., 2016), but could vary, usually towards much lower values (Griffin et al., 2019). 
In our comprehensive evaluation on ImageNet, AUROC ranged from 0.77 to 0.88 (with the median value being 0.85), and in CIFAR-10 these measurements jump to the range of 0.92 to 0.94.", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "While such comparisons between neural networks and humans are somewhat unfair due to the great sensitivity required for the task, research that directly compares humans and machine learning algorithms performing the same task exist. For example, in Ackerman et al. (2019), algorithms far surpass even the group of highest performing individuals in terms of ranking.", + "bbox": [ + 169, + 166, + 823, + 224 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F CRITICISMS OF AUROC AS A RANKING METRIC", + "text_level": 1, + "bbox": [ + 171, + 244, + 609, + 260 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In this section, addressing the criticism of AUROC as a ranking metric, we show why AUROC does not simply reward models for having lower accuracy. The paper by Ding et al. (2019) presented a semi-artificial experiment to demonstrate that AUROC might get larger the worse the model's accuracy becomes. They consider a model $f$ and its $\\kappa$ function evaluated on a classification test set $\\mathcal{X}$ , giving each a prediction $\\hat{y}_f(x)$ and a confidence score $\\kappa(x, \\hat{y}_f(x)|f)$ , which in this case is the model's softmax response. Let $\\mathcal{X}^c = \\{x^c \\in \\mathcal{X} | \\hat{y}_f(x^c) = y(x)\\}$ be the set of all instances correctly predicted by the model $f$ , and define $x_{(i)}^c \\in \\mathcal{X}^c$ to be the correct instance that received the i-lowest confidence score from $\\kappa$ . 
Their example continues and considers an artificial model $f^m$ to be an exact clone of $f$ with the following modification: for every $i \\leq m$ , the model $f^m$ now predicts a different, incorrect label for $x_{(i)}^c$ ; however, its given confidence score remains identical: $\\kappa(x_{(i)}^c, \\hat{y}_f(x_{(i)}^c)|f) = \\kappa(x_{(i)}^c, \\hat{y}_{f^m}(x_{(i)}^c)|f^m)$ . $f^0$ is exactly identical to $f$ , by this definition, not changing any predictions. The paper shows how an artificially created model $f^m$ obtains a higher AUROC score the bigger its $m$ . This happens even though \"nothing\" changed but a hit to the model's accuracy performance (by making mistakes on more instances).", + "bbox": [ + 169, + 277, + 826, + 481 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "First, to understand why this happens, let us consider $f^1$ : AUROC for $\\kappa$ increases the more pairs of $[\\kappa(x_1) < \\kappa(x_2)|\\hat{y}_f(x_1) \\neq y(x_1), \\hat{y}_f(x_2) = y(x_2)]$ there are. The model $f^1$ is now giving an incorrect classification to $x_{(1)}^c$ , but this instance's position in the partial order induced by $\\kappa$ has remained the same (since $\\kappa(x_{(1)}^c)$ is unchanged); therefore, $|\\mathcal{X}^c| - 1$ correctly ranked pairs were added: $[\\kappa(x_{(1)}^c) < \\kappa(x_{(i)}^c)|\\hat{y}_f(x_{(1)}^c) \\neq y(x_{(1)}^c), \\hat{y}_f(x_{(i)}^c) = y(x_{(i)}^c)]$ for every $1 < i \\leq |\\mathcal{X}^c|$ . Nevertheless, this does not guarantee an increase to AUROC by itself: if, previously, all pairs of (correct, incorrect) instances were ranked correctly by $\\kappa$ , AUROC would already be 1.0 for $f^0$ and would not change for $f^1$ . If AUROC for $f^1$ is higher than it was for $f^0$ , this means there exists at least one instance $x^w$ that was incorrectly predicted by the original model $f^0$ such that $\\kappa(x_{(1)}^c) < \\kappa(x^w)$ . 
Every such originally wrongly ranked pair (by $f^0$ ) of $[\\kappa(x_{(1)}^c) < \\kappa(x^w)|\\hat{y}_f(x^w) \\neq y(x^w), \\hat{y}_f(x_{(1)}^c) = y(x_{(1)}^c)]$ has been eliminated by $f^1$ wrongly predicting $x_{(1)}^c$ . This, therefore, causes AUROC to increase at the expense of the model's accuracy.", + "bbox": [ + 169, + 487, + 826, + 674 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Such an analysis neglects many factors, which is probably why such an effect is only likely to be observed in artificial models (and not among the actual models we have empirically tested):", + "bbox": [ + 169, + 679, + 823, + 709 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. It is unreasonable to assume that the confidence score given by $\\kappa$ will remain exactly the same for an instance $x_{(i)}^{c}$ given it now has a different prediction. In the case of $\\kappa$ being softmax, it assumes the model's logits have changed in a very precise and nontrivial manner. Additionally, by our broad definition of $\\kappa$ , which allows $\\kappa$ to even be produced from an entirely different model than $f$ , $\\kappa$ receives the prediction and model as a given input (and cannot change or affect either), and it is unlikely to assume changing its inputs will not change its output.", + "2. Suppose we find the setting reasonable and assume we can actually create a model $f^m$ as described. Let us observe a model $f^p$ such that $p = \\min_{m} (\\text{AUROC of } f^m = 1)$ , meaning that $f^p$ ranks its predictions perfectly, unlike the original $f^0$ . Is it really true that $f^p$ has no better uncertainty estimation than $f^0$ ? Model $f^p$ behaves very much like the investment in \"Model B\" from our example in Section 1, possessing perfect knowledge of when it is wrong and when it is correct, allowing its users risk-free classification. 
So, given a model $f$ , we can use the above process to produce an improved model $f^p$ , and then we can even calibrate its" + ], + "bbox": [ + 210, + 720, + 826, + 925 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "$\\kappa$ to output $0\\%$ for all instances below its threshold and $100\\%$ for all those above to produce a perfect model, which might have a small coverage but is correct every time, knows it and notifies its user when it truly knows the prediction. The increase in AUROC reflects such an improvement.", + "bbox": [ + 228, + 103, + 823, + 160 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Not only do we disagree with such an analysis and its conclusions, but we also have vast empirical evidence to show that AUROC does not prefer lower accuracy models unless there is a good reason for it to do so, as we demonstrate in Figure 3 (comparing EfficientNet-V2-XL to ViT-B/32-SAM). In fact, out of the 523 models we tested, the model with the highest AUROC also has the $4^{th}$ highest accuracy of all models, and the overall Spearman correlation between AUROC and accuracy of all the models we tested is 0.03. 
Furthermore, Figure 3 also exemplifies why AURC, which was suggested by the just mentioned paper as the alternative to AUROC, is a bad choice as a single number metric, and might lead us to deploy a model that has a worse selective risk for most coverages only due to its higher overall accuracy.", + "bbox": [ + 169, + 172, + 826, + 299 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "G KNOWLEDGE DISTILLATION EFFECTS ON UNCERTAINTY ESTIMATION", + "text_level": 1, + "bbox": [ + 171, + 323, + 782, + 339 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/f188edd92ad5504d8af446f7da799b3fc9769cd70d682862a17c5d78793ce1c6.jpg", + "image_caption": [ + "Figure 12: Comparing vanilla models to those incorporating KD into their training (represented by markers with thick borders and a dot). In a pruning scenario that includes distillation, yellow markers indicate that the original model was also the teacher. The performance of each model is measured in AUROC (higher is better) and -log(ECE) (higher is better)." + ], + "image_footnote": [], + "bbox": [ + 205, + 364, + 792, + 612 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Figure 12 compares vanilla models to those incorporating KD into their training (represented by markers with thick borders and a dot). In a pruning scenario that includes distillation, yellow markers indicate that the original model was also the teacher (Aflalo et al., 2020). While distillation using a different model tends to improve uncertainty estimation in both aspects, distillation by the model itself seems to improve only one—suggesting it is generally more beneficial to use a different model as a teacher. The fact that KD improves the model over its original form, however, is surprising, and implies that the distillation process itself helps uncertainty estimation. 
Note that although this specific method involves pruning, evaluations of models pruned without incorporating distillation (Frankle & Carbin, 2018) revealed no improvement.", + "bbox": [ + 169, + 700, + 823, + 827 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "It seems, moreover, that the teacher does not have to be good in uncertainty estimation itself; Figure 5 in Section 3 shows this by comparing the teacher architecture and the students in each case.", + "bbox": [ + 169, + 832, + 823, + 861 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "While the training method by Ridnik et al. (2021) included pretraining on ImageNet-21k and demonstrated impressive improvements, comparison of models that were pretrained on ImageNet21k (Tan & Le, 2021; Touvron et al., 2021a; 2022) with identical models that were not pretrained showed only a slight improvement in ECE, and, in fact, exhibit a degradation of AUROC (see", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Figures 4a and 4b in Section 3). This suggests that pretraining alone does not improve uncertainty estimation.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "H MORE INFORMATION ABOUT TEMPERATURE SCALING", + "text_level": 1, + "bbox": [ + 171, + 151, + 663, + 167 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/034ae8e1de9f41d6fe5cac55c9a457b469a7856eca88d835a395c08d2e60e6c9.jpg", + "image_caption": [ + "Figure 13: A comparison of 523 models after being calibrated with TS, evaluated by their AUROC $(\\times 100$ , higher is better) and -log(ECE) (higher is better) on ImageNet. 
Each marker's size is determined by the model's number of parameters. ViT models are still among the best performing architectures for all aspects of uncertainty estimation." + ], + "image_footnote": [], + "bbox": [ + 238, + 183, + 759, + 450 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In Figure 13 we see how temperature scaling (TS) affects the overall ranking of models in terms of AUROC and ECE. While the ranking between the different architecture remains similar, the poorly performing models are much improved and minimize the gap between them and the best models. One particularly notable exception is HardCoRe-NAS (Nayman et al., 2021), with its lowest latency versions becoming the top performers in terms of ECE. In addition, models that benefit from", + "bbox": [ + 169, + 535, + 823, + 606 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/171b1c9407f03ba242867cd36b052c046d8aa0c7512ed878a58fd7430f7f9cc8.jpg", + "image_caption": [ + "Figure 14: Here the relationship between temperature and the success of TS, unlike the case for AUROC, seems unrelated." + ], + "image_footnote": [], + "bbox": [ + 205, + 614, + 792, + 858 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "TS in terms of AUROC tend to have been assigned a temperature lower than 1 by the calibration", + "bbox": [ + 169, + 909, + 823, + 924 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "process (see Figure 6 in Section 3). The same, however, does not hold true for ECE (see Figure 14). This example also emphasizes the fact that models benefiting from TS in terms of AUROC do not necessarily benefit in terms of ECE, and vice versa. 
Therefore, determining whether to calibrate the deployed model with TS is, unfortunately, a task-specific decision.", + "bbox": [ + 169, + 103, + 826, + 161 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We perform TS as was suggested in Guo et al. (2017). For each model we take a random stratified sampling of 5,000 instances from the ImageNet validation set on which to calibrate, and reserve the remainder 45,000 instances for testing. Using the box-constrained L-BFGS (Limited-Memory Broyden-Fletcher-Goldfarb-Shanno) algorithm, we optimize for 5,000 iterations (though fewer iterations usually converge into the same temperature parameter) using a learning rate of 0.01.", + "bbox": [ + 169, + 166, + 826, + 238 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "I ARCHITECTURE CHOICE FOR PRACTICAL DEPLOYMENT BASED ON SELECTIVE PERFORMANCE", + "text_level": 1, + "bbox": [ + 171, + 261, + 756, + 295 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "As discussed in Section 2, when we know the coverage or risk we require for deployment, the most direct metric to check is which model obtains the best risk for the coverage required (selective risk), or which model gets the largest coverage for the accuracy constraint (SAC). While each deployment scenario specifies its own constraints, for demonstration purposes we consider a scenario in which misclassifications are by far more costly than abstaining from giving correct predictions. An example of this could be classifying a huge unlabeled dataset (or cleaning bad labels from a labeled dataset). 
While it is desirable to assign labels to a larger portion of the dataset (or to correct more of the wrong labels), it is crucial that these labels are as accurate as possible (or that correctly labeled instances are not replaced with a bad label).", + "bbox": [ + 169, + 311, + 826, + 436 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/0d9498774b8c27650a838b860059762e91ee2a1e034b9c0144323f176fdd71b8.jpg", + "image_caption": [ + "Figure 15: A comparison of 523 models by their log(number of model's parameters) and the coverage they are able to provide for a SAC of $99\\%$ (higher is better) on ImageNet." + ], + "image_footnote": [], + "bbox": [ + 235, + 453, + 759, + 714 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To explore such a scenario, we evaluate all models on ImageNet to see which ones give us the largest coverage for a required accuracy of $99\\%$ . In Figure 7, Section 3 (paper's main body) we observe that of all the models studied, only ViT models are able to provide coverage beyond $30\\%$ for such an extreme constraint. Moreover, we note that the coverage they provide is significantly larger than that given by models with comparable accuracy or size, and that ViT models that provide similar coverage to their counterparts do so with less overall accuracy.", + "bbox": [ + 169, + 777, + 823, + 863 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In Figure 15 we see that not only do ViT models provide more coverage than any other model, but that they are also able to do so in any size category. 
To compare models fairly by their size, we present Figure 15, which sets the Y axis to be the logarithm of the number of parameters, so that models sharing the same y value can be compared solely based on their x value—which is the coverage they", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/62da8f73c31fd0a2dcd7ddaa6e9b66291acc2327749db53b70bb5056012956a9.jpg", + "table_caption": [ + "Table 1: A comparison of different training regimes of ViTs. *The paper introducing ViTs (Dosovitskiy et al., 2021) had also trained ViT models with the JFT-300M dataset; however, their weights are unavailable to the general public. All evaluations of ViTs from that paper were conducted on ViTs pretrained on ImageNet-21k, which are publicly available. **Pretrained DeiT3 models were first pretrained with a learning rate of $3 \\cdot 10^{-3}$ and then fine-tuned with a learning rate of $3 \\cdot 10^{-4}$" + ], + "table_footnote": [], + "table_body": "
RegimeViT (original)Steiner et al.Chen et al.DeiTDeiT3DeiT3 +PretrainingTorchvision
ReferenceDosovitskiy et al. (2021)Steiner et al. (2022)Chen et al. (2022)Touvron et al. (2021b)Touvron et al. (2022)Touvron et al. (2022)Paszke et al. (2019)
Pretraining datasetImageNet-21k*ImageNet-21k---ImageNet-21k-
Batch Size409640964096102420482048512
OptimizerAdamWAdamWSAMLAMBLAMBLAMBAdamW
LR3·10-33·10-33·10-31·10-33·10-33·10-3**3·10-3
LR decaycosinecosinecosinecosinecosinecosinecosine
Weight decay0.10.30.10.050.020.020.3
Warmup epochs3.43.43.455530
Label smoothing ε0.10.10.10.1X0.10.11
DropoutXXXX
Stoch. DepthXXX
Repeated AugXXXX
Gradient Clip.1.01.01.0X1.01.01.0
H. flip
Random Resized CropX
Rand AugmentXAdapt.X9/0.5XXAdapt.
3 AugmentXXXXX
LayerScaleXXXXX
Mixup alphaXAdapt.X0.80.8X0.2
Cutmix alphaXXX1.01.01.01.0
Erasing prob.XXX0.25XXX
ColorJitterXXXX0.30.3X
Test crop ratio0.8750.8750.8750.8751.01.00.875
LossCECECECEBCECECE
Superb performanceXXXX
", + "bbox": [ + 173, + 181, + 828, + 410 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "provide for a SAC of $99\\%$ . We see that ViT models provide a larger coverage even when compared with models of a similar size.", + "bbox": [ + 169, + 435, + 823, + 464 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "J COMPARISON OF VIT TRAINING REGIMES AND THEIR EFFECTS ON UNCERTAINTY ESTIMATION PERFORMANCE", + "text_level": 1, + "bbox": [ + 171, + 484, + 756, + 517 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In Table 1 we compare the different hyperparameters and augmentations used for training the ViT models evaluated in this paper, with the aim of revealing why some training regimes consistently result in superb ViTs, while others do not. An analysis of the various differences between these regimes, however, eliminates the obvious suspects.", + "bbox": [ + 169, + 532, + 823, + 589 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Pretraining, on its own, does not seem to offer an explanation: First, we analyze eight pairs of models (provided by Touvron et al. 2022) such that both models have identical architecture and training regimes, with the exception that one was pretrained on ImageNet-21k, and the other was not. Pretraining results in only a slight improvement of 0.16 in AUROC and 0.01 in ECE. Moreover, as mentioned in detail in Section 3, ViT models trained on JFT-4B (Tran et al., 2022) were outperformed by the successful ViT models evaluated in this paper, most of which were pretrained on ImageNet-21k (and even by one ViT SAM model that was not pretrained at all). Second, we note that ViTs trained with the SAM optimizer (Chen et al., 2022), and not pretrained at all, reach superb ranking (AUROC) as well. 
These facts lead us to conclude that pretraining, at least by itself, is not the main contributor to training successful ViTs.", + "2) The selection of optimizers and other hyperparameters (such as learning rate, label smoothing etc.) does not seem to have a significant impact. For example, while AdamW (Loshchilov & Hutter, 2019) was used by two of the successful regimes, it was also used by Paszke et al. (2019), and on the other hand was replaced by SAM (Foret et al., 2021) in another successful training regime.", + "3) Advanced augmentations are unlikely to explain the gaps in uncertainty estimation performance, as regimes producing superior ViT models (Dosovitskiy et al., 2021; Chen et al., 2022) did not use advanced augmentations (in comparison to Touvron et al. (2021b) and Touvron et al. (2022), for example)." + ], + "bbox": [ + 169, + 595, + 826, + 862 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "For these reasons, for the moment, the explanation for the gap remains elusive. The only remaining \"suspect\" is the batch size used, with all successful regimes using a batch size of 4096, while others use a smaller batch size of 2048 or lower. One could argue, however, that a two-fold increase in batch size is not sufficient to explain the huge gaps in performance measured.", + "bbox": [ + 169, + 868, + 823, + 925 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/3a0a1a0a3f1dceb4213f6f365e532d02fc678ba638fb2108990875012e1b5cb9.jpg", + "table_caption": [ + "Table 2: The relationship between uncertainty estimation performance and the model's attributes and resources (accuracy, number of parameters and input size), measured by Spearman correlation. 
Positive correlations indicate good utilization of resources for uncertainty estimation." + ], + "table_footnote": [], + "table_body": "
ArchitectureAUROC & Accuracy-ECE & AccuracyAUROC & #Parameters-ECE & #ParametersAUROC & Input Size-ECE & Input Size#Models Evaluated
EfficientNet-0.16-0.29-0.22-0.29-0.26-0.3850
ResNet-0.28-0.220.160.03-0.40-0.4433
ViT0.84-0.170.50-0.670.04-0.1331
XCiT distilled0.600.090.350.020.510.1228
XCiT-0.680.89-0.790.94--28
ViT*0.230.38-0.040.410.14-0.1226
SE_ResNet-0.46-0.02-0.530.20-0.02-0.3518
EfficientNetV2-0.70-0.45-0.63-0.47-0.59-0.4015
NFNet0.560.780.630.810.480.6013
Inception-0.290.09-0.430.30-0.080.2313
RegNetY-0.03-0.980.27-0.86--12
RegNetX0.20-0.960.20-0.96--12
CaT distilled0.44-0.870.35-0.870.58-0.5010
DLA0.64-0.900.77-0.90--10
MobileNetV30.370.590.420.60--10
Res2Net-0.700.27-0.680.60--9
CLIP Zero-Shot1.0-0.630.9-0.80.55-0.589
CLIP + Linear Probe0.880.260.710.10.19-0.278
VGG0.81-0.980.71-0.90--8
RepVGG-0.710.50-0.570.21--8
BiT-0.33-0.81-0.20-0.85-0.46-0.258
ResNeXt-0.960.39-0.22-0.30--7
ResNet RS0.000.79-0.180.82-0.300.827
MixConv-0.110.89-0.240.86--7
DenseNet0.43-0.140.720.12--6
HardCoReNAS-0.600.26-0.490.37--6
Swin0.710.140.770.260.410.006
ECANet-0.200.60-0.430.370.830.376
Twins-0.260.94-0.140.89--6
SWSL ResNet0.94-0.890.77-0.83--6
GENet0.50-1.000.50-1.000.87-0.876
SSL ResNet0.14-1.000.26-0.94--6
TResNet0.10-0.300.530.53-0.58-0.875
CoaT-0.100.90-0.100.50--5
LeViT distilled0.60-0.900.60-0.90--5
ResMLP0.201.000.150.97--5
MobileNetV2-0.300.00-0.210.10--5
ViT* Distilled0.8-1.00.71-0.770.22-0.774
PiT distilled1.00-1.001.00-1.00--4
PiT-0.401.00-0.401.00--4
WSP ResNeXt1.000.801.000.80--4
ResMLP distilled0.800.200.800.20--4
MnasNet0.400.200.630.95--4
", + "bbox": [ + 169, + 155, + 826, + 487 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "K EVALUATIONS OF THE ZERO-SHOT LANGUAGE-VISION CLIP MODEL", + "text_level": 1, + "bbox": [ + 171, + 526, + 779, + 542 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In this section we describe how we use CLIP model and extract confidence signals from it during inference. To evaluate CLIP on ImageNet, we first prepare it following the code provided by its authors (https://github.com/openai/CLIP): The labels of ImageNet-1k are encoded into normalized embedding vectors. At inference time, the incoming image is encoded into another normalized embedding vector. A cosine similarity is then calculated between each label embedding vector and the image embedding vector, and lastly, softmax is applied. The highest score is then taken as the confidence score for that prediction. We also evaluate the same models when adding a trained \"linear-probe\" to them (as described in Radford et al. (2021), which is essentially a logistic regression head), that results in a large boost in their accuracy.", + "bbox": [ + 169, + 566, + 826, + 691 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "L EFFECTS OF THE MODEL'S ACCURACY, NUMBER OF PARAMETERS AND INPUT SIZE ON UNCERTAINTY ESTIMATION PERFORMANCE", + "text_level": 1, + "bbox": [ + 169, + 727, + 794, + 760 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 2 shows the relationship between uncertainty estimation performance and model attributes and resources (accuracy, number of parameters and input size), measured by Spearman correlation. We measure uncertainty estimation performance by AUROC (higher is better) and -ECE (higher is better). Positive correlations indicate good utilization of resources for uncertainty estimation (for example, a positive correlation between -ECE and the number of parameters indicates that as the number of parameters increases, the calibration improves). 
An interesting observation is that distillation can drastically change the correlation between a resource and the uncertainty estimation performance metrics. For example, undistilled XCiTs have a Spearman correlation of -0.79 between their number of parameters and AUROC, indicating that more parameters are correlated with lower ranking performance, while distilled XCiTs have a correlation of 0.35 between the two.", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/8cb7831169cfb6d617fc5c6ca0c7b226e0e96ac33e185eae4751d461afb1bb3d.jpg", + "table_caption": [ + "Table 3: Comparing using MC dropout to softmax-response (vanilla)." + ], + "table_footnote": [], + "table_body": "
ArchitectureMethodAccuracyAUROC
MobileNetV3 LargeVanilla74.0486.88
MC dropout7486.14
MobileNetV3 SmallVanilla67.6786.2
MC dropout67.5584.54
MobileNetV2Vanilla71.8886.05
MC dropout71.8184.68
VGG11Vanilla70.3786.31
MC dropout70.2184.3
VGG11 (no BatchNorm)Vanilla69.0286.19
MC dropout68.9583.94
VGG13Vanilla71.5986.3
MC dropout71.4384.37
VGG13 (no BatchNorm)Vanilla69.9386.24
MC dropout69.7184.3
VGG16Vanilla73.3686.76
MC dropout73.3385.02
VGG16 (no BatchNorm)Vanilla71.5986.63
MC dropout71.4784.97
VGG19Vanilla74.2286.52
MC dropout74.1785.06
VGG19 (no BatchNorm)Vanilla72.3886.55
MC dropout72.3784.99
", + "bbox": [ + 326, + 126, + 669, + 436 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "M EVALUATIONS OF MONTE CARLO DROPOUT RANKING PERFORMANCE", + "text_level": 1, + "bbox": [ + 171, + 459, + 797, + 474 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "MC Dropout (Gal & Ghahramani, 2016) is computed using several dropout-enabled forward passes to produce uncertainty estimates. In classification, the mean softmax score of these passes, is calculated, and then a predictive entropy score is used as the final uncertainty estimate. In our evaluations, we use 30 dropout-enabled forward passes. We do not measure MC Dropout's effect on ECE since entropy scores do not reside in $[0,1]$ .", + "bbox": [ + 169, + 489, + 823, + 560 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We test this technique using MobileNetV3 (Howard et al., 2019), MobileNetv2 (Sandler et al., 2018) and VGG (Simonyan & Zisserman, 2015), all trained on ImageNet and taken from the PyTorch repository (Paszke et al., 2019).", + "bbox": [ + 169, + 566, + 823, + 609 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The results comparing these models with and without using MC dropout are provided in Table 3.", + "bbox": [ + 169, + 616, + 805, + 631 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The table shows that using MC dropout causes a consistent drop in both AUROC and selective performance compared with using the same models with softmax as the $\\kappa$ . These results are also visualized in comparison to other methods in Figure 4a in Section 3. 
MC dropout underperformance in an ID setting was also previously observed in Geifman & El-Yaniv (2017).", + "bbox": [ + 169, + 637, + 823, + 694 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 25 + } +] \ No newline at end of file diff --git a/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_model.json b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a8bda52a0ff5987a8a3aaa259692eaacc26e5f88 --- /dev/null +++ b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_model.json @@ -0,0 +1,3420 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.83, + 0.17 + ], + "angle": 0, + "content": "WHAT CAN WE LEARN FROM THE SELECTIVE PREDICTION AND UNCERTAINTY ESTIMATION PERFORMANCE OF 523 IMAGENET CLASSIFIERS?" 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.193, + 0.254, + 0.206 + ], + "angle": 0, + "content": "Ido Galil" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.207, + 0.248, + 0.22 + ], + "angle": 0, + "content": "Technion" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.222, + 0.372, + 0.235 + ], + "angle": 0, + "content": "idogail.iq@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.399, + 0.193, + 0.546, + 0.206 + ], + "angle": 0, + "content": "Mohammed Dabbah" + }, + { + "type": "text", + "bbox": [ + 0.399, + 0.208, + 0.459, + 0.22 + ], + "angle": 0, + "content": "Amazon" + }, + { + "type": "text", + "bbox": [ + 0.399, + 0.222, + 0.578, + 0.235 + ], + "angle": 0, + "content": "m.m.dabbah@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.606, + 0.193, + 0.704, + 0.206 + ], + "angle": 0, + "content": "Ran El-Yaniv" + }, + { + "type": "text", + "bbox": [ + 0.607, + 0.208, + 0.747, + 0.22 + ], + "angle": 0, + "content": "Technion, Deci.AI" + }, + { + "type": "text", + "bbox": [ + 0.607, + 0.222, + 0.802, + 0.235 + ], + "angle": 0, + "content": "rani@cs.technion.ac.il" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.272, + 0.547, + 0.286 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.3, + 0.769, + 0.58 + ], + "angle": 0, + "content": "When deployed for risk-sensitive tasks, deep neural networks must include an uncertainty estimation mechanism. Here we examine the relationship between deep architectures and their respective training regimes, with their corresponding selective prediction and uncertainty estimation performance. We consider some of the most popular estimation performance metrics previously proposed including AUROC, ECE, AURC as well as coverage for selective accuracy constraint. 
We present a novel and comprehensive study of selective prediction and the uncertainty estimation performance of 523 existing pretrained deep ImageNet classifiers that are available in popular repositories. We identify numerous and previously unknown factors that affect uncertainty estimation and examine the relationships between the different metrics. We find that distillation-based training regimes consistently yield better uncertainty estimations than other training schemes such as vanilla training, pretraining on a larger dataset and adversarial training. Moreover, we find a subset of ViT models that outperform any other models in terms of uncertainty estimation performance. For example, we discovered an unprecedented \\(99\\%\\) top-1 selective accuracy on ImageNet at \\(47\\%\\) coverage (and \\(95\\%\\) top-1 accuracy at \\(80\\%\\)) for a ViT model, whereas a competing EfficientNet-V2-XL cannot obtain these accuracy constraints at any level of coverage. Our companion paper, also published in ICLR 2023 (Galil et al., 2023), examines the performance of these classifiers in a class-out-of-distribution setting." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.601, + 0.338, + 0.617 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.632, + 0.828, + 0.689 + ], + "angle": 0, + "content": "The excellent performance of deep neural networks (DNNs) has been demonstrated in a range of applications, including computer vision, natural language understanding and audio processing. To deploy these models successfully, it is imperative that they provide an uncertainty quantification of their predictions, either via some kind of selective prediction or a probabilistic confidence score." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.695, + 0.827, + 0.848 + ], + "angle": 0, + "content": "Notwithstanding, what metric should we use to evaluate the uncertainty estimation performance? 
There are many and diverse ways so the answer to this question is not obvious, and to demonstrate the difficulty, consider the case of two classification models for the stock market that predict whether a stock's value is about to increase, decrease, or remain neutral (three-class classification). Suppose that model A has a \\(95\\%\\) true accuracy, and generates a confidence score of 0.95 on every prediction (even on misclassified instances); model B has a \\(40\\%\\) true accuracy, but always gives a confidence score of 0.6 on correct predictions, and 0.4 on incorrect ones. Model B can be utilized easily to generate perfect investment decisions. Using selective prediction (El-Yaniv & Wiener, 2010; Geifman & El-Yaniv, 2017), Model B will simply reject all investments on stocks whenever the confidence score is 0.4. While model A offers many more investment opportunities, each of its predictions carries a \\(5\\%\\) risk of failure." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Among the various metrics proposed for evaluating the performance of uncertainty estimation are: Area Under the Receiver Operating Characteristic (AUROC or AUC), Area Under the Risk-Coverage curve (AURC) (Geifman et al., 2018), selective risk or coverage for a selective accuracy constraint (SAC), Negative Log-likelihood (NLL), Expected Calibration Error (ECE), which is often used for evaluating a model's calibration (see Section 2) and Brier score (Brier, 1950). 
All these metrics" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.1, + 0.7, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.316, + 0.828, + 0.4 + ], + "angle": 0, + "content": "Figure 1: A comparison of 523 models by their AUROC (\\( \\times 100 \\), higher is better) and -log(ECE) (higher is better) on ImageNet. Each marker's size is determined by the model's number of parameters. A full version graph is given in Figure 8. Distilled models are better than non-distilled ones. A subset of ViT models is superior to all other models for all aspects of uncertainty estimation (\"ViT\" in the legend, marked as a red triangle facing upwards); the performance of EfficientNet-V2 and GENet models is worse." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.427, + 0.825, + 0.552 + ], + "angle": 0, + "content": "are well known and are often used for comparing the uncertainty estimation performance of models (Moon et al., 2020; Nado et al., 2021; Maddox et al., 2019; Lakshminarayanan et al., 2017). Somewhat surprisingly, NLL, Brier, AURC, and ECE all fail to reveal the uncertainty superiority of Model B in our investment example (see Appendix A for the calculations). Both AUROC and SAC, on the other hand, reveal the advantage of Model B perfectly (see Appendix A for details). It is not hard to construct counterexamples where these two metrics fails and others (e.g., ECE) succeed. To sum up this brief discussion, we believe that the ultimate suitability of a performance metric should be determined by its context. 
If there is no specific application in mind, there is a strong incentive to examine a variety of metrics, as we choose to do in this study." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.559, + 0.825, + 0.684 + ], + "angle": 0, + "content": "This study evaluates the ability of 523 models from the Torchvision and Timm repositories (Paszke et al., 2019; Wightman, 2019) to estimate uncertainty1. Our study identifies several major factors that affect confidence rankings, calibration, and selective prediction, and lead to numerous empirical contributions important to selective predictions and uncertainty estimation. While no new algorithm or method is introduced in our paper, our study generates many interesting conclusions that will help practitioners achieve more powerful uncertainty estimation. Moreover, the research questions that are uncovered by our empirical study shed light on uncertainty estimation, which may stimulate the development of new methods and techniques for improving uncertainty estimation. Among the most interesting conclusions our study elicits are:" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.691, + 0.825, + 0.761 + ], + "angle": 0, + "content": "(1) Knowledge distillation training improves estimation. Training regimes incorporating any kind of knowledge distillation (KD) (Hinton et al., 2015) lead to DNNs with improved uncertainty estimation performance evaluated by any metric, more than by using any other training tricks (such as pretraining on a larger dataset, adversarial training, etc.). In Galil et al. (2023) we find similar performance boosts for class-out-of-distribution (C-OOD) detection." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.768, + 0.827, + 0.852 + ], + "angle": 0, + "content": "(2) Certain architectures are more inclined to perform better or worse at uncertainty estimation. 
Some architectures seem more inclined to perform well on all aspects of uncertainty estimation, e.g., a subset of vision transformers (ViTs) (Dosovitskiy et al., 2021) and the zero-shot language-vision CLIP model (Radford et al., 2021), while other architectures tend to perform worse, e.g., EfficientNet-V2 and GENet (Tan & Le, 2021; Lin et al., 2020). These results are visualized in Figure 1. In Galil et al. (2023) we find that ViTs and CLIPs are also powerful C-OOD detectors." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.858, + 0.825, + 0.901 + ], + "angle": 0, + "content": "(3) Several training regimes result in a subset of ViTs that outperforms all other architectures and training regimes. These regimes include the original one from the paper introducing ViTs (Dosovitskiy et al., 2021; Steiner et al., 2022; Chen et al., 2022; Ridnik et al., 2021). These ViTs" + }, + { + "type": "list", + "bbox": [ + 0.17, + 0.691, + 0.827, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.811, + 0.924 + ], + "angle": 0, + "content": "\\(^{1}\\)Our code is available at https://github.com/IdoGalil/benchmarking-uncertainty-estimation-performance" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "achieve the best uncertainty estimation performance on any aspect measured, both in absolute terms and per-model size (# parameters, see Figures 9 and 10 in Appendix B)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.225 + ], + "angle": 0, + "content": "(4) Temperature scaling improves selective and ranking performance. 
The simple post-training calibration method of temperature scaling (Guo et al., 2017), which is known to improve ECE, for the most part also improves ranking (AUROC) and selective prediction—meaning not only does it calibrate the probabilistic estimation for each individual instance, but it also improves the partial order of all instances induced by those improved estimations, pushing instances more likely to be correct to have a higher confidence score than instances less likely to be correct (see Section 3)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.23, + 0.828, + 0.343 + ], + "angle": 0, + "content": "(5) The correlations between AUROC, ECE, accuracy and the number of parameters are dependent on the architecture analyzed. Contrary to previous work by (Guo et al., 2017), we observe that while there is a strong correlation between accuracy/number of parameters and ECE or AUROC within each specific family of models of the same architecture, the correlation flips between a strong negative and a strong positive correlation depending on the type of architecture being observed. For example, as DLA (Yu et al., 2018) architectures increase in size and accuracy, their ECE deteriorates while their AUROC improves. The exact opposite, however, can be observed in XCiTs (Ali et al., 2021) as their ECE improves with size while their AUROC deteriorates (see Appendix L)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.348, + 0.828, + 0.392 + ], + "angle": 0, + "content": "(6) The best model in terms of AUROC or SAC is not always the best in terms of calibration, as illustrated in Figure 1, and the trade-off should be considered when choosing a model based on its application." 
+ }, + { + "type": "list", + "bbox": [ + 0.171, + 0.14, + 0.828, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.411, + 0.78, + 0.427 + ], + "angle": 0, + "content": "2 HOW TO EVALUATE DEEP UNCERTAINTY ESTIMATION PERFORMANCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.442, + 0.827, + 0.572 + ], + "angle": 0, + "content": "Let \\(\\mathcal{X}\\) be the input space and \\(\\mathcal{Y}\\) be the label space. Let \\(P(\\mathcal{X},\\mathcal{Y})\\) be an unknown distribution over \\(\\mathcal{X}\\times \\mathcal{Y}\\). A model \\(f\\) is a prediction function \\(f:\\mathcal{X}\\to \\mathcal{Y}\\), and its predicted label for an image \\(x\\) is denoted by \\(\\hat{y}_f(x)\\). The model's true risk w.r.t. \\(P\\) is \\(R(f|P) = E_{P(\\mathcal{X},\\mathcal{Y})}[\\ell (f(x),y)]\\), where \\(\\ell :\\mathcal{Y}\\times \\mathcal{Y}\\rightarrow \\mathbb{R}^{+}\\) is a given loss function, for example, 0/1 loss for classification. Given a labeled set \\(S_{m} = \\{(x_{i},y_{i})\\}_{i = 1}^{m}\\subseteq (\\mathcal{X}\\times \\mathcal{Y})\\), sampled i.i.d. from \\(P(\\mathcal{X},\\mathcal{Y})\\), the empirical risk of model \\(f\\) is \\(\\hat{r} (f|S_m)\\triangleq \\frac{1}{m}\\sum_{i = 1}^{m}\\ell (f(x_i),y_i)\\). Following Geifman et al. (2018), for a given model \\(f\\) we define a confidence score function \\(\\kappa (x,\\hat{y} |f)\\), where \\(x\\in \\mathcal{X}\\), and \\(\\hat{y}\\in \\mathcal{Y}\\) is the model's prediction for \\(x\\), as follows. The function \\(\\kappa\\) should quantify confidence in the prediction of \\(\\hat{y}\\) for the input \\(x\\), based on signals from model \\(f\\). This function should induce a partial order over instances in \\(\\mathcal{X}\\)."
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.578, + 0.828, + 0.735 + ], + "angle": 0, + "content": "The most common and well-known \\(\\kappa\\) function for a classification model \\(f\\) (with softmax at its last layer) is its softmax response values: \\(\\kappa(x, \\hat{y} | f) \\triangleq f(x)_{\\hat{y}}\\) (Cordella et al., 1995; De Stefano et al., 2000). We chose to focus on studying uncertainty estimation performance using softmax response as the models' \\(\\kappa\\) function because of its extreme popularity, and its importance as a baseline due to its solid performance compared to other methods (Geifman & El-Yaniv, 2017; Geifman et al., 2018). While this is the main \\(\\kappa\\) we evaluate, we also test the popular uncertainty estimation technique of Monte Carlo dropout (MC dropout) (Gal & Ghahramani, 2016), which is motivated by Bayesian reasoning. Although these methods use the direct output from \\(f\\), \\(\\kappa\\) could be a different model unrelated to \\(f\\) and unable to affect \\(f\\)'s predictions. Note that to enable a probabilistic interpretation, \\(\\kappa\\) can only be calibrated if its values reside in \\([0, 1]\\) whereas for ranking and selective prediction any value in \\(\\mathbb{R}\\) can be used." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.739, + 0.828, + 0.926 + ], + "angle": 0, + "content": "A selective model \\( f \\) (El-Yaniv & Wiener, 2010; Chow, 1957) uses a selection function \\( g: \\mathcal{X} \\to \\{0,1\\} \\) to serve as a binary selector for \\( f \\), enabling it to abstain from giving predictions for certain inputs. \\( g \\) can be defined by a threshold \\( \\theta \\) on the values of a \\( \\kappa \\) function such that \\( g_{\\theta}(x|\\kappa, f) = \\mathbb{1}[\\kappa(x, \\hat{y}_f(x)|f) > \\theta] \\). 
The performance of a selective model is measured using coverage and risk, where coverage, defined as \( \phi(f, g) = E_P[g(x)] \), is the probability mass of the non-rejected instances in \( \mathcal{X} \). The selective risk of the selective model \( (f, g) \) is defined as \( R(f, g) \triangleq \frac{E_P[\ell(f(x), y) g(x)]}{\phi(f, g)} \). These quantities can be evaluated empirically over a finite labeled set \( S_m \), with the empirical coverage defined as \( \hat{\phi}(f, g|S_m) = \frac{1}{m} \sum_{i=1}^{m} g(x_i) \), and the empirical selective risk defined as \( \hat{r}(f, g|S_m) \triangleq \frac{1}{m} \sum_{i=1}^{m} \frac{\ell(f(x_i), y_i) g(x_i)}{\hat{\phi}(f, g|S_m)} \). Similarly, SAC is defined as the largest coverage available for a specific accuracy constraint. A way to visually inspect the behavior of a \( \kappa \) function for selective prediction can be done using the risk-coverage (RC) curve (El-Yaniv & Wiener, 2010)—a curve showing the selective risk as a function of coverage, measured on some chosen test set; see Figure 2 for an" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.369, + 0.101, + 0.633, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.271, + 0.828, + 0.357 + ], + "angle": 0, + "content": "Figure 2: The RC curve made by a ResNet18 trained on CIFAR-10, measured on the test set. The risk is calculated using a 0/1 loss (meaning the model has about \(95\%\) accuracy for 1.0 coverage); the \(\kappa\) used was softmax-response. 
The value of the risk at each point of coverage corresponds to the selective risk of the model when rejecting inputs that are not covered at that coverage slice. e.g., the selective risk for coverage 0.8 is about \\(0.5\\%\\), meaning that an end user setting a matching threshold would enjoy a model accuracy of \\(99.5\\%\\) on the \\(80\\%\\) of images the model would not reject." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.4, + 0.825, + 0.429 + ], + "angle": 0, + "content": "example. In general, though, two RC curves are not necessarily comparable if one does not fully dominate the other (Figure 3 shows an example of lack of dominance)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.435, + 0.828, + 0.643 + ], + "angle": 0, + "content": "The AURC and E-AURC metrics were defined by (Geifman et al., 2018) for quantifying the selective quality of \\(\\kappa\\) functions via a single number, with AURC being defined as the area under the RC curve. AURC, however, is very sensitive to the model's accuracy, and in an attempt to mitigate this, E-AURC was suggested. The latter also suffers from sensitivity to accuracy, as we demonstrate in Appendix C. The advantage of scalar metrics such as the above is that they summarize the model's overall uncertainty estimation behavior by reducing it to a single scalar. When not carefully chosen, however, these reductions could result in a loss of vital information about the problem (recall the investment example from Section 1, which is also discussed in Appendix A: reducing an RC curve to an AURC does not show that Model B has an optimal 0 risk if the coverage is smaller than 0.4). Thus, the choice of the \"correct\" single scalar performance metric unfortunately must be task-specific. 
When comparing the uncertainty estimation performance of deep architectures that exhibit different accuracies, we find that AUROC and SAC can effectively \"normalize\" accuracy differences that plague the usefulness of other metrics (see Figure 3). This normalization is essential in our study where we compare uncertainty performance of hundreds of models that can greatly differ in their accuracies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.65, + 0.825, + 0.721 + ], + "angle": 0, + "content": "For risk-sensitive deployment, let us consider the two models in Figure 3; EfficientNet-V2-XL (Tan & Le, 2021) and ViT-B/32-SAM (Chen et al., 2022). While the former model has better overall accuracy and AURC (metrics that could lead us to believe the model is best for our needs), it cannot guarantee a Top-1 ImageNet selective accuracy above \\(95\\%\\) for any coverage. ViT-B/32-SAM, on the other hand, can provide accuracies above \\(95\\%\\) for all coverages below \\(50\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.825, + 0.821 + ], + "angle": 0, + "content": "In applications where risk (or coverage) constraints are dictated (Geifman & El-Yaniv, 2017), the most straightforward and natural metric is SAC (or selective risk), which directly measures the coverage (resp., risk) given at the required level of risk (resp., coverage) constraint. We demonstrate this in Appendix I, evaluating which models give the most coverage for an ambitious SAC of \\(99\\%\\). If instead a specific range of coverages is specified, we could measure the area under the RC curve for those coverages: \\(\\mathrm{AURC}_{\\mathcal{C}}(\\kappa, f|S_m) = \\frac{1}{|\\mathcal{C}|}\\sum_{c\\in \\mathcal{C}}\\hat{r} (f,g_c|S_m)\\), with \\(\\mathcal{C}\\) being those required coverages." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.829, + 0.926 + ], + "angle": 0, + "content": "Often, these requirements are not known or can change as a result of changing circumstances or individual needs. Also, using metrics sensitive to accuracy such as AURC makes designing architectures and methods to improve \\(\\kappa\\) very hard, since an improvement in these metrics could be attributed to either an increase in overall accuracy (if such occurred) or to a real improvement in the model's ranking performance. Lastly, some tasks might not allow the model to abstain from making predictions at all, but instead require interpretable and well-calibrated probabilities of correctness, which could be measured using ECE." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.102, + 0.725, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.357, + 0.828, + 0.403 + ], + "angle": 0, + "content": "Figure 3: A comparison of RC curves made by the best (ViT-L/16-384) and worst (EfficientNet-V2-XL) models we evaluated in terms of AUROC. Comparing ViT-B/32-SAM to EfficientNet-V2 exemplifies the fact that neither accuracy nor AURC reflect selective performance well enough." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.429, + 0.509, + 0.442 + ], + "angle": 0, + "content": "2.1 MEASURING RANKING AND CALIBRATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.456, + 0.825, + 0.526 + ], + "angle": 0, + "content": "A \\(\\kappa\\) function is not necessarily able to change the model's predictions. 
Therefore, it can improve the selective risk by ranking correct and incorrect predictions better, inducing a more accurate partial order over instances in \\(\\mathcal{X}\\). Thus, for every two random samples \\((x_{1},y_{1}),(x_{2},y_{2})\\sim P(\\mathcal{X},\\mathcal{Y})\\) and given that \\(\\ell (f(x_1),y_1) > \\ell (f(x_2),y_2)\\), the ranking performance of \\(\\kappa\\) is defined as the probability that \\(\\kappa\\) ranks \\(x_{2}\\) higher than \\(x_{1}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.535, + 0.825, + 0.553 + ], + "angle": 0, + "content": "\\[\n\\Pr \\left[ \\kappa \\left(x _ {1}, \\hat {y} \\mid f\\right) < \\kappa \\left(x _ {2}, \\hat {y} \\mid f\\right) \\mid \\ell \\left(f \\left(x _ {1}\\right), y _ {1}\\right) > \\ell \\left(f \\left(x _ {2}\\right), y _ {2}\\right) \\right] \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.559, + 0.827, + 0.771 + ], + "angle": 0, + "content": "We discuss this definition in greater detail in Appendix D. The AUROC metric is often used in the field of machine learning. When the \\(0/1\\) loss is in play, it is known that AUROC in fact equals the probability in Equation (1) (Fawcett, 2006) and thus is a proper metric to measure ranking in classification (AKA discrimination). AUROC is furthermore equivalent to Goodman and Kruskal's \\(\\gamma\\)-correlation (Goodman & Kruskal, 1954), which for decades has been extensively used to measure ranking (known as \"resolution\") in the field of metacognition (Nelson, 1984). The precise relationship between \\(\\gamma\\)-correlation and AUROC is \\(\\gamma = 2 \\cdot \\text{AUROC} - 1\\) (Higham & Higham, 2018). We note also that both the \\(\\gamma\\)-correlation and AUROC are nearly identical or closely related to various other correlations and metrics; \\(\\gamma\\)-correlation (AUROC) becomes identical to Kendall's \\(\\tau\\) (up to a linear transformation) in the absence of tied values. 
Both metrics are also closely related to rank-biserial correlation, the Gini coefficient (not to be confused with the measure from economics) and the Mann-Whitney \\(U\\) test, hinting at their importance and usefulness in a variety of fields and settings. In Appendix E, we briefly compare the ranking performance of deep neural networks and humans in metacognitive research, and in Appendix F we address criticism of using AUROC to measure ranking." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.775, + 0.825, + 0.834 + ], + "angle": 0, + "content": "The most widely used metric for calibration is ECE (Naeini et al., 2015). For a finite test set of size \\(N\\), ECE is calculated by grouping all instances into \\(m\\) interval bins (such that \\(m \\ll N\\)), each of size \\(\\frac{1}{m}\\) (the confidence interval of bin \\(B_j\\) is \\(\\left(\\frac{j-1}{m}, \\frac{j}{m}\\right]\\)). With \\(\\mathrm{acc}(B_j)\\) being the mean accuracy in bin \\(B_j\\) and \\(\\mathrm{conf}(B_j)\\) being its mean confidence, ECE is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.301, + 0.843, + 0.694, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} E C E = \\sum_ {j = 1} ^ {m} \\frac {| B _ {j} |}{N} \\sum_ {i \\in B _ {j}} \\left| \\frac {\\mathbf {1} [ \\hat {y} _ {f} (x _ {i}) = y _ {i} ]}{| B _ {j} |} - \\frac {\\kappa (x , \\hat {y} _ {f} (x _ {i}) | f)}{| B _ {j} |} \\right| \\\\ = \\sum_ {j = 1} ^ {m} \\frac {\\left| B _ {j} \\right|}{N} \\left| \\operatorname {a c c} \\left(B _ {j}\\right) - \\operatorname {c o n f} \\left(B _ {j}\\right) \\right| \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.189 + ], + "angle": 0, + 
"content": "Since ECE is widely accepted we use it here to evaluate calibration, and follow (Guo et al., 2017) in setting the number of bins to \\( m = 15 \\). Many alternatives to ECE exist, allowing an adaptive binning scheme, evaluating the calibration on the non-chosen labels as well, and other various methods (Nixon et al., 2019; Vaicenavicius et al., 2019; Zhao et al., 2020). Relevant to our objective is that by using binning, this metric is not affected by the overall accuracy as is the Brier score (mentioned in Section 1), for example." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.208, + 0.427, + 0.223 + ], + "angle": 0, + "content": "3 PERFORMANCE ANALYSIS" + }, + { + "type": "image", + "bbox": [ + 0.256, + 0.241, + 0.744, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.49, + 0.401, + 0.509, + 0.413 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.268, + 0.414, + 0.73, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.489, + 0.562, + 0.509, + 0.574 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.586, + 0.825, + 0.658 + ], + "angle": 0, + "content": "Figure 4: A comparison of different methods and their improvement in terms of (a) AUROC and (b) ECE, relative to the same model's performance without employing the method. Markers above the x-axis represent models that benefited from the evaluated method, and vice versa. The numbers in the legend to the right of each method indicate the number of pairs compared. Temperature scaling can sometimes harm ECE, even though its purpose is to improve it." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.673, + 0.825, + 0.717 + ], + "angle": 0, + "content": "In this section we study the performance of 523 different models (available in timm 0.4.12 and torchvision 0.10). 
Note that all figures from our analysis are available as interactive plotly plots in the supplementary material, which provides information about every data point." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.722, + 0.827, + 0.89 + ], + "angle": 0, + "content": "1) Among the training regimes evaluated, knowledge distillation improves performance the most. We evaluated several training regimes: (a) Training that involves KD in any form, including Touvron et al. (2021b), knapsack pruning with distillation (in which the teacher is the original unpruned model) (Aflalo et al., 2020) and a pretraining technique that employs distillation (Ridnik et al., 2021); (b) adversarial training (Xie et al., 2020a; Tramère et al., 2018); (c) pretraining on ImageNet21k (\"pure\", with no additions) (Tan & Le, 2021; Touvron et al., 2021a; 2022); and (d) various forms of weakly or semi-supervised learning (Mahajan et al., 2018; Yalniz et al., 2019; Xie et al., 2020b). To make a fair comparison, we only compare pairs of models such that both models have identical architectures and training regimes, with the exception of the method itself being evaluated (e.g., training with or without knowledge distillation). More information about each data point of comparison is available in the supplementary material. Note that the samples are of various sizes due to the different number of potential models available for each." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Of the methods mentioned above, training methods incorporating distillation improve AUROC and ECE the most. 
For example, looking at Figure 4a, it is evident that distillation (purple box) almost" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.1, + 0.728, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.304, + 0.825, + 0.348 + ], + "angle": 0, + "content": "Figure 5: Comparing teacher models (yellow markers) to their KD students (represented by markers with thick borders and a dot). The performance of each model is measured in AUROC (higher is better) and -log(ECE) (higher is better)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.376, + 0.827, + 0.503 + ], + "angle": 0, + "content": "always improves AUROC, and moreover, its median improvement is the best of all techniques evaluated. The same observation can be made with regards to improving ECE; see Figure 4b. Distillation seems to greatly improve both metrics even when the teacher itself is much worse at both metrics. Figure 5 nicely shows this by comparing the teacher architecture and the students in each case. Additionally, in a pruning scenario that included distillation in which the original model was also the teacher (Aflalo et al., 2020), the pruned models outperformed their teachers. The fact that KD improves the model over its original form is surprising, and suggests that the distillation process itself helps uncertainty estimation. In Galil et al. (2023) we find that KD also improves C-OOD detection performance, measured by AUROC. We discuss these effects in greater detail in Appendix G." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.508, + 0.828, + 0.661 + ], + "angle": 0, + "content": "2) Temperature scaling greatly benefits AUROC and selective prediction. 
Evaluations of the simple post-training calibration method of temperature scaling (TS) (Guo et al., 2017), which is widely known to improve ECE without changing the model's accuracy, also revealed several interesting facts: (a) TS consistently and greatly improves AUROC and selective performance (see Figure 4a)—meaning not only does TS calibrate the probabilistic estimation for each individual instance, but it also improves the partial order of all instances induced by those improved estimations. While TS is well known and used for calibration, to the best of our knowledge, its benefits for selective prediction were previously unknown. (b) While TS is usually beneficial, it could harm some models (see Figures 4a and 4b). While it is surprising that TS—a calibration method—would harm ECE, this phenomenon is explained by the fact that TS optimizes NLL and not ECE (to avoid trivial solutions), and the two may sometimes misalign. (c) Models that benefit from TS in terms" + }, + { + "type": "image", + "bbox": [ + 0.32, + 0.676, + 0.681, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.835, + 0.825, + 0.879 + ], + "angle": 0, + "content": "Figure 6: Out of 523 models evaluated, models that were assigned a temperature higher than 1 by the calibration process tended to degrade in AUROC performance rather than improve. Markers above the x-axis represent models that benefited from TS, and vice versa." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "of AUROC tend to have been assigned a temperature smaller than 1 by the calibration process (see Figure 6). This, however, does not hold true for ECE (see Figure 14 in Appendix H). 
This example" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.175 + ], + "angle": 0, + "content": "also emphasizes the fact that improvements in terms of AUROC do not necessarily translate into improvements in ECE, and vice versa. (d) While all models usually improve with TS, the overall ranking of uncertainty performance between families tends to stay similar, with the worse (in terms of ECE and AUROC) models closing most of the gap between them and the mediocre ones (see Figure 13 in Appendix H)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.181, + 0.827, + 0.307 + ], + "angle": 0, + "content": "3) A subset of ViTs outperforms all other architectures in selective prediction, ranking and calibration, both in absolute terms and per-model size. Several training regimes (including the original regime from the paper introducing ViT) Dosovitskiy et al. (2021); Steiner et al. (2022); Chen et al. (2022); Ridnik et al. (2021) result in ViTs that outperform all other architectures and training regimes in terms of AUROC and ECE (see Figure 1; Figure 13 in Appendix H shows this is true even after using TS) as well as for the SAC of \\(99\\%\\) we explored (see Figure 7 and Appendix I). These ViTs also outperform all other models in terms of C-OOD detection performance (Galil et al., 2023). Moreover, for any size, ViT models outperform their competition in all of these metrics (see Figures 9 and 10 in Appendix B and Figure 15 in Appendix I)." 
+ }, + { + "type": "image", + "bbox": [ + 0.292, + 0.317, + 0.709, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.529, + 0.825, + 0.6 + ], + "angle": 0, + "content": "Figure 7: Comparison of models by their overall accuracy and the coverage they are able to provide given a selective accuracy constraint of Top-1 \\(99\\%\\) on ImageNet. A higher coverage is better. Only ViT models are able to provide coverage beyond \\(30\\%\\) for this constraint. They provide more coverage than any other model compared to their accuracy or size. \"Various\" refers to all other models (out of the 523) that were not mentioned by name." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.621, + 0.827, + 0.83 + ], + "angle": 0, + "content": "Further research into other training regimes, however, reveals that not all training regimes result in superb performance (Touvron et al., 2021b; 2022; Singh et al., 2022; Paszke et al., 2019) (these ViTs are dubbed \"ViT* in the figures), even when a similar amount of data is introduced into the training and strong augmentations are used. In fact, the models trained by Chen et al. (2022) were not pretrained at all and yet reach superb ranking. Even the largest model introduced by Tran et al. (2022), which is a large modified ViT that was pretrained on JFT-4B (a dataset containing 4 billion images) with the aim of excelling in uncertainty estimation (and other areas), is outperformed by the best ViT we evaluated: Plex L achieves an AUROC of 87.7 (while its smaller versions, Plex M and Plex S, achieve an AUROC of 87.4 and 86.7, respectively), compared to 88.5 achieved by ViT-L/16-384 that has less parameters and was pretrained on ImageNet-21k. In total, 18 ViTs trained on ImageNet-21k outperform² Plex L, among which are two variations of small ViTs (each with 36 or 22 million parameters). 
In Appendix J we analyze the different hyperparameters and augmentations used for training the ViT models evaluated in this paper. Unfortunately, no clear conclusions emerge to explain the advantage of the successful training regimes. There is, however, ample evidence to show that advanced augmentations are unlikely to be part of such an explanation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.836, + 0.825, + 0.865 + ], + "angle": 0, + "content": "The above facts suggest that the excellent performance exhibited by some ViTs cannot be attributed to the amount of data or to the augmentations used during training. These observations warrant" + }, + { + "type": "page_footnote", + "bbox": [ + 0.17, + 0.872, + 0.825, + 0.926 + ], + "angle": 0, + "content": "2The authors had not released clear results for Plex ECE performance on ImageNet, making a comparison of calibration difficult. The authors mentioned that the average ECE of Plex L in CIFAR-10, CIFAR-100 and ImageNet is slightly below 0.01. Our evaluations revealed six ViTs that achieved the same results, with the most calibrated model being ViT-T/16 with an ECE of 0.0054 on ImageNet." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "additional research with the hope of either training more robust ViTs or transferring the unidentified ingredient of the successful subset of ViTs into other models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.334 + ], + "angle": 0, + "content": "4) Correlations between AUROC, ECE, accuracy and the model's size could either be positive or negative, and depend on the family of architectures evaluated. 
This observation contradicts previous smaller scale studies on calibration. While AUROC and ECE are (negatively) correlated (they have a Spearman correlation of -0.44, meaning that generally, as AUROC improves, so does ECE), their agreement on the best performing model depends greatly on the architectural family in question. For example, the Spearman correlation between the two metrics evaluated on 28 undistilled XCiTs is 0.76 (meaning ECE deteriorates as AUROC improves), while for the 33 ResNets (He et al., 2016) evaluated, the correlation is -0.74. Another general observation is that contrary to previous work by (Guo et al., 2017) concerning ECE, the correlations between ECE and the accuracy or the number of model parameters are nearly zero, although each family tends to have a strong correlation, either negative or positive. We include a family-based comparison in Appendix L for correlations between AUROC/ECE and accuracy, number of parameters and input size. These results suggest that while some architectures might utilize extra resources to achieve improved uncertainty estimation capabilities, other architectures do not and are even harmed in this respect." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.341, + 0.828, + 0.495 + ], + "angle": 0, + "content": "5) The zero-shot language-vision CLIP model is well-calibrated, with its best instance outperforming \\(96\\%\\) of all other models. CLIP (Radford et al., 2021) enables zero-shot classification and demonstrates impressive performance. We find it is also inclined to be well-calibrated. See Appendix K for details about how we use CLIP. The most calibrated CLIP is based on ViT-B/32 with a linear-probe added to it, and obtains an ECE of \\(1.3\\%\\), which outperforms \\(96\\%\\) of models evaluated. Moreover, for their size category, CLIP models tend to outperform their competition in calibration, with the exception of ViTs (see Figure 10 in Appendix B). 
While this trend is clear for zero-shot CLIPs, we note that some models' calibration performance deteriorates with the addition of a linear-probe. Further research is required to understand the ingredients of multimodal models' contribution to calibration, and to find ways to utilize them to get better calibrated models. For example, could a multimodal pretraining regime be used to get better calibrated models?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.501, + 0.828, + 0.613 + ], + "angle": 0, + "content": "6) MC dropout does not improve selective performance, in accordance with previous works. We evaluate the performance of MC dropout using predictive entropy as its confidence score and 30 dropout-enabled forward passes. We do not measure its effects on ECE since entropy scores do not reside in [0, 1]. Using MC dropout causes a consistent drop in both AUROC and selective performance compared with using the same models with softmax as the \\(\\kappa\\) (see Appendix M and Figure 4a). MC dropout's underperformance was also previously observed in (Geifman & El-Yaniv, 2017). We note, however, that evaluations we have conducted in Galil et al. (2023) show that MC dropout performs well when dealing with C-OOD data." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.635, + 0.414, + 0.65 + ], + "angle": 0, + "content": "4 CONCLUDING REMARKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.827, + 0.766 + ], + "angle": 0, + "content": "We presented a comprehensive study of the effectiveness of numerous DNN architectures (families) in providing reliable uncertainty estimation, including the impact of various techniques on improving such capabilities. 
Our study led to many new insights and perhaps the most important ones are: (1) architectures trained with distillation almost always improve their uncertainty estimation performance, (2) temperature scaling is very useful not only for calibration but also for ranking and selective prediction, and (3) no DNN (evaluated in this study) demonstrated an uncertainty estimation performance comparable—in any metric tested—to a subset of ViT models (see Section 3)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Our work leaves open many interesting avenues for future research and we would like to mention a few. Perhaps the most interesting question is why distillation is so beneficial in boosting uncertainty estimation. Next, is there an architectural secret in vision transformers (ViT) that enables their uncertainty estimation supremacy under certain training regimes? This issue is especially puzzling given the fact that comparable performance is not observed in many other supposedly similar transformer-based models that we tested. If the secret is not in the architecture, what is the mysterious ingredient of the subset of training regimes that produces such superb results, and how can it be used to train other models? Finally, can we create specialized training regimes (e.g., Geifman & El-Yaniv (2019)), specialized augmentations, special pretraining regimes (such as CLIP's multimodal training regime) or even specialized neural architecture search (NAS) strategies that can promote superior uncertainty estimation performance?" 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.357, + 0.118 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.133, + 0.762, + 0.149 + ], + "angle": 0, + "content": "This research was partially supported by the Israel Science Foundation, grant No. 710/18." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.154, + 0.826, + 0.198 + ], + "angle": 0, + "content": "We thank Prof. Rakefet Ackerman for her help with understanding how uncertainty estimation performance is evaluated for humans in the field of metacognition, and for her useful comments for Appendix E." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.133, + 0.826, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.217, + 0.287, + 0.23 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.239, + 0.826, + 0.296 + ], + "angle": 0, + "content": "Rakefet Ackerman, Avi Parush, Fareda Nassar, and Avraham Shtub. Metacognition and system usability: Incorporating metacognitive research paradigm into usability testing. Computers in Human Behavior, 54:101-113, January 2016. doi: 10.1016/j.chb.2015.07.041. URL https://doi.org/10.1016/j.chb.2015.07.041." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.303, + 0.826, + 0.358 + ], + "angle": 0, + "content": "Rakefet Ackerman, Avigdor Gal, Tomer Sagi, and Roee Shraga. A cognitive model of human bias in matching. In PRICAI 2019: Trends in Artificial Intelligence, pp. 632-646. Springer International Publishing, 2019. doi: 10.1007/978-3-030-29908-8_50. URL https://doi.org/10.1007/978-3-030-29908-8_50." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.366, + 0.825, + 0.395 + ], + "angle": 0, + "content": "Yonathan Aflalo, Asaf Noy, Ming Lin, Itamar Friedman, and Lihi Zelnik-Manor. Knapsack pruning with inner distillation. CoRR, abs/2002.08258, 2020. URL https://arxiv.org/abs/2002.08258." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.402, + 0.827, + 0.5 + ], + "angle": 0, + "content": "Alaaeldin Ali, Hugo Touvron, Mathilde Caron, Piotr Bojanowski, Matthijs Douze, Armand Joulin, Ivan Laptev, Natalia Neverova, Gabriel Synnaeve, Jakob Verbeek, and Hervé Jégou. Xcit: Cross-covariance image transformers. In Marc' Aurelio Ranzato, Alina Beygelzimer, Yann N. Dauphin, Percy Liang, and Jennifer Wortman Vaughan (eds.), Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pp. 20014-20027, 2021. URL https://proceedings.neurips.cc/paper/2021/bitize/a655fbe4b8d7439994aa37ddad80de56-AAbstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.507, + 0.827, + 0.562 + ], + "angle": 0, + "content": "Alexandra Basile, Maggie E. Toplak, and Brendan F. Andrade. Using metacognitive methods to examine emotion recognition in children with ADHD. Journal of Attention Disorders, 25(2): 245-257, November 2018. doi: 10.1177/1087054718808602. URL https://doi.org/10.1177/1087054718808602." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.571, + 0.825, + 0.599 + ], + "angle": 0, + "content": "Glenn W. Brier. Verification of Forecasts Expressed in Terms of Probability. Monthly Weather Review, 78(1):1, January 1950. doi: 10.1175/1520-0493(1950)0780001:VOFEIT2.0.CO;2." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.607, + 0.827, + 0.663 + ], + "angle": 0, + "content": "Xiangning Chen, Cho-Jui Hsieh, and Boqing Gong. When vision transformers outperform resnets without pre-training or strong data augmentations. 
In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. OpenReview.net, 2022. URL https://openreview.net/forum?id=LtKcMgGOeLt." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.67, + 0.825, + 0.698 + ], + "angle": 0, + "content": "C. K. Chow. An optimum character recognition system using decision functions. IRE Transactions on Electronic Computers, EC-6(4):247-254, 1957. doi: 10.1109/TEC.1957.5222035." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.705, + 0.827, + 0.747 + ], + "angle": 0, + "content": "L. P. Cordella, C. De Stefano, F. Tortorella, and M. Vento. A method for improving classification reliability of multilayer perceptrons. IEEE Transactions on Neural Networks, 6(5):1140-1147, 1995. doi: 10.1109/72.410358." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.755, + 0.827, + 0.797 + ], + "angle": 0, + "content": "C. De Stefano, C. Sansone, and M. Vento. To reject or not to reject: that is the question-an answer in case of neural classifiers. IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews), 30(1):84-94, 2000. doi: 10.1109/5326.827457." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.805, + 0.827, + 0.847 + ], + "angle": 0, + "content": "Yukun Ding, Jinglan Liu, Jinjun Xiong, and Yiyu Shi. Evaluation of neural network uncertainty estimation with application to resource-constrained platforms. CoRR, abs/1903.02050, 2019. URL http://arxiv.org/abs/1903.02050." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.855, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. 
In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=YicbFdNTTy." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.239, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.829, + 0.134 + ], + "angle": 0, + "content": "Ran El-Yaniv and Yair Wiener. On the foundations of noise-free selective classification. Journal of Machine Learning Research, 11(5), 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.139, + 0.83, + 0.185 + ], + "angle": 0, + "content": "Tom Fawcett. An introduction to roc analysis. Pattern Recognition Letters, 27(8):861-874, 2006. ISSN 0167-8655. doi: https://doi.org/10.1016/j.patrec.2005.10.010. URL https://www.sciencedirect.com/science/article/pii/S016786550500303X. ROC Analysis in Pattern Recognition." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.19, + 0.828, + 0.223 + ], + "angle": 0, + "content": "K. Fiedler, Rakefet Ackerman, and Chiara Scarampi. ! metacognition : Monitoring and controlling one ’ s own knowledge , reasoning and decisions. 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.226, + 0.829, + 0.285 + ], + "angle": 0, + "content": "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=6Tm1mposlrM." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.291, + 0.827, + 0.324 + ], + "angle": 0, + "content": "Jonathan Frankle and Michael Carbin. The lottery ticket hypothesis: Training pruned neural networks. CoRR, abs/1803.03635, 2018. URL http://arxiv.org/abs/1803.03635." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.328, + 0.829, + 0.401 + ], + "angle": 0, + "content": "Yarin Gal and Zoubin Ghahramani. Dropout as a bayesian approximation: Representing model uncertainty in deep learning. In Maria-Florina Balcan and Kilian Q. Weinberger (eds.), Proceedings of the 33nd International Conference on Machine Learning, ICML 2016, New York City, NY, USA, June 19-24, 2016, volume 48 of JMLR Workshop and Conference Proceedings, pp. 1050-1059. JMLR.org, 2016. URL http://proceedings.mlr.press/v48/gal16.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.406, + 0.829, + 0.452 + ], + "angle": 0, + "content": "Ido Galil, Mohammed Dabbah, and Ran El-Yaniv. A framework for benchmarking class-out-of-distribution detection and its application to imagenet. In International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=Iuubb9W6Jtk." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.457, + 0.829, + 0.543 + ], + "angle": 0, + "content": "Yonatan Geifman and Ran El-Yaniv. Selective classification for deep neural networks. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 4878-4887, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/4a8423d5e91fda00bb7e46540e2b0cf1-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.549, + 0.826, + 0.581 + ], + "angle": 0, + "content": "Yonatan Geifman and Ran El-Yaniv. 
Selectivenet: A deep neural network with an integrated reject option. CoRR, abs/1901.09192, 2019. URL http://arxiv.org/abs/1901.09192." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.586, + 0.825, + 0.617 + ], + "angle": 0, + "content": "Yonatan Geifman, Guy Uziel, and Ran El-Yaniv. Bias-reduced uncertainty estimation for deep neural classifiers. In International Conference on Learning Representations, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.623, + 0.829, + 0.668 + ], + "angle": 0, + "content": "Leo A. Goodman and William H. Kruskal. Measures of association for cross classifications. Journal of the American Statistical Association, 49(268):732-764, December 1954. doi: 10.1080/01621459.1954.10501231. URL https://doi.org/10.1080/01621459.1954.10501231." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.674, + 0.829, + 0.732 + ], + "angle": 0, + "content": "Thomas D. Griffin, Jennifer Wiley, and Keith W. Thiede. The effects of comprehension-test expectancies on metacomprehension accuracy. Journal of Experimental Psychology: Learning, Memory, and Cognition, 45(6):1066-1092, June 2019. doi: 10.1037/xlm0000634. URL https://doi.org/10.1037/xlm0000634." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.738, + 0.829, + 0.81 + ], + "angle": 0, + "content": "Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. On calibration of modern neural networks. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pp. 1321-1330. PMLR, 2017. URL http://proceedings.mlr.press/v70/guo17a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.817, + 0.829, + 0.875 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. 
In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 770-778. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.90. URL https://doi.org/10.1109/CVPR.2016.90." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.881, + 0.829, + 0.926 + ], + "angle": 0, + "content": "Philip A. Higham and D. Paul Higham. New improved gamma: Enhancing the accuracy of goodman-kruskal's gamma using ROC curves. Behavior Research Methods, 51(1):108-125, September 2018. doi: 10.3758/s13428-018-1125-5. URL https://doi.org/10.3758/s13428-018-1125-5." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.83, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.825, + 0.12 + ], + "angle": 0, + "content": "Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.13, + 0.826, + 0.175 + ], + "angle": 0, + "content": "Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, and Hartwig Adam. Searching for mobilenetv3. CoRR, abs/1905.02244, 2019. URL http://arxiv.org/abs/1905.02244." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.185, + 0.829, + 0.271 + ], + "angle": 0, + "content": "Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. 
Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6402-6413, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/9ef2ed4b7fd2c810847ffa5fa85bce38-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.281, + 0.826, + 0.311 + ], + "angle": 0, + "content": "Ming Lin, Hesen Chen, Xiuyu Sun, Qi Qian, Hao Li, and Rong Jin. Neural architecture design for gpu-efficient networks, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.322, + 0.827, + 0.351 + ], + "angle": 0, + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id=Bkg6RiCqY7." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.362, + 0.829, + 0.46 + ], + "angle": 0, + "content": "Wesley J. Maddox, Pavel Izmailov, Timur Garipov, Dmitry P. Vetrov, and Andrew Gordon Wilson. A simple baseline for bayesian uncertainty in deep learning. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 13132-13143, 2019. URL https://proceedings.neurips.cc/paper/2019/hash/118921efba23fc329e6560b27861f0c2-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.471, + 0.829, + 0.57 + ], + "angle": 0, + "content": "Dhruv Mahajan, Ross B. Girshick, Vignesh Ramanathan, Kaiming He, Manohar Paluri, Yixuan Li, Ashwin Bharambe, and Laurens van der Maaten. Exploring the limits of weakly supervised pretraining. 
In Vittorio Ferrari, Martial Hebert, Cristian Sminchisescu, and Yair Weiss (eds.), Computer Vision - ECCV 2018 - 15th European Conference, Munich, Germany, September 8-14, 2018, Proceedings, Part II, volume 11206 of Lecture Notes in Computer Science, pp. 185-201. Springer, 2018. doi: 10.1007/978-3-030-01216-8\\_12. URL https://doi.org/10.1007/978-3-030-01216-8_12." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.581, + 0.826, + 0.639 + ], + "angle": 0, + "content": "Jooyoung Moon, Jihyo Kim, Younghak Shin, and Sangheum Hwang. Confidence-aware learning for deep neural networks. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 7034-7044. PMLR, 2020. URL http://proceedings.mlr.press/v119/moon20a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.649, + 0.829, + 0.735 + ], + "angle": 0, + "content": "Zachary Nado, Neil Band, Mark Collier, Josip Djolonga, Michael W. Dusenberry, Sebastian Farquhar, Angelos Filos, Marton Havasi, Rodolphe Jenatton, Ghassen Jerfel, Jeremiah Liu, Zelda Mariet, Jeremy Nixon, Shreyas Padhy, Jie Ren, Tim G. J. Rudner, Yeming Wen, Florian Wenzel, Kevin Murphy, D. Sculley, Balaji Lakshminarayanan, Jasper Snoek, Yarin Gal, and Dustin Tran. Uncertainty baselines: Benchmarks for uncertainty & robustness in deep learning. CoRR, abs/2106.04015, 2021. URL https://arxiv.org/abs/2106.04015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.745, + 0.826, + 0.789 + ], + "angle": 0, + "content": "Mahdi Pakdaman Naeini, Gregory F. Cooper, and Milos Hauskrecht. Obtaining well calibrated probabilities using bayesian binning. In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, AAAI'15, pp. 2901-2907. AAAI Press, 2015. ISBN 0262511290." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.8, + 0.829, + 0.871 + ], + "angle": 0, + "content": "Niv Nayman, Yonathan Aflalo, Asaf Noy, and Lihi Zelnik. *Hard constrained differentiable neural architecture search*. In Marina Meila and Tong Zhang (eds.), *Proceedings of the 38th International Conference on Machine Learning*, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of *Proceedings of Machine Learning Research*, pp. 7979-7990. PMLR, 2021. URL http://proceedings.mlr.press/v139/nayman21a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Thomas O. Nelson. A comparison of current measures of the accuracy of feeling-of-knowing predictions. *Psychological Bulletin*, 95(1):109-133, 1984. doi: 10.1037/0033-2909.95.1.109. URL https://doi.org/10.1037/0033-2909.95.1.109." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.189 + ], + "angle": 0, + "content": "Jeremy Nixon, Michael W. Dusenberry, Linchuan Zhang, Ghassen Jerfel, and Dustin Tran. Measuring calibration in deep learning. In IEEE Conference on Computer Vision and Pattern Recognition Workshops, CVPR Workshops 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 38-41. Computer Vision Foundation / IEEE, 2019. URL http://openaccess.thecvf.com/content_CVPRW_2019/html/Uncertainty_and_Robustness_in_Deep_Visual_Learning/Nixon_Measuring_Calibration_in_Deep_Learning_CVPRW_2019_paper.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.2, + 0.828, + 0.313 + ], + "angle": 0, + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems 32, pp. 8024-8035. Curran Associates, Inc., 2019. URL http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.323, + 0.828, + 0.407 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8748-8763. PMLR, 2021. URL http://proceedings.mlr.press/v139/radford21a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.418, + 0.828, + 0.49 + ], + "angle": 0, + "content": "Tal Ridnik, Emanuel Ben Baruch, Asaf Noy, and Lihi Zelnik. Imagenet-21k pretraining for the masses. In Joaquin Vanschoren and Sai-Kit Yeung (eds.), Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual, 2021. 
URL https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/98f13708210194c475687be6106a3b84-Abstract-round1.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.5, + 0.828, + 0.585 + ], + "angle": 0, + "content": "Mark Sandler, Andrew G. Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh Chen. *Mobilenetv2: Inverted residuals and linear bottlenecks*. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pp. 4510-4520. Computer Vision Foundation / IEEE Computer Society, 2018. doi: 10.1109/CVPR.2018.00474. URL http://openaccess.thecvf.com/content_cvpr_2018/html/Sandler_MobileNetV2_Inverted_Residuals_CVPR_2018_paper.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.596, + 0.828, + 0.653 + ], + "angle": 0, + "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. URL http://arxiv.org/abs/1409.1556." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.664, + 0.828, + 0.721 + ], + "angle": 0, + "content": "Mannat Singh, Laura Gustafson, Aaron Adcock, Vinicius de Freitas Reis, Bugra Gedik, Raj Prateek Kosaraju, Dhruv Mahajan, Ross B. Girshick, Piotr Dollár, and Laurens van der Maaten. Revisiting weakly supervised pre-training of visual perception models. CoRR, abs/2201.08371, 2022. URL https://arxiv.org/abs/2201.08371." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.732, + 0.828, + 0.788 + ], + "angle": 0, + "content": "Andreas Peter Steiner, Alexander Kolesnikov, Xiaohua Zhai, Ross Wightman, Jakob Uszkoreit, and Lucas Beyer. How to train your vit? data, augmentation, and regularization in vision transformers. Transactions on Machine Learning Research, 2022. URL https://openreview.net/forum?id=4nPswr1KcP." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.8, + 0.828, + 0.857 + ], + "angle": 0, + "content": "Mingxing Tan and Quoc V. Le. Efficientnetv2: Smaller models and faster training. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10096-10106. PMLR, 2021. URL http://proceedings.mlr.press/v139/tan21a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.868, + 0.828, + 0.925 + ], + "angle": 0, + "content": "Hugo Touvron, Piotr Bojanowski, Mathilde Caron, Matthieu Cord, Alaaeldin El-Nouby, Edouard Grave, Armand Joulin, Gabriel Synnaeve, Jakob Verbeek, and Hervé Jégou. Resmlp: Feedforward networks for image classification with data-efficient training. CoRR, abs/2105.03404, 2021a. URL https://arxiv.org/abs/2105.03404." + }, + { + "type": "list", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.189 + ], + "angle": 0, + "content": "Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10347-10357. PMLR, 2021b. URL http://proceedings.mlr.press/v139/touvron21a.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.199, + 0.829, + 0.23 + ], + "angle": 0, + "content": "Hugo Touvron, Matthieu Cord, and Herve Jégou. Deit III: revenge of the vit. CoRR, abs/2204.07118, 2022. doi: 10.48550/arXiv.2204.07118. URL https://doi.org/10.48550/arXiv.2204.07118." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.238, + 0.829, + 0.283 + ], + "angle": 0, + "content": "Florian Tramèr, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. Ensemble adversarial training: Attacks and defenses. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=rkZvSe-RZ." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.292, + 0.829, + 0.391 + ], + "angle": 0, + "content": "Dustin Tran, Jeremiah Liu, Michael W. Dusenberry, Du Phan, Mark Collier, Jie Ren, Kehang Han, Zi Wang, Zelda Mariet, Huiyi Hu, Neil Band, Tim G. J. Rudner, Karan Singhal, Zachary Nado, Joost van Amersfoort, Andreas Kirsch, Rodolphe Jenatton, Nithum Thain, Honglin Yuan, Kelly Buchanan, Kevin Murphy, D. Sculley, Yarin Gal, Zoubin Ghahramani, Jasper Snoek, and Balaji Lakshminarayanan. Plex: Towards reliability using pretrained large model extensions. CoRR, abs/2207.07411, 2022. doi: 10.48550/arXiv.2207.07411. URL https://doi.org/10.48550/arXiv.2207.07411." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.401, + 0.829, + 0.445 + ], + "angle": 0, + "content": "Monika Undorf and Arndt Broder. Cue integration in metamemory judgements is strategic. Quarterly Journal of Experimental Psychology, 73(4):629-642, October 2019. doi: 10.1177/1747021819882308. URL https://doi.org/10.1177/1747021819882308." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.455, + 0.829, + 0.54 + ], + "angle": 0, + "content": "Juozas Vaicenavicius, David Widmann, Carl R. Andersson, Fredrik Lindsten, Jacob Roll, and Thomas B. Schön. Evaluating model calibration in classification. 
In Kamalika Chaudhuri and Masashi Sugiyama (eds.), The 22nd International Conference on Artificial Intelligence and Statistics, AISTATS 2019, 16-18 April 2019, Naha, Okinawa, Japan, volume 89 of Proceedings of Machine Learning Research, pp. 3459-3467. PMLR, 2019. URL http://proceedings.mlr.press/v89/vaicenavicius19a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.55, + 0.829, + 0.567 + ], + "angle": 0, + "content": "Ross Wightman. Pytorch image models. https://github.com/rwightman/pytorch-image-models, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.576, + 0.829, + 0.662 + ], + "angle": 0, + "content": "Cihang Xie, Mingxing Tan, Boqing Gong, Jiang Wang, Alan L. Yuille, and Quoc V. Le. Adversarial examples improve image recognition. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 816-825. Computer Vision Foundation / IEEE, 2020a. doi: 10.1109/CVPR42600.2020.00090. URL https://openaccess.thecvf.com/content_CVPR_2020/html/Xie_Adversarial_Examples_Improve_Image_Recognition_CVPR_2020_paper.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.671, + 0.829, + 0.757 + ], + "angle": 0, + "content": "Qizhe Xie, Minh-Thang Luong, Eduard H. Hovy, and Quoc V. Le. Self-training with noisy student improves imagenet classification. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 10684-10695. Computer Vision Foundation / IEEE, 2020b. doi: 10.1109/CVPR42600.2020.01070. URL https://openaccess.thecvf.com/content_CVPR_2020/html/Xie_Self-Training_With_Noisy_Student_Improves_ImageNet_Classification_CVPR_2020_paper.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.767, + 0.829, + 0.797 + ], + "angle": 0, + "content": "I. Zeki Yalniz, Hervé Jégou, Kan Chen, Manohar Paluri, and Dhruv Mahajan. Billion-scale semi-supervised learning for image classification, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.806, + 0.829, + 0.85 + ], + "angle": 0, + "content": "Fisher Yu, Dequan Wang, Evan Shelhamer, and Trevor Darrell. Deep layer aggregation. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2403-2412, 2018. doi: 10.1109/CVPR.2018.00255." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.86, + 0.829, + 0.918 + ], + "angle": 0, + "content": "Shengjia Zhao, Tengyu Ma, and Stefano Ermon. Individual calibration with randomized forecasting. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 11387-11397. PMLR, 2020. URL http://proceedings.mlr.press/v119/zhao20e.html." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.918 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.449, + 0.119 + ], + "angle": 0, + "content": "A THE INVESTMENT EXAMPLE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.134, + 0.827, + 0.302 + ], + "angle": 0, + "content": "Let us consider two classification models for the stock market that predict whether a stock's value is about to increase, decrease or remain neutral (three-class classification). Suppose that Model A has a \\(95\\%\\) true accuracy, and generates a confidence score of 0.95 on any prediction (even on misclassified instances); Model B has a \\(40\\%\\) true accuracy, but always gives a confidence score of 0.6 on correct predictions, and 0.4 on incorrect ones. 
We now try and evaluate these two models using the uncertainty metrics mentioned in Section 1 to see which can reveal Model B's superior uncertainty estimation performance. AURC will fail due to its sensitivity to accuracy (the AURC of Model B is 0.12, more than twice as bad as the AURC for Model A, which is 0.05). NLL will rank Model A four times higher (Model A's NLL is 0.23 and Model B's is 0.93). The Brier score would also much prefer Model A (giving it a score of 0.096 while giving Model B a score of 0.54). Evaluating the models' calibration with ECE will also not reveal Model B's advantages, since it is less calibrated than Model A, which has perfect calibration (Model A has an ECE of 0, and Model B has a worse ECE of 0.4)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.825, + 0.351 + ], + "angle": 0, + "content": "AUROC, on the other hand, would give Model B a perfect score of 1 and a terrible score of 0.5 to Model A. The selective risk for Model B would be better for any coverage of stock predictions below \\(40\\%\\), and for any SAC above \\(95\\%\\) the coverage for Model A would be 0, but 0.4 for Model B." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.357, + 0.828, + 0.428 + ], + "angle": 0, + "content": "Those two metrics are not perfect for any example. Let us instead compare two different models for the task of predicting the weather when we cannot abstain from making predictions. Accordingly, being required to provide an accurate probabilistic uncertainty estimation of the model's predictions, AUROC and selective risk would be meaningless (due to the model's inability to abstain in this task), but ECE or the Brier Score would better evaluate the performance the new task requires." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.447, + 0.644, + 0.464 + ], + "angle": 0, + "content": "B RANKING AND CALIBRATION VISUAL COMPARISON" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.479, + 0.827, + 0.605 + ], + "angle": 0, + "content": "A comparison of 523 models by their AUROC (\\( \\times 100 \\), higher is better) and -log(ECE) (higher is better) on ImageNet is visualized in Figure 8. An interactive version of this figure is provided as supplementary material. To compare models fairly by their size, we plot two graphs with the logarithm of the number of parameters as the X-axis, so that models sharing the same x value can be compared solely based on their y value. In Figure 9 we set the X axis to be AUROC (higher is better), and see ViTs outperform any other architecture with a comparable amount of parameters by a large margin. We can also observe that using distillation creates a consistent improvement in AUROC. In Figure 10 we set the X axis to be the negative logarithm of ECE (higher is better) and observe a very similar trend, with ViT outperforming its competition for any model size." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.625, + 0.75, + 0.657 + ], + "angle": 0, + "content": "C DEMONSTRATION OF E-AURC'S DEPENDENCE ON THE MODEL'S ACCURACY" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.673, + 0.827, + 0.773 + ], + "angle": 0, + "content": "Excess-AURC (E-AURC) was suggested by Geifman et al. (2018) as an alternative to AURC (explained in Section 2). To calculate E-AURC, two AURC scores need to be calculated: (1) \\(AURC(model)\\), the AURC value of the actual model and (2) \\(AURC(model^{*})\\), the AURC value of a hypothetical model with identical predicted labels as the first model, but that outputs confidence values that induce a perfect partial order on the instances in terms of their correctness. 
The latter means that all incorrectly predicted instances are assigned confidence values lower than the correctly predicted instances." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.778, + 0.825, + 0.849 + ], + "angle": 0, + "content": "E-AURC is then defined as \\( AURC(model) - AURC(model^{*}) \\). In essence, this metric acknowledges that given a model's accuracy, the area of \\( AURC(model^{*}) \\) is always unavoidable no matter how good the partial order is, but anything above that could have been minimized if the \\( \\kappa \\) function was better at ranking, assigning correct instances higher values than incorrect ones and inducing a better partial order over the instances." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.826, + 0.926 + ], + "angle": 0, + "content": "This metric indeed helps to reduce some of the sensitivity to accuracy suffered by AURC, and for the example presented in Section 1, E-AURC would have given a perfect score of 0 to the model inducing a perfect partial order by its confidence values (Model B). It is easy, however, to craft examples showing that E-AURC prefers models with higher accuracy, even if they have lower or equal capacity to rank." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.141, + 0.734, + 0.815 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.174, + 0.827, + 0.825, + 0.882 + ], + "angle": 0, + "content": "Figure 8: A comparison of 523 models by their AUROC (\\( \\times 100 \\), higher is better) and log(ECE) (lower is better) on ImageNet. Each marker's size is determined by the model's number of parameters. Each dotted marker represents a distilled version of the original. 
An interactive version of this figure is provided as supplementary material." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.1, + 0.764, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.377, + 0.828, + 0.408 + ], + "angle": 0, + "content": "Figure 9: A comparison of 523 models by their AUROC (\\( \\times 100 \\), higher is better) and log(number of model's parameters) on ImageNet. Each dotted marker represents a distilled version of the original." + }, + { + "type": "image", + "bbox": [ + 0.205, + 0.421, + 0.794, + 0.671 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.679, + 0.828, + 0.71 + ], + "angle": 0, + "content": "Figure 10: A comparison of 523 models by their -log(ECE) (higher is better) and log(number of model's parameters) on ImageNet. Each dotted marker represents a distilled version of the original." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.736, + 0.828, + 0.837 + ], + "angle": 0, + "content": "To demonstrate this in a simple way, let us consider two models with a complete lack of capacity to rank correct and incorrect predictions correctly, always outputting the same confidence score. Model A has an accuracy of \\(20\\%\\) (thus an error rate of \\(80\\%\\)), and Model B has an accuracy of \\(80\\%\\) (and an error rate of \\(20\\%\\)). A good ranking metric should evaluate them equally (the same way E-AURC gives the same score for two models that rank perfectly regardless of their accuracy). In Figure 11 we plot their RC curves with dashed lines, which are both straight lines due to their lack of ranking ability. 
We can calculate both of these models' AURCs, \\(AURC(modelA) = 0.8\\), \\(AURC(modelB) = 0.2\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.829, + 0.927 + ], + "angle": 0, + "content": "The next thing to calculate is the best AURC values those models could have achieved given the same accuracy if they had a perfect partial order. We plot these hypothetical models' RC curves in Figure 11 as solid lines. Their selective risk remains 0 for every coverage below their total accuracy, since these hypothetical models assigned the highest confidence to all of their correct instances first. As the coverage increases and they have no more correct instances to select, they begin to give instances that are incorrect, and thus their selective risk deteriorates for higher coverages." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.304, + 0.102, + 0.699, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.388, + 0.825, + 0.418 + ], + "angle": 0, + "content": "Figure 11: The RC curves for Models A and B are plotted with dashed lines. The RC curves for the hypothetically optimal versions of Models A and B are plotted with solid lines." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.453, + 0.825, + 0.511 + ], + "angle": 0, + "content": "Calculating both of these hypothetical models' AURCs gives us the following: \\(AURC(modelA^{*}) = 0.482\\), \\(AURC(modelB^{*}) = 0.022\\). Subtracting our results we get: E-AURC(modelA) = 0.8 - 0.482 = 0.318, E-AURC(modelB) = 0.2 - 0.022 = 0.178. Hence, E-AURC prefers Model B over Model A, even though both do not discriminate at all between incorrect and correct instances." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.54, + 0.55, + 0.556 + ], + "angle": 0, + "content": "D MORE ON THE DEFINITION OF RANKING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.577, + 0.825, + 0.62 + ], + "angle": 0, + "content": "Let us consider a finite set \\( S_{m} = \\{(x_{i},y_{i})\\}_{i = 1}^{m}\\sim P_{X,Y} \\). We assume that there are no two identical values given by \\( \\kappa \\) on \\( S_{m} \\). Such an assumption is reasonable when choosing a continuous confidence signal." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.626, + 0.825, + 0.67 + ], + "angle": 0, + "content": "We further denote \\(c\\) as the number of concordant pairs (i.e., pairs in \\(S_{m}\\) that satisfy the condition \\([\\kappa(x_{i}, \\hat{y} | f) < \\kappa(x_{j}, \\hat{y} | f) \\cap \\ell(f(x_{i}), y_{i}) > \\ell(f(x_{j}), y_{j})]\\)) and \\(d\\) as the number of discordant pairs (i.e., pairs in \\(S_{m}\\) that satisfy the condition \\([\\kappa(x_{i}, \\hat{y} | f) > \\kappa(x_{j}, \\hat{y} | f) \\cap \\ell(f(x_{i}), y_{i}) > \\ell(f(x_{j}), y_{j})]\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.675, + 0.825, + 0.706 + ], + "angle": 0, + "content": "We assume, for now, that there are no two identical values given by \\(\\ell\\) on \\(S_{m}\\). 
Accordingly, we can further develop Equation (1) from Section 2.1 using the definition of conditional probability," + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.739, + 0.697, + 0.755 + ], + "angle": 0, + "content": "\\[\n\\Pr [ \\kappa (x _ {i}, \\hat {y} | f) < \\kappa (x _ {j}, \\hat {y} | f) | \\ell (f (x _ {i}), y _ {i}) > \\ell (f (x _ {j}), y _ {j}) ] =\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.757, + 0.696, + 0.792 + ], + "angle": 0, + "content": "\\[\n\\frac {\\mathbf {P r} [ \\kappa (x _ {i} , \\hat {y} | f) < \\kappa (x _ {j} , \\hat {y} | f) \\cap \\ell (f (x _ {i}) , y _ {i}) > \\ell (f (x _ {j}) , y _ {j}) ]}{\\mathbf {P r} [ \\ell (f (x _ {i}) , y _ {i}) > \\ell (f (x _ {j}) , y _ {j}) ]},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.694, + 0.822 + ], + "angle": 0, + "content": "which can be approximated empirically, using the maximum likelihood estimator, as" + }, + { + "type": "equation", + "bbox": [ + 0.481, + 0.856, + 0.825, + 0.888 + ], + "angle": 0, + "content": "\\[\n\\frac {c}{\\binom {m} {2}}. 
\\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.91, + 0.824, + 0.926 + ], + "angle": 0, + "content": "We note that the last equation is identical to Kendall's \\(\\tau\\) up to a linear transformation, which equals" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.124, + 0.616, + 0.194 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {c - d}{\\binom {m} {2}} = \\frac {c - d + c - c}{\\binom {m} {2}} \\\\ = \\frac {2 c - (c + d)}{\\binom {m} {2}} = \\frac {2 c}{\\binom {m} {2}} - \\frac {c + d}{\\binom {m} {2}} = \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.197, + 0.616, + 0.226 + ], + "angle": 0, + "content": "\\[\n2 \\cdot \\frac {c}{\\binom {m} {2}} - 1 = 2 \\cdot [ \\text {Equation} 2 ] - 1.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.233, + 0.825, + 0.261 + ], + "angle": 0, + "content": "Otherwise, if the loss assigns two identical values to a pair of points in \\( S_{m} \\), but \\( \\kappa \\) does not, then we get:" + }, + { + "type": "equation", + "bbox": [ + 0.475, + 0.286, + 0.825, + 0.313 + ], + "angle": 0, + "content": "\\[\n\\frac {c}{c + d}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.319, + 0.741, + 0.334 + ], + "angle": 0, + "content": "which is identical to Goodman & Kruskal's \\(\\gamma\\)-correlation up to a linear transformation" + }, + { + "type": "equation", + "bbox": [ + 0.356, + 0.34, + 0.641, + 0.403 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {c - d}{c + d} = \\frac {c - d + c - c}{c + d} = \\frac {2 c - (c + d)}{c + d} = \\\\ \\frac {2 c}{c + d} - \\frac {c + d}{c + d} = 2 \\cdot [ \\text {Equation} 3 ] - 1. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.419, + 0.458, + 0.433 + ], + "angle": 0, + "content": "D.1 INEQUALITIES OF THE DEFINITION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.445, + 0.825, + 0.473 + ], + "angle": 0, + "content": "One might wonder why Equation (1) should have strict inequalities rather than non-strict ones to define ranking. As we discuss below, this would damage the definition:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.48, + 0.457, + 0.495 + ], + "angle": 0, + "content": "(1) If the losses had a non-strict inequality:" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.502, + 0.689, + 0.52 + ], + "angle": 0, + "content": "\\[\n\\Pr [ \\kappa (x _ {1}, \\hat {y} | f) < \\kappa (x _ {2}, \\hat {y} | f) | \\ell (f (x _ {1}), y _ {1}) \\geq \\ell (f (x _ {2}), y _ {2}) ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.528, + 0.825, + 0.584 + ], + "angle": 0, + "content": "Consequently, in the case of classification, for example, this probability would increase for any pairs consisting of correct instances with different confidences. This would yield no benefit in ranking between incorrect and correct instances and motivates giving different confidence values for instances with the same loss—a fact that would not truly add any value." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.591, + 0.473, + 0.606 + ], + "angle": 0, + "content": "(2) If the \\(\\kappa\\) values had a non-strict inequality:" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.613, + 0.691, + 0.631 + ], + "angle": 0, + "content": "\\[\n\\Pr [ \\kappa (x _ {1}, \\hat {y} | f) \\leq \\kappa (x _ {2}, \\hat {y} | f) | \\ell (f (x _ {1}), y _ {1}) > \\ell (f (x _ {2}), y _ {2}) ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.639, + 0.826, + 0.738 + ], + "angle": 0, + "content": "This probability would increase for any pair \\((x_{1}, x_{2})\\) such that \\(\\kappa(x_{1}, \\hat{y} | f) = \\kappa(x_{2}, \\hat{y} | f)\\) and \\(\\ell(f(x_{1})) > \\ell(f(x_{2}))\\), although \\(\\kappa\\) should have ranked \\(x_{1}\\) with a lower value. Furthermore, if a \\(\\kappa\\) function were to assign the same confidence score to all \\(x \\in \\mathcal{X}\\), then when there are no two identical values of losses, the definition's probability would be 1; otherwise, the more different values for losses there are, the larger the probability would grow. In classification with a \\(0/1\\) loss, for example, assigning the same confidence score to all instances would result in the probability being Accuracy\\((f) \\cdot (1 - Accuracy(f))\\), which is largest when Accuracy\\((f) = 0.5\\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.757, + 0.776, + 0.79 + ], + "angle": 0, + "content": "E RANKING CAPACITY COMPARISON BETWEEN HUMANS AND NEURAL NETWORKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.825, + 0.89 + ], + "angle": 0, + "content": "In the field of metacognition, interestingly, the predictive value of confidence is evaluated by two different aspects: by its ability to discriminate between correct and incorrect predictions (also known as resolution in metacognition or ranking in our context) and by its ability to give well-calibrated confidence estimations, not being over- or under-confident (Fiedler et al., 2019). These two aspects correspond perfectly with much of the research done in the deep learning field, with the nearly matching metric to AUROC of \\(\\gamma\\)-correlation (see Section 2)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.924 + ], + "angle": 0, + "content": "This allows us to compare how well humans rank predictions in various tasks versus how well models rank their own in others. Human AUROC measurements in various tasks (translated from" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "\\(\\gamma\\)-correlation) tend to range from 0.6 to 0.75 (Undorf & Broder, 2019; Basile et al., 2018; Ackerman et al., 2016), but could vary, usually towards much lower values (Griffin et al., 2019). In our comprehensive evaluation on ImageNet, AUROC ranged from 0.77 to 0.88 (with the median value being 0.85), and in CIFAR-10 these measurements jump to the range of 0.92 to 0.94." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.825, + 0.225 + ], + "angle": 0, + "content": "While such comparisons between neural networks and humans are somewhat unfair due to the great sensitivity required for the task, research that directly compares humans and machine learning algorithms performing the same task exists. For example, in Ackerman et al. (2019), algorithms far surpass even the group of highest performing individuals in terms of ranking." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.246, + 0.61, + 0.261 + ], + "angle": 0, + "content": "F CRITICISMS OF AUROC AS A RANKING METRIC" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.278, + 0.827, + 0.482 + ], + "angle": 0, + "content": "In this section, addressing the criticism of AUROC as a ranking metric, we show why AUROC does not simply reward models for having lower accuracy. The paper by Ding et al. (2019) presented a semi-artificial experiment to demonstrate that AUROC might get larger the worse the model's accuracy becomes. They consider a model \\( f \\) and its \\( \\kappa \\) function evaluated on a classification test set \\( \\mathcal{X} \\), giving each a prediction \\( \\hat{y}_f(x) \\) and a confidence score \\( \\kappa(x, \\hat{y}_f(x)|f) \\), which in this case is the model's softmax response. Let \\( \\mathcal{X}^c = \\{x^c \\in \\mathcal{X} | \\hat{y}_f(x^c) = y(x^c)\\} \\) be the set of all instances correctly predicted by the model \\( f \\), and define \\( x_{(i)}^c \\in \\mathcal{X}^c \\) to be the correct instance that received the i-lowest confidence score from \\( \\kappa \\). 
Their example continues and considers an artificial model \\( f^m \\) to be an exact clone of \\( f \\) with the following modification: for every \\( i \\leq m \\), the model \\( f^m \\) now predicts a different, incorrect label for \\( x_{(i)}^c \\); however, its given confidence score remains identical: \\( \\kappa(x_{(i)}^c, \\hat{y}_f(x_{(i)}^c)|f) = \\kappa(x_{(i)}^c, \\hat{y}_{f^m}(x_{(i)}^c)|f^m) \\). \\( f^0 \\) is exactly identical to \\( f \\), by this definition, not changing any predictions. The paper shows how an artificially created model \\( f^m \\) obtains a higher AUROC score the bigger its \\( m \\). This happens even though \"nothing\" changed but a hit to the model's accuracy performance (by making mistakes on more instances)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.488, + 0.827, + 0.675 + ], + "angle": 0, + "content": "First, to understand why this happens, let us consider \\( f^1 \\): AUROC for \\( \\kappa \\) increases the more pairs of \\( [\\kappa(x_1) < \\kappa(x_2)|\\hat{y}_f(x_1) \\neq y(x_1), \\hat{y}_f(x_2) = y(x_2)] \\) there are. The model \\( f^1 \\) is now giving an incorrect classification to \\( x_{(1)}^c \\), but this instance's position in the partial order induced by \\( \\kappa \\) has remained the same (since \\( \\kappa(x_{(1)}^c) \\) is unchanged); therefore, \\( |\\mathcal{X}^c| - 1 \\) correctly ranked pairs were added: \\( [\\kappa(x_{(1)}^c) < \\kappa(x_{(i)}^c)|\\hat{y}_f(x_{(1)}^c) \\neq y(x_{(1)}^c), \\hat{y}_f(x_{(i)}^c) = y(x_{(i)}^c)] \\) for every \\( 1 < i \\leq |\\mathcal{X}^c| \\). Nevertheless, this does not guarantee an increase to AUROC by itself: if, previously, all pairs of (correct, incorrect) instances were ranked correctly by \\( \\kappa \\), AUROC would already be 1.0 for \\( f^0 \\) and would not change for \\( f^1 \\). 
If AUROC for \\( f^1 \\) is higher than it was for \\( f^0 \\), this means there exists at least one instance \\( x^w \\) that was incorrectly predicted by the original model \\( f^0 \\) such that \\( \\kappa(x_{(1)}^c) < \\kappa(x^w) \\). Every such originally wrongly ranked pair (by \\( f^0 \\)) of \\( [\\kappa(x_{(1)}^c) < \\kappa(x^w)|\\hat{y}_f(x^w) \\neq y(x^w), \\hat{y}_f(x_{(1)}^c) = y(x_{(1)}^c)] \\) has been eliminated by \\( f^1 \\) wrongly predicting \\( x_{(1)}^c \\). This, therefore, causes AUROC to increase at the expense of the model's accuracy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.68, + 0.825, + 0.71 + ], + "angle": 0, + "content": "Such an analysis neglects many factors, which is probably why such an effect is only likely to be observed in artificial models (and not among the actual models we have empirically tested):" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.721, + 0.825, + 0.821 + ], + "angle": 0, + "content": "1. It is unreasonable to assume that the confidence score given by \\(\\kappa\\) will remain exactly the same for an instance \\(x_{(i)}^{c}\\) given it now has a different prediction. In the case of \\(\\kappa\\) being softmax, it assumes the model's logits have changed in a very precise and nontrivial manner. Additionally, by our broad definition of \\(\\kappa\\), which allows \\(\\kappa\\) to even be produced from an entirely different model than \\(f\\), \\(\\kappa\\) receives the prediction and model as a given input (and cannot change or affect either), and it is unlikely to assume changing its inputs will not change its output." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "2. Suppose we find the setting reasonable and assume we can actually create a model \\( f^m \\) as described. 
Let us observe a model \\( f^p \\) such that \\( p = \\min_{m} (\\text{AUROC of } f^m = 1) \\), meaning that \\( f^p \\) ranks its predictions perfectly, unlike the original \\( f^0 \\). Is it really true that \\( f^p \\) has no better uncertainty estimation than \\( f^0 \\)? Model \\( f^p \\) behaves very much like the investment in \"Model B\" from our example in Section 1, possessing perfect knowledge of when it is wrong and when it is correct, allowing its users risk-free classification. So, given a model \\( f \\), we can use the above process to produce an improved model \\( f^p \\), and then we can even calibrate its" + }, + { + "type": "list", + "bbox": [ + 0.211, + 0.721, + 0.827, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "\\(\\kappa\\) to output \\(0\\%\\) for all instances below its threshold and \\(100\\%\\) for all those above to produce a perfect model, which might have a small coverage but is correct every time, knows it and notifies its user when it truly knows the prediction. The increase in AUROC reflects such an improvement." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.174, + 0.827, + 0.3 + ], + "angle": 0, + "content": "Not only do we disagree with such an analysis and its conclusions, but we also have vast empirical evidence to show that AUROC does not prefer lower accuracy models unless there is a good reason for it to do so, as we demonstrate in Figure 3 (comparing EfficientNet-V2-XL to ViT-B/32-SAM). 
In fact, out of the 523 models we tested, the model with the highest AUROC also has the \\(4^{th}\\) highest accuracy of all models, and the overall Spearman correlation between AUROC and accuracy of all the models we tested is 0.03. Furthermore, Figure 3 also exemplifies why AUARC, which was suggested by the just mentioned paper as the alternative to AUROC, is a bad choice as a single number metric, and might lead us to deploy a model that has a worse selective risk for most coverages only due to its higher overall accuracy." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.324, + 0.783, + 0.34 + ], + "angle": 0, + "content": "G KNOWLEDGE DISTILLATION EFFECTS ON UNCERTAINTY ESTIMATION" + }, + { + "type": "image", + "bbox": [ + 0.206, + 0.365, + 0.794, + 0.613 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.623, + 0.825, + 0.681 + ], + "angle": 0, + "content": "Figure 12: Comparing vanilla models to those incorporating KD into their training (represented by markers with thick borders and a dot). In a pruning scenario that includes distillation, yellow markers indicate that the original model was also the teacher. The performance of each model is measured in AUROC (higher is better) and -log(ECE) (higher is better)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.701, + 0.825, + 0.828 + ], + "angle": 0, + "content": "Figure 12 compares vanilla models to those incorporating KD into their training (represented by markers with thick borders and a dot). In a pruning scenario that includes distillation, yellow markers indicate that the original model was also the teacher (Aflalo et al., 2020). While distillation using a different model tends to improve uncertainty estimation in both aspects, distillation by the model itself seems to improve only one—suggesting it is generally more beneficial to use a different model as a teacher. 
The fact that KD improves the model over its original form, however, is surprising, and implies that the distillation process itself helps uncertainty estimation. Note that although this specific method involves pruning, evaluations of models pruned without incorporating distillation (Frankle & Carbin, 2018) revealed no improvement." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.862 + ], + "angle": 0, + "content": "It seems, moreover, that the teacher does not have to be good in uncertainty estimation itself; Figure 5 in Section 3 shows this by comparing the teacher architecture and the students in each case." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "While the training method by Ridnik et al. (2021) included pretraining on ImageNet-21k and demonstrated impressive improvements, comparison of models that were pretrained on ImageNet21k (Tan & Le, 2021; Touvron et al., 2021a; 2022) with identical models that were not pretrained showed only a slight improvement in ECE, and, in fact, exhibit a degradation of AUROC (see" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Figures 4a and 4b in Section 3). This suggests that pretraining alone does not improve uncertainty estimation." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.152, + 0.664, + 0.168 + ], + "angle": 0, + "content": "H MORE INFORMATION ABOUT TEMPERATURE SCALING" + }, + { + "type": "image", + "bbox": [ + 0.24, + 0.184, + 0.761, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.463, + 0.825, + 0.52 + ], + "angle": 0, + "content": "Figure 13: A comparison of 523 models after being calibrated with TS, evaluated by their AUROC \\((\\times 100\\), higher is better) and -log(ECE) (higher is better) on ImageNet. Each marker's size is determined by the model's number of parameters. ViT models are still among the best performing architectures for all aspects of uncertainty estimation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.536, + 0.825, + 0.607 + ], + "angle": 0, + "content": "In Figure 13 we see how temperature scaling (TS) affects the overall ranking of models in terms of AUROC and ECE. While the ranking between the different architecture remains similar, the poorly performing models are much improved and minimize the gap between them and the best models. One particularly notable exception is HardCoRe-NAS (Nayman et al., 2021), with its lowest latency versions becoming the top performers in terms of ECE. In addition, models that benefit from" + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.616, + 0.794, + 0.859 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.896 + ], + "angle": 0, + "content": "Figure 14: Here the relationship between temperature and the success of TS, unlike the case for AUROC, seems unrelated." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.91, + 0.825, + 0.925 + ], + "angle": 0, + "content": "TS in terms of AUROC tend to have been assigned a temperature lower than 1 by the calibration" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.162 + ], + "angle": 0, + "content": "process (see Figure 6 in Section 3). The same, however, does not hold true for ECE (see Figure 14). This example also emphasizes the fact that models benefiting from TS in terms of AUROC do not necessarily benefit in terms of ECE, and vice versa. Therefore, determining whether to calibrate the deployed model with TS is, unfortunately, a task-specific decision." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.828, + 0.239 + ], + "angle": 0, + "content": "We perform TS as was suggested in Guo et al. (2017). For each model we take a random stratified sampling of 5,000 instances from the ImageNet validation set on which to calibrate, and reserve the remainder 45,000 instances for testing. Using the box-constrained L-BFGS (Limited-Memory Broyden-Fletcher-Goldfarb-Shanno) algorithm, we optimize for 5,000 iterations (though fewer iterations usually converge into the same temperature parameter) using a learning rate of 0.01." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.262, + 0.757, + 0.296 + ], + "angle": 0, + "content": "I ARCHITECTURE CHOICE FOR PRACTICAL DEPLOYMENT BASED ON SELECTIVE PERFORMANCE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.313, + 0.828, + 0.438 + ], + "angle": 0, + "content": "As discussed in Section 2, when we know the coverage or risk we require for deployment, the most direct metric to check is which model obtains the best risk for the coverage required (selective risk), or which model gets the largest coverage for the accuracy constraint (SAC). While each deployment scenario specifies its own constraints, for demonstration purposes we consider a scenario in which misclassifications are by far more costly than abstaining from giving correct predictions. An example of this could be classifying a huge unlabeled dataset (or cleaning bad labels from a labeled dataset). While it is desirable to assign labels to a larger portion of the dataset (or to correct more of the wrong labels), it is crucial that these labels are as accurate as possible (or that correctly labeled instances are not replaced with a bad label)." + }, + { + "type": "image", + "bbox": [ + 0.236, + 0.454, + 0.761, + 0.715 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.723, + 0.825, + 0.753 + ], + "angle": 0, + "content": "Figure 15: A comparison of 523 models by their log(number of model's parameters) and the coverage they are able to provide for a SAC of \\(99\\%\\) (higher is better) on ImageNet." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.778, + 0.825, + 0.864 + ], + "angle": 0, + "content": "To explore such a scenario, we evaluate all models on ImageNet to see which ones give us the largest coverage for a required accuracy of \\(99\\%\\). In Figure 7, Section 3 (paper's main body) we observe that of all the models studied, only ViT models are able to provide coverage beyond \\(30\\%\\) for such an extreme constraint. 
Moreover, we note that the coverage they provide is significantly larger than that given by models with comparable accuracy or size, and that ViT models that provide similar coverage to their counterparts do so with less overall accuracy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "In Figure 15 we see that not only do ViT models provide more coverage than any other model, but that they are also able to do so in any size category. To compare models fairly by their size, we present Figure 15, which sets the Y axis to be the logarithm of the number of parameters, so that models sharing the same y value can be compared solely based on their x value—which is the coverage they" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.828, + 0.173 + ], + "angle": 0, + "content": "Table 1: A comparison of different training regimes of ViTs. *The paper introducing ViTs (Dosovitskiy et al., 2021) had also trained ViT models with the JFT-300M dataset; however, their weights are unavailable to the general public. All evaluations of ViTs from that paper were conducted on ViTs pretrained on ImageNet-21k, which are publicly available. **Pretrained DeiT3 models were first pretrained with a learning rate of \\(3 \\cdot 10^{-3}\\) and then fine-tuned with a learning rate of \\(3 \\cdot 10^{-4}\\)" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.183, + 0.83, + 0.411 + ], + "angle": 0, + "content": "
RegimeViT (original)Steiner et al.Chen et al.DeiTDeiT3DeiT3 +PretrainingTorchvision
ReferenceDosovitskiy et al. (2021)Steiner et al. (2022)Chen et al. (2022)Touvron et al. (2021b)Touvron et al. (2022)Touvron et al. (2022)Paszke et al. (2019)
Pretraining datasetImageNet-21k*ImageNet-21k---ImageNet-21k-
Batch Size409640964096102420482048512
OptimizerAdamWAdamWSAMLAMBLAMBLAMBAdamW
LR3·10-33·10-33·10-31·10-33·10-33·10-3**3·10-3
LR decaycosinecosinecosinecosinecosinecosinecosine
Weight decay0.10.30.10.050.020.020.3
Warmup epochs3.43.43.455530
Label smoothing ε0.10.10.10.1X0.10.11
DropoutXXXX
Stoch. DepthXXX
Repeated AugXXXX
Gradient Clip.1.01.01.0X1.01.01.0
H. flip
Random Resized CropX
Rand AugmentXAdapt.X9/0.5XXAdapt.
3 AugmentXXXXX
LayerScaleXXXXX
Mixup alphaXAdapt.X0.80.8X0.2
Cutmix alphaXXX1.01.01.01.0
Erasing prob.XXX0.25XXX
ColorJitterXXXX0.30.3X
Test crop ratio0.8750.8750.8750.8751.01.00.875
LossCECECECEBCECECE
Superb performanceXXXX
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.436, + 0.825, + 0.465 + ], + "angle": 0, + "content": "provide for a SAC of \\(99\\%\\). We see that ViT models provide a larger coverage even when compared with models of a similar size." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.485, + 0.757, + 0.518 + ], + "angle": 0, + "content": "J COMPARISON OF VIT TRAINING REGIMES AND THEIR EFFECTS ON UNCERTAINTY ESTIMATION PERFORMANCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.534, + 0.825, + 0.59 + ], + "angle": 0, + "content": "In Table 1 we compare the different hyperparameters and augmentations used for training the ViT models evaluated in this paper, with the aim of revealing why some training regimes consistently result in superb ViTs, while others do not. An analysis of the various differences between these regimes, however, eliminates the obvious suspects." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.597, + 0.827, + 0.736 + ], + "angle": 0, + "content": "1) Pretraining, on its own, does not seem to offer an explanation: First, we analyze eight pairs of models (provided by Touvron et al. 2022) such that both models have identical architecture and training regimes, with the exception that one was pretrained on ImageNet-21k, and the other was not. Pretraining results in only a slight improvement of 0.16 in AUROC and 0.01 in ECE. Moreover, as mentioned in detail in Section 3, ViT models trained on JFT-4B (Tran et al., 2022) were outperformed by the successful ViT models evaluated in this paper, most of which were pretrained on ImageNet-21k (and even by one ViT SAM model that was not pretrained at all). Second, we note that ViTs trained with the SAM optimizer (Chen et al., 2022), and not pretrained at all, reach superb ranking (AUROC) as well. These facts lead us to conclude that pretraining, at least by itself, is not the main contributor to training successful ViTs." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.743, + 0.827, + 0.8 + ], + "angle": 0, + "content": "2) The selection of optimizers and other hyperparameters (such as learning rate, label smoothing etc.) does not seem to have a significant impact. For example, while AdamW (Loshchilov & Hutter, 2019) was used by two of the successful regimes, it was also used by Paszke et al. (2019), and on the other hand was replaced by SAM (Foret et al., 2021) in another successful training regime." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.827, + 0.863 + ], + "angle": 0, + "content": "3) Advanced augmentations are unlikely to explain the gaps in uncertainty estimation performance, as regimes producing superior ViT models (Dosovitskiy et al., 2021; Chen et al., 2022) did not use advanced augmentations (in comparison to Touvron et al. (2021b) and Touvron et al. (2022), for example)." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.597, + 0.827, + 0.863 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.869, + 0.825, + 0.926 + ], + "angle": 0, + "content": "For these reasons, for the moment, the explanation for the gap remains elusive. The only remaining \"suspect\" is the batch size used, with all successful regimes using a batch size of 4096, while others use a smaller batch size of 2048 or lower. One could argue, however, that a two-fold increase in batch size is not sufficient to explain the huge gaps in performance measured." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.102, + 0.828, + 0.145 + ], + "angle": 0, + "content": "Table 2: The relationship between uncertainty estimation performance and the model's attributes and resources (accuracy, number of parameters and input size), measured by Spearman correlation. Positive correlations indicate good utilization of resources for uncertainty estimation." + }, + { + "type": "table", + "bbox": [ + 0.17, + 0.156, + 0.828, + 0.488 + ], + "angle": 0, + "content": "
ArchitectureAUROC & Accuracy-ECE & AccuracyAUROC & #Parameters-ECE & #ParametersAUROC & Input Size-ECE & Input Size#Models Evaluated
EfficientNet-0.16-0.29-0.22-0.29-0.26-0.3850
ResNet-0.28-0.220.160.03-0.40-0.4433
ViT0.84-0.170.50-0.670.04-0.1331
XCiT distilled0.600.090.350.020.510.1228
XCiT-0.680.89-0.790.94--28
ViT*0.230.38-0.040.410.14-0.1226
SE_ResNet-0.46-0.02-0.530.20-0.02-0.3518
EfficientNetV2-0.70-0.45-0.63-0.47-0.59-0.4015
NFNet0.560.780.630.810.480.6013
Inception-0.290.09-0.430.30-0.080.2313
RegNetY-0.03-0.980.27-0.86--12
RegNetX0.20-0.960.20-0.96--12
CaT distilled0.44-0.870.35-0.870.58-0.5010
DLA0.64-0.900.77-0.90--10
MobileNetV30.370.590.420.60--10
Res2Net-0.700.27-0.680.60--9
CLIP Zero-Shot1.0-0.630.9-0.80.55-0.589
CLIP + Linear Probe0.880.260.710.10.19-0.278
VGG0.81-0.980.71-0.90--8
RepVGG-0.710.50-0.570.21--8
BiT-0.33-0.81-0.20-0.85-0.46-0.258
ResNeXt-0.960.39-0.22-0.30--7
ResNet RS0.000.79-0.180.82-0.300.827
MixConv-0.110.89-0.240.86--7
DenseNet0.43-0.140.720.12--6
HardCoReNAS-0.600.26-0.490.37--6
Swin0.710.140.770.260.410.006
ECANet-0.200.60-0.430.370.830.376
Twins-0.260.94-0.140.89--6
SWSL ResNet0.94-0.890.77-0.83--6
GENet0.50-1.000.50-1.000.87-0.876
SSL ResNet0.14-1.000.26-0.94--6
TResNet0.10-0.300.530.53-0.58-0.875
CoaT-0.100.90-0.100.50--5
LeViT distilled0.60-0.900.60-0.90--5
ResMLP0.201.000.150.97--5
MobileNetV2-0.300.00-0.210.10--5
ViT* Distilled0.8-1.00.71-0.770.22-0.774
PiT distilled1.00-1.001.00-1.00--4
PiT-0.401.00-0.401.00--4
WSP ResNeXt1.000.801.000.80--4
ResMLP distilled0.800.200.800.20--4
MnasNet0.400.200.630.95--4
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.527, + 0.78, + 0.543 + ], + "angle": 0, + "content": "K EVALUATIONS OF THE ZERO-SHOT LANGUAGE-VISION CLIP MODEL" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.567, + 0.827, + 0.693 + ], + "angle": 0, + "content": "In this section we describe how we use CLIP model and extract confidence signals from it during inference. To evaluate CLIP on ImageNet, we first prepare it following the code provided by its authors (https://github.com/openai/CLIP): The labels of ImageNet-1k are encoded into normalized embedding vectors. At inference time, the incoming image is encoded into another normalized embedding vector. A cosine similarity is then calculated between each label embedding vector and the image embedding vector, and lastly, softmax is applied. The highest score is then taken as the confidence score for that prediction. We also evaluate the same models when adding a trained \"linear-probe\" to them (as described in Radford et al. (2021), which is essentially a logistic regression head), that results in a large boost in their accuracy." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.728, + 0.795, + 0.761 + ], + "angle": 0, + "content": "L EFFECTS OF THE MODEL'S ACCURACY, NUMBER OF PARAMETERS AND INPUT SIZE ON UNCERTAINTY ESTIMATION PERFORMANCE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.785, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Table 2 shows the relationship between uncertainty estimation performance and model attributes and resources (accuracy, number of parameters and input size), measured by Spearman correlation. We measure uncertainty estimation performance by AUROC (higher is better) and -ECE (higher is better). Positive correlations indicate good utilization of resources for uncertainty estimation (for example, a positive correlation between -ECE and the number of parameters indicates that as the number of parameters increases, the calibration improves). 
An interesting observation is that distillation can drastically change the correlation between a resource and the uncertainty estimation performance metrics. For example, undistilled XCiTs have a Spearman correlation of -0.79 between their number of parameters and AUROC, indicating that more parameters are correlated with lower ranking performance, while distilled XCiTs have a correlation of 0.35 between the two." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.269, + 0.101, + 0.726, + 0.117 + ], + "angle": 0, + "content": "Table 3: Comparing using MC dropout to softmax-response (vanilla)." + }, + { + "type": "table", + "bbox": [ + 0.328, + 0.127, + 0.67, + 0.437 + ], + "angle": 0, + "content": "
ArchitectureMethodAccuracyAUROC
MobileNetV3 LargeVanilla74.0486.88
MC dropout7486.14
MobileNetV3 SmallVanilla67.6786.2
MC dropout67.5584.54
MobileNetV2Vanilla71.8886.05
MC dropout71.8184.68
VGG11Vanilla70.3786.31
MC dropout70.2184.3
VGG11 (no BatchNorm)Vanilla69.0286.19
MC dropout68.9583.94
VGG13Vanilla71.5986.3
MC dropout71.4384.37
VGG13 (no BatchNorm)Vanilla69.9386.24
MC dropout69.7184.3
VGG16Vanilla73.3686.76
MC dropout73.3385.02
VGG16 (no BatchNorm)Vanilla71.5986.63
MC dropout71.4784.97
VGG19Vanilla74.2286.52
MC dropout74.1785.06
VGG19 (no BatchNorm)Vanilla72.3886.55
MC dropout72.3784.99
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.46, + 0.798, + 0.475 + ], + "angle": 0, + "content": "M EVALUATIONS OF MONTE CARLO DROPOUT RANKING PERFORMANCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.491, + 0.825, + 0.561 + ], + "angle": 0, + "content": "MC Dropout (Gal & Ghahramani, 2016) is computed using several dropout-enabled forward passes to produce uncertainty estimates. In classification, the mean softmax score of these passes, is calculated, and then a predictive entropy score is used as the final uncertainty estimate. In our evaluations, we use 30 dropout-enabled forward passes. We do not measure MC Dropout's effect on ECE since entropy scores do not reside in \\([0,1]\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.568, + 0.825, + 0.611 + ], + "angle": 0, + "content": "We test this technique using MobileNetV3 (Howard et al., 2019), MobileNetv2 (Sandler et al., 2018) and VGG (Simonyan & Zisserman, 2015), all trained on ImageNet and taken from the PyTorch repository (Paszke et al., 2019)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.617, + 0.807, + 0.632 + ], + "angle": 0, + "content": "The results comparing these models with and without using MC dropout are provided in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.825, + 0.695 + ], + "angle": 0, + "content": "The table shows that using MC dropout causes a consistent drop in both AUROC and selective performance compared with using the same models with softmax as the \\(\\kappa\\). These results are also visualized in comparison to other methods in Figure 4a in Section 3. MC dropout underperformance in an ID setting was also previously observed in Geifman & El-Yaniv (2017)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ] +] \ No newline at end of file diff --git a/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_origin.pdf b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a253485b3d5e17cda26bd9d4bc7cbeb1937c363f --- /dev/null +++ b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/9da122df-288c-42c9-8090-73c7e3adccf9_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5026d84a96356d58cf958c136de76ab8ff954c89dd187ce95dafdffa57bb221 +size 3694592 diff --git a/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/full.md b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/full.md new file mode 100644 index 0000000000000000000000000000000000000000..942701f1fe040c2dae3714153bcef96b7379fbf7 --- /dev/null +++ b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/full.md @@ -0,0 +1,424 @@ +# WHAT CAN WE LEARN FROM THE SELECTIVE PREDICTION AND UNCERTAINTY ESTIMATION PERFORMANCE OF 523 IMAGENET CLASSIFIERS? + +Ido Galil + +Technion + +idogail.iq@gmail.com + +Mohammed Dabbah + +Amazon + +m.m.dabbah@gmail.com + +Ran El-Yaniv + +Technion, Deci.AI + +rani@cs.technion.ac.il + +# ABSTRACT + +When deployed for risk-sensitive tasks, deep neural networks must include an uncertainty estimation mechanism. 
Here we examine the relationship between deep architectures and their respective training regimes, with their corresponding selective prediction and uncertainty estimation performance. We consider some of the most popular estimation performance metrics previously proposed including AUROC, ECE, AURC as well as coverage for selective accuracy constraint. We present a novel and comprehensive study of selective prediction and the uncertainty estimation performance of 523 existing pretrained deep ImageNet classifiers that are available in popular repositories. We identify numerous and previously unknown factors that affect uncertainty estimation and examine the relationships between the different metrics. We find that distillation-based training regimes consistently yield better uncertainty estimations than other training schemes such as vanilla training, pretraining on a larger dataset and adversarial training. Moreover, we find a subset of ViT models that outperform any other models in terms of uncertainty estimation performance. For example, we discovered an unprecedented $99\%$ top-1 selective accuracy on ImageNet at $47\%$ coverage (and $95\%$ top-1 accuracy at $80\%$ ) for a ViT model, whereas a competing EfficientNet-V2-XL cannot obtain these accuracy constraints at any level of coverage. Our companion paper, also published in ICLR 2023 (Galil et al., 2023), examines the performance of these classifiers in a class-out-of-distribution setting. + +# 1 INTRODUCTION + +The excellent performance of deep neural networks (DNNs) has been demonstrated in a range of applications, including computer vision, natural language understanding and audio processing. To deploy these models successfully, it is imperative that they provide an uncertainty quantification of their predictions, either via some kind of selective prediction or a probabilistic confidence score. + +Notwithstanding, what metric should we use to evaluate the uncertainty estimation performance? 
There are many and diverse ways so the answer to this question is not obvious, and to demonstrate the difficulty, consider the case of two classification models for the stock market that predict whether a stock's value is about to increase, decrease, or remain neutral (three-class classification). Suppose that model A has a $95\%$ true accuracy, and generates a confidence score of 0.95 on every prediction (even on misclassified instances); model B has a $40\%$ true accuracy, but always gives a confidence score of 0.6 on correct predictions, and 0.4 on incorrect ones. Model B can be utilized easily to generate perfect investment decisions. Using selective prediction (El-Yaniv & Wiener, 2010; Geifman & El-Yaniv, 2017), Model B will simply reject all investments on stocks whenever the confidence score is 0.4. While model A offers many more investment opportunities, each of its predictions carries a $5\%$ risk of failure. + +Among the various metrics proposed for evaluating the performance of uncertainty estimation are: Area Under the Receiver Operating Characteristic (AUROC or AUC), Area Under the Risk-Coverage curve (AURC) (Geifman et al., 2018), selective risk or coverage for a selective accuracy constraint (SAC), Negative Log-likelihood (NLL), Expected Calibration Error (ECE), which is often used for evaluating a model's calibration (see Section 2) and Brier score (Brier, 1950). All these metrics + +![](images/fa9b91586e5aca9d13d95ff3b56f34ea71d0af6bbb95e238639c29c5ecf07ebc.jpg) +Figure 1: A comparison of 523 models by their AUROC ( $\times 100$ , higher is better) and -log(ECE) (higher is better) on ImageNet. Each marker's size is determined by the model's number of parameters. A full version graph is given in Figure 8. Distilled models are better than non-distilled ones. 
A subset of ViT models is superior to all other models for all aspects of uncertainty estimation ("ViT" in the legend, marked as a red triangle facing upwards); the performance of EfficientNet-V2 and GENet models is worse. + +are well known and are often used for comparing the uncertainty estimation performance of models (Moon et al., 2020; Nado et al., 2021; Maddox et al., 2019; Lakshminarayanan et al., 2017). Somewhat surprisingly, NLL, Brier, AURC, and ECE all fail to reveal the uncertainty superiority of Model B in our investment example (see Appendix A for the calculations). Both AUROC and SAC, on the other hand, reveal the advantage of Model B perfectly (see Appendix A for details). It is not hard to construct counterexamples where these two metrics fail and others (e.g., ECE) succeed. To sum up this brief discussion, we believe that the ultimate suitability of a performance metric should be determined by its context. If there is no specific application in mind, there is a strong incentive to examine a variety of metrics, as we choose to do in this study. + +This study evaluates the ability of 523 models from the Torchvision and Timm repositories (Paszke et al., 2019; Wightman, 2019) to estimate uncertainty1. Our study identifies several major factors that affect confidence rankings, calibration, and selective prediction, and leads to numerous empirical contributions important to selective predictions and uncertainty estimation. While no new algorithm or method is introduced in our paper, our study generates many interesting conclusions that will help practitioners achieve more powerful uncertainty estimation. Moreover, the research questions that are uncovered by our empirical study shed light on uncertainty estimation, which may stimulate the development of new methods and techniques for improving uncertainty estimation. Among the most interesting conclusions our study elicits are: + +(1) Knowledge distillation training improves estimation. 
Training regimes incorporating any kind of knowledge distillation (KD) (Hinton et al., 2015) lead to DNNs with improved uncertainty estimation performance evaluated by any metric, more than by using any other training tricks (such as pretraining on a larger dataset, adversarial training, etc.). In Galil et al. (2023) we find similar performance boosts for class-out-of-distribution (C-OOD) detection. +(2) Certain architectures are more inclined to perform better or worse at uncertainty estimation. Some architectures seem more inclined to perform well on all aspects of uncertainty estimation, e.g., a subset of vision transformers (ViTs) (Dosovitskiy et al., 2021) and the zero-shot language-vision CLIP model (Radford et al., 2021), while other architectures tend to perform worse, e.g., EfficientNet-V2 and GENet (Tan & Le, 2021; Lin et al., 2020). These results are visualized in Figure 1. In Galil et al. (2023) we find that ViTs and CLIPs are also powerful C-OOD detectors. +(3) Several training regimes result in a subset of ViTs that outperforms all other architectures and training regimes. These regimes include the original one from the paper introducing ViTs (Dosovitskiy et al., 2021; Steiner et al., 2022; Chen et al., 2022; Ridnik et al., 2021). These ViTs + +achieve the best uncertainty estimation performance on any aspect measured, both in absolute terms and per-model size (# parameters, see Figures 9 and 10 in Appendix B). + +(4) Temperature scaling improves selective and ranking performance. 
The simple post-training calibration method of temperature scaling (Guo et al., 2017), which is known to improve ECE, for the most part also improves ranking (AUROC) and selective prediction—meaning not only does it calibrate the probabilistic estimation for each individual instance, but it also improves the partial order of all instances induced by those improved estimations, pushing instances more likely to be correct to have a higher confidence score than instances less likely to be correct (see Section 3). +(5) The correlations between AUROC, ECE, accuracy and the number of parameters are dependent on the architecture analyzed. Contrary to previous work by (Guo et al., 2017), we observe that while there is a strong correlation between accuracy/number of parameters and ECE or AUROC within each specific family of models of the same architecture, the correlation flips between a strong negative and a strong positive correlation depending on the type of architecture being observed. For example, as DLA (Yu et al., 2018) architectures increase in size and accuracy, their ECE deteriorates while their AUROC improves. The exact opposite, however, can be observed in XCiTs (Ali et al., 2021) as their ECE improves with size while their AUROC deteriorates (see Appendix L). +(6) The best model in terms of AUROC or SAC is not always the best in terms of calibration, as illustrated in Figure 1, and the trade-off should be considered when choosing a model based on its application. + +# 2 HOW TO EVALUATE DEEP UNCERTAINTY ESTIMATION PERFORMANCE + +Let $\mathcal{X}$ be the input space and $\mathcal{Y}$ be the label space. Let $P(\mathcal{X},\mathcal{Y})$ be an unknown distribution over $\mathcal{X}\times \mathcal{Y}$ . A model $f$ is a prediction function $f:\mathcal{X}\to \mathcal{Y}$ , and its predicted label for an image $x$ is denoted by $\hat{y}_f(x)$ . The model's true risk w.r.t. 
$P$ is $R(f|P) = E_{P(\mathcal{X},\mathcal{Y})}[\ell (f(x),y)]$ , where $\ell :\mathcal{Y}\times \mathcal{Y}\rightarrow \mathbb{R}^{+}$ is a given loss function, for example, 0/1 loss for classification. Given a labeled set $S_{m} = \{(x_{i},y_{i})\}_{i = 1}^{m}\subseteq (\mathcal{X}\times \mathcal{Y})$ , sampled i.i.d. from $P(\mathcal{X},\mathcal{Y})$ , the empirical risk of model $f$ is $\hat{r} (f|S_m)\triangleq \frac{1}{m}\sum_{i = 1}^{m}\ell (f(x_i),y_i)$ . Following Geifman et al. (2018), for a given model $f$ we define a confidence score function $\kappa (x,\hat{y} |f)$ , where $x\in \mathcal{X}$ , and $\hat{y}\in \mathcal{Y}$ is the model's prediction for $x$ , as follows. The function $\kappa$ should quantify confidence in the prediction of $\hat{y}$ for the input $x$ , based on signals from model $f$ . This function should induce a partial order over instances in $\mathcal{X}$ . + +The most common and well-known $\kappa$ function for a classification model $f$ (with softmax at its last layer) is its softmax response values: $\kappa(x, \hat{y} | f) \triangleq f(x)_{\hat{y}}$ (Cordella et al., 1995; De Stefano et al., 2000). We chose to focus on studying uncertainty estimation performance using softmax response as the models' $\kappa$ function because of its extreme popularity, and its importance as a baseline due to its solid performance compared to other methods (Geifman & El-Yaniv, 2017; Geifman et al., 2018). While this is the main $\kappa$ we evaluate, we also test the popular uncertainty estimation technique of Monte Carlo dropout (MC dropout) (Gal & Ghahramani, 2016), which is motivated by Bayesian reasoning. Although these methods use the direct output from $f$ , $\kappa$ could be a different model unrelated to $f$ and unable to affect $f$ 's predictions. 
Note that to enable a probabilistic interpretation, $\kappa$ can only be calibrated if its values reside in $[0, 1]$ whereas for ranking and selective prediction any value in $\mathbb{R}$ can be used. + +A selective model $f$ (El-Yaniv & Wiener, 2010; Chow, 1957) uses a selection function $g: \mathcal{X} \to \{0,1\}$ to serve as a binary selector for $f$ , enabling it to abstain from giving predictions for certain inputs. $g$ can be defined by a threshold $\theta$ on the values of a $\kappa$ function such that $g_{\theta}(x|\kappa, f) = \mathbb{1}[\kappa(x, \hat{y}_f(x)|f) > \theta]$ . The performance of a selective model is measured using coverage and risk, where coverage, defined as $\phi(f, g) = E_P[g(x)]$ , is the probability mass of the non-rejected instances in $\mathcal{X}$ . The selective risk of the selective model $(f, g)$ is defined as $R(f, g) \triangleq \frac{E_P[\ell(f(x), y)\,g(x)]}{\phi(f, g)}$ . These quantities can be evaluated empirically over a finite labeled set $S_m$ , with the empirical coverage defined as $\hat{\phi}(f, g|S_m) = \frac{1}{m} \sum_{i=1}^{m} g(x_i)$ , and the empirical selective risk defined as $\hat{r}(f, g|S_m) \triangleq \frac{1}{m} \sum_{i=1}^{m} \frac{\ell(f(x_i), y_i) g(x_i)}{\hat{\phi}(f, g|S_m)}$ . Similarly, SAC is defined as the largest coverage available for a specific accuracy constraint. A way to visually inspect the behavior of a $\kappa$ function for selective prediction can be done using the risk-coverage (RC) curve (El-Yaniv & Wiener, 2010)—a curve showing the selective risk as a function of coverage, measured on some chosen test set; see Figure 2 for an
The value of the risk at each point of coverage corresponds to the selective risk of the model when rejecting inputs that are not covered at that coverage slice. e.g., the selective risk for coverage 0.8 is about $0.5\%$ , meaning that an end user setting a matching threshold would enjoy a model accuracy of $99.5\%$ on the $80\%$ of images the model would not reject. + +example. In general, though, two RC curves are not necessarily comparable if one does not fully dominate the other (Figure 3 shows an example of lack of dominance). + +The AURC and E-AURC metrics were defined by (Geifman et al., 2018) for quantifying the selective quality of $\kappa$ functions via a single number, with AURC being defined as the area under the RC curve. AURC, however, is very sensitive to the model's accuracy, and in an attempt to mitigate this, E-AURC was suggested. The latter also suffers from sensitivity to accuracy, as we demonstrate in Appendix C. The advantage of scalar metrics such as the above is that they summarize the model's overall uncertainty estimation behavior by reducing it to a single scalar. When not carefully chosen, however, these reductions could result in a loss of vital information about the problem (recall the investment example from Section 1, which is also discussed in Appendix A: reducing an RC curve to an AURC does not show that Model B has an optimal 0 risk if the coverage is smaller than 0.4). Thus, the choice of the "correct" single scalar performance metric unfortunately must be task-specific. When comparing the uncertainty estimation performance of deep architectures that exhibit different accuracies, we find that AUROC and SAC can effectively "normalize" accuracy differences that plague the usefulness of other metrics (see Figure 3). This normalization is essential in our study where we compare uncertainty performance of hundreds of models that can greatly differ in their accuracies. 
+ +For risk-sensitive deployment, let us consider the two models in Figure 3; EfficientNet-V2-XL (Tan & Le, 2021) and ViT-B/32-SAM (Chen et al., 2022). While the former model has better overall accuracy and AURC (metrics that could lead us to believe the model is best for our needs), it cannot guarantee a Top-1 ImageNet selective accuracy above $95\%$ for any coverage. ViT-B/32-SAM, on the other hand, can provide accuracies above $95\%$ for all coverages below $50\%$ . + +In applications where risk (or coverage) constraints are dictated (Geifman & El-Yaniv, 2017), the most straightforward and natural metric is SAC (or selective risk), which directly measures the coverage (resp., risk) given at the required level of risk (resp., coverage) constraint. We demonstrate this in Appendix I, evaluating which models give the most coverage for an ambitious SAC of $99\%$ . If instead a specific range of coverages is specified, we could measure the area under the RC curve for those coverages: $\mathrm{AURC}_{\mathcal{C}}(\kappa, f|S_m) = \frac{1}{|\mathcal{C}|}\sum_{c\in \mathcal{C}}\hat{r} (f,g_c|S_m)$ , with $\mathcal{C}$ being those required coverages. + +Often, these requirements are not known or can change as a result of changing circumstances or individual needs. Also, using metrics sensitive to accuracy such as AURC makes designing architectures and methods to improve $\kappa$ very hard, since an improvement in these metrics could be attributed to either an increase in overall accuracy (if such occurred) or to a real improvement in the model's ranking performance. Lastly, some tasks might not allow the model to abstain from making predictions at all, but instead require interpretable and well-calibrated probabilities of correctness, which could be measured using ECE. 
+ +![](images/314ef4f65139873f967cdbaaab5f81a96139c78a6a5ff353109de204f28481d9.jpg) +Figure 3: A comparison of RC curves made by the best (ViT-L/16-384) and worst (EfficientNet-V2-XL) models we evaluated in terms of AUROC. Comparing ViT-B/32-SAM to EfficientNet-V2 exemplifies the fact that neither accuracy nor AURC reflect selective performance well enough. + +# 2.1 MEASURING RANKING AND CALIBRATION + +A $\kappa$ function is not necessarily able to change the model's predictions. Therefore, it can improve the selective risk by ranking correct and incorrect predictions better, inducing a more accurate partial order over instances in $\mathcal{X}$ . Thus, for every two random samples $(x_{1},y_{1}),(x_{2},y_{2})\sim P(\mathcal{X},\mathcal{Y})$ and given that $\ell (f(x_1),y_1) > \ell (f(x_2),y_2)$ , the ranking performance of $\kappa$ is defined as the probability that $\kappa$ ranks $x_{2}$ higher than $x_{1}$ : + +$$ +\Pr \left[ \kappa \left(x _ {1}, \hat {y} \mid f\right) < \kappa \left(x _ {2}, \hat {y} \mid f\right) \mid \ell \left(f \left(x _ {1}\right), y _ {1}\right) > \ell \left(f \left(x _ {2}\right), y _ {2}\right) \right] \tag {1} +$$ + +We discuss this definition in greater detail in Appendix D. The AUROC metric is often used in the field of machine learning. When the $0/1$ loss is in play, it is known that AUROC in fact equals the probability in Equation (1) (Fawcett, 2006) and thus is a proper metric to measure ranking in classification (AKA discrimination). AUROC is furthermore equivalent to Goodman and Kruskal's $\gamma$ -correlation (Goodman & Kruskal, 1954), which for decades has been extensively used to measure ranking (known as "resolution") in the field of metacognition (Nelson, 1984). The precise relationship between $\gamma$ -correlation and AUROC is $\gamma = 2 \cdot \text{AUROC} - 1$ (Higham & Higham, 2018). 
We note also that both the $\gamma$-correlation and AUROC are nearly identical or closely related to various other correlations and metrics; $\gamma$-correlation (AUROC) becomes identical to Kendall's $\tau$ (up to a linear transformation) in the absence of tied values. Both metrics are also closely related to rank-biserial correlation, the Gini coefficient (not to be confused with the measure from economics) and the Mann-Whitney $U$ test, hinting at their importance and usefulness in a variety of fields and settings. In Appendix E, we briefly compare the ranking performance of deep neural networks and humans in metacognitive research, and in Appendix F we address criticism of using AUROC to measure ranking. + +The most widely used metric for calibration is ECE (Naeini et al., 2015). For a finite test set of size $N$ , ECE is calculated by grouping all instances into $m$ interval bins (such that $m \ll N$ ), each of size $\frac{1}{m}$ (the confidence interval of bin $B_j$ is $\left(\frac{j-1}{m}, \frac{j}{m}\right]$ ). With $\mathrm{acc}(B_j)$ being the mean accuracy in bin $B_j$ and $\mathrm{conf}(B_j)$ being its mean confidence, ECE is defined as + +$$ +\begin{array}{l} ECE = \sum_{j = 1}^{m} \frac{|B_j|}{N} \left| \sum_{i \in B_j} \frac{\mathbf{1}[\hat{y}_f(x_i) = y_i]}{|B_j|} - \sum_{i \in B_j} \frac{\kappa(x_i, \hat{y}_f(x_i)|f)}{|B_j|} \right| \\ = \sum_{j = 1}^{m} \frac{|B_j|}{N} \left| \operatorname{acc}\left(B_j\right) - \operatorname{conf}\left(B_j\right) \right| \\ \end{array} +$$ + +Since ECE is widely accepted we use it here to evaluate calibration, and follow (Guo et al., 2017) in setting the number of bins to $m = 15$ . Many alternatives to ECE exist, allowing an adaptive binning scheme, evaluating the calibration on the non-chosen labels as well, and other various methods (Nixon et al., 2019; Vaicenavicius et al., 2019; Zhao et al., 2020). 
Relevant to our objective is that by using binning, this metric is not affected by the overall accuracy as is the Brier score (mentioned in Section 1), for example. + +# 3 PERFORMANCE ANALYSIS + +![](images/e16ae8fc794f6b8b3e22af3848104c33e5b6c16d8f8eeae744aa372c7997e6e9.jpg) + +![](images/1fcc5c597d87ea2c03dd757c10dd6ecb06163e6450ec487a274a811e9adb6b18.jpg) +(a) +(b) +Figure 4: A comparison of different methods and their improvement in terms of (a) AUROC and (b) ECE, relative to the same model's performance without employing the method. Markers above the x-axis represent models that benefited from the evaluated method, and vice versa. The numbers in the legend to the right of each method indicate the number of pairs compared. Temperature scaling can sometimes harm ECE, even though its purpose is to improve it. + +In this section we study the performance of 523 different models (available in timm 0.4.12 and torchvision 0.10). Note that all figures from our analysis are available as interactive plotly plots in the supplementary material, which provides information about every data point. + +1) Among the training regimes evaluated, knowledge distillation improves performance the most. We evaluated several training regimes: (a) Training that involves KD in any form, including Touvron et al. (2021b), knapsack pruning with distillation (in which the teacher is the original unpruned model) (Aflalo et al., 2020) and a pretraining technique that employs distillation (Ridnik et al., 2021); (b) adversarial training (Xie et al., 2020a; Tramère et al., 2018); (c) pretraining on ImageNet21k ("pure", with no additions) (Tan & Le, 2021; Touvron et al., 2021a; 2022); and (d) various forms of weakly or semi-supervised learning (Mahajan et al., 2018; Yalniz et al., 2019; Xie et al., 2020b). 
To make a fair comparison, we only compare pairs of models such that both models have identical architectures and training regimes, with the exception of the method itself being evaluated (e.g., training with or without knowledge distillation). More information about each data point of comparison is available in the supplementary material. Note that the samples are of various sizes due to the different number of potential models available for each. + +Of the methods mentioned above, training methods incorporating distillation improve AUROC and ECE the most. For example, looking at Figure 4a, it is evident that distillation (purple box) almost + +![](images/e134eba07fd3104387ff4199406165c294cd546e1215ccc3c7efaa9c54c30250.jpg) +Figure 5: Comparing teacher models (yellow markers) to their KD students (represented by markers with thick borders and a dot). The performance of each model is measured in AUROC (higher is better) and -log(ECE) (higher is better). + +always improves AUROC, and moreover, its median improvement is the best of all techniques evaluated. The same observation can be made with regards to improving ECE; see Figure 4b. Distillation seems to greatly improve both metrics even when the teacher itself is much worse at both metrics. Figure 5 nicely shows this by comparing the teacher architecture and the students in each case. Additionally, in a pruning scenario that included distillation in which the original model was also the teacher (Aflalo et al., 2020), the pruned models outperformed their teachers. The fact that KD improves the model over its original form is surprising, and suggests that the distillation process itself helps uncertainty estimation. In Galil et al. (2023) we find that KD also improves C-OOD detection performance, measured by AUROC. We discuss these effects in greater detail in Appendix G. + +2) Temperature scaling greatly benefits AUROC and selective prediction. 
Evaluations of the simple post-training calibration method of temperature scaling (TS) (Guo et al., 2017), which is widely known to improve ECE without changing the model's accuracy, also revealed several interesting facts: (a) TS consistently and greatly improves AUROC and selective performance (see Figure 4a)—meaning not only does TS calibrate the probabilistic estimation for each individual instance, but it also improves the partial order of all instances induced by those improved estimations. While TS is well known and used for calibration, to the best of our knowledge, its benefits for selective prediction were previously unknown. (b) While TS is usually beneficial, it could harm some models (see Figures 4a and 4b). While it is surprising that TS—a calibration method—would harm ECE, this phenomenon is explained by the fact that TS optimizes NLL and not ECE (to avoid trivial solutions), and the two may sometimes misalign. (c) Models that benefit from TS in terms + +![](images/442e9c2100ba0d0ccdbb6ae60acbe3de55ee92187c02b65bf18b506a5f49b3ae.jpg) +Figure 6: Out of 523 models evaluated, models that were assigned a temperature higher than 1 by the calibration process tended to degrade in AUROC performance rather than improve. Markers above the x-axis represent models that benefited from TS, and vice versa. + +of AUROC tend to have been assigned a temperature smaller than 1 by the calibration process (see Figure 6). This, however, does not hold true for ECE (see Figure 14 in Appendix H). This example + +also emphasizes the fact that improvements in terms of AUROC do not necessarily translate into improvements in ECE, and vice versa. (d) While all models usually improve with TS, the overall ranking of uncertainty performance between families tends to stay similar, with the worse (in terms of ECE and AUROC) models closing most of the gap between them and the mediocre ones (see Figure 13 in Appendix H). 
+ +3) A subset of ViTs outperforms all other architectures in selective prediction, ranking and calibration, both in absolute terms and per-model size. Several training regimes (including the original regime from the paper introducing ViT) Dosovitskiy et al. (2021); Steiner et al. (2022); Chen et al. (2022); Ridnik et al. (2021) result in ViTs that outperform all other architectures and training regimes in terms of AUROC and ECE (see Figure 1; Figure 13 in Appendix H shows this is true even after using TS) as well as for the SAC of $99\%$ we explored (see Figure 7 and Appendix I). These ViTs also outperform all other models in terms of C-OOD detection performance (Galil et al., 2023). Moreover, for any size, ViT models outperform their competition in all of these metrics (see Figures 9 and 10 in Appendix B and Figure 15 in Appendix I). + +![](images/37fbae763d641ce784deabef302abec247b021eabe92d823ac12a20aa1e46db4.jpg) +Figure 7: Comparison of models by their overall accuracy and the coverage they are able to provide given a selective accuracy constraint of Top-1 $99\%$ on ImageNet. A higher coverage is better. Only ViT models are able to provide coverage beyond $30\%$ for this constraint. They provide more coverage than any other model compared to their accuracy or size. "Various" refers to all other models (out of the 523) that were not mentioned by name. + +Further research into other training regimes, however, reveals that not all training regimes result in superb performance (Touvron et al., 2021b; 2022; Singh et al., 2022; Paszke et al., 2019) (these ViTs are dubbed "ViT* in the figures), even when a similar amount of data is introduced into the training and strong augmentations are used. In fact, the models trained by Chen et al. (2022) were not pretrained at all and yet reach superb ranking. Even the largest model introduced by Tran et al. 
(2022), which is a large modified ViT that was pretrained on JFT-4B (a dataset containing 4 billion images) with the aim of excelling in uncertainty estimation (and other areas), is outperformed by the best ViT we evaluated: Plex L achieves an AUROC of 87.7 (while its smaller versions, Plex M and Plex S, achieve an AUROC of 87.4 and 86.7, respectively), compared to 88.5 achieved by ViT-L/16-384 that has less parameters and was pretrained on ImageNet-21k. In total, 18 ViTs trained on ImageNet-21k outperform² Plex L, among which are two variations of small ViTs (each with 36 or 22 million parameters). In Appendix J we analyze the different hyperparameters and augmentations used for training the ViT models evaluated in this paper. Unfortunately, no clear conclusions emerge to explain the advantage of the successful training regimes. There is, however, ample evidence to show that advanced augmentations are unlikely to be part of such an explanation. + +The above facts suggest that the excellent performance exhibited by some ViTs cannot be attributed to the amount of data or to the augmentations used during training. These observations warrant + +additional research with the hope of either training more robust ViTs or transferring the unidentified ingredient of the successful subset of ViTs into other models. + +4) Correlations between AUROC, ECE, accuracy and the model's size could either be positive or negative, and depend on the family of architectures evaluated. This observation contradicts previous smaller scale studies on calibration. While AUROC and ECE are (negatively) correlated (they have a Spearman correlation of -0.44, meaning that generally, as AUROC improves, so does ECE), their agreement on the best performing model depends greatly on the architectural family in question. 
For example, the Spearman correlation between the two metrics evaluated on 28 undistilled XCiTs is 0.76 (meaning ECE deteriorates as AUROC improves), while for the 33 ResNets (He et al., 2016) evaluated, the correlation is -0.74. Another general observation is that contrary to previous work by (Guo et al., 2017) concerning ECE, the correlations between ECE and the accuracy or the number of model parameters are nearly zero, although each family tends to have a strong correlation, either negative or positive. We include a family-based comparison in Appendix L for correlations between AUROC/ECE and accuracy, number of parameters and input size. These results suggest that while some architectures might utilize extra resources to achieve improved uncertainty estimation capabilities, other architectures do not and are even harmed in this respect. + +5) The zero-shot language-vision CLIP model is well-calibrated, with its best instance outperforming $96\%$ of all other models. CLIP (Radford et al., 2021) enables zero-shot classification and demonstrates impressive performance. We find it is also inclined to be well-calibrated. See Appendix K for details about how we use CLIP. The most calibrated CLIP is based on ViT-B/32 with a linear-probe added to it, and obtains an ECE of $1.3\%$ , which outperforms $96\%$ of models evaluated. Moreover, for their size category, CLIP models tend to outperform their competition in calibration, with the exception of ViTs (see Figure 10 in Appendix B). While this trend is clear for zero-shot CLIPs, we note that some models' calibration performance deteriorates with the addition of a linear-probe. Further research is required to understand the ingredients of multimodal models' contribution to calibration, and to find ways to utilize them to get better calibrated models. For example, could a multimodal pretraining regime be used to get better calibrated models? 
+ +6) MC dropout does not improve selective performance, in accordance with previous works. We evaluate the performance of MC dropout using predictive entropy as its confidence score and 30 dropout-enabled forward passes. We do not measure its effects on ECE since entropy scores do not reside in [0, 1]. Using MC dropout causes a consistent drop in both AUROC and selective performance compared with using the same models with softmax as the $\kappa$ (see Appendix M and Figure 4a). MC dropout's underperformance was also previously observed in (Geifman & El-Yaniv, 2017). We note, however, that evaluations we have conducted in Galil et al. (2023) show that MC dropout performs well when dealing with C-OOD data. + +# 4 CONCLUDING REMARKS + +We presented a comprehensive study of the effectiveness of numerous DNN architectures (families) in providing reliable uncertainty estimation, including the impact of various techniques on improving such capabilities. Our study led to many new insights and perhaps the most important ones are: (1) architectures trained with distillation almost always improve their uncertainty estimation performance, (2) temperature scaling is very useful not only for calibration but also for ranking and selective prediction, and (3) no DNN (evaluated in this study) demonstrated an uncertainty estimation performance comparable—in any metric tested—to a subset of ViT models (see Section 3). + +Our work leaves open many interesting avenues for future research and we would like to mention a few. Perhaps the most interesting question is why distillation is so beneficial in boosting uncertainty estimation. Next, is there an architectural secret in vision transformers (ViT) that enables their uncertainty estimation supremacy under certain training regimes? This issue is especially puzzling given the fact that comparable performance is not observed in many other supposedly similar transformer-based models that we tested. 
If the secret is not in the architecture, what is the mysterious ingredient of the subset of training regimes that produces such superb results, and how can it be used to train other models? Finally, can we create specialized training regimes (e.g., Geifman & El-Yaniv (2019)), specialized augmentations, special pretraining regimes (such as CLIP's multimodal training regime) or even specialized neural architecture search (NAS) strategies that can promote superior uncertainty estimation performance? + +# ACKNOWLEDGMENTS + +This research was partially supported by the Israel Science Foundation, grant No. 710/18. +We thank Prof. Rakefet Ackerman for her help with understanding how uncertainty estimation performance is evaluated for humans in the field of metacognition, and for her useful comments for Appendix E. + +# REFERENCES + +Rakefet Ackerman, Avi Parush, Fareda Nassar, and Avraham Shtub. Metacognition and system usability: Incorporating metacognitive research paradigm into usability testing. Computers in Human Behavior, 54:101-113, January 2016. doi: 10.1016/j.chb.2015.07.041. URL https://doi.org/10.1016/j.chb.2015.07.041. +Rakefet Ackerman, Avigdor Gal, Tomer Sagi, and Roee Shraga. A cognitive model of human bias in matching. In PRICAI 2019: Trends in Artificial Intelligence, pp. 632-646. Springer International Publishing, 2019. doi: 10.1007/978-3-030-29908-8_50. URL https://doi.org/10.1007/978-3-030-29908-8_50. +Yonathan Aflalo, Asaf Noy, Ming Lin, Itamar Friedman, and Lihi Zelnik-Manor. Knapsack pruning with inner distillation. CoRR, abs/2002.08258, 2020. URL https://arxiv.org/abs/2002.08258. +Alaaeldin Ali, Hugo Touvron, Mathilde Caron, Piotr Bojanowski, Matthijs Douze, Armand Joulin, Ivan Laptev, Natalia Neverova, Gabriel Synnaeve, Jakob Verbeek, and Hervé Jégou. Xcit: Cross-covariance image transformers. In Marc' Aurelio Ranzato, Alina Beygelzimer, Yann N. 
Dauphin, Percy Liang, and Jennifer Wortman Vaughan (eds.), Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pp. 20014-20027, 2021. URL https://proceedings.neurips.cc/paper/2021/hash/a655fbe4b8d7439994aa37ddad80de56-Abstract.html. +Alexandra Basile, Maggie E. Toplak, and Brendan F. Andrade. Using metacognitive methods to examine emotion recognition in children with ADHD. Journal of Attention Disorders, 25(2): 245-257, November 2018. doi: 10.1177/1087054718808602. URL https://doi.org/10.1177/1087054718808602. +Glenn W. Brier. Verification of Forecasts Expressed in Terms of Probability. Monthly Weather Review, 78(1):1, January 1950. doi: 10.1175/1520-0493(1950)078<0001:VOFEIT>2.0.CO;2. +Xiangning Chen, Cho-Jui Hsieh, and Boqing Gong. When vision transformers outperform resnets without pre-training or strong data augmentations. In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. OpenReview.net, 2022. URL https://openreview.net/forum?id=LtKcMgGOeLt. +C. K. Chow. An optimum character recognition system using decision functions. IRE Transactions on Electronic Computers, EC-6(4):247-254, 1957. doi: 10.1109/TEC.1957.5222035. +L. P. Cordella, C. De Stefano, F. Tortorella, and M. Vento. A method for improving classification reliability of multilayer perceptrons. IEEE Transactions on Neural Networks, 6(5):1140-1147, 1995. doi: 10.1109/72.410358. +C. De Stefano, C. Sansone, and M. Vento. To reject or not to reject: that is the question-an answer in case of neural classifiers. IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews), 30(1):84-94, 2000. doi: 10.1109/5326.827457. +Yukun Ding, Jinglan Liu, Jinjun Xiong, and Yiyu Shi. Evaluation of neural network uncertainty estimation with application to resource-constrained platforms. CoRR, abs/1903.02050, 2019. 
URL http://arxiv.org/abs/1903.02050. +Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=YicbFdNTTy. + +Ran El-Yaniv and Yair Wiener. On the foundations of noise-free selective classification. Journal of Machine Learning Research, 11(5), 2010. +Tom Fawcett. An introduction to roc analysis. Pattern Recognition Letters, 27(8):861-874, 2006. ISSN 0167-8655. doi: https://doi.org/10.1016/j.patrec.2005.10.010. URL https://www.sciencedirect.com/science/article/pii/S016786550500303X. ROC Analysis in Pattern Recognition. +K. Fiedler, Rakefet Ackerman, and Chiara Scarampi. Metacognition: Monitoring and controlling one's own knowledge, reasoning and decisions. 2019. +Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=6Tm1mposlrM. +Jonathan Frankle and Michael Carbin. The lottery ticket hypothesis: Training pruned neural networks. CoRR, abs/1803.03635, 2018. URL http://arxiv.org/abs/1803.03635. +Yarin Gal and Zoubin Ghahramani. Dropout as a bayesian approximation: Representing model uncertainty in deep learning. In Maria-Florina Balcan and Kilian Q. Weinberger (eds.), Proceedings of the 33rd International Conference on Machine Learning, ICML 2016, New York City, NY, USA, June 19-24, 2016, volume 48 of JMLR Workshop and Conference Proceedings, pp. 1050-1059. JMLR.org, 2016. URL http://proceedings.mlr.press/v48/gal16.html. 
+Ido Galil, Mohammed Dabbah, and Ran El-Yaniv. A framework for benchmarking class-out-of-distribution detection and its application to imagenet. In International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=Iuubb9W6Jtk. +Yonatan Geifman and Ran El-Yaniv. Selective classification for deep neural networks. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 4878-4887, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/4a8423d5e91fda00bb7e46540e2b0cf1-Abstract.html. +Yonatan Geifman and Ran El-Yaniv. Selectivenet: A deep neural network with an integrated reject option. CoRR, abs/1901.09192, 2019. URL http://arxiv.org/abs/1901.09192. +Yonatan Geifman, Guy Uziel, and Ran El-Yaniv. Bias-reduced uncertainty estimation for deep neural classifiers. In International Conference on Learning Representations, 2018. +Leo A. Goodman and William H. Kruskal. Measures of association for cross classifications. Journal of the American Statistical Association, 49(268):732-764, December 1954. doi: 10.1080/01621459.1954.10501231. URL https://doi.org/10.1080/01621459.1954.10501231. +Thomas D. Griffin, Jennifer Wiley, and Keith W. Thiede. The effects of comprehension-test expectancies on metacomprehension accuracy. Journal of Experimental Psychology: Learning, Memory, and Cognition, 45(6):1066-1092, June 2019. doi: 10.1037/xlm0000634. URL https://doi.org/10.1037/xlm0000634. +Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. On calibration of modern neural networks. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pp. 1321-1330. 
PMLR, 2017. URL http://proceedings.mlr.press/v70/guo17a.html. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 770-778. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.90. URL https://doi.org/10.1109/CVPR.2016.90. +Philip A. Higham and D. Paul Higham. New improved gamma: Enhancing the accuracy of goodman-kruskal's gamma using ROC curves. Behavior Research Methods, 51(1):108-125, September 2018. doi: 10.3758/s13428-018-1125-5. URL https://doi.org/10.3758/s13428-018-1125-5. + +Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network, 2015. +Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, and Hartwig Adam. Searching for mobilenetv3. CoRR, abs/1905.02244, 2019. URL http://arxiv.org/abs/1905.02244. +Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6402-6413, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/9ef2ed4b7fd2c810847ffa5fa85bce38-Abstract.html. +Ming Lin, Hesen Chen, Xiuyu Sun, Qi Qian, Hao Li, and Rong Jin. Neural architecture design for GPU-efficient networks, 2020. +Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id=Bkg6RiCqY7. +Wesley J. Maddox, Pavel Izmailov, Timur Garipov, Dmitry P. Vetrov, and Andrew Gordon Wilson. 
A simple baseline for bayesian uncertainty in deep learning. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 13132-13143, 2019. URL https://proceedings.neurips.cc/paper/2019/hash/118921efba23fc329e6560b27861f0c2-Abstract.html. +Dhruv Mahajan, Ross B. Girshick, Vignesh Ramanathan, Kaiming He, Manohar Paluri, Yixuan Li, Ashwin Bharambe, and Laurens van der Maaten. Exploring the limits of weakly supervised pretraining. In Vittorio Ferrari, Martial Hebert, Cristian Sminchisescu, and Yair Weiss (eds.), Computer Vision - ECCV 2018 - 15th European Conference, Munich, Germany, September 8-14, 2018, Proceedings, Part II, volume 11206 of Lecture Notes in Computer Science, pp. 185-201. Springer, 2018. doi: 10.1007/978-3-030-01216-8_12. URL https://doi.org/10.1007/978-3-030-01216-8_12. +Jooyoung Moon, Jihyo Kim, Younghak Shin, and Sangheum Hwang. Confidence-aware learning for deep neural networks. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 7034-7044. PMLR, 2020. URL http://proceedings.mlr.press/v119/moon20a.html. +Zachary Nado, Neil Band, Mark Collier, Josip Djolonga, Michael W. Dusenberry, Sebastian Farquhar, Angelos Filos, Marton Havasi, Rodolphe Jenatton, Ghassen Jerfel, Jeremiah Liu, Zelda Mariet, Jeremy Nixon, Shreyas Padhy, Jie Ren, Tim G. J. Rudner, Yeming Wen, Florian Wenzel, Kevin Murphy, D. Sculley, Balaji Lakshminarayanan, Jasper Snoek, Yarin Gal, and Dustin Tran. Uncertainty baselines: Benchmarks for uncertainty & robustness in deep learning. CoRR, abs/2106.04015, 2021. URL https://arxiv.org/abs/2106.04015. +Mahdi Pakdaman Naeini, Gregory F. Cooper, and Milos Hauskrecht. 
Obtaining well calibrated probabilities using bayesian binning. In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, AAAI'15, pp. 2901-2907. AAAI Press, 2015. ISBN 0262511290. +Niv Nayman, Yonathan Aflalo, Asaf Noy, and Lihi Zelnik. *Hard constrained differentiable neural architecture search*. In Marina Meila and Tong Zhang (eds.), *Proceedings of the 38th International Conference on Machine Learning*, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of *Proceedings of Machine Learning Research*, pp. 7979-7990. PMLR, 2021. URL http://proceedings.mlr.press/v139/nayman21a.html. +Thomas O. Nelson. A comparison of current measures of the accuracy of feeling-of-knowing predictions. *Psychological Bulletin*, 95(1):109-133, 1984. doi: 10.1037/0033-2909.95.1.109. URL https://doi.org/10.1037/0033-2909.95.1.109. + +Jeremy Nixon, Michael W. Dusenberry, Linchuan Zhang, Ghassen Jerfel, and Dustin Tran. Measuring calibration in deep learning. In IEEE Conference on Computer Vision and Pattern Recognition Workshops, CVPR Workshops 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 38-41. Computer Vision Foundation / IEEE, 2019. URL http://openaccess.thecvf.com/content_CVPRW_2019/html/Uncertainty_and_Robustness_in_Deep_Visual_Learning/Nixon_Measuring_Calibration_in_Deep_Learning_CVPRW_2019_paper.html. +Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems 32, pp. 8024-8035. Curran Associates, Inc., 2019. 
URL http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf. +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8748-8763. PMLR, 2021. URL http://proceedings.mlr.press/v139/radford21a.html. +Tal Ridnik, Emanuel Ben Baruch, Asaf Noy, and Lihi Zelnik. Imagenet-21k pretraining for the masses. In Joaquin Vanschoren and Sai-Kit Yeung (eds.), Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual, 2021. URL https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/98f13708210194c475687be6106a3b84-Abstract-round1.html. +Mark Sandler, Andrew G. Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh Chen. *Mobilenetv2: Inverted residuals and linear bottlenecks*. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pp. 4510-4520. Computer Vision Foundation / IEEE Computer Society, 2018. doi: 10.1109/CVPR.2018.00474. URL http://openaccess.thecvf.com/content_cvpr_2018/html/Sandler_MobileNetV2_Inverted_Residuals_CVPR_2018_paper.html. +Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. URL http://arxiv.org/abs/1409.1556. 
+Mannat Singh, Laura Gustafson, Aaron Adcock, Vinicius de Freitas Reis, Bugra Gedik, Raj Prateek Kosaraju, Dhruv Mahajan, Ross B. Girshick, Piotr Dollár, and Laurens van der Maaten. Revisiting weakly supervised pre-training of visual perception models. CoRR, abs/2201.08371, 2022. URL https://arxiv.org/abs/2201.08371. +Andreas Peter Steiner, Alexander Kolesnikov, Xiaohua Zhai, Ross Wightman, Jakob Uszkoreit, and Lucas Beyer. How to train your vit? data, augmentation, and regularization in vision transformers. Transactions on Machine Learning Research, 2022. URL https://openreview.net/forum?id=4nPswr1KcP. +Mingxing Tan and Quoc V. Le. Efficientnetv2: Smaller models and faster training. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10096-10106. PMLR, 2021. URL http://proceedings.mlr.press/v139/tan21a.html. +Hugo Touvron, Piotr Bojanowski, Mathilde Caron, Matthieu Cord, Alaaeldin El-Nouby, Edouard Grave, Armand Joulin, Gabriel Synnaeve, Jakob Verbeek, and Hervé Jégou. Resmlp: Feedforward networks for image classification with data-efficient training. CoRR, abs/2105.03404, 2021a. URL https://arxiv.org/abs/2105.03404. + +Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10347-10357. PMLR, 2021b. URL http://proceedings.mlr.press/v139/touvron21a.html. +Hugo Touvron, Matthieu Cord, and Herve Jégou. Deit III: revenge of the vit. CoRR, abs/2204.07118, 2022. doi: 10.48550/arXiv.2204.07118. URL https://doi.org/10.48550/arXiv.2204.07118. 
+Florian Tramèr, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. Ensemble adversarial training: Attacks and defenses. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=rkZvSe-RZ. +Dustin Tran, Jeremiah Liu, Michael W. Dusenberry, Du Phan, Mark Collier, Jie Ren, Kehang Han, Zi Wang, Zelda Mariet, Huiyi Hu, Neil Band, Tim G. J. Rudner, Karan Singhal, Zachary Nado, Joost van Amersfoort, Andreas Kirsch, Rodolphe Jenatton, Nithum Thain, Honglin Yuan, Kelly Buchanan, Kevin Murphy, D. Sculley, Yarin Gal, Zoubin Ghahramani, Jasper Snoek, and Balaji Lakshminarayanan. Plex: Towards reliability using pretrained large model extensions. CoRR, abs/2207.07411, 2022. doi: 10.48550/arXiv.2207.07411. URL https://doi.org/10.48550/arXiv.2207.07411. +Monika Undorf and Arndt Broder. Cue integration in metamemory judgements is strategic. Quarterly Journal of Experimental Psychology, 73(4):629-642, October 2019. doi: 10.1177/1747021819882308. URL https://doi.org/10.1177/1747021819882308. +Juozas Vaicenavicius, David Widmann, Carl R. Andersson, Fredrik Lindsten, Jacob Roll, and Thomas B. Schön. Evaluating model calibration in classification. In Kamalika Chaudhuri and Masashi Sugiyama (eds.), The 22nd International Conference on Artificial Intelligence and Statistics, AISTATS 2019, 16-18 April 2019, Naha, Okinawa, Japan, volume 89 of Proceedings of Machine Learning Research, pp. 3459-3467. PMLR, 2019. URL http://proceedings.mlr.press/v89/vaicenavicius19a.html. +Ross Wightman. Pytorch image models. https://github.com/rwightman/pytorch-image-models, 2019. +Cihang Xie, Mingxing Tan, Boqing Gong, Jiang Wang, Alan L. Yuille, and Quoc V. Le. Adversarial examples improve image recognition. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 816-825. Computer Vision Foundation / IEEE, 2020a. doi: 10.1109/CVPR42600.2020.00090. 
URL https://openaccess.thecvf.com/content_CVPR_2020/html/Xie_Adversarial_Examples_Improve_Image_Recognition_CVPR_2020_paper.html. +Qizhe Xie, Minh-Thang Luong, Eduard H. Hovy, and Quoc V. Le. Self-training with noisy student improves imagenet classification. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 10684-10695. Computer Vision Foundation / IEEE, 2020b. doi: 10.1109/CVPR42600.2020.01070. URL https://openaccess.thecvf.com/content_CVPR_2020/html/Xie_Self-Training_With_Noisy_Student_Improves_ImageNet_Classification_CVPR_2020_paper.html. +I. Zeki Yalniz, Hervé Jégou, Kan Chen, Manohar Paluri, and Dhruv Mahajan. Billion-scale semi-supervised learning for image classification, 2019. +Fisher Yu, Dequan Wang, Evan Shelhamer, and Trevor Darrell. Deep layer aggregation. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2403-2412, 2018. doi: 10.1109/CVPR.2018.00255. +Shengjia Zhao, Tengyu Ma, and Stefano Ermon. Individual calibration with randomized forecasting. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 11387-11397. PMLR, 2020. URL http://proceedings.mlr.press/v119/zhao20e.html. + +# A THE INVESTMENT EXAMPLE + +Let us consider two classification models for the stock market that predict whether a stock's value is about to increase, decrease or remain neutral (three-class classification). Suppose that Model A has a $95\%$ true accuracy, and generates a confidence score of 0.95 on any prediction (even on misclassified instances); Model B has a $40\%$ true accuracy, but always gives a confidence score of 0.6 on correct predictions, and 0.4 on incorrect ones. We now try and evaluate these two models using the uncertainty metrics mentioned in Section 1 to see which can reveal Model B's superior uncertainty estimation performance. 
AURC will fail due to its sensitivity to accuracy (the AURC of Model B is 0.12, more than twice as bad as the AURC for Model A, which is 0.05). NLL will rank Model A four times higher (Model A's NLL is 0.23 and Model B's is 0.93). The Brier score would also much prefer Model A (giving it a score of 0.096 while giving Model B a score of 0.54). Evaluating the models' calibration with ECE will also not reveal Model B's advantages, since it is less calibrated than Model A, which has perfect calibration (Model A has an ECE of 0, and Model B has a worse ECE of 0.4). + +AUROC, on the other hand, would give Model B a perfect score of 1 and a terrible score of 0.5 to Model A. The selective risk for Model B would be better for any coverage of stock predictions below $40\%$ , and for any SAC above $95\%$ the coverage for Model A would be 0, but 0.4 for Model B. + +Those two metrics are not perfect for any example. Let us instead compare two different models for the task of predicting the weather when we cannot abstain from making predictions. Accordingly, being required to provide an accurate probabilistic uncertainty estimation of the model's predictions, AUROC and selective risk would be meaningless (due to the model's inability to abstain in this task), but ECE or the Brier Score would better evaluate the performance the new task requires. + +# B RANKING AND CALIBRATION VISUAL COMPARISON + +A comparison of 523 models by their AUROC ( $\times 100$ , higher is better) and -log(ECE) (higher is better) on ImageNet is visualized in Figure 8. An interactive version of this figure is provided as supplementary material. To compare models fairly by their size, we plot two graphs with the logarithm of the number of parameters as the X-axis, so that models sharing the same x value can be compared solely based on their y value. 
In Figure 9 we set the X axis to be AUROC (higher is better), and see ViTs outperform any other architecture with a comparable amount of parameters by a large margin. We can also observe that using distillation creates a consistent improvement in AUROC. In Figure 10 we set the X axis to be the negative logarithm of ECE (higher is better) and observe a very similar trend, with ViT outperforming its competition for any model size. + +# C DEMONSTRATION OF E-AURC'S DEPENDENCE ON THE MODEL'S ACCURACY + +Excess-AURC (E-AURC) was suggested by Geifman et al. (2018) as an alternative to AURC (explained in Section 2). To calculate E-AURC, two AURC scores need to be calculated: (1) $AURC(model)$ , the AURC value of the actual model and (2) $AURC(model^{*})$ , the AURC value of a hypothetical model with identical predicted labels as the first model, but that outputs confidence values that induce a perfect partial order on the instances in terms of their correctness. The latter means that all incorrectly predicted instances are assigned confidence values lower than the correctly predicted instances. + +E-AURC is then defined as $AURC(model) - AURC(model^{*})$ . In essence, this metric acknowledges that given a model's accuracy, the area of $AURC(model^{*})$ is always unavoidable no matter how good the partial order is, but anything above that could have been minimized if the $\kappa$ function was better at ranking, assigning correct instances higher values than incorrect ones and inducing a better partial order over the instances. + +This metric indeed helps to reduce some of the sensitivity to accuracy suffered by AURC, and for the example presented in Section 1, E-AURC would have given a perfect score of 0 to the model inducing a perfect partial order by its confidence values (Model B). It is easy, however, to craft examples showing that E-AURC prefers models with higher accuracy, even if they have lower or equal capacity to rank. 
+ +![](images/9d9ebd2771f1ac371bbf660f35fae88018b600af2f4c4ec467ea5a18bc2e73a0.jpg) +Figure 8: A comparison of 523 models by their AUROC ( $\times 100$ , higher is better) and log(ECE) (lower is better) on ImageNet. Each marker's size is determined by the model's number of parameters. Each dotted marker represents a distilled version of the original. An interactive version of this figure is provided as supplementary material. + +![](images/3d1c595130db629b8f46da807f5c489fd04b7ff94c241895b7b8b54048b0b313.jpg) +Figure 9: A comparison of 523 models by their AUROC ( $\times 100$ , higher is better) and log(number of model's parameters) on ImageNet. Each dotted marker represents a distilled version of the original. + +![](images/2540814b7d1ea2012307b39fcf8425b3d6ec27ea7291a6317dfa00cd7f9805d4.jpg) +Figure 10: A comparison of 523 models by their -log(ECE) (higher is better) and log(number of model's parameters) on ImageNet. Each dotted marker represents a distilled version of the original. + +To demonstrate this in a simple way, let us consider two models with a complete lack of capacity to rank correct and incorrect predictions correctly, always outputting the same confidence score. Model A has an accuracy of $20\%$ (thus an error rate of $80\%$ ), and Model B has an accuracy of $80\%$ (and an error rate of $20\%$ ). A good ranking metric should evaluate them equally (the same way E-AURC gives the same score for two models that rank perfectly regardless of their accuracy). In Figure 11 we plot their RC curves with dashed lines, which are both straight lines due to their lack of ranking ability. We can calculate both of these models' AURCs, $AURC(modelA) = 0.8$ , $AURC(modelB) = 0.2$ . + +The next thing to calculate is the best AURC values those models could have achieved given the same accuracy if they had a perfect partial order. We plot these hypothetical models' RC curves in Figure 11 as solid lines. 
Their selective risk remains 0 for every coverage below their total accuracy, since these hypothetical models assigned the highest confidence to all of their correct instances first. As the coverage increases and they have no more correct instances to select, they begin to give instances that are incorrect, and thus their selective risk deteriorates for higher coverages. + +![](images/d0fefebcf81f0e5a828b47b663d84eb7f06149d59c176e24af2536dc97cb3db3.jpg) +Figure 11: The RC curves for Models A and B are plotted with dashed lines. The RC curves for the hypothetically optimal versions of Models A and B are plotted with solid lines. + +Calculating both of these hypothetical models' AURCs gives us the following: $AURC(modelA^{*}) = 0.482$ , $AURC(modelB^{*}) = 0.022$ . Subtracting our results we get: E-AURC(modelA) = 0.8 - 0.482 = 0.318, E-AURC(modelB) = 0.2 - 0.022 = 0.178. Hence, E-AURC prefers Model B over Model A, even though both do not discriminate at all between incorrect and correct instances. + +# D MORE ON THE DEFINITION OF RANKING + +Let us consider a finite set $S_{m} = \{(x_{i},y_{i})\}_{i = 1}^{m}\sim P_{X,Y}$ . We assume that there are no two identical values given by $\kappa$ on $S_{m}$ . Such an assumption is reasonable when choosing a continuous confidence signal. + +We further denote $c$ as the number of concordant pairs (i.e., pairs in $S_{m}$ that satisfy the condition $[\kappa(x_{i}, \hat{y} | f) < \kappa(x_{j}, \hat{y} | f) \cap \ell(f(x_{i}), y_{i}) > \ell(f(x_{j}), y_{j})]$ ) and $d$ as the number of discordant pairs (i.e., pairs in $S_{m}$ that satisfy the condition $[\kappa(x_{i}, \hat{y} | f) > \kappa(x_{j}, \hat{y} | f) \cap \ell(f(x_{i}), y_{i}) > \ell(f(x_{j}), y_{j})]$ . + +We assume, for now, that there are no two identical values given by $\ell$ on $S_{m}$ . 
Accordingly, we can further develop Equation (1) from Section 2.1 using the definition of conditional probability, + +$$ +\Pr [ \kappa (x _ {i}, \hat {y} | f) < \kappa (x _ {j}, \hat {y} | f) | \ell (f (x _ {i}), y _ {i}) > \ell (f (x _ {j}), y _ {j}) ] = +$$ + +$$ +\frac {\mathbf {P r} [ \kappa (x _ {i} , \hat {y} | f) < \kappa (x _ {j} , \hat {y} | f) \cap \ell (f (x _ {i}) , y _ {i}) > \ell (f (x _ {j}) , y _ {j}) ]}{\mathbf {P r} [ \ell (f (x _ {i}) , y _ {i}) > \ell (f (x _ {j}) , y _ {j}) ]}, +$$ + +which can be approximated empirically, using the maximum likelihood estimator, as + +$$ +\frac {c}{\binom {m} {2}}. \tag {2} +$$ + +We note that the last equation is identical to Kendall's $\tau$ up to a linear transformation, which equals + +$$ +\begin{array}{l} \frac {c - d}{\binom {m} {2}} = \frac {c - d + c - c}{\binom {m} {2}} \\ = \frac {2 c - (c + d)}{\binom {m} {2}} = \frac {2 c}{\binom {m} {2}} - \frac {c + d}{\binom {m} {2}} = \\ \end{array} +$$ + +$$ +2 \cdot \frac {c}{\binom {m} {2}} - 1 = 2 \cdot [\text{Equation } 2] - 1. +$$ + +Otherwise, if the loss assigns two identical values to a pair of points in $S_{m}$ , but $\kappa$ does not, then we get: + +$$ +\frac {c}{c + d}. \tag {3} +$$ + +which is identical to Goodman & Kruskal's $\gamma$ -correlation up to a linear transformation + +$$ +\begin{array}{l} \frac {c - d}{c + d} = \frac {c - d + c - c}{c + d} = \frac {2 c - (c + d)}{c + d} = \\ \frac {2 c}{c + d} - \frac {c + d}{c + d} = 2 \cdot [\text{Equation } 3] - 1. \\ \end{array} +$$ + +# D.1 INEQUALITIES OF THE DEFINITION + +One might wonder why Equation (1) should have strict inequalities rather than non-strict ones to define ranking. 
As we discuss below, this would damage the definition: + +(1) If the losses had a non-strict inequality: + +$$ +\Pr [ \kappa (x _ {1}, \hat {y} | f) < \kappa (x _ {2}, \hat {y} | f) | \ell (f (x _ {1}), y _ {1}) \geq \ell (f (x _ {2}), y _ {2}) ] +$$ + +Consequently, in the case of classification, for example, this probability would increase for any pairs consisting of correct instances with different confidences. This would yield no benefit in ranking between incorrect and correct instances and motivates giving different confidence values for instances with the same loss—a fact that would not truly add any value. + +(2) If the $\kappa$ values had a non-strict inequality: + +$$ +\Pr [ \kappa (x _ {1}, \hat {y} | f) \leq \kappa (x _ {2}, \hat {y} | f) | \ell (f (x _ {1}), y _ {1}) > \ell (f (x _ {2}), y _ {2}) ]. +$$ + +This probability would increase for any pair $(x_{1}, x_{2})$ such that $\kappa(x_{1}, \hat{y} | f) = \kappa(x_{2}, \hat{y} | f)$ and $\ell(f(x_{1}), y_{1}) > \ell(f(x_{2}), y_{2})$ , although $\kappa$ should have ranked $x_{1}$ with a lower value. Furthermore, if a $\kappa$ function were to assign the same confidence score to all $x \in \mathcal{X}$ , then when there are no two identical values of losses, the definition's probability would be 1; otherwise, the more different values for losses there are, the larger the probability would grow. In classification with a $0/1$ loss, for example, assigning the same confidence score to all instances would result in the probability being Accuracy $(f) \cdot (1 - Accuracy(f))$ , which is largest when Accuracy $(f) = 0.5$ . 
+ +# E RANKING CAPACITY COMPARISON BETWEEN HUMANS AND NEURAL NETWORKS + +In the field of metacognition, interestingly, the predictive value of confidence is evaluated by two different aspects: by its ability to discriminate between correct and incorrect predictions (also known as resolution in metacognition or ranking in our context) and by its ability to give well-calibrated confidence estimations, not being over- or under-confident (Fiedler et al., 2019). These two aspects correspond perfectly with much of the research done in the deep learning field, with $\gamma$ -correlation being the metric that most nearly matches AUROC (see Section 2). + +This allows us to compare how well humans rank predictions in various tasks versus how well models rank their own in others. Human AUROC measurements in various tasks (translated from + +$\gamma$ -correlation) tend to range from 0.6 to 0.75 (Undorf & Broder, 2019; Basile et al., 2018; Ackerman et al., 2016), but could vary, usually towards much lower values (Griffin et al., 2019). In our comprehensive evaluation on ImageNet, AUROC ranged from 0.77 to 0.88 (with the median value being 0.85), and in CIFAR-10 these measurements jump to the range of 0.92 to 0.94. + +While such comparisons between neural networks and humans are somewhat unfair due to the great sensitivity required for the task, research that directly compares humans and machine learning algorithms performing the same task exists. For example, in Ackerman et al. (2019), algorithms far surpass even the group of highest performing individuals in terms of ranking. + +# F CRITICISMS OF AUROC AS A RANKING METRIC + +In this section, addressing the criticism of AUROC as a ranking metric, we show why AUROC does not simply reward models for having lower accuracy. The paper by Ding et al. (2019) presented a semi-artificial experiment to demonstrate that AUROC might get larger the worse the model's accuracy becomes. 
They consider a model $f$ and its $\kappa$ function evaluated on a classification test set $\mathcal{X}$ , giving each a prediction $\hat{y}_f(x)$ and a confidence score $\kappa(x, \hat{y}_f(x)|f)$ , which in this case is the model's softmax response. Let $\mathcal{X}^c = \{x^c \in \mathcal{X} | \hat{y}_f(x^c) = y(x^c)\}$ be the set of all instances correctly predicted by the model $f$ , and define $x_{(i)}^c \in \mathcal{X}^c$ to be the correct instance that received the $i$-th lowest confidence score from $\kappa$ . Their example continues and considers an artificial model $f^m$ to be an exact clone of $f$ with the following modification: for every $i \leq m$ , the model $f^m$ now predicts a different, incorrect label for $x_{(i)}^c$ ; however, its given confidence score remains identical: $\kappa(x_{(i)}^c, \hat{y}_f(x_{(i)}^c)|f) = \kappa(x_{(i)}^c, \hat{y}_{f^m}(x_{(i)}^c)|f^m)$ . $f^0$ is exactly identical to $f$ , by this definition, not changing any predictions. The paper shows how an artificially created model $f^m$ obtains a higher AUROC score the bigger its $m$ . This happens even though "nothing" changed but a hit to the model's accuracy performance (by making mistakes on more instances). + +First, to understand why this happens, let us consider $f^1$ : AUROC for $\kappa$ increases the more pairs of $[\kappa(x_1) < \kappa(x_2)|\hat{y}_f(x_1) \neq y(x_1), \hat{y}_f(x_2) = y(x_2)]$ there are. The model $f^1$ is now giving an incorrect classification to $x_{(1)}^c$ , but this instance's position in the partial order induced by $\kappa$ has remained the same (since $\kappa(x_{(1)}^c)$ is unchanged); therefore, $|\mathcal{X}^c| - 1$ correctly ranked pairs were added: $[\kappa(x_{(1)}^c) < \kappa(x_{(i)}^c)|\hat{y}_f(x_{(1)}^c) \neq y(x_{(1)}^c), \hat{y}_f(x_{(i)}^c) = y(x_{(i)}^c)]$ for every $1 < i \leq |\mathcal{X}^c|$ . 
Nevertheless, this does not guarantee an increase to AUROC by itself: if, previously, all pairs of (correct, incorrect) instances were ranked correctly by $\kappa$ , AUROC would already be 1.0 for $f^0$ and would not change for $f^1$ . If AUROC for $f^1$ is higher than it was for $f^0$ , this means there exists at least one instance $x^w$ that was incorrectly predicted by the original model $f^0$ such that $\kappa(x_{(1)}^c) < \kappa(x^w)$ . Every such originally wrongly ranked pair (by $f^0$ ) of $[\kappa(x_{(1)}^c) < \kappa(x^w)|\hat{y}_f(x^w) \neq y(x^w), \hat{y}_f(x_{(1)}^c) = y(x_{(1)}^c)]$ has been eliminated by $f^1$ wrongly predicting $x_{(1)}^c$ . This, therefore, causes AUROC to increase at the expense of the model's accuracy. + +Such an analysis neglects many factors, which is probably why such an effect is only likely to be observed in artificial models (and not among the actual models we have empirically tested): + +1. It is unreasonable to assume that the confidence score given by $\kappa$ will remain exactly the same for an instance $x_{(i)}^{c}$ given it now has a different prediction. In the case of $\kappa$ being softmax, it assumes the model's logits have changed in a very precise and nontrivial manner. Additionally, by our broad definition of $\kappa$ , which allows $\kappa$ to even be produced from an entirely different model than $f$ , $\kappa$ receives the prediction and model as a given input (and cannot change or affect either), and it is unlikely to assume changing its inputs will not change its output. +2. Suppose we find the setting reasonable and assume we can actually create a model $f^m$ as described. Let us observe a model $f^p$ such that $p = \min_{m} (\text{AUROC of } f^m = 1)$ , meaning that $f^p$ ranks its predictions perfectly, unlike the original $f^0$ . Is it really true that $f^p$ has no better uncertainty estimation than $f^0$ ? 
Model $f^p$ behaves very much like the investment in "Model B" from our example in Section 1, possessing perfect knowledge of when it is wrong and when it is correct, allowing its users risk-free classification. So, given a model $f$ , we can use the above process to produce an improved model $f^p$ , and then we can even calibrate its + +$\kappa$ to output $0\%$ for all instances below its threshold and $100\%$ for all those above to produce a perfect model, which might have a small coverage but is correct every time, knows it and notifies its user when it truly knows the prediction. The increase in AUROC reflects such an improvement. + +Not only do we disagree with such an analysis and its conclusions, but we also have vast empirical evidence to show that AUROC does not prefer lower accuracy models unless there is a good reason for it to do so, as we demonstrate in Figure 3 (comparing EfficientNet-V2-XL to ViT-B/32-SAM). In fact, out of the 523 models we tested, the model with the highest AUROC also has the $4^{th}$ highest accuracy of all models, and the overall Spearman correlation between AUROC and accuracy of all the models we tested is 0.03. Furthermore, Figure 3 also exemplifies why AURC, which was suggested by the just mentioned paper as the alternative to AUROC, is a bad choice as a single number metric, and might lead us to deploy a model that has a worse selective risk for most coverages only due to its higher overall accuracy. + +# G KNOWLEDGE DISTILLATION EFFECTS ON UNCERTAINTY ESTIMATION + +![](images/f188edd92ad5504d8af446f7da799b3fc9769cd70d682862a17c5d78793ce1c6.jpg) +Figure 12: Comparing vanilla models to those incorporating KD into their training (represented by markers with thick borders and a dot). In a pruning scenario that includes distillation, yellow markers indicate that the original model was also the teacher. The performance of each model is measured in AUROC (higher is better) and -log(ECE) (higher is better). 
+ +Figure 12 compares vanilla models to those incorporating KD into their training (represented by markers with thick borders and a dot). In a pruning scenario that includes distillation, yellow markers indicate that the original model was also the teacher (Aflalo et al., 2020). While distillation using a different model tends to improve uncertainty estimation in both aspects, distillation by the model itself seems to improve only one—suggesting it is generally more beneficial to use a different model as a teacher. The fact that KD improves the model over its original form, however, is surprising, and implies that the distillation process itself helps uncertainty estimation. Note that although this specific method involves pruning, evaluations of models pruned without incorporating distillation (Frankle & Carbin, 2018) revealed no improvement. + +It seems, moreover, that the teacher does not have to be good at uncertainty estimation itself; Figure 5 in Section 3 shows this by comparing the teacher architecture and the students in each case. + +While the training method by Ridnik et al. (2021) included pretraining on ImageNet-21k and demonstrated impressive improvements, comparison of models that were pretrained on ImageNet-21k (Tan & Le, 2021; Touvron et al., 2021a; 2022) with identical models that were not pretrained showed only a slight improvement in ECE, and, in fact, exhibited a degradation of AUROC (see + +Figures 4a and 4b in Section 3). This suggests that pretraining alone does not improve uncertainty estimation. + +# H MORE INFORMATION ABOUT TEMPERATURE SCALING + +![](images/034ae8e1de9f41d6fe5cac55c9a457b469a7856eca88d835a395c08d2e60e6c9.jpg) +Figure 13: A comparison of 523 models after being calibrated with TS, evaluated by their AUROC ($\times 100$, higher is better) and -log(ECE) (higher is better) on ImageNet. Each marker's size is determined by the model's number of parameters. 
ViT models are still among the best performing architectures for all aspects of uncertainty estimation. + +In Figure 13 we see how temperature scaling (TS) affects the overall ranking of models in terms of AUROC and ECE. While the ranking between the different architectures remains similar, the poorly performing models are much improved and minimize the gap between them and the best models. One particularly notable exception is HardCoRe-NAS (Nayman et al., 2021), with its lowest latency versions becoming the top performers in terms of ECE. In addition, models that benefit from + +![](images/171b1c9407f03ba242867cd36b052c046d8aa0c7512ed878a58fd7430f7f9cc8.jpg) +Figure 14: Here, unlike the case for AUROC, the success of TS seems unrelated to the temperature assigned by the calibration process. + +TS in terms of AUROC tend to have been assigned a temperature lower than 1 by the calibration + +process (see Figure 6 in Section 3). The same, however, does not hold true for ECE (see Figure 14). This example also emphasizes the fact that models benefiting from TS in terms of AUROC do not necessarily benefit in terms of ECE, and vice versa. Therefore, determining whether to calibrate the deployed model with TS is, unfortunately, a task-specific decision. + +We perform TS as was suggested in Guo et al. (2017). For each model we take a random stratified sampling of 5,000 instances from the ImageNet validation set on which to calibrate, and reserve the remaining 45,000 instances for testing. Using the box-constrained L-BFGS (Limited-Memory Broyden-Fletcher-Goldfarb-Shanno) algorithm, we optimize for 5,000 iterations (though fewer iterations usually converge to the same temperature parameter) using a learning rate of 0.01. 
+ +# I ARCHITECTURE CHOICE FOR PRACTICAL DEPLOYMENT BASED ON SELECTIVE PERFORMANCE + +As discussed in Section 2, when we know the coverage or risk we require for deployment, the most direct metric to check is which model obtains the best risk for the coverage required (selective risk), or which model gets the largest coverage for the accuracy constraint (SAC). While each deployment scenario specifies its own constraints, for demonstration purposes we consider a scenario in which misclassifications are by far more costly than abstaining from giving correct predictions. An example of this could be classifying a huge unlabeled dataset (or cleaning bad labels from a labeled dataset). While it is desirable to assign labels to a larger portion of the dataset (or to correct more of the wrong labels), it is crucial that these labels are as accurate as possible (or that correctly labeled instances are not replaced with a bad label). + +![](images/0d9498774b8c27650a838b860059762e91ee2a1e034b9c0144323f176fdd71b8.jpg) +Figure 15: A comparison of 523 models by their log(number of model's parameters) and the coverage they are able to provide for a SAC of $99\%$ (higher is better) on ImageNet. + +To explore such a scenario, we evaluate all models on ImageNet to see which ones give us the largest coverage for a required accuracy of $99\%$ . In Figure 7, Section 3 (paper's main body) we observe that of all the models studied, only ViT models are able to provide coverage beyond $30\%$ for such an extreme constraint. Moreover, we note that the coverage they provide is significantly larger than that given by models with comparable accuracy or size, and that ViT models that provide similar coverage to their counterparts do so with less overall accuracy. + +In Figure 15 we see that not only do ViT models provide more coverage than any other model, but that they are also able to do so in any size category. 
To compare models fairly by their size, we present Figure 15, which sets the Y axis to be the logarithm of the number of parameters, so that models sharing the same y value can be compared solely based on their x value—which is the coverage they + +Table 1: A comparison of different training regimes of ViTs. *The paper introducing ViTs (Dosovitskiy et al., 2021) had also trained ViT models with the JFT-300M dataset; however, their weights are unavailable to the general public. All evaluations of ViTs from that paper were conducted on ViTs pretrained on ImageNet-21k, which are publicly available. **Pretrained DeiT3 models were first pretrained with a learning rate of $3 \cdot 10^{-3}$ and then fine-tuned with a learning rate of $3 \cdot 10^{-4}$ + +
RegimeViT (original)Steiner et al.Chen et al.DeiTDeiT3DeiT3 +PretrainingTorchvision
ReferenceDosovitskiy et al. (2021)Steiner et al. (2022)Chen et al. (2022)Touvron et al. (2021b)Touvron et al. (2022)Touvron et al. (2022)Paszke et al. (2019)
Pretraining datasetImageNet-21k*ImageNet-21k---ImageNet-21k-
Batch Size409640964096102420482048512
OptimizerAdamWAdamWSAMLAMBLAMBLAMBAdamW
LR3·10-33·10-33·10-31·10-33·10-33·10-3**3·10-3
LR decaycosinecosinecosinecosinecosinecosinecosine
Weight decay0.10.30.10.050.020.020.3
Warmup epochs3.43.43.455530
Label smoothing ε0.10.10.10.1X0.10.11
DropoutXXXX
Stoch. DepthXXX
Repeated AugXXXX
Gradient Clip.1.01.01.0X1.01.01.0
H. flip
Random Resized CropX
Rand AugmentXAdapt.X9/0.5XXAdapt.
3 AugmentXXXXX
LayerScaleXXXXX
Mixup alphaXAdapt.X0.80.8X0.2
Cutmix alphaXXX1.01.01.01.0
Erasing prob.XXX0.25XXX
ColorJitterXXXX0.30.3X
Test crop ratio0.8750.8750.8750.8751.01.00.875
LossCECECECEBCECECE
Superb performanceXXXX
+ +provide for a SAC of $99\%$ . We see that ViT models provide a larger coverage even when compared with models of a similar size. + +# J COMPARISON OF VIT TRAINING REGIMES AND THEIR EFFECTS ON UNCERTAINTY ESTIMATION PERFORMANCE + +In Table 1 we compare the different hyperparameters and augmentations used for training the ViT models evaluated in this paper, with the aim of revealing why some training regimes consistently result in superb ViTs, while others do not. An analysis of the various differences between these regimes, however, eliminates the obvious suspects. + +1) Pretraining, on its own, does not seem to offer an explanation: First, we analyze eight pairs of models (provided by Touvron et al. 2022) such that both models have identical architecture and training regimes, with the exception that one was pretrained on ImageNet-21k, and the other was not. Pretraining results in only a slight improvement of 0.16 in AUROC and 0.01 in ECE. Moreover, as mentioned in detail in Section 3, ViT models trained on JFT-4B (Tran et al., 2022) were outperformed by the successful ViT models evaluated in this paper, most of which were pretrained on ImageNet-21k (and even by one ViT SAM model that was not pretrained at all). Second, we note that ViTs trained with the SAM optimizer (Chen et al., 2022), and not pretrained at all, reach superb ranking (AUROC) as well. These facts lead us to conclude that pretraining, at least by itself, is not the main contributor to training successful ViTs. +2) The selection of optimizers and other hyperparameters (such as learning rate, label smoothing etc.) does not seem to have a significant impact. For example, while AdamW (Loshchilov & Hutter, 2019) was used by two of the successful regimes, it was also used by Paszke et al. (2019), and on the other hand was replaced by SAM (Foret et al., 2021) in another successful training regime. 
+3) Advanced augmentations are unlikely to explain the gaps in uncertainty estimation performance, as regimes producing superior ViT models (Dosovitskiy et al., 2021; Chen et al., 2022) did not use advanced augmentations (in comparison to Touvron et al. (2021b) and Touvron et al. (2022), for example). + +For these reasons, for the moment, the explanation for the gap remains elusive. The only remaining "suspect" is the batch size used, with all successful regimes using a batch size of 4096, while others use a smaller batch size of 2048 or lower. One could argue, however, that a two-fold increase in batch size is not sufficient to explain the huge gaps in performance measured. + +Table 2: The relationship between uncertainty estimation performance and the model's attributes and resources (accuracy, number of parameters and input size), measured by Spearman correlation. Positive correlations indicate good utilization of resources for uncertainty estimation. + +
ArchitectureAUROC & Accuracy-ECE & AccuracyAUROC & #Parameters-ECE & #ParametersAUROC & Input Size-ECE & Input Size#Models Evaluated
EfficientNet-0.16-0.29-0.22-0.29-0.26-0.3850
ResNet-0.28-0.220.160.03-0.40-0.4433
ViT0.84-0.170.50-0.670.04-0.1331
XCiT distilled0.600.090.350.020.510.1228
XCiT-0.680.89-0.790.94--28
ViT*0.230.38-0.040.410.14-0.1226
SE_ResNet-0.46-0.02-0.530.20-0.02-0.3518
EfficientNetV2-0.70-0.45-0.63-0.47-0.59-0.4015
NFNet0.560.780.630.810.480.6013
Inception-0.290.09-0.430.30-0.080.2313
RegNetY-0.03-0.980.27-0.86--12
RegNetX0.20-0.960.20-0.96--12
CaT distilled0.44-0.870.35-0.870.58-0.5010
DLA0.64-0.900.77-0.90--10
MobileNetV30.370.590.420.60--10
Res2Net-0.700.27-0.680.60--9
CLIP Zero-Shot1.0-0.630.9-0.80.55-0.589
CLIP + Linear Probe0.880.260.710.10.19-0.278
VGG0.81-0.980.71-0.90--8
RepVGG-0.710.50-0.570.21--8
BiT-0.33-0.81-0.20-0.85-0.46-0.258
ResNeXt-0.960.39-0.22-0.30--7
ResNet RS0.000.79-0.180.82-0.300.827
MixConv-0.110.89-0.240.86--7
DenseNet0.43-0.140.720.12--6
HardCoReNAS-0.600.26-0.490.37--6
Swin0.710.140.770.260.410.006
ECANet-0.200.60-0.430.370.830.376
Twins-0.260.94-0.140.89--6
SWSL ResNet0.94-0.890.77-0.83--6
GENet0.50-1.000.50-1.000.87-0.876
SSL ResNet0.14-1.000.26-0.94--6
TResNet0.10-0.300.530.53-0.58-0.875
CoaT-0.100.90-0.100.50--5
LeViT distilled0.60-0.900.60-0.90--5
ResMLP0.201.000.150.97--5
MobileNetV2-0.300.00-0.210.10--5
ViT* Distilled0.8-1.00.71-0.770.22-0.774
PiT distilled1.00-1.001.00-1.00--4
PiT-0.401.00-0.401.00--4
WSP ResNeXt1.000.801.000.80--4
ResMLP distilled0.800.200.800.20--4
MnasNet0.400.200.630.95--4
+ +# K EVALUATIONS OF THE ZERO-SHOT LANGUAGE-VISION CLIP MODEL + +In this section we describe how we use the CLIP model and extract confidence signals from it during inference. To evaluate CLIP on ImageNet, we first prepare it following the code provided by its authors (https://github.com/openai/CLIP): The labels of ImageNet-1k are encoded into normalized embedding vectors. At inference time, the incoming image is encoded into another normalized embedding vector. A cosine similarity is then calculated between each label embedding vector and the image embedding vector, and lastly, softmax is applied. The highest score is then taken as the confidence score for that prediction. We also evaluate the same models when adding a trained "linear-probe" to them (as described in Radford et al. (2021), which is essentially a logistic regression head), which results in a large boost in their accuracy. + +# L EFFECTS OF THE MODEL'S ACCURACY, NUMBER OF PARAMETERS AND INPUT SIZE ON UNCERTAINTY ESTIMATION PERFORMANCE + +Table 2 shows the relationship between uncertainty estimation performance and model attributes and resources (accuracy, number of parameters and input size), measured by Spearman correlation. We measure uncertainty estimation performance by AUROC (higher is better) and -ECE (higher is better). Positive correlations indicate good utilization of resources for uncertainty estimation (for example, a positive correlation between -ECE and the number of parameters indicates that as the number of parameters increases, the calibration improves). An interesting observation is that distillation can drastically change the correlation between a resource and the uncertainty estimation performance metrics. For example, undistilled XCiTs have a Spearman correlation of -0.79 between their number of parameters and AUROC, indicating that more parameters are correlated with lower ranking performance, while distilled XCiTs have a correlation of 0.35 between the two. 
+ +Table 3: Comparing using MC dropout to softmax-response (vanilla). + +
ArchitectureMethodAccuracyAUROC
MobileNetV3 LargeVanilla74.0486.88
MC dropout7486.14
MobileNetV3 SmallVanilla67.6786.2
MC dropout67.5584.54
MobileNetV2Vanilla71.8886.05
MC dropout71.8184.68
VGG11Vanilla70.3786.31
MC dropout70.2184.3
VGG11 (no BatchNorm)Vanilla69.0286.19
MC dropout68.9583.94
VGG13Vanilla71.5986.3
MC dropout71.4384.37
VGG13 (no BatchNorm)Vanilla69.9386.24
MC dropout69.7184.3
VGG16Vanilla73.3686.76
MC dropout73.3385.02
VGG16 (no BatchNorm)Vanilla71.5986.63
MC dropout71.4784.97
VGG19Vanilla74.2286.52
MC dropout74.1785.06
VGG19 (no BatchNorm)Vanilla72.3886.55
MC dropout72.3784.99
+ +# M EVALUATIONS OF MONTE CARLO DROPOUT RANKING PERFORMANCE + +MC Dropout (Gal & Ghahramani, 2016) is computed using several dropout-enabled forward passes to produce uncertainty estimates. In classification, the mean softmax score of these passes is calculated, and then a predictive entropy score is used as the final uncertainty estimate. In our evaluations, we use 30 dropout-enabled forward passes. We do not measure MC Dropout's effect on ECE since entropy scores do not reside in $[0,1]$ . + +We test this technique using MobileNetV3 (Howard et al., 2019), MobileNetv2 (Sandler et al., 2018) and VGG (Simonyan & Zisserman, 2015), all trained on ImageNet and taken from the PyTorch repository (Paszke et al., 2019). + +The results comparing these models with and without using MC dropout are provided in Table 3. + +The table shows that using MC dropout causes a consistent drop in both AUROC and selective performance compared with using the same models with softmax as the $\kappa$ . These results are also visualized in comparison to other methods in Figure 4a in Section 3. MC dropout's underperformance in an ID setting was also previously observed in Geifman & El-Yaniv (2017). 
\ No newline at end of file diff --git a/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/images.zip b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..1ad06d40f9c86bd91bbd259fbb2499fe6d362df2 --- /dev/null +++ b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a48339dcf3ad04c5e935bb16d42b766a95397447e139239ad10ce491af876b2 +size 1046236 diff --git a/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/layout.json b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b336c11321d9a21ce96961aa960053719c748abf --- /dev/null +++ b/2023/What Can we Learn From The Selective Prediction And Uncertainty Estimation Performance Of 523 Imagenet Classifiers_/layout.json @@ -0,0 +1,14922 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 507, + 134 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 507, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 507, + 134 + ], + "type": "text", + "content": "WHAT CAN WE LEARN FROM THE SELECTIVE PREDICTION AND UNCERTAINTY ESTIMATION PERFORMANCE OF 523 IMAGENET CLASSIFIERS?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 113, + 152, + 155, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 152, + 155, + 163 + ], + "spans": [ + { + "bbox": [ + 113, + 152, + 155, + 163 + ], + "type": "text", + "content": "Ido Galil" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 163, + 151, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 163, + 151, + 174 + ], + "spans": [ + { + "bbox": [ + 113, + 163, + 151, + 174 + ], + "type": "text", + "content": "Technion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 175, + 227, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 175, + 227, + 186 + ], + "spans": [ + { + "bbox": [ + 113, + 175, + 227, + 186 + ], + "type": "text", + "content": "idogail.iq@gmail.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 244, + 152, + 334, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 152, + 334, + 163 + ], + "spans": [ + { + "bbox": [ + 244, + 152, + 334, + 163 + ], + "type": "text", + "content": "Mohammed Dabbah" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 244, + 164, + 280, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 164, + 280, + 174 + ], + "spans": [ + { + "bbox": [ + 244, + 164, + 280, + 174 + ], + "type": "text", + "content": "Amazon" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 244, + 175, + 353, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 175, + 353, + 186 + ], + "spans": [ + { + "bbox": [ + 244, + 175, + 353, + 186 + ], + "type": "text", + "content": "m.m.dabbah@gmail.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 370, + 152, + 430, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 152, + 430, + 163 + ], + "spans": [ + { + "bbox": [ + 370, + 152, + 430, + 163 + ], + "type": "text", + "content": "Ran El-Yaniv" + } + ] + } 
+ ], + "index": 8 + }, + { + "bbox": [ + 371, + 164, + 457, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 371, + 164, + 457, + 174 + ], + "spans": [ + { + "bbox": [ + 371, + 164, + 457, + 174 + ], + "type": "text", + "content": "Technion, Deci.AI" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 371, + 175, + 490, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 371, + 175, + 490, + 186 + ], + "spans": [ + { + "bbox": [ + 371, + 175, + 490, + 186 + ], + "type": "text", + "content": "rani@cs.technion.ac.il" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 276, + 215, + 334, + 226 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 215, + 334, + 226 + ], + "spans": [ + { + "bbox": [ + 276, + 215, + 334, + 226 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "spans": [ + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "text", + "content": "When deployed for risk-sensitive tasks, deep neural networks must include an uncertainty estimation mechanism. Here we examine the relationship between deep architectures and their respective training regimes, with their corresponding selective prediction and uncertainty estimation performance. We consider some of the most popular estimation performance metrics previously proposed including AUROC, ECE, AURC as well as coverage for selective accuracy constraint. We present a novel and comprehensive study of selective prediction and the uncertainty estimation performance of 523 existing pretrained deep ImageNet classifiers that are available in popular repositories. We identify numerous and previously unknown factors that affect uncertainty estimation and examine the relationships between the different metrics. 
We find that distillation-based training regimes consistently yield better uncertainty estimations than other training schemes such as vanilla training, pretraining on a larger dataset and adversarial training. Moreover, we find a subset of ViT models that outperform any other models in terms of uncertainty estimation performance. For example, we discovered an unprecedented " + }, + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "text", + "content": " top-1 selective accuracy on ImageNet at " + }, + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "inline_equation", + "content": "47\\%" + }, + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "text", + "content": " coverage (and " + }, + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "text", + "content": " top-1 accuracy at " + }, + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 140, + 237, + 470, + 459 + ], + "type": "text", + "content": ") for a ViT model, whereas a competing EfficientNet-V2-XL cannot obtain these accuracy constraints at any level of coverage. Our companion paper, also published in ICLR 2023 (Galil et al., 2023), examines the performance of these classifiers in a class-out-of-distribution setting." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 475, + 206, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 475, + 206, + 488 + ], + "spans": [ + { + "bbox": [ + 106, + 475, + 206, + 488 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 500, + 506, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 500, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 500, + 506, + 545 + ], + "type": "text", + "content": "The excellent performance of deep neural networks (DNNs) has been demonstrated in a range of applications, including computer vision, natural language understanding and audio processing. To deploy these models successfully, it is imperative that they provide an uncertainty quantification of their predictions, either via some kind of selective prediction or a probabilistic confidence score." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 550, + 506, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 506, + 671 + ], + "type": "text", + "content": "Notwithstanding, what metric should we use to evaluate the uncertainty estimation performance? There are many and diverse ways so the answer to this question is not obvious, and to demonstrate the difficulty, consider the case of two classification models for the stock market that predict whether a stock's value is about to increase, decrease, or remain neutral (three-class classification). 
Suppose that model A has a " + }, + { + "bbox": [ + 104, + 550, + 506, + 671 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 671 + ], + "type": "text", + "content": " true accuracy, and generates a confidence score of 0.95 on every prediction (even on misclassified instances); model B has a " + }, + { + "bbox": [ + 104, + 550, + 506, + 671 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 671 + ], + "type": "text", + "content": " true accuracy, but always gives a confidence score of 0.6 on correct predictions, and 0.4 on incorrect ones. Model B can be utilized easily to generate perfect investment decisions. Using selective prediction (El-Yaniv & Wiener, 2010; Geifman & El-Yaniv, 2017), Model B will simply reject all investments on stocks whenever the confidence score is 0.4. While model A offers many more investment opportunities, each of its predictions carries a " + }, + { + "bbox": [ + 104, + 550, + 506, + 671 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 671 + ], + "type": "text", + "content": " risk of failure." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "Among the various metrics proposed for evaluating the performance of uncertainty estimation are: Area Under the Receiver Operating Characteristic (AUROC or AUC), Area Under the Risk-Coverage curve (AURC) (Geifman et al., 2018), selective risk or coverage for a selective accuracy constraint (SAC), Negative Log-likelihood (NLL), Expected Calibration Error (ECE), which is often used for evaluating a model's calibration (see Section 2) and Brier score (Brier, 1950). 
All these metrics" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 184, + 79, + 428, + 242 + ], + "blocks": [ + { + "bbox": [ + 184, + 79, + 428, + 242 + ], + "lines": [ + { + "bbox": [ + 184, + 79, + 428, + 242 + ], + "spans": [ + { + "bbox": [ + 184, + 79, + 428, + 242 + ], + "type": "image", + "image_path": "fa9b91586e5aca9d13d95ff3b56f34ea71d0af6bbb95e238639c29c5ecf07ebc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 250, + 506, + 316 + ], + "lines": [ + { + "bbox": [ + 104, + 250, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 250, + 506, + 316 + ], + "type": "text", + "content": "Figure 1: A comparison of 523 models by their AUROC (" + }, + { + "bbox": [ + 104, + 250, + 506, + 316 + ], + "type": "inline_equation", + "content": "\\times 100" + }, + { + "bbox": [ + 104, + 250, + 506, + 316 + ], + "type": "text", + "content": ", higher is better) and -log(ECE) (higher is better) on ImageNet. Each marker's size is determined by the model's number of parameters. A full version graph is given in Figure 8. Distilled models are better than non-distilled ones. 
A subset of ViT models is superior to all other models for all aspects of uncertainty estimation (\"ViT\" in the legend, marked as a red triangle facing upwards); the performance of EfficientNet-V2 and GENet models is worse." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 338, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 437 + ], + "type": "text", + "content": "are well known and are often used for comparing the uncertainty estimation performance of models (Moon et al., 2020; Nado et al., 2021; Maddox et al., 2019; Lakshminarayanan et al., 2017). Somewhat surprisingly, NLL, Brier, AURC, and ECE all fail to reveal the uncertainty superiority of Model B in our investment example (see Appendix A for the calculations). Both AUROC and SAC, on the other hand, reveal the advantage of Model B perfectly (see Appendix A for details). It is not hard to construct counterexamples where these two metrics fail and others (e.g., ECE) succeed. To sum up this brief discussion, we believe that the ultimate suitability of a performance metric should be determined by its context. If there is no specific application in mind, there is a strong incentive to examine a variety of metrics, as we choose to do in this study." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 442, + 504, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 442, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 104, + 442, + 504, + 541 + ], + "type": "text", + "content": "This study evaluates the ability of 523 models from the Torchvision and Timm repositories (Paszke et al., 2019; Wightman, 2019) to estimate uncertainty1. 
Our study identifies several major factors that affect confidence rankings, calibration, and selective prediction, and lead to numerous empirical contributions important to selective predictions and uncertainty estimation. While no new algorithm or method is introduced in our paper, our study generates many interesting conclusions that will help practitioners achieve more powerful uncertainty estimation. Moreover, the research questions that are uncovered by our empirical study shed light on uncertainty estimation, which may stimulate the development of new methods and techniques for improving uncertainty estimation. Among the most interesting conclusions our study elicits are:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 547, + 506, + 713 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 104, + 547, + 504, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 547, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 547, + 504, + 602 + ], + "type": "text", + "content": "(1) Knowledge distillation training improves estimation. Training regimes incorporating any kind of knowledge distillation (KD) (Hinton et al., 2015) lead to DNNs with improved uncertainty estimation performance evaluated by any metric, more than by using any other training tricks (such as pretraining on a larger dataset, adversarial training, etc.). In Galil et al. (2023) we find similar performance boosts for class-out-of-distribution (C-OOD) detection." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 608, + 506, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 608, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 608, + 506, + 674 + ], + "type": "text", + "content": "(2) Certain architectures are more inclined to perform better or worse at uncertainty estimation. 
Some architectures seem more inclined to perform well on all aspects of uncertainty estimation, e.g., a subset of vision transformers (ViTs) (Dosovitskiy et al., 2021) and the zero-shot language-vision CLIP model (Radford et al., 2021), while other architectures tend to perform worse, e.g., EfficientNet-V2 and GENet (Tan & Le, 2021; Lin et al., 2020). These results are visualized in Figure 1. In Galil et al. (2023) we find that ViTs and CLIPs are also powerful C-OOD detectors." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 679, + 504, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 679, + 504, + 713 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 504, + 713 + ], + "type": "text", + "content": "(3) Several training regimes result in a subset of ViTs that outperforms all other architectures and training regimes. These regimes include the original one from the paper introducing ViTs (Dosovitskiy et al., 2021; Steiner et al., 2022; Chen et al., 2022; Ridnik et al., 2021). 
These ViTs" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 496, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 496, + 731 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 496, + 731 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 117, + 720, + 496, + 731 + ], + "type": "text", + "content": "Our code is available at https://github.com/IdoGalil/benchmarking-uncertainty-estimation-performance" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "achieve the best uncertainty estimation performance on any aspect measured, both in absolute terms and per-model size (# parameters, see Figures 9 and 10 in Appendix B)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 310 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 104, + 110, + 506, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 178 + ], + "type": "text", + "content": "(4) Temperature scaling improves selective and ranking performance. The simple post-training calibration method of temperature scaling (Guo et al., 2017), which is known to improve ECE, for the most part also improves ranking (AUROC) and selective prediction—meaning not only does it calibrate the probabilistic estimation for each individual instance, but it also improves the partial order of all instances induced by those improved estimations, pushing instances more likely to be correct to have a higher confidence score than instances less likely to be correct (see Section 3)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 506, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 271 + ], + "type": "text", + "content": "(5) The correlations between AUROC, ECE, accuracy and the number of parameters are dependent on the architecture analyzed. Contrary to previous work by (Guo et al., 2017), we observe that while there is a strong correlation between accuracy/number of parameters and ECE or AUROC within each specific family of models of the same architecture, the correlation flips between a strong negative and a strong positive correlation depending on the type of architecture being observed. For example, as DLA (Yu et al., 2018) architectures increase in size and accuracy, their ECE deteriorates while their AUROC improves. The exact opposite, however, can be observed in XCiTs (Ali et al., 2021) as their ECE improves with size while their AUROC deteriorates (see Appendix L)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 275, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 275, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 506, + 310 + ], + "type": "text", + "content": "(6) The best model in terms of AUROC or SAC is not always the best in terms of calibration, as illustrated in Figure 1, and the trade-off should be considered when choosing a model based on its application." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 325, + 477, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 325, + 477, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 325, + 477, + 338 + ], + "type": "text", + "content": "2 HOW TO EVALUATE DEEP UNCERTAINTY ESTIMATION PERFORMANCE" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " be the input space and " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\mathcal{Y}" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " be the label space. 
Let " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "P(\\mathcal{X},\\mathcal{Y})" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " be an unknown distribution over " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\mathcal{X}\\times \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ". A model " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " is a prediction function " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "f:\\mathcal{X}\\to \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ", and its predicted label for an image " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " is denoted by " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\hat{y}_f(x)" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ". The model's true risk w.r.t. 
" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "R(f|P) = E_{P(\\mathcal{X},\\mathcal{Y})}[\\ell (f(x),y)]" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\ell :\\mathcal{Y}\\times \\mathcal{Y}\\rightarrow \\mathbb{R}^{+}" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " is a given loss function, for example, 0/1 loss for classification. Given a labeled set " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "S_{m} = \\{(x_{i},y_{i})\\}_{i = 1}^{m}\\subseteq (\\mathcal{X}\\times \\mathcal{Y})" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ", sampled i.i.d. from " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "P(\\mathcal{X},\\mathcal{Y})" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ", the empirical risk of model " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\hat{r} (f|S_m)\\triangleq \\frac{1}{m}\\sum_{i = 1}^{m}\\ell (f(x_i),y_i)" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ". Following Geifman et al. 
(2018), for a given model " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " we define a confidence score function " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\kappa (x,\\hat{y} |f)" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "x\\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\hat{y}\\in \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " is the model's prediction for " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ", as follows. The function " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " should quantify confidence in the prediction of " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\hat{y}" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": " for the input " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ", based on signals from model " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": ". 
This function should induce a partial order over instances in " + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 350, + 506, + 453 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": "The most common and well-known " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " function for a classification model " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " (with softmax at its last layer) is its softmax response values: " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\kappa(x, \\hat{y} | f) \\triangleq f(x)_{\\hat{y}}" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " (Cordella et al., 1995; De Stefano et al., 2000). We chose to focus on studying uncertainty estimation performance using softmax response as the models' " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " function because of its extreme popularity, and its importance as a baseline due to its solid performance compared to other methods (Geifman & El-Yaniv, 2017; Geifman et al., 2018). 
While this is the main " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " we evaluate, we also test the popular uncertainty estimation technique of Monte Carlo dropout (MC dropout) (Gal & Ghahramani, 2016), which is motivated by Bayesian reasoning. Although these methods use the direct output from " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " could be a different model unrelated to " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " and unable to affect " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": "'s predictions. 
Note that to enable a probabilistic interpretation, " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " can only be calibrated if its values reside in " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "[0, 1]" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " whereas for ranking and selective prediction any value in " + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\mathbb{R}" + }, + { + "bbox": [ + 104, + 457, + 506, + 582 + ], + "type": "text", + "content": " can be used." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": "A selective model " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": " (El-Yaniv & Wiener, 2010; Chow, 1957) uses a selection function " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "g: \\mathcal{X} \\to \\{0,1\\}" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": " to serve as a binary selector for " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": ", enabling it to abstain from giving predictions for certain inputs. 
" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": " can be defined by a threshold " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": " on the values of a " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": " function such that " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "g_{\\theta}(x|\\kappa, f) = \\mathbb{1}[\\kappa(x, \\hat{y}_f(x)|f) > \\theta]" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": ". The performance of a selective model is measured using coverage and risk, where coverage, defined as " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\phi(f, g) = E_P[g(x)]" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": ", is the probability mass of the non-rejected instances in " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": ". The selective risk of the selective model " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "(f, g)" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": " is defined as " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "R(f, g) \\triangleq \\frac{E_P[\\ell(f(x), g(x))]}{\\phi(f, g)}" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": ". 
These quantities can be evaluated empirically over a finite labeled set " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "S_m" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": ", with the empirical coverage defined as " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}(f, g|S_m) = \\frac{1}{m} \\sum_{i=1}^{m} g(x_i)" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": ", and the empirical selective risk defined as " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{r}(f, g|S_m) \\triangleq \\frac{1}{m} \\sum_{i=1}^{m} \\frac{\\ell(f(x_i), y_i) g(x_i)}{\\hat{\\phi}(f, g|S_m)}" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": ". Similarly, SAC is defined as the largest coverage available for a specific accuracy constraint. A way to visually inspect the behavior of a " + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 585, + 506, + 733 + ], + "type": "text", + "content": " function for selective prediction can be done using the risk-coverage (RC) curve (El-Yaniv & Wiener, 2010)—a curve showing the selective risk as a function of coverage, measured on some chosen test set; see Figure 2 for an" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + 
"bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 225, + 79, + 387, + 207 + ], + "blocks": [ + { + "bbox": [ + 225, + 79, + 387, + 207 + ], + "lines": [ + { + "bbox": [ + 225, + 79, + 387, + 207 + ], + "spans": [ + { + "bbox": [ + 225, + 79, + 387, + 207 + ], + "type": "image", + "image_path": "3ed74c4b0f126392266bf292dc08034cc04d6d2875f51211b042029c66bc05df.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "lines": [ + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "text", + "content": "Figure 2: The RC curve made by a ResNet18 trained on CIFAR-10, measured on the test set. The risk is calculated using a 0/1 loss (meaning the model has about " + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "text", + "content": " accuracy for 1.0 coverage); the " + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "text", + "content": " used was softmax-response. The value of the risk at each point of coverage corresponds to the selective risk of the model when rejecting inputs that are not covered at that coverage slice. 
e.g., the selective risk for coverage 0.8 is about " + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "text", + "content": ", meaning that an end user setting a matching threshold would enjoy a model accuracy of " + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "inline_equation", + "content": "99.5\\%" + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "text", + "content": " on the " + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 214, + 506, + 282 + ], + "type": "text", + "content": " of images the model would not reject." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 316, + 504, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 339 + ], + "type": "text", + "content": "example. In general, though, two RC curves are not necessarily comparable if one does not fully dominate the other (Figure 3 shows an example of lack of dominance)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 344, + 506, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 344, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 506, + 509 + ], + "type": "text", + "content": "The AURC and E-AURC metrics were defined by (Geifman et al., 2018) for quantifying the selective quality of " + }, + { + "bbox": [ + 104, + 344, + 506, + 509 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 344, + 506, + 509 + ], + "type": "text", + "content": " functions via a single number, with AURC being defined as the area under the RC curve. 
AURC, however, is very sensitive to the model's accuracy, and in an attempt to mitigate this, E-AURC was suggested. The latter also suffers from sensitivity to accuracy, as we demonstrate in Appendix C. The advantage of scalar metrics such as the above is that they summarize the model's overall uncertainty estimation behavior by reducing it to a single scalar. When not carefully chosen, however, these reductions could result in a loss of vital information about the problem (recall the investment example from Section 1, which is also discussed in Appendix A: reducing an RC curve to an AURC does not show that Model B has an optimal 0 risk if the coverage is smaller than 0.4). Thus, the choice of the \"correct\" single scalar performance metric unfortunately must be task-specific. When comparing the uncertainty estimation performance of deep architectures that exhibit different accuracies, we find that AUROC and SAC can effectively \"normalize\" accuracy differences that plague the usefulness of other metrics (see Figure 3). This normalization is essential in our study where we compare uncertainty performance of hundreds of models that can greatly differ in their accuracies." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": "For risk-sensitive deployment, let us consider the two models in Figure 3; EfficientNet-V2-XL (Tan & Le, 2021) and ViT-B/32-SAM (Chen et al., 2022). 
While the former model has better overall accuracy and AURC (metrics that could lead us to believe the model is best for our needs), it cannot guarantee a Top-1 ImageNet selective accuracy above " + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": " for any coverage. ViT-B/32-SAM, on the other hand, can provide accuracies above " + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": " for all coverages below " + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 514, + 504, + 571 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 575, + 504, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 504, + 650 + ], + "type": "text", + "content": "In applications where risk (or coverage) constraints are dictated (Geifman & El-Yaniv, 2017), the most straightforward and natural metric is SAC (or selective risk), which directly measures the coverage (resp., risk) given at the required level of risk (resp., coverage) constraint. We demonstrate this in Appendix I, evaluating which models give the most coverage for an ambitious SAC of " + }, + { + "bbox": [ + 104, + 575, + 504, + 650 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 104, + 575, + 504, + 650 + ], + "type": "text", + "content": ". 
If instead a specific range of coverages is specified, we could measure the area under the RC curve for those coverages: " + }, + { + "bbox": [ + 104, + 575, + 504, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{AURC}_{\\mathcal{C}}(\\kappa, f|S_m) = \\frac{1}{|\\mathcal{C}|}\\sum_{c\\in \\mathcal{C}}\\hat{r} (f,g_c|S_m)" + }, + { + "bbox": [ + 104, + 575, + 504, + 650 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 104, + 575, + 504, + 650 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 575, + 504, + 650 + ], + "type": "text", + "content": " being those required coverages." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 654, + 507, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 507, + 733 + ], + "type": "text", + "content": "Often, these requirements are not known or can change as a result of changing circumstances or individual needs. Also, using metrics sensitive to accuracy such as AURC makes designing architectures and methods to improve " + }, + { + "bbox": [ + 104, + 654, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 654, + 507, + 733 + ], + "type": "text", + "content": " very hard, since an improvement in these metrics could be attributed to either an increase in overall accuracy (if such occurred) or to a real improvement in the model's ranking performance. Lastly, some tasks might not allow the model to abstain from making predictions at all, but instead require interpretable and well-calibrated probabilities of correctness, which could be measured using ECE." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 165, + 80, + 443, + 276 + ], + "blocks": [ + { + "bbox": [ + 165, + 80, + 443, + 276 + ], + "lines": [ + { + "bbox": [ + 165, + 80, + 443, + 276 + ], + "spans": [ + { + "bbox": [ + 165, + 80, + 443, + 276 + ], + "type": "image", + "image_path": "314ef4f65139873f967cdbaaab5f81a96139c78a6a5ff353109de204f28481d9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 282, + 506, + 319 + ], + "lines": [ + { + "bbox": [ + 104, + 282, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 506, + 319 + ], + "type": "text", + "content": "Figure 3: A comparison of RC curves made by the best (ViT-L/16-384) and worst (EfficientNet-V2-XL) models we evaluated in terms of AUROC. Comparing ViT-B/32-SAM to EfficientNet-V2 exemplifies the fact that neither accuracy nor AURC reflect selective performance well enough." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 339, + 311, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 339, + 311, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 311, + 350 + ], + "type": "text", + "content": "2.1 MEASURING RANKING AND CALIBRATION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "content": "A " + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "content": " function is not necessarily able to change the model's predictions. Therefore, it can improve the selective risk by ranking correct and incorrect predictions better, inducing a more accurate partial order over instances in " + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "content": ". 
Thus, for every two random samples " + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "inline_equation", + "content": "(x_{1},y_{1}),(x_{2},y_{2})\\sim P(\\mathcal{X},\\mathcal{Y})" + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "content": " and given that " + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\ell (f(x_1),y_1) > \\ell (f(x_2),y_2)" + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "content": ", the ranking performance of " + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "content": " is defined as the probability that " + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "content": " ranks " + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "inline_equation", + "content": "x_{2}" + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "content": " higher than " + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "inline_equation", + "content": "x_{1}" + }, + { + "bbox": [ + 104, + 361, + 504, + 416 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 187, + 423, + 504, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 423, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 187, + 423, + 504, + 437 + ], + "type": "interline_equation", + "content": "\\Pr \\left[ \\kappa \\left(x _ {1}, \\hat {y} \\mid f\\right) < \\kappa \\left(x _ {2}, \\hat {y} \\mid f\\right) \\mid \\ell \\left(f \\left(x _ {1}\\right), y _ {1}\\right) > \\ell \\left(f \\left(x _ {2}\\right), y _ {2}\\right) \\right] \\tag {1}", + "image_path": 
"8152c1c11c5d81a81788f5c1c66a5ecaf1181ab213896445adfaf87b52a6c234.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "spans": [ + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "content": "We discuss this definition in greater detail in Appendix D. The AUROC metric is often used in the field of machine learning. When the " + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "inline_equation", + "content": "0/1" + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "content": " loss is in play, it is known that AUROC in fact equals the probability in Equation (1) (Fawcett, 2006) and thus is a proper metric to measure ranking in classification (AKA discrimination). AUROC is furthermore equivalent to Goodman and Kruskal's " + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "content": "-correlation (Goodman & Kruskal, 1954), which for decades has been extensively used to measure ranking (known as \"resolution\") in the field of metacognition (Nelson, 1984). The precise relationship between " + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "content": "-correlation and AUROC is " + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "inline_equation", + "content": "\\gamma = 2 \\cdot \\text{AUROC} - 1" + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "content": " (Higham & Higham, 2018). 
We note also that both the " + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "content": "-correlation and AUROC are nearly identical or closely related to various other correlations and metrics; " + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "content": "-correlation (AUROC) becomes identical to Kendall's " + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "content": " (up to a linear transformation) in the absence of tied values. Both metrics are also closely related to rank-biserial correlation, the Gini coefficient (not to be confused with the measure from economics) and the Mann-Whitney " + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 104, + 442, + 506, + 610 + ], + "type": "text", + "content": " test, hinting at their importance and usefulness in a variety of fields and settings. In Appendix E, we briefly compare the ranking performance of deep neural networks and humans in metacognitive research, and in Appendix F we address criticism of using AUROC to measure ranking." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": "The most widely used metric for calibration is ECE (Naeini et al., 2015). 
For a finite test set of size " + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": ", ECE is calculated by grouping all instances into " + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": " interval bins (such that " + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "inline_equation", + "content": "m \\ll N" + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": "), each of size " + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "inline_equation", + "content": "\\frac{1}{m}" + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": " (the confidence interval of bin " + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "inline_equation", + "content": "B_j" + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "inline_equation", + "content": "\\left(\\frac{j-1}{m}, \\frac{j}{m}\\right]" + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": "). 
With " + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "inline_equation", + "content": "\\mathrm{acc}(B_j)" + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": " being the mean accuracy in bin " + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "inline_equation", + "content": "B_j" + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "inline_equation", + "content": "\\mathrm{conf}(B_j)" + }, + { + "bbox": [ + 104, + 613, + 504, + 660 + ], + "type": "text", + "content": " being its mean confidence, ECE is defined as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 184, + 667, + 424, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 667, + 424, + 735 + ], + "spans": [ + { + "bbox": [ + 184, + 667, + 424, + 735 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} E C E = \\sum_ {j = 1} ^ {m} \\frac {| B _ {j} |}{N} \\sum_ {i \\in B _ {j}} \\left| \\frac {\\mathbf {1} [ \\hat {y} _ {f} (x _ {i}) = y _ {i} ]}{| B _ {j} |} - \\frac {\\kappa (x , \\hat {y} _ {f} (x _ {i}) | f)}{| B _ {j} |} \\right| \\\\ = \\sum_ {j = 1} ^ {m} \\frac {\\left| B _ {j} \\right|}{N} \\left| \\operatorname {a c c} \\left(B _ {j}\\right) - \\operatorname {c o n f} \\left(B _ {j}\\right) \\right| \\\\ \\end{array}", + "image_path": "3999336d5b00d81d473091d8fb8314cab38dbe5433b20b55754a3d6c809702a6.jpg" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { 
+ "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": "Since ECE is widely accepted we use it here to evaluate calibration, and follow (Guo et al., 2017) in setting the number of bins to " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "m = 15" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": ". Many alternatives to ECE exist, allowing an adaptive binning scheme, evaluating the calibration on the non-chosen labels as well, and other various methods (Nixon et al., 2019; Vaicenavicius et al., 2019; Zhao et al., 2020). Relevant to our objective is that by using binning, this metric is not affected by the overall accuracy as is the Brier score (mentioned in Section 1), for example." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 164, + 261, + 176 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 164, + 261, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 261, + 176 + ], + "type": "text", + "content": "3 PERFORMANCE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 156, + 190, + 455, + 312 + ], + "blocks": [ + { + "bbox": [ + 156, + 190, + 455, + 312 + ], + "lines": [ + { + "bbox": [ + 156, + 190, + 455, + 312 + ], + "spans": [ + { + "bbox": [ + 156, + 190, + 455, + 312 + ], + "type": "image", + "image_path": "e16ae8fc794f6b8b3e22af3848104c33e5b6c16d8f8eeae744aa372c7997e6e9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 164, + 327, + 446, + 441 + ], + "blocks": [ + { + "bbox": [ + 299, + 317, + 311, + 327 + ], + "lines": [ + { + "bbox": [ + 299, + 317, + 311, + 327 + ], + "spans": [ + { + "bbox": [ + 299, + 317, + 311, + 327 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 164, + 327, + 446, + 441 + ], + "lines": [ + { + "bbox": [ + 164, + 327, + 446, + 441 + ], + "spans": [ + { + "bbox": [ + 164, + 327, + 446, + 441 + ], + "type": "image", + "image_path": "1fcc5c597d87ea2c03dd757c10dd6ecb06163e6450ec487a274a811e9adb6b18.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 445, + 311, + 454 + ], + "lines": [ + { + "bbox": [ + 299, + 445, + 311, + 454 + ], + "spans": [ + { + "bbox": [ + 299, + 445, + 311, + 454 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "type": "text", + "content": "Figure 4: A 
comparison of different methods and their improvement in terms of (a) AUROC and (b) ECE, relative to the same model's performance without employing the method. Markers above the x-axis represent models that benefited from the evaluated method, and vice versa. The numbers in the legend to the right of each method indicate the number of pairs compared. Temperature scaling can sometimes harm ECE, even though its purpose is to improve it." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 533, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 504, + 567 + ], + "type": "text", + "content": "In this section we study the performance of 523 different models (available in timm 0.4.12 and torchvision 0.10). Note that all figures from our analysis are available as interactive plotly plots in the supplementary material, which provides information about every data point." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 571, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 506, + 704 + ], + "type": "text", + "content": "1) Among the training regimes evaluated, knowledge distillation improves performance the most. We evaluated several training regimes: (a) Training that involves KD in any form, including Touvron et al. 
(2021b), knapsack pruning with distillation (in which the teacher is the original unpruned model) (Aflalo et al., 2020) and a pretraining technique that employs distillation (Ridnik et al., 2021); (b) adversarial training (Xie et al., 2020a; Tramère et al., 2018); (c) pretraining on ImageNet21k (\"pure\", with no additions) (Tan & Le, 2021; Touvron et al., 2021a; 2022); and (d) various forms of weakly or semi-supervised learning (Mahajan et al., 2018; Yalniz et al., 2019; Xie et al., 2020b). To make a fair comparison, we only compare pairs of models such that both models have identical architectures and training regimes, with the exception of the method itself being evaluated (e.g., training with or without knowledge distillation). More information about each data point of comparison is available in the supplementary material. Note that the samples are of various sizes due to the different number of potential models available for each." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Of the methods mentioned above, training methods incorporating distillation improve AUROC and ECE the most. 
For example, looking at Figure 4a, it is evident that distillation (purple box) almost" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 165, + 79, + 445, + 233 + ], + "blocks": [ + { + "bbox": [ + 165, + 79, + 445, + 233 + ], + "lines": [ + { + "bbox": [ + 165, + 79, + 445, + 233 + ], + "spans": [ + { + "bbox": [ + 165, + 79, + 445, + 233 + ], + "type": "image", + "image_path": "e134eba07fd3104387ff4199406165c294cd546e1215ccc3c7efaa9c54c30250.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 240, + 504, + 275 + ], + "lines": [ + { + "bbox": [ + 104, + 240, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 504, + 275 + ], + "type": "text", + "content": "Figure 5: Comparing teacher models (yellow markers) to their KD students (represented by markers with thick borders and a dot). The performance of each model is measured in AUROC (higher is better) and -log(ECE) (higher is better)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 297, + 506, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 506, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 506, + 398 + ], + "type": "text", + "content": "always improves AUROC, and moreover, its median improvement is the best of all techniques evaluated. The same observation can be made with regards to improving ECE; see Figure 4b. Distillation seems to greatly improve both metrics even when the teacher itself is much worse at both metrics. Figure 5 nicely shows this by comparing the teacher architecture and the students in each case. Additionally, in a pruning scenario that included distillation in which the original model was also the teacher (Aflalo et al., 2020), the pruned models outperformed their teachers. The fact that KD improves the model over its original form is surprising, and suggests that the distillation process itself helps uncertainty estimation. In Galil et al. (2023) we find that KD also improves C-OOD detection performance, measured by AUROC. We discuss these effects in greater detail in Appendix G." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 402, + 506, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 402, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 402, + 506, + 523 + ], + "type": "text", + "content": "2) Temperature scaling greatly benefits AUROC and selective prediction. 
Evaluations of the simple post-training calibration method of temperature scaling (TS) (Guo et al., 2017), which is widely known to improve ECE without changing the model's accuracy, also revealed several interesting facts: (a) TS consistently and greatly improves AUROC and selective performance (see Figure 4a)—meaning not only does TS calibrate the probabilistic estimation for each individual instance, but it also improves the partial order of all instances induced by those improved estimations. While TS is well known and used for calibration, to the best of our knowledge, its benefits for selective prediction were previously unknown. (b) While TS is usually beneficial, it could harm some models (see Figures 4a and 4b). While it is surprising that TS—a calibration method—would harm ECE, this phenomenon is explained by the fact that TS optimizes NLL and not ECE (to avoid trivial solutions), and the two may sometimes misalign. (c) Models that benefit from TS in terms" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 195, + 535, + 416, + 654 + ], + "blocks": [ + { + "bbox": [ + 195, + 535, + 416, + 654 + ], + "lines": [ + { + "bbox": [ + 195, + 535, + 416, + 654 + ], + "spans": [ + { + "bbox": [ + 195, + 535, + 416, + 654 + ], + "type": "image", + "image_path": "442e9c2100ba0d0ccdbb6ae60acbe3de55ee92187c02b65bf18b506a5f49b3ae.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 661, + 504, + 696 + ], + "lines": [ + { + "bbox": [ + 104, + 661, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 661, + 504, + 696 + ], + "type": "text", + "content": "Figure 6: Out of 523 models evaluated, models that were assigned a temperature higher than 1 by the calibration process tended to degrade in AUROC performance rather than improve. Markers above the x-axis represent models that benefited from TS, and vice versa." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "of AUROC tend to have been assigned a temperature smaller than 1 by the calibration process (see Figure 6). This, however, does not hold true for ECE (see Figure 14 in Appendix H). This example" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": "also emphasizes the fact that improvements in terms of AUROC do not necessarily translate into improvements in ECE, and vice versa. (d) While all models usually improve with TS, the overall ranking of uncertainty performance between families tends to stay similar, with the worse (in terms of ECE and AUROC) models closing most of the gap between them and the mediocre ones (see Figure 13 in Appendix H)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "type": "text", + "content": "3) A subset of ViTs outperforms all other architectures in selective prediction, ranking and calibration, both in absolute terms and per-model size. Several training regimes (including the original regime from the paper introducing ViT) Dosovitskiy et al. (2021); Steiner et al. (2022); Chen et al. (2022); Ridnik et al. (2021) result in ViTs that outperform all other architectures and training regimes in terms of AUROC and ECE (see Figure 1; Figure 13 in Appendix H shows this is true even after using TS) as well as for the SAC of " + }, + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "type": "text", + "content": " we explored (see Figure 7 and Appendix I). These ViTs also outperform all other models in terms of C-OOD detection performance (Galil et al., 2023). Moreover, for any size, ViT models outperform their competition in all of these metrics (see Figures 9 and 10 in Appendix B and Figure 15 in Appendix I)." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 178, + 251, + 433, + 411 + ], + "blocks": [ + { + "bbox": [ + 178, + 251, + 433, + 411 + ], + "lines": [ + { + "bbox": [ + 178, + 251, + 433, + 411 + ], + "spans": [ + { + "bbox": [ + 178, + 251, + 433, + 411 + ], + "type": "image", + "image_path": "37fbae763d641ce784deabef302abec247b021eabe92d823ac12a20aa1e46db4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "type": "text", + "content": "Figure 7: Comparison of models by their overall accuracy and the coverage they are able to provide given a selective accuracy constraint of Top-1 " + }, + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "type": "text", + "content": " on ImageNet. A higher coverage is better. Only ViT models are able to provide coverage beyond " + }, + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 418, + 504, + 475 + ], + "type": "text", + "content": " for this constraint. They provide more coverage than any other model compared to their accuracy or size. \"Various\" refers to all other models (out of the 523) that were not mentioned by name." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 491, + 506, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 506, + 657 + ], + "type": "text", + "content": "Further research into other training regimes, however, reveals that not all training regimes result in superb performance (Touvron et al., 2021b; 2022; Singh et al., 2022; Paszke et al., 2019) (these ViTs are dubbed \"ViT* in the figures), even when a similar amount of data is introduced into the training and strong augmentations are used. In fact, the models trained by Chen et al. (2022) were not pretrained at all and yet reach superb ranking. Even the largest model introduced by Tran et al. (2022), which is a large modified ViT that was pretrained on JFT-4B (a dataset containing 4 billion images) with the aim of excelling in uncertainty estimation (and other areas), is outperformed by the best ViT we evaluated: Plex L achieves an AUROC of 87.7 (while its smaller versions, Plex M and Plex S, achieve an AUROC of 87.4 and 86.7, respectively), compared to 88.5 achieved by ViT-L/16-384 that has less parameters and was pretrained on ImageNet-21k. In total, 18 ViTs trained on ImageNet-21k outperform² Plex L, among which are two variations of small ViTs (each with 36 or 22 million parameters). In Appendix J we analyze the different hyperparameters and augmentations used for training the ViT models evaluated in this paper. Unfortunately, no clear conclusions emerge to explain the advantage of the successful training regimes. There is, however, ample evidence to show that advanced augmentations are unlikely to be part of such an explanation." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 662, + 504, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 504, + 685 + ], + "type": "text", + "content": "The above facts suggest that the excellent performance exhibited by some ViTs cannot be attributed to the amount of data or to the augmentations used during training. These observations warrant" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 690, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 690, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 690, + 504, + 733 + ], + "type": "text", + "content": "2The authors had not released clear results for Plex ECE performance on ImageNet, making a comparison of calibration difficult. The authors mentioned that the average ECE of Plex L in CIFAR-10, CIFAR-100 and ImageNet is slightly below 0.01. Our evaluations revealed six ViTs that achieved the same results, with the most calibrated model being ViT-T/16 with an ECE of 0.0054 on ImageNet." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "additional research with the hope of either training more robust ViTs or transferring the unidentified ingredient of the successful subset of ViTs into other models." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 264 + ], + "type": "text", + "content": "4) Correlations between AUROC, ECE, accuracy and the model's size could either be positive or negative, and depend on the family of architectures evaluated. This observation contradicts previous smaller scale studies on calibration. While AUROC and ECE are (negatively) correlated (they have a Spearman correlation of -0.44, meaning that generally, as AUROC improves, so does ECE), their agreement on the best performing model depends greatly on the architectural family in question. For example, the Spearman correlation between the two metrics evaluated on 28 undistilled XCiTs is 0.76 (meaning ECE deteriorates as AUROC improves), while for the 33 ResNets (He et al., 2016) evaluated, the correlation is -0.74. 
Another general observation is that contrary to previous work by (Guo et al., 2017) concerning ECE, the correlations between ECE and the accuracy or the number of model parameters are nearly zero, although each family tends to have a strong correlation, either negative or positive. We include a family-based comparison in Appendix L for correlations between AUROC/ECE and accuracy, number of parameters and input size. These results suggest that while some architectures might utilize extra resources to achieve improved uncertainty estimation capabilities, other architectures do not and are even harmed in this respect." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 270, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 392 + ], + "type": "text", + "content": "5) The zero-shot language-vision CLIP model is well-calibrated, with its best instance outperforming " + }, + { + "bbox": [ + 104, + 270, + 506, + 392 + ], + "type": "inline_equation", + "content": "96\\%" + }, + { + "bbox": [ + 104, + 270, + 506, + 392 + ], + "type": "text", + "content": " of all other models. CLIP (Radford et al., 2021) enables zero-shot classification and demonstrates impressive performance. We find it is also inclined to be well-calibrated. See Appendix K for details about how we use CLIP. The most calibrated CLIP is based on ViT-B/32 with a linear-probe added to it, and obtains an ECE of " + }, + { + "bbox": [ + 104, + 270, + 506, + 392 + ], + "type": "inline_equation", + "content": "1.3\\%" + }, + { + "bbox": [ + 104, + 270, + 506, + 392 + ], + "type": "text", + "content": ", which outperforms " + }, + { + "bbox": [ + 104, + 270, + 506, + 392 + ], + "type": "inline_equation", + "content": "96\\%" + }, + { + "bbox": [ + 104, + 270, + 506, + 392 + ], + "type": "text", + "content": " of models evaluated. 
Moreover, for their size category, CLIP models tend to outperform their competition in calibration, with the exception of ViTs (see Figure 10 in Appendix B). While this trend is clear for zero-shot CLIPs, we note that some models' calibration performance deteriorates with the addition of a linear-probe. Further research is required to understand the ingredients of multimodal models' contribution to calibration, and to find ways to utilize them to get better calibrated models. For example, could a multimodal pretraining regime be used to get better calibrated models?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 396, + 506, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 506, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 506, + 485 + ], + "type": "text", + "content": "6) MC dropout does not improve selective performance, in accordance with previous works. We evaluate the performance of MC dropout using predictive entropy as its confidence score and 30 dropout-enabled forward passes. We do not measure its effects on ECE since entropy scores do not reside in [0, 1]. Using MC dropout causes a consistent drop in both AUROC and selective performance compared with using the same models with softmax as the " + }, + { + "bbox": [ + 104, + 396, + 506, + 485 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 396, + 506, + 485 + ], + "type": "text", + "content": " (see Appendix M and Figure 4a). MC dropout's underperformance was also previously observed in (Geifman & El-Yaniv, 2017). We note, however, that evaluations we have conducted in Galil et al. (2023) show that MC dropout performs well when dealing with C-OOD data." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 502, + 253, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 253, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 253, + 514 + ], + "type": "text", + "content": "4 CONCLUDING REMARKS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 528, + 506, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 606 + ], + "type": "text", + "content": "We presented a comprehensive study of the effectiveness of numerous DNN architectures (families) in providing reliable uncertainty estimation, including the impact of various techniques on improving such capabilities. Our study led to many new insights and perhaps the most important ones are: (1) architectures trained with distillation almost always improve their uncertainty estimation performance, (2) temperature scaling is very useful not only for calibration but also for ranking and selective prediction, and (3) no DNN (evaluated in this study) demonstrated an uncertainty estimation performance comparable—in any metric tested—to a subset of ViT models (see Section 3)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": "Our work leaves open many interesting avenues for future research and we would like to mention a few. Perhaps the most interesting question is why distillation is so beneficial in boosting uncertainty estimation. Next, is there an architectural secret in vision transformers (ViT) that enables their uncertainty estimation supremacy under certain training regimes? 
This issue is especially puzzling given the fact that comparable performance is not observed in many other supposedly similar transformer-based models that we tested. If the secret is not in the architecture, what is the mysterious ingredient of the subset of training regimes that produces such superb results, and how can it be used to train other models? Finally, can we create specialized training regimes (e.g., Geifman & El-Yaniv (2019)), specialized augmentations, special pretraining regimes (such as CLIP's multimodal training regime) or even specialized neural architecture search (NAS) strategies that can promote superior uncertainty estimation performance?" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 218, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 218, + 93 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 218, + 93 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 505, + 156 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 105, + 105, + 466, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 466, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 466, + 118 + 
], + "type": "text", + "content": "This research was partially supported by the Israel Science Foundation, grant No. 710/18." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 121, + 505, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 505, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 505, + 156 + ], + "type": "text", + "content": "We thank Prof. Rakefet Ackerman for her help with understanding how uncertainty estimation performance is evaluated for humans in the field of metacognition, and for her useful comments for Appendix E." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 106, + 171, + 175, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 171, + 175, + 182 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 175, + 182 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 189, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 106, + 189, + 505, + 234 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 189, + 505, + 234 + ], + "spans": [ + { + "bbox": [ + 106, + 189, + 505, + 234 + ], + "type": "text", + "content": "Rakefet Ackerman, Avi Parush, Fareda Nassar, and Avraham Shtub. Metacognition and system usability: Incorporating metacognitive research paradigm into usability testing. Computers in Human Behavior, 54:101-113, January 2016. doi: 10.1016/j.chb.2015.07.041. URL https://doi.org/10.1016/j.chb.2015.07.041." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 239, + 505, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 239, + 505, + 283 + ], + "spans": [ + { + "bbox": [ + 106, + 239, + 505, + 283 + ], + "type": "text", + "content": "Rakefet Ackerman, Avigdor Gal, Tomer Sagi, and Roee Shraga. A cognitive model of human bias in matching. 
In PRICAI 2019: Trends in Artificial Intelligence, pp. 632-646. Springer International Publishing, 2019. doi: 10.1007/978-3-030-29908-8_50. URL https://doi.org/10.1007/978-3-030-29908-8_50." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 289, + 504, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 289, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 107, + 289, + 504, + 312 + ], + "type": "text", + "content": "Yonathan Aflalo, Asaf Noy, Ming Lin, Itamar Friedman, and Lihi Zelnik-Manor. Knapsack pruning with inner distillation. CoRR, abs/2002.08258, 2020. URL https://arxiv.org/abs/2002.08258." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 318, + 506, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 318, + 506, + 396 + ], + "spans": [ + { + "bbox": [ + 106, + 318, + 506, + 396 + ], + "type": "text", + "content": "Alaaeldin Ali, Hugo Touvron, Mathilde Caron, Piotr Bojanowski, Matthijs Douze, Armand Joulin, Ivan Laptev, Natalia Neverova, Gabriel Synnaeve, Jakob Verbeek, and Hervé Jégou. Xcit: Cross-covariance image transformers. In Marc' Aurelio Ranzato, Alina Beygelzimer, Yann N. Dauphin, Percy Liang, and Jennifer Wortman Vaughan (eds.), Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pp. 20014-20027, 2021. URL https://proceedings.neurips.cc/paper/2021/bitize/a655fbe4b8d7439994aa37ddad80de56-AAbstract.html." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 401, + 506, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 401, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 107, + 401, + 506, + 445 + ], + "type": "text", + "content": "Alexandra Basile, Maggie E. Toplak, and Brendan F. Andrade. Using metacognitive methods to examine emotion recognition in children with ADHD. 
Journal of Attention Disorders, 25(2): 245-257, November 2018. doi: 10.1177/1087054718808602. URL https://doi.org/10.1177/1087054718808602." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 452, + 504, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 452, + 504, + 474 + ], + "spans": [ + { + "bbox": [ + 107, + 452, + 504, + 474 + ], + "type": "text", + "content": "Glenn W. Brier. Verification of Forecasts Expressed in Terms of Probability. Monthly Weather Review, 78(1):1, January 1950. doi: 10.1175/1520-0493(1950)0780001:VOFEIT2.0.CO;2." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 480, + 506, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 480, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 107, + 480, + 506, + 525 + ], + "type": "text", + "content": "Xiangning Chen, Cho-Jui Hsieh, and Boqing Gong. When vision transformers outperform resnets without pre-training or strong data augmentations. In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. OpenReview.net, 2022. URL https://openreview.net/forum?id=LtKcMgGOeLt." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 530, + 504, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 530, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 107, + 530, + 504, + 552 + ], + "type": "text", + "content": "C. K. Chow. An optimum character recognition system using decision functions. IRE Transactions on Electronic Computers, EC-6(4):247-254, 1957. doi: 10.1109/TEC.1957.5222035." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 558, + 506, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 558, + 506, + 591 + ], + "spans": [ + { + "bbox": [ + 107, + 558, + 506, + 591 + ], + "type": "text", + "content": "L. P. Cordella, C. De Stefano, F. Tortorella, and M. Vento. 
A method for improving classification reliability of multilayer perceptrons. IEEE Transactions on Neural Networks, 6(5):1140-1147, 1995. doi: 10.1109/72.410358." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 597, + 506, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 597, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 107, + 597, + 506, + 631 + ], + "type": "text", + "content": "C. De Stefano, C. Sansone, and M. Vento. To reject or not to reject: that is the question-an answer in case of neural classifiers. IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews), 30(1):84-94, 2000. doi: 10.1109/5326.827457." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 637, + 506, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 637, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 107, + 637, + 506, + 670 + ], + "type": "text", + "content": "Yukun Ding, Jinglan Liu, Jinjun Xiong, and Yiyu Shi. Evaluation of neural network uncertainty estimation with application to resource-constrained platforms. CoRR, abs/1903.02050, 2019. URL http://arxiv.org/abs/1903.02050." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 677, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 677, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 677, + 506, + 732 + ], + "type": "text", + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=YicbFdNTTy." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 733 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 507, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 507, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 507, + 106 + ], + "type": "text", + "content": "Ran El-Yaniv and Yair Wiener. On the foundations of noise-free selective classification. Journal of Machine Learning Research, 11(5), 2010." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 110, + 507, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 110, + 507, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 507, + 146 + ], + "type": "text", + "content": "Tom Fawcett. An introduction to roc analysis. Pattern Recognition Letters, 27(8):861-874, 2006. ISSN 0167-8655. doi: https://doi.org/10.1016/j.patrec.2005.10.010. URL https://www.sciencedirect.com/science/article/pii/S016786550500303X. ROC Analysis in Pattern Recognition." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 150, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 150, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 506, + 176 + ], + "type": "text", + "content": "K. Fiedler, Rakefet Ackerman, and Chiara Scarampi. ! metacognition : Monitoring and controlling one ’ s own knowledge , reasoning and decisions. 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 178, + 507, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 178, + 507, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 178, + 507, + 225 + ], + "type": "text", + "content": "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=6Tm1mposlrM." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 230, + 506, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 230, + 506, + 256 + ], + "spans": [ + { + "bbox": [ + 105, + 230, + 506, + 256 + ], + "type": "text", + "content": "Jonathan Frankle and Michael Carbin. The lottery ticket hypothesis: Training pruned neural networks. CoRR, abs/1803.03635, 2018. URL http://arxiv.org/abs/1803.03635." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 259, + 507, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 259, + 507, + 317 + ], + "spans": [ + { + "bbox": [ + 105, + 259, + 507, + 317 + ], + "type": "text", + "content": "Yarin Gal and Zoubin Ghahramani. Dropout as a bayesian approximation: Representing model uncertainty in deep learning. In Maria-Florina Balcan and Kilian Q. 
Weinberger (eds.), Proceedings of the 33nd International Conference on Machine Learning, ICML 2016, New York City, NY, USA, June 19-24, 2016, volume 48 of JMLR Workshop and Conference Proceedings, pp. 1050-1059. JMLR.org, 2016. URL http://proceedings.mlr.press/v48/gal16.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 321, + 507, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 321, + 507, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 507, + 357 + ], + "type": "text", + "content": "Ido Galil, Mohammed Dabbah, and Ran El-Yaniv. A framework for benchmarking class-out-of-distribution detection and its application to imagenet. In International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=Iuubb9W6Jtk." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 361, + 507, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 507, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 507, + 430 + ], + "type": "text", + "content": "Yonatan Geifman and Ran El-Yaniv. Selective classification for deep neural networks. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 4878-4887, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/4a8423d5e91fda00bb7e46540e2b0cf1-Abstract.html." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 434, + 505, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 434, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 505, + 460 + ], + "type": "text", + "content": "Yonatan Geifman and Ran El-Yaniv. Selectivenet: A deep neural network with an integrated reject option. CoRR, abs/1901.09192, 2019. 
URL http://arxiv.org/abs/1901.09192." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 464, + 504, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 504, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 504, + 488 + ], + "type": "text", + "content": "Yonatan Geifman, Guy Uziel, and Ran El-Yaniv. Bias-reduced uncertainty estimation for deep neural classifiers. In International Conference on Learning Representations, 2018." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 493, + 507, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 493, + 507, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 507, + 529 + ], + "type": "text", + "content": "Leo A. Goodman and William H. Kruskal. Measures of association for cross classifications. Journal of the American Statistical Association, 49(268):732-764, December 1954. doi: 10.1080/01621459.1954.10501231. URL https://doi.org/10.1080/01621459.1954.10501231." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 533, + 507, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 533, + 507, + 579 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 507, + 579 + ], + "type": "text", + "content": "Thomas D. Griffin, Jennifer Wiley, and Keith W. Thiede. The effects of comprehension-test expectancies on metacomprehension accuracy. Journal of Experimental Psychology: Learning, Memory, and Cognition, 45(6):1066-1092, June 2019. doi: 10.1037/xlm0000634. URL https://doi.org/10.1037/xlm0000634." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 584, + 507, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 507, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 507, + 641 + ], + "type": "text", + "content": "Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. On calibration of modern neural networks. 
In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pp. 1321-1330. PMLR, 2017. URL http://proceedings.mlr.press/v70/guo17a.html." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 647, + 507, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 507, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 507, + 693 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 770-778. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.90. URL https://doi.org/10.1109/CVPR.2016.90." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 697, + 507, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 507, + 733 + ], + "type": "text", + "content": "Philip A. Higham and D. Paul Higham. New improved gamma: Enhancing the accuracy of goodman-kruskal's gamma using ROC curves. Behavior Research Methods, 51(1):108-125, September 2018. doi: 10.3758/s13428-018-1125-5. URL https://doi.org/10.3758/s13428-018-1125-5." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 81, + 504, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 504, + 95 + ], + "type": "text", + "content": "Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network, 2015." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 102, + 505, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 102, + 505, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 505, + 138 + ], + "type": "text", + "content": "Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, and Hartwig Adam. Searching for mobilenetv3. CoRR, abs/1905.02244, 2019. URL http://arxiv.org/abs/1905.02244." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 146, + 507, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 146, + 507, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 146, + 507, + 214 + ], + "type": "text", + "content": "Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6402-6413, 2017. URL https://proceedings.neurips.cc/paper/2017混沌/9ef2ed4b7fd2c810847ffa5fa85bce38-AAbstract.html." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 222, + 505, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 222, + 505, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 505, + 246 + ], + "type": "text", + "content": "Ming Lin, Hesen Chen, Xiuyu Sun, Qi Qian, Hao Li, and Rong Jin. Neural architecture design forgpu-efficient networks, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 255, + 506, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 506, + 277 + ], + "type": "text", + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id=Bkg6RiCqY7." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 286, + 507, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 286, + 507, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 286, + 507, + 364 + ], + "type": "text", + "content": "Wesley J. 
Maddox, Pavel Izmailov, Timur Garipov, Dmitry P. Vetrov, and Andrew Gordon Wilson. A simple baseline for bayesian uncertainty in deep learning. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 13132-13143, 2019. URL https://proceedings.neurips.cc/paper/2019/bitnet/118921efba23fc329e6560b27861f0c2-Abstract.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 373, + 507, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 507, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 507, + 451 + ], + "type": "text", + "content": "Dhruv Mahajan, Ross B. Girshick, Vignesh Ramanathan, Kaiming He, Manohar Paluri, Yixuan Li, Ashwin Bharambe, and Laurens van der Maaten. Exploring the limits of weakly supervised pretraining. In Vittorio Ferrari, Martial Hebert, Cristian Sminchisescu, and Yair Weiss (eds.), Computer Vision - ECCV 2018 - 15th European Conference, Munich, Germany, September 8-14, 2018, Proceedings, Part II, volume 11206 of Lecture Notes in Computer Science, pp. 185-201. Springer, 2018. doi: 10.1007/978-3-030-01216-8\\_.12. URL https://doi.org/10.1007/978-3-030-01216-8_12." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 460, + 505, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 505, + 506 + ], + "type": "text", + "content": "Jooyoung Moon, Jihyo Kim, Younghak Shin, and Sangheum Hwang. Confidence-aware learning for deep neural networks. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 7034-7044. PMLR, 2020. 
URL http://proceedings.mlr.press/v119/moon20a.html." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 514, + 507, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 507, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 507, + 582 + ], + "type": "text", + "content": "Zachary Nado, Neil Band, Mark Collier, Josip Djolonga, Michael W. Dusenberry, Sebastian Farquhar, Angelos Filos, Marton Havasi, Rodolphe Jenatton, Ghassen Jerfel, Jeremiah Liu, Zelda Mariet, Jeremy Nixon, Shreyas Padhy, Jie Ren, Tim G. J. Rudner, Yeming Wen, Florian Wenzel, Kevin Murphy, D. Sculley, Balaji Lakshminarayanan, Jasper Snoek, Yarin Gal, and Dustin Tran. Uncertainty baselines: Benchmarks for uncertainty & robustness in deep learning. CoRR, abs/2106.04015, 2021. URL https://arxiv.org/abs/2106.04015." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 590, + 505, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 590, + 505, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 590, + 505, + 624 + ], + "type": "text", + "content": "Mahdi Pakdaman Naeini, Gregory F. Cooper, and Milos Hauskrecht. Obtaining well calibrated probabilities using bayesian binning. In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, AAAI'15, pp. 2901-2907. AAAI Press, 2015. ISBN 0262511290." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 633, + 507, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 633, + 507, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 507, + 689 + ], + "type": "text", + "content": "Niv Nayman, Yonathan Aflalo, Asaf Noy, and Lihi Zelnik. *Hard constrained differentiable neural architecture search*. In Marina Meila and Tong Zhang (eds.), *Proceedings of the 38th International Conference on Machine Learning*, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of *Proceedings of Machine Learning Research*, pp. 
7979-7990. PMLR, 2021. URL http://proceedings.mlr.press/v139/nayman21a.html." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "text", + "content": "Thomas O. Nelson. A comparison of current measures of the accuracy of feeling-of-knowing predictions. *Psychological Bulletin*, 95(1):109-133, 1984. doi: 10.1037/0033-2909.95.1.109. URL https://doi.org/10.1037/0033-2909.95.1.109." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 149 + ], + "type": "text", + "content": "Jeremy Nixon, Michael W. Dusenberry, Linchuan Zhang, Ghassen Jerfel, and Dustin Tran. Measuring calibration in deep learning. In IEEE Conference on Computer Vision and Pattern Recognition Workshops, CVPR Workshops 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 38-41. 
Computer Vision Foundation / IEEE, 2019. URL http://openaccess.thecvf.com/content_CVPRW_2019/html/Uncertainty_and_Robustness_in_Deep_Visual_Learning/Nixon_Measuring_Calibration_in_Deep_Learning_CVPRW_2019_paper.html.
URL http://proceedings.mlr.press/v139/radford21a.html." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 331, + 506, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 331, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 107, + 331, + 506, + 388 + ], + "type": "text", + "content": "Tal Ridnik, Emanuel Ben Baruch, Asaf Noy, and Lihi Zelnik. Imagenet-21k pretraining for the masses. In Joaquin Vanschoren and Sai-Kit Yeung (eds.), Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual, 2021. URL https://datasets-benchmarks-proceedings.neurips.cc/paper/2021混沌/98f13708210194c475687be6106a3b84-AAbstract-round1.html." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 396, + 506, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 396, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 107, + 396, + 506, + 463 + ], + "type": "text", + "content": "Mark Sandler, Andrew G. Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh Chen. *Mobilenetv2: Inverted residuals and linear bottlenecks*. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pp. 4510-4520. Computer Vision Foundation / IEEE Computer Society, 2018. doi: 10.1109/CVPR.2018.00474. URL http://openaccess.thecvf.com/content_cvpr_2018/html/Sandler_MobileNetV2_Inverted_Residuals_CVPR_2018_paper.html." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 472, + 506, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 472, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 107, + 472, + 506, + 517 + ], + "type": "text", + "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. 
In Yoshua Bengio and Yann LeCun (eds.), 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. URL http://arxiv.org/abs/1409.1556." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 525, + 506, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 525, + 506, + 571 + ], + "spans": [ + { + "bbox": [ + 107, + 525, + 506, + 571 + ], + "type": "text", + "content": "Mannat Singh, Laura Gustafson, Aaron Adcock, Vinicius de Freitas Reis, Bugra Gedik, Raj Prateek Kosaraju, Dhruv Mahajan, Ross B. Girshick, Piotr Dolkar, and Laurens van der Maaten. Revisiting weakly supervised pre-training of visual perception models. CoRR, abs/2201.08371, 2022. URL https://arxiv.org/abs/2201.08371." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 579, + 506, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 579, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 107, + 579, + 506, + 624 + ], + "type": "text", + "content": "Andreas Peter Steiner, Alexander Kolesnikov, Xiaohua Zhai, Ross Wightman, Jakob Uszkoreit, and Lucas Beyer. How to train your vit? data, augmentation, and regularization in vision transformers. Transactions on Machine Learning Research, 2022. URL https://openreview.net/forum?id=4nPswr1KcP." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 633, + 506, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 633, + 506, + 678 + ], + "spans": [ + { + "bbox": [ + 107, + 633, + 506, + 678 + ], + "type": "text", + "content": "Mingxing Tan and Quoc V. Le. Efficientnetv2: Smaller models and faster training. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10096-10106. PMLR, 2021. 
URL http://proceedings.mlr.press/v139/tan21a.html." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 687, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 687, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 687, + 506, + 732 + ], + "type": "text", + "content": "Hugo Touvron, Piotr Bojanowski, Mathilde Caron, Matthieu Cord, Alaaeldin El-Nouby, Edouard Grave, Armand Joulin, Gabriel Synnaeve, Jakob Verbeek, and Hervé Jégou. Resmlp: Feedforward networks for image classification with data-efficient training. CoRR, abs/2105.03404, 2021a. URL https://arxiv.org/abs/2105.03404." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 727 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 149 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 149 + ], + "type": "text", + "content": "Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. 
In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10347-10357. PMLR, 2021b. URL http://proceedings.mlr.press/v139/touvron21a.html." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 157, + 507, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 157, + 507, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 157, + 507, + 182 + ], + "type": "text", + "content": "Hugo Touvron, Matthieu Cord, and Herve Jégou. Deit III: revenge of the vit. CoRR, abs/2204.07118, 2022. doi: 10.48550/arXiv.2204.07118. URL https://doi.org/10.48550/arXiv.2204.07118." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 188, + 507, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 188, + 507, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 188, + 507, + 224 + ], + "type": "text", + "content": "Florian Tramèr, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. Ensemble adversarial training: Attacks and defenses. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=rkZvSe-RZ." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 231, + 507, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 507, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 507, + 309 + ], + "type": "text", + "content": "Dustin Tran, Jeremiah Liu, Michael W. Dusenberry, Du Phan, Mark Collier, Jie Ren, Kehang Han, Zi Wang, Zelda Mariet, Huiyi Hu, Neil Band, Tim G. J. Rudner, Karan Singhal, Zachary Nado, Joost van Amersfoort, Andreas Kirsch, Rodolphe Jenatton, Nithum Thain, Honglin Yuan, Kelly Buchanan, Kevin Murphy, D. Sculley, Yarin Gal, Zoubin Ghahramani, Jasper Snoek, and Balaji Lakshminarayanan. 
Plex: Towards reliability using pretrained large model extensions. CoRR, abs/2207.07411, 2022. doi: 10.48550/arXiv.2207.07411. URL https://doi.org/10.48550/arXiv.2207.07411." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 317, + 507, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 317, + 507, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 507, + 352 + ], + "type": "text", + "content": "Monika Undorf and Arndt Broder. Cue integration in metamemory judgements is strategic. Quarterly Journal of Experimental Psychology, 73(4):629-642, October 2019. doi: 10.1177/1747021819882308. URL https://doi.org/10.1177/1747021819882308." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 360, + 507, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 360, + 507, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 360, + 507, + 427 + ], + "type": "text", + "content": "Juozas Vaicenavicius, David Widmann, Carl R. Andersson, Fredrik Lindsten, Jacob Roll, and Thomas B. Schön. Evaluating model calibration in classification. In Kamalika Chaudhuri and Masashi Sugiyama (eds.), The 22nd International Conference on Artificial Intelligence and Statistics, AISTATS 2019, 16-18 April 2019, Naha, Okinawa, Japan, volume 89 of Proceedings of Machine Learning Research, pp. 3459-3467. PMLR, 2019. URL http://proceedings.mlr.press/v89/vaicenavicius19a.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 435, + 507, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 435, + 507, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 507, + 449 + ], + "type": "text", + "content": "Ross Wightman. Pytorch image models. https://github.com/rwrightman/pytorch-image-models, 2019." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 456, + 507, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 507, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 507, + 524 + ], + "type": "text", + "content": "Cihang Xie, Mingxing Tan, Boqing Gong, Jiang Wang, Alan L. Yuille, and Quoc V. Le. Adversarial examples improve image recognition. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 816-825. Computer Vision Foundation / IEEE, 2020a. doi: 10.1109/CVPR42600.2020.00090. URL https://openaccess.thecvf.com/content_CVPR_2020/html/Xie_Adversarial_Examples_Implore_Photosynthetic_Vision_Example.pdf." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 531, + 507, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 531, + 507, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 507, + 599 + ], + "type": "text", + "content": "Qizhe Xie, Minh-Thang Luong, Eduard H. Hovy, and Quoc V. Le. Self-training with noisy student improves imagenet classification. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 10684-10695. Computer Vision Foundation / IEEE, 2020b. doi: 10.1109/CVPR42600.2020.01070. URL https://openaccess.thecvf.com/content_CVPR_2020/html/Xie_Self-Training_With_Noisy_Student_Approves_Photosynthetic_Vision_Paper.html." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 607, + 507, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 607, + 507, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 607, + 507, + 631 + ], + "type": "text", + "content": "I. Zeki Yalniz, Hervé Jégou, Kan Chen, Manohar Paluri, and Dhruv Mahajan. Billion-scale semi-supervised learning for image classification, 2019." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 638, + 507, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 638, + 507, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 507, + 673 + ], + "type": "text", + "content": "Fisher Yu, Dequan Wang, Evan Shelhamer, and Trevor Darrell. Deep layer aggregation. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2403-2412, 2018. doi: 10.1109/CVPR.2018.00255." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 681, + 507, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 507, + 727 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 507, + 727 + ], + "type": "text", + "content": "Shengjia Zhao, Tengyu Ma, and Stefano Ermon. Individual calibration with randomized forecasting. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 11387-11397. PMLR, 2020. URL http://proceedings.mlr.press/v119/zhao20e.html." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 274, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 274, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 274, + 94 + ], + "type": "text", + "content": "A THE INVESTMENT EXAMPLE" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 239 + ], + "type": "text", + "content": "Let us consider two classification models for the stock market that predict whether a stock's value is about to increase, decrease or remain neutral (three-class classification). 
Suppose that Model A has a " + }, + { + "bbox": [ + 104, + 106, + 506, + 239 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 106, + 506, + 239 + ], + "type": "text", + "content": " true accuracy, and generates a confidence score of 0.95 on any prediction (even on misclassified instances); Model B has a " + }, + { + "bbox": [ + 104, + 106, + 506, + 239 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 104, + 106, + 506, + 239 + ], + "type": "text", + "content": " true accuracy, but always gives a confidence score of 0.6 on correct predictions, and 0.4 on incorrect ones. We now try and evaluate these two models using the uncertainty metrics mentioned in Section 1 to see which can reveal Model B's superior uncertainty estimation performance. AURC will fail due to its sensitivity to accuracy (the AURC of Model B is 0.12, more than twice as bad as the AURC for Model A, which is 0.05). NLL will rank Model A four times higher (Model A's NLL is 0.23 and Model B's is 0.93). The Brier score would also much prefer Model A (giving it a score of 0.096 while giving Model B a score of 0.54). Evaluating the models' calibration with ECE will also not reveal Model B's advantages, since it is less calibrated than Model A, which has perfect calibration (Model A has an ECE of 0, and Model B has a worse ECE of 0.4)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "content": "AUROC, on the other hand, would give Model B a perfect score of 1 and a terrible score of 0.5 to Model A. 
The selective risk for Model B would be better for any coverage of stock predictions below " + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "content": ", and for any SAC above " + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 104, + 243, + 504, + 277 + ], + "type": "text", + "content": " the coverage for Model A would be 0, but 0.4 for Model B." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 282, + 506, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 506, + 338 + ], + "type": "text", + "content": "Those two metrics are not perfect for any example. Let us instead compare two different models for the task of predicting the weather when we cannot abstain from making predictions. Accordingly, being required to provide an accurate probabilistic uncertainty estimation of the model's predictions, AUROC and selective risk would be meaningless (due to the model's inability to abstain in this task), but ECE or the Brier Score would better evaluate the performance the new task requires." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 354, + 394, + 367 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 394, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 394, + 367 + ], + "type": "text", + "content": "B RANKING AND CALIBRATION VISUAL COMPARISON" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 379, + 506, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 379, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 379, + 506, + 479 + ], + "type": "text", + "content": "A comparison of 523 models by their AUROC (" + }, + { + "bbox": [ + 104, + 379, + 506, + 479 + ], + "type": "inline_equation", + "content": "\\times 100" + }, + { + "bbox": [ + 104, + 379, + 506, + 479 + ], + "type": "text", + "content": ", higher is better) and -log(ECE) (higher is better) on ImageNet is visualized in Figure 8. An interactive version of this figure is provided as supplementary material. To compare models fairly by their size, we plot two graphs with the logarithm of the number of parameters as the X-axis, so that models sharing the same x value can be compared solely based on their y value. In Figure 9 we set the X axis to be AUROC (higher is better), and see ViTs outperform any other architecture with a comparable amount of parameters by a large margin. We can also observe that using distillation creates a consistent improvement in AUROC. In Figure 10 we set the X axis to be the negative logarithm of ECE (higher is better) and observe a very similar trend, with ViT outperforming its competition for any model size." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 495, + 459, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 459, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 459, + 520 + ], + "type": "text", + "content": "C DEMONSTRATION OF E-AURC'S DEPENDENCE ON THE MODEL'S ACCURACY" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 533, + 506, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 506, + 612 + ], + "type": "text", + "content": "Excess-AURC (E-AURC) was suggested by Geifman et al. (2018) as an alternative to AURC (explained in Section 2). To calculate E-AURC, two AURC scores need to be calculated: (1) " + }, + { + "bbox": [ + 104, + 533, + 506, + 612 + ], + "type": "inline_equation", + "content": "AURC(model)" + }, + { + "bbox": [ + 104, + 533, + 506, + 612 + ], + "type": "text", + "content": ", the AURC value of the actual model and (2) " + }, + { + "bbox": [ + 104, + 533, + 506, + 612 + ], + "type": "inline_equation", + "content": "AURC(model^{*})" + }, + { + "bbox": [ + 104, + 533, + 506, + 612 + ], + "type": "text", + "content": ", the AURC value of a hypothetical model with identical predicted labels as the first model, but that outputs confidence values that induce a perfect partial order on the instances in terms of their correctness. The latter means that all incorrectly predicted instances are assigned confidence values lower than the correctly predicted instances." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 616, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 672 + ], + "type": "text", + "content": "E-AURC is then defined as " + }, + { + "bbox": [ + 104, + 616, + 504, + 672 + ], + "type": "inline_equation", + "content": "AURC(model) - AURC(model^{*})" + }, + { + "bbox": [ + 104, + 616, + 504, + 672 + ], + "type": "text", + "content": ". In essence, this metric acknowledges that given a model's accuracy, the area of " + }, + { + "bbox": [ + 104, + 616, + 504, + 672 + ], + "type": "inline_equation", + "content": "AURC(model^{*})" + }, + { + "bbox": [ + 104, + 616, + 504, + 672 + ], + "type": "text", + "content": " is always unavoidable no matter how good the partial order is, but anything above that could have been minimized if the " + }, + { + "bbox": [ + 104, + 616, + 504, + 672 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 616, + 504, + 672 + ], + "type": "text", + "content": " function was better at ranking, assigning correct instances higher values than incorrect ones and inducing a better partial order over the instances." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": "This metric indeed helps to reduce some of the sensitivity to accuracy suffered by AURC, and for the example presented in Section 1, E-AURC would have given a perfect score of 0 to the model inducing a perfect partial order by its confidence values (Model B). It is easy, however, to craft examples showing that E-AURC prefers models with higher accuracy, even if they have lower or equal capacity to rank." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 161, + 111, + 449, + 645 + ], + "blocks": [ + { + "bbox": [ + 161, + 111, + 449, + 645 + ], + "lines": [ + { + "bbox": [ + 161, + 111, + 449, + 645 + ], + "spans": [ + { + "bbox": [ + 161, + 111, + 449, + 645 + ], + "type": "image", + "image_path": "9d9ebd2771f1ac371bbf660f35fae88018b600af2f4c4ec467ea5a18bc2e73a0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 106, + 654, + 504, + 698 + ], + "lines": [ + { + "bbox": [ + 106, + 654, + 504, + 698 + ], + "spans": [ + { + "bbox": [ + 106, + 654, + 504, + 698 + ], + "type": "text", + "content": "Figure 8: A comparison of 523 models by their AUROC (" + }, + { + "bbox": [ + 106, + 654, + 504, + 698 + ], + "type": "inline_equation", + "content": "\\times 100" + }, + { + "bbox": [ + 106, + 654, + 504, + 698 + ], + "type": "text", + "content": ", higher is better) and log(ECE) (lower is better) on ImageNet. Each marker's size is determined by the model's number of parameters. Each dotted marker represents a distilled version of the original. An interactive version of this figure is provided as supplementary material." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 147, + 79, + 467, + 291 + ], + "blocks": [ + { + "bbox": [ + 147, + 79, + 467, + 291 + ], + "lines": [ + { + "bbox": [ + 147, + 79, + 467, + 291 + ], + "spans": [ + { + "bbox": [ + 147, + 79, + 467, + 291 + ], + "type": "image", + "image_path": "3d1c595130db629b8f46da807f5c489fd04b7ff94c241895b7b8b54048b0b313.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 298, + 506, + 323 + ], + "lines": [ + { + "bbox": [ + 104, + 298, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 506, + 323 + ], + "type": "text", + "content": "Figure 9: A comparison of 523 models by their AUROC (" + }, + { + "bbox": [ + 104, + 298, + 506, + 323 + ], + "type": "inline_equation", + "content": "\\times 100" + }, + { + "bbox": [ + 104, + 298, + 506, + 323 + ], + "type": "text", + "content": ", higher is better) and log(number of model's parameters) on ImageNet. Each dotted marker represents a distilled version of the original." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 125, + 333, + 485, + 531 + ], + "blocks": [ + { + "bbox": [ + 125, + 333, + 485, + 531 + ], + "lines": [ + { + "bbox": [ + 125, + 333, + 485, + 531 + ], + "spans": [ + { + "bbox": [ + 125, + 333, + 485, + 531 + ], + "type": "image", + "image_path": "2540814b7d1ea2012307b39fcf8425b3d6ec27ea7291a6317dfa00cd7f9805d4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 537, + 506, + 562 + ], + "lines": [ + { + "bbox": [ + 104, + 537, + 506, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 506, + 562 + ], + "type": "text", + "content": "Figure 10: A comparison of 523 models by their -log(ECE) (higher is better) and log(number of model's parameters) on ImageNet. Each dotted marker represents a distilled version of the original." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "text", + "content": "To demonstrate this in a simple way, let us consider two models with a complete lack of capacity to rank correct and incorrect predictions correctly, always outputting the same confidence score. 
Model A has an accuracy of " + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "text", + "content": " (thus an error rate of " + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "text", + "content": "), and Model B has an accuracy of " + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "text", + "content": " (and an error rate of " + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "text", + "content": "). A good ranking metric should evaluate them equally (the same way E-AURC gives the same score for two models that rank perfectly regardless of their accuracy). In Figure 11 we plot their RC curves with dashed lines, which are both straight lines due to their lack of ranking ability. We can calculate both of these models' AURCs, " + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "inline_equation", + "content": "AURC(modelA) = 0.8" + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "inline_equation", + "content": "AURC(modelB) = 0.2" + }, + { + "bbox": [ + 104, + 582, + 506, + 662 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "text", + "content": "The next thing to calculate is the best AURC values those models could have achieved given the same accuracy if they had a perfect partial order. We plot these hypothetical models' RC curves in Figure 11 as solid lines. Their selective risk remains 0 for every coverage below their total accuracy, since these hypothetical models assigned the highest confidence to all of their correct instances first. As the coverage increases and they have no more correct instances to select, they begin to give instances that are incorrect, and thus their selective risk deteriorates for higher coverages." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 186, + 80, + 427, + 300 + ], + "blocks": [ + { + "bbox": [ + 186, + 80, + 427, + 300 + ], + "lines": [ + { + "bbox": [ + 186, + 80, + 427, + 300 + ], + "spans": [ + { + "bbox": [ + 186, + 80, + 427, + 300 + ], + "type": "image", + "image_path": "d0fefebcf81f0e5a828b47b663d84eb7f06149d59c176e24af2536dc97cb3db3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, 
+ "type": "image_body" + }, + { + "bbox": [ + 104, + 307, + 504, + 331 + ], + "lines": [ + { + "bbox": [ + 104, + 307, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 504, + 331 + ], + "type": "text", + "content": "Figure 11: The RC curves for Models A and B are plotted with dashed lines. The RC curves for the hypothetically optimal versions of Models A and B are plotted with solid lines." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 358, + 504, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 358, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 504, + 404 + ], + "type": "text", + "content": "Calculating both of these hypothetical models' AURCs gives us the following: " + }, + { + "bbox": [ + 104, + 358, + 504, + 404 + ], + "type": "inline_equation", + "content": "AURC(modelA^{*}) = 0.482" + }, + { + "bbox": [ + 104, + 358, + 504, + 404 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 358, + 504, + 404 + ], + "type": "inline_equation", + "content": "AURC(modelB^{*}) = 0.022" + }, + { + "bbox": [ + 104, + 358, + 504, + 404 + ], + "type": "text", + "content": ". Subtracting our results we get: E-AURC(modelA) = 0.8 - 0.482 = 0.318, E-AURC(modelB) = 0.2 - 0.022 = 0.178. Hence, E-AURC prefers Model B over Model A, even though both do not discriminate at all between incorrect and correct instances." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 427, + 336, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 427, + 336, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 427, + 336, + 440 + ], + "type": "text", + "content": "D MORE ON THE DEFINITION OF RANKING" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "text", + "content": "Let us consider a finite set " + }, + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "inline_equation", + "content": "S_{m} = \\{(x_{i},y_{i})\\}_{i = 1}^{m}\\sim P_{X,Y}" + }, + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "text", + "content": ". We assume that there are no two identical values given by " + }, + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "inline_equation", + "content": "S_{m}" + }, + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "text", + "content": ". Such an assumption is reasonable when choosing a continuous confidence signal." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "text", + "content": "We further denote " + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "text", + "content": " as the number of concordant pairs (i.e., pairs in " + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "inline_equation", + "content": "S_{m}" + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "text", + "content": " that satisfy the condition " + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "inline_equation", + "content": "[\\kappa(x_{i}, \\hat{y} | f) < \\kappa(x_{j}, \\hat{y} | f) \\cap \\ell(f(x_{i}), y_{i}) > \\ell(f(x_{j}), y_{j})]" + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "text", + "content": " as the number of discordant pairs (i.e., pairs in " + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "inline_equation", + "content": "S_{m}" + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "text", + "content": " that satisfy the condition " + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "inline_equation", + "content": "[\\kappa(x_{i}, \\hat{y} | f) > \\kappa(x_{j}, \\hat{y} | f) \\cap \\ell(f(x_{i}), y_{i}) > \\ell(f(x_{j}), y_{j})]" + }, + { + "bbox": [ + 104, + 495, + 504, + 530 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 534, + 504, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 504, + 559 + ], + "type": "text", + "content": "We assume, for now, that there are no two identical values given by " + }, + { + "bbox": [ + 104, + 534, + 504, + 559 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 104, + 534, + 504, + 559 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 534, + 504, + 559 + ], + "type": "inline_equation", + "content": "S_{m}" + }, + { + "bbox": [ + 104, + 534, + 504, + 559 + ], + "type": "text", + "content": ". Accordingly, we can further develop Equation (1) from Section 2.1 using the definition of conditional probability," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 186, + 585, + 426, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 585, + 426, + 597 + ], + "spans": [ + { + "bbox": [ + 186, + 585, + 426, + 597 + ], + "type": "interline_equation", + "content": "\\Pr [ \\kappa (x _ {i}, \\hat {y} | f) < \\kappa (x _ {j}, \\hat {y} | f) | \\ell (f (x _ {i}), y _ {i}) > \\ell (f (x _ {j}), y _ {j}) ] =", + "image_path": "45d0b1f7bf0771add9c9c46f59bc51fa871d5370f11395258cc9edf080041628.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 186, + 599, + 425, + 627 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 599, + 425, + 627 + ], + "spans": [ + { + "bbox": [ + 186, + 599, + 425, + 627 + ], + "type": "interline_equation", + "content": "\\frac {\\mathbf {P r} [ \\kappa (x _ {i} , \\hat {y} | f) < \\kappa (x _ {j} , \\hat {y} | f) \\cap \\ell (f (x _ {i}) , y _ {i}) > \\ell (f (x _ {j}) , y _ {j}) ]}{\\mathbf {P r} [ \\ell (f (x _ {i}) , y _ {i}) > \\ell (f (x _ {j}) , y _ {j}) ]},", + "image_path": 
"ab608cfe2a15a92a9a4c536506de0a2c084ada2741f734d1cabbcf1b70c883bb.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 638, + 424, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 424, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 424, + 651 + ], + "type": "text", + "content": "which can be approximated empirically, using the maximum likelihood estimator, as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 294, + 677, + 504, + 703 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 677, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 294, + 677, + 504, + 703 + ], + "type": "interline_equation", + "content": "\frac {c}{\binom {m} {2}}. \tag {2}", + "image_path": "c8afbb63ae2c5aa9f95b936689d4380c2b946d837b907b23efad9a9c20aded78.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "content": "We note that the last equation is identical to Kendall's " + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "inline_equation", + "content": "\tau" + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "content": " up to a linear transformation, which equals" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + 
"type": "text", + "content": "18" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 233, + 98, + 376, + 153 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 98, + 376, + 153 + ], + "spans": [ + { + "bbox": [ + 233, + 98, + 376, + 153 + ], + "type": "interline_equation", + "content": "\begin{array}{l} \frac {c - d}{\binom {m} {2}} = \frac {c - d + c - c}{\binom {m} {2}} \\ = \frac {2 c - (c + d)}{\binom {m} {2}} = \frac {2 c}{\binom {m} {2}} - \frac {c + d}{\binom {m} {2}} = \\ \end{array}", + "image_path": "bd2624419a6c33512c23935315b8d8af03c03c4dd3ef3377776c3e1e8902d4a6.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 233, + 156, + 376, + 178 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 156, + 376, + 178 + ], + "spans": [ + { + "bbox": [ + 233, + 156, + 376, + 178 + ], + "type": "interline_equation", + "content": "2 \cdot \frac {c}{\binom {m} {2}} - 1 = 2 \cdot [ \text {Equation } 2 ] - 1.", + "image_path": "ff62968e9e83a352b84d89958e2ff4c6ee3826d0e0b692101b789639820888a6.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 184, + 504, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 184, + 504, + 206 + ], + "spans": [ + { + "bbox": [ + 104, + 184, + 504, + 206 + ], + "type": "text", + "content": "Otherwise, if the loss assigns two identical values to a pair of points in " + }, + { + "bbox": [ + 104, + 184, + 504, + 206 + ], + "type": "inline_equation", + "content": "S_{m}" + }, + { + "bbox": [ + 104, + 184, + 504, + 206 + ], + "type": "text", + "content": ", but " + }, + { + "bbox": [ + 104, + 184, + 504, + 206 + ], + "type": "inline_equation", + "content": "\kappa" + }, + { + "bbox": [ + 104, + 184, + 504, + 206 + ], + "type": "text", + "content": " does not, then we get:" + } + ] + } + ], + "index": 3 + }, + { + 
"bbox": [ + 290, + 226, + 504, + 247 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 226, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 290, + 226, + 504, + 247 + ], + "type": "interline_equation", + "content": "\frac {c}{c + d}. \tag {3}", + "image_path": "a119ade5d41b0dcbdb47af465a02fad0d1cdf44c868d1e2fc73175964e90315a.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 252, + 453, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 252, + 453, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 252, + 453, + 264 + ], + "type": "text", + "content": "which is identical to Goodman & Kruskal's " + }, + { + "bbox": [ + 104, + 252, + 453, + 264 + ], + "type": "inline_equation", + "content": "\gamma" + }, + { + "bbox": [ + 104, + 252, + 453, + 264 + ], + "type": "text", + "content": "-correlation up to a linear transformation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 217, + 269, + 392, + 319 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 269, + 392, + 319 + ], + "spans": [ + { + "bbox": [ + 217, + 269, + 392, + 319 + ], + "type": "interline_equation", + "content": "\begin{array}{l} \frac {c - d}{c + d} = \frac {c - d + c - c}{c + d} = \frac {2 c - (c + d)}{c + d} = \\ \frac {2 c}{c + d} - \frac {c + d}{c + d} = 2 \cdot [ \text {Equation } 3 ] - 1. 
\\\\ \\end{array}", + "image_path": "75210626a5ef4822b5f62dde3d277f78ee434cbef85d9199a1f4f44a57211c3e.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 331, + 280, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 280, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 280, + 342 + ], + "type": "text", + "content": "D.1 INEQUALITIES OF THE DEFINITION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "text", + "content": "One might wonder why Equation (1) should have strict inequalities rather than non-strict ones to define ranking. As we discuss below, this would damage the definition:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 380, + 279, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 279, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 279, + 392 + ], + "type": "text", + "content": "(1) If the losses had a non-strict inequality:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 187, + 397, + 421, + 411 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 397, + 421, + 411 + ], + "spans": [ + { + "bbox": [ + 187, + 397, + 421, + 411 + ], + "type": "interline_equation", + "content": "\\Pr [ \\kappa (x _ {1}, \\hat {y} | f) < \\kappa (x _ {2}, \\hat {y} | f) | \\ell (f (x _ {1}), y _ {1}) \\geq \\ell (f (x _ {2}), y _ {2}) ]", + "image_path": "1e9fe24f9b0ad62c92660322fc7fcdef9d32243e197dfd2ce3c544cd7e70b788.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 418, + 504, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 462 + ], + "type": "text", + "content": "Consequently, in the case of 
classification, for example, this probability would increase for any pairs consisting of correct instances with different confidences. This would yield no benefit in ranking between incorrect and correct instances and motivates giving different confidence values for instances with the same loss—a fact that would not truly add any value." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 468, + 289, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 289, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 289, + 479 + ], + "type": "text", + "content": "(2) If the " + }, + { + "bbox": [ + 104, + 468, + 289, + 479 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 468, + 289, + 479 + ], + "type": "text", + "content": " values had a non-strict inequality:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 186, + 485, + 422, + 499 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 485, + 422, + 499 + ], + "spans": [ + { + "bbox": [ + 186, + 485, + 422, + 499 + ], + "type": "interline_equation", + "content": "\\Pr [ \\kappa (x _ {1}, \\hat {y} | f) \\leq \\kappa (x _ {2}, \\hat {y} | f) | \\ell (f (x _ {1}), y _ {1}) > \\ell (f (x _ {2}), y _ {2}) ].", + "image_path": "816d26805ac9df702f58812e544585eb182be7f8c77074b9716ff7074f65ccf4.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": "This probability would increase for any pair " + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + "content": "(x_{1}, x_{2})" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + 
"content": "\\kappa(x_{1}, \\hat{y} | f) = \\kappa(x_{2}, \\hat{y} | f)" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + "content": "\\ell(f(x_{1})) > \\ell(f(x_{2}))" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": ", although " + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": " should have ranked " + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + "content": "x_{1}" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": " with a lower value. Furthermore, if a " + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": " function were to assign the same confidence score to all " + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": ", then when there are no two identical values of losses, the definition's probability would be 1; otherwise, the more different values for losses there are, the larger the probability would grow. 
In classification with a " + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + "content": "0/1" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": " loss, for example, assigning the same confidence score to all instances would result in the probability being Accuracy" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + "content": "(f) \\cdot (1 - Accuracy(f))" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": ", which is largest when Accuracy" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "inline_equation", + "content": "(f) = 0.5" + }, + { + "bbox": [ + 104, + 506, + 505, + 584 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 599, + 474, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 474, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 474, + 625 + ], + "type": "text", + "content": "E RANKING CAPACITY COMPARISON BETWEEN HUMANS AND NEURAL NETWORKS" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "content": "In the field of metacognition, interestingly, the predictive value of confidence is evaluated by two different aspects: by its ability to discriminate between correct and incorrect predictions (also known as resolution in metacognition or ranking in our context) and by its ability to give well-calibrated confidence estimations, not being over- or under-confident (Fiedler et al., 2019). 
These two aspects correspond perfectly with much of the research done in the deep learning field, with the nearly matching metric to AUROC of " + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "content": "-correlation (see Section 2)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "type": "text", + "content": "This allows us to compare how well humans rank predictions in various tasks versus how well models rank their own in others. Human AUROC measurements in various tasks (translated from" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "-correlation) tend to range from 0.6 to 0.75 (Undorf & Broder, 2019; Basile et al., 2018; Ackerman et 
al., 2016), but could vary, usually towards much lower values (Griffin et al., 2019). In our comprehensive evaluation on ImageNet, AUROC ranged from 0.77 to 0.88 (with the median value being 0.85), and in CIFAR-10 these measurements jump to the range of 0.92 to 0.94." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 504, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 504, + 178 + ], + "type": "text", + "content": "While such comparisons between neural networks and humans are somewhat unfair due to the great sensitivity required for the task, research that directly compares humans and machine learning algorithms performing the same task exist. For example, in Ackerman et al. (2019), algorithms far surpass even the group of highest performing individuals in terms of ranking." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 194, + 373, + 206 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 373, + 206 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 373, + 206 + ], + "type": "text", + "content": "F CRITICISMS OF AUROC AS A RANKING METRIC" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": "In this section, addressing the criticism of AUROC as a ranking metric, we show why AUROC does not simply reward models for having lower accuracy. The paper by Ding et al. (2019) presented a semi-artificial experiment to demonstrate that AUROC might get larger the worse the model's accuracy becomes. 
They consider a model " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " and its " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " function evaluated on a classification test set " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": ", giving each a prediction " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\hat{y}_f(x)" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " and a confidence score " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\kappa(x, \\hat{y}_f(x)|f)" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": ", which in this case is the model's softmax response. 
Let " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^c = \\{x^c \\in \\mathcal{X} | \\hat{y}_f(x^c) = y(x)\\}" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " be the set of all instances correctly predicted by the model " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": ", and define " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "x_{(i)}^c \\in \\mathcal{X}^c" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " to be the correct instance that received the i-lowest confidence score from " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": ". 
Their example continues and considers an artificial model " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "f^m" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " to be an exact clone of " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " with the following modification: for every " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "i \\leq m" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": ", the model " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "f^m" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " now predicts a different, incorrect label for " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "x_{(i)}^c" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": "; however, its given confidence score remains identical: " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\kappa(x_{(i)}^c, \\hat{y}_f(x_{(i)}^c)|f) = \\kappa(x_{(i)}^c, \\hat{y}_{f^m}(x_{(i)}^c)|f^m)" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "f^0" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " is exactly identical to " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": ", by this definition, not changing any predictions. 
The paper shows how an artificially created model " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "f^m" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": " obtains a higher AUROC score the bigger its " + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 220, + 506, + 381 + ], + "type": "text", + "content": ". This happens even though \"nothing\" changed but a hit to the model's accuracy performance (by making mistakes on more instances)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": "First, to understand why this happens, let us consider " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "f^1" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": ": AUROC for " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " increases the more pairs of " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "[\\kappa(x_1) < \\kappa(x_2)|\\hat{y}_f(x_1) \\neq y(x_1), \\hat{y}_f(x_2) = y(x_2)]" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " there are. 
The model " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "f^1" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " is now giving an incorrect classification to " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "x_{(1)}^c" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": ", but this instance's position in the partial order induced by " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " has remained the same (since " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\kappa(x_{(1)}^c)" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " is unchanged); therefore, " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "|\\mathcal{X}^c| - 1" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " correctly ranked pairs were added: " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "[\\kappa(x_{(1)}^c) < \\kappa(x_{(i)}^c)|\\hat{y}_f(x_{(1)}^c) \\neq y(x_{(1)}^c), \\hat{y}_f(x_{(i)}^c) = y(x_{(i)}^c)]" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " for every " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "1 < i \\leq |\\mathcal{X}^c|" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": ". 
Nevertheless, this does not guarantee an increase to AUROC by itself: if, previously, all pairs of (correct, incorrect) instances were ranked correctly by " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": ", AUROC would already be 1.0 for " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "f^0" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " and would not change for " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "f^1" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": ". If AUROC for " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "f^1" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " is higher than it was for " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "f^0" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": ", this means there exists at least one instance " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "x^w" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " that was incorrectly predicted by the original model " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "f^0" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "\\kappa(x_{(1)}^c) < \\kappa(x^w)" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": ". 
Every such originally wrongly ranked pair (by " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "f^0" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": ") of " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "[\\kappa(x_{(1)}^c) < \\kappa(x^w)|\\hat{y}_f(x^w) \\neq y(x^w), \\hat{y}_f(x_{(1)}^c) = y(x_{(1)}^c)]" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " has been eliminated by " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "f^1" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": " wrongly predicting " + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "inline_equation", + "content": "x_{(1)}^c" + }, + { + "bbox": [ + 104, + 386, + 506, + 534 + ], + "type": "text", + "content": ". This, therefore, causes AUROC to increase at the expense of the model's accuracy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 538, + 504, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 538, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 538, + 504, + 562 + ], + "type": "text", + "content": "Such an analysis neglects many factors, which is probably why such an effect is only likely to be observed in artificial models (and not among the actual models we have empirically tested):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 129, + 571, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "text", + "content": "1. 
It is unreasonable to assume that the confidence score given by " + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "text", + "content": " will remain exactly the same for an instance " + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "inline_equation", + "content": "x_{(i)}^{c}" + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "text", + "content": " given it now has a different prediction. In the case of " + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "text", + "content": " being softmax, it assumes the model's logits have changed in a very precise and nontrivial manner. Additionally, by our broad definition of " + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "text", + "content": ", which allows " + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "text", + "content": " to even be produced from an entirely different model than " + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 129, + 571, + 504, + 650 + ], + "type": "text", + "content": " receives the prediction and model as a given input (and cannot change or affect either), and it is unlikely to assume changing its inputs will not change its output." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": "2. Suppose we find the setting reasonable and assume we can actually create a model " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "f^m" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": " as described. Let us observe a model " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "f^p" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "p = \\min_{m} (\\text{AUROC of } f^m = 1)" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": ", meaning that " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "f^p" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": " ranks its predictions perfectly, unlike the original " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "f^0" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": ". Is it really true that " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "f^p" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": " has no better uncertainty estimation than " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "f^0" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": "? 
Model " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "f^p" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": " behaves very much like the investment in \"Model B\" from our example in Section 1, possessing perfect knowledge of when it is wrong and when it is correct, allowing its users risk-free classification. So, given a model " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": ", we can use the above process to produce an improved model " + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "f^p" + }, + { + "bbox": [ + 129, + 654, + 506, + 733 + ], + "type": "text", + "content": ", and then we can even calibrate its" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 140, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 140, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 140, + 82, + 504, + 127 + ], + "type": "text", + "content": " to 
output " + }, + { + "bbox": [ + 140, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "0\\%" + }, + { + "bbox": [ + 140, + 82, + 504, + 127 + ], + "type": "text", + "content": " for all instances below its threshold and " + }, + { + "bbox": [ + 140, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 140, + 82, + 504, + 127 + ], + "type": "text", + "content": " for all those above to produce a perfect model, which might have a small coverage but is correct every time, knows it and notifies its user when it truly knows the prediction. The increase in AUROC reflects such an improvement." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 137, + 506, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 137, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 137, + 506, + 237 + ], + "type": "text", + "content": "Not only do we disagree with such an analysis and its conclusions, but we also have vast empirical evidence to show that AUROC does not prefer lower accuracy models unless there is a good reason for it to do so, as we demonstrate in Figure 3 (comparing EfficientNet-V2-XL to ViT-B/32-SAM). In fact, out of the 523 models we tested, the model with the highest AUROC also has the " + }, + { + "bbox": [ + 104, + 137, + 506, + 237 + ], + "type": "inline_equation", + "content": "4^{th}" + }, + { + "bbox": [ + 104, + 137, + 506, + 237 + ], + "type": "text", + "content": " highest accuracy of all models, and the overall Spearman correlation between AUROC and accuracy of all the models we tested is 0.03. Furthermore, Figure 3 also exemplifies why AURC, which was suggested by the just mentioned paper as the alternative to AUROC, is a bad choice as a single number metric, and might lead us to deploy a model that has a worse selective risk for most coverages only due to its higher overall accuracy."
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 256, + 479, + 269 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 256, + 479, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 256, + 479, + 269 + ], + "type": "text", + "content": "G KNOWLEDGE DISTILLATION EFFECTS ON UNCERTAINTY ESTIMATION" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 126, + 289, + 485, + 485 + ], + "blocks": [ + { + "bbox": [ + 126, + 289, + 485, + 485 + ], + "lines": [ + { + "bbox": [ + 126, + 289, + 485, + 485 + ], + "spans": [ + { + "bbox": [ + 126, + 289, + 485, + 485 + ], + "type": "image", + "image_path": "f188edd92ad5504d8af446f7da799b3fc9769cd70d682862a17c5d78793ce1c6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 493, + 504, + 539 + ], + "lines": [ + { + "bbox": [ + 104, + 493, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 493, + 504, + 539 + ], + "type": "text", + "content": "Figure 12: Comparing vanilla models to those incorporating KD into their training (represented by markers with thick borders and a dot). In a pruning scenario that includes distillation, yellow markers indicate that the original model was also the teacher. The performance of each model is measured in AUROC (higher is better) and -log(ECE) (higher is better)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 555, + 504, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 655 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 655 + ], + "type": "text", + "content": "Figure 12 compares vanilla models to those incorporating KD into their training (represented by markers with thick borders and a dot). In a pruning scenario that includes distillation, yellow markers indicate that the original model was also the teacher (Aflalo et al., 2020). 
While distillation using a different model tends to improve uncertainty estimation in both aspects, distillation by the model itself seems to improve only one—suggesting it is generally more beneficial to use a different model as a teacher. The fact that KD improves the model over its original form, however, is surprising, and implies that the distillation process itself helps uncertainty estimation. Note that although this specific method involves pruning, evaluations of models pruned without incorporating distillation (Frankle & Carbin, 2018) revealed no improvement." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 659, + 504, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 682 + ], + "type": "text", + "content": "It seems, moreover, that the teacher does not have to be good in uncertainty estimation itself; Figure 5 in Section 3 shows this by comparing the teacher architecture and the students in each case." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "While the training method by Ridnik et al. 
(2021) included pretraining on ImageNet-21k and demonstrated impressive improvements, comparison of models that were pretrained on ImageNet21k (Tan & Le, 2021; Touvron et al., 2021a; 2022) with identical models that were not pretrained showed only a slight improvement in ECE, and, in fact, exhibit a degradation of AUROC (see" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "Figures 4a and 4b in Section 3). This suggests that pretraining alone does not improve uncertainty estimation." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 120, + 406, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 120, + 406, + 133 + ], + "spans": [ + { + "bbox": [ + 105, + 120, + 406, + 133 + ], + "type": "text", + "content": "H MORE INFORMATION ABOUT TEMPERATURE SCALING" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 146, + 145, + 465, + 357 + ], + "blocks": [ + { + "bbox": [ + 146, + 145, + 465, + 357 + ], + "lines": [ + { + "bbox": [ + 146, + 145, + 465, + 357 + ], + "spans": [ + { + "bbox": [ + 146, + 145, + 465, + 357 + ], + "type": "image", + "image_path": "034ae8e1de9f41d6fe5cac55c9a457b469a7856eca88d835a395c08d2e60e6c9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 366, + 504, + 411 + ], + "lines": [ + { + "bbox": [ + 104, + 366, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 504, + 411 + ], + "type": "text", + "content": "Figure 13: A comparison of 523 models after being calibrated with TS, evaluated by their AUROC " + }, + { + "bbox": [ + 104, + 366, + 504, + 411 + ], + "type": "inline_equation", + "content": "(\\times 100" + }, + { + "bbox": [ + 104, + 366, + 504, + 411 + ], + "type": "text", + "content": ", higher is better) and -log(ECE) (higher is better) on ImageNet. Each marker's size is determined by the model's number of parameters. ViT models are still among the best performing architectures for all aspects of uncertainty estimation." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 424, + 504, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 424, + 504, + 480 + ], + "spans": [ + { + "bbox": [ + 104, + 424, + 504, + 480 + ], + "type": "text", + "content": "In Figure 13 we see how temperature scaling (TS) affects the overall ranking of models in terms of AUROC and ECE. 
While the ranking between the different architectures remains similar, the poorly performing models are much improved and minimize the gap between them and the best models. One particularly notable exception is HardCoRe-NAS (Nayman et al., 2021), with its lowest latency versions becoming the top performers in terms of ECE. In addition, models that benefit from" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 126, + 487, + 485, + 680 + ], + "blocks": [ + { + "bbox": [ + 126, + 487, + 485, + 680 + ], + "lines": [ + { + "bbox": [ + 126, + 487, + 485, + 680 + ], + "spans": [ + { + "bbox": [ + 126, + 487, + 485, + 680 + ], + "type": "image", + "image_path": "171b1c9407f03ba242867cd36b052c046d8aa0c7512ed878a58fd7430f7f9cc8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 687, + 504, + 709 + ], + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 709 + ], + "type": "text", + "content": "Figure 14: Here the success of TS, unlike the case for AUROC, seems unrelated to the temperature." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 720, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 720, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 720, + 504, + 732 + ], + "type": "text", + "content": "TS in terms of AUROC tend to have been assigned a temperature lower than 1 by the calibration" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": "process (see Figure 6 in Section 3). The same, however, does not hold true for ECE (see Figure 14). This example also emphasizes the fact that models benefiting from TS in terms of AUROC do not necessarily benefit in terms of ECE, and vice versa. Therefore, determining whether to calibrate the deployed model with TS is, unfortunately, a task-specific decision." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 506, + 189 + ], + "type": "text", + "content": "We perform TS as was suggested in Guo et al. (2017). For each model we take a random stratified sampling of 5,000 instances from the ImageNet validation set on which to calibrate, and reserve the remainder 45,000 instances for testing. Using the box-constrained L-BFGS (Limited-Memory Broyden-Fletcher-Goldfarb-Shanno) algorithm, we optimize for 5,000 iterations (though fewer iterations usually converge into the same temperature parameter) using a learning rate of 0.01." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 207, + 463, + 234 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 207, + 463, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 207, + 463, + 234 + ], + "type": "text", + "content": "I ARCHITECTURE CHOICE FOR PRACTICAL DEPLOYMENT BASED ON SELECTIVE PERFORMANCE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 247, + 506, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 506, + 346 + ], + "type": "text", + "content": "As discussed in Section 2, when we know the coverage or risk we require for deployment, the most direct metric to check is which model obtains the best risk for the coverage required (selective risk), or which model gets the largest coverage for the accuracy constraint (SAC). While each deployment scenario specifies its own constraints, for demonstration purposes we consider a scenario in which misclassifications are by far more costly than abstaining from giving correct predictions. An example of this could be classifying a huge unlabeled dataset (or cleaning bad labels from a labeled dataset). 
While it is desirable to assign labels to a larger portion of the dataset (or to correct more of the wrong labels), it is crucial that these labels are as accurate as possible (or that correctly labeled instances are not replaced with a bad label)." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 144, + 359, + 465, + 566 + ], + "blocks": [ + { + "bbox": [ + 144, + 359, + 465, + 566 + ], + "lines": [ + { + "bbox": [ + 144, + 359, + 465, + 566 + ], + "spans": [ + { + "bbox": [ + 144, + 359, + 465, + 566 + ], + "type": "image", + "image_path": "0d9498774b8c27650a838b860059762e91ee2a1e034b9c0144323f176fdd71b8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 572, + 504, + 596 + ], + "lines": [ + { + "bbox": [ + 104, + 572, + 504, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 572, + 504, + 596 + ], + "type": "text", + "content": "Figure 15: A comparison of 523 models by their log(number of model's parameters) and the coverage they are able to provide for a SAC of " + }, + { + "bbox": [ + 104, + 572, + 504, + 596 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 104, + 572, + 504, + 596 + ], + "type": "text", + "content": " (higher is better) on ImageNet." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 616, + 504, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 684 + ], + "type": "text", + "content": "To explore such a scenario, we evaluate all models on ImageNet to see which ones give us the largest coverage for a required accuracy of " + }, + { + "bbox": [ + 104, + 616, + 504, + 684 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 104, + 616, + 504, + 684 + ], + "type": "text", + "content": ". 
In Figure 7, Section 3 (paper's main body) we observe that of all the models studied, only ViT models are able to provide coverage beyond " + }, + { + "bbox": [ + 104, + 616, + 504, + 684 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 616, + 504, + 684 + ], + "type": "text", + "content": " for such an extreme constraint. Moreover, we note that the coverage they provide is significantly larger than that given by models with comparable accuracy or size, and that ViT models that provide similar coverage to their counterparts do so with less overall accuracy." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "In Figure 15 we see that not only do ViT models provide more coverage than any other model, but that they are also able to do so in any size category. 
To compare models fairly by their size, we present Figure 15, which sets the Y axis to be the logarithm of the number of parameters, so that models sharing the same y value can be compared solely based on their x value—which is the coverage they" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 144, + 507, + 325 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "type": "text", + "content": "Table 1: A comparison of different training regimes of ViTs. *The paper introducing ViTs (Dosovitskiy et al., 2021) had also trained ViT models with the JFT-300M dataset; however, their weights are unavailable to the general public. All evaluations of ViTs from that paper were conducted on ViTs pretrained on ImageNet-21k, which are publicly available. 
**Pretrained DeiT3 models were first pretrained with a learning rate of " + }, + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "type": "inline_equation", + "content": "3 \\cdot 10^{-3}" + }, + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "type": "text", + "content": " and then fine-tuned with a learning rate of " + }, + { + "bbox": [ + 104, + 79, + 506, + 137 + ], + "type": "inline_equation", + "content": "3 \\cdot 10^{-4}" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 144, + 507, + 325 + ], + "lines": [ + { + "bbox": [ + 106, + 144, + 507, + 325 + ], + "spans": [ + { + "bbox": [ + 106, + 144, + 507, + 325 + ], + "type": "table", + "html": "
RegimeViT (original)Steiner et al.Chen et al.DeiTDeiT3DeiT3 +PretrainingTorchvision
ReferenceDosovitskiy et al. (2021)Steiner et al. (2022)Chen et al. (2022)Touvron et al. (2021b)Touvron et al. (2022)Touvron et al. (2022)Paszke et al. (2019)
Pretraining datasetImageNet-21k*ImageNet-21k---ImageNet-21k-
Batch Size409640964096102420482048512
OptimizerAdamWAdamWSAMLAMBLAMBLAMBAdamW
LR3·10-33·10-33·10-31·10-33·10-33·10-3**3·10-3
LR decaycosinecosinecosinecosinecosinecosinecosine
Weight decay0.10.30.10.050.020.020.3
Warmup epochs3.43.43.455530
Label smoothing ε0.10.10.10.1X0.10.11
DropoutXXXX
Stoch. DepthXXX
Repeated AugXXXX
Gradient Clip.1.01.01.0X1.01.01.0
H. flip
Random Resized CropX
Rand AugmentXAdapt.X9/0.5XXAdapt.
3 AugmentXXXXX
LayerScaleXXXXX
Mixup alphaXAdapt.X0.80.8X0.2
Cutmix alphaXXX1.01.01.01.0
Erasing prob.XXX0.25XXX
ColorJitterXXXX0.30.3X
Test crop ratio0.8750.8750.8750.8751.01.00.875
LossCECECECEBCECECE
Superb performanceXXXX
", + "image_path": "62da8f73c31fd0a2dcd7ddaa6e9b66291acc2327749db53b70bb5056012956a9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 345, + 504, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 345, + 504, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 504, + 368 + ], + "type": "text", + "content": "provide for a SAC of " + }, + { + "bbox": [ + 104, + 345, + 504, + 368 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 104, + 345, + 504, + 368 + ], + "type": "text", + "content": ". We see that ViT models provide a larger coverage even when compared with models of a similar size." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 384, + 463, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 463, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 463, + 410 + ], + "type": "text", + "content": "J COMPARISON OF VIT TRAINING REGIMES AND THEIR EFFECTS ON UNCERTAINTY ESTIMATION PERFORMANCE" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 422, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 467 + ], + "type": "text", + "content": "In Table 1 we compare the different hyperparameters and augmentations used for training the ViT models evaluated in this paper, with the aim of revealing why some training regimes consistently result in superb ViTs, while others do not. An analysis of the various differences between these regimes, however, eliminates the obvious suspects." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 472, + 506, + 683 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 104, + 472, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 506, + 582 + ], + "type": "text", + "content": "1) Pretraining, on its own, does not seem to offer an explanation: First, we analyze eight pairs of models (provided by Touvron et al. 2022) such that both models have identical architecture and training regimes, with the exception that one was pretrained on ImageNet-21k, and the other was not. Pretraining results in only a slight improvement of 0.16 in AUROC and 0.01 in ECE. Moreover, as mentioned in detail in Section 3, ViT models trained on JFT-4B (Tran et al., 2022) were outperformed by the successful ViT models evaluated in this paper, most of which were pretrained on ImageNet-21k (and even by one ViT SAM model that was not pretrained at all). Second, we note that ViTs trained with the SAM optimizer (Chen et al., 2022), and not pretrained at all, reach superb ranking (AUROC) as well. These facts lead us to conclude that pretraining, at least by itself, is not the main contributor to training successful ViTs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 588, + 506, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 588, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 588, + 506, + 633 + ], + "type": "text", + "content": "2) The selection of optimizers and other hyperparameters (such as learning rate, label smoothing etc.) does not seem to have a significant impact. For example, while AdamW (Loshchilov & Hutter, 2019) was used by two of the successful regimes, it was also used by Paszke et al. (2019), and on the other hand was replaced by SAM (Foret et al., 2021) in another successful training regime." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 638, + 506, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 506, + 683 + ], + "type": "text", + "content": "3) Advanced augmentations are unlikely to explain the gaps in uncertainty estimation performance, as regimes producing superior ViT models (Dosovitskiy et al., 2021; Chen et al., 2022) did not use advanced augmentations (in comparison to Touvron et al. (2021b) and Touvron et al. (2022), for example)." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "content": "For these reasons, for the moment, the explanation for the gap remains elusive. The only remaining \"suspect\" is the batch size used, with all successful regimes using a batch size of 4096, while others use a smaller batch size of 2048 or lower. One could argue, however, that a two-fold increase in batch size is not sufficient to explain the huge gaps in performance measured." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 104, + 123, + 506, + 386 + ], + "blocks": [ + { + "bbox": [ + 104, + 80, + 506, + 114 + ], + "lines": [ + { + "bbox": [ + 104, + 80, + 506, + 114 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 506, + 114 + ], + "type": "text", + "content": "Table 2: The relationship between uncertainty estimation performance and the model's attributes and resources (accuracy, number of parameters and input size), measured by Spearman correlation. Positive correlations indicate good utilization of resources for uncertainty estimation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 104, + 123, + 506, + 386 + ], + "lines": [ + { + "bbox": [ + 104, + 123, + 506, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 123, + 506, + 386 + ], + "type": "table", + "html": "
ArchitectureAUROC & Accuracy-ECE & AccuracyAUROC & #Parameters-ECE & #ParametersAUROC & Input Size-ECE & Input Size#Models Evaluated
EfficientNet-0.16-0.29-0.22-0.29-0.26-0.3850
ResNet-0.28-0.220.160.03-0.40-0.4433
ViT0.84-0.170.50-0.670.04-0.1331
XCiT distilled0.600.090.350.020.510.1228
XCiT-0.680.89-0.790.94--28
ViT*0.230.38-0.040.410.14-0.1226
SE_ResNet-0.46-0.02-0.530.20-0.02-0.3518
EfficientNetV2-0.70-0.45-0.63-0.47-0.59-0.4015
NFNet0.560.780.630.810.480.6013
Inception-0.290.09-0.430.30-0.080.2313
RegNetY-0.03-0.980.27-0.86--12
RegNetX0.20-0.960.20-0.96--12
CaT distilled0.44-0.870.35-0.870.58-0.5010
DLA0.64-0.900.77-0.90--10
MobileNetV30.370.590.420.60--10
Res2Net-0.700.27-0.680.60--9
CLIP Zero-Shot1.0-0.630.9-0.80.55-0.589
CLIP + Linear Probe0.880.260.710.10.19-0.278
VGG0.81-0.980.71-0.90--8
RepVGG-0.710.50-0.570.21--8
BiT-0.33-0.81-0.20-0.85-0.46-0.258
ResNeXt-0.960.39-0.22-0.30--7
ResNet RS0.000.79-0.180.82-0.300.827
MixConv-0.110.89-0.240.86--7
DenseNet0.43-0.140.720.12--6
HardCoReNAS-0.600.26-0.490.37--6
Swin0.710.140.770.260.410.006
ECANet-0.200.60-0.430.370.830.376
Twins-0.260.94-0.140.89--6
SWSL ResNet0.94-0.890.77-0.83--6
GENet0.50-1.000.50-1.000.87-0.876
SSL ResNet0.14-1.000.26-0.94--6
TResNet0.10-0.300.530.53-0.58-0.875
CoaT-0.100.90-0.100.50--5
LeViT distilled0.60-0.900.60-0.90--5
ResMLP0.201.000.150.97--5
MobileNetV2-0.300.00-0.210.10--5
ViT* Distilled0.8-1.00.71-0.770.22-0.774
PiT distilled1.00-1.001.00-1.00--4
PiT-0.401.00-0.401.00--4
WSP ResNeXt1.000.801.000.80--4
ResMLP distilled0.800.200.800.20--4
MnasNet0.400.200.630.95--4
", + "image_path": "3a0a1a0a3f1dceb4213f6f365e532d02fc678ba638fb2108990875012e1b5cb9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 417, + 477, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 417, + 477, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 477, + 430 + ], + "type": "text", + "content": "K EVALUATIONS OF THE ZERO-SHOT LANGUAGE-VISION CLIP MODEL" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 449, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 506, + 548 + ], + "type": "text", + "content": "In this section we describe how we use CLIP model and extract confidence signals from it during inference. To evaluate CLIP on ImageNet, we first prepare it following the code provided by its authors (https://github.com/openai/CLIP): The labels of ImageNet-1k are encoded into normalized embedding vectors. At inference time, the incoming image is encoded into another normalized embedding vector. A cosine similarity is then calculated between each label embedding vector and the image embedding vector, and lastly, softmax is applied. The highest score is then taken as the confidence score for that prediction. We also evaluate the same models when adding a trained \"linear-probe\" to them (as described in Radford et al. (2021), which is essentially a logistic regression head), that results in a large boost in their accuracy." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 576, + 486, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 486, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 486, + 602 + ], + "type": "text", + "content": "L EFFECTS OF THE MODEL'S ACCURACY, NUMBER OF PARAMETERS AND INPUT SIZE ON UNCERTAINTY ESTIMATION PERFORMANCE" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "Table 2 shows the relationship between uncertainty estimation performance and model attributes and resources (accuracy, number of parameters and input size), measured by Spearman correlation. We measure uncertainty estimation performance by AUROC (higher is better) and -ECE (higher is better). Positive correlations indicate good utilization of resources for uncertainty estimation (for example, a positive correlation between -ECE and the number of parameters indicates that as the number of parameters increases, the calibration improves). An interesting observation is that distillation can drastically change the correlation between a resource and the uncertainty estimation performance metrics. For example, undistilled XCiTs have a Spearman correlation of -0.79 between their number of parameters and AUROC, indicating that more parameters are correlated with lower ranking performance, while distilled XCiTs have a correlation of 0.35 between the two." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 200, + 100, + 410, + 346 + ], + "blocks": [ + { + "bbox": [ + 164, + 79, + 444, + 92 + ], + "lines": [ + { + "bbox": [ + 164, + 79, + 444, + 92 + ], + "spans": [ + { + "bbox": [ + 164, + 79, + 444, + 92 + ], + "type": "text", + "content": "Table 3: Comparing using MC dropout to softmax-response (vanilla)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 200, + 100, + 410, + 346 + ], + "lines": [ + { + "bbox": [ + 200, + 100, + 410, + 346 + ], + "spans": [ + { + "bbox": [ + 200, + 100, + 410, + 346 + ], + "type": "table", + "html": "
ArchitectureMethodAccuracyAUROC
MobileNetV3 LargeVanilla74.0486.88
MC dropout7486.14
MobileNetV3 SmallVanilla67.6786.2
MC dropout67.5584.54
MobileNetV2Vanilla71.8886.05
MC dropout71.8184.68
VGG11Vanilla70.3786.31
MC dropout70.2184.3
VGG11 (no BatchNorm)Vanilla69.0286.19
MC dropout68.9583.94
VGG13Vanilla71.5986.3
MC dropout71.4384.37
VGG13 (no BatchNorm)Vanilla69.9386.24
MC dropout69.7184.3
VGG16Vanilla73.3686.76
MC dropout73.3385.02
VGG16 (no BatchNorm)Vanilla71.5986.63
MC dropout71.4784.97
VGG19Vanilla74.2286.52
MC dropout74.1785.06
VGG19 (no BatchNorm)Vanilla72.3886.55
MC dropout72.3784.99
", + "image_path": "8cb7831169cfb6d617fc5c6ca0c7b226e0e96ac33e185eae4751d461afb1bb3d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 364, + 488, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 364, + 488, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 488, + 376 + ], + "type": "text", + "content": "M EVALUATIONS OF MONTE CARLO DROPOUT RANKING PERFORMANCE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "text", + "content": "MC Dropout (Gal & Ghahramani, 2016) is computed using several dropout-enabled forward passes to produce uncertainty estimates. In classification, the mean softmax score of these passes, is calculated, and then a predictive entropy score is used as the final uncertainty estimate. In our evaluations, we use 30 dropout-enabled forward passes. We do not measure MC Dropout's effect on ECE since entropy scores do not reside in " + }, + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "inline_equation", + "content": "[0,1]" + }, + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 449, + 504, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 504, + 483 + ], + "type": "text", + "content": "We test this technique using MobileNetV3 (Howard et al., 2019), MobileNetv2 (Sandler et al., 2018) and VGG (Simonyan & Zisserman, 2015), all trained on ImageNet and taken from the PyTorch repository (Paszke et al., 2019)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 488, + 493, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 488, + 493, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 488, + 493, + 500 + ], + "type": "text", + "content": "The results comparing these models with and without using MC dropout are provided in Table 3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 505, + 504, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 504, + 550 + ], + "type": "text", + "content": "The table shows that using MC dropout causes a consistent drop in both AUROC and selective performance compared with using the same models with softmax as the " + }, + { + "bbox": [ + 104, + 505, + 504, + 550 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 505, + 504, + 550 + ], + "type": "text", + "content": ". These results are also visualized in comparison to other methods in Figure 4a in Section 3. MC dropout underperformance in an ID setting was also previously observed in Geifman & El-Yaniv (2017)." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/What Do Self-Supervised Vision Transformers Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_content_list.json b/2023/What Do Self-Supervised Vision Transformers Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d01363075ad8e9667e27211bc9208f2ee3a334c1 --- /dev/null +++ b/2023/What Do Self-Supervised Vision Transformers Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_content_list.json @@ -0,0 +1,2432 @@ +[ + { + "type": "text", + "text": "WHAT DO SELF-SUPERVISED VISION TRANSFORMERS LEARN?", + "text_level": 1, + "bbox": [ + 171, + 99, + 807, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Namuk Park $^{1*}$ Wonjae Kim $^{2}$ Byeongho Heo $^{2}$ Taekyung Kim $^{2}$ Sangdoo Yun $^{2}$", + "bbox": [ + 179, + 167, + 759, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Prescient Design, Genentech $^{2}$ NAVER AI Lab", + "bbox": [ + 183, + 184, + 509, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "park.namuk@gene.com {wonjae.kim,bh.heo,taekyung.k,sangdoo.yun}@navercorp.com", + "bbox": [ + 187, + 200, + 784, + 213 + ], + "page_idx": 0 + }, + { 
+ "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 250, + 545, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present a comparative study on how and why contrastive learning (CL) and masked image modeling (MIM) differ in their representations and in their performance of downstream tasks. In particular, we demonstrate that self-supervised Vision Transformers (ViTs) have the following properties: (1) CL trains self-attention to capture longer-range global patterns than MIM, such as the shape of an object, especially in the later layers of the ViT architecture. This CL property helps ViTs linearly separate images in their representation spaces. However, it also makes the self-attention collapse into homogeneity for all query tokens and heads. Such homogeneity of self-attention reduces the diversity of representations, worsening scalability and dense prediction performance. (2) CL utilizes the low-frequency signals of the representations, but MIM utilizes high-frequencies. Since low- and high-frequency information respectively represent shapes and textures, CL is more shape-oriented and MIM more texture-oriented. (3) CL plays a crucial role in the later layers, while MIM mainly focuses on the early layers. Upon these analyses, we find that CL and MIM can complement each other and observe that even the simplest harmonization can help leverage the advantages of both methods.", + "bbox": [ + 228, + 280, + 769, + 503 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 529, + 336, + 544 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Contrastive Learning (CL) (He et al., 2020; Chen et al., 2020a;b; 2021) has been the most popular self-supervised learning methods until recently. 
It aims to learn the invariant semantics of two random views (Tian et al., 2020a;b) by making global projections of representations similar for positive samples and dissimilar for negative samples. Since CL exploits the globally projected representations to contrast each other, it can be deemed as an \"image-level\" self-supervised learning approach.", + "bbox": [ + 169, + 559, + 826, + 631 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Deviating from CL, masked image modeling (MIM) (Bao et al., 2022; Xie et al., 2022b; He et al., 2022) has risen as a strong competitor of CL in the era of Vision Transformers (ViTs) (Dosovitskiy et al., 2021) with its impressive performances of downstream tasks. MIM trains ViTs by reconstructing the correct semantics of masked input patches. Unlike CL, it learns the semantics of patch tokens and this can be deemed as a \"token-level\" self-supervised learning approach. Since MIM outperforms CL in fine-tuning accuracy, it may appear prima facie as a more effective pre-training method than CL. However, a different trend is observed for linear probing accuracy with CL outperforming MIM (See Figure 1). For further exposition on CL and MIM, we refer the reader to Appendix B.", + "bbox": [ + 169, + 636, + 826, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Then, which method—CL or MIM—should we use for the self-supervised learning of ViTs? Although both methods are widely used, little is known about what they learn. This paper sheds light on their nature by showing that ViTs trained through CL and MIM learn opposite knowledge. In particular, we raise questions to better understand self-supervised learning, and then find the answers that can potentially affect future improvements. The questions posed can be divided into the following properties of Vision Transformers: the behavior of self-attention, the transformation of the representations, and the position of lead role components. 
Our key questions and findings are elaborated below.", + "bbox": [ + 169, + 753, + 828, + 853 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "How do self-attention behaviors? (Section 2) We find that CL primarily captures global relationships, while MIM captures local relationships. This implies that the representations of CL contain more global patterns, such as object shapes, than those of MIM. On the one hand, this property helps", + "bbox": [ + 169, + 859, + 826, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Most of this work was done while the author was at Naver AI Lab.", + "bbox": [ + 189, + 909, + 604, + 922 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/fb19c61ab1e5f8dba96ca35eba9a9981210dcc46fda949229d28dd8d16bd1885.jpg", + "image_caption": [ + "Figure 1: CL outperforms MIM in linear probing and small model regimes. In contrast, MIM excels in fine-tuning, large model regimes, and dense prediction. Red squares (■) denote CL, and blue triangles (▲) denote MIM. By default, we report the performance of ViT-B trained or pretrained on ImageNet-1K. We use the results from original papers and He et al. (2022) for object detection. Regarding the scaling experiment, we report the results that we reproduced based on official configurations except with 100 epochs, marking them as $\\mathrm{MoCo}^{\\dagger}$ and SimMIM†. Left: CL outperforms MIM in linear probing but underperforms in fine-tuning. Middle: CL outperforms MIM in small model regimes (ViT-Ti and ViT-S), and MIM shows superior scalability in large model regimes (ViT-L and ViT-H). 
Right: MIM outperforms CL in the dense prediction downstream tasks, such as object detection with Mask R-CNN (He et al., 2017) on COCO (Lin et al., 2014)." + ], + "image_footnote": [], + "bbox": [ + 181, + 99, + 395, + 246 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/8e526279a352ed99d5c45a57f0a7abf4002aaab19db924697413ba84fa35bdc7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 398, + 99, + 607, + 244 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9174d5157db5599272dee2a00a1acf8c538e0e673d696d0f88c2d45b4285662c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 101, + 818, + 244 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "CL recognize objects and distinguish images. On the other hand, however, it also suggests that CL struggles to preserve local information. In particular, we observe that self-attention of CL in the later layers for all query tokens and heads collapse into homogeneous attention maps. In such cases, most self-attention maps focus on object boundaries, meaning that they can capture object shapes but may lose interaction diversity between tokens. Consequently, CL and MIM each have advantages over different tasks: CL works well for linear probing and classification tasks with smaller models, whereas MIM outperforms CL in fine-tuning and dense prediction tasks with larger models.", + "bbox": [ + 169, + 405, + 826, + 503 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "How are representations transformed? (Section 3) CL transforms representations mainly based on image-level information, and its self-attentions collect information on object shape over entire tokens. This process makes tokens similar rather than diversifying them. As a result, CL distinguishes images well but has difficulty distinguishing tokens. On the contrary, MIM preserves and amplifies token-level information. 
Thus, the self-attentions for each token are substantially different and prohibit each token from including redundant information. We observe the consistent property from our Fourier analysis: CL primarily utilizes the low-frequency signals, but MIM utilizes high-frequencies. This observation suggests that CL is shape-biased and MIM is texture-biased. In sum, self-supervised models trained with CL and MIM learn the representations in different levels of detail.", + "bbox": [ + 169, + 510, + 823, + 636 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Which components play an important role? (Section 4) Analyses of the importance of each CL and MIM layer demonstrate that the later layers in CL and early layers in MIM play a key role. We interpret this as a consistent observation since early layers are usually known to capture low-level features—e.g., local patterns, high-frequency signals, and texture information—and later layers capture global patterns, low-frequency signals, and shape information (Dosovitskiy et al., 2021; Raghu et al., 2021; d'Ascoli et al., 2021; Graham et al., 2021; Dai et al., 2021; Park & Kim, 2022b).", + "bbox": [ + 169, + 642, + 826, + 727 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "From the above analyses and insights, we find that CL and MIM can complement each other and show in Section 5 that even the simplest implementation, such as a linear combination of CL and MIM objectives, can take advantage of both methods. Surprisingly, the hybrid models outperform those pre-trained with either CL or MIM both in terms of fine-tuning and linear probing accuracy.", + "bbox": [ + 169, + 732, + 823, + 790 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 HOW DO SELF-ATTENTIONS BEHAVE?", + "text_level": 1, + "bbox": [ + 171, + 808, + 529, + 824 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We point out that CL and MIM may not be silver bullets for all tasks, as shown in Figure 1. 
CL generally outperforms MIM in linear probing, while MIM dominates CL in the fine-tuning scheme. However, when we dissect the size of the model, CL outperforms MIM after fine-tuning for small models (cf. (Wang et al., 2022)), while MIM performs better on large models. Also, MIM yields effective representations for dense prediction tasks, such as object detection, but CL falls short on those tasks. This section explains these phenomena by investigating the behavior of self-attention.", + "bbox": [ + 169, + 839, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bf35728974a9349d03d0e62edcedf24d8b55abf4d06e5b006a0e6d0de5b5a279.jpg", + "image_caption": [ + "" + ], + "image_footnote": [], + "bbox": [ + 189, + 99, + 331, + 210 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4b948ba1de2243db01e228d1c6dccedc84dd877ca9996c9111d42b7fefe4fc74.jpg", + "image_caption": [ + "Depth $= 1$" + ], + "image_footnote": [], + "bbox": [ + 336, + 162, + 383, + 220 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0d2c4ae36b84acbb38247d9f530838052fed9d70228f569bc7c290b4870e56ea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 99, + 460, + 157 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/2cd9f91e1dbd7f253962c304f777c5b901582ff44617837422fd4c29956eeb42.jpg", + "image_caption": [ + "Depth $= 4$", + "(a) MoCo", + "Figure 2: Self-attention of CL (MoCo) capture global relationships, but they collapse into homogeneous attention maps for all query tokens and heads. Self-attention of MIM (SimMIM) mainly focus on local areas. 
We visualize the attention maps for two different query tokens in the beginning through the end layers. We omit the results for self-attention heads, which show mostly consistent results. Left: Self-attention of CL capture global patterns and the shape of an object. However, all attention maps capture the same shape information regardless of the query tokens. Right: Self-attention of MIM capture local patterns and are correlated with query tokens." + ], + "image_footnote": [], + "bbox": [ + 413, + 162, + 460, + 220 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/cd87e3ce9fd95db6d96f4acab5aa416a349ecc5e66aa1a73ab836bb3008a4f80.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 99, + 563, + 157 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4bf05f6f2707473d618fa5bbe36a08de93104499a912f0053ed63910785ff73b.jpg", + "image_caption": [ + "Depth $= 11$" + ], + "image_footnote": [], + "bbox": [ + 491, + 162, + 563, + 220 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/774707bc6a8c17a823d098c5932768a2a907b5729e535c7e059ee849ac95d4cb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 580, + 99, + 653, + 157 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5bb8c9061fd07a7d2a1da479182ba2962c50c66e1d277bc6e47217e25063792a.jpg", + "image_caption": [ + "Depth $= 1$" + ], + "image_footnote": [], + "bbox": [ + 580, + 162, + 653, + 220 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/28aaca9c4abcebdc1b515966087fbb173a50f019faade9018ec7be7505fe36de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 656, + 99, + 730, + 157 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/24c5506ade57cd25c09e8cddfb90324c84f474209403bf28aa3b97c5065e765b.jpg", + "image_caption": [ + "Depth $= 4$", + "(b) SimMIM" + ], + "image_footnote": [], + "bbox": [ + 656, + 162, + 730, + 220 + ], + "page_idx": 2 + }, + { + "type": 
"image", + "img_path": "images/f494cdd375851dcadd3b3dfee47b7994f0e0f66ae842cc5e45e1d7f69702eb20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 733, + 99, + 807, + 157 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6c43d5fa9ff4523b19d789b99a778143dcf43176ca7a3a76257659d61bf7da06.jpg", + "image_caption": [ + "Depth $= 11$" + ], + "image_footnote": [], + "bbox": [ + 733, + 162, + 807, + 220 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our analyses mainly compare ViT-B/16 pre-trained on ImageNet-1K (Russakovsky et al., 2015) with MoCo v3 (Chen et al., 2021) and SimMIM (Xie et al., 2022b). We use the ImageNet validation images for our experiments. We observe that other methods, e.g., DINO (Caron et al., 2021), BEiT (Bao et al., 2022), and MAE (He et al., 2022), have consistent properties (See Figure C.1).", + "bbox": [ + 169, + 387, + 823, + 445 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "CL mainly captures global relationships. We measure the ranges of self-attention via attention distance (Dosovitskiy et al., 2021). Attention distance is defined as the average distance between the query tokens and key tokens considering their self-attention weights. Therefore, it conceptually corresponds to the size of the receptive fields in CNNs.", + "bbox": [ + 169, + 460, + 583, + 545 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Figure 3 shows that the attention distance of CL (MoCo) is significantly higher than that of MIM (SimMIM), especially in the later layers. As seen in Figure 2, the qualitative visualization, this implies that the representations of CL contain global patterns and shape information, so CL can help ViTs distinguish between objects of images. Conversely, the self-attention of MIM mainly capture local relationships; i.e., MIM may have difficulty recognizing whole objects and their shapes. 
Section 3 also discuss this claim from a representational perspective.", + "bbox": [ + 169, + 551, + 583, + 678 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Self-attention of CL collapse into homogeneity. We observe an interesting behavior of CL in Figure 2, which shows the attention maps for query tokens from two different spatial locations. The self-attention of CL surprisingly indicate almost identical object shapes for the two query tokens, compared to that of MIM. We describe this phenomenon as an attention collapse into homogeneity. This collapsing trend in the self-attention of CL is observed across all the heads and query tokens. In contrast, the self-attention of MIM are more faithful to the two query tokens, as expected.", + "bbox": [ + 169, + 694, + 583, + 834 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We use normalized mutual information (NMI) (Strehl & Ghosh, 2002) to measure the attention collapse. Let $p(q)$ be a distribution of query tokens, and assume that these query tokens are uniformly distributed since a single query token is given for each spatial coordinate, i.e., $p(q) = 1 / N$ where $N$ is the number", + "bbox": [ + 169, + 839, + 583, + 910 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "of the tokens. Then the joint distribution of query and key tokens is $p(q, k) = \\pi(k|q)p(q)$ where", + "bbox": [ + 169, + 909, + 823, + 925 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/99e3b5c3a038b687f23183f30eb71953eccbf1a6c211918b2aca6e3631a13e01.jpg", + "image_caption": [ + "Figure 3: Effective receptive fields of CL are global, but those of MIM are local. This is particularly evident in the later layers." 
+ ], + "image_footnote": [], + "bbox": [ + 601, + 465, + 818, + 609 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a1cb58d7c38ec6d018ff2afe2609cbab97a982fa031907530f646285aa1bb55d.jpg", + "image_caption": [ + "Figure 4: Self-attentions of CL have little to do with query tokens. Normalized MI of CL is significantly lower than that of MIM in the later layers." + ], + "image_footnote": [], + "bbox": [ + 598, + 700, + 815, + 845 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/82d220a43e6c076dc65feb2c5e9314ccdb2a6b51deec70dd041e72eb1855935d.jpg", + "image_caption": [ + "(a) Between heads" + ], + "image_footnote": [], + "bbox": [ + 178, + 102, + 390, + 239 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e68f0d48a31427b1e469ae73c101ef9376549e8a96f084255d217431e321cd11.jpg", + "image_caption": [ + "(b) Between depths", + "Figure 5: CL lacks representational diversity in the later layers. We measure cosine similarities of representations in the self-attentions between the heads (left), depths (middle), and spatial coordinates (right). All of the results show that the representational similarity of later self-attentions of CL is higher than that of MIM. Increasing heads or depths of CL is not effective in improving the diversity. Left: The similarity of representations from two heads in self-attention. Middle: The similarity between representations before and after self-attention transform them. Right: The similarities of representations at two spatial coordinates. ViT- $\\{\\mathrm{S}, \\mathrm{L}\\}$ is trained with 100 epochs." 
+ ], + "image_footnote": [], + "bbox": [ + 392, + 102, + 604, + 239 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9ee5281e4a88280f4a955c6205a315b8252a5d398e156f8772953329c885ac96.jpg", + "image_caption": [ + "(c) Between tokens" + ], + "image_footnote": [], + "bbox": [ + 607, + 101, + 820, + 239 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\pi (k|q)$ is the softmax-normalized self-attention matrix. Thus, the normalized mutual information is $\\frac{I(q,k)}{\\sqrt{H(q)H(k)}}$ where $I(\\cdot ,\\cdot)$ is the mutual information and $H(\\cdot)$ is the marginal entropy. Low mutual information values show that attention maps are less dependent on the query tokens, implying an attention collapse into homogeneity. Conversely, high mutual information means that the attention maps strongly depend on the query tokens.", + "bbox": [ + 169, + 380, + 823, + 462 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 4 shows the degree of attention collapse in terms of the normalized mutual information (NMI). Results show that the mutual information of CL is significantly lower than that of MIM in the later layers, suggesting that the self-attention of CL tend to collapse into homogeneous distributions.", + "bbox": [ + 169, + 468, + 826, + 512 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Attention collapse reduces representational diversity. We conjecture that the self-attention collapse into homogeneity eventually leads to homogeneous token representations. To support this argument, we measure representational cosine similarities. 
In particular, we design three similarities: between different self-attention heads (heads), between the before and after self-attention layers (depths), and between different tokens (tokens).", + "bbox": [ + 169, + 525, + 826, + 595 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 5 shows the results, reporting the representation similarities for heads, depths, and tokens. As expected, the similarities of CL are notably higher than those of MIM in the later layers, indicating that the representations of CL have significant homogeneity. Even increasing the model size does not solve the problem CL has and may rather worsen it. Increasing the number of heads (ViT-S to ViT-B; Figure 5a) improves the representational diversity of MIM, but hardly improves the diversity of CL. Increasing the depth of CL (ViT-B to ViT-L; Figure 5b) only adds redundant modules.", + "bbox": [ + 169, + 602, + 826, + 688 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Implications of the behaviors we observed. In conclusion, the self-attention of CL captures global patterns and shapes of objects. However, CL suffers from the problem of attention collapse into homogeneity, which reduces the diversity of token representations. On the other hand, MIM primarily captures local patterns and thus does not suffer from the attention collapse problem.", + "bbox": [ + 169, + 700, + 823, + 758 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The behaviors mentioned above can explain the phenomena we observed in Figure 1:", + "bbox": [ + 169, + 763, + 732, + 779 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- CL outperforms MIM in linear probing tasks because it captures shapes, which helps recognize objects and distinguish images. 
Although MIM preserves the texture and diversity of representations, their correlation with objects or content may not be as strong as shapes do.", + "- The attention collapse prohibits CL from fully exploiting heads, depths, and tokens of ViTs. Since homogeneous representations are not very helpful in improving token representations, ViTs trained with CL waste a large part of network capability. Therefore, the fine-tuning accuracy of MIM is significantly higher than CL in large models.", + "- CL is not suitable for dense prediction since the token features are homogeneous with respect to their spatial coordinates." + ], + "bbox": [ + 179, + 789, + 826, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/49c4ce65597eb7dcdb83ed994f7627dba34a0d903b0043650206813c7fc112b3.jpg", + "image_caption": [ + "(a) MoCo (one image)" + ], + "image_footnote": [], + "bbox": [ + 214, + 133, + 364, + 220 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a8c0fbaadb19d29e242269eabe79f452b5fc0aa94e82419a7dad9f4624fe83e7.jpg", + "image_caption": [ + "(b) MoCo (two images)", + "Figure 6: Self-attention layers of CL and MIM transform representations differently. We visualize 196 spatial representation tokens for an example validation image in a representation space. The blue $(\\bullet)$ and red $(\\bullet)$ data points denote the tokens before and after the self-attention transformation. Left: The self- attentions of CL (e.g., MoCo) translate all the tokens equally, so the distances between the tokens of an image do not increase. Middle: However, CL moves the \"centers of representations (represented by $\\times$ )\" away from each other. Therefore, the images are linearly separable. 
The circle $(\\bullet)$ and triangle $(\\triangle)$ data represent tokens from different images. Right: The self- attentions of MIM (e.g., SimMIM) transform representations differently according to query tokens, thus increasing the distances between tokens. See Figure 7 for quantitative analyses." + ], + "image_footnote": [], + "bbox": [ + 421, + 103, + 576, + 236 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d79a51535df574efcc7e45e8fb443c5da38de43e28c348c07224b35da91d8830.jpg", + "image_caption": [ + "(c) SimMIM (one image)" + ], + "image_footnote": [], + "bbox": [ + 627, + 112, + 795, + 220 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We further investigate the self-attention's behavior with restricted receptive fields in Figure D.1. As shown in the experiment, locally restricted self-attentions lead to lower linear probing but higher fine-tuning accuracy, which is consistent with our observations.", + "bbox": [ + 169, + 417, + 826, + 460 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 HOW ARE REPRESENTATIONS TRANSFORMED?", + "text_level": 1, + "bbox": [ + 171, + 481, + 599, + 498 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we analyze the token representations of ViTs pre-trained with CL and MIM to demonstrate how the properties of self-attentions we observed in Section 2 affect the representations differently. We use the same pre-trained ViT-B/16 models by default default just as we did in Section 2.", + "bbox": [ + 169, + 513, + 826, + 558 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "CL transforms all tokens in unison, while MIM does so individually. To show how CL and MIM transform token representations, we visualize them in representation space. Figure 6 shows 196 (14×14 patches) tokens before and after self-attention modules from a single image sample of the ImageNet validation set. 
We use the three large singular vectors obtained via singular value decomposition (SVD) as the bases of the space. To better visualize this, we display the representation of MoCo and SimMIM in their crucial layers—the last layer and the first layer, respectively.", + "bbox": [ + 169, + 574, + 823, + 659 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Figure 6a visualizes the changes that occur in the tokens of CL when transformed by self-attention module; it indicates that the self-attention of CL translate all tokens in unison. This phenomenon occurs because the self-attention maps of CL are homogeneous, i.e., self-attention is almost independent of the spatial coordinates and query tokens. Therefore, the modules add near-constant to all the token representations. As a result, the inter-representation distance and the volume of representations do not increase, which implies that CL cares less about individual tokens.", + "bbox": [ + 169, + 665, + 826, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Nevertheless, self-attention are essential for the discriminative power of CL. As shown in Figure 6b, they help distinguish images by moving \"the centers of the representation distribution\" away from each other. In short, this figure suggests that CL makes the image linearly separable even though it loses the ability to distinguish tokens.", + "bbox": [ + 169, + 756, + 826, + 811 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In contrast, MIM applies a different transformation to individual tokens, as shown in Figure 6c, because different self-attention are assigned to the individual spatial tokens. Thus, MIM alters the distance between tokens of a single image as well as the volume of the representation distribution.", + "bbox": [ + 169, + 819, + 825, + 861 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We find consistent results in quantitative analysis. Inspired by Jing et al. 
(2022), Figure 7 visualizes singular value spectra for tokens and images. A singular value spectrum provides singular values of a representation distribution obtained by SVD, so we can use it to represent the effective volume of distributions in a representation space. The higher the singular value in a spectrum, the larger the", + "bbox": [ + 169, + 868, + 825, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9ef3380b4df0c37dc0622f3ca6b6662e88772a9772af3c5a415479d047cf2c99.jpg", + "image_caption": [ + "(a) Singular value spectrum of tokens from a single image (token-level)" + ], + "image_footnote": [], + "bbox": [ + 233, + 99, + 380, + 223 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/de245a5bf2e224909a2ff0fd09edb5e8e951208b716152fbbd1bda67243a3df2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 385, + 99, + 563, + 223 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b7090f8183eca964795b233964f9b8ff98297632d955f865063ff44f1d947265.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 584, + 103, + 767, + 224 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/10818ff7ef3740f028faa7a71f35ab740d750a4e968475054bd344e0af097616.jpg", + "image_caption": [ + "(b) Singular value spectrum of images (image-level)" + ], + "image_footnote": [], + "bbox": [ + 228, + 253, + 379, + 375 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/734960add7561f85bce6dfd04d6919f304218f71ac54b926079a4fe51dc8b634.jpg", + "image_caption": [ + "Figure 7: CL barely changes or even decreases the distribution volume of tokens from a single image, implying that it hardly distinguishes between tokens. 
Instead, it significantly increases the distribution volume of images. To demonstrate these properties, we visualize singular value spectra, the singular values of the distribution of representations sorted by the magnitude. The higher a singular value, the larger the volume of a distribution. The right of this figure shows the $64^{\\text{th}}$ and $128^{\\text{th}}$ highest singular value for depth. Top: Singular value spectra of tokens from a single image. CL decreases the singular values of the tokens, but MIM increases. Bottom: Singular value spectra of images. CL significantly increases the volumes occupied by images, but MIM hardly does so." + ], + "image_footnote": [], + "bbox": [ + 383, + 252, + 566, + 375 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4a648590f389717f54d3b3c3dfc28108812c7ddc6e8871eb4efd288d01028240.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 586, + 255, + 771, + 375 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "volume of a representation distribution. To calibrate the scale, we use the relative log singular value $(\\Delta \\log \\text{ singular value})$ , the difference with the (second) largest singular value for a depth.", + "bbox": [ + 169, + 534, + 823, + 564 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 7a shows singular value spectra of tokens from a single image. We calculate them for each image in the ImageNet validation set and report averaged singular values over the dataset. In this figure, the CL layers hardly increase or even decrease the singular value; consistent with the explanation above, this implies that CL hardly distinguishes tokens. In contrast, MIM increases the singular value, meaning that it changes the volume of tokens and can distinguish tokens. Another interesting observation is that a few later layers of MIM decrease the volume, even though they capture local patterns as shown in Figures 3 and 4. 
This is because they behave like decoders. Section 4 discusses this in detail.", + "bbox": [ + 169, + 568, + 826, + 679 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 7b shows the singular value spectra of images. We average all tokens in an image to build an image-level representation vector and conduct a singular value spectrum over the collection of representations in the validation set. As opposed to the previous case, the representational volume of CL is larger than that of MIM, which implies that CL makes the image-level representation separable.", + "bbox": [ + 169, + 686, + 826, + 744 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "CL exploits low-frequencies, and MIM exploits high-frequencies. We hypothesize that CL captures low-frequency and MIM captures high-frequency information in spatial dimensions since CL provides image-level self-supervision to capture global patterns, while MIM provides token-level self-supervision to exploit local patterns. To support this argument from a frequency perspective, we conduct a Fourier analysis of the representations as following Park & Kim (2022b). In particular, we report the relative log amplitude of Fourier-transformed representations by calculating the amplitude difference between the highest and lowest frequencies of representations.", + "bbox": [ + 169, + 760, + 826, + 859 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 9 visualizes the relative amplitudes of CL and MIM. It shows that the high-frequency amplitude of CL is significantly smaller than that of MIM, suggesting that CL mainly utilizes low-frequency spatial information such as global structures and shapes. 
On the contrary, MIM usually uses high-frequency spatial information such as narrow structures and fine textures.", + "bbox": [ + 169, + 864, + 826, + 921 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/aa4af24e63032d5d5a609582042f59d6bf99b8848371ea63d40be6ce655be4e0.jpg", + "image_caption": [ + "(a) Stylized ImageNet" + ], + "image_footnote": [], + "bbox": [ + 243, + 103, + 509, + 276 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/17b03a344d686cfb28d91b880356506d67cd18e5b01a0c0eb4f7cc9058d21eb7.jpg", + "image_caption": [ + "(b) Robustness for noise frequency" + ], + "image_footnote": [], + "bbox": [ + 540, + 130, + 759, + 277 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Another interesting finding is that the last few layers of MIM reduce the high frequencies even though they only focus on local areas (See Figure 3). We conjecture that MIM implicitly divides ViTs into the encoder-decoder structure and allows intermediate layers to have linearly separable information. In contrast, CL allows the last layer to have such information. This is further elaborated in Figure 11.", + "bbox": [ + 169, + 453, + 563, + 551 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "CL is shape-biased, but MIM is texture-biased. Based on the results of the Fourier analysis, we assume that CL and MIM each have a bias toward shapes and textures, respectively. To demonstrate this claim, we use Stylized ImageNet (Geirhos et al., 2019), a texture-altered dataset, by using AdaIN (Huang & Belongie, 2017). Figure 8a reports the linear probing results on Stylized ImageNet to evaluate the shape and texture biases of pre-trained models. 
Compared to the model pre-trained with supervised learning, CL depends more on the shape and MIM depends on texture of images to classify images. In other words, CL is robust to texture changes, and MIM is vulnerable to them.", + "bbox": [ + 169, + 568, + 565, + 736 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d333916ae8c845554b3935f597f899fe57ac1a7ba7f3e5412aca22a2cfff7673.jpg", + "image_caption": [ + "Figure 8: CL is biased toward shape, whereas MIM is biased toward texture. We report the predictive results of models for linear probing tasks. However, we observe consistent results in fine-tuned models (See Figure F.2). Left: Result of classification on Stylized ImageNet. It shows that CL is more shape-biased than MIM and even than the supervised pre-trained model. Vertical lines represent averaged results for the shape categories. We also report the results of supervised ViT with ImageNet-1K class labels for comparison. Right: Accuracy drops on images with frequency-based random noises. MIM shows a more significant amount of accuracy drop than CL with high-frequency noises, demonstrating MIM's texture-biased property. The frequency window size of the frequency-based noise is $0.1\\pi$ .", + "Figure 9: CL exploits low-frequency, but MIM exploits high-frequency. Moreover, a few last layers of CL reduce high-frequency by capturing global patterns. MIM also reduces it even though they capture local patterns, because the later layers behave like decoders. See also Figure 11." + ], + "image_footnote": [], + "bbox": [ + 584, + 458, + 816, + 608 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 8b shows the consistent results. In this experiment, we follow Park & Kim (2022a;b) and measure the decrease in accuracy on the ImageNet dataset with frequency-based random noise. The results suggest that CL is robust to high-frequency noises, but MIM is significantly more vulnerable to them. 
Since high-frequency noises harm the fine details of images, we arrive at the same conclusion that CL is more shape-biased and MIM is texture-biased. This can explain the robustness of CL against adversarial perturbations (Bordes et al., 2022).", + "bbox": [ + 169, + 742, + 823, + 828 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 WHICH COMPONENTS PLAY AN IMPORTANT ROLE?", + "text_level": 1, + "bbox": [ + 171, + 848, + 635, + 864 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The previous sections consistently show through various perspectives that CL exploits image-level global patterns while MIM captures token-level local patterns. This section analyzes pre-trained ViTs from an architectural perspective and shows that the key components in CL and MIM are different.", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/28b264e1dfb9a221edefed3623532e8a409dcf5bd2f5e7438f11ae454028eb23.jpg", + "image_caption": [ + "(a) Self-attention" + ], + "image_footnote": [], + "bbox": [ + 187, + 99, + 486, + 226 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f1d6145da6e351ee872975f01fa01b41af6cd669653784b00caefb8832f9a64a.jpg", + "image_caption": [ + "(b) Fourier analysis", + "Figure 10: The explicit decoder architecture of MAE helps ViTs effectively leverage the advantages of MIM. We analyze the encoder and decoder of a pre-trained model with a masking ratio of zero. The left side of each figure represents the encoder and the right side the decoder. Left: The mutual information of MAE is lower than that of SimMIM in the encoder but higher in the decoder. 
Right: The decoder of MAE captures low-frequency information, and its encoder captures high-frequency information. Moreover, the later layers (excluding the last layer) of MAE do not reduce high-frequency information, while those of SimMIM do." + ], + "image_footnote": [], + "bbox": [ + 504, + 99, + 812, + 227 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Later layers of CL and early layers of MIM are important. According to studies on ViT (Graham et al., 2021; Dai et al., 2021; Park & Kim, 2022b), the later layers use high-level information, and the early layers exploit low-level information. Since CL and MIM each exploit global and local patterns, we expect that the later layers of CL and early layers of MIM play a key role.", + "bbox": [ + 169, + 369, + 826, + 428 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To evaluate the importance of each layer, we measure the linear probing accuracy using intermediate representations with the configuration of Table A.1. In Figure 11, we observe the following properties: First, the linear probing accuracy of MIM is higher than that of CL at the beginning. Conversely, CL outperforms MIM at the end of the model. Such result indicates that the later layers of CL and early layers of MIM play an important role in making linearly separable representations. Second, the accuracy of CL increases with increasing depth as expected, but the accuracy of MIM surprisingly decreases at the end of the model, i.e., the later layers of MIM are not very helpful in separating representations. We explain this observation as a phenomenon in which MIM methods with shallow prediction heads, e.g., SimMIM, use later layers of the backbone as a decoder. Therefore, MIM with a deep self-attention decoder, e.g., MAE (He et al., 2022), can be useful for linear probing performance. Moreover, it also explains why SimMIM's high-frequency component and representational volumes drop in the later layers as shown in Figures 7 and 9. 
Third, even the highest linear probing accuracy of MIM is lower than that of CL.", + "bbox": [ + 169, + 433, + 565, + 724 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0f82e99a78a6a5cbc0ac240670d931fd06893c5a8e088b52c6309caecaf9d3ad.jpg", + "image_caption": [ + "Figure 11: Later layers of CL and early layers of MIM play a key role. We report linear probing accuracies by using the representations of the intermediate layers. CL outperforms MIM in later layers, and MIM outperforms CL in early layers." + ], + "image_footnote": [], + "bbox": [ + 584, + 439, + 816, + 597 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The explicit decoder helps ViTs further leverage the advantages of MIM. Several previous observations find that the implicit decoder of MIM with a shallow prediction head, such as SimMIM, can impair performance. MAE (He et al., 2022) addresses this problem by introducing deep explicit ViT decoders and reconstructing masked tokens only in the separate decoders.", + "bbox": [ + 169, + 741, + 826, + 800 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Figure 10, we analyze MAE to understand the properties of decoders more deeply. Figure 10a shows the self-attention behaviors. The results indicate that the mutual information of MAE is lower than that of SimMIM in the later layers of the encoder but higher in the decoder, implying that the decoder reconstructs masked tokens based on its neighborhood tokens.", + "bbox": [ + 169, + 805, + 825, + 861 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 10b shows the results of the Fourier analysis. As explained in Figure 9, the last four layers of SimMIM reduce the high-frequency components. In contrast, the later layers (excluding the last layer) of MAE do not reduce them. 
Instead, the decoder of MAE prioritizes low-frequency information compared with the encoder, allowing the backbone to efficiently utilize high-frequency information.", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b7e58b0e0c9a6b67dbc0e7313b85167f8d8dca7c28ae3567fa1fd9acc4b1186b.jpg", + "image_caption": [ + "(a) Performance" + ], + "image_footnote": [], + "bbox": [ + 186, + 99, + 395, + 239 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/06ff79e5b211bc5fabfa86aff051d6c454f9ecfab754055a85d05fce81ad52d9.jpg", + "image_caption": [ + "(b) Self-attention" + ], + "image_footnote": [], + "bbox": [ + 395, + 99, + 599, + 239 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/178d8aaae7148ce435789432a3d4773d9246beee239105030dc1d7abda603364.jpg", + "image_caption": [ + "(c) Fourier analysis", + "Figure 12: The simple linear combination of CL (MoCo) and MIM (SimMIM) objectives outperforms the vanilla CL and MIM. $\\lambda$ is the importance weight of CL, so $\\lambda = 0$ means SimMIM and $\\lambda = 1$ means MoCo. Left: \"CL + MIM\" outperforms CL and MIM in both linear probing and fine-tuning accuracy. Middle: Mutual information of \"CL + MIM\" decreases at the end of the model, suggesting that the self-attention of later layers collapse into homogeneity and capture the same object shape information. Right: Fourier analysis shows that \"CL + MIM\" amplifies high frequencies at the beginning and reduces them at the end. It implies that \"CL + MIM\" exploits high-frequency information at the beginning and low-frequency information at the end." 
+ ], + "image_footnote": [], + "bbox": [ + 602, + 101, + 812, + 241 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 ARE THE TWO METHODS COMPLEMENTARY TO EACH OTHER?", + "text_level": 1, + "bbox": [ + 171, + 398, + 732, + 416 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We present comparative analyses on CL and MIM from three perspectives: self-attention, representation transforms, and the position of important layers. All of our results indicate that CL and MIM train ViTs differently. These differences naturally imply that combining CL and MIM to train a backbone may help leverage the advantages of both methods.", + "bbox": [ + 169, + 431, + 826, + 488 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To show that CL and MIM are complementary, we introduce the simplest way to harmonize CL and MIM by linearly combining two losses, i.e., $\\mathcal{L} = (1 - \\lambda)\\mathcal{L}_{\\mathrm{MIM}} + \\lambda \\mathcal{L}_{\\mathrm{CL}}$ where $\\mathcal{L}_{\\mathrm{MIM}}$ and $\\mathcal{L}_{\\mathrm{CL}}$ each indicate the losses of MIM and CL, and $\\lambda$ is the importance weight of CL. We find that this simple hybrid model trained with combined losses efficiently exploits the strengths of both methods. Figure 12a shows linear probing and fine-tuning accuracy on ImageNet with varying $\\lambda$ . Surprisingly, the hybrid models outperform MIM ( $\\lambda = 0$ ) and CL ( $\\lambda = 1$ ) in both aspects. Figure 12b and Figure 12c can provide insights on how hybrid models behave by analyzing the model with $\\lambda = 0.2$ in terms of self-attention in Section 2 and Fourier analysis in Section 3, respectively; both results show that the hybrid model exploits MIM properties in the early layers and CL properties in the later layers. In particular, Figure 12b indicates that the self-attention of the early layers are changed according to the query token but those of the later layers are not. 
Likewise, Figure 12c shows that the early layers exploit high-frequency, while the later layers try to exploit low-frequency.", + "bbox": [ + 169, + 494, + 826, + 662 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 681, + 320, + 696 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We conducted a comparative study highlighting various facets of two widely used self-supervised learning methods for vision transformers: contrastive learning (CL) and masked image modeling (MIM). The study demonstrated many opposing properties of the two methods: image information (image-level vs. token-level; as in Section 2), feature representations (low-frequency vs. high-frequency; as in Section 3), and lead role components (later layers vs. early layers; as in Section 4). Furthermore, we suggested a possible application that exploits only the benefits from both methods and showed how a combined model can outperform individual methods.", + "bbox": [ + 169, + 712, + 826, + 811 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Future directions. Various future directions can be explored based on our study. We believe that there are better ways than a simple linear combination of CL and MIM objectives. For example, a novel self-supervised learning approach, in which CL is applied in the later layers and MIM in the early layers, can be considered. Moreover, we may extend our findings on self-supervision for multi-stage ViTs, such as PiT (Heo et al., 2021) and Swin (Liu et al., 2021). Another interesting direction is to enhance the individual properties of CL and MIM. 
Techniques that help CL or MIM learn shapes or textures, respectively, may also improve performance.", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 102, + 287, + 118 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. In International Conference on Learning Representations, 2022.", + "Florian Bordes, Randall Balestriero, and Pascal Vincent. High fidelity visualization of what your self-supervised representation knows about. Transactions on Machine Learning Research, 2022.", + "Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In International Conference on Computer Vision, 2021.", + "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International Conference on Machine Learning, 2020a.", + "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2021.", + "Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b.", + "Xinlei Chen, Saining Xie, and Kaiming He. An empirical study of training self-supervised vision transformers. In International Conference on Computer Vision, 2021.", + "Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. 
Randaugment: Practical automated data augmentation with a reduced search space. In Advances in Neural Information Processing Systems, 2020.", + "Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. Coatnet: Marrying convolution and attention for all data sizes. Advances in Neural Information Processing Systems, 2021.", + "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021.", + "Stéphane d'Ascoli, Hugo Touvron, Matthew L Leavitt, Ari S Morcos, Giulio Biroli, and Levent Sagun. Convit: Improving vision transformers with soft convolutional inductive biases. In International Conference on Machine Learning, 2021.", + "Robert Geirhos, Patricia Rubisch, Claudio Michaelis, Matthias Bethge, Felix A Wichmann, and Wieland Brendel. Imagenet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness. In International Conference on Learning Representations, 2019.", + "Benjamin Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, and Matthijs Douze. Levit: a vision transformer in convnet's clothing for faster inference. In International Conference on Computer Vision, 2021.", + "Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. Advances in neural information processing systems, 2020.", + "Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, 2017.", + "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. 
Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020." + ], + "bbox": [ + 171, + 133, + 826, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022.", + "Byeongho Heo, Sangdoo Yun, Dongyoon Han, Sanghyuk Chun, Junsuk Choe, and Seong Joon Oh. Rethinking spatial dimensions of vision transformers. In International Conference on Computer Vision, 2021.", + "Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European Conference on Computer Vision, 2016.", + "Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In Proceedings of the IEEE international conference on computer vision, 2017.", + "Li Jing, Pascal Vincent, Yann LeCun, and Yuandong Tian. Understanding dimensional collapse in contrastive self-supervised learning. In International Conference on Learning Representations, 2022.", + "Hanjoo Kim, Minkyu Kim, Dongjoo Seo, Jinwoong Kim, Heungseok Park, Soeun Park, Hyunwoo Jo, KyungHyun Kim, Youngil Yang, Youngkwan Kim, et al. Nsml: Meet the mlaas platform with a real-world case study. arXiv preprint arXiv:1810.09957, 2018.", + "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. 
In European conference on computer vision, 2014.", + "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In International Conference on Computer Vision, 2021.", + "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018.", + "Namuk Park and Songkuk Kim. Blurs behave like ensembles: Spatial smoothings to improve accuracy, uncertainty, and robustness. In International Conference on Machine Learning, 2022a.", + "Namuk Park and Songkuk Kim. How do vision transformers work? In International Conference on Learning Representations, 2022b.", + "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, 2019.", + "Maithra Raghu, Thomas Unterthiner, Simon Kornblith, Chiyuan Zhang, and Alexey Dosovitskiy. Do vision transformers see like convolutional neural networks? Advances in Neural Information Processing Systems, 2021.", + "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 2015.", + "Alexander Strehl and Joydeep Ghosh. Cluster ensembles—a knowledge reuse framework for combining multiple partitions. Journal of machine learning research, 2002.", + "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016.", + "Yonglong Tian, Dilip Krishnan, and Phillip Isola. 
Contrastive multiview coding. In European Conference on Computer Vision, 2020a." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yonglong Tian, Chen Sun, Ben Poole, Dilip Krishnan, Cordelia Schmid, and Phillip Isola. What makes for good views for contrastive learning? Advances in Neural Information Processing Systems, 33:6827-6839, 2020b.", + "Yuandong Tian, Xinlei Chen, and Surya Ganguli. Understanding self-supervised learning dynamics without contrastive pairs. In International Conference on Machine Learning, 2021.", + "Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 2008.", + "Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8769-8778, 2018.", + "Shaoru Wang, Jin Gao, Zeming Li, Jian Sun, and Weiming Hu. A closer look at self-supervised lightweight vision transformers. arXiv preprint arXiv:2205.14443, 2022.", + "Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong, and Lei Li. Dense contrastive learning for self-supervised visual pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021.", + "Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022a.", + "Yixuan Wei, Han Hu, Zhenda Xie, Zheng Zhang, Yue Cao, Jianmin Bao, Dong Chen, and Baining Guo. Contrastive learning rivals masked image modeling in fine-tuning via feature distillation. arXiv preprint arXiv:2205.14141, 2022b.", + "Zhenda Xie, Zigang Geng, Jingcheng Hu, Zheng Zhang, Han Hu, and Yue Cao. Revealing the dark secrets of masked image modeling. arXiv preprint arXiv:2205.13543, 2022a.", + "Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022b.", + "Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917, 2022.", + "Sangdoo Yun, Dongyoon Han, Seong Joon Oh, Sanghyuk Chun, Junsuk Choe, and Youngjoon Yoo. Cutmix: Regularization strategy to train strong classifiers with localizable features. In International Conference on Computer Vision, 2019.", + "Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. In International Conference on Learning Representations, 2018.", + "Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal of Computer Vision, 127(3):302-321, 2019.", + "Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. ibot: Image bert pre-training with online tokenizer. International Conference on Learning Representations, 2022." 
+ ], + "bbox": [ + 171, + 102, + 826, + 811 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A SETUP", + "text_level": 1, + "bbox": [ + 171, + 102, + 267, + 118 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We build the configurations based on Xie et al. (2022b) for fine-tuning and Caron et al. (2021) for linear probing. Table A.1 summarizes the configurations. Most analyzes of ViTs use the official ViT-B pre-trained models, and some analyzes use ViT-{S, L} pre-trained with official configurations but epochs of 100. Due to memory limitations, ViT-L is pre-trained with a quarter batch size of the other models. The hybrid model introduced in Section 5 uses the ViT backbone architecture of Xie et al. (2022b) and employs a configuration based on their work for pre-training as shown in Table A.1. For data augmentation and regularization, we adopt widely used settings, e.g., Randaugment (Cubuk et al., 2020), label smoothing (Szegedy et al., 2016), mixup (Zhang et al., 2018), cutmix (Yun et al., 2019), stochastic depth (Huang et al., 2016). Layer decay (Bao et al., 2022) is also used for fine-tuning. Neural network models are implemented in PyTorch (Paszke et al., 2019). The code for analysis is available at https://github.com/naver-ai/cl-vs-mim. All experiments use {1, 4, 8} NVIDIA A100 Tensor Core GPU. NSML (Kim et al., 2018) has been used for experiments.", + "bbox": [ + 169, + 133, + 826, + 303 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/c55ccf7380dc893488f929e005a837e33550db9d874a752f52cf423e536c83df.jpg", + "table_caption": [ + "Table A.1: Training settings." + ], + "table_footnote": [], + "table_body": "
CONFIGURATIONLinear ProbingFine-tuningPre-training
optimizersgdadamwadamw
base learning rate1.0e-01.25e-31.0e-4
weight decay0.050.050.05
batch size1k2k1k
training epoch50100100
learning rate schedulecosinecosinemultistep
warmup epoch02010
warmup schedule·linearlinear
randaugment·9, 0.59, 0.5
label smoothing·0.10.1
mixup·0.80.8
cutmix·1.01.0
stochastic depth·0.10.1
layer decay·0.651.0
gradient clip·5.05.0
", + "bbox": [ + 259, + 337, + 738, + 645 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 675, + 349, + 690 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "CL is a method based on comparing the global projection of two different random views. However, this approach usually suffers from the collapsing problem, where all representations collapse into constant solutions. To solve this problem, various methods such as negative samples and InfoNCE (Oord et al., 2018) have been proposed. Negative samples are an effective technique to avoid the collapsing problems, but they cause dimensional collapse (Jing et al., 2022) and require extra large batches (Chen et al., 2020a) or memory queues (He et al., 2020; Chen et al., 2020b) to retrieve them. We mainly analyze MoCo v3 (Chen et al., 2021), since the method includes these de facto standard components—global projection, random views, and negative samples.", + "bbox": [ + 169, + 708, + 826, + 821 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Some SSL methods, e.g. Grill et al. (2020); Caron et al. (2021), do not use negative samples and use the projections of their Siamese representations as the positives. Such self-distillation has been explored theoretically and empirically (Chen & He, 2021; Tian et al., 2021) to prevent the collapsing problem, but we do not discuss the distillation scheme in this paper. Wei et al. (2022b) shows that feature distillation improves the fine-tuning performance of CL by diversifying attention ranges; this observation is consistent with our findings. 
While they focus on distillation to improve CL, we reveal the fundamental nature of self-supervised learning by rigorously comparing CL and MIM.", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/0a7defb6cf5ca448cbeadfa09eece56e710d4e928fa34c8110a31f6063707dd5.jpg", + "image_caption": [ + "(a) Attention distance" + ], + "image_footnote": [], + "bbox": [ + 272, + 104, + 483, + 244 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/2940011fb68852857470985169c458b291f5397c3a9df53a321760de5a4350d7.jpg", + "image_caption": [ + "(b) Normalized MI" + ], + "image_footnote": [], + "bbox": [ + 516, + 102, + 727, + 244 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/6b8d60f909c96901f45026634f8622fb176aa05b7b617722bdc2a9a000c3b65c.jpg", + "image_caption": [ + "(a) Attention distance", + "Figure C.2: ViTs exhibit consistent self-attention patterns, regardless of their size. To better understand these patterns, we visualize the self-attention behaviors of three ViTs—ViT-{Ti, S, B}—using two metrics: attention distance and normalized mutual information (MI). Left: All self-attention of MoCo capture global patterns in the later layers. In contrast, the self-attention of SimMIM capture local patterns. Right: Likewise, all self-attention maps of MoCo collapse into homogeneity in the later layers." + ], + "image_footnote": [], + "bbox": [ + 272, + 392, + 483, + 531 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/f7b27a38746bf54f92540391b3c2ca815b95f136dd50d7cbae60b838d52255ef.jpg", + "image_caption": [ + "Figure C.1: MIM and CL methods each have consistent properties. 
To show this, we visualize self-attention behaviors in terms of attention distance and normalized mutual information (MI). SimCLR\\*, which was introduced in Chen et al. (2021), stands for MoCo with a momentum coefficient of 0. Left: The attention distance of CL methods (namely MoCo, SimCLR\\*, and DINO) is higher than that of MIM methods (namely SimMIM, BEiT, and MAE). This suggests that CL methods consistently capture global patterns. Right: The normalized mutual information of MIM is higher than that of CL; i.e., the self- attentions of MIM are more correlated with query tokens than CL.", + "(b) Normalized MI" + ], + "image_footnote": [], + "bbox": [ + 516, + 390, + 727, + 531 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Compared with CL, MIM has been rarely explored in vision tasks. Various methods, such as histograms of oriented gradients (Wei et al., 2022a) and tokenization (Bao et al., 2022), have been proposed as part of porting masked language models to the image domain with ViTs. Among them, SimMIM (Xie et al., 2022b) and MAE (He et al., 2022) are simple yet effective methods to reconstruct masked tokens without complicated pretext tasks. Because of its simplicity and superior performance in downstream operations, MIM is attracting attention as a promising technique in image processing.", + "bbox": [ + 169, + 669, + 826, + 753 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Nevertheless, we find hints suggesting that CL and MIM utilize different aspects of the data, making them complementary. For example, Zhou et al. (2022); Wang et al. (2021); Yu et al. (2022) achieve high predictive performance by harmonizing the image-level and the token-level self-supervised learning. Xie et al. 
(2022a) also observe that, unlike supervised pre-trained models or CL, self-attention in SimMIM focus locally; this is a consistent result with our findings.", + "bbox": [ + 169, + 758, + 826, + 830 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C OUR INSIGHTS ARE GENERALIZABLE TO VARIOUS MODELS", + "text_level": 1, + "bbox": [ + 169, + 849, + 712, + 866 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In the main text, we analyze ViT-B pre-trained using MoCo and SimMIM. We observe consistent characteristics across various sizes of ViTs that have been pre-trained using other self-supervised learning methods. To support this claim, we delve into the properties of self-attention.", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/227f272cc4f03f82eeb6e01630535a451968a203c4b06945b12f38209dc45e73.jpg", + "image_caption": [ + "(a) Standard deviations" + ], + "image_footnote": [], + "bbox": [ + 274, + 99, + 486, + 242 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/6a47cec191d37145789159dcc512d224238fa7efc778426247a6f4d9dec3ca38.jpg", + "image_caption": [ + "(b) Distribution at $3^{\\mathrm{rd}}$ layer", + "Figure E.1: The presence of an outlier head in MoCo raises the average of normalized mutual information. This observation explains how the normalized mutual information in a couple of MoCo's self-attention layers is similar to or even surpasses that in SimMIM. Left: We present the standard deviation of the normalized mutual information. As depicted in this figure, the standard deviation in SimMIM remains relatively consistent across different depths. 
In contrast, the standard deviation in MoCo's $3^{\\mathrm{rd}}$ or $4^{\\mathrm{th}}$ self-attention layer is notably higher than that in SimMIM. Right: Distribution of mutual information for the third self-attention layer head. The visualization of this kernel density estimation shows that MoCo has an outlier head with mutual information close to 1.0. The red rectangles $(\\square)$ and blue triangles $(\\triangle)$ refer to the mutual information of heads in MoCo and SimMIM, respectively." + ], + "image_footnote": [], + "bbox": [ + 521, + 101, + 722, + 241 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D LOCALITY INDUCTIVE BIAS IMPROVES FINE-TUNING ACCURACY OF CL", + "text_level": 1, + "bbox": [ + 171, + 518, + 818, + 534 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In Section 2, we demonstrate that the homogeneity of self-attention map, i.e., attention collapse of CL, helps ViT distinguish images but harms fine-tuning accuracy. As a result, we anticipate that incorporating a locality inductive bias into CL will improve fine-tuning accuracy but degrade linear probing accuracy. One simple method to inject locality into self-attention is to limit the receptive field of self-attention by using attention masks.", + "bbox": [ + 169, + 550, + 565, + 662 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure D.1 shows the predictive performance of MoCo with restricted local self-attention. As expected, the results are similar to the performance of MIM; As the kernel size decreases, the linear probing accuracy decreases but the fine-tuning accuracy increases. 
These results are consistent with our findings.", + "bbox": [ + 169, + 667, + 565, + 753 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E A CLOSER LOOK AT", + "text_level": 1, + "bbox": [ + 171, + 773, + 380, + 787 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "THE ROLE OF SELF-SUPERVISED VIT LAYERS", + "text_level": 1, + "bbox": [ + 171, + 791, + 558, + 806 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/6528fc13531a5561df61e7e99e0a8b810a3b28433ca7b19f481fa92ee89206a5.jpg", + "image_caption": [ + "Figure C.1 visualizes the self-attention behaviors of different self-supervised learning methods in terms of attention distance and normalized mutual information. As depicted in the figure, all CLs and MIMs exhibit consistent properties. Similarly, Figure C.2 demonstrates that various sizes of models also demonstrate consistent properties.", + "Figure D.1: Locality inductive bias harms linear probing but improves fine-tuning. We report the linear probing and fine-tuning accuracy of MoCo with restricted self-attention via attention masks." + ], + "image_footnote": [], + "bbox": [ + 581, + 551, + 818, + 714 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The main text provides the key characteristics of CL and MIM. This section delves deeper into the details not covered in the main text to provide a more comprehensive understanding of the subjects.", + "bbox": [ + 169, + 823, + 823, + 852 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The role of the early modules. Figures 3 and 4 suggest that most layers of MoCo capture global patterns and have only a weak correlation with query tokens. However, one or two of MoCo layers exhibit unusual behavior. For example, the $3^{\\mathrm{rd}}$ layer of MoCo focuses on local areas and its self-attention map is dependent on the query. 
We explore this property in more detail.", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/41906592df7f4da9c6950d3212fe997066b0f7d7667a573cc916e1c431aea0f0.jpg", + "image_caption": [ + "(a) MoCo" + ], + "image_footnote": [], + "bbox": [ + 277, + 102, + 480, + 248 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/ea0f156b766cf9413d5aa69695375a437bb4c9362f681e9a2fc87e055aafbff9.jpg", + "image_caption": [ + "(b) SimMIM", + "Figure E.2: The tokens of MoCo form a cluster for each image, while those of SimMIM are intermingled. This aligns with the finding that, compared to SimMIM, MoCo is linearly separable. To demonstrate this property, we visualize 3,528 tokens (196 tokens $\\times$ 18 images) from the representations of the last layer via t-SNE, and find that a consistent pattern is observed even in the representations of the intermediate layers. The colors represent three different classes. See also Figures 6 and 7." + ], + "image_footnote": [], + "bbox": [ + 521, + 102, + 723, + 247 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure E.1a provides the variance of normalized mutual information with respect to heads. As the results show, the variance of SimMIM is consistent across all depths whereas that of MoCo is not. In particular, the $3^{\\mathrm{rd}}$ layer of MoCo has high variance even though other layers do not. 
This suggests that, while most of MoCo's self-attention heads capture global patterns and have weak correlation with query tokens, some heads deviate from this behavior and exhibit a different pattern.", + "bbox": [ + 169, + 369, + 823, + 441 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure E.1b shows the distribution of normalized mutual information among heads in the $3^{\mathrm{rd}}$ layer to analyze this phenomenon. In this figure, we use kernel density estimation with Gaussian kernel to visualize the distribution. The results reveal several outlier heads in MoCo with mutual information close to 1.0. As a result, these outliers significantly raise the average value of normalized mutual information.", + "bbox": [ + 169, + 446, + 825, + 516 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A comprehensive view through visualization of tokens from multiple images. Figure 6 visualizes how self-attention layers transform tokens from one or two images in representation space. The figure demonstrates that MoCo transforms all tokens in union while SimMIM transforms them individually. As a result, MoCo separates the representations at the image-level and SimMIM separates them at the token-level.", + "bbox": [ + 169, + 532, + 826, + 602 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The t-SNE visualization (Van der Maaten & Hinton, 2008) in Figure E.2 provides consistent results and offers even a more comprehensive perspective. In this figure, we visualize the last representations of 3528 tokens from 18 images that belong to three different classes. The visualization demonstrates that MoCo separates the representations into distinct classes and even images, while maintaining the tokens close together in compact image clusters. 
On the other hand, SimMIM separates tokens from images, resulting in a wide representation space for each image, but the images or even classes may be challenging to linearly distinguish.", + "bbox": [ + 169, + 608, + 823, + 707 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The first layer of MoCo aggregates tokens into compact clusters. Figures 6 and 7 show that all modules, except the first module, in MoCo behave consistently. However, we observe that MoCo's first module behaves differently and unusually than the others. We elaborate the behaviour of the first layer of module.", + "bbox": [ + 169, + 722, + 823, + 777 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure E.3a shows the qualitative visualization of tokens for a sample image, similar to Figure 6. This visualization shows that the first MoCo layer aggregates tokens into compact clusters. Although this figure only uses a single image, the layer aggregates all images into a small representation space as well. In terms of singular values, we observe consistent results. Similar to Figure 7, Figures E.3b and E.3c report the second largest log singular value, instead of the relative log singular value, to investigate the absolute volume of the representations. As expected, most layers in both MoCo and SimMIM increase the singular value, but surprisingly, the first layer of MoCo reduces the singular value, meaning that the volumes of representations are decreased at both the token-level and image-level. 
Based on these observations, we conjecture that the first module of MoCo behaves like an embedding component.", + "bbox": [ + 169, + 784, + 826, + 924 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/eb3d6f9af5ba9611bdfbe6c912a91ee24c14f2feb91040b2f6281e0bfe387850.jpg", + "image_caption": [ + "Figure E.3: The first layer of MoCo clumps tokens together. We demonstrate this property from two perspectives: qualitative visualization and singular value of token distribution. Left: Similar to Figure 6, we visualize tokens of a sample image in a representation space. The blue and red data points represent the tokens before and after the self-attention transformation. As shown in this figure, the first self-attention layer clumps tokens into a compact cluster. Middle and Right: Similar to Figure 7, we visualize the second largest log singular value (not $\\Delta$ log singular value) for depth. The singular value spectra demonstrate consistent results; the first layer of MoCo (gray area) not only clumps tokens but also images into a compact cluster." 
+ ], + "image_footnote": [], + "bbox": [ + 196, + 126, + 398, + 281 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/d64ede6f1b47ef1ed7c25065f01279e789d17e7c6c742fae29b0a2132fa6dec2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 429, + 126, + 620, + 281 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/0a8d7bc5980b846efd6f7189a450e1633c6199cb74793172b02bacd8d7c9461b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 630, + 126, + 821, + 281 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/88933cf42fcccccde418631f6f1e1e9d14fe0d056bbd93b74833adfe7c531890.jpg", + "image_caption": [ + "Figure F.1: Self-attention and representations in fine-tuned models exhibit consistency with those of pre-trained models. Similar to Figures 4 and 9, we present the normalized mutual information and the Fourier analysis results of fine-tuned models. The abbreviation \"ft\" stands for \"fine-tuned model.\" Left: Similar to pre-trained models, the mutual information of MoCo's self-attention maps is generally lower compared to that of SimMIM. However, it is noteworthy that the mutual information of the later self-attention maps in SimMIM decreases significantly. This is because the later layers of a model trained with supervision or fine-tuning tend to capture global information. Right: Similarly, SimMIM utilizes higher frequency information than MoCo." 
+ ], + "image_footnote": [], + "bbox": [ + 272, + 421, + 478, + 579 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/201e8fdeec51703e6d99d8f341cb390d5af57c3dd2a701c46eb2db4e81fa7963.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 421, + 725, + 579 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "F FINE-TUNED MODELS INHERIT THE PROPERTIES OF PRE-TRAINED MODELS", + "text_level": 1, + "bbox": [ + 169, + 720, + 764, + 753 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The main text focuses on highlighting the key properties of pre-trained models. This section demonstrates that these properties are also utilized by fine-tuned models. As a result, we can safely apply the insights gained from the main text to various situations.", + "bbox": [ + 169, + 772, + 826, + 815 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Consistent results in self-attention and Fourier analysis. Figures 3 and 4 demonstrate that MoCo captures global areas and that its self-attention are less related to the query tokens, compared with SimMIM. In addition, Figure 9 shows that MoCo captures low-frequency information as opposite to SimMIM. These results are consistent in the fine-tuning scheme.", + "bbox": [ + 169, + 832, + 825, + 888 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Figure F.1a reveals the self-attention behaviours of fine-tuned MoCo and SimMIM in terms of normalized mutual information. 
Similar to pre-trained models, the fine-tuned self-attention maps of", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ae0d609fe3927c5ee5f0360d0d1cc99ec1c299dc6fad161ea59db333747faa63.jpg", + "image_caption": [ + "(a) Stylized ImageNet" + ], + "image_footnote": [], + "bbox": [ + 243, + 103, + 517, + 276 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/6dd58ff76341c3cb4e49f52e1d68b72d5e52b09730b9b39e843e3415f0658cfe.jpg", + "image_caption": [ + "(b) Robustness for noise frequency", + "Figure F.2: Fine-tuned ViTs inherit the robustness against frequency-based noise. Similar to Figure 8b, we measure the decrease in the accuracy of ViTs fine-tuned with MoCo and SimMIM. Left: Even with fine-tuned ViTs, MoCo is relatively shape-biased and SimMIM relatively texture-biased. This bias is just less apparent than in linear probing models. Right: The robustness against frequency-based random noise also suggests the same: MoCo is robust against high-frequency noise, but SimMIM is not. In conclusion, fine-tuned models inherit the properties of linear probing models." + ], + "image_footnote": [], + "bbox": [ + 545, + 132, + 754, + 277 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "MoCo have generally lower mutual information compared to those of SimMIM. The only significant difference is that the mutual information of the later self-attention maps in fine-tuned SimMIM decreases significantly, as later layers in models trained with supervision or fine-tuning tend to capture more global information. As a result, the gap between the two methods is reduced. 
This is also reflected in the consistent results of Fourier analysis as shown in Figure F.1b. In this analysis, SimMIM captures higher-frequency information compared to MoCo in fine-tuning scheme as well. However, the later layers of SimMIM attempt to capture low-frequency information. Therefore, the gap of fine-tuned models is smaller than that of pre-trained models.", + "bbox": [ + 169, + 412, + 826, + 526 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "CL is shape-biased and MIM is texture-biased in fine-tuning scheme. In Figure 8, we demonstrate that linear probing model with CL (MoCo) is more shape-biased and that with MIM (SimMIM) is texture-biased, compared with each other. As in the experiment, we calculate the classification results of ImageNet fine-tuned MoCo and SimMIM on Stylized-ImageNet, and measure the decrease in accuracy against frequency-based random noise. As we would expected, Figure F.2 shows that the property also extends to the fine-tuned model. Even though we still observe the difference between MoCo and SimMIM, the performance gap between MoCo and SimMIM is quite reduced compared to the gap between the linear probing models.", + "bbox": [ + 169, + 540, + 826, + 652 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Later layers of CL and early layers of MIM are important in find-tuning phases. As shown in Figure 11, the later layers of the CL and the early layers of the MIM are linearly separable. This finding suggests that these layers are significant, however, it does not provide direct evidence that such properties are preserved during fine-tuning phases. We demonstrate that these layers play a crucial role in fine-tuning phases as well.", + "bbox": [ + 169, + 666, + 532, + 792 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To support this claim, we conduct a study to measure the accuracy drop of fine-tuned models using pretrained models with a few blocks initialized. 
As shown in Figure F.3a, the results indicate that the initializing a few early blocks in the pre-training models of SimMIM significantly harms the fine-tuning accuracy, compared to MoCo. These observations suggest that early layers of SimMIM play an important role in fine-tuning. Conversely, Figure F.3b shows that initializing later", + "bbox": [ + 169, + 799, + 532, + 925 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/ca4a5bdcfd162bc5e03d71ed980045dda92682b4358c070ecb137d3bd1b4fb84.jpg", + "image_caption": [ + "(a) Early layer", + "(b) Later layer", + "Figure F.3: The later layers of CL and early layers of MIM play a key role in the fine-tuning scheme. To show this, we initialize a few blocks and measure the decrease in the fine-tuning accuracy of pretrained models." + ], + "image_footnote": [], + "bbox": [ + 544, + 670, + 821, + 806 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "blocks in the pre-training models of SimMIM does not significantly harms the fine-tuning accuracy, suggesting that they are not important in fine-tuning compared with MoCo.", + "bbox": [ + 169, + 103, + 826, + 133 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "One limitation of this experiment is the evaluation of the accuracy drop in a single run. Since the accuracy drop of MoCo is marginally higher than that of SimMIM at the first initialization depth in Figure F.3b, additional experiments may improve the results. 
In this experiment, we utilized the same fine-tuning settings for both MoCo and SimMIM; but experiments with fine-tuning settings tailored to each method may provide further insight.", + "bbox": [ + 169, + 138, + 826, + 209 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "G HYBRID MODELS OUTPERFORM CL AND MIM IN DOWNSTREAM TASKS", + "text_level": 1, + "bbox": [ + 169, + 229, + 818, + 247 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The claim that CL and MIM are complementary is demonstrated only on ImageNet in Section 5. To validate this claim in tasks beyond ImageNet, we evaluated the pre-trained models of the hybrid method introduced in Section 5 for another classification task and a semantic segmentation task. In particular, we measured the accuracy on iNaturalist 2018 (Van Horn et al., 2018) and the mIoU on ADE20K (Zhou et al., 2019). As shown in Table G.1, the hybrid", + "bbox": [ + 169, + 260, + 480, + 398 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/798889eb7cea5efb9c1e7a77e37956cb55992745c85c03583dfdc68e4ca6174a.jpg", + "table_caption": [ + "Table G.1: Hybrid models of CL and MIM outperform both CL and MIM in various tasks." + ], + "table_footnote": [], + "table_body": "
λ (IMPORTANCE OF CL)iNat-18ADE20k
0.0 (SimMIM)62.135.4
0.2 (SimMIM + MoCo)68.842.2
1.0 (MoCo)66.239.7
", + "bbox": [ + 496, + 299, + 820, + 385 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "model of SimMIM and MoCo outperforms both SimMIM and MoCo in various downstream tasks. Therefore, we conclude that the effectiveness of this claim extends beyond ImageNet.", + "bbox": [ + 169, + 398, + 826, + 429 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/2023/What Do Self-Supervised Vision Transformers Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_model.json b/2023/What Do Self-Supervised Vision Transformers Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c1479ad7e2f58050e8c321f00eb4e204a70d87e1 --- /dev/null +++ b/2023/What Do Self-Supervised Vision Transformers Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_model.json @@ -0,0 +1,3395 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.808, + 0.149 + ], + "angle": 0, + "content": "WHAT DO SELF-SUPERVISED VISION TRANSFORMERS LEARN?" 
+ }, + { + "type": "text", + "bbox": [ + 0.181, + 0.169, + 0.761, + 0.185 + ], + "angle": 0, + "content": "Namuk Park\\(^{1*}\\) Wonjae Kim\\(^{2}\\) Byeongho Heo\\(^{2}\\) Taekyung Kim\\(^{2}\\) Sangdoo Yun\\(^{2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.185, + 0.51, + 0.199 + ], + "angle": 0, + "content": "\\(^{1}\\)Prescient Design, Genentech \\(^{2}\\)NAVER AI Lab" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.201, + 0.785, + 0.214 + ], + "angle": 0, + "content": "park.namuk@gene.com {wonjae.kim,bh.heo,taekyung.k,sangdoo.yun}@navercorp.com" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.25, + 0.547, + 0.265 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.281, + 0.771, + 0.504 + ], + "angle": 0, + "content": "We present a comparative study on how and why contrastive learning (CL) and masked image modeling (MIM) differ in their representations and in their performance of downstream tasks. In particular, we demonstrate that self-supervised Vision Transformers (ViTs) have the following properties: (1) CL trains self-attention to capture longer-range global patterns than MIM, such as the shape of an object, especially in the later layers of the ViT architecture. This CL property helps ViTs linearly separate images in their representation spaces. However, it also makes the self-attention collapse into homogeneity for all query tokens and heads. Such homogeneity of self-attention reduces the diversity of representations, worsening scalability and dense prediction performance. (2) CL utilizes the low-frequency signals of the representations, but MIM utilizes high-frequencies. Since low- and high-frequency information respectively represent shapes and textures, CL is more shape-oriented and MIM more texture-oriented. (3) CL plays a crucial role in the later layers, while MIM mainly focuses on the early layers. 
Upon these analyses, we find that CL and MIM can complement each other and observe that even the simplest harmonization can help leverage the advantages of both methods." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.53, + 0.338, + 0.545 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.56, + 0.827, + 0.632 + ], + "angle": 0, + "content": "Contrastive Learning (CL) (He et al., 2020; Chen et al., 2020a;b; 2021) has been the most popular self-supervised learning methods until recently. It aims to learn the invariant semantics of two random views (Tian et al., 2020a;b) by making global projections of representations similar for positive samples and dissimilar for negative samples. Since CL exploits the globally projected representations to contrast each other, it can be deemed as an \"image-level\" self-supervised learning approach." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.637, + 0.828, + 0.75 + ], + "angle": 0, + "content": "Deviating from CL, masked image modeling (MIM) (Bao et al., 2022; Xie et al., 2022b; He et al., 2022) has risen as a strong competitor of CL in the era of Vision Transformers (ViTs) (Dosovitskiy et al., 2021) with its impressive performances of downstream tasks. MIM trains ViTs by reconstructing the correct semantics of masked input patches. Unlike CL, it learns the semantics of patch tokens and this can be deemed as a \"token-level\" self-supervised learning approach. Since MIM outperforms CL in fine-tuning accuracy, it may appear prima facie as a more effective pre-training method than CL. However, a different trend is observed for linear probing accuracy with CL outperforming MIM (See Figure 1). For further exposition on CL and MIM, we refer the reader to Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.755, + 0.829, + 0.854 + ], + "angle": 0, + "content": "Then, which method—CL or MIM—should we use for the self-supervised learning of ViTs? 
Although both methods are widely used, little is known about what they learn. This paper sheds light on their nature by showing that ViTs trained through CL and MIM learn opposite knowledge. In particular, we raise questions to better understand self-supervised learning, and then find the answers that can potentially affect future improvements. The questions posed can be divided into the following properties of Vision Transformers: the behavior of self-attention, the transformation of the representations, and the position of lead role components. Our key questions and findings are elaborated below." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.86, + 0.828, + 0.904 + ], + "angle": 0, + "content": "How do self-attention behaviors? (Section 2) We find that CL primarily captures global relationships, while MIM captures local relationships. This implies that the representations of CL contain more global patterns, such as object shapes, than those of MIM. On the one hand, this property helps" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.606, + 0.924 + ], + "angle": 0, + "content": "*Most of this work was done while the author was at Naver AI Lab." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.101, + 0.396, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.101, + 0.609, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.102, + 0.82, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.254, + 0.828, + 0.396 + ], + "angle": 0, + "content": "Figure 1: CL outperforms MIM in linear probing and small model regimes. 
In contrast, MIM excels in fine-tuning, large model regimes, and dense prediction. Red squares (■) denote CL, and blue triangles (▲) denote MIM. By default, we report the performance of ViT-B trained or pretrained on ImageNet-1K. We use the results from original papers and He et al. (2022) for object detection. Regarding the scaling experiment, we report the results that we reproduced based on official configurations except with 100 epochs, marking them as \\(\\mathrm{MoCo}^{\\dagger}\\) and SimMIM†. Left: CL outperforms MIM in linear probing but underperforms in fine-tuning. Middle: CL outperforms MIM in small model regimes (ViT-Ti and ViT-S), and MIM shows superior scalability in large model regimes (ViT-L and ViT-H). Right: MIM outperforms CL in the dense prediction downstream tasks, such as object detection with Mask R-CNN (He et al., 2017) on COCO (Lin et al., 2014)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.406, + 0.827, + 0.504 + ], + "angle": 0, + "content": "CL recognize objects and distinguish images. On the other hand, however, it also suggests that CL struggles to preserve local information. In particular, we observe that self-attention of CL in the later layers for all query tokens and heads collapse into homogeneous attention maps. In such cases, most self-attention maps focus on object boundaries, meaning that they can capture object shapes but may lose interaction diversity between tokens. Consequently, CL and MIM each have advantages over different tasks: CL works well for linear probing and classification tasks with smaller models, whereas MIM outperforms CL in fine-tuning and dense prediction tasks with larger models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.511, + 0.825, + 0.637 + ], + "angle": 0, + "content": "How are representations transformed? (Section 3) CL transforms representations mainly based on image-level information, and its self-attentions collect information on object shape over entire tokens. 
This process makes tokens similar rather than diversifying them. As a result, CL distinguishes images well but has difficulty distinguishing tokens. On the contrary, MIM preserves and amplifies token-level information. Thus, the self-attentions for each token are substantially different and prohibit each token from including redundant information. We observe the consistent property from our Fourier analysis: CL primarily utilizes the low-frequency signals, but MIM utilizes high-frequencies. This observation suggests that CL is shape-biased and MIM is texture-biased. In sum, self-supervised models trained with CL and MIM learn the representations in different levels of detail." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.643, + 0.827, + 0.728 + ], + "angle": 0, + "content": "Which components play an important role? (Section 4) Analyses of the importance of each CL and MIM layer demonstrate that the later layers in CL and early layers in MIM play a key role. We interpret this as a consistent observation since early layers are usually known to capture low-level features—e.g., local patterns, high-frequency signals, and texture information—and later layers capture global patterns, low-frequency signals, and shape information (Dosovitskiy et al., 2021; Raghu et al., 2021; d'Ascoli et al., 2021; Graham et al., 2021; Dai et al., 2021; Park & Kim, 2022b)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.733, + 0.825, + 0.791 + ], + "angle": 0, + "content": "From the above analyses and insights, we find that CL and MIM can complement each other and show in Section 5 that even the simplest implementation, such as a linear combination of CL and MIM objectives, can take advantage of both methods. Surprisingly, the hybrid models outperform those pre-trained with either CL or MIM both in terms of fine-tuning and linear probing accuracy." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.809, + 0.53, + 0.825 + ], + "angle": 0, + "content": "2 HOW DO SELF-ATTENTIONS BEHAVE?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We point out that CL and MIM may not be silver bullets for all tasks, as shown in Figure 1. CL generally outperforms MIM in linear probing, while MIM dominates CL in the fine-tuning scheme. However, when we dissect the size of the model, CL outperforms MIM after fine-tuning for small models (cf. (Wang et al., 2022)), while MIM performs better on large models. Also, MIM yields effective representations for dense prediction tasks, such as object detection, but CL falls short on those tasks. This section explains these phenomena by investigating the behavior of self-attention." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.19, + 0.101, + 0.332, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.336, + 0.118, + 0.338, + 0.154 + ], + "angle": 0, + "content": "" + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.164, + 0.385, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.226, + 0.403, + 0.238 + ], + "angle": 0, + "content": "Depth \\(= 1\\)" + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.101, + 0.462, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.164, + 0.462, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.422, + 0.226, + 0.48, + 0.239 + ], + "angle": 0, + "content": "Depth \\( = 4 \\)" + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.101, + 
0.565, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.492, + 0.164, + 0.565, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.496, + 0.226, + 0.558, + 0.239 + ], + "angle": 0, + "content": "Depth \\(= 11\\)" + }, + { + "type": "image", + "bbox": [ + 0.581, + 0.101, + 0.655, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.581, + 0.164, + 0.655, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.59, + 0.226, + 0.646, + 0.239 + ], + "angle": 0, + "content": "Depth \\(= 1\\)" + }, + { + "type": "image", + "bbox": [ + 0.658, + 0.101, + 0.732, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.658, + 0.164, + 0.732, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.667, + 0.226, + 0.723, + 0.239 + ], + "angle": 0, + "content": "Depth \\(= 4\\)" + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.101, + 0.808, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.164, + 0.808, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.74, + 0.226, + 0.802, + 0.239 + ], + "angle": 0, + "content": "Depth \\(= 11\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.422, + 0.247, + 0.479, + 0.26 + ], + "angle": 0, + "content": "(a) MoCo" + }, + { + "type": "image_caption", + "bbox": [ + 0.658, + 0.247, + 0.731, + 0.26 + ], + "angle": 0, + "content": "(b) SimMIM" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.271, + 0.828, + 0.37 + ], + "angle": 0, + "content": "Figure 2: Self-attention of CL (MoCo) capture global relationships, but they collapse into homogeneous attention maps for all query tokens and heads. Self-attention of MIM (SimMIM) mainly focus on local areas. 
We visualize the attention maps for two different query tokens in the beginning through the end layers. We omit the results for self-attention heads, which show mostly consistent results. Left: Self-attention of CL capture global patterns and the shape of an object. However, all attention maps capture the same shape information regardless of the query tokens. Right: Self-attention of MIM capture local patterns and are correlated with query tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.388, + 0.825, + 0.446 + ], + "angle": 0, + "content": "Our analyses mainly compare ViT-B/16 pre-trained on ImageNet-1K (Russakovsky et al., 2015) with MoCo v3 (Chen et al., 2021) and SimMIM (Xie et al., 2022b). We use the ImageNet validation images for our experiments. We observe that other methods, e.g., DINO (Caron et al., 2021), BEiT (Bao et al., 2022), and MAE (He et al., 2022), have consistent properties (See Figure C.1)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.462, + 0.584, + 0.546 + ], + "angle": 0, + "content": "CL mainly captures global relationships. We measure the ranges of self-attention via attention distance (Dosovitskiy et al., 2021). Attention distance is defined as the average distance between the query tokens and key tokens considering their self-attention weights. Therefore, it conceptually corresponds to the size of the receptive fields in CNNs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.552, + 0.584, + 0.679 + ], + "angle": 0, + "content": "Figure 3 shows that the attention distance of CL (MoCo) is significantly higher than that of MIM (SimMIM), especially in the later layers. As seen in Figure 2, the qualitative visualization, this implies that the representations of CL contain global patterns and shape information, so CL can help ViTs distinguish between objects of images. Conversely, the self-attention of MIM mainly capture local relationships; i.e., MIM may have difficulty recognizing whole objects and their shapes. 
Section 3 also discuss this claim from a representational perspective." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.695, + 0.584, + 0.835 + ], + "angle": 0, + "content": "Self-attention of CL collapse into homogeneity. We observe an interesting behavior of CL in Figure 2, which shows the attention maps for query tokens from two different spatial locations. The self-attention of CL surprisingly indicate almost identical object shapes for the two query tokens, compared to that of MIM. We describe this phenomenon as an attention collapse into homogeneity. This collapsing trend in the self-attention of CL is observed across all the heads and query tokens. In contrast, the self-attention of MIM are more faithful to the two query tokens, as expected." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.584, + 0.911 + ], + "angle": 0, + "content": "We use normalized mutual information (NMI) (Strehl & Ghosh, 2002) to measure the attention collapse. Let \\( p(q) \\) be a distribution of query tokens, and assume that these query tokens are uniformly distributed since a single query token is given for each spatial coordinate, i.e., \\( p(q) = 1 / N \\) where \\( N \\) is the number" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.91, + 0.825, + 0.926 + ], + "angle": 0, + "content": "of the tokens. Then the joint distribution of query and key tokens is \\( p(q, k) = \\pi(k|q)p(q) \\) where" + }, + { + "type": "image", + "bbox": [ + 0.602, + 0.466, + 0.819, + 0.61 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.593, + 0.621, + 0.826, + 0.673 + ], + "angle": 0, + "content": "Figure 3: Effective receptive fields of CL are global, but those of MIM are local. This is particularly evident in the later layers." 
+ }, + { + "type": "image", + "bbox": [ + 0.599, + 0.701, + 0.816, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.592, + 0.851, + 0.826, + 0.904 + ], + "angle": 0, + "content": "Figure 4: Self-attentions of CL have little to do with query tokens. Normalized MI of CL is significantly lower than that of MIM in the later layers." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.103, + 0.391, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.248, + 0.342, + 0.261 + ], + "angle": 0, + "content": "(a) Between heads" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.103, + 0.605, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.441, + 0.248, + 0.558, + 0.262 + ], + "angle": 0, + "content": "(b) Between depths" + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.102, + 0.821, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.656, + 0.248, + 0.771, + 0.261 + ], + "angle": 0, + "content": "(c) Between tokens" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.272, + 0.828, + 0.373 + ], + "angle": 0, + "content": "Figure 5: CL lacks representational diversity in the later layers. We measure cosine similarities of representations in the self-attentions between the heads (left), depths (middle), and spatial coordinates (right). All of the results show that the representational similarity of later self-attentions of CL is higher than that of MIM. Increasing heads or depths of CL is not effective in improving the diversity. Left: The similarity of representations from two heads in self-attention. 
Middle: The similarity between representations before and after self-attention transform them. Right: The similarities of representations at two spatial coordinates. ViT-\\(\\{\\mathrm{S}, \\mathrm{L}\\}\\) is trained with 100 epochs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.825, + 0.463 + ], + "angle": 0, + "content": "\\(\\pi (k|q)\\) is the softmax-normalized self-attention matrix. Thus, the normalized mutual information is \\(\\frac{I(q,k)}{\\sqrt{H(q)H(k)}}\\) where \\(I(\\cdot ,\\cdot)\\) is the mutual information and \\(H(\\cdot)\\) is the marginal entropy. Low mutual information values show that attention maps are less dependent on the query tokens, implying an attention collapse into homogeneity. Conversely, high mutual information means that the attention maps strongly depend on the query tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.469, + 0.827, + 0.513 + ], + "angle": 0, + "content": "Figure 4 shows the degree of attention collapse in terms of the normalized mutual information (NMI). Results show that the mutual information of CL is significantly lower than that of MIM in the later layers, suggesting that the self-attention of CL tend to collapse into homogeneous distributions." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.526, + 0.827, + 0.597 + ], + "angle": 0, + "content": "Attention collapse reduces representational diversity. We conjecture that the self-attention collapse into homogeneity eventually leads to homogeneous token representations. To support this argument, we measure representational cosine similarities. In particular, we design three similarities: between different self-attention heads (heads), between the before and after self-attention layers (depths), and between different tokens (tokens)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.603, + 0.828, + 0.689 + ], + "angle": 0, + "content": "Figure 5 shows the results, reporting the representation similarities for heads, depths, and tokens. As expected, the similarities of CL are notably higher than those of MIM in the later layers, indicating that the representations of CL have significant homogeneity. Even increasing the model size does not solve the problem CL has and may rather worsen it. Increasing the number of heads (ViT-S to ViT-B; Figure 5a) improves the representational diversity of MIM, but hardly improves the diversity of CL. Increasing the depth of CL (ViT-B to ViT-L; Figure 5b) only adds redundant modules." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.701, + 0.825, + 0.759 + ], + "angle": 0, + "content": "Implications of the behaviors we observed. In conclusion, the self-attention of CL captures global patterns and shapes of objects. However, CL suffers from the problem of attention collapse into homogeneity, which reduces the diversity of token representations. On the other hand, MIM primarily captures local patterns and thus does not suffer from the attention collapse problem." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.733, + 0.78 + ], + "angle": 0, + "content": "The behaviors mentioned above can explain the phenomena we observed in Figure 1:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.79, + 0.827, + 0.833 + ], + "angle": 0, + "content": "- CL outperforms MIM in linear probing tasks because it captures shapes, which helps recognize objects and distinguish images. Although MIM preserves the texture and diversity of representations, their correlation with objects or content may not be as strong as shapes do." + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.836, + 0.828, + 0.893 + ], + "angle": 0, + "content": "- The attention collapse prohibits CL from fully exploiting heads, depths, and tokens of ViTs. 
Since homogeneous representations are not very helpful in improving token representations, ViTs trained with CL waste a large part of network capability. Therefore, the fine-tuning accuracy of MIM is significantly higher than CL in large models." + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "- CL is not suitable for dense prediction since the token features are homogeneous with respect to their spatial coordinates." + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.79, + 0.828, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.215, + 0.134, + 0.365, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.247, + 0.352, + 0.261 + ], + "angle": 0, + "content": "(a) MoCo (one image)" + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.104, + 0.577, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.428, + 0.247, + 0.571, + 0.261 + ], + "angle": 0, + "content": "(b) MoCo (two images)" + }, + { + "type": "image", + "bbox": [ + 0.628, + 0.113, + 0.796, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.638, + 0.247, + 0.79, + 0.261 + ], + "angle": 0, + "content": "(c) SimMIM (one image)" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.272, + 0.828, + 0.399 + ], + "angle": 0, + "content": "Figure 6: Self-attention layers of CL and MIM transform representations differently. We visualize 196 spatial representation tokens for an example validation image in a representation space. 
The blue \\((\\bullet)\\) and red \\((\\bullet)\\) data points denote the tokens before and after the self-attention transformation. Left: The self-attentions of CL (e.g., MoCo) translate all the tokens equally, so the distances between the tokens of an image do not increase. Middle: However, CL moves the \"centers of representations (represented by \\(\\times\\))\" away from each other. Therefore, the images are linearly separable. The circle \\((\\bullet)\\) and triangle \\((\\triangle)\\) data represent tokens from different images. Right: The self-attentions of MIM (e.g., SimMIM) transform representations differently according to query tokens, thus increasing the distances between tokens. See Figure 7 for quantitative analyses." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.418, + 0.827, + 0.461 + ], + "angle": 0, + "content": "We further investigate the self-attention's behavior with restricted receptive fields in Figure D.1. As shown in the experiment, locally restricted self-attentions lead to lower linear probing but higher fine-tuning accuracy, which is consistent with our observations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.482, + 0.6, + 0.499 + ], + "angle": 0, + "content": "3 HOW ARE REPRESENTATIONS TRANSFORMED?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.515, + 0.828, + 0.559 + ], + "angle": 0, + "content": "In this section, we analyze the token representations of ViTs pre-trained with CL and MIM to demonstrate how the properties of self-attentions we observed in Section 2 affect the representations differently. We use the same pre-trained ViT-B/16 models by default just as we did in Section 2." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.575, + 0.825, + 0.66 + ], + "angle": 0, + "content": "CL transforms all tokens in unison, while MIM does so individually. To show how CL and MIM transform token representations, we visualize them in representation space. 
Figure 6 shows 196 (14×14 patches) tokens before and after self-attention modules from a single image sample of the ImageNet validation set. We use the three large singular vectors obtained via singular value decomposition (SVD) as the bases of the space. To better visualize this, we display the representation of MoCo and SimMIM in their crucial layers—the last layer and the first layer, respectively." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.666, + 0.827, + 0.75 + ], + "angle": 0, + "content": "Figure 6a visualizes the changes that occur in the tokens of CL when transformed by self-attention module; it indicates that the self-attention of CL translate all tokens in unison. This phenomenon occurs because the self-attention maps of CL are homogeneous, i.e., self-attention is almost independent of the spatial coordinates and query tokens. Therefore, the modules add near-constant to all the token representations. As a result, the inter-representation distance and the volume of representations do not increase, which implies that CL cares less about individual tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.757, + 0.827, + 0.813 + ], + "angle": 0, + "content": "Nevertheless, self-attention are essential for the discriminative power of CL. As shown in Figure 6b, they help distinguish images by moving \"the centers of the representation distribution\" away from each other. In short, this figure suggests that CL makes the image linearly separable even though it loses the ability to distinguish tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.82, + 0.826, + 0.862 + ], + "angle": 0, + "content": "In contrast, MIM applies a different transformation to individual tokens, as shown in Figure 6c, because different self-attention are assigned to the individual spatial tokens. Thus, MIM alters the distance between tokens of a single image as well as the volume of the representation distribution." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.869, + 0.826, + 0.926 + ], + "angle": 0, + "content": "We find consistent results in quantitative analysis. Inspired by Jing et al. (2022), Figure 7 visualizes singular value spectra for tokens and images. A singular value spectrum provides singular values of a representation distribution obtained by SVD, so we can use it to represent the effective volume of distributions in a representation space. The higher the singular value in a spectrum, the larger the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.101, + 0.382, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.387, + 0.101, + 0.565, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.104, + 0.768, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.29, + 0.231, + 0.71, + 0.246 + ], + "angle": 0, + "content": "(a) Singular value spectrum of tokens from a single image (token-level)" + }, + { + "type": "image", + "bbox": [ + 0.229, + 0.254, + 0.38, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.384, + 0.253, + 0.567, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.256, + 0.772, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.382, + 0.653, + 0.397 + ], + "angle": 0, + "content": "(b) Singular value spectrum of images (image-level)" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.407, + 0.828, + 0.52 + ], + "angle": 0, + "content": "Figure 7: CL barely changes or even decreases the distribution 
volume of tokens from a single image, implying that it hardly distinguishes between tokens. Instead, it significantly increases the distribution volume of images. To demonstrate these properties, we visualize singular value spectra, the singular values of the distribution of representations sorted by the magnitude. The higher a singular value, the larger the volume of a distribution. The right of this figure shows the \\(64^{\\text{th}}\\) and \\(128^{\\text{th}}\\) highest singular value for depth. Top: Singular value spectra of tokens from a single image. CL decreases the singular values of the tokens, but MIM increases them. Bottom: Singular value spectra of images. CL significantly increases the volumes occupied by images, but MIM hardly does so." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.535, + 0.825, + 0.565 + ], + "angle": 0, + "content": "volume of a representation distribution. To calibrate the scale, we use the relative log singular value \\((\\Delta \\log \\text{ singular value})\\), the difference with the (second) largest singular value for a depth." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.569, + 0.827, + 0.68 + ], + "angle": 0, + "content": "Figure 7a shows singular value spectra of tokens from a single image. We calculate them for each image in the ImageNet validation set and report averaged singular values over the dataset. In this figure, the CL layers hardly increase or even decrease the singular value; consistent with the explanation above, this implies that CL hardly distinguishes tokens. In contrast, MIM increases the singular value, meaning that it changes the volume of tokens and can distinguish tokens. Another interesting observation is that a few later layers of MIM decrease the volume, even though they capture local patterns as shown in Figures 3 and 4. This is because they behave like decoders. Section 4 discusses this in detail." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.688, + 0.828, + 0.745 + ], + "angle": 0, + "content": "Figure 7b shows the singular value spectra of images. We average all tokens in an image to build an image-level representation vector and conduct a singular value spectrum over the collection of representations in the validation set. As opposed to the previous case, the representational volume of CL is larger than that of MIM, which implies that CL makes the image-level representation separable." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.761, + 0.828, + 0.86 + ], + "angle": 0, + "content": "CL exploits low-frequencies, and MIM exploits high-frequencies. We hypothesize that CL captures low-frequency and MIM captures high-frequency information in spatial dimensions since CL provides image-level self-supervision to capture global patterns, while MIM provides token-level self-supervision to exploit local patterns. To support this argument from a frequency perspective, we conduct a Fourier analysis of the representations as following Park & Kim (2022b). In particular, we report the relative log amplitude of Fourier-transformed representations by calculating the amplitude difference between the highest and lowest frequencies of representations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.866, + 0.828, + 0.922 + ], + "angle": 0, + "content": "Figure 9 visualizes the relative amplitudes of CL and MIM. It shows that the high-frequency amplitude of CL is significantly smaller than that of MIM, suggesting that CL mainly utilizes low-frequency spatial information such as global structures and shapes. On the contrary, MIM usually uses high-frequency spatial information such as narrow structures and fine textures." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.244, + 0.104, + 0.51, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.307, + 0.284, + 0.441, + 0.298 + ], + "angle": 0, + "content": "(a) Stylized ImageNet" + }, + { + "type": "image", + "bbox": [ + 0.542, + 0.131, + 0.761, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.547, + 0.284, + 0.756, + 0.299 + ], + "angle": 0, + "content": "(b) Robustness for noise frequency" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.308, + 0.828, + 0.435 + ], + "angle": 0, + "content": "Figure 8: CL is biased toward shape, whereas MIM is biased toward texture. We report the predictive results of models for linear probing tasks. However, we observe consistent results in fine-tuned models (See Figure F.2). Left: Result of classification on Stylized ImageNet. It shows that CL is more shape-biased than MIM and even than the supervised pre-trained model. Vertical lines represent averaged results for the shape categories. We also report the results of supervised ViT with ImageNet-1K class labels for comparison. Right: Accuracy drops on images with frequency-based random noises. MIM shows a more significant amount of accuracy drop than CL with high-frequency noises, demonstrating MIM's texture-biased property. The frequency window size of the frequency-based noise is \\(0.1\\pi\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.454, + 0.565, + 0.553 + ], + "angle": 0, + "content": "Another interesting finding is that the last few layers of MIM reduce the high frequencies even though they only focus on local areas (See Figure 3). 
We conjecture that MIM implicitly divides ViTs into the encoder-decoder structure and allows intermediate layers to have linearly separable information. In contrast, CL allows the last layer to have such information. This is further elaborated in Figure 11." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.569, + 0.566, + 0.737 + ], + "angle": 0, + "content": "CL is shape-biased, but MIM is texture-biased. Based on the results of the Fourier analysis, we assume that CL and MIM each have a bias toward shapes and textures, respectively. To demonstrate this claim, we use Stylized ImageNet (Geirhos et al., 2019), a texture-altered dataset, by using AdaIN (Huang & Belongie, 2017). Figure 8a reports the linear probing results on Stylized ImageNet to evaluate the shape and texture biases of pre-trained models. Compared to the model pre-trained with supervised learning, CL depends more on the shape and MIM depends on texture of images to classify images. In other words, CL is robust to texture changes, and MIM is vulnerable to them." + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.459, + 0.817, + 0.609 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.572, + 0.62, + 0.828, + 0.732 + ], + "angle": 0, + "content": "Figure 9: CL exploits low-frequency, but MIM exploits high-frequency. Moreover, a few last layers of CL reduce high-frequency by capturing global patterns. MIM also reduces it even though they capture local patterns, because the later layers behave like decoders. See also Figure 11." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.743, + 0.825, + 0.829 + ], + "angle": 0, + "content": "Figure 8b shows the consistent results. In this experiment, we follow Park & Kim (2022a;b) and measure the decrease in accuracy on the ImageNet dataset with frequency-based random noise. The results suggest that CL is robust to high-frequency noises, but MIM is significantly more vulnerable to them. 
Since high-frequency noises harm the fine details of images, we arrive at the same conclusion that CL is more shape-biased and MIM is texture-biased. This can explain the robustness of CL against adversarial perturbations (Bordes et al., 2022)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.849, + 0.637, + 0.866 + ], + "angle": 0, + "content": "4 WHICH COMPONENTS PLAY AN IMPORTANT ROLE?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "The previous sections consistently show through various perspectives that CL exploits image-level global patterns while MIM captures token-level local patterns. This section analyzes pre-trained ViTs from an architectural perspective and shows that the key components in CL and MIM are different." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.188, + 0.101, + 0.488, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.286, + 0.234, + 0.39, + 0.248 + ], + "angle": 0, + "content": "(a) Self-attention" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.1, + 0.813, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.599, + 0.234, + 0.719, + 0.248 + ], + "angle": 0, + "content": "(b) Fourier analysis" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.258, + 0.828, + 0.357 + ], + "angle": 0, + "content": "Figure 10: The explicit decoder architecture of MAE helps ViTs effectively leverage the advantages of MIM. We analyze the encoder and decoder of a pre-trained model with a masking ratio of zero. The left side of each figure represents the encoder and the right side the decoder. 
Left: The mutual information of MAE is lower than that of SimMIM in the encoder but higher in the decoder. Right: The decoder of MAE captures low-frequency information, and its encoder captures high-frequency information. Moreover, the later layers (excluding the last layer) of MAE do not reduce high-frequency information, while those of SimMIM do." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.371, + 0.828, + 0.429 + ], + "angle": 0, + "content": "Later layers of CL and early layers of MIM are important. According to studies on ViT (Graham et al., 2021; Dai et al., 2021; Park & Kim, 2022b), the later layers use high-level information, and the early layers exploit low-level information. Since CL and MIM each exploit global and local patterns, we expect that the later layers of CL and early layers of MIM play a key role." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.434, + 0.566, + 0.726 + ], + "angle": 0, + "content": "To evaluate the importance of each layer, we measure the linear probing accuracy using intermediate representations with the configuration of Table A.1. In Figure 11, we observe the following properties: First, the linear probing accuracy of MIM is higher than that of CL at the beginning. Conversely, CL outperforms MIM at the end of the model. Such result indicates that the later layers of CL and early layers of MIM play an important role in making linearly separable representations. Second, the accuracy of CL increases with increasing depth as expected, but the accuracy of MIM surprisingly decreases at the end of the model, i.e., the later layers of MIM are not very helpful in separating representations. We explain this observation as a phenomenon in which MIM methods with shallow prediction heads, e.g., SimMIM, use later layers of the backbone as a decoder. Therefore, MIM with a deep self-attention decoder, e.g., MAE (He et al., 2022), can be useful for linear probing performance. 
Moreover, it also explains why SimMIM's high-frequency component and representational volumes drop in the later layers as shown in Figures 7 and 9. Third, even the highest linear probing accuracy of MIM is lower than that of CL." + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.44, + 0.817, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.572, + 0.607, + 0.828, + 0.707 + ], + "angle": 0, + "content": "Figure 11: Later layers of CL and early layers of MIM play a key role. We report linear probing accuracies by using the representations of the intermediate layers. CL outperforms MIM in later layers, and MIM outperforms CL in early layers." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.742, + 0.827, + 0.801 + ], + "angle": 0, + "content": "The explicit decoder helps ViTs further leverage the advantages of MIM. Several previous observations find that the implicit decoder of MIM with a shallow prediction head, such as SimMIM, can impair performance. MAE (He et al., 2022) addresses this problem by introducing deep explicit ViT decoders and reconstructing masked tokens only in the separate decoders." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.806, + 0.826, + 0.862 + ], + "angle": 0, + "content": "In Figure 10, we analyze MAE to understand the properties of decoders more deeply. Figure 10a shows the self-attention behaviors. The results indicate that the mutual information of MAE is lower than that of SimMIM in the later layers of the encoder but higher in the decoder, implying that the decoder reconstructs masked tokens based on its neighborhood tokens." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Figure 10b shows the results of the Fourier analysis. As explained in Figure 9, the last four layers of SimMIM reduce the high-frequency components. In contrast, the later layers (excluding the last layer) of MAE do not reduce them. 
Instead, the decoder of MAE prioritizes low-frequency information compared with the encoder, allowing the backbone to efficiently utilize high-frequency information." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.187, + 0.101, + 0.396, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.248, + 0.338, + 0.261 + ], + "angle": 0, + "content": "(a) Performance" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.101, + 0.601, + 0.241 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.445, + 0.248, + 0.548, + 0.261 + ], + "angle": 0, + "content": "(b) Self-attention" + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.102, + 0.813, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.648, + 0.248, + 0.765, + 0.262 + ], + "angle": 0, + "content": "(c) Fourier analysis" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.272, + 0.828, + 0.385 + ], + "angle": 0, + "content": "Figure 12: The simple linear combination of CL (MoCo) and MIM (SimMIM) objectives outperforms the vanilla CL and MIM. \\(\\lambda\\) is the importance weight of CL, so \\(\\lambda = 0\\) means SimMIM and \\(\\lambda = 1\\) means MoCo. Left: \"CL + MIM\" outperforms CL and MIM in both linear probing and fine-tuning accuracy. Middle: Mutual information of \"CL + MIM\" decreases at the end of the model, suggesting that the self-attention of later layers collapse into homogeneity and capture the same object shape information. Right: Fourier analysis shows that \"CL + MIM\" amplifies high frequencies at the beginning and reduces them at the end. 
It implies that \"CL + MIM\" exploits high-frequency information at the beginning and low-frequency information at the end." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.4, + 0.733, + 0.417 + ], + "angle": 0, + "content": "5 ARE THE TWO METHODS COMPLEMENTARY TO EACH OTHER?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.432, + 0.827, + 0.489 + ], + "angle": 0, + "content": "We present comparative analyses on CL and MIM from three perspectives: self-attention, representation transforms, and the position of important layers. All of our results indicate that CL and MIM train ViTs differently. These differences naturally imply that combining CL and MIM to train a backbone may help leverage the advantages of both methods." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.828, + 0.664 + ], + "angle": 0, + "content": "To show that CL and MIM are complementary, we introduce the simplest way to harmonize CL and MIM by linearly combining two losses, i.e., \\(\\mathcal{L} = (1 - \\lambda)\\mathcal{L}_{\\mathrm{MIM}} + \\lambda \\mathcal{L}_{\\mathrm{CL}}\\) where \\(\\mathcal{L}_{\\mathrm{MIM}}\\) and \\(\\mathcal{L}_{\\mathrm{CL}}\\) each indicate the losses of MIM and CL, and \\(\\lambda\\) is the importance weight of CL. We find that this simple hybrid model trained with combined losses efficiently exploits the strengths of both methods. Figure 12a shows linear probing and fine-tuning accuracy on ImageNet with varying \\(\\lambda\\). Surprisingly, the hybrid models outperform MIM (\\(\\lambda = 0\\)) and CL (\\(\\lambda = 1\\)) in both aspects. Figure 12b and Figure 12c can provide insights on how hybrid models behave by analyzing the model with \\(\\lambda = 0.2\\) in terms of self-attention in Section 2 and Fourier analysis in Section 3, respectively; both results show that the hybrid model exploits MIM properties in the early layers and CL properties in the later layers. 
In particular, Figure 12b indicates that the self-attention of the early layers are changed according to the query token but those of the later layers are not. Likewise, Figure 12c shows that the early layers exploit high-frequency, while the later layers try to exploit low-frequency." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.682, + 0.321, + 0.698 + ], + "angle": 0, + "content": "6 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.713, + 0.828, + 0.812 + ], + "angle": 0, + "content": "We conducted a comparative study highlighting various facets of two widely used self-supervised learning methods for vision transformers: contrastive learning (CL) and masked image modeling (MIM). The study demonstrated many opposing properties of the two methods: image information (image-level vs. token-level; as in Section 2), feature representations (low-frequency vs. high-frequency; as in Section 3), and lead role components (later layers vs. early layers; as in Section 4). Furthermore, we suggested a possible application that exploits only the benefits from both methods and showed how a combined model can outperform individual methods." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Future directions. Various future directions can be explored based on our study. We believe that there are better ways than a simple linear combination of CL and MIM objectives. For example, a novel self-supervised learning approach, in which CL is applied in the later layers and MIM in the early layers, can be considered. Moreover, we may extend our findings on self-supervision for multi-stage ViTs, such as PiT (Heo et al., 2021) and Swin (Liu et al., 2021). Another interesting direction is to enhance the individual properties of CL and MIM. Techniques that help CL or MIM learn shapes or textures, respectively, may also improve performance." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.103, + 0.289, + 0.119 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.135, + 0.826, + 0.164 + ], + "angle": 0, + "content": "Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. In International Conference on Learning Representations, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.175, + 0.826, + 0.203 + ], + "angle": 0, + "content": "Florian Bordes, Randall Balestriero, and Pascal Vincent. High fidelity visualization of what your self-supervised representation knows about. Transactions on Machine Learning Research, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.215, + 0.827, + 0.257 + ], + "angle": 0, + "content": "Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In International Conference on Computer Vision, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.268, + 0.827, + 0.309 + ], + "angle": 0, + "content": "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International Conference on Machine Learning, 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.322, + 0.827, + 0.35 + ], + "angle": 0, + "content": "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.362, + 0.825, + 0.39 + ], + "angle": 0, + "content": "Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.401, + 0.825, + 0.429 + ], + "angle": 0, + "content": "Xinlei Chen, Saining Xie, and Kaiming He. An empirical study of training self-supervised vision transformers. In International Conference on Computer Vision, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.44, + 0.825, + 0.482 + ], + "angle": 0, + "content": "Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Advances in Neural Information Processing Systems, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.494, + 0.825, + 0.523 + ], + "angle": 0, + "content": "Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. Coatnet: Marrying convolution and attention for all data sizes. Advances in Neural Information Processing Systems, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.534, + 0.826, + 0.59 + ], + "angle": 0, + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.601, + 0.827, + 0.643 + ], + "angle": 0, + "content": "Stéphane d'Ascoli, Hugo Touvron, Matthew L Leavitt, Ari S Morcos, Giulio Biroli, and Levent Sagun. Convit: Improving vision transformers with soft convolutional inductive biases. In International Conference on Machine Learning, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.654, + 0.827, + 0.709 + ], + "angle": 0, + "content": "Robert Geirhos, Patricia Rubisch, Claudio Michaelis, Matthias Bethge, Felix A Wichmann, and Wieland Brendel. Imagenet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness. In International Conference on Learning Representations, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.722, + 0.827, + 0.764 + ], + "angle": 0, + "content": "Benjamin Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, and Matthijs Douze. Levit: a vision transformer in convnet's clothing for faster inference. In International Conference on Computer Vision, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.775, + 0.827, + 0.831 + ], + "angle": 0, + "content": "Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. Advances in neural information processing systems, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.843, + 0.825, + 0.872 + ], + "angle": 0, + "content": "Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.883, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.135, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.827, + 0.2 + ], + "angle": 0, + "content": "Byeongho Heo, Sangdoo Yun, Dongyoon Han, Sanghyuk Chun, Junsuk Choe, and Seong Joon Oh. Rethinking spatial dimensions of vision transformers. In International Conference on Computer Vision, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.209, + 0.825, + 0.24 + ], + "angle": 0, + "content": "Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European Conference on Computer Vision, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.249, + 0.827, + 0.279 + ], + "angle": 0, + "content": "Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In Proceedings of the IEEE international conference on computer vision, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.288, + 0.827, + 0.33 + ], + "angle": 0, + "content": "Li Jing, Pascal Vincent, Yann LeCun, and Yuandong Tian. Understanding dimensional collapse in contrastive self-supervised learning. In International Conference on Learning Representations, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.341, + 0.825, + 0.384 + ], + "angle": 0, + "content": "Hanjoo Kim, Minkyu Kim, Dongjoo Seo, Jinwoong Kim, Heungseok Park, Soeun Park, Hyunwoo Jo, KyungHyun Kim, Youngil Yang, Youngkwan Kim, et al. Nsml: Meet the mlaas platform with a real-world case study. arXiv preprint arXiv:1810.09957, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.393, + 0.825, + 0.438 + ], + "angle": 0, + "content": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.447, + 0.825, + 0.49 + ], + "angle": 0, + "content": "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In International Conference on Computer Vision, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.5, + 0.825, + 0.53 + ], + "angle": 0, + "content": "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.539, + 0.827, + 0.568 + ], + "angle": 0, + "content": "Namuk Park and Songkuk Kim. Blurs behave like ensembles: Spatial smoothings to improve accuracy, uncertainty, and robustness. In International Conference on Machine Learning, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.578, + 0.825, + 0.608 + ], + "angle": 0, + "content": "Namuk Park and Songkuk Kim. How do vision transformers work? In International Conference on Learning Representations, 2022b." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.617, + 0.827, + 0.673 + ], + "angle": 0, + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.684, + 0.827, + 0.727 + ], + "angle": 0, + "content": "Maithra Raghu, Thomas Unterthiner, Simon Kornblith, Chiyuan Zhang, and Alexey Dosovitskiy. Do vision transformers see like convolutional neural networks? Advances in Neural Information Processing Systems, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.737, + 0.827, + 0.793 + ], + "angle": 0, + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.803, + 0.827, + 0.834 + ], + "angle": 0, + "content": "Alexander Strehl and Joydeep Ghosh. Cluster ensembles—a knowledge reuse framework for combining multiple partitions. Journal of machine learning research, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.843, + 0.825, + 0.886 + ], + "angle": 0, + "content": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.896, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. In European Conference on Computer Vision, 2020a." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Yonglong Tian, Chen Sun, Ben Poole, Dilip Krishnan, Cordelia Schmid, and Phillip Isola. What makes for good views for contrastive learning? Advances in Neural Information Processing Systems, 33:6827-6839, 2020b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.155, + 0.825, + 0.185 + ], + "angle": 0, + "content": "Yuandong Tian, Xinlei Chen, and Surya Ganguli. Understanding self-supervised learning dynamics without contrastive pairs. In International Conference on Machine Learning, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.194, + 0.825, + 0.223 + ], + "angle": 0, + "content": "Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.231, + 0.827, + 0.288 + ], + "angle": 0, + "content": "Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8769-8778, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.296, + 0.825, + 0.326 + ], + "angle": 0, + "content": "Shaoru Wang, Jin Gao, Zeming Li, Jian Sun, and Weiming Hu. A closer look at self-supervised lightweight vision transformers. arXiv preprint arXiv:2205.14443, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.334, + 0.825, + 0.377 + ], + "angle": 0, + "content": "Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong, and Lei Li. Dense contrastive learning for self-supervised visual pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.385, + 0.827, + 0.43 + ], + "angle": 0, + "content": "Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.437, + 0.827, + 0.481 + ], + "angle": 0, + "content": "Yixuan Wei, Han Hu, Zhenda Xie, Zheng Zhang, Yue Cao, Jianmin Bao, Dong Chen, and Baining Guo. Contrastive learning rivals masked image modeling in fine-tuning via feature distillation. arXiv preprint arXiv:2205.14141, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.488, + 0.825, + 0.519 + ], + "angle": 0, + "content": "Zhenda Xie, Zigang Geng, Jingcheng Hu, Zheng Zhang, Han Hu, and Yue Cao. Revealing the dark secrets of masked image modeling. arXiv preprint arXiv:2205.13543, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.526, + 0.825, + 0.571 + ], + "angle": 0, + "content": "Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.578, + 0.827, + 0.621 + ], + "angle": 0, + "content": "Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.63, + 0.827, + 0.673 + ], + "angle": 0, + "content": "Sangdoo Yun, Dongyoon Han, Seong Joon Oh, Sanghyuk Chun, Junsuk Choe, and Youngjoon Yoo. Cutmix: Regularization strategy to train strong classifiers with localizable features. In International Conference on Computer Vision, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.681, + 0.825, + 0.711 + ], + "angle": 0, + "content": "Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. In International Conference on Learning Representations, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.719, + 0.827, + 0.762 + ], + "angle": 0, + "content": "Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal of Computer Vision, 127(3):302-321, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.77, + 0.827, + 0.813 + ], + "angle": 0, + "content": "Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. ibot: Image bert pre-training with online tokenizer. International Conference on Learning Representations, 2022." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.813 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.269, + 0.119 + ], + "angle": 0, + "content": "A SETUP" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.135, + 0.828, + 0.304 + ], + "angle": 0, + "content": "We build the configurations based on Xie et al. (2022b) for fine-tuning and Caron et al. 
(2021) for linear probing. Table A.1 summarizes the configurations. Most analyzes of ViTs use the official ViT-B pre-trained models, and some analyzes use ViT-{S, L} pre-trained with official configurations but epochs of 100. Due to memory limitations, ViT-L is pre-trained with a quarter batch size of the other models. The hybrid model introduced in Section 5 uses the ViT backbone architecture of Xie et al. (2022b) and employs a configuration based on their work for pre-training as shown in Table A.1. For data augmentation and regularization, we adopt widely used settings, e.g., Randaugment (Cubuk et al., 2020), label smoothing (Szegedy et al., 2016), mixup (Zhang et al., 2018), cutmix (Yun et al., 2019), stochastic depth (Huang et al., 2016). Layer decay (Bao et al., 2022) is also used for fine-tuning. Neural network models are implemented in PyTorch (Paszke et al., 2019). The code for analysis is available at https://github.com/naver-ai/cl-vs-mim. All experiments use {1, 4, 8} NVIDIA A100 Tensor Core GPU. NSML (Kim et al., 2018) has been used for experiments." + }, + { + "type": "table_caption", + "bbox": [ + 0.4, + 0.318, + 0.599, + 0.334 + ], + "angle": 0, + "content": "Table A.1: Training settings." + }, + { + "type": "table", + "bbox": [ + 0.26, + 0.338, + 0.74, + 0.646 + ], + "angle": 0, + "content": "
CONFIGURATIONLinear ProbingFine-tuningPre-training
optimizersgdadamwadamw
base learning rate1.0e-01.25e-31.0e-4
weight decay0.050.050.05
batch size1k2k1k
training epoch50100100
learning rate schedulecosinecosinemultistep
warmup epoch02010
warmup schedule·linearlinear
randaugment·9, 0.59, 0.5
label smoothing·0.10.1
mixup·0.80.8
cutmix·1.01.0
stochastic depth·0.10.1
layer decay·0.651.0
gradient clip·5.05.0
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.676, + 0.35, + 0.691 + ], + "angle": 0, + "content": "B RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.709, + 0.828, + 0.822 + ], + "angle": 0, + "content": "CL is a method based on comparing the global projection of two different random views. However, this approach usually suffers from the collapsing problem, where all representations collapse into constant solutions. To solve this problem, various methods such as negative samples and InfoNCE (Oord et al., 2018) have been proposed. Negative samples are an effective technique to avoid the collapsing problems, but they cause dimensional collapse (Jing et al., 2022) and require extra large batches (Chen et al., 2020a) or memory queues (He et al., 2020; Chen et al., 2020b) to retrieve them. We mainly analyze MoCo v3 (Chen et al., 2021), since the method includes these de facto standard components—global projection, random views, and negative samples." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Some SSL methods, e.g. Grill et al. (2020); Caron et al. (2021), do not use negative samples and use the projections of their Siamese representations as the positives. Such self-distillation has been explored theoretically and empirically (Chen & He, 2021; Tian et al., 2021) to prevent the collapsing problem, but we do not discuss the distillation scheme in this paper. Wei et al. (2022b) shows that feature distillation improves the fine-tuning performance of CL by diversifying attention ranges; this observation is consistent with our findings. While they focus on distillation to improve CL, we reveal the fundamental nature of self-supervised learning by rigorously comparing CL and MIM." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.273, + 0.106, + 0.485, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.253, + 0.442, + 0.266 + ], + "angle": 0, + "content": "(a) Attention distance" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.103, + 0.728, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.564, + 0.253, + 0.678, + 0.266 + ], + "angle": 0, + "content": "(b) Normalized MI" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.278, + 0.828, + 0.377 + ], + "angle": 0, + "content": "Figure C.1: MIM and CL methods each have consistent properties. To show this, we visualize self-attention behaviors in terms of attention distance and normalized mutual information (MI). SimCLR\\*, which was introduced in Chen et al. (2021), stands for MoCo with a momentum coefficient of 0. Left: The attention distance of CL methods (namely MoCo, SimCLR\\*, and DINO) is higher than that of MIM methods (namely SimMIM, BEiT, and MAE). This suggests that CL methods consistently capture global patterns. Right: The normalized mutual information of MIM is higher than that of CL; i.e., the self- attentions of MIM are more correlated with query tokens than CL." 
+ }, + { + "type": "image", + "bbox": [ + 0.273, + 0.393, + 0.485, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.311, + 0.538, + 0.442, + 0.551 + ], + "angle": 0, + "content": "(a) Attention distance" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.391, + 0.728, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.564, + 0.538, + 0.678, + 0.551 + ], + "angle": 0, + "content": "(b) Normalized MI" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.564, + 0.828, + 0.648 + ], + "angle": 0, + "content": "Figure C.2: ViTs exhibit consistent self-attention patterns, regardless of their size. To better understand these patterns, we visualize the self-attention behaviors of three ViTs—ViT-{Ti, S, B}—using two metrics: attention distance and normalized mutual information (MI). Left: All self-attention of MoCo capture global patterns in the later layers. In contrast, the self-attention of SimMIM capture local patterns. Right: Likewise, all self-attention maps of MoCo collapse into homogeneity in the later layers." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.67, + 0.828, + 0.755 + ], + "angle": 0, + "content": "Compared with CL, MIM has been rarely explored in vision tasks. Various methods, such as histograms of oriented gradients (Wei et al., 2022a) and tokenization (Bao et al., 2022), have been proposed as part of porting masked language models to the image domain with ViTs. Among them, SimMIM (Xie et al., 2022b) and MAE (He et al., 2022) are simple yet effective methods to reconstruct masked tokens without complicated pretext tasks. Because of its simplicity and superior performance in downstream operations, MIM is attracting attention as a promising technique in image processing." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.76, + 0.828, + 0.831 + ], + "angle": 0, + "content": "Nevertheless, we find hints suggesting that CL and MIM utilize different aspects of the data, making them complementary. For example, Zhou et al. (2022); Wang et al. (2021); Yu et al. (2022) achieve high predictive performance by harmonizing the image-level and the token-level self-supervised learning. Xie et al. (2022a) also observe that, unlike supervised pre-trained models or CL, self-attention in SimMIM focus locally; this is a consistent result with our findings." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.851, + 0.714, + 0.867 + ], + "angle": 0, + "content": "C OUR INSIGHTS ARE GENERALIZABLE TO VARIOUS MODELS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In the main text, we analyze ViT-B pre-trained using MoCo and SimMIM. We observe consistent characteristics across various sizes of ViTs that have been pre-trained using other self-supervised learning methods. To support this claim, we delve into the properties of self-attention." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.101, + 0.488, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.31, + 0.249, + 0.45, + 0.262 + ], + "angle": 0, + "content": "(a) Standard deviations" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.102, + 0.723, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.541, + 0.248, + 0.703, + 0.264 + ], + "angle": 0, + "content": "(b) Distribution at \\(3^{\\mathrm{rd}}\\) layer" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.274, + 0.828, + 0.415 + ], + "angle": 0, + "content": "Figure E.1: The presence of an outlier head in MoCo raises the average of normalized mutual information. This observation explains how the normalized mutual information in a couple of MoCo's self-attention layers is similar to or even surpasses that in SimMIM. Left: We present the standard deviation of the normalized mutual information. As depicted in this figure, the standard deviation in SimMIM remains relatively consistent across different depths. In contrast, the standard deviation in MoCo's \\(3^{\\mathrm{rd}}\\) or \\(4^{\\mathrm{th}}\\) self-attention layer is notably higher than that in SimMIM. Right: Distribution of mutual information for the third self-attention layer head. The visualization of this kernel density estimation shows that MoCo has an outlier head with mutual information close to 1.0. The red rectangles \\((\\square)\\) and blue triangles \\((\\triangle)\\) refer to the mutual information of heads in MoCo and SimMIM, respectively." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.441, + 0.825, + 0.499 + ], + "angle": 0, + "content": "Figure C.1 visualizes the self-attention behaviors of different self-supervised learning methods in terms of attention distance and normalized mutual information. As depicted in the figure, all CLs and MIMs exhibit consistent properties. Similarly, Figure C.2 demonstrates that various sizes of models also demonstrate consistent properties." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.519, + 0.819, + 0.535 + ], + "angle": 0, + "content": "D LOCALITY INDUCTIVE BIAS IMPROVES FINE-TUNING ACCURACY OF CL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.551, + 0.566, + 0.663 + ], + "angle": 0, + "content": "In Section 2, we demonstrate that the homogeneity of self-attention map, i.e., attention collapse of CL, helps ViT distinguish images but harms fine-tuning accuracy. As a result, we anticipate that incorporating a locality inductive bias into CL will improve fine-tuning accuracy but degrade linear probing accuracy. One simple method to inject locality into self-attention is to limit the receptive field of self-attention by using attention masks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.669, + 0.566, + 0.754 + ], + "angle": 0, + "content": "Figure D.1 shows the predictive performance of MoCo with restricted local self-attention. As expected, the results are similar to the performance of MIM; As the kernel size decreases, the linear probing accuracy decreases but the fine-tuning accuracy increases. These results are consistent with our findings." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.774, + 0.381, + 0.789 + ], + "angle": 0, + "content": "E A CLOSER LOOK AT" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.792, + 0.559, + 0.807 + ], + "angle": 0, + "content": "THE ROLE OF SELF-SUPERVISED VIT LAYERS" + }, + { + "type": "image", + "bbox": [ + 0.583, + 0.552, + 0.819, + 0.715 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.572, + 0.726, + 0.828, + 0.81 + ], + "angle": 0, + "content": "Figure D.1: Locality inductive bias harms linear probing but improves fine-tuning. We report the linear probing and fine-tuning accuracy of MoCo with restricted self-attention via attention masks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.824, + 0.825, + 0.853 + ], + "angle": 0, + "content": "The main text provides the key characteristics of CL and MIM. This section delves deeper into the details not covered in the main text to provide a more comprehensive understanding of the subjects." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "The role of the early modules. Figures 3 and 4 suggest that most layers of MoCo capture global patterns and have only a weak correlation with query tokens. However, one or two of MoCo layers exhibit unusual behavior. For example, the \\(3^{\\mathrm{rd}}\\) layer of MoCo focuses on local areas and its self-attention map is dependent on the query. We explore this property in more detail." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.103, + 0.481, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.258, + 0.408, + 0.271 + ], + "angle": 0, + "content": "(a) MoCo" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.103, + 0.724, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.582, + 0.258, + 0.661, + 0.271 + ], + "angle": 0, + "content": "(b) SimMIM" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.283, + 0.828, + 0.354 + ], + "angle": 0, + "content": "Figure E.2: The tokens of MoCo form a cluster for each image, while those of SimMIM are intermingled. This aligns with the finding that, compared to SimMIM, MoCo is linearly separable. To demonstrate this property, we visualize 3,528 tokens (196 tokens \\(\\times\\) 18 images) from the representations of the last layer via t-SNE, and find that a consistent pattern is observed even in the representations of the intermediate layers. The colors represent three different classes. See also Figures 6 and 7." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.371, + 0.825, + 0.442 + ], + "angle": 0, + "content": "Figure E.1a provides the variance of normalized mutual information with respect to heads. As the results show, the variance of SimMIM is consistent across all depths whereas that of MoCo is not. In particular, the \\(3^{\\mathrm{rd}}\\) layer of MoCo has high variance even though other layers do not. This suggests that, while most of MoCo's self-attention heads capture global patterns and have weak correlation with query tokens, some heads deviate from this behavior and exhibit a different pattern." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.448, + 0.826, + 0.517 + ], + "angle": 0, + "content": "Figure E.1b shows the distribution of normalized mutual information among heads in the \\(3^{\\mathrm{rd}}\\) layer to analyze this phenomenon. In this figure, we use kernel density estimation with Gaussian kernel to visualize the distribution. The results reveal several outlier heads in MoCo with mutual information close to 1.0. As a result, these outliers significantly raises the average value of normalized mutual information." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.533, + 0.827, + 0.603 + ], + "angle": 0, + "content": "A comprehensive view through visualization of tokens from multiple images. Figure 6 visualizes how self-attention layers transform tokens from one or two images in representation space. The figure demonstrates that MoCo transforms all tokens in union while SimMIM transforms them individually. As a result, MoCo separates the representations at the image-level and SimMIM separates them at the token-level." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.609, + 0.825, + 0.708 + ], + "angle": 0, + "content": "The t-SNE visualization (Van der Maaten & Hinton, 2008) in Figure E.2 provides consistent results and offers even a more comprehensive perspective. In this figure, we visualize the last representations of 3528 tokens from 18 images that belong to three different classes. The visualization demonstrates that MoCo separates the representations into distinct classes and even images, while maintaining the tokens close together in compact image clusters. On the other hand, SimMIM separates tokens from images, resulting in a wide representation space for each image, but the images or even classes may be challenging to linearly distinguish." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.723, + 0.825, + 0.779 + ], + "angle": 0, + "content": "The first layer of MoCo aggregates tokens into compact clusters. 
Figures 6 and 7 show that all modules, except the first module, in MoCo behave consistently. However, we observe that MoCo's first module behaves differently and unusually than the others. We elaborate the behaviour of the first layer of module." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Figure E.3a shows the qualitative visualization of tokens for a sample image, similar to Figure 6. This visualization shows that the first MoCo layer aggregates tokens into compact clusters. Although this figure only uses a single image, the layer aggregates all images into a small representation space as well. In terms of singular values, we observe consistent results. Similar to Figure 7, Figures E.3b and E.3c report the second largest log singular value, instead of the relative log singular value, to investigate the absolute volume of the representations. As expected, most layers in both MoCo and SimMIM increase the singular value, but surprisingly, the first layer of MoCo reduces the singular value, meaning that the volumes of representations are decreased at both the token-level and image-level. Based on these observations, we conjecture that the first module of MoCo behaves like an embedding component." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.127, + 0.4, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.431, + 0.127, + 0.622, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.631, + 0.127, + 0.822, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.294, + 0.828, + 0.407 + ], + "angle": 0, + "content": "Figure E.3: The first layer of MoCo clumps tokens together. We demonstrate this property from two perspectives: qualitative visualization and singular value of token distribution. Left: Similar to Figure 6, we visualize tokens of a sample image in a representation space. The blue and red data points represent the tokens before and after the self-attention transformation. As shown in this figure, the first self-attention layer clumps tokens into a compact cluster. Middle and Right: Similar to Figure 7, we visualize the second largest log singular value (not \\(\\Delta\\) log singular value) for depth. The singular value spectra demonstrate consistent results; the first layer of MoCo (gray area) not only clumps tokens but also images into a compact cluster." + }, + { + "type": "image", + "bbox": [ + 0.273, + 0.422, + 0.48, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.422, + 0.727, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.592, + 0.828, + 0.704 + ], + "angle": 0, + "content": "Figure F.1: Self-attention and representations in fine-tuned models exhibit consistency with those of pre-trained models. 
Similar to Figures 4 and 9, we present the normalized mutual information and the Fourier analysis results of fine-tuned models. The abbreviation \"ft\" stands for \"fine-tuned model.\" Left: Similar to pre-trained models, the mutual information of MoCo's self-attention maps is generally lower compared to that of SimMIM. However, it is noteworthy that the mutual information of the later self-attention maps in SimMIM decreases significantly. This is because the later layers of a model trained with supervision or fine-tuning tend to capture global information. Right: Similarly, SimMIM utilizes higher frequency information than MoCo." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.722, + 0.766, + 0.755 + ], + "angle": 0, + "content": "F FINE-TUNED MODELS INHERIT THE PROPERTIES OF PRE-TRAINED MODELS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.773, + 0.828, + 0.816 + ], + "angle": 0, + "content": "The main text focuses on highlighting the key properties of pre-trained models. This section demonstrates that these properties are also utilized by fine-tuned models. As a result, we can safely apply the insights gained from the main text to various situations." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.833, + 0.826, + 0.89 + ], + "angle": 0, + "content": "Consistent results in self-attention and Fourier analysis. Figures 3 and 4 demonstrate that MoCo captures global areas and that its self-attention are less related to the query tokens, compared with SimMIM. In addition, Figure 9 shows that MoCo captures low-frequency information as opposite to SimMIM. These results are consistent in the fine-tuning scheme." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Figure F.1a reveals the self-attention behaviours of fine-tuned MoCo and SimMIM in terms of normalized mutual information. 
Similar to pre-trained models, the fine-tuned self-attention maps of" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.104, + 0.518, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.309, + 0.284, + 0.444, + 0.298 + ], + "angle": 0, + "content": "(a) Stylized ImageNet" + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.133, + 0.756, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.547, + 0.284, + 0.756, + 0.298 + ], + "angle": 0, + "content": "(b) Robustness for noise frequency" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.309, + 0.828, + 0.396 + ], + "angle": 0, + "content": "Figure F.2: Fine-tuned ViTs inherit the robustness against frequency-based noise. Similar to Figure 8b, we measure the decrease in the accuracy of ViTs fine-tuned with MoCo and SimMIM. Left: Even with fine-tuned ViTs, MoCo is relatively shape-biased and SimMIM relatively texture-biased. This bias is just less apparent than in linear probing models. Right: The robustness against frequency-based random noise also suggests the same: MoCo is robust against high-frequency noise, but SimMIM is not. In conclusion, fine-tuned models inherit the properties of linear probing models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.414, + 0.828, + 0.527 + ], + "angle": 0, + "content": "MoCo have generally lower mutual information compared to those of SimMIM. The only significant difference is that the mutual information of the later self-attention maps in fine-tuned SimMIM decreases significantly, as later layers in models trained with supervision or fine-tuning tend to capture more global information. 
As a result, the gap between the two methods is reduced. This is also reflected in the consistent results of Fourier analysis as shown in Figure F.1b. In this analysis, SimMIM captures higher-frequency information compared to MoCo in fine-tuning scheme as well. However, the later layers of SimMIM attempt to capture low-frequency information. Therefore, the gap of fine-tuned models is smaller than that of pre-trained models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.541, + 0.828, + 0.653 + ], + "angle": 0, + "content": "CL is shape-biased and MIM is texture-biased in fine-tuning scheme. In Figure 8, we demonstrate that linear probing model with CL (MoCo) is more shape-biased and that with MIM (SimMIM) is texture-biased, compared with each other. As in the experiment, we calculate the classification results of ImageNet fine-tuned MoCo and SimMIM on Stylized-ImageNet, and measure the decrease in accuracy against frequency-based random noise. As we would expected, Figure F.2 shows that the property also extends to the fine-tuned model. Even though we still observe the difference between MoCo and SimMIM, the performance gap between MoCo and SimMIM is quite reduced compared to the gap between the linear probing models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.667, + 0.534, + 0.793 + ], + "angle": 0, + "content": "Later layers of CL and early layers of MIM are important in find-tuning phases. As shown in Figure 11, the later layers of the CL and the early layers of the MIM are linearly separable. This finding suggests that these layers are significant, however, it does not provide direct evidence that such properties are preserved during fine-tuning phases. We demonstrate that these layers play a crucial role in fine-tuning phases as well." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.8, + 0.534, + 0.926 + ], + "angle": 0, + "content": "To support this claim, we conduct a study to measure the accuracy drop of fine-tuned models using pretrained models with a few blocks initialized. As shown in Figure F.3a, the results indicate that the initializing a few early blocks in the pre-training models of SimMIM significantly harms the fine-tuning accuracy, compared to MoCo. These observations suggest that early layers of SimMIM play an important role in fine-tuning. Conversely, Figure F.3b shows that initializing later" + }, + { + "type": "image", + "bbox": [ + 0.545, + 0.671, + 0.822, + 0.807 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.569, + 0.814, + 0.659, + 0.828 + ], + "angle": 0, + "content": "(a) Early layer" + }, + { + "type": "image_caption", + "bbox": [ + 0.709, + 0.814, + 0.798, + 0.828 + ], + "angle": 0, + "content": "(b) Later layer" + }, + { + "type": "image_caption", + "bbox": [ + 0.54, + 0.839, + 0.828, + 0.922 + ], + "angle": 0, + "content": "Figure F.3: The later layers of CL and early layers of MIM play a key role in the fine-tuning scheme. To show this, we initialize a few blocks and measure the decrease in the fine-tuning accuracy of pretrained models." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.135 + ], + "angle": 0, + "content": "blocks in the pre-training models of SimMIM does not significantly harms the fine-tuning accuracy, suggesting that they are not important in fine-tuning compared with MoCo." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.828, + 0.21 + ], + "angle": 0, + "content": "One limitation of this experiment is the evaluation of the accuracy drop in a single run. Since the accuracy drop of MoCo is marginally higher than that of SimMIM at the first initialization depth in Figure F.3b, additional experiments may improve the results. In this experiment, we utilized the same fine-tuning settings for both MoCo and SimMIM; but experiments with fine-tuning settings tailored to each method may provide further insight." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.23, + 0.819, + 0.248 + ], + "angle": 0, + "content": "G HYBRID MODELS OUTPERFORM CL AND MIM IN DOWNSTREAM TASKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.261, + 0.482, + 0.4 + ], + "angle": 0, + "content": "The claim that CL and MIM are complementary is demonstrated only on ImageNet in Section 5. To validate this claim in tasks beyond ImageNet, we evaluated the pre-trained models of the hybrid method introduced in Section 5 for another classification task and a semantic segmentation task. In particular, we measured the accuracy on iNaturalist 2018 (Van Horn et al., 2018) and the mIoU on ADE20K (Zhou et al., 2019). As shown in Table G.1, the hybrid" + }, + { + "type": "table_caption", + "bbox": [ + 0.49, + 0.264, + 0.828, + 0.294 + ], + "angle": 0, + "content": "Table G.1: Hybrid models of CL and MIM outperform both CL and MIM in various tasks." + }, + { + "type": "table", + "bbox": [ + 0.497, + 0.3, + 0.821, + 0.386 + ], + "angle": 0, + "content": "
λ (IMPORTANCE OF CL)iNat-18ADE20k
0.0 (SimMIM)62.135.4
0.2 (SimMIM + MoCo)68.842.2
1.0 (MoCo)66.239.7
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.4, + 0.827, + 0.43 + ], + "angle": 0, + "content": "model of SimMIM and MoCo outperforms both SimMIM and MoCo in various downstream tasks. Therefore, we conclude that the effectiveness of this claim extends beyond ImageNet." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "19" + } + ] +] \ No newline at end of file diff --git a/2023/What Do Self-Supervised Vision Transformers Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_origin.pdf b/2023/What Do Self-Supervised Vision Transformers Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..aa5659a086d8bf5dc3e603606eae0ed87938214b --- /dev/null +++ b/2023/What Do Self-Supervised Vision Transformers Learn_/eb9117a9-6734-4afe-bd94-17080f9ab76e_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dbf061e70509215a926af2f1691dfb8ce268585d78959c9f01a6bc63f6620ff +size 2858489 diff --git a/2023/What Do Self-Supervised Vision Transformers Learn_/full.md b/2023/What Do Self-Supervised Vision Transformers Learn_/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b233bc2a44c419252ac165d0495e9b18de36f79f --- /dev/null +++ b/2023/What Do Self-Supervised Vision Transformers Learn_/full.md @@ -0,0 +1,417 @@ +# WHAT DO SELF-SUPERVISED VISION TRANSFORMERS LEARN? + +Namuk Park $^{1*}$ Wonjae Kim $^{2}$ Byeongho Heo $^{2}$ Taekyung Kim $^{2}$ Sangdoo Yun $^{2}$ + +$^{1}$ Prescient Design, Genentech $^{2}$ NAVER AI Lab + +park.namuk@gene.com {wonjae.kim,bh.heo,taekyung.k,sangdoo.yun}@navercorp.com + +# ABSTRACT + +We present a comparative study on how and why contrastive learning (CL) and masked image modeling (MIM) differ in their representations and in their performance of downstream tasks. 
In particular, we demonstrate that self-supervised Vision Transformers (ViTs) have the following properties: (1) CL trains self-attention to capture longer-range global patterns than MIM, such as the shape of an object, especially in the later layers of the ViT architecture. This CL property helps ViTs linearly separate images in their representation spaces. However, it also makes the self-attention collapse into homogeneity for all query tokens and heads. Such homogeneity of self-attention reduces the diversity of representations, worsening scalability and dense prediction performance. (2) CL utilizes the low-frequency signals of the representations, but MIM utilizes high-frequencies. Since low- and high-frequency information respectively represent shapes and textures, CL is more shape-oriented and MIM more texture-oriented. (3) CL plays a crucial role in the later layers, while MIM mainly focuses on the early layers. Upon these analyses, we find that CL and MIM can complement each other and observe that even the simplest harmonization can help leverage the advantages of both methods. + +# 1 INTRODUCTION + +Contrastive Learning (CL) (He et al., 2020; Chen et al., 2020a;b; 2021) has been the most popular self-supervised learning methods until recently. It aims to learn the invariant semantics of two random views (Tian et al., 2020a;b) by making global projections of representations similar for positive samples and dissimilar for negative samples. Since CL exploits the globally projected representations to contrast each other, it can be deemed as an "image-level" self-supervised learning approach. + +Deviating from CL, masked image modeling (MIM) (Bao et al., 2022; Xie et al., 2022b; He et al., 2022) has risen as a strong competitor of CL in the era of Vision Transformers (ViTs) (Dosovitskiy et al., 2021) with its impressive performances of downstream tasks. MIM trains ViTs by reconstructing the correct semantics of masked input patches. 
Unlike CL, it learns the semantics of patch tokens and this can be deemed as a "token-level" self-supervised learning approach. Since MIM outperforms CL in fine-tuning accuracy, it may appear prima facie as a more effective pre-training method than CL. However, a different trend is observed for linear probing accuracy with CL outperforming MIM (See Figure 1). For further exposition on CL and MIM, we refer the reader to Appendix B. + +Then, which method—CL or MIM—should we use for the self-supervised learning of ViTs? Although both methods are widely used, little is known about what they learn. This paper sheds light on their nature by showing that ViTs trained through CL and MIM learn opposite knowledge. In particular, we raise questions to better understand self-supervised learning, and then find the answers that can potentially affect future improvements. The questions posed can be divided into the following properties of Vision Transformers: the behavior of self-attention, the transformation of the representations, and the position of lead role components. Our key questions and findings are elaborated below. + +How do self-attention behaviors? (Section 2) We find that CL primarily captures global relationships, while MIM captures local relationships. This implies that the representations of CL contain more global patterns, such as object shapes, than those of MIM. On the one hand, this property helps + +![](images/fb19c61ab1e5f8dba96ca35eba9a9981210dcc46fda949229d28dd8d16bd1885.jpg) +Figure 1: CL outperforms MIM in linear probing and small model regimes. In contrast, MIM excels in fine-tuning, large model regimes, and dense prediction. Red squares (■) denote CL, and blue triangles (▲) denote MIM. By default, we report the performance of ViT-B trained or pretrained on ImageNet-1K. We use the results from original papers and He et al. (2022) for object detection. 
Regarding the scaling experiment, we report the results that we reproduced based on official configurations except with 100 epochs, marking them as $\mathrm{MoCo}^{\dagger}$ and SimMIM†. Left: CL outperforms MIM in linear probing but underperforms in fine-tuning. Middle: CL outperforms MIM in small model regimes (ViT-Ti and ViT-S), and MIM shows superior scalability in large model regimes (ViT-L and ViT-H). Right: MIM outperforms CL in the dense prediction downstream tasks, such as object detection with Mask R-CNN (He et al., 2017) on COCO (Lin et al., 2014). + +![](images/8e526279a352ed99d5c45a57f0a7abf4002aaab19db924697413ba84fa35bdc7.jpg) + +![](images/9174d5157db5599272dee2a00a1acf8c538e0e673d696d0f88c2d45b4285662c.jpg) + +CL recognize objects and distinguish images. On the other hand, however, it also suggests that CL struggles to preserve local information. In particular, we observe that self-attention of CL in the later layers for all query tokens and heads collapse into homogeneous attention maps. In such cases, most self-attention maps focus on object boundaries, meaning that they can capture object shapes but may lose interaction diversity between tokens. Consequently, CL and MIM each have advantages over different tasks: CL works well for linear probing and classification tasks with smaller models, whereas MIM outperforms CL in fine-tuning and dense prediction tasks with larger models. + +How are representations transformed? (Section 3) CL transforms representations mainly based on image-level information, and its self-attentions collect information on object shape over entire tokens. This process makes tokens similar rather than diversifying them. As a result, CL distinguishes images well but has difficulty distinguishing tokens. On the contrary, MIM preserves and amplifies token-level information. Thus, the self-attentions for each token are substantially different and prohibit each token from including redundant information. 
We observe the consistent property from our Fourier analysis: CL primarily utilizes the low-frequency signals, but MIM utilizes high-frequencies. This observation suggests that CL is shape-biased and MIM is texture-biased. In sum, self-supervised models trained with CL and MIM learn the representations in different levels of detail. + +Which components play an important role? (Section 4) Analyses of the importance of each CL and MIM layer demonstrate that the later layers in CL and early layers in MIM play a key role. We interpret this as a consistent observation since early layers are usually known to capture low-level features—e.g., local patterns, high-frequency signals, and texture information—and later layers capture global patterns, low-frequency signals, and shape information (Dosovitskiy et al., 2021; Raghu et al., 2021; d'Ascoli et al., 2021; Graham et al., 2021; Dai et al., 2021; Park & Kim, 2022b). + +From the above analyses and insights, we find that CL and MIM can complement each other and show in Section 5 that even the simplest implementation, such as a linear combination of CL and MIM objectives, can take advantage of both methods. Surprisingly, the hybrid models outperform those pre-trained with either CL or MIM both in terms of fine-tuning and linear probing accuracy. + +# 2 HOW DO SELF-ATTENTIONS BEHAVE? + +We point out that CL and MIM may not be silver bullets for all tasks, as shown in Figure 1. CL generally outperforms MIM in linear probing, while MIM dominates CL in the fine-tuning scheme. However, when we dissect the size of the model, CL outperforms MIM after fine-tuning for small models (cf. (Wang et al., 2022)), while MIM performs better on large models. Also, MIM yields effective representations for dense prediction tasks, such as object detection, but CL falls short on those tasks. This section explains these phenomena by investigating the behavior of self-attention. 
+ +![](images/bf35728974a9349d03d0e62edcedf24d8b55abf4d06e5b006a0e6d0de5b5a279.jpg) + +![](images/4b948ba1de2243db01e228d1c6dccedc84dd877ca9996c9111d42b7fefe4fc74.jpg) +Depth $= 1$ + +![](images/0d2c4ae36b84acbb38247d9f530838052fed9d70228f569bc7c290b4870e56ea.jpg) + +![](images/2cd9f91e1dbd7f253962c304f777c5b901582ff44617837422fd4c29956eeb42.jpg) +Depth $= 4$ +(a) MoCo +Figure 2: Self-attention of CL (MoCo) capture global relationships, but they collapse into homogeneous attention maps for all query tokens and heads. Self-attention of MIM (SimMIM) mainly focus on local areas. We visualize the attention maps for two different query tokens in the beginning through the end layers. We omit the results for self-attention heads, which show mostly consistent results. Left: Self-attention of CL capture global patterns and the shape of an object. However, all attention maps capture the same shape information regardless of the query tokens. Right: Self-attention of MIM capture local patterns and are correlated with query tokens. + +![](images/cd87e3ce9fd95db6d96f4acab5aa416a349ecc5e66aa1a73ab836bb3008a4f80.jpg) + +![](images/4bf05f6f2707473d618fa5bbe36a08de93104499a912f0053ed63910785ff73b.jpg) +Depth $= 11$ + +![](images/774707bc6a8c17a823d098c5932768a2a907b5729e535c7e059ee849ac95d4cb.jpg) + +![](images/5bb8c9061fd07a7d2a1da479182ba2962c50c66e1d277bc6e47217e25063792a.jpg) +Depth $= 1$ + +![](images/28aaca9c4abcebdc1b515966087fbb173a50f019faade9018ec7be7505fe36de.jpg) + +![](images/24c5506ade57cd25c09e8cddfb90324c84f474209403bf28aa3b97c5065e765b.jpg) +Depth $= 4$ +(b) SimMIM + +![](images/f494cdd375851dcadd3b3dfee47b7994f0e0f66ae842cc5e45e1d7f69702eb20.jpg) + +![](images/6c43d5fa9ff4523b19d789b99a778143dcf43176ca7a3a76257659d61bf7da06.jpg) +Depth $= 11$ + +Our analyses mainly compare ViT-B/16 pre-trained on ImageNet-1K (Russakovsky et al., 2015) with MoCo v3 (Chen et al., 2021) and SimMIM (Xie et al., 2022b). We use the ImageNet validation images for our experiments. 
We observe that other methods, e.g., DINO (Caron et al., 2021), BEiT (Bao et al., 2022), and MAE (He et al., 2022), have consistent properties (See Figure C.1). + +CL mainly captures global relationships. We measure the ranges of self-attention via attention distance (Dosovitskiy et al., 2021). Attention distance is defined as the average distance between the query tokens and key tokens considering their self-attention weights. Therefore, it conceptually corresponds to the size of the receptive fields in CNNs. + +Figure 3 shows that the attention distance of CL (MoCo) is significantly higher than that of MIM (SimMIM), especially in the later layers. As seen in Figure 2, the qualitative visualization, this implies that the representations of CL contain global patterns and shape information, so CL can help ViTs distinguish between objects of images. Conversely, the self-attention of MIM mainly capture local relationships; i.e., MIM may have difficulty recognizing whole objects and their shapes. Section 3 also discuss this claim from a representational perspective. + +Self-attention of CL collapse into homogeneity. We observe an interesting behavior of CL in Figure 2, which shows the attention maps for query tokens from two different spatial locations. The self-attention of CL surprisingly indicate almost identical object shapes for the two query tokens, compared to that of MIM. We describe this phenomenon as an attention collapse into homogeneity. This collapsing trend in the self-attention of CL is observed across all the heads and query tokens. In contrast, the self-attention of MIM are more faithful to the two query tokens, as expected. + +We use normalized mutual information (NMI) (Strehl & Ghosh, 2002) to measure the attention collapse. 
Let $p(q)$ be a distribution of query tokens, and assume that these query tokens are uniformly distributed since a single query token is given for each spatial coordinate, i.e., $p(q) = 1 / N$ where $N$ is the number + +of the tokens. Then the joint distribution of query and key tokens is $p(q, k) = \pi(k|q)p(q)$ where + +![](images/99e3b5c3a038b687f23183f30eb71953eccbf1a6c211918b2aca6e3631a13e01.jpg) +Figure 3: Effective receptive fields of CL are global, but those of MIM are local. This is particularly evident in the later layers. + +![](images/a1cb58d7c38ec6d018ff2afe2609cbab97a982fa031907530f646285aa1bb55d.jpg) +Figure 4: Self-attentions of CL have little to do with query tokens. Normalized MI of CL is significantly lower than that of MIM in the later layers. + +![](images/82d220a43e6c076dc65feb2c5e9314ccdb2a6b51deec70dd041e72eb1855935d.jpg) +(a) Between heads + +![](images/e68f0d48a31427b1e469ae73c101ef9376549e8a96f084255d217431e321cd11.jpg) +(b) Between depths +Figure 5: CL lacks representational diversity in the later layers. We measure cosine similarities of representations in the self-attentions between the heads (left), depths (middle), and spatial coordinates (right). All of the results show that the representational similarity of later self-attentions of CL is higher than that of MIM. Increasing heads or depths of CL is not effective in improving the diversity. Left: The similarity of representations from two heads in self-attention. Middle: The similarity between representations before and after self-attention transform them. Right: The similarities of representations at two spatial coordinates. ViT- $\{\mathrm{S}, \mathrm{L}\}$ is trained with 100 epochs. + +![](images/9ee5281e4a88280f4a955c6205a315b8252a5d398e156f8772953329c885ac96.jpg) +(c) Between tokens + +$\pi (k|q)$ is the softmax-normalized self-attention matrix. 
Thus, the normalized mutual information is $\frac{I(q,k)}{\sqrt{H(q)H(k)}}$ where $I(\cdot ,\cdot)$ is the mutual information and $H(\cdot)$ is the marginal entropy. Low mutual information values show that attention maps are less dependent on the query tokens, implying an attention collapse into homogeneity. Conversely, high mutual information means that the attention maps strongly depend on the query tokens. + +Figure 4 shows the degree of attention collapse in terms of the normalized mutual information (NMI). Results show that the mutual information of CL is significantly lower than that of MIM in the later layers, suggesting that the self-attention of CL tend to collapse into homogeneous distributions. + +Attention collapse reduces representational diversity. We conjecture that the self-attention collapse into homogeneity eventually leads to homogeneous token representations. To support this argument, we measure representational cosine similarities. In particular, we design three similarities: between different self-attention heads (heads), between the before and after self-attention layers (depths), and between different tokens (tokens). + +Figure 5 shows the results, reporting the representation similarities for heads, depths, and tokens. As expected, the similarities of CL are notably higher than those of MIM in the later layers, indicating that the representations of CL have significant homogeneity. Even increasing the model size does not solve the problem CL has and may rather worsen it. Increasing the number of heads (ViT-S to ViT-B; Figure 5a) improves the representational diversity of MIM, but hardly improves the diversity of CL. Increasing the depth of CL (ViT-B to ViT-L; Figure 5b) only adds redundant modules. + +Implications of the behaviors we observed. In conclusion, the self-attention of CL captures global patterns and shapes of objects. 
However, CL suffers from the problem of attention collapse into homogeneity, which reduces the diversity of token representations. On the other hand, MIM primarily captures local patterns and thus does not suffer from the attention collapse problem. + +The behaviors mentioned above can explain the phenomena we observed in Figure 1: + +- CL outperforms MIM in linear probing tasks because it captures shapes, which helps recognize objects and distinguish images. Although MIM preserves the texture and diversity of representations, their correlation with objects or content may not be as strong as shapes do. +- The attention collapse prohibits CL from fully exploiting heads, depths, and tokens of ViTs. Since homogeneous representations are not very helpful in improving token representations, ViTs trained with CL waste a large part of network capability. Therefore, the fine-tuning accuracy of MIM is significantly higher than CL in large models. +- CL is not suitable for dense prediction since the token features are homogeneous with respect to their spatial coordinates. + +![](images/49c4ce65597eb7dcdb83ed994f7627dba34a0d903b0043650206813c7fc112b3.jpg) +(a) MoCo (one image) + +![](images/a8c0fbaadb19d29e242269eabe79f452b5fc0aa94e82419a7dad9f4624fe83e7.jpg) +(b) MoCo (two images) +Figure 6: Self-attention layers of CL and MIM transform representations differently. We visualize 196 spatial representation tokens for an example validation image in a representation space. The blue $(\bullet)$ and red $(\bullet)$ data points denote the tokens before and after the self-attention transformation. Left: The self- attentions of CL (e.g., MoCo) translate all the tokens equally, so the distances between the tokens of an image do not increase. Middle: However, CL moves the "centers of representations (represented by $\times$ )" away from each other. Therefore, the images are linearly separable. 
The circle $(\bullet)$ and triangle $(\triangle)$ data represent tokens from different images. Right: The self- attentions of MIM (e.g., SimMIM) transform representations differently according to query tokens, thus increasing the distances between tokens. See Figure 7 for quantitative analyses. + +![](images/d79a51535df574efcc7e45e8fb443c5da38de43e28c348c07224b35da91d8830.jpg) +(c) SimMIM (one image) + +We further investigate the self-attention's behavior with restricted receptive fields in Figure D.1. As shown in the experiment, locally restricted self-attentions lead to lower linear probing but higher fine-tuning accuracy, which is consistent with our observations. + +# 3 HOW ARE REPRESENTATIONS TRANSFORMED? + +In this section, we analyze the token representations of ViTs pre-trained with CL and MIM to demonstrate how the properties of self-attentions we observed in Section 2 affect the representations differently. We use the same pre-trained ViT-B/16 models by default default just as we did in Section 2. + +CL transforms all tokens in unison, while MIM does so individually. To show how CL and MIM transform token representations, we visualize them in representation space. Figure 6 shows 196 (14×14 patches) tokens before and after self-attention modules from a single image sample of the ImageNet validation set. We use the three large singular vectors obtained via singular value decomposition (SVD) as the bases of the space. To better visualize this, we display the representation of MoCo and SimMIM in their crucial layers—the last layer and the first layer, respectively. + +Figure 6a visualizes the changes that occur in the tokens of CL when transformed by self-attention module; it indicates that the self-attention of CL translate all tokens in unison. This phenomenon occurs because the self-attention maps of CL are homogeneous, i.e., self-attention is almost independent of the spatial coordinates and query tokens. 
Therefore, the modules add near-constant to all the token representations. As a result, the inter-representation distance and the volume of representations do not increase, which implies that CL cares less about individual tokens. + +Nevertheless, self-attention are essential for the discriminative power of CL. As shown in Figure 6b, they help distinguish images by moving "the centers of the representation distribution" away from each other. In short, this figure suggests that CL makes the image linearly separable even though it loses the ability to distinguish tokens. + +In contrast, MIM applies a different transformation to individual tokens, as shown in Figure 6c, because different self-attention are assigned to the individual spatial tokens. Thus, MIM alters the distance between tokens of a single image as well as the volume of the representation distribution. + +We find consistent results in quantitative analysis. Inspired by Jing et al. (2022), Figure 7 visualizes singular value spectra for tokens and images. A singular value spectrum provides singular values of a representation distribution obtained by SVD, so we can use it to represent the effective volume of distributions in a representation space. The higher the singular value in a spectrum, the larger the + +![](images/9ef3380b4df0c37dc0622f3ca6b6662e88772a9772af3c5a415479d047cf2c99.jpg) +(a) Singluar value spectrum of tokens from a single image (token-level) + +![](images/de245a5bf2e224909a2ff0fd09edb5e8e951208b716152fbbd1bda67243a3df2.jpg) + +![](images/b7090f8183eca964795b233964f9b8ff98297632d955f865063ff44f1d947265.jpg) + +![](images/10818ff7ef3740f028faa7a71f35ab740d750a4e968475054bd344e0af097616.jpg) +(b) Singluar value spectrum of images (image-level) + +![](images/734960add7561f85bce6dfd04d6919f304218f71ac54b926079a4fe51dc8b634.jpg) +Figure 7: CL barely changes or even decreases the distribution volume of tokens from a single image, implying that it hardly distinguishes between token. 
Instead, it significantly increases the distribution volume of images. To demonstrate these properties, we visualize singular value spectra, the singular values of the distribution of representations sorted by the magnitude. The higher a singular value, the larger the volume of a distribution. The right of this figure shows the $64^{\text{th}}$ and $128^{\text{th}}$ highest singular value for depth. Top: Singular value spectra of tokens from a single image. CL decreases the singular values of the tokens, but MIM increases. Bottom: Singular value spectra of images. CL significantly increases the volumes occupied by images, but MIM hardly does so. + +![](images/4a648590f389717f54d3b3c3dfc28108812c7ddc6e8871eb4efd288d01028240.jpg) + +volume of a representation distribution. To calibrate the scale, we use the relative log singular value $(\Delta \log \text{ singular value})$ , the difference with the (second) largest singular value for a depth. + +Figure 7a shows singular value spectra of tokens from a single image. We calculate them for each image in the ImageNet validation set and report averaged singular values over the dataset. In this figure, the CL layers hardly increase or even decrease the singular value; consistent with the explanation above, this implies that CL hardly distinguishes tokens. In contrast, MIM increases the singular value, meaning that it changes the volume of tokens and can distinguish tokens. Another interesting observation is that a few later layers of MIM decrease the volume, even though they capture local patterns as shown in Figures 3 and 4. This is because they behave like decoders. Section 4 discusses this in detail. + +Figure 7b shows the singular value spectra of images. We average all tokens in an image to build an image-level representation vector and conduct a singular value spectrum over the collection of representations in the validation set. 
As opposed to the previous case, the representational volume of CL is larger than that of MIM, which implies that CL makes the image-level representation separable. + +CL exploits low-frequencies, and MIM exploits high-frequencies. We hypothesize that CL captures low-frequency and MIM captures high-frequency information in spatial dimensions since CL provides image-level self-supervision to capture global patterns, while MIM provides token-level self-supervision to exploit local patterns. To support this argument from a frequency perspective, we conduct a Fourier analysis of the representations as following Park & Kim (2022b). In particular, we report the relative log amplitude of Fourier-transformed representations by calculating the amplitude difference between the highest and lowest frequencies of representations. + +Figure 9 visualizes the relative amplitudes of CL and MIM. It shows that the high-frequency amplitude of CL is significantly smaller than that of MIM, suggesting that CL mainly utilizes low-frequency spatial information such as global structures and shapes. On the contrary, MIM usually uses high-frequency spatial information such as narrow structures and fine textures. + +![](images/aa4af24e63032d5d5a609582042f59d6bf99b8848371ea63d40be6ce655be4e0.jpg) +(a) Stylized ImageNet + +![](images/17b03a344d686cfb28d91b880356506d67cd18e5b01a0c0eb4f7cc9058d21eb7.jpg) +(b) Robustness for noise frequency + +Another interesting finding is that the last few layers of MIM reduce the high frequencies even though they only focus on local areas (See Figure 3). We conjecture that MIM implicitly divides ViTs into the encoder-decoder structure and allows intermediate layers to have linearly separable information. In contrast, CL allows the last layer to have such information. This is further elaborated in Figure 11. + +CL is shape-biased, but MIM is texture-biased. 
Based on the results of the Fourier analysis, we assume that CL and MIM each have a bias toward shapes and textures, respectively. To demonstrate this claim, we use Stylized ImageNet (Geirhos et al., 2019), a texture-altered dataset, by using AdaIN (Huang & Belongie, 2017). Figure 8a reports the linear probing results on Stylized ImageNet to evaluate the shape and texture biases of pre-trained models. Compared to the model pre-trained with supervised learning, CL depends more on the shape and MIM depends on texture of images to classify images. In other words, CL is robust to texture changes, and MIM is vulnerable to them. + +![](images/d333916ae8c845554b3935f597f899fe57ac1a7ba7f3e5412aca22a2cfff7673.jpg) +Figure 8: CL is biased toward shape, whereas MIM is biased toward texture. We report the predictive results of models for linear probing tasks. However, we observe consistent results in fine-tuned models (See Figure F.2). Left: Result of classification on Stylized ImageNet. It shows that CL is more shape-biased than MIM and even than the supervised pre-trained model. Vertical lines represent averaged results for the shape categories. We also report the results of supervised ViT with ImageNet-1K class labels for comparison. Right: Accuracy drops on images with frequency-based random noises. MIM shows a more significant amount of accuracy drop than CL with high-frequency noises, demonstrating MIM's texture-biased property. The frequency window size of the frequency-based noise is $0.1\pi$ . +Figure 9: CL exploits low-frequency, but MIM exploits high-frequency. Moreover, a few last layers of CL reduce high-frequency by capturing global patterns. MIM also reduces it even though they capture local patterns, because the later layers behave like decoders. See also Figure 11. + +Figure 8b shows the consistent results. In this experiment, we follow Park & Kim (2022a;b) and measure the decrease in accuracy on the ImageNet dataset with frequency-based random noise. 
The results suggest that CL is robust to high-frequency noises, but MIM is significantly more vulnerable to them. Since high-frequency noises harm the fine details of images, we arrive at the same conclusion that CL is more shape-biased and MIM is texture-biased. This can explain the robustness of CL against adversarial perturbations (Bordes et al., 2022). + +# 4 WHICH COMPONENTS PLAY AN IMPORTANT ROLE? + +The previous sections consistently show through various perspectives that CL exploits image-level global patterns while MIM captures token-level local patterns. This section analyzes pre-trained ViTs from an architectural perspective and shows that the key components in CL and MIM are different. + +![](images/28b264e1dfb9a221edefed3623532e8a409dcf5bd2f5e7438f11ae454028eb23.jpg) +(a) Self-attention + +![](images/f1d6145da6e351ee872975f01fa01b41af6cd669653784b00caefb8832f9a64a.jpg) +(b) Fourier analysis +Figure 10: The explicit decoder architecture of MAE helps ViTs effectively leverage the advantages of MIM. We analyze the encoder and decoder of a pre-trained model with a masking ratio of zero. The left side of each figure represents the encoder and the right side the decoder. Left: The mutual information of MAE is lower than that of SimMIM in the encoder but higher in the decoder. Right: The decoder of MAE captures low-frequency information, and its encoder captures high-frequency information. Moreover, the later layers (excluding the last layer) of MAE do not reduce high-frequency information, while those of SimMIM do. + +Later layers of CL and early layers of MIM are important. According to studies on ViT (Graham et al., 2021; Dai et al., 2021; Park & Kim, 2022b), the later layers use high-level information, and the early layers exploit low-level information. Since CL and MIM each exploit global and local patterns, we expect that the later layers of CL and early layers of MIM play a key role. 
+ +To evaluate the importance of each layer, we measure the linear probing accuracy using intermediate representations with the configuration of Table A.1. In Figure 11, we observe the following properties: First, the linear probing accuracy of MIM is higher than that of CL at the beginning. Conversely, CL outperforms MIM at the end of the model. Such result indicates that the later layers of CL and early layers of MIM play an important role in making linearly separable representations. Second, the accuracy of CL increases with increasing depth as expected, but the accuracy of MIM surprisingly decreases at the end of the model, i.e., the later layers of MIM are not very helpful in separating representations. We explain this observation as a phenomenon in which MIM methods with shallow prediction heads, e.g., SimMIM, use later layers of the backbone as a decoder. Therefore, MIM with a deep self-attention decoder, e.g., MAE (He et al., 2022), can be useful for linear probing performance. Moreover, it also explains why SimMIM's high-frequency component and representational volumes drop in the later layers as shown in Figures 7 and 9. Third, even the highest linear probing accuracy of MIM is lower than that of CL. + +![](images/0f82e99a78a6a5cbc0ac240670d931fd06893c5a8e088b52c6309caecaf9d3ad.jpg) +Figure 11: Later layers of CL and early layers of MIM play a key role. We report linear probing accuracies by using the representations of the intermediate layers. CL outperforms MIM in later layers, and MIM outperforms CL in early layers. + +The explicit decoder helps ViTs further leverage the advantages of MIM. Several previous observations find that the implicit decoder of MIM with a shallow prediction head, such as SimMIM, can impair performance. MAE (He et al., 2022) addresses this problem by introducing deep explicit ViT decoders and reconstructing masked tokens only in the separate decoders. 
+ +In Figure 10, we analyze MAE to understand the properties of decoders more deeply. Figure 10a shows the self-attention behaviors. The results indicate that the mutual information of MAE is lower than that of SimMIM in the later layers of the encoder but higher in the decoder, implying that the decoder reconstructs masked tokens based on its neighborhood tokens. + +Figure 10b shows the results of the Fourier analysis. As explained in Figure 9, the last four layers of SimMIM reduce the high-frequency components. In contrast, the later layers (excluding the last layer) of MAE do not reduce them. Instead, the decoder of MAE prioritizes low-frequency information compared with the encoder, allowing the backbone to efficiently utilize high-frequency information. + +![](images/b7e58b0e0c9a6b67dbc0e7313b85167f8d8dca7c28ae3567fa1fd9acc4b1186b.jpg) +(a) Performance + +![](images/06ff79e5b211bc5fabfa86aff051d6c454f9ecfab754055a85d05fce81ad52d9.jpg) +(b) Self-attention + +![](images/178d8aaae7148ce435789432a3d4773d9246beee239105030dc1d7abda603364.jpg) +(c) Fourier analysis +Figure 12: The simple linear combination of CL (MoCo) and MIM (SimMIM) objectives outperforms the vanilla CL and MIM. $\lambda$ is the importance weight of CL, so $\lambda = 0$ means SimMIM and $\lambda = 1$ means MoCo. Left: "CL + MIM" outperforms CL and MIM in both linear probing and fine-tuning accuracy. Middle: Mutual information of "CL + MIM" decreases at the end of the model, suggesting that the self-attention of later layers collapse into homogeneity and capture the same object shape information. Right: Fourier analysis shows that "CL + MIM" amplifies high frequencies at the beginning and reduces them at the end. It implies that "CL + MIM" exploits high-frequency information at the beginning and low-frequency information at the end. + +# 5 ARE THE TWO METHODS COMPLEMENTARY TO EACH OTHER? 
+ +We present comparative analyses on CL and MIM from three perspectives: self-attention, representation transforms, and the position of important layers. All of our results indicate that CL and MIM train ViTs differently. These differences naturally imply that combining CL and MIM to train a backbone may help leverage the advantages of both methods. + +To show that CL and MIM are complementary, we introduce the simplest way to harmonize CL and MIM by linearly combining two losses, i.e., $\mathcal{L} = (1 - \lambda)\mathcal{L}_{\mathrm{MIM}} + \lambda \mathcal{L}_{\mathrm{CL}}$ where $\mathcal{L}_{\mathrm{MIM}}$ and $\mathcal{L}_{\mathrm{CL}}$ each indicate the losses of MIM and CL, and $\lambda$ is the importance weight of CL. We find that this simple hybrid model trained with combined losses efficiently exploits the strengths of both methods. Figure 12a shows linear probing and fine-tuning accuracy on ImageNet with varying $\lambda$ . Surprisingly, the hybrid models outperform MIM ( $\lambda = 0$ ) and CL ( $\lambda = 1$ ) in both aspects. Figure 12b and Figure 12c can provide insights on how hybrid models behave by analyzing the model with $\lambda = 0.2$ in terms of self-attention in Section 2 and Fourier analysis in Section 3, respectively; both results show that the hybrid model exploits MIM properties in the early layers and CL properties in the later layers. In particular, Figure 12b indicates that the self-attention of the early layers are changed according to the query token but those of the later layers are not. Likewise, Figure 12c shows that the early layers exploit high-frequency, while the later layers try to exploit low-frequency. + +# 6 CONCLUSION + +We conducted a comparative study highlighting various facets of two widely used self-supervised learning methods for vision transformers: contrastive learning (CL) and masked image modeling (MIM). The study demonstrated many opposing properties of the two methods: image information (image-level vs. 
token-level; as in Section 2), feature representations (low-frequency vs. high-frequency; as in Section 3), and lead role components (later layers vs. early layers; as in Section 4). Furthermore, we suggested a possible application that exploits only the benefits from both methods and showed how a combined model can outperform individual methods. + +Future directions. Various future directions can be explored based on our study. We believe that there are better ways than a simple linear combination of CL and MIM objectives. For example, a novel self-supervised learning approach, in which CL is applied in the later layers and MIM in the early layers, can be considered. Moreover, we may extend our findings on self-supervision for multi-stage ViTs, such as PiT (Heo et al., 2021) and Swin (Liu et al., 2021). Another interesting direction is to enhance the individual properties of CL and MIM. Techniques that help CL or MIM learn shapes or textures, respectively, may also improve performance. + +# REFERENCES + +Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. In International Conference on Learning Representations, 2022. +Florian Bordes, Randall Balestriero, and Pascal Vincent. High fidelity visualization of what your self-supervised representation knows about. Transactions on Machine Learning Research, 2022. +Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In International Conference on Computer Vision, 2021. +Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International Conference on Machine Learning, 2020a. +Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2021. +Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. 
Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b. +Xinlei Chen, Saining Xie, and Kaiming He. An empirical study of training self-supervised vision transformers. In International Conference on Computer Vision, 2021. +Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Advances in Neural Information Processing Systems, 2020. +Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. Coatnet: Marrying convolution and attention for all data sizes. Advances in Neural Information Processing Systems, 2021. +Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. +Stéphane d'Ascoli, Hugo Touvron, Matthew L Leavitt, Ari S Morcos, Giulio Biroli, and Levent Sagun. Convit: Improving vision transformers with soft convolutional inductive biases. In International Conference on Machine Learning, 2021. +Robert Geirhos, Patricia Rubisch, Claudio Michaelis, Matthias Bethge, Felix A Wichmann, and Wieland Brendel. Imagenet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness. In International Conference on Learning Representations, 2019. +Benjamin Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Herve Jégou, and Matthijs Douze. Levit: a vision transformer in convnet's clothing for faster inference. In International Conference on Computer Vision, 2021. +Jean-Bastien Grill, Florian Strub, Florent Alché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. 
Advances in neural information processing systems, 2020. +Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, 2017. +Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020. + +Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022. +Byeongho Heo, Sangdoo Yun, Dongyoon Han, Sanghyuk Chun, Junsuk Choe, and Seong Joon Oh. Rethinking spatial dimensions of vision transformers. In International Conference on Computer Vision, 2021. +Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European Conference on Computer Vision, 2016. +Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In Proceedings of the IEEE international conference on computer vision, 2017. +Li Jing, Pascal Vincent, Yann LeCun, and Yuandong Tian. Understanding dimensional collapse in contrastive self-supervised learning. In International Conference on Learning Representations, 2022. +Hanjoo Kim, Minkyu Kim, Dongjoo Seo, Jinwoong Kim, Heungseok Park, Soeun Park, Hyunwoo Jo, KyungHyun Kim, Youngil Yang, Youngkwan Kim, et al. Nsml: Meet the mlaas platform with a real-world case study. arXiv preprint arXiv:1810.09957, 2018. +Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, 2014. +Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. 
Swin transformer: Hierarchical vision transformer using shifted windows. In International Conference on Computer Vision, 2021. +Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. +Namuk Park and Songkuk Kim. Blurs behave like ensembles: Spatial smoothings to improve accuracy, uncertainty, and robustness. In International Conference on Machine Learning, 2022a. +Namuk Park and Songkuk Kim. How do vision transformers work? In International Conference on Learning Representations, 2022b. +Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, 2019. +Maithra Raghu, Thomas Unterthiner, Simon Kornblith, Chiyuan Zhang, and Alexey Dosovitskiy. Do vision transformers see like convolutional neural networks? Advances in Neural Information Processing Systems, 2021. +Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 2015. +Alexander Strehl and Joydeep Ghosh. Cluster ensembles—a knowledge reuse framework for combining multiple partitions. Journal of machine learning research, 2002. +Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016. +Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. In European Conference on Computer Vision, 2020a. + +Yonglong Tian, Chen Sun, Ben Poole, Dilip Krishnan, Cordelia Schmid, and Phillip Isola. 
What makes for good views for contrastive learning? Advances in Neural Information Processing Systems, 33:6827-6839, 2020b. +Yuandong Tian, Xinlei Chen, and Surya Ganguli. Understanding self-supervised learning dynamics without contrastive pairs. In International Conference on Machine Learning, 2021. +Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 2008. +Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8769-8778, 2018. +Shaoru Wang, Jin Gao, Zeming Li, Jian Sun, and Weiming Hu. A closer look at self-supervised lightweight vision transformers. arXiv preprint arXiv:2205.14443, 2022. +Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong, and Lei Li. Dense contrastive learning for self-supervised visual pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021. +Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022a. +Yixuan Wei, Han Hu, Zhenda Xie, Zheng Zhang, Yue Cao, Jianmin Bao, Dong Chen, and Baining Guo. Contrastive learning rivals masked image modeling in fine-tuning via feature distillation. arXiv preprint arXiv:2205.14141, 2022b. +Zhenda Xie, Zigang Geng, Jingcheng Hu, Zheng Zhang, Han Hu, and Yue Cao. Revealing the dark secrets of masked image modeling. arXiv preprint arXiv:2205.13543, 2022a. +Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022b. 
+Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917, 2022. +Sangdoo Yun, Dongyoon Han, Seong Joon Oh, Sanghyuk Chun, Junsuk Choe, and Youngjoon Yoo. Cutmix: Regularization strategy to train strong classifiers with localizable features. In International Conference on Computer Vision, 2019. +Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. In International Conference on Learning Representations, 2018. +Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal of Computer Vision, 127(3):302-321, 2019. +Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. ibot: Image bert pre-training with online tokenizer. International Conference on Learning Representations, 2022. + +# A SETUP + +We build the configurations based on Xie et al. (2022b) for fine-tuning and Caron et al. (2021) for linear probing. Table A.1 summarizes the configurations. Most analyzes of ViTs use the official ViT-B pre-trained models, and some analyzes use ViT-{S, L} pre-trained with official configurations but epochs of 100. Due to memory limitations, ViT-L is pre-trained with a quarter batch size of the other models. The hybrid model introduced in Section 5 uses the ViT backbone architecture of Xie et al. (2022b) and employs a configuration based on their work for pre-training as shown in Table A.1. For data augmentation and regularization, we adopt widely used settings, e.g., Randaugment (Cubuk et al., 2020), label smoothing (Szegedy et al., 2016), mixup (Zhang et al., 2018), cutmix (Yun et al., 2019), stochastic depth (Huang et al., 2016). Layer decay (Bao et al., 2022) is also used for fine-tuning. 
Neural network models are implemented in PyTorch (Paszke et al., 2019). The code for analysis is available at https://github.com/naver-ai/cl-vs-mim. All experiments use {1, 4, 8} NVIDIA A100 Tensor Core GPU. NSML (Kim et al., 2018) has been used for experiments. + +Table A.1: Training settings. + +
CONFIGURATIONLinear ProbingFine-tuningPre-training
optimizersgdadamwadamw
base learning rate1.0e-01.25e-31.0e-4
weight decay0.050.050.05
batch size1k2k1k
training epoch50100100
learning rate schedulecosinecosinemultistep
warmup epoch02010
warmup schedule·linearlinear
randaugment·9, 0.59, 0.5
label smoothing·0.10.1
mixup·0.80.8
cutmix·1.01.0
stochastic depth·0.10.1
layer decay·0.651.0
gradient clip·5.05.0
+ +# B RELATED WORK + +CL is a method based on comparing the global projection of two different random views. However, this approach usually suffers from the collapsing problem, where all representations collapse into constant solutions. To solve this problem, various methods such as negative samples and InfoNCE (Oord et al., 2018) have been proposed. Negative samples are an effective technique to avoid the collapsing problems, but they cause dimensional collapse (Jing et al., 2022) and require extra large batches (Chen et al., 2020a) or memory queues (He et al., 2020; Chen et al., 2020b) to retrieve them. We mainly analyze MoCo v3 (Chen et al., 2021), since the method includes these de facto standard components—global projection, random views, and negative samples. + +Some SSL methods, e.g. Grill et al. (2020); Caron et al. (2021), do not use negative samples and use the projections of their Siamese representations as the positives. Such self-distillation has been explored theoretically and empirically (Chen & He, 2021; Tian et al., 2021) to prevent the collapsing problem, but we do not discuss the distillation scheme in this paper. Wei et al. (2022b) shows that feature distillation improves the fine-tuning performance of CL by diversifying attention ranges; this observation is consistent with our findings. While they focus on distillation to improve CL, we reveal the fundamental nature of self-supervised learning by rigorously comparing CL and MIM. + +![](images/0a7defb6cf5ca448cbeadfa09eece56e710d4e928fa34c8110a31f6063707dd5.jpg) +(a) Attention distance + +![](images/2940011fb68852857470985169c458b291f5397c3a9df53a321760de5a4350d7.jpg) +(b) Normalized MI + +![](images/6b8d60f909c96901f45026634f8622fb176aa05b7b617722bdc2a9a000c3b65c.jpg) +(a) Attention distance +Figure C.2: ViTs exhibit consistent self-attention patterns, regardless of their size. 
To better understand these patterns, we visualize the self-attention behaviors of three ViTs—ViT-{Ti, S, B}—using two metrics: attention distance and normalized mutual information (MI). Left: All self-attention of MoCo capture global patterns in the later layers. In contrast, the self-attention of SimMIM capture local patterns. Right: Likewise, all self-attention maps of MoCo collapse into homogeneity in the later layers. + +![](images/f7b27a38746bf54f92540391b3c2ca815b95f136dd50d7cbae60b838d52255ef.jpg) +Figure C.1: MIM and CL methods each have consistent properties. To show this, we visualize self-attention behaviors in terms of attention distance and normalized mutual information (MI). SimCLR\*, which was introduced in Chen et al. (2021), stands for MoCo with a momentum coefficient of 0. Left: The attention distance of CL methods (namely MoCo, SimCLR\*, and DINO) is higher than that of MIM methods (namely SimMIM, BEiT, and MAE). This suggests that CL methods consistently capture global patterns. Right: The normalized mutual information of MIM is higher than that of CL; i.e., the self- attentions of MIM are more correlated with query tokens than CL. +(b) Normalized MI + +Compared with CL, MIM has been rarely explored in vision tasks. Various methods, such as histograms of oriented gradients (Wei et al., 2022a) and tokenization (Bao et al., 2022), have been proposed as part of porting masked language models to the image domain with ViTs. Among them, SimMIM (Xie et al., 2022b) and MAE (He et al., 2022) are simple yet effective methods to reconstruct masked tokens without complicated pretext tasks. Because of its simplicity and superior performance in downstream operations, MIM is attracting attention as a promising technique in image processing. + +Nevertheless, we find hints suggesting that CL and MIM utilize different aspects of the data, making them complementary. For example, Zhou et al. (2022); Wang et al. (2021); Yu et al. 
(2022) achieve high predictive performance by harmonizing the image-level and the token-level self-supervised learning. Xie et al. (2022a) also observe that, unlike supervised pre-trained models or CL, self-attention in SimMIM focus locally; this is a consistent result with our findings. + +# C OUR INSIGHTS ARE GENERALIZABLE TO VARIOUS MODELS + +In the main text, we analyze ViT-B pre-trained using MoCo and SimMIM. We observe consistent characteristics across various sizes of ViTs that have been pre-trained using other self-supervised learning methods. To support this claim, we delve into the properties of self-attention. + +![](images/227f272cc4f03f82eeb6e01630535a451968a203c4b06945b12f38209dc45e73.jpg) +(a) Standard deviations + +![](images/6a47cec191d37145789159dcc512d224238fa7efc778426247a6f4d9dec3ca38.jpg) +(b) Distribution at $3^{\mathrm{rd}}$ layer +Figure E.1: The presence of an outlier head in MoCo raises the average of normalized mutual information. This observation explains how the normalized mutual information in a couple of MoCo's self-attention layers is similar to or even surpasses that in SimMIM. Left: We present the standard deviation of the normalized mutual information. As depicted in this figure, the standard deviation in SimMIM remains relatively consistent across different depths. In contrast, the standard deviation in MoCo's $3^{\mathrm{rd}}$ or $4^{\mathrm{th}}$ self-attention layer is notably higher than that in SimMIM. Right: Distribution of mutual information for the third self-attention layer head. The visualization of this kernel density estimation shows that MoCo has an outlier head with mutual information close to 1.0. The red rectangles $(\square)$ and blue triangles $(\triangle)$ refer to the mutual information of heads in MoCo and SimMIM, respectively. 
+ +# D LOCALITY INDUCTIVE BIAS IMPROVES FINE-TUNING ACCURACY OF CL + +In Section 2, we demonstrate that the homogeneity of self-attention map, i.e., attention collapse of CL, helps ViT distinguish images but harms fine-tuning accuracy. As a result, we anticipate that incorporating a locality inductive bias into CL will improve fine-tuning accuracy but degrade linear probing accuracy. One simple method to inject locality into self-attention is to limit the receptive field of self-attention by using attention masks. + +Figure D.1 shows the predictive performance of MoCo with restricted local self-attention. As expected, the results are similar to the performance of MIM; As the kernel size decreases, the linear probing accuracy decreases but the fine-tuning accuracy increases. These results are consistent with our findings. + +# E A CLOSER LOOK AT + +# THE ROLE OF SELF-SUPERVISED VIT LAYERS + +![](images/6528fc13531a5561df61e7e99e0a8b810a3b28433ca7b19f481fa92ee89206a5.jpg) +Figure C.1 visualizes the self-attention behaviors of different self-supervised learning methods in terms of attention distance and normalized mutual information. As depicted in the figure, all CLs and MIMs exhibit consistent properties. Similarly, Figure C.2 demonstrates that various sizes of models also demonstrate consistent properties. +Figure D.1: Locality inductive bias harms linear probing but improves fine-tuning. We report the linear probing and fine-tuning accuracy of MoCo with restricted self-attention via attention masks. + +The main text provides the key characteristics of CL and MIM. This section delves deeper into the details not covered in the main text to provide a more comprehensive understanding of the subjects. + +The role of the early modules. Figures 3 and 4 suggest that most layers of MoCo capture global patterns and have only a weak correlation with query tokens. However, one or two of MoCo layers exhibit unusual behavior. 
For example, the $3^{\mathrm{rd}}$ layer of MoCo focuses on local areas and its self-attention map is dependent on the query. We explore this property in more detail. + +![](images/41906592df7f4da9c6950d3212fe997066b0f7d7667a573cc916e1c431aea0f0.jpg) +(a) MoCo + +![](images/ea0f156b766cf9413d5aa69695375a437bb4c9362f681e9a2fc87e055aafbff9.jpg) +(b) SimMIM +Figure E.2: The tokens of MoCo form a cluster for each image, while those of SimMIM are intermingled. This aligns with the finding that, compared to SimMIM, MoCo is linearly separable. To demonstrate this property, we visualize 3,528 tokens (196 tokens $\times$ 18 images) from the representations of the last layer via t-SNE, and find that a consistent pattern is observed even in the representations of the intermediate layers. The colors represent three different classes. See also Figures 6 and 7. + +Figure E.1a provides the variance of normalized mutual information with respect to heads. As the results show, the variance of SimMIM is consistent across all depths whereas that of MoCo is not. In particular, the $3^{\mathrm{rd}}$ layer of MoCo has high variance even though other layers do not. This suggests that, while most of MoCo's self-attention heads capture global patterns and have weak correlation with query tokens, some heads deviate from this behavior and exhibit a different pattern. + +Figure E.1b shows the distribution of normalized mutual information among heads in the $3^{\mathrm{rd}}$ layer to analyze this phenomenon. In this figure, we use kernel density estimation with Gaussian kernel to visualize the distribution. The results reveal several outlier heads in MoCo with mutual information close to 1.0. As a result, these outliers significantly raises the average value of normalized mutual information. + +A comprehensive view through visualization of tokens from multiple images. Figure 6 visualizes how self-attention layers transform tokens from one or two images in representation space. 
The figure demonstrates that MoCo transforms all tokens in union while SimMIM transforms them individually. As a result, MoCo separates the representations at the image-level and SimMIM separates them at the token-level. + +The t-SNE visualization (Van der Maaten & Hinton, 2008) in Figure E.2 provides consistent results and offers an even more comprehensive perspective. In this figure, we visualize the last representations of 3528 tokens from 18 images that belong to three different classes. The visualization demonstrates that MoCo separates the representations into distinct classes and even images, while maintaining the tokens close together in compact image clusters. On the other hand, SimMIM separates tokens from images, resulting in a wide representation space for each image, but the images or even classes may be challenging to linearly distinguish. + +The first layer of MoCo aggregates tokens into compact clusters. Figures 6 and 7 show that all modules, except the first module, in MoCo behave consistently. However, we observe that MoCo's first module behaves differently from the others. We elaborate on the behaviour of this first module below. + +Figure E.3a shows the qualitative visualization of tokens for a sample image, similar to Figure 6. This visualization shows that the first MoCo layer aggregates tokens into compact clusters. Although this figure only uses a single image, the layer aggregates all images into a small representation space as well. In terms of singular values, we observe consistent results. Similar to Figure 7, Figures E.3b and E.3c report the second largest log singular value, instead of the relative log singular value, to investigate the absolute volume of the representations. As expected, most layers in both MoCo and SimMIM increase the singular value, but surprisingly, the first layer of MoCo reduces the singular value, meaning that the volumes of representations are decreased at both the token-level and image-level. 
Based on these observations, we conjecture that the first module of MoCo behaves like an embedding component. + +![](images/eb3d6f9af5ba9611bdfbe6c912a91ee24c14f2feb91040b2f6281e0bfe387850.jpg) +Figure E.3: The first layer of MoCo clumps tokens together. We demonstrate this property from two perspectives: qualitative visualization and singular value of token distribution. Left: Similar to Figure 6, we visualize tokens of a sample image in a representation space. The blue and red data points represent the tokens before and after the self-attention transformation. As shown in this figure, the first self-attention layer clumps tokens into a compact cluster. Middle and Right: Similar to Figure 7, we visualize the second largest log singular value (not $\Delta$ log singular value) for depth. The singular value spectra demonstrate consistent results; the first layer of MoCo (gray area) not only clumps tokens but also images into a compact cluster. + +![](images/d64ede6f1b47ef1ed7c25065f01279e789d17e7c6c742fae29b0a2132fa6dec2.jpg) + +![](images/0a8d7bc5980b846efd6f7189a450e1633c6199cb74793172b02bacd8d7c9461b.jpg) + +![](images/88933cf42fcccccde418631f6f1e1e9d14fe0d056bbd93b74833adfe7c531890.jpg) +Figure F.1: Self-attention and representations in fine-tuned models exhibit consistency with those of pre-trained models. Similar to Figures 4 and 9, we present the normalized mutual information and the Fourier analysis results of fine-tuned models. The abbreviation "ft" stands for "fine-tuned model." Left: Similar to pre-trained models, the mutual information of MoCo's self-attention maps is generally lower compared to that of SimMIM. However, it is noteworthy that the mutual information of the later self-attention maps in SimMIM decreases significantly. This is because the later layers of a model trained with supervision or fine-tuning tend to capture global information. Right: Similarly, SimMIM utilizes higher frequency information than MoCo. 
+ +![](images/201e8fdeec51703e6d99d8f341cb390d5af57c3dd2a701c46eb2db4e81fa7963.jpg) + +# F FINE-TUNED MODELS INHERIT THE PROPERTIES OF PRE-TRAINED MODELS + +The main text focuses on highlighting the key properties of pre-trained models. This section demonstrates that these properties are also utilized by fine-tuned models. As a result, we can safely apply the insights gained from the main text to various situations. + +Consistent results in self-attention and Fourier analysis. Figures 3 and 4 demonstrate that MoCo captures global areas and that its self-attention are less related to the query tokens, compared with SimMIM. In addition, Figure 9 shows that MoCo captures low-frequency information as opposite to SimMIM. These results are consistent in the fine-tuning scheme. + +Figure F.1a reveals the self-attention behaviours of fine-tuned MoCo and SimMIM in terms of normalized mutual information. Similar to pre-trained models, the fine-tuned self-attention maps of + +![](images/ae0d609fe3927c5ee5f0360d0d1cc99ec1c299dc6fad161ea59db333747faa63.jpg) +(a) Stylized ImageNet + +![](images/6dd58ff76341c3cb4e49f52e1d68b72d5e52b09730b9b39e843e3415f0658cfe.jpg) +(b) Robustness for noise frequency +Figure F.2: Fine-tuned ViTs inherit the robustness against frequency-based noise. Similar to Figure 8b, we measure the decrease in the accuracy of ViTs fine-tuned with MoCo and SimMIM. Left: Even with fine-tuned ViTs, MoCo is relatively shape-biased and SimMIM relatively texture-biased. This bias is just less apparent than in linear probing models. Right: The robustness against frequency-based random noise also suggests the same: MoCo is robust against high-frequency noise, but SimMIM is not. In conclusion, fine-tuned models inherit the properties of linear probing models. + +MoCo have generally lower mutual information compared to those of SimMIM. 
The only significant difference is that the mutual information of the later self-attention maps in fine-tuned SimMIM decreases significantly, as later layers in models trained with supervision or fine-tuning tend to capture more global information. As a result, the gap between the two methods is reduced. This is also reflected in the consistent results of Fourier analysis as shown in Figure F.1b. In this analysis, SimMIM captures higher-frequency information compared to MoCo in fine-tuning scheme as well. However, the later layers of SimMIM attempt to capture low-frequency information. Therefore, the gap of fine-tuned models is smaller than that of pre-trained models. + +CL is shape-biased and MIM is texture-biased in fine-tuning scheme. In Figure 8, we demonstrate that linear probing model with CL (MoCo) is more shape-biased and that with MIM (SimMIM) is texture-biased, compared with each other. As in the experiment, we calculate the classification results of ImageNet fine-tuned MoCo and SimMIM on Stylized-ImageNet, and measure the decrease in accuracy against frequency-based random noise. As we would expect, Figure F.2 shows that the property also extends to the fine-tuned model. Even though we still observe the difference between MoCo and SimMIM, the performance gap between MoCo and SimMIM is quite reduced compared to the gap between the linear probing models. + +Later layers of CL and early layers of MIM are important in fine-tuning phases. As shown in Figure 11, the later layers of the CL and the early layers of the MIM are linearly separable. This finding suggests that these layers are significant, however, it does not provide direct evidence that such properties are preserved during fine-tuning phases. We demonstrate that these layers play a crucial role in fine-tuning phases as well. + +To support this claim, we conduct a study to measure the accuracy drop of fine-tuned models using pretrained models with a few blocks initialized. 
As shown in Figure F.3a, the results indicate that initializing a few early blocks in the pre-training models of SimMIM significantly harms the fine-tuning accuracy, compared to MoCo. These observations suggest that early layers of SimMIM play an important role in fine-tuning. Conversely, Figure F.3b shows that initializing later + +![](images/ca4a5bdcfd162bc5e03d71ed980045dda92682b4358c070ecb137d3bd1b4fb84.jpg) +(a) Early layer +(b) Later layer +Figure F.3: The later layers of CL and early layers of MIM play a key role in the fine-tuning scheme. To show this, we initialize a few blocks and measure the decrease in the fine-tuning accuracy of pretrained models. + +blocks in the pre-training models of SimMIM does not significantly harm the fine-tuning accuracy, suggesting that they are not important in fine-tuning compared with MoCo. + +One limitation of this experiment is the evaluation of the accuracy drop in a single run. Since the accuracy drop of MoCo is marginally higher than that of SimMIM at the first initialization depth in Figure F.3b, additional experiments may improve the results. In this experiment, we utilized the same fine-tuning settings for both MoCo and SimMIM; but experiments with fine-tuning settings tailored to each method may provide further insight. + +# G HYBRID MODELS OUTPERFORM CL AND MIM IN DOWNSTREAM TASKS + +The claim that CL and MIM are complementary is demonstrated only on ImageNet in Section 5. To validate this claim in tasks beyond ImageNet, we evaluated the pre-trained models of the hybrid method introduced in Section 5 for another classification task and a semantic segmentation task. In particular, we measured the accuracy on iNaturalist 2018 (Van Horn et al., 2018) and the mIoU on ADE20K (Zhou et al., 2019). As shown in Table G.1, the hybrid + +Table G.1: Hybrid models of CL and MIM outperform both CL and MIM in various tasks. + +
λ (IMPORTANCE OF CL) | iNat-18 | ADE20k
0.0 (SimMIM) | 62.1 | 35.4
0.2 (SimMIM + MoCo) | 68.8 | 42.2
1.0 (MoCo) | 66.2 | 39.7
+ +model of SimMIM and MoCo outperforms both SimMIM and MoCo in various downstream tasks. Therefore, we conclude that the effectiveness of this claim extends beyond ImageNet. \ No newline at end of file diff --git a/2023/What Do Self-Supervised Vision Transformers Learn_/images.zip b/2023/What Do Self-Supervised Vision Transformers Learn_/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..84ab4efe550f2e3c1dd915a7744343eb6d8f761e --- /dev/null +++ b/2023/What Do Self-Supervised Vision Transformers Learn_/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e579e6f9427fede5955ce02145e31798435b5321e2546e108b3baa3c1eb75f1e +size 792426 diff --git a/2023/What Do Self-Supervised Vision Transformers Learn_/layout.json b/2023/What Do Self-Supervised Vision Transformers Learn_/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f6ffc97bba0643735b347d02416c7654b32eb256 --- /dev/null +++ b/2023/What Do Self-Supervised Vision Transformers Learn_/layout.json @@ -0,0 +1,11860 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 494, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 494, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 494, + 118 + ], + "type": "text", + "content": "WHAT DO SELF-SUPERVISED VISION TRANSFORMERS LEARN?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "spans": [ + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "text", + "content": "Namuk Park" + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "text", + "content": " Wonjae Kim" + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "text", + "content": " Byeongho Heo" + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "text", + "content": " Taekyung Kim" + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "text", + "content": " Sangdoo Yun" + }, + { + "bbox": [ + 110, + 133, + 465, + 146 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 146, + 312, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 146, + 312, + 157 + ], + "spans": [ + { + "bbox": [ + 112, + 146, + 312, + 157 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 146, + 312, + 157 + ], + "type": "text", + "content": "Prescient Design, Genentech " + }, + { + "bbox": [ + 112, + 146, + 312, + 157 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 112, + 146, + 312, + 157 + ], + "type": "text", + "content": "NAVER AI Lab" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 159, + 480, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 159, + 480, + 169 + ], + "spans": [ + { + "bbox": [ + 
115, + 159, + 480, + 169 + ], + "type": "text", + "content": "park.namuk@gene.com {wonjae.kim,bh.heo,taekyung.k,sangdoo.yun}@navercorp.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 276, + 198, + 334, + 209 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 198, + 334, + 209 + ], + "spans": [ + { + "bbox": [ + 276, + 198, + 334, + 209 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 222, + 471, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 222, + 471, + 399 + ], + "spans": [ + { + "bbox": [ + 140, + 222, + 471, + 399 + ], + "type": "text", + "content": "We present a comparative study on how and why contrastive learning (CL) and masked image modeling (MIM) differ in their representations and in their performance of downstream tasks. In particular, we demonstrate that self-supervised Vision Transformers (ViTs) have the following properties: (1) CL trains self-attention to capture longer-range global patterns than MIM, such as the shape of an object, especially in the later layers of the ViT architecture. This CL property helps ViTs linearly separate images in their representation spaces. However, it also makes the self-attention collapse into homogeneity for all query tokens and heads. Such homogeneity of self-attention reduces the diversity of representations, worsening scalability and dense prediction performance. (2) CL utilizes the low-frequency signals of the representations, but MIM utilizes high-frequencies. Since low- and high-frequency information respectively represent shapes and textures, CL is more shape-oriented and MIM more texture-oriented. (3) CL plays a crucial role in the later layers, while MIM mainly focuses on the early layers. Upon these analyses, we find that CL and MIM can complement each other and observe that even the simplest harmonization can help leverage the advantages of both methods." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 419, + 206, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 419, + 206, + 431 + ], + "spans": [ + { + "bbox": [ + 106, + 419, + 206, + 431 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 443, + 506, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 443, + 506, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 506, + 500 + ], + "type": "text", + "content": "Contrastive Learning (CL) (He et al., 2020; Chen et al., 2020a;b; 2021) has been the most popular self-supervised learning methods until recently. It aims to learn the invariant semantics of two random views (Tian et al., 2020a;b) by making global projections of representations similar for positive samples and dissimilar for negative samples. Since CL exploits the globally projected representations to contrast each other, it can be deemed as an \"image-level\" self-supervised learning approach." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 504, + 506, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 504, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 506, + 594 + ], + "type": "text", + "content": "Deviating from CL, masked image modeling (MIM) (Bao et al., 2022; Xie et al., 2022b; He et al., 2022) has risen as a strong competitor of CL in the era of Vision Transformers (ViTs) (Dosovitskiy et al., 2021) with its impressive performances of downstream tasks. MIM trains ViTs by reconstructing the correct semantics of masked input patches. Unlike CL, it learns the semantics of patch tokens and this can be deemed as a \"token-level\" self-supervised learning approach. Since MIM outperforms CL in fine-tuning accuracy, it may appear prima facie as a more effective pre-training method than CL. 
However, a different trend is observed for linear probing accuracy with CL outperforming MIM (See Figure 1). For further exposition on CL and MIM, we refer the reader to Appendix B." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 597, + 507, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 507, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 507, + 676 + ], + "type": "text", + "content": "Then, which method—CL or MIM—should we use for the self-supervised learning of ViTs? Although both methods are widely used, little is known about what they learn. This paper sheds light on their nature by showing that ViTs trained through CL and MIM learn opposite knowledge. In particular, we raise questions to better understand self-supervised learning, and then find the answers that can potentially affect future improvements. The questions posed can be divided into the following properties of Vision Transformers: the behavior of self-attention, the transformation of the representations, and the position of lead role components. Our key questions and findings are elaborated below." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 681, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 506, + 715 + ], + "type": "text", + "content": "How do self-attention behaviors? (Section 2) We find that CL primarily captures global relationships, while MIM captures local relationships. This implies that the representations of CL contain more global patterns, such as object shapes, than those of MIM. 
On the one hand, this property helps" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 370, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 370, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 370, + 731 + ], + "type": "text", + "content": "*Most of this work was done while the author was at Naver AI Lab." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 79, + 242, + 195 + ], + "blocks": [ + { + "bbox": [ + 111, + 79, + 242, + 195 + ], + "lines": [ + { + "bbox": [ + 111, + 79, + 242, + 195 + ], + "spans": [ + { + "bbox": [ + 111, + 79, + 242, + 195 + ], + "type": "image", + "image_path": "fb19c61ab1e5f8dba96ca35eba9a9981210dcc46fda949229d28dd8d16bd1885.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 201, + 506, + 313 + ], + "lines": [ + { + "bbox": [ + 104, + 201, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 506, + 313 + ], + "type": "text", + "content": "Figure 1: CL outperforms MIM in linear probing and small model regimes. In contrast, MIM excels in fine-tuning, large model regimes, and dense prediction. Red squares (■) denote CL, and blue triangles (▲) denote MIM. 
By default, we report the performance of ViT-B trained or pretrained on ImageNet-1K. We use the results from original papers and He et al. (2022) for object detection. Regarding the scaling experiment, we report the results that we reproduced based on official configurations except with 100 epochs, marking them as " + }, + { + "bbox": [ + 104, + 201, + 506, + 313 + ], + "type": "inline_equation", + "content": "\\mathrm{MoCo}^{\\dagger}" + }, + { + "bbox": [ + 104, + 201, + 506, + 313 + ], + "type": "text", + "content": " and SimMIM†. Left: CL outperforms MIM in linear probing but underperforms in fine-tuning. Middle: CL outperforms MIM in small model regimes (ViT-Ti and ViT-S), and MIM shows superior scalability in large model regimes (ViT-L and ViT-H). Right: MIM outperforms CL in the dense prediction downstream tasks, such as object detection with Mask R-CNN (He et al., 2017) on COCO (Lin et al., 2014)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 244, + 79, + 372, + 194 + ], + "blocks": [ + { + "bbox": [ + 244, + 79, + 372, + 194 + ], + "lines": [ + { + "bbox": [ + 244, + 79, + 372, + 194 + ], + "spans": [ + { + "bbox": [ + 244, + 79, + 372, + 194 + ], + "type": "image", + "image_path": "8e526279a352ed99d5c45a57f0a7abf4002aaab19db924697413ba84fa35bdc7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 373, + 80, + 501, + 194 + ], + "blocks": [ + { + "bbox": [ + 373, + 80, + 501, + 194 + ], + "lines": [ + { + "bbox": [ + 373, + 80, + 501, + 194 + ], + "spans": [ + { + "bbox": [ + 373, + 80, + 501, + 194 + ], + "type": "image", + "image_path": "9174d5157db5599272dee2a00a1acf8c538e0e673d696d0f88c2d45b4285662c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 321, + 506, + 399 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 104, + 321, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 321, + 506, + 399 + ], + "type": "text", + "content": "CL recognize objects and distinguish images. On the other hand, however, it also suggests that CL struggles to preserve local information. In particular, we observe that self-attention of CL in the later layers for all query tokens and heads collapse into homogeneous attention maps. In such cases, most self-attention maps focus on object boundaries, meaning that they can capture object shapes but may lose interaction diversity between tokens. Consequently, CL and MIM each have advantages over different tasks: CL works well for linear probing and classification tasks with smaller models, whereas MIM outperforms CL in fine-tuning and dense prediction tasks with larger models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 404, + 504, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 404, + 504, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 404, + 504, + 504 + ], + "type": "text", + "content": "How are representations transformed? (Section 3) CL transforms representations mainly based on image-level information, and its self-attentions collect information on object shape over entire tokens. This process makes tokens similar rather than diversifying them. As a result, CL distinguishes images well but has difficulty distinguishing tokens. On the contrary, MIM preserves and amplifies token-level information. Thus, the self-attentions for each token are substantially different and prohibit each token from including redundant information. We observe the consistent property from our Fourier analysis: CL primarily utilizes the low-frequency signals, but MIM utilizes high-frequencies. This observation suggests that CL is shape-biased and MIM is texture-biased. In sum, self-supervised models trained with CL and MIM learn the representations in different levels of detail." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 509, + 506, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 506, + 576 + ], + "type": "text", + "content": "Which components play an important role? (Section 4) Analyses of the importance of each CL and MIM layer demonstrate that the later layers in CL and early layers in MIM play a key role. We interpret this as a consistent observation since early layers are usually known to capture low-level features—e.g., local patterns, high-frequency signals, and texture information—and later layers capture global patterns, low-frequency signals, and shape information (Dosovitskiy et al., 2021; Raghu et al., 2021; d'Ascoli et al., 2021; Graham et al., 2021; Dai et al., 2021; Park & Kim, 2022b)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 580, + 504, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 580, + 504, + 626 + ], + "spans": [ + { + "bbox": [ + 104, + 580, + 504, + 626 + ], + "type": "text", + "content": "From the above analyses and insights, we find that CL and MIM can complement each other and show in Section 5 that even the simplest implementation, such as a linear combination of CL and MIM objectives, can take advantage of both methods. Surprisingly, the hybrid models outperform those pre-trained with either CL or MIM both in terms of fine-tuning and linear probing accuracy." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 640, + 324, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 640, + 324, + 653 + ], + "spans": [ + { + "bbox": [ + 105, + 640, + 324, + 653 + ], + "type": "text", + "content": "2 HOW DO SELF-ATTENTIONS BEHAVE?" 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "content": "We point out that CL and MIM may not be silver bullets for all tasks, as shown in Figure 1. CL generally outperforms MIM in linear probing, while MIM dominates CL in the fine-tuning scheme. However, when we dissect the size of the model, CL outperforms MIM after fine-tuning for small models (cf. (Wang et al., 2022)), while MIM performs better on large models. Also, MIM yields effective representations for dense prediction tasks, such as object detection, but CL falls short on those tasks. This section explains these phenomena by investigating the behavior of self-attention." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 79, + 203, + 167 + ], + "blocks": [ + { + "bbox": [ + 116, + 79, + 203, + 167 + ], + "lines": [ + { + "bbox": [ + 116, + 79, + 203, + 167 + ], + "spans": [ + { + "bbox": [ + 116, + 79, + 203, + 167 + ], + "type": "image", + "image_path": "bf35728974a9349d03d0e62edcedf24d8b55abf4d06e5b006a0e6d0de5b5a279.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 205, + 93, + 206, + 121 + ], + "lines": [ + { + "bbox": [ + 205, + 93, + 206, + 121 + ], + "spans": [ + { + "bbox": [ + 205, + 93, + 206, + 121 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 206, + 129, + 235, + 175 + ], + "blocks": [ + { + "bbox": [ + 206, + 129, + 235, + 175 + ], + "lines": [ + { + "bbox": [ + 206, + 129, + 235, + 175 + ], + "spans": [ + { + "bbox": [ + 206, + 129, + 235, + 175 + ], + "type": "image", + "image_path": "4b948ba1de2243db01e228d1c6dccedc84dd877ca9996c9111d42b7fefe4fc74.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 178, + 246, + 188 + ], + "lines": [ + { + "bbox": [ + 211, + 178, + 246, + 188 + ], + "spans": [ + { + "bbox": [ + 211, + 178, + 246, + 188 + ], + "type": "text", + "content": "Depth " + }, + { + "bbox": [ + 211, + 178, + 246, + 188 + ], + "type": "inline_equation", + "content": "= 1" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 253, + 79, + 282, + 125 + ], + "blocks": [ + { + "bbox": [ + 253, + 79, + 282, + 125 + ], + "lines": [ + { + "bbox": [ + 253, + 79, + 282, + 125 + ], + "spans": [ + { + "bbox": [ + 253, + 79, + 282, + 125 + ], + "type": "image", + "image_path": "0d2c4ae36b84acbb38247d9f530838052fed9d70228f569bc7c290b4870e56ea.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 253, + 129, + 282, + 175 + ], + "blocks": [ + { + "bbox": [ + 253, + 129, + 282, + 175 + ], + "lines": [ + { + "bbox": [ + 253, + 129, + 282, + 175 + ], + "spans": [ + { + "bbox": [ + 253, + 129, + 282, + 175 + ], + "type": "image", + "image_path": "2cd9f91e1dbd7f253962c304f777c5b901582ff44617837422fd4c29956eeb42.jpg" + } + ] + } + ], + "index": 6, + 
"angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 258, + 178, + 293, + 189 + ], + "lines": [ + { + "bbox": [ + 258, + 178, + 293, + 189 + ], + "spans": [ + { + "bbox": [ + 258, + 178, + 293, + 189 + ], + "type": "text", + "content": "Depth " + }, + { + "bbox": [ + 258, + 178, + 293, + 189 + ], + "type": "inline_equation", + "content": "= 4" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 258, + 195, + 293, + 205 + ], + "lines": [ + { + "bbox": [ + 258, + 195, + 293, + 205 + ], + "spans": [ + { + "bbox": [ + 258, + 195, + 293, + 205 + ], + "type": "text", + "content": "(a) MoCo" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 214, + 506, + 293 + ], + "lines": [ + { + "bbox": [ + 104, + 214, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 214, + 506, + 293 + ], + "type": "text", + "content": "Figure 2: Self-attention of CL (MoCo) capture global relationships, but they collapse into homogeneous attention maps for all query tokens and heads. Self-attention of MIM (SimMIM) mainly focus on local areas. We visualize the attention maps for two different query tokens in the beginning through the end layers. We omit the results for self-attention heads, which show mostly consistent results. Left: Self-attention of CL capture global patterns and the shape of an object. However, all attention maps capture the same shape information regardless of the query tokens. Right: Self-attention of MIM capture local patterns and are correlated with query tokens." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 299, + 79, + 345, + 125 + ], + "blocks": [ + { + "bbox": [ + 299, + 79, + 345, + 125 + ], + "lines": [ + { + "bbox": [ + 299, + 79, + 345, + 125 + ], + "spans": [ + { + "bbox": [ + 299, + 79, + 345, + 125 + ], + "type": "image", + "image_path": "cd87e3ce9fd95db6d96f4acab5aa416a349ecc5e66aa1a73ab836bb3008a4f80.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 301, + 129, + 345, + 175 + ], + "blocks": [ + { + "bbox": [ + 301, + 129, + 345, + 175 + ], + "lines": [ + { + "bbox": [ + 301, + 129, + 345, + 175 + ], + "spans": [ + { + "bbox": [ + 301, + 129, + 345, + 175 + ], + "type": "image", + "image_path": "4bf05f6f2707473d618fa5bbe36a08de93104499a912f0053ed63910785ff73b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 303, + 178, + 341, + 189 + ], + "lines": [ + { + "bbox": [ + 303, + 178, + 341, + 189 + ], + "spans": [ + { + "bbox": [ + 303, + 178, + 341, + 189 + ], + "type": "text", + "content": "Depth " + }, + { + "bbox": [ + 303, + 178, + 341, + 189 + ], + "type": "inline_equation", + "content": "= 11" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 355, + 79, + 400, + 125 + ], + "blocks": [ + { + "bbox": [ + 355, + 79, + 400, + 125 + ], + "lines": [ + { + "bbox": [ + 355, + 79, + 400, + 125 + ], + "spans": [ + { + "bbox": [ + 355, + 79, + 400, + 125 + ], + "type": "image", + "image_path": "774707bc6a8c17a823d098c5932768a2a907b5729e535c7e059ee849ac95d4cb.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 355, + 129, + 400, + 175 + ], + "blocks": [ + { + "bbox": [ + 355, + 129, + 400, + 175 + ], + "lines": [ + { + "bbox": [ + 
355, + 129, + 400, + 175 + ], + "spans": [ + { + "bbox": [ + 355, + 129, + 400, + 175 + ], + "type": "image", + "image_path": "5bb8c9061fd07a7d2a1da479182ba2962c50c66e1d277bc6e47217e25063792a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 178, + 395, + 189 + ], + "lines": [ + { + "bbox": [ + 361, + 178, + 395, + 189 + ], + "spans": [ + { + "bbox": [ + 361, + 178, + 395, + 189 + ], + "type": "text", + "content": "Depth " + }, + { + "bbox": [ + 361, + 178, + 395, + 189 + ], + "type": "inline_equation", + "content": "= 1" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 402, + 79, + 447, + 125 + ], + "blocks": [ + { + "bbox": [ + 402, + 79, + 447, + 125 + ], + "lines": [ + { + "bbox": [ + 402, + 79, + 447, + 125 + ], + "spans": [ + { + "bbox": [ + 402, + 79, + 447, + 125 + ], + "type": "image", + "image_path": "28aaca9c4abcebdc1b515966087fbb173a50f019faade9018ec7be7505fe36de.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 402, + 129, + 447, + 175 + ], + "blocks": [ + { + "bbox": [ + 402, + 129, + 447, + 175 + ], + "lines": [ + { + "bbox": [ + 402, + 129, + 447, + 175 + ], + "spans": [ + { + "bbox": [ + 402, + 129, + 447, + 175 + ], + "type": "image", + "image_path": "24c5506ade57cd25c09e8cddfb90324c84f474209403bf28aa3b97c5065e765b.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 408, + 178, + 442, + 189 + ], + "lines": [ + { + "bbox": [ + 408, + 178, + 442, + 189 + ], + "spans": [ + { + "bbox": [ + 408, + 178, + 442, + 189 + ], + "type": "text", + "content": "Depth " + }, + { + "bbox": [ + 408, + 178, + 442, + 189 + ], + "type": "inline_equation", + "content": "= 4" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 402, + 195, + 447, + 205 + ], 
+ "lines": [ + { + "bbox": [ + 402, + 195, + 447, + 205 + ], + "spans": [ + { + "bbox": [ + 402, + 195, + 447, + 205 + ], + "type": "text", + "content": "(b) SimMIM" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 449, + 79, + 494, + 125 + ], + "blocks": [ + { + "bbox": [ + 449, + 79, + 494, + 125 + ], + "lines": [ + { + "bbox": [ + 449, + 79, + 494, + 125 + ], + "spans": [ + { + "bbox": [ + 449, + 79, + 494, + 125 + ], + "type": "image", + "image_path": "f494cdd375851dcadd3b3dfee47b7994f0e0f66ae842cc5e45e1d7f69702eb20.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 449, + 129, + 494, + 175 + ], + "blocks": [ + { + "bbox": [ + 449, + 129, + 494, + 175 + ], + "lines": [ + { + "bbox": [ + 449, + 129, + 494, + 175 + ], + "spans": [ + { + "bbox": [ + 449, + 129, + 494, + 175 + ], + "type": "image", + "image_path": "6c43d5fa9ff4523b19d789b99a778143dcf43176ca7a3a76257659d61bf7da06.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 178, + 490, + 189 + ], + "lines": [ + { + "bbox": [ + 452, + 178, + 490, + 189 + ], + "spans": [ + { + "bbox": [ + 452, + 178, + 490, + 189 + ], + "type": "text", + "content": "Depth " + }, + { + "bbox": [ + 452, + 178, + 490, + 189 + ], + "type": "inline_equation", + "content": "= 11" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 307, + 504, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 307, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 504, + 353 + ], + "type": "text", + "content": "Our analyses mainly compare ViT-B/16 pre-trained on ImageNet-1K (Russakovsky et al., 2015) with MoCo v3 (Chen et al., 2021) and SimMIM (Xie et al., 2022b). 
We use the ImageNet validation images for our experiments. We observe that other methods, e.g., DINO (Caron et al., 2021), BEiT (Bao et al., 2022), and MAE (He et al., 2022), have consistent properties (See Figure C.1)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 365, + 357, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 357, + 432 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 357, + 432 + ], + "type": "text", + "content": "CL mainly captures global relationships. We measure the ranges of self-attention via attention distance (Dosovitskiy et al., 2021). Attention distance is defined as the average distance between the query tokens and key tokens considering their self-attention weights. Therefore, it conceptually corresponds to the size of the receptive fields in CNNs." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 437, + 357, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 357, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 357, + 537 + ], + "type": "text", + "content": "Figure 3 shows that the attention distance of CL (MoCo) is significantly higher than that of MIM (SimMIM), especially in the later layers. As seen in Figure 2, the qualitative visualization, this implies that the representations of CL contain global patterns and shape information, so CL can help ViTs distinguish between objects of images. Conversely, the self-attention of MIM mainly capture local relationships; i.e., MIM may have difficulty recognizing whole objects and their shapes. Section 3 also discuss this claim from a representational perspective." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 550, + 357, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 357, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 357, + 661 + ], + "type": "text", + "content": "Self-attention of CL collapse into homogeneity. 
We observe an interesting behavior of CL in Figure 2, which shows the attention maps for query tokens from two different spatial locations. The self-attention of CL surprisingly indicate almost identical object shapes for the two query tokens, compared to that of MIM. We describe this phenomenon as an attention collapse into homogeneity. This collapsing trend in the self-attention of CL is observed across all the heads and query tokens. In contrast, the self-attention of MIM are more faithful to the two query tokens, as expected." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 665, + 357, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 357, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 357, + 721 + ], + "type": "text", + "content": "We use normalized mutual information (NMI) (Strehl & Ghosh, 2002) to measure the attention collapse. Let " + }, + { + "bbox": [ + 104, + 665, + 357, + 721 + ], + "type": "inline_equation", + "content": "p(q)" + }, + { + "bbox": [ + 104, + 665, + 357, + 721 + ], + "type": "text", + "content": " be a distribution of query tokens, and assume that these query tokens are uniformly distributed since a single query token is given for each spatial coordinate, i.e., " + }, + { + "bbox": [ + 104, + 665, + 357, + 721 + ], + "type": "inline_equation", + "content": "p(q) = 1 / N" + }, + { + "bbox": [ + 104, + 665, + 357, + 721 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 665, + 357, + 721 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 665, + 357, + 721 + ], + "type": "text", + "content": " is the number" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "content": "of the tokens. 
Then the joint distribution of query and key tokens is " + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "inline_equation", + "content": "p(q, k) = \\pi(k|q)p(q)" + }, + { + "bbox": [ + 104, + 720, + 504, + 733 + ], + "type": "text", + "content": " where" + } + ] + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 368, + 369, + 501, + 483 + ], + "blocks": [ + { + "bbox": [ + 368, + 369, + 501, + 483 + ], + "lines": [ + { + "bbox": [ + 368, + 369, + 501, + 483 + ], + "spans": [ + { + "bbox": [ + 368, + 369, + 501, + 483 + ], + "type": "image", + "image_path": "99e3b5c3a038b687f23183f30eb71953eccbf1a6c211918b2aca6e3631a13e01.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 491, + 505, + 533 + ], + "lines": [ + { + "bbox": [ + 362, + 491, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 362, + 491, + 505, + 533 + ], + "type": "text", + "content": "Figure 3: Effective receptive fields of CL are global, but those of MIM are local. This is particularly evident in the later layers." + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 366, + 555, + 499, + 670 + ], + "blocks": [ + { + "bbox": [ + 366, + 555, + 499, + 670 + ], + "lines": [ + { + "bbox": [ + 366, + 555, + 499, + 670 + ], + "spans": [ + { + "bbox": [ + 366, + 555, + 499, + 670 + ], + "type": "image", + "image_path": "a1cb58d7c38ec6d018ff2afe2609cbab97a982fa031907530f646285aa1bb55d.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 673, + 505, + 715 + ], + "lines": [ + { + "bbox": [ + 362, + 673, + 505, + 715 + ], + "spans": [ + { + "bbox": [ + 362, + 673, + 505, + 715 + ], + "type": "text", + "content": "Figure 4: Self-attentions of CL have little to do with query tokens. Normalized MI of CL is significantly lower than that of MIM in the later layers." 
+ } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 81, + 239, + 190 + ], + "blocks": [ + { + "bbox": [ + 109, + 81, + 239, + 190 + ], + "lines": [ + { + "bbox": [ + 109, + 81, + 239, + 190 + ], + "spans": [ + { + "bbox": [ + 109, + 81, + 239, + 190 + ], + "type": "image", + "image_path": "82d220a43e6c076dc65feb2c5e9314ccdb2a6b51deec70dd041e72eb1855935d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 196, + 209, + 206 + ], + "lines": [ + { + "bbox": [ + 140, + 196, + 209, + 206 + ], + "spans": [ + { + "bbox": [ + 140, + 196, + 209, + 206 + ], + "type": "text", + "content": "(a) Between heads" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 240, + 81, + 370, + 190 + ], + "blocks": [ + { + "bbox": [ + 240, + 81, + 370, + 190 + ], + "lines": [ + { + "bbox": [ + 240, + 81, + 370, + 190 + ], + "spans": [ + { + "bbox": [ + 240, + 81, + 370, + 190 + ], + "type": "image", + "image_path": "e68f0d48a31427b1e469ae73c101ef9376549e8a96f084255d217431e321cd11.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 269, + 
196, + 341, + 207 + ], + "lines": [ + { + "bbox": [ + 269, + 196, + 341, + 207 + ], + "spans": [ + { + "bbox": [ + 269, + 196, + 341, + 207 + ], + "type": "text", + "content": "(b) Between depths" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 215, + 506, + 295 + ], + "lines": [ + { + "bbox": [ + 104, + 215, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 215, + 506, + 295 + ], + "type": "text", + "content": "Figure 5: CL lacks representational diversity in the later layers. We measure cosine similarities of representations in the self-attentions between the heads (left), depths (middle), and spatial coordinates (right). All of the results show that the representational similarity of later self-attentions of CL is higher than that of MIM. Increasing heads or depths of CL is not effective in improving the diversity. Left: The similarity of representations from two heads in self-attention. Middle: The similarity between representations before and after self-attention transform them. Right: The similarities of representations at two spatial coordinates. ViT-" + }, + { + "bbox": [ + 104, + 215, + 506, + 295 + ], + "type": "inline_equation", + "content": "\\{\\mathrm{S}, \\mathrm{L}\\}" + }, + { + "bbox": [ + 104, + 215, + 506, + 295 + ], + "type": "text", + "content": " is trained with 100 epochs." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 372, + 80, + 502, + 190 + ], + "blocks": [ + { + "bbox": [ + 372, + 80, + 502, + 190 + ], + "lines": [ + { + "bbox": [ + 372, + 80, + 502, + 190 + ], + "spans": [ + { + "bbox": [ + 372, + 80, + 502, + 190 + ], + "type": "image", + "image_path": "9ee5281e4a88280f4a955c6205a315b8252a5d398e156f8772953329c885ac96.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 401, + 196, + 471, + 206 + ], + "lines": [ + { + "bbox": [ + 401, + 196, + 471, + 206 + ], + "spans": [ + { + "bbox": [ + 401, + 196, + 471, + 206 + ], + "type": "text", + "content": "(c) Between tokens" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "type": "inline_equation", + "content": "\\pi (k|q)" + }, + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "type": "text", + "content": " is the softmax-normalized self-attention matrix. Thus, the normalized mutual information is " + }, + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "type": "inline_equation", + "content": "\\frac{I(q,k)}{\\sqrt{H(q)H(k)}}" + }, + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "type": "inline_equation", + "content": "I(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "type": "text", + "content": " is the mutual information and " + }, + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "type": "inline_equation", + "content": "H(\\cdot)" + }, + { + "bbox": [ + 104, + 301, + 504, + 366 + ], + "type": "text", + "content": " is the marginal entropy. 
Low mutual information values show that attention maps are less dependent on the query tokens, implying an attention collapse into homogeneity. Conversely, high mutual information means that the attention maps strongly depend on the query tokens." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 371, + 506, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 506, + 406 + ], + "type": "text", + "content": "Figure 4 shows the degree of attention collapse in terms of the normalized mutual information (NMI). Results show that the mutual information of CL is significantly lower than that of MIM in the later layers, suggesting that the self-attention of CL tend to collapse into homogeneous distributions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 416, + 506, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 416, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 416, + 506, + 472 + ], + "type": "text", + "content": "Attention collapse reduces representational diversity. We conjecture that the self-attention collapse into homogeneity eventually leads to homogeneous token representations. To support this argument, we measure representational cosine similarities. In particular, we design three similarities: between different self-attention heads (heads), between the before and after self-attention layers (depths), and between different tokens (tokens)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 477, + 506, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 477, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 477, + 506, + 545 + ], + "type": "text", + "content": "Figure 5 shows the results, reporting the representation similarities for heads, depths, and tokens. 
As expected, the similarities of CL are notably higher than those of MIM in the later layers, indicating that the representations of CL have significant homogeneity. Even increasing the model size does not solve the problem CL has and may rather worsen it. Increasing the number of heads (ViT-S to ViT-B; Figure 5a) improves the representational diversity of MIM, but hardly improves the diversity of CL. Increasing the depth of CL (ViT-B to ViT-L; Figure 5b) only adds redundant modules." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 555, + 504, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 601 + ], + "type": "text", + "content": "Implications of the behaviors we observed. In conclusion, the self-attention of CL captures global patterns and shapes of objects. However, CL suffers from the problem of attention collapse into homogeneity, which reduces the diversity of token representations. On the other hand, MIM primarily captures local patterns and thus does not suffer from the attention collapse problem." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 605, + 448, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 448, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 448, + 617 + ], + "type": "text", + "content": "The behaviors mentioned above can explain the phenomena we observed in Figure 1:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 110, + 625, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 110, + 625, + 506, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 625, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 110, + 625, + 506, + 659 + ], + "type": "text", + "content": "- CL outperforms MIM in linear probing tasks because it captures shapes, which helps recognize objects and distinguish images. 
Although MIM preserves the texture and diversity of representations, their correlation with objects or content may not be as strong as shapes do." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 110, + 662, + 506, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 662, + 506, + 707 + ], + "spans": [ + { + "bbox": [ + 110, + 662, + 506, + 707 + ], + "type": "text", + "content": "- The attention collapse prohibits CL from fully exploiting heads, depths, and tokens of ViTs. Since homogeneous representations are not very helpful in improving token representations, ViTs trained with CL waste a large part of network capability. Therefore, the fine-tuning accuracy of MIM is significantly higher than CL in large models." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 110, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 110, + 709, + 504, + 733 + ], + "type": "text", + "content": "- CL is not suitable for dense prediction since the token features are homogeneous with respect to their spatial coordinates." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 131, + 106, + 223, + 175 + ], + "blocks": [ + { + "bbox": [ + 131, + 106, + 223, + 175 + ], + "lines": [ + { + "bbox": [ + 131, + 106, + 223, + 175 + ], + "spans": [ + { + "bbox": [ + 131, + 106, + 223, + 175 + ], + "type": "image", + "image_path": "49c4ce65597eb7dcdb83ed994f7627dba34a0d903b0043650206813c7fc112b3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 195, + 215, + 206 + ], + "lines": [ + { + "bbox": [ + 132, + 195, + 215, + 206 + ], + "spans": [ + { + "bbox": [ + 132, + 195, + 215, + 206 + ], + "type": "text", + "content": "(a) MoCo (one image)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 258, + 82, + 353, + 187 + ], + "blocks": [ + { + "bbox": [ + 258, + 82, + 353, + 187 + ], + "lines": [ + { + "bbox": [ + 258, + 82, + 353, + 187 + ], + "spans": [ + { + "bbox": [ + 258, + 82, + 353, + 187 + ], + "type": "image", + "image_path": "a8c0fbaadb19d29e242269eabe79f452b5fc0aa94e82419a7dad9f4624fe83e7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 261, + 195, + 349, + 206 + ], + 
"lines": [ + { + "bbox": [ + 261, + 195, + 349, + 206 + ], + "spans": [ + { + "bbox": [ + 261, + 195, + 349, + 206 + ], + "type": "text", + "content": "(b) MoCo (two images)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "lines": [ + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "text", + "content": "Figure 6: Self-attention layers of CL and MIM transform representations differently. We visualize 196 spatial representation tokens for an example validation image in a representation space. The blue " + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "inline_equation", + "content": "(\\bullet)" + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "text", + "content": " and red " + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "inline_equation", + "content": "(\\bullet)" + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "text", + "content": " data points denote the tokens before and after the self-attention transformation. Left: The self- attentions of CL (e.g., MoCo) translate all the tokens equally, so the distances between the tokens of an image do not increase. Middle: However, CL moves the \"centers of representations (represented by " + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "text", + "content": ")\" away from each other. Therefore, the images are linearly separable. 
The circle " + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "inline_equation", + "content": "(\\bullet)" + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "text", + "content": " and triangle " + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "inline_equation", + "content": "(\\triangle)" + }, + { + "bbox": [ + 104, + 215, + 506, + 316 + ], + "type": "text", + "content": " data represent tokens from different images. Right: The self- attentions of MIM (e.g., SimMIM) transform representations differently according to query tokens, thus increasing the distances between tokens. See Figure 7 for quantitative analyses." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 384, + 89, + 487, + 175 + ], + "blocks": [ + { + "bbox": [ + 384, + 89, + 487, + 175 + ], + "lines": [ + { + "bbox": [ + 384, + 89, + 487, + 175 + ], + "spans": [ + { + "bbox": [ + 384, + 89, + 487, + 175 + ], + "type": "image", + "image_path": "d79a51535df574efcc7e45e8fb443c5da38de43e28c348c07224b35da91d8830.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 390, + 195, + 483, + 206 + ], + "lines": [ + { + "bbox": [ + 390, + 195, + 483, + 206 + ], + "spans": [ + { + "bbox": [ + 390, + 195, + 483, + 206 + ], + "type": "text", + "content": "(c) SimMIM (one image)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 331, + 506, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 506, + 365 + ], + "type": "text", + "content": "We further investigate the self-attention's behavior with restricted receptive fields in Figure D.1. 
As shown in the experiment, locally restricted self-attentions lead to lower linear probing but higher fine-tuning accuracy, which is consistent with our observations." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 381, + 367, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 381, + 367, + 395 + ], + "spans": [ + { + "bbox": [ + 105, + 381, + 367, + 395 + ], + "type": "text", + "content": "3 HOW ARE REPRESENTATIONS TRANSFORMED?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 407, + 506, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 506, + 442 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 506, + 442 + ], + "type": "text", + "content": "In this section, we analyze the token representations of ViTs pre-trained with CL and MIM to demonstrate how the properties of self-attentions we observed in Section 2 affect the representations differently. We use the same pre-trained ViT-B/16 models by default default just as we did in Section 2." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 455, + 504, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 455, + 504, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 504, + 522 + ], + "type": "text", + "content": "CL transforms all tokens in unison, while MIM does so individually. To show how CL and MIM transform token representations, we visualize them in representation space. Figure 6 shows 196 (14×14 patches) tokens before and after self-attention modules from a single image sample of the ImageNet validation set. We use the three large singular vectors obtained via singular value decomposition (SVD) as the bases of the space. To better visualize this, we display the representation of MoCo and SimMIM in their crucial layers—the last layer and the first layer, respectively." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 527, + 506, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 527, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 527, + 506, + 594 + ], + "type": "text", + "content": "Figure 6a visualizes the changes that occur in the tokens of CL when transformed by self-attention module; it indicates that the self-attention of CL translate all tokens in unison. This phenomenon occurs because the self-attention maps of CL are homogeneous, i.e., self-attention is almost independent of the spatial coordinates and query tokens. Therefore, the modules add near-constant to all the token representations. As a result, the inter-representation distance and the volume of representations do not increase, which implies that CL cares less about individual tokens." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 599, + 506, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 506, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 506, + 643 + ], + "type": "text", + "content": "Nevertheless, self-attention are essential for the discriminative power of CL. As shown in Figure 6b, they help distinguish images by moving \"the centers of the representation distribution\" away from each other. In short, this figure suggests that CL makes the image linearly separable even though it loses the ability to distinguish tokens." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 649, + 505, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 649, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 649, + 505, + 682 + ], + "type": "text", + "content": "In contrast, MIM applies a different transformation to individual tokens, as shown in Figure 6c, because different self-attention are assigned to the individual spatial tokens. 
Thus, MIM alters the distance between tokens of a single image as well as the volume of the representation distribution." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 688, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 505, + 733 + ], + "type": "text", + "content": "We find consistent results in quantitative analysis. Inspired by Jing et al. (2022), Figure 7 visualizes singular value spectra for tokens and images. A singular value spectrum provides singular values of a representation distribution obtained by SVD, so we can use it to represent the effective volume of distributions in a representation space. The higher the singular value in a spectrum, the larger the" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 143, + 79, + 233, + 177 + ], + "blocks": [ + { + "bbox": [ + 143, + 79, + 233, + 177 + ], + "lines": [ + { + "bbox": [ + 143, + 79, + 233, + 177 + ], + "spans": [ + { + "bbox": [ + 143, + 79, + 233, + 177 + ], + "type": "image", + "image_path": "9ef3380b4df0c37dc0622f3ca6b6662e88772a9772af3c5a415479d047cf2c99.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 177, + 182, + 
434, + 194 + ], + "lines": [ + { + "bbox": [ + 177, + 182, + 434, + 194 + ], + "spans": [ + { + "bbox": [ + 177, + 182, + 434, + 194 + ], + "type": "text", + "content": "(a) Singluar value spectrum of tokens from a single image (token-level)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 236, + 79, + 345, + 177 + ], + "blocks": [ + { + "bbox": [ + 236, + 79, + 345, + 177 + ], + "lines": [ + { + "bbox": [ + 236, + 79, + 345, + 177 + ], + "spans": [ + { + "bbox": [ + 236, + 79, + 345, + 177 + ], + "type": "image", + "image_path": "de245a5bf2e224909a2ff0fd09edb5e8e951208b716152fbbd1bda67243a3df2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 358, + 82, + 470, + 178 + ], + "blocks": [ + { + "bbox": [ + 358, + 82, + 470, + 178 + ], + "lines": [ + { + "bbox": [ + 358, + 82, + 470, + 178 + ], + "spans": [ + { + "bbox": [ + 358, + 82, + 470, + 178 + ], + "type": "image", + "image_path": "b7090f8183eca964795b233964f9b8ff98297632d955f865063ff44f1d947265.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 140, + 201, + 232, + 297 + ], + "blocks": [ + { + "bbox": [ + 140, + 201, + 232, + 297 + ], + "lines": [ + { + "bbox": [ + 140, + 201, + 232, + 297 + ], + "spans": [ + { + "bbox": [ + 140, + 201, + 232, + 297 + ], + "type": "image", + "image_path": "10818ff7ef3740f028faa7a71f35ab740d750a4e968475054bd344e0af097616.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 302, + 399, + 314 + ], + "lines": [ + { + "bbox": [ + 211, + 302, + 399, + 314 + ], + "spans": [ + { + "bbox": [ + 211, + 302, + 399, + 314 + ], + "type": "text", + "content": "(b) Singluar value spectrum of images (image-level)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + 
], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 235, + 200, + 347, + 297 + ], + "blocks": [ + { + "bbox": [ + 235, + 200, + 347, + 297 + ], + "lines": [ + { + "bbox": [ + 235, + 200, + 347, + 297 + ], + "spans": [ + { + "bbox": [ + 235, + 200, + 347, + 297 + ], + "type": "image", + "image_path": "734960add7561f85bce6dfd04d6919f304218f71ac54b926079a4fe51dc8b634.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 322, + 506, + 411 + ], + "lines": [ + { + "bbox": [ + 104, + 322, + 506, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 506, + 411 + ], + "type": "text", + "content": "Figure 7: CL barely changes or even decreases the distribution volume of tokens from a single image, implying that it hardly distinguishes between token. Instead, it significantly increases the distribution volume of images. To demonstrate these properties, we visualize singular value spectra, the singular values of the distribution of representations sorted by the magnitude. The higher a singular value, the larger the volume of a distribution. The right of this figure shows the " + }, + { + "bbox": [ + 104, + 322, + 506, + 411 + ], + "type": "inline_equation", + "content": "64^{\\text{th}}" + }, + { + "bbox": [ + 104, + 322, + 506, + 411 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 322, + 506, + 411 + ], + "type": "inline_equation", + "content": "128^{\\text{th}}" + }, + { + "bbox": [ + 104, + 322, + 506, + 411 + ], + "type": "text", + "content": " highest singular value for depth. Top: Singular value spectra of tokens from a single image. CL decreases the singular values of the tokens, but MIM increases. Bottom: Singular value spectra of images. CL significantly increases the volumes occupied by images, but MIM hardly does so." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 359, + 202, + 472, + 297 + ], + "blocks": [ + { + "bbox": [ + 359, + 202, + 472, + 297 + ], + "lines": [ + { + "bbox": [ + 359, + 202, + 472, + 297 + ], + "spans": [ + { + "bbox": [ + 359, + 202, + 472, + 297 + ], + "type": "image", + "image_path": "4a648590f389717f54d3b3c3dfc28108812c7ddc6e8871eb4efd288d01028240.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "content": "volume of a representation distribution. To calibrate the scale, we use the relative log singular value " + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "inline_equation", + "content": "(\\Delta \\log \\text{ singular value})" + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "content": ", the difference with the (second) largest singular value for a depth." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 450, + 506, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 450, + 506, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 506, + 538 + ], + "type": "text", + "content": "Figure 7a shows singular value spectra of tokens from a single image. We calculate them for each image in the ImageNet validation set and report averaged singular values over the dataset. In this figure, the CL layers hardly increase or even decrease the singular value; consistent with the explanation above, this implies that CL hardly distinguishes tokens. In contrast, MIM increases the singular value, meaning that it changes the volume of tokens and can distinguish tokens. 
Another interesting observation is that a few later layers of MIM decrease the volume, even though they capture local patterns as shown in Figures 3 and 4. This is because they behave like decoders. Section 4 discusses this in detail." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 544, + 506, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 590 + ], + "type": "text", + "content": "Figure 7b shows the singular value spectra of images. We average all tokens in an image to build an image-level representation vector and conduct a singular value spectrum over the collection of representations in the validation set. As opposed to the previous case, the representational volume of CL is larger than that of MIM, which implies that CL makes the image-level representation separable." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 602, + 506, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 602, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 602, + 506, + 681 + ], + "type": "text", + "content": "CL exploits low-frequencies, and MIM exploits high-frequencies. We hypothesize that CL captures low-frequency and MIM captures high-frequency information in spatial dimensions since CL provides image-level self-supervision to capture global patterns, while MIM provides token-level self-supervision to exploit local patterns. To support this argument from a frequency perspective, we conduct a Fourier analysis of the representations as following Park & Kim (2022b). In particular, we report the relative log amplitude of Fourier-transformed representations by calculating the amplitude difference between the highest and lowest frequencies of representations." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 685, + 506, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 685, + 506, + 730 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 506, + 730 + ], + "type": "text", + "content": "Figure 9 visualizes the relative amplitudes of CL and MIM. It shows that the high-frequency amplitude of CL is significantly smaller than that of MIM, suggesting that CL mainly utilizes low-frequency spatial information such as global structures and shapes. On the contrary, MIM usually uses high-frequency spatial information such as narrow structures and fine textures." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 149, + 82, + 312, + 219 + ], + "blocks": [ + { + "bbox": [ + 149, + 82, + 312, + 219 + ], + "lines": [ + { + "bbox": [ + 149, + 82, + 312, + 219 + ], + "spans": [ + { + "bbox": [ + 149, + 82, + 312, + 219 + ], + "type": "image", + "image_path": "aa4af24e63032d5d5a609582042f59d6bf99b8848371ea63d40be6ce655be4e0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 224, + 269, + 236 + ], + "lines": [ + { + "bbox": [ + 187, + 224, + 269, + 236 + ], + "spans": [ + { + "bbox": [ + 187, + 224, + 269, + 236 + ], + "type": "text", + 
"content": "(a) Stylized ImageNet" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 331, + 103, + 465, + 220 + ], + "blocks": [ + { + "bbox": [ + 331, + 103, + 465, + 220 + ], + "lines": [ + { + "bbox": [ + 331, + 103, + 465, + 220 + ], + "spans": [ + { + "bbox": [ + 331, + 103, + 465, + 220 + ], + "type": "image", + "image_path": "17b03a344d686cfb28d91b880356506d67cd18e5b01a0c0eb4f7cc9058d21eb7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 334, + 224, + 462, + 236 + ], + "lines": [ + { + "bbox": [ + 334, + 224, + 462, + 236 + ], + "spans": [ + { + "bbox": [ + 334, + 224, + 462, + 236 + ], + "type": "text", + "content": "(b) Robustness for noise frequency" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 359, + 345, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 345, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 345, + 437 + ], + "type": "text", + "content": "Another interesting finding is that the last few layers of MIM reduce the high frequencies even though they only focus on local areas (See Figure 3). We conjecture that MIM implicitly divides ViTs into the encoder-decoder structure and allows intermediate layers to have linearly separable information. In contrast, CL allows the last layer to have such information. This is further elaborated in Figure 11." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 450, + 346, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 450, + 346, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 346, + 583 + ], + "type": "text", + "content": "CL is shape-biased, but MIM is texture-biased. Based on the results of the Fourier analysis, we assume that CL and MIM each have a bias toward shapes and textures, respectively. 
To demonstrate this claim, we use Stylized ImageNet (Geirhos et al., 2019), a texture-altered dataset, by using AdaIN (Huang & Belongie, 2017). Figure 8a reports the linear probing results on Stylized ImageNet to evaluate the shape and texture biases of pre-trained models. Compared to the model pre-trained with supervised learning, CL depends more on the shape and MIM depends on texture of images to classify images. In other words, CL is robust to texture changes, and MIM is vulnerable to them." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 358, + 363, + 500, + 482 + ], + "blocks": [ + { + "bbox": [ + 104, + 243, + 506, + 344 + ], + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 344 + ], + "type": "text", + "content": "Figure 8: CL is biased toward shape, whereas MIM is biased toward texture. We report the predictive results of models for linear probing tasks. However, we observe consistent results in fine-tuned models (See Figure F.2). Left: Result of classification on Stylized ImageNet. It shows that CL is more shape-biased than MIM and even than the supervised pre-trained model. Vertical lines represent averaged results for the shape categories. We also report the results of supervised ViT with ImageNet-1K class labels for comparison. Right: Accuracy drops on images with frequency-based random noises. MIM shows a more significant amount of accuracy drop than CL with high-frequency noises, demonstrating MIM's texture-biased property. The frequency window size of the frequency-based noise is " + }, + { + "bbox": [ + 104, + 243, + 506, + 344 + ], + "type": "inline_equation", + "content": "0.1\\pi" + }, + { + "bbox": [ + 104, + 243, + 506, + 344 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 358, + 363, + 500, + 482 + ], + "lines": [ + { + "bbox": [ + 358, + 363, + 500, + 482 + ], + "spans": [ + { + "bbox": [ + 358, + 363, + 500, + 482 + ], + "type": "image", + "image_path": "d333916ae8c845554b3935f597f899fe57ac1a7ba7f3e5412aca22a2cfff7673.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 491, + 506, + 579 + ], + "lines": [ + { + "bbox": [ + 350, + 491, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 350, + 491, + 506, + 579 + ], + "type": "text", + "content": "Figure 9: CL exploits low-frequency, but MIM exploits high-frequency. Moreover, a few last layers of CL reduce high-frequency by capturing global patterns. MIM also reduces it even though they capture local patterns, because the later layers behave like decoders. See also Figure 11." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 588, + 504, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 588, + 504, + 656 + ], + "spans": [ + { + "bbox": [ + 104, + 588, + 504, + 656 + ], + "type": "text", + "content": "Figure 8b shows the consistent results. In this experiment, we follow Park & Kim (2022a;b) and measure the decrease in accuracy on the ImageNet dataset with frequency-based random noise. The results suggest that CL is robust to high-frequency noises, but MIM is significantly more vulnerable to them. Since high-frequency noises harm the fine details of images, we arrive at the same conclusion that CL is more shape-biased and MIM is texture-biased. This can explain the robustness of CL against adversarial perturbations (Bordes et al., 2022)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 672, + 389, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 672, + 389, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 672, + 389, + 685 + ], + "type": "text", + "content": "4 WHICH COMPONENTS PLAY AN IMPORTANT ROLE?" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "The previous sections consistently show through various perspectives that CL exploits image-level global patterns while MIM captures token-level local patterns. This section analyzes pre-trained ViTs from an architectural perspective and shows that the key components in CL and MIM are different." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 115, + 79, + 298, + 179 + ], + "blocks": [ + { + "bbox": [ + 115, + 79, + 298, + 179 + ], + "lines": [ + { + "bbox": [ + 115, + 79, + 298, + 179 + ], + "spans": [ + { + "bbox": [ + 115, + 79, + 298, + 179 + ], + "type": "image", + "image_path": "28b264e1dfb9a221edefed3623532e8a409dcf5bd2f5e7438f11ae454028eb23.jpg" + } + ] + } + ], + 
"index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 185, + 238, + 196 + ], + "lines": [ + { + "bbox": [ + 175, + 185, + 238, + 196 + ], + "spans": [ + { + "bbox": [ + 175, + 185, + 238, + 196 + ], + "type": "text", + "content": "(a) Self-attention" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 309, + 79, + 497, + 180 + ], + "blocks": [ + { + "bbox": [ + 309, + 79, + 497, + 180 + ], + "lines": [ + { + "bbox": [ + 309, + 79, + 497, + 180 + ], + "spans": [ + { + "bbox": [ + 309, + 79, + 497, + 180 + ], + "type": "image", + "image_path": "f1d6145da6e351ee872975f01fa01b41af6cd669653784b00caefb8832f9a64a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 366, + 185, + 440, + 196 + ], + "lines": [ + { + "bbox": [ + 366, + 185, + 440, + 196 + ], + "spans": [ + { + "bbox": [ + 366, + 185, + 440, + 196 + ], + "type": "text", + "content": "(b) Fourier analysis" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 204, + 506, + 282 + ], + "lines": [ + { + "bbox": [ + 104, + 204, + 506, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 506, + 282 + ], + "type": "text", + "content": "Figure 10: The explicit decoder architecture of MAE helps ViTs effectively leverage the advantages of MIM. We analyze the encoder and decoder of a pre-trained model with a masking ratio of zero. The left side of each figure represents the encoder and the right side the decoder. Left: The mutual information of MAE is lower than that of SimMIM in the encoder but higher in the decoder. Right: The decoder of MAE captures low-frequency information, and its encoder captures high-frequency information. Moreover, the later layers (excluding the last layer) of MAE do not reduce high-frequency information, while those of SimMIM do." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 293, + 506, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 339 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 339 + ], + "type": "text", + "content": "Later layers of CL and early layers of MIM are important. According to studies on ViT (Graham et al., 2021; Dai et al., 2021; Park & Kim, 2022b), the later layers use high-level information, and the early layers exploit low-level information. Since CL and MIM each exploit global and local patterns, we expect that the later layers of CL and early layers of MIM play a key role." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 343, + 346, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 343, + 346, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 343, + 346, + 574 + ], + "type": "text", + "content": "To evaluate the importance of each layer, we measure the linear probing accuracy using intermediate representations with the configuration of Table A.1. In Figure 11, we observe the following properties: First, the linear probing accuracy of MIM is higher than that of CL at the beginning. Conversely, CL outperforms MIM at the end of the model. Such result indicates that the later layers of CL and early layers of MIM play an important role in making linearly separable representations. Second, the accuracy of CL increases with increasing depth as expected, but the accuracy of MIM surprisingly decreases at the end of the model, i.e., the later layers of MIM are not very helpful in separating representations. We explain this observation as a phenomenon in which MIM methods with shallow prediction heads, e.g., SimMIM, use later layers of the backbone as a decoder. Therefore, MIM with a deep self-attention decoder, e.g., MAE (He et al., 2022), can be useful for linear probing performance. 
Moreover, it also explains why SimMIM's high-frequency component and representational volumes drop in the later layers as shown in Figures 7 and 9. Third, even the highest linear probing accuracy of MIM is lower than that of CL." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 358, + 348, + 500, + 473 + ], + "blocks": [ + { + "bbox": [ + 358, + 348, + 500, + 473 + ], + "lines": [ + { + "bbox": [ + 358, + 348, + 500, + 473 + ], + "spans": [ + { + "bbox": [ + 358, + 348, + 500, + 473 + ], + "type": "image", + "image_path": "0f82e99a78a6a5cbc0ac240670d931fd06893c5a8e088b52c6309caecaf9d3ad.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 480, + 506, + 559 + ], + "lines": [ + { + "bbox": [ + 350, + 480, + 506, + 559 + ], + "spans": [ + { + "bbox": [ + 350, + 480, + 506, + 559 + ], + "type": "text", + "content": "Figure 11: Later layers of CL and early layers of MIM play a key role. We report linear probing accuracies by using the representations of the intermediate layers. CL outperforms MIM in later layers, and MIM outperforms CL in early layers." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 587, + 506, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 587, + 506, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 587, + 506, + 634 + ], + "type": "text", + "content": "The explicit decoder helps ViTs further leverage the advantages of MIM. Several previous observations find that the implicit decoder of MIM with a shallow prediction head, such as SimMIM, can impair performance. MAE (He et al., 2022) addresses this problem by introducing deep explicit ViT decoders and reconstructing masked tokens only in the separate decoders." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 638, + 505, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 505, + 682 + ], + "type": "text", + "content": "In Figure 10, we analyze MAE to understand the properties of decoders more deeply. Figure 10a shows the self-attention behaviors. The results indicate that the mutual information of MAE is lower than that of SimMIM in the later layers of the encoder but higher in the decoder, implying that the decoder reconstructs masked tokens based on its neighborhood tokens." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Figure 10b shows the results of the Fourier analysis. As explained in Figure 9, the last four layers of SimMIM reduce the high-frequency components. In contrast, the later layers (excluding the last layer) of MAE do not reduce them. Instead, the decoder of MAE prioritizes low-frequency information compared with the encoder, allowing the backbone to efficiently utilize high-frequency information." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 114, + 79, + 242, + 190 + ], + "blocks": [ + { + "bbox": [ + 114, + 79, + 242, + 190 + ], + "lines": [ + { + "bbox": [ + 114, + 79, + 242, + 190 + ], + "spans": [ + { + "bbox": [ + 114, + 79, + 242, + 190 + ], + "type": "image", + "image_path": "b7e58b0e0c9a6b67dbc0e7313b85167f8d8dca7c28ae3567fa1fd9acc4b1186b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 196, + 206, + 206 + ], + "lines": [ + { + "bbox": [ + 146, + 196, + 206, + 206 + ], + "spans": [ + { + "bbox": [ + 146, + 196, + 206, + 206 + ], + "type": "text", + "content": "(a) Performance" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 79, + 367, + 190 + ], + "blocks": [ + { + "bbox": [ + 242, + 79, + 367, + 190 + ], + "lines": [ + { + "bbox": [ + 242, + 79, + 367, + 190 + ], + "spans": [ + { + "bbox": [ + 242, + 79, + 367, + 190 + ], + "type": "image", + "image_path": "06ff79e5b211bc5fabfa86aff051d6c454f9ecfab754055a85d05fce81ad52d9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 196, + 335, + 206 + ], + "lines": [ + { + "bbox": [ + 272, + 196, + 
335, + 206 + ], + "spans": [ + { + "bbox": [ + 272, + 196, + 335, + 206 + ], + "type": "text", + "content": "(b) Self-attention" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 369, + 80, + 497, + 191 + ], + "blocks": [ + { + "bbox": [ + 369, + 80, + 497, + 191 + ], + "lines": [ + { + "bbox": [ + 369, + 80, + 497, + 191 + ], + "spans": [ + { + "bbox": [ + 369, + 80, + 497, + 191 + ], + "type": "image", + "image_path": "178d8aaae7148ce435789432a3d4773d9246beee239105030dc1d7abda603364.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 396, + 196, + 468, + 207 + ], + "lines": [ + { + "bbox": [ + 396, + 196, + 468, + 207 + ], + "spans": [ + { + "bbox": [ + 396, + 196, + 468, + 207 + ], + "type": "text", + "content": "(c) Fourier analysis" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 215, + 506, + 304 + ], + "lines": [ + { + "bbox": [ + 104, + 215, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 215, + 506, + 304 + ], + "type": "text", + "content": "Figure 12: The simple linear combination of CL (MoCo) and MIM (SimMIM) objectives outperforms the vanilla CL and MIM. " + }, + { + "bbox": [ + 104, + 215, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 215, + 506, + 304 + ], + "type": "text", + "content": " is the importance weight of CL, so " + }, + { + "bbox": [ + 104, + 215, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\lambda = 0" + }, + { + "bbox": [ + 104, + 215, + 506, + 304 + ], + "type": "text", + "content": " means SimMIM and " + }, + { + "bbox": [ + 104, + 215, + 506, + 304 + ], + "type": "inline_equation", + "content": "\\lambda = 1" + }, + { + "bbox": [ + 104, + 215, + 506, + 304 + ], + "type": "text", + "content": " means MoCo. 
Left: \"CL + MIM\" outperforms CL and MIM in both linear probing and fine-tuning accuracy. Middle: Mutual information of \"CL + MIM\" decreases at the end of the model, suggesting that the self-attention of later layers collapse into homogeneity and capture the same object shape information. Right: Fourier analysis shows that \"CL + MIM\" amplifies high frequencies at the beginning and reduces them at the end. It implies that \"CL + MIM\" exploits high-frequency information at the beginning and low-frequency information at the end." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 316, + 448, + 330 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 448, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 448, + 330 + ], + "type": "text", + "content": "5 ARE THE TWO METHODS COMPLEMENTARY TO EACH OTHER?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 342, + 506, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 506, + 387 + ], + "type": "text", + "content": "We present comparative analyses on CL and MIM from three perspectives: self-attention, representation transforms, and the position of important layers. All of our results indicate that CL and MIM train ViTs differently. These differences naturally imply that combining CL and MIM to train a backbone may help leverage the advantages of both methods." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "content": "To show that CL and MIM are complementary, we introduce the simplest way to harmonize CL and MIM by linearly combining two losses, i.e., " + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{L} = (1 - \\lambda)\\mathcal{L}_{\\mathrm{MIM}} + \\lambda \\mathcal{L}_{\\mathrm{CL}}" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{MIM}}" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{CL}}" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "content": " each indicate the losses of MIM and CL, and " + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "content": " is the importance weight of CL. We find that this simple hybrid model trained with combined losses efficiently exploits the strengths of both methods. Figure 12a shows linear probing and fine-tuning accuracy on ImageNet with varying " + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "content": ". 
Surprisingly, the hybrid models outperform MIM (" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\lambda = 0" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "content": ") and CL (" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\lambda = 1" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "content": ") in both aspects. Figure 12b and Figure 12c can provide insights on how hybrid models behave by analyzing the model with " + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\lambda = 0.2" + }, + { + "bbox": [ + 104, + 392, + 506, + 525 + ], + "type": "text", + "content": " in terms of self-attention in Section 2 and Fourier analysis in Section 3, respectively; both results show that the hybrid model exploits MIM properties in the early layers and CL properties in the later layers. In particular, Figure 12b indicates that the self-attention of the early layers are changed according to the query token but those of the later layers are not. Likewise, Figure 12c shows that the early layers exploit high-frequency, while the later layers try to exploit low-frequency." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 540, + 196, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 540, + 196, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 196, + 552 + ], + "type": "text", + "content": "6 CONCLUSION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 564, + 506, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 564, + 506, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 506, + 643 + ], + "type": "text", + "content": "We conducted a comparative study highlighting various facets of two widely used self-supervised learning methods for vision transformers: contrastive learning (CL) and masked image modeling (MIM). The study demonstrated many opposing properties of the two methods: image information (image-level vs. token-level; as in Section 2), feature representations (low-frequency vs. high-frequency; as in Section 3), and lead role components (later layers vs. early layers; as in Section 4). Furthermore, we suggested a possible application that exploits only the benefits from both methods and showed how a combined model can outperform individual methods." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "Future directions. Various future directions can be explored based on our study. We believe that there are better ways than a simple linear combination of CL and MIM objectives. For example, a novel self-supervised learning approach, in which CL is applied in the later layers and MIM in the early layers, can be considered. Moreover, we may extend our findings on self-supervision for multi-stage ViTs, such as PiT (Heo et al., 2021) and Swin (Liu et al., 2021). 
Another interesting direction is to enhance the individual properties of CL and MIM. Techniques that help CL or MIM learn shapes or textures, respectively, may also improve performance." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 176, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 176, + 94 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 176, + 94 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 106, + 505, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 505, + 129 + ], + "type": "text", + "content": "Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. In International Conference on Learning Representations, 2022." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 138, + 505, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 138, + 505, + 160 + ], + "spans": [ + { + "bbox": [ + 107, + 138, + 505, + 160 + ], + "type": "text", + "content": "Florian Bordes, Randall Balestriero, and Pascal Vincent. High fidelity visualization of what your self-supervised representation knows about. Transactions on Machine Learning Research, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 170, + 506, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 170, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 106, + 170, + 506, + 203 + ], + "type": "text", + "content": "Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In International Conference on Computer Vision, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 212, + 506, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 212, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 212, + 506, + 244 + ], + "type": "text", + "content": "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International Conference on Machine Learning, 2020a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 255, + 506, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 506, + 277 + ], + "type": "text", + "content": "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2021." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 286, + 504, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 286, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 286, + 504, + 308 + ], + "type": "text", + "content": "Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 317, + 504, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 317, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 504, + 339 + ], + "type": "text", + "content": "Xinlei Chen, Saining Xie, and Kaiming He. An empirical study of training self-supervised vision transformers. In International Conference on Computer Vision, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 348, + 504, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 348, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 504, + 381 + ], + "type": "text", + "content": "Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Advances in Neural Information Processing Systems, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 391, + 504, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 391, + 504, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 391, + 504, + 414 + ], + "type": "text", + "content": "Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. Coatnet: Marrying convolution and attention for all data sizes. Advances in Neural Information Processing Systems, 2021." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 422, + 505, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 422, + 505, + 467 + ], + "spans": [ + { + "bbox": [ + 106, + 422, + 505, + 467 + ], + "type": "text", + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 475, + 506, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 506, + 509 + ], + "type": "text", + "content": "Stéphane d'Ascoli, Hugo Touvron, Matthew L Leavitt, Ari S Morcos, Giulio Biroli, and Levent Sagun. Convit: Improving vision transformers with soft convolutional inductive biases. In International Conference on Machine Learning, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 517, + 506, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 517, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 106, + 517, + 506, + 561 + ], + "type": "text", + "content": "Robert Geirhos, Patricia Rubisch, Claudio Michaelis, Matthias Bethge, Felix A Wichmann, and Wieland Brendel. Imagenet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness. In International Conference on Learning Representations, 2019." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 571, + 506, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 506, + 605 + ], + "type": "text", + "content": "Benjamin Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Herve Jégou, and Matthijs Douze. Levit: a vision transformer in convnet's clothing for faster inference. In International Conference on Computer Vision, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 613, + 506, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 506, + 658 + ], + "type": "text", + "content": "Jean-Bastien Grill, Florian Strub, Florent Alché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. Advances in neural information processing systems, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 667, + 504, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 504, + 690 + ], + "type": "text", + "content": "Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, 2017." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 699, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 699, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 504, + 732 + ], + "type": "text", + "content": "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "text", + "content": "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 506, + 158 + ], + "type": "text", + "content": "Byeongho Heo, Sangdoo Yun, Dongyoon Han, Sanghyuk Chun, Junsuk Choe, and Seong Joon Oh. Rethinking spatial dimensions of vision transformers. In International Conference on Computer Vision, 2021." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 165, + 504, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 165, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 504, + 190 + ], + "type": "text", + "content": "Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European Conference on Computer Vision, 2016." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 197, + 506, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 197, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 106, + 197, + 506, + 220 + ], + "type": "text", + "content": "Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In Proceedings of the IEEE international conference on computer vision, 2017." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 228, + 506, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 228, + 506, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 506, + 261 + ], + "type": "text", + "content": "Li Jing, Pascal Vincent, Yann LeCun, and Yuandong Tian. Understanding dimensional collapse in contrastive self-supervised learning. In International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 270, + 504, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 504, + 304 + ], + "type": "text", + "content": "Hanjoo Kim, Minkyu Kim, Dongjoo Seo, Jinwoong Kim, Heungseok Park, Soeun Park, Hyunwoo Jo, KyungHyun Kim, Youngil Yang, Youngkwan Kim, et al. Nsml: Meet the mlaas platform with a real-world case study. arXiv preprint arXiv:1810.09957, 2018." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 311, + 504, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 311, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 504, + 346 + ], + "type": "text", + "content": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 354, + 504, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 504, + 388 + ], + "type": "text", + "content": "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In International Conference on Computer Vision, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 396, + 504, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 396, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 106, + 396, + 504, + 419 + ], + "type": "text", + "content": "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 426, + 506, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 506, + 449 + ], + "type": "text", + "content": "Namuk Park and Songkuk Kim. Blurs behave like ensembles: Spatial smoothings to improve accuracy, uncertainty, and robustness. In International Conference on Machine Learning, 2022a." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 457, + 504, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 504, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 504, + 481 + ], + "type": "text", + "content": "Namuk Park and Songkuk Kim. How do vision transformers work? In International Conference on Learning Representations, 2022b." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 488, + 506, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 488, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 506, + 533 + ], + "type": "text", + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 541, + 506, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 541, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 541, + 506, + 575 + ], + "type": "text", + "content": "Maithra Raghu, Thomas Unterthiner, Simon Kornblith, Chiyuan Zhang, and Alexey Dosovitskiy. Do vision transformers see like convolutional neural networks? Advances in Neural Information Processing Systems, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 583, + 506, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 583, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 506, + 628 + ], + "type": "text", + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. 
International Journal of Computer Vision (IJCV), 2015." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 635, + 506, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 506, + 660 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 506, + 660 + ], + "type": "text", + "content": "Alexander Strehl and Joydeep Ghosh. Cluster ensembles—a knowledge reuse framework for combining multiple partitions. Journal of machine learning research, 2002." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 667, + 504, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 504, + 701 + ], + "type": "text", + "content": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "text", + "content": "Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. In European Conference on Computer Vision, 2020a." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 643 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Yonglong Tian, Chen Sun, Ben Poole, Dilip Krishnan, Cordelia Schmid, and Phillip Isola. What makes for good views for contrastive learning? Advances in Neural Information Processing Systems, 33:6827-6839, 2020b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 122, + 504, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 122, + 504, + 146 + ], + "spans": [ + { + "bbox": [ + 107, + 122, + 504, + 146 + ], + "type": "text", + "content": "Yuandong Tian, Xinlei Chen, and Surya Ganguli. Understanding self-supervised learning dynamics without contrastive pairs. In International Conference on Machine Learning, 2021." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 153, + 504, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 153, + 504, + 176 + ], + "spans": [ + { + "bbox": [ + 106, + 153, + 504, + 176 + ], + "type": "text", + "content": "Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 2008." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 182, + 506, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 182, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 506, + 228 + ], + "type": "text", + "content": "Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8769-8778, 2018." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 234, + 504, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 234, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 107, + 234, + 504, + 258 + ], + "type": "text", + "content": "Shaoru Wang, Jin Gao, Zeming Li, Jian Sun, and Weiming Hu. A closer look at self-supervised lightweight vision transformers. arXiv preprint arXiv:2205.14443, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 264, + 504, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 264, + 504, + 298 + ], + "spans": [ + { + "bbox": [ + 107, + 264, + 504, + 298 + ], + "type": "text", + "content": "Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong, and Lei Li. Dense contrastive learning for self-supervised visual pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 304, + 506, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 506, + 340 + ], + "type": "text", + "content": "Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 346, + 506, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 346, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 506, + 380 + ], + "type": "text", + "content": "Yixuan Wei, Han Hu, Zhenda Xie, Zheng Zhang, Yue Cao, Jianmin Bao, Dong Chen, and Baining Guo. Contrastive learning rivals masked image modeling in fine-tuning via feature distillation. arXiv preprint arXiv:2205.14141, 2022b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 386, + 504, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 386, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 107, + 386, + 504, + 411 + ], + "type": "text", + "content": "Zhenda Xie, Zigang Geng, Jingcheng Hu, Zheng Zhang, Han Hu, and Yue Cao. Revealing the dark secrets of masked image modeling. arXiv preprint arXiv:2205.13543, 2022a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 416, + 504, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 416, + 504, + 452 + ], + "spans": [ + { + "bbox": [ + 107, + 416, + 504, + 452 + ], + "type": "text", + "content": "Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022b." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 457, + 506, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 457, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 107, + 457, + 506, + 491 + ], + "type": "text", + "content": "Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 498, + 506, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 498, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 107, + 498, + 506, + 533 + ], + "type": "text", + "content": "Sangdoo Yun, Dongyoon Han, Seong Joon Oh, Sanghyuk Chun, Junsuk Choe, and Youngjoon Yoo. Cutmix: Regularization strategy to train strong classifiers with localizable features. In International Conference on Computer Vision, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 539, + 504, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 539, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 107, + 539, + 504, + 563 + ], + "type": "text", + "content": "Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. In International Conference on Learning Representations, 2018." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 569, + 506, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 569, + 506, + 603 + ], + "spans": [ + { + "bbox": [ + 107, + 569, + 506, + 603 + ], + "type": "text", + "content": "Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. International Journal of Computer Vision, 127(3):302-321, 2019." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 609, + 506, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 609, + 506, + 643 + ], + "spans": [ + { + "bbox": [ + 107, + 609, + 506, + 643 + ], + "type": "text", + "content": "Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. ibot: Image bert pre-training with online tokenizer. International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 164, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 164, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 164, + 94 + ], + "type": "text", + "content": "A SETUP" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": "We build the configurations based on Xie et al. (2022b) for fine-tuning and Caron et al. (2021) for linear probing. Table A.1 summarizes the configurations. 
Most analyzes of ViTs use the official ViT-B pre-trained models, and some analyzes use ViT-{S, L} pre-trained with official configurations but epochs of 100. Due to memory limitations, ViT-L is pre-trained with a quarter batch size of the other models. The hybrid model introduced in Section 5 uses the ViT backbone architecture of Xie et al. (2022b) and employs a configuration based on their work for pre-training as shown in Table A.1. For data augmentation and regularization, we adopt widely used settings, e.g., Randaugment (Cubuk et al., 2020), label smoothing (Szegedy et al., 2016), mixup (Zhang et al., 2018), cutmix (Yun et al., 2019), stochastic depth (Huang et al., 2016). Layer decay (Bao et al., 2022) is also used for fine-tuning. Neural network models are implemented in PyTorch (Paszke et al., 2019). The code for analysis is available at https://github.com/naver-ai/cl-vs-mim. All experiments use {1, 4, 8} NVIDIA A100 Tensor Core GPU. NSML (Kim et al., 2018) has been used for experiments." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 159, + 267, + 452, + 511 + ], + "blocks": [ + { + "bbox": [ + 244, + 251, + 366, + 264 + ], + "lines": [ + { + "bbox": [ + 244, + 251, + 366, + 264 + ], + "spans": [ + { + "bbox": [ + 244, + 251, + 366, + 264 + ], + "type": "text", + "content": "Table A.1: Training settings." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 159, + 267, + 452, + 511 + ], + "lines": [ + { + "bbox": [ + 159, + 267, + 452, + 511 + ], + "spans": [ + { + "bbox": [ + 159, + 267, + 452, + 511 + ], + "type": "table", + "html": "
CONFIGURATIONLinear ProbingFine-tuningPre-training
optimizersgdadamwadamw
base learning rate1.0e-01.25e-31.0e-4
weight decay0.050.050.05
batch size1k2k1k
training epoch50100100
learning rate schedulecosinecosinemultistep
warmup epoch02010
warmup schedule·linearlinear
randaugment·9, 0.59, 0.5
label smoothing·0.10.1
mixup·0.80.8
cutmix·1.01.0
stochastic depth·0.10.1
layer decay·0.651.0
gradient clip·5.05.0
", + "image_path": "c55ccf7380dc893488f929e005a837e33550db9d874a752f52cf423e536c83df.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 535, + 214, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 535, + 214, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 535, + 214, + 547 + ], + "type": "text", + "content": "B RELATED WORK" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 561, + 506, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 506, + 651 + ], + "type": "text", + "content": "CL is a method based on comparing the global projection of two different random views. However, this approach usually suffers from the collapsing problem, where all representations collapse into constant solutions. To solve this problem, various methods such as negative samples and InfoNCE (Oord et al., 2018) have been proposed. Negative samples are an effective technique to avoid the collapsing problems, but they cause dimensional collapse (Jing et al., 2022) and require extra large batches (Chen et al., 2020a) or memory queues (He et al., 2020; Chen et al., 2020b) to retrieve them. We mainly analyze MoCo v3 (Chen et al., 2021), since the method includes these de facto standard components—global projection, random views, and negative samples." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "Some SSL methods, e.g. Grill et al. (2020); Caron et al. (2021), do not use negative samples and use the projections of their Siamese representations as the positives. 
Such self-distillation has been explored theoretically and empirically (Chen & He, 2021; Tian et al., 2021) to prevent the collapsing problem, but we do not discuss the distillation scheme in this paper. Wei et al. (2022b) shows that feature distillation improves the fine-tuning performance of CL by diversifying attention ranges; this observation is consistent with our findings. While they focus on distillation to improve CL, we reveal the fundamental nature of self-supervised learning by rigorously comparing CL and MIM." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 167, + 83, + 296, + 194 + ], + "blocks": [ + { + "bbox": [ + 167, + 83, + 296, + 194 + ], + "lines": [ + { + "bbox": [ + 167, + 83, + 296, + 194 + ], + "spans": [ + { + "bbox": [ + 167, + 83, + 296, + 194 + ], + "type": "image", + "image_path": "0a7defb6cf5ca448cbeadfa09eece56e710d4e928fa34c8110a31f6063707dd5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 200, + 270, + 210 + ], + "lines": [ + { + "bbox": [ + 190, + 200, + 270, + 210 + ], + "spans": [ + { + "bbox": [ + 190, + 200, + 270, + 210 + ], + "type": "text", + "content": "(a) Attention distance" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + 
"index": 1 + }, + { + "type": "image", + "bbox": [ + 316, + 81, + 445, + 194 + ], + "blocks": [ + { + "bbox": [ + 316, + 81, + 445, + 194 + ], + "lines": [ + { + "bbox": [ + 316, + 81, + 445, + 194 + ], + "spans": [ + { + "bbox": [ + 316, + 81, + 445, + 194 + ], + "type": "image", + "image_path": "2940011fb68852857470985169c458b291f5397c3a9df53a321760de5a4350d7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 200, + 414, + 210 + ], + "lines": [ + { + "bbox": [ + 345, + 200, + 414, + 210 + ], + "spans": [ + { + "bbox": [ + 345, + 200, + 414, + 210 + ], + "type": "text", + "content": "(b) Normalized MI" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 167, + 311, + 296, + 421 + ], + "blocks": [ + { + "bbox": [ + 167, + 311, + 296, + 421 + ], + "lines": [ + { + "bbox": [ + 167, + 311, + 296, + 421 + ], + "spans": [ + { + "bbox": [ + 167, + 311, + 296, + 421 + ], + "type": "image", + "image_path": "6b8d60f909c96901f45026634f8622fb176aa05b7b617722bdc2a9a000c3b65c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 426, + 270, + 436 + ], + "lines": [ + { + "bbox": [ + 190, + 426, + 270, + 436 + ], + "spans": [ + { + "bbox": [ + 190, + 426, + 270, + 436 + ], + "type": "text", + "content": "(a) Attention distance" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 446, + 506, + 513 + ], + "lines": [ + { + "bbox": [ + 104, + 446, + 506, + 513 + ], + "spans": [ + { + "bbox": [ + 104, + 446, + 506, + 513 + ], + "type": "text", + "content": "Figure C.2: ViTs exhibit consistent self-attention patterns, regardless of their size. To better understand these patterns, we visualize the self-attention behaviors of three ViTs—ViT-{Ti, S, B}—using two metrics: attention distance and normalized mutual information (MI). 
Left: All self-attention of MoCo capture global patterns in the later layers. In contrast, the self-attention of SimMIM capture local patterns. Right: Likewise, all self-attention maps of MoCo collapse into homogeneity in the later layers." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 316, + 309, + 445, + 421 + ], + "blocks": [ + { + "bbox": [ + 104, + 220, + 506, + 298 + ], + "lines": [ + { + "bbox": [ + 104, + 220, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 506, + 298 + ], + "type": "text", + "content": "Figure C.1: MIM and CL methods each have consistent properties. To show this, we visualize self-attention behaviors in terms of attention distance and normalized mutual information (MI). SimCLR\\*, which was introduced in Chen et al. (2021), stands for MoCo with a momentum coefficient of 0. Left: The attention distance of CL methods (namely MoCo, SimCLR\\*, and DINO) is higher than that of MIM methods (namely SimMIM, BEiT, and MAE). This suggests that CL methods consistently capture global patterns. Right: The normalized mutual information of MIM is higher than that of CL; i.e., the self- attentions of MIM are more correlated with query tokens than CL." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 316, + 309, + 445, + 421 + ], + "lines": [ + { + "bbox": [ + 316, + 309, + 445, + 421 + ], + "spans": [ + { + "bbox": [ + 316, + 309, + 445, + 421 + ], + "type": "image", + "image_path": "f7b27a38746bf54f92540391b3c2ca815b95f136dd50d7cbae60b838d52255ef.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 426, + 414, + 436 + ], + "lines": [ + { + "bbox": [ + 345, + 426, + 414, + 436 + ], + "spans": [ + { + "bbox": [ + 345, + 426, + 414, + 436 + ], + "type": "text", + "content": "(b) Normalized MI" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 530, + 506, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 506, + 597 + ], + "type": "text", + "content": "Compared with CL, MIM has been rarely explored in vision tasks. Various methods, such as histograms of oriented gradients (Wei et al., 2022a) and tokenization (Bao et al., 2022), have been proposed as part of porting masked language models to the image domain with ViTs. Among them, SimMIM (Xie et al., 2022b) and MAE (He et al., 2022) are simple yet effective methods to reconstruct masked tokens without complicated pretext tasks. Because of its simplicity and superior performance in downstream operations, MIM is attracting attention as a promising technique in image processing." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 601, + 506, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 506, + 658 + ], + "type": "text", + "content": "Nevertheless, we find hints suggesting that CL and MIM utilize different aspects of the data, making them complementary. For example, Zhou et al. (2022); Wang et al. 
(2021); Yu et al. (2022) achieve high predictive performance by harmonizing the image-level and the token-level self-supervised learning. Xie et al. (2022a) also observe that, unlike supervised pre-trained models or CL, self-attention in SimMIM focus locally; this is a consistent result with our findings." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 673, + 436, + 686 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 673, + 436, + 686 + ], + "spans": [ + { + "bbox": [ + 104, + 673, + 436, + 686 + ], + "type": "text", + "content": "C OUR INSIGHTS ARE GENERALIZABLE TO VARIOUS MODELS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "In the main text, we analyze ViT-B pre-trained using MoCo and SimMIM. We observe consistent characteristics across various sizes of ViTs that have been pre-trained using other self-supervised learning methods. To support this claim, we delve into the properties of self-attention." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 168, + 79, + 298, + 192 + ], + "blocks": [ + { + "bbox": [ + 168, + 79, + 298, + 192 + ], + "lines": [ + { + "bbox": [ + 168, + 79, + 298, + 192 + ], + "spans": [ + { + "bbox": [ + 168, + 79, + 298, + 192 + ], + "type": "image", + "image_path": "227f272cc4f03f82eeb6e01630535a451968a203c4b06945b12f38209dc45e73.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 189, + 197, + 275, + 207 + ], + "lines": [ + { + "bbox": [ + 189, + 197, + 275, + 207 + ], + "spans": [ + { + "bbox": [ + 189, + 197, + 275, + 207 + ], + "type": "text", + "content": "(a) Standard deviations" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 319, + 80, + 442, + 191 + ], + "blocks": [ + { + "bbox": [ + 319, + 80, + 442, + 191 + ], + "lines": [ + { + "bbox": [ + 319, + 80, + 442, + 191 + ], + "spans": [ + { + "bbox": [ + 319, + 80, + 442, + 191 + ], + "type": "image", + "image_path": "6a47cec191d37145789159dcc512d224238fa7efc778426247a6f4d9dec3ca38.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 196, + 430, + 209 + ], + "lines": [ + { + "bbox": [ + 
331, + 196, + 430, + 209 + ], + "spans": [ + { + "bbox": [ + 331, + 196, + 430, + 209 + ], + "type": "text", + "content": "(b) Distribution at " + }, + { + "bbox": [ + 331, + 196, + 430, + 209 + ], + "type": "inline_equation", + "content": "3^{\\mathrm{rd}}" + }, + { + "bbox": [ + 331, + 196, + 430, + 209 + ], + "type": "text", + "content": " layer" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "lines": [ + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "type": "text", + "content": "Figure E.1: The presence of an outlier head in MoCo raises the average of normalized mutual information. This observation explains how the normalized mutual information in a couple of MoCo's self-attention layers is similar to or even surpasses that in SimMIM. Left: We present the standard deviation of the normalized mutual information. As depicted in this figure, the standard deviation in SimMIM remains relatively consistent across different depths. In contrast, the standard deviation in MoCo's " + }, + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "type": "inline_equation", + "content": "3^{\\mathrm{rd}}" + }, + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "type": "inline_equation", + "content": "4^{\\mathrm{th}}" + }, + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "type": "text", + "content": " self-attention layer is notably higher than that in SimMIM. Right: Distribution of mutual information for the third self-attention layer head. The visualization of this kernel density estimation shows that MoCo has an outlier head with mutual information close to 1.0. 
The red rectangles " + }, + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "type": "inline_equation", + "content": "(\\square)" + }, + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "type": "text", + "content": " and blue triangles " + }, + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "type": "inline_equation", + "content": "(\\triangle)" + }, + { + "bbox": [ + 104, + 217, + 506, + 328 + ], + "type": "text", + "content": " refer to the mutual information of heads in MoCo and SimMIM, respectively." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 411, + 501, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 501, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 501, + 423 + ], + "type": "text", + "content": "D LOCALITY INDUCTIVE BIAS IMPROVES FINE-TUNING ACCURACY OF CL" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 436, + 346, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 346, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 346, + 525 + ], + "type": "text", + "content": "In Section 2, we demonstrate that the homogeneity of self-attention map, i.e., attention collapse of CL, helps ViT distinguish images but harms fine-tuning accuracy. As a result, we anticipate that incorporating a locality inductive bias into CL will improve fine-tuning accuracy but degrade linear probing accuracy. One simple method to inject locality into self-attention is to limit the receptive field of self-attention by using attention masks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 529, + 346, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 346, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 346, + 597 + ], + "type": "text", + "content": "Figure D.1 shows the predictive performance of MoCo with restricted local self-attention. 
As expected, the results are similar to the performance of MIM; As the kernel size decreases, the linear probing accuracy decreases but the fine-tuning accuracy increases. These results are consistent with our findings." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 613, + 233, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 233, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 233, + 624 + ], + "type": "text", + "content": "E A CLOSER LOOK AT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 627, + 342, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 342, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 342, + 639 + ], + "type": "text", + "content": "THE ROLE OF SELF-SUPERVISED VIT LAYERS" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 356, + 437, + 501, + 566 + ], + "blocks": [ + { + "bbox": [ + 104, + 349, + 504, + 395 + ], + "lines": [ + { + "bbox": [ + 104, + 349, + 504, + 395 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 504, + 395 + ], + "type": "text", + "content": "Figure C.1 visualizes the self-attention behaviors of different self-supervised learning methods in terms of attention distance and normalized mutual information. As depicted in the figure, all CLs and MIMs exhibit consistent properties. Similarly, Figure C.2 demonstrates that various sizes of models also demonstrate consistent properties." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 356, + 437, + 501, + 566 + ], + "lines": [ + { + "bbox": [ + 356, + 437, + 501, + 566 + ], + "spans": [ + { + "bbox": [ + 356, + 437, + 501, + 566 + ], + "type": "image", + "image_path": "6528fc13531a5561df61e7e99e0a8b810a3b28433ca7b19f481fa92ee89206a5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 574, + 506, + 641 + ], + "lines": [ + { + "bbox": [ + 350, + 574, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 350, + 574, + 506, + 641 + ], + "type": "text", + "content": "Figure D.1: Locality inductive bias harms linear probing but improves fine-tuning. We report the linear probing and fine-tuning accuracy of MoCo with restricted self-attention via attention masks." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 652, + 504, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 652, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 652, + 504, + 675 + ], + "type": "text", + "content": "The main text provides the key characteristics of CL and MIM. This section delves deeper into the details not covered in the main text to provide a more comprehensive understanding of the subjects." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "The role of the early modules. Figures 3 and 4 suggest that most layers of MoCo capture global patterns and have only a weak correlation with query tokens. However, one or two of MoCo layers exhibit unusual behavior. 
For example, the " + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "inline_equation", + "content": "3^{\\mathrm{rd}}" + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": " layer of MoCo focuses on local areas and its self-attention map is dependent on the query. We explore this property in more detail." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 170, + 81, + 294, + 197 + ], + "blocks": [ + { + "bbox": [ + 170, + 81, + 294, + 197 + ], + "lines": [ + { + "bbox": [ + 170, + 81, + 294, + 197 + ], + "spans": [ + { + "bbox": [ + 170, + 81, + 294, + 197 + ], + "type": "image", + "image_path": "41906592df7f4da9c6950d3212fe997066b0f7d7667a573cc916e1c431aea0f0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 204, + 249, + 214 + ], + "lines": [ + { + "bbox": [ + 211, + 204, + 249, + 214 + ], + "spans": [ + { + "bbox": [ + 211, + 204, + 249, + 214 + ], + "type": "text", + "content": "(a) MoCo" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 319, + 81, + 443, + 196 + ], + "blocks": [ + { + "bbox": [ + 319, + 81, + 443, + 196 + ], + "lines": [ + { + "bbox": [ + 319, + 81, + 
443, + 196 + ], + "spans": [ + { + "bbox": [ + 319, + 81, + 443, + 196 + ], + "type": "image", + "image_path": "ea0f156b766cf9413d5aa69695375a437bb4c9362f681e9a2fc87e055aafbff9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 356, + 204, + 404, + 214 + ], + "lines": [ + { + "bbox": [ + 356, + 204, + 404, + 214 + ], + "spans": [ + { + "bbox": [ + 356, + 204, + 404, + 214 + ], + "type": "text", + "content": "(b) SimMIM" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 224, + 506, + 280 + ], + "lines": [ + { + "bbox": [ + 104, + 224, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 506, + 280 + ], + "type": "text", + "content": "Figure E.2: The tokens of MoCo form a cluster for each image, while those of SimMIM are intermingled. This aligns with the finding that, compared to SimMIM, MoCo is linearly separable. To demonstrate this property, we visualize 3,528 tokens (196 tokens " + }, + { + "bbox": [ + 104, + 224, + 506, + 280 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 104, + 224, + 506, + 280 + ], + "type": "text", + "content": " 18 images) from the representations of the last layer via t-SNE, and find that a consistent pattern is observed even in the representations of the intermediate layers. The colors represent three different classes. See also Figures 6 and 7." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 293, + 504, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 350 + ], + "type": "text", + "content": "Figure E.1a provides the variance of normalized mutual information with respect to heads. As the results show, the variance of SimMIM is consistent across all depths whereas that of MoCo is not. 
In particular, the " + }, + { + "bbox": [ + 104, + 293, + 504, + 350 + ], + "type": "inline_equation", + "content": "3^{\\mathrm{rd}}" + }, + { + "bbox": [ + 104, + 293, + 504, + 350 + ], + "type": "text", + "content": " layer of MoCo has high variance even though other layers do not. This suggests that, while most of MoCo's self-attention heads capture global patterns and have weak correlation with query tokens, some heads deviate from this behavior and exhibit a different pattern." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 354, + 505, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 505, + 409 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 505, + 409 + ], + "type": "text", + "content": "Figure E.1b shows the distribution of normalized mutual information among heads in the " + }, + { + "bbox": [ + 104, + 354, + 505, + 409 + ], + "type": "inline_equation", + "content": "3^{\\mathrm{rd}}" + }, + { + "bbox": [ + 104, + 354, + 505, + 409 + ], + "type": "text", + "content": " layer to analyze this phenomenon. In this figure, we use kernel density estimation with Gaussian kernel to visualize the distribution. The results reveal several outlier heads in MoCo with mutual information close to 1.0. As a result, these outliers significantly raises the average value of normalized mutual information." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 422, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 506, + 477 + ], + "type": "text", + "content": "A comprehensive view through visualization of tokens from multiple images. Figure 6 visualizes how self-attention layers transform tokens from one or two images in representation space. The figure demonstrates that MoCo transforms all tokens in union while SimMIM transforms them individually. 
As a result, MoCo separates the representations at the image-level and SimMIM separates them at the token-level." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 482, + 504, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 482, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 104, + 482, + 504, + 560 + ], + "type": "text", + "content": "The t-SNE visualization (Van der Maaten & Hinton, 2008) in Figure E.2 provides consistent results and offers even a more comprehensive perspective. In this figure, we visualize the last representations of 3528 tokens from 18 images that belong to three different classes. The visualization demonstrates that MoCo separates the representations into distinct classes and even images, while maintaining the tokens close together in compact image clusters. On the other hand, SimMIM separates tokens from images, resulting in a wide representation space for each image, but the images or even classes may be challenging to linearly distinguish." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 572, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 572, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 572, + 504, + 616 + ], + "type": "text", + "content": "The first layer of MoCo aggregates tokens into compact clusters. Figures 6 and 7 show that all modules, except the first module, in MoCo behave consistently. However, we observe that MoCo's first module behaves differently and unusually than the others. We elaborate the behaviour of the first layer of module." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "content": "Figure E.3a shows the qualitative visualization of tokens for a sample image, similar to Figure 6. 
This visualization shows that the first MoCo layer aggregates tokens into compact clusters. Although this figure only uses a single image, the layer aggregates all images into a small representation space as well. In terms of singular values, we observe consistent results. Similar to Figure 7, Figures E.3b and E.3c report the second largest log singular value, instead of the relative log singular value, to investigate the absolute volume of the representations. As expected, most layers in both MoCo and SimMIM increase the singular value, but surprisingly, the first layer of MoCo reduces the singular value, meaning that the volumes of representations are decreased at both the token-level and image-level. Based on these observations, we conjecture that the first module of MoCo behaves like an embedding component." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 120, + 100, + 244, + 223 + ], + "blocks": [ + { + "bbox": [ + 120, + 100, + 244, + 223 + ], + "lines": [ + { + "bbox": [ + 120, + 100, + 244, + 223 + ], + "spans": [ + { + "bbox": [ + 120, + 100, + 244, + 223 + ], + "type": "image", + "image_path": "eb3d6f9af5ba9611bdfbe6c912a91ee24c14f2feb91040b2f6281e0bfe387850.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + 
"bbox": [ + 104, + 232, + 506, + 322 + ], + "lines": [ + { + "bbox": [ + 104, + 232, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 506, + 322 + ], + "type": "text", + "content": "Figure E.3: The first layer of MoCo clumps tokens together. We demonstrate this property from two perspectives: qualitative visualization and singular value of token distribution. Left: Similar to Figure 6, we visualize tokens of a sample image in a representation space. The blue and red data points represent the tokens before and after the self-attention transformation. As shown in this figure, the first self-attention layer clumps tokens into a compact cluster. Middle and Right: Similar to Figure 7, we visualize the second largest log singular value (not " + }, + { + "bbox": [ + 104, + 232, + 506, + 322 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 104, + 232, + 506, + 322 + ], + "type": "text", + "content": " log singular value) for depth. The singular value spectra demonstrate consistent results; the first layer of MoCo (gray area) not only clumps tokens but also images into a compact cluster." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 263, + 100, + 380, + 223 + ], + "blocks": [ + { + "bbox": [ + 263, + 100, + 380, + 223 + ], + "lines": [ + { + "bbox": [ + 263, + 100, + 380, + 223 + ], + "spans": [ + { + "bbox": [ + 263, + 100, + 380, + 223 + ], + "type": "image", + "image_path": "d64ede6f1b47ef1ed7c25065f01279e789d17e7c6c742fae29b0a2132fa6dec2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 386, + 100, + 503, + 223 + ], + "blocks": [ + { + "bbox": [ + 386, + 100, + 503, + 223 + ], + "lines": [ + { + "bbox": [ + 386, + 100, + 503, + 223 + ], + "spans": [ + { + "bbox": [ + 386, + 100, + 503, + 223 + ], + "type": "image", + "image_path": "0a8d7bc5980b846efd6f7189a450e1633c6199cb74793172b02bacd8d7c9461b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 167, + 334, + 293, + 459 + ], + "blocks": [ + { + "bbox": [ + 167, + 334, + 293, + 459 + ], + "lines": [ + { + "bbox": [ + 167, + 334, + 293, + 459 + ], + "spans": [ + { + "bbox": [ + 167, + 334, + 293, + 459 + ], + "type": "image", + "image_path": "88933cf42fcccccde418631f6f1e1e9d14fe0d056bbd93b74833adfe7c531890.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 468, + 506, + 557 + ], + "lines": [ + { + "bbox": [ + 104, + 468, + 506, + 557 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 506, + 557 + ], + "type": "text", + "content": "Figure F.1: Self-attention and representations in fine-tuned models exhibit consistency with those of pre-trained models. Similar to Figures 4 and 9, we present the normalized mutual information and the Fourier analysis results of fine-tuned models. 
The abbreviation \"ft\" stands for \"fine-tuned model.\" Left: Similar to pre-trained models, the mutual information of MoCo's self-attention maps is generally lower compared to that of SimMIM. However, it is noteworthy that the mutual information of the later self-attention maps in SimMIM decreases significantly. This is because the later layers of a model trained with supervision or fine-tuning tend to capture global information. Right: Similarly, SimMIM utilizes higher frequency information than MoCo." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 312, + 334, + 444, + 459 + ], + "blocks": [ + { + "bbox": [ + 312, + 334, + 444, + 459 + ], + "lines": [ + { + "bbox": [ + 312, + 334, + 444, + 459 + ], + "spans": [ + { + "bbox": [ + 312, + 334, + 444, + 459 + ], + "type": "image", + "image_path": "201e8fdeec51703e6d99d8f341cb390d5af57c3dd2a701c46eb2db4e81fa7963.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 571, + 468, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 468, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 468, + 597 + ], + "type": "text", + "content": "F FINE-TUNED MODELS INHERIT THE PROPERTIES OF PRE-TRAINED MODELS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 612, + 506, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 646 + ], + "type": "text", + "content": "The main text focuses on highlighting the key properties of pre-trained models. This section demonstrates that these properties are also utilized by fine-tuned models. As a result, we can safely apply the insights gained from the main text to various situations." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 659, + 505, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 505, + 704 + ], + "type": "text", + "content": "Consistent results in self-attention and Fourier analysis. Figures 3 and 4 demonstrate that MoCo captures global areas and that its self-attention are less related to the query tokens, compared with SimMIM. In addition, Figure 9 shows that MoCo captures low-frequency information as opposite to SimMIM. These results are consistent in the fine-tuning scheme." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "Figure F.1a reveals the self-attention behaviours of fine-tuned MoCo and SimMIM in terms of normalized mutual information. 
Similar to pre-trained models, the fine-tuned self-attention maps of" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 149, + 82, + 317, + 219 + ], + "blocks": [ + { + "bbox": [ + 149, + 82, + 317, + 219 + ], + "lines": [ + { + "bbox": [ + 149, + 82, + 317, + 219 + ], + "spans": [ + { + "bbox": [ + 149, + 82, + 317, + 219 + ], + "type": "image", + "image_path": "ae0d609fe3927c5ee5f0360d0d1cc99ec1c299dc6fad161ea59db333747faa63.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 189, + 224, + 271, + 236 + ], + "lines": [ + { + "bbox": [ + 189, + 224, + 271, + 236 + ], + "spans": [ + { + "bbox": [ + 189, + 224, + 271, + 236 + ], + "type": "text", + "content": "(a) Stylized ImageNet" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 334, + 105, + 462, + 220 + ], + "blocks": [ + { + "bbox": [ + 334, + 105, + 462, + 220 + ], + "lines": [ + { + "bbox": [ + 334, + 105, + 462, + 220 + ], + "spans": [ + { + "bbox": [ + 334, + 105, + 462, + 220 + ], + "type": "image", + "image_path": "6dd58ff76341c3cb4e49f52e1d68b72d5e52b09730b9b39e843e3415f0658cfe.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + 
"bbox": [ + 334, + 224, + 462, + 236 + ], + "lines": [ + { + "bbox": [ + 334, + 224, + 462, + 236 + ], + "spans": [ + { + "bbox": [ + 334, + 224, + 462, + 236 + ], + "type": "text", + "content": "(b) Robustness for noise frequency" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 244, + 506, + 313 + ], + "lines": [ + { + "bbox": [ + 104, + 244, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 244, + 506, + 313 + ], + "type": "text", + "content": "Figure F.2: Fine-tuned ViTs inherit the robustness against frequency-based noise. Similar to Figure 8b, we measure the decrease in the accuracy of ViTs fine-tuned with MoCo and SimMIM. Left: Even with fine-tuned ViTs, MoCo is relatively shape-biased and SimMIM relatively texture-biased. This bias is just less apparent than in linear probing models. Right: The robustness against frequency-based random noise also suggests the same: MoCo is robust against high-frequency noise, but SimMIM is not. In conclusion, fine-tuned models inherit the properties of linear probing models." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 327, + 506, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 506, + 417 + ], + "type": "text", + "content": "MoCo have generally lower mutual information compared to those of SimMIM. The only significant difference is that the mutual information of the later self-attention maps in fine-tuned SimMIM decreases significantly, as later layers in models trained with supervision or fine-tuning tend to capture more global information. As a result, the gap between the two methods is reduced. This is also reflected in the consistent results of Fourier analysis as shown in Figure F.1b. 
In this analysis, SimMIM captures higher-frequency information compared to MoCo in fine-tuning scheme as well. However, the later layers of SimMIM attempt to capture low-frequency information. Therefore, the gap of fine-tuned models is smaller than that of pre-trained models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 428, + 506, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 428, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 428, + 506, + 517 + ], + "type": "text", + "content": "CL is shape-biased and MIM is texture-biased in fine-tuning scheme. In Figure 8, we demonstrate that linear probing model with CL (MoCo) is more shape-biased and that with MIM (SimMIM) is texture-biased, compared with each other. As in the experiment, we calculate the classification results of ImageNet fine-tuned MoCo and SimMIM on Stylized-ImageNet, and measure the decrease in accuracy against frequency-based random noise. As we would expected, Figure F.2 shows that the property also extends to the fine-tuned model. Even though we still observe the difference between MoCo and SimMIM, the performance gap between MoCo and SimMIM is quite reduced compared to the gap between the linear probing models." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 528, + 326, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 326, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 326, + 628 + ], + "type": "text", + "content": "Later layers of CL and early layers of MIM are important in find-tuning phases. As shown in Figure 11, the later layers of the CL and the early layers of the MIM are linearly separable. This finding suggests that these layers are significant, however, it does not provide direct evidence that such properties are preserved during fine-tuning phases. We demonstrate that these layers play a crucial role in fine-tuning phases as well." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 633, + 326, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 633, + 326, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 633, + 326, + 733 + ], + "type": "text", + "content": "To support this claim, we conduct a study to measure the accuracy drop of fine-tuned models using pretrained models with a few blocks initialized. As shown in Figure F.3a, the results indicate that the initializing a few early blocks in the pre-training models of SimMIM significantly harms the fine-tuning accuracy, compared to MoCo. These observations suggest that early layers of SimMIM play an important role in fine-tuning. Conversely, Figure F.3b shows that initializing later" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 333, + 531, + 503, + 639 + ], + "blocks": [ + { + "bbox": [ + 333, + 531, + 503, + 639 + ], + "lines": [ + { + "bbox": [ + 333, + 531, + 503, + 639 + ], + "spans": [ + { + "bbox": [ + 333, + 531, + 503, + 639 + ], + "type": "image", + "image_path": "ca4a5bdcfd162bc5e03d71ed980045dda92682b4358c070ecb137d3bd1b4fb84.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 348, + 644, + 403, + 655 + ], + "lines": [ + { + "bbox": [ + 348, + 644, + 403, + 655 + ], + "spans": [ + { + "bbox": [ + 348, + 644, + 403, + 655 + ], + "type": "text", + "content": "(a) Early layer" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 433, + 644, + 488, + 655 + ], + "lines": [ + { + "bbox": [ + 433, + 644, + 488, + 655 + ], + "spans": [ + { + "bbox": [ + 433, + 644, + 488, + 655 + ], + "type": "text", + "content": "(b) Later layer" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 330, + 664, + 506, + 730 + ], + "lines": [ + { + "bbox": [ + 330, + 664, + 506, + 730 + ], + "spans": [ + { + "bbox": [ + 330, + 664, + 506, + 730 + ], + "type": "text", 
+ "content": "Figure F.3: The later layers of CL and early layers of MIM play a key role in the fine-tuning scheme. To show this, we initialize a few blocks and measure the decrease in the fine-tuning accuracy of pretrained models." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 106 + ], + "type": "text", + "content": "blocks in the pre-training models of SimMIM does not significantly harms the fine-tuning accuracy, suggesting that they are not important in fine-tuning compared with MoCo." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 166 + ], + "type": "text", + "content": "One limitation of this experiment is the evaluation of the accuracy drop in a single run. Since the accuracy drop of MoCo is marginally higher than that of SimMIM at the first initialization depth in Figure F.3b, additional experiments may improve the results. 
In this experiment, we utilized the same fine-tuning settings for both MoCo and SimMIM; but experiments with fine-tuning settings tailored to each method may provide further insight." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 501, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 501, + 196 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 501, + 196 + ], + "type": "text", + "content": "G HYBRID MODELS OUTPERFORM CL AND MIM IN DOWNSTREAM TASKS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 206, + 294, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 206, + 294, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 206, + 294, + 316 + ], + "type": "text", + "content": "The claim that CL and MIM are complementary is demonstrated only on ImageNet in Section 5. To validate this claim in tasks beyond ImageNet, we evaluated the pre-trained models of the hybrid method introduced in Section 5 for another classification task and a semantic segmentation task. In particular, we measured the accuracy on iNaturalist 2018 (Van Horn et al., 2018) and the mIoU on ADE20K (Zhou et al., 2019). As shown in Table G.1, the hybrid" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 304, + 237, + 502, + 305 + ], + "blocks": [ + { + "bbox": [ + 299, + 209, + 506, + 232 + ], + "lines": [ + { + "bbox": [ + 299, + 209, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 299, + 209, + 506, + 232 + ], + "type": "text", + "content": "Table G.1: Hybrid models of CL and MIM outperform both CL and MIM in various tasks." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 237, + 502, + 305 + ], + "lines": [ + { + "bbox": [ + 304, + 237, + 502, + 305 + ], + "spans": [ + { + "bbox": [ + 304, + 237, + 502, + 305 + ], + "type": "table", + "html": "
λ (IMPORTANCE OF CL)iNat-18ADE20k
0.0 (SimMIM)62.135.4
0.2 (SimMIM + MoCo)68.842.2
1.0 (MoCo)66.239.7
", + "image_path": "798889eb7cea5efb9c1e7a77e37956cb55992745c85c03583dfdc68e4ca6174a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 316, + 506, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 506, + 340 + ], + "type": "text", + "content": "model of SimMIM and MoCo outperforms both SimMIM and MoCo in various downstream tasks. Therefore, we conclude that the effectiveness of this claim extends beyond ImageNet." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_content_list.json b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..acd81781cb4f62ac6959b0187c130cbd8da2dcd5 --- /dev/null +++ b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_content_list.json @@ -0,0 +1,2318 @@ +[ + { + "type": "text", + "text": "WHAT IS 
MISSING IN IRM TRAINING AND EVALUATION? CHALLENGES AND SOLUTIONS", + "text_level": 1, + "bbox": [ + 171, + 98, + 823, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yihua Zhang $^{1}$ , Pranay Sharma $^{2}$ , Parikshit Ram $^{3}$ , Mingyi Hong $^{4}$ , Kush Varshney $^{3}$ , Sijia Liu $^{1,3}$ $^{1}$ Michigan State University, $^{2}$ Carnegie Mellon University, $^{3}$ IBM Research, $^{4}$ University of Minnesota", + "bbox": [ + 181, + 167, + 844, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 234, + 547, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Invariant risk minimization (IRM) has received increasing attention as a way to acquire environment-agnostic data representations and predictions, and as a principled solution for preventing spurious correlations from being learned and for improving models' out-of-distribution generalization. Yet, recent works have found that the optimality of the originally-proposed IRM optimization (IRMv1) may be compromised in practice or could be impossible to achieve in some scenarios. Therefore, a series of advanced IRM algorithms have been developed that show practical improvement over IRMv1. In this work, we revisit these recent IRM advancements, and identify and resolve three practical limitations in IRM training and evaluation. First, we find that the effect of batch size during training has been chronically overlooked in previous studies, leaving room for further improvement. We propose small-batch training and highlight the improvements over a set of large-batch optimization techniques. Second, we find that improper selection of evaluation environments could give a false sense of invariance for IRM. To alleviate this effect, we leverage diversified test-time environments to precisely characterize the invariance of IRM when applied in practice. Third, we revisit Ahuja et al. 
(2020)'s proposal to convert IRM into an ensemble game and identify a limitation when a single invariant predictor is desired instead of an ensemble of individual predictors. We propose a new IRM variant to address this limitation based on a novel viewpoint of ensemble IRM games as consensus-constrained bilevel optimization. Lastly, we conduct extensive experiments (covering 7 existing IRM variants and 7 datasets) to justify the practical significance of revisiting IRM training and evaluation in a principled manner.", + "bbox": [ + 228, + 263, + 769, + 585 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 608, + 336, + 623 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Deep neural networks (DNNs) have enjoyed unprecedented success in many real-world applications (He et al., 2016; Krizhevsky et al., 2017; Simonyan & Zisserman, 2014; Sun et al., 2014). However, experimental evidence (Beery et al., 2018; De Haan et al., 2019; DeGrave et al., 2021; Geirhos et al., 2020; Zhang et al., 2022b) suggests that DNNs trained with empirical risk minimization (ERM), the most commonly used training method, are prone to reproducing spurious correlations in the training data (Beery et al., 2018; Sagawa et al., 2020). This phenomenon causes performance degradation when facing distributional shifts at test time (Gulrajani & Lopez-Paz, 2020; Koh et al., 2021; Wang et al., 2022; Zhou et al., 2022a). In response, the problem of invariant prediction arises to enforce the model trainer to learn stable and causal features (Beery et al., 2018; Sagawa et al., 2020).", + "bbox": [ + 169, + 638, + 826, + 765 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In pursuit of out-of-distribution generalization, a new model training paradigm, termed invariant risk minimization (IRM) (Arjovsky et al., 2019), has received increasing attention to overcome the shortcomings of ERM against distribution shifts. 
In contrast to ERM, IRM aims to learn a universal representation extractor, which can elicit an invariant predictor across multiple training environments. However, different from ERM, the learning objective of IRM is highly non-trivial to optimize in practice. Specifically, IRM requires solving a challenging bi-level optimization (BLO) problem with a hierarchical learning structure: invariant representation learning at the upper-level and invariant predictive modeling at the lower-level. Various techniques have been developed to solve IRM effectively, such as (Ahuja et al., 2020; Lin et al., 2022; Rame et al., 2022; Zhou et al., 2022b) to name a few. Despite the proliferation of IRM advancements, several issues in the theory and practice have also appeared. For example, recent works (Rosenfeld et al., 2020; Kamath et al.,", + "bbox": [ + 169, + 771, + 828, + 925 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2021) revealed the theoretical failure of IRM in some cases. In particular, there exist scenarios where the optimal invariant predictor is impossible to achieve, and the IRM performance may fall behind even that of ERM. Practical studies also demonstrate that the performance of IRM relies on multiple factors, e.g., model size (Lin et al., 2022; Zhou et al., 2022b), environment difficulty (Dranker et al., 2021; Krueger et al., 2021), and dataset type (Gulrajani & Lopez-Paz, 2020).", + "bbox": [ + 169, + 103, + 823, + 174 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Therefore, key challenges remain in deploying IRM to real-world applications. 
In this work, we revisit recent IRM advancements and uncover and tackle several pitfalls in IRM training and evaluation, which have so far gone overlooked. We first identify the large-batch training issue in existing IRM algorithms, which prevents escape from bad local optima during IRM training. Next, we show that evaluation of IRM performance with a single test-time environment could lead to an inaccurate assessment of prediction invariance, even if this test environment differs significantly from training environments. Based on the above findings, we further develop a novel IRM variant, termed BLOC-IRM, by interpreting and advancing the IRM-GAME method (Ahuja et al., 2020) through the lens of BLO with Consensus prediction. Below, we list our contributions (1-3).", + "bbox": [ + 169, + 180, + 826, + 306 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 We demonstrate that the prevalent use of large-batch training leaves significant room for performance improvement in IRM, something chronically overlooked in the previous IRM studies with benchmark datasets COLORED-MNIST and COLORED-FMNIST. By reviewing and comparing with 7 state-of-the-art (SOTA) IRM variants (Table 1), we show that simply using small-batch training improves generalization over a series of more involved large-batch optimization enhancements.", + "We also show that an inappropriate evaluation metric could give a false sense of invariance to IRM. Thus, we propose an extended evaluation scheme that quantifies both precision and 'invariance' across diverse testing environments.", + "Further, we revisit and advance the IRM-GAME approach (Ahuja et al., 2020) through the lens of consensus-constrained BLO. 
We remove the need for an ensemble (one per training environment) of predictors in IRM-GAME by proposing BLOC-IRM (BLO with Consensus IRM), which produces a single invariant predictor.", + "Lastly, we conduct extensive experiments (on 7 datasets, using diverse model architectures and training environments) to justify the practical significance of our findings and methods. Notably, we conduct experiments on the CELEBA dataset as a new IRM benchmark with realistic spurious correlations. We show that BLOC-IRM outperforms all baselines in nearly all settings." + ], + "bbox": [ + 169, + 311, + 826, + 558 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1.1 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 592, + 331, + 606 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "IRM methods. Inspired by the invariance principle (Peters et al., 2016), Arjovsky et al. (2019) define IRM as a BLO problem, and develop a relaxed single-level formulation, termed IRMv1, for ease of training. Recently, there has been considerable work to advance IRM techniques. Examples of IRM variants include penalization on the variance of risks or loss gradients across training environments (Chang et al., 2020; Krueger et al., 2021; Rame et al., 2022; Xie et al., 2020; Xu & Jaakkola, 2021; Xu et al., 2022), domain regret minimization (Jin et al., 2020), robust optimization over multiple domains (Xu & Jaakkola, 2021), sparsity-promoting invariant learning (Zhou et al., 2022b), Bayesian inference-baked IRM (Lin et al., 2022), and ensemble game over the environment-specific predictors (Ahuja et al., 2020). We refer readers to Section 2 and Table 1 for more details on the IRM methods that we will focus on in this work.", + "bbox": [ + 169, + 625, + 826, + 765 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite the potential and popularity of IRM, some works have also shown the theoretical and practical limitations of current IRM algorithms. 
Specifically, Chen et al. (2022); Kamath et al. (2021) show that invariance learning via IRM could fail and be worse than ERM in some two-bit environment setups on COLORED-MNIST, a synthetic benchmark dataset often used in IRM works. The existence of failure cases of IRM is also theoretically shown by Rosenfeld et al. (2020) for both linear and non-linear models. Although subsequent IRM algorithms take these failure cases into account, there still exist huge gaps between theoretically desired IRM and its practical variants. For example, Lin et al. (2021; 2022); Zhou et al. (2022b) found many IRM variants incapable of maintaining graceful generalization on large and deep models. Moreover, Ahuja et al. (2021); Dranker et al. (2021) demonstrated that the performance of IRM algorithms could depend on practical details, e.g., dataset size, sample efficiency, and environmental bias strength. The above IRM limitations in", + "bbox": [ + 169, + 771, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "spire our work to study when and how we can turn the IRM advancements into effective solutions, to gain high-accuracy and stable invariant predictions in practical scenarios.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Domain generalization. IRM is also closely related to domain generalization (Carlucci et al., 2019; Gulrajani & Lopez-Paz, 2020; Koh et al., 2021; Li et al., 2019; Nam et al., 2021; Wang et al., 2022; Zhou et al., 2022a). Compared to IRM, domain generalization includes a wider range of approaches to improve prediction accuracy against distributional shifts (Beery et al., 2018; Jean et al., 2016; Koh et al., 2021). 
For example, an important line of research is to improve representation learning by encouraging cross-domain feature resemblance (Long et al., 2015; Tzeng et al., 2014). The studies on domain generalization have also been conducted across different learning paradigms, e.g., adversarial learning (Ganin et al., 2016), self-supervised learning (Carlucci et al., 2019), and meta-learning (Balaji et al., 2018; Dou et al., 2019).", + "bbox": [ + 169, + 147, + 826, + 273 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 PRELIMINARIES AND SETUP", + "text_level": 1, + "bbox": [ + 171, + 292, + 442, + 308 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we introduce the basics of IRM and provide an overview of our IRM case study.", + "bbox": [ + 169, + 324, + 797, + 339 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "IRM formulation. In the original IRM framework Arjovsky et al. (2019), consider a supervised learning paradigm, with datasets $\\{\\mathcal{D}^{(e)}\\}_{e\\in \\mathcal{E}_{\\mathrm{tr}}}$ collected from $N$ training environments $\\mathcal{E}_{\\mathrm{tr}} = \\{1,2,\\dots ,N\\}$ . The training samples in $\\mathcal{D}^{(e)}$ (corresponding to the environment $e$ ) are of the form $(\\mathbf{x},y)\\in \\mathcal{X}\\times \\mathcal{Y}$ , where $\\mathcal{X}$ and $\\mathcal{Y}$ are, respectively, the raw feature space and the label space. IRM aims to find an environment-agnostic data representation $\\phi_{\\theta}:\\mathcal{X}\\to \\mathcal{Z}$ , which elicits an invariant prediction $f_{\\mathbf{w}}:\\mathcal{Z}\\rightarrow \\mathcal{V}$ that is simultaneously optimal for all environments. Here $\\pmb{\\theta}$ and $\\mathbf{w}$ denote model parameters to be learned, and $\\mathcal{Z}$ denotes the representation space. 
Thus, IRM yields an invariant predictor $f_{\\mathbf{w}}\\circ \\phi_{\\pmb{\\theta}}:\\mathcal{X}\\to \\mathcal{Y}$ that can generalize to unseen test-time environments $\\{\\mathcal{D}^{(e)}\\}_{e\\notin \\mathcal{E}_{\\mathrm{tr}}}$ . Here $\\circ$ denotes function composition, i.e., $f_{\\mathbf{w}}\\circ \\phi_{\\pmb{\\theta}}(\\cdot) = f_{\\mathbf{w}}(\\phi_{\\pmb{\\theta}}(\\cdot))$ . We will use $\\mathbf{w}\\circ \\pmb{\\theta}$ as a shorthand for $f_{\\mathbf{w}}\\circ \\phi_{\\pmb{\\theta}}$ . IRM constitutes the following BLO problem:", + "bbox": [ + 169, + 345, + 823, + 489 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right); \\quad \\text {s u b j e c t t o} \\quad \\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\in \\underset {\\bar {\\mathbf {w}}} {\\arg \\min } \\ell^ {(e)} \\left(\\bar {\\mathbf {w}} \\circ \\boldsymbol {\\theta}\\right), \\forall e \\in \\mathcal {E} _ {\\mathrm {t r}}, \\quad (\\text {I R M})\n$$\n", + "text_format": "latex", + "bbox": [ + 196, + 498, + 823, + 523 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\ell^{(e)}(\\mathbf{w} \\circ \\boldsymbol{\\theta})$ is the per-environment training loss of the predictor $\\mathbf{w} \\circ \\boldsymbol{\\theta}$ under $\\mathcal{D}^{(e)}$ . Clearly, IRM involves two optimization levels that are coupled through the lower-level solution $\\mathbf{w}^*(\\boldsymbol{\\theta})$ . Achieving the desired invariant prediction requires the solution sets of the individual lower-level problems $\\{\\arg \\min_{\\bar{\\mathbf{w}}} \\ell^{(e)}(\\bar{\\mathbf{w}} \\circ \\boldsymbol{\\theta}), e \\in \\mathcal{E}_{tr}\\}$ to be non-singleton. 
However, BLO problems with non-singleton lower-level solution sets are significantly more challenging (Liu et al., 2021). To circumvent this difficulty, Arjovsky et al. (2019) relax (IRM) into a single-level optimization problem (a.k.a., IRMv1):", + "bbox": [ + 169, + 529, + 823, + 616 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} [ \\ell^ {(e)} (\\boldsymbol {\\theta}) + \\gamma \\| \\nabla_ {w | w = 1. 0} \\ell^ {(e)} (w \\circ \\boldsymbol {\\theta}) \\| _ {2} ^ {2} ], \\tag {IRMv1}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 623, + 823, + 646 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\gamma > 0$ is a regularization parameter and $\\nabla_{w|w = 1.0}\\ell^{(e)}$ denotes the gradient of $\\ell^{(e)}$ with respect to $w$ , computed at $w = 1.0$ . Compared with IRM, IRMv1 is restricted to linear invariant predictors, and penalizes the deviation of individual environment losses from stationarity to approach the lower-level optimality in (IRM). IRMv1 uses the fact that a scalar predictor ( $w = 1.0$ ) is equivalent to a linear predictor. Despite the practical simplicity of (IRMv1), it may fail to achieve the desired invariance (Chen et al., 2022; Kamath et al., 2021).", + "bbox": [ + 169, + 652, + 823, + 737 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Case study of IRM methods. As illustrated above, the objective of IRM is difficult to optimize, while IRMv1 only provides a sub-optimal solution. Subsequent advances have attempted to reduce this gap. In this work, we focus on 7 popular IRM variants and evaluate their invariant prediction performance over 7 datasets. Table 1 and Table 2 respectively summarize the IRM methods and the datasets considered in this work. 
We survey the most representative and effective IRM variants in the literature, which will also serve as our baselines in performance comparison.", + "bbox": [ + 169, + 744, + 823, + 828 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Following Table 1, we first introduce the IRMv0 variant, a generalization of IRMv1, by relaxing its assumption of linearity of the predictor $\\mathbf{w}$ , yielding", + "bbox": [ + 169, + 834, + 823, + 863 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\mathbf {w}, \\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} [ \\ell^ {(e)} (\\mathbf {w} \\circ \\boldsymbol {\\theta}) + \\gamma \\| \\nabla_ {\\mathbf {w}} \\ell^ {(e)} (\\mathbf {w} \\circ \\boldsymbol {\\theta}) \\| _ {2} ^ {2} ]. \\tag {IRMv0}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 866, + 823, + 888 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Next, we consider the risk extrapolation method REx (Krueger et al., 2021), an important baseline based on distributionally robust optimization for group shifts (Sagawa et al., 2019). Furthermore,", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/027c35cc5c5ca28afaaec8b0e8c129c95774c586f6d2bcccd5d980a4661ff219.jpg", + "table_caption": [ + "Table 1: Summary of the 7 existing IRM variants considered in this work, and the proposed BLOC-IRM method (see Section 5). We also list the 7 benchmark datasets used to evaluate IRM performance, namely, COLORED-MNIST (CoM), COLORED-FMNIST (CoF), CIFAR-MNIST (CiM), COLORED-OBJECT (CoO), CELEBA (CA), PACS (P) and VLCS (A). 
The symbols $\\checkmark$ signifies the dataset used in the specific reference." + ], + "table_footnote": [], + "table_body": "
IRM\nMethodVenueDatasetsReference
CoMCoFCiMCoOCAPV
IRMv1arXiv(Arjovsky et al., 2019)
IRMv0N/AThis Work
IRM-GAMEICML(Ahuja et al., 2020)
REXICML(Krueger et al., 2021)
BIRMCVPR(Lin et al., 2022)
SPARSEIRMICML(Zhou et al., 2022b)
FISHRICML(Rame et al., 2022)
OursN/AThis Work
", + "bbox": [ + 173, + 218, + 532, + 311 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/ee501038ae999121b6a3e3872525dc0b3bc4d9a66ce51aa740a8bf44ee1698cf.jpg", + "table_caption": [ + "Table 2: Dataset setups. 'Invariant' and 'Spurious' represent the core and spurious features. 'Env1' and 'Env2' are environments with different spurious correlations." + ], + "table_footnote": [], + "table_body": "
DatasetInvariantSpuriousEnv 1Env 2
CoMDigitColor
CoFObjectColor
CiMCIFARMNIST
CoOObjectColor
CASmilingHair Color
PObjectTexture
VObjectEnvironment
", + "bbox": [ + 560, + 164, + 823, + 311 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "inspired by the empirical findings that the performance of IRM could be sensitive to model size (Choe et al., 2020; Gulrajani & Lopez-Paz, 2020), we choose the SOTA methods Bayesian IRM (BIRM) (Lin et al., 2022) and sparse IRM (SPARSEIRM) (Zhou et al., 2022b), both of which show improved performance with large models. Also, we consider the SOTA method FISHR (Rame et al., 2022), which modifies IRM to penalize the domain-level gradient variance in single-level risk minimization. FISHR provably matches both domain-level risks and Hessians. Lastly, we include IRM-GAME (Ahuja et al., 2020) as a special variant of IRM. Different from the other methods which seek an invariant predictor, IRM-GAME endows each environment with a predictor, and leverages this ensemble of predictors to achieve invariant representation learning. This is in contrast to other existing works which seek an invariant predictor. Yet, we show in Section 5 that IRM-GAME can be interpreted through the lens of consensus-constrained BLO and generalized for invariant prediction. We also highlight that diverse dataset types are considered in this work (see Table 2) to benchmark IRM's performance. More details on dataset selections can be found in Appendix A.", + "bbox": [ + 169, + 325, + 826, + 521 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 LARGE-BATCH TRAINING CHALLENGE AND IMPROVEMENT", + "text_level": 1, + "bbox": [ + 171, + 542, + 702, + 559 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we demonstrate and resolve the large-batch training challenge in current IRM implementations (Table 1).", + "bbox": [ + 169, + 575, + 823, + 603 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Large-batch optimization causes instabilities of IRM training. 
Using very large-size batches for model training can result in the model getting trapped near a bad local optimum (Keskar et al., 2016). This happens as a result of the lack of stochasticity in the training process, and is known to exist even in the ERM paradigm (Goyal et al., 2017; You et al., 2017a). Yet, nearly all the existing IRM methods follow the training setup of IRMv1 (Arjovsky et al., 2019), which used the full-batch gradient descent (GD) method rather than the mini-batch stochastic gradient descent (SGD) for IRM training over COLORED-MNIST and COLORED-FMNIST. In the following, we show that large-batch training might give a false impression of the relative ranking of IRM performances.", + "bbox": [ + 169, + 611, + 583, + 792 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d81db67045654a5d0cb71843aa85d8ea308dbecba3d380258eefcaf3a5d9d075.jpg", + "image_caption": [ + "Figure 1: The performance of three IRM methods (IRMv1, IRMv0, and REX) vs. batch-size under COLORED-MNIST. The full batch-size is $50\mathrm{k}$ ." + ], + "image_footnote": [], + "bbox": [ + 606, + 611, + 805, + 739 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We start with an exploration of the impact of batch size on the invariant prediction accuracy of existing IRM methods under COLORED-MNIST. Here the invariant prediction accuracy refers to the averaged accuracy of the invariant predictor applied to diverse test-time environments. We defer its formal description to Section 4. Figure 1 shows the invariant prediction accuracy of three IRM methods IRMv1, IRMv0, and REX vs. the data batch size (see Figure A1 for results of other IRM variants and Figure A5 for COLORED-FMNIST). Recall that the full batch size (50k) was used in the existing IRM implementations (Arjovsky et al., 2019; Krueger et al., 2021). 
As we can see, in the full-batch setup, IRM methods lead to widely different invariant prediction accuracies, where REX and IRMv1 significantly outperform IRMv0. In contrast, in the small-batch case (with size", + "bbox": [ + 169, + 797, + 826, + 924 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1k), the discrepancy in accuracy across methods vanishes. We see that IRMv0 can be as effective as IRMv1 and other IRM variants (such as REX) only if an appropriate small batch size is used.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Empirical evidence in Figure 1 shows that large-batch IRM training is less effective than small-batch. This is aligned with the observations in ERM (You et al., 2017b; 2018; 2019), where the lack of stochasticity makes the optimizer difficult to escape from a sharp local minimum. We also justify this issue by visualizing the loss landscapes in Figure A2. Notably, the small-batch training enables IRMv1 to converge to a local optimum with a flat loss landscape, indicating better generalization (Keskar et al., 2016).", + "bbox": [ + 169, + 138, + 826, + 223 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Small-batch training is effective versus a zoo of large-batch optimization enhancements. To mitigate the large-batch IRM training issue, we next investigate the effectiveness of both small-batch training and a zoo of large-batch optimization enhancements. Inspired by large-batch training techniques to scale up ERM, we consider Large-batch SGD (LSGD) (Goyal et al., 2017) and Layerwise Adaptive Learning Rate (LALR) (You et al., 2017b; 2018; 2019; Zhang et al., 2022a). 
Both methods aim to smoothen the optimization trajectory by improving either the learning rate scheduler or the quality of initialization. Furthermore, we adopt sharpness-aware minimization (SAM) (Foret et al., 2020) as another possible large-batch training solution to explicitly penalize the sharpness of the loss landscape. We integrate the above optimization techniques with IRM, leading to the variants IRM-LSGD, IRM-LALR, and IRM-SAM. See Appendix B.1 for more details.", + "bbox": [ + 169, + 229, + 826, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Table 3, we compare the performance of the simplest small-batch IRM training with that of those large-batch optimization technique-integrated IRM variants (i.e., 'LSGD/LALR/SAM' in the Table). As we can see, the use of large-batch optimization techniques indeed improves the prediction accuracy over the original IRM implementation. We also observe that the use of SAM for IRM is consistently better than LALR and LSGD, indicating the promise of SAM to scale up IRM with a large batch size. Yet, the small-batch training protocol consistently outperforms large-batch training across all the IRM variants (see the column 'Small'). Additional experiment results in Section 6 show that small-", + "bbox": [ + 169, + 376, + 545, + 555 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/b6c86d9aa658dfce9c60b22230d091397f7113139270a7d5f698cb303a06f18d.jpg", + "table_caption": [ + "Table 3: Prediction accuracy of IRM methods on COLORED-MNIST using the original large-batch implementation ('Original'), the large-batch optimization-integrated implementations ('LSGD/LALR/SAM'), and the small-batch training recipe ('Small')." + ], + "table_footnote": [], + "table_body": "
MethodOriginalLSGDLALRSAMSmall
IRMv167.1367.3167.4467.7968.33
IRMv065.3966.4266.7666.9968.37
IRM-GAME65.6965.8265.4766.2367.73
REX67.4267.5367.5967.8268.42
BIRM67.9367.9968.2168.3268.71
SPARSEIRM67.7267.8567.9968.1368.81
FISHR67.8867.8267.9368.1168.69
Average67.0267.2567.3467.6368.44
", + "bbox": [ + 555, + 455, + 823, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "batch IRM training is effective across datasets, and promotes the invariance achieved by all methods.", + "bbox": [ + 169, + 556, + 823, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 MULTI-ENVIRONMENT INVARIANCE EVALUATION", + "text_level": 1, + "bbox": [ + 169, + 592, + 620, + 608 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we revisit the evaluation metric used in existing IRM methods, and show that expanding the diversity of test-time environments would improve the accuracy of invariance assessment.", + "bbox": [ + 169, + 625, + 823, + 655 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Nearly all the existing IRM methods (including those listed in Table 1) follow the evaluation pipeline used in the vanilla IRM framework (Arjovsky et al., 2019), which assesses the performance of the learned invariant predictor on a single unseen test environment. This test-time environment is significantly different from train-time environments. For example, COLORED-MNIST (Arjovsky et al., 2019) suggests a principled way to define two-bit environments, widely-used for IRM dataset curation. Specifically, the COLORED-MNIST task is to predict the label of the handwritten digit groups (digits 0-4 for group 1 and digits 5-9 for group 2). The digit number is also spuriously correlated with the digit color (Table 2). This spurious correlation is controlled by an environment bias parameter $\\beta$ , which specifies different data environments with different levels of spurious correlation1. In (Arjovsky et al., 2019), $\\beta = 0.1$ and $\\beta = 0.2$ are used to define two training environments, which sample the color ID by flipping the digit group label with probability $10\\%$ and $20\\%$ , respectively. 
At test time, the invariant accuracy is evaluated on a single, unseen environment with $\\beta = 0.9$ .", + "bbox": [ + 169, + 659, + 825, + 828 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "However, the prediction accuracy of IRM could be sensitive to the choice of test-time environment (i.e., the value of $\\beta$ ). For the default test environment $\\beta = 0.9$ , the predictor performance of three representative IRM methods (IRMv1, IRM-GAME, FISHR) ranked from high to low is IRM-GAME>FISHR>IRMv1. Given this apparent ranking, we explore more diverse test-time environments, generated by $\\beta \\in \\Omega := \\{0.05, 0.1, \\ldots, 0.95\\}$ .", + "bbox": [ + 169, + 833, + 828, + 906 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "In the two-bit environment, there exists another environment parameter $\\alpha$ that controls the label noise level.", + "bbox": [ + 191, + 909, + 823, + 924 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Although the train-time bias parameters $\\{0.1, 0.2\\}$ belong to $\\Omega$ , test data is generated afresh, different from training data. We see in Figure 2A that the superiority of IRM-GAME at $\\beta = 0.9$ vanishes for smaller $\\beta$ . Consequently, for invariant prediction evaluated in other testing environments (e.g., $\\beta < 0.4$ ), the performance ranking of the same methods becomes IRMV1>FISHR>IRM-GAME. This mismatch of results suggests we measure the 'invariance' of IRM methods against diverse test environments. Otherwise, evaluation with single $\\beta$ could give a false sense of invariance. 
In Figure 2B, we present the box plots of prediction accuracies for IRM variants, over the diverse set of testing environments $(\\beta \\in \\Omega)$ . Evidently, IRMV1, the oldest", + "bbox": [ + 169, + 104, + 444, + 366 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(sub-optimal) IRM method, yields the least variance of invariant prediction accuracies and the best average prediction accuracy, compared to both IRM-GAME and FISHR. To summarize, the new evaluation method, with diverse test environments, enables us to make a fair comparison of IRM methods implemented in different training environment settings. Unless specified otherwise, we use the multi-environment evaluation method throughout this work.", + "bbox": [ + 169, + 366, + 823, + 436 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a24b292c97fe77b14b0f86373dec97022fcf16b5cfe6084ea939afc2d489514d.jpg", + "image_caption": [ + "(A)" + ], + "image_footnote": [], + "bbox": [ + 457, + 106, + 637, + 224 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f9966abd7be911907704e9dd96d7ef1fa68175e53f5b5b0edc6731bd5a53335b.jpg", + "image_caption": [ + "(B)", + "Figure 2: Performance comparison of IRM variants IRMv1, IRM-GAME, and FISHR on COLORED-MNIST. (A) Evaluation in different test-time environments (corresponding to different $\\beta$ ). $\\beta$ values used by the two training environments are 0.1, 0.2 respectively. The conventional evaluation is done with the test environment $\\beta = 0.9$ (see $\\triangle$ ). (B) Box plots of prediction accuracies over diverse test environments corresponding to $\\beta \\in \\{0.05, 0.1, \\dots, 0.95\\}$ . IRMv1 achieves the best average accuracy $(67.13\\%)$ , followed by FISHR $(67.05\\%)$ and IRM-GAME $(65.53\\%)$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 637, + 107, + 816, + 224 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 ADVANCING IRM-GAME VIA CONSENSUS-CONSTRAINED BLO", + "text_level": 1, + "bbox": [ + 171, + 457, + 733, + 472 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we revisit and advance a special IRM variant, IRM-GAME (Ahuja et al., 2020), which endows each individual environment with a separate prediction head and converts IRM into an ensemble game over these multiple predictors.", + "bbox": [ + 169, + 489, + 823, + 532 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Revisiting IRM-GAME. We first introduce the setup of IRM-GAME following notations used in Section 2. The most essential difference between IRM-GAME and the vanilla IRM framework is that the former assigns each environment with an individual classifier $\\mathbf{w}^{(e)}$ , and then relies on the ensemble of these individual predictors, i.e., $\\frac{1}{N}\\sum_{e\\in \\mathcal{E}_{\\mathrm{tr}}}(\\mathbf{w}^{(e)}\\circ \\pmb {\\theta})$ , for inference. IRM-GAME is in a sharp contrast to IRM, where an environment-agnostic prediction head $\\mathbf{w}^*$ simultaneously optimizes the losses across all environments. Therefore, we raise the following question: Can IRM-GAME learn an invariant predictor?", + "bbox": [ + 169, + 537, + 823, + 638 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Inspired by the above question, we explicitly enforce invariance by imposing a consensus prediction constraint $\\mathcal{C} \\coloneqq \\left\\{\\left(\\bar{\\mathbf{w}}^{(1)}, \\bar{\\mathbf{w}}^{(2)}, \\ldots \\bar{\\mathbf{w}}^{(N)}\\right) \\mid \\bar{\\mathbf{w}}^{(1)} = \\ldots = \\bar{\\mathbf{w}}^{(N)}\\right\\}$ and integrate it with IRM-GAME. Here, $\\bar{\\mathbf{w}}^{(e)}$ denotes the prediction head for the $e$ -th environment. 
Based on the newly-introduced constraint, the ensemble prediction head $\frac{1}{N} \sum_{e \in \mathcal{E}_{\mathrm{tr}}} \mathbf{w}^{(e)}$ can be interpreted as the average consensus over $N$ environments: $\mathbf{w}^* \coloneqq \frac{1}{N} \sum_{e \in \mathcal{E}_{\mathrm{tr}}} \mathbf{w}^{(e)} = \arg \min_{\{\bar{\mathbf{w}}^{(e)}\}_e \in \mathcal{C}} \sum_{e \in \mathcal{E}_{\mathrm{tr}}} \| \bar{\mathbf{w}}^{(e)} - \mathbf{w}^{(e)} \|_2^2$ . With the above consensus interpretation, we can then cast the invariant predictor-baked IRM-GAME as a consensus-constrained BLO problem, extended from (IRM):", + "bbox": [ + 169, + 643, + 826, + 752 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 762, + 535, + 781 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\text {s u b j e c t t o} \\quad (\\mathbf {I}): \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta}) \\in \\underset {\\bar {\\mathbf {w}} ^ {(e)}} {\\arg \\min } \\ell^ {(e)} \\left(\\bar {\\mathbf {w}} ^ {(e)} \\circ \\boldsymbol {\\theta}\\right), \\forall e \\in \\mathcal {E} _ {\\mathrm {t r}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 312, + 781, + 823, + 805 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n(\\mathbf {I I}) \\colon \\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) = \\frac {1}{N} \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 806, + 596, + 824 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The above contains two lower-level problems: (I) per-environment risk minimization, and (II) projection onto the consensus 
constraint $(\\{\\mathbf{w}^{(e)}\\} \\in \\mathcal{C})$ . The incorporation of (II) is intended to ensure the use of invariant prediction head $\\mathbf{w}^*(\\pmb{\\theta})$ in the upper-level optimization problem of (1).", + "bbox": [ + 169, + 830, + 823, + 875 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Limitation of (1) and BLOC-IRM. In (1), the introduced consensus-constrained lower-level problem might compromise the optimality of the lower-level solution $\\mathbf{w}^{*}(\\pmb{\\theta})$ to the per-environment (unconstrained) risk minimization problem (I), i.e., violating the per-environment stationarity", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$\\| \\nabla_{\\mathbf{w}}\\ell^{(e)}(\\mathbf{w}^* (\\pmb {\\theta})\\circ \\pmb {\\theta})\\| _2^2$ . Figure A3 justifies this side effect. As we can see, the per-environment stationarity is hardly attained at the consensus prediction when solving (1). This is not surprising since a constrained optimization solution might not be a stationary solution to minimizing the (unconstrained) objective function. To alleviate this limitation, we improve (1) by explicitly promoting the per-environment stationarity $\\| \\nabla_{\\mathbf{w}}\\ell^{(e)}(\\mathbf{w}^* (\\pmb {\\theta})\\circ \\pmb {\\theta})\\| _2^2$ in its upper-level problem through optimization over $\\pmb{\\theta}$ . 
This leads to BLOC-IRM (BLO with Consensus IRM):", + "bbox": [ + 169, + 102, + 826, + 189 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\left[ \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right) + \\gamma \\| \\nabla_ {\\mathbf {w}} \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right) \\| _ {2} ^ {2} \\right] \\tag {BLOC-IRM}\n$$\n", + "text_format": "latex", + "bbox": [ + 248, + 194, + 823, + 218 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "subject to Lower-level problems (I) and (II) in (1),", + "bbox": [ + 250, + 215, + 563, + 229 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\gamma > 0$ is a regularization parameter like IRMv0. Assisted by the (upper-level) prediction stationarity regularization, the consensus prediction (II) indeed simultaneously minimizes the risks of all the environments, supported by the empirical evidence that $\|\nabla_{\mathbf{w}}\ell^{(e)}(\mathbf{w}^*(\boldsymbol{\theta}) \circ \boldsymbol{\theta})\|_2^2$ converges towards 0 along each environment's optimization path (see Figure A3).", + "bbox": [ + 169, + 234, + 823, + 294 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Further, we elaborate on how the BLOC-IRM problem can be effectively solved using an ordinary BLO solver. First, it is worth noting that although both (IRM) and BLOC-IRM are BLO problems, the latter is easier to solve since the lower-level problem (I) is unconstrained and separable over environments, and the consensus operation (II) is linear. 
Based on these characteristics, the implicit gradient $\\frac{dw^{*}(\\theta)}{d\\theta}$ can be directly computed as", + "bbox": [ + 169, + 299, + 823, + 375 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d \\mathbf {w} ^ {*} (\\boldsymbol {\\theta})}{d \\boldsymbol {\\theta}} = \\frac {1}{N} \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\frac {d \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta})}{d \\boldsymbol {\\theta}}, \\text {s u b j e c t t o} \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta}) \\in \\underset {\\bar {\\mathbf {w}} ^ {(e)}} {\\arg \\min } \\ell^ {(e)} \\left(\\bar {\\mathbf {w}} ^ {(e)} \\circ \\boldsymbol {\\theta}\\right). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 378, + 823, + 415 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Since the above lower-level problem is unconstrained, we can call the standard arg min differentiating method, such as implicit function approach (Gould et al., 2016) or gradient unrolling (Liu et al., 2021) to compute $\\frac{d\\mathbf{w}^{(e)}(\\boldsymbol{\\theta})}{d\\boldsymbol{\\theta}}$ . In our work, we adopt the gradient unrolling method, which approximates $\\mathbf{w}^{(e)}(\\boldsymbol{\\theta})$ by a $K$ -step gradient descent solution, noted by $\\mathbf{w}_K^{(e)}(\\boldsymbol{\\theta})$ and then leverages automatic differentiation (AD) to compute the derivative from $\\mathbf{w}_K^{(e)}(\\boldsymbol{\\theta})$ to the variable $\\boldsymbol{\\theta}$ . Figure 3 shows the working pipeline of BLOC-IRM and its comparison to original IRM and", + "bbox": [ + 169, + 426, + 478, + 619 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "IRM-GAME methods. We use $K = 1$ for the lower-level problem throughout our experiments. We refer readers to Appendix B.2 for more algorithmic details. We also explore the performance of our proposed BLOC-IRM with various regularization terms, based on the penalties used in the existing literature. 
We show the best performance is always achieved when the stationarity is penalized in the upper-level (see Table A3).", + "bbox": [ + 169, + 619, + 823, + 691 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0122b18efe16a4aa864ab68166e294a34ab15a4c112c8a7589decd971ce3168f.jpg", + "image_caption": [ + "Figure 3: Schematic overview of BLOC-IRM over two training environments (red and green), and its comparison to IRM and IRM-GAME." + ], + "image_footnote": [], + "bbox": [ + 488, + 425, + 823, + 566 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 710, + 328, + 724 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we begin by introducing some key experiment setups (with details in Appendix C.1), and then empirically show the effectiveness of our proposed IRM training and evaluation improvements over existing IRM methods across various datasets, models, and learning environments.", + "bbox": [ + 169, + 741, + 823, + 785 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.1 EXPERIMENT SETUPS", + "text_level": 1, + "bbox": [ + 171, + 800, + 366, + 814 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Datasets and models. Our experiments are conducted over 7 datasets as referenced and shown in Tables 1, 2. Among these datasets, COLORED-MNIST, COLORED-FMNIST, CIFAR-MNIST, and COLORED-OBJECT are similarly curated, mimicking the pipeline of COLORED-MNIST (Arjovsky et al., 2019), by introducing an environment bias parameter (e.g., $\\beta$ for COLORED-MNIST in Section 4) to customize the level of spurious correlation (as shown in Table 2) in different environments. In the CELEBA dataset, we choose the face attribute 'smiling' (vs. 'non-smiling') as the core feature aimed for classification, and regard another face attribute 'hair color' ('blond' vs. 
'dark') as the", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "source of spurious correlation imposed on the core feature. By controlling the level of spurious correlation, we then create different training/testing environments in CELEBA. Furthermore, we study PACS and VLCS datasets, which were used to benchmark domain generalization ability in the real world (Borlino et al., 2021). It was recently shown by Gulrajani & Lopez-Paz (2020) that for these datasets, ERM could even be better than IRMv1. Yet, we will show that our proposed BLOC-IRM is a promising domain generalization method, which outperforms all the IRM baselines and ERM in practice. In addition, we follow Arjovsky et al. (2019) in adopting multi-layer perceptron (MLP) as the model for resolving COLORED-MNIST and COLORED-FMNIST problems. In the other more complex datasets, we use the ResNet-18 architecture (He et al., 2016).", + "bbox": [ + 169, + 103, + 823, + 229 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Baselines and implementation. Our baselines include 7 IRM variants (Table 1) and ERM, which are implemented using their official repositories if available (see Appendix C.2). Unless specified otherwise, our training pipeline uses the small-batch training setting. By default, we use the batch size of 1024 for COLORED-MNIST and COLORED-FMNIST, and 256 for other datasets. In Section 6.2 below, we also do a thorough comparison of large-batch vs small-batch IRM training.", + "bbox": [ + 169, + 234, + 823, + 306 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Evaluation setup. As proposed in Section 4, we use the multi-environment evaluation metric unless specified otherwise. 
To capture both the accuracy and variance of invariant predictions across multiple testing environments, the average accuracy and the accuracy gap (the difference of the best-case and worst-case accuracy) are measured for IRM methods. The resulting performance is reported in the form $a \\pm b$ , with mean $a$ and standard deviation $b$ computed across 10 independent trials.", + "bbox": [ + 169, + 311, + 498, + 465 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.2 EXPERIMENT RESULTS", + "text_level": 1, + "bbox": [ + 171, + 486, + 375, + 500 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Small-batch training improves all existing IRM methods on COLORED-MNIST & COLORED-FMNIST. Recall from Section 3 that all the existing IRM methods (Table 1) adopt full-batch IRM training on COLORED-MNIST & COLORED-FMNIST, which raises the large-batch training problem. In Table 4, we conduct", + "bbox": [ + 169, + 513, + 496, + 611 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4: Performance of existing IRM methods in large and small-batch settings. GRAYSCALE refers to ERM on uncolored data, which yields the best prediction (supposing no spurious correlation during training). The IRM performance is evaluated by average accuracy ('Avg Acc') and accuracy gap ('Acc Gap'), in the format mean±std. A higher Avg Acc and lower Acc Gap is preferred. The theoretically optimal performance is $75\\%$ (Arjovsky et al., 2019).", + "bbox": [ + 506, + 316, + 823, + 430 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/58169d988f131bfa217cda42fc0864dfea1197a1e261577e0667465497329e57.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset Metrics(%)COLORED-MNISTCOLORED-FMNIST
Avg Acc (↑)Acc Gap (↓)Avg Acc (↑)Acc Gap (↓)
Large BatchGRYSCALE73.39±0.160.32±0.0374.05±0.090.13±0.04
ERM49.19±1.8990.72±2.0849.77±1.7188.62±2.49
IRMv167.13±0.333.43±0.1467.19±0.223.35±0.11
IRMv065.39±0.344.69±0.1866.44±0.283.53±0.13
IRM-GAME65.69±0.428.75±0.1465.91±0.293.74±0.09
REX67.42±0.293.76±0.0767.82±0.313.26±0.16
BIRM67.93±0.313.81±0.1167.75±0.263.81±0.11
SPARSEIRM67.72±0.283.65±0.0867.89±0.303.12±0.15
FISHR67.49±0.394.37±0.1067.33±0.244.49±0.16
Small BatchIRMv168.33±0.312.04±0.0568.76±0.311.45±0.09
IRMv068.37±0.281.32±0.0969.07±0.271.36±0.06
IRM-GAME67.73±0.241.67±0.1467.49±0.321.82±0.13
REX68.42±0.291.65±0.0768.66±0.221.29±0.08
BIRM68.71±0.211.35±0.0968.64±0.321.44±0.13
SPARSEIRM68.81±0.251.72±0.0568.29±0.221.28±0.15
FISHR68.69±0.192.13±0.0868.79±0.171.77±0.10
", + "bbox": [ + 509, + 431, + 823, + 595 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "a thorough comparison between the originally-used full-batch IRM methods and their small-batch counterparts. In addition, we present the performance of ERM and ERM-grayscale (we call it 'grayscale'), where the latter is ERM on uncolored data. In the absence of any spurious correlation in the training set, grayscale gives the best performance. As discussed in Section 4 & 6.1, the IRM performance is measured by the average accuracy and the accuracy gap across 19 testing environments, parameterized by the environment bias parameter $\\beta \\in \\{0.05,\\dots ,0.95\\}$ . We make some key observations from Table 4. First, small batch size helps improve all the existing IRM methods consistently, evidenced by the $1\\% \\sim 3\\%$ improvement in average accuracy. Second, the small-batch IRM training significantly reduces the variance of invariant predictions across different testing environments, evidenced by the decreased accuracy gap. This implies that the small-batch IRM training can also help resolve the limitation of multi-environment evaluation for the existing IRM methods, like the sensitivity of IRM-GAME accuracy to $\\beta$ in Figure 2. Third, we observe that IRMv0, which does not seem to be useful in the large batch setting, becomes quite competitive with the other baselines in the small-batch setting. Thus, large-batch could suppress the IRM performance for some methods. In the rest of the experiments, we stick to the small-batch implementation of IRM training.", + "bbox": [ + 169, + 611, + 826, + 834 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "BLOC-IRM outperforms IRM baselines in various datasets. Next, Table 5 demonstrates the effectiveness of our proposed BLOC-IRM approach versus ERM and existing IRM baselines across all the 7 datasets listed in Table 2. 
Evidently, BLOC-IRM yields a higher average accuracy compared to all the baselines, together with the smallest accuracy gap in most cases. Additionally, we observe that CELEBA, PACS and VLCS are much more challenging datasets for capturing invariance through IRM, as evidenced by the small performance gap between ERM and IRM methods. In", + "bbox": [ + 169, + 839, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/8fbf17543825cff1662e9c6b65a367fc4f314bc4a161e7c1a84c438f6107d9ee.jpg", + "table_caption": [ + "Table 5: IRM performance comparison between BLOC-IRM and other baselines. We use ResNet-18 (He et al., 2016) for all the datasets. The evaluation setup is consistent with Table 4, and the best performance per-dataset is highlighted in bold. We present the results with the full dataset list in Table A1." + ], + "table_footnote": [], + "table_body": "
Algorithm Metrics (%)COLORED-OBJECTCIFAR-MNISTCELEBAVLCSPACS
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
ERM41.11±1.4486.43±2.8940.39±1.3285.53±2.3372.38±0.2910.73±0.3663.23±0.2312.39±0.3569.95±0.3514.32±0.75
IRMv164.42±0.214.18±0.2961.49±0.297.17±0.3372.49±0.3810.15±0.2762.72±0.2912.74±0.2768.93±0.3314.99±0.51
IRMv062.39±0.255.36±0.3160.14±0.188.83±0.3972.42±0.3510.43±0.3862.59±0.3212.99±0.3668.72±0.2915.29±0.71
IRM-GAME62.88±0.345.59±0.2860.44±0.316.72±0.4172.18±0.4412.32±0.4162.31±0.3813.37±0.6268.12±0.2215.77±0.66
REX63.37±0.355.42±0.3162.32±0.245.55±0.3272.34±0.2610.31±0.2363.19±0.3112.87±0.3169.43±0.3415.31±0.67
BIRM65.11±0.273.31±0.2262.99±0.355.23±0.3672.93±0.289.92±0.3363.33±0.4012.13±0.2369.34±0.2515.76±0.49
SPARSEIRM64.97±0.393.97±0.2562.16±0.294.14±0.3172.42±0.339.79±0.2162.86±0.2612.79±0.3569.52±0.3915.81±0.82
FISHR64.07±0.234.41±0.2961.79±0.255.55±0.2172.89±0.259.42±0.3263.44±0.3711.93±0.4270.21±0.2214.52±0.43
BLOC-IRM65.97±0.334.10±0.3663.69±0.324.89±0.3673.35±0.328.79±0.2163.62±0.3511.55±0.3270.31±0.2114.73±0.65
", + "bbox": [ + 173, + 152, + 823, + 263 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "particular, all the IRM methods, except FISHR and BLOC-IRM, could even be worse than ERM on PACS and VLCS. Here, we echo and extend the findings of Krueger et al. (2021, Section 4.3). However, we also show that BLOC-IRM is a quite competitive IRM variant when applied to realistic domain generalization datasets. We also highlight that the CELEBA experiment is newly constructed and performed in our work for invariance evaluation. Like PACS and VLCS, this experiment also shows that ERM is a strong baseline, and among IRM-based methods, BLOC-IRM is the best-performing, both in terms of accuracy and variance of invariant predictions.", + "bbox": [ + 169, + 272, + 826, + 371 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "IRM against model size and training environment variation. Furthermore, we investigate the effect of model size and training environment diversity on the IRM performance. The recent works (Lin et al., 2022; Zhou et al., 2022b) have empirically shown that IRMv1 may suffer a significant performance loss when trained over large-sized neural network models, and thus developed BIRM and SPARSEIRM approaches as advancements of IRMv1. Inspired by these works, Figure 4 presents the sensitivity of invariant prediction to model size for different IRM methods on COLORED-MNIST. Here the model size is controlled by the dimension of the intermediate layer (denoted by $d$ ) in MLP, and the default dimension is $d = 390$ (i.e., the vertical dotted line in Figure 4), which was used in (Arjovsky et al., 2019) and followed in the subsequent literature. As we can see, when $d > 390$ , nearly all the studied IRM methods (including BLOC-IRM) suffer a performance drop. 
Yet, as $d \\geq 800$ , from the perspective of prediction accuracy and model resilience together, the top-3 best IRM methods with model size resilience are BIRM, SPARSEIRM, and BLOC-IRM, although we did not intentionally design BLOC-IRM to resist performance degradation against model", + "bbox": [ + 169, + 376, + 602, + 656 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/3d11bbd572a5a8ea881ce0606f8b523c5dcb5609512320a723945579f90a497e.jpg", + "image_caption": [ + "Figure 4: IRM performance on COLORED-MNIST against the layer dimension in MLP. The dotted line represents the default dimension $(d = 390)$ used in the literature. The invariant prediction accuracy is presented via the dot line (mean). The results are based on 10 independent trials and we report the variance in Figure A4." + ], + "image_footnote": [], + "bbox": [ + 617, + 377, + 815, + 503 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We also show more experiment results in the Appendix. In Table A2, we study IRM with different numbers of training environment configurations and observe the consistent improvement of BLOC-IRM over other baselines. In Table A4 we show that the performance of invariant prediction degrades, if additional covariate shifts (class, digit, and color imbalances on COLORED-MNIST) are imposed on the training environments following Krueger et al. (2021, Section 4.1) and also demonstrate that BLOC-IRM maintains the accuracy improvement over baselines with each variation. 
In Table A5, we compare the performance of different methods in the failure cases of IRM pointed out by (Kamath et al., 2021) and show the consistent improvement brought by BLOC-IRM.", + "bbox": [ + 169, + 660, + 826, + 773 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 789, + 320, + 804 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we investigate existing IRM methods and reveal long-standing but chronically overlooked challenges involving IRM training and evaluation, which may lead to sub-optimal solutions and incomplete invariance assessment. As a remedy, we propose small-batch training and multi-environment evaluation. We reexamine the IRM-GAME method through the lens of consensus-constrained BLO, and develop a novel IRM variant, termed BLOC-IRM. We conducted extensive experiments on 7 datasets and demonstrate that BLOC-IRM consistently improves all baselines.", + "bbox": [ + 169, + 820, + 823, + 905 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENT", + "text_level": 1, + "bbox": [ + 174, + 103, + 356, + 116 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The work of Y. Zhang and S. Liu was partially supported by National Science Foundation (NSF) Grant IIS-2207052. The work of M. Hong was supported by NSF grants CNS-2003033 and CIF-1910385. 
The computing resources used in this work were partially supported by the MIT-IBM Watson AI Lab and the Institute for Cyber-Enabled Research (ICER) at Michigan State University.", + "bbox": [ + 174, + 133, + 823, + 189 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REPRODUCIBILITY STATEMENT", + "text_level": 1, + "bbox": [ + 174, + 210, + 436, + 224 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The authors have made an extensive effort to ensure the reproducibility of algorithms and results presented in the paper. First, the details of the experiment settings have been elaborated in Section 6.1 and Appendix C.1. In this paper, seven datasets are studied and the environment generation process for each dataset is described with details in Appendix A. The evaluation metrics are also clearly introduced in Section 3. Second, eight IRM-oriented methods (including our proposed BLOC-IRM) are studied in this work. The implementation details of all the baseline methods are clearly presented in Appendix C.2, including the hyper-parameters tuning, model configuration, and used code bases. For our proposed BLOC-IRM, we include all the implementation details in Section 5 and Appendix B.2, including training pipeline in Figure 3 and the pseudo-code in Algorithm A1. Third, all the results are based on 10 independent trials with different random seeds. The standard deviations are also reported to ensure fair comparisons across different methods. 
Fourth, codes are available at https://github.com/OPTML-Group/BLOC-IRM.", + "bbox": [ + 174, + 241, + 823, + 407 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 174, + 30, + 478, + 44 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 491, + 946, + 506, + 958 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 174, + 102, + 287, + 117 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Faruk Ahmed, Yoshua Bengio, Harm van Seijen, and Aaron Courville. Systematic generalisation with group invariant predictions. In International Conference on Learning Representations, 2020.", + "Kartik Ahuja, Karthikeyan Shanmugam, Kush Varshney, and Amit Dhurandhar. Invariant risk minimization games. In International Conference on Machine Learning, pp. 145-155. PMLR, 2020.", + "Kartik Ahuja, Jun Wang, Amit Dhurandhar, Karthikeyan Shanmugam, and Kush R Varshney. Empirical or invariant risk minimization? a sample complexity perspective. In International Conference on Learning Representations, 2021.", + "Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019.", + "Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using meta-regularization. Advances in neural information processing systems, 31, 2018.", + "Sara Beery, Grant Van Horn, and Pietro Perona. Recognition in terra incognita. In Proceedings of the European conference on computer vision (ECCV), pp. 456-473, 2018.", + "Francesco Cappio Borlino, Antonio D'Innocente, and Tatiana Tommasi. Rethinking domain generalization baselines. In 2020 25th International Conference on Pattern Recognition (ICPR), pp. 9227-9233. 
IEEE, 2021.", + "Fabio M Carlucci, Antonio D'Innocente, Silvia Bucci, Barbara Caputo, and Tatiana Tommasi. Domain generalization by solving jigsaw puzzles. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2229-2238, 2019.", + "Shiyu Chang, Yang Zhang, Mo Yu, and Tommi Jaakkola. Invariant rationalization. In International Conference on Machine Learning, pp. 1448-1458. PMLR, 2020.", + "Yongqiang Chen, Kaiwen Zhou, Yatao Bian, Binghui Xie, Kaili Ma, Yonggang Zhang, Han Yang, Bo Han, and James Cheng. Pareto invariant risk minimization. arXiv preprint arXiv:2206.07766, 2022.", + "Yo Joong Choe, Jiyeon Ham, and Kyubyong Park. An empirical study of invariant risk minimization. arXiv preprint arXiv:2004.05007, 2020.", + "Pim De Haan, Dinesh Jayaraman, and Sergey Levine. Causal confusion in imitation learning. Advances in Neural Information Processing Systems, 32, 2019.", + "Alex J DeGrave, Joseph D Janizek, and Su-In Lee. Ai for radiographic Covid-19 detection selects shortcuts over signal. Nature Machine Intelligence, 3(7):610-619, 2021.", + "Qi Dou, Daniel Coelho de Castro, Konstantinos Kamnitsas, and Ben Glocker. Domain generalization via model-agnostic learning of semantic features. Advances in Neural Information Processing Systems, 32, 2019.", + "Yana Dranker, He He, and Yonatan Belinkov. Irm—when it works and when it doesn't: A test case of natural language inference. Advances in Neural Information Processing Systems, 34:18212-18224, 2021.", + "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. arXiv preprint arXiv:2010.01412, 2020.", + "Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Lavoille, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. 
The journal of machine learning research, 17(1):2096-2030, 2016.", + "Robert Geirhos, Jorn-Henrik Jacobsen, Claudio Michaelis, Richard Zemel, Wieland Brendel, Matthias Bethge, and Felix A Wichmann. Shortcut learning in deep neural networks. Nature Machine Intelligence, 2(11): 665-673, 2020.", + "Stephen Gould, Basura Fernando, Anoop Cherian, Peter Anderson, Rodrigo Santa Cruz, and Edison Guo. On differentiating parameterized argmin and argmax problems with application to bi-level optimization. arXiv preprint arXiv:1607.05447, 2016.", + "Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch SGD: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017." + ], + "bbox": [ + 171, + 125, + 825, + 922 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ishaan Gulrajani and David Lopez-Paz. In search of lost domain generalization. arXiv preprint arXiv:2007.01434, 2020.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.", + "Neal Jean, Marshall Burke, Michael Xie, W Matthew Davis, David B Lobell, and Stefano Ermon. Combining satellite imagery and machine learning to predict poverty. Science, 353(6301):790-794, 2016.", + "Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Domain extrapolation via regret minimization. arXiv preprint arXiv:2006.03908, 2020.", + "Pritish Kamath, Akilesh Tangella, Danica Sutherland, and Nathan Srebro. Does invariant risk minimization capture invariance? 
In International Conference on Artificial Intelligence and Statistics, pp. 4069-4077. PMLR, 2021.", + "Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. arXiv preprint arXiv:1609.04836, 2016.", + "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.", + "Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsbramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Irena Gao, et al. Wilds: A benchmark of in-the-wild distribution shifts. In International Conference on Machine Learning, pp. 5637-5664. PMLR, 2021.", + "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Communications of the ACM, 60(6):84-90, 2017.", + "David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In International Conference on Machine Learning, pp. 5815-5826. PMLR, 2021.", + "Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy M Hospedales. Deeper, broader and artier domain generalization. In Proceedings of the IEEE international conference on computer vision, pp. 5542-5550, 2017.", + "Da Li, Jianshu Zhang, Yongxin Yang, Cong Liu, Yi-Zhe Song, and Timothy M Hospedales. Episodic training for domain generalization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 1446-1455, 2019.", + "Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. Advances in neural information processing systems, 31, 2018.", + "Yong Lin, Qing Lian, and Tong Zhang. An empirical study of invariant risk minimization on deep models. 
In ICML 2021 Workshop on Uncertainty and Robustness in Deep Learning, pp. 7, 2021.", + "Yong Lin, Hanze Dong, Hao Wang, and Tong Zhang. Bayesian invariant risk minimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16021-16030, 2022.", + "Risheng Liu, Jiaxin Gao, Jin Zhang, Deyu Meng, and Zhouchen Lin. Investigating bi-level optimization for learning and vision from a unified perspective: A survey and beyond. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021.", + "Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), December 2015.", + "Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. Learning transferable features with deep adaptation networks. In International conference on machine learning, pp. 97-105. PMLR, 2015.", + "Hyeonseob Nam, HyunJae Lee, Jongchan Park, Wonjun Yoon, and Donggeun Yoo. Reducing domain gap by reducing style bias. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8690-8699, 2021.", + "Jonas Peters, Peter Buhlmann, and Nicolai Meinshausen. Causal inference by using invariant prediction: identification and confidence intervals. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 78(5):947-1012, 2016." + ], + "bbox": [ + 171, + 103, + 825, + 922 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Alexandre Rame, Coretin Dancette, and Matthieu Cord. Fishr: Invariant gradient variances for out-of-distribution generalization. In International Conference on Machine Learning, pp. 18347-18377. 
PMLR, 2022.", + "Elan Rosenfeld, Pradeep Ravikumar, and Andrej Risteski. The risks of invariant risk minimization. arXiv preprint arXiv:2010.05761, 2020.", + "Shiori Sagawa, Pang Wei Koh, Tatsunori B Hashimoto, and Percy Liang. Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization. arXiv preprint arXiv:1911.08731, 2019.", + "Shiori Sagawa, Aditi Raghunathan, Pang Wei Koh, and Percy Liang. An investigation of why overparameterization exacerbates spurious correlations. In International Conference on Machine Learning, pp. 8346-8356. PMLR, 2020.", + "Harshay Shah, Kaustav Tamuly, Aditi Raghunathan, Prateek Jain, and Praneeth Netrapalli. The pitfalls of simplicity bias in neural networks. Advances in Neural Information Processing Systems, 33:9573-9585, 2020.", + "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014.", + "Yi Sun, Xiaogang Wang, and Xiaou Tang. Deep learning face representation from predicting 10,000 classes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1891-1898, 2014.", + "Antonio Torralba and Alexei A Efros. Unbiased look at dataset bias. In CVPR 2011, pp. 1521-1528. IEEE, 2011.", + "Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014.", + "Jindong Wang, Cuiling Lan, Chang Liu, Yidong Ouyang, Tao Qin, Wang Lu, Yiqiang Chen, Wenjun Zeng, and Philip Yu. Generalizing to unseen domains: A survey on domain generalization. IEEE Transactions on Knowledge and Data Engineering, 2022.", + "Chuanlong Xie, Haotian Ye, Fei Chen, Yue Liu, Rui Sun, and Zhenguo Li. Risk variance penalization. arXiv preprint arXiv:2006.07544, 2020.", + "Renzhe Xu, Xingxuan Zhang, Peng Cui, Bo Li, Zheyan Shen, and Jiazheng Xu. Regulatory instruments for fair personalized pricing. 
In Proceedings of the ACM Web Conference 2022, pp. 4-15, 2022.", + "Yilun Xu and Tommi Jaakkola. Learning representations that support robust transfer of predictors. arXiv preprint arXiv:2110.09940, 2021.", + "Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017a.", + "Yang You, Igor Gitman, and Boris Ginsburg. Scaling SGD batch size to 32k for imagenet training. arXiv preprint arXiv:1708.03888, 6, 2017b.", + "Yang You, Zhao Zhang, Cho-Jui Hsieh, James Demmel, and Kurt Keutzer. Imagenet training in minutes. In Proceedings of the 47th International Conference on Parallel Processing, pp. 1. ACM, 2018.", + "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes. arXiv preprint arXiv:1904.00962, 2019.", + "Dinghuai Zhang, Kartik Ahuja, Yilun Xu, Yisen Wang, and Aaron Courville. Can subnetwork structure be the key to out-of-distribution generalization? In International Conference on Machine Learning, pp. 12356-12367. PMLR, 2021.", + "Gaoyuan Zhang, Songtao Lu, Yihua Zhang, Xiangyi Chen, Pin-Yu Chen, Quanfu Fan, Lee Martie, Lior Horesh, Mingyi Hong, and Sijia Liu. Distributed adversarial training to robustify deep neural networks at scale. In Uncertainty in Artificial Intelligence, pp. 2353-2363. PMLR, 2022a.", + "Xingxuan Zhang, Linjun Zhou, Renzhe Xu, Peng Cui, Zheyan Shen, and Haoxin Liu. Nico++: Towards better benchmarking for domain generalization. arXiv preprint arXiv:2204.08040, 2022b.", + "Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022a.", + "Xiao Zhou, Yong Lin, Weizhong Zhang, and Tong Zhang. Sparse invariant risk minimization. In International Conference on Machine Learning, pp. 27222-27244. PMLR, 2022b." 
+ ], + "bbox": [ + 171, + 103, + 826, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "APPENDIX", + "text_level": 1, + "bbox": [ + 444, + 101, + 553, + 118 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A DATASET SELECTION", + "text_level": 1, + "bbox": [ + 171, + 138, + 388, + 152 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Compared to existing work, we expand the dataset types for evaluating the performance of different IRM methods (see Table 2). In addition to the most commonly-used benchmark datasets COLORED-MNIST (Arjovsky et al., 2019) and COLORED-FMNIST (Ahuja et al., 2020), we also consider the datasets CIFAR-MNIST (Lin et al., 2021; Shah et al., 2020) and COLORED-OBJECT (Ahmed et al., 2020; Zhang et al., 2021), which impose artificial spurious correlations, MNIST digit number and object color, into the original CIFAR-10 and COCO Detection datasets, respectively. Furthermore, we consider other three real-world datasets CELEBA (Liu et al., 2015), PACS (Li et al., 2017) and VLCS (Torralba & Efros, 2011), without imposing artificial spurious correlations. Notably, CELEBA was first formalized and introduced to benchmark IRM performance. The recent work (Gulrajani & Lopez-Paz, 2020) showed that when carefully implemented, ERM could outperform IRMv1 in PACS and VLCS. Thus, we regard them as challenging datasets to capture invariance.", + "bbox": [ + 169, + 167, + 826, + 321 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For COLORED-OBJECT dataset, we strictly follow the setting adopted in (Lin et al., 2022) to generate the spurious features. 
For CIFAR-MNIST we use the class \"bird\" and \"plane\" in the dataset CIFAR as the invariant feature, while the digit \"0\" and \"1\" in MNIST as the spurious correlation.", + "bbox": [ + 169, + 328, + 823, + 371 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "CELEBA dataset is, for the first time, introduced to measure IRM performance. We select the attribute \"Smiling\" as the invariant label and use the attribute \"Hair Color\" (blond and black hair) to create a spurious correlation in each environment.", + "bbox": [ + 169, + 377, + 826, + 420 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 439, + 439, + 455 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.1 DETAILS ON LARGE-BATCH OPTIMIZATION ENHANCEMENTS", + "text_level": 1, + "bbox": [ + 171, + 469, + 640, + 484 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$\\spadesuit$ IRM-LSGD: We first integrate large-batch SGD (LSGD) with IRM. Following (Goyal et al., 2017), we make two main modifications: (1) scaling up learning rate linearly with batch size, and (2) prepending a warm-up optimization phase to IRM training. We call the LSGD-baked IRM variant IRM-LSGD.", + "$\\spadesuit$ IRM-LALR: Next, we adopt layerwise adaptive learning rate (LALR) in IRM training. Following (You et al., 2019), we advance the learning rate scheduler by assigning each layer of a neural network-based prediction model with an adaptive learning rate (i.e., proportional to the norm of updated model weights per layer). 
More specifically, the model parameter update rule becomes:" + ], + "bbox": [ + 169, + 494, + 826, + 614 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} _ {t + 1, i} = \\boldsymbol {\\theta} _ {t, i} - \\frac {\\tau \\left(\\left\\| \\boldsymbol {\\theta} _ {t , i} \\right\\| _ {2} ^ {2}\\right) \\cdot \\eta_ {t}}{\\left\\| \\mathbf {u} _ {t , i} \\right\\| _ {2} ^ {2}} \\mathbf {u} _ {t, i}, \\tag {A1}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 617, + 823, + 651 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\pmb{\\theta}_{t,i}$ denotes the $i$ -th layer of the model parameters at iteration $t$ , and $\\mathbf{u}_{t,i}$ represents the first-order gradient of the corresponding layer-wise model parameters. We use $\\tau(\\|\\pmb{\\theta}_{t,i}\\|_2^2) = \\min\\{\\max\\{\\|\\pmb{\\theta}_{t,i}\\|_2^2, c_l\\}, c_u\\}$ as the scaling factor of the adaptive learning rate $\\frac{\\eta_t}{\\|\\mathbf{u}_{t,i}\\|}$ . We use $c_l = 0$ and $c_u = 1$ in our experiments.", + "bbox": [ + 169, + 654, + 826, + 715 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$\\spadesuit$ IRM-SAM: Lastly, we leverage sharpness-aware minimization (SAM) to simultaneously minimize the IRM loss and the loss sharpness. The latter is achieved by explicitly penalizing the worst-case training loss of model weights when facing small weight perturbations. This yields a wide minimum within a flat loss landscape. 
More specifically, the sharpness-aware loss can be formulated as:", + "bbox": [ + 169, + 722, + 823, + 790 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\boldsymbol {\\theta}} \\ell^ {\\mathrm {S A M}} (\\boldsymbol {\\theta}), \\quad \\text {w h e r e} \\quad \\ell^ {\\mathrm {S A M}} (\\boldsymbol {\\theta}) = \\max _ {\\| \\epsilon \\| _ {2} ^ {2} \\leq \\rho} \\ell (\\boldsymbol {\\theta} + \\boldsymbol {\\epsilon}), \\tag {A2}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 792, + 823, + 820 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where the parameter perturbation $\\epsilon$ is subject to the perturbation constraint $\\| \\epsilon \\| _2^2\\leq \\rho$ . When applied to IRM, we replace the per-environment training loss with the SAM loss, and adopt the $\\rho = 0.001$ .", + "bbox": [ + 169, + 824, + 823, + 854 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.2 BLOC-IRM IMPLEMENTATION", + "text_level": 1, + "bbox": [ + 171, + 869, + 434, + 883 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As described in Section 5, the BLOC-IRM algorithm solves the IRM problem with two optimization levels. We use 1-step gradient descent to get the lower-level solution. We retain the gradient", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "graph in PyTorch to enable auto differentiation. We assign each of the classification head $\\{\\mathbf{w}^{(e)}\\}$ a separate optimizer and use the same learning rate as the feature extractor $\\theta$ . For COLORED-MNIST and COLORED-FMNIST, we adopt a learning rate of $2 \\times 10^{-3}$ and use the Adam (Kingma & Ba, 2014) optimizer. 
As for other datasets, we use the multi-step learning rate scheduler with an initial learning rate of 0.1, which is consistent with other baselines. We adopt the same penalty weight of $10^{6}$ as IRMv1 and IRMv0.", + "bbox": [ + 169, + 102, + 826, + 188 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Algorithm A1 BLOC-IRM", + "text_level": 1, + "bbox": [ + 173, + 204, + 362, + 219 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1: Initialization: Training data $\\{\\mathbf{x}^{(e)}\\}$ from $N$ environments, Model feature extractor $\\theta_0$ , and $N$ model classification heads $\\{\\mathbf{w}_0^{(e)}\\}$ , learning rate $\\{\\eta_t\\}$ series, penalty weight $\\{\\gamma_t\\}$ series.", + "2: for Step $t = 0,1,\\ldots ,\\mathbf{d}\\mathbf{o}$", + "3: Lower-level: update classification head for each environment:" + ], + "bbox": [ + 179, + 223, + 823, + 284 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\forall e \\in \\mathcal {E} _ {\\mathrm {t r}}, \\quad \\tilde {\\mathbf {w}} _ {t + 1} ^ {(e)} = \\mathbf {w} _ {t} ^ {(e)} - \\eta_ {t} \\left. 
\\frac {d \\ell^ {(e)} (\\mathbf {w} \\odot \\boldsymbol {\\theta})}{d \\mathbf {w}} \\right| _ {\\boldsymbol {\\theta} = \\boldsymbol {\\theta} _ {t}, \\mathbf {w} = \\mathbf {w} _ {t} ^ {(e)}} \\tag {A3}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 291, + 825, + 329 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "4: Consensus projection: $\\forall e\\in \\mathcal{E}_{\\mathrm{tr}},\\mathbf{w}_{t + 1}^{(e)} = \\mathbf{w}_{t + 1}^{*} = \\frac{1}{N}\\sum_{e\\in \\mathcal{E}_{\\mathrm{tr}}}\\tilde{\\mathbf{w}}_{t + 1}^{(e)}$", + "5: Upper-level: update feature extractor with stationary penalty:" + ], + "bbox": [ + 179, + 337, + 671, + 369 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} _ {t + 1} = \\boldsymbol {\\theta} _ {t} - \\eta_ {t} \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\frac {d}{d \\boldsymbol {\\theta}} \\left(\\ell^ {(e)} (\\mathbf {w} \\odot \\boldsymbol {\\theta}) + \\gamma_ {t} \\| \\nabla_ {\\mathbf {w}} \\ell^ {(e)} (\\mathbf {w} \\odot \\boldsymbol {\\theta}) \\| _ {2} ^ {2}\\right) \\Big | _ {\\boldsymbol {\\theta} = \\boldsymbol {\\theta} _ {t}, \\mathbf {w} = \\mathbf {w} _ {t + 1} ^ {*}} \\tag {A4}\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 387, + 825, + 424 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "6: end for", + "bbox": [ + 179, + 431, + 256, + 446 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C EXPERIMENTATION", + "text_level": 1, + "bbox": [ + 171, + 479, + 372, + 494 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.1 ENVIRONMENT SETUP", + "text_level": 1, + "bbox": [ + 171, + 512, + 375, + 526 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "As proposed in Section 4, we use the multi-environment evaluation metric unless specified otherwise. 
To capture both the accuracy and variance of invariant predictions across multiple testing environments, the average accuracy and the accuracy gap (the difference between the best-case and worst-case accuracy) are evaluated for IRM methods.", + "bbox": [ + 169, + 540, + 823, + 595 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Specifically, for the COLORED-MNIST, COLORED-FMNIST, COLORED-OBJECT, CIFAR-MNIST, and CELEBA dataset, we manually create 19 test environments with uniformly sampled bias parameter $\\beta \\in \\{0.05, 0.1, \\dots, 0.95\\}$ , where the environment bias parameter $\\beta$ controls the spurious correlation (see Section 4 for more details).", + "bbox": [ + 169, + 602, + 823, + 659 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For PACS and VLCS datasets, the training and test sets have 4 environments, namely {art painting, cartoon, sketch, photo} and {CALTECH, LABELME, PASCAL, SUN} respectively. We use the first three environments as the training environments, while we use the test set of all four environments to form our proposed multi-environment invariance evaluation system.", + "bbox": [ + 169, + 665, + 823, + 723 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.2 BASELINES", + "text_level": 1, + "bbox": [ + 171, + 742, + 299, + 756 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For each baseline method, we follow its official PyTorch repository except IRM-GAME and SPARSEIRM. We translate the TensorFlow-based original code base of IRM-GAME to PyTorch. As one of the latest IRM advancements, the official code of SPARSEIRM is not yet publicly available. 
Therefore, we reproduce SPARSEIRM in PyTorch.", + "bbox": [ + 169, + 768, + 823, + 827 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In particular, for COLORED-MNIST and COLORED-FMNIST, we stick to the original hyperparameters for the large-batch setting and tune the hyper-parameters of each method, including the penalty weight, number of warm-up epochs, and learning rate for the small batch setting.", + "bbox": [ + 169, + 832, + 823, + 876 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In particular, for the large-batch setting, we use the penalty weight of $10^{6}$ , 190 warm-up epochs, and 500 epochs in total, as suggested by the original IRMv1 and inherited by its variants. For the small-batch setting, we adopt the same penalty weight $10^{6}$ . Further, we found that the warm-up", + "bbox": [ + 169, + 881, + 825, + 926 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 960 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "phase could be shortened without sacrificing accuracy. Therefore, we use 50 warm-up epochs and total 200 epochs for all the methods.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For other datasets, we adopt the batch size of 128 and use ResNet-18 as the default model architecture. We train for 200 epochs. We adopt the step-wise learning rate scheduler with an initial learning rate of 0.1. The learning rate decays by 0.1 at the 100th and 150th epochs.", + "bbox": [ + 169, + 138, + 825, + 181 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.3 ADDITIONAL EXPERIMENT RESULTS", + "text_level": 1, + "bbox": [ + 171, + 198, + 473, + 212 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The influence of batch size with all the baselines. 
We show in Figure A1 the influence of training batch size on the performance of different methods. We observe in Figure A1, as in Figure 1, that full batch setting does not achieve the best performance, and the use of mini-batch (stochastic gradient descent) indeed improves performance.", + "bbox": [ + 169, + 224, + 823, + 280 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/039def54e2becbaec79d1d03a53933990c3fed2cbd66dda2235a21b0bc50bf3a.jpg", + "image_caption": [ + "Figure A1: The performance of all the baselines in this work trained with different batch sizes on COLORED-MNIST dataset. The full data batch-size is 50k. The invariant accuracy corresponds to the average accuracy evaluated based on the diversified environments-based evaluation metric." + ], + "image_footnote": [], + "bbox": [ + 383, + 292, + 609, + 443 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/ef8217039f5f005b3bd69e9a9794f7e3b7c1686ce84d77fdda027d9d0567a5f7.jpg", + "image_caption": [ + "Figure A2: The loss landscapes of invariant prediction models acquired by (A) large-batch IRMv1 training with 50k batch size and (B) small-batch training with 1k batch size. The 2D loss landscape visualization is realized using the tool in (Li et al., 2018). The $x$ and $y$ axes represent the linear interpolation coefficients over two directional vectors originated from the converged local optima. Here the numbers on the contour denote the loss values over test data." + ], + "image_footnote": [], + "bbox": [ + 256, + 526, + 486, + 678 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/de052964e98aadf5f59cf377fff58ac0c1a4646431936e9149896a2f2230471d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 526, + 740, + 678 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Loss landscapes of IRMv1 with different batch sizes. 
We plot the loss landscapes of the models trained with IRMv1 on COLORED-MNIST using large (full) and small batch in Figure A2. Using small batch training, IRMv1 (Fig. A2B) converges to a smooth neighborhood of a local optima. This also corresponds to a flatter loss landscape than the landscape of the large-batch training (Figure A2(A)). The loss landscapes demonstrate consistent results as other experiments discussed in Section 3.", + "bbox": [ + 169, + 768, + 823, + 852 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Training trajectory with BLOC-IRM with and without stationary loss. In Figure A3, we plot the per-environment training trajectory of stationary loss when solving (1) and (BLOC-IRM) on COLORED-MNIST. For (BLOC-IRM) we use the regularization term $\\lambda = 10^6$ , which is aligned with the penalty coefficient used in IRMv1. As we can see, without the stationarity regularization,", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "the stationary loss remains at a high level for both environments (the dotted curves). Notably, the lower-level stationary can be reached fast with the stationarity penalty, as shown in the solid curves.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/337a64114140457585dd9b2b577dd0fd6b39b0f199f7682f05dfdd4e20a40c7a.jpg", + "image_caption": [ + "Figure A3: The per-environment training trajectory for the stationarity loss of (1) and (BLOC-IRM) on COLORED-MNIST. The training setting is the same as Figure 2. The algorithmic details can be found in Appendix B." 
+ ], + "image_footnote": [], + "bbox": [ + 369, + 147, + 622, + 311 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Performance of all the methods with full dataset list. We show in Table A1 the results of all the methods on the seven datasets we studied. To be more specific, in Table A1, we append the results of COLORED-MNIST and COLORED-FMNIST into Table 5 as a whole. As we can see, our methods outperforms other baselines in all the datasets in terms of average accuracy, and stands top in most cases in terms of the accuracy gap.", + "bbox": [ + 169, + 386, + 823, + 458 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/a4f83e9566b91effe521dcae282a9281257532dd65009a8d4bb0f5d3a2faf42f.jpg", + "table_caption": [ + "Table A1: IRM performance comparison between our proposed BLOC-IRM method and other baselines under the full list of datasets. We use MLP for COLORED-MNIST and COLORED-FMNIST, and ResNet-18 (He et al., 2016) for the rest datasets. The evaluation setup is consistent with Table 4, and the best performance per-evaluation metric and per-dataset is highlighted in bold." + ], + "table_footnote": [], + "table_body": "
Algorithm Metrics (%)COLORED-MNISTCOLORED-FMNISTCOLORED-OBJECTCIFAR-MNISTCELEBAVLCSPACS
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
ERM49.19±1.8990.72±2.0849.77±1.7188.62±2.4941.11±1.4486.43±2.8940.39±1.3285.53±2.3372.38±0.2910.73±0.3663.23±0.2312.39±0.3569.95±0.3514.32±0.75
IRMv1
IRMv0
IRM-GAME
REX68.42±0.291.65±0.0768.66±0.221.29±0.0863.37±0.355.42±0.3162.32±0.245.55±0.3272.34±0.2610.31±0.2363.19±0.3112.87±0.3169.43±0.3415.31±0.67
BIRM68.71±0.211.35±0.0968.64±0.321.44±0.1365.11±0.273.31±0.2262.99±0.355.23±0.3672.93±0.289.92±0.3363.33±0.4012.13±0.2369.34±0.2515.76±0.49
SPARSEIRM68.81±0.251.72±0.0568.29±0.221.28±0.1564.97±0.393.97±0.2562.16±0.294.14±0.3172.42±0.339.79±0.2162.86±0.2612.79±0.3569.52±0.3915.81±0.82
FISHR68.69±0.192.13±0.0868.79±0.171.77±0.1064.07±0.234.41±0.2961.79±0.255.55±0.2172.89±0.259.42±0.3263.44±0.3711.93±0.4270.21±0.2214.52±0.43
BLOC-IRM69.47±0.241.04±0.0769.43±0.211.14±0.1165.97±0.334.10±0.3663.69±0.324.89±0.3673.35±0.328.79±0.2163.62±0.3511.55±0.3270.31±0.2114.73±0.65
", + "bbox": [ + 173, + 532, + 823, + 614 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Experiment on different model sizes. We show in Figure A4 the influence of the increasing model size on the performance of different baselines considered in this work. Compared to Figure 4, we report additional standard deviation of the 10 independent trials in Figure A4.", + "bbox": [ + 169, + 637, + 823, + 681 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/f3c2dd9be52dd33649836e9caba9cd6238b137e020dcb52b708b55ea9a23b2e1.jpg", + "image_caption": [ + "Figure A4: IRM performance on COLORED-MNIST against the dimension of the intermediate layer in MLP. The dotted line represents the default dimension ( $d = 390$ ) used in the literature. The invariant prediction accuracy is presented via the dot line (mean) and shaded area (standard deviation) over 10 random trials." + ], + "image_footnote": [], + "bbox": [ + 370, + 694, + 620, + 859 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Experiment with different training environments. In Table A2, we show the performance of all the methods in more complex training environments, such as more training environments and more skewed environment bias parameter $\\beta$ . As we can see, BLOC-IRM outperforms other baselines.", + "bbox": [ + 169, + 103, + 823, + 148 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/1166dfe068891ef6fb4ed2653bc8f7c7e95a4acf9f3d90938a0343d6c81d6974.jpg", + "table_caption": [ + "Table A2: Performance under different training environments in COLORED-MNIST." + ], + "table_footnote": [], + "table_body": "
Environment Metrics (%)\\( {p}_{\\text{tr }} \\in \\{ {0.1},{0.15}\\} \\)\\( {p}_{\\text{tr }} \\in \\{ {0.1},{0.15},{0.2}\\} \\)
Avg AccAcc GapAvg AccAcc Gap
OPTIMUM75.000.0075.000.00
GRAYSCALE73.82±0.110.37±0.0573.97±0.140.29±0.08
ERM49.21±0.7991.88±3.3149.03±0.9392.17±3.04
IRMv167.36±0.312.77±0.1567.11±0.34\\( {2.42} \\pm {0.12} \\)
IRMv067.01±0.42\\( {2.85} \\pm {0.18} \\)66.71±0.42\\( {2.36} \\pm {0.19} \\)
IRM-GAME66.39±0.724.47±0.6165.93±0.53\\( {4.25} \\pm {0.84} \\)
REX66.82±0.44\\( {2.59} \\pm {0.11} \\)67.14±0.38\\( {2.16} \\pm {0.13} \\)
BIRM67.35±0.39\\( {2.65} \\pm {0.10} \\)68.05±0.43\\( {1.99} \\pm {0.07} \\)
SPARSEIRM67.12±0.53\\( {2.33} \\pm {0.18} \\)67.72±0.41\\( {2.11} \\pm {0.19} \\)
FISHR67.22±0.43\\( {2.44} \\pm {0.15} \\)67.32±0.39\\( {2.59} \\pm {0.15} \\)
BLOC-IRM
", + "bbox": [ + 302, + 185, + 696, + 351 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "BLOC-IRM with different regularizations. Based on the penalty terms used in the existing IRM variants, we explore the performance of our proposed BLOC-IRM with various regularization, including the ones used in IRMv1 (BLOC-IRM-v1), REX (BLOC-IRM-REX), and FISHR (BLOC-IRM-FISHR). We conduct experiments on three different datasets and the results are shown in Table A3. It is obvious that the best performance is always achieved when the per-environment stationarity is penalized in the upper-level. This is not surprising since without an explicit promotion of stationarity, other forms of penalties do not guarantee the BLO algorithm to achieve an optimal solution.", + "bbox": [ + 169, + 373, + 823, + 484 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/e5fb58e26d79ab8b12f38b8085b254e7f8bfc0c07d7675862655c88dcbc0cc84.jpg", + "table_caption": [ + "Table A3: The performance of BLOC-IRM with different regularization terms. Three datasets are studied and the latest baseline SPARSEIRM is listed as reference for comparison. The best performance per-evaluation metric and per-dataset is highlighted in **bold**." + ], + "table_footnote": [], + "table_body": "
Dataset MetricsCOLORED-MNISTCOLORED-OBJECTCIFAR-MNIST
Avg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
SPARSEIRM68.81±0.251.72±0.0564.97±0.393.97±0.2562.87±0.294.14±0.31
BLOC-IRM69.47±0.241.04±0.0765.97±0.334.10±0.3663.69±0.324.89±0.36
BLOC-IRM-v167.14±0.244.33±0.8363.38±0.296.31±0.5161.13±0.516.71±0.41
BLOC-IRM-REX62.71±0.218.74±1.2160.31±0.337.62±0.6659.39±0.557.89±0.45
BLOC-IRM-FISHR63.25±0.167.12±0.3961.17±0.346.98±0.4560.86±0.516.63±0.30
", + "bbox": [ + 236, + 546, + 759, + 638 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Performance comparison of different methods with additional covariate shifts. Besides the sensitivity check on model size, Table A4 examines the resilience of IRM to variations in the training environment. This study is motivated by Krueger et al. (2021), who empirically showed that the performance of invariant prediction degrades if additional covariate shifts are imposed on the training environments. Thus, we present the IRM performance on COLORED-MNIST by introducing class, digit, and color imbalances, following Krueger et al. (2021, Section 4.1). Compared with Table 4, IRM suffers a greater performance loss in Table A4, in the presence of training environment variations. However, the proposed BLOC-IRM maintains the accuracy improvement over baselines with each variation. In Table A2, we also study IRM with different numbers of training environments and observe the consistent improvement of BLOC-IRM over other baselines.", + "bbox": [ + 169, + 662, + 823, + 801 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Exploration on the failure cases of previous IRM methods. Some papers either theoretically (Rosenfeld et al., 2020) or empirically (Kamath et al., 2021) pointed out that the original IRMv1 method could fail in certain circumstances, due to the fact that the regularization term used in IRMv1 heavily relies on the \"linear predictor\" assumption. Regarding this issue, we first bring to attention that the BLOC-IRM formulation does not require the predictors to be linear, since we adopt the regularization in the form of IRMv0 in the upper-level objective, not IRMv1. 
To justify our argument, we repeat the experiments in (Kamath et al., 2021), which points out a specific scenario using the COLORED-MNIST dataset where IRMv1 fails.", + "bbox": [ + 169, + 816, + 823, + 930 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/31b96d56506bce747bf6c6fc3e2296e0ba166ec2663bedb0d870c90493635c4d.jpg", + "table_caption": [ + "Table A4: IRM performance on COLORED-MNIST and COLORED-FMNIST with training environment variations in terms of class, digit and color imbalances. The best IRM performance per-evaluation metric and per-variation source is highlighted in bold." + ], + "table_footnote": [], + "table_body": "
DatasetCOLORED-MNISTCOLORED-FMNIST
Variation Metrics (%)Class ImbalanceDigit ImbalanceColor ImbalanceClass ImbalanceDigit ImbalanceColor Imbalance
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
GRAYSCALE71.23±0.182.76±0.1170.31±0.212.79±0.1572.29±0.162.88±0.1470.15±0.212.29±0.1269.92±0.152.72±0.2173.31±0.111.17±0.23
ERM43.72±1.0192.76±1.4545.89±2.8291.65±1.8646.19±2.8890.88±1.6941.72±1.9893.37±2.1542.39±2.3992.23±2.7245.89±0.2791.31±2.27
IRMv165.39±0.224.44±0.2964.89±0.264.19±0.4466.12±0.253.31±0.2962.49±0.334.93±0.4561.88±0.235.54±0.3964.39±0.443.79±0.33
IRMv065.01±0.284.29±0.3365.13±0.253.87±0.2866.72±0.253.01±0.4462.78±0.485.33±0.4761.62±0.295.29±0.4164.93±0.273.28±0.31
IRM-GAME62.21±0.426.45±0.3562.10±0.356.72±0.4461.82±0.657.78±0.5560.73±0.846.24±0.4360.79±0.456.47±0.8264.32±0.425.73±0.31
REX66.45±0.253.39±0.2866.23±0.433.21±0.2066.99±0.423.32±0.2764.89±0.365.78±0.5363.95±0.254.73±0.6265.87±0.424.30±0.42
BIRM65.73±0.254.11±0.3165.73±0.884.49±0.6766.72±0.243.47±0.2564.39±0.344.47±0.3963.24±0.394.54±0.4265.08±0.313.80±0.29
SPARSEIRM65.32±0.394.92±0.2264.44±0.364.85±0.3366.03±0.322.85±0.1964.32±0.514.15±0.3662.97±0.355.75±0.5264.72±0.463.99±0.39
FISHR66.13±0.283.99±0.3265.87±0.423.72±0.4165.48±0.214.49±0.3163.62±0.535.59±0.3562.47±0.265.72±0.3365.13±0.324.44±0.21
BLOC-IRM66.32±0.273.11±0.2266.41±0.293.32±0.2567.25±0.243.72±0.2765.99±0.313.97±0.4365.13±0.315.11±0.4566.79±0.263.72±0.36
", + "bbox": [ + 173, + 152, + 823, + 263 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "More specifically, the models are trained in the training environments $(\\alpha, \\beta) = (0.1, 0.2)$ and $(0.1, 0.25)$ , and evaluated in the test environment $(0.1, 0.9)$ . Note that denotes the label flipping rate and represents the environment bias parameter. The results are shown in the Table A5. As we can see, IRMv1 is clearly worse than ERM as it achieves much lower average accuracy and higher accuracy gap. However, BLOC-IRM outperforms ERM by obtaining high average accuracy and lower accuracy gap. This result shows that BLOC-IRM seems promising to address the empirical IRM challenge discovered in (Kamath et al., 2021). In the meantime, we also acknowledge that BLOC-IRM is not per", + "bbox": [ + 169, + 289, + 602, + 443 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "fect since the advantage achieved by BLOC-IRM over ERM is not strong enough. However, we stress that the main contribution of BLOC-IRM does not lie in solving the failure cases of IRMv1, but to fix the issue of IRM-Game that resorts to a predictor ensemble to make the invariant prediction, which deviates from the spirit of acquiring invariant predictors in the original IRM paradigm.", + "bbox": [ + 169, + 443, + 823, + 500 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/555cc6211eacb461f589fcf37cfcbe02bf6782991a8daa9c39167629bfbeb33e.jpg", + "table_caption": [ + "Table A5: Performance comparisons on COLORED-MNIST among ERM, IRMv1, and BLOC-IRM in the scenarios where IRM-variants failed following (Kamath et al., 2021)." + ], + "table_footnote": [], + "table_body": "
MethodAvg. Acc.Acc. Gap
ERM83.0913.79
IRMv176.8927.68
BLOC-IRM84.2211.01
", + "bbox": [ + 612, + 378, + 808, + 430 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A similar curve to Figure 1 on COLORED-FMNIST. We show the results for COLORED-FMNIST similar to Figure 1 in Figure A5 and the conclusion does not change much. As mentioned before, the large-batch training setup was typically used for IRM training over the COLORED-FMNIST and COLORED-FMNIST datasets.", + "bbox": [ + 169, + 512, + 823, + 570 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/14defae9d974e70d514e4e2d0e013149aa8ca0b72a658f25f0b6255938cf8e97.jpg", + "image_caption": [ + "Figure A5: The performance of three IRM methods (IRMv1, IRMv0, and REX) vs. batch size under COLORED-FMNIST." + ], + "image_footnote": [], + "bbox": [ + 382, + 580, + 609, + 729 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_model.json b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..8cac0c1b755cc455cedba1d7bd110087ad876ec4 --- /dev/null +++ b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_model.json @@ -0,0 +1,3164 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.825, + 0.149 + ], + "angle": 0, + "content": "WHAT IS MISSING IN IRM TRAINING AND EVALUATION? 
CHALLENGES AND SOLUTIONS" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.169, + 0.846, + 0.201 + ], + "angle": 0, + "content": "Yihua Zhang\\(^{1}\\), Pranay Sharma\\(^{2}\\), Parikshit Ram\\(^{3}\\), Mingyi Hong\\(^{4}\\), Kush Varshney\\(^{3}\\), Sijia Liu\\(^{1,3}\\) \n\\(^{1}\\)Michigan State University, \\(^{2}\\)Carnegie Mellon University, \\(^{3}\\)IBM Research, \\(^{4}\\)University of Minnesota" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.236, + 0.548, + 0.251 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.265, + 0.77, + 0.587 + ], + "angle": 0, + "content": "Invariant risk minimization (IRM) has received increasing attention as a way to acquire environment-agnostic data representations and predictions, and as a principled solution for preventing spurious correlations from being learned and for improving models' out-of-distribution generalization. Yet, recent works have found that the optimality of the originally-proposed IRM optimization (IRMv1) may be compromised in practice or could be impossible to achieve in some scenarios. Therefore, a series of advanced IRM algorithms have been developed that show practical improvement over IRMv1. In this work, we revisit these recent IRM advancements, and identify and resolve three practical limitations in IRM training and evaluation. First, we find that the effect of batch size during training has been chronically overlooked in previous studies, leaving room for further improvement. We propose small-batch training and highlight the improvements over a set of large-batch optimization techniques. Second, we find that improper selection of evaluation environments could give a false sense of invariance for IRM. To alleviate this effect, we leverage diversified test-time environments to precisely characterize the invariance of IRM when applied in practice. Third, we revisit Ahuja et al. 
(2020)'s proposal to convert IRM into an ensemble game and identify a limitation when a single invariant predictor is desired instead of an ensemble of individual predictors. We propose a new IRM variant to address this limitation based on a novel viewpoint of ensemble IRM games as consensus-constrained bilevel optimization. Lastly, we conduct extensive experiments (covering 7 existing IRM variants and 7 datasets) to justify the practical significance of revisiting IRM training and evaluation in a principled manner." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.609, + 0.338, + 0.624 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.639, + 0.828, + 0.766 + ], + "angle": 0, + "content": "Deep neural networks (DNNs) have enjoyed unprecedented success in many real-world applications (He et al., 2016; Krizhevsky et al., 2017; Simonyan & Zisserman, 2014; Sun et al., 2014). However, experimental evidence (Beery et al., 2018; De Haan et al., 2019; DeGrave et al., 2021; Geirhos et al., 2020; Zhang et al., 2022b) suggests that DNNs trained with empirical risk minimization (ERM), the most commonly used training method, are prone to reproducing spurious correlations in the training data (Beery et al., 2018; Sagawa et al., 2020). This phenomenon causes performance degradation when facing distributional shifts at test time (Gulrajani & Lopez-Paz, 2020; Koh et al., 2021; Wang et al., 2022; Zhou et al., 2022a). In response, the problem of invariant prediction arises to enforce the model trainer to learn stable and causal features (Beery et al., 2018; Sagawa et al., 2020)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.772, + 0.829, + 0.926 + ], + "angle": 0, + "content": "In pursuit of out-of-distribution generalization, a new model training paradigm, termed invariant risk minimization (IRM) (Arjovsky et al., 2019), has received increasing attention to overcome the shortcomings of ERM against distribution shifts. 
In contrast to ERM, IRM aims to learn a universal representation extractor, which can elicit an invariant predictor across multiple training environments. However, different from ERM, the learning objective of IRM is highly non-trivial to optimize in practice. Specifically, IRM requires solving a challenging bi-level optimization (BLO) problem with a hierarchical learning structure: invariant representation learning at the upper-level and invariant predictive modeling at the lower-level. Various techniques have been developed to solve IRM effectively, such as (Ahuja et al., 2020; Lin et al., 2022; Rame et al., 2022; Zhou et al., 2022b) to name a few. Despite the proliferation of IRM advancements, several issues in the theory and practice have also appeared. For example, recent works (Rosenfeld et al., 2020; Kamath et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.175 + ], + "angle": 0, + "content": "2021) revealed the theoretical failure of IRM in some cases. In particular, there exist scenarios where the optimal invariant predictor is impossible to achieve, and the IRM performance may fall behind even that of ERM. Practical studies also demonstrate that the performance of IRM relies on multiple factors, e.g., model size (Lin et al., 2022; Zhou et al., 2022b), environment difficulty (Dranker et al., 2021; Krueger et al., 2021), and dataset type (Gulrajani & Lopez-Paz, 2020)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.181, + 0.827, + 0.307 + ], + "angle": 0, + "content": "Therefore, key challenges remain in deploying IRM to real-world applications. 
In this work, we revisit recent IRM advancements and uncover and tackle several pitfalls in IRM training and evaluation, which have so far gone overlooked. We first identify the large-batch training issue in existing IRM algorithms, which prevents escape from bad local optima during IRM training. Next, we show that evaluation of IRM performance with a single test-time environment could lead to an inaccurate assessment of prediction invariance, even if this test environment differs significantly from training environments. Based on the above findings, we further develop a novel IRM variant, termed BLOC-IRM, by interpreting and advancing the IRM-GAME method (Ahuja et al., 2020) through the lens of BLO with Consensus prediction. Below, we list our contributions (1-3)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.313, + 0.825, + 0.385 + ], + "angle": 0, + "content": "1 We demonstrate that the prevalent use of large-batch training leaves significant room for performance improvement in IRM, something chronically overlooked in the previous IRM studies with benchmark datasets COLORED-MNIST and COLORED-FMNIST. By reviewing and comparing with 7 state-of-the-art (SOTA) IRM variants (Table 1), we show that simply using small-batch training improves generalization over a series of more involved large-batch optimization enhancements." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.825, + 0.433 + ], + "angle": 0, + "content": "We also show that an inappropriate evaluation metric could give a false sense of invariance to IRM. Thus, we propose an extended evaluation scheme that quantifies both precision and 'invariance' across diverse testing environments." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.439, + 0.827, + 0.496 + ], + "angle": 0, + "content": "Further, we revisit and advance the IRM-GAME approach (Ahuja et al., 2020) through the lens of consensus-constrained BLO. 
We remove the need for an ensemble (one per training environment) of predictors in IRM-GAME by proposing BLOC-IRM (BLO with Consensus IRM), which produces a single invariant predictor." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.502, + 0.825, + 0.559 + ], + "angle": 0, + "content": "Lastly, we conduct extensive experiments (on 7 datasets, using diverse model architectures and training environments) to justify the practical significance of our findings and methods. Notably, we conduct experiments on the CELEBA dataset as a new IRM benchmark with realistic spurious correlations. We show that BLOC-IRM outperforms all baselines in nearly all settings." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.313, + 0.827, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.593, + 0.333, + 0.607 + ], + "angle": 0, + "content": "1.1 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.625, + 0.827, + 0.766 + ], + "angle": 0, + "content": "IRM methods. Inspired by the invariance principle (Peters et al., 2016), Arjovsky et al. (2019) define IRM as a BLO problem, and develop a relaxed single-level formulation, termed IRMv1, for ease of training. Recently, there has been considerable work to advance IRM techniques. Examples of IRM variants include penalization on the variance of risks or loss gradients across training environments (Chang et al., 2020; Krueger et al., 2021; Rame et al., 2022; Xie et al., 2020; Xu & Jaakkola, 2021; Xu et al., 2022), domain regret minimization (Jin et al., 2020), robust optimization over multiple domains (Xu & Jaakkola, 2021), sparsity-promoting invariant learning (Zhou et al., 2022b), Bayesian inference-baked IRM (Lin et al., 2022), and ensemble game over the environment-specific predictors (Ahuja et al., 2020). We refer readers to Section 2 and Table 1 for more details on the IRM methods that we will focus on in this work." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.772, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Despite the potential and popularity of IRM, some works have also shown the theoretical and practical limitations of current IRM algorithms. Specifically, Chen et al. (2022); Kamath et al. (2021) show that invariance learning via IRM could fail and be worse than ERM in some two-bit environment setups on COLORED-MNIST, a synthetic benchmark dataset often used in IRM works. The existence of failure cases of IRM is also theoretically shown by Rosenfeld et al. (2020) for both linear and non-linear models. Although subsequent IRM algorithms take these failure cases into account, there still exist huge gaps between theoretically desired IRM and its practical variants. For example, Lin et al. (2021; 2022); Zhou et al. (2022b) found many IRM variants incapable of maintaining graceful generalization on large and deep models. Moreover, Ahuja et al. (2021); Dranker et al. (2021) demonstrated that the performance of IRM algorithms could depend on practical details, e.g., dataset size, sample efficiency, and environmental bias strength. The above IRM limitations in" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "spire our work to study when and how we can turn the IRM advancements into effective solutions, to gain high-accuracy and stable invariant predictions in practical scenarios." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.148, + 0.827, + 0.274 + ], + "angle": 0, + "content": "Domain generalization. 
IRM is also closely related to domain generalization (Carlucci et al., 2019; Gulrajani & Lopez-Paz, 2020; Koh et al., 2021; Li et al., 2019; Nam et al., 2021; Wang et al., 2022; Zhou et al., 2022a). Compared to IRM, domain generalization includes a wider range of approaches to improve prediction accuracy against distributional shifts (Beery et al., 2018; Jean et al., 2016; Koh et al., 2021). For example, an important line of research is to improve representation learning by encouraging cross-domain feature resemblance (Long et al., 2015; Tzeng et al., 2014). The studies on domain generalization have also been conducted across different learning paradigms, e.g., adversarial learning (Ganin et al., 2016), self-supervised learning (Carlucci et al., 2019), and meta-learning (Balaji et al., 2018; Dou et al., 2019)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.294, + 0.443, + 0.309 + ], + "angle": 0, + "content": "2 PRELIMINARIES AND SETUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.325, + 0.799, + 0.34 + ], + "angle": 0, + "content": "In this section, we introduce the basics of IRM and provide an overview of our IRM case study." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.346, + 0.825, + 0.491 + ], + "angle": 0, + "content": "IRM formulation. In the original IRM framework Arjovsky et al. (2019), consider a supervised learning paradigm, with datasets \\(\\{\\mathcal{D}^{(e)}\\}_{e\\in \\mathcal{E}_{\\mathrm{tr}}}\\) collected from \\(N\\) training environments \\(\\mathcal{E}_{\\mathrm{tr}} = \\{1,2,\\dots ,N\\}\\). The training samples in \\(\\mathcal{D}^{(e)}\\) (corresponding to the environment \\(e\\)) are of the form \\((\\mathbf{x},y)\\in \\mathcal{X}\\times \\mathcal{Y}\\), where \\(\\mathcal{X}\\) and \\(\\mathcal{Y}\\) are, respectively, the raw feature space and the label space. 
IRM aims to find an environment-agnostic data representation \\(\\phi_{\\theta}:\\mathcal{X}\\to \\mathcal{Z}\\), which elicits an invariant prediction \\(f_{\\mathbf{w}}:\\mathcal{Z}\\rightarrow \\mathcal{V}\\) that is simultaneously optimal for all environments. Here \\(\\pmb{\\theta}\\) and \\(\\mathbf{w}\\) denote model parameters to be learned, and \\(\\mathcal{Z}\\) denotes the representation space. Thus, IRM yields an invariant predictor \\(f_{\\mathbf{w}}\\circ \\phi_{\\pmb{\\theta}}:\\mathcal{X}\\to \\mathcal{Y}\\) that can generalize to unseen test-time environments \\(\\{\\mathcal{D}^{(e)}\\}_{e\\notin \\mathcal{E}_{\\mathrm{tr}}}\\). Here \\(\\circ\\) denotes function composition, i.e., \\(f_{\\mathbf{w}}\\circ \\phi_{\\pmb{\\theta}}(\\cdot) = f_{\\mathbf{w}}(\\phi_{\\pmb{\\theta}}(\\cdot))\\). We will use \\(\\mathbf{w}\\circ \\pmb{\\theta}\\) as a shorthand for \\(f_{\\mathbf{w}}\\circ \\phi_{\\pmb{\\theta}}\\). IRM constitutes the following BLO problem:" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.499, + 0.825, + 0.524 + ], + "angle": 0, + "content": "\\[\n\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right); \\quad \\text {s u b j e c t t o} \\quad \\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\in \\underset {\\bar {\\mathbf {w}}} {\\arg \\min } \\ell^ {(e)} \\left(\\bar {\\mathbf {w}} \\circ \\boldsymbol {\\theta}\\right), \\forall e \\in \\mathcal {E} _ {\\mathrm {t r}}, \\quad (\\text {I R M})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.53, + 0.825, + 0.617 + ], + "angle": 0, + "content": "where \\(\\ell^{(e)}(\\mathbf{w} \\circ \\boldsymbol{\\theta})\\) is the per-environment training loss of the predictor \\(\\mathbf{w} \\circ \\boldsymbol{\\theta}\\) under \\(\\mathcal{D}^{(e)}\\). 
Clearly, IRM involves two optimization levels that are coupled through the lower-level solution \\(\\mathbf{w}^*(\\boldsymbol{\\theta})\\). Achieving the desired invariant prediction requires the solution sets of the individual lower-level problems \\(\\{\\arg \\min_{\\bar{\\mathbf{w}}} \\ell^{(e)}(\\bar{\\mathbf{w}} \\circ \\boldsymbol{\\theta}), e \\in \\mathcal{E}_{tr}\\}\\) to be non-singleton. However, BLO problems with non-singleton lower-level solution sets are significantly more challenging (Liu et al., 2021). To circumvent this difficulty, Arjovsky et al. (2019) relax (IRM) into a single-level optimization problem (a.k.a., IRMv1):" + }, + { + "type": "equation", + "bbox": [ + 0.319, + 0.624, + 0.825, + 0.647 + ], + "angle": 0, + "content": "\\[\n\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} [ \\ell^ {(e)} (\\boldsymbol {\\theta}) + \\gamma \\| \\nabla_ {w | w = 1. 0} \\ell^ {(e)} (w \\circ \\boldsymbol {\\theta}) \\| _ {2} ^ {2} ], \\tag {IRMv1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.654, + 0.825, + 0.738 + ], + "angle": 0, + "content": "where \\(\\gamma > 0\\) is a regularization parameter and \\(\\nabla_{w|w = 1.0}\\ell^{(e)}\\) denotes the gradient of \\(\\ell^{(e)}\\) with respect to \\(w\\), computed at \\(w = 1.0\\). Compared with IRM, IRMv1 is restricted to linear invariant predictors, and penalizes the deviation of individual environment losses from stationarity to approach the lower-level optimality in (IRM). IRMv1 uses the fact that a scalar predictor (\\(w = 1.0\\)) is equivalent to a linear predictor. Despite the practical simplicity of (IRMv1), it may fail to achieve the desired invariance (Chen et al., 2022; Kamath et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.745, + 0.825, + 0.829 + ], + "angle": 0, + "content": "Case study of IRM methods. 
As illustrated above, the objective of IRM is difficult to optimize, while IRMv1 only provides a sub-optimal solution. Subsequent advances have attempted to reduce this gap. In this work, we focus on 7 popular IRM variants and evaluate their invariant prediction performance over 7 datasets. Table 1 and Table 2 respectively summarize the IRM methods and the datasets considered in this work. We survey the most representative and effective IRM variants in the literature, which will also serve as our baselines in performance comparison." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.835, + 0.825, + 0.864 + ], + "angle": 0, + "content": "Following Table 1, we first introduce the IRMv0 variant, a generalization of IRMv1, by relaxing its assumption of linearity of the predictor \\(\\mathbf{w}\\), yielding" + }, + { + "type": "equation", + "bbox": [ + 0.323, + 0.867, + 0.825, + 0.889 + ], + "angle": 0, + "content": "\\[\n\\underset {\\mathbf {w}, \\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} [ \\ell^ {(e)} (\\mathbf {w} \\circ \\boldsymbol {\\theta}) + \\gamma \\| \\nabla_ {\\mathbf {w}} \\ell^ {(e)} (\\mathbf {w} \\circ \\boldsymbol {\\theta}) \\| _ {2} ^ {2} ]. \\tag {IRMv0}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Next, we consider the risk extrapolation method REx (Krueger et al., 2021), an important baseline based on distributionally robust optimization for group shifts (Sagawa et al., 2019). 
Furthermore," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.534, + 0.215 + ], + "angle": 0, + "content": "Table 1: Summary of the 7 existing IRM variants considered in this work, and the proposed BLOC-IRM method (see Section 5). We also list the 7 benchmark datasets used to evaluate IRM performance, namely, COLORED-MNIST (CoM), COLORED-FMNIST (CoF), CIFAR-MNIST (CiM), COLORED-OBJECT (CoO), CELEBA (CA), PACS (P) and VLCS (A). The symbols \\(\\checkmark\\) signifies the dataset used in the specific reference." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.219, + 0.534, + 0.313 + ], + "angle": 0, + "content": "
IRM\nMethodVenueDatasetsReference
CoMCoFCiMCoOCAPV
IRMv1arXiv(Arjovsky et al., 2019)
IRMv0N/AThis Work
IRM-GAMEICML(Ahuja et al., 2020)
REXICML(Krueger et al., 2021)
BIRMCVPR(Lin et al., 2022)
SPARSEIRMICML(Zhou et al., 2022b)
FISHRICML(Rame et al., 2022)
OursN/AThis Work
" + }, + { + "type": "table_caption", + "bbox": [ + 0.56, + 0.113, + 0.825, + 0.165 + ], + "angle": 0, + "content": "Table 2: Dataset setups. 'Invariant' and 'Spurious' represent the core and spurious features. 'Env1' and 'Env2' are environments with different spurious correlations." + }, + { + "type": "table", + "bbox": [ + 0.562, + 0.165, + 0.825, + 0.313 + ], + "angle": 0, + "content": "
DatasetInvariantSpuriousEnv 1Env 2
CoMDigitColor
CoFObjectColor
CiMCIFARMNIST
CoOObjectColor
CASmilingHair Color
PObjectTexture
VObjectEnvironment
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.326, + 0.827, + 0.522 + ], + "angle": 0, + "content": "inspired by the empirical findings that the performance of IRM could be sensitive to model size (Choe et al., 2020; Gulrajani & Lopez-Paz, 2020), we choose the SOTA methods Bayesian IRM (BIRM) (Lin et al., 2022) and sparse IRM (SPARSEIRM) (Zhou et al., 2022b), both of which show improved performance with large models. Also, we consider the SOTA method FISHR (Rame et al., 2022), which modifies IRM to penalize the domain-level gradient variance in single-level risk minimization. FISHR provably matches both domain-level risks and Hessians. Lastly, we include IRM-GAME (Ahuja et al., 2020) as a special variant of IRM. Different from the other methods which seek an invariant predictor, IRM-GAME endows each environment with a predictor, and leverages this ensemble of predictors to achieve invariant representation learning. This is in contrast to other existing works which seek an invariant predictor. Yet, we show in Section 5 that IRM-GAME can be interpreted through the lens of consensus-constrained BLO and generalized for invariant prediction. We also highlight that diverse dataset types are considered in this work (see Table 2) to benchmark IRM's performance. More details on dataset selections can be found in Appendix A." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.544, + 0.703, + 0.56 + ], + "angle": 0, + "content": "3 LARGE-BATCH TRAINING CHALLENGE AND IMPROVEMENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.576, + 0.825, + 0.604 + ], + "angle": 0, + "content": "In this section, we demonstrate and resolve the large-batch training challenge in current IRM implementations (Table 1)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.612, + 0.584, + 0.793 + ], + "angle": 0, + "content": "Large-batch optimization causes instabilities of IRM training. 
Using very large-size batches for model training can result in the model getting trapped near a bad local optima (Keskar et al., 2016). This happens as a result of the lack of stochasticity in the training process, and is known to exist even in the ERM paradigm (Goyal et al., 2017; You et al., 2017a). Yet, nearly all the existing IRM methods follow the training setup of IRMv1 (Arjovsky et al., 2019), which used the full-batch gradient descent (GD) method rather than the mini-batch stochastic gradient descent (SGD) for IRM training over COLORED-MNIST and COLORED-FMNIST. In the following, we show that large-batch training might give a false impression of the relative ranking of IRM performances." + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.612, + 0.807, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.592, + 0.741, + 0.825, + 0.792 + ], + "angle": 0, + "content": "Figure 1: The performance of three IRM methods (IRMv1, IRMv0, and REX) vs. batch-size under COLORED-MNIST. The full batch-size is \\(50\\mathrm{k}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.799, + 0.827, + 0.925 + ], + "angle": 0, + "content": "We start with an exploration of the impact of batch size on the invariant prediction accuracy of existing IRM methods under COLORED-MNIST. Here the invariant prediction accuracy refers to the averaged accuracy of the invariant predictor applied to diverse test-time environments. We defer its formal description to Section 4. Figure 1 shows the invariant prediction accuracy of three IRM methods IRMv1, IRMv0, and REX vs. the data batch size (see Figure A1 for results of other IRM variants and Figure A5 for COLORED-FMNIST). Recall that the full batch size (50k) was used in the existing IRM implementations (Arjovsky et al., 2019; Krueger et al., 2021). 
As we can see, in the full-batch setup, IRM methods lead to widely different invariant prediction accuracies, where REX and IRMv1 significantly outperform IRMv0. In contrast, in the small-batch case (with size" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "1k), the discrepancy in accuracy across methods vanishes. We see that IRMv0 can be as effective as IRMv1 and other IRM variants (such as REX) only if an appropriate small batch size is used." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.224 + ], + "angle": 0, + "content": "Empirical evidence in Figure 1 shows that large-batch IRM training is less effective than small-batch. This is aligned with the observations in ERM (You et al., 2017b; 2018; 2019), where the lack of stochasticity makes the optimizer difficult to escape from a sharp local minimum. We also justify this issue by visualizing the loss landscapes in Figure A2. Notably, the small-batch training enables IRMv1 to converge to a local optimum with a flat loss landscape, indicating better generalization (Keskar et al., 2016)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.231, + 0.828, + 0.37 + ], + "angle": 0, + "content": "Small-batch training is effective versus a zoo of large-batch optimization enhancements. To mitigate the large-batch IRM training issue, we next investigate the effectiveness of both small-batch training and a zoo of large-batch optimization enhancements. Inspired by large-batch training techniques to scale up ERM, we consider Large-batch SGD (LSGD) (Goyal et al., 2017) and Layerwise Adaptive Learning Rate (LALR) (You et al., 2017b; 2018; 2019; Zhang et al., 2022a). 
Both methods aim to smoothen the optimization trajectory by improving either the learning rate scheduler or the quality of initialization. Furthermore, we adopt sharpness-aware minimization (SAM) (Foret et al., 2020) as another possible large-batch training solution to explicitly penalize the sharpness of the loss landscape. We integrate the above optimization techniques with IRM, leading to the variants IRM-LSGD, IRM-LALR, and IRM-SAM. See Appendix B.1 for more details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.377, + 0.546, + 0.556 + ], + "angle": 0, + "content": "In Table 3, we compare the performance of the simplest small-batch IRM training with that of those large-batch optimization technique-integrated IRM variants (i.e., 'LSGD/LALR/SAM' in the Table). As we can see, the use of large-batch optimization techniques indeed improves the prediction accuracy over the original IRM implementation. We also observe that the use of SAM for IRM is consistently better than LALR and LSGD, indicating the promise of SAM to scale up IRM with a large batch size. Yet, the small-batch training protocol consistently outperforms large-batch training across all the IRM variants (see the column 'Small'). Additional experiment results in Section 6 show that small-" + }, + { + "type": "table_caption", + "bbox": [ + 0.552, + 0.379, + 0.825, + 0.457 + ], + "angle": 0, + "content": "Table 3: Prediction accuracy of IRM methods on COLORED-MNIST using the original large-batch implementation ('Original'), the large-batch optimization-integrated implementations ('LSGD/LALR/SAM'), and the small-batch training recipe ('Small')." + }, + { + "type": "table", + "bbox": [ + 0.556, + 0.457, + 0.825, + 0.553 + ], + "angle": 0, + "content": "
MethodOriginalLSGDLALRSAMSmall
IRMv167.1367.3167.4467.7968.33
IRMv065.3966.4266.7666.9968.37
IRM-GAME65.6965.8265.4766.2367.73
REX67.4267.5367.5967.8268.42
BIRM67.9367.9968.2168.3268.71
SPARSEIRM67.7267.8567.9968.1368.81
FISHR67.8867.8267.9368.1168.69
Average67.0267.2567.3467.6368.44
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.557, + 0.825, + 0.572 + ], + "angle": 0, + "content": "batch IRM training is effective across datasets, and promotes the invariance achieved by all methods." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.593, + 0.622, + 0.609 + ], + "angle": 0, + "content": "4 MULTI-ENVIRONMENT INVARIANCE EVALUATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.625, + 0.825, + 0.656 + ], + "angle": 0, + "content": "In this section, we revisit the evaluation metric used in existing IRM methods, and show that expanding the diversity of test-time environments would improve the accuracy of invariance assessment." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.66, + 0.826, + 0.829 + ], + "angle": 0, + "content": "Nearly all the existing IRM methods (including those listed in Table 1) follow the evaluation pipeline used in the vanilla IRM framework (Arjovsky et al., 2019), which assesses the performance of the learned invariant predictor on a single unseen test environment. This test-time environment is significantly different from train-time environments. For example, COLORED-MNIST (Arjovsky et al., 2019) suggests a principled way to define two-bit environments, widely-used for IRM dataset curation. Specifically, the COLORED-MNIST task is to predict the label of the handwritten digit groups (digits 0-4 for group 1 and digits 5-9 for group 2). The digit number is also spuriously correlated with the digit color (Table 2). This spurious correlation is controlled by an environment bias parameter \\(\\beta\\), which specifies different data environments with different levels of spurious correlation1. In (Arjovsky et al., 2019), \\(\\beta = 0.1\\) and \\(\\beta = 0.2\\) are used to define two training environments, which sample the color ID by flipping the digit group label with probability \\(10\\%\\) and \\(20\\%\\), respectively. 
At test time, the invariant accuracy is evaluated on a single, unseen environment with \\(\\beta = 0.9\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.834, + 0.829, + 0.907 + ], + "angle": 0, + "content": "However, the prediction accuracy of IRM could be sensitive to the choice of test-time environment (i.e., the value of \\(\\beta\\)). For the default test environment \\(\\beta = 0.9\\), the predictor performance of three representative IRM methods (IRMv1, IRM-GAME, FISHR) ranked from high to low is IRM-GAME>FISHR>IRMv1. Given this apparent ranking, we explore more diverse test-time environments, generated by \\(\\beta \\in \\Omega := \\{0.05, 0.1, \\ldots, 0.95\\}\\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.825, + 0.925 + ], + "angle": 0, + "content": "In the two-bit environment, there exists another environment parameter \\(\\alpha\\) that controls the label noise level." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.105, + 0.446, + 0.367 + ], + "angle": 0, + "content": "Although the train-time bias parameters \\(\\{0.1, 0.2\\}\\) belong to \\(\\Omega\\), test data is generated afresh, different from training data. We see in Figure 2A that the superiority of IRM-GAME at \\(\\beta = 0.9\\) vanishes for smaller \\(\\beta\\). Consequently, for invariant prediction evaluated in other testing environments (e.g., \\(\\beta < 0.4\\)), the performance ranking of the same methods becomes IRMV1>FISHR>IRM-GAME. This mismatch of results suggests we measure the 'invariance' of IRM methods against diverse test environments. Otherwise, evaluation with single \\(\\beta\\) could give a false sense of invariance. 
In Figure 2B, we present the box plots of prediction accuracies for IRM variants, over the diverse set of testing environments \\((\\beta \\in \\Omega)\\). Evidently, IRMV1, the oldest" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.367, + 0.825, + 0.437 + ], + "angle": 0, + "content": "(sub-optimal) IRM method, yields the least variance of invariant prediction accuracies and the best average prediction accuracy, compared to both IRM-GAME and FISHR. To summarize, the new evaluation method, with diverse test environments, enables us to make a fair comparison of IRM methods implemented in different training environment settings. Unless specified otherwise, we use the multi-environment evaluation method throughout this work." + }, + { + "type": "image", + "bbox": [ + 0.458, + 0.107, + 0.638, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.543, + 0.226, + 0.566, + 0.238 + ], + "angle": 0, + "content": "(A)" + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.108, + 0.817, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.739, + 0.226, + 0.761, + 0.238 + ], + "angle": 0, + "content": "(B)" + }, + { + "type": "image_caption", + "bbox": [ + 0.456, + 0.238, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Figure 2: Performance comparison of IRM variants IRMv1, IRM-GAME, and FISHR on COLORED-MNIST. (A) Evaluation in different test-time environments (corresponding to different \\(\\beta\\)). \\(\\beta\\) values used by the two training environments are 0.1, 0.2 respectively. The conventional evaluation is done with the test environment \\(\\beta = 0.9\\) (see \\(\\triangle\\)). (B) Box plots of prediction accuracies over diverse test environments corresponding to \\(\\beta \\in \\{0.05, 0.1, \\dots, 0.95\\}\\). IRMv1 achieves the best average accuracy \\((67.13\\%)\\), followed by FISHR \\((67.05\\%)\\) and IRM-GAME \\((65.53\\%)\\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.458, + 0.734, + 0.473 + ], + "angle": 0, + "content": "5 ADVANCING IRM-GAME VIA CONSENSUS-CONSTRAINED BLO" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.49, + 0.825, + 0.533 + ], + "angle": 0, + "content": "In this section, we revisit and advance a special IRM variant, IRM-GAME (Ahuja et al., 2020), which endows each individual environment with a separate prediction head and converts IRM into an ensemble game over these multiple predictors." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.539, + 0.825, + 0.639 + ], + "angle": 0, + "content": "Revisiting IRM-GAME. We first introduce the setup of IRM-GAME following notations used in Section 2. The most essential difference between IRM-GAME and the vanilla IRM framework is that the former assigns each environment with an individual classifier \\(\\mathbf{w}^{(e)}\\), and then relies on the ensemble of these individual predictors, i.e., \\(\\frac{1}{N}\\sum_{e\\in \\mathcal{E}_{\\mathrm{tr}}}(\\mathbf{w}^{(e)}\\circ \\pmb {\\theta})\\), for inference. IRM-GAME is in a sharp contrast to IRM, where an environment-agnostic prediction head \\(\\mathbf{w}^*\\) simultaneously optimizes the losses across all environments. Therefore, we raise the following question: Can IRM-GAME learn an invariant predictor?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.645, + 0.827, + 0.753 + ], + "angle": 0, + "content": "Inspired by the above question, we explicitly enforce invariance by imposing a consensus prediction constraint \\(\\mathcal{C} \\coloneqq \\left\\{\\left(\\bar{\\mathbf{w}}^{(1)}, \\bar{\\mathbf{w}}^{(2)}, \\ldots \\bar{\\mathbf{w}}^{(N)}\\right) \\mid \\bar{\\mathbf{w}}^{(1)} = \\ldots = \\bar{\\mathbf{w}}^{(N)}\\right\\}\\) and integrate it with IRM-GAME. Here, \\(\\bar{\\mathbf{w}}^{(e)}\\) denotes the prediction head for the \\(e\\)-th environment. 
Based on the newly-introduced constraint, the ensemble prediction head \\(\\frac{1}{N} \\sum_{e \\in \\mathcal{E}_{\\mathrm{tr}}} \\mathbf{w}^{(e)}\\) can be interpreted as the average consensus over \\(N\\) environments: \\(\\mathbf{w}^* \\coloneqq \\frac{1}{N} \\sum_{e \\in \\mathcal{E}_{\\mathrm{tr}}} \\mathbf{w}^{(e)} = \\arg \\min_{\\{\\bar{\\mathbf{w}}^{(e)}\\}_e \\in \\mathcal{C}} \\sum_{e \\in \\mathcal{E}_{\\mathrm{tr}}} \\| \\bar{\\mathbf{w}}^{(e)} - \\mathbf{w}^{(e)} \\|_2^2\\). With the above consensus interpretation, we can then cast the invariant predictor-baked IRM-GAME as a consensus-constrained BLO problem, extended from (IRM):" + }, + { + "type": "equation", + "bbox": [ + 0.311, + 0.763, + 0.537, + 0.782 + ], + "angle": 0, + "content": "\\[\n\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.313, + 0.782, + 0.825, + 0.806 + ], + "angle": 0, + "content": "\\[\n\\text {s u b j e c t t o} \\quad (\\mathbf {I}): \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta}) \\in \\underset {\\bar {\\mathbf {w}} ^ {(e)}} {\\arg \\min } \\ell^ {(e)} \\left(\\bar {\\mathbf {w}} ^ {(e)} \\circ \\boldsymbol {\\theta}\\right), \\forall e \\in \\mathcal {E} _ {\\mathrm {t r}}, \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.387, + 0.807, + 0.597, + 0.825 + ], + "angle": 0, + "content": "\\[\n(\\mathbf {I I}) \\colon \\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) = \\frac {1}{N} \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.832, + 0.825, + 0.876 + ], + "angle": 0, + "content": "The above contains two lower-level problems: (I) per-environment risk minimization, and (II) projection onto the consensus constraint \\((\\{\\mathbf{w}^{(e)}\\} \\in 
\\mathcal{C})\\). The incorporation of (II) is intended to ensure the use of invariant prediction head \\(\\mathbf{w}^*(\\pmb{\\theta})\\) in the upper-level optimization problem of (1)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Limitation of (1) and BLOC-IRM. In (1), the introduced consensus-constrained lower-level problem might compromise the optimality of the lower-level solution \\(\\mathbf{w}^{*}(\\pmb{\\theta})\\) to the per-environment (unconstrained) risk minimization problem (I), i.e., violating the per-environment stationarity" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.103, + 0.827, + 0.19 + ], + "angle": 0, + "content": "\\(\\| \\nabla_{\\mathbf{w}}\\ell^{(e)}(\\mathbf{w}^* (\\pmb {\\theta})\\circ \\pmb {\\theta})\\| _2^2\\) . Figure A3 justifies this side effect. As we can see, the per-environment stationarity is hardly attained at the consensus prediction when solving (1). This is not surprising since a constrained optimization solution might not be a stationary solution to minimizing the (unconstrained) objective function. To alleviate this limitation, we improve (1) by explicitly promoting the per-environment stationarity \\(\\| \\nabla_{\\mathbf{w}}\\ell^{(e)}(\\mathbf{w}^* (\\pmb {\\theta})\\circ \\pmb {\\theta})\\| _2^2\\) in its upper-level problem through optimization over \\(\\pmb{\\theta}\\) . 
This leads to BLOC-IRM (BLO with Consensus IRM):" + }, + { + "type": "equation", + "bbox": [ + 0.249, + 0.195, + 0.825, + 0.219 + ], + "angle": 0, + "content": "\\[\n\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\left[ \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right) + \\gamma \\| \\nabla_ {\\mathbf {w}} \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right) \\| _ {2} ^ {2} \\right] \\tag {BLOC-IRM}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.251, + 0.217, + 0.565, + 0.231 + ], + "angle": 0, + "content": "subject to Lower-level problems (I) and (II) in (1)," + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.235, + 0.825, + 0.295 + ], + "angle": 0, + "content": "where \\(\\gamma > 0\\) is a regularization parameter like IRMv0. Assisted by the (upper-level) prediction stationarity regularization, the consensus prediction (II) indeed simultaneously minimizes the risks of all the environments, supported by the empirical evidence that the convergence of \\(\\|\\nabla_{\\mathbf{w}}\\ell^{(e)}(\\mathbf{w}^*(\\boldsymbol{\\theta}) \\circ \\boldsymbol{\\theta})\\|_2^2\\) towards 0 along each environment's optimization path (see Figure A3)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.3, + 0.825, + 0.375 + ], + "angle": 0, + "content": "Further, we elaborate on how the BLOC-IRM problem can be effectively solved using an ordinary BLO solver. First, it is worth noting that although both (IRM) and BLOC-IRM are BLO problems, the latter is easier to solve since the lower-level constraint (I) is unconstrained and separable over environments, and the consensus operation (II) is linear. 
Based on these characteristics, the implicit gradient \\(\\frac{dw^{*}(\\theta)}{d\\theta}\\) can be directly computed as" + }, + { + "type": "equation", + "bbox": [ + 0.26, + 0.38, + 0.825, + 0.416 + ], + "angle": 0, + "content": "\\[\n\\frac {d \\mathbf {w} ^ {*} (\\boldsymbol {\\theta})}{d \\boldsymbol {\\theta}} = \\frac {1}{N} \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\frac {d \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta})}{d \\boldsymbol {\\theta}}, \\text {s u b j e c t t o} \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta}) \\in \\underset {\\bar {\\mathbf {w}} ^ {(e)}} {\\arg \\min } \\ell^ {(e)} \\left(\\bar {\\mathbf {w}} ^ {(e)} \\circ \\boldsymbol {\\theta}\\right). \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.428, + 0.479, + 0.621 + ], + "angle": 0, + "content": "Since the above lower-level problem is unconstrained, we can call the standard arg min differentiating method, such as implicit function approach (Gould et al., 2016) or gradient unrolling (Liu et al., 2021) to compute \\(\\frac{d\\mathbf{w}^{(e)}(\\boldsymbol{\\theta})}{d\\boldsymbol{\\theta}}\\). In our work, we adopt the gradient unrolling method, which approximates \\(\\mathbf{w}^{(e)}(\\boldsymbol{\\theta})\\) by a \\(K\\)-step gradient descent solution, noted by \\(\\mathbf{w}_K^{(e)}(\\boldsymbol{\\theta})\\) and then leverages automatic differentiation (AD) to compute the derivative from \\(\\mathbf{w}_K^{(e)}(\\boldsymbol{\\theta})\\) to the variable \\(\\boldsymbol{\\theta}\\). Figure 3 shows the working pipeline of BLOC-IRM and its comparison to original IRM and" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.621, + 0.825, + 0.692 + ], + "angle": 0, + "content": "IRM-GAME methods. We use \\( K = 1 \\) for the lower-level problem throughout our experiments. We refer readers to Appendix B.2 for more algorithmic details. 
We also explore the performance of our proposed BLOC-IRM with various regularization terms, based on the penalties used in the existing literature. We show the best performance is always achieved when the stationarity is penalized in the upper-level (see Table A3)." + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.426, + 0.825, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.489, + 0.574, + 0.825, + 0.613 + ], + "angle": 0, + "content": "Figure 3: Schematic overview of BLOC-IRM over two training environments (red and green), and its comparison to IRM and IRM-GAME." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.711, + 0.329, + 0.726 + ], + "angle": 0, + "content": "6 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.742, + 0.825, + 0.786 + ], + "angle": 0, + "content": "In this section, we begin by introducing some key experiment setups (with details in Appendix C.1), and then empirically show the effectiveness of our proposed IRM training and evaluation improvements over existing IRM methods across various datasets, models, and learning environments." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.801, + 0.367, + 0.815 + ], + "angle": 0, + "content": "6.1 EXPERIMENT SETUPS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Datasets and models. Our experiments are conducted over 7 datasets as referenced and shown in Tables 1, 2. Among these datasets, COLORED-MNIST, COLORED-FMNIST, CIFAR-MNIST, and COLORED-OBJECT are similarly curated, mimicking the pipeline of COLORED-MNIST (Arjovsky et al., 2019), by introducing an environment bias parameter (e.g., \\(\\beta\\) for COLORED-MNIST in Section 4) to customize the level of spurious correlation (as shown in Table 2) in different environments. In the CELEBA dataset, we choose the face attribute 'smiling' (vs. 
'non-smiling') as the core feature aimed for classification, and regard another face attribute 'hair color' ('blond' vs. 'dark') as the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.231 + ], + "angle": 0, + "content": "source of spurious correlation imposed on the core feature. By controlling the level of spurious correlation, we then create different training/testing environments in CELEBA. Furthermore, we study PACS and VLCS datasets, which were used to benchmark domain generalization ability in the real world (Borlino et al., 2021). It was recently shown by Gulrajani & Lopez-Paz (2020) that for these datasets, ERM could even be better than IRMv1. Yet, we will show that our proposed BLOC-IRM is a promising domain generalization method, which outperforms all the IRM baselines and ERM in practice. In addition, we follow Arjovsky et al. (2019) in adopting multi-layer perceptron (MLP) as the model for resolving COLORED-MNIST and COLORED-FMNIST problems. In the other more complex datasets, we use the ResNet-18 architecture (He et al., 2016)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.236, + 0.825, + 0.307 + ], + "angle": 0, + "content": "Baselines and implementation. Our baselines include 7 IRM variants (Table 1) and ERM, which are implemented using their official repositories if available (see Appendix C.2). Unless specified otherwise, our training pipeline uses the small-batch training setting. By default, we use the batch size of 1024 for COLORED-MNIST and COLORED-FMNIST, and 256 for other datasets. In Section 6.2 below, we also do a thorough comparison of large-batch vs small-batch IRM training." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.313, + 0.499, + 0.466 + ], + "angle": 0, + "content": "Evaluation setup. As proposed in Section 4, we use the multi-environment evaluation metric unless specified otherwise. To capture both the accuracy and variance of invariant predictions across multiple testing environments, the average accuracy and the accuracy gap (the difference of the best-case and worst-case accuracy) are measured for IRM methods. The resulting performance is reported in the form \\( a \\pm b \\), with mean \\( a \\) and standard deviation \\( b \\) computed across 10 independent trials." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.487, + 0.376, + 0.501 + ], + "angle": 0, + "content": "6.2 EXPERIMENT RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.514, + 0.498, + 0.612 + ], + "angle": 0, + "content": "Small-batch training improves all existing IRM methods on COLORED-MNIST & COLORED-FMNIST. Recall from Section 3 that all the existing IRM methods (Table 1) adopt full-batch IRM training on COLORED-MNIST & COLORED-FMNIST, which raises the large-batch training problem. In Table 4, we conduct" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.317, + 0.825, + 0.431 + ], + "angle": 0, + "content": "Table 4: Performance of existing IRM methods in large and small-batch settings. GRAYSCALE refers to ERM on uncolored data, which yields the best prediction (supposing no spurious correlation during training). The IRM performance is evaluated by average accuracy ('Avg Acc') and accuracy gap ('Acc Gap'), in the format mean±std. A higher Avg Acc and lower Acc Gap is preferred. The theoretically optimal performance is \\(75\\%\\) (Arjovsky et al., 2019)." + }, + { + "type": "table", + "bbox": [ + 0.511, + 0.432, + 0.825, + 0.597 + ], + "angle": 0, + "content": "
Dataset Metrics(%)COLORED-MNISTCOLORED-FMNIST
Avg Acc (↑)Acc Gap (↓)Avg Acc (↑)Acc Gap (↓)
Large BatchGRAYSCALE73.39±0.160.32±0.0374.05±0.090.13±0.04
ERM49.19±1.8990.72±2.0849.77±1.7188.62±2.49
IRMv167.13±0.333.43±0.1467.19±0.223.35±0.11
IRMv065.39±0.344.69±0.1866.44±0.283.53±0.13
IRM-GAME65.69±0.428.75±0.1465.91±0.293.74±0.09
REX67.42±0.293.76±0.0767.82±0.313.26±0.16
BIRM67.93±0.313.81±0.1167.75±0.263.81±0.11
SPARSEIRM67.72±0.283.65±0.0867.89±0.303.12±0.15
FISHR67.49±0.394.37±0.1067.33±0.244.49±0.16
Small BatchIRMv168.33±0.312.04±0.0568.76±0.311.45±0.09
IRMv068.37±0.281.32±0.0969.07±0.271.36±0.06
IRM-GAME67.73±0.241.67±0.1467.49±0.321.82±0.13
REX68.42±0.291.65±0.0768.66±0.221.29±0.08
BIRM68.71±0.211.35±0.0968.64±0.321.44±0.13
SPARSEIRM68.81±0.251.72±0.0568.29±0.221.28±0.15
FISHR68.69±0.192.13±0.0868.79±0.171.77±0.10
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.612, + 0.827, + 0.835 + ], + "angle": 0, + "content": "a thorough comparison between the originally-used full-batch IRM methods and their small-batch counterparts. In addition, we present the performance of ERM and ERM-grayscale (we call it 'grayscale'), where the latter is ERM on uncolored data. In the absence of any spurious correlation in the training set, grayscale gives the best performance. As discussed in Section 4 & 6.1, the IRM performance is measured by the average accuracy and the accuracy gap across 19 testing environments, parameterized by the environment bias parameter \\(\\beta \\in \\{0.05,\\dots ,0.95\\}\\). We make some key observations from Table 4. First, small batch size helps improve all the existing IRM methods consistently, evidenced by the \\(1\\% \\sim 3\\%\\) improvement in average accuracy. Second, the small-batch IRM training significantly reduces the variance of invariant predictions across different testing environments, evidenced by the decreased accuracy gap. This implies that the small-batch IRM training can also help resolve the limitation of multi-environment evaluation for the existing IRM methods, like the sensitivity of IRM-GAME accuracy to \\(\\beta\\) in Figure 2. Third, we observe that IRMv0, which does not seem to be useful in the large batch setting, becomes quite competitive with the other baselines in the small-batch setting. Thus, large-batch could suppress the IRM performance for some methods. In the rest of the experiments, we stick to the small-batch implementation of IRM training." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.825, + 0.926 + ], + "angle": 0, + "content": "BLOC-IRM outperforms IRM baselines in various datasets. Next, Table 5 demonstrates the effectiveness of our proposed BLOC-IRM approach versus ERM and existing IRM baselines across all the 7 datasets listed in Table 2. 
Evidently, BLOC-IRM yields a higher average accuracy compared to all the baselines, together with the smallest accuracy gap in most cases. Additionally, we observe that CELEBA, PACS and VLCS are much more challenging datasets for capturing invariance through IRM, as evidenced by the small performance gap between ERM and IRM methods. In" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.113, + 0.825, + 0.152 + ], + "angle": 0, + "content": "Table 5: IRM performance comparison between BLOC-IRM and other baselines. We use ResNet-18 (He et al., 2016) for all the datasets. The evaluation setup is consistent with Table 4, and the best performance per-dataset is highlighted in bold. We present the results with the full dataset list in Table A1." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.153, + 0.825, + 0.264 + ], + "angle": 0, + "content": "
Algorithm Metrics (%)COLORED-OBJECTCIFAR-MNISTCELEBAVLCSPACS
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
ERM41.11±1.4486.43±2.8940.39±1.3285.53±2.3372.38±0.2910.73±0.3663.23±0.2312.39±0.3569.95±0.3514.32±0.75
IRMv164.42±0.214.18±0.2961.49±0.297.17±0.3372.49±0.3810.15±0.2762.72±0.2912.74±0.2768.93±0.3314.99±0.51
IRMv062.39±0.255.36±0.3160.14±0.188.83±0.3972.42±0.3510.43±0.3862.59±0.3212.99±0.3668.72±0.2915.29±0.71
IRM-GAME62.88±0.345.59±0.2860.44±0.316.72±0.4172.18±0.4412.32±0.4162.31±0.3813.37±0.6268.12±0.2215.77±0.66
REX63.37±0.355.42±0.3162.32±0.245.55±0.3272.34±0.2610.31±0.2363.19±0.3112.87±0.3169.43±0.3415.31±0.67
BIRM65.11±0.273.31±0.2262.99±0.355.23±0.3672.93±0.289.92±0.3363.33±0.4012.13±0.2369.34±0.2515.76±0.49
SPARSEIRM64.97±0.393.97±0.2562.16±0.294.14±0.3172.42±0.339.79±0.2162.86±0.2612.79±0.3569.52±0.3915.81±0.82
FISHR64.07±0.234.41±0.2961.79±0.255.55±0.2172.89±0.259.42±0.3263.44±0.3711.93±0.4270.21±0.2214.52±0.43
BLOC-IRM65.97±0.334.10±0.3663.69±0.324.89±0.3673.35±0.328.79±0.2163.62±0.3511.55±0.3270.31±0.2114.73±0.65
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.827, + 0.372 + ], + "angle": 0, + "content": "particular, all the IRM methods, except FISHR and BLOC-IRM, could even be worse than ERM on PACS and VLCS. Here, we echo and extend the findings of Krueger et al. (2021, Section 4.3). However, we also show that BLOC-IRM is a quite competitive IRM variant when applied to realistic domain generalization datasets. We also highlight that the CELEBA experiment is newly constructed and performed in our work for invariance evaluation. Like PACS and VLCS, this experiment also shows that ERM is a strong baseline, and among IRM-based methods, BLOC-IRM is the best-performing, both in terms of accuracy and variance of invariant predictions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.377, + 0.603, + 0.657 + ], + "angle": 0, + "content": "IRM against model size and training environment variation. Furthermore, we investigate the effect of model size and training environment diversity on the IRM performance. The recent works (Lin et al., 2022; Zhou et al., 2022b) have empirically shown that IRMv1 may suffer a significant performance loss when trained over large-sized neural network models, and thus developed BIRM and SPARSEIRM approaches as advancements of IRMv1. Inspired by these works, Figure 4 presents the sensitivity of invariant prediction to model size for different IRM methods on COLORED-MNIST. Here the model size is controlled by the dimension of the intermediate layer (denoted by \\(d\\)) in MLP, and the default dimension is \\(d = 390\\) (i.e., the vertical dotted line in Figure 4), which was used in (Arjovsky et al., 2019) and followed in the subsequent literature. As we can see, when \\(d > 390\\), nearly all the studied IRM methods (including BLOC-IRM) suffer a performance drop. 
Yet, as \\(d \\geq 800\\), from the perspective of prediction accuracy and model resilience together, the top-3 best IRM methods with model size resilience are BIRM, SPARSEIRM, and BLOC-IRM, although we did not intentionally design BLOC-IRM to resist performance degradation against model" + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.378, + 0.816, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.611, + 0.508, + 0.825, + 0.636 + ], + "angle": 0, + "content": "Figure 4: IRM performance on COLORED-MNIST against the layer dimension in MLP. The dotted line represents the default dimension \\((d = 390)\\) used in the literature. The invariant prediction accuracy is presented via the dot line (mean). The results are based on 10 independent trials and we report the variance in Figure A4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.661, + 0.827, + 0.775 + ], + "angle": 0, + "content": "We also show more experiment results in the Appendix. In Table A2, we study IRM with different numbers of training environment configurations and observe the consistent improvement of BLOC-IRM over other baselines. In Table A4 we show that the performance of invariant prediction degrades, if additional covariate shifts (class, digit, and color imbalances on COLORED-MNIST) are imposed on the training environments following Krueger et al. (2021, Section 4.1) and also demonstrate that BLOC-IRM maintains the accuracy improvement over baselines with each variation. In Table A5, we compare the performance of different methods in the failure cases of IRM pointed out by (Kamath et al., 2021) and show the consistent improvement brought by BLOC-IRM." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.79, + 0.321, + 0.805 + ], + "angle": 0, + "content": "7 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.821, + 0.825, + 0.906 + ], + "angle": 0, + "content": "In this work, we investigate existing IRM methods and reveal long-standing but chronically overlooked challenges involving IRM training and evaluation, which may lead to sub-optimal solutions and incomplete invariance assessment. As a remedy, we propose small-batch training and multi-environment evaluation. We reexamine the IRM-GAME method through the lens of consensus-constrained BLO, and develop a novel IRM variant, termed BLOC-IRM. We conducted extensive experiments on 7 datasets and demonstrate that BLOC-IRM consistently improves all baselines." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.176, + 0.031, + 0.48, + 0.045 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.104, + 0.357, + 0.117 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENT" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.134, + 0.825, + 0.19 + ], + "angle": 0, + "content": "The work of Y. Zhang and S. Liu was partially supported by National Science Foundation (NSF) Grant IIS-2207052. The work of M. Hong was supported by NSF grants CNS-2003033 and CIF-1910385. The computing resources used in this work were partially supported by the MIT-IBM Watson AI Lab and the Institute for Cyber-Enabled Research (ICER) at Michigan State University." 
+ }, + { + "type": "title", + "bbox": [ + 0.176, + 0.211, + 0.437, + 0.225 + ], + "angle": 0, + "content": "REPRODUCIBILITY STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.242, + 0.825, + 0.409 + ], + "angle": 0, + "content": "The authors have made an extensive effort to ensure the reproducibility of algorithms and results presented in the paper. First, the details of the experiment settings have been elaborated in Section 6.1 and Appendix C.1. In this paper, seven datasets are studied and the environment generation process for each dataset is described with details in Appendix A. The evaluation metrics are also clearly introduced in Section 3. Second, eight IRM-oriented methods (including our proposed BLOC-IRM) are studied in this work. The implementation details of all the baseline methods are clearly presented in Appendix C.2, including the hyper-parameters tuning, model configuration, and used code bases. For our proposed BLOC-IRM, we include all the implementation details in Section 5 and Appendix B.2, including training pipeline in Figure 3 and the pseudo-code in Algorithm A1. Third, all the results are based on 10 independent trials with different random seeds. The standard deviations are also reported to ensure fair comparisons across different methods. Fourth, codes are available at https://github.com/OPTML-Group/BLOC-IRM." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.948, + 0.508, + 0.959 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.103, + 0.289, + 0.118 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.125, + 0.826, + 0.153 + ], + "angle": 0, + "content": "Faruk Ahmed, Yoshua Bengio, Harm van Seijen, and Aaron Courville. 
Systematic generalisation with group invariant predictions. In International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.16, + 0.825, + 0.188 + ], + "angle": 0, + "content": "Kartik Ahuja, Karthikeyan Shanmugam, Kush Varshney, and Amit Dhurandhar. Invariant risk minimization games. In International Conference on Machine Learning, pp. 145-155. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.196, + 0.825, + 0.235 + ], + "angle": 0, + "content": "Kartik Ahuja, Jun Wang, Amit Dhurandhar, Karthikeyan Shanmugam, and Kush R Varshney. Empirical or invariant risk minimization? a sample complexity perspective. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.244, + 0.825, + 0.271 + ], + "angle": 0, + "content": "Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.279, + 0.825, + 0.306 + ], + "angle": 0, + "content": "Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using meta-regularization. Advances in neural information processing systems, 31, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.314, + 0.825, + 0.342 + ], + "angle": 0, + "content": "Sara Beery, Grant Van Horn, and Pietro Perona. Recognition in terra incognita. In Proceedings of the European conference on computer vision (ECCV), pp. 456-473, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.35, + 0.825, + 0.388 + ], + "angle": 0, + "content": "Francesco Cappio Borlino, Antonio D'Innocente, and Tatiana Tommasi. Rethinking domain generalization baselines. In 2020 25th International Conference on Pattern Recognition (ICPR), pp. 9227-9233. IEEE, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.398, + 0.826, + 0.438 + ], + "angle": 0, + "content": "Fabio M Carlucci, Antonio D'Innocente, Silvia Bucci, Barbara Caputo, and Tatiana Tommasi. Domain generalization by solving jigsaw puzzles. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2229-2238, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.446, + 0.825, + 0.472 + ], + "angle": 0, + "content": "Shiyu Chang, Yang Zhang, Mo Yu, and Tommi Jaakkola. Invariant rationalization. In International Conference on Machine Learning, pp. 1448-1458. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.481, + 0.825, + 0.509 + ], + "angle": 0, + "content": "Yongqiang Chen, Kaiwen Zhou, Yatao Bian, Binghui Xie, Kaili Ma, Yonggang Zhang, Han Yang, Bo Han, and James Cheng. Pareto invariant risk minimization. arXiv preprint arXiv:2206.07766, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.517, + 0.825, + 0.544 + ], + "angle": 0, + "content": "Yo Joong Choe, Jiyeon Ham, and Kyubyong Park. An empirical study of invariant risk minimization. arXiv preprint arXiv:2004.05007, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.552, + 0.825, + 0.579 + ], + "angle": 0, + "content": "Pim De Haan, Dinesh Jayaraman, and Sergey Levine. Causal confusion in imitation learning. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.588, + 0.825, + 0.614 + ], + "angle": 0, + "content": "Alex J DeGrave, Joseph D Janizek, and Su-In Lee. Ai for radiographic Covid-19 detection selects shortcuts over signal. Nature Machine Intelligence, 3(7):610-619, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.623, + 0.825, + 0.661 + ], + "angle": 0, + "content": "Qi Dou, Daniel Coelho de Castro, Konstantinos Kamnitsas, and Ben Glocker. Domain generalization via model-agnostic learning of semantic features. 
Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.671, + 0.825, + 0.698 + ], + "angle": 0, + "content": "Yana Dranker, He He, and Yonatan Belinkov. Irm—when it works and when it doesn't: A test case of natural language inference. Advances in Neural Information Processing Systems, 34:18212-18224, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.706, + 0.825, + 0.733 + ], + "angle": 0, + "content": "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. arXiv preprint arXiv:2010.01412, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.742, + 0.826, + 0.78 + ], + "angle": 0, + "content": "Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Lavoille, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. The journal of machine learning research, 17(1):2096-2030, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.789, + 0.825, + 0.828 + ], + "angle": 0, + "content": "Robert Geirhos, Jorn-Henrik Jacobsen, Claudio Michaelis, Richard Zemel, Wieland Brendel, Matthias Bethge, and Felix A Wichmann. Shortcut learning in deep neural networks. Nature Machine Intelligence, 2(11): 665-673, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.838, + 0.825, + 0.877 + ], + "angle": 0, + "content": "Stephen Gould, Basura Fernando, Anoop Cherian, Peter Anderson, Rodrigo Santa Cruz, and Edison Guo. On differentiating parameterized argmin and argmax problems with application to bi-level optimization. arXiv preprint arXiv:1607.05447, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.885, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. 
Accurate, large minibatch SGD: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.125, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.104, + 0.826, + 0.133 + ], + "angle": 0, + "content": "Ishaan Gulrajani and David Lopez-Paz. In search of lost domain generalization. arXiv preprint arXiv:2007.01434, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.826, + 0.17 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.177, + 0.826, + 0.205 + ], + "angle": 0, + "content": "Neal Jean, Marshall Burke, Michael Xie, W Matthew Davis, David B Lobell, and Stefano Ermon. Combining satellite imagery and machine learning to predict poverty. Science, 353(6301):790-794, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.214, + 0.826, + 0.242 + ], + "angle": 0, + "content": "Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Domain extrapolation via regret minimization. arXiv preprint arXiv:2006.03908, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.25, + 0.826, + 0.29 + ], + "angle": 0, + "content": "Pritish Kamath, Akilesh Tangella, Danica Sutherland, and Nathan Srebro. Does invariant risk minimization capture invariance? In International Conference on Artificial Intelligence and Statistics, pp. 4069-4077. PMLR, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.299, + 0.826, + 0.339 + ], + "angle": 0, + "content": "Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. arXiv preprint arXiv:1609.04836, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.348, + 0.826, + 0.375 + ], + "angle": 0, + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.385, + 0.826, + 0.425 + ], + "angle": 0, + "content": "Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsbramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Irena Gao, et al. Wilds: A benchmark of in-the-wild distribution shifts. In International Conference on Machine Learning, pp. 5637-5664. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.433, + 0.826, + 0.461 + ], + "angle": 0, + "content": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Communications of the ACM, 60(6):84-90, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.47, + 0.826, + 0.51 + ], + "angle": 0, + "content": "David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In International Conference on Machine Learning, pp. 5815-5826. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.519, + 0.826, + 0.547 + ], + "angle": 0, + "content": "Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy M Hospedales. Deeper, broader and artier domain generalization. In Proceedings of the IEEE international conference on computer vision, pp. 5542-5550, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.556, + 0.826, + 0.595 + ], + "angle": 0, + "content": "Da Li, Jianshu Zhang, Yongxin Yang, Cong Liu, Yi-Zhe Song, and Timothy M Hospedales. Episodic training for domain generalization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 1446-1455, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.604, + 0.826, + 0.633 + ], + "angle": 0, + "content": "Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. Advances in neural information processing systems, 31, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.641, + 0.826, + 0.669 + ], + "angle": 0, + "content": "Yong Lin, Qing Lian, and Tong Zhang. An empirical study of invariant risk minimization on deep models. In ICML 2021 Workshop on Uncertainty and Robustness in Deep Learning, pp. 7, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.678, + 0.826, + 0.706 + ], + "angle": 0, + "content": "Yong Lin, Hanze Dong, Hao Wang, and Tong Zhang. Bayesian invariant risk minimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16021-16030, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.714, + 0.826, + 0.753 + ], + "angle": 0, + "content": "Risheng Liu, Jiaxin Gao, Jin Zhang, Deyu Meng, and Zhouchen Lin. Investigating bi-level optimization for learning and vision from a unified perspective: A survey and beyond. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.763, + 0.826, + 0.791 + ], + "angle": 0, + "content": "Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), December 2015." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.799, + 0.826, + 0.827 + ], + "angle": 0, + "content": "Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. Learning transferable features with deep adaptation networks. In International conference on machine learning, pp. 97-105. PMLR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.836, + 0.826, + 0.876 + ], + "angle": 0, + "content": "Hyeonseob Nam, HyunJae Lee, Jongchan Park, Wonjun Yoon, and Donggeun Yoo. Reducing domain gap by reducing style bias. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8690-8699, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.885, + 0.826, + 0.924 + ], + "angle": 0, + "content": "Jonas Peters, Peter Buhlmann, and Nicolai Meinshausen. Causal inference by using invariant prediction: identification and confidence intervals. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 78(5):947-1012, 2016." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.104, + 0.826, + 0.144 + ], + "angle": 0, + "content": "Alexandre Rame, Coretin Dancette, and Matthieu Cord. Fishr: Invariant gradient variances for out-of-distribution generalization. In International Conference on Machine Learning, pp. 18347-18377. PMLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.15, + 0.826, + 0.178 + ], + "angle": 0, + "content": "Elan Rosenfeld, Pradeep Ravikumar, and Andrej Risteski. The risks of invariant risk minimization. arXiv preprint arXiv:2010.05761, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.183, + 0.827, + 0.223 + ], + "angle": 0, + "content": "Shiori Sagawa, Pang Wei Koh, Tatsunori B Hashimoto, and Percy Liang. Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization. arXiv preprint arXiv:1911.08731, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.228, + 0.827, + 0.268 + ], + "angle": 0, + "content": "Shiori Sagawa, Aditi Raghunathan, Pang Wei Koh, and Percy Liang. An investigation of why overparameterization exacerbates spurious correlations. In International Conference on Machine Learning, pp. 8346-8356. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.274, + 0.827, + 0.313 + ], + "angle": 0, + "content": "Harshay Shah, Kaustav Tamuly, Aditi Raghunathan, Prateek Jain, and Praneeth Netrapalli. The pitfalls of simplicity bias in neural networks. Advances in Neural Information Processing Systems, 33:9573-9585, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.32, + 0.825, + 0.347 + ], + "angle": 0, + "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.353, + 0.825, + 0.381 + ], + "angle": 0, + "content": "Yi Sun, Xiaogang Wang, and Xiaou Tang. Deep learning face representation from predicting 10,000 classes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1891-1898, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.386, + 0.825, + 0.412 + ], + "angle": 0, + "content": "Antonio Torralba and Alexei A Efros. Unbiased look at dataset bias. In CVPR 2011, pp. 1521-1528. IEEE, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.419, + 0.825, + 0.446 + ], + "angle": 0, + "content": "Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. 
Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.452, + 0.825, + 0.491 + ], + "angle": 0, + "content": "Jindong Wang, Cuiling Lan, Chang Liu, Yidong Ouyang, Tao Qin, Wang Lu, Yiqiang Chen, Wenjun Zeng, and Philip Yu. Generalizing to unseen domains: A survey on domain generalization. IEEE Transactions on Knowledge and Data Engineering, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.497, + 0.825, + 0.524 + ], + "angle": 0, + "content": "Chuanlong Xie, Haotian Ye, Fei Chen, Yue Liu, Rui Sun, and Zhenguo Li. Risk variance penalization. arXiv preprint arXiv:2006.07544, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.53, + 0.825, + 0.558 + ], + "angle": 0, + "content": "Renzhe Xu, Xingxuan Zhang, Peng Cui, Bo Li, Zheyan Shen, and Jiazheng Xu. Regulatory instruments for fair personalized pricing. In Proceedings of the ACM Web Conference 2022, pp. 4-15, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.564, + 0.825, + 0.591 + ], + "angle": 0, + "content": "Yilun Xu and Tommi Jaakkola. Learning representations that support robust transfer of predictors. arXiv preprint arXiv:2110.09940, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.596, + 0.825, + 0.623 + ], + "angle": 0, + "content": "Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.629, + 0.825, + 0.656 + ], + "angle": 0, + "content": "Yang You, Igor Gitman, and Boris Ginsburg. Scaling SGD batch size to 32k for imagenet training. arXiv preprint arXiv:1708.03888, 6, 2017b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.662, + 0.825, + 0.69 + ], + "angle": 0, + "content": "Yang You, Zhao Zhang, Cho-Jui Hsieh, James Demmel, and Kurt Keutzer. Imagenet training in minutes. 
In Proceedings of the 47th International Conference on Parallel Processing, pp. 1. ACM, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.695, + 0.825, + 0.735 + ], + "angle": 0, + "content": "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes. arXiv preprint arXiv:1904.00962, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.741, + 0.825, + 0.78 + ], + "angle": 0, + "content": "Dinghuai Zhang, Kartik Ahuja, Yilun Xu, Yisen Wang, and Aaron Courville. Can subnetwork structure be the key to out-of-distribution generalization? In International Conference on Machine Learning, pp. 12356-12367. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.786, + 0.825, + 0.826 + ], + "angle": 0, + "content": "Gaoyuan Zhang, Songtao Lu, Yihua Zhang, Xiangyi Chen, Pin-Yu Chen, Quanfu Fan, Lee Martie, Lior Horesh, Mingyi Hong, and Sijia Liu. Distributed adversarial training to robustify deep neural networks at scale. In Uncertainty in Artificial Intelligence, pp. 2353-2363. PMLR, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.832, + 0.825, + 0.859 + ], + "angle": 0, + "content": "Xingxuan Zhang, Linjun Zhou, Renzhe Xu, Peng Cui, Zheyan Shen, and Haoxin Liu. Nico++: Towards better benchmarking for domain generalization. arXiv preprint arXiv:2204.08040, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.865, + 0.825, + 0.892 + ], + "angle": 0, + "content": "Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.898, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Xiao Zhou, Yong Lin, Weizhong Zhang, and Tong Zhang. Sparse invariant risk minimization. 
In International Conference on Machine Learning, pp. 27222-27244. PMLR, 2022b." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.445, + 0.102, + 0.554, + 0.119 + ], + "angle": 0, + "content": "APPENDIX" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.139, + 0.39, + 0.154 + ], + "angle": 0, + "content": "A DATASET SELECTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.169, + 0.827, + 0.323 + ], + "angle": 0, + "content": "Compared to existing work, we expand the dataset types for evaluating the performance of different IRM methods (see Table 2). In addition to the most commonly-used benchmark datasets COLORED-MNIST (Arjovsky et al., 2019) and COLORED-FMNIST (Ahuja et al., 2020), we also consider the datasets CIFAR-MNIST (Lin et al., 2021; Shah et al., 2020) and COLORED-OBJECT (Ahmed et al., 2020; Zhang et al., 2021), which impose artificial spurious correlations, MNIST digit number and object color, into the original CIFAR-10 and COCO Detection datasets, respectively. Furthermore, we consider other three real-world datasets CELEBA (Liu et al., 2015), PACS (Li et al., 2017) and VLCS (Torralba & Efros, 2011), without imposing artificial spurious correlations. Notably, CELEBA was first formalized and introduced to benchmark IRM performance. The recent work (Gulrajani & Lopez-Paz, 2020) showed that when carefully implemented, ERM could outperform IRMv1 in PACS and VLCS. Thus, we regard them as challenging datasets to capture invariance." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.329, + 0.825, + 0.372 + ], + "angle": 0, + "content": "For COLORED-OBJECT dataset, we strictly follow the setting adopted in (Lin et al., 2022) to generate the spurious features. For CIFAR-MNIST we use the class \"bird\" and \"plane\" in the dataset CIFAR as the invariant feature, while the digit \"0\" and \"1\" in MNIST as the spurious correlation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.378, + 0.827, + 0.421 + ], + "angle": 0, + "content": "CELEBA dataset is, for the first time, introduced to measure IRM performance. We select the attribute \"Smiling\" as the invariant label and use the attribute \"Hair Color\" (blond and black hair) to create a spurious correlation in each environment." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.44, + 0.441, + 0.456 + ], + "angle": 0, + "content": "B IMPLEMENTATION DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.47, + 0.642, + 0.485 + ], + "angle": 0, + "content": "B.1 DETAILS ON LARGE-BATCH OPTIMIZATION ENHANCEMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.496, + 0.825, + 0.553 + ], + "angle": 0, + "content": "\\(\\spadesuit\\) IRM-LSGD: We first integrate large-batch SGD (LSGD) with IRM. Following (Goyal et al., 2017), we make two main modifications: (1) scaling up learning rate linearly with batch size, and (2) prepending a warm-up optimization phase to IRM training. We call the LSGD-baked IRM variant IRM-LSGD." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.559, + 0.827, + 0.616 + ], + "angle": 0, + "content": "\\(\\spadesuit\\) IRM-LALR: Next, we adopt layerwise adaptive learning rate (LALR) in IRM training. Following (You et al., 2019), we advance the learning rate scheduler by assigning each layer of a neural network-based prediction model with an adaptive learning rate (i.e., proportional to the norm of updated model weights per layer). 
More specifically, the model parameter update rule becomes:" + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.496, + 0.827, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.378, + 0.618, + 0.825, + 0.652 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} _ {t + 1, i} = \\boldsymbol {\\theta} _ {t, i} - \\frac {\\tau \\left(\\left\\| \\boldsymbol {\\theta} _ {t , i} \\right\\| _ {2} ^ {2}\\right) \\cdot \\eta_ {t}}{\\left\\| \\mathbf {u} _ {t , i} \\right\\| _ {2} ^ {2}} \\mathbf {u} _ {t, i}, \\tag {A1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.655, + 0.827, + 0.717 + ], + "angle": 0, + "content": "where \\(\\pmb{\\theta}_{t,i}\\) denotes the \\(i\\)-th layer of the model parameters at iteration \\(t\\), and \\(\\mathbf{u}_{t,i}\\) represents the first-order gradient of the corresponding layer-wise model parameters. We use \\(\\tau(\\|\\pmb{\\theta}_{t,i}\\|_2^2) = \\min\\{\\max\\{\\|\\pmb{\\theta}_{t,i}\\|_2^2, c_l\\}, c_u\\}\\) as the scaling factor of the adaptive learning rate \\(\\frac{\\eta_t}{\\|\\mathbf{u}_{t,i}\\|}\\). We use \\(c_l = 0\\) and \\(c_u = 1\\) in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.723, + 0.825, + 0.791 + ], + "angle": 0, + "content": "\\(\\spadesuit\\) IRM-SAM: Lastly, we leverage sharpness-aware minimization (SAM) to simultaneously minimize the IRM loss and the loss sharpness. The latter is achieved by explicitly penalizing the worst-case training loss of model weights when facing small weight perturbations. This yields a wide minimum within a flat loss landscape. 
More specifically, the sharpness-aware loss can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.317, + 0.794, + 0.825, + 0.821 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\boldsymbol {\\theta}} \\ell^ {\\mathrm {S A M}} (\\boldsymbol {\\theta}), \\quad \\text {w h e r e} \\quad \\ell^ {\\mathrm {S A M}} (\\boldsymbol {\\theta}) = \\max _ {\\| \\epsilon \\| _ {2} ^ {2} \\leq \\rho} \\ell (\\boldsymbol {\\theta} + \\boldsymbol {\\epsilon}), \\tag {A2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.825, + 0.825, + 0.855 + ], + "angle": 0, + "content": "where the parameter perturbation \\(\\epsilon\\) is subject to the perturbation constraint \\(\\| \\epsilon \\| _2^2\\leq \\rho\\). When applied to IRM, we replace the per-environment training loss with the SAM loss, and adopt the \\(\\rho = 0.001\\)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.87, + 0.436, + 0.884 + ], + "angle": 0, + "content": "B.2 BLOC-IRM IMPLEMENTATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "As described in Section 5, the BLOC-IRM algorithm solves the IRM problem with two optimization levels. We use 1-step gradient descent to get the lower-level solution. We retain the gradient" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.827, + 0.189 + ], + "angle": 0, + "content": "graph in PyTorch to enable auto differentiation. We assign each of the classification head \\(\\{\\mathbf{w}^{(e)}\\}\\) a separate optimizer and use the same learning rate as the feature extractor \\(\\theta\\). 
For COLORED-MNIST and COLORED-FMNIST, we adopt a learning rate of \\(2 \\times 10^{-3}\\) and use the Adam (Kingma & Ba, 2014) optimizer. As for other datasets, we use the multi-step learning rate scheduler with an initial learning rate of 0.1, which is consistent with other baselines. We adopt the same penalty weight of \\(10^{6}\\) as IRMv1 and IRMv0." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.205, + 0.364, + 0.22 + ], + "angle": 0, + "content": "Algorithm A1 BLOC-IRM" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.224, + 0.825, + 0.258 + ], + "angle": 0, + "content": "1: Initialization: Training data \\(\\{\\mathbf{x}^{(e)}\\}\\) from \\(N\\) environments, Model feature extractor \\(\\theta_0\\), and \\(N\\) model classification heads \\(\\{\\mathbf{w}_0^{(e)}\\}\\), learning rate \\(\\{\\eta_t\\}\\) series, penalty weight \\(\\{\\gamma_t\\}\\) series." + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.257, + 0.37, + 0.27 + ], + "angle": 0, + "content": "2: for Step \\( t = 0,1,\\ldots ,\\mathbf{d}\\mathbf{o} \\)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.27, + 0.635, + 0.285 + ], + "angle": 0, + "content": "3: Lower-level: update classification head for each environment:" + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.224, + 0.825, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.292, + 0.826, + 0.33 + ], + "angle": 0, + "content": "\\[\n\\forall e \\in \\mathcal {E} _ {\\mathrm {t r}}, \\quad \\tilde {\\mathbf {w}} _ {t + 1} ^ {(e)} = \\mathbf {w} _ {t} ^ {(e)} - \\eta_ {t} \\left. 
\\frac {d \\ell^ {(e)} (\\mathbf {w} \\odot \\boldsymbol {\\theta})}{d \\mathbf {w}} \\right| _ {\\boldsymbol {\\theta} = \\boldsymbol {\\theta} _ {t}, \\mathbf {w} = \\mathbf {w} _ {t} ^ {(e)}} \\tag {A3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.338, + 0.672, + 0.357 + ], + "angle": 0, + "content": "4: Consensus projection: \\(\\forall e\\in \\mathcal{E}_{\\mathrm{tr}},\\mathbf{w}_{t + 1}^{(e)} = \\mathbf{w}_{t + 1}^{*} = \\frac{1}{N}\\sum_{e\\in \\mathcal{E}_{\\mathrm{tr}}}\\tilde{\\mathbf{w}}_{t + 1}^{(e)}\\)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.356, + 0.632, + 0.371 + ], + "angle": 0, + "content": "5: Upper-level: update feature extractor with stationary penalty:" + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.338, + 0.672, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.388, + 0.826, + 0.425 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} _ {t + 1} = \\boldsymbol {\\theta} _ {t} - \\eta_ {t} \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\frac {d}{d \\boldsymbol {\\theta}} \\left(\\ell^ {(e)} (\\mathbf {w} \\odot \\boldsymbol {\\theta}) + \\gamma_ {t} \\| \\nabla_ {\\mathbf {w}} \\ell^ {(e)} (\\mathbf {w} \\circ \\boldsymbol {\\theta}) \\| _ {2} ^ {2}\\right) \\Big | _ {\\boldsymbol {\\theta} = \\boldsymbol {\\theta} _ {t}, \\mathbf {w} = \\mathbf {w} _ {t + 1} ^ {*}} \\tag {A4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.432, + 0.258, + 0.447 + ], + "angle": 0, + "content": "6: end for" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.48, + 0.374, + 0.495 + ], + "angle": 0, + "content": "C EXPERIMENTATION" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.513, + 0.376, + 0.527 + ], + "angle": 0, + "content": "C.1 ENVIRONMENT SETUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.541, + 0.825, + 0.597 + ], + "angle": 0, + "content": "As proposed in Section 4, we use the multi-environment evaluation metric unless specified otherwise. 
To capture both the accuracy and variance of invariant predictions across multiple testing environments, the average accuracy and the accuracy gap (the difference between the best-case and worst-case accuracy) are evaluated for IRM methods." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.603, + 0.825, + 0.66 + ], + "angle": 0, + "content": "Specifically, for the COLORED-MNIST, COLORED-FMNIST, COLORED-OBJECT, CIFAR-MNIST, and CELEBA dataset, we manually create 19 test environments with uniformly sampled bias parameter \\(\\beta \\in \\{0.05, 0.1, \\dots, 0.95\\}\\), where the environment bias parameter \\(\\beta\\) controls the spurious correlation (see Section 4 for more details)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.666, + 0.825, + 0.724 + ], + "angle": 0, + "content": "For VLCS and PACS datasets, the training and test sets have 4 environments, namely {art painting, cartoon, sketch, photo} and {CALTECH, LABELME, PASCAL, SUN} respectively. We use the first three environments as the training environments, while we use the test set of all four environments to form our proposed multi-environment invariance evaluation system." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.743, + 0.3, + 0.757 + ], + "angle": 0, + "content": "C.2 BASELINES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.77, + 0.825, + 0.828 + ], + "angle": 0, + "content": "For each baseline method, we follow its official PyTorch repository except IRM-GAME and SPARSEIRM. We translate the TensorFlow-based original code base of IRM-GAME to PyTorch. As one of the latest IRM advancements, the official code of SPARSEIRM is not yet publicly available. Therefore, we reproduce SPARSEIRM in PyTorch." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.877 + ], + "angle": 0, + "content": "In particular, for COLORED-MNIST and COLORED-FMNIST, we stick to the original hyperparameters for the large-batch setting and tune the hyper-parameters of each method, including the penalty weight, number of warm-up epochs, and learning rate for the small batch setting." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.927 + ], + "angle": 0, + "content": "In particular, for the large-batch setting, we use the penalty weight of \\(10^{6}\\), 190 warm-up epochs, and 500 epochs in total, as suggested by the original IRMv1 and inherited by its variants. For the small-batch setting, we adopt the same penalty weight \\(10^{6}\\). Further, we found that the warm-up" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "phase could be shortened without sacrificing accuracy. Therefore, we use 50 warm-up epochs and total 200 epochs for all the methods." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.826, + 0.182 + ], + "angle": 0, + "content": "For other datasets, we adopt the batch size of 128 and use ResNet-18 as the default model architecture. We train for 200 epochs. We adopt the step-wise learning rate scheduler with an initial learning rate of 0.1. The learning rate decays by 0.1 at the 100th and 150th epochs." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.199, + 0.475, + 0.213 + ], + "angle": 0, + "content": "C.3 ADDITIONAL EXPERIMENT RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.225, + 0.825, + 0.281 + ], + "angle": 0, + "content": "The influence of batch size with all the baselines. We show in Figure A1 the influence of training batch size on the performance of different methods. We observe in Figure A1, as in Figure 1, that full batch setting does not achieve the best performance, and the use of mini-batch (stochastic gradient descent) indeed improves performance." + }, + { + "type": "image", + "bbox": [ + 0.384, + 0.294, + 0.611, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.46, + 0.825, + 0.499 + ], + "angle": 0, + "content": "Figure A1: The performance of all the baselines in this work trained with different batch sizes on COLORED-MNIST dataset. The full data batch-size is 50k. The invariant accuracy corresponds to the average accuracy evaluated based on the diversified environments-based evaluation metric." + }, + { + "type": "image", + "bbox": [ + 0.257, + 0.527, + 0.487, + 0.679 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.527, + 0.741, + 0.679 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.693, + 0.825, + 0.757 + ], + "angle": 0, + "content": "Figure A2: The loss landscapes of invariant prediction models acquired by (A) large-batch IRMv1 training with 50k batch size and (B) small-batch training with 1k batch size. The 2D loss landscape visualization is realized using the tool in (Li et al., 2018). The \\( x \\) and \\( y \\) axes represent the linear interpolation coefficients over two directional vectors originated from the converged local optima. Here the numbers on the contour denote the loss values over test data." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.769, + 0.825, + 0.853 + ], + "angle": 0, + "content": "Loss landscapes of IRMv1 with different batch sizes. We plot the loss landscapes of the models trained with IRMv1 on COLORED-MNIST using large (full) and small batch in Figure A2. Using small batch training, IRMv1 (Fig. A2B) converges to a smooth neighborhood of a local optima. This also corresponds to a flatter loss landscape than the landscape of the large-batch training (Figure A2(A)). The loss landscapes demonstrate consistent results as other experiments discussed in Section 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Training trajectory with BLOC-IRM with and without stationary loss. In Figure A3, we plot the per-environment training trajectory of stationary loss when solving (1) and (BLOC-IRM) on COLORED-MNIST. For (BLOC-IRM) we use the regularization term \\(\\lambda = 10^6\\), which is aligned with the penalty coefficient used in IRMv1. As we can see, without the stationarity regularization," + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "the stationary loss remains at a high level for both environments (the dotted curves). Notably, the lower-level stationary can be reached fast with the stationarity penalty, as shown in the solid curves." 
+ }, + { + "type": "image", + "bbox": [ + 0.37, + 0.148, + 0.624, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.325, + 0.825, + 0.366 + ], + "angle": 0, + "content": "Figure A3: The per-environment training trajectory for the stationarity loss of (1) and (BLOC-IRM) on COLORED-MNIST. The training setting is the same as Figure 2. The algorithmic details can be found in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.459 + ], + "angle": 0, + "content": "Performance of all the methods with full dataset list. We show in Table A1 the results of all the methods on the seven datasets we studied. To be more specific, in Table A1, we append the results of COLORED-MNIST and COLORED-FMNIST into Table 5 as a whole. As we can see, our methods outperforms other baselines in all the datasets in terms of average accuracy, and stands top in most cases in terms of the accuracy gap." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.482, + 0.825, + 0.534 + ], + "angle": 0, + "content": "Table A1: IRM performance comparison between our proposed BLOC-IRM method and other baselines under the full list of datasets. We use MLP for COLORED-MNIST and COLORED-FMNIST, and ResNet-18 (He et al., 2016) for the rest datasets. The evaluation setup is consistent with Table 4, and the best performance per-evaluation metric and per-dataset is highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.534, + 0.825, + 0.615 + ], + "angle": 0, + "content": "
Algorithm Metrics (%)COLORED-MNISTCOLORED-FMNISTCOLORED-OBJECTCIFAR-MNISTCELEBAVLCSPACS
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
ERM49.19±1.8990.72±2.0849.77±1.7188.62±2.4941.11±1.4486.43±2.8940.39±1.3285.53±2.3372.38±0.2910.73±0.3663.23±0.2312.39±0.3569.95±0.3514.32±0.75
IRMv168.33±0.312.04±0.0568.76±0.311.45±0.0964.42±0.214.18±0.2961.49±0.297.17±0.3372.49±0.3810.15±0.2762.72±0.2912.74±0.2768.93±0.3314.99±0.51
IRMv068.37±0.281.32±0.0969.07±0.271.36±0.0662.39±0.255.36±0.3160.14±0.188.83±0.3972.42±0.3510.43±0.3862.59±0.3212.99±0.3668.72±0.2915.29±0.71
IRM-GAME67.73±0.241.67±0.1467.49±0.321.82±0.1362.88±0.345.59±0.2860.44±0.316.72±0.4172.18±0.4412.32±0.4162.31±0.3813.37±0.6268.12±0.2215.77±0.66
REX68.42±0.291.65±0.0768.66±0.221.29±0.0863.37±0.355.42±0.3162.32±0.245.55±0.3272.34±0.2610.31±0.2363.19±0.3112.87±0.3169.43±0.3415.31±0.67
BIRM68.71±0.211.35±0.0968.64±0.321.44±0.1365.11±0.273.31±0.2262.99±0.355.23±0.3672.93±0.289.92±0.3363.33±0.4012.13±0.2369.34±0.2515.76±0.49
SPARSEIRM68.81±0.251.72±0.0568.29±0.221.28±0.1564.97±0.393.97±0.2562.16±0.294.14±0.3172.42±0.339.79±0.2162.86±0.2612.79±0.3569.52±0.3915.81±0.82
FISHR68.69±0.192.13±0.0868.79±0.171.77±0.1064.07±0.234.41±0.2961.79±0.255.55±0.2172.89±0.259.42±0.3263.44±0.3711.93±0.4270.21±0.2214.52±0.43
BLOC-IRM69.47±0.241.04±0.0769.43±0.211.14±0.1165.97±0.334.10±0.3663.69±0.324.89±0.3673.35±0.328.79±0.2163.62±0.3511.55±0.3270.31±0.2114.73±0.65
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.825, + 0.682 + ], + "angle": 0, + "content": "Experiment on different model sizes. We show in Figure A4 the influence of the increasing model size on the performance of different baselines considered in this work. Compared to Figure 4, we report additional standard deviation of the 10 independent trials in Figure A4." + }, + { + "type": "image", + "bbox": [ + 0.372, + 0.695, + 0.622, + 0.861 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.873, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Figure A4: IRM performance on COLORED-MNIST against the dimension of the intermediate layer in MLP. The dotted line represents the default dimension (\\(d = 390\\)) used in the literature. The invariant prediction accuracy is presented via the dot line (mean) and shaded area (standard deviation) over 10 random trials." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.149 + ], + "angle": 0, + "content": "Experiment with different training environments. In Table A2, we show the performance of all the methods in more complex training environments, such as more training environments and more skewed environment bias parameter \\(\\beta\\). As we can see, BLOC-IRM outperforms other baselines." + }, + { + "type": "table_caption", + "bbox": [ + 0.243, + 0.171, + 0.754, + 0.185 + ], + "angle": 0, + "content": "Table A2: Performance under different training environments in COLORED-MNIST." + }, + { + "type": "table", + "bbox": [ + 0.303, + 0.186, + 0.697, + 0.352 + ], + "angle": 0, + "content": "
Environment Metrics (%)\\( {p}_{\\text{tr }} \\in \\{ {0.1},{0.15}\\} \\)\\( {p}_{\\text{tr }} \\in \\{ {0.1},{0.15},{0.2}\\} \\)
Avg AccAcc GapAvg AccAcc Gap
OPTIMUM75.000.0075.000.00
GRAYSCALE73.82±0.110.37±0.0573.97±0.140.29±0.08
ERM49.21±0.7991.88±3.3149.03±0.9392.17±3.04
IRMv167.36±0.312.77±0.1567.11±0.34\\( {2.42} \\pm {0.12} \\)
IRMv067.01±0.42\\( {2.85} \\pm {0.18} \\)66.71±0.42\\( {2.36} \\pm {0.19} \\)
IRM-GAME66.39±0.724.47±0.6165.93±0.53\\( {4.25} \\pm {0.84} \\)
REX66.82±0.44\\( {2.59} \\pm {0.11} \\)67.14±0.38\\( {2.16} \\pm {0.13} \\)
BIRM67.35±0.39\\( {2.65} \\pm {0.10} \\)68.05±0.43\\( {1.99} \\pm {0.07} \\)
SPARSEIRM67.12±0.53\\( {2.33} \\pm {0.18} \\)67.72±0.41\\( {2.11} \\pm {0.19} \\)
FISHR67.22±0.43\\( {2.44} \\pm {0.15} \\)67.32±0.39\\( {2.59} \\pm {0.15} \\)
BLOC-IRM68.72±0.41\( {2.19} \pm {0.15} \)68.89±0.31\( {2.39} \pm {0.09} \)
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.374, + 0.825, + 0.485 + ], + "angle": 0, + "content": "BLOC-IRM with different regularizations. Based on the penalty terms used in the existing IRM variants, we explore the performance of our proposed BLOC-IRM with various regularization, including the ones used in IRMv1 (BLOC-IRM-v1), REX (BLOC-IRM-REX), and FISHR (BLOC-IRM-FISHR). We conduct experiments on three different datasets and the results are shown in Table A3. It is obvious that the best performance is always achieved when the per-environment stationarity is penalized in the upper-level. This is not surprising since without an explicit promotion of stationarity, other forms of penalties do not guarantee the BLO algorithm to achieve an optimal solution." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.507, + 0.825, + 0.546 + ], + "angle": 0, + "content": "Table A3: The performance of BLOC-IRM with different regularization terms. Three datasets are studied and the latest baseline SPARSEIRM is listed as reference for comparison. The best performance per-evaluation metric and per-dataset is highlighted in **bold**." + }, + { + "type": "table", + "bbox": [ + 0.238, + 0.547, + 0.761, + 0.64 + ], + "angle": 0, + "content": "
Dataset MetricsCOLORED-MNISTCOLORED-OBJECTCIFAR-MNIST
Avg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
SPARSEIRM68.81±0.251.72±0.0564.97±0.393.97±0.2562.87±0.294.14±0.31
BLOC-IRM69.47±0.241.04±0.0765.97±0.334.10±0.3663.69±0.324.89±0.36
BLOC-IRM-v167.14±0.244.33±0.8363.38±0.296.31±0.5161.13±0.516.71±0.41
BLOC-IRM-REX62.71±0.218.74±1.2160.31±0.337.62±0.6659.39±0.557.89±0.45
BLOC-IRM-FISHR63.25±0.167.12±0.3961.17±0.346.98±0.4560.86±0.516.63±0.30
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.664, + 0.825, + 0.803 + ], + "angle": 0, + "content": "Performance comparison of different methods with additional covariate shifts. Besides the sensitivity check on model size, Table A4 examines the resilience of IRM to variations in the training environment. This study is motivated by Krueger et al. (2021), who empirically showed that the performance of invariant prediction degrades if additional covariate shifts are imposed on the training environments. Thus, we present the IRM performance on COLORED-MNIST by introducing class, digit, and color imbalances, following Krueger et al. (2021, Section 4.1). Compared with Table 4, IRM suffers a greater performance loss in Table A4, in the presence of training environment variations. However, the proposed BLOC-IRM maintains the accuracy improvement over baselines with each variation. In Table A2, we also study IRM with different numbers of training environments and observe the consistent improvement of BLOC-IRM over other baselines." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.818, + 0.825, + 0.931 + ], + "angle": 0, + "content": "Exploration on the failure cases of previous IRM methods. Some papers either theoretically (Rosenfeld et al., 2020) or empirically (Kamath et al., 2021) pointed out that the original IRMv1 method could fail in certain circumstances, due to the fact that the regularization term used in IRMv1 heavily relies on the \"linear predictor\" assumption. Regarding this issue, we first bring to attention that the BLOC-IRM formulation does not require the predictors to be linear, since we adopt the regularization in the form of IRMv0 in the upper-level objective, not IRMv1. To justify our argument, we repeat the experiments in (Kamath et al., 2021), which points out a specific scenario using the COLORED-MNIST dataset where IRMv1 fails." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.113, + 0.825, + 0.152 + ], + "angle": 0, + "content": "Table A4: IRM performance on COLORED-MNIST and COLORED-FMNIST with training environment variations in terms of class, digit and color imbalances. The best IRM performance per-evaluation metric and per-variation source is highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.153, + 0.825, + 0.264 + ], + "angle": 0, + "content": "
DatasetCOLORED-MNISTCOLORED-FMNIST
Variation Metrics (%)Class ImbalanceDigit ImbalanceColor ImbalanceClass ImbalanceDigit ImbalanceColor Imbalance
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
GRAYSCALE71.23±0.182.76±0.1170.31±0.212.79±0.1572.29±0.162.88±0.1470.15±0.212.29±0.1269.92±0.152.72±0.2173.31±0.111.17±0.23
ERM43.72±1.0192.76±1.4545.89±2.8291.65±1.8646.19±2.8890.88±1.6941.72±1.9893.37±2.1542.39±2.3992.23±2.7245.89±0.2791.31±2.27
IRMv165.39±0.224.44±0.2964.89±0.264.19±0.4466.12±0.253.31±0.2962.49±0.334.93±0.4561.88±0.235.54±0.3964.39±0.443.79±0.33
IRMv065.01±0.284.29±0.3365.13±0.253.87±0.2866.72±0.253.01±0.4462.78±0.485.33±0.4761.62±0.295.29±0.4164.93±0.273.28±0.31
IRM-GAME62.21±0.426.45±0.3562.10±0.356.72±0.4461.82±0.657.78±0.5560.73±0.846.24±0.4360.79±0.456.47±0.8264.32±0.425.73±0.31
REX66.45±0.253.39±0.2866.23±0.433.21±0.2066.99±0.423.32±0.2764.89±0.365.78±0.5363.95±0.254.73±0.6265.87±0.424.30±0.42
BIRM65.73±0.254.11±0.3165.73±0.884.49±0.6766.72±0.243.47±0.2564.39±0.344.47±0.3963.24±0.394.54±0.4265.08±0.313.80±0.29
SPARSEIRM65.32±0.394.92±0.2264.44±0.364.85±0.3366.03±0.322.85±0.1964.32±0.514.15±0.3662.97±0.355.75±0.5264.72±0.463.99±0.39
FISHR66.13±0.283.99±0.3265.87±0.423.72±0.4165.48±0.214.49±0.3163.62±0.535.59±0.3562.47±0.265.72±0.3365.13±0.324.44±0.21
BLOC-IRM66.32±0.273.11±0.2266.41±0.293.32±0.2567.25±0.243.72±0.2765.99±0.313.97±0.4365.13±0.315.11±0.4566.79±0.263.72±0.36
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.29, + 0.603, + 0.444 + ], + "angle": 0, + "content": "More specifically, the models are trained in the training environments \\((\\alpha, \\beta) = (0.1, 0.2)\\) and \\((0.1, 0.25)\\), and evaluated in the test environment \\((0.1, 0.9)\\). Note that denotes the label flipping rate and represents the environment bias parameter. The results are shown in the Table A5. As we can see, IRMv1 is clearly worse than ERM as it achieves much lower average accuracy and higher accuracy gap. However, BLOC-IRM outperforms ERM by obtaining high average accuracy and lower accuracy gap. This result shows that BLOC-IRM seems promising to address the empirical IRM challenge discovered in (Kamath et al., 2021). In the meantime, we also acknowledge that BLOC-IRM is not per" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.444, + 0.825, + 0.5 + ], + "angle": 0, + "content": "fect since the advantage achieved by BLOC-IRM over ERM is not strong enough. However, we stress that the main contribution of BLOC-IRM does not lie in solving the failure cases of IRMv1, but to fix the issue of IRM-Game that resorts to a predictor ensemble to make the invariant prediction, which deviates from the spirit of acquiring invariant predictors in the original IRM paradigm." + }, + { + "type": "table_caption", + "bbox": [ + 0.612, + 0.304, + 0.825, + 0.38 + ], + "angle": 0, + "content": "Table A5: Performance comparisons on COLORED-MNIST among ERM, IRMv1, and BLOC-IRM in the scenarios where IRM-variants failed following (Kamath et al., 2021)." + }, + { + "type": "table", + "bbox": [ + 0.614, + 0.38, + 0.809, + 0.431 + ], + "angle": 0, + "content": "
MethodAvg. Acc.Acc. Gap
ERM83.0913.79
IRMv176.8927.68
BLOC-IRM84.2211.01
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.513, + 0.825, + 0.571 + ], + "angle": 0, + "content": "A similar curve to Figure 1 on COLORED-FMNIST. We show the results for COLORED-FMNIST similar to Figure 1 in Figure A5 and the conclusion does not change much. As mentioned before, the large-batch training setup was typically used for IRM training over the COLORED-MNIST and COLORED-FMNIST datasets." + }, + { + "type": "image", + "bbox": [ + 0.383, + 0.582, + 0.611, + 0.731 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.747, + 0.825, + 0.772 + ], + "angle": 0, + "content": "Figure A5: The performance of three IRM methods (IRMv1, IRMv0, and REX) vs. batch size under COLORED-FMNIST." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ] +] \ No newline at end of file diff --git a/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_origin.pdf b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f22155397fa5b67215009e96427d49df9f5271a6 --- /dev/null +++ b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/ada9a8d8-393e-4e9c-91dc-8e9b9de8056a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5aa1b207273ba5a9e0df9575e053d1f49419dc88738a105821ff94d9ddf4ffe9 +size 1405256 diff --git a/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/full.md b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1969b8d08bb68c6aae9fba9859755633be5b67d8 --- /dev/null +++ b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/full.md @@ -0,0 +1,423 @@ 
+# WHAT IS MISSING IN IRM TRAINING AND EVALUATION? CHALLENGES AND SOLUTIONS + +Yihua Zhang $^{1}$ , Pranay Sharma $^{2}$ , Parikshit Ram $^{3}$ , Mingyi Hong $^{4}$ , Kush Varshney $^{3}$ , Sijia Liu $^{1,3}$ $^{1}$ Michigan State University, $^{2}$ Carnegie Mellon University, $^{3}$ IBM Research, $^{4}$ University of Minnesota + +# ABSTRACT + +Invariant risk minimization (IRM) has received increasing attention as a way to acquire environment-agnostic data representations and predictions, and as a principled solution for preventing spurious correlations from being learned and for improving models' out-of-distribution generalization. Yet, recent works have found that the optimality of the originally-proposed IRM optimization (IRMv1) may be compromised in practice or could be impossible to achieve in some scenarios. Therefore, a series of advanced IRM algorithms have been developed that show practical improvement over IRMv1. In this work, we revisit these recent IRM advancements, and identify and resolve three practical limitations in IRM training and evaluation. First, we find that the effect of batch size during training has been chronically overlooked in previous studies, leaving room for further improvement. We propose small-batch training and highlight the improvements over a set of large-batch optimization techniques. Second, we find that improper selection of evaluation environments could give a false sense of invariance for IRM. To alleviate this effect, we leverage diversified test-time environments to precisely characterize the invariance of IRM when applied in practice. Third, we revisit Ahuja et al. (2020)'s proposal to convert IRM into an ensemble game and identify a limitation when a single invariant predictor is desired instead of an ensemble of individual predictors. We propose a new IRM variant to address this limitation based on a novel viewpoint of ensemble IRM games as consensus-constrained bilevel optimization. 
Lastly, we conduct extensive experiments (covering 7 existing IRM variants and 7 datasets) to justify the practical significance of revisiting IRM training and evaluation in a principled manner. + +# 1 INTRODUCTION + +Deep neural networks (DNNs) have enjoyed unprecedented success in many real-world applications (He et al., 2016; Krizhevsky et al., 2017; Simonyan & Zisserman, 2014; Sun et al., 2014). However, experimental evidence (Beery et al., 2018; De Haan et al., 2019; DeGrave et al., 2021; Geirhos et al., 2020; Zhang et al., 2022b) suggests that DNNs trained with empirical risk minimization (ERM), the most commonly used training method, are prone to reproducing spurious correlations in the training data (Beery et al., 2018; Sagawa et al., 2020). This phenomenon causes performance degradation when facing distributional shifts at test time (Gulrajani & Lopez-Paz, 2020; Koh et al., 2021; Wang et al., 2022; Zhou et al., 2022a). In response, the problem of invariant prediction arises to enforce the model trainer to learn stable and causal features (Beery et al., 2018; Sagawa et al., 2020). + +In pursuit of out-of-distribution generalization, a new model training paradigm, termed invariant risk minimization (IRM) (Arjovsky et al., 2019), has received increasing attention to overcome the shortcomings of ERM against distribution shifts. In contrast to ERM, IRM aims to learn a universal representation extractor, which can elicit an invariant predictor across multiple training environments. However, different from ERM, the learning objective of IRM is highly non-trivial to optimize in practice. Specifically, IRM requires solving a challenging bi-level optimization (BLO) problem with a hierarchical learning structure: invariant representation learning at the upper-level and invariant predictive modeling at the lower-level. 
Various techniques have been developed to solve IRM effectively, such as (Ahuja et al., 2020; Lin et al., 2022; Rame et al., 2022; Zhou et al., 2022b) to name a few. Despite the proliferation of IRM advancements, several issues in the theory and practice have also appeared. For example, recent works (Rosenfeld et al., 2020; Kamath et al., + +2021) revealed the theoretical failure of IRM in some cases. In particular, there exist scenarios where the optimal invariant predictor is impossible to achieve, and the IRM performance may fall behind even that of ERM. Practical studies also demonstrate that the performance of IRM relies on multiple factors, e.g., model size (Lin et al., 2022; Zhou et al., 2022b), environment difficulty (Dranker et al., 2021; Krueger et al., 2021), and dataset type (Gulrajani & Lopez-Paz, 2020). + +Therefore, key challenges remain in deploying IRM to real-world applications. In this work, we revisit recent IRM advancements and uncover and tackle several pitfalls in IRM training and evaluation, which have so far gone overlooked. We first identify the large-batch training issue in existing IRM algorithms, which prevents escape from bad local optima during IRM training. Next, we show that evaluation of IRM performance with a single test-time environment could lead to an inaccurate assessment of prediction invariance, even if this test environment differs significantly from training environments. Based on the above findings, we further develop a novel IRM variant, termed BLOC-IRM, by interpreting and advancing the IRM-GAME method (Ahuja et al., 2020) through the lens of BLO with Consensus prediction. Below, we list our contributions (1-3). + +1 We demonstrate that the prevalent use of large-batch training leaves significant room for performance improvement in IRM, something chronically overlooked in the previous IRM studies with benchmark datasets COLORED-MNIST and COLORED-FMNIST. 
By reviewing and comparing with 7 state-of-the-art (SOTA) IRM variants (Table 1), we show that simply using small-batch training improves generalization over a series of more involved large-batch optimization enhancements. +We also show that an inappropriate evaluation metric could give a false sense of invariance to IRM. Thus, we propose an extended evaluation scheme that quantifies both precision and 'invariance' across diverse testing environments. +Further, we revisit and advance the IRM-GAME approach (Ahuja et al., 2020) through the lens of consensus-constrained BLO. We remove the need for an ensemble (one per training environment) of predictors in IRM-GAME by proposing BLOC-IRM (BLO with Consensus IRM), which produces a single invariant predictor. +Lastly, we conduct extensive experiments (on 7 datasets, using diverse model architectures and training environments) to justify the practical significance of our findings and methods. Notably, we conduct experiments on the CELEBA dataset as a new IRM benchmark with realistic spurious correlations. We show that BLOC-IRM outperforms all baselines in nearly all settings. + +# 1.1 RELATED WORK + +IRM methods. Inspired by the invariance principle (Peters et al., 2016), Arjovsky et al. (2019) define IRM as a BLO problem, and develop a relaxed single-level formulation, termed IRMv1, for ease of training. Recently, there has been considerable work to advance IRM techniques. Examples of IRM variants include penalization on the variance of risks or loss gradients across training environments (Chang et al., 2020; Krueger et al., 2021; Rame et al., 2022; Xie et al., 2020; Xu & Jaakkola, 2021; Xu et al., 2022), domain regret minimization (Jin et al., 2020), robust optimization over multiple domains (Xu & Jaakkola, 2021), sparsity-promoting invariant learning (Zhou et al., 2022b), Bayesian inference-baked IRM (Lin et al., 2022), and ensemble game over the environment-specific predictors (Ahuja et al., 2020). 
We refer readers to Section 2 and Table 1 for more details on the IRM methods that we will focus on in this work. + +Despite the potential and popularity of IRM, some works have also shown the theoretical and practical limitations of current IRM algorithms. Specifically, Chen et al. (2022); Kamath et al. (2021) show that invariance learning via IRM could fail and be worse than ERM in some two-bit environment setups on COLORED-MNIST, a synthetic benchmark dataset often used in IRM works. The existence of failure cases of IRM is also theoretically shown by Rosenfeld et al. (2020) for both linear and non-linear models. Although subsequent IRM algorithms take these failure cases into account, there still exist huge gaps between theoretically desired IRM and its practical variants. For example, Lin et al. (2021; 2022); Zhou et al. (2022b) found many IRM variants incapable of maintaining graceful generalization on large and deep models. Moreover, Ahuja et al. (2021); Dranker et al. (2021) demonstrated that the performance of IRM algorithms could depend on practical details, e.g., dataset size, sample efficiency, and environmental bias strength. The above IRM limitations inspire our work to study when and how we can turn the IRM advancements into effective solutions, to gain high-accuracy and stable invariant predictions in practical scenarios. + +Domain generalization. IRM is also closely related to domain generalization (Carlucci et al., 2019; Gulrajani & Lopez-Paz, 2020; Koh et al., 2021; Li et al., 2019; Nam et al., 2021; Wang et al., 2022; Zhou et al., 2022a). Compared to IRM, domain generalization includes a wider range of approaches to improve prediction accuracy against distributional shifts (Beery et al., 2018; Jean et al., 2016; Koh et al., 2021). For example, an important line of research is to improve representation learning by encouraging cross-domain feature resemblance (Long et al., 2015; Tzeng et al., 2014). 
The studies on domain generalization have also been conducted across different learning paradigms, e.g., adversarial learning (Ganin et al., 2016), self-supervised learning (Carlucci et al., 2019), and meta-learning (Balaji et al., 2018; Dou et al., 2019). + +# 2 PRELIMINARIES AND SETUP + +In this section, we introduce the basics of IRM and provide an overview of our IRM case study. + +IRM formulation. In the original IRM framework Arjovsky et al. (2019), consider a supervised learning paradigm, with datasets $\{\mathcal{D}^{(e)}\}_{e\in \mathcal{E}_{\mathrm{tr}}}$ collected from $N$ training environments $\mathcal{E}_{\mathrm{tr}} = \{1,2,\dots ,N\}$ . The training samples in $\mathcal{D}^{(e)}$ (corresponding to the environment $e$ ) are of the form $(\mathbf{x},y)\in \mathcal{X}\times \mathcal{Y}$ , where $\mathcal{X}$ and $\mathcal{Y}$ are, respectively, the raw feature space and the label space. IRM aims to find an environment-agnostic data representation $\phi_{\theta}:\mathcal{X}\to \mathcal{Z}$ , which elicits an invariant prediction $f_{\mathbf{w}}:\mathcal{Z}\rightarrow \mathcal{Y}$ that is simultaneously optimal for all environments. Here $\pmb{\theta}$ and $\mathbf{w}$ denote model parameters to be learned, and $\mathcal{Z}$ denotes the representation space. Thus, IRM yields an invariant predictor $f_{\mathbf{w}}\circ \phi_{\pmb{\theta}}:\mathcal{X}\to \mathcal{Y}$ that can generalize to unseen test-time environments $\{\mathcal{D}^{(e)}\}_{e\notin \mathcal{E}_{\mathrm{tr}}}$ . Here $\circ$ denotes function composition, i.e., $f_{\mathbf{w}}\circ \phi_{\pmb{\theta}}(\cdot) = f_{\mathbf{w}}(\phi_{\pmb{\theta}}(\cdot))$ . We will use $\mathbf{w}\circ \pmb{\theta}$ as a shorthand for $f_{\mathbf{w}}\circ \phi_{\pmb{\theta}}$ . 
IRM constitutes the following BLO problem: + +$$ +\underset {\boldsymbol {\theta}} {\text {m i n i m i z e}} \quad \sum_ {e \in \mathcal {E} _ {\mathrm {t r}}} \ell^ {(e)} \left(\mathbf {w} ^ {*} (\boldsymbol {\theta}) \circ \boldsymbol {\theta}\right); \quad \text {s u b j e c t t o} \quad \mathbf {w} ^ {*} (\boldsymbol {\theta}) \in \underset {\bar {\mathbf {w}}} {\arg \min } \ell^ {(e)} \left(\bar {\mathbf {w}} \circ \boldsymbol {\theta}\right), \forall e \in \mathcal {E} _ {\mathrm {t r}}, \quad (\text {I R M}) +$$ + +where $\ell^{(e)}(\mathbf{w} \circ \boldsymbol{\theta})$ is the per-environment training loss of the predictor $\mathbf{w} \circ \boldsymbol{\theta}$ under $\mathcal{D}^{(e)}$ . Clearly, IRM involves two optimization levels that are coupled through the lower-level solution $\mathbf{w}^*(\boldsymbol{\theta})$ . Achieving the desired invariant prediction requires the solution sets of the individual lower-level problems $\{\arg \min_{\bar{\mathbf{w}}} \ell^{(e)}(\bar{\mathbf{w}} \circ \boldsymbol{\theta}), e \in \mathcal{E}_{tr}\}$ to be non-singleton. However, BLO problems with non-singleton lower-level solution sets are significantly more challenging (Liu et al., 2021). To circumvent this difficulty, Arjovsky et al. (2019) relax (IRM) into a single-level optimization problem (a.k.a., IRMv1): + +$$ +\underset {\boldsymbol {\theta}} {\text {m i n i m i z e}} \quad \sum_ {e \in \mathcal {E} _ {\mathrm {t r}}} [ \ell^ {(e)} (\boldsymbol {\theta}) + \gamma \| \nabla_ {w | w = 1. 0} \ell^ {(e)} (w \circ \boldsymbol {\theta}) \| _ {2} ^ {2} ], \tag {IRMv1} +$$ + +where $\gamma > 0$ is a regularization parameter and $\nabla_{w|w = 1.0}\ell^{(e)}$ denotes the gradient of $\ell^{(e)}$ with respect to $w$ , computed at $w = 1.0$ . Compared with IRM, IRMv1 is restricted to linear invariant predictors, and penalizes the deviation of individual environment losses from stationarity to approach the lower-level optimality in (IRM). 
IRMv1 uses the fact that a scalar predictor ( $w = 1.0$ ) is equivalent to a linear predictor. Despite the practical simplicity of (IRMv1), it may fail to achieve the desired invariance (Chen et al., 2022; Kamath et al., 2021). + +Case study of IRM methods. As illustrated above, the objective of IRM is difficult to optimize, while IRMv1 only provides a sub-optimal solution. Subsequent advances have attempted to reduce this gap. In this work, we focus on 7 popular IRM variants and evaluate their invariant prediction performance over 7 datasets. Table 1 and Table 2 respectively summarize the IRM methods and the datasets considered in this work. We survey the most representative and effective IRM variants in the literature, which will also serve as our baselines in performance comparison. + +Following Table 1, we first introduce the IRMv0 variant, a generalization of IRMv1, by relaxing its assumption of linearity of the predictor $\mathbf{w}$ , yielding + +$$ +\underset {\mathbf {w}, \boldsymbol {\theta}} {\text {m i n i m i z e}} \quad \sum_ {e \in \mathcal {E} _ {\mathrm {t r}}} [ \ell^ {(e)} (\mathbf {w} \circ \boldsymbol {\theta}) + \gamma \| \nabla_ {\mathbf {w}} \ell^ {(e)} (\mathbf {w} \circ \boldsymbol {\theta}) \| _ {2} ^ {2} ]. \tag {IRMv0} +$$ + +Next, we consider the risk extrapolation method REx (Krueger et al., 2021), an important baseline based on distributionally robust optimization for group shifts (Sagawa et al., 2019). Furthermore, + +Table 1: Summary of the 7 existing IRM variants considered in this work, and the proposed BLOC-IRM method (see Section 5). We also list the 7 benchmark datasets used to evaluate IRM performance, namely, COLORED-MNIST (CoM), COLORED-FMNIST (CoF), CIFAR-MNIST (CiM), COLORED-OBJECT (CoO), CELEBA (CA), PACS (P) and VLCS (A). The symbols $\checkmark$ signifies the dataset used in the specific reference. + +
IRM +MethodVenueDatasetsReference
CoMCoFCiMCoOCAPV
IRMv1arXiv(Arjovsky et al., 2019)
IRMv0N/AThis Work
IRM-GAMEICML(Ahuja et al., 2020)
REXICML(Krueger et al., 2021)
BIRMCVPR(Lin et al., 2022)
SPARSEIRMICML(Zhou et al., 2022b)
FISHRICML(Rame et al., 2022)
OursN/AThis Work
+ +Table 2: Dataset setups. 'Invariant' and 'Spurious' represent the core and spurious features. 'Env1' and 'Env2' are environments with different spurious correlations. + +
DatasetInvariantSpuriousEnv 1Env 2
CoMDigitColor
CoFObjectColor
CiMCIFARMNIST
CoOObjectColor
CASmilingHair Color
PObjectTexture
VObjectEnvironment
+ +inspired by the empirical findings that the performance of IRM could be sensitive to model size (Choe et al., 2020; Gulrajani & Lopez-Paz, 2020), we choose the SOTA methods Bayesian IRM (BIRM) (Lin et al., 2022) and sparse IRM (SPARSEIRM) (Zhou et al., 2022b), both of which show improved performance with large models. Also, we consider the SOTA method FISHR (Rame et al., 2022), which modifies IRM to penalize the domain-level gradient variance in single-level risk minimization. FISHR provably matches both domain-level risks and Hessians. Lastly, we include IRM-GAME (Ahuja et al., 2020) as a special variant of IRM. Different from the other methods which seek an invariant predictor, IRM-GAME endows each environment with a predictor, and leverages this ensemble of predictors to achieve invariant representation learning. This is in contrast to other existing works which seek an invariant predictor. Yet, we show in Section 5 that IRM-GAME can be interpreted through the lens of consensus-constrained BLO and generalized for invariant prediction. We also highlight that diverse dataset types are considered in this work (see Table 2) to benchmark IRM's performance. More details on dataset selections can be found in Appendix A. + +# 3 LARGE-BATCH TRAINING CHALLENGE AND IMPROVEMENT + +In this section, we demonstrate and resolve the large-batch training challenge in current IRM implementations (Table 1). + +Large-batch optimization causes instabilities of IRM training. Using very large-size batches for model training can result in the model getting trapped near a bad local optima (Keskar et al., 2016). This happens as a result of the lack of stochasticity in the training process, and is known to exist even in the ERM paradigm (Goyal et al., 2017; You et al., 2017a). 
Yet, nearly all the existing IRM methods follow the training setup of IRMv1 (Arjovsky et al., 2019), which used the full-batch gradient descent (GD) method rather than the mini-batch stochastic gradient descent (SGD) for IRM training over COLORED-MNIST and COLORED-FMNIST. In the following, we show that large-batch training might give a false impression of the relative ranking of IRM performances. + +![](images/d81db67045654a5d0cb71843aa85d8ea308dbecba3d380258eefcaf3a5d9d075.jpg) +Figure 1: The performance of three IRM methods (IRMv1, IRMv0, and REX) vs. batch-size under COLORED-MNIST. The full batch-size is $50\mathrm{k}$ . + +We start with an exploration of the impact of batch size on the invariant prediction accuracy of existing IRM methods under COLORED-MNIST. Here the invariant prediction accuracy refers to the averaged accuracy of the invariant predictor applied to diverse test-time environments. We defer its formal description to Section 4. Figure 1 shows the invariant prediction accuracy of three IRM methods IRMv1, IRMv0, and REX vs. the data batch size (see Figure A1 for results of other IRM variants and Figure A5 for COLORED-FMNIST). Recall that the full batch size (50k) was used in the existing IRM implementations (Arjovsky et al., 2019; Krueger et al., 2021). As we can see, in the full-batch setup, IRM methods lead to widely different invariant prediction accuracies, where REX and IRMv1 significantly outperform IRMv0. In contrast, in the small-batch case (with size + +1k), the discrepancy in accuracy across methods vanishes. We see that IRMv0 can be as effective as IRMv1 and other IRM variants (such as REX) only if an appropriate small batch size is used. + +Empirical evidence in Figure 1 shows that large-batch IRM training is less effective than small-batch. This is aligned with the observations in ERM (You et al., 2017b; 2018; 2019), where the lack of stochasticity makes the optimizer difficult to escape from a sharp local minimum. 
We also justify this issue by visualizing the loss landscapes in Figure A2. Notably, the small-batch training enables IRMv1 to converge to a local optimum with a flat loss landscape, indicating better generalization (Keskar et al., 2016). + +Small-batch training is effective versus a zoo of large-batch optimization enhancements. To mitigate the large-batch IRM training issue, we next investigate the effectiveness of both small-batch training and a zoo of large-batch optimization enhancements. Inspired by large-batch training techniques to scale up ERM, we consider Large-batch SGD (LSGD) (Goyal et al., 2017) and Layerwise Adaptive Learning Rate (LALR) (You et al., 2017b; 2018; 2019; Zhang et al., 2022a). Both methods aim to smoothen the optimization trajectory by improving either the learning rate scheduler or the quality of initialization. Furthermore, we adopt sharpness-aware minimization (SAM) (Foret et al., 2020) as another possible large-batch training solution to explicitly penalize the sharpness of the loss landscape. We integrate the above optimization techniques with IRM, leading to the variants IRM-LSGD, IRM-LALR, and IRM-SAM. See Appendix B.1 for more details. + +In Table 3, we compare the performance of the simplest small-batch IRM training with that of those large-batch optimization technique-integrated IRM variants (i.e., 'LSGD/LALR/SAM' in the Table). As we can see, the use of large-batch optimization techniques indeed improves the prediction accuracy over the original IRM implementation. We also observe that the use of SAM for IRM is consistently better than LALR and LSGD, indicating the promise of SAM to scale up IRM with a large batch size. Yet, the small-batch training protocol consistently outperforms large-batch training across all the IRM variants (see the column 'Small'). 
Additional experiment results in Section 6 show that small- + +Table 3: Prediction accuracy of IRM methods on COLORED-MNIST using the original large-batch implementation ('Original'), the large-batch optimization-integrated implementations ('LSGD/LALR/SAM'), and the small-batch training recipe ('Small'). + +
MethodOriginalLSGDLALRSAMSmall
IRMv167.1367.3167.4467.7968.33
IRMv065.3966.4266.7666.9968.37
IRM-GAME65.6965.8265.4766.2367.73
REX67.4267.5367.5967.8268.42
BIRM67.9367.9968.2168.3268.71
SPARSEIRM67.7267.8567.9968.1368.81
FISHR67.8867.8267.9368.1168.69
Average67.0267.2567.3467.6368.44
+ +batch IRM training is effective across datasets, and promotes the invariance achieved by all methods. + +# 4 MULTI-ENVIRONMENT INVARIANCE EVALUATION + +In this section, we revisit the evaluation metric used in existing IRM methods, and show that expanding the diversity of test-time environments would improve the accuracy of invariance assessment. + +Nearly all the existing IRM methods (including those listed in Table 1) follow the evaluation pipeline used in the vanilla IRM framework (Arjovsky et al., 2019), which assesses the performance of the learned invariant predictor on a single unseen test environment. This test-time environment is significantly different from train-time environments. For example, COLORED-MNIST (Arjovsky et al., 2019) suggests a principled way to define two-bit environments, widely-used for IRM dataset curation. Specifically, the COLORED-MNIST task is to predict the label of the handwritten digit groups (digits 0-4 for group 1 and digits 5-9 for group 2). The digit number is also spuriously correlated with the digit color (Table 2). This spurious correlation is controlled by an environment bias parameter $\beta$ , which specifies different data environments with different levels of spurious correlation1. In (Arjovsky et al., 2019), $\beta = 0.1$ and $\beta = 0.2$ are used to define two training environments, which sample the color ID by flipping the digit group label with probability $10\%$ and $20\%$ , respectively. At test time, the invariant accuracy is evaluated on a single, unseen environment with $\beta = 0.9$ . + +However, the prediction accuracy of IRM could be sensitive to the choice of test-time environment (i.e., the value of $\beta$ ). For the default test environment $\beta = 0.9$ , the predictor performance of three representative IRM methods (IRMv1, IRM-GAME, FISHR) ranked from high to low is IRM-GAME>FISHR>IRMv1. 
Given this apparent ranking, we explore more diverse test-time environments, generated by $\beta \in \Omega := \{0.05, 0.1, \ldots, 0.95\}$ . + +Although the train-time bias parameters $\{0.1, 0.2\}$ belong to $\Omega$ , test data is generated afresh, different from training data. We see in Figure 2A that the superiority of IRM-GAME at $\beta = 0.9$ vanishes for smaller $\beta$ . Consequently, for invariant prediction evaluated in other testing environments (e.g., $\beta < 0.4$ ), the performance ranking of the same methods becomes IRMV1>FISHR>IRM-GAME. This mismatch of results suggests we measure the 'invariance' of IRM methods against diverse test environments. Otherwise, evaluation with single $\beta$ could give a false sense of invariance. In Figure 2B, we present the box plots of prediction accuracies for IRM variants, over the diverse set of testing environments $(\beta \in \Omega)$ . Evidently, IRMV1, the oldest + +(sub-optimal) IRM method, yields the least variance of invariant prediction accuracies and the best average prediction accuracy, compared to both IRM-GAME and FISHR. To summarize, the new evaluation method, with diverse test environments, enables us to make a fair comparison of IRM methods implemented in different training environment settings. Unless specified otherwise, we use the multi-environment evaluation method throughout this work. + +![](images/a24b292c97fe77b14b0f86373dec97022fcf16b5cfe6084ea939afc2d489514d.jpg) +(A) + +![](images/f9966abd7be911907704e9dd96d7ef1fa68175e53f5b5b0edc6731bd5a53335b.jpg) +(B) +Figure 2: Performance comparison of IRM variants IRMv1, IRM-GAME, and FISHR on COLORED-MNIST. (A) Evaluation in different test-time environments (corresponding to different $\beta$ ). $\beta$ values used by the two training environments are 0.1, 0.2 respectively. The conventional evaluation is done with the test environment $\beta = 0.9$ (see $\triangle$ ). 
(B) Box plots of prediction accuracies over diverse test environments corresponding to $\beta \in \{0.05, 0.1, \dots, 0.95\}$ . IRMv1 achieves the best average accuracy $(67.13\%)$ , followed by FISHR $(67.05\%)$ and IRM-GAME $(65.53\%)$ . + +# 5 ADVANCING IRM-GAME VIA CONSENSUS-CONSTRAINED BLO + +In this section, we revisit and advance a special IRM variant, IRM-GAME (Ahuja et al., 2020), which endows each individual environment with a separate prediction head and converts IRM into an ensemble game over these multiple predictors. + +Revisiting IRM-GAME. We first introduce the setup of IRM-GAME following notations used in Section 2. The most essential difference between IRM-GAME and the vanilla IRM framework is that the former assigns each environment with an individual classifier $\mathbf{w}^{(e)}$ , and then relies on the ensemble of these individual predictors, i.e., $\frac{1}{N}\sum_{e\in \mathcal{E}_{\mathrm{tr}}}(\mathbf{w}^{(e)}\circ \pmb {\theta})$ , for inference. IRM-GAME is in a sharp contrast to IRM, where an environment-agnostic prediction head $\mathbf{w}^*$ simultaneously optimizes the losses across all environments. Therefore, we raise the following question: Can IRM-GAME learn an invariant predictor? + +Inspired by the above question, we explicitly enforce invariance by imposing a consensus prediction constraint $\mathcal{C} \coloneqq \left\{\left(\bar{\mathbf{w}}^{(1)}, \bar{\mathbf{w}}^{(2)}, \ldots \bar{\mathbf{w}}^{(N)}\right) \mid \bar{\mathbf{w}}^{(1)} = \ldots = \bar{\mathbf{w}}^{(N)}\right\}$ and integrate it with IRM-GAME. Here, $\bar{\mathbf{w}}^{(e)}$ denotes the prediction head for the $e$ -th environment. 
Based on the newly-introduced constraint, the ensemble prediction head $\frac{1}{N} \sum_{e \in \mathcal{E}_{\mathrm{tr}}} \mathbf{w}^{(e)}$ can be interpreted as the average consensus over $N$ environments: $\mathbf{w}^* \coloneqq \frac{1}{N} \sum_{e \in \mathcal{E}_{\mathrm{tr}}} \mathbf{w}^{(e)} = \arg \min_{\{\bar{\mathbf{w}}^{(e)}\}_e \in \mathcal{C}} \sum_{e \in \mathcal{E}_{\mathrm{tr}}} \| \bar{\mathbf{w}}^{(e)} - \mathbf{w}^{(e)} \|_2^2$ . With the above consensus interpretation, we can then cast the invariant predictor-baked IRM-GAME as a consensus-constrained BLO problem, extended from (IRM): + +$$ +\underset {\boldsymbol {\theta}} {\text {m i n i m i z e}} \quad \sum_ {e \in \mathcal {E} _ {\mathrm {t r}}} \ell^ {(e)} \left(\mathbf {w} ^ {*} (\boldsymbol {\theta}) \circ \boldsymbol {\theta}\right) +$$ + +$$ +\text {s u b j e c t t o} \quad (\mathbf {I}): \mathbf {w} ^ {(e)} (\boldsymbol {\theta}) \in \underset {\bar {\mathbf {w}} ^ {(e)}} {\arg \min } \ell^ {(e)} \left(\bar {\mathbf {w}} ^ {(e)} \circ \boldsymbol {\theta}\right), \forall e \in \mathcal {E} _ {\mathrm {t r}}, \tag {1} +$$ + +$$ +(\mathbf {I I}) \colon \mathbf {w} ^ {*} (\boldsymbol {\theta}) = \frac {1}{N} \sum_ {e \in \mathcal {E} _ {\mathrm {t r}}} \mathbf {w} ^ {(e)} (\boldsymbol {\theta}). +$$ + +The above contains two lower-level problems: (I) per-environment risk minimization, and (II) projection onto the consensus constraint $(\{\mathbf{w}^{(e)}\} \in \mathcal{C})$ . The incorporation of (II) is intended to ensure the use of invariant prediction head $\mathbf{w}^*(\pmb{\theta})$ in the upper-level optimization problem of (1). + +Limitation of (1) and BLOC-IRM. 
In (1), the introduced consensus-constrained lower-level problem might compromise the optimality of the lower-level solution $\mathbf{w}^{*}(\pmb{\theta})$ to the per-environment (unconstrained) risk minimization problem (I), i.e., violating the per-environment stationarity + +$\| \nabla_{\mathbf{w}}\ell^{(e)}(\mathbf{w}^* (\pmb {\theta})\circ \pmb {\theta})\| _2^2$ . Figure A3 justifies this side effect. As we can see, the per-environment stationarity is hardly attained at the consensus prediction when solving (1). This is not surprising since a constrained optimization solution might not be a stationary solution to minimizing the (unconstrained) objective function. To alleviate this limitation, we improve (1) by explicitly promoting the per-environment stationarity $\| \nabla_{\mathbf{w}}\ell^{(e)}(\mathbf{w}^* (\pmb {\theta})\circ \pmb {\theta})\| _2^2$ in its upper-level problem through optimization over $\pmb{\theta}$ . This leads to BLOC-IRM (BLO with Consensus IRM): + +$$ +\underset {\boldsymbol {\theta}} {\text {m i n i m i z e}} \quad \sum_ {e \in \mathcal {E} _ {\mathrm {t r}}} \left[ \ell^ {(e)} \left(\mathbf {w} ^ {*} (\boldsymbol {\theta}) \circ \boldsymbol {\theta}\right) + \gamma \| \nabla_ {\mathbf {w}} \ell^ {(e)} \left(\mathbf {w} ^ {*} (\boldsymbol {\theta}) \circ \boldsymbol {\theta}\right) \| _ {2} ^ {2} \right] \tag {BLOC-IRM} +$$ + +subject to Lower-level problems (I) and (II) in (1), + +where $\gamma > 0$ is a regularization parameter like IRMv0. Assisted by the (upper-level) prediction stationarity regularization, the consensus prediction (II) indeed simultaneously minimizes the risks of all the environments, supported by the empirical evidence that the convergence of $\|\nabla_{\mathbf{w}}\ell^{(e)}(\mathbf{w}^*(\boldsymbol{\theta}) \circ \boldsymbol{\theta})\|_2^2$ towards 0 along each environment's optimization path (see Figure A3). 
\frac{d\mathbf{w}^{*}(\boldsymbol{\theta})}{d\boldsymbol{\theta}} = \frac{1}{N} \sum_{e \in \mathcal{E}_{\mathrm{tr}}} \frac{d\mathbf{w}^{(e)}(\boldsymbol{\theta})}{d\boldsymbol{\theta}}, \quad \text{subject to } \mathbf{w}^{(e)}(\boldsymbol{\theta}) \in \underset{\bar{\mathbf{w}}^{(e)}}{\arg\min}\ \ell^{(e)} \left(\bar{\mathbf{w}}^{(e)} \circ \boldsymbol{\theta}\right). \tag{2}
We show the best performance is always achieved when the stationarity is penalized in the upper-level (see Table A3). + +![](images/0122b18efe16a4aa864ab68166e294a34ab15a4c112c8a7589decd971ce3168f.jpg) +Figure 3: Schematic overview of BLOC-IRM over two training environments (red and green), and its comparison to IRM and IRM-GAME. + +# 6 EXPERIMENTS + +In this section, we begin by introducing some key experiment setups (with details in Appendix C.1), and then empirically show the effectiveness of our proposed IRM training and evaluation improvements over existing IRM methods across various datasets, models, and learning environments. + +# 6.1 EXPERIMENT SETUPS + +Datasets and models. Our experiments are conducted over 7 datasets as referenced and shown in Tables 1, 2. Among these datasets, COLORED-MNIST, COLORED-FMNIST, CIFAR-MNIST, and COLORED-OBJECT are similarly curated, mimicking the pipeline of COLORED-MNIST (Arjovsky et al., 2019), by introducing an environment bias parameter (e.g., $\beta$ for COLORED-MNIST in Section 4) to customize the level of spurious correlation (as shown in Table 2) in different environments. In the CELEBA dataset, we choose the face attribute 'smiling' (vs. 'non-smiling') as the core feature aimed for classification, and regard another face attribute 'hair color' ('blond' vs. 'dark') as the + +source of spurious correlation imposed on the core feature. By controlling the level of spurious correlation, we then create different training/testing environments in CELEBA. Furthermore, we study PACS and VLCS datasets, which were used to benchmark domain generalization ability in the real world (Borlino et al., 2021). It was recently shown by Gulrajani & Lopez-Paz (2020) that for these datasets, ERM could even be better than IRMv1. Yet, we will show that our proposed BLOC-IRM is a promising domain generalization method, which outperforms all the IRM baselines and ERM in practice. In addition, we follow Arjovsky et al. 
(2019) in adopting multi-layer perceptron (MLP) as the model for resolving COLORED-MNIST and COLORED-FMNIST problems. In the other more complex datasets, we use the ResNet-18 architecture (He et al., 2016). + +Baselines and implementation. Our baselines include 7 IRM variants (Table 1) and ERM, which are implemented using their official repositories if available (see Appendix C.2). Unless specified otherwise, our training pipeline uses the small-batch training setting. By default, we use the batch size of 1024 for COLORED-MNIST and COLORED-FMNIST, and 256 for other datasets. In Section 6.2 below, we also do a thorough comparison of large-batch vs small-batch IRM training. + +Evaluation setup. As proposed in Section 4, we use the multi-environment evaluation metric unless specified otherwise. To capture both the accuracy and variance of invariant predictions across multiple testing environments, the average accuracy and the accuracy gap (the difference of the best-case and worst-case accuracy) are measured for IRM methods. The resulting performance is reported in the form $a \pm b$ , with mean $a$ and standard deviation $b$ computed across 10 independent trials. + +# 6.2 EXPERIMENT RESULTS + +Small-batch training improves all existing IRM methods on COLORED-MNIST & COLORED-FMNIST. Recall from Section 3 that all the existing IRM methods (Table 1) adopt full-batch IRM training on COLORED-MNIST & COLORED-FMNIST, which raises the large-batch training problem. In Table 4, we conduct + +Table 4: Performance of existing IRM methods in large and small-batch settings. GRAYSCALE refers to ERM on uncolored data, which yields the best prediction (supposing no spurious correlation during training). The IRM performance is evaluated by average accuracy ('Avg Acc') and accuracy gap ('Acc Gap'), in the format mean±std. A higher Avg Acc and lower Acc Gap is preferred. The theoretically optimal performance is $75\%$ (Arjovsky et al., 2019). + +
Dataset Metrics(%)COLORED-MNISTCOLORED-FMNIST
Avg Acc (↑)Acc Gap (↓)Avg Acc (↑)Acc Gap (↓)
Large BatchGRAYSCALE73.39±0.160.32±0.0374.05±0.090.13±0.04
ERM49.19±1.8990.72±2.0849.77±1.7188.62±2.49
IRMv167.13±0.333.43±0.1467.19±0.223.35±0.11
IRMv065.39±0.344.69±0.1866.44±0.283.53±0.13
IRM-GAME65.69±0.428.75±0.1465.91±0.293.74±0.09
REX67.42±0.293.76±0.0767.82±0.313.26±0.16
BIRM67.93±0.313.81±0.1167.75±0.263.81±0.11
SPARSEIRM67.72±0.283.65±0.0867.89±0.303.12±0.15
FISHR67.49±0.394.37±0.1067.33±0.244.49±0.16
Small BatchIRMv168.33±0.312.04±0.0568.76±0.311.45±0.09
IRMv068.37±0.281.32±0.0969.07±0.271.36±0.06
IRM-GAME67.73±0.241.67±0.1467.49±0.321.82±0.13
REX68.42±0.291.65±0.0768.66±0.221.29±0.08
BIRM68.71±0.211.35±0.0968.64±0.321.44±0.13
SPARSEIRM68.81±0.251.72±0.0568.29±0.221.28±0.15
FISHR68.69±0.192.13±0.0868.79±0.171.77±0.10
+ +a thorough comparison between the originally-used full-batch IRM methods and their small-batch counterparts. In addition, we present the performance of ERM and ERM-grayscale (we call it 'grayscale'), where the latter is ERM on uncolored data. In the absence of any spurious correlation in the training set, grayscale gives the best performance. As discussed in Section 4 & 6.1, the IRM performance is measured by the average accuracy and the accuracy gap across 19 testing environments, parameterized by the environment bias parameter $\beta \in \{0.05,\dots ,0.95\}$ . We make some key observations from Table 4. First, small batch size helps improve all the existing IRM methods consistently, evidenced by the $1\% \sim 3\%$ improvement in average accuracy. Second, the small-batch IRM training significantly reduces the variance of invariant predictions across different testing environments, evidenced by the decreased accuracy gap. This implies that the small-batch IRM training can also help resolve the limitation of multi-environment evaluation for the existing IRM methods, like the sensitivity of IRM-GAME accuracy to $\beta$ in Figure 2. Third, we observe that IRMv0, which does not seem to be useful in the large batch setting, becomes quite competitive with the other baselines in the small-batch setting. Thus, large-batch could suppress the IRM performance for some methods. In the rest of the experiments, we stick to the small-batch implementation of IRM training. + +BLOC-IRM outperforms IRM baselines in various datasets. Next, Table 5 demonstrates the effectiveness of our proposed BLOC-IRM approach versus ERM and existing IRM baselines across all the 7 datasets listed in Table 2. Evidently, BLOC-IRM yields a higher average accuracy compared to all the baselines, together with the smallest accuracy gap in most cases. 
Additionally, we observe that CELEBA, PACS and VLCS are much more challenging datasets for capturing invariance through IRM, as evidenced by the small performance gap between ERM and IRM methods. In + +Table 5: IRM performance comparison between BLOC-IRM and other baselines. We use ResNet-18 (He et al., 2016) for all the datasets. The evaluation setup is consistent with Table 4, and the best performance per-dataset is highlighted in bold. We present the results with the full dataset list in Table A1. + +
Algorithm Metrics (%)COLORED-OBJECTCIFAR-MNISTCELEBAVLCSPACS
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
ERM41.11±1.4486.43±2.8940.39±1.3285.53±2.3372.38±0.2910.73±0.3663.23±0.2312.39±0.3569.95±0.3514.32±0.75
IRMv164.42±0.214.18±0.2961.49±0.297.17±0.3372.49±0.3810.15±0.2762.72±0.2912.74±0.2768.93±0.3314.99±0.51
IRMv062.39±0.255.36±0.3160.14±0.188.83±0.3972.42±0.3510.43±0.3862.59±0.3212.99±0.3668.72±0.2915.29±0.71
IRM-GAME62.88±0.345.59±0.2860.44±0.316.72±0.4172.18±0.4412.32±0.4162.31±0.3813.37±0.6268.12±0.2215.77±0.66
REX63.37±0.355.42±0.3162.32±0.245.55±0.3272.34±0.2610.31±0.2363.19±0.3112.87±0.3169.43±0.3415.31±0.67
BIRM65.11±0.273.31±0.2262.99±0.355.23±0.3672.93±0.289.92±0.3363.33±0.4012.13±0.2369.34±0.2515.76±0.49
SPARSEIRM64.97±0.393.97±0.2562.16±0.294.14±0.3172.42±0.339.79±0.2162.86±0.2612.79±0.3569.52±0.3915.81±0.82
FISHR64.07±0.234.41±0.2961.79±0.255.55±0.2172.89±0.259.42±0.3263.44±0.3711.93±0.4270.21±0.2214.52±0.43
BLOC-IRM65.97±0.334.10±0.3663.69±0.324.89±0.3673.35±0.328.79±0.2163.62±0.3511.55±0.3270.31±0.2114.73±0.65
+ +particular, all the IRM methods, except FISHR and BLOC-IRM, could even be worse than ERM on PACS and VLCS. Here, we echo and extend the findings of Krueger et al. (2021, Section 4.3). However, we also show that BLOC-IRM is a quite competitive IRM variant when applied to realistic domain generalization datasets. We also highlight that the CELEBA experiment is newly constructed and performed in our work for invariance evaluation. Like PACS and VLCS, this experiment also shows that ERM is a strong baseline, and among IRM-based methods, BLOC-IRM is the best-performing, both in terms of accuracy and variance of invariant predictions. + +IRM against model size and training environment variation. Furthermore, we investigate the effect of model size and training environment diversity on the IRM performance. The recent works (Lin et al., 2022; Zhou et al., 2022b) have empirically shown that IRMv1 may suffer a significant performance loss when trained over large-sized neural network models, and thus developed BIRM and SPARSEIRM approaches as advancements of IRMv1. Inspired by these works, Figure 4 presents the sensitivity of invariant prediction to model size for different IRM methods on COLORED-MNIST. Here the model size is controlled by the dimension of the intermediate layer (denoted by $d$ ) in MLP, and the default dimension is $d = 390$ (i.e., the vertical dotted line in Figure 4), which was used in (Arjovsky et al., 2019) and followed in the subsequent literature. As we can see, when $d > 390$ , nearly all the studied IRM methods (including BLOC-IRM) suffer a performance drop. 
Yet, as $d \geq 800$ , from the perspective of prediction accuracy and model resilience together, the top-3 best IRM methods with model size resilience are BIRM, SPARSEIRM, and BLOC-IRM, although we did not intentionally design BLOC-IRM to resist performance degradation against model + +![](images/3d11bbd572a5a8ea881ce0606f8b523c5dcb5609512320a723945579f90a497e.jpg) +Figure 4: IRM performance on COLORED-MNIST against the layer dimension in MLP. The dotted line represents the default dimension $(d = 390)$ used in the literature. The invariant prediction accuracy is presented via the dot line (mean). The results are based on 10 independent trials and we report the variance in Figure A4. + +We also show more experiment results in the Appendix. In Table A2, we study IRM with different numbers of training environment configurations and observe the consistent improvement of BLOC-IRM over other baselines. In Table A4 we show that the performance of invariant prediction degrades, if additional covariate shifts (class, digit, and color imbalances on COLORED-MNIST) are imposed on the training environments following Krueger et al. (2021, Section 4.1) and also demonstrate that BLOC-IRM maintains the accuracy improvement over baselines with each variation. In Table A5, we compare the performance of different methods in the failure cases of IRM pointed out by (Kamath et al., 2021) and show the consistent improvement brought by BLOC-IRM. + +# 7 CONCLUSION + +In this work, we investigate existing IRM methods and reveal long-standing but chronically overlooked challenges involving IRM training and evaluation, which may lead to sub-optimal solutions and incomplete invariance assessment. As a remedy, we propose small-batch training and multi-environment evaluation. We reexamine the IRM-GAME method through the lens of consensus-constrained BLO, and develop a novel IRM variant, termed BLOC-IRM. 
We conducted extensive experiments on 7 datasets and demonstrate that BLOC-IRM consistently improves all baselines. + +# ACKNOWLEDGEMENT + +The work of Y. Zhang and S. Liu was partially supported by National Science Foundation (NSF) Grant IIS-2207052. The work of M. Hong was supported by NSF grants CNS-2003033 and CIF-1910385. The computing resources used in this work were partially supported by the MIT-IBM Watson AI Lab and the Institute for Cyber-Enabled Research (ICER) at Michigan State University. + +# REPRODUCIBILITY STATEMENT + +The authors have made an extensive effort to ensure the reproducibility of algorithms and results presented in the paper. First, the details of the experiment settings have been elaborated in Section 6.1 and Appendix C.1. In this paper, seven datasets are studied and the environment generation process for each dataset is described with details in Appendix A. The evaluation metrics are also clearly introduced in Section 3. Second, eight IRM-oriented methods (including our proposed BLOC-IRM) are studied in this work. The implementation details of all the baseline methods are clearly presented in Appendix C.2, including the hyper-parameters tuning, model configuration, and used code bases. For our proposed BLOC-IRM, we include all the implementation details in Section 5 and Appendix B.2, including training pipeline in Figure 3 and the pseudo-code in Algorithm A1. Third, all the results are based on 10 independent trials with different random seeds. The standard deviations are also reported to ensure fair comparisons across different methods. Fourth, codes are available at https://github.com/OPTML-Group/BLOC-IRM. + +# REFERENCES + +Faruk Ahmed, Yoshua Bengio, Harm van Seijen, and Aaron Courville. Systematic generalisation with group invariant predictions. In International Conference on Learning Representations, 2020. +Kartik Ahuja, Karthikeyan Shanmugam, Kush Varshney, and Amit Dhurandhar. Invariant risk minimization games. 
Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Laviolette, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. The journal of machine learning research, 17(1):2096-2030, 2016.
Priya Goyal, Piotr Dollár, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch SGD: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017.
Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsubramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Irena Gao, et al. Wilds: A benchmark of in-the-wild distribution shifts. In International Conference on Machine Learning, pp. 5637-5664. PMLR, 2021.
Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), December 2015.
Yi Sun, Xiaogang Wang, and Xiaoou Tang. Deep learning face representation from predicting 10,000 classes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1891-1898, 2014.
4-15, 2022. +Yilun Xu and Tommi Jaakkola. Learning representations that support robust transfer of predictors. arXiv preprint arXiv:2110.09940, 2021. +Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017a. +Yang You, Igor Gitman, and Boris Ginsburg. Scaling SGD batch size to 32k for imagenet training. arXiv preprint arXiv:1708.03888, 6, 2017b. +Yang You, Zhao Zhang, Cho-Jui Hsieh, James Demmel, and Kurt Keutzer. Imagenet training in minutes. In Proceedings of the 47th International Conference on Parallel Processing, pp. 1. ACM, 2018. +Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes. arXiv preprint arXiv:1904.00962, 2019. +Dinghuai Zhang, Kartik Ahuja, Yilun Xu, Yisen Wang, and Aaron Courville. Can subnetwork structure be the key to out-of-distribution generalization? In International Conference on Machine Learning, pp. 12356-12367. PMLR, 2021. +Gaoyuan Zhang, Songtao Lu, Yihua Zhang, Xiangyi Chen, Pin-Yu Chen, Quanfu Fan, Lee Martie, Lior Horesh, Mingyi Hong, and Sijia Liu. Distributed adversarial training to robustify deep neural networks at scale. In Uncertainty in Artificial Intelligence, pp. 2353-2363. PMLR, 2022a. +Xingxuan Zhang, Linjun Zhou, Renzhe Xu, Peng Cui, Zheyan Shen, and Haoxin Liu. Nico++: Towards better benchmarking for domain generalization. arXiv preprint arXiv:2204.08040, 2022b. +Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022a. +Xiao Zhou, Yong Lin, Weizhong Zhang, and Tong Zhang. Sparse invariant risk minimization. In International Conference on Machine Learning, pp. 27222-27244. PMLR, 2022b. 
+ +# APPENDIX + +# A DATASET SELECTION + +Compared to existing work, we expand the dataset types for evaluating the performance of different IRM methods (see Table 2). In addition to the most commonly-used benchmark datasets COLORED-MNIST (Arjovsky et al., 2019) and COLORED-FMNIST (Ahuja et al., 2020), we also consider the datasets CIFAR-MNIST (Lin et al., 2021; Shah et al., 2020) and COLORED-OBJECT (Ahmed et al., 2020; Zhang et al., 2021), which impose artificial spurious correlations, MNIST digit number and object color, into the original CIFAR-10 and COCO Detection datasets, respectively. Furthermore, we consider other three real-world datasets CELEBA (Liu et al., 2015), PACS (Li et al., 2017) and VLCS (Torralba & Efros, 2011), without imposing artificial spurious correlations. Notably, CELEBA was first formalized and introduced to benchmark IRM performance. The recent work (Gulrajani & Lopez-Paz, 2020) showed that when carefully implemented, ERM could outperform IRMv1 in PACS and VLCS. Thus, we regard them as challenging datasets to capture invariance. + +For COLORED-OBJECT dataset, we strictly follow the setting adopted in (Lin et al., 2022) to generate the spurious features. For CIFAR-MNIST we use the class "bird" and "plane" in the dataset CIFAR as the invariant feature, while the digit "0" and "1" in MNIST as the spurious correlation. + +CELEBA dataset is, for the first time, introduced to measure IRM performance. We select the attribute "Smiling" as the invariant label and use the attribute "Hair Color" (blond and black hair) to create a spurious correlation in each environment. + +# B IMPLEMENTATION DETAILS + +# B.1 DETAILS ON LARGE-BATCH OPTIMIZATION ENHANCEMENTS + +$\spadesuit$ IRM-LSGD: We first integrate large-batch SGD (LSGD) with IRM. Following (Goyal et al., 2017), we make two main modifications: (1) scaling up learning rate linearly with batch size, and (2) prepending a warm-up optimization phase to IRM training. 
We use $\tau(\|\pmb{\theta}_{t,i}\|_2^2) = \min\{\max\{\|\pmb{\theta}_{t,i}\|_2^2, c_l\}, c_u\}$ as the scaling factor of the adaptive learning rate $\frac{\eta_t}{\|\mathbf{u}_{t,i}\|_2^2}$ . We use $c_l = 0$ and $c_u = 1$ in our experiments.
2: for Step $t = 0,1,\ldots$ do
\frac {d \ell^ {(e)} (\mathbf {w} \odot \boldsymbol {\theta})}{d \mathbf {w}} \right| _ {\boldsymbol {\theta} = \boldsymbol {\theta} _ {t}, \mathbf {w} = \mathbf {w} _ {t} ^ {(e)}} \tag {A3} +$$ + +4: Consensus projection: $\forall e\in \mathcal{E}_{\mathrm{tr}},\mathbf{w}_{t + 1}^{(e)} = \mathbf{w}_{t + 1}^{*} = \frac{1}{N}\sum_{e\in \mathcal{E}_{\mathrm{tr}}}\tilde{\mathbf{w}}_{t + 1}^{(e)}$ +5: Upper-level: update feature extractor with stationary penalty: + +$$ +\boldsymbol {\theta} _ {t + 1} = \boldsymbol {\theta} _ {t} - \eta_ {t} \sum_ {e \in \mathcal {E} _ {\mathrm {t r}}} \frac {d}{d \boldsymbol {\theta}} \left(\ell^ {(e)} (\mathbf {w} \odot \boldsymbol {\theta}) + \gamma_ {t} \| \nabla_ {\mathbf {w}} \ell^ {(e)} (\mathbf {w} \odot \boldsymbol {\theta}) \| _ {2} ^ {2}\right) \Big | _ {\boldsymbol {\theta} = \boldsymbol {\theta} _ {t}, \mathbf {w} = \mathbf {w} _ {t + 1} ^ {*}} \tag {A4} +$$ + +6: end for + +# C EXPERIMENTATION + +# C.1 ENVIRONMENT SETUP + +As proposed in Section 4, we use the multi-environment evaluation metric unless specified otherwise. To capture both the accuracy and variance of invariant predictions across multiple testing environments, the average accuracy and the accuracy gap (the difference between the best-case and worst-case accuracy) are evaluated for IRM methods. + +Specifically, for the COLORED-MNIST, COLORED-FMNIST, COLORED-OBJECT, CIFAR-MNIST, and CELEBA dataset, we manually create 19 test environments with uniformly sampled bias parameter $\beta \in \{0.05, 0.1, \dots, 0.95\}$ , where the environment bias parameter $\beta$ controls the spurious correlation (see Section 4 for more details). + +For VLCS and PACS datasets, the training and test sets have 4 environments, namely {CALTECH, LABELME, PASCAL, SUN} and {art painting, cartoon, sketch, photo} respectively. 
We use the first three environments as the training environments, while we use the test set of all four environments to form our proposed multi-environment invariance evaluation system. + +# C.2 BASELINES + +For each baseline method, we follow its official PyTorch repository except IRM-GAME and SPARSEIRM. We translate the TensorFlow-based original code base of IRM-GAME to PyTorch. As one of the latest IRM advancements, the official code of SPARSEIRM is not yet publicly available. Therefore, we reproduce SPARSEIRM in PyTorch. + +In particular, for COLORED-MNIST and COLORED-FMNIST, we stick to the original hyperparameters for the large-batch setting and tune the hyper-parameters of each method, including the penalty weight, number of warm-up epochs, and learning rate for the small batch setting. + +In particular, for the large-batch setting, we use the penalty weight of $10^{6}$ , 190 warm-up epochs, and 500 epochs in total, as suggested by the original IRMv1 and inherited by its variants. For the small-batch setting, we adopt the same penalty weight $10^{6}$ . Further, we found that the warm-up + +phase could be shortened without sacrificing accuracy. Therefore, we use 50 warm-up epochs and total 200 epochs for all the methods. + +For other datasets, we adopt the batch size of 128 and use ResNet-18 as the default model architecture. We train for 200 epochs. We adopt the step-wise learning rate scheduler with an initial learning rate of 0.1. The learning rate decays by 0.1 at the 100th and 150th epochs. + +# C.3 ADDITIONAL EXPERIMENT RESULTS + +The influence of batch size with all the baselines. We show in Figure A1 the influence of training batch size on the performance of different methods. We observe in Figure A1, as in Figure 1, that full batch setting does not achieve the best performance, and the use of mini-batch (stochastic gradient descent) indeed improves performance. 
+ +![](images/039def54e2becbaec79d1d03a53933990c3fed2cbd66dda2235a21b0bc50bf3a.jpg) +Figure A1: The performance of all the baselines in this work trained with different batch sizes on COLORED-MNIST dataset. The full data batch-size is 50k. The invariant accuracy corresponds to the average accuracy evaluated based on the diversified environments-based evaluation metric. + +![](images/ef8217039f5f005b3bd69e9a9794f7e3b7c1686ce84d77fdda027d9d0567a5f7.jpg) +Figure A2: The loss landscapes of invariant prediction models acquired by (A) large-batch IRMv1 training with 50k batch size and (B) small-batch training with 1k batch size. The 2D loss landscape visualization is realized using the tool in (Li et al., 2018). The $x$ and $y$ axes represent the linear interpolation coefficients over two directional vectors originated from the converged local optima. Here the numbers on the contour denote the loss values over test data. + +![](images/de052964e98aadf5f59cf377fff58ac0c1a4646431936e9149896a2f2230471d.jpg) + +Loss landscapes of IRMv1 with different batch sizes. We plot the loss landscapes of the models trained with IRMv1 on COLORED-MNIST using large (full) and small batch in Figure A2. Using small batch training, IRMv1 (Fig. A2B) converges to a smooth neighborhood of a local optima. This also corresponds to a flatter loss landscape than the landscape of the large-batch training (Figure A2(A)). The loss landscapes demonstrate consistent results as other experiments discussed in Section 3. + +Training trajectory with BLOC-IRM with and without stationary loss. In Figure A3, we plot the per-environment training trajectory of stationary loss when solving (1) and (BLOC-IRM) on COLORED-MNIST. For (BLOC-IRM) we use the regularization term $\lambda = 10^6$ , which is aligned with the penalty coefficient used in IRMv1. As we can see, without the stationarity regularization, + +the stationary loss remains at a high level for both environments (the dotted curves). 
Notably, the lower-level stationary can be reached fast with the stationarity penalty, as shown in the solid curves. + +![](images/337a64114140457585dd9b2b577dd0fd6b39b0f199f7682f05dfdd4e20a40c7a.jpg) +Figure A3: The per-environment training trajectory for the stationarity loss of (1) and (BLOC-IRM) on COLORED-MNIST. The training setting is the same as Figure 2. The algorithmic details can be found in Appendix B. + +Performance of all the methods with full dataset list. We show in Table A1 the results of all the methods on the seven datasets we studied. To be more specific, in Table A1, we append the results of COLORED-MNIST and COLORED-FMNIST into Table 5 as a whole. As we can see, our methods outperforms other baselines in all the datasets in terms of average accuracy, and stands top in most cases in terms of the accuracy gap. + +Table A1: IRM performance comparison between our proposed BLOC-IRM method and other baselines under the full list of datasets. We use MLP for COLORED-MNIST and COLORED-FMNIST, and ResNet-18 (He et al., 2016) for the rest datasets. The evaluation setup is consistent with Table 4, and the best performance per-evaluation metric and per-dataset is highlighted in bold. + +
Algorithm Metrics (%)COLORED-MNISTCOLORED-FMNISTCOLORED-OBJECTCIFAR-MNISTCELEBAVLCSPACS
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
ERM49.19±1.8990.72±2.0849.77±1.7188.62±2.4941.11±1.4486.43±2.8940.39±1.3285.53±2.3372.38±0.2910.73±0.3663.23±0.2312.39±0.3569.95±0.3514.32±0.75
IRMv168.33±0.312.04±0.0568.76±0.311.45±0.0964.42±0.214.18±0.2961.49±0.297.17±0.3372.49±0.3810.15±0.2762.72±0.2912.74±0.2768.93±0.3314.99±0.51
IRMv068.37±0.281.32±0.0969.07±0.271.36±0.0662.39±0.255.36±0.3160.14±0.188.83±0.3972.42±0.3510.43±0.3862.59±0.3212.99±0.3668.72±0.2915.29±0.71
IRM-GAME67.73±0.241.67±0.1467.49±0.321.82±0.1362.88±0.345.59±0.2860.44±0.316.72±0.4172.18±0.4412.32±0.4162.31±0.3813.37±0.6268.12±0.2215.77±0.66
REX68.42±0.291.65±0.0768.66±0.221.29±0.0863.37±0.355.42±0.3162.32±0.245.55±0.3272.34±0.2610.31±0.2363.19±0.3112.87±0.3169.43±0.3415.31±0.67
BIRM68.71±0.211.35±0.0968.64±0.321.44±0.1365.11±0.273.31±0.2262.99±0.355.23±0.3672.93±0.289.92±0.3363.33±0.4012.13±0.2369.34±0.2515.76±0.49
SPARSEIRM68.81±0.251.72±0.0568.29±0.221.28±0.1564.97±0.393.97±0.2562.16±0.294.14±0.3172.42±0.339.79±0.2162.86±0.2612.79±0.3569.52±0.3915.81±0.82
FISHR68.69±0.192.13±0.0868.79±0.171.77±0.1064.07±0.234.41±0.2961.79±0.255.55±0.2172.89±0.259.42±0.3263.44±0.3711.93±0.4270.21±0.2214.52±0.43
BLOC-IRM69.47±0.241.04±0.0769.43±0.211.14±0.1165.97±0.334.10±0.3663.69±0.324.89±0.3673.35±0.328.79±0.2163.62±0.3511.55±0.3270.31±0.2114.73±0.65
+ +Experiment on different model sizes. We show in Figure A4 the influence of the increasing model size on the performance of different baselines considered in this work. Compared to Figure 4, we report additional standard deviation of the 10 independent trials in Figure A4. + +![](images/f3c2dd9be52dd33649836e9caba9cd6238b137e020dcb52b708b55ea9a23b2e1.jpg) +Figure A4: IRM performance on COLORED-MNIST against the dimension of the intermediate layer in MLP. The dotted line represents the default dimension ( $d = 390$ ) used in the literature. The invariant prediction accuracy is presented via the dot line (mean) and shaded area (standard deviation) over 10 random trials. + +Experiment with different training environments. In Table A2, we show the performance of all the methods in more complex training environments, such as more training environments and more skewed environment bias parameter $\beta$ . As we can see, BLOC-IRM outperforms other baselines. + +Table A2: Performance under different training environments in COLORED-MNIST. + +
Environment Metrics (%)\( {p}_{\text{tr }} \in \{ {0.1},{0.15}\} \)\( {p}_{\text{tr }} \in \{ {0.1},{0.15},{0.2}\} \)
Avg AccAcc GapAvg AccAcc Gap
OPTIMUM75.000.0075.000.00
GRAYSCALE73.82±0.110.37±0.0573.97±0.140.29±0.08
ERM49.21±0.7991.88±3.3149.03±0.9392.17±3.04
IRMv167.36±0.312.77±0.1567.11±0.34\( {2.42} \pm {0.12} \)
IRMv067.01±0.42\( {2.85} \pm {0.18} \)66.71±0.42\( {2.36} \pm {0.19} \)
IRM-GAME66.39±0.724.47±0.6165.93±0.53\( {4.25} \pm {0.84} \)
REX66.82±0.44\( {2.59} \pm {0.11} \)67.14±0.38\( {2.16} \pm {0.13} \)
BIRM67.35±0.39\( {2.65} \pm {0.10} \)68.05±0.43\( {1.99} \pm {0.07} \)
SPARSEIRM67.12±0.53\( {2.33} \pm {0.18} \)67.72±0.41\( {2.11} \pm {0.19} \)
FISHR67.22±0.43\( {2.44} \pm {0.15} \)67.32±0.39\( {2.59} \pm {0.15} \)
BLOC-IRM68.72±0.41\( {2.19} \pm {0.15} \)68.89±0.31\( {2.39} \pm {0.09} \)
+ +BLOC-IRM with different regularizations. Based on the penalty terms used in the existing IRM variants, we explore the performance of our proposed BLOC-IRM with various regularization, including the ones used in IRMv1 (BLOC-IRM-v1), REX (BLOC-IRM-REX), and FISHR (BLOC-IRM-FISHR). We conduct experiments on three different datasets and the results are shown in Table A3. It is obvious that the best performance is always achieved when the per-environment stationarity is penalized in the upper-level. This is not surprising since without an explicit promotion of stationarity, other forms of penalties do not guarantee the BLO algorithm to achieve an optimal solution. + +Table A3: The performance of BLOC-IRM with different regularization terms. Three datasets are studied and the latest baseline SPARSEIRM is listed as reference for comparison. The best performance per-evaluation metric and per-dataset is highlighted in **bold**. + +
Dataset MetricsCOLORED-MNISTCOLORED-OBJECTCIFAR-MNIST
Avg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
SPARSEIRM68.81±0.251.72±0.0564.97±0.393.97±0.2562.87±0.294.14±0.31
BLOC-IRM69.47±0.241.04±0.0765.97±0.334.10±0.3663.69±0.324.89±0.36
BLOC-IRM-v167.14±0.244.33±0.8363.38±0.296.31±0.5161.13±0.516.71±0.41
BLOC-IRM-REX62.71±0.218.74±1.2160.31±0.337.62±0.6659.39±0.557.89±0.45
BLOC-IRM-FISHR63.25±0.167.12±0.3961.17±0.346.98±0.4560.86±0.516.63±0.30
+ +Performance comparison of different methods with additional covariate shifts. Besides the sensitivity check on model size, Table A4 examines the resilience of IRM to variations in the training environment. This study is motivated by Krueger et al. (2021), who empirically showed that the performance of invariant prediction degrades if additional covariate shifts are imposed on the training environments. Thus, we present the IRM performance on COLORED-MNIST by introducing class, digit, and color imbalances, following Krueger et al. (2021, Section 4.1). Compared with Table 4, IRM suffers a greater performance loss in Table A4, in the presence of training environment variations. However, the proposed BLOC-IRM maintains the accuracy improvement over baselines with each variation. In Table A2, we also study IRM with different numbers of training environments and observe the consistent improvement of BLOC-IRM over other baselines. + +Exploration on the failure cases of previous IRM methods. Some papers either theoretically (Rosenfeld et al., 2020) or empirically (Kamath et al., 2021) pointed out that the original IRMv1 method could fail in certain circumstances, due to the fact that the regularization term used in IRMv1 heavily relies on the "linear predictor" assumption. Regarding this issue, we first bring to attention that the BLOC-IRM formulation does not require the predictors to be linear, since we adopt the regularization in the form of IRMv0 in the upper-level objective, not IRMv1. To justify our argument, we repeat the experiments in (Kamath et al., 2021), which points out a specific scenario using the COLORED-MNIST dataset where IRMv1 fails. + +Table A4: IRM performance on COLORED-MNIST and COLORED-FMNIST with training environment variations in terms of class, digit and color imbalances. The best IRM performance per-evaluation metric and per-variation source is highlighted in bold. + +
DatasetCOLORED-MNISTCOLORED-FMNIST
Variation Metrics (%)Class ImbalanceDigit ImbalanceColor ImbalanceClass ImbalanceDigit ImbalanceColor Imbalance
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
GRAYSCALE71.23±0.182.76±0.1170.31±0.212.79±0.1572.29±0.162.88±0.1470.15±0.212.29±0.1269.92±0.152.72±0.2173.31±0.111.17±0.23
ERM43.72±1.0192.76±1.4545.89±2.8291.65±1.8646.19±2.8890.88±1.6941.72±1.9893.37±2.1542.39±2.3992.23±2.7245.89±0.2791.31±2.27
IRMv165.39±0.224.44±0.2964.89±0.264.19±0.4466.12±0.253.31±0.2962.49±0.334.93±0.4561.88±0.235.54±0.3964.39±0.443.79±0.33
IRMv065.01±0.284.29±0.3365.13±0.253.87±0.2866.72±0.253.01±0.4462.78±0.485.33±0.4761.62±0.295.29±0.4164.93±0.273.28±0.31
IRM-GAME62.21±0.426.45±0.3562.10±0.356.72±0.4461.82±0.657.78±0.5560.73±0.846.24±0.4360.79±0.456.47±0.8264.32±0.425.73±0.31
REX66.45±0.253.39±0.2866.23±0.433.21±0.2066.99±0.423.32±0.2764.89±0.365.78±0.5363.95±0.254.73±0.6265.87±0.424.30±0.42
BIRM65.73±0.254.11±0.3165.73±0.884.49±0.6766.72±0.243.47±0.2564.39±0.344.47±0.3963.24±0.394.54±0.4265.08±0.313.80±0.29
SPARSEIRM65.32±0.394.92±0.2264.44±0.364.85±0.3366.03±0.322.85±0.1964.32±0.514.15±0.3662.97±0.355.75±0.5264.72±0.463.99±0.39
FISHR66.13±0.283.99±0.3265.87±0.423.72±0.4165.48±0.214.49±0.3163.62±0.535.59±0.3562.47±0.265.72±0.3365.13±0.324.44±0.21
BLOC-IRM66.32±0.273.11±0.2266.41±0.293.32±0.2567.25±0.243.72±0.2765.99±0.313.97±0.4365.13±0.315.11±0.4566.79±0.263.72±0.36
+ +More specifically, the models are trained in the training environments $(\alpha, \beta) = (0.1, 0.2)$ and $(0.1, 0.25)$ , and evaluated in the test environment $(0.1, 0.9)$ . Note that $\alpha$ denotes the label flipping rate and $\beta$ represents the environment bias parameter. The results are shown in Table A5. As we can see, IRMv1 is clearly worse than ERM as it achieves much lower average accuracy and higher accuracy gap. However, BLOC-IRM outperforms ERM by obtaining high average accuracy and lower accuracy gap. This result shows that BLOC-IRM seems promising to address the empirical IRM challenge discovered in (Kamath et al., 2021). In the meantime, we also acknowledge that BLOC-IRM is not per + +fect since the advantage achieved by BLOC-IRM over ERM is not strong enough. However, we stress that the main contribution of BLOC-IRM does not lie in solving the failure cases of IRMv1, but to fix the issue of IRM-Game that resorts to a predictor ensemble to make the invariant prediction, which deviates from the spirit of acquiring invariant predictors in the original IRM paradigm. + +Table A5: Performance comparisons on COLORED-MNIST among ERM, IRMv1, and BLOC-IRM in the scenarios where IRM-variants failed following (Kamath et al., 2021). + +
MethodAvg. Acc.Acc. Gap
ERM83.0913.79
IRMv176.8927.68
BLOC-IRM84.2211.01
+ +A similar curve to Figure 1 on COLORED-FMNIST. We show the results for COLORED-FMNIST similar to Figure 1 in Figure A5 and the conclusion does not change much. As mentioned before, the large-batch training setup was typically used for IRM training over the COLORED-FMNIST and COLORED-FMNIST datasets. + +![](images/14defae9d974e70d514e4e2d0e013149aa8ca0b72a658f25f0b6255938cf8e97.jpg) +Figure A5: The performance of three IRM methods (IRMv1, IRMv0, and REX) vs. batch size under COLORED-FMNIST. \ No newline at end of file diff --git a/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/images.zip b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..3566f9fd4ce7ac4d729cb6319558e58a3bb12086 --- /dev/null +++ b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:132a28f05c0f24f7cd931698f23fe88965125452a4ba32a00bea49c65de180ac +size 822897 diff --git a/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/layout.json b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e25cfd23cd8dc7f67e43b878634ac2163d5579a0 --- /dev/null +++ b/2023/What Is Missing in IRM Training and Evaluation_ Challenges and Solutions/layout.json @@ -0,0 +1,12558 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 504, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 504, + 118 + ], + "type": "text", + "content": "WHAT IS MISSING IN IRM TRAINING AND EVALUATION? 
CHALLENGES AND SOLUTIONS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "spans": [ + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "content": "Yihua Zhang" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "content": ", Pranay Sharma" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "content": ", Parikshit Ram" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "content": ", Mingyi Hong" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "content": ", Kush Varshney" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "content": ", Sijia Liu" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{1,3}" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "content": "Michigan State University, " + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "content": "Carnegie Mellon University, " + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 111, + 133, + 
517, + 159 + ], + "type": "text", + "content": "IBM Research, " + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 111, + 133, + 517, + 159 + ], + "type": "text", + "content": "University of Minnesota" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 276, + 186, + 335, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 186, + 335, + 198 + ], + "spans": [ + { + "bbox": [ + 276, + 186, + 335, + 198 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 209, + 471, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 209, + 471, + 464 + ], + "spans": [ + { + "bbox": [ + 140, + 209, + 471, + 464 + ], + "type": "text", + "content": "Invariant risk minimization (IRM) has received increasing attention as a way to acquire environment-agnostic data representations and predictions, and as a principled solution for preventing spurious correlations from being learned and for improving models' out-of-distribution generalization. Yet, recent works have found that the optimality of the originally-proposed IRM optimization (IRMv1) may be compromised in practice or could be impossible to achieve in some scenarios. Therefore, a series of advanced IRM algorithms have been developed that show practical improvement over IRMv1. In this work, we revisit these recent IRM advancements, and identify and resolve three practical limitations in IRM training and evaluation. First, we find that the effect of batch size during training has been chronically overlooked in previous studies, leaving room for further improvement. We propose small-batch training and highlight the improvements over a set of large-batch optimization techniques. Second, we find that improper selection of evaluation environments could give a false sense of invariance for IRM. 
To alleviate this effect, we leverage diversified test-time environments to precisely characterize the invariance of IRM when applied in practice. Third, we revisit Ahuja et al. (2020)'s proposal to convert IRM into an ensemble game and identify a limitation when a single invariant predictor is desired instead of an ensemble of individual predictors. We propose a new IRM variant to address this limitation based on a novel viewpoint of ensemble IRM games as consensus-constrained bilevel optimization. Lastly, we conduct extensive experiments (covering 7 existing IRM variants and 7 datasets) to justify the practical significance of revisiting IRM training and evaluation in a principled manner." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 482, + 206, + 494 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 482, + 206, + 494 + ], + "spans": [ + { + "bbox": [ + 106, + 482, + 206, + 494 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 506, + 506, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 506, + 606 + ], + "type": "text", + "content": "Deep neural networks (DNNs) have enjoyed unprecedented success in many real-world applications (He et al., 2016; Krizhevsky et al., 2017; Simonyan & Zisserman, 2014; Sun et al., 2014). However, experimental evidence (Beery et al., 2018; De Haan et al., 2019; DeGrave et al., 2021; Geirhos et al., 2020; Zhang et al., 2022b) suggests that DNNs trained with empirical risk minimization (ERM), the most commonly used training method, are prone to reproducing spurious correlations in the training data (Beery et al., 2018; Sagawa et al., 2020). This phenomenon causes performance degradation when facing distributional shifts at test time (Gulrajani & Lopez-Paz, 2020; Koh et al., 2021; Wang et al., 2022; Zhou et al., 2022a). 
In response, the problem of invariant prediction arises to enforce the model trainer to learn stable and causal features (Beery et al., 2018; Sagawa et al., 2020)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": "In pursuit of out-of-distribution generalization, a new model training paradigm, termed invariant risk minimization (IRM) (Arjovsky et al., 2019), has received increasing attention to overcome the shortcomings of ERM against distribution shifts. In contrast to ERM, IRM aims to learn a universal representation extractor, which can elicit an invariant predictor across multiple training environments. However, different from ERM, the learning objective of IRM is highly non-trivial to optimize in practice. Specifically, IRM requires solving a challenging bi-level optimization (BLO) problem with a hierarchical learning structure: invariant representation learning at the upper-level and invariant predictive modeling at the lower-level. Various techniques have been developed to solve IRM effectively, such as (Ahuja et al., 2020; Lin et al., 2022; Rame et al., 2022; Zhou et al., 2022b) to name a few. Despite the proliferation of IRM advancements, several issues in the theory and practice have also appeared. 
For example, recent works (Rosenfeld et al., 2020; Kamath et al.," + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": "2021) revealed the theoretical failure of IRM in some cases. In particular, there exist scenarios where the optimal invariant predictor is impossible to achieve, and the IRM performance may fall behind even that of ERM. Practical studies also demonstrate that the performance of IRM relies on multiple factors, e.g., model size (Lin et al., 2022; Zhou et al., 2022b), environment difficulty (Dranker et al., 2021; Krueger et al., 2021), and dataset type (Gulrajani & Lopez-Paz, 2020)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 506, + 243 + ], + "type": "text", + "content": "Therefore, key challenges remain in deploying IRM to real-world applications. 
In this work, we revisit recent IRM advancements and uncover and tackle several pitfalls in IRM training and evaluation, which have so far gone overlooked. We first identify the large-batch training issue in existing IRM algorithms, which prevents escape from bad local optima during IRM training. Next, we show that evaluation of IRM performance with a single test-time environment could lead to an inaccurate assessment of prediction invariance, even if this test environment differs significantly from training environments. Based on the above findings, we further develop a novel IRM variant, termed BLOC-IRM, by interpreting and advancing the IRM-GAME method (Ahuja et al., 2020) through the lens of BLO with Consensus prediction. Below, we list our contributions (1-3)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 247, + 506, + 442 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 104, + 247, + 504, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 304 + ], + "type": "text", + "content": "1 We demonstrate that the prevalent use of large-batch training leaves significant room for performance improvement in IRM, something chronically overlooked in the previous IRM studies with benchmark datasets COLORED-MNIST and COLORED-FMNIST. By reviewing and comparing with 7 state-of-the-art (SOTA) IRM variants (Table 1), we show that simply using small-batch training improves generalization over a series of more involved large-batch optimization enhancements." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "type": "text", + "content": "We also show that an inappropriate evaluation metric could give a false sense of invariance to IRM. 
Thus, we propose an extended evaluation scheme that quantifies both precision and 'invariance' across diverse testing environments." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 347, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 506, + 392 + ], + "type": "text", + "content": "Further, we revisit and advance the IRM-GAME approach (Ahuja et al., 2020) through the lens of consensus-constrained BLO. We remove the need for an ensemble (one per training environment) of predictors in IRM-GAME by proposing BLOC-IRM (BLO with Consensus IRM), which produces a single invariant predictor." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 397, + 504, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 442 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 442 + ], + "type": "text", + "content": "Lastly, we conduct extensive experiments (on 7 datasets, using diverse model architectures and training environments) to justify the practical significance of our findings and methods. Notably, we conduct experiments on the CELEBA dataset as a new IRM benchmark with realistic spurious correlations. We show that BLOC-IRM outperforms all baselines in nearly all settings." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 469, + 203, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 469, + 203, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 469, + 203, + 480 + ], + "type": "text", + "content": "1.1 RELATED WORK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 495, + 506, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 606 + ], + "type": "text", + "content": "IRM methods. 
Inspired by the invariance principle (Peters et al., 2016), Arjovsky et al. (2019) define IRM as a BLO problem, and develop a relaxed single-level formulation, termed IRMv1, for ease of training. Recently, there has been considerable work to advance IRM techniques. Examples of IRM variants include penalization on the variance of risks or loss gradients across training environments (Chang et al., 2020; Krueger et al., 2021; Rame et al., 2022; Xie et al., 2020; Xu & Jaakkola, 2021; Xu et al., 2022), domain regret minimization (Jin et al., 2020), robust optimization over multiple domains (Xu & Jaakkola, 2021), sparsity-promoting invariant learning (Zhou et al., 2022b), Bayesian inference-baked IRM (Lin et al., 2022), and ensemble game over the environment-specific predictors (Ahuja et al., 2020). We refer readers to Section 2 and Table 1 for more details on the IRM methods that we will focus on in this work." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": "Despite the potential and popularity of IRM, some works have also shown the theoretical and practical limitations of current IRM algorithms. Specifically, Chen et al. (2022); Kamath et al. (2021) show that invariance learning via IRM could fail and be worse than ERM in some two-bit environment setups on COLORED-MNIST, a synthetic benchmark dataset often used in IRM works. The existence of failure cases of IRM is also theoretically shown by Rosenfeld et al. (2020) for both linear and non-linear models. Although subsequent IRM algorithms take these failure cases into account, there still exist huge gaps between theoretically desired IRM and its practical variants. For example, Lin et al. (2021; 2022); Zhou et al. 
(2022b) found many IRM variants incapable of maintaining graceful generalization on large and deep models. Moreover, Ahuja et al. (2021); Dranker et al. (2021) demonstrated that the performance of IRM algorithms could depend on practical details, e.g., dataset size, sample efficiency, and environmental bias strength. The above IRM limitations in" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "spire our work to study when and how we can turn the IRM advancements into effective solutions, to gain high-accuracy and stable invariant predictions in practical scenarios." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 117, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 117, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 117, + 506, + 217 + ], + "type": "text", + "content": "Domain generalization. IRM is also closely related to domain generalization (Carlucci et al., 2019; Gulrajani & Lopez-Paz, 2020; Koh et al., 2021; Li et al., 2019; Nam et al., 2021; Wang et al., 2022; Zhou et al., 2022a). 
Compared to IRM, domain generalization includes a wider range of approaches to improve prediction accuracy against distributional shifts (Beery et al., 2018; Jean et al., 2016; Koh et al., 2021). For example, an important line of research is to improve representation learning by encouraging cross-domain feature resemblance (Long et al., 2015; Tzeng et al., 2014). The studies on domain generalization have also been conducted across different learning paradigms, e.g., adversarial learning (Ganin et al., 2016), self-supervised learning (Carlucci et al., 2019), and meta-learning (Balaji et al., 2018; Dou et al., 2019)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 232, + 271, + 244 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 232, + 271, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 271, + 244 + ], + "type": "text", + "content": "2 PRELIMINARIES AND SETUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 257, + 488, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 257, + 488, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 488, + 269 + ], + "type": "text", + "content": "In this section, we introduce the basics of IRM and provide an overview of our IRM case study." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": "IRM formulation. In the original IRM framework Arjovsky et al. 
(2019), consider a supervised learning paradigm, with datasets " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{D}^{(e)}\\}_{e\\in \\mathcal{E}_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " collected from " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " training environments " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{\\mathrm{tr}} = \\{1,2,\\dots ,N\\}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": ". The training samples in " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\mathcal{D}^{(e)}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " (corresponding to the environment " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": ") are of the form " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "(\\mathbf{x},y)\\in \\mathcal{X}\\times \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\mathcal{Y}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " are, respectively, the raw feature space and the label space. 
IRM aims to find an environment-agnostic data representation " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\phi_{\\theta}:\\mathcal{X}\\to \\mathcal{Z}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": ", which elicits an invariant prediction " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "f_{\\mathbf{w}}:\\mathcal{Z}\\rightarrow \\mathcal{V}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " that is simultaneously optimal for all environments. Here " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " denote model parameters to be learned, and " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " denotes the representation space. Thus, IRM yields an invariant predictor " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "f_{\\mathbf{w}}\\circ \\phi_{\\pmb{\\theta}}:\\mathcal{X}\\to \\mathcal{Y}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " that can generalize to unseen test-time environments " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{D}^{(e)}\\}_{e\\notin \\mathcal{E}_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": ". 
Here " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\circ" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " denotes function composition, i.e., " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "f_{\\mathbf{w}}\\circ \\phi_{\\pmb{\\theta}}(\\cdot) = f_{\\mathbf{w}}(\\phi_{\\pmb{\\theta}}(\\cdot))" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": ". We will use " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\mathbf{w}\\circ \\pmb{\\theta}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": " as a shorthand for " + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "inline_equation", + "content": "f_{\\mathbf{w}}\\circ \\phi_{\\pmb{\\theta}}" + }, + { + "bbox": [ + 104, + 274, + 504, + 388 + ], + "type": "text", + "content": ". 
IRM constitutes the following BLO problem:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 120, + 395, + 504, + 415 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 395, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 120, + 395, + 504, + 415 + ], + "type": "interline_equation", + "content": "\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right); \\quad \\text {s u b j e c t t o} \\quad \\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\in \\underset {\\bar {\\mathbf {w}}} {\\arg \\min } \\ell^ {(e)} \\left(\\bar {\\mathbf {w}} \\circ \\boldsymbol {\\theta}\\right), \\forall e \\in \\mathcal {E} _ {\\mathrm {t r}}, \\quad (\\text {I R M})", + "image_path": "549b6d68f483de315e41530af0adea3522a8cb9af89dfaa278b39343d62fd9ad.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "spans": [ + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "inline_equation", + "content": "\\ell^{(e)}(\\mathbf{w} \\circ \\boldsymbol{\\theta})" + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "text", + "content": " is the per-environment training loss of the predictor " + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "inline_equation", + "content": "\\mathbf{w} \\circ \\boldsymbol{\\theta}" + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "text", + "content": " under " + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "inline_equation", + "content": "\\mathcal{D}^{(e)}" + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "text", + "content": ". 
Clearly, IRM involves two optimization levels that are coupled through the lower-level solution " + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^*(\\boldsymbol{\\theta})" + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "text", + "content": ". Achieving the desired invariant prediction requires the solution sets of the individual lower-level problems " + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "inline_equation", + "content": "\\{\\arg \\min_{\\bar{\\mathbf{w}}} \\ell^{(e)}(\\bar{\\mathbf{w}} \\circ \\boldsymbol{\\theta}), e \\in \\mathcal{E}_{tr}\\}" + }, + { + "bbox": [ + 104, + 419, + 504, + 488 + ], + "type": "text", + "content": " to be non-singleton. However, BLO problems with non-singleton lower-level solution sets are significantly more challenging (Liu et al., 2021). To circumvent this difficulty, Arjovsky et al. (2019) relax (IRM) into a single-level optimization problem (a.k.a., IRMv1):" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 195, + 494, + 504, + 512 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 494, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 195, + 494, + 504, + 512 + ], + "type": "interline_equation", + "content": "\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} [ \\ell^ {(e)} (\\boldsymbol {\\theta}) + \\gamma \\| \\nabla_ {w | w = 1. 
0} \\ell^ {(e)} (w \\circ \\boldsymbol {\\theta}) \\| _ {2} ^ {2} ], \\tag {IRMv1}", + "image_path": "e59606ec560eb60e2bf16d22632ad07b2606312046f0b2a5da205753a9284117.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "inline_equation", + "content": "\\gamma > 0" + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "text", + "content": " is a regularization parameter and " + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "inline_equation", + "content": "\\nabla_{w|w = 1.0}\\ell^{(e)}" + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "text", + "content": " denotes the gradient of " + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "inline_equation", + "content": "\\ell^{(e)}" + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "text", + "content": " with respect to " + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "text", + "content": ", computed at " + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "inline_equation", + "content": "w = 1.0" + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "text", + "content": ". Compared with IRM, IRMv1 is restricted to linear invariant predictors, and penalizes the deviation of individual environment losses from stationarity to approach the lower-level optimality in (IRM). IRMv1 uses the fact that a scalar predictor (" + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "inline_equation", + "content": "w = 1.0" + }, + { + "bbox": [ + 104, + 517, + 504, + 584 + ], + "type": "text", + "content": ") is equivalent to a linear predictor. 
Despite the practical simplicity of (IRMv1), it may fail to achieve the desired invariance (Chen et al., 2022; Kamath et al., 2021)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 590, + 504, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 504, + 656 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 504, + 656 + ], + "type": "text", + "content": "Case study of IRM methods. As illustrated above, the objective of IRM is difficult to optimize, while IRMv1 only provides a sub-optimal solution. Subsequent advances have attempted to reduce this gap. In this work, we focus on 7 popular IRM variants and evaluate their invariant prediction performance over 7 datasets. Table 1 and Table 2 respectively summarize the IRM methods and the datasets considered in this work. We survey the most representative and effective IRM variants in the literature, which will also serve as our baselines in performance comparison." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 661, + 504, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 661, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 661, + 504, + 684 + ], + "type": "text", + "content": "Following Table 1, we first introduce the IRMv0 variant, a generalization of IRMv1, by relaxing its assumption of linearity of the predictor " + }, + { + "bbox": [ + 104, + 661, + 504, + 684 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 104, + 661, + 504, + 684 + ], + "type": "text", + "content": ", yielding" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 197, + 686, + 504, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 686, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 197, + 686, + 504, + 704 + ], + "type": "interline_equation", + "content": "\\underset {\\mathbf {w}, \\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} 
_ {\\mathrm {t r}}} [ \\ell^ {(e)} (\\mathbf {w} \\circ \\boldsymbol {\\theta}) + \\gamma \\| \\nabla_ {\\mathbf {w}} \\ell^ {(e)} (\\mathbf {w} \\circ \\boldsymbol {\\theta}) \\| _ {2} ^ {2} ]. \\tag {IRMv0}", + "image_path": "b8784cf3017e9f4bd229972e7d6f55961295aef05893020921dbbbd800dc081b.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Next, we consider the risk extrapolation method REx (Krueger et al., 2021), an important baseline based on distributionally robust optimization for group shifts (Sagawa et al., 2019). Furthermore," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 173, + 326, + 247 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 326, + 170 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 326, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 326, + 170 + ], + "type": "text", + "content": "Table 1: Summary of the 7 existing IRM variants considered in this work, and the proposed BLOC-IRM method (see Section 5). 
We also list the 7 benchmark datasets used to evaluate IRM performance, namely, COLORED-MNIST (CoM), COLORED-FMNIST (CoF), CIFAR-MNIST (CiM), COLORED-OBJECT (CoO), CELEBA (CA), PACS (P) and VLCS (A). The symbols " + }, + { + "bbox": [ + 104, + 89, + 326, + 170 + ], + "type": "inline_equation", + "content": "\\checkmark" + }, + { + "bbox": [ + 104, + 89, + 326, + 170 + ], + "type": "text", + "content": " signifies the dataset used in the specific reference." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 173, + 326, + 247 + ], + "lines": [ + { + "bbox": [ + 106, + 173, + 326, + 247 + ], + "spans": [ + { + "bbox": [ + 106, + 173, + 326, + 247 + ], + "type": "table", + "html": "
IRM\nMethodVenueDatasetsReference
CoMCoFCiMCoOCAPV
IRMv1arXiv(Arjovsky et al., 2019)
IRMv0N/AThis Work
IRM-GAMEICML(Ahuja et al., 2020)
REXICML(Krueger et al., 2021)
BIRMCVPR(Lin et al., 2022)
SPARSEIRMICML(Zhou et al., 2022b)
FISHRICML(Rame et al., 2022)
OursN/AThis Work
", + "image_path": "027c35cc5c5ca28afaaec8b0e8c129c95774c586f6d2bcccd5d980a4661ff219.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 343, + 130, + 504, + 247 + ], + "blocks": [ + { + "bbox": [ + 342, + 89, + 504, + 130 + ], + "lines": [ + { + "bbox": [ + 342, + 89, + 504, + 130 + ], + "spans": [ + { + "bbox": [ + 342, + 89, + 504, + 130 + ], + "type": "text", + "content": "Table 2: Dataset setups. 'Invariant' and 'Spurious' represent the core and spurious features. 'Env1' and 'Env2' are environments with different spurious correlations." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 343, + 130, + 504, + 247 + ], + "lines": [ + { + "bbox": [ + 343, + 130, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 343, + 130, + 504, + 247 + ], + "type": "table", + "html": "
DatasetInvariantSpuriousEnv 1Env 2
CoMDigitColor
CoFObjectColor
CiMCIFARMNIST
CoOObjectColor
CASmilingHair Color
PObjectTexture
VObjectEnvironment
", + "image_path": "ee501038ae999121b6a3e3872525dc0b3bc4d9a66ce51aa740a8bf44ee1698cf.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 258, + 506, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 413 + ], + "type": "text", + "content": "inspired by the empirical findings that the performance of IRM could be sensitive to model size (Choe et al., 2020; Gulrajani & Lopez-Paz, 2020), we choose the SOTA methods Bayesian IRM (BIRM) (Lin et al., 2022) and sparse IRM (SPARSEIRM) (Zhou et al., 2022b), both of which show improved performance with large models. Also, we consider the SOTA method FISHR (Rame et al., 2022), which modifies IRM to penalize the domain-level gradient variance in single-level risk minimization. FISHR provably matches both domain-level risks and Hessians. Lastly, we include IRM-GAME (Ahuja et al., 2020) as a special variant of IRM. Different from the other methods which seek an invariant predictor, IRM-GAME endows each environment with a predictor, and leverages this ensemble of predictors to achieve invariant representation learning. This is in contrast to other existing works which seek an invariant predictor. Yet, we show in Section 5 that IRM-GAME can be interpreted through the lens of consensus-constrained BLO and generalized for invariant prediction. We also highlight that diverse dataset types are considered in this work (see Table 2) to benchmark IRM's performance. More details on dataset selections can be found in Appendix A." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 430, + 430, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 430, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 430, + 443 + ], + "type": "text", + "content": "3 LARGE-BATCH TRAINING CHALLENGE AND IMPROVEMENT" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 456, + 504, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 478 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 478 + ], + "type": "text", + "content": "In this section, we demonstrate and resolve the large-batch training challenge in current IRM implementations (Table 1)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 484, + 357, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 357, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 357, + 628 + ], + "type": "text", + "content": "Large-batch optimization causes instabilities of IRM training. Using very large-size batches for model training can result in the model getting trapped near a bad local optima (Keskar et al., 2016). This happens as a result of the lack of stochasticity in the training process, and is known to exist even in the ERM paradigm (Goyal et al., 2017; You et al., 2017a). Yet, nearly all the existing IRM methods follow the training setup of IRMv1 (Arjovsky et al., 2019), which used the full-batch gradient descent (GD) method rather than the mini-batch stochastic gradient descent (SGD) for IRM training over COLORED-MNIST and COLORED-FMNIST. In the following, we show that large-batch training might give a false impression of the relative ranking of IRM performances." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 371, + 484, + 493, + 586 + ], + "blocks": [ + { + "bbox": [ + 371, + 484, + 493, + 586 + ], + "lines": [ + { + "bbox": [ + 371, + 484, + 493, + 586 + ], + "spans": [ + { + "bbox": [ + 371, + 484, + 493, + 586 + ], + "type": "image", + "image_path": "d81db67045654a5d0cb71843aa85d8ea308dbecba3d380258eefcaf3a5d9d075.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 586, + 504, + 627 + ], + "lines": [ + { + "bbox": [ + 362, + 586, + 504, + 627 + ], + "spans": [ + { + "bbox": [ + 362, + 586, + 504, + 627 + ], + "type": "text", + "content": "Figure 1: The performance of three IRM methods (IRMv1, IRMv0, and REX) vs. batch-size under COLORED-MNIST. The full batch-size is " + }, + { + "bbox": [ + 362, + 586, + 504, + 627 + ], + "type": "inline_equation", + "content": "50\\mathrm{k}" + }, + { + "bbox": [ + 362, + 586, + 504, + 627 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 732 + ], + "type": "text", + "content": "We start with an exploration of the impact of batch size on the invariant prediction accuracy of existing IRM methods under COLORED-MNIST. Here the invariant prediction accuracy refers to the averaged accuracy of the invariant predictor applied to diverse test-time environments. We defer its formal description to Section 4. Figure 1 shows the invariant prediction accuracy of three IRM methods IRMv1, IRMv0, and REX vs. the data batch size (see Figure A1 for results of other IRM variants and Figure A5 for COLORED-FMNIST). Recall that the full batch size (50k) was used in the existing IRM implementations (Arjovsky et al., 2019; Krueger et al., 2021). 
As we can see, in the full-batch setup, IRM methods lead to widely different invariant prediction accuracies, where REX and IRMv1 significantly outperform IRMv0. In contrast, in the small-batch case (with size" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "1k), the discrepancy in accuracy across methods vanishes. We see that IRMv0 can be as effective as IRMv1 and other IRM variants (such as REX) only if an appropriate small batch size is used." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "content": "Empirical evidence in Figure 1 shows that large-batch IRM training is less effective than small-batch. This is aligned with the observations in ERM (You et al., 2017b; 2018; 2019), where the lack of stochasticity makes the optimizer difficult to escape from a sharp local minimum. We also justify this issue by visualizing the loss landscapes in Figure A2. 
Notably, the small-batch training enables IRMv1 to converge to a local optimum with a flat loss landscape, indicating better generalization (Keskar et al., 2016)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 506, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 293 + ], + "type": "text", + "content": "Small-batch training is effective versus a zoo of large-batch optimization enhancements. To mitigate the large-batch IRM training issue, we next investigate the effectiveness of both small-batch training and a zoo of large-batch optimization enhancements. Inspired by large-batch training techniques to scale up ERM, we consider Large-batch SGD (LSGD) (Goyal et al., 2017) and Layerwise Adaptive Learning Rate (LALR) (You et al., 2017b; 2018; 2019; Zhang et al., 2022a). Both methods aim to smoothen the optimization trajectory by improving either the learning rate scheduler or the quality of initialization. Furthermore, we adopt sharpness-aware minimization (SAM) (Foret et al., 2020) as another possible large-batch training solution to explicitly penalize the sharpness of the loss landscape. We integrate the above optimization techniques with IRM, leading to the variants IRM-LSGD, IRM-LALR, and IRM-SAM. See Appendix B.1 for more details." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 298, + 334, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 298, + 334, + 440 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 334, + 440 + ], + "type": "text", + "content": "In Table 3, we compare the performance of the simplest small-batch IRM training with that of those large-batch optimization technique-integrated IRM variants (i.e., 'LSGD/LALR/SAM' in the Table). As we can see, the use of large-batch optimization techniques indeed improves the prediction accuracy over the original IRM implementation. 
We also observe that the use of SAM for IRM is consistently better than LALR and LSGD, indicating the promise of SAM to scale up IRM with a large batch size. Yet, the small-batch training protocol consistently outperforms large-batch training across all the IRM variants (see the column 'Small'). Additional experiment results in Section 6 show that small-" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 340, + 361, + 504, + 437 + ], + "blocks": [ + { + "bbox": [ + 337, + 300, + 504, + 361 + ], + "lines": [ + { + "bbox": [ + 337, + 300, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 337, + 300, + 504, + 361 + ], + "type": "text", + "content": "Table 3: Prediction accuracy of IRM methods on COLORED-MNIST using the original large-batch implementation ('Original'), the large-batch optimization-integrated implementations ('LSGD/LALR/SAM'), and the small-batch training recipe ('Small')." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 340, + 361, + 504, + 437 + ], + "lines": [ + { + "bbox": [ + 340, + 361, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 340, + 361, + 504, + 437 + ], + "type": "table", + "html": "
MethodOriginalLSGDLALRSAMSmall
IRMv167.1367.3167.4467.7968.33
IRMv065.3966.4266.7666.9968.37
IRM-GAME65.6965.8265.4766.2367.73
REX67.4267.5367.5967.8268.42
BIRM67.9367.9968.2168.3268.71
SPARSEIRM67.7267.8567.9968.1368.81
FISHR67.8867.8267.9368.1168.69
Average67.0267.2567.3467.6368.44
", + "image_path": "b6c86d9aa658dfce9c60b22230d091397f7113139270a7d5f698cb303a06f18d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 441, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 504, + 453 + ], + "type": "text", + "content": "batch IRM training is effective across datasets, and promotes the invariance achieved by all methods." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 469, + 380, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 380, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 380, + 482 + ], + "type": "text", + "content": "4 MULTI-ENVIRONMENT INVARIANCE EVALUATION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 495, + 504, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 504, + 519 + ], + "type": "text", + "content": "In this section, we revisit the evaluation metric used in existing IRM methods, and show that expanding the diversity of test-time environments would improve the accuracy of invariance assessment." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "text", + "content": "Nearly all the existing IRM methods (including those listed in Table 1) follow the evaluation pipeline used in the vanilla IRM framework (Arjovsky et al., 2019), which assesses the performance of the learned invariant predictor on a single unseen test environment. This test-time environment is significantly different from train-time environments. 
For example, COLORED-MNIST (Arjovsky et al., 2019) suggests a principled way to define two-bit environments, widely-used for IRM dataset curation. Specifically, the COLORED-MNIST task is to predict the label of the handwritten digit groups (digits 0-4 for group 1 and digits 5-9 for group 2). The digit number is also spuriously correlated with the digit color (Table 2). This spurious correlation is controlled by an environment bias parameter " + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "text", + "content": ", which specifies different data environments with different levels of spurious correlation1. In (Arjovsky et al., 2019), " + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "inline_equation", + "content": "\\beta = 0.1" + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "inline_equation", + "content": "\\beta = 0.2" + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "text", + "content": " are used to define two training environments, which sample the color ID by flipping the digit group label with probability " + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "text", + "content": ", respectively. At test time, the invariant accuracy is evaluated on a single, unseen environment with " + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "inline_equation", + "content": "\\beta = 0.9" + }, + { + "bbox": [ + 104, + 522, + 505, + 656 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 660, + 507, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 660, + 507, + 718 + ], + "spans": [ + { + "bbox": [ + 104, + 660, + 507, + 718 + ], + "type": "text", + "content": "However, the prediction accuracy of IRM could be sensitive to the choice of test-time environment (i.e., the value of " + }, + { + "bbox": [ + 104, + 660, + 507, + 718 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 660, + 507, + 718 + ], + "type": "text", + "content": "). For the default test environment " + }, + { + "bbox": [ + 104, + 660, + 507, + 718 + ], + "type": "inline_equation", + "content": "\\beta = 0.9" + }, + { + "bbox": [ + 104, + 660, + 507, + 718 + ], + "type": "text", + "content": ", the predictor performance of three representative IRM methods (IRMv1, IRM-GAME, FISHR) ranked from high to low is IRM-GAME>FISHR>IRMv1. Given this apparent ranking, we explore more diverse test-time environments, generated by " + }, + { + "bbox": [ + 104, + 660, + 507, + 718 + ], + "type": "inline_equation", + "content": "\\beta \\in \\Omega := \\{0.05, 0.1, \\ldots, 0.95\\}" + }, + { + "bbox": [ + 104, + 660, + 507, + 718 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 504, + 732 + ], + "type": "text", + "content": "In the two-bit environment, there exists another environment parameter " + }, + { + "bbox": [ + 117, + 720, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 117, + 720, + 504, + 732 + ], + "type": "text", + "content": " that controls the label noise level." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "spans": [ + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "text", + "content": "Although the train-time bias parameters " + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "inline_equation", + "content": "\\{0.1, 0.2\\}" + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "text", + "content": " belong to " + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "text", + "content": ", test data 
is generated afresh, different from training data. We see in Figure 2A that the superiority of IRM-GAME at " + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "inline_equation", + "content": "\\beta = 0.9" + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "text", + "content": " vanishes for smaller " + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "text", + "content": ". Consequently, for invariant prediction evaluated in other testing environments (e.g., " + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "inline_equation", + "content": "\\beta < 0.4" + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "text", + "content": "), the performance ranking of the same methods becomes IRMV1>FISHR>IRM-GAME. This mismatch of results suggests we measure the 'invariance' of IRM methods against diverse test environments. Otherwise, evaluation with single " + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "text", + "content": " could give a false sense of invariance. In Figure 2B, we present the box plots of prediction accuracies for IRM variants, over the diverse set of testing environments " + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "inline_equation", + "content": "(\\beta \\in \\Omega)" + }, + { + "bbox": [ + 104, + 83, + 272, + 290 + ], + "type": "text", + "content": ". 
Evidently, IRMV1, the oldest" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 290, + 504, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 290, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 290, + 504, + 346 + ], + "type": "text", + "content": "(sub-optimal) IRM method, yields the least variance of invariant prediction accuracies and the best average prediction accuracy, compared to both IRM-GAME and FISHR. To summarize, the new evaluation method, with diverse test environments, enables us to make a fair comparison of IRM methods implemented in different training environment settings. Unless specified otherwise, we use the multi-environment evaluation method throughout this work." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 280, + 84, + 390, + 178 + ], + "blocks": [ + { + "bbox": [ + 280, + 84, + 390, + 178 + ], + "lines": [ + { + "bbox": [ + 280, + 84, + 390, + 178 + ], + "spans": [ + { + "bbox": [ + 280, + 84, + 390, + 178 + ], + "type": "image", + "image_path": "a24b292c97fe77b14b0f86373dec97022fcf16b5cfe6084ea939afc2d489514d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 332, + 178, + 346, + 188 + ], + "lines": [ + { + "bbox": [ + 332, + 178, + 346, + 188 + ], + "spans": [ + { + "bbox": [ + 332, + 178, + 346, + 188 + ], + "type": "text", + "content": "(A)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 390, + 85, + 500, + 178 + ], + "blocks": [ + { + "bbox": [ + 390, + 85, + 500, + 178 + ], + "lines": [ + { + "bbox": [ + 390, + 85, + 500, + 178 + ], + "spans": [ + { + "bbox": [ + 390, + 85, + 500, + 178 + ], + "type": "image", + "image_path": "f9966abd7be911907704e9dd96d7ef1fa68175e53f5b5b0edc6731bd5a53335b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 178, + 465, + 188 + ], + "lines": [ + 
{ + "bbox": [ + 452, + 178, + 465, + 188 + ], + "spans": [ + { + "bbox": [ + 452, + 178, + 465, + 188 + ], + "type": "text", + "content": "(B)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "lines": [ + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "text", + "content": "Figure 2: Performance comparison of IRM variants IRMv1, IRM-GAME, and FISHR on COLORED-MNIST. (A) Evaluation in different test-time environments (corresponding to different " + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "text", + "content": "). " + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "text", + "content": " values used by the two training environments are 0.1, 0.2 respectively. The conventional evaluation is done with the test environment " + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\beta = 0.9" + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "text", + "content": " (see " + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\triangle" + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "text", + "content": "). (B) Box plots of prediction accuracies over diverse test environments corresponding to " + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\beta \\in \\{0.05, 0.1, \\dots, 0.95\\}" + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "text", + "content": ". 
IRMv1 achieves the best average accuracy " + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "inline_equation", + "content": "(67.13\\%)" + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "text", + "content": ", followed by FISHR " + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "inline_equation", + "content": "(67.05\\%)" + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "text", + "content": " and IRM-GAME " + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "inline_equation", + "content": "(65.53\\%)" + }, + { + "bbox": [ + 279, + 188, + 504, + 288 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 362, + 449, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 362, + 449, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 362, + 449, + 374 + ], + "type": "text", + "content": "5 ADVANCING IRM-GAME VIA CONSENSUS-CONSTRAINED BLO" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 388, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 422 + ], + "type": "text", + "content": "In this section, we revisit and advance a special IRM variant, IRM-GAME (Ahuja et al., 2020), which endows each individual environment with a separate prediction head and converts IRM into an ensemble game over these multiple predictors." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 426, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 506 + ], + "type": "text", + "content": "Revisiting IRM-GAME. We first introduce the setup of IRM-GAME following notations used in Section 2. 
The most essential difference between IRM-GAME and the vanilla IRM framework is that the former assigns each environment with an individual classifier " + }, + { + "bbox": [ + 104, + 426, + 504, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^{(e)}" + }, + { + "bbox": [ + 104, + 426, + 504, + 506 + ], + "type": "text", + "content": ", and then relies on the ensemble of these individual predictors, i.e., " + }, + { + "bbox": [ + 104, + 426, + 504, + 506 + ], + "type": "inline_equation", + "content": "\\frac{1}{N}\\sum_{e\\in \\mathcal{E}_{\\mathrm{tr}}}(\\mathbf{w}^{(e)}\\circ \\pmb {\\theta})" + }, + { + "bbox": [ + 104, + 426, + 504, + 506 + ], + "type": "text", + "content": ", for inference. IRM-GAME is in a sharp contrast to IRM, where an environment-agnostic prediction head " + }, + { + "bbox": [ + 104, + 426, + 504, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^*" + }, + { + "bbox": [ + 104, + 426, + 504, + 506 + ], + "type": "text", + "content": " simultaneously optimizes the losses across all environments. Therefore, we raise the following question: Can IRM-GAME learn an invariant predictor?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "text", + "content": "Inspired by the above question, we explicitly enforce invariance by imposing a consensus prediction constraint " + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "inline_equation", + "content": "\\mathcal{C} \\coloneqq \\left\\{\\left(\\bar{\\mathbf{w}}^{(1)}, \\bar{\\mathbf{w}}^{(2)}, \\ldots \\bar{\\mathbf{w}}^{(N)}\\right) \\mid \\bar{\\mathbf{w}}^{(1)} = \\ldots = \\bar{\\mathbf{w}}^{(N)}\\right\\}" + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "text", + "content": " and integrate it with IRM-GAME. 
Here, " + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "inline_equation", + "content": "\\bar{\\mathbf{w}}^{(e)}" + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "text", + "content": " denotes the prediction head for the " + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "text", + "content": "-th environment. Based on the newly-introduced constraint, the ensemble prediction head " + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "inline_equation", + "content": "\\frac{1}{N} \\sum_{e \\in \\mathcal{E}_{\\mathrm{tr}}} \\mathbf{w}^{(e)}" + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "text", + "content": " can be interpreted as the average consensus over " + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "text", + "content": " environments: " + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^* \\coloneqq \\frac{1}{N} \\sum_{e \\in \\mathcal{E}_{\\mathrm{tr}}} \\mathbf{w}^{(e)} = \\arg \\min_{\\{\\bar{\\mathbf{w}}^{(e)}\\}_e \\in \\mathcal{C}} \\sum_{e \\in \\mathcal{E}_{\\mathrm{tr}}} \\| \\bar{\\mathbf{w}}^{(e)} - \\mathbf{w}^{(e)} \\|_2^2" + }, + { + "bbox": [ + 104, + 510, + 506, + 596 + ], + "type": "text", + "content": ". 
With the above consensus interpretation, we can then cast the invariant predictor-baked IRM-GAME as a consensus-constrained BLO problem, extended from (IRM):" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 190, + 604, + 328, + 619 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 604, + 328, + 619 + ], + "spans": [ + { + "bbox": [ + 190, + 604, + 328, + 619 + ], + "type": "interline_equation", + "content": "\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m a z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right)", + "image_path": "05b586639d1e9c16fba838573ccd1e07e2d05bd0e43bd24a405dad3b89a45acd.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 191, + 619, + 504, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 191, + 619, + 504, + 638 + ], + "spans": [ + { + "bbox": [ + 191, + 619, + 504, + 638 + ], + "type": "interline_equation", + "content": "\\text {s u b j e c t} \\quad (\\mathbf {I}): \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta}) \\in \\underset {\\bar {\\mathbf {w}} ^ {(e)}} {\\arg \\min } \\ell^ {(e)} \\left(\\bar {\\mathbf {w}} ^ {(e)} \\circ \\boldsymbol {\\theta}\\right), \\forall e \\in \\mathcal {E} _ {\\mathrm {t r}}, \\tag {1}", + "image_path": "46f2cf6ab91fef1a5ea8bd9da47e6f1b997859d40a955810d40c52457de773f8.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 236, + 639, + 365, + 653 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 639, + 365, + 653 + ], + "spans": [ + { + "bbox": [ + 236, + 639, + 365, + 653 + ], + "type": "interline_equation", + "content": "(\\mathbf {I I}) \\colon \\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) = \\frac {1}{N} \\sum_ {e \\in \\varepsilon_ {\\mathrm {t r}}} \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta}).", + "image_path": 
"94104dda7515057549fbf32350ad6d9f57b16b24b54aa6b84e7923ca8ffccf60.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 658, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 504, + 693 + ], + "type": "text", + "content": "The above contains two lower-level problems: (I) per-environment risk minimization, and (II) projection onto the consensus constraint " + }, + { + "bbox": [ + 104, + 658, + 504, + 693 + ], + "type": "inline_equation", + "content": "(\\{\\mathbf{w}^{(e)}\\} \\in \\mathcal{C})" + }, + { + "bbox": [ + 104, + 658, + 504, + 693 + ], + "type": "text", + "content": ". The incorporation of (II) is intended to ensure the use of invariant prediction head " + }, + { + "bbox": [ + 104, + 658, + 504, + 693 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^*(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 658, + 504, + 693 + ], + "type": "text", + "content": " in the upper-level optimization problem of (1)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "Limitation of (1) and BLOC-IRM. 
In (1), the introduced consensus-constrained lower-level problem might compromise the optimality of the lower-level solution " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^{*}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " to the per-environment (unconstrained) risk minimization problem (I), i.e., violating the per-environment stationarity" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "type": "inline_equation", + "content": "\\| \\nabla_{\\mathbf{w}}\\ell^{(e)}(\\mathbf{w}^* (\\pmb {\\theta})\\circ \\pmb {\\theta})\\| _2^2" + }, + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "type": "text", + "content": " . Figure A3 justifies this side effect. As we can see, the per-environment stationarity is hardly attained at the consensus prediction when solving (1). This is not surprising since a constrained optimization solution might not be a stationary solution to minimizing the (unconstrained) objective function. 
To alleviate this limitation, we improve (1) by explicitly promoting the per-environment stationarity " + }, + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "type": "inline_equation", + "content": "\\| \\nabla_{\\mathbf{w}}\\ell^{(e)}(\\mathbf{w}^* (\\pmb {\\theta})\\circ \\pmb {\\theta})\\| _2^2" + }, + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "type": "text", + "content": " in its upper-level problem through optimization over " + }, + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}" + }, + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "type": "text", + "content": " . This leads to BLOC-IRM (BLO with Consensus IRM):" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 152, + 154, + 504, + 173 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 154, + 504, + 173 + ], + "spans": [ + { + "bbox": [ + 152, + 154, + 504, + 173 + ], + "type": "interline_equation", + "content": "\\underset {\\boldsymbol {\\theta}} {\\text {m i n i m i z e}} \\quad \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\left[ \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right) + \\gamma \\| \\nabla_ {\\mathbf {w}} \\ell^ {(e)} \\left(\\mathbf {w} ^ {*} (\\boldsymbol {\\theta}) \\circ \\boldsymbol {\\theta}\\right) \\| _ {2} ^ {2} \\right] \\tag {BLOC-IRM}", + "image_path": "1a9d285407d492a64456634c64454da5f3d5aa8e29165e5198872e14241814ef.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 153, + 171, + 345, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 171, + 345, + 182 + ], + "spans": [ + { + "bbox": [ + 153, + 171, + 345, + 182 + ], + "type": "text", + "content": "subject to Lower-level problems (I) and (II) in (1)," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 186, + 504, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 504, + 233 + ], + "spans": [ + 
{ + "bbox": [ + 104, + 186, + 504, + 233 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 186, + 504, + 233 + ], + "type": "inline_equation", + "content": "\\gamma > 0" + }, + { + "bbox": [ + 104, + 186, + 504, + 233 + ], + "type": "text", + "content": " is a regularization parameter like IRMv0. Assisted by the (upper-level) prediction stationarity regularization, the consensus prediction (II) indeed simultaneously minimizes the risks of all the environments, supported by the empirical evidence that the convergence of " + }, + { + "bbox": [ + 104, + 186, + 504, + 233 + ], + "type": "inline_equation", + "content": "\\|\\nabla_{\\mathbf{w}}\\ell^{(e)}(\\mathbf{w}^*(\\boldsymbol{\\theta}) \\circ \\boldsymbol{\\theta})\\|_2^2" + }, + { + "bbox": [ + 104, + 186, + 504, + 233 + ], + "type": "text", + "content": " towards 0 along each environment's optimization path (see Figure A3)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 237, + 504, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 237, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 237, + 504, + 297 + ], + "type": "text", + "content": "Further, we elaborate on how the BLOC-IRM problem can be effectively solved using an ordinary BLO solver. First, it is worth noting that although both (IRM) and BLOC-IRM are BLO problems, the latter is easier to solve since the lower-level constraint (I) is unconstrained and separable over environments, and the consensus operation (II) is linear. 
Based on these characteristics, the implicit gradient " + }, + { + "bbox": [ + 104, + 237, + 504, + 297 + ], + "type": "inline_equation", + "content": "\\frac{dw^{*}(\\theta)}{d\\theta}" + }, + { + "bbox": [ + 104, + 237, + 504, + 297 + ], + "type": "text", + "content": " can be directly computed as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 159, + 300, + 504, + 329 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 300, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 159, + 300, + 504, + 329 + ], + "type": "interline_equation", + "content": "\\frac {d \\mathbf {w} ^ {*} (\\boldsymbol {\\theta})}{d \\boldsymbol {\\theta}} = \\frac {1}{N} \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\frac {d \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta})}{d \\boldsymbol {\\theta}}, \\text {s u b j e c t t o} \\mathbf {w} ^ {(e)} (\\boldsymbol {\\theta}) \\in \\underset {\\bar {\\mathbf {w}} ^ {(e)}} {\\arg \\min } \\ell^ {(e)} \\left(\\bar {\\mathbf {w}} ^ {(e)} \\circ \\boldsymbol {\\theta}\\right). \\tag {2}", + "image_path": "74e011ecee8241e390ff839a7173d5ed219a8f3a9d53f1a19576a2ff27f0396e.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "text", + "content": "Since the above lower-level problem is unconstrained, we can call the standard arg min differentiating method, such as implicit function approach (Gould et al., 2016) or gradient unrolling (Liu et al., 2021) to compute " + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "inline_equation", + "content": "\\frac{d\\mathbf{w}^{(e)}(\\boldsymbol{\\theta})}{d\\boldsymbol{\\theta}}" + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "text", + "content": ". 
In our work, we adopt the gradient unrolling method, which approximates " + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^{(e)}(\\boldsymbol{\\theta})" + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "text", + "content": " by a " + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "text", + "content": "-step gradient descent solution, noted by " + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_K^{(e)}(\\boldsymbol{\\theta})" + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "text", + "content": " and then leverages automatic differentiation (AD) to compute the derivative from " + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_K^{(e)}(\\boldsymbol{\\theta})" + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "text", + "content": " to the variable " + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\theta}" + }, + { + "bbox": [ + 104, + 338, + 293, + 491 + ], + "type": "text", + "content": ". Figure 3 shows the working pipeline of BLOC-IRM and its comparison to original IRM and" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 491, + 504, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 504, + 548 + ], + "type": "text", + "content": "IRM-GAME methods. We use " + }, + { + "bbox": [ + 104, + 491, + 504, + 548 + ], + "type": "inline_equation", + "content": "K = 1" + }, + { + "bbox": [ + 104, + 491, + 504, + 548 + ], + "type": "text", + "content": " for the lower-level problem throughout our experiments. We refer readers to Appendix B.2 for more algorithmic details. 
We also explore the performance of our proposed BLOC-IRM with various regularization terms, based on the penalties used in the existing literature. We show the best performance is always achieved when the stationarity is penalized in the upper-level (see Table A3)." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 299, + 337, + 504, + 449 + ], + "blocks": [ + { + "bbox": [ + 299, + 337, + 504, + 449 + ], + "lines": [ + { + "bbox": [ + 299, + 337, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 299, + 337, + 504, + 449 + ], + "type": "image", + "image_path": "0122b18efe16a4aa864ab68166e294a34ab15a4c112c8a7589decd971ce3168f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 454, + 504, + 485 + ], + "lines": [ + { + "bbox": [ + 299, + 454, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 299, + 454, + 504, + 485 + ], + "type": "text", + "content": "Figure 3: Schematic overview of BLOC-IRM over two training environments (red and green), and its comparison to IRM and IRM-GAME." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 563, + 201, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 563, + 201, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 201, + 574 + ], + "type": "text", + "content": "6 EXPERIMENTS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 587, + 504, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 587, + 504, + 622 + ], + "spans": [ + { + "bbox": [ + 104, + 587, + 504, + 622 + ], + "type": "text", + "content": "In this section, we begin by introducing some key experiment setups (with details in Appendix C.1), and then empirically show the effectiveness of our proposed IRM training and evaluation improvements over existing IRM methods across various datasets, models, and learning environments." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 634, + 224, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 634, + 224, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 224, + 645 + ], + "type": "text", + "content": "6.1 EXPERIMENT SETUPS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "Datasets and models. Our experiments are conducted over 7 datasets as referenced and shown in Tables 1, 2. Among these datasets, COLORED-MNIST, COLORED-FMNIST, CIFAR-MNIST, and COLORED-OBJECT are similarly curated, mimicking the pipeline of COLORED-MNIST (Arjovsky et al., 2019), by introducing an environment bias parameter (e.g., " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " for COLORED-MNIST in Section 4) to customize the level of spurious correlation (as shown in Table 2) in different environments. In the CELEBA dataset, we choose the face attribute 'smiling' (vs. 'non-smiling') as the core feature aimed for classification, and regard another face attribute 'hair color' ('blond' vs. 
'dark') as the" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 182 + ], + "type": "text", + "content": "source of spurious correlation imposed on the core feature. By controlling the level of spurious correlation, we then create different training/testing environments in CELEBA. Furthermore, we study PACS and VLCS datasets, which were used to benchmark domain generalization ability in the real world (Borlino et al., 2021). It was recently shown by Gulrajani & Lopez-Paz (2020) that for these datasets, ERM could even be better than IRMv1. Yet, we will show that our proposed BLOC-IRM is a promising domain generalization method, which outperforms all the IRM baselines and ERM in practice. In addition, we follow Arjovsky et al. (2019) in adopting multi-layer perceptron (MLP) as the model for resolving COLORED-MNIST and COLORED-FMNIST problems. In the other more complex datasets, we use the ResNet-18 architecture (He et al., 2016)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 186, + 504, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 504, + 243 + ], + "type": "text", + "content": "Baselines and implementation. Our baselines include 7 IRM variants (Table 1) and ERM, which are implemented using their official repositories if available (see Appendix C.2). Unless specified otherwise, our training pipeline uses the small-batch training setting. By default, we use the batch size of 1024 for COLORED-MNIST and COLORED-FMNIST, and 256 for other datasets. In Section 6.2 below, we also do a thorough comparison of large-batch vs small-batch IRM training." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 247, + 305, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 305, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 305, + 369 + ], + "type": "text", + "content": "Evaluation setup. As proposed in Section 4, we use the multi-environment evaluation metric unless specified otherwise. To capture both the accuracy and variance of invariant predictions across multiple testing environments, the average accuracy and the accuracy gap (the difference of the best-case and worst-case accuracy) are measured for IRM methods. 
The resulting performance is reported in the form " + }, + { + "bbox": [ + 104, + 247, + 305, + 369 + ], + "type": "inline_equation", + "content": "a \\pm b" + }, + { + "bbox": [ + 104, + 247, + 305, + 369 + ], + "type": "text", + "content": ", with mean " + }, + { + "bbox": [ + 104, + 247, + 305, + 369 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 247, + 305, + 369 + ], + "type": "text", + "content": " and standard deviation " + }, + { + "bbox": [ + 104, + 247, + 305, + 369 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 104, + 247, + 305, + 369 + ], + "type": "text", + "content": " computed across 10 independent trials." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 385, + 230, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 385, + 230, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 230, + 396 + ], + "type": "text", + "content": "6.2 EXPERIMENT RESULTS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 407, + 304, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 304, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 304, + 484 + ], + "type": "text", + "content": "Small-batch training improves all existing IRM methods on COLORED-MNIST & COLORED-FMNIST. Recall from Section 3 that all the existing IRM methods (Table 1) adopt full-batch IRM training on COLORED-MNIST & COLORED-FMNIST, which raises the large-batch training problem. In Table 4, we conduct" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 310, + 251, + 504, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 504, + 341 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 504, + 341 + ], + "type": "text", + "content": "Table 4: Performance of existing IRM methods in large and small-batch settings. 
GRAYSCALE refers to ERM on uncolored data, which yields the best prediction (supposing no spurious correlation during training). The IRM performance is evaluated by average accuracy ('Avg Acc') and accuracy gap ('Acc Gap'), in the format mean±std. A higher Avg Acc and lower Acc Gap is preferred. The theoretically optimal performance is " + }, + { + "bbox": [ + 310, + 251, + 504, + 341 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 310, + 251, + 504, + 341 + ], + "type": "text", + "content": " (Arjovsky et al., 2019)." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 312, + 342, + 504, + 472 + ], + "blocks": [ + { + "bbox": [ + 312, + 342, + 504, + 472 + ], + "lines": [ + { + "bbox": [ + 312, + 342, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 312, + 342, + 504, + 472 + ], + "type": "table", + "html": "
Dataset Metrics(%)COLORED-MNISTCOLORED-FMNIST
Avg Acc (↑)Acc Gap (↓)Avg Acc (↑)Acc Gap (↓)
Large BatchGRYSCALE73.39±0.160.32±0.0374.05±0.090.13±0.04
ERM49.19±1.8990.72±2.0849.77±1.7188.62±2.49
IRMv167.13±0.333.43±0.1467.19±0.223.35±0.11
IRMv065.39±0.344.69±0.1866.44±0.283.53±0.13
IRM-GAME65.69±0.428.75±0.1465.91±0.293.74±0.09
REX67.42±0.293.76±0.0767.82±0.313.26±0.16
BIRM67.93±0.313.81±0.1167.75±0.263.81±0.11
SPARSEIRM67.72±0.283.65±0.0867.89±0.303.12±0.15
FISHR67.49±0.394.37±0.1067.33±0.244.49±0.16
Small BatchIRMv168.33±0.312.04±0.0568.76±0.311.45±0.09
IRMv068.37±0.281.32±0.0969.07±0.271.36±0.06
IRM-GAME67.73±0.241.67±0.1467.49±0.321.82±0.13
REX68.42±0.291.65±0.0768.66±0.221.29±0.08
BIRM68.71±0.211.35±0.0968.64±0.321.44±0.13
SPARSEIRM68.81±0.251.72±0.0568.29±0.221.28±0.15
FISHR68.69±0.192.13±0.0868.79±0.171.77±0.10
", + "image_path": "58169d988f131bfa217cda42fc0864dfea1197a1e261577e0667465497329e57.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 484, + 506, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 506, + 661 + ], + "type": "text", + "content": "a thorough comparison between the originally-used full-batch IRM methods and their small-batch counterparts. In addition, we present the performance of ERM and ERM-grayscale (we call it 'grayscale'), where the latter is ERM on uncolored data. In the absence of any spurious correlation in the training set, grayscale gives the best performance. As discussed in Section 4 & 6.1, the IRM performance is measured by the average accuracy and the accuracy gap across 19 testing environments, parameterized by the environment bias parameter " + }, + { + "bbox": [ + 104, + 484, + 506, + 661 + ], + "type": "inline_equation", + "content": "\\beta \\in \\{0.05,\\dots ,0.95\\}" + }, + { + "bbox": [ + 104, + 484, + 506, + 661 + ], + "type": "text", + "content": ". We make some key observations from Table 4. First, small batch size helps improve all the existing IRM methods consistently, evidenced by the " + }, + { + "bbox": [ + 104, + 484, + 506, + 661 + ], + "type": "inline_equation", + "content": "1\\% \\sim 3\\%" + }, + { + "bbox": [ + 104, + 484, + 506, + 661 + ], + "type": "text", + "content": " improvement in average accuracy. Second, the small-batch IRM training significantly reduces the variance of invariant predictions across different testing environments, evidenced by the decreased accuracy gap. 
This implies that the small-batch IRM training can also help resolve the limitation of multi-environment evaluation for the existing IRM methods, like the sensitivity of IRM-GAME accuracy to " + }, + { + "bbox": [ + 104, + 484, + 506, + 661 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 484, + 506, + 661 + ], + "type": "text", + "content": " in Figure 2. Third, we observe that IRMv0, which does not seem to be useful in the large batch setting, becomes quite competitive with the other baselines in the small-batch setting. Thus, large-batch could suppress the IRM performance for some methods. In the rest of the experiments, we stick to the small-batch implementation of IRM training." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "content": "BLOC-IRM outperforms IRM baselines in various datasets. Next, Table 5 demonstrates the effectiveness of our proposed BLOC-IRM approach versus ERM and existing IRM baselines across all the 7 datasets listed in Table 2. Evidently, BLOC-IRM yields a higher average accuracy compared to all the baselines, together with the smallest accuracy gap in most cases. Additionally, we observe that CELEBA, PACS and VLCS are much more challenging datasets for capturing invariance through IRM, as evidenced by the small performance gap between ERM and IRM methods. 
In" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 121, + 504, + 209 + ], + "blocks": [ + { + "bbox": [ + 105, + 89, + 504, + 120 + ], + "lines": [ + { + "bbox": [ + 105, + 89, + 504, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 89, + 504, + 120 + ], + "type": "text", + "content": "Table 5: IRM performance comparison between BLOC-IRM and other baselines. We use ResNet-18 (He et al., 2016) for all the datasets. The evaluation setup is consistent with Table 4, and the best performance per-dataset is highlighted in bold. We present the results with the full dataset list in Table A1." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 121, + 504, + 209 + ], + "lines": [ + { + "bbox": [ + 106, + 121, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 504, + 209 + ], + "type": "table", + "html": "
Algorithm Metrics (%)COLORED-OBJECTCIFAR-MNISTCELEBAVLCSPACS
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
ERM41.11±1.4486.43±2.8940.39±1.3285.53±2.3372.38±0.2910.73±0.3663.23±0.2312.39±0.3569.95±0.3514.32±0.75
IRMv164.42±0.214.18±0.2961.49±0.297.17±0.3372.49±0.3810.15±0.2762.72±0.2912.74±0.2768.93±0.3314.99±0.51
IRMv062.39±0.255.36±0.3160.14±0.188.83±0.3972.42±0.3510.43±0.3862.59±0.3212.99±0.3668.72±0.2915.29±0.71
IRM-GAME62.88±0.345.59±0.2860.44±0.316.72±0.4172.18±0.4412.32±0.4162.31±0.3813.37±0.6268.12±0.2215.77±0.66
REX63.37±0.355.42±0.3162.32±0.245.55±0.3272.34±0.2610.31±0.2363.19±0.3112.87±0.3169.43±0.3415.31±0.67
BIRM65.11±0.273.31±0.2262.99±0.355.23±0.3672.93±0.289.92±0.3363.33±0.4012.13±0.2369.34±0.2515.76±0.49
SPARSEIRM64.97±0.393.97±0.2562.16±0.294.14±0.3172.42±0.339.79±0.2162.86±0.2612.79±0.3569.52±0.3915.81±0.82
FISHR64.07±0.234.41±0.2961.79±0.255.55±0.2172.89±0.259.42±0.3263.44±0.3711.93±0.4270.21±0.2214.52±0.43
BLOC-IRM65.97±0.334.10±0.3663.69±0.324.89±0.3673.35±0.328.79±0.2163.62±0.3511.55±0.3270.31±0.2114.73±0.65
", + "image_path": "8fbf17543825cff1662e9c6b65a367fc4f314bc4a161e7c1a84c438f6107d9ee.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 216, + 506, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 506, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 506, + 294 + ], + "type": "text", + "content": "particular, all the IRM methods, except FISHR and BLOC-IRM, could even be worse than ERM on PACS and VLCS. Here, we echo and extend the findings of Krueger et al. (2021, Section 4.3). However, we also show that BLOC-IRM is a quite competitive IRM variant when applied to realistic domain generalization datasets. We also highlight that the CELEBA experiment is newly constructed and performed in our work for invariance evaluation. Like PACS and VLCS, this experiment also shows that ERM is a strong baseline, and among IRM-based methods, BLOC-IRM is the best-performing, both in terms of accuracy and variance of invariant predictions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "text", + "content": "IRM against model size and training environment variation. Furthermore, we investigate the effect of model size and training environment diversity on the IRM performance. The recent works (Lin et al., 2022; Zhou et al., 2022b) have empirically shown that IRMv1 may suffer a significant performance loss when trained over large-sized neural network models, and thus developed BIRM and SPARSEIRM approaches as advancements of IRMv1. Inspired by these works, Figure 4 presents the sensitivity of invariant prediction to model size for different IRM methods on COLORED-MNIST. 
Here the model size is controlled by the dimension of the intermediate layer (denoted by " + }, + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "text", + "content": ") in MLP, and the default dimension is " + }, + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "inline_equation", + "content": "d = 390" + }, + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "text", + "content": " (i.e., the vertical dotted line in Figure 4), which was used in (Arjovsky et al., 2019) and followed in the subsequent literature. As we can see, when " + }, + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "inline_equation", + "content": "d > 390" + }, + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "text", + "content": ", nearly all the studied IRM methods (including BLOC-IRM) suffer a performance drop. Yet, as " + }, + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "inline_equation", + "content": "d \\geq 800" + }, + { + "bbox": [ + 104, + 298, + 369, + 520 + ], + "type": "text", + "content": ", from the perspective of prediction accuracy and model resilience together, the top-3 best IRM methods with model size resilience are BIRM, SPARSEIRM, and BLOC-IRM, although we did not intentionally design BLOC-IRM to resist performance degradation against model" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 378, + 299, + 499, + 399 + ], + "blocks": [ + { + "bbox": [ + 378, + 299, + 499, + 399 + ], + "lines": [ + { + "bbox": [ + 378, + 299, + 499, + 399 + ], + "spans": [ + { + "bbox": [ + 378, + 299, + 499, + 399 + ], + "type": "image", + "image_path": "3d11bbd572a5a8ea881ce0606f8b523c5dcb5609512320a723945579f90a497e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 373, + 402, + 504, + 503 + ], + "lines": [ + { + "bbox": [ + 373, + 402, + 504, + 503 + ], + "spans": [ + { + 
"bbox": [ + 373, + 402, + 504, + 503 + ], + "type": "text", + "content": "Figure 4: IRM performance on COLORED-MNIST against the layer dimension in MLP. The dotted line represents the default dimension " + }, + { + "bbox": [ + 373, + 402, + 504, + 503 + ], + "type": "inline_equation", + "content": "(d = 390)" + }, + { + "bbox": [ + 373, + 402, + 504, + 503 + ], + "type": "text", + "content": " used in the literature. The invariant prediction accuracy is presented via the dot line (mean). The results are based on 10 independent trials and we report the variance in Figure A4." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 523, + 506, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 506, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 506, + 613 + ], + "type": "text", + "content": "We also show more experiment results in the Appendix. In Table A2, we study IRM with different numbers of training environment configurations and observe the consistent improvement of BLOC-IRM over other baselines. In Table A4 we show that the performance of invariant prediction degrades, if additional covariate shifts (class, digit, and color imbalances on COLORED-MNIST) are imposed on the training environments following Krueger et al. (2021, Section 4.1) and also demonstrate that BLOC-IRM maintains the accuracy improvement over baselines with each variation. In Table A5, we compare the performance of different methods in the failure cases of IRM pointed out by (Kamath et al., 2021) and show the consistent improvement brought by BLOC-IRM." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 625, + 196, + 637 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 196, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 196, + 637 + ], + "type": "text", + "content": "7 CONCLUSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 650, + 504, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 650, + 504, + 717 + ], + "spans": [ + { + "bbox": [ + 104, + 650, + 504, + 717 + ], + "type": "text", + "content": "In this work, we investigate existing IRM methods and reveal long-standing but chronically overlooked challenges involving IRM training and evaluation, which may lead to sub-optimal solutions and incomplete invariance assessment. As a remedy, we propose small-batch training and multi-environment evaluation. We reexamine the IRM-GAME method through the lens of consensus-constrained BLO, and develop a novel IRM variant, termed BLOC-IRM. We conducted extensive experiments on 7 datasets and demonstrate that BLOC-IRM consistently improves all baselines." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 82, + 218, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 218, + 92 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 218, + 92 + ], + "type": "text", + "content": "ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 106, + 504, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 106, + 504, + 150 + ], + "spans": [ + { + "bbox": [ + 107, + 106, + 504, + 150 + ], + "type": "text", + "content": "The work of Y. Zhang and S. Liu was partially supported by National Science Foundation (NSF) Grant IIS-2207052. The work of M. Hong was supported by NSF grants CNS-2003033 and CIF-1910385. The computing resources used in this work were partially supported by the MIT-IBM Watson AI Lab and the Institute for Cyber-Enabled Research (ICER) at Michigan State University." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 167, + 267, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 167, + 267, + 178 + ], + "spans": [ + { + "bbox": [ + 107, + 167, + 267, + 178 + ], + "type": "text", + "content": "REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 191, + 504, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 191, + 504, + 323 + ], + "spans": [ + { + "bbox": [ + 107, + 191, + 504, + 323 + ], + "type": "text", + "content": "The authors have made an extensive effort to ensure the reproducibility of algorithms and results presented in the paper. First, the details of the experiment settings have been elaborated in Section 6.1 and Appendix C.1. In this paper, seven datasets are studied and the environment generation process for each dataset is described with details in Appendix A. The evaluation metrics are also clearly introduced in Section 3. Second, eight IRM-oriented methods (including our proposed BLOC-IRM) are studied in this work. The implementation details of all the baseline methods are clearly presented in Appendix C.2, including the hyper-parameters tuning, model configuration, and used code bases. For our proposed BLOC-IRM, we include all the implementation details in Section 5 and Appendix B.2, including training pipeline in Figure 3 and the pseudo-code in Algorithm A1. Third, all the results are based on 10 independent trials with different random seeds. The standard deviations are also reported to ensure fair comparisons across different methods. Fourth, codes are available at https://github.com/OPTML-Group/BLOC-IRM." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 107, + 24, + 293, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 24, + 293, + 35 + ], + "spans": [ + { + "bbox": [ + 107, + 24, + 293, + 35 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 310, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 759 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 759 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 99, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 105, + 99, + 505, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 99, + 505, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 99, + 505, + 121 + ], + "type": "text", + "content": "Faruk Ahmed, Yoshua Bengio, Harm van Seijen, and Aaron Courville. Systematic generalisation with group invariant predictions. In International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 126, + 504, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 126, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 107, + 126, + 504, + 148 + ], + "type": "text", + "content": "Kartik Ahuja, Karthikeyan Shanmugam, Kush Varshney, and Amit Dhurandhar. Invariant risk minimization games. In International Conference on Machine Learning, pp. 145-155. 
PMLR, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 155, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 155, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 155, + 504, + 186 + ], + "type": "text", + "content": "Kartik Ahuja, Jun Wang, Amit Dhurandhar, Karthikeyan Shanmugam, and Kush R Varshney. Empirical or invariant risk minimization? a sample complexity perspective. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 193, + 504, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 193, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 107, + 193, + 504, + 214 + ], + "type": "text", + "content": "Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 220, + 504, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 220, + 504, + 242 + ], + "spans": [ + { + "bbox": [ + 107, + 220, + 504, + 242 + ], + "type": "text", + "content": "Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using meta-regularization. Advances in neural information processing systems, 31, 2018." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 248, + 504, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 248, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 107, + 248, + 504, + 270 + ], + "type": "text", + "content": "Sara Beery, Grant Van Horn, and Pietro Perona. Recognition in terra incognita. In Proceedings of the European conference on computer vision (ECCV), pp. 456-473, 2018." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 277, + 504, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 277, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 107, + 277, + 504, + 307 + ], + "type": "text", + "content": "Francesco Cappio Borlino, Antonio D'Innocente, and Tatiana Tommasi. Rethinking domain generalization baselines. In 2020 25th International Conference on Pattern Recognition (ICPR), pp. 9227-9233. IEEE, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 315, + 505, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 315, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 106, + 315, + 505, + 346 + ], + "type": "text", + "content": "Fabio M Carlucci, Antonio D'Innocente, Silvia Bucci, Barbara Caputo, and Tatiana Tommasi. Domain generalization by solving jigsaw puzzles. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2229-2238, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 353, + 504, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 353, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 107, + 353, + 504, + 373 + ], + "type": "text", + "content": "Shiyu Chang, Yang Zhang, Mo Yu, and Tommi Jaakkola. Invariant rationalization. In International Conference on Machine Learning, pp. 1448-1458. PMLR, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 380, + 504, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 380, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 107, + 380, + 504, + 403 + ], + "type": "text", + "content": "Yongqiang Chen, Kaiwen Zhou, Yatao Bian, Binghui Xie, Kaili Ma, Yonggang Zhang, Han Yang, Bo Han, and James Cheng. Pareto invariant risk minimization. arXiv preprint arXiv:2206.07766, 2022." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 409, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 409, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 107, + 409, + 504, + 430 + ], + "type": "text", + "content": "Yo Joong Choe, Jiyeon Ham, and Kyubyong Park. An empirical study of invariant risk minimization. arXiv preprint arXiv:2004.05007, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 437, + 504, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 437, + 504, + 458 + ], + "spans": [ + { + "bbox": [ + 107, + 437, + 504, + 458 + ], + "type": "text", + "content": "Pim De Haan, Dinesh Jayaraman, and Sergey Levine. Causal confusion in imitation learning. Advances in Neural Information Processing Systems, 32, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 465, + 504, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 465, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 107, + 465, + 504, + 486 + ], + "type": "text", + "content": "Alex J DeGrave, Joseph D Janizek, and Su-In Lee. Ai for radiographic Covid-19 detection selects shortcuts over signal. Nature Machine Intelligence, 3(7):610-619, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 493, + 504, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 493, + 504, + 523 + ], + "spans": [ + { + "bbox": [ + 107, + 493, + 504, + 523 + ], + "type": "text", + "content": "Qi Dou, Daniel Coelho de Castro, Konstantinos Kamnitsas, and Ben Glocker. Domain generalization via model-agnostic learning of semantic features. Advances in Neural Information Processing Systems, 32, 2019." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 531, + 504, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 531, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 107, + 531, + 504, + 552 + ], + "type": "text", + "content": "Yana Dranker, He He, and Yonatan Belinkov. Irm—when it works and when it doesn't: A test case of natural language inference. Advances in Neural Information Processing Systems, 34:18212-18224, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 559, + 504, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 559, + 504, + 580 + ], + "spans": [ + { + "bbox": [ + 107, + 559, + 504, + 580 + ], + "type": "text", + "content": "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. arXiv preprint arXiv:2010.01412, 2020." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 587, + 505, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 587, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 107, + 587, + 505, + 617 + ], + "type": "text", + "content": "Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Lavoille, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. The journal of machine learning research, 17(1):2096-2030, 2016." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 624, + 504, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 624, + 504, + 655 + ], + "spans": [ + { + "bbox": [ + 107, + 624, + 504, + 655 + ], + "type": "text", + "content": "Robert Geirhos, Jorn-Henrik Jacobsen, Claudio Michaelis, Richard Zemel, Wieland Brendel, Matthias Bethge, and Felix A Wichmann. Shortcut learning in deep neural networks. Nature Machine Intelligence, 2(11): 665-673, 2020." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 663, + 504, + 694 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 663, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 107, + 663, + 504, + 694 + ], + "type": "text", + "content": "Stephen Gould, Basura Fernando, Anoop Cherian, Peter Anderson, Rodrigo Santa Cruz, and Edison Guo. On differentiating parameterized argmin and argmax problems with application to bi-level optimization. arXiv preprint arXiv:1607.05447, 2016." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 107, + 700, + 504, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 700, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 700, + 504, + 731 + ], + "type": "text", + "content": "Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch SGD: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 105 + ], + "type": "text", + "content": "Ishaan Gulrajani and David Lopez-Paz. In search of lost domain generalization. arXiv preprint arXiv:2007.01434, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 505, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 505, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 505, + 134 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 140, + 505, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 505, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 505, + 162 + ], + "type": "text", + "content": "Neal Jean, Marshall Burke, Michael Xie, W Matthew Davis, David B Lobell, and Stefano Ermon. Combining satellite imagery and machine learning to predict poverty. Science, 353(6301):790-794, 2016." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 169, + 505, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 169, + 505, + 191 + ], + "spans": [ + { + "bbox": [ + 105, + 169, + 505, + 191 + ], + "type": "text", + "content": "Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Domain extrapolation via regret minimization. arXiv preprint arXiv:2006.03908, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 198, + 505, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 198, + 505, + 229 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 505, + 229 + ], + "type": "text", + "content": "Pritish Kamath, Akilesh Tangella, Danica Sutherland, and Nathan Srebro. Does invariant risk minimization capture invariance? In International Conference on Artificial Intelligence and Statistics, pp. 4069-4077. PMLR, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 236, + 505, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 505, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 505, + 268 + ], + "type": "text", + "content": "Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. arXiv preprint arXiv:1609.04836, 2016." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 275, + 505, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 275, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 505, + 297 + ], + "type": "text", + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 304, + 505, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 505, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 505, + 336 + ], + "type": "text", + "content": "Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsbramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Irena Gao, et al. Wilds: A benchmark of in-the-wild distribution shifts. In International Conference on Machine Learning, pp. 5637-5664. PMLR, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 342, + 505, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 505, + 365 + ], + "type": "text", + "content": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Communications of the ACM, 60(6):84-90, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 372, + 505, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 372, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 372, + 505, + 403 + ], + "type": "text", + "content": "David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In International Conference on Machine Learning, pp. 5815-5826. PMLR, 2021." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 411, + 505, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 505, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 505, + 433 + ], + "type": "text", + "content": "Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy M Hospedales. Deeper, broader and artier domain generalization. In Proceedings of the IEEE international conference on computer vision, pp. 5542-5550, 2017." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 440, + 505, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 440, + 505, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 505, + 471 + ], + "type": "text", + "content": "Da Li, Jianshu Zhang, Yongxin Yang, Cong Liu, Yi-Zhe Song, and Timothy M Hospedales. Episodic training for domain generalization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 1446-1455, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 478, + 505, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 478, + 505, + 501 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 505, + 501 + ], + "type": "text", + "content": "Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. Advances in neural information processing systems, 31, 2018." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 507, + 505, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 507, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 507, + 505, + 529 + ], + "type": "text", + "content": "Yong Lin, Qing Lian, and Tong Zhang. An empirical study of invariant risk minimization on deep models. In ICML 2021 Workshop on Uncertainty and Robustness in Deep Learning, pp. 7, 2021." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 536, + 505, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 536, + 505, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 505, + 559 + ], + "type": "text", + "content": "Yong Lin, Hanze Dong, Hao Wang, and Tong Zhang. Bayesian invariant risk minimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16021-16030, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 565, + 505, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 565, + 505, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 505, + 596 + ], + "type": "text", + "content": "Risheng Liu, Jiaxin Gao, Jin Zhang, Deyu Meng, and Zhouchen Lin. Investigating bi-level optimization for learning and vision from a unified perspective: A survey and beyond. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 604, + 505, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 604, + 505, + 626 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 626 + ], + "type": "text", + "content": "Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), December 2015." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 632, + 505, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 632, + 505, + 654 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 505, + 654 + ], + "type": "text", + "content": "Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. Learning transferable features with deep adaptation networks. In International conference on machine learning, pp. 97-105. PMLR, 2015." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 662, + 505, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 662, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 505, + 693 + ], + "type": "text", + "content": "Hyeonseob Nam, HyunJae Lee, Jongchan Park, Wonjun Yoon, and Donggeun Yoo. Reducing domain gap by reducing style bias. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8690-8699, 2021." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 700, + 505, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 700, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 505, + 731 + ], + "type": "text", + "content": "Jonas Peters, Peter Buhlmann, and Nicolai Meinshausen. Causal inference by using invariant prediction: identification and confidence intervals. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 78(5):947-1012, 2016." 
+ } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 114 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 114 + ], + "type": "text", + "content": "Alexandre Rame, Coretin Dancette, and Matthieu Cord. Fishr: Invariant gradient variances for out-of-distribution generalization. In International Conference on Machine Learning, pp. 18347-18377. PMLR, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 118, + 505, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 118, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 505, + 140 + ], + "type": "text", + "content": "Elan Rosenfeld, Pradeep Ravikumar, and Andrej Risteski. The risks of invariant risk minimization. arXiv preprint arXiv:2010.05761, 2020." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 144, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 144, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 106, + 144, + 506, + 176 + ], + "type": "text", + "content": "Shiori Sagawa, Pang Wei Koh, Tatsunori B Hashimoto, and Percy Liang. Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization. arXiv preprint arXiv:1911.08731, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 180, + 506, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 180, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 106, + 180, + 506, + 212 + ], + "type": "text", + "content": "Shiori Sagawa, Aditi Raghunathan, Pang Wei Koh, and Percy Liang. An investigation of why overparameterization exacerbates spurious correlations. In International Conference on Machine Learning, pp. 8346-8356. PMLR, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 217, + 506, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 506, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 506, + 247 + ], + "type": "text", + "content": "Harshay Shah, Kaustav Tamuly, Aditi Raghunathan, Prateek Jain, and Praneeth Netrapalli. The pitfalls of simplicity bias in neural networks. Advances in Neural Information Processing Systems, 33:9573-9585, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 253, + 504, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 253, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 504, + 274 + ], + "type": "text", + "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 279, + 504, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 279, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 504, + 301 + ], + "type": "text", + "content": "Yi Sun, Xiaogang Wang, and Xiaou Tang. Deep learning face representation from predicting 10,000 classes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1891-1898, 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 305, + 504, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 305, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 504, + 326 + ], + "type": "text", + "content": "Antonio Torralba and Alexei A Efros. Unbiased look at dataset bias. In CVPR 2011, pp. 1521-1528. IEEE, 2011." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 331, + 504, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 504, + 353 + ], + "type": "text", + "content": "Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 357, + 504, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 504, + 388 + ], + "type": "text", + "content": "Jindong Wang, Cuiling Lan, Chang Liu, Yidong Ouyang, Tao Qin, Wang Lu, Yiqiang Chen, Wenjun Zeng, and Philip Yu. Generalizing to unseen domains: A survey on domain generalization. IEEE Transactions on Knowledge and Data Engineering, 2022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 393, + 504, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 393, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 393, + 504, + 415 + ], + "type": "text", + "content": "Chuanlong Xie, Haotian Ye, Fei Chen, Yue Liu, Rui Sun, and Zhenguo Li. Risk variance penalization. arXiv preprint arXiv:2006.07544, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 419, + 504, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 419, + 504, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 504, + 441 + ], + "type": "text", + "content": "Renzhe Xu, Xingxuan Zhang, Peng Cui, Bo Li, Zheyan Shen, and Jiazheng Xu. Regulatory instruments for fair personalized pricing. In Proceedings of the ACM Web Conference 2022, pp. 4-15, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 446, + 504, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 446, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 446, + 504, + 468 + ], + "type": "text", + "content": "Yilun Xu and Tommi Jaakkola. Learning representations that support robust transfer of predictors. arXiv preprint arXiv:2110.09940, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 472, + 504, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 504, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 504, + 493 + ], + "type": "text", + "content": "Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017a." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 498, + 504, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 498, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 504, + 519 + ], + "type": "text", + "content": "Yang You, Igor Gitman, and Boris Ginsburg. Scaling SGD batch size to 32k for imagenet training. arXiv preprint arXiv:1708.03888, 6, 2017b." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 524, + 504, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 524, + 504, + 546 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 504, + 546 + ], + "type": "text", + "content": "Yang You, Zhao Zhang, Cho-Jui Hsieh, James Demmel, and Kurt Keutzer. Imagenet training in minutes. In Proceedings of the 47th International Conference on Parallel Processing, pp. 1. ACM, 2018." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 550, + 504, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 550, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 504, + 582 + ], + "type": "text", + "content": "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes. arXiv preprint arXiv:1904.00962, 2019." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 586, + 504, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 504, + 617 + ], + "type": "text", + "content": "Dinghuai Zhang, Kartik Ahuja, Yilun Xu, Yisen Wang, and Aaron Courville. Can subnetwork structure be the key to out-of-distribution generalization? In International Conference on Machine Learning, pp. 12356-12367. PMLR, 2021." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 622, + 504, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 622, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 105, + 622, + 504, + 654 + ], + "type": "text", + "content": "Gaoyuan Zhang, Songtao Lu, Yihua Zhang, Xiangyi Chen, Pin-Yu Chen, Quanfu Fan, Lee Martie, Lior Horesh, Mingyi Hong, and Sijia Liu. Distributed adversarial training to robustify deep neural networks at scale. In Uncertainty in Artificial Intelligence, pp. 2353-2363. PMLR, 2022a." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 658, + 504, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 504, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 504, + 680 + ], + "type": "text", + "content": "Xingxuan Zhang, Linjun Zhou, Renzhe Xu, Peng Cui, Zheyan Shen, and Haoxin Liu. Nico++: Towards better benchmarking for domain generalization. arXiv preprint arXiv:2204.08040, 2022b." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 685, + 504, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 685, + 504, + 706 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 504, + 706 + ], + "type": "text", + "content": "Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022a." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 711, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 711, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 504, + 732 + ], + "type": "text", + "content": "Xiao Zhou, Yong Lin, Weizhong Zhang, and Tong Zhang. Sparse invariant risk minimization. In International Conference on Machine Learning, pp. 27222-27244. PMLR, 2022b." 
+ } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 272, + 80, + 339, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 80, + 339, + 94 + ], + "spans": [ + { + "bbox": [ + 272, + 80, + 339, + 94 + ], + "type": "text", + "content": "APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 110, + 238, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 110, + 238, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 238, + 121 + ], + "type": "text", + "content": "A DATASET SELECTION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 133, + 506, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 506, + 255 + ], + "type": "text", + "content": "Compared to existing work, we expand the dataset types for evaluating the performance of different IRM methods (see Table 2). 
In addition to the most commonly-used benchmark datasets COLORED-MNIST (Arjovsky et al., 2019) and COLORED-FMNIST (Ahuja et al., 2020), we also consider the datasets CIFAR-MNIST (Lin et al., 2021; Shah et al., 2020) and COLORED-OBJECT (Ahmed et al., 2020; Zhang et al., 2021), which impose artificial spurious correlations, MNIST digit number and object color, into the original CIFAR-10 and COCO Detection datasets, respectively. Furthermore, we consider other three real-world datasets CELEBA (Liu et al., 2015), PACS (Li et al., 2017) and VLCS (Torralba & Efros, 2011), without imposing artificial spurious correlations. Notably, CELEBA was first formalized and introduced to benchmark IRM performance. The recent work (Gulrajani & Lopez-Paz, 2020) showed that when carefully implemented, ERM could outperform IRMv1 in PACS and VLCS. Thus, we regard them as challenging datasets to capture invariance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 260, + 504, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 260, + 504, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 260, + 504, + 294 + ], + "type": "text", + "content": "For COLORED-OBJECT dataset, we strictly follow the setting adopted in (Lin et al., 2022) to generate the spurious features. For CIFAR-MNIST we use the class \"bird\" and \"plane\" in the dataset CIFAR as the invariant feature, while the digit \"0\" and \"1\" in MNIST as the spurious correlation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 299, + 506, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 299, + 506, + 333 + ], + "spans": [ + { + "bbox": [ + 104, + 299, + 506, + 333 + ], + "type": "text", + "content": "CELEBA dataset is, for the first time, introduced to measure IRM performance. We select the attribute \"Smiling\" as the invariant label and use the attribute \"Hair Color\" (blond and black hair) to create a spurious correlation in each environment." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 348, + 269, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 348, + 269, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 269, + 361 + ], + "type": "text", + "content": "B IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 372, + 392, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 372, + 392, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 372, + 392, + 384 + ], + "type": "text", + "content": "B.1 DETAILS ON LARGE-BATCH OPTIMIZATION ENHANCEMENTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 392, + 506, + 487 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 104, + 392, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\spadesuit" + }, + { + "bbox": [ + 104, + 392, + 504, + 437 + ], + "type": "text", + "content": " IRM-LSGD: We first integrate large-batch SGD (LSGD) with IRM. Following (Goyal et al., 2017), we make two main modifications: (1) scaling up learning rate linearly with batch size, and (2) prepending a warm-up optimization phase to IRM training. We call the LSGD-baked IRM variant IRM-LSGD." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 442, + 506, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 442, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 442, + 506, + 487 + ], + "type": "inline_equation", + "content": "\\spadesuit" + }, + { + "bbox": [ + 104, + 442, + 506, + 487 + ], + "type": "text", + "content": " IRM-LALR: Next, we adopt layerwise adaptive learning rate (LALR) in IRM training. 
Following (You et al., 2019), we advance the learning rate scheduler by assigning each layer of a neural network-based prediction model with an adaptive learning rate (i.e., proportional to the norm of updated model weights per layer). More specifically, the model parameter update rule becomes:" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 231, + 489, + 504, + 516 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 489, + 504, + 516 + ], + "spans": [ + { + "bbox": [ + 231, + 489, + 504, + 516 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {t + 1, i} = \\boldsymbol {\\theta} _ {t, i} - \\frac {\\tau \\left(\\left\\| \\boldsymbol {\\theta} _ {t , i} \\right\\| _ {2} ^ {2}\\right) \\cdot \\eta_ {t}}{\\left\\| \\mathbf {u} _ {t , i} \\right\\| _ {2} ^ {2}} \\mathbf {u} _ {t, i}, \\tag {A1}", + "image_path": "9becc9d6b850923b034cdebade573d722338a5cab258f2d64f3b92e6685b79de.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{t,i}" + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "content": "-th layer of the model parameters at iteration " + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "inline_equation", + 
"content": "\\mathbf{u}_{t,i}" + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "content": " represents the first-order gradient of the corresponding layer-wise model parameters. We use " + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "inline_equation", + "content": "\\tau(\\|\\pmb{\\theta}_{t,i}\\|_2^2 = \\min\\{\\max\\{\\|\\pmb{\\theta}_{t,i}\\|_2^2, c_l\\}, c_u\\})" + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "content": " as the scaling factor of the adaptive learning rate " + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "inline_equation", + "content": "\\frac{\\eta_t}{\\|\\mathbf{u}_{t,i}\\|}" + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "content": ". We use " + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "inline_equation", + "content": "c_l = 0" + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "inline_equation", + "content": "c_u = 1" + }, + { + "bbox": [ + 104, + 518, + 506, + 567 + ], + "type": "text", + "content": " in our experiments." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 572, + 504, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 572, + 504, + 626 + ], + "spans": [ + { + "bbox": [ + 104, + 572, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\spadesuit" + }, + { + "bbox": [ + 104, + 572, + 504, + 626 + ], + "type": "text", + "content": " IRM-SAM: Lastly, we leverage sharpness-aware minimization (SAM) to simultaneously minimize the IRM loss and the loss sharpness. The latter is achieved by explicitly penalizing the worst-case training loss of model weights when facing small weight perturbations. This yields a wide minimum within a flat loss landscape. 
More specifically, the sharpness-aware loss can be formulated as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 194, + 628, + 504, + 650 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 628, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 194, + 628, + 504, + 650 + ], + "type": "interline_equation", + "content": "\\min _ {\\boldsymbol {\\theta}} \\ell^ {\\mathrm {S A M}} (\\boldsymbol {\\theta}), \\quad \\text {w h e r e} \\quad \\ell^ {\\mathrm {S A M}} (\\boldsymbol {\\theta}) = \\max _ {\\| \\epsilon \\| _ {2} ^ {2} \\leq \\rho} \\ell (\\boldsymbol {\\theta} + \\boldsymbol {\\epsilon}), \\tag {A2}", + "image_path": "a7157e9c1d24f4efa5a9e019bb53353de7211fde22e1d888275a09787a4fb1bf.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 653, + 504, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 653, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 653, + 504, + 677 + ], + "type": "text", + "content": "where the parameter perturbation " + }, + { + "bbox": [ + 104, + 653, + 504, + 677 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 653, + 504, + 677 + ], + "type": "text", + "content": " is subject to the perturbation constraint " + }, + { + "bbox": [ + 104, + 653, + 504, + 677 + ], + "type": "inline_equation", + "content": "\\| \\epsilon \\| _2^2\\leq \\rho" + }, + { + "bbox": [ + 104, + 653, + 504, + 677 + ], + "type": "text", + "content": ". When applied to IRM, we replace the per-environment training loss with the SAM loss, and adopt the " + }, + { + "bbox": [ + 104, + 653, + 504, + 677 + ], + "type": "inline_equation", + "content": "\\rho = 0.001" + }, + { + "bbox": [ + 104, + 653, + 504, + 677 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 689, + 266, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 266, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 266, + 700 + ], + "type": "text", + "content": "B.2 BLOC-IRM IMPLEMENTATION" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "As described in Section 5, the BLOC-IRM algorithm solves the IRM problem with two optimization levels. We use 1-step gradient descent to get the lower-level solution. We retain the gradient" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "text", + "content": "graph in PyTorch to enable auto differentiation. 
We assign each of the classification head " + }, + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{w}^{(e)}\\}" + }, + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "text", + "content": " a separate optimizer and use the same learning rate as the feature extractor " + }, + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "text", + "content": ". For COLORED-MNIST and COLORED-FMNIST, we adopt a learning rate of " + }, + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-3}" + }, + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "text", + "content": " and use the Adam (Kingma & Ba, 2014) optimizer. As for other datasets, we use the multi-step learning rate scheduler with an initial learning rate of 0.1, which is consistent with other baselines. We adopt the same penalty weight of " + }, + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "inline_equation", + "content": "10^{6}" + }, + { + "bbox": [ + 104, + 81, + 506, + 149 + ], + "type": "text", + "content": " as IRMv1 and IRMv0." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 162, + 222, + 174 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 162, + 222, + 174 + ], + "spans": [ + { + "bbox": [ + 106, + 162, + 222, + 174 + ], + "type": "text", + "content": "Algorithm A1 BLOC-IRM" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 177, + 504, + 225 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "spans": [ + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "text", + "content": "1: Initialization: Training data " + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{x}^{(e)}\\}" + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "text", + "content": " environments, Model feature extractor " + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "inline_equation", + "content": "\\theta_0" + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "text", + "content": " model classification heads " + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{w}_0^{(e)}\\}" + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "text", + "content": ", learning rate " + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "inline_equation", + "content": "\\{\\eta_t\\}" + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "text", + "content": " series, penalty 
weight " + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "inline_equation", + "content": "\\{\\gamma_t\\}" + }, + { + "bbox": [ + 110, + 177, + 504, + 204 + ], + "type": "text", + "content": " series." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 203, + 226, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 203, + 226, + 213 + ], + "spans": [ + { + "bbox": [ + 110, + 203, + 226, + 213 + ], + "type": "text", + "content": "2: for Step " + }, + { + "bbox": [ + 110, + 203, + 226, + 213 + ], + "type": "inline_equation", + "content": "t = 0,1,\\ldots ,\\mathbf{d}\\mathbf{o}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 213, + 388, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 213, + 388, + 225 + ], + "spans": [ + { + "bbox": [ + 110, + 213, + 388, + 225 + ], + "type": "text", + "content": "3: Lower-level: update classification head for each environment:" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 188, + 231, + 505, + 261 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 231, + 505, + 261 + ], + "spans": [ + { + "bbox": [ + 188, + 231, + 505, + 261 + ], + "type": "interline_equation", + "content": "\\forall e \\in \\mathcal {E} _ {\\mathrm {t r}}, \\quad \\tilde {\\mathbf {w}} _ {t + 1} ^ {(e)} = \\mathbf {w} _ {t} ^ {(e)} - \\eta_ {t} \\left. 
\\frac {d \\ell^ {(e)} (\\mathbf {w} \\odot \\boldsymbol {\\theta})}{d \\mathbf {w}} \\right| _ {\\boldsymbol {\\theta} = \\boldsymbol {\\theta} _ {t}, \\mathbf {w} = \\mathbf {w} _ {t} ^ {(e)}} \\tag {A3}", + "image_path": "171d51e2268ddcd8a8082b107e390aef535f1b0551c3382bcfb52c46e17238f5.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 110, + 267, + 411, + 293 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 110, + 267, + 411, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 267, + 411, + 282 + ], + "spans": [ + { + "bbox": [ + 110, + 267, + 411, + 282 + ], + "type": "text", + "content": "4: Consensus projection: " + }, + { + "bbox": [ + 110, + 267, + 411, + 282 + ], + "type": "inline_equation", + "content": "\\forall e\\in \\mathcal{E}_{\\mathrm{tr}},\\mathbf{w}_{t + 1}^{(e)} = \\mathbf{w}_{t + 1}^{*} = \\frac{1}{N}\\sum_{e\\in \\mathcal{E}_{\\mathrm{tr}}}\\tilde{\\mathbf{w}}_{t + 1}^{(e)}" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 110, + 281, + 386, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 281, + 386, + 293 + ], + "spans": [ + { + "bbox": [ + 110, + 281, + 386, + 293 + ], + "type": "text", + "content": "5: Upper-level: update feature extractor with stationary penalty:" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 144, + 307, + 505, + 336 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 307, + 505, + 336 + ], + "spans": [ + { + "bbox": [ + 144, + 307, + 505, + 336 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {t + 1} = \\boldsymbol {\\theta} _ {t} - \\eta_ {t} \\sum_ {e \\in \\mathcal {E} _ {\\mathrm {t r}}} \\frac {d}{d \\boldsymbol {\\theta}} \\left(\\ell^ {(e)} (\\mathbf {w} \\odot \\boldsymbol {\\theta}) + \\gamma_ {t} \\| \\nabla_ {\\mathbf {w}} \\ell^ {(e)} (\\mathbf {w} \\circ \\boldsymbol {\\theta}) \\| _ {2} ^ {2}\\right) 
\\Big | _ {\\boldsymbol {\\theta} = \\boldsymbol {\\theta} _ {t}, \\mathbf {w} = \\mathbf {w} _ {t + 1} ^ {*}} \\tag {A4}", + "image_path": "6193562e5de0bd2f5ec11ad3fa7c32203f828669ff038a419044102dc0fc6547.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 342, + 157, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 342, + 157, + 354 + ], + "spans": [ + { + "bbox": [ + 110, + 342, + 157, + 354 + ], + "type": "text", + "content": "6: end for" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 380, + 228, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 228, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 228, + 392 + ], + "type": "text", + "content": "C EXPERIMENTATION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 406, + 230, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 230, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 230, + 417 + ], + "type": "text", + "content": "C.1 ENVIRONMENT SETUP" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 428, + 504, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 428, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 428, + 504, + 472 + ], + "type": "text", + "content": "As proposed in Section 4, we use the multi-environment evaluation metric unless specified otherwise. To capture both the accuracy and variance of invariant predictions across multiple testing environments, the average accuracy and the accuracy gap (the difference between the best-case and worst-case accuracy) are evaluated for IRM methods." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 477, + 504, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 477, + 504, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 477, + 504, + 522 + ], + "type": "text", + "content": "Specifically, for the COLORED-MNIST, COLORED-FMNIST, COLORED-OBJECT, CIFAR-MNIST, and CELEBA dataset, we manually create 19 test environments with uniformly sampled bias parameter " + }, + { + "bbox": [ + 104, + 477, + 504, + 522 + ], + "type": "inline_equation", + "content": "\\beta \\in \\{0.05, 0.1, \\dots, 0.95\\}" + }, + { + "bbox": [ + 104, + 477, + 504, + 522 + ], + "type": "text", + "content": ", where the environment bias parameter " + }, + { + "bbox": [ + 104, + 477, + 504, + 522 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 477, + 504, + 522 + ], + "type": "text", + "content": " controls the spurious correlation (see Section 4 for more details)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 527, + 504, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 527, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 527, + 504, + 573 + ], + "type": "text", + "content": "For VLCS and PACS datasets, the training and test sets have 4 environments, namely {art painting, cartoon, sketch, photo} and {CALTECH, LABELME, PASCAL, SUN} respectively. We use the first three environments as the training environments, while we use the test set of all four environments to form our proposed multi-environment invariance evaluation system." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 588, + 183, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 588, + 183, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 588, + 183, + 599 + ], + "type": "text", + "content": "C.2 BASELINES" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 609, + 504, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 655 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 655 + ], + "type": "text", + "content": "For each baseline method, we follow its official PyTorch repository except IRM-GAME and SPARSEIRM. We translate the TensorFlow-based original code base of IRM-GAME to PyTorch. As one of the latest IRM advancements, the official code of SPARSEIRM is not yet publicly available. Therefore, we reproduce SPARSEIRM in PyTorch." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 659, + 504, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 694 + ], + "type": "text", + "content": "In particular, for COLORED-MNIST and COLORED-FMNIST, we stick to the original hyperparameters for the large-batch setting and tune the hyper-parameters of each method, including the penalty weight, number of warm-up epochs, and learning rate for the small batch setting." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "content": "In particular, for the large-batch setting, we use the penalty weight of " + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "inline_equation", + "content": "10^{6}" + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "content": ", 190 warm-up epochs, and 500 epochs in total, as suggested by the original IRMv1 and inherited by its variants. For the small-batch setting, we adopt the same penalty weight " + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "inline_equation", + "content": "10^{6}" + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "content": ". Further, we found that the warm-up" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "phase could be shortened without sacrificing accuracy. 
Therefore, we use 50 warm-up epochs and total 200 epochs for all the methods." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 505, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 505, + 144 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 505, + 144 + ], + "type": "text", + "content": "For other datasets, we adopt the batch size of 128 and use ResNet-18 as the default model architecture. We train for 200 epochs. We adopt the step-wise learning rate scheduler with an initial learning rate of 0.1. The learning rate decays by 0.1 at the 100th and 150th epochs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 157, + 290, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 157, + 290, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 157, + 290, + 168 + ], + "type": "text", + "content": "C.3 ADDITIONAL EXPERIMENT RESULTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 178, + 504, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 178, + 504, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 178, + 504, + 222 + ], + "type": "text", + "content": "The influence of batch size with all the baselines. We show in Figure A1 the influence of training batch size on the performance of different methods. We observe in Figure A1, as in Figure 1, that full batch setting does not achieve the best performance, and the use of mini-batch (stochastic gradient descent) indeed improves performance." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 235, + 232, + 373, + 351 + ], + "blocks": [ + { + "bbox": [ + 235, + 232, + 373, + 351 + ], + "lines": [ + { + "bbox": [ + 235, + 232, + 373, + 351 + ], + "spans": [ + { + "bbox": [ + 235, + 232, + 373, + 351 + ], + "type": "image", + "image_path": "039def54e2becbaec79d1d03a53933990c3fed2cbd66dda2235a21b0bc50bf3a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 364, + 504, + 395 + ], + "lines": [ + { + "bbox": [ + 104, + 364, + 504, + 395 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 504, + 395 + ], + "type": "text", + "content": "Figure A1: The performance of all the baselines in this work trained with different batch sizes on COLORED-MNIST dataset. The full data batch-size is 50k. The invariant accuracy corresponds to the average accuracy evaluated based on the diversified environments-based evaluation metric." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 157, + 417, + 298, + 537 + ], + "blocks": [ + { + "bbox": [ + 157, + 417, + 298, + 537 + ], + "lines": [ + { + "bbox": [ + 157, + 417, + 298, + 537 + ], + "spans": [ + { + "bbox": [ + 157, + 417, + 298, + 537 + ], + "type": "image", + "image_path": "ef8217039f5f005b3bd69e9a9794f7e3b7c1686ce84d77fdda027d9d0567a5f7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 548, + 504, + 599 + ], + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 599 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 599 + ], + "type": "text", + "content": "Figure A2: The loss landscapes of invariant prediction models acquired by (A) large-batch IRMv1 training with 50k batch size and (B) small-batch training with 1k batch size. The 2D loss landscape visualization is realized using the tool in (Li et al., 2018). 
The " + }, + { + "bbox": [ + 104, + 548, + 504, + 599 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 548, + 504, + 599 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 548, + 504, + 599 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 548, + 504, + 599 + ], + "type": "text", + "content": " axes represent the linear interpolation coefficients over two directional vectors originated from the converged local optima. Here the numbers on the contour denote the loss values over test data." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 312, + 417, + 453, + 537 + ], + "blocks": [ + { + "bbox": [ + 312, + 417, + 453, + 537 + ], + "lines": [ + { + "bbox": [ + 312, + 417, + 453, + 537 + ], + "spans": [ + { + "bbox": [ + 312, + 417, + 453, + 537 + ], + "type": "image", + "image_path": "de052964e98aadf5f59cf377fff58ac0c1a4646431936e9149896a2f2230471d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 609, + 504, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 675 + ], + "type": "text", + "content": "Loss landscapes of IRMv1 with different batch sizes. We plot the loss landscapes of the models trained with IRMv1 on COLORED-MNIST using large (full) and small batch in Figure A2. Using small batch training, IRMv1 (Fig. A2B) converges to a smooth neighborhood of a local optima. This also corresponds to a flatter loss landscape than the landscape of the large-batch training (Figure A2(A)). The loss landscapes demonstrate consistent results as other experiments discussed in Section 3." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "Training trajectory with BLOC-IRM with and without stationary loss. In Figure A3, we plot the per-environment training trajectory of stationary loss when solving (1) and (BLOC-IRM) on COLORED-MNIST. For (BLOC-IRM) we use the regularization term " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\lambda = 10^6" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ", which is aligned with the penalty coefficient used in IRMv1. As we can see, without the stationarity regularization," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "the stationary loss remains at a high level for both environments (the dotted curves). 
Notably, the lower-level stationary can be reached fast with the stationarity penalty, as shown in the solid curves." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 226, + 117, + 381, + 247 + ], + "blocks": [ + { + "bbox": [ + 226, + 117, + 381, + 247 + ], + "lines": [ + { + "bbox": [ + 226, + 117, + 381, + 247 + ], + "spans": [ + { + "bbox": [ + 226, + 117, + 381, + 247 + ], + "type": "image", + "image_path": "337a64114140457585dd9b2b577dd0fd6b39b0f199f7682f05dfdd4e20a40c7a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 257, + 504, + 289 + ], + "lines": [ + { + "bbox": [ + 104, + 257, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 504, + 289 + ], + "type": "text", + "content": "Figure A3: The per-environment training trajectory for the stationarity loss of (1) and (BLOC-IRM) on COLORED-MNIST. The training setting is the same as Figure 2. The algorithmic details can be found in Appendix B." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 306, + 504, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 363 + ], + "type": "text", + "content": "Performance of all the methods with full dataset list. We show in Table A1 the results of all the methods on the seven datasets we studied. To be more specific, in Table A1, we append the results of COLORED-MNIST and COLORED-FMNIST into Table 5 as a whole. As we can see, our methods outperforms other baselines in all the datasets in terms of average accuracy, and stands top in most cases in terms of the accuracy gap." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 106, + 422, + 504, + 487 + ], + "blocks": [ + { + "bbox": [ + 104, + 381, + 504, + 422 + ], + "lines": [ + { + "bbox": [ + 104, + 381, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 381, + 504, + 422 + ], + "type": "text", + "content": "Table A1: IRM performance comparison between our proposed BLOC-IRM method and other baselines under the full list of datasets. We use MLP for COLORED-MNIST and COLORED-FMNIST, and ResNet-18 (He et al., 2016) for the rest datasets. The evaluation setup is consistent with Table 4, and the best performance per-evaluation metric and per-dataset is highlighted in bold." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 422, + 504, + 487 + ], + "lines": [ + { + "bbox": [ + 106, + 422, + 504, + 487 + ], + "spans": [ + { + "bbox": [ + 106, + 422, + 504, + 487 + ], + "type": "table", + "html": "
Algorithm Metrics (%)COLORED-MNISTCOLORED-FMNISTCOLORED-OBJECTCIFAR-MNISTCELEBAVLCSPACS
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
ERM49.19±1.8990.72±2.0849.77±1.7188.62±2.4941.11±1.4486.43±2.8940.39±1.3285.53±2.3372.38±0.2910.73±0.3663.23±0.2312.39±0.3569.95±0.3514.32±0.75
<tr><td>IRMv1</td><td>68.33±0.31</td><td>2.04±0.05</td><td>68.76±0.31</td><td>1.45±0.09</td><td>64.42±0.21</td><td>4.18±0.29</td><td>61.49±0.29</td><td>7.17±0.33</td><td>72.49±0.38</td><td>10.15±0.27</td><td>62.72±0.29</td><td>12.74±0.27</td><td>68.93±0.33</td><td>14.99±0.51</td></tr>
<tr><td>IRMv0</td><td>68.37±0.28</td><td>1.32±0.09</td><td>69.07±0.27</td><td>1.36±0.06</td><td>62.39±0.25</td><td>5.36±0.31</td><td>60.14±0.18</td><td>8.83±0.39</td><td>72.42±0.35</td><td>10.43±0.38</td><td>62.59±0.32</td><td>12.99±0.36</td><td>68.72±0.29</td><td>15.29±0.71</td></tr>
<tr><td>IRM-GAME</td><td>67.73±0.24</td><td>1.67±0.14</td><td>67.49±0.32</td><td>1.82±0.13</td><td>62.88±0.34</td><td>5.59±0.28</td><td>60.44±0.31</td><td>6.72±0.41</td><td>72.18±0.44</td><td>12.32±0.41</td><td>62.31±0.38</td><td>13.37±0.62</td><td>68.12±0.22</td><td>15.77±0.66</td></tr>
REX68.42±0.291.65±0.0768.66±0.221.29±0.0863.37±0.355.42±0.3162.32±0.245.55±0.3272.34±0.2610.31±0.2363.19±0.3112.87±0.3169.43±0.3415.31±0.67
BIRM68.71±0.211.35±0.0968.64±0.321.44±0.1365.11±0.273.31±0.2262.99±0.355.23±0.3672.93±0.289.92±0.3363.33±0.4012.13±0.2369.34±0.2515.76±0.49
SPARSEIRM68.81±0.251.72±0.0568.29±0.221.28±0.1564.97±0.393.97±0.2562.16±0.294.14±0.3172.42±0.339.79±0.2162.86±0.2612.79±0.3569.52±0.3915.81±0.82
FISHR68.69±0.192.13±0.0868.79±0.171.77±0.1064.07±0.234.41±0.2961.79±0.255.55±0.2172.89±0.259.42±0.3263.44±0.3711.93±0.4270.21±0.2214.52±0.43
BLOC-IRM69.47±0.241.04±0.0769.43±0.211.14±0.1165.97±0.334.10±0.3663.69±0.324.89±0.3673.35±0.328.79±0.2163.62±0.3511.55±0.3270.31±0.2114.73±0.65
", + "image_path": "a4f83e9566b91effe521dcae282a9281257532dd65009a8d4bb0f5d3a2faf42f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 505, + 504, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 504, + 540 + ], + "type": "text", + "content": "Experiment on different model sizes. We show in Figure A4 the influence of the increasing model size on the performance of different baselines considered in this work. Compared to Figure 4, we report additional standard deviation of the 10 independent trials in Figure A4." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 227, + 550, + 380, + 681 + ], + "blocks": [ + { + "bbox": [ + 227, + 550, + 380, + 681 + ], + "lines": [ + { + "bbox": [ + 227, + 550, + 380, + 681 + ], + "spans": [ + { + "bbox": [ + 227, + 550, + 380, + 681 + ], + "type": "image", + "image_path": "f3c2dd9be52dd33649836e9caba9cd6238b137e020dcb52b708b55ea9a23b2e1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 691, + 504, + 723 + ], + "lines": [ + { + "bbox": [ + 104, + 691, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 691, + 504, + 723 + ], + "type": "text", + "content": "Figure A4: IRM performance on COLORED-MNIST against the dimension of the intermediate layer in MLP. The dotted line represents the default dimension (" + }, + { + "bbox": [ + 104, + 691, + 504, + 723 + ], + "type": "inline_equation", + "content": "d = 390" + }, + { + "bbox": [ + 104, + 691, + 504, + 723 + ], + "type": "text", + "content": ") used in the literature. The invariant prediction accuracy is presented via the dot line (mean) and shaded area (standard deviation) over 10 random trials." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "Experiment with different training environments. In Table A2, we show the performance of all the methods in more complex training environments, such as more training environments and more skewed environment bias parameter " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": ". As we can see, BLOC-IRM outperforms other baselines." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 185, + 147, + 426, + 278 + ], + "blocks": [ + { + "bbox": [ + 148, + 135, + 461, + 146 + ], + "lines": [ + { + "bbox": [ + 148, + 135, + 461, + 146 + ], + "spans": [ + { + "bbox": [ + 148, + 135, + 461, + 146 + ], + "type": "text", + "content": "Table A2: Performance under different training environments in COLORED-MNIST." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 185, + 147, + 426, + 278 + ], + "lines": [ + { + "bbox": [ + 185, + 147, + 426, + 278 + ], + "spans": [ + { + "bbox": [ + 185, + 147, + 426, + 278 + ], + "type": "table", + "html": "
Environment Metrics (%)\\( {p}_{\\text{tr }} \\in \\{ {0.1},{0.15}\\} \\)\\( {p}_{\\text{tr }} \\in \\{ {0.1},{0.15},{0.2}\\} \\)
Avg AccAcc GapAvg AccAcc Gap
OPTIMUM75.000.0075.000.00
GRAYSCALE73.82±0.110.37±0.0573.97±0.140.29±0.08
ERM49.21±0.7991.88±3.3149.03±0.9392.17±3.04
IRMv167.36±0.312.77±0.1567.11±0.34\\( {2.42} \\pm {0.12} \\)
IRMv067.01±0.42\\( {2.85} \\pm {0.18} \\)66.71±0.42\\( {2.36} \\pm {0.19} \\)
IRM-GAME66.39±0.724.47±0.6165.93±0.53\\( {4.25} \\pm {0.84} \\)
REX66.82±0.44\\( {2.59} \\pm {0.11} \\)67.14±0.38\\( {2.16} \\pm {0.13} \\)
BIRM67.35±0.39\\( {2.65} \\pm {0.10} \\)68.05±0.43\\( {1.99} \\pm {0.07} \\)
SPARSEIRM67.12±0.53\\( {2.33} \\pm {0.18} \\)67.72±0.41\\( {2.11} \\pm {0.19} \\)
FISHR67.22±0.43\\( {2.44} \\pm {0.15} \\)67.32±0.39\\( {2.59} \\pm {0.15} \\)
<tr><td>BLOC-IRM</td><td>68.72±0.41</td><td>\\( {2.19} \\pm {0.15} \\)</td><td>68.89±0.31</td><td>\\( {2.39} \\pm {0.09} \\)</td></tr>
", + "image_path": "1166dfe068891ef6fb4ed2653bc8f7c7e95a4acf9f3d90938a0343d6c81d6974.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 296, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 296, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 504, + 384 + ], + "type": "text", + "content": "BLOC-IRM with different regularizations. Based on the penalty terms used in the existing IRM variants, we explore the performance of our proposed BLOC-IRM with various regularization, including the ones used in IRMv1 (BLOC-IRM-v1), REX (BLOC-IRM-REX), and FISHR (BLOC-IRM-FISHR). We conduct experiments on three different datasets and the results are shown in Table A3. It is obvious that the best performance is always achieved when the per-environment stationarity is penalized in the upper-level. This is not surprising since without an explicit promotion of stationarity, other forms of penalties do not guarantee the BLO algorithm to achieve an optimal solution." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 145, + 433, + 465, + 506 + ], + "blocks": [ + { + "bbox": [ + 104, + 401, + 504, + 432 + ], + "lines": [ + { + "bbox": [ + 104, + 401, + 504, + 432 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 504, + 432 + ], + "type": "text", + "content": "Table A3: The performance of BLOC-IRM with different regularization terms. Three datasets are studied and the latest baseline SPARSEIRM is listed as reference for comparison. The best performance per-evaluation metric and per-dataset is highlighted in **bold**." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 145, + 433, + 465, + 506 + ], + "lines": [ + { + "bbox": [ + 145, + 433, + 465, + 506 + ], + "spans": [ + { + "bbox": [ + 145, + 433, + 465, + 506 + ], + "type": "table", + "html": "
Dataset MetricsCOLORED-MNISTCOLORED-OBJECTCIFAR-MNIST
Avg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
SPARSEIRM68.81±0.251.72±0.0564.97±0.393.97±0.2562.87±0.294.14±0.31
BLOC-IRM69.47±0.241.04±0.0765.97±0.334.10±0.3663.69±0.324.89±0.36
BLOC-IRM-v167.14±0.244.33±0.8363.38±0.296.31±0.5161.13±0.516.71±0.41
BLOC-IRM-REX62.71±0.218.74±1.2160.31±0.337.62±0.6659.39±0.557.89±0.45
BLOC-IRM-FISHR63.25±0.167.12±0.3961.17±0.346.98±0.4560.86±0.516.63±0.30
", + "image_path": "e5fb58e26d79ab8b12f38b8085b254e7f8bfc0c07d7675862655c88dcbc0cc84.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 525, + 504, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 504, + 635 + ], + "type": "text", + "content": "Performance comparison of different methods with additional covariate shifts. Besides the sensitivity check on model size, Table A4 examines the resilience of IRM to variations in the training environment. This study is motivated by Krueger et al. (2021), who empirically showed that the performance of invariant prediction degrades if additional covariate shifts are imposed on the training environments. Thus, we present the IRM performance on COLORED-MNIST by introducing class, digit, and color imbalances, following Krueger et al. (2021, Section 4.1). Compared with Table 4, IRM suffers a greater performance loss in Table A4, in the presence of training environment variations. However, the proposed BLOC-IRM maintains the accuracy improvement over baselines with each variation. In Table A2, we also study IRM with different numbers of training environments and observe the consistent improvement of BLOC-IRM over other baselines." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 647, + 504, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 504, + 737 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 504, + 737 + ], + "type": "text", + "content": "Exploration on the failure cases of previous IRM methods. Some papers either theoretically (Rosenfeld et al., 2020) or empirically (Kamath et al., 2021) pointed out that the original IRMv1 method could fail in certain circumstances, due to the fact that the regularization term used in IRMv1 heavily relies on the \"linear predictor\" assumption. 
Regarding this issue, we first bring to attention that the BLOC-IRM formulation does not require the predictors to be linear, since we adopt the regularization in the form of IRMv0 in the upper-level objective, not IRMv1. To justify our argument, we repeat the experiments in (Kamath et al., 2021), which points out a specific scenario using the COLORED-MNIST dataset where IRMv1 fails." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 121, + 504, + 209 + ], + "blocks": [ + { + "bbox": [ + 105, + 89, + 504, + 120 + ], + "lines": [ + { + "bbox": [ + 105, + 89, + 504, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 89, + 504, + 120 + ], + "type": "text", + "content": "Table A4: IRM performance on COLORED-MNIST and COLORED-FMNIST with training environment variations in terms of class, digit and color imbalances. The best IRM performance per-evaluation metric and per-variation source is highlighted in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 121, + 504, + 209 + ], + "lines": [ + { + "bbox": [ + 106, + 121, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 504, + 209 + ], + "type": "table", + "html": "
DatasetCOLORED-MNISTCOLORED-FMNIST
Variation Metrics (%)Class ImbalanceDigit ImbalanceColor ImbalanceClass ImbalanceDigit ImbalanceColor Imbalance
Avg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc GapAvg AccAcc Gap
GRAYSCALE71.23±0.182.76±0.1170.31±0.212.79±0.1572.29±0.162.88±0.1470.15±0.212.29±0.1269.92±0.152.72±0.2173.31±0.111.17±0.23
ERM43.72±1.0192.76±1.4545.89±2.8291.65±1.8646.19±2.8890.88±1.6941.72±1.9893.37±2.1542.39±2.3992.23±2.7245.89±0.2791.31±2.27
IRMv165.39±0.224.44±0.2964.89±0.264.19±0.4466.12±0.253.31±0.2962.49±0.334.93±0.4561.88±0.235.54±0.3964.39±0.443.79±0.33
IRMv065.01±0.284.29±0.3365.13±0.253.87±0.2866.72±0.253.01±0.4462.78±0.485.33±0.4761.62±0.295.29±0.4164.93±0.273.28±0.31
IRM-GAME62.21±0.426.45±0.3562.10±0.356.72±0.4461.82±0.657.78±0.5560.73±0.846.24±0.4360.79±0.456.47±0.8264.32±0.425.73±0.31
REX66.45±0.253.39±0.2866.23±0.433.21±0.2066.99±0.423.32±0.2764.89±0.365.78±0.5363.95±0.254.73±0.6265.87±0.424.30±0.42
BIRM65.73±0.254.11±0.3165.73±0.884.49±0.6766.72±0.243.47±0.2564.39±0.344.47±0.3963.24±0.394.54±0.4265.08±0.313.80±0.29
SPARSEIRM65.32±0.394.92±0.2264.44±0.364.85±0.3366.03±0.322.85±0.1964.32±0.514.15±0.3662.97±0.355.75±0.5264.72±0.463.99±0.39
FISHR66.13±0.283.99±0.3265.87±0.423.72±0.4165.48±0.214.49±0.3163.62±0.535.59±0.3562.47±0.265.72±0.3365.13±0.324.44±0.21
BLOC-IRM66.32±0.273.11±0.2266.41±0.293.32±0.2567.25±0.243.72±0.2765.99±0.313.97±0.4365.13±0.315.11±0.4566.79±0.263.72±0.36
", + "image_path": "31b96d56506bce747bf6c6fc3e2296e0ba166ec2663bedb0d870c90493635c4d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 229, + 369, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 229, + 369, + 351 + ], + "spans": [ + { + "bbox": [ + 104, + 229, + 369, + 351 + ], + "type": "text", + "content": "More specifically, the models are trained in the training environments " + }, + { + "bbox": [ + 104, + 229, + 369, + 351 + ], + "type": "inline_equation", + "content": "(\\alpha, \\beta) = (0.1, 0.2)" + }, + { + "bbox": [ + 104, + 229, + 369, + 351 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 229, + 369, + 351 + ], + "type": "inline_equation", + "content": "(0.1, 0.25)" + }, + { + "bbox": [ + 104, + 229, + 369, + 351 + ], + "type": "text", + "content": ", and evaluated in the test environment " + }, + { + "bbox": [ + 104, + 229, + 369, + 351 + ], + "type": "inline_equation", + "content": "(0.1, 0.9)" + }, + { + "bbox": [ + 104, + 229, + 369, + 351 + ], + "type": "text", + "content": ". Note that denotes the label flipping rate and represents the environment bias parameter. The results are shown in the Table A5. As we can see, IRMv1 is clearly worse than ERM as it achieves much lower average accuracy and higher accuracy gap. However, BLOC-IRM outperforms ERM by obtaining high average accuracy and lower accuracy gap. This result shows that BLOC-IRM seems promising to address the empirical IRM challenge discovered in (Kamath et al., 2021). 
In the meantime, we also acknowledge that BLOC-IRM is not per" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 351, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 351, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 351, + 504, + 396 + ], + "type": "text", + "content": "fect since the advantage achieved by BLOC-IRM over ERM is not strong enough. However, we stress that the main contribution of BLOC-IRM does not lie in solving the failure cases of IRMv1, but to fix the issue of IRM-Game that resorts to a predictor ensemble to make the invariant prediction, which deviates from the spirit of acquiring invariant predictors in the original IRM paradigm." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 375, + 300, + 495, + 341 + ], + "blocks": [ + { + "bbox": [ + 374, + 240, + 504, + 300 + ], + "lines": [ + { + "bbox": [ + 374, + 240, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 374, + 240, + 504, + 300 + ], + "type": "text", + "content": "Table A5: Performance comparisons on COLORED-MNIST among ERM, IRMv1, and BLOC-IRM in the scenarios where IRM-variants failed following (Kamath et al., 2021)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 375, + 300, + 495, + 341 + ], + "lines": [ + { + "bbox": [ + 375, + 300, + 495, + 341 + ], + "spans": [ + { + "bbox": [ + 375, + 300, + 495, + 341 + ], + "type": "table", + "html": "
MethodAvg. Acc.Acc. Gap
ERM83.0913.79
IRMv176.8927.68
BLOC-IRM84.2211.01
", + "image_path": "555cc6211eacb461f589fcf37cfcbe02bf6782991a8daa9c39167629bfbeb33e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 406, + 504, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 406, + 504, + 452 + ], + "spans": [ + { + "bbox": [ + 104, + 406, + 504, + 452 + ], + "type": "text", + "content": "A similar curve to Figure 1 on COLORED-FMNIST. We show the results for COLORED-FMNIST similar to Figure 1 in Figure A5 and the conclusion does not change much. As mentioned before, the large-batch training setup was typically used for IRM training over the COLORED-FMNIST and COLORED-FMNIST datasets." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 234, + 460, + 373, + 578 + ], + "blocks": [ + { + "bbox": [ + 234, + 460, + 373, + 578 + ], + "lines": [ + { + "bbox": [ + 234, + 460, + 373, + 578 + ], + "spans": [ + { + "bbox": [ + 234, + 460, + 373, + 578 + ], + "type": "image", + "image_path": "14defae9d974e70d514e4e2d0e013149aa8ca0b72a658f25f0b6255938cf8e97.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 591, + 504, + 611 + ], + "lines": [ + { + "bbox": [ + 104, + 591, + 504, + 611 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 504, + 611 + ], + "type": "text", + "content": "Figure A5: The performance of three IRM methods (IRMv1, IRMv0, and REX) vs. batch size under COLORED-FMNIST." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_content_list.json b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2cd73235554ea738b3329f1da19cbb8e48eabff7 --- /dev/null +++ b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_content_list.json @@ -0,0 +1,2127 @@ +[ + { + "type": "text", + "text": "WHAT MAKES CONVOLUTIONAL MODELS GREAT ON LONG SEQUENCE MODELING?", + "text_level": 1, + "bbox": [ + 171, + 98, + 823, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuhong Li $^{1*}$ Tianle Cai $^{2*}$ Yi Zhang $^{3}$ Deming Chen $^{1}$ Debadeepta Dey $^{3}$", + "bbox": [ + 179, + 171, + 715, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1University of Illinois Urbana-Champaign, 2Princeton University, 3Microsoft Research.", + "bbox": [ + 179, + 186, + 758, + 203 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": 
"ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 238, + 547, + 253 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Convolutional models have been widely used in multiple domains. However, most existing models only use local convolution, making the model unable to handle long-range dependency efficiently. Attention overcomes this problem by aggregating global information based on the pair-wise attention score but also makes the computational complexity quadratic to the sequence length. Recently, Gu et al. (2021a) proposed a model called S4 inspired by the state space model. S4 can be efficiently implemented as a global convolutional model whose kernel size equals the input sequence length. With Fast Fourier Transform, S4 can model much longer sequences than Transformers and achieve significant gains over SoTA on several long-range tasks. Despite its empirical success, S4 is involved. It requires sophisticated parameterization and initialization schemes that combine the wisdom from several prior works. As a result, S4 is less intuitive and hard to use for researchers with limited prior knowledge. Here we aim to demystify S4 and extract basic principles that contribute to the success of S4 as a global convolutional model. We focus on the structure of the convolution kernel and identify two critical but intuitive principles enjoyed by S4 that are sufficient to make up an effective global convolutional model: 1) The parameterization of the convolutional kernel needs to be efficient in the sense that the number of parameters should scale sub-linearly with sequence length. 2) The kernel needs to satisfy a decaying structure that the weights for convolving with closer neighbors are larger than the more distant ones. Based on the two principles, we propose a simple yet effective convolutional model called Structured Global Convolution (SGConv). 
SGConv exhibits strong empirical performance over several tasks: 1) With faster speed, SGConv surpasses the previous SoTA on Long Range Arena and Speech Command datasets. 2) When plugging SGConv into standard language and vision models, it shows the potential to improve both efficiency and performance.", + "bbox": [ + 228, + 268, + 769, + 632 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 655, + 336, + 671 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Handling Long-Range Dependency (LRD) is a key challenge in long-sequence modeling tasks such as time-series forecasting, language modeling, and pixel-level image generation. Unfortunately, standard deep learning models fail to solve this problem for different reasons: Recurrent Neural Network (RNN) suffers from vanishing gradient, Transformer has complexity quadratic in the sequence length, and Convolutional Neural Network (CNN) usually only has a local receptive field in each layer.", + "bbox": [ + 169, + 686, + 826, + 772 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A recently proposed benchmark called Long-Range Arena (LRA) (Tay et al., 2020b) reveals that all existing models perform poorly in modeling LRD. Notably, on one spatial-level sequence modeling task called Pathfinder-X from LRA, all models fail except a new Structured State Space sequence model (S4) (Gu et al., 2021a). The S4 model is inspired by the state space model widely used in control theory and can be computed efficiently with a special parameterization based on the Cauchy kernel. The exact implementation of the S4 model can be viewed as a (depthwise) global convolutional model with an involved computation global convolution kernel. 
Thanks to the global receptive field of the convolution kernel, S4 is able to handle tasks that require LRD, such as Pathfinder (Linsley et al., 2018; Tay et al., 2020b), where classic local CNNs fail (Linsley et al., 2018; Kim et al.,", + "bbox": [ + 169, + 776, + 826, + 905 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution. Work done during the internship at Microsoft Research. Code is available.", + "bbox": [ + 189, + 909, + 756, + 925 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2019). Also, the use of Fast Fourier Transform (FFT) and techniques from numerical linear algebra make the computational complexity of S4 tractable compared to the quadratic complexity of attention. Together, S4 shows the potential of global convolutional models to model LRD and advances the SoTA on LRA.", + "bbox": [ + 169, + 103, + 823, + 160 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite its accomplishments, the delicate design of S4 makes it unfriendly even to knowledgeable researchers. In particular, the empirical success of S4 relies on 1) A Diagonal Plus Low Rank (DLPR) parameterization whose efficient implementation requires several numerical linear algebra tricks, 2) An initialization scheme based on the HiPPO matrix derived in prior work (Gu et al., 2020). Therefore, aiming to reduce the complications of the model and highlight minimal principles, we raise the following questions:", + "bbox": [ + 169, + 166, + 826, + 253 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "What contributes to the success of the S4 model? 
Can we establish a simpler model based on minimal principles to handle long-range dependency?", + "bbox": [ + 192, + 263, + 805, + 292 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To answer these questions, we focus on the design of the global convolution kernel. We extract two simple and intuitive principles that contribute to the success of the S4 kernel. The first principle is that the parameterization of the global convolution kernel should be efficient in terms of the sequence length: the number of parameters should scale slowly with the sequence length. For example, classic CNNs use a fixed kernel size. S4 also uses a fixed number of parameters to compute the convolution kernel while the number is greater than classic CNNs. Both models satisfy the first principle as the number of parameters does not scale with input length. The efficiency of parameterization is also necessary because the naive implementation of a global convolution kernel with the size of sentence length is intractable for inputs with thousands of tokens. Too many parameters will also cause overfitting, thus hurting the performance. The second principle is the decaying structure of the convolution kernel, meaning that the weights for convolving with closer neighbors are larger than the more distant ones. This structure appears ubiquitously in signal processing, with the well-known Gaussian filter as an example. The intuition is clear that closer neighbors provide a more helpful signal. S4 inherently enjoys this decaying property because of the exponential decay of the spectrum of matrix powers (See Figure 2), and we find this inductive bias improves the model performance (See Section 4.1.2).", + "bbox": [ + 169, + 306, + 826, + 529 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We show that these two principles are sufficient for designing a global convolutional model that captures LRD well. 
To verify this, we introduce a class of global convolution kernels with a simple multiscale structure, as shown in Figure 1. Specifically, we compose the convolution kernel by a sequence of sub-kernels of increasing sizes, yet every sub-kernel is upsampled from the same number of parameters. This parameterization ensures that the number of parameters only scales logarithmically to the input length, which satisfies the first principle. In addition, we add a decaying weight to each scale during the combination step and fulfill the second principle. We named our methods as Structural Global Convolution kernels (SGConv). Empirically, SGConv improves S4 by more than $1\\%$ and achieves SoTA results on the LRA benchmark. On Speech Command datasets, SGConv achieves comparative results in the ten-class classification task and significantly better results in the 35-class classification task upon previous SoTA. We further show", + "bbox": [ + 169, + 536, + 452, + 895 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "that SGConv is more efficient than S4 and can be used as a general purpose module in different domains. For example, a hybrid model of classic attention and SGConv shows promising performance", + "bbox": [ + 169, + 896, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b246c359bb838516f889939657ee626ef6d989ac90c4f8fd5d617971fba8bace.jpg", + "image_caption": [ + "Figure 1: Illustration of the parameterization used in SGConv (Eq. (1)). The convolution kernel is composed of multi-scale sub-kernels. Parameterization Efficiency. Every larger sub-kernel doubles the size of the previous sub-kernel while the same number of parameters are used for every scale, ensuring a logarithmic dependency of the number of parameters to the input length. Decaying. We use a weighted combination of sub-kernels where the weights are decaying, and smaller weights are assigned to larger scales." 
+ ], + "image_footnote": [], + "bbox": [ + 496, + 555, + 794, + 724 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "on both autoregressive language modeling and sentence classification tasks, replacing the 2D convolution kernel of the ConvNext model with 1D SGConv matches the performance of the original model.", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 169, + 341, + 184 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Efficient Transformers. The Transformer architecture (Vaswani et al., 2017) has been successful across a wide range of applications (Dosovitskiy et al., 2020; Liu et al., 2021; Dong et al., 2018; Ye et al., 2022) in machine learning. However, the computation and memory complexity of Transformer scales quadratically with the input length, making it intractable for modeling long-range interactions in very long sequences. Therefore, several efficient variants of Transformer model have been proposed recently to overcome this issue (Child et al., 2019; Wang et al., 2020; Kitaev et al., 2019; Zaheer et al., 2020; Tay et al., 2020a; Peng et al., 2021; Qin et al., 2021). Nevertheless, few of these methods performed well on benchmarks such as Long Range Arena (Tay et al., 2020b), SCROLLS (Shaham et al., 2022), which require long-range modeling ability.", + "bbox": [ + 169, + 200, + 826, + 328 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "(Re-)parameterization. Parameterization is a crucial but underrated part of architecture design because different parameterizations usually provide different inductive biases. 
For example, weight normalization (Salimans & Kingma, 2016) parameterizes the norm and direction of the weight matrices separately and thus reaches faster convergence. On the other hand, Zagoruyko & Komodakis (2017) proposed a Dirac weight re-parameterization to train deep networks without explicit skip-connections and matched the performance of ResNets (He et al., 2016). In computer vision, several works explored using structural re-parameterization to create 2D convolution kernels. Most of these works (Ding et al., 2019; Guo et al., 2020; Ding et al., 2021; Cao et al., 2022) are limited to the vision domain and utilize only short-range convolution kernels (e.g., $7 \\times 7$ ) except for the line of work based on 2D Fourier operators (Rao et al., 2021; Guibas et al., 2021) and the line of work based on continuous convolutional kernel (Romero et al., 2021b;a; 2022). Our SGConv kernel is a special parameterization of global convolution kernels that tackles LRD and showcases the extensibility of re-parameterized kernels.", + "bbox": [ + 169, + 344, + 826, + 525 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "State Space Models. The state space model (SSM) uses a set of linear differential equations to model physical systems with input, output, and state variables. It is widely used in control, neuroscience, and statistics. Recently, Gu et al. (2021b) introduced a deep SSM-based model that can outperform prior approaches on several long sequence modeling tasks with a specially structured state transition matrix. However, the expensive computation and memory requirements make it impractical. A followup work of Gu et al. (2021b) proposed a new parameterization of SSM (Gu et al., 2021a), which decomposes the state transition matrix into the sum of low-rank and normal matrices and implements SSM as a global convolutional model. 
Under this parameterization, the authors then combine the techniques of diagonalizing the Cauchy kernel and performing low-rank corrections with the Woodbury identity to compute the global convolution kernel. While achieving promising results, S4 is theoretically involved and practical implementations of S4 require accelerator-specific dedicated code optimization for the Cauchy kernel computation. This makes it difficult to readily implement in deep learning frameworks (Abadi et al., 2016; Chen et al., 2015; Chen, 2021; Ma et al., 2019) and hardware targets. Concurrent with this work, many state-space-based models are emerging and bringing better performance (Gu et al., 2022a; Smith et al., 2022; Hasani et al., 2022).", + "bbox": [ + 169, + 542, + 826, + 752 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 DESIGN OF GLOBAL CONVOLUTIONAL MODELS", + "text_level": 1, + "bbox": [ + 171, + 773, + 607, + 789 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We summarize the design principles that enable the global convolutional model to be both efficient and effective. 
Then we introduce the proposed Structured Global Convolution (SGConv) based on the highlighted principles.", + "bbox": [ + 169, + 806, + 823, + 849 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 DESIGN PRINCIPLES", + "text_level": 1, + "bbox": [ + 171, + 869, + 357, + 882 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The two intuitive design principles that contribute to the success of S4 are efficient parameterization and decaying structure.", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4691e8575ba409b19c933a56ae5b9ff34a9b74295b23b8ccb413b87c23f99425.jpg", + "image_caption": [ + "(a) Pathfinder-X" + ], + "image_footnote": [], + "bbox": [ + 238, + 104, + 496, + 247 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/61d2227122cdc75f65e5059b4262264ada45e61689a290a23bdcdd4de34daf8a.jpg", + "image_caption": [ + "(b) SC-10", + "Figure 2: Visualization of S4 kernels on (a) Pathfinder-X and (b) Speech Command 10-class. The values in the convolution kernel exhibit a decaying behavior. We only plot the first 4096 positions for better illustration." + ], + "image_footnote": [], + "bbox": [ + 501, + 102, + 759, + 247 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Efficient Parameterization. Different from local convolution, where the kernel size is fixed, global convolution requires a kernel size that is the same as the sentence length. Naive parameterization of convolution kernel as classic local convolutions is therefore intractable for long sequences. For instance, the Pathfinder-X task has a length of $16K$ . 
It then impractically requires $4M$ parameters for a single layer to model the depth-wise global convolution kernel with a standard channel size of 256. Thus, an efficient convolution kernel parameterization is necessary, especially when the sentence is extremely long. For example, S4 takes a well-designed Normal Plus Low-Rank (NPLR) parameterization to model the whole kernel with two special matrices where the number of parameters is fixed.", + "bbox": [ + 169, + 348, + 826, + 472 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Decaying Structure. Apart from the efficiency of the parameterization, we find that a decaying structure of the convolution kernel provides a good inductive bias to long-sequence modeling and contributes to the performance (See Section 4.1.2 for detailed ablation study). Concretely, the magnitude of the value in the convolution kernel should decay so that more weight is assigned to the close neighbors. S4 model inherently satisfies this property because the $k$ -th element of the kernel of S4 is $\\mathbf{C}\\mathbf{A}^k\\mathbf{B}$ and the operator norm of the power of a matrix decays exponentially:", + "bbox": [ + 169, + 488, + 823, + 571 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fact 1. For a square matrix $\\mathbf{A}$ , the operator norm $\\left\\| \\mathbf{A}^k \\right\\|_2 \\leq \\left\\| \\mathbf{A} \\right\\|_2^k$ . 
In particular, if $\\left\\| \\mathbf{A} \\right\\|_2 < 1$ , $\\left\\| \\mathbf{A}^k \\right\\|_2$ decays exponentially to $k$ , so $\\left\\| \\mathbf{C} \\mathbf{A}^k \\mathbf{B} \\right\\|_2 \\leq \\left\\| \\mathbf{C} \\right\\|_2 \\left\\| \\mathbf{A}^k \\right\\|_2 \\left\\| \\mathbf{B} \\right\\|_2$ also decays exponentially.", + "bbox": [ + 171, + 575, + 823, + 612 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We can also directly observe the decaying structure of S4 in different tasks in Figure 2.", + "bbox": [ + 171, + 619, + 743, + 635 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 SGCONV", + "text_level": 1, + "bbox": [ + 171, + 650, + 279, + 664 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Putting the two principles altogether, we propose a simple global depth-wise convolution, dubbed Structured Global Convolution (SGConv), based on multiscale sub-kernels and weighted combinations. (See Figure 1). We will first introduce the parameterization of the convolutional kernel and then present how to build a global convolutional model with this kernel.", + "bbox": [ + 169, + 676, + 823, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Parameterization of SGConv Kernel. Formally, let $L$ be the length of the input sequence, the convolutional kernel should also have length $L$ . We define the parameter set of a single channel as $S = \\{\\mathbf{w}_i|0\\leq i < \\left\\lceil\\log_2\\left(\\frac{L}{d}\\right)\\right\\rceil + 1\\}$ where $\\mathbf{w}_i\\in \\mathbb{R}^d$ is the parameter for $i$ -th sub-kernel $k_{i}$ , and $d$ is the dimension of the parameter. Denote the number of scales $N = \\left\\lceil\\log_2\\left(\\frac{L}{d}\\right)\\right\\rceil + 1$ . We use the upsample operation, implemented as linear interpolation, to form sub-kernels of different scales. We use Upsample $_l(\\mathbf{x})$ to denote upsampling $\\mathbf{x}$ to length $l$ (We use F. 
interpolate function in Pytorch and set the mode to be linear in our implementation). We also introduce a normalization constant $Z$ to ensure the convolution operation will not change the scale of the input and a coefficient $\\alpha$ to control the decaying speed. Now, we are ready to introduce the weighted combination scheme by concatenating a set of weighted sub-kernels $k_{i}$ :", + "bbox": [ + 169, + 747, + 826, + 888 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {C a t} (S) = \\frac {1}{Z} [ k _ {0}, k _ {1}, \\dots , k _ {N - 1} ], \\text {w h e r e} k _ {i} = \\alpha^ {i} \\operatorname {U p s a m p l e} _ {2 \\max [ i - 1, 0 ] d} (\\mathbf {w} _ {i}). \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 243, + 893, + 823, + 922 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/68ae175a257df9ec08386f807a771caf26de78fa55eea39a0a0e97ab69ccd396.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelListOpsTextRetrievalImagePathfinderPath-XAvg.
Transformer36.3764.2757.4642.4471.40X54.39
Sparse Trans.17.0763.5859.5944.2471.71X51.24
Linformer35.7053.9452.2738.5676.34X51.36
Reformer37.2756.1053.4038.0768.50X50.67
BigBird36.0564.0259.2940.8374.87X55.01
S4 (original)58.3576.0287.0987.2686.0588.1080.48
S4 (Gu et al., 2022b)59.6086.8290.9088.6594.2096.3586.09
SGConv61.4589.2091.1187.9795.4697.8387.17
", + "bbox": [ + 220, + 99, + 777, + 231 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1: The performance of SGConv compared to other baselines on the LRA dataset. SGConv achieves significant improvement compared to previous methods with a more straightforward structure and faster speed (See Table 2)", + "bbox": [ + 169, + 239, + 823, + 285 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "It is easy to check that $\\operatorname{Cat}(S)$ gives the convolution kernel with length $\\sum_{i=0}^{N} 2^{\\max[i-1,0]} d = 2^{N-1} d \\geq L$ (See Figure 1 for an illustration), which can be truncated to $L$ if it is overlength. And the number of parameters is $Nd = O(\\log L)$ . The decay coefficient $\\alpha$ , usually chosen to be $1/2$ , induces the decaying structure.", + "bbox": [ + 169, + 306, + 823, + 367 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Incorporate SGConv to Modern Architectures. In the implementation, we compute the depthwise convolution kernel and use Fast Fourier Transform to compute the convolution in $O(L \\log L)$ time (See Figure 8 for detailed illustration). We compute the normalization constant $Z$ such that the norm of the kernel is one at initialization and fix it during training. Please refer to Appendix B.2 for a Python-style pseudo-code. We can plug SGConv into modern architectures as a replacement of attention in Transformer or local convolution in ConvNets (See Figure 6, 7 for two examples). Due to the relaxation of the structure of the convolutional kernel, SGConv does not have the RNN-style reformulation as S4. Yet, SGConv is naturally capable of performing autoregressive generation, such as language modeling, similarly to classic causal convolutional models (Van den Oord et al., 2016; Oord et al., 2016) and Transformers. 
Concretely, the convolution kernel is unidirectional, where the embedding at the $i$ -th position is computed based only on tokens before $i$ , and left zero padding is used for ignoring the overlength kernel. During generation, hidden states of past tokens are cached for fast calculation of the next token with a single convolution step. Due to the simplicity of the parameterization, the SGConv kernel is easy to compute and more efficient than the S4 kernel, as shown in Section 4.1.3.", + "bbox": [ + 169, + 380, + 826, + 590 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 609, + 328, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we first test the effectiveness of SGConv on two standard long sequence modeling tasks, i.e., Long Range Arena (Tay et al., 2020b) and Speech Commands (Warden, 2018), and compare it with S4 and other baselines. We also conduct ablation studies over the decay speed and scale dimension $d$ and evaluate the speed of SGConv on LRA. Further, we explore the possibility of plugging the global convolutional layer into standard models as a general-purpose component for capturing long-range dependency. For language tasks, we find that replacing half of the layers of a Transformer with SGConv blocks, following a certain strategy, will not hurt performance, while the complexity of those layers improves from $O(L^2)$ to $O(L\\log L)$. 
On ImageNet, we replace the $7\\times 7$ convolution in ConvNext (Liu et al., 2022) with SGConv and show comparative or better performance.", + "bbox": [ + 169, + 638, + 823, + 765 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 LONG RANGE ARENA", + "text_level": 1, + "bbox": [ + 171, + 781, + 367, + 795 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Long Range Arena benchmark (Tay et al., 2020b) is a suite of six tasks consisting of sequences ranging from 1K to 16K tokens, encompassing a wide range of data types and modalities such as text, natural, synthetic images, and mathematical expressions requiring similarity, structural, and visual-spatial reasoning.", + "bbox": [ + 169, + 806, + 823, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1.1 RESULTS", + "text_level": 1, + "bbox": [ + 171, + 871, + 292, + 885 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We show the experimental results in Table 1 with several baseline methods (Vaswani et al., 2017; Child et al., 2019; Wang et al., 2020; Kitaev et al., 2019; Zaheer et al., 2020; Gu et al., 2021a; 2022b).", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/baf9783f19377440e85fc0356d573a6d3c76c8e50f89db4ef09466a62d2662fe.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Sequence length256512102420484096819216384
Inf.S429.481.7158.3306.95941156.92274.0
CPUSGConv23.856.2108.7211.3409.3789.51559.3
Inf.S4 (w/o opt)2.72.74.47.915.232.764.5
GPUS4 (w. opt.)1.61.93.15.410.022.344.3
SGConv1.21.32.34.48.519.839.4
BPS4 (w/o opt)4.15.710.219.438.180.1161.2
GPUS4 (w. opt.)3.546.611.922.648.997.8
SGConv2.02.75.09.618.641.282.5
", + "bbox": [ + 222, + 99, + 777, + 256 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: Comparison of the inference and backpropagation time (ms/batch) of S4 and SGConv blocks (number of channels 128, batch size 64) on CPU and GPU. Note that the parameterization in S4 requires a customized CUDA kernel to improve the efficiency (refer to opt. in the Table). Nevertheless, SGConv still always surpasses S4 even compared to the optimized CUDA kernel.", + "bbox": [ + 169, + 263, + 823, + 321 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SGConv achieves a $1\\%$ improvement in average accuracy upon well-tuned S4 variants introduced in Gu et al. (2022b). Notably, SGConv is guided by the two intuitive principles and has a much simpler structure than S4 (Gu et al., 2022b). The detailed implementation settings can be found in Appendix A.1.", + "bbox": [ + 169, + 348, + 823, + 405 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.2 ABLATION STUDY ON IMDB", + "text_level": 1, + "bbox": [ + 171, + 421, + 429, + 434 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct ablation studies on the IMDB byte-level document classification task in the LRA benchmark. We mainly focus on two aspects: 1) The speed of decaying and 2) The parameter dimension $d$ of each scale. For simplicity, in the standard SGConv formulation (Eq. (1)), we fix the decay coefficient $\\alpha = 1/2$ and only tune the dimension $d$ . However, the actual decay speed as a function of the position in the kernel depends both on $\\alpha$ and $d$ , making it hard to conduct ablation studies. 
Thus, we use a slightly different convolution kernel that disentangles the decay speed and the dimension of each scale:", + "bbox": [ + 169, + 445, + 823, + 542 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname{Cat}^{*}(S) = \\frac{1}{Z}\\left[k_{0}, k_{1}, \\dots, k_{N-1}\\right] \\odot \\left[\\frac{1}{1^{t}}, \\frac{1}{2^{t}}, \\dots, \\frac{1}{L^{t}}\\right], \\text{ where } k_{i} = \\operatorname{Upsample}_{2^{\\max[i-1,0]}d}(\\mathbf{w}_{i}). \\tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 549, + 823, + 595 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$t$ here controls the decay speed, which is independent of each scale's dimension. We conduct two sets of experiments: 1) Fix $d = 8$ , vary $t$ from 0 (which means no decay) to 2, and 2) Fix $t = 1$ , vary $d$ from 1 to 64. Figure 3 reports the accuracies in different settings. We can observe that 1) The decay structure is crucial for getting good performance, and 2) In a reasonable range, $d$ has less impact on the performance than $t$ . Nevertheless, we observe a trend of performance drop when increasing $d$ from 8 to 64. Experiments on larger $d$ show worse performance, which can be attributed to overfitting.", + "bbox": [ + 169, + 604, + 506, + 787 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.3 SPEED COMPARISON", + "text_level": 1, + "bbox": [ + 171, + 801, + 375, + 816 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Table 2, we compare the computation speed of the S4 kernel and SGConv kernel in different settings. Due to its simplicity, SGConv is faster than S4 for any sequence length. 
SGConv is about $50\\%$ faster than the vanilla implementation of the S4 kernel and is $15\\%$ faster than the optimized CUDA kernels.", + "bbox": [ + 169, + 825, + 506, + 924 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/105ae48cced803b79ac8996d98110c83cd9f19bdf90f5ca5034e35cd2913a84f.jpg", + "image_caption": [ + "Figure 3: Ablation study on the effect of decay speed and hidden dimension of each scale on IMDB dataset. $pos \\in [1, L]$ refers to the position in the convolution kernel. We observe: 1) The decay structure is crucial for getting good performance; 2) In a reasonable range, $d$ (Dimension) has less impact on the performance than $t$ ( $t \\in [0, 2.0]$ )." + ], + "image_footnote": [], + "bbox": [ + 517, + 623, + 821, + 773 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 SPEECH COMMANDS", + "text_level": 1, + "bbox": [ + 171, + 103, + 359, + 118 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The Speech Command (SC) dataset (Warden, 2018) is a 35-class dataset of 1 second (16000 Hz sampling rate) spoken words in English. However, followup works (Kidger et al., 2020; Gu et al., 2021b; Romero et al., 2021b;a) adopted a smaller 10-class subset of SC. And works (Romero et al., 2021a; Gu et al., 2021b) on the SC dataset specifically use pre-processing such as MFCC features. Our baselines are obtained from (Gu et al., 2021a; 2022a). Note that besides SSM-based models, there is no strong baseline for raw waveform classification using either the 10-class or the full dataset. And SSM-based methods also show the ability to perform 0-shot testing at lower sampling rate such as $8000\\mathrm{Hz}$ . 
Table 3 shows that the SGConv yields better results compared to the SSM-based method among 4 out of 5 tasks. Notably, for the original SC (35-class), SGConv achieves marginally higher accuracy for raw-sequence classification and significantly better results $(+2.40\\%)$ compared to the existing SoTA method.", + "bbox": [ + 169, + 128, + 826, + 282 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/fad401d1b67e783771cabdf52654d29bd38ad06a1bdc71adbb7da3dc7fbc18e0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
10-clsTransformerPerformerNRDECKConvWaveGAN-DS4S4*SGConv
MFCC90.7580.8589.895.3X93.9692.0594.91
16000HZX30.7716.4911.671.6698.3297.9897.52
8000HZ (0-shot)X30.6815.1265.96X96.3091.8396.03
35-clsInceptionNetResNet-18XResNet-50ConvNetS4DS4S4*SGConv
16000HZ61.2477.8683.0195.5196.2596.0896.2796.42
8000HZ (0-shot)5.188.747.727.2691.5891.3291.8994.29
", + "bbox": [ + 173, + 295, + 841, + 434 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 FURTHER APPLICATIONS OF SGCONV", + "text_level": 1, + "bbox": [ + 171, + 529, + 478, + 542 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further study SGConv as a generic network architecture drop-in component targeting tasks in language modeling and computer vision. In Section 4.3.1 we present an efficient mixture of attention and SGConv layers architecture that replaces half of the attention blocks in the Transformer with the SGConv blocks. We demonstrate the potential of utilizing such a model for long text processing. In Section 4.3.2, we incorporate SGConv (1D) into ConvNeXt (Liu et al., 2022). Surprisingly, SGConv achieves comparable or even better results compared to several SoTA CNN and Vision Transformer models by treating the 2D features as a 1D sequence.", + "bbox": [ + 169, + 555, + 823, + 654 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.1 LANGUAGE TASKS", + "text_level": 1, + "bbox": [ + 171, + 667, + 359, + 681 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Language modeling. We propose the SGConv block (shown in Figure 6) which is similar to the Attention block in Transformer (Vaswani et al., 2017). SGConv block enjoys both $O(L\\log (L))$ time complexity and space complexity. We benchmark the inference time and GPU memory usage of both SGConv and Attention in Table 7. When the sequence length is 1024, SGConv block is $\\sim 2.1\\mathrm{X}$ faster than the Attention block. For language modeling, we utilize the feature of SGConv to directly process the long sequences. The", + "bbox": [ + 169, + 691, + 452, + 875 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/2359e899124c0c25528fcbf2cd977f5728ca9dbf3179d9786cae2fe9926a9ffb.jpg", + "table_caption": [ + "Table 3: Speech Command classification results compared to existing methods. 
* We carefully reproduce the S4 method based on the released code1. Since the latest version removed 10-class experiments settings, we utilized a earlier version2. The results suggest that for the SC 35-classification, SGConv achieves SoTA on both full length task and 2X sampling rate, zero-shot task." + ], + "table_footnote": [], + "table_body": "
ModelValid.Test
LSTM+Hebb.29.029.2
16L Transformer-XL-24.0
16L SGConv+SAAttn21.9022.83
Adaptive Input-18.7
S4-20.95
18L Transformer-XL-18.3
18L Transformer-XL*18.1618.75
18L SGConv+SAAttn18.1018.70
", + "bbox": [ + 501, + 708, + 787, + 839 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4: Performance comparison on WikiText-103.", + "bbox": [ + 470, + 848, + 815, + 864 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "$^{1}$ https://github.com/HazyResearch/state-spaces \n $^{2}$ https://github.com/HazyResearch/state-spaces/tree/307f11bba801d5734235a1791df1859f6ae0e367", + "bbox": [ + 171, + 883, + 643, + 922 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c5a6171f8c15be41cfb4e5e9980e7cefe774992c98ee31a6f2f8cbbe1c008995.jpg", + "image_caption": [ + "(a) Illustration of SGConv and Transformer-XL style Short Attention used in language modeling task. SGConv directly processes the full length sequence." + ], + "image_footnote": [], + "bbox": [ + 178, + 104, + 488, + 267 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2085f3250995842f2739313154583d46dd3f4b5ce326fae9be371b6e857b5e84.jpg", + "image_caption": [ + "(b) The depth to replace SAttention with SGConv vs. validation PPL on WikiText-103" + ], + "image_footnote": [], + "bbox": [ + 511, + 128, + 821, + 276 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7100fd046473842eb007be62eb9fc44820c986ddfe1130c2416737fc8a5f9638.jpg", + "table_caption": [ + "Figure 4: Incorporating SGConv to Transformer models in language tasks." + ], + "table_footnote": [], + "table_body": "
MNLI-m/mmQNLIQQPSSTCoLASTSAvg.
BERT84.93/84.9191.3491.0492.8855.1988.2984.08
SGConvBERT84.78/84.7091.2591.1892.5557.9288.4284.40
", + "bbox": [ + 251, + 362, + 746, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5: Performance comparison of BERT and SGConvBERT on GLUE dataset. SGConvBERT is comparable with BERT while being more efficient. We exclude MRPC and RTE datasets in GLUE because their sizes are too small ( $< 5K$ training samples).", + "bbox": [ + 169, + 435, + 826, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Attention block only targets the short range data termed SAttention. We illustrate the structure in Figure 4a. Furthermore, we investigate the strategy to replace the Attention blocks with SGConv blocks. We generate 50 architectures with 8 SGConv blocks and 8 Attention blocks where the order is shuffled. We denote the average depth to replace the Attention blocks as: $\\sum_{i=0}^{N_{SGConv}} \\mathrm{idx}_i / N_{total}$ where the idx denotes the $i$ th SGConv depth position. $N_{SGConv} = 8$ and $N_{total} = 16$ in this case. The results in Figure 4b suggest that when fixing the number of SGConv layer, models achieve better performance by placing SGConv blocks in deeper layers. Guided by the strategy, we handcraft two Transformer-XL (Dai et al., 2019) style models. (1) 16-layer: $\\{\\mathrm{A}, \\mathrm{A}, \\mathrm{A}, \\mathrm{C}\\} \\times 2 + \\{\\mathrm{A}, \\mathrm{C}, \\mathrm{C}, \\mathrm{C}\\} \\times 2$ . (2) 18-layer: $\\{\\mathrm{A}, \\mathrm{A}, \\mathrm{C}\\} \\times 3 + \\{\\mathrm{A}, \\mathrm{C}, \\mathrm{C}\\} \\times 3$ . A denotes SAttention and C denotes SGConv. $\\times N$ denotes repeating the order of layers for $N$ times. We test the model on WikiText-103 (Merit et al., 2016) which is a wide-used language modeling benchmark with an average length of 3.6K tokens per article. We set both the attention and memory length to 384 for 18L model and 192 for 16L model. The length of input sequence is 3092 which can be processed by SGConv directly. We show the results in Table 4. 
Our results suggest that when the attention range is short, the 16L model outperforms the baseline with -1.17 perplexity. For the 18L model, our model achieves 18.70 perplexity. Note that we use a smaller and affordable batch size (16) for training. Under the same setting, our model gains slightly better perplexity than Transformer-XL (-0.05). Our results show the potential of adopting SGConv as part of the language model for long range language sequence processing.", + "bbox": [ + 169, + 516, + 826, + 785 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Sentence classification. We combine the SGConv block with the BERT model (Devlin et al., 2018). Concretely, we utilize the 12-layer $\\{\\mathrm{A},\\mathrm{A},\\mathrm{C}\\} \\times 2 + \\{\\mathrm{A},\\mathrm{C},\\mathrm{C}\\} \\times 2$ model. The pretraining is conducted on BooksCorpus (Zhu et al., 2015) and English Wikipedia (Foundation). We then fine-tune the model on the GLUE benchmark (Wang et al., 2019). To avoid the instability of fine-tuning on small datasets, we only test on tasks with more than $5K$ training samples. We follow the training and fine-tuning pipeline of Ke et al. (2020) (BERT-A in Table 1 of Ke et al. (2020)) and report the average accuracy of 5 different random seeds. SGConvBERT achieves comparable performance to the original BERT model, while the SGConv layer is more efficient than the attention layer.", + "bbox": [ + 169, + 811, + 826, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fa1c055f3a58b583445e4dfbcba0aec183d4829dbcb6645314bec1232ecf0b59.jpg", + "image_caption": [ + "Figure 5: Comparison of ImageNet-1k Top-1 accuracy with SoTA works. Left: Top-1 Accuracy vs. FLOPs. 
Right: Top-1 Accuracy vs. Throughputs." + ], + "image_footnote": [], + "bbox": [ + 176, + 103, + 488, + 265 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f5669ff133c83e9466da7c5214f223f50acfec2d677017a0c06503b1b16ab4bc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 103, + 821, + 266 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3.2 IMAGE CLASSIFICATION", + "text_level": 1, + "bbox": [ + 171, + 339, + 398, + 353 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We also evaluate the adaptability of SGConv by applying it on large-scale image classification. We conduct experiments on ImageNet-1k (Deng et al., 2009) which consists of more than 1.28 million high-resolution training and 50,000 validation images. We replace the $7 \\times 7$ 2D convolutional kernels with SGConvs in ConvNeXt (Liu et al., 2022) denoted as SGConvNeXt. The block designs of SGConvNeXt are shown in Figure 7. Note we train the SGConvNeXt-Tiny/Small/Base/Large using hyperparameter settings from ConvNeXt4 without any changes. By treating the 2D features as sequences, our SGConvNeXt achieves better results compared to existing SoTA methods such as EfficientNets (Tan & Le, 2019), Swin Transformers (Liu et al., 2021) (shown in Figure 5). Note that Vision Transformer (Dosovitskiy et al., 2020) and its variants (Touvron et al., 2021a;b; Yu et al., 2022) adopt patching techniques that can lead to a quadratic increase in complexity with image size. Also, patching is incompatible with dynamic input resolutions while SGConvNeXt processes the data globally. We list several interesting directions that can be explored for future work: 1) Optimization for the long-range convolution: we noticed that though FFT theoretically requires less FLOPs than plain convolution, the throughput drops empirically. One reason is that there is no optimized CUDA implementation for 1D long-range convolution and can be a good direction for future work. 
2) Optimized hyperparameters and data augmentation methods: ConvNeXts' hyperparameters are tuned for maximum performance, which may not be ideal for SGConvNeXt. 3) SGConv for vision reasoning tasks: we show that SGConv is powerful for long-range synthetic reasoning tasks and large-scale classification tasks. It could be effective in visual reasoning applications such as Vision-Language Reasoning (Johnson et al., 2017; Zhu et al., 2020) with great potential.", + "bbox": [ + 169, + 364, + 826, + 643 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 DISCUSSION", + "text_level": 1, + "bbox": [ + 171, + 667, + 310, + 681 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we attempt to answer the question of what makes convolutional models great again on long sequence modeling and summarize two principles contributing to the success. Based on the principles, we propose a simple and intuitive global convolutional model SGConv that has both direct implications and solid performance. Concurrent to our work there are also attempts to simplify the S4 model by restricting the state transition matrix to be diagonal (Gu et al., 2022a; Gupta, 2022). The proposal by Gu et al. (2022a) incorporates an intricate approach to parameterization and initialization schemes compared to our paper. Their method provides insights into the S4 phenomenon from a state-space-model perspective. Instead, we hope our simpler principles and non-SSM-based model can open up a direction for general audiences to understand and try global convolution as a general-purpose module for tackling long-range dependency. This potential has been shown in a very recent paper (Ma et al., 2022) concurrent to our work, where the authors incorporate an exponential moving average layer to a Transformer-like model and achieve promising performance over several long sequence modeling tasks. 
The exponential moving average layer is a particular type of global convolution layer that naturally satisfies our two principles. We believe that similar global convolutional modules will emerge in the future as long-range dependency becomes increasingly critical for sequence modeling.", + "bbox": [ + 169, + 702, + 826, + 924 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENTS", + "text_level": 1, + "bbox": [ + 171, + 103, + 369, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We extend our gratitude to the anonymous reviewers for dedicating their time and expertise to provide constructive feedback and suggestions, which significantly enhanced the quality of this paper. We also express our appreciation to the Program Chairs and Area Chairs for their careful review and valuable comments. Special thanks go to Sebastien Bubeck, Arturs Backurs, Gustavo de Rosa, Di He, and Cong 'Callie' Hao for their valuable suggestions and support.", + "bbox": [ + 171, + 133, + 826, + 204 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 223, + 287, + 239 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, et al. Tensorflow: Large-scale machine learning on heterogeneous distributed systems. arXiv preprint arXiv:1603.04467, 2016.", + "Jinming Cao, Yangyan Li, Mingchao Sun, Ying Chen, Dani Lischinski, Daniel Cohen-Or, Baoquan Chen, and Changhe Tu. Do-conv: Depthwise over-parameterized convolutional layer. 
IEEE Transactions on Image Processing, 2022.", + "Lei Chen. Deep Learning and Practice with MindSpore. Springer Nature, 2021.", + "Tianqi Chen, Mu Li, Yutian Li, Min Lin, Naiyan Wang, Minjie Wang, Tianjun Xiao, Bing Xu, Chiyuan Zhang, and Zheng Zhang. Mxnet: A flexible and efficient machine learning library for heterogeneous distributed systems. arXiv preprint arXiv:1512.01274, 2015.", + "Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509, 2019.", + "Zihang Dai, Zhilin Yang, Yiming Yang, William W Cohen, Jaime Carbonell, Quoc V Le, and Ruslan Salakhutdinov. Transformer-xl: Attentive language models beyond a fixed-length context. arXiv preprint arXiv:1901.02860, 2019.", + "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009.", + "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.", + "Xiaohan Ding, Yuchen Guo, Guiguang Ding, and Jungong Han. Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. In Proceedings of the IEEE/CVF international conference on computer vision, pp. 1911-1920, 2019.", + "Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13733-13742, 2021.", + "Linhao Dong, Shuang Xu, and Bo Xu. Speech-transformer: a no-recurrence sequence-to-sequence model for speech recognition. In 2018 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5884-5888. 
IEEE, 2018.", + "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.", + "Wikipedia Foundation. Wikipedia downloads. URL https://dumps.wikipedia.org.", + "Albert Gu, Tri Dao, Stefano Ermon, Atri Rudra, and Christopher Ré. Hippo: Recurrent memory with optimal polynomial projections. Advances in Neural Information Processing Systems, 33: 1474-1487, 2020.", + "Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces. arXiv preprint arXiv:2111.00396, 2021a." + ], + "bbox": [ + 171, + 246, + 825, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Albert Gu, Isys Johnson, Karan Goel, Khaled Saab, Tri Dao, Atri Rudra, and Christopher Ré. Combining recurrent, convolutional, and continuous-time models with linear state space layers. Advances in neural information processing systems, 34:572-585, 2021b.", + "Albert Gu, Ankit Gupta, Karan Goel, and Christopher Ré. On the parameterization and initialization of diagonal state space models. arXiv preprint arXiv:2206.11893, 2022a.", + "Albert Gu, Isys Johnson, Aman Timalsina, Atri Rudra, and Christopher Ré. How to train your hippo: State space models with generalized orthogonal basis projections. arXiv preprint arXiv:2206.12037, 2022b.", + "John Guibas, Morteza Mardani, Zongyi Li, Andrew Tao, Anima Anandkumar, and Bryan Catanzaro. Efficient token mixing for transformers via adaptive fourier neural operators. 
In International Conference on Learning Representations, 2021.", + "Shuxuan Guo, Jose M Alvarez, and Mathieu Salzmann. Expandnets: Linear over-parameterization to train compact convolutional networks. Advances in Neural Information Processing Systems, 33:1298-1310, 2020.", + "Ankit Gupta. Diagonal state spaces are as effective as structured state spaces. arXiv preprint arXiv:2203.14343, 2022.", + "Ramin Hasani, Mathias Lechner, Tsun-Hsuan Wang, Makram Chahine, Alexander Amini, and Daniela Rus. Liquid structural state-space models. arXiv preprint arXiv:2209.12951, 2022.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.", + "Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2901-2910, 2017.", + "Kirthevasan Kandasamy, Willie Neiswanger, Jeff Schneider, Barnabas Poczos, and Eric P Xing. Neural architecture search with bayesian optimisation and optimal transport. Advances in neural information processing systems, 31, 2018.", + "Guolin Ke, Di He, and Tie-Yan Liu. Rethinking positional encoding in language pre-training. In International Conference on Learning Representations, 2020.", + "Patrick Kidger, James Morrill, James Foster, and Terry Lyons. Neural controlled differential equations for irregular time series. Advances in Neural Information Processing Systems, 33:6696-6707, 2020.", + "Junkyung Kim, Drew Linsley, Kalpit Thakkar, and Thomas Serre. Disentangling neural mechanisms for perceptual grouping. In International Conference on Learning Representations, 2019.", + "Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. 
In International Conference on Learning Representations, 2019.", + "Guihong Li, Yuedong Yang, Kartikeya Bhardwaj, and Radu Marculescu. Zico: Zero-shot nas via inverse coefficient of variation on gradients. arXiv preprint arXiv:2301.11300, 2023.", + "Yuhong Li, Cong Hao, Pan Li, Jinjun Xiong, and Deming Chen. Generic neural architecture search via regression. Advances in Neural Information Processing Systems, 34:20476-20490, 2021.", + "Ming Lin, Pichao Wang, Zhenhong Sun, Hesen Chen, Xiuyu Sun, Qi Qian, Hao Li, and Rong Jin. Zen-nas: A zero-shot nas for high-performance image recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 347-356, 2021.", + "Drew Linsley, Junkyung Kim, Vijay Veerabadran, Charles Windolf, and Thomas Serre. Learning long-range spatial dependencies with horizontal gated recurrent units. Advances in neural information processing systems, 31, 2018." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10012-10022, 2021.", + "Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976-11986, 2022.", + "Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer. Mega: Moving average equipped gated attention. 
arXiv preprint arXiv:2209.10655, 2022.", + "Yanjun Ma, Dianhai Yu, Tian Wu, and Haifeng Wang. Paddlepaddle: An open-source deep learning platform from industrial practice. Frontiers of Data and Computing, 1(1):105-115, 2019.", + "Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843, 2016.", + "Aaron van den Oord, Sander Dieleman, Heiga Zen, Karen Simonyan, Oriol Vinyals, Alex Graves, Nal Kalchbrenner, Andrew Senior, and Koray Kavukcuoglu. Wavenet: A generative model for raw audio. arXiv preprint arXiv:1609.03499, 2016.", + "Hao Peng, Nikolaos Pappas, Dani Yogatama, Roy Schwartz, Noah Smith, and Lingpeng Kong. Random feature attention. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=QtTKTdVrFBB.", + "Zhen Qin, Weixuan Sun, Hui Deng, Dongxu Li, Yunshen Wei, Baohong Lv, Junjie Yan, Lingpeng Kong, and Yiran Zhong. cosformer: Rethinking softmax in attention. In International Conference on Learning Representations, 2021.", + "Yongming Rao, Wenliang Zhao, Zheng Zhu, Jiwen Lu, and Jie Zhou. Global filter networks for image classification. Advances in Neural Information Processing Systems, 34:980-993, 2021.", + "Esteban Real, Alok Aggarwal, Yanping Huang, and Quoc V Le. Regularized evolution for image classifier architecture search. In Proceedings of the aaai conference on artificial intelligence, volume 33, pp. 4780-4789, 2019.", + "David W Romero, Robert-Jan Bruintjes, Jakub Mikolaj Tomczak, Erik J Bekkers, Mark Hoogendoorn, and Jan van Gemert. Flexconv: Continuous kernel convolutions with differentiable kernel sizes. In International Conference on Learning Representations, 2021a.", + "David W Romero, Anna Kuzina, Erik J Bekkers, Jakub Mikolaj Tomczak, and Mark Hoogendoorn. Ckconv: Continuous kernel convolution for sequential data. 
In International Conference on Learning Representations, 2021b.", + "David W Romero, David M Knigge, Albert Gu, Erik J Bekkers, Efstratios Gavves, Jakub M Tomczak, and Mark Hoogendoorn. Towards a general purpose cnn for long range dependencies in nd. arXiv preprint arXiv:2206.03398, 2022.", + "Tim Salimans and Durk P Kingma. Weight normalization: A simple reparameterization to accelerate training of deep neural networks. Advances in neural information processing systems, 29, 2016.", + "Uri Shaham, Elad Segal, Maor Ivgi, Avia Efrat, Ori Yoran, Adi Haviv, Ankit Gupta, Wenhan Xiong, Mor Geva, Jonathan Berant, and Omer Levy. Scrols: Standardized comparison over long language sequences, 2022.", + "Jimmy TH Smith, Andrew Warrington, and Scott W Linderman. Simplified state space layers for sequence modeling. arXiv preprint arXiv:2208.04933, 2022.", + "Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International conference on machine learning, pp. 6105-6114. PMLR, 2019.", + "Y Tay, D Bahri, D Metzler, D Juan, Z Zhao, and C Zheng. Synthesizer: Rethinking self-attention in transformer models. arxiv 2020. arXiv preprint arXiv:2005.00743, 2, 2020a." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yi Tay, Mostafa Dehghani, Samira Abnar, Yikang Shen, Dara Bahri, Philip Pham, Jinfeng Rao, Liu Yang, Sebastian Ruder, and Donald Metzler. Long range arena: A benchmark for efficient transformers. arXiv preprint arXiv:2011.04006, 2020b.", + "Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. 
Training data-efficient image transformers & distillation through attention. In International Conference on Machine Learning, pp. 10347-10357. PMLR, 2021a.", + "Hugo Touvron, Matthieu Cord, Alexandre Sablayrolles, Gabriel Synnaeve, and Hervé Jégou. Going deeper with image transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 32-42, 2021b.", + "Aaron Van den Oord, Nal Kalchbrenner, Lasse Espeholt, Oriol Vinyals, Alex Graves, et al. Conditional image generation with pixelCNN decoders. Advances in neural information processing systems, 29, 2016.", + "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017.", + "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. 2019. In the Proceedings of ICLR.", + "Sinong Wang, Belinda Z. Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity, 2020.", + "Pete Warden. Speech commands: A dataset for limited-vocabulary speech recognition. arXiv preprint arXiv:1804.03209, 2018.", + "Wenting Ye, Hongfei Yang, Shuai Zhao, Haoyang Fang, Xingjian Shi, and Naveen Neppalli. A transformer-based substitute recommendation model incorporating weakly supervised customer behavior data. arXiv preprint arXiv:2211.02533, 2022.", + "Weihao Yu, Mi Luo, Pan Zhou, Chenyang Si, Yichen Zhou, Xinchao Wang, Jiashi Feng, and Shuicheng Yan. Metaformer is actually what you need for vision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10819-10829, 2022.", + "Sergey Zagoruyko and Nikos Komodakis. Diracnets: Training very deep neural networks without skip-connections. 
arXiv preprint arXiv:1706.00388, 2017.", + "Manzil Zaheer, Guru Guruganesh, Kumar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontonon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, et al. Big bird: Transformers for longer sequences. Advances in Neural Information Processing Systems, 33:17283-17297, 2020.", + "Fengda Zhu, Yi Zhu, Xiaojun Chang, and Xiaodan Liang. Vision-language navigation with self-supervised auxiliary reasoning tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10012-10022, 2020.", + "Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE international conference on computer vision, pp. 19-27, 2015.", + "Barret Zoph, Vijay Vasudevan, Jonathon Shlens, and Quoc V Le. Learning transferable architectures for scalable image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8697-8710, 2018." + ], + "bbox": [ + 171, + 102, + 825, + 842 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A DETAILED EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 516, + 118 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 LONG RANGE ARENA", + "text_level": 1, + "bbox": [ + 171, + 155, + 372, + 169 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Here we report the detailed implementation of the LRA experiments. We use the concatenation style combination of sub-kernels in all experiments and mildly tune the dimension of each scale. 
Since the SGConv exhibits a strong ability to fit data, we slightly increase the dropout for some tasks to prevent overfitting. Table 6 lists the detailed hyperparameters used in LRA. In most experiments, we set $\\alpha$ to $1/2$ , which approximately decays in speed $1 / pos$ . Experiments on flattened 2D images require some special modification of the kernel. We hypothesize that it is because images require more subtle inductive bias. For the experiment on the Image dataset, we use the disentangled version of parameterization and combination weights as described in Section 4.1.2 and set the decay speed to be $1 / pos$ . For the experiment on the Pathfinder-X task, we initialize convolution kernels in different channels with cosine waves with different frequencies and randomly assign $\\alpha$ ranging from 1 to $1/3$ to different channels. Both these modifications bring about $1\\%$ improvement compared to standard fixed $\\alpha = 1/2$ and random initialization. The remaining hyperparameters and experimental settings are same to Gu et al. (2022a) which can be found in the Github repo1.", + "bbox": [ + 169, + 194, + 826, + 377 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/f7289c91d4e58b5033e90590e23b06f162d2c6751363421f6ce87cac85459132.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ListOpsTextRetrievalImagePathfinderPath-X
Acc.61.4589.2091.1187.9795.4697.83
Scale dim.121323264
Dropout0000.20.20
", + "bbox": [ + 277, + 419, + 722, + 486 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 6: Hyperparameters used in LRA experiments.", + "bbox": [ + 321, + 494, + 671, + 511 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 SPEECH COMMAND", + "text_level": 1, + "bbox": [ + 171, + 566, + 356, + 580 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For Speech Command 10-class task, we use the same training setting from Gu et al. (2021a) earlier version Github repo $^2$ . For Speech Command 35-class task, we use the training setting from the Github repo $^1$ . The scale dimension of SGConv is 32.", + "bbox": [ + 169, + 606, + 826, + 650 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.3 LANGUAGE TASK", + "text_level": 1, + "bbox": [ + 171, + 700, + 341, + 714 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Our implementation for Language Task is based on the project $^{3}$ . For the 16-L model, we utilize 3072 as the sequence length for SGCONV and 192 as both the attention and memory length for SAAttention. For the 18-L model, we utilize 3072 as the sequence length for SGCONV and 384 as both the attention and memory length for SAAttention. The SGConv has 96 as the scale dimension. We adopt the training settings from the above mentioned project 3 except the batch size which is reduced to 64. 
The SGConv block is shown in Figure 4.", + "bbox": [ + 169, + 739, + 826, + 825 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "3https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/Transformer-XL", + "bbox": [ + 171, + 896, + 792, + 925 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/43affd57893af7a4fd9c10d4a23c5d23a137a803fd92031e7f0d12197d6d73fd.jpg", + "image_caption": [ + "Figure 6: SGConv block" + ], + "image_footnote": [], + "bbox": [ + 398, + 99, + 571, + 297 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/be3124a45efd4a51d65ebde39313b5d9977f19434cccf41865e3db4df0eb47f7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
256512102420483072
Attn. BlockInf. (ms/batch)2.67.323.291.7X
Mem. (GB)2.63.97.923.9OOM
SGConv BlockInf. (ms/batch)2.75.410.921.843.6
Mem. (GB)2.63.45.28.715.7
", + "bbox": [ + 274, + 359, + 722, + 465 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 7: Comparison of inference time and GPU memory utilization with Attention blocks. SGConv has significantly less memory usage and faster inference speed when the sequence increases.", + "bbox": [ + 169, + 474, + 823, + 505 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.4 IMAGE CLASSIFICATION", + "text_level": 1, + "bbox": [ + 171, + 529, + 387, + 542 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We use the training settings in the work Liu et al. (2022) $^4$ . Since the SGConvNeXt has several downsampling layers, we fixed the scale to 5 and the scale dimensions are calculated based on the flattened features length of the corresponding layers. The structure is shown in Figure 7. The results are shown in Table 8. The visualization of the SGConvNeXt-Base outputs are shown in Figure 9. The visualization of the SGConv kernels at different stages are shown in Figure 10.", + "bbox": [ + 169, + 554, + 823, + 625 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/1fa5dada2ae98699989f808bf6e6627d621edc1eae969b3f5ef14bb0223c5cf3.jpg", + "image_caption": [ + "Figure 7: SGConvnext" + ], + "image_footnote": [], + "bbox": [ + 267, + 637, + 449, + 859 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/c5e897c14ce292212b5305fdbc9027f627c9942c0ea5cb81bd2c0333cd4f6091.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 637, + 710, + 859 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_footnote", + "text": "4https://github.com/facebookresearch/ConvNeXt", + "bbox": [ + 191, + 909, + 589, + 924 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": 
"table", + "img_path": "images/407e45cbe52030a1fa74fba42dee8571a99b8b3810a004053f5d82ba0f6e9dea.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
modelFLOPsthroughput (image/s)paramsAcc.
Swin-T4.5G944.529M81.3
Swin-S8.7G576.850M83.0
Swin-B15.4G433.488M83.5
Swin-B384247.0G134.688M84.5
ConvNeXt-T4.5G1252.629M82.1
ConvNeXt-S8.7G801.450M83.1
ConvNeXt-B15.4G588.389M83.8
ConvNeXt-L34.4G349.8198M84.3
", + "bbox": [ + 187, + 104, + 486, + 229 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/d08d177dac67bcdb62444bfe08763b8750def72d9261d5a932fb54887eadcdfb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
modelFLOPsthroughput (image/s)paramsAcc.
EffNet-B330021.8G693.912M81.6
EffNet-B438024.2G341.519M82.9
EffNet-B545629.9G223.530M83.6
EffNet-B6528219.0G91.543M84.0
EffNet-B7600237.0G52.966M84.3
SGConvNeXt-T4.3G872.629M82.0
SGConvNeXt-S8.3G565.351M83.4
SGConvNeXt-B14.6G417.990M83.9
SGConvNeXt-L32.5G256.7200M84.4
", + "bbox": [ + 503, + 101, + 816, + 234 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 8: Comparison of ImageNet-1k Top-1 accuracy with SoTA works.", + "bbox": [ + 258, + 244, + 736, + 260 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B DETAILED IMPLEMENTATION", + "text_level": 1, + "bbox": [ + 171, + 287, + 454, + 303 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.1 ILLUSTRATION OF SGCONV MODULE", + "text_level": 1, + "bbox": [ + 171, + 319, + 480, + 333 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/dc73cd86ac1632a3107e6a2cc4f5942032d3e2eaf21f4bc8327b3750251d8525.jpg", + "image_caption": [ + "Figure 8: Implementing SGConv with FFT. We first compute the convolutional kernels for each channel as described in Section 3.2, and apply the depth-wise global convolution to the input features." + ], + "image_footnote": [], + "bbox": [ + 176, + 354, + 815, + 474 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.2 PYTHON STYLE PSEUDO-CODE", + "text_level": 1, + "bbox": [ + 171, + 565, + 434, + 579 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "# Parameters \nkernel-param_list = [] # w_i \nfor _ in range(num_scales): \n kernel-param_list.append(\n nnParameter(torch.random(hidden_dim, kernel_dim)) # size: h * d \n# Compute global convolution kernel \nkernel_list = [] # k_i \nfor i in range(num_scales): \n kernel = F.interpolate(\n kernel-param_list[i],\n scale_factor = 2**max(0, i-1),\n mode = \"linear\"\n ) * 0.5 ** i # alpha = 0.5 \n kernel_list.append(kernel) \n# The computed kernel, size: h * (d * 2^{s-1}) \nk = torch.cat(kernel_list, dim=-1) \n#Normalize kernel \nif is_init: # Compute the norm at initialization \nkernel_norm = k(norm(dim=-1, keepdim=True).detach() \nk = k / kernel_norm", + "guess_lang": "python", + "bbox": [ + 171, + 590, + 707, + 912 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as 
a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 509, + 960 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Use kernel to compute global convolution \n#x:batch_size \\* hidden_dim \\* seq_len \nL $=$ x.size(-1) \n#Truncate kernel if it is too long \nk $=$ k[., :L] \n# Use FFT to compute convolution \nx_f $=$ torch.fft.rfft(x, n=2*L) \nk_f $=$ torch.fft.rfft(k, n=2*L) \ny_f $=$ torch.einsum(\"b h l,h l-> b h l\",x_f,k_f) \n#Inverse FFT to get the result \ny $=$ torch.fft.irfft(y_f, n=2*L)[...,:L]", + "guess_lang": "txt", + "bbox": [ + 169, + 103, + 673, + 273 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/a9dc7d8f9a6579bcb0e37dc1288d81c3d7b12bb3cf6ca8adff9a4a8c7ef05f25.jpg", + "image_caption": [ + "Input" + ], + "image_footnote": [], + "bbox": [ + 205, + 315, + 302, + 388 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/07133a81bd32c85fca2b3c2f6edf443585bfeb5580f157079a479ab6bdc87c3d.jpg", + "image_caption": [ + "Stage 0" + ], + "image_footnote": [], + "bbox": [ + 330, + 316, + 424, + 387 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/8f9ccced2a4c5a31bd266c5297b0e0fcd55d74317ffa850e4164d0f29a20f46e.jpg", + "image_caption": [ + "Stage 1" + ], + "image_footnote": [], + "bbox": [ + 454, + 316, + 547, + 387 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/8f9354eb71c1900eb43ba42d87dc85cb5e33f59b5e413a9ee0900ab39647fb31.jpg", + "image_caption": [ + "Stage 2" + ], + "image_footnote": [], + "bbox": [ + 576, + 316, + 671, + 387 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/53394019db4eaa733cdb780433fbdcde63af6ac4ea7cb9231ec9885ef646c3af.jpg", + "image_caption": [ + "Stage 3" + ], + "image_footnote": [], + "bbox": [ + 700, + 316, + 794, + 387 + ], + "page_idx": 16 + }, + { + 
"type": "image", + "img_path": "images/39119b984d911ce8b1acf0d5c37a77cfc3ee189085b413fe8d29f451b7e423f9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 205, + 409, + 300, + 481 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/a7386662ad0f56176c6b1fbb805a405d5a8d59b6207af84b5d674bcdacbf39d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 330, + 409, + 424, + 479 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/4af53c42af2cccf6e45985a232eaa6503b464f113c97419605ed3da59341bc7e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 454, + 409, + 547, + 479 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/5005d29b7d379e0930dd26c01c74520b6dd14f226fc01b8f0f88e70f3eb4968c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 409, + 671, + 479 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/64a6d10bfc28168ce83ffa5298213128c167a407122dd6af6eadea4e3a01684c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 409, + 794, + 479 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/de571302efda9573db54e971b0e66fc36970220a56704ed3333efa6cfe2c9299.jpg", + "image_caption": [ + "Figure 9: Visualization of the intermediate features of SGConvNeXt on ImageNet-1k dataset." 
+ ], + "image_footnote": [], + "bbox": [ + 205, + 502, + 300, + 573 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/5564739a278af131f9579f07b6df9f3fe5b7c3dc6af6e24ee023f0f07cefeb79.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 330, + 502, + 424, + 571 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ec0bad80ad47b2e9c11fb30bfcd15d076fa5ab1069b5dbd683f06b363df9e5a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 454, + 502, + 547, + 571 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/a9826591ae19eadcb1b5057a65e10a3daa19c1db9dad446489d6b46a8796752e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 576, + 502, + 671, + 571 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/7cc909378927af4951b6e0afc256a4b067211b32c2308bfa7d3dda0cd4a34f98.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 502, + 794, + 571 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/d68ce1d42882558455ae27c576e70616f56bcf9853ae2ec281c69e5cb0ff8343.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 303, + 101, + 692, + 253 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/f0451dca363eba8e73cee1210fcccf9312c36bc021eec48307df6eb61648f68e.jpg", + "image_caption": [ + "(a) Visulization of kernels at Stage 0." + ], + "image_footnote": [], + "bbox": [ + 303, + 277, + 692, + 429 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/3f0b943fcd06078d4d6d796f7c864a44ce0cbb8d24901ac7809b4f31f49b659e.jpg", + "image_caption": [ + "(b) Kernels at Stage 1." 
+ ], + "image_footnote": [], + "bbox": [ + 303, + 452, + 692, + 606 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/9a98e21258cb5eaec5b05f739d68b236172d88715c45a81383461b6792f15f39.jpg", + "image_caption": [ + "(c) Kernels at Stage 2.", + "(d) Kernels at Stage 3.", + "Figure 10: Kernels in SGConvNeXt at different stages." + ], + "image_footnote": [], + "bbox": [ + 303, + 628, + 691, + 777 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C NEURAL ARCHITECTURE SEARCH PERSPECTIVE OF SGCONV", + "text_level": 1, + "bbox": [ + 171, + 859, + 723, + 875 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Neural architecture search (NAS) is an automated process for discovering a neural network's optimal architecture or structure for a particular task. NAS typically involves searching through a large", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "space of possible network architectures using combination algorithms, such as reinforcement learning (Zoph et al., 2018), evolutionary algorithms (Real et al., 2019), or Bayesian optimization (Kandasamy et al., 2018). In recent years, there has been a proliferation of research aimed at designing traditional convolutional neural networks with local convolution (Li et al., 2021; Lin et al., 2021; Li et al., 2023). These works primarily focus on optimizing the networks' structures to improve their performance. From the perspective of NAS, the SGConv can be interpreted as a kernel-level fine-grained search for the distribution of parameters by utilizing parameterization. Furthermore, the SGConv has shown that the global convolution kernel exhibits sparsity and can be pruned (Fig. 
10), meaning that the effective kernel length can be automatically determined through the training phase. These findings can potentially spark further research and development in the field. Another simple approach we explore in NAS is the combination of Attention and SGConv through a mixture model (Section 4.3.1). This approach is both intuitive and efficient and has the potential to improve the performance of neural network architectures further.", + "bbox": [ + 174, + 103, + 823, + 282 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 477, + 46 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 491, + 946, + 506, + 959 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_model.json b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f56aa93e115e09a177596d089a5df19bcf1d6f7c --- /dev/null +++ b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_model.json @@ -0,0 +1,2911 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.825, + 0.149 + ], + "angle": 0, + "content": "WHAT MAKES CONVOLUTIONAL MODELS GREAT ON LONG SEQUENCE MODELING?" 
+ }, + { + "type": "text", + "bbox": [ + 0.18, + 0.172, + 0.717, + 0.188 + ], + "angle": 0, + "content": "Yuhong Li\\(^{1*}\\) Tianle Cai\\(^{2*}\\) Yi Zhang\\(^{3}\\) Deming Chen\\(^{1}\\) Debadeepta Dey\\(^{3}\\)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.188, + 0.759, + 0.204 + ], + "angle": 0, + "content": "1University of Illinois Urbana-Champaign, 2Princeton University, 3Microsoft Research." + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.239, + 0.548, + 0.255 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.269, + 0.77, + 0.633 + ], + "angle": 0, + "content": "Convolutional models have been widely used in multiple domains. However, most existing models only use local convolution, making the model unable to handle long-range dependency efficiently. Attention overcomes this problem by aggregating global information based on the pair-wise attention score but also makes the computational complexity quadratic to the sequence length. Recently, Gu et al. (2021a) proposed a model called S4 inspired by the state space model. S4 can be efficiently implemented as a global convolutional model whose kernel size equals the input sequence length. With Fast Fourier Transform, S4 can model much longer sequences than Transformers and achieve significant gains over SoTA on several long-range tasks. Despite its empirical success, S4 is involved. It requires sophisticated parameterization and initialization schemes that combine the wisdom from several prior works. As a result, S4 is less intuitive and hard to use for researchers with limited prior knowledge. Here we aim to demystify S4 and extract basic principles that contribute to the success of S4 as a global convolutional model. 
We focus on the structure of the convolution kernel and identify two critical but intuitive principles enjoyed by S4 that are sufficient to make up an effective global convolutional model: 1) The parameterization of the convolutional kernel needs to be efficient in the sense that the number of parameters should scale sub-linearly with sequence length. 2) The kernel needs to satisfy a decaying structure that the weights for convolving with closer neighbors are larger than the more distant ones. Based on the two principles, we propose a simple yet effective convolutional model called Structured Global Convolution (SGConv). SGConv exhibits strong empirical performance over several tasks: 1) With faster speed, SGConv surpasses the previous SoTA on Long Range Arena and Speech Command datasets. 2) When plugging SGConv into standard language and vision models, it shows the potential to improve both efficiency and performance." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.656, + 0.338, + 0.672 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.687, + 0.827, + 0.773 + ], + "angle": 0, + "content": "Handling Long-Range Dependency (LRD) is a key challenge in long-sequence modeling tasks such as time-series forecasting, language modeling, and pixel-level image generation. Unfortunately, standard deep learning models fail to solve this problem for different reasons: Recurrent Neural Network (RNN) suffers from vanishing gradient, Transformer has complexity quadratic in the sequence length, and Convolutional Neural Network (CNN) usually only has a local receptive field in each layer." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.777, + 0.828, + 0.906 + ], + "angle": 0, + "content": "A recently proposed benchmark called Long-Range Arena (LRA) (Tay et al., 2020b) reveals that all existing models perform poorly in modeling LRD. 
Notably, on one spatial-level sequence modeling task called Pathfinder-X from LRA, all models fail except a new Structured State Space sequence model (S4) (Gu et al., 2021a). The S4 model is inspired by the state space model widely used in control theory and can be computed efficiently with a special parameterization based on the Cauchy kernel. The exact implementation of the S4 model can be viewed as a (depthwise) global convolutional model with an involved computation global convolution kernel. Thanks to the global receptive field of the convolution kernel, S4 is able to handle tasks that require LRD, such as Pathfinder (Linsley et al., 2018; Tay et al., 2020b), where classic local CNNs fail (Linsley et al., 2018; Kim et al.," + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.757, + 0.926 + ], + "angle": 0, + "content": "*Equal contribution. Work done during the internship at Microsoft Research. Code is available." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "2019). Also, the use of Fast Fourier Transform (FFT) and techniques from numerical linear algebra make the computational complexity of S4 tractable compared to the quadratic complexity of attention. Together, S4 shows the potential of global convolutional models to model LRD and advances the SoTA on LRA." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.827, + 0.254 + ], + "angle": 0, + "content": "Despite its accomplishments, the delicate design of S4 makes it unfriendly even to knowledgeable researchers. 
In particular, the empirical success of S4 relies on 1) A Diagonal Plus Low Rank (DLPR) parameterization whose efficient implementation requires several numerical linear algebra tricks, 2) An initialization scheme based on the HiPPO matrix derived in prior work (Gu et al., 2020). Therefore, aiming to reduce the complications of the model and highlight minimal principles, we raise the following questions:" + }, + { + "type": "text", + "bbox": [ + 0.193, + 0.265, + 0.807, + 0.294 + ], + "angle": 0, + "content": "What contributes to the success of the S4 model? Can we establish a simpler model based on minimal principles to handle long-range dependency?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.307, + 0.828, + 0.53 + ], + "angle": 0, + "content": "To answer these questions, we focus on the design of the global convolution kernel. We extract two simple and intuitive principles that contribute to the success of the S4 kernel. The first principle is that the parameterization of the global convolution kernel should be efficient in terms of the sequence length: the number of parameters should scale slowly with the sequence length. For example, classic CNNs use a fixed kernel size. S4 also uses a fixed number of parameters to compute the convolution kernel while the number is greater than classic CNNs. Both models satisfy the first principle as the number of parameters does not scale with input length. The efficiency of parameterization is also necessary because the naive implementation of a global convolution kernel with the size of sentence length is intractable for inputs with thousands of tokens. Too many parameters will also cause overfitting, thus hurting the performance. The second principle is the decaying structure of the convolution kernel, meaning that the weights for convolving with closer neighbors are larger than the more distant ones. This structure appears ubiquitously in signal processing, with the well-known Gaussian filter as an example. 
The intuition is clear that closer neighbors provide a more helpful signal. S4 inherently enjoys this decaying property because of the exponential decay of the spectrum of matrix powers (See Figure 2), and we find this inductive bias improves the model performance (See Section 4.1.2)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.537, + 0.453, + 0.896 + ], + "angle": 0, + "content": "We show that these two principles are sufficient for designing a global convolutional model that captures LRD well. To verify this, we introduce a class of global convolution kernels with a simple multiscale structure, as shown in Figure 1. Specifically, we compose the convolution kernel by a sequence of sub-kernels of increasing sizes, yet every sub-kernel is upsampled from the same number of parameters. This parameterization ensures that the number of parameters only scales logarithmically to the input length, which satisfies the first principle. In addition, we add a decaying weight to each scale during the combination step and fulfill the second principle. We named our methods as Structural Global Convolution kernels (SGConv). Empirically, SGConv improves S4 by more than \\(1\\%\\) and achieves SoTA results on the LRA benchmark. On Speech Command datasets, SGConv achieves comparative results in the ten-class classification task and significantly better results in the 35-class classification task upon previous SoTA. We further show" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.897, + 0.825, + 0.926 + ], + "angle": 0, + "content": "that SGConv is more efficient than S4 and can be used as a general purpose module in different domains. 
For example, a hybrid model of classic attention and SGConv shows promising performance" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.556, + 0.795, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.736, + 0.825, + 0.877 + ], + "angle": 0, + "content": "Figure 1: Illustration of the parameterization used in SGConv (Eq. (1)). The convolution kernel is composed of multi-scale sub-kernels. Parameterization Efficiency. Every larger sub-kernel doubles the size of the previous sub-kernel while the same number of parameters are used for every scale, ensuring a logarithmic dependency of the number of parameters to the input length. Decaying. We use a weighted combination of sub-kernels where the weights are decaying, and smaller weights are assigned to larger scales." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "on both autoregressive language modeling and sentence classification tasks, replacing the 2D convolution kernel of the ConvNext model with 1D SGConv matches the performance of the original model." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.17, + 0.343, + 0.185 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.202, + 0.827, + 0.329 + ], + "angle": 0, + "content": "Efficient Transformers. The Transformer architecture (Vaswani et al., 2017) has been successful across a wide range of applications (Dosovitskiy et al., 2020; Liu et al., 2021; Dong et al., 2018; Ye et al., 2022) in machine learning. 
However, the computation and memory complexity of Transformer scales quadratically with the input length, making it intractable for modeling long-range interactions in very long sequences. Therefore, several efficient variants of Transformer model have been proposed recently to overcome this issue (Child et al., 2019; Wang et al., 2020; Kitaev et al., 2019; Zaheer et al., 2020; Tay et al., 2020a; Peng et al., 2021; Qin et al., 2021). Nevertheless, few of these methods performed well on benchmarks such as Long Range Arena (Tay et al., 2020b), SCROLLS (Shaham et al., 2022), which require long-range modeling ability." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.345, + 0.828, + 0.526 + ], + "angle": 0, + "content": "(Re-)parameterization. Parameterization is a crucial but underrated part of architecture design because different parameterizations usually provide different inductive biases. For example, weight normalization (Salimans & Kingma, 2016) parameterizes the norm and direction of the weight matrices separately and thus reaches faster convergence. On the other hand, Zagoruyko & Komodakis (2017) proposed a Dirac weight re-parameterization to train deep networks without explicit skip-connections and matched the performance of ResNets (He et al., 2016). In computer vision, several works explored using structural re-parameterization to create 2D convolution kernels. Most of these works (Ding et al., 2019; Guo et al., 2020; Ding et al., 2021; Cao et al., 2022) are limited to the vision domain and utilize only short-range convolution kernels (e.g., \\(7 \\times 7\\)) except for the line of work based on 2D Fourier operators (Rao et al., 2021; Guibas et al., 2021) and the line of work based on continuous convolutional kernel (Romero et al., 2021b;a; 2022). Our SGConv kernel is a special parameterization of global convolution kernels that tackles LRD and showcases the extensibility of re-parameterized kernels." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.543, + 0.827, + 0.753 + ], + "angle": 0, + "content": "State Space Models. The state space model (SSM) uses a set of linear differential equations to model physical systems with input, output, and state variables. It is widely used in control, neuroscience, and statistics. Recently, Gu et al. (2021b) introduced a deep SSM-based model that can outperform prior approaches on several long sequence modeling tasks with a specially structured state transition matrix. However, the expensive computation and memory requirements make it impractical. A followup work of Gu et al. (2021b) proposed a new parameterization of SSM (Gu et al., 2021a), which decomposes the state transition matrix into the sum of low-rank and normal matrices and implements SSM as a global convolutional model. Under this parameterization, the authors then combine the techniques of diagonalizing the Cauchy kernel and performing low-rank corrections with the Woodbury identity to compute the global convolution kernel. While achieving promising results, S4 is theoretically involved and practical implementations of S4 require accelerator-specific dedicated code optimization for the Cauchy kernel computation. This makes it difficult to readily implement in deep learning frameworks (Abadi et al., 2016; Chen et al., 2015; Chen, 2021; Ma et al., 2019) and hardware targets. Concurrent with this work, many state-space-based models are emerging and bringing better performance (Gu et al., 2022a; Smith et al., 2022; Hasani et al., 2022)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.775, + 0.608, + 0.79 + ], + "angle": 0, + "content": "3 DESIGN OF GLOBAL CONVOLUTIONAL MODELS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.808, + 0.825, + 0.851 + ], + "angle": 0, + "content": "We summarize the design principles that enable the global convolutional model to be both efficient and effective. 
Then we introduce the proposed Structured Global Convolution (SGConv) based on the highlighted principles." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.359, + 0.883 + ], + "angle": 0, + "content": "3.1 DESIGN PRINCIPLES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "The two intuitive design principles that contribute to the success of S4 are efficient parameterization and decaying structure." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.239, + 0.105, + 0.497, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.319, + 0.256, + 0.418, + 0.27 + ], + "angle": 0, + "content": "(a) Pathfinder-X" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.103, + 0.761, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.6, + 0.256, + 0.662, + 0.27 + ], + "angle": 0, + "content": "(b) SC-10" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.281, + 0.825, + 0.325 + ], + "angle": 0, + "content": "Figure 2: Visualization of S4 kernels on (a) Pathfinder-X and (b) Speech Command 10-class. The values in the convolution kernel exhibit a decaying behavior. We only plot the first 4096 positions for better illustration." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.349, + 0.827, + 0.473 + ], + "angle": 0, + "content": "Efficient Parameterization. Different from local convolution, where the kernel size is fixed, global convolution requires a kernel size that is the same as the sentence length. Naive parameterization of convolution kernel as classic local convolutions is therefore intractable for long sequences. 
For instance, the Pathfinder-X task has a length of \\(16K\\). It then impractically requires \\(4M\\) parameters for a single layer to model the depth-wise global convolution kernel with a standard channel size of 256. Thus, an efficient convolution kernel parameterization is necessary, especially when the sentence is extremely long. For example, S4 takes a well-designed Normal Plus Low-Rank (NPLR) parameterization to model the whole kernel with two special matrices where the number of parameters is fixed." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.489, + 0.825, + 0.573 + ], + "angle": 0, + "content": "Decaying Structure. Apart from the efficiency of the parameterization, we find that a decaying structure of the convolution kernel provides a good inductive bias to long-sequence modeling and contributes to the performance (See Section 4.1.2 for detailed ablation study). Concretely, the magnitude of the value in the convolution kernel should decay so that more weight is assigned to the close neighbors. S4 model inherently satisfies this property because the \\(k\\)-th element of the kernel of S4 is \\(\\mathbf{C}\\mathbf{A}^k\\mathbf{B}\\) and the operator norm of the power of a matrix decays exponentially:" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.576, + 0.825, + 0.613 + ], + "angle": 0, + "content": "Fact 1. For a square matrix \\(\\mathbf{A}\\), the operator norm \\(\\left\\| \\mathbf{A}^k \\right\\|_2 \\leq \\left\\| \\mathbf{A} \\right\\|_2^k\\). In particular, if \\(\\left\\| \\mathbf{A} \\right\\|_2 < 1\\), \\(\\left\\| \\mathbf{A}^k \\right\\|_2\\) decays exponentially to \\(k\\), so \\(\\left\\| \\mathbf{C} \\mathbf{A}^k \\mathbf{B} \\right\\|_2 \\leq \\left\\| \\mathbf{C} \\right\\|_2 \\left\\| \\mathbf{A}^k \\right\\|_2 \\left\\| \\mathbf{B} \\right\\|_2\\) also decays exponentially." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.62, + 0.744, + 0.636 + ], + "angle": 0, + "content": "We can also directly observe the decaying structure of S4 in different tasks in Figure 2." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.651, + 0.281, + 0.665 + ], + "angle": 0, + "content": "3.2 SGCONV" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.677, + 0.825, + 0.734 + ], + "angle": 0, + "content": "Putting the two principles altogether, we propose a simple global depth-wise convolution, dubbed Structured Global Convolution (SGConv), based on multiscale sub-kernels and weighted combinations. (See Figure 1). We will first introduce the parameterization of the convolutional kernel and then present how to build a global convolutional model with this kernel." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.748, + 0.827, + 0.89 + ], + "angle": 0, + "content": "Parameterization of SGConv Kernel. Formally, let \\( L \\) be the length of the input sequence, the convolutional kernel should also has length \\( L \\). We define the parameter set of a single channel as \\( S = \\{\\mathbf{w}_i|0\\leq i < \\left[\\log_2\\left(\\frac{L}{d}\\right)\\right] + 1\\} \\) where \\( \\mathbf{w}_i\\in \\mathbb{R}^d \\) is the parameter for \\( i \\)-th sub-kernel \\( k_{i} \\), and \\( d \\) is the dimension of the parameter. Denote the number of scales \\( N = \\left[\\log_2\\left(\\frac{L}{d}\\right)\\right] + 1 \\). We use the upsample operation, implemented as linear interpolation, to form sub-kernels of different scales. We use Upsample \\( _l(\\mathbf{x}) \\) to denote upsampling \\( \\mathbf{x} \\) to length \\( l \\) (We use F. interpolate function in Pytorch and set the mode to be linear in our implementation). We also introduce a normalization constant \\( Z \\) to ensure the convolution operation will not change the scale of the input and a coefficient \\( \\alpha \\) to control the decaying speed. 
Now, we are ready to introduce the weighted combination scheme by concatenating a set of weighted sub-kernels \\( k_{i} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.245, + 0.894, + 0.825, + 0.924 + ], + "angle": 0, + "content": "\\[\n\\operatorname{Cat}(S) = \\frac{1}{Z} \\left[ k_{0}, k_{1}, \\dots, k_{N-1} \\right], \\text{ where } k_{i} = \\alpha^{i} \\operatorname{Upsample}_{2^{\\max[i-1,0]} d}(\\mathbf{w}_{i}). \\tag{1}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + }, + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.101, + 0.778, + 0.232 + ], + "angle": 0, + "content": "
ModelListOpsTextRetrievalImagePathfinderPath-XAvg.
Transformer36.3764.2757.4642.4471.40X54.39
Sparse Trans.17.0763.5859.5944.2471.71X51.24
Linformer35.7053.9452.2738.5676.34X51.36
Reformer37.2756.1053.4038.0768.50X50.67
BigBird36.0564.0259.2940.8374.87X55.01
S4 (original)58.3576.0287.0987.2686.0588.1080.48
S4 (Gu et al., 2022b)59.6086.8290.9088.6594.2096.3586.09
SGConv61.4589.2091.1187.9795.4697.8387.17
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.241, + 0.825, + 0.286 + ], + "angle": 0, + "content": "Table 1: The performance of SGConv compared to other baselines on the LRA dataset. SGConv achieves significant improvement compared to previous methods with a more straightforward structure and faster speed (See Table 2)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.307, + 0.825, + 0.368 + ], + "angle": 0, + "content": "It is easy to check that \\(\\operatorname{Cat}(S)\\) gives the convolution kernel with length \\(\\sum_{i=0}^{N} 2^{\\max[i-1,0]} d = 2^{N-1} d \\geq L\\) (See Figure 1 for an illustration), which can be truncated to \\(L\\) if it is overlength. And the number of parameters is \\(Nd = O(\\log L)\\). The decay coefficient \\(\\alpha\\), usually chosen to be \\(1/2\\), induces the decaying structure." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.827, + 0.591 + ], + "angle": 0, + "content": "Incorporate SGConv to Modern Architectures. In the implementation, we compute the depthwise convolution kernel and use Fast Fourier Transform to compute the convolution in \\( O(L \\log L) \\) time (See Figure 8 for detailed illustration). We compute the normalization constant \\( Z \\) such that the norm of the kernel is one at initialization and fix it during training. Please refer to Appendix B.2 for a Python-style pseudo-code. We can plug SGConv into modern architectures as a replacement of attention in Transformer or local convolution in ConvNets (See Figure 6, 7 for two examples). Due to the relaxation of the structure of the convolutional kernel, SGConv does not have the RNN-style reformulation as S4. Yet, SGConv is naturally capable of performing autoregressive generation, such as language modeling, similarly to classic causal convolutional models (Van den Oord et al., 2016; Oord et al., 2016) and Transformers. 
Concretely, the convolution kernel is unidirectional, where the computation at the embedding of \\( i \\)-th is only computed based on tokens before \\( i \\), and left zero padding is used for ignoring the overlength kernel. During generation, hidden states of past tokens are cached for fast calculation of the next token with a single convolution step. Due to the simplicity of the parameterization, SGConv kernel is easy to compute and more efficient than the S4 kernel, as shown in Section 4.1.3." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.61, + 0.329, + 0.625 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.64, + 0.825, + 0.766 + ], + "angle": 0, + "content": "In this section, we first test the effectiveness of SGConv on two standard long sequence modeling tasks, i.e., Long Range Arena (Tay et al., 2020b) and Speech Commands (Warden, 2018), and compare it with S4 and other baselines. We also conduct ablation studies over the decay speed and scale dimension \\(d\\) and evaluate the speed of SGConv on LRA. Further, we explore the possibility of plugging the global convolutional layer into standard models as a general-purpose component for capturing long-range dependency. For language tasks, we find that replacing half of layers of Transformer with a certain strategy with SGConv block will not hurt performance, while the complexity of those layers improves from \\(O(L^2)\\) to \\(O(L\\log L)\\). On ImageNet, we replace the \\(7\\times 7\\) convolution in ConvNext (Liu et al., 2022) with SGConv and show comparative or better performance." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.782, + 0.368, + 0.796 + ], + "angle": 0, + "content": "4.1 LONG RANGE ARENA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.808, + 0.825, + 0.866 + ], + "angle": 0, + "content": "Long Range Arena benchmark (Tay et al., 2020b) is a suite of six tasks consisting of sequences ranging from 1K to 16K tokens, encompassing a wide range of data types and modalities such as text, natural, synthetic images, and mathematical expressions requiring similarity, structural, and visual-spatial reasoning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.872, + 0.294, + 0.886 + ], + "angle": 0, + "content": "4.1.1 RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We show the experimental results in Table 1 with several baseline methods (Vaswani et al., 2017; Child et al., 2019; Wang et al., 2020; Kitaev et al., 2019; Zaheer et al., 2020; Gu et al., 2021a; 2022b)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.223, + 0.101, + 0.778, + 0.257 + ], + "angle": 0, + "content": "
Sequence length256512102420484096819216384
Inf.S429.481.7158.3306.95941156.92274.0
CPUSGConv23.856.2108.7211.3409.3789.51559.3
Inf.S4 (w/o opt)2.72.74.47.915.232.764.5
GPUS4 (w. opt.)1.61.93.15.410.022.344.3
SGConv1.21.32.34.48.519.839.4
BPS4 (w/o opt)4.15.710.219.438.180.1161.2
GPUS4 (w. opt.)3.546.611.922.648.997.8
SGConv2.02.75.09.618.641.282.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.265, + 0.825, + 0.322 + ], + "angle": 0, + "content": "Table 2: Comparison of the inference and backpropagation time (ms/batch) of S4 and SGConv blocks (number of channels 128, batch size 64) on CPU and GPU. Note that the parameterization in S4 requires a customized CUDA kernel to improve the efficiency (refer to opt. in the Table). Nevertheless, SGConv still always surpasses S4 even compared to the optimized CUDA kernel." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.349, + 0.825, + 0.406 + ], + "angle": 0, + "content": "SGConv achieves a \\(1\\%\\) improvement in average accuracy upon well-tuned S4 variants introduced in Gu et al. (2022b). Notably, SGConv is guided by the two intuitive principles and has a much simpler structure than S4 (Gu et al., 2022b). The detailed implementation settings can be found in Appendix A.1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.422, + 0.431, + 0.435 + ], + "angle": 0, + "content": "4.1.2 ABLATION STUDY ON IMDB" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.446, + 0.825, + 0.543 + ], + "angle": 0, + "content": "We conduct ablation studies on the IMDB byte-level document classification task in the LRA benchmark. We mainly focus on two aspects: 1) The speed of decaying and 2) The parameter dimension \\( d \\) of each scale. For simplicity, in the standard SGConv formulation (Eq. (1)), we fix the decay coefficient \\( \\alpha = 1/2 \\) and only tune the dimension \\( d \\). However, the actual decay speed as a function of the position in the kernel depends both on \\( \\alpha \\) and \\( d \\), making it hard to conduct ablation studies. 
Thus, we use a slightly different convolution kernel that disentangles the decay speed and the dimension of each scale:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.55, + 0.825, + 0.597 + ], + "angle": 0, + "content": "\\[\n\\operatorname{Cat}^{*}(S) = \\frac{1}{Z} \\left[ k_{0}, k_{1}, \\dots, k_{N-1} \\right] \\odot \\left[ \\frac{1}{1^{t}}, \\frac{1}{2^{t}}, \\dots, \\frac{1}{L^{t}} \\right], \\text{ where } k_{i} = \\operatorname{Upsample}_{2^{\\max[i-1,0]} d}(\\mathbf{w}_{i}). \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.605, + 0.507, + 0.788 + ], + "angle": 0, + "content": "\\(t\\) here then controls the decay speed, which is independent of each scale's dimension. We conduct two sets of experiments: 1) Fix \\(d = 8\\), vary \\(t\\) from 0 (which means no decay) to 2, and 2) Fix \\(t = 1\\), vary \\(d\\) from 1 to 64. Figure 3 reports the accuracies in different settings. We can observe that 1) The decay structure is crucial for getting good performance, and 2) In a reasonable range, \\(d\\) has less impact on the performance than \\(t\\). Nevertheless, we observe a trend of performance drop when increasing \\(d\\) from 8 to 64. Experiments on larger \\(d\\) show worse performance, which can be attributed to overfitting." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.802, + 0.376, + 0.817 + ], + "angle": 0, + "content": "4.1.3 SPEED COMPARISON" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.508, + 0.925 + ], + "angle": 0, + "content": "In Table 2, we compare the computation speed of the S4 kernel and SGConv kernel in different settings. Due to its simplicity, SGConv is faster than S4 for any sentence length. SGConv is about \\(50\\%\\) faster than the vanilla implementation of the S4 kernel and is \\(15\\%\\) faster than the optimized CUDA kernels."
+ }, + { + "type": "image", + "bbox": [ + 0.519, + 0.624, + 0.822, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.787, + 0.825, + 0.898 + ], + "angle": 0, + "content": "Figure 3: Ablation study on the effect of decay speed and hidden dimension of each scale on IMDB dataset. \\(pos \\in [1, L]\\) refers to the position in the convolution kernel. We observe: 1) The decay structure is crucial for getting good performance; 2) In a reasonable range, \\(d\\) (Dimension) has less impact on the performance than \\(t\\) (\\(t \\in [0, 2.0]\\))." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.361, + 0.119 + ], + "angle": 0, + "content": "4.2 SPEECH COMMANDS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.828, + 0.284 + ], + "angle": 0, + "content": "The Speech Command (SC) dataset (Warden, 2018) is a 35-class dataset of 1 second (16000 Hz sampling rate) spoken words in English. However, followup works (Kidger et al., 2020; Gu et al., 2021b; Romero et al., 2021b;a) adopted a smaller 10-class subset of SC. And works (Romero et al., 2021a; Gu et al., 2021b) on the SC dataset specifically use pre-processing such as MFCC features. Our baselines are obtained from (Gu et al., 2021a; 2022a). Note that besides SSM-based models, there is no strong baseline for raw waveform classification using either the 10-class or the full dataset. And SSM-based methods also show the ability to perform 0-shot testing at lower sampling rate such as \\(8000\\mathrm{Hz}\\). Table 3 shows that the SGConv yields better results compared to the SSM-based method among 4 out of 5 tasks. 
Notably, for the original SC (35-class), SGConv achieves marginally higher accuracy for raw-sequence classification and significantly better results \\((+2.40\\%)\\) compared to the existing SoTA method." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.296, + 0.842, + 0.435 + ], + "angle": 0, + "content": "
10-clsTransformerPerformerNRDECKConvWaveGAN-DS4S4*SGConv
MFCC90.7580.8589.895.3X93.9692.0594.91
16000HZX30.7716.4911.671.6698.3297.9897.52
8000HZ (0-shot)X30.6815.1265.96X96.3091.8396.03
35-clsInceptionNetResNet-18XResNet-50ConvNetS4DS4S4*SGConv
16000HZ61.2477.8683.0195.5196.2596.0896.2796.42
8000HZ (0-shot)5.188.747.727.2691.5891.3291.8994.29
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.447, + 0.825, + 0.504 + ], + "angle": 0, + "content": "Table 3: Speech Command classification results compared to existing methods. * We carefully reproduce the S4 method based on the released code1. Since the latest version removed 10-class experiments settings, we utilized a earlier version2. The results suggest that for the SC 35-classification, SGConv achieves SoTA on both full length task and 2X sampling rate, zero-shot task." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.53, + 0.48, + 0.543 + ], + "angle": 0, + "content": "4.3 FURTHER APPLICATIONS OF SGCONV" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.556, + 0.825, + 0.655 + ], + "angle": 0, + "content": "We further study SGConv as a generic network architecture drop-in component targeting tasks in language modeling and computer vision. In Section 4.3.1 we present an efficient mixture of attention and SGConv layers architecture that replaces half of the attention blocks in the Transformer with the SGConv blocks. We demonstrate the potential of utilizing such a model for long text processing. In Section 4.3.2, we incorporate SGConv (1D) into ConvNeXt (Liu et al., 2022). Surprisingly, SGConv achieves comparable or even better results compared to several SoTA CNN and Vision Transformer models by treating the 2D features as a 1D sequence." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.669, + 0.36, + 0.683 + ], + "angle": 0, + "content": "4.3.1 LANGUAGE TASKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.693, + 0.454, + 0.875 + ], + "angle": 0, + "content": "Language modeling. We propose the SGConv block (shown in Figure 6) which is similar to the Attention block in Transformer (Vaswani et al., 2017). SGConv block enjoys both \\( O(L\\log (L)) \\) time complexity and space complexity. We benchmark the inference time and GPU memory usage of both SGConv and Attention in Table 7. 
When the sequence length is 1024, SGConv block is \\( \\sim 2.1\\mathrm{X} \\) faster than the Attention block. For language modeling, we utilize the feature of SGConv to directly process the long sequences. The" + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.709, + 0.788, + 0.84 + ], + "angle": 0, + "content": "
ModelValid.Test
LSTM+Hebb.29.029.2
16L Transformer-XL-24.0
16L SGConv+SAAttn21.9022.83
Adaptive Input-18.7
S4-20.95
18L Transformer-XL-18.3
18L Transformer-XL*18.1618.75
18L SGConv+SAAttn18.1018.70
" + }, + { + "type": "table_caption", + "bbox": [ + 0.472, + 0.849, + 0.816, + 0.865 + ], + "angle": 0, + "content": "Table 4: Performance comparison on WikiText-103." + }, + { + "type": "page_footnote", + "bbox": [ + 0.172, + 0.884, + 0.644, + 0.924 + ], + "angle": 0, + "content": "\\(^{1}\\)https://github.com/HazyResearch/state-spaces \n\\(^{2}\\)https://github.com/HazyResearch/state-spaces/tree/307f11bba801d5734235a1791df1859f6ae0e367" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.105, + 0.49, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.272, + 0.489, + 0.312 + ], + "angle": 0, + "content": "(a) Illustration of SGConv and Transformer-XL style Short Attention used in language modeling task. SGConv directly processes the full length sequence." + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.129, + 0.822, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.285, + 0.825, + 0.31 + ], + "angle": 0, + "content": "(b) The depth to replace SAttention with SGConv vs. validation PPL on WikiText-103" + }, + { + "type": "table_caption", + "bbox": [ + 0.251, + 0.323, + 0.746, + 0.339 + ], + "angle": 0, + "content": "Figure 4: Incorporating SGConv to Transformer models in language tasks." + }, + { + "type": "table", + "bbox": [ + 0.252, + 0.363, + 0.747, + 0.423 + ], + "angle": 0, + "content": "
MNLI-m/mmQNLIQQPSSTCoLASTSAvg.
BERT84.93/84.9191.3491.0492.8855.1988.2984.08
SGConvBERT84.78/84.7091.2591.1892.5557.9288.4284.40
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.436, + 0.827, + 0.48 + ], + "angle": 0, + "content": "Table 5: Performance comparison of BERT and SGConvBERT on GLUE dataset. SGConvBERT is comparable with BERT while being more efficient. We exclude MRPC and RTE datasets in GLUE because their sizes are too small (\\(< 5K\\) training samples)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.517, + 0.828, + 0.786 + ], + "angle": 0, + "content": "Attention block only targets the short range data termed SAttention. We illustrate the structure in Figure 4a. Furthermore, we investigate the strategy to replace the Attention blocks with SGConv blocks. We generate 50 architectures with 8 SGConv blocks and 8 Attention blocks where the order is shuffled. We denote the average depth to replace the Attention blocks as: \\(\\sum_{i=0}^{N_{SGConv}} \\mathrm{idx}_i / N_{total}\\) where the idx denotes the \\(i\\)th SGConv depth position. \\(N_{SGConv} = 8\\) and \\(N_{total} = 16\\) in this case. The results in Figure 4b suggest that when fixing the number of SGConv layer, models achieve better performance by placing SGConv blocks in deeper layers. Guided by the strategy, we handcraft two Transformer-XL (Dai et al., 2019) style models. (1) 16-layer: \\(\\{\\mathrm{A}, \\mathrm{A}, \\mathrm{A}, \\mathrm{C}\\} \\times 2 + \\{\\mathrm{A}, \\mathrm{C}, \\mathrm{C}, \\mathrm{C}\\} \\times 2\\). (2) 18-layer: \\(\\{\\mathrm{A}, \\mathrm{A}, \\mathrm{C}\\} \\times 3 + \\{\\mathrm{A}, \\mathrm{C}, \\mathrm{C}\\} \\times 3\\). A denotes SAttention and C denotes SGConv. \\(\\times N\\) denotes repeating the order of layers for \\(N\\) times. We test the model on WikiText-103 (Merit et al., 2016) which is a wide-used language modeling benchmark with an average length of 3.6K tokens per article. We set both the attention and memory length to 384 for 18L model and 192 for 16L model. The length of input sequence is 3092 which can be processed by SGConv directly. 
We show the results in Table 4. Our results suggest that when the attention range is short, the 16L model outperforms the baseline with -1.17 perplexity. For the 18L model, our model achieves 18.70 perplexity. Note that we use a smaller and affordable batch size (16) for training. Under the same setting, our model gains slightly better perplexity than Transformer-XL (-0.05). Our results show the potential of adopting SGConv as part of the language model for long range language sequence processing." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Sentence classification. We combine the SGConv block with the BERT model (Devlin et al., 2018). Concretely, we utilize the 12-layer \\(\\{\\mathrm{A},\\mathrm{A},\\mathrm{C}\\} \\times 2 + \\{\\mathrm{A},\\mathrm{C},\\mathrm{C}\\} \\times 2\\) model. The pretraining is conducted on BooksCorpus (Zhu et al., 2015) and English Wikipedia (Foundation). We then fine-tune the model on the GLUE benchmark (Wang et al., 2019). To avoid the instability of fine-tuning on small datasets, we only test on tasks with more than \\(5K\\) training samples. We follow the training and fine-tuning pipeline of Ke et al. (2020) (BERT-A in Table 1 of Ke et al. (2020)) and report the average accuracy of 5 different random seeds. SGConvBERT achieves comparable performance to the original BERT model, while the SGConv layer is more efficient than the attention layer." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.104, + 0.49, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.104, + 0.822, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.279, + 0.825, + 0.31 + ], + "angle": 0, + "content": "Figure 5: Comparison of ImageNet-1k Top-1 accuracy with SoTA works. Left: Top-1 Accuracy vs. FLOPs. Right: Top-1 Accuracy vs. Throughputs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.34, + 0.399, + 0.354 + ], + "angle": 0, + "content": "4.3.2 IMAGE CLASSIFICATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.365, + 0.828, + 0.645 + ], + "angle": 0, + "content": "We also evaluate the adaptability of SGConv by applying it on large-scale image classification. We conduct experiments on ImageNet-1k (Deng et al., 2009) which consists of more than 1.28 million high-resolution training and 50,000 validation images. We replace the \\(7 \\times 7\\) 2D convolutional kernels with SGConvs in ConvNeXt (Liu et al., 2022) denoted as SGConvNeXt. The block designs of SGConvNeXt are shown in Figure 7. Note we train the SGConvNeXt-Tiny/Small/Base/Large using hyperparameter settings from ConvNeXt4 without any changes. By treating the 2D features as sequences, our SGConvNeXt achieves better results compared to existing SoTA methods such as EfficientNets (Tan & Le, 2019), Swin Transformers (Liu et al., 2021) (shown in Figure 5). Note that Vision Transformer (Dosovitskiy et al., 2020) and its variants (Touvron et al., 2021a;b; Yu et al., 2022) adopt patching techniques that can lead to a quadratic increase in complexity with image size. 
Also, patching is incompatible with dynamic input resolutions while SGConvNeXt processes the data globally. We list several interesting directions that can be explored for future work: 1) Optimization for the long-range convolution: we noticed that though FFT theoretically requires less FLOPs than plain convolution, the throughput drops empirically. One reason is that there is no optimized CUDA implementation for 1D long-range convolution and can be a good direction for future work. 2) Optimized hyperparameters and data augmentation methods: ConvNeXts' hyperparameters are tuned for maximum performance, which may not be ideal for SGConvNeXt. 3) SGConv for vision reasoning tasks: we show that SGConv is powerful for long-range synthetic reasoning tasks and large-scale classification tasks. It could be effective in visual reasoning applications such as Vision-Language Reasoning (Johnson et al., 2017; Zhu et al., 2020) with great potential." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.668, + 0.312, + 0.683 + ], + "angle": 0, + "content": "5 DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.703, + 0.827, + 0.925 + ], + "angle": 0, + "content": "In this paper, we attempt to answer the question of what makes convolutional models great again on long sequence modeling and summarize two principles contributing to the success. Based on the principles, we propose a simple and intuitive global convolutional model SGConv that has both direct implications and solid performance. Concurrent to our work there are also attempts to simplify the S4 model by restricting the state transition matrix to be diagonal (Gu et al., 2022a; Gupta, 2022). The proposal by Gu et al. (2022a) incorporates an intricate approach to parameterization and initialization schemes compared to our paper. Their method provides insights into the S4 phenomenon from a state-space-model perspective. 
Instead, we hope our simpler principles and non-SSM-based model can open up a direction for general audiences to understand and try global convolution as a general-purpose module for tackling long-range dependency. This potential has been shown in a very recent paper (Ma et al., 2022) concurrent to our work, where the authors incorporate an exponential moving average layer to a Transformer-like model and achieve promising performance over several long sequence modeling tasks. The exponential moving average layer is a particular type of global convolution layer that naturally satisfies our two principles. We believe that similar global convolutional modules will emerge in the future as long-range dependency becomes increasingly critical for sequence modeling." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.37, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.134, + 0.827, + 0.205 + ], + "angle": 0, + "content": "We extend our gratitude to the anonymous reviewers for dedicating their time and expertise to provide constructive feedback and suggestions, which significantly enhanced the quality of this paper. We also express our appreciation to the Program Chairs and Area Chairs for their careful review and valuable comments. Special thanks go to Sebastien Bubeck, Arturs Backurs, Gustavo de Rosa, Di He, and Cong 'Callie' Hao for their valuable suggestions and support." 
+ }, + { + "type": "title", + "bbox": [ + 0.174, + 0.224, + 0.289, + 0.24 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.247, + 0.826, + 0.293 + ], + "angle": 0, + "content": "Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, et al. Tensorflow: Large-scale machine learning on heterogeneous distributed systems. arXiv preprint arXiv:1603.04467, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.298, + 0.826, + 0.342 + ], + "angle": 0, + "content": "Jinming Cao, Yangyan Li, Mingchao Sun, Ying Chen, Dani Lischinski, Daniel Cohen-Or, Baoquan Chen, and Changhe Tu. Do-conv: Depthwise over-parameterized convolutional layer. IEEE Transactions on Image Processing, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.348, + 0.7, + 0.365 + ], + "angle": 0, + "content": "Lei Chen. Deep Learning and Practice with MindSpore. Springer Nature, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.373, + 0.826, + 0.416 + ], + "angle": 0, + "content": "Tianqi Chen, Mu Li, Yutian Li, Min Lin, Naiyan Wang, Minjie Wang, Tianjun Xiao, Bing Xu, Chiyuan Zhang, and Zheng Zhang. Mxnet: A flexible and efficient machine learning library for heterogeneous distributed systems. arXiv preprint arXiv:1512.01274, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.424, + 0.826, + 0.453 + ], + "angle": 0, + "content": "Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.461, + 0.826, + 0.505 + ], + "angle": 0, + "content": "Zihang Dai, Zhilin Yang, Yiming Yang, William W Cohen, Jaime Carbonell, Quoc V Le, and Ruslan Salakhutdinov. Transformer-xl: Attentive language models beyond a fixed-length context. arXiv preprint arXiv:1901.02860, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.513, + 0.826, + 0.557 + ], + "angle": 0, + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.564, + 0.826, + 0.594 + ], + "angle": 0, + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.602, + 0.826, + 0.646 + ], + "angle": 0, + "content": "Xiaohan Ding, Yuchen Guo, Guiguang Ding, and Jungong Han. Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. In Proceedings of the IEEE/CVF international conference on computer vision, pp. 1911-1920, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.653, + 0.826, + 0.697 + ], + "angle": 0, + "content": "Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13733-13742, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.704, + 0.826, + 0.748 + ], + "angle": 0, + "content": "Linhao Dong, Shuang Xu, and Bo Xu. Speech-transformer: a no-recurrence sequence-to-sequence model for speech recognition. In 2018 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5884-5888. IEEE, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.755, + 0.826, + 0.813 + ], + "angle": 0, + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 
An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.82, + 0.799, + 0.837 + ], + "angle": 0, + "content": "Wikipedia Foundation. Wikipedia downloads. URL https://dumps.wikipedia.org." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.844, + 0.826, + 0.887 + ], + "angle": 0, + "content": "Albert Gu, Tri Dao, Stefano Ermon, Atri Rudra, and Christopher Ré. Hippo: Recurrent memory with optimal polynomial projections. Advances in Neural Information Processing Systems, 33: 1474-1487, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces. arXiv preprint arXiv:2111.00396, 2021a." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.247, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Albert Gu, Isys Johnson, Karan Goel, Khaled Saab, Tri Dao, Atri Rudra, and Christopher Ré. Combining recurrent, convolutional, and continuous-time models with linear state space layers. Advances in neural information processing systems, 34:572-585, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.825, + 0.185 + ], + "angle": 0, + "content": "Albert Gu, Ankit Gupta, Karan Goel, and Christopher Ré. On the parameterization and initialization of diagonal state space models. arXiv preprint arXiv:2206.11893, 2022a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.193, + 0.825, + 0.235 + ], + "angle": 0, + "content": "Albert Gu, Isys Johnson, Aman Timalsina, Atri Rudra, and Christopher Ré. How to train your hippo: State space models with generalized orthogonal basis projections. arXiv preprint arXiv:2206.12037, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.244, + 0.825, + 0.288 + ], + "angle": 0, + "content": "John Guibas, Morteza Mardani, Zongyi Li, Andrew Tao, Anima Anandkumar, and Bryan Catanzaro. Efficient token mixing for transformers via adaptive fourier neural operators. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.296, + 0.825, + 0.338 + ], + "angle": 0, + "content": "Shuxuan Guo, Jose M Alvarez, and Mathieu Salzmann. Expandnets: Linear over-parameterization to train compact convolutional networks. Advances in Neural Information Processing Systems, 33:1298-1310, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.348, + 0.825, + 0.375 + ], + "angle": 0, + "content": "Ankit Gupta. Diagonal state spaces are as effective as structured state spaces. arXiv preprint arXiv:2203.14343, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.385, + 0.825, + 0.414 + ], + "angle": 0, + "content": "Ramin Hasani, Mathias Lechner, Tsun-Hsuan Wang, Makram Chahine, Alexander Amini, and Daniela Rus. Liquid structural state-space models. arXiv preprint arXiv:2209.12951, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.422, + 0.825, + 0.465 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.473, + 0.825, + 0.532 + ], + "angle": 0, + "content": "Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2901-2910, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.539, + 0.825, + 0.583 + ], + "angle": 0, + "content": "Kirthevasan Kandasamy, Willie Neiswanger, Jeff Schneider, Barnabas Poczos, and Eric P Xing. Neural architecture search with bayesian optimisation and optimal transport. Advances in neural information processing systems, 31, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.591, + 0.825, + 0.62 + ], + "angle": 0, + "content": "Guolin Ke, Di He, and Tie-Yan Liu. Rethinking positional encoding in language pre-training. In International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.628, + 0.825, + 0.671 + ], + "angle": 0, + "content": "Patrick Kidger, James Morrill, James Foster, and Terry Lyons. Neural controlled differential equations for irregular time series. Advances in Neural Information Processing Systems, 33:6696-6707, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.679, + 0.825, + 0.71 + ], + "angle": 0, + "content": "Junkyung Kim, Drew Linsley, Kalpit Thakkar, and Thomas Serre. Disentangling neural mechanisms for perceptual grouping. In International Conference on Learning Representations, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.717, + 0.825, + 0.747 + ], + "angle": 0, + "content": "Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. In International Conference on Learning Representations, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.755, + 0.825, + 0.784 + ], + "angle": 0, + "content": "Guihong Li, Yuedong Yang, Kartikeya Bhardwaj, and Radu Marculescu. Zico: Zero-shot nas via inverse coefficient of variation on gradients. arXiv preprint arXiv:2301.11300, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.793, + 0.825, + 0.822 + ], + "angle": 0, + "content": "Yuhong Li, Cong Hao, Pan Li, Jinjun Xiong, and Deming Chen. Generic neural architecture search via regression. Advances in Neural Information Processing Systems, 34:20476-20490, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.83, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Ming Lin, Pichao Wang, Zhenhong Sun, Hesen Chen, Xiuyu Sun, Qi Qian, Hao Li, and Rong Jin. Zen-nas: A zero-shot nas for high-performance image recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 347-356, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Drew Linsley, Junkyung Kim, Vijay Veerabadran, Charles Windolf, and Thomas Serre. Learning long-range spatial dependencies with horizontal gated recurrent units. Advances in neural information processing systems, 31, 2018." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10012-10022, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976-11986, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.826, + 0.249 + ], + "angle": 0, + "content": "Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer. Mega: Moving average equipped gated attention. arXiv preprint arXiv:2209.10655, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.258, + 0.826, + 0.288 + ], + "angle": 0, + "content": "Yanjun Ma, Dianhai Yu, Tian Wu, and Haifeng Wang. Paddlepaddle: An open-source deep learning platform from industrial practice. Frontiers of Data and Computing, 1(1):105-115, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.296, + 0.826, + 0.325 + ], + "angle": 0, + "content": "Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.333, + 0.826, + 0.377 + ], + "angle": 0, + "content": "Aaron van den Oord, Sander Dieleman, Heiga Zen, Karen Simonyan, Oriol Vinyals, Alex Graves, Nal Kalchbrenner, Andrew Senior, and Koray Kavukcuoglu. Wavenet: A generative model for raw audio. arXiv preprint arXiv:1609.03499, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.384, + 0.826, + 0.428 + ], + "angle": 0, + "content": "Hao Peng, Nikolaos Pappas, Dani Yogatama, Roy Schwartz, Noah Smith, and Lingpeng Kong. Random feature attention. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=QtTKTdVrFBB." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.437, + 0.826, + 0.48 + ], + "angle": 0, + "content": "Zhen Qin, Weixuan Sun, Hui Deng, Dongxu Li, Yunshen Wei, Baohong Lv, Junjie Yan, Lingpeng Kong, and Yiran Zhong. cosformer: Rethinking softmax in attention. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.488, + 0.826, + 0.517 + ], + "angle": 0, + "content": "Yongming Rao, Wenliang Zhao, Zheng Zhu, Jiwen Lu, and Jie Zhou. Global filter networks for image classification. Advances in Neural Information Processing Systems, 34:980-993, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.525, + 0.826, + 0.568 + ], + "angle": 0, + "content": "Esteban Real, Alok Aggarwal, Yanping Huang, and Quoc V Le. Regularized evolution for image classifier architecture search. In Proceedings of the aaai conference on artificial intelligence, volume 33, pp. 4780-4789, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.577, + 0.826, + 0.621 + ], + "angle": 0, + "content": "David W Romero, Robert-Jan Bruintjes, Jakub Mikolaj Tomczak, Erik J Bekkers, Mark Hoogendoorn, and Jan van Gemert. Flexconv: Continuous kernel convolutions with differentiable kernel sizes. In International Conference on Learning Representations, 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.628, + 0.826, + 0.672 + ], + "angle": 0, + "content": "David W Romero, Anna Kuzina, Erik J Bekkers, Jakub Mikolaj Tomczak, and Mark Hoogendoorn. Ckconv: Continuous kernel convolution for sequential data. In International Conference on Learning Representations, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.68, + 0.826, + 0.723 + ], + "angle": 0, + "content": "David W Romero, David M Knigge, Albert Gu, Erik J Bekkers, Efstratios Gavves, Jakub M Tomczak, and Mark Hoogendoorn. Towards a general purpose cnn for long range dependencies in nd. arXiv preprint arXiv:2206.03398, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.731, + 0.826, + 0.761 + ], + "angle": 0, + "content": "Tim Salimans and Durk P Kingma. Weight normalization: A simple reparameterization to accelerate training of deep neural networks. Advances in neural information processing systems, 29, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.769, + 0.826, + 0.813 + ], + "angle": 0, + "content": "Uri Shaham, Elad Segal, Maor Ivgi, Avia Efrat, Ori Yoran, Adi Haviv, Ankit Gupta, Wenhan Xiong, Mor Geva, Jonathan Berant, and Omer Levy. Scrols: Standardized comparison over long language sequences, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.82, + 0.826, + 0.85 + ], + "angle": 0, + "content": "Jimmy TH Smith, Andrew Warrington, and Scott W Linderman. Simplified state space layers for sequence modeling. arXiv preprint arXiv:2208.04933, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.858, + 0.826, + 0.888 + ], + "angle": 0, + "content": "Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International conference on machine learning, pp. 6105-6114. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Y Tay, D Bahri, D Metzler, D Juan, Z Zhao, and C Zheng. Synthesizer: Rethinking self-attention in transformer models. arxiv 2020. arXiv preprint arXiv:2005.00743, 2, 2020a." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Yi Tay, Mostafa Dehghani, Samira Abnar, Yikang Shen, Dara Bahri, Philip Pham, Jinfeng Rao, Liu Yang, Sebastian Ruder, and Donald Metzler. Long range arena: A benchmark for efficient transformers. arXiv preprint arXiv:2011.04006, 2020b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In International Conference on Machine Learning, pp. 10347-10357. PMLR, 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.826, + 0.251 + ], + "angle": 0, + "content": "Hugo Touvron, Matthieu Cord, Alexandre Sablayrolles, Gabriel Synnaeve, and Hervé Jégou. Going deeper with image transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 32-42, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.258, + 0.826, + 0.302 + ], + "angle": 0, + "content": "Aaron Van den Oord, Nal Kalchbrenner, Lasse Espeholt, Oriol Vinyals, Alex Graves, et al. Conditional image generation with pixelCNN decoders. Advances in neural information processing systems, 29, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.31, + 0.826, + 0.354 + ], + "angle": 0, + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 
Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.362, + 0.826, + 0.405 + ], + "angle": 0, + "content": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. 2019. In the Proceedings of ICLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.413, + 0.826, + 0.443 + ], + "angle": 0, + "content": "Sinong Wang, Belinda Z. Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.451, + 0.826, + 0.481 + ], + "angle": 0, + "content": "Pete Warden. Speech commands: A dataset for limited-vocabulary speech recognition. arXiv preprint arXiv:1804.03209, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.488, + 0.826, + 0.532 + ], + "angle": 0, + "content": "Wenting Ye, Hongfei Yang, Shuai Zhao, Haoyang Fang, Xingjian Shi, and Naveen Neppalli. A transformer-based substitute recommendation model incorporating weakly supervised customer behavior data. arXiv preprint arXiv:2211.02533, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.54, + 0.826, + 0.584 + ], + "angle": 0, + "content": "Weihao Yu, Mi Luo, Pan Zhou, Chenyang Si, Yichen Zhou, Xinchao Wang, Jiashi Feng, and Shuicheng Yan. Metaformer is actually what you need for vision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10819-10829, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.592, + 0.826, + 0.622 + ], + "angle": 0, + "content": "Sergey Zagoruyko and Nikos Komodakis. Diracnets: Training very deep neural networks without skip-connections. arXiv preprint arXiv:1706.00388, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.63, + 0.826, + 0.673 + ], + "angle": 0, + "content": "Manzil Zaheer, Guru Guruganesh, Kumar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontonon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, et al. Big bird: Transformers for longer sequences. Advances in Neural Information Processing Systems, 33:17283-17297, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.681, + 0.826, + 0.725 + ], + "angle": 0, + "content": "Fengda Zhu, Yi Zhu, Xiaojun Chang, and Xiaodan Liang. Vision-language navigation with self-supervised auxiliary reasoning tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10012-10022, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.733, + 0.826, + 0.79 + ], + "angle": 0, + "content": "Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE international conference on computer vision, pp. 19-27, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.798, + 0.826, + 0.843 + ], + "angle": 0, + "content": "Barret Zoph, Vijay Vasudevan, Jonathon Shlens, and Quoc V Le. Learning transferable architectures for scalable image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8697-8710, 2018." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.843 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.517, + 0.119 + ], + "angle": 0, + "content": "A DETAILED EXPERIMENTAL RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.156, + 0.373, + 0.17 + ], + "angle": 0, + "content": "A.1 LONG RANGE ARENA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.828, + 0.378 + ], + "angle": 0, + "content": "Here we report the detailed implementation of the LRA experiments. We use the concatenation style combination of sub-kernels in all experiments and mildly tune the dimension of each scale. Since the SGConv exhibits a strong ability to fit data, we slightly increase the dropout for some tasks to prevent overfitting. Table 6 lists the detailed hyperparameters used in LRA. In most experiments, we set \\(\\alpha\\) to \\(1/2\\), which approximately decays in speed \\(1 / pos\\). Experiments on flattened 2D images require some special modification of the kernel. We hypothesize that it is because images require more subtle inductive bias. For the experiment on the Image dataset, we use the disentangled version of parameterization and combination weights as described in Section 4.1.2 and set the decay speed to be \\(1 / pos\\). For the experiment on the Pathfinder-X task, we initialize convolution kernels in different channels with cosine waves with different frequencies and randomly assign \\(\\alpha\\) ranging from 1 to \\(1/3\\) to different channels. Both these modifications bring about \\(1\\%\\) improvement compared to standard fixed \\(\\alpha = 1/2\\) and random initialization. 
The remaining hyperparameters and experimental settings are same to Gu et al. (2022a) which can be found in the Github repo1." + }, + { + "type": "table", + "bbox": [ + 0.278, + 0.42, + 0.723, + 0.487 + ], + "angle": 0, + "content": "
ListOpsTextRetrievalImagePathfinderPath-X
Acc.61.4589.2091.1187.9795.4697.83
Scale dim.121323264
Dropout0000.20.20
" + }, + { + "type": "table_caption", + "bbox": [ + 0.323, + 0.495, + 0.673, + 0.512 + ], + "angle": 0, + "content": "Table 6: Hyperparameters used in LRA experiments." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.568, + 0.357, + 0.582 + ], + "angle": 0, + "content": "A.2 SPEECH COMMAND" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.607, + 0.827, + 0.651 + ], + "angle": 0, + "content": "For Speech Command 10-class task, we use the same training setting from Gu et al. (2021a) earlier version Github repo\\(^2\\). For Speech Command 35-class task, we use the training setting from the Github repo\\(^1\\). The scale dimension of SGConv is 32." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.702, + 0.342, + 0.715 + ], + "angle": 0, + "content": "A.3 LANGUAGE TASK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.741, + 0.827, + 0.827 + ], + "angle": 0, + "content": "Our implementation for Language Task is based on the project \\(^{3}\\). For the 16-L model, we utilize 3072 as the sequence length for SGCONV and 192 as both the attention and memory length for SAAttention. For the 18-L model, we utilize 3072 as the sequence length for SGCONV and 384 as both the attention and memory length for SAAttention. The SGConv has 96 as the scale dimension. We adopt the training settings from the above mentioned project 3 except the batch size which is reduced to 64. The SGConv block is shown in Figure 4." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.172, + 0.897, + 0.794, + 0.926 + ], + "angle": 0, + "content": "3https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/Transformer-XL" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.101, + 0.572, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.416, + 0.317, + 0.584, + 0.334 + ], + "angle": 0, + "content": "Figure 6: SGConv block" + }, + { + "type": "table", + "bbox": [ + 0.276, + 0.361, + 0.723, + 0.466 + ], + "angle": 0, + "content": "
256512102420483072
Attn. BlockInf. (ms/batch)2.67.323.291.7X
Mem. (GB)2.63.97.923.9OOM
SGConv BlockInf. (ms/batch)2.75.410.921.843.6
Mem. (GB)2.63.45.28.715.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.475, + 0.825, + 0.506 + ], + "angle": 0, + "content": "Table 7: Comparison of inference time and GPU memory utilization with Attention blocks. SGConv has significantly less memory usage and faster inference speed when the sequence increases." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.53, + 0.388, + 0.543 + ], + "angle": 0, + "content": "A.4 IMAGE CLASSIFICATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.555, + 0.825, + 0.626 + ], + "angle": 0, + "content": "We use the training settings in the work Liu et al. (2022)\\(^4\\). Since the SGConvNeXt has several downsampling layers, we fixed the scale to 5 and the scale dimensions are calculated based on the flattened features length of the corresponding layers. The structure is shown in Figure 7. The results are shown in Table 8. The visualization of the SGConvNeXt-Base outputs are shown in Figure 9. The visualization of the SGConv kernels at different stages are shown in Figure 10." + }, + { + "type": "image", + "bbox": [ + 0.269, + 0.638, + 0.45, + 0.861 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.638, + 0.711, + 0.861 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.421, + 0.885, + 0.579, + 0.901 + ], + "angle": 0, + "content": "Figure 7: SGConvnext" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.591, + 0.925 + ], + "angle": 0, + "content": "4https://github.com/facebookresearch/ConvNeXt" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.188, + 0.106, + 0.487, + 0.23 + ], + "angle": 0, + "content": "
modelFLOPsthroughput (image/s)paramsAcc.
Swin-T4.5G944.529M81.3
Swin-S8.7G576.850M83.0
Swin-B15.4G433.488M83.5
Swin-B384247.0G134.688M84.5
ConvNeXt-T4.5G1252.629M82.1
ConvNeXt-S8.7G801.450M83.1
ConvNeXt-B15.4G588.389M83.8
ConvNeXt-L34.4G349.8198M84.3
" + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.102, + 0.818, + 0.236 + ], + "angle": 0, + "content": "
modelFLOPsthroughput (image/s)paramsAcc.
EffNet-B330021.8G693.912M81.6
EffNet-B438024.2G341.519M82.9
EffNet-B545629.9G223.530M83.6
EffNet-B6528219.0G91.543M84.0
EffNet-B7600237.0G52.966M84.3
SGConvNeXt-T4.3G872.629M82.0
SGConvNeXt-S8.3G565.351M83.4
SGConvNeXt-B14.6G417.990M83.9
SGConvNeXt-L32.5G256.7200M84.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.259, + 0.245, + 0.738, + 0.261 + ], + "angle": 0, + "content": "Table 8: Comparison of ImageNet-1k Top-1 accuracy with SoTA works." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.288, + 0.455, + 0.304 + ], + "angle": 0, + "content": "B DETAILED IMPLEMENTATION" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.32, + 0.482, + 0.334 + ], + "angle": 0, + "content": "B.1 ILLUSTRATION OF SGCONV MODULE" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.355, + 0.816, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.497, + 0.825, + 0.539 + ], + "angle": 0, + "content": "Figure 8: Implementing SGConv with FFT. We first compute the convolutional kernels for each channel as described in Section 3.2, and apply the depth-wise global convolution to the input features." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.566, + 0.435, + 0.58 + ], + "angle": 0, + "content": "B.2 PYTHON STYLE PSEUDO-CODE" + }, + { + "type": "code", + "bbox": [ + 0.172, + 0.592, + 0.709, + 0.913 + ], + "angle": 0, + "content": "# Parameters \nkernel-param_list = [] # w_i \nfor _ in range(num_scales): \n kernel-param_list.append(\n nnParameter(torch.random(hidden_dim, kernel_dim)) # size: h * d \n# Compute global convolution kernel \nkernel_list = [] # k_i \nfor i in range(num_scales): \n kernel = F.interpolate(\n kernel-param_list[i],\n scale_factor = 2**max(0, i-1),\n mode = \"linear\"\n ) * 0.5 ** i # alpha = 0.5 \n kernel_list.append(kernel) \n# The computed kernel, size: h * (d * 2^{s-1}) \nk = torch.cat(kernel_list, dim=-1) \n#Normalize kernel \nif is_init: # Compute the norm at initialization \nkernel_norm = k(norm(dim=-1, keepdim=True).detach() \nk = k / kernel_norm" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.51, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + 
"angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code", + "bbox": [ + 0.171, + 0.104, + 0.674, + 0.274 + ], + "angle": 0, + "content": "Use kernel to compute global convolution \n#x:batch_size \\* hidden_dim \\* seq_len \nL \\(=\\) x.size(-1) \n#Truncate kernel if it is too long \nk \\(=\\) k[., :L] \n# Use FFT to compute convolution \nx_f \\(=\\) torch.fft.rfft(x, n=2*L) \nk_f \\(=\\) torch.fft.rfft(k, n=2*L) \ny_f \\(=\\) torch.einsum(\"b h l,h l-> b h l\",x_f,k_f) \n#Inverse FFT to get the result \ny \\(=\\) torch.fft.irfft(y_f, n=2*L)[...,:L]" + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.296, + 0.277, + 0.312 + ], + "angle": 0, + "content": "Input" + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.316, + 0.303, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.342, + 0.296, + 0.41, + 0.313 + ], + "angle": 0, + "content": "Stage 0" + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.317, + 0.426, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.47, + 0.296, + 0.536, + 0.313 + ], + "angle": 0, + "content": "Stage 1" + }, + { + "type": "image", + "bbox": [ + 0.455, + 0.317, + 0.548, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.594, + 0.296, + 0.661, + 0.313 + ], + "angle": 0, + "content": "Stage 2" + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.317, + 0.672, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.718, + 0.296, + 0.785, + 0.313 + ], + "angle": 0, + "content": "Stage 3" + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.317, + 0.795, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.41, + 0.301, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.41, + 0.425, + 0.481 + ], + "angle": 0, + "content": null 
+ }, + { + "type": "image", + "bbox": [ + 0.455, + 0.41, + 0.548, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.41, + 0.672, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.41, + 0.795, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.503, + 0.301, + 0.574 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.503, + 0.425, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.455, + 0.503, + 0.548, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.503, + 0.672, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.503, + 0.795, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.187, + 0.6, + 0.809, + 0.616 + ], + "angle": 0, + "content": "Figure 9: Visualization of the intermediate features of SGConvNeXt on ImageNet-1k dataset." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.304, + 0.102, + 0.693, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.388, + 0.263, + 0.61, + 0.276 + ], + "angle": 0, + "content": "(a) Visulization of kernels at Stage 0." + }, + { + "type": "image", + "bbox": [ + 0.304, + 0.278, + 0.693, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.432, + 0.438, + 0.566, + 0.451 + ], + "angle": 0, + "content": "(b) Kernels at Stage 1." 
+ }, + { + "type": "image", + "bbox": [ + 0.305, + 0.453, + 0.693, + 0.607 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.433, + 0.615, + 0.566, + 0.629 + ], + "angle": 0, + "content": "(c) Kernels at Stage 2." + }, + { + "type": "image", + "bbox": [ + 0.305, + 0.63, + 0.692, + 0.779 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.432, + 0.787, + 0.566, + 0.801 + ], + "angle": 0, + "content": "(d) Kernels at Stage 3." + }, + { + "type": "image_caption", + "bbox": [ + 0.313, + 0.812, + 0.684, + 0.828 + ], + "angle": 0, + "content": "Figure 10: Kernels in SGConvNeXt at different stages." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.86, + 0.724, + 0.875 + ], + "angle": 0, + "content": "C NEURAL ARCHITECTURE SEARCH PERSPECTIVE OF SGCONV" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Neural architecture search (NAS) is an automated process for discovering a neural network's optimal architecture or structure for a particular task. NAS typically involves searching through a large" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.104, + 0.825, + 0.284 + ], + "angle": 0, + "content": "space of possible network architectures using combination algorithms, such as reinforcement learning (Zoph et al., 2018), evolutionary algorithms (Real et al., 2019), or Bayesian optimization (Kandasamy et al., 2018). In recent years, there has been a proliferation of research aimed at designing traditional convolutional neural networks with local convolution (Li et al., 2021; Lin et al., 2021; Li et al., 2023). 
These works primarily focus on optimizing the networks' structures to improve their performance. From the perspective of NAS, the SGConv can be interpreted as a kernel-level fine-grained search for the distribution of parameters by utilizing parameterization. Furthermore, the SGConv has shown that the global convolution kernel exhibits sparsity and can be pruned (Fig. 10), meaning that the effective kernel length can be automatically determined through the training phase. These findings can potentially spark further research and development in the field. Another simple approach we explore in NAS is the combination of Attention and SGConv through a mixture model (Section 4.3.1). This approach is both intuitive and efficient and has the potential to improve the performance of neural network architectures further." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ] +] \ No newline at end of file diff --git a/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_origin.pdf b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bfdcce71c59f7458ea9ff281911499204a0187f0 --- /dev/null +++ b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/33760ea2-7ca5-43be-a157-6f11d24d15b1_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4290ca975190575a3f7e44741f43d3e23a9bbc5fdf5eb12d44d6c6607ef163c +size 1253279 diff --git a/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/full.md b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/full.md new file mode 100644 index 0000000000000000000000000000000000000000..fc28c1592a611bda72daef880e14334f1280c77b --- /dev/null +++ b/2023/What Makes Convolutional Models Great on Long Sequence 
Modeling_/full.md @@ -0,0 +1,382 @@ +# WHAT MAKES CONVOLUTIONAL MODELS GREAT ON LONG SEQUENCE MODELING? + +Yuhong Li $^{1*}$ Tianle Cai $^{2*}$ Yi Zhang $^{3}$ Deming Chen $^{1}$ Debadeepta Dey $^{3}$ + +1University of Illinois Urbana-Champaign, 2Princeton University, 3Microsoft Research. + +# ABSTRACT + +Convolutional models have been widely used in multiple domains. However, most existing models only use local convolution, making the model unable to handle long-range dependency efficiently. Attention overcomes this problem by aggregating global information based on the pair-wise attention score but also makes the computational complexity quadratic to the sequence length. Recently, Gu et al. (2021a) proposed a model called S4 inspired by the state space model. S4 can be efficiently implemented as a global convolutional model whose kernel size equals the input sequence length. With Fast Fourier Transform, S4 can model much longer sequences than Transformers and achieve significant gains over SoTA on several long-range tasks. Despite its empirical success, S4 is involved. It requires sophisticated parameterization and initialization schemes that combine the wisdom from several prior works. As a result, S4 is less intuitive and hard to use for researchers with limited prior knowledge. Here we aim to demystify S4 and extract basic principles that contribute to the success of S4 as a global convolutional model. We focus on the structure of the convolution kernel and identify two critical but intuitive principles enjoyed by S4 that are sufficient to make up an effective global convolutional model: 1) The parameterization of the convolutional kernel needs to be efficient in the sense that the number of parameters should scale sub-linearly with sequence length. 2) The kernel needs to satisfy a decaying structure that the weights for convolving with closer neighbors are larger than the more distant ones. 
Based on the two principles, we propose a simple yet effective convolutional model called Structured Global Convolution (SGConv). SGConv exhibits strong empirical performance over several tasks: 1) With faster speed, SGConv surpasses the previous SoTA on Long Range Arena and Speech Command datasets. 2) When plugging SGConv into standard language and vision models, it shows the potential to improve both efficiency and performance. + +# 1 INTRODUCTION + +Handling Long-Range Dependency (LRD) is a key challenge in long-sequence modeling tasks such as time-series forecasting, language modeling, and pixel-level image generation. Unfortunately, standard deep learning models fail to solve this problem for different reasons: Recurrent Neural Network (RNN) suffers from vanishing gradient, Transformer has complexity quadratic in the sequence length, and Convolutional Neural Network (CNN) usually only has a local receptive field in each layer. + +A recently proposed benchmark called Long-Range Arena (LRA) (Tay et al., 2020b) reveals that all existing models perform poorly in modeling LRD. Notably, on one spatial-level sequence modeling task called Pathfinder-X from LRA, all models fail except a new Structured State Space sequence model (S4) (Gu et al., 2021a). The S4 model is inspired by the state space model widely used in control theory and can be computed efficiently with a special parameterization based on the Cauchy kernel. The exact implementation of the S4 model can be viewed as a (depthwise) global convolutional model with an involved computation global convolution kernel. Thanks to the global receptive field of the convolution kernel, S4 is able to handle tasks that require LRD, such as Pathfinder (Linsley et al., 2018; Tay et al., 2020b), where classic local CNNs fail (Linsley et al., 2018; Kim et al., + +2019). 
Also, the use of Fast Fourier Transform (FFT) and techniques from numerical linear algebra make the computational complexity of S4 tractable compared to the quadratic complexity of attention. Together, S4 shows the potential of global convolutional models to model LRD and advances the SoTA on LRA. + +Despite its accomplishments, the delicate design of S4 makes it unfriendly even to knowledgeable researchers. In particular, the empirical success of S4 relies on 1) A Diagonal Plus Low Rank (DLPR) parameterization whose efficient implementation requires several numerical linear algebra tricks, 2) An initialization scheme based on the HiPPO matrix derived in prior work (Gu et al., 2020). Therefore, aiming to reduce the complications of the model and highlight minimal principles, we raise the following questions: + +What contributes to the success of the S4 model? Can we establish a simpler model based on minimal principles to handle long-range dependency? + +To answer these questions, we focus on the design of the global convolution kernel. We extract two simple and intuitive principles that contribute to the success of the S4 kernel. The first principle is that the parameterization of the global convolution kernel should be efficient in terms of the sequence length: the number of parameters should scale slowly with the sequence length. For example, classic CNNs use a fixed kernel size. S4 also uses a fixed number of parameters to compute the convolution kernel while the number is greater than classic CNNs. Both models satisfy the first principle as the number of parameters does not scale with input length. The efficiency of parameterization is also necessary because the naive implementation of a global convolution kernel with the size of sentence length is intractable for inputs with thousands of tokens. Too many parameters will also cause overfitting, thus hurting the performance. 
The second principle is the decaying structure of the convolution kernel, meaning that the weights for convolving with closer neighbors are larger than the more distant ones. This structure appears ubiquitously in signal processing, with the well-known Gaussian filter as an example. The intuition is clear that closer neighbors provide a more helpful signal. S4 inherently enjoys this decaying property because of the exponential decay of the spectrum of matrix powers (See Figure 2), and we find this inductive bias improves the model performance (See Section 4.1.2). + +We show that these two principles are sufficient for designing a global convolutional model that captures LRD well. To verify this, we introduce a class of global convolution kernels with a simple multiscale structure, as shown in Figure 1. Specifically, we compose the convolution kernel by a sequence of sub-kernels of increasing sizes, yet every sub-kernel is upsampled from the same number of parameters. This parameterization ensures that the number of parameters only scales logarithmically to the input length, which satisfies the first principle. In addition, we add a decaying weight to each scale during the combination step and fulfill the second principle. We named our methods as Structural Global Convolution kernels (SGConv). Empirically, SGConv improves S4 by more than $1\%$ and achieves SoTA results on the LRA benchmark. On Speech Command datasets, SGConv achieves comparative results in the ten-class classification task and significantly better results in the 35-class classification task upon previous SoTA. We further show + +that SGConv is more efficient than S4 and can be used as a general purpose module in different domains. For example, a hybrid model of classic attention and SGConv shows promising performance + +![](images/b246c359bb838516f889939657ee626ef6d989ac90c4f8fd5d617971fba8bace.jpg) +Figure 1: Illustration of the parameterization used in SGConv (Eq. (1)). 
The convolution kernel is composed of multi-scale sub-kernels. Parameterization Efficiency. Every larger sub-kernel doubles the size of the previous sub-kernel while the same number of parameters are used for every scale, ensuring a logarithmic dependency of the number of parameters to the input length. Decaying. We use a weighted combination of sub-kernels where the weights are decaying, and smaller weights are assigned to larger scales. + +on both autoregressive language modeling and sentence classification tasks, replacing the 2D convolution kernel of the ConvNext model with 1D SGConv matches the performance of the original model. + +# 2 RELATED WORK + +Efficient Transformers. The Transformer architecture (Vaswani et al., 2017) has been successful across a wide range of applications (Dosovitskiy et al., 2020; Liu et al., 2021; Dong et al., 2018; Ye et al., 2022) in machine learning. However, the computation and memory complexity of Transformer scales quadratically with the input length, making it intractable for modeling long-range interactions in very long sequences. Therefore, several efficient variants of Transformer model have been proposed recently to overcome this issue (Child et al., 2019; Wang et al., 2020; Kitaev et al., 2019; Zaheer et al., 2020; Tay et al., 2020a; Peng et al., 2021; Qin et al., 2021). Nevertheless, few of these methods performed well on benchmarks such as Long Range Arena (Tay et al., 2020b), SCROLLS (Shaham et al., 2022), which require long-range modeling ability. + +(Re-)parameterization. Parameterization is a crucial but underrated part of architecture design because different parameterizations usually provide different inductive biases. For example, weight normalization (Salimans & Kingma, 2016) parameterizes the norm and direction of the weight matrices separately and thus reaches faster convergence. 
On the other hand, Zagoruyko & Komodakis (2017) proposed a Dirac weight re-parameterization to train deep networks without explicit skip-connections and matched the performance of ResNets (He et al., 2016). In computer vision, several works explored using structural re-parameterization to create 2D convolution kernels. Most of these works (Ding et al., 2019; Guo et al., 2020; Ding et al., 2021; Cao et al., 2022) are limited to the vision domain and utilize only short-range convolution kernels (e.g., $7 \times 7$ ) except for the line of work based on 2D Fourier operators (Rao et al., 2021; Guibas et al., 2021) and the line of work based on continuous convolutional kernel (Romero et al., 2021b;a; 2022). Our SGConv kernel is a special parameterization of global convolution kernels that tackles LRD and showcases the extensibility of re-parameterized kernels. + +State Space Models. The state space model (SSM) uses a set of linear differential equations to model physical systems with input, output, and state variables. It is widely used in control, neuroscience, and statistics. Recently, Gu et al. (2021b) introduced a deep SSM-based model that can outperform prior approaches on several long sequence modeling tasks with a specially structured state transition matrix. However, the expensive computation and memory requirements make it impractical. A followup work of Gu et al. (2021b) proposed a new parameterization of SSM (Gu et al., 2021a), which decomposes the state transition matrix into the sum of low-rank and normal matrices and implements SSM as a global convolutional model. Under this parameterization, the authors then combine the techniques of diagonalizing the Cauchy kernel and performing low-rank corrections with the Woodbury identity to compute the global convolution kernel. 
While achieving promising results, S4 is theoretically involved and practical implementations of S4 require accelerator-specific dedicated code optimization for the Cauchy kernel computation. This makes it difficult to readily implement in deep learning frameworks (Abadi et al., 2016; Chen et al., 2015; Chen, 2021; Ma et al., 2019) and hardware targets. Concurrent with this work, many state-space-based models are emerging and bringing better performance (Gu et al., 2022a; Smith et al., 2022; Hasani et al., 2022). + +# 3 DESIGN OF GLOBAL CONVOLUTIONAL MODELS + +We summarize the design principles that enable the global convolutional model to be both efficient and effective. Then we introduce the proposed Structured Global Convolution (SGConv) based on the highlighted principles. + +# 3.1 DESIGN PRINCIPLES + +The two intuitive design principles that contribute to the success of S4 are efficient parameterization and decaying structure. + +![](images/4691e8575ba409b19c933a56ae5b9ff34a9b74295b23b8ccb413b87c23f99425.jpg) +(a) Pathfinder-X + +![](images/61d2227122cdc75f65e5059b4262264ada45e61689a290a23bdcdd4de34daf8a.jpg) +(b) SC-10 +Figure 2: Visualization of S4 kernels on (a) Pathfinder-X and (b) Speech Command 10-class. The values in the convolution kernel exhibit a decaying behavior. We only plot the first 4096 positions for better illustration. + +Efficient Parameterization. Different from local convolution, where the kernel size is fixed, global convolution requires a kernel size that is the same as the sentence length. Naive parameterization of convolution kernel as classic local convolutions is therefore intractable for long sequences. For instance, the Pathfinder-X task has a length of $16K$ . It then impractically requires $4M$ parameters for a single layer to model the depth-wise global convolution kernel with a standard channel size of 256. Thus, an efficient convolution kernel parameterization is necessary, especially when the sentence is extremely long. 
For example, S4 takes a well-designed Normal Plus Low-Rank (NPLR) parameterization to model the whole kernel with two special matrices where the number of parameters is fixed. + +Decaying Structure. Apart from the efficiency of the parameterization, we find that a decaying structure of the convolution kernel provides a good inductive bias to long-sequence modeling and contributes to the performance (See Section 4.1.2 for detailed ablation study). Concretely, the magnitude of the value in the convolution kernel should decay so that more weight is assigned to the close neighbors. S4 model inherently satisfies this property because the $k$ -th element of the kernel of S4 is $\mathbf{C}\mathbf{A}^k\mathbf{B}$ and the operator norm of the power of a matrix decays exponentially: + +Fact 1. For a square matrix $\mathbf{A}$ , the operator norm $\left\| \mathbf{A}^k \right\|_2 \leq \left\| \mathbf{A} \right\|_2^k$ . In particular, if $\left\| \mathbf{A} \right\|_2 < 1$ , $\left\| \mathbf{A}^k \right\|_2$ decays exponentially to $k$ , so $\left\| \mathbf{C} \mathbf{A}^k \mathbf{B} \right\|_2 \leq \left\| \mathbf{C} \right\|_2 \left\| \mathbf{A}^k \right\|_2 \left\| \mathbf{B} \right\|_2$ also decays exponentially. + +We can also directly observe the decaying structure of S4 in different tasks in Figure 2. + +# 3.2 SGCONV + +Putting the two principles altogether, we propose a simple global depth-wise convolution, dubbed Structured Global Convolution (SGConv), based on multiscale sub-kernels and weighted combinations. (See Figure 1). We will first introduce the parameterization of the convolutional kernel and then present how to build a global convolutional model with this kernel. + +Parameterization of SGConv Kernel. Formally, let $L$ be the length of the input sequence, the convolutional kernel should also has length $L$ . 
We define the parameter set of a single channel as $S = \{\mathbf{w}_i|0\leq i < \left[\log_2\left(\frac{L}{d}\right)\right] + 1\}$ where $\mathbf{w}_i\in \mathbb{R}^d$ is the parameter for $i$ -th sub-kernel $k_{i}$ , and $d$ is the dimension of the parameter. Denote the number of scales $N = \left[\log_2\left(\frac{L}{d}\right)\right] + 1$ . We use the upsample operation, implemented as linear interpolation, to form sub-kernels of different scales. We use Upsample $_l(\mathbf{x})$ to denote upsampling $\mathbf{x}$ to length $l$ (We use F. interpolate function in Pytorch and set the mode to be linear in our implementation). We also introduce a normalization constant $Z$ to ensure the convolution operation will not change the scale of the input and a coefficient $\alpha$ to control the decaying speed. Now, we are ready to introduce the weighted combination scheme by concatenating a set of weighted sub-kernels $k_{i}$ : + +$$ +\operatorname {C a t} (S) = \frac {1}{Z} [ k _ {0}, k _ {1}, \dots , k _ {N - 1} ], \text {w h e r e} k _ {i} = \alpha^ {i} \operatorname {U p s a m p l e} _ {2 \max [ i - 1, 0 ] d} (\mathbf {w} _ {i}). \tag {1} +$$ + +
ModelListOpsTextRetrievalImagePathfinderPath-XAvg.
Transformer36.3764.2757.4642.4471.40X54.39
Sparse Trans.17.0763.5859.5944.2471.71X51.24
Linformer35.7053.9452.2738.5676.34X51.36
Reformer37.2756.1053.4038.0768.50X50.67
BigBird36.0564.0259.2940.8374.87X55.01
S4 (original)58.3576.0287.0987.2686.0588.1080.48
S4 (Gu et al., 2022b)59.6086.8290.9088.6594.2096.3586.09
SGConv61.4589.2091.1187.9795.4697.8387.17
+ +Table 1: The performance of SGConv compared to other baselines on the LRA dataset. SGConv achieves significant improvement compared to previous methods with a more straightforward structure and faster speed (See Table 2) + +It is easy to check that $\operatorname{Cat}(S)$ gives the convolution kernel with length $\sum_{i=0}^{N} 2^{\max[i-1,0]} d = 2^{N-1} d \geq L$ (See Figure 1 for an illustration), which can be truncated to $L$ if it is overlength. And the number of parameters is $Nd = O(\log L)$ . The decay coefficient $\alpha$ , usually chosen to be $1/2$ , induces the decaying structure. + +Incorporate SGConv to Modern Architectures. In the implementation, we compute the depthwise convolution kernel and use Fast Fourier Transform to compute the convolution in $O(L \log L)$ time (See Figure 8 for detailed illustration). We compute the normalization constant $Z$ such that the norm of the kernel is one at initialization and fix it during training. Please refer to Appendix B.2 for a Python-style pseudo-code. We can plug SGConv into modern architectures as a replacement of attention in Transformer or local convolution in ConvNets (See Figure 6, 7 for two examples). Due to the relaxation of the structure of the convolutional kernel, SGConv does not have the RNN-style reformulation as S4. Yet, SGConv is naturally capable of performing autoregressive generation, such as language modeling, similarly to classic causal convolutional models (Van den Oord et al., 2016; Oord et al., 2016) and Transformers. Concretely, the convolution kernel is unidirectional, where the computation at the embedding of $i$ -th is only computed based on tokens before $i$ , and left zero padding is used for ignoring the overlength kernel. During generation, hidden states of past tokens are cached for fast calculation of the next token with a single convolution step. 
Due to the simplicity of the parameterization, SGConv kernel is easy to compute and more efficient than the S4 kernel, as shown in Section 4.1.3. + +# 4 EXPERIMENTS + +In this section, we first test the effectiveness of SGConv on two standard long sequence modeling tasks, i.e., Long Range Arena (Tay et al., 2020b) and Speech Commands (Warden, 2018), and compare it with S4 and other baselines. We also conduct ablation studies over the decay speed and scale dimension $d$ and evaluate the speed of SGConv on LRA. Further, we explore the possibility of plugging the global convolutional layer into standard models as a general-purpose component for capturing long-range dependency. For language tasks, we find that replacing half of layers of Transformer with a certain strategy with SGConv block will not hurt performance, while the complexity of those layers improves from $O(L^2)$ to $O(L\log L)$ . On ImageNet, we replace the $7\times 7$ convolution in ConvNext (Liu et al., 2022) with SGConv and show comparative or better performance. + +# 4.1 LONG RANGE ARENA + +Long Range Arena benchmark (Tay et al., 2020b) is a suite of six tasks consisting of sequences ranging from 1K to 16K tokens, encompassing a wide range of data types and modalities such as text, natural, synthetic images, and mathematical expressions requiring similarity, structural, and visual-spatial reasoning. + +# 4.1.1 RESULTS + +We show the experimental results in Table 1 with several baseline methods (Vaswani et al., 2017; Child et al., 2019; Wang et al., 2020; Kitaev et al., 2019; Zaheer et al., 2020; Gu et al., 2021a; 2022b). + +
Sequence length256512102420484096819216384
Inf.S429.481.7158.3306.95941156.92274.0
CPUSGConv23.856.2108.7211.3409.3789.51559.3
Inf.S4 (w/o opt)2.72.74.47.915.232.764.5
GPUS4 (w. opt.)1.61.93.15.410.022.344.3
SGConv1.21.32.34.48.519.839.4
BPS4 (w/o opt)4.15.710.219.438.180.1161.2
GPUS4 (w. opt.)3.546.611.922.648.997.8
SGConv2.02.75.09.618.641.282.5
+ +Table 2: Comparison of the inference and backpropagation time (ms/batch) of S4 and SGConv blocks (number of channels 128, batch size 64) on CPU and GPU. Note that the parameterization in S4 requires a customized CUDA kernel to improve the efficiency (refer to opt. in the Table). Nevertheless, SGConv still always surpasses S4 even compared to the optimized CUDA kernel. + +SGConv achieves a $1\%$ improvement in average accuracy upon well-tuned S4 variants introduced in Gu et al. (2022b). Notably, SGConv is guided by the two intuitive principles and has a much simpler structure than S4 (Gu et al., 2022b). The detailed implementation settings can be found in Appendix A.1. + +# 4.1.2 ABLATION STUDY ON IMDB + +We conduct ablation studies on the IMDB byte-level document classification task in the LRA benchmark. We mainly focus on two aspects: 1) The speed of decaying and 2) The parameter dimension $d$ of each scale. For simplicity, in the standard SGConv formulation (Eq. (1)), we fix the decay coefficient $\alpha = 1/2$ and only tune the dimension $d$ . However, the actual decay speed as a function of the position in the kernel depends both on $\alpha$ and $d$ , making it hard to conduct ablation studies. Thus, we use a slightly different convolution kernel that disentangles the decay speed and the dimension of each scale: + +$$ +\operatorname {C a t} ^ {*} (S) = \frac {1}{Z} [ k _ {0}, k _ {1}, \dots , k _ {N - 1} ] \odot \left[ \frac {1}{1 ^ {t}}, \frac {1}{2 ^ {t}}, \dots , \frac {1}{L ^ {t}} \right], \text {w h e r e} k _ {i} = \operatorname {U p s a m p l e} _ {2 \max [ i - 1, 0 ] d} (\mathbf {w} _ {i}). \tag {2} +$$ + +$t$ here then controls the decay speed, which is independent of each scale's dimension. We conduct two sets of experiments: 1) Fix $d = 8$ , vary $t$ from 0 (which means no decay) to 2, and 2) Fix $t = 1$ , vary $d$ from 1 to 64. Figure 3 reports the accuracies in different settings. 
We can observe that 1) The decay structure is crucial for getting good performance, and 2) In a reasonable range, $d$ has less impact on the performance than $t$ . Nevertheless, we observe a trend of performance drop when increasing $d$ from 8 to 64. Experiments on larger $d$ show worse performance, which can be attributed to overfitting. + +# 4.1.3 SPEED COMPARISON + +In Table 2, we compare the computation speed of the S4 kernel and SGConv kernel in different settings. Due to its simplicity, SGConv is faster than S4 for any sentence length. SGConv is about $50\%$ faster than the vanilla implementation of the S4 kernel and is $15\%$ faster than the optimized CUDA mized CUDA kernels. + +![](images/105ae48cced803b79ac8996d98110c83cd9f19bdf90f5ca5034e35cd2913a84f.jpg) +Figure 3: Ablation study on the effect of decay speed and hidden dimension of each scale on IMDB dataset. $pos \in [1, L]$ refers to the position in the convolution kernel. We observe: 1) The decay structure is crucial for getting good performance; 2) In a reasonable range, $d$ (Dimension) has less impact on the performance than $t$ ( $t \in [0, 2.0]$ ). + +# 4.2 SPEECH COMMANDS + +The Speech Command (SC) dataset (Warden, 2018) is a 35-class dataset of 1 second (16000 Hz sampling rate) spoken words in English. However, followup works (Kidger et al., 2020; Gu et al., 2021b; Romero et al., 2021b;a) adopted a smaller 10-class subset of SC. And works (Romero et al., 2021a; Gu et al., 2021b) on the SC dataset specifically use pre-processing such as MFCC features. Our baselines are obtained from (Gu et al., 2021a; 2022a). Note that besides SSM-based models, there is no strong baseline for raw waveform classification using either the 10-class or the full dataset. And SSM-based methods also show the ability to perform 0-shot testing at lower sampling rate such as $8000\mathrm{Hz}$ . Table 3 shows that the SGConv yields better results compared to the SSM-based method among 4 out of 5 tasks. 
Notably, for the original SC (35-class), SGConv achieves marginally higher accuracy for raw-sequence classification and significantly better results $(+2.40\%)$ compared to the existing SoTA method. + +
10-clsTransformerPerformerNRDECKConvWaveGAN-DS4S4*SGConv
MFCC90.7580.8589.895.3X93.9692.0594.91
16000HZX30.7716.4911.671.6698.3297.9897.52
8000HZ (0-shot)X30.6815.1265.96X96.3091.8396.03
35-clsInceptionNetResNet-18XResNet-50ConvNetS4DS4S4*SGConv
16000HZ61.2477.8683.0195.5196.2596.0896.2796.42
8000HZ (0-shot)5.188.747.727.2691.5891.3291.8994.29
+ +# 4.3 FURTHER APPLICATIONS OF SGCONV + +We further study SGConv as a generic network architecture drop-in component targeting tasks in language modeling and computer vision. In Section 4.3.1 we present an efficient mixture of attention and SGConv layers architecture that replaces half of the attention blocks in the Transformer with the SGConv blocks. We demonstrate the potential of utilizing such a model for long text processing. In Section 4.3.2, we incorporate SGConv (1D) into ConvNeXt (Liu et al., 2022). Surprisingly, SGConv achieves comparable or even better results compared to several SoTA CNN and Vision Transformer models by treating the 2D features as a 1D sequence. + +# 4.3.1 LANGUAGE TASKS + +Language modeling. We propose the SGConv block (shown in Figure 6) which is similar to the Attention block in Transformer (Vaswani et al., 2017). SGConv block enjoys both $O(L\log (L))$ time complexity and space complexity. We benchmark the inference time and GPU memory usage of both SGConv and Attention in Table 7. When the sequence length is 1024, SGConv block is $\sim 2.1\mathrm{X}$ faster than the Attention block. For language modeling, we utilize the feature of SGConv to directly process the long sequences. The + +Table 3: Speech Command classification results compared to existing methods. * We carefully reproduce the S4 method based on the released code1. Since the latest version removed 10-class experiments settings, we utilized a earlier version2. The results suggest that for the SC 35-classification, SGConv achieves SoTA on both full length task and 2X sampling rate, zero-shot task. + +
ModelValid.Test
LSTM+Hebb.29.029.2
16L Transformer-XL-24.0
16L SGConv+SAAttn21.9022.83
Adaptive Input-18.7
S4-20.95
18L Transformer-XL-18.3
18L Transformer-XL*18.1618.75
18L SGConv+SAAttn18.1018.70
+ +Table 4: Performance comparison on WikiText-103. + +![](images/c5a6171f8c15be41cfb4e5e9980e7cefe774992c98ee31a6f2f8cbbe1c008995.jpg) +(a) Illustration of SGConv and Transformer-XL style Short Attention used in language modeling task. SGConv directly processes the full length sequence. + +![](images/2085f3250995842f2739313154583d46dd3f4b5ce326fae9be371b6e857b5e84.jpg) +(b) The depth to replace SAttention with SGConv vs. validation PPL on WikiText-103 + +Figure 4: Incorporating SGConv to Transformer models in language tasks. + +
MNLI-m/mmQNLIQQPSSTCoLASTSAvg.
BERT84.93/84.9191.3491.0492.8855.1988.2984.08
SGConvBERT84.78/84.7091.2591.1892.5557.9288.4284.40
+ +Table 5: Performance comparison of BERT and SGConvBERT on GLUE dataset. SGConvBERT is comparable with BERT while being more efficient. We exclude MRPC and RTE datasets in GLUE because their sizes are too small ( $< 5K$ training samples). + +Attention block only targets the short range data termed SAttention. We illustrate the structure in Figure 4a. Furthermore, we investigate the strategy to replace the Attention blocks with SGConv blocks. We generate 50 architectures with 8 SGConv blocks and 8 Attention blocks where the order is shuffled. We denote the average depth to replace the Attention blocks as: $\sum_{i=0}^{N_{SGConv}} \mathrm{idx}_i / N_{total}$ where the idx denotes the $i$ th SGConv depth position. $N_{SGConv} = 8$ and $N_{total} = 16$ in this case. The results in Figure 4b suggest that when fixing the number of SGConv layer, models achieve better performance by placing SGConv blocks in deeper layers. Guided by the strategy, we handcraft two Transformer-XL (Dai et al., 2019) style models. (1) 16-layer: $\{\mathrm{A}, \mathrm{A}, \mathrm{A}, \mathrm{C}\} \times 2 + \{\mathrm{A}, \mathrm{C}, \mathrm{C}, \mathrm{C}\} \times 2$ . (2) 18-layer: $\{\mathrm{A}, \mathrm{A}, \mathrm{C}\} \times 3 + \{\mathrm{A}, \mathrm{C}, \mathrm{C}\} \times 3$ . A denotes SAttention and C denotes SGConv. $\times N$ denotes repeating the order of layers for $N$ times. We test the model on WikiText-103 (Merit et al., 2016) which is a wide-used language modeling benchmark with an average length of 3.6K tokens per article. We set both the attention and memory length to 384 for 18L model and 192 for 16L model. The length of input sequence is 3092 which can be processed by SGConv directly. We show the results in Table 4. Our results suggest that when the attention range is short, the 16L model outperforms the baseline with -1.17 perplexity. For the 18L model, our model achieves 18.70 perplexity. Note that we use a smaller and affordable batch size (16) for training. 
Under the same setting, our model gains slightly better perplexity than Transformer-XL (-0.05). Our results show the potential of adopting SGConv as part of the language model for long range language sequence processing. + +Sentence classification. We combine the SGConv block with the BERT model (Devlin et al., 2018). Concretely, we utilize the 12-layer $\{\mathrm{A},\mathrm{A},\mathrm{C}\} \times 2 + \{\mathrm{A},\mathrm{C},\mathrm{C}\} \times 2$ model. The pretraining is conducted on BooksCorpus (Zhu et al., 2015) and English Wikipedia (Foundation). We then fine-tune the model on the GLUE benchmark (Wang et al., 2019). To avoid the instability of fine-tuning on small datasets, we only test on tasks with more than $5K$ training samples. We follow the training and fine-tuning pipeline of Ke et al. (2020) (BERT-A in Table 1 of Ke et al. (2020)) and report the average accuracy of 5 different random seeds. SGConvBERT achieves comparable performance to the original BERT model, while the SGConv layer is more efficient than the attention layer. + +![](images/fa1c055f3a58b583445e4dfbcba0aec183d4829dbcb6645314bec1232ecf0b59.jpg) +Figure 5: Comparison of ImageNet-1k Top-1 accuracy with SoTA works. Left: Top-1 Accuracy vs. FLOPs. Right: Top-1 Accuracy vs. Throughputs. + +![](images/f5669ff133c83e9466da7c5214f223f50acfec2d677017a0c06503b1b16ab4bc.jpg) + +# 4.3.2 IMAGE CLASSIFICATION + +We also evaluate the adaptability of SGConv by applying it on large-scale image classification. We conduct experiments on ImageNet-1k (Deng et al., 2009) which consists of more than 1.28 million high-resolution training and 50,000 validation images. We replace the $7 \times 7$ 2D convolutional kernels with SGConvs in ConvNeXt (Liu et al., 2022) denoted as SGConvNeXt. The block designs of SGConvNeXt are shown in Figure 7. Note we train the SGConvNeXt-Tiny/Small/Base/Large using hyperparameter settings from ConvNeXt4 without any changes. 
By treating the 2D features as sequences, our SGConvNeXt achieves better results compared to existing SoTA methods such as EfficientNets (Tan & Le, 2019), Swin Transformers (Liu et al., 2021) (shown in Figure 5). Note that Vision Transformer (Dosovitskiy et al., 2020) and its variants (Touvron et al., 2021a;b; Yu et al., 2022) adopt patching techniques that can lead to a quadratic increase in complexity with image size. Also, patching is incompatible with dynamic input resolutions while SGConvNeXt processes the data globally. We list several interesting directions that can be explored for future work: 1) Optimization for the long-range convolution: we noticed that though FFT theoretically requires less FLOPs than plain convolution, the throughput drops empirically. One reason is that there is no optimized CUDA implementation for 1D long-range convolution and can be a good direction for future work. 2) Optimized hyperparameters and data augmentation methods: ConvNeXts' hyperparameters are tuned for maximum performance, which may not be ideal for SGConvNeXt. 3) SGConv for vision reasoning tasks: we show that SGConv is powerful for long-range synthetic reasoning tasks and large-scale classification tasks. It could be effective in visual reasoning applications such as Vision-Language Reasoning (Johnson et al., 2017; Zhu et al., 2020) with great potential. + +# 5 DISCUSSION + +In this paper, we attempt to answer the question of what makes convolutional models great again on long sequence modeling and summarize two principles contributing to the success. Based on the principles, we propose a simple and intuitive global convolutional model SGConv that has both direct implications and solid performance. Concurrent to our work there are also attempts to simplify the S4 model by restricting the state transition matrix to be diagonal (Gu et al., 2022a; Gupta, 2022). The proposal by Gu et al. 
(2022a) incorporates an intricate approach to parameterization and initialization schemes compared to our paper. Their method provides insights into the S4 phenomenon from a state-space-model perspective. Instead, we hope our simpler principles and non-SSM-based model can open up a direction for general audiences to understand and try global convolution as a general-purpose module for tackling long-range dependency. This potential has been shown in a very recent paper (Ma et al., 2022) concurrent to our work, where the authors incorporate an exponential moving average layer to a Transformer-like model and achieve promising performance over several long sequence modeling tasks. The exponential moving average layer is a particular type of global convolution layer that naturally satisfies our two principles. We believe that similar global convolutional modules will emerge in the future as long-range dependency becomes increasingly critical for sequence modeling. + +# ACKNOWLEDGEMENTS + +We extend our gratitude to the anonymous reviewers for dedicating their time and expertise to provide constructive feedback and suggestions, which significantly enhanced the quality of this paper. We also express our appreciation to the Program Chairs and Area Chairs for their careful review and valuable comments. Special thanks go to Sebastien Bubeck, Arturs Backurs, Gustavo de Rosa, Di He, and Cong 'Callie' Hao for their valuable suggestions and support. + +# REFERENCES + +Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, et al. Tensorflow: Large-scale machine learning on heterogeneous distributed systems. arXiv preprint arXiv:1603.04467, 2016. +Jinming Cao, Yangyan Li, Mingchao Sun, Ying Chen, Dani Lischinski, Daniel Cohen-Or, Baoquan Chen, and Changhe Tu. Do-conv: Depthwise over-parameterized convolutional layer. IEEE Transactions on Image Processing, 2022. +Lei Chen. 
Deep Learning and Practice with MindSpore. Springer Nature, 2021. +Tianqi Chen, Mu Li, Yutian Li, Min Lin, Naiyan Wang, Minjie Wang, Tianjun Xiao, Bing Xu, Chiyuan Zhang, and Zheng Zhang. Mxnet: A flexible and efficient machine learning library for heterogeneous distributed systems. arXiv preprint arXiv:1512.01274, 2015. +Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509, 2019. +Zihang Dai, Zhilin Yang, Yiming Yang, William W Cohen, Jaime Carbonell, Quoc V Le, and Ruslan Salakhutdinov. Transformer-xl: Attentive language models beyond a fixed-length context. arXiv preprint arXiv:1901.02860, 2019. +Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009. +Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. +Xiaohan Ding, Yuchen Guo, Guiguang Ding, and Jungong Han. Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. In Proceedings of the IEEE/CVF international conference on computer vision, pp. 1911-1920, 2019. +Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13733-13742, 2021. +Linhao Dong, Shuang Xu, and Bo Xu. Speech-transformer: a no-recurrence sequence-to-sequence model for speech recognition. In 2018 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5884-5888. IEEE, 2018. 
+Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. +Wikipedia Foundation. Wikipedia downloads. URL https://dumps.wikipedia.org. +Albert Gu, Tri Dao, Stefano Ermon, Atri Rudra, and Christopher Ré. Hippo: Recurrent memory with optimal polynomial projections. Advances in Neural Information Processing Systems, 33: 1474-1487, 2020. +Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces. arXiv preprint arXiv:2111.00396, 2021a. + +Albert Gu, Isys Johnson, Karan Goel, Khaled Saab, Tri Dao, Atri Rudra, and Christopher Ré. Combining recurrent, convolutional, and continuous-time models with linear state space layers. Advances in neural information processing systems, 34:572-585, 2021b. +Albert Gu, Ankit Gupta, Karan Goel, and Christopher Ré. On the parameterization and initialization of diagonal state space models. arXiv preprint arXiv:2206.11893, 2022a. +Albert Gu, Isys Johnson, Aman Timalsina, Atri Rudra, and Christopher Ré. How to train your hippo: State space models with generalized orthogonal basis projections. arXiv preprint arXiv:2206.12037, 2022b. +John Guibas, Morteza Mardani, Zongyi Li, Andrew Tao, Anima Anandkumar, and Bryan Catanzaro. Efficient token mixing for transformers via adaptive fourier neural operators. In International Conference on Learning Representations, 2021. +Shuxuan Guo, Jose M Alvarez, and Mathieu Salzmann. Expandnets: Linear over-parameterization to train compact convolutional networks. Advances in Neural Information Processing Systems, 33:1298-1310, 2020. +Ankit Gupta. Diagonal state spaces are as effective as structured state spaces. arXiv preprint arXiv:2203.14343, 2022. 
+Ramin Hasani, Mathias Lechner, Tsun-Hsuan Wang, Makram Chahine, Alexander Amini, and Daniela Rus. Liquid structural state-space models. arXiv preprint arXiv:2209.12951, 2022. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016. +Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2901-2910, 2017. +Kirthevasan Kandasamy, Willie Neiswanger, Jeff Schneider, Barnabas Poczos, and Eric P Xing. Neural architecture search with bayesian optimisation and optimal transport. Advances in neural information processing systems, 31, 2018. +Guolin Ke, Di He, and Tie-Yan Liu. Rethinking positional encoding in language pre-training. In International Conference on Learning Representations, 2020. +Patrick Kidger, James Morrill, James Foster, and Terry Lyons. Neural controlled differential equations for irregular time series. Advances in Neural Information Processing Systems, 33:6696-6707, 2020. +Junkyung Kim, Drew Linsley, Kalpit Thakkar, and Thomas Serre. Disentangling neural mechanisms for perceptual grouping. In International Conference on Learning Representations, 2019. +Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. In International Conference on Learning Representations, 2019. +Guihong Li, Yuedong Yang, Kartikeya Bhardwaj, and Radu Marculescu. Zico: Zero-shot nas via inverse coefficient of variation on gradients. arXiv preprint arXiv:2301.11300, 2023. +Yuhong Li, Cong Hao, Pan Li, Jinjun Xiong, and Deming Chen. Generic neural architecture search via regression. Advances in Neural Information Processing Systems, 34:20476-20490, 2021. 
+Ming Lin, Pichao Wang, Zhenhong Sun, Hesen Chen, Xiuyu Sun, Qi Qian, Hao Li, and Rong Jin. Zen-nas: A zero-shot nas for high-performance image recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 347-356, 2021. +Drew Linsley, Junkyung Kim, Vijay Veerabadran, Charles Windolf, and Thomas Serre. Learning long-range spatial dependencies with horizontal gated recurrent units. Advances in neural information processing systems, 31, 2018. + +Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10012-10022, 2021. +Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976-11986, 2022. +Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer. Mega: Moving average equipped gated attention. arXiv preprint arXiv:2209.10655, 2022. +Yanjun Ma, Dianhai Yu, Tian Wu, and Haifeng Wang. Paddlepaddle: An open-source deep learning platform from industrial practice. Frontiers of Data and Computing, 1(1):105-115, 2019. +Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843, 2016. +Aaron van den Oord, Sander Dieleman, Heiga Zen, Karen Simonyan, Oriol Vinyals, Alex Graves, Nal Kalchbrenner, Andrew Senior, and Koray Kavukcuoglu. Wavenet: A generative model for raw audio. arXiv preprint arXiv:1609.03499, 2016. +Hao Peng, Nikolaos Pappas, Dani Yogatama, Roy Schwartz, Noah Smith, and Lingpeng Kong. Random feature attention. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=QtTKTdVrFBB. 
+Zhen Qin, Weixuan Sun, Hui Deng, Dongxu Li, Yunshen Wei, Baohong Lv, Junjie Yan, Lingpeng Kong, and Yiran Zhong. cosformer: Rethinking softmax in attention. In International Conference on Learning Representations, 2021. +Yongming Rao, Wenliang Zhao, Zheng Zhu, Jiwen Lu, and Jie Zhou. Global filter networks for image classification. Advances in Neural Information Processing Systems, 34:980-993, 2021. +Esteban Real, Alok Aggarwal, Yanping Huang, and Quoc V Le. Regularized evolution for image classifier architecture search. In Proceedings of the aaai conference on artificial intelligence, volume 33, pp. 4780-4789, 2019. +David W Romero, Robert-Jan Bruintjes, Jakub Mikolaj Tomczak, Erik J Bekkers, Mark Hoogendoorn, and Jan van Gemert. Flexconv: Continuous kernel convolutions with differentiable kernel sizes. In International Conference on Learning Representations, 2021a. +David W Romero, Anna Kuzina, Erik J Bekkers, Jakub Mikolaj Tomczak, and Mark Hoogendoorn. Ckconv: Continuous kernel convolution for sequential data. In International Conference on Learning Representations, 2021b. +David W Romero, David M Knigge, Albert Gu, Erik J Bekkers, Efstratios Gavves, Jakub M Tomczak, and Mark Hoogendoorn. Towards a general purpose cnn for long range dependencies in nd. arXiv preprint arXiv:2206.03398, 2022. +Tim Salimans and Durk P Kingma. Weight normalization: A simple reparameterization to accelerate training of deep neural networks. Advances in neural information processing systems, 29, 2016. +Uri Shaham, Elad Segal, Maor Ivgi, Avia Efrat, Ori Yoran, Adi Haviv, Ankit Gupta, Wenhan Xiong, Mor Geva, Jonathan Berant, and Omer Levy. Scrols: Standardized comparison over long language sequences, 2022. +Jimmy TH Smith, Andrew Warrington, and Scott W Linderman. Simplified state space layers for sequence modeling. arXiv preprint arXiv:2208.04933, 2022. +Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. 
In International conference on machine learning, pp. 6105-6114. PMLR, 2019. +Y Tay, D Bahri, D Metzler, D Juan, Z Zhao, and C Zheng. Synthesizer: Rethinking self-attention in transformer models. arxiv 2020. arXiv preprint arXiv:2005.00743, 2, 2020a. + +Yi Tay, Mostafa Dehghani, Samira Abnar, Yikang Shen, Dara Bahri, Philip Pham, Jinfeng Rao, Liu Yang, Sebastian Ruder, and Donald Metzler. Long range arena: A benchmark for efficient transformers. arXiv preprint arXiv:2011.04006, 2020b. +Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In International Conference on Machine Learning, pp. 10347-10357. PMLR, 2021a. +Hugo Touvron, Matthieu Cord, Alexandre Sablayrolles, Gabriel Synnaeve, and Hervé Jégou. Going deeper with image transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 32-42, 2021b. +Aaron Van den Oord, Nal Kalchbrenner, Lasse Espeholt, Oriol Vinyals, Alex Graves, et al. Conditional image generation with pixelCNN decoders. Advances in neural information processing systems, 29, 2016. +Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017. +Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. 2019. In the Proceedings of ICLR. +Sinong Wang, Belinda Z. Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity, 2020. +Pete Warden. Speech commands: A dataset for limited-vocabulary speech recognition. arXiv preprint arXiv:1804.03209, 2018. +Wenting Ye, Hongfei Yang, Shuai Zhao, Haoyang Fang, Xingjian Shi, and Naveen Neppalli. 
A transformer-based substitute recommendation model incorporating weakly supervised customer behavior data. arXiv preprint arXiv:2211.02533, 2022. +Weihao Yu, Mi Luo, Pan Zhou, Chenyang Si, Yichen Zhou, Xinchao Wang, Jiashi Feng, and Shuicheng Yan. Metaformer is actually what you need for vision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10819-10829, 2022. +Sergey Zagoruyko and Nikos Komodakis. Diracnets: Training very deep neural networks without skip-connections. arXiv preprint arXiv:1706.00388, 2017. +Manzil Zaheer, Guru Guruganesh, Kumar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontonon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, et al. Big bird: Transformers for longer sequences. Advances in Neural Information Processing Systems, 33:17283-17297, 2020. +Fengda Zhu, Yi Zhu, Xiaojun Chang, and Xiaodan Liang. Vision-language navigation with self-supervised auxiliary reasoning tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10012-10022, 2020. +Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE international conference on computer vision, pp. 19-27, 2015. +Barret Zoph, Vijay Vasudevan, Jonathon Shlens, and Quoc V Le. Learning transferable architectures for scalable image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8697-8710, 2018. + +# A DETAILED EXPERIMENTAL RESULTS + +# A.1 LONG RANGE ARENA + +Here we report the detailed implementation of the LRA experiments. We use the concatenation style combination of sub-kernels in all experiments and mildly tune the dimension of each scale. Since the SGConv exhibits a strong ability to fit data, we slightly increase the dropout for some tasks to prevent overfitting. 
Table 6 lists the detailed hyperparameters used in LRA. In most experiments, we set $\alpha$ to $1/2$ , which approximately decays in speed $1 / pos$ . Experiments on flattened 2D images require some special modification of the kernel. We hypothesize that it is because images require more subtle inductive bias. For the experiment on the Image dataset, we use the disentangled version of parameterization and combination weights as described in Section 4.1.2 and set the decay speed to be $1 / pos$ . For the experiment on the Pathfinder-X task, we initialize convolution kernels in different channels with cosine waves with different frequencies and randomly assign $\alpha$ ranging from 1 to $1/3$ to different channels. Both these modifications bring about $1\%$ improvement compared to standard fixed $\alpha = 1/2$ and random initialization. The remaining hyperparameters and experimental settings are same to Gu et al. (2022a) which can be found in the Github repo1. + +
ListOpsTextRetrievalImagePathfinderPath-X
Acc.61.4589.2091.1187.9795.4697.83
Scale dim.121323264
Dropout0000.20.20
+ +Table 6: Hyperparameters used in LRA experiments. + +# A.2 SPEECH COMMAND + +For Speech Command 10-class task, we use the same training setting from Gu et al. (2021a) earlier version Github repo $^2$ . For Speech Command 35-class task, we use the training setting from the Github repo $^1$ . The scale dimension of SGConv is 32. + +# A.3 LANGUAGE TASK + +Our implementation for Language Task is based on the project $^{3}$ . For the 16-L model, we utilize 3072 as the sequence length for SGCONV and 192 as both the attention and memory length for SAAttention. For the 18-L model, we utilize 3072 as the sequence length for SGCONV and 384 as both the attention and memory length for SAAttention. The SGConv has 96 as the scale dimension. We adopt the training settings from the above mentioned project 3 except the batch size which is reduced to 64. The SGConv block is shown in Figure 4. + +![](images/43affd57893af7a4fd9c10d4a23c5d23a137a803fd92031e7f0d12197d6d73fd.jpg) +Figure 6: SGConv block + +
| Seq. length | 256 | 512 | 1024 | 2048 | 3072 |
| --- | --- | --- | --- | --- | --- |
| Attn. Block — Inf. (ms/batch) | 2.6 | 7.3 | 23.2 | 91.7 | X |
| Attn. Block — Mem. (GB) | 2.6 | 3.9 | 7.9 | 23.9 | OOM |
| SGConv Block — Inf. (ms/batch) | 2.7 | 5.4 | 10.9 | 21.8 | 43.6 |
| SGConv Block — Mem. (GB) | 2.6 | 3.4 | 5.2 | 8.7 | 15.7 |
+ +Table 7: Comparison of inference time and GPU memory utilization with Attention blocks. SGConv has significantly less memory usage and faster inference speed when the sequence increases. + +# A.4 IMAGE CLASSIFICATION + +We use the training settings in the work Liu et al. (2022) $^4$ . Since the SGConvNeXt has several downsampling layers, we fixed the scale to 5 and the scale dimensions are calculated based on the flattened features length of the corresponding layers. The structure is shown in Figure 7. The results are shown in Table 8. The visualization of the SGConvNeXt-Base outputs are shown in Figure 9. The visualization of the SGConv kernels at different stages are shown in Figure 10. + +![](images/1fa5dada2ae98699989f808bf6e6627d621edc1eae969b3f5ef14bb0223c5cf3.jpg) +Figure 7: SGConvnext + +![](images/c5e897c14ce292212b5305fdbc9027f627c9942c0ea5cb81bd2c0333cd4f6091.jpg) + +
| model | FLOPs | throughput (image/s) | params | Acc. |
| --- | --- | --- | --- | --- |
| Swin-T | 4.5G | 944.5 | 29M | 81.3 |
| Swin-S | 8.7G | 576.8 | 50M | 83.0 |
| Swin-B | 15.4G | 433.4 | 88M | 83.5 |
| Swin-B ($384^2$) | 47.0G | 134.6 | 88M | 84.5 |
| ConvNeXt-T | 4.5G | 1252.6 | 29M | 82.1 |
| ConvNeXt-S | 8.7G | 801.4 | 50M | 83.1 |
| ConvNeXt-B | 15.4G | 588.3 | 89M | 83.8 |
| ConvNeXt-L | 34.4G | 349.8 | 198M | 84.3 |
+ +
| model | FLOPs | throughput (image/s) | params | Acc. |
| --- | --- | --- | --- | --- |
| EffNet-B3 ($300^2$) | 1.8G | 693.9 | 12M | 81.6 |
| EffNet-B4 ($380^2$) | 4.2G | 341.5 | 19M | 82.9 |
| EffNet-B5 ($456^2$) | 9.9G | 223.5 | 30M | 83.6 |
| EffNet-B6 ($528^2$) | 19.0G | 91.5 | 43M | 84.0 |
| EffNet-B7 ($600^2$) | 37.0G | 52.9 | 66M | 84.3 |
| SGConvNeXt-T | 4.3G | 872.6 | 29M | 82.0 |
| SGConvNeXt-S | 8.3G | 565.3 | 51M | 83.4 |
| SGConvNeXt-B | 14.6G | 417.9 | 90M | 83.9 |
| SGConvNeXt-L | 32.5G | 256.7 | 200M | 84.4 |
+ +

Table 8: Comparison of ImageNet-1k Top-1 accuracy with SoTA works. + +# B DETAILED IMPLEMENTATION + +# B.1 ILLUSTRATION OF SGCONV MODULE + +![](images/dc73cd86ac1632a3107e6a2cc4f5942032d3e2eaf21f4bc8327b3750251d8525.jpg) +Figure 8: Implementing SGConv with FFT. We first compute the convolutional kernels for each channel as described in Section 3.2, and apply the depth-wise global convolution to the input features. + +# B.2 PYTHON STYLE PSEUDO-CODE + +```python
+# Parameters
+kernel_param_list = []  # w_i
+for _ in range(num_scales):
+    kernel_param_list.append(
+        nn.Parameter(torch.randn(hidden_dim, kernel_dim)))  # size: h * d
+# Compute global convolution kernel
+kernel_list = []  # k_i
+for i in range(num_scales):
+    kernel = F.interpolate(
+        kernel_param_list[i],
+        scale_factor = 2**max(0, i-1),
+        mode = "linear"
+    ) * 0.5 ** i  # alpha = 0.5
+    kernel_list.append(kernel)
+# The computed kernel, size: h * (d * 2^{s-1})
+k = torch.cat(kernel_list, dim=-1)
+# Normalize kernel
+if is_init:  # Compute the norm at initialization
+    kernel_norm = k.norm(dim=-1, keepdim=True).detach()
+k = k / kernel_norm
+```

```txt
+# Use kernel to compute global convolution
+# x: batch_size * hidden_dim * seq_len
+L = x.size(-1)
+# Truncate kernel if it is too long
+k = k[..., :L]
+# Use FFT to compute convolution
+x_f = torch.fft.rfft(x, n=2*L)
+k_f = torch.fft.rfft(k, n=2*L)
+y_f = torch.einsum("b h l, h l -> b h l", x_f, k_f)
+# Inverse FFT to get the result
+y = torch.fft.irfft(y_f, n=2*L)[..., :L]
+```

![](images/a9dc7d8f9a6579bcb0e37dc1288d81c3d7b12bb3cf6ca8adff9a4a8c7ef05f25.jpg) +Input

![](images/07133a81bd32c85fca2b3c2f6edf443585bfeb5580f157079a479ab6bdc87c3d.jpg) +Stage 0

![](images/8f9ccced2a4c5a31bd266c5297b0e0fcd55d74317ffa850e4164d0f29a20f46e.jpg) +Stage 1

![](images/8f9354eb71c1900eb43ba42d87dc85cb5e33f59b5e413a9ee0900ab39647fb31.jpg) +Stage 2

![](images/53394019db4eaa733cdb780433fbdcde63af6ac4ea7cb9231ec9885ef646c3af.jpg) +Stage 3
 + 
+![](images/39119b984d911ce8b1acf0d5c37a77cfc3ee189085b413fe8d29f451b7e423f9.jpg) + +![](images/a7386662ad0f56176c6b1fbb805a405d5a8d59b6207af84b5d674bcdacbf39d3.jpg) + +![](images/4af53c42af2cccf6e45985a232eaa6503b464f113c97419605ed3da59341bc7e.jpg) + +![](images/5005d29b7d379e0930dd26c01c74520b6dd14f226fc01b8f0f88e70f3eb4968c.jpg) + +![](images/64a6d10bfc28168ce83ffa5298213128c167a407122dd6af6eadea4e3a01684c.jpg) + +![](images/de571302efda9573db54e971b0e66fc36970220a56704ed3333efa6cfe2c9299.jpg) +Figure 9: Visualization of the intermediate features of SGConvNeXt on ImageNet-1k dataset. + +![](images/5564739a278af131f9579f07b6df9f3fe5b7c3dc6af6e24ee023f0f07cefeb79.jpg) + +![](images/ec0bad80ad47b2e9c11fb30bfcd15d076fa5ab1069b5dbd683f06b363df9e5a7.jpg) + +![](images/a9826591ae19eadcb1b5057a65e10a3daa19c1db9dad446489d6b46a8796752e.jpg) + +![](images/7cc909378927af4951b6e0afc256a4b067211b32c2308bfa7d3dda0cd4a34f98.jpg) + +![](images/d68ce1d42882558455ae27c576e70616f56bcf9853ae2ec281c69e5cb0ff8343.jpg) + +![](images/f0451dca363eba8e73cee1210fcccf9312c36bc021eec48307df6eb61648f68e.jpg) +(a) Visulization of kernels at Stage 0. + +![](images/3f0b943fcd06078d4d6d796f7c864a44ce0cbb8d24901ac7809b4f31f49b659e.jpg) +(b) Kernels at Stage 1. + +![](images/9a98e21258cb5eaec5b05f739d68b236172d88715c45a81383461b6792f15f39.jpg) +(c) Kernels at Stage 2. +(d) Kernels at Stage 3. +Figure 10: Kernels in SGConvNeXt at different stages. + +# C NEURAL ARCHITECTURE SEARCH PERSPECTIVE OF SGCONV + +Neural architecture search (NAS) is an automated process for discovering a neural network's optimal architecture or structure for a particular task. NAS typically involves searching through a large + +space of possible network architectures using combination algorithms, such as reinforcement learning (Zoph et al., 2018), evolutionary algorithms (Real et al., 2019), or Bayesian optimization (Kandasamy et al., 2018). 
In recent years, there has been a proliferation of research aimed at designing traditional convolutional neural networks with local convolution (Li et al., 2021; Lin et al., 2021; Li et al., 2023). These works primarily focus on optimizing the networks' structures to improve their performance. From the perspective of NAS, the SGConv can be interpreted as a kernel-level fine-grained search for the distribution of parameters by utilizing parameterization. Furthermore, the SGConv has shown that the global convolution kernel exhibits sparsity and can be pruned (Fig. 10), meaning that the effective kernel length can be automatically determined through the training phase. These findings can potentially spark further research and development in the field. Another simple approach we explore in NAS is the combination of Attention and SGConv through a mixture model (Section 4.3.1). This approach is both intuitive and efficient and has the potential to improve the performance of neural network architectures further. 
\ No newline at end of file diff --git a/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/images.zip b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..14437b2031a49dbf9cfa12fc634f0bc20018e4a4 --- /dev/null +++ b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:709f4417f224b8b13799353ec6efe447490967b7b2fce7e9b1d04426d2124e1c +size 670207 diff --git a/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/layout.json b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..49d23da3ca0bdec8975f26d07df190c700ae8e34 --- /dev/null +++ b/2023/What Makes Convolutional Models Great on Long Sequence Modeling_/layout.json @@ -0,0 +1,11281 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 504, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 504, + 118 + ], + "type": "text", + "content": "WHAT MAKES CONVOLUTIONAL MODELS GREAT ON LONG SEQUENCE MODELING?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "spans": [ + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "text", + "content": "Yuhong Li" + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "text", + "content": " Tianle Cai" + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "text", + "content": " Yi Zhang" + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "text", + "content": " Deming Chen" + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "text", + "content": " Debadeepta Dey" + }, + { + "bbox": [ + 110, + 136, + 438, + 148 + ], + "type": "inline_equation", + "content": "^{3}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 148, + 464, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 148, + 464, + 161 + ], + "spans": [ + { + "bbox": [ + 110, + 148, + 464, + 161 + ], + "type": "text", + "content": "1University of Illinois Urbana-Champaign, 2Princeton University, 3Microsoft Research." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 276, + 189, + 335, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 189, + 335, + 201 + ], + "spans": [ + { + "bbox": [ + 276, + 189, + 335, + 201 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 213, + 471, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 213, + 471, + 501 + ], + "spans": [ + { + "bbox": [ + 140, + 213, + 471, + 501 + ], + "type": "text", + "content": "Convolutional models have been widely used in multiple domains. However, most existing models only use local convolution, making the model unable to handle long-range dependency efficiently. Attention overcomes this problem by aggregating global information based on the pair-wise attention score but also makes the computational complexity quadratic to the sequence length. Recently, Gu et al. (2021a) proposed a model called S4 inspired by the state space model. S4 can be efficiently implemented as a global convolutional model whose kernel size equals the input sequence length. With Fast Fourier Transform, S4 can model much longer sequences than Transformers and achieve significant gains over SoTA on several long-range tasks. Despite its empirical success, S4 is involved. It requires sophisticated parameterization and initialization schemes that combine the wisdom from several prior works. As a result, S4 is less intuitive and hard to use for researchers with limited prior knowledge. Here we aim to demystify S4 and extract basic principles that contribute to the success of S4 as a global convolutional model. 
We focus on the structure of the convolution kernel and identify two critical but intuitive principles enjoyed by S4 that are sufficient to make up an effective global convolutional model: 1) The parameterization of the convolutional kernel needs to be efficient in the sense that the number of parameters should scale sub-linearly with sequence length. 2) The kernel needs to satisfy a decaying structure that the weights for convolving with closer neighbors are larger than the more distant ones. Based on the two principles, we propose a simple yet effective convolutional model called Structured Global Convolution (SGConv). SGConv exhibits strong empirical performance over several tasks: 1) With faster speed, SGConv surpasses the previous SoTA on Long Range Arena and Speech Command datasets. 2) When plugging SGConv into standard language and vision models, it shows the potential to improve both efficiency and performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 519, + 206, + 532 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 519, + 206, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 206, + 532 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": "Handling Long-Range Dependency (LRD) is a key challenge in long-sequence modeling tasks such as time-series forecasting, language modeling, and pixel-level image generation. Unfortunately, standard deep learning models fail to solve this problem for different reasons: Recurrent Neural Network (RNN) suffers from vanishing gradient, Transformer has complexity quadratic in the sequence length, and Convolutional Neural Network (CNN) usually only has a local receptive field in each layer." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 615, + 506, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 615, + 506, + 717 + ], + "spans": [ + { + "bbox": [ + 104, + 615, + 506, + 717 + ], + "type": "text", + "content": "A recently proposed benchmark called Long-Range Arena (LRA) (Tay et al., 2020b) reveals that all existing models perform poorly in modeling LRD. Notably, on one spatial-level sequence modeling task called Pathfinder-X from LRA, all models fail except a new Structured State Space sequence model (S4) (Gu et al., 2021a). The S4 model is inspired by the state space model widely used in control theory and can be computed efficiently with a special parameterization based on the Cauchy kernel. The exact implementation of the S4 model can be viewed as a (depthwise) global convolutional model with an involved computation global convolution kernel. Thanks to the global receptive field of the convolution kernel, S4 is able to handle tasks that require LRD, such as Pathfinder (Linsley et al., 2018; Tay et al., 2020b), where classic local CNNs fail (Linsley et al., 2018; Kim et al.," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 463, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 463, + 733 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 463, + 733 + ], + "type": "text", + "content": "*Equal contribution. Work done during the internship at Microsoft Research. Code is available." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "2019). Also, the use of Fast Fourier Transform (FFT) and techniques from numerical linear algebra make the computational complexity of S4 tractable compared to the quadratic complexity of attention. Together, S4 shows the potential of global convolutional models to model LRD and advances the SoTA on LRA." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 506, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 506, + 201 + ], + "type": "text", + "content": "Despite its accomplishments, the delicate design of S4 makes it unfriendly even to knowledgeable researchers. In particular, the empirical success of S4 relies on 1) A Diagonal Plus Low Rank (DLPR) parameterization whose efficient implementation requires several numerical linear algebra tricks, 2) An initialization scheme based on the HiPPO matrix derived in prior work (Gu et al., 2020). 
Therefore, aiming to reduce the complications of the model and highlight minimal principles, we raise the following questions:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 118, + 209, + 493, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 209, + 493, + 232 + ], + "spans": [ + { + "bbox": [ + 118, + 209, + 493, + 232 + ], + "type": "text", + "content": "What contributes to the success of the S4 model? Can we establish a simpler model based on minimal principles to handle long-range dependency?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 243, + 506, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 419 + ], + "type": "text", + "content": "To answer these questions, we focus on the design of the global convolution kernel. We extract two simple and intuitive principles that contribute to the success of the S4 kernel. The first principle is that the parameterization of the global convolution kernel should be efficient in terms of the sequence length: the number of parameters should scale slowly with the sequence length. For example, classic CNNs use a fixed kernel size. S4 also uses a fixed number of parameters to compute the convolution kernel while the number is greater than classic CNNs. Both models satisfy the first principle as the number of parameters does not scale with input length. The efficiency of parameterization is also necessary because the naive implementation of a global convolution kernel with the size of sentence length is intractable for inputs with thousands of tokens. Too many parameters will also cause overfitting, thus hurting the performance. The second principle is the decaying structure of the convolution kernel, meaning that the weights for convolving with closer neighbors are larger than the more distant ones. 
This structure appears ubiquitously in signal processing, with the well-known Gaussian filter as an example. The intuition is clear that closer neighbors provide a more helpful signal. S4 inherently enjoys this decaying property because of the exponential decay of the spectrum of matrix powers (See Figure 2), and we find this inductive bias improves the model performance (See Section 4.1.2)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 425, + 277, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 425, + 277, + 709 + ], + "spans": [ + { + "bbox": [ + 104, + 425, + 277, + 709 + ], + "type": "text", + "content": "We show that these two principles are sufficient for designing a global convolutional model that captures LRD well. To verify this, we introduce a class of global convolution kernels with a simple multiscale structure, as shown in Figure 1. Specifically, we compose the convolution kernel by a sequence of sub-kernels of increasing sizes, yet every sub-kernel is upsampled from the same number of parameters. This parameterization ensures that the number of parameters only scales logarithmically to the input length, which satisfies the first principle. In addition, we add a decaying weight to each scale during the combination step and fulfill the second principle. We named our methods as Structural Global Convolution kernels (SGConv). Empirically, SGConv improves S4 by more than " + }, + { + "bbox": [ + 104, + 425, + 277, + 709 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 425, + 277, + 709 + ], + "type": "text", + "content": " and achieves SoTA results on the LRA benchmark. On Speech Command datasets, SGConv achieves comparative results in the ten-class classification task and significantly better results in the 35-class classification task upon previous SoTA. 
We further show" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "content": "that SGConv is more efficient than S4 and can be used as a general purpose module in different domains. For example, a hybrid model of classic attention and SGConv shows promising performance" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 304, + 440, + 486, + 574 + ], + "blocks": [ + { + "bbox": [ + 304, + 440, + 486, + 574 + ], + "lines": [ + { + "bbox": [ + 304, + 440, + 486, + 574 + ], + "spans": [ + { + "bbox": [ + 304, + 440, + 486, + 574 + ], + "type": "image", + "image_path": "b246c359bb838516f889939657ee626ef6d989ac90c4f8fd5d617971fba8bace.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 582, + 504, + 694 + ], + "lines": [ + { + "bbox": [ + 282, + 582, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 282, + 582, + 504, + 694 + ], + "type": "text", + "content": "Figure 1: Illustration of the parameterization used in SGConv (Eq. (1)). The convolution kernel is composed of multi-scale sub-kernels. Parameterization Efficiency. Every larger sub-kernel doubles the size of the previous sub-kernel while the same number of parameters are used for every scale, ensuring a logarithmic dependency of the number of parameters to the input length. Decaying. We use a weighted combination of sub-kernels where the weights are decaying, and smaller weights are assigned to larger scales." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "on both autoregressive language modeling and sentence classification tasks, replacing the 2D convolution kernel of the ConvNext model with 1D SGConv matches the performance of the original model." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 134, + 209, + 146 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 134, + 209, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 134, + 209, + 146 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 159, + 506, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 506, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 506, + 260 + ], + "type": "text", + "content": "Efficient Transformers. 
The Transformer architecture (Vaswani et al., 2017) has been successful across a wide range of applications (Dosovitskiy et al., 2020; Liu et al., 2021; Dong et al., 2018; Ye et al., 2022) in machine learning. However, the computation and memory complexity of Transformer scales quadratically with the input length, making it intractable for modeling long-range interactions in very long sequences. Therefore, several efficient variants of Transformer model have been proposed recently to overcome this issue (Child et al., 2019; Wang et al., 2020; Kitaev et al., 2019; Zaheer et al., 2020; Tay et al., 2020a; Peng et al., 2021; Qin et al., 2021). Nevertheless, few of these methods performed well on benchmarks such as Long Range Arena (Tay et al., 2020b), SCROLLS (Shaham et al., 2022), which require long-range modeling ability." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 273, + 506, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 273, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 506, + 416 + ], + "type": "text", + "content": "(Re-)parameterization. Parameterization is a crucial but underrated part of architecture design because different parameterizations usually provide different inductive biases. For example, weight normalization (Salimans & Kingma, 2016) parameterizes the norm and direction of the weight matrices separately and thus reaches faster convergence. On the other hand, Zagoruyko & Komodakis (2017) proposed a Dirac weight re-parameterization to train deep networks without explicit skip-connections and matched the performance of ResNets (He et al., 2016). In computer vision, several works explored using structural re-parameterization to create 2D convolution kernels. 
Most of these works (Ding et al., 2019; Guo et al., 2020; Ding et al., 2021; Cao et al., 2022) are limited to the vision domain and utilize only short-range convolution kernels (e.g., " + }, + { + "bbox": [ + 104, + 273, + 506, + 416 + ], + "type": "inline_equation", + "content": "7 \\times 7" + }, + { + "bbox": [ + 104, + 273, + 506, + 416 + ], + "type": "text", + "content": ") except for the line of work based on 2D Fourier operators (Rao et al., 2021; Guibas et al., 2021) and the line of work based on continuous convolutional kernel (Romero et al., 2021b;a; 2022). Our SGConv kernel is a special parameterization of global convolution kernels that tackles LRD and showcases the extensibility of re-parameterized kernels." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 430, + 506, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 506, + 596 + ], + "type": "text", + "content": "State Space Models. The state space model (SSM) uses a set of linear differential equations to model physical systems with input, output, and state variables. It is widely used in control, neuroscience, and statistics. Recently, Gu et al. (2021b) introduced a deep SSM-based model that can outperform prior approaches on several long sequence modeling tasks with a specially structured state transition matrix. However, the expensive computation and memory requirements make it impractical. A followup work of Gu et al. (2021b) proposed a new parameterization of SSM (Gu et al., 2021a), which decomposes the state transition matrix into the sum of low-rank and normal matrices and implements SSM as a global convolutional model. Under this parameterization, the authors then combine the techniques of diagonalizing the Cauchy kernel and performing low-rank corrections with the Woodbury identity to compute the global convolution kernel. 
While achieving promising results, S4 is theoretically involved and practical implementations of S4 require accelerator-specific dedicated code optimization for the Cauchy kernel computation. This makes it difficult to readily implement in deep learning frameworks (Abadi et al., 2016; Chen et al., 2015; Chen, 2021; Ma et al., 2019) and hardware targets. Concurrent with this work, many state-space-based models are emerging and bringing better performance (Gu et al., 2022a; Smith et al., 2022; Hasani et al., 2022)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 613, + 372, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 372, + 625 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 372, + 625 + ], + "type": "text", + "content": "3 DESIGN OF GLOBAL CONVOLUTIONAL MODELS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 639, + 504, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 504, + 673 + ], + "type": "text", + "content": "We summarize the design principles that enable the global convolutional model to be both efficient and effective. Then we introduce the proposed Structured Global Convolution (SGConv) based on the highlighted principles." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 689, + 219, + 699 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 219, + 699 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 219, + 699 + ], + "type": "text", + "content": "3.1 DESIGN PRINCIPLES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "The two intuitive design principles that contribute to the success of S4 are efficient parameterization and decaying structure." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 146, + 83, + 304, + 196 + ], + "blocks": [ + { + "bbox": [ + 146, + 83, + 304, + 196 + ], + "lines": [ + { + "bbox": [ + 146, + 83, + 304, + 196 + ], + "spans": [ + { + "bbox": [ + 146, + 83, + 304, + 196 + ], + "type": "image", + "image_path": "4691e8575ba409b19c933a56ae5b9ff34a9b74295b23b8ccb413b87c23f99425.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 202, + 255, + 213 + ], + "lines": [ + { + "bbox": [ + 195, + 202, + 255, + 213 + ], + "spans": [ + { + "bbox": [ + 195, 
+ 202, + 255, + 213 + ], + "type": "text", + "content": "(a) Pathfinder-X" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 81, + 465, + 196 + ], + "blocks": [ + { + "bbox": [ + 307, + 81, + 465, + 196 + ], + "lines": [ + { + "bbox": [ + 307, + 81, + 465, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 81, + 465, + 196 + ], + "type": "image", + "image_path": "61d2227122cdc75f65e5059b4262264ada45e61689a290a23bdcdd4de34daf8a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 367, + 202, + 405, + 213 + ], + "lines": [ + { + "bbox": [ + 367, + 202, + 405, + 213 + ], + "spans": [ + { + "bbox": [ + 367, + 202, + 405, + 213 + ], + "type": "text", + "content": "(b) SC-10" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "lines": [ + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "content": "Figure 2: Visualization of S4 kernels on (a) Pathfinder-X and (b) Speech Command 10-class. The values in the convolution kernel exhibit a decaying behavior. We only plot the first 4096 positions for better illustration." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 276, + 506, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 276, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 276, + 506, + 374 + ], + "type": "text", + "content": "Efficient Parameterization. Different from local convolution, where the kernel size is fixed, global convolution requires a kernel size that is the same as the sentence length. Naive parameterization of convolution kernel as classic local convolutions is therefore intractable for long sequences. 
For instance, the Pathfinder-X task has a length of " + }, + { + "bbox": [ + 104, + 276, + 506, + 374 + ], + "type": "inline_equation", + "content": "16K" + }, + { + "bbox": [ + 104, + 276, + 506, + 374 + ], + "type": "text", + "content": ". It then impractically requires " + }, + { + "bbox": [ + 104, + 276, + 506, + 374 + ], + "type": "inline_equation", + "content": "4M" + }, + { + "bbox": [ + 104, + 276, + 506, + 374 + ], + "type": "text", + "content": " parameters for a single layer to model the depth-wise global convolution kernel with a standard channel size of 256. Thus, an efficient convolution kernel parameterization is necessary, especially when the sentence is extremely long. For example, S4 takes a well-designed Normal Plus Low-Rank (NPLR) parameterization to model the whole kernel with two special matrices where the number of parameters is fixed." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 387, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 387, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 504, + 453 + ], + "type": "text", + "content": "Decaying Structure. Apart from the efficiency of the parameterization, we find that a decaying structure of the convolution kernel provides a good inductive bias to long-sequence modeling and contributes to the performance (See Section 4.1.2 for detailed ablation study). Concretely, the magnitude of the value in the convolution kernel should decay so that more weight is assigned to the close neighbors. 
S4 model inherently satisfies this property because the " + }, + { + "bbox": [ + 104, + 387, + 504, + 453 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 387, + 504, + 453 + ], + "type": "text", + "content": "-th element of the kernel of S4 is " + }, + { + "bbox": [ + 104, + 387, + 504, + 453 + ], + "type": "inline_equation", + "content": "\\mathbf{C}\\mathbf{A}^k\\mathbf{B}" + }, + { + "bbox": [ + 104, + 387, + 504, + 453 + ], + "type": "text", + "content": " and the operator norm of the power of a matrix decays exponentially:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "text", + "content": "Fact 1. For a square matrix " + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "text", + "content": ", the operator norm " + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\left\\| \\mathbf{A}^k \\right\\|_2 \\leq \\left\\| \\mathbf{A} \\right\\|_2^k" + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "text", + "content": ". 
In particular, if " + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\left\\| \\mathbf{A} \\right\\|_2 < 1" + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\left\\| \\mathbf{A}^k \\right\\|_2" + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "text", + "content": " decays exponentially to " + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "text", + "content": ", so " + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\left\\| \\mathbf{C} \\mathbf{A}^k \\mathbf{B} \\right\\|_2 \\leq \\left\\| \\mathbf{C} \\right\\|_2 \\left\\| \\mathbf{A}^k \\right\\|_2 \\left\\| \\mathbf{B} \\right\\|_2" + }, + { + "bbox": [ + 105, + 456, + 504, + 485 + ], + "type": "text", + "content": " also decays exponentially." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 491, + 455, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 455, + 503 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 455, + 503 + ], + "type": "text", + "content": "We can also directly observe the decaying structure of S4 in different tasks in Figure 2." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 515, + 171, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 171, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 171, + 526 + ], + "type": "text", + "content": "3.2 SGCONV" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 536, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 504, + 581 + ], + "type": "text", + "content": "Putting the two principles altogether, we propose a simple global depth-wise convolution, dubbed Structured Global Convolution (SGConv), based on multiscale sub-kernels and weighted combinations. (See Figure 1). We will first introduce the parameterization of the convolutional kernel and then present how to build a global convolutional model with this kernel." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": "Parameterization of SGConv Kernel. Formally, let " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " be the length of the input sequence, the convolutional kernel should also have length " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": ". 
We define the parameter set of a single channel as " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "S = \\{\\mathbf{w}_i|0\\leq i < \\left[\\log_2\\left(\\frac{L}{d}\\right)\\right] + 1\\}" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_i\\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " is the parameter for " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": "-th sub-kernel " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "k_{i}" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " is the dimension of the parameter. Denote the number of scales " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "N = \\left[\\log_2\\left(\\frac{L}{d}\\right)\\right] + 1" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": ". We use the upsample operation, implemented as linear interpolation, to form sub-kernels of different scales. 
We use Upsample " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "_l(\\mathbf{x})" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " to denote upsampling " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " to length " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " (We use F.interpolate function in PyTorch and set the mode to be linear in our implementation). We also introduce a normalization constant " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " to ensure the convolution operation will not change the scale of the input and a coefficient " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " to control the decaying speed. 
Now, we are ready to introduce the weighted combination scheme by concatenating a set of weighted sub-kernels " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "k_{i}" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 149, + 708, + 504, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 708, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 149, + 708, + 504, + 731 + ], + "type": "interline_equation", + "content": "\\operatorname {C a t} (S) = \\frac {1}{Z} [ k _ {0}, k _ {1}, \\dots , k _ {N - 1} ], \\text {w h e r e} k _ {i} = \\alpha^ {i} \\operatorname {U p s a m p l e} _ {2 \\max [ i - 1, 0 ] d} (\\mathbf {w} _ {i}). \\tag {1}", + "image_path": "56e551c7b9bd8babc8e3f02625b8fc074735137301fc8cf4ab19686b0b12017b.jpg" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 79, + 476, + 183 + ], + "blocks": [ + { + "bbox": [ + 135, + 79, + 476, + 183 + ], + "lines": [ + { + "bbox": [ + 135, + 79, + 476, + 183 + ], + "spans": [ + { + "bbox": [ + 135, + 79, + 476, + 183 + ], + "type": "table", + "html": "
ModelListOpsTextRetrievalImagePathfinderPath-XAvg.
Transformer36.3764.2757.4642.4471.40X54.39
Sparse Trans.17.0763.5859.5944.2471.71X51.24
Linformer35.7053.9452.2738.5676.34X51.36
Reformer37.2756.1053.4038.0768.50X50.67
BigBird36.0564.0259.2940.8374.87X55.01
S4 (original)58.3576.0287.0987.2686.0588.1080.48
S4 (Gu et al., 2022b)59.6086.8290.9088.6594.2096.3586.09
SGConv61.4589.2091.1187.9795.4697.8387.17
", + "image_path": "68ae175a257df9ec08386f807a771caf26de78fa55eea39a0a0e97ab69ccd396.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 190, + 504, + 226 + ], + "lines": [ + { + "bbox": [ + 104, + 190, + 504, + 226 + ], + "spans": [ + { + "bbox": [ + 104, + 190, + 504, + 226 + ], + "type": "text", + "content": "Table 1: The performance of SGConv compared to other baselines on the LRA dataset. SGConv achieves significant improvement compared to previous methods with a more straightforward structure and faster speed (See Table 2)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "text", + "content": "It is easy to check that " + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "inline_equation", + "content": "\\operatorname{Cat}(S)" + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "text", + "content": " gives the convolution kernel with length " + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "inline_equation", + "content": "\\sum_{i=0}^{N} 2^{\\max[i-1,0]} d = 2^{N-1} d \\geq L" + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "text", + "content": " (See Figure 1 for an illustration), which can be truncated to " + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "text", + "content": " if it is overlength. And the number of parameters is " + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "inline_equation", + "content": "Nd = O(\\log L)" + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "text", + "content": ". 
The decay coefficient " + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "text", + "content": ", usually chosen to be " + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "inline_equation", + "content": "1/2" + }, + { + "bbox": [ + 104, + 243, + 504, + 291 + ], + "type": "text", + "content": ", induces the decaying structure." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "text", + "content": "Incorporate SGConv to Modern Architectures. In the implementation, we compute the depthwise convolution kernel and use Fast Fourier Transform to compute the convolution in " + }, + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "inline_equation", + "content": "O(L \\log L)" + }, + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "text", + "content": " time (See Figure 8 for detailed illustration). We compute the normalization constant " + }, + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "text", + "content": " such that the norm of the kernel is one at initialization and fix it during training. Please refer to Appendix B.2 for a Python-style pseudo-code. We can plug SGConv into modern architectures as a replacement of attention in Transformer or local convolution in ConvNets (See Figure 6, 7 for two examples). Due to the relaxation of the structure of the convolutional kernel, SGConv does not have the RNN-style reformulation as S4. 
Yet, SGConv is naturally capable of performing autoregressive generation, such as language modeling, similarly to classic causal convolutional models (Van den Oord et al., 2016; Oord et al., 2016) and Transformers. Concretely, the convolution kernel is unidirectional, where the computation at the embedding of " + }, + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "text", + "content": "-th is only computed based on tokens before " + }, + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 301, + 506, + 468 + ], + "type": "text", + "content": ", and left zero padding is used for ignoring the overlength kernel. During generation, hidden states of past tokens are cached for fast calculation of the next token with a single convolution step. Due to the simplicity of the parameterization, SGConv kernel is easy to compute and more efficient than the S4 kernel, as shown in Section 4.1.3." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 483, + 201, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 201, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 201, + 495 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "text", + "content": "In this section, we first test the effectiveness of SGConv on two standard long sequence modeling tasks, i.e., Long Range Arena (Tay et al., 2020b) and Speech Commands (Warden, 2018), and compare it with S4 and other baselines. 
We also conduct ablation studies over the decay speed and scale dimension " + }, + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "text", + "content": " and evaluate the speed of SGConv on LRA. Further, we explore the possibility of plugging the global convolutional layer into standard models as a general-purpose component for capturing long-range dependency. For language tasks, we find that replacing half of layers of Transformer with a certain strategy with SGConv block will not hurt performance, while the complexity of those layers improves from " + }, + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "inline_equation", + "content": "O(L^2)" + }, + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "inline_equation", + "content": "O(L\\log L)" + }, + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "text", + "content": ". On ImageNet, we replace the " + }, + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "inline_equation", + "content": "7\\times 7" + }, + { + "bbox": [ + 104, + 506, + 504, + 606 + ], + "type": "text", + "content": " convolution in ConvNext (Liu et al., 2022) with SGConv and show comparative or better performance." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 619, + 225, + 630 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 619, + 225, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 225, + 630 + ], + "type": "text", + "content": "4.1 LONG RANGE ARENA" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 639, + 504, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 504, + 685 + ], + "type": "text", + "content": "Long Range Arena benchmark (Tay et al., 2020b) is a suite of six tasks consisting of sequences ranging from 1K to 16K tokens, encompassing a wide range of data types and modalities such as text, natural, synthetic images, and mathematical expressions requiring similarity, structural, and visual-spatial reasoning." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 690, + 179, + 701 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 690, + 179, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 690, + 179, + 701 + ], + "type": "text", + "content": "4.1.1 RESULTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "We show the experimental results in Table 1 with several baseline methods (Vaswani et al., 2017; Child et al., 2019; Wang et al., 2020; Kitaev et al., 2019; Zaheer et al., 2020; Gu et al., 2021a; 2022b)." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 136, + 79, + 476, + 203 + ], + "blocks": [ + { + "bbox": [ + 136, + 79, + 476, + 203 + ], + "lines": [ + { + "bbox": [ + 136, + 79, + 476, + 203 + ], + "spans": [ + { + "bbox": [ + 136, + 79, + 476, + 203 + ], + "type": "table", + "html": "
Sequence length256512102420484096819216384
Inf.S429.481.7158.3306.95941156.92274.0
CPUSGConv23.856.2108.7211.3409.3789.51559.3
Inf.S4 (w/o opt)2.72.74.47.915.232.764.5
GPUS4 (w. opt.)1.61.93.15.410.022.344.3
SGConv1.21.32.34.48.519.839.4
BPS4 (w/o opt)4.15.710.219.438.180.1161.2
GPUS4 (w. opt.)3.546.611.922.648.997.8
SGConv2.02.75.09.618.641.282.5
", + "image_path": "baf9783f19377440e85fc0356d573a6d3c76c8e50f89db4ef09466a62d2662fe.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 209, + 504, + 255 + ], + "lines": [ + { + "bbox": [ + 104, + 209, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 504, + 255 + ], + "type": "text", + "content": "Table 2: Comparison of the inference and backpropagation time (ms/batch) of S4 and SGConv blocks (number of channels 128, batch size 64) on CPU and GPU. Note that the parameterization in S4 requires a customized CUDA kernel to improve the efficiency (refer to opt. in the Table). Nevertheless, SGConv still always surpasses S4 even compared to the optimized CUDA kernel." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 276, + 504, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 276, + 504, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 276, + 504, + 321 + ], + "type": "text", + "content": "SGConv achieves a " + }, + { + "bbox": [ + 104, + 276, + 504, + 321 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 276, + 504, + 321 + ], + "type": "text", + "content": " improvement in average accuracy upon well-tuned S4 variants introduced in Gu et al. (2022b). Notably, SGConv is guided by the two intuitive principles and has a much simpler structure than S4 (Gu et al., 2022b). The detailed implementation settings can be found in Appendix A.1." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 334, + 263, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 334, + 263, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 263, + 344 + ], + "type": "text", + "content": "4.1.2 ABLATION STUDY ON IMDB" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "text", + "content": "We conduct ablation studies on the IMDB byte-level document classification task in the LRA benchmark. We mainly focus on two aspects: 1) The speed of decaying and 2) The parameter dimension " + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "text", + "content": " of each scale. For simplicity, in the standard SGConv formulation (Eq. (1)), we fix the decay coefficient " + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\alpha = 1/2" + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "text", + "content": " and only tune the dimension " + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "text", + "content": ". However, the actual decay speed as a function of the position in the kernel depends both on " + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 353, + 504, + 430 + ], + "type": "text", + "content": ", making it hard to conduct ablation studies. 
Thus, we use a slightly different convolution kernel that disentangles the decay speed and the dimension of each scale:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 110, + 435, + 504, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 435, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 110, + 435, + 504, + 472 + ], + "type": "interline_equation", + "content": "\\operatorname{Cat}^{*}(S) = \\frac{1}{Z} [ k_{0}, k_{1}, \\dots, k_{N-1} ] \\odot \\left[ \\frac{1}{1^{t}}, \\frac{1}{2^{t}}, \\dots, \\frac{1}{L^{t}} \\right], \\text{ where } k_{i} = \\operatorname{Upsample}_{2^{\\max[i-1,0]}d}(\\mathbf{w}_{i}). \\tag{2}", + "image_path": "c7bf141d7c0cc022a623ad6c88739442fa5d69582aa32a444b0c3c45237a1f82.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "content": " here then controls the decay speed, which is independent of each scale's dimension. 
We conduct two sets of experiments: 1) Fix " + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "inline_equation", + "content": "d = 8" + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "content": ", vary " + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "content": " from 0 (which means no decay) to 2, and 2) Fix " + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "inline_equation", + "content": "t = 1" + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "content": ", vary " + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "content": " from 1 to 64. Figure 3 reports the accuracies in different settings. We can observe that 1) The decay structure is crucial for getting good performance, and 2) In a reasonable range, " + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "content": " has less impact on the performance than " + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "content": ". Nevertheless, we observe a trend of performance drop when increasing " + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "content": " from 8 to 64. Experiments on larger " + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 479, + 310, + 624 + ], + "type": "text", + "content": " show worse performance, which can be attributed to overfitting." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 635, + 230, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 230, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 230, + 647 + ], + "type": "text", + "content": "4.1.3 SPEED COMPARISON" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 654, + 310, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 310, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 310, + 732 + ], + "type": "text", + "content": "In Table 2, we compare the computation speed of the S4 kernel and SGConv kernel in different settings. Due to its simplicity, SGConv is faster than S4 for any sentence length. SGConv is about " + }, + { + "bbox": [ + 104, + 654, + 310, + 732 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 654, + 310, + 732 + ], + "type": "text", + "content": " faster than the vanilla implementation of the S4 kernel and is " + }, + { + "bbox": [ + 104, + 654, + 310, + 732 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 104, + 654, + 310, + 732 + ], + "type": "text", + "content": " faster than the optimized CUDA kernels." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 317, + 494, + 503, + 613 + ], + "blocks": [ + { + "bbox": [ + 317, + 494, + 503, + 613 + ], + "lines": [ + { + "bbox": [ + 317, + 494, + 503, + 613 + ], + "spans": [ + { + "bbox": [ + 317, + 494, + 503, + 613 + ], + "type": "image", + "image_path": "105ae48cced803b79ac8996d98110c83cd9f19bdf90f5ca5034e35cd2913a84f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "lines": [ + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "spans": [ + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "type": "text", + "content": "Figure 3: Ablation study on the effect of decay speed and hidden dimension of each scale on IMDB dataset. " + }, + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "type": "inline_equation", + "content": "pos \\in [1, L]" + }, + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "type": "text", + "content": " refers to the position in the convolution kernel. We observe: 1) The decay structure is crucial for getting good performance; 2) In a reasonable range, " + }, + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "type": "text", + "content": " (Dimension) has less impact on the performance than " + }, + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "type": "inline_equation", + "content": "t \\in [0, 2.0]" + }, + { + "bbox": [ + 313, + 623, + 504, + 711 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 220, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 220, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 220, + 94 + ], + "type": "text", + "content": "4.2 SPEECH COMMANDS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "text", + "content": "The Speech Command (SC) dataset (Warden, 2018) is a 35-class dataset of 1 second (16000 Hz sampling rate) spoken words in English. However, followup works (Kidger et al., 2020; Gu et al., 2021b; Romero et al., 2021b;a) adopted a smaller 10-class subset of SC. And works (Romero et al., 2021a; Gu et al., 2021b) on the SC dataset specifically use pre-processing such as MFCC features. Our baselines are obtained from (Gu et al., 2021a; 2022a). Note that besides SSM-based models, there is no strong baseline for raw waveform classification using either the 10-class or the full dataset. 
And SSM-based methods also show the ability to perform 0-shot testing at lower sampling rate such as " + }, + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "inline_equation", + "content": "8000\\mathrm{Hz}" + }, + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "text", + "content": ". Table 3 shows that the SGConv yields better results compared to the SSM-based method among 4 out of 5 tasks. Notably, for the original SC (35-class), SGConv achieves marginally higher accuracy for raw-sequence classification and significantly better results " + }, + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "inline_equation", + "content": "(+2.40\\%)" + }, + { + "bbox": [ + 104, + 102, + 506, + 224 + ], + "type": "text", + "content": " compared to the existing SoTA method." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 234, + 515, + 344 + ], + "blocks": [ + { + "bbox": [ + 106, + 234, + 515, + 344 + ], + "lines": [ + { + "bbox": [ + 106, + 234, + 515, + 344 + ], + "spans": [ + { + "bbox": [ + 106, + 234, + 515, + 344 + ], + "type": "table", + "html": "
10-clsTransformerPerformerNRDECKConvWaveGAN-DS4S4*SGConv
MFCC90.7580.8589.895.3X93.9692.0594.91
16000HZX30.7716.4911.671.6698.3297.9897.52
8000HZ (0-shot)X30.6815.1265.96X96.3091.8396.03
35-clsInceptionNetResNet-18XResNet-50ConvNetS4DS4S4*SGConv
16000HZ61.2477.8683.0195.5196.2596.0896.2796.42
8000HZ (0-shot)5.188.747.727.2691.5891.3291.8994.29
", + "image_path": "fad401d1b67e783771cabdf52654d29bd38ad06a1bdc71adbb7da3dc7fbc18e0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 419, + 293, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 419, + 293, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 293, + 430 + ], + "type": "text", + "content": "4.3 FURTHER APPLICATIONS OF SGCONV" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 440, + 504, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 440, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 440, + 504, + 518 + ], + "type": "text", + "content": "We further study SGConv as a generic network architecture drop-in component targeting tasks in language modeling and computer vision. In Section 4.3.1 we present an efficient mixture of attention and SGConv layers architecture that replaces half of the attention blocks in the Transformer with the SGConv blocks. We demonstrate the potential of utilizing such a model for long text processing. In Section 4.3.2, we incorporate SGConv (1D) into ConvNeXt (Liu et al., 2022). Surprisingly, SGConv achieves comparable or even better results compared to several SoTA CNN and Vision Transformer models by treating the 2D features as a 1D sequence." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 529, + 220, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 220, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 220, + 540 + ], + "type": "text", + "content": "4.3.1 LANGUAGE TASKS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 548, + 277, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 277, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 277, + 693 + ], + "type": "text", + "content": "Language modeling. 
We propose the SGConv block (shown in Figure 6) which is similar to the Attention block in Transformer (Vaswani et al., 2017). SGConv block enjoys both " + }, + { + "bbox": [ + 104, + 548, + 277, + 693 + ], + "type": "inline_equation", + "content": "O(L\\log (L))" + }, + { + "bbox": [ + 104, + 548, + 277, + 693 + ], + "type": "text", + "content": " time complexity and space complexity. We benchmark the inference time and GPU memory usage of both SGConv and Attention in Table 7. When the sequence length is 1024, SGConv block is " + }, + { + "bbox": [ + 104, + 548, + 277, + 693 + ], + "type": "inline_equation", + "content": "\\sim 2.1\\mathrm{X}" + }, + { + "bbox": [ + 104, + 548, + 277, + 693 + ], + "type": "text", + "content": " faster than the Attention block. For language modeling, we utilize the feature of SGConv to directly process the long sequences. The" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 307, + 561, + 482, + 665 + ], + "blocks": [ + { + "bbox": [ + 104, + 354, + 504, + 399 + ], + "lines": [ + { + "bbox": [ + 104, + 354, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 504, + 399 + ], + "type": "text", + "content": "Table 3: Speech Command classification results compared to existing methods. * We carefully reproduce the S4 method based on the released code1. Since the latest version removed 10-class experiment settings, we utilized an earlier version2. The results suggest that for the SC 35-classification, SGConv achieves SoTA on both full length task and 2X sampling rate, zero-shot task." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 561, + 482, + 665 + ], + "lines": [ + { + "bbox": [ + 307, + 561, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 307, + 561, + 482, + 665 + ], + "type": "table", + "html": "
ModelValid.Test
LSTM+Hebb.29.029.2
16L Transformer-XL-24.0
16L SGConv+SAAttn21.9022.83
Adaptive Input-18.7
S4-20.95
18L Transformer-XL-18.3
18L Transformer-XL*18.1618.75
18L SGConv+SAAttn18.1018.70
", + "image_path": "2359e899124c0c25528fcbf2cd977f5728ca9dbf3179d9786cae2fe9926a9ffb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 288, + 672, + 499, + 685 + ], + "lines": [ + { + "bbox": [ + 288, + 672, + 499, + 685 + ], + "spans": [ + { + "bbox": [ + 288, + 672, + 499, + 685 + ], + "type": "text", + "content": "Table 4: Performance comparison on WikiText-103." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 700, + 394, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 700, + 394, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 394, + 731 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 105, + 700, + 394, + 731 + ], + "type": "text", + "content": "https://github.com/HazyResearch/state-spaces \n" + }, + { + "bbox": [ + 105, + 700, + 394, + 731 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 700, + 394, + 731 + ], + "type": "text", + "content": "https://github.com/HazyResearch/state-spaces/tree/307f11bba801d5734235a1791df1859f6ae0e367" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 83, + 299, + 212 + ], + "blocks": [ + { 
+ "bbox": [ + 109, + 83, + 299, + 212 + ], + "lines": [ + { + "bbox": [ + 109, + 83, + 299, + 212 + ], + "spans": [ + { + "bbox": [ + 109, + 83, + 299, + 212 + ], + "type": "image", + "image_path": "c5a6171f8c15be41cfb4e5e9980e7cefe774992c98ee31a6f2f8cbbe1c008995.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 215, + 299, + 247 + ], + "lines": [ + { + "bbox": [ + 105, + 215, + 299, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 215, + 299, + 247 + ], + "type": "text", + "content": "(a) Illustration of SGConv and Transformer-XL style Short Attention used in language modeling task. SGConv directly processes the full length sequence." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 313, + 102, + 503, + 219 + ], + "blocks": [ + { + "bbox": [ + 313, + 102, + 503, + 219 + ], + "lines": [ + { + "bbox": [ + 313, + 102, + 503, + 219 + ], + "spans": [ + { + "bbox": [ + 313, + 102, + 503, + 219 + ], + "type": "image", + "image_path": "2085f3250995842f2739313154583d46dd3f4b5ce326fae9be371b6e857b5e84.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 311, + 225, + 504, + 245 + ], + "lines": [ + { + "bbox": [ + 311, + 225, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 311, + 225, + 504, + 245 + ], + "type": "text", + "content": "(b) The depth to replace SAttention with SGConv vs. validation PPL on WikiText-103" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 154, + 287, + 457, + 335 + ], + "blocks": [ + { + "bbox": [ + 153, + 255, + 456, + 268 + ], + "lines": [ + { + "bbox": [ + 153, + 255, + 456, + 268 + ], + "spans": [ + { + "bbox": [ + 153, + 255, + 456, + 268 + ], + "type": "text", + "content": "Figure 4: Incorporating SGConv to Transformer models in language tasks." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 154, + 287, + 457, + 335 + ], + "lines": [ + { + "bbox": [ + 154, + 287, + 457, + 335 + ], + "spans": [ + { + "bbox": [ + 154, + 287, + 457, + 335 + ], + "type": "table", + "html": "
MNLI-m/mmQNLIQQPSSTCoLASTSAvg.
BERT84.93/84.9191.3491.0492.8855.1988.2984.08
SGConvBERT84.78/84.7091.2591.1892.5557.9288.4284.40
", + "image_path": "7100fd046473842eb007be62eb9fc44820c986ddfe1130c2416737fc8a5f9638.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 345, + 506, + 380 + ], + "lines": [ + { + "bbox": [ + 104, + 345, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 506, + 380 + ], + "type": "text", + "content": "Table 5: Performance comparison of BERT and SGConvBERT on GLUE dataset. SGConvBERT is comparable with BERT while being more efficient. We exclude MRPC and RTE datasets in GLUE because their sizes are too small (" + }, + { + "bbox": [ + 104, + 345, + 506, + 380 + ], + "type": "inline_equation", + "content": "< 5K" + }, + { + "bbox": [ + 104, + 345, + 506, + 380 + ], + "type": "text", + "content": " training samples)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "content": "Attention block only targets the short range data termed SAttention. We illustrate the structure in Figure 4a. Furthermore, we investigate the strategy to replace the Attention blocks with SGConv blocks. We generate 50 architectures with 8 SGConv blocks and 8 Attention blocks where the order is shuffled. We denote the average depth to replace the Attention blocks as: " + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\sum_{i=0}^{N_{SGConv}} \\mathrm{idx}_i / N_{total}" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "content": " where the idx denotes the " + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "content": "th SGConv depth position. 
" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "inline_equation", + "content": "N_{SGConv} = 8" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "inline_equation", + "content": "N_{total} = 16" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "content": " in this case. The results in Figure 4b suggest that when fixing the number of SGConv layer, models achieve better performance by placing SGConv blocks in deeper layers. Guided by the strategy, we handcraft two Transformer-XL (Dai et al., 2019) style models. (1) 16-layer: " + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\{\\mathrm{A}, \\mathrm{A}, \\mathrm{A}, \\mathrm{C}\\} \\times 2 + \\{\\mathrm{A}, \\mathrm{C}, \\mathrm{C}, \\mathrm{C}\\} \\times 2" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "content": ". (2) 18-layer: " + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\{\\mathrm{A}, \\mathrm{A}, \\mathrm{C}\\} \\times 3 + \\{\\mathrm{A}, \\mathrm{C}, \\mathrm{C}\\} \\times 3" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "content": ". A denotes SAttention and C denotes SGConv. " + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\times N" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "content": " denotes repeating the order of layers for " + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 409, + 506, + 622 + ], + "type": "text", + "content": " times. We test the model on WikiText-103 (Merit et al., 2016) which is a wide-used language modeling benchmark with an average length of 3.6K tokens per article. 
We set both the attention and memory length to 384 for 18L model and 192 for 16L model. The length of input sequence is 3092 which can be processed by SGConv directly. We show the results in Table 4. Our results suggest that when the attention range is short, the 16L model outperforms the baseline with -1.17 perplexity. For the 18L model, our model achieves 18.70 perplexity. Note that we use a smaller and affordable batch size (16) for training. Under the same setting, our model gains slightly better perplexity than Transformer-XL (-0.05). Our results show the potential of adopting SGConv as part of the language model for long range language sequence processing." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "Sentence classification. We combine the SGConv block with the BERT model (Devlin et al., 2018). Concretely, we utilize the 12-layer " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\{\\mathrm{A},\\mathrm{A},\\mathrm{C}\\} \\times 2 + \\{\\mathrm{A},\\mathrm{C},\\mathrm{C}\\} \\times 2" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": " model. The pretraining is conducted on BooksCorpus (Zhu et al., 2015) and English Wikipedia (Foundation). We then fine-tune the model on the GLUE benchmark (Wang et al., 2019). To avoid the instability of fine-tuning on small datasets, we only test on tasks with more than " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "5K" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": " training samples. We follow the training and fine-tuning pipeline of Ke et al. (2020) (BERT-A in Table 1 of Ke et al. 
(2020)) and report the average accuracy of 5 different random seeds. SGConvBERT achieves comparable performance to the original BERT model, while the SGConv layer is more efficient than the attention layer." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 82, + 299, + 210 + ], + "blocks": [ + { + "bbox": [ + 108, + 82, + 299, + 210 + ], + "lines": [ + { + "bbox": [ + 108, + 82, + 299, + 210 + ], + "spans": [ + { + "bbox": [ + 108, + 82, + 299, + 210 + ], + "type": "image", + "image_path": "fa1c055f3a58b583445e4dfbcba0aec183d4829dbcb6645314bec1232ecf0b59.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 220, + 504, + 245 + ], + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 245 + ], + "type": "text", + "content": "Figure 5: Comparison of ImageNet-1k Top-1 accuracy with SoTA works. Left: Top-1 Accuracy vs. FLOPs. Right: Top-1 Accuracy vs. Throughputs." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 313, + 82, + 503, + 211 + ], + "blocks": [ + { + "bbox": [ + 313, + 82, + 503, + 211 + ], + "lines": [ + { + "bbox": [ + 313, + 82, + 503, + 211 + ], + "spans": [ + { + "bbox": [ + 313, + 82, + 503, + 211 + ], + "type": "image", + "image_path": "f5669ff133c83e9466da7c5214f223f50acfec2d677017a0c06503b1b16ab4bc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 269, + 244, + 280 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 269, + 244, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 244, + 280 + ], + "type": "text", + "content": "4.3.2 IMAGE CLASSIFICATION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 289, + 506, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 506, + 510 + ], + "type": "text", + "content": "We also evaluate the adaptability of SGConv by applying it on large-scale image classification. We conduct experiments on ImageNet-1k (Deng et al., 2009) which consists of more than 1.28 million high-resolution training and 50,000 validation images. We replace the " + }, + { + "bbox": [ + 104, + 289, + 506, + 510 + ], + "type": "inline_equation", + "content": "7 \\times 7" + }, + { + "bbox": [ + 104, + 289, + 506, + 510 + ], + "type": "text", + "content": " 2D convolutional kernels with SGConvs in ConvNeXt (Liu et al., 2022) denoted as SGConvNeXt. The block designs of SGConvNeXt are shown in Figure 7. Note we train the SGConvNeXt-Tiny/Small/Base/Large using hyperparameter settings from ConvNeXt4 without any changes. 
By treating the 2D features as sequences, our SGConvNeXt achieves better results compared to existing SoTA methods such as EfficientNets (Tan & Le, 2019), Swin Transformers (Liu et al., 2021) (shown in Figure 5). Note that Vision Transformer (Dosovitskiy et al., 2020) and its variants (Touvron et al., 2021a;b; Yu et al., 2022) adopt patching techniques that can lead to a quadratic increase in complexity with image size. Also, patching is incompatible with dynamic input resolutions while SGConvNeXt processes the data globally. We list several interesting directions that can be explored for future work: 1) Optimization for the long-range convolution: we noticed that though FFT theoretically requires less FLOPs than plain convolution, the throughput drops empirically. One reason is that there is no optimized CUDA implementation for 1D long-range convolution and can be a good direction for future work. 2) Optimized hyperparameters and data augmentation methods: ConvNeXts' hyperparameters are tuned for maximum performance, which may not be ideal for SGConvNeXt. 3) SGConv for vision reasoning tasks: we show that SGConv is powerful for long-range synthetic reasoning tasks and large-scale classification tasks. It could be effective in visual reasoning applications such as Vision-Language Reasoning (Johnson et al., 2017; Zhu et al., 2020) with great potential." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 529, + 190, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 190, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 190, + 540 + ], + "type": "text", + "content": "5 DISCUSSION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 556, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 556, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 506, + 732 + ], + "type": "text", + "content": "In this paper, we attempt to answer the question of what makes convolutional models great again on long sequence modeling and summarize two principles contributing to the success. Based on the principles, we propose a simple and intuitive global convolutional model SGConv that has both direct implications and solid performance. Concurrent to our work there are also attempts to simplify the S4 model by restricting the state transition matrix to be diagonal (Gu et al., 2022a; Gupta, 2022). The proposal by Gu et al. (2022a) incorporates an intricate approach to parameterization and initialization schemes compared to our paper. Their method provides insights into the S4 phenomenon from a state-space-model perspective. Instead, we hope our simpler principles and non-SSM-based model can open up a direction for general audiences to understand and try global convolution as a general-purpose module for tackling long-range dependency. This potential has been shown in a very recent paper (Ma et al., 2022) concurrent to our work, where the authors incorporate an exponential moving average layer to a Transformer-like model and achieve promising performance over several long sequence modeling tasks. The exponential moving average layer is a particular type of global convolution layer that naturally satisfies our two principles. 
We believe that similar global convolutional modules will emerge in the future as long-range dependency becomes increasingly critical for sequence modeling." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 226, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 226, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 226, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGEMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 506, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 506, + 162 + ], + "type": "text", + "content": "We extend our gratitude to the anonymous reviewers for dedicating their time and expertise to provide constructive feedback and suggestions, which significantly enhanced the quality of this paper. We also express our appreciation to the Program Chairs and Area Chairs for their careful review and valuable comments. Special thanks go to Sebastien Bubeck, Arturs Backurs, Gustavo de Rosa, Di He, and Cong 'Callie' Hao for their valuable suggestions and support." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 177, + 176, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 177, + 176, + 190 + ], + "spans": [ + { + "bbox": [ + 106, + 177, + 176, + 190 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 195, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 195, + 505, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 195, + 505, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 195, + 505, + 232 + ], + "type": "text", + "content": "Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, et al. Tensorflow: Large-scale machine learning on heterogeneous distributed systems. arXiv preprint arXiv:1603.04467, 2016." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 236, + 505, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 505, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 505, + 270 + ], + "type": "text", + "content": "Jinming Cao, Yangyan Li, Mingchao Sun, Ying Chen, Dani Lischinski, Daniel Cohen-Or, Baoquan Chen, and Changhe Tu. Do-conv: Depthwise over-parameterized convolutional layer. IEEE Transactions on Image Processing, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 275, + 428, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 275, + 428, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 428, + 289 + ], + "type": "text", + "content": "Lei Chen. Deep Learning and Practice with MindSpore. Springer Nature, 2021." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 295, + 505, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 295, + 505, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 505, + 329 + ], + "type": "text", + "content": "Tianqi Chen, Mu Li, Yutian Li, Min Lin, Naiyan Wang, Minjie Wang, Tianjun Xiao, Bing Xu, Chiyuan Zhang, and Zheng Zhang. Mxnet: A flexible and efficient machine learning library for heterogeneous distributed systems. arXiv preprint arXiv:1512.01274, 2015." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 335, + 505, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 505, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 505, + 358 + ], + "type": "text", + "content": "Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 365, + 505, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 505, + 399 + ], + "type": "text", + "content": "Zihang Dai, Zhilin Yang, Yiming Yang, William W Cohen, Jaime Carbonell, Quoc V Le, and Ruslan Salakhutdinov. Transformer-xl: Attentive language models beyond a fixed-length context. arXiv preprint arXiv:1901.02860, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 406, + 505, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 505, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 505, + 441 + ], + "type": "text", + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 446, + 505, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 446, + 505, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 446, + 505, + 470 + ], + "type": "text", + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 476, + 505, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 476, + 505, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 476, + 505, + 511 + ], + "type": "text", + "content": "Xiaohan Ding, Yuchen Guo, Guiguang Ding, and Jungong Han. Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. In Proceedings of the IEEE/CVF international conference on computer vision, pp. 1911-1920, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 517, + 505, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 517, + 505, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 505, + 552 + ], + "type": "text", + "content": "Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13733-13742, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 557, + 505, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 557, + 505, + 592 + ], + "spans": [ + { + "bbox": [ + 105, + 557, + 505, + 592 + ], + "type": "text", + "content": "Linhao Dong, Shuang Xu, and Bo Xu. Speech-transformer: a no-recurrence sequence-to-sequence model for speech recognition. In 2018 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 
5884-5888. IEEE, 2018." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 597, + 505, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 505, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 505, + 643 + ], + "type": "text", + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 649, + 488, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 649, + 488, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 488, + 662 + ], + "type": "text", + "content": "Wikipedia Foundation. Wikipedia downloads. URL https://dumps.wikipedia.org." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 668, + 505, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 505, + 702 + ], + "type": "text", + "content": "Albert Gu, Tri Dao, Stefano Ermon, Atri Rudra, and Christopher Ré. Hippo: Recurrent memory with optimal polynomial projections. Advances in Neural Information Processing Systems, 33: 1474-1487, 2020." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "text", + "content": "Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces. arXiv preprint arXiv:2111.00396, 2021a." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Albert Gu, Isys Johnson, Karan Goel, Khaled Saab, Tri Dao, Atri Rudra, and Christopher Ré. Combining recurrent, convolutional, and continuous-time models with linear state space layers. Advances in neural information processing systems, 34:572-585, 2021b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 504, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 504, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 504, + 146 + ], + "type": "text", + "content": "Albert Gu, Ankit Gupta, Karan Goel, and Christopher Ré. On the parameterization and initialization of diagonal state space models. arXiv preprint arXiv:2206.11893, 2022a." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 152, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 152, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 504, + 186 + ], + "type": "text", + "content": "Albert Gu, Isys Johnson, Aman Timalsina, Atri Rudra, and Christopher Ré. How to train your hippo: State space models with generalized orthogonal basis projections. arXiv preprint arXiv:2206.12037, 2022b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 193, + 504, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 193, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 504, + 228 + ], + "type": "text", + "content": "John Guibas, Morteza Mardani, Zongyi Li, Andrew Tao, Anima Anandkumar, and Bryan Catanzaro. Efficient token mixing for transformers via adaptive fourier neural operators. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 234, + 504, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 234, + 504, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 504, + 267 + ], + "type": "text", + "content": "Shuxuan Guo, Jose M Alvarez, and Mathieu Salzmann. Expandnets: Linear over-parameterization to train compact convolutional networks. Advances in Neural Information Processing Systems, 33:1298-1310, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 275, + 504, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 275, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 504, + 297 + ], + "type": "text", + "content": "Ankit Gupta. Diagonal state spaces are as effective as structured state spaces. arXiv preprint arXiv:2203.14343, 2022." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 304, + 504, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 504, + 327 + ], + "type": "text", + "content": "Ramin Hasani, Mathias Lechner, Tsun-Hsuan Wang, Makram Chahine, Alexander Amini, and Daniela Rus. Liquid structural state-space models. arXiv preprint arXiv:2209.12951, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 334, + 504, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 334, + 504, + 368 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 504, + 368 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 374, + 504, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 504, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 504, + 421 + ], + "type": "text", + "content": "Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2901-2910, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 426, + 504, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 504, + 461 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 504, + 461 + ], + "type": "text", + "content": "Kirthevasan Kandasamy, Willie Neiswanger, Jeff Schneider, Barnabas Poczos, and Eric P Xing. Neural architecture search with bayesian optimisation and optimal transport. 
Advances in neural information processing systems, 31, 2018." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 468, + 504, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 504, + 491 + ], + "type": "text", + "content": "Guolin Ke, Di He, and Tie-Yan Liu. Rethinking positional encoding in language pre-training. In International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 497, + 504, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 497, + 504, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 504, + 531 + ], + "type": "text", + "content": "Patrick Kidger, James Morrill, James Foster, and Terry Lyons. Neural controlled differential equations for irregular time series. Advances in Neural Information Processing Systems, 33:6696-6707, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 537, + 504, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 537, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 105, + 537, + 504, + 562 + ], + "type": "text", + "content": "Junkyung Kim, Drew Linsley, Kalpit Thakkar, and Thomas Serre. Disentangling neural mechanisms for perceptual grouping. In International Conference on Learning Representations, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 567, + 504, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 504, + 591 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 504, + 591 + ], + "type": "text", + "content": "Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. In International Conference on Learning Representations, 2019." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 597, + 504, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 504, + 620 + ], + "type": "text", + "content": "Guihong Li, Yuedong Yang, Kartikeya Bhardwaj, and Radu Marculescu. Zico: Zero-shot nas via inverse coefficient of variation on gradients. arXiv preprint arXiv:2301.11300, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 628, + 504, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 504, + 651 + ], + "type": "text", + "content": "Yuhong Li, Cong Hao, Pan Li, Jinjun Xiong, and Deming Chen. Generic neural architecture search via regression. Advances in Neural Information Processing Systems, 34:20476-20490, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 657, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 504, + 692 + ], + "type": "text", + "content": "Ming Lin, Pichao Wang, Zhenhong Sun, Hesen Chen, Xiuyu Sun, Qi Qian, Hao Li, and Rong Jin. Zen-nas: A zero-shot nas for high-performance image recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 347-356, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "text", + "content": "Drew Linsley, Junkyung Kim, Vijay Veerabadran, Charles Windolf, and Thomas Serre. Learning long-range spatial dependencies with horizontal gated recurrent units. Advances in neural information processing systems, 31, 2018." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "text", + "content": "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10012-10022, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976-11986, 2022." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 197 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 197 + ], + "type": "text", + "content": "Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer. Mega: Moving average equipped gated attention. arXiv preprint arXiv:2209.10655, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 204, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 505, + 228 + ], + "type": "text", + "content": "Yanjun Ma, Dianhai Yu, Tian Wu, and Haifeng Wang. Paddlepaddle: An open-source deep learning platform from industrial practice. Frontiers of Data and Computing, 1(1):105-115, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 234, + 505, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 234, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 505, + 257 + ], + "type": "text", + "content": "Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843, 2016." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 263, + 505, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 263, + 505, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 505, + 298 + ], + "type": "text", + "content": "Aaron van den Oord, Sander Dieleman, Heiga Zen, Karen Simonyan, Oriol Vinyals, Alex Graves, Nal Kalchbrenner, Andrew Senior, and Koray Kavukcuoglu. Wavenet: A generative model for raw audio. arXiv preprint arXiv:1609.03499, 2016." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 304, + 505, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 505, + 338 + ], + "type": "text", + "content": "Hao Peng, Nikolaos Pappas, Dani Yogatama, Roy Schwartz, Noah Smith, and Lingpeng Kong. Random feature attention. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=QtTKTdVrFBB." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 346, + 505, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 505, + 380 + ], + "type": "text", + "content": "Zhen Qin, Weixuan Sun, Hui Deng, Dongxu Li, Yunshen Wei, Baohong Lv, Junjie Yan, Lingpeng Kong, and Yiran Zhong. cosformer: Rethinking softmax in attention. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 386, + 505, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 386, + 505, + 409 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 505, + 409 + ], + "type": "text", + "content": "Yongming Rao, Wenliang Zhao, Zheng Zhu, Jiwen Lu, and Jie Zhou. Global filter networks for image classification. Advances in Neural Information Processing Systems, 34:980-993, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 415, + 505, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 505, + 449 + ], + "type": "text", + "content": "Esteban Real, Alok Aggarwal, Yanping Huang, and Quoc V Le. Regularized evolution for image classifier architecture search. In Proceedings of the aaai conference on artificial intelligence, volume 33, pp. 4780-4789, 2019." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 456, + 505, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 505, + 491 + ], + "type": "text", + "content": "David W Romero, Robert-Jan Bruintjes, Jakub Mikolaj Tomczak, Erik J Bekkers, Mark Hoogendoorn, and Jan van Gemert. Flexconv: Continuous kernel convolutions with differentiable kernel sizes. In International Conference on Learning Representations, 2021a." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 497, + 505, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 497, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 505, + 532 + ], + "type": "text", + "content": "David W Romero, Anna Kuzina, Erik J Bekkers, Jakub Mikolaj Tomczak, and Mark Hoogendoorn. Ckconv: Continuous kernel convolution for sequential data. In International Conference on Learning Representations, 2021b." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 538, + 505, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 538, + 505, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 505, + 572 + ], + "type": "text", + "content": "David W Romero, David M Knigge, Albert Gu, Erik J Bekkers, Efstratios Gavves, Jakub M Tomczak, and Mark Hoogendoorn. Towards a general purpose cnn for long range dependencies in nd. arXiv preprint arXiv:2206.03398, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 578, + 505, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 505, + 602 + ], + "type": "text", + "content": "Tim Salimans and Durk P Kingma. Weight normalization: A simple reparameterization to accelerate training of deep neural networks. Advances in neural information processing systems, 29, 2016." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 609, + 505, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 505, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 505, + 643 + ], + "type": "text", + "content": "Uri Shaham, Elad Segal, Maor Ivgi, Avia Efrat, Ori Yoran, Adi Haviv, Ankit Gupta, Wenhan Xiong, Mor Geva, Jonathan Berant, and Omer Levy. Scrols: Standardized comparison over long language sequences, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 649, + 505, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 649, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 505, + 673 + ], + "type": "text", + "content": "Jimmy TH Smith, Andrew Warrington, and Scott W Linderman. Simplified state space layers for sequence modeling. arXiv preprint arXiv:2208.04933, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 679, + 505, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 505, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 505, + 703 + ], + "type": "text", + "content": "Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International conference on machine learning, pp. 6105-6114. PMLR, 2019." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "text", + "content": "Y Tay, D Bahri, D Metzler, D Juan, Z Zhao, and C Zheng. Synthesizer: Rethinking self-attention in transformer models. arxiv 2020. arXiv preprint arXiv:2005.00743, 2, 2020a." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 667 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Yi Tay, Mostafa Dehghani, Samira Abnar, Yikang Shen, Dara Bahri, Philip Pham, Jinfeng Rao, Liu Yang, Sebastian Ruder, and Donald Metzler. Long range arena: A benchmark for efficient transformers. arXiv preprint arXiv:2011.04006, 2020b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In International Conference on Machine Learning, pp. 10347-10357. PMLR, 2021a." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "text", + "content": "Hugo Touvron, Matthieu Cord, Alexandre Sablayrolles, Gabriel Synnaeve, and Hervé Jégou. Going deeper with image transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 32-42, 2021b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 204, + 505, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 505, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 505, + 239 + ], + "type": "text", + "content": "Aaron Van den Oord, Nal Kalchbrenner, Lasse Espeholt, Oriol Vinyals, Alex Graves, et al. Conditional image generation with pixelCNN decoders. Advances in neural information processing systems, 29, 2016." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 245, + 505, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 245, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 505, + 280 + ], + "type": "text", + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 286, + 505, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 286, + 505, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 286, + 505, + 320 + ], + "type": "text", + "content": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. 2019. In the Proceedings of ICLR." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 327, + 505, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 505, + 350 + ], + "type": "text", + "content": "Sinong Wang, Belinda Z. Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 357, + 505, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 505, + 380 + ], + "type": "text", + "content": "Pete Warden. Speech commands: A dataset for limited-vocabulary speech recognition. arXiv preprint arXiv:1804.03209, 2018." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 386, + 505, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 386, + 505, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 505, + 421 + ], + "type": "text", + "content": "Wenting Ye, Hongfei Yang, Shuai Zhao, Haoyang Fang, Xingjian Shi, and Naveen Neppalli. A transformer-based substitute recommendation model incorporating weakly supervised customer behavior data. arXiv preprint arXiv:2211.02533, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 427, + 505, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 427, + 505, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 427, + 505, + 462 + ], + "type": "text", + "content": "Weihao Yu, Mi Luo, Pan Zhou, Chenyang Si, Yichen Zhou, Xinchao Wang, Jiashi Feng, and Shuicheng Yan. Metaformer is actually what you need for vision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10819-10829, 2022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 468, + 505, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 505, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 505, + 492 + ], + "type": "text", + "content": "Sergey Zagoruyko and Nikos Komodakis. Diracnets: Training very deep neural networks without skip-connections. arXiv preprint arXiv:1706.00388, 2017." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 498, + 505, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 498, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 505, + 533 + ], + "type": "text", + "content": "Manzil Zaheer, Guru Guruganesh, Kumar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontonon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, et al. Big bird: Transformers for longer sequences. Advances in Neural Information Processing Systems, 33:17283-17297, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 539, + 505, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 539, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 539, + 505, + 574 + ], + "type": "text", + "content": "Fengda Zhu, Yi Zhu, Xiaojun Chang, and Xiaodan Liang. Vision-language navigation with self-supervised auxiliary reasoning tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10012-10022, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 580, + 505, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 580, + 505, + 625 + ], + "spans": [ + { + "bbox": [ + 105, + 580, + 505, + 625 + ], + "type": "text", + "content": "Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. 
In Proceedings of the IEEE international conference on computer vision, pp. 19-27, 2015." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 632, + 505, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 632, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 505, + 667 + ], + "type": "text", + "content": "Barret Zoph, Vijay Vasudevan, Jonathon Shlens, and Quoc V Le. Learning transferable architectures for scalable image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8697-8710, 2018." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 316, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 316, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 316, + 94 + ], + "type": "text", + "content": "A DETAILED EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 123, + 228, + 134 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 123, + 228, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 228, + 134 + ], + "type": "text", + "content": "A.1 LONG RANGE ARENA" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 154, + 506, + 
299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "text", + "content": "Here we report the detailed implementation of the LRA experiments. We use the concatenation style combination of sub-kernels in all experiments and mildly tune the dimension of each scale. Since the SGConv exhibits a strong ability to fit data, we slightly increase the dropout for some tasks to prevent overfitting. Table 6 lists the detailed hyperparameters used in LRA. In most experiments, we set " + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "inline_equation", + "content": "1/2" + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "text", + "content": ", which approximately decays in speed " + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "inline_equation", + "content": "1 / pos" + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "text", + "content": ". Experiments on flattened 2D images require some special modification of the kernel. We hypothesize that it is because images require more subtle inductive bias. For the experiment on the Image dataset, we use the disentangled version of parameterization and combination weights as described in Section 4.1.2 and set the decay speed to be " + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "inline_equation", + "content": "1 / pos" + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "text", + "content": ". 
For the experiment on the Pathfinder-X task, we initialize convolution kernels in different channels with cosine waves with different frequencies and randomly assign " + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "text", + "content": " ranging from 1 to " + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "inline_equation", + "content": "1/3" + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "text", + "content": " to different channels. Both these modifications bring about " + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "text", + "content": " improvement compared to standard fixed " + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\alpha = 1/2" + }, + { + "bbox": [ + 104, + 154, + 506, + 299 + ], + "type": "text", + "content": " and random initialization. The remaining hyperparameters and experimental settings are same to Gu et al. (2022a) which can be found in the Github repo1." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 170, + 332, + 442, + 385 + ], + "blocks": [ + { + "bbox": [ + 170, + 332, + 442, + 385 + ], + "lines": [ + { + "bbox": [ + 170, + 332, + 442, + 385 + ], + "spans": [ + { + "bbox": [ + 170, + 332, + 442, + 385 + ], + "type": "table", + "html": "
ListOpsTextRetrievalImagePathfinderPath-X
Acc.61.4589.2091.1187.9795.4697.83
Scale dim.121323264
Dropout0000.20.20
", + "image_path": "f7289c91d4e58b5033e90590e23b06f162d2c6751363421f6ce87cac85459132.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 197, + 392, + 411, + 405 + ], + "lines": [ + { + "bbox": [ + 197, + 392, + 411, + 405 + ], + "spans": [ + { + "bbox": [ + 197, + 392, + 411, + 405 + ], + "type": "text", + "content": "Table 6: Hyperparameters used in LRA experiments." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 449, + 218, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 218, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 218, + 460 + ], + "type": "text", + "content": "A.2 SPEECH COMMAND" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 480, + 506, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 480, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 480, + 506, + 515 + ], + "type": "text", + "content": "For Speech Command 10-class task, we use the same training setting from Gu et al. (2021a) earlier version Github repo" + }, + { + "bbox": [ + 104, + 480, + 506, + 515 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 104, + 480, + 506, + 515 + ], + "type": "text", + "content": ". For Speech Command 35-class task, we use the training setting from the Github repo" + }, + { + "bbox": [ + 104, + 480, + 506, + 515 + ], + "type": "inline_equation", + "content": "^1" + }, + { + "bbox": [ + 104, + 480, + 506, + 515 + ], + "type": "text", + "content": ". The scale dimension of SGConv is 32." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 555, + 209, + 566 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 209, + 566 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 209, + 566 + ], + "type": "text", + "content": "A.3 LANGUAGE TASK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 586, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 654 + ], + "type": "text", + "content": "Our implementation for Language Task is based on the project " + }, + { + "bbox": [ + 104, + 586, + 506, + 654 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 104, + 586, + 506, + 654 + ], + "type": "text", + "content": ". For the 16-L model, we utilize 3072 as the sequence length for SGCONV and 192 as both the attention and memory length for SAAttention. For the 18-L model, we utilize 3072 as the sequence length for SGCONV and 384 as both the attention and memory length for SAAttention. The SGConv has 96 as the scale dimension. We adopt the training settings from the above mentioned project 3 except the batch size which is reduced to 64. The SGConv block is shown in Figure 4." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 710, + 485, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 710, + 485, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 485, + 733 + ], + "type": "text", + "content": "3https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/Transformer-XL" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 244, + 79, + 350, + 236 + ], + "blocks": [ + { + "bbox": [ + 244, + 79, + 350, + 236 + ], + "lines": [ + { + "bbox": [ + 244, + 79, + 350, + 236 + ], + "spans": [ + { + "bbox": [ + 244, + 79, + 350, + 236 + ], + "type": "image", + "image_path": "43affd57893af7a4fd9c10d4a23c5d23a137a803fd92031e7f0d12197d6d73fd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 254, + 251, + 357, + 264 + ], + "lines": [ + { + "bbox": [ + 254, + 251, + 357, + 264 + ], + "spans": [ + { + "bbox": [ + 254, + 251, + 357, + 264 + ], + "type": "text", + "content": "Figure 6: SGConv block" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 168, + 285, + 442, + 369 + ], + "blocks": [ + { + "bbox": [ + 168, + 285, + 442, + 369 + ], + 
"lines": [ + { + "bbox": [ + 168, + 285, + 442, + 369 + ], + "spans": [ + { + "bbox": [ + 168, + 285, + 442, + 369 + ], + "type": "table", + "html": "
256512102420483072
Attn. BlockInf. (ms/batch)2.67.323.291.7X
Mem. (GB)2.63.97.923.9OOM
SGConv BlockInf. (ms/batch)2.75.410.921.843.6
Mem. (GB)2.63.45.28.715.7
", + "image_path": "be3124a45efd4a51d65ebde39313b5d9977f19434cccf41865e3db4df0eb47f7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "lines": [ + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "type": "text", + "content": "Table 7: Comparison of inference time and GPU memory utilization with Attention blocks. SGConv has significantly less memory usage and faster inference speed when the sequence increases." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 419, + 237, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 419, + 237, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 237, + 430 + ], + "type": "text", + "content": "A.4 IMAGE CLASSIFICATION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 439, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 504, + 495 + ], + "type": "text", + "content": "We use the training settings in the work Liu et al. (2022)" + }, + { + "bbox": [ + 104, + 439, + 504, + 495 + ], + "type": "inline_equation", + "content": "^4" + }, + { + "bbox": [ + 104, + 439, + 504, + 495 + ], + "type": "text", + "content": ". Since the SGConvNeXt has several downsampling layers, we fixed the scale to 5 and the scale dimensions are calculated based on the flattened features length of the corresponding layers. The structure is shown in Figure 7. The results are shown in Table 8. The visualization of the SGConvNeXt-Base outputs are shown in Figure 9. The visualization of the SGConv kernels at different stages are shown in Figure 10." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 164, + 505, + 275, + 681 + ], + "blocks": [ + { + "bbox": [ + 164, + 505, + 275, + 681 + ], + "lines": [ + { + "bbox": [ + 164, + 505, + 275, + 681 + ], + "spans": [ + { + "bbox": [ + 164, + 505, + 275, + 681 + ], + "type": "image", + "image_path": "1fa5dada2ae98699989f808bf6e6627d621edc1eae969b3f5ef14bb0223c5cf3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 257, + 700, + 354, + 713 + ], + "lines": [ + { + "bbox": [ + 257, + 700, + 354, + 713 + ], + "spans": [ + { + "bbox": [ + 257, + 700, + 354, + 713 + ], + "type": "text", + "content": "Figure 7: SGConvnext" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 317, + 505, + 435, + 681 + ], + "blocks": [ + { + "bbox": [ + 317, + 505, + 435, + 681 + ], + "lines": [ + { + "bbox": [ + 317, + 505, + 435, + 681 + ], + "spans": [ + { + "bbox": [ + 317, + 505, + 435, + 681 + ], + "type": "image", + "image_path": "c5e897c14ce292212b5305fdbc9027f627c9942c0ea5cb81bd2c0333cd4f6091.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 361, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 361, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 361, + 732 + ], + "type": "text", + "content": "4https://github.com/facebookresearch/ConvNeXt" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 115, + 83, + 298, + 182 + ], + "blocks": [ + { + "bbox": [ + 115, + 83, + 298, + 182 + ], + "lines": [ + { + "bbox": [ + 115, + 83, + 298, + 182 + ], + "spans": [ + { + "bbox": [ + 115, + 83, + 298, + 182 + ], + "type": "table", + "html": "
modelFLOPsthroughput (image/s)paramsAcc.
Swin-T4.5G944.529M81.3
Swin-S8.7G576.850M83.0
Swin-B15.4G433.488M83.5
Swin-B384247.0G134.688M84.5
ConvNeXt-T4.5G1252.629M82.1
ConvNeXt-S8.7G801.450M83.1
ConvNeXt-B15.4G588.389M83.8
ConvNeXt-L34.4G349.8198M84.3
", + "image_path": "407e45cbe52030a1fa74fba42dee8571a99b8b3810a004053f5d82ba0f6e9dea.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 308, + 80, + 500, + 186 + ], + "blocks": [ + { + "bbox": [ + 308, + 80, + 500, + 186 + ], + "lines": [ + { + "bbox": [ + 308, + 80, + 500, + 186 + ], + "spans": [ + { + "bbox": [ + 308, + 80, + 500, + 186 + ], + "type": "table", + "html": "
modelFLOPsthroughput (image/s)paramsAcc.
EffNet-B330021.8G693.912M81.6
EffNet-B438024.2G341.519M82.9
EffNet-B545629.9G223.530M83.6
EffNet-B6528219.0G91.543M84.0
EffNet-B7600237.0G52.966M84.3
SGConvNeXt-T4.3G872.629M82.0
SGConvNeXt-S8.3G565.351M83.4
SGConvNeXt-B14.6G417.990M83.9
SGConvNeXt-L32.5G256.7200M84.4
", + "image_path": "d08d177dac67bcdb62444bfe08763b8750def72d9261d5a932fb54887eadcdfb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 158, + 194, + 451, + 206 + ], + "lines": [ + { + "bbox": [ + 158, + 194, + 451, + 206 + ], + "spans": [ + { + "bbox": [ + 158, + 194, + 451, + 206 + ], + "type": "text", + "content": "Table 8: Comparison of ImageNet-1k Top-1 accuracy with SoTA works." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 228, + 278, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 228, + 278, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 278, + 240 + ], + "type": "text", + "content": "B DETAILED IMPLEMENTATION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 253, + 294, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 253, + 294, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 294, + 264 + ], + "type": "text", + "content": "B.1 ILLUSTRATION OF SGCONV MODULE" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 108, + 281, + 499, + 376 + ], + "blocks": [ + { + "bbox": [ + 108, + 281, + 499, + 376 + ], + "lines": [ + { + "bbox": [ + 108, + 281, + 499, + 376 + ], + "spans": [ + { + "bbox": [ + 108, + 281, + 499, + 376 + ], + "type": "image", + "image_path": "dc73cd86ac1632a3107e6a2cc4f5942032d3e2eaf21f4bc8327b3750251d8525.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 393, + 504, + 426 + ], + "lines": [ + { + "bbox": [ + 104, + 393, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 393, + 504, + 426 + ], + "type": "text", + "content": "Figure 8: Implementing SGConv with FFT. We first compute the convolutional kernels for each channel as described in Section 3.2, and apply the depth-wise global convolution to the input features." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 448, + 266, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 448, + 266, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 266, + 459 + ], + "type": "text", + "content": "B.2 PYTHON STYLE PSEUDO-CODE" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 105, + 468, + 433, + 723 + ], + "blocks": [ + { + "bbox": [ + 105, + 468, + 433, + 723 + ], + "lines": [ + { + "bbox": [ + 105, + 468, + 433, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 433, + 723 + ], + "type": "text", + "content": "# Parameters \nkernel-param_list = [] # w_i \nfor _ in range(num_scales): \n kernel-param_list.append(\n nnParameter(torch.random(hidden_dim, kernel_dim)) # size: h * d \n# Compute global convolution kernel \nkernel_list = [] # k_i \nfor i in range(num_scales): \n kernel = F.interpolate(\n kernel-param_list[i],\n scale_factor = 2**max(0, i-1),\n mode = \"linear\"\n ) * 0.5 ** i # alpha = 0.5 \n kernel_list.append(kernel) \n# The computed kernel, size: h * (d * 2^{s-1}) \nk = torch.cat(kernel_list, dim=-1) \n#Normalize kernel \nif is_init: # Compute the norm at initialization \nkernel_norm = k(norm(dim=-1, keepdim=True).detach() \nk = k / kernel_norm" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 
300, + 751, + 312, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 104, + 82, + 412, + 217 + ], + "blocks": [ + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "lines": [ + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "text", + "content": "Use kernel to compute global convolution \n#x:batch_size \\* hidden_dim \\* seq_len \nL " + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "text", + "content": " x.size(-1) \n#Truncate kernel if it is too long \nk " + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "text", + "content": " k[., :L] \n# Use FFT to compute convolution \nx_f " + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "text", + "content": " torch.fft.rfft(x, n=2*L) \nk_f " + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "text", + "content": " torch.fft.rfft(k, n=2*L) \ny_f " + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "text", + "content": " torch.einsum(\"b h l,h l-> b h l\",x_f,k_f) \n#Inverse FFT to get the result \ny " + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 104, + 82, + 412, + 217 + ], + "type": "text", + "content": " torch.fft.irfft(y_f, n=2*L)[...,:L]" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": 
"code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "image", + "bbox": [ + 126, + 250, + 185, + 308 + ], + "blocks": [ + { + "bbox": [ + 139, + 234, + 169, + 247 + ], + "lines": [ + { + "bbox": [ + 139, + 234, + 169, + 247 + ], + "spans": [ + { + "bbox": [ + 139, + 234, + 169, + 247 + ], + "type": "text", + "content": "Input" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 126, + 250, + 185, + 308 + ], + "lines": [ + { + "bbox": [ + 126, + 250, + 185, + 308 + ], + "spans": [ + { + "bbox": [ + 126, + 250, + 185, + 308 + ], + "type": "image", + "image_path": "a9dc7d8f9a6579bcb0e37dc1288d81c3d7b12bb3cf6ca8adff9a4a8c7ef05f25.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 202, + 251, + 260, + 307 + ], + "blocks": [ + { + "bbox": [ + 209, + 234, + 250, + 247 + ], + "lines": [ + { + "bbox": [ + 209, + 234, + 250, + 247 + ], + "spans": [ + { + "bbox": [ + 209, + 234, + 250, + 247 + ], + "type": "text", + "content": "Stage 0" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 202, + 251, + 260, + 307 + ], + "lines": [ + { + "bbox": [ + 202, + 251, + 260, + 307 + ], + "spans": [ + { + "bbox": [ + 202, + 251, + 260, + 307 + ], + "type": "image", + "image_path": "07133a81bd32c85fca2b3c2f6edf443585bfeb5580f157079a479ab6bdc87c3d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 278, + 251, + 335, + 307 + ], + "blocks": [ + { + "bbox": [ + 287, + 234, + 328, + 247 + ], + "lines": [ + { + "bbox": [ + 287, + 234, + 328, + 247 + ], + "spans": [ + { + "bbox": [ + 287, + 234, + 328, + 247 + ], + "type": "text", + "content": "Stage 1" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 278, + 251, + 335, + 307 + ], + "lines": [ + { + 
"bbox": [ + 278, + 251, + 335, + 307 + ], + "spans": [ + { + "bbox": [ + 278, + 251, + 335, + 307 + ], + "type": "image", + "image_path": "8f9ccced2a4c5a31bd266c5297b0e0fcd55d74317ffa850e4164d0f29a20f46e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 353, + 251, + 411, + 307 + ], + "blocks": [ + { + "bbox": [ + 363, + 234, + 404, + 247 + ], + "lines": [ + { + "bbox": [ + 363, + 234, + 404, + 247 + ], + "spans": [ + { + "bbox": [ + 363, + 234, + 404, + 247 + ], + "type": "text", + "content": "Stage 2" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 353, + 251, + 411, + 307 + ], + "lines": [ + { + "bbox": [ + 353, + 251, + 411, + 307 + ], + "spans": [ + { + "bbox": [ + 353, + 251, + 411, + 307 + ], + "type": "image", + "image_path": "8f9354eb71c1900eb43ba42d87dc85cb5e33f59b5e413a9ee0900ab39647fb31.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 429, + 251, + 486, + 307 + ], + "blocks": [ + { + "bbox": [ + 439, + 234, + 480, + 247 + ], + "lines": [ + { + "bbox": [ + 439, + 234, + 480, + 247 + ], + "spans": [ + { + "bbox": [ + 439, + 234, + 480, + 247 + ], + "type": "text", + "content": "Stage 3" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 429, + 251, + 486, + 307 + ], + "lines": [ + { + "bbox": [ + 429, + 251, + 486, + 307 + ], + "spans": [ + { + "bbox": [ + 429, + 251, + 486, + 307 + ], + "type": "image", + "image_path": "53394019db4eaa733cdb780433fbdcde63af6ac4ea7cb9231ec9885ef646c3af.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 126, + 324, + 184, + 381 + ], + "blocks": [ + { + "bbox": [ + 126, + 324, + 184, + 381 + ], + "lines": [ + { + "bbox": [ + 126, + 324, + 184, + 381 + ], + "spans": [ + { + 
"bbox": [ + 126, + 324, + 184, + 381 + ], + "type": "image", + "image_path": "39119b984d911ce8b1acf0d5c37a77cfc3ee189085b413fe8d29f451b7e423f9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 202, + 324, + 260, + 380 + ], + "blocks": [ + { + "bbox": [ + 202, + 324, + 260, + 380 + ], + "lines": [ + { + "bbox": [ + 202, + 324, + 260, + 380 + ], + "spans": [ + { + "bbox": [ + 202, + 324, + 260, + 380 + ], + "type": "image", + "image_path": "a7386662ad0f56176c6b1fbb805a405d5a8d59b6207af84b5d674bcdacbf39d3.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 278, + 324, + 335, + 380 + ], + "blocks": [ + { + "bbox": [ + 278, + 324, + 335, + 380 + ], + "lines": [ + { + "bbox": [ + 278, + 324, + 335, + 380 + ], + "spans": [ + { + "bbox": [ + 278, + 324, + 335, + 380 + ], + "type": "image", + "image_path": "4af53c42af2cccf6e45985a232eaa6503b464f113c97419605ed3da59341bc7e.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 353, + 324, + 411, + 380 + ], + "blocks": [ + { + "bbox": [ + 353, + 324, + 411, + 380 + ], + "lines": [ + { + "bbox": [ + 353, + 324, + 411, + 380 + ], + "spans": [ + { + "bbox": [ + 353, + 324, + 411, + 380 + ], + "type": "image", + "image_path": "5005d29b7d379e0930dd26c01c74520b6dd14f226fc01b8f0f88e70f3eb4968c.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 429, + 324, + 486, + 380 + ], + "blocks": [ + { + "bbox": [ + 429, + 324, + 486, + 380 + ], + "lines": [ + { + "bbox": [ + 429, + 324, + 486, + 380 + ], + "spans": [ + { + "bbox": [ + 429, + 324, + 486, + 380 + ], + "type": "image", + "image_path": "64a6d10bfc28168ce83ffa5298213128c167a407122dd6af6eadea4e3a01684c.jpg" + } + ] + } + ], + "index": 16, + 
"angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 126, + 398, + 184, + 454 + ], + "blocks": [ + { + "bbox": [ + 126, + 398, + 184, + 454 + ], + "lines": [ + { + "bbox": [ + 126, + 398, + 184, + 454 + ], + "spans": [ + { + "bbox": [ + 126, + 398, + 184, + 454 + ], + "type": "image", + "image_path": "de571302efda9573db54e971b0e66fc36970220a56704ed3333efa6cfe2c9299.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 114, + 475, + 495, + 487 + ], + "lines": [ + { + "bbox": [ + 114, + 475, + 495, + 487 + ], + "spans": [ + { + "bbox": [ + 114, + 475, + 495, + 487 + ], + "type": "text", + "content": "Figure 9: Visualization of the intermediate features of SGConvNeXt on ImageNet-1k dataset." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 202, + 398, + 260, + 453 + ], + "blocks": [ + { + "bbox": [ + 202, + 398, + 260, + 453 + ], + "lines": [ + { + "bbox": [ + 202, + 398, + 260, + 453 + ], + "spans": [ + { + "bbox": [ + 202, + 398, + 260, + 453 + ], + "type": "image", + "image_path": "5564739a278af131f9579f07b6df9f3fe5b7c3dc6af6e24ee023f0f07cefeb79.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 278, + 398, + 335, + 453 + ], + "blocks": [ + { + "bbox": [ + 278, + 398, + 335, + 453 + ], + "lines": [ + { + "bbox": [ + 278, + 398, + 335, + 453 + ], + "spans": [ + { + "bbox": [ + 278, + 398, + 335, + 453 + ], + "type": "image", + "image_path": "ec0bad80ad47b2e9c11fb30bfcd15d076fa5ab1069b5dbd683f06b363df9e5a7.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 353, + 398, + 411, + 453 + ], + "blocks": [ + { + "bbox": [ + 353, + 398, + 411, + 453 + ], + "lines": [ + { + "bbox": [ + 353, + 398, + 411, + 453 + ], + "spans": [ 
+ { + "bbox": [ + 353, + 398, + 411, + 453 + ], + "type": "image", + "image_path": "a9826591ae19eadcb1b5057a65e10a3daa19c1db9dad446489d6b46a8796752e.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 429, + 398, + 486, + 453 + ], + "blocks": [ + { + "bbox": [ + 429, + 398, + 486, + 453 + ], + "lines": [ + { + "bbox": [ + 429, + 398, + 486, + 453 + ], + "spans": [ + { + "bbox": [ + 429, + 398, + 486, + 453 + ], + "type": "image", + "image_path": "7cc909378927af4951b6e0afc256a4b067211b32c2308bfa7d3dda0cd4a34f98.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 186, + 80, + 424, + 201 + ], + "blocks": [ + { + "bbox": [ + 186, + 80, + 424, + 201 + ], + "lines": [ + { + "bbox": [ + 186, + 80, + 424, + 201 + ], + "spans": [ + { + "bbox": [ + 186, + 80, + 424, + 201 + ], + "type": "image", + "image_path": "d68ce1d42882558455ae27c576e70616f56bcf9853ae2ec281c69e5cb0ff8343.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 186, + 220, + 424, + 340 + ], + "blocks": [ + { + "bbox": [ + 237, + 208, + 373, + 218 + ], + "lines": [ 
+ { + "bbox": [ + 237, + 208, + 373, + 218 + ], + "spans": [ + { + "bbox": [ + 237, + 208, + 373, + 218 + ], + "type": "text", + "content": "(a) Visulization of kernels at Stage 0." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 186, + 220, + 424, + 340 + ], + "lines": [ + { + "bbox": [ + 186, + 220, + 424, + 340 + ], + "spans": [ + { + "bbox": [ + 186, + 220, + 424, + 340 + ], + "type": "image", + "image_path": "f0451dca363eba8e73cee1210fcccf9312c36bc021eec48307df6eb61648f68e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 186, + 358, + 424, + 480 + ], + "blocks": [ + { + "bbox": [ + 264, + 346, + 346, + 357 + ], + "lines": [ + { + "bbox": [ + 264, + 346, + 346, + 357 + ], + "spans": [ + { + "bbox": [ + 264, + 346, + 346, + 357 + ], + "type": "text", + "content": "(b) Kernels at Stage 1." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 186, + 358, + 424, + 480 + ], + "lines": [ + { + "bbox": [ + 186, + 358, + 424, + 480 + ], + "spans": [ + { + "bbox": [ + 186, + 358, + 424, + 480 + ], + "type": "image", + "image_path": "3f0b943fcd06078d4d6d796f7c864a44ce0cbb8d24901ac7809b4f31f49b659e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 186, + 498, + 423, + 616 + ], + "blocks": [ + { + "bbox": [ + 264, + 487, + 346, + 498 + ], + "lines": [ + { + "bbox": [ + 264, + 487, + 346, + 498 + ], + "spans": [ + { + "bbox": [ + 264, + 487, + 346, + 498 + ], + "type": "text", + "content": "(c) Kernels at Stage 2." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 186, + 498, + 423, + 616 + ], + "lines": [ + { + "bbox": [ + 186, + 498, + 423, + 616 + ], + "spans": [ + { + "bbox": [ + 186, + 498, + 423, + 616 + ], + "type": "image", + "image_path": "9a98e21258cb5eaec5b05f739d68b236172d88715c45a81383461b6792f15f39.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 264, + 623, + 346, + 634 + ], + "lines": [ + { + "bbox": [ + 264, + 623, + 346, + 634 + ], + "spans": [ + { + "bbox": [ + 264, + 623, + 346, + 634 + ], + "type": "text", + "content": "(d) Kernels at Stage 3." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 191, + 643, + 418, + 655 + ], + "lines": [ + { + "bbox": [ + 191, + 643, + 418, + 655 + ], + "spans": [ + { + "bbox": [ + 191, + 643, + 418, + 655 + ], + "type": "text", + "content": "Figure 10: Kernels in SGConvNeXt at different stages." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 681, + 443, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 443, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 443, + 693 + ], + "type": "text", + "content": "C NEURAL ARCHITECTURE SEARCH PERSPECTIVE OF SGCONV" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Neural architecture search (NAS) is an automated process for discovering a neural network's optimal architecture or structure for a particular task. 
NAS typically involves searching through a large" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 82, + 504, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 504, + 224 + ], + "type": "text", + "content": "space of possible network architectures using combination algorithms, such as reinforcement learning (Zoph et al., 2018), evolutionary algorithms (Real et al., 2019), or Bayesian optimization (Kandasamy et al., 2018). In recent years, there has been a proliferation of research aimed at designing traditional convolutional neural networks with local convolution (Li et al., 2021; Lin et al., 2021; Li et al., 2023). These works primarily focus on optimizing the networks' structures to improve their performance. From the perspective of NAS, the SGConv can be interpreted as a kernel-level fine-grained search for the distribution of parameters by utilizing parameterization. Furthermore, the SGConv has shown that the global convolution kernel exhibits sparsity and can be pruned (Fig. 10), meaning that the effective kernel length can be automatically determined through the training phase. These findings can potentially spark further research and development in the field. 
Another simple approach we explore in NAS is the combination of Attention and SGConv through a mixture model (Section 4.3.1). This approach is both intuitive and efficient and has the potential to improve the performance of neural network architectures further." + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/What shapes the loss landscape of self supervised learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_content_list.json b/2023/What shapes the loss landscape of self supervised learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9b7c1fad4a4ca2ea68a11e56c84f805d207ab107 --- /dev/null +++ b/2023/What shapes the loss landscape of self supervised learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_content_list.json @@ -0,0 +1,3418 @@ +[ + { + "type": "text", + "text": "WHAT SHAPES THE LOSS LANDSCAPE OF SELF SUPERVISED LEARNING?", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Liu Ziyin $^{1,2,3\\dagger}$ , Ekdeep Singh Lubana $^{2,3,4\\dagger}$ , Masahito Ueda $^{1,5,6}$ , Hidenori Tanaka $^{2,3}$", + "bbox": [ + 218, + 174, + 774, + 191 + ], + "page_idx": 0 + }, + { + 
"type": "text", + "text": "$^{1}$ Department of Physics, The University of Tokyo, Tokyo, Japan", + "bbox": [ + 308, + 202, + 686, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Physics & Informatics Laboratories, NTT Research, Inc., Sunnyvale, CA, USA", + "bbox": [ + 263, + 215, + 733, + 229 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3Center for Brain Science, Harvard University, Cambridge, USA", + "bbox": [ + 305, + 228, + 691, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{4}$ EECS Department, University of Michigan, Ann Arbor, USA", + "bbox": [ + 316, + 239, + 679, + 253 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{5}$ Institute for Physics of Intelligence, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo", + "bbox": [ + 220, + 253, + 776, + 267 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{6}$ RIKEN Center for Emergent Matter Science (CEMS), Wako, Saitama, Japan", + "bbox": [ + 267, + 266, + 725, + 280 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 287, + 547, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Prevention of complete and dimensional collapse of representations has recently become a design principle for self-supervised learning (SSL). However, questions remain in our theoretical understanding: When do those collapses occur? What are the mechanisms and causes? We answer these questions by deriving and thoroughly analyzing an analytically tractable theory of SSL loss landscapes. In this theory, we identify the causes of the dimensional collapse and study the effect of normalization and bias. 
Finally, we leverage the interpretability afforded by the analytical theory to understand how dimensional collapse can be beneficial and what affects the robustness of SSL against data imbalance.", + "bbox": [ + 228, + 308, + 767, + 434 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 450, + 336, + 465 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Self-supervised learning (SSL) methods have achieved remarkable success in learning good representations without labeled data (Chen et al., 2020b). Loss functions used in such SSL techniques promote representational similarity between pairs of related samples while using explicit penalties (Chen et al., 2020a; He et al., 2020; Zbontar et al., 2021; Caron et al., 2020) or asymmetric dynamics (Caron et al., 2021; Grill et al., 2020; Chen and He, 2021) to ensure that the distance between unrelated samples remains large. In practice, however, SSL training often experiences the phenomenon of dimensional collapse (Jing et al., 2021; Tian et al., 2021; Pokle et al., 2022), where the learned representation spans a low dimensional subspace of the overall available space. In the extreme case, this failure mode instantiates as a complete collapse, where the learned representation becomes zero-rank, and no informative features can be extracted.", + "bbox": [ + 169, + 474, + 826, + 614 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Prior work has primarily positioned such collapses in SSL as enemies of learning, arguing that they can negatively impact downstream task performance (Zbontar et al., 2021; Jing et al., 2021; Bardes et al., 2021). However, recent work by Cosentino et al. (2022) empirically demonstrates otherwise: quality of representations can be improved when there is a degree of collapse. 
These conflicting results indicate that despite extensive empirical explorations, a gap remains in our understanding of the collapse phenomenon in SSL training. We argue that this gap is due to the lack of a theoretical framework to analyze the mechanisms promoting collapsed representations. We aim to close this gap by carefully studying the loss landscapes of SSL.", + "bbox": [ + 169, + 621, + 823, + 733 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we analytically solve the effective landscapes of linear models trained on several popular losses used in self-supervised learning, including InfoNCE (Oord et al., 2018), Normalized Temperature Cross-Entropy (NT-xent) (Chen et al., 2020a), Spectral Contrastive Loss (HaoChen et al., 2021), and Barlow Twins / VICReg (Zbontar et al., 2021; Bardes et al., 2021). The main thesis of this work is: the local geometry of the SSL landscapes around the origin crucially decides the learning behavior of SSL models. Technically, we show that", + "bbox": [ + 169, + 739, + 823, + 824 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. the interplay between data variation and data augmentation determines the geometry of the loss;", + "2. the geometry of the loss explains when dimensional collapse can be helpful and why certain SSL losses are robust against data imbalance, but not the others." 
+ ], + "bbox": [ + 171, + 830, + 823, + 872 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To the best of our knowledge, our work is the first to study the landscape causes of collapse in SSL thoroughly.", + "bbox": [ + 169, + 878, + 823, + 907 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "†Work done during an internship at Physics & Informatics Laboratories, NTT Research.", + "bbox": [ + 200, + 910, + 723, + 924 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8646768e22b533aecebc4335e5d0ac391451a5247e209ea684d46b6e91eeeaff.jpg", + "image_caption": [ + "(a) An eigenmode" + ], + "image_footnote": [], + "bbox": [ + 174, + 89, + 323, + 189 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/cbbd6117e91840b6e94713dcb5d6158eb9b8b02f0b21496e2397f12a4de3fadb.jpg", + "image_caption": [ + "(b) No collapse" + ], + "image_footnote": [], + "bbox": [ + 336, + 93, + 488, + 188 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2c09e4bcd2758fb826668aee9a0defc0a67ebd2ab0285f41d0bca338d1f516d4.jpg", + "image_caption": [ + "(c) Dimensional collapse" + ], + "image_footnote": [], + "bbox": [ + 504, + 95, + 656, + 188 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5f5d0ef31c9b33d250ec46485b1a7b883ec443bf1d735a885a7fdbec145b7bf4.jpg", + "image_caption": [ + "(d) Complete collapse", + "Figure 1: Landscape in self-supervised learning (SSL). SSL losses generally depend only on the relative angle between pairs of network outputs (e.g., $f(x)^T f(x')$ ). Thus, the landscapes with a linear network ( $f(x) = Wx$ ) have a global rotational symmetry and are symmetric about the origin. 
Our theory finds that the local stability at the origin decides the collapse, and larger data variation (green) prevents collapse, while strong data augmentation (red) can promote collapse. We plot the loss for a toy linear model with a diagonal weight matrix $diag(r_1, r_2)$ . (a) The 1d landscape when fixing one of the parameter. (b-d) The 2d landscape. (b) No collapse: the origin is an unstable local maximum, and surrounding local minima avoid collapse. The dimensionally collapsed solutions are the saddle points. (c) Dimensional collapse: the value of $w_1$ for all stable fixed points collapses to zero. (d) Complete collapse: the origin becomes the isolated local minimum." + ], + "image_footnote": [], + "bbox": [ + 669, + 95, + 821, + 189 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORKS", + "text_level": 1, + "bbox": [ + 171, + 344, + 354, + 359 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "SSL and Collapses. On the one hand, prior literature has often argued collapse as a harmful phenomenon that can deteriorate downstream task performance (Jing et al., 2021; Zbontar et al., 2021). Preventing such collapsed representations is a frequently discussed topic in literature (Hua et al., 2021; Jing et al., 2021; Pokle et al., 2022; Tian et al., 2021) and has motivated the design of several SSL techniques (Zbontar et al., 2021; Bardes et al., 2021; Ermolov et al., 2021). On the other hand, Cosentino et al. (2022) empirically showed that dimensional collapses under strong augmentations could significantly improve generalization performance. Our work demystifies these conflicting results by finding analytic solutions to loss landscapes of several standard SSL techniques.", + "bbox": [ + 169, + 372, + 823, + 484 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Theoretical Advances in SSL. 
Recently, several advances have been made towards understanding the success of SSL techniques from different perspectives: e.g., learning theory (Arora et al., 2019; Saunshi et al., 2022; Nozawa and Sato, 2021; Wei et al., 2021), information theory (Tsai et al., 2021a;b; Tosh et al., 2021), causality and data-generating processes (Zimmerman et al., 2021; Kugelgen et al., 2021; Trivedi et al., 2022; Tian et al., 2020; Mitrovic et al., 2020; Wang et al., 2022), dynamics (Wang and Isola, 2020; Tian et al., 2021; Tian, 2022; Wang and Liu, 2021; Simon et al., 2023), and loss landscapes (Pokle et al., 2022). These advances have unveiled practically useful properties of SSL, such as robustness to dataset imbalance (Liu et al., 2021) and principled solutions to avoid spurious correlations (Robinson et al., 2021).", + "bbox": [ + 169, + 489, + 823, + 617 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The work by Jing et al. (2021) is the closest to ours in problem setting. In that paper, the authors focused on studying the linearized learning dynamics and suggested that a competition between the feature signal strength and augmentation strength can lead to dimensional collapse. In contrast, our focus is on the landscape and our result implies that this feature-augmentation competition on its own is insufficient to cause a dimensional collapse. In fact, we show that there will be no collapse in the setting studied by Jing et al. (2021).", + "bbox": [ + 169, + 622, + 823, + 708 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 A LANDSCAPE THEORY OF SELF-SUPERVISED-LEARNING", + "text_level": 1, + "bbox": [ + 171, + 724, + 689, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This section presents the main theoretical results. Let $\\{\\hat{x}_i\\}_i^N$ be a dataset with $N$ data points. For every data point $\\hat{x}$ , we augment it with an i.i.d. noise $\\epsilon$ such that $x \\coloneqq \\hat{x} + \\epsilon$ . 
To be concrete, we start with considering the standard contrastive loss, InfoNCE (Oord et al., 2018):", + "bbox": [ + 169, + 752, + 823, + 796 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nL = \\mathbb {E} _ {\\epsilon} \\left[ - \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp (- | f (x _ {i}) - f \\left(x _ {i} ^ {\\prime}\\right) | ^ {2} / 2)}{\\sum_ {j \\neq i} \\exp (- | f (x _ {i}) - f (\\chi_ {j}) | ^ {2} / 2) + \\exp (- | f (x _ {i}) - f \\left(x _ {i} ^ {\\prime}\\right) | ^ {2} / 2)} \\right], \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 803, + 823, + 840 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $f(x) \\in \\mathbb{R}^{d_1}$ is the model output; all $x, x'$ and $\\chi$ are augmented data points for some independent additive noise $\\epsilon$ such that $\\mathbb{E}_{\\epsilon}[x] = \\hat{x} = \\mathbb{E}_{\\epsilon}[x'] \\neq \\mathbb{E}_{\\epsilon}[\\chi] = \\hat{\\chi}$ . We decompose the model output into a general function $\\phi(x) \\in \\mathbb{R}^{d_0}$ and the last-layer weight matrix $W \\in \\mathbb{R}^{d_1 \\times d_0}$ : $f(x) = W\\phi(x)$ . The covariance of $\\phi(\\hat{x})$ is $A_0 := \\mathbb{E}_{\\hat{x}}[\\phi(\\hat{x})\\phi(\\hat{x})^T]$ , and the covariance of the data-augmented penultimate layer representation is $\\Sigma := \\mathbb{E}_x[\\phi(x)\\phi(x)^T]$ . The effect of data augmentation on the learned", + "bbox": [ + 169, + 849, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "representation is captured through a symmetric matrix $C \\coloneqq \\Sigma - A_0$ . For a general $\\phi$ , the eigenvalues of $C$ can be either positive or negative. 
When $\\phi$ is the identity mapping, $A_0$ becomes the empirical data covariance, $C$ becomes positive semi-definite and is the covariance of the noise $\\epsilon$ , and $\\Sigma$ is the covariance of the augmented data. In some sense, this loss function captures the essence of SSL: the numerator encourages the representation $f(x)$ to be closer to the representation of similar data, and the denominator encourages a separation between dissimilar data.", + "bbox": [ + 169, + 103, + 826, + 188 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For a fixed set of noises, we can write the InfoNCE in a cleaner form:", + "bbox": [ + 169, + 194, + 632, + 208 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\epsilon} = \\mathbb {E} _ {\\hat {x}} \\left\\{\\frac {1}{2} | f (x) - f \\left(x ^ {\\prime}\\right) | ^ {2} + \\log \\mathbb {E} _ {\\hat {\\chi}} \\left[ \\exp \\left(- \\frac {1}{2} | f (x) - f (\\chi) | ^ {2}\\right) \\right] \\right\\}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 214, + 823, + 247 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where we used $\\mathbb{E}_{\\hat{x}}$ to denote an averaging over the training set.", + "bbox": [ + 169, + 251, + 588, + 267 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this notation, we have $\\mathbb{E}_{\\epsilon}\\mathbb{E}_{\\hat{x}}[x] = \\mathbb{E}_x[x]$ and $\\mathbb{E}_{\\epsilon}[L_{\\epsilon}] = L$ . We first show that the expansion of the loss function around the origin takes a rather universal form. We then find analytical solutions to the stationary points of this landscape and study their relevance to feature learning and collapses. See Table 1 for a summary of the main results. The proofs are presented in Appendix E. For a quantitative understanding, we mainly focus on the case when $\\phi$ is the identity function. 
We discuss the general nonlinear case in Section 4.1.", + "bbox": [ + 169, + 272, + 493, + 439 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 LANDSCAPE OF A LINEAR MODEL", + "text_level": 1, + "bbox": [ + 171, + 452, + 455, + 467 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We first analyze representative SSL loss functions and show that to leading order in $W$ , the local geometry of SSL losses takes the following form", + "bbox": [ + 169, + 474, + 493, + 531 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ \\left| W (x - \\chi) \\right| ^ {2} \\right]. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 535, + 491, + 566 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/6ff6fe7b3c11a62ca709814ff0e28a82c3882af2577f35a1c57f569a23e321c1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
HessianDim.Complete
InfoNCEA0XX
NT-Xent (SimCLR)A0-C/N
Spectral ContrastiveCXX
Barlow TwinsA0+CXX
+ Normalization-X
+ bias-
+ Weight Decay+γI
", + "bbox": [ + 504, + 273, + 826, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1: What shapes the SSL landscapes around the origin? For each of the SSL losses, the combination of data covariance $(A_0)$ , data-augmentation covariance $(C)$ , and dataset size $(N)$ can affect its stability and thus determine the presence $(\\checkmark)$ and absence $(X)$ of dimensional/complete collapse (Here, a $\\checkmark$ means \"there exists a hyperparameter setting and data distribution such that the relevant collapse happens;\" see section 3). Beyond collapses, the theory implies that SCL, whose landscape is formed primarily by data augmentation, is more robust to data imbalance than InfoNCE, which is affected primarily by the data (see section 4).", + "bbox": [ + 501, + 398, + 825, + 564 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A distinctive feature of Eq. (3) is that its first and third-order terms vanish. This is because the loss function is invariant to a left rotation of $W$ . We will see that this symmetry in rotation is a crucial and general feature of the SSL loss functions that allow us to treat them in a universal way. We discuss how rotation symmetry can cause collapses in nonlinear settings in Section 4.", + "bbox": [ + 169, + 571, + 823, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "InfoNCE. The loss function simplifies to:", + "bbox": [ + 171, + 633, + 450, + 648 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL = \\underbrace {\\operatorname {T r} \\left[ W C W ^ {T} \\right]} _ {E} + \\underbrace {\\mathbb {E} _ {\\epsilon , \\hat {x}} \\left\\{\\log \\mathbb {E} _ {\\hat {\\chi}} \\left[ \\exp \\left(- \\frac {1}{2} | W (x - \\chi) | ^ {2}\\right) \\right] \\right\\}} _ {- S}. 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 655, + 825, + 705 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Expanding the entropy term to the fourth order, we obtain1", + "bbox": [ + 171, + 713, + 563, + 729 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n- S = - \\mathbb {E} _ {x} \\mathbb {E} _ {\\chi} \\left[ \\frac {1}{2} | W (x - \\chi) | ^ {2} \\right] + \\frac {1}{8} \\operatorname {V a r} [ | W (x - \\chi) | ^ {2} ] + O (\\| W \\| ^ {6}). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 277, + 734, + 825, + 766 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This (perturbative) decomposition of entropy deserves some special attention. The entropy decomposes into a repulsion term that is second order in $W$ , and a variance term that is fourth order in $W$ . The first term encourages a repulsion between $x$ and its augmentation, which counteracts the effect of the energy term. The repulsion term can be decomposed into", + "bbox": [ + 169, + 772, + 823, + 828 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} _ {x} \\mathbb {E} _ {\\chi} \\left[ \\frac {1}{2} | W (x - \\chi) | ^ {2} \\right] = \\operatorname {T r} \\left[ W C W ^ {T} \\right] + \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right]. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 835, + 823, + 867 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The first term encourages an expansion of $W$ along the direction of the augmentation $C$ , while the second term encourages an expansion along the directions of feature $A_0$ . 
It is intriguing to see", + "bbox": [ + 169, + 871, + 826, + 901 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "1 Throughout, we use $\\|\\cdot\\|$ to denote the $L_{2}$ norm for vectors and Frobenius norm for matrices.", + "bbox": [ + 194, + 909, + 750, + 924 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "that the repulsion term dominates the attraction of the energy term: the motion along the direction of $C$ completely cancels out, and only the expansion along $A_0$ remains. This means that to leading order, the learned representation has a larger variation along the directions where the data has a larger variation, which is what one naively expects. Collecting results, we have obtained the loss landscape in the neighborhood of the origin as $L = -\\mathrm{Tr}[WA_0W^T] + \\frac{1}{8}\\mathrm{Var}[|W(x - \\chi)|^2] + O(\\| W\\|^6)$ .", + "bbox": [ + 169, + 103, + 823, + 176 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "NT-xent (SimCLR). As an additional example, we analyze Normalized Temperature Cross-Entropy loss (NT-xent) used in SimCLR (Chen et al., 2020a). Tian (2022) shows that InfoNCE can be generalized to encompass NT-xent as follows:", + "bbox": [ + 169, + 180, + 823, + 223 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL = \\mathbb {E} _ {\\epsilon} \\left[ - \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp (- | f (x _ {i}) - f (x _ {i} ^ {\\prime}) | ^ {2} / 2)}{\\sum_ {\\chi \\neq x} \\exp (- | f (x _ {i}) - f (\\chi_ {j}) | ^ {2} / 2) + \\alpha \\exp (- | f (x _ {i}) - f (x _ {i} ^ {\\prime}) | ^ {2} / 2)} \\right]. 
\\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 222, + 228, + 823, + 267 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In contrast to InfoNCE, here one of the terms in the denominator is reweighted by a factor of $\\alpha \\geq 0$ . Two interesting limits are $\\alpha = 1$ , where we recover the InfoNCE loss, and $\\alpha = 0$ , where we obtain NT-xent. For general $\\alpha$ , we refer to this loss as the weighted InfoNCE. We will see in section 3 that this weighted InfoNCE can have a mild dimensional collapse problem.", + "bbox": [ + 169, + 268, + 823, + 325 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The same perturbative expansion as Eq. (4)-(6) gives", + "bbox": [ + 171, + 332, + 524, + 348 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL = \\frac {1 - \\alpha}{N} \\operatorname {T r} \\left[ W C W ^ {T} \\right] - \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ \\left| W (x - \\chi) \\right| ^ {2} \\right] + O \\left(\\left\\| W \\right\\| ^ {6}\\right) + O \\left(\\left\\| W \\right\\| ^ {4} N ^ {- 1}\\right). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 178, + 351, + 825, + 380 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Now, the Hessian of the origin is no longer guaranteed to be negative definite. In fact, if $\\frac{1 - \\alpha}{N} C - A_0 \\geq 0$ , $W = 0$ becomes an isolated local minimum.", + "bbox": [ + 169, + 386, + 823, + 415 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Landscape Analysis. The above discussion shows that the common loss landscapes in self-supervised contrastive learning can be reduced to an effective form in Eq. (3). The following proposition shows that the variance term of the loss takes a specific form when the data is Gaussian.", + "bbox": [ + 169, + 421, + 823, + 464 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Proposition 1. 
Let the data and noise be Gaussian. Then, $L = -\\mathrm{Tr}[W B W^T] + \\mathrm{Tr}[W\\Sigma W^T W\\Sigma W^T]$ .", + "bbox": [ + 169, + 465, + 823, + 498 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "When the training ends, one expects the model to locate at (at least close to) a stationary point of the loss. It is thus important to identify all the stationary points of this loss function.", + "bbox": [ + 169, + 508, + 823, + 536 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Theorem 1. Let $d^{*} \\coloneqq \\min(d_{0}, d_{1})$ . Let the data and noise be Gaussian. All stationary points $W$ of Eq. (3) satisfy $W^{T}W = \\frac{1}{2}\\Sigma^{-1/2}UM\\Lambda U^{T}\\Sigma^{-1/2}$ , where $U\\Lambda U^{T}$ is the eigenvalue decomposition of $\\Sigma^{-1/2}B\\Sigma^{-1/2}$ , and $M$ is an arbitrary (masking) diagonal matrix containing only zero or one such that (1) $M_{ii} = 0$ if $\\Lambda_{ii} < 0$ and (2) contain at most $d^{*}$ nonzero terms.", + "bbox": [ + 169, + 539, + 826, + 602 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Additionally, if $C$ and $A_0$ commute, all stationary points satisfy", + "bbox": [ + 169, + 608, + 591, + 625 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1} B _ {M} \\Sigma^ {- 1}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 415, + 627, + 823, + 656 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $B_M$ denotes the matrix obtained by masking the eigenvalues of $B$ with $M$ .", + "bbox": [ + 169, + 660, + 705, + 676 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This stationary-point condition implies the direct cause of the dimensional collapse. Namely, dimensional collapse happens when the eigenvalues of the matrix $B$ become negative. The eigenvalues of $B$ , in turn, depend on the competition between data augmentation and the data feature. 
Comparing the commuting case with the noncommuting case, we see that the main difference is that when $C$ does not commute with $A_0$ , the augmentation can also change the orientation of the learned representation; otherwise, augmentation only affects the eigenvalues. To focus on the most important terms, we now assume that the augmentation is well-aligned with the features such that the augmentation covariance commute with the data covariance.", + "bbox": [ + 169, + 686, + 823, + 797 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Assumption 1. From now on, we assume $CA_0 = A_0C$ .", + "bbox": [ + 171, + 801, + 537, + 816 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the case of weighted InfoNCE, we have that $B = A_0 - \\frac{1 - \\alpha}{N} C$ . Let $a_i$ denote the $i$ -th eigenvalue of the $A$ and $c_i$ that of $C$ viewed in a predetermined order; then, the $i$ th subspace collapses when $\\frac{1 - \\alpha}{N} c_i \\geq a_i$ , namely, when the variation introduced by the noise dominates that of the original data. Importantly, this collapse is a property shared by all stationary points of the landscape, and one cannot hope to fix the problem by, say, biasing the gradient descent towards a certain type of local minima. When weight decay is used, the condition for collapse becomes $\\frac{1 - \\alpha}{N} c_i + \\gamma \\geq a_i$ : it becomes easier to cause a collapse when weight decay is used.", + "bbox": [ + 169, + 825, + 825, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The global minimum of the loss function is also easy to find. For all stationary points, the loss function takes a simple form; $L = -\\frac{1}{4}\\mathrm{Tr}[\\Sigma^2 B_M B]$ . 
Thus, $L$ becomes more and more negative if the eigenvalues of $B_M$ align with the largest eigenvalues of $B$ . Namely, the global minimum is achieved if $M$ leaves the largest eigenvalues of $B$ intact.", + "bbox": [ + 169, + 103, + 823, + 160 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Because the stationary points contain collapsed solutions where the eigenvalues of $W^T W$ are zero, one is naturally interested in how likely it is to converge to these solutions.", + "bbox": [ + 169, + 166, + 823, + 195 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Proposition 2. $(W^T W$ achieves maximum possible rank) Let $m$ denote the number of positive eigenvalues $B$ . Then, $\\mathrm{rank}(W^T W) = \\min(m, d^*)$ for any local minimum.", + "bbox": [ + 169, + 196, + 823, + 226 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This proposition implies that the loss landscape of contrastive SSL (with a linear model) is rather benign because all local minima must achieve a maximum possible rank. In fact, this result implies that the collapses may be well controllable by carefully controlling and tuning the eigenvalues of the matrix $B$ , which directly depends on the nature of the data augmentation we use.", + "bbox": [ + 169, + 234, + 825, + 292 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 LANDSCAPE WITH NORMALIZATION", + "text_level": 1, + "bbox": [ + 171, + 303, + 467, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "It is common in practice to normalize the learned representation such that $\\| f(x) \\|^2 = c$ . When normalization is applied, only the direction of the learned representation matters. While this is a simple trick in practice, its implication on the landscape is poorly understood. 
In this section, we extend our theory to analyze the effect of normalization.", + "bbox": [ + 169, + 325, + 823, + 381 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We model the effect of normalization as a regularization term: $R \\coloneqq (\\mathbb{E}_x\\| f(x)\\| ^2 -c)^2$", + "bbox": [ + 171, + 387, + 743, + 404 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {n o r m}} = E q. (3) + \\kappa R. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 405, + 823, + 420 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that this regularization term achieves two things simultaneously: (1) $\\| f(x) \\|^2 = c$ for all $x$ is a minimizer of the loss function; (2) the regularization is invariant to any rotation of the learned representation. For a linear model, we note that this condition is not entirely the same as a direct normalization of the representation because it is generally impossible to achieve $\\| Wx \\|^2 = c$ for all $x$ because a linear model has limited expressivity. However, it is generally possible to achieve the slightly weaker condition: the representation has a norm 1 on average. This loss function can also be seen as a mathematical model of the VICReg loss (Bardes et al., 2021), where $R$ effectively models the variance regularization term of VICReg loss and $\\kappa$ is its strength. This modeling is necessary because the variance term of the original VICReg is not differentiable and thus cannot be expanded. The proposed term $R$ captures the essence of the variance term because it also encourages the representation to have a constant variance. Our theory also explains why the VICReg is observed to experience collapses when $\\kappa$ is not large enough. As $\\kappa$ tends to infinity, this constraint will become perfectly satisfied. 
We thus take the infinite $\\kappa$ limit to study the effect of normalization.", + "bbox": [ + 169, + 421, + 825, + 602 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The following proposition gives a condition that all stationary points of Eq. (10) satisfy.", + "bbox": [ + 171, + 608, + 746, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Proposition 3. Let $\\rho(W) \\coloneqq \\operatorname{Tr}[W\\Sigma W^T]$ , $B' \\coloneqq B + 2\\kappa(c - \\rho)\\Sigma$ , and let $\\Lambda_i$ be the eigenvalues of $B'$ . Then, every stationary point of Eq. (10) satisfy $W^T W = \\frac{1}{2}\\Sigma^{-1}B_M'\\Sigma^{-1}$ , where $M$ is an arbitrary diagonal mask of the eigenvalues of $B'$ containing only zero or one such that (1) $M_{ii} = 0$ if $\\Lambda_i < 0$ and (2) contain at most $d^*$ nonzero terms.", + "bbox": [ + 169, + 625, + 823, + 684 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compared with the unnormalized case, the term $2\\kappa (1 - \\rho)\\Sigma_{M}$ emerges due to normalization. The effect of normalization is as expected: it shrinks the norm of the model if $\\rho > 1$ , and it expands the model if $\\rho < 1$ , and it does not have any effect if we have already achieved $\\rho = 1$ . Interestingly, this rescaling effect is anisotropic and stronger along the directions of larger eigenvalues of the covariance of the augmented data $\\Sigma$ .", + "bbox": [ + 169, + 693, + 823, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The next theorem gives the explicit form of $\\rho$ at the stationary points.", + "bbox": [ + 171, + 770, + 627, + 785 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Proposition 4. 
For any stationary point $W^{*}$ , $c - \\rho(W^{*}) = \\frac{c - \\frac{1}{2}\\mathrm{Tr}[\\Sigma^{-1}B_{M}]}{1 + \\kappa d_{M}}$ , where $d_{M}$ is the number of non-zero eigenvalues of $B_{M}'$ .", + "bbox": [ + 169, + 787, + 823, + 824 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For a finite $\\kappa$ , these results suggest that collapses can still happen. For VICReg, $B = -A_0$ , and the complete collapse can happen when $\\kappa \\ll \\| A_0\\| /c\\|\\Sigma\\|$ - this explains the experimental observation of collapses for small values of $\\kappa$ in VICReg loss (Bardes et al., 2021).", + "bbox": [ + 169, + 832, + 823, + 875 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Lastly, to understand normalization, we are interested in the case of $\\kappa \\to \\infty$ . Combining Proposition 3 and 4, we have proved the following theorem, showing that the asymptotic solution converges to a form independent of $\\kappa$ .", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Theorem 2. Let $W_{\\kappa}$ be a stationary point of Eq. (10) at fixed $\\kappa$ . Then,", + "bbox": [ + 171, + 103, + 637, + 119 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\lim _ {\\kappa \\rightarrow \\infty} W _ {\\kappa} ^ {T} W _ {\\kappa} = \\frac {1}{2} \\Sigma^ {- 1} \\left[ B _ {M} + \\frac {2 c - \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right]}{d _ {M}} \\Sigma_ {M} \\right] \\Sigma^ {- 1}. 
\\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 125, + 823, + 161 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The correction term $\\frac{2c - \\mathrm{Tr}[\\Sigma^{-1}B_M]}{d_M}\\Sigma_M$ emerges as a result of applying normalization. The effect can be easier to understand if we write the solution as", + "bbox": [ + 169, + 174, + 823, + 208 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {T} W = \\frac {1}{2} \\left[ \\Sigma^ {- 1} B _ {M} - \\frac {\\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right]}{d _ {M}} M + \\frac {2 c}{d _ {M}} \\right] \\Sigma^ {- 1}, \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 212, + 823, + 250 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where we have used the relation $\\Sigma_{M}\\Sigma^{-1} = M$ . Note the term in brackets: it subtracts the average eigenvalue of $\\Sigma^{-1}B_M$ from $\\Sigma^{-1}B_M$ and shifts the remaining eigenvalues positively by $2c / d_{M}$ . Because the eigenvalues of $WW^{T}$ must be positive, the following condition must hold for all solutions:", + "bbox": [ + 169, + 255, + 823, + 300 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {i} + 2 c / d _ {M} > \\bar {\\lambda}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 441, + 305, + 823, + 321 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\lambda_{i}$ are the eigenvalues of $\\Sigma^{-1}B_M$ and $\\bar{\\lambda}$ is its average. Namely, for the $i$ -th dimension not to collapse, it must be smaller than the average eigenvalue by at most $2c / d_{M}$ . Any smaller eigenvalues must collapse. Compared to the case without normalization, normalization makes collapses dependent on the relative strength of each feature and augmentation. In the following discussion, we let $c = 1$ to simplify the discussion. We present a detailed analysis of this condition in Section D.1. 
One finds that the condition for collapse becomes heavily dependent on the data structure, and there are cases where collapses become harder, and there are cases where collapses become much easier. Importantly, it also becomes the case that a sufficiently strong augmentation can always cause a collapse in the corresponding subspace.", + "bbox": [ + 169, + 328, + 826, + 455 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Effect of Bias. Lastly, we study the effect of explicitly having a bias term: $Wx \\rightarrow Wx + b$ . First of all, when there is no normalization, the bias term does not affect the solution because the loss landscape is invariant to a translation in the learned representation. However, this effect dramatically changes if we apply normalization at the same time. This is because normalization removes the translation symmetry of the effective loss, and the trivial solution $W = 0$ , $b = 1$ becomes the simplest way to achieve the norm-1 constraint. Our result shows that the addition of bias dramatically affects the stationary points.", + "bbox": [ + 169, + 460, + 823, + 559 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3. Let $f(x) = Wx + b$ and $\\mathbb{E}[x] = 0$ . Then, all stationary points $W$ satisfy Eq. (9), subject to the constraint that $\\mathrm{Tr}[W^T\\Sigma W]\\leq c$ .", + "bbox": [ + 169, + 561, + 823, + 594 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Namely, the solution reverts to the case where there is no normalization at all, except that the norm of the solution can no longer be larger than $c$ . This upper bound can make collapses much easier to happen. For example, if $c < (a_i - c_i) / (a_i + c_i)$ for all $i$ , a complete collapse can happen despite normalization. When $c = 1$ and $c_i \\ll a_i$ , $\\rho \\approx d_M / 2$ and the constraint indicates that $d_M \\leq 2$ : when the augmentation is very weak, there are at most 2 nontrivial subspaces. 
This is too restrictive for learning a meaningful representation, which helps us understand why dimensional collapse can harm learning in practice. The fact that simple normalization cannot prevent collapse has been noticed for a while for the simplest case of a cosine-similarity loss, and our result explains why previous works have tried to introduce asymmetry to cosine similarity to avoid collapses (Grill et al., 2020; Chen and He, 2021).", + "bbox": [ + 169, + 604, + 825, + 743 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Relevant Loss Functions. Having developed a framework for understanding normalization, we show that other common loss functions in SSL can also be written in the form given in Eq. (3). The spectral contrastive loss (SCL) (HaoChen et al., 2021) reads", + "bbox": [ + 169, + 750, + 823, + 792 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nL _ {S C L} = - 2 \\mathbb {E} [ f (x) ^ {T} f (x ^ {\\prime}) ] + \\mathbb {E} [ (f (x) ^ {T} f (\\chi)) ^ {2} ] + c o n s t. \\quad \\text {s . t .} \\| f (x) \\| ^ {2} = 1. \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 233, + 797, + 823, + 818 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Let $f(x) = Wx$ be linear, the distributions are zero-mean Gaussian, and ignore the normalization. This loss function becomes", + "bbox": [ + 169, + 823, + 823, + 851 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nL _ {S C L} = - 2 \\operatorname {T r} \\left[ W C W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right]. \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 857, + 823, + 876 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "When normalization exists, we can apply the result in Section 3.2. By our argument, there is no collapse in this loss function. 
The difference with InfoNCE loss is that the learned feature spreads along the directions of the augmentation $C$ , not along the directions of the feature $A_0$ .", + "bbox": [ + 169, + 882, + 823, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b6cdba91d2a565539e66ba75f413aca578134d7d83ec9125d2516d70353f36e2.jpg", + "image_caption": [ + "(a) Landscape of ResNet" + ], + "image_footnote": [], + "bbox": [ + 174, + 108, + 330, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0635702b74af3b585911f8fb63feaf16d41cc921156f0b8b6074cc4dd163371a.jpg", + "image_caption": [ + "(b) No collapse" + ], + "image_footnote": [], + "bbox": [ + 343, + 99, + 496, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/760f7caf8872e36e82a2cee351701c1849aae17a3fbcd8d69f68db8cdae1d80e.jpg", + "image_caption": [ + "(c) Dimensional collapse" + ], + "image_footnote": [], + "bbox": [ + 509, + 99, + 661, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3700ed29d4b867983f389d76e342c8dc1cf9f00536ba629c8fcce3994613d2ce.jpg", + "image_caption": [ + "(d) Complete collapse" + ], + "image_footnote": [], + "bbox": [ + 673, + 99, + 823, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/247de21cfec7c7aa2b9b112b93049f8d26a3a94d2541a04acb43288b78b9356b.jpg", + "image_caption": [ + "(e) Landscape of ViT", + "Figure 2: Landscape of Resnet18 (upper) and vision transformers (lower) on CIFAR10 with SimCLR qualitatively agrees with our linear theory. (a) Training objective $L$ as a function of a rescaling of the last layer $W \\rightarrow aW$ . 
(b-d) $L$ as a function of a $2d$ rescaling of the last layer where the data augmentation strength is (b) small, (c) intermediate, and (d) strong. Red indicates areas of high loss, blue indicates areas of low loss, and stars locate local minima. The use of data augmentation changes the stability of the origin, a qualitative change that leads to different types of collapses in qualitative agreement with our linear theory (cf. Figure 1). Additionally, we also notice the same qualitative changes of landscape in simpler nonlinear models (see Appendix A). (e-h) are the same setting but for ViT." + ], + "image_footnote": [], + "bbox": [ + 173, + 220, + 330, + 306 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/99218392843222b7950988269986742fda90110ffc047d25018fe0f4107c9a5c.jpg", + "image_caption": [ + "(f) No collapse" + ], + "image_footnote": [], + "bbox": [ + 343, + 210, + 493, + 306 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/74a020fac9322634c3ab15388122141e788c5a0121465d6f8f5a6eb4821660c8.jpg", + "image_caption": [ + "(g) Dimensional collapse" + ], + "image_footnote": [], + "bbox": [ + 506, + 210, + 656, + 306 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d8c1fcf3738a0556ea7afca13056d481ca2ba01613f438daee1b8479e38e48ad.jpg", + "image_caption": [ + "(h) Complete collapse" + ], + "image_footnote": [], + "bbox": [ + 669, + 210, + 820, + 306 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The case of Barlow Twin (BT) (Zbontar et al., 2021) is similar. While the fourth-order term of BT is much more complicated due to the imbalance created by the $\\lambda$ term. The second-order term can be identified easily: $L_{BT} = -2\\mathrm{Tr}[W\\Sigma W^T] + O(||W||^4)$ . This also does not collapse. 
A difference between the SCL loss and InfoNCE is that the learned representation has a spread that aligns with the combination of the feature and the augmentation strength.", + "bbox": [ + 169, + 444, + 823, + 513 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 IMPLICATIONS", + "text_level": 1, + "bbox": [ + 171, + 530, + 326, + 544 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we explore some theoretical and practical implications of our results. In Appendix Section A, we also present numerical simulations that directly validate the predictions of the theory.", + "bbox": [ + 169, + 558, + 823, + 588 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 RELEVANCE TO NONLINEAR MODELS", + "text_level": 1, + "bbox": [ + 171, + 599, + 478, + 612 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "An important question is how much of the analysis is relevant for deep nonlinear models in general. In fact, the loss landscape we have studied is quite close to the most general landscape one can have. Let $L(f(x))$ be a general SSL loss function for data point $x$ . The quality of the learned representation should be independent of the population-level orientation of the representation. Therefore, the loss function should satisfy a rotational invariance. Namely, for any rotation matrix $R$ , $L(x) = L(Rf(x))$ ; this rotational invariance implies that the loss should expand as $L(f(x)) = af(x)^T f(x) + b[f(x)^T f(x)]^2 + O(f(x)^6)$ . Note that all the odd-order terms of $f(x)$ vanish due to the rotational symmetry. 
Substituting $f(x) = W\\phi(x)$ in the loss function, we obtain a very general form of landscape that $W$ obeys:", + "bbox": [ + 169, + 622, + 823, + 750 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nL (W, \\phi) = \\operatorname {T r} \\left[ W ^ {T} W A \\right] + \\sum W _ {i m} W _ {j m} W _ {k n} W _ {l n} Z _ {i j k i}, \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 758, + 823, + 777 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $A$ and $Z$ are dependent on $\\phi$ . Note how all the examples we have studied take this form. For $W$ , its collapse entirely depends on the stability of the matrix $A$ . Thus the study of the stability of the matrix $A$ becomes crucial for our understanding. To illustrate, we train a Resnet18 on CIFAR10 with the SimCLR loss with normalization and with weight decay strength $10^{-3}$ until convergence to obtain the converged weights $W^{*}$ . The representation has a dimension 128. We rescale the weight matrix of the last layer $W_{\\mathrm{last}}^{*}$ by a factor $a$ and compute the loss as a function of $a$ . See Figure 2-a. We then partition the singular values of $W_{\\mathrm{last}}^{*}$ into the larger half and the smaller half. We rescale the larger half by a factor $r_1$ and the smaller half by $r_2$ . We plot the loss as a $2d$ function of $(r_1, r_2)$ in Figure 2. We also perform experiments for vision transformers (ViT) in the lower row (Dosovitskiy et al., 2020). In all cases, the landscape features qualitative changes comparable to those in Figure 1.", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A connection to Landau theory in physics. 
Those familiar with statistical physics should note that the proposed theory is analogous to the Landau theory of second-order phase transitions. When treating the loss function as the free energy, the square root of the eigenvalues $\\sqrt{\\lambda}$ of $W^T W$ are the order parameters of the system, and the phase transitions happen when $\\lambda$ turns from 0 to positive. These transitions (collapses) happen because of symmetry breaking (Landau and Lifshitz, 2013): the loss function (2) is symmetric in the sign of $W$ . Yet, for any nontrivial learning, $W$ must be nonzero; thus, a symmetry breaking of the sign of $W$ needs to happen for learning. The recent work by Ziyin and Ueda (2022) suggested how symmetry breaking around the origin and Landau theory could explain various types of collapses in deep learning. Therefore, the dimensional collapse could be related to neural collapses in supervised learning (Papyan et al., 2020; Ziyin et al., 2022a) and posterior collapse in Bayesian deep learning (Wang and Ziyin, 2022). Because second-order phase transitions should come with the divergence of the correlation function, one might also wonder what is \"divergent\" in the SSL problem. Here, the learning time scale for the collapsing dimension is divergent at the critical point because the second-order term vanishes in this direction, and so the dynamics are effectively frozen along this direction.", + "bbox": [ + 169, + 103, + 826, + 316 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 ROBUSTLY INDUCING GOOD COLLAPSES", + "text_level": 1, + "bbox": [ + 171, + 327, + 495, + 340 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Contrary to previous works, a recent work (Cosentino et al., 2022) has suggested that dimensional collapse can be beneficial and significantly improve the generalization performance of the model. This observation raises a question. How can dimensional collapse be beneficial and how can it be induced? 
In the following, we first introduce $\\beta$-InfoNCE, which can adjust the degree of dimensional collapse, and analyze the collapse behavior to elucidate the mechanism of task-aligned collapse.", + "bbox": [ + 169, + 349, + 580, + 462 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Adjusting the degree of dimensional collapse with $\\beta$ -InfoNCE. Despite the potential benefit, existing SSL loss functions cannot robustly induce dimensional collapse. InfoNCE is insufficient to induce a collapse, and the collapse induced by SimCLR depends on a vanishingly small parameter $1/N$ . One thus wonders whether there is a loss function that allows us to induce collapsing behavior in a more predictable manner so that one might controllably extract some benefits from collapse. Our result suggests that one way to directly control collapses is through the strength of the competition for the model Hessian at the origin. For InfoNCE, one way to achieve this is to weigh the entropy term by a general factor $\\beta$ :", + "bbox": [ + 169, + 467, + 583, + 648 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left. \\right. \\mathbb {E} _ {x} \\left\\{\\frac {1}{2} | f (x) - f \\left(x ^ {\\prime}\\right) | ^ {2} + \\beta \\log \\mathbb {E} _ {\\chi} \\left[ \\exp \\left(- \\frac {1}{2} | f (x) - f (\\chi) | ^ {2}\\right)\\right]\\right\\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 654, + 588, + 686 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Due to its similarity with the $\\beta$ -VAE in Bayesian learning, we call it the $\\beta$ -InfoNCE. 
The leading term in the loss function becomes", + "bbox": [ + 169, + 689, + 583, + 729 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n- \\operatorname {T r} \\left[ W \\left(A _ {0} - (1 - \\beta) C\\right) W ^ {T} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 729, + 480, + 747 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "When $1 - \\beta > 0$ , the augmentations $C$ pull the representation towards zero. When the augmentation is as strong as the fea-", + "bbox": [ + 169, + 750, + 583, + 777 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ture variations, a collapse happens. One can thus introduce collapse by setting $\\beta$ to be sufficiently small. When $1 - \\beta < 0$ , the augmentations push the weights away from the origin along its direction, resulting in no collapse at all: When one really wants to avoid collapse, one can use a rather large $\\beta$ ; $\\beta = 1$ is thus at the boundary of this bifurcating behavior. We note that existing loss functions often do not have a parameter that is directly controlling the collapse behavior (see Table 1). The $\\beta$ parameter here directly controls the level of difficulty of collapse.", + "bbox": [ + 169, + 777, + 823, + 862 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Achieving invariance with dimensional collapse. Here, we closely study an illustrative minimal example to demonstrate how collapses can be beneficial. Consider the following structured data generating process where the input features can be separated into two sets: (1) a task-relevant set with dimension $d_{c} < d_{0}$ and (2) a task-irrelevant set: $x = (x_{1},\\dots,x_{d_{c}},\\dots,x_{d_{0}})$ . 
Our result suggests", + "bbox": [ + 169, + 867, + 823, + 926 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/151d2f5ed48da89f2bf707f660fd4d9553cc4bfa4b0a668bd6b3db9ea7c6532f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 594, + 329, + 823, + 474 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ba1c6e376e5802b03cc41f400c616a162ddd4953464210016db0340c31011fdf.jpg", + "image_caption": [ + "Figure 3: Top: Phase diagram of representational collapses. Bottom: $\\beta$ -InfoNCE with $\\beta = 0.5$ . The generalization error of a downstream regression task where the data augmentation (1) is isotropic and noninformative or (2) aligns with the style. We see that the performance worsens as collapses happen for the noninformative augmentation and improves as the collapse happens for the style-targeting augmentation." + ], + "image_footnote": [], + "bbox": [ + 594, + 476, + 821, + 611 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "a precise way to remove the irrelevant features from the learned representation. For the purpose of causing a robust collapse, we use the $\\beta$ -InfoNCE with $\\beta = 1/2$ . For illustration, we consider the simple case $d_c = 1$ and $d_0 = 2$ . For any input $x = (x_1, x_2)$ , the label is generated as a linear function of $x_1$ : $y = cx_1$ .", + "bbox": [ + 169, + 103, + 823, + 160 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Correspondingly, we consider a structured data augmentation $x = \\hat{x} + \\sigma R\\xi$ , where $R \\in \\mathbb{R}^{d_0 \\times d_0}$ is $R = \\text{diag}(\\sqrt{1 - \\theta}, \\sqrt{\\theta})$ , where $\\theta \\in [0,1]$ . 
The parameter $\\sigma$ controls the overall strength of the augmentation, and $\\theta$ controls the orientation of the strength. When $\\theta = 0.5$ , we have an uninformative isotropic noise that has often been used in practice. When $\\theta = 1$ , the augmentation is only on the task-irrelevant feature, and when $\\theta = 0$ , the augmentation is only on the content. Since the prediction target only depends on the content, we want to learn a representation invariant to the style. For the downstream regression task, we use the learned representations $z := f(\\hat{x})$ to train a ridge linear regressor that minimizes $\\min_G \\mathbb{E}_{\\hat{x}}[||Gz - y(\\hat{x})||^2] + 0.001||G||^2$ . See Figure 3. The top panel shows the phase diagram of this problem with different combinations of the augmentation strengths and orientations. The bottom panel shows that collapses introduce phase-transition-like behaviors in the generalization performance and that a data augmentation aligning with the task-irrelevant dimension improves performance.", + "bbox": [ + 169, + 165, + 826, + 335 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 ROBUSTNESS TO DATA IMBALANCE", + "text_level": 1, + "bbox": [ + 171, + 349, + 464, + 364 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our theory is not only relevant for understanding collapses but can also be used to understand how an SSL model encodes the feature. Liu et al. (2021) recently showed that compared with supervised learning, SSL techniques are relatively more robust to imbalanced datasets that have disproportionately represented minority subgroups. As another application of our analysis, we illustrate the robustness of different techniques is not equal. As we have seen, the learned model $W^T W$ has eigenvalues that, to the leading order, are proportional to the Hessian $B$ , which is different for each loss function. 
As previously summarized in Table 1, for InfoNCE and SimCLR, the learned model aligns", + "bbox": [ + 169, + 375, + 485, + 583 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/98faacf0f58a577c4eaa29fb3811d104d74c5162d8911fbecf1fc66f2bd95273.jpg", + "image_caption": [ + "Figure 4: Spectral Contrastive loss (SCL) is more robust against data imbalance than InfoNCE. We train SimCLR and SCL ResNet-12 models on imbalanced versions of CIFAR-10. We see that SCL is more robust than SimCLR, as suggested by our theory. These results are especially pronounced when there is no projector head." + ], + "image_footnote": [], + "bbox": [ + 496, + 335, + 823, + 484 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "with the eigenvalues of the data covariance $A_0$ , which varies hugely as different classes of a dataset become more and more imbalanced. In comparison, the model trained with SCL aligns purely with the augmentation covariance $C$ , which is independent of the data imbalance. This suggests that the SCL landscape can be less dependent on data and thus more robust against data imbalance. See Figure 4. More experimental details are given in Appendix C.", + "bbox": [ + 169, + 583, + 823, + 654 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 666, + 320, + 681 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we approached the problem of collapses in SSL from a loss landscape perspective. We analytically solved an effective landscape that can be extended to understand the effect of normalization. Our result suggests that dimensional collapse can be well understood in the minimal setting and is something neutral to learning on its own. 
With the help of the theory, we also showed that when task-irrelevant dimensions are targeted, dimensional collapse can result in improved performance, whereas an uninformative noise will (without good luck) lead to collapses in the dimensions that are relevant to the task. It is thus important for practitioners to devise targeted data augmentation mechanisms that incorporate the correct domain knowledge. Also, we advocated the thesis that the local geometry of the loss landscape around the origin is an essential component for understanding collapses, and this should invite more future work to understand the landscape around the origin.", + "bbox": [ + 169, + 694, + 826, + 834 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The limitation of our work is clear; our result only identifies the causes of the collapse that can be directly attributed to the low-rank structure of the local minima of the landscape. One possible alternative cause of the collapse is dynamics. For example, having a large learning rate and small batch can sometimes cause a convergence towards the saddle points in the landscape (Ziyin et al., 2022b), which, as we have shown, are the collapsed solutions. Investigating the role of dynamics in the collapse is thus a crucial future problem.", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 369, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This work was supported by a KAKENHI Grant No. JP18H01145 from the Japan Society for the Promotion of Science. 
Ziyin has been financially supported by the JSPS fellowship and thanks Zihan for the generous help during the writing of this paper. ESL was partially supported via NSF under the award CNS-2008151.", + "bbox": [ + 171, + 132, + 826, + 189 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 210, + 287, + 224 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Arora, S., Khandeparkar, H., Khodak, M., Plevrakis, O., and Saunshi, N. (2019). A Theoretical Analysis of Contrastive Unsupervised Representation Learning. In Proc. Int. Conf. on Machine Learning (ICML).", + "Bardes, A., Ponce, J., and LeCun, Y. (2021). Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906.", + "Caron, M., Misra, I., Mairal, J., Goyal, P., Bojanowski, P., and Joulin, A. (2020). Unsupervised Learning of Visual Features by Contrasting Cluster Assignments. In Proc. Adv. on Neural Information Processing Systems (NeurIPS).", + "Caron, M., Touvron, H., Misra, I., Jegou, H., Mairal, J., Bojanowski, P., and Joulin, A. (2021). Emerging Properties in Self-Supervised Vision Transformer. arXiv, abs/2104.14294.", + "Chen, T., Kornblith, S., Norouzi, M., and Hinton, G. (2020a). A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR.", + "Chen, T., Kornblith, S., Swersky, K., Norouzi, M., and Hinton, G. E. (2020b). Big Self-Supervised Models are Strong Semi-Supervised Learners. Adv. in Neural Information Processing Systems, 33.", + "Chen, X. and He, K. (2021). Exploring Simple Siamese Representation Learning. In Proc. Int. Conf. on Computer Vision and Pattern Recognition (CVPR).", + "Cosentino, R., Sengupta, A., Avestimehr, S., Soltanolkotabi, M., Ortega, A., Willke, T., and Tepper, M. (2022). 
Toward a geometrical understanding of self-supervised contrastive learning. arXiv preprint arXiv:2205.06926.", + "Dayan, P. and Abbott, L. F. (2005). Theoretical neuroscience: computational and mathematical modeling of neural systems. MIT press.", + "Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al. (2020). An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929.", + "Ermolov, A., Siarohin, A., Sangineto, E., and Sebe, N. (2021). Whitening for self-supervised representation learning. In International Conference on Machine Learning, pages 3015-3024. PMLR.", + "Grill, J.-B., Strub, F., Altché, F., Tallec, C., Richemond, P., Buchatskaya, E., Doersch, C., Avila Pires, B., Guo, Z., Gheshlaghi Azar, M., Piot, B., kavukcuoglu, k., Munos, R., and Valko, M. (2020). Bootstrap your own latent: A new approach to self-supervised Learning. In Proc. Adv. on Neural Information Processing Systems (NeurIPS).", + "HaoChen, J. Z., Wei, C., Gaidon, A., and Ma, T. (2021). Provable guarantees for self-supervised deep learning with spectral contrastive loss. Advances in Neural Information Processing Systems, 34:5000-5011.", + "He, K., Fan, H., Wu, Y., Xie, S., and Girschick, R. (2020). Momentum Contrast for Unsupervised Visual Representation Learning. In Proc. Int. Conf. on Computer Vision and Pattern Recognition (CVPR).", + "Hsu, H., Qi, H., and Brown, M. (2019). Measuring the Effects of Non-Identical Data Distribution for Federated Visual Classification. arXiv, abs/1909.06335." 
+ ], + "bbox": [ + 171, + 233, + 825, + 922 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Hua, T., Wang, W., Xue, Z., Ren, S., Wang, Y., and Zhao, H. (2021). On feature decorrelation in self-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9598-9608.", + "Jing, L., Vincent, P., LeCun, Y., and Tian, Y. (2021). Understanding dimensional collapse in contrastive self-supervised learning. arXiv preprint arXiv:2110.09348.", + "Kugelgen, J., Sharma, Y., Gresle, L., Brendel, W., Scholkopf, B., Besserve, M., and Locatello, F. (2021). Self-Supervised Learning with Data Augmentations Provably Isolates Content from Style. arXiv, abs/2106.04619.", + "Landau, L. D. and Lifshitz, E. M. (2013). Statistical Physics: Volume 5, volume 5. Elsevier.", + "Liu, H., HaoChen, J. Z., Gaidon, A., and Ma, T. (2021). Self-supervised learning is more robust to dataset imbalance. International Conference on Learning Representations.", + "Mitrovic, J., McWilliams, B., Walker, J., Buesing, L., and Blundell, C. (2020). Representation learning via invariant causal mechanisms. arXiv preprint arXiv:2010.07922.", + "Nozawa, K. and Sato, I. (2021). Understanding negative samples in instance discriminative self-supervised representation learning. Advances in Neural Information Processing Systems, 34:5784-5797.", + "Oord, A. v. d., Li, Y., and Vinyals, O. (2018). Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748.", + "Papyan, V., Han, X., and Donoho, D. L. (2020). Prevalence of neural collapse during the terminal phase of deep learning training. 
Proceedings of the National Academy of Sciences, 117(40):24652-24663.", + "Pokle, A., Tian, J., Li, Y., and Risteski, A. (2022). Contrasting the landscape of contrastive and non-contrastive learning. arXiv preprint arXiv:2203.15702.", + "Robinson, J., Sun, L., Yu, K., Batmanghelich, K., Jegelka, S., and Sra, S. (2021). Can contrastive learning avoid shortcut solutions? Advances in neural information processing systems, 34:4974-4986.", + "Saunshi, N., Ash, J., Goel, S., Misra, D., Zhang, C., Arora, S., Kakade, S., and Krishnamurthy, A. (2022). Understanding Contrastive Learning Requires Incorporating Inductive Biases. In Proc. Int. Conf. on Machine Learning (ICML).", + "Simon, J. B., Knutins, M., Ziyin, L., Geisz, D., Fetterman, A. J., and Albrecht, J. (2023). On the stepwise nature of self-supervised learning. arXiv preprint arXiv:2303.15438.", + "Tian, Y. (2022). Deep contrastive learning is provably (almost) principal component analysis. arXiv preprint arXiv:2201.12680.", + "Tian, Y., Chen, X., and Ganguli, S. (2021). Understanding self-supervised Learning Dynamics without Contrastive Pairs. In Proc. Int. Conf. on Machine Learning (ICML).", + "Tian, Y., Sun, C., Poole, B., Krishnan, D., Schmid, C., and Isola, P. (2020). What makes for good views for contrastive learning? Advances in Neural Information Processing Systems, 33:6827-6839.", + "Tosh, C., Krishnamurthy, A., and Hsu, D. (2021). Contrastive estimation reveals topic posterior information to linear models. J. Mach. Learn. Res., 22:281-1.", + "Trivedi, P., Lubana, E. S., Heimann, M., Koutra, D., and Thiagarajan, J. J. (2022). Analyzing data-centric properties for contrastive learning on graphs. arXiv preprint arXiv:2208.02810.", + "Tsai, Y.-H., Wu, Y., Salakhutdinov, R., and Morency, L.-P. (2021a). Self-supervised Learning from a Multi-view Perspective. In Proc. Int. Conf. on Learning Representations (ICLR)." 
+ ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tsai, Y.-H. H., Ma, M. Q., Yang, M., Zhao, H., Morency, L.-P., and Salakhutdinov, R. (2021b). Self-supervised representation learning with relative predictive coding. International Conference on Learning Representations.", + "Wang, F. and Liu, H. (2021). Understanding the behaviour of contrastive loss. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2495-2504.", + "Wang, T. and Isola, P. (2020). Understanding Contrastive Representation Learning through Alignment and Uniformity on the Hypersphere. In Proc. Int. Conf. on Machine Learning (ICML).", + "Wang, Y., Zhang, Q., Wang, Y., Yang, J., and Lin, Z. (2022). Chaos is a ladder: A new theoretical understanding of contrastive learning via augmentation overlap. International Conference on Learning Representations.", + "Wang, Z. and Ziyin, L. (2022). Posterior collapse of a linear latent variable model. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems.", + "Wei, C., Shen, K., Chen, Y., and Ma, T. (2021). Theoretical Analysis of Self-Training with Deep Networks on Unlabeled Data. In Proc. Int. Conf. on Learning Representations (ICLR).", + "Zbontar, J., Jing, L., Misra, I., LeCun, Y., and Deny, S. (2021). Barlow twins: Self-supervised learning via redundancy reduction. In International Conference on Machine Learning, pages 12310-12320. PMLR.", + "Zimmerman, R., Sharma, Y., Schneider, S., Bethge, M., and Brendel, W. (2021). Contrastive Learning Inverts the Data Generating Process. In Proc. Int. Conf. 
on Machine Learning (ICML).", + "Ziyin, L., Li, B., and Meng, X. (2022a). Exact solutions of a deep linear network. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems.", + "Ziyin, L., Li, B., Simon, J. B., and Ueda, M. (2022b). SGD can converge to local maxima. In International Conference on Learning Representations.", + "Ziyin, L. and Ueda, M. (2022). Exact phase transitions in deep learning. arXiv preprint arXiv:2205.12510." + ], + "bbox": [ + 171, + 102, + 825, + 578 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A ADDITIONAL NUMERICAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 511, + 118 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this section, we validate our theory with numerical results. Unless specified otherwise, the dimension of the learned representation is set to be equal to the input dimension: $d_0 = d_1$ .", + "bbox": [ + 169, + 133, + 823, + 162 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "No Collapse for InfoNCE. We showed that there is no collapse at all for the vanilla InfoNCE, no matter how strong the augmentation is. Our result implies that the smallest singular of the model $W$ scales as $\\sigma^4$ where $\\sigma^2$ is the strength (namely, the variance) of the augmentation. See the left panel of Fig. 5. We use the vanilla InfoNCE loss defined in (1) with a linear model. The training set is sampled from $\\mathcal{N}(0, I_{32})$ . The training proceeds with Adam with a learning rate of $6e - 4$ with full batch training for 5000 iterations. We use a simple diagonal Gaussian noise with variance $\\sigma^2$ for data augmentation. 
We see that the singular values scale as $\\sigma^4$ and never vanish, as the theory predicts.
To facilitate comparison, we also use the same dataset and training procedure as before. See Figure 6. We see that normalization does cause a collapse in the smallest eigenvalues at an augmentation strength much smaller than the feature variation.", + "bbox": [ + 169, + 500, + 826, + 571 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B LANDSCAPE OF A NONLINEAR MODEL", + "text_level": 1, + "bbox": [ + 171, + 590, + 532, + 606 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this section, we plot the landscape of the layer of nonlinear models on the same synthetic dataset we outlined in the previous section. We train a three-layer nonlinear network with output dimension 2 with SGD until convergence. We then rescale the optimized weight of the last by a factor $a$ : $W_{last} \\rightarrow aW_{last}$ and plot the loss function along this direction. See the top panel of Figure 7 for", + "bbox": [ + 169, + 621, + 826, + 679 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/ccc7996109f2be90d2989e39053cb4dc258e6124b3ce2766def4469ed82262d4.jpg", + "image_caption": [ + "Figure 5: The three smallest singular values of $W^T W$ as a function of the augmentation strength. We see that our effective landscape theory around the origin accurately captures collapses in learning. Left: Vanilla InfoNCE. As the theory suggests, the singular values scale as $\\sigma^4$ and do not vanish for any finite value of $\\sigma$ . Mid: Weight InfoNCE. $\\alpha = 0.1$ , $\\sigma = 5$ . Collapse happens at the critical dataset size predicted by the theory. Right: (Sqrt) Eigenvalues of $WW^T$ in $\\beta$ -InfoNCE. The collapses can be well controlled." 
+ ], + "image_footnote": [], + "bbox": [ + 204, + 704, + 395, + 820 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/99881e58f3d4b4bcc3669742d31c4f8e0145f69c41e93cfa315ed27252276a3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 401, + 703, + 598, + 820 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/b844c5ec134326f0df3db25f6a54bb2e3d3f33830b6942b46d8f8c0d95e40847.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 700, + 795, + 820 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/cd7b65731d8ca087e5133850229933d5468111b18b4cb5f7284188c5ff53b0c1.jpg", + "image_caption": [ + "Figure 6: A collapse happens easily when the learned representation is normalized. The smallest eigenvalues of $A_0$ are roughly 0.2, and the collapse happens much before the noise reaches this strength." + ], + "image_footnote": [], + "bbox": [ + 400, + 103, + 604, + 224 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/2b537a60334e685645cae422322617aea2f6b2646af91a0e882bede9eddf0800.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 403, + 297, + 598, + 411 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/562a004beb8484fd766e2416e9b1d947231c3209ae9008d89406e86bbfd73d32.jpg", + "image_caption": [ + "Figure 7: The Landscape of nonlinear models is very similar to the landscape of linear models (cf. Figure 1). Top: 1d projection of the landscape of a two-layer tanh and ReLU network. Bottom Left: the landscape of a 2D projection of the last layer of a nonlinear model with a weak augmentation. Middle: with intermediate augmentation. Right: with strong augmentation." 
+ ], + "image_footnote": [], + "bbox": [ + 187, + 415, + 390, + 526 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/b73c66122800f71f6b2bd952d5c6f2d5f1490c04535ec96a92a152fd1bdcab3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 398, + 415, + 602, + 527 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/f41769dfc9310c2e798a0bc14d334452726fc74b2d23497988985f2779b3188c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 607, + 415, + 812, + 526 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "both the tanh and the ReLU nonlinearity. We then rescale the two rows of the weight matrix of the model by $r_1$ and $r_2$ respectively: $W = (w_{1},W_{2})^{T}\\rightarrow (r_{1}w_{1},r_{2}w_{2})$ . We see that the landscape of the model is qualitatively the same as that of the linear models, shown in Figure 1.", + "bbox": [ + 169, + 623, + 826, + 667 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C SETUP FOR IMBALANCED DATA EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 686, + 599, + 702 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Creating an Imbalanced Dataset: For our experiments measuring the influence on imbalanced datasets on SSL training, we use CIFAR-10 by sampling 20000 samples out of the 50000 training samples. The sampling process is described by a Dirichlet distribution and is often used to analyze effects of heterogeneity and data imbalance in Federated Learning problems (Hsu et al., 2019). Specifically, a small value of the distribution parameter yields a highly imbalanced dataset, while a large value yields a perfectly balanced dataset. 
We evaluate our models in three scenarios, for which we report below the number of samples per class:", + "bbox": [ + 169, + 718, + 823, + 816 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "High imbalance: [4890, 87, 5000, 0, 74, 0, 0, 212, 4788, 4947]", + "- Medium imbalance: [4268, 4296, 1741, 420, 945, 161, 4633, 1015, 131, 2386]", + "- No imbalance: [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000]" + ], + "bbox": [ + 178, + 821, + 709, + 875 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Training Setup: We use ResNet-12 models as the backbone for all experiments due to computational constraints. SimCLR augmentations (Chen et al., 2020a) are followed, except for a reduced strength of resized cropping from 0.2 to 0.5. All training involves a standardly used cosine decay learning", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "rate schedule, starting at 0.03 and decaying to 0.001. When a projector module is used, it involves a two-layer MLP with hidden dimension of 512 and BatchNorm layer in between. We use SGD for optimization and perform the standardly used linear evaluation protocol for measuring the quality of the final representation. For training the linear layer, we use an initial learning rate of 10 and decay it to 0.01 with a cosine schedule. We note linear evaluation is used for supervised models as well, following the practice advocated by Liu et al. 
(2021).", + "bbox": [ + 169, + 103, + 826, + 189 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D ADDITIONAL THEORETICAL CONCERNS", + "text_level": 1, + "bbox": [ + 171, + 213, + 547, + 229 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D.1 COLLAPSE CONDITION FOR NORMALIZATION", + "text_level": 1, + "bbox": [ + 171, + 248, + 532, + 262 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The important condition for collapse in Eq. (13) can be better understood by considering the extreme cases. First of all, note that the eigenvalues of $\\Sigma B_{M}$ are bounded between $-1$ and $1$", + "bbox": [ + 169, + 276, + 823, + 306 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n- 1 \\leq \\frac {a _ {i} - c _ {i}}{a _ {i} + c _ {i}} \\leq 1, \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 439, + 316, + 825, + 347 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "and $-1$ is achieved when $c_{i} \\gg a_{i}$ , and 1 is achieved when $a_{i} \\gg c_{i}$ .", + "bbox": [ + 169, + 356, + 609, + 372 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "When the augmentation is negligibly small, $\\Sigma^{-1}B_M\\approx M$ , and $\\lambda_{i}\\approx \\bar{\\lambda} = 1$ , the condition thus becomes", + "bbox": [ + 169, + 377, + 823, + 405 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {2}{d _ {M}} > 0, \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 467, + 409, + 825, + 440 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "which always holds. Thus, a sufficiently small augmentation will never cause collapse. 
Next, when we apply very strong augmentation to the $j$ -th subspace and zero augmentation to the others, the condition for the non-augmented spaces becomes", + "bbox": [ + 169, + 449, + 823, + 492 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n1 + \\frac {2}{d _ {M}} > \\frac {d _ {M} - 2}{d _ {M}}, \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 431, + 503, + 825, + 536 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "meaning that the collapse will not happen. For the $j$ -th space, the condition is", + "bbox": [ + 169, + 546, + 683, + 561 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n- 1 + \\frac {2}{d _ {M}} > \\frac {d _ {M} - 2}{d _ {M}} (\\Longleftrightarrow) \\frac {4}{d _ {M}} > 2, \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 573, + 825, + 606 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "which is only possible when $d_M = 1$ , namely, the strongly augmented space is the only space that does not collapse. This is reasonable when the original data is rank-1 because the normalization will ensure that this space does not collapse, but when the original data is not rank-1, this stationary point will be a saddle and will not be preferred by gradient descent. In different words, a strong enough augmentation will cause a collapse in the corresponding subspace, as is the case without normalization.", + "bbox": [ + 169, + 614, + 823, + 699 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "It is also interesting to note that having $c_{i} \\geq a_{i}$ is no longer sufficient to cause a collapse. For example, let $c_{1} = 0$ and $c_{j} = a_{j}$ for $j \\neq 1$ . 
The condition for $j \\neq 1$ becomes", + "bbox": [ + 169, + 705, + 826, + 736 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {2}{d _ {M}} > \\frac {1}{d _ {M}}, \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 457, + 747, + 825, + 779 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "which always holds. At the same time, it does not mean that collapsing has become harder in general. For example, it is also possible for $c_{i} < a_{i}$ to cause a collapse. Suppose we add a weak augmentation only to the first subspace such that $a_{i} - c_{i} = \\epsilon > 0$ , the condition for this dimension to not to collapse is", + "bbox": [ + 169, + 789, + 823, + 844 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\epsilon}{a _ {i} + c _ {i}} + \\frac {2}{d _ {M}} > \\frac {d _ {M} - 1 + \\epsilon}{d _ {M}}, \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 848, + 823, + 880 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "which can be violated whenever $\\epsilon < \\frac{(a_i + c_i)(d_M - 3)}{a_i + c_i + d_m}$ . Namely, in some cases, normalization can in fact facilitate collapse.", + "bbox": [ + 169, + 890, + 823, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E PROOFS", + "text_level": 1, + "bbox": [ + 171, + 102, + 276, + 118 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "E.1 PROOF OF PROPOSITION 1", + "text_level": 1, + "bbox": [ + 171, + 133, + 398, + 148 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Proof. The second term in Eq. 
(3) can be written as", + "bbox": [ + 171, + 159, + 513, + 175 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right] = \\mathbb {E} \\left[ \\left(\\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right]\\right) ^ {2} \\right] - \\mathbb {E} \\left[ \\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right] \\right] ^ {2} (24) \\\\ = [ \\text {f i r s t} \\quad \\text {t e r m} ] - 4 \\operatorname {T r} [ W (A _ {0} + C) W ^ {T} ] ^ {2} (25) \\\\ = [ f i r s t \\text {t e r m} ] - 4 \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2}, (26) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 181, + 823, + 258 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where we have used the definition $\\Sigma = A_0 + C$ . The first term is", + "bbox": [ + 171, + 263, + 594, + 279 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n[ f i r s t \\text {t e r m} ] = \\mathbb {E} \\left[ \\left(\\operatorname {T r} [ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} ]\\right) ^ {2} \\right] = 4 \\operatorname {T r} [ W \\Sigma W ^ {T} ] ^ {2} + 8 \\operatorname {T r} [ W \\Sigma W ^ {T} W \\Sigma W ^ {T} ]. \\tag {27}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 284, + 823, + 315 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Combining the above expressions, we see that Eq. (3) can be written as", + "bbox": [ + 171, + 315, + 640, + 330 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right] (28) \\\\ = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right]. 
(29) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 337, + 823, + 383 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "This finishes the proof. $\\square$", + "bbox": [ + 171, + 390, + 344, + 406 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "E.2 PROOF OF THEOREM 1", + "text_level": 1, + "bbox": [ + 171, + 422, + 374, + 436 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Proof. All stationary points have a zero gradient:", + "bbox": [ + 171, + 448, + 496, + 463 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n- 2 W B + 4 W \\Sigma W ^ {T} W \\Sigma = 0. \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 470, + 823, + 486 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Multiplying by $W^T$ on the left and $B^{-1}$ on the right,", + "bbox": [ + 171, + 492, + 521, + 508 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {T} W = 2 W ^ {T} W \\Sigma W ^ {T} W \\Sigma B ^ {- 1} \\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 392, + 515, + 823, + 532 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\Longleftrightarrow\\right) \\quad \\Sigma^ {1 / 2} W ^ {T} W \\Sigma^ {1 / 2} = 2 \\Sigma^ {1 / 2} W ^ {T} W \\Sigma W ^ {T} W \\Sigma B ^ {- 1} \\Sigma^ {1 / 2} \\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 537, + 823, + 555 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Defining $H \\coloneqq \\Sigma^{1/2} W^T W \\Sigma^{1/2}$ , we obtain", + "bbox": [ + 171, + 560, + 455, + 575 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nH = 2 H ^ {2} \\Sigma^ {1 / 2} \\Sigma B ^ {- 1} \\Sigma^ {1 / 2}, \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 415, + 584, + 823, + 599 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\Longleftrightarrow\\right) \\quad H \\left(I - 2 H \\Sigma^ {1 / 2} B ^ {- 1} \\Sigma^ {1 / 
2}\\right) = 0. \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 607, + 823, + 625 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Because both $H$ and $\\Sigma^{1/2}\\Sigma B^{-1}\\Sigma^{1/2}$ are symmetric, one can take the transpose of Eq. (33) to find that $H$ and $\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}$ commute with each, which implies that $H$ has the same eigenvectors as $\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}/2$ .", + "bbox": [ + 169, + 628, + 823, + 676 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Eq. (34) then implies that the eigenvalues of $H$ is either the inverse of that of $\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}$ or zero. This implies that any stationary point of $H$ can be written in the form", + "bbox": [ + 169, + 683, + 823, + 713 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nH = \\frac {1}{2} U M \\Lambda U ^ {T}, \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 441, + 720, + 823, + 748 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $U$ is a unitary matrix, $\\Lambda$ is diagonal matrix containing the eigenvalues of $\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}$ , and $M$ is an arbitrary (masking) diagonal matrix containing only zero or one such that (1) $M_{ii} = 0$ if $\\Lambda_{ii} < 0$ and (2) contain at most $d^*$ nonzero terms. This then implies that the weight matrix $W$ satisfies", + "bbox": [ + 171, + 755, + 825, + 810 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1 / 2} U M \\Lambda U ^ {T} \\Sigma^ {- 1 / 2}. 
\\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 809, + 823, + 837 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lastly, when $\\Sigma$ and $B$ commute, we can compactly write the result as", + "bbox": [ + 171, + 839, + 630, + 854 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1} B _ {M} \\Sigma^ {- 1}, \\tag {37}\n$$\n", + "text_format": "latex", + "bbox": [ + 418, + 861, + 823, + 888 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $B_{M}$ denotes the matrix obtained by masking the eigenvalues of $B$ with $M$ . This finishes the proof.", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 509, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "E.3 PROOF OF PROPOSITION 2", + "text_level": 1, + "bbox": [ + 171, + 103, + 398, + 118 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. For all stationary points, $W^T W$ commutes with $B$ and $\\Sigma$ , which means that at these stationary points, one can simultaneously diagonalize all the matrices and the loss function (3) can be written as", + "bbox": [ + 169, + 128, + 823, + 170 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nL = - \\sum_ {i = 1} ^ {d ^ {*}} \\lambda_ {i} b _ {i} + \\lambda_ {i} ^ {2} s _ {i} ^ {2} \\tag {38}\n$$\n", + "text_format": "latex", + "bbox": [ + 426, + 170, + 823, + 205 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $\\lambda_{i}, b_{i}, s_{i}$ are the eigenvalues of $W^{T}W$ , $B$ , and $\\Sigma$ respectively.", + "bbox": [ + 169, + 209, + 627, + 224 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We can thus consider each $i$ separately. 
When $b_{i} > 0$ , $\\lambda_{i} = 0$ cannot be a local minimum because the local Hessian is $-b_{i} < 0$ . When $b_{i} \\leq 0$ , the only stationary point is $\\lambda_{i} = 0$ . This sum covers at most $d^{*}$ summands, and so, at the local minima, $\\lambda_{i} \\neq$ if and only if $b_{i} > 0$ , and so the number of non-zero eigenvalues of $W^{T}W$ is $\\min(m, d^{*})$ .", + "bbox": [ + 169, + 229, + 823, + 287 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E.4 PROOF OF PROPOSITION 3", + "text_level": 1, + "bbox": [ + 171, + 303, + 398, + 316 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. The regularization can be written as", + "bbox": [ + 171, + 329, + 455, + 344 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} R = \\left[ \\left(\\mathbb {E} _ {x} \\| W x \\| ^ {2} - c\\right) ^ {2} \\right] (39) \\\\ = \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2} - 2 c \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] + c ^ {2}. (40) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 348, + 823, + 388 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "By Proposition 1, Eq. (10) reads", + "bbox": [ + 171, + 398, + 388, + 412 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right] + \\kappa \\left(\\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2} - 2 \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] + 1\\right) (41) \\\\ = - \\operatorname {T r} \\left[ W (B + 2 \\kappa c \\Sigma) W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right] + \\kappa \\rho^ {2}. 
(42) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 417, + 823, + 457 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The derivative of $\\rho$ is", + "bbox": [ + 171, + 460, + 315, + 474 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d}{d W} \\rho = 4 \\rho W \\Sigma . \\tag {43}\n$$\n", + "text_format": "latex", + "bbox": [ + 444, + 474, + 823, + 503 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The zero-gradient gradient is thus", + "bbox": [ + 171, + 503, + 395, + 518 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n- 2 W (B + 2 \\kappa c \\Sigma - 2 \\kappa \\rho \\Sigma) + 4 W \\Sigma W ^ {T} W \\Sigma = 0. \\tag {44}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 523, + 823, + 542 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We can define $B' \\coloneqq B + 2\\kappa c\\Sigma - 2\\kappa \\rho \\Sigma$ to see that this condition is the same as Eq. (30) in the proof of Theorem 1. The rest of the proof thus follows from the arguments. We thus arrive at the theorem statement:", + "bbox": [ + 169, + 545, + 823, + 585 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1} B _ {M} ^ {\\prime} \\Sigma^ {- 1}. \\tag {45}\n$$\n", + "text_format": "latex", + "bbox": [ + 416, + 585, + 823, + 613 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We are done. $\\square$", + "bbox": [ + 171, + 614, + 277, + 628 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E.5 PROOF OF PROPOSITION 4", + "text_level": 1, + "bbox": [ + 171, + 645, + 398, + 657 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. 
Recalling that $\\rho = \\mathrm{Tr}[W\\Sigma W^T]$ , we multiply $\\Sigma$ from the right to both sides of the solution in Proposition 3 and take trace:", + "bbox": [ + 169, + 670, + 823, + 699 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} ^ {\\prime} \\right] = \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} \\left(B _ {M} + 2 \\kappa (c - \\rho) \\Sigma_ {M}\\right) \\right] (46) \\\\ = \\operatorname {T r} \\left[ W ^ {T} W \\Sigma \\right] (47) \\\\ = \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] = \\rho . (48) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 704, + 823, + 773 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The first line further simplifies to", + "bbox": [ + 171, + 777, + 393, + 792 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right] + \\kappa (c - \\rho) \\operatorname {T r} \\left[ \\Sigma^ {- 1} \\Sigma_ {M} \\right] = \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right] + \\kappa (c - \\rho) d _ {M}, \\tag {49}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 797, + 823, + 825 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $d_M \\coloneqq \\operatorname{Tr}[M]$ is the number of nonzero eigenvalues of $B_M'$ .", + "bbox": [ + 171, + 830, + 609, + 847 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This gives an equation of $\\rho$ that solves to", + "bbox": [ + 171, + 852, + 444, + 866 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nc - \\rho = \\frac {c - \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right]}{1 + \\kappa d _ {M}}. 
\\tag {50}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 869, + 823, + 906 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This proves the proposition. $\\square$", + "bbox": [ + 171, + 909, + 375, + 924 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "F ADDITIONAL THEORETICAL CONCERNS", + "text_level": 1, + "bbox": [ + 171, + 102, + 542, + 118 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "F.1 CASE OF DATA-INDEPENDENT NON-GAUSSIAN AUGMENTATION", + "text_level": 1, + "bbox": [ + 171, + 133, + 661, + 148 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In the main text, we mainly considered the case when the noise is Gaussian. In this section, we consider a case where the noise is data-dependent and non-Gaussian. We show that the results we discussed in the main text still hold qualitatively. The general form of the loss function in Eq. (3) still applies:", + "bbox": [ + 169, + 159, + 826, + 214 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nL = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right]. 
\\tag {51}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 215, + 823, + 242 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We consider a global rescaling augmentation for each datum $x$ :", + "bbox": [ + 171, + 244, + 591, + 260 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nx = s \\hat {x}, \\tag {52}\n$$\n", + "text_format": "latex", + "bbox": [ + 470, + 268, + 823, + 282 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $s \\sim \\exp(b)$ obeys an exponential distribution with mean $b$ and variance $b^2$ . Note that even if $\\hat{x}$ is Gaussian, the augmented data is no longer Gaussian. In particular, the augmentation now becomes data-dependent. This augmentation can also be seen as a structured, biologically plausible data augmentation that encourages the model to be scale-invariant, which is what Wien's law for biological perception demands (Dayan and Abbott, 2005): no matter whether an image is dark or bright, the content of the image is the same.", + "bbox": [ + 169, + 290, + 823, + 376 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Under this augmentation, the noise covariance is dependent on $x$ and no longer Gaussian:", + "bbox": [ + 171, + 381, + 759, + 396 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ x x ^ {T} \\right] = 2 b ^ {2} A _ {0}. \\tag {53}\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 402, + 823, + 421 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We also obtain that", + "bbox": [ + 171, + 428, + 303, + 441 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nC = \\mathbb {E} \\left[ (b - s) ^ {2} x x ^ {T} \\right] = b ^ {2} A _ {0}. \\tag {54}\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 439, + 825, + 458 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The second term in Eq. 
(3) can be written as", + "bbox": [ + 171, + 460, + 467, + 474 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left. \\right. \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right] = \\mathbb {E} \\left[\\left(\\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right]\\right) ^ {2} \\right] - \\mathbb {E} \\left[ \\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right]\\right] ^ {2} (55) \\\\ = [ f i r s t \\text {t e r m} ] - 4 \\operatorname {T r} [ W (A _ {0} + C) W ^ {T} ] ^ {2} (56) \\\\ = [ f i r s t \\text {t e r m} ] - 4 \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2}, (57) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 482, + 823, + 560 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where we have used the definition $\\Sigma = A_0 + C$ . The first term is", + "bbox": [ + 171, + 566, + 594, + 580 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n[ f i r s t \\text {t e r m} ] = \\mathbb {E} \\left[ \\left(\\operatorname {T r} [ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} ]\\right) ^ {2} \\right]. 
\\tag {58}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 587, + 823, + 608 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "However, for fixed rescaling factor $s_x$ and $s_\\chi$ , each $W(x - \\chi)$ obeys a multivariate Gaussian distribution with variance $2(s_x^2 + s_\\chi^2)WA_0$ , and so we have", + "bbox": [ + 169, + 614, + 823, + 646 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n[ f i r s t \\text {t e r m} ] = \\mathbb {E} _ {s _ {x}, s _ {\\chi}} \\left[ \\left(s _ {x} ^ {2} + s _ {\\chi} ^ {2}\\right) ^ {2} \\right] \\left(4 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] ^ {2} + 8 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \\right]\\right), \\tag {59}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 652, + 825, + 672 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\mathbb{E}_{s_x,s_\\chi}\\big[(s_x^2 +s_\\chi^2)^2\\big] = 56b^4$ . Combining terms, we obtain that", + "bbox": [ + 171, + 680, + 619, + 698 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {V a r} \\left[ \\left| W (x - \\chi) \\right| ^ {2} \\right] = 4 8 b ^ {2} \\times 4 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] ^ {2} + 5 6 b ^ {4} \\times 8 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \\right]. \\tag {60}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 705, + 825, + 724 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The loss function is thus:", + "bbox": [ + 171, + 738, + 339, + 752 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nL = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + 2 4 b ^ {2} \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] ^ {2} + 5 6 b ^ {4} \\operatorname {T r} \\left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \\right]. 
\\tag {61}\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 758, + 825, + 777 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Note that this loss function is a special case of the loss function in Eq. (10) where $c = 0$ and $\\kappa = 24b^2$ (and with a rescaled fourth-order term). As in the main text, $B$ is different according to different choices of loss functions. Because $B$ commute with $A_0$ by construction, one expects collapses to happen at locations predicted by Proposition 3 and 4 under suitable choices of parameters. Also note that the odd terms vanish as discussed, and so the local stability of the origin should decide the collapsing behavior of this situation.", + "bbox": [ + 169, + 790, + 823, + 875 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This shows that collapse can also happen when the data augmentation is structured. We comment that the analysis in this section is minimal, and one important future direction is to provide more precise and insightful conditions of collapse under structured data augmentation.", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/2023/What shapes the loss landscape of self supervised learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_model.json b/2023/What shapes the loss landscape of self supervised learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f154e994f8ae834a1084e97db273b48cd82cbadb --- /dev/null +++ b/2023/What shapes the loss landscape of self supervised learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_model.json @@ -0,0 +1,3987 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 
0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.147 + ], + "angle": 0, + "content": "WHAT SHAPES THE LOSS LANDSCAPE OF SELF SUPERVISED LEARNING?" + }, + { + "type": "text", + "bbox": [ + 0.22, + 0.175, + 0.776, + 0.193 + ], + "angle": 0, + "content": "Liu Ziyin\\(^{1,2,3\\dagger}\\), Ekdeep Singh Lubana\\(^{2,3,4\\dagger}\\), Masahito Ueda\\(^{1,5,6}\\), Hidenori Tanaka\\(^{2,3}\\)" + }, + { + "type": "text", + "bbox": [ + 0.31, + 0.203, + 0.687, + 0.218 + ], + "angle": 0, + "content": "\\(^{1}\\)Department of Physics, The University of Tokyo, Tokyo, Japan" + }, + { + "type": "text", + "bbox": [ + 0.264, + 0.217, + 0.734, + 0.23 + ], + "angle": 0, + "content": "\\(^{2}\\)Physics & Informatics Laboratories, NTT Research, Inc., Sunnyvale, CA, USA" + }, + { + "type": "text", + "bbox": [ + 0.307, + 0.229, + 0.692, + 0.242 + ], + "angle": 0, + "content": "3Center for Brain Science, Harvard University, Cambridge, USA" + }, + { + "type": "text", + "bbox": [ + 0.318, + 0.241, + 0.681, + 0.255 + ], + "angle": 0, + "content": "\\(^{4}\\)EECS Department, University of Michigan, Ann Arbor, USA" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.254, + 0.777, + 0.268 + ], + "angle": 0, + "content": "\\(^{5}\\)Institute for Physics of Intelligence, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo" + }, + { + "type": "text", + "bbox": [ + 0.268, + 0.267, + 0.726, + 0.281 + ], + "angle": 0, + "content": "\\(^{6}\\)RIKEN Center for Emergent Matter Science (CEMS), Wako, Saitama, Japan" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.289, + 0.548, + 0.302 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.309, + 0.768, + 0.435 + ], + "angle": 0, + "content": "Prevention of complete and dimensional collapse of representations has recently become a design principle for self-supervised learning (SSL). 
However, questions remain in our theoretical understanding: When do those collapses occur? What are the mechanisms and causes? We answer these questions by deriving and thoroughly analyzing an analytically tractable theory of SSL loss landscapes. In this theory, we identify the causes of the dimensional collapse and study the effect of normalization and bias. Finally, we leverage the interpretability afforded by the analytical theory to understand how dimensional collapse can be beneficial and what affects the robustness of SSL against data imbalance." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.452, + 0.338, + 0.467 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.476, + 0.827, + 0.616 + ], + "angle": 0, + "content": "Self-supervised learning (SSL) methods have achieved remarkable success in learning good representations without labeled data (Chen et al., 2020b). Loss functions used in such SSL techniques promote representational similarity between pairs of related samples while using explicit penalties (Chen et al., 2020a; He et al., 2020; Zbontar et al., 2021; Caron et al., 2020) or asymmetric dynamics (Caron et al., 2021; Grill et al., 2020; Chen and He, 2021) to ensure that the distance between unrelated samples remains large. In practice, however, SSL training often experiences the phenomenon of dimensional collapse (Jing et al., 2021; Tian et al., 2021; Pokle et al., 2022), where the learned representation spans a low dimensional subspace of the overall available space. In the extreme case, this failure mode instantiates as a complete collapse, where the learned representation becomes zero-rank, and no informative features can be extracted." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.622, + 0.825, + 0.734 + ], + "angle": 0, + "content": "Prior work has primarily positioned such collapses in SSL as enemies of learning, arguing that they can negatively impact downstream task performance (Zbontar et al., 2021; Jing et al., 2021; Bardes et al., 2021). However, recent work by Cosentino et al. (2022) empirically demonstrates otherwise: quality of representations can be improved when there is a degree of collapse. These conflicting results indicate that despite extensive empirical explorations, a gap remains in our understanding of the collapse phenomenon in SSL training. We argue that this gap is due to the lack of a theoretical framework to analyze the mechanisms promoting collapsed representations. We aim to close this gap by carefully studying the loss landscapes of SSL." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.74, + 0.825, + 0.825 + ], + "angle": 0, + "content": "In this work, we analytically solve the effective landscapes of linear models trained on several popular losses used in self-supervised learning, including InfoNCE (Oord et al., 2018), Normalized Temperature Cross-Entropy (NT-xent) (Chen et al., 2020a), Spectral Contrastive Loss (HaoChen et al., 2021), and Barlow Twins / VICReg (Zbontar et al., 2021; Bardes et al., 2021). The main thesis of this work is: the local geometry of the SSL landscapes around the origin crucially decides the learning behavior of SSL models. Technically, we show that" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.831, + 0.825, + 0.845 + ], + "angle": 0, + "content": "1. the interplay between data variation and data augmentation determines the geometry of the loss;" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.845, + 0.825, + 0.873 + ], + "angle": 0, + "content": "2. the geometry of the loss explains when dimensional collapse can be helpful and why certain SSL losses are robust against data imbalance, but not the others." 
+ }, + { + "type": "list", + "bbox": [ + 0.172, + 0.831, + 0.825, + 0.873 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.88, + 0.825, + 0.909 + ], + "angle": 0, + "content": "To the best of our knowledge, our work is the first to study the landscape causes of collapse in SSL thoroughly." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.911, + 0.724, + 0.925 + ], + "angle": 0, + "content": "†Work done during an internship at Physics & Informatics Laboratories, NTT Research." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.09, + 0.325, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.196, + 0.306, + 0.21 + ], + "angle": 0, + "content": "(a) An eigenmode" + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.094, + 0.49, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.37, + 0.196, + 0.463, + 0.21 + ], + "angle": 0, + "content": "(b) No collapse" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.096, + 0.657, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.196, + 0.658, + 0.21 + ], + "angle": 0, + "content": "(c) Dimensional collapse" + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.097, + 0.822, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.682, + 0.196, + 0.815, + 0.21 + ], + "angle": 0, + "content": "(d) Complete collapse" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.215, + 0.827, + 0.332 + ], + "angle": 0, + "content": "Figure 1: Landscape in self-supervised learning (SSL). 
SSL losses generally depend only on the relative angle between pairs of network outputs (e.g., \\( f(x)^T f(x') \\)). Thus, the landscapes with a linear network (\\( f(x) = Wx \\)) have a global rotational symmetry and are symmetric about the origin. Our theory finds that the local stability at the origin decides the collapse, and larger data variation (green) prevents collapse, while strong data augmentation (red) can promote collapse. We plot the loss for a toy linear model with a diagonal weight matrix \\( diag(r_1, r_2) \\). (a) The 1d landscape when fixing one of the parameter. (b-d) The 2d landscape. (b) No collapse: the origin is an unstable local maximum, and surrounding local minima avoid collapse. The dimensionally collapsed solutions are the saddle points. (c) Dimensional collapse: the value of \\( w_1 \\) for all stable fixed points collapses to zero. (d) Complete collapse: the origin becomes the isolated local minimum." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.345, + 0.356, + 0.36 + ], + "angle": 0, + "content": "2 RELATED WORKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.825, + 0.485 + ], + "angle": 0, + "content": "SSL and Collapses. On the one hand, prior literature has often argued collapse as a harmful phenomenon that can deteriorate downstream task performance (Jing et al., 2021; Zbontar et al., 2021). Preventing such collapsed representations is a frequently discussed topic in literature (Hua et al., 2021; Jing et al., 2021; Pokle et al., 2022; Tian et al., 2021) and has motivated the design of several SSL techniques (Zbontar et al., 2021; Bardes et al., 2021; Ermolov et al., 2021). On the other hand, Cosentino et al. (2022) empirically showed that dimensional collapses under strong augmentations could significantly improve generalization performance. Our work demystifies these conflicting results by finding analytic solutions to loss landscapes of several standard SSL techniques." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.491, + 0.825, + 0.618 + ], + "angle": 0, + "content": "Theoretical Advances in SSL. Recently, several advances have been made towards understanding the success of SSL techniques from different perspectives: e.g., learning theory (Arora et al., 2019; Saunshi et al., 2022; Nozawa and Sato, 2021; Wei et al., 2021), information theory (Tsai et al., 2021a;b; Tosh et al., 2021), causality and data-generating processes (Zimmerman et al., 2021; Kugelgen et al., 2021; Trivedi et al., 2022; Tian et al., 2020; Mitrovic et al., 2020; Wang et al., 2022), dynamics (Wang and Isola, 2020; Tian et al., 2021; Tian, 2022; Wang and Liu, 2021; Simon et al., 2023), and loss landscapes (Pokle et al., 2022). These advances have unveiled practically useful properties of SSL, such as robustness to dataset imbalance (Liu et al., 2021) and principled solutions to avoid spurious correlations (Robinson et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.623, + 0.825, + 0.709 + ], + "angle": 0, + "content": "The work by Jing et al. (2021) is the closest to ours in problem setting. In that paper, the authors focused on studying the linearized learning dynamics and suggested that a competition between the feature signal strength and augmentation strength can lead to dimensional collapse. In contrast, our focus is on the landscape and our result implies that this feature-augmentation competition on its own is insufficient to cause a dimensional collapse. In fact, we show that there will be no collapse in the setting studied by Jing et al. (2021)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.725, + 0.691, + 0.74 + ], + "angle": 0, + "content": "3 A LANDSCAPE THEORY OF SELF-SUPERVISED-LEARNING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.825, + 0.797 + ], + "angle": 0, + "content": "This section presents the main theoretical results. Let \\(\\{\\hat{x}_i\\}_i^N\\) be a dataset with \\(N\\) data points. 
For every data point \\(\\hat{x}\\), we augment it with an i.i.d. noise \\(\\epsilon\\) such that \\(x \\coloneqq \\hat{x} + \\epsilon\\). To be concrete, we start with considering the standard contrastive loss, InfoNCE (Oord et al., 2018):" + }, + { + "type": "equation", + "bbox": [ + 0.231, + 0.804, + 0.825, + 0.842 + ], + "angle": 0, + "content": "\\[\nL = \\mathbb {E} _ {\\epsilon} \\left[ - \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp (- | f (x _ {i}) - f \\left(x _ {i} ^ {\\prime}\\right) | ^ {2} / 2)}{\\sum_ {j \\neq i} \\exp (- | f (x _ {i}) - f (\\chi_ {j}) | ^ {2} / 2) + \\exp (- | f (x _ {i}) - f \\left(x _ {i} ^ {\\prime}\\right) | ^ {2} / 2)} \\right], \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.85, + 0.827, + 0.926 + ], + "angle": 0, + "content": "where \\( f(x) \\in \\mathbb{R}^{d_1} \\) is the model output; all \\( x, x' \\) and \\( \\chi \\) are augmented data points for some independent additive noise \\( \\epsilon \\) such that \\( \\mathbb{E}_{\\epsilon}[x] = \\hat{x} = \\mathbb{E}_{\\epsilon}[x'] \\neq \\mathbb{E}_{\\epsilon}[\\chi] = \\hat{\\chi} \\). We decompose the model output into a general function \\( \\phi(x) \\in \\mathbb{R}^{d_0} \\) and the last-layer weight matrix \\( W \\in \\mathbb{R}^{d_1 \\times d_0} \\): \\( f(x) = W\\phi(x) \\). The covariance of \\( \\phi(\\hat{x}) \\) is \\( A_0 := \\mathbb{E}_{\\hat{x}}[\\phi(\\hat{x})\\phi(\\hat{x})^T] \\), and the covariance of the data-augmented penultimate layer representation is \\( \\Sigma := \\mathbb{E}_x[\\phi(x)\\phi(x)^T] \\). 
The effect of data augmentation on the learned" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.189 + ], + "angle": 0, + "content": "representation is captured through a symmetric matrix \\( C \\coloneqq \\Sigma - A_0 \\). For a general \\( \\phi \\), the eigenvalues of \\( C \\) can be either positive or negative. When \\( \\phi \\) is the identity mapping, \\( A_0 \\) becomes the empirical data covariance, \\( C \\) becomes positive semi-definite and is the covariance of the noise \\( \\epsilon \\), and \\( \\Sigma \\) is the covariance of the augmented data. In some sense, this loss function captures the essence of SSL: the numerator encourages the representation \\( f(x) \\) to be closer to the representation of similar data, and the denominator encourages a separation between dissimilar data." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.633, + 0.209 + ], + "angle": 0, + "content": "For a fixed set of noises, we can write the InfoNCE in a cleaner form:" + }, + { + "type": "equation", + "bbox": [ + 0.276, + 0.215, + 0.825, + 0.248 + ], + "angle": 0, + "content": "\\[\nL _ {\\epsilon} = \\mathbb {E} _ {\\hat {x}} \\left\\{\\frac {1}{2} | f (x) - f \\left(x ^ {\\prime}\\right) | ^ {2} + \\log \\mathbb {E} _ {\\hat {\\chi}} \\left[ \\exp \\left(- \\frac {1}{2} | f (x) - f (\\chi) | ^ {2}\\right) \\right] \\right\\}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.252, + 0.589, + 0.268 + ], + "angle": 0, + "content": "where we used \\(\\mathbb{E}_{\\hat{x}}\\) to denote an averaging over the training set." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.273, + 0.495, + 0.44 + ], + "angle": 0, + "content": "In this notation, we have \\(\\mathbb{E}_{\\epsilon}\\mathbb{E}_{\\hat{x}}[x] = \\mathbb{E}_x[x]\\) and \\(\\mathbb{E}_{\\epsilon}[L_{\\epsilon}] = L\\). We first show that the expansion of the loss function around the origin takes a rather universal form. We then find analytical solutions to the stationary points of this landscape and study their relevance to feature learning and collapses. See Table 1 for a summary of the main results. The proofs are presented in Appendix E. For a quantitative understanding, we mainly focus on the case when \\(\\phi\\) is the identity function. We discuss the general nonlinear case in Section 4.1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.453, + 0.456, + 0.468 + ], + "angle": 0, + "content": "3.1 LANDSCAPE OF A LINEAR MODEL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.476, + 0.495, + 0.532 + ], + "angle": 0, + "content": "We first analyze representative SSL loss functions and show that to leading order in \\( W \\), the local geometry of SSL losses takes the following form" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.536, + 0.493, + 0.567 + ], + "angle": 0, + "content": "\\[\nL = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ \\left| W (x - \\chi) \\right| ^ {2} \\right]. \\tag {3}\n\\]" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.275, + 0.828, + 0.394 + ], + "angle": 0, + "content": "
HessianDim.Complete
InfoNCEA0XX
NT-Xent (SimCLR)A0-C/N
Spectral ContrastiveCXX
Barlow TwinsA0+CXX
+ Normalization-X
+ bias-
+ Weight Decay+γI
" + }, + { + "type": "table_caption", + "bbox": [ + 0.503, + 0.399, + 0.826, + 0.565 + ], + "angle": 0, + "content": "Table 1: What shapes the SSL landscapes around the origin? For each of the SSL losses, the combination of data covariance \\((A_0)\\), data-augmentation covariance \\((C)\\), and dataset size \\((N)\\) can affect its stability and thus determine the presence \\((\\checkmark)\\) and absence \\((X)\\) of dimensional/complete collapse (Here, a \\(\\checkmark\\) means \"there exists a hyperparameter setting and data distribution such that the relevant collapse happens;\" see section 3). Beyond collapses, the theory implies that SCL, whose landscape is formed primarily by data augmentation, is more robust to data imbalance than InfoNCE, which is affected primarily by the data (see section 4)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.572, + 0.825, + 0.63 + ], + "angle": 0, + "content": "A distinctive feature of Eq. (3) is that its first and third-order terms vanish. This is because the loss function is invariant to a left rotation of \\( W \\). We will see that this symmetry in rotation is a crucial and general feature of the SSL loss functions that allow us to treat them in a universal way. We discuss how rotation symmetry can cause collapses in nonlinear settings in Section 4." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.635, + 0.452, + 0.65 + ], + "angle": 0, + "content": "InfoNCE. The loss function simplifies to:" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.656, + 0.826, + 0.706 + ], + "angle": 0, + "content": "\\[\nL = \\underbrace {\\operatorname {T r} \\left[ W C W ^ {T} \\right]} _ {E} + \\underbrace {\\mathbb {E} _ {\\epsilon , \\hat {x}} \\left\\{\\log \\mathbb {E} _ {\\hat {\\chi}} \\left[ \\exp \\left(- \\frac {1}{2} | W (x - \\chi) | ^ {2}\\right) \\right] \\right\\}} _ {- S}. 
\\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.714, + 0.564, + 0.73 + ], + "angle": 0, + "content": "Expanding the entropy term to the fourth order, we obtain1" + }, + { + "type": "equation", + "bbox": [ + 0.279, + 0.736, + 0.826, + 0.767 + ], + "angle": 0, + "content": "\\[\n- S = - \\mathbb {E} _ {x} \\mathbb {E} _ {\\chi} \\left[ \\frac {1}{2} | W (x - \\chi) | ^ {2} \\right] + \\frac {1}{8} \\operatorname {V a r} [ | W (x - \\chi) | ^ {2} ] + O (\\| W \\| ^ {6}). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.825, + 0.829 + ], + "angle": 0, + "content": "This (perturbative) decomposition of entropy deserves some special attention. The entropy decomposes into a repulsion term that is second order in \\( W \\), and a variance term that is fourth order in \\( W \\). The first term encourages a repulsion between \\( x \\) and its augmentation, which counteracts the effect of the energy term. The repulsion term can be decomposed into" + }, + { + "type": "equation", + "bbox": [ + 0.309, + 0.836, + 0.825, + 0.868 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} _ {x} \\mathbb {E} _ {\\chi} \\left[ \\frac {1}{2} | W (x - \\chi) | ^ {2} \\right] = \\operatorname {T r} \\left[ W C W ^ {T} \\right] + \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right]. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.872, + 0.827, + 0.902 + ], + "angle": 0, + "content": "The first term encourages an expansion of \\( W \\) along the direction of the augmentation \\( C \\), while the second term encourages an expansion along the directions of feature \\( A_0 \\). It is intriguing to see" + }, + { + "type": "page_footnote", + "bbox": [ + 0.195, + 0.91, + 0.751, + 0.925 + ], + "angle": 0, + "content": "1 Throughout, we use \\( \\|\\cdot\\| \\) to denote the \\( L_{2} \\) norm for vectors and Frobenius norm for matrices." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.178 + ], + "angle": 0, + "content": "that the repulsion term dominates the attraction of the energy term: the motion along the direction of \\( C \\) completely cancels out, and only the expansion along \\( A_0 \\) remains. This means that to leading order, the learned representation has a larger variation along the directions where the data has a larger variation, which is what one naively expects. Collecting results, we have obtained the loss landscape in the neighborhood of the origin as \\( L = -\\mathrm{Tr}[WA_0W^T] + \\frac{1}{8}\\mathrm{Var}[|W(x - \\chi)|^2] + O(\\| W\\|^6) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.181, + 0.825, + 0.224 + ], + "angle": 0, + "content": "NT-xent (SimCLR). As an additional example, we analyze Normalized Temperature Cross-Entropy loss (NT-xent) used in SimCLR (Chen et al., 2020a). Tian (2022) shows that InfoNCE can be generalized to encompass NT-xent as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.223, + 0.229, + 0.825, + 0.268 + ], + "angle": 0, + "content": "\\[\nL = \\mathbb {E} _ {\\epsilon} \\left[ - \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp (- | f (x _ {i}) - f (x _ {i} ^ {\\prime}) | ^ {2} / 2)}{\\sum_ {\\chi \\neq x} \\exp (- | f (x _ {i}) - f (\\chi_ {j}) | ^ {2} / 2) + \\alpha \\exp (- | f (x _ {i}) - f (x _ {i} ^ {\\prime}) | ^ {2} / 2)} \\right]. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.27, + 0.825, + 0.327 + ], + "angle": 0, + "content": "In contrast to InfoNCE, here one of the terms in the denominator is reweighted by a factor of \\(\\alpha \\geq 0\\). 
Two interesting limits are \\(\\alpha = 1\\), where we recover the InfoNCE loss, and \\(\\alpha = 0\\), where we obtain NT-xent. For general \\(\\alpha\\), we refer to this loss as the weighted InfoNCE. We will see in section 3 that this weighted InfoNCE can have a mild dimensional collapse problem." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.333, + 0.525, + 0.349 + ], + "angle": 0, + "content": "The same perturbative expansion as Eq. (4)-(6) gives" + }, + { + "type": "equation", + "bbox": [ + 0.179, + 0.352, + 0.826, + 0.381 + ], + "angle": 0, + "content": "\\[\nL = \\frac {1 - \\alpha}{N} \\operatorname {T r} \\left[ W C W ^ {T} \\right] - \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ \\left| W (x - \\chi) \\right| ^ {2} \\right] + O \\left(\\left\\| W \\right\\| ^ {6}\\right) + O \\left(\\left\\| W \\right\\| ^ {4} N ^ {- 1}\\right). \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.416 + ], + "angle": 0, + "content": "Now, the Hessian of the origin is no longer guaranteed to be negative definite. In fact, if \\(\\frac{1 - \\alpha}{N} C - A_0 \\geq 0\\), \\(W = 0\\) becomes an isolated local minimum." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.422, + 0.825, + 0.465 + ], + "angle": 0, + "content": "Landscape Analysis. The above discussion shows that the common loss landscapes in self-supervised contrastive learning can be reduced to an effective form in Eq. (3). The following proposition shows that the variance term of the loss takes a specific form when the data is Gaussian." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.467, + 0.825, + 0.499 + ], + "angle": 0, + "content": "Proposition 1. Let the data and noise be Gaussian. Then, \\( L = -\\mathrm{Tr}[W B W^T] + \\mathrm{Tr}[W\\Sigma W^T W\\Sigma W^T] \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.509, + 0.825, + 0.537 + ], + "angle": 0, + "content": "When the training ends, one expects the model to locate at (at least close to) a stationary point of the loss. It is thus important to identify all the stationary points of this loss function." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.54, + 0.827, + 0.603 + ], + "angle": 0, + "content": "Theorem 1. Let \\( d^{*} \\coloneqq \\min(d_{0}, d_{1}) \\). Let the data and noise be Gaussian. All stationary points \\( W \\) of Eq. (3) satisfy \\( W^{T}W = \\frac{1}{2}\\Sigma^{-1/2}UM\\Lambda U^{T}\\Sigma^{-1/2} \\), where \\( U\\Lambda U^{T} \\) is the eigenvalue decomposition of \\( \\Sigma^{-1/2}B\\Sigma^{-1/2} \\), and \\( M \\) is an arbitrary (masking) diagonal matrix containing only zero or one such that (1) \\( M_{ii} = 0 \\) if \\( \\Lambda_{ii} < 0 \\) and (2) contain at most \\( d^{*} \\) nonzero terms." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.609, + 0.592, + 0.625 + ], + "angle": 0, + "content": "Additionally, if \\( C \\) and \\( A_0 \\) commute, all stationary points satisfy" + }, + { + "type": "equation", + "bbox": [ + 0.416, + 0.628, + 0.825, + 0.657 + ], + "angle": 0, + "content": "\\[\nW ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1} B _ {M} \\Sigma^ {- 1}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.661, + 0.706, + 0.677 + ], + "angle": 0, + "content": "where \\( B_M \\) denotes the matrix obtained by masking the eigenvalues of \\( B \\) with \\( M \\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.687, + 0.825, + 0.798 + ], + "angle": 0, + "content": "This stationary-point condition implies the direct cause of the dimensional collapse. Namely, dimensional collapse happens when the eigenvalues of the matrix \\( B \\) become negative. The eigenvalues of \\( B \\), in turn, depend on the competition between data augmentation and the data feature. 
Comparing the commuting case with the noncommuting case, we see that the main difference is that when \\( C \\) does not commute with \\( A_0 \\), the augmentation can also change the orientation of the learned representation; otherwise, augmentation only affects the eigenvalues. To focus on the most important terms, we now assume that the augmentation is well-aligned with the features such that the augmentation covariance commute with the data covariance." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.802, + 0.538, + 0.817 + ], + "angle": 0, + "content": "Assumption 1. From now on, we assume \\(CA_0 = A_0C\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.826, + 0.926 + ], + "angle": 0, + "content": "For the case of weighted InfoNCE, we have that \\( B = A_0 - \\frac{1 - \\alpha}{N} C \\). Let \\( a_i \\) denote the \\( i \\)-th eigenvalue of the \\( A \\) and \\( c_i \\) that of \\( C \\) viewed in a predetermined order; then, the \\( i \\)th subspace collapses when \\( \\frac{1 - \\alpha}{N} c_i \\geq a_i \\), namely, when the variation introduced by the noise dominates that of the original data. Importantly, this collapse is a property shared by all stationary points of the landscape, and one cannot hope to fix the problem by, say, biasing the gradient descent towards a certain type of local minima. When weight decay is used, the condition for collapse becomes \\( \\frac{1 - \\alpha}{N} c_i + \\gamma \\geq a_i \\): it becomes easier to cause a collapse when weight decay is used." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "The global minimum of the loss function is also easy to find. 
For all stationary points, the loss function takes a simple form; \\( L = -\\frac{1}{4}\\mathrm{Tr}[\\Sigma^2 B_M B] \\). Thus, \\( L \\) becomes more and more negative if the eigenvalues of \\( B_M \\) align with the largest eigenvalues of \\( B \\). Namely, the global minimum is achieved if \\( M \\) leaves the largest eigenvalues of \\( B \\) intact." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.825, + 0.196 + ], + "angle": 0, + "content": "Because the stationary points contain collapsed solutions where the eigenvalues of \\( W^T W \\) are zero, one is naturally interested in how likely it is to converge to these solutions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.197, + 0.825, + 0.227 + ], + "angle": 0, + "content": "Proposition 2. (\\(W^T W\\) achieves maximum possible rank) Let \\(m\\) denote the number of positive eigenvalues of \\(B\\). Then, \\(\\mathrm{rank}(W^T W) = \\min(m, d^*)\\) for any local minimum." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.235, + 0.826, + 0.293 + ], + "angle": 0, + "content": "This proposition implies that the loss landscape of contrastive SSL (with a linear model) is rather benign because all local minima must achieve a maximum possible rank. In fact, this result implies that the collapses may be well controllable by carefully controlling and tuning the eigenvalues of the matrix \\( B \\), which directly depends on the nature of the data augmentation we use." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.304, + 0.468, + 0.318 + ], + "angle": 0, + "content": "3.2 LANDSCAPE WITH NORMALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.326, + 0.825, + 0.382 + ], + "angle": 0, + "content": "It is common in practice to normalize the learned representation such that \\( \\| f(x) \\|^2 = c \\). When normalization is applied, only the direction of the learned representation matters. While this is a simple trick in practice, its implication on the landscape is poorly understood. 
In this section, we extend our theory to analyze the effect of normalization." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.388, + 0.744, + 0.405 + ], + "angle": 0, + "content": "We model the effect of normalization as a regularization term: \\( R \\coloneqq (\\mathbb{E}_x\\| f(x)\\| ^2 -c)^2 \\)" + }, + { + "type": "equation", + "bbox": [ + 0.421, + 0.406, + 0.825, + 0.421 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {n o r m}} = E q. (3) + \\kappa R. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.422, + 0.826, + 0.603 + ], + "angle": 0, + "content": "Note that this regularization term achieves two things simultaneously: (1) \\( \\| f(x) \\|^2 = c \\) for all \\( x \\) is a minimizer of the loss function; (2) the regularization is invariant to any rotation of the learned representation. For a linear model, we note that this condition is not entirely the same as a direct normalization of the representation because it is generally impossible to achieve \\( \\| Wx \\|^2 = c \\) for all \\( x \\) because a linear model has limited expressivity. However, it is generally possible to achieve the slightly weaker condition: the representation has a norm 1 on average. This loss function can also be seen as a mathematical model of the VICReg loss (Bardes et al., 2021), where \\( R \\) effectively models the variance regularization term of VICReg loss and \\( \\kappa \\) is its strength. This modeling is necessary because the variance term of the original VICReg is not differentiable and thus cannot be expanded. The proposed term \\( R \\) captures the essence of the variance term because it also encourages the representation to have a constant variance. Our theory also explains why the VICReg is observed to experience collapses when \\( \\kappa \\) is not large enough. As \\( \\kappa \\) tends to infinity, this constraint will become perfectly satisfied. We thus take the infinite \\( \\kappa \\) limit to study the effect of normalization." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.609, + 0.748, + 0.625 + ], + "angle": 0, + "content": "The following proposition gives a condition that all stationary points of Eq. (10) satisfy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.625, + 0.825, + 0.685 + ], + "angle": 0, + "content": "Proposition 3. Let \\(\\rho(W) \\coloneqq \\operatorname{Tr}[W\\Sigma W^T]\\), \\(B' \\coloneqq B + 2\\kappa(c - \\rho)\\Sigma\\), and let \\(\\Lambda_i\\) be the eigenvalues of \\(B'\\). Then, every stationary point of Eq. (10) satisfy \\(W^T W = \\frac{1}{2}\\Sigma^{-1}B_M'\\Sigma^{-1}\\), where \\(M\\) is an arbitrary diagonal mask of the eigenvalues of \\(B'\\) containing only zero or one such that (1) \\(M_{ii} = 0\\) if \\(\\Lambda_i < 0\\) and (2) contain at most \\(d^*\\) nonzero terms." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.694, + 0.825, + 0.764 + ], + "angle": 0, + "content": "Compared with the unnormalized case, the term \\(2\\kappa (1 - \\rho)\\Sigma_{M}\\) emerges due to normalization. The effect of normalization is as expected: it shrinks the norm of the model if \\(\\rho > 1\\), and it expands the model if \\(\\rho < 1\\), and it does not have any effect if we have already achieved \\(\\rho = 1\\). Interestingly, this rescaling effect is anisotropic and stronger along the directions of larger eigenvalues of the covariance of the augmented data \\(\\Sigma\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.771, + 0.628, + 0.786 + ], + "angle": 0, + "content": "The next theorem gives the explicit form of \\(\\rho\\) at the stationary points." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.788, + 0.825, + 0.825 + ], + "angle": 0, + "content": "Proposition 4. For any stationary point \\( W^{*} \\), \\( c - \\rho(W^{*}) = \\frac{c - \\frac{1}{2}\\mathrm{Tr}[\\Sigma^{-1}B_{M}]}{1 + \\kappa d_{M}} \\), where \\( d_{M} \\) is the number of non-zero eigenvalues of \\( B_{M}' \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.876 + ], + "angle": 0, + "content": "For a finite \\(\\kappa\\), these results suggest that collapses can still happen. For VICReg, \\(B = -A_0\\), and the complete collapse can happen when \\(\\kappa \\ll \\| A_0\\| /c\\|\\Sigma\\|\\) - this explains the experimental observation of collapses for small values of \\(\\kappa\\) in VICReg loss (Bardes et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Lastly, to understand normalization, we are interested in the case of \\(\\kappa \\to \\infty\\). Combining Proposition 3 and 4, we have proved the following theorem, showing that the asymptotic solution converges to a form independent of \\(\\kappa\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.638, + 0.12 + ], + "angle": 0, + "content": "Theorem 2. Let \\( W_{\\kappa} \\) be a stationary point of Eq. (10) at fixed \\( \\kappa \\). Then," + }, + { + "type": "equation", + "bbox": [ + 0.307, + 0.125, + 0.825, + 0.162 + ], + "angle": 0, + "content": "\\[\n\\lim _ {\\kappa \\rightarrow \\infty} W _ {\\kappa} ^ {T} W _ {\\kappa} = \\frac {1}{2} \\Sigma^ {- 1} \\left[ B _ {M} + \\frac {2 c - \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right]}{d _ {M}} \\Sigma_ {M} \\right] \\Sigma^ {- 1}. \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.175, + 0.825, + 0.209 + ], + "angle": 0, + "content": "The correction term \\(\\frac{2c - \\mathrm{Tr}[\\Sigma B_M]}{d_0}\\Sigma_M\\) emerges as a result of applying normalization. 
The effect can be easier to understand if we write the solution as" + }, + { + "type": "equation", + "bbox": [ + 0.323, + 0.213, + 0.825, + 0.25 + ], + "angle": 0, + "content": "\\[\nW ^ {T} W = \\frac {1}{2} \\left[ \\Sigma^ {- 1} B _ {M} - \\frac {\\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right]}{d _ {M}} M + \\frac {2 c}{d _ {M}} \\right] \\Sigma^ {- 1}, \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.256, + 0.825, + 0.301 + ], + "angle": 0, + "content": "where we have used the relation \\(\\Sigma_{M}\\Sigma^{-1} = M\\). Note the term in brackets: it subtracts the average eigenvalue of \\(\\Sigma^{-1}B_M\\) from \\(\\Sigma^{-1}B_M\\) and shifts the remaining eigenvalues positively by \\(2c / d_{M}\\). Because the eigenvalues of \\(WW^{T}\\) must be positive, the following condition must hold for all solutions:" + }, + { + "type": "equation", + "bbox": [ + 0.442, + 0.306, + 0.825, + 0.323 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {i} + 2 c / d _ {M} > \\bar {\\lambda}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.329, + 0.827, + 0.457 + ], + "angle": 0, + "content": "where \\(\\lambda_{i}\\) are the eigenvalues of \\(\\Sigma^{-1}B_M\\) and \\(\\bar{\\lambda}\\) is its average. Namely, for the \\(i\\)-th dimension not to collapse, it must be smaller than the average eigenvalues by at most \\(2c / d_{M}\\). Any smaller eigenvalues must collapse. Compared to the case without normalization, normalization makes collapses dependent on the relative strength of each feature and augmentation. In the following discussion, we let \\(c = 1\\) to simplify the discussion. We present a detailed analysis of this condition in Section D.1. One finds that the condition for collapse becomes heavily dependent on the data structure, and there are cases where collapses become harder, and there are cases where collapses become much easier. 
Importantly, it also becomes the case that a sufficiently strong augmentation can always cause a collapse in the corresponding subspace." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.461, + 0.825, + 0.56 + ], + "angle": 0, + "content": "Effect of Bias. Lastly, we study the effect of explicitly having a bias term: \\( Wx \\rightarrow Wx + b \\). First of all, when there is no normalization, the bias term does not affect the solution because the loss landscape is invariant to a translation in the learned representation. However, this effect dramatically changes if we apply normalization at the same time. This is because normalization removes the translation symmetry of the effective loss, and the trivial solution \\( W = 0 \\), \\( b = 1 \\) becomes the simplest way to achieve the norm-1 constraint. Our result shows that the addition of bias dramatically affects the stationary points." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.563, + 0.825, + 0.595 + ], + "angle": 0, + "content": "Theorem 3. Let \\( f(x) = Wx + b \\) and \\( \\mathbb{E}[x] = 0 \\). Then, all stationary points \\( W \\) satisfy Eq. (9), subject to the constraint that \\( \\mathrm{Tr}[W^T\\Sigma W]\\leq c \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.605, + 0.826, + 0.744 + ], + "angle": 0, + "content": "Namely, the solution reverts to the case where there is no normalization at all, except that the norm of the solution can no longer be larger than \\( c \\). This upper bound can make collapses much easier to happen. For example, if \\( c < (a_i - c_i) / (a_i + c_i) \\) for all \\( i \\), a complete collapse can happen despite normalization. When \\( c = 1 \\) and \\( c_i \\ll a_i \\), \\( \\rho \\approx d_M / 2 \\) and the constraint indicates that \\( d_M \\leq 2 \\): when the augmentation is very weak, there are at most 2 nontrivial subspaces. 
This is too restrictive for learning a meaningful representation, which helps us understand why dimensional collapse can harm learning in practice. The fact that simple normalization cannot prevent collapse has been noticed for a while for the simplest case of a cosine-similarity loss, and our result explains why previous works have tried to introduce asymmetry to cosine similarity to avoid collapses (Grill et al., 2020; Chen and He, 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.751, + 0.825, + 0.794 + ], + "angle": 0, + "content": "Relevant Loss Functions. Having developed a framework for understanding normalization, we show that other common loss functions in SSL can also be written in the form given in Eq. (3). The spectral contrastive loss (SCL) (HaoChen et al., 2021) reads" + }, + { + "type": "equation", + "bbox": [ + 0.235, + 0.799, + 0.825, + 0.819 + ], + "angle": 0, + "content": "\\[\nL _ {S C L} = - 2 \\mathbb {E} [ f (x) ^ {T} f (x ^ {\\prime}) ] + \\mathbb {E} [ (f (x) ^ {T} f (\\chi)) ^ {2} ] + c o n s t. \\quad \\text {s . t .} \\| f (x) \\| ^ {2} = 1. \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.824, + 0.825, + 0.852 + ], + "angle": 0, + "content": "Let \\( f(x) = Wx \\) be linear, the distributions are zero-mean Gaussian, and ignore the normalization. This loss function becomes" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.858, + 0.825, + 0.877 + ], + "angle": 0, + "content": "\\[\nL _ {S C L} = - 2 \\operatorname {T r} \\left[ W C W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right]. \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.883, + 0.825, + 0.926 + ], + "angle": 0, + "content": "When normalization exists, we can apply the result in Section 3.2. By our argument, there is no collapse in this loss function. 
The difference with InfoNCE loss is that the learned feature spreads along the directions of the augmentation \\( C \\), not along the directions of the feature \\( A_0 \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.109, + 0.331, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.198, + 0.328, + 0.211 + ], + "angle": 0, + "content": "(a) Landscape of ResNet" + }, + { + "type": "image", + "bbox": [ + 0.344, + 0.101, + 0.498, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.198, + 0.466, + 0.211 + ], + "angle": 0, + "content": "(b) No collapse" + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.101, + 0.662, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.511, + 0.198, + 0.659, + 0.212 + ], + "angle": 0, + "content": "(c) Dimensional collapse" + }, + { + "type": "image", + "bbox": [ + 0.674, + 0.101, + 0.825, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.683, + 0.198, + 0.815, + 0.211 + ], + "angle": 0, + "content": "(d) Complete collapse" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.221, + 0.331, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.189, + 0.308, + 0.317, + 0.321 + ], + "angle": 0, + "content": "(e) Landscape of ViT" + }, + { + "type": "image", + "bbox": [ + 0.344, + 0.212, + 0.495, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.374, + 0.308, + 0.464, + 0.321 + ], + "angle": 0, + "content": "(f) No collapse" + }, + { + "type": "image", + "bbox": [ + 0.507, 
+ 0.212, + 0.658, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.308, + 0.657, + 0.322 + ], + "angle": 0, + "content": "(g) Dimensional collapse" + }, + { + "type": "image", + "bbox": [ + 0.67, + 0.212, + 0.821, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.678, + 0.308, + 0.81, + 0.321 + ], + "angle": 0, + "content": "(h) Complete collapse" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.327, + 0.825, + 0.43 + ], + "angle": 0, + "content": "Figure 2: Landscape of Resnet18 (upper) and vision transformers (lower) on CIFAR10 with SimCLR qualitatively agrees with our linear theory. (a) Training objective \\( L \\) as a function of a rescaling of the last layer \\( W \\rightarrow aW \\). (b-d) \\( L \\) as a function of a \\( 2d \\) rescaling of the last layer where the data augmentation strength is (b) small, (c) intermediate, and (d) strong. Red indicates areas of high loss, blue indicates areas of low loss, and stars locate local minima. The use of data augmentation changes the stability of the origin, a qualitative change that leads to different types of collapses in qualitative agreement with our linear theory (cf. Figure 1). Additionally, we also notice the same qualitative changes of landscape in simpler nonlinear models (see Appendix A). (e-h) are the same setting but for ViT." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.445, + 0.825, + 0.515 + ], + "angle": 0, + "content": "The case of Barlow Twin (BT) (Zbontar et al., 2021) is similar. While the fourth-order term of BT is much more complicated due to the imbalance created by the \\(\\lambda\\) term. The second-order term can be identified easily: \\(L_{BT} = -2\\mathrm{Tr}[W\\Sigma W^T] + O(||W||^4)\\). This also does not collapse. 
A difference between the SCL loss and InfoNCE is that the learned representation has a spread that aligns with the combination of the feature and the augmentation strength." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.531, + 0.328, + 0.545 + ], + "angle": 0, + "content": "4 IMPLICATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.559, + 0.825, + 0.589 + ], + "angle": 0, + "content": "In this section, we explore some theoretical and practical implications of our results. In Appendix Section A, we also present numerical simulations that directly validate the predictions of the theory." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.6, + 0.479, + 0.613 + ], + "angle": 0, + "content": "4.1 RELEVANCE TO NONLINEAR MODELS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.623, + 0.825, + 0.75 + ], + "angle": 0, + "content": "An important question is how much of the analysis is relevant for deep nonlinear models in general. In fact, the loss landscape we have studied is quite close to the most general landscape one can have. Let \\( L(f(x)) \\) be a general SSL loss function for data point \\( x \\). The quality of the learned representation should be independent of the population-level orientation of the representation. Therefore, the loss function should satisfy a rotational invariance. Namely, for any rotation matrix \\( R \\), \\( L(x) = L(Rf(x)) \\); this rotational invariance implies that the loss should expand as \\( L(f(x)) = af(x)^T f(x) + b[f(x)^T f(x)]^2 + O(f(x)^6) \\). Note that all the odd-order terms of \\( f(x) \\) vanish due to the rotational symmetry. 
Substituting \\( f(x) = W\\phi(x) \\) in the loss function, we obtain a very general form of landscape that \\( W \\) obeys:" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.759, + 0.825, + 0.779 + ], + "angle": 0, + "content": "\\[\nL (W, \\phi) = \\operatorname {T r} \\left[ W ^ {T} W A \\right] + \\sum W _ {i m} W _ {j m} W _ {k n} W _ {l n} Z _ {i j k i}, \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.827, + 0.926 + ], + "angle": 0, + "content": "where \\(A\\) and \\(Z\\) are dependent on \\(\\phi\\). Note how all the examples we have studied take this form. For \\(W\\), its collapse entirely depends on the stability of the matrix \\(A\\). Thus the study of the stability of the matrix \\(A\\) becomes crucial for our understanding. To illustrate, we train a Resnet18 on CIFAR10 with the SimCLR loss with normalization and with weight decay strength \\(10^{-3}\\) until convergence to obtain the converged weights \\(W^{*}\\). The representation has a dimension 128. We rescale the weight matrix of the last layer \\(W_{\\mathrm{last}}^{*}\\) by a factor \\(a\\) and compute the loss as a function of \\(a\\). See Figure 2-a. We then partition the singular values of \\(W_{\\mathrm{last}}^{*}\\) into the larger half and the smaller half. We rescale the larger half by a factor \\(r_1\\) and the smaller half by \\(r_2\\). We plot the loss as a \\(2d\\) function of \\((r_1, r_2)\\) in Figure 2. We also perform experiments for vision transformers (ViT) in the lower row (Dosovitskiy et al., 2020). In all cases, the landscape features qualitative changes comparable to those in Figure 1." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.317 + ], + "angle": 0, + "content": "A connection to Landau theory in physics. Those familiar with statistical physics should note that the proposed theory is analogous to the Landau theory of second-order phase transitions. When treating the loss function as the free energy, the square root of the eigenvalues \\(\\sqrt{\\lambda}\\) of \\(W^T W\\) are the order parameters of the system, and the phase transitions happen when \\(\\lambda\\) turns from 0 to positive. These transitions (collapses) happen because of symmetry breaking (Landau and Lifshitz, 2013): the loss function (2) is symmetric in the sign of \\(W\\). Yet, for any nontrivial learning, \\(W\\) must be nonzero; thus, a symmetry breaking of the sign of \\(W\\) needs to happen for learning. The recent work by Ziyin and Ueda (2022) suggested how symmetry breaking around the origin and Landau theory could explain various types of collapses in deep learning. Therefore, the dimensional collapse could be related to neural collapses in supervised learning (Papyan et al., 2020; Ziyin et al., 2022a) and posterior collapse in Bayesian deep learning (Wang and Ziyin, 2022). Because second-order phase transitions should come with the divergence of the correlation function, one might also wonder what is \"divergent\" in the SSL problem. Here, the learning time scale for the collapsing dimension is divergent at the critical point because the second-order term vanishes in this direction, and so the dynamics are effectively frozen along this direction." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.328, + 0.496, + 0.342 + ], + "angle": 0, + "content": "4.2 ROBUSTLY INDUCING GOOD COLLAPSES" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.35, + 0.581, + 0.463 + ], + "angle": 0, + "content": "Contrary to previous works, a recent work (Cosentino et al., 2022) has suggested that dimensional collapse can be beneficial and significantly improve the generalization performance of the model. This observation raises a question. How can dimensional collapse be beneficial and how can it be induced? In the following, we first introduce \\(\\beta\\)-InfoNCE, which can adjust the degree of dimensional collapse, and analyze the collapse behavior to elucidate the mechanism of task-aligned collapse." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.468, + 0.584, + 0.65 + ], + "angle": 0, + "content": "Adjusting the degree of dimensional collapse with \\(\\beta\\)-InfoNCE. Despite the potential benefit, existing SSL loss functions cannot robustly induce dimensional collapse. InfoNCE is insufficient to induce a collapse, and the collapse induced by SimCLR depends on a vanishingly small parameter \\(1/N\\). One thus wonders whether there is a loss function that allows us to induce collapsing behavior in a more predictable manner so that one might controllably extract some benefits from collapse. Our result suggests that one way to directly control collapses is through the strength of the competition for the model Hessian at the origin. For InfoNCE, one way to achieve this is to weigh the entropy term by a general factor \\(\\beta\\):" + }, + { + "type": "equation", + "bbox": [ + 0.171, + 0.655, + 0.589, + 0.687 + ], + "angle": 0, + "content": "\\[\n\\left. \\right. 
\\mathbb {E} _ {x} \\left\\{\\frac {1}{2} | f (x) - f \\left(x ^ {\\prime}\\right) | ^ {2} + \\beta \\log \\mathbb {E} _ {\\chi} \\left[ \\exp \\left(- \\frac {1}{2} | f (x) - f (\\chi) | ^ {2}\\right)\\right]\\right\\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.69, + 0.584, + 0.731 + ], + "angle": 0, + "content": "Due to its similarity with the \\(\\beta\\)-VAE in Bayesian learning, we call it the \\(\\beta\\)-InfoNCE. The leading term in the loss function becomes" + }, + { + "type": "equation", + "bbox": [ + 0.274, + 0.731, + 0.482, + 0.748 + ], + "angle": 0, + "content": "\\[\n- \\operatorname {T r} \\left[ W \\left(A _ {0} - (1 - \\beta) C\\right) W ^ {T} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.584, + 0.778 + ], + "angle": 0, + "content": "When \\(1 - \\beta > 0\\), the augmentations \\(C\\) pull the representation towards zero. When the augmentation is as strong as the fea-" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.779, + 0.825, + 0.863 + ], + "angle": 0, + "content": "ture variations, a collapse happens. One can thus introduce collapse by setting \\(\\beta\\) to be sufficiently small. When \\(1 - \\beta < 0\\), the augmentations push the weights away from the origin along its direction, resulting in no collapse at all: When one really wants to avoid collapse, one can use a rather large \\(\\beta\\); \\(\\beta = 1\\) is thus at the boundary of this bifurcating behavior. We note that existing loss functions often do not have a parameter that is directly controlling the collapse behavior (see Table 1). The \\(\\beta\\) parameter here directly controls the level of difficulty of collapse." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.825, + 0.927 + ], + "angle": 0, + "content": "Achieving invariance with dimensional collapse. Here, we closely study an illustrative minimal example to demonstrate how collapses can be beneficial. 
Consider the following structured data generating process where the input features can be separated into two sets: (1) a task-relevant set with dimension \\( d_{c} < d_{0} \\) and (2) a task-irrelevant set: \\( x = (x_{1},\\dots,x_{d_{c}},\\dots,x_{d_{0}}) \\). Our result suggests" + }, + { + "type": "image", + "bbox": [ + 0.596, + 0.33, + 0.825, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.596, + 0.477, + 0.822, + 0.612 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.592, + 0.613, + 0.826, + 0.765 + ], + "angle": 0, + "content": "Figure 3: Top: Phase diagram of representational collapses. Bottom: \\(\\beta\\)-InfoNCE with \\(\\beta = 0.5\\). The generalization error of a downstream regression task where the data augmentation (1) is isotropic and noninformative or (2) aligns with the style. We see that the performance worsens as collapses happen for the noninformative augmentation and improves as the collapse happens for the style-targeting augmentation." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "a precise way to remove the irrelevant features from the learned representation. For the purpose of causing a robust collapse, we use the \\(\\beta\\)-InfoNCE with \\(\\beta = 1/2\\). For illustration, we consider the simple case \\(d_c = 1\\) and \\(d_0 = 2\\). For any input \\(x = (x_1, x_2)\\), the label is generated as a linear function of \\(x_1\\): \\(y = cx_1\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.166, + 0.827, + 0.336 + ], + "angle": 0, + "content": "Correspondingly, we consider a structured data augmentation \\( x = \\hat{x} + \\sigma R\\xi \\), where \\( R \\in \\mathbb{R}^{d_0 \\times d_0} \\) is \\( R = \\text{diag}(\\sqrt{1 - \\theta}, \\sqrt{\\theta}) \\), where \\( \\theta \\in [0,1] \\). The parameter \\( \\sigma \\) controls the overall strength of the augmentation, and \\( \\theta \\) controls the orientation of the strength. When \\( \\theta = 0.5 \\), we have an uninformative isotropic noise that has often been used in practice. When \\( \\theta = 1 \\), the augmentation is only on the task-irrelevant feature, and when \\( \\theta = 0 \\), the augmentation is only on the content. Since the prediction target only depends on the content, we want to learn a representation invariant to the style. For the downstream regression task, we use the learned representations \\( z := f(\\hat{x}) \\) to train a ridge linear regressor that minimizes \\( \\min_G \\mathbb{E}_{\\hat{x}}[||Gz - y(\\hat{x})||^2] + 0.001||G||^2 \\). See Figure 3. The top panel shows the phase diagram of this problem with different combinations of the augmentation strengths and orientations. The bottom panel shows that collapses introduce phase-transition-like behaviors in the generalization performance and that a data augmentation aligning with the task-irrelevant dimension improves performance." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.351, + 0.465, + 0.365 + ], + "angle": 0, + "content": "4.3 ROBUSTNESS TO DATA IMBALANCE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.375, + 0.486, + 0.584 + ], + "angle": 0, + "content": "Our theory is not only relevant for understanding collapses but can also be used to understand how an SSL model encodes the feature. Liu et al. 
(2021) recently showed that compared with supervised learning, SSL techniques are relatively more robust to imbalanced datasets that have disproportionately represented minority subgroups. As another application of our analysis, we illustrate the robustness of different techniques is not equal. As we have seen, the learned model \\( W^T W \\) has eigenvalues that, to the leading order, are proportional to the Hessian \\( B \\), which is different for each loss function. As previously summarized in Table 1, for InfoNCE and SimCLR, the learned model aligns" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.336, + 0.825, + 0.485 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.494, + 0.49, + 0.825, + 0.58 + ], + "angle": 0, + "content": "Figure 4: Spectral Contrastive loss (SCL) is more robust against data imbalance than InfoNCE. We train SimCLR and SCL ResNet-12 models on imbalanced versions of CIFAR-10. We see that SCL is more robust than SimCLR, as suggested by our theory. These results are especially pronounced when there is no projector head." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.584, + 0.825, + 0.655 + ], + "angle": 0, + "content": "with the eigenvalues of the data covariance \\( A_0 \\), which varies hugely as different classes of a dataset become more and more imbalanced. In comparison, the model trained with SCL aligns purely with the augmentation covariance \\( C \\), which is independent of the data imbalance. This suggests that the SCL landscape can be less dependent on data and thus more robust against data imbalance. See Figure 4. More experimental details are given in Appendix C." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.667, + 0.321, + 0.683 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.695, + 0.827, + 0.835 + ], + "angle": 0, + "content": "In this work, we approached the problem of collapses in SSL from a loss landscape perspective. We analytically solved an effective landscape that can be extended to understand the effect of normalization. Our result suggests that dimensional collapse can be well understood in the minimal setting and is something neutral to learning on its own. With the help from the theory, we also showed that when task-irrelevant dimensions are targeted, dimensional collapse can result in improved performance, whereas an uninformative noise will (without good luck) lead to collapses in the dimensions that are relevant to the task. It is thus important for practitioners to devise targeted data augmentation mechanisms that incorporate the correct domain knowledge. Also, we advocated the thesis that the local geometry of the loss landscape around the origin is an essential component for understanding collapses, and this should invite more future work to understand the landscape around the origin." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.827, + 0.926 + ], + "angle": 0, + "content": "The limitation of our work is clear; our result only identifies the causes of the collapse that can be directly attributed to the low-rank structure of the local minima of the landscape. One possible alternative cause of the collapse is dynamics. For example, having a large learning rate and small batch can sometimes cause a convergence towards the saddle points in the landscape (Ziyin et al., 2022b), which, as we have shown, are the collapsed solutions. Investigating the role of dynamics in the collapse is thus a crucial future problem." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.37, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.133, + 0.827, + 0.19 + ], + "angle": 0, + "content": "This work was supported by a KAKENHI Grant No. JP18H01145 from the Japan Society for the Promotion of Science. Ziyin has been financially supported by the JSPS fellowship and thanks Zihan for the generous help during the writing of this paper. ESL was partially supported via NSF under the award CNS-2008151." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.211, + 0.289, + 0.226 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.234, + 0.826, + 0.277 + ], + "angle": 0, + "content": "Arora, S., Khandeparkar, H., Khodak, M., Plevrakis, O., and Saunshi, N. (2019). A Theoretical Analysis of Contrastive Unsupervised Representation Learning. In Proc. Int. Conf. on Machine Learning (ICML)." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.285, + 0.825, + 0.316 + ], + "angle": 0, + "content": "Bardes, A., Ponce, J., and LeCun, Y. (2021). Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.323, + 0.825, + 0.365 + ], + "angle": 0, + "content": "Caron, M., Misra, I., Mairal, J., Goyal, P., Bojanowski, P., and Joulin, A. (2020). Unsupervised Learning of Visual Features by Contrasting Cluster Assignments. In Proc. Adv. on Neural Information Processing Systems (NeurIPS)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.374, + 0.824, + 0.403 + ], + "angle": 0, + "content": "Caron, M., Touvron, H., Misra, I., Jegou, H., Mairal, J., Bojanowski, P., and Joulin, A. (2021). Emerging Properties in Self-Supervised Vision Transformer. arXiv, abs/2104.14294." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.412, + 0.825, + 0.453 + ], + "angle": 0, + "content": "Chen, T., Kornblith, S., Norouzi, M., and Hinton, G. (2020a). A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.462, + 0.825, + 0.504 + ], + "angle": 0, + "content": "Chen, T., Kornblith, S., Swersky, K., Norouzi, M., and Hinton, G. E. (2020b). Big Self-Supervised Models are Strong Semi-Supervised Learners. Adv. in Neural Information Processing Systems, 33." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.514, + 0.825, + 0.543 + ], + "angle": 0, + "content": "Chen, X. and He, K. (2021). Exploring Simple Siamese Representation Learning. In Proc. Int. Conf. on Computer Vision and Pattern Recognition (CVPR)." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.551, + 0.825, + 0.594 + ], + "angle": 0, + "content": "Cosentino, R., Sengupta, A., Avestimehr, S., Soltanolkotabi, M., Ortega, A., Willke, T., and Tepper, M. (2022). Toward a geometrical understanding of self-supervised contrastive learning. arXiv preprint arXiv:2205.06926." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.603, + 0.825, + 0.631 + ], + "angle": 0, + "content": "Dayan, P. and Abbott, L. F. (2005). Theoretical neuroscience: computational and mathematical modeling of neural systems. MIT press." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.64, + 0.825, + 0.682 + ], + "angle": 0, + "content": "Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al. (2020). An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.691, + 0.825, + 0.72 + ], + "angle": 0, + "content": "Ermolov, A., Siarohin, A., Sangineto, E., and Sebe, N. (2021). Whitening for self-supervised representation learning. In International Conference on Machine Learning, pages 3015-3024. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.728, + 0.825, + 0.785 + ], + "angle": 0, + "content": "Grill, J.-B., Strub, F., Altché, F., Tallec, C., Richemond, P., Buchatskaya, E., Doersch, C., Avila Pires, B., Guo, Z., Gheshlaghi Azar, M., Piot, B., kavukcuoglu, k., Munos, R., and Valko, M. (2020). Bootstrap your own latent: A new approach to self-supervised Learning. In Proc. Adv. on Neural Information Processing Systems (NeurIPS)." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.793, + 0.825, + 0.835 + ], + "angle": 0, + "content": "HaoChen, J. Z., Wei, C., Gaidon, A., and Ma, T. (2021). Provable guarantees for self-supervised deep learning with spectral contrastive loss. Advances in Neural Information Processing Systems, 34:5000-5011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.844, + 0.825, + 0.887 + ], + "angle": 0, + "content": "He, K., Fan, H., Wu, Y., Xie, S., and Girschick, R. (2020). Momentum Contrast for Unsupervised Visual Representation Learning. In Proc. Int. Conf. on Computer Vision and Pattern Recognition (CVPR)." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.896, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Hsu, H., Qi, H., and Brown, M. (2019). Measuring the Effects of Non-Identical Data Distribution for Federated Visual Classification. 
arXiv, abs/1909.06335." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.234, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Hua, T., Wang, W., Xue, Z., Ren, S., Wang, Y., and Zhao, H. (2021). On feature decorrelation in self-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9598-9608." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.825, + 0.187 + ], + "angle": 0, + "content": "Jing, L., Vincent, P., LeCun, Y., and Tian, Y. (2021). Understanding dimensional collapse in contrastive self-supervised learning. arXiv preprint arXiv:2110.09348." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.197, + 0.825, + 0.239 + ], + "angle": 0, + "content": "Kugelgen, J., Sharma, Y., Gresle, L., Brendel, W., Scholkopf, B., Besserve, M., and Locatello, F. (2021). Self-Supervised Learning with Data Augmentations Provably Isolates Content from Style. arXiv, abs/2106.04619." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.25, + 0.777, + 0.265 + ], + "angle": 0, + "content": "Landau, L. D. and Lifshitz, E. M. (2013). Statistical Physics: Volume 5, volume 5. Elsevier." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.275, + 0.825, + 0.305 + ], + "angle": 0, + "content": "Liu, H., HaoChen, J. Z., Gaidon, A., and Ma, T. (2021). Self-supervised learning is more robust to dataset imbalance. International Conference on Learning Representations." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.315, + 0.825, + 0.344 + ], + "angle": 0, + "content": "Mitrovic, J., McWilliams, B., Walker, J., Buesing, L., and Blundell, C. (2020). Representation learning via invariant causal mechanisms. arXiv preprint arXiv:2010.07922." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.354, + 0.825, + 0.396 + ], + "angle": 0, + "content": "Nozawa, K. and Sato, I. (2021). Understanding negative samples in instance discriminative self-supervised representation learning. Advances in Neural Information Processing Systems, 34:5784-5797." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.407, + 0.825, + 0.437 + ], + "angle": 0, + "content": "Oord, A. v. d., Li, Y., and Vinyals, O. (2018). Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.447, + 0.825, + 0.489 + ], + "angle": 0, + "content": "Papyan, V., Han, X., and Donoho, D. L. (2020). Prevalence of neural collapse during the terminal phase of deep learning training. Proceedings of the National Academy of Sciences, 117(40):24652-24663." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.5, + 0.825, + 0.529 + ], + "angle": 0, + "content": "Pokle, A., Tian, J., Li, Y., and Risteski, A. (2022). Contrasting the landscape of contrastive and non-contrastive learning. arXiv preprint arXiv:2203.15702." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.539, + 0.825, + 0.581 + ], + "angle": 0, + "content": "Robinson, J., Sun, L., Yu, K., Batmanghelich, K., Jegelka, S., and Sra, S. (2021). Can contrastive learning avoid shortcut solutions? Advances in neural information processing systems, 34:4974-4986." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.592, + 0.825, + 0.636 + ], + "angle": 0, + "content": "Saunshi, N., Ash, J., Goel, S., Misra, D., Zhang, C., Arora, S., Kakade, S., and Krishnamurthy, A. (2022). 
Understanding Contrastive Learning Requires Incorporating Inductive Biases. In Proc. Int. Conf. on Machine Learning (ICML)." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.646, + 0.825, + 0.675 + ], + "angle": 0, + "content": "Simon, J. B., Knutins, M., Ziyin, L., Geisz, D., Fetterman, A. J., and Albrecht, J. (2023). On the stepwise nature of self-supervised learning. arXiv preprint arXiv:2303.15438." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.685, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Tian, Y. (2022). Deep contrastive learning is provably (almost) principal component analysis. arXiv preprint arXiv:2201.12680." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.724, + 0.825, + 0.753 + ], + "angle": 0, + "content": "Tian, Y., Chen, X., and Ganguli, S. (2021). Understanding self-supervised Learning Dynamics without Contrastive Pairs. In Proc. Int. Conf. on Machine Learning (ICML)." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.764, + 0.825, + 0.806 + ], + "angle": 0, + "content": "Tian, Y., Sun, C., Poole, B., Krishnan, D., Schmid, C., and Isola, P. (2020). What makes for good views for contrastive learning? Advances in Neural Information Processing Systems, 33:6827-6839." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.817, + 0.825, + 0.846 + ], + "angle": 0, + "content": "Tosh, C., Krishnamurthy, A., and Hsu, D. (2021). Contrastive estimation reveals topic posterior information to linear models. J. Mach. Learn. Res., 22:281-1." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.856, + 0.825, + 0.886 + ], + "angle": 0, + "content": "Trivedi, P., Lubana, E. S., Heimann, M., Koutra, D., and Thiagarajan, J. J. (2022). Analyzing data-centric properties for contrastive learning on graphs. arXiv preprint arXiv:2208.02810." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Tsai, Y.-H., Wu, Y., Salakhutdinov, R., and Morency, L.-P. (2021a). 
Self-supervised Learning from a Multi-view Perspective. In Proc. Int. Conf. on Learning Representations (ICLR)." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Tsai, Y.-H. H., Ma, M. Q., Yang, M., Zhao, H., Morency, L.-P., and Salakhutdinov, R. (2021b). Self-supervised representation learning with relative predictive coding. International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.825, + 0.186 + ], + "angle": 0, + "content": "Wang, F. and Liu, H. (2021). Understanding the behaviour of contrastive loss. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2495-2504." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.193, + 0.825, + 0.224 + ], + "angle": 0, + "content": "Wang, T. and Isola, P. (2020). Understanding Contrastive Representation Learning through Alignment and Uniformity on the Hypersphere. In Proc. Int. Conf. on Machine Learning (ICML)." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.231, + 0.825, + 0.274 + ], + "angle": 0, + "content": "Wang, Y., Zhang, Q., Wang, Y., Yang, J., and Lin, Z. (2022). Chaos is a ladder: A new theoretical understanding of contrastive learning via augmentation overlap. International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.282, + 0.825, + 0.325 + ], + "angle": 0, + "content": "Wang, Z. and Ziyin, L. (2022). Posterior collapse of a linear latent variable model. In Oh, A. 
H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.334, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Wei, C., Shen, K., Chen, Y., and Ma, T. (2021). Theoretical Analysis of Self-Training with Deep Networks on Unlabeled Data. In Proc. Int. Conf. on Learning Representations (ICLR)." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.371, + 0.825, + 0.414 + ], + "angle": 0, + "content": "Zbontar, J., Jing, L., Misra, I., LeCun, Y., and Deny, S. (2021). Barlow twins: Self-supervised learning via redundancy reduction. In International Conference on Machine Learning, pages 12310-12320. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.422, + 0.825, + 0.453 + ], + "angle": 0, + "content": "Zimmerman, R., Sharma, Y., Schneider, S., Bethge, M., and Brendel, W. (2021). Contrastive Learning Inverts the Data Generating Process. In Proc. Int. Conf. on Machine Learning (ICML)." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.461, + 0.825, + 0.504 + ], + "angle": 0, + "content": "Ziyin, L., Li, B., and Meng, X. (2022a). Exact solutions of a deep linear network. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.512, + 0.825, + 0.543 + ], + "angle": 0, + "content": "Ziyin, L., Li, B., Simon, J. B., and Ueda, M. (2022b). SGD can converge to local maxima. In International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.55, + 0.825, + 0.579 + ], + "angle": 0, + "content": "Ziyin, L. and Ueda, M. (2022). Exact phase transitions in deep learning. arXiv preprint arXiv:2205.12510." 
+ }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.826, + 0.579 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.513, + 0.119 + ], + "angle": 0, + "content": "A ADDITIONAL NUMERICAL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.825, + 0.163 + ], + "angle": 0, + "content": "In this section, we validate our theory with numerical results. Unless specified otherwise, the dimension of the learned representation is set to be equal to the input dimension: \\( d_0 = d_1 \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.169, + 0.827, + 0.283 + ], + "angle": 0, + "content": "No Collapse for InfoNCE. We showed that there is no collapse at all for the vanilla InfoNCE, no matter how strong the augmentation is. Our result implies that the smallest singular value of the model \\( W \\) scales as \\( \\sigma^4 \\) where \\( \\sigma^2 \\) is the strength (namely, the variance) of the augmentation. See the left panel of Fig. 5. We use the vanilla InfoNCE loss defined in (1) with a linear model. The training set is sampled from \\( \\mathcal{N}(0, I_{32}) \\). The training proceeds with Adam with a learning rate of \\( 6e - 4 \\) with full batch training for 5000 iterations. We use a simple diagonal Gaussian noise with variance \\( \\sigma^2 \\) for data augmentation. We see that the singular values scale as \\( \\sigma^4 \\) and never vanish, as the theory predicts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.287, + 0.825, + 0.357 + ], + "angle": 0, + "content": "Nonrobust Collapses of Weighted InfoNCE. 
We now demonstrate that, as the theory predicts, collapses of weighted InfoNCE depend strongly on the dataset size. We use the same dataset and training procedure as the previous experiment. We set \\(\\alpha = 0.1\\) and change the size of the training set. Theory suggests that for a collapse in the \\(i\\)-th subspace to happen, the size of the dataset needs to obey" + }, + { + "type": "equation", + "bbox": [ + 0.417, + 0.357, + 0.825, + 0.385 + ], + "angle": 0, + "content": "\\[\nN > \\frac {a _ {i}}{c _ {i} (1 - \\alpha)} := N _ {\\text {c r i t}}. \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.388, + 0.825, + 0.432 + ], + "angle": 0, + "content": "See the middle panel of Figure 5. We show the smallest three eigenvalues of \\( W^T W \\) (roughly having similar magnitudes), and the critical dataset size for the smallest eigenvalue. We see that the theoretical threshold of collapse agrees well with where the collapse actually happens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.438, + 0.826, + 0.495 + ], + "angle": 0, + "content": "Collapses in \\(\\beta\\)-InfoNCE. With \\(\\beta < 1\\), one can cause collapses in a predictable and controllable way. In this experiment, we let \\(d_0 = 5\\) and we plot all five eigenvalues of \\(W^T W\\) as we increase the strength of an isotropic augmentation. As the numerical results show, collapses happen at the points predicted by the theory." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.501, + 0.827, + 0.572 + ], + "angle": 0, + "content": "Normalization Causes Dimensional Collapse. We also plot the three smallest eigenvalues of \\( W^T W \\) when we apply the standard representation normalization in practice: \\( f(x) \\to f(x) / \\| f(x) \\| \\). To facilitate comparison, we also use the same dataset and training procedure as before. See Figure 6. 
We see that normalization does cause a collapse in the smallest eigenvalues at an augmentation strength much smaller than the feature variation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.591, + 0.534, + 0.607 + ], + "angle": 0, + "content": "B LANDSCAPE OF A NONLINEAR MODEL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.622, + 0.827, + 0.68 + ], + "angle": 0, + "content": "In this section, we plot the landscape of the layer of nonlinear models on the same synthetic dataset we outlined in the previous section. We train a three-layer nonlinear network with output dimension 2 with SGD until convergence. We then rescale the optimized weight of the last by a factor \\(a\\): \\(W_{last} \\rightarrow aW_{last}\\) and plot the loss function along this direction. See the top panel of Figure 7 for" + }, + { + "type": "image", + "bbox": [ + 0.205, + 0.705, + 0.396, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.403, + 0.704, + 0.599, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.702, + 0.797, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.836, + 0.828, + 0.92 + ], + "angle": 0, + "content": "Figure 5: The three smallest singular values of \\( W^T W \\) as a function of the augmentation strength. We see that our effective landscape theory around the origin accurately captures collapses in learning. Left: Vanilla InfoNCE. As the theory suggests, the singular values scale as \\( \\sigma^4 \\) and do not vanish for any finite value of \\( \\sigma \\). Mid: Weight InfoNCE. \\( \\alpha = 0.1 \\), \\( \\sigma = 5 \\). Collapse happens at the critical dataset size predicted by the theory. Right: (Sqrt) Eigenvalues of \\( WW^T \\) in \\( \\beta \\)-InfoNCE. The collapses can be well controlled." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.104, + 0.605, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.24, + 0.828, + 0.283 + ], + "angle": 0, + "content": "Figure 6: A collapse happens easily when the learned representation is normalized. The smallest eigenvalues of \\(A_0\\) are roughly 0.2, and the collapse happens much before the noise reaches this strength." + }, + { + "type": "image", + "bbox": [ + 0.404, + 0.299, + 0.599, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.416, + 0.391, + 0.527 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.4, + 0.416, + 0.603, + 0.528 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.416, + 0.813, + 0.527 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.54, + 0.825, + 0.598 + ], + "angle": 0, + "content": "Figure 7: The Landscape of nonlinear models is very similar to the landscape of linear models (cf. Figure 1). Top: 1d projection of the landscape of a two-layer tanh and ReLU network. Bottom Left: the landscape of a 2D projection of the last layer of a nonlinear model with a weak augmentation. Middle: with intermediate augmentation. Right: with strong augmentation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.624, + 0.827, + 0.669 + ], + "angle": 0, + "content": "both the tanh and the ReLU nonlinearity. We then rescale the two rows of the weight matrix of the model by \\( r_1 \\) and \\( r_2 \\) respectively: \\( W = (w_{1},W_{2})^{T}\\rightarrow (r_{1}w_{1},r_{2}w_{2}) \\). 
 We see that the landscape of the model is qualitatively the same as that of the linear models, shown in Figure 1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.687, + 0.6, + 0.703 + ], + "angle": 0, + "content": "C SETUP FOR IMBALANCED DATA EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.719, + 0.825, + 0.817 + ], + "angle": 0, + "content": "Creating an Imbalanced Dataset: For our experiments measuring the influence of imbalanced datasets on SSL training, we use CIFAR-10 by sampling 20000 samples out of the 50000 training samples. The sampling process is described by a Dirichlet distribution and is often used to analyze effects of heterogeneity and data imbalance in Federated Learning problems (Hsu et al., 2019). Specifically, a small value of the distribution parameter yields a highly imbalanced dataset, while a large value yields a perfectly balanced dataset. We evaluate our models in three scenarios, for which we report below the number of samples per class:" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.822, + 0.608, + 0.839 + ], + "angle": 0, + "content": "- High imbalance: [4890, 87, 5000, 0, 74, 0, 0, 212, 4788, 4947]" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.841, + 0.71, + 0.857 + ], + "angle": 0, + "content": "- Medium imbalance: [4268, 4296, 1741, 420, 945, 161, 4633, 1015, 131, 2386]" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.86, + 0.709, + 0.876 + ], + "angle": 0, + "content": "- No imbalance: [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000]" + }, + { + "type": "list", + "bbox": [ + 0.179, + 0.822, + 0.71, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Training Setup: We use ResNet-12 models as the backbone for all experiments due to computational constraints. SimCLR augmentations (Chen et al., 2020a) are followed, except for a reduced strength of resized cropping from 0.2 to 0.5. 
All training involves a standardly used cosine decay learning" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.19 + ], + "angle": 0, + "content": "rate schedule, starting at 0.03 and decaying to 0.001. When a projector module is used, it involves a two-layer MLP with hidden dimension of 512 and BatchNorm layer in between. We use SGD for optimization and perform the standardly used linear evaluation protocol for measuring the quality of the final representation. For training the linear layer, we use an initial learning rate of 10 and decay it to 0.01 with a cosine schedule. We note linear evaluation is used for supervised models as well, following the practice advocated by Liu et al. (2021)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.214, + 0.548, + 0.23 + ], + "angle": 0, + "content": "D ADDITIONAL THEORETICAL CONCERNS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.249, + 0.533, + 0.263 + ], + "angle": 0, + "content": "D.1 COLLAPSE CONDITION FOR NORMALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.277, + 0.825, + 0.307 + ], + "angle": 0, + "content": "The important condition for collapse in Eq. (13) can be better understood by considering the extreme cases. 
First of all, note that the eigenvalues of \\(\\Sigma B_{M}\\) are bounded between \\(-1\\) and \\(1\\)" + }, + { + "type": "equation", + "bbox": [ + 0.44, + 0.318, + 0.826, + 0.348 + ], + "angle": 0, + "content": "\\[\n- 1 \\leq \\frac {a _ {i} - c _ {i}}{a _ {i} + c _ {i}} \\leq 1, \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.357, + 0.611, + 0.373 + ], + "angle": 0, + "content": "and \\(-1\\) is achieved when \\(c_{i} \\gg a_{i}\\), and 1 is achieved when \\(a_{i} \\gg c_{i}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.378, + 0.825, + 0.406 + ], + "angle": 0, + "content": "When the augmentation is negligibly small, \\(\\Sigma^{-1}B_M\\approx M\\), and \\(\\lambda_{i}\\approx \\bar{\\lambda} = 1\\), the condition thus becomes" + }, + { + "type": "equation", + "bbox": [ + 0.468, + 0.41, + 0.826, + 0.441 + ], + "angle": 0, + "content": "\\[\n\\frac {2}{d _ {M}} > 0, \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.45, + 0.825, + 0.493 + ], + "angle": 0, + "content": "which always holds. Thus, a sufficiently small augmentation will never cause collapse. Next, when we apply very strong augmentation to the \\(j\\)-th subspace and zero augmentation to the others, the condition for the non-augmented spaces becomes" + }, + { + "type": "equation", + "bbox": [ + 0.433, + 0.505, + 0.826, + 0.537 + ], + "angle": 0, + "content": "\\[\n1 + \\frac {2}{d _ {M}} > \\frac {d _ {M} - 2}{d _ {M}}, \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.547, + 0.684, + 0.563 + ], + "angle": 0, + "content": "meaning that the collapse will not happen. 
For the \\(j\\)-th space, the condition is" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.574, + 0.826, + 0.607 + ], + "angle": 0, + "content": "\\[\n- 1 + \\frac {2}{d _ {M}} > \\frac {d _ {M} - 2}{d _ {M}} (\\Longleftrightarrow) \\frac {4}{d _ {M}} > 2, \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.616, + 0.825, + 0.7 + ], + "angle": 0, + "content": "which is only possible when \\( d_M = 1 \\), namely, the strongly augmented space is the only space that does not collapse. This is reasonable when the original data is rank-1 because the normalization will ensure that this space does not collapse, but when the original data is not rank-1, this stationary point will be a saddle and will not be preferred by gradient descent. In different words, a strong enough augmentation will cause a collapse in the corresponding subspace, as is the case without normalization." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.707, + 0.827, + 0.737 + ], + "angle": 0, + "content": "It is also interesting to note that having \\( c_{i} \\geq a_{i} \\) is no longer sufficient to cause a collapse. For example, let \\( c_{1} = 0 \\) and \\( c_{j} = a_{j} \\) for \\( j \\neq 1 \\). The condition for \\( j \\neq 1 \\) becomes" + }, + { + "type": "equation", + "bbox": [ + 0.458, + 0.748, + 0.826, + 0.78 + ], + "angle": 0, + "content": "\\[\n\\frac {2}{d _ {M}} > \\frac {1}{d _ {M}}, \\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.79, + 0.825, + 0.845 + ], + "angle": 0, + "content": "which always holds. At the same time, it does not mean that collapsing has become harder in general. For example, it is also possible for \\( c_{i} < a_{i} \\) to cause a collapse. 
 Suppose we add a weak augmentation only to the first subspace such that \\( a_{i} - c_{i} = \\epsilon > 0 \\), the condition for this dimension not to collapse is" + }, + { + "type": "equation", + "bbox": [ + 0.403, + 0.849, + 0.825, + 0.881 + ], + "angle": 0, + "content": "\\[\n\\frac {\\epsilon}{a _ {i} + c _ {i}} + \\frac {2}{d _ {M}} > \\frac {d _ {M} - 1 + \\epsilon}{d _ {M}}, \\tag {23}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.891, + 0.825, + 0.926 + ], + "angle": 0, + "content": "which can be violated whenever \\(\\epsilon < \\frac{(a_i + c_i)(d_M - 3)}{a_i + c_i + d_M}\\). Namely, in some cases, normalization can in fact facilitate collapse." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.277, + 0.119 + ], + "angle": 0, + "content": "E PROOFS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.134, + 0.4, + 0.149 + ], + "angle": 0, + "content": "E.1 PROOF OF PROPOSITION 1" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.16, + 0.514, + 0.176 + ], + "angle": 0, + "content": "Proof. The second term in Eq. 
(3) can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.183, + 0.825, + 0.26 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right] = \\mathbb {E} \\left[ \\left(\\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right]\\right) ^ {2} \\right] - \\mathbb {E} \\left[ \\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right] \\right] ^ {2} (24) \\\\ = [ \\text {f i r s t} \\quad \\text {t e r m} ] - 4 \\operatorname {T r} [ W (A _ {0} + C) W ^ {T} ] ^ {2} (25) \\\\ = [ f i r s t \\text {t e r m} ] - 4 \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2}, (26) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.265, + 0.595, + 0.28 + ], + "angle": 0, + "content": "where we have used the definition \\(\\Sigma = A_0 + C\\). The first term is" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.285, + 0.824, + 0.316 + ], + "angle": 0, + "content": "\\[\n[ f i r s t \\text {t e r m} ] = \\mathbb {E} \\left[ \\left(\\operatorname {T r} [ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} ]\\right) ^ {2} \\right] = 4 \\operatorname {T r} [ W \\Sigma W ^ {T} ] ^ {2} + 8 \\operatorname {T r} [ W \\Sigma W ^ {T} W \\Sigma W ^ {T} ]. \\tag {27}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.316, + 0.642, + 0.331 + ], + "angle": 0, + "content": "Combining the above expressions, we see that Eq. (3) can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.354, + 0.338, + 0.824, + 0.385 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right] (28) \\\\ = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right]. 
(29) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.391, + 0.345, + 0.407 + ], + "angle": 0, + "content": "This finishes the proof. \\(\\square\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.423, + 0.375, + 0.437 + ], + "angle": 0, + "content": "E.2 PROOF OF THEOREM 1" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.449, + 0.497, + 0.464 + ], + "angle": 0, + "content": "Proof. All stationary points have a zero gradient:" + }, + { + "type": "equation", + "bbox": [ + 0.405, + 0.471, + 0.824, + 0.487 + ], + "angle": 0, + "content": "\\[\n- 2 W B + 4 W \\Sigma W ^ {T} W \\Sigma = 0. \\tag {30}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.493, + 0.522, + 0.51 + ], + "angle": 0, + "content": "Multiplying by \\(W^T\\) on the left and \\(B^{-1}\\) on the right," + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.516, + 0.824, + 0.533 + ], + "angle": 0, + "content": "\\[\nW ^ {T} W = 2 W ^ {T} W \\Sigma W ^ {T} W \\Sigma B ^ {- 1} \\tag {31}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.302, + 0.539, + 0.824, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\left(\\Longleftrightarrow\\right) \\quad \\Sigma^ {1 / 2} W ^ {T} W \\Sigma^ {1 / 2} = 2 \\Sigma^ {1 / 2} W ^ {T} W \\Sigma W ^ {T} W \\Sigma B ^ {- 1} \\Sigma^ {1 / 2} \\tag {32}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.561, + 0.456, + 0.577 + ], + "angle": 0, + "content": "Defining \\(H \\coloneqq \\Sigma^{1/2} W^T W \\Sigma^{1/2}\\), we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.416, + 0.585, + 0.824, + 0.601 + ], + "angle": 0, + "content": "\\[\nH = 2 H ^ {2} \\Sigma^ {1 / 2} \\Sigma B ^ {- 1} \\Sigma^ {1 / 2}, \\tag {33}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.608, + 0.824, + 0.626 + ], + "angle": 0, + "content": "\\[\n\\left(\\Longleftrightarrow\\right) \\quad H \\left(I - 2 H \\Sigma^ {1 / 2} B ^ {- 1} \\Sigma^ {1 / 2}\\right) = 0. 
\\tag {34}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.63, + 0.825, + 0.677 + ], + "angle": 0, + "content": "Because both \\( H \\) and \\( \\Sigma^{1/2}\\Sigma B^{-1}\\Sigma^{1/2} \\) are symmetric, one can take the transpose of Eq. (33) to find that \\( H \\) and \\( \\Sigma^{1/2}B^{-1}\\Sigma^{1/2} \\) commute with each, which implies that \\( H \\) has the same eigenvectors as \\( \\Sigma^{1/2}B^{-1}\\Sigma^{1/2}/2 \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Eq. (34) then implies that the eigenvalues of \\(H\\) is either the inverse of that of \\(\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}\\) or zero. This implies that any stationary point of \\(H\\) can be written in the form" + }, + { + "type": "equation", + "bbox": [ + 0.442, + 0.721, + 0.824, + 0.749 + ], + "angle": 0, + "content": "\\[\nH = \\frac {1}{2} U M \\Lambda U ^ {T}, \\tag {35}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.756, + 0.826, + 0.811 + ], + "angle": 0, + "content": "where \\(U\\) is a unitary matrix, \\(\\Lambda\\) is diagonal matrix containing the eigenvalues of \\(\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}\\), and \\(M\\) is an arbitrary (masking) diagonal matrix containing only zero or one such that (1) \\(M_{ii} = 0\\) if \\(\\Lambda_{ii} < 0\\) and (2) contain at most \\(d^*\\) nonzero terms. This then implies that the weight matrix \\(W\\) satisfies" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.81, + 0.824, + 0.838 + ], + "angle": 0, + "content": "\\[\nW ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1 / 2} U M \\Lambda U ^ {T} \\Sigma^ {- 1 / 2}. 
\\tag {36}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.84, + 0.632, + 0.856 + ], + "angle": 0, + "content": "Lastly, when \\(\\Sigma\\) and \\(B\\) commute, we can compactly write the result as" + }, + { + "type": "equation", + "bbox": [ + 0.419, + 0.862, + 0.824, + 0.89 + ], + "angle": 0, + "content": "\\[\nW ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1} B _ {M} \\Sigma^ {- 1}, \\tag {37}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "where \\( B_{M} \\) denotes the matrix obtained by masking the eigenvalues of \\( B \\) with \\( M \\). This finishes the proof." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.51, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.4, + 0.119 + ], + "angle": 0, + "content": "E.3 PROOF OF PROPOSITION 2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.825, + 0.171 + ], + "angle": 0, + "content": "Proof. For all stationary points, \\( W^T W \\) commutes with \\( B \\) and \\( \\Sigma \\), which means that at these stationary points, one can simultaneously diagonalize all the matrices and the loss function (3) can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.428, + 0.171, + 0.824, + 0.207 + ], + "angle": 0, + "content": "\\[\nL = - \\sum_ {i = 1} ^ {d ^ {*}} \\lambda_ {i} b _ {i} + \\lambda_ {i} ^ {2} s _ {i} ^ {2} \\tag {38}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.21, + 0.628, + 0.226 + ], + "angle": 0, + "content": "where \\(\\lambda_{i}, b_{i}, s_{i}\\) are the eigenvalues of \\(W^{T}W\\), \\(B\\), and \\(\\Sigma\\) respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.231, + 0.825, + 0.289 + ], + "angle": 0, + "content": "We can thus consider each \\(i\\) separately. 
When \\(b_{i} > 0\\), \\(\\lambda_{i} = 0\\) cannot be a local minimum because the local Hessian is \\(-b_{i} < 0\\). When \\(b_{i} \\leq 0\\), the only stationary point is \\(\\lambda_{i} = 0\\). This sum covers at most \\(d^{*}\\) summands, and so, at the local minima, \\(\\lambda_{i} \\neq\\) if and only if \\(b_{i} > 0\\), and so the number of non-zero eigenvalues of \\(W^{T}W\\) is \\(\\min(m, d^{*})\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.304, + 0.4, + 0.318 + ], + "angle": 0, + "content": "E.4 PROOF OF PROPOSITION 3" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.33, + 0.457, + 0.345 + ], + "angle": 0, + "content": "Proof. The regularization can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.349, + 0.825, + 0.389 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} R = \\left[ \\left(\\mathbb {E} _ {x} \\| W x \\| ^ {2} - c\\right) ^ {2} \\right] (39) \\\\ = \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2} - 2 c \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] + c ^ {2}. (40) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.399, + 0.389, + 0.414 + ], + "angle": 0, + "content": "By Proposition 1, Eq. (10) reads" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.418, + 0.825, + 0.458 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right] + \\kappa \\left(\\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2} - 2 \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] + 1\\right) (41) \\\\ = - \\operatorname {T r} \\left[ W (B + 2 \\kappa c \\Sigma) W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right] + \\kappa \\rho^ {2}. 
(42) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.462, + 0.316, + 0.476 + ], + "angle": 0, + "content": "The derivative of \\(\\rho\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.445, + 0.475, + 0.825, + 0.505 + ], + "angle": 0, + "content": "\\[\n\\frac {d}{d W} \\rho = 4 \\rho W \\Sigma . \\tag {43}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.505, + 0.397, + 0.52 + ], + "angle": 0, + "content": "The zero-gradient gradient is thus" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.525, + 0.825, + 0.543 + ], + "angle": 0, + "content": "\\[\n- 2 W (B + 2 \\kappa c \\Sigma - 2 \\kappa \\rho \\Sigma) + 4 W \\Sigma W ^ {T} W \\Sigma = 0. \\tag {44}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.546, + 0.825, + 0.587 + ], + "angle": 0, + "content": "We can define \\( B' \\coloneqq B + 2\\kappa c\\Sigma - 2\\kappa \\rho \\Sigma \\) to see that this condition is the same as Eq. (30) in the proof of Theorem 1. The rest of the proof thus follows from the arguments. We thus arrive at the theorem statement:" + }, + { + "type": "equation", + "bbox": [ + 0.417, + 0.586, + 0.825, + 0.614 + ], + "angle": 0, + "content": "\\[\nW ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1} B _ {M} ^ {\\prime} \\Sigma^ {- 1}. \\tag {45}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.615, + 0.279, + 0.629 + ], + "angle": 0, + "content": "We are done. \\(\\square\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.646, + 0.4, + 0.659 + ], + "angle": 0, + "content": "E.5 PROOF OF PROPOSITION 4" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.671, + 0.825, + 0.7 + ], + "angle": 0, + "content": "Proof. 
Recalling that \\(\\rho = \\mathrm{Tr}[W\\Sigma W^T]\\), we multiply \\(\\Sigma\\) from the right to both sides of the solution in Proposition 3 and take trace:" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.705, + 0.825, + 0.774 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} ^ {\\prime} \\right] = \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} \\left(B _ {M} + 2 \\kappa (c - \\rho) \\Sigma_ {M}\\right) \\right] (46) \\\\ = \\operatorname {T r} \\left[ W ^ {T} W \\Sigma \\right] (47) \\\\ = \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] = \\rho . (48) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.779, + 0.394, + 0.793 + ], + "angle": 0, + "content": "The first line further simplifies to" + }, + { + "type": "equation", + "bbox": [ + 0.266, + 0.798, + 0.825, + 0.827 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right] + \\kappa (c - \\rho) \\operatorname {T r} \\left[ \\Sigma^ {- 1} \\Sigma_ {M} \\right] = \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right] + \\kappa (c - \\rho) d _ {M}, \\tag {49}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.831, + 0.61, + 0.848 + ], + "angle": 0, + "content": "where \\(d_M \\coloneqq \\operatorname{Tr}[M]\\) is the number of nonzero eigenvalues of \\(B_M'\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.853, + 0.445, + 0.867 + ], + "angle": 0, + "content": "This gives an equation of \\(\\rho\\) that solves to" + }, + { + "type": "equation", + "bbox": [ + 0.408, + 0.871, + 0.825, + 0.907 + ], + "angle": 0, + "content": "\\[\nc - \\rho = \\frac {c - \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right]}{1 + \\kappa d _ {M}}. \\tag {50}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.376, + 0.925 + ], + "angle": 0, + "content": "This proves the proposition. 
\\(\\square\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.543, + 0.119 + ], + "angle": 0, + "content": "F ADDITIONAL THEORETICAL CONCERNS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.134, + 0.663, + 0.149 + ], + "angle": 0, + "content": "F.1 CASE OF DATA-INDEPENDENT NON-GAUSSIAN AUGMENTATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.16, + 0.827, + 0.215 + ], + "angle": 0, + "content": "In the main text, we mainly considered the case when the noise is Gaussian. In this section, we consider a case where the noise is data-dependent and non-Gaussian. We show that the results we discussed in the main text still hold qualitatively. The general form of the loss function in Eq. (3) still applies:" + }, + { + "type": "equation", + "bbox": [ + 0.356, + 0.216, + 0.825, + 0.243 + ], + "angle": 0, + "content": "\\[\nL = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right]. \\tag {51}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.246, + 0.592, + 0.261 + ], + "angle": 0, + "content": "We consider a global rescaling augmentation for each datum \\(x\\):" + }, + { + "type": "equation", + "bbox": [ + 0.472, + 0.269, + 0.825, + 0.284 + ], + "angle": 0, + "content": "\\[\nx = s \\hat {x}, \\tag {52}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.291, + 0.825, + 0.377 + ], + "angle": 0, + "content": "where \\( s \\sim \\exp(b) \\) obeys an exponential distribution with mean \\( b \\) and variance \\( b^2 \\). Note that even if \\( \\hat{x} \\) is Gaussian, the augmented data is no longer Gaussian. In particular, the augmentation now becomes data-dependent. 
This augmentation can also be seen as a structured, biologically plausible data augmentation that encourages the model to be scale-invariant, which is what Wien's law for biological perception demands (Dayan and Abbott, 2005): no matter whether an image is dark or bright, the content of the image is the same." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.382, + 0.761, + 0.397 + ], + "angle": 0, + "content": "Under this augmentation, the noise covariance is dependent on \\( x \\) and no longer Gaussian:" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.404, + 0.825, + 0.422 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ x x ^ {T} \\right] = 2 b ^ {2} A _ {0}. \\tag {53}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.429, + 0.304, + 0.443 + ], + "angle": 0, + "content": "We also obtain that" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.44, + 0.826, + 0.459 + ], + "angle": 0, + "content": "\\[\nC = \\mathbb {E} \\left[ (b - s) ^ {2} x x ^ {T} \\right] = b ^ {2} A _ {0}. \\tag {54}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.462, + 0.468, + 0.476 + ], + "angle": 0, + "content": "The second term in Eq. (3) can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.483, + 0.825, + 0.561 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left. \\right. 
\\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right] = \\mathbb {E} \\left[\\left(\\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right]\\right) ^ {2} \\right] - \\mathbb {E} \\left[ \\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right]\\right] ^ {2} (55) \\\\ = [ f i r s t \\text {t e r m} ] - 4 \\operatorname {T r} [ W (A _ {0} + C) W ^ {T} ] ^ {2} (56) \\\\ = [ f i r s t \\text {t e r m} ] - 4 \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2}, (57) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.568, + 0.595, + 0.582 + ], + "angle": 0, + "content": "where we have used the definition \\(\\Sigma = A_0 + C\\). The first term is" + }, + { + "type": "equation", + "bbox": [ + 0.322, + 0.588, + 0.825, + 0.609 + ], + "angle": 0, + "content": "\\[\n[ f i r s t \\text {t e r m} ] = \\mathbb {E} \\left[ \\left(\\operatorname {T r} [ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} ]\\right) ^ {2} \\right]. \\tag {58}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.615, + 0.825, + 0.647 + ], + "angle": 0, + "content": "However, for fixed rescaling factor \\( s_x \\) and \\( s_\\chi \\), each \\( W(x - \\chi) \\) obeys a multivariate Gaussian distribution with variance \\( 2(s_x^2 + s_\\chi^2)WA_0 \\), and so we have" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.654, + 0.826, + 0.674 + ], + "angle": 0, + "content": "\\[\n[ f i r s t \\text {t e r m} ] = \\mathbb {E} _ {s _ {x}, s _ {\\chi}} \\left[ \\left(s _ {x} ^ {2} + s _ {\\chi} ^ {2}\\right) ^ {2} \\right] \\left(4 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] ^ {2} + 8 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \\right]\\right), \\tag {59}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.681, + 0.62, + 0.699 + ], + "angle": 0, + "content": "where \\(\\mathbb{E}_{s_x,s_\\chi}\\big[(s_x^2 +s_\\chi^2)^2\\big] = 56b^4\\) . 
Combining terms, we obtain that" + }, + { + "type": "equation", + "bbox": [ + 0.231, + 0.706, + 0.826, + 0.726 + ], + "angle": 0, + "content": "\\[\n\\operatorname {V a r} \\left[ \\left| W (x - \\chi) \\right| ^ {2} \\right] = 4 8 b ^ {2} \\times 4 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] ^ {2} + 5 6 b ^ {4} \\times 8 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \\right]. \\tag {60}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.739, + 0.341, + 0.753 + ], + "angle": 0, + "content": "The loss function is thus:" + }, + { + "type": "equation", + "bbox": [ + 0.253, + 0.759, + 0.826, + 0.779 + ], + "angle": 0, + "content": "\\[\nL = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + 2 4 b ^ {2} \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] ^ {2} + 5 6 b ^ {4} \\operatorname {T r} \\left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \\right]. \\tag {61}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.791, + 0.825, + 0.876 + ], + "angle": 0, + "content": "Note that this loss function is a special case of the loss function in Eq. (10) where \\( c = 0 \\) and \\( \\kappa = 24b^2 \\) (and with a rescaled fourth-order term). As in the main text, \\( B \\) is different according to different choices of loss functions. Because \\( B \\) commute with \\( A_0 \\) by construction, one expects collapses to happen at locations predicted by Proposition 3 and 4 under suitable choices of parameters. Also note that the odd terms vanish as discussed, and so the local stability of the origin should decide the collapsing behavior of this situation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "This shows that collapse can also happen when the data augmentation is structured. We comment that the analysis in this section is minimal, and one important future direction is to provide more precise and insightful conditions of collapse under structured data augmentation." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ] +] \ No newline at end of file diff --git a/2023/What shapes the loss landscape of self supervised learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_origin.pdf b/2023/What shapes the loss landscape of self supervised learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1916eeca04225cbd396ac5da5d4b08cdcb3afab2 --- /dev/null +++ b/2023/What shapes the loss landscape of self supervised learning_/2fc00309-6678-46e3-bb56-f662dfd5b3bb_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2479185aa118f84e2b9d35841b3561f06744bbc09bcb88e3120321b4e048369 +size 4605342 diff --git a/2023/What shapes the loss landscape of self supervised learning_/full.md b/2023/What shapes the loss landscape of self supervised learning_/full.md new file mode 100644 index 0000000000000000000000000000000000000000..a2508457b57c2a2d0bda83e7b133d199341d5088 --- /dev/null +++ b/2023/What shapes the loss landscape of self supervised learning_/full.md @@ -0,0 +1,677 @@ +# WHAT SHAPES THE LOSS LANDSCAPE OF SELF SUPERVISED LEARNING? + +Liu Ziyin $^{1,2,3\dagger}$ , Ekdeep Singh Lubana $^{2,3,4\dagger}$ , Masahito Ueda $^{1,5,6}$ , Hidenori Tanaka $^{2,3}$ + +$^{1}$ Department of Physics, The University of Tokyo, Tokyo, Japan + +$^{2}$ Physics & Informatics Laboratories, NTT Research, Inc., Sunnyvale, CA, USA + +3Center for Brain Science, Harvard University, Cambridge, USA + +$^{4}$ EECS Department, University of Michigan, Ann Arbor, USA + +$^{5}$ Institute for Physics of Intelligence, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo + +$^{6}$ RIKEN Center for Emergent Matter Science (CEMS), Wako, Saitama, Japan + +# ABSTRACT + +Prevention of complete and dimensional collapse of representations has recently become a design principle for self-supervised learning (SSL). 
However, questions remain in our theoretical understanding: When do those collapses occur? What are the mechanisms and causes? We answer these questions by deriving and thoroughly analyzing an analytically tractable theory of SSL loss landscapes. In this theory, we identify the causes of the dimensional collapse and study the effect of normalization and bias. Finally, we leverage the interpretability afforded by the analytical theory to understand how dimensional collapse can be beneficial and what affects the robustness of SSL against data imbalance. + +# 1 INTRODUCTION + +Self-supervised learning (SSL) methods have achieved remarkable success in learning good representations without labeled data (Chen et al., 2020b). Loss functions used in such SSL techniques promote representational similarity between pairs of related samples while using explicit penalties (Chen et al., 2020a; He et al., 2020; Zbontar et al., 2021; Caron et al., 2020) or asymmetric dynamics (Caron et al., 2021; Grill et al., 2020; Chen and He, 2021) to ensure that the distance between unrelated samples remains large. In practice, however, SSL training often experiences the phenomenon of dimensional collapse (Jing et al., 2021; Tian et al., 2021; Pokle et al., 2022), where the learned representation spans a low dimensional subspace of the overall available space. In the extreme case, this failure mode instantiates as a complete collapse, where the learned representation becomes zero-rank, and no informative features can be extracted. + +Prior work has primarily positioned such collapses in SSL as enemies of learning, arguing that they can negatively impact downstream task performance (Zbontar et al., 2021; Jing et al., 2021; Bardes et al., 2021). However, recent work by Cosentino et al. (2022) empirically demonstrates otherwise: quality of representations can be improved when there is a degree of collapse. 
These conflicting results indicate that despite extensive empirical explorations, a gap remains in our understanding of the collapse phenomenon in SSL training. We argue that this gap is due to the lack of a theoretical framework to analyze the mechanisms promoting collapsed representations. We aim to close this gap by carefully studying the loss landscapes of SSL. + +In this work, we analytically solve the effective landscapes of linear models trained on several popular losses used in self-supervised learning, including InfoNCE (Oord et al., 2018), Normalized Temperature Cross-Entropy (NT-xent) (Chen et al., 2020a), Spectral Contrastive Loss (HaoChen et al., 2021), and Barlow Twins / VICReg (Zbontar et al., 2021; Bardes et al., 2021). The main thesis of this work is: the local geometry of the SSL landscapes around the origin crucially decides the learning behavior of SSL models. Technically, we show that + +1. the interplay between data variation and data augmentation determines the geometry of the loss; +2. the geometry of the loss explains when dimensional collapse can be helpful and why certain SSL losses are robust against data imbalance, but not the others. + +To the best of our knowledge, our work is the first to study the landscape causes of collapse in SSL thoroughly. + +†Work done during an internship at Physics & Informatics Laboratories, NTT Research. + +![](images/8646768e22b533aecebc4335e5d0ac391451a5247e209ea684d46b6e91eeeaff.jpg) +(a) An eigenmode + +![](images/cbbd6117e91840b6e94713dcb5d6158eb9b8b02f0b21496e2397f12a4de3fadb.jpg) +(b) No collapse + +![](images/2c09e4bcd2758fb826668aee9a0defc0a67ebd2ab0285f41d0bca338d1f516d4.jpg) +(c) Dimensional collapse + +![](images/5f5d0ef31c9b33d250ec46485b1a7b883ec443bf1d735a885a7fdbec145b7bf4.jpg) +(d) Complete collapse +Figure 1: Landscape in self-supervised learning (SSL). SSL losses generally depend only on the relative angle between pairs of network outputs (e.g., $f(x)^T f(x')$ ). 
Thus, the landscapes with a linear network ( $f(x) = Wx$ ) have a global rotational symmetry and are symmetric about the origin. Our theory finds that the local stability at the origin decides the collapse, and larger data variation (green) prevents collapse, while strong data augmentation (red) can promote collapse. We plot the loss for a toy linear model with a diagonal weight matrix $diag(r_1, r_2)$ . (a) The 1d landscape when fixing one of the parameter. (b-d) The 2d landscape. (b) No collapse: the origin is an unstable local maximum, and surrounding local minima avoid collapse. The dimensionally collapsed solutions are the saddle points. (c) Dimensional collapse: the value of $w_1$ for all stable fixed points collapses to zero. (d) Complete collapse: the origin becomes the isolated local minimum. + +# 2 RELATED WORKS + +SSL and Collapses. On the one hand, prior literature has often argued collapse as a harmful phenomenon that can deteriorate downstream task performance (Jing et al., 2021; Zbontar et al., 2021). Preventing such collapsed representations is a frequently discussed topic in literature (Hua et al., 2021; Jing et al., 2021; Pokle et al., 2022; Tian et al., 2021) and has motivated the design of several SSL techniques (Zbontar et al., 2021; Bardes et al., 2021; Ermolov et al., 2021). On the other hand, Cosentino et al. (2022) empirically showed that dimensional collapses under strong augmentations could significantly improve generalization performance. Our work demystifies these conflicting results by finding analytic solutions to loss landscapes of several standard SSL techniques. + +Theoretical Advances in SSL. 
Recently, several advances have been made towards understanding the success of SSL techniques from different perspectives: e.g., learning theory (Arora et al., 2019; Saunshi et al., 2022; Nozawa and Sato, 2021; Wei et al., 2021), information theory (Tsai et al., 2021a;b; Tosh et al., 2021), causality and data-generating processes (Zimmerman et al., 2021; Kugelgen et al., 2021; Trivedi et al., 2022; Tian et al., 2020; Mitrovic et al., 2020; Wang et al., 2022), dynamics (Wang and Isola, 2020; Tian et al., 2021; Tian, 2022; Wang and Liu, 2021; Simon et al., 2023), and loss landscapes (Pokle et al., 2022). These advances have unveiled practically useful properties of SSL, such as robustness to dataset imbalance (Liu et al., 2021) and principled solutions to avoid spurious correlations (Robinson et al., 2021). + +The work by Jing et al. (2021) is the closest to ours in problem setting. In that paper, the authors focused on studying the linearized learning dynamics and suggested that a competition between the feature signal strength and augmentation strength can lead to dimensional collapse. In contrast, our focus is on the landscape and our result implies that this feature-augmentation competition on its own is insufficient to cause a dimensional collapse. In fact, we show that there will be no collapse in the setting studied by Jing et al. (2021). + +# 3 A LANDSCAPE THEORY OF SELF-SUPERVISED-LEARNING + +This section presents the main theoretical results. Let $\{\hat{x}_i\}_i^N$ be a dataset with $N$ data points. For every data point $\hat{x}$ , we augment it with an i.i.d. noise $\epsilon$ such that $x \coloneqq \hat{x} + \epsilon$ . 
To be concrete, we start with considering the standard contrastive loss, InfoNCE (Oord et al., 2018): + +$$ +L = \mathbb {E} _ {\epsilon} \left[ - \sum_ {i = 1} ^ {N} \log \frac {\exp (- | f (x _ {i}) - f \left(x _ {i} ^ {\prime}\right) | ^ {2} / 2)}{\sum_ {j \neq i} \exp (- | f (x _ {i}) - f (\chi_ {j}) | ^ {2} / 2) + \exp (- | f (x _ {i}) - f \left(x _ {i} ^ {\prime}\right) | ^ {2} / 2)} \right], \tag {1} +$$ + +where $f(x) \in \mathbb{R}^{d_1}$ is the model output; all $x, x'$ and $\chi$ are augmented data points for some independent additive noise $\epsilon$ such that $\mathbb{E}_{\epsilon}[x] = \hat{x} = \mathbb{E}_{\epsilon}[x'] \neq \mathbb{E}_{\epsilon}[\chi] = \hat{\chi}$ . We decompose the model output into a general function $\phi(x) \in \mathbb{R}^{d_0}$ and the last-layer weight matrix $W \in \mathbb{R}^{d_1 \times d_0}$ : $f(x) = W\phi(x)$ . The covariance of $\phi(\hat{x})$ is $A_0 := \mathbb{E}_{\hat{x}}[\phi(\hat{x})\phi(\hat{x})^T]$ , and the covariance of the data-augmented penultimate layer representation is $\Sigma := \mathbb{E}_x[\phi(x)\phi(x)^T]$ . The effect of data augmentation on the learned + +representation is captured through a symmetric matrix $C \coloneqq \Sigma - A_0$ . For a general $\phi$ , the eigenvalues of $C$ can be either positive or negative. When $\phi$ is the identity mapping, $A_0$ becomes the empirical data covariance, $C$ becomes positive semi-definite and is the covariance of the noise $\epsilon$ , and $\Sigma$ is the covariance of the augmented data. In some sense, this loss function captures the essence of SSL: the numerator encourages the representation $f(x)$ to be closer to the representation of similar data, and the denominator encourages a separation between dissimilar data. 
+ +For a fixed set of noises, we can write the InfoNCE in a cleaner form: + +$$ +L _ {\epsilon} = \mathbb {E} _ {\hat {x}} \left\{\frac {1}{2} | f (x) - f \left(x ^ {\prime}\right) | ^ {2} + \log \mathbb {E} _ {\hat {\chi}} \left[ \exp \left(- \frac {1}{2} | f (x) - f (\chi) | ^ {2}\right) \right] \right\}, \tag {2} +$$ + +where we used $\mathbb{E}_{\hat{x}}$ to denote an averaging over the training set. + +In this notation, we have $\mathbb{E}_{\epsilon}\mathbb{E}_{\hat{x}}[x] = \mathbb{E}_x[x]$ and $\mathbb{E}_{\epsilon}[L_{\epsilon}] = L$ . We first show that the expansion of the loss function around the origin takes a rather universal form. We then find analytical solutions to the stationary points of this landscape and study their relevance to feature learning and collapses. See Table 1 for a summary of the main results. The proofs are presented in Appendix E. For a quantitative understanding, we mainly focus on the case when $\phi$ is the identity function. We discuss the general nonlinear case in Section 4.1. + +# 3.1 LANDSCAPE OF A LINEAR MODEL + +We first analyze representative SSL loss functions and show that to leading order in $W$ , the local geometry of SSL losses takes the following form + +$$ +L = - \operatorname {T r} \left[ W B W ^ {T} \right] + \frac {1}{8} \operatorname {V a r} \left[ \left| W (x - \chi) \right| ^ {2} \right]. \tag {3} +$$ + +
| Loss | Hessian | Dim. | Complete |
| --- | --- | --- | --- |
| InfoNCE | $A_0$ | X | X |
| NT-Xent (SimCLR) | $A_0 - C/N$ | $\checkmark$ | $\checkmark$ |
| Spectral Contrastive | $C$ | X | X |
| Barlow Twins | $A_0 + C$ | X | X |
| + Normalization | $-$ | $\checkmark$ | X |
| + bias | $-$ | $\checkmark$ | $\checkmark$ |
| + Weight Decay | $+\gamma I$ | $\checkmark$ | $\checkmark$ |
+ +Table 1: What shapes the SSL landscapes around the origin? For each of the SSL losses, the combination of data covariance $(A_0)$ , data-augmentation covariance $(C)$ , and dataset size $(N)$ can affect its stability and thus determine the presence $(\checkmark)$ and absence $(X)$ of dimensional/complete collapse (Here, a $\checkmark$ means "there exists a hyperparameter setting and data distribution such that the relevant collapse happens;" see section 3). Beyond collapses, the theory implies that SCL, whose landscape is formed primarily by data augmentation, is more robust to data imbalance than InfoNCE, which is affected primarily by the data (see section 4). + +A distinctive feature of Eq. (3) is that its first and third-order terms vanish. This is because the loss function is invariant to a left rotation of $W$ . We will see that this symmetry in rotation is a crucial and general feature of the SSL loss functions that allow us to treat them in a universal way. We discuss how rotation symmetry can cause collapses in nonlinear settings in Section 4. + +InfoNCE. The loss function simplifies to: + +$$ +L = \underbrace {\operatorname {T r} \left[ W C W ^ {T} \right]} _ {E} + \underbrace {\mathbb {E} _ {\epsilon , \hat {x}} \left\{\log \mathbb {E} _ {\hat {\chi}} \left[ \exp \left(- \frac {1}{2} | W (x - \chi) | ^ {2}\right) \right] \right\}} _ {- S}. \tag {4} +$$ + +Expanding the entropy term to the fourth order, we obtain1 + +$$ +- S = - \mathbb {E} _ {x} \mathbb {E} _ {\chi} \left[ \frac {1}{2} | W (x - \chi) | ^ {2} \right] + \frac {1}{8} \operatorname {V a r} [ | W (x - \chi) | ^ {2} ] + O (\| W \| ^ {6}). \tag {5} +$$ + +This (perturbative) decomposition of entropy deserves some special attention. The entropy decomposes into a repulsion term that is second order in $W$ , and a variance term that is fourth order in $W$ . The first term encourages a repulsion between $x$ and its augmentation, which counteracts the effect of the energy term. 
The repulsion term can be decomposed into + +$$ +\mathbb {E} _ {x} \mathbb {E} _ {\chi} \left[ \frac {1}{2} | W (x - \chi) | ^ {2} \right] = \operatorname {T r} \left[ W C W ^ {T} \right] + \operatorname {T r} \left[ W A _ {0} W ^ {T} \right]. \tag {6} +$$ + +The first term encourages an expansion of $W$ along the direction of the augmentation $C$ , while the second term encourages an expansion along the directions of feature $A_0$ . It is intriguing to see + +that the repulsion term dominates the attraction of the energy term: the motion along the direction of $C$ completely cancels out, and only the expansion along $A_0$ remains. This means that to leading order, the learned representation has a larger variation along the directions where the data has a larger variation, which is what one naively expects. Collecting results, we have obtained the loss landscape in the neighborhood of the origin as $L = -\mathrm{Tr}[WA_0W^T] + \frac{1}{8}\mathrm{Var}[|W(x - \chi)|^2] + O(\| W\|^6)$ . + +NT-xent (SimCLR). As an additional example, we analyze Normalized Temperature Cross-Entropy loss (NT-xent) used in SimCLR (Chen et al., 2020a). Tian (2022) shows that InfoNCE can be generalized to encompass NT-xent as follows: + +$$ +L = \mathbb {E} _ {\epsilon} \left[ - \sum_ {i = 1} ^ {N} \log \frac {\exp (- | f (x _ {i}) - f (x _ {i} ^ {\prime}) | ^ {2} / 2)}{\sum_ {\chi \neq x} \exp (- | f (x _ {i}) - f (\chi_ {j}) | ^ {2} / 2) + \alpha \exp (- | f (x _ {i}) - f (x _ {i} ^ {\prime}) | ^ {2} / 2)} \right]. \tag {7} +$$ + +In contrast to InfoNCE, here one of the terms in the denominator is reweighted by a factor of $\alpha \geq 0$ . Two interesting limits are $\alpha = 1$ , where we recover the InfoNCE loss, and $\alpha = 0$ , where we obtain NT-xent. For general $\alpha$ , we refer to this loss as the weighted InfoNCE. We will see in section 3 that this weighted InfoNCE can have a mild dimensional collapse problem. + +The same perturbative expansion as Eq. 
(4)-(6) gives + +$$ +L = \frac {1 - \alpha}{N} \operatorname {T r} \left[ W C W ^ {T} \right] - \operatorname {T r} \left[ W A _ {0} W ^ {T} \right] + \frac {1}{8} \operatorname {V a r} \left[ \left| W (x - \chi) \right| ^ {2} \right] + O \left(\left\| W \right\| ^ {6}\right) + O \left(\left\| W \right\| ^ {4} N ^ {- 1}\right). \tag {8} +$$ + +Now, the Hessian of the origin is no longer guaranteed to be negative definite. In fact, if $\frac{1 - \alpha}{N} C - A_0 \geq 0$ , $W = 0$ becomes an isolated local minimum. + +Landscape Analysis. The above discussion shows that the common loss landscapes in self-supervised contrastive learning can be reduced to an effective form in Eq. (3). The following proposition shows that the variance term of the loss takes a specific form when the data is Gaussian. + +Proposition 1. Let the data and noise be Gaussian. Then, $L = -\mathrm{Tr}[W B W^T] + \mathrm{Tr}[W\Sigma W^T W\Sigma W^T]$ . + +When the training ends, one expects the model to locate at (at least close to) a stationary point of the loss. It is thus important to identify all the stationary points of this loss function. + +Theorem 1. Let $d^{*} \coloneqq \min(d_{0}, d_{1})$ . Let the data and noise be Gaussian. All stationary points $W$ of Eq. (3) satisfy $W^{T}W = \frac{1}{2}\Sigma^{-1/2}UM\Lambda U^{T}\Sigma^{-1/2}$ , where $U\Lambda U^{T}$ is the eigenvalue decomposition of $\Sigma^{-1/2}B\Sigma^{-1/2}$ , and $M$ is an arbitrary (masking) diagonal matrix containing only zero or one such that (1) $M_{ii} = 0$ if $\Lambda_{ii} < 0$ and (2) contain at most $d^{*}$ nonzero terms. + +Additionally, if $C$ and $A_0$ commute, all stationary points satisfy + +$$ +W ^ {T} W = \frac {1}{2} \Sigma^ {- 1} B _ {M} \Sigma^ {- 1}, \tag {9} +$$ + +where $B_M$ denotes the matrix obtained by masking the eigenvalues of $B$ with $M$ . + +This stationary-point condition implies the direct cause of the dimensional collapse. 
Namely, dimensional collapse happens when the eigenvalues of the matrix $B$ become negative. The eigenvalues of $B$ , in turn, depend on the competition between data augmentation and the data feature. Comparing the commuting case with the noncommuting case, we see that the main difference is that when $C$ does not commute with $A_0$ , the augmentation can also change the orientation of the learned representation; otherwise, augmentation only affects the eigenvalues. To focus on the most important terms, we now assume that the augmentation is well-aligned with the features such that the augmentation covariance commute with the data covariance. + +Assumption 1. From now on, we assume $CA_0 = A_0C$ . + +For the case of weighted InfoNCE, we have that $B = A_0 - \frac{1 - \alpha}{N} C$ . Let $a_i$ denote the $i$ -th eigenvalue of the $A$ and $c_i$ that of $C$ viewed in a predetermined order; then, the $i$ th subspace collapses when $\frac{1 - \alpha}{N} c_i \geq a_i$ , namely, when the variation introduced by the noise dominates that of the original data. Importantly, this collapse is a property shared by all stationary points of the landscape, and one cannot hope to fix the problem by, say, biasing the gradient descent towards a certain type of local minima. When weight decay is used, the condition for collapse becomes $\frac{1 - \alpha}{N} c_i + \gamma \geq a_i$ : it becomes easier to cause a collapse when weight decay is used. + +The global minimum of the loss function is also easy to find. For all stationary points, the loss function takes a simple form; $L = -\frac{1}{4}\mathrm{Tr}[\Sigma^2 B_M B]$ . Thus, $L$ becomes more and more negative if the eigenvalues of $B_M$ align with the largest eigenvalues of $B$ . Namely, the global minimum is achieved if $M$ leaves the largest eigenvalues of $B$ intact. 
+ +Because the stationary points contain collapsed solutions where the eigenvalues of $W^T W$ are zero, one is naturally interested in how likely it is to converge to these solutions. + +Proposition 2. $(W^T W$ achieves maximum possible rank) Let $m$ denote the number of positive eigenvalues $B$ . Then, $\mathrm{rank}(W^T W) = \min(m, d^*)$ for any local minimum. + +This proposition implies that the loss landscape of contrastive SSL (with a linear model) is rather benign because all local minima must achieve a maximum possible rank. In fact, this result implies that the collapses may be well controllable by carefully controlling and tuning the eigenvalues of the matrix $B$ , which directly depends on the nature of the data augmentation we use. + +# 3.2 LANDSCAPE WITH NORMALIZATION + +It is common in practice to normalize the learned representation such that $\| f(x) \|^2 = c$ . When normalization is applied, only the direction of the learned representation matters. While this is a simple trick in practice, its implication on the landscape is poorly understood. In this section, we extend our theory to analyze the effect of normalization. + +We model the effect of normalization as a regularization term: $R \coloneqq (\mathbb{E}_x\| f(x)\| ^2 -c)^2$ + +$$ +L _ {\text {n o r m}} = E q. (3) + \kappa R. \tag {10} +$$ + +Note that this regularization term achieves two things simultaneously: (1) $\| f(x) \|^2 = c$ for all $x$ is a minimizer of the loss function; (2) the regularization is invariant to any rotation of the learned representation. For a linear model, we note that this condition is not entirely the same as a direct normalization of the representation because it is generally impossible to achieve $\| Wx \|^2 = c$ for all $x$ because a linear model has limited expressivity. However, it is generally possible to achieve the slightly weaker condition: the representation has a norm 1 on average. 
This loss function can also be seen as a mathematical model of the VICReg loss (Bardes et al., 2021), where $R$ effectively models the variance regularization term of VICReg loss and $\kappa$ is its strength. This modeling is necessary because the variance term of the original VICReg is not differentiable and thus cannot be expanded. The proposed term $R$ captures the essence of the variance term because it also encourages the representation to have a constant variance. Our theory also explains why the VICReg is observed to experience collapses when $\kappa$ is not large enough. As $\kappa$ tends to infinity, this constraint will become perfectly satisfied. We thus take the infinite $\kappa$ limit to study the effect of normalization. + +The following proposition gives a condition that all stationary points of Eq. (10) satisfy. + +Proposition 3. Let $\rho(W) \coloneqq \operatorname{Tr}[W\Sigma W^T]$ , $B' \coloneqq B + 2\kappa(c - \rho)\Sigma$ , and let $\Lambda_i$ be the eigenvalues of $B'$ . Then, every stationary point of Eq. (10) satisfy $W^T W = \frac{1}{2}\Sigma^{-1}B_M'\Sigma^{-1}$ , where $M$ is an arbitrary diagonal mask of the eigenvalues of $B'$ containing only zero or one such that (1) $M_{ii} = 0$ if $\Lambda_i < 0$ and (2) contain at most $d^*$ nonzero terms. + +Compared with the unnormalized case, the term $2\kappa (1 - \rho)\Sigma_{M}$ emerges due to normalization. The effect of normalization is as expected: it shrinks the norm of the model if $\rho > 1$ , and it expands the model if $\rho < 1$ , and it does not have any effect if we have already achieved $\rho = 1$ . Interestingly, this rescaling effect is anisotropic and stronger along the directions of larger eigenvalues of the covariance of the augmented data $\Sigma$ . + +The next theorem gives the explicit form of $\rho$ at the stationary points. + +Proposition 4. 
For any stationary point $W^{*}$ , $c - \rho(W^{*}) = \frac{c - \frac{1}{2}\mathrm{Tr}[\Sigma^{-1}B_{M}]}{1 + \kappa d_{M}}$ , where $d_{M}$ is the number of non-zero eigenvalues of $B_{M}'$ . + +For a finite $\kappa$ , these results suggest that collapses can still happen. For VICReg, $B = -A_0$ , and the complete collapse can happen when $\kappa \ll \| A_0\| /c\|\Sigma\|$ - this explains the experimental observation of collapses for small values of $\kappa$ in VICReg loss (Bardes et al., 2021). + +Lastly, to understand normalization, we are interested in the case of $\kappa \to \infty$ . Combining Proposition 3 and 4, we have proved the following theorem, showing that the asymptotic solution converges to a form independent of $\kappa$ . + +Theorem 2. Let $W_{\kappa}$ be a stationary point of Eq. (10) at fixed $\kappa$ . Then, + +$$ +\lim _ {\kappa \rightarrow \infty} W _ {\kappa} ^ {T} W _ {\kappa} = \frac {1}{2} \Sigma^ {- 1} \left[ B _ {M} + \frac {2 c - \operatorname {T r} \left[ \Sigma^ {- 1} B _ {M} \right]}{d _ {M}} \Sigma_ {M} \right] \Sigma^ {- 1}. \tag {11} +$$ + +The correction term $\frac{2c - \mathrm{Tr}[\Sigma B_M]}{d_0}\Sigma_M$ emerges as a result of applying normalization. The effect can be easier to understand if we write the solution as + +$$ +W ^ {T} W = \frac {1}{2} \left[ \Sigma^ {- 1} B _ {M} - \frac {\operatorname {T r} \left[ \Sigma^ {- 1} B _ {M} \right]}{d _ {M}} M + \frac {2 c}{d _ {M}} \right] \Sigma^ {- 1}, \tag {12} +$$ + +where we have used the relation $\Sigma_{M}\Sigma^{-1} = M$ . Note the term in brackets: it subtracts the average eigenvalue of $\Sigma^{-1}B_M$ from $\Sigma^{-1}B_M$ and shifts the remaining eigenvalues positively by $2c / d_{M}$ . Because the eigenvalues of $WW^{T}$ must be positive, the following condition must hold for all solutions: + +$$ +\lambda_ {i} + 2 c / d _ {M} > \bar {\lambda}, \tag {13} +$$ + +where $\lambda_{i}$ are the eigenvalues of $\Sigma^{-1}B_M$ and $\bar{\lambda}$ is its average. 
Namely, for the $i$ -th dimension not to collapse, it must be smaller than the average eigenvalues by at most $2c / d_{M}$ . Any smaller eigenvalues must collapse. Compared to the case without normalization, normalization makes collapses dependent on the relative strength of each feature and augmentation. In the following discussion, we let $c = 1$ to simplify the discussion. We present a detailed analysis of this condition in Section D.1. One finds that the condition for collapse becomes heavily dependent on the data structure, and there are cases where collapses become harder, and there are cases where collapses become much easier. Importantly, it also becomes the case that a sufficiently strong augmentation can always cause a collapse in the corresponding subspace. + +Effect of Bias. Lastly, we study the effect of explicitly having a bias term: $Wx \rightarrow Wx + b$ . First of all, when there is no normalization, the bias term does not affect the solution because the loss landscape is invariant to a translation in the learned representation. However, this effect dramatically changes if we apply normalization at the same time. This is because normalization removes the translation symmetry of the effective loss, and the trivial solution $W = 0$ , $b = 1$ becomes the simplest way to achieve the norm-1 constraint. Our result shows that the addition of bias dramatically affects the stationary points. + +Theorem 3. Let $f(x) = Wx + b$ and $\mathbb{E}[x] = 0$ . Then, all stationary points $W$ satisfy Eq. (9), subject to the constraint that $\mathrm{Tr}[W^T\Sigma W]\leq c$ . + +Namely, the solution reverts to the case where there is no normalization at all, except that the norm of the solution can no longer be larger than $c$ . This upper bound can make collapses much easier to happen. For example, if $c < (a_i - c_i) / (a_i + c_i)$ for all $i$ , a complete collapse can happen despite normalization. 
When $c = 1$ and $c_i \ll a_i$ , $\rho \approx d_M / 2$ and the constraint indicates that $d_M \leq 2$ : when the augmentation is very weak, there are at most 2 nontrivial subspaces. This is too restrictive for learning a meaningful representation, which helps us understand why dimensional collapse can harm learning in practice. The fact that simple normalization cannot prevent collapse has been noticed for a while for the simplest case of a cosine-similarity loss, and our result explains why previous works have tried to introduce asymmetry to cosine similarity to avoid collapses (Grill et al., 2020; Chen and He, 2021). + +Relevant Loss Functions. Having developed a framework for understanding normalization, we show that other common loss functions in SSL can also be written in the form given in Eq. (3). The spectral contrastive loss (SCL) (HaoChen et al., 2021) reads + +$$ +L _ {S C L} = - 2 \mathbb {E} [ f (x) ^ {T} f (x ^ {\prime}) ] + \mathbb {E} [ (f (x) ^ {T} f (\chi)) ^ {2} ] + c o n s t. \quad \text {s . t .} \| f (x) \| ^ {2} = 1. \tag {14} +$$ + +Let $f(x) = Wx$ be linear, the distributions are zero-mean Gaussian, and ignore the normalization. This loss function becomes + +$$ +L _ {S C L} = - 2 \operatorname {T r} \left[ W C W ^ {T} \right] + \operatorname {T r} \left[ W \Sigma W ^ {T} W \Sigma W ^ {T} \right]. \tag {15} +$$ + +When normalization exists, we can apply the result in Section 3.2. By our argument, there is no collapse in this loss function. The difference with InfoNCE loss is that the learned feature spreads along the directions of the augmentation $C$ , not along the directions of the feature $A_0$ . 
+ +![](images/b6cdba91d2a565539e66ba75f413aca578134d7d83ec9125d2516d70353f36e2.jpg) +(a) Landscape of ResNet + +![](images/0635702b74af3b585911f8fb63feaf16d41cc921156f0b8b6074cc4dd163371a.jpg) +(b) No collapse + +![](images/760f7caf8872e36e82a2cee351701c1849aae17a3fbcd8d69f68db8cdae1d80e.jpg) +(c) Dimensional collapse + +![](images/3700ed29d4b867983f389d76e342c8dc1cf9f00536ba629c8fcce3994613d2ce.jpg) +(d) Complete collapse + +![](images/247de21cfec7c7aa2b9b112b93049f8d26a3a94d2541a04acb43288b78b9356b.jpg) +(e) Landscape of ViT +Figure 2: Landscape of Resnet18 (upper) and vision transformers (lower) on CIFAR10 with SimCLR qualitatively agrees with our linear theory. (a) Training objective $L$ as a function of a rescaling of the last layer $W \rightarrow aW$ . (b-d) $L$ as a function of a $2d$ rescaling of the last layer where the data augmentation strength is (b) small, (c) intermediate, and (d) strong. Red indicates areas of high loss, blue indicates areas of low loss, and stars locate local minima. The use of data augmentation changes the stability of the origin, a qualitative change that leads to different types of collapses in qualitative agreement with our linear theory (cf. Figure 1). Additionally, we also notice the same qualitative changes of landscape in simpler nonlinear models (see Appendix A). (e-h) are the same setting but for ViT. + +![](images/99218392843222b7950988269986742fda90110ffc047d25018fe0f4107c9a5c.jpg) +(f) No collapse + +![](images/74a020fac9322634c3ab15388122141e788c5a0121465d6f8f5a6eb4821660c8.jpg) +(g) Dimensional collapse + +![](images/d8c1fcf3738a0556ea7afca13056d481ca2ba01613f438daee1b8479e38e48ad.jpg) +(h) Complete collapse + +The case of Barlow Twin (BT) (Zbontar et al., 2021) is similar. While the fourth-order term of BT is much more complicated due to the imbalance created by the $\lambda$ term. The second-order term can be identified easily: $L_{BT} = -2\mathrm{Tr}[W\Sigma W^T] + O(||W||^4)$ . This also does not collapse. 
A difference between the SCL loss and InfoNCE is that the learned representation has a spread that aligns with the combination of the feature and the augmentation strength. + +# 4 IMPLICATIONS + +In this section, we explore some theoretical and practical implications of our results. In Appendix Section A, we also present numerical simulations that directly validate the predictions of the theory. + +# 4.1 RELEVANCE TO NONLINEAR MODELS + +An important question is how much of the analysis is relevant for deep nonlinear models in general. In fact, the loss landscape we have studied is quite close to the most general landscape one can have. Let $L(f(x))$ be a general SSL loss function for data point $x$ . The quality of the learned representation should be independent of the population-level orientation of the representation. Therefore, the loss function should satisfy a rotational invariance. Namely, for any rotation matrix $R$ , $L(x) = L(Rf(x))$ ; this rotational invariance implies that the loss should expand as $L(f(x)) = af(x)^T f(x) + b[f(x)^T f(x)]^2 + O(f(x)^6)$ . Note that all the odd-order terms of $f(x)$ vanish due to the rotational symmetry. Substituting $f(x) = W\phi(x)$ in the loss function, we obtain a very general form of landscape that $W$ obeys: + +$$ +L (W, \phi) = \operatorname {T r} \left[ W ^ {T} W A \right] + \sum W _ {i m} W _ {j m} W _ {k n} W _ {l n} Z _ {i j k i}, \tag {16} +$$ + +where $A$ and $Z$ are dependent on $\phi$ . Note how all the examples we have studied take this form. For $W$ , its collapse entirely depends on the stability of the matrix $A$ . Thus the study of the stability of the matrix $A$ becomes crucial for our understanding. To illustrate, we train a Resnet18 on CIFAR10 with the SimCLR loss with normalization and with weight decay strength $10^{-3}$ until convergence to obtain the converged weights $W^{*}$ . The representation has a dimension 128. 
We rescale the weight matrix of the last layer $W_{\mathrm{last}}^{*}$ by a factor $a$ and compute the loss as a function of $a$ . See Figure 2-a. We then partition the singular values of $W_{\mathrm{last}}^{*}$ into the larger half and the smaller half. We rescale the larger half by a factor $r_1$ and the smaller half by $r_2$ . We plot the loss as a $2d$ function of $(r_1, r_2)$ in Figure 2. We also perform experiments for vision transformers (ViT) in the lower row (Dosovitskiy et al., 2020). In all cases, the landscape features qualitative changes comparable to those in Figure 1. + +A connection to Landau theory in physics. Those familiar with statistical physics should note that the proposed theory is analogous to the Landau theory of second-order phase transitions. When treating the loss function as the free energy, the square root of the eigenvalues $\sqrt{\lambda}$ of $W^T W$ are the order parameters of the system, and the phase transitions happen when $\lambda$ turns from 0 to positive. These transitions (collapses) happen because of symmetry breaking (Landau and Lifshitz, 2013): the loss function (2) is symmetric in the sign of $W$ . Yet, for any nontrivial learning, $W$ must be nonzero; thus, a symmetry breaking of the sign of $W$ needs to happen for learning. The recent work by Ziyin and Ueda (2022) suggested how symmetry breaking around the origin and Landau theory could explain various types of collapses in deep learning. Therefore, the dimensional collapse could be related to neural collapses in supervised learning (Papyan et al., 2020; Ziyin et al., 2022a) and posterior collapse in Bayesian deep learning (Wang and Ziyin, 2022). Because second-order phase transitions should come with the divergence of the correlation function, one might also wonder what is "divergent" in the SSL problem. 
Here, the learning time scale for the collapsing dimension is divergent at the critical point because the second-order term vanishes in this direction, and so the dynamics are effectively frozen along this direction. + +# 4.2 ROBUSTLY INDUCING GOOD COLLAPSES + +Contrary to previous works, a recent work (Cosentino et al., 2022) has suggested that dimensional collapse can be beneficial and significantly improve the generalization performance of the model. This observation raises a question. How can dimensional collapse be beneficial and how can it be induced? In the following, we first introduce $\beta$ -InfoNCE, which can adjust the degree of dimensional collapse, and analyze the collapse behavior to elucidate the mechanism of task-alligned collapse. + +Adjusting the degree of dimensional collapse with $\beta$ -InfoNCE. Despite the potential benefit, existing SSL loss functions cannot robustly induce dimensional collapse. InfoNCE is insufficient to induce a collapse, and the collapse induced by SimCLR depends on a vanishingly small parameter $1/N$ . One thus wonders whether there is a loss function that allows us to induce collapsing behavior in a more predictable manner so that one might controllably extract some benefits from collapse. Our result suggests that one way to directly control collapses is through the strength of the competition for the model Hessian at the origin. For InfoNCE, one way to achieve this is to weigh the entropy term by a general factor $\beta$ : + +$$ +\left. \right. \mathbb {E} _ {x} \left\{\frac {1}{2} | f (x) - f \left(x ^ {\prime}\right) | ^ {2} + \beta \log \mathbb {E} _ {\chi} \left[ \exp \left(- \frac {1}{2} | f (x) - f (\chi) | ^ {2}\right)\right]\right\}. +$$ + +Due to its similarity with the $\beta$ -VAE in Bayesian learning, we call it the $\beta$ -InfoNCE. The leading term in the loss function becomes + +$$ +- \operatorname {T r} \left[ W \left(A _ {0} - (1 - \beta) C\right) W ^ {T} \right]. 
+$$ + +When $1 - \beta > 0$ , the augmentations $C$ pull the representation towards zero. When the augmentation is as strong as the fea- + +ture variations, a collapse happens. One can thus introduce collapse by setting $\beta$ to be sufficiently small. When $1 - \beta < 0$ , the augmentations push the weights away from the origin along its direction, resulting in no collapse at all: When one really wants to avoid collapse, one can use a rather large $\beta$ ; $\beta = 1$ is thus at the boundary of this bifurcating behavior. We note that existing loss functions often do not have a parameter that is directly controlling the collapse behavior (see Table 1). The $\beta$ parameter here directly controls the level of difficulty of collapse. + +Achieving invariance with dimensional collapse. Here, we closely study an illustrative minimal example to demonstrate how collapses can be beneficial. Consider the following structured data generating process where the input features can be separated into two sets: (1) a task-relevant set with dimension $d_{c} < d_{0}$ and (2) a task-irrelevant set: $x = (x_{1},\dots,x_{d_{c}},\dots,x_{d_{0}})$ . Our result suggests + +![](images/151d2f5ed48da89f2bf707f660fd4d9553cc4bfa4b0a668bd6b3db9ea7c6532f.jpg) + +![](images/ba1c6e376e5802b03cc41f400c616a162ddd4953464210016db0340c31011fdf.jpg) +Figure 3: Top: Phase diagram of representational collapses. Bottom: $\beta$ -InfoNCE with $\beta = 0.5$ . The generalization error of a downstream regression task where the data augmentation (1) is isotropic and noninformative or (2) aligns with the style. We see that the performance worsens as collapses happen for the noninformative augmentation and improves as the collapse happens for the style-targeting augmentation. + +a precise way to remove the irrelevant features from the learned representation. For the purpose of causing a robust collapse, we use the $\beta$ -InfoNCE with $\beta = 1/2$ . 
For illustration, we consider the simple case $d_c = 1$ and $d_0 = 2$ . For any input $x = (x_1, x_2)$ , the label is generated as a linear function of $x_1$ : $y = cx_1$ . + +Correspondingly, we consider a structured data augmentation $x = \hat{x} + \sigma R\xi$ , where $R \in \mathbb{R}^{d_0 \times d_0}$ is $R = \text{diag}(\sqrt{1 - \theta}, \sqrt{\theta})$ , where $\theta \in [0,1]$ . The parameter $\sigma$ controls the overall strength of the augmentation, and $\theta$ controls the orientation of the strength. When $\theta = 0.5$ , we have an uninformative isotropic noise that has often been used in practice. When $\theta = 1$ , the augmentation is only on the task-irrelevant feature, and when $\theta = 0$ , the augmentation is only on the content. Since the prediction target only depends on the content, we want to learn a representation invariant to the style. For the downstream regression task, we use the learned representations $z := f(\hat{x})$ to train a ridge linear regressor that minimizes $\min_G \mathbb{E}_{\hat{x}}[||Gz - y(\hat{x})||^2] + 0.001||G||^2$ . See Figure 3. The top panel shows the phase diagram of this problem with different combinations of the augmentation strengths and orientations. The bottom panel shows that collapses introduce phase-transition-like behaviors in the generalization performance and that a data augmentation aligning with the task-irrelevant dimension improves performance. + +# 4.3 ROBUSTNESS TO DATA IMBALANCE + +Our theory is not only relevant for understanding collapses but can also be used to understand how an SSL model encodes the feature. Liu et al. (2021) recently showed that compared with supervised learning, SSL techniques are relatively more robust to imbalanced datasets that have disproportionately represented minority subgroups. As another application of our analysis, we illustrate the robustness of different techniques is not equal. 
As we have seen, the learned model $W^T W$ has eigenvalues that, to the leading order, are proportional to the Hessian $B$ , which is different for each loss function. As previously summarized in Table 1, for InfoNCE and SimCLR, the learned model aligns + +![](images/98faacf0f58a577c4eaa29fb3811d104d74c5162d8911fbecf1fc66f2bd95273.jpg) +Figure 4: Spectral Contrastive loss (SCL) is more robust against data imbalance than InfoNCE. We train SimCLR and SCL ResNet-12 models on imbalanced versions of CIFAR-10. We see that SCL is more robust than SimCLR, as suggested by our theory. These results are especially pronounced when there is no projector head. + +with the eigenvalues of the data covariance $A_0$ , which varies hugely as different classes of a dataset become more and more imbalanced. In comparison, the model trained with SCL aligns purely with the augmentation covariance $C$ , which is independent of the data imbalance. This suggests that the SCL landscape can be less dependent on data and thus more robust against data imbalance. See Figure 4. More experimental details are given in Appendix C. + +# 5 CONCLUSION + +In this work, we approached the problem of collapses in SSL from a loss landscape perspective. We analytically solved an effective landscape that can be extended to understand the effect of normalization. Our result suggests that dimensional collapse can be well understood in the minimal setting and is something neutral to learning on its own. With the help from the theory, we also showed that when task-irrelevant dimensions are targeted, dimensional collapse can result in improved performance, whereas an uninformative noise will (without good luck) leads to collapses in the dimensions that are relevant to the task. It is thus important for practitioners to devise targeted data augmentation mechanisms that incorporate the correct domain knowledge. 
Also, we advocated the thesis that the local geometry of the loss landscape around the origin is an essential component for understanding collapses, and this should invite more future work to understand the landscape around the origin. + +The limitation of our work is clear; our result only identifies the causes of the collapse that can be directly attributed to the low-rank structure of the local minima of the landscape. One possible alternative cause of the collapse is dynamics. For example, having a large learning rate and small batch can sometimes cause a convergence towards the saddle points in the landscape (Ziyin et al., 2022b), which, as we have shown, are the collapsed solutions. Investigating the role of dynamics in the collapse is thus a crucial future problem. + +# ACKNOWLEDGEMENTS + +This work was supported by a KAKENHI Grant No. JP18H01145 from the Japan Society for the Promotion of Science. Ziyin has been financially supported by the JSPS fellowship and thanks Zihan for the generous help during the writing of this paper. ESL was partially supported via NSF under the award CNS-2008151. + +# REFERENCES + +Arora, S., Khandeparkar, H., Khodak, M., Plevrakis, O., and Saunshi, N. (2019). A Theoretical Analysis of Contrastive Unsupervised Representation Learning. In Proc. Int. Conf. on Machine Learning (ICML). +Bardes, A., Ponce, J., and LeCun, Y. (2021). Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906. +Caron, M., Misra, I., Mairal, J., Goyal, P., Bojanowski, P., and Joulin, A. (2020). Unsupervised Learning of Visual Features by Contrasting Cluster Assignments. In Proc. Adv. on Neural Information Processing Systems (NeurIPS). +Caron, M., Touvron, H., Misra, I., Jegou, H., Mairal, J., Bojanowski, P., and Joulin, A. (2021). Emerging Properties in Self-Supervised Vision Transformer. arXiv, abs/2104.14294. +Chen, T., Kornblith, S., Norouzi, M., and Hinton, G. (2020a). 
A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR. +Chen, T., Kornblith, S., Swersky, K., Norouzi, M., and Hinton, G. E. (2020b). Big Self-Supervised Models are Strong Semi-Supervised Learners. Adv. in Neural Information Processing Systems, 33. +Chen, X. and He, K. (2021). Exploring Simple Siamese Representation Learning. In Proc. Int. Conf. on Computer Vision and Pattern Recognition (CVPR). +Cosentino, R., Sengupta, A., Avestimehr, S., Soltanolkotabi, M., Ortega, A., Willke, T., and Tepper, M. (2022). Toward a geometrical understanding of self-supervised contrastive learning. arXiv preprint arXiv:2205.06926. +Dayan, P. and Abbott, L. F. (2005). Theoretical neuroscience: computational and mathematical modeling of neural systems. MIT press. +Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al. (2020). An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929. +Ermolov, A., Siarohin, A., Sangineto, E., and Sebe, N. (2021). Whitening for self-supervised representation learning. In International Conference on Machine Learning, pages 3015-3024. PMLR. +Grill, J.-B., Strub, F., Altché, F., Tallec, C., Richemond, P., Buchatskaya, E., Doersch, C., Avila Pires, B., Guo, Z., Gheshlaghi Azar, M., Piot, B., kavukcuoglu, k., Munos, R., and Valko, M. (2020). Bootstrap your own latent: A new approach to self-supervised Learning. In Proc. Adv. on Neural Information Processing Systems (NeurIPS). +HaoChen, J. Z., Wei, C., Gaidon, A., and Ma, T. (2021). Provable guarantees for self-supervised deep learning with spectral contrastive loss. Advances in Neural Information Processing Systems, 34:5000-5011. +He, K., Fan, H., Wu, Y., Xie, S., and Girschick, R. (2020). Momentum Contrast for Unsupervised Visual Representation Learning. In Proc. Int. Conf. 
on Computer Vision and Pattern Recognition (CVPR). +Hsu, H., Qi, H., and Brown, M. (2019). Measuring the Effects of Non-Identical Data Distribution for Federated Visual Classification. arXiv, abs/1909.06335. + +Hua, T., Wang, W., Xue, Z., Ren, S., Wang, Y., and Zhao, H. (2021). On feature decorrelation in self-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9598-9608. +Jing, L., Vincent, P., LeCun, Y., and Tian, Y. (2021). Understanding dimensional collapse in contrastive self-supervised learning. arXiv preprint arXiv:2110.09348. +Kugelgen, J., Sharma, Y., Gresle, L., Brendel, W., Scholkopf, B., Besserve, M., and Locatello, F. (2021). Self-Supervised Learning with Data Augmentations Provably Isolates Content from Style. arXiv, abs/2106.04619. +Landau, L. D. and Lifshitz, E. M. (2013). Statistical Physics: Volume 5, volume 5. Elsevier. +Liu, H., HaoChen, J. Z., Gaidon, A., and Ma, T. (2021). Self-supervised learning is more robust to dataset imbalance. International Conference on Learning Representations. +Mitrovic, J., McWilliams, B., Walker, J., Buesing, L., and Blundell, C. (2020). Representation learning via invariant causal mechanisms. arXiv preprint arXiv:2010.07922. +Nozawa, K. and Sato, I. (2021). Understanding negative samples in instance discriminative self-supervised representation learning. Advances in Neural Information Processing Systems, 34:5784-5797. +Oord, A. v. d., Li, Y., and Vinyals, O. (2018). Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748. +Papyan, V., Han, X., and Donoho, D. L. (2020). Prevalence of neural collapse during the terminal phase of deep learning training. Proceedings of the National Academy of Sciences, 117(40):24652-24663. +Pokle, A., Tian, J., Li, Y., and Risteski, A. (2022). Contrasting the landscape of contrastive and non-contrastive learning. arXiv preprint arXiv:2203.15702. 
+Robinson, J., Sun, L., Yu, K., Batmanghelich, K., Jegelka, S., and Sra, S. (2021). Can contrastive learning avoid shortcut solutions? Advances in neural information processing systems, 34:4974-4986. +Saunshi, N., Ash, J., Goel, S., Misra, D., Zhang, C., Arora, S., Kakade, S., and Krishnamurthy, A. (2022). Understanding Contrastive Learning Requires Incorporating Inductive Biases. In Proc. Int. Conf. on Machine Learning (ICML). +Simon, J. B., Knutins, M., Ziyin, L., Geisz, D., Fetterman, A. J., and Albrecht, J. (2023). On the stepwise nature of self-supervised learning. arXiv preprint arXiv:2303.15438. +Tian, Y. (2022). Deep contrastive learning is provably (almost) principal component analysis. arXiv preprint arXiv:2201.12680. +Tian, Y., Chen, X., and Ganguli, S. (2021). Understanding self-supervised Learning Dynamics without Contrastive Pairs. In Proc. Int. Conf. on Machine Learning (ICML). +Tian, Y., Sun, C., Poole, B., Krishnan, D., Schmid, C., and Isola, P. (2020). What makes for good views for contrastive learning? Advances in Neural Information Processing Systems, 33:6827-6839. +Tosh, C., Krishnamurthy, A., and Hsu, D. (2021). Contrastive estimation reveals topic posterior information to linear models. J. Mach. Learn. Res., 22:281-1. +Trivedi, P., Lubana, E. S., Heimann, M., Koutra, D., and Thiagarajan, J. J. (2022). Analyzing data-centric properties for contrastive learning on graphs. arXiv preprint arXiv:2208.02810. +Tsai, Y.-H., Wu, Y., Salakhutdinov, R., and Morency, L.-P. (2021a). Self-supervised Learning from a Multi-view Perspective. In Proc. Int. Conf. on Learning Representations (ICLR). + +Tsai, Y.-H. H., Ma, M. Q., Yang, M., Zhao, H., Morency, L.-P., and Salakhutdinov, R. (2021b). Self-supervised representation learning with relative predictive coding. International Conference on Learning Representations. +Wang, F. and Liu, H. (2021). Understanding the behaviour of contrastive loss. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2495-2504. +Wang, T. and Isola, P. (2020). Understanding Contrastive Representation Learning through Alignment and Uniformity on the Hypersphere. In Proc. Int. Conf. on Machine Learning (ICML). +Wang, Y., Zhang, Q., Wang, Y., Yang, J., and Lin, Z. (2022). Chaos is a ladder: A new theoretical understanding of contrastive learning via augmentation overlap. International Conference on Learning Representations. +Wang, Z. and Ziyin, L. (2022). Posterior collapse of a linear latent variable model. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems. +Wei, C., Shen, K., Chen, Y., and Ma, T. (2021). Theoretical Analysis of Self-Training with Deep Networks on Unlabeled Data. In Proc. Int. Conf. on Learning Representations (ICLR). +Zbontar, J., Jing, L., Misra, I., LeCun, Y., and Deny, S. (2021). Barlow twins: Self-supervised learning via redundancy reduction. In International Conference on Machine Learning, pages 12310-12320. PMLR. +Zimmerman, R., Sharma, Y., Schneider, S., Bethge, M., and Brendel, W. (2021). Contrastive Learning Inverts the Data Generating Process. In Proc. Int. Conf. on Machine Learning (ICML). +Ziyin, L., Li, B., and Meng, X. (2022a). Exact solutions of a deep linear network. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems. +Ziyin, L., Li, B., Simon, J. B., and Ueda, M. (2022b). SGD can converge to local maxima. In International Conference on Learning Representations. +Ziyin, L. and Ueda, M. (2022). Exact phase transitions in deep learning. arXiv preprint arXiv:2205.12510. + +# A ADDITIONAL NUMERICAL RESULTS + +In this section, we validate our theory with numerical results. Unless specified otherwise, the dimension of the learned representation is set to be equal to the input dimension: $d_0 = d_1$ . + +No Collapse for InfoNCE. 
We showed that there is no collapse at all for the vanilla InfoNCE, no matter how strong the augmentation is. Our result implies that the smallest singular value of the model $W$ scales as $\sigma^4$ where $\sigma^2$ is the strength (namely, the variance) of the augmentation. See the left panel of Fig. 5. We use the vanilla InfoNCE loss defined in (1) with a linear model. The training set is sampled from $\mathcal{N}(0, I_{32})$ . The training proceeds with Adam with a learning rate of $6e - 4$ with full batch training for 5000 iterations. We use a simple diagonal Gaussian noise with variance $\sigma^2$ for data augmentation. We see that the singular values scale as $\sigma^4$ and never vanish, as the theory predicts. + +Nonrobust Collapses of Weighted InfoNCE. We now demonstrate that, as the theory predicts, collapses of weighted InfoNCE depend strongly on the dataset size. We use the same dataset and training procedure as the previous experiment. We set $\alpha = 0.1$ and change the size of the training set. Theory suggests that for a collapse in the $i$ -th subspace to happen, the size of the dataset needs to obey + +$$
N > \frac {a _ {i}}{c _ {i} (1 - \alpha)} := N _ {\text {crit}}. \tag {17}
$$ + +See the middle panel of Figure 5. We show the smallest three eigenvalues of $W^T W$ (roughly having similar magnitudes), and the critical dataset size for the smallest eigenvalue. We see that the theoretical threshold of collapse agrees well with where the collapse actually happens. + +Collapses in $\beta$ -InfoNCE. With $\beta < 1$ , one can cause collapses in a predictable and controllable way. In this experiment, we let $d_0 = 5$ and we plot all five eigenvalues of $W^T W$ as we increase the strength of an isotropic augmentation. As the numerical results show, collapses happen at the points predicted by the theory. + +Normalization Causes Dimensional Collapse. 
We also plot the three smallest eigenvalues of $W^T W$ when we apply the standard representation normalization in practice: $f(x) \to f(x) / \| f(x) \|$ . To facilitate comparison, we also use the same dataset and training procedure as before. See Figure 6. We see that normalization does cause a collapse in the smallest eigenvalues at an augmentation strength much smaller than the feature variation. + +# B LANDSCAPE OF A NONLINEAR MODEL + +In this section, we plot the landscape of the last layer of nonlinear models on the same synthetic dataset we outlined in the previous section. We train a three-layer nonlinear network with output dimension 2 with SGD until convergence. We then rescale the optimized weight of the last layer by a factor $a$ : $W_{last} \rightarrow aW_{last}$ and plot the loss function along this direction. See the top panel of Figure 7 for + +![](images/ccc7996109f2be90d2989e39053cb4dc258e6124b3ce2766def4469ed82262d4.jpg) +Figure 5: The three smallest singular values of $W^T W$ as a function of the augmentation strength. We see that our effective landscape theory around the origin accurately captures collapses in learning. Left: Vanilla InfoNCE. As the theory suggests, the singular values scale as $\sigma^4$ and do not vanish for any finite value of $\sigma$ . Mid: Weighted InfoNCE. $\alpha = 0.1$ , $\sigma = 5$ . Collapse happens at the critical dataset size predicted by the theory. Right: (Sqrt) Eigenvalues of $WW^T$ in $\beta$ -InfoNCE. The collapses can be well controlled. + +![](images/99881e58f3d4b4bcc3669742d31c4f8e0145f69c41e93cfa315ed27252276a3b.jpg) + +![](images/b844c5ec134326f0df3db25f6a54bb2e3d3f33830b6942b46d8f8c0d95e40847.jpg) + +![](images/cd7b65731d8ca087e5133850229933d5468111b18b4cb5f7284188c5ff53b0c1.jpg) +Figure 6: A collapse happens easily when the learned representation is normalized. The smallest eigenvalues of $A_0$ are roughly 0.2, and the collapse happens much before the noise reaches this strength. 
+ +![](images/2b537a60334e685645cae422322617aea2f6b2646af91a0e882bede9eddf0800.jpg) + +![](images/562a004beb8484fd766e2416e9b1d947231c3209ae9008d89406e86bbfd73d32.jpg) +Figure 7: The Landscape of nonlinear models is very similar to the landscape of linear models (cf. Figure 1). Top: 1d projection of the landscape of a two-layer tanh and ReLU network. Bottom Left: the landscape of a 2D projection of the last layer of a nonlinear model with a weak augmentation. Middle: with intermediate augmentation. Right: with strong augmentation. + +![](images/b73c66122800f71f6b2bd952d5c6f2d5f1490c04535ec96a92a152fd1bdcab3b.jpg) + +![](images/f41769dfc9310c2e798a0bc14d334452726fc74b2d23497988985f2779b3188c.jpg) + +both the tanh and the ReLU nonlinearity. We then rescale the two rows of the weight matrix of the model by $r_1$ and $r_2$ respectively: $W = (w_{1},w_{2})^{T}\rightarrow (r_{1}w_{1},r_{2}w_{2})$ . We see that the landscape of the model is qualitatively the same as that of the linear models, shown in Figure 1. + +# C SETUP FOR IMBALANCED DATA EXPERIMENTS + +Creating an Imbalanced Dataset: For our experiments measuring the influence of imbalanced datasets on SSL training, we use CIFAR-10 by sampling 20000 samples out of the 50000 training samples. The sampling process is described by a Dirichlet distribution and is often used to analyze effects of heterogeneity and data imbalance in Federated Learning problems (Hsu et al., 2019). Specifically, a small value of the distribution parameter yields a highly imbalanced dataset, while a large value yields a perfectly balanced dataset. 
We evaluate our models in three scenarios, for which we report below the number of samples per class: + +- High imbalance: [4890, 87, 5000, 0, 74, 0, 0, 212, 4788, 4947] +- Medium imbalance: [4268, 4296, 1741, 420, 945, 161, 4633, 1015, 131, 2386] +- No imbalance: [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000] + +Training Setup: We use ResNet-12 models as the backbone for all experiments due to computational constraints. SimCLR augmentations (Chen et al., 2020a) are followed, except for a reduced strength of resized cropping from 0.2 to 0.5. All training involves a standardly used cosine decay learning + +rate schedule, starting at 0.03 and decaying to 0.001. When a projector module is used, it involves a two-layer MLP with a hidden dimension of 512 and a BatchNorm layer in between. We use SGD for optimization and perform the standardly used linear evaluation protocol for measuring the quality of the final representation. For training the linear layer, we use an initial learning rate of 10 and decay it to 0.01 with a cosine schedule. We note that linear evaluation is used for supervised models as well, following the practice advocated by Liu et al. (2021). + +# D ADDITIONAL THEORETICAL CONCERNS + +# D.1 COLLAPSE CONDITION FOR NORMALIZATION + +The important condition for collapse in Eq. (13) can be better understood by considering the extreme cases. First of all, note that the eigenvalues of $\Sigma^{-1} B_{M}$ are bounded between $-1$ and $1$ + +$$
- 1 \leq \frac {a _ {i} - c _ {i}}{a _ {i} + c _ {i}} \leq 1, \tag {18}
$$ + +and $-1$ is achieved when $c_{i} \gg a_{i}$ , and 1 is achieved when $a_{i} \gg c_{i}$ . + +When the augmentation is negligibly small, $\Sigma^{-1}B_M\approx M$ , and $\lambda_{i}\approx \bar{\lambda} = 1$ , the condition thus becomes + +$$
\frac {2}{d _ {M}} > 0, \tag {19}
$$ + +which always holds. Thus, a sufficiently small augmentation will never cause collapse. 
Next, when we apply very strong augmentation to the $j$ -th subspace and zero augmentation to the others, the condition for the non-augmented spaces becomes + +$$
1 + \frac {2}{d _ {M}} > \frac {d _ {M} - 2}{d _ {M}}, \tag {20}
$$ + +meaning that the collapse will not happen. For the $j$ -th space, the condition is + +$$
- 1 + \frac {2}{d _ {M}} > \frac {d _ {M} - 2}{d _ {M}} (\Longleftrightarrow) \frac {4}{d _ {M}} > 2, \tag {21}
$$ + +which is only possible when $d_M = 1$ , namely, the strongly augmented space is the only space that does not collapse. This is reasonable when the original data is rank-1 because the normalization will ensure that this space does not collapse, but when the original data is not rank-1, this stationary point will be a saddle and will not be preferred by gradient descent. In other words, a strong enough augmentation will cause a collapse in the corresponding subspace, as is the case without normalization. + +It is also interesting to note that having $c_{i} \geq a_{i}$ is no longer sufficient to cause a collapse. For example, let $c_{1} = 0$ and $c_{j} = a_{j}$ for $j \neq 1$ . The condition for $j \neq 1$ becomes + +$$
\frac {2}{d _ {M}} > \frac {1}{d _ {M}}, \tag {22}
$$ + +which always holds. At the same time, it does not mean that collapsing has become harder in general. For example, it is also possible for $c_{i} < a_{i}$ to cause a collapse. Suppose we add a weak augmentation only to the first subspace such that $a_{i} - c_{i} = \epsilon > 0$ , the condition for this dimension not to collapse is + +$$
\frac {\epsilon}{a _ {i} + c _ {i}} + \frac {2}{d _ {M}} > \frac {d _ {M} - 1 + \epsilon}{d _ {M}}, \tag {23}
$$ + +which can be violated whenever $\epsilon < \frac{(a_i + c_i)(d_M - 3)}{a_i + c_i + d_M}$ . Namely, in some cases, normalization can in fact facilitate collapse. + +# E PROOFS + +# E.1 PROOF OF PROPOSITION 1 + +Proof. The second term in Eq. 
(3) can be written as + +$$
\begin{array}{l} \operatorname {V a r} \left[ | W (x - \chi) | ^ {2} \right] = \mathbb {E} \left[ \left(\operatorname {T r} \left[ W (x - \chi) (x - \chi) ^ {T} W ^ {T} \right]\right) ^ {2} \right] - \mathbb {E} \left[ \operatorname {T r} \left[ W (x - \chi) (x - \chi) ^ {T} W ^ {T} \right] \right] ^ {2} (24) \\ = [ \text {first term} ] - 4 \operatorname {T r} [ W (A _ {0} + C) W ^ {T} ] ^ {2} (25) \\ = [ \text {first term} ] - 4 \operatorname {T r} \left[ W \Sigma W ^ {T} \right] ^ {2}, (26) \\ \end{array}
$$ + +where we have used the definition $\Sigma = A_0 + C$ . The first term is + +$$
[ \text {first term} ] = \mathbb {E} \left[ \left(\operatorname {T r} [ W (x - \chi) (x - \chi) ^ {T} W ^ {T} ]\right) ^ {2} \right] = 4 \operatorname {T r} [ W \Sigma W ^ {T} ] ^ {2} + 8 \operatorname {T r} [ W \Sigma W ^ {T} W \Sigma W ^ {T} ]. \tag {27}
$$ + +Combining the above expressions, we see that Eq. (3) can be written as + +$$
\begin{array}{l} L = - \operatorname {T r} \left[ W B W ^ {T} \right] + \frac {1}{8} \operatorname {V a r} \left[ | W (x - \chi) | ^ {2} \right] (28) \\ = - \operatorname {T r} \left[ W B W ^ {T} \right] + \operatorname {T r} \left[ W \Sigma W ^ {T} W \Sigma W ^ {T} \right]. (29) \\ \end{array}
$$ + +This finishes the proof. $\square$ + +# E.2 PROOF OF THEOREM 1 + +Proof. All stationary points have a zero gradient: + +$$
- 2 W B + 4 W \Sigma W ^ {T} W \Sigma = 0. 
\tag {30} +$$ + +Multiplying by $W^T$ on the left and $B^{-1}$ on the right, + +$$ +W ^ {T} W = 2 W ^ {T} W \Sigma W ^ {T} W \Sigma B ^ {- 1} \tag {31} +$$ + +$$ +\left(\Longleftrightarrow\right) \quad \Sigma^ {1 / 2} W ^ {T} W \Sigma^ {1 / 2} = 2 \Sigma^ {1 / 2} W ^ {T} W \Sigma W ^ {T} W \Sigma B ^ {- 1} \Sigma^ {1 / 2} \tag {32} +$$ + +Defining $H \coloneqq \Sigma^{1/2} W^T W \Sigma^{1/2}$ , we obtain + +$$ +H = 2 H ^ {2} \Sigma^ {1 / 2} \Sigma B ^ {- 1} \Sigma^ {1 / 2}, \tag {33} +$$ + +$$ +\left(\Longleftrightarrow\right) \quad H \left(I - 2 H \Sigma^ {1 / 2} B ^ {- 1} \Sigma^ {1 / 2}\right) = 0. \tag {34} +$$ + +Because both $H$ and $\Sigma^{1/2}\Sigma B^{-1}\Sigma^{1/2}$ are symmetric, one can take the transpose of Eq. (33) to find that $H$ and $\Sigma^{1/2}B^{-1}\Sigma^{1/2}$ commute with each, which implies that $H$ has the same eigenvectors as $\Sigma^{1/2}B^{-1}\Sigma^{1/2}/2$ . + +Eq. (34) then implies that the eigenvalues of $H$ is either the inverse of that of $\Sigma^{1/2}B^{-1}\Sigma^{1/2}$ or zero. This implies that any stationary point of $H$ can be written in the form + +$$ +H = \frac {1}{2} U M \Lambda U ^ {T}, \tag {35} +$$ + +where $U$ is a unitary matrix, $\Lambda$ is diagonal matrix containing the eigenvalues of $\Sigma^{1/2}B^{-1}\Sigma^{1/2}$ , and $M$ is an arbitrary (masking) diagonal matrix containing only zero or one such that (1) $M_{ii} = 0$ if $\Lambda_{ii} < 0$ and (2) contain at most $d^*$ nonzero terms. This then implies that the weight matrix $W$ satisfies + +$$ +W ^ {T} W = \frac {1}{2} \Sigma^ {- 1 / 2} U M \Lambda U ^ {T} \Sigma^ {- 1 / 2}. \tag {36} +$$ + +Lastly, when $\Sigma$ and $B$ commute, we can compactly write the result as + +$$ +W ^ {T} W = \frac {1}{2} \Sigma^ {- 1} B _ {M} \Sigma^ {- 1}, \tag {37} +$$ + +where $B_{M}$ denotes the matrix obtained by masking the eigenvalues of $B$ with $M$ . This finishes the proof. + +# E.3 PROOF OF PROPOSITION 2 + +Proof. 
For all stationary points, $W^T W$ commutes with $B$ and $\Sigma$ , which means that at these stationary points, one can simultaneously diagonalize all the matrices and the loss function (3) can be written as + +$$
L = - \sum_ {i = 1} ^ {d ^ {*}} \lambda_ {i} b _ {i} + \lambda_ {i} ^ {2} s _ {i} ^ {2} \tag {38}
$$ + +where $\lambda_{i}, b_{i}, s_{i}$ are the eigenvalues of $W^{T}W$ , $B$ , and $\Sigma$ respectively. + +We can thus consider each $i$ separately. When $b_{i} > 0$ , $\lambda_{i} = 0$ cannot be a local minimum because the local Hessian is $-b_{i} < 0$ . When $b_{i} \leq 0$ , the only stationary point is $\lambda_{i} = 0$ . This sum covers at most $d^{*}$ summands, and so, at the local minima, $\lambda_{i} \neq 0$ if and only if $b_{i} > 0$ , and so the number of non-zero eigenvalues of $W^{T}W$ is $\min(m, d^{*})$ . + +# E.4 PROOF OF PROPOSITION 3 + +Proof. The regularization can be written as + +$$
\begin{array}{l} R = \left[ \left(\mathbb {E} _ {x} \| W x \| ^ {2} - c\right) ^ {2} \right] (39) \\ = \operatorname {T r} \left[ W \Sigma W ^ {T} \right] ^ {2} - 2 c \operatorname {T r} \left[ W \Sigma W ^ {T} \right] + c ^ {2}. (40) \\ \end{array}
$$ + +By Proposition 1, Eq. (10) reads + +$$
\begin{array}{l} L = - \operatorname {T r} \left[ W B W ^ {T} \right] + \operatorname {T r} \left[ W \Sigma W ^ {T} W \Sigma W ^ {T} \right] + \kappa \left(\operatorname {T r} \left[ W \Sigma W ^ {T} \right] ^ {2} - 2 \operatorname {T r} \left[ W \Sigma W ^ {T} \right] + 1\right) (41) \\ = - \operatorname {T r} \left[ W (B + 2 \kappa c \Sigma) W ^ {T} \right] + \operatorname {T r} \left[ W \Sigma W ^ {T} W \Sigma W ^ {T} \right] + \kappa \rho^ {2}. (42) \\ \end{array}
$$ + +The derivative of $\rho$ is + +$$
\frac {d}{d W} \rho = 4 \rho W \Sigma . \tag {43}
$$ + +The zero-gradient condition is thus + +$$
- 2 W (B + 2 \kappa c \Sigma - 2 \kappa \rho \Sigma) + 4 W \Sigma W ^ {T} W \Sigma = 0. 
\tag {44} +$$ + +We can define $B' \coloneqq B + 2\kappa c\Sigma - 2\kappa \rho \Sigma$ to see that this condition is the same as Eq. (30) in the proof of Theorem 1. The rest of the proof thus follows from the arguments. We thus arrive at the theorem statement: + +$$ +W ^ {T} W = \frac {1}{2} \Sigma^ {- 1} B _ {M} ^ {\prime} \Sigma^ {- 1}. \tag {45} +$$ + +We are done. $\square$ + +# E.5 PROOF OF PROPOSITION 4 + +Proof. Recalling that $\rho = \mathrm{Tr}[W\Sigma W^T]$ , we multiply $\Sigma$ from the right to both sides of the solution in Proposition 3 and take trace: + +$$ +\begin{array}{l} \frac {1}{2} \operatorname {T r} \left[ \Sigma^ {- 1} B _ {M} ^ {\prime} \right] = \frac {1}{2} \operatorname {T r} \left[ \Sigma^ {- 1} \left(B _ {M} + 2 \kappa (c - \rho) \Sigma_ {M}\right) \right] (46) \\ = \operatorname {T r} \left[ W ^ {T} W \Sigma \right] (47) \\ = \operatorname {T r} \left[ W \Sigma W ^ {T} \right] = \rho . (48) \\ \end{array} +$$ + +The first line further simplifies to + +$$ +\frac {1}{2} \operatorname {T r} \left[ \Sigma^ {- 1} B _ {M} \right] + \kappa (c - \rho) \operatorname {T r} \left[ \Sigma^ {- 1} \Sigma_ {M} \right] = \frac {1}{2} \operatorname {T r} \left[ \Sigma^ {- 1} B _ {M} \right] + \kappa (c - \rho) d _ {M}, \tag {49} +$$ + +where $d_M \coloneqq \operatorname{Tr}[M]$ is the number of nonzero eigenvalues of $B_M'$ . + +This gives an equation of $\rho$ that solves to + +$$ +c - \rho = \frac {c - \frac {1}{2} \operatorname {T r} \left[ \Sigma^ {- 1} B _ {M} \right]}{1 + \kappa d _ {M}}. \tag {50} +$$ + +This proves the proposition. $\square$ + +# F ADDITIONAL THEORETICAL CONCERNS + +# F.1 CASE OF DATA-INDEPENDENT NON-GAUSSIAN AUGMENTATION + +In the main text, we mainly considered the case when the noise is Gaussian. In this section, we consider a case where the noise is data-dependent and non-Gaussian. We show that the results we discussed in the main text still hold qualitatively. The general form of the loss function in Eq. 
(3) still applies: + +$$ +L = - \operatorname {T r} \left[ W B W ^ {T} \right] + \frac {1}{8} \operatorname {V a r} \left[ | W (x - \chi) | ^ {2} \right]. \tag {51} +$$ + +We consider a global rescaling augmentation for each datum $x$ : + +$$ +x = s \hat {x}, \tag {52} +$$ + +where $s \sim \exp(b)$ obeys an exponential distribution with mean $b$ and variance $b^2$ . Note that even if $\hat{x}$ is Gaussian, the augmented data is no longer Gaussian. In particular, the augmentation now becomes data-dependent. This augmentation can also be seen as a structured, biologically plausible data augmentation that encourages the model to be scale-invariant, which is what Wien's law for biological perception demands (Dayan and Abbott, 2005): no matter whether an image is dark or bright, the content of the image is the same. + +Under this augmentation, the noise covariance is dependent on $x$ and no longer Gaussian: + +$$ +\mathbb {E} \left[ x x ^ {T} \right] = 2 b ^ {2} A _ {0}. \tag {53} +$$ + +We also obtain that + +$$ +C = \mathbb {E} \left[ (b - s) ^ {2} x x ^ {T} \right] = b ^ {2} A _ {0}. \tag {54} +$$ + +The second term in Eq. (3) can be written as + +$$ +\begin{array}{l} \left. \right. \operatorname {V a r} \left[ | W (x - \chi) | ^ {2} \right] = \mathbb {E} \left[\left(\operatorname {T r} \left[ W (x - \chi) (x - \chi) ^ {T} W ^ {T} \right]\right) ^ {2} \right] - \mathbb {E} \left[ \operatorname {T r} \left[ W (x - \chi) (x - \chi) ^ {T} W ^ {T} \right]\right] ^ {2} (55) \\ = [ f i r s t \text {t e r m} ] - 4 \operatorname {T r} [ W (A _ {0} + C) W ^ {T} ] ^ {2} (56) \\ = [ f i r s t \text {t e r m} ] - 4 \operatorname {T r} \left[ W \Sigma W ^ {T} \right] ^ {2}, (57) \\ \end{array} +$$ + +where we have used the definition $\Sigma = A_0 + C$ . The first term is + +$$ +[ f i r s t \text {t e r m} ] = \mathbb {E} \left[ \left(\operatorname {T r} [ W (x - \chi) (x - \chi) ^ {T} W ^ {T} ]\right) ^ {2} \right]. 
\tag {58}
$$ + +However, for fixed rescaling factors $s_x$ and $s_\chi$ , each $W(x - \chi)$ obeys a multivariate Gaussian distribution with variance $2(s_x^2 + s_\chi^2)WA_0$ , and so we have + +$$
[ \text {first term} ] = \mathbb {E} _ {s _ {x}, s _ {\chi}} \left[ \left(s _ {x} ^ {2} + s _ {\chi} ^ {2}\right) ^ {2} \right] \left(4 \operatorname {T r} \left[ W A _ {0} W ^ {T} \right] ^ {2} + 8 \operatorname {T r} \left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \right]\right), \tag {59}
$$ + +where $\mathbb{E}_{s_x,s_\chi}\big[(s_x^2 +s_\chi^2)^2\big] = 56b^4$ . Combining terms, we obtain that + +$$
\operatorname {V a r} \left[ \left| W (x - \chi) \right| ^ {2} \right] = 48 b ^ {2} \times 4 \operatorname {T r} \left[ W A _ {0} W ^ {T} \right] ^ {2} + 56 b ^ {4} \times 8 \operatorname {T r} \left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \right]. \tag {60}
$$ + +The loss function is thus: + +$$
L = - \operatorname {T r} \left[ W B W ^ {T} \right] + 24 b ^ {2} \operatorname {T r} \left[ W A _ {0} W ^ {T} \right] ^ {2} + 56 b ^ {4} \operatorname {T r} \left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \right]. \tag {61}
$$ + +Note that this loss function is a special case of the loss function in Eq. (10) where $c = 0$ and $\kappa = 24b^2$ (and with a rescaled fourth-order term). As in the main text, $B$ is different according to different choices of loss functions. Because $B$ commutes with $A_0$ by construction, one expects collapses to happen at locations predicted by Propositions 3 and 4 under suitable choices of parameters. Also note that the odd terms vanish as discussed, and so the local stability of the origin should decide the collapsing behavior of this situation. + +This shows that collapse can also happen when the data augmentation is structured. We comment that the analysis in this section is minimal, and one important future direction is to provide more precise and insightful conditions of collapse under structured data augmentation. 
\ No newline at end of file diff --git a/2023/What shapes the loss landscape of self supervised learning_/images.zip b/2023/What shapes the loss landscape of self supervised learning_/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..7d877eba82de47f34cad0e9cceea8583b3657d93 --- /dev/null +++ b/2023/What shapes the loss landscape of self supervised learning_/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a43c2a5ba798102cd90fedd3dc18736e0aa036e7fe6c6ef012e9669600be6866 +size 700180 diff --git a/2023/What shapes the loss landscape of self supervised learning_/layout.json b/2023/What shapes the loss landscape of self supervised learning_/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..35606637736f8743f76bd1d235cd270588d9deed --- /dev/null +++ b/2023/What shapes the loss landscape of self supervised learning_/layout.json @@ -0,0 +1,19629 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "text", + "content": "WHAT SHAPES THE LOSS LANDSCAPE OF SELF SUPERVISED LEARNING?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "spans": [ + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "type": "text", + "content": "Liu Ziyin" + }, + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "type": "inline_equation", + "content": "^{1,2,3\\dagger}" + }, + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "type": "text", + "content": ", Ekdeep Singh Lubana" + }, + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "type": "inline_equation", + "content": "^{2,3,4\\dagger}" + }, + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "type": "text", + "content": ", Masahito Ueda" + }, + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "type": "inline_equation", + "content": "^{1,5,6}" + }, + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "type": "text", + "content": ", Hidenori Tanaka" + }, + { + "bbox": [ + 134, + 138, + 474, + 152 + ], + "type": "inline_equation", + "content": "^{2,3}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 189, + 160, + 420, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 160, + 420, + 172 + ], + "spans": [ + { + "bbox": [ + 189, + 160, + 420, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 189, + 160, + 420, + 172 + ], + "type": "text", + "content": "Department of Physics, The University of Tokyo, Tokyo, Japan" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 161, + 171, + 449, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 171, + 449, + 182 + ], + "spans": [ + { + "bbox": [ + 161, + 171, + 449, + 182 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 161, + 171, + 449, + 182 + ], + "type": "text", + "content": "Physics & Informatics Laboratories, NTT Research, Inc., Sunnyvale, CA, USA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 187, + 181, + 423, + 191 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 181, + 423, + 191 + ], + "spans": [ + { + "bbox": [ + 187, + 181, + 423, + 191 + ], + "type": "text", + "content": "3Center for Brain Science, Harvard University, Cambridge, USA" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 194, + 190, + 416, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 190, + 416, + 201 + ], + "spans": [ + { + "bbox": [ + 194, + 190, + 416, + 201 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 194, + 190, + 416, + 201 + ], + "type": "text", + "content": "EECS Department, University of Michigan, Ann Arbor, USA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 135, + 201, + 475, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 201, + 475, + 212 + ], + "spans": [ + { + "bbox": [ + 135, + 201, + 475, + 212 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 135, + 201, + 475, + 212 + ], + "type": "text", + "content": "Institute for Physics of Intelligence, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 164, + 211, + 444, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 211, + 444, + 222 + ], + "spans": [ + { + "bbox": [ + 164, + 211, + 444, + 222 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 164, + 211, + 444, + 222 + ], + "type": "text", + "content": "RIKEN Center for Emergent Matter Science (CEMS), Wako, Saitama, Japan" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 276, + 228, + 335, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 228, + 335, + 239 + ], + "spans": [ + { + "bbox": [ + 276, + 228, + 335, + 239 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 140, + 244, + 470, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 140, + 244, + 470, + 344 + ], + "spans": [ + { + "bbox": [ + 140, + 244, + 470, + 344 + ], + "type": "text", + "content": "Prevention of complete and dimensional collapse of representations has recently become a design principle for self-supervised learning (SSL). However, questions remain in our theoretical understanding: When do those collapses occur? What are the mechanisms and causes? We answer these questions by deriving and thoroughly analyzing an analytically tractable theory of SSL loss landscapes. In this theory, we identify the causes of the dimensional collapse and study the effect of normalization and bias. Finally, we leverage the interpretability afforded by the analytical theory to understand how dimensional collapse can be beneficial and what affects the robustness of SSL against data imbalance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 357, + 206, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 357, + 206, + 369 + ], + "spans": [ + { + "bbox": [ + 106, + 357, + 206, + 369 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 376, + 506, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 506, + 487 + ], + "type": "text", + "content": "Self-supervised learning (SSL) methods have achieved remarkable success in learning good representations without labeled data (Chen et al., 2020b). Loss functions used in such SSL techniques promote representational similarity between pairs of related samples while using explicit penalties (Chen et al., 2020a; He et al., 2020; Zbontar et al., 2021; Caron et al., 2020) or asymmetric dynamics (Caron et al., 2021; Grill et al., 2020; Chen and He, 2021) to ensure that the distance between unrelated samples remains large. 
In practice, however, SSL training often experiences the phenomenon of dimensional collapse (Jing et al., 2021; Tian et al., 2021; Pokle et al., 2022), where the learned representation spans a low dimensional subspace of the overall available space. In the extreme case, this failure mode instantiates as a complete collapse, where the learned representation becomes zero-rank, and no informative features can be extracted." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 492, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 504, + 581 + ], + "type": "text", + "content": "Prior work has primarily positioned such collapses in SSL as enemies of learning, arguing that they can negatively impact downstream task performance (Zbontar et al., 2021; Jing et al., 2021; Bardes et al., 2021). However, recent work by Cosentino et al. (2022) empirically demonstrates otherwise: quality of representations can be improved when there is a degree of collapse. These conflicting results indicate that despite extensive empirical explorations, a gap remains in our understanding of the collapse phenomenon in SSL training. We argue that this gap is due to the lack of a theoretical framework to analyze the mechanisms promoting collapsed representations. We aim to close this gap by carefully studying the loss landscapes of SSL." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 586, + 504, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 504, + 653 + ], + "type": "text", + "content": "In this work, we analytically solve the effective landscapes of linear models trained on several popular losses used in self-supervised learning, including InfoNCE (Oord et al., 2018), Normalized Temperature Cross-Entropy (NT-xent) (Chen et al., 2020a), Spectral Contrastive Loss (HaoChen et al., 2021), and Barlow Twins / VICReg (Zbontar et al., 2021; Bardes et al., 2021). The main thesis of this work is: the local geometry of the SSL landscapes around the origin crucially decides the learning behavior of SSL models. Technically, we show that" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 658, + 504, + 691 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 658, + 504, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 504, + 669 + ], + "type": "text", + "content": "1. the interplay between data variation and data augmentation determines the geometry of the loss;" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 669, + 504, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 669, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 669, + 504, + 691 + ], + "type": "text", + "content": "2. the geometry of the loss explains when dimensional collapse can be helpful and why certain SSL losses are robust against data imbalance, but not the others." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 696, + 504, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 696, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 104, + 696, + 504, + 719 + ], + "type": "text", + "content": "To the best of our knowledge, our work is the first to study the landscape causes of collapse in SSL thoroughly." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 123, + 721, + 443, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 721, + 443, + 732 + ], + "spans": [ + { + "bbox": [ + 123, + 721, + 443, + 732 + ], + "type": "text", + "content": "†Work done during an internship at Physics & Informatics Laboratories, NTT Research." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 71, + 198, + 150 + ], + "blocks": [ + { + "bbox": [ + 107, + 71, + 198, + 150 + ], + "lines": [ + { + "bbox": [ + 107, + 71, + 198, + 150 + ], + "spans": [ + { + "bbox": [ + 107, + 71, + 198, + 150 + ], + "type": "image", + "image_path": "8646768e22b533aecebc4335e5d0ac391451a5247e209ea684d46b6e91eeeaff.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 155, + 187, + 166 + ], + "lines": [ + { 
+ "bbox": [ + 119, + 155, + 187, + 166 + ], + "spans": [ + { + "bbox": [ + 119, + 155, + 187, + 166 + ], + "type": "text", + "content": "(a) An eigenmode" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 206, + 74, + 299, + 149 + ], + "blocks": [ + { + "bbox": [ + 206, + 74, + 299, + 149 + ], + "lines": [ + { + "bbox": [ + 206, + 74, + 299, + 149 + ], + "spans": [ + { + "bbox": [ + 206, + 74, + 299, + 149 + ], + "type": "image", + "image_path": "cbbd6117e91840b6e94713dcb5d6158eb9b8b02f0b21496e2397f12a4de3fadb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 226, + 155, + 283, + 166 + ], + "lines": [ + { + "bbox": [ + 226, + 155, + 283, + 166 + ], + "spans": [ + { + "bbox": [ + 226, + 155, + 283, + 166 + ], + "type": "text", + "content": "(b) No collapse" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 76, + 402, + 149 + ], + "blocks": [ + { + "bbox": [ + 309, + 76, + 402, + 149 + ], + "lines": [ + { + "bbox": [ + 309, + 76, + 402, + 149 + ], + "spans": [ + { + "bbox": [ + 309, + 76, + 402, + 149 + ], + "type": "image", + "image_path": "2c09e4bcd2758fb826668aee9a0defc0a67ebd2ab0285f41d0bca338d1f516d4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 155, + 402, + 166 + ], + "lines": [ + { + "bbox": [ + 310, + 155, + 402, + 166 + ], + "spans": [ + { + "bbox": [ + 310, + 155, + 402, + 166 + ], + "type": "text", + "content": "(c) Dimensional collapse" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 410, + 76, + 503, + 150 + ], + "blocks": [ + { + "bbox": [ + 410, + 76, + 503, + 150 + ], + "lines": [ + { + "bbox": [ + 410, + 76, + 503, + 150 + ], + "spans": [ + { + "bbox": [ + 410, + 76, + 503, + 150 + ], + 
"type": "image", + "image_path": "5f5d0ef31c9b33d250ec46485b1a7b883ec443bf1d735a885a7fdbec145b7bf4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 417, + 155, + 498, + 166 + ], + "lines": [ + { + "bbox": [ + 417, + 155, + 498, + 166 + ], + "spans": [ + { + "bbox": [ + 417, + 155, + 498, + 166 + ], + "type": "text", + "content": "(d) Complete collapse" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "lines": [ + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "type": "text", + "content": "Figure 1: Landscape in self-supervised learning (SSL). SSL losses generally depend only on the relative angle between pairs of network outputs (e.g., " + }, + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "type": "inline_equation", + "content": "f(x)^T f(x')" + }, + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "type": "text", + "content": "). Thus, the landscapes with a linear network (" + }, + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "type": "inline_equation", + "content": "f(x) = Wx" + }, + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "type": "text", + "content": ") have a global rotational symmetry and are symmetric about the origin. Our theory finds that the local stability at the origin decides the collapse, and larger data variation (green) prevents collapse, while strong data augmentation (red) can promote collapse. We plot the loss for a toy linear model with a diagonal weight matrix " + }, + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "type": "inline_equation", + "content": "diag(r_1, r_2)" + }, + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "type": "text", + "content": ". (a) The 1d landscape when fixing one of the parameter. (b-d) The 2d landscape. (b) No collapse: the origin is an unstable local maximum, and surrounding local minima avoid collapse. 
The dimensionally collapsed solutions are the saddle points. (c) Dimensional collapse: the value of " + }, + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "type": "inline_equation", + "content": "w_1" + }, + { + "bbox": [ + 104, + 170, + 506, + 262 + ], + "type": "text", + "content": " for all stable fixed points collapses to zero. (d) Complete collapse: the origin becomes the isolated local minimum." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 273, + 217, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 273, + 217, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 217, + 285 + ], + "type": "text", + "content": "2 RELATED WORKS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 295, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 384 + ], + "type": "text", + "content": "SSL and Collapses. On the one hand, prior literature has often argued collapse as a harmful phenomenon that can deteriorate downstream task performance (Jing et al., 2021; Zbontar et al., 2021). Preventing such collapsed representations is a frequently discussed topic in literature (Hua et al., 2021; Jing et al., 2021; Pokle et al., 2022; Tian et al., 2021) and has motivated the design of several SSL techniques (Zbontar et al., 2021; Bardes et al., 2021; Ermolov et al., 2021). On the other hand, Cosentino et al. (2022) empirically showed that dimensional collapses under strong augmentations could significantly improve generalization performance. Our work demystifies these conflicting results by finding analytic solutions to loss landscapes of several standard SSL techniques." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 388, + 504, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 489 + ], + "type": "text", + "content": "Theoretical Advances in SSL. Recently, several advances have been made towards understanding the success of SSL techniques from different perspectives: e.g., learning theory (Arora et al., 2019; Saunshi et al., 2022; Nozawa and Sato, 2021; Wei et al., 2021), information theory (Tsai et al., 2021a;b; Tosh et al., 2021), causality and data-generating processes (Zimmerman et al., 2021; Kugelgen et al., 2021; Trivedi et al., 2022; Tian et al., 2020; Mitrovic et al., 2020; Wang et al., 2022), dynamics (Wang and Isola, 2020; Tian et al., 2021; Tian, 2022; Wang and Liu, 2021; Simon et al., 2023), and loss landscapes (Pokle et al., 2022). These advances have unveiled practically useful properties of SSL, such as robustness to dataset imbalance (Liu et al., 2021) and principled solutions to avoid spurious correlations (Robinson et al., 2021)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 493, + 504, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 493, + 504, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 493, + 504, + 561 + ], + "type": "text", + "content": "The work by Jing et al. (2021) is the closest to ours in problem setting. In that paper, the authors focused on studying the linearized learning dynamics and suggested that a competition between the feature signal strength and augmentation strength can lead to dimensional collapse. In contrast, our focus is on the landscape and our result implies that this feature-augmentation competition on its own is insufficient to cause a dimensional collapse. In fact, we show that there will be no collapse in the setting studied by Jing et al. (2021)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 574, + 422, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 574, + 422, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 422, + 586 + ], + "type": "text", + "content": "3 A LANDSCAPE THEORY OF SELF-SUPERVISED-LEARNING" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "text", + "content": "This section presents the main theoretical results. Let " + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "inline_equation", + "content": "\\{\\hat{x}_i\\}_i^N" + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "text", + "content": " be a dataset with " + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "text", + "content": " data points. For every data point " + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "inline_equation", + "content": "\\hat{x}" + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "text", + "content": ", we augment it with an i.i.d. noise " + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "inline_equation", + "content": "x \\coloneqq \\hat{x} + \\epsilon" + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "text", + "content": ". 
To be concrete, we start with considering the standard contrastive loss, InfoNCE (Oord et al., 2018):" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 636, + 504, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 636, + 504, + 666 + ], + "spans": [ + { + "bbox": [ + 141, + 636, + 504, + 666 + ], + "type": "interline_equation", + "content": "L = \\mathbb {E} _ {\\epsilon} \\left[ - \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp (- | f (x _ {i}) - f \\left(x _ {i} ^ {\\prime}\\right) | ^ {2} / 2)}{\\sum_ {j \\neq i} \\exp (- | f (x _ {i}) - f (\\chi_ {j}) | ^ {2} / 2) + \\exp (- | f (x _ {i}) - f \\left(x _ {i} ^ {\\prime}\\right) | ^ {2} / 2)} \\right], \\tag {1}", + "image_path": "ddd734d3f21459af1c585f38854549c45d8c7b168c2c42984231b56c8043956b.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "f(x) \\in \\mathbb{R}^{d_1}" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": " is the model output; all " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "x, x'" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\chi" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": " are augmented data points for some independent additive noise " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": 
[ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\epsilon}[x] = \\hat{x} = \\mathbb{E}_{\\epsilon}[x'] \\neq \\mathbb{E}_{\\epsilon}[\\chi] = \\hat{\\chi}" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": ". We decompose the model output into a general function " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\phi(x) \\in \\mathbb{R}^{d_0}" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": " and the last-layer weight matrix " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "W \\in \\mathbb{R}^{d_1 \\times d_0}" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "f(x) = W\\phi(x)" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": ". The covariance of " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\phi(\\hat{x})" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "A_0 := \\mathbb{E}_{\\hat{x}}[\\phi(\\hat{x})\\phi(\\hat{x})^T]" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": ", and the covariance of the data-augmented penultimate layer representation is " + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\Sigma := \\mathbb{E}_x[\\phi(x)\\phi(x)^T]" + }, + { + "bbox": [ + 104, + 673, + 506, + 733 + ], + "type": "text", + "content": ". 
The effect of data augmentation on the learned" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": "representation is captured through a symmetric matrix " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "C \\coloneqq \\Sigma - A_0" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": ". For a general " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": ", the eigenvalues of " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": " can be either positive or negative. 
When " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": " is the identity mapping, " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "A_0" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": " becomes the empirical data covariance, " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": " becomes positive semi-definite and is the covariance of the noise " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": " is the covariance of the augmented data. In some sense, this loss function captures the essence of SSL: the numerator encourages the representation " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "f(x)" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": " to be closer to the representation of similar data, and the denominator encourages a separation between dissimilar data." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 154, + 387, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 387, + 165 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 387, + 165 + ], + "type": "text", + "content": "For a fixed set of noises, we can write the InfoNCE in a cleaner form:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 168, + 170, + 504, + 196 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 170, + 504, + 196 + ], + "spans": [ + { + "bbox": [ + 168, + 170, + 504, + 196 + ], + "type": "interline_equation", + "content": "L _ {\\epsilon} = \\mathbb {E} _ {\\hat {x}} \\left\\{\\frac {1}{2} | f (x) - f \\left(x ^ {\\prime}\\right) | ^ {2} + \\log \\mathbb {E} _ {\\hat {\\chi}} \\left[ \\exp \\left(- \\frac {1}{2} | f (x) - f (\\chi) | ^ {2}\\right) \\right] \\right\\}, \\tag {2}", + "image_path": "02fef9977cda26e7e79978322f3e67cdcc94f5e42475235c74e0487b20a8659a.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 199, + 360, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 199, + 360, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 199, + 360, + 212 + ], + "type": "text", + "content": "where we used " + }, + { + "bbox": [ + 104, + 199, + 360, + 212 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\hat{x}}" + }, + { + "bbox": [ + 104, + 199, + 360, + 212 + ], + "type": "text", + "content": " to denote an averaging over the training set." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 216, + 302, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 302, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 302, + 348 + ], + "type": "text", + "content": "In this notation, we have " + }, + { + "bbox": [ + 104, + 216, + 302, + 348 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\epsilon}\\mathbb{E}_{\\hat{x}}[x] = \\mathbb{E}_x[x]" + }, + { + "bbox": [ + 104, + 216, + 302, + 348 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 216, + 302, + 348 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\epsilon}[L_{\\epsilon}] = L" + }, + { + "bbox": [ + 104, + 216, + 302, + 348 + ], + "type": "text", + "content": ". We first show that the expansion of the loss function around the origin takes a rather universal form. We then find analytical solutions to the stationary points of this landscape and study their relevance to feature learning and collapses. See Table 1 for a summary of the main results. The proofs are presented in Appendix E. For a quantitative understanding, we mainly focus on the case when " + }, + { + "bbox": [ + 104, + 216, + 302, + 348 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 216, + 302, + 348 + ], + "type": "text", + "content": " is the identity function. We discuss the general nonlinear case in Section 4.1." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 358, + 279, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 358, + 279, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 279, + 370 + ], + "type": "text", + "content": "3.1 LANDSCAPE OF A LINEAR MODEL" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 376, + 302, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 302, + 421 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 302, + 421 + ], + "type": "text", + "content": "We first analyze representative SSL loss functions and show that to leading order in " + }, + { + "bbox": [ + 104, + 376, + 302, + 421 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 376, + 302, + 421 + ], + "type": "text", + "content": ", the local geometry of SSL losses takes the following form" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 110, + 424, + 301, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 424, + 301, + 449 + ], + "spans": [ + { + "bbox": [ + 110, + 424, + 301, + 449 + ], + "type": "interline_equation", + "content": "L = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ \\left| W (x - \\chi) \\right| ^ {2} \\right]. \\tag {3}", + "image_path": "c907399de5936a1546070e9a5c974169ef52af5b4a866cfc1ae4c794902be88d.jpg" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 309, + 217, + 506, + 312 + ], + "blocks": [ + { + "bbox": [ + 309, + 217, + 506, + 312 + ], + "lines": [ + { + "bbox": [ + 309, + 217, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 309, + 217, + 506, + 312 + ], + "type": "table", + "html": "
HessianDim.Complete
InfoNCEA0XX
NT-Xent (SimCLR)A0-C/N
Spectral ContrastiveCXX
Barlow TwinsA0+CXX
+ Normalization-X
+ bias-
+ Weight Decay+γI
", + "image_path": "6ff6fe7b3c11a62ca709814ff0e28a82c3882af2577f35a1c57f569a23e321c1.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "lines": [ + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "spans": [ + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "text", + "content": "Table 1: What shapes the SSL landscapes around the origin? For each of the SSL losses, the combination of data covariance " + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "inline_equation", + "content": "(A_0)" + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "text", + "content": ", data-augmentation covariance " + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "inline_equation", + "content": "(C)" + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "text", + "content": ", and dataset size " + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "inline_equation", + "content": "(N)" + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "text", + "content": " can affect its stability and thus determine the presence " + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "inline_equation", + "content": "(\\checkmark)" + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "text", + "content": " and absence " + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "inline_equation", + "content": "(X)" + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "text", + "content": " of dimensional/complete collapse (Here, a " + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "inline_equation", + "content": "\\checkmark" + }, + { + "bbox": [ + 307, + 316, + 505, + 447 + ], + "type": "text", + "content": " means \"there exists a hyperparameter setting and data distribution such that the relevant collapse happens;\" see section 3). 
Beyond collapses, the theory implies that SCL, whose landscape is formed primarily by data augmentation, is more robust to data imbalance than InfoNCE, which is affected primarily by the data (see section 4)." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 453, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 498 + ], + "type": "text", + "content": "A distinctive feature of Eq. (3) is that its first and third-order terms vanish. This is because the loss function is invariant to a left rotation of " + }, + { + "bbox": [ + 104, + 453, + 504, + 498 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 453, + 504, + 498 + ], + "type": "text", + "content": ". We will see that this symmetry in rotation is a crucial and general feature of the SSL loss functions that allow us to treat them in a universal way. We discuss how rotation symmetry can cause collapses in nonlinear settings in Section 4." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 502, + 276, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 276, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 276, + 514 + ], + "type": "text", + "content": "InfoNCE. The loss function simplifies to:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 181, + 519, + 505, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 519, + 505, + 559 + ], + "spans": [ + { + "bbox": [ + 181, + 519, + 505, + 559 + ], + "type": "interline_equation", + "content": "L = \\underbrace {\\operatorname {T r} \\left[ W C W ^ {T} \\right]} _ {E} + \\underbrace {\\mathbb {E} _ {\\epsilon , \\hat {x}} \\left\\{\\log \\mathbb {E} _ {\\hat {\\chi}} \\left[ \\exp \\left(- \\frac {1}{2} | W (x - \\chi) | ^ {2}\\right) \\right] \\right\\}} _ {- S}. 
\\tag {4}", + "image_path": "59036616e6f0db20fcba815cb1aa14599ec9968d164e12fcfdfdf2c57205011c.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 565, + 345, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 565, + 345, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 345, + 578 + ], + "type": "text", + "content": "Expanding the entropy term to the fourth order, we obtain1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 170, + 582, + 505, + 607 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 582, + 505, + 607 + ], + "spans": [ + { + "bbox": [ + 170, + 582, + 505, + 607 + ], + "type": "interline_equation", + "content": "- S = - \\mathbb {E} _ {x} \\mathbb {E} _ {\\chi} \\left[ \\frac {1}{2} | W (x - \\chi) | ^ {2} \\right] + \\frac {1}{8} \\operatorname {V a r} [ | W (x - \\chi) | ^ {2} ] + O (\\| W \\| ^ {6}). \\tag {5}", + "image_path": "c78baf24220935e40a11f69530021aa73185f5407e652d4b27d53dd29a06c186.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 612, + 504, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 504, + 656 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 504, + 656 + ], + "type": "text", + "content": "This (perturbative) decomposition of entropy deserves some special attention. The entropy decomposes into a repulsion term that is second order in " + }, + { + "bbox": [ + 104, + 612, + 504, + 656 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 612, + 504, + 656 + ], + "type": "text", + "content": ", and a variance term that is fourth order in " + }, + { + "bbox": [ + 104, + 612, + 504, + 656 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 612, + 504, + 656 + ], + "type": "text", + "content": ". 
The first term encourages a repulsion between " + }, + { + "bbox": [ + 104, + 612, + 504, + 656 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 612, + 504, + 656 + ], + "type": "text", + "content": " and its augmentation, which counteracts the effect of the energy term. The repulsion term can be decomposed into" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 189, + 662, + 504, + 687 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 662, + 504, + 687 + ], + "spans": [ + { + "bbox": [ + 189, + 662, + 504, + 687 + ], + "type": "interline_equation", + "content": "\\mathbb {E} _ {x} \\mathbb {E} _ {\\chi} \\left[ \\frac {1}{2} | W (x - \\chi) | ^ {2} \\right] = \\operatorname {T r} \\left[ W C W ^ {T} \\right] + \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right]. \\tag {6}", + "image_path": "688c3c07455f29cda39ac588ff2774134460f4e24b6cd1b072772289743cc9f4.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 690, + 506, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 690, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 690, + 506, + 714 + ], + "type": "text", + "content": "The first term encourages an expansion of " + }, + { + "bbox": [ + 104, + 690, + 506, + 714 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 690, + 506, + 714 + ], + "type": "text", + "content": " along the direction of the augmentation " + }, + { + "bbox": [ + 104, + 690, + 506, + 714 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 690, + 506, + 714 + ], + "type": "text", + "content": ", while the second term encourages an expansion along the directions of feature " + }, + { + "bbox": [ + 104, + 690, + 506, + 714 + ], + "type": "inline_equation", + "content": "A_0" + }, + { + "bbox": [ + 104, + 690, + 506, + 714 + ], + "type": "text", + "content": ". 
It is intriguing to see" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 720, + 459, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 720, + 459, + 732 + ], + "spans": [ + { + "bbox": [ + 119, + 720, + 459, + 732 + ], + "type": "text", + "content": "1 Throughout, we use " + }, + { + "bbox": [ + 119, + 720, + 459, + 732 + ], + "type": "inline_equation", + "content": "\\|\\cdot\\|" + }, + { + "bbox": [ + 119, + 720, + 459, + 732 + ], + "type": "text", + "content": " to denote the " + }, + { + "bbox": [ + 119, + 720, + 459, + 732 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 119, + 720, + 459, + 732 + ], + "type": "text", + "content": " norm for vectors and Frobenius norm for matrices." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": "that the repulsion term dominates the attraction of the energy term: the motion along the direction of " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": " completely cancels out, and only the expansion along " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "A_0" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": " remains. This means that to leading order, the learned representation has a larger variation along the directions where the data has a larger variation, which is what one naively expects. Collecting results, we have obtained the loss landscape in the neighborhood of the origin as " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "L = -\\mathrm{Tr}[WA_0W^T] + \\frac{1}{8}\\mathrm{Var}[|W(x - \\chi)|^2] + O(\\| W\\|^6)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 504, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 504, + 177 + ], + "type": "text", + "content": "NT-xent (SimCLR). 
As an additional example, we analyze Normalized Temperature Cross-Entropy loss (NT-xent) used in SimCLR (Chen et al., 2020a). Tian (2022) shows that InfoNCE can be generalized to encompass NT-xent as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 136, + 181, + 504, + 212 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 181, + 504, + 212 + ], + "spans": [ + { + "bbox": [ + 136, + 181, + 504, + 212 + ], + "type": "interline_equation", + "content": "L = \\mathbb {E} _ {\\epsilon} \\left[ - \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp (- | f (x _ {i}) - f (x _ {i} ^ {\\prime}) | ^ {2} / 2)}{\\sum_ {\\chi \\neq x} \\exp (- | f (x _ {i}) - f (\\chi_ {j}) | ^ {2} / 2) + \\alpha \\exp (- | f (x _ {i}) - f (x _ {i} ^ {\\prime}) | ^ {2} / 2)} \\right]. \\tag {7}", + "image_path": "18f4b227b33b8e102234aa624ff0f0eca0ad94addcd847f15b198f70c8e78fcd.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "text", + "content": "In contrast to InfoNCE, here one of the terms in the denominator is reweighted by a factor of " + }, + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\alpha \\geq 0" + }, + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "text", + "content": ". Two interesting limits are " + }, + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\alpha = 1" + }, + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "text", + "content": ", where we recover the InfoNCE loss, and " + }, + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\alpha = 0" + }, + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "text", + "content": ", where we obtain NT-xent. 
For general " + }, + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 213, + 504, + 258 + ], + "type": "text", + "content": ", we refer to this loss as the weighted InfoNCE. We will see in section 3 that this weighted InfoNCE can have a mild dimensional collapse problem." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 263, + 321, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 263, + 321, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 321, + 276 + ], + "type": "text", + "content": "The same perturbative expansion as Eq. (4)-(6) gives" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 109, + 278, + 505, + 301 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 278, + 505, + 301 + ], + "spans": [ + { + "bbox": [ + 109, + 278, + 505, + 301 + ], + "type": "interline_equation", + "content": "L = \\frac {1 - \\alpha}{N} \\operatorname {T r} \\left[ W C W ^ {T} \\right] - \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ \\left| W (x - \\chi) \\right| ^ {2} \\right] + O \\left(\\left\\| W \\right\\| ^ {6}\\right) + O \\left(\\left\\| W \\right\\| ^ {4} N ^ {- 1}\\right). \\tag {8}", + "image_path": "191d244847a499bffbb52c528bac1125bb11174baff0b309d37e0de8ea7fcbce.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 306, + 504, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 329 + ], + "type": "text", + "content": "Now, the Hessian of the origin is no longer guaranteed to be negative definite. 
In fact, if " + }, + { + "bbox": [ + 104, + 306, + 504, + 329 + ], + "type": "inline_equation", + "content": "\\frac{1 - \\alpha}{N} C - A_0 \\geq 0" + }, + { + "bbox": [ + 104, + 306, + 504, + 329 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 306, + 504, + 329 + ], + "type": "inline_equation", + "content": "W = 0" + }, + { + "bbox": [ + 104, + 306, + 504, + 329 + ], + "type": "text", + "content": " becomes an isolated local minimum." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 334, + 504, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 334, + 504, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 504, + 368 + ], + "type": "text", + "content": "Landscape Analysis. The above discussion shows that the common loss landscapes in self-supervised contrastive learning can be reduced to an effective form in Eq. (3). The following proposition shows that the variance term of the loss takes a specific form when the data is Gaussian." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 369, + 504, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 504, + 395 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 504, + 395 + ], + "type": "text", + "content": "Proposition 1. Let the data and noise be Gaussian. Then, " + }, + { + "bbox": [ + 104, + 369, + 504, + 395 + ], + "type": "inline_equation", + "content": "L = -\\mathrm{Tr}[W B W^T] + \\mathrm{Tr}[W\\Sigma W^T W\\Sigma W^T]" + }, + { + "bbox": [ + 104, + 369, + 504, + 395 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "type": "text", + "content": "When the training ends, one expects the model to locate at (at least close to) a stationary point of the loss. 
It is thus important to identify all the stationary points of this loss function." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "content": "Theorem 1. Let " + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "inline_equation", + "content": "d^{*} \\coloneqq \\min(d_{0}, d_{1})" + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "content": ". Let the data and noise be Gaussian. All stationary points " + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "content": " of Eq. (3) satisfy " + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "inline_equation", + "content": "W^{T}W = \\frac{1}{2}\\Sigma^{-1/2}UM\\Lambda U^{T}\\Sigma^{-1/2}" + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "inline_equation", + "content": "U\\Lambda U^{T}" + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "content": " is the eigenvalue decomposition of " + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "inline_equation", + "content": "\\Sigma^{-1/2}B\\Sigma^{-1/2}" + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "content": " is an arbitrary (masking) diagonal matrix containing only zero or one such that (1) " + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "inline_equation", + "content": "M_{ii} = 0" + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + 
], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "inline_equation", + "content": "\\Lambda_{ii} < 0" + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "content": " and (2) contain at most " + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "inline_equation", + "content": "d^{*}" + }, + { + "bbox": [ + 104, + 427, + 506, + 477 + ], + "type": "text", + "content": " nonzero terms." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 482, + 362, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 482, + 362, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 482, + 362, + 495 + ], + "type": "text", + "content": "Additionally, if " + }, + { + "bbox": [ + 104, + 482, + 362, + 495 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 482, + 362, + 495 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 482, + 362, + 495 + ], + "type": "inline_equation", + "content": "A_0" + }, + { + "bbox": [ + 104, + 482, + 362, + 495 + ], + "type": "text", + "content": " commute, all stationary points satisfy" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 254, + 497, + 504, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 497, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 254, + 497, + 504, + 520 + ], + "type": "interline_equation", + "content": "W ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1} B _ {M} \\Sigma^ {- 1}, \\tag {9}", + "image_path": "d5a0e6efe141868ad3a6cee10f19184da869e5da16ede1c1d817340d0014124c.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 523, + 432, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 432, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 432, + 536 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 523, + 432, + 536 + ], + "type": 
"inline_equation", + "content": "B_M" + }, + { + "bbox": [ + 104, + 523, + 432, + 536 + ], + "type": "text", + "content": " denotes the matrix obtained by masking the eigenvalues of " + }, + { + "bbox": [ + 104, + 523, + 432, + 536 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 523, + 432, + 536 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 523, + 432, + 536 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 523, + 432, + 536 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "text", + "content": "This stationary-point condition implies the direct cause of the dimensional collapse. Namely, dimensional collapse happens when the eigenvalues of the matrix " + }, + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "text", + "content": " become negative. The eigenvalues of " + }, + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "text", + "content": ", in turn, depend on the competition between data augmentation and the data feature. 
Comparing the commuting case with the noncommuting case, we see that the main difference is that when " + }, + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "text", + "content": " does not commute with " + }, + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "inline_equation", + "content": "A_0" + }, + { + "bbox": [ + 104, + 544, + 504, + 632 + ], + "type": "text", + "content": ", the augmentation can also change the orientation of the learned representation; otherwise, augmentation only affects the eigenvalues. To focus on the most important terms, we now assume that the augmentation is well-aligned with the features such that the augmentation covariance commute with the data covariance." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 635, + 329, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 329, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 329, + 647 + ], + "type": "text", + "content": "Assumption 1. From now on, we assume " + }, + { + "bbox": [ + 105, + 635, + 329, + 647 + ], + "type": "inline_equation", + "content": "CA_0 = A_0C" + }, + { + "bbox": [ + 105, + 635, + 329, + 647 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": "For the case of weighted InfoNCE, we have that " + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "inline_equation", + "content": "B = A_0 - \\frac{1 - \\alpha}{N} C" + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": " denote the " + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": "-th eigenvalue of the " + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": " that of " + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": " viewed in a predetermined order; then, the " + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": "th subspace collapses when " + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\frac{1 - \\alpha}{N} c_i \\geq a_i" + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": ", namely, when the variation introduced by the noise dominates that of the original data. Importantly, this collapse is a property shared by all stationary points of the landscape, and one cannot hope to fix the problem by, say, biasing the gradient descent towards a certain type of local minima. 
When weight decay is used, the condition for collapse becomes " + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\frac{1 - \\alpha}{N} c_i + \\gamma \\geq a_i" + }, + { + "bbox": [ + 104, + 654, + 505, + 733 + ], + "type": "text", + "content": ": it becomes easier to cause a collapse when weight decay is used." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "The global minimum of the loss function is also easy to find. For all stationary points, the loss function takes a simple form; " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "L = -\\frac{1}{4}\\mathrm{Tr}[\\Sigma^2 B_M B]" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": ". 
Thus, " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": " becomes more and more negative if the eigenvalues of " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "B_M" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": " align with the largest eigenvalues of " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": ". Namely, the global minimum is achieved if " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": " leaves the largest eigenvalues of " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": " intact." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 504, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 504, + 155 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 504, + 155 + ], + "type": "text", + "content": "Because the stationary points contain collapsed solutions where the eigenvalues of " + }, + { + "bbox": [ + 104, + 132, + 504, + 155 + ], + "type": "inline_equation", + "content": "W^T W" + }, + { + "bbox": [ + 104, + 132, + 504, + 155 + ], + "type": "text", + "content": " are zero, one is naturally interested in how likely it is to converge to these solutions." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "text", + "content": "Proposition 2. (" + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "inline_equation", + "content": "W^T W" + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "text", + "content": " achieves maximum possible rank) Let " + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "text", + "content": " denote the number of positive eigenvalues of " + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "text", + "content": ". Then, " + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "inline_equation", + "content": "\mathrm{rank}(W^T W) = \min(m, d^*)" + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "text", + "content": " for any local minimum." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 186, + 505, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 505, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 505, + 232 + ], + "type": "text", + "content": "This proposition implies that the loss landscape of contrastive SSL (with a linear model) is rather benign because all local minima must achieve a maximum possible rank. 
In fact, this result implies that the collapses may be well controllable by carefully controlling and tuning the eigenvalues of the matrix " + }, + { + "bbox": [ + 104, + 186, + 505, + 232 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 186, + 505, + 232 + ], + "type": "text", + "content": ", which directly depends on the nature of the data augmentation we use." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 240, + 286, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 240, + 286, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 286, + 251 + ], + "type": "text", + "content": "3.2 LANDSCAPE WITH NORMALIZATION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 258, + 504, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 504, + 302 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 504, + 302 + ], + "type": "text", + "content": "It is common in practice to normalize the learned representation such that " + }, + { + "bbox": [ + 104, + 258, + 504, + 302 + ], + "type": "inline_equation", + "content": "\\| f(x) \\|^2 = c" + }, + { + "bbox": [ + 104, + 258, + 504, + 302 + ], + "type": "text", + "content": ". When normalization is applied, only the direction of the learned representation matters. While this is a simple trick in practice, its implication on the landscape is poorly understood. In this section, we extend our theory to analyze the effect of normalization." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 307, + 455, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 307, + 455, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 455, + 320 + ], + "type": "text", + "content": "We model the effect of normalization as a regularization term: " + }, + { + "bbox": [ + 105, + 307, + 455, + 320 + ], + "type": "inline_equation", + "content": "R \\coloneqq (\\mathbb{E}_x\\| f(x)\\| ^2 -c)^2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 257, + 321, + 504, + 333 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 321, + 504, + 333 + ], + "spans": [ + { + "bbox": [ + 257, + 321, + 504, + 333 + ], + "type": "interline_equation", + "content": "L _ {\\text {n o r m}} = E q. (3) + \\kappa R. \\tag {10}", + "image_path": "b12bdeead7c1ed61f3d4ad07edab0d56ac600f11e28f681a78407eede475a676.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": "Note that this regularization term achieves two things simultaneously: (1) " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "\\| f(x) \\|^2 = c" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " is a minimizer of the loss function; (2) the regularization is invariant to any rotation of the learned representation. 
For a linear model, we note that this condition is not entirely the same as a direct normalization of the representation because it is generally impossible to achieve " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "\\| Wx \\|^2 = c" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " because a linear model has limited expressivity. However, it is generally possible to achieve the slightly weaker condition: the representation has a norm 1 on average. This loss function can also be seen as a mathematical model of the VICReg loss (Bardes et al., 2021), where " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " effectively models the variance regularization term of VICReg loss and " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " is its strength. This modeling is necessary because the variance term of the original VICReg is not differentiable and thus cannot be expanded. The proposed term " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " captures the essence of the variance term because it also encourages the representation to have a constant variance. 
Our theory also explains why the VICReg is observed to experience collapses when " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " is not large enough. As " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " tends to infinity, this constraint will become perfectly satisfied. We thus take the infinite " + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 334, + 505, + 477 + ], + "type": "text", + "content": " limit to study the effect of normalization." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 482, + 457, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 482, + 457, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 457, + 495 + ], + "type": "text", + "content": "The following proposition gives a condition that all stationary points of Eq. (10) satisfy." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": "Proposition 3. 
Let " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "\\rho(W) \\coloneqq \\operatorname{Tr}[W\\Sigma W^T]" + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "B' \\coloneqq B + 2\\kappa(c - \\rho)\\Sigma" + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": ", and let " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "\\Lambda_i" + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": " be the eigenvalues of " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "B'" + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": ". Then, every stationary point of Eq. (10) satisfy " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "W^T W = \\frac{1}{2}\\Sigma^{-1}B_M'\\Sigma^{-1}" + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": " is an arbitrary diagonal mask of the eigenvalues of " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "B'" + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": " containing only zero or one such that (1) " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "M_{ii} = 0" + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "\\Lambda_i < 0" + }, + { + "bbox": [ + 104, + 495, + 
504, + 542 + ], + "type": "text", + "content": " and (2) contain at most " + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "inline_equation", + "content": "d^*" + }, + { + "bbox": [ + 104, + 495, + 504, + 542 + ], + "type": "text", + "content": " nonzero terms." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "text", + "content": "Compared with the unnormalized case, the term " + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "inline_equation", + "content": "2\\kappa (1 - \\rho)\\Sigma_{M}" + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "text", + "content": " emerges due to normalization. The effect of normalization is as expected: it shrinks the norm of the model if " + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "inline_equation", + "content": "\\rho > 1" + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "text", + "content": ", and it expands the model if " + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "inline_equation", + "content": "\\rho < 1" + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "text", + "content": ", and it does not have any effect if we have already achieved " + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "inline_equation", + "content": "\\rho = 1" + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "text", + "content": ". Interestingly, this rescaling effect is anisotropic and stronger along the directions of larger eigenvalues of the covariance of the augmented data " + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 104, + 549, + 504, + 605 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 610, + 384, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 610, + 384, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 610, + 384, + 622 + ], + "type": "text", + "content": "The next theorem gives the explicit form of " + }, + { + "bbox": [ + 105, + 610, + 384, + 622 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 105, + 610, + 384, + 622 + ], + "type": "text", + "content": " at the stationary points." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "text", + "content": "Proposition 4. For any stationary point " + }, + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "inline_equation", + "content": "W^{*}" + }, + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "inline_equation", + "content": "c - \\rho(W^{*}) = \\frac{c - \\frac{1}{2}\\mathrm{Tr}[\\Sigma^{-1}B_{M}]}{1 + \\kappa d_{M}}" + }, + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "inline_equation", + "content": "d_{M}" + }, + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "text", + "content": " is the number of non-zero eigenvalues of " + }, + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "inline_equation", + "content": "B_{M}'" + }, + { + "bbox": [ + 104, + 624, + 504, + 653 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "content": "For a finite " + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "content": ", these results suggest that collapses can still happen. For VICReg, " + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "inline_equation", + "content": "B = -A_0" + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "content": ", and the complete collapse can happen when " + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "inline_equation", + "content": "\\kappa \\ll \\| A_0\\| /c\\|\\Sigma\\|" + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "content": " - this explains the experimental observation of collapses for small values of " + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "content": " in VICReg loss (Bardes et al., 2021)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Lastly, to understand normalization, we are interested in the case of " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\kappa \\to \\infty" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": ". 
Combining Proposition 3 and 4, we have proved the following theorem, showing that the asymptotic solution converges to a form independent of " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 390, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 390, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 390, + 95 + ], + "type": "text", + "content": "Theorem 2. Let " + }, + { + "bbox": [ + 105, + 82, + 390, + 95 + ], + "type": "inline_equation", + "content": "W_{\\kappa}" + }, + { + "bbox": [ + 105, + 82, + 390, + 95 + ], + "type": "text", + "content": " be a stationary point of Eq. (10) at fixed " + }, + { + "bbox": [ + 105, + 82, + 390, + 95 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 105, + 82, + 390, + 95 + ], + "type": "text", + "content": ". 
Then," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 187, + 99, + 504, + 128 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 99, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 187, + 99, + 504, + 128 + ], + "type": "interline_equation", + "content": "\\lim _ {\\kappa \\rightarrow \\infty} W _ {\\kappa} ^ {T} W _ {\\kappa} = \\frac {1}{2} \\Sigma^ {- 1} \\left[ B _ {M} + \\frac {2 c - \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right]}{d _ {M}} \\Sigma_ {M} \\right] \\Sigma^ {- 1}. \\tag {11}", + "image_path": "35f69dd9f02ea4a277492b94b4685d7ccefdad3af1c1af4370db2b89761e41d6.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 138, + 504, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 138, + 504, + 165 + ], + "spans": [ + { + "bbox": [ + 104, + 138, + 504, + 165 + ], + "type": "text", + "content": "The correction term " + }, + { + "bbox": [ + 104, + 138, + 504, + 165 + ], + "type": "inline_equation", + "content": "\\frac{2c - \\mathrm{Tr}[\\Sigma B_M]}{d_0}\\Sigma_M" + }, + { + "bbox": [ + 104, + 138, + 504, + 165 + ], + "type": "text", + "content": " emerges as a result of applying normalization. 
The effect can be easier to understand if we write the solution as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 197, + 168, + 504, + 198 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 168, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 197, + 168, + 504, + 198 + ], + "type": "interline_equation", + "content": "W ^ {T} W = \\frac {1}{2} \\left[ \\Sigma^ {- 1} B _ {M} - \\frac {\\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right]}{d _ {M}} M + \\frac {2 c}{d _ {M}} \\right] \\Sigma^ {- 1}, \\tag {12}", + "image_path": "89b340084734072004dff816ecb3e842afe0f6b238fb2530455f07e983ad2651.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "spans": [ + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "text", + "content": "where we have used the relation " + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "inline_equation", + "content": "\\Sigma_{M}\\Sigma^{-1} = M" + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "text", + "content": ". Note the term in brackets: it subtracts the average eigenvalue of " + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "inline_equation", + "content": "\\Sigma^{-1}B_M" + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "inline_equation", + "content": "\\Sigma^{-1}B_M" + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "text", + "content": " and shifts the remaining eigenvalues positively by " + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "inline_equation", + "content": "2c / d_{M}" + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "text", + "content": ". 
Because the eigenvalues of " + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "inline_equation", + "content": "WW^{T}" + }, + { + "bbox": [ + 104, + 202, + 504, + 238 + ], + "type": "text", + "content": " must be positive, the following condition must hold for all solutions:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 270, + 242, + 504, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 242, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 270, + 242, + 504, + 255 + ], + "type": "interline_equation", + "content": "\\lambda_ {i} + 2 c / d _ {M} > \\bar {\\lambda}, \\tag {13}", + "image_path": "9bf66bb25de314a219af4f622420dea264a9a00009ff9db80ca73bb97c15d2c5.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "inline_equation", + "content": "\\lambda_{i}" + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "text", + "content": " are the eigenvalues of " + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "inline_equation", + "content": "\\Sigma^{-1}B_M" + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "inline_equation", + "content": "\\bar{\\lambda}" + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "text", + "content": " is its average. 
Namely, for the " + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "text", + "content": "-th dimension not to collapse, it must be smaller than the average eigenvalues by at most " + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "inline_equation", + "content": "2c / d_{M}" + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "text", + "content": ". Any smaller eigenvalues must collapse. Compared to the case without normalization, normalization makes collapses dependent on the relative strength of each feature and augmentation. In the following discussion, we let " + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "inline_equation", + "content": "c = 1" + }, + { + "bbox": [ + 104, + 260, + 506, + 361 + ], + "type": "text", + "content": " to simplify the discussion. We present a detailed analysis of this condition in Section D.1. One finds that the condition for collapse becomes heavily dependent on the data structure, and there are cases where collapses become harder, and there are cases where collapses become much easier. Importantly, it also becomes the case that a sufficiently strong augmentation can always cause a collapse in the corresponding subspace." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 365, + 504, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 504, + 443 + ], + "type": "text", + "content": "Effect of Bias. Lastly, we study the effect of explicitly having a bias term: " + }, + { + "bbox": [ + 104, + 365, + 504, + 443 + ], + "type": "inline_equation", + "content": "Wx \\rightarrow Wx + b" + }, + { + "bbox": [ + 104, + 365, + 504, + 443 + ], + "type": "text", + "content": ". 
First of all, when there is no normalization, the bias term does not affect the solution because the loss landscape is invariant to a translation in the learned representation. However, this effect dramatically changes if we apply normalization at the same time. This is because normalization removes the translation symmetry of the effective loss, and the trivial solution " + }, + { + "bbox": [ + 104, + 365, + 504, + 443 + ], + "type": "inline_equation", + "content": "W = 0" + }, + { + "bbox": [ + 104, + 365, + 504, + 443 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 365, + 504, + 443 + ], + "type": "inline_equation", + "content": "b = 1" + }, + { + "bbox": [ + 104, + 365, + 504, + 443 + ], + "type": "text", + "content": " becomes the simplest way to achieve the norm-1 constraint. Our result shows that the addition of bias dramatically affects the stationary points." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "text", + "content": "Theorem 3. Let " + }, + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "inline_equation", + "content": "f(x) = Wx + b" + }, + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[x] = 0" + }, + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "text", + "content": ". Then, all stationary points " + }, + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "text", + "content": " satisfy Eq. 
(9), subject to the constraint that " + }, + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "inline_equation", + "content": "\\mathrm{Tr}[W^T\\Sigma W]\\leq c" + }, + { + "bbox": [ + 104, + 445, + 504, + 471 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "text", + "content": "Namely, the solution reverts to the case where there is no normalization at all, except that the norm of the solution can no longer be larger than " + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "text", + "content": ". This upper bound can make collapses much easier to happen. For example, if " + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "inline_equation", + "content": "c < (a_i - c_i) / (a_i + c_i)" + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "text", + "content": ", a complete collapse can happen despite normalization. 
When " + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "inline_equation", + "content": "c = 1" + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "inline_equation", + "content": "c_i \\ll a_i" + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "inline_equation", + "content": "\\rho \\approx d_M / 2" + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "text", + "content": " and the constraint indicates that " + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "inline_equation", + "content": "d_M \\leq 2" + }, + { + "bbox": [ + 104, + 479, + 505, + 589 + ], + "type": "text", + "content": ": when the augmentation is very weak, there are at most 2 nontrivial subspaces. This is too restrictive for learning a meaningful representation, which helps us understand why dimensional collapse can harm learning in practice. The fact that simple normalization cannot prevent collapse has been noticed for a while for the simplest case of a cosine-similarity loss, and our result explains why previous works have tried to introduce asymmetry to cosine similarity to avoid collapses (Grill et al., 2020; Chen and He, 2021)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "text", + "content": "Relevant Loss Functions. Having developed a framework for understanding normalization, we show that other common loss functions in SSL can also be written in the form given in Eq. (3). 
The spectral contrastive loss (SCL) (HaoChen et al., 2021) reads" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 143, + 632, + 504, + 648 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 632, + 504, + 648 + ], + "spans": [ + { + "bbox": [ + 143, + 632, + 504, + 648 + ], + "type": "interline_equation", + "content": "L _ {S C L} = - 2 \\mathbb {E} [ f (x) ^ {T} f (x ^ {\\prime}) ] + \\mathbb {E} [ (f (x) ^ {T} f (\\chi)) ^ {2} ] + c o n s t. \\quad \\text {s . t .} \\| f (x) \\| ^ {2} = 1. \\tag {14}", + "image_path": "cfaf19501068d4bb3005a7d7521f2731aedb2caf13a0efa5d44f4c816b93182f.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 652, + 504, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 652, + 504, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 652, + 504, + 674 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 652, + 504, + 674 + ], + "type": "inline_equation", + "content": "f(x) = Wx" + }, + { + "bbox": [ + 104, + 652, + 504, + 674 + ], + "type": "text", + "content": " be linear, the distributions are zero-mean Gaussian, and ignore the normalization. This loss function becomes" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 202, + 679, + 504, + 694 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 679, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 202, + 679, + 504, + 694 + ], + "type": "interline_equation", + "content": "L _ {S C L} = - 2 \\operatorname {T r} \\left[ W C W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right]. 
\\tag {15}", + "image_path": "8cb6846417af7c267d8111326f3fd2852ae1a19e960203eba1f57f5aae8f61a9.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 699, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 699, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 699, + 504, + 733 + ], + "type": "text", + "content": "When normalization exists, we can apply the result in Section 3.2. By our argument, there is no collapse in this loss function. The difference with InfoNCE loss is that the learned feature spreads along the directions of the augmentation " + }, + { + "bbox": [ + 104, + 699, + 504, + 733 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 699, + 504, + 733 + ], + "type": "text", + "content": ", not along the directions of the feature " + }, + { + "bbox": [ + 104, + 699, + 504, + 733 + ], + "type": "inline_equation", + "content": "A_0" + }, + { + "bbox": [ + 104, + 699, + 504, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 86, + 202, + 156 + ], + "blocks": [ + { + "bbox": [ + 107, + 86, + 202, + 156 + ], + "lines": [ + { + "bbox": [ + 107, + 86, + 202, + 156 + ], + "spans": [ + { + "bbox": [ + 107, + 86, + 202, + 156 + ], + "type": "image", + "image_path": "b6cdba91d2a565539e66ba75f413aca578134d7d83ec9125d2516d70353f36e2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 109, + 156, + 200, + 167 + ], + "lines": [ + { + "bbox": [ + 109, + 156, + 200, + 167 + ], + "spans": [ + { + "bbox": [ + 109, + 156, + 200, + 167 + ], + "type": "text", + "content": "(a) Landscape of ResNet" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 210, + 79, + 304, + 156 + ], + "blocks": [ + { + "bbox": [ + 210, + 79, + 304, + 156 + ], + "lines": [ + { + "bbox": [ + 210, + 79, + 304, + 156 + ], + "spans": [ + { + "bbox": [ + 210, + 79, + 304, + 156 + ], + "type": "image", + "image_path": "0635702b74af3b585911f8fb63feaf16d41cc921156f0b8b6074cc4dd163371a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 229, + 156, + 285, + 167 + ], + "lines": [ + { + "bbox": [ + 229, 
+ 156, + 285, + 167 + ], + "spans": [ + { + "bbox": [ + 229, + 156, + 285, + 167 + ], + "type": "text", + "content": "(b) No collapse" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 312, + 79, + 405, + 156 + ], + "blocks": [ + { + "bbox": [ + 312, + 79, + 405, + 156 + ], + "lines": [ + { + "bbox": [ + 312, + 79, + 405, + 156 + ], + "spans": [ + { + "bbox": [ + 312, + 79, + 405, + 156 + ], + "type": "image", + "image_path": "760f7caf8872e36e82a2cee351701c1849aae17a3fbcd8d69f68db8cdae1d80e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 312, + 156, + 403, + 167 + ], + "lines": [ + { + "bbox": [ + 312, + 156, + 403, + 167 + ], + "spans": [ + { + "bbox": [ + 312, + 156, + 403, + 167 + ], + "type": "text", + "content": "(c) Dimensional collapse" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 412, + 79, + 504, + 156 + ], + "blocks": [ + { + "bbox": [ + 412, + 79, + 504, + 156 + ], + "lines": [ + { + "bbox": [ + 412, + 79, + 504, + 156 + ], + "spans": [ + { + "bbox": [ + 412, + 79, + 504, + 156 + ], + "type": "image", + "image_path": "3700ed29d4b867983f389d76e342c8dc1cf9f00536ba629c8fcce3994613d2ce.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 417, + 156, + 498, + 167 + ], + "lines": [ + { + "bbox": [ + 417, + 156, + 498, + 167 + ], + "spans": [ + { + "bbox": [ + 417, + 156, + 498, + 167 + ], + "type": "text", + "content": "(d) Complete collapse" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 106, + 175, + 202, + 243 + ], + "blocks": [ + { + "bbox": [ + 106, + 175, + 202, + 243 + ], + "lines": [ + { + "bbox": [ + 106, + 175, + 202, + 243 + ], + "spans": [ + { + "bbox": [ + 106, + 175, + 202, + 243 + ], + "type": 
"image", + "image_path": "247de21cfec7c7aa2b9b112b93049f8d26a3a94d2541a04acb43288b78b9356b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 115, + 243, + 194, + 254 + ], + "lines": [ + { + "bbox": [ + 115, + 243, + 194, + 254 + ], + "spans": [ + { + "bbox": [ + 115, + 243, + 194, + 254 + ], + "type": "text", + "content": "(e) Landscape of ViT" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "lines": [ + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "type": "text", + "content": "Figure 2: Landscape of Resnet18 (upper) and vision transformers (lower) on CIFAR10 with SimCLR qualitatively agrees with our linear theory. (a) Training objective " + }, + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "type": "text", + "content": " as a function of a rescaling of the last layer " + }, + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "type": "inline_equation", + "content": "W \\rightarrow aW" + }, + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "type": "text", + "content": ". (b-d) " + }, + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "type": "text", + "content": " as a function of a " + }, + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "type": "inline_equation", + "content": "2d" + }, + { + "bbox": [ + 104, + 258, + 504, + 340 + ], + "type": "text", + "content": " rescaling of the last layer where the data augmentation strength is (b) small, (c) intermediate, and (d) strong. Red indicates areas of high loss, blue indicates areas of low loss, and stars locate local minima. 
The use of data augmentation changes the stability of the origin, a qualitative change that leads to different types of collapses in qualitative agreement with our linear theory (cf. Figure 1). Additionally, we also notice the same qualitative changes of landscape in simpler nonlinear models (see Appendix A). (e-h) are the same setting but for ViT." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 210, + 167, + 302, + 243 + ], + "blocks": [ + { + "bbox": [ + 210, + 167, + 302, + 243 + ], + "lines": [ + { + "bbox": [ + 210, + 167, + 302, + 243 + ], + "spans": [ + { + "bbox": [ + 210, + 167, + 302, + 243 + ], + "type": "image", + "image_path": "99218392843222b7950988269986742fda90110ffc047d25018fe0f4107c9a5c.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 228, + 243, + 283, + 254 + ], + "lines": [ + { + "bbox": [ + 228, + 243, + 283, + 254 + ], + "spans": [ + { + "bbox": [ + 228, + 243, + 283, + 254 + ], + "type": "text", + "content": "(f) No collapse" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 310, + 167, + 402, + 243 + ], + "blocks": [ + { + "bbox": [ + 310, + 167, + 402, + 243 + ], + "lines": [ + { + "bbox": [ + 310, + 167, + 402, + 243 + ], + "spans": [ + { + "bbox": [ + 310, + 167, + 402, + 243 + ], + "type": "image", + "image_path": "74a020fac9322634c3ab15388122141e788c5a0121465d6f8f5a6eb4821660c8.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 243, + 402, + 255 + ], + "lines": [ + { + "bbox": [ + 310, + 243, + 402, + 255 + ], + "spans": [ + { + "bbox": [ + 310, + 243, + 402, + 255 + ], + "type": "text", + "content": "(g) Dimensional collapse" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 
410, + 167, + 502, + 243 + ], + "blocks": [ + { + "bbox": [ + 410, + 167, + 502, + 243 + ], + "lines": [ + { + "bbox": [ + 410, + 167, + 502, + 243 + ], + "spans": [ + { + "bbox": [ + 410, + 167, + 502, + 243 + ], + "type": "image", + "image_path": "d8c1fcf3738a0556ea7afca13056d481ca2ba01613f438daee1b8479e38e48ad.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 414, + 243, + 495, + 254 + ], + "lines": [ + { + "bbox": [ + 414, + 243, + 495, + 254 + ], + "spans": [ + { + "bbox": [ + 414, + 243, + 495, + 254 + ], + "type": "text", + "content": "(h) Complete collapse" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 352, + 504, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 504, + 407 + ], + "type": "text", + "content": "The case of Barlow Twin (BT) (Zbontar et al., 2021) is similar. While the fourth-order term of BT is much more complicated due to the imbalance created by the " + }, + { + "bbox": [ + 104, + 352, + 504, + 407 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 352, + 504, + 407 + ], + "type": "text", + "content": " term. The second-order term can be identified easily: " + }, + { + "bbox": [ + 104, + 352, + 504, + 407 + ], + "type": "inline_equation", + "content": "L_{BT} = -2\\mathrm{Tr}[W\\Sigma W^T] + O(||W||^4)" + }, + { + "bbox": [ + 104, + 352, + 504, + 407 + ], + "type": "text", + "content": ". This also does not collapse. A difference between the SCL loss and InfoNCE is that the learned representation has a spread that aligns with the combination of the feature and the augmentation strength." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 420, + 200, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 420, + 200, + 431 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 200, + 431 + ], + "type": "text", + "content": "4 IMPLICATIONS" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 442, + 504, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 442, + 504, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 442, + 504, + 466 + ], + "type": "text", + "content": "In this section, we explore some theoretical and practical implications of our results. In Appendix Section A, we also present numerical simulations that directly validate the predictions of the theory." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 475, + 293, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 293, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 293, + 485 + ], + "type": "text", + "content": "4.1 RELEVANCE TO NONLINEAR MODELS" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "content": "An important question is how much of the analysis is relevant for deep nonlinear models in general. In fact, the loss landscape we have studied is quite close to the most general landscape one can have. Let " + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "inline_equation", + "content": "L(f(x))" + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "content": " be a general SSL loss function for data point " + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "content": ". 
The quality of the learned representation should be independent of the population-level orientation of the representation. Therefore, the loss function should satisfy a rotational invariance. Namely, for any rotation matrix " + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "inline_equation", + "content": "L(f(x)) = L(Rf(x))" + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "content": "; this rotational invariance implies that the loss should expand as " + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "inline_equation", + "content": "L(f(x)) = af(x)^T f(x) + b[f(x)^T f(x)]^2 + O(f(x)^6)" + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "content": ". Note that all the odd-order terms of " + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "inline_equation", + "content": "f(x)" + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "content": " vanish due to the rotational symmetry. 
Substituting " + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "inline_equation", + "content": "f(x) = W\\phi(x)" + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "content": " in the loss function, we obtain a very general form of landscape that " + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 493, + 504, + 594 + ], + "type": "text", + "content": " obeys:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 192, + 601, + 504, + 616 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 601, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 192, + 601, + 504, + 616 + ], + "type": "interline_equation", + "content": "L (W, \\phi) = \\operatorname {T r} \\left[ W ^ {T} W A \\right] + \\sum W _ {i m} W _ {j m} W _ {k n} W _ {l n} Z _ {i j k l}, \\tag {16}", + "image_path": "1ad83c6c53cb01497b5c9bdad5a07d5311d4f04c46e31fcdc53d1a4fe5ec0e54.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " are dependent on " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ". Note how all the examples we have studied take this form. 
For " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ", its collapse entirely depends on the stability of the matrix " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ". Thus the study of the stability of the matrix " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " becomes crucial for our understanding. To illustrate, we train a Resnet18 on CIFAR10 with the SimCLR loss with normalization and with weight decay strength " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "10^{-3}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " until convergence to obtain the converged weights " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "W^{*}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ". The representation has a dimension 128. We rescale the weight matrix of the last layer " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "W_{\\mathrm{last}}^{*}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " by a factor " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " and compute the loss as a function of " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ". 
See Figure 2-a. We then partition the singular values of " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "W_{\\mathrm{last}}^{*}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " into the larger half and the smaller half. We rescale the larger half by a factor " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "r_1" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " and the smaller half by " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "r_2" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ". We plot the loss as a " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "2d" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " function of " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "(r_1, r_2)" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " in Figure 2. We also perform experiments for vision transformers (ViT) in the lower row (Dosovitskiy et al., 2020). In all cases, the landscape features qualitative changes comparable to those in Figure 1." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "text", + "content": "A connection to Landau theory in physics. Those familiar with statistical physics should note that the proposed theory is analogous to the Landau theory of second-order phase transitions. When treating the loss function as the free energy, the square root of the eigenvalues " + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\sqrt{\\lambda}" + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "inline_equation", + "content": "W^T W" + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "text", + "content": " are the order parameters of the system, and the phase transitions happen when " + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "text", + "content": " turns from 0 to positive. 
These transitions (collapses) happen because of symmetry breaking (Landau and Lifshitz, 2013): the loss function (2) is symmetric in the sign of " + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "text", + "content": ". Yet, for any nontrivial learning, " + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "text", + "content": " must be nonzero; thus, a symmetry breaking of the sign of " + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 82, + 506, + 251 + ], + "type": "text", + "content": " needs to happen for learning. The recent work by Ziyin and Ueda (2022) suggested how symmetry breaking around the origin and Landau theory could explain various types of collapses in deep learning. Therefore, the dimensional collapse could be related to neural collapses in supervised learning (Papyan et al., 2020; Ziyin et al., 2022a) and posterior collapse in Bayesian deep learning (Wang and Ziyin, 2022). Because second-order phase transitions should come with the divergence of the correlation function, one might also wonder what is \"divergent\" in the SSL problem. Here, the learning time scale for the collapsing dimension is divergent at the critical point because the second-order term vanishes in this direction, and so the dynamics are effectively frozen along this direction." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 259, + 303, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 259, + 303, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 259, + 303, + 270 + ], + "type": "text", + "content": "4.2 ROBUSTLY INDUCING GOOD COLLAPSES" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 277, + 355, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 355, + 366 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 355, + 366 + ], + "type": "text", + "content": "Contrary to previous works, a recent work (Cosentino et al., 2022) has suggested that dimensional collapse can be beneficial and significantly improve the generalization performance of the model. This observation raises a question. How can dimensional collapse be beneficial and how can it be induced? In the following, we first introduce " + }, + { + "bbox": [ + 104, + 277, + 355, + 366 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 277, + 355, + 366 + ], + "type": "text", + "content": "-InfoNCE, which can adjust the degree of dimensional collapse, and analyze the collapse behavior to elucidate the mechanism of task-aligned collapse." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 370, + 357, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 370, + 357, + 514 + ], + "spans": [ + { + "bbox": [ + 104, + 370, + 357, + 514 + ], + "type": "text", + "content": "Adjusting the degree of dimensional collapse with " + }, + { + "bbox": [ + 104, + 370, + 357, + 514 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 370, + 357, + 514 + ], + "type": "text", + "content": "-InfoNCE. Despite the potential benefit, existing SSL loss functions cannot robustly induce dimensional collapse. 
InfoNCE is insufficient to induce a collapse, and the collapse induced by SimCLR depends on a vanishingly small parameter " + }, + { + "bbox": [ + 104, + 370, + 357, + 514 + ], + "type": "inline_equation", + "content": "1/N" + }, + { + "bbox": [ + 104, + 370, + 357, + 514 + ], + "type": "text", + "content": ". One thus wonders whether there is a loss function that allows us to induce collapsing behavior in a more predictable manner so that one might controllably extract some benefits from collapse. Our result suggests that one way to directly control collapses is through the strength of the competition for the model Hessian at the origin. For InfoNCE, one way to achieve this is to weigh the entropy term by a general factor " + }, + { + "bbox": [ + 104, + 370, + 357, + 514 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 370, + 357, + 514 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 518, + 360, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 518, + 360, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 518, + 360, + 544 + ], + "type": "interline_equation", + "content": "\\left. \\right. 
\\mathbb {E} _ {x} \\left\\{\\frac {1}{2} | f (x) - f \\left(x ^ {\\prime}\\right) | ^ {2} + \\beta \\log \\mathbb {E} _ {\\chi} \\left[ \\exp \\left(- \\frac {1}{2} | f (x) - f (\\chi) | ^ {2}\\right)\\right]\\right\\}.", + "image_path": "d9aede6614659c7cb20a5d49ef7b734b7fd027f9baa1311708b5de2f2e3b5f66.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 546, + 357, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 546, + 357, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 546, + 357, + 578 + ], + "type": "text", + "content": "Due to its similarity with the " + }, + { + "bbox": [ + 104, + 546, + 357, + 578 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 546, + 357, + 578 + ], + "type": "text", + "content": "-VAE in Bayesian learning, we call it the " + }, + { + "bbox": [ + 104, + 546, + 357, + 578 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 546, + 357, + 578 + ], + "type": "text", + "content": "-InfoNCE. 
The leading term in the loss function becomes" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 167, + 578, + 294, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 578, + 294, + 592 + ], + "spans": [ + { + "bbox": [ + 167, + 578, + 294, + 592 + ], + "type": "interline_equation", + "content": "- \\operatorname {T r} \\left[ W \\left(A _ {0} - (1 - \\beta) C\\right) W ^ {T} \\right].", + "image_path": "72ab107450ef6e50e4648c363d17a11881ff4fc62efd68b768626eab6d0caaab.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 594, + 357, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 357, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 357, + 616 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 104, + 594, + 357, + 616 + ], + "type": "inline_equation", + "content": "1 - \\beta > 0" + }, + { + "bbox": [ + 104, + 594, + 357, + 616 + ], + "type": "text", + "content": ", the augmentations " + }, + { + "bbox": [ + 104, + 594, + 357, + 616 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 594, + 357, + 616 + ], + "type": "text", + "content": " pull the representation towards zero. When the augmentation is as strong as the fea-" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "text", + "content": "ture variations, a collapse happens. One can thus introduce collapse by setting " + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "text", + "content": " to be sufficiently small. 
When " + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "inline_equation", + "content": "1 - \\beta < 0" + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "text", + "content": ", the augmentations push the weights away from the origin along its direction, resulting in no collapse at all: When one really wants to avoid collapse, one can use a rather large " + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "inline_equation", + "content": "\\beta = 1" + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "text", + "content": " is thus at the boundary of this bifurcating behavior. We note that existing loss functions often do not have a parameter that is directly controlling the collapse behavior (see Table 1). The " + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 616, + 504, + 683 + ], + "type": "text", + "content": " parameter here directly controls the level of difficulty of collapse." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 687, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 734 + ], + "type": "text", + "content": "Achieving invariance with dimensional collapse. Here, we closely study an illustrative minimal example to demonstrate how collapses can be beneficial. 
Consider the following structured data generating process where the input features can be separated into two sets: (1) a task-relevant set with dimension " + }, + { + "bbox": [ + 104, + 687, + 504, + 734 + ], + "type": "inline_equation", + "content": "d_{c} < d_{0}" + }, + { + "bbox": [ + 104, + 687, + 504, + 734 + ], + "type": "text", + "content": " and (2) a task-irrelevant set: " + }, + { + "bbox": [ + 104, + 687, + 504, + 734 + ], + "type": "inline_equation", + "content": "x = (x_{1},\\dots,x_{d_{c}},\\dots,x_{d_{0}})" + }, + { + "bbox": [ + 104, + 687, + 504, + 734 + ], + "type": "text", + "content": ". Our result suggests" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 364, + 261, + 504, + 376 + ], + "blocks": [ + { + "bbox": [ + 364, + 261, + 504, + 376 + ], + "lines": [ + { + "bbox": [ + 364, + 261, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 364, + 261, + 504, + 376 + ], + "type": "image", + "image_path": "151d2f5ed48da89f2bf707f660fd4d9553cc4bfa4b0a668bd6b3db9ea7c6532f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 364, + 377, + 503, + 484 + ], + "blocks": [ + { + "bbox": [ + 364, + 377, + 503, + 484 + ], + "lines": [ + { + "bbox": [ + 364, + 377, + 503, + 484 + ], + "spans": [ + { + "bbox": [ + 364, + 377, + 503, + 484 + ], + "type": "image", + "image_path": "ba1c6e376e5802b03cc41f400c616a162ddd4953464210016db0340c31011fdf.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 485, + 505, + 605 + ], + "lines": [ + { + "bbox": [ + 362, + 485, + 505, + 605 + ], + "spans": [ + { + "bbox": [ + 362, + 485, + 505, + 605 + ], + "type": "text", + "content": "Figure 3: Top: Phase diagram of representational collapses. 
Bottom: " + }, + { + "bbox": [ + 362, + 485, + 505, + 605 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 362, + 485, + 505, + 605 + ], + "type": "text", + "content": "-InfoNCE with " + }, + { + "bbox": [ + 362, + 485, + 505, + 605 + ], + "type": "inline_equation", + "content": "\\beta = 0.5" + }, + { + "bbox": [ + 362, + 485, + 505, + 605 + ], + "type": "text", + "content": ". The generalization error of a downstream regression task where the data augmentation (1) is isotropic and noninformative or (2) aligns with the style. We see that the performance worsens as collapses happen for the noninformative augmentation and improves as the collapse happens for the style-targeting augmentation." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "a precise way to remove the irrelevant features from the learned representation. 
For the purpose of causing a robust collapse, we use the " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "-InfoNCE with " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "\\beta = 1/2" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": ". For illustration, we consider the simple case " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "d_c = 1" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "d_0 = 2" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": ". For any input " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "x = (x_1, x_2)" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": ", the label is generated as a linear function of " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "x_1" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "y = cx_1" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": "Correspondingly, we consider a structured data augmentation " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "x = \\hat{x} + \\sigma R\\xi" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "R \\in \\mathbb{R}^{d_0 \\times d_0}" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "R = \\text{diag}(\\sqrt{1 - \\theta}, \\sqrt{\\theta})" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\theta \\in [0,1]" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": ". The parameter " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": " controls the overall strength of the augmentation, and " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": " controls the orientation of the strength. 
When " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\theta = 0.5" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": ", we have an uninformative isotropic noise that has often been used in practice. When " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\theta = 1" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": ", the augmentation is only on the task-irrelevant feature, and when " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\theta = 0" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": ", the augmentation is only on the content. Since the prediction target only depends on the content, we want to learn a representation invariant to the style. For the downstream regression task, we use the learned representations " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "z := f(\\hat{x})" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": " to train a ridge linear regressor that minimizes " + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\min_G \\mathbb{E}_{\\hat{x}}[||Gz - y(\\hat{x})||^2] + 0.001||G||^2" + }, + { + "bbox": [ + 104, + 131, + 506, + 266 + ], + "type": "text", + "content": ". See Figure 3. The top panel shows the phase diagram of this problem with different combinations of the augmentation strengths and orientations. The bottom panel shows that collapses introduce phase-transition-like behaviors in the generalization performance and that a data augmentation aligning with the task-irrelevant dimension improves performance." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 277, + 284, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 284, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 284, + 289 + ], + "type": "text", + "content": "4.3 ROBUSTNESS TO DATA IMBALANCE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 297, + 297, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 297, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 297, + 462 + ], + "type": "text", + "content": "Our theory is not only relevant for understanding collapses but can also be used to understand how an SSL model encodes the feature. Liu et al. (2021) recently showed that compared with supervised learning, SSL techniques are relatively more robust to imbalanced datasets that have disproportionately represented minority subgroups. As another application of our analysis, we illustrate the robustness of different techniques is not equal. As we have seen, the learned model " + }, + { + "bbox": [ + 104, + 297, + 297, + 462 + ], + "type": "inline_equation", + "content": "W^T W" + }, + { + "bbox": [ + 104, + 297, + 297, + 462 + ], + "type": "text", + "content": " has eigenvalues that, to the leading order, are proportional to the Hessian " + }, + { + "bbox": [ + 104, + 297, + 297, + 462 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 297, + 297, + 462 + ], + "type": "text", + "content": ", which is different for each loss function. 
As previously summarized in Table 1, for InfoNCE and SimCLR, the learned model aligns" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 304, + 266, + 504, + 384 + ], + "blocks": [ + { + "bbox": [ + 304, + 266, + 504, + 384 + ], + "lines": [ + { + "bbox": [ + 304, + 266, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 504, + 384 + ], + "type": "image", + "image_path": "98faacf0f58a577c4eaa29fb3811d104d74c5162d8911fbecf1fc66f2bd95273.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 388, + 504, + 459 + ], + "lines": [ + { + "bbox": [ + 302, + 388, + 504, + 459 + ], + "spans": [ + { + "bbox": [ + 302, + 388, + 504, + 459 + ], + "type": "text", + "content": "Figure 4: Spectral Contrastive loss (SCL) is more robust against data imbalance than InfoNCE. We train SimCLR and SCL ResNet-12 models on imbalanced versions of CIFAR-10. We see that SCL is more robust than SimCLR, as suggested by our theory. These results are especially pronounced when there is no projector head." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 462, + 504, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 504, + 518 + ], + "type": "text", + "content": "with the eigenvalues of the data covariance " + }, + { + "bbox": [ + 104, + 462, + 504, + 518 + ], + "type": "inline_equation", + "content": "A_0" + }, + { + "bbox": [ + 104, + 462, + 504, + 518 + ], + "type": "text", + "content": ", which varies hugely as different classes of a dataset become more and more imbalanced. 
In comparison, the model trained with SCL aligns purely with the augmentation covariance " + }, + { + "bbox": [ + 104, + 462, + 504, + 518 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 462, + 504, + 518 + ], + "type": "text", + "content": ", which is independent of the data imbalance. This suggests that the SCL landscape can be less dependent on data and thus more robust against data imbalance. See Figure 4. More experimental details are given in Appendix C." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 528, + 196, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 528, + 196, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 196, + 540 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 550, + 506, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 506, + 661 + ], + "type": "text", + "content": "In this work, we approached the problem of collapses in SSL from a loss landscape perspective. We analytically solved an effective landscape that can be extended to understand the effect of normalization. Our result suggests that dimensional collapse can be well understood in the minimal setting and is something neutral to learning on its own. With the help from the theory, we also showed that when task-irrelevant dimensions are targeted, dimensional collapse can result in improved performance, whereas an uninformative noise will (without good luck) lead to collapses in the dimensions that are relevant to the task. It is thus important for practitioners to devise targeted data augmentation mechanisms that incorporate the correct domain knowledge. 
Also, we advocated the thesis that the local geometry of the loss landscape around the origin is an essential component for understanding collapses, and this should invite more future work to understand the landscape around the origin." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "The limitation of our work is clear; our result only identifies the causes of the collapse that can be directly attributed to the low-rank structure of the local minima of the landscape. One possible alternative cause of the collapse is dynamics. For example, having a large learning rate and small batch can sometimes cause a convergence towards the saddle points in the landscape (Ziyin et al., 2022b), which, as we have shown, are the collapsed solutions. Investigating the role of dynamics in the collapse is thus a crucial future problem." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 226, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 226, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 226, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGEMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 150 + ], + "type": "text", + "content": "This work was supported by a KAKENHI Grant No. JP18H01145 from the Japan Society for the Promotion of Science. Ziyin has been financially supported by the JSPS fellowship and thanks Zihan for the generous help during the writing of this paper. ESL was partially supported via NSF under the award CNS-2008151." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 167, + 176, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 167, + 176, + 178 + ], + "spans": [ + { + "bbox": [ + 106, + 167, + 176, + 178 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 185, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 185, + 505, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 505, + 219 + ], + "type": "text", + "content": "Arora, S., Khandeparkar, H., Khodak, M., Plevrakis, O., and Saunshi, N. (2019). A Theoretical Analysis of Contrastive Unsupervised Representation Learning. In Proc. Int. Conf. on Machine Learning (ICML)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "text", + "content": "Bardes, A., Ponce, J., and LeCun, Y. (2021). Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 255, + 504, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 255, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 106, + 255, + 504, + 289 + ], + "type": "text", + "content": "Caron, M., Misra, I., Mairal, J., Goyal, P., Bojanowski, P., and Joulin, A. (2020). Unsupervised Learning of Visual Features by Contrasting Cluster Assignments. In Proc. Adv. on Neural Information Processing Systems (NeurIPS)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 296, + 504, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 296, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 106, + 296, + 504, + 319 + ], + "type": "text", + "content": "Caron, M., Touvron, H., Misra, I., Jegou, H., Mairal, J., Bojanowski, P., and Joulin, A. (2021). Emerging Properties in Self-Supervised Vision Transformer. arXiv, abs/2104.14294." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 326, + 504, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 326, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 504, + 358 + ], + "type": "text", + "content": "Chen, T., Kornblith, S., Norouzi, M., and Hinton, G. (2020a). A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 365, + 504, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 504, + 399 + ], + "type": "text", + "content": "Chen, T., Kornblith, S., Swersky, K., Norouzi, M., and Hinton, G. E. (2020b). Big Self-Supervised Models are Strong Semi-Supervised Learners. Adv. in Neural Information Processing Systems, 33." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 407, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 407, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 407, + 504, + 430 + ], + "type": "text", + "content": "Chen, X. and He, K. (2021). Exploring Simple Siamese Representation Learning. In Proc. Int. Conf. on Computer Vision and Pattern Recognition (CVPR)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 436, + 504, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 504, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 504, + 470 + ], + "type": "text", + "content": "Cosentino, R., Sengupta, A., Avestimehr, S., Soltanolkotabi, M., Ortega, A., Willke, T., and Tepper, M. (2022). Toward a geometrical understanding of self-supervised contrastive learning. arXiv preprint arXiv:2205.06926." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 477, + 504, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 477, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 477, + 504, + 499 + ], + "type": "text", + "content": "Dayan, P. and Abbott, L. F. (2005). Theoretical neuroscience: computational and mathematical modeling of neural systems. MIT press." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 506, + 504, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 504, + 540 + ], + "type": "text", + "content": "Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al. (2020). An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 547, + 504, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 547, + 504, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 547, + 504, + 570 + ], + "type": "text", + "content": "Ermolov, A., Siarohin, A., Sangineto, E., and Sebe, N. (2021). Whitening for self-supervised representation learning. In International Conference on Machine Learning, pages 3015-3024. PMLR." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 576, + 504, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 504, + 621 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 504, + 621 + ], + "type": "text", + "content": "Grill, J.-B., Strub, F., Altché, F., Tallec, C., Richemond, P., Buchatskaya, E., Doersch, C., Avila Pires, B., Guo, Z., Gheshlaghi Azar, M., Piot, B., kavukcuoglu, k., Munos, R., and Valko, M. (2020). Bootstrap your own latent: A new approach to self-supervised Learning. In Proc. Adv. on Neural Information Processing Systems (NeurIPS)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 628, + 504, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 504, + 661 + ], + "type": "text", + "content": "HaoChen, J. Z., Wei, C., Gaidon, A., and Ma, T. (2021). Provable guarantees for self-supervised deep learning with spectral contrastive loss. Advances in Neural Information Processing Systems, 34:5000-5011." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 668, + 504, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 504, + 702 + ], + "type": "text", + "content": "He, K., Fan, H., Wu, Y., Xie, S., and Girschick, R. (2020). Momentum Contrast for Unsupervised Visual Representation Learning. In Proc. Int. Conf. on Computer Vision and Pattern Recognition (CVPR)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 709, + 504, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 504, + 731 + ], + "type": "text", + "content": "Hsu, H., Qi, H., and Brown, M. (2019). Measuring the Effects of Non-Identical Data Distribution for Federated Visual Classification. arXiv, abs/1909.06335." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Hua, T., Wang, W., Xue, Z., Ren, S., Wang, Y., and Zhao, H. (2021). On feature decorrelation in self-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9598-9608." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 504, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 504, + 148 + ], + "type": "text", + "content": "Jing, L., Vincent, P., LeCun, Y., and Tian, Y. (2021). Understanding dimensional collapse in contrastive self-supervised learning. arXiv preprint arXiv:2110.09348." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 156, + 504, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 504, + 189 + ], + "type": "text", + "content": "Kugelgen, J., Sharma, Y., Gresle, L., Brendel, W., Scholkopf, B., Besserve, M., and Locatello, F. (2021). Self-Supervised Learning with Data Augmentations Provably Isolates Content from Style. arXiv, abs/2106.04619." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 198, + 475, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 198, + 475, + 209 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 475, + 209 + ], + "type": "text", + "content": "Landau, L. D. and Lifshitz, E. M. (2013). Statistical Physics: Volume 5, volume 5. Elsevier." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 217, + 504, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 504, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 504, + 241 + ], + "type": "text", + "content": "Liu, H., HaoChen, J. Z., Gaidon, A., and Ma, T. (2021). Self-supervised learning is more robust to dataset imbalance. International Conference on Learning Representations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 249, + 504, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 249, + 504, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 249, + 504, + 272 + ], + "type": "text", + "content": "Mitrovic, J., McWilliams, B., Walker, J., Buesing, L., and Blundell, C. (2020). Representation learning via invariant causal mechanisms. arXiv preprint arXiv:2010.07922." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 280, + 504, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 280, + 504, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 504, + 313 + ], + "type": "text", + "content": "Nozawa, K. and Sato, I. (2021). Understanding negative samples in instance discriminative self-supervised representation learning. Advances in Neural Information Processing Systems, 34:5784-5797." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 322, + 504, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 322, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 504, + 346 + ], + "type": "text", + "content": "Oord, A. v. d., Li, Y., and Vinyals, O. (2018). Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 354, + 504, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 504, + 387 + ], + "type": "text", + "content": "Papyan, V., Han, X., and Donoho, D. L. (2020). Prevalence of neural collapse during the terminal phase of deep learning training. Proceedings of the National Academy of Sciences, 117(40):24652-24663." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 396, + 504, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 504, + 418 + ], + "type": "text", + "content": "Pokle, A., Tian, J., Li, Y., and Risteski, A. (2022). Contrasting the landscape of contrastive and non-contrastive learning. arXiv preprint arXiv:2203.15702." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 426, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 504, + 460 + ], + "type": "text", + "content": "Robinson, J., Sun, L., Yu, K., Batmanghelich, K., Jegelka, S., and Sra, S. (2021). Can contrastive learning avoid shortcut solutions? Advances in neural information processing systems, 34:4974-4986." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 468, + 504, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 504, + 503 + ], + "type": "text", + "content": "Saunshi, N., Ash, J., Goel, S., Misra, D., Zhang, C., Arora, S., Kakade, S., and Krishnamurthy, A. (2022). Understanding Contrastive Learning Requires Incorporating Inductive Biases. In Proc. Int. Conf. on Machine Learning (ICML)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 511, + 504, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 511, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 504, + 534 + ], + "type": "text", + "content": "Simon, J. B., Knutins, M., Ziyin, L., Geisz, D., Fetterman, A. J., and Albrecht, J. (2023). On the stepwise nature of self-supervised learning. arXiv preprint arXiv:2303.15438." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 542, + 504, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 542, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 504, + 565 + ], + "type": "text", + "content": "Tian, Y. (2022). Deep contrastive learning is provably (almost) principal component analysis. arXiv preprint arXiv:2201.12680." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 573, + 504, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 573, + 504, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 504, + 596 + ], + "type": "text", + "content": "Tian, Y., Chen, X., and Ganguli, S. (2021). Understanding self-supervised Learning Dynamics without Contrastive Pairs. In Proc. Int. Conf. on Machine Learning (ICML)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 605, + 504, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 605, + 504, + 638 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 504, + 638 + ], + "type": "text", + "content": "Tian, Y., Sun, C., Poole, B., Krishnan, D., Schmid, C., and Isola, P. (2020). What makes for good views for contrastive learning? Advances in Neural Information Processing Systems, 33:6827-6839." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 647, + 504, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 504, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 504, + 670 + ], + "type": "text", + "content": "Tosh, C., Krishnamurthy, A., and Hsu, D. (2021). Contrastive estimation reveals topic posterior information to linear models. J. Mach. Learn. Res., 22:281-1." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 677, + 504, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 504, + 701 + ], + "type": "text", + "content": "Trivedi, P., Lubana, E. S., Heimann, M., Koutra, D., and Thiagarajan, J. J. (2022). Analyzing data-centric properties for contrastive learning on graphs. arXiv preprint arXiv:2208.02810." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "text", + "content": "Tsai, Y.-H., Wu, Y., Salakhutdinov, R., and Morency, L.-P. (2021a). Self-supervised Learning from a Multi-view Perspective. In Proc. Int. Conf. on Learning Representations (ICLR)." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 458 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Tsai, Y.-H. H., Ma, M. Q., Yang, M., Zhao, H., Morency, L.-P., and Salakhutdinov, R. (2021b). Self-supervised representation learning with relative predictive coding. International Conference on Learning Representations." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 504, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 504, + 147 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 504, + 147 + ], + "type": "text", + "content": "Wang, F. and Liu, H. (2021). Understanding the behaviour of contrastive loss. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2495-2504." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 152, + 504, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 152, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 504, + 177 + ], + "type": "text", + "content": "Wang, T. and Isola, P. (2020). Understanding Contrastive Representation Learning through Alignment and Uniformity on the Hypersphere. In Proc. Int. Conf. on Machine Learning (ICML)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 182, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 504, + 217 + ], + "type": "text", + "content": "Wang, Y., Zhang, Q., Wang, Y., Yang, J., and Lin, Z. (2022). Chaos is a ladder: A new theoretical understanding of contrastive learning via augmentation overlap. International Conference on Learning Representations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 504, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 504, + 257 + ], + "type": "text", + "content": "Wang, Z. and Ziyin, L. (2022). Posterior collapse of a linear latent variable model. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 264, + 504, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 264, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 264, + 504, + 288 + ], + "type": "text", + "content": "Wei, C., Shen, K., Chen, Y., and Ma, T. (2021). Theoretical Analysis of Self-Training with Deep Networks on Unlabeled Data. In Proc. Int. Conf. on Learning Representations (ICLR)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 293, + 504, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 504, + 327 + ], + "type": "text", + "content": "Zbontar, J., Jing, L., Misra, I., LeCun, Y., and Deny, S. (2021). Barlow twins: Self-supervised learning via redundancy reduction. In International Conference on Machine Learning, pages 12310-12320. PMLR." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 334, + 504, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 334, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 504, + 358 + ], + "type": "text", + "content": "Zimmerman, R., Sharma, Y., Schneider, S., Bethge, M., and Brendel, W. (2021). Contrastive Learning Inverts the Data Generating Process. In Proc. Int. Conf. on Machine Learning (ICML)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 365, + 504, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 504, + 399 + ], + "type": "text", + "content": "Ziyin, L., Li, B., and Meng, X. (2022a). Exact solutions of a deep linear network. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 405, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 405, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 504, + 430 + ], + "type": "text", + "content": "Ziyin, L., Li, B., Simon, J. B., and Ueda, M. (2022b). SGD can converge to local maxima. In International Conference on Learning Representations." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 435, + 504, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 435, + 504, + 458 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 504, + 458 + ], + "type": "text", + "content": "Ziyin, L. and Ueda, M. (2022). Exact phase transitions in deep learning. arXiv preprint arXiv:2205.12510." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 313, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 313, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 313, + 94 + ], + "type": "text", + "content": "A ADDITIONAL NUMERICAL RESULTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 504, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 
504, + 129 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 504, + 129 + ], + "type": "text", + "content": "In this section, we validate our theory with numerical results. Unless specified otherwise, the dimension of the learned representation is set to be equal to the input dimension: " + }, + { + "bbox": [ + 104, + 106, + 504, + 129 + ], + "type": "inline_equation", + "content": "d_0 = d_1" + }, + { + "bbox": [ + 104, + 106, + 504, + 129 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "text", + "content": "No Collapse for InfoNCE. We showed that there is no collapse at all for the vanilla InfoNCE, no matter how strong the augmentation is. Our result implies that the smallest singular of the model " + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "text", + "content": " scales as " + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\sigma^4" + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\sigma^2" + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "text", + "content": " is the strength (namely, the variance) of the augmentation. See the left panel of Fig. 5. We use the vanilla InfoNCE loss defined in (1) with a linear model. The training set is sampled from " + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0, I_{32})" + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "text", + "content": ". 
The training proceeds with Adam with a learning rate of " + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "inline_equation", + "content": "6e - 4" + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "text", + "content": " with full batch training for 5000 iterations. We use a simple diagonal Gaussian noise with variance " + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\sigma^2" + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "text", + "content": " for data augmentation. We see that the singular values scale as " + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\sigma^4" + }, + { + "bbox": [ + 104, + 133, + 506, + 224 + ], + "type": "text", + "content": " and never vanishes, as the theory predicts." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 227, + 504, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 227, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 227, + 504, + 282 + ], + "type": "text", + "content": "Nonrobust Collapses of Weighted InfoNCE. We now demonstrate that, as the theory predicts, collapses of weighted InfoNCE depend strongly on the dataset size. We use the same dataset and training procedure as the previous experiment. We set " + }, + { + "bbox": [ + 104, + 227, + 504, + 282 + ], + "type": "inline_equation", + "content": "\\alpha = 0.1" + }, + { + "bbox": [ + 104, + 227, + 504, + 282 + ], + "type": "text", + "content": " and change the size of the training set. 
Theory suggests that for a collapse in the " + }, + { + "bbox": [ + 104, + 227, + 504, + 282 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 227, + 504, + 282 + ], + "type": "text", + "content": "-th subspace to happen, the size of the dataset needs to obey" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 255, + 282, + 504, + 304 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 282, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 255, + 282, + 504, + 304 + ], + "type": "interline_equation", + "content": "N > \\frac {a _ {i}}{c _ {i} (1 - \\alpha)} := N _ {\\text {c r i t}}. \\tag {17}", + "image_path": "d2bc733ca249e9600dad778162bd75b2d632036d2788eb67b188518c643929e5.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 307, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 307, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 504, + 342 + ], + "type": "text", + "content": "See the middle panel of Figure 5. We show the smallest three eigenvalues of " + }, + { + "bbox": [ + 104, + 307, + 504, + 342 + ], + "type": "inline_equation", + "content": "W^T W" + }, + { + "bbox": [ + 104, + 307, + 504, + 342 + ], + "type": "text", + "content": " (roughly having similar magnitudes), and the critical dataset size for the smallest eigenvalue. We see that the theoretical threshold of collapse agrees well with where the collapse actually happens." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "text", + "content": "Collapses in " + }, + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "text", + "content": "-InfoNCE. 
With " + }, + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "inline_equation", + "content": "\\beta < 1" + }, + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "text", + "content": ", one can cause collapses in a predictable and controllable way. In this experiment, we let " + }, + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "inline_equation", + "content": "d_0 = 5" + }, + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "text", + "content": " and we plot all five eigenvalues of " + }, + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "inline_equation", + "content": "W^T W" + }, + { + "bbox": [ + 104, + 346, + 505, + 392 + ], + "type": "text", + "content": " as we increase the strength of an isotropic augmentation. As the numerical results show, collapses happen at the points predicted by the theory." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 396, + 506, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 506, + 453 + ], + "type": "text", + "content": "Normalization Causes Dimensional Collapse. We also plot the three smallest eigenvalues of " + }, + { + "bbox": [ + 104, + 396, + 506, + 453 + ], + "type": "inline_equation", + "content": "W^T W" + }, + { + "bbox": [ + 104, + 396, + 506, + 453 + ], + "type": "text", + "content": " when we apply the standard representation normalization in practice: " + }, + { + "bbox": [ + 104, + 396, + 506, + 453 + ], + "type": "inline_equation", + "content": "f(x) \\to f(x) / \\| f(x) \\|" + }, + { + "bbox": [ + 104, + 396, + 506, + 453 + ], + "type": "text", + "content": ". To facilitate comparison, we also use the same dataset and training procedure as before. See Figure 6. We see that normalization does cause a collapse in the smallest eigenvalues at an augmentation strength much smaller than the feature variation." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 468, + 326, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 326, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 326, + 480 + ], + "type": "text", + "content": "B LANDSCAPE OF A NONLINEAR MODEL" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 492, + 506, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 506, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 506, + 538 + ], + "type": "text", + "content": "In this section, we plot the landscape of the layer of nonlinear models on the same synthetic dataset we outlined in the previous section. We train a three-layer nonlinear network with output dimension 2 with SGD until convergence. We then rescale the optimized weight of the last by a factor " + }, + { + "bbox": [ + 104, + 492, + 506, + 538 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 492, + 506, + 538 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 492, + 506, + 538 + ], + "type": "inline_equation", + "content": "W_{last} \\rightarrow aW_{last}" + }, + { + "bbox": [ + 104, + 492, + 506, + 538 + ], + "type": "text", + "content": " and plot the loss function along this direction. 
See the top panel of Figure 7 for" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 125, + 558, + 242, + 650 + ], + "blocks": [ + { + "bbox": [ + 125, + 558, + 242, + 650 + ], + "lines": [ + { + "bbox": [ + 125, + 558, + 242, + 650 + ], + "spans": [ + { + "bbox": [ + 125, + 558, + 242, + 650 + ], + "type": "image", + "image_path": "ccc7996109f2be90d2989e39053cb4dc258e6124b3ce2766def4469ed82262d4.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "lines": [ + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "text", + "content": "Figure 5: The three smallest singular values of " + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "inline_equation", + "content": "W^T W" + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "text", + "content": " as a function of the augmentation strength. We see that our effective landscape theory around the origin accurately captures collapses in learning. Left: Vanilla InfoNCE. As the theory suggests, the singular values scale as " + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "inline_equation", + "content": "\\sigma^4" + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "text", + "content": " and do not vanish for any finite value of " + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "text", + "content": ". Mid: Weight InfoNCE. 
" + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "inline_equation", + "content": "\\alpha = 0.1" + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "inline_equation", + "content": "\\sigma = 5" + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "text", + "content": ". Collapse happens at the critical dataset size predicted by the theory. Right: (Sqrt) Eigenvalues of " + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "inline_equation", + "content": "WW^T" + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 662, + 506, + 728 + ], + "type": "text", + "content": "-InfoNCE. The collapses can be well controlled." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 246, + 557, + 366, + 650 + ], + "blocks": [ + { + "bbox": [ + 246, + 557, + 366, + 650 + ], + "lines": [ + { + "bbox": [ + 246, + 557, + 366, + 650 + ], + "spans": [ + { + "bbox": [ + 246, + 557, + 366, + 650 + ], + "type": "image", + "image_path": "99881e58f3d4b4bcc3669742d31c4f8e0145f69c41e93cfa315ed27252276a3b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 370, + 555, + 487, + 650 + ], + "blocks": [ + { + "bbox": [ + 370, + 555, + 487, + 650 + ], + "lines": [ + { + "bbox": [ + 370, + 555, + 487, + 650 + ], + "spans": [ + { + "bbox": [ + 370, + 555, + 487, + 650 + ], + "type": "image", + "image_path": "b844c5ec134326f0df3db25f6a54bb2e3d3f33830b6942b46d8f8c0d95e40847.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 
293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 245, + 82, + 370, + 178 + ], + "blocks": [ + { + "bbox": [ + 245, + 82, + 370, + 178 + ], + "lines": [ + { + "bbox": [ + 245, + 82, + 370, + 178 + ], + "spans": [ + { + "bbox": [ + 245, + 82, + 370, + 178 + ], + "type": "image", + "image_path": "cd7b65731d8ca087e5133850229933d5468111b18b4cb5f7284188c5ff53b0c1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "lines": [ + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "text", + "content": "Figure 6: A collapse happens easily when the learned representation is normalized. The smallest eigenvalues of " + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "inline_equation", + "content": "A_0" + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "text", + "content": " are roughly 0.2, and the collapse happens much before the noise reaches this strength." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 247, + 236, + 366, + 326 + ], + "blocks": [ + { + "bbox": [ + 247, + 236, + 366, + 326 + ], + "lines": [ + { + "bbox": [ + 247, + 236, + 366, + 326 + ], + "spans": [ + { + "bbox": [ + 247, + 236, + 366, + 326 + ], + "type": "image", + "image_path": "2b537a60334e685645cae422322617aea2f6b2646af91a0e882bede9eddf0800.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 115, + 329, + 239, + 417 + ], + "blocks": [ + { + "bbox": [ + 115, + 329, + 239, + 417 + ], + "lines": [ + { + "bbox": [ + 115, + 329, + 239, + 417 + ], + "spans": [ + { + "bbox": [ + 115, + 329, + 239, + 417 + ], + "type": "image", + "image_path": "562a004beb8484fd766e2416e9b1d947231c3209ae9008d89406e86bbfd73d32.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 427, + 504, + 473 + ], + "lines": [ + { + "bbox": [ + 104, + 427, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 504, + 473 + ], + "type": "text", + "content": "Figure 7: The Landscape of nonlinear models is very similar to the landscape of linear models (cf. Figure 1). Top: 1d projection of the landscape of a two-layer tanh and ReLU network. Bottom Left: the landscape of a 2D projection of the last layer of a nonlinear model with a weak augmentation. Middle: with intermediate augmentation. Right: with strong augmentation." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 244, + 329, + 369, + 418 + ], + "blocks": [ + { + "bbox": [ + 244, + 329, + 369, + 418 + ], + "lines": [ + { + "bbox": [ + 244, + 329, + 369, + 418 + ], + "spans": [ + { + "bbox": [ + 244, + 329, + 369, + 418 + ], + "type": "image", + "image_path": "b73c66122800f71f6b2bd952d5c6f2d5f1490c04535ec96a92a152fd1bdcab3b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 372, + 329, + 497, + 417 + ], + "blocks": [ + { + "bbox": [ + 372, + 329, + 497, + 417 + ], + "lines": [ + { + "bbox": [ + 372, + 329, + 497, + 417 + ], + "spans": [ + { + "bbox": [ + 372, + 329, + 497, + 417 + ], + "type": "image", + "image_path": "f41769dfc9310c2e798a0bc14d334452726fc74b2d23497988985f2779b3188c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 494, + 506, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 494, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 494, + 506, + 529 + ], + "type": "text", + "content": "both the tanh and the ReLU nonlinearity. We then rescale the two rows of the weight matrix of the model by " + }, + { + "bbox": [ + 104, + 494, + 506, + 529 + ], + "type": "inline_equation", + "content": "r_1" + }, + { + "bbox": [ + 104, + 494, + 506, + 529 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 494, + 506, + 529 + ], + "type": "inline_equation", + "content": "r_2" + }, + { + "bbox": [ + 104, + 494, + 506, + 529 + ], + "type": "text", + "content": " respectively: " + }, + { + "bbox": [ + 104, + 494, + 506, + 529 + ], + "type": "inline_equation", + "content": "W = (w_{1},W_{2})^{T}\\rightarrow (r_{1}w_{1},r_{2}w_{2})" + }, + { + "bbox": [ + 104, + 494, + 506, + 529 + ], + "type": "text", + "content": ". 
We see that the landscape of the model is qualitatively the same as that of the linear models, shown in Figure 1." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 544, + 367, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 367, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 367, + 556 + ], + "type": "text", + "content": "C SETUP FOR IMBALANCED DATA EXPERIMENTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 569, + 504, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 569, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 569, + 504, + 647 + ], + "type": "text", + "content": "Creating an Imbalanced Dataset: For our experiments measuring the influence on imbalanced datasets on SSL training, we use CIFAR-10 by sampling 20000 samples out of the 50000 training samples. The sampling process is described by a Dirichlet distribution and is often used to analyze effects of heterogeneity and data imbalance in Federated Learning problems (Hsu et al., 2019). Specifically, a small value of the distribution parameter yields a highly imbalanced dataset, while a large value yields a perfectly balanced dataset. 
We evaluate our models in three scenarios, for which we report below the number of samples per class:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 109, + 651, + 434, + 693 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 109, + 651, + 372, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 651, + 372, + 664 + ], + "spans": [ + { + "bbox": [ + 109, + 651, + 372, + 664 + ], + "type": "text", + "content": "High imbalance: [4890, 87, 5000, 0, 74, 0, 0, 212, 4788, 4947]" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 109, + 666, + 434, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 666, + 434, + 678 + ], + "spans": [ + { + "bbox": [ + 109, + 666, + 434, + 678 + ], + "type": "text", + "content": "- Medium imbalance: [4268, 4296, 1741, 420, 945, 161, 4633, 1015, 131, 2386]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 109, + 681, + 433, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 681, + 433, + 693 + ], + "spans": [ + { + "bbox": [ + 109, + 681, + 433, + 693 + ], + "type": "text", + "content": "- No imbalance: [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000]" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Training Setup: We use ResNet-12 models as the backbone for all experiments due to computational constraints. SimCLR augmentations (Chen et al., 2020a) are followed, except for a reduced strength of resized cropping from 0.2 to 0.5. 
All training involves a standardly used cosine decay learning" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 150 + ], + "type": "text", + "content": "rate schedule, starting at 0.03 and decaying to 0.001. When a projector module is used, it involves a two-layer MLP with hidden dimension of 512 and BatchNorm layer in between. We use SGD for optimization and perform the standardly used linear evaluation protocol for measuring the quality of the final representation. For training the linear layer, we use an initial learning rate of 10 and decay it to 0.01 with a cosine schedule. We note linear evaluation is used for supervised models as well, following the practice advocated by Liu et al. (2021)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 169, + 335, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 169, + 335, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 169, + 335, + 182 + ], + "type": "text", + "content": "D ADDITIONAL THEORETICAL CONCERNS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 197, + 326, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 197, + 326, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 197, + 326, + 208 + ], + "type": "text", + "content": "D.1 COLLAPSE CONDITION FOR NORMALIZATION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "text", + "content": "The important condition for collapse in Eq. (13) can be better understood by considering the extreme cases. First of all, note that the eigenvalues of " + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\Sigma B_{M}" + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "text", + "content": " are bounded between " + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "inline_equation", + "content": "-1" + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "inline_equation", + "content": "1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 269, + 251, + 505, + 275 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 251, + 505, + 275 + ], + "spans": [ + { + "bbox": [ + 269, + 251, + 505, + 275 + ], + "type": "interline_equation", + "content": "- 1 \\leq \\frac {a _ {i} - c _ {i}}{a _ {i} + c _ {i}} \\leq 1, \\tag {18}", + "image_path": 
"efa963a9b4179e77d70018057862474924701a3ca3d17d626de44126653ffde0.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 282, + 373, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 373, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 373, + 295 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 104, + 282, + 373, + 295 + ], + "type": "inline_equation", + "content": "-1" + }, + { + "bbox": [ + 104, + 282, + 373, + 295 + ], + "type": "text", + "content": " is achieved when " + }, + { + "bbox": [ + 104, + 282, + 373, + 295 + ], + "type": "inline_equation", + "content": "c_{i} \\gg a_{i}" + }, + { + "bbox": [ + 104, + 282, + 373, + 295 + ], + "type": "text", + "content": ", and 1 is achieved when " + }, + { + "bbox": [ + 104, + 282, + 373, + 295 + ], + "type": "inline_equation", + "content": "a_{i} \\gg c_{i}" + }, + { + "bbox": [ + 104, + 282, + 373, + 295 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 299, + 504, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 299, + 504, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 299, + 504, + 321 + ], + "type": "text", + "content": "When the augmentation is negligibly small, " + }, + { + "bbox": [ + 104, + 299, + 504, + 321 + ], + "type": "inline_equation", + "content": "\\Sigma^{-1}B_M\\approx M" + }, + { + "bbox": [ + 104, + 299, + 504, + 321 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 299, + 504, + 321 + ], + "type": "inline_equation", + "content": "\\lambda_{i}\\approx \\bar{\\lambda} = 1" + }, + { + "bbox": [ + 104, + 299, + 504, + 321 + ], + "type": "text", + "content": ", the condition thus becomes" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 286, + 324, + 505, + 349 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 324, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 286, + 324, + 
505, + 349 + ], + "type": "interline_equation", + "content": "\\frac {2}{d _ {M}} > 0, \\tag {19}", + "image_path": "1603631850b1154c84e14fa5465e7baee7bbee270fd971199db12e43cb3bf549.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "type": "text", + "content": "which always holds. Thus, a sufficiently small augmentation will never cause collapse. Next, when we apply very strong augmentation to the " + }, + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 356, + 504, + 390 + ], + "type": "text", + "content": "-th subspace and zero augmentation to the others, the condition for the non-augmented spaces becomes" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 264, + 399, + 505, + 425 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 399, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 264, + 399, + 505, + 425 + ], + "type": "interline_equation", + "content": "1 + \\frac {2}{d _ {M}} > \\frac {d _ {M} - 2}{d _ {M}}, \\tag {20}", + "image_path": "4ea452949f5822d7bf103f27dc91923c98d5cdb99edcbb444c4d5ddc4c23b10b.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 433, + 418, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 433, + 418, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 433, + 418, + 445 + ], + "type": "text", + "content": "meaning that the collapse will not happen. 
For the " + }, + { + "bbox": [ + 104, + 433, + 418, + 445 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 433, + 418, + 445 + ], + "type": "text", + "content": "-th space, the condition is" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 233, + 454, + 505, + 480 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 454, + 505, + 480 + ], + "spans": [ + { + "bbox": [ + 233, + 454, + 505, + 480 + ], + "type": "interline_equation", + "content": "- 1 + \\frac {2}{d _ {M}} > \\frac {d _ {M} - 2}{d _ {M}} (\\Longleftrightarrow) \\frac {4}{d _ {M}} > 2, \\tag {21}", + "image_path": "612f161a294c0a5e2a460147d79cb12a808a64bf37354567ef833ce36b8974e1.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 487, + 504, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 554 + ], + "type": "text", + "content": "which is only possible when " + }, + { + "bbox": [ + 104, + 487, + 504, + 554 + ], + "type": "inline_equation", + "content": "d_M = 1" + }, + { + "bbox": [ + 104, + 487, + 504, + 554 + ], + "type": "text", + "content": ", namely, the strongly augmented space is the only space that does not collapse. This is reasonable when the original data is rank-1 because the normalization will ensure that this space does not collapse, but when the original data is not rank-1, this stationary point will be a saddle and will not be preferred by gradient descent. In different words, a strong enough augmentation will cause a collapse in the corresponding subspace, as is the case without normalization." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "text", + "content": "It is also interesting to note that having " + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "inline_equation", + "content": "c_{i} \\geq a_{i}" + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "text", + "content": " is no longer sufficient to cause a collapse. For example, let " + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "inline_equation", + "content": "c_{1} = 0" + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "inline_equation", + "content": "c_{j} = a_{j}" + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "inline_equation", + "content": "j \\neq 1" + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "text", + "content": ". 
The condition for " + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "inline_equation", + "content": "j \\neq 1" + }, + { + "bbox": [ + 104, + 559, + 506, + 583 + ], + "type": "text", + "content": " becomes" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 280, + 592, + 505, + 617 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 592, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 280, + 592, + 505, + 617 + ], + "type": "interline_equation", + "content": "\\frac {2}{d _ {M}} > \\frac {1}{d _ {M}}, \\tag {22}", + "image_path": "418f4170404d615422493c758fff29b42ffe23f0e0f670f3dd84b5845ee5aea7.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 625, + 504, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 625, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 625, + 504, + 669 + ], + "type": "text", + "content": "which always holds. At the same time, it does not mean that collapsing has become harder in general. For example, it is also possible for " + }, + { + "bbox": [ + 104, + 625, + 504, + 669 + ], + "type": "inline_equation", + "content": "c_{i} < a_{i}" + }, + { + "bbox": [ + 104, + 625, + 504, + 669 + ], + "type": "text", + "content": " to cause a collapse. 
Suppose we add a weak augmentation only to the first subspace such that " + }, + { + "bbox": [ + 104, + 625, + 504, + 669 + ], + "type": "inline_equation", + "content": "a_{i} - c_{i} = \\epsilon > 0" + }, + { + "bbox": [ + 104, + 625, + 504, + 669 + ], + "type": "text", + "content": ", the condition for this dimension to not to collapse is" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 246, + 672, + 504, + 697 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 672, + 504, + 697 + ], + "spans": [ + { + "bbox": [ + 246, + 672, + 504, + 697 + ], + "type": "interline_equation", + "content": "\\frac {\\epsilon}{a _ {i} + c _ {i}} + \\frac {2}{d _ {M}} > \\frac {d _ {M} - 1 + \\epsilon}{d _ {M}}, \\tag {23}", + "image_path": "0ff4c0d6865841823317a1d4f8875b94d1987e52e3a86f02e4f87b8c8d334f47.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 705, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 705, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 705, + 504, + 733 + ], + "type": "text", + "content": "which can be violated whenever " + }, + { + "bbox": [ + 104, + 705, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\epsilon < \\frac{(a_i + c_i)(d_M - 3)}{a_i + c_i + d_m}" + }, + { + "bbox": [ + 104, + 705, + 504, + 733 + ], + "type": "text", + "content": ". Namely, in some cases, normalization can in fact facilitate collapse." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 169, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 169, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 169, + 94 + ], + "type": "text", + "content": "E PROOFS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 244, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 244, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 244, + 118 + ], + "type": "text", + "content": "E.1 PROOF OF PROPOSITION 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 126, + 314, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 126, + 314, + 139 + ], + "spans": [ + { + "bbox": [ + 105, + 126, + 314, + 139 + ], + "type": "text", + "content": "Proof. The second term in Eq. 
(3) can be written as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 119, + 144, + 504, + 205 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 144, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 119, + 144, + 504, + 205 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right] = \\mathbb {E} \\left[ \\left(\\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right]\\right) ^ {2} \\right] - \\mathbb {E} \\left[ \\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right] \\right] ^ {2} (24) \\\\ = [ \\text {f i r s t} \\quad \\text {t e r m} ] - 4 \\operatorname {T r} [ W (A _ {0} + C) W ^ {T} ] ^ {2} (25) \\\\ = [ f i r s t \\text {t e r m} ] - 4 \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2}, (26) \\\\ \\end{array}", + "image_path": "864415ba40fa8878ca274be0b22a86884900445c1c80f91c316c0463dbcc3463.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 209, + 364, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 209, + 364, + 221 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 364, + 221 + ], + "type": "text", + "content": "where we have used the definition " + }, + { + "bbox": [ + 105, + 209, + 364, + 221 + ], + "type": "inline_equation", + "content": "\\Sigma = A_0 + C" + }, + { + "bbox": [ + 105, + 209, + 364, + 221 + ], + "type": "text", + "content": ". 
The first term is" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 225, + 504, + 250 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 225, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 111, + 225, + 504, + 250 + ], + "type": "interline_equation", + "content": "[ f i r s t \\text {t e r m} ] = \\mathbb {E} \\left[ \\left(\\operatorname {T r} [ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} ]\\right) ^ {2} \\right] = 4 \\operatorname {T r} [ W \\Sigma W ^ {T} ] ^ {2} + 8 \\operatorname {T r} [ W \\Sigma W ^ {T} W \\Sigma W ^ {T} ]. \\tag {27}", + "image_path": "157069277bb4aed3f8a5231de67a35ed3f4ec48df7b347c1db0eda811f0dd967.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 250, + 392, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 250, + 392, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 392, + 262 + ], + "type": "text", + "content": "Combining the above expressions, we see that Eq. (3) can be written as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 216, + 267, + 504, + 304 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 267, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 216, + 267, + 504, + 304 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right] (28) \\\\ = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right]. 
(29) \\\\ \\end{array}", + "image_path": "56a97ceba0c1cedc9e9e49c452c54f435cc375c17b24c79c04e9b707bba6c27b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 309, + 211, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 309, + 211, + 322 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 211, + 322 + ], + "type": "text", + "content": "This finishes the proof. " + }, + { + "bbox": [ + 105, + 309, + 211, + 322 + ], + "type": "inline_equation", + "content": "\\square" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 335, + 229, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 229, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 229, + 346 + ], + "type": "text", + "content": "E.2 PROOF OF THEOREM 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 355, + 304, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 355, + 304, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 304, + 367 + ], + "type": "text", + "content": "Proof. All stationary points have a zero gradient:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 247, + 373, + 504, + 385 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 373, + 504, + 385 + ], + "spans": [ + { + "bbox": [ + 247, + 373, + 504, + 385 + ], + "type": "interline_equation", + "content": "- 2 W B + 4 W \\Sigma W ^ {T} W \\Sigma = 0. 
\\tag {30}", + "image_path": "7852bc81d7ee879f3fa081d1762a28a0fa5bcaf94b8ff554ce4f98e4e32ee617.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 390, + 319, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 319, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 319, + 403 + ], + "type": "text", + "content": "Multiplying by " + }, + { + "bbox": [ + 105, + 390, + 319, + 403 + ], + "type": "inline_equation", + "content": "W^T" + }, + { + "bbox": [ + 105, + 390, + 319, + 403 + ], + "type": "text", + "content": " on the left and " + }, + { + "bbox": [ + 105, + 390, + 319, + 403 + ], + "type": "inline_equation", + "content": "B^{-1}" + }, + { + "bbox": [ + 105, + 390, + 319, + 403 + ], + "type": "text", + "content": " on the right," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 240, + 408, + 504, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 408, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 240, + 408, + 504, + 422 + ], + "type": "interline_equation", + "content": "W ^ {T} W = 2 W ^ {T} W \\Sigma W ^ {T} W \\Sigma B ^ {- 1} \\tag {31}", + "image_path": "4cbe9aa013b2bd7a8b1265a9e33d0d10f8897c040a7b6a93f37f29c78650e65b.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 184, + 426, + 504, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 426, + 504, + 440 + ], + "spans": [ + { + "bbox": [ + 184, + 426, + 504, + 440 + ], + "type": "interline_equation", + "content": "\\left(\\Longleftrightarrow\\right) \\quad \\Sigma^ {1 / 2} W ^ {T} W \\Sigma^ {1 / 2} = 2 \\Sigma^ {1 / 2} W ^ {T} W \\Sigma W ^ {T} W \\Sigma B ^ {- 1} \\Sigma^ {1 / 2} \\tag {32}", + "image_path": "f3098a61fe65c0856c55dbba53cd5cae7489abe2e4fb11f465f67ca1c0c6472a.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 444, + 279, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 444, + 279, + 456 + 
], + "spans": [ + { + "bbox": [ + 105, + 444, + 279, + 456 + ], + "type": "text", + "content": "Defining " + }, + { + "bbox": [ + 105, + 444, + 279, + 456 + ], + "type": "inline_equation", + "content": "H \\coloneqq \\Sigma^{1/2} W^T W \\Sigma^{1/2}" + }, + { + "bbox": [ + 105, + 444, + 279, + 456 + ], + "type": "text", + "content": ", we obtain" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 254, + 463, + 504, + 475 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 463, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 254, + 463, + 504, + 475 + ], + "type": "interline_equation", + "content": "H = 2 H ^ {2} \\Sigma^ {1 / 2} \\Sigma B ^ {- 1} \\Sigma^ {1 / 2}, \\tag {33}", + "image_path": "331f2115949692d8e191c4e84cc9e827c06093d102cdab8d806bc4f1e5e3634a.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 227, + 481, + 504, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 481, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 227, + 481, + 504, + 495 + ], + "type": "interline_equation", + "content": "\\left(\\Longleftrightarrow\\right) \\quad H \\left(I - 2 H \\Sigma^ {1 / 2} B ^ {- 1} \\Sigma^ {1 / 2}\\right) = 0. 
\\tag {34}", + "image_path": "e8b20b94dbd4ba0f2bea9b60dabb301f2a81bd4ea09e78b0d0e43e7058d768a0.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "text", + "content": "Because both " + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "inline_equation", + "content": "\\Sigma^{1/2}\\Sigma B^{-1}\\Sigma^{1/2}" + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "text", + "content": " are symmetric, one can take the transpose of Eq. (33) to find that " + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "inline_equation", + "content": "\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}" + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "text", + "content": " commute with each, which implies that " + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "text", + "content": " has the same eigenvectors as " + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "inline_equation", + "content": "\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}/2" + }, + { + "bbox": [ + 104, + 498, + 504, + 536 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "text", + "content": "Eq. (34) then implies that the eigenvalues of " + }, + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "text", + "content": " is either the inverse of that of " + }, + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "inline_equation", + "content": "\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}" + }, + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "text", + "content": " or zero. This implies that any stationary point of " + }, + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "text", + "content": " can be written in the form" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 270, + 571, + 504, + 593 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 571, + 504, + 593 + ], + "spans": [ + { + "bbox": [ + 270, + 571, + 504, + 593 + ], + "type": "interline_equation", + "content": "H = \\frac {1}{2} U M \\Lambda U ^ {T}, \\tag {35}", + "image_path": "b89f2ba9ba72c5b3948d9c951eb7a8c45a0249123d9a76ac44e6906862c7ef61.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "spans": [ + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "content": " is a unitary matrix, " + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + 
"type": "inline_equation", + "content": "\\Lambda" + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "content": " is diagonal matrix containing the eigenvalues of " + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "inline_equation", + "content": "\\Sigma^{1/2}B^{-1}\\Sigma^{1/2}" + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "content": " is an arbitrary (masking) diagonal matrix containing only zero or one such that (1) " + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "inline_equation", + "content": "M_{ii} = 0" + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "inline_equation", + "content": "\\Lambda_{ii} < 0" + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "content": " and (2) contain at most " + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "inline_equation", + "content": "d^*" + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "content": " nonzero terms. This then implies that the weight matrix " + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 105, + 598, + 505, + 642 + ], + "type": "text", + "content": " satisfies" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 237, + 641, + 504, + 663 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 641, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 237, + 641, + 504, + 663 + ], + "type": "interline_equation", + "content": "W ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1 / 2} U M \\Lambda U ^ {T} \\Sigma^ {- 1 / 2}. 
\\tag {36}", + "image_path": "0be2fcc76c5c0a243fa65327e20af1394edd86682556718eb0b3982d3569fdf2.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 665, + 386, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 665, + 386, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 386, + 677 + ], + "type": "text", + "content": "Lastly, when " + }, + { + "bbox": [ + 105, + 665, + 386, + 677 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 105, + 665, + 386, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 665, + 386, + 677 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 105, + 665, + 386, + 677 + ], + "type": "text", + "content": " commute, we can compactly write the result as" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 256, + 682, + 504, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 682, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 256, + 682, + 504, + 704 + ], + "type": "interline_equation", + "content": "W ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1} B _ {M} \\Sigma^ {- 1}, \\tag {37}", + "image_path": "517e9f04302d5ab92fa891dfb278b126a1676dd3e40da5b9a9f145245fbd3c6c.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "B_{M}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " denotes the matrix obtained by masking the eigenvalues of " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " with " + 
}, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": ". This finishes the proof." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 244, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 244, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 244, + 94 + ], + "type": "text", + "content": "E.3 PROOF OF PROPOSITION 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 504, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 504, + 135 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 504, + 135 + ], + "type": "text", + "content": "Proof. 
For all stationary points, " + }, + { + "bbox": [ + 104, + 102, + 504, + 135 + ], + "type": "inline_equation", + "content": "W^T W" + }, + { + "bbox": [ + 104, + 102, + 504, + 135 + ], + "type": "text", + "content": " commutes with " + }, + { + "bbox": [ + 104, + 102, + 504, + 135 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 102, + 504, + 135 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 102, + 504, + 135 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 104, + 102, + 504, + 135 + ], + "type": "text", + "content": ", which means that at these stationary points, one can simultaneously diagonalize all the matrices and the loss function (3) can be written as" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 261, + 135, + 504, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 135, + 504, + 163 + ], + "spans": [ + { + "bbox": [ + 261, + 135, + 504, + 163 + ], + "type": "interline_equation", + "content": "L = - \\sum_ {i = 1} ^ {d ^ {*}} \\lambda_ {i} b _ {i} + \\lambda_ {i} ^ {2} s _ {i} ^ {2} \\tag {38}", + "image_path": "b6f48d89c4f045dc09a34454e3db5d7f18e9b3a4fff8b9a6d40868e3b8858669.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "type": "inline_equation", + "content": "\\lambda_{i}, b_{i}, s_{i}" + }, + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "type": "text", + "content": " are the eigenvalues of " + }, + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "type": "inline_equation", + "content": "W^{T}W" + }, + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 166, + 384, + 178 
+ ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 104, + 166, + 384, + 178 + ], + "type": "text", + "content": " respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": "We can thus consider each " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": " separately. When " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "b_{i} > 0" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "\\lambda_{i} = 0" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": " cannot be a local minimum because the local Hessian is " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "-b_{i} < 0" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": ". When " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "b_{i} \\leq 0" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": ", the only stationary point is " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "\\lambda_{i} = 0" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": ". 
This sum covers at most " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "d^{*}" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": " summands, and so, at the local minima, " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "\\lambda_{i} \\neq 0" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": " if and only if " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "b_{i} > 0" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": ", and so the number of non-zero eigenvalues of " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "W^{T}W" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "inline_equation", + "content": "\\min(m, d^{*})" + }, + { + "bbox": [ + 104, + 182, + 504, + 228 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 240, + 244, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 240, + 244, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 244, + 251 + ], + "type": "text", + "content": "E.4 PROOF OF PROPOSITION 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 261, + 279, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 261, + 279, + 273 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 279, + 273 + ], + "type": "text", + "content": "Proof. 
The regularization can be written as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 219, + 276, + 504, + 308 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 276, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 219, + 276, + 504, + 308 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} R = \\left[ \\left(\\mathbb {E} _ {x} \\| W x \\| ^ {2} - c\\right) ^ {2} \\right] (39) \\\\ = \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2} - 2 c \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] + c ^ {2}. (40) \\\\ \\end{array}", + "image_path": "8333f50d1cfbdb02b7c95aa8581d55b73bc5e2a2ff8e08d83fe0d68ccc62235e.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 316, + 238, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 238, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 238, + 327 + ], + "type": "text", + "content": "By Proposition 1, Eq. (10) reads" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 125, + 331, + 504, + 362 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 331, + 504, + 362 + ], + "spans": [ + { + "bbox": [ + 125, + 331, + 504, + 362 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right] + \\kappa \\left(\\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2} - 2 c \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] + c ^ {2}\\right) (41) \\\\ = - \\operatorname {T r} \\left[ W (B + 2 \\kappa c \\Sigma) W ^ {T} \\right] + \\operatorname {T r} \\left[ W \\Sigma W ^ {T} W \\Sigma W ^ {T} \\right] + \\kappa \\rho^ {2}. 
(42) \\\\ \\end{array}", + "image_path": "2872a51fb6d82515c9f5bcab0edaaab9cf604af7f3b4d953ecdb8c0370490b8e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 365, + 193, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 193, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 193, + 376 + ], + "type": "text", + "content": "The derivative of " + }, + { + "bbox": [ + 105, + 365, + 193, + 376 + ], + "type": "inline_equation", + "content": "\\rho^2" + }, + { + "bbox": [ + 105, + 365, + 193, + 376 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 272, + 376, + 504, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 376, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 272, + 376, + 504, + 399 + ], + "type": "interline_equation", + "content": "\\frac {d}{d W} \\rho^ {2} = 4 \\rho W \\Sigma . \\tag {43}", + "image_path": "e78e1778ae8c7c62a31c48d23625a601a98d8f6340ed37990fcb720ca6b9436b.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 399, + 242, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 242, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 242, + 411 + ], + "type": "text", + "content": "The zero-gradient condition is thus" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 208, + 415, + 504, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 415, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 208, + 415, + 504, + 430 + ], + "type": "interline_equation", + "content": "- 2 W (B + 2 \\kappa c \\Sigma - 2 \\kappa \\rho \\Sigma) + 4 W \\Sigma W ^ {T} W \\Sigma = 0. 
\\tag {44}", + "image_path": "04578dbc55645beb578f3d54a8261205a17ca0b65cbc303e493a594a49479dee.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 432, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 432, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 432, + 504, + 464 + ], + "type": "text", + "content": "We can define " + }, + { + "bbox": [ + 104, + 432, + 504, + 464 + ], + "type": "inline_equation", + "content": "B' \\coloneqq B + 2\\kappa c\\Sigma - 2\\kappa \\rho \\Sigma" + }, + { + "bbox": [ + 104, + 432, + 504, + 464 + ], + "type": "text", + "content": " to see that this condition is the same as Eq. (30) in the proof of Theorem 1. The rest of the proof thus follows from the arguments. We thus arrive at the theorem statement:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 255, + 464, + 504, + 486 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 464, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 255, + 464, + 504, + 486 + ], + "type": "interline_equation", + "content": "W ^ {T} W = \\frac {1}{2} \\Sigma^ {- 1} B _ {M} ^ {\\prime} \\Sigma^ {- 1}. \\tag {45}", + "image_path": "b9fe6d45c51d2a229e147d7f9b89be5d637302747c7a9e01c1583527d5f65155.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 487, + 170, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 487, + 170, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 170, + 498 + ], + "type": "text", + "content": "We are done. 
" + }, + { + "bbox": [ + 105, + 487, + 170, + 498 + ], + "type": "inline_equation", + "content": "\\square" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 511, + 244, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 511, + 244, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 244, + 521 + ], + "type": "text", + "content": "E.5 PROOF OF PROPOSITION 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 531, + 504, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 531, + 504, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 504, + 554 + ], + "type": "text", + "content": "Proof. Recalling that " + }, + { + "bbox": [ + 104, + 531, + 504, + 554 + ], + "type": "inline_equation", + "content": "\\rho = \\mathrm{Tr}[W\\Sigma W^T]" + }, + { + "bbox": [ + 104, + 531, + 504, + 554 + ], + "type": "text", + "content": ", we multiply " + }, + { + "bbox": [ + 104, + 531, + 504, + 554 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 104, + 531, + 504, + 554 + ], + "type": "text", + "content": " from the right to both sides of the solution in Proposition 3 and take trace:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 202, + 558, + 504, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 558, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 202, + 558, + 504, + 613 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} ^ {\\prime} \\right] = \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} \\left(B _ {M} + 2 \\kappa (c - \\rho) \\Sigma_ {M}\\right) \\right] (46) \\\\ = \\operatorname {T r} \\left[ W ^ {T} W \\Sigma \\right] (47) \\\\ = \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] = \\rho . 
(48) \\\\ \\end{array}", + "image_path": "319a583048679275cbf77df934e2e2f59c96f309474e5a1752c9498c3a90f27f.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 616, + 241, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 241, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 241, + 628 + ], + "type": "text", + "content": "The first line further simplifies to" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 162, + 632, + 504, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 632, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 162, + 632, + 504, + 654 + ], + "type": "interline_equation", + "content": "\\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right] + \\kappa (c - \\rho) \\operatorname {T r} \\left[ \\Sigma^ {- 1} \\Sigma_ {M} \\right] = \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right] + \\kappa (c - \\rho) d _ {M}, \\tag {49}", + "image_path": "7ff38ae687369480beba016c4c49909d75dd6d7f53ca1e8239cc740269ba82b7.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 658, + 373, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 373, + 671 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 373, + 671 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 658, + 373, + 671 + ], + "type": "inline_equation", + "content": "d_M \\coloneqq \\operatorname{Tr}[M]" + }, + { + "bbox": [ + 105, + 658, + 373, + 671 + ], + "type": "text", + "content": " is the number of nonzero eigenvalues of " + }, + { + "bbox": [ + 105, + 658, + 373, + 671 + ], + "type": "inline_equation", + "content": "B_M'" + }, + { + "bbox": [ + 105, + 658, + 373, + 671 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 675, + 272, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 675, + 272, + 686 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 272, + 686 + ], + "type": "text", + "content": "This gives an equation of " + }, + { + "bbox": [ + 105, + 675, + 272, + 686 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 105, + 675, + 272, + 686 + ], + "type": "text", + "content": " that solves to" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 249, + 689, + 504, + 718 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 689, + 504, + 718 + ], + "spans": [ + { + "bbox": [ + 249, + 689, + 504, + 718 + ], + "type": "interline_equation", + "content": "c - \\rho = \\frac {c - \\frac {1}{2} \\operatorname {T r} \\left[ \\Sigma^ {- 1} B _ {M} \\right]}{1 + \\kappa d _ {M}}. \\tag {50}", + "image_path": "df134636da008685683e3cdba4380f7a48b9c8a28fe4208387dc659ed4439d7b.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 720, + 230, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 230, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 230, + 732 + ], + "type": "text", + "content": "This proves the proposition. 
" + }, + { + "bbox": [ + 105, + 720, + 230, + 732 + ], + "type": "inline_equation", + "content": "\\square" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 332, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 332, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 332, + 94 + ], + "type": "text", + "content": "F ADDITIONAL THEORETICAL CONCERNS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 405, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 405, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 405, + 118 + ], + "type": "text", + "content": "F.1 CASE OF DATA-INDEPENDENT NON-GAUSSIAN AUGMENTATION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 126, + 506, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 126, + 506, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 126, + 506, + 170 + ], + "type": "text", + "content": "In the main text, we mainly considered the case when the noise is Gaussian. In this section, we consider a case where the noise is data-dependent and non-Gaussian. We show that the results we discussed in the main text still hold qualitatively. The general form of the loss function in Eq. 
(3) still applies:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 217, + 171, + 504, + 192 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 171, + 504, + 192 + ], + "spans": [ + { + "bbox": [ + 217, + 171, + 504, + 192 + ], + "type": "interline_equation", + "content": "L = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + \\frac {1}{8} \\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right]. \\tag {51}", + "image_path": "96371c9414efea9e293e5e7b6f14318733fee0087792c4efea57dfcdf46390a8.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 194, + 362, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 362, + 206 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 362, + 206 + ], + "type": "text", + "content": "We consider a global rescaling augmentation for each datum " + }, + { + "bbox": [ + 105, + 194, + 362, + 206 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 105, + 194, + 362, + 206 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 288, + 213, + 504, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 288, + 213, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 288, + 213, + 504, + 224 + ], + "type": "interline_equation", + "content": "x = s \\hat {x}, \\tag {52}", + "image_path": "14bbbfc2f74b76d990acb4ff65c9d7e6d2741b9f9fb97d67e7d034d4ace9b21b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "inline_equation", + "content": "s \\sim \\exp(b)" + }, + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "text", + "content": " obeys an exponential distribution 
with mean " + }, + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "text", + "content": " and variance " + }, + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "inline_equation", + "content": "b^2" + }, + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "text", + "content": ". Note that even if " + }, + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "inline_equation", + "content": "\\hat{x}" + }, + { + "bbox": [ + 104, + 230, + 504, + 298 + ], + "type": "text", + "content": " is Gaussian, the augmented data is no longer Gaussian. In particular, the augmentation now becomes data-dependent. This augmentation can also be seen as a structured, biologically plausible data augmentation that encourages the model to be scale-invariant, which is what Wien's law for biological perception demands (Dayan and Abbott, 2005): no matter whether an image is dark or bright, the content of the image is the same." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 302, + 465, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 302, + 465, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 302, + 465, + 314 + ], + "type": "text", + "content": "Under this augmentation, the noise covariance is dependent on " + }, + { + "bbox": [ + 105, + 302, + 465, + 314 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 105, + 302, + 465, + 314 + ], + "type": "text", + "content": " and no longer Gaussian:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 267, + 319, + 504, + 334 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 319, + 504, + 334 + ], + "spans": [ + { + "bbox": [ + 267, + 319, + 504, + 334 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ x x ^ {T} \\right] = 2 b ^ {2} A _ {0}. 
\\tag {53}", + "image_path": "d8ae0d234066967167abfe91dbdea8c3a2b414c215600e1d1f158b5d1b46cc2c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 339, + 186, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 339, + 186, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 186, + 350 + ], + "type": "text", + "content": "We also obtain that" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 245, + 348, + 505, + 363 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 348, + 505, + 363 + ], + "spans": [ + { + "bbox": [ + 245, + 348, + 505, + 363 + ], + "type": "interline_equation", + "content": "C = \\mathbb {E} \\left[ (b - s) ^ {2} x x ^ {T} \\right] = b ^ {2} A _ {0}. \\tag {54}", + "image_path": "395ccfbb98d0cdf900caac78c613eef00a012d99ba6049d2cdd39e502a3363c5.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 365, + 286, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 286, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 286, + 376 + ], + "type": "text", + "content": "The second term in Eq. (3) can be written as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 117, + 382, + 504, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 382, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 117, + 382, + 504, + 444 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left. \\right. 
\\operatorname {V a r} \\left[ | W (x - \\chi) | ^ {2} \\right] = \\mathbb {E} \\left[\\left(\\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right]\\right) ^ {2} \\right] - \\mathbb {E} \\left[ \\operatorname {T r} \\left[ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} \\right]\\right] ^ {2} (55) \\\\ = [ f i r s t \\text {t e r m} ] - 4 \\operatorname {T r} [ W (A _ {0} + C) W ^ {T} ] ^ {2} (56) \\\\ = [ f i r s t \\text {t e r m} ] - 4 \\operatorname {T r} \\left[ W \\Sigma W ^ {T} \\right] ^ {2}, (57) \\\\ \\end{array}", + "image_path": "adf825dc399f51f04ef54673a243b8625a1794034c11662ede3d072d4d56884e.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 449, + 364, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 364, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 364, + 460 + ], + "type": "text", + "content": "where we have used the definition " + }, + { + "bbox": [ + 105, + 449, + 364, + 460 + ], + "type": "inline_equation", + "content": "\\Sigma = A_0 + C" + }, + { + "bbox": [ + 105, + 449, + 364, + 460 + ], + "type": "text", + "content": ". The first term is" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 197, + 465, + 504, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 465, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 197, + 465, + 504, + 482 + ], + "type": "interline_equation", + "content": "[ f i r s t \\text {t e r m} ] = \\mathbb {E} \\left[ \\left(\\operatorname {T r} [ W (x - \\chi) (x - \\chi) ^ {T} W ^ {T} ]\\right) ^ {2} \\right]. 
\\tag {58}", + "image_path": "49016d677ed495e264f884db78163ec798a5a10e773c0bc5cacf8704b0c19455.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "text", + "content": "However, for fixed rescaling factor " + }, + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "inline_equation", + "content": "s_x" + }, + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "inline_equation", + "content": "s_\\chi" + }, + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "text", + "content": ", each " + }, + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "inline_equation", + "content": "W(x - \\chi)" + }, + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "text", + "content": " obeys a multivariate Gaussian distribution with variance " + }, + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "inline_equation", + "content": "2(s_x^2 + s_\\chi^2)WA_0" + }, + { + "bbox": [ + 104, + 487, + 504, + 512 + ], + "type": "text", + "content": ", and so we have" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 127, + 517, + 505, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 517, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 127, + 517, + 505, + 533 + ], + "type": "interline_equation", + "content": "[ f i r s t \\text {t e r m} ] = \\mathbb {E} _ {s _ {x}, s _ {\\chi}} \\left[ \\left(s _ {x} ^ {2} + s _ {\\chi} ^ {2}\\right) ^ {2} \\right] \\left(4 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] ^ {2} + 8 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \\right]\\right), \\tag {59}", + "image_path": "db203f8dc41fe37b937e82f293793a460378925742579c0f3679109daabbec7d.jpg" + } + ] + } + ], + 
"index": 17 + }, + { + "bbox": [ + 105, + 539, + 379, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 539, + 379, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 539, + 379, + 553 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 539, + 379, + 553 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{s_x,s_\\chi}\\big[(s_x^2 +s_\\chi^2)^2\\big] = 56b^4" + }, + { + "bbox": [ + 105, + 539, + 379, + 553 + ], + "type": "text", + "content": " . Combining terms, we obtain that" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 559, + 505, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 559, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 141, + 559, + 505, + 574 + ], + "type": "interline_equation", + "content": "\\operatorname {V a r} \\left[ \\left| W (x - \\chi) \\right| ^ {2} \\right] = 4 8 b ^ {2} \\times 4 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] ^ {2} + 5 6 b ^ {4} \\times 8 \\operatorname {T r} \\left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \\right]. 
\\tag {60}", + "image_path": "3d3ddee324e613cd5357a838c83fc1b095317fb578950e20c78c4fd485069629.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 585, + 208, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 208, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 208, + 596 + ], + "type": "text", + "content": "The loss function is thus:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 154, + 601, + 505, + 616 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 601, + 505, + 616 + ], + "spans": [ + { + "bbox": [ + 154, + 601, + 505, + 616 + ], + "type": "interline_equation", + "content": "L = - \\operatorname {T r} \\left[ W B W ^ {T} \\right] + 2 4 b ^ {2} \\operatorname {T r} \\left[ W A _ {0} W ^ {T} \\right] ^ {2} + 5 6 b ^ {4} \\operatorname {T r} \\left[ W A _ {0} W ^ {T} W A _ {0} W ^ {T} \\right]. \\tag {61}", + "image_path": "f704f6ad47e2d61d42e527e3ef50ea2349250718fe7a6a0bf213d349fce2f256.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "text", + "content": "Note that this loss function is a special case of the loss function in Eq. (10) where " + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "inline_equation", + "content": "c = 0" + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "inline_equation", + "content": "\\kappa = 24b^2" + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "text", + "content": " (and with a rescaled fourth-order term). 
As in the main text, " + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "text", + "content": " is different according to different choices of loss functions. Because " + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "text", + "content": " commute with " + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "inline_equation", + "content": "A_0" + }, + { + "bbox": [ + 104, + 626, + 504, + 693 + ], + "type": "text", + "content": " by construction, one expects collapses to happen at locations predicted by Proposition 3 and 4 under suitable choices of parameters. Also note that the odd terms vanish as discussed, and so the local stability of the origin should decide the collapsing behavior of this situation." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "This shows that collapse can also happen when the data augmentation is structured. We comment that the analysis in this section is minimal, and one important future direction is to provide more precise and insightful conditions of collapse under structured data augmentation." 
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_content_list.json b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..44d6131b96e5a9367e9bba168d995d1f13bde8a4 --- /dev/null +++ b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_content_list.json @@ -0,0 +1,5924 @@ +[ + { + "type": "text", + "text": "WHEN DATA GEOMETRY MEETS DEEP FUNCTION: GENERALIZING OFFLINE REINFORCEMENT LEARNING", + "text_level": 1, + "bbox": [ + 171, + 99, + 826, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jianxiong Li $^{1}$ , Xianyuan Zhan $^{1,2*}$ , Haoran Xu $^{1}$ , Xiangyu Zhu $^{1}$ , Jingjing Liu $^{1}$ & Ya-Qin Zhang $^{1*}$", + "bbox": [ + 179, + 167, + 861, + 185 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ Institute for AI Industry Research (AIR), Tsinghua University, Beijing, China", + "$^{2}$ 
Shanghai Artificial Intelligence Laboratory, Shanghai, China" + ], + "bbox": [ + 183, + 185, + 702, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "li-jx21@mails.tsinghua.edu.cn, zhanxianyuan@air.tsinghua.edu.cn", + "bbox": [ + 183, + 214, + 800, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 263, + 545, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In offline reinforcement learning (RL), one detrimental issue to policy learning is the error accumulation of deep $Q$ function in out-of-distribution (OOD) areas. Unfortunately, existing offline RL methods are often over-conservative, inevitably hurting generalization performance outside data distribution. In our study, one interesting observation is that deep $Q$ functions approximate well inside the convex hull of training data. Inspired by this, we propose a new method, DOGE (Distance-sensitive Offline RL with better Generalization). DOGE marries dataset geometry with deep function approximators in offline RL, and enables exploitation in generalizable OOD areas rather than strictly constraining policy within data distribution. Specifically, DOGE trains a state-conditioned distance function that can be readily plugged into standard actor-critic methods as a policy constraint. Simple yet elegant, our algorithm enjoys better generalization compared to state-of-the-art methods on D4RL benchmarks. Theoretical analysis demonstrates the superiority of our approach to existing methods that are solely based on data distribution or support constraints. 
Code is available at https://github.com/Facebear-ljx/DOGE.", + "bbox": [ + 228, + 295, + 769, + 518 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 547, + 336, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Offline reinforcement learning (RL) provides a new possibility to learn optimized policies from large, pre-collected datasets without any environment interaction (Levine et al., 2020). This holds great promise to solve many real-world problems when online interaction is costly or dangerous yet historical data is easily accessible (Zhan et al., 2022). However, the optimization nature of RL, as well as the need for counterfactual reasoning on unseen data under offline setting, have caused great technical challenges for designing effective offline RL algorithms. Evaluating value function outside data coverage areas can produce falsely optimistic values; without corrective information from online interaction, such estimation errors can accumulate quickly and misguide policy learning process (Van Hasselt et al., 2018; Fujimoto et al., 2018; Kumar et al., 2019).", + "bbox": [ + 169, + 578, + 826, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent model-free offline RL methods investigate this error accumulation challenge in several ways: 1) Policy Constraint: directly constraining learned policy to stay inside distribution, or with the support of dataset (Kumar et al., 2019); 2) Value Regularization: regularizing value function to assign low values at out-of-distribution (OOD) actions (Kumar et al., 2020b); 3) In-sample Learning: learning value function within data samples (Kostrikov et al., 2021b) or simply treating it as the value function of behavioral policy (Brandfonbrener et al., 2021). 
All three schools of methods share similar traits of being conservative and omitting evaluation on OOD data, which brings benefits of minimizing model exploitation error, but at the expense of poor generalization of learned policy in OOD regions. Thus, a gaping gap still exists when such methods are applied to real-world tasks, where most datasets only partially cover state-action space with suboptimal policies.", + "bbox": [ + 169, + 710, + 826, + 851 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Meanwhile, online deep reinforcement learning (DRL) that leverages powerful deep neural network (DNN) with optimistic exploration on unseen samples can yield high-performing policies with promising generalization performance (Mnih et al., 2015; Silver et al., 2017; Degrave et al., 2022;", + "bbox": [ + 169, + 856, + 826, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding authors", + "bbox": [ + 192, + 910, + 336, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/969693e52ea1b5a561742329d160d9dba76c5fe630bbdf5ecdfd7236acf0572b.jpg", + "image_caption": [ + "Figure 1: Left: Visualization of AntMaze dataset. Data transitions of two small areas on the critical pathways to the destination have been removed (red box). Right: Performance of three SOTA offline RL methods." 
+ ], + "image_footnote": [], + "bbox": [ + 200, + 99, + 339, + 208 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/00f19f0a37f1972766c0bf4d1febc88cf192a7ea787ea37f46b857a71762b5fa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 99, + 491, + 208 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/8c7cb325e06f0782b93d14c398a47000591da83dc955e55688ba29e1b3b17338.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 99, + 645, + 208 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b7029b2d6fd977e225ef1f9c44d947476632d531aec6ffb20b26a7336166fdf1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 99, + 797, + 208 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Packer et al., 2018). This staring contrast propels us to re-think the question: Are we being too conservative? It is well known that DNN has unparalleled approximation and generalization abilities, compared with other function approximators. These attractive abilities have not only led to huge success in computer vision and natural language processing (He et al., 2016; Vaswani et al., 2017), but also amplified the power of RL. Ideally, in order to obtain the best policy, an algorithm should enable offline policy learning on unseen state-action pairs that function approximators (e.g., $Q$ function, policy network) can generalize well, and add penalization only on non-generalizable areas.", + "bbox": [ + 169, + 253, + 826, + 352 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, existing offline RL methods heed too much conservatism on data-related regularizations, while largely overlooking the generalization ability of deep function approximators. Intuitively, let us consider the well-known AntMaze task in the D4RL benchmark (Fu et al., 2020), where an ant navigates from the start to the destination in a large maze. 
We observe that existing offline RL methods fail miserably when we remove only small areas of data on the critical pathways to the destination. As shown in Figure 1, the two missing areas reside in close proximity to the trajectory data. Simply \"stitching\" up existing trajectories as approximation is not sufficient to form a near-optimal policy at missing regions. Exploiting the generalizability of deep function approximators, however, can potentially compensate for the missing information.", + "bbox": [ + 169, + 358, + 823, + 484 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In our study, we observe that the value function approximated by DNN can interpolate well but struggles to extrapolate (see Section 2.2). Such an \"interpolate well\" phenomenon is also observed in previous studies on the generalization of DNN (Haley & Soloway, 1992; Barnard & Wessels, 1992; Arora et al., 2019a; Xu et al., 2020; Florence et al., 2022). This finding motivates us to reconsider the generalization of function approximators in offline RL in the context of dataset geometry. Along this line, we discover that a closer distance between a training sample to the offline dataset often leads to a smaller value variation range of the learned neural network, which effectively yields more accurate inference of the value function inside the convex hull (formed by the dataset). By contrast, outside the convex hull, especially in those areas far from the training data, the value variation range usually renders too large to guarantee a small approximation error.", + "bbox": [ + 169, + 489, + 823, + 630 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Inspired by this, we design a new algorithm DOGE (Distance-sensitive Offline RL with better Generalization) from the perspective of generalization performance of deep $Q$ function. 
We first propose a state-conditioned distance function to characterize the geometry of offline datasets, whose output serves as a proxy to the network generalization ability. The resulting algorithm learns a state-conditioned distance function as a policy constraint on standard actor-critic RL framework. Theoretical analysis demonstrates the superior performance bound of our method compared to previous policy constraint methods that are based on data distribution or support constraints. Evaluations on D4RL benchmarks validate that our algorithm enjoys better performance and generalization abilities than state-of-the-art offline RL methods.", + "bbox": [ + 169, + 636, + 826, + 761 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 DATA GEOMETRY VS. DEEP $Q$ FUNCTIONS", + "text_level": 1, + "bbox": [ + 171, + 782, + 560, + 799 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 NOTATIONS", + "text_level": 1, + "bbox": [ + 171, + 814, + 295, + 828 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We consider the standard continuous action space Markov decision process (MDP) setting, which can be represented by a tuple $(S, \\mathcal{A}, \\mathcal{P}, r, \\gamma)$ , where $S$ and $\\mathcal{A}$ are the state and action space, $\\mathcal{P}(s'|s, a)$ is the transition dynamics, $r(s, a)$ is a reward function, and $\\gamma \\in [0,1)$ is a discount factor. The objective of the RL problem is to find a policy $\\pi(a|s)$ that maximizes the expected cumulative discounted return, which can be represented by a $Q$ function $Q_{\\theta}^{\\pi}(s, a) = \\mathbb{E}[\\sum_{t=0}^{\\infty} \\gamma^{t} r(s_{t}, a_{t}) | s_{0} = s, a_{0} = a, a_{t} \\sim \\pi(\\cdot | s_{t}), s_{t+1} \\sim \\mathcal{P}(\\cdot | s_{t}, a_{t})]$ . 
The $Q$ function is typically approximated by function", + "bbox": [ + 169, + 840, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6e9e3426dbdabdef6eeaa571b21dd63d5dd82524c444cb7cca9ab38614b521c8.jpg", + "image_caption": [ + "Figure 2: Approximation error of deep $Q$ functions with different dataset geometry. Offline data are marked as white dots (Please refer to Appendix E.5 for detailed experimental setup)." + ], + "image_footnote": [], + "bbox": [ + 174, + 102, + 816, + 200 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "approximators with learnable parameters $\\theta$ , such as deep neural networks. Under offline RL setting, we are only given a fixed dataset $\\mathcal{D}$ and cannot interact further with the environment. Therefore, the parameters $\\theta$ are optimized by minimizing the following temporal difference (TD) error:", + "bbox": [ + 169, + 253, + 826, + 297 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\theta} \\mathbb {E} _ {(s, a, s ^ {\\prime}) \\in \\mathcal {D}} \\left[ \\left(r (s, a) + \\gamma \\mathbb {E} _ {a ^ {\\prime} \\sim \\pi (\\cdot | s ^ {\\prime})} \\left[ Q _ {\\theta^ {\\prime}} ^ {\\pi} \\left(s ^ {\\prime}, a ^ {\\prime}\\right) \\right]\\right) - Q _ {\\theta} ^ {\\pi} (s, a) \\right] ^ {2} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 301, + 825, + 327 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $Q_{\\theta'}^{\\pi}$ is the target $Q$ function, which is a delayed copy of the current $Q$ network.", + "bbox": [ + 169, + 330, + 733, + 348 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 INTERPOLATE VS. 
EXTRAPOLATE", + "text_level": 1, + "bbox": [ + 171, + 363, + 449, + 376 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Motivating examples. Let's first consider a set of simple one-dimensional random walk tasks with different offline datasets, where agents at each step can take an action to move in the range of $[-1, 1]$ , and the state space is a straight line ranging from $[-10, 10]$ . The destination is located at $s = 10$ . The closer to the destination, the larger reward the agent gets (i.e., $r = 1$ at $s = 10$ , $r = 0$ at $s = -10$ ). The approximation errors of the learned $Q$ functions are visualized in Figure 2. Note that the approximation errors of the learned $Q$ functions tend to be low at state-action pairs that lie inside or near the boundaries of the convex hull formed by the dataset. Under continuous state-action space, state-action pairs within the convex hull of the dataset can be represented in an interpolated manner (referred as interpolated data), i.e., $x_{in} = \\sum_{i=1}^{n} \\alpha_{i} x_{i}$ , $\\sum_{i=1}^{n} \\alpha_{i} = 1$ , $\\alpha_{i} \\geq 0$ , $x_{i} = (s_{i}, a_{i}) \\in \\mathcal{D}$ ; similarly, we can define the extrapolated data that lie outside the convex hull of the dataset as $x_{out} = \\sum_{i=1}^{n} \\beta_{i} x_{i}$ , where $\\sum_{i=1}^{n} \\beta_{i} = 1$ and $\\beta_{i} \\geq 0$ do not hold simultaneously.", + "bbox": [ + 169, + 388, + 826, + 544 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We observe that the geometry of the datasets play a special role on the approximation error of deep $Q$ functions, or in other words, deep $Q$ functions interpolate well but struggle to extrapolate. 
This phenomenon is also reflected in studies on the generalization performance of deep neural networks under a supervised learning setting (Haley & Soloway, 1992; Barnard & Wessels, 1992; Arora et al., 2019a; Xu et al., 2020; Florence et al., 2022), but is largely overlooked in modern offline RL.", + "bbox": [ + 169, + 549, + 826, + 619 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Theoretical explanations. Based on advanced theoretical machinery from the generalization analysis of DNN, such as neural tangent kernel (NTK) (Jacot et al., 2018), we can theoretically demonstrate that this phenomenon is also carried over to the offline RL setting for deep $Q$ functions. Define $\\operatorname{Proj}_{\\mathcal{D}}(x) \\coloneqq \\arg \\min_{x_i \\in \\mathcal{D}} \\| x - x_i \\|$ (we denote $\\| x \\|$ as Euclidean norm) as the projection operator that projects unseen data $x$ to the nearest data point in dataset $\\mathcal{D}$ . Theorem 1 gives a theoretical explanation of the \"interploate well\" phenomenon for deep $Q$ functions under the NTK assumptions (see Appendix B.2 for detailed proofs):", + "bbox": [ + 169, + 626, + 826, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Theorem 1. (Value difference of deep $Q$ function for interpolated and extrapolated data). 
Under the NTK regime, given an unseen interpolated data $x_{in}$ and an extrapolated data $x_{out}$ , then the value difference of deep $Q$ function for interpolated and extrapolated input data can be bounded as:", + "bbox": [ + 169, + 727, + 825, + 771 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| Q _ {\\theta} (x _ {i n}) - Q _ {\\theta} (\\mathrm {P r o j} _ {\\mathcal {D}} (x _ {i n})) \\| \\leq C _ {1} (\\sqrt {\\min (\\| x _ {i n} \\| , \\| \\mathrm {P r o j} _ {\\mathcal {D}} (x _ {i n}) \\|)} \\sqrt {d _ {x _ {i n}}} + 2 d _ {x _ {i n}}) \\\\ \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\| x _ {i n} \\| , \\| \\operatorname {P r o j} _ {\\mathcal {D}} \\left(x _ {i n}\\right) \\|\\right)} \\sqrt {B} + 2 B\\right) \\tag {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 776, + 823, + 816 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\| Q _ {\\theta} (x _ {o u t}) - Q _ {\\theta} (\\operatorname {P r o j} _ {\\mathcal {D}} (x _ {o u t})) \\| \\leq C _ {1} (\\sqrt {\\min (\\| x _ {o u t} \\| , \\| \\operatorname {P r o j} _ {\\mathcal {D}} (x _ {o u t}) \\|)} \\sqrt {d _ {x _ {o u t}}} + 2 d _ {x _ {o u t}}) \\quad (3)\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 819, + 825, + 838 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $d_{x_{in}} = \\| x_{in} - \\mathrm{Proj}_{\\mathcal{D}}(x_{in})\\| \\leq \\max_{x_i\\in \\mathcal{D}}\\| x_{in} - x_i\\| \\leq B$ and $d_{x_{out}} = \\| x_{out} - \\mathrm{Proj}_{\\mathcal{D}}(x_{out})\\|$ are distances of $x_{in}$ and $x_{out}$ to the nearest data points in dataset $\\mathcal{D}$ . 
$B$ and $C_1$ are finite constants.", + "bbox": [ + 169, + 840, + 823, + 872 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Theorem 1 shows that given an unseen input $x$ , $Q_{\\theta}(x)$ can be controlled by in-sample $Q$ value $Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))$ and the distance $\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|$ . The smaller the distance, the more controllable the output of deep $Q$ functions. Therefore, because the distance to dataset is strictly bounded (at", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "most $B$ for interpolated data), the approximated $Q$ values at interpolated data as well as extrapolated data near the boundaries of the convex hull formed by the dataset cannot be too far off. Moreover, as $d_{x_{out}}$ can take substantially larger values than $d_{x_{in}}$ , interpolated data generally enjoys a tighter bound compared with extrapolated data, if the dataset only narrowly covers a large state-action space.", + "bbox": [ + 169, + 103, + 826, + 161 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Empirical observations in Figure 2 and Theorem 1 both demonstrate that data geometry can induce different approximation error accumulation patterns for deep $Q$ functions. While approximation error accumulation is generally detrimental to offline RL, a fine-grained analysis is missing in previous studies about where value function can approximate well. 
We argue that it is necessary to take data geometry into consideration when designing less conservative offline RL algorithms.", + "bbox": [ + 169, + 166, + 823, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 GENERALIZABLE OFFLINE RL FRAMEWORK", + "text_level": 1, + "bbox": [ + 171, + 256, + 578, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we present our algorithm DOGE (Distance-sensitive Offline RL with better GEneralization). By introducing a specially designed state-conditioned distance function to characterize the geometry of offline datasets, we can construct a very simple, less conservative and also more generalizable offline RL algorithm upon standard actor-critic framework.", + "bbox": [ + 169, + 287, + 826, + 347 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 STATE-CONDITIONED DISTANCE FUNCTION", + "text_level": 1, + "bbox": [ + 171, + 359, + 521, + 376 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As revealed in Theorem 1, the sample-to-dataset distance plays an important role in measuring the controllability of $Q$ values. However, given an arbitrary state-action sample $(s,a)$ , naively computing its distance to the closest data point in a large dataset can be costly and impractical. Ideally, we prefer to have a learnable distance function which also has the ability to reflect the overall dataset geometry. 
Based on this intuition, we design a state-conditioned distance function that can be learned in an elegantly simple supervised manner with desirable properties.", + "bbox": [ + 169, + 387, + 823, + 470 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, we learn the state-conditioned distance function $g(s,a)$ by solving the following regression problem, with state-action pairs $(s,a)\\sim \\mathcal{D}$ and synthetic noise actions sampled from the uniform distribution over the full action space $\\mathcal{A}$ :", + "bbox": [ + 169, + 477, + 826, + 520 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {g} \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\mathbb {E} _ {\\hat {a} \\sim U n i f (\\mathcal {A})} [ \\| a - \\hat {a} \\| - g (s, \\hat {a}) ] ^ {2} \\right] \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 525, + 825, + 549 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In practical implementation, for each $(s,a)\\sim \\mathcal{D}$ , we sample $N$ noise actions uniformly in the action space $\\mathcal{A}$ to train $g(\\cdot)$ . More implementation details can be found in Appendix E. Moreover, with the optimization objective defined in Eq. (4), we can show that the optimal state-conditioned distance function has two desirable properties (proofs can be found in Appendix C):", + "bbox": [ + 169, + 556, + 823, + 613 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Property 1. The optimal state-conditioned distance function of Eq. (4) is convex w.r.t. 
actions and is an upper bound of the distance to the state-conditioned centroid $a_{o}(s)$ of training dataset $\\mathcal{D}$ :", + "bbox": [ + 169, + 617, + 823, + 646 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} g ^ {*} (s, \\hat {a}) = \\mathbb {E} _ {a \\sim U n i f (\\mathcal {A})} [ C (s, a) \\| \\hat {a} - a \\| ] \\\\ \\geq \\left\\| \\hat {a} - \\mathbb {E} _ {a \\sim U n i f (\\mathcal {A})} [ C (s, a) \\cdot a ] \\right\\| = \\left\\| \\hat {a} - a _ {o} (s) \\right\\|, \\quad \\forall \\hat {a} \\in \\mathcal {A}, s \\in \\mathcal {D} \\tag {5} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 651, + 825, + 689 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $C(s,a) = \\frac{\\mu(s,a)}{\\mathbb{E}_{a\\sim Unif(\\mathcal{A})}\\mu(s,a)}\\geq 0$ , $\\mu (s,a)$ is state-action distribution of dataset $\\mathcal{D}$ . Given a state $s\\in \\mathcal{D}$ , the state-conditioned centroid is defined as $a_{o}(s) = \\mathbb{E}_{a\\sim Unif(\\mathcal{A})}[C(s,a)\\cdot a]$ . Since $L_{2}$ -norm is convex and the non-negative combination of convex functions is still convex, $g^{*}(s,\\hat{a})$ is also a convex function w.r.t. $\\hat{a}$ .", + "bbox": [ + 169, + 718, + 823, + 782 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Property 2. The negative gradient of the optimal state-conditioned distance function at an extrapolated action $\\hat{a}$ , $-\\nabla_{\\hat{a}}g^{*}(s,\\hat{a})$ , points inside the convex hull of the dataset.", + "bbox": [ + 169, + 786, + 826, + 816 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "From Property 1, we can see that the optimal state-conditioned distance function characterizes data geometry and outputs an upper bound of the distance to the state-conditioned centroid of the training dataset. 
Property 2 indicates that if we use the learned distance function as a policy constraint, it can drive the learned policy to move inside the convex hull of training data. We visualize the value of the trained state-conditioned distance function in Figure 3. It is clear that the learned distance function can accurately predict the sample-to-dataset centroid distance. By utilizing such distance function, we can constrain the policy based on the global geometric information of training datasets. This", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3af0690a0c24cc1b62b4de981c2f912d5e3a67c1cc78c32e1ac4d92a039d2026.jpg", + "image_caption": [ + "(a) Illustration of $g^{*}(s,a)$" + ], + "image_footnote": [], + "bbox": [ + 189, + 104, + 421, + 242 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/fd84e0cbb87325f6c0b95df5f73e1f9da82216501e3997e72d8dcd90b7efb138.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 431, + 104, + 557, + 174 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/275170debf5794418b5549afc027ce1470e01658ad45a4b992cc8d2c3bbdbb8d.jpg", + "image_caption": [ + "Figure 3: Illustration of the state-conditioned distance function. The output of the optimal distance function is the non-negative combination of the distances to all training data. $G$ is the threshold in Eq. (6) In (b), Offline data are marked as white dots." 
+ ], + "image_footnote": [], + "bbox": [ + 431, + 175, + 555, + 242 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/40bab1ca52bc6f560c40f8f642154c8e7cfe04c50c9776938bea31bb380634a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 560, + 106, + 686, + 174 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/eb5443643c996d51665a7019ba4d509d77d7fb18ff2831eb3156d2b9bd697521.jpg", + "image_caption": [ + "(b) Visualization of $g^{*}(s,a)$ trained on diverse 2D datasets" + ], + "image_footnote": [], + "bbox": [ + 560, + 174, + 687, + 242 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/07e86dde83c3f006921d3e67958f083dab941b5b164314d67e3f2fbc8b33be4a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 106, + 810, + 174 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/81f898b16f98f108db23152e53a13122211804c6c5620a2524eab7996ea88bcb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 174, + 810, + 242 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "desirable property is non-obtainable by simply constraining the policy based on sample-to-sample distance such as the MSE loss between policy generated and dataset actions, which can only bring local geometric information. 
Moreover, the learned distance function can not only predict well at in-distribution states but also generalize well at OOD states.", + "bbox": [ + 169, + 330, + 823, + 387 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 DISTANCE-SENSITIVE OFFLINE REINFORCEMENT LEARNING", + "text_level": 1, + "bbox": [ + 171, + 402, + 637, + 417 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Capturing the geometry of offline datasets, we now construct a minimalist distance-sensitive offline RL framework, by simply plugging the state-conditioned distance function as a policy constraint into standard online actor-critic methods (such as TD3 (Fujimoto et al., 2018) and SAC (Haarnoja et al., 2018)). This results in the following policy maximization objective:", + "bbox": [ + 169, + 429, + 825, + 486 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\pi = \\arg \\max _ {\\pi} \\mathbb {E} _ {s \\sim \\mathcal {D}, a \\sim \\pi (\\cdot | s)} [ Q (s, a) ] \\quad s. t. \\mathbb {E} _ {s \\sim \\mathcal {D}, a \\sim \\pi (\\cdot | s)} [ g (s, a) ] \\leq G \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 488, + 823, + 510 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $G$ is a task-dependent threshold varying across tasks. In our method, we adopt a non-parametric treatment by setting $G$ as the mean output (50% quantile) of the learned distance function on the training dataset, i.e., $\\mathbb{E}_{(s,a)\\sim \\mathcal{D}}[g(s,a)]$ , which is approximated over mini-batch samples to reduce computational complexity (see Appendix G for ablation on $G$ ). The constrained optimization problem in Eq. (6) can be reformulated as:", + "bbox": [ + 169, + 513, + 823, + 583 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\pi = \\arg \\max _ {\\pi} \\min _ {\\lambda} \\mathbb {E} _ {s \\sim \\mathcal {D}, a \\sim \\pi (\\cdot | s)} [ \\beta Q (s, a) - \\lambda (g (s, a) - G) ] \\quad s. t. 
\\lambda \\geq 0 \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 248, + 587, + 823, + 608 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda$ is the Lagrangian multiplier, which is auto-adjusted using dual gradient descent. Following TD3+BC (Fujimoto & Gu, 2021), $Q$ values are rescaled by $\\beta = \\frac{\\alpha}{\\frac{1}{n}\\sum_{i=1}^{n}|Q(s_i,a_i)|}$ to balance $Q$ function maximization and policy constraint satisfaction, controlled by a hyperparameter $\\alpha$ . To reduce computations, the denominator of $\\beta$ is approximated over mini-batch of samples. The resulting algorithm is easy to implement. In our experiments, we use TD3. Please refer to Appendix E for implementation details.", + "bbox": [ + 169, + 613, + 823, + 700 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 RELAXATION WITH BELLMAN-CONSISTENT COEFFICIENT", + "text_level": 1, + "bbox": [ + 171, + 717, + 614, + 729 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.1 BELLMAN-CONSISTENT COEFFICIENT AND CONSTRAINED POLICY SET", + "text_level": 1, + "bbox": [ + 171, + 742, + 720, + 756 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The key difference between DOGE and other policy constraint methods lies in that DOGE relaxes the strong full coverage assumption1 on offline datasets and allows exploitation on generalizable OOD areas. 
To relax the unrealistic full-coverage assumption, we resort to a weaker condition proposed by (Xie et al., 2021a), the Bellman-consistent coefficient (Definition 1), to measure how well Bellman errors can transfer to different distributions (Theorem 2).", + "bbox": [ + 169, + 766, + 823, + 837 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Denote $\\| f\\|_{2,\\mu}^2 \\coloneqq \\mathbb{E}_\\mu [\\| f\\|^2]$ ; $\\mathcal{T}^\\pi Q$ is the Bellman operator of policy $\\pi$ , defined as $\\mathcal{T}^\\pi Q(s,a) \\coloneqq r(s,a) + \\gamma \\mathbb{E}_{a'\\sim \\pi (\\cdot |s'),s'\\sim \\mathcal{P}(\\cdot |s,a)}[Q(s',a')] \\coloneqq r(s,a) + \\gamma \\mathbb{P}^\\pi [Q(s',a')]$ . $\\mathbb{P}^\\pi [\\cdot ]$ is the brief notation for $\\mathbb{E}_{a'\\sim \\pi (\\cdot |s'),s'\\sim \\mathcal{P}(\\cdot |s,a)}[\\cdot ]$ . $\\mathcal{F}$ is the function class of $Q$ networks. The Bellman-consistent coefficient is defined as:", + "bbox": [ + 169, + 843, + 823, + 902 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "$\\sup_{s,a}\\frac{v(s,a)}{\\mu(s,a)} < \\infty, v$ and $\\mu$ are marginal distributions of the learned policy and the dataset (Le et al., 2019).", + "bbox": [ + 189, + 907, + 823, + 928 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Definition 1. (Bellman-consistent coefficient). We define $\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)$ to measure the distributional shift from an arbitrary distribution $v$ to data distribution $\\mu$ , w.r.t. 
$\\mathcal{F}$ and $\\pi$ ,", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) := \\sup _ {Q \\in \\mathcal {F}} \\frac {\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , v} ^ {2}}{\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , \\mu} ^ {2}} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 133, + 825, + 171 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This definition captures the generalization performance of function approximation across different distributions. Intuitively, a small value of $\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)$ means Bellman errors for policy $\\pi$ can accurately transfer from distribution $\\mu$ to $v$ . This suggests that Bellman errors can transfer well between two distributions even if a large discrepancy exists, as long as the Bellman-consistent coefficient is small.", + "bbox": [ + 169, + 186, + 823, + 257 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Based on Definition 1, we introduce the definition of Bellman-consistent constrained policy set.", + "bbox": [ + 169, + 263, + 799, + 279 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Definition 2. (Bellman-consistent constrained policy set). We define the Bellman-consistent constrained policy set as $\\Pi_{\\mathcal{B}}$ . 
The Bellman-consistent coefficient under the transition induced by $\\Pi_{\\mathcal{B}}$ can be bounded by some finite constants $l(k)$ :", + "bbox": [ + 169, + 281, + 825, + 324 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {B} \\left(\\rho_ {k}, \\mu , \\mathcal {F}, \\pi\\right) \\leq l (k) \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 423, + 325, + 825, + 342 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\rho_{k} = \\rho_{0}P^{\\pi_{1}}\\ldots P^{\\pi_{k}},\\forall \\pi_{1},\\ldots ,\\pi_{k}\\in \\Pi_{\\mathcal{B}},\\rho_{0}$ is the initial state-action distribution and $P^{\\pi_i}$ is the transition operator induced by $\\pi_{i}$ , i.e., $P^{\\pi_i}(s',a'|s,a) = \\mathcal{P}(s'|s,a)\\pi_i(a'|s')$", + "bbox": [ + 169, + 344, + 823, + 375 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We denote the constrained Bellman operator induced by $\\Pi_{\\mathcal{B}}$ as $\\mathcal{T}^{\\Pi_B}$ , $\\mathcal{T}^{\\Pi_B}Q(s,a) := r(s,a) + \\max_{\\pi \\in \\Pi_B}\\gamma \\mathbb{P}^\\pi [Q(s',a')]$ . $\\mathcal{T}^{\\Pi_B}$ can be seen as a Bellman operator on a redefined MDP, thus theoretical results of MDP also carry over, such as contraction mapping and existence of a fixed point.", + "bbox": [ + 169, + 382, + 826, + 426 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3.2 BELLMAN CONSISTENT COEFFICIENT AND PERFORMANCE BOUND OF DOGE", + "text_level": 1, + "bbox": [ + 171, + 439, + 767, + 454 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We show that the policy set induced by DOGE is essentially a Bellman-consistent policy set defined in Definition 2. Meanwhile, the distance constraint in DOGE can produce a small value of $\\mathcal{B}$ and hence guarantee the learned policy deviates only to those generalizable areas.", + "bbox": [ + 169, + 463, + 823, + 506 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 2. 
(Upper bound of Bellman-consistent coefficient). Under the NTK assumption, the Bellman-consistent coefficient $\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)$ is upper bounded as:", + "bbox": [ + 169, + 508, + 823, + 537 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) \\leq \\frac {1}{\\epsilon_ {\\mu}} \\left\\| \\underbrace {(1 - \\gamma) Q \\left(s _ {o} , a _ {o}\\right) + R _ {\\max }} _ {\\mathcal {B} _ {1}} + \\underbrace {C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + d _ {1}\\right)} _ {\\mathcal {B} _ {2}} + \\underbrace {(2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + d _ {2}\\right)} _ {\\mathcal {B} _ {3}} \\right\\| _ {2, v} ^ {2} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 540, + 823, + 614 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where we denote $x = (s, a)$ and $x' = (s', a')$ . $x_o = \\mathbb{E}_{x \\sim \\mathcal{D}}[x]$ is the centroid of offline dataset. $d_1 = \\| x - x_o \\|$ and $d_2 = \\| x' - x_o \\|$ are the sample-to-centroid distances. $C_2 = \\sqrt{\\sup_{x \\in S \\times \\mathcal{A}} \\| x \\|}$ is related to the upper bound of the input scale. $\\epsilon_\\mu$ is the lower bound of Bellman error (square) for $\\pi$ under distribution $\\mu$ , i.e., $\\epsilon_\\mu \\leq \\| Q - T^\\pi Q \\|_{2,\\mu}^2$ .", + "bbox": [ + 169, + 613, + 825, + 676 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The RHS of Eq. (10) contains four parts: $\\frac{1}{\\epsilon_{\\mu}}$ , $\\mathcal{B}_1$ , $\\mathcal{B}_2$ and $\\mathcal{B}_3$ . It is reasonable to assume $\\epsilon_{\\mu} > 0$ , because of the approximation error of $Q$ networks and the distribution mismatch between $\\mu$ and $\\pi$ . $\\mathcal{B}_1$ is only dependent on the $Q$ value $Q(s_o, a_o)$ at the centroid of the dataset and the max reward $R_{\\mathrm{max}}$ . $\\mathcal{B}_2$ is related to distance $d_1$ and distribution $v$ . 
$\\mathcal{B}_3$ is related to $d_2$ , $v$ and $\\mathbb{P}^{\\pi}$ . To be mentioned, the distance regularization in DOGE compels the learned policy to output the action that is near the state-conditioned centroid of dataset, thus $\\mathcal{B}_2$ and $\\mathcal{B}_3$ can be driven to small values. Therefore, the RHS of Eq. (10) can be bounded by finite constants under DOGE, which shows that the constrained policy set induced by DOGE is essentially a Bellman-consistent constrained policy set.", + "bbox": [ + 169, + 684, + 826, + 800 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Then, the performance gap between the policy learned by DOGE and the optimal policy can be bounded as given in Theorem 3. See Appendix D.1 and D.2 for the proof of Theorem 2 and 3.", + "bbox": [ + 169, + 806, + 823, + 835 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3. (Performance bound of the learned policy by DOGE). Let $Q^{\\Pi_{\\mathcal{B}}}$ be the fixed point of $\\mathcal{T}^{\\Pi_{\\mathcal{B}}}$ , i.e., $Q^{\\Pi_{\\mathcal{B}}} = \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{\\Pi_{\\mathcal{B}}}$ , and $\\epsilon_k = Q^k - \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{k-1}$ is the Bellman error at the $k$ -th iteration. $\\| f \\|_{\\mu} := \\mathbb{E}_{\\mu}[\\| f \\|]$ . 
The performance of the learned policy $\\pi_n$ is bounded by:", + "bbox": [ + 169, + 835, + 826, + 880 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\lim _ {n \\rightarrow \\infty} \\| Q ^ {*} - Q ^ {\\pi_ {n}} \\| _ {\\rho_ {0}} \\leq \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} \\left[ L \\left(\\Pi_ {\\mathcal {B}}\\right) \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {\\mu} + \\frac {1 - \\gamma}{2 \\gamma} \\alpha \\left(\\Pi_ {\\mathcal {B}}\\right)\\right] \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 883, + 825, + 917 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $L(\\Pi_{\\mathcal{B}}) = \\sqrt{(1 - \\gamma)^2 \\sum_{k=1}^{\\infty} k \\gamma^{k-1} l(k)}$ , which is similar to the concentrability coefficient in BEAR (Kumar et al., 2019) but in a different form. Note that $l(k)$ is related to the RHS of Eq. (10) and can be driven to a small value by DOGE according to Theorem 2. $\\alpha(\\Pi_{\\mathcal{B}}) = \\| \\mathcal{T}^{\\Pi_{\\mathcal{B}}} Q^{\\Pi_{\\mathcal{B}}} - \\mathcal{T} Q^{*} \\|_{\\infty}$ is the suboptimality constant, which is similar to $\\alpha(\\Pi) = \\| \\mathcal{T}^{\\Pi} Q^{\\Pi} - \\mathcal{T} Q^{*} \\|_{\\infty}$ in BEAR.", + "bbox": [ + 169, + 102, + 823, + 161 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Compared with BEAR, DOGE allows a policy shift to some generalizable OOD areas and relaxes the strong full-coverage assumption. In addition, we have $L(\\Pi_{\\mathcal{B}}) \\leq L(\\Pi) \\propto \\frac{\\rho_0 P^{\\pi_1} \\dots P^{\\pi_k}}{\\mu(s, a)}$ , where $L(\\Pi)$ is the concentrability coefficient in BEAR. 
This is evident when $\mu(s, a) = 0$ and $\rho_0 P^{\pi_1} \dots P^{\pi_k}(s, a) > 0$ : $L(\Pi_{\mathcal{B}})$ can be bounded by finite constants but $L(\Pi) \to \infty$. Moreover, as $\Pi_{\mathcal{B}}$ extends the policy set to cover more generalizable OOD areas ( $\Pi \subseteq \Pi_{\mathcal{B}}$ ) and produces a larger feasible region for optimization, lower degree of suboptimality can be achieved (i.e., $\alpha(\Pi_{\mathcal{B}}) \leq \alpha(\Pi)$ ) compared to only performing optimization on $\Pi$ . Therefore, we can see that DOGE enjoys a tighter performance bound than previous more conservative methods when allowed to exploit generalizable OOD areas.", + "bbox": [ + 169, + 166, + 826, + 285 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 313, + 328, + 329 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For evaluation, we compare DOGE and prior offline RL methods over D4RL Mujoco and AntMaze tasks (Fu et al., 2020). Mujoco is a standard benchmark commonly used in previous work. AntMaze tasks are far more challenging due to the non-Markovian and mixed-quality offline dataset, the stochastic property of environments, and the high dimensional state-action space. Implementation details, experimental setup and additional experimental results can be found in Appendix E and F.", + "bbox": [ + 169, + 349, + 823, + 422 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 COMPARISON WITH SOTA", + "text_level": 1, + "bbox": [ + 171, + 446, + 401, + 462 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We compare DOGE with model-free SOTA methods, such as TD3+BC (Fujimoto & Gu, 2021), CQL (Kumar et al., 2020b) and IQL (Kostrikov et al., 2021b). For fairness, we use the “-v2” datasets for all methods. For most Mujoco tasks, we report the scores from the IQL paper. We obtain the other results using the authors' or our implementations. 
For AntMaze tasks, we obtain the results of CQL, TD3+BC, and IQL using the authors' implementations. For BC (Pomerleau, 1988), BCQ (Fujimoto et al., 2019) and BEAR (Kumar et al., 2019), we report the scores from (Fu et al., 2020). All methods are evaluated over the final 10 evaluations for Mujoco tasks and 100 for AntMaze tasks.", + "bbox": [ + 169, + 477, + 826, + 575 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1 shows that DOGE achieves comparable or better performance than SOTA methods on most Mujoco and AntMaze tasks. Compared to other policy constraint approaches such as BCQ, BEAR and TD3+BC, DOGE is the first policy constraint method to successfully solve AntMaze-medium and AntMaze-large tasks. Note that IQL is an algorithm designed for multi-step dynamics programming and attains strong advantage on AntMaze tasks. Nevertheless, DOGE can compete with or even surpass IQL on most AntMaze tasks, by only employing a generalization-oriented policy constraint. These results illustrate the benefits of allowing policy learning on generalizable OOD areas.", + "bbox": [ + 169, + 582, + 826, + 681 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 EVALUATION ON GENERALIZATION", + "text_level": 1, + "bbox": [ + 171, + 705, + 460, + 720 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To evaluate the generalization ability of DOGE, we remove small areas of data from the critical pathways to the destination in AntMaze medium and large tasks, to construct an OOD dataset. The two removed areas reside in close proximity to the trajectory data (see Figure 1). We evaluate representative methods (such as TD3+BC, CQL, IQL) and DOGE on these modified datasets. 
Figure 4 shows the comparison before and after data removal.", + "bbox": [ + 169, + 734, + 823, + 806 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For such a dataset with partial state-action space coverage, existing policy constraint methods tend to over-constrain the policy to stay inside the support of a dataset, where the optimal policy is not well-covered. Value regularization methods suffer from deteriorated generalization performance, as the value function is distorted to assign low value at all OOD areas. In-sample learning methods are only guaranteed to retain the best policy within the partially covered dataset (Kostrikov et al., 2021b). As shown in Figure 4, all these methods struggle to generalize well on the missing areas and suffer severe performance drop, while DOGE maintains competitive performance. This further demonstrates the benefits of relaxing over-conservatism in existing methods.", + "bbox": [ + 169, + 811, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/520ff2c2c765f7699a757e6b09d8f9ef32507256dbb6fda7389d9a6250987556.jpg", + "table_caption": [ + "Table 1: Average normalized scores and standard deviations over 5 seeds on benchmark tasks" + ], + "table_footnote": [], + "table_body": "
DatasetBCBCQBEARTD3+BCCQLIQLDOGE(ours)
hopper-r4.97.114.28.5±0.68.3±0.27.9±0.421.1±12.6
halfcheetah-r0.28.815.111.0±1.120.0±0.411.2±2.917.8±1.2
walker2d-r1.76.510.71.6±1.78.3±0.15.9±0.50.9 ± 2.4
hopper-m52.956.751.959.3±4.258.5±2.166.2±5.798.6±2.1
halfcheetah-m42.647.041.048.3±0.344.0±5.447.4±0.245.3±0.6
walker2d-m75.372.680.983.7±2.172.5±0.878.3±8.786.8±0.8
hopper-m-r18.153.337.360.9±18.895.0±6.494.7±8.676.2±17.7
halfcheetah-m-r36.640.429.744.6±0.545.5±0.544.2±1.242.8±0.6
walker2d-m-r26.052.118.581.8±5.577.2±5.573.8±7.187.3±2.3
hopper-m-e52.581.817.798.0±9.4105.4±6.891.5±14.3102.7±5.2
halfcheetah-m-e55.289.138.990.7±4.391.6±2.886.7±5.378.7±8.4
walker2d-m-e107.5109.595.4110.1±0.5108.8±0.7109.6±1.0110.4±1.5
locomation total473.5624.9451.3698.5±49.0726.1±31.7717.4±55.9768.6±55.4
antmaze-u65.078.973.091.3±5.784.8±2.388.2±1.997.0±1.8
antmaze-u-d55.055.061.054.6±16.243.3±5.466.7±4.063.5±9.3
antmaze-m-p0.00.00.00.065.2±4.870.4±5.380.6±6.5
antmaze-m-d0.00.08.00.054.0±11.774.6±3.277.6±6.1
antmaze-l-p0.06.70.00.018.8±15.343.5±4.548.2±8.1
antmaze-l-d0.02.20.00.031.6±9.545.6±7.636.4±9.1
antmaze-total120.0142.8142.0145.9±21.9297.7±49.0389.0±26.5403.3±40.9
", + "bbox": [ + 173, + 126, + 823, + 429 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/49c9e6018d1a5f759c914c9175843a44c4e4f03b845c72c3324eb83ee60e69ca.jpg", + "image_caption": [ + "Policy constraint" + ], + "image_footnote": [], + "bbox": [ + 174, + 463, + 331, + 553 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cf18c8e2896a643c58acf911f20a50282df75df8c16a4795cee76caed2e46f83.jpg", + "image_caption": [ + "Value regularization" + ], + "image_footnote": [], + "bbox": [ + 336, + 463, + 493, + 553 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a7f61701ad8687a1c5d5494369d8525816007f4cb4ab04ea008879415c832474.jpg", + "image_caption": [ + "In-sample learning" + ], + "image_footnote": [], + "bbox": [ + 500, + 463, + 656, + 553 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/82b5a4cba0f0bea289e48ace1d5263f2acd56e84dea74a0d32acaea0de1a03d5.jpg", + "image_caption": [ + "DOGE (Ours)" + ], + "image_footnote": [], + "bbox": [ + 661, + 463, + 818, + 553 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3d9ade4e8a1eb09cdc98ed3dc4bac11622e37474f17806f5665c4b25852c71c6.jpg", + "image_caption": [ + "Figure 4: Generalization performance after removing data from AntMaze large tasks (see Appendix F.1 for detailed setup and additional results on AntMaze medium tasks)." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 560, + 333, + 651 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b9f5db12feacd952c5985cd236e6654c0b59a04770027710c915dd154ba2de40.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 338, + 560, + 496, + 650 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a9f1f2a7c49a7015c329cdedeee59aa83178cc2bed9a4abdf1d7cdb998f8579b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 560, + 658, + 650 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9ba79b5109bb6fa47399a02cfb0e1394cb3bef6a3a2547f25f0c3640dbc984af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 560, + 821, + 651 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 ABLATION STUDY", + "text_level": 1, + "bbox": [ + 171, + 750, + 341, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct ablation studies to evaluate the impact of the hyperparameter $\\alpha$ , the non-parametric distance threshold $G$ in Eq. (6), and the number of noise actions $N$ used to train the distance function. For $\\alpha$ , we add or subtract 2.5 to the original value; for $G$ , we choose $30\\%$ , $50\\%$ , $70\\%$ and $90\\%$ upper quantile of the distance values in mini-batch samples; for $N$ , we choose $N = 10, 20, 30$ .", + "bbox": [ + 169, + 790, + 826, + 848 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Compared to $N$ and $\\alpha$ , we find that $G$ has a more significant impact on the performance. Figure 5b shows that an overly restrictive $G$ (30% quantile) results in a policy set too small to cover near-optimal policies. A more tolerant $G$ , on the other hand, is unlikely to cause excessive error accumulation and achieves relatively good performance. 
In addition, Figure 5a and Figure 5c show that performance is stable across variations of hyperparameters, indicating that our method is hyperparameter-robust.", + "bbox": [ + 169, + 854, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f0c22aaf0812d5bdcd791e3ed965d26bb7f0cf71880de8ff0996763d37b8eb7c.jpg", + "image_caption": [ + "(a) $\\alpha$ has little effect on results" + ], + "image_footnote": [], + "bbox": [ + 174, + 113, + 380, + 231 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f237af1b604b5663d26e2d1de7fd8ea0af234e46d8774e911ae0bf4a5af03bdf.jpg", + "image_caption": [ + "Figure 5: Ablation results. The default parameters in our implementation are marked by $*$ . The error bars indicate min and max over 5 seeds. See Appendix G for more detailed ablation studies." + ], + "image_footnote": [], + "bbox": [ + 390, + 113, + 599, + 232 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/bbd5795ca3015234995854861d3116a3c8d81b6eb10efbd40d471f10634e7031.jpg", + "image_caption": [ + "(b) Small $G$ is harmful to results", + "(c) $N$ has little effect on results" + ], + "image_footnote": [], + "bbox": [ + 611, + 113, + 816, + 232 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 301, + 344, + 316 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To prevent distributional shift and exploitation error accumulation when inferring the value function at unseen samples, a direct approach is to restrict policy learning from deviating to OOD areas. 
To make sure the learned policy stays inside the distribution or support of training data, these policy constraint methods either carefully parameterize the learned policy (Fujimoto et al., 2019; Matsushima et al., 2020), or use explicit divergence penalties (Kumar et al., 2019; Wu et al., 2019; Fujimoto & Gu, 2021; Xu et al., 2021; Dadashi et al., 2021) or implicit divergence constraints (Peng et al., 2019; Nair et al., 2020; Xu et al., 2022a). The theories behind these methods typically assume full state-action space coverage of the offline datasets (Le et al., 2019; Kumar et al., 2019). However, policy constraint under full-coverage assumption is unrealistic in most real-world settings, especially on datasets with partial coverage and only sub-optimal behavior policies. Some recent works try to relax the full-coverage assumption to partial coverage by introducing different distribution divergence metrics, but only in theoretical analysis (Liu et al., 2020; Zanette et al., 2021; Xie et al., 2021b; Uehara & Sun, 2021; Xie et al., 2021a). Our method is an enhanced policy constraint method, where we relax the full-coverage assumption and allow the policy to learn on OOD areas where networks can generalize well.", + "bbox": [ + 169, + 340, + 826, + 537 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Another type of offline RL method, value regularization (Kumar et al., 2020b; Kostrikov et al., 2021a; Yu et al., 2021; Xu et al., 2022b; 2023), directly penalizes the value function to produce low values at OOD actions. In-sample learning methods (Brandfonbrener et al., 2021; Kostrikov et al., 2021b), on the other hand, only learn the value function within data or treat it as the value function of the behavior policy. Compared with our approach, these methods exercise too much conservatism, which limits the generalization performance of deep neural networks on OOD regions, largely weakening the ability of dynamic programming. 
There are also uncertainty-based and model-based methods that regularize the value function or policy with epistemic uncertainty estimated from model or value function (Janner et al., 2019; Yu et al., 2020; Uehara & Sun, 2021; Wu et al., 2021; Zhan et al., 2022; Bai et al., 2021). However, the estimation of the epistemic uncertainty of DNN is still an under-explored area, with results highly dependent on evaluation methods and the structure of DNN.", + "bbox": [ + 169, + 542, + 826, + 696 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 731, + 320, + 744 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this study, we provide new insights on the relationship between approximation error of deep $Q$ functions and geometry of offline datasets. Through empirical and theoretical analysis, we find that deep $Q$ functions attain relatively low approximation error when interpolating rather than extrapolating the dataset. This phenomenon motivates us to design a new algorithm, DOGE, to empower policy learning on OOD samples within the convex hull of training data. DOGE is simple yet elegant, by plugging a dataset geometry-derived distance constraint into TD3. With such a minimal surgery, DOGE outperforms existing model-free offline RL methods on most D4RL tasks. We theoretically prove that DOGE enjoys a tighter performance bound compared with existing policy constraint methods under the more realistic partial-coverage assumption. 
Empirical results and theoretical analysis suggest the necessity of re-thinking the conservatism principle in offline RL algorithm design, and points to sufficient exploitation of the generalization ability of deep $Q$ functions.", + "bbox": [ + 169, + 771, + 826, + 924 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 104, + 328, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This work is supported by National Key Research and Development Program of China under Grant (2022YFB2502904). This work is also supported by Baidu Inc. through Apollo-AIR Joint Research Center. The authors would also like to thank the anonymous reviewers for their feedback on the manuscripts. Jianxiong Li would like to thank Zhixu Du, Yimu Wang, Li Jiang, Haoyi Niu, Hao Zhao and all colleagues in AIR-Dream group for valuable discussions.", + "bbox": [ + 169, + 127, + 826, + 199 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 218, + 285, + 233 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zeyuan Allen-Zhu, Yuanzhi Li, and Yingyu Liang. Learning and generalization in overparameterized neural networks, going beyond two layers. Advances in neural information processing systems, 32, 2019.", + "Gaon An, Seungyong Moon, Jang-Hyun Kim, and Hyun Oh Song. Uncertainty-based offline reinforcement learning with diversified q-ensemble. Advances in neural information processing systems, 34:7436-7447, 2021.", + "Anonymous. Lightweight uncertainty for offline reinforcement learning via bayesian posterior. 
In Submitted to The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=55Eet8WGJTv. under review.", + "Sanjeev Arora, Simon Du, Wei Hu, Zhiyuan Li, and Ruosong Wang. Fine-grained analysis of optimization and generalization for overparameterized two-layer neural networks. In International Conference on Machine Learning, pp. 322-332. PMLR, 2019a.", + "Sanjeev Arora, Simon S Du, Wei Hu, Zhiyuan Li, Russ R Salakhutdinov, and Ruosong Wang. On exact computation with an infinitely wide neural net. Advances in Neural Information Processing Systems, 32, 2019b.", + "Chenjia Bai, Lingxiao Wang, Zhuoran Yang, Zhi-Hong Deng, Animesh Garg, Peng Liu, and Zhao ran Wang. Pessimistic bootstrapping for uncertainty-driven offline reinforcement learning. In International Conference on Learning Representations, 2021.", + "Etienne Barnard and LFA Wessels. Extrapolation and interpolation in neural network classifiers. IEEE Control Systems Magazine, 12(5):50-53, 1992.", + "Peter L Bartlett and Shahar Mendelson. Rademacher and gaussian complexities: Risk bounds and structural results. Journal of Machine Learning Research, 3(Nov):463-482, 2002.", + "Alberto Bietti and Julien Mairal. On the inductive bias of neural tangent kernels. Advances in Neural Information Processing Systems, 32, 2019.", + "David Brandfonbrener, Will Whitney, Rajesh Ranganath, and Joan Bruna. Offline rl without off-policy evaluation. Advances in Neural Information Processing Systems, 34:4933-4946, 2021.", + "Qi Cai, Zhuoran Yang, Jason D Lee, and Zhaoran Wang. Neural temporal-difference learning converges to global optima. Advances in Neural Information Processing Systems, 32, 2019.", + "Robert Dadashi, Shideh RezaEIFar, Nino Vieillard, LEOnard Hussenot, Olivier Pietquin, and Matthieu Geist. Offline reinforcement learning with pseudometric learning. In International Conference on Machine Learning, pp. 2307-2318. 
PMLR, 2021.", + "Jonas Degrave, Federico Felici, Jonas Buchli, Michael Neunert, Brendan Tracey, Francesco Carpanese, Timo Ewalds, Roland Hafner, Abbas Abdelmaleki, Diego de Las Casas, et al. Magnetic control of tokamak plasmas through deep reinforcement learning. Nature, 602(7897):414-419, 2022.", + "Jianqing Fan, Zhaoran Wang, Yuchen Xie, and Zhuoran Yang. A theoretical analysis of deep q-learning. In Learning for Dynamics and Control, pp. 486-489. PMLR, 2020.", + "Pete Florence, Corey Lynch, Andy Zeng, Oscar A Ramirez, Ayzaan Wahid, Laura Downs, Adrian Wong, Johnny Lee, Igor Mordatch, and Jonathan Thompson. Implicit behavioral cloning. In Conference on Robot Learning, pp. 158-168. PMLR, 2022." + ], + "bbox": [ + 171, + 241, + 828, + 925 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 508, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4rl: Datasets for deep data-driven reinforcement learning. arXiv preprint arXiv:2004.07219, 2020.", + "Scott Fujimoto and Shixiang Shane Gu. A minimalist approach to offline reinforcement learning. Advances in Neural Information Processing Systems, 34, 2021.", + "Scott Fujimoto, Herke Hoof, and David Meger. Addressing function approximation error in actor-critic methods. In International conference on machine learning, pp. 1587-1596. PMLR, 2018.", + "Scott Fujimoto, David Meger, and Doina Precup. Off-policy deep reinforcement learning without exploration. In International Conference on Machine Learning, pp. 2052-2062. PMLR, 2019.", + "Seyed Kamyar Seyed Ghasemipour, Shixiang Shane Gu, and Ofir Nachum. Why so pessimistic? 
estimating uncertainties for offline rl through ensembles, and why their independence matters.", + "Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In International conference on machine learning, pp. 1861-1870. PMLR, 2018.", + "Pamela J Haley and DONALD Soloway. Extrapolation limitations of multilayer feedforward neural networks. In [Proceedings 1992] IJCNN International Joint Conference on Neural Networks, volume 4, pp. 25-30. IEEE, 1992.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.", + "Arthur Jacot, Franck Gabriel, and Clément Hongler. Neural tangent kernel: Convergence and generalization in neural networks. Advances in neural information processing systems, 31, 2018.", + "Michael Janner, Justin Fu, Marvin Zhang, and Sergey Levine. When to trust your model: Model-based policy optimization. Advances in Neural Information Processing Systems, 32, 2019.", + "Ilya Kostrikov, Rob Fergus, Jonathan Tompson, and Ofir Nachum. Offline reinforcement learning with fisher divergence critic regularization. In International Conference on Machine Learning, pp. 5774-5783. PMLR, 2021a.", + "Ilya Kostrikov, Ashvin Nair, and Sergey Levine. Offline reinforcement learning with implicit q-learning. In International Conference on Learning Representations, 2021b.", + "Aviral Kumar, Justin Fu, Matthew Soh, George Tucker, and Sergey Levine. Stabilizing off-policy q-learning via bootstrapping error reduction. Advances in Neural Information Processing Systems, 32, 2019.", + "Aviral Kumar, Rishabh Agarwal, Dibya Ghosh, and Sergey Levine. Implicit under-parameterization inhibits data-efficient deep reinforcement learning. 
In International Conference on Learning Representations, 2020a.", + "Aviral Kumar, Aurick Zhou, George Tucker, and Sergey Levine. Conservative q-learning for offline reinforcement learning. Advances in Neural Information Processing Systems, 33:1179-1191, 2020b.", + "Hoang Le, Cameron Voloshin, and Yisong Yue. Batch policy learning under constraints. In International Conference on Machine Learning, pp. 3703-3712. PMLR, 2019.", + "Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020.", + "Boyi Liu, Qi Cai, Zhuoran Yang, and Zhaoran Wang. Neural trust region/proximal policy optimization attains globally optimal policy. Advances in neural information processing systems, 32, 2019.", + "Yao Liu, Adith Swaminathan, Alekh Agarwal, and Emma Brunskill. Provably good batch off-policy reinforcement learning without great exploration. Advances in Neural Information Processing Systems, 33:1264-1274, 2020." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tatsuya Matsushima, Hiroki Furuta, Yutaka Matsuo, Ofir Nachum, and Shixiang Gu. Deployment-efficient reinforcement learning via model-based offline optimization. In International Conference on Learning Representations, 2020.", + "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al. Human-level control through deep reinforcement learning. nature, 518(7540):529-533, 2015.", + "Ashvin Nair, Murtaza Dalal, Abhishek Gupta, and Sergey Levine. 
Accelerating online reinforcement learning with offline datasets. arXiv preprint arXiv:2006.09359, 2020.", + "Charles Packer, Katelyn Gao, Jernej Kos, Philipp Krahenbuhl, Vladlen Koltun, and Dawn Song. Assessing generalization in deep reinforcement learning. arXiv preprint arXiv:1810.12282, 2018.", + "Xue Bin Peng, Aviral Kumar, Grace Zhang, and Sergey Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019.", + "Dean A Pomerleau. Alvinn: An autonomous land vehicle in a neural network. Advances in neural information processing systems, 1, 1988.", + "David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, et al. Mastering the game of go without human knowledge. nature, 550(7676):354-359, 2017.", + "Masatoshi Uehara and Wen Sun. Pessimistic model-based offline reinforcement learning under partial coverage. In International Conference on Learning Representations, 2021.", + "Hado Van Hasselt, Yotam Doron, Florian Strub, Matteo Hessel, Nicolas Sonnerat, and Joseph Modayil. Deep reinforcement learning and the deadly triad. arXiv preprint arXiv:1812.02648, 2018.", + "Vladimir N Vapnik and A Ya Chervonenkis. On the uniform convergence of relative frequencies of events to their probabilities. In Measures of complexity, pp. 11-30. Springer, 2015.", + "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.", + "Yifan Wu, George Tucker, and Ofir Nachum. Behavior regularized offline reinforcement learning. arXiv preprint arXiv:1911.11361, 2019.", + "Yue Wu, Shuangfei Zhai, Nitish Srivastava, Joshua M Susskind, Jian Zhang, Ruslan Salakhutdinov, and Hanlin Goh. Uncertainty weighted actor-critic for offline reinforcement learning. 
In International Conference on Machine Learning, pp. 11319-11328. PMLR, 2021.", + "Chenjun Xiao, Bo Dai, Jincheng Mei, Oscar A Ramirez, Ramki Gummadi, Chris Harris, and Dale Schuurmans. Understanding and leveraging overparameterization in recursive value estimation. In International Conference on Learning Representations, 2021.", + "Tengyang Xie, Ching-An Cheng, Nan Jiang, Paul Mineiro, and Alekh Agarwal. Bellman-consistent pessimism for offline reinforcement learning. Advances in neural information processing systems, 34, 2021a.", + "Tengyang Xie, Nan Jiang, Huan Wang, Caiming Xiong, and Yu Bai. Policy finetuning: Bridging sample-efficient offline and online reinforcement learning. Advances in neural information processing systems, 34, 2021b.", + "Haoran Xu, Xianyuan Zhan, Jianxiong Li, and Honglei Yin. Offline reinforcement learning with soft behavior regularization. arXiv preprint arXiv:2110.07395, 2021.", + "Haoran Xu, Li Jiang, Jianxiong Li, and Xianyuan Zhan. A policy-guided imitation approach for offline reinforcement learning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022a. URL https://openreview.net/forum?id=CKbqDtZnSc." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Haoran Xu, Xianyuan Zhan, and Xiangyu Zhu. Constraints penalized q-learning for safe offline reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, 2022b.", + "Haoran Xu, Li Jiang, Jianxiong Li, Zhuoran Yang, Zhaoran Wang, Victor Wai Kin Chan, and Xianyuan Zhan. 
Sparse q-learning: Offline reinforcement learning with implicit value regularization. In International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=ueYYgo2pSSU.", + "Keyulu Xu, Mozhi Zhang, Jingling Li, Simon Shaolei Du, Ken-Ichi Kawarabayashi, and Stefanie Jegelka. How neural networks extrapolate: From feedforward to graph neural networks. In International Conference on Learning Representations, 2020.", + "Pan Xu and Quanquan Gu. A finite-time analysis of q-learning with neural network function approximation. In International Conference on Machine Learning, pp. 10555-10565. PMLR, 2020.", + "Tianhe Yu, Garrett Thomas, Lantao Yu, Stefano Ermon, James Y Zou, Sergey Levine, Chelsea Finn, and Tengyu Ma. Mopo: Model-based offline policy optimization. Advances in Neural Information Processing Systems, 33:14129-14142, 2020.", + "Tianhe Yu, Aviral Kumar, Rafael Rafailov, Aravind Rajeswaran, Sergey Levine, and Chelsea Finn. Combo: Conservative offline model-based policy optimization. Advances in Neural Information Processing Systems, 34, 2021.", + "Andrea Zanette, Martin J Wainwright, and Emma Brunskill. Provable benefits of actor-critic methods for offline reinforcement learning. Advances in neural information processing systems, 34, 2021.", + "Xianyuan Zhan, Haoran Xu, Yue Zhang, Xiangyu Zhu, Honglei Yin, and Yu Zheng. Deepthermal: Combustion optimization for thermal power generating units using offline reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, 2022.", + "Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning (still) requires rethinking generalization. Communications of the ACM, 64(3):107-115, 2021." 
+ ], + "bbox": [ + 171, + 102, + 826, + 544 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A SKETCH OF THEORETICAL ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 102, + 522, + 118 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this section, we present in Figure 6 a sketch of the overall logical flow in our theoretical analyses and the proposed algorithm, DOGE. We start by analyzing the effects of data geometry on the generalization patterns of deep Q-functions. We find that a small sample-to-dataset distance leads to a tightened Q-function approximation error and thus interpolation enjoys better generalization properties than extrapolation (Theorem 1). Motivated by this, we propose DOGE, which tries to control the upper bound of the sample-to-centroid distance to be small (Property 1) and enforces a convex hull based policy constraint (Property 2). Then, we dive deeper and find that the upper bound of the Bellman-consistent coefficient is well controlled by sample-to-centroid distance and thus DOGE enjoys a bounded bellman-consistent coefficient (Theorem 2). 
Based on these findings, we can derive a tighter performance bound of DOGE as compared to support constraint methods like BEAR (Theorem 3).", + "bbox": [ + 169, + 133, + 826, + 287 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/0623ba8fec6b85b0996f8cc0b44efa8bea72ee4e56762670f536b443ccace4fc.jpg", + "image_caption": [ + "Figure 6: Sketch of theoretical analysis" + ], + "image_footnote": [], + "bbox": [ + 173, + 300, + 823, + 589 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B THEORETICAL ANALYSIS OF THE IMPACT OF DATA GEOMETRY ON DEEP $Q$ FUNCTIONS", + "text_level": 1, + "bbox": [ + 171, + 645, + 823, + 679 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To analyze the generalization of a function approximator, one can refer to some classical methods such as Rademacher complexity (Bartlett & Mendelson, 2002) and VC-dimension (Vapnik & Chervonenkis, 2015). However, the generalization bounds obtained by these methods are usually trivial and cannot explain the generalization behavior in the overparameterized regime (Zhang et al., 2021). Recent breakthroughs in neural tangent kernel (NTK) shed light on the generalization of DNN. NTK builds the connection between the training dynamics of DNN and the solution of the kernel regression w.r.t. NTK, and is widely used in recent analysis of DNN generalization (Jacot et al., 2018; Arora et al., 2019b; Bietti & Mairal, 2019). 
What's more, NTK is also a popular analyzing tool in the convergence and optimality of deep RL (Cai et al., 2019; Fan et al., 2020; Kumar et al., 2020a; Xiao et al., 2021) and thus is used in our study.", + "bbox": [ + 169, + 695, + 826, + 835 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.1 NEURAL TANGENT KERNEL", + "text_level": 1, + "bbox": [ + 171, + 853, + 413, + 867 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We denote a general neural network by $f(\\theta, x): \\mathbb{R}^d \\to \\mathbb{R}$ , where $\\theta$ is all the parameters in the network and $x \\in \\mathbb{R}^d$ is the input. Given a training dataset $\\{(x_i, y_i)\\}_{i=1}^n$ , the parameters $\\theta$ are optimized by minimizing the squared loss function, i.e., $\\mathcal{L}(\\theta) = \\frac{1}{2} \\sum_{i=1}^n (f_\\theta(x_i) - y_i)^2$ by gradient", + "bbox": [ + 169, + 878, + 825, + 926 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "descent. The dynamics of the network's output can be formulated by Lemma 1 (Lemma 3.1. of (Arora et al., 2019b)); see (Arora et al., 2019b) for the proof of Lemma 1.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Lemma 1. Consider minimizing the squared loss $\\mathcal{L}(\\theta)$ by gradient descent with infinitesimally small learning rate, i.e., $\\frac{d\\theta(t)}{dt} = -\\nabla \\mathcal{L}(\\theta(t))$ . Let $\\mathbf{u}(t) = (f(\\theta(t), x_i))_{i \\in [n]} \\in \\mathbb{R}^n$ be the network outputs on all $x_i$ 's at time $t$ , and $\\mathbf{Y} = (y_i)_{i \\in [n]}$ be the desired outputs. 
Then $\\mathbf{u}(t)$ follows the following evolution, where $\\mathbf{H}(t)$ is an $n \\times n$ positive semidefinite matrix whose $(i,j)$ -th entry is $\\left\\langle \\frac{\\partial f(\\theta(t), x_i)}{\\partial \\theta}, \\frac{\\partial f(\\theta(t), x_j)}{\\partial \\theta} \\right\\rangle$ :", + "bbox": [ + 169, + 137, + 826, + 223 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d \\mathbf {u} (t)}{d t} = - \\mathbf {H} (t) \\cdot (\\mathbf {u} (t) - \\mathbf {Y}). \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 224, + 825, + 255 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Plenty of works (Jacot et al., 2018; Arora et al., 2019b; Allen-Zhu et al., 2019; Xu et al., 2020) study the dynamics of the neural networks' training process and find that if the width of networks is sufficiently large, $\\mathbf{H}(t)$ stays almost constant during training, i.e., $\\mathbf{H}(t) = \\mathbf{H}(0)$ . What's more, if the neural networks' parameters are randomly initialized with certain scales and the networks width goes to infinity, $\\mathbf{H}(0)$ converges to a fixed matrix $\\mathbf{K}$ , called neural tangent kernel (NTK) (Jacot et al., 2018).", + "bbox": [ + 169, + 268, + 826, + 353 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {K} (x, x ^ {\\prime}) = \\mathbb {E} _ {\\theta \\sim W} \\left\\langle \\frac {\\partial f (\\theta (t) , x)}{\\partial \\theta}, \\frac {\\partial f (\\theta (t) , x ^ {\\prime})}{\\partial \\theta} \\right\\rangle \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 373, + 825, + 407 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where, $W$ is Gaussian distribution. The training dynamics in Lemma 1 is identical to the dynamics of kernel regression under gradient flow, because $\\mathbf{K}$ stays constant during training when the width of neural networks goes to infinity. 
Then, the final prediction function $(t \\to \\infty$ , assuming $\\mathbf{u}(0) = 0$ ) is equal to the kernel regression solution:", + "bbox": [ + 169, + 412, + 826, + 469 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nf _ {n t k} (x) = \\left(\\mathbf {K} \\left(x, x _ {1}\\right), \\dots , \\mathbf {K} \\left(x, x _ {n}\\right)\\right) \\cdot \\mathbf {K} _ {\\text {t r a i n}} ^ {- 1} \\mathbf {Y} \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 491, + 825, + 508 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\\mathbf{K}_{train}^{-1}$ is the $n\\times n$ NTK for the training data (the state-action pair $x = (s,a)$ in the policy evaluation in offline RL) and stays constant during training once the training data is fixed. $\\mathbf{Y}$ is the training labels $(r(s,a) + \\gamma \\mathbb{E}_{a'\\sim \\pi (\\cdot |s')}[Q_{\\theta '}(s',a')]$ in offline RL). $\\mathbf{K}(x,x_i)$ is the kernel value between test data $x$ and training data $x_{i}$ . We denote the feature map of $\\mathbf{K}(\\cdot ,\\cdot)$ as $\\Phi (\\cdot)$ , and $\\mathbf{K}(x,x^{\\prime}) = \\langle \\Phi (x),\\Phi (x^{\\prime})\\rangle$ . Then, Eq. (14) is equivalent to:", + "bbox": [ + 169, + 517, + 826, + 590 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nf _ {n t k} (x) = \\left(\\langle \\Phi (x), \\Phi (x _ {1}) \\rangle , \\dots , \\langle \\Phi (x), \\Phi (x _ {n}) \\rangle\\right) \\cdot \\mathbf {K} _ {\\text {t r a i n}} ^ {- 1} \\mathbf {Y} \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 611, + 825, + 630 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.2 IMPACT OF DATA GEOMETRY ON DEEP $Q$ FUNCTIONS", + "text_level": 1, + "bbox": [ + 171, + 647, + 588, + 662 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this section, we analyze the impact of data geometry on deep $Q$ functions under the NTK regime. 
We first introduce the smoothness property of the feature map $\\Phi(x)$ induced by NTK (Lemma 2). Then, we introduce the equivalence between the kernel regression solution in Eq. (15) and a min-norm solution (Lemma 3). Building on Lemma 2 and Lemma 3, Lemma 4 analyzes the smoothness of the deep $Q$ functions. Finally, we study how data geometry affects deep $Q$ functions (Theorem 1).", + "bbox": [ + 169, + 674, + 826, + 746 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Assumption 1. (NTK assumption). We assume the function approximators discussed in our paper are two-layer fully-connected ReLU neural networks with infinite width and are trained with infinitesimally small learning rate unless otherwise specified.", + "bbox": [ + 169, + 750, + 826, + 792 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Although there exist some gaps between the NTK assumption and the real setting, NTK is one of the most advanced theoretical machinery for the generalization analysis of DNN. In addition, Assumption 1 is common in previous analysis on the generalization of DNN (Jacot et al., 2018; Arora et al., 2019a; Bietti & Mairal, 2019) and the convergence of DRL (Cai et al., 2019; Liu et al., 2019; Xu & Gu, 2020; Fan et al., 2020). 
For more accurate analysis, we should adopt more advanced analysis tools than NTK and hence leave it for future work.", + "bbox": [ + 169, + 805, + 826, + 888 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We first introduce Lemma 2 (Proposition 4 of (Bietti & Mairal, 2019)), which shows the feature map $\\Phi(x)$ induced by NTK is not Lipschitz continuous but holds a weaker Hölder smoothness property.", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Lemma 2. (Smoothness of the kernel map of two-layer ReLU networks). Let $\\Phi$ be the kernel map of the neural tangent kernel induced by a two-layer ReLU neural network, $x$ and $y$ be two inputs, then $\\Phi$ satisfies the following smoothness property.", + "bbox": [ + 169, + 103, + 826, + 148 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\Phi (x) - \\Phi (y) \\right\\| \\leq \\sqrt {\\min (\\| x \\| , \\| y \\|) \\| x - y \\|} + 2 \\| x - y \\|. \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 165, + 825, + 185 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma 3 (Lemma 2 of (Xu et al., 2020)) builds the connection between the kernel regression solution in Eq. (14) and a min-norm solution. For the proof of Lemma 3, we refer the reader to (Xu et al., 2020).", + "bbox": [ + 169, + 214, + 826, + 257 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma 3. (Equivalence to a min-norm optimization problem). Let $\\Phi(x)$ be the feature map induced by a neural tangent kernel, for any $x \\in \\mathbb{R}^d$ . The solution to the kernel regression in Eq. (14) and Eq. 
(15) is equivalent to $f_{ntk}(x) = \\Phi(x)^T \\beta_{ntk}$ , where $\\beta_{ntk}$ is the optimal solution of a min-norm optimization problem defined as", + "bbox": [ + 169, + 261, + 826, + 319 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\beta} \\| \\beta \\|\n$$\n", + "text_format": "latex", + "bbox": [ + 437, + 335, + 493, + 358 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\ns.t. \\Phi \\left(x _ {i}\\right) ^ {T} \\beta = y _ {i}, \\text{ for } i = 1, \\dots , n. \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 351, + 823, + 378 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Then, deep $Q$ functions satisfy the following smoothness property.", + "bbox": [ + 169, + 402, + 609, + 419 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma 4. (Smoothness for deep $Q$ functions). Given two inputs $x$ and $x'$ , the distance between these two data points is $d = \\| x - x' \\|$ . $C_1 \\coloneqq \\sup \\| \\beta_{ntk} \\|_\\infty$ is a finite constant. Then the difference between the output at $x$ and the output at $x'$ can be bounded by:", + "bbox": [ + 169, + 421, + 825, + 465 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| Q _ {\\theta} (x) - Q _ {\\theta} \\left(x ^ {\\prime}\\right) \\right\\| \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\left\\| x \\right\\| , \\left\\| x ^ {\\prime} \\right\\|\\right)} \\sqrt {d} + 2 d\\right) \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 473, + 825, + 492 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Proof. In offline RL, we denote a general $Q$ network by $Q_{\\theta}(x): \\mathbb{R}^{|S| + |\\mathcal{A}|} \\to \\mathbb{R}$ , where $\\theta$ is all the parameters in the network and $x = (s,a) \\in \\mathbb{R}^{|S| + |\\mathcal{A}|}$ is the brief notation for state-action pair $(s,a)$ . 
The $Q$ function is trained via minimizing the temporal difference error defined as $\\frac{1}{2}\\sum_{i=1}^{n}(Q_{\\theta}(x_i) - y_i)^2$ by gradient descent, where $y_i = r(x_i) + \\gamma \\mathbb{E}_{a_i' \\sim \\pi(\\cdot | s_i')} [Q_{\\theta'}^\\pi(x_i')] \\in \\mathbb{R}$ is the target value.", + "bbox": [ + 169, + 530, + 823, + 604 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Using kernel method from NTK, $Q$ function can be formulated as $Q_{\\theta}(x) = \\Phi (x)^{T}\\beta$ , where $\\Phi (x)$ is independent of the changes on training labels when NTK assumption holds. This is because as the width of a neural net goes to infinity, the NTK kernel $\\mathbf{K}(x,x^{\\prime}) = < \\Phi (x),\\Phi (x^{\\prime})>$ produced by this network stays constant during training, and so is the property of the feature map $\\Phi (x)$ (Jacot et al., 2018). So, the learning process under NTK framework is actually adjusting $\\beta$ to fit the label rather than $\\Phi (x)$ . As a result, Lemma 2 holds when deep $Q$ function satisfies NTK assumptions. Given two inputs $x$ and $x^{\\prime}$ , the distance between these two inputs is $d = \\| x - x^{\\prime}\\|$ . 
Based on Lemma 2, it is easy to see that", + "bbox": [ + 169, + 609, + 826, + 722 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| Q _ {\\theta} (x) - Q _ {\\theta} \\left(x ^ {\\prime}\\right) \\right\\| = \\left\\| \\Phi (x) ^ {T} \\beta - \\Phi \\left(x ^ {\\prime}\\right) ^ {T} \\beta \\right\\| \\\\ \\leq \\| \\Phi (x) - \\Phi (x ^ {\\prime}) \\| \\| \\beta \\| _ {\\infty} \\quad (\\text {I n f i n i t y n o r m}) \\\\ \\leq \\| \\beta \\| _ {\\infty} \\left(\\sqrt {\\min \\left(\\| x \\| , \\| x ^ {\\prime} \\|\\right) \\cdot \\| x - x ^ {\\prime} \\|} + 2 \\| x - x ^ {\\prime} \\|\\right) (\\text {L e m m a} 2) \\tag {19} \\\\ = \\| \\beta \\| _ {\\infty} (\\sqrt {\\min (\\| x \\| , \\| x ^ {\\prime} \\|) \\cdot d} + 2 d) \\\\ \\leq C _ {\\beta} \\left(\\sqrt {\\operatorname* {m i n} \\left(\\| x \\| , \\| x ^ {\\prime} \\|\\right) \\cdot d} + 2 d\\right) \\quad \\left(C _ {\\beta} := \\sup \\| \\beta \\| _ {\\infty}\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 739, + 823, + 843 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Additionally, if we consider the delayed $Q$ target and delayed actor updates during policy learning, we can assume the target value used for $Q$ evaluation stays relatively stable during each policy evaluation step and the problem can be seen as solving a series of regression problems. Under this mild assumption, we can learn the actual $\\beta_{ntk}$ at each step ( $\\beta \\rightarrow \\beta_{ntk}$ and so $C_{\\beta} \\rightarrow C_1$ , where $C_1 \\coloneqq \\sup \\| \\beta_{ntk} \\|_{\\infty}$ ) and thus complete the proof. 
Similar assumptions and treatments are also used", + "bbox": [ + 169, + 854, + 826, + 926 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "in Section 4 of (Kumar et al., 2020a) that Q function at each iteration can fit its label well, Appendix A.8 of (Xiao et al., 2021), as well as Appendix F of (Ghasemipour et al.).", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/954b6d160de1bdbd32bae04107cf7708adb2e72ff9baa6acde8219101018a7c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 138, + 823, + 151 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Lemma 4 states the value difference of a deep $Q$ function for two inputs is related to the distance between these two inputs. The closer the distance, the smaller the value difference.", + "bbox": [ + 169, + 169, + 823, + 196 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.2.1 PROOF OF THEOREM 1", + "text_level": 1, + "bbox": [ + 171, + 212, + 390, + 226 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Builds on Lemma 4, we can combine the data geometry and analyze the impact of data geometry on deep $Q$ functions.", + "bbox": [ + 169, + 237, + 823, + 266 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. We first review the definition of interpolated data and extrapolated data. 
Under continuous state-action space, state-action pairs within the convex hull of the dataset can be represented in an interpolated manner (referred as interpolated data $x_{in}$ ):", + "bbox": [ + 169, + 281, + 825, + 324 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nx _ {i n} = \\sum_ {i = 1} ^ {n} \\alpha_ {i} x _ {i}, \\quad \\sum_ {i = 1} ^ {n} \\alpha_ {i} = 1, \\alpha_ {i} \\geq 0 \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 330, + 825, + 371 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Similarly, we can define extrapolated data that lie outside the convex hull of the dataset as $x_{out}$ :", + "bbox": [ + 169, + 383, + 799, + 401 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nx _ {o u t} = \\sum_ {i = 1} ^ {n} \\beta_ {i} x _ {i}, \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 439, + 406, + 825, + 446 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $\\sum_{i=1}^{n} \\beta_{i} = 1$ and $\\beta_{i} \\geq 0$ does not hold simultaneously.", + "bbox": [ + 169, + 453, + 580, + 470 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We define $\\mathrm{Proj}_{\\mathcal{D}}(x) \\coloneqq \\arg \\min_{x_i \\in \\mathcal{D}} \\| x - x_i \\|$ as a projector that projects unseen data $x$ to its nearest data in dataset $\\mathcal{D}$ . Given an interpolated data $x_{in}$ and an extrapolated data $x_{out}$ , the distances to their nearest data in dataset are $d_{x_{in}} = \\| x_{in} - \\mathrm{Proj}_{\\mathcal{D}}(x_{in})\\|$ and $d_{x_{out}} = \\| x_{out} - \\mathrm{Proj}_{\\mathcal{D}}(x_{out})\\|$ . Because interpolated data lie inside the convex hull of training data, $d_{x_{in}} \\leq \\max_{x_i \\in \\mathcal{D}} \\| x_{in} - x_i\\| \\leq B$ is bounded, where $B \\coloneqq \\max_{x_i, x_j \\in \\mathcal{D}} \\| x_i - x_j\\|$ is a finite constant. 
Then, by applying Lemma 4, the value difference of deep $Q$ function for interpolated and extrapolated data can be formulated as the following shows.", + "bbox": [ + 169, + 474, + 823, + 571 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| Q _ {\\theta} \\left(x _ {i n}\\right) - Q _ {\\theta} \\left(\\operatorname {P r o j} _ {\\mathcal {D}} \\left(x _ {i n}\\right)\\right) \\right\\| \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\left\\| x _ {i n} \\right\\| , \\left\\| \\operatorname {P r o j} _ {\\mathcal {D}} \\left(x _ {i n}\\right) \\right\\|\\right)} \\sqrt {d _ {x i n}} + 2 d _ {x i n}\\right) (22) \\\\ \\leq C _ {1} (\\sqrt {\\min (\\| x _ {i n} \\| , \\| \\mathrm {P r o j} _ {\\mathcal {D}} (x _ {i n}) \\|)} \\sqrt {B} + 2 B) \\\\ \\left\\| Q _ {\\theta} \\left(x _ {o u t}\\right) - Q _ {\\theta} \\left(\\operatorname {P r o j} _ {\\mathcal {D}} \\left(x _ {o u t}\\right) \\right\\Vert\\right) \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\left\\| x _ {o u t} \\right\\| , \\left\\| \\operatorname {P r o j} _ {\\mathcal {D}} \\left(x _ {o u t}\\right) \\right\\|\\right)} \\sqrt {d _ {x _ {o u t}}} + 2 d _ {x _ {o u t}}\\right) (23) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 580, + 825, + 648 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/49b4837e3942e6b96296cee735f0ef3adf6a25c49272a1dfbe78e7965f274437.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 651, + 825, + 662 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.3 QUANTITATIVE EXPERIMENTS ON THEOREM 1", + "text_level": 1, + "bbox": [ + 171, + 681, + 540, + 696 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In addition to the one-dimensional random walk experiments presented in Section 2.2, we conduct additional experiments on the more complex and high-dimensional MuJoCo tasks (including D4RL Hopper-medium-v2, Halfcheetah-medium-v2, and Walker2d-medium-v2) 
to provide quantitative support to Theorem 1, in particular, the pertinence of interpolation and extrapolation. We first synthesize lots of interpolated data $x_{in}$ and extrapolated data $x_{out}$ ( $x = (s,a) \\in S \\times \\mathcal{A}$ ) and then search for their nearest data points in offline dataset $\\mathcal{D}$ accordingly, i.e., $\\mathrm{Proj}_{\\mathcal{D}}(x_{in})$ and $\\mathrm{Proj}_{\\mathcal{D}}(x_{out})$ . Then, we can evaluate the Q-value differences $\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x)) \\|$ (LHS of Theorem 1) at these generated data and see whether the Q-value differences align well with Theorem 1.", + "bbox": [ + 169, + 708, + 826, + 820 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For the detailed experiment setup, recall that an interpolated data point $x_{in}$ is a convex combination of the offline dataset, i.e., $x_{in} = \\sum_{i=1}^{n} \\alpha_i x_i$ , $x_i \\sim \\mathcal{D}$ with weights $\\alpha_i$ that satisfy $\\sum_{i=1}^{n} \\alpha_i = 1$ , $\\alpha_i \\geq 0$ . Therefore, we can interpolate the offline dataset based on $\\alpha_i$ sampled from the Dirichlet distribution to generate the interpolated data. Also, an extrapolated data point $x_{out}$ is expressed as a weighted sum of the offline dataset, i.e., $x_{out} = \\sum_{i=1}^{n} \\beta_i x_i$ , $x_i \\sim \\mathcal{D}$ , but its weights $\\beta_i$ do not satisfy the non-negativity and the summing to 1 constraint. Therefore, we can generate extrapolated data by setting the sign of some weights to negative values and varying the weights not summing to", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "1. 
After obtaining the interpolated and extrapolated data, we search for their closest data points in the offline dataset $\\mathcal{D}$ and calculate their corresponding distance $\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|$ and Q-value difference $\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|$ . Figure 7a shows the relationship between the distance to dataset $\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|$ and the Q value difference $\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|$ (LHS of Theorem 1). We also report the learned state-conditioned distance value $g(s,a)$ on these generated data in Figure 7b.", + "bbox": [ + 169, + 103, + 826, + 176 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/eeb3ecf239a7fddf6fea693422bd624507d740ee5eb0e0d9121775734a4a727b.jpg", + "image_caption": [ + "(a) Relationship between $\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|$ and $\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 191, + 390, + 342 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/eab23ce40935c956c124f56f40c880ba2fbc3898c3859eb796b8a7a5982dc1f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 392, + 195, + 602, + 340 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/09526943a1758135a2ffa575529a71311d5357db9ec3ab1385b0eced0bbbc036.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 196, + 818, + 340 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/0beaed88dd5d90733469244a12c970450bed7aec876c42609af9dd8827c9385e.jpg", + "image_caption": [ + "(b) Relationship between $\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|$ and $g(x)$" + ], + "image_footnote": [], + "bbox": [ + 176, + 375, + 390, + 523 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/30752cd9b63981eab4965437fe946e333de3d07684b283e7ae370bce67f9ed57.jpg", + "image_caption": [ + "Figure 7: Quantitative experiments of Theorem 1 on the D4RL MuJoCo-medium datasets. The red star-shaped dots are the interpolated data and the circle dots are the extrapolated data. The color of the dots represents $\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|$ values in (a) and $g(x)$ values in (b), respectively. The darker the color, the smaller the corresponding value. In (a), the yellow dash line is the empirical upper bound of $\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 392, + 375, + 604, + 523 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/52e5bf46ea9c1b5f3543bb8d123882bb81995122161d46e7eb5fb6e383d8a770.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 375, + 818, + 523 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Figure 7a demonstrates that the interpolated data enjoy a tighter empirical upper bound of $\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x)) \\|$ (LHS of Theorem 1) than most of the extrapolated data. Moreover, the empirical upper bound of the Q-value difference grows with the increase of the sample-to-dataset distance $\\| x - \\mathrm{Proj}_{\\mathcal{D}(x)} \\|$ , which is consistent with Theorem 1 (the upper bound of value difference of deep Q function is well controlled by distance to the dataset). Figure 7b shows that the state-conditioned distance function $g(s, a)$ can output low values for interpolated data and some near-dataset extrapolated data, and thus can be used as a relaxed policy constraint in these OOD regions.", + "bbox": [ + 169, + 642, + 826, + 742 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C STATE-CONDITIONED DISTANCE FUNCTION", + "text_level": 1, + "bbox": [ + 171, + 767, + 576, + 782 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.1 PROOF OF PROPERTY 1", + "text_level": 1, + "bbox": [ + 171, + 801, + 377, + 816 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Proof. Given a state-action pair from the training data $(s, a) \\sim \\mathcal{D}$ , we synthetic random noise actions from a uniform distribution over the action space, i.e. $\\hat{a} \\sim \\text{Unif}(\\mathcal{A})$ . Then the distance function $g(\\cdot)$ is trained by Eq. 
(24).", + "bbox": [ + 169, + 830, + 825, + 875 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {g} \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\mathbb {E} _ {\\hat {a} \\sim U n i f (\\mathcal {A})} \\left[ \\| \\hat {a} - a \\| - g (s, \\hat {a}) \\right] ^ {2} \\right] \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 898, + 825, + 929 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "$\\left[\\| \\hat{a} - a\\| - g(s, \\hat{a})\\right]^2$ can be upper bounded by some finite constants because $S \\times \\mathcal{A}$ is compact in our analysis. The optimization problem in Eq. (24) can be reformulated as the following form according to the Fubini's Theorem.", + "bbox": [ + 169, + 103, + 823, + 146 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {g} \\mathbb {E} _ {\\hat {a} \\sim U n i f (\\mathcal {A})} \\left[ \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\| \\hat {a} - a \\| - g (s, \\hat {a}) \\right] ^ {2} \\right] \\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 162, + 825, + 191 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Note that the objective of Eq. 
(25) can be also written as a functional $J[g(s, \\hat{a})]$ with respect to function $g$ in the following form:", + "bbox": [ + 169, + 200, + 823, + 231 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nJ [ g (s, \\hat {a}) ] = \\int_ {\\mathcal {A}} \\frac {1}{| \\mathcal {A} |} \\left[ \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} [ \\| \\hat {a} - a \\| - g (s, \\hat {a}) ] ^ {2} \\right] \\mathrm {d} \\hat {a} = \\int_ {\\mathcal {A}} F (s, \\hat {a}, g (s, \\hat {a})) \\mathrm {d} \\hat {a} \\tag {26}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 236, + 825, + 268 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Based on calculus of variation, the extrema (maxima or minima) of functional $J[g(s,\\hat{a})]$ can be obtained by solving the associated Euler-Langrane equation $(\\partial F / \\partial g = 0)$ . In our case, it requires the optimal state-conditioned distance function $g^{*}$ satisfies the following conditions:", + "bbox": [ + 169, + 281, + 823, + 325 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {\\partial}{\\partial g ^ {*}} \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} [ \\| \\hat {a} - a \\| - g ^ {*} (s, \\hat {a}) ] ^ {2} = 0 \\\\ \\Rightarrow \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\frac {\\partial}{\\partial g ^ {*}} [ \\| \\hat {a} - a \\| - g ^ {*} (s, \\hat {a}) ] ^ {2} \\right] = 0 (\\text {D N N i s c o n t i n u o u s}) \\tag {27} \\\\ \\Rightarrow \\quad \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\left\\| \\hat {a} - a \\right\\| - g ^ {*} (s, \\hat {a}) \\right] = 0 \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 340, + 825, + 426 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Conditioned on a state $s \\in \\mathcal{D}$ , the optimal state-conditioned distance function in Eq. 
(27) satisfies the following conditions:", + "bbox": [ + 169, + 436, + 823, + 465 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\int_ {\\mathcal {A}} \\| \\hat {a} - a \\| \\mu (s, a) \\mathrm {d} a - \\int_ {\\mathcal {A}} \\mu (s, a) \\mathrm {d} a g ^ {*} (s, \\hat {a}) = 0, s \\in \\mathcal {D} \\\\ \\Rightarrow g ^ {*} (s, \\hat {a}) = \\frac {\\int_ {\\mathcal {A}} \\| \\hat {a} - a \\| \\mu (s , a) \\mathrm {d} a}{\\int_ {\\mathcal {A}} \\mu (s , a) \\mathrm {d} a}, s \\in \\mathcal {D} \\tag {28} \\\\ \\Rightarrow g ^ {*} (s, \\hat {a}) = \\int_ {\\mathcal {A}} C (s, a) \\| \\hat {a} - a \\| \\mathrm {d} a, s \\in \\mathcal {D} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 470, + 825, + 575 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "where, $\\mu(s,a)$ is the empirical distribution on a finite offline dataset $\\mathcal{D} = \\{(x_i)\\}_{i=1}^n$ , i.e., the sum of the Dirac measures $\\frac{1}{n}\\sum_{i=1}^{n}\\delta_{x_i}$ . $\\forall (s,a) \\notin \\mathcal{D}, \\mu(s,a) = 0. \\forall (s,a) \\in \\mathcal{D}, \\mu(s,a) > 0$ . $C(s,a) = \\frac{\\mu(s,a)}{\\int_A\\mu(s,a)\\mathrm{d}a} \\geq 0$ and $\\int_A C(s,a)\\mathrm{d}a = 1$ . Because $L_2$ -norm is convex and the non-negative combination of convex functions is still convex, $g^*(s,\\hat{a})$ is a convex function w.r.t. $\\hat{a}$ . 
In addition, $\\forall \\hat{a} \\in \\mathcal{A}$ , by the Jensen inequality, we have:", + "bbox": [ + 169, + 588, + 826, + 669 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\ng ^ {*} (s, \\hat {a}) \\geq \\left\\| \\hat {a} - \\mathbb {E} _ {a \\sim U n i f (\\mathcal {A})} [ C (s, a) a ] \\right\\| = \\| \\hat {a} - a _ {o} (s) \\|, s \\in \\mathcal {D} \\tag {29}\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 674, + 825, + 694 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "where $a_{o}(s)\\coloneqq \\mathbb{E}_{a\\sim Unif(\\mathcal{A})}[C(s,a)a], s\\in \\mathcal{D}$ is the state-conditioned centroid of training dataset.", + "bbox": [ + 169, + 705, + 815, + 723 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/d73f8aa0feb0845b0c7bc820ddc4d1e4bcfe623ce3bb9813ccf118a16fc014c2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 728, + 825, + 741 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C.2 PROOF OF PROPERTY 2", + "text_level": 1, + "bbox": [ + 171, + 758, + 379, + 772 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Proof. 
The negative gradient of the optimal state-conditioned distance function can be formulated as:", + "bbox": [ + 169, + 785, + 826, + 801 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} - \\nabla_ {\\hat {a}} g ^ {*} (s, \\hat {a}) = - \\int_ {\\mathcal {A}} C (s, a) \\frac {\\hat {a} - a}{\\| \\hat {a} - a \\|} \\mathrm {d} a, \\forall \\hat {a} \\in \\mathcal {A}, s \\in \\mathcal {D} \\tag {30} \\\\ = \\frac {1}{\\int_ {\\mathcal {A}} \\mu (s , a) \\mathrm {d} a} \\int_ {\\mathcal {A}} \\mu (s, a) \\frac {- (\\hat {a} - a)}{\\| \\hat {a} - a \\|} \\mathrm {d} a, \\forall \\hat {a} \\in \\mathcal {A}, s \\in \\mathcal {D} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 816, + 825, + 885 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Observe that the direction of the negative gradient of $g^{*}(s,\\hat{a})$ is related to the integral of vector $-(\\hat{a} - a)$ (points towards $a$ ). When $(s,a)\\notin \\mathcal{D}, - (\\hat{a} - a)$ doesn't influence the final gradient because", + "bbox": [ + 171, + 895, + 825, + 925 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "$\\mu(s, a) = 0$ . Therefore, $-(\\hat{a} - a)$ only contribute to the final gradient of $g^{*}(s, \\hat{a})$ for $(s, a) \\in \\mathcal{D}$ as $\\mu(s, a) > 0$ . For a given $s \\in \\mathcal{D}$ and any extrapolated action $\\hat{a}$ that lies outside the convex hull of training data, the integral of vector $-( \\hat{a} - a)$ is basically a non-negative combination of vectors $-( \\hat{a} - a)$ that point toward actions $a \\in \\mathcal{D}$ inside the convex hull. 
As a result, it's easy to see that $-\\nabla_{\\hat{a}} g^{*}(s, \\hat{a})$ also points inside the convex hull formed by the data.", + "bbox": [ + 169, + 103, + 826, + 176 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/3a7448145dae4f9942d834fdc119dc02af699d7ba16aa91551155dadcaf152e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 181, + 823, + 193 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D THEORETICAL ANALYSIS OF DOGE", + "text_level": 1, + "bbox": [ + 171, + 215, + 511, + 231 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In this section, we analyze the performance of the policy learned by DOGE. We first adopt the Bellman-consistent coefficient from (Xie et al., 2021a) to quantify the distributional shift from the perspective of deep $Q$ functions generalization. Then, we gives the upper bound of the Bellman-consistent coefficient under the NTK regime (Appendix D.1). At last, we give the performance bound of DOGE (Appendix D.2).", + "bbox": [ + 169, + 247, + 826, + 319 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D.1 UPPER BOUND OF BELLMAN-CONSISTENT COEFFICIENT", + "text_level": 1, + "bbox": [ + 171, + 335, + 609, + 349 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Let us first review the definition of Bellman-consistent coefficient $\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)$ in (Xie et al., 2021a). We define $\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)$ to measure the distributional shift from an arbitrary distribution $v$ to data distribution $\\mu$ , w.r.t. $\\mathcal{F}$ and $\\pi$ . 
$\\mathcal{F}$ is the function class of $Q$ networks.", + "bbox": [ + 169, + 361, + 826, + 405 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) := \\sup _ {Q \\in \\mathcal {F}} \\frac {\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , v} ^ {2}}{\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , \\mu} ^ {2}} \\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 421, + 825, + 459 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where the $\\mu$ -weighted norm (square) is defined as $\\|f\\|_{2,\\mu}^2 \\coloneqq \\mathbb{E}_{\\mu}[\\|f\\|^2]$ , which is also applicable for any distribution $v$ . $\\mathcal{T}^{\\pi}Q$ is the Bellman operator of policy $\\pi$ , defined as $\\mathcal{T}^{\\pi}Q(s,a) \\coloneqq r(s,a) + \\gamma \\mathbb{E}_{a' \\sim \\pi(\\cdot|s'), s' \\sim \\mathcal{P}(\\cdot|s,a)}[Q(s',a')] \\coloneqq r(s,a) + \\gamma \\mathbb{P}^{\\pi}[Q(s',a')]$ . $\\mathbb{P}^{\\pi}[\\cdot]$ is the brief notation for $\\mathbb{E}_{a' \\sim \\pi(\\cdot|s'), s' \\sim \\mathcal{P}(\\cdot|s,a)}[\\cdot]$ . The smaller the ratio of the Bellman error under $v$ and $\\mu$ , the more transferable the $Q$ function from $\\mu$ to $v$ , even when $\\sup_{(s,a)} \\frac{v(s,a)}{\\mu(s,a)} = \\infty$ . Then we give the proof of Theorem 2 (Upper bound of Bellman-consistent coefficient).", + "bbox": [ + 169, + 470, + 826, + 566 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Proof. We denote $x = (s, a)$ and $x' = (s', a')$ . $x_o = \\mathbb{E}_{x \\sim \\mathcal{D}}[x]$ is the centroid of offline dataset. $d_1 = \\| x - x_o \\|$ and $d_2 = \\| x' - x_o \\|$ are the sample-to-centroid distances. Let $\\mu(x)$ be the distribution under the offline dataset and $v(x)$ be any distribution. Then, for the numerator in Eq. (8) and Eq. 
(31), we have the following inequalities.", + "bbox": [ + 169, + 580, + 828, + 638 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2, v} ^ {2} \\\\ = \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\| Q (x) - r (x) - \\gamma \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| ^ {2} \\\\ = \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\| Q (x) - \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] - r (x) + (1 - \\gamma) \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| ^ {2} \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ \\| Q (x) - \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| + \\| r (x) \\| + \\| (1 - \\gamma) \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| \\right] ^ {2} (\\text {T r i a n g l e}) \\\\ = \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ \\| Q (x) - Q \\left(x _ {o}\\right) + Q \\left(x _ {o}\\right) - \\mathbb {P} ^ {\\pi} \\left[ Q \\left(x ^ {\\prime}\\right) \\right] \\| + \\| r (x) \\| + (1 - \\gamma) \\| \\mathbb {P} ^ {\\pi} \\left[ Q \\left(x ^ {\\prime}\\right) \\right] - Q \\left(x _ {o}\\right) + Q \\left(x _ {o}\\right) \\| \\right] ^ {2} \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ (1 - \\gamma) \\| Q \\left(x _ {o}\\right) \\| + \\| r (x) \\| + \\| Q (x) - Q \\left(x _ {o}\\right) \\| + (2 - \\gamma) \\| \\mathbb {P} ^ {\\pi} \\left[ Q \\left(x ^ {\\prime}\\right) \\right] - Q \\left(x _ {o}\\right) \\| \\right] ^ {2} (\\text {T r i a n g l e}) \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ \\underbrace {(1 - \\gamma) \\| Q \\left(x _ {o}\\right) \\| + \\| r (x) \\|} _ {\\mathcal {I} _ {1}} + \\underbrace {\\| Q (x) - Q \\left(x _ {o}\\right) \\|} _ {\\mathcal {I} _ {2}} + \\underbrace {(2 - \\gamma) \\mathbb {P} ^ {\\pi} [ \\| Q \\left(x ^ {\\prime}\\right) - Q \\left(x _ {o}\\right) \\| ]} _ {\\mathcal {I} _ {3}} \\right] ^ {2} (\\text {J e n s e n}) \\tag {32} \\\\ 
\\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 646, + 846, + 888 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The RHS contains three parts: $\\mathcal{I}_1 = (1 - \\gamma)\\| Q(x_o)\\| +\\| r(x)\\|$ , $\\mathcal{I}_2 = \\| Q(x) - Q(x_o)\\|$ and $\\mathcal{I}_3 = (2 - \\gamma)\\mathbb{P}^\\pi [\\| Q(x') - Q(x_o)\\| ]$ . Because $\\| r(x)\\| \\in [0,R_{\\max}],\\forall x\\in S\\times \\mathcal{A},\\mathcal{I}_1$ can be upper", + "bbox": [ + 169, + 893, + 826, + 926 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "bounded as:", + "bbox": [ + 171, + 104, + 256, + 118 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {I} _ {1} \\leq (1 - \\gamma) Q \\left(x _ {o}\\right) + R _ {\\max } \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 119, + 826, + 136 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "By applying Lemma 4, $\\mathcal{I}_2$ is upper bounded as", + "bbox": [ + 171, + 148, + 483, + 165 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {I} _ {2} \\leq C _ {1} \\left[ \\sqrt {\\min (\\| x \\| , \\| x _ {o} \\|) d _ {1}} + 2 d _ {1} \\right] \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 184, + 826, + 210 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "$\\mathcal{I}_3$ is upper bounded as", + "bbox": [ + 169, + 223, + 328, + 238 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {I} _ {3} \\leq C _ {1} (2 - \\gamma) \\mathbb {P} ^ {\\pi} \\left[ \\sqrt {\\min (\\| x ^ {\\prime} \\| , \\| x _ {o} \\|) d _ {2}} + 2 d _ {2} \\right] \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 257, + 825, + 284 + ], + 
"page_idx": 20 + }, + { + "type": "text", + "text": "In addition, we denote $C_2 \\coloneqq \\sqrt{\\sup_{x \\in S \\times A} \\|x\\|}$ . Then, $\\mathcal{I}_2$ and $\\mathcal{I}_3$ can be further upper bounded by", + "bbox": [ + 169, + 297, + 818, + 316 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {I} _ {2} \\leq C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}\\right) \\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 334, + 825, + 359 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {I} _ {3} \\leq (2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}\\right) \\tag {37}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 382, + 825, + 401 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The above relaxation of the upper bound in Eq. (36) and Eq. (37) is not necessary, but for notation brevity, we choose to relax the upper bound by treating $C_2 \\coloneqq \\sqrt{\\sup_{x \\in S \\times A} \\| x \\|}$ .", + "bbox": [ + 169, + 414, + 823, + 446 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Plug Eq. (33), Eq. (36) and Eq. (37) into the RHS of Eq. 
(32), we can get", + "bbox": [ + 169, + 450, + 658, + 465 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| Q - \\mathcal {T} ^ {\\pi} Q \\right\\| _ {2, v} ^ {2} \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ (1 - \\gamma) Q (x _ {o}) + R _ {\\max } + C _ {1} (C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}) + (2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} (C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}) \\right] ^ {2} \\\\ = \\left\\| (1 - \\gamma) Q \\left(s _ {o}, a _ {o}\\right) + R _ {\\max } + C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}\\right) + (2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}\\right) \\right\\| _ {2, v} ^ {2} \\tag {38} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 473, + 823, + 573 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For the denominator $\\| Q - \\mathcal{T}^{\\pi}Q\\|_{2,\\mu}^{2}$ in Eq. (8) and Eq. (31), because the $Q$ function is approximated, there exists approximation error between $Q$ and $\\mathcal{T}^{\\pi}Q$ , i.e., $Q - \\mathcal{T}^{\\pi}Q \\geq \\epsilon$ . In addition, the distribution $\\mu$ contains some mismatch w.r.t. the equilibrium distribution induced by policy $\\pi$ . Therefore, it is reasonable to assume $\\| Q - \\mathcal{T}^{\\pi}Q\\|_{2,\\mu}^{2} \\geq \\epsilon_{\\mu} > 0$ .", + "bbox": [ + 169, + 579, + 826, + 638 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Then, we can complete the proof by plugging the upper bound in Eq. (38) and $\\| Q - \\mathcal{T}^{\\pi}Q\\|_{2,\\mu}^{2}\\geq$ $\\epsilon_{\\mu} > 0$ into Eq. (8) or Eq. 
(31).", + "bbox": [ + 169, + 643, + 823, + 675 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) \\leq \\frac {1}{\\epsilon_ {\\mu}} \\left\\| \\underbrace {(1 - \\gamma) Q \\left(s _ {o} , a _ {o}\\right) + R _ {\\max }} _ {\\mathcal {B} _ {1}} + \\underbrace {C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}\\right)} _ {\\mathcal {B} _ {2}} + \\underbrace {(2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}\\right)} _ {\\mathcal {B} _ {3}} \\right\\| _ {2, v} ^ {2} \\tag {39}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 683, + 831, + 770 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "To be mentioned, the distance regularization in DOGE compels the learned policy to output actions that are near the state-conditioned centroid of the dataset and thus $\\mathcal{B}_2$ and $\\mathcal{B}_3$ can be driven to some small values. $\\mathcal{B}_1$ is independent of the distributional shift. Therefore, $\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)$ can be bounded by some finite constants under DOGE. Therefore, the constrained policy set induced by DOGE is essentially a Bellman-consistent constrained policy set $\\Pi_{\\mathcal{B}}$ defined in Definition 2. In addition, other policy constraint methods such as BEAR (Kumar et al., 2019) can also have bounded $\\mathcal{B}$ . 
However, these policy constraint methods do not allow the learned policy shifts to those generalizable distributions where $\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)$ is small but $\\sup_{(s,a)}\\frac{v(s,a)}{\\mu(s,a)}\\to \\infty$ , which is essentially different with DOGE.", + "bbox": [ + 169, + 792, + 826, + 924 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "D.2 PERFORMANCE OF THE POLICY LEARNED BY DOGE", + "text_level": 1, + "bbox": [ + 171, + 103, + 584, + 118 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Here, we briefly review the definition of the Bellman-consistent constrained policy set $\\Pi_{\\mathcal{B}}$ defined in Definition 2. The Bellman-consistent coefficient under the transition induced by $\\Pi_{\\mathcal{B}}$ can be bounded by some finite constants $l(k)$ :", + "bbox": [ + 169, + 128, + 823, + 174 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {B} \\left(\\rho_ {k}, \\mu , \\mathcal {F}, \\pi\\right) \\leq l (k) \\tag {40}\n$$\n", + "text_format": "latex", + "bbox": [ + 423, + 191, + 825, + 209 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where, $\\rho_0$ is the initial state-action distribution and $\\mu$ is the distribution of training data. 
$\\rho_{k} = \\rho_{0}P^{\\pi_{1}}P^{\\pi_{2}}\\ldots P^{\\pi_{k}},\\forall \\pi_{1},\\pi_{2},\\ldots ,\\pi_{k}\\in \\Pi_{\\mathcal{B}}$ and $P^{\\pi_i}$ is the transition operator on states induced by $\\pi_{i}$ , i.e., $P^{\\pi_i}(s',a'|s,a) = \\mathcal{P}(s'|s,a)\\pi_i(a'|s')$ .", + "bbox": [ + 169, + 215, + 826, + 260 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We denote the constrained Bellman operator induced by $\\Pi_{\\mathcal{B}}$ as $\\mathcal{T}^{\\Pi_{\\mathcal{B}}}$ , and $\\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q(s,a) := r(s,a) + \\max_{\\pi \\in \\Pi_{\\mathcal{B}}} \\gamma \\mathbb{P}^{\\pi}[Q(s',a')]$ . $\\mathcal{T}^{\\Pi_{\\mathcal{B}}}$ can be seen as an operator in a redefined MDP and hence is a contraction mapping and has a fixed point. We denote $Q^{\\Pi_{\\mathcal{B}}}$ as the fixed point of $\\mathcal{T}^{\\Pi_{\\mathcal{B}}}$ , i.e., $Q^{\\Pi_{\\mathcal{B}}} = \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{\\Pi_{\\mathcal{B}}}$ .", + "bbox": [ + 169, + 263, + 826, + 321 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The Bellman optimal operator $\\mathcal{T}$ is", + "bbox": [ + 171, + 328, + 406, + 343 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} Q (s, a) := r (s, a) + \\max _ {\\pi} \\gamma \\mathbb {P} ^ {\\pi} [ Q \\left(s ^ {\\prime}, a ^ {\\prime}\\right) ] \\tag {41}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 344, + 825, + 367 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "$\\mathcal{T}$ is also a contraction mapping. 
Its fixed point is the optimal value function $Q^{*}$ and $Q^{*} = \\mathcal{T}Q^{*}$ .", + "bbox": [ + 169, + 376, + 808, + 393 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Then, by the triangle inequality, we have:", + "bbox": [ + 169, + 398, + 447, + 412 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| Q ^ {*} - Q ^ {\\pi_ {n}} \\right\\| _ {\\rho_ {0}} = \\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} + Q ^ {\\Pi_ {\\mathcal {B}}} - Q ^ {\\pi_ {n}} \\right\\| _ {\\rho_ {0}} \\\\ \\leq \\underbrace {\\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\rho_ {0}}} _ {L _ {1}} + \\underbrace {\\left\\| Q ^ {\\Pi_ {\\mathcal {B}}} - Q ^ {\\pi_ {n}} \\right\\| _ {\\rho_ {0}}} _ {L _ {2}} \\tag {42} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 429, + 825, + 487 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where $Q^{\\pi_n}$ is the true $Q$ value of policy $\\pi_{n}$ . $\\pi_{n}$ is the greedy policy w.r.t. to $Q_{n}$ in the Bellman-consistent constrained policy set $\\Pi_{\\mathcal{B}}$ , i.e., $\\pi_{n} = \\sup_{\\pi \\in \\Pi_{\\mathcal{B}}}\\mathbb{E}_{a\\sim \\pi (\\cdot |s)}[Q_{n}(s,a)]$ . $Q_{n}$ is the $Q$ function after $n$ -th value iteration under the constrained Bellman operator $\\mathcal{T}^{\\Pi_{\\mathcal{B}}}$ .", + "bbox": [ + 169, + 494, + 826, + 540 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "For $L_{1}$ part in Eq. 
(42), we first focus on the infinity norm.", + "bbox": [ + 169, + 546, + 558, + 561 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} = \\left\\| \\mathcal {T} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} \\\\ \\leq \\left\\| \\mathcal {T} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} + \\left\\| \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {*} \\right\\| _ {\\infty} \\\\ \\leq \\left\\| \\mathcal {T} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} + \\gamma \\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} \\quad \\left(\\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} \\text {is } \\gamma \\text {-contraction}\\right) \\tag {43} \\\\ = \\alpha (\\Pi_ {\\mathcal {B}}) + \\gamma \\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\| _ {\\infty} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 580, + 823, + 661 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where $\\alpha (\\Pi_{\\mathcal{B}})\\coloneqq \\| \\mathcal{T}Q^{*} - \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{\\Pi_{\\mathcal{B}}}\\|_{\\infty}$ is the suboptimality constant. Then, we get $\\| Q^{*} - Q^{\\Pi_{\\mathcal{B}}}\\|_{\\infty}\\leq$ $\\frac{\\alpha(\\Pi_{\\mathcal{B}})}{1 - \\gamma}$ and $L_{1}\\leq \\| Q^{*} - Q^{\\Pi_{\\mathcal{B}}}\\|_{\\infty}\\leq \\frac{\\alpha(\\Pi_{\\mathcal{B}})}{1 - \\gamma}$", + "bbox": [ + 169, + 672, + 823, + 709 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "For $L_{2}$ , we introduce Lemma 5, which upper bounds $\\| Q^{\\Pi_{\\mathcal{B}}} - Q^{\\pi_n} \\|_{2,\\rho_0}^2$ . The proof of Lemma 5 can be obtained by directly replacing $Q^{*}$ with $Q^{\\Pi_{\\mathcal{B}}}$ in the Appendix F.3. 
In (Le et al., 2019), because $Q^{\\Pi_{\\mathcal{B}}}$ is the optimal value function under the modified MDP induced by $\\mathcal{T}^{\\Pi_{\\mathcal{B}}}$ .", + "bbox": [ + 169, + 715, + 823, + 761 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Lemma 5. (Upper bound of error propagation). $\\| Q^{\\Pi_{\\mathcal{B}}} - Q^{\\pi_n}\\|_{2,\\rho_0}^2$ can be upper bounded as", + "bbox": [ + 169, + 762, + 790, + 780 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| Q ^ {\\Pi_ {\\mathcal {B}}} - Q ^ {\\pi_ {n}} \\right\\| _ {2, \\rho_ {0}} ^ {2} \\leq \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {0} (d s, d a) \\left[ \\sum_ {k = 0} ^ {n - 1} \\alpha_ {k} A _ {k} \\epsilon_ {k} ^ {2} + \\alpha_ {n} A _ {n} \\left(Q ^ {\\Pi_ {\\mathcal {B}}} - Q _ {0}\\right) ^ {2} \\right] (s, a) \\tag {44}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 782, + 825, + 832 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where", + "bbox": [ + 171, + 832, + 217, + 844 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\epsilon_ {k} = Q _ {k + 1} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q _ {k} \\tag {45}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 842, + 825, + 859 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {k} = \\frac {(1 - \\gamma) \\gamma^ {n - k - 1}}{1 - \\gamma^ {n + 1}} \\text {for } k < n \\tag {46}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 861, + 825, + 900 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {n} = \\frac {(1 - \\gamma) \\gamma^ {n}}{1 - \\gamma^ {n + 1}}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 895, + 501, + 928 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": 
"page_number", + "text": "22", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\nA _ {k} = \\frac {1 - \\gamma}{2} \\sum_ {m \\geq 0} \\gamma^ {m} \\left(P ^ {\\pi_ {n}}\\right) ^ {m} \\left[ \\left(P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}\\right) ^ {n - k} + P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}} \\dots P ^ {\\pi_ {k + 1}} \\right] \\quad \\text {for } k < n \\tag {47}\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 99, + 825, + 145 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\nA _ {n} = \\frac {1 - \\gamma}{2} \\sum_ {m \\geq 0} \\gamma^ {m} (P ^ {\\pi_ {n}}) ^ {m} \\left[ (P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}) ^ {n + 1} + P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}} \\dots P ^ {\\pi_ {0}} \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 141, + 663, + 176 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "$Q_{0}$ is the $Q$ function after initialization. Note that $\\lim_{n\\to \\infty}\\left[\\alpha_nA_n(Q^{\\Pi_{\\mathcal{B}}} - Q_0)^2\\right] = 0$ , so we leave out this term for analysis simplicity. In addition, each $A_{k}$ is a probability kernel that combines $P^{\\pi_i}$ and $P^{\\pi^{\\Pi_{\\mathcal{B}}}}$ (the transition operator on states induced by the constrained optimal policy $\\pi^{\\Pi_{\\mathcal{B}}}\\in \\Pi_{\\mathcal{B}}$ ) and $\\sum_{k}\\alpha_{k} = 1$ .", + "bbox": [ + 169, + 193, + 826, + 255 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The key part in Eq. 
(44) is $\\int_{\\mathcal{S} \\times \\mathcal{A}} \\rho_0 A_k \\epsilon_k^2$ and we expand this term as the following shows.", + "bbox": [ + 169, + 260, + 764, + 279 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {0} A _ {k} \\epsilon_ {k} ^ {2} = \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\frac {1 - \\gamma}{2} \\rho_ {0} \\sum_ {m \\geq 0} \\gamma^ {m} (P ^ {\\pi_ {n}}) ^ {m} \\left[ (P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}) ^ {n - k} + P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}}... P ^ {\\pi_ {k + 1}} \\right] \\epsilon_ {k} ^ {2} \\\\ = \\frac {1 - \\gamma}{2} \\sum_ {m \\geq 0} \\gamma^ {m} \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\left[ \\left(P ^ {\\pi_ {n}}\\right) ^ {m} \\left(P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}\\right) ^ {n - k} + \\left(P ^ {\\pi_ {n}}\\right) ^ {m} P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}} \\dots P ^ {\\pi_ {k + 1}} \\right] \\rho_ {0} \\epsilon_ {k} ^ {2} \\tag {48} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 282, + 823, + 373 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "As Eq. (40) shows, the policy set induced by DOGE is a Bellman-consistent constrained policy set $\\Pi_{\\mathcal{B}}$ defined in Definition 2. Therefore, let $\\rho_0$ be the initial state-action distribution and $\\mu$ denote the distribution of training data. For any policy $\\pi_1,\\pi_2,\\dots,\\pi_k\\in \\Pi_{\\mathcal{B}}$ , the distribution after $k$ -th Bellman-consistent iteration is $\\rho_{k} = \\rho_{0}P^{\\pi_{1}}P^{\\pi_{2}}\\ldots P^{\\pi_{k}}$ , there exists some finite constants $l(k)$ , that $\\mathcal{B}(\\rho_k,\\mu ,\\mathcal{F},\\pi)\\leq l(k)$ holds. 
Then we can get the following inequalities.", + "bbox": [ + 169, + 378, + 826, + 450 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2, \\rho_ {k}} ^ {2} \\leq \\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2, \\mu} ^ {2} l (k)\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 465, + 584, + 486 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {k} \\epsilon^ {2} \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\mu \\epsilon^ {2} l (k) \\quad (\\epsilon = Q - \\mathcal {T} ^ {\\pi} Q) \\tag {49}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 486, + 823, + 520 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "As a result, by applying the result of Eq. (49) to Eq. (48), we can get", + "bbox": [ + 169, + 527, + 627, + 544 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {0} A _ {k} \\epsilon_ {k} ^ {2} \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} (1 - \\gamma) \\sum_ {m \\geq 0} \\gamma^ {m} \\epsilon_ {k} ^ {2} \\mu l (m + n - k) \\tag {50}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 546, + 825, + 584 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Plugs Eq. (50) into Eq. (44) and leaves out $\\left[\\alpha_{n}A_{n}(Q^{\\Pi_{\\mathcal{B}}} - Q_{0})^{2}\\right]$ in Eq. 
(44), we get", + "bbox": [ + 169, + 595, + 733, + 614 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\lim _ {n \\to \\infty} L _ {2} ^ {2} \\leq \\lim _ {n \\to \\infty} \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\left[ \\sum_ {k = 0} ^ {n - 1} (1 - \\gamma) \\sum_ {m \\geq 0} \\gamma^ {m} l (m + n - k) \\alpha_ {k} \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\right] \\\\ = \\lim _ {n \\to \\infty} \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\left[ \\frac {1}{1 - \\gamma^ {n + 1}} \\sum_ {k = 0} ^ {n - 1} (1 - \\gamma) ^ {2} \\sum_ {m \\geq 0} \\gamma^ {m + n - k - 1} l (m + n - k) \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\right] \\\\ \\leq \\lim _ {n \\rightarrow \\infty} \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\left[ \\frac {1}{1 - \\gamma^ {n + 1}} L (\\Pi_ {\\mathcal {B}}) ^ {2} \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\right] \\\\ = \\left[ \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} \\right] ^ {2} L \\left(\\Pi_ {\\mathcal {B}}\\right) ^ {2} \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\tag {51} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 633, + 831, + 824 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where, $L(\\Pi_{\\mathcal{B}}) = \\sqrt{(1 - \\gamma)^2\\sum_{k=1}^{\\infty}k\\gamma^{k-1}l(k)}$ . 
Then, we can bound $L_2$ by", + "bbox": [ + 169, + 830, + 666, + 848 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\lim _ {n \\rightarrow \\infty} L _ {2} \\leq \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} L \\left(\\Pi_ {\\mathcal {B}}\\right) \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {\\mu} \\tag {52}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 851, + 825, + 883 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "With the upper bound of $L_{1}$ and $\\lim_{n\\to \\infty}L_2$ , we can complete the proof by adding these two terms together.", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\lim _ {n \\rightarrow \\infty} \\| Q ^ {*} - Q ^ {\\pi_ {n}} \\| _ {\\rho_ {0}} \\leq \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} \\left[ L \\left(\\Pi_ {\\mathcal {B}}\\right) \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {\\mu} + \\frac {1 - \\gamma}{2 \\gamma} \\alpha \\left(\\Pi_ {\\mathcal {B}}\\right)\\right] \\tag {53}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 113, + 825, + 150 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "E IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 165, + 439, + 181 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "DOGE can build on top of standard online actor-critic algorithms such as TD3(Fujimoto et al., 2018) and SAC(Haarnoja et al., 2018). We choose TD3 as our base because of its simplicity compared to other methods. We build DOGE on top of TD3 by simply plugging the state-conditioned distance function as a policy regularization term during policy training process. Then, the learning objective of policy $\\pi$ in Eq. 
(7) can be formulated as:", + "bbox": [ + 169, + 196, + 826, + 267 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\pi = \\arg \\max _ {\\pi} \\min _ {\\lambda} \\mathbb {E} _ {s \\sim \\mathcal {D}} [ \\beta Q (s, \\pi (s)) - \\lambda (g (s, \\pi (s)) - G) ] \\quad \\text {s . t .} \\lambda \\geq 0 \\tag {54}\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 273, + 825, + 297 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "The $Q$ function, policy and state-conditioned distance function networks are represented by 3 layers ReLU activated MLPs with 256 units for each hidden layer and are optimized by Adam optimizer. In addition, we normalize each dimension of state to a standard normal distribution for Mujoco tasks. The hyperparameters of DOGE are listed in Table 2.", + "bbox": [ + 169, + 309, + 826, + 367 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/6b7007033bbc2523388d9280a0faa91e00d9e964a468f02d80fcb743b8ffd3e5.jpg", + "table_caption": [ + "Table 2: Hyperparameters of DOGE" + ], + "table_footnote": [], + "table_body": "
HyperparametersValue
Shared parametersOptimizerAdam
StandardNormalize stateTrue for Mujoco
False for AntMaze
Batch size256
Layers3
Hidden dim256
TD3Actor learning rate3 × 10-4
Critic learning rate3 × 10-4for Mujoco
1 × 10-3for AntMaze
Discount factor γ0.99 for Mujoco
0.995 for AntMaze
Number of iterations106
Target update rate τ0.005
Policy noise0.2
Policy noise clipping0.5
Policy update frequency2
State-Conditioned Distance FunctionLearning rate1 × 10-3for Mujoco
1 × 10-4for AntMaze
Number of noise actions N20
Number of iterations Ng105for Mujoco
106for AntMaze
DOGEα{7.5, 17.5} Mujoco
{5, 10, 70} AntMaze
Lagrangian multiplier λclipped to [1, 100]
λ learning rate3e-4
", + "bbox": [ + 209, + 405, + 789, + 777 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "E.1 TD3'S IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 800, + 455, + 814 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "For the choice of the Critic learning rate and discount factor $\\gamma$ , we find that for AntMaze tasks, a high Critic learning rate can improve the stability of value function during training process. This may be because the AntMaze tasks require the value function to dynamic programs more times to \"stitch\" suboptimal trajectories than Mujoco tasks. Therefore, we choose $1 \\times 10^{-3}$ and 0.995 as the Critic learning rate and discount factor $\\gamma$ for AntMaze tasks, respectively. The other implementations such as policy noise scale and policy noise clipping are the same with author's implementation (Fujimoto et al., 2018).", + "bbox": [ + 169, + 825, + 826, + 924 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "E.2 STATE-CONDITIONED DISTANCE FUNCTION'S IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 169, + 103, + 727, + 118 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "We sample $N = 20$ noise actions from a uniform distribution that covers the full action space to approximate the estimation value in Eq. (4). We find $N = 20$ can balance the computation complexity and estimation accuracy and is the same sample numbers with CQL (Kumar et al., 2020b). The ablation of $N$ can be found in Fig. 15. 
The practical training objective of the state-conditioned distance function is as follows:", + "bbox": [ + 169, + 128, + 826, + 199 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {g} \\mathbb {E} _ {(s, a) \\in \\mathcal {D}, \\hat {a} _ {i} \\sim U n i f (\\mathcal {A})} \\left[ \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left[ \\| a - \\hat {a} _ {i} \\| - g (s, \\hat {a} _ {i}) \\right] ^ {2} \\right] \\tag {55}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 202, + 825, + 244 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "We find that a wider sample range than the max action space $[-a_{\\mathrm{max}}, a_{\\mathrm{max}}]$ is helpful to characterize the geometry of the full offline dataset. This is because some actions in the offline dataset lie at the boundary of the action space, which can only be sampled with little probability when sampling from a narrow distribution. At this time, the noise actions may not cover the geometry information near the boundary. Therefore, we sample noise actions from a uniform distribution that is 3 times wider than the max action space, i.e., $\\hat{a} \\sim \\text{Unif}[-3a_{\\mathrm{max}}, 3a_{\\mathrm{max}}]$ . For the learning rate, we find that a high learning rate enables a stable training process in Mujoco tasks. Therefore, we choose $1 \\times 10^{-3}$ and $1 \\times 10^{-4}$ as the distance function learning rate for Mujoco and AntMaze, respectively. We also observe that for Mujoco tasks, $10^{5}$ iterations can already produce a relatively good state-conditioned distance function, and training more times won't hurt the final results. 
To reduce computation, we only train the state-conditioned distance function for $10^{5}$ steps for Mujoco tasks.", + "bbox": [ + 169, + 275, + 826, + 429 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "E.3 HYPERPARAMETERS TUNING OF DOGE", + "text_level": 1, + "bbox": [ + 171, + 445, + 496, + 459 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The scale of $\\alpha$ determines the strength of policy constraint. We tune $\\alpha$ to balance the trade-off between policy constraint and policy improvement. To be mentioned, $\\alpha$ is tuned within only 5 candidates for 20 tasks (17.5 for hopper-m, hopper-m-r and all Mujoco random datasets; 7.5 for other Mujoco datasets; 5 for antmaze-u; 10 for antmaze-u-d; 70 for other AntMaze tasks). This is acceptable in offline policy tuning following (Kumar et al., 2019; Brandfonbrener et al., 2021). To ensure numerical stability, we clip the Lagrangian multiplier $\\lambda$ to [1, 100]. We also find a large initial $\\lambda$ enables stable training for Mujoco tasks but slows down AntMaze training. Therefore, the initial value of Lagrangian multiplier $\\lambda$ is 5 for Mujoco and 1 for AntMaze tasks, respectively.", + "bbox": [ + 169, + 470, + 826, + 584 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "E.4 PSEUDOCODE OF DOGE", + "text_level": 1, + "bbox": [ + 171, + 599, + 390, + 612 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The pseudocode of DOGE is listed in Algorithm 1. Changes we make based on TD3 (Fujimoto et al., 2018) are marked in red. The only modification is the training process of the additional state-conditioned distance function and the constrained actor update. 
We can perform 1M training steps on one GTX 3080Ti GPU in less than $50\\mathrm{min}$ for Mujoco tasks and 1h $40\\mathrm{min}$ for AntMaze tasks.", + "bbox": [ + 169, + 625, + 826, + 681 + ], + "page_idx": 24 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Our implementation for DOGE" + ], + "code_body": "Require: Dataset $\\mathcal{D}$ . State-conditioned distance network $g_{\\psi}$ . Policy network $\\pi_{\\phi}$ and target policy network $\\pi_{\\phi^{\\prime}}$ with $\\phi^{\\prime}\\gets \\phi$ . Value network $Q_{\\theta_i},i = 1,2$ and target value network $Q_{\\theta_i'}$ $i = 1,2$ with $\\theta_i^\\prime \\leftarrow \\theta_i$ . State-conditioned distance network training steps $N_{g}$ . Policy update frequency m. \n1: for $t = 0,1,\\dots ,M$ do \n2: Sample mini-batch transitions $\\{(s_i,a_i,r_i,s_i')\\} \\sim \\mathcal{D}$ \n3: if $t < N_g$ then \n4: State-Conditioned Distance Function Update: Update $\\psi$ as Eq. (55) shows. \n5: end if \n6: Critic Update: Update $\\theta_{i}$ using policy evaluation method in TD3. \n7: if $t$ mod $m = 0$ then \n8: Constrained Actor Update: Update $\\phi ,\\lambda$ via Eq. (54). 
\n9: Update target networks: $\\theta_i^\\prime \\gets \\tau \\theta_i + (1 - \\tau)\\theta_i^\\prime$ $\\phi^{\\prime}\\gets \\tau \\phi +(1 - \\tau)\\phi$ \n10: end if \n11: end for", + "bbox": [ + 173, + 714, + 828, + 912 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "E.5 EXPERIMENT SETUP FOR THE IMPACT OF DATA GEOMETRY ON DEEP $Q$ FUNCTIONS", + "text_level": 1, + "bbox": [ + 169, + 103, + 799, + 119 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We consider an one-dimensional random walk task with a fixed-horizon (50 steps for each episode), where agents at each step can move in the range of $[-1, +1]$ and the state space is a straight range from $[-10, 10]$ . The destination is located at $s = 10$ . The closer the distance to the destination, the larger the reward that the agent can get. The discount factor $\\gamma = 0.9$ . The reward function is defined as follows:", + "bbox": [ + 169, + 128, + 826, + 196 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\nr = \\frac {4 0 0 - (s ^ {\\prime} - 1 0) ^ {2}}{4 0 0} \\tag {56}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 196, + 825, + 226 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We generate offline datasets with different geometry and train the agent based on these datasets. Each synthetic dataset consists of 200 transition steps. We get the approximated $Q$ value $\\hat{Q}$ by training TD3 for $1e + 4$ steps each dataset. The learning rate of Actor and Critic networks are both $10^{-3}$ . The other implementation details are the same as the implementation of original TD3 (Fujimoto et al., 2018). The true $Q$ function can be get by Monte-Carlo estimation. 
We find that the near-destination states hold higher approximation error than that far away from the destination due to the scale of true $Q$ value near the destination is large. To alleviate the impact of $Q$ value scale on the approximation error, we define the relative approximation error as follows:", + "bbox": [ + 169, + 234, + 823, + 349 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\epsilon} (s, a) = \\epsilon (s, a) - \\min _ {a} \\epsilon (s, a) \\tag {57}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 354, + 825, + 377 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "where, $\\epsilon(s,a) = \\hat{Q}(s,a) - Q(s,a)$ . The relative error in the above definition eliminates the effect of different states on the approximation error and can capture the over-estimation error that we care about. We plot the relative approximation error of deep $Q$ functions with different random seeds and data geometry in Fig. 13.", + "bbox": [ + 169, + 386, + 823, + 444 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "F ADDITIONAL EXPERIMENT RESULTS", + "text_level": 1, + "bbox": [ + 171, + 463, + 514, + 479 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "F.1 COMPARISON OF GENERALIZATION ABILITY", + "text_level": 1, + "bbox": [ + 171, + 494, + 526, + 508 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "In the well known AntMaze task in D4RL benchmark (Fu et al., 2020), where an ant needs to navigate from the start to the destination in a large maze. The trajectories with coordinates at $x \\times y \\in [4,13] \\times [7,9] \\cup [11.5,20.5] \\times [11,13]$ in AntMaze medium tasks and $x \\times y \\in [10.5,21] \\times [7,9] \\cup [19,29.5] \\times [15,17]$ in AntMaze large tasks are clipped, as Fig. 
8 shows.", + "bbox": [ + 169, + 520, + 823, + 579 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/69fe1100f4a4cbdf16d46648b59017a703d00f054610fcfe5e174067a6e3fb48.jpg", + "image_caption": [ + "(a) Modified Medium AntMaze" + ], + "image_footnote": [], + "bbox": [ + 256, + 602, + 472, + 768 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/3863b685c8b28b2a7e4ffa25badcf673b66643b41bc4d5bd7adde3e41ba70453.jpg", + "image_caption": [ + "(b) Modified Large AntMaze", + "Figure 8: The trajectories in the offline dataset are visualized as blue. Data transitions of two small areas on the critical pathways to the destination have been removed (red box)." + ], + "image_footnote": [], + "bbox": [ + 517, + 601, + 736, + 768 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "These clipped data counts only about one-tenth of the original dataset and lies in the close proximity of the original trajectories. Under these modified datasets, simply relaying on \"stitching\" data transitions is not enough to solve the navigation problems. We evaluate representative policy constraint method (TD3+BC (Fujimoto & Gu, 2021)), value regularization method (CQL (Kumar et al., 2020b)), in-sample learning method (IQL (Kostrikov et al., 2021b)) and DOGE (our method) on these modified datasets. The evaluation results before and after clipping the trajectories are listed in Table 3. The", + "bbox": [ + 169, + 840, + 826, + 925 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "learning curves for the modified AntMaze medium and AntMaze large tasks are listed in Fig. 9 and Fig. 
4.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Observe in Table 3 that existing offline RL methods fail miserably and suffer from severe performance drops. By contrast, DOGE maintains competitive performance after the modification of the dataset and shows good generalization ability on unknown areas.", + "bbox": [ + 169, + 138, + 823, + 181 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Apart from above experiments, we also evaluate DOGE when removing only one area: $[10.5, 21] \\times [7, 9]$ , $[10.5, 21] \\times [7, 9]$ for AntMaze-large datasets and $[4, 13] \\times [7, 9]$ , $[4, 13] \\times [7, 9]$ for AntMaze-medium datasets. The final results can be seen in Table 4.", + "bbox": [ + 169, + 186, + 828, + 229 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/65508669b4d9a2e7c335bad28535d7c8fb2f0863789706b358344787e86f7966.jpg", + "table_caption": [ + "Table 3: The performance drop after removing the data at the only way to destination." + ], + "table_footnote": [], + "table_body": "
Dataset typeTD3+BCCQLIQLDOGE(ours)
antmaze-m-p-v2full data065.2±4.870.4±5.380.6±6.5
miss data010.7±18.410.2±2.233.2±27.3
Performance drop ↓-84%86%59%
antmaze-m-d-v2full data054.0±11.774.6±3.277.6±6.1
miss data08.5±5.37.6±5.740.2±32.9
Performance drop ↓-84%90%48%
antmaze-l-p-v2full data018.8±15.343.5±4.548.2±8.1
miss data001.0±0.722.4±15.9
Performance drop ↓-100%98%54%
antmaze-l-d-v2full data031.6±9.545.6±7.636.4±9.1
miss data005.2±3.114.6±11.1
Performance drop ↓-100%89%60%
", + "bbox": [ + 202, + 266, + 792, + 511 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/2d14e1975493eec5b100880be7e7a0707be39792d61534412f50576c0d0e02f8.jpg", + "image_caption": [ + "Policy constraint" + ], + "image_footnote": [], + "bbox": [ + 178, + 561, + 334, + 662 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/fa0b632299ae9f8f3f4e4b912608dc3f50c1c492b4d50d180f4ea2388773158e.jpg", + "image_caption": [ + "Value regularization" + ], + "image_footnote": [], + "bbox": [ + 338, + 561, + 493, + 662 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/fe1bb5b8c998182507b7b084cb1d392a70ecc1ae990d388b900c8a987060c1fc.jpg", + "image_caption": [ + "In-sample learning" + ], + "image_footnote": [], + "bbox": [ + 500, + 561, + 653, + 662 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/5ed288c3b1348c5d6749ed5099c8ddf40e44722bece377c9730525858a93c3d7.jpg", + "image_caption": [ + "DOGE (Ours)" + ], + "image_footnote": [], + "bbox": [ + 661, + 561, + 815, + 662 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/1eab3984971d3da5a8ed4e393bc99b6de8e7748087575ab124b6a570e2f381bf.jpg", + "image_caption": [ + "Figure 9: Evaluation on TD3+BC(Fujimoto & Gu, 2021), CQL(Kumar et al., 2020b), IQL(Kostrikov et al., 2021b), and DOGE (ours) before and after removing the data shown in Fig.8a for AntMaze medium tasks." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 672, + 334, + 773 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/c11e43bdf8b2f8429a39dc237c7db5bd9305c216d7d67fb5f65586e1daba26c4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 338, + 672, + 496, + 773 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/0b4be628e8803f3f87d2c5370b119c58ff5905d1cc2b7fa7f0b349850115b421.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 672, + 656, + 773 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/9a55cec306a559695fce48e3ee1e7ba2b8af7ba35beea46ece1cbbe66811c522.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 672, + 818, + 773 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/02e0f0241f8f846c15e1e56347f8fa1ee93b83569a48f16f6f68b1d0cb676494.jpg", + "table_caption": [ + "Table 4: Ablation for DOGE generalization with different removal areas." + ], + "table_footnote": [], + "table_body": "
DatasetFull datasetOne removalTwo removal
antmaze-m-p-v280.6±6.562.3±7.533.2±27.3
antmaze-m-d-v277.6±6.141.3±42.840.2±32.9
antmaze-l-p-v248.2±8.126.4±19.422.4±15.9
antmaze-l-d-v236.4±9.112.3±4.214.6±11.1
Total score242.8±29.8142.3±73.9110.4±87.2
", + "bbox": [ + 269, + 127, + 723, + 234 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "F.2 ADDITIONAL COMPARISON WITH TD3+BC", + "text_level": 1, + "bbox": [ + 171, + 247, + 516, + 261 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "In this section, we further demonstrate the superiority of DOGE over our most related practical work TD3+BC (Fujimoto & Gu, 2021). One can find that the biggest difference between DOGE and TD3+BC lies in the policy constraint used for policy optimization:", + "bbox": [ + 169, + 272, + 823, + 316 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- TD3+BC: constrains the policy to minimize the MSE BC loss.", + "- DOGE: constrains the policy to minimize the learned state-conditioned distance function $g(s, a)$ ." + ], + "bbox": [ + 169, + 321, + 816, + 359 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "As discussed in Section 3.1, the learned distance function $g(s,a)$ can capture the global geometric information of the offline dataset, while the MSE BC loss can only provide local sample-to-sample regularization, which may be noisy, especially in datasets that contain low-quality samples. Taking Figure 10 as an illustration, under strict BC constraint, policy learning on noisy low-quality samples may provide contradicting learning signals to near-optimal samples, which can cause inferior policy performance and unstable training process. 
By contrast, the state-conditioned distance function $g(s,a)$ in DOGE is trained on the whole dataset and hence brings global geometric information, which is far more informative and stable as compared with the MSE BC loss.", + "bbox": [ + 169, + 364, + 826, + 476 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/61c367d79e1e14f4df27054cd0558caf13e2c4e8cd4211c24596bc88cae3ca36.jpg", + "image_caption": [ + "(a) TD3+BC" + ], + "image_footnote": [], + "bbox": [ + 263, + 488, + 442, + 625 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/89ba3a7afafad8e81c7d2f1acc80924e841073b2a43ea614ecb116eab045c914.jpg", + "image_caption": [ + "(b) DOGE" + ], + "image_footnote": [], + "bbox": [ + 504, + 491, + 674, + 622 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/2717ce8236830b1fb75d049dd33db86dcd2847a1921796da5fc1663e20ed711a.jpg", + "image_caption": [ + "Figure 10: Illustrations of the differences between (a) the MSE BC constraint of TD3+BC and (b) the state-conditioned distance function constraint of DOGE. In (a), the MSE BC constraint in TD3+BC blindly enforces the imitation behavior on any data samples, which may lead to an inferior policy in the presence of noisy low-quality samples. In (b), the state-conditioned distance function $g(s,a)$ can provide more informative global dataset geometry information to guide the stable learning of the policy." 
+ ], + "image_footnote": [ + "Low-quality Samples", + "MSE BC Constraints of Near-optimal Samples" + ], + "bbox": [ + 256, + 674, + 397, + 696 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/48382d6e18730127b150a123b45009acfa033e011c6d0fbb260efbb60bca7044.jpg", + "image_caption": [], + "image_footnote": [ + "Near-optimal Samples", + "Policy Outputs", + "g(s,a) Distance Function Values" + ], + "bbox": [ + 500, + 672, + 601, + 695 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "To better illustrate the superiority of DOGE over TD3+BC, we add extra comparative experiments with TD3+BC on a new set of mixed-quality datasets. In halfcheetah-random dataset, we add different proportions (1% to 20%) of the near-optimal halfcheetah-medium-expert dataset to form new mixed datasets and evaluate how TD3+BC and DOGE perform. See Figure 11 for detailed results.", + "bbox": [ + 169, + 791, + 823, + 848 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Figure 11 shows that DOGE enjoys more performance gains when the random dataset involves near-optimal data, while TD3+BC is heavily influenced by the local information from the larger proportion of the low-quality random data. 
Moreover, TD3+BC suffers from severe oscillation and training instability, while DOGE enjoys a stable training process due to the use of the more informative state-conditioned distance constraint that captures the overall dataset geometry.", + "bbox": [ + 169, + 854, + 825, + 925 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/7fe3c3376a72cd45956ebc3962e3c7f4b2006cf1851b4714cc200ae968a49d28.jpg", + "image_caption": [ + "Figure 11: Comparisons between DOGE and TD3+BC on mixed datasets with different proportions of halfcheetah-medium-expert dataset added into halfcheetah-random dataset. Ratio- $1\\%$ means $1\\%$ medium-expert dataset is added into the original halfcheetah-random dataset. TD3+BC suffers severe oscillation and training instability, while DOGE enjoys stable training processes and substantial performance gains." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 99, + 305, + 181 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/8e1aa2029da2ee5851ccf604d7c757833b62ad9cfe7ac8a738b75692476d4308.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 308, + 99, + 434, + 181 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/6f5b108ba7bbffb91f7a732c63c672976819249a7e3bd97ddc10ae2d5efabacd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 436, + 101, + 560, + 181 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/5036814a0f6a941772b4948a362362c21b8b0999b661a847998aa7a19d7444a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 563, + 101, + 687, + 181 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/943b5c53751c20a7c1b8b0b7945e3ab481eadb25bde6ef4fcaf32c1d33871f8a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 101, + 815, + 181 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "F.3 COMPARISON WITH UNCERTAINTY-BASED METHODS", + "text_level": 1, + "bbox": [ + 171, + 258, + 580, + 272 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We also compare DOGE with SOTA uncertainty-based offline RL approaches, including EDAC (An et al., 2021) and PBRL (Bai et al., 2021) on more complex D4RL AntMaze tasks. The final results are presented in Table 5. Table 5 shows that the SOTA uncertainty-based methods are unable to provide reasonable performance on the difficult Antmaze tasks, despite that they can achieve good performance on simpler MuJoCo tasks. 
A similar finding is also reported in a recent offline RL study (Anonymous, 2023).", + "bbox": [ + 169, + 297, + 823, + 381 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "In practical implementation of EDAC and PBRL, to obtain relatively accurate uncertainty measures and achieve reasonable performance, these methods typically need dozens of ensemble Q-networks, which can be quite costly and inefficient. Moreover, heavy hyperparameter tuning is also required for them to obtain the best performance. In contrast, our method quantifies the generalization ability of the Q-function from the perspective of dataset geometry and is trained using a simple regression loss in Eq. (4), which enjoys better training stability and simplicity.", + "bbox": [ + 169, + 387, + 826, + 472 + ], + "page_idx": 28 + }, + { + "type": "table", + "img_path": "images/f3989451d5d906747bb415b511ded8f56d6016de714fd88455786d824c586802.jpg", + "table_caption": [ + "Table 5: Average normalized scores over 5 seeds on Antmaze tasks" + ], + "table_footnote": [], + "table_body": "
DatasetEDACPBRLDOGE(Ours)
antmaze-u-v20097.0±1.8
antmaze-u-p-v20063.5±9.3
antmaze-m-p-v20080.6±6.5
antmaze-m-d-v20077.6±6.1
antmaze-l-p-v20048.2±8.1
antmaze-l-d-v20036.4±9.1
", + "bbox": [ + 305, + 537, + 687, + 654 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "F.4 ADDITIONAL ANALYSIS ON DISTANCE FUNCTION", + "text_level": 1, + "bbox": [ + 171, + 738, + 553, + 752 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We report the learning curves of the state-conditioned distance function $g(s, a)$ trained on different datasets (including hopper-m-v2, halfcheetah-m-v2, and walker2d-m-v2) in Figure 12. Our proposed state-conditioned distance function is learned through a simple regression task (Eq. (4)), which is very easy to train. Figure 12 shows that it reaches convergence within only 1K training steps on D4RL MuJoCo medium datasets.", + "bbox": [ + 169, + 777, + 823, + 847 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We also change the network configurations (i.e., number of hidden layers and hidden units) of the state-conditioned distance function $g(s, a)$ to investigate how the expressivity of $g$ influences the performance of the policy. 
Table 6 shows that DOGE achieves similar performance across different $g$ network configurations, indicating that DOGE is robust to model complexity and expressivity of the state-conditioned distance function.", + "bbox": [ + 169, + 854, + 823, + 922 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/d342ac9f433f1f2e71f4bb00a94d05d5146b9e6cce31f52cc81029326576f62f.jpg", + "image_caption": [ + "Figure 12: Learning curves of the state-conditioned distance function $g(s, a)$" + ], + "image_footnote": [], + "bbox": [ + 174, + 99, + 377, + 233 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/d9d446b0951fd812c5397d4784c7312867b933e2965029a5af41f85f20588e03.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 400, + 99, + 596, + 233 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/bf89404aced332aa669a577d33ebb36d479ddafa1a324f673d5947b607c471d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 99, + 821, + 233 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/e9fc41184aadef9caae797cdb5c2d732de931b3cd2fcbb8cf5f7dbbd1c1215ff.jpg", + "table_caption": [ + "Table 6: Normalized scores of DOGE trained on distance functions with different network configurations. [128, 128] means $g$ network has 2 hidden layers with 128 units. [256, 256, 256] means 3 hidden layers with 256 units." + ], + "table_footnote": [], + "table_body": "
Dataset[128, 128][256, 256][256, 256, 256]
hopper-m99.4101.498.6
halfcheetah-m47.446.945.3
walker2d-m85.386.486.8
", + "bbox": [ + 290, + 340, + 702, + 416 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.5 ADDITIONAL EXPERIMENTS OF THE IMPACT OF DATA GEOMETRY ON DEEP $Q$ FUNCTIONS", + "text_level": 1, + "bbox": [ + 171, + 441, + 756, + 468 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We run several experiments with different random seeds (see Figure 13). Although the approximation error pattern of different random seeds is not the same, they all perform in the same manner that deep $Q$ functions produce relatively low approximation error inside the convex hull of training data. We refer to this phenomenon as deep $Q$ functions interpolate well but struggle to extrapolate.", + "bbox": [ + 169, + 479, + 823, + 539 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/3212392b8892505c92bc3cbd780bd02a3d760bd5bf9df9a82b3eb76e282bd1d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 549, + 392, + 657 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/b8ba27213572322fedc4476000c6c131036e4ea81e8674f9c1afed6a1ea24a70.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 549, + 602, + 657 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/4412252625722646c5d2b50bbb23d8842d7c850bca6bad501c1717b9439c85bb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 549, + 813, + 657 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/352fc6c28bd5149b30ea4c07eab417b1bc5a8fb7dd0ab8f20216ccb1ee7247b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 657, + 390, + 765 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/d3e20f1b76060d0750c4bba328912261df74c45eb047a120a298f6d2665c4e3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 657, + 602, + 765 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": 
"images/5e473fecd1c2cc1a3d7e05ee10cb0add89eafccf7315a8e0b0b85b6fcc0f24a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 657, + 813, + 765 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/d09492e5ef76cff94f6222beec87801b87c644ba724ef8751639a388f185c03e.jpg", + "image_caption": [ + "Figure 13: The figures above depict the effect of different data geometries on the final deep $Q$ functions approximation error. The training data are marked as white dots." + ], + "image_footnote": [], + "bbox": [ + 183, + 766, + 390, + 875 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/620451d0408b6163526836a07de10d2121261d8e507efef5501bf6b7831d16c3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 766, + 602, + 875 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/7f0b86b44bcac4cebf02f1e17b338b5a7be787041c4388c535dcbad3ba78fff8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 766, + 813, + 875 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "G ABLATIONS", + "text_level": 1, + "bbox": [ + 171, + 102, + 308, + 117 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "We conduct ablation studies on the effect of $\\alpha$ in $\\beta = \\frac{\\alpha}{\\frac{1}{n}\\sum_{i=1}^{n}|Q(s_i,a_i)|}$ (see Figure 14), the non-parametric threshold $G$ in Eq. (6) (see Figure 16) and the non-parametric number of noise actions $N$ to train state-conditioned distance function (see Figure 15) on the performance of the final algorithm. 
We also conduct ablation studies on the effect of $G$ on the Lagrangian multiplier $\\lambda$ (see Figure 17).", + "bbox": [ + 169, + 133, + 826, + 194 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "For $\\alpha$ , we add or subtract 2.5 to the original value. For $N$ , we choose $N = 10, 20, 30$ to conduct experiments respectively. For $G$ , we choose $30\\%$ , $50\\%$ , $70\\%$ , $90\\%$ and $100\\%$ upper quantile of the distance value in mini-batch samples and the results can be found in Table 7.", + "bbox": [ + 169, + 199, + 823, + 242 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/b2e94712a55f024d4e0acbf39e071c494bd54e9d0802b46ca77ceec1a8ca6de9.jpg", + "table_caption": [ + "Table 7: Ablations on G with different quantile." + ], + "table_footnote": [], + "table_body": "
DatasetG = 30%G = 50%G = 70%G = 90%G = 100%
hopper-r-v219.8±0.321.1±12.615.5±13.517.6±12.216.4±12.4
halfcheetah-r-v219.4±0.617.8±1.217.8±0.717.7±1.017.7±0.8
walker2d-r-v22.6±3.90.9±2.42.2±2.61.8±3.32.2±3.2
hopper-m-v244.6±5.798.6±2.199.4±0.491.5±9.932.9±54.3
halfcheetah-m-v241.3±1.245.3±0.646.0±0.146.0±0.846.1±0.5
walker2d-m-v283.7±7.586.8±0.887.3±1.669.9±28.984.2±1.0
hopper-m-r-v251.5±11.276.2±17.779.6±36.978.4±27.665.7±37.2
halfcheetah-m-r-v25.9±5.742.8±0.643.2±0.142.2±0.842.0±0.6
walker2d-m-r-v228.3±14.387.3±2.387.9±2.477.8±21.678.6±24.1
hopper-m-e-v261.7±10.4102.7±5.282.8±5.888.9±17.770.0±48.4
halfcheetah-m-e-v246.9±5.278.7±8.475.1±15.473.5±13.669.9±8.7
walker2d-m-e-v2110.5±0.7110.4±1.5111.1±0.5110.2±22.580.0±54.3
", + "bbox": [ + 199, + 281, + 795, + 479 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Seen from Table 7 that using different $G$ for different tasks may achieve even better performance. Particularly, for some datasets with diverse data distributions that need to find good data from suboptimal data, a more tolerant quantile (e.g., $G = 70\\%$ ) can reasonably extend feasible region and increase the opportunity to find the optimal policy, such as hopper-m-r, halfcheetah-m-r, walker2d-m-r, hopper-m-e, halfcheetah-m-e. However, an overly relaxed quantile (e.g., $G = 90\\%$ and $100\\%$ ) increases the risk of including problematic OOD actions in policy learning, causing performance drop due to value overestimation and high variance.", + "bbox": [ + 169, + 493, + 823, + 590 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "By contrast, an overly restrictive quantile such as $G = 30\\%$ can be over-conservative and cause significant constraints violations that impede policy learning, as constraints satisfaction is favored over the max-Q operation in most updates. This can be reflected in the additional results for the Lagrangian multiplier $\\lambda$ (see Appendix E.2 for learning curves and Figure 11 for additional ablations), where $\\lambda \\rightarrow \\infty$ for some tasks under $G = 30\\%$ . 
This will cause the suboptimality gap $(\\frac{1 - \\gamma}{2\\gamma}\\alpha(\\Pi_{\\mathcal{D}}))$ in Theorem 3 to dominate the performance bound, leading to inferior policy.", + "bbox": [ + 169, + 597, + 823, + 686 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "As hyperparameter tuning in practical offline RL applications without online interaction is very difficult, to reduce the computational load, we set $G = 50\\%$ as default in a non-parametric manner, since it consistently achieves good performance, and is neither too conservative nor too aggressive for most tasks.", + "bbox": [ + 169, + 691, + 823, + 747 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Observe in Figure 14 that DOGE maintains the similar performance with the changes of $\\alpha$ on most of Mujoco tasks. At the same time, we also observe that the effect of $N$ on the experiment is not obvious. Compared with $N$ and $\\alpha$ , we find that $G$ has a more significant effect on the experimental results. Observe in Figure 16 that a small $G$ usually causes the policy set induced by DOGE to be too small to obtain near-optimal policy. 
By contrast, a large $G$ is not likely to cause excessive error accumulation and hence maintains relatively good performance.", + "bbox": [ + 169, + 753, + 823, + 839 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In addition, the ablation studies show that our method is hyperparameter-robust and maintains good performance with changes in hyperparameters.", + "bbox": [ + 169, + 844, + 823, + 873 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/b497c0689940a9f9426b7cb12c8ffb4ea614502dcd3f02d84d4560c0f7dbc628.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 99, + 336, + 196 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/8351bb38fc56653a03e65581991794509be44a76e45bfa6159db42132ca99d2c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 101, + 496, + 196 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/2a26e65bd3e1194986a10158a4a7aa95bcfd4e458ee6c9468195c450d0870cdb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 101, + 656, + 198 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/7a5bd1f031752dbe32eb8381a22037d4ecb9af59fe934b37e6624c8cf6ac171f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 101, + 815, + 198 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/d6ddab1808b5be419a0818bc318d02e5be9727792a56651bfe6fb6c4c70bd5d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 199, + 336, + 295 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/eaa86913dfe61325f25b4691e42eb46ce85f8776970d04b33599ed4459860b69.jpg", + "image_caption": [], + "image_footnote": 
[], + "bbox": [ + 341, + 199, + 496, + 295 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/9ec4541ddd94166d6bb8214c4232a40123af9ded76f5cd20ca6e0b44c416af3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 199, + 656, + 295 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/7bfc9c772203b3c9e1ad705a66c213b6dd9ef30790615625735db3dd02a86b9a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 199, + 815, + 295 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/fe6aa72c48fe7ed2bdfb6ba6e57b07d802cb8c43d45e4920168a2d14b1ac04a2.jpg", + "image_caption": [ + "Figure 14: Ablation for $\\alpha$ . Error bars indicate min and max over 5 seeds." + ], + "image_footnote": [], + "bbox": [ + 181, + 296, + 336, + 393 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/58d7058bb3e951940f73b53cd6e8345ffd13594d412ee89bfc0e38e288880812.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 296, + 496, + 393 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/d8b0ec68d0da49a551e8df76c2fe8a622e9e1a6c98efafbbbf3c70d6e2fae92f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 296, + 656, + 393 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/d860b015b309b6776eba8bd564552ab8f708a3be73624485f8ec71c9f76ec9b1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 296, + 815, + 393 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/1cbc0d576afac3fc3112ce9401bd9388afbb211cc32d286ce4bf7876ee8658a8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 434, + 336, + 530 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/d1beb6a7e62f0a6d9207e577bb4d181d1375a289871167a59dafa665549874ec.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 434, + 496, + 530 + ], + "page_idx": 31 + }, + { + 
"type": "image", + "img_path": "images/8f127cc4d24c131ed087b453c5806d833a1b1ae004f7a23828861fef0774c6ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 434, + 656, + 530 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/d2823754ccef8919b862b94b7298b52eeec33031f4fdd12e075c157ce282323a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 434, + 815, + 530 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/8acb23bf4361aab8395a3bda4df853ed734977366b215cde5ab9242d79138697.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 532, + 336, + 628 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/0f5c483ba0c4d6eff375c65fe8e84a933f1b2dbcd7fb5e674e1efdcee7b4962f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 532, + 496, + 628 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/eaeeb76c39862eecfec518fdbaae1782fb1e1c0987ce8aed42a924d373d96cbb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 532, + 656, + 628 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/44744c57079d49d80a20600f99478331e06ae50f5c3ef96d4b4e31be7446ade9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 532, + 815, + 628 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/3a0f6ab9a095152a9eaeeca45ceb9568a550aaa8de4e113425f5ba431b9878dc.jpg", + "image_caption": [ + "Figure 15: Ablation for $N$ . Error bars indicate min and max over 5 seeds." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 630, + 336, + 726 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/5605b814f0d0e2a7f83bbcd0c1d030976969d00556e713784947a19096184e2e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 630, + 496, + 726 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/e132fcf9f153c1a9d11093ef758e00520cb6bccc2502ce76677bbd9f275bdca2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 630, + 656, + 726 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/7a4439af88b2668f7831e239a87c90597aa7ac1c7c446437d087a7b832d6ea11.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 630, + 815, + 726 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "H LEARNING CURVES", + "text_level": 1, + "bbox": [ + 171, + 765, + 375, + 780 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The learning curves for Mujoco and AntMaze tasks are listed in Fig. 18 and Fig.19. The learned policies are evaluated for 10 episodes and 100 episodes each seed for Mujoco and AntMaze tasks, respectively. 
For AntMaze tasks, we subtract 1 from rewards for the AntMaze datasets following (Kumar et al., 2020b; Kostrikov et al., 2021b).", + "bbox": [ + 169, + 796, + 826, + 853 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/1117821df210f570b2e664f0450868a4667351ef55d3c52ead27ec675572ca33.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 143, + 336, + 241 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/9db482154a813d8bd0b5a284eac4c62a481620938fd356ced920f6d1be4af221.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 143, + 496, + 241 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/3802b5bdb291a351f5ce1714eaae0ee7f4f86f8f9ffe76c88af102f8832f51d2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 143, + 656, + 241 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/b31f484605fe53d72a3e3ee21b4d4aa09a7285abc46d2956893d4bd9525ccfd5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 145, + 815, + 241 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/9a97af5faf44a23b5c803e4c54f916eae78f27e9ca6ff09ecf3fe5977f745481.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 243, + 336, + 339 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/e0ee746721b0ed9d87e215815f15bb47d8c41d7f7aba2d1702f0a58f07e68809.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 243, + 496, + 339 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/b57e91459af9580d4caafe5823dac513ca6f3e3893d474db6647c92a3ebaf3e6.jpg", + "image_caption": [], + "image_footnote": [], + 
"bbox": [ + 500, + 243, + 656, + 339 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/be21adeab67bd19a9ace1fde4861e0175708c8d611007c33b6578958cfd9209a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 243, + 815, + 339 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/7dd899e830ab870b29b7ae0b3198539b68109a479b607697b922ed2249fcdbaa.jpg", + "image_caption": [ + "Figure 16: Ablation for $G$ . Error bars indicate min and max over 5 seeds." + ], + "image_footnote": [], + "bbox": [ + 179, + 340, + 336, + 438 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/74245e800fe04cee8de6c3219852adf7a4dc6d1a3560706ca47f41e71e5cbb5d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 340, + 496, + 438 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/ae96491b9731557ead454f4dafd177b210199887f65c5f489f7fcf46fc143048.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 340, + 656, + 438 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/29f4c93235d6c8d0be0f3b30cdec05ee62c441246ecf07dfe8599845f7f35381.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 340, + 815, + 436 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/e11f11c11388a51480f01ed44bdd7a5941a63b64daac9d701e33748f08ff5e25.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 560, + 336, + 656 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/e3d5b4359c7655bf7b27481c171714cc3c30045cb61713434320abd477da2c5b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 561, + 496, + 656 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/8355d0157f13aedcf0cb2de6c8216dfec88bfbe759678989f7f8aed0c3da0d2a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 561, + 656, + 656 + ], + "page_idx": 32 + }, + { + "type": 
"image", + "img_path": "images/6f233afe55693ae2fe42ba1729c80bf145c224c4caa5dfbddb05e52661f78425.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 561, + 815, + 656 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/4683367b1ce613a98c64a41d95bc1db19a242abd078cd80308f22a0ab87214ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 657, + 336, + 753 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/17416b7a0ca6d681161ea5f3b61b662a6ca26fc673dc3ba0c07dcbef99f94360.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 657, + 496, + 753 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/058bbaebbeab618ce0ec96dda0a71064c2297a0a87d63a5878b4c81db6f8306a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 657, + 656, + 753 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/fb7e300fab8d85f1c91880555a47659b821a19f3d771603cceab4590f426e146.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 657, + 815, + 753 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/efccd5cd0f76fd0a1bc3b8bc98c66bdf81e63747a54314c8acaa22a9b7195e71.jpg", + "image_caption": [ + "Figure 17: Ablation for $\\lambda$ . Error bars indicate min and max over 5 seeds." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 755, + 336, + 849 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/af2fb0b9d1205892ae42ffbd00680021cfa767cefc5cb12089de3a4a1bf07fea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 755, + 496, + 849 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/cf1c6b38bfcfbb7d032ea1d1d09353d63088df2d16d71cd391659076602bb430.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 755, + 656, + 849 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/77e50078ab69603957982726b54b65e0f08e5dc0bac8e12a7db922b3ef85be8c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 755, + 815, + 849 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/05c00a74cbaca202b5dd3cfc09a293da88e48342ce3473c284060cd3311c3ceb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 152, + 336, + 250 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/ee1be08b68ad7bd492e4a1c657ab3fa14b37a945bf2de22358a7b523588133de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 152, + 495, + 250 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/3d136a128715dd8f3041a96b5778277af78e9dce2a4cae6e1c76b98605d5c26a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 152, + 656, + 250 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/decb011829db7975397f4d76c29ed5ff1e979aad1d27b6cccfac52a836306072.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 152, + 815, + 250 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": 
"images/70272e10ad7e942dfffb81e13addf8f85ba9701dcbdca4bfc427f46796ef7927.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 251, + 336, + 347 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/e3622fb9b64f98769333bd39649727ccdd770fd8689416e818d59730392266bc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 251, + 495, + 347 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/1956bd406adef05af67e339711df4664bab1e2f7c06107688b80306b96ba556e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 251, + 656, + 347 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/76b8a9ec80424135f56da53ad068a45784896cbe000b589b8ad6879dcd72f82b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 251, + 815, + 347 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/d7d3bc5964c5ad8bda531cc85afd02938802605930b143e30047ccfdd10ee044.jpg", + "image_caption": [ + "Figure 18: Learning curves for Mujoco Tasks. Error bars indicate min and max over 5 seeds." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 348, + 336, + 445 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/bed43712c3cb41198a3791a5420fb53d68a2e7aa47c2a30da8383ee8ff90f474.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 348, + 495, + 445 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/0830b54f912ce2aabd99b223b5df5324ee117dd4bd53121bdfe263a86d4900bd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 348, + 656, + 445 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/a7a4c340dc13c0cb909d3a5c3907a71d7cb2882ef8b9ef636fae79474a9a7883.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 348, + 815, + 445 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/e5788a93c10485df0265c246a4fe50417bd5dd3046c834d39c9af4c1dd600c57.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 583, + 390, + 712 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/bc4ad5d304127be2ef67f250649dfda7f2d919b476dbee94a1afd4c57783dfec.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 583, + 602, + 712 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/8549272511775042df6c7cfedae412646a9423036c4449f8740b6baa92d77462.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 583, + 812, + 712 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/065d8fd34f195f971be35b7f5c03ea4a0b73afce9f3e350688df66e27371901d.jpg", + "image_caption": [ + "Figure 19: Learning curves for AntMaze Tasks. Error bars indicate min and max over 5 seeds." 
+ ], + "image_footnote": [], + "bbox": [ + 183, + 713, + 390, + 842 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/6a1cae0beaaa1b4d765b5adbd7fbb3191dab2c1532ebee470990087c56c99bf7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 713, + 601, + 842 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/2586130a82e9ddb10ac8b5c495de5689e709ef2991573dad885b84f1ac40e0a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 713, + 812, + 842 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 33 + } +] \ No newline at end of file diff --git a/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_model.json b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e92c61c2af3a94d24bbbe675f7877b2812d71ef2 --- /dev/null +++ b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_model.json @@ -0,0 +1,6703 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.828, + 0.147 + ], + "angle": 0, + "content": "WHEN DATA GEOMETRY MEETS DEEP FUNCTION: GENERALIZING OFFLINE REINFORCEMENT LEARNING" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.169, + 0.862, + 0.186 + ], + "angle": 0, + "content": "Jianxiong Li\\(^{1}\\), Xianyuan Zhan\\(^{1,2*}\\), Haoran Xu\\(^{1}\\), Xiangyu Zhu\\(^{1}\\), Jingjing Liu\\(^{1}\\) & Ya-Qin 
Zhang\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.186, + 0.704, + 0.2 + ], + "angle": 0, + "content": "\\(^{1}\\) Institute for AI Industry Research (AIR), Tsinghua University, Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.2, + 0.594, + 0.214 + ], + "angle": 0, + "content": "\\(^{2}\\) Shanghai Artificial Intelligence Laboratory, Shanghai, China" + }, + { + "type": "list", + "bbox": [ + 0.184, + 0.186, + 0.704, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.215, + 0.802, + 0.228 + ], + "angle": 0, + "content": "li-jx21@mails.tsinghua.edu.cn, zhanxianyuan@air.tsinghua.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.264, + 0.547, + 0.279 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.296, + 0.77, + 0.52 + ], + "angle": 0, + "content": "In offline reinforcement learning (RL), one detrimental issue to policy learning is the error accumulation of deep \\( Q \\) function in out-of-distribution (OOD) areas. Unfortunately, existing offline RL methods are often over-conservative, inevitably hurting generalization performance outside data distribution. In our study, one interesting observation is that deep \\( Q \\) functions approximate well inside the convex hull of training data. Inspired by this, we propose a new method, DOGE (Distance-sensitive Offline RL with better Generalization). DOGE marries dataset geometry with deep function approximators in offline RL, and enables exploitation in generalizable OOD areas rather than strictly constraining policy within data distribution. Specifically, DOGE trains a state-conditioned distance function that can be readily plugged into standard actor-critic methods as a policy constraint. Simple yet elegant, our algorithm enjoys better generalization compared to state-of-the-art methods on D4RL benchmarks. 
Theoretical analysis demonstrates the superiority of our approach to existing methods that are solely based on data distribution or support constraints. Code is available at https://github.com/Facebear-ljx/DOGE." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.548, + 0.338, + 0.563 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.579, + 0.828, + 0.706 + ], + "angle": 0, + "content": "Offline reinforcement learning (RL) provides a new possibility to learn optimized policies from large, pre-collected datasets without any environment interaction (Levine et al., 2020). This holds great promise to solve many real-world problems when online interaction is costly or dangerous yet historical data is easily accessible (Zhan et al., 2022). However, the optimization nature of RL, as well as the need for counterfactual reasoning on unseen data under offline setting, have caused great technical challenges for designing effective offline RL algorithms. Evaluating value function outside data coverage areas can produce falsely optimistic values; without corrective information from online interaction, such estimation errors can accumulate quickly and misguide policy learning process (Van Hasselt et al., 2018; Fujimoto et al., 2018; Kumar et al., 2019)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.711, + 0.828, + 0.852 + ], + "angle": 0, + "content": "Recent model-free offline RL methods investigate this error accumulation challenge in several ways: 1) Policy Constraint: directly constraining learned policy to stay inside distribution, or with the support of dataset (Kumar et al., 2019); 2) Value Regularization: regularizing value function to assign low values at out-of-distribution (OOD) actions (Kumar et al., 2020b); 3) In-sample Learning: learning value function within data samples (Kostrikov et al., 2021b) or simply treating it as the value function of behavioral policy (Brandfonbrener et al., 2021). 
All three schools of methods share similar traits of being conservative and omitting evaluation on OOD data, which brings benefits of minimizing model exploitation error, but at the expense of poor generalization of learned policy in OOD regions. Thus, a gaping gap still exists when such methods are applied to real-world tasks, where most datasets only partially cover state-action space with suboptimal policies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.857, + 0.828, + 0.901 + ], + "angle": 0, + "content": "Meanwhile, online deep reinforcement learning (DRL) that leverages powerful deep neural network (DNN) with optimistic exploration on unseen samples can yield high-performing policies with promising generalization performance (Mnih et al., 2015; Silver et al., 2017; Degrave et al., 2022;" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.911, + 0.338, + 0.925 + ], + "angle": 0, + "content": "*Corresponding authors" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.201, + 0.101, + 0.34, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.101, + 0.493, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.101, + 0.647, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.101, + 0.798, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.22, + 0.825, + 0.246 + ], + "angle": 0, + "content": "Figure 1: Left: Visualization of AntMaze dataset. Data transitions of two small areas on the critical pathways to the destination have been removed (red box). 
Right: Performance of three SOTA offline RL methods." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.255, + 0.827, + 0.353 + ], + "angle": 0, + "content": "Packer et al., 2018). This staring contrast propels us to re-think the question: Are we being too conservative? It is well known that DNN has unparalleled approximation and generalization abilities, compared with other function approximators. These attractive abilities have not only led to huge success in computer vision and natural language processing (He et al., 2016; Vaswani et al., 2017), but also amplified the power of RL. Ideally, in order to obtain the best policy, an algorithm should enable offline policy learning on unseen state-action pairs that function approximators (e.g., \\(Q\\) function, policy network) can generalize well, and add penalization only on non-generalizable areas." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.359, + 0.825, + 0.485 + ], + "angle": 0, + "content": "However, existing offline RL methods heed too much conservatism on data-related regularizations, while largely overlooking the generalization ability of deep function approximators. Intuitively, let us consider the well-known AntMaze task in the D4RL benchmark (Fu et al., 2020), where an ant navigates from the start to the destination in a large maze. We observe that existing offline RL methods fail miserably when we remove only small areas of data on the critical pathways to the destination. As shown in Figure 1, the two missing areas reside in close proximity to the trajectory data. Simply \"stitching\" up existing trajectories as approximation is not sufficient to form a near-optimal policy at missing regions. Exploiting the generalizability of deep function approximators, however, can potentially compensate for the missing information." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.491, + 0.825, + 0.631 + ], + "angle": 0, + "content": "In our study, we observe that the value function approximated by DNN can interpolate well but struggles to extrapolate (see Section 2.2). Such an \"interpolate well\" phenomenon is also observed in previous studies on the generalization of DNN (Haley & Soloway, 1992; Barnard & Wessels, 1992; Arora et al., 2019a; Xu et al., 2020; Florence et al., 2022). This finding motivates us to reconsider the generalization of function approximators in offline RL in the context of dataset geometry. Along this line, we discover that a closer distance between a training sample to the offline dataset often leads to a smaller value variation range of the learned neural network, which effectively yields more accurate inference of the value function inside the convex hull (formed by the dataset). By contrast, outside the convex hull, especially in those areas far from the training data, the value variation range usually renders too large to guarantee a small approximation error." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.637, + 0.827, + 0.762 + ], + "angle": 0, + "content": "Inspired by this, we design a new algorithm DOGE (Distance-sensitive Offline RL with better Generalization) from the perspective of generalization performance of deep \\(Q\\) function. We first propose a state-conditioned distance function to characterize the geometry of offline datasets, whose output serves as a proxy to the network generalization ability. The resulting algorithm learns a state-conditioned distance function as a policy constraint on standard actor-critic RL framework. Theoretical analysis demonstrates the superior performance bound of our method compared to previous policy constraint methods that are based on data distribution or support constraints. 
Evaluations on D4RL benchmarks validate that our algorithm enjoys better performance and generalization abilities than state-of-the-art offline RL methods." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.783, + 0.562, + 0.8 + ], + "angle": 0, + "content": "2 DATA GEOMETRY VS. DEEP \\(Q\\) FUNCTIONS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.815, + 0.296, + 0.829 + ], + "angle": 0, + "content": "2.1 NOTATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.841, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We consider the standard continuous action space Markov decision process (MDP) setting, which can be represented by a tuple \\((S, \\mathcal{A}, \\mathcal{P}, r, \\gamma)\\), where \\(S\\) and \\(\\mathcal{A}\\) are the state and action space, \\(\\mathcal{P}(s'|s, a)\\) is the transition dynamics, \\(r(s, a)\\) is a reward function, and \\(\\gamma \\in [0,1)\\) is a discount factor. The objective of the RL problem is to find a policy \\(\\pi(a|s)\\) that maximizes the expected cumulative discounted return, which can be represented by a \\(Q\\) function \\(Q_{\\theta}^{\\pi}(s, a) = \\mathbb{E}[\\sum_{t=0}^{\\infty} \\gamma^{t} r(s_{t}, a_{t}) | s_{0} = s, a_{0} = a, a_{t} \\sim \\pi(\\cdot | s_{t}), s_{t+1} \\sim \\mathcal{P}(\\cdot | s_{t}, a_{t})]\\). The \\(Q\\) function is typically approximated by function" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.103, + 0.817, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.213, + 0.825, + 0.241 + ], + "angle": 0, + "content": "Figure 2: Approximation error of deep \\( Q \\) functions with different dataset geometry. 
Offline data are marked as white dots (Please refer to Appendix E.5 for detailed experimental setup)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.255, + 0.827, + 0.298 + ], + "angle": 0, + "content": "approximators with learnable parameters \\(\\theta\\), such as deep neural networks. Under offline RL setting, we are only given a fixed dataset \\(\\mathcal{D}\\) and cannot interact further with the environment. Therefore, the parameters \\(\\theta\\) are optimized by minimizing the following temporal difference (TD) error:" + }, + { + "type": "equation", + "bbox": [ + 0.273, + 0.303, + 0.826, + 0.328 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\theta} \\mathbb {E} _ {(s, a, s ^ {\\prime}) \\in \\mathcal {D}} \\left[ \\left(r (s, a) + \\gamma \\mathbb {E} _ {a ^ {\\prime} \\sim \\pi (\\cdot | s ^ {\\prime})} \\left[ Q _ {\\theta^ {\\prime}} ^ {\\pi} \\left(s ^ {\\prime}, a ^ {\\prime}\\right) \\right]\\right) - Q _ {\\theta} ^ {\\pi} (s, a) \\right] ^ {2} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.332, + 0.734, + 0.349 + ], + "angle": 0, + "content": "where \\( Q_{\\theta'}^{\\pi} \\) is the target \\( Q \\) function, which is a delayed copy of the current \\( Q \\) network." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.364, + 0.45, + 0.377 + ], + "angle": 0, + "content": "2.2 INTERPOLATE VS. EXTRAPOLATE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.827, + 0.545 + ], + "angle": 0, + "content": "Motivating examples. Let's first consider a set of simple one-dimensional random walk tasks with different offline datasets, where agents at each step can take an action to move in the range of \\([-1, 1]\\), and the state space is a straight line ranging from \\([-10, 10]\\). The destination is located at \\(s = 10\\). The closer to the destination, the larger reward the agent gets (i.e., \\(r = 1\\) at \\(s = 10\\), \\(r = 0\\) at \\(s = -10\\)). 
The approximation errors of the learned \\(Q\\) functions are visualized in Figure 2. Note that the approximation errors of the learned \\(Q\\) functions tend to be low at state-action pairs that lie inside or near the boundaries of the convex hull formed by the dataset. Under continuous state-action space, state-action pairs within the convex hull of the dataset can be represented in an interpolated manner (referred as interpolated data), i.e., \\(x_{in} = \\sum_{i=1}^{n} \\alpha_{i} x_{i}\\), \\(\\sum_{i=1}^{n} \\alpha_{i} = 1\\), \\(\\alpha_{i} \\geq 0\\), \\(x_{i} = (s_{i}, a_{i}) \\in \\mathcal{D}\\); similarly, we can define the extrapolated data that lie outside the convex hull of the dataset as \\(x_{out} = \\sum_{i=1}^{n} \\beta_{i} x_{i}\\), where \\(\\sum_{i=1}^{n} \\beta_{i} = 1\\) and \\(\\beta_{i} \\geq 0\\) do not hold simultaneously." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.55, + 0.828, + 0.621 + ], + "angle": 0, + "content": "We observe that the geometry of the datasets play a special role on the approximation error of deep \\(Q\\) functions, or in other words, deep \\(Q\\) functions interpolate well but struggle to extrapolate. This phenomenon is also reflected in studies on the generalization performance of deep neural networks under a supervised learning setting (Haley & Soloway, 1992; Barnard & Wessels, 1992; Arora et al., 2019a; Xu et al., 2020; Florence et al., 2022), but is largely overlooked in modern offline RL." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.827, + 0.725 + ], + "angle": 0, + "content": "Theoretical explanations. Based on advanced theoretical machinery from the generalization analysis of DNN, such as neural tangent kernel (NTK) (Jacot et al., 2018), we can theoretically demonstrate that this phenomenon is also carried over to the offline RL setting for deep \\( Q \\) functions. 
Define \\( \\operatorname{Proj}_{\\mathcal{D}}(x) \\coloneqq \\arg \\min_{x_i \\in \\mathcal{D}} \\| x - x_i \\| \\) (we denote \\( \\| x \\| \\) as Euclidean norm) as the projection operator that projects unseen data \\( x \\) to the nearest data point in dataset \\( \\mathcal{D} \\). Theorem 1 gives a theoretical explanation of the \"interploate well\" phenomenon for deep \\( Q \\) functions under the NTK assumptions (see Appendix B.2 for detailed proofs):" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.728, + 0.826, + 0.772 + ], + "angle": 0, + "content": "Theorem 1. (Value difference of deep \\( Q \\) function for interpolated and extrapolated data). Under the NTK regime, given an unseen interpolated data \\( x_{in} \\) and an extrapolated data \\( x_{out} \\), then the value difference of deep \\( Q \\) function for interpolated and extrapolated input data can be bounded as:" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.777, + 0.825, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| Q _ {\\theta} (x _ {i n}) - Q _ {\\theta} (\\mathrm {P r o j} _ {\\mathcal {D}} (x _ {i n})) \\| \\leq C _ {1} (\\sqrt {\\min (\\| x _ {i n} \\| , \\| \\mathrm {P r o j} _ {\\mathcal {D}} (x _ {i n}) \\|)} \\sqrt {d _ {x _ {i n}}} + 2 d _ {x _ {i n}}) \\\\ \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\| x _ {i n} \\| , \\| \\operatorname {P r o j} _ {\\mathcal {D}} \\left(x _ {i n}\\right) \\|\\right)} \\sqrt {B} + 2 B\\right) \\tag {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.82, + 0.826, + 0.839 + ], + "angle": 0, + "content": "\\[\n\\| Q _ {\\theta} (x _ {o u t}) - Q _ {\\theta} (\\operatorname {P r o j} _ {\\mathcal {D}} (x _ {o u t})) \\| \\leq C _ {1} (\\sqrt {\\min (\\| x _ {o u t} \\| , \\| \\operatorname {P r o j} _ {\\mathcal {D}} (x _ {o u t}) \\|)} \\sqrt {d _ {x _ {o u t}}} + 2 d _ {x _ {o u t}}) \\quad (3)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.842, + 0.825, + 0.873 + ], + 
"angle": 0, + "content": "where \\( d_{x_{in}} = \\| x_{in} - \\mathrm{Proj}_{\\mathcal{D}}(x_{in})\\| \\leq \\max_{x_i\\in \\mathcal{D}}\\| x_{in} - x_i\\| \\leq B \\) and \\( d_{x_{out}} = \\| x_{out} - \\mathrm{Proj}_{\\mathcal{D}}(x_{out})\\| \\) are distances of \\( x_{in} \\) and \\( x_{out} \\) to the nearest data points in dataset \\( \\mathcal{D} \\). \\( B \\) and \\( C_1 \\) are finite constants." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Theorem 1 shows that given an unseen input \\( x \\), \\( Q_{\\theta}(x) \\) can be controlled by in-sample \\( Q \\) value \\( Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x)) \\) and the distance \\( \\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\| \\). The smaller the distance, the more controllable the output of deep \\( Q \\) functions. Therefore, because the distance to dataset is strictly bounded (at" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.162 + ], + "angle": 0, + "content": "most \\( B \\) for interpolated data), the approximated \\( Q \\) values at interpolated data as well as extrapolated data near the boundaries of the convex hull formed by the dataset cannot be too far off. Moreover, as \\( d_{x_{out}} \\) can take substantially larger values than \\( d_{x_{in}} \\), interpolated data generally enjoys a tighter bound compared with extrapolated data, if the dataset only narrowly covers a large state-action space." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.825, + 0.239 + ], + "angle": 0, + "content": "Empirical observations in Figure 2 and Theorem 1 both demonstrate that data geometry can induce different approximation error accumulation patterns for deep \\( Q \\) functions. While approximation error accumulation is generally detrimental to offline RL, a fine-grained analysis is missing in previous studies about where value function can approximate well. We argue that it is necessary to take data geometry into consideration when designing less conservative offline RL algorithms." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.257, + 0.579, + 0.273 + ], + "angle": 0, + "content": "3 GENERALIZABLE OFFLINE RL FRAMEWORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.289, + 0.827, + 0.348 + ], + "angle": 0, + "content": "In this section, we present our algorithm DOGE (Distance-sensitive Offline RL with better GEneralization). By introducing a specially designed state-conditioned distance function to characterize the geometry of offline datasets, we can construct a very simple, less conservative and also more generalizable offline RL algorithm upon standard actor-critic framework." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.361, + 0.522, + 0.377 + ], + "angle": 0, + "content": "3.1 STATE-CONDITIONED DISTANCE FUNCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.388, + 0.825, + 0.472 + ], + "angle": 0, + "content": "As revealed in Theorem 1, the sample-to-dataset distance plays an important role in measuring the controllability of \\( Q \\) values. However, given an arbitrary state-action sample \\( (s,a) \\), naively computing its distance to the closest data point in a large dataset can be costly and impractical. Ideally, we prefer to have a learnable distance function which also has the ability to reflect the overall dataset geometry. 
Based on this intuition, we design a state-conditioned distance function that can be learned in an elegantly simple supervised manner with desirable properties." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.478, + 0.827, + 0.521 + ], + "angle": 0, + "content": "Specifically, we learn the state-conditioned distance function \\( g(s,a) \\) by solving the following regression problem, with state-action pairs \\( (s,a)\\sim \\mathcal{D} \\) and synthetic noise actions sampled from the uniform distribution over the full action space \\( \\mathcal{A} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.526, + 0.826, + 0.55 + ], + "angle": 0, + "content": "\\[\n\\min _ {g} \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\mathbb {E} _ {\\hat {a} \\sim U n i f (\\mathcal {A})} [ \\| a - \\hat {a} \\| - g (s, \\hat {a}) ] ^ {2} \\right] \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.557, + 0.825, + 0.614 + ], + "angle": 0, + "content": "In practical implementation, for each \\((s,a)\\sim \\mathcal{D}\\), we sample \\(N\\) noise actions uniformly in the action space \\(\\mathcal{A}\\) to train \\(g(\\cdot)\\). More implementation details can be found in Appendix E. Moreover, with the optimization objective defined in Eq. (4), we can show that the optimal state-conditioned distance function has two desirable properties (proofs can be found in Appendix C):" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.618, + 0.825, + 0.647 + ], + "angle": 0, + "content": "Property 1. The optimal state-conditioned distance function of Eq. (4) is convex w.r.t. 
actions and is an upper bound of the distance to the state-conditioned centroid \\( a_{o}(s) \\) of training dataset \\( \\mathcal{D} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.652, + 0.826, + 0.69 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} g ^ {*} (s, \\hat {a}) = \\mathbb {E} _ {a \\sim U n i f (\\mathcal {A})} [ C (s, a) \\| \\hat {a} - a \\| ] \\\\ \\geq \\left\\| \\hat {a} - \\mathbb {E} _ {a \\sim U n i f (\\mathcal {A})} [ C (s, a) \\cdot a ] \\right\\| = \\left\\| \\hat {a} - a _ {o} (s) \\right\\|, \\quad \\forall \\hat {a} \\in \\mathcal {A}, s \\in \\mathcal {D} \\tag {5} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.719, + 0.825, + 0.783 + ], + "angle": 0, + "content": "where \\(C(s,a) = \\frac{\\mu(s,a)}{\\mathbb{E}_{a\\sim Unif(\\mathcal{A})}\\mu(s,a)}\\geq 0\\), \\(\\mu (s,a)\\) is state-action distribution of dataset \\(\\mathcal{D}\\). Given a state \\(s\\in \\mathcal{D}\\), the state-conditioned centroid is defined as \\(a_{o}(s) = \\mathbb{E}_{a\\sim Unif(\\mathcal{A})}[C(s,a)\\cdot a]\\). Since \\(L_{2}\\)-norm is convex and the non-negative combination of convex functions is still convex, \\(g^{*}(s,\\hat{a})\\) is also a convex function w.r.t. \\(\\hat{a}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.787, + 0.827, + 0.817 + ], + "angle": 0, + "content": "Property 2. The negative gradient of the optimal state-conditioned distance function at an extrapolated action \\(\\hat{a}\\), \\(-\\nabla_{\\hat{a}}g^{*}(s,\\hat{a})\\), points inside the convex hull of the dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "From Property 1, we can see that the optimal state-conditioned distance function characterizes data geometry and outputs an upper bound of the distance to the state-conditioned centroid of the training dataset. 
Property 2 indicates that if we use the learned distance function as a policy constraint, it can drive the learned policy to move inside the convex hull of training data. We visualize the value of the trained state-conditioned distance function in Figure 3. It is clear that the learned distance function can accurately predict the sample-to-dataset centroid distance. By utilizing such distance function, we can constrain the policy based on the global geometric information of training datasets. This" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.105, + 0.422, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.235, + 0.249, + 0.368, + 0.262 + ], + "angle": 0, + "content": "(a) Illustration of \\(g^{*}(s,a)\\)" + }, + { + "type": "image", + "bbox": [ + 0.432, + 0.106, + 0.558, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.432, + 0.176, + 0.557, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.486, + 0.249, + 0.788, + 0.262 + ], + "angle": 0, + "content": "(b) Visualization of \\(g^{*}(s,a)\\) trained on diverse 2D datasets" + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.107, + 0.687, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.175, + 0.688, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.69, + 0.107, + 0.812, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.69, + 0.175, + 0.811, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.278, + 0.825, + 0.318 + ], + 
"angle": 0, + "content": "Figure 3: Illustration of the state-conditioned distance function. The output of the optimal distance function is the non-negative combination of the distances to all training data. \\( G \\) is the threshold in Eq. (6) In (b), Offline data are marked as white dots." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.331, + 0.825, + 0.388 + ], + "angle": 0, + "content": "desirable property is non-obtainable by simply constraining the policy based on sample-to-sample distance such as the MSE loss between policy generated and dataset actions, which can only bring local geometric information. Moreover, the learned distance function can not only predict well at in-distribution states but also generalize well at OOD states." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.404, + 0.638, + 0.418 + ], + "angle": 0, + "content": "3.2 DISTANCE-SENSITIVE OFFLINE REINFORCEMENT LEARNING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.826, + 0.487 + ], + "angle": 0, + "content": "Capturing the geometry of offline datasets, we now construct a minimalist distance-sensitive offline RL framework, by simply plugging the state-conditioned distance function as a policy constraint into standard online actor-critic methods (such as TD3 (Fujimoto et al., 2018) and SAC (Haarnoja et al., 2018)). This results in the following policy maximization objective:" + }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.489, + 0.825, + 0.511 + ], + "angle": 0, + "content": "\\[\n\\pi = \\arg \\max _ {\\pi} \\mathbb {E} _ {s \\sim \\mathcal {D}, a \\sim \\pi (\\cdot | s)} [ Q (s, a) ] \\quad s. t. \\mathbb {E} _ {s \\sim \\mathcal {D}, a \\sim \\pi (\\cdot | s)} [ g (s, a) ] \\leq G \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.515, + 0.825, + 0.584 + ], + "angle": 0, + "content": "where \\(G\\) is a task-dependent threshold varying across tasks. 
In our method, we adopt a non-parametric treatment by setting \\(G\\) as the mean output (50% quantile) of the learned distance function on the training dataset, i.e., \\(\\mathbb{E}_{(s,a)\\sim \\mathcal{D}}[g(s,a)]\\), which is approximated over mini-batch samples to reduce computational complexity (see Appendix G for ablation on \\(G\\)). The constrained optimization problem in Eq. (6) can be reformulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.249, + 0.588, + 0.825, + 0.609 + ], + "angle": 0, + "content": "\\[\n\\pi = \\arg \\max _ {\\pi} \\min _ {\\lambda} \\mathbb {E} _ {s \\sim \\mathcal {D}, a \\sim \\pi (\\cdot | s)} [ \\beta Q (s, a) - \\lambda (g (s, a) - G) ] \\quad s. t. \\lambda \\geq 0 \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.614, + 0.825, + 0.702 + ], + "angle": 0, + "content": "where \\(\\lambda\\) is the Lagrangian multiplier, which is auto-adjusted using dual gradient descent. Following TD3+BC (Fujimoto & Gu, 2021), \\(Q\\) values are rescaled by \\(\\beta = \\frac{\\alpha}{\\frac{1}{n}\\sum_{i=1}^{n}|Q(s_i,a_i)|}\\) to balance \\(Q\\) function maximization and policy constraint satisfaction, controlled by a hyperparameter \\(\\alpha\\). To reduce computations, the denominator of \\(\\beta\\) is approximated over mini-batch of samples. The resulting algorithm is easy to implement. In our experiments, we use TD3. Please refer to Appendix E for implementation details." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.718, + 0.616, + 0.731 + ], + "angle": 0, + "content": "3.3 RELAXATION WITH BELLMAN-CONSISTENT COEFFICIENT" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.743, + 0.722, + 0.757 + ], + "angle": 0, + "content": "3.3.1 BELLMAN-CONSISTENT COEFFICIENT AND CONSTRAINED POLICY SET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.767, + 0.825, + 0.838 + ], + "angle": 0, + "content": "The key difference between DOGE and other policy constraint methods lies in that DOGE relaxes the strong full coverage assumption1 on offline datasets and allows exploitation on generalizable OOD areas. To relax the unrealistic full-coverage assumption, we resort to a weaker condition proposed by (Xie et al., 2021a), the Bellman-consistent coefficient (Definition 1), to measure how well Bellman errors can transfer to different distributions (Theorem 2)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.844, + 0.825, + 0.903 + ], + "angle": 0, + "content": "Denote \\( \\| f\\|_{2,\\mu}^2 \\coloneqq \\mathbb{E}_\\mu [\\| f\\|^2] \\); \\( \\mathcal{T}^\\pi Q \\) is the Bellman operator of policy \\( \\pi \\), defined as \\( \\mathcal{T}^\\pi Q(s,a) \\coloneqq r(s,a) + \\gamma \\mathbb{E}_{a'\\sim \\pi (\\cdot |s'),s'\\sim \\mathcal{P}(\\cdot |s,a)}[Q(s',a')] \\coloneqq r(s,a) + \\gamma \\mathbb{P}^\\pi [Q(s',a')] \\). \\( \\mathbb{P}^\\pi [\\cdot ] \\) is the brief notation for \\( \\mathbb{E}_{a'\\sim \\pi (\\cdot |s'),s'\\sim \\mathcal{P}(\\cdot |s,a)}[\\cdot ] \\). \\( \\mathcal{F} \\) is the function class of \\( Q \\) networks. The Bellman-consistent coefficient is defined as:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.909, + 0.824, + 0.929 + ], + "angle": 0, + "content": "\\(\\sup_{s,a}\\frac{v(s,a)}{\\mu(s,a)} < \\infty, v\\) and \\(\\mu\\) are marginal distributions of the learned policy and the dataset (Le et al., 2019)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Definition 1. (Bellman-consistent coefficient). We define \\(\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)\\) to measure the distributional shift from an arbitrary distribution \\(v\\) to data distribution \\(\\mu\\), w.r.t. \\(\\mathcal{F}\\) and \\(\\pi\\)," + }, + { + "type": "equation", + "bbox": [ + 0.369, + 0.135, + 0.826, + 0.172 + ], + "angle": 0, + "content": "\\[\n\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) := \\sup _ {Q \\in \\mathcal {F}} \\frac {\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , v} ^ {2}}{\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , \\mu} ^ {2}} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.188, + 0.825, + 0.258 + ], + "angle": 0, + "content": "This definition captures the generalization performance of function approximation across different distributions. Intuitively, a small value of \\(\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)\\) means Bellman errors for policy \\(\\pi\\) can accurately transfer from distribution \\(\\mu\\) to \\(v\\). This suggests that Bellman errors can transfer well between two distributions even if a large discrepancy exists, as long as the Bellman-consistent coefficient is small." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.265, + 0.8, + 0.28 + ], + "angle": 0, + "content": "Based on Definition 1, we introduce the definition of Bellman-consistent constrained policy set." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.282, + 0.826, + 0.325 + ], + "angle": 0, + "content": "Definition 2. (Bellman-consistent constrained policy set). 
We define the Bellman-consistent constrained policy set as \\(\\Pi_{\\mathcal{B}}\\). The Bellman-consistent coefficient under the transition induced by \\(\\Pi_{\\mathcal{B}}\\) can be bounded by some finite constants \\(l(k)\\):" + }, + { + "type": "equation", + "bbox": [ + 0.424, + 0.327, + 0.826, + 0.343 + ], + "angle": 0, + "content": "\\[\n\\mathcal {B} \\left(\\rho_ {k}, \\mu , \\mathcal {F}, \\pi\\right) \\leq l (k) \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.345, + 0.825, + 0.375 + ], + "angle": 0, + "content": "where \\(\\rho_{k} = \\rho_{0}P^{\\pi_{1}}\\ldots P^{\\pi_{k}},\\forall \\pi_{1},\\ldots ,\\pi_{k}\\in \\Pi_{\\mathcal{B}},\\rho_{0}\\) is the initial state-action distribution and \\(P^{\\pi_i}\\) is the transition operator induced by \\(\\pi_{i}\\), i.e., \\(P^{\\pi_i}(s',a'|s,a) = \\mathcal{P}(s'|s,a)\\pi_i(a'|s')\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.383, + 0.827, + 0.427 + ], + "angle": 0, + "content": "We denote the constrained Bellman operator induced by \\(\\Pi_{\\mathcal{B}}\\) as \\(\\mathcal{T}^{\\Pi_B}\\), \\(\\mathcal{T}^{\\Pi_B}Q(s,a) := r(s,a) + \\max_{\\pi \\in \\Pi_B}\\gamma \\mathbb{P}^\\pi [Q(s',a')]\\). \\(\\mathcal{T}^{\\Pi_B}\\) can be seen as a Bellman operator on a redefined MDP, thus theoretical results of MDP also carry over, such as contraction mapping and existence of a fixed point." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.44, + 0.769, + 0.455 + ], + "angle": 0, + "content": "3.3.2 BELLMAN CONSISTENT COEFFICIENT AND PERFORMANCE BOUND OF DOGE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.464, + 0.825, + 0.507 + ], + "angle": 0, + "content": "We show that the policy set induced by DOGE is essentially a Bellman-consistent policy set defined in Definition 2. Meanwhile, the distance constraint in DOGE can produce a small value of \\(\\mathcal{B}\\) and hence guarantee the learned policy deviates only to those generalizable areas." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.509, + 0.825, + 0.539 + ], + "angle": 0, + "content": "Theorem 2. (Upper bound of Bellman-consistent coefficient). Under the NTK assumption, the Bellman-consistent coefficient \\(\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)\\) is upper bounded as:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.541, + 0.825, + 0.615 + ], + "angle": 0, + "content": "\\[\n\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) \\leq \\frac {1}{\\epsilon_ {\\mu}} \\left\\| \\underbrace {(1 - \\gamma) Q \\left(s _ {o} , a _ {o}\\right) + R _ {\\max }} _ {\\mathcal {B} _ {1}} + \\underbrace {C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + d _ {1}\\right)} _ {\\mathcal {B} _ {2}} + \\underbrace {(2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + d _ {2}\\right)} _ {\\mathcal {B} _ {3}} \\right\\| _ {2, v} ^ {2} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.614, + 0.826, + 0.677 + ], + "angle": 0, + "content": "where we denote \\( x = (s, a) \\) and \\( x' = (s', a') \\). \\( x_o = \\mathbb{E}_{x \\sim \\mathcal{D}}[x] \\) is the centroid of offline dataset. \\( d_1 = \\| x - x_o \\| \\) and \\( d_2 = \\| x' - x_o \\| \\) are the sample-to-centroid distances. \\( C_2 = \\sqrt{\\sup_{x \\in S \\times \\mathcal{A}} \\| x \\|} \\) is related to the upper bound of the input scale. \\( \\epsilon_\\mu \\) is the lower bound of Bellman error (square) for \\( \\pi \\) under distribution \\( \\mu \\), i.e., \\( \\epsilon_\\mu \\leq \\| Q - T^\\pi Q \\|_{2,\\mu}^2 \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.685, + 0.827, + 0.801 + ], + "angle": 0, + "content": "The RHS of Eq. (10) contains four parts: \\(\\frac{1}{\\epsilon_{\\mu}}\\), \\(\\mathcal{B}_1\\), \\(\\mathcal{B}_2\\) and \\(\\mathcal{B}_3\\). It is reasonable to assume \\(\\epsilon_{\\mu} > 0\\), because of the approximation error of \\(Q\\) networks and the distribution mismatch between \\(\\mu\\) and \\(\\pi\\). 
\\(\\mathcal{B}_1\\) is only dependent on the \\(Q\\) value \\(Q(s_o, a_o)\\) at the centroid of the dataset and the max reward \\(R_{\\mathrm{max}}\\). \\(\\mathcal{B}_2\\) is related to distance \\(d_1\\) and distribution \\(v\\). \\(\\mathcal{B}_3\\) is related to \\(d_2\\), \\(v\\) and \\(\\mathbb{P}^{\\pi}\\). To be mentioned, the distance regularization in DOGE compels the learned policy to output the action that is near the state-conditioned centroid of dataset, thus \\(\\mathcal{B}_2\\) and \\(\\mathcal{B}_3\\) can be driven to small values. Therefore, the RHS of Eq. (10) can be bounded by finite constants under DOGE, which shows that the constrained policy set induced by DOGE is essentially a Bellman-consistent constrained policy set." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.807, + 0.825, + 0.836 + ], + "angle": 0, + "content": "Then, the performance gap between the policy learned by DOGE and the optimal policy can be bounded as given in Theorem 3. See Appendix D.1 and D.2 for the proof of Theorem 2 and 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.837, + 0.827, + 0.881 + ], + "angle": 0, + "content": "Theorem 3. (Performance bound of the learned policy by DOGE). Let \\( Q^{\\Pi_{\\mathcal{B}}} \\) be the fixed point of \\( \\mathcal{T}^{\\Pi_{\\mathcal{B}}} \\), i.e., \\( Q^{\\Pi_{\\mathcal{B}}} = \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{\\Pi_{\\mathcal{B}}} \\), and \\( \\epsilon_k = Q^k - \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{k-1} \\) is the Bellman error at the \\( k \\)-th iteration. \\( \\| f \\|_{\\mu} := \\mathbb{E}_{\\mu}[\\| f \\|] \\). 
The performance of the learned policy \\( \\pi_n \\) is bounded by:" + }, + { + "type": "equation", + "bbox": [ + 0.265, + 0.884, + 0.826, + 0.918 + ], + "angle": 0, + "content": "\\[\n\\lim _ {n \\rightarrow \\infty} \\| Q ^ {*} - Q ^ {\\pi_ {n}} \\| _ {\\rho_ {0}} \\leq \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} \\left[ L \\left(\\Pi_ {\\mathcal {B}}\\right) \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {\\mu} + \\frac {1 - \\gamma}{2 \\gamma} \\alpha \\left(\\Pi_ {\\mathcal {B}}\\right)\\right] \\tag {11}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.103, + 0.825, + 0.162 + ], + "angle": 0, + "content": "where \\( L(\\Pi_{\\mathcal{B}}) = \\sqrt{(1 - \\gamma)^2 \\sum_{k=1}^{\\infty} k \\gamma^{k-1} l(k)} \\), which is similar to the concentrability coefficient in BEAR (Kumar et al., 2019) but in a different form. Note that \\( l(k) \\) is related to the RHS of Eq. (10) and can be driven to a small value by DOGE according to Theorem 2. \\( \\alpha(\\Pi_{\\mathcal{B}}) = \\| \\mathcal{T}^{\\Pi_{\\mathcal{B}}} Q^{\\Pi_{\\mathcal{B}}} - \\mathcal{T} Q^{*} \\|_{\\infty} \\) is the suboptimality constant, which is similar to \\( \\alpha(\\Pi) = \\| \\mathcal{T}^{\\Pi} Q^{\\Pi} - \\mathcal{T} Q^{*} \\|_{\\infty} \\) in BEAR." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.827, + 0.286 + ], + "angle": 0, + "content": "Compared with BEAR, DOGE allows a policy shift to some generalizable OOD areas and relaxes the strong full-coverage assumption. In addition, we have \\( L(\\Pi_{\\mathcal{B}}) \\leq L(\\Pi) \\propto \\frac{\\rho_0 P^{\\pi_1} \\dots P^{\\pi_k}}{\\mu(s, a)} \\), where \\( L(\\Pi) \\) is the concentrability coefficient in BEAR. 
This is evident when \\( \\mu(s, a) = 0 \\) and \\( \\rho_0 P^{\\pi_1} \\dots P^{\\pi_k}(s, a) > 0 \\), \\( L(\\Pi_{\\mathcal{B}}) \\) can be bounded by finite constants but \\( L(\\Pi) \\to \\infty \\). Moreover, as \\( \\Pi_{\\mathcal{B}} \\) extends the policy set to cover more generalizable OOD areas (\\( \\Pi \\subseteq \\Pi_{\\mathcal{B}} \\)) and produces a larger feasible region for optimization, lower degree of suboptimality can be achieved (i.e., \\( \\alpha(\\Pi_{\\mathcal{B}}) \\leq \\alpha(\\Pi) \\)) compared to only performing optimization on \\( \\Pi \\). Therefore, we can see that DOGE enjoys a tighter performance bound than previous more conservative methods when allowed to exploit generalizable OOD areas." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.314, + 0.33, + 0.33 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.351, + 0.825, + 0.424 + ], + "angle": 0, + "content": "For evaluation, We compare DOGE and prior offline RL methods over D4RL Mujoco and AntMaze tasks (Fu et al., 2020). Mujoco is a standard benchmark commonly used in previous work. AntMaze tasks are far more challenging due to the non-markovian and mixed-quality offline dataset, the stochastic property of environments, and the high dimensional state-action space. Implementation details, experimental setup and additional experimental results can be found in Appendix E and F." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.448, + 0.402, + 0.463 + ], + "angle": 0, + "content": "4.1 COMPARISON WITH SOTA" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.478, + 0.827, + 0.577 + ], + "angle": 0, + "content": "We compare DOGE with model-free SOTA methods, such as TD3+BC (Fujimoto & Gu, 2021), CQL (Kumar et al., 2020b) and IQL (Kostrikov et al., 2021b). For fairness, we use the “-v2” datasets for all methods. For most Mujoco tasks, we report the scores from the IQL paper. 
We obtain the other results using the authors' or our implementations. For AntMaze tasks, we obtain the results of CQL, TD3+BC, and IQL using the authors' implementations. For BC (Pomerleau, 1988), BCQ (Fujimoto et al., 2019) and BEAR (Kumar et al., 2019), we report the scores from (Fu et al., 2020). All methods are evaluated over the final 10 evaluations for Mujoco tasks and 100 for AntMaze tasks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.583, + 0.827, + 0.683 + ], + "angle": 0, + "content": "Table 1 shows that DOGE achieves comparable or better performance than SOTA methods on most Mujoco and AntMaze tasks. Compared to other policy constraint approaches such as BCQ, BEAR and TD3+BC, DOGE is the first policy constraint method to successfully solve AntMaze-medium and AntMaze-large tasks. Note that IQL is an algorithm designed for multi-step dynamics programming and attains strong advantage on AntMaze tasks. Nevertheless, DOGE can compete with or even surpass IQL on most AntMaze tasks, by only employing a generalization-oriented policy constraint. These results illustrate the benefits of allowing policy learning on generalizable OOD areas." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.707, + 0.462, + 0.721 + ], + "angle": 0, + "content": "4.2 EVALUATION ON GENERALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.736, + 0.825, + 0.807 + ], + "angle": 0, + "content": "To evaluate the generalization ability of DOGE, we remove small areas of data from the critical pathways to the destination in AntMaze medium and large tasks, to construct an OOD dataset. The two removed areas reside in close proximity to the trajectory data (see Figure 1). We evaluate representative methods (such as TD3+BC, CQL, IQL) and DOGE on these modified datasets. Figure 4 shows the comparison before and after data removal." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.813, + 0.827, + 0.926 + ], + "angle": 0, + "content": "For such a dataset with partial state-action space coverage, existing policy constraint methods tend to over-constrain the policy to stay inside the support of a dataset, where the optimal policy is not well-covered. Value regularization methods suffer from deteriorated generalization performance, as the value function is distorted to assign low value at all OOD areas. In-sample learning methods are only guaranteed to retain the best policy within the partially covered dataset (Kostrikov et al., 2021b). As shown in Figure 4, all these methods struggle to generalize well on the missing areas and suffer severe performance drop, while DOGE maintains competitive performance. This further demonstrates the benefits of relaxing over-conservatism in existing methods." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.219, + 0.102, + 0.779, + 0.117 + ], + "angle": 0, + "content": "Table 1: Average normalized scores and standard deviations over 5 seeds on benchmark tasks" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.127, + 0.825, + 0.43 + ], + "angle": 0, + "content": "
DatasetBCBCQBEARTD3+BCCQLIQLDOGE(ours)
hopper-r4.97.114.28.5±0.68.3±0.27.9±0.421.1±12.6
halfcheetah-r0.28.815.111.0±1.120.0±0.411.2±2.917.8±1.2
walker2d-r1.76.510.71.6±1.78.3±0.15.9±0.50.9 ± 2.4
hopper-m52.956.751.959.3±4.258.5±2.166.2±5.798.6±2.1
halfcheetah-m42.647.041.048.3±0.344.0±5.447.4±0.245.3±0.6
walker2d-m75.372.680.983.7±2.172.5±0.878.3±8.786.8±0.8
hopper-m-r18.153.337.360.9±18.895.0±6.494.7±8.676.2±17.7
halfcheetah-m-r36.640.429.744.6±0.545.5±0.544.2±1.242.8±0.6
walker2d-m-r26.052.118.581.8±5.577.2±5.573.8±7.187.3±2.3
hopper-m-e52.581.817.798.0±9.4105.4±6.891.5±14.3102.7±5.2
halfcheetah-m-e55.289.138.990.7±4.391.6±2.886.7±5.378.7±8.4
walker2d-m-e107.5109.595.4110.1±0.5108.8±0.7109.6±1.0110.4±1.5
locomotion total
antmaze-u65.078.973.091.3±5.784.8±2.388.2±1.997.0±1.8
antmaze-u-d55.055.061.054.6±16.243.3±5.466.7±4.063.5±9.3
antmaze-m-p0.00.00.00.065.2±4.870.4±5.380.6±6.5
antmaze-m-d0.00.08.00.054.0±11.774.6±3.277.6±6.1
antmaze-l-p0.06.70.00.018.8±15.343.5±4.548.2±8.1
antmaze-l-d0.02.20.00.031.6±9.545.6±7.636.4±9.1
antmaze-total120.0142.8142.0145.9±21.9297.7±49.0389.0±26.5403.3±40.9
" + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.445, + 0.314, + 0.455 + ], + "angle": 0, + "content": "Policy constraint" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.464, + 0.333, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.365, + 0.445, + 0.482, + 0.456 + ], + "angle": 0, + "content": "Value regularization" + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.464, + 0.495, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.533, + 0.445, + 0.643, + 0.456 + ], + "angle": 0, + "content": "In-sample learning" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.464, + 0.657, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.711, + 0.445, + 0.786, + 0.456 + ], + "angle": 0, + "content": "DOGE (Ours)" + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.464, + 0.82, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.561, + 0.334, + 0.652 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.561, + 0.497, + 0.651 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.561, + 0.659, + 0.651 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.561, + 0.822, + 0.652 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.666, + 0.825, + 0.694 + ], + "angle": 0, + "content": "Figure 4: Generalization performance after removing data from AntMaze large tasks (see Appendix F.1 for detailed setup and additional results on AntMaze medium tasks)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.751, + 0.342, + 0.765 + ], + "angle": 0, + "content": "4.3 ABLATION STUDY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.791, + 0.827, + 0.849 + ], + "angle": 0, + "content": "We conduct ablation studies to evaluate the impact of the hyperparameter \\(\\alpha\\), the non-parametric distance threshold \\(G\\) in Eq. (6), and the number of noise actions \\(N\\) used to train the distance function. For \\(\\alpha\\), we add or subtract 2.5 to the original value; for \\(G\\), we choose \\(30\\%\\), \\(50\\%\\), \\(70\\%\\) and \\(90\\%\\) upper quantile of the distance values in mini-batch samples; for \\(N\\), we choose \\(N = 10, 20, 30\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Compared to \\( N \\) and \\( \\alpha \\), we find that \\( G \\) has a more significant impact on the performance. Figure 5b shows that an overly restrictive \\( G \\) (30% quantile) results in a policy set too small to cover near-optimal policies. A more tolerant \\( G \\), on the other hand, is unlikely to cause excessive error accumulation and achieves relatively good performance. In addition, Figure 5a and Figure 5c show that performance is stable across variations of hyperparameters, indicating that our method is hyperparameter-robust." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.114, + 0.382, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.188, + 0.238, + 0.373, + 0.251 + ], + "angle": 0, + "content": "(a) \\(\\alpha\\) has little effect on results" + }, + { + "type": "image", + "bbox": [ + 0.392, + 0.114, + 0.6, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.238, + 0.596, + 0.251 + ], + "angle": 0, + "content": "(b) Small \\(G\\) is harmful to results" + }, + { + "type": "image", + "bbox": [ + 0.612, + 0.114, + 0.818, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.624, + 0.238, + 0.81, + 0.251 + ], + "angle": 0, + "content": "(c) \\(N\\) has little effect on results" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.263, + 0.825, + 0.29 + ], + "angle": 0, + "content": "Figure 5: Ablation results. The default parameters in our implementation are marked by \\(*\\). The error bars indicate min and max over 5 seeds. See Appendix G for more detailed ablation studies." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.302, + 0.346, + 0.318 + ], + "angle": 0, + "content": "5 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.342, + 0.828, + 0.538 + ], + "angle": 0, + "content": "To prevent distributional shift and exploitation error accumulation when inferring the value function at unseen samples, a direct approach is to restrict policy learning from deviating to OOD areas. 
To make sure the learned policy stays inside the distribution or support of training data, these policy constraint methods either carefully parameterize the learned policy (Fujimoto et al., 2019; Matsushima et al., 2020), or use explicit divergence penalties (Kumar et al., 2019; Wu et al., 2019; Fujimoto & Gu, 2021; Xu et al., 2021; Dadashi et al., 2021) or implicit divergence constraints (Peng et al., 2019; Nair et al., 2020; Xu et al., 2022a). The theories behind these methods typically assume full state-action space coverage of the offline datasets (Le et al., 2019; Kumar et al., 2019).
There are also uncertainty-based and model-based methods that regularize the value function or policy with epistemic uncertainty estimated from model or value function (Janner et al., 2019; Yu et al., 2020; Uehara & Sun, 2021; Wu et al., 2021; Zhan et al., 2022; Bai et al., 2021). However, the estimation of the epistemic uncertainty of DNN is still an under-explored area, with results highly dependent on evaluation methods and the structure of DNN." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.732, + 0.321, + 0.746 + ], + "angle": 0, + "content": "6 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.827, + 0.925 + ], + "angle": 0, + "content": "In this study, we provide new insights on the relationship between approximation error of deep \\( Q \\) functions and geometry of offline datasets. Through empirical and theoretical analysis, we find that deep \\( Q \\) functions attain relatively low approximation error when interpolating rather than extrapolating the dataset. This phenomenon motivates us to design a new algorithm, DOGE, to empower policy learning on OOD samples within the convex hull of training data. DOGE is simple yet elegant, by plugging a dataset geometry-derived distance constraint into TD3. With such a minimal surgery, DOGE outperforms existing model-free offline RL methods on most D4RL tasks. We theoretically prove that DOGE enjoys a tighter performance bound compared with existing policy constraint methods under the more realistic partial-coverage assumption. Empirical results and theoretical analysis suggest the necessity of re-thinking the conservatism principle in offline RL algorithm design, and points to sufficient exploitation of the generalization ability of deep \\( Q \\) functions." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.105, + 0.33, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.128, + 0.828, + 0.2 + ], + "angle": 0, + "content": "This work is supported by National Key Research and Development Program of China under Grant (2022YFB2502904). This work is also supported by Baidu Inc. through Apollo-AIR Joint Research Center. The authors would also like to thank the anonymous reviewers for their feedback on the manuscripts. Jianxiong Li would like to thank Zhixu Du, Yimu Wang, Li Jiang, Haoyi Niu, Hao Zhao and all colleagues in AIR-Dream group for valuable discussions." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.219, + 0.287, + 0.234 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.242, + 0.829, + 0.286 + ], + "angle": 0, + "content": "Zeyuan Allen-Zhu, Yuanzhi Li, and Yingyu Liang. Learning and generalization in overparameterized neural networks, going beyond two layers. Advances in neural information processing systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.293, + 0.826, + 0.338 + ], + "angle": 0, + "content": "Gaon An, Seungyong Moon, Jang-Hyun Kim, and Hyun Oh Song. Uncertainty-based offline reinforcement learning with diversified q-ensemble. Advances in neural information processing systems, 34:7436-7447, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.345, + 0.827, + 0.39 + ], + "angle": 0, + "content": "Anonymous. Lightweight uncertainty for offline reinforcement learning via bayesian posterior. In Submitted to The Eleventh International Conference on Learning Representations, 2023. 
URL https://openreview.net/forum?id=55Eet8WGJTv. under review." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.396, + 0.827, + 0.441 + ], + "angle": 0, + "content": "Sanjeev Arora, Simon Du, Wei Hu, Zhiyuan Li, and Ruosong Wang. Fine-grained analysis of optimization and generalization for overparameterized two-layer neural networks. In International Conference on Machine Learning, pp. 322-332. PMLR, 2019a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.448, + 0.826, + 0.492 + ], + "angle": 0, + "content": "Sanjeev Arora, Simon S Du, Wei Hu, Zhiyuan Li, Russ R Salakhutdinov, and Ruosong Wang. On exact computation with an infinitely wide neural net. Advances in Neural Information Processing Systems, 32, 2019b." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.499, + 0.827, + 0.544 + ], + "angle": 0, + "content": "Chenjia Bai, Lingxiao Wang, Zhuoran Yang, Zhi-Hong Deng, Animesh Garg, Peng Liu, and Zhao ran Wang. Pessimistic bootstrapping for uncertainty-driven offline reinforcement learning. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.551, + 0.827, + 0.581 + ], + "angle": 0, + "content": "Etienne Barnard and LFA Wessels. Extrapolation and interpolation in neural network classifiers. IEEE Control Systems Magazine, 12(5):50-53, 1992." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.589, + 0.825, + 0.62 + ], + "angle": 0, + "content": "Peter L Bartlett and Shahar Mendelson. Rademacher and gaussian complexities: Risk bounds and structural results. Journal of Machine Learning Research, 3(Nov):463-482, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.626, + 0.827, + 0.657 + ], + "angle": 0, + "content": "Alberto Bietti and Julien Mairal. On the inductive bias of neural tangent kernels. Advances in Neural Information Processing Systems, 32, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.665, + 0.825, + 0.695 + ], + "angle": 0, + "content": "David Brandfonbrener, Will Whitney, Rajesh Ranganath, and Joan Bruna. Offline rl without off-policy evaluation. Advances in Neural Information Processing Systems, 34:4933-4946, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.702, + 0.825, + 0.734 + ], + "angle": 0, + "content": "Qi Cai, Zhuoran Yang, Jason D Lee, and Zhaoran Wang. Neural temporal-difference learning converges to global optima. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.74, + 0.825, + 0.784 + ], + "angle": 0, + "content": "Robert Dadashi, Shideh RezaEIFar, Nino Vieillard, LEOnard Hussenot, Olivier Pietquin, and Matthieu Geist. Offline reinforcement learning with pseudometric learning. In International Conference on Machine Learning, pp. 2307-2318. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.792, + 0.827, + 0.837 + ], + "angle": 0, + "content": "Jonas Degrave, Federico Felici, Jonas Buchli, Michael Neunert, Brendan Tracey, Francesco Carpanese, Timo Ewalds, Roland Hafner, Abbas Abdelmaleki, Diego de Las Casas, et al. Magnetic control of tokamak plasmas through deep reinforcement learning. Nature, 602(7897):414-419, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.844, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Jianqing Fan, Zhaoran Wang, Yuchen Xie, and Zhuoran Yang. A theoretical analysis of deep q-learning. In Learning for Dynamics and Control, pp. 486-489. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Pete Florence, Corey Lynch, Andy Zeng, Oscar A Ramirez, Ayzaan Wahid, Laura Downs, Adrian Wong, Johnny Lee, Igor Mordatch, and Jonathan Thompson. Implicit behavioral cloning. In Conference on Robot Learning, pp. 158-168. PMLR, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.172, + 0.242, + 0.829, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4rl: Datasets for deep data-driven reinforcement learning. arXiv preprint arXiv:2004.07219, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.827, + 0.172 + ], + "angle": 0, + "content": "Scott Fujimoto and Shixiang Shane Gu. A minimalist approach to offline reinforcement learning. Advances in Neural Information Processing Systems, 34, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.179, + 0.827, + 0.21 + ], + "angle": 0, + "content": "Scott Fujimoto, Herke Hoof, and David Meger. Addressing function approximation error in actor-critic methods. In International conference on machine learning, pp. 1587-1596. PMLR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.217, + 0.825, + 0.248 + ], + "angle": 0, + "content": "Scott Fujimoto, David Meger, and Doina Precup. Off-policy deep reinforcement learning without exploration. In International Conference on Machine Learning, pp. 2052-2062. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.255, + 0.826, + 0.285 + ], + "angle": 0, + "content": "Seyed Kamyar Seyed Ghasemipour, Shixiang Shane Gu, and Ofir Nachum. Why so pessimistic? estimating uncertainties for offline rl through ensembles, and why their independence matters." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.293, + 0.825, + 0.336 + ], + "angle": 0, + "content": "Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In International conference on machine learning, pp. 1861-1870. PMLR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.344, + 0.827, + 0.388 + ], + "angle": 0, + "content": "Pamela J Haley and DONALD Soloway. Extrapolation limitations of multilayer feedforward neural networks. In [Proceedings 1992] IJCNN International Joint Conference on Neural Networks, volume 4, pp. 25-30. IEEE, 1992." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.396, + 0.827, + 0.44 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.447, + 0.826, + 0.478 + ], + "angle": 0, + "content": "Arthur Jacot, Franck Gabriel, and Clément Hongler. Neural tangent kernel: Convergence and generalization in neural networks. Advances in neural information processing systems, 31, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.485, + 0.825, + 0.516 + ], + "angle": 0, + "content": "Michael Janner, Justin Fu, Marvin Zhang, and Sergey Levine. When to trust your model: Model-based policy optimization. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.524, + 0.827, + 0.567 + ], + "angle": 0, + "content": "Ilya Kostrikov, Rob Fergus, Jonathan Tompson, and Ofir Nachum. Offline reinforcement learning with fisher divergence critic regularization. In International Conference on Machine Learning, pp. 5774-5783. PMLR, 2021a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.575, + 0.825, + 0.606 + ], + "angle": 0, + "content": "Ilya Kostrikov, Ashvin Nair, and Sergey Levine. Offline reinforcement learning with implicit q-learning. In International Conference on Learning Representations, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.613, + 0.827, + 0.655 + ], + "angle": 0, + "content": "Aviral Kumar, Justin Fu, Matthew Soh, George Tucker, and Sergey Levine. Stabilizing off-policy q-learning via bootstrapping error reduction. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.665, + 0.825, + 0.708 + ], + "angle": 0, + "content": "Aviral Kumar, Rishabh Agarwal, Dibya Ghosh, and Sergey Levine. Implicit under-parameterization inhibits data-efficient deep reinforcement learning. In International Conference on Learning Representations, 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.716, + 0.827, + 0.758 + ], + "angle": 0, + "content": "Aviral Kumar, Aurick Zhou, George Tucker, and Sergey Levine. Conservative q-learning for offline reinforcement learning. Advances in Neural Information Processing Systems, 33:1179-1191, 2020b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.768, + 0.827, + 0.798 + ], + "angle": 0, + "content": "Hoang Le, Cameron Voloshin, and Yisong Yue. Batch policy learning under constraints. In International Conference on Machine Learning, pp. 3703-3712. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.806, + 0.827, + 0.837 + ], + "angle": 0, + "content": "Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.844, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Boyi Liu, Qi Cai, Zhuoran Yang, and Zhaoran Wang. 
Neural trust region/proximal policy optimization attains globally optimal policy. Advances in neural information processing systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Yao Liu, Adith Swaminathan, Alekh Agarwal, and Emma Brunskill. Provably good batch off-policy reinforcement learning without great exploration. Advances in Neural Information Processing Systems, 33:1264-1274, 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Tatsuya Matsushima, Hiroki Furuta, Yutaka Matsuo, Ofir Nachum, and Shixiang Gu. Deployment-efficient reinforcement learning via model-based offline optimization. In International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.827, + 0.199 + ], + "angle": 0, + "content": "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al. Human-level control through deep reinforcement learning. nature, 518(7540):529-533, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.207, + 0.825, + 0.236 + ], + "angle": 0, + "content": "Ashvin Nair, Murtaza Dalal, Abhishek Gupta, and Sergey Levine. Accelerating online reinforcement learning with offline datasets. arXiv preprint arXiv:2006.09359, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.244, + 0.827, + 0.274 + ], + "angle": 0, + "content": "Charles Packer, Katelyn Gao, Jernej Kos, Philipp Krahenbuhl, Vladlen Koltun, and Dawn Song. Assessing generalization in deep reinforcement learning. arXiv preprint arXiv:1810.12282, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.282, + 0.827, + 0.311 + ], + "angle": 0, + "content": "Xue Bin Peng, Aviral Kumar, Grace Zhang, and Sergey Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.32, + 0.825, + 0.349 + ], + "angle": 0, + "content": "Dean A Pomerleau. Alvinn: An autonomous land vehicle in a neural network. Advances in neural information processing systems, 1, 1988." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.357, + 0.827, + 0.401 + ], + "angle": 0, + "content": "David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, et al. Mastering the game of go without human knowledge. nature, 550(7676):354-359, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.409, + 0.825, + 0.438 + ], + "angle": 0, + "content": "Masatoshi Uehara and Wen Sun. Pessimistic model-based offline reinforcement learning under partial coverage. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.446, + 0.827, + 0.488 + ], + "angle": 0, + "content": "Hado Van Hasselt, Yotam Doron, Florian Strub, Matteo Hessel, Nicolas Sonnerat, and Joseph Modayil. Deep reinforcement learning and the deadly triad. arXiv preprint arXiv:1812.02648, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.497, + 0.827, + 0.527 + ], + "angle": 0, + "content": "Vladimir N Vapnik and A Ya Chervonenkis. 
On the uniform convergence of relative frequencies of events to their probabilities. In Measures of complexity, pp. 11-30. Springer, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.535, + 0.825, + 0.578 + ], + "angle": 0, + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.587, + 0.827, + 0.616 + ], + "angle": 0, + "content": "Yifan Wu, George Tucker, and Ofir Nachum. Behavior regularized offline reinforcement learning. arXiv preprint arXiv:1911.11361, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.624, + 0.827, + 0.668 + ], + "angle": 0, + "content": "Yue Wu, Shuangfei Zhai, Nitish Srivastava, Joshua M Susskind, Jian Zhang, Ruslan Salakhutdinov, and Hanlin Goh. Uncertainty weighted actor-critic for offline reinforcement learning. In International Conference on Machine Learning, pp. 11319-11328. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.676, + 0.825, + 0.72 + ], + "angle": 0, + "content": "Chenjun Xiao, Bo Dai, Jincheng Mei, Oscar A Ramirez, Ramki Gummadi, Chris Harris, and Dale Schuurmans. Understanding and leveraging overparameterization in recursive value estimation. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.727, + 0.827, + 0.77 + ], + "angle": 0, + "content": "Tengyang Xie, Ching-An Cheng, Nan Jiang, Paul Mineiro, and Alekh Agarwal. Bellman-consistent pessimism for offline reinforcement learning. Advances in neural information processing systems, 34, 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.778, + 0.827, + 0.822 + ], + "angle": 0, + "content": "Tengyang Xie, Nan Jiang, Huan Wang, Caiming Xiong, and Yu Bai. 
Policy finetuning: Bridging sample-efficient offline and online reinforcement learning. Advances in neural information processing systems, 34, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.83, + 0.825, + 0.86 + ], + "angle": 0, + "content": "Haoran Xu, Xianyuan Zhan, Jianxiong Li, and Honglei Yin. Offline reinforcement learning with soft behavior regularization. arXiv preprint arXiv:2110.07395, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.868, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Haoran Xu, Li Jiang, Jianxiong Li, and Xianyuan Zhan. A policy-guided imitation approach for offline reinforcement learning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022a. URL https://openreview.net/forum?id=CKbqDtZnSc." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Haoran Xu, Xianyuan Zhan, and Xiangyu Zhu. Constraints penalized q-learning for safe offline reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.827, + 0.199 + ], + "angle": 0, + "content": "Haoran Xu, Li Jiang, Jianxiong Li, Zhuoran Yang, Zhaoran Wang, Victor Wai Kin Chan, and Xianyuan Zhan. Sparse q-learning: Offline reinforcement learning with implicit value regularization. In International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=ueYYgo2pSSU." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.825, + 0.251 + ], + "angle": 0, + "content": "Keyulu Xu, Mozhi Zhang, Jingling Li, Simon Shaolei Du, Ken-Ichi Kawarabayashi, and Stefanie Jegelka. How neural networks extrapolate: From feedforward to graph neural networks. In International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.258, + 0.826, + 0.301 + ], + "angle": 0, + "content": "Pan Xu and Quanquan Gu. A finite-time analysis of q-learning with neural network function approximation. In International Conference on Machine Learning, pp. 10555-10565. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.309, + 0.826, + 0.354 + ], + "angle": 0, + "content": "Tianhe Yu, Garrett Thomas, Lantao Yu, Stefano Ermon, James Y Zou, Sergey Levine, Chelsea Finn, and Tengyu Ma. Mopo: Model-based offline policy optimization. Advances in Neural Information Processing Systems, 33:14129-14142, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.361, + 0.826, + 0.405 + ], + "angle": 0, + "content": "Tianhe Yu, Aviral Kumar, Rafael Rafailov, Aravind Rajeswaran, Sergey Levine, and Chelsea Finn. Combo: Conservative offline model-based policy optimization. Advances in Neural Information Processing Systems, 34, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.413, + 0.826, + 0.444 + ], + "angle": 0, + "content": "Andrea Zanette, Martin J Wainwright, and Emma Brunskill. Provable benefits of actor-critic methods for offline reinforcement learning. Advances in neural information processing systems, 34, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.451, + 0.826, + 0.495 + ], + "angle": 0, + "content": "Xianyuan Zhan, Haoran Xu, Yue Zhang, Xiangyu Zhu, Honglei Yin, and Yu Zheng. Deepthermal: Combustion optimization for thermal power generating units using offline reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.502, + 0.826, + 0.545 + ], + "angle": 0, + "content": "Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning (still) requires rethinking generalization. Communications of the ACM, 64(3):107-115, 2021." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.545 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.524, + 0.119 + ], + "angle": 0, + "content": "A SKETCH OF THEORETICAL ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.828, + 0.289 + ], + "angle": 0, + "content": "In this section, we present in Figure 6 a sketch of the overall logical flow in our theoretical analyses and the proposed algorithm, DOGE. We start by analyzing the effects of data geometry on the generalization patterns of deep Q-functions. We find that a small sample-to-dataset distance leads to a tightened Q-function approximation error and thus interpolation enjoys better generalization properties than extrapolation (Theorem 1). Motivated by this, we propose DOGE, which tries to control the upper bound of the sample-to-centroid distance to be small (Property 1) and enforces a convex hull based policy constraint (Property 2). Then, we dive deeper and find that the upper bound of the Bellman-consistent coefficient is well controlled by sample-to-centroid distance and thus DOGE enjoys a bounded bellman-consistent coefficient (Theorem 2). Based on these findings, we can derive a tighter performance bound of DOGE as compared to support constraint methods like BEAR (Theorem 3)." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.301, + 0.825, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.368, + 0.598, + 0.63, + 0.614 + ], + "angle": 0, + "content": "Figure 6: Sketch of theoretical analysis" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.646, + 0.825, + 0.68 + ], + "angle": 0, + "content": "B THEORETICAL ANALYSIS OF THE IMPACT OF DATA GEOMETRY ON DEEP \\(Q\\) FUNCTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.696, + 0.827, + 0.837 + ], + "angle": 0, + "content": "To analyze the generalization of a function approximator, one can refer to some classical methods such as Rademacher complexity (Bartlett & Mendelson, 2002) and VC-dimension (Vapnik & Chervonenkis, 2015). However, the generalization bounds that obtained by these methods are usually trivial and cannot explain the generalization behavior in the overparameterized regime (Zhang et al., 2021). Recent breakthroughs in neural tangent kernel (NTK) shed light on the generalization of DNN. NTK builds the connection between the training dynamics of DNN and the solution of the kernel regression w.r.t. NTK, and is widely used in recent analysis of DNN generalization (Jacot et al., 2018; Arora et al., 2019b; Bietti & Mairal, 2019). What's more, NTK is also a popular analyzing tool in the convergence and optimality of deep RL (Cai et al., 2019; Fan et al., 2020; Kumar et al., 2020a; Xiao et al., 2021) and thus is used in our study." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.854, + 0.414, + 0.868 + ], + "angle": 0, + "content": "B.1 NEURAL TANGENT KERNEL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.88, + 0.826, + 0.927 + ], + "angle": 0, + "content": "We denote a general neural network by \\( f(\\theta, x): \\mathbb{R}^d \\to \\mathbb{R} \\), where \\( \\theta \\) is all the parameters in the network and \\( x \\in \\mathbb{R}^d \\) is the input. 
Given, a training dataset \\( \\{(x_i, y_i)\\}_{i=1}^n \\), the parameters \\( \\theta \\) are optimized by minimizing the squared loss function, i.e., \\( \\mathcal{L}(\\theta) = \\frac{1}{2} \\sum_{i=1}^n (f_\\theta(x_i) - y_i)^2 \\) by gradient" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "descent. The dynamics of the networks output can be formulated by Lemma 1 (Lemma 3.1. of (Arora et al., 2019b)); see (Arora et al., 2019b) for the proof of Lemma 1." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.138, + 0.827, + 0.224 + ], + "angle": 0, + "content": "Lemma 1. Consider minimizing the squared loss \\(\\mathcal{L}(\\theta)\\) by gradient descent with infinitesimally small learning rate, i.e., \\(\\frac{d\\theta(t)}{dt} = -\\nabla \\mathcal{L}(\\theta(t))\\). Let \\(\\mathbf{u}(t) = (f(\\theta(t), x_i))_{i \\in [n]} \\in \\mathbb{R}^n\\) be the network outputs on all \\(x_i\\)'s at time \\(t\\), and \\(\\mathbf{Y} = (y_i)_{i \\in [n]}\\) be the desired outputs. Then \\(\\mathbf{u}(t)\\) follows the following evolution, where \\(\\mathbf{H}(t)\\) is an \\(n \\times n\\) positive semidefinite matrix whose \\((i,j)\\)-th entry is \\(\\left\\langle \\frac{\\partial f(\\theta(t), x_i)}{\\partial \\theta}, \\frac{\\partial f(\\theta(t), x_j)}{\\partial \\theta} \\right\\rangle\\):" + }, + { + "type": "equation", + "bbox": [ + 0.396, + 0.226, + 0.826, + 0.256 + ], + "angle": 0, + "content": "\\[\n\\frac {d \\mathbf {u} (t)}{d t} = - \\mathbf {H} (t) \\cdot (\\mathbf {u} (t) - \\mathbf {Y}). 
\\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.269, + 0.828, + 0.354 + ], + "angle": 0, + "content": "Plenty of works (Jacot et al., 2018; Arora et al., 2019b; Allen-Zhu et al., 2019; Xu et al., 2020) study the dynamics of the neural networks' training process and find that if the width of networks is sufficiently large, \\(\\mathbf{H}(t)\\) stays almost constant during training, i.e., \\(\\mathbf{H}(t) = \\mathbf{H}(0)\\). What's more, if the neural networks' parameters are randomly initialized with certain scales and the networks width goes to infinity, \\(\\mathbf{H}(0)\\) converges to a fixed matrix \\(\\mathbf{K}\\), called neural tangent kernel (NTK) (Jacot et al., 2018)." + }, + { + "type": "equation", + "bbox": [ + 0.336, + 0.374, + 0.826, + 0.408 + ], + "angle": 0, + "content": "\\[\n\\mathbf {K} (x, x ^ {\\prime}) = \\mathbb {E} _ {\\theta \\sim W} \\left\\langle \\frac {\\partial f (\\theta (t) , x)}{\\partial \\theta}, \\frac {\\partial f (\\theta (t) , x ^ {\\prime})}{\\partial \\theta} \\right\\rangle \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.413, + 0.827, + 0.47 + ], + "angle": 0, + "content": "where, \\( W \\) is Gaussian distribution. The training dynamics in Lemma 1 is identical to the dynamics of kernel regression under gradient flow, because \\( \\mathbf{K} \\) stays constant during training when the width of neural networks goes to infinity. 
Then, the final prediction function \\( (t \\to \\infty \\), assuming \\( \\mathbf{u}(0) = 0 \\)) is equal to the kernel regression solution:" + }, + { + "type": "equation", + "bbox": [ + 0.341, + 0.492, + 0.826, + 0.51 + ], + "angle": 0, + "content": "\\[\nf _ {n t k} (x) = \\left(\\mathbf {K} \\left(x, x _ {1}\\right), \\dots , \\mathbf {K} \\left(x, x _ {n}\\right)\\right) \\cdot \\mathbf {K} _ {\\text {t r a i n}} ^ {- 1} \\mathbf {Y} \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.518, + 0.828, + 0.592 + ], + "angle": 0, + "content": "where \\(\\mathbf{K}_{train}^{-1}\\) is the \\(n\\times n\\) NTK for the training data (the state-action pair \\(x = (s,a)\\) in the policy evaluation in offline RL) and stays constant during training once the training data is fixed. \\(\\mathbf{Y}\\) is the training labels \\((r(s,a) + \\gamma \\mathbb{E}_{a'\\sim \\pi (\\cdot |s')}[Q_{\\theta '}(s',a')]\\) in offline RL). \\(\\mathbf{K}(x,x_i)\\) is the kernel value between test data \\(x\\) and training data \\(x_{i}\\). We denote the feature map of \\(\\mathbf{K}(\\cdot ,\\cdot)\\) as \\(\\Phi (\\cdot)\\), and \\(\\mathbf{K}(x,x^{\\prime}) = \\langle \\Phi (x),\\Phi (x^{\\prime})\\rangle\\). Then, Eq. (14) is equivalent to:" + }, + { + "type": "equation", + "bbox": [ + 0.305, + 0.612, + 0.826, + 0.631 + ], + "angle": 0, + "content": "\\[\nf _ {n t k} (x) = \\left(\\langle \\Phi (x), \\Phi (x _ {1}) \\rangle , \\dots , \\langle \\Phi (x), \\Phi (x _ {n}) \\rangle\\right) \\cdot \\mathbf {K} _ {\\text {t r a i n}} ^ {- 1} \\mathbf {Y} \\tag {15}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.648, + 0.589, + 0.663 + ], + "angle": 0, + "content": "B.2 IMPACT OF DATA GEOMETRY ON DEEP \\(Q\\) FUNCTIONS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.675, + 0.827, + 0.747 + ], + "angle": 0, + "content": "In this section, we analyze the impact of data geometry on deep \\( Q \\) functions under the NTK regime. 
We first introduce the smoothness property of the feature map \( \Phi(x) \) induced by NTK (Lemma 2). Then, we introduce the equivalence between the kernel regression solution in Eq. (15) and a min-norm solution (Lemma 3). Building on Lemma 2 and Lemma 3, Lemma 4 analyzes the smoothness of the deep \( Q \) functions. Finally, we study how data geometry affects deep \( Q \) functions (Theorem 1)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.75, + 0.828, + 0.793 + ], + "angle": 0, + "content": "Assumption 1. (NTK assumption). We assume the function approximators discussed in our paper are two-layer fully-connected ReLU neural networks with infinite width and are trained with infinitesimally small learning rate unless otherwise specified." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.806, + 0.827, + 0.889 + ], + "angle": 0, + "content": "Although there exist some gaps between the NTK assumption and the real setting, NTK is one of the most advanced theoretical tools for the generalization analysis of DNN. In addition, Assumption 1 is common in previous analysis on the generalization of DNN (Jacot et al., 2018; Arora et al., 2019a; Bietti & Mairal, 2019) and the convergence of DRL (Cai et al., 2019; Liu et al., 2019; Xu & Gu, 2020; Fan et al., 2020). For more accurate analysis, we should adopt more advanced analysis tools than NTK and hence leave it for future work." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We first introduce Lemma 2 (Proposition 4 of (Bietti & Mairal, 2019)), which shows the feature map \\(\\Phi(x)\\) induced by NTK is not Lipschitz continuous but holds a weaker Hölder smoothness property." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.828, + 0.149 + ], + "angle": 0, + "content": "Lemma 2. (Smoothness of the kernel map of two-layer ReLU networks). Let \\(\\Phi\\) be the kernel map of the neural tangent kernel induced by a two-layer ReLU neural network, \\(x\\) and \\(y\\) be two inputs, then \\(\\Phi\\) satisfies the following smoothness property." + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.166, + 0.826, + 0.186 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\Phi (x) - \\Phi (y) \\right\\| \\leq \\sqrt {\\min (\\| x \\| , \\| y \\|) \\| x - y \\|} + 2 \\| x - y \\|. \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.215, + 0.827, + 0.258 + ], + "angle": 0, + "content": "Lemma 3 (Lemma 2 of (Xu et al., 2020)) builds the connection between the kernel regression solution in Eq. (14) and the a min-norm solution. For the proof of Lemma 3, we refer the reader to(Xu et al., 2020)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.262, + 0.828, + 0.32 + ], + "angle": 0, + "content": "Lemma 3. (Equivalence to a min-norm optimization problem). Let \\(\\Phi(x)\\) be the feature map induced by a neural tangent kernel, for any \\(x \\in \\mathbb{R}^d\\). The solution to the kernel regression in Eq. (14) and Eq. (15) is equivalent to \\(f_{ntk}(x) = \\Phi(x)^T \\beta_{ntk}\\), where \\(\\beta_{ntk}\\) is the optimal solution of a min-norm optimization problem defined as" + }, + { + "type": "equation", + "bbox": [ + 0.438, + 0.336, + 0.495, + 0.359 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\beta} \\| \\beta \\|\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.352, + 0.825, + 0.38 + ], + "angle": 0, + "content": "\\[\ns. t. 
\\Phi \\left(x _ {i}\\right) ^ {T} \\beta = y _ {i}, \\text{for } i = 1, \\dots , n. \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.403, + 0.61, + 0.42 + ], + "angle": 0, + "content": "Then, deep \\(Q\\) functions satisfy the following smoothness property." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.422, + 0.826, + 0.466 + ], + "angle": 0, + "content": "Lemma 4. (Smoothness for deep \\( Q \\) functions). Given two inputs \\( x \\) and \\( x' \\), the distance between these two data points is \\( d = \\| x - x' \\| \\). \\( C_1 \\coloneqq \\sup \\| \\beta_{ntk} \\|_\\infty \\) is a finite constant. Then the difference between the output at \\( x \\) and the output at \\( x' \\) can be bounded by:" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.474, + 0.826, + 0.493 + ], + "angle": 0, + "content": "\\[\n\\left\\| Q _ {\\theta} (x) - Q _ {\\theta} \\left(x ^ {\\prime}\\right) \\right\\| \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\left\\| x \\right\\| , \\left\\| x ^ {\\prime} \\right\\|\\right)} \\sqrt {d} + 2 d\\right) \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.531, + 0.825, + 0.605 + ], + "angle": 0, + "content": "Proof. In offline RL, we denote a general \\(Q\\) network by \\(Q_{\\theta}(x): \\mathbb{R}^{|S| + |\\mathcal{A}|} \\to \\mathbb{R}\\), where \\(\\theta\\) is all the parameters in the network and \\(x = (s,a) \\in \\mathbb{R}^{|S| + |\\mathcal{A}|}\\) is the brief notation for state-action pair \\((s,a)\\). The \\(Q\\) function is trained via minimizing the temporal difference error defined as \\(\\frac{1}{2}\\sum_{i=1}^{n}(Q_{\\theta}(x_i) - y_i)^2\\) by gradient descent, where \\(y_i = r(x_i) + \\gamma \\mathbb{E}_{a_i' \\sim \\pi(\\cdot | s_i')} [Q_{\\theta'}^\\pi(x_i')] \\in \\mathbb{R}\\) is the target value." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.61, + 0.827, + 0.723 + ], + "angle": 0, + "content": "Using the kernel method from NTK, \\(Q\\) function can be formulated as \\(Q_{\\theta}(x) = \\Phi (x)^{T}\\beta\\), where \\(\\Phi (x)\\) is independent of the changes on training labels when NTK assumption holds. This is because as the width of a neural net goes to infinity, the NTK kernel \\(\\mathbf{K}(x,x^{\\prime}) = \\langle \\Phi (x),\\Phi (x^{\\prime}) \\rangle\\) produced by this network stays constant during training, and so is the property of the feature map \\(\\Phi (x)\\) (Jacot et al., 2018). So, the learning process under NTK framework is actually adjusting \\(\\beta\\) to fit the label rather than \\(\\Phi (x)\\). As a result, Lemma 2 holds when deep \\(Q\\) function satisfies NTK assumptions. Given two inputs \\(x\\) and \\(x^{\\prime}\\), the distance between these two inputs is \\(d = \\| x - x^{\\prime}\\|\\). Based on Lemma 2, it is easy to see that" + }, + { + "type": "equation", + "bbox": [ + 0.206, + 0.74, + 0.825, + 0.844 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| Q _ {\\theta} (x) - Q _ {\\theta} \\left(x ^ {\\prime}\\right) \\right\\| = \\left\\| \\Phi (x) ^ {T} \\beta - \\Phi \\left(x ^ {\\prime}\\right) ^ {T} \\beta \\right\\| \\\\ \\leq \\| \\Phi (x) - \\Phi (x ^ {\\prime}) \\| \\| \\beta \\| _ {\\infty} \\quad (\\text {Infinity norm}) \\\\ \\leq \\| \\beta \\| _ {\\infty} \\left(\\sqrt {\\min \\left(\\| x \\| , \\| x ^ {\\prime} \\|\\right) \\cdot \\| x - x ^ {\\prime} \\|} + 2 \\| x - x ^ {\\prime} \\|\\right) (\\text {Lemma 2}) \\tag {19} \\\\ = \\| \\beta \\| _ {\\infty} (\\sqrt {\\min (\\| x \\| , \\| x ^ {\\prime} \\|) \\cdot d} + 2 d) \\\\ \\leq C _ {\\beta} \\left(\\sqrt {\\min \\left(\\| x \\| , \\| x ^ {\\prime} \\|\\right) \\cdot d} + 2 d\\right) \\quad \\left(C _ {\\beta} := \\sup \\| \\beta \\| _ {\\infty}\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 
0.171, + 0.855, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Additionally, if we consider the delayed \\(Q\\) target and delayed actor updates during policy learning, we can assume the target value used for \\(Q\\) evaluation stays relatively stable during each policy evaluation step and the problem can be seen as solving a series of regression problems. Under this mild assumption, we can learn the actual \\(\\beta_{ntk}\\) at each step (\\(\\beta \\rightarrow \\beta_{ntk}\\) and so \\(C_{\\beta} \\rightarrow C_1\\), where \\(C_1 \\coloneqq \\sup \\| \\beta_{ntk} \\|_{\\infty}\\)) and thus complete the proof. Similar assumptions and treatments are also used" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "in Section 4 of (Kumar et al., 2020a) that Q function at each iteration can fit its label well, Appendix A.8 of (Xiao et al., 2021), as well as Appendix F of (Ghasemipour et al.)." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.14, + 0.825, + 0.152 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.17, + 0.825, + 0.198 + ], + "angle": 0, + "content": "Lemma 4 states the value difference of a deep \\( Q \\) function for two inputs is related to the distance between these two inputs. The closer the distance, the smaller the value difference." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.213, + 0.391, + 0.227 + ], + "angle": 0, + "content": "B.2.1 PROOF OF THEOREM 1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.238, + 0.825, + 0.267 + ], + "angle": 0, + "content": "Building on Lemma 4, we can combine the data geometry and analyze the impact of data geometry on deep \\(Q\\) functions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.282, + 0.826, + 0.325 + ], + "angle": 0, + "content": "Proof. We first review the definition of interpolated data and extrapolated data. Under continuous state-action space, state-action pairs within the convex hull of the dataset can be represented in an interpolated manner (referred to as interpolated data \\( x_{in} \\)):" + }, + { + "type": "equation", + "bbox": [ + 0.372, + 0.332, + 0.826, + 0.372 + ], + "angle": 0, + "content": "\\[\nx _ {i n} = \\sum_ {i = 1} ^ {n} \\alpha_ {i} x _ {i}, \\quad \\sum_ {i = 1} ^ {n} \\alpha_ {i} = 1, \\alpha_ {i} \\geq 0 \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.385, + 0.8, + 0.402 + ], + "angle": 0, + "content": "Similarly, we can define extrapolated data that lie outside the convex hull of the dataset as \\( x_{out} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.44, + 0.407, + 0.826, + 0.448 + ], + "angle": 0, + "content": "\\[\nx _ {o u t} = \\sum_ {i = 1} ^ {n} \\beta_ {i} x _ {i}, \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.454, + 0.581, + 0.471 + ], + "angle": 0, + "content": "where \\(\\sum_{i=1}^{n} \\beta_{i} = 1\\) and \\(\\beta_{i} \\geq 0\\) do not hold simultaneously." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.476, + 0.825, + 0.573 + ], + "angle": 0, + "content": "We define \\(\\mathrm{Proj}_{\\mathcal{D}}(x) \\coloneqq \\arg \\min_{x_i \\in \\mathcal{D}} \\| x - x_i \\|\\) as a projector that projects unseen data \\(x\\) to its nearest data in dataset \\(\\mathcal{D}\\). 
Given an interpolated data \(x_{in}\) and an extrapolated data \(x_{out}\), the distances to their nearest data in dataset are \(d_{x_{in}} = \| x_{in} - \mathrm{Proj}_{\mathcal{D}}(x_{in})\|\) and \(d_{x_{out}} = \| x_{out} - \mathrm{Proj}_{\mathcal{D}}(x_{out})\|\). Because interpolated data lie inside the convex hull of training data, \(d_{x_{in}} \leq \max_{x_i \in \mathcal{D}} \| x_{in} - x_i\| \leq B\) is bounded, where \(B \coloneqq \max_{x_i, x_j \in \mathcal{D}} \| x_i - x_j\|\) is a finite constant. Then, by applying Lemma 4, the value difference of deep \(Q\) function for interpolated and extrapolated data can be formulated as follows." + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.582, + 0.826, + 0.65 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| Q _ {\\theta} \\left(x _ {i n}\\right) - Q _ {\\theta} \\left(\\operatorname {Proj} _ {\\mathcal {D}} \\left(x _ {i n}\\right)\\right) \\right\\| \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\left\\| x _ {i n} \\right\\| , \\left\\| \\operatorname {Proj} _ {\\mathcal {D}} \\left(x _ {i n}\\right) \\right\\|\\right)} \\sqrt {d _ {x _ {i n}}} + 2 d _ {x _ {i n}}\\right) (22) \\\\ \\leq C _ {1} (\\sqrt {\\min (\\| x _ {i n} \\| , \\| \\mathrm {Proj} _ {\\mathcal {D}} (x _ {i n}) \\|)} \\sqrt {B} + 2 B) \\\\ \\left\\| Q _ {\\theta} \\left(x _ {o u t}\\right) - Q _ {\\theta} \\left(\\operatorname {Proj} _ {\\mathcal {D}} \\left(x _ {o u t}\\right)\\right) \\right\\| \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\left\\| x _ {o u t} \\right\\| , \\left\\| \\operatorname {Proj} _ {\\mathcal {D}} \\left(x _ {o u t}\\right) \\right\\|\\right)} \\sqrt {d _ {x _ {o u t}}} + 2 d _ {x _ {o u t}}\\right) (23) \\\\ \\end{array}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.652, + 0.826, + 0.664 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.683, + 0.541, + 0.698 + ], + "angle": 0, + 
"content": "B.3 QUANTITATIVE EXPERIMENTS ON THEOREM 1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.709, + 0.827, + 0.821 + ], + "angle": 0, + "content": "In addition to the one-dimensional random walk experiments presented in Section 2.2, we conduct additional experiments on the more complex and high-dimensional MuJoCo tasks (including D4RL Hopper-medium-v2, Halfcheetah-medium-v2, and Walker2d-medium-v2) to provide quantitative support to Theorem 1, in particular, the pertinence of interpolation and extrapolation. We first synthesize lots of interpolated data \\( x_{in} \\) and extrapolated data \\( x_{out} \\) (\\( x = (s,a) \\in S \\times \\mathcal{A} \\)) and then search for their nearest data points in offline dataset \\( \\mathcal{D} \\) accordingly, i.e., \\( \\mathrm{Proj}_{\\mathcal{D}}(x_{in}) \\) and \\( \\mathrm{Proj}_{\\mathcal{D}}(x_{out}) \\). Then, we can evaluate the Q-value differences \\( \\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x)) \\| \\) (LHS of Theorem 1) at these generated data and see whether the Q-value differences align well with Theorem 1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "For the detailed experiment setup, recall that an interpolated data point \\( x_{in} \\) is a convex combination of the offline dataset, i.e., \\( x_{in} = \\sum_{i=1}^{n} \\alpha_i x_i \\), \\( x_i \\sim \\mathcal{D} \\) with weights \\( \\alpha_i \\) that satisfy \\( \\sum_{i=1}^{n} \\alpha_i = 1 \\), \\( \\alpha_i \\geq 0 \\). Therefore, we can interpolate the offline dataset based on \\( \\alpha_i \\) sampled from the Dirichlet distribution to generate the interpolated data. Also, an extrapolated data point \\( x_{out} \\) is expressed as a weighted sum of the offline dataset, i.e., \\( x_{out} = \\sum_{i=1}^{n} \\beta_i x_i \\), \\( x_i \\sim \\mathcal{D} \\), but its weights \\( \\beta_i \\) do not satisfy the non-negativity and the summing to 1 constraint. 
Therefore, we can generate extrapolated data by setting the sign of some weights to negative values and varying the weights not summing to" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.828, + 0.177 + ], + "angle": 0, + "content": "1. After obtaining the interpolated and extrapolated data, we search for their closest data points in the offline dataset \\(\\mathcal{D}\\) and calculate their corresponding distance \\(\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|\\) and Q-value difference \\(\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|\\). Figure 7a shows the relationship between the distance to dataset \\(\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|\\) and the Q value difference \\(\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|\\) (LHS of Theorem 1). We also report the learned state-conditioned distance value \\(g(s,a)\\) on these generated data in Figure 7b." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.193, + 0.391, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.196, + 0.603, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.605, + 0.197, + 0.82, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.273, + 0.348, + 0.724, + 0.365 + ], + "angle": 0, + "content": "(a) Relationship between \\(\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|\\) and \\(\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|\\)." 
+ }, + { + "type": "image", + "bbox": [ + 0.177, + 0.375, + 0.391, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.376, + 0.605, + 0.524 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.376, + 0.82, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.342, + 0.527, + 0.655, + 0.542 + ], + "angle": 0, + "content": "(b) Relationship between \\(\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|\\) and \\(g(x)\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.552, + 0.825, + 0.625 + ], + "angle": 0, + "content": "Figure 7: Quantitative experiments of Theorem 1 on the D4RL MuJoCo-medium datasets. The red star-shaped dots are the interpolated data and the circle dots are the extrapolated data. The color of the dots represents \\(\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|\\) values in (a) and \\(g(x)\\) values in (b), respectively. The darker the color, the smaller the corresponding value. In (a), the yellow dash line is the empirical upper bound of \\(\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.643, + 0.827, + 0.743 + ], + "angle": 0, + "content": "Figure 7a demonstrates that the interpolated data enjoy a tighter empirical upper bound of \\( \\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x)) \\| \\) (LHS of Theorem 1) than most of the extrapolated data. Moreover, the empirical upper bound of the Q-value difference grows with the increase of the sample-to-dataset distance \\( \\| x - \\mathrm{Proj}_{\\mathcal{D}(x)} \\| \\), which is consistent with Theorem 1 (the upper bound of value difference of deep Q function is well controlled by distance to the dataset). 
Figure 7b shows that the state-conditioned distance function \\( g(s, a) \\) can output low values for interpolated data and some near-dataset extrapolated data, and thus can be used as a relaxed policy constraint in these OOD regions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.768, + 0.577, + 0.784 + ], + "angle": 0, + "content": "C STATE-CONDITIONED DISTANCE FUNCTION" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.803, + 0.379, + 0.817 + ], + "angle": 0, + "content": "C.1 PROOF OF PROPERTY 1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.832, + 0.826, + 0.876 + ], + "angle": 0, + "content": "Proof. Given a state-action pair from the training data \\((s, a) \\sim \\mathcal{D}\\), we synthetic random noise actions from a uniform distribution over the action space, i.e. \\(\\hat{a} \\sim \\text{Unif}(\\mathcal{A})\\). Then the distance function \\(g(\\cdot)\\) is trained by Eq. (24)." + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.899, + 0.826, + 0.93 + ], + "angle": 0, + "content": "\\[\n\\min _ {g} \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\mathbb {E} _ {\\hat {a} \\sim U n i f (\\mathcal {A})} \\left[ \\| \\hat {a} - a \\| - g (s, \\hat {a}) \\right] ^ {2} \\right] \\tag {24}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.147 + ], + "angle": 0, + "content": "\\(\\left[\\| \\hat{a} - a\\| - g(s, \\hat{a})\\right]^2\\) can be upper bounded by some finite constants because \\(S \\times \\mathcal{A}\\) is compact in our analysis. The optimization problem in Eq. (24) can be reformulated as the following form according to the Fubini's Theorem." 
+ }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.164, + 0.826, + 0.192 + ], + "angle": 0, + "content": "\\[\n\\min _ {g} \\mathbb {E} _ {\\hat {a} \\sim Unif (\\mathcal {A})} \\left[ \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\| \\hat {a} - a \\| - g (s, \\hat {a}) \\right] ^ {2} \\right] \\tag {25}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.202, + 0.825, + 0.232 + ], + "angle": 0, + "content": "Note that the objective of Eq. (25) can also be written as a functional \\( J[g(s, \\hat{a})] \\) with respect to function \\( g \\) in the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.232, + 0.237, + 0.826, + 0.27 + ], + "angle": 0, + "content": "\\[\nJ [ g (s, \\hat {a}) ] = \\int_ {\\mathcal {A}} \\frac {1}{| \\mathcal {A} |} \\left[ \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} [ \\| \\hat {a} - a \\| - g (s, \\hat {a}) ] ^ {2} \\right] \\mathrm {d} \\hat {a} = \\int_ {\\mathcal {A}} F (s, \\hat {a}, g (s, \\hat {a})) \\mathrm {d} \\hat {a} \\tag {26}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.282, + 0.825, + 0.327 + ], + "angle": 0, + "content": "Based on calculus of variations, the extrema (maxima or minima) of functional \\( J[g(s,\\hat{a})] \\) can be obtained by solving the associated Euler-Lagrange equation \\( (\\partial F / \\partial g = 0) \\). 
In our case, it requires that the optimal state-conditioned distance function \( g^{*} \) satisfy the following conditions:" + }, + { + "type": "equation", + "bbox": [ + 0.261, + 0.342, + 0.826, + 0.428 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {\\partial}{\\partial g ^ {*}} \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} [ \\| \\hat {a} - a \\| - g ^ {*} (s, \\hat {a}) ] ^ {2} = 0 \\\\ \\Rightarrow \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\frac {\\partial}{\\partial g ^ {*}} [ \\| \\hat {a} - a \\| - g ^ {*} (s, \\hat {a}) ] ^ {2} \\right] = 0 (\\text {DNN is continuous}) \\tag {27} \\\\ \\Rightarrow \\quad \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\left\\| \\hat {a} - a \\right\\| - g ^ {*} (s, \\hat {a}) \\right] = 0 \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.437, + 0.825, + 0.466 + ], + "angle": 0, + "content": "Conditioned on a state \\( s \\in \\mathcal{D} \\), the optimal state-conditioned distance function in Eq. 
(27) satisfies the following conditions:" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.472, + 0.826, + 0.576 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\int_ {\\mathcal {A}} \\| \\hat {a} - a \\| \\mu (s, a) \\mathrm {d} a - \\int_ {\\mathcal {A}} \\mu (s, a) \\mathrm {d} a g ^ {*} (s, \\hat {a}) = 0, s \\in \\mathcal {D} \\\\ \\Rightarrow g ^ {*} (s, \\hat {a}) = \\frac {\\int_ {\\mathcal {A}} \\| \\hat {a} - a \\| \\mu (s , a) \\mathrm {d} a}{\\int_ {\\mathcal {A}} \\mu (s , a) \\mathrm {d} a}, s \\in \\mathcal {D} \\tag {28} \\\\ \\Rightarrow g ^ {*} (s, \\hat {a}) = \\int_ {\\mathcal {A}} C (s, a) \\| \\hat {a} - a \\| \\mathrm {d} a, s \\in \\mathcal {D} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.589, + 0.827, + 0.67 + ], + "angle": 0, + "content": "where, \\(\\mu(s,a)\\) is the empirical distribution on a finite offline dataset \\(\\mathcal{D} = \\{(x_i)\\}_{i=1}^n\\), i.e., the sum of the Dirac measures \\(\\frac{1}{n}\\sum_{i=1}^{n}\\delta_{x_i}\\). \\(\\forall (s,a) \\notin \\mathcal{D}, \\mu(s,a) = 0. \\forall (s,a) \\in \\mathcal{D}, \\mu(s,a) > 0\\). \\(C(s,a) = \\frac{\\mu(s,a)}{\\int_A\\mu(s,a)\\mathrm{d}a} \\geq 0\\) and \\(\\int_A C(s,a)\\mathrm{d}a = 1\\). Because \\(L_2\\)-norm is convex and the non-negative combination of convex functions is still convex, \\(g^*(s,\\hat{a})\\) is a convex function w.r.t. \\(\\hat{a}\\). 
In addition, \\(\\forall \\hat{a} \\in \\mathcal{A}\\), by the Jensen inequality, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.286, + 0.675, + 0.826, + 0.695 + ], + "angle": 0, + "content": "\\[\ng ^ {*} (s, \\hat {a}) \\geq \\left\\| \\hat {a} - \\mathbb {E} _ {a \\sim U n i f (\\mathcal {A})} [ C (s, a) a ] \\right\\| = \\| \\hat {a} - a _ {o} (s) \\|, s \\in \\mathcal {D} \\tag {29}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.707, + 0.816, + 0.724 + ], + "angle": 0, + "content": "where \\(a_{o}(s)\\coloneqq \\mathbb{E}_{a\\sim Unif(\\mathcal{A})}[C(s,a)a], s\\in \\mathcal{D}\\) is the state-conditioned centroid of training dataset." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.729, + 0.826, + 0.742 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.76, + 0.38, + 0.773 + ], + "angle": 0, + "content": "C.2 PROOF OF PROPERTY 2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.786, + 0.827, + 0.802 + ], + "angle": 0, + "content": "Proof. 
The negative gradient of the optimal state-conditioned distance function can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.266, + 0.817, + 0.826, + 0.886 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} - \\nabla_ {\\hat {a}} g ^ {*} (s, \\hat {a}) = - \\int_ {\\mathcal {A}} C (s, a) \\frac {\\hat {a} - a}{\\| \\hat {a} - a \\|} \\mathrm {d} a, \\forall \\hat {a} \\in \\mathcal {A}, s \\in \\mathcal {D} \\tag {30} \\\\ = \\frac {1}{\\int_ {\\mathcal {A}} \\mu (s , a) \\mathrm {d} a} \\int_ {\\mathcal {A}} \\mu (s, a) \\frac {- (\\hat {a} - a)}{\\| \\hat {a} - a \\|} \\mathrm {d} a, \\forall \\hat {a} \\in \\mathcal {A}, s \\in \\mathcal {D} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Observe that the direction of the negative gradient of \\( g^{*}(s,\\hat{a}) \\) is related to the integral of vector \\( -(\\hat{a} - a) \\) (points towards \\( a \\)). When \\( (s,a)\\notin \\mathcal{D}, - (\\hat{a} - a) \\) doesn't influence the final gradient because" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.177 + ], + "angle": 0, + "content": "\\(\\mu(s, a) = 0\\). Therefore, \\(-(\\hat{a} - a)\\) only contribute to the final gradient of \\(g^{*}(s, \\hat{a})\\) for \\((s, a) \\in \\mathcal{D}\\) as \\(\\mu(s, a) > 0\\). For a given \\(s \\in \\mathcal{D}\\) and any extrapolated action \\(\\hat{a}\\) that lies outside the convex hull of training data, the integral of vector \\(-( \\hat{a} - a)\\) is basically a non-negative combination of vectors \\(-( \\hat{a} - a)\\) that point toward actions \\(a \\in \\mathcal{D}\\) inside the convex hull. 
As a result, it's easy to see that \\(-\\nabla_{\\hat{a}} g^{*}(s, \\hat{a})\\) also points inside the convex hull formed by the data." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.182, + 0.825, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.216, + 0.513, + 0.232 + ], + "angle": 0, + "content": "D THEORETICAL ANALYSIS OF DOGE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.248, + 0.828, + 0.32 + ], + "angle": 0, + "content": "In this section, we analyze the performance of the policy learned by DOGE. We first adopt the Bellman-consistent coefficient from (Xie et al., 2021a) to quantify the distributional shift from the perspective of deep \\( Q \\) functions generalization. Then, we gives the upper bound of the Bellman-consistent coefficient under the NTK regime (Appendix D.1). At last, we give the performance bound of DOGE (Appendix D.2)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.336, + 0.61, + 0.35 + ], + "angle": 0, + "content": "D.1 UPPER BOUND OF BELLMAN-CONSISTENT COEFFICIENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.362, + 0.828, + 0.406 + ], + "angle": 0, + "content": "Let us first review the definition of Bellman-consistent coefficient \\(\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)\\) in (Xie et al., 2021a). We define \\(\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)\\) to measure the distributional shift from an arbitrary distribution \\(v\\) to data distribution \\(\\mu\\), w.r.t. \\(\\mathcal{F}\\) and \\(\\pi\\). \\(\\mathcal{F}\\) is the function class of \\(Q\\) networks." 
+ }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.422, + 0.826, + 0.46 + ], + "angle": 0, + "content": "\\[\n\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) := \\sup _ {Q \\in \\mathcal {F}} \\frac {\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , v} ^ {2}}{\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , \\mu} ^ {2}} \\tag {31}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.471, + 0.827, + 0.567 + ], + "angle": 0, + "content": "where the \\(\\mu\\)-weighted norm (square) is defined as \\(\\|f\\|_{2,\\mu}^2 \\coloneqq \\mathbb{E}_{\\mu}[\\|f\\|^2]\\), which is also applicable for any distribution \\(v\\). \\(\\mathcal{T}^{\\pi}Q\\) is the Bellman operator of policy \\(\\pi\\), defined as \\(\\mathcal{T}^{\\pi}Q(s,a) \\coloneqq r(s,a) + \\gamma \\mathbb{E}_{a' \\sim \\pi(\\cdot|s'), s' \\sim \\mathcal{P}(\\cdot|s,a)}[Q(s',a')] \\coloneqq r(s,a) + \\gamma \\mathbb{P}^{\\pi}[Q(s',a')]\\). \\(\\mathbb{P}^{\\pi}[\\cdot]\\) is the brief notation for \\(\\mathbb{E}_{a' \\sim \\pi(\\cdot|s'), s' \\sim \\mathcal{P}(\\cdot|s,a)}[\\cdot]\\). The smaller the ratio of the Bellman error under \\(v\\) and \\(\\mu\\), the more transferable the \\(Q\\) function from \\(\\mu\\) to \\(v\\), even when \\(\\sup_{(s,a)} \\frac{v(s,a)}{\\mu(s,a)} = \\infty\\). Then we give the proof of Theorem 2 (Upper bound of Bellman-consistent coefficient)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.582, + 0.829, + 0.64 + ], + "angle": 0, + "content": "Proof. We denote \\( x = (s, a) \\) and \\( x' = (s', a') \\). \\( x_o = \\mathbb{E}_{x \\sim \\mathcal{D}}[x] \\) is the centroid of offline dataset. \\( d_1 = \\| x - x_o \\| \\) and \\( d_2 = \\| x' - x_o \\| \\) are the sample-to-centroid distances. Let \\( \\mu(x) \\) be the distribution under the offline dataset and \\( v(x) \\) be any distribution. Then, for the numerator in Eq. (8) and Eq. (31), we have the following inequalities." 
+ }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.647, + 0.847, + 0.889 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2, v} ^ {2} \\\\ = \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\| Q (x) - r (x) - \\gamma \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| ^ {2} \\\\ = \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\| Q (x) - \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] - r (x) + (1 - \\gamma) \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| ^ {2} \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ \\| Q (x) - \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| + \\| r (x) \\| + \\| (1 - \\gamma) \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| \\right] ^ {2} (\\text {T r i a n g l e}) \\\\ = \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ \\| Q (x) - Q \\left(x _ {o}\\right) + Q \\left(x _ {o}\\right) - \\mathbb {P} ^ {\\pi} \\left[ Q \\left(x ^ {\\prime}\\right) \\right] \\| + \\| r (x) \\| + (1 - \\gamma) \\| \\mathbb {P} ^ {\\pi} \\left[ Q \\left(x ^ {\\prime}\\right) \\right] - Q \\left(x _ {o}\\right) + Q \\left(x _ {o}\\right) \\| \\right] ^ {2} \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ (1 - \\gamma) \\| Q \\left(x _ {o}\\right) \\| + \\| r (x) \\| + \\| Q (x) - Q \\left(x _ {o}\\right) \\| + (2 - \\gamma) \\| \\mathbb {P} ^ {\\pi} \\left[ Q \\left(x ^ {\\prime}\\right) \\right] - Q \\left(x _ {o}\\right) \\| \\right] ^ {2} (\\text {T r i a n g l e}) \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ \\underbrace {(1 - \\gamma) \\| Q \\left(x _ {o}\\right) \\| + \\| r (x) \\|} _ {\\mathcal {I} _ {1}} + \\underbrace {\\| Q (x) - Q \\left(x _ {o}\\right) \\|} _ {\\mathcal {I} _ {2}} + \\underbrace {(2 - \\gamma) \\mathbb {P} ^ {\\pi} [ \\| Q \\left(x ^ {\\prime}\\right) - Q \\left(x _ {o}\\right) \\| ]} _ {\\mathcal {I} _ {3}} \\right] ^ {2} (\\text {J e n s e n}) \\tag {32} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", 
+ "bbox": [ + 0.171, + 0.895, + 0.827, + 0.927 + ], + "angle": 0, + "content": "The RHS contains three parts: \\(\\mathcal{I}_1 = (1 - \\gamma)\\| Q(x_o)\\| +\\| r(x)\\|\\), \\(\\mathcal{I}_2 = \\| Q(x) - Q(x_o)\\|\\) and \\(\\mathcal{I}_3 = (2 - \\gamma)\\mathbb{P}^\\pi [\\| Q(x') - Q(x_o)\\| ]\\). Because \\(\\| r(x)\\| \\in [0,R_{\\max}],\\forall x\\in S\\times \\mathcal{A},\\mathcal{I}_1\\) can be upper" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.258, + 0.119 + ], + "angle": 0, + "content": "bounded as:" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.121, + 0.827, + 0.137 + ], + "angle": 0, + "content": "\\[\n\\mathcal {I} _ {1} \\leq (1 - \\gamma) Q \\left(x _ {o}\\right) + R _ {\\max } \\tag {33}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.15, + 0.484, + 0.166 + ], + "angle": 0, + "content": "By applying Lemma 4, \\(\\mathcal{I}_2\\) is upper bounded as" + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.185, + 0.828, + 0.212 + ], + "angle": 0, + "content": "\\[\n\\mathcal {I} _ {2} \\leq C _ {1} \\left[ \\sqrt {\\min (\\| x \\| , \\| x _ {o} \\|) d _ {1}} + 2 d _ {1} \\right] \\tag {34}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.224, + 0.33, + 0.239 + ], + "angle": 0, + "content": "\\(\\mathcal{I}_3\\) is upper bounded as" + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.258, + 0.826, + 0.285 + ], + "angle": 0, + "content": "\\[\n\\mathcal {I} _ {3} \\leq C _ {1} (2 - \\gamma) \\mathbb {P} ^ {\\pi} \\left[ \\sqrt {\\min (\\| x ^ {\\prime} \\| , \\| x _ {o} \\|) d _ {2}} + 2 d _ {2} \\right] \\tag {35}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.299, + 0.82, + 0.317 + ], + "angle": 0, + "content": "In 
addition, we denote \\(C_2 \\coloneqq \\sqrt{\\sup_{x \\in S \\times A} \\|x\\|}\\). Then, \\(\\mathcal{I}_2\\) and \\(\\mathcal{I}_3\\) can be further upper bounded by" + }, + { + "type": "equation", + "bbox": [ + 0.407, + 0.335, + 0.826, + 0.361 + ], + "angle": 0, + "content": "\\[\n\\mathcal {I} _ {2} \\leq C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}\\right) \\tag {36}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.378, + 0.383, + 0.826, + 0.402 + ], + "angle": 0, + "content": "\\[\n\\mathcal {I} _ {3} \\leq (2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}\\right) \\tag {37}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.415, + 0.825, + 0.447 + ], + "angle": 0, + "content": "The above relaxation of the upper bound in Eq. (36) and Eq. (37) is not necessary, but for notation brevity, we choose to relax the upper bound by treating \\( C_2 \\coloneqq \\sqrt{\\sup_{x \\in S \\times A} \\| x \\|} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.451, + 0.66, + 0.467 + ], + "angle": 0, + "content": "Plug Eq. (33), Eq. (36) and Eq. (37) into the RHS of Eq. 
(32), we can get" + }, + { + "type": "equation", + "bbox": [ + 0.181, + 0.474, + 0.825, + 0.574 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| Q - \\mathcal {T} ^ {\\pi} Q \\right\\| _ {2, v} ^ {2} \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ (1 - \\gamma) Q (x _ {o}) + R _ {\\max } + C _ {1} (C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}) + (2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} (C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}) \\right] ^ {2} \\\\ = \\left\\| (1 - \\gamma) Q \\left(s _ {o}, a _ {o}\\right) + R _ {\\max } + C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}\\right) + (2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}\\right) \\right\\| _ {2, v} ^ {2} \\tag {38} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.58, + 0.827, + 0.639 + ], + "angle": 0, + "content": "For the denominator \\(\\| Q - \\mathcal{T}^{\\pi}Q\\|_{2,\\mu}^{2}\\) in Eq. (8) and Eq. (31), because the \\(Q\\) function is approximated, there exists approximation error between \\(Q\\) and \\(\\mathcal{T}^{\\pi}Q\\), i.e., \\(Q - \\mathcal{T}^{\\pi}Q \\geq \\epsilon\\). In addition, the distribution \\(\\mu\\) contains some mismatch w.r.t. the equilibrium distribution induced by policy \\(\\pi\\). Therefore, it is reasonable to assume \\(\\| Q - \\mathcal{T}^{\\pi}Q\\|_{2,\\mu}^{2} \\geq \\epsilon_{\\mu} > 0\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.645, + 0.825, + 0.676 + ], + "angle": 0, + "content": "Then, we can complete the proof by plugging the upper bound in Eq. (38) and \\(\\| Q - \\mathcal{T}^{\\pi}Q\\|_{2,\\mu}^{2}\\geq\\) \\(\\epsilon_{\\mu} > 0\\) into Eq. (8) or Eq. (31)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.171, + 0.684, + 0.833, + 0.771 + ], + "angle": 0, + "content": "\\[\n\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) \\leq \\frac {1}{\\epsilon_ {\\mu}} \\left\\| \\underbrace {(1 - \\gamma) Q \\left(s _ {o} , a _ {o}\\right) + R _ {\\max }} _ {\\mathcal {B} _ {1}} + \\underbrace {C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}\\right)} _ {\\mathcal {B} _ {2}} + \\underbrace {(2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}\\right)} _ {\\mathcal {B} _ {3}} \\right\\| _ {2, v} ^ {2} \\tag {39}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.827, + 0.925 + ], + "angle": 0, + "content": "To be mentioned, the distance regularization in DOGE compels the learned policy to output the action that is near the state-conditioned centroid of the dataset and thus \\(\\mathcal{B}_2\\) and \\(\\mathcal{B}_3\\) can be driven to some small values. \\(\\mathcal{B}_1\\) is independent of the distributional shift. Therefore, \\(\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)\\) can be bounded by some finite constants under DOGE. Therefore, the constrained policy set induced by DOGE is essentially a Bellman-consistent constrained policy set \\(\\Pi_{\\mathcal{B}}\\) defined in Definition 2. In addition, other policy constraint methods such as BEAR (Kumar et al., 2019) can also have bounded \\(\\mathcal{B}\\). However, these policy constraint methods do not allow the learned policy to shift to those generalizable distributions where \\(\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)\\) is small but \\(\\sup_{(s,a)}\\frac{v(s,a)}{\\mu(s,a)}\\to \\infty\\), which is essentially different from DOGE."
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.586, + 0.119 + ], + "angle": 0, + "content": "D.2 PERFORMANCE OF THE POLICY LEARNED BY DOGE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.825, + 0.175 + ], + "angle": 0, + "content": "Here, we briefly review the definition of the Bellman-consistent constrained policy set \\(\\Pi_{\\mathcal{B}}\\) defined in Definition 2. The Bellman-consistent coefficient under the transition induced by \\(\\Pi_{\\mathcal{B}}\\) can be bounded by some finite constants \\(l(k)\\):" + }, + { + "type": "equation", + "bbox": [ + 0.424, + 0.192, + 0.826, + 0.21 + ], + "angle": 0, + "content": "\\[\n\\mathcal {B} \\left(\\rho_ {k}, \\mu , \\mathcal {F}, \\pi\\right) \\leq l (k) \\tag {40}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.217, + 0.827, + 0.261 + ], + "angle": 0, + "content": "where, \\(\\rho_0\\) is the initial state-action distribution and \\(\\mu\\) is the distribution of training data. \\(\\rho_{k} = \\rho_{0}P^{\\pi_{1}}P^{\\pi_{2}}\\ldots P^{\\pi_{k}},\\forall \\pi_{1},\\pi_{2},\\ldots ,\\pi_{k}\\in \\Pi_{\\mathcal{B}}\\) and \\(P^{\\pi_i}\\) is the transition operator on states induced by \\(\\pi_{i}\\), i.e., \\(P^{\\pi_i}(s',a'|s,a) = \\mathcal{P}(s'|s,a)\\pi_i(a'|s')\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.265, + 0.828, + 0.323 + ], + "angle": 0, + "content": "We denote the constrained Bellman operator induced by \\(\\Pi_{\\mathcal{B}}\\) as \\(\\mathcal{T}^{\\Pi_{\\mathcal{B}}}\\), and \\(\\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q(s,a) := r(s,a) + \\max_{\\pi \\in \\Pi_{\\mathcal{B}}} \\gamma \\mathbb{P}^{\\pi}[Q(s',a')]\\). 
\\(\\mathcal{T}^{\\Pi_{\\mathcal{B}}}\\) can be seen as a operator in a redefined MDP and hence is a contraction mapping and exists a fixed point. We denote \\(Q^{\\Pi_{\\mathcal{B}}}\\) as the fixed point of \\(\\mathcal{T}^{\\Pi_{\\mathcal{B}}}\\), i.e., \\(Q^{\\Pi_{\\mathcal{B}}} = \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{\\Pi_{\\mathcal{B}}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.329, + 0.408, + 0.344 + ], + "angle": 0, + "content": "The Bellman optimal operator \\(\\mathcal{T}\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.355, + 0.345, + 0.826, + 0.368 + ], + "angle": 0, + "content": "\\[\n\\mathcal {T} Q (s, a) := r (s, a) + \\max _ {\\pi} \\gamma \\mathbb {P} ^ {\\pi} [ Q \\left(s ^ {\\prime}, a ^ {\\prime}\\right) ] \\tag {41}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.377, + 0.81, + 0.394 + ], + "angle": 0, + "content": "\\(\\mathcal{T}\\) is also a contraction mapping. Its fixed point is the optimal value function \\(Q^{*}\\) and \\(Q^{*} = \\mathcal{T}Q^{*}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.399, + 0.449, + 0.414 + ], + "angle": 0, + "content": "Then, by the triangle inequality, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.319, + 0.43, + 0.826, + 0.488 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| Q ^ {*} - Q ^ {\\pi_ {n}} \\right\\| _ {\\rho_ {0}} = \\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} + Q ^ {\\Pi_ {\\mathcal {B}}} - Q ^ {\\pi_ {n}} \\right\\| _ {\\rho_ {0}} \\\\ \\leq \\underbrace {\\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\rho_ {0}}} _ {L _ {1}} + \\underbrace {\\left\\| Q ^ {\\Pi_ {\\mathcal {B}}} - Q ^ {\\pi_ {n}} \\right\\| _ {\\rho_ {0}}} _ {L _ {2}} \\tag {42} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.496, + 0.827, + 0.541 + ], + "angle": 0, + "content": "where \\(Q^{\\pi_n}\\) is the true \\(Q\\) value of policy \\(\\pi_{n}\\). \\(\\pi_{n}\\) is the greedy policy w.r.t. 
\\(Q_{n}\\) in the Bellman-consistent constrained policy set \\(\\Pi_{\\mathcal{B}}\\), i.e., \\(\\pi_{n} = \\sup_{\\pi \\in \\Pi_{\\mathcal{B}}}\\mathbb{E}_{a\\sim \\pi (\\cdot |s)}[Q_{n}(s,a)]\\). \\(Q_{n}\\) is the \\(Q\\) function after \\(n\\)-th value iteration under the constrained Bellman operator \\(\\mathcal{T}^{\\Pi_{\\mathcal{B}}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.547, + 0.56, + 0.562 + ], + "angle": 0, + "content": "For the \\(L_{1}\\) part in Eq. (42), we first focus on the infinity norm." + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.582, + 0.825, + 0.662 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} = \\left\\| \\mathcal {T} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} \\\\ \\leq \\left\\| \\mathcal {T} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {*} \\right\\| _ {\\infty} + \\left\\| \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} \\\\ \\leq \\left\\| \\mathcal {T} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {*} \\right\\| _ {\\infty} + \\gamma \\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} \\quad \\left(\\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} \\text { is a } \\gamma \\text {-contraction}\\right) \\tag {43} \\\\ = \\alpha (\\Pi_ {\\mathcal {B}}) + \\gamma \\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\| _ {\\infty} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.673, + 0.825, + 0.71 + ], + "angle": 0, + "content": "where \\(\\alpha (\\Pi_{\\mathcal{B}})\\coloneqq \\| \\mathcal{T}Q^{*} - \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{*}\\|_{\\infty}\\) is the suboptimality constant. 
Then, we get \\(\\| Q^{*} - Q^{\\Pi_{\\mathcal{B}}}\\|_{\\infty}\\leq\\) \\(\\frac{\\alpha(\\Pi_{\\mathcal{B}})}{1 - \\gamma}\\) and \\(L_{1}\\leq \\| Q^{*} - Q^{\\Pi_{\\mathcal{B}}}\\|_{\\infty}\\leq \\frac{\\alpha(\\Pi_{\\mathcal{B}})}{1 - \\gamma}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.716, + 0.825, + 0.762 + ], + "angle": 0, + "content": "For \\(L_{2}\\), we introduce Lemma 5, which upper bounds \\(\\| Q^{\\Pi_{\\mathcal{B}}} - Q^{\\pi_n} \\|_{2,\\rho_0}^2\\). The proof of Lemma 5 can be obtained by directly replacing \\(Q^{*}\\) with \\(Q^{\\Pi_{\\mathcal{B}}}\\) in Appendix F.3 of (Le et al., 2019), because \\(Q^{\\Pi_{\\mathcal{B}}}\\) is the optimal value function under the modified MDP induced by \\(\\mathcal{T}^{\\Pi_{\\mathcal{B}}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.763, + 0.791, + 0.781 + ], + "angle": 0, + "content": "Lemma 5. (Upper bound of error propagation). \\(\\| Q^{\\Pi_{\\mathcal{B}}} - Q^{\\pi_n}\\|_{2,\\rho_0}^2\\) can be upper bounded as" + }, + { + "type": "equation", + "bbox": [ + 0.176, + 0.784, + 0.826, + 0.833 + ], + "angle": 0, + "content": "\\[\n\\left\\| Q ^ {\\Pi_ {\\mathcal {B}}} - Q ^ {\\pi_ {n}} \\right\\| _ {2, \\rho_ {0}} ^ {2} \\leq \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {0} (d s, d a) \\left[ \\sum_ {k = 0} ^ {n - 1} \\alpha_ {k} A _ {k} \\epsilon_ {k} ^ {2} + \\alpha_ {n} A _ {n} \\left(Q ^ {\\Pi_ {\\mathcal {B}}} - Q _ {0}\\right) ^ {2} \\right] (s, a) \\tag {44}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.833, + 0.218, + 0.845 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.843, + 0.826, + 0.861 + ], + "angle": 0, + "content": "\\[\n\\epsilon_ {k} = Q _ {k + 1} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q _ {k} \\tag {45}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.862, + 0.826, + 0.901 + ], + "angle": 0, + "content": 
"\\[\n\\alpha_ {k} = \\frac {(1 - \\gamma) \\gamma^ {n - k - 1}}{1 - \\gamma^ {n + 1}} \\quad \\text {for } k < n \\tag {46}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.896, + 0.503, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\alpha_ {n} = \\frac {(1 - \\gamma) \\gamma^ {n}}{1 - \\gamma^ {n + 1}}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.101, + 0.826, + 0.146 + ], + "angle": 0, + "content": "\\[\nA _ {k} = \\frac {1 - \\gamma}{2} \\sum_ {m \\geq 0} \\gamma^ {m} \\left(P ^ {\\pi_ {n}}\\right) ^ {m} \\left[ \\left(P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}\\right) ^ {n - k} + P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}} \\dots P ^ {\\pi_ {k + 1}} \\right] \\quad \\text {for } k < n \\tag {47}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.142, + 0.664, + 0.177 + ], + "angle": 0, + "content": "\\[\nA _ {n} = \\frac {1 - \\gamma}{2} \\sum_ {m \\geq 0} \\gamma^ {m} (P ^ {\\pi_ {n}}) ^ {m} \\left[ (P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}) ^ {n + 1} + P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}} \\dots P ^ {\\pi_ {0}} \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.194, + 0.827, + 0.256 + ], + "angle": 0, + "content": "\\(Q_{0}\\) is the \\(Q\\) function after initialization. Note that \\(\\lim_{n\\to \\infty}\\left[\\alpha_nA_n(Q^{\\Pi_{\\mathcal{B}}} - Q_0)^2\\right] = 0\\), so we leave out this term for analysis simplicity. In addition, each \\(A_{k}\\) is a probability kernel that combines \\(P^{\\pi_i}\\) and \\(P^{\\pi^{\\Pi_{\\mathcal{B}}}}\\) (the transition operator on states induced by the constrained optimal policy \\(\\pi^{\\Pi_{\\mathcal{B}}}\\in \\Pi_{\\mathcal{B}}\\)) and \\(\\sum_{k}\\alpha_{k} = 1\\)."
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.261, + 0.766, + 0.28 + ], + "angle": 0, + "content": "The key part in Eq. (44) is \\(\\int_{\\mathcal{S} \\times \\mathcal{A}} \\rho_0 A_k \\epsilon_k^2\\) and we expand this term as the following shows." + }, + { + "type": "equation", + "bbox": [ + 0.177, + 0.283, + 0.825, + 0.374 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {0} A _ {k} \\epsilon_ {k} ^ {2} = \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\frac {1 - \\gamma}{2} \\rho_ {0} \\sum_ {m \\geq 0} \\gamma^ {m} (P ^ {\\pi_ {n}}) ^ {m} \\left[ (P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}) ^ {n - k} + P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}}... P ^ {\\pi_ {k + 1}} \\right] \\epsilon_ {k} ^ {2} \\\\ = \\frac {1 - \\gamma}{2} \\sum_ {m \\geq 0} \\gamma^ {m} \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\left[ \\left(P ^ {\\pi_ {n}}\\right) ^ {m} \\left(P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}\\right) ^ {n - k} + \\left(P ^ {\\pi_ {n}}\\right) ^ {m} P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}} \\dots P ^ {\\pi_ {k + 1}} \\right] \\rho_ {0} \\epsilon_ {k} ^ {2} \\tag {48} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.38, + 0.827, + 0.452 + ], + "angle": 0, + "content": "As Eq. (40) shows, the policy set induced by DOGE is a Bellman-consistent constrained policy set \\(\\Pi_{\\mathcal{B}}\\) defined in Definition 2. Therefore, let \\(\\rho_0\\) be the initial state-action distribution and \\(\\mu\\) denote the distribution of training data. For any policy \\(\\pi_1,\\pi_2,\\dots,\\pi_k\\in \\Pi_{\\mathcal{B}}\\), the distribution after \\(k\\)-th Bellman-consistent iteration is \\(\\rho_{k} = \\rho_{0}P^{\\pi_{1}}P^{\\pi_{2}}\\ldots P^{\\pi_{k}}\\), there exists some finite constants \\(l(k)\\), that \\(\\mathcal{B}(\\rho_k,\\mu ,\\mathcal{F},\\pi)\\leq l(k)\\) holds. Then we can get the following inequalities." 
+ }, + { + "type": "equation", + "bbox": [ + 0.319, + 0.467, + 0.585, + 0.487 + ], + "angle": 0, + "content": "\\[\n\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2, \\rho_ {k}} ^ {2} \\leq \\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2, \\mu} ^ {2} l (k)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.487, + 0.825, + 0.521 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {k} \\epsilon^ {2} \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\mu \\epsilon^ {2} l (k) \\quad (\\epsilon = Q - \\mathcal {T} ^ {\\pi} Q) \\tag {49}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.529, + 0.628, + 0.545 + ], + "angle": 0, + "content": "As a result, by applying the result of Eq. (49) to Eq. (48), we can get" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.547, + 0.826, + 0.585 + ], + "angle": 0, + "content": "\\[\n\\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {0} A _ {k} \\epsilon_ {k} ^ {2} \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} (1 - \\gamma) \\sum_ {m \\geq 0} \\gamma^ {m} \\epsilon_ {k} ^ {2} \\mu l (m + n - k) \\tag {50}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.596, + 0.734, + 0.615 + ], + "angle": 0, + "content": "Plugs Eq. (50) into Eq. (44) and leaves out \\(\\left[\\alpha_{n}A_{n}(Q^{\\Pi_{\\mathcal{B}}} - Q_{0})^{2}\\right]\\) in Eq. 
(44), we get" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.635, + 0.833, + 0.825 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\lim _ {n \\to \\infty} L _ {2} ^ {2} \\leq \\lim _ {n \\to \\infty} \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\left[ \\sum_ {k = 0} ^ {n - 1} (1 - \\gamma) \\sum_ {m \\geq 0} \\gamma^ {m} l (m + n - k) \\alpha_ {k} \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\right] \\\\ = \\lim _ {n \\to \\infty} \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\left[ \\frac {1}{1 - \\gamma^ {n + 1}} \\sum_ {k = 0} ^ {n - 1} (1 - \\gamma) ^ {2} \\sum_ {m \\geq 0} \\gamma^ {m + n - k - 1} l (m + n - k) \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\right] \\\\ \\leq \\lim _ {n \\rightarrow \\infty} \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\left[ \\frac {1}{1 - \\gamma^ {n + 1}} L (\\Pi_ {\\mathcal {B}}) ^ {2} \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\right] \\\\ = \\left[ \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} \\right] ^ {2} L \\left(\\Pi_ {\\mathcal {B}}\\right) ^ {2} \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\tag {51} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.832, + 0.668, + 0.849 + ], + "angle": 0, + "content": "where, \\(L(\\Pi_{\\mathcal{B}}) = \\sqrt{(1 - \\gamma)^2\\sum_{k=1}^{\\infty}k\\gamma^{k-1}l(k)}\\). 
Then, we can bound \\(L_2\\) by" + }, + { + "type": "equation", + "bbox": [ + 0.369, + 0.852, + 0.826, + 0.885 + ], + "angle": 0, + "content": "\\[\n\\lim _ {n \\rightarrow \\infty} L _ {2} \\leq \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} L \\left(\\Pi_ {\\mathcal {B}}\\right) \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {\\mu} \\tag {52}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "With the upper bound of \\( L_{1} \\) and \\( \\lim_{n\\to \\infty}L_2 \\), we can complete the proof by adding these two terms together." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.265, + 0.114, + 0.826, + 0.151 + ], + "angle": 0, + "content": "\\[\n\\lim _ {n \\rightarrow \\infty} \\| Q ^ {*} - Q ^ {\\pi_ {n}} \\| _ {\\rho_ {0}} \\leq \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} \\left[ L \\left(\\Pi_ {\\mathcal {B}}\\right) \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {\\mu} + \\frac {1 - \\gamma}{2 \\gamma} \\alpha \\left(\\Pi_ {\\mathcal {B}}\\right)\\right] \\tag {53}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.166, + 0.441, + 0.182 + ], + "angle": 0, + "content": "E IMPLEMENTATION DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.197, + 0.827, + 0.268 + ], + "angle": 0, + "content": "DOGE can build on top of standard online actor-critic algorithms such as TD3(Fujimoto et al., 2018) and SAC(Haarnoja et al., 2018). We choose TD3 as our base because of its simplicity compared to other methods. We build DOGE on top of TD3 by simply plugging the state-conditioned distance function as a policy regularization term during policy training process. Then, the learning objective of policy \\(\\pi\\) in Eq. 
(7) can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.253, + 0.274, + 0.826, + 0.298 + ], + "angle": 0, + "content": "\\[\n\\pi = \\arg \\max _ {\\pi} \\min _ {\\lambda} \\mathbb {E} _ {s \\sim \\mathcal {D}} [ \\beta Q (s, \\pi (s)) - \\lambda (g (s, \\pi (s)) - G) ] \\quad \\text {s . t .} \\lambda \\geq 0 \\tag {54}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.31, + 0.828, + 0.368 + ], + "angle": 0, + "content": "The \\(Q\\) function, policy and state-conditioned distance function networks are represented by 3 layers ReLU activated MLPs with 256 units for each hidden layer and are optimized by Adam optimizer. In addition, we normalize each dimension of state to a standard normal distribution for Mujoco tasks. The hyperparameters of DOGE are listed in Table 2." + }, + { + "type": "table_caption", + "bbox": [ + 0.377, + 0.38, + 0.622, + 0.396 + ], + "angle": 0, + "content": "Table 2: Hyperparameters of DOGE" + }, + { + "type": "table", + "bbox": [ + 0.21, + 0.406, + 0.79, + 0.779 + ], + "angle": 0, + "content": "
HyperparametersValue
Shared parametersOptimizerAdam
StandardNormalize stateTrue for Mujoco
False for AntMaze
Batch size256
Layers3
Hidden dim256
TD3Actor learning rate3 × 10-4
Critic learning rate3 × 10-4for Mujoco
1 × 10-3for AntMaze
Discount factor γ0.99 for Mujoco
0.995 for AntMaze
Number of iterations106
Target update rate τ0.005
Policy noise0.2
Policy noise clipping0.5
Policy update frequency2
State-Conditioned Distance FunctionLearning rate1 × 10-3for Mujoco
1 × 10-4for AntMaze
Number of noise actions N20
Number of iterations Ng105for Mujoco
106for AntMaze
DOGEα{7.5, 17.5} Mujoco
{5, 10, 70} AntMaze
Lagrangian multiplier λclipped to [1, 100]
λ learning rate3e-4
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.801, + 0.457, + 0.815 + ], + "angle": 0, + "content": "E.1 TD3'S IMPLEMENTATION DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.925 + ], + "angle": 0, + "content": "For the choice of the Critic learning rate and discount factor \\(\\gamma\\), we find that for AntMaze tasks, a high Critic learning rate can improve the stability of value function during training process. This may be because the AntMaze tasks require the value function to dynamic programs more times to \"stitch\" suboptimal trajectories than Mujoco tasks. Therefore, we choose \\(1 \\times 10^{-3}\\) and 0.995 as the Critic learning rate and discount factor \\(\\gamma\\) for AntMaze tasks, respectively. The other implementations such as policy noise scale and policy noise clipping are the same with author's implementation (Fujimoto et al., 2018)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.104, + 0.728, + 0.119 + ], + "angle": 0, + "content": "E.2 STATE-CONDITIONED DISTANCE FUNCTION'S IMPLEMENTATION DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.13, + 0.827, + 0.2 + ], + "angle": 0, + "content": "We sample \\(N = 20\\) noise actions from a uniform distribution that covers the full action space to approximate the estimation value in Eq. (4). We find \\(N = 20\\) can balance the computation complexity and estimation accuracy and is the same sample numbers with CQL (Kumar et al., 2020b). The ablation of \\(N\\) can be found in Fig. 15. 
The practical training objective of the state-conditioned distance function is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.203, + 0.826, + 0.246 + ], + "angle": 0, + "content": "\\[\n\\min _ {g} \\mathbb {E} _ {(s, a) \\in \\mathcal {D}, \\hat {a} _ {i} \\sim U n i f (\\mathcal {A})} \\left[ \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left[ \\| a - \\hat {a} _ {i} \\| - g (s, \\hat {a} _ {i}) \\right] ^ {2} \\right] \\tag {55}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.276, + 0.828, + 0.43 + ], + "angle": 0, + "content": "We find that a wider sample range than the max action space \\([-a_{\\mathrm{max}}, a_{\\mathrm{max}}]\\) is helpful to characterize the geometry of the full offline dataset. This is because some actions in the offline dataset lie at the boundary of the action space, which can only be sampled with little probability when sampling from a narrow distribution. At this time, the noise actions may not cover the geometry information near the boundary. Therefore, we sample noise actions from a uniform distribution that is 3 times wider than the max action space, i.e., \\(\\hat{a} \\sim \\text{Unif}[-3a_{\\mathrm{max}}, 3a_{\\mathrm{max}}]\\). For the learning rate, we find that a high learning rate enables a stable training process in Mujoco tasks. Therefore, we choose \\(1 \\times 10^{-3}\\) and \\(1 \\times 10^{-4}\\) as the distance function learning rate for Mujoco and AntMaze, respectively. We also observe that for Mujoco tasks, \\(10^{5}\\) iterations can already produce a relatively good state-conditioned distance function, and training more times won't hurt the final results. To reduce computation, we only train the state-conditioned distance function for \\(10^{5}\\) steps for Mujoco tasks." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.446, + 0.497, + 0.46 + ], + "angle": 0, + "content": "E.3 HYPERPARAMETERS TUNING OF DOGE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.472, + 0.827, + 0.585 + ], + "angle": 0, + "content": "The scale of \\(\\alpha\\) determines the strength of policy constraint. We tune \\(\\alpha\\) to balance the trade-off between policy constraint and policy improvement. To be mentioned, \\(\\alpha\\) is tuned within only 5 candidates for 20 tasks (17.5 for hopper-m, hopper-m-r and all Mujoco random datasets; 7.5 for other Mujoco datasets; 5 for antmaze-u; 10 for antmaze-u-d; 70 for other AntMaze tasks). This is acceptable in offline policy tuning following (Kumar et al., 2019; Brandfonbrener et al., 2021). To ensure numerical stability, we clip the Lagrangian multiplier \\(\\lambda\\) to [1, 100]. We also find a large initial \\(\\lambda\\) enables stable training for Mujoco tasks but slows down AntMaze training. Therefore, the initial value of Lagrangian multiplier \\(\\lambda\\) is 5 for Mujoco and 1 for AntMaze tasks, respectively." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.6, + 0.391, + 0.613 + ], + "angle": 0, + "content": "E.4 PSEUDOCODE OF DOGE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.625, + 0.828, + 0.683 + ], + "angle": 0, + "content": "The pseudocode of DOGE is listed in Algorithm 1. Changes we make based on TD3 (Fujimoto et al., 2018) are marked in red. The only modification is the training process of the additional state-conditioned distance function and the constrained actor update. We can perform 1M training steps on one GTX 3080Ti GPU in less than \\(50\\mathrm{min}\\) for Mujoco tasks and 1h \\(40\\mathrm{min}\\) for AntMaze tasks." 
+ }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.696, + 0.47, + 0.711 + ], + "angle": 0, + "content": "Algorithm 1 Our implementation for DOGE" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.715, + 0.829, + 0.914 + ], + "angle": 0, + "content": "Require: Dataset \\(\\mathcal{D}\\) . State-conditioned distance network \\(g_{\\psi}\\) . Policy network \\(\\pi_{\\phi}\\) and target policy network \\(\\pi_{\\phi^{\\prime}}\\) with \\(\\phi^{\\prime}\\gets \\phi\\) . Value network \\(Q_{\\theta_i},i = 1,2\\) and target value network \\(Q_{\\theta_i'}\\) \\(i = 1,2\\) with \\(\\theta_i^\\prime \\leftarrow \\theta_i\\) . State-conditioned distance network training steps \\(N_{g}\\) . Policy update frequency m. \n1: for \\(t = 0,1,\\dots ,M\\) do \n2: Sample mini-batch transitions \\(\\{(s_i,a_i,r_i,s_i')\\} \\sim \\mathcal{D}\\) \n3: if \\(t < N_g\\) then \n4: State-Conditioned Distance Function Update: Update \\(\\psi\\) as Eq. (55) shows. \n5: end if \n6: Critic Update: Update \\(\\theta_{i}\\) using policy evaluation method in TD3. \n7: if \\(t\\) mod \\(m = 0\\) then \n8: Constrained Actor Update: Update \\(\\phi ,\\lambda\\) via Eq. (54). 
\n9: Update target networks: \\(\\theta_i^\\prime \\gets \\tau \\theta_i + (1 - \\tau)\\theta_i^\\prime\\) \\(\\phi^{\\prime}\\gets \\tau \\phi +(1 - \\tau)\\phi\\) \n10: end if \n11: end for" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.104, + 0.8, + 0.12 + ], + "angle": 0, + "content": "E.5 EXPERIMENT SETUP FOR THE IMPACT OF DATA GEOMETRY ON DEEP \\(Q\\) FUNCTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.827, + 0.198 + ], + "angle": 0, + "content": "We consider an one-dimensional random walk task with a fixed-horizon (50 steps for each episode), where agents at each step can move in the range of \\([-1, +1]\\) and the state space is a straight range from \\([-10, 10]\\). The destination is located at \\(s = 10\\). The closer the distance to the destination, the larger the reward that the agent can get. The discount factor \\(\\gamma = 0.9\\). The reward function is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.422, + 0.198, + 0.826, + 0.227 + ], + "angle": 0, + "content": "\\[\nr = \\frac {4 0 0 - (s ^ {\\prime} - 1 0) ^ {2}}{4 0 0} \\tag {56}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.236, + 0.825, + 0.35 + ], + "angle": 0, + "content": "We generate offline datasets with different geometry and train the agent based on these datasets. Each synthetic dataset consists of 200 transition steps. We get the approximated \\( Q \\) value \\( \\hat{Q} \\) by training TD3 for \\( 1e + 4 \\) steps each dataset. The learning rate of Actor and Critic networks are both \\( 10^{-3} \\). The other implementation details are the same as the implementation of original TD3 (Fujimoto et al., 2018). 
The true \\( Q \\) function can be get by Monte-Carlo estimation. We find that the near-destination states hold higher approximation error than that far away from the destination due to the scale of true \\( Q \\) value near the destination is large. To alleviate the impact of \\( Q \\) value scale on the approximation error, we define the relative approximation error as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.395, + 0.356, + 0.826, + 0.378 + ], + "angle": 0, + "content": "\\[\n\\hat {\\epsilon} (s, a) = \\epsilon (s, a) - \\min _ {a} \\epsilon (s, a) \\tag {57}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.445 + ], + "angle": 0, + "content": "where, \\(\\epsilon(s,a) = \\hat{Q}(s,a) - Q(s,a)\\). The relative error in the above definition eliminates the effect of different states on the approximation error and can capture the over-estimation error that we care about. We plot the relative approximation error of deep \\(Q\\) functions with different random seeds and data geometry in Fig. 13." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.464, + 0.515, + 0.48 + ], + "angle": 0, + "content": "F ADDITIONAL EXPERIMENT RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.495, + 0.527, + 0.509 + ], + "angle": 0, + "content": "F.1 COMPARISON OF GENERALIZATION ABILITY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.521, + 0.825, + 0.58 + ], + "angle": 0, + "content": "In the well known AntMaze task in D4RL benchmark (Fu et al., 2020), where an ant needs to navigate from the start to the destination in a large maze. The trajectories with coordinates at \\( x \\times y \\in [4,13] \\times [7,9] \\cup [11.5,20.5] \\times [11,13] \\) in AntMaze medium tasks and \\( x \\times y \\in [10.5,21] \\times [7,9] \\cup [19,29.5] \\times [15,17] \\) in AntMaze large tasks are clipped, as Fig. 8 shows." 
+ }, + { + "type": "image", + "bbox": [ + 0.258, + 0.603, + 0.473, + 0.769 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.27, + 0.772, + 0.46, + 0.786 + ], + "angle": 0, + "content": "(a) Modified Medium AntMaze" + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.602, + 0.738, + 0.77 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.542, + 0.772, + 0.716, + 0.786 + ], + "angle": 0, + "content": "(b) Modified Large AntMaze" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.797, + 0.825, + 0.827 + ], + "angle": 0, + "content": "Figure 8: The trajectories in the offline dataset are visualized as blue. Data transitions of two small areas on the critical pathways to the destination have been removed (red box)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.841, + 0.827, + 0.926 + ], + "angle": 0, + "content": "These clipped data counts only about one-tenth of the original dataset and lies in the close proximity of the original trajectories. Under these modified datasets, simply relaying on \"stitching\" data transitions is not enough to solve the navigation problems. We evaluate representative policy constraint method (TD3+BC (Fujimoto & Gu, 2021)), value regularization method (CQL (Kumar et al., 2020b)), in-sample learning method (IQL (Kostrikov et al., 2021b)) and DOGE (our method) on these modified datasets. The evaluation results before and after clipping the trajectories are listed in Table 3. The" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "learning curves for the modified AntMaze medium and AntMaze large tasks are listed in Fig. 
9 and Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.825, + 0.182 + ], + "angle": 0, + "content": "Observe in Table 3 that existing offline RL methods fail miserably and suffer from severe performance drops. By contrast, DOGE maintains competitive performance after the modification of the dataset and shows good generalization ability on unknown areas." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.188, + 0.829, + 0.23 + ], + "angle": 0, + "content": "Apart from above experiments, we also evaluate DOGE when removing only one area: \\([10.5, 21] \\times [7, 9]\\), \\([10.5, 21] \\times [7, 9]\\) for AntMaze-large datasets and \\([4, 13] \\times [7, 9]\\), \\([4, 13] \\times [7, 9]\\) for AntMaze-medium datasets. The final results can be seen in Table 4." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.241, + 0.78, + 0.258 + ], + "angle": 0, + "content": "Table 3: The performance drop after removing the data at the only way to destination." + }, + { + "type": "table", + "bbox": [ + 0.203, + 0.267, + 0.794, + 0.512 + ], + "angle": 0, + "content": "
Dataset typeTD3+BCCQLIQLDOGE(ours)
antmaze-m-p-v2full data065.2±4.870.4±5.380.6±6.5
miss data010.7±18.410.2±2.233.2±27.3
Performance drop ↓-84%86%59%
antmaze-m-d-v2full data054.0±11.774.6±3.277.6±6.1
miss data08.5±5.37.6±5.740.2±32.9
Performance drop ↓-84%90%48%
antmaze-l-p-v2full data018.8±15.343.5±4.548.2±8.1
miss data001.0±0.722.4±15.9
Performance drop ↓-100%98%54%
antmaze-l-d-v2full data031.6±9.545.6±7.636.4±9.1
miss data005.2±3.114.6±11.1
Performance drop ↓-100%89%60%
" + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.542, + 0.316, + 0.555 + ], + "angle": 0, + "content": "Policy constraint" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.563, + 0.335, + 0.664 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.365, + 0.542, + 0.483, + 0.555 + ], + "angle": 0, + "content": "Value regularization" + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.563, + 0.495, + 0.664 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.542, + 0.641, + 0.555 + ], + "angle": 0, + "content": "In-sample learning" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.563, + 0.655, + 0.664 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.709, + 0.542, + 0.784, + 0.555 + ], + "angle": 0, + "content": "DOGE (Ours)" + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.563, + 0.816, + 0.664 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.673, + 0.335, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.673, + 0.497, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.673, + 0.658, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.673, + 0.819, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.785, + 0.825, + 0.827 + ], + "angle": 0, + "content": "Figure 9: Evaluation on TD3+BC(Fujimoto & Gu, 2021), CQL(Kumar et al., 2020b), IQL(Kostrikov et al., 2021b), and DOGE (ours) before and after removing the data shown in Fig.8a for AntMaze medium tasks." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.259, + 0.102, + 0.738, + 0.117 + ], + "angle": 0, + "content": "Table 4: Ablation for DOGE generalization with different removal areas." + }, + { + "type": "table", + "bbox": [ + 0.27, + 0.128, + 0.725, + 0.235 + ], + "angle": 0, + "content": "
DatasetFull datasetOne removalTwo removal
antmaze-m-p-v280.6±6.562.3±7.533.2±27.3
antmaze-m-d-v277.6±6.141.3±42.840.2±32.9
antmaze-l-p-v248.2±8.126.4±19.422.4±15.9
antmaze-l-d-v236.4±9.112.3±4.214.6±11.1
Total score242.8±29.8142.3±73.9110.4±87.2
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.248, + 0.517, + 0.262 + ], + "angle": 0, + "content": "F.2 ADDITIONAL COMPARISON WITH TD3+BC" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.317 + ], + "angle": 0, + "content": "In this section, we further demonstrate the superiority of DOGE over our most related practical work TD3+BC (Fujimoto & Gu, 2021). One can find that the biggest difference between DOGE and TD3+BC lies in the policy constraint used for policy optimization:" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.322, + 0.592, + 0.337 + ], + "angle": 0, + "content": "- TD3+BC: constrains the policy to minimize the MSE BC loss." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.344, + 0.817, + 0.36 + ], + "angle": 0, + "content": "- DOGE: constrains the policy to minimize the learned state-conditioned distance function \\( g(s, a) \\)." + }, + { + "type": "list", + "bbox": [ + 0.17, + 0.322, + 0.817, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.365, + 0.827, + 0.477 + ], + "angle": 0, + "content": "As discussed in Section 3.1, the learned distance function \\( g(s,a) \\) can capture the global geometric information of the offline dataset, while the MSE BC loss can only provide local sample-to-sample regularization, which may be noisy, especially in datasets that contain low-quality samples. Taking Figure 10 as an illustration, under strict BC constraint, policy learning on noisy low-quality samples may provide contradicting learning signals to near-optimal samples, which can cause inferior policy performance and unstable training process. By contrast, the state-conditioned distance function \\( g(s,a) \\) in DOGE is trained on the whole dataset and hence brings global geometric information, which is far more informative and stable as compared with the MSE BC loss." 
+ }, + { + "type": "image", + "bbox": [ + 0.264, + 0.489, + 0.443, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.328, + 0.63, + 0.379, + 0.64 + ], + "angle": 0, + "content": "(a) TD3+BC" + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.492, + 0.676, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.567, + 0.63, + 0.61, + 0.64 + ], + "angle": 0, + "content": "(b) DOGE" + }, + { + "type": "image_footnote", + "bbox": [ + 0.324, + 0.654, + 0.42, + 0.666 + ], + "angle": 0, + "content": "Low-quality Samples" + }, + { + "type": "image_footnote", + "bbox": [ + 0.421, + 0.654, + 0.53, + 0.665 + ], + "angle": 0, + "content": "Near-optimal Samples" + }, + { + "type": "image_footnote", + "bbox": [ + 0.551, + 0.654, + 0.627, + 0.665 + ], + "angle": 0, + "content": "Policy Outputs" + }, + { + "type": "image", + "bbox": [ + 0.257, + 0.675, + 0.398, + 0.697 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.399, + 0.675, + 0.495, + 0.697 + ], + "angle": 0, + "content": "MSE BC Constraints of Near-optimal Samples" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.673, + 0.602, + 0.696 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.603, + 0.675, + 0.721, + 0.696 + ], + "angle": 0, + "content": "g(s,a) Distance Function Values" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.712, + 0.827, + 0.777 + ], + "angle": 0, + "content": "Figure 10: Illustrations of the differences between (a) the MSE BC constraint of TD3+BC and (b) the state-conditioned distance function constraint of DOGE. In (a), the MSE BC constraint in TD3+BC blindly enforces the imitation behavior on any data samples, which may lead to an inferior policy in the presence of noisy low-quality samples. 
In (b), the state-conditioned distance function \\( g(s,a) \\) can provide more informative global dataset geometry information to guide the stable learning of the policy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.792, + 0.825, + 0.849 + ], + "angle": 0, + "content": "To better illustrate the superiority of DOGE over TD3+BC, we add extra comparative experiments with TD3+BC on a new set of mixed-quality datasets. In halfcheetah-random dataset, we add different proportions (1% to 20%) of the near-optimal halfcheetah-medium-expert dataset to form new mixed datasets and evaluate how TD3+BC and DOGE perform. See Figure 11 for detailed results." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Figure 11 shows that DOGE enjoys more performance gains when the random dataset involves near-optimal data, while TD3+BC is heavily influenced by the local information from the larger proportion of the low-quality random data. Moreover, TD3+BC suffers from severe oscillation and training instability, while DOGE enjoys a stable training process due to the use of the more informative state-conditioned distance constraint that captures the overall dataset geometry." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.101, + 0.307, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.101, + 0.435, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.437, + 0.102, + 0.562, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.564, + 0.102, + 0.688, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.102, + 0.816, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.193, + 0.828, + 0.245 + ], + "angle": 0, + "content": "Figure 11: Comparisons between DOGE and TD3+BC on mixed datasets with different proportions of halfcheetah-medium-expert dataset added into halfcheetah-random dataset. Ratio- \\(1\\%\\) means \\(1\\%\\) medium-expert dataset is added into the original halfcheetah-random dataset. TD3+BC suffers severe oscillation and training instability, while DOGE enjoys stable training processes and substantial performance gains." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.259, + 0.581, + 0.273 + ], + "angle": 0, + "content": "F.3 COMPARISON WITH UNCERTAINTY-BASED METHODS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.298, + 0.825, + 0.382 + ], + "angle": 0, + "content": "We also compare DOGE with SOTA uncertainty-based offline RL approaches, including EDAC (An et al., 2021) and PBRL (Bai et al., 2021), on more complex D4RL AntMaze tasks. The final results are presented in Table 5. 
Table 5 shows that the SOTA uncertainty-based methods are unable to provide reasonable performance on the difficult Antmaze tasks, despite that they can achieve good performance on simpler MuJoCo tasks. A similar finding is also reported in a recent offline RL study (Anonymous, 2023)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.388, + 0.827, + 0.473 + ], + "angle": 0, + "content": "In practical implementation of EDAC and PBRL, to obtain relatively accurate uncertainty measures and achieve reasonable performance, these methods typically need dozens of ensemble Q-networks, which can be quite costly and inefficient. Moreover, heavy hyperparameter tuning is also required for them to obtain the best performance. In contrast, our method quantifies the generalization ability of the Q-function from the perspective of dataset geometry and is trained using a simple regression loss in Eq. (4), which enjoys better training stability and simplicity." + }, + { + "type": "table_caption", + "bbox": [ + 0.277, + 0.514, + 0.72, + 0.529 + ], + "angle": 0, + "content": "Table 5: Average normalized scores over 5 seeds on Antmaze tasks" + }, + { + "type": "table", + "bbox": [ + 0.307, + 0.539, + 0.688, + 0.655 + ], + "angle": 0, + "content": "
DatasetEDACPBRLDOGE(Ours)
antmaze-u-v20097.0±1.8
antmaze-u-p-v20063.5±9.3
antmaze-m-p-v20080.6±6.5
antmaze-m-d-v20077.6±6.1
antmaze-l-p-v20048.2±8.1
antmaze-l-d-v20036.4±9.1
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.739, + 0.555, + 0.753 + ], + "angle": 0, + "content": "F.4 ADDITIONAL ANALYSIS ON DISTANCE FUNCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.778, + 0.825, + 0.848 + ], + "angle": 0, + "content": "We report the learning curves of the state-conditioned distance function \\( g(s, a) \\) trained on different datasets (including hopper-m-v2, halfcheetah-m-v2, and walker2d-m-v2 in Figure 12. Our proposed state-conditioned distance function is learned through a simple regression task (Eq. (4)), which is very easy to train. Figure 12 shows that it reaches convergence within only 1K training steps on D4RL MuJoCo medium datasets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.825, + 0.924 + ], + "angle": 0, + "content": "We also change the network configurations (i.e., number of hidden layers and hidden units) of the state-conditioned distance function \\( g(s, a) \\) to investigate how the expressivity of \\( g \\) influences the performance of the policy. Table 6 shows that DOGE achieves similar performance across different \\( g \\) network configurations, indicating that DOGE is robust to model complexity and expressivity of the state-conditioned distance function." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.101, + 0.378, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.101, + 0.597, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.101, + 0.822, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.245, + 0.751, + 0.262 + ], + "angle": 0, + "content": "Figure 12: Learning curves of the state-conditioned distance function \\( g(s, a) \\)" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.289, + 0.828, + 0.332 + ], + "angle": 0, + "content": "Table 6: Normalized scores of DOGE trained on distance functions with different network configurations. [128, 128] means \\( g \\) network has 2 hidden layers with 128 units. [256, 256, 256] means 3 hidden layers with 256 units." + }, + { + "type": "table", + "bbox": [ + 0.291, + 0.342, + 0.704, + 0.417 + ], + "angle": 0, + "content": "
Dataset[128, 128][256, 256][256, 256, 256]
hopper-m99.4101.498.6
halfcheetah-m47.446.945.3
walker2d-m85.386.486.8
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.442, + 0.757, + 0.469 + ], + "angle": 0, + "content": "F.5 ADDITIONAL EXPERIMENTS OF THE IMPACT OF DATA GEOMETRY ON DEEP \\(Q\\) FUNCTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.481, + 0.825, + 0.54 + ], + "angle": 0, + "content": "We run several experiments with different random seeds (see Figure 13). Although the approximation error pattern of different random seeds is not the same, they all perform in the same manner that deep \\( Q \\) functions produce relatively low approximation error inside the convex hull of training data. We refer to this phenomenon as deep \\( Q \\) functions interpolate well but struggle to extrapolate." + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.55, + 0.393, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.55, + 0.603, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.55, + 0.815, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.659, + 0.392, + 0.766 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.659, + 0.603, + 0.766 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.659, + 0.815, + 0.766 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.767, + 0.392, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.767, + 0.603, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.767, + 0.815, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.887, + 0.825, + 0.917 + ], + "angle": 0, + "content": "Figure 13: The figures above depict the effect of different data geometries on the final deep \\(Q\\) functions approximation error. 
The training data are marked as white dots." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.31, + 0.118 + ], + "angle": 0, + "content": "G ABLATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.828, + 0.195 + ], + "angle": 0, + "content": "We conduct ablation studies on the effect of \\(\\alpha\\) in \\(\\beta = \\frac{\\alpha}{\\frac{1}{n}\\sum_{i=1}^{n}|Q(s_i,a_i)|}\\) (see Figure 14), the non-parametric threshold \\(G\\) in Eq. (6) (see Figure 16) and the non-parametric number of noise actions \\(N\\) to train state-conditioned distance function (see Figure 15) on the performance of the final algorithm. We also conduct ablation studies on the effect of \\(G\\) on the Lagrangian multiplier \\(\\lambda\\) (see Figure 17)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.2, + 0.825, + 0.243 + ], + "angle": 0, + "content": "For \\(\\alpha\\), we add or subtract 2.5 to the original value. For \\(N\\), we choose \\(N = 10, 20, 30\\) to conduct experiments respectively. For \\(G\\), we choose \\(30\\%\\), \\(50\\%\\), \\(70\\%\\), \\(90\\%\\) and \\(100\\%\\) upper quantile of the distance value in mini-batch samples and the results can be found in Table 7." + }, + { + "type": "table_caption", + "bbox": [ + 0.34, + 0.256, + 0.655, + 0.272 + ], + "angle": 0, + "content": "Table 7: Ablations on G with different quantile." + }, + { + "type": "table", + "bbox": [ + 0.2, + 0.282, + 0.797, + 0.48 + ], + "angle": 0, + "content": "
DatasetG = 30%G = 50%G = 70%G = 90%G = 100%
hopper-r-v219.8±0.321.1±12.615.5±13.517.6±12.216.4±12.4
halfcheetah-r-v219.4±0.617.8±1.217.8±0.717.7±1.017.7±0.8
walker2d-r-v22.6±3.90.9±2.42.2±2.61.8±3.32.2±3.2
hopper-m-v244.6±5.798.6±2.199.4±0.491.5±9.932.9±54.3
halfcheetah-m-v241.3±1.245.3±0.646.0±0.146.0±0.846.1±0.5
walker2d-m-v283.7±7.586.8±0.887.3±1.669.9±28.984.2±1.0
hopper-m-r-v251.5±11.276.2±17.779.6±36.978.4±27.665.7±37.2
halfcheetah-m-r-v25.9±5.742.8±0.643.2±0.142.2±0.842.0±0.6
walker2d-m-r-v228.3±14.387.3±2.387.9±2.477.8±21.678.6±24.1
hopper-m-e-v261.7±10.4102.7±5.282.8±5.888.9±17.770.0±48.4
halfcheetah-m-e-v246.9±5.278.7±8.475.1±15.473.5±13.669.9±8.7
walker2d-m-e-v2110.5±0.7110.4±1.5111.1±0.5110.2±22.580.0±54.3
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.494, + 0.825, + 0.592 + ], + "angle": 0, + "content": "Seen from Table 7 that using different \\( G \\) for different tasks may achieve even better performance. Particularly, for some datasets with diverse data distributions that need to find good data from suboptimal data, a more tolerant quantile (e.g., \\( G = 70\\% \\)) can reasonably extend feasible region and increase the opportunity to find the optimal policy, such as hopper-m-r, halfcheetah-m-r, walker2d-m-r, hopper-m-e, halfcheetah-m-e. However, an overly relaxed quantile (e.g., \\( G = 90\\% \\) and \\( 100\\% \\)) increases the risk of including problematic OOD actions in policy learning, causing performance drop due to value overestimation and high variance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.598, + 0.825, + 0.687 + ], + "angle": 0, + "content": "By contrast, an overly restrictive quantile such as \\( G = 30\\% \\) can be over-conservative and cause significant constraints violations that impede policy learning, as constraints satisfaction is favored over the max-Q operation in most updates. This can be reflected in the additional results for the Lagrangian multiplier \\( \\lambda \\) (see Appendix E.2 for learning curves and Figure 11 for additional ablations), where \\( \\lambda \\rightarrow \\infty \\) for some tasks under \\( G = 30\\% \\). This will cause the suboptimality gap \\( (\\frac{1 - \\gamma}{2\\gamma}\\alpha(\\Pi_{\\mathcal{D}})) \\) in Theorem 3 to dominate the performance bound, leading to inferior policy." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.692, + 0.825, + 0.748 + ], + "angle": 0, + "content": "As hyperparameter tuning in practical offline RL applications without online interaction is very difficult, to reduce the computational load, we set \\( G = 50\\% \\) as default in a non-parametric manner, since it consistently achieves good performance, and is neither too conservative nor too aggressive for most tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.755, + 0.825, + 0.84 + ], + "angle": 0, + "content": "Observe in Figure 14 that DOGE maintains the similar performance with the changes of \\(\\alpha\\) on most of Mujoco tasks. At the same time, we also observe that the effect of \\(N\\) on the experiment is not obvious. Compared with \\(N\\) and \\(\\alpha\\), we find that \\(G\\) has a more significant effect on the experimental results. Observe in Figure 16 that a small \\(G\\) usually causes the policy set induced by DOGE to be too small to obtain near-optimal policy. By contrast, a large \\(G\\) is not likely to cause excessive error accumulation and hence maintains relatively good performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.845, + 0.825, + 0.874 + ], + "angle": 0, + "content": "In addition, the ablation studies show that our method is hyperparameter-robust and maintains good performance with changes in hyperparameters." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.101, + 0.338, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.102, + 0.497, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.102, + 0.657, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.102, + 0.816, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.2, + 0.338, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.2, + 0.497, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.2, + 0.657, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.2, + 0.816, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.297, + 0.338, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.297, + 0.497, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.297, + 0.657, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.297, + 0.816, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.405, + 0.738, + 0.421 + ], + "angle": 0, + "content": "Figure 14: Ablation for \\(\\alpha\\). Error bars indicate min and max over 5 seeds." 
+ }, + { + "type": "image", + "bbox": [ + 0.182, + 0.435, + 0.338, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.435, + 0.497, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.435, + 0.657, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.435, + 0.816, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.533, + 0.338, + 0.629 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.533, + 0.497, + 0.629 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.533, + 0.657, + 0.629 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.533, + 0.816, + 0.629 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.631, + 0.338, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.631, + 0.497, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.631, + 0.657, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.631, + 0.816, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.255, + 0.738, + 0.741, + 0.753 + ], + "angle": 0, + "content": "Figure 15: Ablation for \\( N \\). Error bars indicate min and max over 5 seeds." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.766, + 0.376, + 0.781 + ], + "angle": 0, + "content": "H LEARNING CURVES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.797, + 0.828, + 0.854 + ], + "angle": 0, + "content": "The learning curves for Mujoco and AntMaze tasks are listed in Fig. 18 and Fig.19. The learned policies are evaluated for 10 episodes and 100 episodes each seed for Mujoco and AntMaze tasks, respectively. 
For AntMaze tasks, we subtract 1 from rewards for the AntMaze datasets following (Kumar et al., 2020b; Kostrikov et al., 2021b)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.145, + 0.338, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.145, + 0.497, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.145, + 0.657, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.146, + 0.816, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.244, + 0.338, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.244, + 0.497, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.244, + 0.657, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.244, + 0.816, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.342, + 0.338, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.342, + 0.497, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.342, + 0.657, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.342, + 0.816, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.257, + 0.449, + 0.741, + 0.465 + ], + "angle": 0, + "content": "Figure 16: Ablation for \\( G \\). Error bars indicate min and max over 5 seeds." 
+ }, + { + "type": "image", + "bbox": [ + 0.18, + 0.561, + 0.338, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.562, + 0.497, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.562, + 0.657, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.562, + 0.816, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.659, + 0.338, + 0.754 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.659, + 0.497, + 0.754 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.659, + 0.657, + 0.754 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.659, + 0.816, + 0.754 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.756, + 0.338, + 0.851 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.756, + 0.497, + 0.851 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.756, + 0.657, + 0.851 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.756, + 0.816, + 0.851 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.862, + 0.738, + 0.878 + ], + "angle": 0, + "content": "Figure 17: Ablation for \\(\\lambda\\). Error bars indicate min and max over 5 seeds." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.153, + 0.338, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.153, + 0.496, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.153, + 0.657, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.153, + 0.816, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.252, + 0.338, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.252, + 0.496, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.252, + 0.657, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.252, + 0.816, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.349, + 0.338, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.349, + 0.496, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.349, + 0.657, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.349, + 0.816, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.457, + 0.802, + 0.473 + ], + "angle": 0, + "content": "Figure 18: Learning curves for Mujoco Tasks. Error bars indicate min and max over 5 seeds." 
+ }, + { + "type": "image", + "bbox": [ + 0.184, + 0.584, + 0.391, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.584, + 0.603, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.584, + 0.813, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.714, + 0.391, + 0.843 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.714, + 0.602, + 0.843 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.714, + 0.813, + 0.843 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.188, + 0.854, + 0.808, + 0.87 + ], + "angle": 0, + "content": "Figure 19: Learning curves for AntMaze Tasks. Error bars indicate min and max over 5 seeds." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "34" + } + ] +] \ No newline at end of file diff --git a/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_origin.pdf b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4660219f9c30e7f48ed2ebf4c6ead82f53e5f11b --- /dev/null +++ b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/01fada97-5ce7-4d5f-a893-a0388d8d2a96_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f720a8494f37ea10c092e6b0f0089fb42cd1f707f32694e0d89f3d4e1e47deaa +size 13539857 diff --git a/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/full.md b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/full.md new file mode 100644 index 
0000000000000000000000000000000000000000..f44e5e97adcba3113ea8d8fd69e44e5df39210ae --- /dev/null +++ b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/full.md @@ -0,0 +1,1081 @@ +# WHEN DATA GEOMETRY MEETS DEEP FUNCTION: GENERALIZING OFFLINE REINFORCEMENT LEARNING + +Jianxiong Li $^{1}$ , Xianyuan Zhan $^{1,2*}$ , Haoran Xu $^{1}$ , Xiangyu Zhu $^{1}$ , Jingjing Liu $^{1}$ & Ya-Qin Zhang $^{1*}$ + +$^{1}$ Institute for AI Industry Research (AIR), Tsinghua University, Beijing, China +$^{2}$ Shanghai Artificial Intelligence Laboratory, Shanghai, China + +li-jx21@mails.tsinghua.edu.cn, zhanxianyuan@air.tsinghua.edu.cn + +# ABSTRACT + +In offline reinforcement learning (RL), one detrimental issue to policy learning is the error accumulation of deep $Q$ function in out-of-distribution (OOD) areas. Unfortunately, existing offline RL methods are often over-conservative, inevitably hurting generalization performance outside data distribution. In our study, one interesting observation is that deep $Q$ functions approximate well inside the convex hull of training data. Inspired by this, we propose a new method, DOGE (Distance-sensitive Offline RL with better Generalization). DOGE marries dataset geometry with deep function approximators in offline RL, and enables exploitation in generalizable OOD areas rather than strictly constraining policy within data distribution. Specifically, DOGE trains a state-conditioned distance function that can be readily plugged into standard actor-critic methods as a policy constraint. Simple yet elegant, our algorithm enjoys better generalization compared to state-of-the-art methods on D4RL benchmarks. Theoretical analysis demonstrates the superiority of our approach to existing methods that are solely based on data distribution or support constraints. Code is available at https://github.com/Facebear-ljx/DOGE. 
+ +# 1 INTRODUCTION + +Offline reinforcement learning (RL) provides a new possibility to learn optimized policies from large, pre-collected datasets without any environment interaction (Levine et al., 2020). This holds great promise to solve many real-world problems when online interaction is costly or dangerous yet historical data is easily accessible (Zhan et al., 2022). However, the optimization nature of RL, as well as the need for counterfactual reasoning on unseen data under offline setting, have caused great technical challenges for designing effective offline RL algorithms. Evaluating value function outside data coverage areas can produce falsely optimistic values; without corrective information from online interaction, such estimation errors can accumulate quickly and misguide policy learning process (Van Hasselt et al., 2018; Fujimoto et al., 2018; Kumar et al., 2019). + +Recent model-free offline RL methods investigate this error accumulation challenge in several ways: 1) Policy Constraint: directly constraining learned policy to stay inside distribution, or with the support of dataset (Kumar et al., 2019); 2) Value Regularization: regularizing value function to assign low values at out-of-distribution (OOD) actions (Kumar et al., 2020b); 3) In-sample Learning: learning value function within data samples (Kostrikov et al., 2021b) or simply treating it as the value function of behavioral policy (Brandfonbrener et al., 2021). All three schools of methods share similar traits of being conservative and omitting evaluation on OOD data, which brings benefits of minimizing model exploitation error, but at the expense of poor generalization of learned policy in OOD regions. Thus, a gaping gap still exists when such methods are applied to real-world tasks, where most datasets only partially cover state-action space with suboptimal policies. 
+ +Meanwhile, online deep reinforcement learning (DRL) that leverages powerful deep neural network (DNN) with optimistic exploration on unseen samples can yield high-performing policies with promising generalization performance (Mnih et al., 2015; Silver et al., 2017; Degrave et al., 2022; + +![](images/969693e52ea1b5a561742329d160d9dba76c5fe630bbdf5ecdfd7236acf0572b.jpg) +Figure 1: Left: Visualization of AntMaze dataset. Data transitions of two small areas on the critical pathways to the destination have been removed (red box). Right: Performance of three SOTA offline RL methods. + +![](images/00f19f0a37f1972766c0bf4d1febc88cf192a7ea787ea37f46b857a71762b5fa.jpg) + +![](images/8c7cb325e06f0782b93d14c398a47000591da83dc955e55688ba29e1b3b17338.jpg) + +![](images/b7029b2d6fd977e225ef1f9c44d947476632d531aec6ffb20b26a7336166fdf1.jpg) + +Packer et al., 2018). This staring contrast propels us to re-think the question: Are we being too conservative? It is well known that DNN has unparalleled approximation and generalization abilities, compared with other function approximators. These attractive abilities have not only led to huge success in computer vision and natural language processing (He et al., 2016; Vaswani et al., 2017), but also amplified the power of RL. Ideally, in order to obtain the best policy, an algorithm should enable offline policy learning on unseen state-action pairs that function approximators (e.g., $Q$ function, policy network) can generalize well, and add penalization only on non-generalizable areas. + +However, existing offline RL methods heed too much conservatism on data-related regularizations, while largely overlooking the generalization ability of deep function approximators. Intuitively, let us consider the well-known AntMaze task in the D4RL benchmark (Fu et al., 2020), where an ant navigates from the start to the destination in a large maze. 
We observe that existing offline RL methods fail miserably when we remove only small areas of data on the critical pathways to the destination. As shown in Figure 1, the two missing areas reside in close proximity to the trajectory data. Simply "stitching" up existing trajectories as approximation is not sufficient to form a near-optimal policy at missing regions. Exploiting the generalizability of deep function approximators, however, can potentially compensate for the missing information. + +In our study, we observe that the value function approximated by DNN can interpolate well but struggles to extrapolate (see Section 2.2). Such an "interpolate well" phenomenon is also observed in previous studies on the generalization of DNN (Haley & Soloway, 1992; Barnard & Wessels, 1992; Arora et al., 2019a; Xu et al., 2020; Florence et al., 2022). This finding motivates us to reconsider the generalization of function approximators in offline RL in the context of dataset geometry. Along this line, we discover that a closer distance between a training sample to the offline dataset often leads to a smaller value variation range of the learned neural network, which effectively yields more accurate inference of the value function inside the convex hull (formed by the dataset). By contrast, outside the convex hull, especially in those areas far from the training data, the value variation range usually renders too large to guarantee a small approximation error. + +Inspired by this, we design a new algorithm DOGE (Distance-sensitive Offline RL with better Generalization) from the perspective of generalization performance of deep $Q$ function. We first propose a state-conditioned distance function to characterize the geometry of offline datasets, whose output serves as a proxy to the network generalization ability. The resulting algorithm learns a state-conditioned distance function as a policy constraint on standard actor-critic RL framework. 
Theoretical analysis demonstrates the superior performance bound of our method compared to previous policy constraint methods that are based on data distribution or support constraints. Evaluations on D4RL benchmarks validate that our algorithm enjoys better performance and generalization abilities than state-of-the-art offline RL methods. + +# 2 DATA GEOMETRY VS. DEEP $Q$ FUNCTIONS + +# 2.1 NOTATIONS + +We consider the standard continuous action space Markov decision process (MDP) setting, which can be represented by a tuple $(S, \mathcal{A}, \mathcal{P}, r, \gamma)$ , where $S$ and $\mathcal{A}$ are the state and action space, $\mathcal{P}(s'|s, a)$ is the transition dynamics, $r(s, a)$ is a reward function, and $\gamma \in [0,1)$ is a discount factor. The objective of the RL problem is to find a policy $\pi(a|s)$ that maximizes the expected cumulative discounted return, which can be represented by a $Q$ function $Q_{\theta}^{\pi}(s, a) = \mathbb{E}[\sum_{t=0}^{\infty} \gamma^{t} r(s_{t}, a_{t}) | s_{0} = s, a_{0} = a, a_{t} \sim \pi(\cdot | s_{t}), s_{t+1} \sim \mathcal{P}(\cdot | s_{t}, a_{t})]$ . The $Q$ function is typically approximated by function + +![](images/6e9e3426dbdabdef6eeaa571b21dd63d5dd82524c444cb7cca9ab38614b521c8.jpg) +Figure 2: Approximation error of deep $Q$ functions with different dataset geometry. Offline data are marked as white dots (Please refer to Appendix E.5 for detailed experimental setup). + +approximators with learnable parameters $\theta$ , such as deep neural networks. Under offline RL setting, we are only given a fixed dataset $\mathcal{D}$ and cannot interact further with the environment. 
Therefore, the parameters $\theta$ are optimized by minimizing the following temporal difference (TD) error: + +$$ +\min _ {\theta} \mathbb {E} _ {(s, a, s ^ {\prime}) \in \mathcal {D}} \left[ \left(r (s, a) + \gamma \mathbb {E} _ {a ^ {\prime} \sim \pi (\cdot | s ^ {\prime})} \left[ Q _ {\theta^ {\prime}} ^ {\pi} \left(s ^ {\prime}, a ^ {\prime}\right) \right]\right) - Q _ {\theta} ^ {\pi} (s, a) \right] ^ {2} \tag {1} +$$ + +where $Q_{\theta'}^{\pi}$ is the target $Q$ function, which is a delayed copy of the current $Q$ network. + +# 2.2 INTERPOLATE VS. EXTRAPOLATE + +Motivating examples. Let's first consider a set of simple one-dimensional random walk tasks with different offline datasets, where agents at each step can take an action to move in the range of $[-1, 1]$ , and the state space is a straight line ranging from $[-10, 10]$ . The destination is located at $s = 10$ . The closer to the destination, the larger reward the agent gets (i.e., $r = 1$ at $s = 10$ , $r = 0$ at $s = -10$ ). The approximation errors of the learned $Q$ functions are visualized in Figure 2. Note that the approximation errors of the learned $Q$ functions tend to be low at state-action pairs that lie inside or near the boundaries of the convex hull formed by the dataset. Under continuous state-action space, state-action pairs within the convex hull of the dataset can be represented in an interpolated manner (referred as interpolated data), i.e., $x_{in} = \sum_{i=1}^{n} \alpha_{i} x_{i}$ , $\sum_{i=1}^{n} \alpha_{i} = 1$ , $\alpha_{i} \geq 0$ , $x_{i} = (s_{i}, a_{i}) \in \mathcal{D}$ ; similarly, we can define the extrapolated data that lie outside the convex hull of the dataset as $x_{out} = \sum_{i=1}^{n} \beta_{i} x_{i}$ , where $\sum_{i=1}^{n} \beta_{i} = 1$ and $\beta_{i} \geq 0$ do not hold simultaneously. 
+ +We observe that the geometry of the datasets play a special role on the approximation error of deep $Q$ functions, or in other words, deep $Q$ functions interpolate well but struggle to extrapolate. This phenomenon is also reflected in studies on the generalization performance of deep neural networks under a supervised learning setting (Haley & Soloway, 1992; Barnard & Wessels, 1992; Arora et al., 2019a; Xu et al., 2020; Florence et al., 2022), but is largely overlooked in modern offline RL. + +Theoretical explanations. Based on advanced theoretical machinery from the generalization analysis of DNN, such as neural tangent kernel (NTK) (Jacot et al., 2018), we can theoretically demonstrate that this phenomenon is also carried over to the offline RL setting for deep $Q$ functions. Define $\operatorname{Proj}_{\mathcal{D}}(x) \coloneqq \arg \min_{x_i \in \mathcal{D}} \| x - x_i \|$ (we denote $\| x \|$ as Euclidean norm) as the projection operator that projects unseen data $x$ to the nearest data point in dataset $\mathcal{D}$ . Theorem 1 gives a theoretical explanation of the "interploate well" phenomenon for deep $Q$ functions under the NTK assumptions (see Appendix B.2 for detailed proofs): + +Theorem 1. (Value difference of deep $Q$ function for interpolated and extrapolated data). 
Under the NTK regime, given an unseen interpolated data $x_{in}$ and an extrapolated data $x_{out}$ , then the value difference of deep $Q$ function for interpolated and extrapolated input data can be bounded as: + +$$ +\begin{array}{l} \| Q _ {\theta} (x _ {i n}) - Q _ {\theta} (\mathrm {P r o j} _ {\mathcal {D}} (x _ {i n})) \| \leq C _ {1} (\sqrt {\min (\| x _ {i n} \| , \| \mathrm {P r o j} _ {\mathcal {D}} (x _ {i n}) \|)} \sqrt {d _ {x _ {i n}}} + 2 d _ {x _ {i n}}) \\ \leq C _ {1} \left(\sqrt {\min \left(\| x _ {i n} \| , \| \operatorname {P r o j} _ {\mathcal {D}} \left(x _ {i n}\right) \|\right)} \sqrt {B} + 2 B\right) \tag {2} \\ \end{array} +$$ + +$$ +\| Q _ {\theta} (x _ {o u t}) - Q _ {\theta} (\operatorname {P r o j} _ {\mathcal {D}} (x _ {o u t})) \| \leq C _ {1} (\sqrt {\min (\| x _ {o u t} \| , \| \operatorname {P r o j} _ {\mathcal {D}} (x _ {o u t}) \|)} \sqrt {d _ {x _ {o u t}}} + 2 d _ {x _ {o u t}}) \quad (3) +$$ + +where $d_{x_{in}} = \| x_{in} - \mathrm{Proj}_{\mathcal{D}}(x_{in})\| \leq \max_{x_i\in \mathcal{D}}\| x_{in} - x_i\| \leq B$ and $d_{x_{out}} = \| x_{out} - \mathrm{Proj}_{\mathcal{D}}(x_{out})\|$ are distances of $x_{in}$ and $x_{out}$ to the nearest data points in dataset $\mathcal{D}$ . $B$ and $C_1$ are finite constants. + +Theorem 1 shows that given an unseen input $x$ , $Q_{\theta}(x)$ can be controlled by in-sample $Q$ value $Q_{\theta}(\mathrm{Proj}_{\mathcal{D}}(x))$ and the distance $\| x - \mathrm{Proj}_{\mathcal{D}}(x)\|$ . The smaller the distance, the more controllable the output of deep $Q$ functions. Therefore, because the distance to dataset is strictly bounded (at + +most $B$ for interpolated data), the approximated $Q$ values at interpolated data as well as extrapolated data near the boundaries of the convex hull formed by the dataset cannot be too far off. 
Moreover, as $d_{x_{out}}$ can take substantially larger values than $d_{x_{in}}$ , interpolated data generally enjoys a tighter bound compared with extrapolated data, if the dataset only narrowly covers a large state-action space. + +Empirical observations in Figure 2 and Theorem 1 both demonstrate that data geometry can induce different approximation error accumulation patterns for deep $Q$ functions. While approximation error accumulation is generally detrimental to offline RL, a fine-grained analysis is missing in previous studies about where value function can approximate well. We argue that it is necessary to take data geometry into consideration when designing less conservative offline RL algorithms. + +# 3 GENERALIZABLE OFFLINE RL FRAMEWORK + +In this section, we present our algorithm DOGE (Distance-sensitive Offline RL with better GEneralization). By introducing a specially designed state-conditioned distance function to characterize the geometry of offline datasets, we can construct a very simple, less conservative and also more generalizable offline RL algorithm upon standard actor-critic framework. + +# 3.1 STATE-CONDITIONED DISTANCE FUNCTION + +As revealed in Theorem 1, the sample-to-dataset distance plays an important role in measuring the controllability of $Q$ values. However, given an arbitrary state-action sample $(s,a)$ , naively computing its distance to the closest data point in a large dataset can be costly and impractical. Ideally, we prefer to have a learnable distance function which also has the ability to reflect the overall dataset geometry. Based on this intuition, we design a state-conditioned distance function that can be learned in an elegantly simple supervised manner with desirable properties. 
+ +Specifically, we learn the state-conditioned distance function $g(s,a)$ by solving the following regression problem, with state-action pairs $(s,a)\sim \mathcal{D}$ and synthetic noise actions sampled from the uniform distribution over the full action space $\mathcal{A}$ : + +$$ +\min _ {g} \mathbb {E} _ {(s, a) \sim \mathcal {D}} \left[ \mathbb {E} _ {\hat {a} \sim U n i f (\mathcal {A})} [ \| a - \hat {a} \| - g (s, \hat {a}) ] ^ {2} \right] \tag {4} +$$ + +In practical implementation, for each $(s,a)\sim \mathcal{D}$ , we sample $N$ noise actions uniformly in the action space $\mathcal{A}$ to train $g(\cdot)$ . More implementation details can be found in Appendix E. Moreover, with the optimization objective defined in Eq. (4), we can show that the optimal state-conditioned distance function has two desirable properties (proofs can be found in Appendix C): + +Property 1. The optimal state-conditioned distance function of Eq. (4) is convex w.r.t. actions and is an upper bound of the distance to the state-conditioned centroid $a_{o}(s)$ of training dataset $\mathcal{D}$ : + +$$ +\begin{array}{l} g ^ {*} (s, \hat {a}) = \mathbb {E} _ {a \sim U n i f (\mathcal {A})} [ C (s, a) \| \hat {a} - a \| ] \\ \geq \left\| \hat {a} - \mathbb {E} _ {a \sim U n i f (\mathcal {A})} [ C (s, a) \cdot a ] \right\| = \left\| \hat {a} - a _ {o} (s) \right\|, \quad \forall \hat {a} \in \mathcal {A}, s \in \mathcal {D} \tag {5} \\ \end{array} +$$ + +where $C(s,a) = \frac{\mu(s,a)}{\mathbb{E}_{a\sim Unif(\mathcal{A})}\mu(s,a)}\geq 0$ , $\mu (s,a)$ is state-action distribution of dataset $\mathcal{D}$ . Given a state $s\in \mathcal{D}$ , the state-conditioned centroid is defined as $a_{o}(s) = \mathbb{E}_{a\sim Unif(\mathcal{A})}[C(s,a)\cdot a]$ . Since $L_{2}$ -norm is convex and the non-negative combination of convex functions is still convex, $g^{*}(s,\hat{a})$ is also a convex function w.r.t. $\hat{a}$ . + +Property 2. 
The negative gradient of the optimal state-conditioned distance function at an extrapolated action $\hat{a}$ , $-\nabla_{\hat{a}}g^{*}(s,\hat{a})$ , points inside the convex hull of the dataset. + +From Property 1, we can see that the optimal state-conditioned distance function characterizes data geometry and outputs an upper bound of the distance to the state-conditioned centroid of the training dataset. Property 2 indicates that if we use the learned distance function as a policy constraint, it can drive the learned policy to move inside the convex hull of training data. We visualize the value of the trained state-conditioned distance function in Figure 3. It is clear that the learned distance function can accurately predict the sample-to-dataset centroid distance. By utilizing such distance function, we can constrain the policy based on the global geometric information of training datasets. This + +![](images/3af0690a0c24cc1b62b4de981c2f912d5e3a67c1cc78c32e1ac4d92a039d2026.jpg) +(a) Illustration of $g^{*}(s,a)$ + +![](images/fd84e0cbb87325f6c0b95df5f73e1f9da82216501e3997e72d8dcd90b7efb138.jpg) + +![](images/275170debf5794418b5549afc027ce1470e01658ad45a4b992cc8d2c3bbdbb8d.jpg) +Figure 3: Illustration of the state-conditioned distance function. The output of the optimal distance function is the non-negative combination of the distances to all training data. $G$ is the threshold in Eq. (6) In (b), Offline data are marked as white dots. 
+ +![](images/40bab1ca52bc6f560c40f8f642154c8e7cfe04c50c9776938bea31bb380634a6.jpg) + +![](images/eb5443643c996d51665a7019ba4d509d77d7fb18ff2831eb3156d2b9bd697521.jpg) +(b) Visualization of $g^{*}(s,a)$ trained on diverse 2D datasets + +![](images/07e86dde83c3f006921d3e67958f083dab941b5b164314d67e3f2fbc8b33be4a.jpg) + +![](images/81f898b16f98f108db23152e53a13122211804c6c5620a2524eab7996ea88bcb.jpg) + +desirable property is non-obtainable by simply constraining the policy based on sample-to-sample distance such as the MSE loss between policy generated and dataset actions, which can only bring local geometric information. Moreover, the learned distance function can not only predict well at in-distribution states but also generalize well at OOD states. + +# 3.2 DISTANCE-SENSITIVE OFFLINE REINFORCEMENT LEARNING + +Capturing the geometry of offline datasets, we now construct a minimalist distance-sensitive offline RL framework, by simply plugging the state-conditioned distance function as a policy constraint into standard online actor-critic methods (such as TD3 (Fujimoto et al., 2018) and SAC (Haarnoja et al., 2018)). This results in the following policy maximization objective: + +$$ +\pi = \arg \max _ {\pi} \mathbb {E} _ {s \sim \mathcal {D}, a \sim \pi (\cdot | s)} [ Q (s, a) ] \quad s. t. \mathbb {E} _ {s \sim \mathcal {D}, a \sim \pi (\cdot | s)} [ g (s, a) ] \leq G \tag {6} +$$ + +where $G$ is a task-dependent threshold varying across tasks. In our method, we adopt a non-parametric treatment by setting $G$ as the mean output (50% quantile) of the learned distance function on the training dataset, i.e., $\mathbb{E}_{(s,a)\sim \mathcal{D}}[g(s,a)]$ , which is approximated over mini-batch samples to reduce computational complexity (see Appendix G for ablation on $G$ ). The constrained optimization problem in Eq. 
(6) can be reformulated as: + +$$ +\pi = \arg \max _ {\pi} \min _ {\lambda} \mathbb {E} _ {s \sim \mathcal {D}, a \sim \pi (\cdot | s)} [ \beta Q (s, a) - \lambda (g (s, a) - G) ] \quad s. t. \lambda \geq 0 \tag {7} +$$ + +where $\lambda$ is the Lagrangian multiplier, which is auto-adjusted using dual gradient descent. Following TD3+BC (Fujimoto & Gu, 2021), $Q$ values are rescaled by $\beta = \frac{\alpha}{\frac{1}{n}\sum_{i=1}^{n}|Q(s_i,a_i)|}$ to balance $Q$ function maximization and policy constraint satisfaction, controlled by a hyperparameter $\alpha$ . To reduce computations, the denominator of $\beta$ is approximated over mini-batch of samples. The resulting algorithm is easy to implement. In our experiments, we use TD3. Please refer to Appendix E for implementation details. + +# 3.3 RELAXATION WITH BELLMAN-CONSISTENT COEFFICIENT + +# 3.3.1 BELLMAN-CONSISTENT COEFFICIENT AND CONSTRAINED POLICY SET + +The key difference between DOGE and other policy constraint methods lies in that DOGE relaxes the strong full coverage assumption1 on offline datasets and allows exploitation on generalizable OOD areas. To relax the unrealistic full-coverage assumption, we resort to a weaker condition proposed by (Xie et al., 2021a), the Bellman-consistent coefficient (Definition 1), to measure how well Bellman errors can transfer to different distributions (Theorem 2). + +Denote $\| f\|_{2,\mu}^2 \coloneqq \mathbb{E}_\mu [\| f\|^2]$ ; $\mathcal{T}^\pi Q$ is the Bellman operator of policy $\pi$ , defined as $\mathcal{T}^\pi Q(s,a) \coloneqq r(s,a) + \gamma \mathbb{E}_{a'\sim \pi (\cdot |s'),s'\sim \mathcal{P}(\cdot |s,a)}[Q(s',a')] \coloneqq r(s,a) + \gamma \mathbb{P}^\pi [Q(s',a')]$ . $\mathbb{P}^\pi [\cdot ]$ is the brief notation for $\mathbb{E}_{a'\sim \pi (\cdot |s'),s'\sim \mathcal{P}(\cdot |s,a)}[\cdot ]$ . $\mathcal{F}$ is the function class of $Q$ networks. The Bellman-consistent coefficient is defined as: + +Definition 1. (Bellman-consistent coefficient). 
We define $\mathcal{B}(v,\mu ,\mathcal{F},\pi)$ to measure the distributional shift from an arbitrary distribution $v$ to data distribution $\mu$ , w.r.t. $\mathcal{F}$ and $\pi$ , + +$$ +\mathcal {B} (v, \mu , \mathcal {F}, \pi) := \sup _ {Q \in \mathcal {F}} \frac {\| Q - \mathcal {T} ^ {\pi} Q \| _ {2 , v} ^ {2}}{\| Q - \mathcal {T} ^ {\pi} Q \| _ {2 , \mu} ^ {2}} \tag {8} +$$ + +This definition captures the generalization performance of function approximation across different distributions. Intuitively, a small value of $\mathcal{B}(v,\mu ,\mathcal{F},\pi)$ means Bellman errors for policy $\pi$ can accurately transfer from distribution $\mu$ to $v$ . This suggests that Bellman errors can transfer well between two distributions even if a large discrepancy exists, as long as the Bellman-consistent coefficient is small. + +Based on Definition 1, we introduce the definition of Bellman-consistent constrained policy set. + +Definition 2. (Bellman-consistent constrained policy set). We define the Bellman-consistent constrained policy set as $\Pi_{\mathcal{B}}$ . The Bellman-consistent coefficient under the transition induced by $\Pi_{\mathcal{B}}$ can be bounded by some finite constants $l(k)$ : + +$$ +\mathcal {B} \left(\rho_ {k}, \mu , \mathcal {F}, \pi\right) \leq l (k) \tag {9} +$$ + +where $\rho_{k} = \rho_{0}P^{\pi_{1}}\ldots P^{\pi_{k}},\forall \pi_{1},\ldots ,\pi_{k}\in \Pi_{\mathcal{B}},\rho_{0}$ is the initial state-action distribution and $P^{\pi_i}$ is the transition operator induced by $\pi_{i}$ , i.e., $P^{\pi_i}(s',a'|s,a) = \mathcal{P}(s'|s,a)\pi_i(a'|s')$ + +We denote the constrained Bellman operator induced by $\Pi_{\mathcal{B}}$ as $\mathcal{T}^{\Pi_B}$ , $\mathcal{T}^{\Pi_B}Q(s,a) := r(s,a) + \max_{\pi \in \Pi_B}\gamma \mathbb{P}^\pi [Q(s',a')]$ . $\mathcal{T}^{\Pi_B}$ can be seen as a Bellman operator on a redefined MDP, thus theoretical results of MDP also carry over, such as contraction mapping and existence of a fixed point. 
+ +# 3.3.2 BELLMAN CONSISTENT COEFFICIENT AND PERFORMANCE BOUND OF DOGE + +We show that the policy set induced by DOGE is essentially a Bellman-consistent policy set defined in Definition 2. Meanwhile, the distance constraint in DOGE can produce a small value of $\mathcal{B}$ and hence guarantee the learned policy deviates only to those generalizable areas. + +Theorem 2. (Upper bound of Bellman-consistent coefficient). Under the NTK assumption, the Bellman-consistent coefficient $\mathcal{B}(v,\mu ,\mathcal{F},\pi)$ is upper bounded as: + +$$ +\mathcal {B} (v, \mu , \mathcal {F}, \pi) \leq \frac {1}{\epsilon_ {\mu}} \left\| \underbrace {(1 - \gamma) Q \left(s _ {o} , a _ {o}\right) + R _ {\max }} _ {\mathcal {B} _ {1}} + \underbrace {C _ {1} \left(C _ {2} \sqrt {d _ {1}} + d _ {1}\right)} _ {\mathcal {B} _ {2}} + \underbrace {(2 - \gamma) C _ {1} \mathbb {P} ^ {\pi} \left(C _ {2} \sqrt {d _ {2}} + d _ {2}\right)} _ {\mathcal {B} _ {3}} \right\| _ {2, v} ^ {2} \tag {10} +$$ + +where we denote $x = (s, a)$ and $x' = (s', a')$ . $x_o = \mathbb{E}_{x \sim \mathcal{D}}[x]$ is the centroid of offline dataset. $d_1 = \| x - x_o \|$ and $d_2 = \| x' - x_o \|$ are the sample-to-centroid distances. $C_2 = \sqrt{\sup_{x \in S \times \mathcal{A}} \| x \|}$ is related to the upper bound of the input scale. $\epsilon_\mu$ is the lower bound of Bellman error (square) for $\pi$ under distribution $\mu$ , i.e., $\epsilon_\mu \leq \| Q - T^\pi Q \|_{2,\mu}^2$ . + +The RHS of Eq. (10) contains four parts: $\frac{1}{\epsilon_{\mu}}$ , $\mathcal{B}_1$ , $\mathcal{B}_2$ and $\mathcal{B}_3$ . It is reasonable to assume $\epsilon_{\mu} > 0$ , because of the approximation error of $Q$ networks and the distribution mismatch between $\mu$ and $\pi$ . $\mathcal{B}_1$ is only dependent on the $Q$ value $Q(s_o, a_o)$ at the centroid of the dataset and the max reward $R_{\mathrm{max}}$ . $\mathcal{B}_2$ is related to distance $d_1$ and distribution $v$ . 
$\mathcal{B}_3$ is related to $d_2$ , $v$ and $\mathbb{P}^{\pi}$ . To be mentioned, the distance regularization in DOGE compels the learned policy to output the action that is near the state-conditioned centroid of dataset, thus $\mathcal{B}_2$ and $\mathcal{B}_3$ can be driven to small values. Therefore, the RHS of Eq. (10) can be bounded by finite constants under DOGE, which shows that the constrained policy set induced by DOGE is essentially a Bellman-consistent constrained policy set. + +Then, the performance gap between the policy learned by DOGE and the optimal policy can be bounded as given in Theorem 3. See Appendix D.1 and D.2 for the proof of Theorem 2 and 3. + +Theorem 3. (Performance bound of the learned policy by DOGE). Let $Q^{\Pi_{\mathcal{B}}}$ be the fixed point of $\mathcal{T}^{\Pi_{\mathcal{B}}}$ , i.e., $Q^{\Pi_{\mathcal{B}}} = \mathcal{T}^{\Pi_{\mathcal{B}}}Q^{\Pi_{\mathcal{B}}}$ , and $\epsilon_k = Q^k - \mathcal{T}^{\Pi_{\mathcal{B}}}Q^{k-1}$ is the Bellman error at the $k$ -th iteration. $\| f \|_{\mu} := \mathbb{E}_{\mu}[\| f \|]$ . The performance of the learned policy $\pi_n$ is bounded by: + +$$ +\lim _ {n \rightarrow \infty} \| Q ^ {*} - Q ^ {\pi_ {n}} \| _ {\rho_ {0}} \leq \frac {2 \gamma}{(1 - \gamma) ^ {2}} \left[ L \left(\Pi_ {\mathcal {B}}\right) \sup _ {k \geq 0} \| \epsilon_ {k} \| _ {\mu} + \frac {1 - \gamma}{2 \gamma} \alpha \left(\Pi_ {\mathcal {B}}\right)\right] \tag {11} +$$ + +where $L(\Pi_{\mathcal{B}}) = \sqrt{(1 - \gamma)^2 \sum_{k=1}^{\infty} k \gamma^{k-1} l(k)}$ , which is similar to the concentrability coefficient in BEAR (Kumar et al., 2019) but in a different form. Note that $l(k)$ is related to the RHS of Eq. (10) and can be driven to a small value by DOGE according to Theorem 2. 
$\alpha(\Pi_{\mathcal{B}}) = \| \mathcal{T}^{\Pi_{\mathcal{B}}} Q^{\Pi_{\mathcal{B}}} - \mathcal{T} Q^{*} \|_{\infty}$ is the suboptimality constant, which is similar to $\alpha(\Pi) = \| \mathcal{T}^{\Pi} Q^{\Pi} - \mathcal{T} Q^{*} \|_{\infty}$ in BEAR. + +Compared with BEAR, DOGE allows a policy shift to some generalizable OOD areas and relaxes the strong full-coverage assumption. In addition, we have $L(\Pi_{\mathcal{B}}) \leq L(\Pi) \propto \frac{\rho_0 P^{\pi_1} \dots P^{\pi_k}}{\mu(s, a)}$ , where $L(\Pi)$ is the concentrability coefficient in BEAR. This is evident when $\mu(s, a) = 0$ and $\rho_0 P^{\pi_1} \dots P^{\pi_k}(s, a) > 0$ , $L(\Pi_{\mathcal{B}})$ can be bounded by finite constants but $L(\Pi) \to \infty$ . Moreover, as $\Pi_{\mathcal{B}}$ extends the policy set to cover more generalizable OOD areas ( $\Pi \subseteq \Pi_{\mathcal{B}}$ ) and produces a larger feasible region for optimization, lower degree of suboptimality can be achieved (i.e., $\alpha(\Pi_{\mathcal{B}}) \leq \alpha(\Pi)$ ) compared to only performing optimization on $\Pi$ . Therefore, we can see that DOGE enjoys a tighter performance bound than previous more conservative methods when allowed to exploit generalizable OOD areas. + +# 4 EXPERIMENTS + +For evaluation, We compare DOGE and prior offline RL methods over D4RL Mujoco and AntMaze tasks (Fu et al., 2020). Mujoco is a standard benchmark commonly used in previous work. AntMaze tasks are far more challenging due to the non-markovian and mixed-quality offline dataset, the stochastic property of environments, and the high dimensional state-action space. Implementation details, experimental setup and additional experimental results can be found in Appendix E and F. + +# 4.1 COMPARISON WITH SOTA + +We compare DOGE with model-free SOTA methods, such as TD3+BC (Fujimoto & Gu, 2021), CQL (Kumar et al., 2020b) and IQL (Kostrikov et al., 2021b). For fairness, we use the “-v2” datasets for all methods. 
For most Mujoco tasks, we report the scores from the IQL paper. We obtain the other results using the authors' or our implementations. For AntMaze tasks, we obtain the results of CQL, TD3+BC, and IQL using the authors' implementations. For BC (Pomerleau, 1988), BCQ (Fujimoto et al., 2019) and BEAR (Kumar et al., 2019), we report the scores from (Fu et al., 2020). All methods are evaluated over the final 10 evaluations for Mujoco tasks and 100 for AntMaze tasks. + +Table 1 shows that DOGE achieves comparable or better performance than SOTA methods on most Mujoco and AntMaze tasks. Compared to other policy constraint approaches such as BCQ, BEAR and TD3+BC, DOGE is the first policy constraint method to successfully solve AntMaze-medium and AntMaze-large tasks. Note that IQL is an algorithm designed for multi-step dynamics programming and attains strong advantage on AntMaze tasks. Nevertheless, DOGE can compete with or even surpass IQL on most AntMaze tasks, by only employing a generalization-oriented policy constraint. These results illustrate the benefits of allowing policy learning on generalizable OOD areas. + +# 4.2 EVALUATION ON GENERALIZATION + +To evaluate the generalization ability of DOGE, we remove small areas of data from the critical pathways to the destination in AntMaze medium and large tasks, to construct an OOD dataset. The two removed areas reside in close proximity to the trajectory data (see Figure 1). We evaluate representative methods (such as TD3+BC, CQL, IQL) and DOGE on these modified datasets. Figure 4 shows the comparison before and after data removal. + +For such a dataset with partial state-action space coverage, existing policy constraint methods tend to over-constrain the policy to stay inside the support of a dataset, where the optimal policy is not well-covered. Value regularization methods suffer from deteriorated generalization performance, as the value function is distorted to assign low value at all OOD areas. 
In-sample learning methods are only guaranteed to retain the best policy within the partially covered dataset (Kostrikov et al., 2021b). As shown in Figure 4, all these methods struggle to generalize well on the missing areas and suffer severe performance drop, while DOGE maintains competitive performance. This further demonstrates the benefits of relaxing over-conservatism in existing methods. + +Table 1: Average normalized scores and standard deviations over 5 seeds on benchmark tasks + +
DatasetBCBCQBEARTD3+BCCQLIQLDOGE(ours)
hopper-r4.97.114.28.5±0.68.3±0.27.9±0.421.1±12.6
halfcheetah-r0.28.815.111.0±1.120.0±0.411.2±2.917.8±1.2
walker2d-r1.76.510.71.6±1.78.3±0.15.9±0.50.9 ± 2.4
hopper-m52.956.751.959.3±4.258.5±2.166.2±5.798.6±2.1
halfcheetah-m42.647.041.048.3±0.344.0±5.447.4±0.245.3±0.6
walker2d-m75.372.680.983.7±2.172.5±0.878.3±8.786.8±0.8
hopper-m-r18.153.337.360.9±18.895.0±6.494.7±8.676.2±17.7
halfcheetah-m-r36.640.429.744.6±0.545.5±0.544.2±1.242.8±0.6
walker2d-m-r26.052.118.581.8±5.577.2±5.573.8±7.187.3±2.3
hopper-m-e52.581.817.798.0±9.4105.4±6.891.5±14.3102.7±5.2
halfcheetah-m-e55.289.138.990.7±4.391.6±2.886.7±5.378.7±8.4
walker2d-m-e107.5109.595.4110.1±0.5108.8±0.7109.6±1.0110.4±1.5
locomotion total
antmaze-u65.078.973.091.3±5.784.8±2.388.2±1.997.0±1.8
antmaze-u-d55.055.061.054.6±16.243.3±5.466.7±4.063.5±9.3
antmaze-m-p0.00.00.00.065.2±4.870.4±5.380.6±6.5
antmaze-m-d0.00.08.00.054.0±11.774.6±3.277.6±6.1
antmaze-l-p0.06.70.00.018.8±15.343.5±4.548.2±8.1
antmaze-l-d0.02.20.00.031.6±9.545.6±7.636.4±9.1
antmaze-total120.0142.8142.0145.9±21.9297.7±49.0389.0±26.5403.3±40.9
+ +![](images/49c9e6018d1a5f759c914c9175843a44c4e4f03b845c72c3324eb83ee60e69ca.jpg) +Policy constraint + +![](images/cf18c8e2896a643c58acf911f20a50282df75df8c16a4795cee76caed2e46f83.jpg) +Value regularization + +![](images/a7f61701ad8687a1c5d5494369d8525816007f4cb4ab04ea008879415c832474.jpg) +In-sample learning + +![](images/82b5a4cba0f0bea289e48ace1d5263f2acd56e84dea74a0d32acaea0de1a03d5.jpg) +DOGE (Ours) + +![](images/3d9ade4e8a1eb09cdc98ed3dc4bac11622e37474f17806f5665c4b25852c71c6.jpg) +Figure 4: Generalization performance after removing data from AntMaze large tasks (see Appendix F.1 for detailed setup and additional results on AntMaze medium tasks). + +![](images/b9f5db12feacd952c5985cd236e6654c0b59a04770027710c915dd154ba2de40.jpg) + +![](images/a9f1f2a7c49a7015c329cdedeee59aa83178cc2bed9a4abdf1d7cdb998f8579b.jpg) + +![](images/9ba79b5109bb6fa47399a02cfb0e1394cb3bef6a3a2547f25f0c3640dbc984af.jpg) + +# 4.3 ABLATION STUDY + +We conduct ablation studies to evaluate the impact of the hyperparameter $\alpha$ , the non-parametric distance threshold $G$ in Eq. (6), and the number of noise actions $N$ used to train the distance function. For $\alpha$ , we add or subtract 2.5 to the original value; for $G$ , we choose $30\%$ , $50\%$ , $70\%$ and $90\%$ upper quantile of the distance values in mini-batch samples; for $N$ , we choose $N = 10, 20, 30$ . + +Compared to $N$ and $\alpha$ , we find that $G$ has a more significant impact on the performance. Figure 5b shows that an overly restrictive $G$ (30% quantile) results in a policy set too small to cover near-optimal policies. A more tolerant $G$ , on the other hand, is unlikely to cause excessive error accumulation and achieves relatively good performance. In addition, Figure 5a and Figure 5c show that performance is stable across variations of hyperparameters, indicating that our method is hyperparameter-robust. 
+ +![](images/f0c22aaf0812d5bdcd791e3ed965d26bb7f0cf71880de8ff0996763d37b8eb7c.jpg) +(a) $\alpha$ has little effect on results + +![](images/f237af1b604b5663d26e2d1de7fd8ea0af234e46d8774e911ae0bf4a5af03bdf.jpg) +Figure 5: Ablation results. The default parameters in our implementation are marked by $*$ . The error bars indicate min and max over 5 seeds. See Appendix G for more detailed ablation studies. + +![](images/bbd5795ca3015234995854861d3116a3c8d81b6eb10efbd40d471f10634e7031.jpg) +(b) Small $G$ is harmful to results +(c) $N$ has little effect on results + +# 5 RELATED WORK + +To prevent distributional shift and exploitation error accumulation when inferring the value function at unseen samples, a direct approach is to restrict policy learning from deviating to OOD areas. To make sure the leaned policy stays inside the distribution or support of training data, These policy constraint methods either carefully parameterize the learned policy (Fujimoto et al., 2019; Matsushima et al., 2020), or use explicit divergence penalties (Kumar et al., 2019; Wu et al., 2019; Fujimoto & Gu, 2021; Xu et al., 2021; Dadashi et al., 2021) or implicit divergence constraints (Peng et al., 2019; Nair et al., 2020; Xu et al., 2022a). The theories behind these methods typically assume full state-action space coverage of the offline datasets(Le et al., 2019; Kumar et al., 2019). However, policy constraint under full-coverage assumption is unrealistic in most real-world settings, especially on datasets with partial coverage and only sub-optimal behavior policies. Some recent works try to relax the full-coverage assumption to partial coverage by introducing different distribution divergence metrics, but only in theoretical analysis (Liu et al., 2020; Zanette et al., 2021; Xie et al., 2021b; Uehara & Sun, 2021; Xie et al., 2021a). 
Our method is an enhanced policy constraint method, where we relax the full-coverage assumption and allow the policy to learn on OOD areas where networks can generalize well. + +Another type of offline RL method, value regularization (Kumar et al., 2020b; Kostrikov et al., 2021a; Yu et al., 2021; Xu et al., 2022b; 2023), directly penalizes the value function to produce low values at OOD actions. In-sample learning methods (Brandfonbrener et al., 2021; Kostrikov et al., 2021b), on the other hand, only learn the value function within data or treat it as the value function of the behavior policy. Compared with our approach, these methods exercise too much conservatism, which limits the generalization performance of deep neural networks on OOD regions, largely weakening the ability of dynamic programming. There are also uncertainty-based and model-based methods that regularize the value function or policy with epistemic uncertainty estimated from model or value function (Janner et al., 2019; Yu et al., 2020; Uehara & Sun, 2021; Wu et al., 2021; Zhan et al., 2022; Bai et al., 2021). However, the estimation of the epistemic uncertainty of DNN is still an under-explored area, with results highly dependent on evaluation methods and the structure of DNN. + +# 6 CONCLUSION + +In this study, we provide new insights on the relationship between approximation error of deep $Q$ functions and geometry of offline datasets. Through empirical and theoretical analysis, we find that deep $Q$ functions attain relatively low approximation error when interpolating rather than extrapolating the dataset. This phenomenon motivates us to design a new algorithm, DOGE, to empower policy learning on OOD samples within the convex hull of training data. DOGE is simple yet elegant, by plugging a dataset geometry-derived distance constraint into TD3. With such a minimal surgery, DOGE outperforms existing model-free offline RL methods on most D4RL tasks. 
We theoretically prove that DOGE enjoys a tighter performance bound compared with existing policy constraint methods under the more realistic partial-coverage assumption. Empirical results and theoretical analysis suggest the necessity of re-thinking the conservatism principle in offline RL algorithm design, and points to sufficient exploitation of the generalization ability of deep $Q$ functions. + +# ACKNOWLEDGMENTS + +This work is supported by National Key Research and Development Program of China under Grant (2022YFB2502904). This work is also supported by Baidu Inc. through Apollo-AIR Joint Research Center. The authors would also like to thank the anonymous reviewers for their feedback on the manuscripts. Jianxiong Li would like to thank Zhixu Du, Yimu Wang, Li Jiang, Haoyi Niu, Hao Zhao and all colleagues in AIR-Dream group for valuable discussions. + +# REFERENCES + +Zeyuan Allen-Zhu, Yuanzhi Li, and Yingyu Liang. Learning and generalization in overparameterized neural networks, going beyond two layers. Advances in neural information processing systems, 32, 2019. +Gaon An, Seungyong Moon, Jang-Hyun Kim, and Hyun Oh Song. Uncertainty-based offline reinforcement learning with diversified q-ensemble. Advances in neural information processing systems, 34:7436-7447, 2021. +Anonymous. Lightweight uncertainty for offline reinforcement learning via bayesian posterior. In Submitted to The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=55Eet8WGJTv. under review. +Sanjeev Arora, Simon Du, Wei Hu, Zhiyuan Li, and Ruosong Wang. Fine-grained analysis of optimization and generalization for overparameterized two-layer neural networks. In International Conference on Machine Learning, pp. 322-332. PMLR, 2019a. +Sanjeev Arora, Simon S Du, Wei Hu, Zhiyuan Li, Russ R Salakhutdinov, and Ruosong Wang. On exact computation with an infinitely wide neural net. Advances in Neural Information Processing Systems, 32, 2019b. 
+Chenjia Bai, Lingxiao Wang, Zhuoran Yang, Zhi-Hong Deng, Animesh Garg, Peng Liu, and Zhao ran Wang. Pessimistic bootstrapping for uncertainty-driven offline reinforcement learning. In International Conference on Learning Representations, 2021. +Etienne Barnard and LFA Wessels. Extrapolation and interpolation in neural network classifiers. IEEE Control Systems Magazine, 12(5):50-53, 1992. +Peter L Bartlett and Shahar Mendelson. Rademacher and gaussian complexities: Risk bounds and structural results. Journal of Machine Learning Research, 3(Nov):463-482, 2002. +Alberto Bietti and Julien Mairal. On the inductive bias of neural tangent kernels. Advances in Neural Information Processing Systems, 32, 2019. +David Brandfonbrener, Will Whitney, Rajesh Ranganath, and Joan Bruna. Offline rl without off-policy evaluation. Advances in Neural Information Processing Systems, 34:4933-4946, 2021. +Qi Cai, Zhuoran Yang, Jason D Lee, and Zhaoran Wang. Neural temporal-difference learning converges to global optima. Advances in Neural Information Processing Systems, 32, 2019. +Robert Dadashi, Shideh RezaEIFar, Nino Vieillard, LEOnard Hussenot, Olivier Pietquin, and Matthieu Geist. Offline reinforcement learning with pseudometric learning. In International Conference on Machine Learning, pp. 2307-2318. PMLR, 2021. +Jonas Degrave, Federico Felici, Jonas Buchli, Michael Neunert, Brendan Tracey, Francesco Carpanese, Timo Ewalds, Roland Hafner, Abbas Abdelmaleki, Diego de Las Casas, et al. Magnetic control of tokamak plasmas through deep reinforcement learning. Nature, 602(7897):414-419, 2022. +Jianqing Fan, Zhaoran Wang, Yuchen Xie, and Zhuoran Yang. A theoretical analysis of deep q-learning. In Learning for Dynamics and Control, pp. 486-489. PMLR, 2020. +Pete Florence, Corey Lynch, Andy Zeng, Oscar A Ramirez, Ayzaan Wahid, Laura Downs, Adrian Wong, Johnny Lee, Igor Mordatch, and Jonathan Thompson. Implicit behavioral cloning. In Conference on Robot Learning, pp. 158-168. PMLR, 2022. 
+ +Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4rl: Datasets for deep data-driven reinforcement learning. arXiv preprint arXiv:2004.07219, 2020. +Scott Fujimoto and Shixiang Shane Gu. A minimalist approach to offline reinforcement learning. Advances in Neural Information Processing Systems, 34, 2021. +Scott Fujimoto, Herke Hoof, and David Meger. Addressing function approximation error in actor-critic methods. In International conference on machine learning, pp. 1587-1596. PMLR, 2018. +Scott Fujimoto, David Meger, and Doina Precup. Off-policy deep reinforcement learning without exploration. In International Conference on Machine Learning, pp. 2052-2062. PMLR, 2019. +Seyed Kamyar Seyed Ghasemipour, Shixiang Shane Gu, and Ofir Nachum. Why so pessimistic? estimating uncertainties for offline rl through ensembles, and why their independence matters. +Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In International conference on machine learning, pp. 1861-1870. PMLR, 2018. +Pamela J Haley and DONALD Soloway. Extrapolation limitations of multilayer feedforward neural networks. In [Proceedings 1992] IJCNN International Joint Conference on Neural Networks, volume 4, pp. 25-30. IEEE, 1992. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016. +Arthur Jacot, Franck Gabriel, and Clément Hongler. Neural tangent kernel: Convergence and generalization in neural networks. Advances in neural information processing systems, 31, 2018. +Michael Janner, Justin Fu, Marvin Zhang, and Sergey Levine. When to trust your model: Model-based policy optimization. Advances in Neural Information Processing Systems, 32, 2019. +Ilya Kostrikov, Rob Fergus, Jonathan Tompson, and Ofir Nachum. 
Offline reinforcement learning with fisher divergence critic regularization. In International Conference on Machine Learning, pp. 5774-5783. PMLR, 2021a. +Ilya Kostrikov, Ashvin Nair, and Sergey Levine. Offline reinforcement learning with implicit q-learning. In International Conference on Learning Representations, 2021b. +Aviral Kumar, Justin Fu, Matthew Soh, George Tucker, and Sergey Levine. Stabilizing off-policy q-learning via bootstrapping error reduction. Advances in Neural Information Processing Systems, 32, 2019. +Aviral Kumar, Rishabh Agarwal, Dibya Ghosh, and Sergey Levine. Implicit under-parameterization inhibits data-efficient deep reinforcement learning. In International Conference on Learning Representations, 2020a. +Aviral Kumar, Aurick Zhou, George Tucker, and Sergey Levine. Conservative q-learning for offline reinforcement learning. Advances in Neural Information Processing Systems, 33:1179-1191, 2020b. +Hoang Le, Cameron Voloshin, and Yisong Yue. Batch policy learning under constraints. In International Conference on Machine Learning, pp. 3703-3712. PMLR, 2019. +Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020. +Boyi Liu, Qi Cai, Zhuoran Yang, and Zhaoran Wang. Neural trust region/proximal policy optimization attains globally optimal policy. Advances in neural information processing systems, 32, 2019. +Yao Liu, Adith Swaminathan, Alekh Agarwal, and Emma Brunskill. Provably good batch off-policy reinforcement learning without great exploration. Advances in Neural Information Processing Systems, 33:1264-1274, 2020. + +Tatsuya Matsushima, Hiroki Furuta, Yutaka Matsuo, Ofir Nachum, and Shixiang Gu. Deployment-efficient reinforcement learning via model-based offline optimization. In International Conference on Learning Representations, 2020. 
+Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al. Human-level control through deep reinforcement learning. nature, 518(7540):529-533, 2015. +Ashvin Nair, Murtaza Dalal, Abhishek Gupta, and Sergey Levine. Accelerating online reinforcement learning with offline datasets. arXiv preprint arXiv:2006.09359, 2020. +Charles Packer, Katelyn Gao, Jernej Kos, Philipp Krahenbuhl, Vladlen Koltun, and Dawn Song. Assessing generalization in deep reinforcement learning. arXiv preprint arXiv:1810.12282, 2018. +Xue Bin Peng, Aviral Kumar, Grace Zhang, and Sergey Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019. +Dean A Pomerleau. Alvinn: An autonomous land vehicle in a neural network. Advances in neural information processing systems, 1, 1988. +David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, et al. Mastering the game of go without human knowledge. nature, 550(7676):354-359, 2017. +Masatoshi Uehara and Wen Sun. Pessimistic model-based offline reinforcement learning under partial coverage. In International Conference on Learning Representations, 2021. +Hado Van Hasselt, Yotam Doron, Florian Strub, Matteo Hessel, Nicolas Sonnerat, and Joseph Modayil. Deep reinforcement learning and the deadly triad. arXiv preprint arXiv:1812.02648, 2018. +Vladimir N Vapnik and A Ya Chervonenkis. On the uniform convergence of relative frequencies of events to their probabilities. In Measures of complexity, pp. 11-30. Springer, 2015. +Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. +Yifan Wu, George Tucker, and Ofir Nachum. 
Behavior regularized offline reinforcement learning. arXiv preprint arXiv:1911.11361, 2019. +Yue Wu, Shuangfei Zhai, Nitish Srivastava, Joshua M Susskind, Jian Zhang, Ruslan Salakhutdinov, and Hanlin Goh. Uncertainty weighted actor-critic for offline reinforcement learning. In International Conference on Machine Learning, pp. 11319-11328. PMLR, 2021. +Chenjun Xiao, Bo Dai, Jincheng Mei, Oscar A Ramirez, Ramki Gummadi, Chris Harris, and Dale Schuurmans. Understanding and leveraging overparameterization in recursive value estimation. In International Conference on Learning Representations, 2021. +Tengyang Xie, Ching-An Cheng, Nan Jiang, Paul Mineiro, and Alekh Agarwal. Bellman-consistent pessimism for offline reinforcement learning. Advances in neural information processing systems, 34, 2021a. +Tengyang Xie, Nan Jiang, Huan Wang, Caiming Xiong, and Yu Bai. Policy finetuning: Bridging sample-efficient offline and online reinforcement learning. Advances in neural information processing systems, 34, 2021b. +Haoran Xu, Xianyuan Zhan, Jianxiong Li, and Honglei Yin. Offline reinforcement learning with soft behavior regularization. arXiv preprint arXiv:2110.07395, 2021. +Haoran Xu, Li Jiang, Jianxiong Li, and Xianyuan Zhan. A policy-guided imitation approach for offline reinforcement learning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022a. URL https://openreview.net/forum?id=CKbqDtZnSc. + +Haoran Xu, Xianyuan Zhan, and Xiangyu Zhu. Constraints penalized q-learning for safe offline reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, 2022b. +Haoran Xu, Li Jiang, Jianxiong Li, Zhuoran Yang, Zhaoran Wang, Victor Wai Kin Chan, and Xianyuan Zhan. Sparse q-learning: Offline reinforcement learning with implicit value regularization. In International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=ueYYgo2pSSU. 
+Keyulu Xu, Mozhi Zhang, Jingling Li, Simon Shaolei Du, Ken-Ichi Kawarabayashi, and Stefanie Jegelka. How neural networks extrapolate: From feedforward to graph neural networks. In International Conference on Learning Representations, 2020. +Pan Xu and Quanquan Gu. A finite-time analysis of q-learning with neural network function approximation. In International Conference on Machine Learning, pp. 10555-10565. PMLR, 2020. +Tianhe Yu, Garrett Thomas, Lantao Yu, Stefano Ermon, James Y Zou, Sergey Levine, Chelsea Finn, and Tengyu Ma. Mopo: Model-based offline policy optimization. Advances in Neural Information Processing Systems, 33:14129-14142, 2020. +Tianhe Yu, Aviral Kumar, Rafael Rafailov, Aravind Rajeswaran, Sergey Levine, and Chelsea Finn. Combo: Conservative offline model-based policy optimization. Advances in Neural Information Processing Systems, 34, 2021. +Andrea Zanette, Martin J Wainwright, and Emma Brunskill. Provable benefits of actor-critic methods for offline reinforcement learning. Advances in neural information processing systems, 34, 2021. +Xianyuan Zhan, Haoran Xu, Yue Zhang, Xiangyu Zhu, Honglei Yin, and Yu Zheng. Deepthermal: Combustion optimization for thermal power generating units using offline reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, 2022. +Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning (still) requires rethinking generalization. Communications of the ACM, 64(3):107-115, 2021. + +# A SKETCH OF THEORETICAL ANALYSIS + +In this section, we present in Figure 6 a sketch of the overall logical flow in our theoretical analyses and the proposed algorithm, DOGE. We start by analyzing the effects of data geometry on the generalization patterns of deep Q-functions. 
We find that a small sample-to-dataset distance leads to a tightened Q-function approximation error and thus interpolation enjoys better generalization properties than extrapolation (Theorem 1). Motivated by this, we propose DOGE, which tries to control the upper bound of the sample-to-centroid distance to be small (Property 1) and enforces a convex hull based policy constraint (Property 2). Then, we dive deeper and find that the upper bound of the Bellman-consistent coefficient is well controlled by sample-to-centroid distance and thus DOGE enjoys a bounded bellman-consistent coefficient (Theorem 2). Based on these findings, we can derive a tighter performance bound of DOGE as compared to support constraint methods like BEAR (Theorem 3). + +![](images/0623ba8fec6b85b0996f8cc0b44efa8bea72ee4e56762670f536b443ccace4fc.jpg) +Figure 6: Sketch of theoretical analysis + +# B THEORETICAL ANALYSIS OF THE IMPACT OF DATA GEOMETRY ON DEEP $Q$ FUNCTIONS + +To analyze the generalization of a function approximator, one can refer to some classical methods such as Rademacher complexity (Bartlett & Mendelson, 2002) and VC-dimension (Vapnik & Chervonenkis, 2015). However, the generalization bounds that obtained by these methods are usually trivial and cannot explain the generalization behavior in the overparameterized regime (Zhang et al., 2021). Recent breakthroughs in neural tangent kernel (NTK) shed light on the generalization of DNN. NTK builds the connection between the training dynamics of DNN and the solution of the kernel regression w.r.t. NTK, and is widely used in recent analysis of DNN generalization (Jacot et al., 2018; Arora et al., 2019b; Bietti & Mairal, 2019). What's more, NTK is also a popular analyzing tool in the convergence and optimality of deep RL (Cai et al., 2019; Fan et al., 2020; Kumar et al., 2020a; Xiao et al., 2021) and thus is used in our study. 
+ +# B.1 NEURAL TANGENT KERNEL + +We denote a general neural network by $f(\theta, x): \mathbb{R}^d \to \mathbb{R}$ , where $\theta$ is all the parameters in the network and $x \in \mathbb{R}^d$ is the input. Given, a training dataset $\{(x_i, y_i)\}_{i=1}^n$ , the parameters $\theta$ are optimized by minimizing the squared loss function, i.e., $\mathcal{L}(\theta) = \frac{1}{2} \sum_{i=1}^n (f_\theta(x_i) - y_i)^2$ by gradient + +descent. The dynamics of the networks output can be formulated by Lemma 1 (Lemma 3.1. of (Arora et al., 2019b)); see (Arora et al., 2019b) for the proof of Lemma 1. + +Lemma 1. Consider minimizing the squared loss $\mathcal{L}(\theta)$ by gradient descent with infinitesimally small learning rate, i.e., $\frac{d\theta(t)}{dt} = -\nabla \mathcal{L}(\theta(t))$ . Let $\mathbf{u}(t) = (f(\theta(t), x_i))_{i \in [n]} \in \mathbb{R}^n$ be the network outputs on all $x_i$ 's at time $t$ , and $\mathbf{Y} = (y_i)_{i \in [n]}$ be the desired outputs. Then $\mathbf{u}(t)$ follows the following evolution, where $\mathbf{H}(t)$ is an $n \times n$ positive semidefinite matrix whose $(i,j)$ -th entry is $\left\langle \frac{\partial f(\theta(t), x_i)}{\partial \theta}, \frac{\partial f(\theta(t), x_j)}{\partial \theta} \right\rangle$ : + +$$ +\frac {d \mathbf {u} (t)}{d t} = - \mathbf {H} (t) \cdot (\mathbf {u} (t) - \mathbf {Y}). \tag {12} +$$ + +Plenty of works (Jacot et al., 2018; Arora et al., 2019b; Allen-Zhu et al., 2019; Xu et al., 2020) study the dynamics of the neural networks' training process and find that if the width of networks is sufficiently large, $\mathbf{H}(t)$ stays almost constant during training, i.e., $\mathbf{H}(t) = \mathbf{H}(0)$ . What's more, if the neural networks' parameters are randomly initialized with certain scales and the networks width goes to infinity, $\mathbf{H}(0)$ converges to a fixed matrix $\mathbf{K}$ , called neural tangent kernel (NTK) (Jacot et al., 2018). 
+ +$$ +\mathbf {K} (x, x ^ {\prime}) = \mathbb {E} _ {\theta \sim W} \left\langle \frac {\partial f (\theta (t) , x)}{\partial \theta}, \frac {\partial f (\theta (t) , x ^ {\prime})}{\partial \theta} \right\rangle \tag {13} +$$ + +where, $W$ is Gaussian distribution. The training dynamics in Lemma 1 is identical to the dynamics of kernel regression under gradient flow, because $\mathbf{K}$ stays constant during training when the width of neural networks goes to infinity. Then, the final prediction function $(t \to \infty$ , assuming $\mathbf{u}(0) = 0$ ) is equal to the kernel regression solution: + +$$ +f _ {n t k} (x) = \left(\mathbf {K} \left(x, x _ {1}\right), \dots , \mathbf {K} \left(x, x _ {n}\right)\right) \cdot \mathbf {K} _ {\text {t r a i n}} ^ {- 1} \mathbf {Y} \tag {14} +$$ + +where $\mathbf{K}_{train}^{-1}$ is the $n\times n$ NTK for the training data (the state-action pair $x = (s,a)$ in the policy evaluation in offline RL) and stays constant during training once the training data is fixed. $\mathbf{Y}$ is the training labels $(r(s,a) + \gamma \mathbb{E}_{a'\sim \pi (\cdot |s')}[Q_{\theta '}(s',a')]$ in offline RL). $\mathbf{K}(x,x_i)$ is the kernel value between test data $x$ and training data $x_{i}$ . We denote the feature map of $\mathbf{K}(\cdot ,\cdot)$ as $\Phi (\cdot)$ , and $\mathbf{K}(x,x^{\prime}) = \langle \Phi (x),\Phi (x^{\prime})\rangle$ . Then, Eq. (14) is equivalent to: + +$$ +f _ {n t k} (x) = \left(\langle \Phi (x), \Phi (x _ {1}) \rangle , \dots , \langle \Phi (x), \Phi (x _ {n}) \rangle\right) \cdot \mathbf {K} _ {\text {t r a i n}} ^ {- 1} \mathbf {Y} \tag {15} +$$ + +# B.2 IMPACT OF DATA GEOMETRY ON DEEP $Q$ FUNCTIONS + +In this section, we analyze the impact of data geometry on deep $Q$ functions under the NTK regime. We first introduce the smoothness property of the feature map $\Phi(x)$ induced by NTK (Lemma 2). Then, we introduce the equivalence between the kernel regression solution in Eq. 
(15) and a min-norm solution (Lemma 3). Builds on Lemma 2 and Lemma 3, Lemma 4 analyzes the smoothness of the deep $Q$ functions. At last, we study how data geometry affects deep $Q$ functions (Theorem 1). + +Assumption 1. (NTK assumption). We assume the function approximators discussed in our paper are two-layer fully-connected ReLU neural networks with infinity width and are trained with infinitesimally small learning rate unless otherwise specified. + +Although there exist some gaps between the NTK assumption and the real setting, NTK is one of the most advanced theoretical machinery from the generalization analysis of DNN. In addition, Assumption 1 is common in previous analysis on the generalization of DNN (Jacot et al., 2018; Arora et al., 2019a; Bietti & Mairal, 2019) and the convergence of DRL (Cai et al., 2019; Liu et al., 2019; Xu & Gu, 2020; Fan et al., 2020). For more accurate analysis, we should adopt more advanced analysis tools than NTK and hence leave it for future work. + +We first introduce Lemma 2 (Proposition 4 of (Bietti & Mairal, 2019)), which shows the feature map $\Phi(x)$ induced by NTK is not Lipschitz continuous but holds a weaker Hölder smoothness property. + +Lemma 2. (Smoothness of the kernel map of two-layer ReLU networks). Let $\Phi$ be the kernel map of the neural tangent kernel induced by a two-layer ReLU neural network, $x$ and $y$ be two inputs, then $\Phi$ satisfies the following smoothness property. + +$$ +\left\| \Phi (x) - \Phi (y) \right\| \leq \sqrt {\min (\| x \| , \| y \|) \| x - y \|} + 2 \| x - y \|. \tag {16} +$$ + +Lemma 3 (Lemma 2 of (Xu et al., 2020)) builds the connection between the kernel regression solution in Eq. (14) and the a min-norm solution. For the proof of Lemma 3, we refer the reader to(Xu et al., 2020). + +Lemma 3. (Equivalence to a min-norm optimization problem). Let $\Phi(x)$ be the feature map induced by a neural tangent kernel, for any $x \in \mathbb{R}^d$ . 
The solution to the kernel regression in Eq. (14) and Eq. (15) is equivalent to $f_{ntk}(x) = \Phi(x)^T \beta_{ntk}$ , where $\beta_{ntk}$ is the optimal solution of a min-norm optimization problem defined as + +$$ +\min _ {\beta} \| \beta \| +$$ + +$$ +s. t. \Phi \left(x _ {i}\right) ^ {T} \beta = y _ {i}, f o r i = 1, \dots , n. \tag {17} +$$ + +Then, deep $Q$ functions satisfy the following smoothness property. + +Lemma 4. (Smoothness for deep $Q$ functions). Given two inputs $x$ and $x'$ , the distance between these two data points is $d = \| x - x' \|$ . $C_1 \coloneqq \sup_{\| \beta_{ntk} \|_\infty} \| \beta_{ntk} \|_\infty$ is a finite constant. Then the difference between the output at $x$ and the output at $x'$ can be bounded by: + +$$ +\left\| Q _ {\theta} (x) - Q _ {\theta} \left(x ^ {\prime}\right) \right\| \leq C _ {1} \left(\sqrt {\min \left(\left\| x \right\| , \left\| x ^ {\prime} \right\|\right)} \sqrt {d} + 2 d\right) \tag {18} +$$ + +Proof. In offline RL, we denote a general $Q$ network by $Q_{\theta}(x): \mathbb{R}^{|S| + |\mathcal{A}|} \to \mathbb{R}$ , where $\theta$ is all the parameters in the network and $x = (s,a) \in \mathbb{R}^{|S| + |\mathcal{A}|}$ is the brief notation for state-action pair $(s,a)$ . The $Q$ function is trained via minimizing the temporal difference error defined as $\frac{1}{2}\sum_{i=1}^{n}(Q_{\theta}(x_i) - y_i)^2$ by gradient descent, where $y_i = r(x_i) + \gamma \mathbb{E}_{a_i' \sim \pi(\cdot | s_i')} [Q_{\theta'}^\pi(x_i')] \in \mathbb{R}$ is the target value. + +Using kernel method from NTK, $Q$ function can be formulated as $Q_{\theta}(x) = \Phi (x)^{T}\beta$ , where $\Phi (x)$ is independent of the changes on training labels when NTK assumption holds. 
This is because as the width of a neural net goes to infinity, the NTK kernel $\mathbf{K}(x,x^{\prime}) = < \Phi (x),\Phi (x^{\prime})>$ produced by this network stays constant during training, and so is the property of the feature map $\Phi (x)$ (Jacot et al., 2018). So, the learning process under NTK framework is actually adjusting $\beta$ to fit the label rather than $\Phi (x)$ . As a result, Lemma 2 holds when deep $Q$ function satisfies NTK assumptions. Given two inputs $x$ and $x^{\prime}$ , the distance between these two inputs is $d = \| x - x^{\prime}\|$ . Based on Lemma 2, it is easy to see that + +$$ +\begin{array}{l} \left\| Q _ {\theta} (x) - Q _ {\theta} \left(x ^ {\prime}\right) \right\| = \left\| \Phi (x) ^ {T} \beta - \Phi \left(x ^ {\prime}\right) ^ {T} \beta \right\| \\ \leq \| \Phi (x) - \Phi (x ^ {\prime}) \| \| \beta \| _ {\infty} \quad (\text {I n f i n i t y n o r m}) \\ \leq \| \beta \| _ {\infty} \left(\sqrt {\min \left(\| x \| , \| x ^ {\prime} \|\right) \cdot \| x - x ^ {\prime} \|} + 2 \| x - x ^ {\prime} \|\right) (\text {L e m m a} 2) \tag {19} \\ = \| \beta \| _ {\infty} (\sqrt {\min (\| x \| , \| x ^ {\prime} \|) \cdot d} + 2 d) \\ \leq C _ {\beta} \left(\sqrt {\operatorname* {m i n} \left(\| x \| , \| x ^ {\prime} \|\right) \cdot d} + 2 d\right) \quad \left(C _ {\beta} := \sup \| \beta \| _ {\infty}\right) \\ \end{array} +$$ + +Additionally, if we consider the delayed $Q$ target and delayed actor updates during policy learning, we can assume the target value used for $Q$ evaluation stays relatively stable during each policy evaluation step and the problem can be seen as solving a series of regression problems. Under this mild assumption, we can learn the actual $\beta_{ntk}$ at each step ( $\beta \rightarrow \beta_{ntk}$ and so $C_{\beta} \rightarrow C_1$ , where $C_1 \coloneqq \sup \| \beta_{ntk} \|_{\infty}$ ) and thus complete the proof. 
Similar assumptions and treatments are also used + 

in Section 4 of (Kumar et al., 2020a), i.e., that the $Q$ function at each iteration can fit its label well, Appendix A.8 of (Xiao et al., 2021), as well as Appendix F of (Ghasemipour et al.). + +

![](images/954b6d160de1bdbd32bae04107cf7708adb2e72ff9baa6acde8219101018a7c7.jpg) + +

Lemma 4 states that the value difference of a deep $Q$ function for two inputs is related to the distance between these two inputs. The closer the distance, the smaller the value difference. + +

# B.2.1 PROOF OF THEOREM 1 + +

Building on Lemma 4, we can combine the data geometry and analyze the impact of data geometry on deep $Q$ functions. + +

Proof. We first review the definition of interpolated data and extrapolated data. Under continuous state-action space, state-action pairs within the convex hull of the dataset can be represented in an interpolated manner (referred to as interpolated data $x_{in}$ ): + +

$$
x _ {i n} = \sum_ {i = 1} ^ {n} \alpha_ {i} x _ {i}, \quad \sum_ {i = 1} ^ {n} \alpha_ {i} = 1, \alpha_ {i} \geq 0 \tag {20}
$$

 + 

Similarly, we can define extrapolated data that lie outside the convex hull of the dataset as $x_{out}$ : + +

$$
x _ {o u t} = \sum_ {i = 1} ^ {n} \beta_ {i} x _ {i}, \tag {21}
$$

 + 

where $\sum_{i=1}^{n} \beta_{i} = 1$ and $\beta_{i} \geq 0$ do not hold simultaneously. + +

We define $\mathrm{Proj}_{\mathcal{D}}(x) \coloneqq \arg \min_{x_i \in \mathcal{D}} \| x - x_i \|$ as a projector that projects unseen data $x$ to its nearest data in dataset $\mathcal{D}$ . Given an interpolated data point $x_{in}$ and an extrapolated data point $x_{out}$ , the distances to their nearest data in the dataset are $d_{x_{in}} = \| x_{in} - \mathrm{Proj}_{\mathcal{D}}(x_{in})\|$ and $d_{x_{out}} = \| x_{out} - \mathrm{Proj}_{\mathcal{D}}(x_{out})\|$ . 
Because interpolated data lie inside the convex hull of training data, $d_{x_{in}} \leq \max_{x_i \in \mathcal{D}} \| x_{in} - x_i\| \leq B$ is bounded, where $B \coloneqq \max_{x_i, x_j \in \mathcal{D}} \| x_i - x_j\|$ is a finite constant. Then, by applying Lemma 4, the value difference of the deep $Q$ function for interpolated and extrapolated data can be formulated as follows. + +

$$
\begin{array}{l} \left\| Q _ {\theta} \left(x _ {i n}\right) - Q _ {\theta} \left(\operatorname {Proj} _ {\mathcal {D}} \left(x _ {i n}\right)\right) \right\| \leq C _ {1} \left(\sqrt {\min \left(\left\| x _ {i n} \right\| , \left\| \operatorname {Proj} _ {\mathcal {D}} \left(x _ {i n}\right) \right\|\right)} \sqrt {d _ {x _ {i n}}} + 2 d _ {x _ {i n}}\right) (22) \\ \leq C _ {1} (\sqrt {\min (\| x _ {i n} \| , \| \mathrm {Proj} _ {\mathcal {D}} (x _ {i n}) \|)} \sqrt {B} + 2 B) \\ \left\| Q _ {\theta} \left(x _ {o u t}\right) - Q _ {\theta} \left(\operatorname {Proj} _ {\mathcal {D}} \left(x _ {o u t}\right)\right) \right\| \leq C _ {1} \left(\sqrt {\min \left(\left\| x _ {o u t} \right\| , \left\| \operatorname {Proj} _ {\mathcal {D}} \left(x _ {o u t}\right) \right\|\right)} \sqrt {d _ {x _ {o u t}}} + 2 d _ {x _ {o u t}}\right) (23) \\ \end{array}
$$

 + 

![](images/49b4837e3942e6b96296cee735f0ef3adf6a25c49272a1dfbe78e7965f274437.jpg) + +

# B.3 QUANTITATIVE EXPERIMENTS ON THEOREM 1 + +

In addition to the one-dimensional random walk experiments presented in Section 2.2, we conduct additional experiments on the more complex and high-dimensional MuJoCo tasks (including D4RL Hopper-medium-v2, Halfcheetah-medium-v2, and Walker2d-medium-v2) to provide quantitative support to Theorem 1, in particular, the pertinence of interpolation and extrapolation. 
We first synthesize lots of interpolated data $x_{in}$ and extrapolated data $x_{out}$ ( $x = (s,a) \in S \times \mathcal{A}$ ) and then search for their nearest data points in offline dataset $\mathcal{D}$ accordingly, i.e., $\mathrm{Proj}_{\mathcal{D}}(x_{in})$ and $\mathrm{Proj}_{\mathcal{D}}(x_{out})$ . Then, we can evaluate the Q-value differences $\| Q_{\theta}(x) - Q_{\theta}(\mathrm{Proj}_{\mathcal{D}}(x)) \|$ (LHS of Theorem 1) at these generated data and see whether the Q-value differences align well with Theorem 1. + +For the detailed experiment setup, recall that an interpolated data point $x_{in}$ is a convex combination of the offline dataset, i.e., $x_{in} = \sum_{i=1}^{n} \alpha_i x_i$ , $x_i \sim \mathcal{D}$ with weights $\alpha_i$ that satisfy $\sum_{i=1}^{n} \alpha_i = 1$ , $\alpha_i \geq 0$ . Therefore, we can interpolate the offline dataset based on $\alpha_i$ sampled from the Dirichlet distribution to generate the interpolated data. Also, an extrapolated data point $x_{out}$ is expressed as a weighted sum of the offline dataset, i.e., $x_{out} = \sum_{i=1}^{n} \beta_i x_i$ , $x_i \sim \mathcal{D}$ , but its weights $\beta_i$ do not satisfy the non-negativity and the summing to 1 constraint. Therefore, we can generate extrapolated data by setting the sign of some weights to negative values and varying the weights not summing to + +1. After obtaining the interpolated and extrapolated data, we search for their closest data points in the offline dataset $\mathcal{D}$ and calculate their corresponding distance $\| x - \mathrm{Proj}_{\mathcal{D}}(x)\|$ and Q-value difference $\| Q_{\theta}(x) - Q_{\theta}(\mathrm{Proj}_{\mathcal{D}}(x))\|$ . Figure 7a shows the relationship between the distance to dataset $\| x - \mathrm{Proj}_{\mathcal{D}}(x)\|$ and the Q value difference $\| Q_{\theta}(x) - Q_{\theta}(\mathrm{Proj}_{\mathcal{D}}(x))\|$ (LHS of Theorem 1). 
We also report the learned state-conditioned distance value $g(s,a)$ on these generated data in Figure 7b. + +![](images/eeb3ecf239a7fddf6fea693422bd624507d740ee5eb0e0d9121775734a4a727b.jpg) +(a) Relationship between $\| x - \mathrm{Proj}_{\mathcal{D}}(x)\|$ and $\| Q_{\theta}(x) - Q_{\theta}(\mathrm{Proj}_{\mathcal{D}}(x))\|$ . + +![](images/eab23ce40935c956c124f56f40c880ba2fbc3898c3859eb796b8a7a5982dc1f0.jpg) + +![](images/09526943a1758135a2ffa575529a71311d5357db9ec3ab1385b0eced0bbbc036.jpg) + +![](images/0beaed88dd5d90733469244a12c970450bed7aec876c42609af9dd8827c9385e.jpg) +(b) Relationship between $\| x - \mathrm{Proj}_{\mathcal{D}}(x)\|$ and $g(x)$ + +![](images/30752cd9b63981eab4965437fe946e333de3d07684b283e7ae370bce67f9ed57.jpg) +Figure 7: Quantitative experiments of Theorem 1 on the D4RL MuJoCo-medium datasets. The red star-shaped dots are the interpolated data and the circle dots are the extrapolated data. The color of the dots represents $\| Q_{\theta}(x) - Q_{\theta}(\mathrm{Proj}_{\mathcal{D}}(x))\|$ values in (a) and $g(x)$ values in (b), respectively. The darker the color, the smaller the corresponding value. In (a), the yellow dash line is the empirical upper bound of $\| Q_{\theta}(x) - Q_{\theta}(\mathrm{Proj}_{\mathcal{D}}(x))\|$ . + +![](images/52e5bf46ea9c1b5f3543bb8d123882bb81995122161d46e7eb5fb6e383d8a770.jpg) + +Figure 7a demonstrates that the interpolated data enjoy a tighter empirical upper bound of $\| Q_{\theta}(x) - Q_{\theta}(\mathrm{Proj}_{\mathcal{D}}(x)) \|$ (LHS of Theorem 1) than most of the extrapolated data. Moreover, the empirical upper bound of the Q-value difference grows with the increase of the sample-to-dataset distance $\| x - \mathrm{Proj}_{\mathcal{D}(x)} \|$ , which is consistent with Theorem 1 (the upper bound of value difference of deep Q function is well controlled by distance to the dataset). 
Figure 7b shows that the state-conditioned distance function $g(s, a)$ can output low values for interpolated data and some near-dataset extrapolated data, and thus can be used as a relaxed policy constraint in these OOD regions. + 

# C STATE-CONDITIONED DISTANCE FUNCTION + 

# C.1 PROOF OF PROPERTY 1 + 

Proof. Given a state-action pair from the training data $(s, a) \sim \mathcal{D}$ , we synthesize random noise actions from a uniform distribution over the action space, i.e., $\hat{a} \sim \text{Unif}(\mathcal{A})$ . Then the distance function $g(\cdot)$ is trained by Eq. (24). + 

$$
\min _ {g} \mathbb {E} _ {(s, a) \sim \mathcal {D}} \left[ \mathbb {E} _ {\hat {a} \sim \mathrm {Unif} (\mathcal {A})} \left[ \| \hat {a} - a \| - g (s, \hat {a}) \right] ^ {2} \right] \tag {24}
$$

 + 

$\left[\| \hat{a} - a\| - g(s, \hat{a})\right]^2$ can be upper bounded by some finite constants because $S \times \mathcal{A}$ is compact in our analysis. The optimization problem in Eq. (24) can be reformulated into the following form according to Fubini's Theorem. + 

$$
\min _ {g} \mathbb {E} _ {\hat {a} \sim \mathrm {Unif} (\mathcal {A})} \left[ \mathbb {E} _ {(s, a) \sim \mathcal {D}} \left[ \| \hat {a} - a \| - g (s, \hat {a}) \right] ^ {2} \right] \tag {25}
$$

 + 

Note that the objective of Eq. (25) can also be written as a functional $J[g(s, \hat{a})]$ with respect to the function $g$ in the following form: + 

$$
J [ g (s, \hat {a}) ] = \int_ {\mathcal {A}} \frac {1}{| \mathcal {A} |} \left[ \mathbb {E} _ {(s, a) \sim \mathcal {D}} [ \| \hat {a} - a \| - g (s, \hat {a}) ] ^ {2} \right] \mathrm {d} \hat {a} = \int_ {\mathcal {A}} F (s, \hat {a}, g (s, \hat {a})) \mathrm {d} \hat {a} \tag {26}
$$

 + 

Based on the calculus of variations, the extrema (maxima or minima) of the functional $J[g(s,\hat{a})]$ can be obtained by solving the associated Euler–Lagrange equation $(\partial F / \partial g = 0)$ . 
In our case, it requires that the optimal state-conditioned distance function $g^{*}$ satisfy the following conditions: + +

$$
\begin{array}{l} \frac {\partial}{\partial g ^ {*}} \mathbb {E} _ {(s, a) \sim \mathcal {D}} [ \| \hat {a} - a \| - g ^ {*} (s, \hat {a}) ] ^ {2} = 0 \\ \Rightarrow \mathbb {E} _ {(s, a) \sim \mathcal {D}} \left[ \frac {\partial}{\partial g ^ {*}} [ \| \hat {a} - a \| - g ^ {*} (s, \hat {a}) ] ^ {2} \right] = 0 \quad (\text {DNN is continuous}) \tag {27} \\ \Rightarrow \quad \mathbb {E} _ {(s, a) \sim \mathcal {D}} \left[ \left\| \hat {a} - a \right\| - g ^ {*} (s, \hat {a}) \right] = 0 \\ \end{array}
$$

 + 

Conditioned on a state $s \in \mathcal{D}$ , the optimal state-conditioned distance function in Eq. (27) satisfies the following conditions: + +

$$
\begin{array}{l} \int_ {\mathcal {A}} \| \hat {a} - a \| \mu (s, a) \mathrm {d} a - \int_ {\mathcal {A}} \mu (s, a) \mathrm {d} a g ^ {*} (s, \hat {a}) = 0, s \in \mathcal {D} \\ \Rightarrow g ^ {*} (s, \hat {a}) = \frac {\int_ {\mathcal {A}} \| \hat {a} - a \| \mu (s , a) \mathrm {d} a}{\int_ {\mathcal {A}} \mu (s , a) \mathrm {d} a}, s \in \mathcal {D} \tag {28} \\ \Rightarrow g ^ {*} (s, \hat {a}) = \int_ {\mathcal {A}} C (s, a) \| \hat {a} - a \| \mathrm {d} a, s \in \mathcal {D} \\ \end{array}
$$

 + 

where $\mu(s,a)$ is the empirical distribution on a finite offline dataset $\mathcal{D} = \{x_i\}_{i=1}^n$ , i.e., the sum of the Dirac measures $\frac{1}{n}\sum_{i=1}^{n}\delta_{x_i}$ . $\forall (s,a) \notin \mathcal{D}, \mu(s,a) = 0$ ; $\forall (s,a) \in \mathcal{D}, \mu(s,a) > 0$ . $C(s,a) = \frac{\mu(s,a)}{\int_{\mathcal{A}}\mu(s,a)\mathrm{d}a} \geq 0$ and $\int_{\mathcal{A}} C(s,a)\mathrm{d}a = 1$ . Because the $L_2$ -norm is convex and a non-negative combination of convex functions is still convex, $g^*(s,\hat{a})$ is a convex function w.r.t. $\hat{a}$ . 
In addition, $\forall \hat{a} \in \mathcal{A}$ , by the Jensen inequality, we have: + +$$ +g ^ {*} (s, \hat {a}) \geq \left\| \hat {a} - \mathbb {E} _ {a \sim U n i f (\mathcal {A})} [ C (s, a) a ] \right\| = \| \hat {a} - a _ {o} (s) \|, s \in \mathcal {D} \tag {29} +$$ + +where $a_{o}(s)\coloneqq \mathbb{E}_{a\sim Unif(\mathcal{A})}[C(s,a)a], s\in \mathcal{D}$ is the state-conditioned centroid of training dataset. + +![](images/d73f8aa0feb0845b0c7bc820ddc4d1e4bcfe623ce3bb9813ccf118a16fc014c2.jpg) + +# C.2 PROOF OF PROPERTY 2 + +Proof. The negative gradient of the optimal state-conditioned distance function can be formulated as: + +$$ +\begin{array}{l} - \nabla_ {\hat {a}} g ^ {*} (s, \hat {a}) = - \int_ {\mathcal {A}} C (s, a) \frac {\hat {a} - a}{\| \hat {a} - a \|} \mathrm {d} a, \forall \hat {a} \in \mathcal {A}, s \in \mathcal {D} \tag {30} \\ = \frac {1}{\int_ {\mathcal {A}} \mu (s , a) \mathrm {d} a} \int_ {\mathcal {A}} \mu (s, a) \frac {- (\hat {a} - a)}{\| \hat {a} - a \|} \mathrm {d} a, \forall \hat {a} \in \mathcal {A}, s \in \mathcal {D} \\ \end{array} +$$ + +Observe that the direction of the negative gradient of $g^{*}(s,\hat{a})$ is related to the integral of vector $-(\hat{a} - a)$ (points towards $a$ ). When $(s,a)\notin \mathcal{D}, - (\hat{a} - a)$ doesn't influence the final gradient because + +$\mu(s, a) = 0$ . Therefore, $-(\hat{a} - a)$ only contribute to the final gradient of $g^{*}(s, \hat{a})$ for $(s, a) \in \mathcal{D}$ as $\mu(s, a) > 0$ . For a given $s \in \mathcal{D}$ and any extrapolated action $\hat{a}$ that lies outside the convex hull of training data, the integral of vector $-( \hat{a} - a)$ is basically a non-negative combination of vectors $-( \hat{a} - a)$ that point toward actions $a \in \mathcal{D}$ inside the convex hull. As a result, it's easy to see that $-\nabla_{\hat{a}} g^{*}(s, \hat{a})$ also points inside the convex hull formed by the data. 
 + 

![](images/3a7448145dae4f9942d834fdc119dc02af699d7ba16aa91551155dadcaf152e7.jpg) + 

# D THEORETICAL ANALYSIS OF DOGE + 

In this section, we analyze the performance of the policy learned by DOGE. We first adopt the Bellman-consistent coefficient from (Xie et al., 2021a) to quantify the distributional shift from the perspective of deep $Q$ function generalization. Then, we give the upper bound of the Bellman-consistent coefficient under the NTK regime (Appendix D.1). Finally, we give the performance bound of DOGE (Appendix D.2). + 

# D.1 UPPER BOUND OF BELLMAN-CONSISTENT COEFFICIENT + 

Let us first review the definition of the Bellman-consistent coefficient $\mathcal{B}(v,\mu ,\mathcal{F},\pi)$ in (Xie et al., 2021a). We define $\mathcal{B}(v,\mu ,\mathcal{F},\pi)$ to measure the distributional shift from an arbitrary distribution $v$ to the data distribution $\mu$ , w.r.t. $\mathcal{F}$ and $\pi$ . $\mathcal{F}$ is the function class of $Q$ networks. + 

$$
\mathcal {B} (v, \mu , \mathcal {F}, \pi) := \sup _ {Q \in \mathcal {F}} \frac {\| Q - \mathcal {T} ^ {\pi} Q \| _ {2 , v} ^ {2}}{\| Q - \mathcal {T} ^ {\pi} Q \| _ {2 , \mu} ^ {2}} \tag {31}
$$

 + 

where the $\mu$ -weighted norm (square) is defined as $\|f\|_{2,\mu}^2 \coloneqq \mathbb{E}_{\mu}[\|f\|^2]$ , which is also applicable for any distribution $v$ . $\mathcal{T}^{\pi}Q$ is the Bellman operator of policy $\pi$ , defined as $\mathcal{T}^{\pi}Q(s,a) \coloneqq r(s,a) + \gamma \mathbb{E}_{a' \sim \pi(\cdot|s'), s' \sim \mathcal{P}(\cdot|s,a)}[Q(s',a')] \coloneqq r(s,a) + \gamma \mathbb{P}^{\pi}[Q(s',a')]$ . $\mathbb{P}^{\pi}[\cdot]$ is the brief notation for $\mathbb{E}_{a' \sim \pi(\cdot|s'), s' \sim \mathcal{P}(\cdot|s,a)}[\cdot]$ . The smaller the ratio of the Bellman error under $v$ and $\mu$ , the more transferable the $Q$ function from $\mu$ to $v$ , even when $\sup_{(s,a)} \frac{v(s,a)}{\mu(s,a)} = \infty$ . Then we give the proof of Theorem 2 (Upper bound of Bellman-consistent coefficient). + 

Proof. 
We denote $x = (s, a)$ and $x' = (s', a')$ . $x_o = \mathbb{E}_{x \sim \mathcal{D}}[x]$ is the centroid of offline dataset. $d_1 = \| x - x_o \|$ and $d_2 = \| x' - x_o \|$ are the sample-to-centroid distances. Let $\mu(x)$ be the distribution under the offline dataset and $v(x)$ be any distribution. Then, for the numerator in Eq. (8) and Eq. (31), we have the following inequalities. + +$$ +\begin{array}{l} \| Q - \mathcal {T} ^ {\pi} Q \| _ {2, v} ^ {2} \\ = \int_ {\mathcal {S} \times \mathcal {A}} v (x) \| Q (x) - r (x) - \gamma \mathbb {P} ^ {\pi} [ Q (x ^ {\prime}) ] \| ^ {2} \\ = \int_ {\mathcal {S} \times \mathcal {A}} v (x) \| Q (x) - \mathbb {P} ^ {\pi} [ Q (x ^ {\prime}) ] - r (x) + (1 - \gamma) \mathbb {P} ^ {\pi} [ Q (x ^ {\prime}) ] \| ^ {2} \\ \leq \int_ {\mathcal {S} \times \mathcal {A}} v (x) \left[ \| Q (x) - \mathbb {P} ^ {\pi} [ Q (x ^ {\prime}) ] \| + \| r (x) \| + \| (1 - \gamma) \mathbb {P} ^ {\pi} [ Q (x ^ {\prime}) ] \| \right] ^ {2} (\text {T r i a n g l e}) \\ = \int_ {\mathcal {S} \times \mathcal {A}} v (x) \left[ \| Q (x) - Q \left(x _ {o}\right) + Q \left(x _ {o}\right) - \mathbb {P} ^ {\pi} \left[ Q \left(x ^ {\prime}\right) \right] \| + \| r (x) \| + (1 - \gamma) \| \mathbb {P} ^ {\pi} \left[ Q \left(x ^ {\prime}\right) \right] - Q \left(x _ {o}\right) + Q \left(x _ {o}\right) \| \right] ^ {2} \\ \leq \int_ {\mathcal {S} \times \mathcal {A}} v (x) \left[ (1 - \gamma) \| Q \left(x _ {o}\right) \| + \| r (x) \| + \| Q (x) - Q \left(x _ {o}\right) \| + (2 - \gamma) \| \mathbb {P} ^ {\pi} \left[ Q \left(x ^ {\prime}\right) \right] - Q \left(x _ {o}\right) \| \right] ^ {2} (\text {T r i a n g l e}) \\ \leq \int_ {\mathcal {S} \times \mathcal {A}} v (x) \left[ \underbrace {(1 - \gamma) \| Q \left(x _ {o}\right) \| + \| r (x) \|} _ {\mathcal {I} _ {1}} + \underbrace {\| Q (x) - Q \left(x _ {o}\right) \|} _ {\mathcal {I} _ {2}} + \underbrace {(2 - \gamma) \mathbb {P} ^ {\pi} [ \| Q \left(x ^ {\prime}\right) - Q \left(x _ {o}\right) \| ]} _ 
{\mathcal {I} _ {3}} \right] ^ {2} (\text {J e n s e n}) \tag {32} \\ \end{array} +$$ + +The RHS contains three parts: $\mathcal{I}_1 = (1 - \gamma)\| Q(x_o)\| +\| r(x)\|$ , $\mathcal{I}_2 = \| Q(x) - Q(x_o)\|$ and $\mathcal{I}_3 = (2 - \gamma)\mathbb{P}^\pi [\| Q(x') - Q(x_o)\| ]$ . Because $\| r(x)\| \in [0,R_{\max}],\forall x\in S\times \mathcal{A},\mathcal{I}_1$ can be upper + +bounded as: + +$$ +\mathcal {I} _ {1} \leq (1 - \gamma) Q \left(x _ {o}\right) + R _ {\max } \tag {33} +$$ + +By applying Lemma 4, $\mathcal{I}_2$ is upper bounded as + +$$ +\mathcal {I} _ {2} \leq C _ {1} \left[ \sqrt {\min (\| x \| , \| x _ {o} \|) d _ {1}} + 2 d _ {1} \right] \tag {34} +$$ + +$\mathcal{I}_3$ is upper bounded as + +$$ +\mathcal {I} _ {3} \leq C _ {1} (2 - \gamma) \mathbb {P} ^ {\pi} \left[ \sqrt {\min (\| x ^ {\prime} \| , \| x _ {o} \|) d _ {2}} + 2 d _ {2} \right] \tag {35} +$$ + +In addition, we denote $C_2 \coloneqq \sqrt{\sup_{x \in S \times A} \|x\|}$ . Then, $\mathcal{I}_2$ and $\mathcal{I}_3$ can be further upper bounded by + +$$ +\mathcal {I} _ {2} \leq C _ {1} \left(C _ {2} \sqrt {d _ {1}} + 2 d _ {1}\right) \tag {36} +$$ + +$$ +\mathcal {I} _ {3} \leq (2 - \gamma) C _ {1} \mathbb {P} ^ {\pi} \left(C _ {2} \sqrt {d _ {2}} + 2 d _ {2}\right) \tag {37} +$$ + +The above relaxation of the upper bound in Eq. (36) and Eq. (37) is not necessary, but for notation brevity, we choose to relax the upper bound by treating $C_2 \coloneqq \sqrt{\sup_{x \in S \times A} \| x \|}$ . + +Plug Eq. (33), Eq. (36) and Eq. (37) into the RHS of Eq. 
(32), we can get + 

$$
\begin{array}{l} \left\| Q - \mathcal {T} ^ {\pi} Q \right\| _ {2, v} ^ {2} \\ \leq \int_ {\mathcal {S} \times \mathcal {A}} v (x) \left[ (1 - \gamma) Q (x _ {o}) + R _ {\max } + C _ {1} (C _ {2} \sqrt {d _ {1}} + 2 d _ {1}) + (2 - \gamma) C _ {1} \mathbb {P} ^ {\pi} (C _ {2} \sqrt {d _ {2}} + 2 d _ {2}) \right] ^ {2} \\ = \left\| (1 - \gamma) Q \left(s _ {o}, a _ {o}\right) + R _ {\max } + C _ {1} \left(C _ {2} \sqrt {d _ {1}} + 2 d _ {1}\right) + (2 - \gamma) C _ {1} \mathbb {P} ^ {\pi} \left(C _ {2} \sqrt {d _ {2}} + 2 d _ {2}\right) \right\| _ {2, v} ^ {2} \tag {38} \\ \end{array}
$$

 + 

For the denominator $\| Q - \mathcal{T}^{\pi}Q\|_{2,\mu}^{2}$ in Eq. (8) and Eq. (31), because the $Q$ function is approximated, there exists an approximation error between $Q$ and $\mathcal{T}^{\pi}Q$ , i.e., $Q - \mathcal{T}^{\pi}Q \geq \epsilon$ . In addition, the distribution $\mu$ contains some mismatch w.r.t. the equilibrium distribution induced by policy $\pi$ . Therefore, it is reasonable to assume $\| Q - \mathcal{T}^{\pi}Q\|_{2,\mu}^{2} \geq \epsilon_{\mu} > 0$ . + 

Then, we can complete the proof by plugging the upper bound in Eq. (38) and $\| Q - \mathcal{T}^{\pi}Q\|_{2,\mu}^{2}\geq$ $\epsilon_{\mu} > 0$ into Eq. (8) or Eq. (31). + 

$$
\mathcal {B} (v, \mu , \mathcal {F}, \pi) \leq \frac {1}{\epsilon_ {\mu}} \left\| \underbrace {(1 - \gamma) Q \left(s _ {o} , a _ {o}\right) + R _ {\max }} _ {\mathcal {B} _ {1}} + \underbrace {C _ {1} \left(C _ {2} \sqrt {d _ {1}} + 2 d _ {1}\right)} _ {\mathcal {B} _ {2}} + \underbrace {(2 - \gamma) C _ {1} \mathbb {P} ^ {\pi} \left(C _ {2} \sqrt {d _ {2}} + 2 d _ {2}\right)} _ {\mathcal {B} _ {3}} \right\| _ {2, v} ^ {2} \tag {39}
$$

 + 

It is worth mentioning that the distance regularization in DOGE compels the learned policy to output actions that are near the state-conditioned centroid of the dataset, and thus $\mathcal{B}_2$ and $\mathcal{B}_3$ can be driven to some small values. 
$\mathcal{B}_1$ is independent of the distributional shift. Therefore, $\mathcal{B}(v,\mu ,\mathcal{F},\pi)$ can be bounded by some finite constants under DOGE. Therefore, the constrained policy set induced by DOGE is essentially a Bellman-consistent constrained policy set $\Pi_{\mathcal{B}}$ defined in Definition 2. In addition, other policy constraint methods such as BEAR (Kumar et al., 2019) can also have bounded $\mathcal{B}$ . However, these policy constraint methods do not allow the learned policy to shift to those generalizable distributions where $\mathcal{B}(v,\mu ,\mathcal{F},\pi)$ is small but $\sup_{(s,a)}\frac{v(s,a)}{\mu(s,a)}\to \infty$ , which is essentially different from DOGE. + 

# D.2 PERFORMANCE OF THE POLICY LEARNED BY DOGE + 

Here, we briefly review the definition of the Bellman-consistent constrained policy set $\Pi_{\mathcal{B}}$ defined in Definition 2. The Bellman-consistent coefficient under the transition induced by $\Pi_{\mathcal{B}}$ can be bounded by some finite constants $l(k)$ : + 

$$
\mathcal {B} \left(\rho_ {k}, \mu , \mathcal {F}, \pi\right) \leq l (k) \tag {40}
$$

 + 

where $\rho_0$ is the initial state-action distribution and $\mu$ is the distribution of training data. $\rho_{k} = \rho_{0}P^{\pi_{1}}P^{\pi_{2}}\ldots P^{\pi_{k}},\forall \pi_{1},\pi_{2},\ldots ,\pi_{k}\in \Pi_{\mathcal{B}}$ and $P^{\pi_i}$ is the transition operator on states induced by $\pi_{i}$ , i.e., $P^{\pi_i}(s',a'|s,a) = \mathcal{P}(s'|s,a)\pi_i(a'|s')$ . + 

We denote the constrained Bellman operator induced by $\Pi_{\mathcal{B}}$ as $\mathcal{T}^{\Pi_{\mathcal{B}}}$ , and $\mathcal{T}^{\Pi_{\mathcal{B}}}Q(s,a) := r(s,a) + \max_{\pi \in \Pi_{\mathcal{B}}} \gamma \mathbb{P}^{\pi}[Q(s',a')]$ . $\mathcal{T}^{\Pi_{\mathcal{B}}}$ can be seen as an operator in a redefined MDP and hence is a contraction mapping and admits a fixed point. 
We denote $Q^{\Pi_{\mathcal{B}}}$ as the fixed point of $\mathcal{T}^{\Pi_{\mathcal{B}}}$ , i.e., $Q^{\Pi_{\mathcal{B}}} = \mathcal{T}^{\Pi_{\mathcal{B}}}Q^{\Pi_{\mathcal{B}}}$ . + +The Bellman optimal operator $\mathcal{T}$ is + +$$ +\mathcal {T} Q (s, a) := r (s, a) + \max _ {\pi} \gamma \mathbb {P} ^ {\pi} [ Q \left(s ^ {\prime}, a ^ {\prime}\right) ] \tag {41} +$$ + +$\mathcal{T}$ is also a contraction mapping. Its fixed point is the optimal value function $Q^{*}$ and $Q^{*} = \mathcal{T}Q^{*}$ . + +Then, by the triangle inequality, we have: + +$$ +\begin{array}{l} \left\| Q ^ {*} - Q ^ {\pi_ {n}} \right\| _ {\rho_ {0}} = \left\| Q ^ {*} - Q ^ {\Pi_ {\mathcal {B}}} + Q ^ {\Pi_ {\mathcal {B}}} - Q ^ {\pi_ {n}} \right\| _ {\rho_ {0}} \\ \leq \underbrace {\left\| Q ^ {*} - Q ^ {\Pi_ {\mathcal {B}}} \right\| _ {\rho_ {0}}} _ {L _ {1}} + \underbrace {\left\| Q ^ {\Pi_ {\mathcal {B}}} - Q ^ {\pi_ {n}} \right\| _ {\rho_ {0}}} _ {L _ {2}} \tag {42} \\ \end{array} +$$ + +where $Q^{\pi_n}$ is the true $Q$ value of policy $\pi_{n}$ . $\pi_{n}$ is the greedy policy w.r.t. to $Q_{n}$ in the Bellman-consistent constrained policy set $\Pi_{\mathcal{B}}$ , i.e., $\pi_{n} = \sup_{\pi \in \Pi_{\mathcal{B}}}\mathbb{E}_{a\sim \pi (\cdot |s)}[Q_{n}(s,a)]$ . $Q_{n}$ is the $Q$ function after $n$ -th value iteration under the constrained Bellman operator $\mathcal{T}^{\Pi_{\mathcal{B}}}$ . + +For $L_{1}$ part in Eq. (42), we first focus on the infinity norm. 
 + 

$$
\begin{array}{l} \left\| Q ^ {*} - Q ^ {\Pi_ {\mathcal {B}}} \right\| _ {\infty} = \left\| \mathcal {T} Q ^ {*} - \mathcal {T} ^ {\Pi_ {\mathcal {B}}} Q ^ {\Pi_ {\mathcal {B}}} \right\| _ {\infty} \\ \leq \left\| \mathcal {T} Q ^ {*} - \mathcal {T} ^ {\Pi_ {\mathcal {B}}} Q ^ {\Pi_ {\mathcal {B}}} \right\| _ {\infty} + \left\| \mathcal {T} ^ {\Pi_ {\mathcal {B}}} Q ^ {\Pi_ {\mathcal {B}}} - \mathcal {T} ^ {\Pi_ {\mathcal {B}}} Q ^ {*} \right\| _ {\infty} \\ \leq \left\| \mathcal {T} Q ^ {*} - \mathcal {T} ^ {\Pi_ {\mathcal {B}}} Q ^ {\Pi_ {\mathcal {B}}} \right\| _ {\infty} + \gamma \left\| Q ^ {*} - Q ^ {\Pi_ {\mathcal {B}}} \right\| _ {\infty} \quad \left(\mathcal {T} ^ {\Pi_ {\mathcal {B}}} \text { is a } \gamma \text {-contraction}\right) \tag {43} \\ = \alpha (\Pi_ {\mathcal {B}}) + \gamma \| Q ^ {*} - Q ^ {\Pi_ {\mathcal {B}}} \| _ {\infty} \\ \end{array}
$$

 + 

where $\alpha (\Pi_{\mathcal{B}})\coloneqq \| \mathcal{T}Q^{*} - \mathcal{T}^{\Pi_{\mathcal{B}}}Q^{\Pi_{\mathcal{B}}}\|_{\infty}$ is the suboptimality constant. Then, we get $\| Q^{*} - Q^{\Pi_{\mathcal{B}}}\|_{\infty}\leq$ $\frac{\alpha(\Pi_{\mathcal{B}})}{1 - \gamma}$ and $L_{1}\leq \| Q^{*} - Q^{\Pi_{\mathcal{B}}}\|_{\infty}\leq \frac{\alpha(\Pi_{\mathcal{B}})}{1 - \gamma}$ . + 

For $L_{2}$ , we introduce Lemma 5, which upper bounds $\| Q^{\Pi_{\mathcal{B}}} - Q^{\pi_n} \|_{2,\rho_0}^2$ . The proof of Lemma 5 can be obtained by directly replacing $Q^{*}$ with $Q^{\Pi_{\mathcal{B}}}$ in Appendix F.3 of (Le et al., 2019), because $Q^{\Pi_{\mathcal{B}}}$ is the optimal value function under the modified MDP induced by $\mathcal{T}^{\Pi_{\mathcal{B}}}$ . + 

Lemma 5. (Upper bound of error propagation). 
$\| Q^{\Pi_{\mathcal{B}}} - Q^{\pi_n}\|_{2,\rho_0}^2$ can be upper bounded as + 

$$
\left\| Q ^ {\Pi_ {\mathcal {B}}} - Q ^ {\pi_ {n}} \right\| _ {2, \rho_ {0}} ^ {2} \leq \left[ \frac {2 \gamma (1 - \gamma^ {n + 1})}{(1 - \gamma) ^ {2}} \right] ^ {2} \int_ {\mathcal {S} \times \mathcal {A}} \rho_ {0} (d s, d a) \left[ \sum_ {k = 0} ^ {n - 1} \alpha_ {k} A _ {k} \epsilon_ {k} ^ {2} + \alpha_ {n} A _ {n} \left(Q ^ {\Pi_ {\mathcal {B}}} - Q _ {0}\right) ^ {2} \right] (s, a) \tag {44}
$$

 + 

where + 

$$
\epsilon_ {k} = Q _ {k + 1} - \mathcal {T} ^ {\Pi_ {\mathcal {B}}} Q _ {k} \tag {45}
$$

 + 

$$
\alpha_ {k} = \frac {(1 - \gamma) \gamma^ {n - k - 1}}{1 - \gamma^ {n + 1}} \quad \text {for } k < n \tag {46}
$$

 + 

$$
\alpha_ {n} = \frac {(1 - \gamma) \gamma^ {n}}{1 - \gamma^ {n + 1}}
$$

 + 

$$
A _ {k} = \frac {1 - \gamma}{2} \sum_ {m \geq 0} \gamma^ {m} \left(P ^ {\pi_ {n}}\right) ^ {m} \left[ \left(P ^ {\pi^ {\Pi_ {\mathcal {B}}}}\right) ^ {n - k} + P ^ {\pi_ {n}} P ^ {\pi_ {n - 1}} \dots P ^ {\pi_ {k + 1}} \right] \quad \text {for } k < n \tag {47}
$$

 + 

$$
A _ {n} = \frac {1 - \gamma}{2} \sum_ {m \geq 0} \gamma^ {m} (P ^ {\pi_ {n}}) ^ {m} \left[ (P ^ {\pi^ {\Pi_ {\mathcal {B}}}}) ^ {n + 1} + P ^ {\pi_ {n}} P ^ {\pi_ {n - 1}} \dots P ^ {\pi_ {0}} \right]
$$

 + 

$Q_{0}$ is the $Q$ function after initialization. Note that $\lim_{n\to \infty}\left[\alpha_nA_n(Q^{\Pi_{\mathcal{B}}} - Q_0)^2\right] = 0$ , so we leave out this term for analysis simplicity. In addition, each $A_{k}$ is a probability kernel that combines $P^{\pi_i}$ and $P^{\pi^{\Pi_{\mathcal{B}}}}$ (the transition operator on states induced by the constrained optimal policy $\pi^{\Pi_{\mathcal{B}}}\in \Pi_{\mathcal{B}}$ ) and $\sum_{k}\alpha_{k} = 1$ . + 

The key part in Eq. (44) is $\int_{\mathcal{S} \times \mathcal{A}} \rho_0 A_k \epsilon_k^2$ and we expand this term as follows. 
+ +$$ +\begin{array}{l} \int_ {\mathcal {S} \times \mathcal {A}} \rho_ {0} A _ {k} \epsilon_ {k} ^ {2} = \int_ {\mathcal {S} \times \mathcal {A}} \frac {1 - \gamma}{2} \rho_ {0} \sum_ {m \geq 0} \gamma^ {m} (P ^ {\pi_ {n}}) ^ {m} \left[ (P ^ {\pi^ {\Pi_ {\mathcal {B}}}}) ^ {n - k} + P ^ {\pi_ {n}} P ^ {\pi_ {n - 1}}... P ^ {\pi_ {k + 1}} \right] \epsilon_ {k} ^ {2} \\ = \frac {1 - \gamma}{2} \sum_ {m \geq 0} \gamma^ {m} \int_ {\mathcal {S} \times \mathcal {A}} \left[ \left(P ^ {\pi_ {n}}\right) ^ {m} \left(P ^ {\pi^ {\Pi_ {\mathcal {B}}}}\right) ^ {n - k} + \left(P ^ {\pi_ {n}}\right) ^ {m} P ^ {\pi_ {n}} P ^ {\pi_ {n - 1}} \dots P ^ {\pi_ {k + 1}} \right] \rho_ {0} \epsilon_ {k} ^ {2} \tag {48} \\ \end{array} +$$ + +As Eq. (40) shows, the policy set induced by DOGE is a Bellman-consistent constrained policy set $\Pi_{\mathcal{B}}$ defined in Definition 2. Therefore, let $\rho_0$ be the initial state-action distribution and $\mu$ denote the distribution of training data. For any policy $\pi_1,\pi_2,\dots,\pi_k\in \Pi_{\mathcal{B}}$ , the distribution after $k$ -th Bellman-consistent iteration is $\rho_{k} = \rho_{0}P^{\pi_{1}}P^{\pi_{2}}\ldots P^{\pi_{k}}$ , there exists some finite constants $l(k)$ , that $\mathcal{B}(\rho_k,\mu ,\mathcal{F},\pi)\leq l(k)$ holds. Then we can get the following inequalities. + +$$ +\| Q - \mathcal {T} ^ {\pi} Q \| _ {2, \rho_ {k}} ^ {2} \leq \| Q - \mathcal {T} ^ {\pi} Q \| _ {2, \mu} ^ {2} l (k) +$$ + +$$ +\int_ {\mathcal {S} \times \mathcal {A}} \rho_ {k} \epsilon^ {2} \leq \int_ {\mathcal {S} \times \mathcal {A}} \mu \epsilon^ {2} l (k) \quad (\epsilon = Q - \mathcal {T} ^ {\pi} Q) \tag {49} +$$ + +As a result, by applying the result of Eq. (49) to Eq. (48), we can get + +$$ +\int_ {\mathcal {S} \times \mathcal {A}} \rho_ {0} A _ {k} \epsilon_ {k} ^ {2} \leq \int_ {\mathcal {S} \times \mathcal {A}} (1 - \gamma) \sum_ {m \geq 0} \gamma^ {m} \epsilon_ {k} ^ {2} \mu l (m + n - k) \tag {50} +$$ + +Plugs Eq. (50) into Eq. 
(44) and leaves out $\left[\alpha_{n}A_{n}(Q^{\Pi_{\mathcal{B}}} - Q_{0})^{2}\right]$ in Eq. (44), we get + +$$ +\begin{array}{l} \lim _ {n \to \infty} L _ {2} ^ {2} \leq \lim _ {n \to \infty} \left[ \frac {2 \gamma (1 - \gamma^ {n + 1})}{(1 - \gamma) ^ {2}} \right] ^ {2} \left[ \sum_ {k = 0} ^ {n - 1} (1 - \gamma) \sum_ {m \geq 0} \gamma^ {m} l (m + n - k) \alpha_ {k} \| \epsilon_ {k} \| _ {2, \mu} ^ {2} \right] \\ = \lim _ {n \to \infty} \left[ \frac {2 \gamma (1 - \gamma^ {n + 1})}{(1 - \gamma) ^ {2}} \right] ^ {2} \left[ \frac {1}{1 - \gamma^ {n + 1}} \sum_ {k = 0} ^ {n - 1} (1 - \gamma) ^ {2} \sum_ {m \geq 0} \gamma^ {m + n - k - 1} l (m + n - k) \| \epsilon_ {k} \| _ {2, \mu} ^ {2} \right] \\ \leq \lim _ {n \rightarrow \infty} \left[ \frac {2 \gamma (1 - \gamma^ {n + 1})}{(1 - \gamma) ^ {2}} \right] ^ {2} \left[ \frac {1}{1 - \gamma^ {n + 1}} L (\Pi_ {\mathcal {B}}) ^ {2} \sup _ {k \geq 0} \| \epsilon_ {k} \| _ {2, \mu} ^ {2} \right] \\ = \left[ \frac {2 \gamma}{(1 - \gamma) ^ {2}} \right] ^ {2} L \left(\Pi_ {\mathcal {B}}\right) ^ {2} \sup _ {k \geq 0} \| \epsilon_ {k} \| _ {2, \mu} ^ {2} \tag {51} \\ \end{array} +$$ + +where, $L(\Pi_{\mathcal{B}}) = \sqrt{(1 - \gamma)^2\sum_{k=1}^{\infty}k\gamma^{k-1}l(k)}$ . Then, we can bound $L_2$ by + +$$ +\lim _ {n \rightarrow \infty} L _ {2} \leq \frac {2 \gamma}{(1 - \gamma) ^ {2}} L \left(\Pi_ {\mathcal {B}}\right) \sup _ {k \geq 0} \| \epsilon_ {k} \| _ {\mu} \tag {52} +$$ + +With the upper bound of $L_{1}$ and $\lim_{n\to \infty}L_2$ , we can complete the proof by adding these two terms together. 
+ +$$ +\lim _ {n \rightarrow \infty} \| Q ^ {*} - Q ^ {\pi_ {n}} \| _ {\rho_ {0}} \leq \frac {2 \gamma}{(1 - \gamma) ^ {2}} \left[ L \left(\Pi_ {\mathcal {B}}\right) \sup _ {k \geq 0} \| \epsilon_ {k} \| _ {\mu} + \frac {1 - \gamma}{2 \gamma} \alpha \left(\Pi_ {\mathcal {B}}\right)\right] \tag {53} +$$ + +# E IMPLEMENTATION DETAILS + +DOGE can build on top of standard online actor-critic algorithms such as TD3(Fujimoto et al., 2018) and SAC(Haarnoja et al., 2018). We choose TD3 as our base because of its simplicity compared to other methods. We build DOGE on top of TD3 by simply plugging the state-conditioned distance function as a policy regularization term during policy training process. Then, the learning objective of policy $\pi$ in Eq. (7) can be formulated as: + +$$ +\pi = \arg \max _ {\pi} \min _ {\lambda} \mathbb {E} _ {s \sim \mathcal {D}} [ \beta Q (s, \pi (s)) - \lambda (g (s, \pi (s)) - G) ] \quad \text {s . t .} \lambda \geq 0 \tag {54} +$$ + +The $Q$ function, policy and state-conditioned distance function networks are represented by 3 layers ReLU activated MLPs with 256 units for each hidden layer and are optimized by Adam optimizer. In addition, we normalize each dimension of state to a standard normal distribution for Mujoco tasks. The hyperparameters of DOGE are listed in Table 2. + +Table 2: Hyperparameters of DOGE + +
HyperparametersValue
Shared parametersOptimizerAdam
StandardNormalize stateTrue for Mujoco
False for AntMaze
Batch size256
Layers3
Hidden dim256
TD3Actor learning rate3 × 10⁻⁴
Critic learning rate3 × 10⁻⁴ for Mujoco
1 × 10⁻³ for AntMaze
Discount factor γ0.99 for Mujoco
0.995 for AntMaze
Number of iterations10⁶
Target update rate τ0.005
Policy noise0.2
Policy noise clipping0.5
Policy update frequency2
State-Conditioned Distance FunctionLearning rate1 × 10⁻³ for Mujoco
1 × 10⁻⁴ for AntMaze
Number of noise actions N20
Number of iterations Ng10⁵ for Mujoco
10⁶ for AntMaze
DOGEα{7.5, 17.5} Mujoco
{5, 10, 70} AntMaze
Lagrangian multiplier λclipped to [1, 100]
λ learning rate3e-4
+ +# E.1 TD3'S IMPLEMENTATION DETAILS + +For the choice of the Critic learning rate and discount factor $\gamma$ , we find that for AntMaze tasks, a high Critic learning rate can improve the stability of the value function during the training process. This may be because the AntMaze tasks require the value function to perform more dynamic programming steps to "stitch" suboptimal trajectories than Mujoco tasks do. Therefore, we choose $1 \times 10^{-3}$ and 0.995 as the Critic learning rate and discount factor $\gamma$ for AntMaze tasks, respectively. The other implementation details, such as the policy noise scale and policy noise clipping, are the same as the authors' implementation (Fujimoto et al., 2018). + +# E.2 STATE-CONDITIONED DISTANCE FUNCTION'S IMPLEMENTATION DETAILS + +We sample $N = 20$ noise actions from a uniform distribution that covers the full action space to approximate the estimation value in Eq. (4). We find $N = 20$ balances the computational complexity and estimation accuracy, and matches the number of samples used in CQL (Kumar et al., 2020b). The ablation of $N$ can be found in Fig. 15. The practical training objective of the state-conditioned distance function is as follows: + +$$
+\min _ {g} \mathbb {E} _ {(s, a) \in \mathcal {D}, \hat {a} _ {i} \sim \mathrm {Unif} (\mathcal {A})} \left[ \frac {1}{N} \sum_ {i = 1} ^ {N} \left[ \| a - \hat {a} _ {i} \| - g (s, \hat {a} _ {i}) \right] ^ {2} \right] \tag {55} +$$ + +We find that a wider sample range than the max action space $[-a_{\mathrm{max}}, a_{\mathrm{max}}]$ is helpful to characterize the geometry of the full offline dataset. This is because some actions in the offline dataset lie at the boundary of the action space, which can only be sampled with little probability when sampling from a narrow distribution. In this case, the noise actions may not cover the geometry information near the boundary. 
Therefore, we sample noise actions from a uniform distribution that is 3 times wider than the max action space, i.e., $\hat{a} \sim \text{Unif}[-3a_{\mathrm{max}}, 3a_{\mathrm{max}}]$ . For the learning rate, we find that a high learning rate enables a stable training process in Mujoco tasks. Therefore, we choose $1 \times 10^{-3}$ and $1 \times 10^{-4}$ as the distance function learning rate for Mujoco and AntMaze, respectively. We also observe that for Mujoco tasks, $10^{5}$ iterations can already produce a relatively good state-conditioned distance function, and training for more steps does not hurt the final results. To reduce computation, we only train the state-conditioned distance function for $10^{5}$ steps for Mujoco tasks. + +# E.3 HYPERPARAMETER TUNING OF DOGE + +The scale of $\alpha$ determines the strength of the policy constraint. We tune $\alpha$ to balance the trade-off between policy constraint and policy improvement. It is worth mentioning that $\alpha$ is tuned among only 5 candidate values for 20 tasks (17.5 for hopper-m, hopper-m-r and all Mujoco random datasets; 7.5 for other Mujoco datasets; 5 for antmaze-u; 10 for antmaze-u-d; 70 for other AntMaze tasks). This is acceptable in offline policy tuning following (Kumar et al., 2019; Brandfonbrener et al., 2021). To ensure numerical stability, we clip the Lagrangian multiplier $\lambda$ to [1, 100]. We also find a large initial $\lambda$ enables stable training for Mujoco tasks but slows down AntMaze training. Therefore, the initial value of the Lagrangian multiplier $\lambda$ is 5 for Mujoco and 1 for AntMaze tasks, respectively. + +# E.4 PSEUDOCODE OF DOGE + +The pseudocode of DOGE is listed in Algorithm 1. Changes we make based on TD3 (Fujimoto et al., 2018) are marked in red. The only modification is the training process of the additional state-conditioned distance function and the constrained actor update. 
We can perform 1M training steps on one RTX 3080Ti GPU in less than $50\mathrm{min}$ for Mujoco tasks and 1h $40\mathrm{min}$ for AntMaze tasks. + +Algorithm 1 Our implementation for DOGE +Require: Dataset $\mathcal{D}$ . State-conditioned distance network $g_{\psi}$ . Policy network $\pi_{\phi}$ and target policy network $\pi_{\phi^{\prime}}$ with $\phi^{\prime}\gets \phi$ . Value network $Q_{\theta_i},i = 1,2$ and target value network $Q_{\theta_i'}$ , $i = 1,2$ with $\theta_i^\prime \leftarrow \theta_i$ . State-conditioned distance network training steps $N_{g}$ . Policy update frequency $m$ . +1: for $t = 0,1,\dots ,M$ do +2: Sample mini-batch transitions $\{(s_i,a_i,r_i,s_i')\} \sim \mathcal{D}$ +3: if $t < N_g$ then +4: State-Conditioned Distance Function Update: Update $\psi$ as Eq. (55) shows. +5: end if +6: Critic Update: Update $\theta_{i}$ using the policy evaluation method in TD3. +7: if $t$ mod $m = 0$ then +8: Constrained Actor Update: Update $\phi ,\lambda$ via Eq. (54). +9: Update target networks: $\theta_i^\prime \gets \tau \theta_i + (1 - \tau)\theta_i^\prime$ , $\phi^{\prime}\gets \tau \phi +(1 - \tau)\phi^{\prime}$ +10: end if +11: end for + +# E.5 EXPERIMENT SETUP FOR THE IMPACT OF DATA GEOMETRY ON DEEP $Q$ FUNCTIONS + +We consider a one-dimensional random walk task with a fixed horizon (50 steps per episode), where the agent at each step can move in the range of $[-1, +1]$ and the state space is the interval $[-10, 10]$ . The destination is located at $s = 10$ . The closer the agent is to the destination, the larger the reward it can get. The discount factor $\gamma = 0.9$ . The reward function is defined as follows: + +$$ +r = \frac {400 - (s^{\prime} - 10)^{2}}{400} \tag {56} +$$ + +We generate offline datasets with different geometry and train the agent based on these datasets. Each synthetic dataset consists of 200 transition steps. We get the approximated $Q$ value $\hat{Q}$ by training TD3 for $10^{4}$ steps on each dataset. 
The learning rates of the Actor and Critic networks are both $10^{-3}$ . The other implementation details are the same as the implementation of the original TD3 (Fujimoto et al., 2018). The true $Q$ function can be obtained by Monte-Carlo estimation. We find that the near-destination states exhibit higher approximation error than states far away from the destination, because the scale of the true $Q$ value near the destination is large. To alleviate the impact of the $Q$ value scale on the approximation error, we define the relative approximation error as follows: + +$$ +\hat {\epsilon} (s, a) = \epsilon (s, a) - \min _ {a} \epsilon (s, a) \tag {57} +$$ + +where $\epsilon(s,a) = \hat{Q}(s,a) - Q(s,a)$ . The relative error in the above definition eliminates the effect of different states on the approximation error and can capture the over-estimation error that we care about. We plot the relative approximation error of deep $Q$ functions with different random seeds and data geometry in Fig. 13. + +# F ADDITIONAL EXPERIMENT RESULTS + +# F.1 COMPARISON OF GENERALIZATION ABILITY + +We consider the well-known AntMaze task in the D4RL benchmark (Fu et al., 2020), where an ant needs to navigate from the start to the destination in a large maze. The trajectories with coordinates at $x \times y \in [4,13] \times [7,9] \cup [11.5,20.5] \times [11,13]$ in AntMaze medium tasks and $x \times y \in [10.5,21] \times [7,9] \cup [19,29.5] \times [15,17]$ in AntMaze large tasks are clipped, as Fig. 8 shows. + +![](images/69fe1100f4a4cbdf16d46648b59017a703d00f054610fcfe5e174067a6e3fb48.jpg) +(a) Modified Medium AntMaze + +![](images/3863b685c8b28b2a7e4ffa25badcf673b66643b41bc4d5bd7adde3e41ba70453.jpg) +(b) Modified Large AntMaze +Figure 8: The trajectories in the offline dataset are visualized in blue. Data transitions of two small areas on the critical pathways to the destination have been removed (red box). 
+ +The clipped data account for only about one-tenth of the original dataset and lie in close proximity to the original trajectories. Under these modified datasets, simply relying on "stitching" data transitions is not enough to solve the navigation problems. We evaluate a representative policy constraint method (TD3+BC (Fujimoto & Gu, 2021)), a value regularization method (CQL (Kumar et al., 2020b)), an in-sample learning method (IQL (Kostrikov et al., 2021b)) and DOGE (our method) on these modified datasets. The evaluation results before and after clipping the trajectories are listed in Table 3. The + +learning curves for the modified AntMaze medium and AntMaze large tasks are listed in Fig. 9 and Fig. 4. + +Observe in Table 3 that existing offline RL methods fail miserably and suffer from severe performance drops. By contrast, DOGE maintains competitive performance after the modification of the dataset and shows good generalization ability on unknown areas. + +Apart from the above experiments, we also evaluate DOGE when removing only one area: $[10.5, 21] \times [7, 9]$ or $[19, 29.5] \times [15, 17]$ for AntMaze-large datasets and $[4, 13] \times [7, 9]$ or $[11.5, 20.5] \times [11, 13]$ for AntMaze-medium datasets. The final results can be seen in Table 4. + +Table 3: The performance drop after removing the data on the only way to the destination. + +
Dataset typeTD3+BCCQLIQLDOGE(ours)
antmaze-m-p-v2full data065.2±4.870.4±5.380.6±6.5
miss data010.7±18.410.2±2.233.2±27.3
Performance drop ↓-84%86%59%
antmaze-m-d-v2full data054.0±11.774.6±3.277.6±6.1
miss data08.5±5.37.6±5.740.2±32.9
Performance drop ↓-84%90%48%
antmaze-l-p-v2full data018.8±15.343.5±4.548.2±8.1
miss data001.0±0.722.4±15.9
Performance drop ↓-100%98%54%
antmaze-l-d-v2full data031.6±9.545.6±7.636.4±9.1
miss data005.2±3.114.6±11.1
Performance drop ↓-100%89%60%
+ +![](images/2d14e1975493eec5b100880be7e7a0707be39792d61534412f50576c0d0e02f8.jpg) +Policy constraint + +![](images/fa0b632299ae9f8f3f4e4b912608dc3f50c1c492b4d50d180f4ea2388773158e.jpg) +Value regularization + +![](images/fe1bb5b8c998182507b7b084cb1d392a70ecc1ae990d388b900c8a987060c1fc.jpg) +In-sample learning + +![](images/5ed288c3b1348c5d6749ed5099c8ddf40e44722bece377c9730525858a93c3d7.jpg) +DOGE (Ours) + +![](images/1eab3984971d3da5a8ed4e393bc99b6de8e7748087575ab124b6a570e2f381bf.jpg) +Figure 9: Evaluation on TD3+BC(Fujimoto & Gu, 2021), CQL(Kumar et al., 2020b), IQL(Kostrikov et al., 2021b), and DOGE (ours) before and after removing the data shown in Fig.8a for AntMaze medium tasks. + +![](images/c11e43bdf8b2f8429a39dc237c7db5bd9305c216d7d67fb5f65586e1daba26c4.jpg) + +![](images/0b4be628e8803f3f87d2c5370b119c58ff5905d1cc2b7fa7f0b349850115b421.jpg) + +![](images/9a55cec306a559695fce48e3ee1e7ba2b8af7ba35beea46ece1cbbe66811c522.jpg) + +Table 4: Ablation for DOGE generalization with different removal areas. + +
DatasetFull datasetOne removalTwo removal
antmaze-m-p-v280.6±6.562.3±7.533.2±27.3
antmaze-m-d-v277.6±6.141.3±42.840.2±32.9
antmaze-l-p-v248.2±8.126.4±19.422.4±15.9
antmaze-l-d-v236.4±9.112.3±4.214.6±11.1
Total score242.8±29.8142.3±73.9110.4±87.2
+ +# F.2 ADDITIONAL COMPARISON WITH TD3+BC + +In this section, we further demonstrate the superiority of DOGE over our most related practical work TD3+BC (Fujimoto & Gu, 2021). One can find that the biggest difference between DOGE and TD3+BC lies in the policy constraint used for policy optimization: + +- TD3+BC: constrains the policy to minimize the MSE BC loss. +- DOGE: constrains the policy to minimize the learned state-conditioned distance function $g(s, a)$ . + +As discussed in Section 3.1, the learned distance function $g(s,a)$ can capture the global geometric information of the offline dataset, while the MSE BC loss can only provide local sample-to-sample regularization, which may be noisy, especially in datasets that contain low-quality samples. Taking Figure 10 as an illustration, under strict BC constraint, policy learning on noisy low-quality samples may provide contradicting learning signals to near-optimal samples, which can cause inferior policy performance and unstable training process. By contrast, the state-conditioned distance function $g(s,a)$ in DOGE is trained on the whole dataset and hence brings global geometric information, which is far more informative and stable as compared with the MSE BC loss. + +![](images/61c367d79e1e14f4df27054cd0558caf13e2c4e8cd4211c24596bc88cae3ca36.jpg) +(a) TD3+BC + +![](images/89ba3a7afafad8e81c7d2f1acc80924e841073b2a43ea614ecb116eab045c914.jpg) +(b) DOGE + +Figure 10: Illustrations of the differences between (a) the MSE BC constraint of TD3+BC and (b) the state-conditioned distance function constraint of DOGE. In (a), the MSE BC constraint in TD3+BC blindly enforces the imitation behavior on any data samples, which may lead to an inferior policy in the presence of noisy low-quality samples. In (b), the state-conditioned distance function $g(s,a)$ can provide more informative global dataset geometry information to guide the stable learning of the policy. 
+![](images/2717ce8236830b1fb75d049dd33db86dcd2847a1921796da5fc1663e20ed711a.jpg) +Low-quality Samples +MSE BC Constraints of Near-optimal Samples + +![](images/48382d6e18730127b150a123b45009acfa033e011c6d0fbb260efbb60bca7044.jpg) +Near-optimal Samples +Policy Outputs +g(s,a) Distance Function Values + +To better illustrate the superiority of DOGE over TD3+BC, we add extra comparative experiments with TD3+BC on a new set of mixed-quality datasets. In halfcheetah-random dataset, we add different proportions (1% to 20%) of the near-optimal halfcheetah-medium-expert dataset to form new mixed datasets and evaluate how TD3+BC and DOGE perform. See Figure 11 for detailed results. + +Figure 11 shows that DOGE enjoys more performance gains when the random dataset involves near-optimal data, while TD3+BC is heavily influenced by the local information from the larger proportion of the low-quality random data. Moreover, TD3+BC suffers from severe oscillation and training instability, while DOGE enjoys a stable training process due to the use of the more informative state-conditioned distance constraint that captures the overall dataset geometry. + +![](images/7fe3c3376a72cd45956ebc3962e3c7f4b2006cf1851b4714cc200ae968a49d28.jpg) +Figure 11: Comparisons between DOGE and TD3+BC on mixed datasets with different proportions of halfcheetah-medium-expert dataset added into halfcheetah-random dataset. Ratio- $1\%$ means $1\%$ medium-expert dataset is added into the original halfcheetah-random dataset. TD3+BC suffers severe oscillation and training instability, while DOGE enjoys stable training processes and substantial performance gains. 
+ +![](images/8e1aa2029da2ee5851ccf604d7c757833b62ad9cfe7ac8a738b75692476d4308.jpg) + +![](images/6f5b108ba7bbffb91f7a732c63c672976819249a7e3bd97ddc10ae2d5efabacd.jpg) + +![](images/5036814a0f6a941772b4948a362362c21b8b0999b661a847998aa7a19d7444a7.jpg) + +![](images/943b5c53751c20a7c1b8b0b7945e3ab481eadb25bde6ef4fcaf32c1d33871f8a.jpg) + +# F.3 COMPARISON WITH UNCERTAINTY-BASED METHODS + +We also compare DOGE with SOTA uncertainty-based offline RL approaches, including EDAC (An et al., 2021) and PBRL (Bai et al., 2021), on the more complex D4RL AntMaze tasks. The final results are presented in Table 5. Table 5 shows that the SOTA uncertainty-based methods are unable to provide reasonable performance on the difficult Antmaze tasks, even though they can achieve good performance on simpler MuJoCo tasks. A similar finding is also reported in a recent offline RL study (Anonymous, 2023). + +In the practical implementations of EDAC and PBRL, to obtain relatively accurate uncertainty measures and achieve reasonable performance, these methods typically need dozens of ensemble Q-networks, which can be quite costly and inefficient. Moreover, heavy hyperparameter tuning is also required for them to obtain the best performance. In contrast, our method quantifies the generalization ability of the Q-function from the perspective of dataset geometry and is trained using a simple regression loss in Eq. (4), which enjoys better training stability and simplicity. + +Table 5: Average normalized scores over 5 seeds on Antmaze tasks + +
DatasetEDACPBRLDOGE(Ours)
antmaze-u-v20097.0±1.8
antmaze-u-p-v20063.5±9.3
antmaze-m-p-v20080.6±6.5
antmaze-m-d-v20077.6±6.1
antmaze-l-p-v20048.2±8.1
antmaze-l-d-v20036.4±9.1
 + +# F.4 ADDITIONAL ANALYSIS ON DISTANCE FUNCTION + +We report the learning curves of the state-conditioned distance function $g(s, a)$ trained on different datasets (including hopper-m-v2, halfcheetah-m-v2, and walker2d-m-v2) in Figure 12. Our proposed state-conditioned distance function is learned through a simple regression task (Eq. (4)), which is very easy to train. Figure 12 shows that it reaches convergence within only 1K training steps on D4RL MuJoCo medium datasets. + +We also change the network configurations (i.e., number of hidden layers and hidden units) of the state-conditioned distance function $g(s, a)$ to investigate how the expressivity of $g$ influences the performance of the policy. Table 6 shows that DOGE achieves similar performance across different $g$ network configurations, indicating that DOGE is robust to model complexity and expressivity of the state-conditioned distance function. + +![](images/d342ac9f433f1f2e71f4bb00a94d05d5146b9e6cce31f52cc81029326576f62f.jpg) +Figure 12: Learning curves of the state-conditioned distance function $g(s, a)$ + +![](images/d9d446b0951fd812c5397d4784c7312867b933e2965029a5af41f85f20588e03.jpg) + +![](images/bf89404aced332aa669a577d33ebb36d479ddafa1a324f673d5947b607c471d3.jpg) + +Table 6: Normalized scores of DOGE trained on distance functions with different network configurations. [128, 128] means $g$ network has 2 hidden layers with 128 units. [256, 256, 256] means 3 hidden layers with 256 units. + +
Dataset[128, 128][256, 256][256, 256, 256]
hopper-m99.4101.498.6
halfcheetah-m47.446.945.3
walker2d-m85.386.486.8
+ +# F.5 ADDITIONAL EXPERIMENTS OF THE IMPACT OF DATA GEOMETRY ON DEEP $Q$ FUNCTIONS + +We run several experiments with different random seeds (see Figure 13). Although the approximation error pattern of different random seeds is not the same, they all perform in the same manner that deep $Q$ functions produce relatively low approximation error inside the convex hull of training data. We refer to this phenomenon as deep $Q$ functions interpolate well but struggle to extrapolate. + +![](images/3212392b8892505c92bc3cbd780bd02a3d760bd5bf9df9a82b3eb76e282bd1d8.jpg) + +![](images/b8ba27213572322fedc4476000c6c131036e4ea81e8674f9c1afed6a1ea24a70.jpg) + +![](images/4412252625722646c5d2b50bbb23d8842d7c850bca6bad501c1717b9439c85bb.jpg) + +![](images/352fc6c28bd5149b30ea4c07eab417b1bc5a8fb7dd0ab8f20216ccb1ee7247b3.jpg) + +![](images/d3e20f1b76060d0750c4bba328912261df74c45eb047a120a298f6d2665c4e3b.jpg) + +![](images/5e473fecd1c2cc1a3d7e05ee10cb0add89eafccf7315a8e0b0b85b6fcc0f24a0.jpg) + +![](images/d09492e5ef76cff94f6222beec87801b87c644ba724ef8751639a388f185c03e.jpg) +Figure 13: The figures above depict the effect of different data geometries on the final deep $Q$ functions approximation error. The training data are marked as white dots. + +![](images/620451d0408b6163526836a07de10d2121261d8e507efef5501bf6b7831d16c3.jpg) + +![](images/7f0b86b44bcac4cebf02f1e17b338b5a7be787041c4388c535dcbad3ba78fff8.jpg) + +# G ABLATIONS + +We conduct ablation studies on the effect of $\alpha$ in $\beta = \frac{\alpha}{\frac{1}{n}\sum_{i=1}^{n}|Q(s_i,a_i)|}$ (see Figure 14), the non-parametric threshold $G$ in Eq. (6) (see Figure 16) and the non-parametric number of noise actions $N$ to train state-conditioned distance function (see Figure 15) on the performance of the final algorithm. We also conduct ablation studies on the effect of $G$ on the Lagrangian multiplier $\lambda$ (see Figure 17). + +For $\alpha$ , we add or subtract 2.5 to the original value. 
For $N$ , we choose $N = 10, 20, 30$ to conduct experiments respectively. For $G$ , we choose $30\%$ , $50\%$ , $70\%$ , $90\%$ and $100\%$ upper quantile of the distance value in mini-batch samples and the results can be found in Table 7. + +Table 7: Ablations on G with different quantile. + +
DatasetG = 30%G = 50%G = 70%G = 90%G = 100%
hopper-r-v219.8±0.321.1±12.615.5±13.517.6±12.216.4±12.4
halfcheetah-r-v219.4±0.617.8±1.217.8±0.717.7±1.017.7±0.8
walker2d-r-v22.6±3.90.9±2.42.2±2.61.8±3.32.2±3.2
hopper-m-v244.6±5.798.6±2.199.4±0.491.5±9.932.9±54.3
halfcheetah-m-v241.3±1.245.3±0.646.0±0.146.0±0.846.1±0.5
walker2d-m-v283.7±7.586.8±0.887.3±1.669.9±28.984.2±1.0
hopper-m-r-v251.5±11.276.2±17.779.6±36.978.4±27.665.7±37.2
halfcheetah-m-r-v25.9±5.742.8±0.643.2±0.142.2±0.842.0±0.6
walker2d-m-r-v228.3±14.387.3±2.387.9±2.477.8±21.678.6±24.1
hopper-m-e-v261.7±10.4102.7±5.282.8±5.888.9±17.770.0±48.4
halfcheetah-m-e-v246.9±5.278.7±8.475.1±15.473.5±13.669.9±8.7
walker2d-m-e-v2110.5±0.7110.4±1.5111.1±0.5110.2±22.580.0±54.3
It can be seen from Table 7 that using different $G$ for different tasks may achieve even better performance. Particularly, for some datasets with diverse data distributions that need to find good data from suboptimal data, a more tolerant quantile (e.g., $G = 70\%$ ) can reasonably extend the feasible region and increase the opportunity to find the optimal policy, such as hopper-m-r, halfcheetah-m-r, walker2d-m-r, hopper-m-e, halfcheetah-m-e. However, an overly relaxed quantile (e.g., $G = 90\%$ and $100\%$ ) increases the risk of including problematic OOD actions in policy learning, causing performance drops due to value overestimation and high variance. + +By contrast, an overly restrictive quantile such as $G = 30\%$ can be over-conservative and cause significant constraint violations that impede policy learning, as constraint satisfaction is favored over the max-Q operation in most updates. This can be reflected in the additional results for the Lagrangian multiplier $\lambda$ (see Appendix E.2 for learning curves and Figure 11 for additional ablations), where $\lambda \rightarrow \infty$ for some tasks under $G = 30\%$ . This will cause the suboptimality gap $(\frac{1 - \gamma}{2\gamma}\alpha(\Pi_{\mathcal{D}}))$ in Theorem 3 to dominate the performance bound, leading to an inferior policy. + +As hyperparameter tuning in practical offline RL applications without online interaction is very difficult, to reduce the computational load, we set $G = 50\%$ as the default in a non-parametric manner, since it consistently achieves good performance, and is neither too conservative nor too aggressive for most tasks. + +Observe in Figure 14 that DOGE maintains similar performance as $\alpha$ changes on most Mujoco tasks. At the same time, we also observe that the effect of $N$ on the experiment is not obvious. Compared with $N$ and $\alpha$ , we find that $G$ has a more significant effect on the experimental results. 
Observe in Figure 16 that a small $G$ usually causes the policy set induced by DOGE to be too small to obtain near-optimal policy. By contrast, a large $G$ is not likely to cause excessive error accumulation and hence maintains relatively good performance. + +In addition, the ablation studies show that our method is hyperparameter-robust and maintains good performance with changes in hyperparameters. + +![](images/b497c0689940a9f9426b7cb12c8ffb4ea614502dcd3f02d84d4560c0f7dbc628.jpg) + +![](images/8351bb38fc56653a03e65581991794509be44a76e45bfa6159db42132ca99d2c.jpg) + +![](images/2a26e65bd3e1194986a10158a4a7aa95bcfd4e458ee6c9468195c450d0870cdb.jpg) + +![](images/7a5bd1f031752dbe32eb8381a22037d4ecb9af59fe934b37e6624c8cf6ac171f.jpg) + +![](images/d6ddab1808b5be419a0818bc318d02e5be9727792a56651bfe6fb6c4c70bd5d7.jpg) + +![](images/eaa86913dfe61325f25b4691e42eb46ce85f8776970d04b33599ed4459860b69.jpg) + +![](images/9ec4541ddd94166d6bb8214c4232a40123af9ded76f5cd20ca6e0b44c416af3b.jpg) + +![](images/7bfc9c772203b3c9e1ad705a66c213b6dd9ef30790615625735db3dd02a86b9a.jpg) + +![](images/fe6aa72c48fe7ed2bdfb6ba6e57b07d802cb8c43d45e4920168a2d14b1ac04a2.jpg) +Figure 14: Ablation for $\alpha$ . Error bars indicate min and max over 5 seeds. 
+ +![](images/58d7058bb3e951940f73b53cd6e8345ffd13594d412ee89bfc0e38e288880812.jpg) + +![](images/d8b0ec68d0da49a551e8df76c2fe8a622e9e1a6c98efafbbbf3c70d6e2fae92f.jpg) + +![](images/d860b015b309b6776eba8bd564552ab8f708a3be73624485f8ec71c9f76ec9b1.jpg) + +![](images/1cbc0d576afac3fc3112ce9401bd9388afbb211cc32d286ce4bf7876ee8658a8.jpg) + +![](images/d1beb6a7e62f0a6d9207e577bb4d181d1375a289871167a59dafa665549874ec.jpg) + +![](images/8f127cc4d24c131ed087b453c5806d833a1b1ae004f7a23828861fef0774c6ff.jpg) + +![](images/d2823754ccef8919b862b94b7298b52eeec33031f4fdd12e075c157ce282323a.jpg) + +![](images/8acb23bf4361aab8395a3bda4df853ed734977366b215cde5ab9242d79138697.jpg) + +![](images/0f5c483ba0c4d6eff375c65fe8e84a933f1b2dbcd7fb5e674e1efdcee7b4962f.jpg) + +![](images/eaeeb76c39862eecfec518fdbaae1782fb1e1c0987ce8aed42a924d373d96cbb.jpg) + +![](images/44744c57079d49d80a20600f99478331e06ae50f5c3ef96d4b4e31be7446ade9.jpg) + +![](images/3a0f6ab9a095152a9eaeeca45ceb9568a550aaa8de4e113425f5ba431b9878dc.jpg) +Figure 15: Ablation for $N$ . Error bars indicate min and max over 5 seeds. + +![](images/5605b814f0d0e2a7f83bbcd0c1d030976969d00556e713784947a19096184e2e.jpg) + +![](images/e132fcf9f153c1a9d11093ef758e00520cb6bccc2502ce76677bbd9f275bdca2.jpg) + +![](images/7a4439af88b2668f7831e239a87c90597aa7ac1c7c446437d087a7b832d6ea11.jpg) + +# H LEARNING CURVES + +The learning curves for Mujoco and AntMaze tasks are listed in Fig. 18 and Fig.19. The learned policies are evaluated for 10 episodes and 100 episodes each seed for Mujoco and AntMaze tasks, respectively. For AntMaze tasks, we subtract 1 from rewards for the AntMaze datasets following (Kumar et al., 2020b; Kostrikov et al., 2021b). 
+ +![](images/1117821df210f570b2e664f0450868a4667351ef55d3c52ead27ec675572ca33.jpg) + +![](images/9db482154a813d8bd0b5a284eac4c62a481620938fd356ced920f6d1be4af221.jpg) + +![](images/3802b5bdb291a351f5ce1714eaae0ee7f4f86f8f9ffe76c88af102f8832f51d2.jpg) + +![](images/b31f484605fe53d72a3e3ee21b4d4aa09a7285abc46d2956893d4bd9525ccfd5.jpg) + +![](images/9a97af5faf44a23b5c803e4c54f916eae78f27e9ca6ff09ecf3fe5977f745481.jpg) + +![](images/e0ee746721b0ed9d87e215815f15bb47d8c41d7f7aba2d1702f0a58f07e68809.jpg) + +![](images/b57e91459af9580d4caafe5823dac513ca6f3e3893d474db6647c92a3ebaf3e6.jpg) + +![](images/be21adeab67bd19a9ace1fde4861e0175708c8d611007c33b6578958cfd9209a.jpg) + +![](images/7dd899e830ab870b29b7ae0b3198539b68109a479b607697b922ed2249fcdbaa.jpg) +Figure 16: Ablation for $G$ . Error bars indicate min and max over 5 seeds. + +![](images/74245e800fe04cee8de6c3219852adf7a4dc6d1a3560706ca47f41e71e5cbb5d.jpg) + +![](images/ae96491b9731557ead454f4dafd177b210199887f65c5f489f7fcf46fc143048.jpg) + +![](images/29f4c93235d6c8d0be0f3b30cdec05ee62c441246ecf07dfe8599845f7f35381.jpg) + +![](images/e11f11c11388a51480f01ed44bdd7a5941a63b64daac9d701e33748f08ff5e25.jpg) + +![](images/e3d5b4359c7655bf7b27481c171714cc3c30045cb61713434320abd477da2c5b.jpg) + +![](images/8355d0157f13aedcf0cb2de6c8216dfec88bfbe759678989f7f8aed0c3da0d2a.jpg) + +![](images/6f233afe55693ae2fe42ba1729c80bf145c224c4caa5dfbddb05e52661f78425.jpg) + +![](images/4683367b1ce613a98c64a41d95bc1db19a242abd078cd80308f22a0ab87214ff.jpg) + +![](images/17416b7a0ca6d681161ea5f3b61b662a6ca26fc673dc3ba0c07dcbef99f94360.jpg) + +![](images/058bbaebbeab618ce0ec96dda0a71064c2297a0a87d63a5878b4c81db6f8306a.jpg) + +![](images/fb7e300fab8d85f1c91880555a47659b821a19f3d771603cceab4590f426e146.jpg) + +![](images/efccd5cd0f76fd0a1bc3b8bc98c66bdf81e63747a54314c8acaa22a9b7195e71.jpg) +Figure 17: Ablation for $\lambda$ . Error bars indicate min and max over 5 seeds. 
+ +![](images/af2fb0b9d1205892ae42ffbd00680021cfa767cefc5cb12089de3a4a1bf07fea.jpg) + +![](images/cf1c6b38bfcfbb7d032ea1d1d09353d63088df2d16d71cd391659076602bb430.jpg) + +![](images/77e50078ab69603957982726b54b65e0f08e5dc0bac8e12a7db922b3ef85be8c.jpg) + +![](images/05c00a74cbaca202b5dd3cfc09a293da88e48342ce3473c284060cd3311c3ceb.jpg) + +![](images/ee1be08b68ad7bd492e4a1c657ab3fa14b37a945bf2de22358a7b523588133de.jpg) + +![](images/3d136a128715dd8f3041a96b5778277af78e9dce2a4cae6e1c76b98605d5c26a.jpg) + +![](images/decb011829db7975397f4d76c29ed5ff1e979aad1d27b6cccfac52a836306072.jpg) + +![](images/70272e10ad7e942dfffb81e13addf8f85ba9701dcbdca4bfc427f46796ef7927.jpg) + +![](images/e3622fb9b64f98769333bd39649727ccdd770fd8689416e818d59730392266bc.jpg) + +![](images/1956bd406adef05af67e339711df4664bab1e2f7c06107688b80306b96ba556e.jpg) + +![](images/76b8a9ec80424135f56da53ad068a45784896cbe000b589b8ad6879dcd72f82b.jpg) + +![](images/d7d3bc5964c5ad8bda531cc85afd02938802605930b143e30047ccfdd10ee044.jpg) +Figure 18: Learning curves for Mujoco Tasks. Error bars indicate min and max over 5 seeds. + +![](images/bed43712c3cb41198a3791a5420fb53d68a2e7aa47c2a30da8383ee8ff90f474.jpg) + +![](images/0830b54f912ce2aabd99b223b5df5324ee117dd4bd53121bdfe263a86d4900bd.jpg) + +![](images/a7a4c340dc13c0cb909d3a5c3907a71d7cb2882ef8b9ef636fae79474a9a7883.jpg) + +![](images/e5788a93c10485df0265c246a4fe50417bd5dd3046c834d39c9af4c1dd600c57.jpg) + +![](images/bc4ad5d304127be2ef67f250649dfda7f2d919b476dbee94a1afd4c57783dfec.jpg) + +![](images/8549272511775042df6c7cfedae412646a9423036c4449f8740b6baa92d77462.jpg) + +![](images/065d8fd34f195f971be35b7f5c03ea4a0b73afce9f3e350688df66e27371901d.jpg) +Figure 19: Learning curves for AntMaze Tasks. Error bars indicate min and max over 5 seeds. 
+ +![](images/6a1cae0beaaa1b4d765b5adbd7fbb3191dab2c1532ebee470990087c56c99bf7.jpg) + +![](images/2586130a82e9ddb10ac8b5c495de5689e709ef2991573dad885b84f1ac40e0a4.jpg) \ No newline at end of file diff --git a/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/images.zip b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..1fcc902b91d5674de5a3d579e46fcc6c646e5360 --- /dev/null +++ b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b74340c4066ee001eb9125ccceefeeb2c77ecf2cbd32ef5763d98af33fe0bf4 +size 2696669 diff --git a/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/layout.json b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..944ea78dfc93056baa5aee52e8af9e956c1a77dd --- /dev/null +++ b/2023/When Data Geometry Meets Deep Function_ Generalizing Offline Reinforcement Learning/layout.json @@ -0,0 +1,34221 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 506, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 506, + 116 + ], + "type": "text", + "content": "WHEN DATA GEOMETRY MEETS DEEP FUNCTION: GENERALIZING OFFLINE REINFORCEMENT LEARNING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "spans": [ + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "text", + "content": "Jianxiong Li" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "inline_equation", + "content": 
"^{1}" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "text", + "content": ", Xianyuan Zhan" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "text", + "content": ", Haoran Xu" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "text", + "content": ", Xiangyu Zhu" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "text", + "content": ", Jingjing Liu" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "text", + "content": " & Ya-Qin Zhang" + }, + { + "bbox": [ + 110, + 133, + 527, + 147 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 147, + 430, + 169 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 112, + 147, + 430, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 147, + 430, + 158 + ], + "spans": [ + { + "bbox": [ + 112, + 147, + 430, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 147, + 430, + 158 + ], + "type": "text", + "content": " Institute for AI Industry Research (AIR), Tsinghua University, Beijing, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 158, + 363, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 158, + 363, + 169 + ], + "spans": [ + { + "bbox": [ + 112, + 158, + 363, + 169 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 112, + 158, + 363, + 169 + ], + "type": "text", + "content": " Shanghai Artificial Intelligence Laboratory, 
Shanghai, China" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 112, + 170, + 490, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 170, + 490, + 180 + ], + "spans": [ + { + "bbox": [ + 112, + 170, + 490, + 180 + ], + "type": "text", + "content": "li-jx21@mails.tsinghua.edu.cn, zhanxianyuan@air.tsinghua.edu.cn" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 276, + 209, + 334, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 209, + 334, + 220 + ], + "spans": [ + { + "bbox": [ + 276, + 209, + 334, + 220 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "spans": [ + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "type": "text", + "content": "In offline reinforcement learning (RL), one detrimental issue to policy learning is the error accumulation of deep " + }, + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "type": "text", + "content": " function in out-of-distribution (OOD) areas. Unfortunately, existing offline RL methods are often over-conservative, inevitably hurting generalization performance outside data distribution. In our study, one interesting observation is that deep " + }, + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 140, + 234, + 471, + 411 + ], + "type": "text", + "content": " functions approximate well inside the convex hull of training data. Inspired by this, we propose a new method, DOGE (Distance-sensitive Offline RL with better Generalization). 
DOGE marries dataset geometry with deep function approximators in offline RL, and enables exploitation in generalizable OOD areas rather than strictly constraining policy within data distribution. Specifically, DOGE trains a state-conditioned distance function that can be readily plugged into standard actor-critic methods as a policy constraint. Simple yet elegant, our algorithm enjoys better generalization compared to state-of-the-art methods on D4RL benchmarks. Theoretical analysis demonstrates the superiority of our approach to existing methods that are solely based on data distribution or support constraints. Code is available at https://github.com/Facebear-ljx/DOGE." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 434, + 206, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 434, + 206, + 445 + ], + "spans": [ + { + "bbox": [ + 106, + 434, + 206, + 445 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 458, + 506, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 458, + 506, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 458, + 506, + 559 + ], + "type": "text", + "content": "Offline reinforcement learning (RL) provides a new possibility to learn optimized policies from large, pre-collected datasets without any environment interaction (Levine et al., 2020). This holds great promise to solve many real-world problems when online interaction is costly or dangerous yet historical data is easily accessible (Zhan et al., 2022). However, the optimization nature of RL, as well as the need for counterfactual reasoning on unseen data under offline setting, have caused great technical challenges for designing effective offline RL algorithms. 
Evaluating value function outside data coverage areas can produce falsely optimistic values; without corrective information from online interaction, such estimation errors can accumulate quickly and misguide policy learning process (Van Hasselt et al., 2018; Fujimoto et al., 2018; Kumar et al., 2019)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 563, + 506, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 506, + 674 + ], + "type": "text", + "content": "Recent model-free offline RL methods investigate this error accumulation challenge in several ways: 1) Policy Constraint: directly constraining learned policy to stay inside distribution, or with the support of dataset (Kumar et al., 2019); 2) Value Regularization: regularizing value function to assign low values at out-of-distribution (OOD) actions (Kumar et al., 2020b); 3) In-sample Learning: learning value function within data samples (Kostrikov et al., 2021b) or simply treating it as the value function of behavioral policy (Brandfonbrener et al., 2021). All three schools of methods share similar traits of being conservative and omitting evaluation on OOD data, which brings benefits of minimizing model exploitation error, but at the expense of poor generalization of learned policy in OOD regions. Thus, a gaping gap still exists when such methods are applied to real-world tasks, where most datasets only partially cover state-action space with suboptimal policies." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 678, + 506, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 678, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 104, + 678, + 506, + 713 + ], + "type": "text", + "content": "Meanwhile, online deep reinforcement learning (DRL) that leverages powerful deep neural network (DNN) with optimistic exploration on unseen samples can yield high-performing policies with promising generalization performance (Mnih et al., 2015; Silver et al., 2017; Degrave et al., 2022;" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 721, + 206, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 721, + 206, + 732 + ], + "spans": [ + { + "bbox": [ + 118, + 721, + 206, + 732 + ], + "type": "text", + "content": "*Corresponding authors" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 123, + 79, + 208, + 165 + ], + "blocks": [ + { + "bbox": [ + 123, + 79, + 208, + 165 + ], + "lines": [ + { + "bbox": [ + 123, + 79, + 208, + 165 + ], + "spans": [ + { + "bbox": [ + 123, + 79, + 208, + 165 + ], + "type": "image", + "image_path": "969693e52ea1b5a561742329d160d9dba76c5fe630bbdf5ecdfd7236acf0572b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + 
"type": "image_body" + }, + { + "bbox": [ + 104, + 174, + 504, + 194 + ], + "lines": [ + { + "bbox": [ + 104, + 174, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 174, + 504, + 194 + ], + "type": "text", + "content": "Figure 1: Left: Visualization of AntMaze dataset. Data transitions of two small areas on the critical pathways to the destination have been removed (red box). Right: Performance of three SOTA offline RL methods." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 211, + 79, + 301, + 165 + ], + "blocks": [ + { + "bbox": [ + 211, + 79, + 301, + 165 + ], + "lines": [ + { + "bbox": [ + 211, + 79, + 301, + 165 + ], + "spans": [ + { + "bbox": [ + 211, + 79, + 301, + 165 + ], + "type": "image", + "image_path": "00f19f0a37f1972766c0bf4d1febc88cf192a7ea787ea37f46b857a71762b5fa.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 79, + 395, + 165 + ], + "blocks": [ + { + "bbox": [ + 306, + 79, + 395, + 165 + ], + "lines": [ + { + "bbox": [ + 306, + 79, + 395, + 165 + ], + "spans": [ + { + "bbox": [ + 306, + 79, + 395, + 165 + ], + "type": "image", + "image_path": "8c7cb325e06f0782b93d14c398a47000591da83dc955e55688ba29e1b3b17338.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 399, + 79, + 488, + 165 + ], + "blocks": [ + { + "bbox": [ + 399, + 79, + 488, + 165 + ], + "lines": [ + { + "bbox": [ + 399, + 79, + 488, + 165 + ], + "spans": [ + { + "bbox": [ + 399, + 79, + 488, + 165 + ], + "type": "image", + "image_path": "b7029b2d6fd977e225ef1f9c44d947476632d531aec6ffb20b26a7336166fdf1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 201, + 506, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, 
+ 201, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 506, + 279 + ], + "type": "text", + "content": "Packer et al., 2018). This staring contrast propels us to re-think the question: Are we being too conservative? It is well known that DNN has unparalleled approximation and generalization abilities, compared with other function approximators. These attractive abilities have not only led to huge success in computer vision and natural language processing (He et al., 2016; Vaswani et al., 2017), but also amplified the power of RL. Ideally, in order to obtain the best policy, an algorithm should enable offline policy learning on unseen state-action pairs that function approximators (e.g., " + }, + { + "bbox": [ + 104, + 201, + 506, + 279 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 201, + 506, + 279 + ], + "type": "text", + "content": " function, policy network) can generalize well, and add penalization only on non-generalizable areas." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 284, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 284, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 284, + 504, + 384 + ], + "type": "text", + "content": "However, existing offline RL methods heed too much conservatism on data-related regularizations, while largely overlooking the generalization ability of deep function approximators. Intuitively, let us consider the well-known AntMaze task in the D4RL benchmark (Fu et al., 2020), where an ant navigates from the start to the destination in a large maze. We observe that existing offline RL methods fail miserably when we remove only small areas of data on the critical pathways to the destination. As shown in Figure 1, the two missing areas reside in close proximity to the trajectory data. Simply \"stitching\" up existing trajectories as approximation is not sufficient to form a near-optimal policy at missing regions. 
Exploiting the generalizability of deep function approximators, however, can potentially compensate for the missing information." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 388, + 504, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 499 + ], + "type": "text", + "content": "In our study, we observe that the value function approximated by DNN can interpolate well but struggles to extrapolate (see Section 2.2). Such an \"interpolate well\" phenomenon is also observed in previous studies on the generalization of DNN (Haley & Soloway, 1992; Barnard & Wessels, 1992; Arora et al., 2019a; Xu et al., 2020; Florence et al., 2022). This finding motivates us to reconsider the generalization of function approximators in offline RL in the context of dataset geometry. Along this line, we discover that a closer distance between a training sample to the offline dataset often leads to a smaller value variation range of the learned neural network, which effectively yields more accurate inference of the value function inside the convex hull (formed by the dataset). By contrast, outside the convex hull, especially in those areas far from the training data, the value variation range usually renders too large to guarantee a small approximation error." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 504, + 506, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 504, + 506, + 603 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 506, + 603 + ], + "type": "text", + "content": "Inspired by this, we design a new algorithm DOGE (Distance-sensitive Offline RL with better Generalization) from the perspective of generalization performance of deep " + }, + { + "bbox": [ + 104, + 504, + 506, + 603 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 504, + 506, + 603 + ], + "type": "text", + "content": " function. We first propose a state-conditioned distance function to characterize the geometry of offline datasets, whose output serves as a proxy to the network generalization ability. The resulting algorithm learns a state-conditioned distance function as a policy constraint on standard actor-critic RL framework. Theoretical analysis demonstrates the superior performance bound of our method compared to previous policy constraint methods that are based on data distribution or support constraints. Evaluations on D4RL benchmarks validate that our algorithm enjoys better performance and generalization abilities than state-of-the-art offline RL methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 620, + 343, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 620, + 343, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 343, + 633 + ], + "type": "text", + "content": "2 DATA GEOMETRY VS. 
DEEP " + }, + { + "bbox": [ + 105, + 620, + 343, + 633 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 105, + 620, + 343, + 633 + ], + "type": "text", + "content": " FUNCTIONS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 645, + 181, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 645, + 181, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 181, + 656 + ], + "type": "text", + "content": "2.1 NOTATIONS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": "We consider the standard continuous action space Markov decision process (MDP) setting, which can be represented by a tuple " + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "(S, \\mathcal{A}, \\mathcal{P}, r, \\gamma)" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": " are the state and action space, " + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{P}(s'|s, a)" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": " is the transition dynamics, " + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "r(s, a)" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": " is a reward function, and " + 
}, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\gamma \\in [0,1)" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": " is a discount factor. The objective of the RL problem is to find a policy " + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\pi(a|s)" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": " that maximizes the expected cumulative discounted return, which can be represented by a " + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": " function " + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "Q_{\\theta}^{\\pi}(s, a) = \\mathbb{E}[\\sum_{t=0}^{\\infty} \\gamma^{t} r(s_{t}, a_{t}) | s_{0} = s, a_{0} = a, a_{t} \\sim \\pi(\\cdot | s_{t}), s_{t+1} \\sim \\mathcal{P}(\\cdot | s_{t}, a_{t})]" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": ". 
The " + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 666, + 504, + 733 + ], + "type": "text", + "content": " function is typically approximated by function" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 500, + 159 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 500, + 159 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 500, + 159 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 500, + 159 + ], + "type": "image", + "image_path": "6e9e3426dbdabdef6eeaa571b21dd63d5dd82524c444cb7cca9ab38614b521c8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 168, + 504, + 190 + ], + "lines": [ + { + "bbox": [ + 104, + 168, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 504, + 190 + ], + "type": "text", + "content": "Figure 2: Approximation error of deep " + }, + { + "bbox": [ + 104, + 168, + 504, + 190 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 168, + 504, + 190 + ], + "type": "text", + "content": " functions with different dataset geometry. Offline data are marked as white dots (Please refer to Appendix E.5 for detailed experimental setup)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 201, + 506, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 201, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 506, + 236 + ], + "type": "text", + "content": "approximators with learnable parameters " + }, + { + "bbox": [ + 104, + 201, + 506, + 236 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 201, + 506, + 236 + ], + "type": "text", + "content": ", such as deep neural networks. Under offline RL setting, we are only given a fixed dataset " + }, + { + "bbox": [ + 104, + 201, + 506, + 236 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 201, + 506, + 236 + ], + "type": "text", + "content": " and cannot interact further with the environment. Therefore, the parameters " + }, + { + "bbox": [ + 104, + 201, + 506, + 236 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 201, + 506, + 236 + ], + "type": "text", + "content": " are optimized by minimizing the following temporal difference (TD) error:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 167, + 239, + 505, + 259 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 239, + 505, + 259 + ], + "spans": [ + { + "bbox": [ + 167, + 239, + 505, + 259 + ], + "type": "interline_equation", + "content": "\\min _ {\\theta} \\mathbb {E} _ {(s, a, s ^ {\\prime}) \\in \\mathcal {D}} \\left[ \\left(r (s, a) + \\gamma \\mathbb {E} _ {a ^ {\\prime} \\sim \\pi (\\cdot | s ^ {\\prime})} \\left[ Q _ {\\theta^ {\\prime}} ^ {\\pi} \\left(s ^ {\\prime}, a ^ {\\prime}\\right) \\right]\\right) - Q _ {\\theta} ^ {\\pi} (s, a) \\right] ^ {2} \\tag {1}", + "image_path": "441c2bae62d38b9b7c4b2beb0e59ddcf8d5e0e4b8b309709302d070db02c942a.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 262, + 449, + 276 
+ ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 449, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 449, + 276 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 262, + 449, + 276 + ], + "type": "inline_equation", + "content": "Q_{\\theta'}^{\\pi}" + }, + { + "bbox": [ + 104, + 262, + 449, + 276 + ], + "type": "text", + "content": " is the target " + }, + { + "bbox": [ + 104, + 262, + 449, + 276 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 262, + 449, + 276 + ], + "type": "text", + "content": " function, which is a delayed copy of the current " + }, + { + "bbox": [ + 104, + 262, + 449, + 276 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 262, + 449, + 276 + ], + "type": "text", + "content": " network." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 288, + 275, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 288, + 275, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 275, + 298 + ], + "type": "text", + "content": "2.2 INTERPOLATE VS. EXTRAPOLATE" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": "Motivating examples. 
Let's first consider a set of simple one-dimensional random walk tasks with different offline datasets, where agents at each step can take an action to move in the range of " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "[-1, 1]" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": ", and the state space is a straight line ranging from " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "[-10, 10]" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": ". The destination is located at " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "s = 10" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": ". The closer to the destination, the larger reward the agent gets (i.e., " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "r = 1" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "s = 10" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "r = 0" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "s = -10" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": "). The approximation errors of the learned " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": " functions are visualized in Figure 2. 
Note that the approximation errors of the learned " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": " functions tend to be low at state-action pairs that lie inside or near the boundaries of the convex hull formed by the dataset. Under continuous state-action space, state-action pairs within the convex hull of the dataset can be represented in an interpolated manner (referred as interpolated data), i.e., " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "x_{in} = \\sum_{i=1}^{n} \\alpha_{i} x_{i}" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{n} \\alpha_{i} = 1" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "\\alpha_{i} \\geq 0" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "x_{i} = (s_{i}, a_{i}) \\in \\mathcal{D}" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": "; similarly, we can define the extrapolated data that lie outside the convex hull of the dataset as " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "x_{out} = \\sum_{i=1}^{n} \\beta_{i} x_{i}" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{n} \\beta_{i} = 1" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ 
+ 104, + 308, + 506, + 431 + ], + "type": "inline_equation", + "content": "\\beta_{i} \\geq 0" + }, + { + "bbox": [ + 104, + 308, + 506, + 431 + ], + "type": "text", + "content": " do not hold simultaneously." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 435, + 506, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 506, + 491 + ], + "type": "text", + "content": "We observe that the geometry of the datasets play a special role on the approximation error of deep " + }, + { + "bbox": [ + 104, + 435, + 506, + 491 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 435, + 506, + 491 + ], + "type": "text", + "content": " functions, or in other words, deep " + }, + { + "bbox": [ + 104, + 435, + 506, + 491 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 435, + 506, + 491 + ], + "type": "text", + "content": " functions interpolate well but struggle to extrapolate. This phenomenon is also reflected in studies on the generalization performance of deep neural networks under a supervised learning setting (Haley & Soloway, 1992; Barnard & Wessels, 1992; Arora et al., 2019a; Xu et al., 2020; Florence et al., 2022), but is largely overlooked in modern offline RL." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": "Theoretical explanations. 
Based on advanced theoretical machinery from the generalization analysis of DNN, such as neural tangent kernel (NTK) (Jacot et al., 2018), we can theoretically demonstrate that this phenomenon is also carried over to the offline RL setting for deep " + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": " functions. Define " + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "inline_equation", + "content": "\\operatorname{Proj}_{\\mathcal{D}}(x) \\coloneqq \\arg \\min_{x_i \\in \\mathcal{D}} \\| x - x_i \\|" + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": " (we denote " + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "inline_equation", + "content": "\\| x \\|" + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": " as Euclidean norm) as the projection operator that projects unseen data " + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": " to the nearest data point in dataset " + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": ". 
Theorem 1 gives a theoretical explanation of the \"interploate well\" phenomenon for deep " + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": " functions under the NTK assumptions (see Appendix B.2 for detailed proofs):" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "text", + "content": "Theorem 1. (Value difference of deep " + }, + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "text", + "content": " function for interpolated and extrapolated data). Under the NTK regime, given an unseen interpolated data " + }, + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "inline_equation", + "content": "x_{in}" + }, + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "text", + "content": " and an extrapolated data " + }, + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "inline_equation", + "content": "x_{out}" + }, + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "text", + "content": ", then the value difference of deep " + }, + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 576, + 505, + 611 + ], + "type": "text", + "content": " function for interpolated and extrapolated input data can be bounded as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 127, + 615, + 504, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 615, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 127, + 615, + 504, + 647 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| Q _ {\\theta} (x _ 
{i n}) - Q _ {\\theta} (\\mathrm {P r o j} _ {\\mathcal {D}} (x _ {i n})) \\| \\leq C _ {1} (\\sqrt {\\min (\\| x _ {i n} \\| , \\| \\mathrm {P r o j} _ {\\mathcal {D}} (x _ {i n}) \\|)} \\sqrt {d _ {x _ {i n}}} + 2 d _ {x _ {i n}}) \\\\ \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\| x _ {i n} \\| , \\| \\operatorname {P r o j} _ {\\mathcal {D}} \\left(x _ {i n}\\right) \\|\\right)} \\sqrt {B} + 2 B\\right) \\tag {2} \\\\ \\end{array}", + "image_path": "242ed5c854ceac9be72908a0521a328d7ddc7354f221fd9d3471c81d40f3c009.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 649, + 505, + 664 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 649, + 505, + 664 + ], + "spans": [ + { + "bbox": [ + 118, + 649, + 505, + 664 + ], + "type": "interline_equation", + "content": "\\| Q _ {\\theta} (x _ {o u t}) - Q _ {\\theta} (\\operatorname {P r o j} _ {\\mathcal {D}} (x _ {o u t})) \\| \\leq C _ {1} (\\sqrt {\\min (\\| x _ {o u t} \\| , \\| \\operatorname {P r o j} _ {\\mathcal {D}} (x _ {o u t}) \\|)} \\sqrt {d _ {x _ {o u t}}} + 2 d _ {x _ {o u t}}) \\quad (3)", + "image_path": "132c919c6471927f64ba88ee69e5d0f898e48a958c5a6a574e5f6e89cad38aa0.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "inline_equation", + "content": "d_{x_{in}} = \\| x_{in} - \\mathrm{Proj}_{\\mathcal{D}}(x_{in})\\| \\leq \\max_{x_i\\in \\mathcal{D}}\\| x_{in} - x_i\\| \\leq B" + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "inline_equation", + "content": "d_{x_{out}} = \\| x_{out} - \\mathrm{Proj}_{\\mathcal{D}}(x_{out})\\|" + }, + { + "bbox": [ + 104, + 
666, + 504, + 691 + ], + "type": "text", + "content": " are distances of " + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "inline_equation", + "content": "x_{in}" + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "inline_equation", + "content": "x_{out}" + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "text", + "content": " to the nearest data points in dataset " + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "inline_equation", + "content": "C_1" + }, + { + "bbox": [ + 104, + 666, + 504, + 691 + ], + "type": "text", + "content": " are finite constants." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "Theorem 1 shows that given an unseen input " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "Q_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " can be controlled by in-sample " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " value " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " and the distance " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": ". The smaller the distance, the more controllable the output of deep " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " functions. 
Therefore, because the distance to dataset is strictly bounded (at" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": "most " + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": " for interpolated data), the approximated " + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": " values at interpolated data as well as extrapolated data near the boundaries of the convex hull formed by the dataset cannot be too far off. 
Moreover, as " + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "inline_equation", + "content": "d_{x_{out}}" + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": " can take substantially larger values than " + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "inline_equation", + "content": "d_{x_{in}}" + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": ", interpolated data generally enjoys a tighter bound compared with extrapolated data, if the dataset only narrowly covers a large state-action space." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "text", + "content": "Empirical observations in Figure 2 and Theorem 1 both demonstrate that data geometry can induce different approximation error accumulation patterns for deep " + }, + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "text", + "content": " functions. While approximation error accumulation is generally detrimental to offline RL, a fine-grained analysis is missing in previous studies about where value function can approximate well. We argue that it is necessary to take data geometry into consideration when designing less conservative offline RL algorithms." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 203, + 354, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 203, + 354, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 354, + 216 + ], + "type": "text", + "content": "3 GENERALIZABLE OFFLINE RL FRAMEWORK" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 228, + 506, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 275 + ], + "type": "text", + "content": "In this section, we present our algorithm DOGE (Distance-sensitive Offline RL with better GEneralization). By introducing a specially designed state-conditioned distance function to characterize the geometry of offline datasets, we can construct a very simple, less conservative and also more generalizable offline RL algorithm upon standard actor-critic framework." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 285, + 319, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 319, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 319, + 298 + ], + "type": "text", + "content": "3.1 STATE-CONDITIONED DISTANCE FUNCTION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "text", + "content": "As revealed in Theorem 1, the sample-to-dataset distance plays an important role in measuring the controllability of " + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "text", + "content": " values. 
However, given an arbitrary state-action sample " + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "inline_equation", + "content": "(s,a)" + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "text", + "content": ", naively computing its distance to the closest data point in a large dataset can be costly and impractical. Ideally, we prefer to have a learnable distance function which also has the ability to reflect the overall dataset geometry. Based on this intuition, we design a state-conditioned distance function that can be learned in an elegantly simple supervised manner with desirable properties." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 378, + 506, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 506, + 412 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 506, + 412 + ], + "type": "text", + "content": "Specifically, we learn the state-conditioned distance function " + }, + { + "bbox": [ + 104, + 378, + 506, + 412 + ], + "type": "inline_equation", + "content": "g(s,a)" + }, + { + "bbox": [ + 104, + 378, + 506, + 412 + ], + "type": "text", + "content": " by solving the following regression problem, with state-action pairs " + }, + { + "bbox": [ + 104, + 378, + 506, + 412 + ], + "type": "inline_equation", + "content": "(s,a)\\sim \\mathcal{D}" + }, + { + "bbox": [ + 104, + 378, + 506, + 412 + ], + "type": "text", + "content": " and synthetic noise actions sampled from the uniform distribution over the full action space " + }, + { + "bbox": [ + 104, + 378, + 506, + 412 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 378, + 506, + 412 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 203, + 416, + 505, + 435 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 416, + 505, + 435 + ], + "spans": [ + { + "bbox": [ + 203, + 416, + 505, + 435 + ], + "type": 
"interline_equation", + "content": "\\min _ {g} \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\mathbb {E} _ {\\hat {a} \\sim U n i f (\\mathcal {A})} [ \\| a - \\hat {a} \\| - g (s, \\hat {a}) ] ^ {2} \\right] \\tag {4}", + "image_path": "fe2e79198dc28539d39522c904fc1c7ebd1b2bfb24c341102e6d98a97239e109.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "content": "In practical implementation, for each " + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "inline_equation", + "content": "(s,a)\\sim \\mathcal{D}" + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "content": ", we sample " + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "content": " noise actions uniformly in the action space " + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "content": " to train " + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "content": ". More implementation details can be found in Appendix E. Moreover, with the optimization objective defined in Eq. 
(4), we can show that the optimal state-conditioned distance function has two desirable properties (proofs can be found in Appendix C):" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 489, + 504, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 489, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 104, + 489, + 504, + 512 + ], + "type": "text", + "content": "Property 1. The optimal state-conditioned distance function of Eq. (4) is convex w.r.t. actions and is an upper bound of the distance to the state-conditioned centroid " + }, + { + "bbox": [ + 104, + 489, + 504, + 512 + ], + "type": "inline_equation", + "content": "a_{o}(s)" + }, + { + "bbox": [ + 104, + 489, + 504, + 512 + ], + "type": "text", + "content": " of training dataset " + }, + { + "bbox": [ + 104, + 489, + 504, + 512 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 489, + 504, + 512 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 148, + 516, + 505, + 546 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 516, + 505, + 546 + ], + "spans": [ + { + "bbox": [ + 148, + 516, + 505, + 546 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} g ^ {*} (s, \\hat {a}) = \\mathbb {E} _ {a \\sim U n i f (\\mathcal {A})} [ C (s, a) \\| \\hat {a} - a \\| ] \\\\ \\geq \\left\\| \\hat {a} - \\mathbb {E} _ {a \\sim U n i f (\\mathcal {A})} [ C (s, a) \\cdot a ] \\right\\| = \\left\\| \\hat {a} - a _ {o} (s) \\right\\|, \\quad \\forall \\hat {a} \\in \\mathcal {A}, s \\in \\mathcal {D} \\tag {5} \\\\ \\end{array}", + "image_path": "23a9466f40f4b169fe0c09600261122e7554a8ce5065f3b1f04eb26ed61a8b68.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + 
"type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "inline_equation", + "content": "C(s,a) = \\frac{\\mu(s,a)}{\\mathbb{E}_{a\\sim Unif(\\mathcal{A})}\\mu(s,a)}\\geq 0" + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "inline_equation", + "content": "\\mu (s,a)" + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "text", + "content": " is state-action distribution of dataset " + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "text", + "content": ". Given a state " + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "inline_equation", + "content": "s\\in \\mathcal{D}" + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "text", + "content": ", the state-conditioned centroid is defined as " + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "inline_equation", + "content": "a_{o}(s) = \\mathbb{E}_{a\\sim Unif(\\mathcal{A})}[C(s,a)\\cdot a]" + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "text", + "content": "-norm is convex and the non-negative combination of convex functions is still convex, " + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "inline_equation", + "content": "g^{*}(s,\\hat{a})" + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "text", + "content": " is also a convex function w.r.t. " + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "inline_equation", + "content": "\\hat{a}" + }, + { + "bbox": [ + 104, + 569, + 504, + 620 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 623, + 506, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 623, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 506, + 647 + ], + "type": "text", + "content": "Property 2. The negative gradient of the optimal state-conditioned distance function at an extrapolated action " + }, + { + "bbox": [ + 104, + 623, + 506, + 647 + ], + "type": "inline_equation", + "content": "\\hat{a}" + }, + { + "bbox": [ + 104, + 623, + 506, + 647 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 623, + 506, + 647 + ], + "type": "inline_equation", + "content": "-\\nabla_{\\hat{a}}g^{*}(s,\\hat{a})" + }, + { + "bbox": [ + 104, + 623, + 506, + 647 + ], + "type": "text", + "content": ", points inside the convex hull of the dataset." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "From Property 1, we can see that the optimal state-conditioned distance function characterizes data geometry and outputs an upper bound of the distance to the state-conditioned centroid of the training dataset. Property 2 indicates that if we use the learned distance function as a policy constraint, it can drive the learned policy to move inside the convex hull of training data. We visualize the value of the trained state-conditioned distance function in Figure 3. It is clear that the learned distance function can accurately predict the sample-to-dataset centroid distance. By utilizing such distance function, we can constrain the policy based on the global geometric information of training datasets. 
This" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 83, + 258, + 192 + ], + "blocks": [ + { + "bbox": [ + 116, + 83, + 258, + 192 + ], + "lines": [ + { + "bbox": [ + 116, + 83, + 258, + 192 + ], + "spans": [ + { + "bbox": [ + 116, + 83, + 258, + 192 + ], + "type": "image", + "image_path": "3af0690a0c24cc1b62b4de981c2f912d5e3a67c1cc78c32e1ac4d92a039d2026.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 197, + 225, + 207 + ], + "lines": [ + { + "bbox": [ + 143, + 197, + 225, + 207 + ], + "spans": [ + { + "bbox": [ + 143, + 197, + 225, + 207 + ], + "type": "text", + "content": "(a) Illustration of " + }, + { + "bbox": [ + 143, + 197, + 225, + 207 + ], + "type": "inline_equation", + "content": "g^{*}(s,a)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 264, + 83, + 341, + 138 + ], + "blocks": [ + { + "bbox": [ + 264, + 83, + 341, + 138 + ], + "lines": [ + { + "bbox": [ + 264, + 83, + 341, + 138 + ], + "spans": [ + { + "bbox": [ + 264, + 83, + 341, + 138 + ], + "type": "image", + "image_path": "fd84e0cbb87325f6c0b95df5f73e1f9da82216501e3997e72d8dcd90b7efb138.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 264, + 139, + 340, + 192 + ], + "blocks": [ + { + "bbox": [ + 264, + 139, + 340, + 192 + ], + "lines": [ + { + "bbox": [ + 264, + 139, + 340, + 192 + ], + "spans": [ + { + "bbox": [ + 264, + 139, + 340, + 192 + ], + "type": "image", + "image_path": "275170debf5794418b5549afc027ce1470e01658ad45a4b992cc8d2c3bbdbb8d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 220, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 251 + ], + "type": "text", + "content": "Figure 3: Illustration of the state-conditioned distance function. The output of the optimal distance function is the non-negative combination of the distances to all training data. " + }, + { + "bbox": [ + 104, + 220, + 504, + 251 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 220, + 504, + 251 + ], + "type": "text", + "content": " is the threshold in Eq. (6) In (b), Offline data are marked as white dots." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 343, + 84, + 420, + 138 + ], + "blocks": [ + { + "bbox": [ + 343, + 84, + 420, + 138 + ], + "lines": [ + { + "bbox": [ + 343, + 84, + 420, + 138 + ], + "spans": [ + { + "bbox": [ + 343, + 84, + 420, + 138 + ], + "type": "image", + "image_path": "40bab1ca52bc6f560c40f8f642154c8e7cfe04c50c9776938bea31bb380634a6.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 343, + 138, + 421, + 192 + ], + "blocks": [ + { + "bbox": [ + 297, + 197, + 482, + 207 + ], + "lines": [ + { + "bbox": [ + 297, + 197, + 482, + 207 + ], + "spans": [ + { + "bbox": [ + 297, + 197, + 482, + 207 + ], + "type": "text", + "content": "(b) Visualization of " + }, + { + "bbox": [ + 297, + 197, + 482, + 207 + ], + "type": "inline_equation", + "content": "g^{*}(s,a)" + }, + { + "bbox": [ + 297, + 197, + 482, + 207 + ], + "type": "text", + "content": " trained on diverse 2D datasets" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 343, + 138, + 421, + 192 + ], + "lines": [ + { + "bbox": [ + 343, + 138, + 421, + 192 + ], + "spans": [ + { + "bbox": [ + 343, + 138, + 421, + 192 + ], + "type": "image", + "image_path": "eb5443643c996d51665a7019ba4d509d77d7fb18ff2831eb3156d2b9bd697521.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 422, + 84, + 496, + 138 + ], + "blocks": [ + { + "bbox": [ + 422, + 84, + 496, + 138 + ], + "lines": [ + { + "bbox": [ + 422, + 84, + 496, + 138 + ], + "spans": [ + { + "bbox": [ + 422, + 84, + 496, + 138 + ], + "type": "image", + "image_path": "07e86dde83c3f006921d3e67958f083dab941b5b164314d67e3f2fbc8b33be4a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + 
"bbox": [ + 422, + 138, + 496, + 192 + ], + "blocks": [ + { + "bbox": [ + 422, + 138, + 496, + 192 + ], + "lines": [ + { + "bbox": [ + 422, + 138, + 496, + 192 + ], + "spans": [ + { + "bbox": [ + 422, + 138, + 496, + 192 + ], + "type": "image", + "image_path": "81f898b16f98f108db23152e53a13122211804c6c5620a2524eab7996ea88bcb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 307 + ], + "type": "text", + "content": "desirable property is non-obtainable by simply constraining the policy based on sample-to-sample distance such as the MSE loss between policy generated and dataset actions, which can only bring local geometric information. Moreover, the learned distance function can not only predict well at in-distribution states but also generalize well at OOD states." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 319, + 390, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 390, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 390, + 331 + ], + "type": "text", + "content": "3.2 DISTANCE-SENSITIVE OFFLINE REINFORCEMENT LEARNING" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 340, + 505, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 505, + 385 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 505, + 385 + ], + "type": "text", + "content": "Capturing the geometry of offline datasets, we now construct a minimalist distance-sensitive offline RL framework, by simply plugging the state-conditioned distance function as a policy constraint into standard online actor-critic methods (such as TD3 (Fujimoto et al., 2018) and SAC (Haarnoja et al., 2018)). 
This results in the following policy maximization objective:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 157, + 387, + 504, + 404 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 387, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 157, + 387, + 504, + 404 + ], + "type": "interline_equation", + "content": "\\pi = \\arg \\max _ {\\pi} \\mathbb {E} _ {s \\sim \\mathcal {D}, a \\sim \\pi (\\cdot | s)} [ Q (s, a) ] \\quad s. t. \\mathbb {E} _ {s \\sim \\mathcal {D}, a \\sim \\pi (\\cdot | s)} [ g (s, a) ] \\leq G \\tag {6}", + "image_path": "7bff9ba0f93b8dc18374d9f5bebb00773351953bd265e5c67f29fb49b114f9cc.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "text", + "content": " is a task-dependent threshold varying across tasks. 
In our method, we adopt a non-parametric treatment by setting " + }, + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "text", + "content": " as the mean output (50% quantile) of the learned distance function on the training dataset, i.e., " + }, + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{(s,a)\\sim \\mathcal{D}}[g(s,a)]" + }, + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "text", + "content": ", which is approximated over mini-batch samples to reduce computational complexity (see Appendix G for ablation on " + }, + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 407, + 504, + 462 + ], + "type": "text", + "content": "). The constrained optimization problem in Eq. (6) can be reformulated as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 152, + 465, + 504, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 465, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 152, + 465, + 504, + 482 + ], + "type": "interline_equation", + "content": "\\pi = \\arg \\max _ {\\pi} \\min _ {\\lambda} \\mathbb {E} _ {s \\sim \\mathcal {D}, a \\sim \\pi (\\cdot | s)} [ \\beta Q (s, a) - \\lambda (g (s, a) - G) ] \\quad s. t. 
\\lambda \\geq 0 \\tag {7}", + "image_path": "32e1e072e9f08ed66668d00520091aae9832150a227491d990a0076fa63f3ea6.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "text", + "content": " is the Lagrangian multiplier, which is auto-adjusted using dual gradient descent. Following TD3+BC (Fujimoto & Gu, 2021), " + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "text", + "content": " values are rescaled by " + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "inline_equation", + "content": "\\beta = \\frac{\\alpha}{\\frac{1}{n}\\sum_{i=1}^{n}|Q(s_i,a_i)|}" + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "text", + "content": " to balance " + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "text", + "content": " function maximization and policy constraint satisfaction, controlled by a hyperparameter " + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "text", + "content": ". To reduce computations, the denominator of " + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 486, + 504, + 555 + ], + "type": "text", + "content": " is approximated over mini-batch of samples. The resulting algorithm is easy to implement. 
In our experiments, we use TD3. Please refer to Appendix E for implementation details." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 568, + 376, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 568, + 376, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 568, + 376, + 578 + ], + "type": "text", + "content": "3.3 RELAXATION WITH BELLMAN-CONSISTENT COEFFICIENT" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 588, + 441, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 588, + 441, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 588, + 441, + 599 + ], + "type": "text", + "content": "3.3.1 BELLMAN-CONSISTENT COEFFICIENT AND CONSTRAINED POLICY SET" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 607, + 504, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 607, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 504, + 663 + ], + "type": "text", + "content": "The key difference between DOGE and other policy constraint methods lies in that DOGE relaxes the strong full coverage assumption1 on offline datasets and allows exploitation on generalizable OOD areas. To relax the unrealistic full-coverage assumption, we resort to a weaker condition proposed by (Xie et al., 2021a), the Bellman-consistent coefficient (Definition 1), to measure how well Bellman errors can transfer to different distributions (Theorem 2)." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "content": "Denote " + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\| f\\|_{2,\\mu}^2 \\coloneqq \\mathbb{E}_\\mu [\\| f\\|^2]" + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^\\pi Q" + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "content": " is the Bellman operator of policy " + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "content": ", defined as " + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^\\pi Q(s,a) \\coloneqq r(s,a) + \\gamma \\mathbb{E}_{a'\\sim \\pi (\\cdot |s'),s'\\sim \\mathcal{P}(\\cdot |s,a)}[Q(s',a')] \\coloneqq r(s,a) + \\gamma \\mathbb{P}^\\pi [Q(s',a')]" + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\mathbb{P}^\\pi [\\cdot ]" + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "content": " is the brief notation for " + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{a'\\sim \\pi (\\cdot |s'),s'\\sim \\mathcal{P}(\\cdot |s,a)}[\\cdot ]" + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "content": " is the function class of " + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 668, + 504, + 715 + ], + "type": "text", + "content": " networks. The Bellman-consistent coefficient is defined as:" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 719, + 504, + 735 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 719, + 504, + 735 + ], + "spans": [ + { + "bbox": [ + 116, + 719, + 504, + 735 + ], + "type": "inline_equation", + "content": "\\sup_{s,a}\\frac{v(s,a)}{\\mu(s,a)} < \\infty, v" + }, + { + "bbox": [ + 116, + 719, + 504, + 735 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 116, + 719, + 504, + 735 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 116, + 719, + 504, + 735 + ], + "type": "text", + "content": " are marginal distributions of the learned policy and the dataset (Le et al., 2019)." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "Definition 1. (Bellman-consistent coefficient). We define " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " to measure the distributional shift from an arbitrary distribution " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " to data distribution " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": ", w.r.t. 
" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 225, + 106, + 505, + 136 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 106, + 505, + 136 + ], + "spans": [ + { + "bbox": [ + 225, + 106, + 505, + 136 + ], + "type": "interline_equation", + "content": "\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) := \\sup _ {Q \\in \\mathcal {F}} \\frac {\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , v} ^ {2}}{\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , \\mu} ^ {2}} \\tag {8}", + "image_path": "900406cebd51f3b57ce7849a6fb14cf39460d992c295ffa5617463a455f4d805.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "text", + "content": "This definition captures the generalization performance of function approximation across different distributions. 
Intuitively, a small value of " + }, + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "inline_equation", + "content": "\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)" + }, + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "text", + "content": " means Bellman errors for policy " + }, + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "text", + "content": " can accurately transfer from distribution " + }, + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 148, + 504, + 204 + ], + "type": "text", + "content": ". This suggests that Bellman errors can transfer well between two distributions even if a large discrepancy exists, as long as the Bellman-consistent coefficient is small." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 209, + 489, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 489, + 221 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 489, + 221 + ], + "type": "text", + "content": "Based on Definition 1, we introduce the definition of Bellman-consistent constrained policy set." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "text", + "content": "Definition 2. (Bellman-consistent constrained policy set). 
We define the Bellman-consistent constrained policy set as " + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "text", + "content": ". The Bellman-consistent coefficient under the transition induced by " + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "text", + "content": " can be bounded by some finite constants " + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "inline_equation", + "content": "l(k)" + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 259, + 258, + 505, + 271 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 259, + 258, + 505, + 271 + ], + "spans": [ + { + "bbox": [ + 259, + 258, + 505, + 271 + ], + "type": "interline_equation", + "content": "\\mathcal {B} \\left(\\rho_ {k}, \\mu , \\mathcal {F}, \\pi\\right) \\leq l (k) \\tag {9}", + "image_path": "6f2570a265ba5493f3b06fb8d08f616d2c653906ff8359fbd99db1053cc31926.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 273, + 504, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 273, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 504, + 297 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 273, + 504, + 297 + ], + "type": "inline_equation", + "content": "\\rho_{k} = \\rho_{0}P^{\\pi_{1}}\\ldots P^{\\pi_{k}},\\forall \\pi_{1},\\ldots ,\\pi_{k}\\in \\Pi_{\\mathcal{B}},\\rho_{0}" + }, + { + "bbox": [ + 104, + 273, + 504, + 297 + ], + "type": "text", + "content": " is the initial state-action distribution and " + }, + { + "bbox": [ + 104, + 273, + 504, + 297 + ], + "type": "inline_equation", + "content": "P^{\\pi_i}" + }, + { + 
"bbox": [ + 104, + 273, + 504, + 297 + ], + "type": "text", + "content": " is the transition operator induced by " + }, + { + "bbox": [ + 104, + 273, + 504, + 297 + ], + "type": "inline_equation", + "content": "\\pi_{i}" + }, + { + "bbox": [ + 104, + 273, + 504, + 297 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 273, + 504, + 297 + ], + "type": "inline_equation", + "content": "P^{\\pi_i}(s',a'|s,a) = \\mathcal{P}(s'|s,a)\\pi_i(a'|s')" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "text", + "content": "We denote the constrained Bellman operator induced by " + }, + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_B}" + }, + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_B}Q(s,a) := r(s,a) + \\max_{\\pi \\in \\Pi_B}\\gamma \\mathbb{P}^\\pi [Q(s',a')]" + }, + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_B}" + }, + { + "bbox": [ + 104, + 303, + 506, + 338 + ], + "type": "text", + "content": " can be seen as a Bellman operator on a redefined MDP, thus theoretical results of MDP also carry over, such as contraction mapping and existence of a fixed point." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 348, + 470, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 348, + 470, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 470, + 360 + ], + "type": "text", + "content": "3.3.2 BELLMAN CONSISTENT COEFFICIENT AND PERFORMANCE BOUND OF DOGE" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "text", + "content": "We show that the policy set induced by DOGE is essentially a Bellman-consistent policy set defined in Definition 2. Meanwhile, the distance constraint in DOGE can produce a small value of " + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "text", + "content": " and hence guarantee the learned policy deviates only to those generalizable areas." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 403, + 504, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 426 + ], + "type": "text", + "content": "Theorem 2. (Upper bound of Bellman-consistent coefficient). 
Under the NTK assumption, the Bellman-consistent coefficient " + }, + { + "bbox": [ + 104, + 403, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)" + }, + { + "bbox": [ + 104, + 403, + 504, + 426 + ], + "type": "text", + "content": " is upper bounded as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 428, + 504, + 487 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 428, + 504, + 487 + ], + "spans": [ + { + "bbox": [ + 106, + 428, + 504, + 487 + ], + "type": "interline_equation", + "content": "\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) \\leq \\frac {1}{\\epsilon_ {\\mu}} \\left\\| \\underbrace {(1 - \\gamma) Q \\left(s _ {o} , a _ {o}\\right) + R _ {\\max }} _ {\\mathcal {B} _ {1}} + \\underbrace {C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + d _ {1}\\right)} _ {\\mathcal {B} _ {2}} + \\underbrace {(2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + d _ {2}\\right)} _ {\\mathcal {B} _ {3}} \\right\\| _ {2, v} ^ {2} \\tag {10}", + "image_path": "74ac72c7b71d777415227ea08c93ccb35838148c8b31b0d892fdd88595f8cef6.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": "where we denote " + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "x = (s, a)" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "x' = (s', a')" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "x_o = \\mathbb{E}_{x \\sim \\mathcal{D}}[x]" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": " is the centroid of offline dataset. " + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "d_1 = \\| x - x_o \\|" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "d_2 = \\| x' - x_o \\|" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": " are the sample-to-centroid distances. " + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "C_2 = \\sqrt{\\sup_{x \\in S \\times \\mathcal{A}} \\| x \\|}" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": " is related to the upper bound of the input scale. " + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "\\epsilon_\\mu" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": " is the lower bound of Bellman error (square) for " + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": " under distribution " + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "inline_equation", + "content": "\\epsilon_\\mu \\leq \\| Q - T^\\pi Q \\|_{2,\\mu}^2" + }, + { + "bbox": [ + 104, + 486, + 505, + 536 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": "The RHS of Eq. (10) contains four parts: " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\frac{1}{\\epsilon_{\\mu}}" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_1" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_2" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_3" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": ". 
It is reasonable to assume " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mu} > 0" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": ", because of the approximation error of " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " networks and the distribution mismatch between " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_1" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " is only dependent on the " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " value " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "Q(s_o, a_o)" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " at the centroid of the dataset and the max reward " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_2" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " is related to distance " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "d_1" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " and distribution " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_3" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " is related to " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "d_2" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mathbb{P}^{\\pi}" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": ". 
To be mentioned, the distance regularization in DOGE compels the learned policy to output the action that is near the state-conditioned centroid of dataset, thus " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_2" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_3" + }, + { + "bbox": [ + 104, + 542, + 506, + 634 + ], + "type": "text", + "content": " can be driven to small values. Therefore, the RHS of Eq. (10) can be bounded by finite constants under DOGE, which shows that the constrained policy set induced by DOGE is essentially a Bellman-consistent constrained policy set." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "content": "Then, the performance gap between the policy learned by DOGE and the optimal policy can be bounded as given in Theorem 3. See Appendix D.1 and D.2 for the proof of Theorem 2 and 3." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "text", + "content": "Theorem 3. (Performance bound of the learned policy by DOGE). 
Let " + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "inline_equation", + "content": "Q^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "text", + "content": " be the fixed point of " + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "inline_equation", + "content": "Q^{\\Pi_{\\mathcal{B}}} = \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "inline_equation", + "content": "\\epsilon_k = Q^k - \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{k-1}" + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "text", + "content": " is the Bellman error at the " + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "text", + "content": "-th iteration. " + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "inline_equation", + "content": "\\| f \\|_{\\mu} := \\mathbb{E}_{\\mu}[\\| f \\|]" + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "text", + "content": ". 
The performance of the learned policy " + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "inline_equation", + "content": "\\pi_n" + }, + { + "bbox": [ + 104, + 662, + 506, + 697 + ], + "type": "text", + "content": " is bounded by:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 162, + 700, + 505, + 727 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 700, + 505, + 727 + ], + "spans": [ + { + "bbox": [ + 162, + 700, + 505, + 727 + ], + "type": "interline_equation", + "content": "\\lim _ {n \\rightarrow \\infty} \\| Q ^ {*} - Q ^ {\\pi_ {n}} \\| _ {\\rho_ {0}} \\leq \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} \\left[ L \\left(\\Pi_ {\\mathcal {B}}\\right) \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {\\mu} + \\frac {1 - \\gamma}{2 \\gamma} \\alpha \\left(\\Pi_ {\\mathcal {B}}\\right)\\right] \\tag {11}", + "image_path": "edc76fa287948bc3c648e1c9ae87f85df2e560c28617b1f24f903f97b8687474.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": 
"inline_equation", + "content": "L(\\Pi_{\\mathcal{B}}) = \\sqrt{(1 - \\gamma)^2 \\sum_{k=1}^{\\infty} k \\gamma^{k-1} l(k)}" + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": ", which is similar to the concentrability coefficient in BEAR (Kumar et al., 2019) but in a different form. Note that " + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "inline_equation", + "content": "l(k)" + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": " is related to the RHS of Eq. (10) and can be driven to a small value by DOGE according to Theorem 2. " + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\alpha(\\Pi_{\\mathcal{B}}) = \\| \\mathcal{T}^{\\Pi_{\\mathcal{B}}} Q^{\\Pi_{\\mathcal{B}}} - \\mathcal{T} Q^{*} \\|_{\\infty}" + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": " is the suboptimality constant, which is similar to " + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\alpha(\\Pi) = \\| \\mathcal{T}^{\\Pi} Q^{\\Pi} - \\mathcal{T} Q^{*} \\|_{\\infty}" + }, + { + "bbox": [ + 104, + 81, + 504, + 128 + ], + "type": "text", + "content": " in BEAR." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": "Compared with BEAR, DOGE allows a policy shift to some generalizable OOD areas and relaxes the strong full-coverage assumption. 
In addition, we have " + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "L(\\Pi_{\\mathcal{B}}) \\leq L(\\Pi) \\propto \\frac{\\rho_0 P^{\\pi_1} \\dots P^{\\pi_k}}{\\mu(s, a)}" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "L(\\Pi)" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": " is the concentrability coefficient in BEAR. This is evident when " + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "\\mu(s, a) = 0" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "\\rho_0 P^{\\pi_1} \\dots P^{\\pi_k}(s, a) > 0" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "L(\\Pi_{\\mathcal{B}})" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": " can be bounded by finite constants but " + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "L(\\Pi) \\to \\infty" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": ". 
Moreover, as " + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": " extends the policy set to cover more generalizable OOD areas (" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "\\Pi \\subseteq \\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": ") and produces a larger feasible region for optimization, lower degree of suboptimality can be achieved (i.e., " + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "\\alpha(\\Pi_{\\mathcal{B}}) \\leq \\alpha(\\Pi)" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": ") compared to only performing optimization on " + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "inline_equation", + "content": "\\Pi" + }, + { + "bbox": [ + 104, + 132, + 506, + 226 + ], + "type": "text", + "content": ". Therefore, we can see that DOGE enjoys a tighter performance bound than previous more conservative methods when allowed to exploit generalizable OOD areas." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 248, + 201, + 261 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 248, + 201, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 201, + 261 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 277, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 504, + 335 + ], + "type": "text", + "content": "For evaluation, We compare DOGE and prior offline RL methods over D4RL Mujoco and AntMaze tasks (Fu et al., 2020). Mujoco is a standard benchmark commonly used in previous work. 
AntMaze tasks are far more challenging due to the non-markovian and mixed-quality offline dataset, the stochastic property of environments, and the high dimensional state-action space. Implementation details, experimental setup and additional experimental results can be found in Appendix E and F." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 354, + 246, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 246, + 366 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 246, + 366 + ], + "type": "text", + "content": "4.1 COMPARISON WITH SOTA" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 378, + 506, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 506, + 456 + ], + "type": "text", + "content": "We compare DOGE with model-free SOTA methods, such as TD3+BC (Fujimoto & Gu, 2021), CQL (Kumar et al., 2020b) and IQL (Kostrikov et al., 2021b). For fairness, we use the “-v2” datasets for all methods. For most Mujoco tasks, we report the scores from the IQL paper. We obtain the other results using the authors' or our implementations. For AntMaze tasks, we obtain the results of CQL, TD3+BC, and IQL using the authors' implementations. For BC (Pomerleau, 1988), BCQ (Fujimoto et al., 2019) and BEAR (Kumar et al., 2019), we report the scores from (Fu et al., 2020). All methods are evaluated over the final 10 evaluations for Mujoco tasks and 100 for AntMaze tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 461, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 461, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 506, + 540 + ], + "type": "text", + "content": "Table 1 shows that DOGE achieves comparable or better performance than SOTA methods on most Mujoco and AntMaze tasks. 
Compared to other policy constraint approaches such as BCQ, BEAR and TD3+BC, DOGE is the first policy constraint method to successfully solve AntMaze-medium and AntMaze-large tasks. Note that IQL is an algorithm designed for multi-step dynamics programming and attains strong advantage on AntMaze tasks. Nevertheless, DOGE can compete with or even surpass IQL on most AntMaze tasks, by only employing a generalization-oriented policy constraint. These results illustrate the benefits of allowing policy learning on generalizable OOD areas." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 559, + 282, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 559, + 282, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 282, + 571 + ], + "type": "text", + "content": "4.2 EVALUATION ON GENERALIZATION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 582, + 504, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 639 + ], + "type": "text", + "content": "To evaluate the generalization ability of DOGE, we remove small areas of data from the critical pathways to the destination in AntMaze medium and large tasks, to construct an OOD dataset. The two removed areas reside in close proximity to the trajectory data (see Figure 1). We evaluate representative methods (such as TD3+BC, CQL, IQL) and DOGE on these modified datasets. Figure 4 shows the comparison before and after data removal." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "For such a dataset with partial state-action space coverage, existing policy constraint methods tend to over-constrain the policy to stay inside the support of a dataset, where the optimal policy is not well-covered. Value regularization methods suffer from deteriorated generalization performance, as the value function is distorted to assign low value at all OOD areas. In-sample learning methods are only guaranteed to retain the best policy within the partially covered dataset (Kostrikov et al., 2021b). As shown in Figure 4, all these methods struggle to generalize well on the missing areas and suffer severe performance drop, while DOGE maintains competitive performance. This further demonstrates the benefits of relaxing over-conservatism in existing methods." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 100, + 504, + 340 + ], + "blocks": [ + { + "bbox": [ + 134, + 80, + 476, + 92 + ], + "lines": [ + { + "bbox": [ + 134, + 80, + 476, + 92 + ], + "spans": [ + { + "bbox": [ + 134, + 80, + 476, + 92 + ], + "type": "text", + "content": "Table 1: Average normalized scores and standard deviations over 5 seeds on benchmark tasks" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 100, + 504, + 340 + ], + "lines": [ + { + "bbox": [ + 106, + 100, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 106, + 100, + 504, + 340 + ], + "type": "table", + "html": "
DatasetBCBCQBEARTD3+BCCQLIQLDOGE(ours)
hopper-r4.97.114.28.5±0.68.3±0.27.9±0.421.1±12.6
halfcheetah-r0.28.815.111.0±1.120.0±0.411.2±2.917.8±1.2
walker2d-r1.76.510.71.6±1.78.3±0.15.9±0.50.9 ± 2.4
hopper-m52.956.751.959.3±4.258.5±2.166.2±5.798.6±2.1
halfcheetah-m42.647.041.048.3±0.344.0±5.447.4±0.245.3±0.6
walker2d-m75.372.680.983.7±2.172.5±0.878.3±8.786.8±0.8
hopper-m-r18.153.337.360.9±18.895.0±6.494.7±8.676.2±17.7
halfcheetah-m-r36.640.429.744.6±0.545.5±0.544.2±1.242.8±0.6
walker2d-m-r26.052.118.581.8±5.577.2±5.573.8±7.187.3±2.3
hopper-m-e52.581.817.798.0±9.4105.4±6.891.5±14.3102.7±5.2
halfcheetah-m-e55.289.138.990.7±4.391.6±2.886.7±5.378.7±8.4
walker2d-m-e107.5109.595.4110.1±0.5108.8±0.7109.6±1.0110.4±1.5
locomation total473.5624.9451.3698.5±49.0726.1±31.7717.4±55.9768.6±55.4
antmaze-u65.078.973.091.3±5.784.8±2.388.2±1.997.0±1.8
antmaze-u-d55.055.061.054.6±16.243.3±5.466.7±4.063.5±9.3
antmaze-m-p0.00.00.00.065.2±4.870.4±5.380.6±6.5
antmaze-m-d0.00.08.00.054.0±11.774.6±3.277.6±6.1
antmaze-l-p0.06.70.00.018.8±15.343.5±4.548.2±8.1
antmaze-l-d0.02.20.00.031.6±9.545.6±7.636.4±9.1
antmaze-total120.0142.8142.0145.9±21.9297.7±49.0389.0±26.5403.3±40.9
", + "image_path": "520ff2c2c765f7699a757e6b09d8f9ef32507256dbb6fda7389d9a6250987556.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 107, + 367, + 203, + 438 + ], + "blocks": [ + { + "bbox": [ + 132, + 352, + 192, + 360 + ], + "lines": [ + { + "bbox": [ + 132, + 352, + 192, + 360 + ], + "spans": [ + { + "bbox": [ + 132, + 352, + 192, + 360 + ], + "type": "text", + "content": "Policy constraint" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 367, + 203, + 438 + ], + "lines": [ + { + "bbox": [ + 107, + 367, + 203, + 438 + ], + "spans": [ + { + "bbox": [ + 107, + 367, + 203, + 438 + ], + "type": "image", + "image_path": "49c9e6018d1a5f759c914c9175843a44c4e4f03b845c72c3324eb83ee60e69ca.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 206, + 367, + 302, + 438 + ], + "blocks": [ + { + "bbox": [ + 223, + 352, + 294, + 361 + ], + "lines": [ + { + "bbox": [ + 223, + 352, + 294, + 361 + ], + "spans": [ + { + "bbox": [ + 223, + 352, + 294, + 361 + ], + "type": "text", + "content": "Value regularization" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 206, + 367, + 302, + 438 + ], + "lines": [ + { + "bbox": [ + 206, + 367, + 302, + 438 + ], + "spans": [ + { + "bbox": [ + 206, + 367, + 302, + 438 + ], + "type": "image", + "image_path": "cf18c8e2896a643c58acf911f20a50282df75df8c16a4795cee76caed2e46f83.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 367, + 402, + 438 + ], + "blocks": [ + { + "bbox": [ + 326, + 352, + 393, + 361 + ], + "lines": [ + { + "bbox": [ + 326, + 352, + 393, + 361 + ], + "spans": [ + { + "bbox": [ + 326, + 352, + 393, + 361 + ], + "type": "text", + "content": "In-sample learning" + } + ] + 
} + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 367, + 402, + 438 + ], + "lines": [ + { + "bbox": [ + 306, + 367, + 402, + 438 + ], + "spans": [ + { + "bbox": [ + 306, + 367, + 402, + 438 + ], + "type": "image", + "image_path": "a7f61701ad8687a1c5d5494369d8525816007f4cb4ab04ea008879415c832474.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 405, + 367, + 501, + 438 + ], + "blocks": [ + { + "bbox": [ + 435, + 352, + 481, + 361 + ], + "lines": [ + { + "bbox": [ + 435, + 352, + 481, + 361 + ], + "spans": [ + { + "bbox": [ + 435, + 352, + 481, + 361 + ], + "type": "text", + "content": "DOGE (Ours)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 405, + 367, + 501, + 438 + ], + "lines": [ + { + "bbox": [ + 405, + 367, + 501, + 438 + ], + "spans": [ + { + "bbox": [ + 405, + 367, + 501, + 438 + ], + "type": "image", + "image_path": "82b5a4cba0f0bea289e48ace1d5263f2acd56e84dea74a0d32acaea0de1a03d5.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 106, + 444, + 204, + 516 + ], + "blocks": [ + { + "bbox": [ + 106, + 444, + 204, + 516 + ], + "lines": [ + { + "bbox": [ + 106, + 444, + 204, + 516 + ], + "spans": [ + { + "bbox": [ + 106, + 444, + 204, + 516 + ], + "type": "image", + "image_path": "3d9ade4e8a1eb09cdc98ed3dc4bac11622e37474f17806f5665c4b25852c71c6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 527, + 504, + 549 + ], + "lines": [ + { + "bbox": [ + 104, + 527, + 504, + 549 + ], + "spans": [ + { + "bbox": [ + 104, + 527, + 504, + 549 + ], + "type": "text", + "content": "Figure 4: Generalization performance after removing data from AntMaze large tasks (see Appendix F.1 for detailed setup and additional results on AntMaze medium tasks)." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 207, + 444, + 304, + 515 + ], + "blocks": [ + { + "bbox": [ + 207, + 444, + 304, + 515 + ], + "lines": [ + { + "bbox": [ + 207, + 444, + 304, + 515 + ], + "spans": [ + { + "bbox": [ + 207, + 444, + 304, + 515 + ], + "type": "image", + "image_path": "b9f5db12feacd952c5985cd236e6654c0b59a04770027710c915dd154ba2de40.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 306, + 444, + 403, + 515 + ], + "blocks": [ + { + "bbox": [ + 306, + 444, + 403, + 515 + ], + "lines": [ + { + "bbox": [ + 306, + 444, + 403, + 515 + ], + "spans": [ + { + "bbox": [ + 306, + 444, + 403, + 515 + ], + "type": "image", + "image_path": "a9f1f2a7c49a7015c329cdedeee59aa83178cc2bed9a4abdf1d7cdb998f8579b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 406, + 444, + 503, + 516 + ], + "blocks": [ + { + "bbox": [ + 406, + 444, + 503, + 516 + ], + "lines": [ + { + "bbox": [ + 406, + 444, + 503, + 516 + ], + "spans": [ + { + "bbox": [ + 406, + 444, + 503, + 516 + ], + "type": "image", + "image_path": "9ba79b5109bb6fa47399a02cfb0e1394cb3bef6a3a2547f25f0c3640dbc984af.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 594, + 209, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 209, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 209, + 605 + ], + "type": "text", + "content": "4.3 ABLATION STUDY" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": "We conduct 
ablation studies to evaluate the impact of the hyperparameter " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": ", the non-parametric distance threshold " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": " in Eq. (6), and the number of noise actions " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": " used to train the distance function. For " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": ", we add or subtract 2.5 to the original value; for " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": ", we choose " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": " upper quantile of the distance 
values in mini-batch samples; for " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": ", we choose " + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "inline_equation", + "content": "N = 10, 20, 30" + }, + { + "bbox": [ + 104, + 626, + 506, + 672 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": "Compared to " + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": ", we find that " + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": " has a more significant impact on the performance. Figure 5b shows that an overly restrictive " + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": " (30% quantile) results in a policy set too small to cover near-optimal policies. A more tolerant " + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": ", on the other hand, is unlikely to cause excessive error accumulation and achieves relatively good performance. 
In addition, Figure 5a and Figure 5c show that performance is stable across variations of hyperparameters, indicating that our method is hyperparameter-robust." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 90, + 233, + 183 + ], + "blocks": [ + { + "bbox": [ + 107, + 90, + 233, + 183 + ], + "lines": [ + { + "bbox": [ + 107, + 90, + 233, + 183 + ], + "spans": [ + { + "bbox": [ + 107, + 90, + 233, + 183 + ], + "type": "image", + "image_path": "f0c22aaf0812d5bdcd791e3ed965d26bb7f0cf71880de8ff0996763d37b8eb7c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 115, + 188, + 228, + 198 + ], + "lines": [ + { + "bbox": [ + 115, + 188, + 228, + 198 + ], + "spans": [ + { + "bbox": [ + 115, + 188, + 228, + 198 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 115, + 188, + 228, + 198 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 115, + 188, + 228, + 198 + ], + "type": "text", + "content": " has little effect on results" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 239, + 90, + 367, + 184 + ], + "blocks": [ + { + "bbox": [ + 239, + 90, + 367, + 184 + ], + "lines": [ + { + 
"bbox": [ + 239, + 90, + 367, + 184 + ], + "spans": [ + { + "bbox": [ + 239, + 90, + 367, + 184 + ], + "type": "image", + "image_path": "f237af1b604b5663d26e2d1de7fd8ea0af234e46d8774e911ae0bf4a5af03bdf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 208, + 504, + 229 + ], + "lines": [ + { + "bbox": [ + 104, + 208, + 504, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 208, + 504, + 229 + ], + "type": "text", + "content": "Figure 5: Ablation results. The default parameters in our implementation are marked by " + }, + { + "bbox": [ + 104, + 208, + 504, + 229 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 104, + 208, + 504, + 229 + ], + "type": "text", + "content": ". The error bars indicate min and max over 5 seeds. See Appendix G for more detailed ablation studies." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 374, + 90, + 500, + 184 + ], + "blocks": [ + { + "bbox": [ + 246, + 188, + 364, + 198 + ], + "lines": [ + { + "bbox": [ + 246, + 188, + 364, + 198 + ], + "spans": [ + { + "bbox": [ + 246, + 188, + 364, + 198 + ], + "type": "text", + "content": "(b) Small " + }, + { + "bbox": [ + 246, + 188, + 364, + 198 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 246, + 188, + 364, + 198 + ], + "type": "text", + "content": " is harmful to results" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 374, + 90, + 500, + 184 + ], + "lines": [ + { + "bbox": [ + 374, + 90, + 500, + 184 + ], + "spans": [ + { + "bbox": [ + 374, + 90, + 500, + 184 + ], + "type": "image", + "image_path": "bbd5795ca3015234995854861d3116a3c8d81b6eb10efbd40d471f10634e7031.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 381, + 188, + 495, + 198 + ], + "lines": [ + { + "bbox": [ + 381, + 188, + 495, + 198 + ], + 
"spans": [ + { + "bbox": [ + 381, + 188, + 495, + 198 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 381, + 188, + 495, + 198 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 381, + 188, + 495, + 198 + ], + "type": "text", + "content": " has little effect on results" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 239, + 211, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 211, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 211, + 251 + ], + "type": "text", + "content": "5 RELATED WORK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 270, + 506, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 426 + ], + "type": "text", + "content": "To prevent distributional shift and exploitation error accumulation when inferring the value function at unseen samples, a direct approach is to restrict policy learning from deviating to OOD areas. To make sure the leaned policy stays inside the distribution or support of training data, These policy constraint methods either carefully parameterize the learned policy (Fujimoto et al., 2019; Matsushima et al., 2020), or use explicit divergence penalties (Kumar et al., 2019; Wu et al., 2019; Fujimoto & Gu, 2021; Xu et al., 2021; Dadashi et al., 2021) or implicit divergence constraints (Peng et al., 2019; Nair et al., 2020; Xu et al., 2022a). The theories behind these methods typically assume full state-action space coverage of the offline datasets(Le et al., 2019; Kumar et al., 2019). However, policy constraint under full-coverage assumption is unrealistic in most real-world settings, especially on datasets with partial coverage and only sub-optimal behavior policies. 
Some recent works try to relax the full-coverage assumption to partial coverage by introducing different distribution divergence metrics, but only in theoretical analysis (Liu et al., 2020; Zanette et al., 2021; Xie et al., 2021b; Uehara & Sun, 2021; Xie et al., 2021a). Our method is an enhanced policy constraint method, where we relax the full-coverage assumption and allow the policy to learn on OOD areas where networks can generalize well." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 430, + 506, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 506, + 552 + ], + "type": "text", + "content": "Another type of offline RL method, value regularization (Kumar et al., 2020b; Kostrikov et al., 2021a; Yu et al., 2021; Xu et al., 2022b; 2023), directly penalizes the value function to produce low values at OOD actions. In-sample learning methods (Brandfonbrener et al., 2021; Kostrikov et al., 2021b), on the other hand, only learn the value function within data or treat it as the value function of the behavior policy. Compared with our approach, these methods exercise too much conservatism, which limits the generalization performance of deep neural networks on OOD regions, largely weakening the ability of dynamic programming. There are also uncertainty-based and model-based methods that regularize the value function or policy with epistemic uncertainty estimated from model or value function (Janner et al., 2019; Yu et al., 2020; Uehara & Sun, 2021; Wu et al., 2021; Zhan et al., 2022; Bai et al., 2021). However, the estimation of the epistemic uncertainty of DNN is still an under-explored area, with results highly dependent on evaluation methods and the structure of DNN." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 579, + 196, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 579, + 196, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 196, + 590 + ], + "type": "text", + "content": "6 CONCLUSION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": "In this study, we provide new insights on the relationship between approximation error of deep " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " functions and geometry of offline datasets. Through empirical and theoretical analysis, we find that deep " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " functions attain relatively low approximation error when interpolating rather than extrapolating the dataset. This phenomenon motivates us to design a new algorithm, DOGE, to empower policy learning on OOD samples within the convex hull of training data. DOGE is simple yet elegant, by plugging a dataset geometry-derived distance constraint into TD3. With such a minimal surgery, DOGE outperforms existing model-free offline RL methods on most D4RL tasks. We theoretically prove that DOGE enjoys a tighter performance bound compared with existing policy constraint methods under the more realistic partial-coverage assumption. 
Empirical results and theoretical analysis suggest the necessity of re-thinking the conservatism principle in offline RL algorithm design, and points to sufficient exploitation of the generalization ability of deep " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " functions." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 101, + 506, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 101, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 104, + 101, + 506, + 158 + ], + "type": "text", + "content": "This work is supported by National Key Research and Development Program of China under Grant (2022YFB2502904). This work is also supported by Baidu Inc. through Apollo-AIR Joint Research Center. The authors would also like to thank the anonymous reviewers for their feedback on the manuscripts. 
Jianxiong Li would like to thank Zhixu Du, Yimu Wang, Li Jiang, Haoyi Niu, Hao Zhao and all colleagues in AIR-Dream group for valuable discussions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 173, + 175, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 173, + 175, + 185 + ], + "spans": [ + { + "bbox": [ + 106, + 173, + 175, + 185 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 191, + 507, + 733 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 191, + 507, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 191, + 507, + 226 + ], + "spans": [ + { + "bbox": [ + 105, + 191, + 507, + 226 + ], + "type": "text", + "content": "Zeyuan Allen-Zhu, Yuanzhi Li, and Yingyu Liang. Learning and generalization in overparameterized neural networks, going beyond two layers. Advances in neural information processing systems, 32, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 232, + 505, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 232, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 505, + 267 + ], + "type": "text", + "content": "Gaon An, Seungyong Moon, Jang-Hyun Kim, and Hyun Oh Song. Uncertainty-based offline reinforcement learning with diversified q-ensemble. Advances in neural information processing systems, 34:7436-7447, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 273, + 506, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 273, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 506, + 308 + ], + "type": "text", + "content": "Anonymous. Lightweight uncertainty for offline reinforcement learning via bayesian posterior. In Submitted to The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=55Eet8WGJTv. 
under review." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 313, + 506, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 313, + 506, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 506, + 349 + ], + "type": "text", + "content": "Sanjeev Arora, Simon Du, Wei Hu, Zhiyuan Li, and Ruosong Wang. Fine-grained analysis of optimization and generalization for overparameterized two-layer neural networks. In International Conference on Machine Learning, pp. 322-332. PMLR, 2019a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 354, + 505, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 505, + 389 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 505, + 389 + ], + "type": "text", + "content": "Sanjeev Arora, Simon S Du, Wei Hu, Zhiyuan Li, Russ R Salakhutdinov, and Ruosong Wang. On exact computation with an infinitely wide neural net. Advances in Neural Information Processing Systems, 32, 2019b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 395, + 506, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 395, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 395, + 506, + 430 + ], + "type": "text", + "content": "Chenjia Bai, Lingxiao Wang, Zhuoran Yang, Zhi-Hong Deng, Animesh Garg, Peng Liu, and Zhao ran Wang. Pessimistic bootstrapping for uncertainty-driven offline reinforcement learning. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 436, + 506, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 506, + 460 + ], + "type": "text", + "content": "Etienne Barnard and LFA Wessels. Extrapolation and interpolation in neural network classifiers. IEEE Control Systems Magazine, 12(5):50-53, 1992." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 466, + 504, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 466, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 504, + 491 + ], + "type": "text", + "content": "Peter L Bartlett and Shahar Mendelson. Rademacher and gaussian complexities: Risk bounds and structural results. Journal of Machine Learning Research, 3(Nov):463-482, 2002." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 495, + 506, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 506, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 506, + 520 + ], + "type": "text", + "content": "Alberto Bietti and Julien Mairal. On the inductive bias of neural tangent kernels. Advances in Neural Information Processing Systems, 32, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 526, + 504, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 504, + 550 + ], + "type": "text", + "content": "David Brandfonbrener, Will Whitney, Rajesh Ranganath, and Joan Bruna. Offline rl without off-policy evaluation. Advances in Neural Information Processing Systems, 34:4933-4946, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 555, + 504, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 504, + 581 + ], + "type": "text", + "content": "Qi Cai, Zhuoran Yang, Jason D Lee, and Zhaoran Wang. Neural temporal-difference learning converges to global optima. Advances in Neural Information Processing Systems, 32, 2019." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 586, + 504, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 504, + 620 + ], + "type": "text", + "content": "Robert Dadashi, Shideh RezaEIFar, Nino Vieillard, LEOnard Hussenot, Olivier Pietquin, and Matthieu Geist. Offline reinforcement learning with pseudometric learning. In International Conference on Machine Learning, pp. 2307-2318. PMLR, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 627, + 506, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 506, + 662 + ], + "type": "text", + "content": "Jonas Degrave, Federico Felici, Jonas Buchli, Michael Neunert, Brendan Tracey, Francesco Carpanese, Timo Ewalds, Roland Hafner, Abbas Abdelmaleki, Diego de Las Casas, et al. Magnetic control of tokamak plasmas through deep reinforcement learning. Nature, 602(7897):414-419, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 668, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 504, + 692 + ], + "type": "text", + "content": "Jianqing Fan, Zhaoran Wang, Yuchen Xie, and Zhuoran Yang. A theoretical analysis of deep q-learning. In Learning for Dynamics and Control, pp. 486-489. PMLR, 2020." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 698, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 733 + ], + "type": "text", + "content": "Pete Florence, Corey Lynch, Andy Zeng, Oscar A Ramirez, Ayzaan Wahid, Laura Downs, Adrian Wong, Johnny Lee, Igor Mordatch, and Jonathan Thompson. Implicit behavioral cloning. In Conference on Robot Learning, pp. 
158-168. PMLR, 2022." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + "content": "Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4rl: Datasets for deep data-driven reinforcement learning. arXiv preprint arXiv:2004.07219, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 506, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 506, + 136 + ], + "type": "text", + "content": "Scott Fujimoto and Shixiang Shane Gu. A minimalist approach to offline reinforcement learning. Advances in Neural Information Processing Systems, 34, 2021." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 141, + 506, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 141, + 506, + 166 + ], + "spans": [ + { + "bbox": [ + 107, + 141, + 506, + 166 + ], + "type": "text", + "content": "Scott Fujimoto, Herke Hoof, and David Meger. Addressing function approximation error in actor-critic methods. In International conference on machine learning, pp. 1587-1596. PMLR, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 171, + 504, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 171, + 504, + 196 + ], + "spans": [ + { + "bbox": [ + 107, + 171, + 504, + 196 + ], + "type": "text", + "content": "Scott Fujimoto, David Meger, and Doina Precup. Off-policy deep reinforcement learning without exploration. In International Conference on Machine Learning, pp. 2052-2062. PMLR, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 201, + 505, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 201, + 505, + 225 + ], + "spans": [ + { + "bbox": [ + 107, + 201, + 505, + 225 + ], + "type": "text", + "content": "Seyed Kamyar Seyed Ghasemipour, Shixiang Shane Gu, and Ofir Nachum. Why so pessimistic? estimating uncertainties for offline rl through ensembles, and why their independence matters." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 232, + 504, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 232, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 107, + 232, + 504, + 266 + ], + "type": "text", + "content": "Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In International conference on machine learning, pp. 1861-1870. PMLR, 2018." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 272, + 506, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 272, + 506, + 307 + ], + "spans": [ + { + "bbox": [ + 107, + 272, + 506, + 307 + ], + "type": "text", + "content": "Pamela J Haley and DONALD Soloway. Extrapolation limitations of multilayer feedforward neural networks. In [Proceedings 1992] IJCNN International Joint Conference on Neural Networks, volume 4, pp. 25-30. IEEE, 1992." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 313, + 506, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 313, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 107, + 313, + 506, + 348 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 354, + 505, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 354, + 505, + 378 + ], + "spans": [ + { + "bbox": [ + 107, + 354, + 505, + 378 + ], + "type": "text", + "content": "Arthur Jacot, Franck Gabriel, and Clément Hongler. Neural tangent kernel: Convergence and generalization in neural networks. Advances in neural information processing systems, 31, 2018." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 384, + 504, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 384, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 107, + 384, + 504, + 408 + ], + "type": "text", + "content": "Michael Janner, Justin Fu, Marvin Zhang, and Sergey Levine. When to trust your model: Model-based policy optimization. Advances in Neural Information Processing Systems, 32, 2019." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 415, + 506, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 415, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 107, + 415, + 506, + 449 + ], + "type": "text", + "content": "Ilya Kostrikov, Rob Fergus, Jonathan Tompson, and Ofir Nachum. Offline reinforcement learning with fisher divergence critic regularization. In International Conference on Machine Learning, pp. 5774-5783. PMLR, 2021a." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 455, + 504, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 455, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 107, + 455, + 504, + 479 + ], + "type": "text", + "content": "Ilya Kostrikov, Ashvin Nair, and Sergey Levine. Offline reinforcement learning with implicit q-learning. In International Conference on Learning Representations, 2021b." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 485, + 506, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 485, + 506, + 518 + ], + "spans": [ + { + "bbox": [ + 107, + 485, + 506, + 518 + ], + "type": "text", + "content": "Aviral Kumar, Justin Fu, Matthew Soh, George Tucker, and Sergey Levine. Stabilizing off-policy q-learning via bootstrapping error reduction. Advances in Neural Information Processing Systems, 32, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 526, + 504, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 526, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 107, + 526, + 504, + 560 + ], + "type": "text", + "content": "Aviral Kumar, Rishabh Agarwal, Dibya Ghosh, and Sergey Levine. Implicit under-parameterization inhibits data-efficient deep reinforcement learning. In International Conference on Learning Representations, 2020a." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 567, + 506, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 567, + 506, + 600 + ], + "spans": [ + { + "bbox": [ + 107, + 567, + 506, + 600 + ], + "type": "text", + "content": "Aviral Kumar, Aurick Zhou, George Tucker, and Sergey Levine. Conservative q-learning for offline reinforcement learning. Advances in Neural Information Processing Systems, 33:1179-1191, 2020b." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 608, + 506, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 608, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 107, + 608, + 506, + 632 + ], + "type": "text", + "content": "Hoang Le, Cameron Voloshin, and Yisong Yue. Batch policy learning under constraints. In International Conference on Machine Learning, pp. 3703-3712. PMLR, 2019." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 638, + 506, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 638, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 107, + 638, + 506, + 662 + ], + "type": "text", + "content": "Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 668, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 668, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 107, + 668, + 504, + 692 + ], + "type": "text", + "content": "Boyi Liu, Qi Cai, Zhuoran Yang, and Zhaoran Wang. Neural trust region/proximal policy optimization attains globally optimal policy. Advances in neural information processing systems, 32, 2019." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 698, + 504, + 732 + ], + "type": "text", + "content": "Yao Liu, Adith Swaminathan, Alekh Agarwal, and Emma Brunskill. Provably good batch off-policy reinforcement learning without great exploration. Advances in Neural Information Processing Systems, 33:1264-1274, 2020." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "text", + "content": "Tatsuya Matsushima, Hiroki Furuta, Yutaka Matsuo, Ofir Nachum, and Shixiang Gu. Deployment-efficient reinforcement learning via model-based offline optimization. In International Conference on Learning Representations, 2020." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 157 + ], + "type": "text", + "content": "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al. Human-level control through deep reinforcement learning. nature, 518(7540):529-533, 2015." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 163, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 163, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 107, + 163, + 504, + 186 + ], + "type": "text", + "content": "Ashvin Nair, Murtaza Dalal, Abhishek Gupta, and Sergey Levine. Accelerating online reinforcement learning with offline datasets. arXiv preprint arXiv:2006.09359, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 193, + 506, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 193, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 193, + 506, + 217 + ], + "type": "text", + "content": "Charles Packer, Katelyn Gao, Jernej Kos, Philipp Krahenbuhl, Vladlen Koltun, and Dawn Song. Assessing generalization in deep reinforcement learning. arXiv preprint arXiv:1810.12282, 2018." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 223, + 506, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 223, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 107, + 223, + 506, + 246 + ], + "type": "text", + "content": "Xue Bin Peng, Aviral Kumar, Grace Zhang, and Sergey Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 253, + 504, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 253, + 504, + 276 + ], + "spans": [ + { + "bbox": [ + 107, + 253, + 504, + 276 + ], + "type": "text", + "content": "Dean A Pomerleau. Alvinn: An autonomous land vehicle in a neural network. Advances in neural information processing systems, 1, 1988." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 282, + 506, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 282, + 506, + 317 + ], + "spans": [ + { + "bbox": [ + 107, + 282, + 506, + 317 + ], + "type": "text", + "content": "David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, et al. Mastering the game of go without human knowledge. nature, 550(7676):354-359, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 323, + 504, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 323, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 107, + 323, + 504, + 346 + ], + "type": "text", + "content": "Masatoshi Uehara and Wen Sun. Pessimistic model-based offline reinforcement learning under partial coverage. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 353, + 506, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 353, + 506, + 386 + ], + "spans": [ + { + "bbox": [ + 107, + 353, + 506, + 386 + ], + "type": "text", + "content": "Hado Van Hasselt, Yotam Doron, Florian Strub, Matteo Hessel, Nicolas Sonnerat, and Joseph Modayil. Deep reinforcement learning and the deadly triad. arXiv preprint arXiv:1812.02648, 2018." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 393, + 506, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 393, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 107, + 393, + 506, + 417 + ], + "type": "text", + "content": "Vladimir N Vapnik and A Ya Chervonenkis. On the uniform convergence of relative frequencies of events to their probabilities. In Measures of complexity, pp. 11-30. Springer, 2015." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 423, + 504, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 423, + 504, + 457 + ], + "spans": [ + { + "bbox": [ + 107, + 423, + 504, + 457 + ], + "type": "text", + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 464, + 506, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 464, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 107, + 464, + 506, + 487 + ], + "type": "text", + "content": "Yifan Wu, George Tucker, and Ofir Nachum. Behavior regularized offline reinforcement learning. arXiv preprint arXiv:1911.11361, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 494, + 506, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 494, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 107, + 494, + 506, + 529 + ], + "type": "text", + "content": "Yue Wu, Shuangfei Zhai, Nitish Srivastava, Joshua M Susskind, Jian Zhang, Ruslan Salakhutdinov, and Hanlin Goh. Uncertainty weighted actor-critic for offline reinforcement learning. In International Conference on Machine Learning, pp. 11319-11328. PMLR, 2021." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 535, + 504, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 535, + 504, + 570 + ], + "spans": [ + { + "bbox": [ + 107, + 535, + 504, + 570 + ], + "type": "text", + "content": "Chenjun Xiao, Bo Dai, Jincheng Mei, Oscar A Ramirez, Ramki Gummadi, Chris Harris, and Dale Schuurmans. Understanding and leveraging overparameterization in recursive value estimation. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 575, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 575, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 107, + 575, + 506, + 609 + ], + "type": "text", + "content": "Tengyang Xie, Ching-An Cheng, Nan Jiang, Paul Mineiro, and Alekh Agarwal. Bellman-consistent pessimism for offline reinforcement learning. Advances in neural information processing systems, 34, 2021a." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 616, + 506, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 616, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 107, + 616, + 506, + 651 + ], + "type": "text", + "content": "Tengyang Xie, Nan Jiang, Huan Wang, Caiming Xiong, and Yu Bai. Policy finetuning: Bridging sample-efficient offline and online reinforcement learning. Advances in neural information processing systems, 34, 2021b." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 657, + 504, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 657, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 107, + 657, + 504, + 681 + ], + "type": "text", + "content": "Haoran Xu, Xianyuan Zhan, Jianxiong Li, and Honglei Yin. Offline reinforcement learning with soft behavior regularization. arXiv preprint arXiv:2110.07395, 2021." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 687, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 687, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 687, + 506, + 732 + ], + "type": "text", + "content": "Haoran Xu, Li Jiang, Jianxiong Li, and Xianyuan Zhan. A policy-guided imitation approach for offline reinforcement learning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022a. URL https://openreview.net/forum?id=CKbqDtZnSc." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 431 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "type": "text", + "content": "Haoran Xu, Xianyuan Zhan, and Xiangyu Zhu. Constraints penalized q-learning for safe offline reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, 2022b." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 506, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 506, + 157 + ], + "type": "text", + "content": "Haoran Xu, Li Jiang, Jianxiong Li, Zhuoran Yang, Zhaoran Wang, Victor Wai Kin Chan, and Xianyuan Zhan. Sparse q-learning: Offline reinforcement learning with implicit value regularization. In International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=ueYYgo2pSSU." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 504, + 198 + ], + "type": "text", + "content": "Keyulu Xu, Mozhi Zhang, Jingling Li, Simon Shaolei Du, Ken-Ichi Kawarabayashi, and Stefanie Jegelka. How neural networks extrapolate: From feedforward to graph neural networks. In International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 204, + 505, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 505, + 238 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 505, + 238 + ], + "type": "text", + "content": "Pan Xu and Quanquan Gu. A finite-time analysis of q-learning with neural network function approximation. In International Conference on Machine Learning, pp. 10555-10565. PMLR, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 244, + 505, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 244, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 244, + 505, + 280 + ], + "type": "text", + "content": "Tianhe Yu, Garrett Thomas, Lantao Yu, Stefano Ermon, James Y Zou, Sergey Levine, Chelsea Finn, and Tengyu Ma. Mopo: Model-based offline policy optimization. 
Advances in Neural Information Processing Systems, 33:14129-14142, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 285, + 505, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 505, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 505, + 320 + ], + "type": "text", + "content": "Tianhe Yu, Aviral Kumar, Rafael Rafailov, Aravind Rajeswaran, Sergey Levine, and Chelsea Finn. Combo: Conservative offline model-based policy optimization. Advances in Neural Information Processing Systems, 34, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 327, + 505, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 505, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 505, + 351 + ], + "type": "text", + "content": "Andrea Zanette, Martin J Wainwright, and Emma Brunskill. Provable benefits of actor-critic methods for offline reinforcement learning. Advances in neural information processing systems, 34, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 357, + 505, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 505, + 392 + ], + "type": "text", + "content": "Xianyuan Zhan, Haoran Xu, Yue Zhang, Xiangyu Zhu, Honglei Yin, and Yu Zheng. Deepthermal: Combustion optimization for thermal power generating units using offline reinforcement learning. In Proceedings of the AAAI Conference on Artificial Intelligence, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 397, + 505, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 397, + 505, + 431 + ], + "spans": [ + { + "bbox": [ + 105, + 397, + 505, + 431 + ], + "type": "text", + "content": "Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning (still) requires rethinking generalization. 
Communications of the ACM, 64(3):107-115, 2021." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 320, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 320, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 320, + 94 + ], + "type": "text", + "content": "A SKETCH OF THEORETICAL ANALYSIS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 228 + ], + "type": "text", + "content": "In this section, we present in Figure 6 a sketch of the overall logical flow in our theoretical analyses and the proposed algorithm, DOGE. We start by analyzing the effects of data geometry on the generalization patterns of deep Q-functions. We find that a small sample-to-dataset distance leads to a tightened Q-function approximation error and thus interpolation enjoys better generalization properties than extrapolation (Theorem 1). Motivated by this, we propose DOGE, which tries to control the upper bound of the sample-to-centroid distance to be small (Property 1) and enforces a convex hull based policy constraint (Property 2). 
Then, we dive deeper and find that the upper bound of the Bellman-consistent coefficient is well controlled by sample-to-centroid distance and thus DOGE enjoys a bounded bellman-consistent coefficient (Theorem 2). Based on these findings, we can derive a tighter performance bound of DOGE as compared to support constraint methods like BEAR (Theorem 3)." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 238, + 504, + 467 + ], + "blocks": [ + { + "bbox": [ + 106, + 238, + 504, + 467 + ], + "lines": [ + { + "bbox": [ + 106, + 238, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 106, + 238, + 504, + 467 + ], + "type": "image", + "image_path": "0623ba8fec6b85b0996f8cc0b44efa8bea72ee4e56762670f536b443ccace4fc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 473, + 385, + 486 + ], + "lines": [ + { + "bbox": [ + 225, + 473, + 385, + 486 + ], + "spans": [ + { + "bbox": [ + 225, + 473, + 385, + 486 + ], + "type": "text", + "content": "Figure 6: Sketch of theoretical analysis" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 511, + 504, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 511, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 504, + 538 + ], + "type": "text", + "content": "B THEORETICAL ANALYSIS OF THE IMPACT OF DATA GEOMETRY ON DEEP " + }, + { + "bbox": [ + 105, + 511, + 504, + 538 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 105, + 511, + 504, + 538 + ], + "type": "text", + "content": " FUNCTIONS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 551, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 551, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 551, + 506, + 662 + ], + "type": "text", + "content": "To analyze the generalization of a function approximator, one can refer 
to some classical methods such as Rademacher complexity (Bartlett & Mendelson, 2002) and VC-dimension (Vapnik & Chervonenkis, 2015). However, the generalization bounds that obtained by these methods are usually trivial and cannot explain the generalization behavior in the overparameterized regime (Zhang et al., 2021). Recent breakthroughs in neural tangent kernel (NTK) shed light on the generalization of DNN. NTK builds the connection between the training dynamics of DNN and the solution of the kernel regression w.r.t. NTK, and is widely used in recent analysis of DNN generalization (Jacot et al., 2018; Arora et al., 2019b; Bietti & Mairal, 2019). What's more, NTK is also a popular analyzing tool in the convergence and optimality of deep RL (Cai et al., 2019; Fan et al., 2020; Kumar et al., 2020a; Xiao et al., 2021) and thus is used in our study." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 676, + 253, + 687 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 676, + 253, + 687 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 253, + 687 + ], + "type": "text", + "content": "B.1 NEURAL TANGENT KERNEL" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": "We denote a general neural network by " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "f(\\theta, x): \\mathbb{R}^d \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": " is all the parameters in the network and " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + 
"type": "inline_equation", + "content": "x \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": " is the input. Given, a training dataset " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\{(x_i, y_i)\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": ", the parameters " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": " are optimized by minimizing the squared loss function, i.e., " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\theta) = \\frac{1}{2} \\sum_{i=1}^n (f_\\theta(x_i) - y_i)^2" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": " by gradient" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "descent. The dynamics of the networks output can be formulated by Lemma 1 (Lemma 3.1. 
of (Arora et al., 2019b)); see (Arora et al., 2019b) for the proof of Lemma 1." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": "Lemma 1. Consider minimizing the squared loss " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\theta)" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": " by gradient descent with infinitesimally small learning rate, i.e., " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\frac{d\\theta(t)}{dt} = -\\nabla \\mathcal{L}(\\theta(t))" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{u}(t) = (f(\\theta(t), x_i))_{i \\in [n]} \\in \\mathbb{R}^n" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": " be the network outputs on all " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": "'s at time " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{Y} = (y_i)_{i \\in [n]}" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": " be the desired outputs. 
Then " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{u}(t)" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": " follows the following evolution, where " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{H}(t)" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": " is an " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "n \\times n" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": " positive semidefinite matrix whose " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": "-th entry is " + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\left\\langle \\frac{\\partial f(\\theta(t), x_i)}{\\partial \\theta}, \\frac{\\partial f(\\theta(t), x_j)}{\\partial \\theta} \\right\\rangle" + }, + { + "bbox": [ + 104, + 109, + 506, + 177 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 242, + 178, + 505, + 202 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 178, + 505, + 202 + ], + "spans": [ + { + "bbox": [ + 242, + 178, + 505, + 202 + ], + "type": "interline_equation", + "content": "\\frac {d \\mathbf {u} (t)}{d t} = - \\mathbf {H} (t) \\cdot (\\mathbf {u} (t) - \\mathbf {Y}). 
\\tag {12}", + "image_path": "1dd4994d2eafc373d32fd0b6c1771a10e234e1af8cb127f6428cc6e9644e8d27.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "text", + "content": "Plenty of works (Jacot et al., 2018; Arora et al., 2019b; Allen-Zhu et al., 2019; Xu et al., 2020) study the dynamics of the neural networks' training process and find that if the width of networks is sufficiently large, " + }, + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "inline_equation", + "content": "\\mathbf{H}(t)" + }, + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "text", + "content": " stays almost constant during training, i.e., " + }, + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "inline_equation", + "content": "\\mathbf{H}(t) = \\mathbf{H}(0)" + }, + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "text", + "content": ". What's more, if the neural networks' parameters are randomly initialized with certain scales and the networks width goes to infinity, " + }, + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "inline_equation", + "content": "\\mathbf{H}(0)" + }, + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "text", + "content": " converges to a fixed matrix " + }, + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "inline_equation", + "content": "\\mathbf{K}" + }, + { + "bbox": [ + 104, + 213, + 506, + 280 + ], + "type": "text", + "content": ", called neural tangent kernel (NTK) (Jacot et al., 2018)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 205, + 296, + 505, + 323 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 296, + 505, + 323 + ], + "spans": [ + { + "bbox": [ + 205, + 296, + 505, + 323 + ], + "type": "interline_equation", + "content": "\\mathbf {K} (x, x ^ {\\prime}) = \\mathbb {E} _ {\\theta \\sim W} \\left\\langle \\frac {\\partial f (\\theta (t) , x)}{\\partial \\theta}, \\frac {\\partial f (\\theta (t) , x ^ {\\prime})}{\\partial \\theta} \\right\\rangle \\tag {13}", + "image_path": "048720593618566031df58acc27ee2ac3a8a062eb5421be5270b98231b180231.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "text", + "content": " is Gaussian distribution. The training dynamics in Lemma 1 is identical to the dynamics of kernel regression under gradient flow, because " + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{K}" + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "text", + "content": " stays constant during training when the width of neural networks goes to infinity. 
Then, the final prediction function " + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "inline_equation", + "content": "(t \\to \\infty" + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "text", + "content": ", assuming " + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{u}(0) = 0" + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "text", + "content": ") is equal to the kernel regression solution:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 208, + 389, + 505, + 403 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 389, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 208, + 389, + 505, + 403 + ], + "type": "interline_equation", + "content": "f _ {n t k} (x) = \\left(\\mathbf {K} \\left(x, x _ {1}\\right), \\dots , \\mathbf {K} \\left(x, x _ {n}\\right)\\right) \\cdot \\mathbf {K} _ {\\text {t r a i n}} ^ {- 1} \\mathbf {Y} \\tag {14}", + "image_path": "6aeb989ce36232df1b83d2108370594fb2e4b557c654295ed0c94a43a1494087.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{train}^{-1}" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "n\\times n" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": " NTK for the training data (the state-action pair " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "x = (s,a)" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + 
"content": " in the policy evaluation in offline RL) and stays constant during training once the training data is fixed. " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": " is the training labels " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "(r(s,a) + \\gamma \\mathbb{E}_{a'\\sim \\pi (\\cdot |s')}[Q_{\\theta '}(s',a')]" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": " in offline RL). " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{K}(x,x_i)" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": " is the kernel value between test data " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": " and training data " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": ". We denote the feature map of " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{K}(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\Phi (\\cdot)" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{K}(x,x^{\\prime}) = \\langle \\Phi (x),\\Phi (x^{\\prime})\\rangle" + }, + { + "bbox": [ + 104, + 410, + 506, + 468 + ], + "type": "text", + "content": ". Then, Eq. 
(14) is equivalent to:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 186, + 484, + 505, + 499 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 484, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 186, + 484, + 505, + 499 + ], + "type": "interline_equation", + "content": "f _ {n t k} (x) = \\left(\\langle \\Phi (x), \\Phi (x _ {1}) \\rangle , \\dots , \\langle \\Phi (x), \\Phi (x _ {n}) \\rangle\\right) \\cdot \\mathbf {K} _ {\\text {t r a i n}} ^ {- 1} \\mathbf {Y} \\tag {15}", + "image_path": "505a655120e52e775776124610408ac7d80b1ae7724fec129c898c24faa51f1f.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 513, + 360, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 513, + 360, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 513, + 360, + 525 + ], + "type": "text", + "content": "B.2 IMPACT OF DATA GEOMETRY ON DEEP " + }, + { + "bbox": [ + 105, + 513, + 360, + 525 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 105, + 513, + 360, + 525 + ], + "type": "text", + "content": " FUNCTIONS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "text", + "content": "In this section, we analyze the impact of data geometry on deep " + }, + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "text", + "content": " functions under the NTK regime. We first introduce the smoothness property of the feature map " + }, + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "inline_equation", + "content": "\\Phi(x)" + }, + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "text", + "content": " induced by NTK (Lemma 2). 
Then, we introduce the equivalence between the kernel regression solution in Eq. (15) and a min-norm solution (Lemma 3). Builds on Lemma 2 and Lemma 3, Lemma 4 analyzes the smoothness of the deep " + }, + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "text", + "content": " functions. At last, we study how data geometry affects deep " + }, + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 534, + 506, + 591 + ], + "type": "text", + "content": " functions (Theorem 1)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 594, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 506, + 628 + ], + "type": "text", + "content": "Assumption 1. (NTK assumption). We assume the function approximators discussed in our paper are two-layer fully-connected ReLU neural networks with infinity width and are trained with infinitesimally small learning rate unless otherwise specified." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 638, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 506, + 704 + ], + "type": "text", + "content": "Although there exist some gaps between the NTK assumption and the real setting, NTK is one of the most advanced theoretical machinery from the generalization analysis of DNN. In addition, Assumption 1 is common in previous analysis on the generalization of DNN (Jacot et al., 2018; Arora et al., 2019a; Bietti & Mairal, 2019) and the convergence of DRL (Cai et al., 2019; Liu et al., 2019; Xu & Gu, 2020; Fan et al., 2020). For more accurate analysis, we should adopt more advanced analysis tools than NTK and hence leave it for future work." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "We first introduce Lemma 2 (Proposition 4 of (Bietti & Mairal, 2019)), which shows the feature map " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\Phi(x)" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " induced by NTK is not Lipschitz continuous but holds a weaker Hölder smoothness property." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": "Lemma 2. (Smoothness of the kernel map of two-layer ReLU networks). 
Let " + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": " be the kernel map of the neural tangent kernel induced by a two-layer ReLU neural network, " + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": " be two inputs, then " + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": " satisfies the following smoothness property." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 187, + 131, + 505, + 147 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 131, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 187, + 131, + 505, + 147 + ], + "type": "interline_equation", + "content": "\\left\\| \\Phi (x) - \\Phi (y) \\right\\| \\leq \\sqrt {\\min (\\| x \\| , \\| y \\|) \\| x - y \\|} + 2 \\| x - y \\|. \\tag {16}", + "image_path": "55e0ca0aec27f9e4487e7429581feecf511580db46d4a7b2453e9afab929807d.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 170, + 506, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 170, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 506, + 204 + ], + "type": "text", + "content": "Lemma 3 (Lemma 2 of (Xu et al., 2020)) builds the connection between the kernel regression solution in Eq. (14) and the a min-norm solution. For the proof of Lemma 3, we refer the reader to(Xu et al., 2020)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "text", + "content": "Lemma 3. (Equivalence to a min-norm optimization problem). Let " + }, + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "inline_equation", + "content": "\\Phi(x)" + }, + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "text", + "content": " be the feature map induced by a neural tangent kernel, for any " + }, + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "text", + "content": ". The solution to the kernel regression in Eq. (14) and Eq. (15) is equivalent to " + }, + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "inline_equation", + "content": "f_{ntk}(x) = \\Phi(x)^T \\beta_{ntk}" + }, + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "inline_equation", + "content": "\\beta_{ntk}" + }, + { + "bbox": [ + 104, + 207, + 506, + 253 + ], + "type": "text", + "content": " is the optimal solution of a min-norm optimization problem defined as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 268, + 266, + 302, + 284 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 266, + 302, + 284 + ], + "spans": [ + { + "bbox": [ + 268, + 266, + 302, + 284 + ], + "type": "interline_equation", + "content": "\\min _ {\\beta} \\| \\beta \\|", + "image_path": "00314da4038ffe4e87ddfae18a0b5eace863d783aeb2bbe919bcd664bd5aaa40.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 231, + 278, + 504, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 278, + 504, + 300 + ], + "spans": [ + { 
+ "bbox": [ + 231, + 278, + 504, + 300 + ], + "type": "interline_equation", + "content": "s. t. \\Phi \\left(x _ {i}\\right) ^ {T} \\beta = y _ {i}, f o r i = 1, \\dots , n. \\tag {17}", + "image_path": "69668066355d4fdf5e394eee46522482626e7409a9ef00a0d676936622e7df7b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 319, + 373, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 373, + 332 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 373, + 332 + ], + "type": "text", + "content": "Then, deep " + }, + { + "bbox": [ + 104, + 319, + 373, + 332 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 319, + 373, + 332 + ], + "type": "text", + "content": " functions satisfy the following smoothness property." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "text", + "content": "Lemma 4. (Smoothness for deep " + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "text", + "content": " functions). Given two inputs " + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "text", + "content": ", the distance between these two data points is " + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "inline_equation", + "content": "d = \\| x - x' \\|" + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "inline_equation", + "content": "C_1 \\coloneqq \\sup_{\\| \\beta_{ntk} \\|_\\infty} \\| \\beta_{ntk} \\|_\\infty" + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "text", + "content": " is a finite constant. Then the difference between the output at " + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "text", + "content": " and the output at " + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 104, + 334, + 505, + 369 + ], + "type": "text", + "content": " can be bounded by:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 194, + 375, + 505, + 390 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 375, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 194, + 375, + 505, + 390 + ], + "type": "interline_equation", + "content": "\\left\\| Q _ {\\theta} (x) - Q _ {\\theta} \\left(x ^ {\\prime}\\right) \\right\\| \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\left\\| x \\right\\| , \\left\\| x ^ {\\prime} \\right\\|\\right)} \\sqrt {d} + 2 d\\right) \\tag {18}", + "image_path": "5e4835932e7585d4e050ae68e44a492b4f4998a5a5a34382b10ea652c8bb9d8b.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "content": "Proof. 
In offline RL, we denote a general " + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "content": " network by " + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "inline_equation", + "content": "Q_{\\theta}(x): \\mathbb{R}^{|S| + |\\mathcal{A}|} \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "content": " is all the parameters in the network and " + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "inline_equation", + "content": "x = (s,a) \\in \\mathbb{R}^{|S| + |\\mathcal{A}|}" + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "content": " is the brief notation for state-action pair " + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "inline_equation", + "content": "(s,a)" + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "content": ". 
The " + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "content": " function is trained via minimizing the temporal difference error defined as " + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "inline_equation", + "content": "\\frac{1}{2}\\sum_{i=1}^{n}(Q_{\\theta}(x_i) - y_i)^2" + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "content": " by gradient descent, where " + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "inline_equation", + "content": "y_i = r(x_i) + \\gamma \\mathbb{E}_{a_i' \\sim \\pi(\\cdot | s_i')} [Q_{\\theta'}^\\pi(x_i')] \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 420, + 504, + 479 + ], + "type": "text", + "content": " is the target value." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": "Using kernel method from NTK, " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": " function can be formulated as " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "Q_{\\theta}(x) = \\Phi (x)^{T}\\beta" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "\\Phi (x)" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": " is independent of the changes on training labels when NTK assumption holds. 
This is because as the width of a neural net goes to infinity, the NTK kernel " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "\\mathbf{K}(x,x^{\\prime}) = < \\Phi (x),\\Phi (x^{\\prime})>" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": " produced by this network stays constant during training, and so is the property of the feature map " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "\\Phi (x)" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": " (Jacot et al., 2018). So, the learning process under NTK framework is actually adjusting " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": " to fit the label rather than " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "\\Phi (x)" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": ". As a result, Lemma 2 holds when deep " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": " function satisfies NTK assumptions. 
Given two inputs " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "x^{\\prime}" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": ", the distance between these two inputs is " + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "inline_equation", + "content": "d = \\| x - x^{\\prime}\\|" + }, + { + "bbox": [ + 104, + 483, + 506, + 572 + ], + "type": "text", + "content": ". Based on Lemma 2, it is easy to see that" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 126, + 586, + 504, + 668 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 586, + 504, + 668 + ], + "spans": [ + { + "bbox": [ + 126, + 586, + 504, + 668 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| Q _ {\\theta} (x) - Q _ {\\theta} \\left(x ^ {\\prime}\\right) \\right\\| = \\left\\| \\Phi (x) ^ {T} \\beta - \\Phi \\left(x ^ {\\prime}\\right) ^ {T} \\beta \\right\\| \\\\ \\leq \\| \\Phi (x) - \\Phi (x ^ {\\prime}) \\| \\| \\beta \\| _ {\\infty} \\quad (\\text {I n f i n i t y n o r m}) \\\\ \\leq \\| \\beta \\| _ {\\infty} \\left(\\sqrt {\\min \\left(\\| x \\| , \\| x ^ {\\prime} \\|\\right) \\cdot \\| x - x ^ {\\prime} \\|} + 2 \\| x - x ^ {\\prime} \\|\\right) (\\text {L e m m a} 2) \\tag {19} \\\\ = \\| \\beta \\| _ {\\infty} (\\sqrt {\\min (\\| x \\| , \\| x ^ {\\prime} \\|) \\cdot d} + 2 d) \\\\ \\leq C _ {\\beta} \\left(\\sqrt {\\operatorname* {m i n} \\left(\\| x \\| , \\| x ^ {\\prime} \\|\\right) \\cdot d} + 2 d\\right) \\quad \\left(C _ {\\beta} := \\sup \\| \\beta \\| _ {\\infty}\\right) \\\\ \\end{array}", + "image_path": "3414cb024cf8b30c1f251dee5b4fea4000e420c8aafa428765b675165f497b89.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": 
[ + 104, + 677, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "content": "Additionally, if we consider the delayed " + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "content": " target and delayed actor updates during policy learning, we can assume the target value used for " + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "content": " evaluation stays relatively stable during each policy evaluation step and the problem can be seen as solving a series of regression problems. Under this mild assumption, we can learn the actual " + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\beta_{ntk}" + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "content": " at each step (" + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\beta \\rightarrow \\beta_{ntk}" + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "content": " and so " + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "inline_equation", + "content": "C_{\\beta} \\rightarrow C_1" + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "inline_equation", + "content": "C_1 \\coloneqq \\sup \\| \\beta_{ntk} \\|_{\\infty}" + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "content": ") and thus complete the proof. 
Similar assumptions and treatments are also used" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "in Section 4 of (Kumar et al., 2020a) that Q function at each iteration can fit its label well, Appendix A.8 of (Xiao et al., 2021), as well as Appendix F of (Ghasemipour et al.)." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 494, + 110, + 504, + 120 + ], + "blocks": [ + { + "bbox": [ + 494, + 110, + 504, + 120 + ], + "lines": [ + { + "bbox": [ + 494, + 110, + 504, + 120 + ], + "spans": [ + { + "bbox": [ + 494, + 110, + 504, + 120 + ], + "type": "image", + "image_path": "954b6d160de1bdbd32bae04107cf7708adb2e72ff9baa6acde8219101018a7c7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 134, + 504, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 134, + 504, + 156 + ], + "spans": [ + { + "bbox": [ + 104, + 134, + 504, + 156 + ], + "type": "text", + "content": "Lemma 4 states the value difference of a deep " + }, + { + "bbox": [ + 104, + 134, + 504, + 156 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 134, + 504, + 156 + ], + "type": "text", + "content": " function for two inputs is related to the distance between these two inputs. The closer the distance, the smaller the value difference." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 168, + 239, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 168, + 239, + 179 + ], + "spans": [ + { + "bbox": [ + 105, + 168, + 239, + 179 + ], + "type": "text", + "content": "B.2.1 PROOF OF THEOREM 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "spans": [ + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "content": "Builds on Lemma 4, we can combine the data geometry and analyze the impact of data geometry on deep " + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "content": " functions." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "text", + "content": "Proof. We first review the definition of interpolated data and extrapolated data. Under continuous state-action space, state-action pairs within the convex hull of the dataset can be represented in an interpolated manner (referred as interpolated data " + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "inline_equation", + "content": "x_{in}" + }, + { + "bbox": [ + 104, + 223, + 505, + 257 + ], + "type": "text", + "content": "):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 227, + 262, + 505, + 294 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 262, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 227, + 262, + 505, + 294 + ], + "type": "interline_equation", + "content": "x _ {i n} = \\sum_ {i = 1} ^ {n} \\alpha_ {i} x _ {i}, \\quad \\sum_ {i = 1} ^ {n} \\alpha_ {i} = 1, \\alpha_ {i} \\geq 0 \\tag {20}", + "image_path": "90095ab30669d0b4e06e7ed68ae58d1f9f5d02bdf40a91c3abb08cf23fb0bda7.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 304, + 489, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 489, + 318 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 489, + 318 + ], + "type": "text", + "content": "Similarly, we can define extrapolated data that lie outside the convex hull of the dataset as " + }, + { + "bbox": [ + 104, + 304, + 489, + 318 + ], + "type": "inline_equation", + "content": "x_{out}" + }, + { + "bbox": [ + 104, + 304, + 489, + 318 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 269, + 322, + 505, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 322, + 505, + 354 + ], + "spans": [ + { 
+ "bbox": [ + 269, + 322, + 505, + 354 + ], + "type": "interline_equation", + "content": "x _ {o u t} = \\sum_ {i = 1} ^ {n} \\beta_ {i} x _ {i}, \\tag {21}", + "image_path": "9cac3f0dffab78703d7e81e752cf3d77355dbe9f621fad810cd91910a481143f.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 359, + 355, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 355, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 355, + 373 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 359, + 355, + 373 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{n} \\beta_{i} = 1" + }, + { + "bbox": [ + 104, + 359, + 355, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 359, + 355, + 373 + ], + "type": "inline_equation", + "content": "\\beta_{i} \\geq 0" + }, + { + "bbox": [ + 104, + 359, + 355, + 373 + ], + "type": "text", + "content": " does not hold simultaneously." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": "We define " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "\\mathrm{Proj}_{\\mathcal{D}}(x) \\coloneqq \\arg \\min_{x_i \\in \\mathcal{D}} \\| x - x_i \\|" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": " as a projector that projects unseen data " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": " to its nearest data in dataset " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": ". 
Given an interpolated data " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "x_{in}" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": " and an extrapolated data " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "x_{out}" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": ", the distances to their nearest data in dataset are " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "d_{x_{in}} = \\| x_{in} - \\mathrm{Proj}_{\\mathcal{D}}(x_{in})\\|" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "d_{x_{out}} = \\| x_{out} - \\mathrm{Proj}_{\\mathcal{D}}(x_{out})\\|" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": ". Because interpolated data lie inside the convex hull of training data, " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "d_{x_{in}} \\leq \\max_{x_i \\in \\mathcal{D}} \\| x_{in} - x_i\\| \\leq B" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": " is bounded, where " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "B \\coloneqq \\max_{x_i, x_j \\in \\mathcal{D}} \\| x_i - x_j\\|" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": " is a finite constant. Then, by applying Lemma 4, the value difference of deep " + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 376, + 504, + 453 + ], + "type": "text", + "content": " function for interpolated and extrapolated data can be formulated as the following shows." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 460, + 505, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 460, + 505, + 514 + ], + "spans": [ + { + "bbox": [ + 115, + 460, + 505, + 514 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| Q _ {\\theta} \\left(x _ {i n}\\right) - Q _ {\\theta} \\left(\\operatorname {Proj} _ {\\mathcal {D}} \\left(x _ {i n}\\right)\\right) \\right\\| \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\left\\| x _ {i n} \\right\\| , \\left\\| \\operatorname {Proj} _ {\\mathcal {D}} \\left(x _ {i n}\\right) \\right\\|\\right)} \\sqrt {d _ {x_{in}}} + 2 d _ {x_{in}}\\right) (22) \\\\ \\leq C _ {1} (\\sqrt {\\min (\\| x _ {i n} \\| , \\| \\mathrm {Proj} _ {\\mathcal {D}} (x _ {i n}) \\|)} \\sqrt {B} + 2 B) \\\\ \\left\\| Q _ {\\theta} \\left(x _ {o u t}\\right) - Q _ {\\theta} \\left(\\operatorname {Proj} _ {\\mathcal {D}} \\left(x _ {o u t}\\right)\\right) \\right\\| \\leq C _ {1} \\left(\\sqrt {\\min \\left(\\left\\| x _ {o u t} \\right\\| , \\left\\| \\operatorname {Proj} _ {\\mathcal {D}} \\left(x _ {o u t}\\right) \\right\\|\\right)} \\sqrt {d _ {x _ {o u t}}} + 2 d _ {x _ {o u t}}\\right) (23) \\\\ \\end{array}", + "image_path": "8d8ddd76a4974036b8a2bb422b748f1a8b6c9b0dcfd27711cf5ba795c47b562c.jpg" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 494, + 516, + 505, + 525 + ], + "blocks": [ + { + "bbox": [ + 494, + 516, + 505, + 525 + ], + "lines": [ + { + "bbox": [ + 494, + 516, + 505, + 525 + ], + "spans": [ + { + "bbox": [ + 494, + 516, + 505, + 525 + ], + "type": "image", + "image_path": "49b4837e3942e6b96296cee735f0ef3adf6a25c49272a1dfbe78e7965f274437.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 540, + 331, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 540, + 331, + 552 + ], + "spans": [ + { + 
"bbox": [ + 105, + 540, + 331, + 552 + ], + "type": "text", + "content": "B.3 QUANTITATIVE EXPERIMENTS ON THEOREM 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "text", + "content": "In addition to the one-dimensional random walk experiments presented in Section 2.2, we conduct additional experiments on the more complex and high-dimensional MuJoCo tasks (including D4RL Hopper-medium-v2, Halfcheetah-medium-v2, and Walker2d-medium-v2) to provide quantitative support to Theorem 1, in particular, the pertinence of interpolation and extrapolation. We first synthesize lots of interpolated data " + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "inline_equation", + "content": "x_{in}" + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "text", + "content": " and extrapolated data " + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "inline_equation", + "content": "x_{out}" + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "inline_equation", + "content": "x = (s,a) \\in S \\times \\mathcal{A}" + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "text", + "content": ") and then search for their nearest data points in offline dataset " + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "text", + "content": " accordingly, i.e., " + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{Proj}_{\\mathcal{D}}(x_{in})" + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + 
"type": "inline_equation", + "content": "\\mathrm{Proj}_{\\mathcal{D}}(x_{out})" + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "text", + "content": ". Then, we can evaluate the Q-value differences " + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x)) \\|" + }, + { + "bbox": [ + 104, + 561, + 506, + 650 + ], + "type": "text", + "content": " (LHS of Theorem 1) at these generated data and see whether the Q-value differences align well with Theorem 1." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "For the detailed experiment setup, recall that an interpolated data point " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_{in}" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " is a convex combination of the offline dataset, i.e., " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_{in} = \\sum_{i=1}^{n} \\alpha_i x_i" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_i \\sim \\mathcal{D}" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " with weights " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\alpha_i" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " that satisfy " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{n} \\alpha_i = 1" + }, + { + "bbox": [ + 104, + 654, + 506, + 
733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\alpha_i \\geq 0" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": ". Therefore, we can interpolate the offline dataset based on " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\alpha_i" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " sampled from the Dirichlet distribution to generate the interpolated data. Also, an extrapolated data point " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_{out}" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " is expressed as a weighted sum of the offline dataset, i.e., " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_{out} = \\sum_{i=1}^{n} \\beta_i x_i" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_i \\sim \\mathcal{D}" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": ", but its weights " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\beta_i" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " do not satisfy the non-negativity and the summing to 1 constraint. 
Therefore, we can generate extrapolated data by setting the sign of some weights to negative values and varying the weights not summing to" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": "1. After obtaining the interpolated and extrapolated data, we search for their closest data points in the offline dataset " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " and calculate their corresponding distance " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " and Q-value difference " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": ". 
Figure 7a shows the relationship between the distance to dataset " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " and the Q value difference " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " (LHS of Theorem 1). We also report the learned state-conditioned distance value " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "g(s,a)" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " on these generated data in Figure 7b." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 152, + 239, + 271 + ], + "blocks": [ + { + "bbox": [ + 106, + 152, + 239, + 271 + ], + "lines": [ + { + "bbox": [ + 106, + 152, + 239, + 271 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 239, + 271 + ], + "type": "image", + "image_path": "eeb3ecf239a7fddf6fea693422bd624507d740ee5eb0e0d9121775734a4a727b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 167, + 275, + 443, + 289 + ], + "lines": [ + { + "bbox": [ + 167, + 275, + 443, + 289 + ], + "spans": [ + { + "bbox": [ + 167, + 275, + 443, + 289 + ], + "type": "text", + "content": "(a) Relationship between " + }, + { + "bbox": [ + 167, + 275, + 443, + 289 + ], + "type": "inline_equation", + "content": "\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|" + }, + { + "bbox": [ + 167, + 275, + 443, + 289 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 167, + 275, + 443, + 289 + ], + "type": "inline_equation", + "content": "\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|" + }, + { + "bbox": [ + 
167, + 275, + 443, + 289 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 240, + 155, + 369, + 270 + ], + "blocks": [ + { + "bbox": [ + 240, + 155, + 369, + 270 + ], + "lines": [ + { + "bbox": [ + 240, + 155, + 369, + 270 + ], + "spans": [ + { + "bbox": [ + 240, + 155, + 369, + 270 + ], + "type": "image", + "image_path": "eab23ce40935c956c124f56f40c880ba2fbc3898c3859eb796b8a7a5982dc1f0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 370, + 156, + 501, + 270 + ], + "blocks": [ + { + "bbox": [ + 370, + 156, + 501, + 270 + ], + "lines": [ + { + "bbox": [ + 370, + 156, + 501, + 270 + ], + "spans": [ + { + "bbox": [ + 370, + 156, + 501, + 270 + ], + "type": "image", + "image_path": "09526943a1758135a2ffa575529a71311d5357db9ec3ab1385b0eced0bbbc036.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 108, + 297, + 239, + 415 + ], + "blocks": [ + { + "bbox": [ + 108, + 297, + 239, + 415 + ], + "lines": [ + { + "bbox": [ + 108, + 297, + 239, + 415 + ], + "spans": [ + { + "bbox": [ + 108, + 297, + 239, + 415 + ], + "type": "image", + "image_path": "0beaed88dd5d90733469244a12c970450bed7aec876c42609af9dd8827c9385e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 417, + 400, + 429 + ], + "lines": [ + { + "bbox": [ + 209, + 417, + 400, + 429 + ], + "spans": [ + { + "bbox": [ + 209, + 417, + 400, + 429 + ], + "type": "text", + "content": "(b) Relationship between " + }, + { + "bbox": [ + 209, + 417, + 400, + 429 + ], + "type": "inline_equation", + "content": "\\| x - \\mathrm{Proj}_{\\mathcal{D}}(x)\\|" + }, + { + "bbox": [ + 209, + 417, + 400, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 209, + 417, + 
400, + 429 + ], + "type": "inline_equation", + "content": "g(x)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 240, + 297, + 370, + 415 + ], + "blocks": [ + { + "bbox": [ + 240, + 297, + 370, + 415 + ], + "lines": [ + { + "bbox": [ + 240, + 297, + 370, + 415 + ], + "spans": [ + { + "bbox": [ + 240, + 297, + 370, + 415 + ], + "type": "image", + "image_path": "30752cd9b63981eab4965437fe946e333de3d07684b283e7ae370bce67f9ed57.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 437, + 504, + 495 + ], + "lines": [ + { + "bbox": [ + 104, + 437, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 504, + 495 + ], + "type": "text", + "content": "Figure 7: Quantitative experiments of Theorem 1 on the D4RL MuJoCo-medium datasets. The red star-shaped dots are the interpolated data and the circle dots are the extrapolated data. The color of the dots represents " + }, + { + "bbox": [ + 104, + 437, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|" + }, + { + "bbox": [ + 104, + 437, + 504, + 495 + ], + "type": "text", + "content": " values in (a) and " + }, + { + "bbox": [ + 104, + 437, + 504, + 495 + ], + "type": "inline_equation", + "content": "g(x)" + }, + { + "bbox": [ + 104, + 437, + 504, + 495 + ], + "type": "text", + "content": " values in (b), respectively. The darker the color, the smaller the corresponding value. In (a), the yellow dash line is the empirical upper bound of " + }, + { + "bbox": [ + 104, + 437, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x))\\|" + }, + { + "bbox": [ + 104, + 437, + 504, + 495 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 370, + 297, + 501, + 415 + ], + "blocks": [ + { + "bbox": [ + 370, + 297, + 501, + 415 + ], + "lines": [ + { + "bbox": [ + 370, + 297, + 501, + 415 + ], + "spans": [ + { + "bbox": [ + 370, + 297, + 501, + 415 + ], + "type": "image", + "image_path": "52e5bf46ea9c1b5f3543bb8d123882bb81995122161d46e7eb5fb6e383d8a770.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "text", + "content": "Figure 7a demonstrates that the interpolated data enjoy a tighter empirical upper bound of " + }, + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\| Q_{\\theta}(x) - Q_{\\theta}(\\mathrm{Proj}_{\\mathcal{D}}(x)) \\|" + }, + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "text", + "content": " (LHS of Theorem 1) than most of the extrapolated data. Moreover, the empirical upper bound of the Q-value difference grows with the increase of the sample-to-dataset distance " + }, + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\| x - \\mathrm{Proj}_{\\mathcal{D}(x)} \\|" + }, + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "text", + "content": ", which is consistent with Theorem 1 (the upper bound of value difference of deep Q function is well controlled by distance to the dataset). 
Figure 7b shows that the state-conditioned distance function " + }, + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "inline_equation", + "content": "g(s, a)" + }, + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "text", + "content": " can output low values for interpolated data and some near-dataset extrapolated data, and thus can be used as a relaxed policy constraint in these OOD regions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 608, + 353, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 608, + 353, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 353, + 620 + ], + "type": "text", + "content": "C STATE-CONDITIONED DISTANCE FUNCTION" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 635, + 231, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 231, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 231, + 647 + ], + "type": "text", + "content": "C.1 PROOF OF PROPERTY 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 658, + 505, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 505, + 693 + ], + "type": "text", + "content": "Proof. Given a state-action pair from the training data " + }, + { + "bbox": [ + 104, + 658, + 505, + 693 + ], + "type": "inline_equation", + "content": "(s, a) \\sim \\mathcal{D}" + }, + { + "bbox": [ + 104, + 658, + 505, + 693 + ], + "type": "text", + "content": ", we synthesize random noise actions from a uniform distribution over the action space, i.e. " + }, + { + "bbox": [ + 104, + 658, + 505, + 693 + ], + "type": "inline_equation", + "content": "\\hat{a} \\sim \\text{Unif}(\\mathcal{A})" + }, + { + "bbox": [ + 104, + 658, + 505, + 693 + ], + "type": "text", + "content": ". 
Then the distance function " + }, + { + "bbox": [ + 104, + 658, + 505, + 693 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 104, + 658, + 505, + 693 + ], + "type": "text", + "content": " is trained by Eq. (24)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 203, + 712, + 505, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 712, + 505, + 736 + ], + "spans": [ + { + "bbox": [ + 203, + 712, + 505, + 736 + ], + "type": "interline_equation", + "content": "\\min _ {g} \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\mathbb {E} _ {\\hat {a} \\sim U n i f (\\mathcal {A})} \\left[ \\| \\hat {a} - a \\| - g (s, \\hat {a}) \\right] ^ {2} \\right] \\tag {24}", + "image_path": "ddb86309f2fad6f5a70105341c0c2a06bc53c7da88a0a01a5488c70268533726.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 116 + ], + "type": "inline_equation", + "content": "\\left[\\| \\hat{a} - a\\| - g(s, \\hat{a})\\right]^2" + }, + { + "bbox": [ + 104, + 82, + 504, + 116 + ], + "type": "text", + "content": " can be upper bounded by some finite 
constants because " + }, + { + "bbox": [ + 104, + 82, + 504, + 116 + ], + "type": "inline_equation", + "content": "S \\times \\mathcal{A}" + }, + { + "bbox": [ + 104, + 82, + 504, + 116 + ], + "type": "text", + "content": " is compact in our analysis. The optimization problem in Eq. (24) can be reformulated as the following form according to the Fubini's Theorem." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 203, + 129, + 505, + 152 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 129, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 203, + 129, + 505, + 152 + ], + "type": "interline_equation", + "content": "\\min _ {g} \\mathbb {E} _ {\\hat {a} \\sim U n i f (\\mathcal {A})} \\left[ \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\| \\hat {a} - a \\| - g (s, \\hat {a}) \\right] ^ {2} \\right] \\tag {25}", + "image_path": "11c521f2e77e2b85bc49cc25453d2e7683992a409f5098635827bff3014a4539.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "type": "text", + "content": "Note that the objective of Eq. 
(25) can be also written as a functional " + }, + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "type": "inline_equation", + "content": "J[g(s, \\hat{a})]" + }, + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "type": "text", + "content": " with respect to function " + }, + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 159, + 504, + 183 + ], + "type": "text", + "content": " in the following form:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 141, + 187, + 505, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 187, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 141, + 187, + 505, + 213 + ], + "type": "interline_equation", + "content": "J [ g (s, \\hat {a}) ] = \\int_ {\\mathcal {A}} \\frac {1}{| \\mathcal {A} |} \\left[ \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} [ \\| \\hat {a} - a \\| - g (s, \\hat {a}) ] ^ {2} \\right] \\mathrm {d} \\hat {a} = \\int_ {\\mathcal {A}} F (s, \\hat {a}, g (s, \\hat {a})) \\mathrm {d} \\hat {a} \\tag {26}", + "image_path": "9a73cad03d5884c44d969a694b71df00cff64a75da53cc863a7bac7672af78b8.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "type": "text", + "content": "Based on calculus of variation, the extrema (maxima or minima) of functional " + }, + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "type": "inline_equation", + "content": "J[g(s,\\hat{a})]" + }, + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "type": "text", + "content": " can be obtained by solving the associated Euler-Lagrange equation " + }, + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "type": "inline_equation", + "content": "(\\partial F / \\partial g = 0)" + }, + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "type": "text", + 
"content": ". In our case, it requires the optimal state-conditioned distance function " + }, + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "type": "inline_equation", + "content": "g^{*}" + }, + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "type": "text", + "content": " satisfies the following conditions:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 159, + 270, + 505, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 270, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 159, + 270, + 505, + 338 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {\\partial}{\\partial g ^ {*}} \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} [ \\| \\hat {a} - a \\| - g ^ {*} (s, \\hat {a}) ] ^ {2} = 0 \\\\ \\Rightarrow \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\frac {\\partial}{\\partial g ^ {*}} [ \\| \\hat {a} - a \\| - g ^ {*} (s, \\hat {a}) ] ^ {2} \\right] = 0 (\\text {D N N i s c o n t i n u o u s}) \\tag {27} \\\\ \\Rightarrow \\quad \\mathbb {E} _ {(s, a) \\sim \\mathcal {D}} \\left[ \\left\\| \\hat {a} - a \\right\\| - g ^ {*} (s, \\hat {a}) \\right] = 0 \\\\ \\end{array}", + "image_path": "2915490704e57446f4a28a0dbac9f51630e00ec3915e1376f885b1f1795d665f.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 346, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 504, + 369 + ], + "type": "text", + "content": "Conditioned on a state " + }, + { + "bbox": [ + 104, + 346, + 504, + 369 + ], + "type": "inline_equation", + "content": "s \\in \\mathcal{D}" + }, + { + "bbox": [ + 104, + 346, + 504, + 369 + ], + "type": "text", + "content": ", the optimal state-conditioned distance function in Eq. 
(27) satisfies the following conditions:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 188, + 373, + 505, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 373, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 188, + 373, + 505, + 456 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\int_ {\\mathcal {A}} \\| \\hat {a} - a \\| \\mu (s, a) \\mathrm {d} a - \\int_ {\\mathcal {A}} \\mu (s, a) \\mathrm {d} a g ^ {*} (s, \\hat {a}) = 0, s \\in \\mathcal {D} \\\\ \\Rightarrow g ^ {*} (s, \\hat {a}) = \\frac {\\int_ {\\mathcal {A}} \\| \\hat {a} - a \\| \\mu (s , a) \\mathrm {d} a}{\\int_ {\\mathcal {A}} \\mu (s , a) \\mathrm {d} a}, s \\in \\mathcal {D} \\tag {28} \\\\ \\Rightarrow g ^ {*} (s, \\hat {a}) = \\int_ {\\mathcal {A}} C (s, a) \\| \\hat {a} - a \\| \\mathrm {d} a, s \\in \\mathcal {D} \\\\ \\end{array}", + "image_path": "70280c36501ecabbe7ea11c5d8772ff9195bcd4df15e4d4b195505f5bba03206.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "\\mu(s,a)" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": " is the empirical distribution on a finite offline dataset " + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{(x_i)\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": ", i.e., the sum of the Dirac measures " + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "\\frac{1}{n}\\sum_{i=1}^{n}\\delta_{x_i}" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "\\forall (s,a) \\notin \\mathcal{D}, \\mu(s,a) = 0. \\forall (s,a) \\in \\mathcal{D}, \\mu(s,a) > 0" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "C(s,a) = \\frac{\\mu(s,a)}{\\int_A\\mu(s,a)\\mathrm{d}a} \\geq 0" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "\\int_A C(s,a)\\mathrm{d}a = 1" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": ". Because " + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "L_2" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": "-norm is convex and the non-negative combination of convex functions is still convex, " + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "g^*(s,\\hat{a})" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": " is a convex function w.r.t. " + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "\\hat{a}" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": ". 
In addition, " + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "inline_equation", + "content": "\\forall \\hat{a} \\in \\mathcal{A}" + }, + { + "bbox": [ + 104, + 466, + 506, + 530 + ], + "type": "text", + "content": ", by the Jensen inequality, we have:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 175, + 534, + 505, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 534, + 505, + 550 + ], + "spans": [ + { + "bbox": [ + 175, + 534, + 505, + 550 + ], + "type": "interline_equation", + "content": "g ^ {*} (s, \\hat {a}) \\geq \\left\\| \\hat {a} - \\mathbb {E} _ {a \\sim U n i f (\\mathcal {A})} [ C (s, a) a ] \\right\\| = \\| \\hat {a} - a _ {o} (s) \\|, s \\in \\mathcal {D} \\tag {29}", + "image_path": "0bad4644689909291e7108e85c91bb967f96222e4f888acab75ddd72811129c8.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 559, + 499, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 499, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 499, + 573 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 559, + 499, + 573 + ], + "type": "inline_equation", + "content": "a_{o}(s)\\coloneqq \\mathbb{E}_{a\\sim Unif(\\mathcal{A})}[C(s,a)a], s\\in \\mathcal{D}" + }, + { + "bbox": [ + 104, + 559, + 499, + 573 + ], + "type": "text", + "content": " is the state-conditioned centroid of training dataset." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 494, + 577, + 505, + 587 + ], + "blocks": [ + { + "bbox": [ + 494, + 577, + 505, + 587 + ], + "lines": [ + { + "bbox": [ + 494, + 577, + 505, + 587 + ], + "spans": [ + { + "bbox": [ + 494, + 577, + 505, + 587 + ], + "type": "image", + "image_path": "d73f8aa0feb0845b0c7bc820ddc4d1e4bcfe623ce3bb9813ccf118a16fc014c2.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 601, + 232, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 232, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 232, + 612 + ], + "type": "text", + "content": "C.2 PROOF OF PROPERTY 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 622, + 506, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 622, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 622, + 506, + 635 + ], + "type": "text", + "content": "Proof. 
The negative gradient of the optimal state-conditioned distance function can be formulated as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 162, + 647, + 505, + 701 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 647, + 505, + 701 + ], + "spans": [ + { + "bbox": [ + 162, + 647, + 505, + 701 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} - \\nabla_ {\\hat {a}} g ^ {*} (s, \\hat {a}) = - \\int_ {\\mathcal {A}} C (s, a) \\frac {\\hat {a} - a}{\\| \\hat {a} - a \\|} \\mathrm {d} a, \\forall \\hat {a} \\in \\mathcal {A}, s \\in \\mathcal {D} \\tag {30} \\\\ = \\frac {1}{\\int_ {\\mathcal {A}} \\mu (s , a) \\mathrm {d} a} \\int_ {\\mathcal {A}} \\mu (s, a) \\frac {- (\\hat {a} - a)}{\\| \\hat {a} - a \\|} \\mathrm {d} a, \\forall \\hat {a} \\in \\mathcal {A}, s \\in \\mathcal {D} \\\\ \\end{array}", + "image_path": "12335bb5844a9b3afc0d77d9876d2e34ea8b446278c22c832d64a501f20d623f.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "text", + "content": "Observe that the direction of the negative gradient of " + }, + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "g^{*}(s,\\hat{a})" + }, + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "text", + "content": " is related to the integral of vector " + }, + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "-(\\hat{a} - a)" + }, + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "text", + "content": " (points towards " + }, + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "text", + "content": "). 
When " + }, + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "(s,a)\\notin \\mathcal{D}, - (\\hat{a} - a)" + }, + { + "bbox": [ + 105, + 709, + 505, + 733 + ], + "type": "text", + "content": " doesn't influence the final gradient because" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\mu(s, a) = 0" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": ". 
Therefore, " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "-(\\hat{a} - a)" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " only contribute to the final gradient of " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "g^{*}(s, \\hat{a})" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "(s, a) \\in \\mathcal{D}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\mu(s, a) > 0" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": ". For a given " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "s \\in \\mathcal{D}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " and any extrapolated action " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\hat{a}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " that lies outside the convex hull of training data, the integral of vector " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "-( \\hat{a} - a)" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " is basically a non-negative combination of vectors " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "-( \\hat{a} - a)" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " that point toward actions " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "a \\in \\mathcal{D}" + }, + { + "bbox": [ + 104, 
+ 82, + 506, + 140 + ], + "type": "text", + "content": " inside the convex hull. As a result, it's easy to see that " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "-\\nabla_{\\hat{a}} g^{*}(s, \\hat{a})" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " also points inside the convex hull formed by the data." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 494, + 144, + 504, + 153 + ], + "blocks": [ + { + "bbox": [ + 494, + 144, + 504, + 153 + ], + "lines": [ + { + "bbox": [ + 494, + 144, + 504, + 153 + ], + "spans": [ + { + "bbox": [ + 494, + 144, + 504, + 153 + ], + "type": "image", + "image_path": "3a7448145dae4f9942d834fdc119dc02af699d7ba16aa91551155dadcaf152e7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 171, + 313, + 183 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 171, + 313, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 313, + 183 + ], + "type": "text", + "content": "D THEORETICAL ANALYSIS OF DOGE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 196, + 506, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 196, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 104, + 196, + 506, + 253 + ], + "type": "text", + "content": "In this section, we analyze the performance of the policy learned by DOGE. We first adopt the Bellman-consistent coefficient from (Xie et al., 2021a) to quantify the distributional shift from the perspective of deep " + }, + { + "bbox": [ + 104, + 196, + 506, + 253 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 196, + 506, + 253 + ], + "type": "text", + "content": " functions generalization. Then, we gives the upper bound of the Bellman-consistent coefficient under the NTK regime (Appendix D.1). 
At last, we give the performance bound of DOGE (Appendix D.2)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 266, + 373, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 266, + 373, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 373, + 277 + ], + "type": "text", + "content": "D.1 UPPER BOUND OF BELLMAN-CONSISTENT COEFFICIENT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "content": "Let us first review the definition of Bellman-consistent coefficient " + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "inline_equation", + "content": "\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)" + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "content": " in (Xie et al., 2021a). We define " + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "inline_equation", + "content": "\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)" + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "content": " to measure the distributional shift from an arbitrary distribution " + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "content": " to data distribution " + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "content": ", w.r.t. 
" + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "content": " is the function class of " + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 286, + 506, + 321 + ], + "type": "text", + "content": " networks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 225, + 334, + 505, + 364 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 334, + 505, + 364 + ], + "spans": [ + { + "bbox": [ + 225, + 334, + 505, + 364 + ], + "type": "interline_equation", + "content": "\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) := \\sup _ {Q \\in \\mathcal {F}} \\frac {\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , v} ^ {2}}{\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2 , \\mu} ^ {2}} \\tag {31}", + "image_path": "9a04e52486242ea7617dd4a0137e1bde8e1ad96545670bdeb0de540fb63aa5da.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": "where the " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": "-weighted norm (square) is defined as " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", 
+ "content": "\\|f\\|_{2,\\mu}^2 \\coloneqq \\mathbb{E}_{\\mu}[\\|f\\|^2]" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": ", which is also applicable for any distribution " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\pi}Q" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": " is the Bellman operator of policy " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": ", defined as " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\pi}Q(s,a) \\coloneqq r(s,a) + \\gamma \\mathbb{E}_{a' \\sim \\pi(\\cdot|s'), s' \\sim \\mathcal{P}(\\cdot|s,a)}[Q(s',a')] \\coloneqq r(s,a) + \\gamma \\mathbb{P}^{\\pi}[Q(s',a')]" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "\\mathbb{P}^{\\pi}[\\cdot]" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": " is the brief notation for " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{a' \\sim \\pi(\\cdot|s'), s' \\sim \\mathcal{P}(\\cdot|s,a)}[\\cdot]" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": ". 
The smaller the ratio of the Bellman error under " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": ", the more transferable the " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": " function from " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": ", even when " + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "inline_equation", + "content": "\\sup_{(s,a)} \\frac{v(s,a)}{\\mu(s,a)} = \\infty" + }, + { + "bbox": [ + 104, + 373, + 506, + 449 + ], + "type": "text", + "content": ". Then we give the proof of Theorem 2 (Upper bound of Bellman-consistent coefficient)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "text", + "content": "Proof. 
We denote " + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "inline_equation", + "content": "x = (s, a)" + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "inline_equation", + "content": "x' = (s', a')" + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "inline_equation", + "content": "x_o = \\mathbb{E}_{x \\sim \\mathcal{D}}[x]" + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "text", + "content": " is the centroid of offline dataset. " + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "inline_equation", + "content": "d_1 = \\| x - x_o \\|" + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "inline_equation", + "content": "d_2 = \\| x' - x_o \\|" + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "text", + "content": " are the sample-to-centroid distances. Let " + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "inline_equation", + "content": "\\mu(x)" + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "text", + "content": " be the distribution under the offline dataset and " + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "inline_equation", + "content": "v(x)" + }, + { + "bbox": [ + 104, + 460, + 507, + 506 + ], + "type": "text", + "content": " be any distribution. Then, for the numerator in Eq. (8) and Eq. (31), we have the following inequalities." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 512, + 518, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 512, + 518, + 704 + ], + "spans": [ + { + "bbox": [ + 106, + 512, + 518, + 704 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2, v} ^ {2} \\\\ = \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\| Q (x) - r (x) - \\gamma \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| ^ {2} \\\\ = \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\| Q (x) - \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] - r (x) + (1 - \\gamma) \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| ^ {2} \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ \\| Q (x) - \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| + \\| r (x) \\| + \\| (1 - \\gamma) \\mathbb {P} ^ {\\pi} [ Q (x ^ {\\prime}) ] \\| \\right] ^ {2} (\\text {T r i a n g l e}) \\\\ = \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ \\| Q (x) - Q \\left(x _ {o}\\right) + Q \\left(x _ {o}\\right) - \\mathbb {P} ^ {\\pi} \\left[ Q \\left(x ^ {\\prime}\\right) \\right] \\| + \\| r (x) \\| + (1 - \\gamma) \\| \\mathbb {P} ^ {\\pi} \\left[ Q \\left(x ^ {\\prime}\\right) \\right] - Q \\left(x _ {o}\\right) + Q \\left(x _ {o}\\right) \\| \\right] ^ {2} \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ (1 - \\gamma) \\| Q \\left(x _ {o}\\right) \\| + \\| r (x) \\| + \\| Q (x) - Q \\left(x _ {o}\\right) \\| + (2 - \\gamma) \\| \\mathbb {P} ^ {\\pi} \\left[ Q \\left(x ^ {\\prime}\\right) \\right] - Q \\left(x _ {o}\\right) \\| \\right] ^ {2} (\\text {T r i a n g l e}) \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ \\underbrace {(1 - \\gamma) \\| Q \\left(x _ {o}\\right) \\| + \\| r (x) \\|} _ {\\mathcal {I} _ {1}} + \\underbrace {\\| Q (x) - Q \\left(x _ {o}\\right) \\|} _ {\\mathcal {I} _ {2}} + \\underbrace {(2 - \\gamma) \\mathbb {P} ^ {\\pi} [ \\| Q 
\\left(x ^ {\\prime}\\right) - Q \\left(x _ {o}\\right) \\| ]} _ {\\mathcal {I} _ {3}} \\right] ^ {2} (\\text {J e n s e n}) \\tag {32} \\\\ \\end{array}", + "image_path": "1b9e62e467e81c8068977402b85572343c4bdc9c99e524f4b0e264961620cbe1.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "text", + "content": "The RHS contains three parts: " + }, + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_1 = (1 - \\gamma)\\| Q(x_o)\\| +\\| r(x)\\|" + }, + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_2 = \\| Q(x) - Q(x_o)\\|" + }, + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_3 = (2 - \\gamma)\\mathbb{P}^\\pi [\\| Q(x') - Q(x_o)\\| ]" + }, + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "text", + "content": ". 
Because " + }, + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\| r(x)\\| \\in [0,R_{\\max}],\\forall x\\in S\\times \\mathcal{A},\\mathcal{I}_1" + }, + { + "bbox": [ + 104, + 708, + 506, + 734 + ], + "type": "text", + "content": " can be upper" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 157, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 157, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 157, + 94 + ], + "type": "text", + "content": "bounded as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 246, + 95, + 506, + 108 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 95, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 246, + 95, + 506, + 108 + ], + "type": "interline_equation", + "content": "\\mathcal {I} _ {1} \\leq (1 - \\gamma) Q \\left(x _ {o}\\right) + R _ {\\max } \\tag {33}", + "image_path": "0e6892287dc616f939128c023077d6ab935ce713d51ec67ed681ee44ba8c9e30.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 118, + 296, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 118, + 296, + 131 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 296, + 131 + ], + "type": "text", 
+ "content": "By applying Lemma 4, " + }, + { + "bbox": [ + 105, + 118, + 296, + 131 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_2" + }, + { + "bbox": [ + 105, + 118, + 296, + 131 + ], + "type": "text", + "content": " is upper bounded as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 224, + 146, + 506, + 167 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 146, + 506, + 167 + ], + "spans": [ + { + "bbox": [ + 224, + 146, + 506, + 167 + ], + "type": "interline_equation", + "content": "\\mathcal {I} _ {2} \\leq C _ {1} \\left[ \\sqrt {\\min (\\| x \\| , \\| x _ {o} \\|) d _ {1}} + 2 d _ {1} \\right] \\tag {34}", + "image_path": "093aeb6d61f572fb134d66471cda1c63e98e3f79805eb668d84ac524c61e181b.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 177, + 201, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 177, + 201, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 201, + 189 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_3" + }, + { + "bbox": [ + 104, + 177, + 201, + 189 + ], + "type": "text", + "content": " is upper bounded as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 201, + 204, + 505, + 225 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 204, + 505, + 225 + ], + "spans": [ + { + "bbox": [ + 201, + 204, + 505, + 225 + ], + "type": "interline_equation", + "content": "\\mathcal {I} _ {3} \\leq C _ {1} (2 - \\gamma) \\mathbb {P} ^ {\\pi} \\left[ \\sqrt {\\min (\\| x ^ {\\prime} \\| , \\| x _ {o} \\|) d _ {2}} + 2 d _ {2} \\right] \\tag {35}", + "image_path": "84771fcdf4192c49111c1ef0cbb39b18c3d8d4b666ffdfe954a65fb2082681d7.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 236, + 501, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 501, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 501, + 251 + ], + "type": "text", + 
"content": "In addition, we denote " + }, + { + "bbox": [ + 104, + 236, + 501, + 251 + ], + "type": "inline_equation", + "content": "C_2 \\coloneqq \\sqrt{\\sup_{x \\in S \\times A} \\|x\\|}" + }, + { + "bbox": [ + 104, + 236, + 501, + 251 + ], + "type": "text", + "content": ". Then, " + }, + { + "bbox": [ + 104, + 236, + 501, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_2" + }, + { + "bbox": [ + 104, + 236, + 501, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 236, + 501, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_3" + }, + { + "bbox": [ + 104, + 236, + 501, + 251 + ], + "type": "text", + "content": " can be further upper bounded by" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 249, + 265, + 505, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 265, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 249, + 265, + 505, + 285 + ], + "type": "interline_equation", + "content": "\\mathcal {I} _ {2} \\leq C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}\\right) \\tag {36}", + "image_path": "3a5ca48f9ffd63499ca660e9358a838d739e5ae65c66ff79ab29a1a9ca8ef02c.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 231, + 303, + 505, + 318 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 303, + 505, + 318 + ], + "spans": [ + { + "bbox": [ + 231, + 303, + 505, + 318 + ], + "type": "interline_equation", + "content": "\\mathcal {I} _ {3} \\leq (2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}\\right) \\tag {37}", + "image_path": "c45594d73fd70e721babc2a3fe5b10a3d6650a2606b273cfe4e522783f2971b5.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 328, + 504, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 328, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 504, + 354 + ], + "type": "text", + "content": "The 
above relaxation of the upper bound in Eq. (36) and Eq. (37) is not necessary, but for notation brevity, we choose to relax the upper bound by treating " + }, + { + "bbox": [ + 104, + 328, + 504, + 354 + ], + "type": "inline_equation", + "content": "C_2 \\coloneqq \\sqrt{\\sup_{x \\in S \\times A} \\| x \\|}" + }, + { + "bbox": [ + 104, + 328, + 504, + 354 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 357, + 403, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 403, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 403, + 369 + ], + "type": "text", + "content": "Plug Eq. (33), Eq. (36) and Eq. (37) into the RHS of Eq. (32), we can get" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 375, + 504, + 454 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 375, + 504, + 454 + ], + "spans": [ + { + "bbox": [ + 110, + 375, + 504, + 454 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| Q - \\mathcal {T} ^ {\\pi} Q \\right\\| _ {2, v} ^ {2} \\\\ \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} v (x) \\left[ (1 - \\gamma) Q (x _ {o}) + R _ {\\max } + C _ {1} (C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}) + (2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} (C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}) \\right] ^ {2} \\\\ = \\left\\| (1 - \\gamma) Q \\left(s _ {o}, a _ {o}\\right) + R _ {\\max } + C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}\\right) + (2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}\\right) \\right\\| _ {2, v} ^ {2} \\tag {38} \\\\ \\end{array}", + "image_path": "eccc83ffed35d26f68e02d84272acaa5bf5672de424b5f826c7c3beb4b6870df.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 459, + 506, + 
506 + ], + "type": "text", + "content": "For the denominator " + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "inline_equation", + "content": "\\| Q - \\mathcal{T}^{\\pi}Q\\|_{2,\\mu}^{2}" + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "text", + "content": " in Eq. (8) and Eq. (31), because the " + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "text", + "content": " function is approximated, there exists approximation error between " + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\pi}Q" + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "inline_equation", + "content": "Q - \\mathcal{T}^{\\pi}Q \\geq \\epsilon" + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "text", + "content": ". In addition, the distribution " + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "text", + "content": " contains some mismatch w.r.t. the equilibrium distribution induced by policy " + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "text", + "content": ". Therefore, it is reasonable to assume " + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "inline_equation", + "content": "\\| Q - \\mathcal{T}^{\\pi}Q\\|_{2,\\mu}^{2} \\geq \\epsilon_{\\mu} > 0" + }, + { + "bbox": [ + 104, + 459, + 506, + 506 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 510, + 504, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 504, + 535 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 504, + 535 + ], + "type": "text", + "content": "Then, we can complete the proof by plugging the upper bound in Eq. (38) and " + }, + { + "bbox": [ + 104, + 510, + 504, + 535 + ], + "type": "inline_equation", + "content": "\\| Q - \\mathcal{T}^{\\pi}Q\\|_{2,\\mu}^{2}\\geq" + }, + { + "bbox": [ + 104, + 510, + 504, + 535 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mu} > 0" + }, + { + "bbox": [ + 104, + 510, + 504, + 535 + ], + "type": "text", + "content": " into Eq. (8) or Eq. (31)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 541, + 509, + 610 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 509, + 610 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 509, + 610 + ], + "type": "interline_equation", + "content": "\\mathcal {B} (v, \\mu , \\mathcal {F}, \\pi) \\leq \\frac {1}{\\epsilon_ {\\mu}} \\left\\| \\underbrace {(1 - \\gamma) Q \\left(s _ {o} , a _ {o}\\right) + R _ {\\max }} _ {\\mathcal {B} _ {1}} + \\underbrace {C _ {1} \\left(C _ {2} \\sqrt {d _ {1}} + 2 d _ {1}\\right)} _ {\\mathcal {B} _ {2}} + \\underbrace {(2 - \\gamma) C _ {1} \\mathbb {P} ^ {\\pi} \\left(C _ {2} \\sqrt {d _ {2}} + 2 d _ {2}\\right)} _ {\\mathcal {B} _ {3}} \\right\\| _ {2, v} ^ {2} \\tag {39}", + "image_path": "1b64a4a22ae2b55f3582bf5be8e0be9e98df38279d2a3c96867799b687775b31.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "content": "To be mentioned, the distance regularization in DOGE compels the learned policy to output the action that is near the state-conditioned centroid of 
dataset and thus " + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_2" + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_3" + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "content": " can be driven to some small values. " + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_1" + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "content": " is independent on the distributional shift. Therefore, " + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)" + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "content": " can be bounded by some finite constants under DOGE. Therefore, the constrained policy set induced by DOGE is essentially a Bellman-consistent constrained policy set " + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "content": " defined in Definition 2. In addition, other policy constraint methods such as BEAR (Kumar et al., 2019) can also have bounded " + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "content": ". 
However, these policy constraint methods do not allow the learned policy shifts to those generalizable distributions where " + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{B}(v,\\mu ,\\mathcal{F},\\pi)" + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "content": " is small but " + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\sup_{(s,a)}\\frac{v(s,a)}{\\mu(s,a)}\\to \\infty" + }, + { + "bbox": [ + 104, + 628, + 506, + 732 + ], + "type": "text", + "content": ", which is essentially different with DOGE." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 358, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 358, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 358, + 94 + ], + "type": "text", + "content": "D.2 PERFORMANCE OF THE POLICY LEARNED BY DOGE" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 504, + 138 + ], + "type": "text", + "content": "Here, we briefly review the definition of the Bellman-consistent 
constrained policy set " + }, + { + "bbox": [ + 104, + 102, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 102, + 504, + 138 + ], + "type": "text", + "content": " defined in Definition 2. The Bellman-consistent coefficient under the transition induced by " + }, + { + "bbox": [ + 104, + 102, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 102, + 504, + 138 + ], + "type": "text", + "content": " can be bounded by some finite constants " + }, + { + "bbox": [ + 104, + 102, + 504, + 138 + ], + "type": "inline_equation", + "content": "l(k)" + }, + { + "bbox": [ + 104, + 102, + 504, + 138 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 259, + 152, + 505, + 166 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 259, + 152, + 505, + 166 + ], + "spans": [ + { + "bbox": [ + 259, + 152, + 505, + 166 + ], + "type": "interline_equation", + "content": "\\mathcal {B} \\left(\\rho_ {k}, \\mu , \\mathcal {F}, \\pi\\right) \\leq l (k) \\tag {40}", + "image_path": "ad33a13640a1060c4026eeb563b62a314772763aec04555689fc7b0a38d32424.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "inline_equation", + "content": "\\rho_0" + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "text", + "content": " is the initial state-action distribution and " + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "text", + "content": " is the distribution of training data. 
" + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "inline_equation", + "content": "\\rho_{k} = \\rho_{0}P^{\\pi_{1}}P^{\\pi_{2}}\\ldots P^{\\pi_{k}},\\forall \\pi_{1},\\pi_{2},\\ldots ,\\pi_{k}\\in \\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "inline_equation", + "content": "P^{\\pi_i}" + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "text", + "content": " is the transition operator on states induced by " + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "inline_equation", + "content": "\\pi_{i}" + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "inline_equation", + "content": "P^{\\pi_i}(s',a'|s,a) = \\mathcal{P}(s'|s,a)\\pi_i(a'|s')" + }, + { + "bbox": [ + 104, + 171, + 506, + 206 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "text", + "content": "We denote the constrained Bellman operator induced by " + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q(s,a) := r(s,a) + \\max_{\\pi \\in \\Pi_{\\mathcal{B}}} \\gamma \\mathbb{P}^{\\pi}[Q(s',a')]" + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "text", + "content": " can be seen as a operator in a redefined MDP and hence is a contraction mapping and exists a fixed point. 
We denote " + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "inline_equation", + "content": "Q^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "text", + "content": " as the fixed point of " + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "inline_equation", + "content": "Q^{\\Pi_{\\mathcal{B}}} = \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 209, + 506, + 255 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 260, + 249, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 260, + 249, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 249, + 272 + ], + "type": "text", + "content": "The Bellman optimal operator " + }, + { + "bbox": [ + 105, + 260, + 249, + 272 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 105, + 260, + 249, + 272 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 217, + 273, + 505, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 273, + 505, + 291 + ], + "spans": [ + { + "bbox": [ + 217, + 273, + 505, + 291 + ], + "type": "interline_equation", + "content": "\\mathcal {T} Q (s, a) := r (s, a) + \\max _ {\\pi} \\gamma \\mathbb {P} ^ {\\pi} [ Q \\left(s ^ {\\prime}, a ^ {\\prime}\\right) ] \\tag {41}", + "image_path": "1f7c25010924577e2a862baa8438e17b480bf47e4992259d2a48b89f7d2b86f2.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 298, + 495, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 298, + 495, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 495, + 312 
+ ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 298, + 495, + 312 + ], + "type": "text", + "content": " is also a contraction mapping. Its fixed point is the optimal value function " + }, + { + "bbox": [ + 104, + 298, + 495, + 312 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 298, + 495, + 312 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 298, + 495, + 312 + ], + "type": "inline_equation", + "content": "Q^{*} = \\mathcal{T}Q^{*}" + }, + { + "bbox": [ + 104, + 298, + 495, + 312 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 316, + 274, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 274, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 274, + 327 + ], + "type": "text", + "content": "Then, by the triangle inequality, we have:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 195, + 340, + 505, + 386 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 340, + 505, + 386 + ], + "spans": [ + { + "bbox": [ + 195, + 340, + 505, + 386 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| Q ^ {*} - Q ^ {\\pi_ {n}} \\right\\| _ {\\rho_ {0}} = \\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} + Q ^ {\\Pi_ {\\mathcal {B}}} - Q ^ {\\pi_ {n}} \\right\\| _ {\\rho_ {0}} \\\\ \\leq \\underbrace {\\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\rho_ {0}}} _ {L _ {1}} + \\underbrace {\\left\\| Q ^ {\\Pi_ {\\mathcal {B}}} - Q ^ {\\pi_ {n}} \\right\\| _ {\\rho_ {0}}} _ {L _ {2}} \\tag {42} \\\\ \\end{array}", + "image_path": "8d78e4deb943a5a801c4d06dcec0252077b6a4d552185ebb95f07d6719f0bf66.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "spans": [ + { + "bbox": [ + 
104, + 392, + 506, + 428 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "Q^{\\pi_n}" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": " is the true " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": " value of policy " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "\\pi_{n}" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "\\pi_{n}" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": " is the greedy policy w.r.t. to " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "Q_{n}" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": " in the Bellman-consistent constrained policy set " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "\\pi_{n} = \\sup_{\\pi \\in \\Pi_{\\mathcal{B}}}\\mathbb{E}_{a\\sim \\pi (\\cdot |s)}[Q_{n}(s,a)]" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "Q_{n}" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": " function after " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": "-th value iteration under the constrained Bellman operator " + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 392, + 506, + 428 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 433, + 342, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 433, + 342, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 433, + 342, + 445 + ], + "type": "text", + "content": "For " + }, + { + "bbox": [ + 104, + 433, + 342, + 445 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 104, + 433, + 342, + 445 + ], + "type": "text", + "content": " part in Eq. (42), we first focus on the infinity norm." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 118, + 460, + 504, + 524 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 460, + 504, + 524 + ], + "spans": [ + { + "bbox": [ + 118, + 460, + 504, + 524 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} = \\left\\| \\mathcal {T} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} \\\\ \\leq \\left\\| \\mathcal {T} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} + \\left\\| \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {*} \\right\\| _ {\\infty} \\\\ \\leq \\left\\| \\mathcal {T} Q ^ {*} - \\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} + \\gamma \\left\\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\right\\| _ {\\infty} \\quad \\left(\\mathcal {T} ^ {\\Pi_ {\\mathcal {B}}} \\text { is } \\gamma \\text {-contraction}\\right) \\tag {43} \\\\ = \\alpha (\\Pi_ {\\mathcal {B}}) + \\gamma \\| Q ^ {*} - Q ^ {\\Pi_ {\\mathcal {B}}} \\| _ {\\infty} \\\\ \\end{array}", + "image_path": "8cc86537e48e95c621e46a11c146153688aa3570084b36f2e162b352e0f1faa4.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 533, + 504, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 504, + 562 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 533, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\alpha (\\Pi_{\\mathcal{B}})\\coloneqq \\| \\mathcal{T}Q^{*} - \\mathcal{T}^{\\Pi_{\\mathcal{B}}}Q^{\\Pi_{\\mathcal{B}}}\\|_{\\infty}" + }, + { + "bbox": [ + 104, + 533, + 504, + 562 + ], + "type": "text", + "content": " is the suboptimality constant. 
Then, we get " + }, + { + "bbox": [ + 104, + 533, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\| Q^{*} - Q^{\\Pi_{\\mathcal{B}}}\\|_{\\infty}\\leq" + }, + { + "bbox": [ + 104, + 533, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\frac{\\alpha(\\Pi_{\\mathcal{B}})}{1 - \\gamma}" + }, + { + "bbox": [ + 104, + 533, + 504, + 562 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 533, + 504, + 562 + ], + "type": "inline_equation", + "content": "L_{1}\\leq \\| Q^{*} - Q^{\\Pi_{\\mathcal{B}}}\\|_{\\infty}\\leq \\frac{\\alpha(\\Pi_{\\mathcal{B}})}{1 - \\gamma}" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "text", + "content": "For " + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "text", + "content": ", we introduce Lemma 5, which upper bounds " + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\| Q^{\\Pi_{\\mathcal{B}}} - Q^{\\pi_n} \\|_{2,\\rho_0}^2" + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "text", + "content": ". The proof of Lemma 5 can be get by directly replacing " + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "inline_equation", + "content": "Q^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "text", + "content": " in the Appendix F.3. 
In (Le et al., 2019), because " + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "inline_equation", + "content": "Q^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "text", + "content": " is the optimal value function under the modified MDP induced by " + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\Pi_{\\mathcal{B}}}" + }, + { + "bbox": [ + 104, + 567, + 504, + 603 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 604, + 484, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 604, + 484, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 484, + 618 + ], + "type": "text", + "content": "Lemma 5. (Upper bound of error propagation). " + }, + { + "bbox": [ + 104, + 604, + 484, + 618 + ], + "type": "inline_equation", + "content": "\\| Q^{\\Pi_{\\mathcal{B}}} - Q^{\\pi_n}\\|_{2,\\rho_0}^2" + }, + { + "bbox": [ + 104, + 604, + 484, + 618 + ], + "type": "text", + "content": " can be upper bounded as" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 620, + 505, + 659 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 620, + 505, + 659 + ], + "spans": [ + { + "bbox": [ + 107, + 620, + 505, + 659 + ], + "type": "interline_equation", + "content": "\\left\\| Q ^ {\\Pi_ {\\mathcal {B}}} - Q ^ {\\pi_ {n}} \\right\\| _ {2, \\rho_ {0}} ^ {2} \\leq \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {0} (d s, d a) \\left[ \\sum_ {k = 0} ^ {n - 1} \\alpha_ {k} A _ {k} \\epsilon_ {k} ^ {2} + \\alpha_ {n} A _ {n} \\left(Q ^ {\\Pi_ {\\mathcal {B}}} - Q _ {0}\\right) ^ {2} \\right] (s, a) \\tag {44}", + "image_path": "2353583ceb9bd31f6b30d379a4951263e200280f418d465f1274a9e96a82d8d4.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 659, + 
133, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 659, + 133, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 659, + 133, + 669 + ], + "type": "text", + "content": "where" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 258, + 667, + 505, + 681 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 667, + 505, + 681 + ], + "spans": [ + { + "bbox": [ + 258, + 667, + 505, + 681 + ], + "type": "interline_equation", + "content": "\\epsilon_ {k} = Q _ {k + 1} - \\mathcal {T} ^ {\\Pi_ {B}} Q _ {k} \\tag {45}", + "image_path": "c9d02700fce8efb94f9a5205876de1544752978a0bed1322f28b1dcdea0a5872.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 237, + 682, + 505, + 713 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 682, + 505, + 713 + ], + "spans": [ + { + "bbox": [ + 237, + 682, + 505, + 713 + ], + "type": "interline_equation", + "content": "\\alpha_ {k} = \\frac {(1 - \\gamma) \\gamma^ {n - k - 1}}{1 - \\gamma^ {n + 1}} \\text {f o r} k < n \\tag {46}", + "image_path": "3fadbe4884f9d53d105936e50eca23c35078e41c55c08fab65b63cec89c646cd.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 237, + 709, + 307, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 709, + 307, + 735 + ], + "spans": [ + { + "bbox": [ + 237, + 709, + 307, + 735 + ], + "type": "interline_equation", + "content": "\\alpha_ {n} = \\frac {(1 - \\gamma) \\gamma^ {n}}{1 - \\gamma^ {n + 1}}", + "image_path": "930718745828224a0f57962e557f69d9f5396eb738479cb01a0d3022fca13aa4.jpg" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + 
"index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 147, + 79, + 505, + 115 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 79, + 505, + 115 + ], + "spans": [ + { + "bbox": [ + 147, + 79, + 505, + 115 + ], + "type": "interline_equation", + "content": "A _ {k} = \\frac {1 - \\gamma}{2} \\sum_ {m \\geq 0} \\gamma^ {m} \\left(P ^ {\\pi_ {n}}\\right) ^ {m} \\left[ \\left(P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}\\right) ^ {n - k} + P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}} \\dots P ^ {\\pi_ {k + 1}} \\right] \\quad \\text {f o r} k < n \\tag {47}", + "image_path": "b9ffdb9bb4784a12281456740a215b431615d254a8d597e6d2c16d4deabbdf2f.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 147, + 112, + 406, + 140 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 112, + 406, + 140 + ], + "spans": [ + { + "bbox": [ + 147, + 112, + 406, + 140 + ], + "type": "interline_equation", + "content": "A _ {n} = \\frac {1 - \\gamma}{2} \\sum_ {m \\geq 0} \\gamma^ {m} (P ^ {\\pi_ {n}}) ^ {m} \\left[ (P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}) ^ {n + 1} + P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}} \\dots P ^ {\\pi_ {0}} \\right]", + "image_path": "f176d1c68482e025cb09177c12a51edcf679bcc31690ea247929b4469dc75a3d.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "spans": [ + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "inline_equation", + "content": "Q_{0}" + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "text", + "content": " is the " + 
}, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "text", + "content": " function after initialization. Note that " + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "inline_equation", + "content": "\\lim_{n\\to \\infty}\\left[\\alpha_nA_n(Q^{\\Pi_{\\mathcal{B}}} - Q_0)^2\\right] = 0" + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "text", + "content": ", we leave out this term for analysis simplicity. In addition, each " + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "inline_equation", + "content": "A_{k}" + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "text", + "content": " is a probability kernel that combines " + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "inline_equation", + "content": "P^{\\pi_i}" + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "inline_equation", + "content": "P^{\\pi^{\\Pi_{\\mathcal{B}}}}" + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "text", + "content": " (the transition operator on states induced by the constrained optimal policy " + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "inline_equation", + "content": "\\pi^{\\Pi_{\\mathcal{B}}}\\in \\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "inline_equation", + "content": "\\sum_{k}\\alpha_{k} = 1" + }, + { + "bbox": [ + 104, + 153, + 506, + 202 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 206, + 468, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 206, + 468, + 221 + ], + "spans": [ + { + "bbox": [ + 104, + 206, + 468, + 221 + ], + "type": "text", + "content": "The key part in Eq. (44) is " + }, + { + "bbox": [ + 104, + 206, + 468, + 221 + ], + "type": "inline_equation", + "content": "\\int_{\\mathcal{S} \\times \\mathcal{A}} \\rho_0 A_k \\epsilon_k^2" + }, + { + "bbox": [ + 104, + 206, + 468, + 221 + ], + "type": "text", + "content": " and we expand this term as the following shows." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 224, + 504, + 296 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 224, + 504, + 296 + ], + "spans": [ + { + "bbox": [ + 108, + 224, + 504, + 296 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {0} A _ {k} \\epsilon_ {k} ^ {2} = \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\frac {1 - \\gamma}{2} \\rho_ {0} \\sum_ {m \\geq 0} \\gamma^ {m} (P ^ {\\pi_ {n}}) ^ {m} \\left[ (P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}) ^ {n - k} + P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}}... 
P ^ {\\pi_ {k + 1}} \\right] \\epsilon_ {k} ^ {2} \\\\ = \\frac {1 - \\gamma}{2} \\sum_ {m \\geq 0} \\gamma^ {m} \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\left[ \\left(P ^ {\\pi_ {n}}\\right) ^ {m} \\left(P ^ {\\pi^ {\\Pi_ {\\mathcal {B}}}}\\right) ^ {n - k} + \\left(P ^ {\\pi_ {n}}\\right) ^ {m} P ^ {\\pi_ {n}} P ^ {\\pi_ {n - 1}} \\dots P ^ {\\pi_ {k + 1}} \\right] \\rho_ {0} \\epsilon_ {k} ^ {2} \\tag {48} \\\\ \\end{array}", + "image_path": "a01535b4d56d186c56d5df5988fe7163cc2ca7b0a3980e76b39b43f6cc94c221.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "content": "As Eq. (40) shows, the policy set induced by DOGE is a Bellman-consistent constrained policy set " + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "content": " defined in Definition 2. Therefore, let " + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\rho_0" + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "content": " be the initial state-action distribution and " + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "content": " denote the distribution of training data. 
For any policy " + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\pi_1,\\pi_2,\\dots,\\pi_k\\in \\Pi_{\\mathcal{B}}" + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "content": ", the distribution after " + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "content": "-th Bellman-consistent iteration is " + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\rho_{k} = \\rho_{0}P^{\\pi_{1}}P^{\\pi_{2}}\\ldots P^{\\pi_{k}}" + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "content": ", there exists some finite constants " + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "inline_equation", + "content": "l(k)" + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "content": ", that " + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "inline_equation", + "content": "\\mathcal{B}(\\rho_k,\\mu ,\\mathcal{F},\\pi)\\leq l(k)" + }, + { + "bbox": [ + 104, + 300, + 506, + 357 + ], + "type": "text", + "content": " holds. Then we can get the following inequalities." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 195, + 369, + 358, + 385 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 369, + 358, + 385 + ], + "spans": [ + { + "bbox": [ + 195, + 369, + 358, + 385 + ], + "type": "interline_equation", + "content": "\\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2, \\rho_ {k}} ^ {2} \\leq \\| Q - \\mathcal {T} ^ {\\pi} Q \\| _ {2, \\mu} ^ {2} l (k)", + "image_path": "374c193ab7927d53d60b23cbbdd73a8ec0c482eed0b236772ce94e23cbea3872.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 219, + 385, + 504, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 385, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 219, + 385, + 504, + 412 + ], + "type": "interline_equation", + "content": "\\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {k} \\epsilon^ {2} \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} \\mu \\epsilon^ {2} l (k) \\quad (\\epsilon = Q - \\mathcal {T} ^ {\\pi} Q) \\tag {49}", + "image_path": "04df5ec90fd0fb05741ef408eee11442faeca0276572a9cd496b5af79796e13e.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 418, + 384, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 384, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 384, + 431 + ], + "type": "text", + "content": "As a result, by applying the result of Eq. (49) to Eq. 
(48), we can get" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 188, + 433, + 505, + 463 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 433, + 505, + 463 + ], + "spans": [ + { + "bbox": [ + 188, + 433, + 505, + 463 + ], + "type": "interline_equation", + "content": "\\int_ {\\mathcal {S} \\times \\mathcal {A}} \\rho_ {0} A _ {k} \\epsilon_ {k} ^ {2} \\leq \\int_ {\\mathcal {S} \\times \\mathcal {A}} (1 - \\gamma) \\sum_ {m \\geq 0} \\gamma^ {m} \\epsilon_ {k} ^ {2} \\mu l (m + n - k) \\tag {50}", + "image_path": "0fd253dbd19566ffb87cf8be989665e33c5d403a622389e3b6ea990513d279c8.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 472, + 449, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 449, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 449, + 487 + ], + "type": "text", + "content": "Plugs Eq. (50) into Eq. (44) and leaves out " + }, + { + "bbox": [ + 104, + 472, + 449, + 487 + ], + "type": "inline_equation", + "content": "\\left[\\alpha_{n}A_{n}(Q^{\\Pi_{\\mathcal{B}}} - Q_{0})^{2}\\right]" + }, + { + "bbox": [ + 104, + 472, + 449, + 487 + ], + "type": "text", + "content": " in Eq. 
(44), we get" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 502, + 509, + 653 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 502, + 509, + 653 + ], + "spans": [ + { + "bbox": [ + 106, + 502, + 509, + 653 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\lim _ {n \\to \\infty} L _ {2} ^ {2} \\leq \\lim _ {n \\to \\infty} \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\left[ \\sum_ {k = 0} ^ {n - 1} (1 - \\gamma) \\sum_ {m \\geq 0} \\gamma^ {m} l (m + n - k) \\alpha_ {k} \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\right] \\\\ = \\lim _ {n \\to \\infty} \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\left[ \\frac {1}{1 - \\gamma^ {n + 1}} \\sum_ {k = 0} ^ {n - 1} (1 - \\gamma) ^ {2} \\sum_ {m \\geq 0} \\gamma^ {m + n - k - 1} l (m + n - k) \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\right] \\\\ \\leq \\lim _ {n \\rightarrow \\infty} \\left[ \\frac {2 \\gamma (1 - \\gamma^ {n + 1})}{(1 - \\gamma) ^ {2}} \\right] ^ {2} \\left[ \\frac {1}{1 - \\gamma^ {n + 1}} L (\\Pi_ {\\mathcal {B}}) ^ {2} \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\right] \\\\ = \\left[ \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} \\right] ^ {2} L \\left(\\Pi_ {\\mathcal {B}}\\right) ^ {2} \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {2, \\mu} ^ {2} \\tag {51} \\\\ \\end{array}", + "image_path": "78381764cebc8b6f2d220995205a6c3538ad9417d24f0ac46930aa7fb81a4769.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 658, + 408, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 408, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 408, + 672 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 104, + 658, + 408, + 672 + ], + "type": "inline_equation", + "content": "L(\\Pi_{\\mathcal{B}}) = \\sqrt{(1 - \\gamma)^2\\sum_{k=1}^{\\infty}k\\gamma^{k-1}l(k)}" + }, + { + 
"bbox": [ + 104, + 658, + 408, + 672 + ], + "type": "text", + "content": ". Then, we can bound " + }, + { + "bbox": [ + 104, + 658, + 408, + 672 + ], + "type": "inline_equation", + "content": "L_2" + }, + { + "bbox": [ + 104, + 658, + 408, + 672 + ], + "type": "text", + "content": " by" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 225, + 674, + 505, + 700 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 674, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 225, + 674, + 505, + 700 + ], + "type": "interline_equation", + "content": "\\lim _ {n \\rightarrow \\infty} L _ {2} \\leq \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} L \\left(\\Pi_ {\\mathcal {B}}\\right) \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {\\mu} \\tag {52}", + "image_path": "c6eae7748880202955d0ebb5d02ba297d1bb8940bd0c37f121f0beb3186534d1.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "With the upper bound of " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\lim_{n\\to \\infty}L_2" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": ", we can complete the proof by adding these two terms together." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 162, + 90, + 505, + 119 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 90, + 505, + 119 + ], + "spans": [ + { + "bbox": [ + 162, + 90, + 505, + 119 + ], + "type": "interline_equation", + "content": "\\lim _ {n \\rightarrow \\infty} \\| Q ^ {*} - Q ^ {\\pi_ {n}} \\| _ {\\rho_ {0}} \\leq \\frac {2 \\gamma}{(1 - \\gamma) ^ {2}} \\left[ L \\left(\\Pi_ {\\mathcal {B}}\\right) \\sup _ {k \\geq 0} \\| \\epsilon_ {k} \\| _ {\\mu} + \\frac {1 - \\gamma}{2 \\gamma} \\alpha \\left(\\Pi_ {\\mathcal {B}}\\right)\\right] \\tag {53}", + "image_path": "b64ce4e85b5f5cf4bc504a62b1c9fe0db187ebbebd1db6cf629b7eeabd8709b6.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 131, + 269, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 131, + 269, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 131, + 269, + 144 + ], + "type": "text", + "content": "E IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 156, + 506, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 156, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 506, + 212 + ], + "type": "text", + "content": "DOGE can build on top 
of standard online actor-critic algorithms such as TD3(Fujimoto et al., 2018) and SAC(Haarnoja et al., 2018). We choose TD3 as our base because of its simplicity compared to other methods. We build DOGE on top of TD3 by simply plugging the state-conditioned distance function as a policy regularization term during policy training process. Then, the learning objective of policy " + }, + { + "bbox": [ + 104, + 156, + 506, + 212 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 156, + 506, + 212 + ], + "type": "text", + "content": " in Eq. (7) can be formulated as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 154, + 217, + 505, + 236 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 217, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 154, + 217, + 505, + 236 + ], + "type": "interline_equation", + "content": "\\pi = \\arg \\max _ {\\pi} \\min _ {\\lambda} \\mathbb {E} _ {s \\sim \\mathcal {D}} [ \\beta Q (s, \\pi (s)) - \\lambda (g (s, \\pi (s)) - G) ] \\quad \\text {s . t .} \\lambda \\geq 0 \\tag {54}", + "image_path": "ed0a2660949ad72a23a8bab39718d74785952cb9e6e827be5aa5b628c77e8cb9.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 245, + 506, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 245, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 104, + 245, + 506, + 291 + ], + "type": "text", + "content": "The " + }, + { + "bbox": [ + 104, + 245, + 506, + 291 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 245, + 506, + 291 + ], + "type": "text", + "content": " function, policy and state-conditioned distance function networks are represented by 3 layers ReLU activated MLPs with 256 units for each hidden layer and are optimized by Adam optimizer. In addition, we normalize each dimension of state to a standard normal distribution for Mujoco tasks. The hyperparameters of DOGE are listed in Table 2." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 128, + 321, + 483, + 616 + ], + "blocks": [ + { + "bbox": [ + 230, + 300, + 380, + 313 + ], + "lines": [ + { + "bbox": [ + 230, + 300, + 380, + 313 + ], + "spans": [ + { + "bbox": [ + 230, + 300, + 380, + 313 + ], + "type": "text", + "content": "Table 2: Hyperparameters of DOGE" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 128, + 321, + 483, + 616 + ], + "lines": [ + { + "bbox": [ + 128, + 321, + 483, + 616 + ], + "spans": [ + { + "bbox": [ + 128, + 321, + 483, + 616 + ], + "type": "table", + "html": "
HyperparametersValue
Shared parametersOptimizerAdam
StandardNormalize stateTrue for Mujoco
False for AntMaze
Batch size256
Layers3
Hidden dim256
TD3Actor learning rate3 × 10-4
Critic learning rate3 × 10-4for Mujoco
1 × 10-3for AntMaze
Discount factor γ0.99 for Mujoco
0.995 for AntMaze
Number of iterations106
Target update rate τ0.005
Policy noise0.2
Policy noise clipping0.5
Policy update frequency2
State-Conditioned Distance FunctionLearning rate1 × 10-3for Mujoco
1 × 10-4for AntMaze
Number of noise actions N20
Number of iterations Ng105for Mujoco
106for AntMaze
DOGEα{7.5, 17.5} Mujoco
{5, 10, 70} AntMaze
Lagrangian multiplier λclipped to [1, 100]
λ learning rate3e-4
", + "image_path": "6b7007033bbc2523388d9280a0faa91e00d9e964a468f02d80fcb743b8ffd3e5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 634, + 279, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 634, + 279, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 279, + 645 + ], + "type": "text", + "content": "E.1 TD3'S IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "content": "For the choice of the Critic learning rate and discount factor " + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "content": ", we find that for AntMaze tasks, a high Critic learning rate can improve the stability of value function during training process. This may be because the AntMaze tasks require the value function to dynamic programs more times to \"stitch\" suboptimal trajectories than Mujoco tasks. Therefore, we choose " + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-3}" + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "content": " and 0.995 as the Critic learning rate and discount factor " + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "content": " for AntMaze tasks, respectively. The other implementations such as policy noise scale and policy noise clipping are the same with author's implementation (Fujimoto et al., 2018)." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 445, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 445, + 94 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 445, + 94 + ], + "type": "text", + "content": "E.2 STATE-CONDITIONED DISTANCE FUNCTION'S IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "text", + "content": "We sample " + }, + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "inline_equation", + "content": "N = 20" + }, + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "text", + "content": " noise actions from a uniform distribution that covers the full action space to approximate the estimation value in Eq. (4). We find " + }, + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "inline_equation", + "content": "N = 20" + }, + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "text", + "content": " can balance the computation complexity and estimation accuracy and is the same sample numbers with CQL (Kumar et al., 2020b). 
The ablation of " + }, + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "text", + "content": " can be found in Fig. 15. The practical training objective of the state-conditioned distance function is as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 186, + 160, + 505, + 194 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 160, + 505, + 194 + ], + "spans": [ + { + "bbox": [ + 186, + 160, + 505, + 194 + ], + "type": "interline_equation", + "content": "\\min _ {g} \\mathbb {E} _ {(s, a) \\in \\mathcal {D}, \\hat {a} _ {i} \\sim U n i f (\\mathcal {A})} \\left[ \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left[ \\| a - \\hat {a} _ {i} \\| - g (s, \\hat {a} _ {i}) \\right] ^ {2} \\right] \\tag {55}", + "image_path": "a94b2924eb0618d1e50d6be053d0207b0a59ad21baf4b99e529da7b743d4330c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "text", + "content": "We find that a wider sample range than the max action space " + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "inline_equation", + "content": "[-a_{\\mathrm{max}}, a_{\\mathrm{max}}]" + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "text", + "content": " is helpful to characterize the geometry of the full offline dataset. This is because some actions in the offline dataset lie at the boundary of the action space, which can only be sampled with little probability when sampling from a narrow distribution. At this time, the noise actions may not cover the geometry information near the boundary. 
Therefore, we sample noise actions from a uniform distribution that is 3 times wider than the max action space, i.e., " + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\hat{a} \\sim \\text{Unif}[-3a_{\\mathrm{max}}, 3a_{\\mathrm{max}}]" + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "text", + "content": ". For the learning rate, we find that a high learning rate enables a stable training process in Mujoco tasks. Therefore, we choose " + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-3}" + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "text", + "content": " as the distance function learning rate for Mujoco and AntMaze, respectively. We also observe that for Mujoco tasks, " + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "inline_equation", + "content": "10^{5}" + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "text", + "content": " iterations can already produce a relatively good state-conditioned distance function, and training more times won't hurt the final results. To reduce computation, we only train the state-conditioned distance function for " + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "inline_equation", + "content": "10^{5}" + }, + { + "bbox": [ + 104, + 218, + 506, + 340 + ], + "type": "text", + "content": " steps for Mujoco tasks." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 353, + 304, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 353, + 304, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 304, + 364 + ], + "type": "text", + "content": "E.3 HYPERPARAMETERS TUNING OF DOGE" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": "The scale of " + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": " determines the strength of policy constraint. We tune " + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": " to balance the trade-off between policy constraint and policy improvement. To be mentioned, " + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": " is tuned within only 5 candidates for 20 tasks (17.5 for hopper-m, hopper-m-r and all Mujoco random datasets; 7.5 for other Mujoco datasets; 5 for antmaze-u; 10 for antmaze-u-d; 70 for other AntMaze tasks). This is acceptable in offline policy tuning following (Kumar et al., 2019; Brandfonbrener et al., 2021). To ensure numerical stability, we clip the Lagrangian multiplier " + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": " to [1, 100]. 
We also find a large initial " + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": " enables stable training for Mujoco tasks but slows down AntMaze training. Therefore, the initial value of Lagrangian multiplier " + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": " is 5 for Mujoco and 1 for AntMaze tasks, respectively." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 475, + 239, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 239, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 239, + 485 + ], + "type": "text", + "content": "E.4 PSEUDOCODE OF DOGE" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 495, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 540 + ], + "type": "text", + "content": "The pseudocode of DOGE is listed in Algorithm 1. Changes we make based on TD3 (Fujimoto et al., 2018) are marked in red. The only modification is the training process of the additional state-conditioned distance function and the constrained actor update. We can perform 1M training steps on one GTX 3080Ti GPU in less than " + }, + { + "bbox": [ + 104, + 495, + 506, + 540 + ], + "type": "inline_equation", + "content": "50\\mathrm{min}" + }, + { + "bbox": [ + 104, + 495, + 506, + 540 + ], + "type": "text", + "content": " for Mujoco tasks and 1h " + }, + { + "bbox": [ + 104, + 495, + 506, + 540 + ], + "type": "inline_equation", + "content": "40\\mathrm{min}" + }, + { + "bbox": [ + 104, + 495, + 506, + 540 + ], + "type": "text", + "content": " for AntMaze tasks." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 106, + 566, + 507, + 723 + ], + "blocks": [ + { + "bbox": [ + 106, + 551, + 287, + 563 + ], + "lines": [ + { + "bbox": [ + 106, + 551, + 287, + 563 + ], + "spans": [ + { + "bbox": [ + 106, + 551, + 287, + 563 + ], + "type": "text", + "content": "Algorithm 1 Our implementation for DOGE" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "lines": [ + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": "Require: Dataset " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " . State-conditioned distance network " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "g_{\\psi}" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " . Policy network " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\pi_{\\phi}" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " and target policy network " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\pi_{\\phi^{\\prime}}" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\phi^{\\prime}\\gets \\phi" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " . 
Value network " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "Q_{\\theta_i},i = 1,2" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " and target value network " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "Q_{\\theta_i'}" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "i = 1,2" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\theta_i^\\prime \\leftarrow \\theta_i" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " . State-conditioned distance network training steps " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "N_{g}" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " . Policy update frequency m. \n1: for " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "t = 0,1,\\dots ,M" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " do \n2: Sample mini-batch transitions " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\{(s_i,a_i,r_i,s_i')\\} \\sim \\mathcal{D}" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " \n3: if " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "t < N_g" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " then \n4: State-Conditioned Distance Function Update: Update " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " as Eq. 
(55) shows. \n5: end if \n6: Critic Update: Update " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\theta_{i}" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " using policy evaluation method in TD3. \n7: if " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " mod " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "m = 0" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " then \n8: Constrained Actor Update: Update " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\phi ,\\lambda" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " via Eq. (54). \n9: Update target networks: " + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\theta_i^\\prime \\gets \\tau \\theta_i + (1 - \\tau)\\theta_i^\\prime" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "inline_equation", + "content": "\\phi^{\\prime}\\gets \\tau \\phi +(1 - \\tau)\\phi" + }, + { + "bbox": [ + 106, + 566, + 507, + 723 + ], + "type": "text", + "content": " \n10: end if \n11: end for" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_body" + } + ], + "index": 10, + "sub_type": "algorithm" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 
760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 489, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 489, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 489, + 95 + ], + "type": "text", + "content": "E.5 EXPERIMENT SETUP FOR THE IMPACT OF DATA GEOMETRY ON DEEP " + }, + { + "bbox": [ + 104, + 82, + 489, + 95 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 82, + 489, + 95 + ], + "type": "text", + "content": " FUNCTIONS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "text", + "content": "We consider an one-dimensional random walk task with a fixed-horizon (50 steps for each episode), where agents at each step can move in the range of " + }, + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "inline_equation", + "content": "[-1, +1]" + }, + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "text", + "content": " and the state space is a straight range from " + }, + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "inline_equation", + "content": "[-10, 10]" + }, + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "text", + "content": ". The destination is located at " + }, + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "inline_equation", + "content": "s = 10" + }, + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "text", + "content": ". The closer the distance to the destination, the larger the reward that the agent can get. 
The discount factor " + }, + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "inline_equation", + "content": "\\gamma = 0.9" + }, + { + "bbox": [ + 104, + 102, + 506, + 156 + ], + "type": "text", + "content": ". The reward function is defined as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 258, + 156, + 505, + 179 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 156, + 505, + 179 + ], + "spans": [ + { + "bbox": [ + 258, + 156, + 505, + 179 + ], + "type": "interline_equation", + "content": "r = \\frac {4 0 0 - (s ^ {\\prime} - 1 0) ^ {2}}{4 0 0} \\tag {56}", + "image_path": "30713872438e0710707a89af8442cf54dc29ae48aea1b31b6d42ce742ed7d08c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "text", + "content": "We generate offline datasets with different geometry and train the agent based on these datasets. Each synthetic dataset consists of 200 transition steps. We get the approximated " + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "text", + "content": " value " + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "inline_equation", + "content": "\\hat{Q}" + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "text", + "content": " by training TD3 for " + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "inline_equation", + "content": "1e + 4" + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "text", + "content": " steps each dataset. 
The learning rate of Actor and Critic networks are both " + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "inline_equation", + "content": "10^{-3}" + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "text", + "content": ". The other implementation details are the same as the implementation of original TD3 (Fujimoto et al., 2018). The true " + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "text", + "content": " function can be get by Monte-Carlo estimation. We find that the near-destination states hold higher approximation error than that far away from the destination due to the scale of true " + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "text", + "content": " value near the destination is large. To alleviate the impact of " + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 186, + 504, + 277 + ], + "type": "text", + "content": " value scale on the approximation error, we define the relative approximation error as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 241, + 281, + 505, + 299 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 281, + 505, + 299 + ], + "spans": [ + { + "bbox": [ + 241, + 281, + 505, + 299 + ], + "type": "interline_equation", + "content": "\\hat {\\epsilon} (s, a) = \\epsilon (s, a) - \\min _ {a} \\epsilon (s, a) \\tag {57}", + "image_path": "e4326632d2e22fac716e233335597b00bb6b0ed61feb0a9a5407b22bbe778e45.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "type": 
"text", + "content": "where, " + }, + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "type": "inline_equation", + "content": "\\epsilon(s,a) = \\hat{Q}(s,a) - Q(s,a)" + }, + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "type": "text", + "content": ". The relative error in the above definition eliminates the effect of different states on the approximation error and can capture the over-estimation error that we care about. We plot the relative approximation error of deep " + }, + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "type": "text", + "content": " functions with different random seeds and data geometry in Fig. 13." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 367, + 315, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 367, + 315, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 315, + 380 + ], + "type": "text", + "content": "F ADDITIONAL EXPERIMENT RESULTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 392, + 322, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 392, + 322, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 322, + 403 + ], + "type": "text", + "content": "F.1 COMPARISON OF GENERALIZATION ABILITY" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 412, + 504, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 412, + 504, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 412, + 504, + 459 + ], + "type": "text", + "content": "In the well known AntMaze task in D4RL benchmark (Fu et al., 2020), where an ant needs to navigate from the start to the destination in a large maze. 
The trajectories with coordinates at " + }, + { + "bbox": [ + 104, + 412, + 504, + 459 + ], + "type": "inline_equation", + "content": "x \\times y \\in [4,13] \\times [7,9] \\cup [11.5,20.5] \\times [11,13]" + }, + { + "bbox": [ + 104, + 412, + 504, + 459 + ], + "type": "text", + "content": " in AntMaze medium tasks and " + }, + { + "bbox": [ + 104, + 412, + 504, + 459 + ], + "type": "inline_equation", + "content": "x \\times y \\in [10.5,21] \\times [7,9] \\cup [19,29.5] \\times [15,17]" + }, + { + "bbox": [ + 104, + 412, + 504, + 459 + ], + "type": "text", + "content": " in AntMaze large tasks are clipped, as Fig. 8 shows." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 157, + 477, + 289, + 609 + ], + "blocks": [ + { + "bbox": [ + 157, + 477, + 289, + 609 + ], + "lines": [ + { + "bbox": [ + 157, + 477, + 289, + 609 + ], + "spans": [ + { + "bbox": [ + 157, + 477, + 289, + 609 + ], + "type": "image", + "image_path": "69fe1100f4a4cbdf16d46648b59017a703d00f054610fcfe5e174067a6e3fb48.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 165, + 611, + 281, + 622 + ], + "lines": [ + { + "bbox": [ + 165, + 611, + 281, + 622 + ], + "spans": [ + { + "bbox": [ + 165, + 611, + 281, + 622 + ], + "type": "text", + "content": "(a) Modified Medium AntMaze" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 317, + 476, + 451, + 609 + ], + "blocks": [ + { + "bbox": [ + 317, + 476, + 451, + 609 + ], + "lines": [ + { + "bbox": [ + 317, + 476, + 451, + 609 + ], + "spans": [ + { + "bbox": [ + 317, + 476, + 451, + 609 + ], + "type": "image", + "image_path": "3863b685c8b28b2a7e4ffa25badcf673b66643b41bc4d5bd7adde3e41ba70453.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 611, + 438, + 622 + ], + "lines": [ + { + "bbox": [ + 331, + 611, + 438, + 622 + ], + "spans": [ + { + "bbox": [ 
+ 331, + 611, + 438, + 622 + ], + "type": "text", + "content": "(b) Modified Large AntMaze" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "lines": [ + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "type": "text", + "content": "Figure 8: The trajectories in the offline dataset are visualized as blue. Data transitions of two small areas on the critical pathways to the destination have been removed (red box)." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "text", + "content": "These clipped data counts only about one-tenth of the original dataset and lies in the close proximity of the original trajectories. Under these modified datasets, simply relaying on \"stitching\" data transitions is not enough to solve the navigation problems. We evaluate representative policy constraint method (TD3+BC (Fujimoto & Gu, 2021)), value regularization method (CQL (Kumar et al., 2020b)), in-sample learning method (IQL (Kostrikov et al., 2021b)) and DOGE (our method) on these modified datasets. The evaluation results before and after clipping the trajectories are listed in Table 3. 
The" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "learning curves for the modified AntMaze medium and AntMaze large tasks are listed in Fig. 9 and Fig. 4." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "content": "Observe in Table 3 that existing offline RL methods fail miserably and suffer from severe performance drops. By contrast, DOGE maintains competitive performance after the modification of the dataset and shows good generalization ability on unknown areas." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "text", + "content": "Apart from above experiments, we also evaluate DOGE when removing only one area: " + }, + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "inline_equation", + "content": "[10.5, 21] \\times [7, 9]" + }, + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "inline_equation", + "content": "[10.5, 21] \\times [7, 9]" + }, + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "text", + "content": " for AntMaze-large datasets and " + }, + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "inline_equation", + "content": "[4, 13] \\times [7, 9]" + }, + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "inline_equation", + "content": "[4, 13] \\times [7, 9]" + }, + { + "bbox": [ + 104, + 148, + 507, + 182 + ], + "type": "text", + "content": " for AntMaze-medium datasets. The final results can be seen in Table 4." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 124, + 211, + 485, + 405 + ], + "blocks": [ + { + "bbox": [ + 132, + 190, + 477, + 204 + ], + "lines": [ + { + "bbox": [ + 132, + 190, + 477, + 204 + ], + "spans": [ + { + "bbox": [ + 132, + 190, + 477, + 204 + ], + "type": "text", + "content": "Table 3: The performance drop after removing the data at the only way to destination." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 124, + 211, + 485, + 405 + ], + "lines": [ + { + "bbox": [ + 124, + 211, + 485, + 405 + ], + "spans": [ + { + "bbox": [ + 124, + 211, + 485, + 405 + ], + "type": "table", + "html": "
Dataset typeTD3+BCCQLIQLDOGE(ours)
antmaze-m-p-v2full data065.2±4.870.4±5.380.6±6.5
miss data010.7±18.410.2±2.233.2±27.3
Performance drop ↓-84%86%59%
antmaze-m-d-v2full data054.0±11.774.6±3.277.6±6.1
miss data08.5±5.37.6±5.740.2±32.9
Performance drop ↓-84%90%48%
antmaze-l-p-v2full data018.8±15.343.5±4.548.2±8.1
miss data001.0±0.722.4±15.9
Performance drop ↓-100%98%54%
antmaze-l-d-v2full data031.6±9.545.6±7.636.4±9.1
miss data005.2±3.114.6±11.1
Performance drop ↓-100%89%60%
", + "image_path": "65508669b4d9a2e7c335bad28535d7c8fb2f0863789706b358344787e86f7966.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 109, + 445, + 205, + 525 + ], + "blocks": [ + { + "bbox": [ + 133, + 429, + 193, + 439 + ], + "lines": [ + { + "bbox": [ + 133, + 429, + 193, + 439 + ], + "spans": [ + { + "bbox": [ + 133, + 429, + 193, + 439 + ], + "type": "text", + "content": "Policy constraint" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 445, + 205, + 525 + ], + "lines": [ + { + "bbox": [ + 109, + 445, + 205, + 525 + ], + "spans": [ + { + "bbox": [ + 109, + 445, + 205, + 525 + ], + "type": "image", + "image_path": "2d14e1975493eec5b100880be7e7a0707be39792d61534412f50576c0d0e02f8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 207, + 445, + 302, + 525 + ], + "blocks": [ + { + "bbox": [ + 223, + 429, + 295, + 439 + ], + "lines": [ + { + "bbox": [ + 223, + 429, + 295, + 439 + ], + "spans": [ + { + "bbox": [ + 223, + 429, + 295, + 439 + ], + "type": "text", + "content": "Value regularization" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 207, + 445, + 302, + 525 + ], + "lines": [ + { + "bbox": [ + 207, + 445, + 302, + 525 + ], + "spans": [ + { + "bbox": [ + 207, + 445, + 302, + 525 + ], + "type": "image", + "image_path": "fa0b632299ae9f8f3f4e4b912608dc3f50c1c492b4d50d180f4ea2388773158e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 306, + 445, + 400, + 525 + ], + "blocks": [ + { + "bbox": [ + 325, + 429, + 392, + 439 + ], + "lines": [ + { + "bbox": [ + 325, + 429, + 392, + 439 + ], + "spans": [ + { + "bbox": [ + 325, + 429, + 392, + 439 + ], + "type": "text", + "content": "In-sample learning" + } + ] + 
} + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 445, + 400, + 525 + ], + "lines": [ + { + "bbox": [ + 306, + 445, + 400, + 525 + ], + "spans": [ + { + "bbox": [ + 306, + 445, + 400, + 525 + ], + "type": "image", + "image_path": "fe1bb5b8c998182507b7b084cb1d392a70ecc1ae990d388b900c8a987060c1fc.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 405, + 445, + 499, + 525 + ], + "blocks": [ + { + "bbox": [ + 433, + 429, + 479, + 439 + ], + "lines": [ + { + "bbox": [ + 433, + 429, + 479, + 439 + ], + "spans": [ + { + "bbox": [ + 433, + 429, + 479, + 439 + ], + "type": "text", + "content": "DOGE (Ours)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 405, + 445, + 499, + 525 + ], + "lines": [ + { + "bbox": [ + 405, + 445, + 499, + 525 + ], + "spans": [ + { + "bbox": [ + 405, + 445, + 499, + 525 + ], + "type": "image", + "image_path": "5ed288c3b1348c5d6749ed5099c8ddf40e44722bece377c9730525858a93c3d7.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 109, + 533, + 205, + 613 + ], + "blocks": [ + { + "bbox": [ + 109, + 533, + 205, + 613 + ], + "lines": [ + { + "bbox": [ + 109, + 533, + 205, + 613 + ], + "spans": [ + { + "bbox": [ + 109, + 533, + 205, + 613 + ], + "type": "image", + "image_path": "1eab3984971d3da5a8ed4e393bc99b6de8e7748087575ab124b6a570e2f381bf.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 621, + 504, + 654 + ], + "lines": [ + { + "bbox": [ + 104, + 621, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 504, + 654 + ], + "type": "text", + "content": "Figure 9: Evaluation on TD3+BC(Fujimoto & Gu, 2021), CQL(Kumar et al., 2020b), IQL(Kostrikov et al., 2021b), and DOGE (ours) before and after removing the data shown in Fig.8a for 
AntMaze medium tasks." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 207, + 533, + 304, + 613 + ], + "blocks": [ + { + "bbox": [ + 207, + 533, + 304, + 613 + ], + "lines": [ + { + "bbox": [ + 207, + 533, + 304, + 613 + ], + "spans": [ + { + "bbox": [ + 207, + 533, + 304, + 613 + ], + "type": "image", + "image_path": "c11e43bdf8b2f8429a39dc237c7db5bd9305c216d7d67fb5f65586e1daba26c4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 306, + 533, + 402, + 613 + ], + "blocks": [ + { + "bbox": [ + 306, + 533, + 402, + 613 + ], + "lines": [ + { + "bbox": [ + 306, + 533, + 402, + 613 + ], + "spans": [ + { + "bbox": [ + 306, + 533, + 402, + 613 + ], + "type": "image", + "image_path": "0b4be628e8803f3f87d2c5370b119c58ff5905d1cc2b7fa7f0b349850115b421.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 405, + 533, + 501, + 613 + ], + "blocks": [ + { + "bbox": [ + 405, + 533, + 501, + 613 + ], + "lines": [ + { + "bbox": [ + 405, + 533, + 501, + 613 + ], + "spans": [ + { + "bbox": [ + 405, + 533, + 501, + 613 + ], + "type": "image", + "image_path": "9a55cec306a559695fce48e3ee1e7ba2b8af7ba35beea46ece1cbbe66811c522.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": 
[ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 165, + 101, + 443, + 186 + ], + "blocks": [ + { + "bbox": [ + 158, + 80, + 451, + 92 + ], + "lines": [ + { + "bbox": [ + 158, + 80, + 451, + 92 + ], + "spans": [ + { + "bbox": [ + 158, + 80, + 451, + 92 + ], + "type": "text", + "content": "Table 4: Ablation for DOGE generalization with different removal areas." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 165, + 101, + 443, + 186 + ], + "lines": [ + { + "bbox": [ + 165, + 101, + 443, + 186 + ], + "spans": [ + { + "bbox": [ + 165, + 101, + 443, + 186 + ], + "type": "table", + "html": "
DatasetFull datasetOne removalTwo removal
antmaze-m-p-v280.6±6.562.3±7.533.2±27.3
antmaze-m-d-v277.6±6.141.3±42.840.2±32.9
antmaze-l-p-v248.2±8.126.4±19.422.4±15.9
antmaze-l-d-v236.4±9.112.3±4.214.6±11.1
Total score242.8±29.8142.3±73.9110.4±87.2
", + "image_path": "02e0f0241f8f846c15e1e56347f8fa1ee93b83569a48f16f6f68b1d0cb676494.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 196, + 316, + 207 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 196, + 316, + 207 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 316, + 207 + ], + "type": "text", + "content": "F.2 ADDITIONAL COMPARISON WITH TD3+BC" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 216, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 251 + ], + "type": "text", + "content": "In this section, we further demonstrate the superiority of DOGE over our most related practical work TD3+BC (Fujimoto & Gu, 2021). One can find that the biggest difference between DOGE and TD3+BC lies in the policy constraint used for policy optimization:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 255, + 500, + 285 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 104, + 255, + 362, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 362, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 362, + 266 + ], + "type": "text", + "content": "- TD3+BC: constrains the policy to minimize the MSE BC loss." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 272, + 500, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 500, + 285 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 500, + 285 + ], + "type": "text", + "content": "- DOGE: constrains the policy to minimize the learned state-conditioned distance function " + }, + { + "bbox": [ + 104, + 272, + 500, + 285 + ], + "type": "inline_equation", + "content": "g(s, a)" + }, + { + "bbox": [ + 104, + 272, + 500, + 285 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 289, + 506, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 506, + 377 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 506, + 377 + ], + "type": "text", + "content": "As discussed in Section 3.1, the learned distance function " + }, + { + "bbox": [ + 104, + 289, + 506, + 377 + ], + "type": "inline_equation", + "content": "g(s,a)" + }, + { + "bbox": [ + 104, + 289, + 506, + 377 + ], + "type": "text", + "content": " can capture the global geometric information of the offline dataset, while the MSE BC loss can only provide local sample-to-sample regularization, which may be noisy, especially in datasets that contain low-quality samples. Taking Figure 10 as an illustration, under strict BC constraint, policy learning on noisy low-quality samples may provide contradicting learning signals to near-optimal samples, which can cause inferior policy performance and unstable training process. By contrast, the state-conditioned distance function " + }, + { + "bbox": [ + 104, + 289, + 506, + 377 + ], + "type": "inline_equation", + "content": "g(s,a)" + }, + { + "bbox": [ + 104, + 289, + 506, + 377 + ], + "type": "text", + "content": " in DOGE is trained on the whole dataset and hence brings global geometric information, which is far more informative and stable as compared with the MSE BC loss." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 161, + 387, + 271, + 495 + ], + "blocks": [ + { + "bbox": [ + 161, + 387, + 271, + 495 + ], + "lines": [ + { + "bbox": [ + 161, + 387, + 271, + 495 + ], + "spans": [ + { + "bbox": [ + 161, + 387, + 271, + 495 + ], + "type": "image", + "image_path": "61c367d79e1e14f4df27054cd0558caf13e2c4e8cd4211c24596bc88cae3ca36.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 200, + 498, + 231, + 506 + ], + "lines": [ + { + "bbox": [ + 200, + 498, + 231, + 506 + ], + "spans": [ + { + "bbox": [ + 200, + 498, + 231, + 506 + ], + "type": "text", + "content": "(a) TD3+BC" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 389, + 413, + 493 + ], + "blocks": [ + { + "bbox": [ + 309, + 389, + 413, + 493 + ], + "lines": [ + { + "bbox": [ + 309, + 389, + 413, + 493 + ], + "spans": [ + { + "bbox": [ + 309, + 389, + 413, + 493 + ], + "type": "image", + "image_path": "89ba3a7afafad8e81c7d2f1acc80924e841073b2a43ea614ecb116eab045c914.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 347, + 498, + 373, + 506 + ], + "lines": [ + { + "bbox": [ + 347, + 498, + 373, + 506 + ], + "spans": [ + { + "bbox": [ + 347, + 498, + 373, + 506 + ], + "type": "text", + "content": "(b) DOGE" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 157, + 534, + 243, + 552 + ], + "blocks": [ + { + "bbox": [ + 198, + 517, + 257, + 527 + ], + "lines": [ + { + "bbox": [ + 198, + 517, + 257, + 527 + ], + "spans": [ + { + "bbox": [ + 198, + 517, + 257, + 527 + ], + "type": "text", + "content": "Low-quality Samples" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 157, + 534, + 243, + 552 + ], + "lines": [ + { + "bbox": [ + 157, + 534, + 
243, + 552 + ], + "spans": [ + { + "bbox": [ + 157, + 534, + 243, + 552 + ], + "type": "image", + "image_path": "2717ce8236830b1fb75d049dd33db86dcd2847a1921796da5fc1663e20ed711a.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 244, + 534, + 302, + 552 + ], + "lines": [ + { + "bbox": [ + 244, + 534, + 302, + 552 + ], + "spans": [ + { + "bbox": [ + 244, + 534, + 302, + 552 + ], + "type": "text", + "content": "MSE BC Constraints of Near-optimal Samples" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 104, + 563, + 506, + 615 + ], + "lines": [ + { + "bbox": [ + 104, + 563, + 506, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 506, + 615 + ], + "type": "text", + "content": "Figure 10: Illustrations of the differences between (a) the MSE BC constraint of TD3+BC and (b) the state-conditioned distance function constraint of DOGE. In (a), the MSE BC constraint in TD3+BC blindly enforces the imitation behavior on any data samples, which may lead to an inferior policy in the presence of noisy low-quality samples. In (b), the state-conditioned distance function " + }, + { + "bbox": [ + 104, + 563, + 506, + 615 + ], + "type": "inline_equation", + "content": "g(s,a)" + }, + { + "bbox": [ + 104, + 563, + 506, + 615 + ], + "type": "text", + "content": " can provide more informative global dataset geometry information to guide the stable learning of the policy." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 306, + 533, + 368, + 551 + ], + "blocks": [ + { + "bbox": [ + 257, + 517, + 324, + 526 + ], + "lines": [ + { + "bbox": [ + 257, + 517, + 324, + 526 + ], + "spans": [ + { + "bbox": [ + 257, + 517, + 324, + 526 + ], + "type": "text", + "content": "Near-optimal Samples" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 337, + 517, + 383, + 526 + ], + "lines": [ + { + "bbox": [ + 337, + 517, + 383, + 526 + ], + "spans": [ + { + "bbox": [ + 337, + 517, + 383, + 526 + ], + "type": "text", + "content": "Policy Outputs" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 306, + 533, + 368, + 551 + ], + "lines": [ + { + "bbox": [ + 306, + 533, + 368, + 551 + ], + "spans": [ + { + "bbox": [ + 306, + 533, + 368, + 551 + ], + "type": "image", + "image_path": "48382d6e18730127b150a123b45009acfa033e011c6d0fbb260efbb60bca7044.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 369, + 534, + 441, + 551 + ], + "lines": [ + { + "bbox": [ + 369, + 534, + 441, + 551 + ], + "spans": [ + { + "bbox": [ + 369, + 534, + 441, + 551 + ], + "type": "text", + "content": "g(s,a) Distance Function Values" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 627, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 627, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 627, + 504, + 672 + ], + "type": "text", + "content": "To better illustrate the superiority of DOGE over TD3+BC, we add extra comparative experiments with TD3+BC on a new set of mixed-quality datasets. 
In halfcheetah-random dataset, we add different proportions (1% to 20%) of the near-optimal halfcheetah-medium-expert dataset to form new mixed datasets and evaluate how TD3+BC and DOGE perform. See Figure 11 for detailed results." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": "Figure 11 shows that DOGE enjoys more performance gains when the random dataset involves near-optimal data, while TD3+BC is heavily influenced by the local information from the larger proportion of the low-quality random data. Moreover, TD3+BC suffers from severe oscillation and training instability, while DOGE enjoys a stable training process due to the use of the more informative state-conditioned distance constraint that captures the overall dataset geometry." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 79, + 187, + 144 + ], + "blocks": [ + { + "bbox": [ + 111, + 79, + 187, + 144 + ], + "lines": [ + { + "bbox": [ + 111, + 79, + 187, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 79, + 187, + 144 + ], + "type": "image", + "image_path": 
"7fe3c3376a72cd45956ebc3962e3c7f4b2006cf1851b4714cc200ae968a49d28.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 152, + 506, + 194 + ], + "lines": [ + { + "bbox": [ + 104, + 152, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 506, + 194 + ], + "type": "text", + "content": "Figure 11: Comparisons between DOGE and TD3+BC on mixed datasets with different proportions of halfcheetah-medium-expert dataset added into halfcheetah-random dataset. Ratio- " + }, + { + "bbox": [ + 104, + 152, + 506, + 194 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 152, + 506, + 194 + ], + "type": "text", + "content": " means " + }, + { + "bbox": [ + 104, + 152, + 506, + 194 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 152, + 506, + 194 + ], + "type": "text", + "content": " medium-expert dataset is added into the original halfcheetah-random dataset. TD3+BC suffers severe oscillation and training instability, while DOGE enjoys stable training processes and substantial performance gains." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 189, + 79, + 266, + 144 + ], + "blocks": [ + { + "bbox": [ + 189, + 79, + 266, + 144 + ], + "lines": [ + { + "bbox": [ + 189, + 79, + 266, + 144 + ], + "spans": [ + { + "bbox": [ + 189, + 79, + 266, + 144 + ], + "type": "image", + "image_path": "8e1aa2029da2ee5851ccf604d7c757833b62ad9cfe7ac8a738b75692476d4308.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 267, + 80, + 343, + 144 + ], + "blocks": [ + { + "bbox": [ + 267, + 80, + 343, + 144 + ], + "lines": [ + { + "bbox": [ + 267, + 80, + 343, + 144 + ], + "spans": [ + { + "bbox": [ + 267, + 80, + 343, + 144 + ], + "type": "image", + "image_path": "6f5b108ba7bbffb91f7a732c63c672976819249a7e3bd97ddc10ae2d5efabacd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 345, + 80, + 421, + 144 + ], + "blocks": [ + { + "bbox": [ + 345, + 80, + 421, + 144 + ], + "lines": [ + { + "bbox": [ + 345, + 80, + 421, + 144 + ], + "spans": [ + { + "bbox": [ + 345, + 80, + 421, + 144 + ], + "type": "image", + "image_path": "5036814a0f6a941772b4948a362362c21b8b0999b661a847998aa7a19d7444a7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 423, + 80, + 499, + 144 + ], + "blocks": [ + { + "bbox": [ + 423, + 80, + 499, + 144 + ], + "lines": [ + { + "bbox": [ + 423, + 80, + 499, + 144 + ], + "spans": [ + { + "bbox": [ + 423, + 80, + 499, + 144 + ], + "type": "image", + "image_path": "943b5c53751c20a7c1b8b0b7945e3ab481eadb25bde6ef4fcaf32c1d33871f8a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 205, + 355, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ 
+ 105, + 205, + 355, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 355, + 216 + ], + "type": "text", + "content": "F.3 COMPARISON WITH UNCERTAINTY-BASED METHODS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 236, + 504, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 504, + 302 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 504, + 302 + ], + "type": "text", + "content": "We also compare DOGE with SOTA uncertainty-based offline RL approaches, including EDAC (An et al., 2021) and PBRL (Bai et al., 2021) are more complex D4RL AntMaze tasks. The final results are presented in Table 5. Table 5 shows that the SOTA uncertainty-based methods are unable to provide reasonable performance on the difficult Antmaze tasks, despite that they can achieve good performance on simpler MuJoCo tasks. A similar finding is also reported in a recent offline RL study (Anonymous, 2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 307, + 506, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 307, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 506, + 374 + ], + "type": "text", + "content": "In practical implementation of EDAC and PBRL, to obtain relatively accurate uncertainty measures and achieve reasonable performance, these methods typically need dozens of ensemble Q-networks, which can be quite costly and inefficient. Moreover, heavy hyperparameter tuning is also required for them to obtain the best performance. In contrast, our method quantifies the generalization ability of the Q-function from the perspective of dataset geometry and is trained using a simple regression loss in Eq. (4), which enjoys better training stability and simplicity." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 187, + 426, + 421, + 518 + ], + "blocks": [ + { + "bbox": [ + 169, + 407, + 440, + 418 + ], + "lines": [ + { + "bbox": [ + 169, + 407, + 440, + 418 + ], + "spans": [ + { + "bbox": [ + 169, + 407, + 440, + 418 + ], + "type": "text", + "content": "Table 5: Average normalized scores over 5 seeds on Antmaze tasks" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 187, + 426, + 421, + 518 + ], + "lines": [ + { + "bbox": [ + 187, + 426, + 421, + 518 + ], + "spans": [ + { + "bbox": [ + 187, + 426, + 421, + 518 + ], + "type": "table", + "html": "
DatasetEDACPBRLDOGE(Ours)
antmaze-u-v20097.0±1.8
antmaze-u-p-v20063.5±9.3
antmaze-m-p-v20080.6±6.5
antmaze-m-d-v20077.6±6.1
antmaze-l-p-v20048.2±8.1
antmaze-l-d-v20036.4±9.1
", + "image_path": "f3989451d5d906747bb415b511ded8f56d6016de714fd88455786d824c586802.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 585, + 339, + 596 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 339, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 339, + 596 + ], + "type": "text", + "content": "F.4 ADDITIONAL ANALYSIS ON DISTANCE FUNCTION" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 616, + 504, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 671 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 671 + ], + "type": "text", + "content": "We report the learning curves of the state-conditioned distance function " + }, + { + "bbox": [ + 104, + 616, + 504, + 671 + ], + "type": "inline_equation", + "content": "g(s, a)" + }, + { + "bbox": [ + 104, + 616, + 504, + 671 + ], + "type": "text", + "content": " trained on different datasets (including hopper-m-v2, halfcheetah-m-v2, and walker2d-m-v2 in Figure 12. Our proposed state-conditioned distance function is learned through a simple regression task (Eq. (4)), which is very easy to train. Figure 12 shows that it reaches convergence within only 1K training steps on D4RL MuJoCo medium datasets." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "text", + "content": "We also change the network configurations (i.e., number of hidden layers and hidden units) of the state-conditioned distance function " + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "inline_equation", + "content": "g(s, a)" + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "text", + "content": " to investigate how the expressivity of " + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "text", + "content": " influences the performance of the policy. Table 6 shows that DOGE achieves similar performance across different " + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "text", + "content": " network configurations, indicating that DOGE is robust to model complexity and expressivity of the state-conditioned distance function." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 79, + 231, + 185 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 231, + 185 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 231, + 185 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 231, + 185 + ], + "type": "image", + "image_path": "d342ac9f433f1f2e71f4bb00a94d05d5146b9e6cce31f52cc81029326576f62f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 194, + 459, + 207 + ], + "lines": [ + { + "bbox": [ + 149, + 194, + 459, + 207 + ], + "spans": [ + { + "bbox": [ + 149, + 194, + 459, + 207 + ], + "type": "text", + "content": "Figure 12: Learning curves of the state-conditioned distance function " + }, + { + "bbox": [ + 149, + 194, + 459, + 207 + ], + "type": "inline_equation", + "content": "g(s, a)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 245, + 79, + 365, + 185 + ], + "blocks": [ + { + "bbox": [ + 245, + 79, + 365, + 185 + ], + "lines": [ + { + "bbox": [ + 245, + 79, + 365, + 185 + ], + "spans": [ + { + "bbox": [ + 245, + 79, + 365, + 185 + ], + "type": "image", + "image_path": "d9d446b0951fd812c5397d4784c7312867b933e2965029a5af41f85f20588e03.jpg" + } 
+ ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 383, + 79, + 503, + 185 + ], + "blocks": [ + { + "bbox": [ + 383, + 79, + 503, + 185 + ], + "lines": [ + { + "bbox": [ + 383, + 79, + 503, + 185 + ], + "spans": [ + { + "bbox": [ + 383, + 79, + 503, + 185 + ], + "type": "image", + "image_path": "bf89404aced332aa669a577d33ebb36d479ddafa1a324f673d5947b607c471d3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 178, + 270, + 430, + 330 + ], + "blocks": [ + { + "bbox": [ + 104, + 228, + 506, + 262 + ], + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 262 + ], + "type": "text", + "content": "Table 6: Normalized scores of DOGE trained on distance functions with different network configurations. [128, 128] means " + }, + { + "bbox": [ + 104, + 228, + 506, + 262 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 228, + 506, + 262 + ], + "type": "text", + "content": " network has 2 hidden layers with 128 units. [256, 256, 256] means 3 hidden layers with 256 units." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 178, + 270, + 430, + 330 + ], + "lines": [ + { + "bbox": [ + 178, + 270, + 430, + 330 + ], + "spans": [ + { + "bbox": [ + 178, + 270, + 430, + 330 + ], + "type": "table", + "html": "
Dataset[128, 128][256, 256][256, 256, 256]
hopper-m99.4101.498.6
halfcheetah-m47.446.945.3
walker2d-m85.386.486.8
", + "image_path": "e9fc41184aadef9caae797cdb5c2d732de931b3cd2fcbb8cf5f7dbbd1c1215ff.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 350, + 463, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 350, + 463, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 463, + 371 + ], + "type": "text", + "content": "F.5 ADDITIONAL EXPERIMENTS OF THE IMPACT OF DATA GEOMETRY ON DEEP " + }, + { + "bbox": [ + 105, + 350, + 463, + 371 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 105, + 350, + 463, + 371 + ], + "type": "text", + "content": " FUNCTIONS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 380, + 504, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 427 + ], + "type": "text", + "content": "We run several experiments with different random seeds (see Figure 13). Although the approximation error pattern of different random seeds is not the same, they all perform in the same manner that deep " + }, + { + "bbox": [ + 104, + 380, + 504, + 427 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 380, + 504, + 427 + ], + "type": "text", + "content": " functions produce relatively low approximation error inside the convex hull of training data. We refer to this phenomenon as deep " + }, + { + "bbox": [ + 104, + 380, + 504, + 427 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 380, + 504, + 427 + ], + "type": "text", + "content": " functions interpolate well but struggle to extrapolate." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 112, + 435, + 240, + 521 + ], + "blocks": [ + { + "bbox": [ + 112, + 435, + 240, + 521 + ], + "lines": [ + { + "bbox": [ + 112, + 435, + 240, + 521 + ], + "spans": [ + { + "bbox": [ + 112, + 435, + 240, + 521 + ], + "type": "image", + "image_path": "3212392b8892505c92bc3cbd780bd02a3d760bd5bf9df9a82b3eb76e282bd1d8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 242, + 435, + 369, + 521 + ], + "blocks": [ + { + "bbox": [ + 242, + 435, + 369, + 521 + ], + "lines": [ + { + "bbox": [ + 242, + 435, + 369, + 521 + ], + "spans": [ + { + "bbox": [ + 242, + 435, + 369, + 521 + ], + "type": "image", + "image_path": "b8ba27213572322fedc4476000c6c131036e4ea81e8674f9c1afed6a1ea24a70.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 370, + 435, + 498, + 521 + ], + "blocks": [ + { + "bbox": [ + 370, + 435, + 498, + 521 + ], + "lines": [ + { + "bbox": [ + 370, + 435, + 498, + 521 + ], + "spans": [ + { + "bbox": [ + 370, + 435, + 498, + 521 + ], + "type": "image", + "image_path": "4412252625722646c5d2b50bbb23d8842d7c850bca6bad501c1717b9439c85bb.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 112, + 521, + 239, + 606 + ], + "blocks": [ + { + "bbox": [ + 112, + 521, + 239, + 606 + ], + "lines": [ + { + "bbox": [ + 112, + 521, + 239, + 606 + ], + "spans": [ + { + "bbox": [ + 112, + 521, + 239, + 606 + ], + "type": "image", + "image_path": "352fc6c28bd5149b30ea4c07eab417b1bc5a8fb7dd0ab8f20216ccb1ee7247b3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 242, + 521, + 369, + 606 + ], + "blocks": [ + { + "bbox": [ + 242, + 521, + 369, + 606 + ], + "lines": [ + { + 
"bbox": [ + 242, + 521, + 369, + 606 + ], + "spans": [ + { + "bbox": [ + 242, + 521, + 369, + 606 + ], + "type": "image", + "image_path": "d3e20f1b76060d0750c4bba328912261df74c45eb047a120a298f6d2665c4e3b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 370, + 521, + 498, + 606 + ], + "blocks": [ + { + "bbox": [ + 370, + 521, + 498, + 606 + ], + "lines": [ + { + "bbox": [ + 370, + 521, + 498, + 606 + ], + "spans": [ + { + "bbox": [ + 370, + 521, + 498, + 606 + ], + "type": "image", + "image_path": "5e473fecd1c2cc1a3d7e05ee10cb0add89eafccf7315a8e0b0b85b6fcc0f24a0.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 112, + 607, + 239, + 693 + ], + "blocks": [ + { + "bbox": [ + 112, + 607, + 239, + 693 + ], + "lines": [ + { + "bbox": [ + 112, + 607, + 239, + 693 + ], + "spans": [ + { + "bbox": [ + 112, + 607, + 239, + 693 + ], + "type": "image", + "image_path": "d09492e5ef76cff94f6222beec87801b87c644ba724ef8751639a388f185c03e.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 702, + 504, + 726 + ], + "lines": [ + { + "bbox": [ + 104, + 702, + 504, + 726 + ], + "spans": [ + { + "bbox": [ + 104, + 702, + 504, + 726 + ], + "type": "text", + "content": "Figure 13: The figures above depict the effect of different data geometries on the final deep " + }, + { + "bbox": [ + 104, + 702, + 504, + 726 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 702, + 504, + 726 + ], + "type": "text", + "content": " functions approximation error. The training data are marked as white dots." 
+ } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 242, + 607, + 369, + 693 + ], + "blocks": [ + { + "bbox": [ + 242, + 607, + 369, + 693 + ], + "lines": [ + { + "bbox": [ + 242, + 607, + 369, + 693 + ], + "spans": [ + { + "bbox": [ + 242, + 607, + 369, + 693 + ], + "type": "image", + "image_path": "620451d0408b6163526836a07de10d2121261d8e507efef5501bf6b7831d16c3.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 370, + 607, + 498, + 693 + ], + "blocks": [ + { + "bbox": [ + 370, + 607, + 498, + 693 + ], + "lines": [ + { + "bbox": [ + 370, + 607, + 498, + 693 + ], + "spans": [ + { + "bbox": [ + 370, + 607, + 498, + 693 + ], + "type": "image", + "image_path": "7f0b86b44bcac4cebf02f1e17b338b5a7be787041c4388c535dcbad3ba78fff8.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 189, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 189, + 93 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 189, + 93 + ], + "type": "text", + "content": "G ABLATIONS" + } + ] + } + ], + "index": 1 + }, + { + 
"bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "text", + "content": "We conduct ablation studies on the effect of " + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "inline_equation", + "content": "\\beta = \\frac{\\alpha}{\\frac{1}{n}\\sum_{i=1}^{n}|Q(s_i,a_i)|}" + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "text", + "content": " (see Figure 14), the non-parametric threshold " + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "text", + "content": " in Eq. (6) (see Figure 16) and the non-parametric number of noise actions " + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "text", + "content": " to train state-conditioned distance function (see Figure 15) on the performance of the final algorithm. We also conduct ablation studies on the effect of " + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "text", + "content": " on the Lagrangian multiplier " + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 106, + 506, + 154 + ], + "type": "text", + "content": " (see Figure 17)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": "For " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": ", we add or subtract 2.5 to the original value. For " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": ", we choose " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "N = 10, 20, 30" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": " to conduct experiments respectively. For " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": ", we choose " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": 
"inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": " upper quantile of the distance value in mini-batch samples and the results can be found in Table 7." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 122, + 223, + 487, + 380 + ], + "blocks": [ + { + "bbox": [ + 208, + 202, + 400, + 215 + ], + "lines": [ + { + "bbox": [ + 208, + 202, + 400, + 215 + ], + "spans": [ + { + "bbox": [ + 208, + 202, + 400, + 215 + ], + "type": "text", + "content": "Table 7: Ablations on G with different quantile." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 122, + 223, + 487, + 380 + ], + "lines": [ + { + "bbox": [ + 122, + 223, + 487, + 380 + ], + "spans": [ + { + "bbox": [ + 122, + 223, + 487, + 380 + ], + "type": "table", + "html": "
DatasetG = 30%G = 50%G = 70%G = 90%G = 100%
hopper-r-v219.8±0.321.1±12.615.5±13.517.6±12.216.4±12.4
halfcheetah-r-v219.4±0.617.8±1.217.8±0.717.7±1.017.7±0.8
walker2d-r-v22.6±3.90.9±2.42.2±2.61.8±3.32.2±3.2
hopper-m-v244.6±5.798.6±2.199.4±0.491.5±9.932.9±54.3
halfcheetah-m-v241.3±1.245.3±0.646.0±0.146.0±0.846.1±0.5
walker2d-m-v283.7±7.586.8±0.887.3±1.669.9±28.984.2±1.0
hopper-m-r-v251.5±11.276.2±17.779.6±36.978.4±27.665.7±37.2
halfcheetah-m-r-v25.9±5.742.8±0.643.2±0.142.2±0.842.0±0.6
walker2d-m-r-v228.3±14.387.3±2.387.9±2.477.8±21.678.6±24.1
hopper-m-e-v261.7±10.4102.7±5.282.8±5.888.9±17.770.0±48.4
halfcheetah-m-e-v246.9±5.278.7±8.475.1±15.473.5±13.669.9±8.7
walker2d-m-e-v2110.5±0.7110.4±1.5111.1±0.5110.2±22.580.0±54.3
", + "image_path": "b2e94712a55f024d4e0acbf39e071c494bd54e9d0802b46ca77ceec1a8ca6de9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "text", + "content": "Seen from Table 7 that using different " + }, + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "text", + "content": " for different tasks may achieve even better performance. Particularly, for some datasets with diverse data distributions that need to find good data from suboptimal data, a more tolerant quantile (e.g., " + }, + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "inline_equation", + "content": "G = 70\\%" + }, + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "text", + "content": ") can reasonably extend feasible region and increase the opportunity to find the optimal policy, such as hopper-m-r, halfcheetah-m-r, walker2d-m-r, hopper-m-e, halfcheetah-m-e. However, an overly relaxed quantile (e.g., " + }, + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "inline_equation", + "content": "G = 90\\%" + }, + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 391, + 504, + 468 + ], + "type": "text", + "content": ") increases the risk of including problematic OOD actions in policy learning, causing performance drop due to value overestimation and high variance." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "text", + "content": "By contrast, an overly restrictive quantile such as " + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "inline_equation", + "content": "G = 30\\%" + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "text", + "content": " can be over-conservative and cause significant constraints violations that impede policy learning, as constraints satisfaction is favored over the max-Q operation in most updates. This can be reflected in the additional results for the Lagrangian multiplier " + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "text", + "content": " (see Appendix E.2 for learning curves and Figure 11 for additional ablations), where " + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\lambda \\rightarrow \\infty" + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "text", + "content": " for some tasks under " + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "inline_equation", + "content": "G = 30\\%" + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "text", + "content": ". This will cause the suboptimality gap " + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "inline_equation", + "content": "(\\frac{1 - \\gamma}{2\\gamma}\\alpha(\\Pi_{\\mathcal{D}}))" + }, + { + "bbox": [ + 104, + 473, + 504, + 544 + ], + "type": "text", + "content": " in Theorem 3 to dominate the performance bound, leading to inferior policy." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 548, + 504, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 592 + ], + "type": "text", + "content": "As hyperparameter tuning in practical offline RL applications without online interaction is very difficult, to reduce the computational load, we set " + }, + { + "bbox": [ + 104, + 548, + 504, + 592 + ], + "type": "inline_equation", + "content": "G = 50\\%" + }, + { + "bbox": [ + 104, + 548, + 504, + 592 + ], + "type": "text", + "content": " as default in a non-parametric manner, since it consistently achieves good performance, and is neither too conservative nor too aggressive for most tasks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "text", + "content": "Observe in Figure 14 that DOGE maintains the similar performance with the changes of " + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "text", + "content": " on most of Mujoco tasks. At the same time, we also observe that the effect of " + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "text", + "content": " on the experiment is not obvious. 
Compared with " + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "text", + "content": ", we find that " + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "text", + "content": " has a more significant effect on the experimental results. Observe in Figure 16 that a small " + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "text", + "content": " usually causes the policy set induced by DOGE to be too small to obtain near-optimal policy. By contrast, a large " + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 597, + 504, + 665 + ], + "type": "text", + "content": " is not likely to cause excessive error accumulation and hence maintains relatively good performance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 669, + 504, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 669, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 104, + 669, + 504, + 692 + ], + "type": "text", + "content": "In addition, the ablation studies show that our method is hyperparameter-robust and maintains good performance with changes in hyperparameters." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 79, + 206, + 156 + ], + "blocks": [ + { + "bbox": [ + 110, + 79, + 206, + 156 + ], + "lines": [ + { + "bbox": [ + 110, + 79, + 206, + 156 + ], + "spans": [ + { + "bbox": [ + 110, + 79, + 206, + 156 + ], + "type": "image", + "image_path": "b497c0689940a9f9426b7cb12c8ffb4ea614502dcd3f02d84d4560c0f7dbc628.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 209, + 80, + 304, + 156 + ], + "blocks": [ + { + "bbox": [ + 209, + 80, + 304, + 156 + ], + "lines": [ + { + "bbox": [ + 209, + 80, + 304, + 156 + ], + "spans": [ + { + "bbox": [ + 209, + 80, + 304, + 156 + ], + "type": "image", + "image_path": "8351bb38fc56653a03e65581991794509be44a76e45bfa6159db42132ca99d2c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 80, + 402, + 157 + ], + "blocks": [ + { + "bbox": [ + 306, + 80, + 402, + 157 + ], + "lines": [ + { + "bbox": [ + 306, + 80, + 402, + 157 + ], + "spans": [ + { + "bbox": [ + 306, + 80, + 402, + 157 + ], + "type": "image", + "image_path": "2a26e65bd3e1194986a10158a4a7aa95bcfd4e458ee6c9468195c450d0870cdb.jpg" + } + ] + 
} + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 405, + 80, + 499, + 157 + ], + "blocks": [ + { + "bbox": [ + 405, + 80, + 499, + 157 + ], + "lines": [ + { + "bbox": [ + 405, + 80, + 499, + 157 + ], + "spans": [ + { + "bbox": [ + 405, + 80, + 499, + 157 + ], + "type": "image", + "image_path": "7a5bd1f031752dbe32eb8381a22037d4ecb9af59fe934b37e6624c8cf6ac171f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 111, + 158, + 206, + 234 + ], + "blocks": [ + { + "bbox": [ + 111, + 158, + 206, + 234 + ], + "lines": [ + { + "bbox": [ + 111, + 158, + 206, + 234 + ], + "spans": [ + { + "bbox": [ + 111, + 158, + 206, + 234 + ], + "type": "image", + "image_path": "d6ddab1808b5be419a0818bc318d02e5be9727792a56651bfe6fb6c4c70bd5d7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 209, + 158, + 304, + 234 + ], + "blocks": [ + { + "bbox": [ + 209, + 158, + 304, + 234 + ], + "lines": [ + { + "bbox": [ + 209, + 158, + 304, + 234 + ], + "spans": [ + { + "bbox": [ + 209, + 158, + 304, + 234 + ], + "type": "image", + "image_path": "eaa86913dfe61325f25b4691e42eb46ce85f8776970d04b33599ed4459860b69.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 158, + 402, + 234 + ], + "blocks": [ + { + "bbox": [ + 306, + 158, + 402, + 234 + ], + "lines": [ + { + "bbox": [ + 306, + 158, + 402, + 234 + ], + "spans": [ + { + "bbox": [ + 306, + 158, + 402, + 234 + ], + "type": "image", + "image_path": "9ec4541ddd94166d6bb8214c4232a40123af9ded76f5cd20ca6e0b44c416af3b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 405, + 158, + 499, + 234 + ], + "blocks": [ + { + "bbox": [ + 405, + 158, 
+ 499, + 234 + ], + "lines": [ + { + "bbox": [ + 405, + 158, + 499, + 234 + ], + "spans": [ + { + "bbox": [ + 405, + 158, + 499, + 234 + ], + "type": "image", + "image_path": "7bfc9c772203b3c9e1ad705a66c213b6dd9ef30790615625735db3dd02a86b9a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 111, + 235, + 206, + 312 + ], + "blocks": [ + { + "bbox": [ + 111, + 235, + 206, + 312 + ], + "lines": [ + { + "bbox": [ + 111, + 235, + 206, + 312 + ], + "spans": [ + { + "bbox": [ + 111, + 235, + 206, + 312 + ], + "type": "image", + "image_path": "fe6aa72c48fe7ed2bdfb6ba6e57b07d802cb8c43d45e4920168a2d14b1ac04a2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 320, + 451, + 333 + ], + "lines": [ + { + "bbox": [ + 157, + 320, + 451, + 333 + ], + "spans": [ + { + "bbox": [ + 157, + 320, + 451, + 333 + ], + "type": "text", + "content": "Figure 14: Ablation for " + }, + { + "bbox": [ + 157, + 320, + 451, + 333 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 157, + 320, + 451, + 333 + ], + "type": "text", + "content": ". Error bars indicate min and max over 5 seeds." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 209, + 235, + 304, + 312 + ], + "blocks": [ + { + "bbox": [ + 209, + 235, + 304, + 312 + ], + "lines": [ + { + "bbox": [ + 209, + 235, + 304, + 312 + ], + "spans": [ + { + "bbox": [ + 209, + 235, + 304, + 312 + ], + "type": "image", + "image_path": "58d7058bb3e951940f73b53cd6e8345ffd13594d412ee89bfc0e38e288880812.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 235, + 402, + 312 + ], + "blocks": [ + { + "bbox": [ + 306, + 235, + 402, + 312 + ], + "lines": [ + { + "bbox": [ + 306, + 235, + 402, + 312 + ], + "spans": [ + { + "bbox": [ + 306, + 235, + 402, + 312 + ], + "type": "image", + "image_path": "d8b0ec68d0da49a551e8df76c2fe8a622e9e1a6c98efafbbbf3c70d6e2fae92f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 405, + 235, + 499, + 312 + ], + "blocks": [ + { + "bbox": [ + 405, + 235, + 499, + 312 + ], + "lines": [ + { + "bbox": [ + 405, + 235, + 499, + 312 + ], + "spans": [ + { + "bbox": [ + 405, + 235, + 499, + 312 + ], + "type": "image", + "image_path": "d860b015b309b6776eba8bd564552ab8f708a3be73624485f8ec71c9f76ec9b1.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 111, + 344, + 206, + 420 + ], + "blocks": [ + { + "bbox": [ + 111, + 344, + 206, + 420 + ], + "lines": [ + { + "bbox": [ + 111, + 344, + 206, + 420 + ], + "spans": [ + { + "bbox": [ + 111, + 344, + 206, + 420 + ], + "type": "image", + "image_path": "1cbc0d576afac3fc3112ce9401bd9388afbb211cc32d286ce4bf7876ee8658a8.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 209, + 344, + 304, + 420 + ], + "blocks": [ + { 
+ "bbox": [ + 209, + 344, + 304, + 420 + ], + "lines": [ + { + "bbox": [ + 209, + 344, + 304, + 420 + ], + "spans": [ + { + "bbox": [ + 209, + 344, + 304, + 420 + ], + "type": "image", + "image_path": "d1beb6a7e62f0a6d9207e577bb4d181d1375a289871167a59dafa665549874ec.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 306, + 344, + 402, + 420 + ], + "blocks": [ + { + "bbox": [ + 306, + 344, + 402, + 420 + ], + "lines": [ + { + "bbox": [ + 306, + 344, + 402, + 420 + ], + "spans": [ + { + "bbox": [ + 306, + 344, + 402, + 420 + ], + "type": "image", + "image_path": "8f127cc4d24c131ed087b453c5806d833a1b1ae004f7a23828861fef0774c6ff.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 405, + 344, + 499, + 420 + ], + "blocks": [ + { + "bbox": [ + 405, + 344, + 499, + 420 + ], + "lines": [ + { + "bbox": [ + 405, + 344, + 499, + 420 + ], + "spans": [ + { + "bbox": [ + 405, + 344, + 499, + 420 + ], + "type": "image", + "image_path": "d2823754ccef8919b862b94b7298b52eeec33031f4fdd12e075c157ce282323a.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 111, + 422, + 206, + 498 + ], + "blocks": [ + { + "bbox": [ + 111, + 422, + 206, + 498 + ], + "lines": [ + { + "bbox": [ + 111, + 422, + 206, + 498 + ], + "spans": [ + { + "bbox": [ + 111, + 422, + 206, + 498 + ], + "type": "image", + "image_path": "8acb23bf4361aab8395a3bda4df853ed734977366b215cde5ab9242d79138697.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 209, + 422, + 304, + 498 + ], + "blocks": [ + { + "bbox": [ + 209, + 422, + 304, + 498 + ], + "lines": [ + { + "bbox": [ + 209, + 422, + 304, + 498 + ], + "spans": [ + { + "bbox": [ + 209, + 422, + 304, + 498 + ], + "type": 
"image", + "image_path": "0f5c483ba0c4d6eff375c65fe8e84a933f1b2dbcd7fb5e674e1efdcee7b4962f.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 306, + 422, + 402, + 498 + ], + "blocks": [ + { + "bbox": [ + 306, + 422, + 402, + 498 + ], + "lines": [ + { + "bbox": [ + 306, + 422, + 402, + 498 + ], + "spans": [ + { + "bbox": [ + 306, + 422, + 402, + 498 + ], + "type": "image", + "image_path": "eaeeb76c39862eecfec518fdbaae1782fb1e1c0987ce8aed42a924d373d96cbb.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 405, + 422, + 499, + 498 + ], + "blocks": [ + { + "bbox": [ + 405, + 422, + 499, + 498 + ], + "lines": [ + { + "bbox": [ + 405, + 422, + 499, + 498 + ], + "spans": [ + { + "bbox": [ + 405, + 422, + 499, + 498 + ], + "type": "image", + "image_path": "44744c57079d49d80a20600f99478331e06ae50f5c3ef96d4b4e31be7446ade9.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 111, + 499, + 206, + 575 + ], + "blocks": [ + { + "bbox": [ + 111, + 499, + 206, + 575 + ], + "lines": [ + { + "bbox": [ + 111, + 499, + 206, + 575 + ], + "spans": [ + { + "bbox": [ + 111, + 499, + 206, + 575 + ], + "type": "image", + "image_path": "3a0f6ab9a095152a9eaeeca45ceb9568a550aaa8de4e113425f5ba431b9878dc.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 156, + 584, + 453, + 596 + ], + "lines": [ + { + "bbox": [ + 156, + 584, + 453, + 596 + ], + "spans": [ + { + "bbox": [ + 156, + 584, + 453, + 596 + ], + "type": "text", + "content": "Figure 15: Ablation for " + }, + { + "bbox": [ + 156, + 584, + 453, + 596 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 156, + 584, + 453, + 596 + ], + "type": "text", + "content": ". Error bars indicate min and max over 5 seeds." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 209, + 499, + 304, + 575 + ], + "blocks": [ + { + "bbox": [ + 209, + 499, + 304, + 575 + ], + "lines": [ + { + "bbox": [ + 209, + 499, + 304, + 575 + ], + "spans": [ + { + "bbox": [ + 209, + 499, + 304, + 575 + ], + "type": "image", + "image_path": "5605b814f0d0e2a7f83bbcd0c1d030976969d00556e713784947a19096184e2e.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 306, + 499, + 402, + 575 + ], + "blocks": [ + { + "bbox": [ + 306, + 499, + 402, + 575 + ], + "lines": [ + { + "bbox": [ + 306, + 499, + 402, + 575 + ], + "spans": [ + { + "bbox": [ + 306, + 499, + 402, + 575 + ], + "type": "image", + "image_path": "e132fcf9f153c1a9d11093ef758e00520cb6bccc2502ce76677bbd9f275bdca2.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 405, + 499, + 499, + 575 + ], + "blocks": [ + { + "bbox": [ + 405, + 499, + 499, + 575 + ], + "lines": [ + { + "bbox": [ + 405, + 499, + 499, + 575 + ], + "spans": [ + { + "bbox": [ + 405, + 499, + 499, + 575 + ], + "type": "image", + "image_path": "7a4439af88b2668f7831e239a87c90597aa7ac1c7c446437d087a7b832d6ea11.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 606, + 230, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 230, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 230, + 618 + ], + "type": "text", + "content": "H LEARNING CURVES" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 104, + 631, + 506, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 631, + 506, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 631, + 506, + 676 + ], + "type": "text", + "content": "The learning 
curves for Mujoco and AntMaze tasks are listed in Fig. 18 and Fig.19. The learned policies are evaluated for 10 episodes and 100 episodes each seed for Mujoco and AntMaze tasks, respectively. For AntMaze tasks, we subtract 1 from rewards for the AntMaze datasets following (Kumar et al., 2020b; Kostrikov et al., 2021b)." + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 114, + 206, + 191 + ], + "blocks": [ + { + "bbox": [ + 110, + 114, + 206, + 191 + ], + "lines": [ + { + "bbox": [ + 110, + 114, + 206, + 191 + ], + "spans": [ + { + "bbox": [ + 110, + 114, + 206, + 191 + ], + "type": "image", + "image_path": "1117821df210f570b2e664f0450868a4667351ef55d3c52ead27ec675572ca33.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 209, + 114, + 304, + 191 + ], + "blocks": [ + { + "bbox": [ + 209, + 114, + 304, + 191 + ], + "lines": [ + { + "bbox": [ + 209, + 114, + 304, + 191 + ], + "spans": [ + { + "bbox": [ + 209, + 114, + 304, + 191 + ], + "type": "image", + "image_path": "9db482154a813d8bd0b5a284eac4c62a481620938fd356ced920f6d1be4af221.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": 
[ + 306, + 114, + 402, + 191 + ], + "blocks": [ + { + "bbox": [ + 306, + 114, + 402, + 191 + ], + "lines": [ + { + "bbox": [ + 306, + 114, + 402, + 191 + ], + "spans": [ + { + "bbox": [ + 306, + 114, + 402, + 191 + ], + "type": "image", + "image_path": "3802b5bdb291a351f5ce1714eaae0ee7f4f86f8f9ffe76c88af102f8832f51d2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 405, + 115, + 499, + 191 + ], + "blocks": [ + { + "bbox": [ + 405, + 115, + 499, + 191 + ], + "lines": [ + { + "bbox": [ + 405, + 115, + 499, + 191 + ], + "spans": [ + { + "bbox": [ + 405, + 115, + 499, + 191 + ], + "type": "image", + "image_path": "b31f484605fe53d72a3e3ee21b4d4aa09a7285abc46d2956893d4bd9525ccfd5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 109, + 193, + 206, + 269 + ], + "blocks": [ + { + "bbox": [ + 109, + 193, + 206, + 269 + ], + "lines": [ + { + "bbox": [ + 109, + 193, + 206, + 269 + ], + "spans": [ + { + "bbox": [ + 109, + 193, + 206, + 269 + ], + "type": "image", + "image_path": "9a97af5faf44a23b5c803e4c54f916eae78f27e9ca6ff09ecf3fe5977f745481.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 208, + 193, + 304, + 269 + ], + "blocks": [ + { + "bbox": [ + 208, + 193, + 304, + 269 + ], + "lines": [ + { + "bbox": [ + 208, + 193, + 304, + 269 + ], + "spans": [ + { + "bbox": [ + 208, + 193, + 304, + 269 + ], + "type": "image", + "image_path": "e0ee746721b0ed9d87e215815f15bb47d8c41d7f7aba2d1702f0a58f07e68809.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 193, + 402, + 269 + ], + "blocks": [ + { + "bbox": [ + 306, + 193, + 402, + 269 + ], + "lines": [ + { + "bbox": [ + 306, + 193, + 402, + 269 + ], + "spans": [ + { + "bbox": [ + 
306, + 193, + 402, + 269 + ], + "type": "image", + "image_path": "b57e91459af9580d4caafe5823dac513ca6f3e3893d474db6647c92a3ebaf3e6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 405, + 193, + 499, + 269 + ], + "blocks": [ + { + "bbox": [ + 405, + 193, + 499, + 269 + ], + "lines": [ + { + "bbox": [ + 405, + 193, + 499, + 269 + ], + "spans": [ + { + "bbox": [ + 405, + 193, + 499, + 269 + ], + "type": "image", + "image_path": "be21adeab67bd19a9ace1fde4861e0175708c8d611007c33b6578958cfd9209a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 110, + 270, + 206, + 347 + ], + "blocks": [ + { + "bbox": [ + 110, + 270, + 206, + 347 + ], + "lines": [ + { + "bbox": [ + 110, + 270, + 206, + 347 + ], + "spans": [ + { + "bbox": [ + 110, + 270, + 206, + 347 + ], + "type": "image", + "image_path": "7dd899e830ab870b29b7ae0b3198539b68109a479b607697b922ed2249fcdbaa.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 355, + 453, + 368 + ], + "lines": [ + { + "bbox": [ + 157, + 355, + 453, + 368 + ], + "spans": [ + { + "bbox": [ + 157, + 355, + 453, + 368 + ], + "type": "text", + "content": "Figure 16: Ablation for " + }, + { + "bbox": [ + 157, + 355, + 453, + 368 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 157, + 355, + 453, + 368 + ], + "type": "text", + "content": ". Error bars indicate min and max over 5 seeds." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 208, + 270, + 304, + 347 + ], + "blocks": [ + { + "bbox": [ + 208, + 270, + 304, + 347 + ], + "lines": [ + { + "bbox": [ + 208, + 270, + 304, + 347 + ], + "spans": [ + { + "bbox": [ + 208, + 270, + 304, + 347 + ], + "type": "image", + "image_path": "74245e800fe04cee8de6c3219852adf7a4dc6d1a3560706ca47f41e71e5cbb5d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 270, + 402, + 347 + ], + "blocks": [ + { + "bbox": [ + 306, + 270, + 402, + 347 + ], + "lines": [ + { + "bbox": [ + 306, + 270, + 402, + 347 + ], + "spans": [ + { + "bbox": [ + 306, + 270, + 402, + 347 + ], + "type": "image", + "image_path": "ae96491b9731557ead454f4dafd177b210199887f65c5f489f7fcf46fc143048.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 405, + 270, + 499, + 346 + ], + "blocks": [ + { + "bbox": [ + 405, + 270, + 499, + 346 + ], + "lines": [ + { + "bbox": [ + 405, + 270, + 499, + 346 + ], + "spans": [ + { + "bbox": [ + 405, + 270, + 499, + 346 + ], + "type": "image", + "image_path": "29f4c93235d6c8d0be0f3b30cdec05ee62c441246ecf07dfe8599845f7f35381.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 110, + 444, + 206, + 520 + ], + "blocks": [ + { + "bbox": [ + 110, + 444, + 206, + 520 + ], + "lines": [ + { + "bbox": [ + 110, + 444, + 206, + 520 + ], + "spans": [ + { + "bbox": [ + 110, + 444, + 206, + 520 + ], + "type": "image", + "image_path": "e11f11c11388a51480f01ed44bdd7a5941a63b64daac9d701e33748f08ff5e25.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 208, + 445, + 304, + 520 + ], + "blocks": [ + { 
+ "bbox": [ + 208, + 445, + 304, + 520 + ], + "lines": [ + { + "bbox": [ + 208, + 445, + 304, + 520 + ], + "spans": [ + { + "bbox": [ + 208, + 445, + 304, + 520 + ], + "type": "image", + "image_path": "e3d5b4359c7655bf7b27481c171714cc3c30045cb61713434320abd477da2c5b.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 306, + 445, + 402, + 520 + ], + "blocks": [ + { + "bbox": [ + 306, + 445, + 402, + 520 + ], + "lines": [ + { + "bbox": [ + 306, + 445, + 402, + 520 + ], + "spans": [ + { + "bbox": [ + 306, + 445, + 402, + 520 + ], + "type": "image", + "image_path": "8355d0157f13aedcf0cb2de6c8216dfec88bfbe759678989f7f8aed0c3da0d2a.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 405, + 445, + 499, + 520 + ], + "blocks": [ + { + "bbox": [ + 405, + 445, + 499, + 520 + ], + "lines": [ + { + "bbox": [ + 405, + 445, + 499, + 520 + ], + "spans": [ + { + "bbox": [ + 405, + 445, + 499, + 520 + ], + "type": "image", + "image_path": "6f233afe55693ae2fe42ba1729c80bf145c224c4caa5dfbddb05e52661f78425.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 111, + 521, + 206, + 597 + ], + "blocks": [ + { + "bbox": [ + 111, + 521, + 206, + 597 + ], + "lines": [ + { + "bbox": [ + 111, + 521, + 206, + 597 + ], + "spans": [ + { + "bbox": [ + 111, + 521, + 206, + 597 + ], + "type": "image", + "image_path": "4683367b1ce613a98c64a41d95bc1db19a242abd078cd80308f22a0ab87214ff.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 208, + 521, + 304, + 597 + ], + "blocks": [ + { + "bbox": [ + 208, + 521, + 304, + 597 + ], + "lines": [ + { + "bbox": [ + 208, + 521, + 304, + 597 + ], + "spans": [ + { + "bbox": [ + 208, + 521, + 304, + 597 + ], + "type": 
"image", + "image_path": "17416b7a0ca6d681161ea5f3b61b662a6ca26fc673dc3ba0c07dcbef99f94360.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 306, + 521, + 402, + 597 + ], + "blocks": [ + { + "bbox": [ + 306, + 521, + 402, + 597 + ], + "lines": [ + { + "bbox": [ + 306, + 521, + 402, + 597 + ], + "spans": [ + { + "bbox": [ + 306, + 521, + 402, + 597 + ], + "type": "image", + "image_path": "058bbaebbeab618ce0ec96dda0a71064c2297a0a87d63a5878b4c81db6f8306a.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 405, + 521, + 499, + 597 + ], + "blocks": [ + { + "bbox": [ + 405, + 521, + 499, + 597 + ], + "lines": [ + { + "bbox": [ + 405, + 521, + 499, + 597 + ], + "spans": [ + { + "bbox": [ + 405, + 521, + 499, + 597 + ], + "type": "image", + "image_path": "fb7e300fab8d85f1c91880555a47659b821a19f3d771603cceab4590f426e146.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 111, + 598, + 206, + 673 + ], + "blocks": [ + { + "bbox": [ + 111, + 598, + 206, + 673 + ], + "lines": [ + { + "bbox": [ + 111, + 598, + 206, + 673 + ], + "spans": [ + { + "bbox": [ + 111, + 598, + 206, + 673 + ], + "type": "image", + "image_path": "efccd5cd0f76fd0a1bc3b8bc98c66bdf81e63747a54314c8acaa22a9b7195e71.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 682, + 451, + 695 + ], + "lines": [ + { + "bbox": [ + 157, + 682, + 451, + 695 + ], + "spans": [ + { + "bbox": [ + 157, + 682, + 451, + 695 + ], + "type": "text", + "content": "Figure 17: Ablation for " + }, + { + "bbox": [ + 157, + 682, + 451, + 695 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 157, + 682, + 451, + 695 + ], + "type": "text", + "content": ". Error bars indicate min and max over 5 seeds." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 208, + 598, + 304, + 673 + ], + "blocks": [ + { + "bbox": [ + 208, + 598, + 304, + 673 + ], + "lines": [ + { + "bbox": [ + 208, + 598, + 304, + 673 + ], + "spans": [ + { + "bbox": [ + 208, + 598, + 304, + 673 + ], + "type": "image", + "image_path": "af2fb0b9d1205892ae42ffbd00680021cfa767cefc5cb12089de3a4a1bf07fea.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 306, + 598, + 402, + 673 + ], + "blocks": [ + { + "bbox": [ + 306, + 598, + 402, + 673 + ], + "lines": [ + { + "bbox": [ + 306, + 598, + 402, + 673 + ], + "spans": [ + { + "bbox": [ + 306, + 598, + 402, + 673 + ], + "type": "image", + "image_path": "cf1c6b38bfcfbb7d032ea1d1d09353d63088df2d16d71cd391659076602bb430.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 405, + 598, + 499, + 673 + ], + "blocks": [ + { + "bbox": [ + 405, + 598, + 499, + 673 + ], + "lines": [ + { + "bbox": [ + 405, + 598, + 499, + 673 + ], + "spans": [ + { + "bbox": [ + 405, + 598, + 499, + 673 + ], + "type": "image", + "image_path": "77e50078ab69603957982726b54b65e0f08e5dc0bac8e12a7db922b3ef85be8c.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, 
+ 760 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 121, + 206, + 198 + ], + "blocks": [ + { + "bbox": [ + 111, + 121, + 206, + 198 + ], + "lines": [ + { + "bbox": [ + 111, + 121, + 206, + 198 + ], + "spans": [ + { + "bbox": [ + 111, + 121, + 206, + 198 + ], + "type": "image", + "image_path": "05c00a74cbaca202b5dd3cfc09a293da88e48342ce3473c284060cd3311c3ceb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 209, + 121, + 303, + 198 + ], + "blocks": [ + { + "bbox": [ + 209, + 121, + 303, + 198 + ], + "lines": [ + { + "bbox": [ + 209, + 121, + 303, + 198 + ], + "spans": [ + { + "bbox": [ + 209, + 121, + 303, + 198 + ], + "type": "image", + "image_path": "ee1be08b68ad7bd492e4a1c657ab3fa14b37a945bf2de22358a7b523588133de.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 307, + 121, + 402, + 198 + ], + "blocks": [ + { + "bbox": [ + 307, + 121, + 402, + 198 + ], + "lines": [ + { + "bbox": [ + 307, + 121, + 402, + 198 + ], + "spans": [ + { + "bbox": [ + 307, + 121, + 402, + 198 + ], + "type": "image", + "image_path": "3d136a128715dd8f3041a96b5778277af78e9dce2a4cae6e1c76b98605d5c26a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 405, + 121, + 499, + 198 + ], + "blocks": [ + { + "bbox": [ + 405, + 121, + 499, + 198 + ], + "lines": [ + { + "bbox": [ + 405, + 121, + 499, + 198 + ], + "spans": [ + { + "bbox": [ + 405, + 121, + 499, + 198 + ], + "type": "image", + "image_path": "decb011829db7975397f4d76c29ed5ff1e979aad1d27b6cccfac52a836306072.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + 
"bbox": [ + 111, + 199, + 206, + 275 + ], + "blocks": [ + { + "bbox": [ + 111, + 199, + 206, + 275 + ], + "lines": [ + { + "bbox": [ + 111, + 199, + 206, + 275 + ], + "spans": [ + { + "bbox": [ + 111, + 199, + 206, + 275 + ], + "type": "image", + "image_path": "70272e10ad7e942dfffb81e13addf8f85ba9701dcbdca4bfc427f46796ef7927.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 209, + 199, + 303, + 275 + ], + "blocks": [ + { + "bbox": [ + 209, + 199, + 303, + 275 + ], + "lines": [ + { + "bbox": [ + 209, + 199, + 303, + 275 + ], + "spans": [ + { + "bbox": [ + 209, + 199, + 303, + 275 + ], + "type": "image", + "image_path": "e3622fb9b64f98769333bd39649727ccdd770fd8689416e818d59730392266bc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 199, + 402, + 275 + ], + "blocks": [ + { + "bbox": [ + 306, + 199, + 402, + 275 + ], + "lines": [ + { + "bbox": [ + 306, + 199, + 402, + 275 + ], + "spans": [ + { + "bbox": [ + 306, + 199, + 402, + 275 + ], + "type": "image", + "image_path": "1956bd406adef05af67e339711df4664bab1e2f7c06107688b80306b96ba556e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 405, + 199, + 499, + 275 + ], + "blocks": [ + { + "bbox": [ + 405, + 199, + 499, + 275 + ], + "lines": [ + { + "bbox": [ + 405, + 199, + 499, + 275 + ], + "spans": [ + { + "bbox": [ + 405, + 199, + 499, + 275 + ], + "type": "image", + "image_path": "76b8a9ec80424135f56da53ad068a45784896cbe000b589b8ad6879dcd72f82b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 111, + 276, + 206, + 353 + ], + "blocks": [ + { + "bbox": [ + 111, + 276, + 206, + 353 + ], + "lines": [ + { + "bbox": [ + 111, + 276, + 206, + 353 + ], + "spans": [ + { + "bbox": 
[ + 111, + 276, + 206, + 353 + ], + "type": "image", + "image_path": "d7d3bc5964c5ad8bda531cc85afd02938802605930b143e30047ccfdd10ee044.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 361, + 490, + 374 + ], + "lines": [ + { + "bbox": [ + 118, + 361, + 490, + 374 + ], + "spans": [ + { + "bbox": [ + 118, + 361, + 490, + 374 + ], + "type": "text", + "content": "Figure 18: Learning curves for Mujoco Tasks. Error bars indicate min and max over 5 seeds." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 209, + 276, + 303, + 353 + ], + "blocks": [ + { + "bbox": [ + 209, + 276, + 303, + 353 + ], + "lines": [ + { + "bbox": [ + 209, + 276, + 303, + 353 + ], + "spans": [ + { + "bbox": [ + 209, + 276, + 303, + 353 + ], + "type": "image", + "image_path": "bed43712c3cb41198a3791a5420fb53d68a2e7aa47c2a30da8383ee8ff90f474.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 276, + 402, + 353 + ], + "blocks": [ + { + "bbox": [ + 306, + 276, + 402, + 353 + ], + "lines": [ + { + "bbox": [ + 306, + 276, + 402, + 353 + ], + "spans": [ + { + "bbox": [ + 306, + 276, + 402, + 353 + ], + "type": "image", + "image_path": "0830b54f912ce2aabd99b223b5df5324ee117dd4bd53121bdfe263a86d4900bd.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 405, + 276, + 499, + 353 + ], + "blocks": [ + { + "bbox": [ + 405, + 276, + 499, + 353 + ], + "lines": [ + { + "bbox": [ + 405, + 276, + 499, + 353 + ], + "spans": [ + { + "bbox": [ + 405, + 276, + 499, + 353 + ], + "type": "image", + "image_path": "a7a4c340dc13c0cb909d3a5c3907a71d7cb2882ef8b9ef636fae79474a9a7883.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + 
"bbox": [ + 112, + 462, + 239, + 564 + ], + "blocks": [ + { + "bbox": [ + 112, + 462, + 239, + 564 + ], + "lines": [ + { + "bbox": [ + 112, + 462, + 239, + 564 + ], + "spans": [ + { + "bbox": [ + 112, + 462, + 239, + 564 + ], + "type": "image", + "image_path": "e5788a93c10485df0265c246a4fe50417bd5dd3046c834d39c9af4c1dd600c57.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 242, + 462, + 369, + 564 + ], + "blocks": [ + { + "bbox": [ + 242, + 462, + 369, + 564 + ], + "lines": [ + { + "bbox": [ + 242, + 462, + 369, + 564 + ], + "spans": [ + { + "bbox": [ + 242, + 462, + 369, + 564 + ], + "type": "image", + "image_path": "bc4ad5d304127be2ef67f250649dfda7f2d919b476dbee94a1afd4c57783dfec.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 370, + 462, + 497, + 564 + ], + "blocks": [ + { + "bbox": [ + 370, + 462, + 497, + 564 + ], + "lines": [ + { + "bbox": [ + 370, + 462, + 497, + 564 + ], + "spans": [ + { + "bbox": [ + 370, + 462, + 497, + 564 + ], + "type": "image", + "image_path": "8549272511775042df6c7cfedae412646a9423036c4449f8740b6baa92d77462.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 112, + 565, + 239, + 667 + ], + "blocks": [ + { + "bbox": [ + 112, + 565, + 239, + 667 + ], + "lines": [ + { + "bbox": [ + 112, + 565, + 239, + 667 + ], + "spans": [ + { + "bbox": [ + 112, + 565, + 239, + 667 + ], + "type": "image", + "image_path": "065d8fd34f195f971be35b7f5c03ea4a0b73afce9f3e350688df66e27371901d.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 115, + 676, + 494, + 689 + ], + "lines": [ + { + "bbox": [ + 115, + 676, + 494, + 689 + ], + "spans": [ + { + "bbox": [ + 115, + 676, + 494, + 689 + ], + "type": "text", + "content": "Figure 19: Learning curves 
for AntMaze Tasks. Error bars indicate min and max over 5 seeds." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 242, + 565, + 368, + 667 + ], + "blocks": [ + { + "bbox": [ + 242, + 565, + 368, + 667 + ], + "lines": [ + { + "bbox": [ + 242, + 565, + 368, + 667 + ], + "spans": [ + { + "bbox": [ + 242, + 565, + 368, + 667 + ], + "type": "image", + "image_path": "6a1cae0beaaa1b4d765b5adbd7fbb3191dab2c1532ebee470990087c56c99bf7.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 370, + 565, + 497, + 667 + ], + "blocks": [ + { + "bbox": [ + 370, + 565, + 497, + 667 + ], + "lines": [ + { + "bbox": [ + 370, + 565, + 497, + 667 + ], + "spans": [ + { + "bbox": [ + 370, + 565, + 497, + 667 + ], + "type": "image", + "image_path": "2586130a82e9ddb10ac8b5c495de5689e709ef2991573dad885b84f1ac40e0a4.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/When to Make and Break Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_content_list.json b/2023/When to Make and Break 
Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..71f6fd59801fdcd7c56c112fd7f09b4cf840419d --- /dev/null +++ b/2023/When to Make and Break Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_content_list.json @@ -0,0 +1,3420 @@ +[ + { + "type": "text", + "text": "WHEN TO MAKE AND BREAK COMMITMENTS?", + "text_level": 1, + "bbox": [ + 171, + 99, + 740, + 119 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Alihan Hüyük", + "bbox": [ + 181, + 145, + 285, + 159 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Cambridge", + "bbox": [ + 181, + 160, + 349, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ah2075@cam.ac.uk", + "bbox": [ + 181, + 174, + 341, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhaozhi Qian", + "bbox": [ + 395, + 145, + 496, + 159 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Cambridge", + "bbox": [ + 395, + 160, + 562, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zq224@cam.ac.uk", + "bbox": [ + 395, + 174, + 545, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mihaela van der Schaar", + "bbox": [ + 609, + 145, + 782, + 157 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Cambridge", + "bbox": [ + 609, + 160, + 779, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The Alan Turing Institute", + "bbox": [ + 609, + 174, + 779, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "mv472@cam.ac.uk", + "bbox": [ + 609, + 188, + 759, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 222, + 547, + 237 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In many scenarios, decision-makers must commit to long-term actions until their resolution before receiving the payoff of said actions, and usually, staying committed to 
such actions incurs continual costs. For instance, in healthcare, a newly-discovered treatment cannot be marketed to patients until a clinical trial is conducted, which both requires time and is also costly. Of course in such scenarios, not all commitments eventually pay off. For instance, a clinical trial might end up failing to show efficacy. Given the time pressure created by the continual cost of keeping a commitment, we aim to answer: When should a decision-maker break a commitment that is likely to fail—either to make an alternative commitment or to make no further commitments at all? First, we formulate this question as a new type of optimal stopping/switching problem called the optimal commitment problem (OCP). Then, we theoretically analyze OCP, and based on the insight we gain, propose a practical algorithm for solving it. Finally, we empirically evaluate the performance of our algorithm in running clinical trials with subpopulation selection.", + "bbox": [ + 228, + 244, + 769, + 441 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 455, + 338, + 470 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In many real-world settings, decision-makers must commit to long-term actions and wait until their resolution before receiving the payoff of said actions. Meanwhile, staying committed to such actions incurs continual costs. For instance, in portfolio management, it might take time for an asset to develop additional value after an initial investment, and keeping capital tied up in an asset comes with an opportunity cost for the investor (Markowitz, 1959; Merton, 1969; Karatzas and Wang, 2020). 
In an energy network, turning power stations on and off is not an immediate action, hence a sudden increase in energy demand can only be met with a delay after putting more stations into operation, and keeping stations operational obviously consumes resources (Rafique and Jianhua, 2018; Olofsson et al., 2022). In healthcare, a newly-discovered treatment can only be marketed to patients once a successful clinical trial that targets the said treatment is conducted, which both requires time and is also costly (Kaitin, 2010; Umscheid et al., 2011).", + "bbox": [ + 169, + 479, + 826, + 632 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Of course, not all commitments eventually pay off: An asset might end up losing value despite investments, energy demands might shift faster than a network can react to, and a clinical trial might fail to show efficacy for the targeted treatment. Given the time pressure created by the continual cost of keeping a commitment, our goal in this paper is to answer the question: When should a decision-maker break a commitment—thereby avoiding future costs but also forfeiting any potential returns—either to make an alternative commitment instead or to make no further commitments at all? Solving this problem optimally requires a careful balance between exploration and exploitation: The earlier a commitment that is bound to fail is broken, the more resources would be saved (cf. exploitation); but the longer one is kept, the more information is revealed regarding whether the commitment is actually failing or might still succeed (cf. exploration)—and in certain cases, also regarding the prospects of similar commitments one could make instead.", + "bbox": [ + 169, + 638, + 826, + 792 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Related problems are mostly studied within the context of adaptive experimentation and sequential hypothesis testing (see Section 5). 
As such, we focus on adaptive experimentation as our main application as well. More specifically, we consider the problem of selecting the target population of an adaptive experiment. Suppose an experimenter, who is interested in proving the efficacy of a new treatment, starts running an initial experiment that targets a certain population of patients. Incidentally, the treatment being tested is effective only for a relatively narrow subpopulation of patients but not for the wider population as a whole. Hence, an experiment targeting the overall population, but not the subpopulation specifically, will most probably fail to prove efficacy and prevent the deployment of the treatment for the patients who would have actually benefited from it, not to mention waste", + "bbox": [ + 169, + 797, + 826, + 925 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "time and resources (Moineddin et al., 2008; Lipkovich et al., 2017; Chiu et al., 2018). Of course, the experimenter has no knowledge of this in advance but the initial experiment they have set up would slowly reveal more information regarding the effects of the treatment and the fact that the ongoing experiment is bound to fail. 
In that case, we want to be able to determine at what point the experimenter has enough information to justify breaking their commitment to the initial experiment that targets too wide of a population to be successful, in favor of making a new commitment to a follow-up experiment that focuses on a narrower subpopulation instead?", + "bbox": [ + 169, + 103, + 826, + 202 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contributions Our contributions are threefold: First, we formulate the problem of making and breaking commitments in a timely manner as a new type of optimal stopping/switching problem called the optimal commitment problem (OCP) (Section 2). The defining feature of OCP is that rewards are received only when a known time point is reached but costs are incurred continually, requiring commitment to actions but with incentive to abandon those commitments. As we will show later, OCP cannot be easily solved via conventional reinforcement learning techniques due to its non-convex nature. Second, we theoretically analyze a simplified case of OCP to identify the characteristics of the optimal solution (Section 3), and based on the insights we gain, propose a practical algorithm for the more general case (Section 4). Third, we empirically evaluate the performance of our algorithm in running experiments with subpopulation selection (Section 6). 
Before we move on, it should be emphasized that, although we predominantly consider adaptive experimentation as our main application, our contributions remain generally applicable to portfolio management, energy systems, and any other decision-making scenarios that require commitments to long-term actions.", + "bbox": [ + 169, + 208, + 826, + 388 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 OPTIMAL COMMITMENT PROBLEM", + "text_level": 1, + "bbox": [ + 171, + 405, + 495, + 420 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We first introduce the problem of optimal commitment from the perspective of running experiments. As far as our formulation is concerned, experiments are conducted to confirm the efficacy of an intervention by observing the outcome of the said intervention for subjects belonging to a particular population. However, this experiment-focused perspective does not limit the applicability of OCP; we stress its generality later at the end of the section. We provide a glossary of terms and notation in Appendix K.", + "bbox": [ + 169, + 430, + 828, + 501 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Populations Let $\\mathcal{X}$ be a discrete set of atomic-populations such that every subject is only the member of exactly one atomic-population $x\\in \\mathcal{X}$ . Denote with $\\eta_{x}\\in [0,1]$ the probability of a subject being from atomic-population $x$ (such that $\\sum_{x\\in \\mathcal{X}}\\eta_x = 1$ ), and with $\\Omega_{x}$ the distribution of outcomes for atomic-population $x$ such that the mean outcome $\\theta_{x} = \\mathbb{E}_{y\\sim \\Omega_{x}}[y]$ is the effect of some intervention for atomic-population $x$ . Now, wider populations can be constructed by combining various atomic-populations. Let any $X\\subseteq \\mathcal{X}$ represent the population of subjects who belong to either one of the atomic-populations $\\{x\\in X\\}$ . 
Then, the probability of a subject being from population $X$ can be written as $\\eta_{X} = \\sum_{x\\in X}\\eta_{x}$ , the probability of a subject being from atomic-population $x$ conditioned on the fact that they are from population $X$ can be written as $\\eta_{x|X} = \\eta_x / \\eta_X$ , and the average effect for population $X$ can be written as $\\bar{\\theta}_X = \\sum_{x\\in X}\\eta_{x|X}\\theta_x$ .", + "bbox": [ + 169, + 507, + 826, + 648 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Experiments An experiment is largely characterized by the population it targets, its sample horizon, and its success criterion. During an experiment that targets population $X$ , at each time step $t \\in \\{1, 2, \\ldots\\}$ that the experiment continues, first a subject from some atomic-population $x_{t}$ within the targeted population $X$ arrives with probability $\\eta_{x_{t}|X}$ , and then the outcome $y_{t} \\sim \\Omega_{x_{t}}$ for that subject is observed. This process generates an online dataset $\\mathcal{D}_{t} = \\{x_{t'}, y_{t'}\\}_{t' = 1}^{t}$ . The experiment terminates when a pre-specified sample/time horizon $\\tau$ is reached. Once terminated, the experiment is declared a success if $\\rho(\\mathcal{D}_{\\tau}) = 1$ , where $\\rho: (\\mathcal{X} \\times \\mathbb{R})^{\\tau} \\to \\{0, 1\\}$ is the success criterion, and declared a failure otherwise. Formally, the tuple $\\psi = (X, \\tau, \\rho)$ constitutes an experiment design.", + "bbox": [ + 169, + 652, + 826, + 766 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Meta-experimenter Suppose a meta-experimenter is given a set of viable experiment designs $\\Psi$ and is tasked with running at least one successful experiment. 
Each experiment $\\psi \\in \\Psi$ has an associated cost $C_{\\psi} \\in \\mathbb{R}_{+}$ , which the experiment incurs per time step that it continues, and an associated reward $R_{\\psi} \\in \\mathbb{R}_{+}$ , which the experiment provides only if it eventually succeeds. The meta-experimenter aims to maximize utility—that is the difference between any eventual reward received and the total costs incurred by running experiments. They first pick an initial experiment $\\psi^{1} \\in \\Psi$ and start conducting it, which generates an online dataset $\\mathcal{D}_t^1$ as described earlier. Now at each time step $t$ , they need to decide whether they should stay committed to their initial decision and wait until $\\psi^{1}$ terminates, or stop $\\psi^{1}$ early in favor of starting a new experiment $\\psi^{2}$ . They might decide on the latter to avoid unnecessary costs if $\\mathcal{D}_t^1$ already indicates $\\psi^{1}$ is unlikely to succeed. If at some point a secondary experiment $\\psi^{2}$ is started, now the meta-experiment has a similar decision to make", + "bbox": [ + 169, + 771, + 828, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "regarding whether to stop $\\psi^2$ early in favor of starting a new experiment $\\psi^3 \\in \\Psi$ . This process continues until either an experiment finally succeeds or the meta-experimenter decides not to conduct any further experiments; let the random variable $n \\in \\{1, 2, \\ldots\\}$ be such that $\\psi^n$ is the last experiment. 
We denote with $\\psi^i = (X^i, \\tau^i, \\rho^i)$ the $i$ -th experiment conducted by the meta-experimenter, and with $T^i$ the number of time steps for which the $i$ -th experiment is conducted either until it was stopped by the meta-experimenter or the time horizon $\\tau^i$ was reached. Denote with $\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i)$ the decision-making policy of the meta-experimenter, where $t$ is the current time step of the latest experiment $\\psi^i$ and $\\bar{\\mathcal{D}}_t^i = (\\cup_{j=1}^{i-1} \\mathcal{D}_{T^j}^j) \\cup \\mathcal{D}_t^i$ is an aggregate dataset. We write (i) $\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i) = \\psi^i$ if the meta-experiment decides to keep conducting the current experiment $\\psi^i$ , (ii) $\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i) = \\psi' \\neq \\psi^i$ if the meta-experimenter decides to stop experiment $\\psi^i$ and start experiment $\\psi'$ instead, and (iii) $\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i) = \\emptyset$ if the meta-experimenter decides not to conduct any further experiments.", + "bbox": [ + 169, + 103, + 826, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Objective Once all experimentation is concluded, the meta-experimenter achieves the total utility", + "bbox": [ + 169, + 263, + 825, + 279 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nG = R _ {\\psi^ {n}} \\cdot \\mathbb {1} \\left\\{T ^ {n} = \\tau^ {n} \\right\\} \\cdot \\rho^ {n} \\left(\\mathcal {D} _ {\\tau^ {n}} ^ {n}\\right) - \\sum_ {i = 1} ^ {n} C _ {\\psi^ {i}} \\cdot T ^ {i}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 281, + 823, + 297 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Then, the optimal commitment problem is to find the optimal policy $\\pi^{*} = \\operatorname{argmax}_{\\pi} \\mathbb{E}_{\\pi}[G]$ that maximizes the expected utility given $\\Psi$ , $\\{\\eta_x\\}$ . 
$\\{R_{\\psi}, C_{\\psi}\\}$ without knowing mean outcomes $\\{\\theta_x\\}$ or outcome distributions $\\{\\Omega_x\\}$ . It is called the optimal commitment problem because each experiment $\\psi = (X, \\tau, \\rho)$ only provides a reward if the meta-experimenter commits to incurring its costs for at least $\\tau$ time steps, and the meta-experimenter needs to decide which experiment in $\\Psi$ is the better commitment—or if there is any experiment worth committing to at all—adaptively.", + "bbox": [ + 169, + 301, + 826, + 386 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "General applicability of OCP Although we have described OCP from the perspective of (meta-)experiment design, it can potentially be useful in modeling many other problems as we have stressed during the introduction (see Table 1). For instance, in portfolio management, atomic-populations can be regarded as various assets one can invest in, then a population would correspond to a portfolio", + "bbox": [ + 169, + 392, + 552, + 489 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "of assets. Similar to experiments, when these portfolios require a time commitment (cf. $\\tau$ ) before they provide their payoff (cf. $R_{\\psi}$ ) and incur an opportunity cost (cf. $C_{\\psi}$ ) in the mean time, the decision-making problem of managing when and which portfolio to invest in constitutes an instance of the optimal commitment problem. Another good examples is energy management, where power stations and the networks they form are akin to atomic-populations and populations. Since power stations cannot be turned on and off immediately, putting one in operation requires a certain amount of commitment.", + "bbox": [ + 169, + 489, + 826, + 573 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/4bf98f4f211005a6b5db045ff74ed3f9a5b73be4b0514cfdc18ee5d448f1b0ab.jpg", + "table_caption": [ + "Table 1: Equivalent concepts across different domains. 
OCP can model scenarios other than adaptive experimentation." + ], + "table_footnote": [], + "table_body": "
DomainEquivalent Concepts
Adaptive experimentationAtomic-populationPopulation
Portfolio managementFinancial assetPortfolio of assets
Energy systemsPower stationNetwork of stations
", + "bbox": [ + 560, + 438, + 823, + 477 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 WARM-UP: WHEN TO BREAK A SINGLE COMMITMENT?", + "text_level": 1, + "bbox": [ + 171, + 585, + 666, + 599 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, to gather insights, we commence by analyzing a simplified instance of OCP. Later, in Section 4, using these insights, we construct a practical algorithm for solving a more general case of OCP. As the simplified instance, we only consider one atomic-population such that $\\mathcal{X} = \\{\\mathcal{X}_0\\}$ and one experiment design that targets this atomic-population such that $\\Psi = \\{\\Psi_0 = (\\mathcal{X}_0,\\tau ,\\rho)\\}$ . Moreover, we assume that the outcomes are distributed normally with unit variance such that $\\Omega \\doteq \\Omega_{\\mathcal{X}_0}\\doteq \\mathcal{N}(\\theta \\doteq \\theta_{\\mathcal{X}_0},1)$ and the success criterion is a simple Z-test to see whether $\\theta >0$ such that $\\rho (\\mathcal{D}_{\\tau})\\doteq \\rho (\\mu_{\\tau})\\doteq \\mathbb{1}\\{\\mu_{\\tau} > \\alpha /\\sqrt{\\tau}\\}$ , where $\\mu_t = \\sum_{(x_{t'},y_{t'})\\in \\mathcal{D}_t}y_{t'} / |\\mathcal{D}_t|$ is the empirical mean outcome given dataset $\\mathcal{D}_t$ , and $\\alpha$ determines the significance threshold for the test. Since there is just one viable experiment in this setting, the only decision that needs to be made at each time step is whether to keep conducting experiment $\\psi^1 = \\Psi_0$ or to stop all experimentation. 
For this decision to be interesting, we will also assume that $C\\doteq C_{\\Psi_0} > 0$ so that never stopping is not necessarily optimal—and $R\\doteq R_{\\Psi_0} > \\tau C$ so that always stopping is not necessarily optimal either.", + "bbox": [ + 169, + 607, + 826, + 776 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Value and Q-functions Since $t$ and $\\mu_t$ are sufficient statistics to estimate the success probability of the experiment, it is also sufficient to only consider policies of the form $\\pi(t, \\mu)$ . For a given policy $\\pi$ ,", + "bbox": [ + 169, + 781, + 826, + 810 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nV ^ {\\pi} (t, \\mu) = \\mathbb {E} \\left[ R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {\\pi} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t} ^ {\\pi}, \\tau \\right\\} - t\\right) \\mid \\mu_ {t} = \\mu \\right] \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 813, + 823, + 829 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nQ ^ {\\pi} (t, \\mu) = \\mathbb {E} [ R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {\\pi} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {\\pi}, \\tau \\right\\} - t\\right) | \\mu_ {t} = \\mu ] \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 830, + 823, + 848 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "are the value function, and the Q-function of conducting the experiment for at least one more time step respectively, where $T_{t}^{\\pi} = \\min \\{t' \\geq t : \\pi(t', \\mu_{t'}) = \\emptyset\\}$ is the first time step at or after time $t$ that policy $\\pi$ decides to stop; let $V^{*} = V^{\\pi^{*}}$ and $Q^{*} = Q^{\\pi^{*}}$ be the optimal value and Q-functions. Note that the Q-factor of stopping all experimentation is always equal to zero for all policies. 
Hence, the optimal policy must be such that $\\pi^{*}(t, \\mu) = \\Psi_{0}$ if $Q^{*}(t, \\mu) > 0$ and $\\pi^{*}(t, \\mu) = \\emptyset$ otherwise.", + "bbox": [ + 169, + 851, + 826, + 922 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Once we identify the value and Q-functions, a naive attempt at finding the optimal policy would be to compute $V^{*}$ and $Q^{*}$ via dynamic programming as they would satisfy the following Bellman optimality conditions:", + "bbox": [ + 169, + 103, + 553, + 160 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nQ ^ {*} (t, \\mu) = - C + \\mathbb {E} \\left[ V ^ {*} (t + 1, \\mu_ {t + 1}) \\mid \\mu_ {t} = \\mu \\right] \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 162, + 549, + 180 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nV ^ {*} (t, \\mu) = \\max \\{0, Q ^ {*} (t, \\mu) \\} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 181, + 549, + 198 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and $V^{*}(\\tau, \\mu) = R \\cdot \\rho(\\mu)$ . However, a major complication in applying dynamic programming methods to compute $V^{*}$ and $Q^{*}$ is that they are continuous functions in $\\mu$ . In the literature of partially-observable Markov decision processes (POMDPs), which OCP happens to be an instance", + "bbox": [ + 169, + 200, + 550, + 271 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "of (see Appendix A), the standard approach of addressing this complication would be to leverage the convexity of $V^{*}$ and $Q^{*}$ , and approximate them with functions of the form $f(\\mu) = \\max_{i} a_{i}\\mu + b_{i}$ (Spanan, 2012). 
However, this standard approach is not applicable in OCP because, in general, neither $V^{*}(t,\\mu)$ nor $-V^{*}(t,\\mu)$ is a convex function with respect to $\\mu$ (see Figure 1):", + "bbox": [ + 169, + 271, + 826, + 327 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Proposition 1 (Non-convexity). There exist a problem instance $(C,R,\\tau,\\alpha)$ and $t\\in \\{1,\\dots ,\\tau -1\\}$ such that $\\exists \\mu ,\\mu^{\\prime}\\in \\mathbb{R},p\\in [0,1]:V^{*}(t,p\\mu +(1 - p)\\mu^{\\prime}) < pV^{*}(t,\\mu) + (1 - p)V^{*}(t,\\mu^{\\prime})$ and $\\exists \\mu ,\\mu^{\\prime}\\in \\mathbb{R},p\\in [0,1]: - V^{*}(t,p\\mu +(1 - p)\\mu^{\\prime}) < - pV^{*}(t,\\mu) - (1 - p)V^{*}(t,\\mu^{\\prime}).^{1}$", + "bbox": [ + 169, + 333, + 823, + 378 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Properties of the optimal policy Although identifying $\\pi^{*}$ exactly by computing $V^{*}$ and $Q^{*}$ is challenging, we can still identify some properties that $\\pi^{*}$ should have, which can then help us design a heuristic policy that we expect to perform well, albeit not optimally. First of all, the optimal policy $\\pi^{*}$ should be a \"thresholding-type\" policy—that is the meta-experimenter should keep conducting the experiment as long as $\\mu_{t}$ stays above a time-dependent threshold $\\mu_{t}^{*}$ and should stop all experimentation the moment $\\mu_{t}$ drops below that threshold (see the top panel of Figure 2):", + "bbox": [ + 169, + 383, + 823, + 468 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Proposition 2 (Thresholding). 
For all problem instances $(C, R, \\tau, \\alpha)$ , there exists time-dependent thresholds $\\{\\mu_t^* \\in \\mathbb{R}\\}_{t=1}^{\\tau-1}$ such that", + "bbox": [ + 169, + 474, + 823, + 505 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\pi^ {*} (t, \\mu) = \\left\\{\\Psi_ {0} \\quad i f \\mu > \\mu_ {t} ^ {*}; \\quad \\emptyset \\quad o t h e r w i s e \\right\\} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 508, + 823, + 525 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Intuitively, a higher test statistic $\\mu_t$ means that the experiment is only more likely to succeed, hence if it is optimal to continue conducting the experiment when $\\mu_t = \\mu$ , then it should also be optimal to continue when $\\mu_t = \\mu' > \\mu$ (likewise, lower $\\mu_t$ means success is even less likely hence $\\pi^*(t, \\mu) = \\emptyset$ implies $\\pi^*(t, \\mu') = \\emptyset$ for $\\mu' < \\mu$ ).", + "bbox": [ + 169, + 529, + 552, + 614 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Moreover, the optimal policy $\\pi^{*}$ must be \"optimistic\" that the experiment will succeed when making decisions. Consider a greedy policy $\\pi^{\\mathrm{greedy}}$ that continues as long as the expected utility of committing fully to conducting the experiment until it terminates at $t = \\tau$ is positive—that is $\\pi^{\\mathrm{greedy}} = \\Psi_0$ if and only if $V^{\\pi^{(0)}}(t,\\mu) > 0$ where $\\pi^{(0)}$ is the policy that always waits until the experiment terminates such that $\\pi^{(0)}(t,\\mu) = \\Psi_0$ for all $t,\\mu$ ; $\\pi^{\\mathrm{greedy}}$ is said to be greedy because the decision to continue is made assuming a full commitment to the experiment without considering the possibility to stop at a future time step. Then, whenever such greedy reasoning suggests continuing, the meta-experimenter should indeed continue. 
However, whenever the same reasoning suggests stopping, the meta-", + "bbox": [ + 169, + 619, + 550, + 814 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "experimenter should be optimistic that the experiment will succeed and occasionally make the decision to continue instead—that is $\\pi^{*}$ should be biased towards continuing (see the threshold gap in Figure 2):", + "bbox": [ + 169, + 814, + 826, + 842 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Proposition 3 (Optimism). First, $\\pi^{\\text{greedy}}$ is also of thresholding type and there exists $\\{\\mu_t^{\\text{greedy}} \\in \\mathbb{R}\\}_{t=1}^{\\tau-1}$ such that $\\pi^{\\text{greedy}}(t,\\mu) = \\Psi_0$ if and only if $\\mu > \\mu_t^{\\text{greedy}}$ . Moreover, for all $t \\in \\{1,\\dots,\\tau-1\\}$ ,", + "bbox": [ + 169, + 847, + 823, + 881 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_ {t} ^ {*} \\leq \\mu_ {t} ^ {\\text {g r e e d y}} \\quad \\Longleftrightarrow \\quad \\left\\{\\mu : \\pi^ {*} (t, \\mu) = \\Psi_ {0} \\right\\} \\supseteq \\left\\{\\mu : \\pi^ {\\text {g r e e d y}} (t, \\mu) = \\Psi_ {0} \\right\\} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 883, + 823, + 902 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/07487c83fa7edf52e9199370f2865392f818d5292dc95a654768533a6c1969f5.jpg", + "image_caption": [ + "Figure 1: Optimal value function $V^{*}(t,\\mu)$ for $C = 1$ , $R = 10$ , $\\tau = 4$ , and $\\alpha = 0$ . It can clearly be seen that neither $V^{*}$ nor $-V^{*}$ is convex in $\\mu$ (cf. Proposition 1)." 
+ ], + "image_footnote": [], + "bbox": [ + 563, + 104, + 823, + 215 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1 Proofs of all propositions are given in Appendix I.", + "bbox": [ + 192, + 909, + 496, + 924 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Intuitively, the optimism of $\\pi^{*}$ accounts for the information gained from observing more samples when the experiment is continued. Remember that $\\pi^{\\mathrm{greedy}}$ estimates the reward to be received if the experiment is conducted until termination, and it stops whenever its estimate is negative. But, the estimate of $\\pi^{\\mathrm{greedy}}$ has some uncertainty associated with it. Whenever it is uncertain enough that the reward to be received is actually negative; incurring the cost of continuing for one more time step, gaining new information, and forming a more certain estimate can lead to a more accurate decision and a higher overall utility. Finally, the optimism of $\\pi^{*}$ has a strictly decreasing upper bound; denoting with $F(x) = (1 / \\sqrt{2\\pi})\\int_{-\\infty}^{x}e^{-(1 / 2)x^2}$ the c.d.f. of the standard normal distribution:", + "bbox": [ + 169, + 104, + 826, + 219 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Proposition 4 (Decreasing optimism). 
For all $t \\in \\{1, \\dots, \\tau - 1\\}$ ,", + "bbox": [ + 169, + 223, + 607, + 239 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\mu_ {t} ^ {*} - \\mu_ {t} ^ {\\text {g r e e d y}} \\right| \\leq \\sqrt {1 / t - 1 / \\tau} \\times \\left(F ^ {- 1} \\left(\\left(\\tau - t\\right) ^ {C} / _ {R}\\right) - F ^ {- 1} \\left(^ {C} / _ {R}\\right)\\right) \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 284, + 243, + 823, + 263 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Intuitively, as the experiment continues, the information gained from one individual sample decreases relative to the total information accumulated, hence the optimism of $\\pi^{*}$ that accounts for the that information gain also decreases (see the bottom panel of Figure 2). Consider one extreme: When $t = \\tau - 1$ , there is no more information to be gained before the experiment terminates at $t = \\tau$ , hence $\\pi^{*}$ should make the same decisions as $\\pi^{\\mathrm{greedy}}$ . Indeed, Proposition 4 implies that $\\mu_{\\tau - 1}^{*} = \\mu_{\\tau - 1}^{\\mathrm{greedy}}$ .", + "bbox": [ + 169, + 268, + 823, + 339 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 A PRACTICAL ALGORITHM: BAYES-OCP", + "text_level": 1, + "bbox": [ + 169, + 349, + 545, + 366 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Summarizing our discussion in the previous section, we suspect the optimal policy to be (i) of thresholding type (cf. Proposition 2), (ii) optimistic (cf. Proposition 3), and (iii) increasingly more greedy (cf. Proposition 4). These findings are not a complete surprise as optimism-in-the-face-of-uncertainty is a well-known principle in solving online decision-making problems (Auer et al., 2002; Bubeck et al., 2012). 
Our earlier analysis shows rigorously that this principle holds for at least a special case of OCP and strengthens our intuition that it should be applicable for more general cases of OCP as well.", + "bbox": [ + 169, + 373, + 826, + 458 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Keeping properties (i-iii) in mind, we now propose a practical algorithm for solving OCP in a more general setting than the one we analyzed earlier. Let $|\mathcal{X}| \geq 1$ and $\Psi = \{(X,\tau,\rho): X \in 2^{\mathcal{X}} \setminus \emptyset\}$ include all experiment designs that target a unique subpopulation within $\mathcal{X}$ for a given time horizon $\tau$ and success criterion $\rho$ ; let $C_X \doteq C_{(X,\tau,\rho)}$ and $R_X \doteq R_{(X,\tau,\rho)}$ . We assume that the conditional power of performing a hypothesis test at time $\tau$ according to $\rho$ —that is the probability of the test being successful conditioned on mean outcomes $\{\theta_x\}$ —can be computed for interim datasets—that is", + "bbox": [ + 169, + 463, + 823, + 547 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\mathcal {P} (X, \mathcal {D} _ {t}; \left\{\theta_ {x} \right\}) = \mathbb {E} _ {x _ {t ^ {\prime}} \sim \left\{\eta_ {x \mid X} \right\} _ {x \in X}, y _ {t ^ {\prime}} \sim \mathcal {N} \left(\theta_ {x _ {t ^ {\prime}}}, 1\right)} [ \rho (\mathcal {D} _ {t} \cup \left(\cup_ {t ^ {\prime} = t + 1} ^ {\tau} \left\{x _ {t ^ {\prime}}, y _ {t ^ {\prime}} \right\}\right)) ] \tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 551, + 823, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "can be evaluated efficiently. 
Then, based on this conditional power function, we define", + "bbox": [ + 169, + 575, + 741, + 590 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\mathcal {G} \left(X, \mathcal {D} _ {t}; \left\{\theta_ {x} \right\}\right) = R _ {X} \cdot \mathcal {P} (X, \mathcal {D} _ {t}; \left\{\theta_ {x} \right\}) - C _ {X} \cdot (\tau - t) \tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 599, + 483, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "as the expected utility of fully committing to an experiment and waiting until it terminates when the experiment targets population $X$ , is currently at time step $t$ , and has collected dataset $\mathcal{D}_t$ so far. Denote with $\mathcal{G}^{(0)}(X; \{\theta_x\}) = \mathcal{G}(X, \emptyset; \{\theta_x\})$ the same expected utility but for an experiment that is yet to start, and with $\mathcal{G}^{(0)}(\emptyset; \{\theta_x\}) = 0$ the utility of stopping all experimentation.", + "bbox": [ + 169, + 638, + 486, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our algorithm is called Bayes-OCP and is given in Algorithm 1. It maintains a posterior distribution $\mathcal{N}(\mu_x,\sigma_x^2)$ for each mean outcome $\theta_{x}$ assuming that, given mean $\theta_{x}$ , outcomes are distributed normally with unit variance—that is $\Omega_{x} = \mathcal{N}(\theta_{x},1)$ . These posteriors are only used in deciding which experiment to run next and not in determining whether the experiment was a success or not. Hence, even when the assumption of outcomes being normally distributed is violated, the integrity of the experiments would not be affected; only the performance of Bayes", + "bbox": [ + 169, + 757, + 486, + 924 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/5ee11095d1dcf629c50370cd5060cddc02fed98ac2167dc18a201729966cb3a7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Algorithm 1 Bayes-OCP
1: Initialize μx and σx2 for all x ∈ X
2: X ← X, t ← 0, D0 ← ∅
3: Start experiment ψ = (X,τ,ρ)
4: loop:
5: t ← t + 1; Dt ← Dt-1 ∪ {xt,yt}
6: 1/σxt2 ← 1/σxt2 + 1
7: μxt← μxt + (yt - μxt)σxt2
(i) Identify a candidate subpopulation X' to replace X:
8: X' ← ∅
9: while X \\ X' ⊃ ∅:
10: x* ← argmaxx∈X\\X'
Eθx~N(μx,σx2)[G(0)(X' ∪ {x}; {θx})]
11: if Eθx~N(μx,σx2)[G(0)(X' ∪ {x*}; {θx})] > Eθx~N(μx,σx2)[G(0)(X'; {θx})]:
12: X' ← X' ∪ {x*}
13: else: break
(ii) Decide whether to actually replace X with X':
14: if Pθx~N(μx,σx2)[G(0)(X'; {θx}) > G(X, Dt; {θx})] > β:
15: X ← X', t ← 0, D0 ← ∅
16: Start a new experiment ψ = (X,τ,ρ)
", + "bbox": [ + 496, + 603, + 825, + 920 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "OCP in managing various experiments would degrade (see Appendix C for related experiments). Making use of the posteriors it maintains, Bayes-OCP performs two steps at each iteration:", + "bbox": [ + 169, + 103, + 826, + 133 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(i) First, a subpopulation $X' \\subset X$ within the currently targeted population $X$ is identified as a potential candidate to target next; due to the combinatorial size of $\\Psi$ , it would not be practical to consider every subpopulation individually as a candidate for large $|\\mathcal{X}|$ . The ideal candidate would be the subpopulation with the largest expected utility: $X' = \\operatorname{argmax}_{X' \\subset X} \\mathbb{E}_{\\theta_x \\sim \\mathcal{N}(\\mu_x, \\sigma_x^2)} [\\mathcal{G}^{(0)}(X'; \\{\\theta_x\\})]$ . But again due to the combinatorial size of the search space, Bayes-OCP employs a greedy algorithm instead and forms candidate subpopulations by combining, one by one, the atomic-subpopulations that increase the expected utility the most, until the expected utility no longer improves. Note that it is common to use greedy algorithms to solve combinatorial optimization problems (Lawler, 1976; Papadimitriou and Steiglitz, 1982).", + "bbox": [ + 186, + 138, + 826, + 263 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(ii) Then, it is decided whether the current experiment targeting population $X$ should be stopped in favor of targeting candidate $X^{\\prime}$ identified earlier instead. 
A greedy strategy would have done so whenever $\mathbb{E}_{\theta_x \sim \mathcal{N}(\mu_x, \sigma_x^2)}[\mathcal{G}^{(0)}(X'; \{\theta_x\})] > \mathbb{E}_{\theta_x \sim \mathcal{N}(\mu_x, \sigma_x^2)}[\mathcal{G}(X, \mathcal{D}_t; \{\theta_x\})]$ . But from our earlier analysis, we have learned that the optimal strategy is optimistic (cf. Proposition 3). As such, Bayes-OCP checks whether it is overwhelmingly likely that the alternative experiment has higher expected utility—that is whether $\mathbb{P}_{\theta_x \sim \mathcal{N}(\mu_x, \sigma_x^2)}\{\mathcal{G}^{(0)}(X'; \{\theta_x\}) > \mathcal{G}(X, \mathcal{D}_t; \{\theta_x\})\} > \beta$ , where $\beta \in (1/2, 1)$ controls the decision-making threshold. When $\beta$ is large, we are more optimistic that the current experiment will succeed and require stronger evidence that the alternative experiment has higher expected utility. Note that, as the posteriors $\mathcal{N}(\mu_x, \sigma_x^2)$ get narrower, the optimism of this rule naturally decreases, which should be the case for the optimal strategy (cf. Proposition 4). As one extreme, the two switching rules become equivalent when $\{\sigma_x^2 \to 0\}$ .", + "bbox": [ + 181, + 270, + 826, + 429 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 445, + 344, + 460 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Optimal stopping Optimal commitment is essentially a new type of optimal stopping/switching problem. In typical optimal stopping problems (OSPs), the reward an agent can receive evolves based on a stochastic process and the goal of the agent is to determine the optimal time step to stop when the reward to be received is in some sense maximized (Shiryaev, 2007). Optimal commitment is unique in that a positive reward can only be received by not stopping until a pre-specified time horizon $\tau$ . 
In optimal commitment, there is still a stochastic process (namely, samples $y_{t}$ ) that gradually reveals more information regarding what that positive reward will be at the end, however, the reward—or rather the cost—of stopping earlier is independent of this stochastic process (and is equal to $-tC$ ).", + "bbox": [ + 169, + 470, + 826, + 584 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Sequential hypothesis testing Among other OSPs, optimal commitment is most closely related to sequential hypothesis testing (SHT), where an agent makes sequential observations regarding a given hypothesis and eventually needs to decide whether to reject the said (alternate) hypothesis or reject some null hypothesis (Wald and Wolfowitz, 1948; Yu et al., 2009; Drugowitsch et al., 2012; Shenoy and Angela, 2012; Zhang and Angela, 2013; Drugowitsch et al., 2014; Khalvati and Rao, 2015; Schonbrodt et al., 2017; Fauß et al., 2020). Rejecting the correct hypothesis provides a positive reward whereas waiting for more observations, while informative, is also costly as in OCP. It is well known that the optimal policy in the classic setting of SHT is a thresholding-type policy with fixed thresholds that do not vary over time: The null hypothesis is rejected if some test statistic gets above a threshold (and the alternate hypothesis is rejected if the same statistic gets below a different threshold).", + "bbox": [ + 169, + 589, + 828, + 731 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Optimal commitment can be thought of as a SHT problem with the crucial difference that the meta-experimenter has only the option of discarding the alternate hypothesis (i.e. breaking a commitment), and once some time horizon is reached (i.e. when a commitment is kept), either the null hypothesis or the alternate hypothesis is automatically rejected according to some external success criterion $\\rho$ , regardless of what the meta-experimenters' decision might have been otherwise. 
As we have shown in Proposition 2, the optimal policy still remains a thresholding-type policy, but since there is now a deadline to discard the alternate hypothesis early, the thresholds become time-varying; in particular, they become less and less optimistic as the said deadline approaches (cf. Proposition 4).", + "bbox": [ + 169, + 734, + 828, + 849 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Frazier and Angela (2007); Dayanik and Angela (2013); Alaa and van der Schaar (2016) consider SHT under stochastic deadlines, but different from optimal commitment, they still allow agents to reject both hypotheses at any time. In these works, the agent must make the rejection decision before the deadline is reached to be able to receive a positive reward, whereas in our case, the agent must wait until the deadline to see whether the null hypothesis will be rejected or not. Naghshvar and", + "bbox": [ + 169, + 854, + 826, + 926 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/91c7f1cd207a5213b5dcec4fc62f06920b3997bac17adcdc0e8f5b64e742df10.jpg", + "table_caption": [ + "Table 2: Comparison of related experiment designs. Optimal commitment is the only design that aims to decide both when an alternative population should be targeted—as opposed to switching the target population only at a fixed decision point—as well as which population to target among many potential candidates—as opposed to a simple binary decision of “overall population vs. sub-population” or “go vs. no-go”." + ], + "table_footnote": [], + "table_body": "
DesignReferenceWhen?Which?
Randomized Controlled Trial (RCT)Fisher (1935)NeverOnly the initial population
Adaptive Enrichment DesignOndra et al. (2019)Fixed decision pointOverall vs. fixed subpopulation
Adaptive Signature DesignZhang et al. (2017)Fixed decision pointPossibly any population
RCT with Futility StoppingHe et al. (2012)Possibly any timeGo vs. no-go
Optimal Commitment(Ours)Possibly any timeAmong multiple populations
", + "bbox": [ + 171, + 152, + 823, + 244 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Javidi (2013); Jarrett and van der Schaar (2020) consider active versions of SHT where the agent is able to choose what type of observations to make. Our case is \"passive\" in the sense that the meta-experimenter cannot influence what kind of samples they are going to receive from the currently running experiment. Finally, optimal commitment, and SHT in general, can be thought of as more structured instances of partially-observed reinforcement learning (RL). As we have discussed earlier, the standard technique here relies on convex reward structures whereas the optimal value function in our case is not convex in general (cf. Proposition 1, see Appendix A for a detailed discussion).", + "bbox": [ + 169, + 256, + 826, + 354 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Adaptive experimentation We introduced optimal commitment predominantly as a tool for population selection during an experiment. In clinical trials, dominant approach to population selection is adaptive enrichment (Mehta et al., 2009; Magnusson and Turnbull, 2013; Simon and Simon, 2013; Wang and Hung, 2013; Simon and Simon, 2018; Ondra et al., 2019; Thall, 2021) and adaptive signature designs (Freidlin and Simon, 2005; Freidlin et al., 2010; Mi, 2017; Zhang et al., 2017; Bhattacharyya and Rai, 2019). These designs are capable of adapting the target population of a trial as the trial continues, but unlike optimal commitment, they can only do so at fixed analysis points and not just at any time step. 
While adaptive signature designs can select arbitrary populations, adaptive enrichment designs are also limited by the number of pre-specified populations they can select between, which is typically only two: the overall population and an alternative subpopulation.", + "bbox": [ + 169, + 361, + 826, + 501 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Optimal commitment is also related to clinical trial designs with futility stopping, where an experimenter might terminate a trial early once it becomes apparent that the said trial is highly unlikely to succeed (van der Tweel and van Noord, 2003; Lachin, 2005; He et al., 2012; Jitlal et al., 2012; Kimani et al., 2013; Chang et al., 2020). However, this does not consider the possibility of switching to a new trial that targets a different population. As we will see during our experiments, switching to an alternative experiment might prove preferable even before an ongoing experiment can be deemed futile. In such cases, optimal commitment can make more timely decisions. Table 2 summarizes the experiment designs related to optimal commitment. Finally, it is worth mentioning that there are several methods for managing clinical trials at a portfolio level—that is determining which clinical trial is to be conducted next (Rogers et al., 2002; Colvin and Maravelias, 2008; Graham et al., 2020). Trial management in this vein is orthogonal to optimal commitment: They are concerned with the success of multiple new treatments and make decisions on a trial-by-trial basis whereas we only ever consider a single intervention and make decisions regarding the target population on a sample-by-sample basis while experiments still continue. 
See Appendix H for extended related work.", + "bbox": [ + 169, + 507, + 828, + 703 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 715, + 328, + 729 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We want to investigate how Bayes-OCP behaves in environments that differ in terms of ground-truth outcomes, for instance, what happens in environments where the original experiment is quite likely to succeed versus what happens in ones where switching to an alternative experiment is needed. To this end, we simulate experiments where mean outcomes are varied but other aspects of an experiment are fixed: In our environments, there are two atomic-populations, $\\mathcal{X} = \\{\\mathcal{X}_A,\\mathcal{X}_B\\}$ . Both atomic-populations have equal propensities $\\eta_{\\mathcal{X}_A} = \\eta_{\\mathcal{X}_B} = 1 / 2$ and the meta-experimenter has the same positively-biased prior for the mean outcome associated with each atomic-population: $\\theta_{\\mathcal{X}_A},\\theta_{\\mathcal{X}_B}\\sim \\mathcal{N}(0.1,0.1)$ . Experiment designs targeting one or both of these atomic-populations all have the same time horizon $\\tau = 600$ and success criterion $\\rho (\\mathcal{D}_{\\tau}) = \\mathbb{1}\\{\\Sigma_{(x_t,y_t)\\in \\mathcal{D}_{\\tau}}y_t / |\\mathcal{D}_{\\tau}| > \\alpha /\\sqrt{\\tau}\\}$ , where $\\alpha = F^{-1}(95\\%)$ . So, experiments are powered to detect a positive mean outcome of 0.1 with probability $\\sim 80$ . 
Rewards are given by $R_{X} = 1000\\eta_{X}^{0.1}$ the wider the target population is, the more people a successful intervention can be marketed to—and costs are given by $C_X = 1 / \\eta_X^{0.1}$ —the narrower the target population is, the harder it becomes to find subjects eligible to participate.", + "bbox": [ + 169, + 739, + 828, + 922 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Benchmarks We consider the metaexperiment designs summarized in Table 2 as benchmarks (see Appendix A.1 for an RL-based benchmark). Conventional RCT always targets the overall population and never stops early—that is it always conducts the experiment $\\psi = (\\{\\mathcal{X}_A,\\mathcal{X}_B\\} ,\\tau ,\\rho)$ until its completion. Adaptive Enrichment performs an intermediary analysis at $t = \\tau /2 = 300$ and greedily selects the experiment with the highest expected utility from $\\Psi = \\{(X,\\tau ,\\rho)\\}_{X\\subseteq \\{\\mathcal{X}_A,\\mathcal{X}_B\\}}$ . Futility Stopping is implemented via Bayes-OCP by initializing the set of all experiments as a singleton $\\Psi = \\{\\Psi_0 = (\\{\\mathcal{X}_A,\\mathcal{X}_B\\} ,\\tau ,\\rho)\\}$ . Intuitively, futility stopping only decides whether or not to stop the initial experiment that targets the overall population early. Bayes-OCP is initialized with $\\beta = 0.80$ (see Appendix E for a sensitivity analysis). We also consider an abla", + "bbox": [ + 169, + 104, + 421, + 435 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/124405db7f564c1a6224da1e823495a8baf4fbd4cebef593238f4117dfe0aa7a.jpg", + "table_caption": [ + "Table 3: Performance comparison in various environment instances. 
Bayes-OCP has the highest expected utility—and a smaller FWER than conventional RCTs—when averaged over all environment instances. This is because Bayes-OCP is a balanced design whose structure does not favor certain environment instances over others. As an example, compare it with conventional RCTs: RCTs do not have an adaptive structure hence they favor green environments where it is not necessary to adapt the target population of the initial experiment. *Instances favored/addressed partially" + ], + "table_footnote": [], + "table_body": "
Algorithms:Oracle RCTRCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
Favored Instances:N/AGreenGreen/Amber*Green/RedAmber*/RedBalanced (incl. Amber)
All Instances (100%)Utility260.4-39.4 (6.7)106.5 (6.9)150.0 (3.5)32.6 (3.1)171.8 (3.6)
FWER0.0%0.3% (0.1%)0.2% (0.1%)0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success75.2%56.1% (0.7%)53.2% (0.8%)45.4% (1.3%)10.5% (0.8%)52.4% (1.2%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)607.5 (1.9)615.1 (1.2)
T-to-F35.6600.0 (0.0)548.9 (16.1)57.6 (4.6)3.0 (0.5)70.8 (8.2)
Green Instances (47.3%)Utility389.6388.7 (3.9)385.6 (3.7)337.7 (5.7)63.1 (3.5)343.4 (7.3)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)0.9 (0.0)0.1 (0.0)
Success99.0%98.9% (0.4%)97.4% (0.7%)86.0% (1.4%)18.8% (0.9%)88.2% (2.0%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)605.8 (1.5)602.8 (0.4)
T-to-F600.0600.0 (0.0)759.4 (36.4)46.6 (7.6)2.5 (0.5)62.3 (14.3)
Amber Instances (29.4%)Utility258.6-300.3 (19.8)-17.6 (6.5)-5.3 (5.4)11.6 (3.4)63.2 (5.6)
FWER0.0%0.7% (0.3%)0.6% (0.3%)0.4% (0.3%)0.0% (0.0%)0.3% (0.2%)
Switches1.00.0 (0.0)0.7 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success96.6%30.0% (2.0%)22.6% (1.5%)15.2% (2.0%)5.3% (1.0%)35.2% (1.8%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)617.2 (5.9)663.9 (7.2)
T-to-F600.0600.0 (0.0)745.0 (13.1)78.3 (9.3)3.4 (0.6)104.4 (19.1)
Red Instances (23.3%)Utility0.0-579.2 (4.1)-304.2 (4.4)-35.1 (1.7)-2.8 (1.1)-39.7 (3.4)
FWER0.0%0.2% (0.3%)0.2% (0.3%)0.1% (0.2%)0.0% (0.0%)0.2% (0.3%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%2.1% (0.4%)1.8% (0.5%)0.9% (0.3%)0.3% (0.4%)1.6% (0.7%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)634.8 (39.1)
T-to-F0.0600.0 (0.0)343.1 (8.1)38.9 (2.1)3.5 (1.4)45.8 (2.9)
", + "bbox": [ + 431, + 220, + 826, + 429 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tion of Bayes-OCP where decisions are made greedily instead of optimistically (Greedy Bayes-OCP). As a baseline of maximum achievable performance, we consider an oracle (Oracle RCT) that always runs the RCT with the optimum target (or does not run any RCT at all if that happens to be optimal).", + "bbox": [ + 169, + 436, + 826, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "**Environments** A meta-experimenter's performance is specific to the environment instance. In particular, it depends on the ground-truth outcome distributions $\\{\\Omega_x\\}$ for different populations. For example, an algorithm that always immediately stops the experiment would perform best when the mean outcome is negative. Hence, to faithfully evaluate the benchmarks, we need to focus on the average performance across different environments. To this end, we randomly generated 1000 environments (repeated five times to obtain error bars) with true mean outcomes $\\theta_{\\mathcal{X}_A}, \\theta_{\\mathcal{X}_B}$ sampled independently from $\\mathcal{N}(0.1, 0.1)$ . Given these means, outcome distributions are set to be Gaussian with unit variance such that $\\Omega_x = \\mathcal{N}(\\theta_x, 1)$ . 
Depending on the true mean outcome, these environments can be categorized into three groups: (i) green instances where the initial experiment targeting the overall population has the highest utility, (ii) amber instances where an alternative experiment that targets a subpopulation has the highest utility, and (iii) red instances where no experiment has positive utility hence running no experiments is the optimal decision.", + "bbox": [ + 169, + 484, + 823, + 652 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Different benchmarks favor different instances (see the top row of Table 3): Conventional RCTs do not allow for any adaptation hence they favor green instances where the target population of the initial experiment does not need to be adapted. Adaptive Enrichment allows for adaptation but only at a certain time point, which is often too late to stop unsuccessful experiments (as in red instances). However, an adaptive enrichment design at least makes it possible to eventually target a subpopulation, even though it might be too late to do so at the pre-specified decision point, hence it partially accommodates amber instances. Futility Stopping decides between either continuing with the initial experiment or stopping all experimentation completely (targeting a subpopulation is not an option) hence it favors either green or red instances (but not amber instances). Greedy Bayes-OCP is pessimistic (or rather not optimistic enough) towards any ongoing experiment succeeding, hence it favors red instances where no experiment is likely to succeed. 
Similar to adaptive enrichment, Greedy Bayes-OCP at least allows subpopulations to be targeted hence it too partially accommodates amber instances.", + "bbox": [ + 169, + 657, + 826, + 825 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Main results Performance of a meta-experimenter is primarily measured by Bayesian utility which is the expected utility averaged over randomly sampled environment instances (Utility). Remember that maximizing utility was our main objective, and as such, Bayes-OCP has the highest expected utility when averaged over all environment instances, see Table 3. Unlike other benchmarks, Bayes-OCP strikes a good balance in prioritizing all environment instances at the same time. This is because Bayes-", + "bbox": [ + 169, + 830, + 826, + 902 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "OCP (i) can make timely decisions—unlike Adaptive Enrichment—and (ii) is optimistic hence it does not stop likely-to-succeed experiments prematurely—unlike Greedy Bayes-OCP. More specifically,", + "bbox": [ + 169, + 103, + 550, + 160 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "(i) Timeliness of Bayes-OCP: Bayes-OCP has an advantage in amber and red instances over adaptive enrichment and futility stopping. Consider the example in Figure 3: While Bayes-OCP stops in a timely manner, adaptive enrichment can only stop at a fixed decision point and experiments with futility stopping only stop when the ongoing experiment is failing not as soon as a better alternative emerges. 
This underlines the exploitative aspect of Bayes-OCP—making and breaking commitments to maximize utility.", + "bbox": [ + 187, + 166, + 553, + 306 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "(ii) Optimism of Bayes-OCP: While a design that favors early stopping is obviously desirable in amber and red environments, how much it is favored should be moderated to also succeed in green environments. Consider the example in Figure 4: Greedy Bayes-OCP prematurely stops the initial experiment in a green environment while Bayes-OCP does not. Theoretically, we know that the optimal policy should be optimistic towards the ongoing experiment succeeding and be hesitant to stop to a certain extend. This underlines the exploratory aspect of Bayes-OCP—keeping a seemingly failing commitment still has value as it reveals more information regarding whether the commitment is actually failing.", + "bbox": [ + 183, + 311, + 553, + 507 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In Table 3, in addition to Utility, we also report the family-wise error rate (FWER)—that is the frequency of runs where at least one experiment (denote it with $\\psi^i$ ) is declared successful (i.e. $\\rho^i(\\mathcal{D}_\\tau^i) = 1$ ) despite the mean outcome being negative for the targeted population (i.e. $\\bar{\\theta}_{X^i} < 0$ —the average number of times the target population has been switched (Switches), the probability of success which is defined as achieving positive utility (Success), the average time until a successful outcome (Timeto-Success, $T$ -to- $S$ ), and the average time until an unsuccessful outcome where all experimentation is stopped", + "bbox": [ + 169, + 513, + 553, + 667 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "with negative utility (Time-to-Failure, $T$ -to- $F$ ), see Appendix G for details. 
Importantly, Bayes-OCP does not compromise the error control of experiments, on the contrary, it even achieves a smaller FWER than conventional RCTs. This is because aggregate data is only ever used to select experiments, otherwise no two experiments consult each other's data when evaluating a success criterion so that the potential confoundedness that could have been caused by the adaptiveness of Bayes-OCP is avoided when declaring an experiment as successful (see Appendix B for a discussion on error control).", + "bbox": [ + 169, + 666, + 826, + 750 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Supplementary results We also provide supplementary results: Appendix A.1 evaluates RL-based benchmarks, Appendix B.1 investigates error control, Appendix C considers environments with non-Gaussian outcomes, Appendix D considers environments with more than two atomic-populations, and Appendix E analyzes the sensitivity of Bayes-OCP's performance to its hyper-parameter $\\beta$ .", + "bbox": [ + 169, + 756, + 828, + 814 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 828, + 320, + 843 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Two aspects of OCP require further discussion: (i) How can it be approached from the perspective of reinforcement learning? While OCP technically describes a special class of POMDPs, we have not found this to be constructive in finding a solution (see Appendix A). (ii) What are the implications of using Bayes-OCP in terms of error control? It has no impact on individual error rates and can be adapted to control FWER (see Appendix B). See Appendix F for a discussion on future work.", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/a619405fd97dc0546afb287a9790828788911848db4ae9f868bdd325c371a984.jpg", + "image_caption": [ + "Figure 3: Timeliness of Bayes-OCP. 
Bayes-OCP is first to (correctly) stop the initial experiment in an amber instance (excluding Greedy Bayes-OCP). Adaptive enrichment can only stop at a pre-specified time, while futility stopping fails to consider switching to an alternative experiment, which is proven to be preferable earlier than stopping." + ], + "image_footnote": [], + "bbox": [ + 562, + 106, + 821, + 294 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/1391f46e577a92e0ab99ca7dfda5da32e778dbe68ef5fd7fa3e04ba1028ad26e.jpg", + "image_caption": [ + "Figure 4: Optimism of Bayes-OCP. Greedy Bayes-OCP (incorrectly) stops due to initial noise in a green instance while Bayes-OCP does not stop since it is more optimistic (as the optimal policy should, cf. Proposition 3)." + ], + "image_footnote": [], + "bbox": [ + 560, + 404, + 821, + 593 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ETHICS STATEMENT", + "text_level": 1, + "bbox": [ + 174, + 102, + 339, + 116 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "As the main application of optimal commitment, we have focused on adaptive experimentation, particularly experiments that are run as part of clinical development. Clinical trials have a huge impact on the wellbeing of patients and this high-stakes nature of clinical trials naturally raises some ethical concerns; we discuss two major ones in this section. However before we start our discussion, it should be emphasized that clinical trials is not the only application domain of optimal commitment. As we have highlighted at the end of Section 2, our contributions are generally applicable to decision-making problems such as portfolio and energy management. 
Moreover, not all adaptive experiments are clinical and have the same high stakes as a clinical trial. For instance, A/B testing is common in online advertisement to determine what recommendation policies lead to more user engagement (Gui et al., 2015; Xu et al., 2015; Kohavi and Longbotham, 2017). Therefore, the ethical concerns we discuss here do not universally concern all possible applications of optimal commitment.", + "bbox": [ + 174, + 127, + 823, + 279 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The first concern is how the designed error rate of an individual experiment is affected when multiple such experiments are managed together using Bayes-OCP in an adaptive manner, in particular, whether any error rate is inflated by the use of Bayes-OCP or not. We discuss error control in Appendix B with supplementary experiments. But briefly, Bayes-OCP has essentially no impact on the error rate of experiments on an individual level, and when controlling their family-wise error rate is also a concern, it can easily be adapted to accommodate this additional constraint as well.", + "bbox": [ + 174, + 286, + 823, + 369 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The second concern is that an adaptive approach to population selection might lead to overly conservative experiments that unnecessarily limit the use of an effective treatment. As we have mentioned in the introduction to motivate the need for optimal commitment, when the treatment is effective only for a subpopulation (cf. amber instances in our experiments), population selection is absolutely necessary, otherwise the treatment is most likely to be found ineffective and discarded after an experiment that targets the overall patient population as a whole, which would deny the treatment for the subpopulation that would have benefited from it. On the flip side of this, when the treatment happens to be effective for everyone (cf. 
green instances in our experiment), population selection might lead to conducting a restrictive experiment that only targets a small subpopulation, which this time, would deny the treatment for the rest of the patient population. This is essentially the reason behind the performance drop between Bayes-OCP and conventional RCTs in green instances (see Table 3). There is a trade-off between the performance in amber instances and green instances; and Bayes-OCP achieves a better balance between the two compared with a conventional RCT as evidenced by its superior performance when averaged over all environment instances (again see Table 3); although it causes a drop in performance for green instances, it more than makes up for that drop in amber instances. This balance is partly controlled by how optimistic Bayes-OCP is, which is in turn dictated by its hyper-parameter $\\beta$ —larger $\\beta$ leads to more optimistic decisions towards ongoing experiments, which favors green instances more than amber instances. We analyze the sensitivity of Bayes-OCP's performance to $\\beta$ in Appendix E; and for all configurations that we have evaluated, Bayes-OCP always performs significantly better than a conventional RCT.", + "bbox": [ + 174, + 377, + 823, + 654 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REPRODUCIBILITY STATEMENT", + "text_level": 1, + "bbox": [ + 174, + 669, + 433, + 683 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "All our experiments are based on synthetic simulations, hence our results can easily be reproduced by following the specifications in Section 6 without needing access to any private dataset. In order to aid reproducibility, we have rigorously described all our benchmarks in algorithmic form, similar to Algorithm 1, in Appendix J. 
Moreover, the source code necessary to reproduce our main results in Table 3 is made publicly available at https://github.com/alihanhyk/optcommit and https://github.com/vanderschaarlab/optcommit.", + "bbox": [ + 174, + 691, + 823, + 776 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 174, + 791, + 354, + 805 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We would like to thank the reviewers and the members of the van der Schaar lab, for their valuable input, comments, and suggestions. This work was supported by the US Office of Naval Research (ONR) and the National Science Foundation (NSF, grant number 1722516).", + "bbox": [ + 174, + 814, + 823, + 857 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 174, + 871, + 285, + 885 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Alaa, A. M. and van der Schaar, M., \"Balancing suspense and surprise: Timely decision making with endogenous information acquisition,\" in Proc. Neural Inf. Process. Syst., 2016.", + "bbox": [ + 174, + 895, + 823, + 922 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 477, + 46 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 491, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Auer, P., Cesa-Bianchi, N., and Fischer, P., \"Finite-time analysis of the multiarmed bandit problem,\" Mach. Learn., vol. 47, no. 2, pp. 235-256, 2002.", + "Bhattacharyya, A. and Rai, S. N., \"Adaptive signature design—review of the biomarker guided adaptive phase-III controlled design,\" Contemporary Clin. Trials Commun., vol. 15, p. 100378, 2019.", + "Bubeck, S., Cesa-Bianchi, N. et al., \"Regret analysis of stochastic and nonstochastic multi-armed bandit problems,\" Found. Trends Mach. 
Learn., vol. 5, no. 1, pp. 1-122, 2012.", + "Chang, Y., Song, T., Monaco, J., and Ivanova, A., \"Futility stopping in clinical trials, optimality and practical considerations,\" J. Biopharmaceutical Statist., vol. 30, no. 6, pp. 1050-1059, 2020.", + "Chiu, Y.-D., Koenig, F., Posch, M., and Jaki, T., \"Design and estimation in clinical trials with subpopulation selection,\" Statist. Med., vol. 37, pp. 4335-4352, 2018.", + "\"Trends, charts, and maps,\" ClinicalTrials.gov. [Online]. Available: https://www.clinicaltrials.gov/ct2/resources/trends#RegisteredStudiesOverTimePostedResults", + "Colvin, M. and Maravelias, C. T., \"A stochastic programming approach for clinical trial planning in new drug development,\" Comput. Chem. Eng., vol. 32, no. 11, pp. 2626-2642, 2008.", + "Dayanik, S. and Angela, J. Y., \"Reward-rate maximization in sequential identification under a stochastic deadline,\" SIAM J. Control Optim., vol. 51, no. 4, pp. 2922-2948, 2013.", + "Demets, D. L. and Lan, K. K. G., \"Interim analysis: The alpha spending function approach,\" Statist. Med., vol. 13, no. 13-14, pp. 1341-1352, 1994.", + "Drugowitsch, J., Moreno-Bote, R., Churchland, A. K., Shadlen, M. N., and Pouget, A., \"The cost of accumulating evidence in perceptual decision making,\" J. Neuroscience, vol. 32, no. 11, pp. 3612-3628, 2012.", + "Drugowitsch, J., Moreno-Bote, R., and Pouget, A., \"Optimal decision-making with time-varying evidence reliability,\" in Proc. Neural Inf. Process. Syst., 2014.", + "Fauß, M., Zoubir, A. M., and Poor, H. V., “Minimax optimal sequential hypothesis tests for Markov processes,” Ann. Statist., vol. 48, no. 5, pp. 2599–2621, 2020.", + "Fisher, R. A., The Design of Experiments. Edinburgh, Scotland: Oliver & Boyd, 1935.", + "Frazier, P. and Angela, J. Y., \"Sequential hypothesis testing under stochastic deadlines,\" in Proc. Neural Inf. Process. Syst., 2007.", + "Freidlin, B. 
and Simon, R., \"Adaptive signature design: An adaptive clinical trial design for generating and prospectively testing a gene expression signature for sensitive patients,\" *Clin. Cancer Res.*, vol. 11, no. 21, pp. 7872-7878, 2005.", + "Freidlin, B., Jiang, W., and Simon, R., \"The cross-validated adaptive signature design,\" Clin. Cancer Res., vol. 16, no. 2, pp. 691-698, 2010.", + "Ghare, G. and Leutenegger, S. T., \"Improving speedup and response times by replicating parallel programs on a SNOW,\" in Proc. Int. Conf. Job Scheduling Strategies Parallel Process., 2005.", + "Graham, E., Jaki, T., and Harbron, C., \"A comparison of stochastic programming methods for portfolio level decision-making,\" J. Biopharmaceutical Statist., vol. 30, no. 3, pp. 405-429, 2020.", + "Gui, H., Xu, Y., Bhasin, A., and Han, J., \"Network A/B testing: From sampling to estimation,\" in Proc. Int. Conf. World Wide Web, 2015.", + "He, P., Lai, T. L., and Liao, O. Y.-W., “Futility stopping in clinical trials,” Statist. Interface, vol. 5, no. 4, pp. 415-423, 2012.", + "Jarrett, D. and van der Schaar, M., \"Inverse active sensing: Modeling and understanding timely decision-making,\" in Int. Conf. on Mach. Learn., 2020." + ], + "bbox": [ + 171, + 102, + 828, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jitlal, M., Khan, I., Lee, S., and Hackshaw, A., \"Stopping clinical trials early for futility: retrospective analysis of several randomised clinical studies,\" Brit. J. Cancer, vol. 107, no. 6, pp. 910-917, 2012.", + "Kaitin, K. I., \"Deconstructing the drug development process: The new face of innovation,\" Clin. Pharmacology Therapeutics, vol. 87, no. 3, pp. 
356-361, 2010.", + "Karatzas, I. and Wang, H., \"Utility maximization with discretionary stopping,\" SIAM J. Control Optim., vol. 39, pp. 306-329, 2020.", + "Khalvati, K. and Rao, R. P., \"A Bayesian framework for modeling confidence in perceptual decision making,\" in Proc. Neural Inf. Process. Syst., 2015.", + "Kimani, P. K., Todd, S., and Stallard, N., \"Conditionally unbiased estimation in phase II/III clinical trials with early stopping for futility,\" Statist. Med., vol. 32, no. 17, pp. 2893-2910, 2013.", + "Kohavi, R. and Longbotham, R., \"Online controlled experiments and A/B testing,\" Encyclopedia Mach. Learn. Data Mining, vol. 7, no. 8, pp. 922-929, 2017.", + "Lachin, J. M., “A review of methods for futility stopping based on conditional power,” Statist. Med., vol. 24, no. 18, pp. 2747-2764, 2005.", + "Lawler, E., Combinatorial Optimization, Networks and Matroids. New York: Holt, Rinehard & Winston, 1976.", + "Lipkovich, I., Dmitrienko, A., and D'Agostino Sr., R. B., \"Tutorial on biostatistics: Data-driven subgroup identification and analysis in clinical trials,\" Statist. Med., vol. 36, pp. 136-196, 2017.", + "Magnusson, B. P. and Turnbull, B. W., \"Group sequential enrichment design incorporating subgroup selection,\" Statist. Med., vol. 32, no. 16, pp. 2695-2714, 2013.", + "Markowitz, H., Portfolio selection: Efficient diversification of investment. New York: John Wiley, 1959.", + "Mehta, C., Gao, P., Bhatt, D. L., Harrington, R. A., Skerjanec, S., and Ware, J. H., \"Optimizing trial design: Sequential, adaptive, and enrichment strategies,\" Circulation, vol. 119, no. 4, pp. 597-605, 2009.", + "Merton, R. C., \"Life time portfolio selection under uncertainty: The continuous-time case,\" Rev. Econ. Statist., vol. 51, pp. 247-257, 1969.", + "Mi, G., \"Enhancement of the adaptive signature design for learning and confirming in a single pivotal trial,\" Pharmaceutical Statist., vol. 16, no. 5, pp. 312-321, 2017.", + "Moineddin, R., Butt, D. 
A., Tomlinson, G., and Beyene, J., \"Identifying subpopulations for subgroup analysis in a longitudinal clinical trial,\" Contemporary Clin. Trials, vol. 29, pp. 817-822, 2008.", + "Naghshvar, M. and Javidi, T., \"Active sequential hypothesis testing,\" Ann. Statist., vol. 41, no. 6, pp. 2703-2738, 2013.", + "Ni, T., Eysenbach, B., and Salakhutdinov, R., \"Recurrent model-free RL can be a strong baseline for many POMDPs,\" in Proc. Int. Conf. Mach. Learn., 2022.", + "Olofsson, M., Önskog, T., and Lundström, N. L. P., \"Management strategies for run-of-river hydropower plants: An optimal switching approach,\" Optim. Eng., vol. 23, pp. 1707-1731, 2022.", + "Ondra, T., Jobjörnsson, S., Beckman, R. A., Burman, C.-F., König, F., Stallard, N., and Posch, M., \"Optimized adaptive enrichment designs,\" Statist. Methods Med. Res., vol. 28, no. 7, pp. 2096-2111, 2019.", + "Papadimitriou, C. H. and Steiglitz, K., Combinatorial Optimization—Algorithms and Complexity. Englewood Cliffs, NJ: Prentice Hall, 1982.", + "Rafique, S. F. and Jianhua, Z., \"Energy management system, generation and demand predictors: A review,\" IET Gener. Transmiss. Distribution, vol. 12, no. 3, pp. 519-530, 2018." + ], + "bbox": [ + 171, + 102, + 828, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Rogers, M. J., Gupta, A., and Maranas, C. D., \"Real options based analysis of optimal pharmaceutical research and development portfolios,\" Ind. Eng. Chem. Res., vol. 41, no. 25, pp. 6607-6620, 2002.", + "Schönbrodt, F. D., Wagenmakers, E.-J., Zehetleitner, M., and Perugini, M., \"Sequential hypothesis testing with Bayes factors: Efficiently testing mean differences,\" Psychol. Methods, vol. 22, no. 
2, p. 322, 2017.", + "Shenoy, P. and Angela, J. Y., \"Strategic impatience in Go/NoGo versus forced-choice decision-making,\" in Proc. Neural Inf. Process. Syst., 2012.", + "Shiryaev, A. N., Optimal Stopping Rules. Springer Science & Business Media, 2007.", + "Simon, N. and Simon, R., \"Adaptive enrichment designs for clinical trials,\" Biostatistics, vol. 14, no. 4, pp. 613-625, 2013.", + "Simon, N. and Simon, R., \"Using Bayesian modeling in frequentist adaptive enrichment designs,\" Biostatistics, vol. 19, no. 1, pp. 27-41, 2018.", + "Spaan, M. T. J., \"Partially observable Markov decision processes,\" in Reinforcement Learning. Springer, 2012, pp. 387-414.", + "Takebe, T., Imai, R., and Ono, S., \"The current status of drug discovery and development as originated in United States academia: The influence of industrial and academic collaboration on drug discovery and development,\" Clin. Transl. Sci., vol. 11, no. 6, pp. 597-606, 2018.", + "Thall, P. F., \"Adaptive enrichment designs in clinical trials,\" Annu. Rev. Statist. Appl., vol. 8, pp. 393-411, 2021.", + "Umscheid, C. A., Margolis, D. J., and Grossman, C. E., \"Key concepts of clinical trials: A narrative review,\" Postgraduate Med., vol. 123, no. 5, pp. 194-204, 2011.", + "van der Tweel, I. and van Noord, P. A., \"Early stopping in clinical trials and epidemiologic studies for 'futility': Conditional power versus sequential analysis,\" J. Clin. Epidemiology, vol. 56, no. 7, pp. 610-617, 2003.", + "Wald, A. and Wolfowitz, J., \"Optimum character of the sequential probability ratio test,\" Ann. Math. Statist., vol. 19, pp. 326-339, 1948.", + "Wang, D., Joshi, G., and Wornell, G., \"Efficient task replication for fast response times in parallel computation,\" in Proc. ACM SIGMETRICS Conf., 2014.", + "Wang, D., Joshi, G., and Wornell, G. W., \"Efficient straggler replication in large-scale parallel computing,\" ACM Trans. Model. Perform. Eval. Comput. Syst., vol. 4, no. 2, pp. 1-23, 2019.", + "Wang, S.-J. 
and Hung, H. J., \"Adaptive enrichment with subpopulation selection at interim: Methodologies, applications and design considerations,\" Contemporary clinical trials, vol. 36, no. 2, pp. 673-681, 2013.", + "Xu, Y., Chen, N., Fernandez, A., Sinno, O., and Bhasin, A., \"From infrastructure to culture: A/B testing challenges in large scale social networks,\" in Proc. ACM SIGKDD Int. Conf. Knowl. Discovery Data Mining, 2015.", + "Yu, A. J., Dayan, P., and Cohen, J. D., \"Dynamics of attentional selection under conflict: Toward a rational Bayesian account,\" J. Exp. Psychol. Human Perception Performance, vol. 35, no. 3, p. 700, 2009.", + "Zhang, S. and Angela, J. Y., \"Forgetful Bayes and myopic planning: Human learning and decision-making in a bandit setting,\" in Proc. Neural Inf. Process. Syst., 2013.", + "Zhang, Z., Li, M., Lin, M., Soon, G., Greene, T., and Shen, C., \"Subgroup selection in adaptive signature designs of confirmatory clinical trials,\" J. Roy. Statist. Soc.: Ser. C (Appl. Statist.), vol. 66, no. 2, pp. 345-361, 2017." + ], + "bbox": [ + 171, + 102, + 826, + 895 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A A REINFORCEMENT LEARNING PERSPECTIVE", + "text_level": 1, + "bbox": [ + 171, + 103, + 584, + 118 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Optimal commitment can be viewed as a partially-observed reinforcement learning problem. 
Let the tuple $(\\mathcal{S},\\mathcal{A},\\mathcal{Z},\\mathcal{T},\\mathcal{O},\\mathcal{R})$ denote a partially-observable Markov decision process (POMDP), where $\\mathcal{S}$ is the (unobserved) state space, $\\mathcal{A}$ is the action space, $\\mathcal{Z}$ is the observation space, $\\mathcal{T} \\in \\Delta(\\mathcal{S})^{S \\times S}$ describes the transition dynamics, $\\mathcal{O} \\in \\mathcal{Z}^S$ describes the observation dynamics, and $\\mathcal{R} \\in \\mathbb{R}^S$ describes the reward dynamics. Then, OCPs as defined in Section 2 can also be expressed as a special class of POMDPs: Letting $\\mathcal{Y} = \\mathbb{R}$ denote the outcome space for clarity, $\\mathfrak{D} = \\cup_{t=0}(\\mathcal{X} \\times \\mathcal{Y})^t$ be the space of all possible datasets $\\mathcal{D}_t$ , and $\\mathfrak{D}$ be the space of all possible outcome distributions $\\Omega$ ,", + "bbox": [ + 169, + 133, + 826, + 232 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $\\mathcal{S} \\doteq \\{\\varnothing\\} \\cup (\\Psi \\times \\mathfrak{D} \\times \\mathfrak{D}^{\\mathcal{X}})$ , where states $s = (\\psi, \\mathcal{D}_t, \\{\\Omega_x\\}_{x \\in \\mathcal{X}})$ consist of the ongoing experiment $\\psi \\in \\Psi$ , the dataset $\\mathcal{D}_t \\in \\mathfrak{D}$ collected by the ongoing experiment so far, and the true outcome distributions $\\{\\Omega_x \\in \\mathfrak{D}\\}$ ,", + "- $\\mathcal{A} \\doteq \\{\\varnothing\\} \\cup \\Psi$", + "- $\\mathcal{Z} \\doteq \\{\\varnothing\\} \\cup (\\mathcal{X} \\times \\mathcal{Y})$", + "- $\\mathcal{T}(s = \\emptyset, a) \\doteq \\emptyset$ and" + ], + "bbox": [ + 215, + 238, + 826, + 345 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {T} (s = (\\psi = (X, \\tau , \\rho), \\mathcal {D} _ {t}, \\{\\Omega_ {x} \\}), a) \\\\ \\begin{array}{c} \\dot {=} \\left\\{ \\begin{array}{l l} \\emptyset & \\text {i f} a = \\emptyset \\\\ s 
^ {\\prime} = (\\psi , \\mathcal {D} _ {t + 1} = \\mathcal {D} _ {t} \\cup \\{x _ {t + 1}, y _ {t + 1} \\}, \\{\\Omega_ {x} \\}) \\\\ \\quad \\text {s . t .} x _ {t + 1} \\sim \\{\\eta_ {x | X} \\}, y _ {t + 1} \\sim \\Omega_ {x _ {t + 1}} & \\text {i f} a = \\psi \\\\ s ^ {\\prime} = (\\psi^ {\\prime}, \\mathcal {D} _ {1} = \\{x _ {1}, y _ {1} \\}, \\{\\Omega_ {x} \\}) \\\\ \\quad \\text {s . t .} x _ {1} \\sim \\{\\eta_ {x | X ^ {\\prime}} \\}, y _ {1} \\sim \\Omega_ {x _ {1}} & \\text {i f} a = \\psi^ {\\prime} = (X ^ {\\prime}, \\tau^ {\\prime}, \\rho^ {\\prime}) \\neq \\psi , \\end{array} \\right. \\end{array} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 353, + 815, + 459 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "- $\\mathcal{O}(s' = \\varnothing) \\doteq \\varnothing$ and", + "bbox": [ + 215, + 474, + 370, + 489 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {O} \\left(s ^ {\\prime} = (\\psi , \\mathcal {D} _ {t + 1} = \\mathcal {D} _ {t} \\cup \\{x _ {t + 1}, y _ {t + 1} \\}, \\{\\Omega_ {x} \\})\\right) \\doteq \\left(x _ {t + 1}, y _ {t + 1}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 497, + 740, + 515 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "- $\\mathcal{R}(s' = \\emptyset) \\doteq 0$ and", + "bbox": [ + 215, + 530, + 367, + 546 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {R} \\left(s ^ {\\prime} = (\\psi = (X, \\tau , \\rho), \\mathcal {D} _ {t + 1}, \\{\\Omega_ {x} \\})\\right) \\doteq - C _ {\\psi} + R _ {\\psi} \\cdot \\mathbb {1} \\{t + 1 = \\tau \\} \\cdot \\rho \\left(\\mathcal {D} _ {t + 1}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 554, + 790, + 571 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Since ongoing experiments $\\psi$ are completely dictated by actions, and datasets $\\mathcal{D}_t$ collected by the ongoing experiments consist solely of observations 
$(x_t,y_t)$ , the only unobserved component of the states in this POMDP is the true outcome distributions $\\{\\Omega_x\\}_{x\\in \\mathcal{X}}$ . Hence, the optimal policy should have the form $\\pi (\\psi ,\\mathcal{D}_t,b)$ where $b\\in \\Delta (\\mathfrak{O}^{\\mathcal{X}})$ denotes beliefs over $\\{\\Omega_x\\}$ that is posterior distributions over the true outcome distributions. For instance, when $\\Omega_{x} = \\mathcal{N}(\\theta_{x},1)$ as we have been assuming in Sections 3 and 4, posteriors over mean outcomes $\\{\\theta_x\\}_{x\\in \\mathcal{X}}$ , which are given by parameters $\\{\\mu_x,\\sigma_x^2\\}$ such that $\\theta_{x}|\\bar{\\mathcal{D}}_{t}^{i}\\sim \\mathcal{N}(\\mu_{x},\\sigma_{x}^{2})$ , constitute as beliefs.", + "bbox": [ + 169, + 585, + 823, + 685 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Now although an OCP can be expressed as a POMDP, doing so is not particularly helpful in finding a solution. As we have already discussed in Section 3, the standard approach to solving a POMDP would be to use dynamic programming and compute the optimal value function $V^{*}$ and the optimal Q-function $Q^{*}$ iteratively according to Bellman optimality conditions", + "bbox": [ + 169, + 690, + 826, + 747 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q ^ {*} (b, a) = \\mathbb {E} _ {s \\sim b, s ^ {\\prime} \\sim \\mathcal {T} (s, a), z ^ {\\prime} = \\mathcal {O} (s ^ {\\prime}), b ^ {\\prime} | \\{b, z ^ {\\prime} \\}} \\left[ \\mathcal {R} \\left(s ^ {\\prime}\\right) + V ^ {*} \\left(b ^ {\\prime}\\right) \\right] \\\\ V ^ {*} (b) = \\max _ {a \\in \\mathcal {A}} Q ^ {*} (b, a), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 753, + 696, + 791 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $b'|\\{b, z'\\}$ denotes the updated belief $b'$ after having belief $b$ and making a new observation $z'$ . 
When the state space $S$ is discrete—or equivalently in our case, when the space of outcome distributions $\\Omega \\in \\mathfrak{D}$ is discrete— $V^*$ and $Q^*$ happen to be convex functions, which makes it possible to perform these iterations efficiently by approximating $V^*$ and $Q^*$ using functions of the form $f(b) = \\max\\{a_i b + a_j'\\}$ (Spaan, 2012). However, even in the simplest of cases where $S$ is continuous—or equivalently, the space of outcome distributions $\\Omega \\in \\mathfrak{D}$ is continuous, for instance when $\\Omega_x = \\mathcal{N}(\\theta_x, 1)$ —the convexity of $V^*$ and $Q^*$ no longer generally holds. In fact, we show in Proposition 1 that neither $V^*$ nor $-V^*$ is convex with respect to beliefs $b \\equiv \\{t, \\mu\\}$ for at least one instance of the simplified OCP that we have analyzed in Section 3.", + "bbox": [ + 169, + 797, + 826, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/51adf94dde740ab2026374795378f32e633ffe278c11f03a88597083afe70bbe.jpg", + "table_caption": [ + "Table 4: Performance comparison between Futility Stopping with RL-based algorithms and with Bayes-OCP." + ], + "table_footnote": [], + "table_body": "
Algorithms:Oracle RCTFutility Stopping w/ Discretized RLFutility Stopping w/ Deep Q-learningFutility Stopping w/ Bayes-OCP
All Instances (100%)Utility260.4131.8 (4.3)78.8 (3.1)150.0 (3.5)
FWER0.0%0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.6 (0.0)0.7 (0.0)0.5 (0.0)
Success75.2%41.0% (1.0%)24.2% (0.8%)45.4% (1.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F35.654.3 (2.2)23.6 (1.8)57.6 (4.6)
Green Instances (47.3%)Utility389.6309.5 (4.1)185.0 (4.9)337.7 (5.7)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.2 (0.0)0.5 (0.0)0.1 (0.0)
Success99.0%80.9% (0.7%)47.7% (1.1%)86.0% (1.4%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F600.072.7 (7.1)11.0 (0.8)46.6 (7.6)
Amber Instances (29.4%)Utility258.6-23.9 (5.4)-16.6 (6.8)-5.3 (5.4)
FWER0.0%0.2% (0.2%)0.1% (0.1%)0.4% (0.3%)
Switches1.00.9 (0.0)0.9 (0.0)0.8 (0.0)
Success96.6%9.1% (0.8%)5.2% (1.3%)15.2% (2.0%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F600.066.1 (4.3)39.5 (4.4)78.3 (9.3)
Red Instances (23.3%)Utility0.0-33.0 (1.6)-16.5 (5.2)-35.1 (1.7)
FWER0.0%0.1% (0.2%)0.1% (0.2%)0.1% (0.2%)
Switches1.01.0 (0.0)1.0 (0.0)1.0 (0.0)
Success0.0%0.2% (0.2%)0.4% (0.4%)0.9% (0.3%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F0.033.7 (0.9)18.3 (4.8)38.9 (2.1)
", + "bbox": [ + 199, + 102, + 795, + 464 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.1 EXPERIMENTS WITH REINFORCEMENT LEARNING BENCHMARKS", + "text_level": 1, + "bbox": [ + 171, + 503, + 663, + 518 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Having said all that, one naive way to still compute $V^{*}$ and $Q^{*}$ iteratively according to Bellman optimality conditions is to discretize the belief space. We call this benchmark Discretized RL and we use it to perform futility stopping—that is when $|\\Psi| = 1$ , deciding whether to stop the only viable experiment design early or not. Otherwise, the dimensionality of the belief state explodes combinatorially with respect to $|\\Psi|$ . We consider the same setting that we have considered during our experiments in Section 6 and compare the performance of Futility Stopping with Discretized RL with that of Futility Stopping with Bayes-OCP. When implementing discretized RL, instead of keeping track of the entire dataset $\\mathcal{D}_t$ , we only keep track of the sufficient statistic $\\mu_t = \\sum_{(x_{t'}, y_{t'})} y_{t'} / |\\mathcal{D}_t|$ , restrict the domain of $\\mu_t$ to interval $[-0.3, 0.3]$ , and discretize this interval into 100 equally-spaced bins. In addition to discretized RL, we also consider the approach proposed by Ni et al. (2022) for solving complex classes of POMDPs, which the optimal commitment problem is one of. Briefly, we employ deep Q-learning (as such, we call this benchmark Deep Q-learning) to train a neural network as an approximation of the Q-function $Q^{*}(b, a)$ using the POMDP we formalized earlier as a simulator. 
As the network architecture, we consider a multi-layer perceptron with two hidden layers of size 100 and with tanh activations.", + "bbox": [ + 169, + 539, + 826, + 750 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Results are given in Table 4; futility stopping with Bayes-OCP performs better than futility stopping with discretized RL as well as futility stopping with deep Q-learning. In addition to the bad performance of discretized RL, it is also not feasible to scale it to use cases beyond futility stopping. When $|\\Psi| > 1$ , we would need to keep separate track of each $\\mu_x$ . Moreover, we would also need to start keeping track of the scale parameters $\\{\\sigma_x\\}$ since it would now be possible to distribute samples among multiple atomic-populations in multiple ways by targeting different populations with different experiments (we no longer would be able to treat the target population of the only viable experiment design as the only atomic-population there is). Noting that $\\sigma_x$ 's already take discrete values with at least $\\tau$ -many possible values, merely increasing the number of viable experiments $|\\Psi|$ from one to two causes the dimensionality of the belief space to jump from 100 to $\\sim (100 \\times 600)^2 = 36 \\times 10^8$ . 
Deep Q-learning performs even worse as it ignores all structure present in the optimal commitment problem, and instead, views the POMDP that describes it as a black-box simulator.", + "bbox": [ + 169, + 757, + 826, + 924 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/7c4ba499e4778992d727b88b6a8062f297e8f53037bdf0f8d3d8ab0c2b356650.jpg", + "table_caption": [ + "Table 5: Performance comparison of algorithms with family-wise error control." + ], + "table_footnote": [], + "table_body": "
Algorithms:Oracle RCTRCTAdaptive Enrichment w/ Bonferroni Corr.Futility Stopping w/ Bayes-OCPGreedy Bayes-OCP w/ Bonferroni Corr.Bayes-OCP w/ Bonferroni Corr.
All Instances (100%)Utility260.4-39.4 (6.7)91.4 (5.4)150.0 (3.5)23.7 (2.2)158.7 (5.2)
FWER0.0%0.3% (0.1%)0.1% (0.1%)0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.0 (0.0)0.5 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success75.2%56.1% (0.7%)51.1% (0.7%)45.4% (1.3%)7.7% (0.5%)49.3% (1.5%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)606.2 (2.3)616.6 (1.7)
T-to-F35.6600.0 (0.0)543.0 (15.7)57.6 (4.6)2.3 (0.3)65.8 (7.0)
Green Instances (47.3%)Utility389.6388.7 (3.9)378.8 (3.1)337.7 (5.7)46.1 (3.3)325.8 (5.5)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)1.0 (0.0)0.2 (0.0)
Success99.0%98.9% (0.4%)96.1% (0.6%)86.0% (1.4%)13.8% (0.8%)84.7% (1.5%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)604.9 (2.0)604.0 (0.7)
T-to-F600.0600.0 (0.0)768.6 (19.0)46.6 (7.6)2.0 (0.3)66.5 (17.7)
Amber Instances (29.4%)Utility258.6-300.3 (19.8)-51.9 (15.6)-5.3 (5.4)8.3 (2.9)44.6 (4.6)
FWER0.0%0.7% (0.3%)0.3% (0.1%)0.4% (0.3%)0.0% (0.0%)0.2% (0.2%)
Switches1.00.0 (0.0)0.8 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success96.6%30.0% (2.0%)18.2% (2.2%)15.2% (2.0%)3.8% (0.9%)30.7% (1.4%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)613.4 (5.8)670.6 (8.9)
T-to-F600.0600.0 (0.0)724.6 (9.4)78.3 (9.3)2.6 (0.6)95.5 (18.4)
Red Instances (23.3%)Utility0.0-579.2 (4.1)-312.5 (2.3)-35.1 (1.7)-2.2 (0.3)-37.0 (2.4)
FWER0.0%0.2% (0.3%)0.2% (0.3%)0.1% (0.2%)0.0% (0.0%)0.1% (0.2%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%2.1% (0.4%)1.1% (0.4%)0.9% (0.3%)0.1% (0.2%)1.1% (0.7%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)649.8 (62.6)
T-to-F0.0600.0 (0.0)334.9 (3.7)38.9 (2.1)2.4 (0.5)39.8 (2.5)
", + "bbox": [ + 171, + 118, + 826, + 397 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B DISCUSSION ON ERROR CONTROL", + "text_level": 1, + "bbox": [ + 171, + 417, + 488, + 431 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Bayes-OCP is a method for managing experiments—that is deciding what experiment to conduct and when—as opposed to a hypothesis testing strategy in and of itself. Implication of this in terms of error control is that the type 1 error of any individual experiment run by Bayes-OCP can always be controlled by choosing an appropriate experimental design, in particular, by specifying an appropriate success criterion $\\rho$ . This individual-level error control built into the design of each experiment is not compromised by Bayes-OCP; no aggregate data from multiple experiments is ever fed into the success criterion of one alone (see Section 2, experiment $\\psi^i$ is successful if $\\rho^i(\\bar{D}_t^i) = 1$ not if $\\rho^i(\\bar{\\mathcal{D}}_t^i) = 1$ ); and any assumptions made by Bayes-OCP regarding outcomes in Section 4, whether accurate or inaccurate, have no effect on the results produced by an external success criterion.", + "bbox": [ + 169, + 450, + 823, + 577 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "While Bayes-OCP does not compromise the individual error control of experiments, neither does it control their collective family-wise error rate (FWER)—that is the probability of at least one experiment among all that are conducted making a false discovery. Bayes-OCP views the problem of managing experiments purely as a utility maximization problem with no additional constraints. Within the scope of our discussion, the purpose of measuring FWER as a metric is to check empirically whether the individual error rates are inflated or not (note that FWER is a stricter notion of error than individual error rate). 
In practice, depending on how closely related the managed experiments are, controlling FWER might not necessarily be a concern. Let us highlight this: Any algorithm that manages experiments for long enough is bound to make at least one false discovery. Each year more than a thousand clinical trials are launched (that eventually post results) and more than half of these trials succeed (Takebe et al., 2018; Cli). If the type 1 error rate of all these trials were $5\%$ , we would expect at least 25 false discoveries in a year, which is more than one, hence it would have put FWER of all real-world trials at almost $100\%$ when measured on a year-by-year basis. Of course, this is not problematic since not all clinical trials are related to each other closely enough to be considered a family.", + "bbox": [ + 169, + 584, + 826, + 779 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.1 EXPERIMENTS WITH FAMILY-WISE ERROR CONTROL", + "text_level": 1, + "bbox": [ + 171, + 799, + 578, + 811 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "When controlling FWER is of concern, Bayes-OCP can easily be adapted to satisfy this additional constraint by first limiting the number of total experiments that can be conducted—that is putting an upper bound on $n$ and then using well-established methods for family-wise error control such as Bonferroni correction or alpha spending functions (Demets and Lan, 1994) to adjust the success criteria of the viable experiments in $\Psi$ . We run additional experiments to evaluate the performance of Bayes-OCP with Bonferroni correction. 
We consider the same setting that we have considered during our experiments in Section 6 except for one difference: We limit the number of experiments", + "bbox": [ + 169, + 825, + 823, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/920d7c76807b5cdca570c9bc50b51938c411071356ef48c68c84e373d3ee226f.jpg", + "table_caption": [ + "Table 6: Performance comparison when the ground-truth outcome distributions are not Gaussian." + ], + "table_footnote": [], + "table_body": "
Algorithms:Oracle RCTRCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
All Instances (100%)Utility266.5-38.4 (14.7)110.0 (9.5)150.8 (8.5)46.3 (3.8)178.2 (7.3)
FWER0.0%0.1% (0.1%)0.0% (0.1%)0.0% (0.1%)0.0% (0.0%)0.0% (0.1%)
Switches0.50.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success76.6%56.2% (1.5%)53.6% (1.5%)46.5% (1.8%)14.7% (1.1%)54.9% (1.7%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)607.3 (1.0)617.2 (1.8)
T-to-F32.5600.0 (0.0)563.5 (9.9)65.8 (3.8)4.3 (0.5)81.9 (7.1)
Green Instances (48.0%)Utility391.3388.0 (4.1)383.7 (3.1)343.3 (4.4)89.4 (6.7)348.7 (3.5)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)0.9 (0.0)0.1 (0.0)
Success99.1%98.8% (0.4%)97.3% (0.4%)87.6% (0.7%)26.6% (2.0%)89.3% (0.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)605.8 (1.2)602.2 (1.3)
T-to-F600.0600.0 (0.0)710.1 (33.9)54.8 (10.0)4.2 (1.5)77.2 (10.6)
Amber Instances (29.9%)Utility263.5-316.2 (18.2)-13.1 (3.6)-18.7 (8.8)14.1 (2.4)67.6 (6.4)
FWER0.0%0.2% (0.3%)0.1% (0.3%)0.1% (0.3%)0.0% (0.0%)0.1% (0.3%)
Switches1.00.0 (0.0)0.7 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success97.2%28.4% (1.8%)22.6% (1.1%)14.7% (1.5%)6.3% (0.8%)39.3% (2.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)617.5 (6.1)670.6 (6.1)
T-to-F600.0600.0 (0.0)765.5 (11.4)91.0 (6.0)4.3 (1.3)126.0 (16.6)
Red Instances (22.1%)Utility0.0-588.3 (4.6)-316.6 (12.2)-37.0 (4.5)-3.8 (1.1)-41.7 (3.0)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%1.2% (0.5%)1.0% (0.5%)0.5% (0.2%)0.2% (0.2%)1.6% (0.4%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)654.7 (21.8)
T-to-F0.0600.0 (0.0)341.9 (5.1)39.4 (4.2)4.2 (1.0)46.4 (3.9)
", + "bbox": [ + 171, + 118, + 826, + 441 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "that can be conducted by each algorithm as at most two, and we specify $\\alpha = F^{-1}(0.975)$ for algorithms that can potentially run more than one experiment—namely, adaptive enrichment and (Greedy) Bayes-OCP—while we still specify $\\alpha = F^{-1}(0.95)$ for algorithms that always run exactly one experiment—namely, RCT and futility stopping. These specifications ensure that FWER of all algorithms are bounded by $5\\%$ . Results are given in Table 5; Bayes-OCP still performs the best when explicit control of FWER is required.", + "bbox": [ + 169, + 459, + 826, + 544 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C EXPERIMENTS WITH MISSPECIFIED OUTCOME DISTRIBUTIONS", + "text_level": 1, + "bbox": [ + 169, + 560, + 723, + 574 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We consider the same setting that we have considered during our experiments in Section 6. Except now, the ground-truth outcome distributions are such that, when $y \\sim \\Omega_x$ , $y = 1$ with probability $(\\theta_x + 1) / 2$ and $y = -1$ otherwise. In order to ensure that $\\theta_x \\in [-1,1]$ , we also sample ground-truth mean outcomes so that $\\theta_x = 2p - 1$ where $p$ is distributed according to Beta distribution with $\\alpha = 979 / 200$ and $\\beta = 801 / 200$ (note that the mean and variance of $\\theta_x$ remains the same as in our original experiments). Despite the fact that outcomes are now distributed in a non-Gaussian way, we leave the implementation of Bayes-OCP unchanged, which still assumes that outcomes distributions are Gaussian. So, there is now a mismatch between the structure of outcome distributions specified as part of Bayes-OCP and the ground-truth outcome distributions. 
Results are given in Table 6; Bayes-OCP still does not inflate FWER despite the misspecified outcome distributions.", + "bbox": [ + 169, + 585, + 826, + 727 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D EXPERIMENTS WITH MORE ATOMIC-POPULATIONS", + "text_level": 1, + "bbox": [ + 171, + 739, + 630, + 753 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We repeat our main experiments with more than two atomic-populations, specifically we set $|\mathcal{X}| = 10$ . As before, all atomic-populations have equal propensities such that $\eta_x = 1/10, \forall x \in \mathcal{X}$ , and the meta-experimenter has the same positively-biased prior for the mean outcome associated with each atomic population: $\theta_x \sim \mathcal{N}(0.1, 0.1)$ , $\forall x \in \mathcal{X}$ . We randomly generated 100 environments (repeated five times to obtain error bars), and the results are given in Table 7. We observe that Bayes-OCP still performs the best. These results confirm that a greedy approximation is suitable in identifying candidate experiments when the number of atomic-populations is large.", + "bbox": [ + 169, + 761, + 826, + 861 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E SENSITIVITY ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 872, + 406, + 887 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Bayes-OCP has one hyper-parameter: $\beta$ , which controls how optimistic the switching rule given in line 14 of Algorithm 1 is, from $\beta = 1/2$ meaning decisions are made greedily to $\beta = 1$ meaning", + "bbox": [ + 169, + 895, + 825, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": 
"images/291c773a9e31f3c90ad9a5c1e98dbe7735c9c933a1ee01bc4307661efa168445.jpg", + "table_caption": [ + "Table 7: Performance comparison when the number of atomic-populations is 10." + ], + "table_footnote": [], + "table_body": "
Algorithms:RCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
All Instances (100%)Utility8.0 (39.2)143.9 (31.1)141.0 (27.5)40.3 (5.9)172.4 (23.8)
FWER0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success60.8% (3.9%)71.0% (3.6%)51.2% (4.7%)15.6% (2.6%)63.2% (4.2%)
T-to-S600.0 (0.0)678.9 (8.3)600.0 (0.0)648.5 (13.2)647.5 (4.4)
T-to-F600.0 (0.0)672.7 (58.8)130.4 (12.5)9.3 (5.0)200.5 (72.2)
", + "bbox": [ + 173, + 118, + 823, + 232 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/5cac31b589d8fd3ee95c3eda7fe1f144de0ed46052f72b20e7d32868261b8af8.jpg", + "image_caption": [ + "Figure 5: Utility achieved by Bayes-OCP for various values of hyper-parameter $\\beta$ ." + ], + "image_footnote": [], + "bbox": [ + 284, + 257, + 709, + 426 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "decisions are so extremely optimistic that the original experiment will never be abandoned (as there will always be a chance that it succeeds). As with all online algorithms, tuning $\\beta$ is challenging since no a priori data would be available to perform cross validation. However, a nice feature of Bayes-OCP is that $\\beta$ is rather interpretable, it is the evidence required against the ongoing experiment: An alternative experiment is preferred over the ongoing experiment only if it is believed to be the better experiment with at least $\\beta$ -confidence. We evaluate the sensitivity of Bayes-OCP's performance to hyper-parameter $\\beta$ in Figure 5; Bayes-OCP performs better than an RCT for all configurations and better than adaptive enrichment for most configurations.", + "bbox": [ + 169, + 476, + 823, + 588 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "F FUTURE WORK", + "text_level": 1, + "bbox": [ + 171, + 611, + 333, + 626 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Extending the scope of Bayes-OCP One limitation of Bayes-OCP is that it only adapts the target population $X\\subseteq \\mathcal{X}$ of experiments but not the sample horizon $\\tau$ or the success criterion $\\rho$ . We have chosen to focus on the selection of a target population since we believe the target population of an experiment to be the most critical design dimension to adjust adaptively. 
As we have already highlighted in our introduction, experiments with inflexible target populations can be problematic when responses to the treatment of interest are highly heterogeneous.", + "bbox": [ + 169, + 645, + 823, + 729 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "That being said, the high-level strategy of our proposed algorithm should still be applicable to adapting design dimensions other than the target population, namely $\tau$ and $\rho$ . At a high level, Bayes-OCP first identifies a candidate experiment and then compares the identified experiment to the ongoing experiment in an optimistic manner. Regardless of the given set of viable experiment designs $\Psi$ , one could still follow the same strategy; the only complication would be to adapt how candidate experiments are identified depending on what design dimension varies across experiment designs in $\Psi$ .", + "bbox": [ + 169, + 734, + 826, + 820 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For instance, when experiment designs varied in terms of $X$ , a combinatorial search was required to identify good candidate experiments, for which we proposed a greedy strategy. When experiment designs vary in terms of $\rho$ , a simple search over all possible $\rho$ would suffice for identifying a candidate experiment. The case where experiment designs vary in terms of $\tau$ is more complex; optimal $\tau$ for an experiment would be dependent on unknown effects $\theta_{x}$ ; selecting a good candidate experiment would involve estimating the optimal $\tau$ given posteriors over $\theta_{x}$ . 
This would be an interesting problem to explore as a future research direction.", + "bbox": [ + 169, + 825, + 823, + 924 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Performance guarantees While our theoretical results motivate the general use of an optimistic decision rule, they do not provide any guarantees about the performance of the specific rule we propose as part of Bayes-OCP. Another future research direction would be to prove an upper bound on the sub-optimality gap of Bayes-OCP.", + "bbox": [ + 169, + 103, + 823, + 160 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "G FURTHER DISCUSSION ON MAIN RESULTS", + "text_level": 1, + "bbox": [ + 171, + 179, + 555, + 195 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 3 reports six metrics: Utility, FWER, Switches, Success, T-to-S, and T-to-F. We have already discussed the implications of Utility and FWER in Section 6. Here, we highlight other interesting phenomena regarding the remaining metrics. First, we see that Greedy Bayes-OCP switches experiments much more frequently compared with Bayes-OCP. This is because Greedy Bayes-OCP requires less evidence against the ongoing experiment when comparing it against an alternative experiment, whereas Bayes-OCP favors the ongoing experiments more. Second, we see that a higher success probability does not necessarily also imply a higher utility. For instance, compare RCT with futility stopping: futility stopping is able to achieve higher utility than RCT by terminating risky experiments early and saving costs. However, this of course also means that futility stopping sees fewer experiments to completion hence leads to a lower success probability. 
Finally, we see that succeeding or failing early does not necessarily imply a higher utility either. Our best algorithm Bayes-OCP succeeds the latest on average as well as fails the latest compared with other benchmarks favoring red instances. This highlights the importance of being conservative when making decisions, being optimistic, and favoring the status quo more than a potential adaptation.", + "bbox": [ + 169, + 208, + 826, + 402 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "H FURTHER DISCUSSION ON RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 422, + 565, + 436 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Multi-armed bandits The optimal commitment problem is similar to a multi-armed bandit (MAB) problem (Auer et al., 2002; Bubeck et al., 2012) in some aspects: Like arms in a MAB problem, each experiment design $\psi$ has a random utility given by $R_{\psi} \cdot \rho(\mathcal{D}_{\tau}) - \tau C_{\psi}$ , where $\mathcal{D}_{\tau}$ is the source of randomness, and the distribution of this utility is unknown. Also similar to a MAB problem, the overall goal is to sequentially select experiment designs (cf. arms) that yield the maximum cumulative utility. 
The main difference between the two problems is that, in a MAB problem, selecting an arm immediately reveals a sample from its random utility, while in optimal commitment, running an experiment $\\psi$ just for one time step only incurs a cost of $C_{\\psi}$ ; observing a full sample of its random utility requires the experiment to be run until its completion for $\\tau$ consecutive time steps, without selecting any other experiment design in the meantime.", + "bbox": [ + 169, + 450, + 826, + 590 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "One can naively apply a MAB algorithm by viewing each viable experiment design as a unique arm, and by running experiments/arms selected by the algorithm until their completion to observe full samples from their unknown utility distributions. However, this obviously side steps the main question we want to answer in optimal commitment: When can we abandon a commitment—in this case, the decision to run an experiment/arm selection until its completion—before fully observing its outcome? Looking at optimal commitment from a MAB perspective reveals that there are two explore-exploit dilemmas present in optimal commitment: One is with respect to which experiment to select next, and the other is with respect to when to preemptively stop the current experiment (i.e. breaking a commitment). MAB algorithms address the former dilemma but not the latter.", + "bbox": [ + 169, + 597, + 826, + 723 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Task replication in parallel computing There is work (Ghare and Leutenegger, 2005; Wand et al., 2014; Wang et al., 2019) that focuses on the problem of when to kill existing tasks and relaunch them in parallel computing, which is related to optimal stopping/switching. However there, the focus is on reasoning about when a stochastic event (i.e. 
successful completion of a computational task) will occur without any extra information other than the fact that the event of interest has not occurred yet. In contrast, in our setting, the decision-maker needs to process a streaming set of samples to reason about the random outcome of an event that is scheduled to happen at a deterministic time point (here, the event is an experiment reaching its conclusion). This means that our problem has a completely different information structure when compared with the problem of task replication. More formally, we observe samples $y_{t}$ that are informative of whether $\\rho(\\mathcal{D}_{\\tau}) = 1$ when $\\tau$ is a fixed variable. In contrast, the problem of task replication would correspond to the setting where $\\tau$ is a random variable with a known distribution and $\\rho = 1$ always holds (hence no need to observe any samples $y_{t}$ ). Among optimal stopping/switching problems, the structure of our problem is more closely related to sequential hypothesis testing, which we have already covered in Section 5.", + "bbox": [ + 169, + 729, + 828, + 924 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "I PROOFS OF PROPOSITIONS", + "text_level": 1, + "bbox": [ + 171, + 102, + 421, + 118 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "I.1 PROOF OF PROPOSITION 1", + "text_level": 1, + "bbox": [ + 171, + 133, + 395, + 148 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We start by relating the optimal value function $V^{*}$ to the optimal Q-function $Q^{*}$ . 
Letting $T_{t}^{*} = T_{t}^{\\pi^{*}}$ ,", + "bbox": [ + 169, + 159, + 826, + 176 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} V ^ {*} (t, \\mu) \\\\ = \\mathbb {E} [ R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\left\\{T _ {t} ^ {*}, \\tau \\right\\} - t) | \\mu_ {t} = \\mu ] \\\\ = \\mathbb {E} \\left[ \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\varnothing \\right\\} \\left(R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t} ^ {*}, \\tau \\right\\} - t\\right)\\right) \\right. \\\\ + \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\Psi_ {0} \\right\\} \\left(R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t} ^ {*}, \\tau \\right\\} - t)\\right) | \\mu_ {t} = \\mu \\right] \\\\ = \\mathbb {E} [ \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\varnothing \\right\\} \\cdot 0 \\\\ + \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\Psi_ {0} \\right\\} \\left(R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {*}, \\tau \\right\\} - t)\\right) | \\mu_ {t} = \\mu \\right] (11) \\\\ = \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu) = \\Psi_ {0} \\right\\} \\cdot \\mathbb {E} [ R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {*}, \\tau \\right\\} - t\\right) | \\mu_ {t} = \\mu ] (12) \\\\ = \\mathbb {1} \\left\\{Q ^ {*} (t, \\mu) > 0 \\right\\} \\cdot Q ^ {*} (t, \\mu) (13) \\\\ = \\max \\left\\{0, Q ^ {*} (t, \\mu) \\right\\}, (14) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 183, + 183, + 823, + 342 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where (11) holds since 
$\\pi^{*}(t,\\mu_{t}) = \\varnothing \\Rightarrow T_{t}^{*} = t$ and $\\pi^{*}(t,\\mu_{t}) = \\Psi_{0} \\Rightarrow T_{t}^{*} \\geq t + 1 \\Rightarrow T_{t}^{*} = \\min \\{t^{\\prime} \\geq t : \\pi^{*}(t^{\\prime},\\mu_{t^{\\prime}}) = \\emptyset\\} = \\min \\{t^{\\prime} \\geq t + 1 : \\pi^{*}(t^{\\prime},\\mu_{t^{\\prime}}) = \\emptyset\\} = T_{t + 1}^{*}$ , (12) holds since $\\mu_{\\tau} \\perp \\mathbb{1}\\{\\pi^{*}(t,\\mu_{t}) = \\Psi_{0}\\}$ and $T_{t + 1}^{*} \\perp \\mathbb{1}\\{\\pi^{*}(t,\\mu_{t}) = \\Psi_{0}\\}$ when conditioned on $\\mu_{t} = \\mu$ , and (13) holds since $\\pi^{*}(t,\\mu) = \\Psi_{0} \\iff Q^{*}(t,\\mu) > 0$ . Intuitively, the maximum possible value at a given time is achieved either by stopping immediately or by conducting the experiment for at least one more time step and then following the optimal policy thereafter.", + "bbox": [ + 169, + 349, + 826, + 434 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Next, we observe that", + "bbox": [ + 171, + 440, + 318, + 454 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {P} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\mid \\mu_ {t} = \\mu \\right\\} = \\int \\mathbb {P} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\mid \\theta , \\mu_ {t} = \\mu \\right\\} \\mathrm {d} \\mathbb {P} \\left\\{\\theta \\mid \\mu_ {t} = \\mu \\right\\} \\\\ = \\int F \\left(\\mu^ {\\prime} - \\frac {\\theta + t \\mu}{t + 1}; \\frac {1}{(t + 1) ^ {2}}\\right) f (\\theta - \\mu ; ^ {1} / t) d \\theta \\\\ = \\iint \\mathbb {1} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\right\\} f \\left(\\mu_ {t + 1} - \\frac {\\theta + t \\mu}{t + 1}; \\frac {1}{(t + 1) ^ {2}}\\right) f (\\theta - \\mu ; ^ {1} / t) d \\mu_ {t + 1} d \\theta \\\\ = \\iint \\mathbb {1} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\right\\} f \\left(\\mu_ {t + 1} - \\frac {y + (t + 1) \\mu}{t + 1}; \\frac {1}{(t + 1) ^ {2}}\\right) f (y; ^ {1} / t) d \\mu_ {t + 1} d y \\\\ = 
\\iint \\mathbb {1} \\left\\{x + \\frac {y + (t + 1) \\mu}{t + 1} \\leq \\mu^ {\\prime} \\right\\} f \\left(x; ^ {1} / (t + 1) ^ {2}\\right) f \\left(y; ^ {1} / t\\right) d x d y \\\\ = \\mathbb{P}_{\\substack{X\\sim \\mathcal{N}(0,1 / (t + 1)^{2})\\\\ Y\\sim \\mathcal{N}(0,1 / t)}}\\Bigg\\{X + \\frac{Y}{t + 1}\\leq \\mu^{\\prime} - \\mu \\Bigg\\} \\\\ = \\mathbb {P} _ {X + Y / (t + 1) \\sim \\mathcal {N} (0, 1 / t - 1 / t + 1)} \\left\\{X + \\frac {Y}{t + 1} \\leq \\mu^ {\\prime} - \\mu \\right\\} \\\\ = F \\left(\\mu^ {\\prime} - \\mu ; ^ {1} / t - ^ {1} / t + 1\\right), \\tag {15} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 463, + 823, + 713 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $f(x; \\sigma^2) = (1 / \\sqrt{2\\pi\\sigma^2})e^{-(1/2)x^2 / \\sigma^2}$ and $F(x; \\sigma^2) = (1 / \\sqrt{2\\pi\\sigma^2})\\int_{-\\infty}^{x}e^{-(1/2)x'^2 / \\sigma^2}dx'$ are the p.d.f. and the c.d.f. of the Gaussian distribution with mean zero and variance $\\sigma^2$ respectively. 
Hence $\\mathrm{d}\\mathbb{P}\\{\\mu_{t+1} = \\mu'| \\mu_t = \\mu\\} = f(\\mu' - \\mu; 1/t - 1/t+1)d\\mu'$ .", + "bbox": [ + 169, + 720, + 826, + 771 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Then, using the relationship between $V^{*}$ and $Q^{*}$ and the observation regarding $\\mathbb{P}\\{\\mu_{t + 1} \\leq \\mu' | \\mu_t = \\mu\\}$ , we drive the following Bellman optimality condition:", + "bbox": [ + 169, + 776, + 826, + 805 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q ^ {*} (t, \\mu) = \\mathbb {E} [ R \\cdot \\mathbb {1} \\{T _ {t + 1} ^ {*} > \\tau \\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\{T _ {t + 1} ^ {*}, \\tau \\} - t) | \\mu_ {t} = \\mu ] \\\\ = - C + \\mathbb {E} \\left[ R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {*}, \\tau \\right\\} - t - 1\\right) \\mid \\mu_ {t} = \\mu \\right] \\\\ = - C + \\int \\mathbb {E} [ R \\cdot \\mathbb {1} \\{T _ {t + 1} ^ {*} > \\tau \\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\{T _ {t + 1} ^ {*}, \\tau \\} - t - 1) | \\mu_ {t + 1} = \\mu^ {\\prime} ] \\\\ \\times \\mathrm {d} \\mathbb {P} \\left(\\mu_ {t + 1} = \\mu^ {\\prime} \\mid \\mu_ {t} = \\mu\\right) \\\\ = - C + \\int V ^ {*} (t + 1, \\mu^ {\\prime}) \\mathrm {d} \\mathbb {P} (\\mu_ {t + 1} = \\mu^ {\\prime} | \\mu_ {t} = \\mu) \\\\ = - C + \\int V ^ {*} (t + 1, \\mu^ {\\prime}) f (\\mu^ {\\prime} - \\mu ; 1 / t - 1 / t + 1) d \\mu^ {\\prime} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 813, + 807, + 925 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n= - C + \\int V ^ {*} (t + 
1, \\mu + z) f \\left(z; ^ {1} / t - ^ {1} / t + 1\\right) d z \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 245, + 102, + 823, + 119 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n= - C + \\int \\max \\left\\{0, Q ^ {*} (t + 1, \\mu + z) \\right\\} f \\left(z; \\frac {1}{t} - \\frac {1}{t + 1}\\right) d z. \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 248, + 122, + 823, + 140 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For the problem setting where $C = 1$ , $R = 2$ , $\\alpha = 0$ , and $\\tau = 2$ , we have", + "bbox": [ + 169, + 154, + 656, + 169 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} V ^ {*} (1, \\mu) = \\max \\{0, - 1 + \\int V ^ {*} (2, \\mu + z) f (z; 1 / 2) d z \\} \\\\ = \\max \\{0, - 1 + 2 \\int \\mathbb {I} \\{\\mu + z > 0 \\} f (z; 1 / 2) d z \\} \\\\ = \\max \\left\\{0, - 1 + 2 \\int_ {- \\mu} ^ {\\infty} f (z; 1 / 2) d z \\right\\} \\\\ = \\max \\left\\{0, - 1 + 2 F (\\mu ; ^ {1} / _ {2}) \\right\\} \\\\ = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} \\mu < 0 \\\\ - 1 + 2 F (\\mu ; 1 / 2) & \\text {i f} \\mu \\geq 0 . \\end{array} \\right. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 176, + 683, + 305 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Notice that, for $\\mu > 0$ ,", + "bbox": [ + 171, + 311, + 323, + 327 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {d ^ {2}}{d \\mu^ {2}} V ^ {*} (1, \\mu) = \\frac {d ^ {2}}{d \\mu^ {2}} \\Big (- 1 + 2 F (\\mu ; 1 / 2) \\Big) \\\\ = \\frac {d}{d \\mu} \\left(2 f (\\mu ; ^ {1} / 2)\\right) \\\\ = - (4 / \\pi) \\mu e ^ {- \\mu^ {2}} < 0 \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 334, + 635, + 422 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "hence $V^{*}(1,\\mu)$ is concave at least on interval $\\mu \\in (0,\\infty)$ and is not a convex function. 
Moreover, $-V^{*}(1,\\mu)$ cannot be a convex function—or equivalently $V^{*}(1,\\mu)$ cannot be a purely concave function—either: For an arbitrary $\\mu \\in (0,\\infty), V^{*}(1,\\mu) > 0$ and $V^{*}(1,-\\mu) = 0$ hence $(1/2)V^{*}(1,\\mu) + (1/2)V^{*}(1,-\\mu) > 0$ but $V^{*}(1,(1/2)\\mu + (1/2)(-\\mu)) = V^{*}(1,0) = 0$ .", + "bbox": [ + 169, + 431, + 826, + 489 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "I.2 PROOF OF PROPOSITION 2", + "text_level": 1, + "bbox": [ + 171, + 505, + 395, + 518 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We will prove the proposition by showing that", + "bbox": [ + 169, + 531, + 478, + 546 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(i) $Q^{*}(t,\\mu)$ is non-decreasing in $\\mu$ —that is $\\mu < \\mu^{\\prime}\\Rightarrow Q^{*}(t,\\mu)\\leq Q^{*}(t,\\mu^{\\prime})$", + "(ii) $\\lim_{t\\to \\infty}Q^{*}(t,\\mu) = -(\\tau -t)C + R > 0$ , and", + "(iii) $\\lim_{t\\to -\\infty}Q^{*}(t,\\mu) = -C < 0$" + ], + "bbox": [ + 200, + 551, + 732, + 595 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "for all $t \\in \\{1, \\dots, \\tau - 1\\}$ via mathematical induction. 
Notice that these three facts—together with the fact that $Q^{*}(t, \mu)$ is a continuous function in $\mu$ for $t \in \{1, \dots, \tau - 1\}$ —would imply the existence of a unique $\mu_{t}^{*}$ such that $Q^{*}(t, \mu_{t}^{*}) = 0$ , $Q^{*}(t, \mu) > 0 \iff \mu > \mu_{t}^{*}$ , and $Q^{*}(t, \mu) \leq 0 \iff \mu \leq \mu_{t}^{*}$ , which in turn would imply that", + "bbox": [ + 169, + 601, + 826, + 657 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\pi^ {*} (t, \mu) = \left\{ \begin{array}{l l} \Psi_ {0} & \text {if } \mu > \mu_ {t} ^ {*} \iff Q ^ {*} (t, \mu) > 0 \\ \varnothing & \text {if } \mu \leq \mu_ {t} ^ {*} \iff Q ^ {*} (t, \mu) \leq 0 , \end{array} \right.\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 666, + 665, + 700 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "meaning the optimal policy $\pi^{*}$ is indeed of \"thresholding-type\" as the proposition states.", + "bbox": [ + 169, + 708, + 758, + 723 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "First, we observe the following base cases for $t = \tau - 1$ :", + "bbox": [ + 169, + 729, + 547, + 743 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "(i) $Q^{*}(\tau -1,\mu)$ is non-decreasing in $\mu$ . 
When $\\mu < \\mu^{\\prime}$", + "bbox": [ + 205, + 750, + 575, + 766 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q ^ {*} (\\tau - 1, \\mu) = - C + \\int V ^ {*} (\\tau , \\mu + z) f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z (18) \\\\ = - C + R \\int \\mathbb {1} \\left\\{\\mu + z > \\alpha / \\sqrt {\\tau} \\right\\} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z \\\\ \\leq - C + R \\int \\mathbb {1} \\left\\{\\mu^ {\\prime} + z > \\alpha / \\sqrt {\\tau} \\right\\} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z (19) \\\\ = Q ^ {*} (\\tau - 1, \\mu^ {\\prime}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 299, + 772, + 823, + 849 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "where (18) is due to (16), and (19) holds since $\\mu + z > \\alpha / \\sqrt{\\tau} \\Rightarrow \\mu' + z > \\mu + z > \\alpha / \\sqrt{\\tau}$ .", + "bbox": [ + 228, + 857, + 826, + 875 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "(ii) $\\lim_{\\mu \\to \\infty}Q^{*}(\\tau -1,\\mu) = -C + R > 0$ since", + "bbox": [ + 202, + 878, + 531, + 896 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\lim _ {\\mu \\rightarrow \\infty} Q ^ {*} (\\tau - 1, \\mu) = \\lim _ {\\mu \\rightarrow \\infty} \\left(- C + R \\int \\mathbb {1} \\{\\mu + z > \\alpha / \\sqrt {\\tau} \\} f (z; ^ {1 / (\\tau - 1)} - ^ {1 / \\tau}) d z\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 902, + 799, + 926 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} = \\lim _ {\\mu \\rightarrow \\infty} \\left(- C + R \\int_ {\\alpha / \\sqrt {\\tau} - \\mu} ^ {\\infty} f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = 
- C + R \\int f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z \\\\ = - C + R. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 101, + 733, + 169 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "(iii) $\\lim_{\\mu \\to -\\infty}Q^{*}(\\tau -1,\\mu) = -C < 0$ since", + "bbox": [ + 197, + 175, + 509, + 191 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\lim _ {\\mu \\rightarrow - \\infty} Q ^ {*} (\\tau - 1, \\mu) = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\int \\mathbb {1} \\{\\mu + z > \\alpha / \\sqrt {\\tau} \\} f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\int_ {\\alpha / \\sqrt {\\tau} - \\mu} ^ {\\infty} f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\left(1 - \\int_ {- \\infty} ^ {\\alpha / \\sqrt {\\tau} - \\mu} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z\\right)\\right) \\\\ = - C + R \\left(1 - \\int f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = - C. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 198, + 805, + 333 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Then, we show that the following inductive cases hold for $t \\in \\{\\tau - 1, \\dots, 2\\}$ :", + "bbox": [ + 169, + 340, + 684, + 356 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "(i) Given that $Q^{*}(t,\\mu)$ is non-decreasing in $\\mu$ , $Q^{*}(t - 1,\\mu)$ is also non-decreasing in $\\mu$ . 
Similar to the base case, when $\\mu < \\mu'$ ,", + "bbox": [ + 205, + 361, + 823, + 391 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f \\left(z, ^ {1 / (t - 1)} - ^ {1 / t}\\right) d z \\tag {20} \\\\ \\leq - C + \\int \\max \\left\\{0, Q ^ {*} (t, \\mu^ {\\prime} + z) \\right\\} f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z \\\\ = Q ^ {*} (t - 1, \\mu^ {\\prime}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 396, + 821, + 450 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where (20) is due to (17).", + "bbox": [ + 228, + 455, + 400, + 470 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "(ii) Given $\\lim_{\\mu \\to \\infty}Q^{*}(t,\\mu) = -(\\tau -t)C + R$ and also given that $Q^{*}(t,\\mu)$ is non-decreasing in $\\mu$ we have $\\lim_{\\mu \\to \\infty}Q^{*}(t - 1,\\mu) = -(\\tau -t + 1)C + R > 0$ which can be shown using the sandwich theorem:", + "bbox": [ + 202, + 476, + 823, + 518 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ \\leq - C + \\int \\max \\left\\{0, \\lim _ {\\mu^ {\\prime} \\rightarrow \\infty} Q ^ {*} \\left(t, \\mu^ {\\prime}\\right)\\right\\} f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z \\\\ \\leq - C + (- (\\tau - t) C + R) \\int f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ = - (\\tau - t - 1) C + R. 
\\tag {21} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 526, + 821, + 595 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ \\geq - C + \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, 1 / (t - 1) - 1 / t) d z \\\\ \\geq - C + \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} \\max \\left\\{0, Q ^ {*} (t, \\mu - | \\mu | ^ {1 / 2} \\right\\} f (z, ^ {1 / (t - 1)} - ^ {1 / t}) d z \\\\ \\geq - C + Q ^ {*} (t, \\mu - | \\mu | ^ {1 / 2}) \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} f \\left(z, 1 / (t - 1) - 1 / t\\right) d z, \\tag {22} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 271, + 611, + 821, + 736 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Finally, observing that", + "bbox": [ + 228, + 742, + 382, + 756 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\lim _ {\\mu \\rightarrow \\infty} (2 2) = \\lim _ {\\mu \\rightarrow \\infty} \\left(- C + Q ^ {*} (t, \\mu - | \\mu | ^ {1 / 2}) \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z\\right) \\\\ = - C + \\left(\\lim _ {\\mu^ {\\prime} \\rightarrow \\infty} Q ^ {*} (t, \\mu^ {\\prime})\\right) \\int f (z, 1 / (t - 1) - 1 / t) d z \\\\ = - (\\tau - t - 1) C + R, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 762, + 779, + 833 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "together with bounds (21) and (22), we obtain $\\lim_{\\mu \\to \\infty}Q^{*}(t - 1,\\mu) = -(\\tau -t + 1)C + R.$", + "bbox": [ + 228, + 839, + 825, + 854 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "(iii) Given $\\lim_{\\mu \\to -\\infty}Q^{*}(t,\\mu) = -C < 0$ and also given that $Q^{*}(t,\\mu)$ is non-decreasing in $\\mu$ and $\\lim_{\\mu \\to \\infty}Q^{*}(t,\\mu) 
> 0$ so that $\\mu_t^*$ exists—we have $\\lim_{\\mu \\to -\\infty}Q^{*}(t - 1,\\mu) = -C < 0$ , which again can be shown using the sandwich theorem:", + "bbox": [ + 197, + 861, + 825, + 902 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\nQ ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, 1 / (t - 1) - 1 / t) d z\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 907, + 751, + 925 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\geq - C. \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 103, + 823, + 119 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ = \\int_ {\\mu_ {t} ^ {*} - \\mu} ^ {\\infty} Q ^ {*} (t, \\mu + z) f (z, 1 / (t - 1) - 1 / t) d z (24) \\\\ \\leq - C + R \\int_ {\\mu_ {t} ^ {*} - \\mu} ^ {\\infty} f (z, 1 / (t - 1) - 1 / t) d z (25) \\\\ \\leq - C + R \\left(1 - \\int_ {- \\infty} ^ {\\mu_ {t} ^ {*} - \\mu} f (z, 1 / (t - 1) - 1 / t) d z\\right), (26) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 132, + 825, + 261 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where (24) holds since $Q^{*}(t,\\mu +z) > 0$ if and only if $z > \\mu_t^* -\\mu$ and $\\max \\{0,Q^{*}(t,\\mu +z)\\} = 0$ otherwise, and (25) holds since $Q^{*}(t,\\mu)\\leq \\lim_{\\mu^{\\prime}\\to \\infty}Q^{*}(t,\\mu^{\\prime}) = -(\\tau -t)C + R\\leq R$ for all $\\mu$ as $Q^{*}(t,\\mu)$ is non-decreasing in $\\mu$ . 
Finally, observing", + "bbox": [ + 228, + 266, + 826, + 310 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\lim _ {\\mu \\rightarrow - \\infty} (2 6) = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\left(1 - \\int_ {- \\infty} ^ {\\mu_ {t} ^ {*} - \\mu} f \\left(z, \\frac {1}{(t - 1)} - \\frac {1}{t}\\right) d z\\right)\\right) \\\\ = - C, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 289, + 315, + 764, + 367 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "together with bounds (23) and (26), we obtain $\\lim_{\\mu \\to -\\infty}Q^{*}(t - 1,\\mu) = -C$", + "bbox": [ + 228, + 373, + 746, + 390 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "When put together, the base cases and the inductive cases above imply that conditions (i-iii) hold for all $t \\in \\{1, \\dots, \\tau - 1\\}$ hence $\\mu_t^*$ exists for all $t \\in \\{1, \\dots, \\tau - 1\\}$ which concludes our proof.", + "bbox": [ + 169, + 393, + 826, + 425 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "I.3 PROOF OF PROPOSITION 3", + "text_level": 1, + "bbox": [ + 171, + 440, + 395, + 454 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "First, we prove the existence of $\\mu_t^{\\mathrm{greedy}}$ for all $t\\in \\{0,\\dots ,\\tau -1\\}$ by driving an analytical formula for $V^{(0)}(t,\\mu)\\doteq V^{\\pi^{(0)}}(t,\\mu)$ . 
Letting $T_{t}^{(0)} = T_{t}^{\\pi^{(0)}}$ ,", + "bbox": [ + 169, + 465, + 826, + 500 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} V ^ {(0)} (t, \\mu) = \\mathbb {E} [ R \\cdot \\mathbb {1} \\{T _ {t} ^ {(0)} > \\tau \\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\{T _ {t} ^ {(0)}, \\tau \\} - t) | \\mu_ {t} = \\mu ] \\\\ = \\mathbb {E} [ R \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\tau - t) | \\mu_ {t} = \\mu ] (27) \\\\ = - C + \\int \\mathbb {E} [ R \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\tau - t - 1) | \\mu_ {t + 1} = \\mu^ {\\prime} ] \\mathrm {d} \\mathbb {P} (\\mu_ {t + 1} = \\mu^ {\\prime} | \\mu_ {t} = \\mu) \\\\ = - C + \\int V ^ {(0)} (t + 1, \\mu^ {\\prime}) \\mathrm {d} \\mathbb {P} (\\mu_ {t + 1} = \\mu^ {\\prime} | \\mu_ {t} = \\mu) \\\\ = - C + \\int V ^ {(0)} (t + 1, \\mu^ {\\prime}) f \\left(\\mu^ {\\prime} - \\mu ; ^ {1} / t - ^ {1} / (t + 1)\\right) d \\mu^ {\\prime} (28) \\\\ = - C + \\int V ^ {(0)} (t + 1, \\mu + z) f \\left(z; \\frac {1}{t} - \\frac {1}{(t + 1)}\\right) d z, (29) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 503, + 823, + 623 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where (27) holds since $\\pi^{(0)}(t,\\mu) = \\Psi_0$ for all $t$ and $\\mu$ hence it is always the case that $T_{t}^{(0)} = \\infty$ , and (28) is due to (15).", + "bbox": [ + 169, + 631, + 826, + 662 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In the remainder of our proofs, we take $\\alpha = 0$ for notational brevity. This is without any loss of generality as, by simply shifting each value function and Q-function by $\\alpha / \\sqrt{\\tau}$ with respect to $\\mu$ , all of the following arguments would still hold. 
For $\\alpha = 0$ , we show that", + "bbox": [ + 169, + 667, + 826, + 710 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\nV ^ {(0)} (t, \\mu) = - (\\tau - t) C + R \\cdot F \\left(\\frac {\\mu}{\\sqrt {1 / t - 1 / \\tau}}\\right) \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 715, + 825, + 752 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "for all $t \\in \\{1, \\dots, \\tau - 1\\}$ via mathematical induction. Note that (30) is true for $t = \\tau - 1$ :", + "bbox": [ + 169, + 756, + 767, + 772 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} V ^ {(0)} (\\tau - 1, \\mu) = - C + \\int V ^ {(0)} (\\tau , \\mu + z) f (z; 1 / _ {(\\tau - 1)} - 1 / _ {\\tau}) d z \\\\ = - C + R \\int \\mathbb {1} \\left\\{\\mu + z > 0 \\right\\} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z \\\\ = - C + R \\int_ {- \\mu} ^ {\\infty} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z \\\\ = - C + R \\int_ {- \\infty} ^ {\\mu} f (z; ^ {1 / (\\tau - 1)} - ^ {1 / \\tau}) d z \\\\ = - C + R \\int_ {- \\infty} ^ {\\mu / \\sqrt {1 / (\\tau - 1) - 1 / \\tau}} f (z; 1) d z \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 777, + 710, + 928 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n= - C + R \\cdot F \\left(\\frac {\\mu}{\\sqrt {1 / (\\tau - 1) - 1 / \\tau}}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 99, + 629, + 136 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "where $F(x) \\doteq F(x; 1)$ is the c.d.f. of the standard Gaussian distribution. 
Moreover, assuming (30) is true for $t$ , it is also true for $t - 1$ :", + "bbox": [ + 169, + 143, + 823, + 172 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} V ^ {(0)} (t - 1, \\mu) \\\\ = - C + \\int V ^ {(0)} (t, \\mu + z) f (z; ^ {1} / t - 1 - ^ {1} / t) d z \\\\ = - (\\tau - t + 1) C + R \\int F ((\\mu + z) / \\sqrt {1 / t - 1 / \\tau}; 1) f (z; 1 / (t - 1) - 1 / t) d z \\\\ = - (\\tau - t + 1) C \\\\ + R \\iint_ {- \\infty} ^ {(\\mu + z) / \\sqrt {1 / t - 1 / \\tau}} f (z ^ {\\prime}; 1) f (z; 1 / (t - 1) - 1 / t) d z ^ {\\prime} d z \\\\ = - (\\tau - t + 1) C \\\\ + R \\iint_ {- \\infty} ^ {\\mu + z} f \\left(z ^ {\\prime}; 1 / t - 1 / \\tau\\right) f \\left(z; 1 / (t - 1) - 1 / t\\right) d z ^ {\\prime} d z \\\\ = - (\\tau - t + 1) C \\\\ + R \\iint \\mathbb {1} \\{z ^ {\\prime} \\leq \\mu + z \\} f \\left(z ^ {\\prime}; ^ {1} / t - ^ {1} / \\tau\\right) f \\left(z; ^ {1} / (t - 1) - ^ {1} / t\\right) d z ^ {\\prime} d z \\\\ = - (\\tau - t + 1) C + R \\cdot \\mathbb {P} _ {Z \\sim \\mathcal {N} (0, 1 / (t - 1) - 1 / t)} \\{Z ^ {\\prime} \\leq \\mu + Z \\} \\\\ = - (\\tau - t + 1) C + R \\cdot \\mathbb {P} _ {\\frac {Z ^ {\\prime} - Z}{\\sqrt {1 / (t - 1) - 1 / \\tau}} \\sim \\mathcal {N} (0, 1)} \\left\\{\\frac {Z ^ {\\prime} - Z}{\\sqrt {1 / (t - 1) - 1 / \\tau}} \\leq \\frac {\\mu}{\\sqrt {1 / (t - 1) - 1 / \\tau}} \\right\\} \\\\ = - (\\tau - t + 1) C + R \\cdot F \\left(\\frac {\\mu}{\\sqrt {1 / (t - 1) - 1 / \\tau}}\\right). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 179, + 795, + 511 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Therefore, (30) indeed holds for all $t \\in \\{1, \\dots, \\tau - 1\\}$ .", + "bbox": [ + 169, + 518, + 537, + 534 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Next, we observe that $V^{(0)}(t,\\mu)$ has a root at $\\mu = F^{-1}((\\tau - t)C / R)\\sqrt{1 / t - 1 / \\tau}$ provided that $(\\tau - t)C / R \\in (0,1)$ , which is the case for all $t \\in \\{1, \\dots, \\tau - 1\\}$ since $\\tau C < R$ . Moreover, $V^{(0)}(t,\\mu)$ is a strictly increasing function in $\\mu$ . Hence, there exists a unique $\\mu_t^{\\mathrm{greedy}}$ for all $t \\in \\{1, \\dots, \\tau - 1\\}$ such that $V^{(0)}(t,\\mu_t^{\\mathrm{greedy}}) > 0$ and $V^{(0)}(t,\\mu) > 0 \\iff \\mu > \\mu_t^{\\mathrm{greedy}}$ . In other words, $\\pi^{\\mathrm{greedy}}$ is also a thresholding-type policy as the proposition states.", + "bbox": [ + 169, + 541, + 823, + 622 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Finally, we have $V^{(0)}(t,\\mu_t^*) = Q^{(0)}(t,\\mu_t^*) \\leq Q^* (t,\\mu_t^*) = 0$ hence $\\mu_t^* \\leq \\mu_t^{\\mathrm{greedy}}$ . This is because, by definition, $Q^{*}(t,\\mu) \\geq Q^{\\pi}(t,\\mu)$ for all $t,\\mu$ for any given policy $\\pi$ , including $\\pi^{(0)}$ .", + "bbox": [ + 169, + 628, + 825, + 661 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "I.4 PROOF OF PROPOSITION 4", + "text_level": 1, + "bbox": [ + 171, + 676, + 393, + 691 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "As in the proof of Proposition 3, we take $\\alpha = 0$ for notational brevity. Once again, this is without any loss of generality as, by simply shifting each value function and Q-function by $\\alpha / \\sqrt{\\tau}$ with respect to $\\mu$ , all of the following arguments would still hold. 
Remember that the formula we derived for $V^{(0)}(t, \\mu)$ in (30) holds when $\\alpha = 0$ .", + "bbox": [ + 169, + 703, + 823, + 762 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We start by deriving two bounds on the optimal Q-function $Q^{*}(t,\\mu)$ : (i) a lower bound and (ii) an upper bound. For the lower bound, it is sufficient to observe that", + "bbox": [ + 169, + 767, + 823, + 796 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\nV ^ {(0)} (t, \\mu) = Q ^ {(0)} (t, \\mu) \\leq Q ^ {*} (t, \\mu),\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 804, + 620, + 823 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "which holds since, by definition, $Q^{*}(t,\\mu) \\geq Q^{\\pi}(t,\\mu)$ for all $t,\\mu$ for any given policy $\\pi$ .", + "bbox": [ + 169, + 829, + 750, + 845 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "For the upper bound, we use mathematical induction to show that $Q^{*}(t,\\mu) \\leq (\\tau - t - 1)C + V^{(0)}(t,\\mu)$ . 
First, for the base case of $\\tau - 1$ ,", + "bbox": [ + 169, + 852, + 826, + 881 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q ^ {*} (\\tau - 1, \\mu) = - C + \\int V ^ {*} (\\tau , \\mu + z) f (z; ^ {1} / t - ^ {1} / t + 1) d z \\tag {31} \\\\ = - C + \\int \\mathbb {1} \\left\\{\\mu + z > \\alpha \\right\\} f \\left(z; 1 / t - 1 / t + 1\\right) d z \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 888, + 823, + 925 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} = - C + \\int V ^ {(0)} (\\tau , \\mu + z) f (z; 1 / t - 1 / t + 1) d z \\\\ = V ^ {(0)} (\\tau - 1, \\mu), \\tag {32} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 397, + 102, + 823, + 140 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where (31) is due to (16), and (32) is due to (29). 
Then, for the inductive case, assuming $Q^{*}(t,\\mu)\\leq (\\tau -t - 1)C + V^{(0)}(t,\\mu)$", + "bbox": [ + 169, + 145, + 823, + 176 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, 1 / (t - 1) - 1 / t) d z (33) \\\\ \\leq \\int Q ^ {*} (t, \\mu + z) f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z (34) \\\\ \\leq (\\tau - t - 1) C + \\int V ^ {(0)} (t, \\mu + z) f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ = (\\tau - t) C + V ^ {(0)} (t - 1, \\mu + z), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 181, + 823, + 258 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where (33) is due to (17), and (34) holds since $-C \\leq Q^{*}(t, \\mu)$ implies that $\\max \\{0, Q^{*}(t, \\mu)\\} \\leq \\max \\{C + Q^{*}(t, \\mu), Q^{*}(t, \\mu)\\} \\leq C + Q^{*}(t, \\mu)$ .", + "bbox": [ + 169, + 263, + 825, + 294 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Define $\\mu_t^+$ and $\\mu_t^-$ such that", + "bbox": [ + 171, + 300, + 359, + 316 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\nV ^ {(0)} (t, \\mu_ {t} ^ {+}) = 0 \\iff \\mu_ {t} ^ {+} = F ^ {- 1} \\left(\\left(\\tau - t\\right) \\frac {C}{R}\\right) \\sqrt {\\frac {1}{t} - \\frac {1}{\\tau}}\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 321, + 743, + 357 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n(\\tau - t - 1) C + V ^ {(0)} (t, \\mu_ {t} ^ {-}) = 0 \\iff \\mu_ {t} ^ {-} = F ^ {- 1} \\left(\\frac {C}{R}\\right) \\sqrt {\\frac {1}{t} - \\frac {1}{\\tau}},\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 359, + 704, + 395 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "which we are able to write in closed form using the formula we derived for $V^{(0)}(t,\\mu)$ in (30) during the proof of Proposition 3.", + "bbox": [ + 169, + 401, + 823, + 431 + ], + "page_idx": 24 + }, + { + 
"type": "text", + "text": "By definition, $\\mu_t^{\\mathrm{greedy}} = \\mu_t^+$ . Moreover, (i) $V^{(0)}(t,\\mu_t^*)\\leq Q^* (t,\\mu_t^*) = 0 = V^{(0)}(t,\\mu_t^+)$ due to our lower bound, hence $\\mu_t^*\\leq \\mu_t^+$ (remember that $V^{(0)}(t,\\mu)$ was a strictly increasing function in $\\mu$ ), and (ii) $(\\tau -t - 1)C + V^{(0)}(t,\\mu_t^-) = 0 = Q^* (t,\\mu_t^*)\\leq (\\tau -t - 1)C + V^{(0)}(t,\\mu_t^*)$ due to our upper bound, hence $V^{(0)}(t,\\mu_t^-)\\leq V^{(0)}(t,\\mu_t^*)$ meaning $\\mu_t^- \\leq \\mu_t^*$ . Putting together these facts, and also the fact that $\\mu_t^*\\leq \\mu_t^{\\mathrm{greedy}}$ , we obtain $|\\mu_t^* -\\mu_t^{\\mathrm{greedy}}|\\leq \\mu_t^+ -\\mu_t^-$ as the proposition states.", + "bbox": [ + 169, + 436, + 826, + 518 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "J BENCHMARKING ALGORITHMS", + "text_level": 1, + "bbox": [ + 171, + 537, + 465, + 551 + ], + "page_idx": 24 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2 Adaptive Enrichment, Futility Stopping with Bayes-OCP, Greedy Bayes-OCP" + ], + "code_body": "1: Initialize $\\mu_{x}$ and $\\sigma_x^2$ for all $x\\in \\mathcal{X}$ \n2: $X\\gets \\mathcal{X},t\\gets 0,\\mathcal{D}_0\\gets \\emptyset$ \n3: Start experiment $\\psi = (\\mathcal{X},\\tau ,\\rho)$ \n4: loop: \n5: $t\\gets t + 1$ \n6: Observe $x_{t},y_{t}$ \n7: $\\mathcal{D}_t\\gets \\mathcal{D}_{t - 1}\\cup \\{x_t,y_t\\}$ \n8: $1 / \\sigma_{x_t}^2\\gets 1 / \\sigma_{x_t}^2 +1$ \n9: $\\mu_{x_t}\\gets \\mu_{x_t} + (y_t - \\mu_{x_t})\\sigma_{x_t}^2$ \n10: $X^{\\prime}\\gets \\emptyset$ \n11: while $X\\setminus X^{\\prime}\\supset \\emptyset$ .. 
\n12: $x^{*}\\gets \\mathrm{argmax}_{x\\in X\\setminus X^{\\prime}}\\mathbb{E}_{\\theta_{x}\\sim \\mathcal{N}(\\mu_{x},\\sigma_{x}^{2})}[\\mathcal{G}^{(0)}(X^{\\prime}\\cup \\{x\\};\\{\\theta_{x}\\})]$ \n13: if $\\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X^{\\prime}\\cup \\{x^{*}\\} ;\\{\\theta_{x}\\})] > \\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X^{\\prime};\\{\\theta_{x}\\})]$ .. \n14: $X^{\\prime}\\gets X^{\\prime}\\cup \\{x^{*}\\}$ \n15: else: \n16: break \n17: if Adaptive Enrichment and $t = \\tau /2$ and $\\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X';\\{\\theta_x\\})] > \\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}(X,\\mathcal{D}_t;\\{\\theta_x\\})]$ .. \n18: $X\\gets X^{\\prime},t\\gets 0,\\mathcal{D}_0\\gets \\emptyset$ \n19: Start a new experiment $\\psi = (X,\\tau ,\\rho)$ \n20: if Greedy Bayes-OCP and $\\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X';\\{\\theta_x\\})] > \\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}(X,\\mathcal{D}_t;\\{\\theta_x\\})]$ .. 
\n21: $X\\gets X^{\\prime},t\\gets 0,\\mathcal{D}_0\\gets \\emptyset$ \n22: Start a new experiment $\\psi = (X,\\tau ,\\rho)$ \n23: if Futility Stopping with Bayes-OCP and $\\mathbb{P}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(\\emptyset ;\\{\\theta_x\\}) > \\mathcal{G}(X,\\mathcal{D}_0;\\{\\theta_x\\})] > \\beta$ \n24: Stop all experimentation", + "bbox": [ + 173, + 594, + 823, + 920 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/cdb63ad44372d32987538f8d058b458fda1bd889b6c86c68e97ef4949d57ee2f.jpg", + "table_caption": [ + "K GLOSSARY OF TERMS AND NOTATION" + ], + "table_footnote": [], + "table_body": "
TermNotationDescription
Experiment-Conducted to confirm efficacy of an intervention, e.g. a new treatment in clinical trials, or a new recommendation policy in online advertisement
Subject-Individual participant of an experiment, e.g. patients in a clinical trial, or customers in online advertisement
PopulationX⊆XCollection of subjects that all share the same qualities, e.g. all female patients in a clinical trial, or all customers with the same preferences in online advertisement
Atomic-populationx∈XIndivisible populations
PropensitiesηxThe probability of a subject being from atomic-population x
ηXThe probability of a subject being from population X
ηx|XThe probability of a subject being from atomic-population x conditioned on the fact that they are from population X
Outcome distributionΩxDistribution of outcomes that is indicative of the effect of the intervention of interest for atomic-population x
Mean outcomesθxExpected outcome, i.e. the effect of the intervention of interest, for atomic-population x
θXExpected outcome for population X
Experiment designψ=(X,τ,ρ)Target population X, sample horizon τ, and success criterion ρ that characterize an experiment
Viable experiment designsΨExperiment designs that can potentially be followed by a meta-experimenter
Meta-experimenter-The decision-making agent that decides when to run experiments according to which experiment design in Ψ
Sample/time horizonτAn experiment is terminated when t=τ
Success criterionρAn experiment is declared a success if ρ(Dτ)=1
Online datasetDtData collected by an ongoing experiment at time step t
DtData collected by the i-th experiment run by the meta-experimenter at time step t
Aggregate datasetDItCollective data collected by all experiments up to time step t of the i-th experiment
-TtNumber of time steps for which the i-th experiment was conducted until it was stopped or its time horizon was reached
CostCCost incurred per time step by running experiment ψ
RewardRReward received if experiment ψ is successful
UtilityGSum of costs and rewards received after all experimentation is concluded
PolicyπDecision-making policy of the meta-experimenter
Optimal policyπ*The optimal policy that maximizes utility G in expectation
Greedy policyπgreedySee Section 3
Test statisticμtIn the simplified case in Section 3, the empirical mean outcome
Value functionVπ(t,μ)The expected utility of following policy π when μt=μ
Q-functionQπ(t,μ)The expected utility of following policy π after conducting the ongoing experiment for one more time step when μt=μ
-TtπThe first time step at or after time step t that policy π decides to stop all experimentation
Optimal value functionV*The value function associated with π*
Optimal Q-functionQ*The Q-function associated with π*
Thresholdsμt*Decision-making thresholds associated with π*
μtgreedyDecision-making thresholds associated with πgreedy
Conditional power functionP(X, Dt; {θx})The probability of a hypothesis test being successful conditioned on mean outcomes {θx}
Expected utility functionG(X, Dt; {θx})The expected utility of fully committing to an experiment and waiting until it terminates when the experiment targets population X, is currently at time step t, and has collected dataset Dt
PosteriorsN(μx, σx2)Posterior distributions over mean outcomes {θx} maintained by Bayes-OCP such that θx|D ~ N(μx, σx2)
", + "bbox": [ + 169, + 137, + 836, + 920 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 948, + 509, + 960 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 171, + 101, + 834, + 234 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + } +] \ No newline at end of file diff --git a/2023/When to Make and Break Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_model.json b/2023/When to Make and Break Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ae07e7453975ae895e64699f35cb291e2bd76e13 --- /dev/null +++ b/2023/When to Make and Break Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_model.json @@ -0,0 +1,4148 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.741, + 0.121 + ], + "angle": 0, + "content": "WHEN TO MAKE AND BREAK COMMITMENTS?" 
+ }, + { + "type": "text", + "bbox": [ + 0.183, + 0.146, + 0.287, + 0.16 + ], + "angle": 0, + "content": "Alihan Hüyük" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.161, + 0.35, + 0.174 + ], + "angle": 0, + "content": "University of Cambridge" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.175, + 0.343, + 0.187 + ], + "angle": 0, + "content": "ah2075@cam.ac.uk" + }, + { + "type": "text", + "bbox": [ + 0.397, + 0.146, + 0.498, + 0.16 + ], + "angle": 0, + "content": "Zhaozhi Qian" + }, + { + "type": "text", + "bbox": [ + 0.397, + 0.161, + 0.563, + 0.174 + ], + "angle": 0, + "content": "University of Cambridge" + }, + { + "type": "text", + "bbox": [ + 0.397, + 0.175, + 0.547, + 0.188 + ], + "angle": 0, + "content": "zq224@cam.ac.uk" + }, + { + "type": "text", + "bbox": [ + 0.611, + 0.146, + 0.783, + 0.159 + ], + "angle": 0, + "content": "Mihaela van der Schaar" + }, + { + "type": "text", + "bbox": [ + 0.611, + 0.161, + 0.78, + 0.174 + ], + "angle": 0, + "content": "University of Cambridge" + }, + { + "type": "text", + "bbox": [ + 0.611, + 0.175, + 0.78, + 0.188 + ], + "angle": 0, + "content": "The Alan Turing Institute" + }, + { + "type": "text", + "bbox": [ + 0.611, + 0.189, + 0.761, + 0.201 + ], + "angle": 0, + "content": "mv472@cam.ac.uk" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.223, + 0.548, + 0.238 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.246, + 0.771, + 0.442 + ], + "angle": 0, + "content": "In many scenarios, decision-makers must commit to long-term actions until their resolution before receiving the payoff of said actions, and usually, staying committed to such actions incurs continual costs. For instance, in healthcare, a newly-discovered treatment cannot be marketed to patients until a clinical trial is conducted, which both requires time and is also costly. Of course in such scenarios, not all commitments eventually pay off. 
For instance, a clinical trial might end up failing to show efficacy. Given the time pressure created by the continual cost of keeping a commitment, we aim to answer: When should a decision-maker break a commitment that is likely to fail—either to make an alternative commitment or to make no further commitments at all? First, we formulate this question as a new type of optimal stopping/switching problem called the optimal commitment problem (OCP). Then, we theoretically analyze OCP, and based on the insight we gain, propose a practical algorithm for solving it. Finally, we empirically evaluate the performance of our algorithm in running clinical trials with subpopulation selection." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.456, + 0.339, + 0.471 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.48, + 0.828, + 0.633 + ], + "angle": 0, + "content": "In many real-world settings, decision-makers must commit to long-term actions and wait until their resolution before receiving the payoff of said actions. Meanwhile, staying committed to such actions incurs continual costs. For instance, in portfolio management, it might take time for an asset to develop additional value after an initial investment, and keeping capital tied up in an asset comes with an opportunity cost for the investor (Markowitz, 1959; Merton, 1969; Karatzas and Wang, 2020). In an energy network, turning power stations on and off is not an immediate action, hence a sudden increase in energy demand can only be met with a delay after putting more stations into operation, and keeping stations operational obviously consumes resources (Rafique and Jianhua, 2018; Olofsson et al., 2022). In healthcare, a newly-discovered treatment can only be marketed to patients once a successful clinical trial that targets the said treatment is conducted, which both requires time and is also costly (Kaitin, 2010; Umscheid et al., 2011)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.639, + 0.828, + 0.793 + ], + "angle": 0, + "content": "Of course, not all commitments eventually pay off: An asset might end up losing value despite investments, energy demands might shift faster than a network can react to, and a clinical trial might fail to show efficacy for the targeted treatment. Given the time pressure created by the continual cost of keeping a commitment, our goal in this paper is to answer the question: When should a decision-maker break a commitment—thereby avoiding future costs but also forfeiting any potential returns—either to make an alternative commitment instead or to make no further commitments at all? Solving this problem optimally requires a careful balance between exploration and exploitation: The earlier a commitment that is bound to fail is broken, the more resources would be saved (cf. exploitation); but the longer one is kept, the more information is revealed regarding whether the commitment is actually failing or might still succeed (cf. exploration)—and in certain cases, also regarding the prospects of similar commitments one could make instead." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.799, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Related problems are mostly studied within the context of adaptive experimentation and sequential hypothesis testing (see Section 5). As such, we focus on adaptive experimentation as our main application as well. More specifically, we consider the problem of selecting the target population of an adaptive experiment. Suppose an experimenter, who is interested in proving the efficacy of a new treatment, starts running an initial experiment that targets a certain population of patients. Incidentally, the treatment being tested is effective only for a relatively narrow subpopulation of patients but not for the wider population as a whole. 
Hence, an experiment targeting the overall population, but not the subpopulation specifically, will most probably fail to prove efficacy and prevent the deployment of the treatment for the patients who would have actually benefited from it, not to mention waste" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.203 + ], + "angle": 0, + "content": "time and resources (Moineddin et al., 2008; Lipkovich et al., 2017; Chiu et al., 2018). Of course, the experimenter has no knowledge of this in advance but the initial experiment they have set up would slowly reveal more information regarding the effects of the treatment and the fact that the ongoing experiment is bound to fail. In that case, we want to be able to determine at what point the experimenter has enough information to justify breaking their commitment to the initial experiment that targets too wide of a population to be successful, in favor of making a new commitment to a follow-up experiment that focuses on a narrower subpopulation instead?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.209, + 0.828, + 0.39 + ], + "angle": 0, + "content": "Contributions Our contributions are threefold: First, we formulate the problem of making and breaking commitments in a timely manner as a new type of optimal stopping/switching problem called the optimal commitment problem (OCP) (Section 2). The defining feature of OCP is that rewards are received only when a known time point is reached but costs are incurred continually, requiring commitment to actions but with incentive to abandon those commitments. 
As we will show later, OCP cannot be easily solved via conventional reinforcement learning techniques due to its non-convex nature. Second, we theoretically analyze a simplified case of OCP to identify the characteristics of the optimal solution (Section 3), and based on the insights we gain, propose a practical algorithm for the more general case (Section 4). Third, we empirically evaluate the performance of our algorithm in running experiments with subpopulation selection (Section 6). Before we move on, it should be emphasized that, although we predominantly consider adaptive experimentation as our main application, our contributions remain generally applicable to portfolio management, energy systems, and any other decision-making scenarios that require commitments to long-term actions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.406, + 0.496, + 0.421 + ], + "angle": 0, + "content": "2 OPTIMAL COMMITMENT PROBLEM" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.431, + 0.829, + 0.502 + ], + "angle": 0, + "content": "We first introduce the problem of optimal commitment from the perspective of running experiments. As far as our formulation is concerned, experiments are conducted to confirm the efficacy of an intervention by observing the outcome of the said intervention for subjects belonging to a particular population. However, this experiment-focused perspective does not limit the applicability of OCP; we stress its generality later at the end of the section. We provide a glossary of terms and notation in Appendix K." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.508, + 0.827, + 0.65 + ], + "angle": 0, + "content": "Populations Let \\(\\mathcal{X}\\) be a discrete set of atomic-populations such that every subject is only the member of exactly one atomic-population \\(x\\in \\mathcal{X}\\). 
Denote with \\(\\eta_{x}\\in [0,1]\\) the probability of a subject being from atomic-population \\(x\\) (such that \\(\\sum_{x\\in \\mathcal{X}}\\eta_x = 1\\)), and with \\(\\Omega_{x}\\) the distribution of outcomes for atomic-population \\(x\\) such that the mean outcome \\(\\theta_{x} = \\mathbb{E}_{y\\sim \\Omega_{x}}[y]\\) is the effect of some intervention for atomic-population \\(x\\). Now, wider populations can be constructed by combining various atomic-populations. Let any \\(X\\subseteq \\mathcal{X}\\) represent the population of subjects who belong to either one of the atomic-populations \\(\\{x\\in X\\}\\). Then, the probability of a subject being from population \\(X\\) can be written as \\(\\eta_{X} = \\sum_{x\\in X}\\eta_{x}\\), the probability of a subject being from atomic-population \\(x\\) conditioned on the fact that they are from population \\(X\\) can be written as \\(\\eta_{x|X} = \\eta_x / \\eta_X\\), and the average effect for population \\(X\\) can be written as \\(\\bar{\\theta}_X = \\sum_{x\\in X}\\eta_{x|X}\\theta_x\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.654, + 0.828, + 0.767 + ], + "angle": 0, + "content": "Experiments An experiment is largely characterized by the population it targets, its sample horizon, and its success criterion. During an experiment that targets population \\( X \\), at each time step \\( t \\in \\{1, 2, \\ldots\\} \\) that the experiment continues, first a subject from some atomic-population \\( x_{t} \\) within the targeted population \\( X \\) arrives with probability \\( \\eta_{x_{t}|X} \\), and then the outcome \\( y_{t} \\sim \\Omega_{x_{t}} \\) for that subject is observed. This process generates an online dataset \\( \\mathcal{D}_{t} = \\{x_{t'}, y_{t'}\\}_{t' = 1}^{t} \\). The experiment terminates when a pre-specified sample/time horizon \\( \\tau \\) is reached. 
Once terminated, the experiment is declared a success if \\( \\rho(\\mathcal{D}_{\\tau}) = 1 \\), where \\( \\rho: (\\mathcal{X} \\times \\mathbb{R})^{\\tau} \\to \\{0, 1\\} \\) is the success criterion, and declared a failure otherwise. Formally, the tuple \\( \\psi = (X, \\tau, \\rho) \\) constitutes an experiment design." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.772, + 0.829, + 0.926 + ], + "angle": 0, + "content": "Meta-experimenter Suppose a meta-experimenter is given a set of viable experiment designs \\(\\Psi\\) and is tasked with running at least one successful experiment. Each experiment \\(\\psi \\in \\Psi\\) has an associated cost \\(C_{\\psi} \\in \\mathbb{R}_{+}\\), which the experiment incurs per time step that it continues, and an associated reward \\(R_{\\psi} \\in \\mathbb{R}_{+}\\), which the experiment provides only if it eventually succeeds. The meta-experimenter aims to maximize utility—that is the difference between any eventual reward received and the total costs incurred by running experiments. They first pick an initial experiment \\(\\psi^{1} \\in \\Psi\\) and start conducting it, which generates an online dataset \\(\\mathcal{D}_t^1\\) as described earlier. Now at each time step \\(t\\), they need to decide whether they should stay committed to their initial decision and wait until \\(\\psi^{1}\\) terminates, or stop \\(\\psi^{1}\\) early in favor of starting a new experiment \\(\\psi^{2}\\). They might decide on the latter to avoid unnecessary costs if \\(\\mathcal{D}_t^1\\) already indicates \\(\\psi^{1}\\) is unlikely to succeed. 
If at some point a secondary experiment \\(\\psi^{2}\\) is started, now the meta-experimenter has a similar decision to make" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.259 + ], + "angle": 0, + "content": "regarding whether to stop \\(\\psi^2\\) early in favor of starting a new experiment \\(\\psi^3 \\in \\Psi\\). This process continues until either an experiment finally succeeds or the meta-experimenter decides not to conduct any further experiments; let the random variable \\(n \\in \\{1, 2, \\ldots\\}\\) be such that \\(\\psi^n\\) is the last experiment. We denote with \\(\\psi^i = (X^i, \\tau^i, \\rho^i)\\) the \\(i\\)-th experiment conducted by the meta-experimenter, and with \\(T^i\\) the number of time steps for which the \\(i\\)-th experiment is conducted either until it was stopped by the meta-experimenter or the time horizon \\(\\tau^i\\) was reached. Denote with \\(\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i)\\) the decision-making policy of the meta-experimenter, where \\(t\\) is the current time step of the latest experiment \\(\\psi^i\\) and \\(\\bar{\\mathcal{D}}_t^i = (\\cup_{j=1}^{i-1} \\mathcal{D}_{T^j}^j) \\cup \\mathcal{D}_t^i\\) is an aggregate dataset. We write (i) \\(\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i) = \\psi^i\\) if the meta-experimenter decides to keep conducting the current experiment \\(\\psi^i\\), (ii) \\(\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i) = \\psi' \\neq \\psi^i\\) if the meta-experimenter decides to stop experiment \\(\\psi^i\\) and start experiment \\(\\psi'\\) instead, and (iii) \\(\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i) = \\emptyset\\) if the meta-experimenter decides not to conduct any further experiments." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.264, + 0.826, + 0.28 + ], + "angle": 0, + "content": "Objective Once all experimentation is concluded, the meta-experimenter achieves the total utility" + }, + { + "type": "equation", + "bbox": [ + 0.317, + 0.282, + 0.825, + 0.299 + ], + "angle": 0, + "content": "\\[\nG = R _ {\\psi^ {n}} \\cdot \\mathbb {1} \\left\\{T ^ {n} = \\tau^ {n} \\right\\} \\cdot \\rho^ {n} \\left(\\mathcal {D} _ {\\tau^ {n}} ^ {n}\\right) - \\sum_ {i = 1} ^ {n} C _ {\\psi^ {i}} \\cdot T ^ {i}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.302, + 0.827, + 0.387 + ], + "angle": 0, + "content": "Then, the optimal commitment problem is to find the optimal policy \\(\\pi^{*} = \\operatorname{argmax}_{\\pi} \\mathbb{E}_{\\pi}[G]\\) that maximizes the expected utility given \\(\\Psi\\), \\(\\{\\eta_x\\}\\). \\(\\{R_{\\psi}, C_{\\psi}\\}\\) without knowing mean outcomes \\(\\{\\theta_x\\}\\) or outcome distributions \\(\\{\\Omega_x\\}\\). It is called the optimal commitment problem because each experiment \\(\\psi = (X, \\tau, \\rho)\\) only provides a reward if the meta-experimenter commits to incurring its costs for at least \\(\\tau\\) time steps, and the meta-experimenter needs to decide which experiment in \\(\\Psi\\) is the better commitment—or if there is any experiment worth committing to at all—adaptively." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.393, + 0.553, + 0.49 + ], + "angle": 0, + "content": "General applicability of OCP Although we have described OCP from the perspective of (meta-)experiment design, it can potentially be useful in modeling many other problems as we have stressed during the introduction (see Table 1). For instance, in portfolio management, atomic-populations can be regarded as various assets one can invest in, then a population would correspond to a portfolio" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.49, + 0.828, + 0.574 + ], + "angle": 0, + "content": "of assets. 
Similar to experiments, when these portfolios require a time commitment (cf. \\(\\tau\\)) before they provide their payoff (cf. \\(R_{\\psi}\\)) and incur an opportunity cost (cf. \\(C_{\\psi}\\)) in the mean time, the decision-making problem of managing when and which portfolio to invest in constitutes an instance of the optimal commitment problem. Another good examples is energy management, where power stations and the networks they form are akin to atomic-populations and populations. Since power stations cannot be turned on and off immediately, putting one in operation requires a certain amount of commitment." + }, + { + "type": "table_caption", + "bbox": [ + 0.56, + 0.395, + 0.827, + 0.435 + ], + "angle": 0, + "content": "Table 1: Equivalent concepts across different domains. OCP can model scenarios other than adaptive experimentation." + }, + { + "type": "table", + "bbox": [ + 0.561, + 0.439, + 0.825, + 0.478 + ], + "angle": 0, + "content": "
DomainEquivalent Concepts
Adaptive experimentationAtomic-populationPopulation
Portfolio managementFinancial assetPortfolio of assets
Energy systemsPower stationNetwork of stations
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.586, + 0.667, + 0.601 + ], + "angle": 0, + "content": "3 WARM-UP: WHEN TO BREAK A SINGLE COMMITMENT?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.608, + 0.827, + 0.777 + ], + "angle": 0, + "content": "In this section, to gather insights, we commence by analyzing a simplified instance of OCP. Later, in Section 4, using these insights, we construct a practical algorithm for solving a more general case of OCP. As the simplified instance, we only consider one atomic-population such that \\(\\mathcal{X} = \\{\\mathcal{X}_0\\}\\) and one experiment design that targets this atomic-population such that \\(\\Psi = \\{\\Psi_0 = (\\mathcal{X}_0,\\tau ,\\rho)\\}\\). Moreover, we assume that the outcomes are distributed normally with unit variance such that \\(\\Omega \\doteq \\Omega_{\\mathcal{X}_0}\\doteq \\mathcal{N}(\\theta \\doteq \\theta_{\\mathcal{X}_0},1)\\) and the success criterion is a simple Z-test to see whether \\(\\theta >0\\) such that \\(\\rho (\\mathcal{D}_{\\tau})\\doteq \\rho (\\mu_{\\tau})\\doteq \\mathbb{1}\\{\\mu_{\\tau} > \\alpha /\\sqrt{\\tau}\\}\\), where \\(\\mu_t = \\sum_{(x_{t'},y_{t'})\\in \\mathcal{D}_t}y_{t'} / |\\mathcal{D}_t|\\) is the empirical mean outcome given dataset \\(\\mathcal{D}_t\\), and \\(\\alpha\\) determines the significance threshold for the test. Since there is just one viable experiment in this setting, the only decision that needs to be made at each time step is whether to keep conducting experiment \\(\\psi^1 = \\Psi_0\\) or to stop all experimentation. For this decision to be interesting, we will also assume that \\(C\\doteq C_{\\Psi_0} > 0\\) so that never stopping is not necessarily optimal—and \\(R\\doteq R_{\\Psi_0} > \\tau C\\) so that always stopping is not necessarily optimal either." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.782, + 0.827, + 0.811 + ], + "angle": 0, + "content": "Value and Q-functions Since \\( t \\) and \\( \\mu_t \\) are sufficient statistics to estimate the success probability of the experiment, it is also sufficient to only consider policies of the form \\( \\pi(t, \\mu) \\). For a given policy \\( \\pi \\)," + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.814, + 0.825, + 0.83 + ], + "angle": 0, + "content": "\\[\nV ^ {\\pi} (t, \\mu) = \\mathbb {E} \\left[ R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {\\pi} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t} ^ {\\pi}, \\tau \\right\\} - t\\right) \\mid \\mu_ {t} = \\mu \\right] \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.832, + 0.825, + 0.849 + ], + "angle": 0, + "content": "\\[\nQ ^ {\\pi} (t, \\mu) = \\mathbb {E} [ R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {\\pi} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {\\pi}, \\tau \\right\\} - t\\right) | \\mu_ {t} = \\mu ] \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.852, + 0.827, + 0.923 + ], + "angle": 0, + "content": "are the value function, and the Q-function of conducting the experiment for at least one more time step respectively, where \\( T_{t}^{\\pi} = \\min \\{t' \\geq t : \\pi(t', \\mu_{t'}) = \\emptyset\\} \\) is the first time step at or after time \\( t \\) that policy \\( \\pi \\) decides to stop; let \\( V^{*} = V^{\\pi^{*}} \\) and \\( Q^{*} = Q^{\\pi^{*}} \\) be the optimal value and Q-functions. Note that the Q-factor of stopping all experimentation is always equal to zero for all policies. Hence, the optimal policy must be such that \\( \\pi^{*}(t, \\mu) = \\Psi_{0} \\) if \\( Q^{*}(t, \\mu) > 0 \\) and \\( \\pi^{*}(t, \\mu) = \\emptyset \\) otherwise." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.554, + 0.161 + ], + "angle": 0, + "content": "Once we identify the value and Q-functions, a naive attempt at finding the optimal policy would be to compute \\( V^{*} \\) and \\( Q^{*} \\) via dynamic programming as they would satisfy the following Bellman optimality conditions:" + }, + { + "type": "equation", + "bbox": [ + 0.199, + 0.164, + 0.55, + 0.181 + ], + "angle": 0, + "content": "\\[\nQ ^ {*} (t, \\mu) = - C + \\mathbb {E} \\left[ V ^ {*} (t + 1, \\mu_ {t + 1}) \\mid \\mu_ {t} = \\mu \\right] \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.199, + 0.182, + 0.55, + 0.199 + ], + "angle": 0, + "content": "\\[\nV ^ {*} (t, \\mu) = \\max \\{0, Q ^ {*} (t, \\mu) \\} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.201, + 0.552, + 0.272 + ], + "angle": 0, + "content": "and \\( V^{*}(\\tau, \\mu) = R \\cdot \\rho(\\mu) \\). However, a major complication in applying dynamic programming methods to compute \\( V^{*} \\) and \\( Q^{*} \\) is that they are continuous functions in \\( \\mu \\). In the literature of partially-observable Markov decision processes (POMDPs), which OCP happens to be an instance" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.272, + 0.827, + 0.328 + ], + "angle": 0, + "content": "of (see Appendix A), the standard approach of addressing this complication would be to leverage the convexity of \\( V^{*} \\) and \\( Q^{*} \\), and approximate them with functions of the form \\( f(\\mu) = \\max_{i} a_{i}\\mu + b_{i} \\) (Spaan, 2012). 
However, this standard approach is not applicable in OCP because, in general, neither \\( V^{*}(t,\\mu) \\) nor \\( -V^{*}(t,\\mu) \\) is a convex function with respect to \\( \\mu \\) (see Figure 1):" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.334, + 0.825, + 0.379 + ], + "angle": 0, + "content": "Proposition 1 (Non-convexity). There exists a problem instance \\((C,R,\\tau,\\alpha)\\) and \\(t\\in \\{1,\\dots ,\\tau -1\\}\\) such that \\(\\exists \\mu ,\\mu^{\\prime}\\in \\mathbb{R},p\\in [0,1]:V^{*}(t,p\\mu +(1 - p)\\mu^{\\prime}) < pV^{*}(t,\\mu) + (1 - p)V^{*}(t,\\mu^{\\prime})\\) and \\(\\exists \\mu ,\\mu^{\\prime}\\in \\mathbb{R},p\\in [0,1]: - V^{*}(t,p\\mu +(1 - p)\\mu^{\\prime}) < - pV^{*}(t,\\mu) - (1 - p)V^{*}(t,\\mu^{\\prime}).^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.384, + 0.825, + 0.469 + ], + "angle": 0, + "content": "Properties of the optimal policy Although identifying \\(\\pi^{*}\\) exactly by computing \\(V^{*}\\) and \\(Q^{*}\\) is challenging, we can still identify some properties that \\(\\pi^{*}\\) should have, which can then help us design a heuristic policy that we expect to perform well, albeit not optimally. First of all, the optimal policy \\(\\pi^{*}\\) should be a \"thresholding-type\" policy—that is the meta-experimenter should keep conducting the experiment as long as \\(\\mu_{t}\\) stays above a time-dependent threshold \\(\\mu_{t}^{*}\\) and should stop all experimentation the moment \\(\\mu_{t}\\) drops below that threshold (see the top panel of Figure 2):" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.475, + 0.825, + 0.506 + ], + "angle": 0, + "content": "Proposition 2 (Thresholding). 
For all problem instances \\((C, R, \\tau, \\alpha)\\), there exist time-dependent thresholds \\(\\{\\mu_t^* \\in \\mathbb{R}\\}_{t=1}^{\\tau-1}\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.509, + 0.825, + 0.526 + ], + "angle": 0, + "content": "\\[\n\\pi^ {*} (t, \\mu) = \\begin{cases} \\Psi_ {0} & \\text{if } \\mu > \\mu_ {t} ^ {*} \\\\ \\emptyset & \\text{otherwise} \\end{cases} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.53, + 0.553, + 0.615 + ], + "angle": 0, + "content": "Intuitively, a higher test statistic \\(\\mu_t\\) means that the experiment is only more likely to succeed, hence if it is optimal to continue conducting the experiment when \\(\\mu_t = \\mu\\), then it should also be optimal to continue when \\(\\mu_t = \\mu' > \\mu\\) (likewise, lower \\(\\mu_t\\) means success is even less likely hence \\(\\pi^*(t, \\mu) = \\emptyset\\) implies \\(\\pi^*(t, \\mu') = \\emptyset\\) for \\(\\mu' < \\mu\\))." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.62, + 0.552, + 0.815 + ], + "angle": 0, + "content": "Moreover, the optimal policy \\(\\pi^{*}\\) must be \"optimistic\" that the experiment will succeed when making decisions. Consider a greedy policy \\(\\pi^{\\mathrm{greedy}}\\) that continues as long as the expected utility of committing fully to conducting the experiment until it terminates at \\(t = \\tau\\) is positive—that is \\(\\pi^{\\mathrm{greedy}}(t,\\mu) = \\Psi_0\\) if and only if \\(V^{\\pi^{(0)}}(t,\\mu) > 0\\) where \\(\\pi^{(0)}\\) is the policy that always waits until the experiment terminates such that \\(\\pi^{(0)}(t,\\mu) = \\Psi_0\\) for all \\(t,\\mu\\); \\(\\pi^{\\mathrm{greedy}}\\) is said to be greedy because the decision to continue is made assuming a full commitment to the experiment without considering the possibility to stop at a future time step. Then, whenever such greedy reasoning suggests continuing, the meta-experimenter should indeed continue. 
However, whenever the same reasoning suggests stopping, the meta-" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.827, + 0.843 + ], + "angle": 0, + "content": "experimenter should be optimistic that the experiment will succeed and occasionally make the decision to continue instead—that is \\(\\pi^{*}\\) should be biased towards continuing (see the threshold gap in Figure 2):" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.848, + 0.825, + 0.882 + ], + "angle": 0, + "content": "Proposition 3 (Optimism). First, \\(\\pi^{\\text{greedy}}\\) is also of thresholding type and there exists \\(\\{\\mu_t^{\\text{greedy}} \\in \\mathbb{R}\\}_{t=1}^{\\tau-1}\\) such that \\(\\pi^{\\text{greedy}}(t,\\mu) = \\Psi_0\\) if and only if \\(\\mu > \\mu_t^{\\text{greedy}}\\). Moreover, for all \\(t \\in \\{1,\\dots,\\tau-1\\}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.885, + 0.825, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\mu_ {t} ^ {*} \\leq \\mu_ {t} ^ {\\text {greedy}} \\quad \\Longleftrightarrow \\quad \\left\\{\\mu : \\pi^ {*} (t, \\mu) = \\Psi_ {0} \\right\\} \\supseteq \\left\\{\\mu : \\pi^ {\\text {greedy}} (t, \\mu) = \\Psi_ {0} \\right\\} \\tag {7}\n\\]" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.91, + 0.498, + 0.925 + ], + "angle": 0, + "content": "1 Proofs of all propositions are given in Appendix I." + }, + { + "type": "image", + "bbox": [ + 0.564, + 0.105, + 0.824, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.56, + 0.217, + 0.827, + 0.269 + ], + "angle": 0, + "content": "Figure 1: Optimal value function \\( V^{*}(t,\\mu) \\) for \\( C = 1 \\), \\( R = 10 \\), \\( \\tau = 4 \\), and \\( \\alpha = 0 \\). It can clearly be seen that neither \\( V^{*} \\) nor \\( -V^{*} \\) is convex in \\( \\mu \\) (cf. Proposition 1)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.106, + 0.827, + 0.22 + ], + "angle": 0, + "content": "Intuitively, the optimism of \\(\\pi^{*}\\) accounts for the information gained from observing more samples when the experiment is continued. Remember that \\(\\pi^{\\mathrm{greedy}}\\) estimates the reward to be received if the experiment is conducted until termination, and it stops whenever its estimate is negative. But, the estimate of \\(\\pi^{\\mathrm{greedy}}\\) has some uncertainty associated with it. Whenever it is uncertain enough whether the reward to be received is actually negative, incurring the cost of continuing for one more time step, gaining new information, and forming a more certain estimate can lead to a more accurate decision and a higher overall utility. Finally, the optimism of \\(\\pi^{*}\\) has a strictly decreasing upper bound; denoting with \\(F(x) = (1 / \\sqrt{2\\pi})\\int_{-\\infty}^{x}e^{-(1 / 2)u^2}\\,du\\) the c.d.f. of the standard normal distribution:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.224, + 0.608, + 0.24 + ], + "angle": 0, + "content": "Proposition 4 (Decreasing optimism). 
For all \\( t \\in \\{1, \\dots, \\tau - 1\\} \\)," + }, + { + "type": "equation", + "bbox": [ + 0.285, + 0.244, + 0.825, + 0.264 + ], + "angle": 0, + "content": "\\[\n\\left| \\mu_ {t} ^ {*} - \\mu_ {t} ^ {\\text {greedy}} \\right| \\leq \\sqrt {1 / t - 1 / \\tau} \\times \\left(F ^ {- 1} \\left(\\left(\\tau - t\\right) C / R\\right) - F ^ {- 1} \\left(C / R\\right)\\right) \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.269, + 0.825, + 0.34 + ], + "angle": 0, + "content": "Intuitively, as the experiment continues, the information gained from one individual sample decreases relative to the total information accumulated, hence the optimism of \\(\\pi^{*}\\) that accounts for that information gain also decreases (see the bottom panel of Figure 2). Consider one extreme: When \\(t = \\tau - 1\\), there is no more information to be gained before the experiment terminates at \\(t = \\tau\\), hence \\(\\pi^{*}\\) should make the same decisions as \\(\\pi^{\\mathrm{greedy}}\\). Indeed, Proposition 4 implies that \\(\\mu_{\\tau - 1}^{*} = \\mu_{\\tau - 1}^{\\mathrm{greedy}}\\)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.351, + 0.547, + 0.367 + ], + "angle": 0, + "content": "4 A PRACTICAL ALGORITHM: BAYES-OCP" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.374, + 0.827, + 0.459 + ], + "angle": 0, + "content": "Summarizing our discussion in the previous section, we suspect the optimal policy to be (i) of thresholding type (cf. Proposition 2), (ii) optimistic (cf. Proposition 3), and (iii) increasingly more greedy (cf. Proposition 4). These findings are not a complete surprise as optimism-in-the-face-of-uncertainty is a well-known principle in solving online decision-making problems (Auer et al., 2002; Bubeck et al., 2012). Our earlier analysis shows rigorously that this principle holds for at least a special case of OCP and strengthens our intuition that it should be applicable for more general cases of OCP as well." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.464, + 0.825, + 0.549 + ], + "angle": 0, + "content": "Keeping properties (i-iii) in mind, we now propose a practical algorithm for solving OCP in a more general setting than the one we analyzed earlier. Let \\(|\\mathcal{X}| \\geq 1\\) and \\(\\Psi = \\{(X,\\tau,\\rho): X \\in 2^{\\mathcal{X}} \\setminus \\emptyset\\}\\) include all experiment designs that target a unique subpopulation within \\(\\mathcal{X}\\) for a given time horizon \\(\\tau\\) and success criterion \\(\\rho\\); let \\(C_X \\doteq C_{(X,\\tau,\\rho)}\\) and \\(R_X \\doteq R_{(X,\\tau,\\rho)}\\). We assume that the conditional power of performing a hypothesis test at time \\(\\tau\\) according to \\(\\rho\\)—that is the probability of the test being successful conditioned on mean outcomes \\(\\{\\theta_x\\}\\)—can be computed for interim datasets—that is" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.553, + 0.825, + 0.573 + ], + "angle": 0, + "content": "\\[\n\\mathcal {P} (X, \\mathcal {D} _ {t}; \\left\\{\\theta_ {x} \\right\\}) = \\mathbb {E} _ {x _ {t ^ {\\prime}} \\sim \\left\\{\\eta_ {x \\mid X} \\right\\} _ {x \\in X}, y _ {t ^ {\\prime}} \\sim \\mathcal {N} \\left(\\theta_ {x _ {t ^ {\\prime}}}, 1\\right)} [ \\rho (\\mathcal {D} _ {t} \\cup \\left(\\cup_ {t ^ {\\prime} = t + 1} ^ {\\tau} \\left\\{x _ {t ^ {\\prime}}, y _ {t ^ {\\prime}} \\right\\}\\right)) ] \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.576, + 0.742, + 0.592 + ], + "angle": 0, + "content": "can be evaluated efficiently. 
Then, based on this conditional power function, we define" + }, + { + "type": "equation", + "bbox": [ + 0.189, + 0.601, + 0.484, + 0.635 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {G} \\left(X, \\mathcal {D} _ {t}; \\left\\{\\theta_ {x} \\right\\}\\right) (10) \\\\ = R _ {X} \\cdot \\mathcal {P} (X, \\mathcal {D} _ {t}) - C _ {X} \\cdot (\\tau - t) (10) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.64, + 0.487, + 0.751 + ], + "angle": 0, + "content": "as the expected utility of fully committing to an experiment and waiting until it terminates when the experiment targets population \\(X\\), is currently at time step \\(t\\), and has collected dataset \\(\\mathcal{D}_t\\) so far. Denote with \\(\\mathcal{G}^{(0)}(X; \\{\\theta_x\\}) = \\mathcal{G}(X, \\emptyset; \\{\\theta_x\\})\\) the same expected utility but for an experiment that is yet to start, and with \\(\\mathcal{G}^{(0)}(\\emptyset; \\{\\theta_x\\}) = 0\\) the utility of stopping all experimentation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.758, + 0.487, + 0.925 + ], + "angle": 0, + "content": "Our algorithm is called Bayes-OCP and is given in Algorithm 1. It maintains a posterior distribution \\(\\mathcal{N}(\\mu_x,\\sigma_x^2)\\) for each mean outcome \\(\\theta_{x}\\) assuming that, given mean \\(\\theta_{x}\\), outcomes are distributed normally with unit variance—that is \\(\\Omega_{x} = \\mathcal{N}(\\theta_{x},1)\\). These posteriors are only used in deciding which experiment to run next and not in determining whether the experiment was a success or not. Hence, even when the assumption of outcomes being normally distributed is violated, the integrity of the experiments would not be effected; only the performance of Bayes" + }, + { + "type": "table", + "bbox": [ + 0.497, + 0.604, + 0.826, + 0.921 + ], + "angle": 0, + "content": "
Algorithm 1 Bayes-OCP
1: Initialize μx and σx2 for all x ∈ X
2: X ← X, t ← 0, D0 ← ∅
3: Start experiment ψ = (X,τ,ρ)
4: loop:
5: t ← t + 1; Dt ← Dt-1 ∪ {xt,yt}
6: 1/σxt2 ← 1/σxt2 + 1
7: μxt ← μxt + (yt - μxt)σxt2
(i) Identify a candidate subpopulation X' to replace X:
8: X' ← ∅
9: while X \\ X' ⊃ ∅:
10: x* ← argmaxx∈X\\X'
Eθx~N(μx,σx2)[G(0)(X' ∪ {x}; {θx})]
11: if Eθx~N(μx,σx2)[G(0)(X' ∪ {x*}; {θx})] > Eθx~N(μx,σx2)[G(0)(X'; {θx})]:
12: X' ← X' ∪ {x*}
13: else: break
(ii) Decide whether to actually replace X with X':
14: if Pθx~N(μx,σx2)[G(0)(X'; {θx}) > G(X, Dt; {θx})] > β:
15: X ← X', t ← 0, D0 ← ∅
16: Start a new experiment ψ = (X,τ,ρ)
" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.134 + ], + "angle": 0, + "content": "OCP in managing various experiments would degrade (see Appendix C for related experiments). Making use of the posteriors it maintains, Bayes-OCP performs two steps at each iteration:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.139, + 0.827, + 0.265 + ], + "angle": 0, + "content": "(i) First, a subpopulation \\( X' \\subset X \\) within the currently targeted population \\( X \\) is identified as a potential candidate to target next; due to the combinatorial size of \\( \\Psi \\), it would not be practical to consider every subpopulation individually as a candidate for large \\( |\\mathcal{X}| \\). The ideal candidate would be the subpopulation with the largest expected utility: \\( X' = \\operatorname{argmax}_{X' \\subset X} \\mathbb{E}_{\\theta_x \\sim \\mathcal{N}(\\mu_x, \\sigma_x^2)} [\\mathcal{G}^{(0)}(X'; \\{\\theta_x\\})] \\). But again due to the combinatorial size of the search space, Bayes-OCP employs a greedy algorithm instead and forms candidate subpopulations by combining, one by one, the atomic-subpopulations that increase the expected utility the most, until the expected utility no longer improves. Note that it is common to use greedy algorithms to solve combinatorial optimization problems (Lawler, 1976; Papadimitriou and Steiglitz, 1982)." + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.271, + 0.828, + 0.43 + ], + "angle": 0, + "content": "(ii) Then, it is decided whether the current experiment targeting population \\(X\\) should be stopped in favor of targeting candidate \\(X^{\\prime}\\) identified earlier instead. 
A greedy strategy would have done so whenever \\(\\mathbb{E}_{\\theta_x \\sim \\mathcal{N}(\\mu_x, \\sigma_x^2)}[\\mathcal{G}^{(0)}(X'; \\{\\theta_x\\})] > \\mathbb{E}_{\\theta_x \\sim \\mathcal{N}(\\mu_x, \\sigma_x^2)}[\\mathcal{G}(X, \\mathcal{D}_t; \\{\\theta_x\\})]\\). But from our earlier analysis, we have learned that the optimal strategy is optimistic (cf. Proposition 3). As such, Bayes-OCP checks whether it is overwhelmingly likely that the alternative experiment has higher expected utility—that is whether \\(\\mathbb{P}_{\\theta_x \\sim \\mathcal{N}(\\mu_x, \\sigma_x^2)}\\{\\mathcal{G}^{(0)}(X'; \\{\\theta_x\\}) > \\mathcal{G}(X, \\mathcal{D}_t; \\{\\theta_x\\})\\} > \\beta\\), where \\(\\beta \\in (1/2, 1)\\) controls the decision-making threshold. When \\(\\beta\\) is large, we are more optimistic that the current experiment will succeed and require stronger evidence that the alternative experiment has higher expected utility. Note that, as the posteriors \\(\\mathcal{N}(\\mu_x, \\sigma_x^2)\\) get narrower, the optimism of this rule naturally decreases, which should be the case for the optimal strategy (cf. Proposition 4). As one extreme, the two switching rules become equivalent when \\(\\{\\sigma_x^2 \\to 0\\}\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.446, + 0.345, + 0.461 + ], + "angle": 0, + "content": "5 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.472, + 0.828, + 0.585 + ], + "angle": 0, + "content": "Optimal stopping Optimal commitment is essentially a new type of optimal stopping/switching problem. In typical optimal stopping problems (OSPs), the reward an agent can receive evolves based on a stochastic process and the goal of the agent is to determine the optimal time step to stop when the reward to be received is in some sense maximized (Shiryaev, 2007). Optimal commitment is unique in that a positive reward can only be received by not stopping until a pre-specified time horizon \\(\\tau\\). 
In optimal commitment, there is still a stochastic process (namely, samples \\(y_{t}\\)) that gradually reveals more information regarding what that positive reward will be at the end, however, the reward—or rather the cost—of stopping earlier is independent of this stochastic process (and is equal to \\(-tC\\))." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.59, + 0.829, + 0.732 + ], + "angle": 0, + "content": "Sequential hypothesis testing Among other OSPs, optimal commitment is most closely related to sequential hypothesis testing (SHT), where an agent makes sequential observations regarding a given hypothesis and eventually needs to decide whether to reject the said (alternate) hypothesis or reject some null hypothesis (Wald and Wolfowitz, 1948; Yu et al., 2009; Drugowitsch et al., 2012; Shenoy and Angela, 2012; Zhang and Angela, 2013; Drugowitsch et al., 2014; Khalvati and Rao, 2015; Schonbrodt et al., 2017; Fauß et al., 2020). Rejecting the correct hypothesis provides a positive reward whereas waiting for more observations, while informative, is also costly as in OCP. It is well known that the optimal policy in the classic setting of SHT is a thresholding-type policy with fixed thresholds that do not vary over time: The null hypothesis is rejected if some test statistic gets above a threshold (and the alternate hypothesis is rejected if the same statistic gets below a different threshold)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.829, + 0.85 + ], + "angle": 0, + "content": "Optimal commitment can be thought of as a SHT problem with the crucial difference that the meta-experimenter has only the option of discarding the alternate hypothesis (i.e. breaking a commitment), and once some time horizon is reached (i.e. 
when a commitment is kept), either the null hypothesis or the alternate hypothesis is automatically rejected according to some external success criterion \\(\\rho\\), regardless of what the meta-experimenters' decision might have been otherwise. As we have shown in Proposition 2, the optimal policy still remains a thresholding-type policy, but since there is now a deadline to discard the alternate hypothesis early, the thresholds become time-varying; in particular, they become less and less optimistic as the said deadline approaches (cf. Proposition 4)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Frazier and Angela (2007); Dayanik and Angela (2013); Alaa and van der Schaar (2016) consider SHT under stochastic deadlines, but different from optimal commitment, they still allow agents to reject both hypotheses at any time. In these works, the agent must make the rejection decision before the deadline is reached to be able to receive a positive reward, whereas in our case, the agent must wait until the deadline to see whether the null hypothesis will be rejected or not. Naghshvar and" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.825, + 0.153 + ], + "angle": 0, + "content": "Table 2: Comparison of related experiment designs. Optimal commitment is the only design that aims to decide both when an alternative population should be targeted—as opposed to switching the target population only at a fixed decision point—as well as which population to target among many potential candidates—as opposed to a simple binary decision of “overall population vs. sub-population” or “go vs. no-go”." 
+ }, + { + "type": "table", + "bbox": [ + 0.172, + 0.153, + 0.825, + 0.246 + ], + "angle": 0, + "content": "
DesignReferenceWhen?Which?
Randomized Controlled Trial (RCT)Fisher (1935)NeverOnly the initial population
Adaptive Enrichment DesignOndra et al. (2019)Fixed decision pointOverall vs. fixed subpopulation
Adaptive Signature DesignZhang et al. (2017)Fixed decision pointPossibly any population
RCT with Futility StoppingHe et al. (2012)Possibly any timeGo vs. no-go
Optimal Commitment(Ours)Possibly any timeAmong multiple populations
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.257, + 0.827, + 0.356 + ], + "angle": 0, + "content": "Javidi (2013); Jarrett and van der Schaar (2020) consider active versions of SHT where the agent is able to choose what type of observations to make. Our case is \"passive\" in the sense that the meta-experimenter cannot influence what kind of samples they are going to receive from the currently running experiment. Finally, optimal commitment, and SHT in general, can be thought of as more structured instances of partially-observed reinforcement learning (RL). As we have discussed earlier, the standard technique here relies on convex reward structures whereas the optimal value function in our case is not convex in general (cf. Proposition 1, see Appendix A for a detailed discussion)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.362, + 0.828, + 0.502 + ], + "angle": 0, + "content": "Adaptive experimentation We introduced optimal commitment predominantly as a tool for population selection during an experiment. In clinical trials, dominant approach to population selection is adaptive enrichment (Mehta et al., 2009; Magnusson and Turnbull, 2013; Simon and Simon, 2013; Wang and Hung, 2013; Simon and Simon, 2018; Ondra et al., 2019; Thall, 2021) and adaptive signature designs (Freidlin and Simon, 2005; Freidlin et al., 2010; Mi, 2017; Zhang et al., 2017; Bhattacharyya and Rai, 2019). These designs are capable of adapting the target population of a trial as the trial continues, but unlike optimal commitment, they can only do so at fixed analysis points and not just at any time step. While adaptive signature designs can select arbitrary populations, adaptive enrichment designs are also limited by the number of pre-specified populations they can select between, which is typically only two: the overall population and an alternative subpopulation." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.508, + 0.829, + 0.704 + ], + "angle": 0, + "content": "Optimal commitment is also related to clinical trial designs with futility stopping, where an experimenter might terminate a trial early once it becomes apparent that the said trial is highly unlikely to succeed (van der Tweel and van Noord, 2003; Lachin, 2005; He et al., 2012; Jitlal et al., 2012; Kimani et al., 2013; Chang et al., 2020). However, this does not consider the possibility of switching to a new trial that targets a different population. As we will see during our experiments, switching to an alternative experiment might prove preferable even before an ongoing experiment can be deemed futile. In such cases, optimal commitment can make more timely decisions. Table 2 summarizes the experiment designs related to optimal commitment. Finally, it is worth mentioning that there are several methods for managing clinical trials at a portfolio level—that is determining which clinical trial is to be conducted next (Rogers et al., 2002; Colvin and Maravelias, 2008; Graham et al., 2020). Trial management in this vain is orthogonal to optimal commitment: They are concerned with the success of multiple new treatments and make decisions on a trial-by-trial basis whereas we only ever consider a single intervention and make decisions regarding the target population on a sample-by-sample basis while experiments still continue. See Appendix H for extended related work." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.717, + 0.329, + 0.731 + ], + "angle": 0, + "content": "6 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.741, + 0.829, + 0.924 + ], + "angle": 0, + "content": "We want to investigate how Bayes-OCP behaves in environments that differ in terms of ground-truth outcomes, for instance, what happens in environments where the original experiment is quite likely to succeed versus what happens in ones where switching to an alternative experiment is needed. To this end, we simulate experiments where mean outcomes are varied but other aspects of an experiment are fixed: In our environments, there are two atomic-populations, \\(\\mathcal{X} = \\{\\mathcal{X}_A,\\mathcal{X}_B\\}\\). Both atomic-populations have equal propensities \\(\\eta_{\\mathcal{X}_A} = \\eta_{\\mathcal{X}_B} = 1 / 2\\) and the meta-experimenter has the same positively-biased prior for the mean outcome associated with each atomic-population: \\(\\theta_{\\mathcal{X}_A},\\theta_{\\mathcal{X}_B}\\sim \\mathcal{N}(0.1,0.1)\\). Experiment designs targeting one or both of these atomic-populations all have the same time horizon \\(\\tau = 600\\) and success criterion \\(\\rho (\\mathcal{D}_{\\tau}) = \\mathbb{1}\\{\\Sigma_{(x_t,y_t)\\in \\mathcal{D}_{\\tau}}y_t / |\\mathcal{D}_{\\tau}| > \\alpha /\\sqrt{\\tau}\\}\\), where \\(\\alpha = F^{-1}(95\\%)\\). So, experiments are powered to detect a positive mean outcome of 0.1 with probability \\(\\sim 80\\). Rewards are given by \\(R_{X} = 1000\\eta_{X}^{0.1}\\) the wider the target population is, the more people a successful intervention can be marketed to—and costs are given by \\(C_X = 1 / \\eta_X^{0.1}\\)—the narrower the target population is, the harder it becomes to find subjects eligible to participate." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.105, + 0.422, + 0.436 + ], + "angle": 0, + "content": "Benchmarks We consider the metaexperiment designs summarized in Table 2 as benchmarks (see Appendix A.1 for an RL-based benchmark). Conventional RCT always targets the overall population and never stops early—that is it always conducts the experiment \\(\\psi = (\\{\\mathcal{X}_A,\\mathcal{X}_B\\} ,\\tau ,\\rho)\\) until its completion. Adaptive Enrichment performs an intermediary analysis at \\(t = \\tau /2 = 300\\) and greedily selects the experiment with the highest expected utility from \\(\\Psi = \\{(X,\\tau ,\\rho)\\}_{X\\subseteq \\{\\mathcal{X}_A,\\mathcal{X}_B\\}}\\). Futility Stopping is implemented via Bayes-OCP by initializing the set of all experiments as a singleton \\(\\Psi = \\{\\Psi_0 = (\\{\\mathcal{X}_A,\\mathcal{X}_B\\} ,\\tau ,\\rho)\\}\\). Intuitively, futility stopping only decides whether or not to stop the initial experiment that targets the overall population early. Bayes-OCP is initialized with \\(\\beta = 0.80\\) (see Appendix E for a sensitivity analysis). We also consider an abla" + }, + { + "type": "table_caption", + "bbox": [ + 0.43, + 0.106, + 0.827, + 0.22 + ], + "angle": 0, + "content": "Table 3: Performance comparison in various environment instances. Bayes-OCP has the highest expected utility—and a smaller FWER then conventional RCTs—when averaged over all environment instances. This is because Bayes-OCP is a balanced design whose structure does not favor certain environment instances over others. 
As an example, compare it with conventional RCTs: RCTs do not have an adaptive structure hence they favor green environments where it is not necessary to adapt the target population of the initial experiment. *Instances favored/addressed partially" + }, + { + "type": "table", + "bbox": [ + 0.432, + 0.222, + 0.827, + 0.43 + ], + "angle": 0, + "content": "
Algorithms:Oracle RCTRCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
Favored Instances:N/AGreenGreen/Amber*Green/RedAmber*/RedBalanced (incl. Amber)
All Instances (100%)Utility260.4-39.4 (6.7)106.5 (6.9)150.0 (3.5)32.6 (3.1)171.8 (3.6)
FWER0.0%0.3% (0.1%)0.2% (0.1%)0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success75.2%56.1% (0.7%)53.2% (0.8%)45.4% (1.3%)10.5% (0.8%)52.4% (1.2%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)607.5 (1.9)615.1 (1.2)
T-to-F35.6600.0 (0.0)548.9 (16.1)57.6 (4.6)3.0 (0.5)70.8 (8.2)
Green Instances (47.3%)Utility389.6388.7 (3.9)385.6 (3.7)337.7 (5.7)63.1 (3.5)343.4 (7.3)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)0.9 (0.0)0.1 (0.0)
Success99.0%98.9% (0.4%)97.4% (0.7%)86.0% (1.4%)18.8% (0.9%)88.2% (2.0%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)605.8 (1.5)602.8 (0.4)
T-to-F600.0600.0 (0.0)759.4 (36.4)46.6 (7.6)2.5 (0.5)62.3 (14.3)
Amber Instances (29.4%)Utility258.6-300.3 (19.8)-17.6 (6.5)-5.3 (5.4)11.6 (3.4)63.2 (5.6)
FWER0.0%0.7% (0.3%)0.6% (0.3%)0.4% (0.3%)0.0% (0.0%)0.3% (0.2%)
Switches1.00.0 (0.0)0.7 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success96.6%30.0% (2.0%)22.6% (1.5%)15.2% (2.0%)5.3% (1.0%)35.2% (1.8%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)617.2 (5.9)663.9 (7.2)
T-to-F600.0600.0 (0.0)745.0 (13.1)78.3 (9.3)3.4 (0.6)104.4 (19.1)
Red Instances (23.3%)Utility0.0-579.2 (4.1)-304.2 (4.4)-35.1 (1.7)-2.8 (1.1)-39.7 (3.4)
FWER0.0%0.2% (0.3%)0.2% (0.3%)0.1% (0.2%)0.0% (0.0%)0.2% (0.3%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%2.1% (0.4%)1.8% (0.5%)0.9% (0.3%)0.3% (0.4%)1.6% (0.7%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)634.8 (39.1)
T-to-F0.0600.0 (0.0)343.1 (8.1)38.9 (2.1)3.5 (1.4)45.8 (2.9)
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.437, + 0.827, + 0.48 + ], + "angle": 0, + "content": "tion of Bayes-OCP where decisions are made greedily instead of optimistically (Greedy Bayes-OCP). As a baseline of maximum achievable performance, we consider an oracle (Oracle RCT) that always runs the RCT with the optimum target (or does not run any RCT at all if that happens to be optimal)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.486, + 0.825, + 0.653 + ], + "angle": 0, + "content": "**Environments** A meta-experimenter's performance is specific to the environment instance. In particular, it depends on the ground-truth outcome distributions \\(\\{\\Omega_x\\}\\) for different populations. For example, an algorithm that always immediately stops the experiment would perform best when the mean outcome is negative. Hence, to faithfully evaluate the benchmarks, we need to focus on the average performance across different environments. To this end, we randomly generated 1000 environments (repeated five times to obtain error bars) with true mean outcomes \\(\\theta_{\\mathcal{X}_A}, \\theta_{\\mathcal{X}_B}\\) sampled independently from \\(\\mathcal{N}(0.1, 0.1)\\). Given these means, outcome distributions are set to be Gaussian with unit variance such that \\(\\Omega_x = \\mathcal{N}(\\theta_x, 1)\\). Depending on the true mean outcome, these environments can be categorized into three groups: (i) green instances where the initial experiment targeting the overall population has the highest utility, (ii) amber instances where an alternative experiment that targets a subpopulation has the highest utility, and (iii) red instances where no experiment has positive utility hence running no experiments is the optimal decision." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.659, + 0.827, + 0.827 + ], + "angle": 0, + "content": "Different benchmarks favor different instances (see the top row of Table 3): Conventional RCTs do not allow for any adaptation hence they favor green instances where the target population of the initial experiment does not need to be adapted. Adaptive Enrichment allows for adaptation but only at a certain time point, which is often too late to stop unsuccessful experiments (as in red instances). However, an adaptive enrichment design at least makes it possible to eventually target a subpopulation, even though it might be too late to do so at the pre-specified decision point, hence it partially accommodates amber instances. Futility Stopping decides between either continuing with the initial experiment or stopping all experimentation completely (targeting a subpopulation is not an option) hence it favors either green or red instances (but not amber instances). Greedy Bayes-OCP is pessimistic (or rather not optimistic enough) towards any ongoing experiment succeeding, hence it favors red instances where no experiment is likely to succeed. Similar to adaptive enrichment, Greedy Bayes-OCP at least allows subpopulations to be targeted hence it too partially accommodates amber instances." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.832, + 0.827, + 0.903 + ], + "angle": 0, + "content": "Main results Performance of a meta-experimenter is primarily measured by Bayesian utility which is the expected utility averaged over randomly sampled environment instances (Utility). Remember that maximizing utility was our main objective, and as such, Bayes-OCP has the highest expected utility when averaged over all environment instances, see Table 3. Unlike other benchmarks, Bayes-OCP strikes a good balance in prioritizing all environment instances at the same time. 
This is because Bayes-" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.551, + 0.161 + ], + "angle": 0, + "content": "OCP (i) can make timely decisions—unlike Adaptive Enrichment—and (ii) is optimistic hence it does not stop likely-to-succeed experiments prematurely—unlike Greedy Bayes-OCP. More specifically," + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.167, + 0.554, + 0.307 + ], + "angle": 0, + "content": "(i) Timeliness of Bayes-OCP: Bayes-OCP has an advantage in amber and red instances over adaptive enrichment and futility stopping. Consider the example in Figure 3: While Bayes-OCP stops in a timely manner, adaptive enrichment can only stop at a fixed decision point and experiments with futility stopping only stop when the ongoing experiment is failing not as soon as a better alternative emerges. This underlines the exploitative aspect of Bayes-OCP—making and breaking commitments to maximize utility." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.313, + 0.554, + 0.508 + ], + "angle": 0, + "content": "(ii) Optimism of Bayes-OCP: While a design that favors early stopping is obviously desirable in amber and red environments, how much it is favored should be moderated to also succeed in green environments. Consider the example in Figure 4: Greedy Bayes-OCP prematurely stops the initial experiment in a green environment while Bayes-OCP does not. Theoretically, we know that the optimal policy should be optimistic towards the ongoing experiment succeeding and be hesitant to stop to a certain extend. 
This underlines the exploratory aspect of Bayes-OCP—keeping a seemingly failing commitment still has value as it reveals more information regarding whether the commitment is actually failing." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.514, + 0.554, + 0.668 + ], + "angle": 0, + "content": "In Table 3, in addition to Utility, we also report the family-wise error rate (FWER)—that is the frequency of runs where at least one experiment (denote it with \\(\\psi^i\\)) is declared successful (i.e. \\(\\rho^i(\\mathcal{D}_\\tau^i) = 1\\)) despite the mean outcome being negative for the targeted population (i.e. \\(\\bar{\\theta}_{X^i} < 0\\)—the average number of times the target population has been switched (Switches), the probability of success which is defined as achieving positive utility (Success), the average time until a successful outcome (Timeto-Success, \\(T\\)-to-\\(S\\)), and the average time until an unsuccessful outcome where all experimentation is stopped" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.827, + 0.751 + ], + "angle": 0, + "content": "with negative utility (Time-to-Failure, \\( T \\)-to- \\( F \\)), see Appendix G for details. Importantly, Bayes-OCP does not compromise the error control of experiments, on the contrary, it even achieves a smaller FWER than conventional RCTs. This is because aggregate data is only ever used to select experiments, otherwise no two experiments consult each other's data when evaluating a success criterion so that the potential confoundedness that could have been caused by the adaptiveness of Bayes-OCP is avoided when declaring an experiment as successful (see Appendix B for a discussion on error control)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.757, + 0.83, + 0.815 + ], + "angle": 0, + "content": "Supplementary results We also provide supplementary results: Appendix A.1 evaluates RL-based benchmarks, Appendix B.1 investigates error control, Appendix C considers environments with non-Gaussian outcomes, Appendix D considers environments with more than two atomic-populations, and Appendix E analyzes the sensitivity of Bayes-OCP's performance to its hyper-parameter \\(\\beta\\)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.829, + 0.321, + 0.844 + ], + "angle": 0, + "content": "7 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Two aspects of OCP require further discussion: (i) How can it be approached from the perspective of reinforcement learning? While OCP technically describes a special class of POMDPs, we have not found this to be constructive in finding a solution (see Appendix A). (ii) What are the implications of using Bayes-OCP in terms of error control? It has no impact on individual error rates and can be adapted to control FWER (see Appendix B). See Appendix F for a discussion on future work." + }, + { + "type": "image", + "bbox": [ + 0.563, + 0.107, + 0.822, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.559, + 0.296, + 0.828, + 0.398 + ], + "angle": 0, + "content": "Figure 3: Timeliness of Bayes-OCP. Bayes-OCP is first to (correctly) stop the initial experiment in an amber instance (excluding Greedy Bayes-OCP). Adaptive enrichment can only stop at a pre-specified time, while futility stopping fails to consider switching to an alternative experiment, which is proven to be preferable earlier than stopping." 
+ }, + { + "type": "image", + "bbox": [ + 0.562, + 0.405, + 0.822, + 0.594 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.559, + 0.594, + 0.826, + 0.658 + ], + "angle": 0, + "content": "Figure 4: Optimism of Bayes-OCP. Greedy Bayes-OCP (incorrectly) stops due to initial noise in a green instance while Bayes-OCP does not stop since it is more optimistic (as the optimal policy should, cf. Proposition 3)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.103, + 0.341, + 0.117 + ], + "angle": 0, + "content": "ETHICS STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.128, + 0.825, + 0.28 + ], + "angle": 0, + "content": "As the main application of optimal commitment, we have focused on adaptive experimentation, particularly experiments that are run as part of clinical development. Clinical trials have a huge impact on the wellbeing of patients and this high-stakes nature of clinical trials naturally raises some ethical concerns; we discuss two major ones in this section. However before we start our discussion, it should be emphasized that clinical trials is not the only application domain of optimal commitment. As we have highlighted at the end of Section 2, our contributions are generally applicable to decision-making problems such as portfolio and energy management. Moreover, not all adaptive experiments are clinical and have the same high stakes as a clinical trial. For instance, A/B testing is common in online advertisement to determine what recommendation policies lead to more user engagement (Gui et al., 2015; Xu et al., 2015; Kohavi and Longbotham, 2017). 
Therefore, the ethical concerns we discuss here does not universally concern all possible applications of optimal commitment." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.287, + 0.825, + 0.37 + ], + "angle": 0, + "content": "The first concern is how the designed error rate of an individual experiment is affected when multiple such experiments are managed together using Bayes-OCP in an adaptive manner, in particular, whether any error rate is inflated by the use of Bayes-OCP or not. We discuss error control in Appendix B with supplementary experiments. But briefly, Bayes-OCP has essentially no impact on the error rate of experiments on an individual level, and when controlling their family-wise error rate is also a concern, it can easily be adapted to accommodate this additional constraint as well." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.378, + 0.825, + 0.655 + ], + "angle": 0, + "content": "The second concern is that an adaptive approach to population selection might lead to overly conservative experiments that unnecessarily limit the use of an effective treatment. As we have mentioned in the introduction to motivate the need for optimal commitment, when the treatment is effective only for a subpopulation (cf. amber instances in our experiments), population selection is absolutely necessary, otherwise the treatment is most likely to be found ineffective and discarded after an experiment that targets the overall patient population as a whole, which would deny the treatment for the subpopulation that would have benefited from it. On the flip side of this, when the treatment happens to be effective for everyone (cf. green instances in our experiment), population selection might lead to conducting a restrictive experiment that only targets a small subpopulation, which this time, would deny the treatment for the rest of the patient population. 
This is essentially the reason behind the performance drop between Bayes-OCP and conventional RCTs in green instances (see Table 3). There is a trade-off between the performance in amber instances and green instances; and Bayes-OCP achieves a better balance between the two compared with a conventional RCT as evidenced by its superior performance when averaged over all environment instances (again see Table 3); although it causes a drop in performance for green instances, it more than makes up for that drop in amber instances. This balance is partly controlled by how optimistic Bayes-OCP is, which is in turn dictated by its hyper-parameter \\(\\beta\\)—larger \\(\\beta\\) leads to more optimistic decisions towards ongoing experiments, which favors green instances more than amber instances. We analyze the sensitivity of Bayes-OCP's performance to \\(\\beta\\) in Appendix E; and for all configurations that we have evaluated, Bayes-OCP always performs significantly better than a conventional RCT." + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.67, + 0.434, + 0.684 + ], + "angle": 0, + "content": "REPRODUCIBILITY STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.693, + 0.825, + 0.777 + ], + "angle": 0, + "content": "All our experiments are based on synthetic simulations, hence our results can easily be reproduced by following the specifications in Section 6 without needing access to any private dataset. In order to aid reproducibility, we have rigorously described all our benchmarks in algorithmic form, similar to Algorithm 1, in Appendix J. Moreover, the source code necessary to reproduce our main results in Table 3 is made publicly available at https://github.com/alihanhyk/optcommit and https://github.com/vanderschaarlab/optcommit." 
+ }, + { + "type": "title", + "bbox": [ + 0.176, + 0.792, + 0.356, + 0.806 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.815, + 0.824, + 0.858 + ], + "angle": 0, + "content": "We would like to thank the reviewers and the members of the van der Schaar lab, for their valuable input, comments, and suggestions. This work was supported by the US Office of Naval Research (ONR) and the National Science Foundation (NSF, grant number 1722516)." + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.872, + 0.286, + 0.886 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.896, + 0.824, + 0.924 + ], + "angle": 0, + "content": "Alaa, A. M. and van der Schaar, M., \"Balancing suspense and surprise: Timely decision making with endogenous information acquisition,\" in Proc. Neural Inf. Process. Syst., 2016." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.949, + 0.507, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.134 + ], + "angle": 0, + "content": "Auer, P., Cesa-Bianchi, N., and Fischer, P., \"Finite-time analysis of the multiarmed bandit problem,\" Mach. Learn., vol. 47, no. 2, pp. 235-256, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.829, + 0.185 + ], + "angle": 0, + "content": "Bhattacharyya, A. and Rai, S. N., \"Adaptive signature design—review of the biomarker guided adaptive phase-III controlled design,\" Contemporary Clin. Trials Commun., vol. 15, p. 100378, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.193, + 0.826, + 0.224 + ], + "angle": 0, + "content": "Bubeck, S., Cesa-Bianchi, N. et al., \"Regret analysis of stochastic and nonstochastic multi-armed bandit problems,\" Found. 
Trends Mach. Learn., vol. 5, no. 1, pp. 1-122, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.231, + 0.825, + 0.262 + ], + "angle": 0, + "content": "Chang, Y., Song, T., Monaco, J., and Ivanova, A., \"Futility stopping in clinical trials, optimality and practical considerations,\" J. Biopharmaceutical Statist., vol. 30, no. 6, pp. 1050-1059, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.269, + 0.825, + 0.301 + ], + "angle": 0, + "content": "Chiu, Y.-D., Koenig, F., Posch, M., and Jaki, T., \"Design and estimation in clinical trials with subpopulation selection,\" Statist. Med., vol. 37, pp. 4335-4335-4352, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.308, + 0.827, + 0.338 + ], + "angle": 0, + "content": "\"Trends, charts, and maps,\" ClinicalTrials.gov. [Online]. Available: https://www.clinicaltrials.gov/ct2/resources/trends#RegisteredStudiesOverTimePostedResults" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.346, + 0.825, + 0.377 + ], + "angle": 0, + "content": "Colvin, M. and Maravelias, C. T., \"A stochastic programming approach for clinical trial planning in new drug development,\" Comput. Chem. Eng., vol. 32, no. 11, pp. 2626-2642, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.384, + 0.825, + 0.415 + ], + "angle": 0, + "content": "Dayanik, S. and Angela, J. Y., \"Reward-rate maximization in sequential identification under a stochastic deadline,\" SIAM J. Control Optim., vol. 51, no. 4, pp. 2922-2948, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.422, + 0.825, + 0.453 + ], + "angle": 0, + "content": "Demets, D. L. and Lan, K. K. G., \"Interim analysis: The alpha spending function approach,\" Statist. Med., vol. 13, no. 13-14, pp. 1341-1352, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.46, + 0.827, + 0.504 + ], + "angle": 0, + "content": "Drugowitsch, J., Moreno-Bote, R., Churchland, A. K., Shadlen, M. 
N., and Pouget, A., \"The cost of accumulating evidence in perceptual decision making,\" J. Neuroscience, vol. 32, no. 11, pp. 3612-3628, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.512, + 0.825, + 0.543 + ], + "angle": 0, + "content": "Drugowitsch, J., Moreno-Bote, R., and Pouget, A., \"Optimal decision-making with time-varying evidence reliability,\" in Proc. Neural Inf. Process. Syst., 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.551, + 0.825, + 0.582 + ], + "angle": 0, + "content": "Fauß, M., Zoubir, A. M., and Poor, H. V., “Minimax optimal sequential hypothesis tests for Markov processes,” Ann. Statist., vol. 48, no. 5, pp. 2599–2621, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.589, + 0.751, + 0.607 + ], + "angle": 0, + "content": "Fisher, R. A., The Design of Experiments. Edinburgh, Scotland: Oliver & Boyd, 1935." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.613, + 0.827, + 0.644 + ], + "angle": 0, + "content": "Frazier, P. and Angela, J. Y., \"Sequential hypothesis testing under stochastic deadlines,\" in Proc. Neural Inf. Process. Syst., 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.652, + 0.827, + 0.695 + ], + "angle": 0, + "content": "Freidlin, B. and Simon, R., \"Adaptive signature design: An adaptive clinical trial design for generating and prospectively testing a gene expression signature for sensitive patients,\" *Clin. Cancer Res.*, vol. 11, no. 21, pp. 7872-7878, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.704, + 0.825, + 0.735 + ], + "angle": 0, + "content": "Freidlin, B., Jiang, W., and Simon, R., \"The cross-validated adaptive signature design,\" Clin. Cancer Res., vol. 16, no. 2, pp. 691-698, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.742, + 0.825, + 0.773 + ], + "angle": 0, + "content": "Ghare, G. and Leutenegger, S. T., \"Improving speedup and response times by replicating parallel programs on a SNOW,\" in Proc. Int. Conf. 
Job Scheduling Strategies Parallel Process., 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.78, + 0.827, + 0.811 + ], + "angle": 0, + "content": "Graham, E., Jaki, T., and Harbron, C., \"A comparison of stochastic programming methods for portfolio level decision-making,\" J. Biopharmaceutical Statist., vol. 30, no. 3, pp. 405-429, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.818, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Gui, H., Xu, Y., Bhasin, A., and Han, J., \"Network A/B testing: From sampling to estimation,\" in Proc. Int. Conf. World Wide Web, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.856, + 0.825, + 0.887 + ], + "angle": 0, + "content": "He, P., Lai, T. L., and Liao, O. Y.-W., “Futility stopping in clinical trials,” Statist. Interface, vol. 5, no. 4, pp. 415-423, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Jarrett, D. and van der Schaar, M., \"Inverse active sensing: Modeling and understanding timely decision-making,\" in Int. Conf. on Mach. Learn., 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.135 + ], + "angle": 0, + "content": "Jitlal, M., Khan, I., Lee, S., and Hackshaw, A., \"Stopping clinical trials early for futility: retrospective analysis of several randomised clinical studies,\" Brit. J. Cancer, vol. 107, no. 6, pp. 910-917, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.142, + 0.826, + 0.172 + ], + "angle": 0, + "content": "Kaitin, K. 
I., \"Deconstructing the drug development process: The new face of innovation,\" Clin. Pharmacology Therapeutics, vol. 87, no. 3, pp. 356-361, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.18, + 0.825, + 0.21 + ], + "angle": 0, + "content": "Karatzas, I. and Wang, H., \"Utility maximization with discretionary stopping,\" SIAM J. Control Optim., vol. 39, pp. 306-329, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.218, + 0.825, + 0.248 + ], + "angle": 0, + "content": "Khalvati, K. and Rao, R. P., \"A Bayesian framework for modeling confidence in perceptual decision making,\" in Proc. Neural Inf. Process. Syst., 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.256, + 0.825, + 0.286 + ], + "angle": 0, + "content": "Kimani, P. K., Todd, S., and Stallard, N., \"Conditionally unbiased estimation in phase II/III clinical trials with early stopping for futility,\" Statist. Med., vol. 32, no. 17, pp. 2893-2910, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.294, + 0.825, + 0.324 + ], + "angle": 0, + "content": "Kohavi, R. and Longbotham, R., \"Online controlled experiments and A/B testing,\" Encyclopedia Mach. Learn. Data Mining, vol. 7, no. 8, pp. 922-929, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.332, + 0.826, + 0.363 + ], + "angle": 0, + "content": "Lachin, J. M., “A review of methods for futility stopping based on conditional power,” Statist. Med., vol. 24, no. 18, pp. 2747-2764, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.371, + 0.825, + 0.4 + ], + "angle": 0, + "content": "Lawler, E., Combinatorial Optimization, Networks and Matroids. New York: Holt, Rinehard & Winston, 1976." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.409, + 0.825, + 0.44 + ], + "angle": 0, + "content": "Lipkovich, I., Dmitrienko, A., and D'Agostino Sr., R. B., \"Tutorial on biostatistics: Data-driven subgroup identification and analysis in clinical trials,\" Statist. Med., vol. 36, pp. 
136-196, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.448, + 0.825, + 0.478 + ], + "angle": 0, + "content": "Magnusson, B. P. and Turnbull, B. W., \"Group sequential enrichment design incorporating subgroup selection,\" Statist. Med., vol. 32, no. 16, pp. 2695-2714, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.486, + 0.826, + 0.514 + ], + "angle": 0, + "content": "Markowitz, H., Portfolio selection: Efficient diversification of investment. New York: John Wiley, 1959." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.524, + 0.826, + 0.566 + ], + "angle": 0, + "content": "Mehta, C., Gao, P., Bhatt, D. L., Harrington, R. A., Skerjanec, S., and Ware, J. H., \"Optimizing trial design: Sequential, adaptive, and enrichment strategies,\" Circulation, vol. 119, no. 4, pp. 597-605, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.576, + 0.826, + 0.606 + ], + "angle": 0, + "content": "Merton, R. C., \"Life time portfolio selection under uncertainty: The continuous-time case,\" Rev. Econ. Statist., vol. 51, pp. 247-257, 1969." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.614, + 0.825, + 0.644 + ], + "angle": 0, + "content": "Mi, G., \"Enhancement of the adaptive signature design for learning and confirming in a single pivotal trial,\" Pharmaceutical Statist., vol. 16, no. 5, pp. 312-321, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.652, + 0.825, + 0.683 + ], + "angle": 0, + "content": "Moineddin, R., Butt, D. A., Tomlinson, G., and Beyene, J., \"Identifying subpopulations for subgroup analysis in a longitudinal clinical trial,\" Contemporary Clin. Trials, vol. 29, pp. 817-822, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.69, + 0.826, + 0.719 + ], + "angle": 0, + "content": "Naghshvar, M. and Javidi, T., \"Active sequential hypothesis testing,\" Ann. Statist., vol. 41, no. 6, pp. 2703-2738, 2013." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.728, + 0.825, + 0.759 + ], + "angle": 0, + "content": "Ni, T., Eysenbach, B., and Salakhutdinov, R., \"Recurrent model-free RL can be a strong baseline for many POMDPs,\" in Proc. Int. Conf. Mach. Learn., 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.766, + 0.826, + 0.797 + ], + "angle": 0, + "content": "Olofsson, M., Önskog, T., and Lundström, N. L. P., \"Management strategies for run-of-river hydropower plants: An optimal switching approach,\" Optim. Eng., vol. 23, pp. 1707-1731, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.805, + 0.826, + 0.847 + ], + "angle": 0, + "content": "Ondra, T., Jobjörnsson, S., Beckman, R. A., Burman, C.-F., König, F., Stallard, N., and Posch, M., \"Optimized adaptive enrichment designs,\" Statist. Methods Med. Res., vol. 28, no. 7, pp. 2096-2111, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.857, + 0.826, + 0.886 + ], + "angle": 0, + "content": "Papadimitriou, C. H. and Steiglitz, K., Combinatorial Optimization—Algorithms and Complexity. Englewood Cliffs, NJ: Prentice Hall, 1982." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Rafique, S. F. and Jianhua, Z., \"Energy management system, generation and demand predictors: A review,\" IET Gener. Transmiss. Distribution, vol. 12, no. 3, pp. 519-530, 2018." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.134 + ], + "angle": 0, + "content": "Rogers, M. J., Gupta, A., and Maranas, C. 
D., \"Real options based analysis of optimal pharmaceutical research and development portfolios,\" Ind. Eng. Chem. Res., vol. 41, no. 25, pp. 6607-6620, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.827, + 0.185 + ], + "angle": 0, + "content": "Schönbrodt, F. D., Wagenmakers, E.-J., Zehetleitner, M., and Perugini, M., \"Sequential hypothesis testing with Bayes factors: Efficiently testing mean differences,\" Psychol. Methods, vol. 22, no. 2, p. 322, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.193, + 0.827, + 0.224 + ], + "angle": 0, + "content": "Shenoy, P. and Angela, J. Y., \"Strategic impatience in Go/NoGo versus forced-choice decision-making,\" in Proc. Neural Inf. Process Syst., 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.231, + 0.743, + 0.248 + ], + "angle": 0, + "content": "Shiryaev, A. N., Optimal Stopping Rules. Springer Science & Business Media, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.254, + 0.827, + 0.284 + ], + "angle": 0, + "content": "Simon, N. and Simon, R., \"Adaptive enrichment designs for clinical trials,\" Biostatistics, vol. 14, no. 4, pp. 613-625, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.292, + 0.827, + 0.323 + ], + "angle": 0, + "content": "Simon, N. and Simon, R., \"Using Bayesian modeling in frequentist adaptive enrichment designs,\" Biostatistics, vol. 19, no. 1, pp. 27-41, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.33, + 0.827, + 0.361 + ], + "angle": 0, + "content": "Spanan, M. T. J., \"Partially observable Markov decision processes,\" in Reinforcement Learning. Springer, 2012, pp. 387-414." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.368, + 0.825, + 0.412 + ], + "angle": 0, + "content": "Takebe, T., imai, R., and Ono, S., \"The current status of drug discovery and development as originated in United States academia: The influence of industrial and academic collaboration on drug discovery and development,\" Clin. Transl. Sci., vol. 11, no. 6, pp. 597-606, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.42, + 0.827, + 0.449 + ], + "angle": 0, + "content": "Thall, P. F., \"Adaptive enrichment designs in clinical trials,\" Annu. Rev. Statist. Appl., vol. 8, pp. 393-411, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.457, + 0.825, + 0.487 + ], + "angle": 0, + "content": "Umscheid, C. A., Margolis, D. J., and Grossman, C. E., \"Key concepts of clinical trials: A narrative review,\" Postgraduate Med., vol. 123, no. 5, pp. 194-204, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.495, + 0.827, + 0.539 + ], + "angle": 0, + "content": "van der Tweel, I. and van Noord, P. A., \"Early stopping in clinical trials and epidemiologic studies for 'futility': Conditional power versus sequential analysis,\" J. Clin. Epidemiology, vol. 56, no. 7, pp. 610-617, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.546, + 0.827, + 0.577 + ], + "angle": 0, + "content": "Wald, A. and Wolfowitz, J., \"Optimum character of the sequential probability ratio test,\" Ann. Math. Statist., vol. 19, pp. 326-339, 1948." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.584, + 0.825, + 0.614 + ], + "angle": 0, + "content": "Wand, D., Joshi, G., and Wornell, G., \"Efficient task replication for fast response times in parallel computation,\" in Proc. ACM SIGMETRICS Conf., 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.622, + 0.825, + 0.653 + ], + "angle": 0, + "content": "Wang, D., Joshi, G., and Wornell, G. W., \"Efficient straggler replication in large-scale parallel computing,\" ACM Trans. Model. 
Perform. Eval. Comput. Syst., vol. 4, no. 2, pp. 1-23, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.66, + 0.827, + 0.703 + ], + "angle": 0, + "content": "Wang, S.-J. and Hung, H. J., \"Adaptive enrichment with subpopulation selection at interim: Methodologies, applications and design considerations,\" Contemporary clinical trials, vol. 36, no. 2, pp. 673-681, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.711, + 0.827, + 0.755 + ], + "angle": 0, + "content": "Xu, Y., Chen, N., Fernandez, A., Sinno, O., and Bhasin, A., \"From infrastructure to culture: A/B testing challenges in large scale social networks,\" in Proc. ACM SIGKDD Int. Conf. Knowl. Discovery Data Mining, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.763, + 0.827, + 0.805 + ], + "angle": 0, + "content": "Yu, A. J., Dayan, P., and Cohen, J. D., \"Dynamics of attentional selection under conflict: Toward a rational Bayesian account,\" J. Exp. Psychol. Human Perception Performance, vol. 35, no. 3, p. 700, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.814, + 0.827, + 0.845 + ], + "angle": 0, + "content": "Zhang, S. and Angela, J. Y., \"Forgetful Bayes and myopic planning: Human learning and decision-making in a bandit setting,\" in Proc. Neural Inf. Process. Syst., 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.853, + 0.827, + 0.896 + ], + "angle": 0, + "content": "Zhang, Z., Li, M., Lin, M., Soon, G., Greene, T., and Shen, C., \"Subgroup selection in adaptive signature designs of confirmatory clinical trials,\" J. Roy. Statist. Soc.: Ser. C (Appl. Statist.), vol. 66, no. 2, pp. 345-361, 2017." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.586, + 0.119 + ], + "angle": 0, + "content": "A A REINFORCEMENT LEARNING PERSPECTIVE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.827, + 0.233 + ], + "angle": 0, + "content": "Optimal commitment can be viewed as a partially-observed reinforcement learning problem. Let the tuple \\((\\mathcal{S},\\mathcal{A},\\mathcal{Z},\\mathcal{T},\\mathcal{O},\\mathcal{R})\\) denote a partially-observable Markov decision process (POMDP), where \\(\\mathcal{S}\\) is the (unobserved) state space, \\(\\mathcal{A}\\) is the action space, \\(\\mathcal{Z}\\) is the observation space, \\(\\mathcal{T} \\in \\Delta(\\mathcal{S})^{S \\times S}\\) describes the transition dynamics, \\(\\mathcal{O} \\in \\mathcal{Z}^S\\) describes the observation dynamics, and \\(\\mathcal{R} \\in \\mathbb{R}^S\\) describes the reward dynamics. 
Then, OCPs as defined in Section 2 can also be expressed as a special class of POMDPs: Letting \\(\\mathcal{Y} = \\mathbb{R}\\) denote the outcome space for clarity, \\(\\mathfrak{D} = \\cup_{t=0}(\\mathcal{X} \\times \\mathcal{Y})^t\\) be the space of all possible datasets \\(\\mathcal{D}_t\\), and \\(\\mathfrak{D}\\) be the space of all possible outcome distributions \\(\\Omega\\)," + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.239, + 0.827, + 0.283 + ], + "angle": 0, + "content": "- \\( \\mathcal{S} \\doteq \\{\\varnothing\\} \\cup (\\Psi \\times \\mathfrak{D} \\times \\mathfrak{D}^{\\mathcal{X}}) \\), where states \\( s = (\\psi, \\mathcal{D}_t, \\{\\Omega_x\\}_{x \\in \\mathcal{X}}) \\) consist of the ongoing experiment \\( \\psi \\in \\Psi \\), the dataset \\( \\mathcal{D}_t \\in \\mathfrak{D} \\) collected by the ongoing experiment so far, and the true outcome distributions \\( \\{\\Omega_x \\in \\mathfrak{D}\\} \\)," + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.288, + 0.336, + 0.304 + ], + "angle": 0, + "content": "- \\(\\mathcal{A} \\doteq \\{\\varnothing\\} \\cup \\Psi\\)" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.309, + 0.382, + 0.326 + ], + "angle": 0, + "content": "- \\(\\mathcal{Z} \\doteq \\{\\varnothing\\} \\cup (\\mathcal{X} \\times \\mathcal{Y})\\)" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.331, + 0.382, + 0.346 + ], + "angle": 0, + "content": "- \\(\\mathcal{T}(s = \\emptyset, a) \\doteq \\emptyset\\) and" + }, + { + "type": "list", + "bbox": [ + 0.216, + 0.239, + 0.827, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.354, + 0.816, + 0.46 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {T} (s = (\\psi = (X, \\tau , \\rho), \\mathcal {D} _ {t}, \\{\\Omega_ {x} \\}), a) \\\\ \\begin{array}{c} \\dot {=} \\left\\{ \\begin{array}{l l} \\emptyset & \\text {i f} a = \\emptyset \\\\ s ^ {\\prime} = (\\psi , \\mathcal {D} _ {t + 1} = \\mathcal {D} _ {t} 
\\cup \\{x _ {t + 1}, y _ {t + 1} \\}, \\{\\Omega_ {x} \\}) \\\\ \\quad \\text {s . t .} x _ {t + 1} \\sim \\{\\eta_ {x | X} \\}, y _ {t + 1} \\sim \\Omega_ {x _ {t + 1}} & \\text {i f} a = \\psi \\\\ s ^ {\\prime} = (\\psi^ {\\prime}, \\mathcal {D} _ {1} = \\{x _ {1}, y _ {1} \\}, \\{\\Omega_ {x} \\}) \\\\ \\quad \\text {s . t .} x _ {1} \\sim \\{\\eta_ {x | X ^ {\\prime}} \\}, y _ {1} \\sim \\Omega_ {x _ {1}} & \\text {i f} a = \\psi^ {\\prime} = (X ^ {\\prime}, \\tau^ {\\prime}, \\rho^ {\\prime}) \\neq \\psi , \\end{array} \\right. \\end{array} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.475, + 0.372, + 0.491 + ], + "angle": 0, + "content": "- \\(\\mathcal{O}(s' = \\varnothing) \\doteq \\varnothing\\) and" + }, + { + "type": "equation", + "bbox": [ + 0.312, + 0.498, + 0.741, + 0.516 + ], + "angle": 0, + "content": "\\[\n\\mathcal {O} \\left(s ^ {\\prime} = (\\psi , \\mathcal {D} _ {t + 1} = \\mathcal {D} _ {t} \\cup \\{x _ {t + 1}, y _ {t + 1} \\}, \\{\\Omega_ {x} \\})\\right) \\doteq \\left(x _ {t + 1}, y _ {t + 1}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.531, + 0.368, + 0.547 + ], + "angle": 0, + "content": "- \\(\\mathcal{R}(s' = \\emptyset) \\doteq 0\\) and" + }, + { + "type": "equation", + "bbox": [ + 0.261, + 0.555, + 0.792, + 0.572 + ], + "angle": 0, + "content": "\\[\n\\mathcal {R} \\left(s ^ {\\prime} = (\\psi = (X, \\tau , \\rho), \\mathcal {D} _ {t + 1}, \\{\\Omega_ {x} \\})\\right) \\doteq - C _ {\\psi} + R _ {\\psi} \\cdot \\mathbb {1} \\{t + 1 = \\tau \\} \\cdot \\rho \\left(\\mathcal {D} _ {t + 1}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.587, + 0.825, + 0.686 + ], + "angle": 0, + "content": "Since ongoing experiments \\(\\psi\\) are completely dictated by actions, and datasets \\(\\mathcal{D}_t\\) collected by the ongoing experiments consist solely of observations \\((x_t,y_t)\\), the only unobserved component of the states in this POMDP is the true outcome 
distributions \\(\\{\\Omega_x\\}_{x\\in \\mathcal{X}}\\). Hence, the optimal policy should have the form \\(\\pi (\\psi ,\\mathcal{D}_t,b)\\) where \\(b\\in \\Delta (\\mathfrak{O}^{\\mathcal{X}})\\) denotes beliefs over \\(\\{\\Omega_x\\}\\) that is posterior distributions over the true outcome distributions. For instance, when \\(\\Omega_{x} = \\mathcal{N}(\\theta_{x},1)\\) as we have been assuming in Sections 3 and 4, posteriors over mean outcomes \\(\\{\\theta_x\\}_{x\\in \\mathcal{X}}\\), which are given by parameters \\(\\{\\mu_x,\\sigma_x^2\\}\\) such that \\(\\theta_{x}|\\bar{\\mathcal{D}}_{t}^{i}\\sim \\mathcal{N}(\\mu_{x},\\sigma_{x}^{2})\\), constitute as beliefs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.691, + 0.827, + 0.748 + ], + "angle": 0, + "content": "Now although an OCP can be expressed as a POMDP, doing so is not particularly helpful in finding a solution. As we have already discussed in Section 3, the standard approach to solving a POMDP would be to use dynamic programming and compute the optimal value function \\( V^{*} \\) and the optimal Q-function \\( Q^{*} \\) iteratively according to Bellman optimality conditions" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.755, + 0.697, + 0.792 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q ^ {*} (b, a) = \\mathbb {E} _ {s \\sim b, s ^ {\\prime} \\sim \\mathcal {T} (s, a), z ^ {\\prime} = \\mathcal {O} (s ^ {\\prime}), b ^ {\\prime} | \\{b, z ^ {\\prime} \\}} \\left[ \\mathcal {R} \\left(s ^ {\\prime}\\right) + V ^ {*} \\left(b ^ {\\prime}\\right) \\right] \\\\ V ^ {*} (b) = \\max _ {a \\in \\mathcal {A}} Q ^ {*} (b, a), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.827, + 0.926 + ], + "angle": 0, + "content": "where \\( b'|\\{b, z'\\} \\) denotes the updated belief \\( b' \\) after having belief \\( b \\) and making a new observation \\( z' \\). 
When the state space \\( S \\) is discrete—or equivalently in our case, when the space of outcome distributions \\( \\Omega \\in \\mathfrak{D} \\) is discrete—\\( V^* \\) and \\( Q^* \\) happen to be convex functions, which makes it possible to perform these iterations efficiently by approximating \\( V^* \\) and \\( Q^* \\) using functions of the form \\( f(b) = \\max\\{a_i b + a_j'\\} \\) (Spaan, 2012). However, even in the simplest of cases where \\( S \\) is continuous—or equivalently, the space of outcome distributions \\( \\Omega \\in \\mathfrak{D} \\) is continuous, for instance when \\( \\Omega_x = \\mathcal{N}(\\theta_x, 1) \\)—the convexity of \\( V^* \\) and \\( Q^* \\) no longer generally holds. In fact, we show in Proposition 1 that neither \\( V^* \\) nor \\( -V^* \\) is convex with respect to beliefs \\( b \\equiv \\{t, \\mu\\} \\) for at least one instance of the simplified OCP that we have analyzed in Section 3." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.176, + 0.084, + 0.822, + 0.099 + ], + "angle": 0, + "content": "Table 4: Performance comparison between Futility Stopping with RL-based algorithms and with Bayes-OCP." + }, + { + "type": "table", + "bbox": [ + 0.2, + 0.103, + 0.797, + 0.465 + ], + "angle": 0, + "content": "
Algorithms:Oracle RCTFutility Stopping w/ Discretized RLFutility Stopping w/ Deep Q-learningFutility Stopping w/ Bayes-OCP
All Instances (100%)Utility260.4131.8 (4.3)78.8 (3.1)150.0 (3.5)
FWER0.0%0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.6 (0.0)0.7 (0.0)0.5 (0.0)
Success75.2%41.0% (1.0%)24.2% (0.8%)45.4% (1.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F35.654.3 (2.2)23.6 (1.8)57.6 (4.6)
Green Instances (47.3%)Utility389.6309.5 (4.1)185.0 (4.9)337.7 (5.7)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.2 (0.0)0.5 (0.0)0.1 (0.0)
Success99.0%80.9% (0.7%)47.7% (1.1%)86.0% (1.4%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F600.072.7 (7.1)11.0 (0.8)46.6 (7.6)
Amber Instances (29.4%)Utility258.6-23.9 (5.4)-16.6 (6.8)-5.3 (5.4)
FWER0.0%0.2% (0.2%)0.1% (0.1%)0.4% (0.3%)
Switches1.00.9 (0.0)0.9 (0.0)0.8 (0.0)
Success96.6%9.1% (0.8%)5.2% (1.3%)15.2% (2.0%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F600.066.1 (4.3)39.5 (4.4)78.3 (9.3)
Red Instances (23.3%)Utility0.0-33.0 (1.6)-16.5 (5.2)-35.1 (1.7)
FWER0.0%0.1% (0.2%)0.1% (0.2%)0.1% (0.2%)
Switches1.01.0 (0.0)1.0 (0.0)1.0 (0.0)
Success0.0%0.2% (0.2%)0.4% (0.4%)0.9% (0.3%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F0.033.7 (0.9)18.3 (4.8)38.9 (2.1)
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.505, + 0.665, + 0.519 + ], + "angle": 0, + "content": "A.1 EXPERIMENTS WITH REINFORCEMENT LEARNING BENCHMARKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.54, + 0.827, + 0.751 + ], + "angle": 0, + "content": "Having said all that, one naive way to still compute \\( V^{*} \\) and \\( Q^{*} \\) iteratively according to Bellman optimality conditions is to discretize the belief space. We call this benchmark Discretized RL and we use it to perform futility stopping—that is when \\( |\\Psi| = 1 \\), deciding whether to stop the only viable experiment design early or not. Otherwise, the dimensionality of the belief state explodes combinatorially with respect to \\( |\\Psi| \\). We consider the same setting that we have considered during our experiments in Section 6 and compare the performance of Futility Stopping with Discretized RL with that of Futility Stopping with Bayes-OCP. When implementing discretized RL, instead of keeping track of the entire dataset \\( \\mathcal{D}_t \\), we only keep track of the sufficient statistic \\( \\mu_t = \\sum_{(x_{t'}, y_{t'})} y_{t'} / |\\mathcal{D}_t| \\), restrict the domain of \\( \\mu_t \\) to interval \\([-0.3, 0.3]\\), and discretize this interval into 100 equally-spaced bins. In addition to discretized RL, we also consider the approach proposed by Ni et al. (2022) for solving complex classes of POMDPs, which the optimal commitment problem is one of. Briefly, we employ deep Q-learning (as such, we call this benchmark Deep Q-learning) to train a neural network as an approximation of the Q-function \\( Q^{*}(b, a) \\) using the POMDP we formalized earlier as a simulator. As the network architecture, we consider a multi-layer perceptron with two hidden layers of size 100 and with tanh activations." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.758, + 0.828, + 0.925 + ], + "angle": 0, + "content": "Results are given in Table 4; futility stopping with Bayes-OCP performs better than futility stopping with discretized RL as well as futility stopping with deep Q-learning. In addition to the bad performance of discretized RL, it is also not feasible to scale it to use cases beyond futility stopping. When \\( |\\Psi| > 1 \\), we would need to keep separate track of each \\( \\mu_x \\). Moreover, we would also need to start keeping track of the scale parameters \\( \\{\\sigma_x\\} \\) since it would now be possible to distribute samples among multiple atomic-populations in multiple ways by targeting different populations with different experiments (we no longer would be able to treat the target population of the only viable experiment design as the only atomic-population there is). Noting that \\( \\sigma_x \\)'s already take discrete values with at least \\( \\tau \\)-many possible values, merely increasing the number of viable experiments \\( |\\Psi| \\) from one to two causes the dimensionality of the belief space to jump from 100 to \\( \\sim (100 \\times 600)^2 = 36 \\times 10^8 \\). Deep Q-learning performs even worse as it ignores all structure present in the optimal commitment problem, and instead, views the POMDP that describes it as a black-box simulator." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.263, + 0.102, + 0.733, + 0.116 + ], + "angle": 0, + "content": "Table 5: Performance comparison of algorithms with family-wise error control." + }, + { + "type": "table", + "bbox": [ + 0.173, + 0.119, + 0.827, + 0.398 + ], + "angle": 0, + "content": "
Algorithms:Oracle RCTRCTAdaptive Enrichment w/ Bonferroni Corr.Futility Stopping w/ Bayes-OCPGreedy Bayes-OCP w/ Bonferroni Corr.Bayes-OCP w/ Bonferroni Corr.
All Instances (100%)Utility260.4-39.4 (6.7)91.4 (5.4)150.0 (3.5)23.7 (2.2)158.7 (5.2)
FWER0.0%0.3% (0.1%)0.1% (0.1%)0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.0 (0.0)0.5 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success75.2%56.1% (0.7%)51.1% (0.7%)45.4% (1.3%)7.7% (0.5%)49.3% (1.5%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)606.2 (2.3)616.6 (1.7)
T-to-F35.6600.0 (0.0)543.0 (15.7)57.6 (4.6)2.3 (0.3)65.8 (7.0)
Green Instances (47.3%)Utility389.6388.7 (3.9)378.8 (3.1)337.7 (5.7)46.1 (3.3)325.8 (5.5)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)1.0 (0.0)0.2 (0.0)
Success99.0%98.9% (0.4%)96.1% (0.6%)86.0% (1.4%)13.8% (0.8%)84.7% (1.5%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)604.9 (2.0)604.0 (0.7)
T-to-F600.0600.0 (0.0)768.6 (19.0)46.6 (7.6)2.0 (0.3)66.5 (17.7)
Amber Instances (29.4%)Utility258.6-300.3 (19.8)-51.9 (15.6)-5.3 (5.4)8.3 (2.9)44.6 (4.6)
FWER0.0%0.7% (0.3%)0.3% (0.1%)0.4% (0.3%)0.0% (0.0%)0.2% (0.2%)
Switches1.00.0 (0.0)0.8 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success96.6%30.0% (2.0%)18.2% (2.2%)15.2% (2.0%)3.8% (0.9%)30.7% (1.4%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)613.4 (5.8)670.6 (8.9)
T-to-F600.0600.0 (0.0)724.6 (9.4)78.3 (9.3)2.6 (0.6)95.5 (18.4)
Red Instances (23.3%)Utility0.0-579.2 (4.1)-312.5 (2.3)-35.1 (1.7)-2.2 (0.3)-37.0 (2.4)
FWER0.0%0.2% (0.3%)0.2% (0.3%)0.1% (0.2%)0.0% (0.0%)0.1% (0.2%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%2.1% (0.4%)1.1% (0.4%)0.9% (0.3%)0.1% (0.2%)1.1% (0.7%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)649.8 (62.6)
T-to-F0.0600.0 (0.0)334.9 (3.7)38.9 (2.1)2.4 (0.5)39.8 (2.5)
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.419, + 0.49, + 0.433 + ], + "angle": 0, + "content": "B DISCUSSION ON ERROR CONTROL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.452, + 0.825, + 0.578 + ], + "angle": 0, + "content": "Bayes-OCP is a method for managing experiments—that is deciding what experiment to conduct and when—as opposed to a hypothesis testing strategy in and of itself. Implication of this in terms of error control is that the type 1 error of any individual experiment run by Bayes-OCP can always be controlled by choosing an appropriate experimental design, in particular, by specifying an appropriate success criterion \\(\\rho\\). This individual-level error control built into the design of each experiment is not compromised by Bayes-OCP; no aggregate data from multiple experiments is ever fed into the success criterion of one alone (see Section 2, experiment \\(\\psi^i\\) is successful if \\(\\rho^i(\\bar{D}_t^i) = 1\\) not if \\(\\rho^i(\\bar{\\mathcal{D}}_t^i) = 1\\)); and any assumptions made by Bayes-OCP regarding outcomes in Section 4, whether accurate or inaccurate, have no effect on the results produced by an external success criterion." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.585, + 0.827, + 0.78 + ], + "angle": 0, + "content": "While Bayes-OCP does not compromise the individual error control of experiments, neither does it control their collective family-wise error rate (FWER)—that is the probability of at least one experiment among all that are conducted making a false discovery. Bayes-OCP views the problem of managing experiments purely as a utility maximization problem with no additional constraints. Within the scope of our discussion, the purpose of measuring FWER as a metric is to check empirically whether the individual error rates are inflated or not (note that FWER is a stricter notion of error than individual error rate). 
In practice, depending on how closely related the managed experiments are, controlling FWER might not necessarily be a concern. Let us highlight this: Any algorithm that manages experiments for long enough is bound to make at least one false discovery. Each year more than a thousand clinical trials are launched (that eventually post results) and more than half of these trials succeed (Takebe et al., 2018; Cli). If the type 1 error rate of all these trials were \\( \\% 5 \\), we would expect at least 25 false discoveries in a year, which is more than one hence it would have put FWER of all real-world trials at almost \\( 100\\% \\) when measured in a year-by-year basis. Of course, this is not problematic since not all clinical trials are related to each other closely enough to be considered a family." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.8, + 0.579, + 0.813 + ], + "angle": 0, + "content": "B.1 EXPERIMENTS WITH FAMILY-WISE ERROR CONTROL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.825, + 0.926 + ], + "angle": 0, + "content": "When controlling FWER is of concern, Bayes-OCP can easily be adapted to satisfy this additional constraint by first limiting the number of total experiments that can be conducted—that is putting an upper bound on \\( n \\) and then using well-established methods for family-wise error control such as Bonferroni correction or alpha spending functions (Demets and Lan, 1994) to adjust the success criteria of the viable experiments in \\( \\Psi \\). We run additional experiments to evaluate the performance of Bayes-OCP with Bonferroni correction. 
We consider the same setting that we have considered during our experiments in Section 6 except for one difference: We limit the number of experiments" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.211, + 0.102, + 0.786, + 0.115 + ], + "angle": 0, + "content": "Table 6: Performance comparison when the ground-truth outcome distributions are not Gaussian." + }, + { + "type": "table", + "bbox": [ + 0.172, + 0.119, + 0.827, + 0.442 + ], + "angle": 0, + "content": "
Algorithms:Oracle RCTRCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
All Instances (100%)Utility266.5-38.4 (14.7)110.0 (9.5)150.8 (8.5)46.3 (3.8)178.2 (7.3)
FWER0.0%0.1% (0.1%)0.0% (0.1%)0.0% (0.1%)0.0% (0.0%)0.0% (0.1%)
Switches0.50.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success76.6%56.2% (1.5%)53.6% (1.5%)46.5% (1.8%)14.7% (1.1%)54.9% (1.7%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)607.3 (1.0)617.2 (1.8)
T-to-F32.5600.0 (0.0)563.5 (9.9)65.8 (3.8)4.3 (0.5)81.9 (7.1)
Green Instances (48.0%)Utility391.3388.0 (4.1)383.7 (3.1)343.3 (4.4)89.4 (6.7)348.7 (3.5)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)0.9 (0.0)0.1 (0.0)
Success99.1%98.8% (0.4%)97.3% (0.4%)87.6% (0.7%)26.6% (2.0%)89.3% (0.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)605.8 (1.2)602.2 (1.3)
T-to-F600.0600.0 (0.0)710.1 (33.9)54.8 (10.0)4.2 (1.5)77.2 (10.6)
Amber Instances (29.9%)Utility263.5-316.2 (18.2)-13.1 (3.6)-18.7 (8.8)14.1 (2.4)67.6 (6.4)
FWER0.0%0.2% (0.3%)0.1% (0.3%)0.1% (0.3%)0.0% (0.0%)0.1% (0.3%)
Switches1.00.0 (0.0)0.7 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success97.2%28.4% (1.8%)22.6% (1.1%)14.7% (1.5%)6.3% (0.8%)39.3% (2.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)617.5 (6.1)670.6 (6.1)
T-to-F600.0600.0 (0.0)765.5 (11.4)91.0 (6.0)4.3 (1.3)126.0 (16.6)
Red Instances (22.1%)Utility0.0-588.3 (4.6)-316.6 (12.2)-37.0 (4.5)-3.8 (1.1)-41.7 (3.0)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%1.2% (0.5%)1.0% (0.5%)0.5% (0.2%)0.2% (0.2%)1.6% (0.4%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)654.7 (21.8)
T-to-F0.0600.0 (0.0)341.9 (5.1)39.4 (4.2)4.2 (1.0)46.4 (3.9)
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.46, + 0.828, + 0.545 + ], + "angle": 0, + "content": "that can be conducted by each algorithm as at most two, and we specify \\(\\alpha = F^{-1}(0.975)\\) for algorithms that can potentially run more than one experiment—namely, adaptive enrichment and (Greedy) Bayes-OCP—while we still specify \\(\\alpha = F^{-1}(0.95)\\) for algorithms that always run exactly one experiment—namely, RCT and futility stopping. These specifications ensure that FWER of all algorithms are bounded by \\(5\\%\\). Results are given in Table 5; Bayes-OCP still performs the best when explicit control of FWER is required." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.561, + 0.725, + 0.575 + ], + "angle": 0, + "content": "C EXPERIMENTS WITH MISSPECIFIED OUTCOME DISTRIBUTIONS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.587, + 0.828, + 0.728 + ], + "angle": 0, + "content": "We consider the same setting that we have considered during our experiments in Section 6. Except now, the ground-truth outcome distributions are such that, when \\( y \\sim \\Omega_x \\), \\( y = 1 \\) with probability \\( (\\theta_x + 1) / 2 \\) and \\( y = -1 \\) otherwise. In order to ensure that \\( \\theta_x \\in [-1,1] \\), we also sample ground-truth mean outcomes so that \\( \\theta_x = 2p - 1 \\) where \\( p \\) is distributed according to Beta distribution with \\( \\alpha = 979 / 200 \\) and \\( \\beta = 801 / 200 \\) (note that the mean and variance of \\( \\theta_x \\) remains the same as in our original experiments). Despite the fact that outcomes are now distributed in a non-Gaussian way, we leave the implementation of Bayes-OCP unchanged, which still assumes that outcomes distributions are Gaussian. So, there is now a mismatch between the structure of outcome distributions specified as part of Bayes-OCP and the ground-truth outcome distributions. 
Results are given in Table 6; Bayes-OCP still does not inflate FWER despite the misspecified outcome distributions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.74, + 0.631, + 0.755 + ], + "angle": 0, + "content": "D EXPERIMENTS WITH MORE ATOMIC-POPULATIONS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.762, + 0.828, + 0.862 + ], + "angle": 0, + "content": "We repeat our main experiments with more than two atomic-populations, specifically we set \\( |\\mathcal{X}| = 10 \\). As before, all atomic-populations have equal propensities such that \\( \\eta_x = 1/10, \\forall x \\in \\mathcal{X} \\), and the meta-experimenter has the same positively-biased prior for the mean outcome associated with each atomic population: \\( \\theta_x \\sim \\mathcal{N}(0.1, 0.1) \\), \\( \\forall x \\in \\mathcal{X} \\). We randomly generated 100 environment (repeated five times to obtain error bars), and the results are given in Table 7. We observe that Bayes-OCP still performs the best. These results confirm that a greedy approximation is suitable in identifying candidate experiments when the number of atomic-populations is large." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.873, + 0.407, + 0.888 + ], + "angle": 0, + "content": "E SENSITIVITY ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Bayes-OCP has one hyper-parameter: \\(\\beta\\), which controls how optimistic the switching rule given in line 14 of Algorithm 1 is, from \\(\\beta = 1/2\\) meaning decisions are made greedily to \\(\\beta = 1\\) meaning" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.26, + 0.102, + 0.737, + 0.116 + ], + "angle": 0, + "content": "Table 7: Performance comparison when the number of atomic-populations is 10." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.119, + 0.825, + 0.233 + ], + "angle": 0, + "content": "
Algorithms:RCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
All Instances (100%)Utility8.0 (39.2)143.9 (31.1)141.0 (27.5)40.3 (5.9)172.4 (23.8)
FWER0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success60.8% (3.9%)71.0% (3.6%)51.2% (4.7%)15.6% (2.6%)63.2% (4.2%)
T-to-S600.0 (0.0)678.9 (8.3)600.0 (0.0)648.5 (13.2)647.5 (4.4)
T-to-F600.0 (0.0)672.7 (58.8)130.4 (12.5)9.3 (5.0)200.5 (72.2)
" + }, + { + "type": "image", + "bbox": [ + 0.285, + 0.258, + 0.71, + 0.428 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.254, + 0.432, + 0.743, + 0.447 + ], + "angle": 0, + "content": "Figure 5: Utility achieved by Bayes-OCP for various values of hyper-parameter \\(\\beta\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.477, + 0.825, + 0.589 + ], + "angle": 0, + "content": "decisions are so extremely optimistic that the original experiment will never be abandoned (as there will always be a chance that it succeeds). As with all online algorithms, tuning \\(\\beta\\) is challenging since no a priori data would be available to perform cross validation. However, a nice feature of Bayes-OCP is that \\(\\beta\\) is rather interpretable, it is the evidence required against the ongoing experiment: An alternative experiment is preferred over the ongoing experiment only if it is believed to be the better experiment with at least \\(\\beta\\)-confidence. We evaluate the sensitivity of Bayes-OCP's performance to hyper-parameter \\(\\beta\\) in Figure 5; Bayes-OCP performs better than an RCT for all configurations and better than adaptive enrichment for most configurations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.612, + 0.334, + 0.627 + ], + "angle": 0, + "content": "F FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.825, + 0.73 + ], + "angle": 0, + "content": "Extending the scope of Bayes-OCP One limitation of Bayes-OCP is that it only adapts the target population \\(X\\subseteq \\mathcal{X}\\) of experiments but not the sample horizon \\(\\tau\\) or the success criterion \\(\\rho\\). We have chosen to focus on the selection of a target population since we believe the target population of an experiment to be the most critical design dimension to adjust adaptively. 
As we have already highlighted in our introduction, experiments with inflexible target populations can be problematic when responses to the treatment of interest are highly heterogeneous." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.827, + 0.821 + ], + "angle": 0, + "content": "That being said, the high-level strategy of our proposed algorithm should still be applicable to adapting design dimensions other than the target population, namely \\(\\tau\\) and \\(\\rho\\). At a high level, Bayes-OCP first identifies a candidate experiment and then compares the identified experiment to the ongoing experiment in a n optimistic manner. Regardless of the given set of viable experiment design \\(\\Psi\\), one could still follow the same strategy; the only complication would be to adapt how candidate experiments are identified depending on what design dimension varies across experiment designs in \\(\\Psi\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.825, + 0.925 + ], + "angle": 0, + "content": "For instance, when experiment designs varied in terms of \\( X \\), a combinatorial search was required to identify good candidate experiments, for which we proposed a greedy strategy. When experiment designs vary in terms of \\( \\rho \\), a simple search over all possible \\( \\rho \\) would suffice for identifying candidate experiment. The case where experiment designs vary in terms of \\( \\tau \\) is more complex; optimal \\( \\tau \\) for an experiment would be dependent on unknown effects \\( \\theta_{x} \\); selecting a good candidate experiment would involve estimating the optimal \\( \\tau \\) given posteriors over \\( \\theta_{x} \\). This would be an interesting problem to explore as a future research direction." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "Performance guarantees While our theoretical results motivate the general use of an optimistic decision rule, they do not provide any guarantees about the performance of the specific rule we propose as part of Bayes-OCP. Another future research direction would be to prove an upper bound on the sub-optimality gap of Bayes-OCP." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.18, + 0.557, + 0.196 + ], + "angle": 0, + "content": "G FURTHER DISCUSSION ON MAIN RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.209, + 0.828, + 0.404 + ], + "angle": 0, + "content": "Table 3 report six metrics: Utility, FWER, Switches, Success, T-to-S, and T-to-F. We have already discussed the implications of Utility and FWER in Section 6. Here, we highlight other interesting phenomena regarding the remaining metrics. First, we see that Greedy Bayes-OCP switches experiments much more frequently compared with Bayes-OCP. This is because Greedy Bayes-OCP requires less evidence against the ongoing experiment when comparing it against an alternative experiment, whereas, Bayes-OCP favors the ongoing experiments more. Second, we see that a higher success probability does not necessarily also imply a higher utility. For instance, compare RCT with futility stopping, futility stopping is able to achieve higher utility than RCT by terminating risky experiments early and saving costs. However, this of course also means that futility stopping sees fewer experiments to completion hence leads to a lower success probability. 
Finally, we see that succeeding or failing early does not necessarily imply a higher utility either. Our best algorithm Bayes-OCP succeeds the latest on average as well as fails the latest compared with other benchmarks favoring red instances. This highlights the importance being conservative when making decisions, being optimistic, and favoring the status quo more than a potential adaptation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.423, + 0.566, + 0.438 + ], + "angle": 0, + "content": "H FURTHER DISCUSSION ON RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.452, + 0.827, + 0.592 + ], + "angle": 0, + "content": "Multi-armed bandits The optimal commitment problem is similar to a multi-armed bandit (MAB) problem (Auer et al., 2002; Bubeck et al., 2012) in some aspects: Like arms in a MAB problem, each experiment design \\(\\psi\\) has a random utility given by \\(R_{\\psi} \\cdot \\rho(\\mathcal{D}_{\\tau}) - \\tau C_{\\psi}\\), where \\(\\mathcal{D}_{\\tau}\\) is the source of randomness, and the distribution of this utility is unknown. Also similar to a MAB problem, the overall goal is to sequentially select experiment designs (cf. arms) that yield the maximum cumulative utility. The main difference between the two problems is that, in a MAB problem, selecting an arm immediately reveals a sample from its random utility, while in optimal commitment, running an experiment \\(\\psi\\) just for one time step only incurs a cost of \\(C_{\\psi}\\); observing a full sample of its random utility requires the experiment to be run until its completion for \\(\\tau\\) consecutive time steps, without selecting any other experiment design in the meantime." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.598, + 0.827, + 0.724 + ], + "angle": 0, + "content": "One can naively apply a MAB algorithm by viewing each viable experiment design as a unique arm, and by running experiments/arms selected by the algorithm until their completion to observe full samples from their unknown utility distributions. However, this obviously side steps the main question we want to answer in optimal commitment: When can we abandon a commitment—in this case, the decision to run an experiment/arm selection until its completion—before fully observing its outcome? Looking at optimal commitment from a MAB perspective reveals that there are two explore-exploit dilemmas present in optimal commitment: One is with respect to which experiment to select next, and the other is with respect to when to preemptively stop the current experiment (i.e. breaking a commitment). MAB algorithms address the former dilemma but not the latter." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.73, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Task replication in parallel computing There is work (Ghare and Leutenegger, 2005; Wand et al., 2014; Wang et al., 2019) that focuses on the problem of when to kill existing tasks and relaunch them in parallel computing, which is related to optimal stopping/switching. However there, the focus is on reasoning about when a stochastic event (i.e. successful completion of a computational task) will occur without any extra information other than the fact that the event of interest has not occurred yet. In contrast, in our setting, the decision-maker needs to process a streaming set of samples to reason about the random outcome of an event that is scheduled to happen at a deterministic time point (here, the event is an experiment reaching its conclusion). This means that our problem has a completely different information structure when compared with the problem of task replication. 
More formally, we observe samples \\( y_{t} \\) that are informative of whether \\( \\rho(\\mathcal{D}_{\\tau}) = 1 \\) when \\( \\tau \\) is a fixed variable. In contrast, the problem of task replication would correspond to the setting where \\( \\tau \\) is a random variable with a known distribution and \\( \\rho = 1 \\) always holds (hence no need to observe any samples \\( y_{t} \\)). Among optimal stopping/switching problems, the structure of our problem is more closely related to sequential hypothesis testing, which we have already covered in Section 5." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.423, + 0.119 + ], + "angle": 0, + "content": "I PROOFS OF PROPOSITIONS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.135, + 0.396, + 0.149 + ], + "angle": 0, + "content": "I.1 PROOF OF PROPOSITION 1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.16, + 0.827, + 0.177 + ], + "angle": 0, + "content": "We start by relating the optimal value function \\( V^{*} \\) to the optimal Q-function \\( Q^{*} \\). 
Letting \\( T_{t}^{*} = T_{t}^{\\pi^{*}} \\)," + }, + { + "type": "equation", + "bbox": [ + 0.184, + 0.184, + 0.824, + 0.343 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} V ^ {*} (t, \\mu) \\\\ = \\mathbb {E} [ R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\left\\{T _ {t} ^ {*}, \\tau \\right\\} - t) | \\mu_ {t} = \\mu ] \\\\ = \\mathbb {E} \\left[ \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\varnothing \\right\\} \\left(R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t} ^ {*}, \\tau \\right\\} - t\\right)\\right) \\right. \\\\ + \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\Psi_ {0} \\right\\} \\left(R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t} ^ {*}, \\tau \\right\\} - t)\\right) | \\mu_ {t} = \\mu \\right] \\\\ = \\mathbb {E} [ \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\varnothing \\right\\} \\cdot 0 \\\\ + \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\Psi_ {0} \\right\\} \\left(R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {*}, \\tau \\right\\} - t)\\right) | \\mu_ {t} = \\mu \\right] (11) \\\\ = \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu) = \\Psi_ {0} \\right\\} \\cdot \\mathbb {E} [ R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {*}, \\tau \\right\\} - t\\right) | \\mu_ {t} = \\mu ] (12) \\\\ = \\mathbb {1} \\left\\{Q ^ {*} (t, \\mu) > 0 \\right\\} \\cdot Q ^ {*} (t, \\mu) (13) \\\\ = \\max \\left\\{0, Q ^ {*} (t, \\mu) \\right\\}, (14) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.35, + 0.827, + 0.435 + ], + "angle": 0, + "content": "where (11) holds since 
\\(\\pi^{*}(t,\\mu_{t}) = \\varnothing \\Rightarrow T_{t}^{*} = t\\) and \\(\\pi^{*}(t,\\mu_{t}) = \\Psi_{0} \\Rightarrow T_{t}^{*} \\geq t + 1 \\Rightarrow T_{t}^{*} = \\min \\{t^{\\prime} \\geq t : \\pi^{*}(t^{\\prime},\\mu_{t^{\\prime}}) = \\emptyset\\} = \\min \\{t^{\\prime} \\geq t + 1 : \\pi^{*}(t^{\\prime},\\mu_{t^{\\prime}}) = \\emptyset\\} = T_{t + 1}^{*}\\), (12) holds since \\(\\mu_{\\tau} \\perp \\mathbb{1}\\{\\pi^{*}(t,\\mu_{t}) = \\Psi_{0}\\}\\) and \\(T_{t + 1}^{*} \\perp \\mathbb{1}\\{\\pi^{*}(t,\\mu_{t}) = \\Psi_{0}\\}\\) when conditioned on \\(\\mu_{t} = \\mu\\), and (13) holds since \\(\\pi^{*}(t,\\mu) = \\Psi_{0} \\iff Q^{*}(t,\\mu) > 0\\). Intuitively, the maximum possible value at a given time is achieved either by stopping immediately or by conducting the experiment for at least one more time step and then following the optimal policy thereafter." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.441, + 0.32, + 0.455 + ], + "angle": 0, + "content": "Next, we observe that" + }, + { + "type": "equation", + "bbox": [ + 0.177, + 0.464, + 0.824, + 0.714 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {P} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\mid \\mu_ {t} = \\mu \\right\\} = \\int \\mathbb {P} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\mid \\theta , \\mu_ {t} = \\mu \\right\\} \\mathrm {d} \\mathbb {P} \\left\\{\\theta \\mid \\mu_ {t} = \\mu \\right\\} \\\\ = \\int F \\left(\\mu^ {\\prime} - \\frac {\\theta + t \\mu}{t + 1}; \\frac {1}{(t + 1) ^ {2}}\\right) f (\\theta - \\mu ; ^ {1} / t) d \\theta \\\\ = \\iint \\mathbb {1} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\right\\} f \\left(\\mu_ {t + 1} - \\frac {\\theta + t \\mu}{t + 1}; \\frac {1}{(t + 1) ^ {2}}\\right) f (\\theta - \\mu ; ^ {1} / t) d \\mu_ {t + 1} d \\theta \\\\ = \\iint \\mathbb {1} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\right\\} f \\left(\\mu_ {t + 1} - \\frac {y + (t + 1) \\mu}{t + 1}; \\frac {1}{(t + 1) ^ {2}}\\right) f (y; ^ {1} / t) 
d \\mu_ {t + 1} d y \\\\ = \\iint \\mathbb {1} \\left\\{x + \\frac {y + (t + 1) \\mu}{t + 1} \\leq \\mu^ {\\prime} \\right\\} f \\left(x; ^ {1} / (t + 1) ^ {2}\\right) f \\left(y; ^ {1} / t\\right) d x d y \\\\ = \\mathbb{P}_{\\substack{X\\sim \\mathcal{N}(0,1 / (t + 1)^{2})\\\\ Y\\sim \\mathcal{N}(0,1 / t)}}\\Bigg\\{X + \\frac{Y}{t + 1}\\leq \\mu^{\\prime} - \\mu \\Bigg\\} \\\\ = \\mathbb {P} _ {X + Y / (t + 1) \\sim \\mathcal {N} (0, 1 / t - 1 / t + 1)} \\left\\{X + \\frac {Y}{t + 1} \\leq \\mu^ {\\prime} - \\mu \\right\\} \\\\ = F \\left(\\mu^ {\\prime} - \\mu ; ^ {1} / t - ^ {1} / t + 1\\right), \\tag {15} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.721, + 0.827, + 0.772 + ], + "angle": 0, + "content": "where \\( f(x; \\sigma^2) = (1 / \\sqrt{2\\pi\\sigma^2})e^{-(1/2)x^2 / \\sigma^2} \\) and \\( F(x; \\sigma^2) = (1 / \\sqrt{2\\pi\\sigma^2})\\int_{-\\infty}^{x}e^{-(1/2)x'^2 / \\sigma^2}dx' \\) are the p.d.f. and the c.d.f. of the Gaussian distribution with mean zero and variance \\( \\sigma^2 \\) respectively. Hence \\( \\mathrm{d}\\mathbb{P}\\{\\mu_{t+1} = \\mu'| \\mu_t = \\mu\\} = f(\\mu' - \\mu; 1/t - 1/t+1)d\\mu' \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.777, + 0.827, + 0.806 + ], + "angle": 0, + "content": "Then, using the relationship between \\(V^{*}\\) and \\(Q^{*}\\) and the observation regarding \\(\\mathbb{P}\\{\\mu_{t + 1} \\leq \\mu' | \\mu_t = \\mu\\}\\), we drive the following Bellman optimality condition:" + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.814, + 0.808, + 0.926 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q ^ {*} (t, \\mu) = \\mathbb {E} [ R \\cdot \\mathbb {1} \\{T _ {t + 1} ^ {*} > \\tau \\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\{T _ {t + 1} ^ {*}, \\tau \\} - t) | \\mu_ {t} = \\mu ] \\\\ = - C + \\mathbb {E} \\left[ R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {*}, \\tau \\right\\} - t - 1\\right) \\mid \\mu_ {t} = \\mu \\right] \\\\ = - C + \\int \\mathbb {E} [ R \\cdot \\mathbb {1} \\{T _ {t + 1} ^ {*} > \\tau \\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\{T _ {t + 1} ^ {*}, \\tau \\} - t - 1) | \\mu_ {t + 1} = \\mu^ {\\prime} ] \\\\ \\times \\mathrm {d} \\mathbb {P} \\left(\\mu_ {t + 1} = \\mu^ {\\prime} \\mid \\mu_ {t} = \\mu\\right) \\\\ = - C + \\int V ^ {*} (t + 1, \\mu^ {\\prime}) \\mathrm {d} \\mathbb {P} (\\mu_ {t + 1} = \\mu^ {\\prime} | \\mu_ {t} = \\mu) \\\\ = - C + \\int V ^ {*} (t + 1, \\mu^ {\\prime}) f (\\mu^ {\\prime} - \\mu ; 1 / t - 1 / t + 1) d \\mu^ {\\prime} \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.246, + 0.103, + 0.825, + 0.12 + ], + "angle": 0, + "content": "\\[\n= - C + \\int V ^ {*} (t + 1, \\mu + z) f \\left(z; ^ {1} / t - ^ {1} / t + 1\\right) d z \\tag {16}\n\\]" + }, + { 
+ "type": "equation", + "bbox": [ + 0.249, + 0.123, + 0.825, + 0.141 + ], + "angle": 0, + "content": "\\[\n= - C + \\int \\max \\left\\{0, Q ^ {*} (t + 1, \\mu + z) \\right\\} f \\left(z; \\frac {1}{t} - \\frac {1}{t + 1}\\right) d z. \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.155, + 0.658, + 0.17 + ], + "angle": 0, + "content": "For the problem setting where \\(C = 1\\), \\(R = 2\\), \\(\\alpha = 0\\), and \\(\\tau = 2\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.311, + 0.177, + 0.684, + 0.306 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} V ^ {*} (1, \\mu) = \\max \\{0, - 1 + \\int V ^ {*} (2, \\mu + z) f (z; 1 / 2) d z \\} \\\\ = \\max \\{0, - 1 + 2 \\int \\mathbb {I} \\{\\mu + z > 0 \\} f (z; 1 / 2) d z \\} \\\\ = \\max \\left\\{0, - 1 + 2 \\int_ {- \\mu} ^ {\\infty} f (z; 1 / 2) d z \\right\\} \\\\ = \\max \\left\\{0, - 1 + 2 F (\\mu ; ^ {1} / _ {2}) \\right\\} \\\\ = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} \\mu < 0 \\\\ - 1 + 2 F (\\mu ; 1 / 2) & \\text {i f} \\mu \\geq 0 . \\end{array} \\right. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.313, + 0.325, + 0.328 + ], + "angle": 0, + "content": "Notice that, for \\(\\mu > 0\\)," + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.335, + 0.637, + 0.424 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {d ^ {2}}{d \\mu^ {2}} V ^ {*} (1, \\mu) = \\frac {d ^ {2}}{d \\mu^ {2}} \\Big (- 1 + 2 F (\\mu ; 1 / 2) \\Big) \\\\ = \\frac {d}{d \\mu} \\left(2 f (\\mu ; ^ {1} / 2)\\right) \\\\ = - (4 / \\pi) \\mu e ^ {- \\mu^ {2}} < 0 \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.432, + 0.828, + 0.49 + ], + "angle": 0, + "content": "hence \\( V^{*}(1,\\mu) \\) is concave at least on interval \\( \\mu \\in (0,\\infty) \\) and is not a convex function. 
Moreover, \\( -V^{*}(1,\\mu) \\) cannot be a convex function—or equivalently \\( V^{*}(1,\\mu) \\) cannot be a purely concave function—either: For an arbitrary \\( \\mu \\in (0,\\infty), V^{*}(1,\\mu) > 0 \\) and \\( V^{*}(1,-\\mu) = 0 \\) hence \\( (1/2)V^{*}(1,\\mu) + (1/2)V^{*}(1,-\\mu) > 0 \\) but \\( V^{*}(1,(1/2)\\mu + (1/2)(-\\mu)) = V^{*}(1,0) = 0 \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.506, + 0.396, + 0.519 + ], + "angle": 0, + "content": "I.2 PROOF OF PROPOSITION 2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.532, + 0.48, + 0.547 + ], + "angle": 0, + "content": "We will prove the proposition by showing that" + }, + { + "type": "text", + "bbox": [ + 0.206, + 0.552, + 0.733, + 0.569 + ], + "angle": 0, + "content": "(i) \\(Q^{*}(t,\\mu)\\) is non-decreasing in \\(\\mu\\) —that is \\(\\mu < \\mu^{\\prime}\\Rightarrow Q^{*}(t,\\mu)\\leq Q^{*}(t,\\mu^{\\prime})\\)" + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.568, + 0.538, + 0.582 + ], + "angle": 0, + "content": "(ii) \\(\\lim_{t\\to \\infty}Q^{*}(t,\\mu) = -(\\tau -t)C + R > 0\\) , and" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.582, + 0.437, + 0.596 + ], + "angle": 0, + "content": "(iii) \\(\\lim_{t\\to -\\infty}Q^{*}(t,\\mu) = -C < 0\\)" + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.552, + 0.733, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.602, + 0.827, + 0.659 + ], + "angle": 0, + "content": "for all \\( t \\in \\{1, \\dots, \\tau - 1\\} \\) via mathematical induction. 
Notice that these three facts—together with the fact that \\( Q^{*}(t, \\mu) \\) is a continuous function in \\( \\mu \\) for \\( t \\in \\{1, \\dots, \\tau - 1\\} \\)—would imply the existence of a unique \\( \\mu_{t}^{*} \\) such that \\( Q^{*}(t, \\mu_{t}^{*}) = 0 \\), \\( Q^{*}(t, \\mu) > 0 \\iff \\mu > \\mu_{t}^{*} \\), and \\( Q^{*}(t, \\mu) \\leq 0 \\iff \\mu \\leq \\mu_{t}^{*} \\), which in turn would imply that" + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.667, + 0.666, + 0.702 + ], + "angle": 0, + "content": "\\[\n\\pi^ {*} (t, \\mu) = \\left\\{ \\begin{array}{l l} \\Psi_ {0} & \\text {i f} \\mu > \\mu_ {t} ^ {*} \\iff Q ^ {*} (t, \\mu) > 0 \\\\ \\varnothing & \\text {i f} \\mu \\leq \\mu_ {t} ^ {*} \\iff Q ^ {*} (t, \\mu) \\leq 0 , \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.709, + 0.759, + 0.724 + ], + "angle": 0, + "content": "meaning the optimal policy \\(\\pi^{*}\\) is indeed of \"thresholding -type\" as the proposition states." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.73, + 0.548, + 0.744 + ], + "angle": 0, + "content": "First, we observe the following base cases for \\( t = \\tau - 1 \\):" + }, + { + "type": "text", + "bbox": [ + 0.206, + 0.751, + 0.576, + 0.767 + ], + "angle": 0, + "content": "(i) \\(Q^{*}(\\tau -1,\\mu)\\) is non-decreasing in \\(\\mu\\) . 
When \\(\\mu < \\mu^{\\prime}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.3, + 0.773, + 0.825, + 0.85 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q ^ {*} (\\tau - 1, \\mu) = - C + \\int V ^ {*} (\\tau , \\mu + z) f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z (18) \\\\ = - C + R \\int \\mathbb {1} \\left\\{\\mu + z > \\alpha / \\sqrt {\\tau} \\right\\} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z \\\\ \\leq - C + R \\int \\mathbb {1} \\left\\{\\mu^ {\\prime} + z > \\alpha / \\sqrt {\\tau} \\right\\} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z (19) \\\\ = Q ^ {*} (\\tau - 1, \\mu^ {\\prime}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.858, + 0.827, + 0.875 + ], + "angle": 0, + "content": "where (18) is due to (16), and (19) holds since \\(\\mu + z > \\alpha / \\sqrt{\\tau} \\Rightarrow \\mu' + z > \\mu + z > \\alpha / \\sqrt{\\tau}\\)." + }, + { + "type": "text", + "bbox": [ + 0.203, + 0.88, + 0.532, + 0.897 + ], + "angle": 0, + "content": "(ii) \\(\\lim_{\\mu \\to \\infty}Q^{*}(\\tau -1,\\mu) = -C + R > 0\\) since" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.904, + 0.8, + 0.928 + ], + "angle": 0, + "content": "\\[\n\\lim _ {\\mu \\rightarrow \\infty} Q ^ {*} (\\tau - 1, \\mu) = \\lim _ {\\mu \\rightarrow \\infty} \\left(- C + R \\int \\mathbb {1} \\{\\mu + z > \\alpha / \\sqrt {\\tau} \\} f (z; ^ {1 / (\\tau - 1)} - ^ {1 / \\tau}) d z\\right)\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.102, + 0.735, + 0.17 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} = \\lim _ {\\mu \\rightarrow \\infty} \\left(- C + R \\int_ {\\alpha / \\sqrt {\\tau} - \\mu} ^ {\\infty} f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) 
\\\\ = - C + R \\int f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z \\\\ = - C + R. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.176, + 0.51, + 0.193 + ], + "angle": 0, + "content": "(iii) \\(\\lim_{\\mu \\to -\\infty}Q^{*}(\\tau -1,\\mu) = -C < 0\\) since" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.199, + 0.807, + 0.334 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\lim _ {\\mu \\rightarrow - \\infty} Q ^ {*} (\\tau - 1, \\mu) = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\int \\mathbb {1} \\{\\mu + z > \\alpha / \\sqrt {\\tau} \\} f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\int_ {\\alpha / \\sqrt {\\tau} - \\mu} ^ {\\infty} f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\left(1 - \\int_ {- \\infty} ^ {\\alpha / \\sqrt {\\tau} - \\mu} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z\\right)\\right) \\\\ = - C + R \\left(1 - \\int f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = - C. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.341, + 0.686, + 0.357 + ], + "angle": 0, + "content": "Then, we show that the following inductive cases hold for \\(t \\in \\{\\tau - 1, \\dots, 2\\}\\):" + }, + { + "type": "text", + "bbox": [ + 0.206, + 0.362, + 0.825, + 0.392 + ], + "angle": 0, + "content": "(i) Given that \\( Q^{*}(t,\\mu) \\) is non-decreasing in \\( \\mu \\), \\( Q^{*}(t - 1,\\mu) \\) is also non-decreasing in \\( \\mu \\). 
Similar to the base case, when \\( \\mu < \\mu' \\)," + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.397, + 0.823, + 0.451 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f \\left(z, ^ {1 / (t - 1)} - ^ {1 / t}\\right) d z \\tag {20} \\\\ \\leq - C + \\int \\max \\left\\{0, Q ^ {*} (t, \\mu^ {\\prime} + z) \\right\\} f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z \\\\ = Q ^ {*} (t - 1, \\mu^ {\\prime}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.457, + 0.401, + 0.471 + ], + "angle": 0, + "content": "where (20) is due to (17)." + }, + { + "type": "text", + "bbox": [ + 0.203, + 0.477, + 0.825, + 0.52 + ], + "angle": 0, + "content": "(ii) Given \\(\\lim_{\\mu \\to \\infty}Q^{*}(t,\\mu) = -(\\tau -t)C + R\\) and also given that \\(Q^{*}(t,\\mu)\\) is non-decreasing in \\(\\mu\\) we have \\(\\lim_{\\mu \\to \\infty}Q^{*}(t - 1,\\mu) = -(\\tau -t + 1)C + R > 0\\) which can be shown using the sandwich theorem:" + }, + { + "type": "equation", + "bbox": [ + 0.286, + 0.527, + 0.822, + 0.597 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ \\leq - C + \\int \\max \\left\\{0, \\lim _ {\\mu^ {\\prime} \\rightarrow \\infty} Q ^ {*} \\left(t, \\mu^ {\\prime}\\right)\\right\\} f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z \\\\ \\leq - C + (- (\\tau - t) C + R) \\int f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ = - (\\tau - t - 1) C + R. 
\\tag {21} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.272, + 0.612, + 0.823, + 0.737 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ \\geq - C + \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, 1 / (t - 1) - 1 / t) d z \\\\ \\geq - C + \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} \\max \\left\\{0, Q ^ {*} (t, \\mu - | \\mu | ^ {1 / 2} \\right\\} f (z, ^ {1 / (t - 1)} - ^ {1 / t}) d z \\\\ \\geq - C + Q ^ {*} (t, \\mu - | \\mu | ^ {1 / 2}) \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} f \\left(z, 1 / (t - 1) - 1 / t\\right) d z, \\tag {22} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.743, + 0.383, + 0.757 + ], + "angle": 0, + "content": "Finally, observing that" + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.763, + 0.78, + 0.834 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\lim _ {\\mu \\rightarrow \\infty} (2 2) = \\lim _ {\\mu \\rightarrow \\infty} \\left(- C + Q ^ {*} (t, \\mu - | \\mu | ^ {1 / 2}) \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z\\right) \\\\ = - C + \\left(\\lim _ {\\mu^ {\\prime} \\rightarrow \\infty} Q ^ {*} (t, \\mu^ {\\prime})\\right) \\int f (z, 1 / (t - 1) - 1 / t) d z \\\\ = - (\\tau - t - 1) C + R, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.84, + 0.826, + 0.856 + ], + "angle": 0, + "content": "together with bounds (21) and (22), we obtain \\(\\lim_{\\mu \\to \\infty}Q^{*}(t - 1,\\mu) = -(\\tau -t + 1)C + R.\\)" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.862, + 0.826, + 0.904 + ], + "angle": 0, + "content": "(iii) Given \\(\\lim_{\\mu \\to -\\infty}Q^{*}(t,\\mu) = -C < 0\\) and also given that \\(Q^{*}(t,\\mu)\\) is non-decreasing in \\(\\mu\\) and \\(\\lim_{\\mu \\to \\infty}Q^{*}(t,\\mu) > 0\\) so that \\(\\mu_t^*\\) 
exists—we have \\(\\lim_{\\mu \\to -\\infty}Q^{*}(t - 1,\\mu) = -C < 0\\), which again can be shown using the sandwich theorem:" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.909, + 0.753, + 0.926 + ], + "angle": 0, + "content": "\\[\nQ ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, 1 / (t - 1) - 1 / t) d z\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.104, + 0.825, + 0.12 + ], + "angle": 0, + "content": "\\[\n\\geq - C. \\tag {23}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.133, + 0.826, + 0.262 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ = \\int_ {\\mu_ {t} ^ {*} - \\mu} ^ {\\infty} Q ^ {*} (t, \\mu + z) f (z, 1 / (t - 1) - 1 / t) d z (24) \\\\ \\leq - C + R \\int_ {\\mu_ {t} ^ {*} - \\mu} ^ {\\infty} f (z, 1 / (t - 1) - 1 / t) d z (25) \\\\ \\leq - C + R \\left(1 - \\int_ {- \\infty} ^ {\\mu_ {t} ^ {*} - \\mu} f (z, 1 / (t - 1) - 1 / t) d z\\right), (26) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.267, + 0.828, + 0.311 + ], + "angle": 0, + "content": "where (24) holds since \\( Q^{*}(t,\\mu +z) > 0 \\) if and only if \\( z > \\mu_t^* -\\mu \\) and \\( \\max \\{0,Q^{*}(t,\\mu +z)\\} = 0 \\) otherwise, and (25) holds since \\( Q^{*}(t,\\mu)\\leq \\lim_{\\mu^{\\prime}\\to \\infty}Q^{*}(t,\\mu^{\\prime}) = -(\\tau -t)C + R\\leq R \\) for all \\( \\mu \\) as \\( Q^{*}(t,\\mu) \\) is non-decreasing in \\( \\mu \\). 
Finally, observing" + }, + { + "type": "equation", + "bbox": [ + 0.29, + 0.316, + 0.765, + 0.368 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\lim _ {\\mu \\rightarrow - \\infty} (2 6) = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\left(1 - \\int_ {- \\infty} ^ {\\mu_ {t} ^ {*} - \\mu} f \\left(z, \\frac {1}{(t - 1)} - \\frac {1}{t}\\right) d z\\right)\\right) \\\\ = - C, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.374, + 0.747, + 0.391 + ], + "angle": 0, + "content": "together with bounds (23) and (26), we obtain \\(\\lim_{\\mu \\to -\\infty}Q^{*}(t - 1,\\mu) = -C\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.395, + 0.827, + 0.426 + ], + "angle": 0, + "content": "When put together, the base cases and the inductive cases above imply that conditions (i-iii) hold for all \\( t \\in \\{1, \\dots, \\tau - 1\\} \\) hence \\( \\mu_t^* \\) exists for all \\( t \\in \\{1, \\dots, \\tau - 1\\} \\) which concludes our proof." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.441, + 0.396, + 0.455 + ], + "angle": 0, + "content": "I.3 PROOF OF PROPOSITION 3" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.466, + 0.827, + 0.501 + ], + "angle": 0, + "content": "First, we prove the existence of \\(\\mu_t^{\\mathrm{greedy}}\\) for all \\(t\\in \\{0,\\dots ,\\tau -1\\}\\) by driving an analytical formula for \\(V^{(0)}(t,\\mu)\\doteq V^{\\pi^{(0)}}(t,\\mu)\\). 
Letting \\(T_{t}^{(0)} = T_{t}^{\\pi^{(0)}}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.505, + 0.825, + 0.624 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} V ^ {(0)} (t, \\mu) = \\mathbb {E} [ R \\cdot \\mathbb {1} \\{T _ {t} ^ {(0)} > \\tau \\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\{T _ {t} ^ {(0)}, \\tau \\} - t) | \\mu_ {t} = \\mu ] \\\\ = \\mathbb {E} [ R \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\tau - t) | \\mu_ {t} = \\mu ] (27) \\\\ = - C + \\int \\mathbb {E} [ R \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\tau - t - 1) | \\mu_ {t + 1} = \\mu^ {\\prime} ] \\mathrm {d} \\mathbb {P} (\\mu_ {t + 1} = \\mu^ {\\prime} | \\mu_ {t} = \\mu) \\\\ = - C + \\int V ^ {(0)} (t + 1, \\mu^ {\\prime}) \\mathrm {d} \\mathbb {P} (\\mu_ {t + 1} = \\mu^ {\\prime} | \\mu_ {t} = \\mu) \\\\ = - C + \\int V ^ {(0)} (t + 1, \\mu^ {\\prime}) f \\left(\\mu^ {\\prime} - \\mu ; ^ {1} / t - ^ {1} / (t + 1)\\right) d \\mu^ {\\prime} (28) \\\\ = - C + \\int V ^ {(0)} (t + 1, \\mu + z) f \\left(z; \\frac {1}{t} - \\frac {1}{(t + 1)}\\right) d z, (29) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.632, + 0.827, + 0.663 + ], + "angle": 0, + "content": "where (27) holds since \\(\\pi^{(0)}(t,\\mu) = \\Psi_0\\) for all \\(t\\) and \\(\\mu\\) hence it is always the case that \\(T_{t}^{(0)} = \\infty\\), and (28) is due to (15)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.669, + 0.827, + 0.711 + ], + "angle": 0, + "content": "In the remainder of our proofs, we take \\(\\alpha = 0\\) for notational brevity. This is without any loss of generality as, by simply shifting each value function and Q-function by \\(\\alpha / \\sqrt{\\tau}\\) with respect to \\(\\mu\\), all of the following arguments would still hold. 
For \\(\\alpha = 0\\), we show that" + }, + { + "type": "equation", + "bbox": [ + 0.337, + 0.717, + 0.826, + 0.753 + ], + "angle": 0, + "content": "\\[\nV ^ {(0)} (t, \\mu) = - (\\tau - t) C + R \\cdot F \\left(\\frac {\\mu}{\\sqrt {1 / t - 1 / \\tau}}\\right) \\tag {30}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.757, + 0.768, + 0.773 + ], + "angle": 0, + "content": "for all \\( t \\in \\{1, \\dots, \\tau - 1\\} \\) via mathematical induction. Note that (30) is true for \\( t = \\tau - 1 \\):" + }, + { + "type": "equation", + "bbox": [ + 0.286, + 0.778, + 0.712, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} V ^ {(0)} (\\tau - 1, \\mu) = - C + \\int V ^ {(0)} (\\tau , \\mu + z) f (z; 1 / _ {(\\tau - 1)} - 1 / _ {\\tau}) d z \\\\ = - C + R \\int \\mathbb {1} \\left\\{\\mu + z > 0 \\right\\} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z \\\\ = - C + R \\int_ {- \\mu} ^ {\\infty} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z \\\\ = - C + R \\int_ {- \\infty} ^ {\\mu} f (z; ^ {1 / (\\tau - 1)} - ^ {1 / \\tau}) d z \\\\ = - C + R \\int_ {- \\infty} ^ {\\mu / \\sqrt {1 / (\\tau - 1) - 1 / \\tau}} f (z; 1) d z \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.101, + 0.63, + 0.137 + ], + "angle": 0, + "content": "\\[\n= - C + R \\cdot F \\left(\\frac {\\mu}{\\sqrt {1 / (\\tau - 1) - 1 / \\tau}}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.144, + 0.825, + 0.173 + ], + "angle": 0, + "content": "where \\( F(x) \\doteq F(x; 1) \\) is the c.d.f. of the standard Gaussian distribution. 
Moreover, assuming (30) is true for \\( t \\), it is also true for \\( t - 1 \\):" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.18, + 0.796, + 0.512 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} V ^ {(0)} (t - 1, \\mu) \\\\ = - C + \\int V ^ {(0)} (t, \\mu + z) f (z; ^ {1} / t - 1 - ^ {1} / t) d z \\\\ = - (\\tau - t + 1) C + R \\int F ((\\mu + z) / \\sqrt {1 / t - 1 / \\tau}; 1) f (z; 1 / (t - 1) - 1 / t) d z \\\\ = - (\\tau - t + 1) C \\\\ + R \\iint_ {- \\infty} ^ {(\\mu + z) / \\sqrt {1 / t - 1 / \\tau}} f (z ^ {\\prime}; 1) f (z; 1 / (t - 1) - 1 / t) d z ^ {\\prime} d z \\\\ = - (\\tau - t + 1) C \\\\ + R \\iint_ {- \\infty} ^ {\\mu + z} f \\left(z ^ {\\prime}; 1 / t - 1 / \\tau\\right) f \\left(z; 1 / (t - 1) - 1 / t\\right) d z ^ {\\prime} d z \\\\ = - (\\tau - t + 1) C \\\\ + R \\iint \\mathbb {1} \\{z ^ {\\prime} \\leq \\mu + z \\} f \\left(z ^ {\\prime}; ^ {1} / t - ^ {1} / \\tau\\right) f \\left(z; ^ {1} / (t - 1) - ^ {1} / t\\right) d z ^ {\\prime} d z \\\\ = - (\\tau - t + 1) C + R \\cdot \\mathbb {P} _ {Z \\sim \\mathcal {N} (0, 1 / (t - 1) - 1 / t)} \\{Z ^ {\\prime} \\leq \\mu + Z \\} \\\\ = - (\\tau - t + 1) C + R \\cdot \\mathbb {P} _ {\\frac {Z ^ {\\prime} - Z}{\\sqrt {1 / (t - 1) - 1 / \\tau}} \\sim \\mathcal {N} (0, 1)} \\left\\{\\frac {Z ^ {\\prime} - Z}{\\sqrt {1 / (t - 1) - 1 / \\tau}} \\leq \\frac {\\mu}{\\sqrt {1 / (t - 1) - 1 / \\tau}} \\right\\} \\\\ = - (\\tau - t + 1) C + R \\cdot F \\left(\\frac {\\mu}{\\sqrt {1 / (t - 1) - 1 / \\tau}}\\right). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.52, + 0.538, + 0.535 + ], + "angle": 0, + "content": "Therefore, (30) indeed holds for all \\(t \\in \\{1, \\dots, \\tau - 1\\}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.542, + 0.825, + 0.623 + ], + "angle": 0, + "content": "Next, we observe that \\( V^{(0)}(t,\\mu) \\) has a root at \\( \\mu = F^{-1}((\\tau - t)C / R)\\sqrt{1 / t - 1 / \\tau} \\) provided that \\( (\\tau - t)C / R \\in (0,1) \\), which is the case for all \\( t \\in \\{1, \\dots, \\tau - 1\\} \\) since \\( \\tau C < R \\). Moreover, \\( V^{(0)}(t,\\mu) \\) is a strictly increasing function in \\( \\mu \\). Hence, there exists a unique \\( \\mu_t^{\\mathrm{greedy}} \\) for all \\( t \\in \\{1, \\dots, \\tau - 1\\} \\) such that \\( V^{(0)}(t,\\mu_t^{\\mathrm{greedy}}) > 0 \\) and \\( V^{(0)}(t,\\mu) > 0 \\iff \\mu > \\mu_t^{\\mathrm{greedy}} \\). In other words, \\( \\pi^{\\mathrm{greedy}} \\) is also a thresholding-type policy as the proposition states." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.629, + 0.826, + 0.662 + ], + "angle": 0, + "content": "Finally, we have \\( V^{(0)}(t,\\mu_t^*) = Q^{(0)}(t,\\mu_t^*) \\leq Q^* (t,\\mu_t^*) = 0 \\) hence \\( \\mu_t^* \\leq \\mu_t^{\\mathrm{greedy}} \\). This is because, by definition, \\( Q^{*}(t,\\mu) \\geq Q^{\\pi}(t,\\mu) \\) for all \\( t,\\mu \\) for any given policy \\( \\pi \\), including \\( \\pi^{(0)} \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.678, + 0.395, + 0.692 + ], + "angle": 0, + "content": "I.4 PROOF OF PROPOSITION 4" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.704, + 0.825, + 0.763 + ], + "angle": 0, + "content": "As in the proof of Proposition 3, we take \\(\\alpha = 0\\) for notational brevity. Once again, this is without any loss of generality as, by simply shifting each value function and Q-function by \\(\\alpha / \\sqrt{\\tau}\\) with respect to \\(\\mu\\), all of the following arguments would still hold. Remember that the formula we derived for \\(V^{(0)}(t, \\mu)\\) in (30) holds when \\(\\alpha = 0\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.768, + 0.825, + 0.797 + ], + "angle": 0, + "content": "We start by deriving two bounds on the optimal Q-function \\(Q^{*}(t,\\mu)\\): (i) a lower bound and (ii) an upper bound. For the lower bound, it is sufficient to observe that" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.805, + 0.621, + 0.824 + ], + "angle": 0, + "content": "\\[\nV ^ {(0)} (t, \\mu) = Q ^ {(0)} (t, \\mu) \\leq Q ^ {*} (t, \\mu),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.83, + 0.75, + 0.847 + ], + "angle": 0, + "content": "which holds since, by definition, \\( Q^{*}(t,\\mu) \\geq Q^{\\pi}(t,\\mu) \\) for all \\( t,\\mu \\) for any given policy \\( \\pi \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.853, + 0.827, + 0.882 + ], + "angle": 0, + "content": "For the upper bound, we use mathematical induction to show that \\( Q^{*}(t,\\mu) \\leq (\\tau - t - 1)C + V^{(0)}(t,\\mu) \\). First, for the base case of \\( \\tau - 1 \\)," + }, + { + "type": "equation", + "bbox": [ + 0.305, + 0.89, + 0.825, + 0.926 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q ^ {*} (\\tau - 1, \\mu) = - C + \\int V ^ {*} (\\tau , \\mu + z) f (z; ^ {1} / t - ^ {1} / t + 1) d z \\tag {31} \\\\ = - C + \\int \\mathbb {1} \\left\\{\\mu + z > \\alpha \\right\\} f \\left(z; 1 / t - 1 / t + 1\\right) d z \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.103, + 0.824, + 0.141 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} = - C + \\int V ^ {(0)} (\\tau , \\mu + z) f (z; 1 / t - 1 / t + 1) d z \\\\ = V ^ {(0)} (\\tau - 1, \\mu), \\tag {32} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.146, + 
0.825, + 0.178 + ], + "angle": 0, + "content": "where (31) is due to (16), and (32) is due to (29). Then, for the inductive case, assuming \\( Q^{*}(t,\\mu)\\leq (\\tau -t - 1)C + V^{(0)}(t,\\mu) \\)" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.182, + 0.825, + 0.26 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, 1 / (t - 1) - 1 / t) d z (33) \\\\ \\leq \\int Q ^ {*} (t, \\mu + z) f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z (34) \\\\ \\leq (\\tau - t - 1) C + \\int V ^ {(0)} (t, \\mu + z) f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ = (\\tau - t) C + V ^ {(0)} (t - 1, \\mu + z), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.264, + 0.826, + 0.295 + ], + "angle": 0, + "content": "where (33) is due to (17), and (34) holds since \\(-C \\leq Q^{*}(t, \\mu)\\) implies that \\(\\max \\{0, Q^{*}(t, \\mu)\\} \\leq \\max \\{C + Q^{*}(t, \\mu), Q^{*}(t, \\mu)\\} \\leq C + Q^{*}(t, \\mu)\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.301, + 0.361, + 0.317 + ], + "angle": 0, + "content": "Define \\(\\mu_t^+\\) and \\(\\mu_t^-\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.323, + 0.744, + 0.358 + ], + "angle": 0, + "content": "\\[\nV ^ {(0)} (t, \\mu_ {t} ^ {+}) = 0 \\iff \\mu_ {t} ^ {+} = F ^ {- 1} \\left(\\left(\\tau - t\\right) \\frac {C}{R}\\right) \\sqrt {\\frac {1}{t} - \\frac {1}{\\tau}}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.361, + 0.705, + 0.396 + ], + "angle": 0, + "content": "\\[\n(\\tau - t - 1) C + V ^ {(0)} (t, \\mu_ {t} ^ {-}) = 0 \\iff \\mu_ {t} ^ {-} = F ^ {- 1} \\left(\\frac {C}{R}\\right) \\sqrt {\\frac {1}{t} - \\frac {1}{\\tau}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.402, + 0.825, + 0.432 + ], + "angle": 0, + "content": "which we are able to write in closed form using the formula we derived for \\( V^{(0)}(t,\\mu) \\) in (30) during the proof of Proposition 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.438, + 0.827, + 0.52 + ], + "angle": 0, + "content": "By definition, \\(\\mu_t^{\\mathrm{greedy}} = \\mu_t^+\\). Moreover, (i) \\(V^{(0)}(t,\\mu_t^*)\\leq Q^* (t,\\mu_t^*) = 0 = V^{(0)}(t,\\mu_t^+)\\) due to our lower bound, hence \\(\\mu_t^*\\leq \\mu_t^+\\) (remember that \\(V^{(0)}(t,\\mu)\\) was a strictly increasing function in \\(\\mu\\)), and (ii) \\((\\tau -t - 1)C + V^{(0)}(t,\\mu_t^-) = 0 = Q^* (t,\\mu_t^*)\\leq (\\tau -t - 1)C + V^{(0)}(t,\\mu_t^*)\\) due to our upper bound, hence \\(V^{(0)}(t,\\mu_t^-)\\leq V^{(0)}(t,\\mu_t^*)\\) meaning \\(\\mu_t^- \\leq \\mu_t^*\\). Putting together these facts, and also the fact that \\(\\mu_t^*\\leq \\mu_t^{\\mathrm{greedy}}\\), we obtain \\(|\\mu_t^* -\\mu_t^{\\mathrm{greedy}}|\\leq \\mu_t^+ -\\mu_t^-\\) as the proposition states." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.538, + 0.467, + 0.553 + ], + "angle": 0, + "content": "J BENCHMARKING ALGORITHMS" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.576, + 0.773, + 0.592 + ], + "angle": 0, + "content": "Algorithm 2 Adaptive Enrichment, Futility Stopping with Bayes-OCP, Greedy Bayes-OCP" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.595, + 0.825, + 0.921 + ], + "angle": 0, + "content": "1: Initialize \\(\\mu_{x}\\) and \\(\\sigma_x^2\\) for all \\(x\\in \\mathcal{X}\\) \n2: \\(X\\gets \\mathcal{X},t\\gets 0,\\mathcal{D}_0\\gets \\emptyset\\) \n3: Start experiment \\(\\psi = (\\mathcal{X},\\tau ,\\rho)\\) \n4: loop: \n5: \\(t\\gets t + 1\\) \n6: Observe \\(x_{t},y_{t}\\) \n7: \\(\\mathcal{D}_t\\gets \\mathcal{D}_{t - 1}\\cup \\{x_t,y_t\\}\\) \n8: \\(1 / \\sigma_{x_t}^2\\gets 1 / \\sigma_{x_t}^2 +1\\) \n9: \\(\\mu_{x_t}\\gets \\mu_{x_t} + (y_t - \\mu_{x_t})\\sigma_{x_t}^2\\) \n10: \\(X^{\\prime}\\gets \\emptyset\\) \n11: while \\(X\\setminus X^{\\prime}\\supset \\emptyset\\) .. \n12: \\(x^{*}\\gets \\mathrm{argmax}_{x\\in X\\setminus X^{\\prime}}\\mathbb{E}_{\\theta_{x}\\sim \\mathcal{N}(\\mu_{x},\\sigma_{x}^{2})}[\\mathcal{G}^{(0)}(X^{\\prime}\\cup \\{x\\};\\{\\theta_{x}\\})]\\) \n13: if \\(\\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X^{\\prime}\\cup \\{x^{*}\\} ;\\{\\theta_{x}\\})] > \\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X^{\\prime};\\{\\theta_{x}\\})]\\) .. \n14: \\(X^{\\prime}\\gets X^{\\prime}\\cup \\{x^{*}\\}\\) \n15: else: \n16: break \n17: if Adaptive Enrichment and \\(t = \\tau /2\\) and \\(\\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X';\\{\\theta_x\\})] > \\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}(X,\\mathcal{D}_t;\\{\\theta_x\\})]\\) .. 
\n18: \\(X\\gets X^{\\prime},t\\gets 0,\\mathcal{D}_0\\gets \\emptyset\\) \n19: Start a new experiment \\(\\psi = (X,\\tau ,\\rho)\\) \n20: if Greedy Bayes-OCP and \\(\\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X';\\{\\theta_x\\})] > \\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}(X,\\mathcal{D}_t;\\{\\theta_x\\})]\\) .. \n21: \\(X\\gets X^{\\prime},t\\gets 0,\\mathcal{D}_0\\gets \\emptyset\\) \n22: Start a new experiment \\(\\psi = (X,\\tau ,\\rho)\\) \n23: if Futility Stopping with Bayes-OCP and \\(\\mathbb{P}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(\\emptyset ;\\{\\theta_x\\}) > \\mathcal{G}(X,\\mathcal{D}_0;\\{\\theta_x\\})] > \\beta\\) \n24: Stop all experimentation" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.173, + 0.103, + 0.524, + 0.119 + ], + "angle": 0, + "content": "K GLOSSARY OF TERMS AND NOTATION" + }, + { + "type": "table", + "bbox": [ + 0.171, + 0.138, + 0.837, + 0.921 + ], + "angle": 0, + "content": "
TermNotationDescription
Experiment-Conducted to confirm efficacy of an intervention, e.g. a new treatment in clinical trials, or a new recommendation policy in online advertisement
Subject-Individual participant of an experiment, e.g. patients in a clinical trial, or customers in online advertisement
PopulationX⊆XCollection of subjects that all share the same qualities, e.g. all female patients in a clinical trial, or all customers with the same preferences in online advertisement
Atomic-populationx∈XIndivisible populations
PropensitiesηxThe probability that a subject is from atomic-population x
ηXThe probability that a subject is from population X
ηx|XThe probability that a subject is from atomic-population x conditioned on the fact that they are from population X
Outcome distributionΩxDistribution of outcomes that is indicative of the effect of the intervention of interest for atomic-population x
Mean outcomesθxExpected outcome, i.e. the effect of the intervention of interest, for atomic-population x
θXExpected outcome for population X
Experiment designψ=(X,τ,ρ)Target population X, sample horizon τ, and success criterion ρ that characterize an experiment
Viable experiment designsΨExperiment designs that can potentially be followed by a meta-experimenter
Meta-experimenter-The decision-making agent that decides when to run experiments according to which experiment design in Ψ
Sample/time horizonτAn experiment is terminated when t=τ
Success criterionρAn experiment is declared a success if ρ(Dτ)=1
Online datasetDtData collected by an ongoing experiment at time step t
DtData collected by the i-th experiment run by the meta-experimenter at time step t
Aggregate datasetDItCollective data collected by all experiments up to time step t of the i-th experiment
-TtNumber of time steps for which the i-th experiment is conducted until it was stopped or its time horizon was reached
CostCost incurred per time step by running experiment ψ
RewardReward received if experiment ψ is successful
UtilityGSum of costs and rewards received after all experimentation is concluded
PolicyπDecision-making policy of the meta-experimenter
Optimal policyπ*The optimal policy that maximizes utility G in expectation
Greedy policyπgreedySee Section 3
Test statisticμtIn the simplified case in Section 3, the empirical mean outcome
Value functionVπ(t,μ)The expected utility of following policy π when μt=μ
Q-functionQπ(t,μ)The expected utility of following policy π after conducting the ongoing experiment for one more time step when μt=μ
-TtπThe first time step at or after time step t that policy π decides to stop all experimentation
Optimal value functionV*The value function associated with π*
Optimal Q-functionQ*The Q-function associated with π*
Thresholdsμt*Decision-making thresholds associated with π*
μtgreedyDecision-making thresholds associated with πgreedy
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.51, + 0.961 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.172, + 0.102, + 0.835, + 0.235 + ], + "angle": 0, + "content": "
TermNotationDescription
Conditional power functionP(X, Dt; {θx})The probability of a hypothesis test being successful conditioned on mean outcomes {θx}
Expected utility functionG(X, Dt; {θx})The expected utility of fully committing to an experiment and waiting until it terminates when the experiment targets population X, is currently at time step t, and collected dataset Dt
PosteriorsN(μx, σx2)Posterior distributions over mean outcomes {θx} maintained by Bayes-OCP such that θx|D ~ N(μx, σx2)
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ] +] \ No newline at end of file diff --git a/2023/When to Make and Break Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_origin.pdf b/2023/When to Make and Break Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..af6b8a143eeb92395d9c600f9cf2a2de212f0a08 --- /dev/null +++ b/2023/When to Make and Break Commitments_/8ecf68b7-cbf5-414a-9452-a5b931a222f9_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67ce946a5a99a4911968481ba75fd56fc5d47663f1a47e0c7ff470dd4b490433 +size 560198 diff --git a/2023/When to Make and Break Commitments_/full.md b/2023/When to Make and Break Commitments_/full.md new file mode 100644 index 0000000000000000000000000000000000000000..9b8c3ef24ae3ce08bd3bea386668593c17c8b090 --- /dev/null +++ b/2023/When to Make and Break Commitments_/full.md @@ -0,0 +1,666 @@ +# WHEN TO MAKE AND BREAK COMMITMENTS? + +Alihan Hüyük + +University of Cambridge + +ah2075@cam.ac.uk + +Zhaozhi Qian + +University of Cambridge + +zq224@cam.ac.uk + +Mihaela van der Schaar + +University of Cambridge + +The Alan Turing Institute + +mv472@cam.ac.uk + +# ABSTRACT + +In many scenarios, decision-makers must commit to long-term actions until their resolution before receiving the payoff of said actions, and usually, staying committed to such actions incurs continual costs. For instance, in healthcare, a newly-discovered treatment cannot be marketed to patients until a clinical trial is conducted, which both requires time and is also costly. Of course in such scenarios, not all commitments eventually pay off. For instance, a clinical trial might end up failing to show efficacy. 
Given the time pressure created by the continual cost of keeping a commitment, we aim to answer: When should a decision-maker break a commitment that is likely to fail—either to make an alternative commitment or to make no further commitments at all? First, we formulate this question as a new type of optimal stopping/switching problem called the optimal commitment problem (OCP). Then, we theoretically analyze OCP, and based on the insight we gain, propose a practical algorithm for solving it. Finally, we empirically evaluate the performance of our algorithm in running clinical trials with subpopulation selection. + +# 1 INTRODUCTION + +In many real-world settings, decision-makers must commit to long-term actions and wait until their resolution before receiving the payoff of said actions. Meanwhile, staying committed to such actions incurs continual costs. For instance, in portfolio management, it might take time for an asset to develop additional value after an initial investment, and keeping capital tied up in an asset comes with an opportunity cost for the investor (Markowitz, 1959; Merton, 1969; Karatzas and Wang, 2020). In an energy network, turning power stations on and off is not an immediate action, hence a sudden increase in energy demand can only be met with a delay after putting more stations into operation, and keeping stations operational obviously consumes resources (Rafique and Jianhua, 2018; Olofsson et al., 2022). In healthcare, a newly-discovered treatment can only be marketed to patients once a successful clinical trial that targets the said treatment is conducted, which both requires time and is also costly (Kaitin, 2010; Umscheid et al., 2011). + +Of course, not all commitments eventually pay off: An asset might end up losing value despite investments, energy demands might shift faster than a network can react to, and a clinical trial might fail to show efficacy for the targeted treatment. 
Given the time pressure created by the continual cost of keeping a commitment, our goal in this paper is to answer the question: When should a decision-maker break a commitment—thereby avoiding future costs but also forfeiting any potential returns—either to make an alternative commitment instead or to make no further commitments at all? Solving this problem optimally requires a careful balance between exploration and exploitation: The earlier a commitment that is bound to fail is broken, the more resources would be saved (cf. exploitation); but the longer one is kept, the more information is revealed regarding whether the commitment is actually failing or might still succeed (cf. exploration)—and in certain cases, also regarding the prospects of similar commitments one could make instead. + +Related problems are mostly studied within the context of adaptive experimentation and sequential hypothesis testing (see Section 5). As such, we focus on adaptive experimentation as our main application as well. More specifically, we consider the problem of selecting the target population of an adaptive experiment. Suppose an experimenter, who is interested in proving the efficacy of a new treatment, starts running an initial experiment that targets a certain population of patients. Incidentally, the treatment being tested is effective only for a relatively narrow subpopulation of patients but not for the wider population as a whole. Hence, an experiment targeting the overall population, but not the subpopulation specifically, will most probably fail to prove efficacy and prevent the deployment of the treatment for the patients who would have actually benefited from it, not to mention waste + +time and resources (Moineddin et al., 2008; Lipkovich et al., 2017; Chiu et al., 2018). 
Of course, the experimenter has no knowledge of this in advance but the initial experiment they have set up would slowly reveal more information regarding the effects of the treatment and the fact that the ongoing experiment is bound to fail. In that case, we want to be able to determine at what point the experimenter has enough information to justify breaking their commitment to the initial experiment that targets too wide of a population to be successful, in favor of making a new commitment to a follow-up experiment that focuses on a narrower subpopulation instead? + +Contributions Our contributions are threefold: First, we formulate the problem of making and breaking commitments in a timely manner as a new type of optimal stopping/switching problem called the optimal commitment problem (OCP) (Section 2). The defining feature of OCP is that rewards are received only when a known time point is reached but costs are incurred continually, requiring commitment to actions but with incentive to abandon those commitments. As we will show later, OCP cannot be easily solved via conventional reinforcement learning techniques due to its non-convex nature. Second, we theoretically analyze a simplified case of OCP to identify the characteristics of the optimal solution (Section 3), and based on the insights we gain, propose a practical algorithm for the more general case (Section 4). Third, we empirically evaluate the performance of our algorithm in running experiments with subpopulation selection (Section 6). Before we move on, it should be emphasized that, although we predominantly consider adaptive experimentation as our main application, our contributions remain generally applicable to portfolio management, energy systems, and any other decision-making scenarios that require commitments to long-term actions. + +# 2 OPTIMAL COMMITMENT PROBLEM + +We first introduce the problem of optimal commitment from the perspective of running experiments. 
As far as our formulation is concerned, experiments are conducted to confirm the efficacy of an intervention by observing the outcome of the said intervention for subjects belonging to a particular population. However, this experiment-focused perspective does not limit the applicability of OCP; we stress its generality later at the end of the section. We provide a glossary of terms and notation in Appendix K. + +Populations Let $\mathcal{X}$ be a discrete set of atomic-populations such that every subject is only the member of exactly one atomic-population $x\in \mathcal{X}$ . Denote with $\eta_{x}\in [0,1]$ the probability of a subject being from atomic-population $x$ (such that $\sum_{x\in \mathcal{X}}\eta_x = 1$ ), and with $\Omega_{x}$ the distribution of outcomes for atomic-population $x$ such that the mean outcome $\theta_{x} = \mathbb{E}_{y\sim \Omega_{x}}[y]$ is the effect of some intervention for atomic-population $x$ . Now, wider populations can be constructed by combining various atomic-populations. Let any $X\subseteq \mathcal{X}$ represent the population of subjects who belong to either one of the atomic-populations $\{x\in X\}$ . Then, the probability of a subject being from population $X$ can be written as $\eta_{X} = \sum_{x\in X}\eta_{x}$ , the probability of a subject being from atomic-population $x$ conditioned on the fact that they are from population $X$ can be written as $\eta_{x|X} = \eta_x / \eta_X$ , and the average effect for population $X$ can be written as $\bar{\theta}_X = \sum_{x\in X}\eta_{x|X}\theta_x$ . + +Experiments An experiment is largely characterized by the population it targets, its sample horizon, and its success criterion. 
During an experiment that targets population $X$ , at each time step $t \in \{1, 2, \ldots\}$ that the experiment continues, first a subject from some atomic-population $x_{t}$ within the targeted population $X$ arrives with probability $\eta_{x_{t}|X}$ , and then the outcome $y_{t} \sim \Omega_{x_{t}}$ for that subject is observed. This process generates an online dataset $\mathcal{D}_{t} = \{x_{t'}, y_{t'}\}_{t' = 1}^{t}$ . The experiment terminates when a pre-specified sample/time horizon $\tau$ is reached. Once terminated, the experiment is declared a success if $\rho(\mathcal{D}_{\tau}) = 1$ , where $\rho: (\mathcal{X} \times \mathbb{R})^{\tau} \to \{0, 1\}$ is the success criterion, and declared a failure otherwise. Formally, the tuple $\psi = (X, \tau, \rho)$ constitutes an experiment design. + +Meta-experimenter Suppose a meta-experimenter is given a set of viable experiment designs $\Psi$ and is tasked with running at least one successful experiment. Each experiment $\psi \in \Psi$ has an associated cost $C_{\psi} \in \mathbb{R}_{+}$ , which the experiment incurs per time step that it continues, and an associated reward $R_{\psi} \in \mathbb{R}_{+}$ , which the experiment provides only if it eventually succeeds. The meta-experimenter aims to maximize utility—that is the difference between any eventual reward received and the total costs incurred by running experiments. They first pick an initial experiment $\psi^{1} \in \Psi$ and start conducting it, which generates an online dataset $\mathcal{D}_t^1$ as described earlier. Now at each time step $t$ , they need to decide whether they should stay committed to their initial decision and wait until $\psi^{1}$ terminates, or stop $\psi^{1}$ early in favor of starting a new experiment $\psi^{2}$ . They might decide on the latter to avoid unnecessary costs if $\mathcal{D}_t^1$ already indicates $\psi^{1}$ is unlikely to succeed. 
If at some point a secondary experiment $\psi^{2}$ is started, now the meta-experiment has a similar decision to make + +regarding whether to stop $\psi^2$ early in favor of starting a new experiment $\psi^3 \in \Psi$ . This process continues until either an experiment finally succeeds or the meta-experimenter decides not to conduct any further experiments; let the random variable $n \in \{1, 2, \ldots\}$ be such that $\psi^n$ is the last experiment. We denote with $\psi^i = (X^i, \tau^i, \rho^i)$ the $i$ -th experiment conducted by the meta-experimenter, and with $T^i$ the number of time steps for which the $i$ -th experiment is conducted either until it was stopped by the meta-experimenter or the time horizon $\tau^i$ was reached. Denote with $\pi(t, \psi^i, \bar{\mathcal{D}}_t^i)$ the decision-making policy of the meta-experimenter, where $t$ is the current time step of the latest experiment $\psi^i$ and $\bar{\mathcal{D}}_t^i = (\cup_{j=1}^{i-1} \mathcal{D}_{T^j}^j) \cup \mathcal{D}_t^i$ is an aggregate dataset. We write (i) $\pi(t, \psi^i, \bar{\mathcal{D}}_t^i) = \psi^i$ if the meta-experiment decides to keep conducting the current experiment $\psi^i$ , (ii) $\pi(t, \psi^i, \bar{\mathcal{D}}_t^i) = \psi' \neq \psi^i$ if the meta-experimenter decides to stop experiment $\psi^i$ and start experiment $\psi'$ instead, and (iii) $\pi(t, \psi^i, \bar{\mathcal{D}}_t^i) = \emptyset$ if the meta-experimenter decides not to conduct any further experiments. + +Objective Once all experimentation is concluded, the meta-experimenter achieves the total utility + +$$ +G = R _ {\psi^ {n}} \cdot \mathbb {1} \left\{T ^ {n} = \tau^ {n} \right\} \cdot \rho^ {n} \left(\mathcal {D} _ {\tau^ {n}} ^ {n}\right) - \sum_ {i = 1} ^ {n} C _ {\psi^ {i}} \cdot T ^ {i}. \tag {1} +$$ + +Then, the optimal commitment problem is to find the optimal policy $\pi^{*} = \operatorname{argmax}_{\pi} \mathbb{E}_{\pi}[G]$ that maximizes the expected utility given $\Psi$ , $\{\eta_x\}$ . 
$\{R_{\psi}, C_{\psi}\}$ without knowing mean outcomes $\{\theta_x\}$ or outcome distributions $\{\Omega_x\}$ . It is called the optimal commitment problem because each experiment $\psi = (X, \tau, \rho)$ only provides a reward if the meta-experimenter commits to incurring its costs for at least $\tau$ time steps, and the meta-experimenter needs to decide which experiment in $\Psi$ is the better commitment—or if there is any experiment worth committing to at all—adaptively. + +General applicability of OCP Although we have described OCP from the perspective of (meta-)experiment design, it can potentially be useful in modeling many other problems as we have stressed during the introduction (see Table 1). For instance, in portfolio management, atomic-populations can be regarded as various assets one can invest in, then a population would correspond to a portfolio + +of assets. Similar to experiments, when these portfolios require a time commitment (cf. $\tau$ ) before they provide their payoff (cf. $R_{\psi}$ ) and incur an opportunity cost (cf. $C_{\psi}$ ) in the mean time, the decision-making problem of managing when and which portfolio to invest in constitutes an instance of the optimal commitment problem. Another good examples is energy management, where power stations and the networks they form are akin to atomic-populations and populations. Since power stations cannot be turned on and off immediately, putting one in operation requires a certain amount of commitment. + +Table 1: Equivalent concepts across different domains. OCP can model scenarios other than adaptive experimentation. + +
DomainEquivalent Concepts
Adaptive experimentationAtomic-populationPopulation
Portfolio managementFinancial assetPortfolio of assets
Energy systemsPower stationNetwork of stations
+ +# 3 WARM-UP: WHEN TO BREAK A SINGLE COMMITMENT? + +In this section, to gather insights, we commence by analyzing a simplified instance of OCP. Later, in Section 4, using these insights, we construct a practical algorithm for solving a more general case of OCP. As the simplified instance, we only consider one atomic-population such that $\mathcal{X} = \{\mathcal{X}_0\}$ and one experiment design that targets this atomic-population such that $\Psi = \{\Psi_0 = (\mathcal{X}_0,\tau ,\rho)\}$ . Moreover, we assume that the outcomes are distributed normally with unit variance such that $\Omega \doteq \Omega_{\mathcal{X}_0}\doteq \mathcal{N}(\theta \doteq \theta_{\mathcal{X}_0},1)$ and the success criterion is a simple Z-test to see whether $\theta >0$ such that $\rho (\mathcal{D}_{\tau})\doteq \rho (\mu_{\tau})\doteq \mathbb{1}\{\mu_{\tau} > \alpha /\sqrt{\tau}\}$ , where $\mu_t = \sum_{(x_{t'},y_{t'})\in \mathcal{D}_t}y_{t'} / |\mathcal{D}_t|$ is the empirical mean outcome given dataset $\mathcal{D}_t$ , and $\alpha$ determines the significance threshold for the test. Since there is just one viable experiment in this setting, the only decision that needs to be made at each time step is whether to keep conducting experiment $\psi^1 = \Psi_0$ or to stop all experimentation. For this decision to be interesting, we will also assume that $C\doteq C_{\Psi_0} > 0$ so that never stopping is not necessarily optimal—and $R\doteq R_{\Psi_0} > \tau C$ so that always stopping is not necessarily optimal either. + +Value and Q-functions Since $t$ and $\mu_t$ are sufficient statistics to estimate the success probability of the experiment, it is also sufficient to only consider policies of the form $\pi(t, \mu)$ . 
For a given policy $\pi$ , + +$$ +V ^ {\pi} (t, \mu) = \mathbb {E} \left[ R \cdot \mathbb {1} \left\{T _ {t} ^ {\pi} > \tau \right\} \cdot \rho (\mu_ {\tau}) - C \cdot \left(\min \left\{T _ {t} ^ {\pi}, \tau \right\} - t\right) \mid \mu_ {t} = \mu \right] \tag {2} +$$ + +$$ +Q ^ {\pi} (t, \mu) = \mathbb {E} [ R \cdot \mathbb {1} \left\{T _ {t + 1} ^ {\pi} > \tau \right\} \cdot \rho (\mu_ {\tau}) - C \cdot \left(\min \left\{T _ {t + 1} ^ {\pi}, \tau \right\} - t\right) | \mu_ {t} = \mu ] \tag {3} +$$ + +are the value function, and the Q-function of conducting the experiment for at least one more time step respectively, where $T_{t}^{\pi} = \min \{t' \geq t : \pi(t', \mu_{t'}) = \emptyset\}$ is the first time step at or after time $t$ that policy $\pi$ decides to stop; let $V^{*} = V^{\pi^{*}}$ and $Q^{*} = Q^{\pi^{*}}$ be the optimal value and Q-functions. Note that the Q-factor of stopping all experimentation is always equal to zero for all policies. Hence, the optimal policy must be such that $\pi^{*}(t, \mu) = \Psi_{0}$ if $Q^{*}(t, \mu) > 0$ and $\pi^{*}(t, \mu) = \emptyset$ otherwise. + +Once we identify the value and Q-functions, a naive attempt at finding the optimal policy would be to compute $V^{*}$ and $Q^{*}$ via dynamic programming as they would satisfy the following Bellman optimality conditions: + +$$ +Q ^ {*} (t, \mu) = - C + \mathbb {E} \left[ V ^ {*} (t + 1, \mu_ {t + 1}) \mid \mu_ {t} = \mu \right] \tag {4} +$$ + +$$ +V ^ {*} (t, \mu) = \max \{0, Q ^ {*} (t, \mu) \} \tag {5} +$$ + +and $V^{*}(\tau, \mu) = R \cdot \rho(\mu)$ . However, a major complication in applying dynamic programming methods to compute $V^{*}$ and $Q^{*}$ is that they are continuous functions in $\mu$ . 
In the literature of partially-observable Markov decision processes (POMDPs), which OCP happens to be an instance + +of (see Appendix A), the standard approach of addressing this complication would be to leverage the convexity of $V^{*}$ and $Q^{*}$ , and approximate them with functions of the form $f(\mu) = \max_{i} a_{i}\mu + b_{i}$ (Spanan, 2012). However, this standard approach is not applicable in OCP because, in general, neither $V^{*}(t,\mu)$ nor $-V^{*}(t,\mu)$ is a convex function with respect to $\mu$ (see Figure 1): + +Proposition 1 (Non-convexity). There exist a problem instance $(C,R,\tau,\alpha)$ and $t\in \{1,\dots ,\tau -1\}$ such that $\exists \mu ,\mu^{\prime}\in \mathbb{R},p\in [0,1]:V^{*}(t,p\mu +(1 - p)\mu^{\prime}) < pV^{*}(t,\mu) + (1 - p)V^{*}(t,\mu^{\prime})$ and $\exists \mu ,\mu^{\prime}\in \mathbb{R},p\in [0,1]: - V^{*}(t,p\mu +(1 - p)\mu^{\prime}) < - pV^{*}(t,\mu) - (1 - p)V^{*}(t,\mu^{\prime}).^{1}$ + +Properties of the optimal policy Although identifying $\pi^{*}$ exactly by computing $V^{*}$ and $Q^{*}$ is challenging, we can still identify some properties that $\pi^{*}$ should have, which can then help us design a heuristic policy that we expect to perform well, albeit not optimally. First of all, the optimal policy $\pi^{*}$ should be a "thresholding-type" policy—that is the meta-experimenter should keep conducting the experiment as long as $\mu_{t}$ stays above a time-dependent threshold $\mu_{t}^{*}$ and should stop all experimentation the moment $\mu_{t}$ drops below that threshold (see the top panel of Figure 2): + +Proposition 2 (Thresholding). 
For all problem instances $(C, R, \tau, \alpha)$, there exist time-dependent thresholds $\{\mu_t^* \in \mathbb{R}\}_{t=1}^{\tau-1}$ such that + +$$ +\pi^{*}(t, \mu) = \left\{\Psi_{0} \quad \text{if } \mu > \mu_{t}^{*}; \quad \emptyset \quad \text{otherwise}\right\} \tag{6} +$$ + +Intuitively, a higher test statistic $\mu_t$ means that the experiment is only more likely to succeed, hence if it is optimal to continue conducting the experiment when $\mu_t = \mu$ , then it should also be optimal to continue when $\mu_t = \mu' > \mu$ (likewise, lower $\mu_t$ means success is even less likely hence $\pi^*(t, \mu) = \emptyset$ implies $\pi^*(t, \mu') = \emptyset$ for $\mu' < \mu$ ). + +Moreover, the optimal policy $\pi^{*}$ must be "optimistic" that the experiment will succeed when making decisions. Consider a greedy policy $\pi^{\mathrm{greedy}}$ that continues as long as the expected utility of committing fully to conducting the experiment until it terminates at $t = \tau$ is positive—that is $\pi^{\mathrm{greedy}}(t,\mu) = \Psi_0$ if and only if $V^{\pi^{(0)}}(t,\mu) > 0$ where $\pi^{(0)}$ is the policy that always waits until the experiment terminates such that $\pi^{(0)}(t,\mu) = \Psi_0$ for all $t,\mu$ ; $\pi^{\mathrm{greedy}}$ is said to be greedy because the decision to continue is made assuming a full commitment to the experiment without considering the possibility to stop at a future time step. Then, whenever such greedy reasoning suggests continuing, the meta-experimenter should indeed continue. However, whenever the same reasoning suggests stopping, the meta- + +experimenter should be optimistic that the experiment will succeed and occasionally make the decision to continue instead—that is $\pi^{*}$ should be biased towards continuing (see the threshold gap in Figure 2): + +Proposition 3 (Optimism). 
First, $\pi^{\text{greedy}}$ is also of thresholding type and there exist thresholds $\{\mu_t^{\text{greedy}} \in \mathbb{R}\}_{t=1}^{\tau-1}$ such that $\pi^{\text{greedy}}(t,\mu) = \Psi_0$ if and only if $\mu > \mu_t^{\text{greedy}}$. Moreover, for all $t \in \{1,\dots,\tau-1\}$, + +$$ +\mu_{t}^{*} \leq \mu_{t}^{\text{greedy}} \quad \Longleftrightarrow \quad \left\{\mu : \pi^{*}(t, \mu) = \Psi_{0}\right\} \supseteq \left\{\mu : \pi^{\text{greedy}}(t, \mu) = \Psi_{0}\right\} \tag{7} +$$ + +![](images/07487c83fa7edf52e9199370f2865392f818d5292dc95a654768533a6c1969f5.jpg) +Figure 1: Optimal value function $V^{*}(t,\mu)$ for $C = 1$ , $R = 10$ , $\tau = 4$ , and $\alpha = 0$ . It can clearly be seen that neither $V^{*}$ nor $-V^{*}$ is convex in $\mu$ (cf. Proposition 1). + +Intuitively, the optimism of $\pi^{*}$ accounts for the information gained from observing more samples when the experiment is continued. Remember that $\pi^{\mathrm{greedy}}$ estimates the reward to be received if the experiment is conducted until termination, and it stops whenever its estimate is negative. But, the estimate of $\pi^{\mathrm{greedy}}$ has some uncertainty associated with it. Whenever it is uncertain enough that the reward to be received is actually negative, incurring the cost of continuing for one more time step, gaining new information, and forming a more certain estimate can lead to a more accurate decision and a higher overall utility. Finally, the optimism of $\pi^{*}$ has a strictly decreasing upper bound; denoting with $F(x) = (1 / \sqrt{2\pi})\int_{-\infty}^{x}e^{-t^2 / 2}\,\mathrm{d}t$ the c.d.f. of the standard normal distribution: + +Proposition 4 (Decreasing optimism). 
For all $t \in \{1, \dots, \tau - 1\}$ , + +$$ +\left| \mu_ {t} ^ {*} - \mu_ {t} ^ {\text {g r e e d y}} \right| \leq \sqrt {1 / t - 1 / \tau} \times \left(F ^ {- 1} \left(\left(\tau - t\right) ^ {C} / _ {R}\right) - F ^ {- 1} \left(^ {C} / _ {R}\right)\right) \tag {8} +$$ + +Intuitively, as the experiment continues, the information gained from one individual sample decreases relative to the total information accumulated, hence the optimism of $\pi^{*}$ that accounts for the that information gain also decreases (see the bottom panel of Figure 2). Consider one extreme: When $t = \tau - 1$ , there is no more information to be gained before the experiment terminates at $t = \tau$ , hence $\pi^{*}$ should make the same decisions as $\pi^{\mathrm{greedy}}$ . Indeed, Proposition 4 implies that $\mu_{\tau - 1}^{*} = \mu_{\tau - 1}^{\mathrm{greedy}}$ . + +# 4 A PRACTICAL ALGORITHM: BAYES-OCP + +Summarizing our discussion in the previous section, we suspect the optimal policy to be (i) of thresholding type (cf. Proposition 2), (ii) optimistic (cf. Proposition 3), and (iii) increasingly more greedy (cf. Proposition 4). These findings are not a complete surprise as optimism-in-the-face-of-uncertainty is a well-known principle in solving online decision-making problems (Auer et al., 2002; Bubeck et al., 2012). Our earlier analysis shows rigorously that this principle holds for at least a special case of OCP and strengths our intuition that it should be applicable for more general cases of OCP as well. + +Keeping properties (i-iii) in mind, we now propose a practical algorithm for solving OCP in a more general setting than the one we analyzed earlier. Let $|\mathcal{X}| \geq 1$ and $\Psi = \{(X,\tau,\rho): X \in 2^{\mathcal{X}} \setminus \emptyset\}$ include all experiment designs that target a unique subpopulation within $\mathcal{X}$ for a given time horizon $\tau$ and success criterion $\rho$ ; let $C_X \doteq C_{(X,\tau,\rho)}$ and $R_X \doteq R_{(X,\tau,\rho)}$ . 
We assume that the conditional power of performing a hypothesis test at time $\tau$ according to $\rho$ —that is the probability of the test being successful conditioned on mean outcomes $\{\theta_x\}$ —can be computed for interim datasets—that is + +$$ +\mathcal{P}(X, \mathcal{D}_{t}; \left\{\theta_{x}\right\}) = \mathbb{E}_{x_{t^{\prime}} \sim \left\{\eta_{x \mid X}\right\}_{x \in X}, y_{t^{\prime}} \sim \mathcal{N}\left(\theta_{x_{t^{\prime}}}, 1\right)} [ \rho(\mathcal{D}_{t} \cup \left(\cup_{t^{\prime} = t + 1}^{\tau} \left\{x_{t^{\prime}}, y_{t^{\prime}}\right\}\right)) ] \tag {9} +$$ + +can be evaluated efficiently. Then, based on this conditional power function, we define + +$$ +\mathcal{G}\left(X, \mathcal{D}_{t}; \left\{\theta_{x}\right\}\right) = R_{X} \cdot \mathcal{P}(X, \mathcal{D}_{t}; \left\{\theta_{x}\right\}) - C_{X} \cdot (\tau - t) \tag {10} +$$ + +as the expected utility of fully committing to an experiment and waiting until it terminates when the experiment targets population $X$ , is currently at time step $t$ , and has collected dataset $\mathcal{D}_t$ so far. Denote with $\mathcal{G}^{(0)}(X; \{\theta_x\}) = \mathcal{G}(X, \emptyset; \{\theta_x\})$ the same expected utility but for an experiment that is yet to start, and with $\mathcal{G}^{(0)}(\emptyset; \{\theta_x\}) = 0$ the utility of stopping all experimentation. + +Our algorithm is called Bayes-OCP and is given in Algorithm 1. It maintains a posterior distribution $\mathcal{N}(\mu_x,\sigma_x^2)$ for each mean outcome $\theta_{x}$ assuming that, given mean $\theta_{x}$ , outcomes are distributed normally with unit variance—that is $\Omega_{x} = \mathcal{N}(\theta_{x},1)$ . These posteriors are only used in deciding which experiment to run next and not in determining whether the experiment was a success or not. 
Hence, even when the assumption of outcomes being normally distributed is violated, the integrity of the experiments would not be effected; only the performance of Bayes + +
Algorithm 1 Bayes-OCP
1: Initialize μx and σx2 for all x ∈ X
2: X ← X, t ← 0, D0 ← ∅
3: Start experiment ψ = (X,τ,ρ)
4: loop:
5: t ← t + 1; Dt ← Dt-1 ∪ {xt,yt}
6: 1/σxt2 ← 1/σxt2 + 1
7: μxt← μxt + (yt - μxt)σxt2
(i) Identify a candidate subpopulation X' to replace X:
8: X' ← ∅
9: while X \ X' ⊃ ∅:
10: x* ← argmaxx∈X\X'
Eθx~N(μx,σx2)[G(0)(X' ∪ {x}; {θx})]
11: if Eθx~N(μx,σx2)[G(0)(X' ∪ {x*}; {θx})] > Eθx~N(μx,σx2)[G(0)(X'; {θx})]:
12: X' ← X' ∪ {x*}
13: else: break
(ii) Decide whether to actually replace X with X':
14: if Pθx~N(μx,σx2)[G(0)(X'; {θx}) > G(X, Dt; {θx})] > β:
15: X ← X', t ← 0, D0 ← ∅
16: Start a new experiment ψ = (X,τ,ρ)
+ +OCP in managing various experiments would degrade (see Appendix C for related experiments). Making use of the posteriors it maintains, Bayes-OCP performs two steps at each iteration: + +(i) First, a subpopulation $X' \subset X$ within the currently targeted population $X$ is identified as a potential candidate to target next; due to the combinatorial size of $\Psi$ , it would not be practical to consider every subpopulation individually as a candidate for large $|\mathcal{X}|$ . The ideal candidate would be the subpopulation with the largest expected utility: $X' = \operatorname{argmax}_{X' \subset X} \mathbb{E}_{\theta_x \sim \mathcal{N}(\mu_x, \sigma_x^2)} [\mathcal{G}^{(0)}(X'; \{\theta_x\})]$ . But again due to the combinatorial size of the search space, Bayes-OCP employs a greedy algorithm instead and forms candidate subpopulations by combining, one by one, the atomic-subpopulations that increase the expected utility the most, until the expected utility no longer improves. Note that it is common to use greedy algorithms to solve combinatorial optimization problems (Lawler, 1976; Papadimitriou and Steiglitz, 1982). + +(ii) Then, it is decided whether the current experiment targeting population $X$ should be stopped in favor of targeting candidate $X^{\prime}$ identified earlier instead. A greedy strategy would have done so whenever $\mathbb{E}_{\theta_x} \sim \mathcal{N}(\mu_x, \sigma_x^2)[\mathcal{G}^{(0)}(X'; \{\theta_x\})] > \mathbb{E}_{\theta_x} \sim \mathcal{N}(\mu_x, \sigma_x^2)[\mathcal{G}(X, \mathcal{D}_t; \{\theta_x\})]$ . But from our earlier analysis, we have learned that the optimal strategy is optimistic (cf. Proposition 3). 
As such, Bayes-OCP checks whether it is overwhelmingly likely that the alternative experiment has higher expected utility—that is whether $\mathbb{P}_{\theta_x} \sim \mathcal{N}(\mu_x, \sigma_x^2)\{\mathcal{G}^{(0)}(X'; \{\theta_x\}) > \mathcal{G}(X, \mathcal{D}_t; \{\theta_x\})\} > \beta$ , where $\beta \in (1/2, 1)$ controls the decision-making threshold. When $\beta$ is large, we are more optimistic that the current experiment will succeed and require stronger evidence that the alternative experiment has higher expected utility. Note that, as the posteriors $\mathcal{N}(\mu_x, \sigma_x^2)$ get narrower, the optimism of this rule naturally decreases, which should be the case for the optimal strategy (cf. Proposition 4). As one extreme, the two switching rules become equivalent when $\{\sigma_x^2 \to 0\}$ . + +# 5 RELATED WORK + +Optimal stopping Optimal commitment is essentially a new type of optimal stopping/switching problem. In typical optimal stopping problems (OSPs), the reward an agent can receive evolves based on a stochastic process and the goal of the agent is to determine the optimal time step to stop when the reward to be received is in some sense maximized (Shiryaev, 2007). Optimal commitment is unique in that a positive reward can only be received by not stopping until a pre-specified time horizon $\tau$ . In optimal commitment, there is still a stochastic process (namely, samples $y_{t}$ ) that gradually reveals more information regarding what that positive reward will be at the end, however, the reward—or rather the cost—of stopping earlier is independent of this stochastic process (and is equal to $-tC$ ). 
+ +Sequential hypothesis testing Among other OSPs, optimal commitment is most closely related to sequential hypothesis testing (SHT), where an agent makes sequential observations regarding a given hypothesis and eventually needs to decide whether to reject the said (alternate) hypothesis or reject some null hypothesis (Wald and Wolfowitz, 1948; Yu et al., 2009; Drugowitsch et al., 2012; Shenoy and Angela, 2012; Zhang and Angela, 2013; Drugowitsch et al., 2014; Khalvati and Rao, 2015; Schonbrodt et al., 2017; Fauß et al., 2020). Rejecting the correct hypothesis provides a positive reward whereas waiting for more observations, while informative, is also costly as in OCP. It is well known that the optimal policy in the classic setting of SHT is a thresholding-type policy with fixed thresholds that do not vary over time: The null hypothesis is rejected if some test statistic gets above a threshold (and the alternate hypothesis is rejected if the same statistic gets below a different threshold). + +Optimal commitment can be thought of as a SHT problem with the crucial difference that the meta-experimenter has only the option of discarding the alternate hypothesis (i.e. breaking a commitment), and once some time horizon is reached (i.e. when a commitment is kept), either the null hypothesis or the alternate hypothesis is automatically rejected according to some external success criterion $\rho$ , regardless of what the meta-experimenters' decision might have been otherwise. As we have shown in Proposition 2, the optimal policy still remains a thresholding-type policy, but since there is now a deadline to discard the alternate hypothesis early, the thresholds become time-varying; in particular, they become less and less optimistic as the said deadline approaches (cf. Proposition 4). 
+ +Frazier and Angela (2007); Dayanik and Angela (2013); Alaa and van der Schaar (2016) consider SHT under stochastic deadlines, but different from optimal commitment, they still allow agents to reject both hypotheses at any time. In these works, the agent must make the rejection decision before the deadline is reached to be able to receive a positive reward, whereas in our case, the agent must wait until the deadline to see whether the null hypothesis will be rejected or not. Naghshvar and + +Table 2: Comparison of related experiment designs. Optimal commitment is the only design that aims to decide both when an alternative population should be targeted—as opposed to switching the target population only at a fixed decision point—as well as which population to target among many potential candidates—as opposed to a simple binary decision of “overall population vs. sub-population” or “go vs. no-go”. + +
DesignReferenceWhen?Which?
Randomized Controlled Trial (RCT)Fisher (1935)NeverOnly the initial population
Adaptive Enrichment DesignOndra et al. (2019)Fixed decision pointOverall vs. fixed subpopulation
Adaptive Signature DesignZhang et al. (2017)Fixed decision pointPossibly any population
RCT with Futility StoppingHe et al. (2012)Possibly any timeGo vs. no-go
Optimal Commitment(Ours)Possibly any timeAmong multiple populations
+ +Javidi (2013); Jarrett and van der Schaar (2020) consider active versions of SHT where the agent is able to choose what type of observations to make. Our case is "passive" in the sense that the meta-experimenter cannot influence what kind of samples they are going to receive from the currently running experiment. Finally, optimal commitment, and SHT in general, can be thought of as more structured instances of partially-observed reinforcement learning (RL). As we have discussed earlier, the standard technique here relies on convex reward structures whereas the optimal value function in our case is not convex in general (cf. Proposition 1, see Appendix A for a detailed discussion). + +Adaptive experimentation We introduced optimal commitment predominantly as a tool for population selection during an experiment. In clinical trials, the dominant approach to population selection is adaptive enrichment (Mehta et al., 2009; Magnusson and Turnbull, 2013; Simon and Simon, 2013; Wang and Hung, 2013; Simon and Simon, 2018; Ondra et al., 2019; Thall, 2021) and adaptive signature designs (Freidlin and Simon, 2005; Freidlin et al., 2010; Mi, 2017; Zhang et al., 2017; Bhattacharyya and Rai, 2019). These designs are capable of adapting the target population of a trial as the trial continues, but unlike optimal commitment, they can only do so at fixed analysis points and not just at any time step. While adaptive signature designs can select arbitrary populations, adaptive enrichment designs are also limited by the number of pre-specified populations they can select between, which is typically only two: the overall population and an alternative subpopulation. 
+ +Optimal commitment is also related to clinical trial designs with futility stopping, where an experimenter might terminate a trial early once it becomes apparent that the said trial is highly unlikely to succeed (van der Tweel and van Noord, 2003; Lachin, 2005; He et al., 2012; Jitlal et al., 2012; Kimani et al., 2013; Chang et al., 2020). However, this does not consider the possibility of switching to a new trial that targets a different population. As we will see during our experiments, switching to an alternative experiment might prove preferable even before an ongoing experiment can be deemed futile. In such cases, optimal commitment can make more timely decisions. Table 2 summarizes the experiment designs related to optimal commitment. Finally, it is worth mentioning that there are several methods for managing clinical trials at a portfolio level—that is, determining which clinical trial is to be conducted next (Rogers et al., 2002; Colvin and Maravelias, 2008; Graham et al., 2020). Trial management in this vein is orthogonal to optimal commitment: They are concerned with the success of multiple new treatments and make decisions on a trial-by-trial basis whereas we only ever consider a single intervention and make decisions regarding the target population on a sample-by-sample basis while experiments still continue. See Appendix H for extended related work. + +# 6 EXPERIMENTS + +We want to investigate how Bayes-OCP behaves in environments that differ in terms of ground-truth outcomes, for instance, what happens in environments where the original experiment is quite likely to succeed versus what happens in ones where switching to an alternative experiment is needed. To this end, we simulate experiments where mean outcomes are varied but other aspects of an experiment are fixed: In our environments, there are two atomic-populations, $\mathcal{X} = \{\mathcal{X}_A,\mathcal{X}_B\}$ . 
Both atomic-populations have equal propensities $\eta_{\mathcal{X}_A} = \eta_{\mathcal{X}_B} = 1 / 2$ and the meta-experimenter has the same positively-biased prior for the mean outcome associated with each atomic-population: $\theta_{\mathcal{X}_A},\theta_{\mathcal{X}_B}\sim \mathcal{N}(0.1,0.1)$ . Experiment designs targeting one or both of these atomic-populations all have the same time horizon $\tau = 600$ and success criterion $\rho (\mathcal{D}_{\tau}) = \mathbb{1}\{\Sigma_{(x_t,y_t)\in \mathcal{D}_{\tau}}y_t / |\mathcal{D}_{\tau}| > \alpha /\sqrt{\tau}\}$ , where $\alpha = F^{-1}(95\%)$ . So, experiments are powered to detect a positive mean outcome of 0.1 with probability $\sim 80\%$ . Rewards are given by $R_{X} = 1000\eta_{X}^{0.1}$ —the wider the target population is, the more people a successful intervention can be marketed to—and costs are given by $C_X = 1 / \eta_X^{0.1}$ —the narrower the target population is, the harder it becomes to find subjects eligible to participate. + +Benchmarks We consider the meta-experiment designs summarized in Table 2 as benchmarks (see Appendix A.1 for an RL-based benchmark). Conventional RCT always targets the overall population and never stops early—that is, it always conducts the experiment $\psi = (\{\mathcal{X}_A,\mathcal{X}_B\} ,\tau ,\rho)$ until its completion. Adaptive Enrichment performs an intermediary analysis at $t = \tau /2 = 300$ and greedily selects the experiment with the highest expected utility from $\Psi = \{(X,\tau ,\rho)\}_{X\subseteq \{\mathcal{X}_A,\mathcal{X}_B\}}$ . Futility Stopping is implemented via Bayes-OCP by initializing the set of all experiments as a singleton $\Psi = \{\Psi_0 = (\{\mathcal{X}_A,\mathcal{X}_B\} ,\tau ,\rho)\}$ . Intuitively, futility stopping only decides whether or not to stop the initial experiment that targets the overall population early. Bayes-OCP is initialized with $\beta = 0.80$ (see Appendix E for a sensitivity analysis). 
We also consider an abla + +Table 3: Performance comparison in various environment instances. Bayes-OCP has the highest expected utility—and a smaller FWER than conventional RCTs—when averaged over all environment instances. This is because Bayes-OCP is a balanced design whose structure does not favor certain environment instances over others. As an example, compare it with conventional RCTs: RCTs do not have an adaptive structure hence they favor green environments where it is not necessary to adapt the target population of the initial experiment. *Instances favored/addressed partially + +
Algorithms:Oracle RCTRCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
Favored Instances:N/AGreenGreen/Amber*Green/RedAmber*/RedBalanced (incl. Amber)
All Instances (100%)Utility260.4-39.4 (6.7)106.5 (6.9)150.0 (3.5)32.6 (3.1)171.8 (3.6)
FWER0.0%0.3% (0.1%)0.2% (0.1%)0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success75.2%56.1% (0.7%)53.2% (0.8%)45.4% (1.3%)10.5% (0.8%)52.4% (1.2%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)607.5 (1.9)615.1 (1.2)
T-to-F35.6600.0 (0.0)548.9 (16.1)57.6 (4.6)3.0 (0.5)70.8 (8.2)
Green Instances (47.3%)Utility389.6388.7 (3.9)385.6 (3.7)337.7 (5.7)63.1 (3.5)343.4 (7.3)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)0.9 (0.0)0.1 (0.0)
Success99.0%98.9% (0.4%)97.4% (0.7%)86.0% (1.4%)18.8% (0.9%)88.2% (2.0%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)605.8 (1.5)602.8 (0.4)
T-to-F600.0600.0 (0.0)759.4 (36.4)46.6 (7.6)2.5 (0.5)62.3 (14.3)
Amber Instances (29.4%)Utility258.6-300.3 (19.8)-17.6 (6.5)-5.3 (5.4)11.6 (3.4)63.2 (5.6)
FWER0.0%0.7% (0.3%)0.6% (0.3%)0.4% (0.3%)0.0% (0.0%)0.3% (0.2%)
Switches1.00.0 (0.0)0.7 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success96.6%30.0% (2.0%)22.6% (1.5%)15.2% (2.0%)5.3% (1.0%)35.2% (1.8%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)617.2 (5.9)663.9 (7.2)
T-to-F600.0600.0 (0.0)745.0 (13.1)78.3 (9.3)3.4 (0.6)104.4 (19.1)
Red Instances (23.3%)Utility0.0-579.2 (4.1)-304.2 (4.4)-35.1 (1.7)-2.8 (1.1)-39.7 (3.4)
FWER0.0%0.2% (0.3%)0.2% (0.3%)0.1% (0.2%)0.0% (0.0%)0.2% (0.3%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%2.1% (0.4%)1.8% (0.5%)0.9% (0.3%)0.3% (0.4%)1.6% (0.7%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)634.8 (39.1)
T-to-F0.0600.0 (0.0)343.1 (8.1)38.9 (2.1)3.5 (1.4)45.8 (2.9)
+ +tion of Bayes-OCP where decisions are made greedily instead of optimistically (Greedy Bayes-OCP). As a baseline of maximum achievable performance, we consider an oracle (Oracle RCT) that always runs the RCT with the optimum target (or does not run any RCT at all if that happens to be optimal). + +**Environments** A meta-experimenter's performance is specific to the environment instance. In particular, it depends on the ground-truth outcome distributions $\{\Omega_x\}$ for different populations. For example, an algorithm that always immediately stops the experiment would perform best when the mean outcome is negative. Hence, to faithfully evaluate the benchmarks, we need to focus on the average performance across different environments. To this end, we randomly generated 1000 environments (repeated five times to obtain error bars) with true mean outcomes $\theta_{\mathcal{X}_A}, \theta_{\mathcal{X}_B}$ sampled independently from $\mathcal{N}(0.1, 0.1)$ . Given these means, outcome distributions are set to be Gaussian with unit variance such that $\Omega_x = \mathcal{N}(\theta_x, 1)$ . Depending on the true mean outcome, these environments can be categorized into three groups: (i) green instances where the initial experiment targeting the overall population has the highest utility, (ii) amber instances where an alternative experiment that targets a subpopulation has the highest utility, and (iii) red instances where no experiment has positive utility hence running no experiments is the optimal decision. + +Different benchmarks favor different instances (see the top row of Table 3): Conventional RCTs do not allow for any adaptation hence they favor green instances where the target population of the initial experiment does not need to be adapted. Adaptive Enrichment allows for adaptation but only at a certain time point, which is often too late to stop unsuccessful experiments (as in red instances). 
However, an adaptive enrichment design at least makes it possible to eventually target a subpopulation, even though it might be too late to do so at the pre-specified decision point, hence it partially accommodates amber instances. Futility Stopping decides between either continuing with the initial experiment or stopping all experimentation completely (targeting a subpopulation is not an option) hence it favors either green or red instances (but not amber instances). Greedy Bayes-OCP is pessimistic (or rather not optimistic enough) towards any ongoing experiment succeeding, hence it favors red instances where no experiment is likely to succeed. Similar to adaptive enrichment, Greedy Bayes-OCP at least allows subpopulations to be targeted hence it too partially accommodates amber instances. + +Main results Performance of a meta-experimenter is primarily measured by Bayesian utility which is the expected utility averaged over randomly sampled environment instances (Utility). Remember that maximizing utility was our main objective, and as such, Bayes-OCP has the highest expected utility when averaged over all environment instances, see Table 3. Unlike other benchmarks, Bayes-OCP strikes a good balance in prioritizing all environment instances at the same time. This is because Bayes- + +OCP (i) can make timely decisions—unlike Adaptive Enrichment—and (ii) is optimistic hence it does not stop likely-to-succeed experiments prematurely—unlike Greedy Bayes-OCP. More specifically, + +(i) Timeliness of Bayes-OCP: Bayes-OCP has an advantage in amber and red instances over adaptive enrichment and futility stopping. Consider the example in Figure 3: While Bayes-OCP stops in a timely manner, adaptive enrichment can only stop at a fixed decision point and experiments with futility stopping only stop when the ongoing experiment is failing not as soon as a better alternative emerges. 
This underlines the exploitative aspect of Bayes-OCP—making and breaking commitments to maximize utility. + +(ii) Optimism of Bayes-OCP: While a design that favors early stopping is obviously desirable in amber and red environments, how much it is favored should be moderated to also succeed in green environments. Consider the example in Figure 4: Greedy Bayes-OCP prematurely stops the initial experiment in a green environment while Bayes-OCP does not. Theoretically, we know that the optimal policy should be optimistic towards the ongoing experiment succeeding and be hesitant to stop to a certain extent. This underlines the exploratory aspect of Bayes-OCP—keeping a seemingly failing commitment still has value as it reveals more information regarding whether the commitment is actually failing. + +In Table 3, in addition to Utility, we also report the family-wise error rate (FWER)—that is the frequency of runs where at least one experiment (denote it with $\psi^i$ ) is declared successful (i.e. $\rho^i(\mathcal{D}_\tau^i) = 1$ ) despite the mean outcome being negative for the targeted population (i.e. $\bar{\theta}_{X^i} < 0$ )—the average number of times the target population has been switched (Switches), the probability of success which is defined as achieving positive utility (Success), the average time until a successful outcome (Time-to-Success, $T$ -to- $S$ ), and the average time until an unsuccessful outcome where all experimentation is stopped + +with negative utility (Time-to-Failure, $T$ -to- $F$ ), see Appendix G for details. Importantly, Bayes-OCP does not compromise the error control of experiments, on the contrary, it even achieves a smaller FWER than conventional RCTs. 
This is because aggregate data is only ever used to select experiments, otherwise no two experiments consult each other's data when evaluating a success criterion so that the potential confoundedness that could have been caused by the adaptiveness of Bayes-OCP is avoided when declaring an experiment as successful (see Appendix B for a discussion on error control). + +Supplementary results We also provide supplementary results: Appendix A.1 evaluates RL-based benchmarks, Appendix B.1 investigates error control, Appendix C considers environments with non-Gaussian outcomes, Appendix D considers environments with more than two atomic-populations, and Appendix E analyzes the sensitivity of Bayes-OCP's performance to its hyper-parameter $\beta$ . + +# 7 CONCLUSION + +Two aspects of OCP require further discussion: (i) How can it be approached from the perspective of reinforcement learning? While OCP technically describes a special class of POMDPs, we have not found this to be constructive in finding a solution (see Appendix A). (ii) What are the implications of using Bayes-OCP in terms of error control? It has no impact on individual error rates and can be adapted to control FWER (see Appendix B). See Appendix F for a discussion on future work. + +![](images/a619405fd97dc0546afb287a9790828788911848db4ae9f868bdd325c371a984.jpg) +Figure 3: Timeliness of Bayes-OCP. Bayes-OCP is first to (correctly) stop the initial experiment in an amber instance (excluding Greedy Bayes-OCP). Adaptive enrichment can only stop at a pre-specified time, while futility stopping fails to consider switching to an alternative experiment, which is proven to be preferable earlier than stopping. + +![](images/1391f46e577a92e0ab99ca7dfda5da32e778dbe68ef5fd7fa3e04ba1028ad26e.jpg) +Figure 4: Optimism of Bayes-OCP. Greedy Bayes-OCP (incorrectly) stops due to initial noise in a green instance while Bayes-OCP does not stop since it is more optimistic (as the optimal policy should, cf. Proposition 3). 
+ +# ETHICS STATEMENT + +As the main application of optimal commitment, we have focused on adaptive experimentation, particularly experiments that are run as part of clinical development. Clinical trials have a huge impact on the wellbeing of patients and this high-stakes nature of clinical trials naturally raises some ethical concerns; we discuss two major ones in this section. However, before we start our discussion, it should be emphasized that clinical trials are not the only application domain of optimal commitment. As we have highlighted at the end of Section 2, our contributions are generally applicable to decision-making problems such as portfolio and energy management. Moreover, not all adaptive experiments are clinical and have the same high stakes as a clinical trial. For instance, A/B testing is common in online advertisement to determine what recommendation policies lead to more user engagement (Gui et al., 2015; Xu et al., 2015; Kohavi and Longbotham, 2017). Therefore, the ethical concerns we discuss here do not universally concern all possible applications of optimal commitment. + +The first concern is how the designed error rate of an individual experiment is affected when multiple such experiments are managed together using Bayes-OCP in an adaptive manner, in particular, whether any error rate is inflated by the use of Bayes-OCP or not. We discuss error control in Appendix B with supplementary experiments. But briefly, Bayes-OCP has essentially no impact on the error rate of experiments on an individual level, and when controlling their family-wise error rate is also a concern, it can easily be adapted to accommodate this additional constraint as well. + +The second concern is that an adaptive approach to population selection might lead to overly conservative experiments that unnecessarily limit the use of an effective treatment. 
As we have mentioned in the introduction to motivate the need for optimal commitment, when the treatment is effective only for a subpopulation (cf. amber instances in our experiments), population selection is absolutely necessary, otherwise the treatment is most likely to be found ineffective and discarded after an experiment that targets the overall patient population as a whole, which would deny the treatment for the subpopulation that would have benefited from it. On the flip side of this, when the treatment happens to be effective for everyone (cf. green instances in our experiment), population selection might lead to conducting a restrictive experiment that only targets a small subpopulation, which this time, would deny the treatment for the rest of the patient population. This is essentially the reason behind the performance drop between Bayes-OCP and conventional RCTs in green instances (see Table 3). There is a trade-off between the performance in amber instances and green instances; and Bayes-OCP achieves a better balance between the two compared with a conventional RCT as evidenced by its superior performance when averaged over all environment instances (again see Table 3); although it causes a drop in performance for green instances, it more than makes up for that drop in amber instances. This balance is partly controlled by how optimistic Bayes-OCP is, which is in turn dictated by its hyper-parameter $\beta$ —larger $\beta$ leads to more optimistic decisions towards ongoing experiments, which favors green instances more than amber instances. We analyze the sensitivity of Bayes-OCP's performance to $\beta$ in Appendix E; and for all configurations that we have evaluated, Bayes-OCP always performs significantly better than a conventional RCT. 
+ +# REPRODUCIBILITY STATEMENT + +All our experiments are based on synthetic simulations, hence our results can easily be reproduced by following the specifications in Section 6 without needing access to any private dataset. In order to aid reproducibility, we have rigorously described all our benchmarks in algorithmic form, similar to Algorithm 1, in Appendix J. Moreover, the source code necessary to reproduce our main results in Table 3 is made publicly available at https://github.com/alihanhyk/optcommit and https://github.com/vanderschaarlab/optcommit. + +# ACKNOWLEDGMENTS + +We would like to thank the reviewers and the members of the van der Schaar lab, for their valuable input, comments, and suggestions. This work was supported by the US Office of Naval Research (ONR) and the National Science Foundation (NSF, grant number 1722516). + +# REFERENCES + +Alaa, A. M. and van der Schaar, M., "Balancing suspense and surprise: Timely decision making with endogenous information acquisition," in Proc. Neural Inf. Process. Syst., 2016. + +Auer, P., Cesa-Bianchi, N., and Fischer, P., "Finite-time analysis of the multiarmed bandit problem," Mach. Learn., vol. 47, no. 2, pp. 235-256, 2002. +Bhattacharyya, A. and Rai, S. N., "Adaptive signature design—review of the biomarker guided adaptive phase-III controlled design," Contemporary Clin. Trials Commun., vol. 15, p. 100378, 2019. +Bubeck, S., Cesa-Bianchi, N. et al., "Regret analysis of stochastic and nonstochastic multi-armed bandit problems," Found. Trends Mach. Learn., vol. 5, no. 1, pp. 1-122, 2012. +Chang, Y., Song, T., Monaco, J., and Ivanova, A., "Futility stopping in clinical trials, optimality and practical considerations," J. Biopharmaceutical Statist., vol. 30, no. 6, pp. 1050-1059, 2020. +Chiu, Y.-D., Koenig, F., Posch, M., and Jaki, T., "Design and estimation in clinical trials with subpopulation selection," Statist. Med., vol. 37, pp. 4335-4352, 2018. +"Trends, charts, and maps," ClinicalTrials.gov. 
[Online]. Available: https://www.clinicaltrials.gov/ct2/resources/trends#RegisteredStudiesOverTimePostedResults +Colvin, M. and Maravelias, C. T., "A stochastic programming approach for clinical trial planning in new drug development," Comput. Chem. Eng., vol. 32, no. 11, pp. 2626-2642, 2008. +Dayanik, S. and Angela, J. Y., "Reward-rate maximization in sequential identification under a stochastic deadline," SIAM J. Control Optim., vol. 51, no. 4, pp. 2922-2948, 2013. +Demets, D. L. and Lan, K. K. G., "Interim analysis: The alpha spending function approach," Statist. Med., vol. 13, no. 13-14, pp. 1341-1352, 1994. +Drugowitsch, J., Moreno-Bote, R., Churchland, A. K., Shadlen, M. N., and Pouget, A., "The cost of accumulating evidence in perceptual decision making," J. Neuroscience, vol. 32, no. 11, pp. 3612-3628, 2012. +Drugowitsch, J., Moreno-Bote, R., and Pouget, A., "Optimal decision-making with time-varying evidence reliability," in Proc. Neural Inf. Process. Syst., 2014. +Fauß, M., Zoubir, A. M., and Poor, H. V., “Minimax optimal sequential hypothesis tests for Markov processes,” Ann. Statist., vol. 48, no. 5, pp. 2599–2621, 2020. +Fisher, R. A., The Design of Experiments. Edinburgh, Scotland: Oliver & Boyd, 1935. +Frazier, P. and Angela, J. Y., "Sequential hypothesis testing under stochastic deadlines," in Proc. Neural Inf. Process. Syst., 2007. +Freidlin, B. and Simon, R., "Adaptive signature design: An adaptive clinical trial design for generating and prospectively testing a gene expression signature for sensitive patients," *Clin. Cancer Res.*, vol. 11, no. 21, pp. 7872-7878, 2005. +Freidlin, B., Jiang, W., and Simon, R., "The cross-validated adaptive signature design," Clin. Cancer Res., vol. 16, no. 2, pp. 691-698, 2010. +Ghare, G. and Leutenegger, S. T., "Improving speedup and response times by replicating parallel programs on a SNOW," in Proc. Int. Conf. Job Scheduling Strategies Parallel Process., 2005. 
+Graham, E., Jaki, T., and Harbron, C., "A comparison of stochastic programming methods for portfolio level decision-making," J. Biopharmaceutical Statist., vol. 30, no. 3, pp. 405-429, 2020. +Gui, H., Xu, Y., Bhasin, A., and Han, J., "Network A/B testing: From sampling to estimation," in Proc. Int. Conf. World Wide Web, 2015. +He, P., Lai, T. L., and Liao, O. Y.-W., “Futility stopping in clinical trials,” Statist. Interface, vol. 5, no. 4, pp. 415-423, 2012. +Jarrett, D. and van der Schaar, M., "Inverse active sensing: Modeling and understanding timely decision-making," in Int. Conf. on Mach. Learn., 2020. + +Jitlal, M., Khan, I., Lee, S., and Hackshaw, A., "Stopping clinical trials early for futility: retrospective analysis of several randomised clinical studies," Brit. J. Cancer, vol. 107, no. 6, pp. 910-917, 2012. +Kaitin, K. I., "Deconstructing the drug development process: The new face of innovation," Clin. Pharmacology Therapeutics, vol. 87, no. 3, pp. 356-361, 2010. +Karatzas, I. and Wang, H., "Utility maximization with discretionary stopping," SIAM J. Control Optim., vol. 39, pp. 306-329, 2020. +Khalvati, K. and Rao, R. P., "A Bayesian framework for modeling confidence in perceptual decision making," in Proc. Neural Inf. Process. Syst., 2015. +Kimani, P. K., Todd, S., and Stallard, N., "Conditionally unbiased estimation in phase II/III clinical trials with early stopping for futility," Statist. Med., vol. 32, no. 17, pp. 2893-2910, 2013. +Kohavi, R. and Longbotham, R., "Online controlled experiments and A/B testing," Encyclopedia Mach. Learn. Data Mining, vol. 7, no. 8, pp. 922-929, 2017. +Lachin, J. M., “A review of methods for futility stopping based on conditional power,” Statist. Med., vol. 24, no. 18, pp. 2747-2764, 2005. +Lawler, E., Combinatorial Optimization, Networks and Matroids. New York: Holt, Rinehard & Winston, 1976. +Lipkovich, I., Dmitrienko, A., and D'Agostino Sr., R. 
B., "Tutorial on biostatistics: Data-driven subgroup identification and analysis in clinical trials," Statist. Med., vol. 36, pp. 136-196, 2017. +Magnusson, B. P. and Turnbull, B. W., "Group sequential enrichment design incorporating subgroup selection," Statist. Med., vol. 32, no. 16, pp. 2695-2714, 2013. +Markowitz, H., Portfolio selection: Efficient diversification of investment. New York: John Wiley, 1959. +Mehta, C., Gao, P., Bhatt, D. L., Harrington, R. A., Skerjanec, S., and Ware, J. H., "Optimizing trial design: Sequential, adaptive, and enrichment strategies," Circulation, vol. 119, no. 4, pp. 597-605, 2009. +Merton, R. C., "Life time portfolio selection under uncertainty: The continuous-time case," Rev. Econ. Statist., vol. 51, pp. 247-257, 1969. +Mi, G., "Enhancement of the adaptive signature design for learning and confirming in a single pivotal trial," Pharmaceutical Statist., vol. 16, no. 5, pp. 312-321, 2017. +Moineddin, R., Butt, D. A., Tomlinson, G., and Beyene, J., "Identifying subpopulations for subgroup analysis in a longitudinal clinical trial," Contemporary Clin. Trials, vol. 29, pp. 817-822, 2008. +Naghshvar, M. and Javidi, T., "Active sequential hypothesis testing," Ann. Statist., vol. 41, no. 6, pp. 2703-2738, 2013. +Ni, T., Eysenbach, B., and Salakhutdinov, R., "Recurrent model-free RL can be a strong baseline for many POMDPs," in Proc. Int. Conf. Mach. Learn., 2022. +Olofsson, M., Önskog, T., and Lundström, N. L. P., "Management strategies for run-of-river hydropower plants: An optimal switching approach," Optim. Eng., vol. 23, pp. 1707-1731, 2022. +Ondra, T., Jobjörnsson, S., Beckman, R. A., Burman, C.-F., König, F., Stallard, N., and Posch, M., "Optimized adaptive enrichment designs," Statist. Methods Med. Res., vol. 28, no. 7, pp. 2096-2111, 2019. +Papadimitriou, C. H. and Steiglitz, K., Combinatorial Optimization—Algorithms and Complexity. Englewood Cliffs, NJ: Prentice Hall, 1982. +Rafique, S. F. 
and Jianhua, Z., "Energy management system, generation and demand predictors: A review," IET Gener. Transmiss. Distribution, vol. 12, no. 3, pp. 519-530, 2018. + +Rogers, M. J., Gupta, A., and Maranas, C. D., "Real options based analysis of optimal pharmaceutical research and development portfolios," Ind. Eng. Chem. Res., vol. 41, no. 25, pp. 6607-6620, 2002. +Schönbrodt, F. D., Wagenmakers, E.-J., Zehetleitner, M., and Perugini, M., "Sequential hypothesis testing with Bayes factors: Efficiently testing mean differences," Psychol. Methods, vol. 22, no. 2, p. 322, 2017. +Shenoy, P. and Angela, J. Y., "Strategic impatience in Go/NoGo versus forced-choice decision-making," in Proc. Neural Inf. Process. Syst., 2012. +Shiryaev, A. N., Optimal Stopping Rules. Springer Science & Business Media, 2007. +Simon, N. and Simon, R., "Adaptive enrichment designs for clinical trials," Biostatistics, vol. 14, no. 4, pp. 613-625, 2013. +Simon, N. and Simon, R., "Using Bayesian modeling in frequentist adaptive enrichment designs," Biostatistics, vol. 19, no. 1, pp. 27-41, 2018. +Spaan, M. T. J., "Partially observable Markov decision processes," in Reinforcement Learning. Springer, 2012, pp. 387-414. +Takebe, T., Imai, R., and Ono, S., "The current status of drug discovery and development as originated in United States academia: The influence of industrial and academic collaboration on drug discovery and development," Clin. Transl. Sci., vol. 11, no. 6, pp. 597-606, 2018. +Thall, P. F., "Adaptive enrichment designs in clinical trials," Annu. Rev. Statist. Appl., vol. 8, pp. 393-411, 2021. +Umscheid, C. A., Margolis, D. J., and Grossman, C. E., "Key concepts of clinical trials: A narrative review," Postgraduate Med., vol. 123, no. 5, pp. 194-204, 2011. +van der Tweel, I. and van Noord, P. A., "Early stopping in clinical trials and epidemiologic studies for 'futility': Conditional power versus sequential analysis," J. Clin. Epidemiology, vol. 56, no. 7, pp. 610-617, 2003. +Wald, A. 
and Wolfowitz, J., "Optimum character of the sequential probability ratio test," Ann. Math. Statist., vol. 19, pp. 326-339, 1948. +Wang, D., Joshi, G., and Wornell, G., "Efficient task replication for fast response times in parallel computation," in Proc. ACM SIGMETRICS Conf., 2014. +Wang, D., Joshi, G., and Wornell, G. W., "Efficient straggler replication in large-scale parallel computing," ACM Trans. Model. Perform. Eval. Comput. Syst., vol. 4, no. 2, pp. 1-23, 2019. +Wang, S.-J. and Hung, H. J., "Adaptive enrichment with subpopulation selection at interim: Methodologies, applications and design considerations," Contemporary clinical trials, vol. 36, no. 2, pp. 673-681, 2013. +Xu, Y., Chen, N., Fernandez, A., Sinno, O., and Bhasin, A., "From infrastructure to culture: A/B testing challenges in large scale social networks," in Proc. ACM SIGKDD Int. Conf. Knowl. Discovery Data Mining, 2015. +Yu, A. J., Dayan, P., and Cohen, J. D., "Dynamics of attentional selection under conflict: Toward a rational Bayesian account," J. Exp. Psychol. Human Perception Performance, vol. 35, no. 3, p. 700, 2009. +Zhang, S. and Angela, J. Y., "Forgetful Bayes and myopic planning: Human learning and decision-making in a bandit setting," in Proc. Neural Inf. Process. Syst., 2013. +Zhang, Z., Li, M., Lin, M., Soon, G., Greene, T., and Shen, C., "Subgroup selection in adaptive signature designs of confirmatory clinical trials," J. Roy. Statist. Soc.: Ser. C (Appl. Statist.), vol. 66, no. 2, pp. 345-361, 2017. + +# A A REINFORCEMENT LEARNING PERSPECTIVE + +Optimal commitment can be viewed as a partially-observed reinforcement learning problem. 
Let the tuple $(\mathcal{S},\mathcal{A},\mathcal{Z},\mathcal{T},\mathcal{O},\mathcal{R})$ denote a partially-observable Markov decision process (POMDP), where $\mathcal{S}$ is the (unobserved) state space, $\mathcal{A}$ is the action space, $\mathcal{Z}$ is the observation space, $\mathcal{T} \in \Delta(\mathcal{S})^{S \times S}$ describes the transition dynamics, $\mathcal{O} \in \mathcal{Z}^S$ describes the observation dynamics, and $\mathcal{R} \in \mathbb{R}^S$ describes the reward dynamics. Then, OCPs as defined in Section 2 can also be expressed as a special class of POMDPs: Letting $\mathcal{Y} = \mathbb{R}$ denote the outcome space for clarity, $\mathfrak{D} = \cup_{t=0}^{\infty}(\mathcal{X} \times \mathcal{Y})^t$ be the space of all possible datasets $\mathcal{D}_t$ , and $\mathfrak{O}$ be the space of all possible outcome distributions $\Omega$ , + +- $\mathcal{S} \doteq \{\varnothing\} \cup (\Psi \times \mathfrak{D} \times \mathfrak{O}^{\mathcal{X}})$ , where states $s = (\psi, \mathcal{D}_t, \{\Omega_x\}_{x \in \mathcal{X}})$ consist of the ongoing experiment $\psi \in \Psi$ , the dataset $\mathcal{D}_t \in \mathfrak{D}$ collected by the ongoing experiment so far, and the true outcome distributions $\{\Omega_x \in \mathfrak{O}\}$ , +- $\mathcal{A} \doteq \{\varnothing\} \cup \Psi$ +- $\mathcal{Z} \doteq \{\varnothing\} \cup (\mathcal{X} \times \mathcal{Y})$ +- $\mathcal{T}(s = \emptyset, a) \doteq \emptyset$ and + +$$ +\begin{array}{l} \mathcal {T} (s = (\psi = (X, \tau , \rho), \mathcal {D} _ {t}, \{\Omega_ {x} \}), a) \\ \begin{array}{c} \dot {=} \left\{ \begin{array}{l l} \emptyset & \text {i f} a = \emptyset \\ s ^ {\prime} = (\psi , \mathcal {D} _ {t + 1} = \mathcal {D} _ {t} \cup \{x _ {t + 1}, y _ {t + 1} \}, \{\Omega_ {x} \}) \\ \quad \text {s . 
t .} x _ {t + 1} \sim \{\eta_ {x | X} \}, y _ {t + 1} \sim \Omega_ {x _ {t + 1}} & \text {i f} a = \psi \\ s ^ {\prime} = (\psi^ {\prime}, \mathcal {D} _ {1} = \{x _ {1}, y _ {1} \}, \{\Omega_ {x} \}) \\ \quad \text {s . t .} x _ {1} \sim \{\eta_ {x | X ^ {\prime}} \}, y _ {1} \sim \Omega_ {x _ {1}} & \text {i f} a = \psi^ {\prime} = (X ^ {\prime}, \tau^ {\prime}, \rho^ {\prime}) \neq \psi , \end{array} \right. \end{array} \\ \end{array} +$$ + +- $\mathcal{O}(s' = \varnothing) \doteq \varnothing$ and + +$$ +\mathcal {O} \left(s ^ {\prime} = (\psi , \mathcal {D} _ {t + 1} = \mathcal {D} _ {t} \cup \{x _ {t + 1}, y _ {t + 1} \}, \{\Omega_ {x} \})\right) \doteq \left(x _ {t + 1}, y _ {t + 1}\right), +$$ + +- $\mathcal{R}(s' = \emptyset) \doteq 0$ and + +$$ +\mathcal {R} \left(s ^ {\prime} = (\psi = (X, \tau , \rho), \mathcal {D} _ {t + 1}, \{\Omega_ {x} \})\right) \doteq - C _ {\psi} + R _ {\psi} \cdot \mathbb {1} \{t + 1 = \tau \} \cdot \rho \left(\mathcal {D} _ {t + 1}\right). +$$ + +Since ongoing experiments $\psi$ are completely dictated by actions, and datasets $\mathcal{D}_t$ collected by the ongoing experiments consist solely of observations $(x_t,y_t)$ , the only unobserved component of the states in this POMDP is the true outcome distributions $\{\Omega_x\}_{x\in \mathcal{X}}$ . Hence, the optimal policy should have the form $\pi (\psi ,\mathcal{D}_t,b)$ where $b\in \Delta (\mathfrak{O}^{\mathcal{X}})$ denotes beliefs over $\{\Omega_x\}$ that is posterior distributions over the true outcome distributions. For instance, when $\Omega_{x} = \mathcal{N}(\theta_{x},1)$ as we have been assuming in Sections 3 and 4, posteriors over mean outcomes $\{\theta_x\}_{x\in \mathcal{X}}$ , which are given by parameters $\{\mu_x,\sigma_x^2\}$ such that $\theta_{x}|\bar{\mathcal{D}}_{t}^{i}\sim \mathcal{N}(\mu_{x},\sigma_{x}^{2})$ , constitute as beliefs. + +Now although an OCP can be expressed as a POMDP, doing so is not particularly helpful in finding a solution. 
As we have already discussed in Section 3, the standard approach to solving a POMDP would be to use dynamic programming and compute the optimal value function $V^{*}$ and the optimal Q-function $Q^{*}$ iteratively according to Bellman optimality conditions + +$$ +\begin{array}{l} Q ^ {*} (b, a) = \mathbb {E} _ {s \sim b, s ^ {\prime} \sim \mathcal {T} (s, a), z ^ {\prime} = \mathcal {O} (s ^ {\prime}), b ^ {\prime} | \{b, z ^ {\prime} \}} \left[ \mathcal {R} \left(s ^ {\prime}\right) + V ^ {*} \left(b ^ {\prime}\right) \right] \\ V ^ {*} (b) = \max _ {a \in \mathcal {A}} Q ^ {*} (b, a), \\ \end{array} +$$ + +where $b'|\{b, z'\}$ denotes the updated belief $b'$ after having belief $b$ and making a new observation $z'$ . When the state space $S$ is discrete—or equivalently in our case, when the space of outcome distributions $\Omega \in \mathfrak{D}$ is discrete— $V^*$ and $Q^*$ happen to be convex functions, which makes it possible to perform these iterations efficiently by approximating $V^*$ and $Q^*$ using functions of the form $f(b) = \max\{a_i b + a_j'\}$ (Spaan, 2012). However, even in the simplest of cases where $S$ is continuous—or equivalently, the space of outcome distributions $\Omega \in \mathfrak{D}$ is continuous, for instance when $\Omega_x = \mathcal{N}(\theta_x, 1)$ —the convexity of $V^*$ and $Q^*$ no longer generally holds. In fact, we show in Proposition 1 that neither $V^*$ nor $-V^*$ is convex with respect to beliefs $b \equiv \{t, \mu\}$ for at least one instance of the simplified OCP that we have analyzed in Section 3. + +Table 4: Performance comparison between Futility Stopping with RL-based algorithms and with Bayes-OCP. + +
Algorithms:Oracle RCTFutility Stopping w/ Discretized RLFutility Stopping w/ Deep Q-learningFutility Stopping w/ Bayes-OCP
All Instances (100%)Utility260.4131.8 (4.3)78.8 (3.1)150.0 (3.5)
FWER0.0%0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.6 (0.0)0.7 (0.0)0.5 (0.0)
Success75.2%41.0% (1.0%)24.2% (0.8%)45.4% (1.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F35.654.3 (2.2)23.6 (1.8)57.6 (4.6)
Green Instances (47.3%)Utility389.6309.5 (4.1)185.0 (4.9)337.7 (5.7)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.2 (0.0)0.5 (0.0)0.1 (0.0)
Success99.0%80.9% (0.7%)47.7% (1.1%)86.0% (1.4%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F600.072.7 (7.1)11.0 (0.8)46.6 (7.6)
Amber Instances (29.4%)Utility258.6-23.9 (5.4)-16.6 (6.8)-5.3 (5.4)
FWER0.0%0.2% (0.2%)0.1% (0.1%)0.4% (0.3%)
Switches1.00.9 (0.0)0.9 (0.0)0.8 (0.0)
Success96.6%9.1% (0.8%)5.2% (1.3%)15.2% (2.0%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F600.066.1 (4.3)39.5 (4.4)78.3 (9.3)
Red Instances (23.3%)Utility0.0-33.0 (1.6)-16.5 (5.2)-35.1 (1.7)
FWER0.0%0.1% (0.2%)0.1% (0.2%)0.1% (0.2%)
Switches1.01.0 (0.0)1.0 (0.0)1.0 (0.0)
Success0.0%0.2% (0.2%)0.4% (0.4%)0.9% (0.3%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F0.033.7 (0.9)18.3 (4.8)38.9 (2.1)
+ +# A.1 EXPERIMENTS WITH REINFORCEMENT LEARNING BENCHMARKS + +Having said all that, one naive way to still compute $V^{*}$ and $Q^{*}$ iteratively according to Bellman optimality conditions is to discretize the belief space. We call this benchmark Discretized RL and we use it to perform futility stopping—that is when $|\Psi| = 1$ , deciding whether to stop the only viable experiment design early or not. Otherwise, the dimensionality of the belief state explodes combinatorially with respect to $|\Psi|$ . We consider the same setting that we have considered during our experiments in Section 6 and compare the performance of Futility Stopping with Discretized RL with that of Futility Stopping with Bayes-OCP. When implementing discretized RL, instead of keeping track of the entire dataset $\mathcal{D}_t$ , we only keep track of the sufficient statistic $\mu_t = \sum_{(x_{t'}, y_{t'})} y_{t'} / |\mathcal{D}_t|$ , restrict the domain of $\mu_t$ to interval $[-0.3, 0.3]$ , and discretize this interval into 100 equally-spaced bins. In addition to discretized RL, we also consider the approach proposed by Ni et al. (2022) for solving complex classes of POMDPs, which the optimal commitment problem is one of. Briefly, we employ deep Q-learning (as such, we call this benchmark Deep Q-learning) to train a neural network as an approximation of the Q-function $Q^{*}(b, a)$ using the POMDP we formalized earlier as a simulator. As the network architecture, we consider a multi-layer perceptron with two hidden layers of size 100 and with tanh activations. + +Results are given in Table 4; futility stopping with Bayes-OCP performs better than futility stopping with discretized RL as well as futility stopping with deep Q-learning. In addition to the bad performance of discretized RL, it is also not feasible to scale it to use cases beyond futility stopping. When $|\Psi| > 1$ , we would need to keep separate track of each $\mu_x$ . 
Moreover, we would also need to start keeping track of the scale parameters $\{\sigma_x\}$ since it would now be possible to distribute samples among multiple atomic-populations in multiple ways by targeting different populations with different experiments (we no longer would be able to treat the target population of the only viable experiment design as the only atomic-population there is). Noting that $\sigma_x$ 's already take discrete values with at least $\tau$ -many possible values, merely increasing the number of viable experiments $|\Psi|$ from one to two causes the dimensionality of the belief space to jump from 100 to $\sim (100 \times 600)^2 = 36 \times 10^8$ . Deep Q-learning performs even worse as it ignores all structure present in the optimal commitment problem, and instead, views the POMDP that describes it as a black-box simulator. + +Table 5: Performance comparison of algorithms with family-wise error control. + +
Algorithms:Oracle RCTRCTAdaptive Enrichment w/ Bonferroni Corr.Futility Stopping w/ Bayes-OCPGreedy Bayes-OCP w/ Bonferroni Corr.Bayes-OCP w/ Bonferroni Corr.
All Instances (100%)Utility260.4-39.4 (6.7)91.4 (5.4)150.0 (3.5)23.7 (2.2)158.7 (5.2)
FWER0.0%0.3% (0.1%)0.1% (0.1%)0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.0 (0.0)0.5 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success75.2%56.1% (0.7%)51.1% (0.7%)45.4% (1.3%)7.7% (0.5%)49.3% (1.5%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)606.2 (2.3)616.6 (1.7)
T-to-F35.6600.0 (0.0)543.0 (15.7)57.6 (4.6)2.3 (0.3)65.8 (7.0)
Green Instances (47.3%)Utility389.6388.7 (3.9)378.8 (3.1)337.7 (5.7)46.1 (3.3)325.8 (5.5)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)1.0 (0.0)0.2 (0.0)
Success99.0%98.9% (0.4%)96.1% (0.6%)86.0% (1.4%)13.8% (0.8%)84.7% (1.5%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)604.9 (2.0)604.0 (0.7)
T-to-F600.0600.0 (0.0)768.6 (19.0)46.6 (7.6)2.0 (0.3)66.5 (17.7)
Amber Instances (29.4%)Utility258.6-300.3 (19.8)-51.9 (15.6)-5.3 (5.4)8.3 (2.9)44.6 (4.6)
FWER0.0%0.7% (0.3%)0.3% (0.1%)0.4% (0.3%)0.0% (0.0%)0.2% (0.2%)
Switches1.00.0 (0.0)0.8 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success96.6%30.0% (2.0%)18.2% (2.2%)15.2% (2.0%)3.8% (0.9%)30.7% (1.4%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)613.4 (5.8)670.6 (8.9)
T-to-F600.0600.0 (0.0)724.6 (9.4)78.3 (9.3)2.6 (0.6)95.5 (18.4)
Red Instances (23.3%)Utility0.0-579.2 (4.1)-312.5 (2.3)-35.1 (1.7)-2.2 (0.3)-37.0 (2.4)
FWER0.0%0.2% (0.3%)0.2% (0.3%)0.1% (0.2%)0.0% (0.0%)0.1% (0.2%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%2.1% (0.4%)1.1% (0.4%)0.9% (0.3%)0.1% (0.2%)1.1% (0.7%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)649.8 (62.6)
T-to-F0.0600.0 (0.0)334.9 (3.7)38.9 (2.1)2.4 (0.5)39.8 (2.5)
+ +# B DISCUSSION ON ERROR CONTROL + +Bayes-OCP is a method for managing experiments—that is deciding what experiment to conduct and when—as opposed to a hypothesis testing strategy in and of itself. Implication of this in terms of error control is that the type 1 error of any individual experiment run by Bayes-OCP can always be controlled by choosing an appropriate experimental design, in particular, by specifying an appropriate success criterion $\rho$ . This individual-level error control built into the design of each experiment is not compromised by Bayes-OCP; no aggregate data from multiple experiments is ever fed into the success criterion of one alone (see Section 2, experiment $\psi^i$ is successful if $\rho^i(\bar{D}_t^i) = 1$ not if $\rho^i(\bar{\mathcal{D}}_t^i) = 1$ ); and any assumptions made by Bayes-OCP regarding outcomes in Section 4, whether accurate or inaccurate, have no effect on the results produced by an external success criterion. + +While Bayes-OCP does not compromise the individual error control of experiments, neither does it control their collective family-wise error rate (FWER)—that is the probability of at least one experiment among all that are conducted making a false discovery. Bayes-OCP views the problem of managing experiments purely as a utility maximization problem with no additional constraints. Within the scope of our discussion, the purpose of measuring FWER as a metric is to check empirically whether the individual error rates are inflated or not (note that FWER is a stricter notion of error than individual error rate). In practice, depending on how closely related the managed experiments are, controlling FWER might not necessarily be a concern. Let us highlight this: Any algorithm that manages experiments for long enough is bound to make at least one false discovery. 
Each year more than a thousand clinical trials are launched (that eventually post results) and more than half of these trials succeed (Takebe et al., 2018; Cli). If the type 1 error rate of all these trials were $5\%$ , we would expect at least 25 false discoveries in a year, which is more than one, hence it would put the FWER of all real-world trials at almost $100\%$ when measured on a year-by-year basis. Of course, this is not problematic since not all clinical trials are related to each other closely enough to be considered a family. + +# B.1 EXPERIMENTS WITH FAMILY-WISE ERROR CONTROL + +When controlling FWER is of concern, Bayes-OCP can easily be adapted to satisfy this additional constraint by first limiting the number of total experiments that can be conducted—that is, putting an upper bound on $n$ —and then using well-established methods for family-wise error control such as Bonferroni correction or alpha spending functions (Demets and Lan, 1994) to adjust the success criteria of the viable experiments in $\Psi$ . We run additional experiments to evaluate the performance of Bayes-OCP with Bonferroni correction. We consider the same setting that we have considered during our experiments in Section 6 except for one difference: We limit the number of experiments
Algorithms:Oracle RCTRCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
All Instances (100%)Utility266.5-38.4 (14.7)110.0 (9.5)150.8 (8.5)46.3 (3.8)178.2 (7.3)
FWER0.0%0.1% (0.1%)0.0% (0.1%)0.0% (0.1%)0.0% (0.0%)0.0% (0.1%)
Switches0.50.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success76.6%56.2% (1.5%)53.6% (1.5%)46.5% (1.8%)14.7% (1.1%)54.9% (1.7%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)607.3 (1.0)617.2 (1.8)
T-to-F32.5600.0 (0.0)563.5 (9.9)65.8 (3.8)4.3 (0.5)81.9 (7.1)
Green Instances (48.0%)Utility391.3388.0 (4.1)383.7 (3.1)343.3 (4.4)89.4 (6.7)348.7 (3.5)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)0.9 (0.0)0.1 (0.0)
Success99.1%98.8% (0.4%)97.3% (0.4%)87.6% (0.7%)26.6% (2.0%)89.3% (0.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)605.8 (1.2)602.2 (1.3)
T-to-F600.0600.0 (0.0)710.1 (33.9)54.8 (10.0)4.2 (1.5)77.2 (10.6)
Amber Instances (29.9%)Utility263.5-316.2 (18.2)-13.1 (3.6)-18.7 (8.8)14.1 (2.4)67.6 (6.4)
FWER0.0%0.2% (0.3%)0.1% (0.3%)0.1% (0.3%)0.0% (0.0%)0.1% (0.3%)
Switches1.00.0 (0.0)0.7 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success97.2%28.4% (1.8%)22.6% (1.1%)14.7% (1.5%)6.3% (0.8%)39.3% (2.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)617.5 (6.1)670.6 (6.1)
T-to-F600.0600.0 (0.0)765.5 (11.4)91.0 (6.0)4.3 (1.3)126.0 (16.6)
Red Instances (22.1%)Utility0.0-588.3 (4.6)-316.6 (12.2)-37.0 (4.5)-3.8 (1.1)-41.7 (3.0)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%1.2% (0.5%)1.0% (0.5%)0.5% (0.2%)0.2% (0.2%)1.6% (0.4%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)654.7 (21.8)
T-to-F0.0600.0 (0.0)341.9 (5.1)39.4 (4.2)4.2 (1.0)46.4 (3.9)
+ +that can be conducted by each algorithm as at most two, and we specify $\alpha = F^{-1}(0.975)$ for algorithms that can potentially run more than one experiment—namely, adaptive enrichment and (Greedy) Bayes-OCP—while we still specify $\alpha = F^{-1}(0.95)$ for algorithms that always run exactly one experiment—namely, RCT and futility stopping. These specifications ensure that FWER of all algorithms are bounded by $5\%$ . Results are given in Table 5; Bayes-OCP still performs the best when explicit control of FWER is required. + +# C EXPERIMENTS WITH MISSPECIFIED OUTCOME DISTRIBUTIONS + +We consider the same setting that we have considered during our experiments in Section 6. Except now, the ground-truth outcome distributions are such that, when $y \sim \Omega_x$ , $y = 1$ with probability $(\theta_x + 1) / 2$ and $y = -1$ otherwise. In order to ensure that $\theta_x \in [-1,1]$ , we also sample ground-truth mean outcomes so that $\theta_x = 2p - 1$ where $p$ is distributed according to Beta distribution with $\alpha = 979 / 200$ and $\beta = 801 / 200$ (note that the mean and variance of $\theta_x$ remains the same as in our original experiments). Despite the fact that outcomes are now distributed in a non-Gaussian way, we leave the implementation of Bayes-OCP unchanged, which still assumes that outcomes distributions are Gaussian. So, there is now a mismatch between the structure of outcome distributions specified as part of Bayes-OCP and the ground-truth outcome distributions. Results are given in Table 6; Bayes-OCP still does not inflate FWER despite the misspecified outcome distributions. + +# D EXPERIMENTS WITH MORE ATOMIC-POPULATIONS + +We repeat our main experiments with more than two atomic-populations, specifically we set $|\mathcal{X}| = 10$ . 
As before, all atomic-populations have equal propensities such that $\eta_x = 1/10, \forall x \in \mathcal{X}$ , and the meta-experimenter has the same positively-biased prior for the mean outcome associated with each atomic population: $\theta_x \sim \mathcal{N}(0.1, 0.1)$ , $\forall x \in \mathcal{X}$ . We randomly generated 100 environments (repeated five times to obtain error bars), and the results are given in Table 7. We observe that Bayes-OCP still performs the best. These results confirm that a greedy approximation is suitable for identifying candidate experiments when the number of atomic-populations is large. + +# E SENSITIVITY ANALYSIS + +Bayes-OCP has one hyper-parameter: $\beta$ , which controls how optimistic the switching rule given in line 14 of Algorithm 1 is, from $\beta = 1/2$ meaning decisions are made greedily to $\beta = 1$ meaning
Algorithms:RCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
All Instances (100%)Utility8.0 (39.2)143.9 (31.1)141.0 (27.5)40.3 (5.9)172.4 (23.8)
FWER0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success60.8% (3.9%)71.0% (3.6%)51.2% (4.7%)15.6% (2.6%)63.2% (4.2%)
T-to-S600.0 (0.0)678.9 (8.3)600.0 (0.0)648.5 (13.2)647.5 (4.4)
T-to-F600.0 (0.0)672.7 (58.8)130.4 (12.5)9.3 (5.0)200.5 (72.2)
+ +![](images/5cac31b589d8fd3ee95c3eda7fe1f144de0ed46052f72b20e7d32868261b8af8.jpg) +Figure 5: Utility achieved by Bayes-OCP for various values of hyper-parameter $\beta$ . + +decisions are so extremely optimistic that the original experiment will never be abandoned (as there will always be a chance that it succeeds). As with all online algorithms, tuning $\beta$ is challenging since no a priori data would be available to perform cross validation. However, a nice feature of Bayes-OCP is that $\beta$ is rather interpretable, it is the evidence required against the ongoing experiment: An alternative experiment is preferred over the ongoing experiment only if it is believed to be the better experiment with at least $\beta$ -confidence. We evaluate the sensitivity of Bayes-OCP's performance to hyper-parameter $\beta$ in Figure 5; Bayes-OCP performs better than an RCT for all configurations and better than adaptive enrichment for most configurations. + +# F FUTURE WORK + +Extending the scope of Bayes-OCP One limitation of Bayes-OCP is that it only adapts the target population $X\subseteq \mathcal{X}$ of experiments but not the sample horizon $\tau$ or the success criterion $\rho$ . We have chosen to focus on the selection of a target population since we believe the target population of an experiment to be the most critical design dimension to adjust adaptively. As we have already highlighted in our introduction, experiments with inflexible target populations can be problematic when responses to the treatment of interest are highly heterogeneous. + +That being said, the high-level strategy of our proposed algorithm should still be applicable to adapting design dimensions other than the target population, namely $\tau$ and $\rho$ . At a high level, Bayes-OCP first identifies a candidate experiment and then compares the identified experiment to the ongoing experiment in a n optimistic manner. 
Regardless of the given set of viable experiment designs $\Psi$ , one could still follow the same strategy; the only complication would be to adapt how candidate experiments are identified depending on what design dimension varies across experiment designs in $\Psi$ . + +For instance, when experiment designs varied in terms of $X$ , a combinatorial search was required to identify good candidate experiments, for which we proposed a greedy strategy. When experiment designs vary in terms of $\rho$ , a simple search over all possible $\rho$ would suffice for identifying candidate experiments. The case where experiment designs vary in terms of $\tau$ is more complex; optimal $\tau$ for an experiment would be dependent on unknown effects $\theta_{x}$ ; selecting a good candidate experiment would involve estimating the optimal $\tau$ given posteriors over $\theta_{x}$ . This would be an interesting problem to explore as a future research direction. + +Performance guarantees While our theoretical results motivate the general use of an optimistic decision rule, they do not provide any guarantees about the performance of the specific rule we propose as part of Bayes-OCP. Another future research direction would be to prove an upper bound on the sub-optimality gap of Bayes-OCP. + +# G FURTHER DISCUSSION ON MAIN RESULTS + +Table 3 reports six metrics: Utility, FWER, Switches, Success, T-to-S, and T-to-F. We have already discussed the implications of Utility and FWER in Section 6. Here, we highlight other interesting phenomena regarding the remaining metrics. First, we see that Greedy Bayes-OCP switches experiments much more frequently compared with Bayes-OCP. This is because Greedy Bayes-OCP requires less evidence against the ongoing experiment when comparing it against an alternative experiment, whereas Bayes-OCP favors the ongoing experiments more. Second, we see that a higher success probability does not necessarily also imply a higher utility. 
For instance, compare RCT with futility stopping: futility stopping is able to achieve higher utility than RCT by terminating risky experiments early and saving costs. However, this of course also means that futility stopping sees fewer experiments to completion and hence leads to a lower success probability. Finally, we see that succeeding or failing early does not necessarily imply a higher utility either. Our best algorithm Bayes-OCP succeeds the latest on average as well as fails the latest compared with other benchmarks favoring red instances. This highlights the importance of being conservative when making decisions, being optimistic, and favoring the status quo more than a potential adaptation. + +# H FURTHER DISCUSSION ON RELATED WORK + +Multi-armed bandits The optimal commitment problem is similar to a multi-armed bandit (MAB) problem (Auer et al., 2002; Bubeck et al., 2012) in some aspects: Like arms in a MAB problem, each experiment design $\psi$ has a random utility given by $R_{\psi} \cdot \rho(\mathcal{D}_{\tau}) - \tau C_{\psi}$ , where $\mathcal{D}_{\tau}$ is the source of randomness, and the distribution of this utility is unknown. Also similar to a MAB problem, the overall goal is to sequentially select experiment designs (cf. arms) that yield the maximum cumulative utility. The main difference between the two problems is that, in a MAB problem, selecting an arm immediately reveals a sample from its random utility, while in optimal commitment, running an experiment $\psi$ just for one time step only incurs a cost of $C_{\psi}$ ; observing a full sample of its random utility requires the experiment to be run until its completion for $\tau$ consecutive time steps, without selecting any other experiment design in the meantime. 
+ +One can naively apply a MAB algorithm by viewing each viable experiment design as a unique arm, and by running experiments/arms selected by the algorithm until their completion to observe full samples from their unknown utility distributions. However, this obviously side steps the main question we want to answer in optimal commitment: When can we abandon a commitment—in this case, the decision to run an experiment/arm selection until its completion—before fully observing its outcome? Looking at optimal commitment from a MAB perspective reveals that there are two explore-exploit dilemmas present in optimal commitment: One is with respect to which experiment to select next, and the other is with respect to when to preemptively stop the current experiment (i.e. breaking a commitment). MAB algorithms address the former dilemma but not the latter. + +Task replication in parallel computing There is work (Ghare and Leutenegger, 2005; Wand et al., 2014; Wang et al., 2019) that focuses on the problem of when to kill existing tasks and relaunch them in parallel computing, which is related to optimal stopping/switching. However there, the focus is on reasoning about when a stochastic event (i.e. successful completion of a computational task) will occur without any extra information other than the fact that the event of interest has not occurred yet. In contrast, in our setting, the decision-maker needs to process a streaming set of samples to reason about the random outcome of an event that is scheduled to happen at a deterministic time point (here, the event is an experiment reaching its conclusion). This means that our problem has a completely different information structure when compared with the problem of task replication. More formally, we observe samples $y_{t}$ that are informative of whether $\rho(\mathcal{D}_{\tau}) = 1$ when $\tau$ is a fixed variable. 
In contrast, the problem of task replication would correspond to the setting where $\tau$ is a random variable with a known distribution and $\rho = 1$ always holds (hence no need to observe any samples $y_{t}$ ). Among optimal stopping/switching problems, the structure of our problem is more closely related to sequential hypothesis testing, which we have already covered in Section 5. + +# I PROOFS OF PROPOSITIONS + +# I.1 PROOF OF PROPOSITION 1 + +We start by relating the optimal value function $V^{*}$ to the optimal Q-function $Q^{*}$ . Letting $T_{t}^{*} = T_{t}^{\pi^{*}}$ , + +$$ +\begin{array}{l} V ^ {*} (t, \mu) \\ = \mathbb {E} [ R \cdot \mathbb {1} \left\{T _ {t} ^ {*} > \tau \right\} \cdot \rho (\mu_ {\tau}) - C \cdot (\min \left\{T _ {t} ^ {*}, \tau \right\} - t) | \mu_ {t} = \mu ] \\ = \mathbb {E} \left[ \mathbb {1} \left\{\pi^ {*} (t, \mu_ {t}) = \varnothing \right\} \left(R \cdot \mathbb {1} \left\{T _ {t} ^ {*} > \tau \right\} \cdot \rho (\mu_ {\tau}) - C \cdot \left(\min \left\{T _ {t} ^ {*}, \tau \right\} - t\right)\right) \right. 
\\ + \mathbb {1} \left\{\pi^ {*} (t, \mu_ {t}) = \Psi_ {0} \right\} \left(R \cdot \mathbb {1} \left\{T _ {t} ^ {*} > \tau \right\} \cdot \rho (\mu_ {\tau}) - C \cdot \left(\min \left\{T _ {t} ^ {*}, \tau \right\} - t)\right) | \mu_ {t} = \mu \right] \\ = \mathbb {E} [ \mathbb {1} \left\{\pi^ {*} (t, \mu_ {t}) = \varnothing \right\} \cdot 0 \\ + \mathbb {1} \left\{\pi^ {*} (t, \mu_ {t}) = \Psi_ {0} \right\} \left(R \cdot \mathbb {1} \left\{T _ {t + 1} ^ {*} > \tau \right\} \cdot \rho (\mu_ {\tau}) - C \cdot \left(\min \left\{T _ {t + 1} ^ {*}, \tau \right\} - t)\right) | \mu_ {t} = \mu \right] (11) \\ = \mathbb {1} \left\{\pi^ {*} (t, \mu) = \Psi_ {0} \right\} \cdot \mathbb {E} [ R \cdot \mathbb {1} \left\{T _ {t + 1} ^ {*} > \tau \right\} \cdot \rho (\mu_ {\tau}) - C \cdot \left(\min \left\{T _ {t + 1} ^ {*}, \tau \right\} - t\right) | \mu_ {t} = \mu ] (12) \\ = \mathbb {1} \left\{Q ^ {*} (t, \mu) > 0 \right\} \cdot Q ^ {*} (t, \mu) (13) \\ = \max \left\{0, Q ^ {*} (t, \mu) \right\}, (14) \\ \end{array} +$$ + +where (11) holds since $\pi^{*}(t,\mu_{t}) = \varnothing \Rightarrow T_{t}^{*} = t$ and $\pi^{*}(t,\mu_{t}) = \Psi_{0} \Rightarrow T_{t}^{*} \geq t + 1 \Rightarrow T_{t}^{*} = \min \{t^{\prime} \geq t : \pi^{*}(t^{\prime},\mu_{t^{\prime}}) = \emptyset\} = \min \{t^{\prime} \geq t + 1 : \pi^{*}(t^{\prime},\mu_{t^{\prime}}) = \emptyset\} = T_{t + 1}^{*}$ , (12) holds since $\mu_{\tau} \perp \mathbb{1}\{\pi^{*}(t,\mu_{t}) = \Psi_{0}\}$ and $T_{t + 1}^{*} \perp \mathbb{1}\{\pi^{*}(t,\mu_{t}) = \Psi_{0}\}$ when conditioned on $\mu_{t} = \mu$ , and (13) holds since $\pi^{*}(t,\mu) = \Psi_{0} \iff Q^{*}(t,\mu) > 0$ . Intuitively, the maximum possible value at a given time is achieved either by stopping immediately or by conducting the experiment for at least one more time step and then following the optimal policy thereafter. 
+ +Next, we observe that + +$$ +\begin{array}{l} \mathbb {P} \left\{\mu_ {t + 1} \leq \mu^ {\prime} \mid \mu_ {t} = \mu \right\} = \int \mathbb {P} \left\{\mu_ {t + 1} \leq \mu^ {\prime} \mid \theta , \mu_ {t} = \mu \right\} \mathrm {d} \mathbb {P} \left\{\theta \mid \mu_ {t} = \mu \right\} \\ = \int F \left(\mu^ {\prime} - \frac {\theta + t \mu}{t + 1}; \frac {1}{(t + 1) ^ {2}}\right) f (\theta - \mu ; ^ {1} / t) d \theta \\ = \iint \mathbb {1} \left\{\mu_ {t + 1} \leq \mu^ {\prime} \right\} f \left(\mu_ {t + 1} - \frac {\theta + t \mu}{t + 1}; \frac {1}{(t + 1) ^ {2}}\right) f (\theta - \mu ; ^ {1} / t) d \mu_ {t + 1} d \theta \\ = \iint \mathbb {1} \left\{\mu_ {t + 1} \leq \mu^ {\prime} \right\} f \left(\mu_ {t + 1} - \frac {y + (t + 1) \mu}{t + 1}; \frac {1}{(t + 1) ^ {2}}\right) f (y; ^ {1} / t) d \mu_ {t + 1} d y \\ = \iint \mathbb {1} \left\{x + \frac {y + (t + 1) \mu}{t + 1} \leq \mu^ {\prime} \right\} f \left(x; ^ {1} / (t + 1) ^ {2}\right) f \left(y; ^ {1} / t\right) d x d y \\ = \mathbb{P}_{\substack{X\sim \mathcal{N}(0,1 / (t + 1)^{2})\\ Y\sim \mathcal{N}(0,1 / t)}}\Bigg\{X + \frac{Y}{t + 1}\leq \mu^{\prime} - \mu \Bigg\} \\ = \mathbb {P} _ {X + Y / (t + 1) \sim \mathcal {N} (0, 1 / t - 1 / t + 1)} \left\{X + \frac {Y}{t + 1} \leq \mu^ {\prime} - \mu \right\} \\ = F \left(\mu^ {\prime} - \mu ; ^ {1} / t - ^ {1} / t + 1\right), \tag {15} \\ \end{array} +$$ + +where $f(x; \sigma^2) = (1 / \sqrt{2\pi\sigma^2})e^{-(1/2)x^2 / \sigma^2}$ and $F(x; \sigma^2) = (1 / \sqrt{2\pi\sigma^2})\int_{-\infty}^{x}e^{-(1/2)x'^2 / \sigma^2}dx'$ are the p.d.f. and the c.d.f. of the Gaussian distribution with mean zero and variance $\sigma^2$ respectively. Hence $\mathrm{d}\mathbb{P}\{\mu_{t+1} = \mu'| \mu_t = \mu\} = f(\mu' - \mu; 1/t - 1/t+1)d\mu'$ . 
+ +Then, using the relationship between $V^{*}$ and $Q^{*}$ and the observation regarding $\mathbb{P}\{\mu_{t + 1} \leq \mu' | \mu_t = \mu\}$ , we drive the following Bellman optimality condition: + +$$ +\begin{array}{l} Q ^ {*} (t, \mu) = \mathbb {E} [ R \cdot \mathbb {1} \{T _ {t + 1} ^ {*} > \tau \} \cdot \rho (\mu_ {\tau}) - C \cdot (\min \{T _ {t + 1} ^ {*}, \tau \} - t) | \mu_ {t} = \mu ] \\ = - C + \mathbb {E} \left[ R \cdot \mathbb {1} \left\{T _ {t + 1} ^ {*} > \tau \right\} \cdot \rho (\mu_ {\tau}) - C \cdot \left(\min \left\{T _ {t + 1} ^ {*}, \tau \right\} - t - 1\right) \mid \mu_ {t} = \mu \right] \\ = - C + \int \mathbb {E} [ R \cdot \mathbb {1} \{T _ {t + 1} ^ {*} > \tau \} \cdot \rho (\mu_ {\tau}) - C \cdot (\min \{T _ {t + 1} ^ {*}, \tau \} - t - 1) | \mu_ {t + 1} = \mu^ {\prime} ] \\ \times \mathrm {d} \mathbb {P} \left(\mu_ {t + 1} = \mu^ {\prime} \mid \mu_ {t} = \mu\right) \\ = - C + \int V ^ {*} (t + 1, \mu^ {\prime}) \mathrm {d} \mathbb {P} (\mu_ {t + 1} = \mu^ {\prime} | \mu_ {t} = \mu) \\ = - C + \int V ^ {*} (t + 1, \mu^ {\prime}) f (\mu^ {\prime} - \mu ; 1 / t - 1 / t + 1) d \mu^ {\prime} \\ \end{array} +$$ + +$$ += - C + \int V ^ {*} (t + 1, \mu + z) f \left(z; ^ {1} / t - ^ {1} / t + 1\right) d z \tag {16} +$$ + +$$ += - C + \int \max \left\{0, Q ^ {*} (t + 1, \mu + z) \right\} f \left(z; \frac {1}{t} - \frac {1}{t + 1}\right) d z. \tag {17} +$$ + +For the problem setting where $C = 1$ , $R = 2$ , $\alpha = 0$ , and $\tau = 2$ , we have + +$$ +\begin{array}{l} V ^ {*} (1, \mu) = \max \{0, - 1 + \int V ^ {*} (2, \mu + z) f (z; 1 / 2) d z \} \\ = \max \{0, - 1 + 2 \int \mathbb {I} \{\mu + z > 0 \} f (z; 1 / 2) d z \} \\ = \max \left\{0, - 1 + 2 \int_ {- \mu} ^ {\infty} f (z; 1 / 2) d z \right\} \\ = \max \left\{0, - 1 + 2 F (\mu ; ^ {1} / _ {2}) \right\} \\ = \left\{ \begin{array}{l l} 0 & \text {i f} \mu < 0 \\ - 1 + 2 F (\mu ; 1 / 2) & \text {i f} \mu \geq 0 . \end{array} \right. 
\\ \end{array} +$$ + +Notice that, for $\mu > 0$ , + +$$ +\begin{array}{l} \frac {d ^ {2}}{d \mu^ {2}} V ^ {*} (1, \mu) = \frac {d ^ {2}}{d \mu^ {2}} \Big (- 1 + 2 F (\mu ; 1 / 2) \Big) \\ = \frac {d}{d \mu} \left(2 f (\mu ; ^ {1} / 2)\right) \\ = - (4 / \pi) \mu e ^ {- \mu^ {2}} < 0 \\ \end{array} +$$ + +hence $V^{*}(1,\mu)$ is concave at least on interval $\mu \in (0,\infty)$ and is not a convex function. Moreover, $-V^{*}(1,\mu)$ cannot be a convex function—or equivalently $V^{*}(1,\mu)$ cannot be a purely concave function—either: For an arbitrary $\mu \in (0,\infty), V^{*}(1,\mu) > 0$ and $V^{*}(1,-\mu) = 0$ hence $(1/2)V^{*}(1,\mu) + (1/2)V^{*}(1,-\mu) > 0$ but $V^{*}(1,(1/2)\mu + (1/2)(-\mu)) = V^{*}(1,0) = 0$ . + +# I.2 PROOF OF PROPOSITION 2 + +We will prove the proposition by showing that + +(i) $Q^{*}(t,\mu)$ is non-decreasing in $\mu$ —that is $\mu < \mu^{\prime}\Rightarrow Q^{*}(t,\mu)\leq Q^{*}(t,\mu^{\prime})$ +(ii) $\lim_{t\to \infty}Q^{*}(t,\mu) = -(\tau -t)C + R > 0$ , and +(iii) $\lim_{t\to -\infty}Q^{*}(t,\mu) = -C < 0$ + +for all $t \in \{1, \dots, \tau - 1\}$ via mathematical induction. Notice that these three facts—together with the fact that $Q^{*}(t, \mu)$ is a continuous function in $\mu$ for $t \in \{1, \dots, \tau - 1\}$ —would imply the existence of a unique $\mu_{t}^{*}$ such that $Q^{*}(t, \mu_{t}^{*}) = 0$ , $Q^{*}(t, \mu) > 0 \iff \mu > \mu_{t}^{*}$ , and $Q^{*}(t, \mu) \leq 0 \iff \mu \leq \mu_{t}^{*}$ , which in turn would imply that + +$$ +\pi^ {*} (t, \mu) = \left\{ \begin{array}{l l} \Psi_ {0} & \text {i f} \mu > \mu_ {t} ^ {*} \iff Q ^ {*} (t, \mu) > 0 \\ \varnothing & \text {i f} \mu \leq \mu_ {t} ^ {*} \iff Q ^ {*} (t, \mu) \leq 0 , \end{array} \right. +$$ + +meaning the optimal policy $\pi^{*}$ is indeed of "thresholding -type" as the proposition states. + +First, we observe the following base cases for $t = \tau - 1$ : + +(i) $Q^{*}(\tau -1,\mu)$ is non-decreasing in $\mu$ . 
When $\mu < \mu^{\prime}$ + +$$ +\begin{array}{l} Q ^ {*} (\tau - 1, \mu) = - C + \int V ^ {*} (\tau , \mu + z) f (z; ^ {1} / (\tau - 1) - ^ {1} / \tau) d z (18) \\ = - C + R \int \mathbb {1} \left\{\mu + z > \alpha / \sqrt {\tau} \right\} f (z; 1 / (\tau - 1) - 1 / \tau) d z \\ \leq - C + R \int \mathbb {1} \left\{\mu^ {\prime} + z > \alpha / \sqrt {\tau} \right\} f (z; 1 / (\tau - 1) - 1 / \tau) d z (19) \\ = Q ^ {*} (\tau - 1, \mu^ {\prime}), \\ \end{array} +$$ + +where (18) is due to (16), and (19) holds since $\mu + z > \alpha / \sqrt{\tau} \Rightarrow \mu' + z > \mu + z > \alpha / \sqrt{\tau}$ . + +(ii) $\lim_{\mu \to \infty}Q^{*}(\tau -1,\mu) = -C + R > 0$ since + +$$ +\lim _ {\mu \rightarrow \infty} Q ^ {*} (\tau - 1, \mu) = \lim _ {\mu \rightarrow \infty} \left(- C + R \int \mathbb {1} \{\mu + z > \alpha / \sqrt {\tau} \} f (z; ^ {1 / (\tau - 1)} - ^ {1 / \tau}) d z\right) +$$ + +$$ +\begin{array}{l} = \lim _ {\mu \rightarrow \infty} \left(- C + R \int_ {\alpha / \sqrt {\tau} - \mu} ^ {\infty} f (z; ^ {1} / (\tau - 1) - ^ {1} / \tau) d z\right) \\ = - C + R \int f (z; ^ {1} / (\tau - 1) - ^ {1} / \tau) d z \\ = - C + R. \\ \end{array} +$$ + +(iii) $\lim_{\mu \to -\infty}Q^{*}(\tau -1,\mu) = -C < 0$ since + +$$ +\begin{array}{l} \lim _ {\mu \rightarrow - \infty} Q ^ {*} (\tau - 1, \mu) = \lim _ {\mu \rightarrow - \infty} \left(- C + R \int \mathbb {1} \{\mu + z > \alpha / \sqrt {\tau} \} f (z; ^ {1} / (\tau - 1) - ^ {1} / \tau) d z\right) \\ = \lim _ {\mu \rightarrow - \infty} \left(- C + R \int_ {\alpha / \sqrt {\tau} - \mu} ^ {\infty} f (z; ^ {1} / (\tau - 1) - ^ {1} / \tau) d z\right) \\ = \lim _ {\mu \rightarrow - \infty} \left(- C + R \left(1 - \int_ {- \infty} ^ {\alpha / \sqrt {\tau} - \mu} f (z; 1 / (\tau - 1) - 1 / \tau) d z\right)\right) \\ = - C + R \left(1 - \int f (z; ^ {1} / (\tau - 1) - ^ {1} / \tau) d z\right) \\ = - C. 
\\ \end{array} +$$ + +Then, we show that the following inductive cases hold for $t \in \{\tau - 1, \dots, 2\}$ : + +(i) Given that $Q^{*}(t,\mu)$ is non-decreasing in $\mu$ , $Q^{*}(t - 1,\mu)$ is also non-decreasing in $\mu$ . Similar to the base case, when $\mu < \mu'$ , + +$$ +\begin{array}{l} Q ^ {*} (t - 1, \mu) = - C + \int \max \{0, Q ^ {*} (t, \mu + z) \} f \left(z, ^ {1 / (t - 1)} - ^ {1 / t}\right) d z \tag {20} \\ \leq - C + \int \max \left\{0, Q ^ {*} (t, \mu^ {\prime} + z) \right\} f \left(z, ^ {1} / (t - 1) - ^ {1} / t\right) d z \\ = Q ^ {*} (t - 1, \mu^ {\prime}), \\ \end{array} +$$ + +where (20) is due to (17). + +(ii) Given $\lim_{\mu \to \infty}Q^{*}(t,\mu) = -(\tau -t)C + R$ and also given that $Q^{*}(t,\mu)$ is non-decreasing in $\mu$ we have $\lim_{\mu \to \infty}Q^{*}(t - 1,\mu) = -(\tau -t + 1)C + R > 0$ which can be shown using the sandwich theorem: + +$$ +\begin{array}{l} Q ^ {*} (t - 1, \mu) = - C + \int \max \{0, Q ^ {*} (t, \mu + z) \} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\ \leq - C + \int \max \left\{0, \lim _ {\mu^ {\prime} \rightarrow \infty} Q ^ {*} \left(t, \mu^ {\prime}\right)\right\} f \left(z, ^ {1} / (t - 1) - ^ {1} / t\right) d z \\ \leq - C + (- (\tau - t) C + R) \int f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\ = - (\tau - t - 1) C + R. 
\tag {21} \\ \end{array} +$$ + +$$ +\begin{array}{l} Q ^ {*} (t - 1, \mu) = - C + \int \max \{0, Q ^ {*} (t, \mu + z) \} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\ \geq - C + \int_ {- | \mu | ^ {1 / 2}} ^ {\infty} \max \{0, Q ^ {*} (t, \mu + z) \} f (z, 1 / (t - 1) - 1 / t) d z \\ \geq - C + \int_ {- | \mu | ^ {1 / 2}} ^ {\infty} \max \left\{0, Q ^ {*} (t, \mu - | \mu | ^ {1 / 2} \right\} f (z, ^ {1 / (t - 1)} - ^ {1 / t}) d z \\ \geq - C + Q ^ {*} (t, \mu - | \mu | ^ {1 / 2}) \int_ {- | \mu | ^ {1 / 2}} ^ {\infty} f \left(z, 1 / (t - 1) - 1 / t\right) d z, \tag {22} \\ \end{array} +$$ + +Finally, observing that + +$$ +\begin{array}{l} \lim _ {\mu \rightarrow \infty} (2 2) = \lim _ {\mu \rightarrow \infty} \left(- C + Q ^ {*} (t, \mu - | \mu | ^ {1 / 2}) \int_ {- | \mu | ^ {1 / 2}} ^ {\infty} f \left(z, ^ {1} / (t - 1) - ^ {1} / t\right) d z\right) \\ = - C + \left(\lim _ {\mu^ {\prime} \rightarrow \infty} Q ^ {*} (t, \mu^ {\prime})\right) \int f (z, 1 / (t - 1) - 1 / t) d z \\ = - (\tau - t - 1) C + R, \\ \end{array} +$$ + +together with bounds (21) and (22), we obtain $\lim_{\mu \to \infty}Q^{*}(t - 1,\mu) = -(\tau -t + 1)C + R.$ + +(iii) Given $\lim_{\mu \to -\infty}Q^{*}(t,\mu) = -C < 0$ and also given that $Q^{*}(t,\mu)$ is non-decreasing in $\mu$ and $\lim_{\mu \to \infty}Q^{*}(t,\mu) > 0$ so that $\mu_t^*$ exists—we have $\lim_{\mu \to -\infty}Q^{*}(t - 1,\mu) = -C < 0$ , which again can be shown using the sandwich theorem: + +$$ +Q ^ {*} (t - 1, \mu) = - C + \int \max \{0, Q ^ {*} (t, \mu + z) \} f (z, 1 / (t - 1) - 1 / t) d z +$$ + +$$ +\geq - C. 
\tag {23} +$$ + +$$ +\begin{array}{l} Q ^ {*} (t - 1, \mu) = - C + \int \max \{0, Q ^ {*} (t, \mu + z) \} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\ = \int_ {\mu_ {t} ^ {*} - \mu} ^ {\infty} Q ^ {*} (t, \mu + z) f (z, 1 / (t - 1) - 1 / t) d z (24) \\ \leq - C + R \int_ {\mu_ {t} ^ {*} - \mu} ^ {\infty} f (z, 1 / (t - 1) - 1 / t) d z (25) \\ \leq - C + R \left(1 - \int_ {- \infty} ^ {\mu_ {t} ^ {*} - \mu} f (z, 1 / (t - 1) - 1 / t) d z\right), (26) \\ \end{array} +$$ + +where (24) holds since $Q^{*}(t,\mu +z) > 0$ if and only if $z > \mu_t^* -\mu$ and $\max \{0,Q^{*}(t,\mu +z)\} = 0$ otherwise, and (25) holds since $Q^{*}(t,\mu)\leq \lim_{\mu^{\prime}\to \infty}Q^{*}(t,\mu^{\prime}) = -(\tau -t)C + R\leq R$ for all $\mu$ as $Q^{*}(t,\mu)$ is non-decreasing in $\mu$ . Finally, observing + +$$ +\begin{array}{l} \lim _ {\mu \rightarrow - \infty} (2 6) = \lim _ {\mu \rightarrow - \infty} \left(- C + R \left(1 - \int_ {- \infty} ^ {\mu_ {t} ^ {*} - \mu} f \left(z, \frac {1}{(t - 1)} - \frac {1}{t}\right) d z\right)\right) \\ = - C, \\ \end{array} +$$ + +together with bounds (23) and (26), we obtain $\lim_{\mu \to -\infty}Q^{*}(t - 1,\mu) = -C$ + +When put together, the base cases and the inductive cases above imply that conditions (i-iii) hold for all $t \in \{1, \dots, \tau - 1\}$ hence $\mu_t^*$ exists for all $t \in \{1, \dots, \tau - 1\}$ which concludes our proof. + +# I.3 PROOF OF PROPOSITION 3 + +First, we prove the existence of $\mu_t^{\mathrm{greedy}}$ for all $t\in \{0,\dots ,\tau -1\}$ by driving an analytical formula for $V^{(0)}(t,\mu)\doteq V^{\pi^{(0)}}(t,\mu)$ . 
Letting $T_{t}^{(0)} = T_{t}^{\pi^{(0)}}$ , + +$$ +\begin{array}{l} V ^ {(0)} (t, \mu) = \mathbb {E} [ R \cdot \mathbb {1} \{T _ {t} ^ {(0)} > \tau \} \cdot \rho (\mu_ {\tau}) - C \cdot (\min \{T _ {t} ^ {(0)}, \tau \} - t) | \mu_ {t} = \mu ] \\ = \mathbb {E} [ R \cdot \rho (\mu_ {\tau}) - C \cdot (\tau - t) | \mu_ {t} = \mu ] (27) \\ = - C + \int \mathbb {E} [ R \cdot \rho (\mu_ {\tau}) - C \cdot (\tau - t - 1) | \mu_ {t + 1} = \mu^ {\prime} ] \mathrm {d} \mathbb {P} (\mu_ {t + 1} = \mu^ {\prime} | \mu_ {t} = \mu) \\ = - C + \int V ^ {(0)} (t + 1, \mu^ {\prime}) \mathrm {d} \mathbb {P} (\mu_ {t + 1} = \mu^ {\prime} | \mu_ {t} = \mu) \\ = - C + \int V ^ {(0)} (t + 1, \mu^ {\prime}) f \left(\mu^ {\prime} - \mu ; ^ {1} / t - ^ {1} / (t + 1)\right) d \mu^ {\prime} (28) \\ = - C + \int V ^ {(0)} (t + 1, \mu + z) f \left(z; \frac {1}{t} - \frac {1}{(t + 1)}\right) d z, (29) \\ \end{array} +$$ + +where (27) holds since $\pi^{(0)}(t,\mu) = \Psi_0$ for all $t$ and $\mu$ hence it is always the case that $T_{t}^{(0)} = \infty$ , and (28) is due to (15). + +In the remainder of our proofs, we take $\alpha = 0$ for notational brevity. This is without any loss of generality as, by simply shifting each value function and Q-function by $\alpha / \sqrt{\tau}$ with respect to $\mu$ , all of the following arguments would still hold. For $\alpha = 0$ , we show that + +$$ +V ^ {(0)} (t, \mu) = - (\tau - t) C + R \cdot F \left(\frac {\mu}{\sqrt {1 / t - 1 / \tau}}\right) \tag {30} +$$ + +for all $t \in \{1, \dots, \tau - 1\}$ via mathematical induction. 
Note that (30) is true for $t = \tau - 1$ : + +$$ +\begin{array}{l} V ^ {(0)} (\tau - 1, \mu) = - C + \int V ^ {(0)} (\tau , \mu + z) f (z; 1 / _ {(\tau - 1)} - 1 / _ {\tau}) d z \\ = - C + R \int \mathbb {1} \left\{\mu + z > 0 \right\} f (z; 1 / (\tau - 1) - 1 / \tau) d z \\ = - C + R \int_ {- \mu} ^ {\infty} f (z; 1 / (\tau - 1) - 1 / \tau) d z \\ = - C + R \int_ {- \infty} ^ {\mu} f (z; ^ {1 / (\tau - 1)} - ^ {1 / \tau}) d z \\ = - C + R \int_ {- \infty} ^ {\mu / \sqrt {1 / (\tau - 1) - 1 / \tau}} f (z; 1) d z \\ \end{array} +$$ + +$$ += - C + R \cdot F \left(\frac {\mu}{\sqrt {1 / (\tau - 1) - 1 / \tau}}\right), +$$ + +where $F(x) \doteq F(x; 1)$ is the c.d.f. of the standard Gaussian distribution. Moreover, assuming (30) is true for $t$ , it is also true for $t - 1$ : + +$$ +\begin{array}{l} V ^ {(0)} (t - 1, \mu) \\ = - C + \int V ^ {(0)} (t, \mu + z) f (z; ^ {1} / t - 1 - ^ {1} / t) d z \\ = - (\tau - t + 1) C + R \int F ((\mu + z) / \sqrt {1 / t - 1 / \tau}; 1) f (z; 1 / (t - 1) - 1 / t) d z \\ = - (\tau - t + 1) C \\ + R \iint_ {- \infty} ^ {(\mu + z) / \sqrt {1 / t - 1 / \tau}} f (z ^ {\prime}; 1) f (z; 1 / (t - 1) - 1 / t) d z ^ {\prime} d z \\ = - (\tau - t + 1) C \\ + R \iint_ {- \infty} ^ {\mu + z} f \left(z ^ {\prime}; 1 / t - 1 / \tau\right) f \left(z; 1 / (t - 1) - 1 / t\right) d z ^ {\prime} d z \\ = - (\tau - t + 1) C \\ + R \iint \mathbb {1} \{z ^ {\prime} \leq \mu + z \} f \left(z ^ {\prime}; ^ {1} / t - ^ {1} / \tau\right) f \left(z; ^ {1} / (t - 1) - ^ {1} / t\right) d z ^ {\prime} d z \\ = - (\tau - t + 1) C + R \cdot \mathbb {P} _ {Z \sim \mathcal {N} (0, 1 / (t - 1) - 1 / t)} \{Z ^ {\prime} \leq \mu + Z \} \\ = - (\tau - t + 1) C + R \cdot \mathbb {P} _ {\frac {Z ^ {\prime} - Z}{\sqrt {1 / (t - 1) - 1 / \tau}} \sim \mathcal {N} (0, 1)} \left\{\frac {Z ^ {\prime} - Z}{\sqrt {1 / (t - 1) - 1 / \tau}} \leq \frac {\mu}{\sqrt {1 / (t - 1) - 1 / \tau}} \right\} \\ = - (\tau - t + 1) C + R \cdot F \left(\frac {\mu}{\sqrt {1 / (t - 1) - 1 / 
\tau}}\right). \\ \end{array} +$$ + +Therefore, (30) indeed holds for all $t \in \{1, \dots, \tau - 1\}$ . + +Next, we observe that $V^{(0)}(t,\mu)$ has a root at $\mu = F^{-1}((\tau - t)C / R)\sqrt{1 / t - 1 / \tau}$ provided that $(\tau - t)C / R \in (0,1)$ , which is the case for all $t \in \{1, \dots, \tau - 1\}$ since $\tau C < R$ . Moreover, $V^{(0)}(t,\mu)$ is a strictly increasing function in $\mu$ . Hence, there exists a unique $\mu_t^{\mathrm{greedy}}$ for all $t \in \{1, \dots, \tau - 1\}$ such that $V^{(0)}(t,\mu_t^{\mathrm{greedy}}) > 0$ and $V^{(0)}(t,\mu) > 0 \iff \mu > \mu_t^{\mathrm{greedy}}$ . In other words, $\pi^{\mathrm{greedy}}$ is also a thresholding-type policy as the proposition states. + +Finally, we have $V^{(0)}(t,\mu_t^*) = Q^{(0)}(t,\mu_t^*) \leq Q^* (t,\mu_t^*) = 0$ hence $\mu_t^* \leq \mu_t^{\mathrm{greedy}}$ . This is because, by definition, $Q^{*}(t,\mu) \geq Q^{\pi}(t,\mu)$ for all $t,\mu$ for any given policy $\pi$ , including $\pi^{(0)}$ . + +# I.4 PROOF OF PROPOSITION 4 + +As in the proof of Proposition 3, we take $\alpha = 0$ for notational brevity. Once again, this is without any loss of generality as, by simply shifting each value function and Q-function by $\alpha / \sqrt{\tau}$ with respect to $\mu$ , all of the following arguments would still hold. Remember that the formula we derived for $V^{(0)}(t, \mu)$ in (30) holds when $\alpha = 0$ . + +We start by deriving two bounds on the optimal Q-function $Q^{*}(t,\mu)$ : (i) a lower bound and (ii) an upper bound. For the lower bound, it is sufficient to observe that + +$$ +V ^ {(0)} (t, \mu) = Q ^ {(0)} (t, \mu) \leq Q ^ {*} (t, \mu), +$$ + +which holds since, by definition, $Q^{*}(t,\mu) \geq Q^{\pi}(t,\mu)$ for all $t,\mu$ for any given policy $\pi$ . + +For the upper bound, we use mathematical induction to show that $Q^{*}(t,\mu) \leq (\tau - t - 1)C + V^{(0)}(t,\mu)$ . 
First, for the base case of $\tau - 1$ , + +$$ +\begin{array}{l} Q ^ {*} (\tau - 1, \mu) = - C + \int V ^ {*} (\tau , \mu + z) f (z; ^ {1} / t - ^ {1} / t + 1) d z \tag {31} \\ = - C + \int \mathbb {1} \left\{\mu + z > \alpha \right\} f \left(z; 1 / t - 1 / t + 1\right) d z \\ \end{array} +$$ + +$$ +\begin{array}{l} = - C + \int V ^ {(0)} (\tau , \mu + z) f (z; 1 / t - 1 / t + 1) d z \\ = V ^ {(0)} (\tau - 1, \mu), \tag {32} \\ \end{array} +$$ + +where (31) is due to (16), and (32) is due to (29). Then, for the inductive case, assuming $Q^{*}(t,\mu)\leq (\tau -t - 1)C + V^{(0)}(t,\mu)$ + +$$ +\begin{array}{l} Q ^ {*} (t - 1, \mu) = - C + \int \max \{0, Q ^ {*} (t, \mu + z) \} f (z, 1 / (t - 1) - 1 / t) d z (33) \\ \leq \int Q ^ {*} (t, \mu + z) f \left(z, ^ {1} / (t - 1) - ^ {1} / t\right) d z (34) \\ \leq (\tau - t - 1) C + \int V ^ {(0)} (t, \mu + z) f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\ = (\tau - t) C + V ^ {(0)} (t - 1, \mu + z), \\ \end{array} +$$ + +where (33) is due to (17), and (34) holds since $-C \leq Q^{*}(t, \mu)$ implies that $\max \{0, Q^{*}(t, \mu)\} \leq \max \{C + Q^{*}(t, \mu), Q^{*}(t, \mu)\} \leq C + Q^{*}(t, \mu)$ . + +Define $\mu_t^+$ and $\mu_t^-$ such that + +$$ +V ^ {(0)} (t, \mu_ {t} ^ {+}) = 0 \iff \mu_ {t} ^ {+} = F ^ {- 1} \left(\left(\tau - t\right) \frac {C}{R}\right) \sqrt {\frac {1}{t} - \frac {1}{\tau}} +$$ + +$$ +(\tau - t - 1) C + V ^ {(0)} (t, \mu_ {t} ^ {-}) = 0 \iff \mu_ {t} ^ {-} = F ^ {- 1} \left(\frac {C}{R}\right) \sqrt {\frac {1}{t} - \frac {1}{\tau}}, +$$ + +which we are able to write in closed form using the formula we derived for $V^{(0)}(t,\mu)$ in (30) during the proof of Proposition 3. + +By definition, $\mu_t^{\mathrm{greedy}} = \mu_t^+$ . 
Moreover, (i) $V^{(0)}(t,\mu_t^*)\leq Q^* (t,\mu_t^*) = 0 = V^{(0)}(t,\mu_t^+)$ due to our lower bound, hence $\mu_t^*\leq \mu_t^+$ (remember that $V^{(0)}(t,\mu)$ was a strictly increasing function in $\mu$ ), and (ii) $(\tau -t - 1)C + V^{(0)}(t,\mu_t^-) = 0 = Q^* (t,\mu_t^*)\leq (\tau -t - 1)C + V^{(0)}(t,\mu_t^*)$ due to our upper bound, hence $V^{(0)}(t,\mu_t^-)\leq V^{(0)}(t,\mu_t^*)$ meaning $\mu_t^- \leq \mu_t^*$ . Putting together these facts, and also the fact that $\mu_t^*\leq \mu_t^{\mathrm{greedy}}$ , we obtain $|\mu_t^* -\mu_t^{\mathrm{greedy}}|\leq \mu_t^+ -\mu_t^-$ as the proposition states. + +# J BENCHMARKING ALGORITHMS + +Algorithm 2 Adaptive Enrichment, Futility Stopping with Bayes-OCP, Greedy Bayes-OCP +1: Initialize $\mu_{x}$ and $\sigma_x^2$ for all $x\in \mathcal{X}$ +2: $X\gets \mathcal{X},t\gets 0,\mathcal{D}_0\gets \emptyset$ +3: Start experiment $\psi = (\mathcal{X},\tau ,\rho)$ +4: loop: +5: $t\gets t + 1$ +6: Observe $x_{t},y_{t}$ +7: $\mathcal{D}_t\gets \mathcal{D}_{t - 1}\cup \{x_t,y_t\}$ +8: $1 / \sigma_{x_t}^2\gets 1 / \sigma_{x_t}^2 +1$ +9: $\mu_{x_t}\gets \mu_{x_t} + (y_t - \mu_{x_t})\sigma_{x_t}^2$ +10: $X^{\prime}\gets \emptyset$ +11: while $X\setminus X^{\prime}\supset \emptyset$ .. +12: $x^{*}\gets \mathrm{argmax}_{x\in X\setminus X^{\prime}}\mathbb{E}_{\theta_{x}\sim \mathcal{N}(\mu_{x},\sigma_{x}^{2})}[\mathcal{G}^{(0)}(X^{\prime}\cup \{x\};\{\theta_{x}\})]$ +13: if $\mathbb{E}_{\theta_x\sim \mathcal{N}(\mu_x,\sigma_x^2)}[\mathcal{G}^{(0)}(X^{\prime}\cup \{x^{*}\} ;\{\theta_{x}\})] > \mathbb{E}_{\theta_x\sim \mathcal{N}(\mu_x,\sigma_x^2)}[\mathcal{G}^{(0)}(X^{\prime};\{\theta_{x}\})]$ .. +14: $X^{\prime}\gets X^{\prime}\cup \{x^{*}\}$ +15: else: +16: break +17: if Adaptive Enrichment and $t = \tau /2$ and $\mathbb{E}_{\theta_x\sim \mathcal{N}(\mu_x,\sigma_x^2)}[\mathcal{G}^{(0)}(X';\{\theta_x\})] > \mathbb{E}_{\theta_x\sim \mathcal{N}(\mu_x,\sigma_x^2)}[\mathcal{G}(X,\mathcal{D}_t;\{\theta_x\})]$ .. 
+18: $X\gets X^{\prime},t\gets 0,\mathcal{D}_0\gets \emptyset$ +19: Start a new experiment $\psi = (X,\tau ,\rho)$ +20: if Greedy Bayes-OCP and $\mathbb{E}_{\theta_x\sim \mathcal{N}(\mu_x,\sigma_x^2)}[\mathcal{G}^{(0)}(X';\{\theta_x\})] > \mathbb{E}_{\theta_x\sim \mathcal{N}(\mu_x,\sigma_x^2)}[\mathcal{G}(X,\mathcal{D}_t;\{\theta_x\})]$ .. +21: $X\gets X^{\prime},t\gets 0,\mathcal{D}_0\gets \emptyset$ +22: Start a new experiment $\psi = (X,\tau ,\rho)$ +23: if Futility Stopping with Bayes-OCP and $\mathbb{P}_{\theta_x\sim \mathcal{N}(\mu_x,\sigma_x^2)}[\mathcal{G}^{(0)}(\emptyset ;\{\theta_x\}) > \mathcal{G}(X,\mathcal{D}_0;\{\theta_x\})] > \beta$ +24: Stop all experimentation + +K GLOSSARY OF TERMS AND NOTATION + +
TermNotationDescription
Experiment-Conducted to confirm efficacy of an intervention, e.g. a new treatment in clinical trials, or a new recommendation policy in online advertisement
Subject-Individual participant of an experiment, e.g. patients in a clinical trial, or customers in online advertisement
PopulationX⊆XCollection of subjects that all share the same qualities, e.g. all female patients in a clinical trial, or all customers with the same preferences in online advertisement
Atomic-populationx∈XIndivisible populations
PropensitiesηxThe probability that a subject is from atomic-population x
ηXThe probability that a subject is from population X
ηx|XThe probability that a subject is from atomic-population x conditioned on the fact that they are from population X
Outcome distributionΩxDistribution of outcomes that is indicative of the effect of the intervention of interest for atomic-population x
Mean outcomesθxExpected outcome, i.e. the effect of the intervention of interest, for atomic-population x
θXExpected outcome for population X
Experiment designψ=(X,τ,ρ)Target population X, sample horizon τ, and success criterion ρ that characterize an experiment
Viable experiment designsΨExperiment designs that can potentially be followed by a meta-experimenter
Meta-experimenter-The decision-making agent that decides when to run experiments and which experiment design in Ψ to follow
Sample/time horizonτAn experiment is terminated when t=τ
Success criterionρAn experiment is declared a success if ρ(Dτ)=1
Online datasetDtData collected by an ongoing experiment at time step t
DtData collected by the i-th experiment run by the meta-experimenter at time step t
Aggregate datasetDItCollective data collected by all experiments up to time step t of the i-th experiment
-TtNumber of time steps for which the i-th experiment is conducted until it is stopped or its time horizon is reached
CostCCost incurred per time step by running experiment ψ
RewardRReward received if experiment ψ is successful
UtilityGSum of costs and rewards received after all experimentation is concluded
PolicyπDecision-making policy of the meta-experimenter
Optimal policyπ*The optimal policy that maximizes utility G in expectation
Greedy policyπgreedySee Section 3
Test statisticμtIn the simplified case in Section 3, the empirical mean outcome
Value functionVπ(t,μ)The expected utility of following policy π when μt=μ
Q-functionQπ(t,μ)The expected utility of following policy π after conducting the ongoing experiment for one more time step when μt=μ
-TtπThe first time step at or after time step t that policy π decides to stop all experimentation
Optimal value functionV*The value function associated with π*
Optimal Q-functionQ*The Q-function associated with π*
Thresholdsμt*Decision-making thresholds associated with π*
μtgreedyDecision-making thresholds associated with πgreedy
Conditional power functionP(X, Dt; {θx})The probability of a hypothesis test being successful conditioned on mean outcomes {θx}
Expected utility functionG(X, Dt; {θx})The expected utility of fully committing to an experiment and waiting until it terminates when the experiment targets population X, is currently at time step t, and has collected dataset Dt
PosteriorsN(μx, σx2)Posterior distributions over mean outcomes {θx} maintained by Bayes-OCP such that θx|D ~ N(μx, σx2)
\ No newline at end of file diff --git a/2023/When to Make and Break Commitments_/images.zip b/2023/When to Make and Break Commitments_/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..375806768bf07da8c786badceef1384d49d63f8c --- /dev/null +++ b/2023/When to Make and Break Commitments_/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7b5a266dd43ffec03bef68b5327f9ee3fb0822f65772bf21df625081d068ff7 +size 1785962 diff --git a/2023/When to Make and Break Commitments_/layout.json b/2023/When to Make and Break Commitments_/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..5a6fdbf43926691dc4eb4b9886f4b8b264009b07 --- /dev/null +++ b/2023/When to Make and Break Commitments_/layout.json @@ -0,0 +1,23069 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 453, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 453, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 453, + 95 + ], + "type": "text", + "content": "WHEN TO MAKE AND BREAK COMMITMENTS?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 115, + 175, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 115, + 175, + 126 + ], + "spans": [ + { + "bbox": [ + 111, + 115, + 175, + 126 + ], + "type": "text", + "content": "Alihan Hüyük" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 127, + 214, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 127, + 214, + 137 + ], + "spans": [ + { + "bbox": [ + 111, + 127, + 214, + 137 + ], + "type": "text", + "content": "University of Cambridge" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 138, + 209, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 138, + 209, + 148 + ], + "spans": [ + { + "bbox": [ + 111, + 138, + 209, + 148 + ], + "type": "text", + "content": "ah2075@cam.ac.uk" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 242, + 115, + 304, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 115, + 304, + 126 + ], + "spans": [ + { + "bbox": [ + 242, + 115, + 304, + 126 + ], + "type": "text", + "content": "Zhaozhi Qian" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 242, + 127, + 344, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 127, + 344, + 137 + ], + "spans": [ + { + "bbox": [ + 242, + 127, + 344, + 137 + ], + "type": "text", + "content": "University of Cambridge" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 242, + 138, + 334, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 138, + 334, + 148 + ], + "spans": [ + { + "bbox": [ + 242, + 138, + 334, + 148 + ], + "type": "text", + "content": "zq224@cam.ac.uk" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 373, + 115, + 479, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 115, + 479, + 125 + ], + "spans": [ + { + "bbox": [ + 373, + 115, + 479, + 125 + ], + "type": "text", + "content": 
"Mihaela van der Schaar" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 373, + 127, + 477, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 127, + 477, + 137 + ], + "spans": [ + { + "bbox": [ + 373, + 127, + 477, + 137 + ], + "type": "text", + "content": "University of Cambridge" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 373, + 138, + 477, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 138, + 477, + 148 + ], + "spans": [ + { + "bbox": [ + 373, + 138, + 477, + 148 + ], + "type": "text", + "content": "The Alan Turing Institute" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 373, + 149, + 465, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 149, + 465, + 159 + ], + "spans": [ + { + "bbox": [ + 373, + 149, + 465, + 159 + ], + "type": "text", + "content": "mv472@cam.ac.uk" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 276, + 176, + 335, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 176, + 335, + 188 + ], + "spans": [ + { + "bbox": [ + 276, + 176, + 335, + 188 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 140, + 194, + 471, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 194, + 471, + 350 + ], + "spans": [ + { + "bbox": [ + 140, + 194, + 471, + 350 + ], + "type": "text", + "content": "In many scenarios, decision-makers must commit to long-term actions until their resolution before receiving the payoff of said actions, and usually, staying committed to such actions incurs continual costs. For instance, in healthcare, a newly-discovered treatment cannot be marketed to patients until a clinical trial is conducted, which both requires time and is also costly. Of course in such scenarios, not all commitments eventually pay off. For instance, a clinical trial might end up failing to show efficacy. 
Given the time pressure created by the continual cost of keeping a commitment, we aim to answer: When should a decision-maker break a commitment that is likely to fail—either to make an alternative commitment or to make no further commitments at all? First, we formulate this question as a new type of optimal stopping/switching problem called the optimal commitment problem (OCP). Then, we theoretically analyze OCP, and based on the insight we gain, propose a practical algorithm for solving it. Finally, we empirically evaluate the performance of our algorithm in running clinical trials with subpopulation selection." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 361, + 207, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 361, + 207, + 373 + ], + "spans": [ + { + "bbox": [ + 106, + 361, + 207, + 373 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 380, + 506, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 501 + ], + "type": "text", + "content": "In many real-world settings, decision-makers must commit to long-term actions and wait until their resolution before receiving the payoff of said actions. Meanwhile, staying committed to such actions incurs continual costs. For instance, in portfolio management, it might take time for an asset to develop additional value after an initial investment, and keeping capital tied up in an asset comes with an opportunity cost for the investor (Markowitz, 1959; Merton, 1969; Karatzas and Wang, 2020). In an energy network, turning power stations on and off is not an immediate action, hence a sudden increase in energy demand can only be met with a delay after putting more stations into operation, and keeping stations operational obviously consumes resources (Rafique and Jianhua, 2018; Olofsson et al., 2022). 
In healthcare, a newly-discovered treatment can only be marketed to patients once a successful clinical trial that targets the said treatment is conducted, which both requires time and is also costly (Kaitin, 2010; Umscheid et al., 2011)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 506, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 506, + 628 + ], + "type": "text", + "content": "Of course, not all commitments eventually pay off: An asset might end up losing value despite investments, energy demands might shift faster than a network can react to, and a clinical trial might fail to show efficacy for the targeted treatment. Given the time pressure created by the continual cost of keeping a commitment, our goal in this paper is to answer the question: When should a decision-maker break a commitment—thereby avoiding future costs but also forfeiting any potential returns—either to make an alternative commitment instead or to make no further commitments at all? Solving this problem optimally requires a careful balance between exploration and exploitation: The earlier a commitment that is bound to fail is broken, the more resources would be saved (cf. exploitation); but the longer one is kept, the more information is revealed regarding whether the commitment is actually failing or might still succeed (cf. exploration)—and in certain cases, also regarding the prospects of similar commitments one could make instead." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "Related problems are mostly studied within the context of adaptive experimentation and sequential hypothesis testing (see Section 5). 
As such, we focus on adaptive experimentation as our main application as well. More specifically, we consider the problem of selecting the target population of an adaptive experiment. Suppose an experimenter, who is interested in proving the efficacy of a new treatment, starts running an initial experiment that targets a certain population of patients. Incidentally, the treatment being tested is effective only for a relatively narrow subpopulation of patients but not for the wider population as a whole. Hence, an experiment targeting the overall population, but not the subpopulation specifically, will most probably fail to prove efficacy and prevent the deployment of the treatment for the patients who would have actually benefited from it, not to mention waste" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": "time and resources (Moineddin et al., 2008; Lipkovich et al., 2017; Chiu et al., 2018). 
Of course, the experimenter has no knowledge of this in advance but the initial experiment they have set up would slowly reveal more information regarding the effects of the treatment and the fact that the ongoing experiment is bound to fail. In that case, we want to be able to determine at what point the experimenter has enough information to justify breaking their commitment to the initial experiment that targets too wide of a population to be successful, in favor of making a new commitment to a follow-up experiment that focuses on a narrower subpopulation instead?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 165, + 506, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 506, + 308 + ], + "type": "text", + "content": "Contributions Our contributions are threefold: First, we formulate the problem of making and breaking commitments in a timely manner as a new type of optimal stopping/switching problem called the optimal commitment problem (OCP) (Section 2). The defining feature of OCP is that rewards are received only when a known time point is reached but costs are incurred continually, requiring commitment to actions but with incentive to abandon those commitments. As we will show later, OCP cannot be easily solved via conventional reinforcement learning techniques due to its non-convex nature. Second, we theoretically analyze a simplified case of OCP to identify the characteristics of the optimal solution (Section 3), and based on the insights we gain, propose a practical algorithm for the more general case (Section 4). Third, we empirically evaluate the performance of our algorithm in running experiments with subpopulation selection (Section 6). 
Before we move on, it should be emphasized that, although we predominantly consider adaptive experimentation as our main application, our contributions remain generally applicable to portfolio management, energy systems, and any other decision-making scenarios that require commitments to long-term actions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 321, + 303, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 321, + 303, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 303, + 333 + ], + "type": "text", + "content": "2 OPTIMAL COMMITMENT PROBLEM" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 341, + 507, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 507, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 507, + 397 + ], + "type": "text", + "content": "We first introduce the problem of optimal commitment from the perspective of running experiments. As far as our formulation is concerned, experiments are conducted to confirm the efficacy of an intervention by observing the outcome of the said intervention for subjects belonging to a particular population. However, this experiment-focused perspective does not limit the applicability of OCP; we stress its generality later at the end of the section. We provide a glossary of terms and notation in Appendix K." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": "Populations Let " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " be a discrete set of atomic-populations such that every subject is only the member of exactly one atomic-population " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "x\\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": ". Denote with " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\eta_{x}\\in [0,1]" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " the probability of a subject being from atomic-population " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " (such that " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\sum_{x\\in \\mathcal{X}}\\eta_x = 1" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": "), and with " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\Omega_{x}" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " the distribution of outcomes for atomic-population " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " such that the mean outcome " + }, + { + "bbox": [ + 
104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\theta_{x} = \\mathbb{E}_{y\\sim \\Omega_{x}}[y]" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " is the effect of some intervention for atomic-population " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": ". Now, wider populations can be constructed by combining various atomic-populations. Let any " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "X\\subseteq \\mathcal{X}" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " represent the population of subjects who belong to either one of the atomic-populations " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\{x\\in X\\}" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": ". 
Then, the probability of a subject being from population " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " can be written as " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\eta_{X} = \\sum_{x\\in X}\\eta_{x}" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": ", the probability of a subject being from atomic-population " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " conditioned on the fact that they are from population " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " can be written as " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\eta_{x|X} = \\eta_x / \\eta_X" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": ", and the average effect for population " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": " can be written as " + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}_X = \\sum_{x\\in X}\\eta_{x|X}\\theta_x" + }, + { + "bbox": [ + 104, + 402, + 506, + 514 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": "Experiments An experiment is largely characterized by the population it targets, its sample horizon, and its success criterion. During an experiment that targets population " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": ", at each time step " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "t \\in \\{1, 2, \\ldots\\}" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": " that the experiment continues, first a subject from some atomic-population " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": " within the targeted population " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": " arrives with probability " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\eta_{x_{t}|X}" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": ", and then the outcome " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "y_{t} \\sim \\Omega_{x_{t}}" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": " for that subject is observed. 
This process generates an online dataset " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{t} = \\{x_{t'}, y_{t'}\\}_{t' = 1}^{t}" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": ". The experiment terminates when a pre-specified sample/time horizon " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": " is reached. Once terminated, the experiment is declared a success if " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\rho(\\mathcal{D}_{\\tau}) = 1" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\rho: (\\mathcal{X} \\times \\mathbb{R})^{\\tau} \\to \\{0, 1\\}" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": " is the success criterion, and declared a failure otherwise. Formally, the tuple " + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\psi = (X, \\tau, \\rho)" + }, + { + "bbox": [ + 104, + 517, + 506, + 607 + ], + "type": "text", + "content": " constitutes an experiment design." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": "Meta-experimenter Suppose a meta-experimenter is given a set of viable experiment designs " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": " and is tasked with running at least one successful experiment. Each experiment " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\psi \\in \\Psi" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": " has an associated cost " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "C_{\\psi} \\in \\mathbb{R}_{+}" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": ", which the experiment incurs per time step that it continues, and an associated reward " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "R_{\\psi} \\in \\mathbb{R}_{+}" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": ", which the experiment provides only if it eventually succeeds. The meta-experimenter aims to maximize utility—that is the difference between any eventual reward received and the total costs incurred by running experiments. 
They first pick an initial experiment " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\psi^{1} \\in \\Psi" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": " and start conducting it, which generates an online dataset " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t^1" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": " as described earlier. Now at each time step " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": ", they need to decide whether they should stay committed to their initial decision and wait until " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\psi^{1}" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": " terminates, or stop " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\psi^{1}" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": " early in favor of starting a new experiment " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\psi^{2}" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": ". They might decide on the latter to avoid unnecessary costs if " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t^1" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": " already indicates " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\psi^{1}" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": " is unlikely to succeed. 
If at some point a secondary experiment " + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\psi^{2}" + }, + { + "bbox": [ + 104, + 611, + 507, + 733 + ], + "type": "text", + "content": " is started, now the meta-experiment has a similar decision to make" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": "regarding whether to stop " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\psi^2" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " early in favor of starting a new experiment " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\psi^3 \\in \\Psi" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": ". 
This process continues until either an experiment finally succeeds or the meta-experimenter decides not to conduct any further experiments; let the random variable " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "n \\in \\{1, 2, \\ldots\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " be such that " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\psi^n" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " is the last experiment. We denote with " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\psi^i = (X^i, \\tau^i, \\rho^i)" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " the " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": "-th experiment conducted by the meta-experimenter, and with " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "T^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " the number of time steps for which the " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": "-th experiment is conducted either until it was stopped by the meta-experimenter or the time horizon " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\tau^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " was reached. 
Denote with " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i)" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " the decision-making policy of the meta-experimenter, where " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " is the current time step of the latest experiment " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\psi^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\bar{\\mathcal{D}}_t^i = (\\cup_{j=1}^{i-1} \\mathcal{D}_{T^j}^j) \\cup \\mathcal{D}_t^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " is an aggregate dataset. 
We write (i) " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i) = \\psi^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " if the meta-experiment decides to keep conducting the current experiment " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\psi^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": ", (ii) " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i) = \\psi' \\neq \\psi^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " if the meta-experimenter decides to stop experiment " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\psi^i" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " and start experiment " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\psi'" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " instead, and (iii) " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\pi(t, \\psi^i, \\bar{\\mathcal{D}}_t^i) = \\emptyset" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " if the meta-experimenter decides not to conduct any further experiments." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 209, + 505, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 505, + 221 + ], + "type": "text", + "content": "Objective Once all experimentation is concluded, the meta-experimenter achieves the total utility" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 194, + 223, + 504, + 236 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 223, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 194, + 223, + 504, + 236 + ], + "type": "interline_equation", + "content": "G = R _ {\\psi^ {n}} \\cdot \\mathbb {1} \\left\\{T ^ {n} = \\tau^ {n} \\right\\} \\cdot \\rho^ {n} \\left(\\mathcal {D} _ {\\tau^ {n}} ^ {n}\\right) - \\sum_ {i = 1} ^ {n} C _ {\\psi^ {i}} \\cdot T ^ {i}. \\tag {1}", + "image_path": "365ea0a633723331349af3695fab934b9a57b5ba78a5e4ba403cdf1b21719f6b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": "Then, the optimal commitment problem is to find the optimal policy " + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "inline_equation", + "content": "\\pi^{*} = \\operatorname{argmax}_{\\pi} \\mathbb{E}_{\\pi}[G]" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": " that maximizes the expected utility given " + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "inline_equation", + "content": "\\{\\eta_x\\}" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "inline_equation", + "content": "\\{R_{\\psi}, C_{\\psi}\\}" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": " without knowing mean outcomes " + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "inline_equation", + "content": "\\{\\theta_x\\}" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": " or outcome distributions " + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "inline_equation", + "content": "\\{\\Omega_x\\}" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": ". It is called the optimal commitment problem because each experiment " + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "inline_equation", + "content": "\\psi = (X, \\tau, \\rho)" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": " only provides a reward if the meta-experimenter commits to incurring its costs for at least " + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": " time steps, and the meta-experimenter needs to decide which experiment in " + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 104, + 239, + 506, + 306 + ], + "type": "text", + "content": " is the better commitment—or if there is any experiment worth committing to at all—adaptively." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 311, + 338, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 311, + 338, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 311, + 338, + 388 + ], + "type": "text", + "content": "General applicability of OCP Although we have described OCP from the perspective of (meta-)experiment design, it can potentially be useful in modeling many other problems as we have stressed during the introduction (see Table 1). For instance, in portfolio management, atomic-populations can be regarded as various assets one can invest in, then a population would correspond to a portfolio" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 388, + 506, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 506, + 454 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 506, + 454 + ], + "type": "text", + "content": "of assets. Similar to experiments, when these portfolios require a time commitment (cf. " + }, + { + "bbox": [ + 104, + 388, + 506, + 454 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 388, + 506, + 454 + ], + "type": "text", + "content": ") before they provide their payoff (cf. " + }, + { + "bbox": [ + 104, + 388, + 506, + 454 + ], + "type": "inline_equation", + "content": "R_{\\psi}" + }, + { + "bbox": [ + 104, + 388, + 506, + 454 + ], + "type": "text", + "content": ") and incur an opportunity cost (cf. " + }, + { + "bbox": [ + 104, + 388, + 506, + 454 + ], + "type": "inline_equation", + "content": "C_{\\psi}" + }, + { + "bbox": [ + 104, + 388, + 506, + 454 + ], + "type": "text", + "content": ") in the mean time, the decision-making problem of managing when and which portfolio to invest in constitutes an instance of the optimal commitment problem. Another good examples is energy management, where power stations and the networks they form are akin to atomic-populations and populations. 
Since power stations cannot be turned on and off immediately, putting one in operation requires a certain amount of commitment." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 343, + 347, + 504, + 378 + ], + "blocks": [ + { + "bbox": [ + 342, + 312, + 506, + 344 + ], + "lines": [ + { + "bbox": [ + 342, + 312, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 342, + 312, + 506, + 344 + ], + "type": "text", + "content": "Table 1: Equivalent concepts across different domains. OCP can model scenarios other than adaptive experimentation." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 343, + 347, + 504, + 378 + ], + "lines": [ + { + "bbox": [ + 343, + 347, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 343, + 347, + 504, + 378 + ], + "type": "table", + "html": "
DomainEquivalent Concepts
Adaptive experimentationAtomic-populationPopulation
Portfolio managementFinancial assetPortfolio of assets
Energy systemsPower stationNetwork of stations
", + "image_path": "4bf98f4f211005a6b5db045ff74ed3f9a5b73be4b0514cfdc18ee5d448f1b0ab.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 464, + 408, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 408, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 408, + 475 + ], + "type": "text", + "content": "3 WARM-UP: WHEN TO BREAK A SINGLE COMMITMENT?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": "In this section, to gather insights, we commence by analyzing a simplified instance of OCP. Later, in Section 4, using these insights, we construct a practical algorithm for solving a more general case of OCP. As the simplified instance, we only consider one atomic-population such that " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = \\{\\mathcal{X}_0\\}" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": " and one experiment design that targets this atomic-population such that " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "\\Psi = \\{\\Psi_0 = (\\mathcal{X}_0,\\tau ,\\rho)\\}" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": ". 
Moreover, we assume that the outcomes are distributed normally with unit variance such that " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "\\Omega \\doteq \\Omega_{\\mathcal{X}_0}\\doteq \\mathcal{N}(\\theta \\doteq \\theta_{\\mathcal{X}_0},1)" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": " and the success criterion is a simple Z-test to see whether " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "\\theta >0" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "\\rho (\\mathcal{D}_{\\tau})\\doteq \\rho (\\mu_{\\tau})\\doteq \\mathbb{1}\\{\\mu_{\\tau} > \\alpha /\\sqrt{\\tau}\\}" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "\\mu_t = \\sum_{(x_{t'},y_{t'})\\in \\mathcal{D}_t}y_{t'} / |\\mathcal{D}_t|" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": " is the empirical mean outcome given dataset " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": " determines the significance threshold for the test. 
Since there is just one viable experiment in this setting, the only decision that needs to be made at each time step is whether to keep conducting experiment " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "\\psi^1 = \\Psi_0" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": " or to stop all experimentation. For this decision to be interesting, we will also assume that " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "C\\doteq C_{\\Psi_0} > 0" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": " so that never stopping is not necessarily optimal—and " + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "inline_equation", + "content": "R\\doteq R_{\\Psi_0} > \\tau C" + }, + { + "bbox": [ + 104, + 481, + 506, + 615 + ], + "type": "text", + "content": " so that always stopping is not necessarily optimal either." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": "text", + "content": "Value and Q-functions Since " + }, + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": "inline_equation", + "content": "\\mu_t" + }, + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": "text", + "content": " are sufficient statistics to estimate the success probability of the experiment, it is also sufficient to only consider policies of the form " + }, + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": "inline_equation", + "content": "\\pi(t, \\mu)" + }, + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": 
"text", + "content": ". For a given policy " + }, + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 619, + 506, + 642 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 151, + 644, + 504, + 657 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 644, + 504, + 657 + ], + "spans": [ + { + "bbox": [ + 151, + 644, + 504, + 657 + ], + "type": "interline_equation", + "content": "V ^ {\\pi} (t, \\mu) = \\mathbb {E} \\left[ R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {\\pi} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t} ^ {\\pi}, \\tau \\right\\} - t\\right) \\mid \\mu_ {t} = \\mu \\right] \\tag {2}", + "image_path": "f9c96161a37916517e9f2a9cfa7234a74c635ca43d443c286ac08c33e667a122.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 151, + 658, + 504, + 672 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 658, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 151, + 658, + 504, + 672 + ], + "type": "interline_equation", + "content": "Q ^ {\\pi} (t, \\mu) = \\mathbb {E} [ R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {\\pi} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {\\pi}, \\tau \\right\\} - t\\right) | \\mu_ {t} = \\mu ] \\tag {3}", + "image_path": "112a65b7ad234cf383eaab18e93428ffa16e7bd367a874979ddcc174069f6083.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "content": "are the value function, and the Q-function of conducting the experiment for at least one more time step respectively, where " + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": 
"inline_equation", + "content": "T_{t}^{\\pi} = \\min \\{t' \\geq t : \\pi(t', \\mu_{t'}) = \\emptyset\\}" + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "content": " is the first time step at or after time " + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "content": " that policy " + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "content": " decides to stop; let " + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "inline_equation", + "content": "V^{*} = V^{\\pi^{*}}" + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "inline_equation", + "content": "Q^{*} = Q^{\\pi^{*}}" + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "content": " be the optimal value and Q-functions. Note that the Q-factor of stopping all experimentation is always equal to zero for all policies. Hence, the optimal policy must be such that " + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\pi^{*}(t, \\mu) = \\Psi_{0}" + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "inline_equation", + "content": "Q^{*}(t, \\mu) > 0" + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\pi^{*}(t, \\mu) = \\emptyset" + }, + { + "bbox": [ + 104, + 674, + 506, + 731 + ], + "type": "text", + "content": " otherwise." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 339, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 339, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 339, + 127 + ], + "type": "text", + "content": "Once we identify the value and Q-functions, a naive attempt at finding the optimal policy would be to compute " + }, + { + "bbox": [ + 104, + 82, + 339, + 127 + ], + "type": "inline_equation", + "content": "V^{*}" + }, + { + "bbox": [ + 104, + 82, + 339, + 127 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 339, + 127 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 82, + 339, + 127 + ], + "type": "text", + "content": " via dynamic programming as they would satisfy the following Bellman optimality conditions:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 129, + 336, + 143 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 129, + 336, + 143 + ], + "spans": [ + { + "bbox": [ + 121, + 129, + 336, + 143 + ], + "type": "interline_equation", + "content": "Q ^ {*} (t, \\mu) = - C + \\mathbb {E} \\left[ V ^ {*} (t + 1, \\mu_ {t + 1}) \\mid \\mu_ {t} = \\mu \\right] \\tag {4}", + "image_path": 
"c95d68675fbf9a92831dd086249f1d79dad2f1d0a66f486760112ac7be75b4b1.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 144, + 336, + 157 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 144, + 336, + 157 + ], + "spans": [ + { + "bbox": [ + 121, + 144, + 336, + 157 + ], + "type": "interline_equation", + "content": "V ^ {*} (t, \\mu) = \\max \\{0, Q ^ {*} (t, \\mu) \\} \\tag {5}", + "image_path": "c252db74c8934e6a6a972e8c1403622494f6d46274ce5c232f06b3c070ef61ce.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "inline_equation", + "content": "V^{*}(\\tau, \\mu) = R \\cdot \\rho(\\mu)" + }, + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "text", + "content": ". However, a major complication in applying dynamic programming methods to compute " + }, + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "inline_equation", + "content": "V^{*}" + }, + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "text", + "content": " is that they are continuous functions in " + }, + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 159, + 337, + 215 + ], + "type": "text", + "content": ". 
In the literature of partially-observable Markov decision processes (POMDPs), which OCP happens to be an instance" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "spans": [ + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "text", + "content": "of (see Appendix A), the standard approach of addressing this complication would be to leverage the convexity of " + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "inline_equation", + "content": "V^{*}" + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "text", + "content": ", and approximate them with functions of the form " + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "inline_equation", + "content": "f(\\mu) = \\max_{i} a_{i}\\mu + b_{i}" + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "text", + "content": " (Spaan, 2012). 
However, this standard approach is not applicable in OCP because, in general, neither " + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "inline_equation", + "content": "V^{*}(t,\\mu)" + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "text", + "content": " nor " + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "inline_equation", + "content": "-V^{*}(t,\\mu)" + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "text", + "content": " is a convex function with respect to " + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 215, + 506, + 259 + ], + "type": "text", + "content": " (see Figure 1):" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "type": "text", + "content": "Proposition 1 (Non-convexity). 
There exist a problem instance " + }, + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "type": "inline_equation", + "content": "(C,R,\\tau,\\alpha)" + }, + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "type": "inline_equation", + "content": "t\\in \\{1,\\dots ,\\tau -1\\}" + }, + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\exists \\mu ,\\mu^{\\prime}\\in \\mathbb{R},p\\in [0,1]:V^{*}(t,p\\mu +(1 - p)\\mu^{\\prime}) < pV^{*}(t,\\mu) + (1 - p)V^{*}(t,\\mu^{\\prime})" + }, + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 264, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\exists \\mu ,\\mu^{\\prime}\\in \\mathbb{R},p\\in [0,1]: - V^{*}(t,p\\mu +(1 - p)\\mu^{\\prime}) < - pV^{*}(t,\\mu) - (1 - p)V^{*}(t,\\mu^{\\prime}).^{1}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "content": "Properties of the optimal policy Although identifying " + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "content": " exactly by computing " + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "inline_equation", + "content": "V^{*}" + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "content": " is challenging, we 
can still identify some properties that " + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "content": " should have, which can then help us design a heuristic policy that we expect to perform well, albeit not optimally. First of all, the optimal policy " + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "content": " should be a \"thresholding-type\" policy—that is the meta-experimenter should keep conducting the experiment as long as " + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "inline_equation", + "content": "\\mu_{t}" + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "content": " stays above a time-dependent threshold " + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "inline_equation", + "content": "\\mu_{t}^{*}" + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "content": " and should stop all experimentation the moment " + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "inline_equation", + "content": "\\mu_{t}" + }, + { + "bbox": [ + 104, + 304, + 504, + 371 + ], + "type": "text", + "content": " drops below that threshold (see the top panel of Figure 2):" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "type": "text", + "content": "Proposition 2 (Thresholding). 
For all problem instances " + }, + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "type": "inline_equation", + "content": "(C, R, \\tau, \\alpha)" + }, + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "type": "text", + "content": ", there exists time-dependent thresholds " + }, + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\{\\mu_t^* \\in \\mathbb{R}\\}_{t=1}^{\\tau-1}" + }, + { + "bbox": [ + 104, + 376, + 504, + 400 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 209, + 403, + 504, + 416 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 403, + 504, + 416 + ], + "spans": [ + { + "bbox": [ + 209, + 403, + 504, + 416 + ], + "type": "interline_equation", + "content": "\\pi^ {*} (t, \\mu) = \\left\\{\\Psi_ {0} \\quad i f \\mu > \\mu_ {t} ^ {*}; \\quad \\emptyset \\quad o t h e r w i s e \\right\\} \\tag {6}", + "image_path": "dc2f666020d0b743c6f2f3bdc1c76c211c6373e14422403c8eb6902f3a556b7c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "text", + "content": "Intuitively, a higher test statistic " + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "inline_equation", + "content": "\\mu_t" + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "text", + "content": " means that the experiment is only more likely to succeed, hence if it is optimal to continue conducting the experiment when " + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "inline_equation", + "content": "\\mu_t = \\mu" + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "text", + "content": ", then it should also be optimal to continue when " + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "inline_equation", 
+ "content": "\\mu_t = \\mu' > \\mu" + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "text", + "content": " (likewise, lower " + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "inline_equation", + "content": "\\mu_t" + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "text", + "content": " means success is even less likely hence " + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "inline_equation", + "content": "\\pi^*(t, \\mu) = \\emptyset" + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "text", + "content": " implies " + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "inline_equation", + "content": "\\pi^*(t, \\mu') = \\emptyset" + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "inline_equation", + "content": "\\mu' < \\mu" + }, + { + "bbox": [ + 104, + 419, + 338, + 487 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": "Moreover, the optimal policy " + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": " must be \"optimistic\" that the experiment will succeed when making decisions. 
Consider a greedy policy " + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "inline_equation", + "content": "\\pi^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": " that continues as long as the expected utility of committing fully to conducting the experiment until it terminates at " + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "inline_equation", + "content": "t = \\tau" + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": " is positive—that is " + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "inline_equation", + "content": "\\pi^{\\mathrm{greedy}} = \\Psi_0" + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": " if and only if " + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "inline_equation", + "content": "V^{\\pi^{(0)}}(t,\\mu) > 0" + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "inline_equation", + "content": "\\pi^{(0)}" + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": " is the policy that always waits until the experiment terminates such that " + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "inline_equation", + "content": "\\pi^{(0)}(t,\\mu) = \\Psi_0" + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "inline_equation", + "content": "t,\\mu" + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "inline_equation", + "content": "\\pi^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 491, + 337, + 645 + ], + "type": "text", + "content": " is said to be greedy because the decision to continue is made assuming a full 
commitment to the experiment without considering the possibility to stop at a future time step. Then, whenever such greedy reasoning suggests continuing, the meta-experimenter should indeed continue. However, whenever the same reasoning suggests stopping, the meta-" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 645, + 506, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 667 + ], + "type": "text", + "content": "experimenter should be optimistic that the experiment will succeed and occasionally make the decision to continue instead—that is " + }, + { + "bbox": [ + 104, + 645, + 506, + 667 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 645, + 506, + 667 + ], + "type": "text", + "content": " should be biased towards continuing (see the threshold gap in Figure 2):" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "spans": [ + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "text", + "content": "Proposition 3 (Optimism). 
First, " + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "inline_equation", + "content": "\\pi^{\\text{greedy}}" + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "text", + "content": " is also of thresholding type and there exists " + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "inline_equation", + "content": "\\{\\mu_t^{\\text{greedy}} \\in \\mathbb{R}\\}_{t=1}^{\\tau-1}" + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "inline_equation", + "content": "\\pi^{\\text{greedy}}(t,\\mu) = \\Psi_0" + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "text", + "content": " if and only if " + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "inline_equation", + "content": "\\mu > \\mu_t^{\\text{greedy}}" + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "text", + "content": ". Moreover, for all " + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "inline_equation", + "content": "t \\in \\{1,\\dots,\\tau-1\\}" + }, + { + "bbox": [ + 104, + 671, + 504, + 698 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 157, + 700, + 504, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 700, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 157, + 700, + 504, + 715 + ], + "type": "interline_equation", + "content": "\\mu_ {t} ^ {*} \\leq \\mu_ {t} ^ {\\text {g r e e d y}} \\quad \\Longleftrightarrow \\quad \\left\\{\\mu : \\pi^ {*} (t, \\mu) = \\Psi_ {0} \\right\\} \\supseteq \\left\\{\\mu : \\pi^ {\\text {g r e e d y}} (t, \\mu) = \\Psi_ {0} \\right\\} \\tag {7}", + "image_path": "bf5531bfff95cdc54d615c1c5908bb42c0f65b9795a43815ef41d1dad99a75e5.jpg" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 345, + 83, + 504, + 171 + ], + "blocks": [ + { + "bbox": [ + 345, + 83, + 
504, + 171 + ], + "lines": [ + { + "bbox": [ + 345, + 83, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 345, + 83, + 504, + 171 + ], + "type": "image", + "image_path": "07487c83fa7edf52e9199370f2865392f818d5292dc95a654768533a6c1969f5.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "lines": [ + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "text", + "content": "Figure 1: Optimal value function " + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "inline_equation", + "content": "V^{*}(t,\\mu)" + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "inline_equation", + "content": "C = 1" + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "inline_equation", + "content": "R = 10" + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\tau = 4" + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\alpha = 0" + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "text", + "content": ". 
It can clearly be seen that neither " + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "inline_equation", + "content": "V^{*}" + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "text", + "content": " nor " + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "inline_equation", + "content": "-V^{*}" + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "text", + "content": " is convex in " + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 342, + 171, + 506, + 213 + ], + "type": "text", + "content": " (cf. Proposition 1)." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 720, + 304, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 720, + 304, + 732 + ], + "spans": [ + { + "bbox": [ + 118, + 720, + 304, + 732 + ], + "type": "text", + "content": "1 Proofs of all propositions are given in Appendix I." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "text", + "content": "Intuitively, the optimism of " + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "text", + "content": " accounts for the information gained from observing more samples when the experiment is continued. Remember that " + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "inline_equation", + "content": "\\pi^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "text", + "content": " estimates the reward to be received if the experiment is conducted until termination, and it stops whenever its estimate is negative. But, the estimate of " + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "inline_equation", + "content": "\\pi^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "text", + "content": " has some uncertainty associated with it. Whenever it is uncertain enough that the reward to be received is actually negative; incurring the cost of continuing for one more time step, gaining new information, and forming a more certain estimate can lead to a more accurate decision and a higher overall utility. 
Finally, the optimism of " + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "text", + "content": " has a strictly decreasing upper bound; denoting with " + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "inline_equation", + "content": "F(x) = (1 / \\sqrt{2\\pi})\\int_{-\\infty}^{x}e^{-(1 / 2)x^2}" + }, + { + "bbox": [ + 104, + 83, + 506, + 174 + ], + "type": "text", + "content": " the c.d.f. of the standard normal distribution:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 177, + 372, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 177, + 372, + 190 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 372, + 190 + ], + "type": "text", + "content": "Proposition 4 (Decreasing optimism). For all " + }, + { + "bbox": [ + 104, + 177, + 372, + 190 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, \\tau - 1\\}" + }, + { + "bbox": [ + 104, + 177, + 372, + 190 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 174, + 193, + 504, + 209 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 193, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 174, + 193, + 504, + 209 + ], + "type": "interline_equation", + "content": "\\left| \\mu_ {t} ^ {*} - \\mu_ {t} ^ {\\text {g r e e d y}} \\right| \\leq \\sqrt {1 / t - 1 / \\tau} \\times \\left(F ^ {- 1} \\left(\\left(\\tau - t\\right) ^ {C} / _ {R}\\right) - F ^ {- 1} \\left(^ {C} / _ {R}\\right)\\right) \\tag {8}", + "image_path": "961975a9919486e4d29fe3d337dbe992dea76f6527d2a791bd75db32dd5a2794.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "text", + "content": 
"Intuitively, as the experiment continues, the information gained from one individual sample decreases relative to the total information accumulated, hence the optimism of " + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "text", + "content": " that accounts for the that information gain also decreases (see the bottom panel of Figure 2). Consider one extreme: When " + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "inline_equation", + "content": "t = \\tau - 1" + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "text", + "content": ", there is no more information to be gained before the experiment terminates at " + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "inline_equation", + "content": "t = \\tau" + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "text", + "content": ", hence " + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "text", + "content": " should make the same decisions as " + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "inline_equation", + "content": "\\pi^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "text", + "content": ". Indeed, Proposition 4 implies that " + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "inline_equation", + "content": "\\mu_{\\tau - 1}^{*} = \\mu_{\\tau - 1}^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 213, + 504, + 269 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 277, + 334, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 334, + 290 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 334, + 290 + ], + "type": "text", + "content": "4 A PRACTICAL ALGORITHM: BAYES-OCP" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 296, + 506, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 296, + 506, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 506, + 363 + ], + "type": "text", + "content": "Summarizing our discussion in the previous section, we suspect the optimal policy to be (i) of thresholding type (cf. Proposition 2), (ii) optimistic (cf. Proposition 3), and (iii) increasingly more greedy (cf. Proposition 4). These findings are not a complete surprise as optimism-in-the-face-of-uncertainty is a well-known principle in solving online decision-making problems (Auer et al., 2002; Bubeck et al., 2012). Our earlier analysis shows rigorously that this principle holds for at least a special case of OCP and strengths our intuition that it should be applicable for more general cases of OCP as well." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": "Keeping properties (i-iii) in mind, we now propose a practical algorithm for solving OCP in a more general setting than the one we analyzed earlier. 
Let " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "|\\mathcal{X}| \\geq 1" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\Psi = \\{(X,\\tau,\\rho): X \\in 2^{\\mathcal{X}} \\setminus \\emptyset\\}" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": " include all experiment designs that target a unique subpopulation within " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": " for a given time horizon " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": " and success criterion " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": "; let " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "C_X \\doteq C_{(X,\\tau,\\rho)}" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "R_X \\doteq R_{(X,\\tau,\\rho)}" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": ". 
We assume that the conditional power of performing a hypothesis test at time " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": " according to " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": "—that is the probability of the test being successful conditioned on mean outcomes " + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\{\\theta_x\\}" + }, + { + "bbox": [ + 104, + 367, + 504, + 434 + ], + "type": "text", + "content": "—can be computed for interim datasets—that is" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 146, + 437, + 504, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 437, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 146, + 437, + 504, + 453 + ], + "type": "interline_equation", + "content": "\\mathcal {P} (X, \\mathcal {D} _ {t}; \\left\\{\\theta_ {x} \\right\\}) = \\mathbb {E} _ {x _ {t ^ {\\prime}} \\sim \\left\\{\\eta_ {x \\mid X} \\right\\} _ {x \\in X}, y _ {t ^ {\\prime}} \\sim \\mathcal {N} \\left(\\theta_ {x _ {t ^ {\\prime}}, 1}\\right)} [ \\rho (\\mathcal {D} _ {t} \\cup \\left(\\cup_ {t ^ {\\prime} = t + 1} ^ {\\tau} \\left\\{x _ {t ^ {\\prime}}, y _ {t ^ {\\prime}} \\right\\}\\right)) ] \\tag {9}", + "image_path": "05b96697042eb5bd2a14d15090b1b18eb87e22323510e01c1f1ead47ee5c3a00.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 456, + 454, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 454, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 454, + 468 + ], + "type": "text", + "content": "can be evaluated efficiently. 
Then, based on this conditional power function, we define" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 475, + 296, + 502 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 475, + 296, + 502 + ], + "spans": [ + { + "bbox": [ + 115, + 475, + 296, + 502 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {G} \\left(X, \\mathcal {D} _ {t}; \\left\\{\\theta_ {x} \\right\\}\\right) \\\\ = R _ {X} \\cdot \\mathcal {P} (X, \\mathcal {D} _ {t}; \\left\\{\\theta_ {x} \\right\\}) - C _ {X} \\cdot (\\tau - t) \\\\ \\end{array} \\tag {10}", + "image_path": "e04621837a5b84d0fa329e09cbee5aef208f48530b3efd0acf8eb017b5cd2412.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "text", + "content": "as the expected utility of fully committing to an experiment and waiting until it terminates when the experiment targets population " + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "text", + "content": ", is currently at time step " + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "text", + "content": ", and has collected dataset " + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t" + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "text", + "content": " so far. 
Denote with " + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "inline_equation", + "content": "\\mathcal{G}^{(0)}(X; \\{\\theta_x\\}) = \\mathcal{G}(X, \\emptyset; \\{\\theta_x\\})" + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "text", + "content": " the same expected utility but for an experiment that is yet to start, and with " + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "inline_equation", + "content": "\\mathcal{G}^{(0)}(\\emptyset; \\{\\theta_x\\}) = 0" + }, + { + "bbox": [ + 104, + 506, + 298, + 594 + ], + "type": "text", + "content": " the utility of stopping all experimentation." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "text", + "content": "Our algorithm is called Bayes-OCP and is given in Algorithm 1. It maintains a posterior distribution " + }, + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mu_x,\\sigma_x^2)" + }, + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "text", + "content": " for each mean outcome " + }, + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "inline_equation", + "content": "\\theta_{x}" + }, + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "text", + "content": " assuming that, given mean " + }, + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "inline_equation", + "content": "\\theta_{x}" + }, + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "text", + "content": ", outcomes are distributed normally with unit variance—that is " + }, + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "inline_equation", + "content": "\\Omega_{x} = \\mathcal{N}(\\theta_{x},1)" + }, + { + "bbox": [ + 104, + 600, + 298, + 732 + ], + "type": "text", + "content": ". 
These posteriors are only used in deciding which experiment to run next and not in determining whether the experiment was a success or not. Hence, even when the assumption of outcomes being normally distributed is violated, the integrity of the experiments would not be effected; only the performance of Bayes" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 304, + 478, + 505, + 729 + ], + "blocks": [ + { + "bbox": [ + 304, + 478, + 505, + 729 + ], + "lines": [ + { + "bbox": [ + 304, + 478, + 505, + 729 + ], + "spans": [ + { + "bbox": [ + 304, + 478, + 505, + 729 + ], + "type": "table", + "html": "
Algorithm 1 Bayes-OCP
1: Initialize μx and σx2 for all x ∈ X
2: X ← X, t ← 0, D0 ← ∅
3: Start experiment ψ = (X,τ,ρ)
4: loop:
5: t ← t + 1; Dt ← Dt-1 ∪ {xt,yt}
6: 1/σxt2 ← 1/σxt2 + 1
7: μxt← μxt + (yt - μxt)σxt2
(i) Identify a candidate subpopulation X' to replace X:
8: X' ← ∅
9: while X \\ X' ⊃ ∅:
10: x* ← argmaxx∈X\\X'
Eθx~N(μx,σx2)[G(0)(X' ∪ {x}; {θx})]
11: if Eθx~N(μx,σx2)[G(0)(X' ∪ {x*}; {θx})] > Eθx~N(μx,σx2)[G(0)(X'; {θx})]:
12: X' ← X' ∪ {x*}
13: else: break
(ii) Decide whether to actually replace X with X':
14: if Pθx~N(μx,σx2)[G(0)(X'; {θx}) > G(X, Dt; {θx})] > β:
15: X ← X', t ← 0, D0 ← ∅
16: Start a new experiment ψ = (X,τ,ρ)
", + "image_path": "5ee11095d1dcf629c50370cd5060cddc02fed98ac2167dc18a201729966cb3a7.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 106 + ], + "type": "text", + "content": "OCP in managing various experiments would degrade (see Appendix C for related experiments). 
Making use of the posteriors it maintains, Bayes-OCP performs two steps at each iteration:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "text", + "content": "(i) First, a subpopulation " + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "inline_equation", + "content": "X' \\subset X" + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "text", + "content": " within the currently targeted population " + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "text", + "content": " is identified as a potential candidate to target next; due to the combinatorial size of " + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "text", + "content": ", it would not be practical to consider every subpopulation individually as a candidate for large " + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "inline_equation", + "content": "|\\mathcal{X}|" + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "text", + "content": ". The ideal candidate would be the subpopulation with the largest expected utility: " + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "inline_equation", + "content": "X' = \\operatorname{argmax}_{X' \\subset X} \\mathbb{E}_{\\theta_x \\sim \\mathcal{N}(\\mu_x, \\sigma_x^2)} [\\mathcal{G}^{(0)}(X'; \\{\\theta_x\\})]" + }, + { + "bbox": [ + 114, + 110, + 506, + 209 + ], + "type": "text", + "content": ". 
But again due to the combinatorial size of the search space, Bayes-OCP employs a greedy algorithm instead and forms candidate subpopulations by combining, one by one, the atomic-subpopulations that increase the expected utility the most, until the expected utility no longer improves. Note that it is common to use greedy algorithms to solve combinatorial optimization problems (Lawler, 1976; Papadimitriou and Steiglitz, 1982)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "content": "(ii) Then, it is decided whether the current experiment targeting population " + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "content": " should be stopped in favor of targeting candidate " + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "inline_equation", + "content": "X^{\prime}" + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "content": " identified earlier instead. A greedy strategy would have done so whenever " + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\theta_x \\sim \\mathcal{N}(\\mu_x, \\sigma_x^2)}[\\mathcal{G}^{(0)}(X'; \\{\\theta_x\\})] > \\mathbb{E}_{\\theta_x \\sim \\mathcal{N}(\\mu_x, \\sigma_x^2)}[\\mathcal{G}(X, \\mathcal{D}_t; \\{\\theta_x\\})]" + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "content": ". But from our earlier analysis, we have learned that the optimal strategy is optimistic (cf. Proposition 3). 
As such, Bayes-OCP checks whether it is overwhelmingly likely that the alternative experiment has higher expected utility—that is whether " + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_{\\theta_x \\sim \\mathcal{N}(\\mu_x, \\sigma_x^2)}\\{\\mathcal{G}^{(0)}(X'; \\{\\theta_x\\}) > \\mathcal{G}(X, \\mathcal{D}_t; \\{\\theta_x\\})\\} > \\beta" + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\beta \\in (1/2, 1)" + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "content": " controls the decision-making threshold. When " + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "content": " is large, we are more optimistic that the current experiment will succeed and require stronger evidence that the alternative experiment has higher expected utility. Note that, as the posteriors " + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mu_x, \\sigma_x^2)" + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "content": " get narrower, the optimism of this rule naturally decreases, which should be the case for the optimal strategy (cf. Proposition 4). As one extreme, the two switching rules become equivalent when " + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "inline_equation", + "content": "\\{\\sigma_x^2 \\to 0\\}" + }, + { + "bbox": [ + 111, + 214, + 506, + 340 + ], + "type": "text", + "content": "."
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 353, + 211, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 353, + 211, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 211, + 365 + ], + "type": "text", + "content": "5 RELATED WORK" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": "Optimal stopping Optimal commitment is essentially a new type of optimal stopping/switching problem. In typical optimal stopping problems (OSPs), the reward an agent can receive evolves based on a stochastic process and the goal of the agent is to determine the optimal time step to stop when the reward to be received is in some sense maximized (Shiryaev, 2007). Optimal commitment is unique in that a positive reward can only be received by not stopping until a pre-specified time horizon " + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": ". In optimal commitment, there is still a stochastic process (namely, samples " + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "inline_equation", + "content": "y_{t}" + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": ") that gradually reveals more information regarding what that positive reward will be at the end, however, the reward—or rather the cost—of stopping earlier is independent of this stochastic process (and is equal to " + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "inline_equation", + "content": "-tC" + }, + { + "bbox": [ + 104, + 373, + 506, + 463 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 467, + 507, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 467, + 507, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 467, + 507, + 579 + ], + "type": "text", + "content": "Sequential hypothesis testing Among other OSPs, optimal commitment is most closely related to sequential hypothesis testing (SHT), where an agent makes sequential observations regarding a given hypothesis and eventually needs to decide whether to reject the said (alternate) hypothesis or reject some null hypothesis (Wald and Wolfowitz, 1948; Yu et al., 2009; Drugowitsch et al., 2012; Shenoy and Angela, 2012; Zhang and Angela, 2013; Drugowitsch et al., 2014; Khalvati and Rao, 2015; Schonbrodt et al., 2017; Fauß et al., 2020). Rejecting the correct hypothesis provides a positive reward whereas waiting for more observations, while informative, is also costly as in OCP. It is well known that the optimal policy in the classic setting of SHT is a thresholding-type policy with fixed thresholds that do not vary over time: The null hypothesis is rejected if some test statistic gets above a threshold (and the alternate hypothesis is rejected if the same statistic gets below a different threshold)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 582, + 507, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 507, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 507, + 673 + ], + "type": "text", + "content": "Optimal commitment can be thought of as a SHT problem with the crucial difference that the meta-experimenter has only the option of discarding the alternate hypothesis (i.e. breaking a commitment), and once some time horizon is reached (i.e. 
when a commitment is kept), either the null hypothesis or the alternate hypothesis is automatically rejected according to some external success criterion " + }, + { + "bbox": [ + 104, + 582, + 507, + 673 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 582, + 507, + 673 + ], + "type": "text", + "content": ", regardless of what the meta-experimenters' decision might have been otherwise. As we have shown in Proposition 2, the optimal policy still remains a thresholding-type policy, but since there is now a deadline to discard the alternate hypothesis early, the thresholds become time-varying; in particular, they become less and less optimistic as the said deadline approaches (cf. Proposition 4)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "content": "Frazier and Angela (2007); Dayanik and Angela (2013); Alaa and van der Schaar (2016) consider SHT under stochastic deadlines, but different from optimal commitment, they still allow agents to reject both hypotheses at any time. In these works, the agent must make the rejection decision before the deadline is reached to be able to receive a positive reward, whereas in our case, the agent must wait until the deadline to see whether the null hypothesis will be rejected or not. 
Naghshvar and" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 121, + 504, + 194 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 504, + 121 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 504, + 121 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 504, + 121 + ], + "type": "text", + "content": "Table 2: Comparison of related experiment designs. Optimal commitment is the only design that aims to decide both when an alternative population should be targeted—as opposed to switching the target population only at a fixed decision point—as well as which population to target among many potential candidates—as opposed to a simple binary decision of “overall population vs. sub-population” or “go vs. no-go”." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 105, + 121, + 504, + 194 + ], + "lines": [ + { + "bbox": [ + 105, + 121, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 504, + 194 + ], + "type": "table", + "html": "
DesignReferenceWhen?Which?
Randomized Controlled Trial (RCT)Fisher (1935)NeverOnly the initial population
Adaptive Enrichment DesignOndra et al. (2019)Fixed decision pointOverall vs. fixed subpopulation
Adaptive Signature DesignZhang et al. (2017)Fixed decision pointPossibly any population
RCT with Futility StoppingHe et al. (2012)Possibly any timeGo vs. no-go
Optimal Commitment(Ours)Possibly any timeAmong multiple populations
", + "image_path": "91c7f1cd207a5213b5dcec4fc62f06920b3997bac17adcdc0e8f5b64e742df10.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 203, + 506, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 203, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 203, + 506, + 281 + ], + "type": "text", + "content": "Javidi (2013); Jarrett and van der Schaar (2020) consider active versions of SHT where the agent is able to choose what type of observations to make. Our case is \"passive\" in the sense that the meta-experimenter cannot influence what kind of samples they are going to receive from the currently running experiment. Finally, optimal commitment, and SHT in general, can be thought of as more structured instances of partially-observed reinforcement learning (RL). As we have discussed earlier, the standard technique here relies on convex reward structures whereas the optimal value function in our case is not convex in general (cf. Proposition 1, see Appendix A for a detailed discussion)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 286, + 506, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 286, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 286, + 506, + 397 + ], + "type": "text", + "content": "Adaptive experimentation We introduced optimal commitment predominantly as a tool for population selection during an experiment. In clinical trials, dominant approach to population selection is adaptive enrichment (Mehta et al., 2009; Magnusson and Turnbull, 2013; Simon and Simon, 2013; Wang and Hung, 2013; Simon and Simon, 2018; Ondra et al., 2019; Thall, 2021) and adaptive signature designs (Freidlin and Simon, 2005; Freidlin et al., 2010; Mi, 2017; Zhang et al., 2017; Bhattacharyya and Rai, 2019). 
These designs are capable of adapting the target population of a trial as the trial continues, but unlike optimal commitment, they can only do so at fixed analysis points and not just at any time step. While adaptive signature designs can select arbitrary populations, adaptive enrichment designs are also limited by the number of pre-specified populations they can select between, which is typically only two: the overall population and an alternative subpopulation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 402, + 507, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 402, + 507, + 557 + ], + "spans": [ + { + "bbox": [ + 104, + 402, + 507, + 557 + ], + "type": "text", + "content": "Optimal commitment is also related to clinical trial designs with futility stopping, where an experimenter might terminate a trial early once it becomes apparent that the said trial is highly unlikely to succeed (van der Tweel and van Noord, 2003; Lachin, 2005; He et al., 2012; Jitlal et al., 2012; Kimani et al., 2013; Chang et al., 2020). However, this does not consider the possibility of switching to a new trial that targets a different population. As we will see during our experiments, switching to an alternative experiment might prove preferable even before an ongoing experiment can be deemed futile. In such cases, optimal commitment can make more timely decisions. Table 2 summarizes the experiment designs related to optimal commitment. Finally, it is worth mentioning that there are several methods for managing clinical trials at a portfolio level—that is determining which clinical trial is to be conducted next (Rogers et al., 2002; Colvin and Maravelias, 2008; Graham et al., 2020). 
Trial management in this vein is orthogonal to optimal commitment: They are concerned with the success of multiple new treatments and make decisions on a trial-by-trial basis whereas we only ever consider a single intervention and make decisions regarding the target population on a sample-by-sample basis while experiments still continue. See Appendix H for extended related work." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 567, + 201, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 201, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 201, + 578 + ], + "type": "text", + "content": "6 EXPERIMENTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": "We want to investigate how Bayes-OCP behaves in environments that differ in terms of ground-truth outcomes, for instance, what happens in environments where the original experiment is quite likely to succeed versus what happens in ones where switching to an alternative experiment is needed. To this end, we simulate experiments where mean outcomes are varied but other aspects of an experiment are fixed: In our environments, there are two atomic-populations, " + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = \\{\\mathcal{X}_A,\\mathcal{X}_B\\}" + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": ". 
Both atomic-populations have equal propensities " + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "inline_equation", + "content": "\\eta_{\\mathcal{X}_A} = \\eta_{\\mathcal{X}_B} = 1 / 2" + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": " and the meta-experimenter has the same positively-biased prior for the mean outcome associated with each atomic-population: " + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathcal{X}_A},\\theta_{\\mathcal{X}_B}\\sim \\mathcal{N}(0.1,0.1)" + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": ". Experiment designs targeting one or both of these atomic-populations all have the same time horizon " + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "inline_equation", + "content": "\\tau = 600" + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": " and success criterion " + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "inline_equation", + "content": "\\rho (\\mathcal{D}_{\\tau}) = \\mathbb{1}\\{\\Sigma_{(x_t,y_t)\\in \\mathcal{D}_{\\tau}}y_t / |\\mathcal{D}_{\\tau}| > \\alpha /\\sqrt{\\tau}\\}" + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "inline_equation", + "content": "\\alpha = F^{-1}(95\\%)" + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": ". So, experiments are powered to detect a positive mean outcome of 0.1 with probability " + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "inline_equation", + "content": "\\sim 80" + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": ". 
Rewards are given by " + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "inline_equation", + "content": "R_{X} = 1000\\eta_{X}^{0.1}" + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": " the wider the target population is, the more people a successful intervention can be marketed to—and costs are given by " + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "inline_equation", + "content": "C_X = 1 / \\eta_X^{0.1}" + }, + { + "bbox": [ + 104, + 586, + 507, + 731 + ], + "type": "text", + "content": "—the narrower the target population is, the harder it becomes to find subjects eligible to participate." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "spans": [ + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "text", + "content": "Benchmarks We consider the metaexperiment designs summarized in Table 2 as benchmarks (see Appendix A.1 for an RL-based benchmark). 
Conventional RCT always targets the overall population and never stops early—that is it always conducts the experiment " + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "inline_equation", + "content": "\\psi = (\\{\\mathcal{X}_A,\\mathcal{X}_B\\} ,\\tau ,\\rho)" + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "text", + "content": " until its completion. Adaptive Enrichment performs an intermediary analysis at " + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "inline_equation", + "content": "t = \\tau /2 = 300" + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "text", + "content": " and greedily selects the experiment with the highest expected utility from " + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "inline_equation", + "content": "\\Psi = \\{(X,\\tau ,\\rho)\\}_{X\\subseteq \\{\\mathcal{X}_A,\\mathcal{X}_B\\}}" + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "text", + "content": ". Futility Stopping is implemented via Bayes-OCP by initializing the set of all experiments as a singleton " + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "inline_equation", + "content": "\\Psi = \\{\\Psi_0 = (\\{\\mathcal{X}_A,\\mathcal{X}_B\\} ,\\tau ,\\rho)\\}" + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "text", + "content": ". Intuitively, futility stopping only decides whether or not to stop the initial experiment that targets the overall population early. Bayes-OCP is initialized with " + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "inline_equation", + "content": "\\beta = 0.80" + }, + { + "bbox": [ + 104, + 83, + 258, + 345 + ], + "type": "text", + "content": " (see Appendix E for a sensitivity analysis). 
We also consider an abla" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 264, + 175, + 506, + 340 + ], + "blocks": [ + { + "bbox": [ + 263, + 83, + 506, + 174 + ], + "lines": [ + { + "bbox": [ + 263, + 83, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 263, + 83, + 506, + 174 + ], + "type": "text", + "content": "Table 3: Performance comparison in various environment instances. Bayes-OCP has the highest expected utility—and a smaller FWER then conventional RCTs—when averaged over all environment instances. This is because Bayes-OCP is a balanced design whose structure does not favor certain environment instances over others. As an example, compare it with conventional RCTs: RCTs do not have an adaptive structure hence they favor green environments where it is not necessary to adapt the target population of the initial experiment. *Instances favored/addressed partially" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 264, + 175, + 506, + 340 + ], + "lines": [ + { + "bbox": [ + 264, + 175, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 264, + 175, + 506, + 340 + ], + "type": "table", + "html": "
Algorithms:Oracle RCTRCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
Favored Instances:N/AGreenGreen/Amber*Green/RedAmber*/RedBalanced (incl. Amber)
All Instances (100%)Utility260.4-39.4 (6.7)106.5 (6.9)150.0 (3.5)32.6 (3.1)171.8 (3.6)
FWER0.0%0.3% (0.1%)0.2% (0.1%)0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success75.2%56.1% (0.7%)53.2% (0.8%)45.4% (1.3%)10.5% (0.8%)52.4% (1.2%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)607.5 (1.9)615.1 (1.2)
T-to-F35.6600.0 (0.0)548.9 (16.1)57.6 (4.6)3.0 (0.5)70.8 (8.2)
Green Instances (47.3%)Utility389.6388.7 (3.9)385.6 (3.7)337.7 (5.7)63.1 (3.5)343.4 (7.3)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)0.9 (0.0)0.1 (0.0)
Success99.0%98.9% (0.4%)97.4% (0.7%)86.0% (1.4%)18.8% (0.9%)88.2% (2.0%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)605.8 (1.5)602.8 (0.4)
T-to-F600.0600.0 (0.0)759.4 (36.4)46.6 (7.6)2.5 (0.5)62.3 (14.3)
Amber Instances (29.4%)Utility258.6-300.3 (19.8)-17.6 (6.5)-5.3 (5.4)11.6 (3.4)63.2 (5.6)
FWER0.0%0.7% (0.3%)0.6% (0.3%)0.4% (0.3%)0.0% (0.0%)0.3% (0.2%)
Switches1.00.0 (0.0)0.7 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success96.6%30.0% (2.0%)22.6% (1.5%)15.2% (2.0%)5.3% (1.0%)35.2% (1.8%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)617.2 (5.9)663.9 (7.2)
T-to-F600.0600.0 (0.0)745.0 (13.1)78.3 (9.3)3.4 (0.6)104.4 (19.1)
Red Instances (23.3%)Utility0.0-579.2 (4.1)-304.2 (4.4)-35.1 (1.7)-2.8 (1.1)-39.7 (3.4)
FWER0.0%0.2% (0.3%)0.2% (0.3%)0.1% (0.2%)0.0% (0.0%)0.2% (0.3%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%2.1% (0.4%)1.8% (0.5%)0.9% (0.3%)0.3% (0.4%)1.6% (0.7%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)634.8 (39.1)
T-to-F0.0600.0 (0.0)343.1 (8.1)38.9 (2.1)3.5 (1.4)45.8 (2.9)
", + "image_path": "124405db7f564c1a6224da1e823495a8baf4fbd4cebef593238f4117dfe0aa7a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 346, + 506, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 506, + 380 + ], + "type": "text", + "content": "tion of Bayes-OCP where decisions are made greedily instead of optimistically (Greedy Bayes-OCP). As a baseline of maximum achievable performance, we consider an oracle (Oracle RCT) that always runs the RCT with the optimum target (or does not run any RCT at all if that happens to be optimal)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "text", + "content": "**Environments** A meta-experimenter's performance is specific to the environment instance. In particular, it depends on the ground-truth outcome distributions " + }, + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\{\\Omega_x\\}" + }, + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "text", + "content": " for different populations. For example, an algorithm that always immediately stops the experiment would perform best when the mean outcome is negative. Hence, to faithfully evaluate the benchmarks, we need to focus on the average performance across different environments. 
To this end, we randomly generated 1000 environments (repeated five times to obtain error bars) with true mean outcomes " + }, + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathcal{X}_A}, \\theta_{\\mathcal{X}_B}" + }, + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "text", + "content": " sampled independently from " + }, + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0.1, 0.1)" + }, + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "text", + "content": ". Given these means, outcome distributions are set to be Gaussian with unit variance such that " + }, + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\Omega_x = \\mathcal{N}(\\theta_x, 1)" + }, + { + "bbox": [ + 104, + 384, + 504, + 517 + ], + "type": "text", + "content": ". Depending on the true mean outcome, these environments can be categorized into three groups: (i) green instances where the initial experiment targeting the overall population has the highest utility, (ii) amber instances where an alternative experiment that targets a subpopulation has the highest utility, and (iii) red instances where no experiment has positive utility hence running no experiments is the optimal decision." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 521, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 521, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 506, + 654 + ], + "type": "text", + "content": "Different benchmarks favor different instances (see the top row of Table 3): Conventional RCTs do not allow for any adaptation hence they favor green instances where the target population of the initial experiment does not need to be adapted. 
Adaptive Enrichment allows for adaptation but only at a certain time point, which is often too late to stop unsuccessful experiments (as in red instances). However, an adaptive enrichment design at least makes it possible to eventually target a subpopulation, even though it might be too late to do so at the pre-specified decision point, hence it partially accommodates amber instances. Futility Stopping decides between either continuing with the initial experiment or stopping all experimentation completely (targeting a subpopulation is not an option) hence it favors either green or red instances (but not amber instances). Greedy Bayes-OCP is pessimistic (or rather not optimistic enough) towards any ongoing experiment succeeding, hence it favors red instances where no experiment is likely to succeed. Similar to adaptive enrichment, Greedy Bayes-OCP at least allows subpopulations to be targeted hence it too partially accommodates amber instances." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "text", + "content": "Main results Performance of a meta-experimenter is primarily measured by Bayesian utility which is the expected utility averaged over randomly sampled environment instances (Utility). Remember that maximizing utility was our main objective, and as such, Bayes-OCP has the highest expected utility when averaged over all environment instances, see Table 3. Unlike other benchmarks, Bayes-OCP strikes a good balance in prioritizing all environment instances at the same time. 
This is because Bayes-" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 337, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 337, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 337, + 127 + ], + "type": "text", + "content": "OCP (i) can make timely decisions—unlike Adaptive Enrichment—and (ii) is optimistic hence it does not stop likely-to-succeed experiments prematurely—unlike Greedy Bayes-OCP. More specifically," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 132, + 339, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 132, + 339, + 243 + ], + "spans": [ + { + "bbox": [ + 115, + 132, + 339, + 243 + ], + "type": "text", + "content": "(i) Timeliness of Bayes-OCP: Bayes-OCP has an advantage in amber and red instances over adaptive enrichment and futility stopping. Consider the example in Figure 3: While Bayes-OCP stops in a timely manner, adaptive enrichment can only stop at a fixed decision point and experiments with futility stopping only stop when the ongoing experiment is failing not as soon as a better alternative emerges. This underlines the exploitative aspect of Bayes-OCP—making and breaking commitments to maximize utility." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 247, + 339, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 247, + 339, + 402 + ], + "spans": [ + { + "bbox": [ + 112, + 247, + 339, + 402 + ], + "type": "text", + "content": "(ii) Optimism of Bayes-OCP: While a design that favors early stopping is obviously desirable in amber and red environments, how much it is favored should be moderated to also succeed in green environments. Consider the example in Figure 4: Greedy Bayes-OCP prematurely stops the initial experiment in a green environment while Bayes-OCP does not. Theoretically, we know that the optimal policy should be optimistic towards the ongoing experiment succeeding and be hesitant to stop to a certain extent. This underlines the exploratory aspect of Bayes-OCP—keeping a seemingly failing commitment still has value as it reveals more information regarding whether the commitment is actually failing." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "text", + "content": "In Table 3, in addition to Utility, we also report the family-wise error rate (FWER)—that is the frequency of runs where at least one experiment (denote it with " + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "inline_equation", + "content": "\\psi^i" + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "text", + "content": ") is declared successful (i.e. " + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "inline_equation", + "content": "\\rho^i(\\mathcal{D}_\\tau^i) = 1" + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "text", + "content": ") despite the mean outcome being negative for the targeted population (i.e. 
" + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}_{X^i} < 0" + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "text", + "content": ")—the average number of times the target population has been switched (Switches), the probability of success which is defined as achieving positive utility (Success), the average time until a successful outcome (Time-to-Success, " + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "text", + "content": "-to-" + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 407, + 339, + 529 + ], + "type": "text", + "content": "), and the average time until an unsuccessful outcome where all experimentation is stopped" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 528, + 506, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 594 + ], + "type": "text", + "content": "with negative utility (Time-to-Failure, " + }, + { + "bbox": [ + 104, + 528, + 506, + 594 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 528, + 506, + 594 + ], + "type": "text", + "content": "-to- " + }, + { + "bbox": [ + 104, + 528, + 506, + 594 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 528, + 506, + 594 + ], + "type": "text", + "content": "), see Appendix G for details. Importantly, Bayes-OCP does not compromise the error control of experiments, on the contrary, it even achieves a smaller FWER than conventional RCTs. 
This is because aggregate data is only ever used to select experiments, otherwise no two experiments consult each other's data when evaluating a success criterion so that the potential confoundedness that could have been caused by the adaptiveness of Bayes-OCP is avoided when declaring an experiment as successful (see Appendix B for a discussion on error control)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 599, + 507, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 507, + 645 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 507, + 645 + ], + "type": "text", + "content": "Supplementary results We also provide supplementary results: Appendix A.1 evaluates RL-based benchmarks, Appendix B.1 investigates error control, Appendix C considers environments with non-Gaussian outcomes, Appendix D considers environments with more than two atomic-populations, and Appendix E analyzes the sensitivity of Bayes-OCP's performance to its hyper-parameter " + }, + { + "bbox": [ + 104, + 599, + 507, + 645 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 599, + 507, + 645 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 656, + 196, + 668 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 196, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 196, + 668 + ], + "type": "text", + "content": "7 CONCLUSION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "Two aspects of OCP require further discussion: (i) How can it be approached from the perspective of reinforcement learning? 
While OCP technically describes a special class of POMDPs, we have not found this to be constructive in finding a solution (see Appendix A). (ii) What are the implications of using Bayes-OCP in terms of error control? It has no impact on individual error rates and can be adapted to control FWER (see Appendix B). See Appendix F for a discussion on future work." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 344, + 84, + 503, + 233 + ], + "blocks": [ + { + "bbox": [ + 344, + 84, + 503, + 233 + ], + "lines": [ + { + "bbox": [ + 344, + 84, + 503, + 233 + ], + "spans": [ + { + "bbox": [ + 344, + 84, + 503, + 233 + ], + "type": "image", + "image_path": "a619405fd97dc0546afb287a9790828788911848db4ae9f868bdd325c371a984.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 234, + 506, + 315 + ], + "lines": [ + { + "bbox": [ + 342, + 234, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 342, + 234, + 506, + 315 + ], + "type": "text", + "content": "Figure 3: Timeliness of Bayes-OCP. Bayes-OCP is first to (correctly) stop the initial experiment in an amber instance (excluding Greedy Bayes-OCP). Adaptive enrichment can only stop at a pre-specified time, while futility stopping fails to consider switching to an alternative experiment, which is proven to be preferable earlier than stopping." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 343, + 320, + 503, + 470 + ], + "blocks": [ + { + "bbox": [ + 343, + 320, + 503, + 470 + ], + "lines": [ + { + "bbox": [ + 343, + 320, + 503, + 470 + ], + "spans": [ + { + "bbox": [ + 343, + 320, + 503, + 470 + ], + "type": "image", + "image_path": "1391f46e577a92e0ab99ca7dfda5da32e778dbe68ef5fd7fa3e04ba1028ad26e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 470, + 505, + 521 + ], + "lines": [ + { + "bbox": [ + 342, + 470, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 342, + 470, + 505, + 521 + ], + "type": "text", + "content": "Figure 4: Optimism of Bayes-OCP. Greedy Bayes-OCP (incorrectly) stops due to initial noise in a green instance while Bayes-OCP does not stop since it is more optimistic (as the optimal policy should, cf. Proposition 3)." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 208, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 208, + 92 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 208, + 92 + ], + "type": "text", + "content": "ETHICS 
STATEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 101, + 504, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 101, + 504, + 221 + ], + "spans": [ + { + "bbox": [ + 107, + 101, + 504, + 221 + ], + "type": "text", + "content": "As the main application of optimal commitment, we have focused on adaptive experimentation, particularly experiments that are run as part of clinical development. Clinical trials have a huge impact on the wellbeing of patients and this high-stakes nature of clinical trials naturally raises some ethical concerns; we discuss two major ones in this section. However before we start our discussion, it should be emphasized that clinical trials is not the only application domain of optimal commitment. As we have highlighted at the end of Section 2, our contributions are generally applicable to decision-making problems such as portfolio and energy management. Moreover, not all adaptive experiments are clinical and have the same high stakes as a clinical trial. For instance, A/B testing is common in online advertisement to determine what recommendation policies lead to more user engagement (Gui et al., 2015; Xu et al., 2015; Kohavi and Longbotham, 2017). Therefore, the ethical concerns we discuss here does not universally concern all possible applications of optimal commitment." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 227, + 504, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 227, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 107, + 227, + 504, + 293 + ], + "type": "text", + "content": "The first concern is how the designed error rate of an individual experiment is affected when multiple such experiments are managed together using Bayes-OCP in an adaptive manner, in particular, whether any error rate is inflated by the use of Bayes-OCP or not. We discuss error control in Appendix B with supplementary experiments. 
But briefly, Bayes-OCP has essentially no impact on the error rate of experiments on an individual level, and when controlling their family-wise error rate is also a concern, it can easily be adapted to accommodate this additional constraint as well." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 299, + 504, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 299, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 107, + 299, + 504, + 518 + ], + "type": "text", + "content": "The second concern is that an adaptive approach to population selection might lead to overly conservative experiments that unnecessarily limit the use of an effective treatment. As we have mentioned in the introduction to motivate the need for optimal commitment, when the treatment is effective only for a subpopulation (cf. amber instances in our experiments), population selection is absolutely necessary, otherwise the treatment is most likely to be found ineffective and discarded after an experiment that targets the overall patient population as a whole, which would deny the treatment for the subpopulation that would have benefited from it. On the flip side of this, when the treatment happens to be effective for everyone (cf. green instances in our experiment), population selection might lead to conducting a restrictive experiment that only targets a small subpopulation, which this time, would deny the treatment for the rest of the patient population. This is essentially the reason behind the performance drop between Bayes-OCP and conventional RCTs in green instances (see Table 3). 
There is a trade-off between the performance in amber instances and green instances; and Bayes-OCP achieves a better balance between the two compared with a conventional RCT as evidenced by its superior performance when averaged over all environment instances (again see Table 3); although it causes a drop in performance for green instances, it more than makes up for that drop in amber instances. This balance is partly controlled by how optimistic Bayes-OCP is, which is in turn dictated by its hyper-parameter " + }, + { + "bbox": [ + 107, + 299, + 504, + 518 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 107, + 299, + 504, + 518 + ], + "type": "text", + "content": "—larger " + }, + { + "bbox": [ + 107, + 299, + 504, + 518 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 107, + 299, + 504, + 518 + ], + "type": "text", + "content": " leads to more optimistic decisions towards ongoing experiments, which favors green instances more than amber instances. We analyze the sensitivity of Bayes-OCP's performance to " + }, + { + "bbox": [ + 107, + 299, + 504, + 518 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 107, + 299, + 504, + 518 + ], + "type": "text", + "content": " in Appendix E; and for all configurations that we have evaluated, Bayes-OCP always performs significantly better than a conventional RCT." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 530, + 265, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 530, + 265, + 541 + ], + "spans": [ + { + "bbox": [ + 107, + 530, + 265, + 541 + ], + "type": "text", + "content": "REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 548, + 504, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 548, + 504, + 615 + ], + "spans": [ + { + "bbox": [ + 107, + 548, + 504, + 615 + ], + "type": "text", + "content": "All our experiments are based on synthetic simulations, hence our results can easily be reproduced by following the specifications in Section 6 without needing access to any private dataset. In order to aid reproducibility, we have rigorously described all our benchmarks in algorithmic form, similar to Algorithm 1, in Appendix J. Moreover, the source code necessary to reproduce our main results in Table 3 is made publicly available at https://github.com/alihanhyk/optcommit and https://github.com/vanderschaarlab/optcommit." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 627, + 217, + 638 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 627, + 217, + 638 + ], + "spans": [ + { + "bbox": [ + 107, + 627, + 217, + 638 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 645, + 504, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 645, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 107, + 645, + 504, + 679 + ], + "type": "text", + "content": "We would like to thank the reviewers and the members of the van der Schaar lab, for their valuable input, comments, and suggestions. This work was supported by the US Office of Naval Research (ONR) and the National Science Foundation (NSF, grant number 1722516)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 690, + 175, + 701 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 690, + 175, + 701 + ], + "spans": [ + { + "bbox": [ + 107, + 690, + 175, + 701 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 709, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 709, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 709, + 504, + 731 + ], + "type": "text", + "content": "Alaa, A. M. and van der Schaar, M., \"Balancing suspense and surprise: Timely decision making with endogenous information acquisition,\" in Proc. Neural Inf. Process. Syst., 2016." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 106 + ], + "type": "text", + "content": "Auer, P., Cesa-Bianchi, N., and Fischer, P., \"Finite-time analysis of the multiarmed bandit problem,\" Mach. Learn., vol. 47, no. 2, pp. 235-256, 2002." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 507, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 507, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 507, + 146 + ], + "type": "text", + "content": "Bhattacharyya, A. and Rai, S. N., \"Adaptive signature design—review of the biomarker guided adaptive phase-III controlled design,\" Contemporary Clin. Trials Commun., vol. 15, p. 100378, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 505, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 505, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 505, + 177 + ], + "type": "text", + "content": "Bubeck, S., Cesa-Bianchi, N. et al., \"Regret analysis of stochastic and nonstochastic multi-armed bandit problems,\" Found. Trends Mach. Learn., vol. 5, no. 1, pp. 1-122, 2012." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 182, + 504, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 504, + 207 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 504, + 207 + ], + "type": "text", + "content": "Chang, Y., Song, T., Monaco, J., and Ivanova, A., \"Futility stopping in clinical trials, optimality and practical considerations,\" J. Biopharmaceutical Statist., vol. 30, no. 6, pp. 1050-1059, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 213, + 504, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 504, + 238 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 504, + 238 + ], + "type": "text", + "content": "Chiu, Y.-D., Koenig, F., Posch, M., and Jaki, T., \"Design and estimation in clinical trials with subpopulation selection,\" Statist. Med., vol. 37, pp. 4335-4352, 2018." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 243, + 506, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 243, + 506, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 506, + 267 + ], + "type": "text", + "content": "\"Trends, charts, and maps,\" ClinicalTrials.gov. [Online]. Available: https://www.clinicaltrials.gov/ct2/resources/trends#RegisteredStudiesOverTimePostedResults" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 274, + 504, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 274, + 504, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 504, + 298 + ], + "type": "text", + "content": "Colvin, M. and Maravelias, C. T., \"A stochastic programming approach for clinical trial planning in new drug development,\" Comput. Chem. Eng., vol. 32, no. 11, pp. 2626-2642, 2008." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 304, + 504, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 504, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 504, + 328 + ], + "type": "text", + "content": "Dayanik, S. and Angela, J. Y., \"Reward-rate maximization in sequential identification under a stochastic deadline,\" SIAM J. Control Optim., vol. 51, no. 4, pp. 2922-2948, 2013." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 334, + 504, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 334, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 504, + 358 + ], + "type": "text", + "content": "Demets, D. L. and Lan, K. K. G., \"Interim analysis: The alpha spending function approach,\" Statist. Med., vol. 13, no. 13-14, pp. 1341-1352, 1994." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 364, + 506, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 364, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 506, + 399 + ], + "type": "text", + "content": "Drugowitsch, J., Moreno-Bote, R., Churchland, A. K., Shadlen, M. N., and Pouget, A., \"The cost of accumulating evidence in perceptual decision making,\" J. Neuroscience, vol. 32, no. 11, pp. 3612-3628, 2012." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 405, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 405, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 504, + 430 + ], + "type": "text", + "content": "Drugowitsch, J., Moreno-Bote, R., and Pouget, A., \"Optimal decision-making with time-varying evidence reliability,\" in Proc. Neural Inf. Process. Syst., 2014." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 436, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 504, + 460 + ], + "type": "text", + "content": "Fauß, M., Zoubir, A. M., and Poor, H. V., “Minimax optimal sequential hypothesis tests for Markov processes,” Ann. Statist., vol. 48, no. 5, pp. 2599–2621, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 466, + 459, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 466, + 459, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 459, + 480 + ], + "type": "text", + "content": "Fisher, R. A., The Design of Experiments. Edinburgh, Scotland: Oliver & Boyd, 1935." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 485, + 506, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 485, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 485, + 506, + 510 + ], + "type": "text", + "content": "Frazier, P. and Angela, J. Y., \"Sequential hypothesis testing under stochastic deadlines,\" in Proc. Neural Inf. Process. Syst., 2007." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 516, + 506, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 516, + 506, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 506, + 550 + ], + "type": "text", + "content": "Freidlin, B. and Simon, R., \"Adaptive signature design: An adaptive clinical trial design for generating and prospectively testing a gene expression signature for sensitive patients,\" *Clin. Cancer Res.*, vol. 11, no. 21, pp. 7872-7878, 2005." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 557, + 504, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 557, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 557, + 504, + 582 + ], + "type": "text", + "content": "Freidlin, B., Jiang, W., and Simon, R., \"The cross-validated adaptive signature design,\" Clin. Cancer Res., vol. 16, no. 2, pp. 691-698, 2010." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 587, + 504, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 587, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 587, + 504, + 612 + ], + "type": "text", + "content": "Ghare, G. and Leutenegger, S. T., \"Improving speedup and response times by replicating parallel programs on a SNOW,\" in Proc. Int. Conf. Job Scheduling Strategies Parallel Process., 2005." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 617, + 506, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 617, + 506, + 642 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 506, + 642 + ], + "type": "text", + "content": "Graham, E., Jaki, T., and Harbron, C., \"A comparison of stochastic programming methods for portfolio level decision-making,\" J. Biopharmaceutical Statist., vol. 30, no. 3, pp. 405-429, 2020." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 647, + 504, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 504, + 672 + ], + "type": "text", + "content": "Gui, H., Xu, Y., Bhasin, A., and Han, J., \"Network A/B testing: From sampling to estimation,\" in Proc. Int. Conf. World Wide Web, 2015." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 677, + 504, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 504, + 702 + ], + "type": "text", + "content": "He, P., Lai, T. L., and Liao, O. Y.-W., “Futility stopping in clinical trials,” Statist. Interface, vol. 5, no. 4, pp. 415-423, 2012." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "text", + "content": "Jarrett, D. and van der Schaar, M., \"Inverse active sensing: Modeling and understanding timely decision-making,\" in Int. Conf. on Mach. Learn., 2020." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 106 + ], + "type": "text", + "content": "Jitlal, M., Khan, I., Lee, S., and Hackshaw, A., \"Stopping clinical trials early for futility: retrospective analysis of several randomised clinical studies,\" Brit. J. Cancer, vol. 107, no. 6, pp. 910-917, 2012." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 112, + 505, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 112, + 505, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 505, + 136 + ], + "type": "text", + "content": "Kaitin, K. I., \"Deconstructing the drug development process: The new face of innovation,\" Clin. Pharmacology Therapeutics, vol. 87, no. 3, pp. 356-361, 2010." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 142, + 504, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 142, + 504, + 166 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 504, + 166 + ], + "type": "text", + "content": "Karatzas, I. and Wang, H., \"Utility maximization with discretionary stopping,\" SIAM J. Control Optim., vol. 39, pp. 306-329, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 172, + 504, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 172, + 504, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 504, + 196 + ], + "type": "text", + "content": "Khalvati, K. and Rao, R. P., \"A Bayesian framework for modeling confidence in perceptual decision making,\" in Proc. Neural Inf. Process. Syst., 2015." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 202, + 504, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 202, + 504, + 226 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 504, + 226 + ], + "type": "text", + "content": "Kimani, P. K., Todd, S., and Stallard, N., \"Conditionally unbiased estimation in phase II/III clinical trials with early stopping for futility,\" Statist. Med., vol. 32, no. 17, pp. 2893-2910, 2013." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 232, + 504, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 232, + 504, + 256 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 504, + 256 + ], + "type": "text", + "content": "Kohavi, R. and Longbotham, R., \"Online controlled experiments and A/B testing,\" Encyclopedia Mach. Learn. Data Mining, vol. 7, no. 8, pp. 922-929, 2017." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 262, + 505, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 262, + 505, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 505, + 287 + ], + "type": "text", + "content": "Lachin, J. M., “A review of methods for futility stopping based on conditional power,” Statist. Med., vol. 24, no. 18, pp. 2747-2764, 2005." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 293, + 504, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 504, + 316 + ], + "type": "text", + "content": "Lawler, E., Combinatorial Optimization, Networks and Matroids. New York: Holt, Rinehard & Winston, 1976." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 323, + 504, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 504, + 348 + ], + "type": "text", + "content": "Lipkovich, I., Dmitrienko, A., and D'Agostino Sr., R. B., \"Tutorial on biostatistics: Data-driven subgroup identification and analysis in clinical trials,\" Statist. Med., vol. 36, pp. 136-196, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 354, + 504, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 504, + 378 + ], + "type": "text", + "content": "Magnusson, B. P. and Turnbull, B. W., \"Group sequential enrichment design incorporating subgroup selection,\" Statist. Med., vol. 32, no. 16, pp. 2695-2714, 2013." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 384, + 505, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 505, + 407 + ], + "type": "text", + "content": "Markowitz, H., Portfolio selection: Efficient diversification of investment. New York: John Wiley, 1959." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 415, + 505, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 505, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 505, + 448 + ], + "type": "text", + "content": "Mehta, C., Gao, P., Bhatt, D. L., Harrington, R. A., Skerjanec, S., and Ware, J. H., \"Optimizing trial design: Sequential, adaptive, and enrichment strategies,\" Circulation, vol. 119, no. 4, pp. 597-605, 2009." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 456, + 505, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 505, + 479 + ], + "type": "text", + "content": "Merton, R. C., \"Life time portfolio selection under uncertainty: The continuous-time case,\" Rev. Econ. Statist., vol. 51, pp. 247-257, 1969." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 486, + 504, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 486, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 486, + 504, + 510 + ], + "type": "text", + "content": "Mi, G., \"Enhancement of the adaptive signature design for learning and confirming in a single pivotal trial,\" Pharmaceutical Statist., vol. 16, no. 5, pp. 312-321, 2017." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 516, + 504, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 516, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 504, + 540 + ], + "type": "text", + "content": "Moineddin, R., Butt, D. A., Tomlinson, G., and Beyene, J., \"Identifying subpopulations for subgroup analysis in a longitudinal clinical trial,\" Contemporary Clin. Trials, vol. 29, pp. 817-822, 2008." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 546, + 505, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 546, + 505, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 505, + 569 + ], + "type": "text", + "content": "Naghshvar, M. and Javidi, T., \"Active sequential hypothesis testing,\" Ann. Statist., vol. 41, no. 6, pp. 2703-2738, 2013." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 576, + 504, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 504, + 601 + ], + "type": "text", + "content": "Ni, T., Eysenbach, B., and Salakhutdinov, R., \"Recurrent model-free RL can be a strong baseline for many POMDPs,\" in Proc. Int. Conf. Mach. Learn., 2022." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 606, + 505, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 505, + 631 + ], + "type": "text", + "content": "Olofsson, M., Önskog, T., and Lundström, N. L. P., \"Management strategies for run-of-river hydropower plants: An optimal switching approach,\" Optim. Eng., vol. 23, pp. 1707-1731, 2022." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 637, + 505, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 637, + 505, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 505, + 670 + ], + "type": "text", + "content": "Ondra, T., Jobjörnsson, S., Beckman, R. A., Burman, C.-F., König, F., Stallard, N., and Posch, M., \"Optimized adaptive enrichment designs,\" Statist. Methods Med. Res., vol. 28, no. 7, pp. 2096-2111, 2019." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 678, + 505, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 505, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 505, + 701 + ], + "type": "text", + "content": "Papadimitriou, C. H. and Steiglitz, K., Combinatorial Optimization—Algorithms and Complexity. Englewood Cliffs, NJ: Prentice Hall, 1982." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "text", + "content": "Rafique, S. F. and Jianhua, Z., \"Energy management system, generation and demand predictors: A review,\" IET Gener. Transmiss. Distribution, vol. 12, no. 3, pp. 519-530, 2018." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 709 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 106 + ], + "type": "text", + "content": "Rogers, M. J., Gupta, A., and Maranas, C. D., \"Real options based analysis of optimal pharmaceutical research and development portfolios,\" Ind. Eng. Chem. Res., vol. 41, no. 25, pp. 6607-6620, 2002." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 506, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 506, + 146 + ], + "type": "text", + "content": "Schönbrodt, F. D., Wagenmakers, E.-J., Zehetleitner, M., and Perugini, M., \"Sequential hypothesis testing with Bayes factors: Efficiently testing mean differences,\" Psychol. Methods, vol. 22, no. 2, p. 322, 2017." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 506, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 506, + 177 + ], + "type": "text", + "content": "Shenoy, P. and Angela, J. Y., \"Strategic impatience in Go/NoGo versus forced-choice decision-making,\" in Proc. Neural Inf. Process Syst., 2012." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 182, + 454, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 454, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 454, + 196 + ], + "type": "text", + "content": "Shiryaev, A. N., Optimal Stopping Rules. Springer Science & Business Media, 2007." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 201, + 506, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 506, + 224 + ], + "type": "text", + "content": "Simon, N. and Simon, R., \"Adaptive enrichment designs for clinical trials,\" Biostatistics, vol. 14, no. 4, pp. 613-625, 2013." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 231, + 506, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 506, + 255 + ], + "type": "text", + "content": "Simon, N. and Simon, R., \"Using Bayesian modeling in frequentist adaptive enrichment designs,\" Biostatistics, vol. 19, no. 1, pp. 27-41, 2018." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 261, + 506, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 261, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 506, + 285 + ], + "type": "text", + "content": "Spanan, M. T. J., \"Partially observable Markov decision processes,\" in Reinforcement Learning. Springer, 2012, pp. 387-414." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 291, + 504, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 291, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 504, + 326 + ], + "type": "text", + "content": "Takebe, T., imai, R., and Ono, S., \"The current status of drug discovery and development as originated in United States academia: The influence of industrial and academic collaboration on drug discovery and development,\" Clin. Transl. Sci., vol. 11, no. 6, pp. 597-606, 2018." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 332, + 506, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 332, + 506, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 332, + 506, + 355 + ], + "type": "text", + "content": "Thall, P. F., \"Adaptive enrichment designs in clinical trials,\" Annu. Rev. Statist. Appl., vol. 8, pp. 393-411, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 361, + 504, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 504, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 504, + 385 + ], + "type": "text", + "content": "Umscheid, C. A., Margolis, D. J., and Grossman, C. E., \"Key concepts of clinical trials: A narrative review,\" Postgraduate Med., vol. 123, no. 5, pp. 194-204, 2011." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 392, + 506, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 392, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 506, + 426 + ], + "type": "text", + "content": "van der Tweel, I. and van Noord, P. A., \"Early stopping in clinical trials and epidemiologic studies for 'futility': Conditional power versus sequential analysis,\" J. Clin. Epidemiology, vol. 56, no. 7, pp. 610-617, 2003." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 432, + 506, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 432, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 506, + 456 + ], + "type": "text", + "content": "Wald, A. and Wolfowitz, J., \"Optimum character of the sequential probability ratio test,\" Ann. Math. Statist., vol. 19, pp. 326-339, 1948." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 462, + 504, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 462, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 504, + 486 + ], + "type": "text", + "content": "Wand, D., Joshi, G., and Wornell, G., \"Efficient task replication for fast response times in parallel computation,\" in Proc. ACM SIGMETRICS Conf., 2014." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 492, + 504, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 504, + 517 + ], + "type": "text", + "content": "Wang, D., Joshi, G., and Wornell, G. W., \"Efficient straggler replication in large-scale parallel computing,\" ACM Trans. Model. Perform. Eval. Comput. Syst., vol. 4, no. 2, pp. 1-23, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 522, + 506, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 522, + 506, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 506, + 556 + ], + "type": "text", + "content": "Wang, S.-J. and Hung, H. J., \"Adaptive enrichment with subpopulation selection at interim: Methodologies, applications and design considerations,\" Contemporary clinical trials, vol. 36, no. 2, pp. 673-681, 2013." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 563, + 506, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 563, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 506, + 597 + ], + "type": "text", + "content": "Xu, Y., Chen, N., Fernandez, A., Sinno, O., and Bhasin, A., \"From infrastructure to culture: A/B testing challenges in large scale social networks,\" in Proc. ACM SIGKDD Int. Conf. Knowl. Discovery Data Mining, 2015." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 604, + 506, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 604, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 506, + 637 + ], + "type": "text", + "content": "Yu, A. J., Dayan, P., and Cohen, J. D., \"Dynamics of attentional selection under conflict: Toward a rational Bayesian account,\" J. Exp. Psychol. Human Perception Performance, vol. 35, no. 3, p. 700, 2009." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 644, + 506, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 644, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 506, + 669 + ], + "type": "text", + "content": "Zhang, S. and Angela, J. Y., \"Forgetful Bayes and myopic planning: Human learning and decision-making in a bandit setting,\" in Proc. Neural Inf. Process. Syst., 2013." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 675, + 506, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 675, + 506, + 709 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 506, + 709 + ], + "type": "text", + "content": "Zhang, Z., Li, M., Lin, M., Soon, G., Greene, T., and Shen, C., \"Subgroup selection in adaptive signature designs of confirmatory clinical trials,\" J. Roy. Statist. Soc.: Ser. C (Appl. Statist.), vol. 66, no. 2, pp. 345-361, 2017." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 358, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 358, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 358, + 94 + ], + "type": "text", + "content": "A A REINFORCEMENT LEARNING PERSPECTIVE" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": "Optimal commitment can be viewed as a partially-observed reinforcement learning problem. 
Let the tuple " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "(\\mathcal{S},\\mathcal{A},\\mathcal{Z},\\mathcal{T},\\mathcal{O},\\mathcal{R})" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " denote a partially-observable Markov decision process (POMDP), where " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " is the (unobserved) state space, " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " is the action space, " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " is the observation space, " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{T} \\in \\Delta(\\mathcal{S})^{S \\times S}" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " describes the transition dynamics, " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{O} \\in \\mathcal{Z}^S" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " describes the observation dynamics, and " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{R} \\in \\mathbb{R}^S" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " describes the reward dynamics. 
Then, OCPs as defined in Section 2 can also be expressed as a special class of POMDPs: Letting " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{Y} = \\mathbb{R}" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " denote the outcome space for clarity, " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathfrak{D} = \\cup_{t=0}(\\mathcal{X} \\times \\mathcal{Y})^t" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " be the space of all possible datasets " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\mathfrak{D}" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": " be the space of all possible outcome distributions " + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 189, + 506, + 274 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\mathcal{S} \\doteq \\{\\varnothing\\} \\cup (\\Psi \\times \\mathfrak{D} \\times \\mathfrak{D}^{\\mathcal{X}})" + }, + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "text", + "content": ", where states " + }, + { + "bbox": [ + 
132, + 189, + 506, + 224 + ], + "type": "inline_equation", + "content": "s = (\\psi, \\mathcal{D}_t, \\{\\Omega_x\\}_{x \\in \\mathcal{X}})" + }, + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "text", + "content": " consist of the ongoing experiment " + }, + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\psi \\in \\Psi" + }, + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "text", + "content": ", the dataset " + }, + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t \\in \\mathfrak{D}" + }, + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "text", + "content": " collected by the ongoing experiment so far, and the true outcome distributions " + }, + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\{\\Omega_x \\in \\mathfrak{D}\\}" + }, + { + "bbox": [ + 132, + 189, + 506, + 224 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 228, + 205, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 228, + 205, + 240 + ], + "spans": [ + { + "bbox": [ + 132, + 228, + 205, + 240 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 228, + 205, + 240 + ], + "type": "inline_equation", + "content": "\\mathcal{A} \\doteq \\{\\varnothing\\} \\cup \\Psi" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 244, + 233, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 244, + 233, + 258 + ], + "spans": [ + { + "bbox": [ + 132, + 244, + 233, + 258 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 244, + 233, + 258 + ], + "type": "inline_equation", + "content": "\\mathcal{Z} \\doteq \\{\\varnothing\\} \\cup (\\mathcal{X} \\times \\mathcal{Y})" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 262, + 233, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + 
{ + "bbox": [ + 132, + 262, + 233, + 274 + ], + "spans": [ + { + "bbox": [ + 132, + 262, + 233, + 274 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 262, + 233, + 274 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(s = \\emptyset, a) \\doteq \\emptyset" + }, + { + "bbox": [ + 132, + 262, + 233, + 274 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 144, + 280, + 499, + 364 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 280, + 499, + 364 + ], + "spans": [ + { + "bbox": [ + 144, + 280, + 499, + 364 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {T} (s = (\\psi = (X, \\tau , \\rho), \\mathcal {D} _ {t}, \\{\\Omega_ {x} \\}), a) \\\\ \\begin{array}{c} \\dot {=} \\left\\{ \\begin{array}{l l} \\emptyset & \\text {i f} a = \\emptyset \\\\ s ^ {\\prime} = (\\psi , \\mathcal {D} _ {t + 1} = \\mathcal {D} _ {t} \\cup \\{x _ {t + 1}, y _ {t + 1} \\}, \\{\\Omega_ {x} \\}) \\\\ \\quad \\text {s . t .} x _ {t + 1} \\sim \\{\\eta_ {x | X} \\}, y _ {t + 1} \\sim \\Omega_ {x _ {t + 1}} & \\text {i f} a = \\psi \\\\ s ^ {\\prime} = (\\psi^ {\\prime}, \\mathcal {D} _ {1} = \\{x _ {1}, y _ {1} \\}, \\{\\Omega_ {x} \\}) \\\\ \\quad \\text {s . t .} x _ {1} \\sim \\{\\eta_ {x | X ^ {\\prime}} \\}, y _ {1} \\sim \\Omega_ {x _ {1}} & \\text {i f} a = \\psi^ {\\prime} = (X ^ {\\prime}, \\tau^ {\\prime}, \\rho^ {\\prime}) \\neq \\psi , \\end{array} \\right. 
\\end{array} \\\\ \\end{array}", + "image_path": "5d2ca2a540c264f79a072525721002dc0d9970516ffb6d3675b7dfe4e5e4d9df.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 376, + 227, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 376, + 227, + 388 + ], + "spans": [ + { + "bbox": [ + 132, + 376, + 227, + 388 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 376, + 227, + 388 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(s' = \\varnothing) \\doteq \\varnothing" + }, + { + "bbox": [ + 132, + 376, + 227, + 388 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 190, + 394, + 453, + 408 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 394, + 453, + 408 + ], + "spans": [ + { + "bbox": [ + 190, + 394, + 453, + 408 + ], + "type": "interline_equation", + "content": "\\mathcal {O} \\left(s ^ {\\prime} = (\\psi , \\mathcal {D} _ {t + 1} = \\mathcal {D} _ {t} \\cup \\{x _ {t + 1}, y _ {t + 1} \\}, \\{\\Omega_ {x} \\})\\right) \\doteq \\left(x _ {t + 1}, y _ {t + 1}\\right),", + "image_path": "a5c9c6487c40c02c9741be0b84f5153a98619dbfcbd1ffa1ec47cb8faa9ad523.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 420, + 225, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 420, + 225, + 433 + ], + "spans": [ + { + "bbox": [ + 132, + 420, + 225, + 433 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 420, + 225, + 433 + ], + "type": "inline_equation", + "content": "\\mathcal{R}(s' = \\emptyset) \\doteq 0" + }, + { + "bbox": [ + 132, + 420, + 225, + 433 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 159, + 439, + 484, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 439, + 484, + 453 + ], + "spans": [ + { + "bbox": [ + 159, + 439, + 484, + 453 + ], + "type": 
"interline_equation", + "content": "\\mathcal {R} \\left(s ^ {\\prime} = (\\psi = (X, \\tau , \\rho), \\mathcal {D} _ {t + 1}, \\{\\Omega_ {x} \\})\\right) \\doteq - C _ {\\psi} + R _ {\\psi} \\cdot \\mathbb {1} \\{t + 1 = \\tau \\} \\cdot \\rho \\left(\\mathcal {D} _ {t + 1}\\right).", + "image_path": "ef367a465062abe3eba0c0373d28070bc50462ee5993f9ee95ce07d79dda9aa0.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": "Since ongoing experiments " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": " are completely dictated by actions, and datasets " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": " collected by the ongoing experiments consist solely of observations " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "(x_t,y_t)" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": ", the only unobserved component of the states in this POMDP is the true outcome distributions " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\{\\Omega_x\\}_{x\\in \\mathcal{X}}" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": ". 
Hence, the optimal policy should have the form " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\pi (\\psi ,\\mathcal{D}_t,b)" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "b\\in \\Delta (\\mathfrak{O}^{\\mathcal{X}})" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": " denotes beliefs over " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\{\\Omega_x\\}" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": " that is posterior distributions over the true outcome distributions. For instance, when " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\Omega_{x} = \\mathcal{N}(\\theta_{x},1)" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": " as we have been assuming in Sections 3 and 4, posteriors over mean outcomes " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\{\\theta_x\\}_{x\\in \\mathcal{X}}" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": ", which are given by parameters " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\{\\mu_x,\\sigma_x^2\\}" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "inline_equation", + "content": "\\theta_{x}|\\bar{\\mathcal{D}}_{t}^{i}\\sim \\mathcal{N}(\\mu_{x},\\sigma_{x}^{2})" + }, + { + "bbox": [ + 104, + 464, + 504, + 543 + ], + "type": "text", + "content": ", constitute as beliefs." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 547, + 506, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 547, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 547, + 506, + 592 + ], + "type": "text", + "content": "Now although an OCP can be expressed as a POMDP, doing so is not particularly helpful in finding a solution. As we have already discussed in Section 3, the standard approach to solving a POMDP would be to use dynamic programming and compute the optimal value function " + }, + { + "bbox": [ + 104, + 547, + 506, + 592 + ], + "type": "inline_equation", + "content": "V^{*}" + }, + { + "bbox": [ + 104, + 547, + 506, + 592 + ], + "type": "text", + "content": " and the optimal Q-function " + }, + { + "bbox": [ + 104, + 547, + 506, + 592 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 547, + 506, + 592 + ], + "type": "text", + "content": " iteratively according to Bellman optimality conditions" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 181, + 597, + 426, + 627 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 597, + 426, + 627 + ], + "spans": [ + { + "bbox": [ + 181, + 597, + 426, + 627 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} Q ^ {*} (b, a) = \\mathbb {E} _ {s \\sim b, s ^ {\\prime} \\sim \\mathcal {T} (s, a), z ^ {\\prime} = \\mathcal {O} (s ^ {\\prime}), b ^ {\\prime} | \\{b, z ^ {\\prime} \\}} \\left[ \\mathcal {R} \\left(s ^ {\\prime}\\right) + V ^ {*} \\left(b ^ {\\prime}\\right) \\right] \\\\ V ^ {*} (b) = \\max _ {a \\in \\mathcal {A}} Q ^ {*} (b, a), \\\\ \\end{array}", + "image_path": "010a4a2570be057284b5e5e248f12be8db65d2adb866d104a31172cbc88e15a9.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + 
], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "b'|\\{b, z'\\}" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " denotes the updated belief " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "b'" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " after having belief " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " and making a new observation " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "z'" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": ". When the state space " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " is discrete—or equivalently in our case, when the space of outcome distributions " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\Omega \\in \\mathfrak{D}" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " is discrete—" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "V^*" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "Q^*" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " happen to be convex functions, which makes it possible to perform these iterations efficiently by approximating " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "V^*" + }, + { 
+ "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "Q^*" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " using functions of the form " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "f(b) = \\max\\{a_i b + a_j'\\}" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " (Spaan, 2012). However, even in the simplest of cases where " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " is continuous—or equivalently, the space of outcome distributions " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\Omega \\in \\mathfrak{D}" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " is continuous, for instance when " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\Omega_x = \\mathcal{N}(\\theta_x, 1)" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "—the convexity of " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "V^*" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "Q^*" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " no longer generally holds. 
In fact, we show in Proposition 1 that neither " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "V^*" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " nor " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "-V^*" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " is convex with respect to beliefs " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "b \\equiv \\{t, \\mu\\}" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": " for at least one instance of the simplified OCP that we have analyzed in Section 3." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 122, + 81, + 487, + 368 + ], + "blocks": [ + { + "bbox": [ + 107, + 66, + 503, + 78 + ], + "lines": [ + { + "bbox": [ + 107, + 66, + 503, + 78 + ], + "spans": [ + { + "bbox": [ + 107, + 66, + 503, + 78 + ], + "type": "text", + "content": "Table 4: Performance comparison between Futility Stopping with RL-based algorithms and with Bayes-OCP." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 122, + 81, + 487, + 368 + ], + "lines": [ + { + "bbox": [ + 122, + 81, + 487, + 368 + ], + "spans": [ + { + "bbox": [ + 122, + 81, + 487, + 368 + ], + "type": "table", + "html": "
Algorithms:Oracle RCTFutility Stopping w/ Discretized RLFutility Stopping w/ Deep Q-learningFutility Stopping w/ Bayes-OCP
All Instances (100%)Utility260.4131.8 (4.3)78.8 (3.1)150.0 (3.5)
FWER0.0%0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.6 (0.0)0.7 (0.0)0.5 (0.0)
Success75.2%41.0% (1.0%)24.2% (0.8%)45.4% (1.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F35.654.3 (2.2)23.6 (1.8)57.6 (4.6)
Green Instances (47.3%)Utility389.6309.5 (4.1)185.0 (4.9)337.7 (5.7)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.2 (0.0)0.5 (0.0)0.1 (0.0)
Success99.0%80.9% (0.7%)47.7% (1.1%)86.0% (1.4%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F600.072.7 (7.1)11.0 (0.8)46.6 (7.6)
Amber Instances (29.4%)Utility258.6-23.9 (5.4)-16.6 (6.8)-5.3 (5.4)
FWER0.0%0.2% (0.2%)0.1% (0.1%)0.4% (0.3%)
Switches1.00.9 (0.0)0.9 (0.0)0.8 (0.0)
Success96.6%9.1% (0.8%)5.2% (1.3%)15.2% (2.0%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F600.066.1 (4.3)39.5 (4.4)78.3 (9.3)
Red Instances (23.3%)Utility0.0-33.0 (1.6)-16.5 (5.2)-35.1 (1.7)
FWER0.0%0.1% (0.2%)0.1% (0.2%)0.1% (0.2%)
Switches1.01.0 (0.0)1.0 (0.0)1.0 (0.0)
Success0.0%0.2% (0.2%)0.4% (0.4%)0.9% (0.3%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)
T-to-F0.033.7 (0.9)18.3 (4.8)38.9 (2.1)
", + "image_path": "51adf94dde740ab2026374795378f32e633ffe278c11f03a88597083afe70bbe.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 399, + 406, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 406, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 406, + 411 + ], + "type": "text", + "content": "A.1 EXPERIMENTS WITH REINFORCEMENT LEARNING BENCHMARKS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": "Having said all that, one naive way to still compute " + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "inline_equation", + "content": "V^{*}" + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": " iteratively according to Bellman optimality conditions is to discretize the belief space. We call this benchmark Discretized RL and we use it to perform futility stopping—that is when " + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "inline_equation", + "content": "|\\Psi| = 1" + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": ", deciding whether to stop the only viable experiment design early or not. Otherwise, the dimensionality of the belief state explodes combinatorially with respect to " + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "inline_equation", + "content": "|\\Psi|" + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": ". 
We consider the same setting that we have considered during our experiments in Section 6 and compare the performance of Futility Stopping with Discretized RL with that of Futility Stopping with Bayes-OCP. When implementing discretized RL, instead of keeping track of the entire dataset " + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t" + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": ", we only keep track of the sufficient statistic " + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "inline_equation", + "content": "\\mu_t = \\sum_{(x_{t'}, y_{t'})} y_{t'} / |\\mathcal{D}_t|" + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": ", restrict the domain of " + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "inline_equation", + "content": "\\mu_t" + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": " to interval " + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "inline_equation", + "content": "[-0.3, 0.3]" + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": ", and discretize this interval into 100 equally-spaced bins. In addition to discretized RL, we also consider the approach proposed by Ni et al. (2022) for solving complex classes of POMDPs, which the optimal commitment problem is one of. Briefly, we employ deep Q-learning (as such, we call this benchmark Deep Q-learning) to train a neural network as an approximation of the Q-function " + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "inline_equation", + "content": "Q^{*}(b, a)" + }, + { + "bbox": [ + 104, + 427, + 506, + 594 + ], + "type": "text", + "content": " using the POMDP we formalized earlier as a simulator. As the network architecture, we consider a multi-layer perceptron with two hidden layers of size 100 and with tanh activations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": "Results are given in Table 4; futility stopping with Bayes-OCP performs better than futility stopping with discretized RL as well as futility stopping with deep Q-learning. In addition to the bad performance of discretized RL, it is also not feasible to scale it to use cases beyond futility stopping. When " + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "inline_equation", + "content": "|\\Psi| > 1" + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": ", we would need to keep separate track of each " + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\mu_x" + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": ". Moreover, we would also need to start keeping track of the scale parameters " + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\{\\sigma_x\\}" + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": " since it would now be possible to distribute samples among multiple atomic-populations in multiple ways by targeting different populations with different experiments (we no longer would be able to treat the target population of the only viable experiment design as the only atomic-population there is). 
Noting that " + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\sigma_x" + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": "'s already take discrete values with at least " + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": "-many possible values, merely increasing the number of viable experiments " + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "inline_equation", + "content": "|\\Psi|" + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": " from one to two causes the dimensionality of the belief space to jump from 100 to " + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\sim (100 \\times 600)^2 = 36 \\times 10^8" + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": ". Deep Q-learning performs even worse as it ignores all structure present in the optimal commitment problem, and instead, views the POMDP that describes it as a black-box simulator." 
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 94, + 506, + 315 + ], + "blocks": [ + { + "bbox": [ + 160, + 80, + 448, + 91 + ], + "lines": [ + { + "bbox": [ + 160, + 80, + 448, + 91 + ], + "spans": [ + { + "bbox": [ + 160, + 80, + 448, + 91 + ], + "type": "text", + "content": "Table 5: Performance comparison of algorithms with family-wise error control." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 105, + 94, + 506, + 315 + ], + "lines": [ + { + "bbox": [ + 105, + 94, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 94, + 506, + 315 + ], + "type": "table", + "html": "
Algorithms:Oracle RCTRCTAdaptive Enrichment w/ Bonferroni Corr.Futility Stopping w/ Bayes-OCPGreedy Bayes-OCP w/ Bonferroni Corr.Bayes-OCP w/ Bonferroni Corr.
All Instances (100%)Utility260.4-39.4 (6.7)91.4 (5.4)150.0 (3.5)23.7 (2.2)158.7 (5.2)
FWER0.0%0.3% (0.1%)0.1% (0.1%)0.1% (0.1%)0.0% (0.0%)0.1% (0.1%)
Switches0.50.0 (0.0)0.5 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success75.2%56.1% (0.7%)51.1% (0.7%)45.4% (1.3%)7.7% (0.5%)49.3% (1.5%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)606.2 (2.3)616.6 (1.7)
T-to-F35.6600.0 (0.0)543.0 (15.7)57.6 (4.6)2.3 (0.3)65.8 (7.0)
Green Instances (47.3%)Utility389.6388.7 (3.9)378.8 (3.1)337.7 (5.7)46.1 (3.3)325.8 (5.5)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)1.0 (0.0)0.2 (0.0)
Success99.0%98.9% (0.4%)96.1% (0.6%)86.0% (1.4%)13.8% (0.8%)84.7% (1.5%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)604.9 (2.0)604.0 (0.7)
T-to-F600.0600.0 (0.0)768.6 (19.0)46.6 (7.6)2.0 (0.3)66.5 (17.7)
Amber Instances (29.4%)Utility258.6-300.3 (19.8)-51.9 (15.6)-5.3 (5.4)8.3 (2.9)44.6 (4.6)
FWER0.0%0.7% (0.3%)0.3% (0.1%)0.4% (0.3%)0.0% (0.0%)0.2% (0.2%)
Switches1.00.0 (0.0)0.8 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success96.6%30.0% (2.0%)18.2% (2.2%)15.2% (2.0%)3.8% (0.9%)30.7% (1.4%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)613.4 (5.8)670.6 (8.9)
T-to-F600.0600.0 (0.0)724.6 (9.4)78.3 (9.3)2.6 (0.6)95.5 (18.4)
Red Instances (23.3%)Utility0.0-579.2 (4.1)-312.5 (2.3)-35.1 (1.7)-2.2 (0.3)-37.0 (2.4)
FWER0.0%0.2% (0.3%)0.2% (0.3%)0.1% (0.2%)0.0% (0.0%)0.1% (0.2%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%2.1% (0.4%)1.1% (0.4%)0.9% (0.3%)0.1% (0.2%)1.1% (0.7%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)649.8 (62.6)
T-to-F0.0600.0 (0.0)334.9 (3.7)38.9 (2.1)2.4 (0.5)39.8 (2.5)
", + "image_path": "7c4ba499e4778992d727b88b6a8062f297e8f53037bdf0f8d3d8ab0c2b356650.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 331, + 299, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 299, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 299, + 342 + ], + "type": "text", + "content": "B DISCUSSION ON ERROR CONTROL" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "text", + "content": "Bayes-OCP is a method for managing experiments—that is deciding what experiment to conduct and when—as opposed to a hypothesis testing strategy in and of itself. Implication of this in terms of error control is that the type 1 error of any individual experiment run by Bayes-OCP can always be controlled by choosing an appropriate experimental design, in particular, by specifying an appropriate success criterion " + }, + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "text", + "content": ". 
This individual-level error control built into the design of each experiment is not compromised by Bayes-OCP; no aggregate data from multiple experiments is ever fed into the success criterion of one alone (see Section 2, experiment " + }, + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "inline_equation", + "content": "\\psi^i" + }, + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "text", + "content": " is successful if " + }, + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "inline_equation", + "content": "\\rho^i(\\bar{D}_t^i) = 1" + }, + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "text", + "content": " not if " + }, + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "inline_equation", + "content": "\\rho^i(\\bar{\\mathcal{D}}_t^i) = 1" + }, + { + "bbox": [ + 104, + 357, + 504, + 457 + ], + "type": "text", + "content": "); and any assumptions made by Bayes-OCP regarding outcomes in Section 4, whether accurate or inaccurate, have no effect on the results produced by an external success criterion." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 463, + 506, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 463, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 463, + 506, + 617 + ], + "type": "text", + "content": "While Bayes-OCP does not compromise the individual error control of experiments, neither does it control their collective family-wise error rate (FWER)—that is the probability of at least one experiment among all that are conducted making a false discovery. Bayes-OCP views the problem of managing experiments purely as a utility maximization problem with no additional constraints. Within the scope of our discussion, the purpose of measuring FWER as a metric is to check empirically whether the individual error rates are inflated or not (note that FWER is a stricter notion of error than individual error rate). 
In practice, depending on how closely related the managed experiments are, controlling FWER might not necessarily be a concern. Let us highlight this: Any algorithm that manages experiments for long enough is bound to make at least one false discovery. Each year more than a thousand clinical trials are launched (that eventually post results) and more than half of these trials succeed (Takebe et al., 2018; Cli). If the type 1 error rate of all these trials were " + }, + { + "bbox": [ + 104, + 463, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\% 5" + }, + { + "bbox": [ + 104, + 463, + 506, + 617 + ], + "type": "text", + "content": ", we would expect at least 25 false discoveries in a year, which is more than one hence it would have put FWER of all real-world trials at almost " + }, + { + "bbox": [ + 104, + 463, + 506, + 617 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 463, + 506, + 617 + ], + "type": "text", + "content": " when measured in a year-by-year basis. Of course, this is not problematic since not all clinical trials are related to each other closely enough to be considered a family." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 633, + 354, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 633, + 354, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 354, + 643 + ], + "type": "text", + "content": "B.1 EXPERIMENTS WITH FAMILY-WISE ERROR CONTROL" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": "When controlling FWER is of concern, Bayes-OCP can easily be adapted to satisfy this additional constraint by first limiting the number of total experiments that can be conducted—that is putting an upper bound on " + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": " and then using well-established methods for family-wise error control such as Bonferroni correction or alpha spending functions (Demets and Lan, 1994) to adjust the success criteria of the viable experiments in " + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": ". We run additional experiments to evaluate the performance of Bayes-OCP with Bonferroni correction. 
We consider the same setting that we have considered during our experiments in Section 6 except for one difference: We limit the number of experiments" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 94, + 506, + 350 + ], + "blocks": [ + { + "bbox": [ + 129, + 80, + 481, + 91 + ], + "lines": [ + { + "bbox": [ + 129, + 80, + 481, + 91 + ], + "spans": [ + { + "bbox": [ + 129, + 80, + 481, + 91 + ], + "type": "text", + "content": "Table 6: Performance comparison when the ground-truth outcome distributions are not Gaussian." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 105, + 94, + 506, + 350 + ], + "lines": [ + { + "bbox": [ + 105, + 94, + 506, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 94, + 506, + 350 + ], + "type": "table", + "html": "
Algorithms:Oracle RCTRCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
All Instances (100%)Utility266.5-38.4 (14.7)110.0 (9.5)150.8 (8.5)46.3 (3.8)178.2 (7.3)
FWER0.0%0.1% (0.1%)0.0% (0.1%)0.0% (0.1%)0.0% (0.0%)0.0% (0.1%)
Switches0.50.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success76.6%56.2% (1.5%)53.6% (1.5%)46.5% (1.8%)14.7% (1.1%)54.9% (1.7%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)607.3 (1.0)617.2 (1.8)
T-to-F32.5600.0 (0.0)563.5 (9.9)65.8 (3.8)4.3 (0.5)81.9 (7.1)
Green Instances (48.0%)Utility391.3388.0 (4.1)383.7 (3.1)343.3 (4.4)89.4 (6.7)348.7 (3.5)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.00.0 (0.0)0.0 (0.0)0.1 (0.0)0.9 (0.0)0.1 (0.0)
Success99.1%98.8% (0.4%)97.3% (0.4%)87.6% (0.7%)26.6% (2.0%)89.3% (0.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)605.8 (1.2)602.2 (1.3)
T-to-F600.0600.0 (0.0)710.1 (33.9)54.8 (10.0)4.2 (1.5)77.2 (10.6)
Amber Instances (29.9%)Utility263.5-316.2 (18.2)-13.1 (3.6)-18.7 (8.8)14.1 (2.4)67.6 (6.4)
FWER0.0%0.2% (0.3%)0.1% (0.3%)0.1% (0.3%)0.0% (0.0%)0.1% (0.3%)
Switches1.00.0 (0.0)0.7 (0.0)0.8 (0.0)1.1 (0.0)0.9 (0.0)
Success97.2%28.4% (1.8%)22.6% (1.1%)14.7% (1.5%)6.3% (0.8%)39.3% (2.3%)
T-to-S600.0600.0 (0.0)600.0 (0.0)600.0 (0.0)617.5 (6.1)670.6 (6.1)
T-to-F600.0600.0 (0.0)765.5 (11.4)91.0 (6.0)4.3 (1.3)126.0 (16.6)
Red Instances (22.1%)Utility0.0-588.3 (4.6)-316.6 (12.2)-37.0 (4.5)-3.8 (1.1)-41.7 (3.0)
FWER0.0%0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches1.00.0 (0.0)1.0 (0.0)1.0 (0.0)1.1 (0.0)1.0 (0.0)
Success0.0%1.2% (0.5%)1.0% (0.5%)0.5% (0.2%)0.2% (0.2%)1.6% (0.4%)
T-to-S-600.0 (0.0)600.0 (0.0)600.0 (0.0)600.0 (0.0)654.7 (21.8)
T-to-F0.0600.0 (0.0)341.9 (5.1)39.4 (4.2)4.2 (1.0)46.4 (3.9)
", + "image_path": "920d7c76807b5cdca570c9bc50b51938c411071356ef48c68c84e373d3ee226f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 364, + 506, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 364, + 506, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 506, + 431 + ], + "type": "text", + "content": "that can be conducted by each algorithm as at most two, and we specify " + }, + { + "bbox": [ + 104, + 364, + 506, + 431 + ], + "type": "inline_equation", + "content": "\\alpha = F^{-1}(0.975)" + }, + { + "bbox": [ + 104, + 364, + 506, + 431 + ], + "type": "text", + "content": " for algorithms that can potentially run more than one experiment—namely, adaptive enrichment and (Greedy) Bayes-OCP—while we still specify " + }, + { + "bbox": [ + 104, + 364, + 506, + 431 + ], + "type": "inline_equation", + "content": "\\alpha = F^{-1}(0.95)" + }, + { + "bbox": [ + 104, + 364, + 506, + 431 + ], + "type": "text", + "content": " for algorithms that always run exactly one experiment—namely, RCT and futility stopping. These specifications ensure that FWER of all algorithms are bounded by " + }, + { + "bbox": [ + 104, + 364, + 506, + 431 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 364, + 506, + 431 + ], + "type": "text", + "content": ". Results are given in Table 5; Bayes-OCP still performs the best when explicit control of FWER is required." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 444, + 443, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 444, + 443, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 444, + 443, + 455 + ], + "type": "text", + "content": "C EXPERIMENTS WITH MISSPECIFIED OUTCOME DISTRIBUTIONS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": "We consider the same setting that we have considered during our experiments in Section 6. Except now, the ground-truth outcome distributions are such that, when " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "y \\sim \\Omega_x" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "y = 1" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": " with probability " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "(\\theta_x + 1) / 2" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "y = -1" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": " otherwise. 
In order to ensure that " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\theta_x \\in [-1,1]" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": ", we also sample ground-truth mean outcomes so that " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\theta_x = 2p - 1" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": " is distributed according to Beta distribution with " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\alpha = 979 / 200" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\beta = 801 / 200" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": " (note that the mean and variance of " + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\theta_x" + }, + { + "bbox": [ + 104, + 464, + 506, + 576 + ], + "type": "text", + "content": " remains the same as in our original experiments). Despite the fact that outcomes are now distributed in a non-Gaussian way, we leave the implementation of Bayes-OCP unchanged, which still assumes that outcomes distributions are Gaussian. So, there is now a mismatch between the structure of outcome distributions specified as part of Bayes-OCP and the ground-truth outcome distributions. Results are given in Table 6; Bayes-OCP still does not inflate FWER despite the misspecified outcome distributions." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 586, + 386, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 386, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 386, + 597 + ], + "type": "text", + "content": "D EXPERIMENTS WITH MORE ATOMIC-POPULATIONS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "text", + "content": "We repeat our main experiments with more than two atomic-populations, specifically we set " + }, + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "inline_equation", + "content": "|\\mathcal{X}| = 10" + }, + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "text", + "content": ". As before, all atomic-populations have equal propensities such that " + }, + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "inline_equation", + "content": "\\eta_x = 1/10, \\forall x \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "text", + "content": ", and the meta-experimenter has the same positively-biased prior for the mean outcome associated with each atomic population: " + }, + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "inline_equation", + "content": "\\theta_x \\sim \\mathcal{N}(0.1, 0.1)" + }, + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "inline_equation", + "content": "\\forall x \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 603, + 506, + 682 + ], + "type": "text", + "content": ". We randomly generated 100 environment (repeated five times to obtain error bars), and the results are given in Table 7. We observe that Bayes-OCP still performs the best. 
These results confirm that a greedy approximation is suitable in identifying candidate experiments when the number of atomic-populations is large." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 691, + 249, + 703 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 691, + 249, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 691, + 249, + 703 + ], + "type": "text", + "content": "E SENSITIVITY ANALYSIS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": "Bayes-OCP has one hyper-parameter: " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": ", which controls how optimistic the switching rule given in line 14 of Algorithm 1 is, from " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\beta = 1/2" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": " meaning decisions are made greedily to " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\beta = 1" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": " meaning" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + 
"spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 94, + 504, + 184 + ], + "blocks": [ + { + "bbox": [ + 159, + 80, + 451, + 91 + ], + "lines": [ + { + "bbox": [ + 159, + 80, + 451, + 91 + ], + "spans": [ + { + "bbox": [ + 159, + 80, + 451, + 91 + ], + "type": "text", + "content": "Table 7: Performance comparison when the number of atomic-populations is 10." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 94, + 504, + 184 + ], + "lines": [ + { + "bbox": [ + 106, + 94, + 504, + 184 + ], + "spans": [ + { + "bbox": [ + 106, + 94, + 504, + 184 + ], + "type": "table", + "html": "
Algorithms:RCTAdaptive EnrichmentFutility Stopping w/ Bayes-OCPGreedy Bayes-OCPBayes-OCP
All Instances (100%)Utility8.0 (39.2)143.9 (31.1)141.0 (27.5)40.3 (5.9)172.4 (23.8)
FWER0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)0.0% (0.0%)
Switches0.0 (0.0)0.4 (0.0)0.5 (0.0)1.0 (0.0)0.6 (0.0)
Success60.8% (3.9%)71.0% (3.6%)51.2% (4.7%)15.6% (2.6%)63.2% (4.2%)
T-to-S600.0 (0.0)678.9 (8.3)600.0 (0.0)648.5 (13.2)647.5 (4.4)
T-to-F600.0 (0.0)672.7 (58.8)130.4 (12.5)9.3 (5.0)200.5 (72.2)
", + "image_path": "291c773a9e31f3c90ad9a5c1e98dbe7735c9c933a1ee01bc4307661efa168445.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 174, + 204, + 434, + 338 + ], + "blocks": [ + { + "bbox": [ + 174, + 204, + 434, + 338 + ], + "lines": [ + { + "bbox": [ + 174, + 204, + 434, + 338 + ], + "spans": [ + { + "bbox": [ + 174, + 204, + 434, + 338 + ], + "type": "image", + "image_path": "5cac31b589d8fd3ee95c3eda7fe1f144de0ed46052f72b20e7d32868261b8af8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 155, + 342, + 454, + 354 + ], + "lines": [ + { + "bbox": [ + 155, + 342, + 454, + 354 + ], + "spans": [ + { + "bbox": [ + 155, + 342, + 454, + 354 + ], + "type": "text", + "content": "Figure 5: Utility achieved by Bayes-OCP for various values of hyper-parameter " + }, + { + "bbox": [ + 155, + 342, + 454, + 354 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 155, + 342, + 454, + 354 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "text", + "content": "decisions are so extremely optimistic that the original experiment will never be abandoned (as there will always be a chance that it succeeds). As with all online algorithms, tuning " + }, + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "text", + "content": " is challenging since no a priori data would be available to perform cross validation. 
However, a nice feature of Bayes-OCP is that " + }, + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "text", + "content": " is rather interpretable, it is the evidence required against the ongoing experiment: An alternative experiment is preferred over the ongoing experiment only if it is believed to be the better experiment with at least " + }, + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "text", + "content": "-confidence. We evaluate the sensitivity of Bayes-OCP's performance to hyper-parameter " + }, + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 377, + 504, + 466 + ], + "type": "text", + "content": " in Figure 5; Bayes-OCP performs better than an RCT for all configurations and better than adaptive enrichment for most configurations." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 484, + 204, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 484, + 204, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 204, + 496 + ], + "type": "text", + "content": "F FUTURE WORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 511, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 504, + 578 + ], + "type": "text", + "content": "Extending the scope of Bayes-OCP One limitation of Bayes-OCP is that it only adapts the target population " + }, + { + "bbox": [ + 104, + 511, + 504, + 578 + ], + "type": "inline_equation", + "content": "X\\subseteq \\mathcal{X}" + }, + { + "bbox": [ + 104, + 511, + 504, + 578 + ], + "type": "text", + "content": " of experiments but not the sample horizon " + }, + { + "bbox": [ + 104, + 511, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 511, + 504, + 578 + ], + "type": "text", + "content": " or the success criterion " + }, + { + "bbox": [ + 104, + 511, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 511, + 504, + 578 + ], + "type": "text", + "content": ". We have chosen to focus on the selection of a target population since we believe the target population of an experiment to be the most critical design dimension to adjust adaptively. As we have already highlighted in our introduction, experiments with inflexible target populations can be problematic when responses to the treatment of interest are highly heterogeneous." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "content": "That being said, the high-level strategy of our proposed algorithm should still be applicable to adapting design dimensions other than the target population, namely " + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "content": ". At a high level, Bayes-OCP first identifies a candidate experiment and then compares the identified experiment to the ongoing experiment in an optimistic manner. Regardless of the given set of viable experiment design " + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "content": ", one could still follow the same strategy; the only complication would be to adapt how candidate experiments are identified depending on what design dimension varies across experiment designs in " + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": "For instance, when experiment designs varied in terms of " + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": ", a combinatorial search was required to identify good candidate experiments, for which we proposed a greedy strategy. When experiment designs vary in terms of " + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": ", a simple search over all possible " + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": " would suffice for identifying candidate experiment. 
The case where experiment designs vary in terms of " + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": " is more complex; optimal " + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": " for an experiment would be dependent on unknown effects " + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\theta_{x}" + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": "; selecting a good candidate experiment would involve estimating the optimal " + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": " given posteriors over " + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\theta_{x}" + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": ". This would be an interesting problem to explore as a future research direction." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "Performance guarantees While our theoretical results motivate the general use of an optimistic decision rule, they do not provide any guarantees about the performance of the specific rule we propose as part of Bayes-OCP. Another future research direction would be to prove an upper bound on the sub-optimality gap of Bayes-OCP." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 142, + 340, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 142, + 340, + 155 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 340, + 155 + ], + "type": "text", + "content": "G FURTHER DISCUSSION ON MAIN RESULTS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 165, + 506, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 506, + 319 + ], + "type": "text", + "content": "Table 3 report six metrics: Utility, FWER, Switches, Success, T-to-S, and T-to-F. 
We have already discussed the implications of Utility and FWER in Section 6. Here, we highlight other interesting phenomena regarding the remaining metrics. First, we see that Greedy Bayes-OCP switches experiments much more frequently compared with Bayes-OCP. This is because Greedy Bayes-OCP requires less evidence against the ongoing experiment when comparing it against an alternative experiment, whereas, Bayes-OCP favors the ongoing experiments more. Second, we see that a higher success probability does not necessarily also imply a higher utility. For instance, compare RCT with futility stopping, futility stopping is able to achieve higher utility than RCT by terminating risky experiments early and saving costs. However, this of course also means that futility stopping sees fewer experiments to completion hence leads to a lower success probability. Finally, we see that succeeding or failing early does not necessarily imply a higher utility either. Our best algorithm Bayes-OCP succeeds the latest on average as well as fails the latest compared with other benchmarks favoring red instances. This highlights the importance of being conservative when making decisions, being optimistic, and favoring the status quo more than a potential adaptation." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 335, + 346, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 346, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 346, + 346 + ], + "type": "text", + "content": "H FURTHER DISCUSSION ON RELATED WORK" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "text", + "content": "Multi-armed bandits The optimal commitment problem is similar to a multi-armed bandit (MAB) problem (Auer et al., 2002; Bubeck et al., 2012) in some aspects: Like arms in a MAB problem, each experiment design " + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "text", + "content": " has a random utility given by " + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "inline_equation", + "content": "R_{\\psi} \\cdot \\rho(\\mathcal{D}_{\\tau}) - \\tau C_{\\psi}" + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\tau}" + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "text", + "content": " is the source of randomness, and the distribution of this utility is unknown. Also similar to a MAB problem, the overall goal is to sequentially select experiment designs (cf. arms) that yield the maximum cumulative utility. 
The main difference between the two problems is that, in a MAB problem, selecting an arm immediately reveals a sample from its random utility, while in optimal commitment, running an experiment " + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "text", + "content": " just for one time step only incurs a cost of " + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "inline_equation", + "content": "C_{\\psi}" + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "text", + "content": "; observing a full sample of its random utility requires the experiment to be run until its completion for " + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 357, + 506, + 468 + ], + "type": "text", + "content": " consecutive time steps, without selecting any other experiment design in the meantime." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 473, + 506, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 473, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 473, + 506, + 573 + ], + "type": "text", + "content": "One can naively apply a MAB algorithm by viewing each viable experiment design as a unique arm, and by running experiments/arms selected by the algorithm until their completion to observe full samples from their unknown utility distributions. However, this obviously side steps the main question we want to answer in optimal commitment: When can we abandon a commitment—in this case, the decision to run an experiment/arm selection until its completion—before fully observing its outcome? 
Looking at optimal commitment from a MAB perspective reveals that there are two explore-exploit dilemmas present in optimal commitment: One is with respect to which experiment to select next, and the other is with respect to when to preemptively stop the current experiment (i.e. breaking a commitment). MAB algorithms address the former dilemma but not the latter." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "content": "Task replication in parallel computing There is work (Ghare and Leutenegger, 2005; Wand et al., 2014; Wang et al., 2019) that focuses on the problem of when to kill existing tasks and relaunch them in parallel computing, which is related to optimal stopping/switching. However there, the focus is on reasoning about when a stochastic event (i.e. successful completion of a computational task) will occur without any extra information other than the fact that the event of interest has not occurred yet. In contrast, in our setting, the decision-maker needs to process a streaming set of samples to reason about the random outcome of an event that is scheduled to happen at a deterministic time point (here, the event is an experiment reaching its conclusion). This means that our problem has a completely different information structure when compared with the problem of task replication. 
More formally, we observe samples " + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "inline_equation", + "content": "y_{t}" + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "content": " that are informative of whether " + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "inline_equation", + "content": "\\rho(\\mathcal{D}_{\\tau}) = 1" + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "content": " is a fixed variable. In contrast, the problem of task replication would correspond to the setting where " + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "content": " is a random variable with a known distribution and " + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "inline_equation", + "content": "\\rho = 1" + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "content": " always holds (hence no need to observe any samples " + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "inline_equation", + "content": "y_{t}" + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "content": "). Among optimal stopping/switching problems, the structure of our problem is more closely related to sequential hypothesis testing, which we have already covered in Section 5." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 258, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 258, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 258, + 94 + ], + "type": "text", + "content": "I PROOFS OF PROPOSITIONS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 242, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 242, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 242, + 118 + ], + "type": "text", + "content": "I.1 PROOF OF PROPOSITION 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 126, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 126, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 126, + 506, + 140 + ], + "type": "text", + "content": "We start by relating the optimal value function " + }, + { + "bbox": [ + 104, + 126, + 506, + 140 + ], + "type": "inline_equation", + "content": "V^{*}" + }, + { + "bbox": [ + 104, + 126, + 506, + 140 + ], + "type": "text", + "content": " to the optimal Q-function " + }, + { + "bbox": [ + 104, + 126, + 506, + 140 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 126, + 506, + 140 + ], + "type": 
"text", + "content": ". Letting " + }, + { + "bbox": [ + 104, + 126, + 506, + 140 + ], + "type": "inline_equation", + "content": "T_{t}^{*} = T_{t}^{\\pi^{*}}" + }, + { + "bbox": [ + 104, + 126, + 506, + 140 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 145, + 504, + 271 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 145, + 504, + 271 + ], + "spans": [ + { + "bbox": [ + 112, + 145, + 504, + 271 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} V ^ {*} (t, \\mu) \\\\ = \\mathbb {E} [ R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\left\\{T _ {t} ^ {*}, \\tau \\right\\} - t) | \\mu_ {t} = \\mu ] \\\\ = \\mathbb {E} \\left[ \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\varnothing \\right\\} \\left(R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t} ^ {*}, \\tau \\right\\} - t\\right)\\right) \\right. 
\\\\ + \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\Psi_ {0} \\right\\} \\left(R \\cdot \\mathbb {1} \\left\\{T _ {t} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t} ^ {*}, \\tau \\right\\} - t)\\right) | \\mu_ {t} = \\mu \\right] \\\\ = \\mathbb {E} [ \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\varnothing \\right\\} \\cdot 0 \\\\ + \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu_ {t}) = \\Psi_ {0} \\right\\} \\left(R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {*}, \\tau \\right\\} - t)\\right) | \\mu_ {t} = \\mu \\right] (11) \\\\ = \\mathbb {1} \\left\\{\\pi^ {*} (t, \\mu) = \\Psi_ {0} \\right\\} \\cdot \\mathbb {E} [ R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {*}, \\tau \\right\\} - t\\right) | \\mu_ {t} = \\mu ] (12) \\\\ = \\mathbb {1} \\left\\{Q ^ {*} (t, \\mu) > 0 \\right\\} \\cdot Q ^ {*} (t, \\mu) (13) \\\\ = \\max \\left\\{0, Q ^ {*} (t, \\mu) \\right\\}, (14) \\\\ \\end{array}", + "image_path": "4ac43084945a1855d39609ac3f177e3e650ecead98f86eebec48e6813af1aa3f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "text", + "content": "where (11) holds since " + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\pi^{*}(t,\\mu_{t}) = \\varnothing \\Rightarrow T_{t}^{*} = t" + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\pi^{*}(t,\\mu_{t}) = \\Psi_{0} \\Rightarrow T_{t}^{*} \\geq t + 1 \\Rightarrow T_{t}^{*} = \\min \\{t^{\\prime} 
\\geq t : \\pi^{*}(t^{\\prime},\\mu_{t^{\\prime}}) = \\emptyset\\} = \\min \\{t^{\\prime} \\geq t + 1 : \\pi^{*}(t^{\\prime},\\mu_{t^{\\prime}}) = \\emptyset\\} = T_{t + 1}^{*}" + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "text", + "content": ", (12) holds since " + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\mu_{\\tau} \\perp \\mathbb{1}\\{\\pi^{*}(t,\\mu_{t}) = \\Psi_{0}\\}" + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "inline_equation", + "content": "T_{t + 1}^{*} \\perp \\mathbb{1}\\{\\pi^{*}(t,\\mu_{t}) = \\Psi_{0}\\}" + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "text", + "content": " when conditioned on " + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\mu_{t} = \\mu" + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "text", + "content": ", and (13) holds since " + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\pi^{*}(t,\\mu) = \\Psi_{0} \\iff Q^{*}(t,\\mu) > 0" + }, + { + "bbox": [ + 104, + 277, + 506, + 344 + ], + "type": "text", + "content": ". Intuitively, the maximum possible value at a given time is achieved either by stopping immediately or by conducting the experiment for at least one more time step and then following the optimal policy thereafter." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 349, + 195, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 349, + 195, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 349, + 195, + 360 + ], + "type": "text", + "content": "Next, we observe that" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 108, + 367, + 504, + 565 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 367, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 108, + 367, + 504, + 565 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {P} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\mid \\mu_ {t} = \\mu \\right\\} = \\int \\mathbb {P} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\mid \\theta , \\mu_ {t} = \\mu \\right\\} \\mathrm {d} \\mathbb {P} \\left\\{\\theta \\mid \\mu_ {t} = \\mu \\right\\} \\\\ = \\int F \\left(\\mu^ {\\prime} - \\frac {\\theta + t \\mu}{t + 1}; \\frac {1}{(t + 1) ^ {2}}\\right) f (\\theta - \\mu ; ^ {1} / t) d \\theta \\\\ = \\iint \\mathbb {1} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\right\\} f \\left(\\mu_ {t + 1} - \\frac {\\theta + t \\mu}{t + 1}; \\frac {1}{(t + 1) ^ {2}}\\right) f (\\theta - \\mu ; ^ {1} / t) d \\mu_ {t + 1} d \\theta \\\\ = \\iint \\mathbb {1} \\left\\{\\mu_ {t + 1} \\leq \\mu^ {\\prime} \\right\\} f \\left(\\mu_ {t + 1} - \\frac {y + (t + 1) \\mu}{t + 1}; \\frac {1}{(t + 1) ^ {2}}\\right) f (y; ^ {1} / t) d \\mu_ {t + 1} d y \\\\ = \\iint \\mathbb {1} \\left\\{x + \\frac {y + (t + 1) \\mu}{t + 1} \\leq \\mu^ {\\prime} \\right\\} f \\left(x; ^ {1} / (t + 1) ^ {2}\\right) f \\left(y; ^ {1} / t\\right) d x d y \\\\ = \\mathbb{P}_{\\substack{X\\sim \\mathcal{N}(0,1 / (t + 1)^{2})\\\\ Y\\sim \\mathcal{N}(0,1 / t)}}\\Bigg\\{X + \\frac{Y}{t + 1}\\leq \\mu^{\\prime} - \\mu \\Bigg\\} \\\\ = \\mathbb {P} _ {X + Y / (t + 1) \\sim \\mathcal {N} (0, 1 / t - 1 / t + 1)} \\left\\{X + \\frac {Y}{t + 1} \\leq \\mu^ {\\prime} - \\mu \\right\\} \\\\ = 
F \\left(\\mu^ {\\prime} - \\mu ; ^ {1} / t - ^ {1} / t + 1\\right), \\tag {15} \\\\ \\end{array}", + "image_path": "1c339d48a183e5f75318c05f52ca75b64cb4fe14365b4264fe508bdb1f58bad1.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "inline_equation", + "content": "f(x; \\sigma^2) = (1 / \\sqrt{2\\pi\\sigma^2})e^{-(1/2)x^2 / \\sigma^2}" + }, + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "inline_equation", + "content": "F(x; \\sigma^2) = (1 / \\sqrt{2\\pi\\sigma^2})\\int_{-\\infty}^{x}e^{-(1/2)x'^2 / \\sigma^2}dx'" + }, + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "text", + "content": " are the p.d.f. and the c.d.f. of the Gaussian distribution with mean zero and variance " + }, + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "inline_equation", + "content": "\\sigma^2" + }, + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "text", + "content": " respectively. Hence " + }, + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "inline_equation", + "content": "\\mathrm{d}\\mathbb{P}\\{\\mu_{t+1} = \\mu'| \\mu_t = \\mu\\} = f(\\mu' - \\mu; 1/t - 1/t+1)d\\mu'" + }, + { + "bbox": [ + 104, + 571, + 506, + 611 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 615, + 506, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 615, + 506, + 638 + ], + "spans": [ + { + "bbox": [ + 104, + 615, + 506, + 638 + ], + "type": "text", + "content": "Then, using the relationship between " + }, + { + "bbox": [ + 104, + 615, + 506, + 638 + ], + "type": "inline_equation", + "content": "V^{*}" + }, + { + "bbox": [ + 104, + 615, + 506, + 638 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 615, + 506, + 638 + ], + "type": "inline_equation", + "content": "Q^{*}" + }, + { + "bbox": [ + 104, + 615, + 506, + 638 + ], + "type": "text", + "content": " and the observation regarding " + }, + { + "bbox": [ + 104, + 615, + 506, + 638 + ], + "type": "inline_equation", + "content": "\\mathbb{P}\\{\\mu_{t + 1} \\leq \\mu' | \\mu_t = \\mu\\}" + }, + { + "bbox": [ + 104, + 615, + 506, + 638 + ], + "type": "text", + "content": ", we derive the following Bellman optimality condition:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 644, + 494, + 733 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 644, + 494, + 733 + ], + "spans": [ + { + "bbox": [ + 116, + 644, + 494, + 733 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} Q ^ {*} (t, \\mu) = \\mathbb {E} [ R \\cdot \\mathbb {1} \\{T _ {t + 1} ^ {*} > \\tau \\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\{T _ {t + 1} ^ {*}, \\tau \\} - t) | \\mu_ {t} = \\mu ] \\\\ = - C + \\mathbb {E} \\left[ R \\cdot \\mathbb {1} \\left\\{T _ {t + 1} ^ {*} > \\tau \\right\\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot \\left(\\min \\left\\{T _ {t + 1} ^ {*}, \\tau \\right\\} - t - 1\\right) \\mid \\mu_ {t} = \\mu \\right] \\\\ = - C + \\int \\mathbb {E} [ R \\cdot \\mathbb {1} \\{T _ {t + 1} ^ {*} > \\tau \\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\{T _ {t + 1} ^ {*}, \\tau \\} - t - 1) | \\mu_ {t + 1} = \\mu^ {\\prime} ] \\\\ 
\\times \\mathrm {d} \\mathbb {P} \\left(\\mu_ {t + 1} = \\mu^ {\\prime} \\mid \\mu_ {t} = \\mu\\right) \\\\ = - C + \\int V ^ {*} (t + 1, \\mu^ {\\prime}) \\mathrm {d} \\mathbb {P} (\\mu_ {t + 1} = \\mu^ {\\prime} | \\mu_ {t} = \\mu) \\\\ = - C + \\int V ^ {*} (t + 1, \\mu^ {\\prime}) f (\\mu^ {\\prime} - \\mu ; 1 / t - 1 / t + 1) d \\mu^ {\\prime} \\\\ \\end{array}", + "image_path": "6ff16c2df85e62dffb339c305a0756f134da4b3ebbd4cc85b65e3be231653348.jpg" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 150, + 81, + 504, + 95 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 81, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 150, + 81, + 504, + 95 + ], + "type": "interline_equation", + "content": "= - C + \\int V ^ {*} (t + 1, \\mu + z) f \\left(z; ^ {1} / t - ^ {1} / t + 1\\right) d z \\tag {16}", + "image_path": "8d54ee68ca815589b656116a618be45b47c6902e3ae134f5e8ddde81cc07f7c3.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 152, + 97, + 504, + 111 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 97, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 152, + 97, + 504, + 111 + ], + "type": "interline_equation", + "content": "= - C + \\int \\max \\left\\{0, Q ^ {*} (t + 1, \\mu + z) 
\\right\\} f \\left(z; \\frac {1}{t} - \\frac {1}{t + 1}\\right) d z. \\tag {17}", + "image_path": "9ee45e24c6526418bdb13cc288df34a5f407d78dc0172ea173557b0a9defef64.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "spans": [ + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "text", + "content": "For the problem setting where " + }, + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "inline_equation", + "content": "C = 1" + }, + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "inline_equation", + "content": "R = 2" + }, + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "inline_equation", + "content": "\\alpha = 0" + }, + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "inline_equation", + "content": "\\tau = 2" + }, + { + "bbox": [ + 104, + 122, + 402, + 134 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 190, + 140, + 418, + 242 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 140, + 418, + 242 + ], + "spans": [ + { + "bbox": [ + 190, + 140, + 418, + 242 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} V ^ {*} (1, \\mu) = \\max \\{0, - 1 + \\int V ^ {*} (2, \\mu + z) f (z; 1 / 2) d z \\} \\\\ = \\max \\{0, - 1 + 2 \\int \\mathbb {I} \\{\\mu + z > 0 \\} f (z; 1 / 2) d z \\} \\\\ = \\max \\left\\{0, - 1 + 2 \\int_ {- \\mu} ^ {\\infty} f (z; 1 / 2) d z \\right\\} \\\\ = \\max \\left\\{0, - 1 + 2 F (\\mu ; ^ {1} / _ {2}) \\right\\} \\\\ = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} \\mu < 0 \\\\ - 1 + 2 F (\\mu ; 1 / 2) 
& \\text {i f} \\mu \\geq 0 . \\end{array} \\right. \\\\ \\end{array}", + "image_path": "108be793d63e7517b3a4f2a1c5df40e778cffc7884ceeaeb7916ad76ad6a3b1f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 247, + 198, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 198, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 198, + 259 + ], + "type": "text", + "content": "Notice that, for " + }, + { + "bbox": [ + 105, + 247, + 198, + 259 + ], + "type": "inline_equation", + "content": "\\mu > 0" + }, + { + "bbox": [ + 105, + 247, + 198, + 259 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 219, + 265, + 389, + 335 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 265, + 389, + 335 + ], + "spans": [ + { + "bbox": [ + 219, + 265, + 389, + 335 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {d ^ {2}}{d \\mu^ {2}} V ^ {*} (1, \\mu) = \\frac {d ^ {2}}{d \\mu^ {2}} \\Big (- 1 + 2 F (\\mu ; 1 / 2) \\Big) \\\\ = \\frac {d}{d \\mu} \\left(2 f (\\mu ; ^ {1} / 2)\\right) \\\\ = - (4 / \\pi) \\mu e ^ {- \\mu^ {2}} < 0 \\\\ \\end{array}", + "image_path": "2b1c145605d90e9e830266ff1d9fb42c3804bbc84315e38b1506eaff55e520d3.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + "content": "hence " + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "inline_equation", + "content": "V^{*}(1,\\mu)" + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + "content": " is concave at least on interval " + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "inline_equation", + "content": "\\mu \\in (0,\\infty)" + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + 
"content": " and is not a convex function. Moreover, " + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "inline_equation", + "content": "-V^{*}(1,\\mu)" + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + "content": " cannot be a convex function—or equivalently " + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "inline_equation", + "content": "V^{*}(1,\\mu)" + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + "content": " cannot be a purely concave function—either: For an arbitrary " + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "inline_equation", + "content": "\\mu \\in (0,\\infty), V^{*}(1,\\mu) > 0" + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "inline_equation", + "content": "V^{*}(1,-\\mu) = 0" + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + "content": " hence " + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "inline_equation", + "content": "(1/2)V^{*}(1,\\mu) + (1/2)V^{*}(1,-\\mu) > 0" + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + "content": " but " + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "inline_equation", + "content": "V^{*}(1,(1/2)\\mu + (1/2)(-\\mu)) = V^{*}(1,0) = 0" + }, + { + "bbox": [ + 104, + 342, + 506, + 388 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 400, + 242, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 400, + 242, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 400, + 242, + 411 + ], + "type": "text", + "content": "I.2 PROOF OF PROPOSITION 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 421, + 293, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 421, + 293, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 421, + 293, + 433 + ], + "type": "text", + "content": "We will prove the proposition by showing that" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 437, + 448, + 472 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 126, + 437, + 448, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 437, + 448, + 450 + ], + "spans": [ + { + "bbox": [ + 126, + 437, + 448, + 450 + ], + "type": "text", + "content": "(i) " + }, + { + "bbox": [ + 126, + 437, + 448, + 450 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu)" + }, + { + "bbox": [ + 126, + 437, + 448, + 450 + ], + "type": "text", + "content": " is non-decreasing in " + }, + { + "bbox": [ + 126, + 437, + 448, + 450 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 126, + 437, + 448, + 450 + ], + "type": "text", + "content": " —that is " + }, + { + "bbox": [ + 126, + 437, + 448, + 450 + ], + "type": "inline_equation", + "content": "\\mu < \\mu^{\\prime}\\Rightarrow Q^{*}(t,\\mu)\\leq Q^{*}(t,\\mu^{\\prime})" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 125, + 449, + 329, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 449, + 329, + 460 + ], + "spans": [ + { + "bbox": [ + 125, + 449, + 329, + 460 + ], + "type": "text", + "content": "(ii) " + }, + { + "bbox": [ + 125, + 449, + 329, + 460 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to \\infty}Q^{*}(t,\\mu) 
= -(\\tau -t)C + R > 0" + }, + { + "bbox": [ + 125, + 449, + 329, + 460 + ], + "type": "text", + "content": " , and" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 123, + 460, + 267, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 460, + 267, + 472 + ], + "spans": [ + { + "bbox": [ + 123, + 460, + 267, + 472 + ], + "type": "text", + "content": "(iii) " + }, + { + "bbox": [ + 123, + 460, + 267, + 472 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to -\\infty}Q^{*}(t,\\mu) = -C < 0" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "content": "for all " + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, \\tau - 1\\}" + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "content": " via mathematical induction. 
Notice that these three facts—together with the fact that " + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "inline_equation", + "content": "Q^{*}(t, \\mu)" + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "content": " is a continuous function in " + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, \\tau - 1\\}" + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "content": "—would imply the existence of a unique " + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "inline_equation", + "content": "\\mu_{t}^{*}" + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "inline_equation", + "content": "Q^{*}(t, \\mu_{t}^{*}) = 0" + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "inline_equation", + "content": "Q^{*}(t, \\mu) > 0 \\iff \\mu > \\mu_{t}^{*}" + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "inline_equation", + "content": "Q^{*}(t, \\mu) \\leq 0 \\iff \\mu \\leq \\mu_{t}^{*}" + }, + { + "bbox": [ + 104, + 476, + 506, + 521 + ], + "type": "text", + "content": ", which in turn would imply that" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 201, + 528, + 407, + 555 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 528, + 407, + 555 + ], + "spans": [ + { + "bbox": [ + 201, + 528, + 407, + 555 + ], + "type": "interline_equation", + "content": "\\pi^ {*} (t, \\mu) = \\left\\{ 
\\begin{array}{l l} \\Psi_ {0} & \\text {i f} \\mu > \\mu_ {t} ^ {*} \\iff Q ^ {*} (t, \\mu) > 0 \\\\ \\varnothing & \\text {i f} \\mu \\leq \\mu_ {t} ^ {*} \\iff Q ^ {*} (t, \\mu) \\leq 0 , \\end{array} \\right.", + "image_path": "a6dfefeab3bcc9ce916e9a0b8860903ee0f71dcc0bf8bd114400ece7cda61b13.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 561, + 464, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 464, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 464, + 573 + ], + "type": "text", + "content": "meaning the optimal policy " + }, + { + "bbox": [ + 104, + 561, + 464, + 573 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 561, + 464, + 573 + ], + "type": "text", + "content": " is indeed of \"thresholding -type\" as the proposition states." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 578, + 335, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 335, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 335, + 589 + ], + "type": "text", + "content": "First, we observe the following base cases for " + }, + { + "bbox": [ + 104, + 578, + 335, + 589 + ], + "type": "inline_equation", + "content": "t = \\tau - 1" + }, + { + "bbox": [ + 104, + 578, + 335, + 589 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 126, + 594, + 352, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 594, + 352, + 607 + ], + "spans": [ + { + "bbox": [ + 126, + 594, + 352, + 607 + ], + "type": "text", + "content": "(i) " + }, + { + "bbox": [ + 126, + 594, + 352, + 607 + ], + "type": "inline_equation", + "content": "Q^{*}(\\tau -1,\\mu)" + }, + { + "bbox": [ + 126, + 594, + 352, + 607 + ], + "type": "text", + "content": " is non-decreasing in " + }, + { + "bbox": [ + 126, + 594, + 352, + 607 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": 
[ + 126, + 594, + 352, + 607 + ], + "type": "text", + "content": " . When " + }, + { + "bbox": [ + 126, + 594, + 352, + 607 + ], + "type": "inline_equation", + "content": "\\mu < \\mu^{\\prime}" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 183, + 612, + 504, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 612, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 183, + 612, + 504, + 673 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} Q ^ {*} (\\tau - 1, \\mu) = - C + \\int V ^ {*} (\\tau , \\mu + z) f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z (18) \\\\ = - C + R \\int \\mathbb {1} \\left\\{\\mu + z > \\alpha / \\sqrt {\\tau} \\right\\} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z \\\\ \\leq - C + R \\int \\mathbb {1} \\left\\{\\mu^ {\\prime} + z > \\alpha / \\sqrt {\\tau} \\right\\} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z (19) \\\\ = Q ^ {*} (\\tau - 1, \\mu^ {\\prime}), \\\\ \\end{array}", + "image_path": "3ca5982855b38c8bd2adb0c7f8af26f5f3479cea42df39b3bd78b4ff3bd0f0d1.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 140, + 679, + 506, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 679, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 140, + 679, + 506, + 693 + ], + "type": "text", + "content": "where (18) is due to (16), and (19) holds since " + }, + { + "bbox": [ + 140, + 679, + 506, + 693 + ], + "type": "inline_equation", + "content": "\\mu + z > \\alpha / \\sqrt{\\tau} \\Rightarrow \\mu' + z > \\mu + z > \\alpha / \\sqrt{\\tau}" + }, + { + "bbox": [ + 140, + 679, + 506, + 693 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 124, + 696, + 325, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 696, + 325, + 710 + ], + "spans": [ + { + "bbox": [ + 124, + 696, + 325, + 710 + ], + "type": "text", + "content": "(ii) " + }, + { + "bbox": [ + 124, + 696, + 325, + 710 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to \\infty}Q^{*}(\\tau -1,\\mu) = -C + R > 0" + }, + { + "bbox": [ + 124, + 696, + 325, + 710 + ], + "type": "text", + "content": " since" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 156, + 715, + 489, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 715, + 489, + 734 + ], + "spans": [ + { + "bbox": [ + 156, + 715, + 489, + 734 + ], + "type": "interline_equation", + "content": "\\lim _ {\\mu \\rightarrow \\infty} Q ^ {*} (\\tau - 1, \\mu) = \\lim _ {\\mu \\rightarrow \\infty} \\left(- C + R \\int \\mathbb {1} \\{\\mu + z > \\alpha / \\sqrt {\\tau} \\} f (z; ^ {1 / (\\tau - 1)} - ^ {1 / \\tau}) d z\\right)", + "image_path": "8cd4e11dacbab8af465b8d67f8b0b0f95d7e4995c300e6403fad7ce934f2d869.jpg" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 237, + 80, + 449, + 134 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 237, + 80, + 449, + 134 + ], + "spans": [ + { + "bbox": [ + 237, + 80, + 449, + 134 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} = \\lim _ {\\mu \\rightarrow \\infty} \\left(- C + R \\int_ {\\alpha / \\sqrt {\\tau} - \\mu} ^ {\\infty} f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = - C + R \\int f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z \\\\ = - C + R. \\\\ \\end{array}", + "image_path": "0382e0994fdfd7bfddd246cae46a4904518b08d5245a7e3edae430916d72142d.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 139, + 312, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 139, + 312, + 152 + ], + "spans": [ + { + "bbox": [ + 121, + 139, + 312, + 152 + ], + "type": "text", + "content": "(iii) " + }, + { + "bbox": [ + 121, + 139, + 312, + 152 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to -\\infty}Q^{*}(\\tau -1,\\mu) = -C < 0" + }, + { + "bbox": [ + 121, + 139, + 312, + 152 + ], + "type": "text", + "content": " since" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 151, + 157, + 493, + 264 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 157, + 493, + 264 + ], + "spans": [ + { + "bbox": [ + 151, + 157, + 493, + 264 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\lim _ {\\mu \\rightarrow - \\infty} Q ^ {*} (\\tau - 1, \\mu) = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\int \\mathbb {1} \\{\\mu + z > \\alpha / \\sqrt {\\tau} \\} f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\int_ {\\alpha / \\sqrt {\\tau} - \\mu} ^ {\\infty} f (z; ^ {1} / (\\tau - 1) - ^ {1} / \\tau) d z\\right) \\\\ = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\left(1 - \\int_ {- \\infty} ^ {\\alpha / \\sqrt {\\tau} - \\mu} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z\\right)\\right) \\\\ = - C + R \\left(1 - \\int f (z; ^ {1} / (\\tau - 1) - 
^ {1} / \\tau) d z\\right) \\\\ = - C. \\\\ \\end{array}", + "image_path": "e41af3a94049b922495a1f2424350f2b7a1f977ec9eca5dd27bbca7eeaa889d5.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 270, + 419, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 419, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 419, + 282 + ], + "type": "text", + "content": "Then, we show that the following inductive cases hold for " + }, + { + "bbox": [ + 104, + 270, + 419, + 282 + ], + "type": "inline_equation", + "content": "t \\in \\{\\tau - 1, \\dots, 2\\}" + }, + { + "bbox": [ + 104, + 270, + 419, + 282 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "text", + "content": "(i) Given that " + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu)" + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "text", + "content": " is non-decreasing in " + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "inline_equation", + "content": "Q^{*}(t - 1,\\mu)" + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "text", + "content": " is also non-decreasing in " + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "text", + "content": ". 
Similar to the base case, when " + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "inline_equation", + "content": "\\mu < \\mu'" + }, + { + "bbox": [ + 126, + 286, + 504, + 310 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 185, + 314, + 503, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 314, + 503, + 357 + ], + "spans": [ + { + "bbox": [ + 185, + 314, + 503, + 357 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f \\left(z, ^ {1 / (t - 1)} - ^ {1 / t}\\right) d z \\tag {20} \\\\ \\leq - C + \\int \\max \\left\\{0, Q ^ {*} (t, \\mu^ {\\prime} + z) \\right\\} f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z \\\\ = Q ^ {*} (t - 1, \\mu^ {\\prime}), \\\\ \\end{array}", + "image_path": "e240372e2484bdc926013df69ff0f2a42665fa15684c9a2198897cfa4e0d4813.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 361, + 245, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 361, + 245, + 373 + ], + "spans": [ + { + "bbox": [ + 140, + 361, + 245, + 373 + ], + "type": "text", + "content": "where (20) is due to (17)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "text", + "content": "(ii) Given " + }, + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to \\infty}Q^{*}(t,\\mu) = -(\\tau -t)C + R" + }, + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "text", + "content": " and also given that " + }, + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu)" + }, + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "text", + "content": " is non-decreasing in " + }, + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "text", + "content": " we have " + }, + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to \\infty}Q^{*}(t - 1,\\mu) = -(\\tau -t + 1)C + R > 0" + }, + { + "bbox": [ + 124, + 377, + 504, + 411 + ], + "type": "text", + "content": " which can be shown using the sandwich theorem:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 175, + 417, + 503, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 417, + 503, + 472 + ], + "spans": [ + { + "bbox": [ + 175, + 417, + 503, + 472 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ \\leq - C + \\int \\max \\left\\{0, \\lim _ {\\mu^ {\\prime} \\rightarrow \\infty} Q ^ {*} \\left(t, \\mu^ {\\prime}\\right)\\right\\} f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z \\\\ \\leq - C + (- (\\tau - t) C + R) \\int f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ = - (\\tau - t - 1) C + R. 
\\tag {21} \\\\ \\end{array}", + "image_path": "2f3115ba3ed00cb9b9c9f584b5501409b6cb11af2713c568974f51e1f4aeac68.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 166, + 484, + 503, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 484, + 503, + 583 + ], + "spans": [ + { + "bbox": [ + 166, + 484, + 503, + 583 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ \\geq - C + \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, 1 / (t - 1) - 1 / t) d z \\\\ \\geq - C + \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} \\max \\left\\{0, Q ^ {*} (t, \\mu - | \\mu | ^ {1 / 2} \\right\\} f (z, ^ {1 / (t - 1)} - ^ {1 / t}) d z \\\\ \\geq - C + Q ^ {*} (t, \\mu - | \\mu | ^ {1 / 2}) \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} f \\left(z, 1 / (t - 1) - 1 / t\\right) d z, \\tag {22} \\\\ \\end{array}", + "image_path": "01e0885472ab6455ca0f6833c2cb3a86c2d5e51f50d67067e6f3556b5883b1f5.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 588, + 234, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 588, + 234, + 599 + ], + "spans": [ + { + "bbox": [ + 140, + 588, + 234, + 599 + ], + "type": "text", + "content": "Finally, observing that" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 168, + 604, + 477, + 660 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 604, + 477, + 660 + ], + "spans": [ + { + "bbox": [ + 168, + 604, + 477, + 660 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\lim _ {\\mu \\rightarrow \\infty} (2 2) = \\lim _ {\\mu \\rightarrow \\infty} \\left(- C + Q ^ {*} (t, \\mu - | \\mu | ^ {1 / 2}) \\int_ {- | \\mu | ^ {1 / 2}} ^ {\\infty} f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z\\right) \\\\ = - C + \\left(\\lim _ {\\mu^ {\\prime} 
\\rightarrow \\infty} Q ^ {*} (t, \\mu^ {\\prime})\\right) \\int f (z, 1 / (t - 1) - 1 / t) d z \\\\ = - (\\tau - t - 1) C + R, \\\\ \\end{array}", + "image_path": "93c798a8555d2bb47a758a3bf082a010b71e5e941d7188039fe8c462cae15c08.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 140, + 665, + 505, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 665, + 505, + 677 + ], + "spans": [ + { + "bbox": [ + 140, + 665, + 505, + 677 + ], + "type": "text", + "content": "together with bounds (21) and (22), we obtain " + }, + { + "bbox": [ + 140, + 665, + 505, + 677 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to \\infty}Q^{*}(t - 1,\\mu) = -(\\tau -t + 1)C + R." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "spans": [ + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "text", + "content": "(iii) Given " + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to -\\infty}Q^{*}(t,\\mu) = -C < 0" + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "text", + "content": " and also given that " + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu)" + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "text", + "content": " is non-decreasing in " + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to \\infty}Q^{*}(t,\\mu) > 0" + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "inline_equation", + "content": 
"\\mu_t^*" + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "text", + "content": " exists—we have " + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to -\\infty}Q^{*}(t - 1,\\mu) = -C < 0" + }, + { + "bbox": [ + 121, + 682, + 505, + 715 + ], + "type": "text", + "content": ", which again can be shown using the sandwich theorem:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 187, + 719, + 460, + 733 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 719, + 460, + 733 + ], + "spans": [ + { + "bbox": [ + 187, + 719, + 460, + 733 + ], + "type": "interline_equation", + "content": "Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, 1 / (t - 1) - 1 / t) d z", + "image_path": "ce277266dadd4e8a3e36b4bb4c6e020ebd142a8876af0bb01ed5e8d2576bbea1.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 239, + 82, + 504, + 95 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 82, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 239, + 82, + 504, + 95 + ], + "type": "interline_equation", + "content": "\\geq - C. 
\\tag {23}", + "image_path": "673cb613e155b06cfb137ce90be8bc28aea83ab80d6704c500b9eb7e8e0eb1a7.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 186, + 105, + 505, + 207 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 105, + 505, + 207 + ], + "spans": [ + { + "bbox": [ + 186, + 105, + 505, + 207 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ = \\int_ {\\mu_ {t} ^ {*} - \\mu} ^ {\\infty} Q ^ {*} (t, \\mu + z) f (z, 1 / (t - 1) - 1 / t) d z (24) \\\\ \\leq - C + R \\int_ {\\mu_ {t} ^ {*} - \\mu} ^ {\\infty} f (z, 1 / (t - 1) - 1 / t) d z (25) \\\\ \\leq - C + R \\left(1 - \\int_ {- \\infty} ^ {\\mu_ {t} ^ {*} - \\mu} f (z, 1 / (t - 1) - 1 / t) d z\\right), (26) \\\\ \\end{array}", + "image_path": "2184054df91bcdda014248e32143f097a56d268466d9bbc4f9aae6d418bd1f98.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "text", + "content": "where (24) holds since " + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu +z) > 0" + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "text", + "content": " if and only if " + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "inline_equation", + "content": "z > \\mu_t^* -\\mu" + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "inline_equation", + "content": "\\max \\{0,Q^{*}(t,\\mu +z)\\} = 0" + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "text", + "content": " otherwise, and (25) holds since " + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + 
"type": "inline_equation", + "content": "Q^{*}(t,\\mu)\\leq \\lim_{\\mu^{\\prime}\\to \\infty}Q^{*}(t,\\mu^{\\prime}) = -(\\tau -t)C + R\\leq R" + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu)" + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "text", + "content": " is non-decreasing in " + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 140, + 211, + 506, + 246 + ], + "type": "text", + "content": ". Finally, observing" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 177, + 250, + 468, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 250, + 468, + 291 + ], + "spans": [ + { + "bbox": [ + 177, + 250, + 468, + 291 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\lim _ {\\mu \\rightarrow - \\infty} (2 6) = \\lim _ {\\mu \\rightarrow - \\infty} \\left(- C + R \\left(1 - \\int_ {- \\infty} ^ {\\mu_ {t} ^ {*} - \\mu} f \\left(z, \\frac {1}{(t - 1)} - \\frac {1}{t}\\right) d z\\right)\\right) \\\\ = - C, \\\\ \\end{array}", + "image_path": "5b9098a06887d5b3ad7654aa9f97750c31b8e672d793776a01a69a70c0d787a0.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 296, + 457, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 296, + 457, + 309 + ], + "spans": [ + { + "bbox": [ + 140, + 296, + 457, + 309 + ], + "type": "text", + "content": "together with bounds (23) and (26), we obtain " + }, + { + "bbox": [ + 140, + 296, + 457, + 309 + ], + "type": "inline_equation", + "content": "\\lim_{\\mu \\to -\\infty}Q^{*}(t - 1,\\mu) = -C" + } + ] 
+ } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 312, + 506, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 506, + 337 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 506, + 337 + ], + "type": "text", + "content": "When put together, the base cases and the inductive cases above imply that conditions (i-iii) hold for all " + }, + { + "bbox": [ + 104, + 312, + 506, + 337 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, \\tau - 1\\}" + }, + { + "bbox": [ + 104, + 312, + 506, + 337 + ], + "type": "text", + "content": " hence " + }, + { + "bbox": [ + 104, + 312, + 506, + 337 + ], + "type": "inline_equation", + "content": "\\mu_t^*" + }, + { + "bbox": [ + 104, + 312, + 506, + 337 + ], + "type": "text", + "content": " exists for all " + }, + { + "bbox": [ + 104, + 312, + 506, + 337 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, \\tau - 1\\}" + }, + { + "bbox": [ + 104, + 312, + 506, + 337 + ], + "type": "text", + "content": " which concludes our proof." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 349, + 242, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 349, + 242, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 349, + 242, + 360 + ], + "type": "text", + "content": "I.3 PROOF OF PROPOSITION 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "text", + "content": "First, we prove the existence of " + }, + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "inline_equation", + "content": "\\mu_t^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "inline_equation", + "content": "t\\in \\{0,\\dots ,\\tau -1\\}" + }, + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "text", + "content": " by driving an analytical formula for " + }, + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu)\\doteq V^{\\pi^{(0)}}(t,\\mu)" + }, + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "text", + "content": ". 
Letting " + }, + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "inline_equation", + "content": "T_{t}^{(0)} = T_{t}^{\\pi^{(0)}}" + }, + { + "bbox": [ + 104, + 369, + 506, + 396 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 129, + 399, + 504, + 494 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 399, + 504, + 494 + ], + "spans": [ + { + "bbox": [ + 129, + 399, + 504, + 494 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} V ^ {(0)} (t, \\mu) = \\mathbb {E} [ R \\cdot \\mathbb {1} \\{T _ {t} ^ {(0)} > \\tau \\} \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\min \\{T _ {t} ^ {(0)}, \\tau \\} - t) | \\mu_ {t} = \\mu ] \\\\ = \\mathbb {E} [ R \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\tau - t) | \\mu_ {t} = \\mu ] (27) \\\\ = - C + \\int \\mathbb {E} [ R \\cdot \\rho (\\mu_ {\\tau}) - C \\cdot (\\tau - t - 1) | \\mu_ {t + 1} = \\mu^ {\\prime} ] \\mathrm {d} \\mathbb {P} (\\mu_ {t + 1} = \\mu^ {\\prime} | \\mu_ {t} = \\mu) \\\\ = - C + \\int V ^ {(0)} (t + 1, \\mu^ {\\prime}) \\mathrm {d} \\mathbb {P} (\\mu_ {t + 1} = \\mu^ {\\prime} | \\mu_ {t} = \\mu) \\\\ = - C + \\int V ^ {(0)} (t + 1, \\mu^ {\\prime}) f \\left(\\mu^ {\\prime} - \\mu ; ^ {1} / t - ^ {1} / (t + 1)\\right) d \\mu^ {\\prime} (28) \\\\ = - C + \\int V ^ {(0)} (t + 1, \\mu + z) f \\left(z; \\frac {1}{t} - \\frac {1}{(t + 1)}\\right) d z, (29) \\\\ \\end{array}", + "image_path": "56341033bbe2b672a00ee2ef912eaa0600e0c168d460a70269b1501ba0f4271c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "type": "text", + "content": "where (27) holds since " + }, + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\pi^{(0)}(t,\\mu) = \\Psi_0" + }, + { + "bbox": [ + 104, + 500, 
+ 506, + 525 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "type": "text", + "content": " hence it is always the case that " + }, + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "type": "inline_equation", + "content": "T_{t}^{(0)} = \\infty" + }, + { + "bbox": [ + 104, + 500, + 506, + 525 + ], + "type": "text", + "content": ", and (28) is due to (15)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "text", + "content": "In the remainder of our proofs, we take " + }, + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "inline_equation", + "content": "\\alpha = 0" + }, + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "text", + "content": " for notational brevity. This is without any loss of generality as, by simply shifting each value function and Q-function by " + }, + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "inline_equation", + "content": "\\alpha / \\sqrt{\\tau}" + }, + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "text", + "content": " with respect to " + }, + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "text", + "content": ", all of the following arguments would still hold. 
For " + }, + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "inline_equation", + "content": "\\alpha = 0" + }, + { + "bbox": [ + 104, + 529, + 506, + 563 + ], + "type": "text", + "content": ", we show that" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 206, + 567, + 505, + 596 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 567, + 505, + 596 + ], + "spans": [ + { + "bbox": [ + 206, + 567, + 505, + 596 + ], + "type": "interline_equation", + "content": "V ^ {(0)} (t, \\mu) = - (\\tau - t) C + R \\cdot F \\left(\\frac {\\mu}{\\sqrt {1 / t - 1 / \\tau}}\\right) \\tag {30}", + "image_path": "0f89107960c18a56be54a52e144e7d2ba0f9e117dfbcb00461988a7efb119fed.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 599, + 470, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 470, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 470, + 612 + ], + "type": "text", + "content": "for all " + }, + { + "bbox": [ + 104, + 599, + 470, + 612 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, \\tau - 1\\}" + }, + { + "bbox": [ + 104, + 599, + 470, + 612 + ], + "type": "text", + "content": " via mathematical induction. 
Note that (30) is true for " + }, + { + "bbox": [ + 104, + 599, + 470, + 612 + ], + "type": "inline_equation", + "content": "t = \\tau - 1" + }, + { + "bbox": [ + 104, + 599, + 470, + 612 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 175, + 616, + 435, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 616, + 435, + 735 + ], + "spans": [ + { + "bbox": [ + 175, + 616, + 435, + 735 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} V ^ {(0)} (\\tau - 1, \\mu) = - C + \\int V ^ {(0)} (\\tau , \\mu + z) f (z; 1 / _ {(\\tau - 1)} - 1 / _ {\\tau}) d z \\\\ = - C + R \\int \\mathbb {1} \\left\\{\\mu + z > 0 \\right\\} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z \\\\ = - C + R \\int_ {- \\mu} ^ {\\infty} f (z; 1 / (\\tau - 1) - 1 / \\tau) d z \\\\ = - C + R \\int_ {- \\infty} ^ {\\mu} f (z; ^ {1 / (\\tau - 1)} - ^ {1 / \\tau}) d z \\\\ = - C + R \\int_ {- \\infty} ^ {\\mu / \\sqrt {1 / (\\tau - 1) - 1 / \\tau}} f (z; 1) d z \\\\ \\end{array}", + "image_path": "e8e7d75c06405c1e5267af32a268063ae3bd74fcde21f5da46ca090cc8a8101e.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 237, + 79, + 385, + 108 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 237, + 79, + 385, + 108 + ], + "spans": [ + { + "bbox": [ + 237, + 79, + 385, + 108 + ], + "type": "interline_equation", + "content": "= - C + R \\cdot F \\left(\\frac {\\mu}{\\sqrt {1 / (\\tau - 1) - 1 / \\tau}}\\right),", + "image_path": "9409a437455665fc8afc1d54ed47908e1c4c9e6f8fa7c685b68d2e6b82c97103.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 114, + 504, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 504, + 137 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 114, + 504, + 137 + ], + "type": "inline_equation", + "content": "F(x) \\doteq F(x; 1)" + }, + { + "bbox": [ + 104, + 114, + 504, + 137 + ], + "type": "text", + "content": " is the c.d.f. of the standard Gaussian distribution. Moreover, assuming (30) is true for " + }, + { + "bbox": [ + 104, + 114, + 504, + 137 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 114, + 504, + 137 + ], + "type": "text", + "content": ", it is also true for " + }, + { + "bbox": [ + 104, + 114, + 504, + 137 + ], + "type": "inline_equation", + "content": "t - 1" + }, + { + "bbox": [ + 104, + 114, + 504, + 137 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 123, + 142, + 487, + 405 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 142, + 487, + 405 + ], + "spans": [ + { + "bbox": [ + 123, + 142, + 487, + 405 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} V ^ {(0)} (t - 1, \\mu) \\\\ = - C + \\int V ^ {(0)} (t, \\mu + z) f (z; ^ {1} / t - 1 - ^ {1} / t) d z \\\\ = - (\\tau - t + 1) C + R \\int F ((\\mu + z) / \\sqrt {1 / t - 1 / \\tau}; 1) f (z; 1 / (t - 1) - 1 / t) d z \\\\ = - (\\tau - t + 1) C \\\\ + R \\iint_ {- \\infty} ^ {(\\mu + z) / \\sqrt {1 / t - 1 / \\tau}} f (z ^ {\\prime}; 1) f (z; 1 / (t - 1) - 1 / t) d z ^ {\\prime} d z 
\\\\ = - (\\tau - t + 1) C \\\\ + R \\iint_ {- \\infty} ^ {\\mu + z} f \\left(z ^ {\\prime}; 1 / t - 1 / \\tau\\right) f \\left(z; 1 / (t - 1) - 1 / t\\right) d z ^ {\\prime} d z \\\\ = - (\\tau - t + 1) C \\\\ + R \\iint \\mathbb {1} \\{z ^ {\\prime} \\leq \\mu + z \\} f \\left(z ^ {\\prime}; ^ {1} / t - ^ {1} / \\tau\\right) f \\left(z; ^ {1} / (t - 1) - ^ {1} / t\\right) d z ^ {\\prime} d z \\\\ = - (\\tau - t + 1) C + R \\cdot \\mathbb {P} _ {Z \\sim \\mathcal {N} (0, 1 / (t - 1) - 1 / t)} \\{Z ^ {\\prime} \\leq \\mu + Z \\} \\\\ = - (\\tau - t + 1) C + R \\cdot \\mathbb {P} _ {\\frac {Z ^ {\\prime} - Z}{\\sqrt {1 / (t - 1) - 1 / \\tau}} \\sim \\mathcal {N} (0, 1)} \\left\\{\\frac {Z ^ {\\prime} - Z}{\\sqrt {1 / (t - 1) - 1 / \\tau}} \\leq \\frac {\\mu}{\\sqrt {1 / (t - 1) - 1 / \\tau}} \\right\\} \\\\ = - (\\tau - t + 1) C + R \\cdot F \\left(\\frac {\\mu}{\\sqrt {1 / (t - 1) - 1 / \\tau}}\\right). \\\\ \\end{array}", + "image_path": "8c5dc6467eea16b0957f7f63dfd911ca36d38e839da6ffea7865fdafaf1a0622.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 411, + 329, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 411, + 329, + 423 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 329, + 423 + ], + "type": "text", + "content": "Therefore, (30) indeed holds for all " + }, + { + "bbox": [ + 104, + 411, + 329, + 423 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, \\tau - 1\\}" + }, + { + "bbox": [ + 104, + 411, + 329, + 423 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": "Next, we observe that " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu)" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": " has a root at " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "\\mu = F^{-1}((\\tau - t)C / R)\\sqrt{1 / t - 1 / \\tau}" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": " provided that " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "(\\tau - t)C / R \\in (0,1)" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": ", which is the case for all " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, \\tau - 1\\}" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": " since " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "\\tau C < R" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": ". Moreover, " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu)" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": " is a strictly increasing function in " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": ". 
Hence, there exists a unique " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "\\mu_t^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "t \\in \\{1, \\dots, \\tau - 1\\}" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu_t^{\\mathrm{greedy}}) > 0" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu) > 0 \\iff \\mu > \\mu_t^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": ". In other words, " + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "inline_equation", + "content": "\\pi^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 429, + 504, + 493 + ], + "type": "text", + "content": " is also a thresholding-type policy as the proposition states." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "spans": [ + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "text", + "content": "Finally, we have " + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu_t^*) = Q^{(0)}(t,\\mu_t^*) \\leq Q^* (t,\\mu_t^*) = 0" + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "text", + "content": " hence " + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "inline_equation", + "content": "\\mu_t^* \\leq \\mu_t^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "text", + "content": ". This is because, by definition, " + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu) \\geq Q^{\\pi}(t,\\mu)" + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "inline_equation", + "content": "t,\\mu" + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "text", + "content": " for any given policy " + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "text", + "content": ", including " + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "inline_equation", + "content": "\\pi^{(0)}" + }, + { + "bbox": [ + 104, + 498, + 505, + 524 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 536, + 241, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 536, + 241, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 241, + 548 + ], + "type": "text", + "content": "I.4 PROOF OF PROPOSITION 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": "As in the proof of Proposition 3, we take " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "\\alpha = 0" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": " for notational brevity. Once again, this is without any loss of generality as, by simply shifting each value function and Q-function by " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "\\alpha / \\sqrt{\\tau}" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": " with respect to " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": ", all of the following arguments would still hold. Remember that the formula we derived for " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "V^{(0)}(t, \\mu)" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": " in (30) holds when " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "\\alpha = 0" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 608, + 504, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 608, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 608, + 504, + 631 + ], + "type": "text", + "content": "We start by deriving two bounds on the optimal Q-function " + }, + { + "bbox": [ + 104, + 608, + 504, + 631 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu)" + }, + { + "bbox": [ + 104, + 608, + 504, + 631 + ], + "type": "text", + "content": ": (i) a lower bound and (ii) an upper bound. For the lower bound, it is sufficient to observe that" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 230, + 637, + 380, + 652 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 637, + 380, + 652 + ], + "spans": [ + { + "bbox": [ + 230, + 637, + 380, + 652 + ], + "type": "interline_equation", + "content": "V ^ {(0)} (t, \\mu) = Q ^ {(0)} (t, \\mu) \\leq Q ^ {*} (t, \\mu),", + "image_path": "766b96290671791beedff0fc87331ec9f1bd0e87808305ad5bdf49ff5136b8f2.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 657, + 459, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 657, + 459, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 657, + 459, + 670 + ], + "type": "text", + "content": "which holds since, by definition, " + }, + { + "bbox": [ + 104, + 657, + 459, + 670 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu) \\geq Q^{\\pi}(t,\\mu)" + }, + { + "bbox": [ + 104, + 657, + 459, + 670 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 657, + 459, + 670 + ], + "type": "inline_equation", + "content": "t,\\mu" + }, + { + "bbox": [ + 104, + 657, + 459, + 670 + ], + "type": "text", + "content": " for any given policy " + }, + { + "bbox": [ + 104, + 657, + 459, + 670 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 657, + 459, + 670 + ], + "type": "text", 
+ "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 675, + 506, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 675, + 506, + 698 + ], + "spans": [ + { + "bbox": [ + 104, + 675, + 506, + 698 + ], + "type": "text", + "content": "For the upper bound, we use mathematical induction to show that " + }, + { + "bbox": [ + 104, + 675, + 506, + 698 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu) \\leq (\\tau - t - 1)C + V^{(0)}(t,\\mu)" + }, + { + "bbox": [ + 104, + 675, + 506, + 698 + ], + "type": "text", + "content": ". First, for the base case of " + }, + { + "bbox": [ + 104, + 675, + 506, + 698 + ], + "type": "inline_equation", + "content": "\\tau - 1" + }, + { + "bbox": [ + 104, + 675, + 506, + 698 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 186, + 704, + 504, + 733 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 704, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 186, + 704, + 504, + 733 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} Q ^ {*} (\\tau - 1, \\mu) = - C + \\int V ^ {*} (\\tau , \\mu + z) f (z; ^ {1} / t - ^ {1} / t + 1) d z \\tag {31} \\\\ = - C + \\int \\mathbb {1} \\left\\{\\mu + z > \\alpha \\right\\} f \\left(z; 1 / t - 1 / t + 1\\right) d z \\\\ \\end{array}", + "image_path": "8629e3305cb5b053424b86697c7e077d539eac95e377cd97bd490fb0e63e4977.jpg" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + 
"spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 243, + 81, + 504, + 111 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 81, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 243, + 81, + 504, + 111 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} = - C + \\int V ^ {(0)} (\\tau , \\mu + z) f (z; 1 / t - 1 / t + 1) d z \\\\ = V ^ {(0)} (\\tau - 1, \\mu), \\tag {32} \\\\ \\end{array}", + "image_path": "76990e07a538cd492b4f31d00a928d976a61decfff98ddf935f6243e902b7325.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 115, + 504, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 115, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 115, + 504, + 140 + ], + "type": "text", + "content": "where (31) is due to (16), and (32) is due to (29). 
Then, for the inductive case, assuming " + }, + { + "bbox": [ + 104, + 115, + 504, + 140 + ], + "type": "inline_equation", + "content": "Q^{*}(t,\\mu)\\leq (\\tau -t - 1)C + V^{(0)}(t,\\mu)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 165, + 144, + 504, + 205 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 144, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 165, + 144, + 504, + 205 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} Q ^ {*} (t - 1, \\mu) = - C + \\int \\max \\{0, Q ^ {*} (t, \\mu + z) \\} f (z, 1 / (t - 1) - 1 / t) d z (33) \\\\ \\leq \\int Q ^ {*} (t, \\mu + z) f \\left(z, ^ {1} / (t - 1) - ^ {1} / t\\right) d z (34) \\\\ \\leq (\\tau - t - 1) C + \\int V ^ {(0)} (t, \\mu + z) f (z, ^ {1} / (t - 1) - ^ {1} / t) d z \\\\ = (\\tau - t) C + V ^ {(0)} (t - 1, \\mu + z), \\\\ \\end{array}", + "image_path": "02361da1361cb230aedd7415b6a6be5d3328a100eea757238ccd2a1981d49abb.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 209, + 505, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 505, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 505, + 233 + ], + "type": "text", + "content": "where (33) is due to (17), and (34) holds since " + }, + { + "bbox": [ + 104, + 209, + 505, + 233 + ], + "type": "inline_equation", + "content": "-C \\leq Q^{*}(t, \\mu)" + }, + { + "bbox": [ + 104, + 209, + 505, + 233 + ], + "type": "text", + "content": " implies that " + }, + { + "bbox": [ + 104, + 209, + 505, + 233 + ], + "type": "inline_equation", + "content": "\\max \\{0, Q^{*}(t, \\mu)\\} \\leq \\max \\{C + Q^{*}(t, \\mu), Q^{*}(t, \\mu)\\} \\leq C + Q^{*}(t, \\mu)" + }, + { + "bbox": [ + 104, + 209, + 505, + 233 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 238, + 220, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 238, + 220, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 220, + 251 + ], + "type": "text", + "content": "Define " + }, + { + "bbox": [ + 105, + 238, + 220, + 251 + ], + "type": "inline_equation", + "content": "\\mu_t^+" + }, + { + "bbox": [ + 105, + 238, + 220, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 238, + 220, + 251 + ], + "type": "inline_equation", + "content": "\\mu_t^-" + }, + { + "bbox": [ + 105, + 238, + 220, + 251 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 223, + 255, + 455, + 283 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 255, + 455, + 283 + ], + "spans": [ + { + "bbox": [ + 223, + 255, + 455, + 283 + ], + "type": "interline_equation", + "content": "V ^ {(0)} (t, \\mu_ {t} ^ {+}) = 0 \\iff \\mu_ {t} ^ {+} = F ^ {- 1} \\left(\\left(\\tau - t\\right) \\frac {C}{R}\\right) \\sqrt {\\frac {1}{t} - \\frac {1}{\\tau}}", + "image_path": "a3a147d01372c7645c8e4a3a0f4eb10b21021b6e93f728cc62a0e9da7286f8f6.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 157, + 285, + 431, + 313 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 285, + 431, + 313 + ], + "spans": [ + { + "bbox": [ + 157, + 285, + 431, + 313 + ], + "type": "interline_equation", + "content": "(\\tau - t - 1) C + V ^ {(0)} (t, \\mu_ {t} ^ {-}) = 0 \\iff \\mu_ {t} ^ {-} = F ^ {- 1} \\left(\\frac {C}{R}\\right) \\sqrt {\\frac {1}{t} - \\frac {1}{\\tau}},", + "image_path": "c93851a0b0becfcd6e4fec3c9fac5c3d60e5c364e1d823ac342bebf5f2dda980.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 318, + 
504, + 342 + ], + "type": "text", + "content": "which we are able to write in closed form using the formula we derived for " + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu)" + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "text", + "content": " in (30) during the proof of Proposition 3." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": "By definition, " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "\\mu_t^{\\mathrm{greedy}} = \\mu_t^+" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": ". Moreover, (i) " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu_t^*)\\leq Q^* (t,\\mu_t^*) = 0 = V^{(0)}(t,\\mu_t^+)" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": " due to our lower bound, hence " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "\\mu_t^*\\leq \\mu_t^+" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": " (remember that " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu)" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": " was a strictly increasing function in " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": "), and (ii) " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "(\\tau -t - 1)C + V^{(0)}(t,\\mu_t^-) = 0 = Q^* (t,\\mu_t^*)\\leq 
(\\tau -t - 1)C + V^{(0)}(t,\\mu_t^*)" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": " due to our upper bound, hence " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "V^{(0)}(t,\\mu_t^-)\\leq V^{(0)}(t,\\mu_t^*)" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": " meaning " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "\\mu_t^- \\leq \\mu_t^*" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": ". Putting together these facts, and also the fact that " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "\\mu_t^*\\leq \\mu_t^{\\mathrm{greedy}}" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": ", we obtain " + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "inline_equation", + "content": "|\\mu_t^* -\\mu_t^{\\mathrm{greedy}}|\\leq \\mu_t^+ -\\mu_t^-" + }, + { + "bbox": [ + 104, + 346, + 506, + 411 + ], + "type": "text", + "content": " as the proposition states." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 426, + 285, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 285, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 285, + 437 + ], + "type": "text", + "content": "J BENCHMARKING ALGORITHMS" + } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 106, + 471, + 504, + 729 + ], + "blocks": [ + { + "bbox": [ + 106, + 456, + 473, + 468 + ], + "lines": [ + { + "bbox": [ + 106, + 456, + 473, + 468 + ], + "spans": [ + { + "bbox": [ + 106, + 456, + 473, + 468 + ], + "type": "text", + "content": "Algorithm 2 Adaptive Enrichment, Futility Stopping with Bayes-OCP, Greedy Bayes-OCP" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "lines": [ + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": "1: Initialize " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\mu_{x}" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\sigma_x^2" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "x\\in \\mathcal{X}" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n2: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "X\\gets \\mathcal{X},t\\gets 0,\\mathcal{D}_0\\gets \\emptyset" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n3: Start experiment " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\psi = (\\mathcal{X},\\tau 
,\\rho)" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n4: loop: \n5: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "t\\gets t + 1" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n6: Observe " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "x_{t},y_{t}" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n7: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t\\gets \\mathcal{D}_{t - 1}\\cup \\{x_t,y_t\\}" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n8: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "1 / \\sigma_{x_t}^2\\gets 1 / \\sigma_{x_t}^2 +1" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n9: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\mu_{x_t}\\gets \\mu_{x_t} + (y_t - \\mu_{x_t})\\sigma_{x_t}^2" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n10: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "X^{\\prime}\\gets \\emptyset" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n11: while " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "X\\setminus X^{\\prime}\\supset \\emptyset" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " .. 
\n12: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "x^{*}\\gets \\mathrm{argmax}_{x\\in X\\setminus X^{\\prime}}\\mathbb{E}_{\\theta_{x}\\sim \\mathcal{N}(\\mu_{x},\\sigma_{x}^{2})}[\\mathcal{G}^{(0)}(X^{\\prime}\\cup \\{x\\};\\{\\theta_{x}\\})]" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n13: if " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X^{\\prime}\\cup \\{x^{*}\\} ;\\{\\theta_{x}\\})] > \\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X^{\\prime};\\{\\theta_{x}\\})]" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " .. \n14: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "X^{\\prime}\\gets X^{\\prime}\\cup \\{x^{*}\\}" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n15: else: \n16: break \n17: if Adaptive Enrichment and " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "t = \\tau /2" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X';\\{\\theta_x\\})] > \\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}(X,\\mathcal{D}_t;\\{\\theta_x\\})]" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " .. 
\n18: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "X\\gets X^{\\prime},t\\gets 0,\\mathcal{D}_0\\gets \\emptyset" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n19: Start a new experiment " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\psi = (X,\\tau ,\\rho)" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n20: if Greedy Bayes-OCP and " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(X';\\{\\theta_x\\})] > \\mathbb{E}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}(X,\\mathcal{D}_t;\\{\\theta_x\\})]" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " .. \n21: " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "X\\gets X^{\\prime},t\\gets 0,\\mathcal{D}_0\\gets \\emptyset" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n22: Start a new experiment " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\psi = (X,\\tau ,\\rho)" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n23: if Futility Stopping with Bayes-OCP and " + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_{\\theta_x\\sim \\mathcal{N}(\\mu_x,\\sigma_x^2)}[\\mathcal{G}^{(0)}(\\emptyset ;\\{\\theta_x\\}) > \\mathcal{G}(X,\\mathcal{D}_0;\\{\\theta_x\\})] > \\beta" + }, + { + "bbox": [ + 106, + 471, + 504, + 729 + ], + "type": "text", + "content": " \n24: Stop all experimentation" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "algorithm" + } + ], + "discarded_blocks": [ 
+ { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 104, + 109, + 512, + 729 + ], + "blocks": [ + { + "bbox": [ + 105, + 81, + 320, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 320, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 320, + 94 + ], + "type": "text", + "content": "K GLOSSARY OF TERMS AND NOTATION" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 104, + 109, + 512, + 729 + ], + "lines": [ + { + "bbox": [ + 104, + 109, + 512, + 729 + ], + "spans": [ + { + "bbox": [ + 104, + 109, + 512, + 729 + ], + "type": "table", + "html": "
TermNotationDescription
Experiment-Conducted to confirm efficacy of an intervention, e.g. a new treatment in clinical trials, or a new recommendation policy in online advertisement
Subject-Individual participant of an experiment, e.g. patients in a clinical trial, or customers in online advertisement
PopulationX⊆XCollection of subjects that all share the same qualities, e.g. all female patients in a clinical trial, or all customers with the same preferences in online advertisement
Atomic-populationx∈XIndivisible populations
PropensitiesηxThe probability that a subject being from atomic-population x
ηxThe probability that a subject being from population X
ηx|XThe probability that a subject being from atomic-population x conditioned on the fact that they from population X
Outcome distributionΩxDistribution of outcomes that is indicative of the effect of the intervention of interest for atomic-population x
Mean outcomesθxExpected outcome, i.e. the effect of the intervention of interest, for atomic-population x
θXExpected outcome for population X
Experiment designψ=(X,τ,ρ)Target population X, sample horizon τ, and success criterion ρ that characterize an experiment
Viable experiment designsΨExperiment designs that can potentially be followed by a meta-experimenter
Meta-experimenter-The decision-making agent that decides when to run experiments according to which experiment design in Ψ
Sample/time horizonτAn experiment is terminated when t=τ
Success criterionρAn experiment is declared a success if ρ(Dτ)=1
Online datasetDtData collected by an ongoing experiment at time step t
DtData collected by the i-th experiment run by the meta-experimenter at time step t
Aggregate datasetDItCollective data collected by all experiments up to time step t of the i-th experiment
-TtNumber of time steps for which the i-th experiment is conducted until it was stopped or its time horizon was reached
CostCost incurred per time step by running experiment ψ
RewardReward received if experiment ψ is successful
UtilityGSum of costs and rewards received after all experimentation is concluded
PolicyπDecision-making policy of the meta-experiment
Optimal policyπ*The optimal policy that maximizes utility G in expectation
Greedy policyπgreedySee Section 3
Test statisticμtIn the simplified case in Section 3, the empirical mean outcome
Value functionVπ(t,μ)The expected utility of following policy π when μt=μ
Q-functionQπ(t,μ)The expected utility of following policy π after conducting the ongoing experiment for one more time step when μt=μ
-TtπThe first time step at or after time step t that policy π decides to stop all experimentation
Optimal value functionV*The value function associated with π*
Optimal Q-functionQ*The Q-function associated with π*
Thresholdsμt*Decision-making thresholds associated with π*
μtgreedyDecision-making thresholds associated with πgreedy
Conditional power functionP(X, Dt; {θx})The probability of a hypothesis test being successful condi- tioned on mean outcomes {θx}
Expected utility functionG(X, Dt; {θx})The expected utility of fully committing to an experiment and waiting until it terminates when the experiment targets popula-tion X, is currently at time step t, and collected dataset Dt
PosteriorsN(μx, σx2)Posterior distributions over mean outcomes {θx} maintained by Bayes-OCP such that θx|D ~ N(μx, σx2)
", + "image_path": "cdb63ad44372d32987538f8d058b458fda1bd889b6c86c68e97ef4949d57ee2f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 80, + 511, + 186 + ], + "blocks": [ + { + "bbox": [ + 105, + 80, + 511, + 186 + ], + "lines": [], + "index": 1, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for 
Multivariate Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_content_list.json b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3df1eaeb08bc2edaa0ffef0e98447e9a8389d1e4 --- /dev/null +++ b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_content_list.json @@ -0,0 +1,4844 @@ +[ + { + "type": "text", + "text": "WHERE TO DIFFUSE, HOW TO DIFFUSE, AND HOW TO GET BACK: AUTOMATED LEARNING FOR MULTIVARI-ATE DIFFUSIONS", + "text_level": 1, + "bbox": [ + 171, + 98, + 823, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Raghav Singhal\\*,1, Mark Goldstein\\*,1, Rajesh Ranganath\\*,2", + "bbox": [ + 179, + 193, + 602, + 209 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Courant Institute of Mathematical Sciences1, New York University", + "bbox": [ + 179, + 209, + 622, + 224 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Center for Data Science2, New York University", + "bbox": [ + 179, + 224, + 496, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 273, + 547, + 290 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Diffusion-based generative models (DBGMs) perturb data to a target noise distribution and reverse this process to generate samples. The choice of noising process, or inference diffusion process, affects both likelihoods and sample quality. For example, extending the inference process with auxiliary variables leads to improved sample quality. While there are many such multivariate diffusions to explore, each new one requires significant model-specific analysis, hindering rapid prototyping and evaluation. In this work, we study Multivariate Diffusion Models (MDMs). 
For any number of auxiliary variables, we provide a recipe for maximizing a lower-bound on the MDMs likelihood without requiring any model-specific analysis. We then demonstrate how to parameterize the diffusion for a specified target noise distribution; these two points together enable optimizing the inference diffusion process. Optimizing the diffusion expands easy experimentation from just a few well-known processes to an automatic search over all linear diffusions. To demonstrate these ideas, we introduce two new specific diffusions as well as learn a diffusion process on the MNIST, CIFAR10, andImagenet32 datasets. We show learned MDMs match or surpass bits-per-dims (BPDs) relative to fixed choices of diffusions for a given dataset and model architecture.", + "bbox": [ + 228, + 306, + 769, + 544 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 569, + 339, + 585 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Diffusion-based generative models (DBGMs) perturb data to a target noise distribution and reverse this process to generate samples. They have achieved impressive performance in image generation, editing, translation (Dhariwal & Nichol, 2021; Nichol & Dhariwal, 2021; Sasaki et al., 2021; Ho et al., 2022), conditional text-to-image tasks (Nichol et al., 2021; Ramesh et al., 2022; Sahara et al., 2022) and music and audio generation (Chen et al., 2020; Kong et al., 2020; Mittal et al., 2021). They are often trained by maximizing a lower bound on the log likelihood, featuring an inference process interpreted as gradually \"noising\" the data (Sohl-Dickstein et al., 2015; Ho et al., 2020).", + "bbox": [ + 169, + 599, + 826, + 700 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The choice of this inference process affects both likelihoods and sample quality. 
On different datasets and models, different inference processes work better; there is no universal best choice of inference, and the choice matters (Song et al., 2020b).", + "bbox": [ + 169, + 705, + 823, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While some work has improved performance by designing score model architectures (Ho et al., 2020; Kingma et al., 2021; Dhariwal & Nichol, 2021), Dockhorn et al. (2021) instead introduce the critically-damped Langevin diffusion (CLD), showing that significant improvements in sample generation can be gained by carefully designing new processes. CLD pairs each data dimension with an auxiliary \"velocity\" variable and diffuses them jointly using second-order Langevin dynamics.", + "bbox": [ + 169, + 753, + 826, + 825 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A natural question: if introducing new diffusions results in dramatic performance gains, why are there only a handful of diffusions (variance-preserving stochastic differential equation (VPSDE), variance exploding (VE), CLD, sub-VPSDE) used in DBGMs? For instance, are there other auxiliary variable diffusions that would lead to improvements like CLD? This avenue seems promising as auxiliary variables have improved other generative models and inferences, such as normalizing flows", + "bbox": [ + 169, + 830, + 828, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal Contribution. 
Correspondence to {rsinghal, goldstein} at nyu.edu.", + "bbox": [ + 197, + 909, + 723, + 925 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "(Huang et al., 2020), neural ordinary differential equations (ODEs) (Dupont et al., 2019), hierarchical variational models (Ranganath et al., 2016), ladder variational autoencoder (Sønderby et al., 2016), among others.", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite its success, CLD also provides evidence that each new process requires significant model-specific analysis. Deriving the evidence lower bound (ELBO) and training algorithm for diffusions is challenging (Huang et al., 2021; Kingma et al., 2021; Song et al., 2021) and is carried out in a case-by-case manner for new diffusions (Campbell et al., 2022). Auxiliary variables seemingly complicate this process further; computing conditionals of the inference process necessitates solving matrix Lyupanov equations (section 3.3). Deriving the inference stationary distribution—which helps the model and inference match—can be intractable. 
These challenges limit rapid prototyping and evaluation of new inference processes.", + "bbox": [ + 169, + 152, + 826, + 266 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Concretely, training a diffusion model requires:", + "bbox": [ + 171, + 271, + 485, + 285 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(R1): Selecting an inference and model process pair such that the inference process converges to the model prior", + "(R2): Deriving the ELBO for this pair", + "(R3): Estimating the ELBO and its gradients by deriving and computing the inference process' transition kernel" + ], + "bbox": [ + 184, + 297, + 823, + 383 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we introduce Multivariate Diffusion Models (MDMs) and a method for training and evaluating them. MDMs are diffusion-based generative models trained with auxiliary variables. We provide a recipe for training MDMs beyond specific instantiations—like VPSDE and CLD—to all linear inference processes that have a stationary distribution, with any number of auxiliary variables.", + "bbox": [ + 169, + 396, + 823, + 454 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "First, we bring results from gradient-based MCMC (Ma et al., 2015) to diffusion modeling to construct MDMs that converge to a chosen model prior (R1); this tightens the ELBO. Secondly, for any number of auxiliary variables, we derive the MDM ELBO (R2). Finally, we show that the transition kernel of linear MDMs, necessary for the ELBO, can be computed automatically and generically, for higher-dimensional auxiliary systems (R3).", + "bbox": [ + 169, + 459, + 826, + 531 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "With these tools, we explore a variety of new inference processes for diffusion-based generative models. 
We then note that the automatic transitions and fixed stationary distributions facilitate directly learning the inference to maximize the MDM ELBO. Learning turns diffusion model training into a search not only over score models but also inference processes, at no extra derivational cost.", + "bbox": [ + 169, + 536, + 826, + 594 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Methodological Contributions. In summary, our methodological contributions are:", + "bbox": [ + 171, + 609, + 736, + 626 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Deriving ELBOs for training and evaluating multivariate diffusion models (MDMs) with auxiliary variables.", + "2. Showing that the diffusion transition covariance does not need to be manually derived for each new diffusion. We instead demonstrate that a matrix factorization technique, previously unused in diffusion models, can automatically compute the covariance analytically for any linear MDM.", + "3. Using results from gradient-based Markov chain Monte Carlo (MCMC) to construct MDMs with a complete parameterization of inference processes whose stationary distribution matches the model prior.", + "4. Combining the above into an algorithm called Automatic Multivariate Diffusion Training (AMDT) that enables training without diffusion-specific derivations. AMDT enables training score models for any linear diffusion, including optimizing the diffusion and score jointly." + ], + "bbox": [ + 210, + 637, + 823, + 827 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To demonstrate these ideas, we develop MDMs with two specific diffusions as well as learned multivariate diffusions. The specific diffusions are accelerated Langevin diffusion (ALDA) (introduced in Mou et al. (2019) as a higher-order scheme for gradient-based MCMC) and an alteration, modified accelerated Langevin diffusion (MALDA). 
Previously, using these diffusions for generative modeling would require significant model-specific analysis. Instead, AMDT for these diffusions is derivation-free.", + "bbox": [ + 169, + 840, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Empirical contributions. We train MDMs on the MNIST,Imagenet32 and CIFAR-10 datasets. In the experiments, we show that:", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Training new and existing fixed diffusions, such as ALDA and MALDA, is easy with the proposed algorithm AMDT.", + "2. Using AMDT to learn the choice of diffusion for the MDM matches or surpasses the performance of fixed choices of diffusion process; sometimes the learned diffusion and VPSDE do best; other times the learned diffusion and CLD do best.", + "3. There are new and existing MDMs, trained and evaluated with the MDM ELBO, that account for as much performance improvement over VPSDE as a three-fold increase in score model size for a fixed univariate diffusion." + ], + "bbox": [ + 209, + 143, + 826, + 266 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "These findings affirm that the choice of diffusion affects the optimization problem, and that learning the choice bypasses the process of choosing diffusions for each new dataset and score architecture. 
We additionally show the utility of the MDM ELBO by showing on a dataset that CLD achieves better bits-per-dims (BPDs) than previously reported with the probability flow ODE (Dockhorn et al., 2021).", + "bbox": [ + 169, + 279, + 825, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 SETUP", + "text_level": 1, + "bbox": [ + 171, + 357, + 263, + 372 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We present diffusions by starting with the generative model and then describing its likelihood lower bound (Sohl-Dickstein et al., 2015; Huang et al., 2021; Kingma et al., 2021). Diffusions sample from a model prior $\\mathbf{z}_0\\sim \\pi_\\theta$ and then evolve a continuous-time stochastic process $\\mathbf{z}_t\\in \\mathbb{R}^d$ :", + "bbox": [ + 169, + 388, + 823, + 431 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {z} = h _ {\\theta} (\\mathbf {z}, t) d t + \\beta_ {\\theta} (t) d \\mathbf {B} _ {t}, \\quad t \\in [ 0, T ] \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 439, + 823, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{B}_t$ is a $d$ -dimensional Brownian motion. The model is trained so that $\\mathbf{z}_T$ approximates the data $\\mathbf{x} \\sim q_{\\mathrm{data}}$ .1 Maximum likelihood training of diffusion models is intractable (Huang et al., 2021; Song et al., 2021; Kingma et al., 2021). Instead, they are trained using a variational lower bound on $\\log p_{\\theta}(\\mathbf{z}_T = x)$ . 
The bound requires an inference process $q_{\\phi}(\\mathbf{y}_s | \\mathbf{x} = x)$ :2", + "bbox": [ + 169, + 463, + 823, + 521 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = f _ {\\phi} (\\mathbf {y}, s) d s + g _ {\\phi} (s) d \\widehat {\\mathbf {B}} _ {s}, \\quad s \\in [ 0, T ] \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 530, + 823, + 547 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\widehat{\\mathbf{B}}_s$ is another Brownian motion independent of $\\mathbf{B}_t$ . The inference process is usually taken to be specified rather than learned, and chosen to be i.i.d. for each $y_{tj}$ conditional on each $x_j$ . This leads to the interpretation of the $y_{tj}$ as noisy versions of features $x_j$ (Ho et al., 2020). While the diffusion ELBO is challenging to derive in general, Huang et al. (2021); Song et al. (2021) show that when the model process takes the form:", + "bbox": [ + 169, + 556, + 825, + 628 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {z} = \\left[ g _ {\\phi} ^ {2} (T - t) s _ {\\theta} (\\mathbf {z}, T - t) - f _ {\\phi} (\\mathbf {z}, T - t) \\right] d t + g _ {\\phi} (T - t) d \\mathbf {B} _ {t}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 635, + 823, + 655 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the ELBO is:", + "bbox": [ + 171, + 661, + 256, + 674 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\log p _ {\\theta} (x) \\geq \\mathcal {L} ^ {\\mathrm {i s m}} (x) = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) + \\int_ {0} ^ {T} - \\frac {1}{2} \\| s _ {\\theta} \\| _ {g _ {\\phi} ^ {2}} ^ {2} - \\nabla \\cdot (g _ {\\phi} ^ {2} s _ {\\theta} - f _ {\\phi}) d s \\right], \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 681, + 823, + 722 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where 
$f_{\\phi}, g_{\\phi}, s_{\\theta}$ are evaluated at $(\\mathbf{y}_s, s)$ , $\\|\\mathbf{x}\\|_{\\mathbf{A}}^2 = \\mathbf{x}^\\top \\mathbf{A}\\mathbf{x}$ and $g^2 = gg^\\top$ . Equation (4) features the Implicit Score Matching (ISM) loss (Song et al., 2020a), and can be re-written as an ELBO $\\mathcal{L}^{\\mathrm{dsm}}$ featuring Denoising Score Matching (DSM) (Vincent, 2011; Song et al., 2020b), see appendix F.1.", + "bbox": [ + 169, + 729, + 823, + 776 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 A RECIPE FOR MULTIVARIATE DIFFUSION MODELS", + "text_level": 1, + "bbox": [ + 171, + 796, + 633, + 811 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As has been shown in prior work (Song et al., 2021; Dockhorn et al., 2021), the choice of diffusion matters. Drawing on principles from previous generative models (section 6), we can consider a wide class of diffusion inference processes by constructing them using auxiliary variables.", + "bbox": [ + 169, + 828, + 823, + 871 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Following Huang et al. (2021); Dockhorn et al. (2021) we integrate all processes in forward time 0 to $T$ . 
It may be helpful to think of an additional variable $\\hat{\\mathbf{x}}_t \\triangleq \\mathbf{z}_{T-t}$ so that $\\hat{\\mathbf{x}}_0$ approximates $\\mathbf{x} \\sim q_{\\mathrm{data}}$ .", + "bbox": [ + 169, + 881, + 823, + 910 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "2We use $\\mathbf{y}$ as the inference variable over the same space as the model's $\\mathbf{z}$ .", + "bbox": [ + 192, + 910, + 627, + 922 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "At first glance, training such diffusions can seem challenging. First, one needs an ELBO that includes auxiliary variables. This ELBO will require sampling from the transition kernel, and setting the model prior to the specified inference stationary distribution. But doing such diffusion-specific analysis manually is challenging and hinders rapid prototyping.", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section we show how to address these challenges and introduce an algorithm, AMDT, to simplify and automate modeling with MDMs. AMDT can be used to train new and existing diffusions, including those with auxiliary variables, and including those that learn the inference process. 
In appendix A we discuss how the presented methods can also be used to automate and improve simplified score matching and noise prediction objectives used to train diffusion models.", + "bbox": [ + 169, + 166, + 823, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 MULTIVARIATE MODEL AND INFERENCE", + "text_level": 1, + "bbox": [ + 171, + 255, + 500, + 268 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the $j^{th}$ data coordinate at each time $t$ , MDMs pair $\\mathbf{z}_{tj} \\in \\mathbb{R}$ with a vector of auxiliary variables $\\mathbf{v}_{tj} \\in \\mathbb{R}^{K-1}$ into a joint vector $\\mathbf{u}_t$ and diffuse in the extended space:", + "bbox": [ + 169, + 281, + 823, + 313 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {u} _ {0} \\sim \\pi_ {\\theta}, \\quad d \\mathbf {u} = h _ {\\theta} \\left(\\mathbf {u} _ {t} = \\left[ \\begin{array}{l} \\mathbf {z} _ {t} \\\\ \\mathbf {v} _ {t} \\end{array} \\right], t\\right) d t + \\beta_ {\\theta} (t) d \\mathbf {B} _ {t}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 321, + 825, + 354 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MDMs model the data $\\mathbf{x}$ with $\\mathbf{z}_T$ , a coordinate in $\\mathbf{u}_T \\sim p_\\theta$ . For the $j^{th}$ feature $\\mathbf{x}_j$ , each $\\mathbf{u}_{tj} \\in \\mathbb{R}^K$ consists of a \"data\" dimension $\\mathbf{u}_{tj}^z$ and auxiliary variable $\\mathbf{u}_{tj}^v$ . Therefore $\\mathbf{u} \\in \\mathbb{R}^{dK}$ . We extend the drift coefficient $h_\\theta$ from a function in $\\mathbb{R}^d \\times \\mathbb{R}_+ \\to \\mathbb{R}^d$ to the extended space $\\mathbb{R}^{dK} \\times \\mathbb{R}_+ \\to \\mathbb{R}^{dK}$ . 
We likewise extend the diffusion coefficient to a matrix $\\beta_\\theta$ acting on Brownian motion $\\mathbf{B}_t \\in \\mathbb{R}^{dK}$ .", + "bbox": [ + 169, + 364, + 823, + 428 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Because the MDM model is over the extended space, the inference distribution $\\mathbf{y}$ must be too. We then set $q(\\mathbf{y}_0^v |\\mathbf{y}_0^z = x)$ to any chosen initial distribution, e.g. $\\mathcal{N}(\\mathbf{0},\\mathbf{I})$ and discuss this choice in section 4. Then $\\mathbf{y}_s$ evolves according to the auxiliary variable inference process:", + "bbox": [ + 169, + 431, + 825, + 476 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = f _ {\\phi} (\\mathbf {y}, s) d s + g _ {\\phi} (s) d \\widehat {\\mathbf {B}} _ {s}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 484, + 823, + 503 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the inference drift and diffusion coefficients $f_{\\phi}, g_{\\phi}$ are now over the extended space $\\mathbf{y} = [\\mathbf{y}^z, \\mathbf{y}^v]$ . The function $f_{\\phi}$ lets the $z$ and $v$ coordinates of $\\mathbf{y}_{tj}$ interact in the inference process.", + "bbox": [ + 169, + 512, + 823, + 542 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ASSUMPTIONS", + "text_level": 1, + "bbox": [ + 171, + 559, + 279, + 571 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This work demonstrates how to parameterize time-varying Itô processes, used for diffusion modeling, to have a stationary distribution that matches the given model prior. 
To take advantage of the automatic transition kernels also presented, the inferences considered for modeling are linear time-varying processes and take the form:", + "bbox": [ + 169, + 585, + 825, + 642 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = \\mathbf {A} _ {\\phi} (s) \\mathbf {y} d s + g _ {\\phi} (s) d \\mathbf {B} _ {s}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 651, + 599, + 667 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{A}_{\\phi}(s):\\mathbb{R}_{+}\\to dK\\times dK$ and $g_{\\phi}(s):\\mathbb{R}_{+}\\to dK\\times dK$ are matrix-valued functions.", + "bbox": [ + 169, + 675, + 777, + 691 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 ELBO FOR MDMS", + "text_level": 1, + "bbox": [ + 171, + 709, + 331, + 723 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We now show how to train MDMs to optimize a lower bound on the log likelihood of the data. Like in the univariate case, we use the parameterization in eq. (3) to obtain a tractable ELBO.", + "bbox": [ + 169, + 734, + 823, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Theorem 1. 
The MDM log marginal likelihood of the data is lower-bounded by:", + "bbox": [ + 171, + 768, + 700, + 785 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\log p _ {\\theta} (x) \\geq \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\underbrace {\\log \\pi_ {\\theta} (\\mathbf {y} _ {T})} _ {\\ell_ {T}} - \\int_ {0} ^ {T} \\frac {1}{2} \\| s _ {\\theta} \\| _ {g _ {\\phi} ^ {2}} ^ {2} + \\nabla \\cdot (g _ {\\phi} ^ {2} s _ {\\theta} - f _ {\\phi}) d s - \\underbrace {\\log q _ {\\phi} (\\mathbf {y} _ {0} ^ {v} | x)} _ {\\ell_ {q}} \\right] \\quad (\\mathcal {L} ^ {m i s m}) \\\\ = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\ell_ {T} + \\int_ {0} ^ {T} \\frac {1}{2} \\| s _ {\\phi} \\| _ {g _ {\\phi} ^ {2}} ^ {2} - \\frac {1}{2} \\| s _ {\\theta} - s _ {\\phi} \\| _ {g _ {\\phi} ^ {2}} ^ {2} + (\\nabla \\cdot f _ {\\phi}) d s - \\ell_ {q} \\right] \\quad \\left(\\mathcal {L} ^ {m d s m}\\right). \\tag {7} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 792, + 825, + 898 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where divergences and gradients are taken with respect to $\\mathbf{y}_s$ and $s_{\\phi} = \\nabla_{\\mathbf{y}_s}\\log q_{\\phi}(\\mathbf{y}_s|x)$", + "bbox": [ + 171, + 909, + 764, + 926 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Proof. The proof for the MDM ISM ELBO $\\mathcal{L}^{\\mathrm{mism}}$ is in appendix F. In short, we introduce auxiliary variables, apply Theorem 1 of Huang et al. (2021) (equivalently, Theorem 3 of Song et al. (2021) or appendix E of Kingma et al. (2021)) to the joint space, and then apply an additional variational bound to $\\mathbf{v}_0$ . 
The MDM DSM ELBO $\\mathcal{L}^{\\mathrm{mdsm}}$ is likewise derived in appendix F, similarly to Huang et al. (2021); Song et al. (2021), but extended to multivariate diffusions.", + "bbox": [ + 169, + 103, + 823, + 176 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We train MDM's by estimating the gradients of $\\mathcal{L}^{\\mathrm{mdsm}}$ , as estimates of $\\mathcal{L}^{\\mathrm{mism}}$ can be computationally prohibitive. For numerical stability, the integral in eq. (7) is computed on $[\\epsilon, T]$ rather than $[0, T]$ . One can regard this as a bound for a variable $\\mathbf{u}_{\\epsilon}$ . To maintain a proper likelihood bound for the data, one can choose a likelihood $\\mathbf{u}_0|\\mathbf{u}_{\\epsilon}$ and compose bounds as we demonstrate in appendix I. We report the ELBO with this likelihood term, which plays the same role as the discretized Gaussian in Nichol & Dhariwal (2021) and Tweedie's formula in Song et al. (2021).", + "bbox": [ + 169, + 188, + 826, + 273 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 INGREDIENT 1: COMPUTING THE TRANSITION $q_{\\phi}(\\mathbf{y}_s|x)$", + "text_level": 1, + "bbox": [ + 171, + 287, + 599, + 304 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To estimate eq. (7) and its gradients, we need samples from $q(\\mathbf{y}_s|x)$ and to compute $\\nabla \\log q(\\mathbf{y}_s|x)$ . While an intractable problem for MDMs in general, we provide two ingredients for tightening and optimizing these bounds in a generic fashion for linear inference MDMs.", + "bbox": [ + 169, + 313, + 823, + 357 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We first show how to automate computation of $q(\\mathbf{y}_s|\\mathbf{y}_0)$ and then $q(\\mathbf{y}_s|x)$ . 
For linear MDMs of the form:", + "bbox": [ + 169, + 363, + 823, + 390 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = \\mathbf {A} (s) \\mathbf {y} d s + g (s) d \\mathbf {B} _ {s},\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 393, + 591, + 409 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the transition kernel $q(\\mathbf{y}_s|\\mathbf{y}_0)$ is Gaussian (Särkkä & Solin, 2019). Let $f(\\mathbf{y}, s) = \\mathbf{A}(s)\\mathbf{y}$. Then, the mean and covariance are solutions to the following ODEs:", + "bbox": [ + 169, + 410, + 823, + 439 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {m} _ {s | 0} / d s = \\mathbf {A} (s) \\mathbf {m} _ {s | 0}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 441, + 500, + 458 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nd \\boldsymbol {\\Sigma} _ {s | 0} / d s = \\mathbf {A} (s) \\boldsymbol {\\Sigma} _ {s | 0} + \\boldsymbol {\\Sigma} _ {s | 0} \\mathbf {A} ^ {\\top} (s) + g ^ {2} (s). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 460, + 823, + 479 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The mean can be solved analytically:", + "bbox": [ + 171, + 479, + 419, + 494 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {m} _ {s \\mid 0} = \\exp \\left[ \\int_ {0} ^ {s} \\mathbf {A} (\\nu) d \\nu \\right] \\mathbf {y} _ {0} \\underbrace {= \\exp (s \\mathbf {A}) \\mathbf {y} _ {0}} _ {\\text {no integration if } \\mathbf {A} (\\nu) = \\mathbf {A}}. \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 496, + 823, + 540 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The covariance equation does not have as simple a solution as eq. 
(9) because the unknown matrix $\\pmb{\\Sigma}_{s|0}$ is being multiplied both from the left and the right.", + "bbox": [ + 169, + 542, + 823, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Instead of solving eq. (8) for a specific diffusion manually, as done in previous work (e.g. pages 50-54 of Dockhorn et al. (2021)), we show that a matrix factorization technique (Särkkä & Solin (2019), sec. 6.3) previously unused in diffusion-based generative models can automatically compute $\\Sigma_{s|0}$ generically for any linear MDM. Define $\\mathbf{C}_s$ , $\\mathbf{H}_s$ that evolve according to:", + "bbox": [ + 169, + 577, + 823, + 633 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{pmatrix} d \\mathbf {C} _ {s} / d s \\\\ d \\mathbf {H} _ {s} / d s \\end{pmatrix} = \\begin{pmatrix} \\mathbf {A} (s) & g ^ {2} (s) \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} (s) \\end{pmatrix} \\begin{pmatrix} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{pmatrix}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 635, + 823, + 669 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "then $\\mathbf{\\Sigma}_{s|0} = \\mathbf{C}_s\\mathbf{H}_s^{-1}$ for $\\mathbf{C}_0 = \\mathbf{\\Sigma}_0$ and $\\mathbf{H}_0 = \\mathbf{I}$ (Appendix D). 
These equations can be solved in closed-form,", + "bbox": [ + 169, + 671, + 823, + 699 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{pmatrix} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{pmatrix} = \\exp \\left[ \\begin{pmatrix} [ \\mathbf {A} ] _ {s} & [ g ^ {2} ] _ {s} \\\\ \\mathbf {0} & - [ \\mathbf {A} ^ {\\top} ] _ {s} \\end{pmatrix} \\right] \\begin{pmatrix} \\boldsymbol {\\Sigma} _ {0} \\\\ \\mathbf {I} \\end{pmatrix} \\underbrace {= \\exp \\left[ s \\begin{pmatrix} \\mathbf {A} & g ^ {2} \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} \\end{pmatrix} \\right]} _ {\\text {no integration if } \\mathbf {A} (\\nu) = \\mathbf {A}, g (\\nu) = g} \\begin{pmatrix} \\boldsymbol {\\Sigma} _ {0} \\\\ \\mathbf {I} \\end{pmatrix}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 227, + 700, + 823, + 762 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $[\\mathbf{A}]_s = \\int_0^s\\mathbf{A}(\\nu)d\\nu$ . To condition on $\\mathbf{y}_0 = (x,v)$ , we set $\\pmb {\\Sigma}_0 = \\mathbf{0}$.", + "bbox": [ + 169, + 763, + 648, + 782 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Computing $q_{\\phi}(\\mathbf{y}_s|x)$ . 
For the covariance $\\pmb{\\Sigma}_{s|0}$ , to condition on $x$ instead of $\\mathbf{y}_0$ , we set $\\pmb{\\Sigma_0}$ to", + "bbox": [ + 169, + 795, + 795, + 811 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Sigma} _ {0} = \\left( \\begin{array}{c c} 0 & 0 \\\\ 0 & \\boldsymbol {\\Sigma} _ {\\mathbf {v} _ {0}} \\end{array} \\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 429, + 814, + 563, + 847 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To compute the mean, it is the same expression as for $q(\\mathbf{y}_s|\\mathbf{y}_0)$ , but with a different initial condition:", + "bbox": [ + 169, + 848, + 823, + 864 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {m} _ {s \\mid 0} = \\exp \\left[ \\int_ {0} ^ {s} \\mathbf {A} (\\nu) d \\nu \\right] \\binom {x} {\\mathbb {E} _ {q} [ \\mathbf {y} _ {0} ^ {v} | x ]} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 864, + 823, + 898 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "See appendix D for more details.", + "bbox": [ + 169, + 898, + 390, + 914 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Automatic Multivariate Diffusion Training" + ], + "code_body": "Input: Data $\\{x_i\\}$ , inference process matrices $\\mathbf{Q}_{\\phi}, \\mathbf{D}_{\\phi}$ , model prior $\\pi_{\\theta}$ , initial distribution $q_{\\phi}(\\mathbf{y}_0^v | x)$ , and score model architecture $s_\\theta$ \nReturns: Trained score model $s_\\theta$ \nwhile $s_\\theta$ not converged do \n Sample $x \\sim \\sum_{i=1}^{N} \\frac{1}{N} \\delta_{x_i}$ , $v_0 \\sim q_{\\phi}(\\mathbf{y}_0^v | x)$ \n Sample $\\mathbf{s} \\sim 
\\mathbf{U}[0, T]$ and $\\mathbf{y}_s, \\mathbf{y}_T \\sim q_{\\phi}(\\mathbf{y}_s | x)$ using algorithm 2 \n Estimate the stochastic gradient of the MDM ELBO, $\\nabla_\\theta \\mathcal{L}(\\theta, \\phi)$ , using eq. (7) \n $\\theta \\leftarrow \\theta + \\alpha \\nabla_\\theta \\mathcal{L}(\\theta, \\phi)$ . \n if learning inference then \n $\\phi \\leftarrow \\phi + \\alpha \\nabla_\\phi \\mathcal{L}(\\theta, \\phi)$ \nend if \nend while \nOutput $s_\\theta$", + "bbox": [ + 173, + 119, + 823, + 305 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A fast and simple algorithm. We show in algorithm 2 (appendix H) that computing the transition kernel only requires knowing $f, g$ and requires no diffusion-specific analysis. For $K - 1$ auxiliary variables, $\\mathbf{A}, g$ are $K \\times K$ . Like for scalar diffusions, these parameters are shared across data coordinates. This means matrix exponentials and inverses are done on $K \\times K$ matrices, where $K$ is only 2 or 3 in our experiments. In table 1, we compare the time to sample a batch of size 256 from the transition kernel for CIFAR 10 and MNIST. The table shows the extra computa", + "bbox": [ + 169, + 330, + 553, + 469 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1: Runtime Comparison: we compare the run time of sampling from the CLD diffusion analytically versus using the automated algorithm.", + "bbox": [ + 563, + 332, + 823, + 390 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/a061757b1d390105d15c80009563af533b0d52b3df5dd5021a6a2604c3b5e9f7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodCIFAR-10MNIST
Analytical0.0270.0062
Automated0.0290.007
", + "bbox": [ + 570, + 400, + 807, + 443 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tional cost of the automated algorithm is negligible. This automation likewise applies to simplified score matching and noise prediction objectives, since all rely on $q_{\\phi}(\\mathbf{y}_s|x)$ (appendix A).", + "bbox": [ + 169, + 469, + 823, + 500 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 INGREDIENT 2: MDM PARAMETERIZATION", + "text_level": 1, + "bbox": [ + 171, + 513, + 509, + 527 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The MDM ELBO (eq. (7)) is tighter when the inference $\\mathbf{y}_T$ tends toward the model's prior $\\pi_{\\theta}$ . Here we construct inference processes with the model prior $\\pi_{\\theta}$ as a specified stationary distribution $q_{\\infty}$ .", + "bbox": [ + 169, + 541, + 823, + 570 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Ma et al. (2015) provide a complete recipe for constructing gradient-based MCMC samplers; the recipe constructs non-linear time-homogeneous Itô processes with a given stationary distribution, and show that the parameterization spans all such Itô processes with that stationary distribution.", + "bbox": [ + 169, + 575, + 823, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Diffusion models usually have time-varying drift and diffusion coefficients (e.g. use of the $\\beta(t)$ function). To build diffusion models that match the model prior, we first extend Theorem 1 from Ma et al. (2015) to construct non-linear Itô processes with time-varying drift and diffusion coefficients with a given stationary distribution (Appendix C). Then, to keep transitions tractable (per Section 3.3), we specialize this result to linear Itô diffusions.", + "bbox": [ + 169, + 625, + 823, + 696 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We directly state the result for linear time-varying diffusions with stationary distributions. 
The parameterization requires a skew-symmetric matrix $-\\mathbf{Q}(s) = \\mathbf{Q}(s)^{\\top}$ , a positive semi-definite matrix $\\mathbf{D}(s)$ , and a function $\\nabla H(\\mathbf{y})$ such that the desired stationary distribution $q_{\\infty}$ is proportional to $\\exp[-H(\\mathbf{y})]$ . Linear Itô diffusions have Gaussian stationary distributions (Särkkä & Solin, 2019) meaning that $\\nabla H$ is linear and can be expressed as $\\mathbf{S}\\mathbf{y}$ for some matrix $\\mathbf{S}$ . For a matrix $\\mathbf{A}$ , let $\\sqrt{\\mathbf{A}}$ refer to the matrix square root defined by $\\mathbf{a} = \\sqrt{\\mathbf{A}} \\Longleftrightarrow \\mathbf{A} = \\mathbf{aa}^{\\top}$ . Then, the Itô diffusion:", + "bbox": [ + 169, + 702, + 823, + 790 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = \\underbrace {- \\left[ \\mathbf {Q} (s) + \\mathbf {D} (s) \\right] \\mathbf {S y}} _ {f (\\mathbf {y}, s)} d s + \\underbrace {\\sqrt {2 \\mathbf {D} (s)}} _ {g (s)} d \\widehat {\\mathbf {B}} _ {s}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 797, + 823, + 844 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "has Gaussian stationary distribution $\\mathcal{N}(\\mathbf{0},\\mathbf{S}^{-1})$ where $\\mathbf{Q}(s),\\mathbf{D}(s)$ and $\\mathbf{S}$ are parameters. For a discussion of convergence to the stationary distribution, as well as skew-symmetric and positive semi-definite parameterizations, see appendix C, where we also show that existing diffusion processes such as VPSDE and CLD are included in $\\mathbf{Q} / \\mathbf{D}$ parameterization. 
We display the ELBO in terms of $\\mathbf{Q} / \\mathbf{D}$ in appendix G and an algorithm in appendix H.", + "bbox": [ + 169, + 853, + 825, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For score matching and noise prediction losses and a given $q_{\\phi}$ , achieving a minimizing value with respect to $s_{\\theta}$ does not imply that the generative model score will match the inference score. Modeling the data also requires the marginal distribution of $q_{\\phi, T}$ to approximate $\\pi$ . When $q_{\\phi}$ is constant, it is important to confirm the stationary distribution is appropriately set, and the tools used here for the ELBO can be used to satisfy this requirement for score matching and noise prediction (appendix A).", + "bbox": [ + 169, + 103, + 826, + 176 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.5 LEARNING THE INFERENCE PROCESS", + "text_level": 1, + "bbox": [ + 171, + 190, + 472, + 204 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The choice of diffusion matters, and the ELBOs in eq. (7) have no requirement for fixed $q_{\\phi}$ . We therefore learn the inference process jointly with $s_{\\theta}$ . Under linear transitions (ingredient 1), no algorithmic details change as the diffusion changes during training. Under stationary parameterization (ingredient 2), we can learn without the stationary distribution going awry. 
In the experiments, learning matches or surpasses BPDs of fixed diffusions for a given dataset and score architecture.", + "bbox": [ + 169, + 215, + 823, + 287 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In $\\mathcal{L}^{\\mathrm{mdsm}}$ or $\\mathcal{L}^{\\mathrm{mism}}$ , $q_{\\phi, \\infty}$ may be set to equal $\\pi_{\\theta}$ , but it is $\\mathbf{y}_T \\sim q_{\\phi, T}$ for the chosen $T$ that is featured in the ELBO. Learning $q_{\\phi}$ can choose $\\mathbf{y}_T$ to reduce the cross-entropy:", + "bbox": [ + 169, + 292, + 825, + 321 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n- \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} _ {T} | x)} [ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) ]. \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 325, + 823, + 344 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Minimizing eq. (14) will tighten the ELBO for any $s_\\theta$ . Next, $q_\\phi$ is featured in the remaining terms that feature $s_\\theta$ ; optimizing for $q_\\phi$ will tighten and improve the ELBO alongside $s_\\theta$ . Finally, $q_\\phi$ is featured in the expectations and the $-\\log q_\\phi$ term:", + "bbox": [ + 169, + 345, + 823, + 390 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\log p _ {\\theta} \\left(\\mathbf {u} _ {T} ^ {z} = x\\right) \\geq \\underbrace {\\mathbb {E} _ {q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} = v \\mid x\\right)}} \\left[ \\left(\\mathcal {L} ^ {\\mathrm {d s m}} \\text { or } \\mathcal {L} ^ {\\mathrm {i s m}}\\right) \\underbrace {- \\log q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} = v \\mid x\\right)} \\right] \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 393, + 825, + 426 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The $q_{\\phi}(\\mathbf{y}_0^v |x)$ terms impose an optimality condition that $p_{\\theta}(\\mathbf{u}_T^v |\\mathbf{u}_T^z) = q_{\\phi}(\\mathbf{y}_0^v |\\mathbf{y}_0^z)$ (appendix E). 
When it is satisfied, no looseness in the ELBO is due to the initial time zero auxiliary variables.", + "bbox": [ + 169, + 436, + 823, + 467 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To learn, $\\mathbf{Q},\\mathbf{D}$ need to be specified with parameters $\\phi$ that enable gradients. We keep $\\mathbf{S}$ fixed at inverse covariance of $\\pi_{\\theta}$ . The transition kernel $q_{\\phi}(\\mathbf{y}_s|x)$ depends on $\\mathbf{Q},\\mathbf{D}$ through its mean and covariance. Gaussian distributions permit gradient estimation with reparameterization or score-function gradients (Kingma & Welling, 2013; Ranganath et al., 2014; Rezende & Mohamed, 2015; Titsias & Lázaro-Gredilla, 2014). Reparameterization is accomplished via:", + "bbox": [ + 169, + 472, + 823, + 544 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} _ {s} = \\mathbf {m} _ {s | 0} + \\mathbf {L} _ {s | 0} \\epsilon \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 429, + 549, + 823, + 566 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\epsilon \\sim \\mathcal{N}(0, I_{dK})$ and $\\mathbf{L}_{s|0}$ satisfies $\\mathbf{L}_{s|0} \\mathbf{L}_{s|0}^{\\top} = \\boldsymbol{\\Sigma}_{s|0}$ , derived using coordinate-wise Cholesky decomposition. Gradients flow through eq. (16) from $\\mathbf{y}_s$ to $\\mathbf{m}_{s|0}$ and $\\boldsymbol{\\Sigma}_{s|0}$ to $\\mathbf{Q}$ , $\\mathbf{D}$ to parameters $\\phi$ .", + "bbox": [ + 169, + 571, + 823, + 603 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Algorithm 1 displays Automatic Multivariate Diffusion Training (AMDT). 
AMDT provides a training method for diffusion-based generative models for either fixed $\\mathbf{Q}$ , $\\mathbf{D}$ matrices or for learning the $\\mathbf{Q}_{\\phi}, \\mathbf{D}_{\\phi}$ matrices, without requiring any diffusion-specific analysis.", + "bbox": [ + 169, + 608, + 823, + 654 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Learning in other diffusion objectives. Like in the ELBO, learning in score matching or noise prediction objectives can improve the match between the inference process and implied generative model (appendix A).", + "bbox": [ + 169, + 666, + 823, + 709 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 INSIGHTS INTO MULTIVARIATE DIFFUSIONS", + "text_level": 1, + "bbox": [ + 171, + 728, + 571, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Scalar versus Multivariate Processes. Equation (13) clarifies what can change while preserving $q_{\\infty}$ . Recall that $\\mathbf{Q}$ and $\\mathbf{D}$ are $K \\times K$ for $K - 1$ auxiliary variables. Because $0$ is the only $1 \\times 1$ skew-symmetric matrix, scalar processes must set $\\mathbf{Q} = 0$ . With $q_{\\phi,\\infty} = \\mathcal{N}(0,\\mathbf{I})$ , the process is:", + "bbox": [ + 169, + 758, + 823, + 801 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = - \\mathbf {D} (s) \\mathbf {y} d s + \\sqrt {2 \\mathbf {D} (s)} d \\widehat {\\mathbf {B}} _ {s}. \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 806, + 823, + 825 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "What is left is the VPSDE process used widely in diffusion models where $\\mathbf{D}(s) = \\frac{1}{2}\\beta (s)$ is $1\\times 1$ (Song et al., 2020b). This reveals that the VPSDE process is the only scalar diffusion with a stationary distribution. 
This also clarifies the role of $\\mathbf{Q}$ : it accounts for mixing between dimensions in multivariate processes, as do non-diagonal entries in $\\mathbf{D}$ for $K > 1$ .", + "bbox": [ + 169, + 830, + 825, + 888 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "3There are processes such as sub-VPSDE (Song et al., 2020b) which are covered in the sense that they tend to members of this parameterization as $T$ grows: sub-VP converges to VPSDE.", + "bbox": [ + 169, + 897, + 825, + 925 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "CLD optimizes a log-likelihood lower bound. Differentiating $\\mathcal{L}^{\\mathrm{mdsm}}$ (eq. (7)) with respect to the score model parameters, we show that the objective for CLD (Dockhorn et al., 2021) maximizes a lower bound on $\\log p_{\\theta}(x)$ , not just $\\log p_{\\theta}(\\mathbf{u}_0)$ , without appealing to the probability flow ODE.", + "bbox": [ + 169, + 103, + 823, + 148 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Does my model use auxiliary variables? An example initial distribution is $q(\\mathbf{y}_0^v |x) = \\mathcal{N}(0,\\mathbf{I})$ . It is also common to set $\\pi_{\\theta} = \\mathcal{N}(0,\\mathbf{I})$ . Because the optimum for diffusions is $p_{\\theta} = q$ , the optimal model has main and auxiliary dimensions independent at endpoints 0 and $T$ . Does this mean that the model does not use auxiliary variables? In appendix B, we show that in this case the model can still use auxiliary variables at intermediate times. 
A sufficient condition is non-diagonal $\\mathbf{Q} + \\mathbf{D}$ .", + "bbox": [ + 169, + 161, + 823, + 233 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 252, + 328, + 268 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We test the MDM framework with handcrafted and learned diffusions. The handcrafted diffusions are (a) ALDA, used in (Mou et al., 2019) for accelerated gradient-based MCMC sampling (eq. (32)) and (b) MALDA: a modified version of ALDA (eq. (33)). Both have two auxiliary variables. We also learn diffusions with 1 and 2 auxiliary variables. We compare with VPSDE and ELBO-trained CLD.", + "bbox": [ + 169, + 284, + 823, + 342 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/fdf8d67ef7ab386142ecce30396d5f2a1d6315eef6683da71db7614f71f6e3f6.jpg", + "table_caption": [ + "Table 2: BPD upper-bounds on image generation for a fixed architecture. CIFAR-10: learning outperforms CLD, and both outperform the standard choice of VPSDE. MNIST: learning matches VPSDE while the fixed auxiliary diffusions are worse. IMAGENET32: all perform similarly. Learning matches or surpasses the best fixed diffusion, while bypassing the need to choose a diffusion." + ], + "table_footnote": [], + "table_body": "
ModelKCIFAR-10IMAGENET32MNIST
VPSDE13.203.701.26
Learned23.073.711.28
Learned33.083.721.33
CLD23.113.701.35
MALDA33.133.721.65
ALDA329.4333.08124.60
", + "bbox": [ + 313, + 420, + 683, + 522 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/01192ce71982b1e7db7996fd6356dc767ae1a7fde3f013b46de19085bbbf4b46.jpg", + "table_caption": [ + "Table 3: Parameter Efficiency. The first two rows display diffusions from previous work: VPSDE and CLD, both using score models with 108 million parameters on CIFAR-10. We train the rest using a score model with 35.7 million parameters. The learned diffusion matches the performance of VPSDE-large; changes in the inference can account for as much improvement as a 3x increase in score parameters. BPDs are upper-bounds." + ], + "table_footnote": [], + "table_body": "
ModelKParametersCIFAR-10
VPSDE-large (Song et al., 2021)1108M3.08
CLD-large (Dockhorn et al., 2021)2108M3.31
Learned235.7M3.07
CLD235.7M3.11
VPSDE135.7M3.20
", + "bbox": [ + 271, + 625, + 725, + 712 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Following prior work, we train DBGMs for image generation. We use the U-Net from Ho et al. (2020). We input the auxiliary variables as extra channels, which only increases the score model parameters in the input and output convolutions (CLD and Learned 2 have 7,000 more parameters than VPSDE on CIFAR-10 and IMAGENET32 and only 865 more for MNIST). We use simple uniform dequantization. We report estimates of $\\mathcal{L}^{\\mathrm{mdsm}}$ (which reduces to the standard $\\mathcal{L}^{\\mathrm{dsm}}$ for $K = 1$ ). We sample times using the importance sampling distribution from Song et al. (2021) with truncation set to $\\epsilon = 10^{-3}$ . To ensure the truncated bound is proper, we use a likelihood described in appendix I.", + "bbox": [ + 169, + 727, + 823, + 825 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Results. Table 2 shows that the inference process matters and displays. It displays DBGMs that we train and evaluate on CIFAR-10, IMAGENET32 and MNIST. This includes the existing VPSDE and CLD, the new MALDA and ALDA, and the new learned inference processes. All are trained with the 35.7M parameter architecture. For CIFAR-10, learning outperforms CLD, and both outperform the standard choice of VPSDE. For MNIST, learned diffusions match VPSDE while the three fixed auxiliary diffusions are worse. On IMAGENET32, all perform similarly. The take-away is that learning", + "bbox": [ + 169, + 839, + 825, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "matches or surpasses the best fixed diffusion performance and bypasses the choice of diffusion for each new dataset or score architecture. 
In Figure 1 we plot the generated samples from CIFAR10.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 3's first two rows display diffusion models from previous work: VPSDE (Song et al., 2021) and CLD (Dockhorn et al., 2021) both with the 108 million score model from Song et al. (2021) (labeled \"large\"). The rest are DBGMs that we train using the U-Net with 35.7 million parameters for CIFAR-10 and IMAGENET32 and 1.1 million for MNIST. Despite using significantly fewer parameters, the learned diffusion achieves similar BPD compared to the larger models, showing that changes in inference can account for as much improvement as a three-fold increase in parameters. While the larger architecture requires two GPUs for batch size 128 on CIFAR-10 on A100s, the smaller one only requires one; exploring inference processes can make diffusions more computationally accessible. Table 3 also demonstrates a tighter bound for CLD trained and evaluated with the MDM ELBO ( $\\leq$ 3.11) relative to existing probability flow-based evaluations (3.31).", + "bbox": [ + 169, + 138, + 826, + 280 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/73a7012161f0d7f4fed755ef1cf3aafd7643b185e471793fa0b40a09f59c1436.jpg", + "image_caption": [ + "Figure 1: CIFAR10 samples generated from the \"learned 2\" and MALDA generative models." + ], + "image_footnote": [], + "bbox": [ + 264, + 297, + 472, + 458 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/429ec45c9b5517fcde31c68a944981b3368910a0b2c7d782d6604750d35a3300.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 527, + 297, + 736, + 458 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 507, + 346, + 523 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Evidence Lower Bounds. Song et al. (2021); Huang et al. 
(2021) derive the ISM and DSM lower bounds on the model log likelihood. Our work extends their analysis to the multivariate diffusion setting to derive lower bounds on the log marginal of the data in the presence of auxiliary variables.", + "bbox": [ + 169, + 537, + 823, + 582 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Auxiliary variables. Dupont et al. (2019) shows that augmented neural ODEs model a richer set of functions and Huang et al. (2020) uses this principle for normalizing flows. Hierarchical variational models and auto-encoders marginalize auxiliary variables to build expressive distributions (Ranganath et al., 2016; Sønderby et al., 2016; Maaløe et al., 2019; Vahdat & Kautz, 2020; Child, 2020). We apply this principle to DBGMs, including and extending CLD (Dockhorn et al., 2021).", + "bbox": [ + 169, + 594, + 823, + 667 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Learning inference. Learning $q_{\\phi}$ with $p_{\\theta}$ is motivated in previous work (Kingma & Welling, 2013; Sohl-Dickstein et al., 2015; Kingma et al., 2021). Kingma et al. (2021) learn the noise schedule for VPSDE. For MDMs, there are parameters to learn beyond the noise schedule: $\\mathbf{Q}$ can be non-zero, $\\mathbf{D}$ can be diagonal or full, $\\mathbf{Q}$ and $\\mathbf{D}$ can be given different time-varying functions, and $\\nabla \\mathbf{H}$ can be learned.", + "bbox": [ + 169, + 678, + 823, + 737 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 DISCUSSION", + "text_level": 1, + "bbox": [ + 171, + 753, + 310, + 768 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We present an algorithm for training multivariate diffusions with linear time-varying inference processes with a specified stationary distribution and any number of auxiliary variables. 
This includes automating transition kernel computation and providing a parameterization of diffusions that have a specified stationary distribution, which facilitate working with new diffusion processes, including learning the diffusion. The experiments show that learning matches or surpasses the best fixed diffusion performance, bypassing the need to choose a diffusion. MDMs achieve BPDs similar to univariate diffusions, with as many as three times more score parameters. The proposed MDM ELBO reports a tighter bound for the existing CLD relative to existing probability flow-based evaluations. This work enables future directions including interactions across data coordinates and using new stationary distributions.", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "8 ACKNOWLEDGEMENTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 398, + 118 + ], + "page_idx": 9 + }, + { + "type": "ref_text", + "text": "This work was generously funded by NIH/NHLBI Award R01HL148248, NSF Award 1922658 NRT-HDR: FUTURE Foundations, Translation, and Responsibility for Data Science, and NSF CAREER Award 2145542. The authors would additionally like to thank Chin-Wei Huang for helpful discussing regarding Huang et al. (2021).", + "bbox": [ + 171, + 132, + 826, + 191 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 210, + 287, + 224 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Andrew D Barbour. Stein's method and poisson process convergence. 
Journal of Applied Probability, 25(A):175-184, 1988.", + "Andrew Campbell, Joe Benton, Valentin De Bortoli, Tom Rainforth, George Deligiannidis, and Arnaud Doucet. A continuous time framework for discrete denoising models. arXiv preprint arXiv:2205.14987, 2022.", + "Nanxin Chen, Yu Zhang, Heiga Zen, Ron J Weiss, Mohammad Norouzi, and William Chan. Wavegrad: Estimating gradients for waveform generation. arXiv preprint arXiv:2009.00713, 2020.", + "Rewon Child. Very deep vaes generalize autoregressive models and can outperform them on images. arXiv preprint arXiv:2011.10650, 2020.", + "Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34, 2021.", + "Tim Dockhorn, Arash Vahdat, and Karsten Kreis. Score-based generative modeling with critically-damped Langevin diffusion. arXiv preprint arXiv:2112.07068, 2021.", + "Emilien Dupont, Arnaud Doucet, and Yee Whye Teh. Augmented neural odes. Advances in Neural Information Processing Systems, 32, 2019.", + "Bradley Efron. Tweedie's formula and selection bias. Journal of the American Statistical Association, 106(496):1602-1614, 2011.", + "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239, 2020.", + "Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. *J. Mach. Learn. Res.*, 23: 47-1, 2022.", + "Chin-Wei Huang, Laurent Dinh, and Aaron Courville. Augmented normalizing flows: Bridging the gap between generative flows and latent variable models. arXiv preprint arXiv:2002.07101, 2020.", + "Chin-Wei Huang, Jae Hyun Lim, and Aaron C Courville. A variational perspective on diffusion-based generative models and score matching. Advances in Neural Information Processing Systems, 34, 2021.", + "Diederik P Kingma and Max Welling. Auto-encoding variational bayes. 
arXiv preprint arXiv:1312.6114, 2013.", + "Diederik P Kingma, Tim Salimans, Ben Poole, and Jonathan Ho. Variational diffusion models. arXiv preprint arXiv:2107.00630, 2021.", + "Zhifeng Kong, Wei Ping, Jiaji Huang, Kexin Zhao, and Bryan Catanzaro. Diffwave: A versatile diffusion model for audio synthesis. arXiv preprint arXiv:2009.09761, 2020.", + "Yi-An Ma, Tianqi Chen, and Emily Fox. A complete recipe for stochastic gradient mcmc. Advances in neural information processing systems, 28, 2015.", + "Lars Maaløe, Marco Fraccaro, Valentin Lievin, and Ole Winther. Biva: A very deep hierarchy of latent variables for generative modeling. Advances in neural information processing systems, 32, 2019." + ], + "bbox": [ + 171, + 233, + 826, + 922 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Gautam Mittal, Jesse Engel, Curtis Hawthorne, and Ian Simon. Symbolic music generation with diffusion models. arXiv preprint arXiv:2103.16091, 2021.", + "Wenlong Mou, Yi-An Ma, Martin J Wainwright, Peter L Bartlett, and Michael I Jordan. High-order Langevin diffusion yields an accelerated mcmc algorithm. arXiv preprint arXiv:1908.10859, 2019.", + "Alex Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. arXiv preprint arXiv:2102.09672, 2021.", + "Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021.", + "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. 
arXiv preprint arXiv:2204.06125, 2022.", + "Rajesh Ranganath, Sean Gerrish, and David Blei. Black box variational inference. In Artificial intelligence and statistics, pp. 814-822. PMLR, 2014.", + "Rajesh Ranganath, Dustin Tran, and David Blei. Hierarchical variational models. In International conference on machine learning, pp. 324-333. PMLR, 2016.", + "Danilo Rezende and Shakir Mohamed. Variational inference with normalizing flows. In International Conference on Machine Learning, pp. 1530-1538. PMLR, 2015.", + "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022.", + "Simo Särkkä and Arno Solin. Applied stochastic differential equations, volume 10. Cambridge University Press, 2019.", + "Hiroshi Sasaki, Chris G Willcocks, and Toby P Breckon. Unit-ddpm: Unpaired image translation with denoising diffusion probabilistic models. arXiv preprint arXiv:2104.05358, 2021.", + "Jianghong Shi, Tianqi Chen, Ruoshi Yuan, Bo Yuan, and Ping Ao. Relation of a new interpretation of stochastic differential equations to ito process. Journal of Statistical physics, 148:579-590, 2012.", + "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015.", + "Casper Kaae Sønderby, Tapani Raiko, Lars Maaløe, Søren Kaae Sønderby, and Ole Winther. Ladder variational autoencoders. Advances in neural information processing systems, 29, 2016.", + "Yang Song, Sahaj Garg, Jiaxin Shi, and Stefano Ermon. Sliced score matching: A scalable approach to density and score estimation. In Uncertainty in Artificial Intelligence, pp. 574-584. 
PMLR, 2020a.", + "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020b.", + "Yang Song, Conor Durkan, Iain Murray, and Stefano Ermon. Maximum likelihood training of score-based diffusion models. Advances in Neural Information Processing Systems, 34:1415-1428, 2021.", + "Michalis Titsias and Miguel Lázaro-Gredilla. Doubly stochastic variational bayes for non-conjugate inference. In International conference on machine learning, pp. 1971-1979. PMLR, 2014.", + "Arash Vahdat and Jan Kautz. Nvae: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pascal Vincent. A connection between score matching and denoising autoencoders. *Neural computation*, 23(7):1661-1674, 2011.", + "L Yin and P Ao. Existence and construction of dynamical potential in nonequilibrium processes without detailed balance. Journal of Physics A: Mathematical and General, 39(27):8593, 2006.", + "Zhenzhong Zhang and Dayue Chen. A new criterion on existence and uniqueness of stationary distribution for diffusion processes. Advances in Difference Equations, 2013(1):1-6, 2013." 
+ ], + "bbox": [ + 171, + 102, + 823, + 209 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A AUTOMATED SCORE MATCHING WITH LEARNED INFERENCE", + "text_level": 1, + "bbox": [ + 171, + 102, + 718, + 118 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Like for the MDM ELBO, the methods in this work apply to training with the score matching loss:", + "bbox": [ + 169, + 133, + 808, + 148 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {S M}} (x, \\theta , \\phi) = T \\mathbb {E} _ {t \\sim U [ 0, T ]} \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\lambda (t) \\| s _ {\\theta} (\\mathbf {y} _ {t}, t) - \\nabla_ {\\mathbf {y} _ {t}} \\log q _ {\\phi} (\\mathbf {y} _ {t} | x) \\| _ {2} ^ {2} \\right],\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 152, + 756, + 178 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $\\lambda :[0,T]\\to \\mathbb{R}_+$ is a weighing function. The score-matching loss is often optimized in its simplified noise prediction form:", + "bbox": [ + 169, + 181, + 823, + 210 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {N P}} (x, \\theta , \\phi) = T \\mathbb {E} _ {t \\sim U [ 0, T ]} \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\| \\epsilon_ {\\theta} (\\mathbf {y} _ {t}, t) - \\epsilon \\| _ {2} ^ {2} \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 214, + 676, + 241 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $s_{\\theta} = -\\mathbf{L}_{t}^{-\\top}\\epsilon_{\\theta}$ and $\\mathbf{y}_t = \\mu_t + \\mathbf{L}_t\\epsilon$ and $\\epsilon$ is the noise used in sampling $\\mathbf{y}_t$ . 
We describe here how the improvements to the ELBO studied in this work carry over to $\\mathcal{L}_{\\mathrm{SM}}$ and $\\mathcal{L}_{\\mathrm{NP}}$ . In the following let $q_{0}$ be the data distribution, let $p_{(\\theta ,\\phi),0}$ be the model's distribution of the data, and recall that the model is defined by $(s_{\\theta},f_{\\phi},g_{\\phi})$ and prior $\\pi$ via a continuous-time stochastic process with drift coefficient $g_{\\phi}^{2}s_{\\theta} - f_{\\phi}$ and diffusion coefficient $g_{\\phi}$ .", + "bbox": [ + 169, + 244, + 823, + 321 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "First, minimizing $\\mathcal{L}_{\\mathrm{SM}}$ or $\\mathcal{L}_{\\mathrm{NP}}$ so that $\\nabla_{\\mathbf{y}_t}\\log q_\\phi (\\mathbf{y}_t) = s_\\theta (\\mathbf{y}_t,t)$ does not alone imply that $p_{(\\theta ,\\phi),0}$ will equal $q_{0}$ ; it must also be that $q_{\\phi ,T}\\approx \\pi$ . Foregoing this requirement means $\\pi$ will produce samples that the generative model may not be able to push onto the path the model was trained on (formally, the score of the generative model would not equal the time-reversal of the forward score even if $s_\\theta$ equals the forward score). This condition can be satisfied if $q_{\\phi}$ can be chosen with stationary distribution $\\pi$ . Section 3.4 describes how to accomplish this.", + "bbox": [ + 169, + 327, + 823, + 412 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Next, for any fixed $q_{\\phi}$ , automatic transitions from section 3.3 streamline the computation of the score matching loss, allowing for simple score computation for a wide class of diffusions beyond VP.", + "bbox": [ + 169, + 417, + 823, + 448 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Finally, for a fixed $q_{\\phi}$ with $q_{\\phi,T} \\approx \\pi$ and a score architecture $s_{\\theta}$ , minimizing $\\mathcal{L}_{\\mathrm{SM}}$ or $\\mathcal{L}_{\\mathrm{NP}}$ w.r.t $\\theta$ may be suboptimal. 
Optimization, like for the elbo, carries over to score matching and can close this gap; learning w.r.t. both $\\theta, \\phi$ increases the ability to successfully minimize the loss at each $t$ (section 3.5). In other words, since the generative model is defined by $(s_{\\theta}, f_{\\phi}, g_{\\phi})$ , learning $q_{\\phi}$ means the loss trains all three components of the generative model rather than just one. In summary, score matching is automatic and can learn over the space of linear diffusions that tend to the model prior.", + "bbox": [ + 169, + 453, + 825, + 539 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B DOES MY MODEL USE AUXILIARY VARIABLES?", + "text_level": 1, + "bbox": [ + 171, + 556, + 599, + 573 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In section 3 we gave the example choice of $q(\\mathbf{y}_0^v |x) = \\mathcal{N}(0,\\mathbf{I})$ coordinate-wise. It is also a common choice to set $\\pi_{\\theta} = \\mathcal{N}(0,\\mathbf{I})$ . Because the optimum in diffusion models is $p_{\\theta} = q$ for all $t$ , we see a peculiar phenomenon under this choice: the model has main and auxiliary dimensions independent at both endpoints 0 and $T$ . Does this mean that the model does not use auxiliary variables? We show that even when $q_{\\phi}(\\mathbf{y}_0)$ and $\\pi_{\\theta}$ have main and auxiliary variables independent, the model can use the auxiliary variables. A sufficient condition is $\\mathbf{Q} + \\mathbf{D}$ is non-diagonal.", + "bbox": [ + 169, + 587, + 823, + 672 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To make this precise, we recall that we model with $p_{\\theta}(\\mathbf{u}_T^z = x)$ . To show the model is using auxiliary variables, we just need to show that $\\mathbf{u}_T^z$ (main coordinate at $T$ ) depends on $\\mathbf{u}_t^v$ (aux. coordinate at $t$ ) for $T > t$ . At optimum, $p_{\\theta}(\\mathbf{u}_T^z,\\mathbf{u}_t^v) = q_{\\phi}(\\mathbf{y}_0^z,\\mathbf{y}_{T - t}^v)$ . 
Therefore it is sufficient to show that for some time $s$ , $q_{\\phi}(\\mathbf{y}_s^v |\\mathbf{y}_0^z)\\neq q_{\\phi}(\\mathbf{y}_s^v)$ . Because $\\mathbf{y}_0^z$ is determined by $x$ , we need to show that $q_{\\phi}(\\mathbf{y}_s^v |x)\\neq q_{\\phi}(\\mathbf{y}_s^v)$ . To do that, we first derive $q(\\mathbf{y}_s|x)$ and then marginalize to get $q(\\mathbf{y}_s^v |x)$ from it. Since the former is a 2D Gaussian, the latter is available in terms of the former's mean and covariance. Suppose $\\mathbb{E}[\\mathbf{y}_0^v ] = 0$ , $\\mathbf{Q} = [[0, - 1],[1,0]]$ and $\\mathbf{D} = [[1,0],[0,1]]$ , and we have $s = .1$ . We have:", + "bbox": [ + 169, + 678, + 823, + 779 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\mathbf {y} _ {s} | x ] = \\exp \\left[ - s (\\mathbf {Q} + \\mathbf {D}) \\right] \\binom {x} {0} = \\exp \\left[ \\left[ \\begin{array}{l l} -. 1 & . 1 \\\\ -. 1 & -. 1 \\end{array} \\right] \\right] \\binom {x} {0} = \\binom {0. 9 0 0 3 x} {- 0. 0 9 0 x} \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 782, + 823, + 816 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Regardless of the covariance, any 1D marginal of this 2D Gaussian will have a mean that is a function of $x$ , meaning that $q(\\mathbf{y}_s^v |x)$ does not equal $q(\\mathbf{y}_s^v)$ (which is also a Gaussian, but with mean depending on the mean of $\\mathbf{x}$ rather than $x$ itself). Therefore, even under the setup with independent endpoints, the optimal model makes use of the intermediate auxiliary variables in its final modeling distribution $p_{\\theta}(\\mathbf{u}_T^z = x)$ .", + "bbox": [ + 169, + 818, + 823, + 890 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Are there choices of $\\mathbf{Q}$ and $\\mathbf{D}$ that lead to learning models that don't make use of the extra dimensions? 
As mentioned, in the inference process, $\\mathbf{Q}$ is responsible for mixing information among the", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "coordinates, and is the only source of this when $\\mathbf{D}$ is diagonal. Then, if $\\mathbf{Q} = \\mathbf{0}$ and $\\mathbf{D}$ is diagonal, none of the coordinates for a given feature $\\mathbf{x}_j$ (including $\\mathbf{u}_{tj}^z$ , $\\mathbf{u}_{tj}^{v_1},\\ldots ,\\mathbf{u}_{tj}^{v_{K - 1}}$ ) interact for any $t$ . Then, since $p_\\theta = q$ at optimum, independence of the coordinates at all $t$ in $q$ implies the same in $p_\\theta$ and the model will not make use of any auxiliary variables when modeling the marginal $\\log p_{\\theta}(\\mathbf{u}_T^z = x)$ .", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C STATIONARY PARAMETERIZATION", + "text_level": 1, + "bbox": [ + 171, + 181, + 493, + 196 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The non-linear time-homogeneous Ito process family is:", + "bbox": [ + 171, + 212, + 545, + 227 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = f (\\mathbf {y}) d t + g (\\mathbf {y}) d \\mathbf {B} _ {t}. \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 236, + 823, + 252 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This family can be restricted to those with stationary distributions. Ma et al. (2015) show a complete recipe to span the subset of this family with a desired stationary distribution. Let $\\mathbf{Q}$ be skew-symmetric $(-\\mathbf{Q} = \\mathbf{Q}^{\\top})$ and $\\mathbf{D}$ be positive semi-definite. 
Suppose the desired stationary distribution is $q_{\\infty}(\\mathbf{y})$ . For a matrix $\\mathbf{A}$ , let $\\sqrt{\\mathbf{A}}$ refer to the matrix square root defined by $\\mathbf{a} = \\sqrt{\\mathbf{A}} \\iff \\mathbf{A} = \\mathbf{aa}^{\\top}$ . Then, Ma et al. (2015) show that, setting $\\mathbf{H}(\\mathbf{y}) = -\\log q_{\\infty}(\\mathbf{y})$ , $g(\\mathbf{y}) = \\sqrt{2\\mathbf{D}(\\mathbf{y})}$ , and", + "bbox": [ + 169, + 258, + 823, + 337 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nf (\\mathbf {y}) = - \\left[ \\mathbf {D} (\\mathbf {y}) + \\mathbf {Q} (\\mathbf {y}) \\right] \\nabla \\mathbf {H} (\\mathbf {y}) + \\boldsymbol {\\Gamma} (\\mathbf {y}), \\quad \\boldsymbol {\\Gamma} _ {i} (\\mathbf {y}) = \\sum_ {j = 1} ^ {d} \\frac {\\partial}{\\partial \\mathbf {z} _ {j}} \\left(\\mathbf {D} _ {i j} (\\mathbf {y}) + \\mathbf {Q} _ {i j} (\\mathbf {y})\\right), \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 345, + 825, + 388 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "yields a process $\\mathbf{y}_t$ with stationary distribution $q_{\\infty}$ . We extend it to time-varying (time inhomogeneous) processes.", + "bbox": [ + 169, + 396, + 823, + 425 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Theorem 2. 
$q_{\\infty}(\\mathbf{y})\\propto \\exp [-H(\\mathbf{y})]$ is a stationary distribution of", + "bbox": [ + 171, + 441, + 612, + 458 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = \\left(- [ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) ] \\nabla \\mathbf {H} (\\mathbf {y}) + \\boldsymbol {\\Gamma} (\\mathbf {y}, t)\\right) d t + \\sqrt {2 \\mathbf {D} (\\mathbf {y} , t)} d \\mathbf {B} _ {t}, \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 465, + 823, + 507 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "for", + "bbox": [ + 169, + 513, + 197, + 529 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Gamma} _ {i} (\\mathbf {y}, t) = \\sum_ {j = 1} ^ {d} \\frac {\\partial}{\\partial \\mathbf {y} _ {j}} \\left(\\mathbf {D} _ {i j} (\\mathbf {y}, t) + \\mathbf {Q} _ {i j} (\\mathbf {y}, t)\\right). \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 537, + 825, + 579 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Proof. The Fokker-Planck equation is:", + "bbox": [ + 171, + 597, + 428, + 612 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\partial_ {t} q (\\mathbf {y}, t) = - \\sum_ {i} \\frac {\\partial}{\\partial \\mathbf {y} _ {i}} [ f _ {i} (\\mathbf {y}, t) q (\\mathbf {y}, t) ] + \\sum_ {i, j} \\frac {\\partial^ {2}}{\\partial \\mathbf {y} _ {i} \\partial \\mathbf {y} _ {j}} [ \\mathbf {D} _ {i j} (\\mathbf {y}, t) q (\\mathbf {y}, t) ] \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 253, + 619, + 825, + 657 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A stationary distribution is one where the Fokker-Planck right hand side is equal to 0. To show that the stationary characterization also holds of time-inhomogeneous processes with $\\mathbf{D}(\\mathbf{y},t)$ and $\\mathbf{Q}(\\mathbf{y},t)$ , we take two steps, closely following Yin & Ao (2006); Shi et al. 
(2012); Ma et al. (2015), but noting that there is no requirement for $\\mathbf{Q}$ , $\\mathbf{D}$ to be free of $t$ . First, we show that the Fokker-Planck equation can be re-written as:", + "bbox": [ + 169, + 666, + 823, + 736 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\partial_ {t} q (\\mathbf {y}, t) = \\nabla \\cdot \\left(\\left[ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) \\right] \\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right]\\right) \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 744, + 825, + 785 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Second, because the whole expression is set to 0 when the inside expression equals 0", + "bbox": [ + 171, + 792, + 730, + 806 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nq (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) = 0, \\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 815, + 823, + 832 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "we just need to show that this holds when $q(\\mathbf{y},t) = \\exp [-H(\\mathbf{y})] / \\mathbf{Z}$ . 
The second step is concluded because", + "bbox": [ + 169, + 839, + 823, + 866 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right] = \\frac {1}{\\mathbf {Z}} \\left[ \\exp [ - H (\\mathbf {y}) ] \\nabla H (\\mathbf {y}) + \\nabla \\exp [ - H (\\mathbf {y}) ] \\right] = 0,\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 872, + 767, + 902 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\mathbf{Z}$ is the normalization constant of $\\exp (-H(y))$ .", + "bbox": [ + 171, + 909, + 535, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 948, + 509, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "It only remains to show that Fokker-Planck can be re-written in divergence form with time-dependent $\\mathbf{Q},\\mathbf{D}$ . In the following let $Q_{ijt}$ denote $\\mathbf{Q}_{ij}(\\mathbf{y},t)$ and likewise for $D_{ijt}$ . Let $\\partial_i$ denote $\\frac{\\partial}{\\partial\\mathbf{y}_i}$ and let it denote $\\frac{d}{d\\mathbf{y}_i}$ for scalar functions. 
We will use $[Ax]_i = \\sum_jA_{ij}x_j$ .", + "bbox": [ + 169, + 103, + 826, + 155 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {t} q _ {t} = \\nabla \\cdot \\left(\\left[ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) \\right] [ q \\nabla H + \\nabla q ]\\right) \\\\ = \\sum_ {i} \\partial_ {i} \\left(\\left[ [ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) ] [ q \\nabla H + \\nabla q ] \\right] _ {i}\\right) \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\nabla H + \\nabla q \\right] _ {j} \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H + \\partial_ {j} q \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} q \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} D _ {i j t} \\left[ \\partial_ {j} q \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} Q _ {i j t} \\left[ \\partial_ {j} q \\right] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 222, + 159, + 774, + 361 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We re-write the 2nd and 3rd term. 
Holding $i$ fixed and noting $q$ is scalar, we get the product rule $\\sum_{j}D_{ijt}(\\partial_{j}q) = \\sum_{j}\\partial_{j}[D_{ijt}q] - q\\sum_{j}\\partial_{j}D_{ijt}$ for each $i$ , and likewise for $q$ :", + "bbox": [ + 169, + 363, + 823, + 395 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} D _ {i j t} \\left[ \\partial_ {j} q \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} Q _ {i j t} \\left[ \\partial_ {j} q \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} \\partial_ {j} \\left[ D _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} D _ {i j t} \\\\ + \\sum_ {i} \\partial_ {i} \\sum_ {j} \\partial_ {j} \\left[ Q _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} Q _ {i j t} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 398, + 751, + 501 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Because $\\mathbf{Q}(\\mathbf{y},t)$ is skew-symmetric, we have that $\\sum_{i}\\partial_{i}\\sum_{j}\\partial_{j}[Q_{ijt}q] = 0$ , leaving", + "bbox": [ + 171, + 503, + 723, + 522 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial_ {t} q _ {t} = \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] \\right] + \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\partial_ {j} \\left[ D _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} D _ {i j t} - q \\sum_ {j} \\partial_ {j} Q _ {i j t} \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} H \\right] q \\right] + \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\partial_ {j} \\left[ D _ {i j t} q 
\\right] - q \\sum_ {j} \\partial_ {j} \\left(D _ {i j t} + Q _ {i j t}\\right) \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\left[ \\left(\\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} H \\right] - \\sum_ {j} \\partial_ {j} \\left(D _ {i j t} + Q _ {i j t}\\right)\\right) q \\right] + \\sum_ {i} \\sum_ {j} \\frac {\\partial^ {2}}{\\mathbf {y} _ {i} \\mathbf {y} _ {j}} \\left(D _ {i j t} q\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 525, + 818, + 656 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Recalling that $f_{i}(\\mathbf{y},t) = \\left(-[D + Q]\\nabla H + \\Gamma\\right)_{i}$ and again that $[Ax]_i = \\sum_j A_{ij}x_j$ , we have equality with the original Fokker-Planck", + "bbox": [ + 169, + 680, + 823, + 715 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} = \\sum_ {i} \\partial_ {i} \\left[ \\left(\\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} H \\right] - \\sum_ {j} \\partial_ {j} \\left(D _ {i j t} + Q _ {i j t}\\right)\\right) q \\right] + \\sum_ {i j} \\frac {\\partial^ {2}}{\\mathbf {y} _ {i} \\mathbf {y} _ {j}} \\left(D _ {i j t} q\\right) \\\\ = - \\sum_ {i} \\frac {\\partial}{\\partial \\mathbf {y} _ {i}} \\left[ f _ {i} (\\mathbf {y}, t) q (\\mathbf {y}, t) \\right] + \\sum_ {i j} \\frac {\\partial^ {2}}{\\mathbf {y} _ {i} \\mathbf {y} _ {j}} \\left[ \\mathbf {D} _ {i j} (\\mathbf {y}, t) q (\\mathbf {y}, t) \\right] \\\\ = \\partial_ {t} q (\\mathbf {y}, t) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 719, + 772, + 821 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/5a70606959cc1f55516e353eacfd3cd59104183ac5d0662fefdc5b1c756ac428.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 823, + 823, + 835 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We have shown $\\exp[-H(\\mathbf{y})] / \\mathbf{Z}$ is a stationary 
distribution of the time-varying non-linear Ito process:", + "bbox": [ + 169, + 852, + 823, + 878 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = \\left(- [ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) ] \\nabla H (\\mathbf {y}) + \\boldsymbol {\\Gamma} (\\mathbf {y}, t)\\right) d t + \\sqrt {2 \\mathbf {D} (\\mathbf {y} , t)} d \\mathbf {B} _ {t}. \\tag {26}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 881, + 823, + 921 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "However, for some choices of $\\mathbf{Q}, \\mathbf{D}$ , $\\exp[-H(\\mathbf{y})] / \\mathbf{Z}$ is not necessarily the unique stationary distribution. One problematic case can occur as follows. Suppose that row $i$ of $(\\mathbf{Q} + \\mathbf{D})$ is all-zero; in this case, $d\\mathbf{y}_i = 0$ which implies that $(\\mathbf{y}_i)_t = (\\mathbf{y}_i)_0$ for all $t > 0$ . Then, the initial distribution is also a stationary distribution. To rule out such pathological diffusions, we make the assumption that $\\mathbf{Q} + \\mathbf{D}$ is full rank. 
Then, for uniqueness, recall that stationary distributions are the zeros of", + "bbox": [ + 169, + 103, + 823, + 174 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\partial_ {t} q (\\mathbf {y}, t) = \\nabla \\cdot \\left(\\left[ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) \\right] \\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right]\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 179, + 725, + 219 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where the expression is of the form $\\mathbf{A}\\mathbf{v}$ for $\\mathbf{A} = \\mathbf{D}(\\mathbf{y},t) + \\mathbf{Q}(\\mathbf{y},t)$ and", + "bbox": [ + 171, + 224, + 645, + 241 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {v} = \\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 244, + 612, + 272 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Under the assumption that $\\mathbf{Q} + \\mathbf{D}$ is full rank, the expression can only be zero when $\\mathbf{v}$ is zero. To show uniqueness under the full rank assumption, one must then show that", + "bbox": [ + 169, + 276, + 823, + 306 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla q (\\mathbf {y}, t) = - q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}).\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 311, + 596, + 328 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "holds only if $q(\\mathbf{y}, t) = \\exp[-H(\\mathbf{y})] / \\mathbf{Z}$ . Even if $\\exp[-H(\\mathbf{y})] / \\mathbf{Z}$ is the unique stationary distribution, convergence to that distribution is a question. 
See Zhang & Chen (2013) for more details.", + "bbox": [ + 169, + 332, + 823, + 362 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Learning $\\mathbf{Q}_{\\phi}$ , $\\mathbf{D}_{\\phi}$ in the MDM ELBO helps push $\\mathbf{y}_T$ to the model prior $\\pi_{\\theta}$ and avoid issues like those discussed.", + "bbox": [ + 169, + 368, + 823, + 396 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1 LINEAR PROCESSES", + "text_level": 1, + "bbox": [ + 171, + 412, + 356, + 426 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Next, we specialize this general family to linear Itô processes to maintain tractable transition distributions. A linear process is one where the drift $f(\\mathbf{y},t)$ and diffusion $g(\\mathbf{y},t)$ are linear functions of $\\mathbf{y}$ . We express the drift function of a non-linear time-varying Itô process with stationary distribution proportional to $\\exp[-H(\\mathbf{y})]$ as", + "bbox": [ + 169, + 439, + 823, + 494 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n- (\\mathbf {Q} (\\mathbf {y}, t) + \\mathbf {D} (\\mathbf {y}, t)) \\nabla H (\\mathbf {y}) + \\Gamma (\\mathbf {y}, t).\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 500, + 633, + 517 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Next, linear Ito processes have Gaussian stationary distributions (Särkkä & Solin, 2019) so $H(\\mathbf{y})$ must be quadratic and $\\nabla H(\\mathbf{y})$ is linear, and neither are constant in $\\mathbf{y}$ . Because $\\nabla H(\\mathbf{y})$ is linear, it can be expressed as $\\mathbf{S}\\mathbf{y}$ for some matrix $\\mathbf{S}$ where $\\mathbf{S}$ is the inverse of the covariance matrix. Because $\\nabla H$ is multiplied by $\\mathbf{Q}, \\mathbf{D}$ , this means that $\\mathbf{Q}, \\mathbf{D}$ must be free of $\\mathbf{y}$ . 
Recalling that $\\Gamma$ is expressed as a sum of derivatives w.r.t $\\mathbf{y}$ of $\\mathbf{Q} + \\mathbf{D}$ , this means that $\\Gamma$ must satisfy $\\Gamma = 0$ . Next, because of the stationary requirement that $g(t) = \\sqrt{2\\mathbf{D}(\\mathbf{y},t)}$ , we can also conclude by the restriction on $\\mathbf{D}$ that the diffusion coefficient function must be independent of the state $\\mathbf{y}$ . Our final form for linear time-varying processes with stationary distributions $\\mathcal{N}(0,\\mathbf{S}^{-1})$ is:", + "bbox": [ + 169, + 522, + 825, + 638 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = \\underbrace {- \\left[ \\mathbf {Q} (t) + \\mathbf {D} (t) \\right] \\mathbf {S} \\mathbf {y}} _ {f (\\mathbf {y}, t)} d t + \\underbrace {\\sqrt {2 \\mathbf {D} (t)}} _ {g (t)} d \\mathbf {B} _ {t} \\tag {27}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 643, + 823, + 689 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.2 PARAMETERIZING $\\mathbf{Q}_{\\phi}$", + "text_level": 1, + "bbox": [ + 171, + 705, + 372, + 722 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Suppose $b_{q}(s)$ is a positive scalar function defined on the time domain with known integral. Suppose $\\tilde{\\mathbf{Q}}_{\\phi}$ is any matrix. Then $\\tilde{\\mathbf{Q}}_{\\phi} - \\tilde{\\mathbf{Q}}_{\\phi}^{\\top}$ is skew-symmetric with $\\tilde{\\mathbf{Q}}_{\\phi, ij} = -\\tilde{\\mathbf{Q}}_{\\phi, ji}$ . 
We can set $\\mathbf{Q}_{\\phi}$ to", + "bbox": [ + 169, + 732, + 823, + 770 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} _ {\\phi} (s) = b _ {q} (s) \\cdot \\left[ \\tilde {\\mathbf {Q}} _ {\\phi} - \\tilde {\\mathbf {Q}} _ {\\phi} ^ {\\top} \\right] \\tag {28}\n$$\n", + "text_format": "latex", + "bbox": [ + 397, + 773, + 823, + 800 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "This is a general parameterization of time-independent skew-symmetric matrices, which have number of degrees of freedom equal to the number of entries in one of the triangles of the matrix, excluding the diagonal.", + "bbox": [ + 169, + 805, + 823, + 848 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.3 PARAMETERIZING $\\mathbf{D}_{\\phi}$", + "text_level": 1, + "bbox": [ + 171, + 864, + 372, + 880 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Suppose $b_{d}(s)$ is a positive scalar function defined on the time domain with known integral. Suppose $\\tilde{\\mathbf{D}}_{\\phi}$ is any matrix. Then $\\tilde{\\mathbf{D}}_{\\phi}\\tilde{\\mathbf{D}}_{\\phi}^{\\top}$ is positive semi-definite and spans all time-independent positive", + "bbox": [ + 169, + 890, + 823, + 926 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "semi-definite matrices. 
We can set $\\mathbf{D}_{\\phi}$ to", + "bbox": [ + 169, + 103, + 444, + 118 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {D} _ {\\phi} (s) = b _ {d} (s) \\cdot \\left[ \\tilde {\\mathbf {D}} _ {\\phi} \\tilde {\\mathbf {D}} _ {\\phi} ^ {\\top} \\right] \\tag {29}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 122, + 823, + 150 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "To show $\\tilde{\\mathbf{D}}\\tilde{\\mathbf{D}}^{\\top}$ spans all positive semi-definite matrices: suppose $\\mathbf{M}$ is positive semi-definite. Then it is square. Then it can be eigen-decomposed into $\\mathbf{M} = \\mathbf{V}\\pmb {\\Sigma}\\mathbf{V}^{\\top}$ . The degrees of freedom in $\\mathbf{V}\\pmb {\\Sigma}\\mathbf{V}^{\\top}$ are just $\\mathbf{R} = \\mathbf{V}\\sqrt{\\pmb{\\Sigma}}$ since $\\mathbf{V}\\pmb {\\Sigma}\\mathbf{V}^{\\top} = \\mathbf{R}\\mathbf{R}^{\\top}$ and the square root is taken element-wise because $\\pmb{\\Sigma}$ is diagonal and is real because each $\\pmb{\\Sigma}_{ij}\\geq 0$ , which is true because $\\mathbf{M}$ is positive semi-definite. Take $\\mathbf{D} = \\mathbf{R}$ .", + "bbox": [ + 169, + 152, + 823, + 229 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In our experiments we parameterize $\\mathbf{D}$ as a diagonal-only matrix.", + "bbox": [ + 169, + 237, + 602, + 252 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.4 INTEGRALS", + "text_level": 1, + "bbox": [ + 171, + 268, + 300, + 281 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The known integral requirement comes from the integrals required in the transition kernel, and can be relaxed two possible ways:", + "bbox": [ + 169, + 292, + 823, + 321 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- numerical integration of function with unknown integral. 
This is expected to have low error given that the function is scalar-in scalar-out.", + "- Directly parameterize the integral and use auto-grad when needing the functions not-integrated." + ], + "bbox": [ + 215, + 332, + 823, + 393 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We stick with the known integrals. In conclusion, the underlying parameters are positive scalar functions $b_{q}(s), b_{d}(s)$ defined on the time domain and with known integral, and general matrices $\\tilde{\\mathbf{Q}}_{\\phi}, \\tilde{\\mathbf{D}}_{\\phi}$ .", + "bbox": [ + 169, + 402, + 823, + 449 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.5 INSTANCES", + "text_level": 1, + "bbox": [ + 171, + 464, + 297, + 478 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "VPSDE. VPSDE has $K = 1$ . Consequently, $\\mathbf{Q}, \\mathbf{D}$ are $K \\times K$ . The only $1 \\times 1$ skew-symmetric matrix is 0, so $\\mathbf{Q} = 0$ . Setting $\\mathbf{D}(t) = \\frac{1}{2}\\beta(t)$ recovers VPSDE:", + "bbox": [ + 169, + 489, + 823, + 520 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = - \\frac {\\beta (t)}{2} \\mathbf {y} d t + \\sqrt {\\beta (t)} d \\mathbf {B} _ {t} \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 523, + 823, + 554 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "$\\nabla H(\\mathbf{y}) = \\mathbf{y}$ so $\\mathbf{H}(\\mathbf{y}) = \\frac{1}{2}\\| \\mathbf{y}\\| _2^2$ . The stationary distribution is $\\mathcal{N}(0,\\mathbf{I})$", + "bbox": [ + 169, + 556, + 640, + 574 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "CLD. The CLD process (eq 5 in Dockhorn et al. 
(2021)) is defined as", + "bbox": [ + 169, + 585, + 635, + 601 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left( \\begin{array}{c} d \\mathbf {z} _ {t} \\\\ d \\mathbf {v} _ {t} \\end{array} \\right) = d \\mathbf {y} _ {t} = \\left( \\begin{array}{c c} 0 & \\frac {\\beta}{M} \\\\ - \\beta & - \\frac {\\Gamma \\beta}{M} \\end{array} \\right) \\mathbf {y} _ {t} d t + \\left( \\begin{array}{c c} 0 & 0 \\\\ 0 & \\sqrt {2 \\Gamma \\beta} \\end{array} \\right) d \\mathbf {B} _ {t}.\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 603, + 687, + 640 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In $\\mathbf{Q} / \\mathbf{D}$ parameterization, we have", + "bbox": [ + 169, + 642, + 406, + 657 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nH (\\mathbf {y}) = \\frac {1}{2} \\| \\mathbf {z} \\| _ {2} ^ {2} + \\frac {1}{2 M} \\| \\mathbf {v} \\| _ {2} ^ {2}, \\qquad \\nabla_ {\\mathbf {u}} H (\\mathbf {y}) = \\left( \\begin{array}{c} \\mathbf {z} \\\\ \\frac {1}{M} \\mathbf {v} \\end{array} \\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 661, + 681, + 694 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} = \\left( \\begin{array}{c c} 0 & - \\beta \\\\ \\beta & 0 \\end{array} \\right), \\qquad \\mathbf {D} = \\left( \\begin{array}{c c} 0 & 0 \\\\ 0 & \\Gamma \\beta \\end{array} \\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 696, + 593, + 729 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The stationary distribution of this process is:", + "bbox": [ + 171, + 739, + 467, + 753 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nq _ {\\phi , \\infty} \\propto \\exp (- H (\\mathbf {y})) = \\mathcal {N} (\\mathbf {z}; 0, I _ {d}) \\mathcal {N} (\\mathbf {v}; 0, M I _ {d}) \\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 757, + 823, + 773 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "ALDA. 
Mou et al. (2019) define a third-order diffusion process for the purpose of gradient-based MCMC sampling. The ALDA diffusion process can be specified as", + "bbox": [ + 169, + 782, + 823, + 811 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} = \\left( \\begin{array}{c c c} 0 & - \\frac {1}{L} I & 0 \\\\ \\frac {1}{L} I & 0 & - \\gamma I \\\\ 0 & \\gamma I & 0 \\end{array} \\right), \\quad \\mathbf {D} = \\left( \\begin{array}{c c c} 0 & 0 & 0 \\\\ 0 & 0 & 0 \\\\ 0 & 0 & \\frac {\\xi}{L} I \\end{array} \\right). \\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 815, + 823, + 864 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Note that $\\mathbf{Q}$ is skew-symmetric and $\\mathbf{D}$ is positive semi-definite, therefore we have that $q_{t}(\\mathbf{u})\\to q_{\\phi ,\\infty}$ . In this case,", + "bbox": [ + 169, + 866, + 823, + 896 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nq _ {\\phi , \\infty} = \\mathcal {N} (\\mathbf {z}; 0, \\mathbf {I} _ {d}) \\mathcal {N} (\\mathbf {v} _ {1}; 0, \\frac {1}{L} \\mathbf {I} _ {d}) \\mathcal {N} (\\mathbf {v} _ {2}; 0, \\frac {1}{L} \\mathbf {I} _ {d})\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 898, + 661, + 929 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "MALDA. 
Similar to ALDA, we specify a diffusion process we term MALDA which we specify as", + "bbox": [ + 169, + 103, + 813, + 119 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} = \\left( \\begin{array}{c c c} 0 & - \\frac {1}{L} I & - \\frac {1}{L} \\\\ \\frac {1}{L} I & 0 & - \\gamma I \\\\ \\frac {1}{L} & \\gamma I & 0 \\end{array} \\right), \\quad \\mathbf {D} = \\left( \\begin{array}{c c c} 0 & 0 & 0 \\\\ 0 & \\frac {1}{L} I & 0 \\\\ 0 & 0 & \\frac {1}{L} I \\end{array} \\right). \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 312, + 126, + 823, + 175 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Note that $\\mathbf{Q}$ is skew-symmetric and $\\mathbf{D}$ is positive semi-definite. In this case this is", + "bbox": [ + 169, + 181, + 712, + 196 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nq _ {\\phi , \\infty} = \\mathcal {N} (\\mathbf {z}; 0, \\mathbf {I} _ {d}) \\mathcal {N} (\\mathbf {v} _ {1}; 0, \\frac {1}{L} I _ {d}) \\mathcal {N} (\\mathbf {v} _ {2}; 0, \\frac {1}{L} I _ {d})\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 204, + 665, + 233 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D TRANSITIONS FOR LINEAR PROCESSES", + "text_level": 1, + "bbox": [ + 171, + 250, + 532, + 265 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For time variable $s$ and Brownian motion $\\widehat{\\mathbf{B}}_s$ driving diffusions of the form", + "bbox": [ + 169, + 280, + 666, + 297 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {y} = f (\\mathbf {y}, s) d s + g (s) d \\widehat {\\mathbf {B}} _ {s}, \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 305, + 823, + 325 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "when $f_{\\phi}(\\mathbf{y}_s, s)$ , $g_{\\phi}(s)$ are linear, the transition kernel $q_{\\phi}(\\mathbf{y}_s | \\mathbf{y}_0)$ is always normal (Särkkä & Solin, 2019). 
Therefore, we just find the mean $\\mathbf{m}_{s|0}$ and covariance $\\boldsymbol{\\Sigma}_{s|0}$ of $q(\\mathbf{y}_s | \\mathbf{y}_0)$ . Let $f(\\mathbf{y}, s) = \\mathbf{A}(s)\\mathbf{y}$ . The un-conditional time $s$ mean and covariance are solutions to", + "bbox": [ + 169, + 330, + 823, + 376 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nd \\mathbf {m} _ {s} / d s = \\mathbf {A} (s) \\mathbf {m} _ {s}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 383, + 496, + 398 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nd \\boldsymbol {\\Sigma} _ {s} / d s = \\mathbf {A} (s) \\boldsymbol {\\Sigma} _ {s} + \\boldsymbol {\\Sigma} _ {s} \\mathbf {A} ^ {\\top} (s) + g ^ {2} (s) \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 395, + 823, + 421 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "By (6.6) in Särkkä & Solin (2019), for computing conditionals $q(\\mathbf{y}_s|\\mathbf{y}_0)$ , we can take the marginal distribution ODEs and compute conditionals by simply setting the time 0 mean and covariance initial conditions to the conditioning value and to 0 respectively. We take (6.36-6.39) and set $\\mathbf{m}_0 = \\mathbf{y}_0$ and $\\boldsymbol{\\Sigma}_{0} = 0$ to condition. Let $[\\mathbf{A}]_s = \\int_0^s\\mathbf{A}(\\nu)d\\nu$ . The mean is", + "bbox": [ + 169, + 428, + 823, + 487 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {m} _ {s \\mid 0} = \\exp \\left[ \\int_ {0} ^ {s} \\mathbf {A} (\\nu) d \\nu \\right] \\mathbf {y} _ {0} = \\exp \\left(\\left[ \\mathbf {A} \\right] _ {s}\\right) \\mathbf {y} _ {0} = \\underbrace {\\exp (s \\mathbf {A}) \\mathbf {y} _ {0}} _ {\\text {no integration if } \\mathbf {A} (\\nu) = \\mathbf {A}}, \\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 494, + 823, + 539 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\exp$ denotes matrix exponential. 
(6.36-6.39) state the covariance $q(\\mathbf{y}_s|\\mathbf{y}_0)$ as a matrix factorization, for which a derivation is provided below $\\boldsymbol{\\Sigma}_{s} = \\mathbf{C}_{s}(\\mathbf{H}_{s})^{-1}$ for $\\mathbf{C}_s,\\mathbf{H}_s$ being the solutions of:", + "bbox": [ + 169, + 547, + 823, + 589 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\left( \\begin{array}{c} \\frac {d}{d s} \\mathbf {C} _ {s} \\\\ \\frac {d}{d s} \\mathbf {H} _ {s} \\end{array} \\right) = \\left( \\begin{array}{c c} \\mathbf {A} (s) & g ^ {2} (s) \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} (s) \\end{array} \\right) \\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) \\tag {37}\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 595, + 823, + 631 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To condition and get $\\Sigma_{s|0}$ from $\\Sigma_s$ , we set $\\Sigma_0 = 0$ , and initialize $\\mathbf{C}_s, \\mathbf{H}_s$ by $\\mathbf{C}_0 = \\mathbf{0}$ and $\\mathbf{H}_0 = \\mathbf{I}$ .", + "bbox": [ + 169, + 636, + 818, + 652 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) = \\exp \\left[ \\left( \\begin{array}{c c} [ \\mathbf {A} ] _ {s} & [ g ^ {2} ] _ {s} \\\\ \\mathbf {0} & - [ \\mathbf {A} ^ {\\top} ] _ {s} \\end{array} \\right) \\right] \\left( \\begin{array}{c} \\mathbf {0} \\\\ \\mathbf {I} \\end{array} \\right) = \\underbrace {\\exp \\left[ s \\left( \\begin{array}{c c} \\mathbf {A} & g ^ {2} \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} \\end{array} \\right) \\right] \\left( \\begin{array}{c} \\mathbf {0} \\\\ \\mathbf {I} \\end{array} \\right)} _ {\\text {no integration if } \\mathbf {A} (\\nu) = \\mathbf {A} , g (\\nu) = g}. 
\\tag {38}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 660, + 823, + 724 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Finally, $\\mathbf{\\Sigma}_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}$", + "bbox": [ + 171, + 729, + 359, + 750 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D.1 DERIVATION OF THE COVARIANCE MATRIX SOLUTION", + "text_level": 1, + "bbox": [ + 171, + 765, + 593, + 779 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Equation (35) gives an expression for $d\\pmb{\\Sigma}_s / ds$ . To derive the matrix factorization technique used in eq. (37), we use eq. (35) and the desired condition $\\pmb{\\Sigma}_s = \\mathbf{C}_s\\mathbf{H}_s^{-1}$ to derive expressions for $d\\mathbf{C}_s / ds$ and $d\\mathbf{H}_s / ds$ and suitable initial conditions so that the factorization also starts at the desired $\\pmb{\\Sigma}_0$ . Let $\\pmb{\\Sigma}_s = \\mathbf{C}_s\\mathbf{H}_s^{-1}$ , then note that $\\mathbf{C}_s, \\mathbf{H}_s$ satisfies", + "bbox": [ + 169, + 790, + 825, + 849 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {d}{d s} \\boldsymbol {\\Sigma} _ {s} = \\frac {d}{d s} \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\\\ = \\mathbf {C} _ {s} \\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 856, + 625, + 922 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "And using the fact that", + "bbox": [ + 171, + 104, + 326, + 118 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d}{d s} \\mathbf {H} _ {s} \\mathbf {H} _ {s} ^ {- 1} = 
0\n$$\n", + "text_format": "latex", + "bbox": [ + 437, + 128, + 545, + 157 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {H} _ {s} \\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} + \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right) = 0\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 160, + 544, + 189 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} = - \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 459, + 191, + 669, + 220 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "we get that", + "bbox": [ + 171, + 229, + 250, + 244 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {C} _ {s} \\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} = - \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right) + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 253, + 750, + 287 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} - \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right) + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} = \\mathbf {A} (s) \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) + g ^ {2} (s) \\\\ = \\mathbf {A} (s) \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} \\mathbf {H} _ {s} ^ {- 1} + g ^ {2} (s) \\mathbf {H} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\\\ \\end{array}\n$$\n", + "text_format": 
"latex", + "bbox": [ + 173, + 290, + 841, + 343 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left(- \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} + \\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} = \\left(\\mathbf {A} (s) \\mathbf {C} _ {s} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} \\\\ - \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} + \\frac {d}{d s} \\mathbf {C} _ {s} = \\mathbf {A} (s) \\mathbf {C} _ {s} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s} \\\\ \\left[ \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\quad \\mathbf {I} _ {d} \\right] ^ {\\top} \\frac {d}{d s} \\left( \\begin{array}{c} \\mathbf {H} _ {s} \\\\ \\mathbf {C} _ {s} \\end{array} \\right) = \\left[ \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\quad \\mathbf {I} _ {d} \\right] ^ {\\top} \\left( \\begin{array}{c} - \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} \\\\ \\mathbf {A} (s) \\mathbf {C} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s} \\end{array} \\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 345, + 795, + 445 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Now, we note $\\mathbf{C}_s$ , $\\mathbf{H}_s$ satisfy the following", + "bbox": [ + 171, + 454, + 457, + 469 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d}{d s} \\mathbf {H} _ {s} = - \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s}\n$$\n", + "text_format": "latex", + "bbox": [ + 397, + 479, + 545, + 508 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d}{d s} \\mathbf {C} _ {s} = \\mathbf {A} (s) \\mathbf {C} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 
511, + 596, + 540 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "which implies that", + "bbox": [ + 171, + 549, + 297, + 564 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d}{d s} \\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) = \\left( \\begin{array}{c c} \\mathbf {A} (s) & g ^ {2} (s) \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} (s) \\end{array} \\right) \\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) \\tag {39}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 574, + 823, + 608 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "with $\\mathbf{C}_0 = \\pmb{\\Sigma}_0$ and $\\mathbf{H}_0 = \\mathbf{I}_d$ , as $\\mathbf{C}_0\\mathbf{H}_0^{-1} = \\pmb{\\Sigma}_0$ .", + "bbox": [ + 171, + 618, + 488, + 636 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D.2 HYBRID SCORE MATCHING", + "text_level": 1, + "bbox": [ + 171, + 655, + 410, + 669 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Instead of computing $q(\\mathbf{y}_s|\\mathbf{y}_0)$ , we can apply the hybrid score matching principle (Dockhorn et al., 2021) to reduce variance by compute objectives using $q(\\mathbf{y}_s|x)$ instead of $q(\\mathbf{y}_s|\\mathbf{y}_0)$ , which amounts to integrating out $\\mathbf{v}_0$ . To accomplish this, following Särkkä & Solin (2019), we simply replace $\\mathbf{y}_0$ with $[x,\\mathbb{E}[\\mathbf{v}_0]]$ in the expression for $\\mathbf{m}_{s|0}$ , i.e. 
replace the conditioning value of $\\mathbf{v}_0$ with the mean of its chosen initial distribution:", + "bbox": [ + 169, + 681, + 823, + 752 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\mathbf {y} _ {s} | x ] = \\exp \\left[ \\int_ {0} ^ {s} A (\\nu) d \\nu \\right] \\binom {x} {\\mathbb {E} [ \\mathbf {v} _ {0} ]} \\tag {40}\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 760, + 823, + 801 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "For the covariance, instead of using $\\mathbf{C}_0 = \\boldsymbol{\\Sigma}_0 = \\mathbf{0}$ , we use a block matrix to condition on $x$ but not $\\mathbf{v}_0$ . We decompose $\\boldsymbol{\\Sigma}_0$ into its blocks $\\boldsymbol{\\Sigma}_{0,xx}$ , $\\boldsymbol{\\Sigma}_{0,vv}$ , $\\boldsymbol{\\Sigma}_{0,xv}$ . As before, to condition on $x$ we set $\\boldsymbol{\\Sigma}_{0,xx} = \\mathbf{0}$ . Because $q(\\mathbf{v}_0)$ is set to be independent of $x$ , $\\boldsymbol{\\Sigma}_{0,xv}$ is also set to $\\mathbf{0}$ . Finally, instead of $\\mathbf{0}$ , to marginalize out $\\mathbf{v}_0$ , $\\boldsymbol{\\Sigma}_{0,vv}$ is set to the covariance of the chosen initial time zero distribution for $\\mathbf{v}_0$ . E.g. if $\\mathbf{v}_{0,j} \\sim N(0,\\gamma)$ for each dimension, then $\\boldsymbol{\\Sigma}_{0,vv} = N(0,\\gamma I)$ .", + "bbox": [ + 169, + 818, + 823, + 890 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We operationalize this in a simple piece of code, which makes the ELBO tractable and easy, i.e. 
skips both analytic derivations and numerical forward integration during training.", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D.3 TRANSITIONS IN STATIONARY PARAMETERIZATION", + "text_level": 1, + "bbox": [ + 171, + 104, + 576, + 118 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In terms of $\\mathbf{Q}$ , $\\mathbf{D}$ , the transitions $q(\\mathbf{y}_s|\\mathbf{y}_0)$ for time $s$ are normal with mean $\\mathbf{m}_{s|0}$ and $\\pmb{\\Sigma}_{s|0}$ equal to:", + "bbox": [ + 169, + 135, + 823, + 152 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {m} _ {s \\mid 0} = \\exp \\left(- \\left[ \\mathbf {Q} + \\mathbf {D} \\right] _ {s}\\right) \\mathbf {y} _ {0}, \\quad \\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ \\binom {- [ \\mathbf {Q} + \\mathbf {D} ] _ {s}} {\\mathbf {0}} \\begin{array}{c c} {[ 2 \\mathbf {D} ] _ {s}} \\\\ {[ (\\mathbf {Q} + \\mathbf {D}) ^ {\\top} ] _ {s}} \\end{array} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} \\tag {41}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 172, + 825, + 229 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $\\mathbf{\\Sigma}_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}$ . 
For the time invariant case, this simplifies to", + "bbox": [ + 169, + 252, + 643, + 268 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {m} _ {s \\mid 0} = \\exp [ - s (\\mathbf {Q} + \\mathbf {D}) ] \\mathbf {y} _ {0}, \\quad \\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ s \\binom {- (\\mathbf {Q} + \\mathbf {D})} {\\mathbf {0}} \\quad \\binom {2 \\mathbf {D}} {(\\mathbf {Q} + \\mathbf {D}) ^ {\\top}} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} \\tag {42}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 290, + 825, + 330 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "E GENERIC CHANGE OF MEASURE AND JENSEN'S FOR APPROXIMATE MARGINALIZATION", + "text_level": 1, + "bbox": [ + 171, + 364, + 766, + 397 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Suppose $\\mathbf{u} = [\\mathbf{z},\\mathbf{v}]$ and we have an expression for $p(\\mathbf{u} = [z,v]) = p(\\mathbf{z} = z,\\mathbf{v} = v)$ . By marginalization, we can get $p(\\mathbf{z} = z)$ , and we can introduce another distribution $q$ to pick a sampling distribution of our choice:", + "bbox": [ + 169, + 422, + 823, + 465 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} p (\\mathbf {z} = z) = \\int_ {v} p (\\mathbf {z} = z, \\mathbf {v} = v) d v \\\\ = \\int_ {v} p (\\mathbf {z} = z | \\mathbf {v} = v) p (\\mathbf {v} = v) d v \\\\ = \\int_ {v} \\frac {q (\\mathbf {v} = v | \\mathbf {z} = z)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} p (\\mathbf {z} = z | \\mathbf {v} = v) p (\\mathbf {v} = v) d v \\tag {43} \\\\ = \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 484, + 823, + 619 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We often work with these expressions in log space, and need to pull the 
expectation outside to use Monte Carlo. Jensen's bound allows this:", + "bbox": [ + 169, + 640, + 823, + 667 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\log p (\\mathbf {z} = z) = \\log \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ \\geq \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 689, + 666, + 755 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The following shows that the bound is tight when $q(\\mathbf{v} = v|\\mathbf{z} = z) = p(\\mathbf{v} = v|\\mathbf{z} = z)$ :", + "bbox": [ + 171, + 776, + 736, + 792 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] = _ {\\text {a s s u m e}} \\mathbb {E} _ {p (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{p (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ = \\mathbb {E} _ {p (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\left(\\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{p (\\mathbf {v} = v , \\mathbf {z} = z)} \\cdot p (\\mathbf {z} = z)\\right) \\right] \\tag {44} \\\\ = \\mathbb {E} _ {p (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log p (\\mathbf {z} = z) \\right] \\\\ = \\log p (\\mathbf {z} = z) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 815, + 823, + 926 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": 
"page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F ELBO FOR MDMS", + "text_level": 1, + "bbox": [ + 171, + 102, + 346, + 116 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\log p _ {\\theta} (x) = \\log \\int_ {v _ {0}} p _ {\\theta} \\left(x _ {0}, v _ {0}\\right) d v _ {0} (45) \\\\ = \\log \\int_ {v _ {0}} p _ {\\theta} \\left(u _ {0} = [ x, v _ {0} ]\\right) (46) \\\\ = \\log \\int_ {v _ {0}} \\frac {q \\left(v _ {0} \\mid x\\right)}{q \\left(v _ {0} \\mid x\\right)} p _ {\\theta} \\left(u _ {0} = [ x, v _ {0} ]\\right) (47) \\\\ = \\log \\mathbb {E} _ {q \\left(v _ {0} \\mid x\\right)} \\left[ \\frac {p _ {\\theta} \\left(u _ {0} = [ x , v _ {0} ]\\right)}{q \\left(v _ {0} \\mid x\\right)} \\right] (48) \\\\ \\geq \\mathbb {E} _ {q \\left(v _ {0} \\mid x\\right)} \\left[ \\log p _ {\\theta} \\left(u _ {0} = [ x, v _ {0} ]\\right) - \\log q \\left(v _ {0} \\mid x\\right) \\right] (49) \\\\ \\geq \\mathbb {E} _ {q (y | x)} \\left[ \\log \\pi_ {\\theta} (y _ {T}) + \\int_ {0} ^ {T} - \\| s _ {\\theta} \\| _ {g ^ {2}} ^ {2} - \\nabla \\cdot \\left(g ^ {2} s _ {\\theta} - f\\right) d s - \\log q \\left(y _ {0} ^ {v} | x\\right) \\right] (50) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 132, + 825, + 364 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The first inequality holds due to Jensen's inequality and the second due to an application of Theorem 1 from Huang et al. (2021) or Theorem 3 from Song et al. 
(2021) applied to the joint variable $\\mathbf{u}_0$ .", + "bbox": [ + 171, + 371, + 823, + 401 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F.1 ISM TO DSM", + "text_level": 1, + "bbox": [ + 171, + 417, + 312, + 431 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F.1.1 LEMMA: EXPECTATION BY PARTS", + "text_level": 1, + "bbox": [ + 171, + 444, + 460, + 458 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We will need a form of multivariate integration by parts which gives us for some $f$ and some $q(x)$ , $E_{q(x)}[\\nabla_x \\cdot f(x)] = -E_{q(x)}[f(x)^\\top \\nabla_x \\log q(x)]$", + "bbox": [ + 171, + 468, + 823, + 500 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} E _ {q (x)} [ \\nabla_ {x} \\cdot f _ {i} (x) ] = \\int q (x) \\sum_ {i = 1} ^ {d} [ \\nabla_ {x _ {i}} f _ {i} (x) ] d x \\\\ = \\int \\sum_ {i = 1} ^ {d} q (x) \\nabla_ {x _ {i}} f _ {i} (x) d x \\\\ = \\sum_ {i = 1} ^ {d} \\int_ {x _ {- i}} \\int_ {x _ {i}} q (x) \\nabla_ {x _ {i}} f _ {i} (x) d x _ {i} d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} \\int \\left[ \\left[ q (x) \\int \\nabla_ {x _ {i}} f _ {i} (x) d x _ {i} \\right] _ {- \\infty} ^ {\\infty} - \\int \\nabla_ {x _ {i}} q (x) \\int \\nabla_ {x _ {i}} f _ {i} (x) d x _ {i} \\right] d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} \\int \\left[ - \\int \\nabla_ {x _ {i}} q (x) f _ {i} (x) d x _ {i} \\right] d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} \\int \\left[ - \\int q (x) \\nabla_ {x _ {i}} \\log q (x) f _ {i} (x) d x _ {i} \\right] d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} - \\int \\int q (x) \\nabla_ {x _ {i}} \\log q (x) f _ {i} (x) d x _ {i} d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} - E _ {q (x)} \\left[ \\nabla_ {x _ {i}} \\log q (x) f _ {i} (x) \\right] \\\\ = - E _ {q (x)} [ f (x) ^ {\\top} \\nabla_ {x} \\log q (x) ] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 517, + 807, + 888 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "This 
equality also follows directly from the Stein operator using the generator method to the Langevin diffusion (Barbour, 1988).", + "bbox": [ + 169, + 895, + 823, + 924 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F.1.2 DSM ELBO", + "text_level": 1, + "bbox": [ + 171, + 103, + 310, + 118 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Using the \"expectation by parts\", we have:", + "bbox": [ + 171, + 127, + 455, + 143 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} _ {q (u _ {t} | x)} [ \\nabla_ {u _ {t}} \\cdot g ^ {2} (t) s _ {\\theta} (u _ {t}, t) ] = - \\mathbb {E} _ {q (u _ {t} | x)} [ (g ^ {2} (t) s _ {\\theta} (u _ {t}, t)) ^ {\\top} \\nabla_ {u _ {t}} \\log q (u _ {t} | x) ]\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 148, + 754, + 170 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Also we have, for $s_{\\theta}$ evaluated at $(u_t, t)$ , by completing the square,", + "bbox": [ + 171, + 174, + 614, + 191 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n- \\frac {1}{2} | | s _ {\\theta} | | _ {g ^ {2} (t)} + s _ {\\theta} ^ {\\top} g ^ {2} (t) \\nabla \\log q (u _ {t} | x) = - \\frac {1}{2} | | s _ {\\theta} - \\nabla \\log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2} +. 
5 | | \\nabla \\log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 196, + 823, + 226 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The two together give us:", + "bbox": [ + 171, + 229, + 341, + 244 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\log p (x) \\geq \\mathbb {E} _ {q (u _ {T} | x)} \\left[ \\log \\pi \\right] + \\int_ {0} ^ {T} \\left[ \\mathbb {E} _ {q (u _ {t} | x)} \\Big [ - \\nabla \\cdot g ^ {2} s _ {\\theta} - . 5 | | s _ {\\theta} | | _ {g ^ {2} (t)} ^ {2} + \\nabla \\cdot f \\Big ] d t \\right] \\\\ = \\mathbb {E} _ {q (u _ {T} | x)} \\left[ \\log \\pi \\right] + \\int_ {0} ^ {T} \\left[ \\mathbb {E} _ {q (u _ {t} | x)} \\left[ \\left(g ^ {2} s _ {\\theta}\\right) ^ {\\top} \\nabla_ {u _ {t}} \\log q (u _ {t} | x) - . 5 | | s _ {\\theta} | | _ {g ^ {2} (t)} ^ {2} + \\nabla \\cdot f \\right] d t \\right] \\\\ = \\mathbb {E} _ {q (u _ {T} | x)} \\left[ \\log \\pi \\right] + \\int_ {0} ^ {T} \\left[ \\mathbb {E} _ {q (u _ {t} | x)} \\left[ - \\frac {1}{2} | | s _ {\\theta} - \\nabla \\log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2} \\right. \\right. \\\\ \\left. + . 5 \\left| | \\nabla \\log q \\left(u _ {t} | x\\right) \\right| _ {g ^ {2} (t)} ^ {2} + \\nabla_ {u _ {t}} \\cdot f \\right] \\Biggr ] d t \\tag {51} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 251, + 823, + 435 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "F.2 NOISE PREDICTION", + "text_level": 1, + "bbox": [ + 171, + 450, + 349, + 465 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We have that for normal $\\mathcal{N}(\\mathbf{y}_s;\\mathbf{m}_{s|0},\\pmb{\\Sigma}_{s|0})$ , we can sample $\\mathbf{y}_s$ with normal noise $\\epsilon \\sim \\mathcal{N}(0,I)$ and $\\mathbf{y}_s = \\mathbf{m}_{s|0} + \\mathbf{L}\\epsilon$ where $\\mathbf{L}$ is the cholesky decomposition of $\\pmb{\\Sigma}_{s|0}$ . 
Then, the score is", + "bbox": [ + 169, + 476, + 823, + 508 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\nabla_ {\\mathbf {y} _ {s}} \\log q (\\mathbf {y} _ {s} | \\mathbf {y} _ {0}) \\Bigg | _ {\\mathbf {y} _ {s} = \\mathbf {m} _ {s | 0} + \\mathbf {L} \\epsilon} \\\\ = - \\boldsymbol {\\Sigma} _ {s | 0} ^ {- 1} \\left(\\mathbf {y} _ {s} - \\mathbf {m} _ {s | 0}\\right) \\\\ = - \\boldsymbol {\\Sigma} _ {s | 0} ^ {- 1} \\left(\\left[ \\mathbf {m} _ {s | 0} + \\mathbf {L} \\epsilon \\right] - \\mathbf {m} _ {s | 0}\\right) \\\\ = - \\boldsymbol {\\Sigma} _ {s | 0} ^ {- 1} \\left(\\mathbf {L} \\epsilon\\right) \\\\ = - \\left(\\mathbf {L} \\mathbf {L} ^ {\\top}\\right) ^ {- 1} \\left(\\mathbf {L} \\epsilon\\right) \\\\ = - \\left(\\mathbf {L} ^ {\\top}\\right) ^ {- 1} \\mathbf {L} ^ {- 1} \\mathbf {L} \\epsilon \\\\ = - \\left(\\mathbf {L} ^ {\\top}\\right) ^ {- 1} \\epsilon = - \\left(\\mathbf {L} ^ {- 1}\\right) ^ {\\top} \\epsilon = - \\mathbf {L} ^ {\\top , - 1} \\epsilon \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 513, + 656, + 733 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Parameterize $s_{\\theta}(\\mathbf{y}_s,s)$ as $s_{\\theta}(\\mathbf{y}_s,s) = -\\mathbf{L}^{\\top, -1}\\epsilon_{\\theta}(\\mathbf{y},s)$ . 
This gives", + "bbox": [ + 171, + 747, + 612, + 766 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {1}{2} \\| - \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s) \\quad - \\quad - \\mathbf {L} ^ {\\top , - 1} \\epsilon \\| _ {g _ {\\phi} ^ {2} (s)} ^ {2} \\\\ = \\frac {1}{2} \\| \\mathbf {L} ^ {\\top , - 1} \\epsilon \\quad - \\quad \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s) \\| _ {g _ {\\phi} ^ {2} (s)} ^ {2} \\\\ = \\frac {1}{2} \\left(\\mathbf {L} ^ {\\top , - 1} \\epsilon - \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s)\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(\\mathbf {L} ^ {\\top , - 1} \\epsilon - \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s)\\right) \\\\ = \\frac {1}{2} \\left(\\mathbf {L} ^ {\\top , - 1} \\left[ \\epsilon - \\epsilon_ {\\theta} (\\mathbf {y}, s) \\right]\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(\\mathbf {L} ^ {\\top , - 1} \\left[ \\epsilon - \\epsilon_ {\\theta} (\\mathbf {y}, s) \\right]\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 233, + 772, + 759, + 922 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We can also use this insight to analytically compute the quadratic score term (following is computed per data-dimension, so must be multiplied by $D$ when computing the ELBO):", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\frac {1}{2} \\| \\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ 
{0}) \\| _ {g _ {\\phi} ^ {2} (s)} ^ {2} \\right] = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\left(\\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0})\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(\\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0})\\right) \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\left(- \\mathbf {L} ^ {\\top , - 1} \\epsilon\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(- \\mathbf {L} ^ {\\top , - 1} \\epsilon\\right) \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\epsilon^ {\\top} (- \\mathbf {L} ^ {- 1}) g _ {\\phi} ^ {2} (s) (- \\mathbf {L} ^ {\\top , - 1}) \\epsilon \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\epsilon^ {\\top} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\epsilon \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\epsilon} \\left[ \\epsilon^ {\\top} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\epsilon \\right] \\\\ = \\mathbb {E} _ {\\epsilon} \\left[ \\epsilon^ {\\top} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\epsilon \\right] \\\\ = \\operatorname {T r a c e} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 142, + 887, + 439 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "G ELBOS IN STATIONARY PARAMETERIZATION", + "text_level": 1, + "bbox": [ + 171, + 458, + 573, + 472 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We use the stationary parameterization described in appendix C. 
We now specialize the ELBO to the linear stationary parameterization.", + "bbox": [ + 169, + 489, + 823, + 518 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Recall $f_{\\phi}(\\mathbf{y},s) = -[\\mathbf{Q}_{\\phi}(s) + \\mathbf{D}_{\\phi}(s)]\\mathbf{y}$ . Recall $g_{\\phi}(s) = \\sqrt{2\\mathbf{D}_{\\phi}(s)}$ We have $g_{\\phi}^{2}(s) = 2\\mathbf{D}_{\\phi}(s)$ . We can write the MDM ISM ELBO as", + "bbox": [ + 169, + 526, + 823, + 556 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} ^ {\\text {m i s m}} = \\mathbb {E} _ {v \\sim q _ {\\gamma}} \\left[ \\mathbb {E} _ {s \\sim \\operatorname {U n i f} (0, T)} \\left[ \\ell_ {s} ^ {(i s m)} \\right] + \\ell_ {T} + \\ell_ {q} \\right] \\tag {52}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 564, + 823, + 604 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where", + "bbox": [ + 171, + 613, + 217, + 626 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {s _ {\\theta}} = - \\frac {1}{2} \\| s _ {\\theta} (\\mathbf {y} _ {s}, s) \\| _ {\\underbrace {2 \\mathbf {D} _ {\\phi} (s)} _ {g _ {\\phi} ^ {2}}} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 635, + 516, + 686 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {\\mathrm {d i v - f g s}} = \\nabla_ {\\mathbf {y} _ {s}} \\cdot \\left[ \\underbrace {- [ \\mathbf {Q} _ {\\phi} (s) + \\mathbf {D} _ {\\phi} (s) ] \\mathbf {y} _ {s}} _ {f _ {\\phi}} - \\underbrace {2 \\mathbf {D} _ {\\phi} (s)} _ {g _ {\\phi} ^ {2}} s _ {\\theta} (\\mathbf {y} _ {s}, s) \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 689, + 696, + 731 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {s} ^ {\\text {i s m}} = \\mathbb {E} _ {\\substack {q _ {\\phi , s, (x, v)} \\\\ \\text {depends on } Q, D}} \\left[ \\ell_ {s \\theta} + \\ell_ {\\text {div - f g s}} \\right] \\tag{53}\n$$\n", + "text_format": "latex", + "bbox": 
[ + 315, + 731, + 823, + 776 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {T} = \\mathbb {E} _ {\\substack {q _ {\\phi , T}, (x, v) \\\\ \\text{depends on} \\mathbf {Q}, \\mathbf {D}}} \\Big [ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) \\Big ]\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 779, + 537, + 821 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_ {q} = - \\log q _ {\\gamma} (v | x)\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 825, + 450, + 842 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "For the DSM form,", + "bbox": [ + 171, + 857, + 297, + 871 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} ^ {\\mathrm {m d s m}} = \\mathbb {E} _ {v \\sim q _ {\\gamma}} \\left[ \\mathbb {E} _ {s \\sim \\operatorname {U n i f} (0, T)} \\left[ \\ell_ {s} ^ {(d s m)} \\right] + \\ell_ {T} + \\ell_ {q} \\right] \\tag {54}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 880, + 823, + 921 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where", + "bbox": [ + 171, + 104, + 217, + 116 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\ell_ {\\mathrm {d i v - f}} = \\nabla_ {\\mathbf {y} _ {s}} \\cdot \\underbrace {- [ \\mathbf {Q} _ {\\phi} (s) + \\mathbf {D} _ {\\phi} (s) ] \\mathbf {y} _ {s}} _ {f _ {\\phi}} \\\\ \\ell_{\\text{fwd - score}} = \\frac{1}{2}\\bigg\\|\\underbrace{\\nabla_{\\mathbf{y}_{s}}\\log q_{\\phi}(\\mathbf{y}_{s}|\\mathbf{y}_{0})}_{\\text{depends on }\\mathbf{Q},\\mathbf{D}}\\bigg\\| _ {\\underbrace{2\\mathbf{D}_{\\phi}(s)}_{g_{\\phi}^{2}}} ^ {2} \\\\ \\ell_ {\\text {neg - score diff}} = - \\frac {1}{2} \\| s _ {\\theta} 
(\\mathbf {y} _ {s}, s) - \\underbrace {\\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0})} _ {\\text {depends on } \\mathbf {Q} , \\mathbf {D}} \\| _ {\\underbrace {2 \\mathbf {D} _ {\\phi} (s)} _ {g _ {\\phi} ^ {2}}} ^ {2} \\\\ \\ell_ {s} ^ {(d s m)} = \\mathbb {E} _ {\\substack {q _ {\\phi , s, (x, v)} \\\\ \\text{depends on} \\mathbf {Q}, \\mathbf {D}}} \\left[ \\ell_ {\\text{neg - score diff}} + \\ell_ {\\text{fwd - score}} + \\ell_ {\\text{div - f}} \\right] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 126, + 694, + 321 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "H ALGORITHMS", + "text_level": 1, + "bbox": [ + 171, + 102, + 326, + 118 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "H.1 GENERIC TRANSITION KERNEL", + "text_level": 1, + "bbox": [ + 171, + 133, + 439, + 148 + ], + "page_idx": 24 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2 Get transition distribution $\\mathbf{y}_s|x$" + ], + "code_body": "Input: data $x$ time $s$ A, $g$ \ncompute: $\\mathbf{A}(s)$ and $g(s)$ \ncompute: $\\mathbf{M}_s = \\int_0^s\\mathbf{A}(t)dt$ (integrated drift) \ncompute: $\\mathbf{N}_s = \\int_0^s g^2 (t)dt$ (integrated diffusions squared) \ncompute: $\\gamma_{s|0} = \\exp \\left(\\mathbf{M}_s\\right)$ (mean coefficient) \nset: $\\mathbf{y}_0 = [x,0_1,\\dots ,0_{K - 1}],\\pmb{\\Sigma}_{0,zz} = \\mathbf{0}$ , and $\\pmb{\\Sigma}_{0,zv},\\pmb{\\Sigma}_{0,vv}$ to chosen initial distribution \ncompute: $\\mathbf{m}_{s|0} = \\gamma_{s|0}\\mathbf{y}_0$ (mean) \ncompute:", + "bbox": [ + 187, + 184, + 766, + 311 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\binom {\\mathbf {C} _ {s}} 
{\\mathbf {H} _ {s}} = \\exp \\left[ \\binom {\\mathbf {M} _ {s}} {\\mathbf {0}} \\binom {\\mathbf {N} _ {s}} {- \\mathbf {M} _ {s} ^ {\\top}} \\right] \\binom {\\boldsymbol {\\Sigma} _ {0}} {\\mathbf {I}} \\quad (\\text {ingredients for cov.}) \\tag {55}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 316, + 825, + 359 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "compute: $\\pmb{\\Sigma}_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}$ (cov.)", + "bbox": [ + 187, + 368, + 442, + 385 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Output: $\\mathcal{N}(\\mathbf{m}_{s|0},\\pmb{\\Sigma}_{s|0})$", + "bbox": [ + 187, + 385, + 352, + 401 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "H.2 TRANSITIONS WITH $Q, D$", + "text_level": 1, + "bbox": [ + 171, + 429, + 395, + 445 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Current param matrices $\\tilde{\\mathbf{Q}}_{\\phi},\\tilde{\\mathbf{D}}_{\\phi}$ and along with fixed time-in scalar-out functions $b_{q}(s),b_{d}(s)$ and their known integrals $B_{q}(s),B_{d}(s)$ . $q_{\\gamma}(v_0|z_0 = x)$ taken to be parameterless so that $v_{0}\\sim \\mathcal{N}(0,I)$ . Model params are $s_\\theta$ fixed $\\pi_{\\theta}$ .", + "bbox": [ + 169, + 455, + 823, + 502 + ], + "page_idx": 24 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 3 Get Q, D and their integrated terms M, N" + ], + "code_body": "Input: time $s$ and current params $\\phi$ \ncompute: $[b_q]_s = \\int_0^s b_q(\\nu)d\\nu$ using known integral $B_{q}(s) - B_{q}(0)$ \ncompute: $[b_d]_s = \\int_0^s b_d(\\nu)d\\nu$ using known integral $B_{d}(s) - B_{d}(0)$ . \ncompute: $[\\mathbf{Q}_{\\phi}]_{s} = [b_{q}]_{s}\\cdot [\\tilde{\\mathbf{Q}}_{\\phi} - \\tilde{\\mathbf{Q}}_{\\phi}^{\\top}]$ for current params $\\tilde{\\mathbf{Q}}_{\\phi}$ . 
\ncompute: $[\\mathbf{D}_{\\phi}]_{s} = [b_{d}]_{s}\\cdot [\\tilde{\\mathbf{D}}_{\\phi}\\tilde{\\mathbf{D}}_{\\phi}^{\\top}]$ for current params $\\tilde{\\mathbf{D}}_{\\phi}$ . \ncompute: $\\mathbf{M}_s = -([\\mathbf{Q}_{\\phi}]_s + [\\mathbf{D}_{\\phi}]_s)$ (M just a variable name) \ncompute: $\\mathbf{N}_s = [2\\mathbf{D}_{\\phi}]_s = 2\\cdot [\\mathbf{D}_{\\phi}]_s$ (N just a variable name) \ncompute: $\\mathbf{Q}_s = b_q(s)\\cdot [\\tilde{\\mathbf{Q}}_{\\phi} - \\tilde{\\mathbf{Q}}_{\\phi}^{\\top}]$ (not integrated) \ncompute: $\\mathbf{D}_s = b_d(s)\\cdot [\\tilde{\\mathbf{D}}_{\\phi}\\tilde{\\mathbf{D}}_{\\phi}^{\\top}]$ (not integrated) \ncompute: $A_s = -[\\mathbf{Q}_s + \\mathbf{D}_s]$ (drift coef.) \ncompute: $g_s^2 = 2\\mathbf{D}_s$ (diffusion coef. squared) \nOutput: $\\mathbf{A}_s, g_s^2, \\mathbf{M}_s, \\mathbf{N}_s$", + "bbox": [ + 187, + 534, + 640, + 750 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "H.3 ELBO ALGORITHMS", + "text_level": 1, + "bbox": [ + 171, + 773, + 354, + 787 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Input: Sample $\\mathbf{y}_0 = (x, v)$ and time $s$ . Current params $\\phi$", + "bbox": [ + 187, + 119, + 568, + 136 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "set: $\\mathbf{A}_s,g_s^2,\\mathbf{M}_s,\\mathbf{N}_s\\gets$ algorithm 3", + "bbox": [ + 189, + 135, + 428, + 148 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "compute: $\\mathbf{m}_{s|0} = \\exp \\left(\\mathbf{M}_s\\right)\\mathbf{y}_0$ (transition mean)", + "bbox": [ + 189, + 148, + 522, + 172 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "compute: ingredients for transition cov. 
matrix:", + "bbox": [ + 189, + 172, + 506, + 186 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ \\binom {\\mathbf {M} _ {s}} {\\mathbf {0}} \\binom {\\mathbf {N} _ {s}} {- \\mathbf {M} _ {s} ^ {\\top}} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} \\tag {56}\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 193, + 823, + 234 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "compute: $\\Sigma_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}$ (transition cov).", + "bbox": [ + 187, + 244, + 493, + 261 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "instantiate: $q_{\\phi ,s,(x,v)} = q_{\\phi}(\\mathbf{y}_s|\\mathbf{y}_0) = \\mathcal{N}(\\mathbf{m}_{s|0},\\pmb {\\Sigma}_{s|0})$", + "bbox": [ + 189, + 261, + 547, + 277 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Output: $q_{\\phi ,s,(x,v)},A_s,g_s^2$", + "bbox": [ + 189, + 277, + 359, + 292 + ], + "page_idx": 25 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 4 Get transition distributions", + "Algorithm 5 Compute ELBO with ism or dsm" + ], + "code_body": "input: Data point $x$ and current params $\\theta, \\phi, \\gamma$ \ndraw: an aux. 
sample $v \\sim q_{\\gamma}(v|x)$ \ndraw: a sample $s \\sim \\mathrm{Unif}(0,T)$ \nset: $\\mathbf{y}_0 = (x,v)$ \nset: $q_{\\phi,s,\\mathbf{y}_0}, A_s, g_s^2 \\gets \\text{algorithm 4 called on } \\mathbf{y}_0, s, \\phi$ \ndraw: $\\mathbf{y}_s \\sim q_{\\phi,s,\\mathbf{y}_0}$ \ncompute: $\\ell_s$ with $\\mathrm{dsm}(s)$ (algorithm 6) or $\\mathrm{ism}(s)$ (algorithm 7) on $\\mathbf{y}_s, \\theta, A_s, g_s^2, q_{\\phi,s,\\mathbf{y}_0}$ \nset: $q_{\\phi,T,\\mathbf{y}_0}, --, -- \\gets \\text{algorithm 4 called on } \\mathbf{y}_0, T, \\phi$ \ndraw: $\\mathbf{y}_T \\sim q_{\\phi,T,\\mathbf{y}_0}$ \noutput: $\\ell_s + \\log \\pi_\\theta(\\mathbf{y}_T) - \\log q_\\gamma(v)$", + "guess_lang": "txt", + "bbox": [ + 187, + 333, + 764, + 474 + ], + "page_idx": 25 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 6 Compute $\\mathrm{dsm}(s)$" + ], + "code_body": "input: $\\mathbf{y}_s, \\theta, A_s, g_s^2, q_{\\phi,s,\\mathbf{y}_0}$ . \ncompute: fwd-score = $\\nabla_{\\mathbf{y}_s}$ log $q_{\\phi}(\\mathbf{y}_s|\\mathbf{y}_0)$ \ncompute: model-score = $s_\\theta(\\mathbf{y}_s, s)$ \ncompute: fwd-score-term = $\\frac{1}{2}(\\mathrm{fwd-score})^\\top g_s^2$ (fwd-score) \ncompute: score-diff = model-score - fwd-score \ncompute: diff-term = $-\\frac{1}{2}$ score-diff $\\nabla_{\\mathbf{y}_s}$ score-diff \ncompute: div-f = $\\nabla_{\\mathbf{y}_s} \\cdot A_s\\mathbf{y}_s$ \noutput: $\\mathrm{dsm}(s) = \\mathrm{fwd-score-term} + \\mathrm{diff-term} + \\mathrm{div-f}$", + "guess_lang": "txt", + "bbox": [ + 187, + 515, + 576, + 630 + ], + "page_idx": 25 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 7 Compute $\\operatorname{ism}(s)$" + ], + "code_body": "input: $\\mathbf{y}_s,\\theta ,A_s,g_s^2,q_{\\phi ,s,\\mathbf{y}_0}$ \ncompute: model-score $= s_{\\theta}(\\mathbf{y}_{s},s)$ \ncompute: score-term $= -\\frac{1}{2}$ model-score $\\top g_s^2$ model-score \ncompute: div-gs $= \\nabla_{\\mathbf{y}_s}\\cdot g_s^2 
s_\\theta (\\mathbf{y}_s,s)$ \ncompute: div-f $= \\nabla_{\\mathbf{y}_s}\\cdot A_s\\mathbf{y}_s$ \ncompute: div-term $= -$ div-gs $^+$ div-f \noutput: ism(s) $=$ score-term $^+$ div-term", + "guess_lang": "txt", + "bbox": [ + 187, + 670, + 566, + 773 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "I VALID ELBO WITH TRUNCATION", + "text_level": 1, + "bbox": [ + 171, + 805, + 470, + 819 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The integrand in the ELBO and its gradients is not bounded at time 0. Therefore, following Sohl-Dickstein et al. (2015) and Song et al. (2021) the integrand in eq. (7) is integrated from $[\\epsilon, T]$ , rather than $[0, T]$ . However, that integral is not a valid lower bound on $\\log p_{\\theta}(x)$ . Instead, it can be viewed as a proper lower bound on the prior for a latent variable $\\mathbf{y}_{\\epsilon}$ . Therefore, to provide a bound for the data, one can introduce a likelihood and substitute the prior lower bound into a standard variational bound that integrates out the latent.", + "bbox": [ + 169, + 839, + 823, + 924 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "To provide a valid lower bound for multivariate diffusions, we extend theorem 6 in Song et al. (2021) from univariate to multivariate diffusions.", + "bbox": [ + 169, + 103, + 823, + 131 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Theorem 3. 
For transition kernel $q_{\\phi}(\\mathbf{y}_s \\mid \\mathbf{y}_0)$ , we can lower bound the model likelihood at time 0 as follows, for any $\\epsilon > 0$", + "bbox": [ + 169, + 133, + 825, + 162 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\log p _ {\\theta} (x) \\geq \\mathbb {E} _ {q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} \\mid x\\right)} \\mathbb {E} _ {q _ {\\phi} \\left(\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}\\right)} \\left[ \\log \\frac {p _ {\\theta} \\left(\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon}\\right)}{q _ {\\phi} \\left(\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}\\right)} + \\mathcal {L} _ {m d m} (\\mathbf {y} _ {\\epsilon}, \\epsilon) - \\log q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} \\mid x\\right) \\right], \\tag {57}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 167, + 825, + 200 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{mdm}(\\mathbf{y}_{\\epsilon},\\epsilon)$ is defined as", + "bbox": [ + 171, + 204, + 383, + 220 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {m d m} (\\mathbf {y} _ {\\epsilon}, \\epsilon) = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} _ {> \\epsilon} | \\mathbf {y} _ {\\epsilon})} \\left[ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) - \\int_ {\\epsilon} ^ {T} \\frac {1}{2} \\| s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} - \\frac {1}{2} \\| s _ {\\theta} - s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} + \\nabla \\cdot f _ {\\phi} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 223, + 782, + 265 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Proof. 
For transition kernel $q_{\\phi}(\\mathbf{y}_s \\mid \\mathbf{y}_0)$ , we can compute upper bound the model likelihood at time 0 following an application of the variational bound", + "bbox": [ + 169, + 277, + 823, + 306 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\log p _ {\\theta} (x) = \\log \\int_ {v _ {0}} p _ {\\theta} (\\mathbf {y} _ {0} = [ x, v _ {0} ]) d v _ {0} \\\\ = \\log \\int_ {v _ {0}, \\mathbf {y} _ {\\epsilon}} p _ {\\theta} (\\mathbf {y} _ {0}, \\mathbf {y} _ {\\epsilon}) d v _ {0} d \\mathbf {y} _ {\\epsilon} \\\\ = \\log \\int_ {v _ {0}, \\mathbf {y} _ {\\epsilon}} q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}) \\frac {q (v _ {0} \\mid x)}{q (v _ {0} \\mid x)} \\frac {p _ {\\theta} (\\mathbf {y} _ {0} , \\mathbf {y} _ {\\epsilon})}{q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0})} d v _ {0} d \\mathbf {y} _ {\\epsilon} \\\\ = \\log \\int_ {v _ {0}, \\mathbf {y} _ {\\epsilon}} q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}) \\frac {q (v _ {0} \\mid x)}{q (v _ {0} \\mid x)} \\frac {p _ {\\theta} (\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon}) p _ {\\theta} (\\mathbf {y} _ {\\epsilon})}{q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0})} d v _ {0} d \\mathbf {y} _ {\\epsilon} \\\\ \\geq \\mathbb {E} _ {q (v _ {0} | x) q _ {\\phi} (\\mathbf {y} _ {\\epsilon} | \\mathbf {y} _ {0})} \\left[ \\log \\frac {p _ {\\theta} (\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon})}{q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0})} - \\log q _ {\\phi} (\\mathbf {y} _ {0} ^ {v} \\mid x) + \\log p _ {\\theta} (\\mathbf {y} _ {\\epsilon}) \\right] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 311, + 754, + 488 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "A lower bound for $\\log p_{\\theta}(\\mathbf{y}_{\\epsilon})$ can be derived in a similar manner to eq. 
(7), such that", + "bbox": [ + 171, + 500, + 730, + 513 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\log p _ {\\theta} (\\mathbf {y} _ {\\epsilon}) \\geq \\mathcal {L} _ {\\mathrm {m d m}} (\\mathbf {y} _ {\\epsilon}, \\epsilon) = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} _ {> \\epsilon} | \\mathbf {y} _ {\\epsilon})} \\left[ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) - \\int_ {\\epsilon} ^ {T} \\frac {1}{2} \\| s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} - \\frac {1}{2} \\| s _ {\\theta} - s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} + \\nabla \\cdot f _ {\\phi} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 517, + 834, + 558 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "The choice of $p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})$ is arbitrary, however following Sohl-Dickstein et al. (2015); Song et al. (2021) we let $p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})$ be Gaussian with mean $\\mu_{p_{\\theta},\\epsilon}$ and covariance $\\Sigma_{p_{\\theta},\\epsilon}$ . 
Suppose $q_{\\phi}(\\mathbf{y}_{\\epsilon} \\mid \\mathbf{y}_0) = \\mathcal{N}(\\mathbf{y}_{\\epsilon} \\mid \\mathbf{A}\\mathbf{y}_0, \\Sigma)$ , then we select the following mean $\\mu_{p_{\\theta},\\epsilon}$ and covariance $\\Sigma_{p_{\\theta},\\epsilon}$ for $p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})$", + "bbox": [ + 169, + 561, + 823, + 619 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_ {p _ {\\theta}, \\epsilon} = \\mathbf {A} ^ {- 1} \\Sigma s _ {\\theta} (\\mathbf {y} _ {\\epsilon}, \\epsilon) + \\mathbf {A} ^ {- 1} \\mathbf {y} _ {\\epsilon}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 622, + 612, + 640 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\Sigma_ {p _ {\\theta}, \\epsilon} = \\mathbf {A} ^ {- 1} \\Sigma \\mathbf {A} ^ {- \\top}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 642, + 522, + 660 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "where $\\mu_{p_{\\theta},\\epsilon},\\Sigma_{p_{\\theta},\\epsilon}$ are derived using Tweedie's formula (Efron, 2011) by setting $\\mu_{\\epsilon} = \\mathbb{E}[\\mathbf{y}_0\\mid \\mathbf{y}_{\\epsilon}]$ and $\\Sigma_{\\epsilon} = \\mathrm{Var}(\\mathbf{y}_0\\mid \\mathbf{y}_{\\epsilon})$", + "bbox": [ + 169, + 662, + 823, + 694 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "We next derive this choice as an approximation of the optimal Gaussian likelihood.", + "bbox": [ + 171, + 707, + 717, + 722 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "I.1 LIKELIHOOD DERIVATION", + "text_level": 1, + "bbox": [ + 171, + 738, + 393, + 752 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Suppose $\\mathbf{y}_0\\sim q_0(\\mathbf{y}_0)$ and $\\mathbf{y}_{\\epsilon}\\sim \\mathcal{N}(\\mathbf{y}_{\\epsilon}\\mid A\\mathbf{y}_0,\\Sigma)$ . Here, $A,\\Sigma$ are the mean coefficient and covariance derived from the transition kernel at time $\\epsilon$ . 
We use Tweedie's formula to get the mean and covariance of $\\mathbf{y}_0$ given $\\mathbf{y}_{\\epsilon}$ under $q$ . This mean and covariance feature the true score $\\nabla_{\\mathbf{y}_{\\epsilon}}\\log q(\\mathbf{y}_{\\epsilon})$ . We replace the score with the score model $s_\\theta$ and then set $p_{\\theta}(\\mathbf{y}_0|\\mathbf{y}_{\\epsilon})$ to have the resulting approximate mean and covariance. We make this choice because the optimal $p_{\\theta}(\\mathbf{y}_0|\\mathbf{y}_{\\epsilon})$ equals the true $q(\\mathbf{y}_0|\\mathbf{y}_{\\epsilon})$ as discussed throughout the work.", + "bbox": [ + 169, + 763, + 825, + 849 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Here $\\mathbf{y}_0 = [\\mathbf{x}_0,\\mathbf{v}_0]$ where $\\mathbf{x}_0\\sim q_{\\mathrm{data}}$", + "bbox": [ + 171, + 854, + 419, + 871 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Let $\\eta$ be the natural parameter for the multivariate Gaussian likelihood $\\mathcal{N}(\\mathbf{y}_{\\epsilon} \\mid A\\mathbf{y}_0, \\Sigma)$ . 
Then, Tweedie's formula (Efron, 2011) states that:", + "bbox": [ + 169, + 876, + 823, + 904 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\eta \\mid \\mathbf {y} _ {\\epsilon} ] = \\nabla_ {\\mathbf {y} _ {\\epsilon}} l (\\mathbf {y} _ {\\epsilon}) - \\nabla_ {\\mathbf {y} _ {\\epsilon}} l _ {0} (\\mathbf {y} _ {\\epsilon})\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 907, + 616, + 926 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$l(\\mathbf{y}_{\\epsilon}) = \\log q(\\mathbf{y}_{\\epsilon})$", + "- $s_{\\theta}(\\mathbf{y}_{\\epsilon}, \\epsilon)$ is taken to be the true score $\\nabla_{\\mathbf{y}_{\\epsilon}} \\log q(\\mathbf{y}_{\\epsilon})$ so that $\\nabla_{\\mathbf{y}_{\\epsilon}} l(\\mathbf{y}_{\\epsilon}) = s_{\\theta}(\\mathbf{y}_{\\epsilon}, \\epsilon)$ .", + "- $l_{0}$ is the log of the base distribution defined in the exponential family parameterization." 
+ ], + "bbox": [ + 215, + 103, + 800, + 156 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "The base distribution is a multivariate Gaussian with mean 0 and covariance $\\Sigma$ , therefore $\\nabla_{\\mathbf{y}_{\\epsilon}}l_{0}(\\mathbf{y}_{\\epsilon}) = -\\Sigma^{-1}\\mathbf{y}_{\\epsilon}$", + "bbox": [ + 169, + 167, + 823, + 198 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\eta \\mid \\mathbf {y} _ {\\epsilon} ] = s _ {\\theta} (\\mathbf {y} _ {\\epsilon}, \\epsilon) + \\Sigma^ {- 1} \\mathbf {y} _ {\\epsilon}.\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 204, + 602, + 222 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "However, Tweedie's formula is not directly applicable since our $\\mathbf{y}_{\\epsilon}$ is not directly normal with mean $\\mathbf{y}_0$ . Instead, to derive the conditional mean of $\\mathbf{y}_0$ given $\\mathbf{y}_{\\epsilon}$ , we use the relation $\\eta = \\Sigma^{-1}\\mathbf{A}\\mathbf{y}_0$ and the linearity of conditional expectation to get", + "bbox": [ + 169, + 228, + 825, + 272 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} \\left[ \\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon} \\right] = \\mathbb {E} \\left[ A ^ {- 1} \\Sigma \\eta \\mid \\mathbf {y} _ {\\epsilon} \\right] \\\\ = A ^ {- 1} \\Sigma \\mathbb {E} [ \\eta \\mid \\mathbf {y} _ {\\epsilon} ] \\\\ = A ^ {- 1} \\Sigma \\left(s _ {\\theta} \\left(\\mathbf {y} _ {\\epsilon}, \\epsilon\\right) + \\Sigma^ {- 1} \\mathbf {y} _ {\\epsilon}\\right) \\\\ = A ^ {- 1} \\Sigma s _ {\\theta} \\left(\\mathbf {y} _ {\\epsilon}, \\epsilon\\right) + A ^ {- 1} \\mathbf {y} _ {\\epsilon}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 277, + 633, + 354 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "For the variance, we use the following relation $\\mathbf{y}_{\\epsilon} = A\\mathbf{y}_0 + \\sqrt{\\Sigma}\\epsilon$ , which implies that", + "bbox": [ + 169, + 372, + 732, + 388 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} _ {0} = A ^ {- 1} \\mathbf {y} _ {\\epsilon} - A ^ {- 1} \\sqrt {\\Sigma} \\epsilon\n$$\n", + "text_format": "latex", + "bbox": [ + 444, + 395, + 617, + 412 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {Var} \\left(\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon}\\right) = A ^ {- 1} \\Sigma A ^ {- T}.\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 415, + 566, + 435 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Therefore, for the model posterior distribution $p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})$ we choose a Normal with mean and covariance", + "bbox": [ + 169, + 448, + 823, + 474 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mu_ {p _ {\\theta}, \\epsilon} = A ^ {- 1} \\Sigma s _ {\\theta} (\\mathbf {y} _ {\\epsilon}, \\epsilon) + A ^ {- 1} \\mathbf {y} _ {\\epsilon} \\\\ \\Sigma_ {p _ {\\theta}, \\epsilon} = A ^ {- 1} \\Sigma A ^ {- T} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 483, + 609, + 522 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 27 + } +] \ No newline at end of file diff --git a/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_model.json b/2023/Where to Diffuse, How to 
Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_model.json new file mode 100644 index 0000000000000000000000000000000000000000..9b267b159a20be85fb94b56ba8326daeedc7b542 --- /dev/null +++ b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_model.json @@ -0,0 +1,5349 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.825, + 0.173 + ], + "angle": 0, + "content": "WHERE TO DIFFUSE, HOW TO DIFFUSE, AND HOW TO GET BACK: AUTOMATED LEARNING FOR MULTIVARI-ATE DIFFUSIONS" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.194, + 0.603, + 0.21 + ], + "angle": 0, + "content": "Raghav Singhal\\*,1, Mark Goldstein\\*,1, Rajesh Ranganath\\*,2" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.21, + 0.624, + 0.225 + ], + "angle": 0, + "content": "Courant Institute of Mathematical Sciences1, New York University" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.225, + 0.498, + 0.24 + ], + "angle": 0, + "content": "Center for Data Science2, New York University" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.275, + 0.548, + 0.291 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.307, + 0.77, + 0.545 + ], + "angle": 0, + "content": "Diffusion-based generative models (DBGMs) perturb data to a target noise distribution and reverse this process to generate samples. The choice of noising process, or inference diffusion process, affects both likelihoods and sample quality. For example, extending the inference process with auxiliary variables leads to improved sample quality. 
While there are many such multivariate diffusions to explore, each new one requires significant model-specific analysis, hindering rapid prototyping and evaluation. In this work, we study Multivariate Diffusion Models (MDMs). For any number of auxiliary variables, we provide a recipe for maximizing a lower-bound on the MDMs likelihood without requiring any model-specific analysis. We then demonstrate how to parameterize the diffusion for a specified target noise distribution; these two points together enable optimizing the inference diffusion process. Optimizing the diffusion expands easy experimentation from just a few well-known processes to an automatic search over all linear diffusions. To demonstrate these ideas, we introduce two new specific diffusions as well as learn a diffusion process on the MNIST, CIFAR10, and Imagenet32 datasets. We show learned MDMs match or surpass bits-per-dims (BPDs) relative to fixed choices of diffusions for a given dataset and model architecture." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.57, + 0.341, + 0.587 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.601, + 0.827, + 0.701 + ], + "angle": 0, + "content": "Diffusion-based generative models (DBGMs) perturb data to a target noise distribution and reverse this process to generate samples. They have achieved impressive performance in image generation, editing, translation (Dhariwal & Nichol, 2021; Nichol & Dhariwal, 2021; Sasaki et al., 2021; Ho et al., 2022), conditional text-to-image tasks (Nichol et al., 2021; Ramesh et al., 2022; Saharia et al., 2022) and music and audio generation (Chen et al., 2020; Kong et al., 2020; Mittal et al., 2021). They are often trained by maximizing a lower bound on the log likelihood, featuring an inference process interpreted as gradually \"noising\" the data (Sohl-Dickstein et al., 2015; Ho et al., 2020)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.706, + 0.825, + 0.75 + ], + "angle": 0, + "content": "The choice of this inference process affects both likelihoods and sample quality. On different datasets and models, different inference processes work better; there is no universal best choice of inference, and the choice matters (Song et al., 2020b)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.755, + 0.827, + 0.827 + ], + "angle": 0, + "content": "While some work has improved performance by designing score model architectures (Ho et al., 2020; Kingma et al., 2021; Dhariwal & Nichol, 2021), Dockhorn et al. (2021) instead introduce the critically-damped Langevin diffusion (CLD), showing that significant improvements in sample generation can be gained by carefully designing new processes. CLD pairs each data dimension with an auxiliary \"velocity\" variable and diffuses them jointly using second-order Langevin dynamics." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.832, + 0.829, + 0.903 + ], + "angle": 0, + "content": "A natural question: if introducing new diffusions results in dramatic performance gains, why are there only a handful of diffusions (variance-preserving stochastic differential equation (VPSDE), variance exploding (VE), CLD, sub-VPSDE) used in DBGMs? For instance, are there other auxiliary variable diffusions that would lead to improvements like CLD? This avenue seems promising as auxiliary variables have improved other generative models and inferences, such as normalizing flows" + }, + { + "type": "page_footnote", + "bbox": [ + 0.199, + 0.91, + 0.725, + 0.926 + ], + "angle": 0, + "content": "* Equal Contribution. Correspondence to {rsinghal, goldstein} at nyu.edu." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "(Huang et al., 2020), neural ordinary differential equations (ODEs) (Dupont et al., 2019), hierarchical variational models (Ranganath et al., 2016), ladder variational autoencoder (Sønderby et al., 2016), among others." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.153, + 0.827, + 0.267 + ], + "angle": 0, + "content": "Despite its success, CLD also provides evidence that each new process requires significant model-specific analysis. Deriving the evidence lower bound (ELBO) and training algorithm for diffusions is challenging (Huang et al., 2021; Kingma et al., 2021; Song et al., 2021) and is carried out in a case-by-case manner for new diffusions (Campbell et al., 2022). Auxiliary variables seemingly complicate this process further; computing conditionals of the inference process necessitates solving matrix Lyapunov equations (section 3.3). Deriving the inference stationary distribution—which helps the model and inference match—can be intractable. These challenges limit rapid prototyping and evaluation of new inference processes." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.272, + 0.486, + 0.286 + ], + "angle": 0, + "content": "Concretely, training a diffusion model requires:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.299, + 0.825, + 0.328 + ], + "angle": 0, + "content": "(R1): Selecting an inference and model process pair such that the inference process converges to the model prior" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.334, + 0.44, + 0.35 + ], + "angle": 0, + "content": "(R2): Deriving the ELBO for this pair" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.355, + 0.825, + 0.384 + ], + "angle": 0, + "content": "(R3): Estimating the ELBO and its gradients by deriving and computing the inference process' transition kernel" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.299, + 0.825, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.825, + 0.455 + ], + "angle": 0, + "content": "In this work, we introduce Multivariate Diffusion Models (MDMs) and a method for training and evaluating them. MDMs are diffusion-based generative models trained with auxiliary variables. We provide a recipe for training MDMs beyond specific instantiations—like VPSDE and CLD—to all linear inference processes that have a stationary distribution, with any number of auxiliary variables." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.46, + 0.827, + 0.532 + ], + "angle": 0, + "content": "First, we bring results from gradient-based MCMC (Ma et al., 2015) to diffusion modeling to construct MDMs that converge to a chosen model prior (R1); this tightens the ELBO. Secondly, for any number of auxiliary variables, we derive the MDM ELBO (R2). Finally, we show that the transition kernel of linear MDMs, necessary for the ELBO, can be computed automatically and generically, for higher-dimensional auxiliary systems (R3)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.537, + 0.827, + 0.595 + ], + "angle": 0, + "content": "With these tools, we explore a variety of new inference processes for diffusion-based generative models. We then note that the automatic transitions and fixed stationary distributions facilitate directly learning the inference to maximize the MDM ELBO. Learning turns diffusion model training into a search not only over score models but also inference processes, at no extra derivational cost." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.611, + 0.738, + 0.627 + ], + "angle": 0, + "content": "Methodological Contributions. In summary, our methodological contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.638, + 0.825, + 0.667 + ], + "angle": 0, + "content": "1. Deriving ELBOs for training and evaluating multivariate diffusion models (MDMs) with auxiliary variables." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.674, + 0.825, + 0.729 + ], + "angle": 0, + "content": "2. Showing that the diffusion transition covariance does not need to be manually derived for each new diffusion. We instead demonstrate that a matrix factorization technique, previously unused in diffusion models, can automatically compute the covariance analytically for any linear MDM." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.737, + 0.825, + 0.779 + ], + "angle": 0, + "content": "3. Using results from gradient-based Markov chain Monte Carlo (MCMC) to construct MDMs with a complete parameterization of inference processes whose stationary distribution matches the model prior." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.786, + 0.825, + 0.828 + ], + "angle": 0, + "content": "4. Combining the above into an algorithm called Automatic Multivariate Diffusion Training (AMDT) that enables training without diffusion-specific derivations. AMDT enables training score models for any linear diffusion, including optimizing the diffusion and score jointly." 
+ }, + { + "type": "list", + "bbox": [ + 0.211, + 0.638, + 0.825, + 0.828 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.841, + 0.827, + 0.926 + ], + "angle": 0, + "content": "To demonstrate these ideas, we develop MDMs with two specific diffusions as well as learned multivariate diffusions. The specific diffusions are accelerated Langevin diffusion (ALDA) (introduced in Mou et al. (2019) as a higher-order scheme for gradient-based MCMC) and an alteration, modified accelerated Langevin diffusion (MALDA). Previously, using these diffusions for generative modeling would require significant model-specific analysis. Instead, AMDT for these diffusions is derivation-free." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Empirical contributions. We train MDMs on the MNIST,Imagenet32 and CIFAR-10 datasets. In the experiments, we show that:" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.145, + 0.825, + 0.174 + ], + "angle": 0, + "content": "1. Training new and existing fixed diffusions, such as ALDA and MALDA, is easy with the proposed algorithm AMDT." + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.178, + 0.825, + 0.22 + ], + "angle": 0, + "content": "2. Using AMDT to learn the choice of diffusion for the MDM matches or surpasses the performance of fixed choices of diffusion process; sometimes the learned diffusion and VPSDE do best; other times the learned diffusion and CLD do best." + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.226, + 0.827, + 0.267 + ], + "angle": 0, + "content": "3. 
There are new and existing MDMs, trained and evaluated with the MDM ELBO, that account for as much performance improvement over VPSDE as a three-fold increase in score model size for a fixed univariate diffusion." + }, + { + "type": "list", + "bbox": [ + 0.21, + 0.145, + 0.827, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.28, + 0.826, + 0.337 + ], + "angle": 0, + "content": "These findings affirm that the choice of diffusion affects the optimization problem, and that learning the choice bypasses the process of choosing diffusions for each new dataset and score architecture. We additionally show the utility of the MDM ELBO by showing on a dataset that CLD achieves better bits-per-dims (BPDs) than previously reported with the probability flow ODE (Dockhorn et al., 2021)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.358, + 0.264, + 0.373 + ], + "angle": 0, + "content": "2 SETUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.389, + 0.825, + 0.432 + ], + "angle": 0, + "content": "We present diffusions by starting with the generative model and then describing its likelihood lower bound (Sohl-Dickstein et al., 2015; Huang et al., 2021; Kingma et al., 2021). Diffusions sample from a model prior \\(\\mathbf{z}_0\\sim \\pi_\\theta\\) and then evolve a continuous-time stochastic process \\(\\mathbf{z}_t\\in \\mathbb{R}^d\\):" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.44, + 0.825, + 0.457 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {z} = h _ {\\theta} (\\mathbf {z}, t) d t + \\beta_ {\\theta} (t) d \\mathbf {B} _ {t}, \\quad t \\in [ 0, T ] \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.464, + 0.825, + 0.522 + ], + "angle": 0, + "content": "where \\(\\mathbf{B}_t\\) is a \\(d\\)-dimensional Brownian motion. 
The model is trained so that \\(\\mathbf{z}_T\\) approximates the data \\(\\mathbf{x} \\sim q_{\\mathrm{data}}\\).1 Maximum likelihood training of diffusion models is intractable (Huang et al., 2021; Song et al., 2021; Kingma et al., 2021). Instead, they are trained using a variational lower bound on \\(\\log p_{\\theta}(\\mathbf{z}_T = x)\\). The bound requires an inference process \\(q_{\\phi}(\\mathbf{y}_s | \\mathbf{x} = x)\\):2" + }, + { + "type": "equation", + "bbox": [ + 0.353, + 0.531, + 0.825, + 0.549 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = f _ {\\phi} (\\mathbf {y}, s) d s + g _ {\\phi} (s) d \\widehat {\\mathbf {B}} _ {s}, \\quad s \\in [ 0, T ] \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.558, + 0.826, + 0.63 + ], + "angle": 0, + "content": "where \\(\\widehat{\\mathbf{B}}_s\\) is another Brownian motion independent of \\(\\mathbf{B}_t\\). The inference process is usually taken to be specified rather than learned, and chosen to be i.i.d. for each \\(y_{tj}\\) conditional on each \\(x_j\\). This leads to the interpretation of the \\(y_{tj}\\) as noisy versions of features \\(x_j\\) (Ho et al., 2020). While the diffusion ELBO is challenging to derive in general, Huang et al. (2021); Song et al. 
(2021) show that when the model process takes the form:" + }, + { + "type": "equation", + "bbox": [ + 0.276, + 0.636, + 0.825, + 0.656 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {z} = \\left[ g _ {\\phi} ^ {2} (T - t) s _ {\\theta} (\\mathbf {z}, T - t) - f _ {\\phi} (\\mathbf {z}, T - t) \\right] d t + g _ {\\phi} (T - t) d \\mathbf {B} _ {t}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.662, + 0.258, + 0.675 + ], + "angle": 0, + "content": "the ELBO is:" + }, + { + "type": "equation", + "bbox": [ + 0.199, + 0.682, + 0.825, + 0.723 + ], + "angle": 0, + "content": "\\[\n\\log p _ {\\theta} (x) \\geq \\mathcal {L} ^ {\\mathrm {i s m}} (x) = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) + \\int_ {0} ^ {T} - \\frac {1}{2} \\| s _ {\\theta} \\| _ {g _ {\\phi} ^ {2}} ^ {2} - \\nabla \\cdot (g _ {\\phi} ^ {2} s _ {\\theta} - f _ {\\phi}) d s \\right], \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.825, + 0.777 + ], + "angle": 0, + "content": "where \\(f_{\\phi}, g_{\\phi}, s_{\\theta}\\) are evaluated at \\((\\mathbf{y}_s, s)\\), \\(\\|\\mathbf{x}\\|_{\\mathbf{A}}^2 = \\mathbf{x}^\\top \\mathbf{A}\\mathbf{x}\\) and \\(g^2 = gg^\\top\\). Equation (4) features the Implicit Score Matching (ISM) loss (Song et al., 2020a), and can be re-written as an ELBO \\(\\mathcal{L}^{\\mathrm{dsm}}\\) featuring Denoising Score Matching (DSM) (Vincent, 2011; Song et al., 2020b), see appendix F.1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.797, + 0.634, + 0.812 + ], + "angle": 0, + "content": "3 A RECIPE FOR MULTIVARIATE DIFFUSION MODELS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.825, + 0.872 + ], + "angle": 0, + "content": "As has been shown in prior work (Song et al., 2021; Dockhorn et al., 2021), the choice of diffusion matters. 
Drawing on principles from previous generative models (section 6), we can consider a wide class of diffusion inference processes by constructing them using auxiliary variables." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.911 + ], + "angle": 0, + "content": "\\(^{1}\\)Following Huang et al. (2021); Dockhorn et al. (2021) we integrate all processes in forward time 0 to \\(T\\). It may be helpful to think of an additional variable \\(\\hat{\\mathbf{x}}_t \\triangleq \\mathbf{z}_{T-t}\\) so that \\(\\hat{\\mathbf{x}}_0\\) approximates \\(\\mathbf{x} \\sim q_{\\mathrm{data}}\\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.194, + 0.911, + 0.629, + 0.924 + ], + "angle": 0, + "content": "2We use \\(\\mathbf{y}\\) as the inference variable over the same space as the model's \\(\\mathbf{z}\\)." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "At first glance, training such diffusions can seem challenging. First, one needs an ELBO that includes auxiliary variables. This ELBO will require sampling from the transition kernel, and setting the model prior to the specified inference stationary distribution. But doing such diffusion-specific analysis manually is challenging and hinders rapid prototyping." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.825, + 0.239 + ], + "angle": 0, + "content": "In this section we show how to address these challenges and introduce an algorithm, AMDT, to simplify and automate modeling with MDMs. 
AMDT can be used to train new and existing diffusions, including those with auxiliary variables, and including those that learn the inference process. In appendix A we discuss how the presented methods can also be used to automate and improve simplified score matching and noise prediction objectives used to train diffusion models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.256, + 0.5, + 0.27 + ], + "angle": 0, + "content": "3.1 MULTIVARIATE MODEL AND INFERENCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.282, + 0.825, + 0.314 + ], + "angle": 0, + "content": "For the \\(j^{th}\\) data coordinate at each time \\(t\\), MDMs pair \\(\\mathbf{z}_{tj} \\in \\mathbb{R}\\) with a vector of auxiliary variables \\(\\mathbf{v}_{tj} \\in \\mathbb{R}^{K-1}\\) into a joint vector \\(\\mathbf{u}_t\\) and diffuse in the extended space:" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.322, + 0.826, + 0.356 + ], + "angle": 0, + "content": "\\[\n\\mathbf {u} _ {0} \\sim \\pi_ {\\theta}, \\quad d \\mathbf {u} = h _ {\\theta} \\left(\\mathbf {u} _ {t} = \\left[ \\begin{array}{l} \\mathbf {z} _ {t} \\\\ \\mathbf {v} _ {t} \\end{array} \\right], t\\right) d t + \\beta_ {\\theta} (t) d \\mathbf {B} _ {t}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.365, + 0.825, + 0.429 + ], + "angle": 0, + "content": "MDMs model the data \\(\\mathbf{x}\\) with \\(\\mathbf{z}_T\\), a coordinate in \\(\\mathbf{u}_T \\sim p_\\theta\\). For the \\(j^{th}\\) feature \\(\\mathbf{x}_j\\), each \\(\\mathbf{u}_{tj} \\in \\mathbb{R}^K\\) consists of a \"data\" dimension \\(\\mathbf{u}_{tj}^z\\) and auxiliary variable \\(\\mathbf{u}_{tj}^v\\). Therefore \\(\\mathbf{u} \\in \\mathbb{R}^{dK}\\). We extend the drift coefficient \\(h_\\theta\\) from a function in \\(\\mathbb{R}^d \\times \\mathbb{R}_+ \\to \\mathbb{R}^d\\) to the extended space \\(\\mathbb{R}^{dK} \\times \\mathbb{R}_+ \\to \\mathbb{R}^{dK}\\). 
We likewise extend the diffusion coefficient to a matrix \\(\\beta_\\theta\\) acting on Brownian motion \\(\\mathbf{B}_t \\in \\mathbb{R}^{dK}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.826, + 0.477 + ], + "angle": 0, + "content": "Because the MDM model is over the extended space, the inference distribution \\(\\mathbf{y}\\) must be too. We then set \\(q(\\mathbf{y}_0^v |\\mathbf{y}_0^z = x)\\) to any chosen initial distribution, e.g. \\(\\mathcal{N}(\\mathbf{0},\\mathbf{I})\\) and discuss this choice in section 4. Then \\(\\mathbf{y}_s\\) evolves according to the auxiliary variable inference process:" + }, + { + "type": "equation", + "bbox": [ + 0.395, + 0.486, + 0.825, + 0.505 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = f _ {\\phi} (\\mathbf {y}, s) d s + g _ {\\phi} (s) d \\widehat {\\mathbf {B}} _ {s}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.513, + 0.825, + 0.543 + ], + "angle": 0, + "content": "where the inference drift and diffusion coefficients \\( f_{\\phi}, g_{\\phi} \\) are now over the extended space \\( \\mathbf{y} = [\\mathbf{y}^z, \\mathbf{y}^v] \\). The function \\( f_{\\phi} \\) lets the \\( z \\) and \\( v \\) coordinates of \\( \\mathbf{y}_{tj} \\) interact in the inference process." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.56, + 0.28, + 0.573 + ], + "angle": 0, + "content": "ASSUMPTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.587, + 0.826, + 0.643 + ], + "angle": 0, + "content": "This work demonstrates how to parameterize time-varying Itô processes, used for diffusion modeling, to have a stationary distribution that matches the given model prior. 
To take advantage of the automatic transition kernels also presented, the inferences considered for modeling are linear time-varying processes and take the form:" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.652, + 0.6, + 0.669 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = \\mathbf {A} _ {\\phi} (s) \\mathbf {y} d s + g _ {\\phi} (s) d \\mathbf {B} _ {s}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.676, + 0.779, + 0.693 + ], + "angle": 0, + "content": "where \\(\\mathbf{A}_{\\phi}(s):\\mathbb{R}_{+}\\to dK\\times dK\\) and \\(g_{\\phi}(s):\\mathbb{R}_{+}\\to dK\\times dK\\) are matrix-valued functions." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.71, + 0.332, + 0.724 + ], + "angle": 0, + "content": "3.2 ELBO FOR MDMS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.825, + 0.765 + ], + "angle": 0, + "content": "We now show how to train MDMs to optimize a lower bound on the log likelihood of the data. Like in the univariate case, we use the parameterization in eq. (3) to obtain a tractable ELBO." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.77, + 0.701, + 0.786 + ], + "angle": 0, + "content": "Theorem 1. 
The MDM log marginal likelihood of the data is lower-bounded by:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.793, + 0.826, + 0.9 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\log p _ {\\theta} (x) \\geq \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\underbrace {\\log \\pi_ {\\theta} (\\mathbf {y} _ {T})} _ {\\ell_ {T}} - \\int_ {0} ^ {T} \\frac {1}{2} \\| s _ {\\theta} \\| _ {g _ {\\phi} ^ {2}} ^ {2} + \\nabla \\cdot (g _ {\\phi} ^ {2} s _ {\\theta} - f _ {\\phi}) d s - \\underbrace {\\log q _ {\\phi} (\\mathbf {y} _ {0} ^ {v} | x)} _ {\\ell_ {q}} \\right] \\quad (\\mathcal {L} ^ {m i s m}) \\\\ = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\ell_ {T} + \\int_ {0} ^ {T} \\frac {1}{2} \\| s _ {\\phi} \\| _ {g _ {\\phi} ^ {2}} ^ {2} - \\frac {1}{2} \\| s _ {\\theta} - s _ {\\phi} \\| _ {g _ {\\phi} ^ {2}} ^ {2} + (\\nabla \\cdot f _ {\\phi}) d s - \\ell_ {q} \\right] \\quad \\left(\\mathcal {L} ^ {m d s m}\\right). \\tag {7} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.766, + 0.927 + ], + "angle": 0, + "content": "where divergences and gradients are taken with respect to \\(\\mathbf{y}_s\\) and \\(s_{\\phi} = \\nabla_{\\mathbf{y}_s}\\log q_{\\phi}(\\mathbf{y}_s|x)\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.177 + ], + "angle": 0, + "content": "Proof. The proof for the MDM ISM ELBO \\(\\mathcal{L}^{\\mathrm{mism}}\\) is in appendix F. In short, we introduce auxiliary variables, apply Theorem 1 of Huang et al. (2021) (equivalently, Theorem 3 of Song et al. (2021) or appendix E of Kingma et al. 
(2021)) to the joint space, and then apply an additional variational bound to \\(\\mathbf{v}_0\\). The MDM DSM ELBO \\(\\mathcal{L}^{\\mathrm{mdsm}}\\) is likewise derived in appendix F, similarly to Huang et al. (2021); Song et al. (2021), but extended to multivariate diffusions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.189, + 0.827, + 0.274 + ], + "angle": 0, + "content": "We train MDMs by estimating the gradients of \\(\\mathcal{L}^{\\mathrm{mdsm}}\\), as estimates of \\(\\mathcal{L}^{\\mathrm{mism}}\\) can be computationally prohibitive. For numerical stability, the integral in eq. (7) is computed on \\([\\epsilon, T]\\) rather than \\([0, T]\\). One can regard this as a bound for a variable \\(\\mathbf{u}_{\\epsilon}\\). To maintain a proper likelihood bound for the data, one can choose a likelihood \\(\\mathbf{u}_0|\\mathbf{u}_{\\epsilon}\\) and compose bounds as we demonstrate in appendix I. We report the ELBO with this likelihood term, which plays the same role as the discretized Gaussian in Nichol & Dhariwal (2021) and Tweedie's formula in Song et al. (2021)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.289, + 0.6, + 0.305 + ], + "angle": 0, + "content": "3.3 INGREDIENT 1: COMPUTING THE TRANSITION \\( q_{\\phi}(\\mathbf{y}_s|x) \\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.314, + 0.825, + 0.358 + ], + "angle": 0, + "content": "To estimate eq. (7) and its gradients, we need samples from \\( q(\\mathbf{y}_s|x) \\) and to compute \\( \\nabla \\log q(\\mathbf{y}_s|x) \\). While an intractable problem for MDMs in general, we provide two ingredients for tightening and optimizing these bounds in a generic fashion for linear inference MDMs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.364, + 0.825, + 0.391 + ], + "angle": 0, + "content": "We first show how to automate computation of \\( q(\\mathbf{y}_s|\\mathbf{y}_0) \\) and then \\( q(\\mathbf{y}_s|x) \\). 
For linear MDMs of the form:" + }, + { + "type": "equation", + "bbox": [ + 0.404, + 0.394, + 0.593, + 0.41 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = \\mathbf {A} (s) \\mathbf {y} d s + g (s) d \\mathbf {B} _ {s},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.411, + 0.825, + 0.44 + ], + "angle": 0, + "content": "the transition kernel \\(q(\\mathbf{y}_s|\\mathbf{y}_0)\\) is Gaussian (Särkkä & Solin, 2019). Let \\(f(\\mathbf{y}, s) = \\mathbf{A}(s)\\mathbf{y}\\). Then, the mean and covariance are solutions to the following ODEs:" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.442, + 0.501, + 0.459 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {m} _ {s | 0} / d s = \\mathbf {A} (s) \\mathbf {m} _ {s | 0}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.461, + 0.825, + 0.48 + ], + "angle": 0, + "content": "\\[\nd \\boldsymbol {\\Sigma} _ {s | 0} / d s = \\mathbf {A} (s) \\boldsymbol {\\Sigma} _ {s | 0} + \\boldsymbol {\\Sigma} _ {s | 0} \\mathbf {A} ^ {\\top} (s) + g ^ {2} (s). \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.481, + 0.42, + 0.495 + ], + "angle": 0, + "content": "The mean can be solved analytically:" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.497, + 0.825, + 0.541 + ], + "angle": 0, + "content": "\\[\n\\mathbf {m} _ {s \\mid 0} = \\exp \\left[ \\int_ {0} ^ {s} \\mathbf {A} (\\nu) d \\nu \\right] \\mathbf {y} _ {0} \\underbrace {= \\exp (s \\mathbf {A}) \\mathbf {y} _ {0}} _ {\\text {no integration if } \\mathbf {A} (\\nu) = \\mathbf {A}}. \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.543, + 0.825, + 0.572 + ], + "angle": 0, + "content": "The covariance equation does not have as simple a solution as eq. (9) because the unknown matrix \\(\\pmb{\\Sigma}_{s|0}\\) is being multiplied both from the left and the right." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.578, + 0.825, + 0.635 + ], + "angle": 0, + "content": "Instead of solving eq. 
(8) for a specific diffusion manually, as done in previous work (e.g. pages 50-54 of Dockhorn et al. (2021)), we show that a matrix factorization technique (Särkkä & Solin (2019), sec. 6.3) previously unused in diffusion-based generative models can automatically compute \\(\\Sigma_{s|0}\\) generically for any linear MDM. Define \\(\\mathbf{C}_s\\), \\(\\mathbf{H}_s\\) that evolve according to:" + }, + { + "type": "equation", + "bbox": [ + 0.353, + 0.636, + 0.825, + 0.67 + ], + "angle": 0, + "content": "\\[\n\\frac {d}{d s} \\begin{pmatrix} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{pmatrix} = \\begin{pmatrix} \\mathbf {A} (s) & g ^ {2} (s) \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} (s) \\end{pmatrix} \\begin{pmatrix} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{pmatrix}, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.672, + 0.825, + 0.7 + ], + "angle": 0, + "content": "then \\(\\mathbf{\\Sigma}_{s|0} = \\mathbf{C}_s\\mathbf{H}_s^{-1}\\) for \\(\\mathbf{C}_0 = \\mathbf{\\Sigma}_0\\) and \\(\\mathbf{H}_0 = \\mathbf{I}\\) (Appendix D). These equations can be solved in closed-form," + }, + { + "type": "equation", + "bbox": [ + 0.228, + 0.701, + 0.825, + 0.763 + ], + "angle": 0, + "content": "\\[\n\\begin{pmatrix} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{pmatrix} = \\exp \\left[ \\begin{pmatrix} [ \\mathbf {A} ] _ {s} & [ g ^ {2} ] _ {s} \\\\ \\mathbf {0} & - [ \\mathbf {A} ^ {\\top} ] _ {s} \\end{pmatrix} \\right] \\begin{pmatrix} \\boldsymbol {\\Sigma} _ {0} \\\\ \\mathbf {I} \\end{pmatrix} \\underbrace {= \\exp \\left[ s \\begin{pmatrix} \\mathbf {A} & g ^ {2} \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} \\end{pmatrix} \\right]} _ {\\text {no integration if } \\mathbf {A} (\\nu) = \\mathbf {A}, g (\\nu) = g} \\begin{pmatrix} \\boldsymbol {\\Sigma} _ {0} \\\\ \\mathbf {I} \\end{pmatrix}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.765, + 0.65, + 0.783 + ], + "angle": 0, + "content": "where \\([\\mathbf{A}]_s = \\int_0^s\\mathbf{A}(\\nu)d\\nu\\). 
To condition on \\(\\mathbf{y}_0 = (x,v)\\), we set \\(\\pmb {\\Sigma}_0 = \\mathbf{0}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.796, + 0.797, + 0.813 + ], + "angle": 0, + "content": "Computing \\( q_{\\phi}(\\mathbf{y}_s|x) \\). For the covariance \\( \\pmb{\\Sigma}_{s|0} \\), to condition on \\( x \\) instead of \\( \\mathbf{y}_0 \\), we set \\( \\pmb{\\Sigma}_0 \\) to" + }, + { + "type": "equation", + "bbox": [ + 0.431, + 0.815, + 0.564, + 0.848 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Sigma} _ {0} = \\left( \\begin{array}{c c} 0 & 0 \\\\ 0 & \\boldsymbol {\\Sigma} _ {\\mathbf {v} _ {0}} \\end{array} \\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.849, + 0.825, + 0.865 + ], + "angle": 0, + "content": "The mean is given by the same expression as for \\( q(\\mathbf{y}_s|\\mathbf{y}_0) \\), but with a different initial condition:" + }, + { + "type": "equation", + "bbox": [ + 0.363, + 0.866, + 0.825, + 0.9 + ], + "angle": 0, + "content": "\\[\n\\mathbf {m} _ {s \\mid 0} = \\exp \\left[ \\int_ {0} ^ {s} \\mathbf {A} (\\nu) d \\nu \\right] \\binom {x} {\\mathbb {E} _ {q} [ \\mathbf {y} _ {0} ^ {v} | x ]} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.9, + 0.391, + 0.915 + ], + "angle": 0, + "content": "See appendix D for more details." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.103, + 0.55, + 0.119 + ], + "angle": 0, + "content": "Algorithm 1 Automatic Multivariate Diffusion Training" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.121, + 0.825, + 0.306 + ], + "angle": 0, + "content": "Input: Data \\(\\{x_i\\}\\), inference process matrices \\(\\mathbf{Q}_{\\phi}, \\mathbf{D}_{\\phi}\\), model prior \\(\\pi_{\\theta}\\), initial distribution \\(q_{\\phi}(\\mathbf{y}_0^v | x)\\), and score model architecture \\(s_\\theta\\) \nReturns: Trained score model \\(s_\\theta\\) \nwhile \\(s_\\theta\\) not converged do \n Sample \\(x \\sim \\sum_{i=1}^{N} \\frac{1}{N} \\delta_{x_i}\\), \\(v_0 \\sim q_{\\phi}(\\mathbf{y}_0^v | x)\\) \n Sample \\(\\mathbf{s} \\sim \\mathbf{U}[0, T]\\) and \\(\\mathbf{y}_s, \\mathbf{y}_T \\sim q_{\\phi}(\\mathbf{y}_s | x)\\) using algorithm 2 \n Estimate the stochastic gradient of the MDM ELBO, \\(\\nabla_\\theta \\mathcal{L}(\\theta, \\phi)\\), using eq. (7) \n \\(\\theta \\leftarrow \\theta + \\alpha \\nabla_\\theta \\mathcal{L}(\\theta, \\phi)\\). \n if learning inference then \n \\(\\phi \\leftarrow \\phi + \\alpha \\nabla_\\phi \\mathcal{L}(\\theta, \\phi)\\) \nend if \nend while \nOutput \\(s_\\theta\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.332, + 0.554, + 0.47 + ], + "angle": 0, + "content": "A fast and simple algorithm. We show in algorithm 2 (appendix H) that computing the transition kernel only requires knowing \\( f, g \\) and requires no diffusion-specific analysis. For \\( K - 1 \\) auxiliary variables, \\( \\mathbf{A}, g \\) are \\( K \\times K \\). Like for scalar diffusions, these parameters are shared across data coordinates. 
This means matrix exponentials and inverses are done on \\( K \\times K \\) matrices, where \\( K \\) is only 2 or 3 in our experiments. In table 1, we compare the time to sample a batch of size 256 from the transition kernel for CIFAR 10 and MNIST. The table shows the extra computa" + }, + { + "type": "text", + "bbox": [ + 0.565, + 0.333, + 0.825, + 0.391 + ], + "angle": 0, + "content": "Table 1: Runtime Comparison: we compare the run time of sampling from the CLD diffusion analytically versus using the automated algorithm." + }, + { + "type": "table", + "bbox": [ + 0.571, + 0.401, + 0.808, + 0.444 + ], + "angle": 0, + "content": "
MethodCIFAR-10MNIST
Analytical0.0270.0062
Automated0.0290.007
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.47, + 0.825, + 0.5 + ], + "angle": 0, + "content": "tional cost of the automated algorithm is negligible. This automation likewise applies to simplified score matching and noise prediction objectives, since all rely on \\(q_{\\phi}(\\mathbf{y}_s|x)\\) (appendix A)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.515, + 0.51, + 0.529 + ], + "angle": 0, + "content": "3.4 INGREDIENT 2: MDM PARAMETERIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.542, + 0.825, + 0.571 + ], + "angle": 0, + "content": "The MDM ELBO (eq. (7)) is tighter when the inference \\(\\mathbf{y}_T\\) tends toward the model's prior \\(\\pi_{\\theta}\\). Here we construct inference processes with the model prior \\(\\pi_{\\theta}\\) as a specified stationary distribution \\(q_{\\infty}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.577, + 0.825, + 0.62 + ], + "angle": 0, + "content": "Ma et al. (2015) provide a complete recipe for constructing gradient-based MCMC samplers; the recipe constructs non-linear time-homogeneous Itô processes with a given stationary distribution, and show that the parameterization spans all such Itô processes with that stationary distribution." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.626, + 0.825, + 0.697 + ], + "angle": 0, + "content": "Diffusion models usually have time-varying drift and diffusion coefficients (e.g. use of the \\(\\beta(t)\\) function). To build diffusion models that match the model prior, we first extend Theorem 1 from Ma et al. (2015) to construct non-linear Itô processes with time-varying drift and diffusion coefficients with a given stationary distribution (Appendix C). Then, to keep transitions tractable (per Section 3.3), we specialize this result to linear Itô diffusions." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.703, + 0.825, + 0.791 + ], + "angle": 0, + "content": "We directly state the result for linear time-varying diffusions with stationary distributions. The parameterization requires a skew-symmetric matrix \\(-\\mathbf{Q}(s) = \\mathbf{Q}(s)^{\\top}\\), a positive semi-definite matrix \\(\\mathbf{D}(s)\\), and a function \\(\\nabla H(\\mathbf{y})\\) such that the desired stationary distribution \\(q_{\\infty}\\) is proportional to \\(\\exp[-H(\\mathbf{y})]\\). Linear Itô diffusions have Gaussian stationary distributions (Särkkä & Solin, 2019) meaning that \\(\\nabla H\\) is linear and can be expressed as \\(\\mathbf{S}\\mathbf{y}\\) for some matrix \\(\\mathbf{S}\\). For a matrix \\(\\mathbf{A}\\), let \\(\\sqrt{\\mathbf{A}}\\) refer to the matrix square root defined by \\(\\mathbf{a} = \\sqrt{\\mathbf{A}} \\Longleftrightarrow \\mathbf{A} = \\mathbf{aa}^{\\top}\\). Then, the Itô diffusion:" + }, + { + "type": "equation", + "bbox": [ + 0.338, + 0.798, + 0.825, + 0.845 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = \\underbrace {- \\left[ \\mathbf {Q} (s) + \\mathbf {D} (s) \\right] \\mathbf {S y}} _ {f (\\mathbf {y}, s)} d s + \\underbrace {\\sqrt {2 \\mathbf {D} (s)}} _ {g (s)} d \\widehat {\\mathbf {B}} _ {s}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.854, + 0.826, + 0.926 + ], + "angle": 0, + "content": "has Gaussian stationary distribution \\(\\mathcal{N}(\\mathbf{0},\\mathbf{S}^{-1})\\) where \\(\\mathbf{Q}(s),\\mathbf{D}(s)\\) and \\(\\mathbf{S}\\) are parameters. For a discussion of convergence to the stationary distribution, as well as skew-symmetric and positive semi-definite parameterizations, see appendix C, where we also show that existing diffusion processes such as VPSDE and CLD are included in \\(\\mathbf{Q} / \\mathbf{D}\\) parameterization. We display the ELBO in terms of \\(\\mathbf{Q} / \\mathbf{D}\\) in appendix G and an algorithm in appendix H." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.177 + ], + "angle": 0, + "content": "For score matching and noise prediction losses and a given \\( q_{\\phi} \\), achieving a minimizing value with respect to \\( s_{\\theta} \\) does not imply that the generative model score will match the inference score. Modeling the data also requires the marginal distribution of \\( q_{\\phi, T} \\) to approximate \\( \\pi \\). When \\( q_{\\phi} \\) is constant, it is important to confirm the stationary distribution is appropriately set, and the tools used here for the ELBO can be used to satisfy this requirement for score matching and noise prediction (appendix A)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.191, + 0.473, + 0.205 + ], + "angle": 0, + "content": "3.5 LEARNING THE INFERENCE PROCESS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.217, + 0.825, + 0.288 + ], + "angle": 0, + "content": "The choice of diffusion matters, and the ELBOs in eq. (7) have no requirement for fixed \\( q_{\\phi} \\). We therefore learn the inference process jointly with \\( s_{\\theta} \\). Under linear transitions (ingredient 1), no algorithmic details change as the diffusion changes during training. Under stationary parameterization (ingredient 2), we can learn without the stationary distribution going awry. In the experiments, learning matches or surpasses BPDs of fixed diffusions for a given dataset and score architecture." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.293, + 0.826, + 0.323 + ], + "angle": 0, + "content": "In \\(\\mathcal{L}^{\\mathrm{mdsm}}\\) or \\(\\mathcal{L}^{\\mathrm{mism}}\\), \\(q_{\\phi, \\infty}\\) may be set to equal \\(\\pi_{\\theta}\\), but it is \\(\\mathbf{y}_T \\sim q_{\\phi, T}\\) for the chosen \\(T\\) that is featured in the ELBO. Learning \\(q_{\\phi}\\) can choose \\(\\mathbf{y}_T\\) to reduce the cross-entropy:" + }, + { + "type": "equation", + "bbox": [ + 0.415, + 0.326, + 0.825, + 0.345 + ], + "angle": 0, + "content": "\\[\n- \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} _ {T} | x)} [ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) ]. \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.347, + 0.825, + 0.391 + ], + "angle": 0, + "content": "Minimizing eq. (14) will tighten the ELBO for any \\( s_\\theta \\). Next, \\( q_\\phi \\) is featured in the remaining terms that feature \\( s_\\theta \\); optimizing for \\( q_\\phi \\) will tighten and improve the ELBO alongside \\( s_\\theta \\). Finally, \\( q_\\phi \\) is featured in the expectations and the \\( -\\log q_\\phi \\) term:" + }, + { + "type": "equation", + "bbox": [ + 0.262, + 0.395, + 0.826, + 0.427 + ], + "angle": 0, + "content": "\\[\n\\log p _ {\\theta} \\left(\\mathbf {u} _ {T} ^ {z} = x\\right) \\geq \\underbrace {\\mathbb {E} _ {q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} = v \\mid x\\right)}} \\left[ \\left(\\mathcal {L} ^ {\\mathrm {dsm}} \\text { or } \\mathcal {L} ^ {\\mathrm {ism}}\\right) \\underbrace {- \\log q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} = v \\mid x\\right)} \\right] \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.438, + 0.825, + 0.468 + ], + "angle": 0, + "content": "The \\(q_{\\phi}(\\mathbf{y}_0^v |x)\\) terms impose an optimality condition that \\(p_{\\theta}(\\mathbf{u}_T^v |\\mathbf{u}_T^z) = q_{\\phi}(\\mathbf{y}_0^v |\\mathbf{y}_0^z)\\) (appendix E). 
When it is satisfied, no looseness in the ELBO is due to the initial time zero auxiliary variables." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.473, + 0.825, + 0.545 + ], + "angle": 0, + "content": "To learn, \\(\\mathbf{Q},\\mathbf{D}\\) need to be specified with parameters \\(\\phi\\) that enable gradients. We keep \\(\\mathbf{S}\\) fixed at inverse covariance of \\(\\pi_{\\theta}\\). The transition kernel \\(q_{\\phi}(\\mathbf{y}_s|x)\\) depends on \\(\\mathbf{Q},\\mathbf{D}\\) through its mean and covariance. Gaussian distributions permit gradient estimation with reparameterization or score-function gradients (Kingma & Welling, 2013; Ranganath et al., 2014; Rezende & Mohamed, 2015; Titsias & Lázaro-Gredilla, 2014). Reparameterization is accomplished via:" + }, + { + "type": "equation", + "bbox": [ + 0.431, + 0.55, + 0.825, + 0.567 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} _ {s} = \\mathbf {m} _ {s | 0} + \\mathbf {L} _ {s | 0} \\epsilon \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.572, + 0.825, + 0.604 + ], + "angle": 0, + "content": "where \\(\\epsilon \\sim \\mathcal{N}(0, I_{dK})\\) and \\(\\mathbf{L}_{s|0}\\) satisfies \\(\\mathbf{L}_{s|0} \\mathbf{L}_{s|0}^{\\top} = \\boldsymbol{\\Sigma}_{s|0}\\), derived using coordinate-wise Cholesky decomposition. Gradients flow through eq. (16) from \\(\\mathbf{y}_s\\) to \\(\\mathbf{m}_{s|0}\\) and \\(\\boldsymbol{\\Sigma}_{s|0}\\) to \\(\\mathbf{Q}\\), \\(\\mathbf{D}\\) to parameters \\(\\phi\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.609, + 0.825, + 0.655 + ], + "angle": 0, + "content": "Algorithm 1 displays Automatic Multivariate Diffusion Training (AMDT). AMDT provides a training method for diffusion-based generative models for either fixed \\(\\mathbf{Q}\\), \\(\\mathbf{D}\\) matrices or for learning the \\(\\mathbf{Q}_{\\phi}, \\mathbf{D}_{\\phi}\\) matrices, without requiring any diffusion-specific analysis." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.825, + 0.71 + ], + "angle": 0, + "content": "Learning in other diffusion objectives. Like in the ELBO, learning in score matching or noise prediction objectives can improve the match between the inference process and implied generative model (appendix A)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.729, + 0.573, + 0.745 + ], + "angle": 0, + "content": "4 INSIGHTS INTO MULTIVARIATE DIFFUSIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.759, + 0.825, + 0.803 + ], + "angle": 0, + "content": "Scalar versus Multivariate Processes. Equation (13) clarifies what can change while preserving \\( q_{\\infty} \\). Recall that \\( \\mathbf{Q} \\) and \\( \\mathbf{D} \\) are \\( K \\times K \\) for \\( K - 1 \\) auxiliary variables. Because \\( 0 \\) is the only \\( 1 \\times 1 \\) skew-symmetric matrix, scalar processes must set \\( \\mathbf{Q} = 0 \\). With \\( q_{\\phi,\\infty} = \\mathcal{N}(0,\\mathbf{I}) \\), the process is:" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.808, + 0.825, + 0.827 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = - \\mathbf {D} (s) \\mathbf {y} d s + \\sqrt {2 \\mathbf {D} (s)} d \\widehat {\\mathbf {B}} _ {s}. \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.832, + 0.826, + 0.889 + ], + "angle": 0, + "content": "What is left is the VPSDE process used widely in diffusion models where \\(\\mathbf{D}(s) = \\frac{1}{2}\\beta (s)\\) is \\(1\\times 1\\) (Song et al., 2020b). This reveals that the VPSDE process is the only scalar diffusion with a stationary distribution. This also clarifies the role of \\(\\mathbf{Q}\\): it accounts for mixing between dimensions in multivariate processes, as do non-diagonal entries in \\(\\mathbf{D}\\) for \\(K > 1\\)." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.898, + 0.826, + 0.926 + ], + "angle": 0, + "content": "3There are processes such as sub-VPSDE (Song et al., 2020b) which are covered in the sense that they tend to members of this parameterization as \\(T\\) grows: sub-VP converges to VPSDE." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.149 + ], + "angle": 0, + "content": "CLD optimizes a log-likelihood lower bound. Differentiating \\(\\mathcal{L}^{\\mathrm{mdsm}}\\) (eq. (7)) with respect to the score model parameters, we show that the objective for CLD (Dockhorn et al., 2021) maximizes a lower bound on \\(\\log p_{\\theta}(x)\\), not just \\(\\log p_{\\theta}(\\mathbf{u}_0)\\), without appealing to the probability flow ODE." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.162, + 0.825, + 0.234 + ], + "angle": 0, + "content": "Does my model use auxiliary variables? An example initial distribution is \\( q(\\mathbf{y}_0^v |x) = \\mathcal{N}(0,\\mathbf{I}) \\). It is also common to set \\( \\pi_{\\theta} = \\mathcal{N}(0,\\mathbf{I}) \\). Because the optimum for diffusions is \\( p_{\\theta} = q \\), the optimal model has main and auxiliary dimensions independent at endpoints 0 and \\( T \\). Does this mean that the model does not use auxiliary variables? In appendix B, we show that in this case the model can still use auxiliary variables at intermediate times. A sufficient condition is non-diagonal \\( \\mathbf{Q} + \\mathbf{D} \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.253, + 0.33, + 0.269 + ], + "angle": 0, + "content": "5 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.285, + 0.825, + 0.343 + ], + "angle": 0, + "content": "We test the MDM framework with handcrafted and learned diffusions. The handcrafted diffusions are (a) ALDA, used in (Mou et al., 2019) for accelerated gradient-based MCMC sampling (eq. (32)) and (b) MALDA: a modified version of ALDA (eq. (33)). Both have two auxiliary variables. We also learn diffusions with 1 and 2 auxiliary variables. We compare with VPSDE and ELBO-trained CLD." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.355, + 0.825, + 0.412 + ], + "angle": 0, + "content": "Table 2: BPD upper-bounds on image generation for a fixed architecture. CIFAR-10: learning outperforms CLD, and both outperform the standard choice of VPSDE. MNIST: learning matches VPSDE while the fixed auxiliary diffusions are worse. IMAGENET32: all perform similarly. Learning matches or surpasses the best fixed diffusion, while bypassing the need to choose a diffusion." + }, + { + "type": "table", + "bbox": [ + 0.315, + 0.421, + 0.684, + 0.523 + ], + "angle": 0, + "content": "
ModelKCIFAR-10IMAGENET32MNIST
VPSDE13.203.701.26
Learned23.073.711.28
Learned33.083.721.33
CLD23.113.701.35
MALDA33.133.721.65
ALDA329.4333.08124.60
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.544, + 0.825, + 0.614 + ], + "angle": 0, + "content": "Table 3: Parameter Efficiency. The first two rows display diffusions from previous work: VPSDE and CLD, both using score models with 108 million parameters on CIFAR-10. We train the rest using a score model with 35.7 million parameters. The learned diffusion matches the performance of VPSDE-large; changes in the inference can account for as much improvement as a 3x increase in score parameters. BPDs are upper-bounds." + }, + { + "type": "table", + "bbox": [ + 0.272, + 0.625, + 0.727, + 0.713 + ], + "angle": 0, + "content": "
ModelKParametersCIFAR-10
VPSDE-large (Song et al., 2021)1108M3.08
CLD-large (Dockhorn et al., 2021)2108M3.31
Learned235.7M3.07
CLD235.7M3.11
VPSDE135.7M3.20
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.728, + 0.825, + 0.826 + ], + "angle": 0, + "content": "Following prior work, we train DBGMs for image generation. We use the U-Net from Ho et al. (2020). We input the auxiliary variables as extra channels, which only increases the score model parameters in the input and output convolutions (CLD and Learned 2 have 7,000 more parameters than VPSDE on CIFAR-10 and IMAGENET32 and only 865 more for MNIST). We use simple uniform dequantization. We report estimates of \\(\\mathcal{L}^{\\mathrm{mdsm}}\\) (which reduces to the standard \\(\\mathcal{L}^{\\mathrm{dsm}}\\) for \\(K = 1\\)). We sample times using the importance sampling distribution from Song et al. (2021) with truncation set to \\(\\epsilon = 10^{-3}\\). To ensure the truncated bound is proper, we use a likelihood described in appendix I." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Results. Table 2 shows that the inference process matters and displays. It displays DBGMs that we train and evaluate on CIFAR-10, IMAGENET32 and MNIST. This includes the existing VPSDE and CLD, the new MALDA and ALDA, and the new learned inference processes. All are trained with the 35.7M parameter architecture. For CIFAR-10, learning outperforms CLD, and both outperform the standard choice of VPSDE. For MNIST, learned diffusions match VPSDE while the three fixed auxiliary diffusions are worse. On IMAGENET32, all perform similarly. 
The take-away is that learning" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "matches or surpasses the best fixed diffusion performance and bypasses the choice of diffusion for each new dataset or score architecture. In Figure 1 we plot the generated samples from CIFAR10." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.281 + ], + "angle": 0, + "content": "Table 3's first two rows display diffusion models from previous work: VPSDE (Song et al., 2021) and CLD (Dockhorn et al., 2021) both with the 108 million score model from Song et al. (2021) (labeled \"large\"). The rest are DBGMs that we train using the U-Net with 35.7 million parameters for CIFAR-10 and IMAGENET32 and 1.1 million for MNIST. Despite using significantly fewer parameters, the learned diffusion achieves similar BPD compared to the larger models, showing that changes in inference can account for as much improvement as a three-fold increase in parameters. While the larger architecture requires two GPUs for batch size 128 on CIFAR-10 on A100s, the smaller one only requires one; exploring inference processes can make diffusions more computationally accessible. Table 3 also demonstrates a tighter bound for CLD trained and evaluated with the MDM ELBO (\\(\\leq\\) 3.11) relative to existing probability flow-based evaluations (3.31)." 
+ }, + { + "type": "image", + "bbox": [ + 0.266, + 0.299, + 0.473, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.529, + 0.299, + 0.738, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.196, + 0.478, + 0.8, + 0.494 + ], + "angle": 0, + "content": "Figure 1: CIFAR10 samples generated from the \"learned 2\" and MALDA generative models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.508, + 0.348, + 0.525 + ], + "angle": 0, + "content": "6 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.538, + 0.825, + 0.583 + ], + "angle": 0, + "content": "Evidence Lower Bounds. Song et al. (2021); Huang et al. (2021) derive the ISM and DSM lower bounds on the model log likelihood. Our work extends their analysis to the multivariate diffusion setting to derive lower bounds on the log marginal of the data in the presence of auxiliary variables." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.595, + 0.825, + 0.668 + ], + "angle": 0, + "content": "Auxiliary variables. Dupont et al. (2019) shows that augmented neural ODEs model a richer set of functions and Huang et al. (2020) uses this principle for normalizing flows. Hierarchical variational models and auto-encoders marginalize auxiliary variables to build expressive distributions (Ranganath et al., 2016; Sønderby et al., 2016; Maaløe et al., 2019; Vahdat & Kautz, 2020; Child, 2020). We apply this principle to DBGMs, including and extending CLD (Dockhorn et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.679, + 0.825, + 0.738 + ], + "angle": 0, + "content": "Learning inference. Learning \\( q_{\\phi} \\) with \\( p_{\\theta} \\) is motivated in previous work (Kingma & Welling, 2013; Sohl-Dickstein et al., 2015; Kingma et al., 2021). Kingma et al. (2021) learn the noise schedule for VPSDE. 
For MDMs, there are parameters to learn beyond the noise schedule; \\( \\mathbf{Q} \\) can be non-zero, \\( \\mathbf{D} \\) can be diagonal or full, \\( \\mathbf{Q} \\) and \\( \\mathbf{D} \\) can be given different time-varying functions, and \\( \\nabla \\mathbf{H} \\) can be learned." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.755, + 0.312, + 0.77 + ], + "angle": 0, + "content": "7 DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.827, + 0.926 + ], + "angle": 0, + "content": "We present an algorithm for training multivariate diffusions with linear time-varying inference processes with a specified stationary distribution and any number of auxiliary variables. This includes automating transition kernel computation and providing a parameterization of diffusions that have a specified stationary distribution, which facilitate working with new diffusion processes, including learning the diffusion. The experiments show that learning matches or surpasses the best fixed diffusion performance, bypassing the need to choose a diffusion. MDMs achieve BPDs similar to univariate diffusions with as many as three times more score parameters. The proposed MDM ELBO reports a tighter bound for the existing CLD relative to existing probability flow-based evaluations. This work enables future directions including interactions across data coordinates and using new stationary distributions." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.4, + 0.119 + ], + "angle": 0, + "content": "8 ACKNOWLEDGEMENTS" + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.133, + 0.827, + 0.192 + ], + "angle": 0, + "content": "This work was generously funded by NIH/NHLBI Award R01HL148248, NSF Award 1922658 NRT-HDR: FUTURE Foundations, Translation, and Responsibility for Data Science, and NSF CAREER Award 2145542. The authors would additionally like to thank Chin-Wei Huang for helpful discussing regarding Huang et al. (2021)." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.211, + 0.289, + 0.226 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.234, + 0.826, + 0.264 + ], + "angle": 0, + "content": "Andrew D Barbour. Stein's method and poisson process convergence. Journal of Applied Probability, 25(A):175-184, 1988." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.273, + 0.827, + 0.316 + ], + "angle": 0, + "content": "Andrew Campbell, Joe Benton, Valentin De Bortoli, Tom Rainforth, George Deligiannidis, and Arnaud Doucet. A continuous time framework for discrete denoising models. arXiv preprint arXiv:2205.14987, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.324, + 0.825, + 0.354 + ], + "angle": 0, + "content": "Nanxin Chen, Yu Zhang, Heiga Zen, Ron J Weiss, Mohammad Norouzi, and William Chan. Wavegrad: Estimating gradients for waveform generation. arXiv preprint arXiv:2009.00713, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.362, + 0.825, + 0.393 + ], + "angle": 0, + "content": "Rewon Child. Very deep vaes generalize autoregressive models and can outperform them on images. 
arXiv preprint arXiv:2011.10650, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.4, + 0.825, + 0.43 + ], + "angle": 0, + "content": "Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.438, + 0.825, + 0.468 + ], + "angle": 0, + "content": "Tim Dockhorn, Arash Vahdat, and Karsten Kreis. Score-based generative modeling with critically-damped Langevin diffusion. arXiv preprint arXiv:2112.07068, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.476, + 0.826, + 0.506 + ], + "angle": 0, + "content": "Emilien Dupont, Arnaud Doucet, and Yee Whye Teh. Augmented neural odes. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.514, + 0.825, + 0.544 + ], + "angle": 0, + "content": "Bradley Efron. Tweedie's formula and selection bias. Journal of the American Statistical Association, 106(496):1602-1614, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.551, + 0.826, + 0.581 + ], + "angle": 0, + "content": "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.589, + 0.826, + 0.631 + ], + "angle": 0, + "content": "Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. *J. Mach. Learn. Res.*, 23: 47-1, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.641, + 0.826, + 0.672 + ], + "angle": 0, + "content": "Chin-Wei Huang, Laurent Dinh, and Aaron Courville. Augmented normalizing flows: Bridging the gap between generative flows and latent variable models. arXiv preprint arXiv:2002.07101, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.679, + 0.826, + 0.721 + ], + "angle": 0, + "content": "Chin-Wei Huang, Jae Hyun Lim, and Aaron C Courville. A variational perspective on diffusion-based generative models and score matching. Advances in Neural Information Processing Systems, 34, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.73, + 0.826, + 0.76 + ], + "angle": 0, + "content": "Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.768, + 0.825, + 0.799 + ], + "angle": 0, + "content": "Diederik P Kingma, Tim Salimans, Ben Poole, and Jonathan Ho. Variational diffusion models. arXiv preprint arXiv:2107.00630, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.806, + 0.825, + 0.837 + ], + "angle": 0, + "content": "Zhifeng Kong, Wei Ping, Jiaji Huang, Kexin Zhao, and Bryan Catanzaro. Diffwave: A versatile diffusion model for audio synthesis. arXiv preprint arXiv:2009.09761, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.844, + 0.825, + 0.875 + ], + "angle": 0, + "content": "Yi-An Ma, Tianqi Chen, and Emily Fox. A complete recipe for stochastic gradient mcmc. Advances in neural information processing systems, 28, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.882, + 0.826, + 0.924 + ], + "angle": 0, + "content": "Lars Maaløe, Marco Fraccaro, Valentin Lievin, and Ole Winther. Biva: A very deep hierarchy of latent variables for generative modeling. Advances in neural information processing systems, 32, 2019." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.234, + 0.827, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Gautam Mittal, Jesse Engel, Curtis Hawthorne, and Ian Simon. Symbolic music generation with diffusion models. arXiv preprint arXiv:2103.16091, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.826, + 0.172 + ], + "angle": 0, + "content": "Wenlong Mou, Yi-An Ma, Martin J Wainwright, Peter L Bartlett, and Michael I Jordan. High-order Langevin diffusion yields an accelerated mcmc algorithm. arXiv preprint arXiv:1908.10859, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.179, + 0.826, + 0.21 + ], + "angle": 0, + "content": "Alex Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. arXiv preprint arXiv:2102.09672, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.217, + 0.826, + 0.261 + ], + "angle": 0, + "content": "Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.268, + 0.825, + 0.299 + ], + "angle": 0, + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.306, + 0.826, + 0.337 + ], + "angle": 0, + "content": "Rajesh Ranganath, Sean Gerrish, and David Blei. Black box variational inference. In Artificial intelligence and statistics, pp. 814-822. PMLR, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.344, + 0.826, + 0.375 + ], + "angle": 0, + "content": "Rajesh Ranganath, Dustin Tran, and David Blei. Hierarchical variational models. In International conference on machine learning, pp. 324-333. PMLR, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.382, + 0.825, + 0.413 + ], + "angle": 0, + "content": "Danilo Rezende and Shakir Mohamed. Variational inference with normalizing flows. In International Conference on Machine Learning, pp. 1530-1538. PMLR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.42, + 0.826, + 0.477 + ], + "angle": 0, + "content": "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.485, + 0.825, + 0.516 + ], + "angle": 0, + "content": "Simo Särkkä and Arno Solin. Applied stochastic differential equations, volume 10. Cambridge University Press, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.523, + 0.825, + 0.554 + ], + "angle": 0, + "content": "Hiroshi Sasaki, Chris G Willcocks, and Toby P Breckon. Unit-ddpm: Unpaired image translation with denoising diffusion probabilistic models. arXiv preprint arXiv:2104.05358, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.561, + 0.825, + 0.604 + ], + "angle": 0, + "content": "Jianghong Shi, Tianqi Chen, Ruoshi Yuan, Bo Yuan, and Ping Ao. Relation of a new interpretation of stochastic differential equations to ito process. 
Journal of Statistical physics, 148:579-590, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.613, + 0.825, + 0.657 + ], + "angle": 0, + "content": "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.665, + 0.825, + 0.695 + ], + "angle": 0, + "content": "Casper Kaae Sønderby, Tapani Raiko, Lars Maaløe, Søren Kaae Sønderby, and Ole Winther. Ladder variational autoencoders. Advances in neural information processing systems, 29, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.703, + 0.825, + 0.746 + ], + "angle": 0, + "content": "Yang Song, Sahaj Garg, Jiaxin Shi, and Stefano Ermon. Sliced score matching: A scalable approach to density and score estimation. In Uncertainty in Artificial Intelligence, pp. 574-584. PMLR, 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.754, + 0.825, + 0.797 + ], + "angle": 0, + "content": "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020b." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.806, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Yang Song, Conor Durkan, Iain Murray, and Stefano Ermon. Maximum likelihood training of score-based diffusion models. Advances in Neural Information Processing Systems, 34:1415-1428, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.857, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Michalis Titsias and Miguel Lázaro-Gredilla. Doubly stochastic variational bayes for non-conjugate inference. In International conference on machine learning, pp. 1971-1979. PMLR, 2014." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.895, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Arash Vahdat and Jan Kautz. Nvae: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Pascal Vincent. A connection between score matching and denoising autoencoders. *Neural computation*, 23(7):1661-1674, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.825, + 0.172 + ], + "angle": 0, + "content": "L Yin and P Ao. Existence and construction of dynamical potential in nonequilibrium processes without detailed balance. Journal of Physics A: Mathematical and General, 39(27):8593, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.179, + 0.825, + 0.21 + ], + "angle": 0, + "content": "Zhenzhong Zhang and Dayue Chen. A new criterion on existence and uniqueness of stationary distribution for diffusion processes. Advances in Difference Equations, 2013(1):1-6, 2013." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.825, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.719, + 0.119 + ], + "angle": 0, + "content": "A AUTOMATED SCORE MATCHING WITH LEARNED INFERENCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.809, + 0.15 + ], + "angle": 0, + "content": "Like for the MDM ELBO, the methods in this work apply to training with the score matching loss:" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.153, + 0.757, + 0.179 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {S M}} (x, \\theta , \\phi) = T \\mathbb {E} _ {t \\sim U [ 0, T ]} \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\lambda (t) \\| s _ {\\theta} (\\mathbf {y} _ {t}, t) - \\nabla_ {\\mathbf {y} _ {t}} \\log q _ {\\phi} (\\mathbf {y} _ {t} | x) \\| _ {2} ^ {2} \\right],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.182, + 0.825, + 0.211 + ], + "angle": 0, + "content": "where \\(\\lambda :[0,T]\\to \\mathbb{R}_+\\) is a weighing function. 
The score-matching loss is often optimized in its simplified noise prediction form:" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.215, + 0.677, + 0.242 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {N P}} (x, \\theta , \\phi) = T \\mathbb {E} _ {t \\sim U [ 0, T ]} \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\| \\epsilon_ {\\theta} (\\mathbf {y} _ {t}, t) - \\epsilon \\| _ {2} ^ {2} \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.246, + 0.825, + 0.322 + ], + "angle": 0, + "content": "where \\( s_{\\theta} = -\\mathbf{L}_{t}^{-\\top}\\epsilon_{\\theta} \\) and \\( \\mathbf{y}_t = \\mu_t + \\mathbf{L}_t\\epsilon \\) and \\( \\epsilon \\) is the noise used in sampling \\( \\mathbf{y}_t \\). We describe here how the improvements to the ELBO studied in this work carry over to \\( \\mathcal{L}_{\\mathrm{SM}} \\) and \\( \\mathcal{L}_{\\mathrm{NP}} \\). In the following let \\( q_{0} \\) be the data distribution, let \\( p_{(\\theta ,\\phi),0} \\) be the model's distribution of the data, and recall that the model is defined by \\( (s_{\\theta},f_{\\phi},g_{\\phi}) \\) and prior \\( \\pi \\) via a continuous-time stochastic process with drift coefficient \\( g_{\\phi}^{2}s_{\\theta} - f_{\\phi} \\) and diffusion coefficient \\( g_{\\phi} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.328, + 0.825, + 0.413 + ], + "angle": 0, + "content": "First, minimizing \\(\\mathcal{L}_{\\mathrm{SM}}\\) or \\(\\mathcal{L}_{\\mathrm{NP}}\\) so that \\(\\nabla_{\\mathbf{y}_t}\\log q_\\phi (\\mathbf{y}_t) = s_\\theta (\\mathbf{y}_t,t)\\) does not alone imply that \\(p_{(\\theta ,\\phi),0}\\) will equal \\(q_{0}\\); it must also be that \\(q_{\\phi ,T}\\approx \\pi\\). 
Foregoing this requirement means \\(\\pi\\) will produce samples that the generative model may not be able to push onto the path the model was trained on (formally, the score of the generative model would not equal the time-reversal of the forward score even if \\(s_\\theta\\) equals the forward score). This condition can be satisfied if \\(q_{\\phi}\\) can be chosen with stationary distribution \\(\\pi\\). Section 3.4 describes how to accomplish this." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.419, + 0.825, + 0.449 + ], + "angle": 0, + "content": "Next, for any fixed \\( q_{\\phi} \\), automatic transitions from section 3.3 streamline the computation of the score matching loss, allowing for simple score computation for a wide class of diffusions beyond VP." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.454, + 0.826, + 0.54 + ], + "angle": 0, + "content": "Finally, for a fixed \\( q_{\\phi} \\) with \\( q_{\\phi,T} \\approx \\pi \\) and a score architecture \\( s_{\\theta} \\), minimizing \\( \\mathcal{L}_{\\mathrm{SM}} \\) or \\( \\mathcal{L}_{\\mathrm{NP}} \\) w.r.t \\( \\theta \\) may be suboptimal. Optimization, like for the elbo, carries over to score matching and can close this gap; learning w.r.t. both \\( \\theta, \\phi \\) increases the ability to successfully minimize the loss at each \\( t \\) (section 3.5). In other words, since the generative model is defined by \\( (s_{\\theta}, f_{\\phi}, g_{\\phi}) \\), learning \\( q_{\\phi} \\) means the loss trains all three components of the generative model rather than just one. In summary, score matching is automatic and can learn over the space of linear diffusions that tend to the model prior." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.558, + 0.6, + 0.574 + ], + "angle": 0, + "content": "B DOES MY MODEL USE AUXILIARY VARIABLES?" 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.588, + 0.825, + 0.674 + ], + "angle": 0, + "content": "In section 3 we gave the example choice of \\( q(\\mathbf{y}_0^v |x) = \\mathcal{N}(0,\\mathbf{I}) \\) coordinate-wise. It is also a common choice to set \\( \\pi_{\\theta} = \\mathcal{N}(0,\\mathbf{I}) \\). Because the optimum in diffusion models is \\( p_{\\theta} = q \\) for all \\( t \\), we see a peculiar phenomenon under this choice: the model has main and auxiliary dimensions independent at both endpoints 0 and \\( T \\). Does this mean that the model does not use auxiliary variables? We show that even when \\( q_{\\phi}(\\mathbf{y}_0) \\) and \\( \\pi_{\\theta} \\) have main and auxiliary variables independent, the model can use the auxiliary variables. A sufficient condition is \\( \\mathbf{Q} + \\mathbf{D} \\) is non-diagonal." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.679, + 0.825, + 0.78 + ], + "angle": 0, + "content": "To make this precise, we recall that we model with \\( p_{\\theta}(\\mathbf{u}_T^z = x) \\). To show the model is using auxiliary variables, we just need to show that \\( \\mathbf{u}_T^z \\) (main coordinate at \\( T \\)) depends on \\( \\mathbf{u}_t^v \\) (aux. coordinate at \\( t \\)) for \\( T > t \\). At optimum, \\( p_{\\theta}(\\mathbf{u}_T^z,\\mathbf{u}_t^v) = q_{\\phi}(\\mathbf{y}_0^z,\\mathbf{y}_{T - t}^v) \\). Therefore it is sufficient to show that for some time \\( s \\), \\( q_{\\phi}(\\mathbf{y}_s^v |\\mathbf{y}_0^z)\\neq q_{\\phi}(\\mathbf{y}_s^v) \\). Because \\( \\mathbf{y}_0^z \\), is determined by \\( x \\) we need to show that \\( q_{\\phi}(\\mathbf{y}_s^v |x)\\neq q_{\\phi}(\\mathbf{y}_s^v) \\). To do that, we first derive \\( q(\\mathbf{y}_s|x) \\) and then marginalize to get \\( q(\\mathbf{y}_s^v |x) \\) from it. Since the former is 2D Gaussian, the latter is available in terms of the former's mean and covariance. 
Suppose \\( \\mathbb{E}[\\mathbf{y}_0^v ] = 0 \\), \\( \\mathbf{Q} = [[0, - 1],[1,0]] \\) and \\( \\mathbf{D} = [[1,0],[0,1]] \\) and we have \\( s = .1 \\). We have:" + }, + { + "type": "equation", + "bbox": [ + 0.227, + 0.783, + 0.825, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\mathbf {y} _ {s} | x ] = \\exp \\left[ - s (\\mathbf {Q} + \\mathbf {D}) \\right] \\binom {x} {0} = \\exp \\left[ \\left[ \\begin{array}{l l} -. 1 & . 1 \\\\ -. 1 & -. 1 \\end{array} \\right] \\right] \\binom {x} {0} = \\binom {0. 9 0 0 3 x} {- 0. 0 9 0 x} \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.819, + 0.825, + 0.891 + ], + "angle": 0, + "content": "Regardless of the covariance, any 1D marginal of this 2D Gaussian will have a mean that is a function of \\( x \\), meaning that \\( q(\\mathbf{y}_s^v |x) \\) does not equal \\( q(\\mathbf{y}_s^v) \\) (which is also a Gaussian but with mean depending on \\( \\mathbf{x}'s \\) mean rather than \\( x \\) itself). Therefore, even under the setup with independent endpoints, the optimal model makes use of the intermediate auxiliary variables in its final modeling distribution \\( p_{\\theta}(\\mathbf{u}_T^z = x) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Are there choices of \\(\\mathbf{Q}\\) and \\(\\mathbf{D}\\) that lead to learning models that don't make use of the extra dimensions? 
As mentioned, in the inference process, \\(\\mathbf{Q}\\) is responsible for mixing information among the" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "coordinates, and is the only source of this when \\(\\mathbf{D}\\) is diagonal. Then, if \\(\\mathbf{Q} = \\mathbf{0}\\) and \\(\\mathbf{D}\\) is diagonal, none of the coordinates for a given feature \\(\\mathbf{x}_j\\) (including \\(\\mathbf{u}_{tj}^z\\), \\(\\mathbf{u}_{tj}^{v_1},\\ldots ,\\mathbf{u}_{tj}^{v_{K - 1}}\\)) interact for any \\(t\\). Then, since \\(p_\\theta = q\\) at optimum, independence of the coordinates at all \\(t\\) in \\(q\\) imply the same in \\(p_\\theta\\) and the model will not make use of any auxiliary variables when modeling the marginal \\(\\log p_{\\theta}(\\mathbf{u}_T^z = x)\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.182, + 0.494, + 0.198 + ], + "angle": 0, + "content": "C STATIONARY PARAMETERIZATION" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.213, + 0.546, + 0.228 + ], + "angle": 0, + "content": "The non-linear time-homogeneous Ito process family is:" + }, + { + "type": "equation", + "bbox": [ + 0.414, + 0.237, + 0.825, + 0.253 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = f (\\mathbf {y}) d t + g (\\mathbf {y}) \\mathbf {B} _ {t}. \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.26, + 0.825, + 0.338 + ], + "angle": 0, + "content": "This family can be restricted to those with stationary distributions. Ma et al. (2015) show a complete recipe to span the subset of this family with a desired stationary distribution. 
Let \\(\\mathbf{Q}\\) be skew-symmetric \\((-\\mathbf{Q} = \\mathbf{Q}^{\\top})\\) and \\(\\mathbf{D}\\) is positive semi-definite. Suppose the desired stationary distribution is \\(q_{\\infty}(\\mathbf{y})\\). For a matrix \\(\\mathbf{A}\\), let \\(\\sqrt{\\mathbf{A}}\\) refer to the matrix square root defined by \\(\\mathbf{a} = \\sqrt{\\mathbf{A}} \\iff \\mathbf{A} = \\mathbf{aa}^{\\top}\\). Then, Ma et al. (2015) show that, setting \\(\\mathbf{H}(\\mathbf{y}) = -\\log q_{\\infty}(\\mathbf{y})\\), \\(g(\\mathbf{y}) = \\sqrt{2\\mathbf{D}(\\mathbf{y})}\\), and" + }, + { + "type": "equation", + "bbox": [ + 0.199, + 0.346, + 0.826, + 0.389 + ], + "angle": 0, + "content": "\\[\nf (\\mathbf {y}) = - \\left[ \\mathbf {D} (\\mathbf {y}) + \\mathbf {Q} (\\mathbf {y}) \\right] \\nabla \\mathbf {H} (\\mathbf {y}) + \\boldsymbol {\\Gamma} (\\mathbf {y}), \\quad \\boldsymbol {\\Gamma} _ {i} (\\mathbf {y}) = \\sum_ {j = 1} ^ {d} \\frac {\\partial}{\\partial \\mathbf {z} _ {j}} \\left(\\mathbf {D} _ {i j} (\\mathbf {y}) + \\mathbf {Q} _ {i j} (\\mathbf {y})\\right), \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.825, + 0.426 + ], + "angle": 0, + "content": "yields a process \\(\\mathbf{y}_t\\) with stationary distribution \\(q_{\\infty}\\). We extend it to time-varying (time inhomogeneous) processes." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.443, + 0.613, + 0.459 + ], + "angle": 0, + "content": "Theorem 2. 
\\(q_{\\infty}(\\mathbf{y})\\propto \\exp [-H(\\mathbf{y})]\\) is a stationary distribution of" + }, + { + "type": "equation", + "bbox": [ + 0.262, + 0.467, + 0.825, + 0.508 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = \\left(- [ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) ] \\nabla \\mathbf {H} (\\mathbf {y}) + \\boldsymbol {\\Gamma} (\\mathbf {y}, t)\\right) d t + \\sqrt {2 \\mathbf {D} (\\mathbf {y} , t)} \\mathbf {B} _ {t}, \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.515, + 0.199, + 0.53 + ], + "angle": 0, + "content": "for" + }, + { + "type": "equation", + "bbox": [ + 0.35, + 0.538, + 0.826, + 0.58 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Gamma} _ {i} (\\mathbf {y}, t) = \\sum_ {j = 1} ^ {d} \\frac {\\partial}{\\partial \\mathbf {y} _ {j}} \\left(\\mathbf {D} _ {i j} (\\mathbf {y}, t) + \\mathbf {Q} _ {i j} (\\mathbf {y}, t)\\right). \\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.598, + 0.429, + 0.613 + ], + "angle": 0, + "content": "Proof. The Fokker Planck equation is:" + }, + { + "type": "equation", + "bbox": [ + 0.254, + 0.62, + 0.826, + 0.659 + ], + "angle": 0, + "content": "\\[\n\\partial_ {t} q (\\mathbf {y}, t) = - \\sum_ {i} \\frac {\\partial}{\\partial \\mathbf {y} _ {i}} [ f _ {i} (\\mathbf {y}, t) q (\\mathbf {y}, t) ] + \\sum_ {i, j} \\frac {\\partial^ {2}}{\\partial \\mathbf {y} _ {i} \\partial \\mathbf {y} _ {j}} [ \\mathbf {D} _ {i j} (\\mathbf {y}, t) q (\\mathbf {y}, t) ] \\tag {23}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.825, + 0.737 + ], + "angle": 0, + "content": "A stationary distribution is one where the Fokker-Planck right hand side is equal to 0. To show that the stationary characterization also holds of time-inhomogeneous processes with \\(\\mathbf{D}(\\mathbf{y},t)\\) and \\(\\mathbf{Q}(\\mathbf{y},t)\\), we take two steps, closely following Yin & Ao (2006); Shi et al. (2012); Ma et al. 
(2015), but noting that there is no requirement for \\(\\mathbf{Q}\\), \\(\\mathbf{D}\\) to be free of \\(t\\). First, we show that the Fokker-Planck equation can be re-written as:" + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.745, + 0.826, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\partial_ {t} q (\\mathbf {y}, t) = \\nabla \\cdot \\left(\\left[ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) \\right] \\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right]\\right) \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.793, + 0.731, + 0.808 + ], + "angle": 0, + "content": "Second, because the whole expression is set to 0 when the inside expression equals 0" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.816, + 0.825, + 0.833 + ], + "angle": 0, + "content": "\\[\nq (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) = 0, \\tag {25}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.825, + 0.867 + ], + "angle": 0, + "content": "we just need to show that this holds when \\( q(\\mathbf{y},t) = \\exp [-H(\\mathbf{y})] / \\mathbf{Z} \\). The second step is concluded because" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.873, + 0.768, + 0.903 + ], + "angle": 0, + "content": "\\[\n\\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right] = \\frac {1}{\\mathbf {Z}} \\left[ \\exp [ - H (\\mathbf {y}) ] \\nabla H (\\mathbf {y}) + \\nabla \\exp [ - H (\\mathbf {y}) ] \\right] = 0,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.536, + 0.926 + ], + "angle": 0, + "content": "where \\(\\mathbf{Z}\\) is the normalization constant of \\(\\exp (-H(y))\\)."
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.51, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.156 + ], + "angle": 0, + "content": "It only remains to show that Fokker-Planck can be re-written in divergence form with time-dependent \\(\\mathbf{Q},\\mathbf{D}\\). In the following let \\(Q_{ijt}\\) denote \\(\\mathbf{Q}_{ij}(\\mathbf{y},t)\\) and likewise for \\(D_{ijt}\\). Let \\(\\partial_i\\) denote \\(\\frac{\\partial}{\\partial\\mathbf{y}_i}\\) and let it denote \\(\\frac{d}{d\\mathbf{y}_i}\\) for scalar functions. We will use \\([Ax]_i = \\sum_jA_{ij}x_j\\)." + }, + { + "type": "equation", + "bbox": [ + 0.223, + 0.16, + 0.776, + 0.362 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {t} q _ {t} = \\nabla \\cdot \\left(\\left[ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) \\right] [ q \\nabla H + \\nabla q ]\\right) \\\\ = \\sum_ {i} \\partial_ {i} \\left(\\left[ [ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) ] [ q \\nabla H + \\nabla q ] \\right] _ {i}\\right) \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\nabla H + \\nabla q \\right] _ {j} \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H + \\partial_ {j} q \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} q \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} D _ {i j t} \\left[ \\partial_ {j} q 
\\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} Q _ {i j t} \\left[ \\partial_ {j} q \\right] \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.364, + 0.825, + 0.396 + ], + "angle": 0, + "content": "We re-write the 2nd and 3rd term. Holding \\(i\\) fixed and noting \\(q\\) is scalar, we get the product rule \\(\\sum_{j}D_{ijt}(\\partial_{j}q) = \\sum_{j}\\partial_{j}[D_{ijt}q] - q\\sum_{j}\\partial_{j}D_{ijt}\\) for each \\(i\\), and likewise for \\(q\\):" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.399, + 0.753, + 0.502 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} D _ {i j t} \\left[ \\partial_ {j} q \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} Q _ {i j t} \\left[ \\partial_ {j} q \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} \\partial_ {j} \\left[ D _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} D _ {i j t} \\\\ + \\sum_ {i} \\partial_ {i} \\sum_ {j} \\partial_ {j} \\left[ Q _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} Q _ {i j t} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.505, + 0.724, + 0.523 + ], + "angle": 0, + "content": "Because \\(\\mathbf{Q}(\\mathbf{y},t)\\) is skew-symmetric, we have that \\(\\sum_{i}\\partial_{i}\\sum_{j}\\partial_{j}[Q_{ijt}q] = 0\\), leaving" + }, + { + "type": "equation", + "bbox": [ + 0.177, + 0.526, + 0.819, + 0.657 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial_ {t} q _ {t} = \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] \\right] + \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\partial_ {j} \\left[ D _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} D _ {i j t} - q 
\\sum_ {j} \\partial_ {j} Q _ {i j t} \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} H \\right] q \\right] + \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\partial_ {j} \\left[ D _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} \\left(D _ {i j t} + Q _ {i j t}\\right) \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\left[ \\left(\\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} H \\right] - \\sum_ {j} \\partial_ {j} \\left(D _ {i j t} + Q _ {i j t}\\right)\\right) q \\right] + \\sum_ {i} \\sum_ {j} \\frac {\\partial^ {2}}{\\mathbf {y} _ {i} \\mathbf {y} _ {j}} \\left(D _ {i j t} q\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.681, + 0.825, + 0.717 + ], + "angle": 0, + "content": "Recalling that \\( f_{i}(\\mathbf{y},t) = \\left(-[D + Q]\\nabla H + \\Gamma\\right)_{i} \\) and again that \\( [Ax]_i = \\sum_j A_{ij}x_j \\), we have equality with the original Fokker-Planck" + }, + { + "type": "equation", + "bbox": [ + 0.229, + 0.72, + 0.773, + 0.822 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} = \\sum_ {i} \\partial_ {i} \\left[ \\left(\\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} H \\right] - \\sum_ {j} \\partial_ {j} \\left(D _ {i j t} + Q _ {i j t}\\right)\\right) q \\right] + \\sum_ {i j} \\frac {\\partial^ {2}}{\\mathbf {y} _ {i} \\mathbf {y} _ {j}} \\left(D _ {i j t} q\\right) \\\\ = - \\sum_ {i} \\frac {\\partial}{\\partial \\mathbf {y} _ {i}} \\left[ f _ {i} (\\mathbf {y}, t) q (\\mathbf {y}, t) \\right] + \\sum_ {i j} \\frac {\\partial^ {2}}{\\mathbf {y} _ {i} \\mathbf {y} _ {j}} \\left[ \\mathbf {D} _ {i j} (\\mathbf {y}, t) q (\\mathbf {y}, t) \\right] \\\\ = \\partial_ {t} q (\\mathbf {y}, t) \\\\ \\end{array}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.824, + 0.824, + 0.836 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 
0.853, + 0.825, + 0.88 + ], + "angle": 0, + "content": "We have shown \\(\\exp[-H(\\mathbf{y})] / \\mathbf{Z}\\) is a stationary distribution of the time-varying non-linear Ito process:" + }, + { + "type": "equation", + "bbox": [ + 0.265, + 0.882, + 0.824, + 0.922 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = \\left(- [ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) ] \\nabla H (\\mathbf {y}) + \\boldsymbol {\\Gamma} (\\mathbf {y}, t)\\right) d t + \\sqrt {2 \\mathbf {D} (\\mathbf {y} , t)} \\mathbf {B} _ {t}. \\tag {26}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.175 + ], + "angle": 0, + "content": "However, for some choices of \\(\\mathbf{Q}, \\mathbf{D}\\), \\(\\exp[-H(\\mathbf{y})] / \\mathbf{Z}\\) is not necessarily the unique stationary distribution. One problematic case can occur as follows. Suppose that row \\(i\\) of \\((\\mathbf{Q} + \\mathbf{D})\\) is all-zero; in this case, \\(d\\mathbf{y}_i = 0\\) which implies that \\((\\mathbf{y}_i)_t = (\\mathbf{y}_i)_0\\) for all \\(t > 0\\). Then, the initial distribution is also a stationary distribution. To rule out such pathological diffusions, we make the assumption that \\(\\mathbf{Q} + \\mathbf{D}\\) is full rank. 
Then, for uniqueness, recall that stationary distributions are the zeros of" + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.18, + 0.726, + 0.22 + ], + "angle": 0, + "content": "\\[\n\\partial_ {t} q (\\mathbf {y}, t) = \\nabla \\cdot \\left(\\left[ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) \\right] \\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right]\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.226, + 0.647, + 0.242 + ], + "angle": 0, + "content": "where the expression is of the form \\(\\mathbf{A}\\mathbf{v}\\) for \\(\\mathbf{A} = \\mathbf{D}(\\mathbf{y},t) + \\mathbf{Q}(\\mathbf{y},t)\\) and" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.246, + 0.613, + 0.273 + ], + "angle": 0, + "content": "\\[\n\\mathbf {v} = \\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.277, + 0.825, + 0.307 + ], + "angle": 0, + "content": "Under the assumption that \\(\\mathbf{Q} + \\mathbf{D}\\) is full rank, the expression can only be zero when \\(\\mathbf{v}\\) is zero. To show uniqueness under the full rank assumption, one must then show that" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.312, + 0.597, + 0.329 + ], + "angle": 0, + "content": "\\[\n\\nabla q (\\mathbf {y}, t) = - q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.333, + 0.825, + 0.363 + ], + "angle": 0, + "content": "holds only if \\( q(\\mathbf{y}, t) = \\exp[-H(\\mathbf{y})] / \\mathbf{Z} \\). Even if \\( \\exp[-H(\\mathbf{y})] / \\mathbf{Z} \\) is the unique stationary distribution, convergence to that distribution is a question. See Zhang & Chen (2013) for more details." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.369, + 0.825, + 0.397 + ], + "angle": 0, + "content": "Learning \\(\\mathbf{Q}_{\\phi}\\), \\(\\mathbf{D}_{\\phi}\\) in the MDM ELBO helps push \\(\\mathbf{y}_T\\) to the model prior \\(\\pi_{\\theta}\\) and avoid issues like those discussed." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.414, + 0.357, + 0.427 + ], + "angle": 0, + "content": "C.1 LINEAR PROCESSES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.44, + 0.825, + 0.496 + ], + "angle": 0, + "content": "Next, we specialize this general family to linear Itô processes to maintain tractable transition distributions. A linear process is one where the drift \\( f(\\mathbf{y},t) \\) and diffusion \\( g(\\mathbf{y},t) \\) are linear functions of \\( \\mathbf{y} \\). We express the drift function of a non-linear time-varying Itô process with stationary distribution proportional to \\( \\exp[-H(\\mathbf{y})] \\) as" + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.501, + 0.635, + 0.518 + ], + "angle": 0, + "content": "\\[\n- (\\mathbf {Q} (\\mathbf {y}, t) + \\mathbf {D} (\\mathbf {y}, t)) \\nabla H (\\mathbf {y}) + \\Gamma (\\mathbf {y}, t).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.523, + 0.826, + 0.639 + ], + "angle": 0, + "content": "Next, linear Ito processes have Gaussian stationary distributions (Särkkä & Solin, 2019) so \\( H(\\mathbf{y}) \\) must be quadratic and \\( \\nabla H(\\mathbf{y}) \\) is linear, and neither are constant in \\( \\mathbf{y} \\). Because \\( \\nabla H(\\mathbf{y}) \\) is linear, it can be expressed as \\( \\mathbf{S}\\mathbf{y} \\) for some matrix \\( \\mathbf{S} \\) where \\( \\mathbf{S} \\) is the inverse of the covariance matrix. Because \\( \\nabla H \\) is multiplied by \\( \\mathbf{Q}, \\mathbf{D} \\), this means that \\( \\mathbf{Q}, \\mathbf{D} \\) must be free of \\( \\mathbf{y} \\). 
Recalling that \\( \\Gamma \\) is expressed as a sum of derivatives w.r.t \\( \\mathbf{y} \\) of \\( \\mathbf{Q} + \\mathbf{D} \\), this means that \\( \\Gamma \\) must satisfy \\( \\Gamma = 0 \\). Next, because of the stationary requirement that \\( g(t) = \\sqrt{2\\mathbf{D}(\\mathbf{y},t)} \\), we can also conclude by the restriction on \\( \\mathbf{D} \\) that the diffusion coefficient function must be independent of the state \\( \\mathbf{y} \\). Our final form for linear time-varying processes with stationary distributions \\( \\mathcal{N}(0,\\mathbf{S}^{-1}) \\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.345, + 0.644, + 0.825, + 0.69 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = \\underbrace {- \\left[ \\mathbf {Q} (t) + \\mathbf {D} (t) \\right] \\mathbf {S} \\mathbf {y}} _ {f (\\mathbf {y}, t)} d t + \\underbrace {\\sqrt {2 \\mathbf {D} (t)}} _ {g (t)} d \\mathbf {B} _ {t} \\tag {27}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.706, + 0.373, + 0.723 + ], + "angle": 0, + "content": "C.2 PARAMETERIZING \\(\\mathbf{Q}_{\\phi}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.733, + 0.825, + 0.771 + ], + "angle": 0, + "content": "Suppose \\( b_{q}(s) \\) is a positive scalar function defined on the time domain with known integral. Suppose \\( \\tilde{\\mathbf{Q}}_{\\phi} \\) is any matrix. Then \\( \\tilde{\\mathbf{Q}}_{\\phi} - \\tilde{\\mathbf{Q}}_{\\phi}^{\\top} \\) is skew-symmetric with \\( \\tilde{\\mathbf{Q}}_{\\phi, ij} = -\\tilde{\\mathbf{Q}}_{\\phi, ji} \\). 
We can set \\( \\mathbf{Q}_{\\phi} \\) to" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.775, + 0.825, + 0.801 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} _ {\\phi} (s) = b _ {q} (s) \\cdot \\left[ \\tilde {\\mathbf {Q}} _ {\\phi} - \\tilde {\\mathbf {Q}} _ {\\phi} ^ {\\top} \\right] \\tag {28}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.825, + 0.849 + ], + "angle": 0, + "content": "This is a general parameterization of time-independent skew-symmetric matrices, which have number of degrees of freedom equal to the number of entries in one of the triangles of the matrix, excluding the diagonal." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.865, + 0.373, + 0.881 + ], + "angle": 0, + "content": "C.3 PARAMETERIZING \\(\\mathbf{D}_{\\phi}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.891, + 0.825, + 0.927 + ], + "angle": 0, + "content": "Suppose \\( b_{d}(s) \\) is a positive scalar function defined on the time domain with known integral. Suppose \\( \\tilde{\\mathbf{D}}_{\\phi} \\) is any matrix. Then \\( \\tilde{\\mathbf{D}}_{\\phi}\\tilde{\\mathbf{D}}_{\\phi}^{\\top} \\) is positive semi-definite and spans all time-independent positive" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.445, + 0.119 + ], + "angle": 0, + "content": "semi-definite matrices. 
We can set \\(\\mathbf{D}_{\\phi}\\) to" + }, + { + "type": "equation", + "bbox": [ + 0.408, + 0.123, + 0.825, + 0.151 + ], + "angle": 0, + "content": "\\[\n\\mathbf {D} _ {\\phi} (s) = b _ {d} (s) \\cdot \\left[ \\tilde {\\mathbf {D}} _ {\\phi} \\tilde {\\mathbf {D}} _ {\\phi} ^ {\\top} \\right] \\tag {29}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.154, + 0.825, + 0.23 + ], + "angle": 0, + "content": "To show \\(\\tilde{\\mathbf{D}}\\tilde{\\mathbf{D}}^{\\top}\\) spans all positive semi-definite matrices: suppose \\(\\mathbf{M}\\) is positive semi-definite. Then it is square. Then it can be eigen-decomposed into \\(\\mathbf{M} = \\mathbf{V}\\pmb {\\Sigma}\\mathbf{V}^{\\top}\\). The degrees of freedom in \\(\\mathbf{V}\\pmb {\\Sigma}\\mathbf{V}^{\\top}\\) are just \\(\\mathbf{R} = \\mathbf{V}\\sqrt{\\pmb{\\Sigma}}\\) since \\(\\mathbf{V}\\pmb {\\Sigma}\\mathbf{V}^{\\top} = \\mathbf{R}\\mathbf{R}^{\\top}\\) and the square root is taken element-wise because \\(\\pmb{\\Sigma}\\) is diagonal and is real because each \\(\\pmb{\\Sigma}_{ij}\\geq 0\\), which is true because \\(\\mathbf{M}\\) is positive semi-definite. Take \\(\\mathbf{D} = \\mathbf{R}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.238, + 0.603, + 0.253 + ], + "angle": 0, + "content": "In our experiments we parameterize \\(\\mathbf{D}\\) as a diagonal-only matrix." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.269, + 0.301, + 0.282 + ], + "angle": 0, + "content": "C.4 INTEGRALS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.294, + 0.825, + 0.323 + ], + "angle": 0, + "content": "The known integral requirement comes from the integrals required in the transition kernel, and can be relaxed two possible ways:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.333, + 0.825, + 0.361 + ], + "angle": 0, + "content": "- numerical integration of function with unknown integral. This is expected to have low error given that the function is scalar-in scalar-out." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.365, + 0.825, + 0.394 + ], + "angle": 0, + "content": "- Directly parameterize the integral and use auto-grad when needing the functions not-integrated." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.333, + 0.825, + 0.394 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.403, + 0.825, + 0.45 + ], + "angle": 0, + "content": "We stick with the known integrals. In conclusion, the underlying parameters are positive scalar functions \\( b_{q}(s), b_{d}(s) \\) defined on the time domain and with known integral, and general matrices \\( \\tilde{\\mathbf{Q}}_{\\phi}, \\tilde{\\mathbf{D}}_{\\phi} \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.465, + 0.298, + 0.479 + ], + "angle": 0, + "content": "C.5 INSTANCES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.49, + 0.825, + 0.521 + ], + "angle": 0, + "content": "VPSDE. VPSDE has \\( K = 1 \\). Consequently, \\( \\mathbf{Q}, \\mathbf{D} \\) are \\( K \\times K \\). The only \\( 1 \\times 1 \\) skew-symmetric matrix is 0, so \\( \\mathbf{Q} = 0 \\). Setting \\( \\mathbf{D}(t) = \\frac{1}{2}\\beta(t) \\) recovers VPSDE:" + }, + { + "type": "equation", + "bbox": [ + 0.394, + 0.524, + 0.825, + 0.555 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = - \\frac {\\beta (t)}{2} \\mathbf {y} d t + \\sqrt {\\beta (t)} d \\mathbf {B} _ {t} \\tag {30}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.557, + 0.642, + 0.575 + ], + "angle": 0, + "content": "\\(\\nabla H(\\mathbf{y}) = \\mathbf{y}\\) so \\(\\mathbf{H}(\\mathbf{y}) = \\frac{1}{2}\\| \\mathbf{y}\\| _2^2\\) . The stationary distribution is \\(\\mathcal{N}(0,\\mathbf{I})\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.587, + 0.636, + 0.602 + ], + "angle": 0, + "content": "CLD. The CLD process (eq 5 in Dockhorn et al. 
(2021)) is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.309, + 0.604, + 0.688, + 0.641 + ], + "angle": 0, + "content": "\\[\n\\left( \\begin{array}{c} d \\mathbf {z} _ {t} \\\\ d \\mathbf {v} _ {r} \\end{array} \\right) = d \\mathbf {y} _ {t} = \\left( \\begin{array}{c c} 0 & \\frac {\\beta}{M} \\\\ - \\beta & - \\frac {\\Gamma \\beta}{M} \\end{array} \\right) \\mathbf {y} _ {t} + \\left( \\begin{array}{c c} 0 & 0 \\\\ 0 & \\sqrt {2 \\Gamma \\beta} \\end{array} \\right) d \\mathbf {B} _ {t}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.643, + 0.408, + 0.658 + ], + "angle": 0, + "content": "In \\(\\mathbf{Q} / \\mathbf{D}\\) parameterization, we have" + }, + { + "type": "equation", + "bbox": [ + 0.311, + 0.662, + 0.682, + 0.695 + ], + "angle": 0, + "content": "\\[\nH (\\mathbf {y}) = \\frac {1}{2} \\| \\mathbf {z} \\| _ {2} ^ {2} + \\frac {1}{2 M} \\| \\mathbf {v} \\| _ {2} ^ {2}, \\qquad \\nabla_ {\\mathbf {u}} H (\\mathbf {y}) = \\left( \\begin{array}{c} \\mathbf {z} \\\\ \\frac {1}{M} \\mathbf {v} \\end{array} \\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.336, + 0.697, + 0.594, + 0.73 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} = \\left( \\begin{array}{c c} 0 & - \\beta \\\\ \\beta & 0 \\end{array} \\right), \\qquad \\mathbf {D} = \\left( \\begin{array}{c c} 0 & 0 \\\\ 0 & \\Gamma \\beta \\end{array} \\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.74, + 0.468, + 0.755 + ], + "angle": 0, + "content": "The stationary distribution of this process is:" + }, + { + "type": "equation", + "bbox": [ + 0.329, + 0.758, + 0.825, + 0.775 + ], + "angle": 0, + "content": "\\[\nq _ {\\phi , \\infty} \\propto \\exp (- H (\\mathbf {y})) = \\mathcal {N} (\\mathbf {z}; 0, I _ {d}) \\mathcal {N} (\\mathbf {v}; 0, M I _ {d}) \\tag {31}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.784, + 0.825, + 0.813 + ], + "angle": 0, + "content": "ALDA. Mou et al. 
(2019) define a third-order diffusion process for the purpose of gradient-based MCMC sampling. The ALDA diffusion process can be specified as" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.816, + 0.825, + 0.865 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} = \\left( \\begin{array}{c c c} 0 & - \\frac {1}{L} I & 0 \\\\ \\frac {1}{L} I & 0 & - \\gamma I \\\\ 0 & \\gamma I & 0 \\end{array} \\right), \\quad \\mathbf {D} = \\left( \\begin{array}{c c c} 0 & 0 & 0 \\\\ 0 & 0 & 0 \\\\ 0 & 0 & \\frac {\\xi}{L} I \\end{array} \\right). \\tag {32}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.867, + 0.825, + 0.897 + ], + "angle": 0, + "content": "Note that \\(\\mathbf{Q}\\) is skew-symmetric and \\(\\mathbf{D}\\) is positive semi-definite, therefore we have that \\(q_{t}(\\mathbf{u})\\to q_{\\phi ,\\infty}\\). In this case," + }, + { + "type": "equation", + "bbox": [ + 0.332, + 0.9, + 0.663, + 0.93 + ], + "angle": 0, + "content": "\\[\nq _ {\\phi , \\infty} = \\mathcal {N} (\\mathbf {z}; 0, \\mathbf {I} _ {d}) \\mathcal {N} (\\mathbf {v} _ {1}; 0, \\frac {1}{L} \\mathbf {I} _ {d}) \\mathcal {N} (\\mathbf {v} _ {2}; 0, \\frac {1}{L} \\mathbf {I} _ {d})\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.814, + 0.12 + ], + "angle": 0, + "content": "MALDA. 
Similar to ALDA, we specify a diffusion process we term MALDA which we specify as" + }, + { + "type": "equation", + "bbox": [ + 0.313, + 0.127, + 0.825, + 0.176 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} = \\left( \\begin{array}{c c c} 0 & - \\frac {1}{L} I & - \\frac {1}{L} \\\\ \\frac {1}{L} I & 0 & - \\gamma I \\\\ \\frac {1}{L} & \\gamma I & 0 \\end{array} \\right), \\quad \\mathbf {D} = \\left( \\begin{array}{c c c} 0 & 0 & 0 \\\\ 0 & \\frac {1}{L} I & 0 \\\\ 0 & 0 & \\frac {1}{L} I \\end{array} \\right). \\tag {33}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.182, + 0.714, + 0.198 + ], + "angle": 0, + "content": "Note that \\(\\mathbf{Q}\\) is skew-symmetric and \\(\\mathbf{D}\\) is positive semi-definite. In this case this is" + }, + { + "type": "equation", + "bbox": [ + 0.332, + 0.205, + 0.666, + 0.234 + ], + "angle": 0, + "content": "\\[\nq _ {\\phi , \\infty} = \\mathcal {N} (\\mathbf {z}; 0, \\mathbf {I} _ {d}) \\mathcal {N} (\\mathbf {v} _ {1}; 0, \\frac {1}{L} I _ {d}) \\mathcal {N} (\\mathbf {v} _ {2}; 0, \\frac {1}{L} I _ {d})\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.251, + 0.533, + 0.266 + ], + "angle": 0, + "content": "D TRANSITIONS FOR LINEAR PROCESSES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.281, + 0.668, + 0.298 + ], + "angle": 0, + "content": "For time variable \\(s\\) and Brownian motion \\(\\widehat{\\mathbf{B}}_s\\) driving diffusions of the form" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.306, + 0.825, + 0.326 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {y} = f (\\mathbf {y}, s) d s + g (s) d \\widehat {\\mathbf {B}} _ {s}, \\tag {34}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.331, + 0.825, + 0.377 + ], + "angle": 0, + "content": "when \\( f_{\\phi}(\\mathbf{y}_s, s) \\), \\( g_{\\phi}(s) \\) are linear, the transition kernel \\( q_{\\phi}(\\mathbf{y}_s | \\mathbf{y}_0) \\) is always normal (Särkkä & Solin, 2019). 
Therefore, we just find the mean \\( \\mathbf{m}_{s|0} \\) and covariance \\( \\boldsymbol{\\Sigma}_{s|0} \\) of \\( q(\\mathbf{y}_s | \\mathbf{y}_0) \\). Let \\( f(\\mathbf{y}, s) = \\mathbf{A}(s)\\mathbf{y} \\). The un-conditional time \\( s \\) mean and covariance are solutions to" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.384, + 0.498, + 0.4 + ], + "angle": 0, + "content": "\\[\nd \\mathbf {m} _ {s} / d s = \\mathbf {A} (s) \\mathbf {m} _ {s}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.396, + 0.824, + 0.422 + ], + "angle": 0, + "content": "\\[\nd \\boldsymbol {\\Sigma} _ {s} / d s = \\mathbf {A} (s) \\boldsymbol {\\Sigma} _ {s} + \\boldsymbol {\\Sigma} _ {s} \\mathbf {A} ^ {\\top} (s) + g ^ {2} (s) \\tag {35}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.429, + 0.825, + 0.488 + ], + "angle": 0, + "content": "By (6.6) in Särkkä & Solin (2019), for computing conditionals \\( q(\\mathbf{y}_s|\\mathbf{y}_0) \\), we can take the marginal distribution ODEs and compute conditionals by simply setting the time 0 mean and covariance initial conditions to the conditioning value and to 0 respectively. We take (6.36-6.39) and set \\( \\mathbf{m}_0 = \\mathbf{u}_0 \\) and \\( \\boldsymbol{\\Sigma}_{0} = 0 \\) to condition. Let \\( [\\mathbf{A}]_s = \\int_0^s\\mathbf{A}(\\nu)d\\nu \\). The mean is" + }, + { + "type": "equation", + "bbox": [ + 0.273, + 0.495, + 0.825, + 0.54 + ], + "angle": 0, + "content": "\\[\n\\mathbf {m} _ {s \\mid 0} = \\exp \\left[ \\int_ {0} ^ {s} \\mathbf {A} (\\nu) d \\nu \\right] \\mathbf {y} _ {0} = \\exp \\left(\\left[ A \\right] _ {s}\\right) \\underbrace {\\exp (s \\mathbf {A}) \\mathbf {y} _ {0}} _ {\\text {n o i n t e g r a t i o n i f} \\mathbf {A} (\\nu) = \\mathbf {A}}, \\tag {36}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.549, + 0.825, + 0.59 + ], + "angle": 0, + "content": "where \\(\\exp\\) denotes matrix exponential. 
(6.36-6.39) state the covariance \\(q(\\mathbf{y}_s|\\mathbf{y}_0)\\) as a matrix factorization, for which a derivation is provided below \\(\\boldsymbol{\\Sigma}_{s} = \\mathbf{C}_{s}(\\mathbf{H}_{s})^{-1}\\) for \\(\\mathbf{C}_s,\\mathbf{H}_s\\) being the solutions of:" + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.596, + 0.825, + 0.632 + ], + "angle": 0, + "content": "\\[\n\\left( \\begin{array}{c} \\frac {d}{d s} \\mathbf {C} _ {s} \\\\ \\frac {d}{d s} \\mathbf {H} _ {s} \\end{array} \\right) = \\left( \\begin{array}{c c} \\mathbf {A} (s) & g ^ {2} (s) \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} (s) \\end{array} \\right) \\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) \\tag {37}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.637, + 0.82, + 0.654 + ], + "angle": 0, + "content": "To condition and get \\(\\Sigma_{s|0}\\) from \\(\\Sigma_s\\), we set \\(\\Sigma_0 = 0\\), and initialize \\(\\mathbf{C}_s, \\mathbf{H}_s\\) by \\(\\mathbf{C}_0 = \\mathbf{0}\\) and \\(\\mathbf{H}_0 = \\mathbf{I}\\)." + }, + { + "type": "equation", + "bbox": [ + 0.25, + 0.661, + 0.825, + 0.725 + ], + "angle": 0, + "content": "\\[\n\\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ \\begin{array}{c c} [ \\mathbf {A} ] _ {s} & [ g ^ {2} ] _ {s} \\\\ \\mathbf {0} & - [ \\mathbf {A} ^ {\\top} ] _ {s} \\end{array} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} = \\underbrace {\\exp \\left[ s \\left( \\begin{array}{c c} \\mathbf {A} & g ^ {2} \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} \\end{array} \\right) \\right] \\binom {\\mathbf {0}} {\\mathbf {I}}} _ {\\text {no integration if } \\mathbf {A} (\\nu) = \\mathbf {A} , \\, g (\\nu) = g}. 
\\tag {38}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.731, + 0.361, + 0.75 + ], + "angle": 0, + "content": "Finally, \\(\\mathbf{\\Sigma}_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.766, + 0.594, + 0.78 + ], + "angle": 0, + "content": "D.1 DERIVATION OF THE COVARIANCE MATRIX SOLUTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.791, + 0.826, + 0.851 + ], + "angle": 0, + "content": "Equation (35) gives an expression for \\(d\\pmb{\\Sigma}_s / ds\\). To derive the matrix factorization technique used in eq. (37), we use eq. (35) and the desired condition \\(\\pmb{\\Sigma}_s = \\mathbf{C}_s\\mathbf{H}_s^{-1}\\) to derive expressions for \\(d\\mathbf{C}_s / ds\\) and \\(d\\mathbf{H}_s / ds\\) and suitable initial conditions so that the factorization also starts at the desired \\(\\pmb{\\Sigma}_0\\). Let \\(\\pmb{\\Sigma}_s = \\mathbf{C}_s\\mathbf{H}_s^{-1}\\), then note that \\(\\mathbf{C}_s, \\mathbf{H}_s\\) satisfies" + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.857, + 0.627, + 0.923 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {d}{d s} \\boldsymbol {\\Sigma} _ {s} = \\frac {d}{d s} \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\\\ = \\mathbf {C} _ {s} \\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.327, + 0.119 + ], + "angle": 0, + "content": "And using the fact that" + }, + { + "type": "equation", + "bbox": [ + 0.438, + 0.13, + 0.546, + 0.159 + ], + "angle": 0, + "content": "\\[\n\\frac {d}{d s} \\mathbf {H} _ {s} \\mathbf {H} _ 
{s} ^ {- 1} = 0\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.161, + 0.545, + 0.19 + ], + "angle": 0, + "content": "\\[\n\\mathbf {H} _ {s} \\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} + \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right) = 0\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.46, + 0.192, + 0.671, + 0.222 + ], + "angle": 0, + "content": "\\[\n\\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} = - \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.231, + 0.251, + 0.245 + ], + "angle": 0, + "content": "we get that" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.255, + 0.75, + 0.289 + ], + "angle": 0, + "content": "\\[\n\\mathbf {C} _ {s} \\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} = - \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right) + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.291, + 0.842, + 0.344 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} - \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right) + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} = \\mathbf {A} (s) \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) + g ^ {2} (s) \\\\ = \\mathbf {A} (s) \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} \\mathbf {H} _ {s} ^ {- 1} + g ^ {2} (s) \\mathbf {H} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.225, + 0.346, + 0.796, + 0.446 + ], + 
"angle": 0, + "content": "\\[\n\\begin{array}{l} \\left(- \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} + \\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} = \\left(\\mathbf {A} (s) \\mathbf {C} _ {s} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} \\\\ - \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} + \\frac {d}{d s} \\mathbf {C} _ {s} = \\mathbf {A} (s) \\mathbf {C} _ {s} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s} \\\\ \\left[ \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\quad \\mathbf {I} _ {d} \\right] ^ {\\top} \\frac {d}{d s} \\left( \\begin{array}{c} \\mathbf {H} _ {s} \\\\ \\mathbf {C} _ {s} \\end{array} \\right) = \\left[ \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\quad \\mathbf {I} _ {d} \\right] ^ {\\top} \\left( \\begin{array}{c} - \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} \\\\ \\mathbf {A} (s) \\mathbf {C} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s} \\end{array} \\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.455, + 0.459, + 0.47 + ], + "angle": 0, + "content": "Now, we note \\(\\mathbf{C}_s\\), \\(\\mathbf{H}_s\\) satisfy the following" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.48, + 0.546, + 0.51 + ], + "angle": 0, + "content": "\\[\n\\frac {d}{d s} \\mathbf {H} _ {s} = - \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.512, + 0.598, + 0.541 + ], + "angle": 0, + "content": "\\[\n\\frac {d}{d s} \\mathbf {C} _ {s} = \\mathbf {A} (s) \\mathbf {C} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.55, + 0.299, + 0.565 + ], + "angle": 0, + "content": "which implies that" + }, + { + "type": "equation", + "bbox": [ 
+ 0.36, + 0.575, + 0.825, + 0.609 + ], + "angle": 0, + "content": "\\[\n\\frac {d}{d s} \\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) = \\left( \\begin{array}{c c} \\mathbf {A} (s) & g ^ {2} (s) \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} (s) \\end{array} \\right) \\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) \\tag {39}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.619, + 0.489, + 0.637 + ], + "angle": 0, + "content": "with \\(\\mathbf{C}_0 = \\pmb{\\Sigma}_0\\) and \\(\\mathbf{H}_0 = \\mathbf{I}_d\\), as \\(\\mathbf{C}_0\\mathbf{H}_0^{-1} = \\pmb{\\Sigma}_0\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.656, + 0.411, + 0.67 + ], + "angle": 0, + "content": "D.2 HYBRID SCORE MATCHING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.683, + 0.825, + 0.753 + ], + "angle": 0, + "content": "Instead of computing \\( q(\\mathbf{y}_s|\\mathbf{y}_0) \\), we can apply the hybrid score matching principle (Dockhorn et al., 2021) to reduce variance by compute objectives using \\( q(\\mathbf{y}_s|x) \\) instead of \\( q(\\mathbf{y}_s|\\mathbf{y}_0) \\), which amounts to integrating out \\( \\mathbf{v}_0 \\). To accomplish this, following Särkkä & Solin (2019), we simply replace \\( \\mathbf{y}_0 \\) with \\( [x,\\mathbb{E}[\\mathbf{v}_0]] \\) in the expression for \\( \\mathbf{m}_{s|0} \\), i.e. 
replace the conditioning value of \\( \\mathbf{v}_0 \\) with the mean of its chosen initial distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.363, + 0.761, + 0.825, + 0.802 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\mathbf {y} _ {s} | x ] = \\exp \\left[ \\int_ {0} ^ {s} A (\\nu) d \\nu \\right] \\binom {x} {\\mathbb {E} [ \\mathbf {v} _ {0} ]} \\tag {40}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.819, + 0.825, + 0.891 + ], + "angle": 0, + "content": "For the covariance, instead of using \\(\\mathbf{C}_0 = \\boldsymbol{\\Sigma}_0 = \\mathbf{0}\\), we use a block matrix to condition on \\(x\\) but not \\(\\mathbf{v}_0\\). We decompose \\(\\boldsymbol{\\Sigma}_0\\) into its blocks \\(\\boldsymbol{\\Sigma}_{0,xx}\\), \\(\\boldsymbol{\\Sigma}_{0,vv}\\), \\(\\boldsymbol{\\Sigma}_{0,xv}\\). As before, to condition on \\(x\\) we set \\(\\boldsymbol{\\Sigma}_{0,xx} = \\mathbf{0}\\). Because \\(q(\\mathbf{v}_0)\\) is set to be independent of \\(x\\), \\(\\boldsymbol{\\Sigma}_{0,xv}\\) is also set to \\(\\mathbf{0}\\). Finally, instead of \\(\\mathbf{0}\\), to marginalize out \\(\\mathbf{v}_0\\), \\(\\boldsymbol{\\Sigma}_{0,vv}\\) is set to the covariance of the chosen initial time zero distribution for \\(\\mathbf{v}_0\\). E.g. if \\(\\mathbf{v}_{0,j} \\sim N(0,\\gamma)\\) for each dimension, then \\(\\boldsymbol{\\Sigma}_{0,vv} = \\gamma I\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We operationalize this in a simple piece of code, which makes the ELBO tractable and easy, i.e. skips both analytic derivations and numerical forward integration during training." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.578, + 0.119 + ], + "angle": 0, + "content": "D.3 TRANSITIONS IN STATIONARY PARAMETERIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.136, + 0.825, + 0.153 + ], + "angle": 0, + "content": "In terms of \\(\\mathbf{Q}\\), \\(\\mathbf{D}\\), the transitions \\(q(\\mathbf{y}_s|\\mathbf{y}_0)\\) for time \\(s\\) are normal with mean \\(\\mathbf{m}_{s|0}\\) and \\(\\pmb{\\Sigma}_{s|0}\\) equal to:" + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.174, + 0.826, + 0.23 + ], + "angle": 0, + "content": "\\[\n\\mathbf {m} _ {s \\mid 0} = \\exp \\left(- \\left[ \\mathbf {Q} + \\mathbf {D} \\right] _ {s}\\right) \\mathbf {y} _ {0}, \\quad \\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ \\binom {- [ \\mathbf {Q} + \\mathbf {D} ] _ {s}} {\\mathbf {0}} \\begin{array}{c c} {[ 2 \\mathbf {D} ] _ {s}} \\\\ {[ (\\mathbf {Q} + \\mathbf {D}) ^ {\\top} ] _ {s}} \\end{array} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} \\tag {41}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.253, + 0.644, + 0.27 + ], + "angle": 0, + "content": "where \\(\\mathbf{\\Sigma}_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}\\). 
For the time invariant case, this simplifies to" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.291, + 0.826, + 0.332 + ], + "angle": 0, + "content": "\\[\n\\mathbf {m} _ {s \\mid 0} = \\exp [ - s (\\mathbf {Q} + \\mathbf {D}) ] \\mathbf {y} _ {0}, \\quad \\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ s \\binom {- (\\mathbf {Q} + \\mathbf {D})} {\\mathbf {0}} \\quad \\binom {2 \\mathbf {D}} {(\\mathbf {Q} + \\mathbf {D}) ^ {\\top}} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} \\tag {42}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.365, + 0.767, + 0.398 + ], + "angle": 0, + "content": "E GENERIC CHANGE OF MEASURE AND JENSEN'S FOR APPROXIMATE MARGINALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.424, + 0.825, + 0.466 + ], + "angle": 0, + "content": "Suppose \\(\\mathbf{u} = [\\mathbf{z},\\mathbf{v}]\\) and we have an expression for \\(p(\\mathbf{u} = [z,v]) = p(\\mathbf{z} = z,\\mathbf{v} = v)\\). By marginalization, we can get \\(p(\\mathbf{z} = z)\\), and we can introduce another distribution \\(q\\) to pick a sampling distribution of our choice:" + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.486, + 0.824, + 0.62 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} p (\\mathbf {z} = z) = \\int_ {v} p (\\mathbf {z} = z, \\mathbf {v} = v) d v \\\\ = \\int_ {v} p (\\mathbf {z} = z | \\mathbf {v} = v) p (\\mathbf {v} = v) d v \\\\ = \\int_ {v} \\frac {q (\\mathbf {v} = v | \\mathbf {z} = z)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} p (\\mathbf {z} = z | \\mathbf {v} = v) p (\\mathbf {v} = v) d v \\tag {43} \\\\ = \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.641, + 0.825, + 0.669 + ], + "angle": 0, + "content": "We often work with these expressions in log space, and need to pull the expectation outside to 
use Monte Carlo. Jensen's bound allows this:" + }, + { + "type": "equation", + "bbox": [ + 0.327, + 0.69, + 0.668, + 0.756 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\log p (\\mathbf {z} = z) = \\log \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ \\geq \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.777, + 0.738, + 0.793 + ], + "angle": 0, + "content": "The following shows that the bound is tight when \\( q(\\mathbf{v} = v|\\mathbf{z} = z) = p(\\mathbf{v} = v|\\mathbf{z} = z) \\):" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.816, + 0.824, + 0.927 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] = _ {\\text {a s s u m e}} \\mathbb {E} _ {p (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{p (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ = \\mathbb {E} _ {p (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\left(\\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{p (\\mathbf {v} = v , \\mathbf {z} = z)} \\cdot p (\\mathbf {z} = z)\\right) \\right] \\tag {44} \\\\ = \\mathbb {E} _ {p (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log p (\\mathbf {z} = z) \\right] \\\\ = \\log p (\\mathbf {z} = z) \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + 
}, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.347, + 0.117 + ], + "angle": 0, + "content": "F ELBO FOR MDMS" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.133, + 0.826, + 0.365 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\log p _ {\\theta} (x) = \\log \\int_ {v _ {0}} p _ {\\theta} \\left(x _ {0}, v _ {0}\\right) d v _ {0} (45) \\\\ = \\log \\int_ {v _ {0}} p _ {\\theta} \\left(u _ {0} = [ x, v _ {0} ]\\right) (46) \\\\ = \\log \\int_ {v _ {0}} \\frac {q \\left(v _ {0} \\mid x\\right)}{q \\left(v _ {0} \\mid x\\right)} p _ {\\theta} \\left(u _ {0} = [ x, v _ {0} ]\\right) (47) \\\\ = \\log \\mathbb {E} _ {q \\left(v _ {0} \\mid x\\right)} \\left[ \\frac {p _ {\\theta} \\left(u _ {0} = [ x , v _ {0} ]\\right)}{q \\left(v _ {0} \\mid x\\right)} \\right] (48) \\\\ \\geq \\mathbb {E} _ {q \\left(v _ {0} \\mid x\\right)} \\left[ \\log p _ {\\theta} \\left(u _ {0} = [ x, v _ {0} ]\\right) - \\log q \\left(v _ {0} \\mid x\\right) \\right] (49) \\\\ \\geq \\mathbb {E} _ {q (y | x)} \\left[ \\log \\pi_ {\\theta} (y _ {T}) + \\int_ {0} ^ {T} - \\| s _ {\\theta} \\| _ {g ^ {2}} ^ {2} - \\nabla \\cdot \\left(g ^ {2} s _ {\\theta} - f\\right) d s - \\log q \\left(y _ {0} ^ {v} | x\\right) \\right] (50) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.372, + 0.825, + 0.402 + ], + "angle": 0, + "content": "The first inequality holds due to Jensen's inequality and the second due to an application of Theorem 1 from Huang et al. (2021) or Theorem 3 from Song et al. (2021) applied to the joint variable \\(\\mathbf{u}_0\\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.419, + 0.313, + 0.433 + ], + "angle": 0, + "content": "F.1 ISM TO DSM" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.445, + 0.461, + 0.459 + ], + "angle": 0, + "content": "F.1.1 LEMMA: EXPECTATION BY PARTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.469, + 0.825, + 0.501 + ], + "angle": 0, + "content": "We will need a form of multivariate integration by parts which gives us for some \\( f \\) and some \\( q(x) \\), \\( E_{q(x)}[\\nabla_x \\cdot f(x)] = -E_{q(x)}[f(x)^\\top \\nabla_x \\log q(x)] \\)" + }, + { + "type": "equation", + "bbox": [ + 0.191, + 0.518, + 0.808, + 0.889 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} E _ {q (x)} [ \\nabla_ {x} \\cdot f _ {i} (x) ] = \\int q (x) \\sum_ {i = 1} ^ {d} [ \\nabla_ {x _ {i}} f _ {i} (x) ] d x \\\\ = \\int \\sum_ {i = 1} ^ {d} q (x) \\nabla_ {x _ {i}} f _ {i} (x) d x \\\\ = \\sum_ {i = 1} ^ {d} \\int_ {x _ {- i}} \\int_ {x _ {i}} q (x) \\nabla_ {x _ {i}} f _ {i} (x) d x _ {i} d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} \\int \\left[ \\left[ q (x) \\int \\nabla_ {x _ {i}} f _ {i} (x) d x _ {i} \\right] _ {- \\infty} ^ {\\infty} - \\int \\nabla_ {x _ {i}} q (x) \\int \\nabla_ {x _ {i}} f _ {i} (x) d x _ {i} \\right] d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} \\int \\left[ - \\int \\nabla_ {x _ {i}} q (x) f _ {i} (x) d x _ {i} \\right] d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} \\int \\left[ - \\int q (x) \\nabla_ {x _ {i}} \\log q (x) f _ {i} (x) d x _ {i} \\right] d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} - \\int \\int q (x) \\nabla_ {x _ {i}} \\log q (x) f _ {i} (x) d x _ {i} d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} - E _ {q (x)} \\left[ \\nabla_ {x _ {i}} \\log q (x) f _ {i} (x) \\right] \\\\ = - E _ {q (x)} [ f (x) ^ {\\top} \\nabla_ {x} \\log q (x) ] \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.925 + ], + "angle": 0, + "content": "This equality also follows directly from the Stein operator using the 
generator method to the Langevin diffusion (Barbour, 1988)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.312, + 0.119 + ], + "angle": 0, + "content": "F.1.2 DSM ELBO" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.128, + 0.457, + 0.145 + ], + "angle": 0, + "content": "Using the \"expectation by parts\", we have:" + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.149, + 0.756, + 0.171 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} _ {q (u _ {t} | x)} [ \\nabla_ {u _ {t}} \\cdot g ^ {2} (t) s _ {\\theta} (u _ {t}, t) ] = - \\mathbb {E} _ {q (u _ {t} | x)} [ (g ^ {2} (t) s _ {\\theta} (u _ {t}, t)) ^ {\\top} \\nabla_ {u _ {t}} \\log q (u _ {t} | x) ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.175, + 0.616, + 0.192 + ], + "angle": 0, + "content": "Also we have, for \\( s_{\\theta} \\) evaluated at \\( (u_t, t) \\), by completing the square," + }, + { + "type": "equation", + "bbox": [ + 0.178, + 0.197, + 0.824, + 0.227 + ], + "angle": 0, + "content": "\\[\n- \\frac {1}{2} | | s _ {\\theta} | | _ {g ^ {2} (t)} + s _ {\\theta} ^ {\\top} g ^ {2} (t) \\nabla \\log q (u _ {t} | x) = - \\frac {1}{2} | | s _ {\\theta} - \\nabla \\log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2} +. 
5 | | \\nabla \\log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.231, + 0.343, + 0.246 + ], + "angle": 0, + "content": "The two together give us:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.252, + 0.824, + 0.436 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\log p (x) \\geq \\mathbb {E} _ {q (u _ {T} | x)} \\left[ \\log \\pi \\right] + \\int_ {0} ^ {T} \\left[ \\mathbb {E} _ {q (u _ {t} | x)} \\Big [ - \\nabla \\cdot g ^ {2} s _ {\\theta} - . 5 | | s _ {\\theta} | | _ {g ^ {2} (t)} ^ {2} + \\nabla \\cdot f \\Big ] d t \\right] \\\\ = \\mathbb {E} _ {q (u _ {T} | x)} \\left[ \\log \\pi \\right] + \\int_ {0} ^ {T} \\left[ \\mathbb {E} _ {q (u _ {t} | x)} \\left[ \\left(g ^ {2} s _ {\\theta}\\right) ^ {\\top} \\nabla_ {u _ {t}} \\log q (u _ {t} | x) - . 5 | | s _ {\\theta} | | _ {g ^ {2} (t)} ^ {2} + \\nabla \\cdot f \\right] d t \\right] \\\\ = \\mathbb {E} _ {q (u _ {T} | x)} \\left[ \\log \\pi \\right] + \\int_ {0} ^ {T} \\left[ \\mathbb {E} _ {q (u _ {t} | x)} \\left[ - \\frac {1}{2} | | s _ {\\theta} - \\nabla \\log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2} \\right. \\right. \\\\ \\left. + . 5 \\left| | \\nabla \\log q \\left(u _ {t} | x\\right) \\right| _ {g ^ {2} (t)} ^ {2} + \\nabla_ {u _ {t}} \\cdot f \\right] \\Biggr ] d t \\tag {51} \\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.452, + 0.351, + 0.466 + ], + "angle": 0, + "content": "F.2 NOISE PREDICTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.477, + 0.825, + 0.509 + ], + "angle": 0, + "content": "We have that for normal \\(\\mathcal{N}(\\mathbf{y}_s;\\mathbf{m}_{s|0},\\pmb{\\Sigma}_{s|0})\\), we can sample \\(\\mathbf{y}_s\\) with normal noise \\(\\epsilon \\sim \\mathcal{N}(0,I)\\) and \\(\\mathbf{y}_s = \\mathbf{m}_{s|0} + \\mathbf{L}\\epsilon\\) where \\(\\mathbf{L}\\) is the cholesky decomposition of \\(\\pmb{\\Sigma}_{s|0}\\). 
Then, the score is" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.515, + 0.657, + 0.734 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\nabla_ {\\mathbf {y} _ {s}} \\log q (\\mathbf {y} _ {s} | \\mathbf {y} _ {0}) \\Bigg | _ {\\mathbf {y} _ {s} = \\mathbf {m} _ {s | 0} + \\mathbf {L} \\epsilon} \\\\ = - \\boldsymbol {\\Sigma} _ {s | 0} ^ {- 1} \\left(\\mathbf {y} _ {s} - \\mathbf {m} _ {s | 0}\\right) \\\\ = - \\boldsymbol {\\Sigma} _ {s | 0} ^ {- 1} \\left(\\left[ \\mathbf {m} _ {s | 0} + \\mathbf {L} \\epsilon \\right] - \\mathbf {m} _ {s | 0}\\right) \\\\ = - \\boldsymbol {\\Sigma} _ {s | 0} ^ {- 1} \\left(\\mathbf {L} \\epsilon\\right) \\\\ = - \\left(\\mathbf {L} \\mathbf {L} ^ {\\top}\\right) ^ {- 1} \\left(\\mathbf {L} \\epsilon\\right) \\\\ = - \\left(\\mathbf {L} ^ {\\top}\\right) ^ {- 1} \\mathbf {L} ^ {- 1} \\mathbf {L} \\epsilon \\\\ = - \\left(\\mathbf {L} ^ {\\top}\\right) ^ {- 1} \\epsilon = - \\left(\\mathbf {L} ^ {- 1}\\right) ^ {\\top} \\epsilon = - \\mathbf {L} ^ {\\top , - 1} \\epsilon \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.748, + 0.614, + 0.767 + ], + "angle": 0, + "content": "Parameterize \\( s_{\\theta}(\\mathbf{y}_s,s) \\) as \\( s_{\\theta}(\\mathbf{y}_s,s) = -\\mathbf{L}^{\\top, -1}\\epsilon_{\\theta}(\\mathbf{y},s) \\). 
This gives" + }, + { + "type": "equation", + "bbox": [ + 0.235, + 0.773, + 0.76, + 0.923 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {1}{2} \\| - \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s) \\quad - \\quad - \\mathbf {L} ^ {\\top , - 1} \\epsilon \\| _ {g _ {\\phi} ^ {2} (s)} ^ {2} \\\\ = \\frac {1}{2} \\| \\mathbf {L} ^ {\\top , - 1} \\epsilon \\quad - \\quad \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s) \\| _ {g _ {\\phi} ^ {2} (s)} ^ {2} \\\\ = \\frac {1}{2} \\left(\\mathbf {L} ^ {\\top , - 1} \\epsilon - \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s)\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(\\mathbf {L} ^ {\\top , - 1} \\epsilon - \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s)\\right) \\\\ = \\frac {1}{2} \\left(\\mathbf {L} ^ {\\top , - 1} \\left[ \\epsilon - \\epsilon_ {\\theta} (\\mathbf {y}, s) \\right]\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(\\mathbf {L} ^ {\\top , - 1} \\left[ \\epsilon - \\epsilon_ {\\theta} (\\mathbf {y}, s) \\right]\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "We can also use this insight to analytically compute the quadratic score term (following is computed per data-dimension, so must be multiplied by \\( D \\) when computing the ELBO):" + }, + { + "type": "equation", + "bbox": [ + 0.176, + 0.143, + 0.888, + 0.44 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\frac {1}{2} \\| \\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} 
| \\mathbf {y} _ {0}) \\| _ {g _ {\\phi} ^ {2} (s)} ^ {2} \\right] = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\left(\\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0})\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(\\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0})\\right) \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\left(- \\mathbf {L} ^ {\\top , - 1} \\epsilon\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(- \\mathbf {L} ^ {\\top , - 1} \\epsilon\\right) \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\epsilon^ {\\top} (- \\mathbf {L} ^ {- 1}) g _ {\\phi} ^ {2} (s) (- \\mathbf {L} ^ {\\top , - 1}) \\epsilon \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\epsilon^ {\\top} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\epsilon \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\epsilon} \\left[ \\epsilon^ {\\top} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\epsilon \\right] \\\\ = \\mathbb {E} _ {\\epsilon} \\left[ \\epsilon^ {\\top} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\epsilon \\right] \\\\ = \\operatorname {T r a c e} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.459, + 0.575, + 0.473 + ], + "angle": 0, + "content": "G ELBOS IN STATIONARY PARAMETERIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.491, + 0.825, + 0.52 + ], + "angle": 0, + "content": "We use the stationary parameterization described in appendix C. 
We now specialize the ELBO to the linear stationary parameterization." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.527, + 0.825, + 0.557 + ], + "angle": 0, + "content": "Recall \\(f_{\\phi}(\\mathbf{y},s) = -[\\mathbf{Q}_{\\phi}(s) + \\mathbf{D}_{\\phi}(s)]\\mathbf{y}\\). Recall \\(g_{\\phi}(s) = \\sqrt{2\\mathbf{D}_{\\phi}(s)}\\) We have \\(g_{\\phi}^{2}(s) = 2\\mathbf{D}_{\\phi}(s)\\). We can write the MDM ISM ELBO as" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.565, + 0.825, + 0.606 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} ^ {\\text {m i s m}} = \\mathbb {E} _ {v \\sim q _ {\\gamma}} \\left[ \\mathbb {E} _ {s \\sim \\operatorname {U n i f} (0, T)} \\left[ \\ell_ {s} ^ {(i s m)} \\right] + \\ell_ {T} + \\ell_ {q} \\right] \\tag {52}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.614, + 0.218, + 0.627 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.319, + 0.636, + 0.517, + 0.687 + ], + "angle": 0, + "content": "\\[\n\\ell_ {s _ {\\theta}} = - \\frac {1}{2} \\| s _ {\\theta} (\\mathbf {y} _ {s}, s) \\| _ {\\underbrace {2 \\mathbf {D} _ {\\phi} (s)} _ {g _ {\\phi} ^ {2}}} ^ {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.301, + 0.69, + 0.697, + 0.732 + ], + "angle": 0, + "content": "\\[\n\\ell_ {\\mathrm {d i v - f g s}} = \\nabla_ {\\mathbf {y} _ {s}} \\cdot \\left[ \\underbrace {- [ \\mathbf {Q} _ {\\phi} (s) + \\mathbf {D} _ {\\phi} (s) ] \\mathbf {y} _ {s}} _ {f _ {\\phi}} - \\underbrace {2 \\mathbf {D} _ {\\phi} (s)} _ {g _ {\\phi} ^ {2}} s _ {\\theta} (\\mathbf {y} _ {s}, s) \\right]\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.316, + 0.732, + 0.824, + 0.777 + ], + "angle": 0, + "content": "\\[\n\\ell_ {s} ^ {\\text {i s m}} = \\mathbb {E} _ {\\substack {q _ {\\phi , s, (x, v)} \\\\ \\text {depends on } Q, D}} \\left[ \\ell_ {s \\theta} + \\ell_ {\\text {div - f g s}} \\right] \\tag{53}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.322, + 0.78, + 0.538, + 
0.822 + ], + "angle": 0, + "content": "\\[\n\\ell_ {T} = \\mathbb {E} _ {\\substack {q _ {\\phi , T , (x, v)} \\\\ \\text{depends on } \\mathbf {Q}, \\mathbf {D}}} \\Big [ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) \\Big ]\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.826, + 0.452, + 0.843 + ], + "angle": 0, + "content": "\\[\n\\ell_ {q} = - \\log q _ {\\gamma} (v | x)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.858, + 0.299, + 0.872 + ], + "angle": 0, + "content": "For the DSM form," + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.881, + 0.825, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} ^ {\\mathrm {m d s m}} = \\mathbb {E} _ {v \\sim q _ {\\gamma}} \\left[ \\mathbb {E} _ {s \\sim \\operatorname {Unif} (0, T)} \\left[ \\ell_ {s} ^ {(d s m)} \\right] + \\ell_ {T} + \\ell_ {q} \\right] \\tag {54}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.218, + 0.117 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.305, + 0.127, + 0.695, + 0.323 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\ell_ {\\mathrm {div - f}} = \\nabla_ {\\mathbf {y} _ {s}} \\cdot \\underbrace {- [ \\mathbf {Q} _ {\\phi} (s) + \\mathbf {D} _ {\\phi} (s) ] \\mathbf {y} _ {s}} _ {f _ {\\phi}} \\\\ \\ell_{\\text{fwd - score}} = \\frac{1}{2}\\|\\underbrace{\\nabla_{\\mathbf{y}_{s}}\\log q_{\\phi}(\\mathbf{y}_{s}|\\mathbf{y}_{0})}_{\\text{depends on }\\mathbf{Q},\\mathbf{D}}\\| _ {\\underbrace{2\\mathbf{D}_{\\phi}(s)}_{g_{\\phi}^{2}}} ^ {2} \\\\ \\ell_ {\\text {neg - score diff}} = - \\frac {1}{2} \\| s _ {\\theta} (\\mathbf {y} _ {s}, s) - \\underbrace {\\nabla_ {\\mathbf {y} _ {s}} \\log q _ 
{\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0})} _ {\\text {d e p e n d s o n Q , D}} \\| _ {\\underbrace {2 \\mathbf {D} _ {\\phi} (s)} _ {g _ {\\phi} ^ {2}}} ^ {2} \\\\ \\ell_ {s} ^ {(d s m)} = \\mathbb {E} _ {\\substack {q _ {\\phi , s, (x, v)} \\\\ \\text{depends on} \\mathbf {Q}, \\mathbf {D}}} \\left[ \\ell_ {\\text{neg - score diff}} + \\ell_ {\\text{fwd - score}} + \\ell_ {\\text{div - f}} \\right] \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.327, + 0.119 + ], + "angle": 0, + "content": "H ALGORITHMS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.134, + 0.44, + 0.149 + ], + "angle": 0, + "content": "H.1 GENERIC TRANSITION KERNEL" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.166, + 0.474, + 0.183 + ], + "angle": 0, + "content": "Algorithm 2 Get transition distribution \\(\\mathbf{y}_s|x\\)" + }, + { + "type": "algorithm", + "bbox": [ + 0.188, + 0.185, + 0.767, + 0.313 + ], + "angle": 0, + "content": "Input: data \\(x\\) time \\(s\\) A, \\(g\\) \ncompute: \\(\\mathbf{A}(s)\\) and \\(g(s)\\) \ncompute: \\(\\mathbf{M}_s = \\int_0^s\\mathbf{A}(t)dt\\) (integrated drift) \ncompute: \\(\\mathbf{N}_s = \\int_0^s g^2 (t)dt\\) (integrated diffusions squared) \ncompute: \\(\\gamma_{s|0} = \\exp \\left(\\mathbf{M}_s\\right)\\) (mean coefficient) \nset: \\(\\mathbf{y}_0 = [x,0_1,\\dots ,0_{K - 1}],\\pmb{\\Sigma}_{0,zz} = \\mathbf{0}\\) , and \\(\\pmb{\\Sigma}_{0,zv},\\pmb{\\Sigma}_{0,vv}\\) to chosen initial distribution \ncompute: \\(\\mathbf{m}_{s|0} = \\gamma_{s|0}\\mathbf{y}_0\\) (mean) \ncompute:" + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.318, + 0.826, + 0.361 + ], + "angle": 0, + "content": "\\[\n\\binom {\\mathbf 
{C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ \\binom {\\mathbf {M} _ {s}} {\\mathbf {0}} \\binom {\\mathbf {N} _ {s}} {- \\mathbf {M} _ {s} ^ {\\top}} \\right] \\binom {\\boldsymbol {\\Sigma} _ {0}} {\\mathbf {I}} \\quad (\\text {i n g r e d i e n t s f o r c o v .}) \\tag {55}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.369, + 0.444, + 0.386 + ], + "angle": 0, + "content": "compute: \\(\\pmb{\\Sigma}_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}\\) (cov.)" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.386, + 0.354, + 0.402 + ], + "angle": 0, + "content": "Output: \\(\\mathcal{N}(\\mathbf{m}_{s|0},\\pmb{\\Sigma}_{s|0})\\)" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.43, + 0.396, + 0.446 + ], + "angle": 0, + "content": "H.2 TRANSITIONS WITH \\(Q, D\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.457, + 0.825, + 0.503 + ], + "angle": 0, + "content": "Current param matrices \\(\\tilde{\\mathbf{Q}}_{\\phi},\\tilde{\\mathbf{D}}_{\\phi}\\) and along with fixed time-in scalar-out functions \\(b_{q}(s),b_{d}(s)\\) and their known integrals \\(B_{q}(s),B_{d}(s)\\). \\(q_{\\gamma}(v_0|z_0 = x)\\) taken to be parameterless so that \\(v_{0}\\sim \\mathcal{N}(0,I)\\). Model params are \\(s_\\theta\\) fixed \\(\\pi_{\\theta}\\)." + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.516, + 0.548, + 0.532 + ], + "angle": 0, + "content": "Algorithm 3 Get Q, D and their integrated terms M, N" + }, + { + "type": "algorithm", + "bbox": [ + 0.188, + 0.535, + 0.641, + 0.75 + ], + "angle": 0, + "content": "Input: time \\(s\\) and current params \\(\\phi\\) \ncompute: \\([b_q]_s = \\int_0^s b_q(\\nu)d\\nu\\) using known integral \\(B_{q}(s) - B_{q}(0)\\) \ncompute: \\([b_d]_s = \\int_0^s b_d(\\nu)d\\nu\\) using known integral \\(B_{d}(s) - B_{d}(0)\\). \ncompute: \\([\\mathbf{Q}_{\\phi}]_{s} = [b_{q}]_{s}\\cdot [\\tilde{\\mathbf{Q}}_{\\phi} - \\tilde{\\mathbf{Q}}_{\\phi}^{\\top}]\\) for current params \\(\\tilde{\\mathbf{Q}}_{\\phi}\\). 
\ncompute: \\([\\mathbf{D}_{\\phi}]_{s} = [b_{d}]_{s}\\cdot [\\tilde{\\mathbf{D}}_{\\phi}\\tilde{\\mathbf{D}}_{\\phi}^{\\top}]\\) for current params \\(\\tilde{\\mathbf{D}}_{\\phi}\\). \ncompute: \\(\\mathbf{M}_s = -([\\mathbf{Q}_{\\phi}]_s + [\\mathbf{D}_{\\phi}]_s)\\) (M just a variable name) \ncompute: \\(\\mathbf{N}_s = [2\\mathbf{D}_{\\phi}]_s = 2\\cdot [\\mathbf{D}_{\\phi}]_s\\) (N just a variable name) \ncompute: \\(\\mathbf{Q}_s = b_q(s)\\cdot [\\tilde{\\mathbf{Q}}_{\\phi} - \\tilde{\\mathbf{Q}}_{\\phi}^{\\top}]\\) (not integrated) \ncompute: \\(\\mathbf{D}_s = b_d(s)\\cdot [\\tilde{\\mathbf{D}}_{\\phi}\\tilde{\\mathbf{D}}_{\\phi}^{\\top}]\\) (not integrated) \ncompute: \\(A_s = -[\\mathbf{Q}_s + \\mathbf{D}_s]\\) (drift coef.) \ncompute: \\(g_s^2 = 2\\mathbf{D}_s\\) (diffusion coef. squared) \nOutput: \\(\\mathbf{A}_s, g_s^2, \\mathbf{M}_s, \\mathbf{N}_s\\)" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.774, + 0.355, + 0.788 + ], + "angle": 0, + "content": "H.3 ELBO ALGORITHMS" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code_caption", + "bbox": [ + 0.175, + 0.104, + 0.444, + 0.119 + ], + "angle": 0, + "content": "Algorithm 4 Get transition distributions" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.121, + 0.57, + 0.137 + ], + "angle": 0, + "content": "Input: Sample \\(\\mathbf{y}_0 = (x, v)\\) and time \\(s\\). 
Current params \\(\\phi\\)" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.136, + 0.429, + 0.15 + ], + "angle": 0, + "content": "set: \\(\\mathbf{A}_s,g_s^2,\\mathbf{M}_s,\\mathbf{N}_s\\gets\\) algorithm 3" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.15, + 0.523, + 0.174 + ], + "angle": 0, + "content": "compute: \\(\\mathbf{m}_{s|0} = \\exp \\left(\\mathbf{M}_s\\right)\\mathbf{y}_0\\) (transition mean)" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.173, + 0.507, + 0.187 + ], + "angle": 0, + "content": "compute: ingredients for transition cov. matrix:" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.194, + 0.824, + 0.236 + ], + "angle": 0, + "content": "\\[\n\\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ \\binom {\\mathbf {M} _ {s}} {\\mathbf {0}} \\binom {\\mathbf {N} _ {s}} {- \\mathbf {M} _ {s} ^ {\\top}} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} \\tag {56}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.245, + 0.495, + 0.262 + ], + "angle": 0, + "content": "compute: \\(\\Sigma_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}\\) (transition cov)." + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.262, + 0.548, + 0.278 + ], + "angle": 0, + "content": "instantiate: \\(q_{\\phi ,s,(x,v)} = q_{\\phi}(\\mathbf{y}_s|\\mathbf{y}_0) = \\mathcal{N}(\\mathbf{m}_{s|0},\\pmb {\\Sigma}_{s|0})\\)" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.278, + 0.361, + 0.294 + ], + "angle": 0, + "content": "Output: \\(q_{\\phi ,s,(x,v)},A_s,g_s^2\\)" + }, + { + "type": "code_caption", + "bbox": [ + 0.175, + 0.316, + 0.477, + 0.331 + ], + "angle": 0, + "content": "Algorithm 5 Compute ELBO with ism or dsm" + }, + { + "type": "code", + "bbox": [ + 0.189, + 0.334, + 0.765, + 0.476 + ], + "angle": 0, + "content": "input: Data point \\(x\\) and current params \\(\\theta, \\phi, \\gamma\\) \ndraw: an aux. 
sample \\(v \\sim q_{\\gamma}(v|x)\\) \ndraw: a sample \\(s \\sim \\mathrm{Unif}(0,T)\\) \nset: \\(\\mathbf{y}_0 = (x,v)\\) \nset: \\(q_{\\phi,s,\\mathbf{y}_0}, A_s, g_s^2 \\gets \\text{algorithm 4 called on } \\mathbf{y}_0, s, \\phi\\) \ndraw: \\(\\mathbf{y}_s \\sim q_{\\phi,s,\\mathbf{y}_0}\\) \ncompute: \\(\\ell_s\\) with \\(\\mathrm{dsm}(s)\\) (algorithm 6) or \\(\\mathrm{ism}(s)\\) (algorithm 7) on \\(\\mathbf{y}_s, \\theta, A_s, g_s^2, q_{\\phi,s,\\mathbf{y}_0}\\) \nset: \\(q_{\\phi,T,\\mathbf{y}_0}, --, -- \\gets \\text{algorithm 4 called on } \\mathbf{y}_0, T, \\phi\\) \ndraw: \\(\\mathbf{y}_T \\sim q_{\\phi,T,\\mathbf{y}_0}\\) \noutput: \\(\\ell_s + \\log \\pi_\\theta(\\mathbf{y}_T) - \\log q_\\gamma(v)\\)" + }, + { + "type": "code_caption", + "bbox": [ + 0.175, + 0.498, + 0.376, + 0.513 + ], + "angle": 0, + "content": "Algorithm 6 Compute \\(\\mathrm{dsm}(s)\\)" + }, + { + "type": "code", + "bbox": [ + 0.189, + 0.516, + 0.578, + 0.631 + ], + "angle": 0, + "content": "input: \\(\\mathbf{y}_s, \\theta, A_s, g_s^2, q_{\\phi,s,\\mathbf{y}_0}\\). 
\ncompute: fwd-score = \\(\\nabla_{\\mathbf{y}_s}\\) log \\(q_{\\phi}(\\mathbf{y}_s|\\mathbf{y}_0)\\) \ncompute: model-score = \\(s_\\theta(\\mathbf{y}_s, s)\\) \ncompute: fwd-score-term = \\(\\frac{1}{2}(\\mathrm{fwd-score})^\\top g_s^2\\) (fwd-score) \ncompute: score-diff = model-score - fwd-score \ncompute: diff-term = \\(-\\frac{1}{2}\\) score-diff\\(^\\top g_s^2\\) score-diff \ncompute: div-f = \\(\\nabla_{\\mathbf{y}_s} \\cdot A_s\\mathbf{y}_s\\) \noutput: \\(\\mathrm{dsm}(s) = \\mathrm{fwd-score-term} + \\mathrm{diff-term} + \\mathrm{div-f}\\)" + }, + { + "type": "code_caption", + "bbox": [ + 0.175, + 0.653, + 0.373, + 0.668 + ], + "angle": 0, + "content": "Algorithm 7 Compute \\(\\operatorname{ism}(s)\\)" + }, + { + "type": "code", + "bbox": [ + 0.189, + 0.671, + 0.567, + 0.774 + ], + "angle": 0, + "content": "input: \\(\\mathbf{y}_s,\\theta ,A_s,g_s^2,q_{\\phi ,s,\\mathbf{y}_0}\\) \ncompute: model-score \\(= s_{\\theta}(\\mathbf{y}_{s},s)\\) \ncompute: score-term \\(= -\\frac{1}{2}\\) model-score \\(^\\top g_s^2\\) model-score \ncompute: div-gs \\(= \\nabla_{\\mathbf{y}_s}\\cdot g_s^2 s_\\theta (\\mathbf{y}_s,s)\\) \ncompute: div-f \\(= \\nabla_{\\mathbf{y}_s}\\cdot A_s\\mathbf{y}_s\\) \ncompute: div-term \\(= -\\) div-gs \\(+\\) div-f \noutput: ism(s) \\(=\\) score-term \\(+\\) div-term" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.806, + 0.471, + 0.82 + ], + "angle": 0, + "content": "I VALID ELBO WITH TRUNCATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.825, + 0.925 + ], + "angle": 0, + "content": "The integrand in the ELBO and its gradients is not bounded at time 0. Therefore, following Sohl-Dickstein et al. (2015) and Song et al. (2021) the integrand in eq. (7) is integrated from \\([\\epsilon, T]\\), rather than \\([0, T]\\). However, that integral is not a valid lower bound on \\(\\log p_{\\theta}(x)\\). 
Instead, it can be viewed as a proper lower bound on the prior for a latent variable \\(\\mathbf{y}_{\\epsilon}\\). Therefore, to provide a bound for the data, one can introduce a likelihood and substitute the prior lower bound into a standard variational bound that integrates out the latent." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.132 + ], + "angle": 0, + "content": "To provide a valid lower bound for multivariate diffusions, we extend theorem 6 in Song et al. (2021) from univariate to multivariate diffusions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.826, + 0.164 + ], + "angle": 0, + "content": "Theorem 3. For transition kernel \\(q_{\\phi}(\\mathbf{y}_s \\mid \\mathbf{y}_0)\\), we can lower bound the model likelihood at time 0 as follows, for any \\(\\epsilon > 0\\)" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.168, + 0.826, + 0.202 + ], + "angle": 0, + "content": "\\[\n\\log p _ {\\theta} (x) \\geq \\mathbb {E} _ {q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} \\mid x\\right)} \\mathbb {E} _ {q _ {\\phi} \\left(\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}\\right)} \\left[ \\log \\frac {p _ {\\theta} \\left(\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon}\\right)}{q _ {\\phi} \\left(\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}\\right)} + \\mathcal {L} _ {m d m} (\\mathbf {y} _ {\\epsilon}, \\epsilon) - \\log q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} \\mid x\\right) \\right], \\tag {57}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.205, + 0.384, + 0.221 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{mdm}(\\mathbf{y}_{\\epsilon},\\epsilon)\\) is defined as" + }, + { + "type": 
"equation", + "bbox": [ + 0.212, + 0.224, + 0.784, + 0.266 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {m d m} (\\mathbf {y} _ {\\epsilon}, \\epsilon) = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} _ {> \\epsilon} | \\mathbf {y} _ {\\epsilon})} \\left[ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) - \\int_ {\\epsilon} ^ {T} \\frac {1}{2} \\| s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} - \\frac {1}{2} \\| s _ {\\theta} - s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} + \\nabla \\cdot f _ {\\phi} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.279, + 0.825, + 0.308 + ], + "angle": 0, + "content": "Proof. For transition kernel \\( q_{\\phi}(\\mathbf{y}_s \\mid \\mathbf{y}_0) \\), we can compute upper bound the model likelihood at time 0 following an application of the variational bound" + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.312, + 0.756, + 0.489 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\log p _ {\\theta} (x) = \\log \\int_ {v _ {0}} p _ {\\theta} (\\mathbf {y} _ {0} = [ x, v _ {0} ]) d v _ {0} \\\\ = \\log \\int_ {v _ {0}, \\mathbf {y} _ {\\epsilon}} p _ {\\theta} (\\mathbf {y} _ {0}, \\mathbf {y} _ {\\epsilon}) d v _ {0} d \\mathbf {y} _ {\\epsilon} \\\\ = \\log \\int_ {v _ {0}, \\mathbf {y} _ {\\epsilon}} q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}) \\frac {q (v _ {0} \\mid x)}{q (v _ {0} \\mid x)} \\frac {p _ {\\theta} (\\mathbf {y} _ {0} , \\mathbf {y} _ {\\epsilon})}{q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0})} d v _ {0} d \\mathbf {y} _ {\\epsilon} \\\\ = \\log \\int_ {v _ {0}, \\mathbf {y} _ {\\epsilon}} q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}) \\frac {q (v _ {0} \\mid x)}{q (v _ {0} \\mid x)} \\frac {p _ {\\theta} (\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon}) p _ {\\theta} (\\mathbf {y} _ {\\epsilon})}{q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0})} d v _ {0} d \\mathbf {y} _ {\\epsilon} \\\\ \\geq \\mathbb {E} _ {q (v _ {0} 
| x) q _ {\\phi} (\\mathbf {y} _ {\\epsilon} | \\mathbf {y} _ {0})} \\left[ \\log \\frac {p _ {\\theta} (\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon})}{q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0})} - \\log q _ {\\phi} (\\mathbf {y} _ {0} ^ {v} \\mid x) + \\log p _ {\\theta} (\\mathbf {y} _ {\\epsilon}) \\right] \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.5, + 0.731, + 0.515 + ], + "angle": 0, + "content": "A lower bound for \\(\\log p_{\\theta}(\\mathbf{y}_{\\epsilon})\\) can be derived in a similar manner to eq. (7), such that" + }, + { + "type": "equation", + "bbox": [ + 0.171, + 0.518, + 0.835, + 0.559 + ], + "angle": 0, + "content": "\\[\n\\log p _ {\\theta} (\\mathbf {y} _ {\\epsilon}) \\geq \\mathcal {L} _ {\\mathrm {m d m}} (\\mathbf {y} _ {\\epsilon}, \\epsilon) = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} _ {> \\epsilon} | \\mathbf {y} _ {\\epsilon})} \\left[ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) - \\int_ {\\epsilon} ^ {T} \\frac {1}{2} \\| s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} - \\frac {1}{2} \\| s _ {\\theta} - s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} + \\nabla \\cdot f _ {\\phi} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.563, + 0.825, + 0.62 + ], + "angle": 0, + "content": "The choice of \\(p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})\\) is arbitrary, however following Sohl-Dickstein et al. (2015); Song et al. (2021) we let \\(p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})\\) be Gaussian with mean \\(\\mu_{p_{\\theta},\\epsilon}\\) and covariance \\(\\Sigma_{p_{\\theta},\\epsilon}\\). 
Suppose \\(q_{\\phi}(\\mathbf{y}_{\\epsilon} \\mid \\mathbf{y}_0) = \\mathcal{N}(\\mathbf{y}_{\\epsilon} \\mid \\mathbf{A}\\mathbf{y}_0, \\Sigma)\\), then we select the following mean \\(\\mu_{p_{\\theta},\\epsilon}\\) and covariance \\(\\Sigma_{p_{\\theta},\\epsilon}\\) for \\(p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})\\)" + }, + { + "type": "equation", + "bbox": [ + 0.384, + 0.623, + 0.614, + 0.641 + ], + "angle": 0, + "content": "\\[\n\\mu_ {p _ {\\theta}, \\epsilon} = \\mathbf {A} ^ {- 1} \\Sigma s _ {\\theta} (\\mathbf {y} _ {\\epsilon}, \\epsilon) + \\mathbf {A} ^ {- 1} \\mathbf {y} _ {\\epsilon}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.643, + 0.523, + 0.661 + ], + "angle": 0, + "content": "\\[\n\\Sigma_ {p _ {\\theta}, \\epsilon} = \\mathbf {A} ^ {- 1} \\Sigma \\mathbf {A} ^ {- \\top}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.664, + 0.825, + 0.695 + ], + "angle": 0, + "content": "where \\(\\mu_{p_{\\theta},\\epsilon},\\Sigma_{p_{\\theta},\\epsilon}\\) are derived using Tweedie's formula (Efron, 2011) by setting \\(\\mu_{\\epsilon} = \\mathbb{E}[\\mathbf{y}_0\\mid \\mathbf{y}_{\\epsilon}]\\) and \\(\\Sigma_{\\epsilon} = \\mathrm{Var}(\\mathbf{y}_0\\mid \\mathbf{y}_{\\epsilon})\\)" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.708, + 0.718, + 0.723 + ], + "angle": 0, + "content": "We next derive this choice as an approximation of the optimal Gaussian likelihood." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.739, + 0.394, + 0.753 + ], + "angle": 0, + "content": "I.1 LIKELIHOOD DERIVATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.826, + 0.85 + ], + "angle": 0, + "content": "Suppose \\(\\mathbf{y}_0\\sim q_0(\\mathbf{y}_0)\\) and \\(\\mathbf{y}_{\\epsilon}\\sim \\mathcal{N}(\\mathbf{y}_{\\epsilon}\\mid A\\mathbf{y}_0,\\Sigma)\\). Here, \\(A,\\Sigma\\) are the mean coefficient and covariance derived from the transition kernel at time \\(\\epsilon\\). 
We use Tweedie's formula to get the mean and covariance of \\(\\mathbf{y}_0\\) given \\(\\mathbf{y}_{\\epsilon}\\) under \\(q\\). This mean and covariance feature the true score \\(\\nabla_{\\mathbf{y}_{\\epsilon}}\\log q(\\mathbf{y}_{\\epsilon})\\). We replace the score with the score model \\(s_\\theta\\) and then set \\(p_{\\theta}(\\mathbf{y}_0|\\mathbf{y}_{\\epsilon})\\) to have the resulting approximate mean and covariance. We make this choice because the optimal \\(p_{\\theta}(\\mathbf{y}_0|\\mathbf{y}_{\\epsilon})\\) equals the true \\(q(\\mathbf{y}_0|\\mathbf{y}_{\\epsilon})\\) as discussed throughout the work." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.855, + 0.421, + 0.872 + ], + "angle": 0, + "content": "Here \\(\\mathbf{y}_0 = [\\mathbf{x}_0,\\mathbf{v}_0]\\) where \\(\\mathbf{x}_0\\sim q_{\\mathrm{data}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.877, + 0.825, + 0.905 + ], + "angle": 0, + "content": "Let \\(\\eta\\) be the natural parameter for the multivariate Gaussian likelihood \\(\\mathcal{N}(\\mathbf{y}_{\\epsilon} \\mid A\\mathbf{y}_0, \\Sigma)\\). 
Then, Tweedie's formula (Efron, 2011) states that:" + }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.909, + 0.617, + 0.927 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\eta \\mid \\mathbf {y} _ {\\epsilon} ] = \\nabla_ {\\mathbf {y} _ {\\epsilon}} l (\\mathbf {y} _ {\\epsilon}) - \\nabla_ {\\mathbf {y} _ {\\epsilon}} l _ {0} (\\mathbf {y} _ {\\epsilon})\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.104, + 0.352, + 0.12 + ], + "angle": 0, + "content": "\\(l(\\mathbf{y}_{\\epsilon}) = \\log q(\\mathbf{y}_{\\epsilon})\\)" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.123, + 0.764, + 0.139 + ], + "angle": 0, + "content": "- \\( s_{\\theta}(\\mathbf{y}_{\\epsilon}, \\epsilon) \\) is taken to be the true score \\( \\nabla_{\\mathbf{y}_{\\epsilon}} \\log q(\\mathbf{y}_{\\epsilon}) \\) so that \\( \\nabla_{\\mathbf{y}_{\\epsilon}} l(\\mathbf{y}_{\\epsilon}) = s_{\\theta}(\\mathbf{y}_{\\epsilon}, \\epsilon) \\)." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.142, + 0.802, + 0.157 + ], + "angle": 0, + "content": "- \\( l_{0} \\) is the log of the base distribution defined in the exponential family parameterization." 
+ }, + { + "type": "list", + "bbox": [ + 0.217, + 0.104, + 0.802, + 0.157 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.168, + 0.825, + 0.199 + ], + "angle": 0, + "content": "The base distribution is a multivariate Gaussian with mean 0 and covariance \\(\\Sigma\\), therefore \\(\\nabla_{\\mathbf{y}_{\\epsilon}}l_{0}(\\mathbf{y}_{\\epsilon}) = -\\Sigma^{-1}\\mathbf{y}_{\\epsilon}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.205, + 0.604, + 0.223 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\eta \\mid \\mathbf {y} _ {\\epsilon} ] = s _ {\\theta} (\\mathbf {y} _ {\\epsilon}, \\epsilon) + \\Sigma^ {- 1} \\mathbf {y} _ {\\epsilon}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.229, + 0.826, + 0.273 + ], + "angle": 0, + "content": "However, Tweedie's formula is not directly applicable since our \\(\\mathbf{y}_{\\epsilon}\\) is not directly normal with mean \\(\\mathbf{y}_0\\). Instead, to derive the conditional mean of \\(\\mathbf{y}_0\\) given \\(\\mathbf{y}_{\\epsilon}\\), we use the relation \\(\\eta = \\Sigma^{-1}\\mathbf{A}\\mathbf{y}_0\\) and the linearity of conditional expectation to get" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.278, + 0.634, + 0.356 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} \\left[ \\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon} \\right] = \\mathbb {E} \\left[ A ^ {- 1} \\Sigma \\eta \\mid \\mathbf {y} _ {\\epsilon} \\right] \\\\ = A ^ {- 1} \\Sigma \\mathbb {E} [ \\eta \\mid \\mathbf {y} _ {\\epsilon} ] \\\\ = A ^ {- 1} \\Sigma \\left(s _ {\\theta} \\left(\\mathbf {y} _ {\\epsilon}, \\epsilon\\right) + \\Sigma^ {- 1} \\mathbf {y} _ {\\epsilon}\\right) \\\\ = A ^ {- 1} \\Sigma s _ {\\theta} \\left(\\mathbf {y} _ {\\epsilon}, \\epsilon\\right) + A ^ {- 1} \\mathbf {y} _ {\\epsilon}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.733, + 0.39 + ], + "angle": 0, + "content": "For the variance, we use the following relation \\(\\mathbf{y}_{\\epsilon} = A\\mathbf{y}_0 + \\sqrt{\\Sigma}\\epsilon\\), which implies that" + }, + { + "type": "equation", + "bbox": [ + 0.446, + 0.396, + 0.619, + 0.414 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} _ {0} = A ^ {- 1} \\mathbf {y} _ {\\epsilon} - A ^ {- 1} \\sqrt {\\Sigma} \\epsilon\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.416, + 0.568, + 0.436 + ], + "angle": 0, + "content": "\\[\n\\operatorname {V a r} \\left(\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon}\\right) = A ^ {- 1} \\Sigma A ^ {- T}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.449, + 0.825, + 0.476 + ], + "angle": 0, + "content": "Therefore, for the model posterior distribution \\( p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon}) \\) we choose a Normal with mean and covariance" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.484, + 0.611, + 0.523 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mu_ {p _ {\\theta}, \\epsilon} = A ^ {- 1} \\Sigma s _ {\\theta} (\\mathbf {y} _ {\\epsilon}, \\epsilon) + A ^ {- 1} \\mathbf {y} _ {\\epsilon} \\\\ \\Sigma_ {p _ {\\theta}, \\epsilon} = A ^ {- 1} \\Sigma A ^ {- T} \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ] +] \ No newline at end of file diff --git a/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_origin.pdf b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1a432eabee1f42de8257f991adb426c6b747f388 --- /dev/null +++ b/2023/Where to 
Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/f7a9c89f-158a-46a9-8f48-9b5bcdfbc0da_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e52627b81ed0e422d5d9786628995949e63f3a6db4814360e4d5d49e3bd6d7ba +size 735535 diff --git a/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/full.md b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1771f9f71d10e950b1ca46cd2478112974e8f5df --- /dev/null +++ b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/full.md @@ -0,0 +1,1036 @@ +# WHERE TO DIFFUSE, HOW TO DIFFUSE, AND HOW TO GET BACK: AUTOMATED LEARNING FOR MULTIVARI-ATE DIFFUSIONS + +Raghav Singhal\*,1, Mark Goldstein\*,1, Rajesh Ranganath\*,2 + +Courant Institute of Mathematical Sciences1, New York University + +Center for Data Science2, New York University + +# ABSTRACT + +Diffusion-based generative models (DBGMs) perturb data to a target noise distribution and reverse this process to generate samples. The choice of noising process, or inference diffusion process, affects both likelihoods and sample quality. For example, extending the inference process with auxiliary variables leads to improved sample quality. While there are many such multivariate diffusions to explore, each new one requires significant model-specific analysis, hindering rapid prototyping and evaluation. In this work, we study Multivariate Diffusion Models (MDMs). For any number of auxiliary variables, we provide a recipe for maximizing a lower-bound on the MDMs likelihood without requiring any model-specific analysis. 
We then demonstrate how to parameterize the diffusion for a specified target noise distribution; these two points together enable optimizing the inference diffusion process. Optimizing the diffusion expands easy experimentation from just a few well-known processes to an automatic search over all linear diffusions. To demonstrate these ideas, we introduce two new specific diffusions as well as learn a diffusion process on the MNIST, CIFAR10, andImagenet32 datasets. We show learned MDMs match or surpass bits-per-dims (BPDs) relative to fixed choices of diffusions for a given dataset and model architecture. + +# 1 INTRODUCTION + +Diffusion-based generative models (DBGMs) perturb data to a target noise distribution and reverse this process to generate samples. They have achieved impressive performance in image generation, editing, translation (Dhariwal & Nichol, 2021; Nichol & Dhariwal, 2021; Sasaki et al., 2021; Ho et al., 2022), conditional text-to-image tasks (Nichol et al., 2021; Ramesh et al., 2022; Sahara et al., 2022) and music and audio generation (Chen et al., 2020; Kong et al., 2020; Mittal et al., 2021). They are often trained by maximizing a lower bound on the log likelihood, featuring an inference process interpreted as gradually "noising" the data (Sohl-Dickstein et al., 2015; Ho et al., 2020). + +The choice of this inference process affects both likelihoods and sample quality. On different datasets and models, different inference processes work better; there is no universal best choice of inference, and the choice matters (Song et al., 2020b). + +While some work has improved performance by designing score model architectures (Ho et al., 2020; Kingma et al., 2021; Dhariwal & Nichol, 2021), Dockhorn et al. (2021) instead introduce the critically-damped Langevin diffusion (CLD), showing that significant improvements in sample generation can be gained by carefully designing new processes. 
CLD pairs each data dimension with an auxiliary "velocity" variable and diffuses them jointly using second-order Langevin dynamics. + +A natural question: if introducing new diffusions results in dramatic performance gains, why are there only a handful of diffusions (variance-preserving stochastic differential equation (VPSDE), variance exploding (VE), CLD, sub-VPSDE) used in DBGMs? For instance, are there other auxiliary variable diffusions that would lead to improvements like CLD? This avenue seems promising as auxiliary variables have improved other generative models and inferences, such as normalizing flows + +(Huang et al., 2020), neural ordinary differential equations (ODEs) (Dupont et al., 2019), hierarchical variational models (Ranganath et al., 2016), ladder variational autoencoder (Sønderby et al., 2016), among others. + +Despite its success, CLD also provides evidence that each new process requires significant model-specific analysis. Deriving the evidence lower bound (ELBO) and training algorithm for diffusions is challenging (Huang et al., 2021; Kingma et al., 2021; Song et al., 2021) and is carried out in a case-by-case manner for new diffusions (Campbell et al., 2022). Auxiliary variables seemingly complicate this process further; computing conditionals of the inference process necessitates solving matrix Lyapunov equations (section 3.3). Deriving the inference stationary distribution—which helps the model and inference match—can be intractable. These challenges limit rapid prototyping and evaluation of new inference processes. 
+ +Concretely, training a diffusion model requires: + +(R1): Selecting an inference and model process pair such that the inference process converges to the model prior +(R2): Deriving the ELBO for this pair +(R3): Estimating the ELBO and its gradients by deriving and computing the inference process' transition kernel + +In this work, we introduce Multivariate Diffusion Models (MDMs) and a method for training and evaluating them. MDMs are diffusion-based generative models trained with auxiliary variables. We provide a recipe for training MDMs beyond specific instantiations—like VPSDE and CLD—to all linear inference processes that have a stationary distribution, with any number of auxiliary variables. + +First, we bring results from gradient-based MCMC (Ma et al., 2015) to diffusion modeling to construct MDMs that converge to a chosen model prior (R1); this tightens the ELBO. Secondly, for any number of auxiliary variables, we derive the MDM ELBO (R2). Finally, we show that the transition kernel of linear MDMs, necessary for the ELBO, can be computed automatically and generically, for higher-dimensional auxiliary systems (R3). + +With these tools, we explore a variety of new inference processes for diffusion-based generative models. We then note that the automatic transitions and fixed stationary distributions facilitate directly learning the inference to maximize the MDM ELBO. Learning turns diffusion model training into a search not only over score models but also inference processes, at no extra derivational cost. + +Methodological Contributions. In summary, our methodological contributions are: + +1. Deriving ELBOs for training and evaluating multivariate diffusion models (MDMs) with auxiliary variables. +2. Showing that the diffusion transition covariance does not need to be manually derived for each new diffusion. 
We instead demonstrate that a matrix factorization technique, previously unused in diffusion models, can automatically compute the covariance analytically for any linear MDM. +3. Using results from gradient-based Markov chain Monte Carlo (MCMC) to construct MDMs with a complete parameterization of inference processes whose stationary distribution matches the model prior. +4. Combining the above into an algorithm called Automatic Multivariate Diffusion Training (AMDT) that enables training without diffusion-specific derivations. AMDT enables training score models for any linear diffusion, including optimizing the diffusion and score jointly. + +To demonstrate these ideas, we develop MDMs with two specific diffusions as well as learned multivariate diffusions. The specific diffusions are accelerated Langevin diffusion (ALDA) (introduced in Mou et al. (2019) as a higher-order scheme for gradient-based MCMC) and an alteration, modified accelerated Langevin diffusion (MALDA). Previously, using these diffusions for generative modeling would require significant model-specific analysis. Instead, AMDT for these diffusions is derivation-free. + +Empirical contributions. We train MDMs on the MNIST,Imagenet32 and CIFAR-10 datasets. In the experiments, we show that: + +1. Training new and existing fixed diffusions, such as ALDA and MALDA, is easy with the proposed algorithm AMDT. +2. Using AMDT to learn the choice of diffusion for the MDM matches or surpasses the performance of fixed choices of diffusion process; sometimes the learned diffusion and VPSDE do best; other times the learned diffusion and CLD do best. +3. There are new and existing MDMs, trained and evaluated with the MDM ELBO, that account for as much performance improvement over VPSDE as a three-fold increase in score model size for a fixed univariate diffusion. 
+ +These findings affirm that the choice of diffusion affects the optimization problem, and that learning the choice bypasses the process of choosing diffusions for each new dataset and score architecture. We additionally show the utility of the MDM ELBO by showing on a dataset that CLD achieves better bits-per-dims (BPDs) than previously reported with the probability flow ODE (Dockhorn et al., 2021). + +# 2 SETUP + +We present diffusions by starting with the generative model and then describing its likelihood lower bound (Sohl-Dickstein et al., 2015; Huang et al., 2021; Kingma et al., 2021). Diffusions sample from a model prior $\mathbf{z}_0\sim \pi_\theta$ and then evolve a continuous-time stochastic process $\mathbf{z}_t\in \mathbb{R}^d$ : + +$$ +d \mathbf {z} = h _ {\theta} (\mathbf {z}, t) d t + \beta_ {\theta} (t) d \mathbf {B} _ {t}, \quad t \in [ 0, T ] \tag {1} +$$ + +where $\mathbf{B}_t$ is a $d$ -dimensional Brownian motion. The model is trained so that $\mathbf{z}_T$ approximates the data $\mathbf{x} \sim q_{\mathrm{data}}$ .1 Maximum likelihood training of diffusion models is intractable (Huang et al., 2021; Song et al., 2021; Kingma et al., 2021). Instead, they are trained using a variational lower bound on $\log p_{\theta}(\mathbf{z}_T = x)$ . The bound requires an inference process $q_{\phi}(\mathbf{y}_s | \mathbf{x} = x)$ :2 + +$$ +d \mathbf {y} = f _ {\phi} (\mathbf {y}, s) d s + g _ {\phi} (s) d \widehat {\mathbf {B}} _ {s}, \quad s \in [ 0, T ] \tag {2} +$$ + +where $\widehat{\mathbf{B}}_s$ is another Brownian motion independent of $\mathbf{B}_t$ . The inference process is usually taken to be specified rather than learned, and chosen to be i.i.d. for each $y_{tj}$ conditional on each $x_j$ . This leads to the interpretation of the $y_{tj}$ as noisy versions of features $x_j$ (Ho et al., 2020). While the diffusion ELBO is challenging to derive in general, Huang et al. (2021); Song et al. 
(2021) show that when the model process takes the form: + +$$ +d \mathbf {z} = \left[ g _ {\phi} ^ {2} (T - t) s _ {\theta} (\mathbf {z}, T - t) - f _ {\phi} (\mathbf {z}, T - t) \right] d t + g _ {\phi} (T - t) d \mathbf {B} _ {t}, \tag {3} +$$ + +the ELBO is: + +$$ +\log p _ {\theta} (x) \geq \mathcal {L} ^ {\mathrm {i s m}} (x) = \mathbb {E} _ {q _ {\phi} (\mathbf {y} | x)} \left[ \log \pi_ {\theta} (\mathbf {y} _ {T}) + \int_ {0} ^ {T} - \frac {1}{2} \| s _ {\theta} \| _ {g _ {\phi} ^ {2}} ^ {2} - \nabla \cdot (g _ {\phi} ^ {2} s _ {\theta} - f _ {\phi}) d s \right], \tag {4} +$$ + +where $f_{\phi}, g_{\phi}, s_{\theta}$ are evaluated at $(\mathbf{y}_s, s)$ , $\|\mathbf{x}\|_{\mathbf{A}}^2 = \mathbf{x}^\top \mathbf{A}\mathbf{x}$ and $g^2 = gg^\top$ . Equation (4) features the Implicit Score Matching (ISM) loss (Song et al., 2020a), and can be re-written as an ELBO $\mathcal{L}^{\mathrm{dsm}}$ featuring Denoising Score Matching (DSM) (Vincent, 2011; Song et al., 2020b), see appendix F.1. + +# 3 A RECIPE FOR MULTIVARIATE DIFFUSION MODELS + +As has been shown in prior work (Song et al., 2021; Dockhorn et al., 2021), the choice of diffusion matters. Drawing on principles from previous generative models (section 6), we can consider a wide class of diffusion inference processes by constructing them using auxiliary variables. + +At first glance, training such diffusions can seem challenging. First, one needs an ELBO that includes auxiliary variables. This ELBO will require sampling from the transition kernel, and setting the model prior to the specified inference stationary distribution. But doing such diffusion-specific analysis manually is challenging and hinders rapid prototyping. + +In this section we show how to address these challenges and introduce an algorithm, AMDT, to simplify and automate modeling with MDMs. AMDT can be used to train new and existing diffusions, including those with auxiliary variables, and including those that learn the inference process. 
In appendix A we discuss how the presented methods can also be used to automate and improve simplified score matching and noise prediction objectives used to train diffusion models. + +# 3.1 MULTIVARIATE MODEL AND INFERENCE + +For the $j^{th}$ data coordinate at each time $t$ , MDMs pair $\mathbf{z}_{tj} \in \mathbb{R}$ with a vector of auxiliary variables $\mathbf{v}_{tj} \in \mathbb{R}^{K-1}$ into a joint vector $\mathbf{u}_t$ and diffuse in the extended space: + +$$ +\mathbf {u} _ {0} \sim \pi_ {\theta}, \quad d \mathbf {u} = h _ {\theta} \left(\mathbf {u} _ {t} = \left[ \begin{array}{l} \mathbf {z} _ {t} \\ \mathbf {v} _ {t} \end{array} \right], t\right) d t + \beta_ {\theta} (t) d \mathbf {B} _ {t}. \tag {5} +$$ + +MDMs model the data $\mathbf{x}$ with $\mathbf{z}_T$ , a coordinate in $\mathbf{u}_T \sim p_\theta$ . For the $j^{th}$ feature $\mathbf{x}_j$ , each $\mathbf{u}_{tj} \in \mathbb{R}^K$ consists of a "data" dimension $\mathbf{u}_{tj}^z$ and auxiliary variable $\mathbf{u}_{tj}^v$ . Therefore $\mathbf{u} \in \mathbb{R}^{dK}$ . We extend the drift coefficient $h_\theta$ from a function in $\mathbb{R}^d \times \mathbb{R}_+ \to \mathbb{R}^d$ to the extended space $\mathbb{R}^{dK} \times \mathbb{R}_+ \to \mathbb{R}^{dK}$ . We likewise extend the diffusion coefficient to a matrix $\beta_\theta$ acting on Brownian motion $\mathbf{B}_t \in \mathbb{R}^{dK}$ . + +Because the MDM model is over the extended space, the inference distribution $\mathbf{y}$ must be too. We then set $q(\mathbf{y}_0^v |\mathbf{y}_0^z = x)$ to any chosen initial distribution, e.g. $\mathcal{N}(\mathbf{0},\mathbf{I})$ and discuss this choice in section 4. 
Then $\mathbf{y}_s$ evolves according to the auxiliary variable inference process: + +$$ +d \mathbf {y} = f _ {\phi} (\mathbf {y}, s) d s + g _ {\phi} (s) d \widehat {\mathbf {B}} _ {s}, \tag {6} +$$ + +where the inference drift and diffusion coefficients $f_{\phi}, g_{\phi}$ are now over the extended space $\mathbf{y} = [\mathbf{y}^z, \mathbf{y}^v]$ . The function $f_{\phi}$ lets the $z$ and $v$ coordinates of $\mathbf{y}_{tj}$ interact in the inference process. + +# ASSUMPTIONS + +This work demonstrates how to parameterize time-varying Itô processes, used for diffusion modeling, to have a stationary distribution that matches the given model prior. To take advantage of the automatic transition kernels also presented, the inferences considered for modeling are linear time-varying processes and take the form: + +$$ +d \mathbf {y} = \mathbf {A} _ {\phi} (s) \mathbf {y} d s + g _ {\phi} (s) d \mathbf {B} _ {s} +$$ + +where $\mathbf{A}_{\phi}(s):\mathbb{R}_{+}\to \mathbb{R}^{dK\times dK}$ and $g_{\phi}(s):\mathbb{R}_{+}\to \mathbb{R}^{dK\times dK}$ are matrix-valued functions. + +# 3.2 ELBO FOR MDMS + +We now show how to train MDMs to optimize a lower bound on the log likelihood of the data. Like in the univariate case, we use the parameterization in eq. (3) to obtain a tractable ELBO. + +Theorem 1. 
The MDM log marginal likelihood of the data is lower-bounded by: + +$$ +\begin{array}{l} \log p _ {\theta} (x) \geq \mathbb {E} _ {q _ {\phi} (\mathbf {y} | x)} \left[ \underbrace {\log \pi_ {\theta} (\mathbf {y} _ {T})} _ {\ell_ {T}} - \int_ {0} ^ {T} \frac {1}{2} \| s _ {\theta} \| _ {g _ {\phi} ^ {2}} ^ {2} + \nabla \cdot (g _ {\phi} ^ {2} s _ {\theta} - f _ {\phi}) d s - \underbrace {\log q _ {\phi} (\mathbf {y} _ {0} ^ {v} | x)} _ {\ell_ {q}} \right] \quad (\mathcal {L} ^ {m i s m}) \\ = \mathbb {E} _ {q _ {\phi} (\mathbf {y} | x)} \left[ \ell_ {T} + \int_ {0} ^ {T} \frac {1}{2} \| s _ {\phi} \| _ {g _ {\phi} ^ {2}} ^ {2} - \frac {1}{2} \| s _ {\theta} - s _ {\phi} \| _ {g _ {\phi} ^ {2}} ^ {2} + (\nabla \cdot f _ {\phi}) d s - \ell_ {q} \right] \quad \left(\mathcal {L} ^ {m d s m}\right). \tag {7} \\ \end{array} +$$ + +where divergences and gradients are taken with respect to $\mathbf{y}_s$ and $s_{\phi} = \nabla_{\mathbf{y}_s}\log q_{\phi}(\mathbf{y}_s|x)$ + +Proof. The proof for the MDM ISM ELBO $\mathcal{L}^{\mathrm{mism}}$ is in appendix F. In short, we introduce auxiliary variables, apply Theorem 1 of Huang et al. (2021) (equivalently, Theorem 3 of Song et al. (2021) or appendix E of Kingma et al. (2021)) to the joint space, and then apply an additional variational bound to $\mathbf{v}_0$ . The MDM DSM ELBO $\mathcal{L}^{\mathrm{mdsm}}$ is likewise derived in appendix F, similarly to Huang et al. (2021); Song et al. (2021), but extended to multivariate diffusions. + +We train MDM's by estimating the gradients of $\mathcal{L}^{\mathrm{mdsm}}$ , as estimates of $\mathcal{L}^{\mathrm{mism}}$ can be computationally prohibitive. For numerical stability, the integral in eq. (7) is computed on $[\epsilon, T]$ rather than $[0, T]$ . One can regard this as a bound for a variable $\mathbf{u}_{\epsilon}$ . 
To maintain a proper likelihood bound for the data, one can choose a likelihood $\mathbf{u}_0|\mathbf{u}_{\epsilon}$ and compose bounds as we demonstrate in appendix I. We report the ELBO with this likelihood term, which plays the same role as the discretized Gaussian in Nichol & Dhariwal (2021) and Tweedie's formula in Song et al. (2021). + +# 3.3 INGREDIENT 1: COMPUTING THE TRANSITION $q_{\phi}(\mathbf{y}_s|x)$ + +To estimate eq. (7) and its gradients, we need samples from $q(\mathbf{y}_s|x)$ and to compute $\nabla \log q(\mathbf{y}_s|x)$ . While an intractable problem for MDMs in general, we provide two ingredients for tightening and optimizing these bounds in a generic fashion for linear inference MDMs. + +We first show how to automate computation of $q(\mathbf{y}_s|\mathbf{y}_0)$ and then $q(\mathbf{y}_s|x)$ . For linear MDMs of the form: + +$$ +d \mathbf {y} = \mathbf {A} (s) \mathbf {y} d s + g (s) d \mathbf {B} _ {s}, +$$ + +the transition kernel $q(\mathbf{y}_s|\mathbf{y}_0)$ is Gaussian (Särkkä & Solin, 2019). Let $f(\mathbf{y}, s) = \mathbf{A}(s)\mathbf{y}$ . Then, the mean and covariance are solutions to the following ODEs: + +$$ +d \mathbf {m} _ {s | 0} / d s = \mathbf {A} (s) \mathbf {m} _ {s | 0} +$$ + +$$ +d \boldsymbol {\Sigma} _ {s | 0} / d s = \mathbf {A} (s) \boldsymbol {\Sigma} _ {s | 0} + \boldsymbol {\Sigma} _ {s | 0} \mathbf {A} ^ {\top} (s) + g ^ {2} (s). \tag {8} +$$ + +The mean can be solved analytically: + +$$ +\mathbf {m} _ {s \mid 0} = \exp \left[ \int_ {0} ^ {s} \mathbf {A} (\nu) d \nu \right] \mathbf {y} _ {0} \underbrace {= \exp (s \mathbf {A}) \mathbf {y} _ {0}} _ {\text {no integration if } \mathbf {A} (\nu) = \mathbf {A}}. \tag {9} +$$ + +The covariance equation does not have as simple a solution as eq. (9) because the unknown matrix $\pmb{\Sigma}_{s|0}$ is being multiplied both from the left and the right. + +Instead of solving eq. (8) for a specific diffusion manually, as done in previous work (e.g. 
pages 50-54 of Dockhorn et al. (2021)), we show that a matrix factorization technique (Särkkä & Solin (2019), sec. 6.3) previously unused in diffusion-based generative models can automatically compute $\Sigma_{s|0}$ generically for any linear MDM. Define $\mathbf{C}_s$ , $\mathbf{H}_s$ that evolve according to: + +$$ +\begin{pmatrix} d \mathbf {C} _ {s} / d s \\ d \mathbf {H} _ {s} / d s \end{pmatrix} = \begin{pmatrix} \mathbf {A} (s) & g ^ {2} (s) \\ \mathbf {0} & - \mathbf {A} ^ {\top} (s) \end{pmatrix} \begin{pmatrix} \mathbf {C} _ {s} \\ \mathbf {H} _ {s} \end{pmatrix}, \tag {10} +$$ + +then $\mathbf{\Sigma}_{s|0} = \mathbf{C}_s\mathbf{H}_s^{-1}$ for $\mathbf{C}_0 = \mathbf{\Sigma}_0$ and $\mathbf{H}_0 = \mathbf{I}$ (Appendix D). These equations can be solved in closed-form, + +$$ +\begin{pmatrix} \mathbf {C} _ {s} \\ \mathbf {H} _ {s} \end{pmatrix} = \exp \left[ \begin{pmatrix} [ \mathbf {A} ] _ {s} & [ g ^ {2} ] _ {s} \\ \mathbf {0} & - [ \mathbf {A} ^ {\top} ] _ {s} \end{pmatrix} \right] \begin{pmatrix} \boldsymbol {\Sigma} _ {0} \\ \mathbf {I} \end{pmatrix} \underbrace {= \exp \left[ s \begin{pmatrix} \mathbf {A} & g ^ {2} \\ \mathbf {0} & - \mathbf {A} ^ {\top} \end{pmatrix} \right]} _ {\text {no integration if } \mathbf {A} (\nu) = \mathbf {A}, \; g (\nu) = g} \begin{pmatrix} \boldsymbol {\Sigma} _ {0} \\ \mathbf {I} \end{pmatrix}, \tag {11} +$$ + +where $[\mathbf{A}]_s = \int_0^s\mathbf{A}(\nu)d\nu$ . To condition on $\mathbf{y}_0 = (x,v)$ , we set $\pmb {\Sigma}_0 = \mathbf{0}$. + +Computing $q_{\phi}(\mathbf{y}_s|x)$ . 
For the covariance $\pmb{\Sigma}_{s|0}$ , to condition on $x$ instead of $\mathbf{y}_0$ , we set $\pmb{\Sigma_0}$ to + +$$ +\boldsymbol {\Sigma} _ {0} = \left( \begin{array}{c c} 0 & 0 \\ 0 & \boldsymbol {\Sigma} _ {\mathbf {v} _ {0}} \end{array} \right), +$$ + +To compute the mean, it is the same expression as for $q(\mathbf{y}_s|\mathbf{y}_0)$ , but with a different initial condition: + +$$ +\mathbf {m} _ {s \mid 0} = \exp \left[ \int_ {0} ^ {s} \mathbf {A} (\nu) d \nu \right] \binom {x} {\mathbb {E} _ {q} [ \mathbf {y} _ {0} ^ {v} | x ]} \tag {12} +$$ + +See appendix D for more details. + +Algorithm 1 Automatic Multivariate Diffusion Training +Input: Data $\{x_i\}$ , inference process matrices $\mathbf{Q}_{\phi}, \mathbf{D}_{\phi}$ , model prior $\pi_{\theta}$ , initial distribution $q_{\phi}(\mathbf{y}_0^v | x)$ , and score model architecture $s_\theta$ +Returns: Trained score model $s_\theta$ +while $s_\theta$ not converged do + Sample $x \sim \sum_{i=1}^{N} \frac{1}{N} \delta_{x_i}$ , $v_0 \sim q_{\phi}(\mathbf{y}_0^v | x)$ + Sample $\mathbf{s} \sim \mathbf{U}[0, T]$ and $\mathbf{y}_s, \mathbf{y}_T \sim q_{\phi}(\mathbf{y}_s | x)$ using algorithm 2 + Estimate the stochastic gradient of the MDM ELBO, $\nabla_\theta \mathcal{L}(\theta, \phi)$ , using eq. (7) + $\theta \leftarrow \theta + \alpha \nabla_\theta \mathcal{L}(\theta, \phi)$ . + if learning inference then + $\phi \leftarrow \phi + \alpha \nabla_\phi \mathcal{L}(\theta, \phi)$ +end if +end while +Output $s_\theta$ + +A fast and simple algorithm. We show in algorithm 2 (appendix H) that computing the transition kernel only requires knowing $f, g$ and requires no diffusion-specific analysis. For $K - 1$ auxiliary variables, $\mathbf{A}, g$ are $K \times K$ . Like for scalar diffusions, these parameters are shared across data coordinates. This means matrix exponentials and inverses are done on $K \times K$ matrices, where $K$ is only 2 or 3 in our experiments. 
In table 1, we compare the time to sample a batch of size 256 from the transition kernel for CIFAR 10 and MNIST. The table shows the extra computa + +Table 1: Runtime Comparison: we compare the run time of sampling from the CLD diffusion analytically versus using the automated algorithm. + +
MethodCIFAR-10MNIST
Analytical0.0270.0062
Automated0.0290.007
+ +tional cost of the automated algorithm is negligible. This automation likewise applies to simplified score matching and noise prediction objectives, since all rely on $q_{\phi}(\mathbf{y}_s|x)$ (appendix A). + +# 3.4 INGREDIENT 2: MDM PARAMETERIZATION + +The MDM ELBO (eq. (7)) is tighter when the inference $\mathbf{y}_T$ tends toward the model's prior $\pi_{\theta}$ . Here we construct inference processes with the model prior $\pi_{\theta}$ as a specified stationary distribution $q_{\infty}$ . + +Ma et al. (2015) provide a complete recipe for constructing gradient-based MCMC samplers; the recipe constructs non-linear time-homogeneous Itô processes with a given stationary distribution, and show that the parameterization spans all such Itô processes with that stationary distribution. + +Diffusion models usually have time-varying drift and diffusion coefficients (e.g. use of the $\beta(t)$ function). To build diffusion models that match the model prior, we first extend Theorem 1 from Ma et al. (2015) to construct non-linear Itô processes with time-varying drift and diffusion coefficients with a given stationary distribution (Appendix C). Then, to keep transitions tractable (per Section 3.3), we specialize this result to linear Itô diffusions. + +We directly state the result for linear time-varying diffusions with stationary distributions. The parameterization requires a skew-symmetric matrix $-\mathbf{Q}(s) = \mathbf{Q}(s)^{\top}$ , a positive semi-definite matrix $\mathbf{D}(s)$ , and a function $\nabla H(\mathbf{y})$ such that the desired stationary distribution $q_{\infty}$ is proportional to $\exp[-H(\mathbf{y})]$ . Linear Itô diffusions have Gaussian stationary distributions (Särkkä & Solin, 2019) meaning that $\nabla H$ is linear and can be expressed as $\mathbf{S}\mathbf{y}$ for some matrix $\mathbf{S}$ . 
For a matrix $\mathbf{A}$ , let $\sqrt{\mathbf{A}}$ refer to the matrix square root defined by $\mathbf{a} = \sqrt{\mathbf{A}} \Longleftrightarrow \mathbf{A} = \mathbf{aa}^{\top}$ . Then, the Itô diffusion: + +$$ +d \mathbf {y} = \underbrace {- \left[ \mathbf {Q} (s) + \mathbf {D} (s) \right] \mathbf {S y}} _ {f (\mathbf {y}, s)} d s + \underbrace {\sqrt {2 \mathbf {D} (s)}} _ {g (s)} d \widehat {\mathbf {B}} _ {s}, \tag {13} +$$ + +has Gaussian stationary distribution $\mathcal{N}(\mathbf{0},\mathbf{S}^{-1})$ where $\mathbf{Q}(s),\mathbf{D}(s)$ and $\mathbf{S}$ are parameters. For a discussion of convergence to the stationary distribution, as well as skew-symmetric and positive semi-definite parameterizations, see appendix C, where we also show that existing diffusion processes such as VPSDE and CLD are included in $\mathbf{Q} / \mathbf{D}$ parameterization. We display the ELBO in terms of $\mathbf{Q} / \mathbf{D}$ in appendix G and an algorithm in appendix H. + +For score matching and noise prediction losses and a given $q_{\phi}$ , achieving a minimizing value with respect to $s_{\theta}$ does not imply that the generative model score will match the inference score. Modeling the data also requires the marginal distribution of $q_{\phi, T}$ to approximate $\pi$ . When $q_{\phi}$ is constant, it is important to confirm the stationary distribution is appropriately set, and the tools used here for the ELBO can be used to satisfy this requirement for score matching and noise prediction (appendix A). + +# 3.5 LEARNING THE INFERENCE PROCESS + +The choice of diffusion matters, and the ELBOs in eq. (7) have no requirement for fixed $q_{\phi}$ . We therefore learn the inference process jointly with $s_{\theta}$ . Under linear transitions (ingredient 1), no algorithmic details change as the diffusion changes during training. Under stationary parameterization (ingredient 2), we can learn without the stationary distribution going awry. 
In the experiments, learning matches or surpasses BPDs of fixed diffusions for a given dataset and score architecture. + +In $\mathcal{L}^{\mathrm{mdsm}}$ or $\mathcal{L}^{\mathrm{mism}}$ , $q_{\phi, \infty}$ may be set to equal $\pi_{\theta}$ , but it is $\mathbf{y}_T \sim q_{\phi, T}$ for the chosen $T$ that is featured in the ELBO. Learning $q_{\phi}$ can choose $\mathbf{y}_T$ to reduce the cross-entropy: + +$$ +- \mathbb {E} _ {q _ {\phi} (\mathbf {y} _ {T} | x)} [ \log \pi_ {\theta} (\mathbf {y} _ {T}) ]. \tag {14} +$$ + +Minimizing eq. (14) will tighten the ELBO for any $s_\theta$ . Next, $q_\phi$ is featured in the remaining terms that feature $s_\theta$ ; optimizing for $q_\phi$ will tighten and improve the ELBO alongside $s_\theta$ . Finally, $q_\phi$ is featured in the expectations and the $-\log q_\phi$ term: + +$$ +\log p _ {\theta} \left(\mathbf {u} _ {T} ^ {z} = x\right) \geq \mathbb {E} _ {q _ {\phi} \left(\mathbf {y} _ {0} ^ {v} = v \mid x\right)} \left[ \left(\mathcal {L} ^ {\mathrm {dsm}} \text { or } \mathcal {L} ^ {\mathrm {ism}}\right) - \log q _ {\phi} \left(\mathbf {y} _ {0} ^ {v} = v \mid x\right) \right] \tag {15} +$$ + +The $q_{\phi}(\mathbf{y}_0^v |x)$ terms impose an optimality condition that $p_{\theta}(\mathbf{u}_T^v |\mathbf{u}_T^z) = q_{\phi}(\mathbf{y}_0^v |\mathbf{y}_0^z)$ (appendix E). When it is satisfied, no looseness in the ELBO is due to the initial time zero auxiliary variables. + +To learn, $\mathbf{Q},\mathbf{D}$ need to be specified with parameters $\phi$ that enable gradients. We keep $\mathbf{S}$ fixed at the inverse covariance of $\pi_{\theta}$ . The transition kernel $q_{\phi}(\mathbf{y}_s|x)$ depends on $\mathbf{Q},\mathbf{D}$ through its mean and covariance. Gaussian distributions permit gradient estimation with reparameterization or score-function gradients (Kingma & Welling, 2013; Ranganath et al., 2014; Rezende & Mohamed, 2015; Titsias & Lázaro-Gredilla, 2014). 
Reparameterization is accomplished via: + +$$ +\mathbf {y} _ {s} = \mathbf {m} _ {s | 0} + \mathbf {L} _ {s | 0} \epsilon \tag {16} +$$ + +where $\epsilon \sim \mathcal{N}(0, I_{dK})$ and $\mathbf{L}_{s|0}$ satisfies $\mathbf{L}_{s|0} \mathbf{L}_{s|0}^{\top} = \boldsymbol{\Sigma}_{s|0}$ , derived using coordinate-wise Cholesky decomposition. Gradients flow through eq. (16) from $\mathbf{y}_s$ to $\mathbf{m}_{s|0}$ and $\boldsymbol{\Sigma}_{s|0}$ to $\mathbf{Q}$ , $\mathbf{D}$ to parameters $\phi$ . + +Algorithm 1 displays Automatic Multivariate Diffusion Training (AMDT). AMDT provides a training method for diffusion-based generative models for either fixed $\mathbf{Q}$ , $\mathbf{D}$ matrices or for learning the $\mathbf{Q}_{\phi}, \mathbf{D}_{\phi}$ matrices, without requiring any diffusion-specific analysis. + +Learning in other diffusion objectives. Like in the ELBO, learning in score matching or noise prediction objectives can improve the match between the inference process and implied generative model (appendix A). + +# 4 INSIGHTS INTO MULTIVARIATE DIFFUSIONS + +Scalar versus Multivariate Processes. Equation (13) clarifies what can change while preserving $q_{\infty}$ . Recall that $\mathbf{Q}$ and $\mathbf{D}$ are $K \times K$ for $K - 1$ auxiliary variables. Because $0$ is the only $1 \times 1$ skew-symmetric matrix, scalar processes must set $\mathbf{Q} = 0$ . With $q_{\phi,\infty} = \mathcal{N}(0,\mathbf{I})$ , the process is: + +$$ +d \mathbf {y} = - \mathbf {D} (s) \mathbf {y} d s + \sqrt {2 \mathbf {D} (s)} d \widehat {\mathbf {B}} _ {s}. \tag {17} +$$ + +What is left is the VPSDE process used widely in diffusion models where $\mathbf{D}(s) = \frac{1}{2}\beta (s)$ is $1\times 1$ (Song et al., 2020b). This reveals that the VPSDE process is the only scalar diffusion with a stationary distribution. 
This also clarifies the role of $\mathbf{Q}$ : it accounts for mixing between dimensions in multivariate processes, as do non-diagonal entries in $\mathbf{D}$ for $K > 1$ . + +CLD optimizes a log-likelihood lower bound. Differentiating $\mathcal{L}^{\mathrm{mdsm}}$ (eq. (7)) with respect to the score model parameters, we show that the objective for CLD (Dockhorn et al., 2021) maximizes a lower bound on $\log p_{\theta}(x)$ , not just $\log p_{\theta}(\mathbf{u}_0)$ , without appealing to the probability flow ODE. + +Does my model use auxiliary variables? An example initial distribution is $q(\mathbf{y}_0^v |x) = \mathcal{N}(0,\mathbf{I})$ . It is also common to set $\pi_{\theta} = \mathcal{N}(0,\mathbf{I})$ . Because the optimum for diffusions is $p_{\theta} = q$ , the optimal model has main and auxiliary dimensions independent at endpoints 0 and $T$ . Does this mean that the model does not use auxiliary variables? In appendix B, we show that in this case the model can still use auxiliary variables at intermediate times. A sufficient condition is non-diagonal $\mathbf{Q} + \mathbf{D}$ . + +# 5 EXPERIMENTS + +We test the MDM framework with handcrafted and learned diffusions. The handcrafted diffusions are (a) ALDA, used in (Mou et al., 2019) for accelerated gradient-based MCMC sampling (eq. (32)) and (b) MALDA: a modified version of ALDA (eq. (33)). Both have two auxiliary variables. We also learn diffusions with 1 and 2 auxiliary variables. We compare with VPSDE and ELBO-trained CLD. + +Table 2: BPD upper-bounds on image generation for a fixed architecture. CIFAR-10: learning outperforms CLD, and both outperform the standard choice of VPSDE. MNIST: learning matches VPSDE while the fixed auxiliary diffusions are worse. IMAGENET32: all perform similarly. Learning matches or surpasses the best fixed diffusion, while bypassing the need to choose a diffusion. + +
ModelKCIFAR-10IMAGENET32MNIST
VPSDE13.203.701.26
Learned23.073.711.28
Learned33.083.721.33
CLD23.113.701.35
MALDA33.133.721.65
ALDA329.4333.08124.60
+ +Table 3: Parameter Efficiency. The first two rows display diffusions from previous work: VPSDE and CLD, both using score models with 108 million parameters on CIFAR-10. We train the rest using a score model with 35.7 million parameters. The learned diffusion matches the performance of VPSDE-large; changes in the inference can account for as much improvement as a 3x increase in score parameters. BPDs are upper-bounds. + +
ModelKParametersCIFAR-10
VPSDE-large (Song et al., 2021)1108M3.08
CLD-large (Dockhorn et al., 2021)2108M3.31
Learned235.7M3.07
CLD235.7M3.11
VPSDE135.7M3.20
+ +Following prior work, we train DBGMs for image generation. We use the U-Net from Ho et al. (2020). We input the auxiliary variables as extra channels, which only increases the score model parameters in the input and output convolutions (CLD and Learned 2 have 7,000 more parameters than VPSDE on CIFAR-10 and IMAGENET32 and only 865 more for MNIST). We use simple uniform dequantization. We report estimates of $\mathcal{L}^{\mathrm{mdsm}}$ (which reduces to the standard $\mathcal{L}^{\mathrm{dsm}}$ for $K = 1$ ). We sample times using the importance sampling distribution from Song et al. (2021) with truncation set to $\epsilon = 10^{-3}$ . To ensure the truncated bound is proper, we use a likelihood described in appendix I. + +Results. Table 2 shows that the inference process matters. It displays DBGMs that we train and evaluate on CIFAR-10, IMAGENET32 and MNIST. This includes the existing VPSDE and CLD, the new MALDA and ALDA, and the new learned inference processes. All are trained with the 35.7M parameter architecture. For CIFAR-10, learning outperforms CLD, and both outperform the standard choice of VPSDE. For MNIST, learned diffusions match VPSDE while the three fixed auxiliary diffusions are worse. On IMAGENET32, all perform similarly. The take-away is that learning + +matches or surpasses the best fixed diffusion performance and bypasses the choice of diffusion for each new dataset or score architecture. In Figure 1 we plot the generated samples from CIFAR10. + +Table 3's first two rows display diffusion models from previous work: VPSDE (Song et al., 2021) and CLD (Dockhorn et al., 2021) both with the 108 million score model from Song et al. (2021) (labeled "large"). The rest are DBGMs that we train using the U-Net with 35.7 million parameters for CIFAR-10 and IMAGENET32 and 1.1 million for MNIST. 
Despite using significantly fewer parameters, the learned diffusion achieves similar BPD compared to the larger models, showing that changes in inference can account for as much improvement as a three-fold increase in parameters. While the larger architecture requires two GPUs for batch size 128 on CIFAR-10 on A100s, the smaller one only requires one; exploring inference processes can make diffusions more computationally accessible. Table 3 also demonstrates a tighter bound for CLD trained and evaluated with the MDM ELBO ( $\leq$ 3.11) relative to existing probability flow-based evaluations (3.31). + +![](images/73a7012161f0d7f4fed755ef1cf3aafd7643b185e471793fa0b40a09f59c1436.jpg) +Figure 1: CIFAR10 samples generated from the "learned 2" and MALDA generative models. + +![](images/429ec45c9b5517fcde31c68a944981b3368910a0b2c7d782d6604750d35a3300.jpg) + +# 6 RELATED WORK + +Evidence Lower Bounds. Song et al. (2021); Huang et al. (2021) derive the ISM and DSM lower bounds on the model log likelihood. Our work extends their analysis to the multivariate diffusion setting to derive lower bounds on the log marginal of the data in the presence of auxiliary variables. + +Auxiliary variables. Dupont et al. (2019) shows that augmented neural ODEs model a richer set of functions and Huang et al. (2020) uses this principle for normalizing flows. Hierarchical variational models and auto-encoders marginalize auxiliary variables to build expressive distributions (Ranganath et al., 2016; Sønderby et al., 2016; Maaløe et al., 2019; Vahdat & Kautz, 2020; Child, 2020). We apply this principle to DBGMs, including and extending CLD (Dockhorn et al., 2021). + +Learning inference. Learning $q_{\phi}$ with $p_{\theta}$ is motivated in previous work (Kingma & Welling, 2013; Sohl-Dickstein et al., 2015; Kingma et al., 2021). Kingma et al. (2021) learn the noise schedule for VPSDE. 
For MDMs, there are parameters to learn beyond the noise schedule; $\mathbf{Q}$ can be non-zero, $\mathbf{D}$ can be diagonal or full, $\mathbf{Q}$ and $\mathbf{D}$ can be given different time-varying functions, and $\nabla \mathbf{H}$ can be learned. + +# 7 DISCUSSION + +We present an algorithm for training multivariate diffusions with linear time-varying inference processes with a specified stationary distribution and any number of auxiliary variables. This includes automating transition kernel computation and providing a parameterization of diffusions that have a specified stationary distribution, which facilitate working with new diffusion processes, including learning the diffusion. The experiments show that learning matches or surpasses the best fixed diffusion performance, bypassing the need to choose a diffusion. MDMs achieve BPDs similar to univariate diffusions with as many as three times more score parameters. The proposed MDM ELBO reports a tighter bound for the existing CLD relative to existing probability flow-based evaluations. This work enables future directions including interactions across data coordinates and using new stationary distributions. + +# 8 ACKNOWLEDGEMENTS + +This work was generously funded by NIH/NHLBI Award R01HL148248, NSF Award 1922658 NRT-HDR: FUTURE Foundations, Translation, and Responsibility for Data Science, and NSF CAREER Award 2145542. The authors would additionally like to thank Chin-Wei Huang for helpful discussions regarding Huang et al. (2021). + +# REFERENCES + +Andrew D Barbour. Stein's method and poisson process convergence. Journal of Applied Probability, 25(A):175-184, 1988. +Andrew Campbell, Joe Benton, Valentin De Bortoli, Tom Rainforth, George Deligiannidis, and Arnaud Doucet. A continuous time framework for discrete denoising models. arXiv preprint arXiv:2205.14987, 2022. +Nanxin Chen, Yu Zhang, Heiga Zen, Ron J Weiss, Mohammad Norouzi, and William Chan. Wavegrad: Estimating gradients for waveform generation. 
arXiv preprint arXiv:2009.00713, 2020. +Rewon Child. Very deep vaes generalize autoregressive models and can outperform them on images. arXiv preprint arXiv:2011.10650, 2020. +Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34, 2021. +Tim Dockhorn, Arash Vahdat, and Karsten Kreis. Score-based generative modeling with critically-damped Langevin diffusion. arXiv preprint arXiv:2112.07068, 2021. +Emilien Dupont, Arnaud Doucet, and Yee Whye Teh. Augmented neural odes. Advances in Neural Information Processing Systems, 32, 2019. +Bradley Efron. Tweedie's formula and selection bias. Journal of the American Statistical Association, 106(496):1602-1614, 2011. +Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239, 2020. +Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. *J. Mach. Learn. Res.*, 23: 47-1, 2022. +Chin-Wei Huang, Laurent Dinh, and Aaron Courville. Augmented normalizing flows: Bridging the gap between generative flows and latent variable models. arXiv preprint arXiv:2002.07101, 2020. +Chin-Wei Huang, Jae Hyun Lim, and Aaron C Courville. A variational perspective on diffusion-based generative models and score matching. Advances in Neural Information Processing Systems, 34, 2021. +Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. +Diederik P Kingma, Tim Salimans, Ben Poole, and Jonathan Ho. Variational diffusion models. arXiv preprint arXiv:2107.00630, 2021. +Zhifeng Kong, Wei Ping, Jiaji Huang, Kexin Zhao, and Bryan Catanzaro. Diffwave: A versatile diffusion model for audio synthesis. arXiv preprint arXiv:2009.09761, 2020. +Yi-An Ma, Tianqi Chen, and Emily Fox. A complete recipe for stochastic gradient mcmc. 
Advances in neural information processing systems, 28, 2015. +Lars Maaløe, Marco Fraccaro, Valentin Lievin, and Ole Winther. Biva: A very deep hierarchy of latent variables for generative modeling. Advances in neural information processing systems, 32, 2019. + +Gautam Mittal, Jesse Engel, Curtis Hawthorne, and Ian Simon. Symbolic music generation with diffusion models. arXiv preprint arXiv:2103.16091, 2021. +Wenlong Mou, Yi-An Ma, Martin J Wainwright, Peter L Bartlett, and Michael I Jordan. High-order Langevin diffusion yields an accelerated mcmc algorithm. arXiv preprint arXiv:1908.10859, 2019. +Alex Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. arXiv preprint arXiv:2102.09672, 2021. +Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. +Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. +Rajesh Ranganath, Sean Gerrish, and David Blei. Black box variational inference. In Artificial intelligence and statistics, pp. 814-822. PMLR, 2014. +Rajesh Ranganath, Dustin Tran, and David Blei. Hierarchical variational models. In International conference on machine learning, pp. 324-333. PMLR, 2016. +Danilo Rezende and Shakir Mohamed. Variational inference with normalizing flows. In International Conference on Machine Learning, pp. 1530-1538. PMLR, 2015. +Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022. +Simo Särkkä and Arno Solin. 
Applied stochastic differential equations, volume 10. Cambridge University Press, 2019. +Hiroshi Sasaki, Chris G Willcocks, and Toby P Breckon. Unit-ddpm: Unpaired image translation with denoising diffusion probabilistic models. arXiv preprint arXiv:2104.05358, 2021. +Jianghong Shi, Tianqi Chen, Ruoshi Yuan, Bo Yuan, and Ping Ao. Relation of a new interpretation of stochastic differential equations to ito process. Journal of Statistical physics, 148:579-590, 2012. +Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015. +Casper Kaae Sønderby, Tapani Raiko, Lars Maaløe, Søren Kaae Sønderby, and Ole Winther. Ladder variational autoencoders. Advances in neural information processing systems, 29, 2016. +Yang Song, Sahaj Garg, Jiaxin Shi, and Stefano Ermon. Sliced score matching: A scalable approach to density and score estimation. In Uncertainty in Artificial Intelligence, pp. 574-584. PMLR, 2020a. +Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020b. +Yang Song, Conor Durkan, Iain Murray, and Stefano Ermon. Maximum likelihood training of score-based diffusion models. Advances in Neural Information Processing Systems, 34:1415-1428, 2021. +Michalis Titsias and Miguel Lázaro-Gredilla. Doubly stochastic variational bayes for non-conjugate inference. In International conference on machine learning, pp. 1971-1979. PMLR, 2014. +Arash Vahdat and Jan Kautz. Nvae: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020. + +Pascal Vincent. A connection between score matching and denoising autoencoders. *Neural computation*, 23(7):1661-1674, 2011. +L Yin and P Ao. 
Existence and construction of dynamical potential in nonequilibrium processes without detailed balance. Journal of Physics A: Mathematical and General, 39(27):8593, 2006. +Zhenzhong Zhang and Dayue Chen. A new criterion on existence and uniqueness of stationary distribution for diffusion processes. Advances in Difference Equations, 2013(1):1-6, 2013. + +# A AUTOMATED SCORE MATCHING WITH LEARNED INFERENCE + +Like for the MDM ELBO, the methods in this work apply to training with the score matching loss: + +$$ +\mathcal {L} _ {\mathrm {S M}} (x, \theta , \phi) = T \mathbb {E} _ {t \sim U [ 0, T ]} \mathbb {E} _ {q _ {\phi} (\mathbf {y} | x)} \left[ \lambda (t) \| s _ {\theta} (\mathbf {y} _ {t}, t) - \nabla_ {\mathbf {y} _ {t}} \log q _ {\phi} (\mathbf {y} _ {t} | x) \| _ {2} ^ {2} \right], +$$ + +where $\lambda :[0,T]\to \mathbb{R}_+$ is a weighing function. The score-matching loss is often optimized in its simplified noise prediction form: + +$$ +\mathcal {L} _ {\mathrm {N P}} (x, \theta , \phi) = T \mathbb {E} _ {t \sim U [ 0, T ]} \mathbb {E} _ {q _ {\phi} (\mathbf {y} | x)} \left[ \| \epsilon_ {\theta} (\mathbf {y} _ {t}, t) - \epsilon \| _ {2} ^ {2} \right] +$$ + +where $s_{\theta} = -\mathbf{L}_{t}^{-\top}\epsilon_{\theta}$ and $\mathbf{y}_t = \mu_t + \mathbf{L}_t\epsilon$ and $\epsilon$ is the noise used in sampling $\mathbf{y}_t$ . We describe here how the improvements to the ELBO studied in this work carry over to $\mathcal{L}_{\mathrm{SM}}$ and $\mathcal{L}_{\mathrm{NP}}$ . In the following let $q_{0}$ be the data distribution, let $p_{(\theta ,\phi),0}$ be the model's distribution of the data, and recall that the model is defined by $(s_{\theta},f_{\phi},g_{\phi})$ and prior $\pi$ via a continuous-time stochastic process with drift coefficient $g_{\phi}^{2}s_{\theta} - f_{\phi}$ and diffusion coefficient $g_{\phi}$ . 
+ +First, minimizing $\mathcal{L}_{\mathrm{SM}}$ or $\mathcal{L}_{\mathrm{NP}}$ so that $\nabla_{\mathbf{y}_t}\log q_\phi (\mathbf{y}_t) = s_\theta (\mathbf{y}_t,t)$ does not alone imply that $p_{(\theta ,\phi),0}$ will equal $q_{0}$ ; it must also be that $q_{\phi ,T}\approx \pi$ . Foregoing this requirement means $\pi$ will produce samples that the generative model may not be able to push onto the path the model was trained on (formally, the score of the generative model would not equal the time-reversal of the forward score even if $s_\theta$ equals the forward score). This condition can be satisfied if $q_{\phi}$ can be chosen with stationary distribution $\pi$ . Section 3.4 describes how to accomplish this. + +Next, for any fixed $q_{\phi}$ , automatic transitions from section 3.3 streamline the computation of the score matching loss, allowing for simple score computation for a wide class of diffusions beyond VP. + +Finally, for a fixed $q_{\phi}$ with $q_{\phi,T} \approx \pi$ and a score architecture $s_{\theta}$ , minimizing $\mathcal{L}_{\mathrm{SM}}$ or $\mathcal{L}_{\mathrm{NP}}$ w.r.t $\theta$ may be suboptimal. Optimization, like for the elbo, carries over to score matching and can close this gap; learning w.r.t. both $\theta, \phi$ increases the ability to successfully minimize the loss at each $t$ (section 3.5). In other words, since the generative model is defined by $(s_{\theta}, f_{\phi}, g_{\phi})$ , learning $q_{\phi}$ means the loss trains all three components of the generative model rather than just one. In summary, score matching is automatic and can learn over the space of linear diffusions that tend to the model prior. + +# B DOES MY MODEL USE AUXILIARY VARIABLES? + +In section 3 we gave the example choice of $q(\mathbf{y}_0^v |x) = \mathcal{N}(0,\mathbf{I})$ coordinate-wise. It is also a common choice to set $\pi_{\theta} = \mathcal{N}(0,\mathbf{I})$ . 
Because the optimum in diffusion models is $p_{\theta} = q$ for all $t$ , we see a peculiar phenomenon under this choice: the model has main and auxiliary dimensions independent at both endpoints 0 and $T$ . Does this mean that the model does not use auxiliary variables? We show that even when $q_{\phi}(\mathbf{y}_0)$ and $\pi_{\theta}$ have main and auxiliary variables independent, the model can use the auxiliary variables. A sufficient condition is $\mathbf{Q} + \mathbf{D}$ is non-diagonal. + +To make this precise, we recall that we model with $p_{\theta}(\mathbf{u}_T^z = x)$ . To show the model is using auxiliary variables, we just need to show that $\mathbf{u}_T^z$ (main coordinate at $T$ ) depends on $\mathbf{u}_t^v$ (aux. coordinate at $t$ ) for $T > t$ . At optimum, $p_{\theta}(\mathbf{u}_T^z,\mathbf{u}_t^v) = q_{\phi}(\mathbf{y}_0^z,\mathbf{y}_{T - t}^v)$ . Therefore it is sufficient to show that for some time $s$ , $q_{\phi}(\mathbf{y}_s^v |\mathbf{y}_0^z)\neq q_{\phi}(\mathbf{y}_s^v)$ . Because $\mathbf{y}_0^z$ , is determined by $x$ we need to show that $q_{\phi}(\mathbf{y}_s^v |x)\neq q_{\phi}(\mathbf{y}_s^v)$ . To do that, we first derive $q(\mathbf{y}_s|x)$ and then marginalize to get $q(\mathbf{y}_s^v |x)$ from it. Since the former is 2D Gaussian, the latter is available in terms of the former's mean and covariance. Suppose $\mathbb{E}[\mathbf{y}_0^v ] = 0$ , $\mathbf{Q} = [[0, - 1],[1,0]]$ and $\mathbf{D} = [[1,0],[0,1]]$ and we have $s = .1$ We have: + +$$ +\mathbb {E} [ \mathbf {y} _ {s} | x ] = \exp \left[ - s (\mathbf {Q} + \mathbf {D}) \right] \binom {x} {0} = \exp \left[ \left[ \begin{array}{l l} -. 1 & . 1 \\ -. 1 & -. 1 \end{array} \right] \right] \binom {x} {0} = \binom {0. 9 0 0 3 x} {- 0. 
0 9 0 x} \tag {18} +$$ + +Regardless of the covariance any 1D of this 2D gaussian will have mean that is a function of $x$ , meaning that $q(\mathbf{y}_s^v |x)$ does not equal $q(\mathbf{y}_s^v)$ (which is also a Gaussian but with mean depending on $\mathbf{x}'s$ mean rather than $x$ itself. Therefore, even under the setup with independent endpoints, the optimal model makes use of the intermediate auxiliary variables in its final modeling distribution $p_{\theta}(\mathbf{u}_T^z = x)$ . + +Are there choices of $\mathbf{Q}$ and $\mathbf{D}$ that lead to learning models that don't make use of the extra dimensions? As mentioned, in the inference process, $\mathbf{Q}$ is responsible for mixing information among the + +coordinates, and is the only source of this when $\mathbf{D}$ is diagonal. Then, if $\mathbf{Q} = \mathbf{0}$ and $\mathbf{D}$ is diagonal, none of the coordinates for a given feature $\mathbf{x}_j$ (including $\mathbf{u}_{tj}^z$ , $\mathbf{u}_{tj}^{v_1},\ldots ,\mathbf{u}_{tj}^{v_{K - 1}}$ ) interact for any $t$ . Then, since $p_\theta = q$ at optimum, independence of the coordinates at all $t$ in $q$ imply the same in $p_\theta$ and the model will not make use of any auxiliary variables when modeling the marginal $\log p_{\theta}(\mathbf{u}_T^z = x)$ . + +# C STATIONARY PARAMETERIZATION + +The non-linear time-homogeneous Ito process family is: + +$$ +d \mathbf {y} = f (\mathbf {y}) d t + g (\mathbf {y}) \mathbf {B} _ {t}. \tag {19} +$$ + +This family can be restricted to those with stationary distributions. Ma et al. (2015) show a complete recipe to span the subset of this family with a desired stationary distribution. Let $\mathbf{Q}$ be skew-symmetric $(-\mathbf{Q} = \mathbf{Q}^{\top})$ and $\mathbf{D}$ is positive semi-definite. Suppose the desired stationary distribution is $q_{\infty}(\mathbf{y})$ . 
For a matrix $\mathbf{A}$ , let $\sqrt{\mathbf{A}}$ refer to the matrix square root defined by $\mathbf{a} = \sqrt{\mathbf{A}} \iff \mathbf{A} = \mathbf{aa}^{\top}$ . Then, Ma et al. (2015) show that, setting $\mathbf{H}(\mathbf{y}) = -\log q_{\infty}(\mathbf{y})$ , $g(\mathbf{y}) = \sqrt{2\mathbf{D}(\mathbf{y})}$ , and + +$$ +f (\mathbf {y}) = - \left[ \mathbf {D} (\mathbf {y}) + \mathbf {Q} (\mathbf {y}) \right] \nabla \mathbf {H} (\mathbf {y}) + \boldsymbol {\Gamma} (\mathbf {y}), \quad \boldsymbol {\Gamma} _ {i} (\mathbf {y}) = \sum_ {j = 1} ^ {d} \frac {\partial}{\partial \mathbf {z} _ {j}} \left(\mathbf {D} _ {i j} (\mathbf {y}) + \mathbf {Q} _ {i j} (\mathbf {y})\right), \tag {20} +$$ + +yields a process $\mathbf{y}_t$ with stationary distribution $q_{\infty}$ . We extend it to time-varying (time inhomogeneous) processes. + +Theorem 2. $q_{\infty}(\mathbf{y})\propto \exp [-H(\mathbf{y})]$ is a stationary distribution of + +$$ +d \mathbf {y} = \left(- [ \mathbf {D} (\mathbf {y}, t) + \mathbf {Q} (\mathbf {y}, t) ] \nabla \mathbf {H} (\mathbf {y}) + \boldsymbol {\Gamma} (\mathbf {y}, t)\right) d t + \sqrt {2 \mathbf {D} (\mathbf {y} , t)} \mathbf {B} _ {t}, \tag {21} +$$ + +for + +$$ +\boldsymbol {\Gamma} _ {i} (\mathbf {y}, t) = \sum_ {j = 1} ^ {d} \frac {\partial}{\partial \mathbf {y} _ {j}} \left(\mathbf {D} _ {i j} (\mathbf {y}, t) + \mathbf {Q} _ {i j} (\mathbf {y}, t)\right). \tag {22} +$$ + +Proof. The Fokker Planck equation is: + +$$ +\partial_ {t} q (\mathbf {y}, t) = - \sum_ {i} \frac {\partial}{\partial \mathbf {y} _ {i}} [ f _ {i} (\mathbf {y}, t) q (\mathbf {y}, t) ] + \sum_ {i, j} \frac {\partial^ {2}}{\partial \mathbf {y} _ {i} \partial \mathbf {y} _ {j}} [ \mathbf {D} _ {i j} (\mathbf {y}, t) q (\mathbf {y}, t) ] \tag {23} +$$ + +A stationary distribution is one where the Fokker-Planck right hand side is equal to 0. 
To show that the stationary characterization also holds of time-inhomogeneous processes with $\mathbf{D}(\mathbf{y},t)$ and $\mathbf{Q}(\mathbf{y},t)$ , we take two steps, closely following Yin & Ao (2006); Shi et al. (2012); Ma et al. (2015), but noting that there is no requirement for $\mathbf{Q}$ , $\mathbf{D}$ to be free of $t$ . First, we show that the Fokker-Plack equation can be re-written as: + +$$ +\partial_ {t} q (\mathbf {y}, t) = \nabla \cdot \left(\left[ \mathbf {D} (\mathbf {y}, t) + \mathbf {Q} (\mathbf {y}, t) \right] \left[ q (\mathbf {y}, t) \nabla H (\mathbf {y}) + \nabla q (\mathbf {y}, t) \right]\right) \tag {24} +$$ + +Second, because the whole expression is set to 0 when the inside expression equals 0 + +$$ +q (\mathbf {y}, t) \nabla H (\mathbf {y}) + \nabla q (\mathbf {y}, t) = 0, \tag {25} +$$ + +we just need to show that this holds when $q(\mathbf{y},t) = \exp [-H(\mathbf{y})] / \mathbf{Z}$ . The second step is concluded because + +$$ +\left[ q (\mathbf {y}, t) \nabla H (\mathbf {y}) + \nabla q (\mathbf {y}, t) \right] = \frac {1}{\mathbf {Z}} \left[ \exp [ - H (\mathbf {y}) ] \nabla H (\mathbf {y}) + \nabla \exp [ - H (\mathbf {y}) ] \right] = 0, +$$ + +where $\mathbf{Z}$ is the normalization constant of $\exp (-H(y))$ . + +It only remains to show that Fokker-Plack can be re-written in divergence form with time-dependent $\mathbf{Q},\mathbf{D}$ . In the following let $Q_{ijt}$ denote $\mathbf{Q}_{ij}(\mathbf{y},t)$ and likewise for $D_{ijt}$ . Let $\partial_i$ denote $\frac{\partial}{\partial\mathbf{y}_i}$ and let it denote $\frac{d}{d\mathbf{y}_i}$ for scalar functions. We will use $[Ax]_i = \sum_jA_{ij}x_j$ . 
+ +$$ +\begin{array}{l} \partial_ {t} q _ {t} = \nabla \cdot \left(\left[ \mathbf {D} (\mathbf {y}, t) + \mathbf {Q} (\mathbf {y}, t) \right] [ q \nabla H + \nabla q ]\right) \\ = \sum_ {i} \partial_ {i} \left(\left[ [ \mathbf {D} (\mathbf {y}, t) + \mathbf {Q} (\mathbf {y}, t) ] [ q \nabla H + \nabla q ] \right] _ {i}\right) \\ = \sum_ {i} \partial_ {i} \sum_ {j} \left[ D _ {i j t} + Q _ {i j t} \right] \left[ q \nabla H + \nabla q \right] _ {j} \\ = \sum_ {i} \partial_ {i} \sum_ {j} \left[ D _ {i j t} + Q _ {i j t} \right] \left[ q \partial_ {j} H + \partial_ {j} q \right] \\ = \sum_ {i} \partial_ {i} \sum_ {j} \left[ D _ {i j t} + Q _ {i j t} \right] \left[ q \partial_ {j} H \right] + \sum_ {i} \partial_ {i} \sum_ {j} \left[ D _ {i j t} + Q _ {i j t} \right] \left[ \partial_ {j} q \right] \\ = \sum_ {i} \partial_ {i} \sum_ {j} \left[ D _ {i j t} + Q _ {i j t} \right] \left[ q \partial_ {j} H \right] + \sum_ {i} \partial_ {i} \sum_ {j} D _ {i j t} \left[ \partial_ {j} q \right] + \sum_ {i} \partial_ {i} \sum_ {j} Q _ {i j t} \left[ \partial_ {j} q \right] \\ \end{array} +$$ + +We re-write the 2nd and 3rd term. 
Holding $i$ fixed and noting $q$ is scalar, we get the product rule $\sum_{j}D_{ijt}(\partial_{j}q) = \sum_{j}\partial_{j}[D_{ijt}q] - q\sum_{j}\partial_{j}D_{ijt}$ for each $i$ , and likewise for $Q_{ijt}$ :
$f_{i}(\mathbf{y},t) = \left(-[D + Q]\nabla H + \Gamma\right)_{i}$ and again that $[Ax]_i = \sum_j A_{ij}x_j$ , we have equality with the original Fokker-Planck + +$$ +\begin{array}{l} = \sum_ {i} \partial_ {i} \left[ \left(\sum_ {j} \left[ D _ {i j t} + Q _ {i j t} \right] \left[ \partial_ {j} H \right] - \sum_ {j} \partial_ {j} \left(D _ {i j t} + Q _ {i j t}\right)\right) q \right] + \sum_ {i j} \frac {\partial^ {2}}{\mathbf {y} _ {i} \mathbf {y} _ {j}} \left(D _ {i j t} q\right) \\ = - \sum_ {i} \frac {\partial}{\partial \mathbf {y} _ {i}} \left[ f _ {i} (\mathbf {y}, t) q (\mathbf {y}, t) \right] + \sum_ {i j} \frac {\partial^ {2}}{\mathbf {y} _ {i} \mathbf {y} _ {j}} \left[ \mathbf {D} _ {i j} (\mathbf {y}, t) q (\mathbf {y}, t) \right] \\ = \partial_ {t} q (\mathbf {y}, t) \\ \end{array} +$$ + +![](images/5a70606959cc1f55516e353eacfd3cd59104183ac5d0662fefdc5b1c756ac428.jpg) + +We have shown $\exp[-H(\mathbf{y})] / \mathbf{Z}$ is a stationary distribution of the time-varying non-linear Ito process: + +$$ +d \mathbf {y} = \left(- [ \mathbf {D} (\mathbf {y}, t) + \mathbf {Q} (\mathbf {y}, t) ] \nabla H (\mathbf {y}) + \boldsymbol {\Gamma} (\mathbf {y}, t)\right) d t + \sqrt {2 \mathbf {D} (\mathbf {y} , t)} \mathbf {B} _ {t}. \tag {26} +$$ + +However, for some choices of $\mathbf{Q}, \mathbf{D}$ , $\exp[-H(\mathbf{y})] / \mathbf{Z}$ is not necessarily the unique stationary distribution. One problematic case can occur as follows. Suppose that row $i$ of $(\mathbf{Q} + \mathbf{D})$ is all-zero; in this case, $d\mathbf{y}_i = 0$ which implies that $(\mathbf{y}_i)_t = (\mathbf{y}_i)_0$ for all $t > 0$ . Then, the initial distribution is also a stationary distribution. To rule out such pathological diffusions, we make the assumption that $\mathbf{Q} + \mathbf{D}$ is full rank. 
Then, for uniqueness, recall that stationary distributions are the zeros of + +$$ +\partial_ {t} q (\mathbf {y}, t) = \nabla \cdot \left(\left[ \mathbf {D} (\mathbf {y}, t) + \mathbf {Q} (\mathbf {y}, t) \right] \left[ q (\mathbf {y}, t) \nabla H (\mathbf {y}) + \nabla q (\mathbf {y}, t) \right]\right) +$$ + +where the expression is of the form $\mathbf{A}\mathbf{v}$ for $\mathbf{A} = \mathbf{D}(\mathbf{y},t) + \mathbf{Q}(\mathbf{y},t)$ and + +$$ +\mathbf {v} = \left[ q (\mathbf {y}, t) \nabla H (\mathbf {y}) + \nabla q (\mathbf {y}, t) \right]. +$$ + +Under the assumption that $\mathbf{Q} + \mathbf{D}$ is full rank, the expression can only be zero when $\mathbf{v}$ is zero. To show uniqueness under the full rank assumption, one must then show that + +$$ +\nabla q (\mathbf {y}, t) = - q (\mathbf {y}, t) \nabla H (\mathbf {y}). +$$ + +holds only if $q(\mathbf{y}, t) = \exp[-H(\mathbf{y})] / \mathbf{Z}$ . Even if $\exp[-H(\mathbf{y})] / \mathbf{Z}$ is the unique stationary distribution, convergence to that distribution is a question. See Zhang & Chen (2013) for more details. + +Learning $\mathbf{Q}_{\phi}$ , $\mathbf{D}_{\phi}$ in the MDM ELBO helps push $\mathbf{y}_T$ to the model prior $\pi_{\theta}$ and avoid issues like those discussed. + +# C.1 LINEAR PROCESSES + +Next, we specialize this general family to linear Itô processes to maintain tractable transition distributions. A linear process is one where the drift $f(\mathbf{y},t)$ and diffusion $g(\mathbf{y},t)$ are linear functions of $\mathbf{y}$ . We express the drift function of a non-linear time-varying Itô process with stationary distribution proportional to $\exp[-H(\mathbf{y})]$ as + +$$ +- (\mathbf {Q} (\mathbf {y}, t) + \mathbf {D} (\mathbf {y}, t)) \nabla H (\mathbf {y}) + \Gamma (\mathbf {y}, t). 
+$$ + +Next, linear Ito processes have Gaussian stationary distributions (Särkkä & Solin, 2019) so $H(\mathbf{y})$ must be quadratic and $\nabla H(\mathbf{y})$ is linear, and neither are constant in $\mathbf{y}$ . Because $\nabla H(\mathbf{y})$ is linear, it can be expressed as $\mathbf{S}\mathbf{y}$ for some matrix $\mathbf{S}$ where $\mathbf{S}$ is the inverse of the covariance matrix. Because $\nabla H$ is multiplied by $\mathbf{Q}, \mathbf{D}$ , this means that $\mathbf{Q}, \mathbf{D}$ must be free of $\mathbf{y}$ . Recalling that $\Gamma$ is expressed as a sum of derivatives w.r.t $\mathbf{y}$ of $\mathbf{Q} + \mathbf{D}$ , this means that $\Gamma$ must satisfy $\Gamma = 0$ . Next, because of the stationary requirement that $g(t) = \sqrt{2\mathbf{D}(\mathbf{y},t)}$ , we can also conclude by the restriction on $\mathbf{D}$ that the diffusion coefficient function must be independent of the state $\mathbf{y}$ . Our final form for linear time-varying processes with stationary distributions $\mathcal{N}(0,\mathbf{S}^{-1})$ is: + +$$ +d \mathbf {y} = \underbrace {- \left[ \mathbf {Q} (t) + \mathbf {D} (t) \right] \mathbf {S} \mathbf {y}} _ {f (\mathbf {y}, t)} d t + \underbrace {\sqrt {2 \mathbf {D} (t)}} _ {g (t)} d \mathbf {B} _ {t} \tag {27} +$$ + +# C.2 PARAMETERIZING $\mathbf{Q}_{\phi}$ + +Suppose $b_{q}(s)$ is a positive scalar function defined on the time domain with known integral. Suppose $\tilde{\mathbf{Q}}_{\phi}$ is any matrix. Then $\tilde{\mathbf{Q}}_{\phi} - \tilde{\mathbf{Q}}_{\phi}^{\top}$ is skew-symmetric with $\tilde{\mathbf{Q}}_{\phi, ij} = -\tilde{\mathbf{Q}}_{\phi, ji}$ . 
We can set $\mathbf{Q}_{\phi}$ to + +$$ +\mathbf {Q} _ {\phi} (s) = b _ {q} (s) \cdot \left[ \tilde {\mathbf {Q}} _ {\phi} - \tilde {\mathbf {Q}} _ {\phi} ^ {\top} \right] \tag {28} +$$ + +This is a general parameterization of time-independent skew-symmetric matrices, which have number of degrees of freedom equal to the number of entries in one of the triangles of the matrix, excluding the diagonal. + +# C.3 PARAMETERIZING $\mathbf{D}_{\phi}$ + +Suppose $b_{d}(s)$ is a positive scalar function defined on the time domain with known integral. Suppose $\tilde{\mathbf{D}}_{\phi}$ is any matrix. Then $\tilde{\mathbf{D}}_{\phi}\tilde{\mathbf{D}}_{\phi}^{\top}$ is positive semi-definite and spans all time-independent positive + +semi-definite matrices. We can set $\mathbf{D}_{\phi}$ to + +$$ +\mathbf {D} _ {\phi} (s) = b _ {d} (s) \cdot \left[ \tilde {\mathbf {D}} _ {\phi} \tilde {\mathbf {D}} _ {\phi} ^ {\top} \right] \tag {29} +$$ + +To show $\tilde{\mathbf{D}}\tilde{\mathbf{D}}^{\top}$ spans all positive semi-definite matrices: suppose $\mathbf{M}$ is positive semi-definite. Then it is square. Then it can be eigen-decomposed into $\mathbf{M} = \mathbf{V}\pmb {\Sigma}\mathbf{V}^{\top}$ . The degrees of freedom in $\mathbf{V}\pmb {\Sigma}\mathbf{V}^{\top}$ are just $\mathbf{R} = \mathbf{V}\sqrt{\pmb{\Sigma}}$ since $\mathbf{V}\pmb {\Sigma}\mathbf{V}^{\top} = \mathbf{R}\mathbf{R}^{\top}$ and the square root is taken element-wise because $\pmb{\Sigma}$ is diagonal and is real because each $\pmb{\Sigma}_{ij}\geq 0$ , which is true because $\mathbf{M}$ is positive semi-definite. Take $\mathbf{D} = \mathbf{R}$ . + +In our experiments we parameterize $\mathbf{D}$ as a diagonal-only matrix. + +# C.4 INTEGRALS + +The known integral requirement comes from the integrals required in the transition kernel, and can be relaxed two possible ways: + +- numerical integration of function with unknown integral. 
This is expected to have low error given that the function is scalar-in scalar-out. +- Directly parameterize the integral and use auto-grad when needing the functions not-integrated. + +We stick with the known integrals. In conclusion, the underlying parameters are positive scalar functions $b_{q}(s), b_{d}(s)$ defined on the time domain and with known integral, and general matrices $\tilde{\mathbf{Q}}_{\phi}, \tilde{\mathbf{D}}_{\phi}$ . + +# C.5 INSTANCES + +VPSDE. VPSDE has $K = 1$ . Consequently, $\mathbf{Q}, \mathbf{D}$ are $K \times K$ . The only $1 \times 1$ skew-symmetric matrix is 0, so $\mathbf{Q} = 0$ . Setting $\mathbf{D}(t) = \frac{1}{2}\beta(t)$ recovers VPSDE: + +$$ +d \mathbf {y} = - \frac {\beta (t)}{2} \mathbf {y} d t + \sqrt {\beta (t)} d \mathbf {B} _ {t} \tag {30} +$$ + +$\nabla H(\mathbf{y}) = \mathbf{y}$ so $\mathbf{H}(\mathbf{y}) = \frac{1}{2}\| \mathbf{y}\| _2^2$ . The stationary distribution is $\mathcal{N}(0,\mathbf{I})$ + +CLD. The CLD process (eq 5 in Dockhorn et al. (2021)) is defined as + +$$ +\left( \begin{array}{c} d \mathbf {z} _ {t} \\ d \mathbf {v} _ {r} \end{array} \right) = d \mathbf {y} _ {t} = \left( \begin{array}{c c} 0 & \frac {\beta}{M} \\ - \beta & - \frac {\Gamma \beta}{M} \end{array} \right) \mathbf {y} _ {t} + \left( \begin{array}{c c} 0 & 0 \\ 0 & \sqrt {2 \Gamma \beta} \end{array} \right) d \mathbf {B} _ {t}. 
+$$ + +In $\mathbf{Q} / \mathbf{D}$ parameterization, we have + +$$ +H (\mathbf {y}) = \frac {1}{2} \| \mathbf {z} \| _ {2} ^ {2} + \frac {1}{2 M} \| \mathbf {v} \| _ {2} ^ {2}, \qquad \nabla_ {\mathbf {u}} H (\mathbf {y}) = \left( \begin{array}{c} \mathbf {z} \\ \frac {1}{M} \mathbf {v} \end{array} \right) +$$ + +$$ +\mathbf {Q} = \left( \begin{array}{c c} 0 & - \beta \\ \beta & 0 \end{array} \right), \qquad \mathbf {D} = \left( \begin{array}{c c} 0 & 0 \\ 0 & \Gamma \beta \end{array} \right) +$$ + +The stationary distribution of this process is: + +$$ +q _ {\phi , \infty} \propto \exp (- H (\mathbf {y})) = \mathcal {N} (\mathbf {z}; 0, I _ {d}) \mathcal {N} (\mathbf {v}; 0, M I _ {d}) \tag {31} +$$ + +ALDA. Mou et al. (2019) define a third-order diffusion process for the purpose of gradient-based MCMC sampling. The ALDA diffusion process can be specified as + +$$ +\mathbf {Q} = \left( \begin{array}{c c c} 0 & - \frac {1}{L} I & 0 \\ \frac {1}{L} I & 0 & - \gamma I \\ 0 & \gamma I & 0 \end{array} \right), \quad \mathbf {D} = \left( \begin{array}{c c c} 0 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & \frac {\xi}{L} I \end{array} \right). \tag {32} +$$ + +Note that $\mathbf{Q}$ is skew-symmetric and $\mathbf{D}$ is positive semi-definite, therefore we have that $q_{t}(\mathbf{u})\to q_{\phi ,\infty}$ . In this case, + +$$ +q _ {\phi , \infty} = \mathcal {N} (\mathbf {z}; 0, \mathbf {I} _ {d}) \mathcal {N} (\mathbf {v} _ {1}; 0, \frac {1}{L} \mathbf {I} _ {d}) \mathcal {N} (\mathbf {v} _ {2}; 0, \frac {1}{L} \mathbf {I} _ {d}) +$$ + +MALDA. Similar to ALDA, we specify a diffusion process we term MALDA which we specify as + +$$ +\mathbf {Q} = \left( \begin{array}{c c c} 0 & - \frac {1}{L} I & - \frac {1}{L} \\ \frac {1}{L} I & 0 & - \gamma I \\ \frac {1}{L} & \gamma I & 0 \end{array} \right), \quad \mathbf {D} = \left( \begin{array}{c c c} 0 & 0 & 0 \\ 0 & \frac {1}{L} I & 0 \\ 0 & 0 & \frac {1}{L} I \end{array} \right). 
\tag {33} +$$ + +Note that $\mathbf{Q}$ is skew-symmetric and $\mathbf{D}$ is positive semi-definite. In this case this is + +$$ +q _ {\phi , \infty} = \mathcal {N} (\mathbf {z}; 0, \mathbf {I} _ {d}) \mathcal {N} (\mathbf {v} _ {1}; 0, \frac {1}{L} I _ {d}) \mathcal {N} (\mathbf {v} _ {2}; 0, \frac {1}{L} I _ {d}) +$$ + +# D TRANSITIONS FOR LINEAR PROCESSES + +For time variable $s$ and Brownian motion $\widehat{\mathbf{B}}_s$ driving diffusions of the form + +$$ +d \mathbf {y} = f (\mathbf {y}, s) d s + g (s) d \widehat {\mathbf {B}} _ {s}, \tag {34} +$$ + +when $f_{\phi}(\mathbf{y}_s, s)$ , $g_{\phi}(s)$ are linear, the transition kernel $q_{\phi}(\mathbf{y}_s | \mathbf{y}_0)$ is always normal (Särkkä & Solin, 2019). Therefore, we just find the mean $\mathbf{m}_{s|0}$ and covariance $\boldsymbol{\Sigma}_{s|0}$ of $q(\mathbf{y}_s | \mathbf{y}_0)$ . Let $f(\mathbf{y}, s) = \mathbf{A}(s)\mathbf{y}$ . The un-conditional time $s$ mean and covariance are solutions to + +$$ +d \mathbf {m} _ {s} / d s = \mathbf {A} (s) \mathbf {m} _ {s} +$$ + +$$ +d \boldsymbol {\Sigma} _ {s} / d s = \mathbf {A} (s) \boldsymbol {\Sigma} _ {s} + \boldsymbol {\Sigma} _ {s} \mathbf {A} ^ {\top} (s) + g ^ {2} (s) \tag {35} +$$ + +By (6.6) in Särkkä & Solin (2019), for computing conditionals $q(\mathbf{y}_s|\mathbf{y}_0)$ , we can take the marginal distribution ODEs and compute conditionals by simply setting the time 0 mean and covariance initial conditions to the conditioning value and to 0 respectively. We take (6.36-6.39) and set $\mathbf{m}_0 = \mathbf{u}_0$ and $\boldsymbol{\Sigma}_{0} = 0$ to condition. Let $[\mathbf{A}]_s = \int_0^s\mathbf{A}(\nu)d\nu$ . 
The mean is

$$
\mathbf {m} _ {s \mid 0} = \exp \left[ \int_ {0} ^ {s} \mathbf {A} (\nu) d \nu \right] \mathbf {y} _ {0} = \exp \left(\left[ \mathbf {A} \right] _ {s}\right) \mathbf {y} _ {0} = \underbrace {\exp (s \mathbf {A}) \, \mathbf {y} _ {0}} _ {\text {no integration if } \mathbf {A} (\nu) = \mathbf {A}}, \tag {36}
$$

where $\exp$ denotes the matrix exponential and the last equality holds when $\mathbf{A}(\nu)$ is constant in $\nu$ . (6.36-6.39) state the covariance of $q(\mathbf{y}_s|\mathbf{y}_0)$ as a matrix factorization, for which a derivation is provided below: $\boldsymbol{\Sigma}_{s} = \mathbf{C}_{s}(\mathbf{H}_{s})^{-1}$ for $\mathbf{C}_s,\mathbf{H}_s$ being the solutions of:

$$
\left( \begin{array}{c} \frac {d}{d s} \mathbf {C} _ {s} \\ \frac {d}{d s} \mathbf {H} _ {s} \end{array} \right) = \left( \begin{array}{c c} \mathbf {A} (s) & g ^ {2} (s) \\ \mathbf {0} & - \mathbf {A} ^ {\top} (s) \end{array} \right) \left( \begin{array}{c} \mathbf {C} _ {s} \\ \mathbf {H} _ {s} \end{array} \right) \tag {37}
$$

To condition and get $\Sigma_{s|0}$ from $\Sigma_s$ , we set $\Sigma_0 = 0$ , and initialize $\mathbf{C}_s, \mathbf{H}_s$ by $\mathbf{C}_0 = \mathbf{0}$ and $\mathbf{H}_0 = \mathbf{I}$ .

$$
\binom {\mathbf {C} _ {s}} {\mathbf {H} _ {s}} = \exp \left[ \begin{array}{c c} [ \mathbf {A} ] _ {s} & [ g ^ {2} ] _ {s} \\ \mathbf {0} & - [ \mathbf {A} ^ {\top} ] _ {s} \end{array} \right] \binom {\mathbf {0}} {\mathbf {I}} = \underbrace {\exp \left[ s \left( \begin{array}{c c} \mathbf {A} & g ^ {2} \\ \mathbf {0} & - \mathbf {A} ^ {\top} \end{array} \right) \right] \binom {\mathbf {0}} {\mathbf {I}}} _ {\text {no integration if } \mathbf {A} (\nu) = \mathbf {A} , \ g (\nu) = g}. \tag {38}
$$
(35) and the desired condition $\pmb{\Sigma}_s = \mathbf{C}_s\mathbf{H}_s^{-1}$ to derive expressions for $d\mathbf{C}_s / ds$ and $d\mathbf{H}_s / ds$ and suitable initial conditions so that the factorization also starts at the desired $\pmb{\Sigma}_0$ . Let $\pmb{\Sigma}_s = \mathbf{C}_s\mathbf{H}_s^{-1}$ , then note that $\mathbf{C}_s, \mathbf{H}_s$ satisfies + +$$ +\begin{array}{l} \frac {d}{d s} \boldsymbol {\Sigma} _ {s} = \frac {d}{d s} \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \\ = \mathbf {C} _ {s} \frac {d}{d s} \mathbf {H} _ {s} ^ {- 1} + \left(\frac {d}{d s} \mathbf {C} _ {s}\right) \mathbf {H} _ {s} ^ {- 1} \\ \end{array} +$$ + +And using the fact that + +$$ +\frac {d}{d s} \mathbf {H} _ {s} \mathbf {H} _ {s} ^ {- 1} = 0 +$$ + +$$ +\mathbf {H} _ {s} \frac {d}{d s} \mathbf {H} _ {s} ^ {- 1} + \frac {d}{d s} \mathbf {H} _ {s} \left(\mathbf {H} _ {s} ^ {- 1}\right) = 0 +$$ + +$$ +\frac {d}{d s} \mathbf {H} _ {s} ^ {- 1} = - \mathbf {H} _ {s} ^ {- 1} \frac {d}{d s} \mathbf {H} _ {s} \left(\mathbf {H} _ {s} ^ {- 1}\right) +$$ + +we get that + +$$ +\mathbf {C} _ {s} \frac {d}{d s} \mathbf {H} _ {s} ^ {- 1} + \left(\frac {d}{d s} \mathbf {C} _ {s}\right) \mathbf {H} _ {s} ^ {- 1} = - \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \frac {d}{d s} \mathbf {H} _ {s} \left(\mathbf {H} _ {s} ^ {- 1}\right) + \left(\frac {d}{d s} \mathbf {C} _ {s}\right) \mathbf {H} _ {s} ^ {- 1} +$$ + +$$ +\begin{array}{l} - \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \frac {d}{d s} \mathbf {H} _ {s} \left(\mathbf {H} _ {s} ^ {- 1}\right) + \left(\frac {d}{d s} \mathbf {C} _ {s}\right) \mathbf {H} _ {s} ^ {- 1} = \mathbf {A} (s) \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} + \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \mathbf {A} ^ {\top} (s) + g ^ {2} (s) \\ = \mathbf {A} (s) \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} + \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \mathbf {A} ^ {\top} (s) \mathbf {H} _ {s} \mathbf {H} _ {s} ^ {- 1} + g ^ {2} (s) \mathbf {H} _ {s} \mathbf {H} _ {s} ^ 
{- 1} \\ \end{array} +$$ + +$$ +\begin{array}{l} \left(- \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \frac {d}{d s} \mathbf {H} _ {s} + \frac {d}{d s} \mathbf {C} _ {s}\right) \mathbf {H} _ {s} ^ {- 1} = \left(\mathbf {A} (s) \mathbf {C} _ {s} + \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \mathbf {A} ^ {\top} (s) \mathbf {H} _ {s} + g ^ {2} (s) \mathbf {H} _ {s}\right) \mathbf {H} _ {s} ^ {- 1} \\ - \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \frac {d}{d s} \mathbf {H} _ {s} + \frac {d}{d s} \mathbf {C} _ {s} = \mathbf {A} (s) \mathbf {C} _ {s} + \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \mathbf {A} ^ {\top} (s) \mathbf {H} _ {s} + g ^ {2} (s) \mathbf {H} _ {s} \\ \left[ \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \quad \mathbf {I} _ {d} \right] ^ {\top} \frac {d}{d s} \left( \begin{array}{c} \mathbf {H} _ {s} \\ \mathbf {C} _ {s} \end{array} \right) = \left[ \mathbf {C} _ {s} \mathbf {H} _ {s} ^ {- 1} \quad \mathbf {I} _ {d} \right] ^ {\top} \left( \begin{array}{c} - \mathbf {A} ^ {\top} (s) \mathbf {H} _ {s} \\ \mathbf {A} (s) \mathbf {C} _ {s} + g ^ {2} (s) \mathbf {H} _ {s} \end{array} \right) \\ \end{array} +$$ + +Now, we note $\mathbf{C}_s$ , $\mathbf{H}_s$ satisfy the following + +$$ +\frac {d}{d s} \mathbf {H} _ {s} = - \mathbf {A} ^ {\top} (s) \mathbf {H} _ {s} +$$ + +$$ +\frac {d}{d s} \mathbf {C} _ {s} = \mathbf {A} (s) \mathbf {C} _ {s} + g ^ {2} (s) \mathbf {H} _ {s} +$$ + +which implies that + +$$ +\frac {d}{d s} \left( \begin{array}{c} \mathbf {C} _ {s} \\ \mathbf {H} _ {s} \end{array} \right) = \left( \begin{array}{c c} \mathbf {A} (s) & g ^ {2} (s) \\ \mathbf {0} & - \mathbf {A} ^ {\top} (s) \end{array} \right) \left( \begin{array}{c} \mathbf {C} _ {s} \\ \mathbf {H} _ {s} \end{array} \right) \tag {39} +$$ + +with $\mathbf{C}_0 = \pmb{\Sigma}_0$ and $\mathbf{H}_0 = \mathbf{I}_d$ , as $\mathbf{C}_0\mathbf{H}_0^{-1} = \pmb{\Sigma}_0$ . 
+ +# D.2 HYBRID SCORE MATCHING + +Instead of computing $q(\mathbf{y}_s|\mathbf{y}_0)$ , we can apply the hybrid score matching principle (Dockhorn et al., 2021) to reduce variance by computing objectives using $q(\mathbf{y}_s|x)$ instead of $q(\mathbf{y}_s|\mathbf{y}_0)$ , which amounts to integrating out $\mathbf{v}_0$ . To accomplish this, following Särkkä & Solin (2019), we simply replace $\mathbf{y}_0$ with $[x,\mathbb{E}[\mathbf{v}_0]]$ in the expression for $\mathbf{m}_{s|0}$ , i.e. replace the conditioning value of $\mathbf{v}_0$ with the mean of its chosen initial distribution: + +$$ +\mathbb {E} [ \mathbf {y} _ {s} | x ] = \exp \left[ \int_ {0} ^ {s} A (\nu) d \nu \right] \binom {x} {\mathbb {E} [ \mathbf {v} _ {0} ]} \tag {40} +$$ + +For the covariance, instead of using $\mathbf{C}_0 = \boldsymbol{\Sigma}_0 = \mathbf{0}$ , we use a block matrix to condition on $x$ but not $\mathbf{v}_0$ . We decompose $\boldsymbol{\Sigma}_0$ into its blocks $\boldsymbol{\Sigma}_{0,xx}$ , $\boldsymbol{\Sigma}_{0,vv}$ , $\boldsymbol{\Sigma}_{0,xv}$ . As before, to condition on $x$ we set $\boldsymbol{\Sigma}_{0,xx} = \mathbf{0}$ . Because $q(\mathbf{v}_0)$ is set to be independent of $x$ , $\boldsymbol{\Sigma}_{0,xv}$ is also set to $\mathbf{0}$ . Finally, instead of $\mathbf{0}$ , to marginalize out $\mathbf{v}_0$ , $\boldsymbol{\Sigma}_{0,vv}$ is set to the covariance of the chosen initial time zero distribution for $\mathbf{v}_0$ . E.g. if $\mathbf{v}_{0,j} \sim N(0,\gamma)$ for each dimension, then $\boldsymbol{\Sigma}_{0,vv} = \gamma I$ . + +We operationalize this in a simple piece of code, which makes the ELBO tractable and easy, i.e. skips both analytic derivations and numerical forward integration during training. 
+ +# D.3 TRANSITIONS IN STATIONARY PARAMETERIZATION + +In terms of $\mathbf{Q}$ , $\mathbf{D}$ , the transitions $q(\mathbf{y}_s|\mathbf{y}_0)$ for time $s$ are normal with mean $\mathbf{m}_{s|0}$ and covariance $\pmb{\Sigma}_{s|0}$ equal to: + +$$ +\mathbf {m} _ {s \mid 0} = \exp \left(- \left[ \mathbf {Q} + \mathbf {D} \right] _ {s}\right) \mathbf {y} _ {0}, \quad \binom {\mathbf {C} _ {s}} {\mathbf {H} _ {s}} = \exp \left[ \binom {- [ \mathbf {Q} + \mathbf {D} ] _ {s}} {\mathbf {0}} \quad \binom {[ 2 \mathbf {D} ] _ {s}} {[ (\mathbf {Q} + \mathbf {D}) ^ {\top} ] _ {s}} \right] \binom {\mathbf {0}} {\mathbf {I}} \tag {41} +$$ + +where $\mathbf{\Sigma}_{s|0} = \mathbf{C}_s(\mathbf{H}_s)^{-1}$ . For the time invariant case, this simplifies to + +$$ +\mathbf {m} _ {s \mid 0} = \exp [ - s (\mathbf {Q} + \mathbf {D}) ] \mathbf {y} _ {0}, \quad \binom {\mathbf {C} _ {s}} {\mathbf {H} _ {s}} = \exp \left[ s \binom {- (\mathbf {Q} + \mathbf {D})} {\mathbf {0}} \quad \binom {2 \mathbf {D}} {(\mathbf {Q} + \mathbf {D}) ^ {\top}} \right] \binom {\mathbf {0}} {\mathbf {I}} \tag {42} +$$ + +# E GENERIC CHANGE OF MEASURE AND JENSEN'S FOR APPROXIMATE MARGINALIZATION + +Suppose $\mathbf{u} = [\mathbf{z},\mathbf{v}]$ and we have an expression for $p(\mathbf{u} = [z,v]) = p(\mathbf{z} = z,\mathbf{v} = v)$ . 
By marginalization, we can get $p(\mathbf{z} = z)$ , and we can introduce another distribution $q$ to pick a sampling distribution of our choice: + +$$ +\begin{array}{l} p (\mathbf {z} = z) = \int_ {v} p (\mathbf {z} = z, \mathbf {v} = v) d v \\ = \int_ {v} p (\mathbf {z} = z | \mathbf {v} = v) p (\mathbf {v} = v) d v \\ = \int_ {v} \frac {q (\mathbf {v} = v | \mathbf {z} = z)}{q (\mathbf {v} = v | \mathbf {z} = z)} p (\mathbf {z} = z | \mathbf {v} = v) p (\mathbf {v} = v) d v \tag {43} \\ = \mathbb {E} _ {q (\mathbf {v} = v | \mathbf {z} = z)} \left[ \frac {p (\mathbf {z} = z , \mathbf {v} = v)}{q (\mathbf {v} = v | \mathbf {z} = z)} \right] \\ \end{array} +$$ + +We often work with these expressions in log space, and need to pull the expectation outside to use Monte Carlo. Jensen's bound allows this: + +$$ +\begin{array}{l} \log p (\mathbf {z} = z) = \log \mathbb {E} _ {q (\mathbf {v} = v | \mathbf {z} = z)} \left[ \frac {p (\mathbf {z} = z , \mathbf {v} = v)}{q (\mathbf {v} = v | \mathbf {z} = z)} \right] \\ \geq \mathbb {E} _ {q (\mathbf {v} = v | \mathbf {z} = z)} \left[ \log \frac {p (\mathbf {z} = z , \mathbf {v} = v)}{q (\mathbf {v} = v | \mathbf {z} = z)} \right] \\ \end{array} +$$ + +The following shows that the bound is tight when $q(\mathbf{v} = v|\mathbf{z} = z) = p(\mathbf{v} = v|\mathbf{z} = z)$ : + +$$ +\begin{array}{l} \mathbb {E} _ {q (\mathbf {v} = v | \mathbf {z} = z)} \left[ \log \frac {p (\mathbf {z} = z , \mathbf {v} = v)}{q (\mathbf {v} = v | \mathbf {z} = z)} \right] = _ {\text {a s s u m e}} \mathbb {E} _ {p (\mathbf {v} = v | \mathbf {z} = z)} \left[ \log \frac {p (\mathbf {z} = z , \mathbf {v} = v)}{p (\mathbf {v} = v | \mathbf {z} = z)} \right] \\ = \mathbb {E} _ {p (\mathbf {v} = v | \mathbf {z} = z)} \left[ \log \left(\frac {p (\mathbf {z} = z , \mathbf {v} = v)}{p (\mathbf {v} = v , \mathbf {z} = z)} \cdot p (\mathbf {z} = z)\right) \right] \tag {44} \\ = \mathbb {E} _ {p (\mathbf {v} = v | \mathbf {z} = z)} \left[ \log p (\mathbf {z} 
= z) \right] \\ = \log p (\mathbf {z} = z) \\ \end{array} +$$ + +# F ELBO FOR MDMS + +$$ +\begin{array}{l} \log p _ {\theta} (x) = \log \int_ {v _ {0}} p _ {\theta} \left(x _ {0}, v _ {0}\right) d v _ {0} (45) \\ = \log \int_ {v _ {0}} p _ {\theta} \left(u _ {0} = [ x, v _ {0} ]\right) (46) \\ = \log \int_ {v _ {0}} \frac {q \left(v _ {0} \mid x\right)}{q \left(v _ {0} \mid x\right)} p _ {\theta} \left(u _ {0} = [ x, v _ {0} ]\right) (47) \\ = \log \mathbb {E} _ {q \left(v _ {0} \mid x\right)} \left[ \frac {p _ {\theta} \left(u _ {0} = [ x , v _ {0} ]\right)}{q \left(v _ {0} \mid x\right)} \right] (48) \\ \geq \mathbb {E} _ {q \left(v _ {0} \mid x\right)} \left[ \log p _ {\theta} \left(u _ {0} = [ x, v _ {0} ]\right) - \log q \left(v _ {0} \mid x\right) \right] (49) \\ \geq \mathbb {E} _ {q (y | x)} \left[ \log \pi_ {\theta} (y _ {T}) + \int_ {0} ^ {T} - \| s _ {\theta} \| _ {g ^ {2}} ^ {2} - \nabla \cdot \left(g ^ {2} s _ {\theta} - f\right) d s - \log q \left(y _ {0} ^ {v} | x\right) \right] (50) \\ \end{array} +$$ + +The first inequality holds due to Jensen's inequality and the second due to an application of Theorem 1 from Huang et al. (2021) or Theorem 3 from Song et al. (2021) applied to the joint variable $\mathbf{u}_0$ . 
+ +# F.1 ISM TO DSM + +# F.1.1 LEMMA: EXPECTATION BY PARTS + +We will need a form of multivariate integration by parts which gives us for some $f$ and some $q(x)$ , $E_{q(x)}[\nabla_x \cdot f(x)] = -E_{q(x)}[f(x)^\top \nabla_x \log q(x)]$ + +$$ +\begin{array}{l} E _ {q (x)} [ \nabla_ {x} \cdot f _ {i} (x) ] = \int q (x) \sum_ {i = 1} ^ {d} [ \nabla_ {x _ {i}} f _ {i} (x) ] d x \\ = \int \sum_ {i = 1} ^ {d} q (x) \nabla_ {x _ {i}} f _ {i} (x) d x \\ = \sum_ {i = 1} ^ {d} \int_ {x _ {- i}} \int_ {x _ {i}} q (x) \nabla_ {x _ {i}} f _ {i} (x) d x _ {i} d x _ {- i} \\ = \sum_ {i = 1} ^ {d} \int \left[ \left[ q (x) \int \nabla_ {x _ {i}} f _ {i} (x) d x _ {i} \right] _ {- \infty} ^ {\infty} - \int \nabla_ {x _ {i}} q (x) \int \nabla_ {x _ {i}} f _ {i} (x) d x _ {i} \right] d x _ {- i} \\ = \sum_ {i = 1} ^ {d} \int \left[ - \int \nabla_ {x _ {i}} q (x) f _ {i} (x) d x _ {i} \right] d x _ {- i} \\ = \sum_ {i = 1} ^ {d} \int \left[ - \int q (x) \nabla_ {x _ {i}} \log q (x) f _ {i} (x) d x _ {i} \right] d x _ {- i} \\ = \sum_ {i = 1} ^ {d} - \int \int q (x) \nabla_ {x _ {i}} \log q (x) f _ {i} (x) d x _ {i} d x _ {- i} \\ = \sum_ {i = 1} ^ {d} - E _ {q (x)} \left[ \nabla_ {x _ {i}} \log q (x) f _ {i} (x) \right] \\ = - E _ {q (x)} [ f (x) ^ {\top} \nabla_ {x} \log q (x) ] \\ \end{array} +$$ + +This equality also follows directly from the Stein operator using the generator method to the Langevin diffusion (Barbour, 1988). 
+ +# F.1.2 DSM ELBO + +Using the "expectation by parts", we have: + +$$ +\mathbb {E} _ {q (u _ {t} | x)} [ \nabla_ {u _ {t}} \cdot g ^ {2} (t) s _ {\theta} (u _ {t}, t) ] = - \mathbb {E} _ {q (u _ {t} | x)} [ (g ^ {2} (t) s _ {\theta} (u _ {t}, t)) ^ {\top} \nabla_ {u _ {t}} \log q (u _ {t} | x) ] +$$ + +Also we have, for $s_{\theta}$ evaluated at $(u_t, t)$ , by completing the square, + +$$ +- \frac {1}{2} | | s _ {\theta} | | _ {g ^ {2} (t)} + s _ {\theta} ^ {\top} g ^ {2} (t) \nabla \log q (u _ {t} | x) = - \frac {1}{2} | | s _ {\theta} - \nabla \log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2} +. 5 | | \nabla \log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2} +$$ + +The two together give us: + +$$ +\begin{array}{l} \log p (x) \geq \mathbb {E} _ {q (u _ {T} | x)} \left[ \log \pi \right] + \int_ {0} ^ {T} \left[ \mathbb {E} _ {q (u _ {t} | x)} \Big [ - \nabla \cdot g ^ {2} s _ {\theta} - . 5 | | s _ {\theta} | | _ {g ^ {2} (t)} ^ {2} + \nabla \cdot f \Big ] d t \right] \\ = \mathbb {E} _ {q (u _ {T} | x)} \left[ \log \pi \right] + \int_ {0} ^ {T} \left[ \mathbb {E} _ {q (u _ {t} | x)} \left[ \left(g ^ {2} s _ {\theta}\right) ^ {\top} \nabla_ {u _ {t}} \log q (u _ {t} | x) - . 5 | | s _ {\theta} | | _ {g ^ {2} (t)} ^ {2} + \nabla \cdot f \right] d t \right] \\ = \mathbb {E} _ {q (u _ {T} | x)} \left[ \log \pi \right] + \int_ {0} ^ {T} \left[ \mathbb {E} _ {q (u _ {t} | x)} \left[ - \frac {1}{2} | | s _ {\theta} - \nabla \log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2} \right. \right. \\ \left. + . 
5 \left| | \nabla \log q \left(u _ {t} | x\right) \right| _ {g ^ {2} (t)} ^ {2} + \nabla_ {u _ {t}} \cdot f \right] \Biggr ] d t \tag {51} \\ \end{array} +$$ + +# F.2 NOISE PREDICTION + +We have that for normal $\mathcal{N}(\mathbf{y}_s;\mathbf{m}_{s|0},\pmb{\Sigma}_{s|0})$ , we can sample $\mathbf{y}_s$ with normal noise $\epsilon \sim \mathcal{N}(0,I)$ and $\mathbf{y}_s = \mathbf{m}_{s|0} + \mathbf{L}\epsilon$ where $\mathbf{L}$ is the cholesky decomposition of $\pmb{\Sigma}_{s|0}$ . Then, the score is + +$$ +\begin{array}{l} \nabla_ {\mathbf {y} _ {s}} \log q (\mathbf {y} _ {s} | \mathbf {y} _ {0}) \Bigg | _ {\mathbf {y} _ {s} = \mathbf {m} _ {s | 0} + \mathbf {L} \epsilon} \\ = - \boldsymbol {\Sigma} _ {s | 0} ^ {- 1} \left(\mathbf {y} _ {s} - \mathbf {m} _ {s | 0}\right) \\ = - \boldsymbol {\Sigma} _ {s | 0} ^ {- 1} \left(\left[ \mathbf {m} _ {s | 0} + \mathbf {L} \epsilon \right] - \mathbf {m} _ {s | 0}\right) \\ = - \boldsymbol {\Sigma} _ {s | 0} ^ {- 1} \left(\mathbf {L} \epsilon\right) \\ = - \left(\mathbf {L} \mathbf {L} ^ {\top}\right) ^ {- 1} \left(\mathbf {L} \epsilon\right) \\ = - \left(\mathbf {L} ^ {\top}\right) ^ {- 1} \mathbf {L} ^ {- 1} \mathbf {L} \epsilon \\ = - \left(\mathbf {L} ^ {\top}\right) ^ {- 1} \epsilon = - \left(\mathbf {L} ^ {- 1}\right) ^ {\top} \epsilon = - \mathbf {L} ^ {\top , - 1} \epsilon \\ \end{array} +$$ + +Parameterize $s_{\theta}(\mathbf{y}_s,s)$ as $s_{\theta}(\mathbf{y}_s,s) = -\mathbf{L}^{\top, -1}\epsilon_{\theta}(\mathbf{y},s)$ . 
This gives + +$$ +\begin{array}{l} \frac {1}{2} \| - \mathbf {L} ^ {\top , - 1} \epsilon_ {\theta} (\mathbf {y}, s) \quad - \quad - \mathbf {L} ^ {\top , - 1} \epsilon \| _ {g _ {\phi} ^ {2} (s)} ^ {2} \\ = \frac {1}{2} \| \mathbf {L} ^ {\top , - 1} \epsilon \quad - \quad \mathbf {L} ^ {\top , - 1} \epsilon_ {\theta} (\mathbf {y}, s) \| _ {g _ {\phi} ^ {2} (s)} ^ {2} \\ = \frac {1}{2} \left(\mathbf {L} ^ {\top , - 1} \epsilon - \mathbf {L} ^ {\top , - 1} \epsilon_ {\theta} (\mathbf {y}, s)\right) ^ {\top} g _ {\phi} ^ {2} (s) \left(\mathbf {L} ^ {\top , - 1} \epsilon - \mathbf {L} ^ {\top , - 1} \epsilon_ {\theta} (\mathbf {y}, s)\right) \\ = \frac {1}{2} \left(\mathbf {L} ^ {\top , - 1} \left[ \epsilon - \epsilon_ {\theta} (\mathbf {y}, s) \right]\right) ^ {\top} g _ {\phi} ^ {2} (s) \left(\mathbf {L} ^ {\top , - 1} \left[ \epsilon - \epsilon_ {\theta} (\mathbf {y}, s) \right]\right) \\ \end{array} +$$ + +We can also use this insight to analytically compute the quadratic score term (following is computed per data-dimension, so must be multiplied by $D$ when computing the ELBO): + +$$ +\begin{array}{l} \mathbb {E} _ {\mathbf {y} _ {0}} \mathbb {E} _ {\mathbf {y} _ {s} | \mathbf {y} _ {0}} \left[ \frac {1}{2} \| \nabla_ {\mathbf {y} _ {s}} \log q _ {\phi} (\mathbf {y} _ {s} | \mathbf {y} _ {0}) \| _ {g _ {\phi} ^ {2} (s)} ^ {2} \right] = \mathbb {E} _ {\mathbf {y} _ {0}} \mathbb {E} _ {\mathbf {y} _ {s} | \mathbf {y} _ {0}} \left[ \left(\nabla_ {\mathbf {y} _ {s}} \log q _ {\phi} (\mathbf {y} _ {s} | \mathbf {y} _ {0})\right) ^ {\top} g _ {\phi} ^ {2} (s) \left(\nabla_ {\mathbf {y} _ {s}} \log q _ {\phi} (\mathbf {y} _ {s} | \mathbf {y} _ {0})\right) \right] \\ = \mathbb {E} _ {\mathbf {y} _ {0}} \mathbb {E} _ {\mathbf {y} _ {s} | \mathbf {y} _ {0}} \left[ \left(- \mathbf {L} ^ {\top , - 1} \epsilon\right) ^ {\top} g _ {\phi} ^ {2} (s) \left(- \mathbf {L} ^ {\top , - 1} \epsilon\right) \right] \\ = \mathbb {E} _ {\mathbf {y} _ {0}} \mathbb {E} _ {\mathbf {y} _ {s} | 
\mathbf {y} _ {0}} \left[ \epsilon^ {\top} (- \mathbf {L} ^ {- 1}) g _ {\phi} ^ {2} (s) (- \mathbf {L} ^ {\top , - 1}) \epsilon \right] \\ = \mathbb {E} _ {\mathbf {y} _ {0}} \mathbb {E} _ {\mathbf {y} _ {s} | \mathbf {y} _ {0}} \left[ \epsilon^ {\top} \left(\mathbf {L} ^ {- 1} g _ {\phi} ^ {2} (s) \mathbf {L} ^ {\top , - 1}\right) \epsilon \right] \\ = \mathbb {E} _ {\mathbf {y} _ {0}} \mathbb {E} _ {\epsilon} \left[ \epsilon^ {\top} \left(\mathbf {L} ^ {- 1} g _ {\phi} ^ {2} (s) \mathbf {L} ^ {\top , - 1}\right) \epsilon \right] \\ = \mathbb {E} _ {\epsilon} \left[ \epsilon^ {\top} \left(\mathbf {L} ^ {- 1} g _ {\phi} ^ {2} (s) \mathbf {L} ^ {\top , - 1}\right) \epsilon \right] \\ = \operatorname {T r a c e} \left(\mathbf {L} ^ {- 1} g _ {\phi} ^ {2} (s) \mathbf {L} ^ {\top , - 1}\right) \\ \end{array} +$$ + +# G ELBOS IN STATIONARY PARAMETERIZATION + +We use the stationary parameterization described in appendix C. We now specialize the ELBO to the linear stationary parameterization. + +Recall $f_{\phi}(\mathbf{y},s) = -[\mathbf{Q}_{\phi}(s) + \mathbf{D}_{\phi}(s)]\mathbf{y}$ . Recall $g_{\phi}(s) = \sqrt{2\mathbf{D}_{\phi}(s)}$ We have $g_{\phi}^{2}(s) = 2\mathbf{D}_{\phi}(s)$ . 
We can write the MDM ISM ELBO as + +$$ +\mathcal {L} ^ {\text {m i s m}} = \mathbb {E} _ {v \sim q _ {\gamma}} \left[ \mathbb {E} _ {s \sim \operatorname {U n i f} (0, T)} \left[ \ell_ {s} ^ {(i s m)} \right] + \ell_ {T} + \ell_ {q} \right] \tag {52} +$$ + +where + +$$ +\ell_ {s _ {\theta}} = - \frac {1}{2} \| s _ {\theta} (\mathbf {y} _ {s}, s) \| _ {\underbrace {2 \mathbf {D} _ {\phi} (s)} _ {g _ {\phi} ^ {2}}} ^ {2} +$$ + +$$ +\ell_ {\mathrm {d i v - f g s}} = \nabla_ {\mathbf {y} _ {s}} \cdot \left[ \underbrace {- [ \mathbf {Q} _ {\phi} (s) + \mathbf {D} _ {\phi} (s) ] \mathbf {y} _ {s}} _ {f _ {\phi}} - \underbrace {2 \mathbf {D} _ {\phi} (s)} _ {g _ {\phi} ^ {2}} s _ {\theta} (\mathbf {y} _ {s}, s) \right] +$$ + +$$ +\ell_ {s} ^ {\text {i s m}} = \mathbb {E} _ {\substack {q _ {\phi , s, (x, v)} \\ \text {depends on } Q, D}} \left[ \ell_ {s \theta} + \ell_ {\text {div - f g s}} \right] \tag{53} +$$ + +$$ +\ell_ {T} = \mathbb {E} _ {\substack {q _ {\phi , T}, (x, v) \\ \text{depends on} \mathbf {Q}, \mathbf {D}}} \Big [ \log \pi_ {\theta} (\mathbf {y} _ {T}) \Big ] +$$ + +$$ +\ell_ {q} = - \log q _ {\gamma} (v | x) +$$ + +For the DSM form, + +$$ +\mathcal {L} ^ {\mathrm {m d s m}} = \mathbb {E} _ {v \sim q _ {\gamma}} \left[ \mathbb {E} _ {s \sim \operatorname {U n i f} (0, T)} \left[ \ell_ {s} ^ {(d s m)} \right] + \ell_ {T} + \ell_ {q} \right] \tag {54} +$$ + +where + +$$ +\begin{array}{l} \ell_ {\mathrm {d i v - f}} = \nabla_ {\mathbf {y} _ {s}} \cdot \underbrace {- [ \mathbf {Q} _ {\phi} (s) + \mathbf {D} _ {\phi} (s) ] \mathbf {y} _ {s}} _ {f _ {\phi}} \\ \ell_{\text{fwd - score}} = \frac{1}{2}\bigg|\bigg|\underbrace{\nabla_{\mathbf{y}_{s}}\log q_{\phi}(\mathbf{y}_{s}|\mathbf{y}_{0})}_{\text{depends on}\mathbf{Q},\mathbf{D}}\| \underbrace{^ {2}\mathbf{D}_{\phi}(s)}_{g_{\phi}^{2}} \\ \ell_ {\text {n e g - s c o r e d i f f}} = - \frac {1}{2} \| s _ {\theta} (\mathbf {y} _ {s}, s) - \underbrace {\nabla_ {\mathbf {y} _ {s}} \log q _ {\phi} (\mathbf {y} _ {s} 
| \mathbf {y} _ {0})} _ {\text {d e p e n d s o n Q , D}} \| _ {\underbrace {2 \mathbf {D} _ {\phi} (s)} _ {g _ {\phi} ^ {2}}} ^ {2} \\ \ell_ {s} ^ {(d s m)} = \mathbb {E} _ {\substack {q _ {\phi , s, (x, v)} \\ \text{depends on} \mathbf {Q}, \mathbf {D}}} \left[ \ell_ {\text{neg - score diff}} + \ell_ {\text{fwd - score}} + \ell_ {\text{div - f}} \right] \\ \end{array} +$$ + +# H ALGORITHMS + +# H.1 GENERIC TRANSITION KERNEL + +Algorithm 2 Get transition distribution $\mathbf{y}_s|x$ +Input: data $x$ time $s$ A, $g$ +compute: $\mathbf{A}(s)$ and $g(s)$ +compute: $\mathbf{M}_s = \int_0^s\mathbf{A}(t)dt$ (integrated drift) +compute: $\mathbf{N}_s = \int_0^s g^2 (t)dt$ (integrated diffusions squared) +compute: $\gamma_{s|0} = \exp \left(\mathbf{M}_s\right)$ (mean coefficient) +set: $\mathbf{y}_0 = [x,0_1,\dots ,0_{K - 1}],\pmb{\Sigma}_{0,zz} = \mathbf{0}$ , and $\pmb{\Sigma}_{0,zv},\pmb{\Sigma}_{0,vv}$ to chosen initial distribution +compute: $\mathbf{m}_{s|0} = \gamma_{s|0}\mathbf{y}_0$ (mean) +compute: + +$$ +\binom {\mathbf {C} _ {s}} {\mathbf {H} _ {s}} = \exp \left[ \binom {\mathbf {M} _ {s}} {\mathbf {0}} \binom {\mathbf {N} _ {s}} {- \mathbf {M} _ {s} ^ {\top}} \right] \binom {\boldsymbol {\Sigma} _ {0}} {\mathbf {I}} \quad (\text {i n g r e d i e n t s f o r c o v .}) \tag {55} +$$ + +compute: $\pmb{\Sigma}_{s|0} = \mathbf{C}_s(\mathbf{H}_s)^{-1}$ (cov.) + +Output: $\mathcal{N}(\mathbf{m}_{s|0},\pmb{\Sigma}_{s|0})$ + +# H.2 TRANSITIONS WITH $Q, D$ + +Current param matrices $\tilde{\mathbf{Q}}_{\phi},\tilde{\mathbf{D}}_{\phi}$ and along with fixed time-in scalar-out functions $b_{q}(s),b_{d}(s)$ and their known integrals $B_{q}(s),B_{d}(s)$ . $q_{\gamma}(v_0|z_0 = x)$ taken to be parameterless so that $v_{0}\sim \mathcal{N}(0,I)$ . Model params are $s_\theta$ fixed $\pi_{\theta}$ . 
+ +Algorithm 3 Get Q, D and their integrated terms M, N +Input: time $s$ and current params $\phi$ +compute: $[b_q]_s = \int_0^s b_q(\nu)d\nu$ using known integral $B_{q}(s) - B_{q}(0)$ +compute: $[b_d]_s = \int_0^s b_d(\nu)d\nu$ using known integral $B_{d}(s) - B_{d}(0)$ . +compute: $[\mathbf{Q}_{\phi}]_{s} = [b_{q}]_{s}\cdot [\tilde{\mathbf{Q}}_{\phi} - \tilde{\mathbf{Q}}_{\phi}^{\top}]$ for current params $\tilde{\mathbf{Q}}_{\phi}$ . +compute: $[\mathbf{D}_{\phi}]_{s} = [b_{d}]_{s}\cdot [\tilde{\mathbf{D}}_{\phi}\tilde{\mathbf{D}}_{\phi}^{\top}]$ for current params $\tilde{\mathbf{D}}_{\phi}$ . +compute: $\mathbf{M}_s = -([\mathbf{Q}_{\phi}]_s + [\mathbf{D}_{\phi}]_s)$ (M just a variable name) +compute: $\mathbf{N}_s = [2\mathbf{D}_{\phi}]_s = 2\cdot [\mathbf{D}_{\phi}]_s$ (N just a variable name) +compute: $\mathbf{Q}_s = b_q(s)\cdot [\tilde{\mathbf{Q}}_{\phi} - \tilde{\mathbf{Q}}_{\phi}^{\top}]$ (not integrated) +compute: $\mathbf{D}_s = b_d(s)\cdot [\tilde{\mathbf{D}}_{\phi}\tilde{\mathbf{D}}_{\phi}^{\top}]$ (not integrated) +compute: $A_s = -[\mathbf{Q}_s + \mathbf{D}_s]$ (drift coef.) +compute: $g_s^2 = 2\mathbf{D}_s$ (diffusion coef. squared) +Output: $\mathbf{A}_s, g_s^2, \mathbf{M}_s, \mathbf{N}_s$ + +# H.3 ELBO ALGORITHMS + +Input: Sample $\mathbf{y}_0 = (x, v)$ and time $s$ . Current params $\phi$ + +set: $\mathbf{A}_s,g_s^2,\mathbf{M}_s,\mathbf{N}_s\gets$ algorithm 3 + +compute: $\mathbf{m}_{s|0} = \exp \left(\mathbf{M}_s\right)\mathbf{y}_0$ (transition mean) + +compute: ingredients for transition cov. matrix: + +$$ +\binom {\mathbf {C} _ {s}} {\mathbf {H} _ {s}} = \exp \left[ \binom {\mathbf {M} _ {s}} {\mathbf {0}} \binom {\mathbf {N} _ {s}} {- \mathbf {M} _ {s} ^ {\top}} \right] \binom {\mathbf {0}} {\mathbf {I}} \tag {56} +$$ + +compute: $\Sigma_{s|0} = \mathbf{C}_s(\mathbf{H}_s)^{-1}$ (transition cov). 
+ +instantiate: $q_{\phi ,s,(x,v)} = q_{\phi}(\mathbf{y}_s|\mathbf{y}_0) = \mathcal{N}(\mathbf{m}_{s|0},\pmb {\Sigma}_{s|0})$ + +Output: $q_{\phi ,s,(x,v)},A_s,g_s^2$ + +Algorithm 4 Get transition distributions +Algorithm 5 Compute ELBO with ism or dsm +```txt +input: Data point $x$ and current params $\theta, \phi, \gamma$ +draw: an aux. sample $v \sim q_{\gamma}(v|x)$ +draw: a sample $s \sim \mathrm{Unif}(0,T)$ +set: $\mathbf{y}_0 = (x,v)$ +set: $q_{\phi,s,\mathbf{y}_0}, A_s, g_s^2 \gets \text{algorithm 4 called on } \mathbf{y}_0, s, \phi$ +draw: $\mathbf{y}_s \sim q_{\phi,s,\mathbf{y}_0}$ +compute: $\ell_s$ with $\mathrm{dsm}(s)$ (algorithm 6) or $\mathrm{ism}(s)$ (algorithm 7) on $\mathbf{y}_s, \theta, A_s, g_s^2, q_{\phi,s,\mathbf{y}_0}$ +set: $q_{\phi,T,\mathbf{y}_0}, --, -- \gets \text{algorithm 4 called on } \mathbf{y}_0, T, \phi$ +draw: $\mathbf{y}_T \sim q_{\phi,T,\mathbf{y}_0}$ +output: $\ell_s + \log \pi_\theta(\mathbf{y}_T) - \log q_\gamma(v)$ +``` + +Algorithm 6 Compute $\mathrm{dsm}(s)$ +```txt +input: $\mathbf{y}_s, \theta, A_s, g_s^2, q_{\phi,s,\mathbf{y}_0}$ . 
+compute: fwd-score = $\nabla_{\mathbf{y}_s}$ log $q_{\phi}(\mathbf{y}_s|\mathbf{y}_0)$ +compute: model-score = $s_\theta(\mathbf{y}_s, s)$ +compute: fwd-score-term = $\frac{1}{2}(\mathrm{fwd-score})^\top g_s^2$ (fwd-score) +compute: score-diff = model-score - fwd-score +compute: diff-term = $-\frac{1}{2}$ score-diff $^\top g_s^2$ score-diff +compute: div-f = $\nabla_{\mathbf{y}_s} \cdot A_s\mathbf{y}_s$ +output: $\mathrm{dsm}(s) = \mathrm{fwd-score-term} + \mathrm{diff-term} + \mathrm{div-f}$ +``` + +Algorithm 7 Compute $\operatorname{ism}(s)$ +```txt +input: $\mathbf{y}_s,\theta ,A_s,g_s^2,q_{\phi ,s,\mathbf{y}_0}$ +compute: model-score $= s_{\theta}(\mathbf{y}_{s},s)$ +compute: score-term $= -\frac{1}{2}$ model-score $^\top g_s^2$ model-score +compute: div-gs $= \nabla_{\mathbf{y}_s}\cdot g_s^2 s_\theta (\mathbf{y}_s,s)$ +compute: div-f $= \nabla_{\mathbf{y}_s}\cdot A_s\mathbf{y}_s$ +compute: div-term $= -$ div-gs $+$ div-f +output: ism(s) $=$ score-term $+$ div-term +``` + +# I VALID ELBO WITH TRUNCATION + +The integrand in the ELBO and its gradients are not bounded at time 0. Therefore, following Sohl-Dickstein et al. (2015) and Song et al. (2021) the integrand in eq. (7) is integrated from $[\epsilon, T]$ , rather than $[0, T]$ . However, that integral is not a valid lower bound on $\log p_{\theta}(x)$ . Instead, it can be viewed as a proper lower bound on the prior for a latent variable $\mathbf{y}_{\epsilon}$ . Therefore, to provide a bound for the data, one can introduce a likelihood and substitute the prior lower bound into a standard variational bound that integrates out the latent. + +To provide a valid lower bound for multivariate diffusions, we extend theorem 6 in Song et al. (2021) from univariate to multivariate diffusions. + +Theorem 3. 
For transition kernel $q_{\phi}(\mathbf{y}_s \mid \mathbf{y}_0)$ , we can lower bound the model likelihood at time 0 as follows, for any $\epsilon > 0$ + +$$ +\log p _ {\theta} (x) \geq \mathbb {E} _ {q _ {\phi} \left(\mathbf {y} _ {0} ^ {v} \mid x\right)} \mathbb {E} _ {q _ {\phi} \left(\mathbf {y} _ {\epsilon} \mid \mathbf {y} _ {0}\right)} \left[ \log \frac {p _ {\theta} \left(\mathbf {y} _ {0} \mid \mathbf {y} _ {\epsilon}\right)}{q _ {\phi} \left(\mathbf {y} _ {\epsilon} \mid \mathbf {y} _ {0}\right)} + \mathcal {L} _ {m d m} (\mathbf {y} _ {\epsilon}, \epsilon) - \log q _ {\phi} \left(\mathbf {y} _ {0} ^ {v} \mid x\right) \right], \tag {57} +$$ + +where $\mathcal{L}_{mdm}(\mathbf{y}_{\epsilon},\epsilon)$ is defined as + +$$ +\mathcal {L} _ {m d m} (\mathbf {y} _ {\epsilon}, \epsilon) = \mathbb {E} _ {q _ {\phi} (\mathbf {y} _ {> \epsilon} | \mathbf {y} _ {\epsilon})} \left[ \log \pi_ {\theta} (\mathbf {y} _ {T}) - \int_ {\epsilon} ^ {T} \frac {1}{2} \| s _ {\phi} \| _ {g _ {\phi}} ^ {2} - \frac {1}{2} \| s _ {\theta} - s _ {\phi} \| _ {g _ {\phi}} ^ {2} + \nabla \cdot f _ {\phi} \right]. +$$ + +Proof. 
For transition kernel $q_{\phi}(\mathbf{y}_s \mid \mathbf{y}_0)$ , we can lower bound the model likelihood at time 0 following an application of the variational bound + +$$ +\begin{array}{l} \log p _ {\theta} (x) = \log \int_ {v _ {0}} p _ {\theta} (\mathbf {y} _ {0} = [ x, v _ {0} ]) d v _ {0} \\ = \log \int_ {v _ {0}, \mathbf {y} _ {\epsilon}} p _ {\theta} (\mathbf {y} _ {0}, \mathbf {y} _ {\epsilon}) d v _ {0} d \mathbf {y} _ {\epsilon} \\ = \log \int_ {v _ {0}, \mathbf {y} _ {\epsilon}} q _ {\phi} (\mathbf {y} _ {\epsilon} \mid \mathbf {y} _ {0}) \frac {q (v _ {0} \mid x)}{q (v _ {0} \mid x)} \frac {p _ {\theta} (\mathbf {y} _ {0} , \mathbf {y} _ {\epsilon})}{q _ {\phi} (\mathbf {y} _ {\epsilon} \mid \mathbf {y} _ {0})} d v _ {0} d \mathbf {y} _ {\epsilon} \\ = \log \int_ {v _ {0}, \mathbf {y} _ {\epsilon}} q _ {\phi} (\mathbf {y} _ {\epsilon} \mid \mathbf {y} _ {0}) \frac {q (v _ {0} \mid x)}{q (v _ {0} \mid x)} \frac {p _ {\theta} (\mathbf {y} _ {0} \mid \mathbf {y} _ {\epsilon}) p _ {\theta} (\mathbf {y} _ {\epsilon})}{q _ {\phi} (\mathbf {y} _ {\epsilon} \mid \mathbf {y} _ {0})} d v _ {0} d \mathbf {y} _ {\epsilon} \\ \geq \mathbb {E} _ {q (v _ {0} | x) q _ {\phi} (\mathbf {y} _ {\epsilon} | \mathbf {y} _ {0})} \left[ \log \frac {p _ {\theta} (\mathbf {y} _ {0} \mid \mathbf {y} _ {\epsilon})}{q _ {\phi} (\mathbf {y} _ {\epsilon} \mid \mathbf {y} _ {0})} - \log q _ {\phi} (\mathbf {y} _ {0} ^ {v} \mid x) + \log p _ {\theta} (\mathbf {y} _ {\epsilon}) \right] \\ \end{array} +$$ + +A lower bound for $\log p_{\theta}(\mathbf{y}_{\epsilon})$ can be derived in a similar manner to eq. 
(7), such that + +$$ +\log p _ {\theta} (\mathbf {y} _ {\epsilon}) \geq \mathcal {L} _ {\mathrm {m d m}} (\mathbf {y} _ {\epsilon}, \epsilon) = \mathbb {E} _ {q _ {\phi} (\mathbf {y} _ {> \epsilon} | \mathbf {y} _ {\epsilon})} \left[ \log \pi_ {\theta} (\mathbf {y} _ {T}) - \int_ {\epsilon} ^ {T} \frac {1}{2} \| s _ {\phi} \| _ {g _ {\phi}} ^ {2} - \frac {1}{2} \| s _ {\theta} - s _ {\phi} \| _ {g _ {\phi}} ^ {2} + \nabla \cdot f _ {\phi} \right]. +$$ + +The choice of $p_{\theta}(\mathbf{y}_0 \mid \mathbf{y}_{\epsilon})$ is arbitrary, however following Sohl-Dickstein et al. (2015); Song et al. (2021) we let $p_{\theta}(\mathbf{y}_0 \mid \mathbf{y}_{\epsilon})$ be Gaussian with mean $\mu_{p_{\theta},\epsilon}$ and covariance $\Sigma_{p_{\theta},\epsilon}$ . Suppose $q_{\phi}(\mathbf{y}_{\epsilon} \mid \mathbf{y}_0) = \mathcal{N}(\mathbf{y}_{\epsilon} \mid \mathbf{A}\mathbf{y}_0, \Sigma)$ , then we select the following mean $\mu_{p_{\theta},\epsilon}$ and covariance $\Sigma_{p_{\theta},\epsilon}$ for $p_{\theta}(\mathbf{y}_0 \mid \mathbf{y}_{\epsilon})$ + +$$ +\mu_ {p _ {\theta}, \epsilon} = \mathbf {A} ^ {- 1} \Sigma s _ {\theta} (\mathbf {y} _ {\epsilon}, \epsilon) + \mathbf {A} ^ {- 1} \mathbf {y} _ {\epsilon} +$$ + +$$ +\Sigma_ {p _ {\theta}, \epsilon} = \mathbf {A} ^ {- 1} \Sigma \mathbf {A} ^ {- \top} +$$ + +where $\mu_{p_{\theta},\epsilon},\Sigma_{p_{\theta},\epsilon}$ are derived using Tweedie's formula (Efron, 2011) by setting $\mu_{\epsilon} = \mathbb{E}[\mathbf{y}_0\mid \mathbf{y}_{\epsilon}]$ and $\Sigma_{\epsilon} = \mathrm{Var}(\mathbf{y}_0\mid \mathbf{y}_{\epsilon})$ + +We next derive this choice as an approximation of the optimal Gaussian likelihood. + +# I.1 LIKELIHOOD DERIVATION + +Suppose $\mathbf{y}_0\sim q_0(\mathbf{y}_0)$ and $\mathbf{y}_{\epsilon}\sim \mathcal{N}(\mathbf{y}_{\epsilon}\mid A\mathbf{y}_0,\Sigma)$ . Here, $A,\Sigma$ are the mean coefficient and covariance derived from the transition kernel at time $\epsilon$ . 
We use Tweedie's formula to get the mean and covariance of $\mathbf{y}_0$ given $\mathbf{y}_{\epsilon}$ under $q$ . This mean and covariance feature the true score $\nabla_{\mathbf{y}_{\epsilon}}\log q(\mathbf{y}_{\epsilon})$ . We replace the score with the score model $s_\theta$ and then set $p_{\theta}(\mathbf{y}_0|\mathbf{y}_{\epsilon})$ to have the resulting approximate mean and covariance. We make this choice because the optimal $p_{\theta}(\mathbf{y}_0|\mathbf{y}_{\epsilon})$ equals the true $q(\mathbf{y}_0|\mathbf{y}_{\epsilon})$ as discussed throughout the work. + +Here $\mathbf{y}_0 = [\mathbf{x}_0,\mathbf{v}_0]$ where $\mathbf{x}_0\sim q_{\mathrm{data}}$ + +Let $\eta$ be the natural parameter for the multivariate Gaussian likelihood $\mathcal{N}(\mathbf{y}_{\epsilon} \mid A\mathbf{y}_0, \Sigma)$ . Then, Tweedie's formula (Efron, 2011) states that: + +$$ +\mathbb {E} [ \eta \mid \mathbf {y} _ {\epsilon} ] = \nabla_ {\mathbf {y} _ {\epsilon}} l (\mathbf {y} _ {\epsilon}) - \nabla_ {\mathbf {y} _ {\epsilon}} l _ {0} (\mathbf {y} _ {\epsilon}) +$$ + +$l(\mathbf{y}_{\epsilon}) = \log q(\mathbf{y}_{\epsilon})$ +- $s_{\theta}(\mathbf{y}_{\epsilon}, \epsilon)$ is taken to be the true score $\nabla_{\mathbf{y}_{\epsilon}} \log q(\mathbf{y}_{\epsilon})$ so that $\nabla_{\mathbf{y}_{\epsilon}} l(\mathbf{y}_{\epsilon}) = s_{\theta}(\mathbf{y}_{\epsilon}, \epsilon)$ . +- $l_{0}$ is the log of the base distribution defined in the exponential family parameterization. + +The base distribution is a multivariate Gaussian with mean 0 and covariance $\Sigma$ , therefore $\nabla_{\mathbf{y}_{\epsilon}}l_{0}(\mathbf{y}_{\epsilon}) = -\Sigma^{-1}\mathbf{y}_{\epsilon}$ + +$$ +\mathbb {E} [ \eta \mid \mathbf {y} _ {\epsilon} ] = s _ {\theta} (\mathbf {y} _ {\epsilon}, \epsilon) + \Sigma^ {- 1} \mathbf {y} _ {\epsilon}. +$$ + +However, Tweedie's formula is not directly applicable since our $\mathbf{y}_{\epsilon}$ is not directly normal with mean $\mathbf{y}_0$ . 
Instead, to derive the conditional mean of $\mathbf{y}_0$ given $\mathbf{y}_{\epsilon}$ , we use the relation $\eta = \Sigma^{-1}\mathbf{A}\mathbf{y}_0$ and the linearity of conditional expectation to get + +$$ +\begin{array}{l} \mathbb {E} \left[ \mathbf {y} _ {0} \mid \mathbf {y} _ {\epsilon} \right] = \mathbb {E} \left[ A ^ {- 1} \Sigma \eta \mid \mathbf {y} _ {\epsilon} \right] \\ = A ^ {- 1} \Sigma \mathbb {E} [ \eta \mid \mathbf {y} _ {\epsilon} ] \\ = A ^ {- 1} \Sigma \left(s _ {\theta} \left(\mathbf {y} _ {\epsilon}, \epsilon\right) + \Sigma^ {- 1} \mathbf {y} _ {\epsilon}\right) \\ = A ^ {- 1} \Sigma s _ {\theta} \left(\mathbf {y} _ {\epsilon}, \epsilon\right) + A ^ {- 1} \mathbf {y} _ {\epsilon}. \\ \end{array} +$$ + +For the variance, we use the following relation $\mathbf{y}_{\epsilon} = A\mathbf{y}_0 + \sqrt{\Sigma}\epsilon$ , which implies that + +$$ +\mathbf {y} _ {0} = A ^ {- 1} \mathbf {y} _ {\epsilon} - A ^ {- 1} \sqrt {\Sigma} \epsilon +$$ + +$$ +\operatorname {V a r} \left(\mathbf {y} _ {0} \mid \mathbf {y} _ {\epsilon}\right) = A ^ {- 1} \Sigma A ^ {- T}. 
+$$ + +Therefore, for the model posterior distribution $p_{\theta}(\mathbf{y}_0 \mid \mathbf{y}_{\epsilon})$ we choose a Normal with mean and covariance + +$$ +\begin{array}{l} \mu_ {p _ {\theta}, \epsilon} = A ^ {- 1} \Sigma s _ {\theta} (\mathbf {y} _ {\epsilon}, \epsilon) + A ^ {- 1} \mathbf {y} _ {\epsilon} \\ \Sigma_ {p _ {\theta}, \epsilon} = A ^ {- 1} \Sigma A ^ {- T} \\ \end{array} +$$ \ No newline at end of file diff --git a/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/images.zip b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..e03c1a1ae7f027b480304ecda5efb1c074ce9e9d --- /dev/null +++ b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38f65dda67e16b56d4f286df1a9f33236eaec628eeb712cc40b35f23d0070e8c +size 1236552 diff --git a/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/layout.json b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..327b6e2f115438752d510571be69ebc88df009f5 --- /dev/null +++ b/2023/Where to Diffuse, How to Diffuse, and How to Get Back_ Automated Learning for Multivariate Diffusions/layout.json @@ -0,0 +1,28713 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 504, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 504, + 137 + ], + "type": "text", + "content": "WHERE TO DIFFUSE, HOW TO DIFFUSE, AND HOW TO GET BACK: AUTOMATED LEARNING FOR MULTIVARI-ATE DIFFUSIONS" + } + ] + } + ], + 
"index": 1 + }, + { + "bbox": [ + 110, + 153, + 369, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 153, + 369, + 166 + ], + "spans": [ + { + "bbox": [ + 110, + 153, + 369, + 166 + ], + "type": "text", + "content": "Raghav Singhal\\*,1, Mark Goldstein\\*,1, Rajesh Ranganath\\*,2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 166, + 381, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 166, + 381, + 178 + ], + "spans": [ + { + "bbox": [ + 110, + 166, + 381, + 178 + ], + "type": "text", + "content": "Courant Institute of Mathematical Sciences1, New York University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 178, + 304, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 178, + 304, + 190 + ], + "spans": [ + { + "bbox": [ + 110, + 178, + 304, + 190 + ], + "type": "text", + "content": "Center for Data Science2, New York University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 276, + 217, + 335, + 230 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 217, + 335, + 230 + ], + "spans": [ + { + "bbox": [ + 276, + 217, + 335, + 230 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 243, + 471, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 243, + 471, + 431 + ], + "spans": [ + { + "bbox": [ + 140, + 243, + 471, + 431 + ], + "type": "text", + "content": "Diffusion-based generative models (DBGMs) perturb data to a target noise distribution and reverse this process to generate samples. The choice of noising process, or inference diffusion process, affects both likelihoods and sample quality. For example, extending the inference process with auxiliary variables leads to improved sample quality. 
While there are many such multivariate diffusions to explore, each new one requires significant model-specific analysis, hindering rapid prototyping and evaluation. In this work, we study Multivariate Diffusion Models (MDMs). For any number of auxiliary variables, we provide a recipe for maximizing a lower-bound on the MDMs likelihood without requiring any model-specific analysis. We then demonstrate how to parameterize the diffusion for a specified target noise distribution; these two points together enable optimizing the inference diffusion process. Optimizing the diffusion expands easy experimentation from just a few well-known processes to an automatic search over all linear diffusions. To demonstrate these ideas, we introduce two new specific diffusions as well as learn a diffusion process on the MNIST, CIFAR10, andImagenet32 datasets. We show learned MDMs match or surpass bits-per-dims (BPDs) relative to fixed choices of diffusions for a given dataset and model architecture." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 451, + 208, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 451, + 208, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 208, + 464 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 475, + 506, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 506, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 506, + 555 + ], + "type": "text", + "content": "Diffusion-based generative models (DBGMs) perturb data to a target noise distribution and reverse this process to generate samples. 
They have achieved impressive performance in image generation, editing, translation (Dhariwal & Nichol, 2021; Nichol & Dhariwal, 2021; Sasaki et al., 2021; Ho et al., 2022), conditional text-to-image tasks (Nichol et al., 2021; Ramesh et al., 2022; Sahara et al., 2022) and music and audio generation (Chen et al., 2020; Kong et al., 2020; Mittal et al., 2021). They are often trained by maximizing a lower bound on the log likelihood, featuring an inference process interpreted as gradually \"noising\" the data (Sohl-Dickstein et al., 2015; Ho et al., 2020)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 559, + 504, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 504, + 594 + ], + "type": "text", + "content": "The choice of this inference process affects both likelihoods and sample quality. On different datasets and models, different inference processes work better; there is no universal best choice of inference, and the choice matters (Song et al., 2020b)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 597, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 506, + 654 + ], + "type": "text", + "content": "While some work has improved performance by designing score model architectures (Ho et al., 2020; Kingma et al., 2021; Dhariwal & Nichol, 2021), Dockhorn et al. (2021) instead introduce the critically-damped Langevin diffusion (CLD), showing that significant improvements in sample generation can be gained by carefully designing new processes. CLD pairs each data dimension with an auxiliary \"velocity\" variable and diffuses them jointly using second-order Langevin dynamics." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 658, + 507, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 507, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 507, + 715 + ], + "type": "text", + "content": "A natural question: if introducing new diffusions results in dramatic performance gains, why are there only a handful of diffusions (variance-preserving stochastic differential equation (VPSDE), variance exploding (VE), CLD, sub-VPSDE) used in DBGMs? For instance, are there other auxiliary variable diffusions that would lead to improvements like CLD? This avenue seems promising as auxiliary variables have improved other generative models and inferences, such as normalizing flows" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 720, + 443, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 720, + 443, + 733 + ], + "spans": [ + { + "bbox": [ + 121, + 720, + 443, + 733 + ], + "type": "text", + "content": "* Equal Contribution. Correspondence to {rsinghal, goldstein} at nyu.edu." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "(Huang et al., 2020), neural ordinary differential equations (ODEs) (Dupont et al., 2019), hierarchical variational models (Ranganath et al., 2016), ladder variational autoencoder (Sønderby et al., 2016), among others." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 506, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 506, + 211 + ], + "type": "text", + "content": "Despite its success, CLD also provides evidence that each new process requires significant model-specific analysis. Deriving the evidence lower bound (ELBO) and training algorithm for diffusions is challenging (Huang et al., 2021; Kingma et al., 2021; Song et al., 2021) and is carried out in a case-by-case manner for new diffusions (Campbell et al., 2022). Auxiliary variables seemingly complicate this process further; computing conditionals of the inference process necessitates solving matrix Lyupanov equations (section 3.3). Deriving the inference stationary distribution—which helps the model and inference match—can be intractable. These challenges limit rapid prototyping and evaluation of new inference processes." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 215, + 297, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 215, + 297, + 226 + ], + "spans": [ + { + "bbox": [ + 105, + 215, + 297, + 226 + ], + "type": "text", + "content": "Concretely, training a diffusion model requires:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 236, + 504, + 304 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 113, + 236, + 504, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 236, + 504, + 259 + ], + "spans": [ + { + "bbox": [ + 113, + 236, + 504, + 259 + ], + "type": "text", + "content": "(R1): Selecting an inference and model process pair such that the inference process converges to the model prior" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 114, + 264, + 269, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 264, + 269, + 277 + ], + "spans": [ + { + "bbox": [ + 114, + 264, + 269, + 277 + ], + "type": "text", + "content": "(R2): Deriving the ELBO for this pair" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 113, + 281, + 504, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 281, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 113, + 281, + 504, + 304 + ], + "type": "text", + "content": "(R3): Estimating the ELBO and its gradients by deriving and computing the inference process' transition kernel" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 314, + 504, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 504, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 504, + 360 + ], + "type": "text", + "content": "In this work, we introduce Multivariate Diffusion Models (MDMs) and a method for training and evaluating them. MDMs are diffusion-based generative models trained with auxiliary variables. 
We provide a recipe for training MDMs beyond specific instantiations—like VPSDE and CLD—to all linear inference processes that have a stationary distribution, with any number of auxiliary variables." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 364, + 506, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 364, + 506, + 421 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 506, + 421 + ], + "type": "text", + "content": "First, we bring results from gradient-based MCMC (Ma et al., 2015) to diffusion modeling to construct MDMs that converge to a chosen model prior (R1); this tightens the ELBO. Secondly, for any number of auxiliary variables, we derive the MDM ELBO (R2). Finally, we show that the transition kernel of linear MDMs, necessary for the ELBO, can be computed automatically and generically, for higher-dimensional auxiliary systems (R3)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 425, + 506, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 425, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 425, + 506, + 471 + ], + "type": "text", + "content": "With these tools, we explore a variety of new inference processes for diffusion-based generative models. We then note that the automatic transitions and fixed stationary distributions facilitate directly learning the inference to maximize the MDM ELBO. Learning turns diffusion model training into a search not only over score models but also inference processes, at no extra derivational cost." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 483, + 451, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 451, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 451, + 496 + ], + "type": "text", + "content": "Methodological Contributions. 
In summary, our methodological contributions are:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 505, + 504, + 655 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 129, + 505, + 504, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 505, + 504, + 528 + ], + "spans": [ + { + "bbox": [ + 129, + 505, + 504, + 528 + ], + "type": "text", + "content": "1. Deriving ELBOs for training and evaluating multivariate diffusion models (MDMs) with auxiliary variables." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 129, + 533, + 504, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 533, + 504, + 577 + ], + "spans": [ + { + "bbox": [ + 129, + 533, + 504, + 577 + ], + "type": "text", + "content": "2. Showing that the diffusion transition covariance does not need to be manually derived for each new diffusion. We instead demonstrate that a matrix factorization technique, previously unused in diffusion models, can automatically compute the covariance analytically for any linear MDM." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 129, + 583, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 583, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 129, + 583, + 504, + 616 + ], + "type": "text", + "content": "3. Using results from gradient-based Markov chain Monte Carlo (MCMC) to construct MDMs with a complete parameterization of inference processes whose stationary distribution matches the model prior." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 622, + 504, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 622, + 504, + 655 + ], + "spans": [ + { + "bbox": [ + 129, + 622, + 504, + 655 + ], + "type": "text", + "content": "4. Combining the above into an algorithm called Automatic Multivariate Diffusion Training (AMDT) that enables training without diffusion-specific derivations. 
AMDT enables training score models for any linear diffusion, including optimizing the diffusion and score jointly." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "text", + "content": "To demonstrate these ideas, we develop MDMs with two specific diffusions as well as learned multivariate diffusions. The specific diffusions are accelerated Langevin diffusion (ALDA) (introduced in Mou et al. (2019) as a higher-order scheme for gradient-based MCMC) and an alteration, modified accelerated Langevin diffusion (MALDA). Previously, using these diffusions for generative modeling would require significant model-specific analysis. Instead, AMDT for these diffusions is derivation-free." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "Empirical contributions. We train MDMs on the MNIST,Imagenet32 and CIFAR-10 datasets. 
In the experiments, we show that:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 128, + 114, + 506, + 211 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 129, + 114, + 504, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 114, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 129, + 114, + 504, + 137 + ], + "type": "text", + "content": "1. Training new and existing fixed diffusions, such as ALDA and MALDA, is easy with the proposed algorithm AMDT." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 128, + 140, + 504, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 140, + 504, + 174 + ], + "spans": [ + { + "bbox": [ + 128, + 140, + 504, + 174 + ], + "type": "text", + "content": "2. Using AMDT to learn the choice of diffusion for the MDM matches or surpasses the performance of fixed choices of diffusion process; sometimes the learned diffusion and VPSDE do best; other times the learned diffusion and CLD do best." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 128, + 178, + 506, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 178, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 128, + 178, + 506, + 211 + ], + "type": "text", + "content": "3. There are new and existing MDMs, trained and evaluated with the MDM ELBO, that account for as much performance improvement over VPSDE as a three-fold increase in score model size for a fixed univariate diffusion." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 221, + 505, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 221, + 505, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 505, + 266 + ], + "type": "text", + "content": "These findings affirm that the choice of diffusion affects the optimization problem, and that learning the choice bypasses the process of choosing diffusions for each new dataset and score architecture. We additionally show the utility of the MDM ELBO by showing on a dataset that CLD achieves better bits-per-dims (BPDs) than previously reported with the probability flow ODE (Dockhorn et al., 2021)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 283, + 161, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 283, + 161, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 161, + 295 + ], + "type": "text", + "content": "2 SETUP" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "type": "text", + "content": "We present diffusions by starting with the generative model and then describing its likelihood lower bound (Sohl-Dickstein et al., 2015; Huang et al., 2021; Kingma et al., 2021). 
Diffusions sample from a model prior " + }, + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_0\\sim \\pi_\\theta" + }, + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "type": "text", + "content": " and then evolve a continuous-time stochastic process " + }, + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t\\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 308, + 504, + 342 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 219, + 348, + 504, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 348, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 219, + 348, + 504, + 361 + ], + "type": "interline_equation", + "content": "d \\mathbf {z} = h _ {\\theta} (\\mathbf {z}, t) d t + \\beta_ {\\theta} (t) d \\mathbf {B} _ {t}, \\quad t \\in [ 0, T ] \\tag {1}", + "image_path": "ecbc0af365b4b160bbc8e0d80b245e3e60249ef74e170feeea4b70cff3c6de63.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_t" + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "text", + "content": "-dimensional Brownian motion. 
The model is trained so that " + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_T" + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "text", + "content": " approximates the data " + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\sim q_{\\mathrm{data}}" + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "text", + "content": ".1 Maximum likelihood training of diffusion models is intractable (Huang et al., 2021; Song et al., 2021; Kingma et al., 2021). Instead, they are trained using a variational lower bound on " + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "inline_equation", + "content": "\\log p_{\\theta}(\\mathbf{z}_T = x)" + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "text", + "content": ". The bound requires an inference process " + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s | \\mathbf{x} = x)" + }, + { + "bbox": [ + 104, + 367, + 504, + 413 + ], + "type": "text", + "content": ":2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 216, + 420, + 504, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 420, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 216, + 420, + 504, + 434 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = f _ {\\phi} (\\mathbf {y}, s) d s + g _ {\\phi} (s) d \\widehat {\\mathbf {B}} _ {s}, \\quad s \\in [ 0, T ] \\tag {2}", + "image_path": "50a0f667bac59fa67e63f60e19e48cebf958bb862db195da86ff871e3db6d66c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 441, + 505, + 
498 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{B}}_s" + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "text", + "content": " is another Brownian motion independent of " + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_t" + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "text", + "content": ". The inference process is usually taken to be specified rather than learned, and chosen to be i.i.d. for each " + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "inline_equation", + "content": "y_{tj}" + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "text", + "content": " conditional on each " + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "inline_equation", + "content": "x_j" + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "text", + "content": ". This leads to the interpretation of the " + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "inline_equation", + "content": "y_{tj}" + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "text", + "content": " as noisy versions of features " + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "inline_equation", + "content": "x_j" + }, + { + "bbox": [ + 104, + 441, + 505, + 498 + ], + "type": "text", + "content": " (Ho et al., 2020). While the diffusion ELBO is challenging to derive in general, Huang et al. (2021); Song et al. 
(2021) show that when the model process takes the form:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 168, + 503, + 504, + 519 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 503, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 168, + 503, + 504, + 519 + ], + "type": "interline_equation", + "content": "d \\mathbf {z} = \\left[ g _ {\\phi} ^ {2} (T - t) s _ {\\theta} (\\mathbf {z}, T - t) - f _ {\\phi} (\\mathbf {z}, T - t) \\right] d t + g _ {\\phi} (T - t) d \\mathbf {B} _ {t}, \\tag {3}", + "image_path": "6f1b46f3d3fa658d37aeedb9e20bd81d365042c3d0ce680f688d7555d18f7758.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 524, + 157, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 524, + 157, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 157, + 534 + ], + "type": "text", + "content": "the ELBO is:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 540, + 504, + 572 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 540, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 121, + 540, + 504, + 572 + ], + "type": "interline_equation", + "content": "\\log p _ {\\theta} (x) \\geq \\mathcal {L} ^ {\\mathrm {i s m}} (x) = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) + \\int_ {0} ^ {T} - \\frac {1}{2} \\| s _ {\\theta} \\| _ {g _ {\\phi} ^ {2}} ^ {2} - \\nabla \\cdot (g _ {\\phi} ^ {2} s _ {\\theta} - f _ {\\phi}) d s \\right], \\tag {4}", + "image_path": "e8d623338e69c70647a8eca548428736cfb4c0293ac85acb8376b6d9d4c0829a.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": 
"inline_equation", + "content": "f_{\\phi}, g_{\\phi}, s_{\\theta}" + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "text", + "content": " are evaluated at " + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "inline_equation", + "content": "(\\mathbf{y}_s, s)" + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "inline_equation", + "content": "\\|\\mathbf{x}\\|_{\\mathbf{A}}^2 = \\mathbf{x}^\\top \\mathbf{A}\\mathbf{x}" + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "inline_equation", + "content": "g^2 = gg^\\top" + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "text", + "content": ". Equation (4) features the Implicit Score Matching (ISM) loss (Song et al., 2020a), and can be re-written as an ELBO " + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{dsm}}" + }, + { + "bbox": [ + 104, + 578, + 504, + 615 + ], + "type": "text", + "content": " featuring Denoising Score Matching (DSM) (Vincent, 2011; Song et al., 2020b), see appendix F.1." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 631, + 388, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 631, + 388, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 388, + 643 + ], + "type": "text", + "content": "3 A RECIPE FOR MULTIVARIATE DIFFUSION MODELS" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 656, + 504, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 504, + 690 + ], + "type": "text", + "content": "As has been shown in prior work (Song et al., 2021; Dockhorn et al., 2021), the choice of diffusion matters. 
Drawing on principles from previous generative models (section 6), we can consider a wide class of diffusion inference processes by constructing them using auxiliary variables." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "text", + "content": "Following Huang et al. (2021); Dockhorn et al. (2021) we integrate all processes in forward time 0 to " + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "text", + "content": ". It may be helpful to think of an additional variable " + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_t \\triangleq \\mathbf{z}_{T-t}" + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}_0" + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "text", + "content": " approximates " + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\sim q_{\\mathrm{data}}" + }, + { + "bbox": [ + 104, + 698, + 504, + 721 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 118, + 721, + 384, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 721, + 384, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 721, + 384, + 731 + ], + "type": "text", + "content": "2We use " + }, + { + "bbox": [ + 118, + 721, + 384, + 731 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 118, + 721, + 384, + 731 + ], + "type": "text", + "content": " as the inference variable over the same space as the model's " + }, + { + "bbox": [ + 118, + 721, + 384, + 731 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 118, + 721, + 384, + 731 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "At first glance, training such diffusions can seem challenging. First, one needs an ELBO that includes auxiliary variables. This ELBO will require sampling from the transition kernel, and setting the model prior to the specified inference stationary distribution. But doing such diffusion-specific analysis manually is challenging and hinders rapid prototyping." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 504, + 189 + ], + "type": "text", + "content": "In this section we show how to address these challenges and introduce an algorithm, AMDT, to simplify and automate modeling with MDMs. AMDT can be used to train new and existing diffusions, including those with auxiliary variables, and including those that learn the inference process. In appendix A we discuss how the presented methods can also be used to automate and improve simplified score matching and noise prediction objectives used to train diffusion models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 202, + 306, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 202, + 306, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 306, + 213 + ], + "type": "text", + "content": "3.1 MULTIVARIATE MODEL AND INFERENCE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "text", + "content": "For the " + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "inline_equation", + "content": "j^{th}" + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "text", + "content": " data coordinate at each time " + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "text", + "content": ", MDMs pair " + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{tj} \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "text", + "content": " with a vector of auxiliary variables " 
+ }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{tj} \\in \\mathbb{R}^{K-1}" + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "text", + "content": " into a joint vector " + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_t" + }, + { + "bbox": [ + 104, + 223, + 504, + 248 + ], + "type": "text", + "content": " and diffuse in the extended space:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 194, + 255, + 505, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 255, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 194, + 255, + 505, + 281 + ], + "type": "interline_equation", + "content": "\\mathbf {u} _ {0} \\sim \\pi_ {\\theta}, \\quad d \\mathbf {u} = h _ {\\theta} \\left(\\mathbf {u} _ {t} = \\left[ \\begin{array}{l} \\mathbf {z} _ {t} \\\\ \\mathbf {v} _ {t} \\end{array} \\right], t\\right) d t + \\beta_ {\\theta} (t) d \\mathbf {B} _ {t}. 
\\tag {5}", + "image_path": "1c640f3f836ab17b6e3672740c4047bb20736320600a37a270a619202c0962e2.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": "MDMs model the data " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_T" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": ", a coordinate in " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_T \\sim p_\\theta" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": ". 
For the " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "j^{th}" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": " feature " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_j" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": ", each " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{tj} \\in \\mathbb{R}^K" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": " consists of a \"data\" dimension " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{tj}^z" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": " and auxiliary variable " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{tj}^v" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": ". Therefore " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{u} \\in \\mathbb{R}^{dK}" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": ". 
We extend the drift coefficient " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "h_\\theta" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": " from a function in " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d \\times \\mathbb{R}_+ \\to \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": " to the extended space " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{dK} \\times \\mathbb{R}_+ \\to \\mathbb{R}^{dK}" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": ". We likewise extend the diffusion coefficient to a matrix " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\beta_\\theta" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": " acting on Brownian motion " + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_t \\in \\mathbb{R}^{dK}" + }, + { + "bbox": [ + 104, + 289, + 504, + 339 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "text", + "content": "Because the MDM model is over the extended space, the inference distribution " + }, + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "text", + "content": " must be too. 
We then set " + }, + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_0^v |\\mathbf{y}_0^z = x)" + }, + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "text", + "content": " to any chosen initial distribution, e.g. " + }, + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "text", + "content": " and discuss this choice in section 4. Then " + }, + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s" + }, + { + "bbox": [ + 104, + 342, + 505, + 377 + ], + "type": "text", + "content": " evolves according to the auxiliary variable inference process:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 241, + 384, + 504, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 384, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 241, + 384, + 504, + 399 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = f _ {\\phi} (\\mathbf {y}, s) d s + g _ {\\phi} (s) d \\widehat {\\mathbf {B}} _ {s}, \\tag {6}", + "image_path": "e52d58b1efff2e0afa66e9b116e2795e92867e0a12356233949446d769c58e58.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "text", + "content": "where the inference drift and diffusion coefficients " + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "inline_equation", + "content": "f_{\\phi}, g_{\\phi}" + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "text", + "content": " are now over the extended space " + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\mathbf{y} = [\\mathbf{y}^z, 
\\mathbf{y}^v]" + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "text", + "content": ". The function " + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "inline_equation", + "content": "f_{\\phi}" + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "text", + "content": " lets the " + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "text", + "content": " coordinates of " + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{tj}" + }, + { + "bbox": [ + 104, + 406, + 504, + 430 + ], + "type": "text", + "content": " interact in the inference process." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 443, + 171, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 443, + 171, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 171, + 453 + ], + "type": "text", + "content": "ASSUMPTIONS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 464, + 505, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 505, + 509 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 505, + 509 + ], + "type": "text", + "content": "This work demonstrates how to parameterize time-varying Itô processes, used for diffusion modeling, to have a stationary distribution that matches the given model prior. 
To take advantage of the automatic transition kernels also presented, the inferences considered for modeling are linear time-varying processes and take the form:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 242, + 516, + 367, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 516, + 367, + 529 + ], + "spans": [ + { + "bbox": [ + 242, + 516, + 367, + 529 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = \\mathbf {A} _ {\\phi} (s) \\mathbf {y} d s + g _ {\\phi} (s) d \\mathbf {B} _ {s}", + "image_path": "5320123a03a0a7f8f6db18160520b88d90c45dace4cbd1c989ab6b31bb783da8.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 535, + 476, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 535, + 476, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 535, + 476, + 548 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 535, + 476, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{A}_{\\phi}(s):\\mathbb{R}_{+}\\to dK\\times dK" + }, + { + "bbox": [ + 104, + 535, + 476, + 548 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 535, + 476, + 548 + ], + "type": "inline_equation", + "content": "g_{\\phi}(s):\\mathbb{R}_{+}\\to dK\\times dK" + }, + { + "bbox": [ + 104, + 535, + 476, + 548 + ], + "type": "text", + "content": " are matrix-valued functions." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 562, + 203, + 573 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 562, + 203, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 203, + 573 + ], + "type": "text", + "content": "3.2 ELBO FOR MDMS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 582, + 504, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 605 + ], + "type": "text", + "content": "We now show how to train MDMs to optimize a lower bound on the log likelihood of the data. Like in the univariate case, we use the parameterization in eq. (3) to obtain a tractable ELBO." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 609, + 429, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 429, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 429, + 622 + ], + "type": "text", + "content": "Theorem 1. 
The MDM log marginal likelihood of the data is lower-bounded by:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 628, + 505, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 628, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 106, + 628, + 505, + 712 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\log p _ {\\theta} (x) \\geq \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\underbrace {\\log \\pi_ {\\theta} (\\mathbf {y} _ {T})} _ {\\ell_ {T}} - \\int_ {0} ^ {T} \\frac {1}{2} \\| s _ {\\theta} \\| _ {g _ {\\phi} ^ {2}} ^ {2} + \\nabla \\cdot (g _ {\\phi} ^ {2} s _ {\\theta} - f _ {\\phi}) d s - \\underbrace {\\log q _ {\\phi} (\\mathbf {y} _ {0} ^ {v} | x)} _ {\\ell_ {q}} \\right] \\quad (\\mathcal {L} ^ {m i s m}) \\\\ = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\ell_ {T} + \\int_ {0} ^ {T} \\frac {1}{2} \\| s _ {\\phi} \\| _ {g _ {\\phi} ^ {2}} ^ {2} - \\frac {1}{2} \\| s _ {\\theta} - s _ {\\phi} \\| _ {g _ {\\phi} ^ {2}} ^ {2} + (\\nabla \\cdot f _ {\\phi}) d s - \\ell_ {q} \\right] \\quad \\left(\\mathcal {L} ^ {m d s m}\\right). 
\\tag {7} \\\\ \\end{array}", + "image_path": "e7b7e7a69409b196784ac8287569e91780cede8d027ced1e4855bf215881b221.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 720, + 468, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 468, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 468, + 734 + ], + "type": "text", + "content": "where divergences and gradients are taken with respect to " + }, + { + "bbox": [ + 105, + 720, + 468, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s" + }, + { + "bbox": [ + 105, + 720, + 468, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 720, + 468, + 734 + ], + "type": "inline_equation", + "content": "s_{\\phi} = \\nabla_{\\mathbf{y}_s}\\log q_{\\phi}(\\mathbf{y}_s|x)" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": "Proof. 
The proof for the MDM ISM ELBO " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{mism}}" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": " is in appendix F. In short, we introduce auxiliary variables, apply Theorem 1 of Huang et al. (2021) (equivalently, Theorem 3 of Song et al. (2021) or appendix E of Kingma et al. (2021)) to the joint space, and then apply an additional variational bound to " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_0" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": ". The MDM DSM ELBO " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{mdsm}}" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": " is likewise derived in appendix F, similarly to Huang et al. (2021); Song et al. (2021), but extended to multivariate diffusions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "content": "We train MDM's by estimating the gradients of " + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{mdsm}}" + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "content": ", as estimates of " + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{mism}}" + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "content": " can be computationally prohibitive. For numerical stability, the integral in eq. 
(7) is computed on " + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "inline_equation", + "content": "[\\epsilon, T]" + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "content": " rather than " + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "inline_equation", + "content": "[0, T]" + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "content": ". One can regard this as a bound for a variable " + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{\\epsilon}" + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "content": ". To maintain a proper likelihood bound for the data, one can choose a likelihood " + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_0|\\mathbf{u}_{\\epsilon}" + }, + { + "bbox": [ + 104, + 149, + 506, + 217 + ], + "type": "text", + "content": " and compose bounds as we demonstrate in appendix I. We report the ELBO with this likelihood term, which plays the same role as the discretized Gaussian in Nichol & Dhariwal (2021) and Tweedie's formula in Song et al. (2021)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 228, + 367, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 228, + 367, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 367, + 241 + ], + "type": "text", + "content": "3.3 INGREDIENT 1: COMPUTING THE TRANSITION " + }, + { + "bbox": [ + 105, + 228, + 367, + 241 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s|x)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 248, + 504, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 248, + 504, + 283 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 504, + 283 + ], + "type": "text", + "content": "To estimate eq. 
(7) and its gradients, we need samples from " + }, + { + "bbox": [ + 104, + 248, + 504, + 283 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|x)" + }, + { + "bbox": [ + 104, + 248, + 504, + 283 + ], + "type": "text", + "content": " and to compute " + }, + { + "bbox": [ + 104, + 248, + 504, + 283 + ], + "type": "inline_equation", + "content": "\\nabla \\log q(\\mathbf{y}_s|x)" + }, + { + "bbox": [ + 104, + 248, + 504, + 283 + ], + "type": "text", + "content": ". While an intractable problem for MDMs in general, we provide two ingredients for tightening and optimizing these bounds in a generic fashion for linear inference MDMs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 288, + 504, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 288, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 288, + 504, + 309 + ], + "type": "text", + "content": "We first show how to automate computation of " + }, + { + "bbox": [ + 104, + 288, + 504, + 309 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 288, + 504, + 309 + ], + "type": "text", + "content": " and then " + }, + { + "bbox": [ + 104, + 288, + 504, + 309 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|x)" + }, + { + "bbox": [ + 104, + 288, + 504, + 309 + ], + "type": "text", + "content": ". 
For linear MDMs of the form:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 247, + 312, + 362, + 324 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 312, + 362, + 324 + ], + "spans": [ + { + "bbox": [ + 247, + 312, + 362, + 324 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = \\mathbf {A} (s) \\mathbf {y} d s + g (s) d \\mathbf {B} _ {s},", + "image_path": "c405b76e52d00812d8714b3ad00446a2154e085b01f98073109bc88ee7622390.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "text", + "content": "the transition kernel " + }, + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "text", + "content": " is Gaussian (Särkkä & Solin, 2019). Let " + }, + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "inline_equation", + "content": "f(\\mathbf{y}, s) = \\mathbf{A}(s)\\mathbf{y}" + }, + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "text", + "content": ". 
Then, the mean and covariance are solutions to the following ODEs:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 208, + 350, + 306, + 363 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 350, + 306, + 363 + ], + "spans": [ + { + "bbox": [ + 208, + 350, + 306, + 363 + ], + "type": "interline_equation", + "content": "d \\mathbf {m} _ {s | 0} / d s = \\mathbf {A} (s) \\mathbf {m} _ {s | 0}", + "image_path": "514b92bd6d7e7af05e0cbe39717365b5aac137a765fa1f44813b80967d76b0f3.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 209, + 365, + 504, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 365, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 209, + 365, + 504, + 380 + ], + "type": "interline_equation", + "content": "d \\boldsymbol {\\Sigma} _ {s | 0} / d s = \\mathbf {A} (s) \\boldsymbol {\\Sigma} _ {s | 0} + \\boldsymbol {\\Sigma} _ {s | 0} \\mathbf {A} ^ {\\top} (s) + g ^ {2} (s). \\tag {8}", + "image_path": "c59809fd13da08e182a2c419e99bda06d005efa964cf3dfbbed48989de3250ed.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 380, + 257, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 257, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 257, + 392 + ], + "type": "text", + "content": "The mean can be solved analytically:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 198, + 393, + 504, + 428 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 393, + 504, + 428 + ], + "spans": [ + { + "bbox": [ + 198, + 393, + 504, + 428 + ], + "type": "interline_equation", + "content": "\\mathbf {m} _ {s \\mid 0} = \\exp \\left[ \\int_ {0} ^ {s} \\mathbf {A} (\\nu) d \\nu \\right] \\mathbf {y} _ {0} \\underbrace {= \\exp (s \\mathbf {A}) \\mathbf {y} _ {0}} _ {\\text {n o i n t e g r a t i o n i f} \\mathbf {A} (\\nu) = \\mathbf {A}}. 
\\tag {9}", + "image_path": "34cd20a7243704aaf48e8f86bf8e49583d7e749fa0e81a0e01d169619e92d0bb.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 430, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 504, + 453 + ], + "type": "text", + "content": "The covariance equation does not have as simple a solution because eq. (9) as the unknown matrix " + }, + { + "bbox": [ + 104, + 430, + 504, + 453 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{s|0}" + }, + { + "bbox": [ + 104, + 430, + 504, + 453 + ], + "type": "text", + "content": " is being multiplied both from the left and the right." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 457, + 504, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 504, + 502 + ], + "type": "text", + "content": "Instead of solving eq. (8) for a specific diffusion manually, as done in previous work (e.g. pages 50-54 of Dockhorn et al. (2021)), we show that a matrix factorization technique (Särkkä & Solin (2019), sec. 6.3) previously unused in diffusion-based generative models can automatically compute " + }, + { + "bbox": [ + 104, + 457, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\Sigma_{s|0}" + }, + { + "bbox": [ + 104, + 457, + 504, + 502 + ], + "type": "text", + "content": " generically for any linear MDM. 
Define " + }, + { + "bbox": [ + 104, + 457, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_s" + }, + { + "bbox": [ + 104, + 457, + 504, + 502 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 457, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_s" + }, + { + "bbox": [ + 104, + 457, + 504, + 502 + ], + "type": "text", + "content": " that evolve according to:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 216, + 503, + 504, + 530 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 503, + 504, + 530 + ], + "spans": [ + { + "bbox": [ + 216, + 503, + 504, + 530 + ], + "type": "interline_equation", + "content": "\\binom {d \\mathbf {C} _ {s} / d s} {d \\mathbf {H} _ {s} / d s} = \\binom {\\mathbf {A} (s)} {\\mathbf {0}} \\begin{array}{c} g ^ {2} (s) \\\\ - \\mathbf {A} ^ {\\top} (s) \\end{array} \\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}}, \\tag {10}", + "image_path": "ac7a441e9447d4246268ba215598459f458f2f37e97896561d1aef2bb29ad6f6.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 532, + 504, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 532, + 504, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 532, + 504, + 554 + ], + "type": "text", + "content": "then " + }, + { + "bbox": [ + 104, + 532, + 504, + 554 + ], + "type": "inline_equation", + "content": "\\mathbf{\\Sigma}_{s|0} = \\mathbf{C}_s\\mathbf{H}_s^{-1}" + }, + { + "bbox": [ + 104, + 532, + 504, + 554 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 532, + 504, + 554 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_0 = \\mathbf{\\Sigma}_0" + }, + { + "bbox": [ + 104, + 532, + 504, + 554 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 532, + 504, + 554 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_0 = \\mathbf{I}" + }, + { + "bbox": [ + 104, + 532, + 504, + 554 + ], + 
"type": "text", + "content": " (Appendix D). These equations can be solved in closed-form," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 139, + 555, + 504, + 604 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 555, + 504, + 604 + ], + "spans": [ + { + "bbox": [ + 139, + 555, + 504, + 604 + ], + "type": "interline_equation", + "content": "\\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ \\binom {[ \\mathbf {A} ] _ {s}} {\\mathbf {0}} \\quad \\begin{array}{c} {[ g ^ {2} ] _ {s}} \\\\ {- [ \\mathbf {A} ^ {\\top} ] _ {s}} \\end{array} \\right] \\binom {\\boldsymbol {\\Sigma} _ {0}} {\\mathbf {I}} \\underbrace {= \\exp \\left[ s \\binom {\\mathbf {A}} {\\mathbf {0}} \\quad \\begin{array}{c} {[ g ^ {2} ] _ {s}} \\\\ {- \\mathbf {A} ^ {\\top}} \\end{array} \\right]} _ {\\text {n o i n t e g r a t i o n i f} \\mathbf {A} (\\nu) = \\mathbf {A}, g (\\nu) = g} \\binom {\\boldsymbol {\\Sigma} _ {0}} {\\mathbf {I}}, \\tag {11}", + "image_path": "edb852595b2b06f2d861a12452ac7806a10cc740a88f6fb9ee1ee5dd672de29e.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 605, + 397, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 397, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 397, + 620 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 605, + 397, + 620 + ], + "type": "inline_equation", + "content": "[\\mathbf{A}]_s = \\int_0^s\\mathbf{A}(\\nu)d\\nu" + }, + { + "bbox": [ + 104, + 605, + 397, + 620 + ], + "type": "text", + "content": " . 
To condition on " + }, + { + "bbox": [ + 104, + 605, + 397, + 620 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0 = (x,v)" + }, + { + "bbox": [ + 104, + 605, + 397, + 620 + ], + "type": "text", + "content": " , we set " + }, + { + "bbox": [ + 104, + 605, + 397, + 620 + ], + "type": "inline_equation", + "content": "\\pmb {\\Sigma}_0 = \\mathbf{0}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "text", + "content": "Computing " + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s|x)" + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "text", + "content": ". For the covariance " + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{s|0}" + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "text", + "content": ", to condition on " + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0" + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "text", + "content": ", we set " + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma_0}" + }, + { + "bbox": [ + 104, + 630, + 487, + 643 + ], + "type": "text", + "content": " to" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 263, + 645, + 345, + 671 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 645, + 345, + 671 + ], + "spans": [ + { + "bbox": [ + 263, + 645, + 345, + 671 + ], + "type": "interline_equation", + 
"content": "\\boldsymbol {\\Sigma} _ {0} = \\left( \\begin{array}{c c} 0 & 0 \\\\ 0 & \\boldsymbol {\\Sigma} _ {\\mathbf {v} _ {0}} \\end{array} \\right),", + "image_path": "25d1d5018e65dc4da96eecb8cc88de0064dd33351c30f416ea7427932dc066a3.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 672, + 504, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 672, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 672, + 504, + 685 + ], + "type": "text", + "content": "To compute the mean, it is the same expression as for " + }, + { + "bbox": [ + 104, + 672, + 504, + 685 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 672, + 504, + 685 + ], + "type": "text", + "content": ", but with a different initial condition:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 222, + 685, + 504, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 685, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 222, + 685, + 504, + 712 + ], + "type": "interline_equation", + "content": "\\mathbf {m} _ {s \\mid 0} = \\exp \\left[ \\int_ {0} ^ {s} \\mathbf {A} (\\nu) d \\nu \\right] \\binom {x} {\\mathbb {E} _ {q} [ \\mathbf {y} _ {0} ^ {v} | x ]} \\tag {12}", + "image_path": "fb2fabdaa87e12128e12aede5fc7940668c215b5771319acf482e32384876fcd.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 712, + 239, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 712, + 239, + 724 + ], + "spans": [ + { + "bbox": [ + 104, + 712, + 239, + 724 + ], + "type": "text", + "content": "See appendix D for more details." 
+ } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 95, + 504, + 242 + ], + "blocks": [ + { + "bbox": [ + 106, + 81, + 336, + 94 + ], + "lines": [ + { + "bbox": [ + 106, + 81, + 336, + 94 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 336, + 94 + ], + "type": "text", + "content": "Algorithm 1 Automatic Multivariate Diffusion Training" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "lines": [ + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "spans": [ + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": "Input: Data " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "\\{x_i\\}" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": ", inference process matrices " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\phi}, \\mathbf{D}_{\\phi}" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": ", model prior " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + 
"content": ", initial distribution " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_0^v | x)" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": ", and score model architecture " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "s_\\theta" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": " \nReturns: Trained score model " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "s_\\theta" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": " \nwhile " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "s_\\theta" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": " not converged do \n Sample " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "x \\sim \\sum_{i=1}^{N} \\frac{1}{N} \\delta_{x_i}" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "v_0 \\sim q_{\\phi}(\\mathbf{y}_0^v | x)" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": " \n Sample " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{s} \\sim \\mathbf{U}[0, T]" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s, \\mathbf{y}_T \\sim q_{\\phi}(\\mathbf{y}_s | x)" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": " using algorithm 2 \n Estimate the stochastic gradient of the MDM ELBO, " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": 
"inline_equation", + "content": "\\nabla_\\theta \\mathcal{L}(\\theta, \\phi)" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": ", using eq. (7) \n " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "\\theta \\leftarrow \\theta + \\alpha \\nabla_\\theta \\mathcal{L}(\\theta, \\phi)" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": ". \n if learning inference then \n " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "\\phi \\leftarrow \\phi + \\alpha \\nabla_\\phi \\mathcal{L}(\\theta, \\phi)" + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "text", + "content": " \nend if \nend while \nOutput " + }, + { + "bbox": [ + 106, + 95, + 504, + 242 + ], + "type": "inline_equation", + "content": "s_\\theta" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "algorithm" + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "text", + "content": "A fast and simple algorithm. We show in algorithm 2 (appendix H) that computing the transition kernel only requires knowing " + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "inline_equation", + "content": "f, g" + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "text", + "content": " and requires no diffusion-specific analysis. 
For " + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "inline_equation", + "content": "K - 1" + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "text", + "content": " auxiliary variables, " + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{A}, g" + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "inline_equation", + "content": "K \\times K" + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "text", + "content": ". Like for scalar diffusions, these parameters are shared across data coordinates. This means matrix exponentials and inverses are done on " + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "inline_equation", + "content": "K \\times K" + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "text", + "content": " matrices, where " + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 262, + 339, + 372 + ], + "type": "text", + "content": " is only 2 or 3 in our experiments. In table 1, we compare the time to sample a batch of size 256 from the transition kernel for CIFAR 10 and MNIST. The table shows the extra computa" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 345, + 263, + 504, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 263, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 345, + 263, + 504, + 309 + ], + "type": "text", + "content": "Table 1: Runtime Comparison: we compare the run time of sampling from the CLD diffusion analytically versus using the automated algorithm." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 349, + 317, + 494, + 351 + ], + "blocks": [ + { + "bbox": [ + 349, + 317, + 494, + 351 + ], + "lines": [ + { + "bbox": [ + 349, + 317, + 494, + 351 + ], + "spans": [ + { + "bbox": [ + 349, + 317, + 494, + 351 + ], + "type": "table", + "html": "
MethodCIFAR-10MNIST
Analytical0.0270.0062
Automated0.0290.007
", + "image_path": "a061757b1d390105d15c80009563af533b0d52b3df5dd5021a6a2604c3b5e9f7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 372, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 504, + 396 + ], + "type": "text", + "content": "tional cost of the automated algorithm is negligible. This automation likewise applies to simplified score matching and noise prediction objectives, since all rely on " + }, + { + "bbox": [ + 104, + 372, + 504, + 396 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s|x)" + }, + { + "bbox": [ + 104, + 372, + 504, + 396 + ], + "type": "text", + "content": " (appendix A)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 407, + 312, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 407, + 312, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 407, + 312, + 418 + ], + "type": "text", + "content": "3.4 INGREDIENT 2: MDM PARAMETERIZATION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": "The MDM ELBO (eq. (7)) is tighter when the inference " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_T" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": " tends toward the model's prior " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": ". 
Here we construct inference processes with the model prior " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": " as a specified stationary distribution " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "q_{\\infty}" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "text", + "content": "Ma et al. (2015) provide a complete recipe for constructing gradient-based MCMC samplers; the recipe constructs non-linear time-homogeneous Itô processes with a given stationary distribution, and show that the parameterization spans all such Itô processes with that stationary distribution." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 495, + 504, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 504, + 552 + ], + "type": "text", + "content": "Diffusion models usually have time-varying drift and diffusion coefficients (e.g. use of the " + }, + { + "bbox": [ + 104, + 495, + 504, + 552 + ], + "type": "inline_equation", + "content": "\\beta(t)" + }, + { + "bbox": [ + 104, + 495, + 504, + 552 + ], + "type": "text", + "content": " function). To build diffusion models that match the model prior, we first extend Theorem 1 from Ma et al. (2015) to construct non-linear Itô processes with time-varying drift and diffusion coefficients with a given stationary distribution (Appendix C). Then, to keep transitions tractable (per Section 3.3), we specialize this result to linear Itô diffusions." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": "We directly state the result for linear time-varying diffusions with stationary distributions. The parameterization requires a skew-symmetric matrix " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "-\\mathbf{Q}(s) = \\mathbf{Q}(s)^{\\top}" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": ", a positive semi-definite matrix " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{D}(s)" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": ", and a function " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\nabla H(\\mathbf{y})" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": " such that the desired stationary distribution " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "q_{\\infty}" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": " is proportional to " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\exp[-H(\\mathbf{y})]" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": ". 
Linear Itô diffusions have Gaussian stationary distributions (Särkkä & Solin, 2019) meaning that " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\nabla H" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": " is linear and can be expressed as " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{S}\\mathbf{y}" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": " for some matrix " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": ". For a matrix " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\sqrt{\\mathbf{A}}" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": " refer to the matrix square root defined by " + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "inline_equation", + "content": "\\mathbf{a} = \\sqrt{\\mathbf{A}} \\Longleftrightarrow \\mathbf{A} = \\mathbf{aa}^{\\top}" + }, + { + "bbox": [ + 104, + 556, + 504, + 626 + ], + "type": "text", + "content": ". 
Then, the Itô diffusion:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 206, + 632, + 504, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 632, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 206, + 632, + 504, + 669 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = \\underbrace {- \\left[ \\mathbf {Q} (s) + \\mathbf {D} (s) \\right] \\mathbf {S y}} _ {f (\\mathbf {y}, s)} d s + \\underbrace {\\sqrt {2 \\mathbf {D} (s)}} _ {g (s)} d \\widehat {\\mathbf {B}} _ {s}, \\tag {13}", + "image_path": "601d54882b2d6d074f120948cd7025fadefa86d41e6ed6aa9ced1a9b574c32d4.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "text", + "content": "has Gaussian stationary distribution " + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mathbf{0},\\mathbf{S}^{-1})" + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}(s),\\mathbf{D}(s)" + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "text", + "content": " are parameters. 
For a discussion of convergence to the stationary distribution, as well as skew-symmetric and positive semi-definite parameterizations, see appendix C, where we also show that existing diffusion processes such as VPSDE and CLD are included in " + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} / \\mathbf{D}" + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "text", + "content": " parameterization. We display the ELBO in terms of " + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} / \\mathbf{D}" + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "text", + "content": " in appendix G and an algorithm in appendix H." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": "For score matching and noise prediction losses and a given " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "q_{\\phi}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": ", achieving a 
minimizing value with respect to " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "s_{\\theta}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " does not imply that the generative model score will match the inference score. Modeling the data also requires the marginal distribution of " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "q_{\\phi, T}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " to approximate " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": ". When " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "q_{\\phi}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " is constant, it is important to confirm the stationary distribution is appropriately set, and the tools used here for the ELBO can be used to satisfy this requirement for score matching and noise prediction (appendix A)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 151, + 289, + 162 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 151, + 289, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 151, + 289, + 162 + ], + "type": "text", + "content": "3.5 LEARNING THE INFERENCE PROCESS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "text", + "content": "The choice of diffusion matters, and the ELBOs in eq. 
(7) have no requirement for fixed " + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "inline_equation", + "content": "q_{\\phi}" + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "text", + "content": ". We therefore learn the inference process jointly with " + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "inline_equation", + "content": "s_{\\theta}" + }, + { + "bbox": [ + 104, + 171, + 504, + 228 + ], + "type": "text", + "content": ". Under linear transitions (ingredient 1), no algorithmic details change as the diffusion changes during training. Under stationary parameterization (ingredient 2), we can learn without the stationary distribution going awry. In the experiments, learning matches or surpasses BPDs of fixed diffusions for a given dataset and score architecture." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "content": "In " + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{mdsm}}" + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{mism}}" + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "inline_equation", + "content": "q_{\\phi, \\infty}" + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "content": " may be set to equal " + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "content": ", but it is " + }, + { + "bbox": [ + 104, + 
232, + 505, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_T \\sim q_{\\phi, T}" + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "content": " for the chosen " + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "content": " that is featured in the ELBO. Learning " + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "inline_equation", + "content": "q_{\\phi}" + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "content": " can choose " + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_T" + }, + { + "bbox": [ + 104, + 232, + 505, + 255 + ], + "type": "text", + "content": " to reduce the cross-entropy:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 253, + 258, + 504, + 273 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 258, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 253, + 258, + 504, + 273 + ], + "type": "interline_equation", + "content": "- \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} _ {T} | x)} [ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) ]. \\tag {14}", + "image_path": "4a7caf112b5ff31ad3cc28a5744bb7f2365bd89a59dd52357d6cf22547f7a9ab.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "text", + "content": "Minimizing eq. (14) will tighten the ELBO for any " + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "inline_equation", + "content": "s_\\theta" + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "text", + "content": ". 
Next, " + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "inline_equation", + "content": "q_\\phi" + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "text", + "content": " is featured in the remaining terms that feature " + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "inline_equation", + "content": "s_\\theta" + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "text", + "content": "; optimizing for " + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "inline_equation", + "content": "q_\\phi" + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "text", + "content": " will tighten and improve the ELBO alongside " + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "inline_equation", + "content": "s_\\theta" + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "text", + "content": ". Finally, " + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "inline_equation", + "content": "q_\\phi" + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "text", + "content": " is featured in the expectations and the " + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "inline_equation", + "content": "-\\log q_\\phi" + }, + { + "bbox": [ + 104, + 274, + 504, + 309 + ], + "type": "text", + "content": " term:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 160, + 312, + 505, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 312, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 160, + 312, + 505, + 338 + ], + "type": "interline_equation", + "content": "\\log p _ {\\theta} \\left(\\mathbf {u} _ {T} ^ {z} = x\\right) \\geq = \\underbrace {\\mathbb {E} _ {q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} = v \\mid x\\right)}} \\left[ \\left(\\mathcal {L} ^ {\\mathrm {d s m}} \\text {o r} \\mathcal {L} ^ {\\mathrm {i s m}}\\right) \\underbrace {- \\log q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} = v \\mid x\\right)} 
\\right] \\tag {15}", + "image_path": "d2f12cdecf97405f9a8805e9a721a170f12376437f5dfcc1da8adc23ecf1e16c.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 346, + 504, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 504, + 370 + ], + "type": "text", + "content": "The " + }, + { + "bbox": [ + 104, + 346, + 504, + 370 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_0^v |x)" + }, + { + "bbox": [ + 104, + 346, + 504, + 370 + ], + "type": "text", + "content": " terms impose an optimality condition that " + }, + { + "bbox": [ + 104, + 346, + 504, + 370 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{u}_T^v |\\mathbf{u}_T^z) = q_{\\phi}(\\mathbf{y}_0^v |\\mathbf{y}_0^z)" + }, + { + "bbox": [ + 104, + 346, + 504, + 370 + ], + "type": "text", + "content": " (appendix E). When it is satisfied, no looseness in the ELBO is due to the initial time zero auxiliary variables." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "text", + "content": "To learn, " + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\mathbf{Q},\\mathbf{D}" + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "text", + "content": " need to be specified with parameters " + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "text", + "content": " that enable gradients. 
We keep " + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "text", + "content": " fixed at inverse covariance of " + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "text", + "content": ". The transition kernel " + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s|x)" + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "text", + "content": " depends on " + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\mathbf{Q},\\mathbf{D}" + }, + { + "bbox": [ + 104, + 374, + 504, + 431 + ], + "type": "text", + "content": " through its mean and covariance. Gaussian distributions permit gradient estimation with reparameterization or score-function gradients (Kingma & Welling, 2013; Ranganath et al., 2014; Rezende & Mohamed, 2015; Titsias & Lázaro-Gredilla, 2014). 
Reparameterization is accomplished via:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 263, + 435, + 504, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 435, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 263, + 435, + 504, + 449 + ], + "type": "interline_equation", + "content": "\\mathbf {y} _ {s} = \\mathbf {m} _ {s | 0} + \\mathbf {L} _ {s | 0} \\epsilon \\tag {16}", + "image_path": "24d55598fd4af59dc4416d553ac79d3bd717f46272f4e6d62e1ed1b6a313fde2.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim \\mathcal{N}(0, I_{dK})" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\mathbf{L}_{s|0}" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": " satisfies " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\mathbf{L}_{s|0} \\mathbf{L}_{s|0}^{\\top} = \\boldsymbol{\\Sigma}_{s|0}" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": ", derived using coordinate-wise Cholesky decomposition. Gradients flow through eq. 
(16) from " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{s|0}" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{s|0}" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": " to parameters " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 482, + 504, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 482, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 482, + 504, + 518 + ], + "type": "text", + "content": "Algorithm 1 displays Automatic Multivariate Diffusion Training (AMDT). 
AMDT provides a training method for diffusion-based generative models for either fixed " + }, + { + "bbox": [ + 104, + 482, + 504, + 518 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 482, + 504, + 518 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 482, + 504, + 518 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 482, + 504, + 518 + ], + "type": "text", + "content": " matrices or for learning the " + }, + { + "bbox": [ + 104, + 482, + 504, + 518 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\phi}, \\mathbf{D}_{\\phi}" + }, + { + "bbox": [ + 104, + 482, + 504, + 518 + ], + "type": "text", + "content": " matrices, without requiring any diffusion-specific analysis." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "type": "text", + "content": "Learning in other diffusion objectives. Like in the ELBO, learning in score matching or noise prediction objectives can improve the match between the inference process and implied generative model (appendix A)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 577, + 350, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 577, + 350, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 350, + 590 + ], + "type": "text", + "content": "4 INSIGHTS INTO MULTIVARIATE DIFFUSIONS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": "Scalar versus Multivariate Processes. 
Equation (13) clarifies what can change while preserving " + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "inline_equation", + "content": "q_{\\infty}" + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": ". Recall that " + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "inline_equation", + "content": "K \\times K" + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "inline_equation", + "content": "K - 1" + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": " auxiliary variables. Because " + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "inline_equation", + "content": "0" + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": " is the only " + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": " skew-symmetric matrix, scalar processes must set " + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} = 0" + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": ". 
With " + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "inline_equation", + "content": "q_{\\phi,\\infty} = \\mathcal{N}(0,\\mathbf{I})" + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": ", the process is:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 233, + 639, + 504, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 639, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 233, + 639, + 504, + 654 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = - \\mathbf {D} (s) \\mathbf {y} d s + \\sqrt {2 \\mathbf {D} (s)} d \\widehat {\\mathbf {B}} _ {s}. \\tag {17}", + "image_path": "a34a5b9837ad684b9505da41b4e620c8d616d09ea591ad64ef04ecad5205faf5.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "text", + "content": "What is left is the VPSDE process used widely in diffusion models where " + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{D}(s) = \\frac{1}{2}\\beta (s)" + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "inline_equation", + "content": "1\\times 1" + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "text", + "content": " (Song et al., 2020b). This reveals that the VPSDE process is the only scalar diffusion with a stationary distribution. 
This also clarifies the role of " + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "text", + "content": ": it accounts for mixing between dimensions in multivariate processes, as do non-diagonal entries in " + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "inline_equation", + "content": "K > 1" + }, + { + "bbox": [ + 104, + 658, + 505, + 704 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 505, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 505, + 733 + ], + "type": "text", + "content": "3There are processes such as sub-VPSDE (Song et al., 2020b) which are covered in the sense that they tend to members of this parameterization as " + }, + { + "bbox": [ + 104, + 711, + 505, + 733 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 711, + 505, + 733 + ], + "type": "text", + "content": " grows: sub-VP converges to VPSDE." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "CLD optimizes a log-likelihood lower bound. Differentiating " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{mdsm}}" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " (eq. (7)) with respect to the score model parameters, we show that the objective for CLD (Dockhorn et al., 2021) maximizes a lower bound on " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\log p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": ", not just " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\log p_{\\theta}(\\mathbf{u}_0)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": ", without appealing to the probability flow ODE." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "spans": [ + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "text", + "content": "Does my model use auxiliary variables? 
An example initial distribution is " + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_0^v |x) = \\mathcal{N}(0,\\mathbf{I})" + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "text", + "content": ". It is also common to set " + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta} = \\mathcal{N}(0,\\mathbf{I})" + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "text", + "content": ". Because the optimum for diffusions is " + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "inline_equation", + "content": "p_{\\theta} = q" + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "text", + "content": ", the optimal model has main and auxiliary dimensions independent at endpoints 0 and " + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "text", + "content": ". Does this mean that the model does not use auxiliary variables? In appendix B, we show that in this case the model can still use auxiliary variables at intermediate times. A sufficient condition is non-diagonal " + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} + \\mathbf{D}" + }, + { + "bbox": [ + 104, + 128, + 504, + 185 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 200, + 201, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 200, + 201, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 200, + 201, + 213 + ], + "type": "text", + "content": "5 EXPERIMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 225, + 504, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 225, + 504, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 225, + 504, + 271 + ], + "type": "text", + "content": "We test the MDM framework with handcrafted and learned diffusions. The handcrafted diffusions are (a) ALDA, used in (Mou et al., 2019) for accelerated gradient-based MCMC sampling (eq. (32)) and (b) MALDA: a modified version of ALDA (eq. (33)). Both have two auxiliary variables. We also learn diffusions with 1 and 2 auxiliary variables. We compare with VPSDE and ELBO-trained CLD." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 192, + 333, + 418, + 414 + ], + "blocks": [ + { + "bbox": [ + 104, + 281, + 504, + 326 + ], + "lines": [ + { + "bbox": [ + 104, + 281, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 504, + 326 + ], + "type": "text", + "content": "Table 2: BPD upper-bounds on image generation for a fixed architecture. CIFAR-10: learning outperforms CLD, and both outperform the standard choice of VPSDE. MNIST: learning matches VPSDE while the fixed auxiliary diffusions are worse. IMAGENET32: all perform similarly. Learning matches or surpasses the best fixed diffusion, while bypassing the need to choose a diffusion." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 192, + 333, + 418, + 414 + ], + "lines": [ + { + "bbox": [ + 192, + 333, + 418, + 414 + ], + "spans": [ + { + "bbox": [ + 192, + 333, + 418, + 414 + ], + "type": "table", + "html": "
ModelKCIFAR-10IMAGENET32MNIST
VPSDE13.203.701.26
Learned23.073.711.28
Learned33.083.721.33
CLD23.113.701.35
MALDA33.133.721.65
ALDA329.4333.08124.60
", + "image_path": "fdf8d67ef7ab386142ecce30396d5f2a1d6315eef6683da71db7614f71f6e3f6.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 166, + 495, + 444, + 564 + ], + "blocks": [ + { + "bbox": [ + 104, + 430, + 504, + 486 + ], + "lines": [ + { + "bbox": [ + 104, + 430, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 504, + 486 + ], + "type": "text", + "content": "Table 3: Parameter Efficiency. The first two rows display diffusions from previous work: VPSDE and CLD, both using score models with 108 million parameters on CIFAR-10. We train the rest using a score model with 35.7 million parameters. The learned diffusion matches the performance of VPSDE-large; changes in the inference can account for as much improvement as a 3x increase in score parameters. BPDs are upper-bounds." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 166, + 495, + 444, + 564 + ], + "lines": [ + { + "bbox": [ + 166, + 495, + 444, + 564 + ], + "spans": [ + { + "bbox": [ + 166, + 495, + 444, + 564 + ], + "type": "table", + "html": "
ModelKParametersCIFAR-10
VPSDE-large (Song et al., 2021)1108M3.08
CLD-large (Dockhorn et al., 2021)2108M3.31
Learned235.7M3.07
CLD235.7M3.11
VPSDE135.7M3.20
", + "image_path": "01192ce71982b1e7db7996fd6356dc767ae1a7fde3f013b46de19085bbbf4b46.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "text", + "content": "Following prior work, we train DBGMs for image generation. We use the U-Net from Ho et al. (2020). We input the auxiliary variables as extra channels, which only increases the score model parameters in the input and output convolutions (CLD and Learned 2 have 7,000 more parameters than VPSDE on CIFAR-10 and IMAGENET32 and only 865 more for MNIST). We use simple uniform dequantization. We report estimates of " + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{mdsm}}" + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "text", + "content": " (which reduces to the standard " + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{\\mathrm{dsm}}" + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "inline_equation", + "content": "K = 1" + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "text", + "content": "). We sample times using the importance sampling distribution from Song et al. (2021) with truncation set to " + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "inline_equation", + "content": "\\epsilon = 10^{-3}" + }, + { + "bbox": [ + 104, + 576, + 504, + 654 + ], + "type": "text", + "content": ". To ensure the truncated bound is proper, we use a likelihood described in appendix I." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": "Results. Table 2 shows that the inference process matters and displays. It displays DBGMs that we train and evaluate on CIFAR-10, IMAGENET32 and MNIST. This includes the existing VPSDE and CLD, the new MALDA and ALDA, and the new learned inference processes. All are trained with the 35.7M parameter architecture. For CIFAR-10, learning outperforms CLD, and both outperform the standard choice of VPSDE. For MNIST, learned diffusions match VPSDE while the three fixed auxiliary diffusions are worse. On IMAGENET32, all perform similarly. The take-away is that learning" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "matches or surpasses the best fixed diffusion performance and bypasses the choice of diffusion for each new dataset or score architecture. 
In Figure 1 we plot the generated samples from CIFAR10." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "text", + "content": "Table 3's first two rows display diffusion models from previous work: VPSDE (Song et al., 2021) and CLD (Dockhorn et al., 2021) both with the 108 million score model from Song et al. (2021) (labeled \"large\"). The rest are DBGMs that we train using the U-Net with 35.7 million parameters for CIFAR-10 and IMAGENET32 and 1.1 million for MNIST. Despite using significantly fewer parameters, the learned diffusion achieves similar BPD compared to the larger models, showing that changes in inference can account for as much improvement as a three-fold increase in parameters. While the larger architecture requires two GPUs for batch size 128 on CIFAR-10 on A100s, the smaller one only requires one; exploring inference processes can make diffusions more computationally accessible. Table 3 also demonstrates a tighter bound for CLD trained and evaluated with the MDM ELBO (" + }, + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "inline_equation", + "content": "\\leq" + }, + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "text", + "content": " 3.11) relative to existing probability flow-based evaluations (3.31)." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 162, + 236, + 289, + 363 + ], + "blocks": [ + { + "bbox": [ + 162, + 236, + 289, + 363 + ], + "lines": [ + { + "bbox": [ + 162, + 236, + 289, + 363 + ], + "spans": [ + { + "bbox": [ + 162, + 236, + 289, + 363 + ], + "type": "image", + "image_path": "73a7012161f0d7f4fed755ef1cf3aafd7643b185e471793fa0b40a09f59c1436.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 378, + 489, + 391 + ], + "lines": [ + { + "bbox": [ + 119, + 378, + 489, + 391 + ], + "spans": [ + { + "bbox": [ + 119, + 378, + 489, + 391 + ], + "type": "text", + "content": "Figure 1: CIFAR10 samples generated from the \"learned 2\" and MALDA generative models." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 323, + 236, + 451, + 363 + ], + "blocks": [ + { + "bbox": [ + 323, + 236, + 451, + 363 + ], + "lines": [ + { + "bbox": [ + 323, + 236, + 451, + 363 + ], + "spans": [ + { + "bbox": [ + 323, + 236, + 451, + 363 + ], + "type": "image", + "image_path": "429ec45c9b5517fcde31c68a944981b3368910a0b2c7d782d6604750d35a3300.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 402, + 212, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 402, + 212, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 212, + 415 + ], + "type": "text", + "content": "6 RELATED WORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 426, + 504, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 461 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 461 + ], + "type": "text", + "content": "Evidence Lower Bounds. Song et al. (2021); Huang et al. (2021) derive the ISM and DSM lower bounds on the model log likelihood. 
Our work extends their analysis to the multivariate diffusion setting to derive lower bounds on the log marginal of the data in the presence of auxiliary variables." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 471, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 471, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 471, + 504, + 529 + ], + "type": "text", + "content": "Auxiliary variables. Dupont et al. (2019) shows that augmented neural ODEs model a richer set of functions and Huang et al. (2020) uses this principle for normalizing flows. Hierarchical variational models and auto-encoders marginalize auxiliary variables to build expressive distributions (Ranganath et al., 2016; Sønderby et al., 2016; Maaløe et al., 2019; Vahdat & Kautz, 2020; Child, 2020). We apply this principle to DBGMs, including and extending CLD (Dockhorn et al., 2021)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "text", + "content": "Learning inference. Learning " + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "inline_equation", + "content": "q_{\\phi}" + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "inline_equation", + "content": "p_{\\theta}" + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "text", + "content": " is motivated in previous work (Kingma & Welling, 2013; Sohl-Dickstein et al., 2015; Kingma et al., 2021). Kingma et al. (2021) learn the noise schedule for VPSDE. 
For MDMs, there are parameters to learn beyond the noise schedule; " + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "text", + "content": " can be non-zero, " + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "text", + "content": " can diagonal or full, give " + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "text", + "content": " different time-varying functions, and learn " + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "inline_equation", + "content": "\\nabla \\mathbf{H}" + }, + { + "bbox": [ + 104, + 537, + 504, + 584 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 597, + 190, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 190, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 190, + 609 + ], + "type": "text", + "content": "7 DISCUSSION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "We present an algorithm for training multivariate diffusions with linear time-varying inference processes with a specified stationary distribution and any number of auxiliary variables. 
This includes automating transition kernel computation and providing a parameterization of diffusions that have a specified stationary distribution, which facilitate working with new diffusion processes, including learning the diffusion. The experiments show that learning matches or surpasses the best fixed diffusion performance, bypassing the need to choose a diffusion. MDMs achieve BPDs similar to univariate diffusions, with as many as three times more score parameters. The proposed MDM ELBO reports a tighter bound for the existing CLD relative to existing probability flow-based evaluations. This work enables future directions including interactions across data coordinates and using new stationary distributions." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 244, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 244, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 244, + 94 + ], + "type": "text", + "content": "8 ACKNOWLEDGEMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 506, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 152 + ], + "type": "text", + "content": "This work was 
generously funded by NIH/NHLBI Award R01HL148248, NSF Award 1922658 NRT-HDR: FUTURE Foundations, Translation, and Responsibility for Data Science, and NSF CAREER Award 2145542. The authors would additionally like to thank Chin-Wei Huang for helpful discussing regarding Huang et al. (2021)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 167, + 176, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 167, + 176, + 178 + ], + "spans": [ + { + "bbox": [ + 106, + 167, + 176, + 178 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 185, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 105, + 185, + 505, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 505, + 209 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 505, + 209 + ], + "type": "text", + "content": "Andrew D Barbour. Stein's method and poisson process convergence. Journal of Applied Probability, 25(A):175-184, 1988." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 216, + 506, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 216, + 506, + 250 + ], + "spans": [ + { + "bbox": [ + 107, + 216, + 506, + 250 + ], + "type": "text", + "content": "Andrew Campbell, Joe Benton, Valentin De Bortoli, Tom Rainforth, George Deligiannidis, and Arnaud Doucet. A continuous time framework for discrete denoising models. arXiv preprint arXiv:2205.14987, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 256, + 504, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 256, + 504, + 280 + ], + "spans": [ + { + "bbox": [ + 107, + 256, + 504, + 280 + ], + "type": "text", + "content": "Nanxin Chen, Yu Zhang, Heiga Zen, Ron J Weiss, Mohammad Norouzi, and William Chan. Wavegrad: Estimating gradients for waveform generation. 
arXiv preprint arXiv:2009.00713, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 286, + 504, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 286, + 504, + 311 + ], + "spans": [ + { + "bbox": [ + 107, + 286, + 504, + 311 + ], + "type": "text", + "content": "Rewon Child. Very deep vaes generalize autoregressive models and can outperform them on images. arXiv preprint arXiv:2011.10650, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 316, + 504, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 316, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 107, + 316, + 504, + 340 + ], + "type": "text", + "content": "Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 346, + 504, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 346, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 504, + 370 + ], + "type": "text", + "content": "Tim Dockhorn, Arash Vahdat, and Karsten Kreis. Score-based generative modeling with critically-damped Langevin diffusion. arXiv preprint arXiv:2112.07068, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 376, + 505, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 376, + 505, + 400 + ], + "spans": [ + { + "bbox": [ + 107, + 376, + 505, + 400 + ], + "type": "text", + "content": "Emilien Dupont, Arnaud Doucet, and Yee Whye Teh. Augmented neural odes. Advances in Neural Information Processing Systems, 32, 2019." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 407, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 407, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 107, + 407, + 504, + 430 + ], + "type": "text", + "content": "Bradley Efron. Tweedie's formula and selection bias. Journal of the American Statistical Association, 106(496):1602-1614, 2011." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 436, + 505, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 436, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 107, + 436, + 505, + 460 + ], + "type": "text", + "content": "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 466, + 505, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 466, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 107, + 466, + 505, + 499 + ], + "type": "text", + "content": "Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. *J. Mach. Learn. Res.*, 23: 47-1, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 507, + 505, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 507, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 107, + 507, + 505, + 532 + ], + "type": "text", + "content": "Chin-Wei Huang, Laurent Dinh, and Aaron Courville. Augmented normalizing flows: Bridging the gap between generative flows and latent variable models. arXiv preprint arXiv:2002.07101, 2020." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 537, + 505, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 537, + 505, + 571 + ], + "spans": [ + { + "bbox": [ + 107, + 537, + 505, + 571 + ], + "type": "text", + "content": "Chin-Wei Huang, Jae Hyun Lim, and Aaron C Courville. A variational perspective on diffusion-based generative models and score matching. Advances in Neural Information Processing Systems, 34, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 578, + 505, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 578, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 107, + 578, + 505, + 601 + ], + "type": "text", + "content": "Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 608, + 504, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 608, + 504, + 632 + ], + "spans": [ + { + "bbox": [ + 107, + 608, + 504, + 632 + ], + "type": "text", + "content": "Diederik P Kingma, Tim Salimans, Ben Poole, and Jonathan Ho. Variational diffusion models. arXiv preprint arXiv:2107.00630, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 638, + 504, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 638, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 107, + 638, + 504, + 662 + ], + "type": "text", + "content": "Zhifeng Kong, Wei Ping, Jiaji Huang, Kexin Zhao, and Bryan Catanzaro. Diffwave: A versatile diffusion model for audio synthesis. arXiv preprint arXiv:2009.09761, 2020." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 668, + 504, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 668, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 107, + 668, + 504, + 693 + ], + "type": "text", + "content": "Yi-An Ma, Tianqi Chen, and Emily Fox. A complete recipe for stochastic gradient mcmc. Advances in neural information processing systems, 28, 2015." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 698, + 505, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 698, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 698, + 505, + 731 + ], + "type": "text", + "content": "Lars Maaløe, Marco Fraccaro, Valentin Lievin, and Ole Winther. Biva: A very deep hierarchy of latent variables for generative modeling. Advances in neural information processing systems, 32, 2019." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + 
"content": "Gautam Mittal, Jesse Engel, Curtis Hawthorne, and Ian Simon. Symbolic music generation with diffusion models. arXiv preprint arXiv:2103.16091, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "type": "text", + "content": "Wenlong Mou, Yi-An Ma, Martin J Wainwright, Peter L Bartlett, and Michael I Jordan. High-order Langevin diffusion yields an accelerated mcmc algorithm. arXiv preprint arXiv:1908.10859, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 141, + 505, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 141, + 505, + 166 + ], + "spans": [ + { + "bbox": [ + 106, + 141, + 505, + 166 + ], + "type": "text", + "content": "Alex Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. arXiv preprint arXiv:2102.09672, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 171, + 505, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 171, + 505, + 206 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 505, + 206 + ], + "type": "text", + "content": "Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 212, + 504, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 212, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 212, + 504, + 236 + ], + "type": "text", + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. 
arXiv preprint arXiv:2204.06125, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 242, + 505, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 505, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 505, + 266 + ], + "type": "text", + "content": "Rajesh Ranganath, Sean Gerrish, and David Blei. Black box variational inference. In Artificial intelligence and statistics, pp. 814-822. PMLR, 2014." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 272, + 505, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 272, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 505, + 297 + ], + "type": "text", + "content": "Rajesh Ranganath, Dustin Tran, and David Blei. Hierarchical variational models. In International conference on machine learning, pp. 324-333. PMLR, 2016." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 302, + 504, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 302, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 302, + 504, + 327 + ], + "type": "text", + "content": "Danilo Rezende and Shakir Mohamed. Variational inference with normalizing flows. In International Conference on Machine Learning, pp. 1530-1538. PMLR, 2015." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 332, + 505, + 377 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 332, + 505, + 377 + ], + "spans": [ + { + "bbox": [ + 105, + 332, + 505, + 377 + ], + "type": "text", + "content": "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 384, + 504, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 504, + 408 + ], + "type": "text", + "content": "Simo Särkkä and Arno Solin. Applied stochastic differential equations, volume 10. Cambridge University Press, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 414, + 504, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 414, + 504, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 504, + 438 + ], + "type": "text", + "content": "Hiroshi Sasaki, Chris G Willcocks, and Toby P Breckon. Unit-ddpm: Unpaired image translation with denoising diffusion probabilistic models. arXiv preprint arXiv:2104.05358, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 444, + 504, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 444, + 504, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 504, + 478 + ], + "type": "text", + "content": "Jianghong Shi, Tianqi Chen, Ruoshi Yuan, Bo Yuan, and Ping Ao. Relation of a new interpretation of stochastic differential equations to ito process. Journal of Statistical physics, 148:579-590, 2012." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 485, + 504, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 485, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 485, + 504, + 520 + ], + "type": "text", + "content": "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 526, + 504, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 504, + 550 + ], + "type": "text", + "content": "Casper Kaae Sønderby, Tapani Raiko, Lars Maaløe, Søren Kaae Sønderby, and Ole Winther. Ladder variational autoencoders. Advances in neural information processing systems, 29, 2016." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 556, + 504, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 556, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 504, + 590 + ], + "type": "text", + "content": "Yang Song, Sahaj Garg, Jiaxin Shi, and Stefano Ermon. Sliced score matching: A scalable approach to density and score estimation. In Uncertainty in Artificial Intelligence, pp. 574-584. PMLR, 2020a." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 597, + 504, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 504, + 631 + ], + "type": "text", + "content": "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020b." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 638, + 504, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 638, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 504, + 672 + ], + "type": "text", + "content": "Yang Song, Conor Durkan, Iain Murray, and Stefano Ermon. Maximum likelihood training of score-based diffusion models. Advances in Neural Information Processing Systems, 34:1415-1428, 2021." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 678, + 504, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 504, + 703 + ], + "type": "text", + "content": "Michalis Titsias and Miguel Lázaro-Gredilla. Doubly stochastic variational bayes for non-conjugate inference. In International conference on machine learning, pp. 1971-1979. PMLR, 2014." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "text", + "content": "Arash Vahdat and Jan Kautz. Nvae: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 504, + 166 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 105, + 81, + 504, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 504, + 105 + ], + "type": "text", + 
"content": "Pascal Vincent. A connection between score matching and denoising autoencoders. *Neural computation*, 23(7):1661-1674, 2011." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 504, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 504, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 504, + 136 + ], + "type": "text", + "content": "L Yin and P Ao. Existence and construction of dynamical potential in nonequilibrium processes without detailed balance. Journal of Physics A: Mathematical and General, 39(27):8593, 2006." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 141, + 504, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 141, + 504, + 166 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 504, + 166 + ], + "type": "text", + "content": "Zhenzhong Zhang and Dayue Chen. A new criterion on existence and uniqueness of stationary distribution for diffusion processes. Advances in Difference Equations, 2013(1):1-6, 2013." 
+ } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 440, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 440, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 440, + 94 + ], + "type": "text", + "content": "A AUTOMATED SCORE MATCHING WITH LEARNED INFERENCE" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 495, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 495, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 495, + 118 + ], + "type": "text", + "content": "Like for the MDM ELBO, the methods in this work apply to training with the score matching loss:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 146, + 121, + 463, + 141 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 121, + 463, + 141 + ], + "spans": [ + { + "bbox": [ + 146, + 121, + 463, + 141 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {S M}} (x, \\theta , \\phi) = T \\mathbb {E} _ {t \\sim U [ 0, T ]} \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\lambda (t) \\| s _ {\\theta} (\\mathbf {y} _ {t}, t) - \\nabla_ {\\mathbf {y} _ {t}} \\log q _ {\\phi} (\\mathbf {y} _ {t} | x) \\| 
_ {2} ^ {2} \\right],", + "image_path": "2a889a26a13f2da5ba67833f655253113f32cbc80ef7a1ad24d94fbaffe6f3b4.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "inline_equation", + "content": "\\lambda :[0,T]\\to \\mathbb{R}_+" + }, + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "text", + "content": " is a weighing function. The score-matching loss is often optimized in its simplified noise prediction form:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 194, + 170, + 414, + 191 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 170, + 414, + 191 + ], + "spans": [ + { + "bbox": [ + 194, + 170, + 414, + 191 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {N P}} (x, \\theta , \\phi) = T \\mathbb {E} _ {t \\sim U [ 0, T ]} \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} | x)} \\left[ \\| \\epsilon_ {\\theta} (\\mathbf {y} _ {t}, t) - \\epsilon \\| _ {2} ^ {2} \\right]", + "image_path": "7f438d6f2b28625ccb97cc26e66ce7d26b441504841f37f3c29361dcac4a5971.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "s_{\\theta} = -\\mathbf{L}_{t}^{-\\top}\\epsilon_{\\theta}" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_t = \\mu_t + 
\\mathbf{L}_t\\epsilon" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": " is the noise used in sampling " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_t" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": ". We describe here how the improvements to the ELBO studied in this work carry over to " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{SM}}" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{NP}}" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": ". 
In the following let " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "q_{0}" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": " be the data distribution, let " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "p_{(\\theta ,\\phi),0}" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": " be the model's distribution of the data, and recall that the model is defined by " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "(s_{\\theta},f_{\\phi},g_{\\phi})" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": " and prior " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": " via a continuous-time stochastic process with drift coefficient " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "g_{\\phi}^{2}s_{\\theta} - f_{\\phi}" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": " and diffusion coefficient " + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "inline_equation", + "content": "g_{\\phi}" + }, + { + "bbox": [ + 104, + 194, + 504, + 255 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": "First, minimizing " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{SM}}" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{NP}}" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{y}_t}\\log q_\\phi (\\mathbf{y}_t) = s_\\theta (\\mathbf{y}_t,t)" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": " does not alone imply that " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "p_{(\\theta ,\\phi),0}" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": " will equal " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "q_{0}" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": "; it must also be that " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "q_{\\phi ,T}\\approx \\pi" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": ". 
Foregoing this requirement means " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": " will produce samples that the generative model may not be able to push onto the path the model was trained on (formally, the score of the generative model would not equal the time-reversal of the forward score even if " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "s_\\theta" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": " equals the forward score). This condition can be satisfied if " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "q_{\\phi}" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": " can be chosen with stationary distribution " + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 259, + 504, + 327 + ], + "type": "text", + "content": ". Section 3.4 describes how to accomplish this." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 331, + 504, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 504, + 355 + ], + "type": "text", + "content": "Next, for any fixed " + }, + { + "bbox": [ + 104, + 331, + 504, + 355 + ], + "type": "inline_equation", + "content": "q_{\\phi}" + }, + { + "bbox": [ + 104, + 331, + 504, + 355 + ], + "type": "text", + "content": ", automatic transitions from section 3.3 streamline the computation of the score matching loss, allowing for simple score computation for a wide class of diffusions beyond VP." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": "Finally, for a fixed " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "q_{\\phi}" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "q_{\\phi,T} \\approx \\pi" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": " and a score architecture " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "s_{\\theta}" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": ", minimizing " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{SM}}" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{NP}}" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": " w.r.t " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": " may be suboptimal. Optimization, like for the elbo, carries over to score matching and can close this gap; learning w.r.t. 
both " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "\\theta, \\phi" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": " increases the ability to successfully minimize the loss at each " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": " (section 3.5). In other words, since the generative model is defined by " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "(s_{\\theta}, f_{\\phi}, g_{\\phi})" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": ", learning " + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "inline_equation", + "content": "q_{\\phi}" + }, + { + "bbox": [ + 104, + 359, + 505, + 427 + ], + "type": "text", + "content": " means the loss trains all three components of the generative model rather than just one. In summary, score matching is automatic and can learn over the space of linear diffusions that tend to the model prior." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 441, + 367, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 367, + 454 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 367, + 454 + ], + "type": "text", + "content": "B DOES MY MODEL USE AUXILIARY VARIABLES?" 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "content": "In section 3 we gave the example choice of " + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_0^v |x) = \\mathcal{N}(0,\\mathbf{I})" + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "content": " coordinate-wise. It is also a common choice to set " + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta} = \\mathcal{N}(0,\\mathbf{I})" + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "content": ". Because the optimum in diffusion models is " + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "inline_equation", + "content": "p_{\\theta} = q" + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "content": ", we see a peculiar phenomenon under this choice: the model has main and auxiliary dimensions independent at both endpoints 0 and " + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "content": ". Does this mean that the model does not use auxiliary variables? 
We show that even when " + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "content": " have main and auxiliary variables independent, the model can use the auxiliary variables. A sufficient condition is " + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} + \\mathbf{D}" + }, + { + "bbox": [ + 104, + 465, + 504, + 533 + ], + "type": "text", + "content": " is non-diagonal." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": "To make this precise, we recall that we model with " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{u}_T^z = x)" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ". 
To show the model is using auxiliary variables, we just need to show that " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_T^z" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": " (main coordinate at " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ") depends on " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_t^v" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": " (aux. coordinate at " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ") for " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "T > t" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ". At optimum, " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{u}_T^z,\\mathbf{u}_t^v) = q_{\\phi}(\\mathbf{y}_0^z,\\mathbf{y}_{T - t}^v)" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ". Therefore it is sufficient to show that for some time " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s^v |\\mathbf{y}_0^z)\\neq q_{\\phi}(\\mathbf{y}_s^v)" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ". 
Because " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0^z" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": " is determined by " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ", we need to show that " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s^v |x)\\neq q_{\\phi}(\\mathbf{y}_s^v)" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ". To do that, we first derive " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|x)" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": " and then marginalize to get " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s^v |x)" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": " from it. Since the former is 2D Gaussian, the latter is available in terms of the former's mean and covariance. 
Suppose " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\mathbf{y}_0^v ] = 0" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} = [[0,-1],[1,0]]" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\mathbf{D} = [[1,0],[0,1]]" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": " and we have " + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "inline_equation", + "content": "s = .1" + }, + { + "bbox": [ + 104, + 537, + 504, + 617 + ], + "type": "text", + "content": ". We have:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 620, + 504, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 620, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 138, + 620, + 504, + 647 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\mathbf {y} _ {s} | x ] = \\exp \\left[ - s (\\mathbf {Q} + \\mathbf {D}) \\right] \\binom {x} {0} = \\exp \\left[ \\left[ \\begin{array}{l l} -0.1 & 0.1 \\\\ -0.1 & -0.1 \\end{array} \\right] \\right] \\binom {x} {0} = \\binom {0.9003x} {-0.
090x} \\tag {18}", + "image_path": "7d090f2a19e8018c8df7f4db2ad8f65bcbdca691b6a1d60c83f500eecefbd371.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "spans": [ + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": "Regardless of the covariance, any 1D marginal of this 2D Gaussian will have a mean that is a function of " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ", meaning that " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s^v |x)" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " does not equal " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s^v)" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " (which is also a Gaussian but with mean depending on " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\mathbf{x}'s" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " mean rather than " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " itself). Therefore, even under the setup with independent endpoints, the optimal model makes use of the intermediate auxiliary variables in its final modeling distribution " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{u}_T^z = x)" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Are there choices of " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " that lead to learning models that don't make use of the extra dimensions? As mentioned, in the inference process, " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " is responsible for mixing information among the" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": 
"text", + "content": "coordinates, and is the only source of this when " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " is diagonal. Then, if " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " is diagonal, none of the coordinates for a given feature " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_j" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " (including " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{tj}^z" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{tj}^{v_1},\\ldots ,\\mathbf{u}_{tj}^{v_{K - 1}}" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": ") interact for any " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": ". 
Then, since " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "p_\\theta = q" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " at optimum, independence of the coordinates at all " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " imply the same in " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "p_\\theta" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": " and the model will not make use of any auxiliary variables when modeling the marginal " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "\\log p_{\\theta}(\\mathbf{u}_T^z = x)" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 144, + 302, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 302, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 302, + 156 + ], + "type": "text", + "content": "C STATIONARY PARAMETERIZATION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 168, + 334, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 168, + 334, + 180 + ], + "spans": [ + { + "bbox": [ + 105, + 168, + 334, + 180 + ], + "type": "text", + "content": "The non-linear time-homogeneous Ito process family is:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 253, + 187, + 504, + 200 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 187, + 504, + 200 + ], + "spans": [ + { + "bbox": [ + 253, + 187, + 504, + 200 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = f (\\mathbf {y}) d t + g (\\mathbf {y}) \\mathbf {B} _ {t}. \\tag {19}", + "image_path": "97acfde9bd58131b822b1bc256be037755bc6db94528ad3398fc32ff7379265d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": "This family can be restricted to those with stationary distributions. Ma et al. (2015) show a complete recipe to span the subset of this family with a desired stationary distribution. 
Let " + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": " be skew-symmetric " + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "inline_equation", + "content": "(-\\mathbf{Q} = \\mathbf{Q}^{\\top})" + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": " is positive semi-definite. Suppose the desired stationary distribution is " + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "inline_equation", + "content": "q_{\\infty}(\\mathbf{y})" + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": ". For a matrix " + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "inline_equation", + "content": "\\sqrt{\\mathbf{A}}" + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": " refer to the matrix square root defined by " + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "inline_equation", + "content": "\\mathbf{a} = \\sqrt{\\mathbf{A}} \\iff \\mathbf{A} = \\mathbf{aa}^{\\top}" + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": ". Then, Ma et al. 
(2015) show that, setting " + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "inline_equation", + "content": "\\mathbf{H}(\\mathbf{y}) = -\\log q_{\\infty}(\\mathbf{y})" + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "inline_equation", + "content": "g(\\mathbf{y}) = \\sqrt{2\\mathbf{D}(\\mathbf{y})}" + }, + { + "bbox": [ + 104, + 205, + 504, + 267 + ], + "type": "text", + "content": ", and" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 274, + 505, + 308 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 274, + 505, + 308 + ], + "spans": [ + { + "bbox": [ + 121, + 274, + 505, + 308 + ], + "type": "interline_equation", + "content": "f (\\mathbf {y}) = - \\left[ \\mathbf {D} (\\mathbf {y}) + \\mathbf {Q} (\\mathbf {y}) \\right] \\nabla \\mathbf {H} (\\mathbf {y}) + \\boldsymbol {\\Gamma} (\\mathbf {y}), \\quad \\boldsymbol {\\Gamma} _ {i} (\\mathbf {y}) = \\sum_ {j = 1} ^ {d} \\frac {\\partial}{\\partial \\mathbf {z} _ {j}} \\left(\\mathbf {D} _ {i j} (\\mathbf {y}) + \\mathbf {Q} _ {i j} (\\mathbf {y})\\right), \\tag {20}", + "image_path": "7054b77073e17156a4a813911bdaf1a80887946625038a1c4a62d89d4567ca4e.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 314, + 504, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 504, + 337 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 504, + 337 + ], + "type": "text", + "content": "yields a process " + }, + { + "bbox": [ + 104, + 314, + 504, + 337 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_t" + }, + { + "bbox": [ + 104, + 314, + 504, + 337 + ], + "type": "text", + "content": " with stationary distribution " + }, + { + "bbox": [ + 104, + 314, + 504, + 337 + ], + "type": "inline_equation", + "content": "q_{\\infty}" + }, + { + "bbox": [ + 104, + 314, + 504, + 337 + ], + "type": "text", + 
"content": ". We extend it to time-varying (time inhomogeneous) processes." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 350, + 375, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 350, + 375, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 375, + 363 + ], + "type": "text", + "content": "Theorem 2. " + }, + { + "bbox": [ + 105, + 350, + 375, + 363 + ], + "type": "inline_equation", + "content": "q_{\\infty}(\\mathbf{y})\\propto \\exp [-H(\\mathbf{y})]" + }, + { + "bbox": [ + 105, + 350, + 375, + 363 + ], + "type": "text", + "content": " is a stationary distribution of" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 160, + 369, + 504, + 402 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 369, + 504, + 402 + ], + "spans": [ + { + "bbox": [ + 160, + 369, + 504, + 402 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = \\left(- [ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) ] \\nabla \\mathbf {H} (\\mathbf {y}) + \\boldsymbol {\\Gamma} (\\mathbf {y}, t)\\right) d t + \\sqrt {2 \\mathbf {D} (\\mathbf {y} , t)} \\mathbf {B} _ {t}, \\tag {21}", + "image_path": "b5221a671d822782fcc029f061b36f1ea3d305d7c50f661ea95718a07a523ef7.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 407, + 121, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 121, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 121, + 419 + ], + "type": "text", + "content": "for" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 214, + 426, + 505, + 459 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 426, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 214, + 426, + 505, + 459 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\Gamma} _ {i} (\\mathbf {y}, t) = \\sum_ {j = 1} ^ {d} \\frac {\\partial}{\\partial \\mathbf {y} _ {j}} \\left(\\mathbf {D} _ {i j} 
(\\mathbf {y}, t) + \\mathbf {Q} _ {i j} (\\mathbf {y}, t)\\right). \\tag {22}", + "image_path": "bd9b1ffb57a75147759a09ddc08b1a4b0f6ab43fd521bd17ff076190e8cfacde.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 473, + 262, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 473, + 262, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 262, + 485 + ], + "type": "text", + "content": "Proof. The Fokker Planck equation is:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 155, + 491, + 505, + 521 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 491, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 155, + 491, + 505, + 521 + ], + "type": "interline_equation", + "content": "\\partial_ {t} q (\\mathbf {y}, t) = - \\sum_ {i} \\frac {\\partial}{\\partial \\mathbf {y} _ {i}} [ f _ {i} (\\mathbf {y}, t) q (\\mathbf {y}, t) ] + \\sum_ {i, j} \\frac {\\partial^ {2}}{\\partial \\mathbf {y} _ {i} \\partial \\mathbf {y} _ {j}} [ \\mathbf {D} _ {i j} (\\mathbf {y}, t) q (\\mathbf {y}, t) ] \\tag {23}", + "image_path": "e635d6b0b88cda704c5cb6575cc2ad47b74440b42660e76731eb903c5cbda7bd.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "text", + "content": "A stationary distribution is one where the Fokker-Planck right hand side is equal to 0. 
To show that the stationary characterization also holds of time-inhomogeneous processes with " + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "inline_equation", + "content": "\\mathbf{D}(\\mathbf{y},t)" + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}(\\mathbf{y},t)" + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "text", + "content": ", we take two steps, closely following Yin & Ao (2006); Shi et al. (2012); Ma et al. (2015), but noting that there is no requirement for " + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "text", + "content": " to be free of " + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 528, + 504, + 583 + ], + "type": "text", + "content": ". 
First, we show that the Fokker-Planck equation can be re-written as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 164, + 590, + 505, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 590, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 164, + 590, + 505, + 622 + ], + "type": "interline_equation", + "content": "\\partial_ {t} q (\\mathbf {y}, t) = \\nabla \\cdot \\left(\\left[ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) \\right] \\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right]\\right) \\tag {24}", + "image_path": "bfdfc012ef47ced36d1a66f8c5f41c58f950dae3b7eebd1dd965adb33453ccbe.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 628, + 447, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 447, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 447, + 639 + ], + "type": "text", + "content": "Second, because the whole expression is set to 0 when the inside expression equals 0" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 239, + 646, + 504, + 659 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 646, + 504, + 659 + ], + "spans": [ + { + "bbox": [ + 239, + 646, + 504, + 659 + ], + "type": "interline_equation", + "content": "q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) = 0, \\tag {25}", + "image_path": "3c7ca8a8921dceaa24bda2ca8d89fe2466682631a9125441404e1cb77ac8f091.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 665, + 504, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 686 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 686 + ], + "type": "text", + "content": "we just need to show that this holds when " + }, + { + "bbox": [ + 104, + 665, + 504, + 686 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y},t) = \\exp [-H(\\mathbf{y})] / 
\\mathbf{Z}" + }, + { + "bbox": [ + 104, + 665, + 504, + 686 + ], + "type": "text", + "content": ". The second step is concluded because" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 140, + 691, + 470, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 691, + 470, + 715 + ], + "spans": [ + { + "bbox": [ + 140, + 691, + 470, + 715 + ], + "type": "interline_equation", + "content": "\\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right] = \\frac {1}{\\mathbf {Z}} \\left[ \\exp [ - H (\\mathbf {y}) ] \\nabla H (\\mathbf {y}) + \\nabla \\exp [ - H (\\mathbf {y}) ] \\right] = 0,", + "image_path": "f9224076cffb458bf067d4dedff56c8bc9b40b991232695b76274fbfaeac3628.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 720, + 328, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 328, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 328, + 733 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 720, + 328, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}" + }, + { + "bbox": [ + 105, + 720, + 328, + 733 + ], + "type": "text", + "content": " is the normalization constant of " + }, + { + "bbox": [ + 105, + 720, + 328, + 733 + ], + "type": "inline_equation", + "content": "\\exp (-H(y))" + }, + { + "bbox": [ + 105, + 720, + 328, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 312, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "content": "It only remains to show that the Fokker-Planck equation can be re-written in divergence form with time-dependent " + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "inline_equation", + "content": "\\mathbf{Q},\\mathbf{D}" + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "content": ". In the following let " + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "inline_equation", + "content": "Q_{ijt}" + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "content": " denote " + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{ij}(\\mathbf{y},t)" + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "content": " and likewise for " + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "inline_equation", + "content": "D_{ijt}" + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "inline_equation", + "content": "\\partial_i" + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "content": " denote " + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "inline_equation", + "content": "\\frac{\\partial}{\\partial\\mathbf{y}_i}" + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "content": " and let it denote " + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "inline_equation", + "content": "\\frac{d}{d\\mathbf{y}_i}" + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "content": " for scalar functions. We will use " + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "inline_equation", + "content": "[Ax]_i = \\sum_jA_{ij}x_j" + }, + { + "bbox": [ + 104, + 82, + 506, + 123 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 136, + 126, + 474, + 286 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 126, + 474, + 286 + ], + "spans": [ + { + "bbox": [ + 136, + 126, + 474, + 286 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial_ {t} q _ {t} = \\nabla \\cdot \\left(\\left[ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) \\right] [ q \\nabla H + \\nabla q ]\\right) \\\\ = \\sum_ {i} \\partial_ {i} \\left(\\left[ [ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) ] [ q \\nabla H + \\nabla q ] \\right] _ {i}\\right) \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\nabla H + \\nabla q \\right] _ {j} \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H + \\partial_ {j} q \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ 
{i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} q \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} D _ {i j t} \\left[ \\partial_ {j} q \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} Q _ {i j t} \\left[ \\partial_ {j} q \\right] \\\\ \\end{array}", + "image_path": "61dd0ba33e4a2fa80de6b4e2a041d36f4e29569b452f06bac10b9a20398849c6.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "text", + "content": "We re-write the 2nd and 3rd term. Holding " + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "text", + "content": " fixed and noting " + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "text", + "content": " is scalar, we get the product rule " + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "inline_equation", + "content": "\\sum_{j}D_{ijt}(\\partial_{j}q) = \\sum_{j}\\partial_{j}[D_{ijt}q] - q\\sum_{j}\\partial_{j}D_{ijt}" + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "text", + "content": " for each " + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "text", + "content": ", and likewise for " + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 288, + 504, + 313 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 151, + 316, + 460, + 397 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 316, + 460, + 397 + ], + "spans": [ + { + "bbox": [ + 151, + 316, + 460, + 397 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} D _ {i j t} \\left[ \\partial_ {j} q \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} Q _ {i j t} \\left[ \\partial_ {j} q \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] + \\sum_ {i} \\partial_ {i} \\sum_ {j} \\partial_ {j} \\left[ D _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} D _ {i j t} \\\\ + \\sum_ {i} \\partial_ {i} \\sum_ {j} \\partial_ {j} \\left[ Q _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} Q _ {i j t} \\\\ \\end{array}", + "image_path": "47b59331cbff3bc1493dfc70fa975600bab0bb055ae3d39a7804d1598b844028.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 399, + 443, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 443, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 443, + 414 + ], + "type": "text", + "content": "Because " + }, + { + "bbox": [ + 105, + 399, + 443, + 414 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}(\\mathbf{y},t)" + }, + { + "bbox": [ + 105, + 399, + 443, + 414 + ], + "type": "text", + "content": " is skew-symmetric, we have that " + }, + { + "bbox": [ + 105, + 399, + 443, + 414 + ], + "type": "inline_equation", + "content": "\\sum_{i}\\partial_{i}\\sum_{j}\\partial_{j}[Q_{ijt}q] = 0" + }, + { + "bbox": [ + 105, + 399, + 443, + 414 + ], + "type": "text", + "content": ", leaving" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 416, + 501, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 416, + 501, + 520 + ], + "spans": [ + { + "bbox": [ + 108, + 
416, + 501, + 520 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial_ {t} q _ {t} = \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ q \\partial_ {j} H \\right] \\right] + \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\partial_ {j} \\left[ D _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} D _ {i j t} - q \\sum_ {j} \\partial_ {j} Q _ {i j t} \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} H \\right] q \\right] + \\sum_ {i} \\partial_ {i} \\left[ \\sum_ {j} \\partial_ {j} \\left[ D _ {i j t} q \\right] - q \\sum_ {j} \\partial_ {j} \\left(D _ {i j t} + Q _ {i j t}\\right) \\right] \\\\ = \\sum_ {i} \\partial_ {i} \\left[ \\left(\\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} H \\right] - \\sum_ {j} \\partial_ {j} \\left(D _ {i j t} + Q _ {i j t}\\right)\\right) q \\right] + \\sum_ {i} \\sum_ {j} \\frac {\\partial^ {2}}{\\mathbf {y} _ {i} \\mathbf {y} _ {j}} \\left(D _ {i j t} q\\right) \\\\ \\end{array}", + "image_path": "bd01d56dbc4e2c759664a0cc04513e395f76a4fff87e03753479000399afe858.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 539, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 567 + ], + "type": "text", + "content": "Recalling that " + }, + { + "bbox": [ + 104, + 539, + 504, + 567 + ], + "type": "inline_equation", + "content": "f_{i}(\\mathbf{y},t) = \\left(-[D + Q]\\nabla H + \\Gamma\\right)_{i}" + }, + { + "bbox": [ + 104, + 539, + 504, + 567 + ], + "type": "text", + "content": " and again that " + }, + { + "bbox": [ + 104, + 539, + 504, + 567 + ], + "type": "inline_equation", + "content": "[Ax]_i = \\sum_j A_{ij}x_j" + }, + { + "bbox": [ + 104, + 539, + 504, + 567 + ], + "type": "text", + "content": ", we have equality with the original 
Fokker-Planck" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 140, + 570, + 473, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 570, + 473, + 651 + ], + "spans": [ + { + "bbox": [ + 140, + 570, + 473, + 651 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} = \\sum_ {i} \\partial_ {i} \\left[ \\left(\\sum_ {j} \\left[ D _ {i j t} + Q _ {i j t} \\right] \\left[ \\partial_ {j} H \\right] - \\sum_ {j} \\partial_ {j} \\left(D _ {i j t} + Q _ {i j t}\\right)\\right) q \\right] + \\sum_ {i j} \\frac {\\partial^ {2}}{\\mathbf {y} _ {i} \\mathbf {y} _ {j}} \\left(D _ {i j t} q\\right) \\\\ = - \\sum_ {i} \\frac {\\partial}{\\partial \\mathbf {y} _ {i}} \\left[ f _ {i} (\\mathbf {y}, t) q (\\mathbf {y}, t) \\right] + \\sum_ {i j} \\frac {\\partial^ {2}}{\\mathbf {y} _ {i} \\mathbf {y} _ {j}} \\left[ \\mathbf {D} _ {i j} (\\mathbf {y}, t) q (\\mathbf {y}, t) \\right] \\\\ = \\partial_ {t} q (\\mathbf {y}, t) \\\\ \\end{array}", + "image_path": "3d9f718b6abeb0f9643706f107bb5417dd155ec46c0f700860a1a67eba2da9d6.jpg" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 494, + 652, + 504, + 662 + ], + "blocks": [ + { + "bbox": [ + 494, + 652, + 504, + 662 + ], + "lines": [ + { + "bbox": [ + 494, + 652, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 494, + 652, + 504, + 662 + ], + "type": "image", + "image_path": "5a70606959cc1f55516e353eacfd3cd59104183ac5d0662fefdc5b1c756ac428.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 675, + 504, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 675, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 675, + 504, + 696 + ], + "type": "text", + "content": "We have shown " + }, + { + "bbox": [ + 104, + 675, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\exp[-H(\\mathbf{y})] / \\mathbf{Z}" + }, + { + "bbox": [ + 104, + 675, + 
504, + 696 + ], + "type": "text", + "content": " is a stationary distribution of the time-varying non-linear Ito process:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 162, + 698, + 504, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 698, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 162, + 698, + 504, + 730 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = \\left(- [ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) ] \\nabla H (\\mathbf {y}) + \\boldsymbol {\\Gamma} (\\mathbf {y}, t)\\right) d t + \\sqrt {2 \\mathbf {D} (\\mathbf {y} , t)} \\mathbf {B} _ {t}. \\tag {26}", + "image_path": "18179233917b3c43faf542e2865e2eda67069cfb3e04904e1cc78d96292ea9a1.jpg" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": "However, for some choices of " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}, \\mathbf{D}" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, 
+ 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\exp[-H(\\mathbf{y})] / \\mathbf{Z}" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " is not necessarily the unique stationary distribution. One problematic case can occur as follows. Suppose that row " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "(\\mathbf{Q} + \\mathbf{D})" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " is all-zero; in this case, " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "d\\mathbf{y}_i = 0" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " which implies that " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "(\\mathbf{y}_i)_t = (\\mathbf{y}_i)_0" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "t > 0" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": ". Then, the initial distribution is also a stationary distribution. To rule out such pathological diffusions, we make the assumption that " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} + \\mathbf{D}" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " is full rank. 
Then, for uniqueness, recall that stationary distributions are the zeros of" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 164, + 142, + 444, + 174 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 142, + 444, + 174 + ], + "spans": [ + { + "bbox": [ + 164, + 142, + 444, + 174 + ], + "type": "interline_equation", + "content": "\\partial_ {t} q (\\mathbf {y}, t) = \\nabla \\cdot \\left(\\left[ \\mathbf {D} (\\mathbf {y}, t) + \\mathbf {Q} (\\mathbf {y}, t) \\right] \\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right]\\right)", + "image_path": "b5933e1f6a55b78dbf05ffb614d138f0e871f98dc6696b965c8a85ce61f0a969.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 178, + 395, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 178, + 395, + 191 + ], + "spans": [ + { + "bbox": [ + 105, + 178, + 395, + 191 + ], + "type": "text", + "content": "where the expression is of the form " + }, + { + "bbox": [ + 105, + 178, + 395, + 191 + ], + "type": "inline_equation", + "content": "\\mathbf{A}\\mathbf{v}" + }, + { + "bbox": [ + 105, + 178, + 395, + 191 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 105, + 178, + 395, + 191 + ], + "type": "inline_equation", + "content": "\\mathbf{A} = \\mathbf{D}(\\mathbf{y},t) + \\mathbf{Q}(\\mathbf{y},t)" + }, + { + "bbox": [ + 105, + 178, + 395, + 191 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 233, + 194, + 375, + 216 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 194, + 375, + 216 + ], + "spans": [ + { + "bbox": [ + 233, + 194, + 375, + 216 + ], + "type": "interline_equation", + "content": "\\mathbf {v} = \\left[ q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}) + \\nabla q (\\mathbf {y}, t) \\right].", + "image_path": "d4dc5cc41a47a23c0f9ab5279b1616949161348c97c0628e559f0c5b7b352306.jpg" + } + ] + } + ], + 
"index": 4 + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "text", + "content": "Under the assumption that " + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} + \\mathbf{D}" + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "text", + "content": " is full rank, the expression can only be zero when " + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 104, + 219, + 504, + 243 + ], + "type": "text", + "content": " is zero. To show uniqueness under the full rank assumption, one must then show that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 244, + 247, + 365, + 260 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 247, + 365, + 260 + ], + "spans": [ + { + "bbox": [ + 244, + 247, + 365, + 260 + ], + "type": "interline_equation", + "content": "\\nabla q (\\mathbf {y}, t) = - q (\\mathbf {y}, t) \\nabla H (\\mathbf {y}).", + "image_path": "4582585f49a1fbfe1df5df5cbab26d9bb268905bf3c5181d859682d5f6514b25.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 263, + 504, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 263, + 504, + 287 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 504, + 287 + ], + "type": "text", + "content": "holds only if " + }, + { + "bbox": [ + 104, + 263, + 504, + 287 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}, t) = \\exp[-H(\\mathbf{y})] / \\mathbf{Z}" + }, + { + "bbox": [ + 104, + 263, + 504, + 287 + ], + "type": "text", + "content": ". 
Even if " + }, + { + "bbox": [ + 104, + 263, + 504, + 287 + ], + "type": "inline_equation", + "content": "\\exp[-H(\\mathbf{y})] / \\mathbf{Z}" + }, + { + "bbox": [ + 104, + 263, + 504, + 287 + ], + "type": "text", + "content": " is the unique stationary distribution, convergence to that distribution is a question. See Zhang & Chen (2013) for more details." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "text", + "content": "Learning " + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\phi}" + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_{\\phi}" + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "text", + "content": " in the MDM ELBO helps push " + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_T" + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "text", + "content": " to the model prior " + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 292, + 504, + 314 + ], + "type": "text", + "content": " and avoid issues like those discussed." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 327, + 218, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 218, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 218, + 338 + ], + "type": "text", + "content": "C.1 LINEAR PROCESSES" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": "Next, we specialize this general family to linear Itô processes to maintain tractable transition distributions. A linear process is one where the drift " + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "inline_equation", + "content": "f(\\mathbf{y},t)" + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": " and diffusion " + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "inline_equation", + "content": "g(\\mathbf{y},t)" + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": " are linear functions of " + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": ". 
We express the drift function of a non-linear time-varying Itô process with stationary distribution proportional to " + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\exp[-H(\\mathbf{y})]" + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 221, + 396, + 388, + 410 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 396, + 388, + 410 + ], + "spans": [ + { + "bbox": [ + 221, + 396, + 388, + 410 + ], + "type": "interline_equation", + "content": "- (\\mathbf {Q} (\\mathbf {y}, t) + \\mathbf {D} (\\mathbf {y}, t)) \\nabla H (\\mathbf {y}) + \\Gamma (\\mathbf {y}, t).", + "image_path": "112e1a8f53de6417bc285fcac3d8704407fddaa30004c9c621ae2b6f47cefe18.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": "Next, linear Ito processes have Gaussian stationary distributions (Särkkä & Solin, 2019) so " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "H(\\mathbf{y})" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " must be quadratic and " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\nabla H(\\mathbf{y})" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " is linear, and neither are constant in " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": ". 
Because " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\nabla H(\\mathbf{y})" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " is linear, it can be expressed as " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{S}\\mathbf{y}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " for some matrix " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " is the inverse of the covariance matrix. Because " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\nabla H" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " is multiplied by " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}, \\mathbf{D}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": ", this means that " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}, \\mathbf{D}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " must be free of " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": ". 
Recalling that " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " is expressed as a sum of derivatives w.r.t " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} + \\mathbf{D}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": ", this means that " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " must satisfy " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\Gamma = 0" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": ". Next, because of the stationary requirement that " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "g(t) = \\sqrt{2\\mathbf{D}(\\mathbf{y},t)}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": ", we can also conclude by the restriction on " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " that the diffusion coefficient function must be independent of the state " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": ". 
Our final form for linear time-varying processes with stationary distributions " + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,\\mathbf{S}^{-1})" + }, + { + "bbox": [ + 104, + 414, + 505, + 506 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 211, + 510, + 504, + 546 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 510, + 504, + 546 + ], + "spans": [ + { + "bbox": [ + 211, + 510, + 504, + 546 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = \\underbrace {- \\left[ \\mathbf {Q} (t) + \\mathbf {D} (t) \\right] \\mathbf {S} \\mathbf {y}} _ {f (\\mathbf {y}, t)} d t + \\underbrace {\\sqrt {2 \\mathbf {D} (t)}} _ {g (t)} d \\mathbf {B} _ {t} \\tag {27}", + "image_path": "aefb2d4e69183891bf1a77bd4fdf5b58f11d19d4aa83edb98c0af06e86ab6975.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 559, + 228, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 559, + 228, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 228, + 572 + ], + "type": "text", + "content": "C.2 PARAMETERIZING " + }, + { + "bbox": [ + 105, + 559, + 228, + 572 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\phi}" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "spans": [ + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "inline_equation", + "content": "b_{q}(s)" + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "text", + "content": " is a positive scalar function defined on the time domain with known integral. 
Suppose " + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{Q}}_{\\phi}" + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "text", + "content": " is any matrix. Then " + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{Q}}_{\\phi} - \\tilde{\\mathbf{Q}}_{\\phi}^{\\top}" + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "text", + "content": " is skew-symmetric with " + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{Q}}_{\\phi, ij} = -\\tilde{\\mathbf{Q}}_{\\phi, ji}" + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "text", + "content": ". We can set " + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\phi}" + }, + { + "bbox": [ + 104, + 580, + 504, + 610 + ], + "type": "text", + "content": " to" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 243, + 613, + 504, + 634 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 613, + 504, + 634 + ], + "spans": [ + { + "bbox": [ + 243, + 613, + 504, + 634 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} _ {\\phi} (s) = b _ {q} (s) \\cdot \\left[ \\tilde {\\mathbf {Q}} _ {\\phi} - \\tilde {\\mathbf {Q}} _ {\\phi} ^ {\\top} \\right] \\tag {28}", + "image_path": "13f59198ed0599ea02e2d773d51b5c5484a89e414068918f4c3ee82828836cf5.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 638, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 504, + 672 + ], + "type": "text", + "content": "This is a general parameterization of time-independent skew-symmetric matrices, which have number of degrees of freedom equal to the number of entries in one of the triangles of the matrix, 
excluding the diagonal." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 685, + 228, + 697 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 685, + 228, + 697 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 228, + 697 + ], + "type": "text", + "content": "C.3 PARAMETERIZING " + }, + { + "bbox": [ + 105, + 685, + 228, + 697 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_{\\phi}" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 705, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 705, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 705, + 504, + 734 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 104, + 705, + 504, + 734 + ], + "type": "inline_equation", + "content": "b_{d}(s)" + }, + { + "bbox": [ + 104, + 705, + 504, + 734 + ], + "type": "text", + "content": " is a positive scalar function defined on the time domain with known integral. Suppose " + }, + { + "bbox": [ + 104, + 705, + 504, + 734 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{D}}_{\\phi}" + }, + { + "bbox": [ + 104, + 705, + 504, + 734 + ], + "type": "text", + "content": " is any matrix. 
Then " + }, + { + "bbox": [ + 104, + 705, + 504, + 734 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{D}}_{\\phi}\\tilde{\\mathbf{D}}_{\\phi}^{\\top}" + }, + { + "bbox": [ + 104, + 705, + 504, + 734 + ], + "type": "text", + "content": " is positive semi-definite and spans all time-independent positive" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 272, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 272, + 94 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 272, + 94 + ], + "type": "text", + "content": "semi-definite matrices. 
We can set " + }, + { + "bbox": [ + 104, + 82, + 272, + 94 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_{\\phi}" + }, + { + "bbox": [ + 104, + 82, + 272, + 94 + ], + "type": "text", + "content": " to" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 249, + 97, + 504, + 119 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 97, + 504, + 119 + ], + "spans": [ + { + "bbox": [ + 249, + 97, + 504, + 119 + ], + "type": "interline_equation", + "content": "\\mathbf {D} _ {\\phi} (s) = b _ {d} (s) \\cdot \\left[ \\tilde {\\mathbf {D}} _ {\\phi} \\tilde {\\mathbf {D}} _ {\\phi} ^ {\\top} \\right] \\tag {29}", + "image_path": "ab32e2a9b7fdd8732ed10a6bec76a479298a844d0837d5f4d80b0ce4d57dd3cf.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": "To show " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{D}}\\tilde{\\mathbf{D}}^{\\top}" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": " spans all positive semi-definite matrices: suppose " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": " is positive semi-definite. Then it is square. Then it can be eigen-decomposed into " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\mathbf{M} = \\mathbf{V}\\pmb {\\Sigma}\\mathbf{V}^{\\top}" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": ". 
The degrees of freedom in " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\mathbf{V}\\pmb {\\Sigma}\\mathbf{V}^{\\top}" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": " are just " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\mathbf{R} = \\mathbf{V}\\sqrt{\\pmb{\\Sigma}}" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": " since " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\mathbf{V}\\pmb {\\Sigma}\\mathbf{V}^{\\top} = \\mathbf{R}\\mathbf{R}^{\\top}" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": " and the square root is taken element-wise because " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": " is diagonal and is real because each " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{ij}\\geq 0" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": ", which is true because " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": " is positive semi-definite. Take " + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\mathbf{D} = \\mathbf{R}" + }, + { + "bbox": [ + 104, + 121, + 504, + 182 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 188, + 369, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 188, + 369, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 188, + 369, + 200 + ], + "type": "text", + "content": "In our experiments we parameterize " + }, + { + "bbox": [ + 104, + 188, + 369, + 200 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 188, + 369, + 200 + ], + "type": "text", + "content": " as a diagonal-only matrix." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 213, + 184, + 223 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 184, + 223 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 184, + 223 + ], + "type": "text", + "content": "C.4 INTEGRALS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 232, + 504, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 504, + 255 + ], + "type": "text", + "content": "The known integral requirement comes from the integrals required in the transition kernel, and can be relaxed two possible ways:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 263, + 504, + 312 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 132, + 263, + 504, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 263, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 132, + 263, + 504, + 285 + ], + "type": "text", + "content": "- numerical integration of function with unknown integral. This is expected to have low error given that the function is scalar-in scalar-out." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 289, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 289, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 132, + 289, + 504, + 312 + ], + "type": "text", + "content": "- Directly parameterize the integral and use auto-grad when needing the functions not-integrated." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 319, + 504, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 504, + 356 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 504, + 356 + ], + "type": "text", + "content": "We stick with the known integrals. In conclusion, the underlying parameters are positive scalar functions " + }, + { + "bbox": [ + 104, + 319, + 504, + 356 + ], + "type": "inline_equation", + "content": "b_{q}(s), b_{d}(s)" + }, + { + "bbox": [ + 104, + 319, + 504, + 356 + ], + "type": "text", + "content": " defined on the time domain and with known integral, and general matrices " + }, + { + "bbox": [ + 104, + 319, + 504, + 356 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{Q}}_{\\phi}, \\tilde{\\mathbf{D}}_{\\phi}" + }, + { + "bbox": [ + 104, + 319, + 504, + 356 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 368, + 182, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 368, + 182, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 182, + 379 + ], + "type": "text", + "content": "C.5 INSTANCES" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "text", + "content": "VPSDE. 
VPSDE has " + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "inline_equation", + "content": "K = 1" + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "text", + "content": ". Consequently, " + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}, \\mathbf{D}" + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "inline_equation", + "content": "K \\times K" + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "text", + "content": ". The only " + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "text", + "content": " skew-symmetric matrix is 0, so " + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} = 0" + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "text", + "content": ". 
Setting " + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "inline_equation", + "content": "\\mathbf{D}(t) = \\frac{1}{2}\\beta(t)" + }, + { + "bbox": [ + 104, + 388, + 504, + 412 + ], + "type": "text", + "content": " recovers VPSDE:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 241, + 415, + 504, + 439 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 415, + 504, + 439 + ], + "spans": [ + { + "bbox": [ + 241, + 415, + 504, + 439 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = - \\frac {\\beta (t)}{2} \\mathbf {y} d t + \\sqrt {\\beta (t)} d \\mathbf {B} _ {t} \\tag {30}", + "image_path": "c770a2dece5876a33c6b1c8f1f42e826fee35989988a331c21ca5d859f01ce6f.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 441, + 392, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 392, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 392, + 455 + ], + "type": "inline_equation", + "content": "\\nabla H(\\mathbf{y}) = \\mathbf{y}" + }, + { + "bbox": [ + 104, + 441, + 392, + 455 + ], + "type": "text", + "content": " so " + }, + { + "bbox": [ + 104, + 441, + 392, + 455 + ], + "type": "inline_equation", + "content": "\\mathbf{H}(\\mathbf{y}) = \\frac{1}{2}\\| \\mathbf{y}\\| _2^2" + }, + { + "bbox": [ + 104, + 441, + 392, + 455 + ], + "type": "text", + "content": " . The stationary distribution is " + }, + { + "bbox": [ + 104, + 441, + 392, + 455 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,\\mathbf{I})" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 464, + 389, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 389, + 476 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 389, + 476 + ], + "type": "text", + "content": "CLD. The CLD process (eq 5 in Dockhorn et al. 
(2021)) is defined as" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 189, + 478, + 421, + 507 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 478, + 421, + 507 + ], + "spans": [ + { + "bbox": [ + 189, + 478, + 421, + 507 + ], + "type": "interline_equation", + "content": "\\left( \\begin{array}{c} d \\mathbf {z} _ {t} \\\\ d \\mathbf {v} _ {t} \\end{array} \\right) = d \\mathbf {y} _ {t} = \\left( \\begin{array}{c c} 0 & \\frac {\\beta}{M} \\\\ - \\beta & - \\frac {\\Gamma \\beta}{M} \\end{array} \\right) \\mathbf {y} _ {t} d t + \\left( \\begin{array}{c c} 0 & 0 \\\\ 0 & \\sqrt {2 \\Gamma \\beta} \\end{array} \\right) d \\mathbf {B} _ {t}.", + "image_path": "1af4e2f56f573c70d975b8a46b0ba3cca8616e43bd60017f26eafd807e9b9e2e.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 509, + 249, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 249, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 249, + 521 + ], + "type": "text", + "content": "In " + }, + { + "bbox": [ + 104, + 509, + 249, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} / \\mathbf{D}" + }, + { + "bbox": [ + 104, + 509, + 249, + 521 + ], + "type": "text", + "content": " parameterization, we have" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 190, + 524, + 417, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 524, + 417, + 550 + ], + "spans": [ + { + "bbox": [ + 190, + 524, + 417, + 550 + ], + "type": "interline_equation", + "content": "H (\\mathbf {y}) = \\frac {1}{2} \\| \\mathbf {z} \\| _ {2} ^ {2} + \\frac {1}{2 M} \\| \\mathbf {v} \\| _ {2} ^ {2}, \\qquad \\nabla_ {\\mathbf {u}} H (\\mathbf {y}) = \\left( \\begin{array}{c} \\mathbf {z} \\\\ \\frac {1}{M} \\mathbf {v} \\end{array} \\right)", + "image_path": "9cd100ac59c0881b82cb7632d976e88ee6690923bf8b69cfc44bc37d7075d1a5.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 
205, + 552, + 363, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 552, + 363, + 578 + ], + "spans": [ + { + "bbox": [ + 205, + 552, + 363, + 578 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} = \\left( \\begin{array}{c c} 0 & - \\beta \\\\ \\beta & 0 \\end{array} \\right), \\qquad \\mathbf {D} = \\left( \\begin{array}{c c} 0 & 0 \\\\ 0 & \\Gamma \\beta \\end{array} \\right)", + "image_path": "71c73da10b9933eab51be6da67254efc97af1a9ff49d20485626fc5fc0e6d7e9.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 586, + 286, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 286, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 286, + 597 + ], + "type": "text", + "content": "The stationary distribution of this process is:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 201, + 600, + 504, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 600, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 201, + 600, + 504, + 613 + ], + "type": "interline_equation", + "content": "q _ {\\phi , \\infty} \\propto \\exp (- H (\\mathbf {y})) = \\mathcal {N} (\\mathbf {z}; 0, I _ {d}) \\mathcal {N} (\\mathbf {v}; 0, M I _ {d}) \\tag {31}", + "image_path": "19b0f3bd92857a3a5b6a2b072d586a28af635891375e06caa955274239d8f9a6.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 620, + 504, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 620, + 504, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 620, + 504, + 643 + ], + "type": "text", + "content": "ALDA. Mou et al. (2019) define a third-order diffusion process for the purpose of gradient-based MCMC sampling. 
The ALDA diffusion process can be specified as" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 195, + 646, + 504, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 646, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 195, + 646, + 504, + 685 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} = \\left( \\begin{array}{c c c} 0 & - \\frac {1}{L} I & 0 \\\\ \\frac {1}{L} I & 0 & - \\gamma I \\\\ 0 & \\gamma I & 0 \\end{array} \\right), \\quad \\mathbf {D} = \\left( \\begin{array}{c c c} 0 & 0 & 0 \\\\ 0 & 0 & 0 \\\\ 0 & 0 & \\frac {\\xi}{L} I \\end{array} \\right). \\tag {32}", + "image_path": "67bac03a969eba7c4510a688a000f9c1a4957563bbb6db2458b17e5d08790317.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 686, + 504, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 686, + 504, + 710 + ], + "spans": [ + { + "bbox": [ + 104, + 686, + 504, + 710 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 104, + 686, + 504, + 710 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 686, + 504, + 710 + ], + "type": "text", + "content": " is skew-symmetric and " + }, + { + "bbox": [ + 104, + 686, + 504, + 710 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 686, + 504, + 710 + ], + "type": "text", + "content": " is positive semi-definite, therefore we have that " + }, + { + "bbox": [ + 104, + 686, + 504, + 710 + ], + "type": "inline_equation", + "content": "q_{t}(\\mathbf{u})\\to q_{\\phi ,\\infty}" + }, + { + "bbox": [ + 104, + 686, + 504, + 710 + ], + "type": "text", + "content": ". 
In this case," + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 203, + 712, + 405, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 712, + 405, + 736 + ], + "spans": [ + { + "bbox": [ + 203, + 712, + 405, + 736 + ], + "type": "interline_equation", + "content": "q _ {\\phi , \\infty} = \\mathcal {N} (\\mathbf {z}; 0, \\mathbf {I} _ {d}) \\mathcal {N} (\\mathbf {v} _ {1}; 0, \\frac {1}{L} \\mathbf {I} _ {d}) \\mathcal {N} (\\mathbf {v} _ {2}; 0, \\frac {1}{L} \\mathbf {I} _ {d})", + "image_path": "6e74816e905da2afc2d6391a1831431af48c6a7ec5b37399fd8c3c8085bf723d.jpg" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 498, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 498, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 498, + 95 + ], + "type": "text", + "content": "MALDA. 
Similar to ALDA, we specify a diffusion process we term MALDA which we specify as" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 191, + 100, + 504, + 139 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 191, + 100, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 191, + 100, + 504, + 139 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} = \\left( \\begin{array}{c c c} 0 & - \\frac {1}{L} I & - \\frac {1}{L} \\\\ \\frac {1}{L} I & 0 & - \\gamma I \\\\ \\frac {1}{L} & \\gamma I & 0 \\end{array} \\right), \\quad \\mathbf {D} = \\left( \\begin{array}{c c c} 0 & 0 & 0 \\\\ 0 & \\frac {1}{L} I & 0 \\\\ 0 & 0 & \\frac {1}{L} I \\end{array} \\right). \\tag {33}", + "image_path": "045609f07c446f7ba1745cd8ace59acfeb9f8af38576eed7e4ced61c77fb50e7.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 144, + 436, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 436, + 156 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 436, + 156 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 104, + 144, + 436, + 156 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 144, + 436, + 156 + ], + "type": "text", + "content": " is skew-symmetric and " + }, + { + "bbox": [ + 104, + 144, + 436, + 156 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 144, + 436, + 156 + ], + "type": "text", + "content": " is positive semi-definite. 
In this case this is" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 203, + 162, + 407, + 185 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 162, + 407, + 185 + ], + "spans": [ + { + "bbox": [ + 203, + 162, + 407, + 185 + ], + "type": "interline_equation", + "content": "q _ {\\phi , \\infty} = \\mathcal {N} (\\mathbf {z}; 0, \\mathbf {I} _ {d}) \\mathcal {N} (\\mathbf {v} _ {1}; 0, \\frac {1}{L} I _ {d}) \\mathcal {N} (\\mathbf {v} _ {2}; 0, \\frac {1}{L} I _ {d})", + "image_path": "d65d5b12a33966a6d023eaeaa9379725d72e70db424ae9767f6227d6a199deea.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 198, + 326, + 210 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 198, + 326, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 326, + 210 + ], + "type": "text", + "content": "D TRANSITIONS FOR LINEAR PROCESSES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 222, + 408, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 222, + 408, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 408, + 236 + ], + "type": "text", + "content": "For time variable " + }, + { + "bbox": [ + 104, + 222, + 408, + 236 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 222, + 408, + 236 + ], + "type": "text", + "content": " and Brownian motion " + }, + { + "bbox": [ + 104, + 222, + 408, + 236 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{B}}_s" + }, + { + "bbox": [ + 104, + 222, + 408, + 236 + ], + "type": "text", + "content": " driving diffusions of the form" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 246, + 242, + 504, + 258 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 242, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 246, + 242, + 504, + 258 + ], + "type": "interline_equation", + "content": "d \\mathbf {y} = f (\\mathbf {y}, s) d s + g (s) d 
\\widehat {\\mathbf {B}} _ {s}, \\tag {34}", + "image_path": "8bd6d02921aa33eaa67c9f822350a3a879c270cc7e9cbd92a49ab22c76db6d94.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "content": "when " + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "inline_equation", + "content": "f_{\\phi}(\\mathbf{y}_s, s)" + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "inline_equation", + "content": "g_{\\phi}(s)" + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "content": " are linear, the transition kernel " + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s | \\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "content": " is always normal (Särkkä & Solin, 2019). Therefore, we just find the mean " + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{s|0}" + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "content": " and covariance " + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{s|0}" + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s | \\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "inline_equation", + "content": "f(\\mathbf{y}, s) = \\mathbf{A}(s)\\mathbf{y}" + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "content": ". The un-conditional time " + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 262, + 504, + 298 + ], + "type": "text", + "content": " mean and covariance are solutions to" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 219, + 304, + 304, + 316 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 304, + 304, + 316 + ], + "spans": [ + { + "bbox": [ + 219, + 304, + 304, + 316 + ], + "type": "interline_equation", + "content": "d \\mathbf {m} _ {s} / d s = \\mathbf {A} (s) \\mathbf {m} _ {s}", + "image_path": "8edf94f6a29c7dbe986b59e0f79eaf322b28e8903c26e3a6a2949c9e51c8229c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 221, + 313, + 504, + 334 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 313, + 504, + 334 + ], + "spans": [ + { + "bbox": [ + 221, + 313, + 504, + 334 + ], + "type": "interline_equation", + "content": "d \\boldsymbol {\\Sigma} _ {s} / d s = \\mathbf {A} (s) \\boldsymbol {\\Sigma} _ {s} + \\boldsymbol {\\Sigma} _ {s} \\mathbf {A} ^ {\\top} (s) + g ^ {2} (s) \\tag {35}", + "image_path": "c6f4e0100b7badd671993a051a419819e19367d078095d6dec575b55674cace2.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "type": "text", + "content": "By (6.6) in Särkkä & Solin (2019), for computing conditionals " + }, + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 339, + 504, + 386 
+ ], + "type": "text", + "content": ", we can take the marginal distribution ODEs and compute conditionals by simply setting the time 0 mean and covariance initial conditions to the conditioning value and to 0 respectively. We take (6.36-6.39) and set " + }, + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_0 = \\mathbf{u}_0" + }, + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{0} = 0" + }, + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "type": "text", + "content": " to condition. Let " + }, + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "type": "inline_equation", + "content": "[\\mathbf{A}]_s = \\int_0^s\\mathbf{A}(\\nu)d\\nu" + }, + { + "bbox": [ + 104, + 339, + 504, + 386 + ], + "type": "text", + "content": ". The mean is" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 167, + 392, + 504, + 427 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 392, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 167, + 392, + 504, + 427 + ], + "type": "interline_equation", + "content": "\\mathbf {m} _ {s \\mid 0} = \\exp \\left[ \\int_ {0} ^ {s} \\mathbf {A} (\\nu) d \\nu \\right] \\mathbf {y} _ {0} = \\exp \\left(\\left[ \\mathbf {A} \\right] _ {s}\\right) \\mathbf {y} _ {0} = \\underbrace {\\exp (s \\mathbf {A}) \\mathbf {y} _ {0}} _ {\\text {no integration if } \\mathbf {A} (\\nu) = \\mathbf {A}}, \\tag {36}", + "image_path": "d277f354ed47f1aacc67e697329d62836e2fa4887cb905df51e3c4bc66da3c1d.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": 
"inline_equation", + "content": "\\exp" + }, + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": "text", + "content": " denotes matrix exponential. (6.36-6.39) state the covariance " + }, + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": "text", + "content": " as a matrix factorization, for which a derivation is provided below " + }, + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{s} = \\mathbf{C}_{s}(\\mathbf{H}_{s})^{-1}" + }, + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_s,\\mathbf{H}_s" + }, + { + "bbox": [ + 104, + 434, + 504, + 467 + ], + "type": "text", + "content": " being the solutions of:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 223, + 472, + 504, + 500 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 472, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 223, + 472, + 504, + 500 + ], + "type": "interline_equation", + "content": "\\left( \\begin{array}{c} \\frac {d}{d s} \\mathbf {C} _ {s} \\\\ \\frac {d}{d s} \\mathbf {H} _ {s} \\end{array} \\right) = \\left( \\begin{array}{c c} \\mathbf {A} (s) & g ^ {2} (s) \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} (s) \\end{array} \\right) \\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) \\tag {37}", + "image_path": "9f21de24ba8240718db8daa64c16c9d58e0350b17fb11d4eed3e4c7b807ef71c.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "text", + "content": "To condition and get 
" + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "inline_equation", + "content": "\\Sigma_{s|0}" + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "inline_equation", + "content": "\\Sigma_s" + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "text", + "content": ", we set " + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "inline_equation", + "content": "\\Sigma_0 = 0" + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "text", + "content": ", and initialize " + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_s, \\mathbf{H}_s" + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_0 = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_0 = \\mathbf{I}" + }, + { + "bbox": [ + 104, + 504, + 501, + 517 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 153, + 523, + 504, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 523, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 153, + 523, + 504, + 574 + ], + "type": "interline_equation", + "content": "\\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) = \\exp \\left[ \\begin{array}{c c} [ \\mathbf {A} ] _ {s} & [ g ^ {2} ] _ {s} \\\\ \\mathbf {0} & - [ \\mathbf {A} ^ {\\top} ] _ {s} \\end{array} \\right] \\left( \\begin{array}{c} \\mathbf {0} \\\\ \\mathbf {I} \\end{array} \\right) = \\underbrace {\\exp \\left[ s \\left( \\begin{array}{c c} \\mathbf {A} & g ^ {2} \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} \\end{array} \\right) \\right] \\left( \\begin{array}{c} \\mathbf {0} \\\\ \\mathbf {I} \\end{array} \\right)} _ {\\text {no integration if } \\mathbf {A} (\\nu) = \\mathbf {A} , \\, g (\\nu) = g}. \\tag {38}", + "image_path": "e9bdfc6d0f5069f720e0f462c9481101c65d432092a389b1401180f96a13c8d5.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 578, + 220, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 220, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 220, + 594 + ], + "type": "text", + "content": "Finally, " + }, + { + "bbox": [ + 105, + 578, + 220, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{\\Sigma}_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 606, + 363, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 363, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 363, + 617 + ], + "type": "text", + "content": "D.1 DERIVATION OF THE COVARIANCE MATRIX SOLUTION" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "text", + "content": "Equation (35) gives an 
expression for " + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "inline_equation", + "content": "d\\pmb{\\Sigma}_s / ds" + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "text", + "content": ". To derive the matrix factorization technique used in eq. (37), we use eq. (35) and the desired condition " + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_s = \\mathbf{C}_s\\mathbf{H}_s^{-1}" + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "text", + "content": " to derive expressions for " + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "inline_equation", + "content": "d\\mathbf{C}_s / ds" + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "inline_equation", + "content": "d\\mathbf{H}_s / ds" + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "text", + "content": " and suitable initial conditions so that the factorization also starts at the desired " + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_0" + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_s = \\mathbf{C}_s\\mathbf{H}_s^{-1}" + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "text", + "content": ", then note that " + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_s, \\mathbf{H}_s" + }, + { + "bbox": [ + 104, + 626, + 505, + 673 + ], + "type": "text", + "content": " satisfies" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 226, + 678, + 383, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 678, + 383, + 731 + ], + "spans": [ + { + "bbox": [ + 226, + 678, + 383, + 731 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {d}{d s} \\boldsymbol {\\Sigma} _ {s} = \\frac {d}{d s} \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\\\ = \\mathbf {C} _ {s} \\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} \\\\ \\end{array}", + "image_path": "322389d13eba33830e0824713eef97aadf39f9791ef753ac17be8f7d0cb08372.jpg" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 200, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 105, + 83, + 200, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 200, + 94 + ], + "type": "text", + "content": "And using the fact that" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 268, + 102, + 334, + 125 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 102, + 334, + 125 + ], + "spans": [ + { + "bbox": [ + 268, + 102, + 334, + 125 + ], + "type": "interline_equation", + "content": "\\frac {d}{d s} \\mathbf {H} _ {s} \\mathbf {H} _ {s} ^ {- 1} = 0", + "image_path": "1dbfb7720d1d0480184378275d4da1cd4e4bf4a3636a51e7e3cbb7ad0816e90f.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 199, + 127, + 333, + 150 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 127, + 333, + 150 + ], + "spans": [ + { + "bbox": [ + 199, + 127, + 333, + 150 + ], + "type": "interline_equation", + "content": "\\mathbf {H} _ {s} \\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} + \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right) = 0", + "image_path": "71ebb3857815bdc55f049a6447d153824005766db430013d66c6cc2ddef10481.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 281, + 152, + 410, + 175 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 152, + 410, + 175 + ], + "spans": [ + { + "bbox": [ + 281, + 152, + 410, + 175 + ], + "type": "interline_equation", + "content": "\\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} = - \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right)", + "image_path": "bcc38c037e9333d448effcb75be3158b43258f362ca074cc00f516266261d01f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 182, + 153, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 153, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 153, + 194 + ], + "type": "text", + "content": "we get that" + } + ] + } + ], + "index": 5 + }, + { + 
"bbox": [ + 156, + 201, + 459, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 201, + 459, + 228 + ], + "spans": [ + { + "bbox": [ + 156, + 201, + 459, + 228 + ], + "type": "interline_equation", + "content": "\\mathbf {C} _ {s} \\frac {d}{d s} \\mathbf {H} _ {s} ^ {- 1} + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} = - \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right) + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1}", + "image_path": "ea625c7ba1377732d1174e0c65b585183616f2179769b2ddae22f232e8296015.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 230, + 515, + 272 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 230, + 515, + 272 + ], + "spans": [ + { + "bbox": [ + 106, + 230, + 515, + 272 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} - \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} \\left(\\mathbf {H} _ {s} ^ {- 1}\\right) + \\left(\\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} = \\mathbf {A} (s) \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) + g ^ {2} (s) \\\\ = \\mathbf {A} (s) \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} \\mathbf {H} _ {s} ^ {- 1} + g ^ {2} (s) \\mathbf {H} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\\\ \\end{array}", + "image_path": "49c436bff496730f29f4ca86b3411420086704fa730ec16816bcd67276901bdd.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 137, + 274, + 487, + 353 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 274, + 487, + 353 + ], + "spans": [ + { + "bbox": [ + 137, + 274, + 487, + 353 + ], + "type": 
"interline_equation", + "content": "\\begin{array}{l} \\left(- \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} + \\frac {d}{d s} \\mathbf {C} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} = \\left(\\mathbf {A} (s) \\mathbf {C} _ {s} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s}\\right) \\mathbf {H} _ {s} ^ {- 1} \\\\ - \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\frac {d}{d s} \\mathbf {H} _ {s} + \\frac {d}{d s} \\mathbf {C} _ {s} = \\mathbf {A} (s) \\mathbf {C} _ {s} + \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s} \\\\ \\left[ \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\quad \\mathbf {I} _ {d} \\right] ^ {\\top} \\frac {d}{d s} \\left( \\begin{array}{c} \\mathbf {H} _ {s} \\\\ \\mathbf {C} _ {s} \\end{array} \\right) = \\left[ \\mathbf {C} _ {s} \\mathbf {H} _ {s} ^ {- 1} \\quad \\mathbf {I} _ {d} \\right] ^ {\\top} \\left( \\begin{array}{c} - \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s} \\\\ \\mathbf {A} (s) \\mathbf {C} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s} \\end{array} \\right) \\\\ \\end{array}", + "image_path": "2adaa92d5faddae4d74baba4d793564bae5fcf5e1cc9b8de5f373f5920e1b4fd.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 360, + 280, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 360, + 280, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 360, + 280, + 372 + ], + "type": "text", + "content": "Now, we note " + }, + { + "bbox": [ + 105, + 360, + 280, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_s" + }, + { + "bbox": [ + 105, + 360, + 280, + 372 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 105, + 360, + 280, + 372 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_s" + }, + { + "bbox": [ + 105, + 360, + 280, + 372 + ], + "type": "text", + "content": " satisfy the 
following" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 243, + 380, + 334, + 403 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 380, + 334, + 403 + ], + "spans": [ + { + "bbox": [ + 243, + 380, + 334, + 403 + ], + "type": "interline_equation", + "content": "\\frac {d}{d s} \\mathbf {H} _ {s} = - \\mathbf {A} ^ {\\top} (s) \\mathbf {H} _ {s}", + "image_path": "c6241f5435d588460d30852684d4de1a36b5cac9e33098ee4ae17c2aa70ab7eb.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 246, + 405, + 365, + 428 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 405, + 365, + 428 + ], + "spans": [ + { + "bbox": [ + 246, + 405, + 365, + 428 + ], + "type": "interline_equation", + "content": "\\frac {d}{d s} \\mathbf {C} _ {s} = \\mathbf {A} (s) \\mathbf {C} _ {s} + g ^ {2} (s) \\mathbf {H} _ {s}", + "image_path": "f3e481ea6bd71d29819bf17142de55b9d02c254a8c511687911493a8b5f9da13.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 435, + 182, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 435, + 182, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 182, + 447 + ], + "type": "text", + "content": "which implies that" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 220, + 455, + 504, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 455, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 220, + 455, + 504, + 482 + ], + "type": "interline_equation", + "content": "\\frac {d}{d s} \\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) = \\left( \\begin{array}{c c} \\mathbf {A} (s) & g ^ {2} (s) \\\\ \\mathbf {0} & - \\mathbf {A} ^ {\\top} (s) \\end{array} \\right) \\left( \\begin{array}{c} \\mathbf {C} _ {s} \\\\ \\mathbf {H} _ {s} \\end{array} \\right) \\tag {39}", + "image_path": "e193d166270e228db1f5039bf27e54e4d10eabf21c2cbf0131ddc8f8b98073b9.jpg" + } + 
] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 490, + 299, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 490, + 299, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 299, + 504 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 105, + 490, + 299, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_0 = \\pmb{\\Sigma}_0" + }, + { + "bbox": [ + 105, + 490, + 299, + 504 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 490, + 299, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_0 = \\mathbf{I}_d" + }, + { + "bbox": [ + 105, + 490, + 299, + 504 + ], + "type": "text", + "content": ", as " + }, + { + "bbox": [ + 105, + 490, + 299, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_0\\mathbf{H}_0^{-1} = \\pmb{\\Sigma}_0" + }, + { + "bbox": [ + 105, + 490, + 299, + 504 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 519, + 251, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 519, + 251, + 530 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 251, + 530 + ], + "type": "text", + "content": "D.2 HYBRID SCORE MATCHING" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "content": "Instead of computing " + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "content": ", we can apply the hybrid score matching principle (Dockhorn et al., 2021) to reduce variance by compute objectives using " + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|x)" + }, + 
{ + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "content": ", which amounts to integrating out " + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_0" + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "content": ". To accomplish this, following Särkkä & Solin (2019), we simply replace " + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0" + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "inline_equation", + "content": "[x,\\mathbb{E}[\\mathbf{v}_0]]" + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "content": " in the expression for " + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{s|0}" + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "content": ", i.e. 
replace the conditioning value of " + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_0" + }, + { + "bbox": [ + 104, + 540, + 504, + 596 + ], + "type": "text", + "content": " with the mean of its chosen initial distribution:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 222, + 602, + 504, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 602, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 222, + 602, + 504, + 635 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\mathbf {y} _ {s} | x ] = \\exp \\left[ \\int_ {0} ^ {s} A (\\nu) d \\nu \\right] \\binom {x} {\\mathbb {E} [ \\mathbf {v} _ {0} ]} \\tag {40}", + "image_path": "4bd8c4bb40b598bcd95e033016837a772c2e0bf423096f55a9693287d1c73b61.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "spans": [ + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": "For the covariance, instead of using " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_0 = \\boldsymbol{\\Sigma}_0 = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ", we use a block matrix to condition on " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " but not " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_0" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ". 
We decompose " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_0" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " into its blocks " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{0,xx}" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{0,vv}" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{0,xv}" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ". As before, to condition on " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " we set " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{0,xx} = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ". 
Because " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "q(\\mathbf{v}_0)" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " is set to be independent of " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{0,xv}" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " is also set to " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\mathbf{0}" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ". Finally, instead of " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\mathbf{0}" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ", to marginalize out " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_0" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{0,vv}" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " is set to the covariance of the chosen initial time zero distribution for " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_0" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": ". E.g. 
if " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{0,j} \\sim N(0,\\gamma)" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": " for each dimension, then " + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{0,vv} = N(0,\\gamma I)" + }, + { + "bbox": [ + 104, + 648, + 504, + 705 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "We operationalize this in a simple piece of code, which makes the ELBO tractable and easy, i.e. skips both analytic derivations and numerical forward integration during training." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 353, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 353, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 353, + 94 + ], + "type": "text", + "content": "D.3 TRANSITIONS IN STATIONARY PARAMETERIZATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": 
[ + 104, + 107, + 504, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "spans": [ + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "text", + "content": "In terms of " + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "inline_equation", + "content": "\\mathbf{D}" + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "text", + "content": ", the transitions " + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_s|\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "text", + "content": " for time " + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "text", + "content": " are normal with mean " + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{s|0}" + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{s|0}" + }, + { + "bbox": [ + 104, + 107, + 504, + 121 + ], + "type": "text", + "content": " equal to:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 137, + 505, + 182 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 137, + 505, + 182 + ], + "spans": [ + { + "bbox": [ + 116, + 137, + 505, + 182 + ], + "type": "interline_equation", + "content": "\\mathbf {m} _ {s \\mid 0} = \\exp \\left(- \\left[ \\mathbf {Q} + \\mathbf {D} \\right] _ {s}\\right) \\mathbf {y} _ {0}, \\quad \\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ 
\\binom {- [ \\mathbf {Q} + \\mathbf {D} ] _ {s}} {\\mathbf {0}} \\begin{array}{c c} {[ 2 \\mathbf {D} ] _ {s}} \\\\ {[ (\\mathbf {Q} + \\mathbf {D}) ^ {\\top} ] _ {s}} \\end{array} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} \\tag {41}", + "image_path": "2a89af24c0ce680e46759f5f98bede034155ffd3332e0e3b50cef0bf0d2a2c3f.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 200, + 394, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 200, + 394, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 200, + 394, + 213 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 200, + 394, + 213 + ], + "type": "inline_equation", + "content": "\\mathbf{\\Sigma}_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}" + }, + { + "bbox": [ + 104, + 200, + 394, + 213 + ], + "type": "text", + "content": ". For the time invariant case, this simplifies to" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 118, + 230, + 505, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 230, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 118, + 230, + 505, + 262 + ], + "type": "interline_equation", + "content": "\\mathbf {m} _ {s \\mid 0} = \\exp [ - s (\\mathbf {Q} + \\mathbf {D}) ] \\mathbf {y} _ {0}, \\quad \\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ s \\binom {- (\\mathbf {Q} + \\mathbf {D})} {\\mathbf {0}} \\quad \\binom {2 \\mathbf {D}} {(\\mathbf {Q} + \\mathbf {D}) ^ {\\top}} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} \\tag {42}", + "image_path": "19e3797daa43ec6825c673dd46ae3a4cb917c9c4efcc916babbd0fc3e0373da8.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 289, + 469, + 315 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 289, + 469, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 469, + 315 + ], + "type": "text", + "content": "E GENERIC CHANGE OF MEASURE AND JENSEN'S FOR APPROXIMATE MARGINALIZATION" + } + ] + } + 
], + "index": 6 + }, + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\mathbf{u} = [\\mathbf{z},\\mathbf{v}]" + }, + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "text", + "content": " and we have an expression for " + }, + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "inline_equation", + "content": "p(\\mathbf{u} = [z,v]) = p(\\mathbf{z} = z,\\mathbf{v} = v)" + }, + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "text", + "content": ". By marginalization, we can get " + }, + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "inline_equation", + "content": "p(\\mathbf{z} = z)" + }, + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "text", + "content": ", and we can introduce another distribution " + }, + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 335, + 504, + 369 + ], + "type": "text", + "content": " to pick a sampling distribution of our choice:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 185, + 384, + 504, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 384, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 185, + 384, + 504, + 491 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} p (\\mathbf {z} = z) = \\int_ {v} p (\\mathbf {z} = z, \\mathbf {v} = v) d v \\\\ = \\int_ {v} p (\\mathbf {z} = z | \\mathbf {v} = v) p (\\mathbf {v} = v) d v \\\\ = \\int_ {v} \\frac {q (\\mathbf {v} = v | \\mathbf {z} = z)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} p (\\mathbf {z} = z | \\mathbf {v} = v) p (\\mathbf {v} = v) d v \\tag {43} \\\\ = \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} 
\\left[ \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ \\end{array}", + "image_path": "08b3ea96a6c323ce7a14c25ba6cb3240ebc4c04189624884ef26cb3df6e3d838.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 507, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 507, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 507, + 504, + 529 + ], + "type": "text", + "content": "We often work with these expressions in log space, and need to pull the expectation outside to use Monte Carlo. Jensen's bound allows this:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 200, + 546, + 408, + 598 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 546, + 408, + 598 + ], + "spans": [ + { + "bbox": [ + 200, + 546, + 408, + 598 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\log p (\\mathbf {z} = z) = \\log \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ \\geq \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ \\end{array}", + "image_path": "f1ef7a915dbab1430bbd06202e8974f788140ce708568b812f132cd074f3dd20.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 615, + 451, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 615, + 451, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 451, + 628 + ], + "type": "text", + "content": "The following shows that the bound is tight when " + }, + { + "bbox": [ + 105, + 615, + 451, + 628 + ], + "type": "inline_equation", + "content": "q(\\mathbf{v} = v|\\mathbf{z} = z) = p(\\mathbf{v} = v|\\mathbf{z} = z)" + }, + { + "bbox": [ + 105, + 615, + 451, + 628 + ], + "type": "text", + "content": 
":" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 119, + 646, + 504, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 646, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 119, + 646, + 504, + 734 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} _ {q (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{q (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] = _ {\\text {a s s u m e}} \\mathbb {E} _ {p (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{p (\\mathbf {v} = v | \\mathbf {z} = z)} \\right] \\\\ = \\mathbb {E} _ {p (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log \\left(\\frac {p (\\mathbf {z} = z , \\mathbf {v} = v)}{p (\\mathbf {v} = v , \\mathbf {z} = z)} \\cdot p (\\mathbf {z} = z)\\right) \\right] \\tag {44} \\\\ = \\mathbb {E} _ {p (\\mathbf {v} = v | \\mathbf {z} = z)} \\left[ \\log p (\\mathbf {z} = z) \\right] \\\\ = \\log p (\\mathbf {z} = z) \\\\ \\end{array}", + "image_path": "4b0da0b782f5e71bfa5ea032c00fea094de83e4dc8db0919b9c01f628c426be7.jpg" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 212, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ 
+ { + "bbox": [ + 105, + 81, + 212, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 212, + 92 + ], + "type": "text", + "content": "F ELBO FOR MDMS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 125, + 105, + 505, + 289 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 105, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 125, + 105, + 505, + 289 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\log p _ {\\theta} (x) = \\log \\int_ {v _ {0}} p _ {\\theta} \\left(x _ {0}, v _ {0}\\right) d v _ {0} (45) \\\\ = \\log \\int_ {v _ {0}} p _ {\\theta} \\left(u _ {0} = [ x, v _ {0} ]\\right) (46) \\\\ = \\log \\int_ {v _ {0}} \\frac {q \\left(v _ {0} \\mid x\\right)}{q \\left(v _ {0} \\mid x\\right)} p _ {\\theta} \\left(u _ {0} = [ x, v _ {0} ]\\right) (47) \\\\ = \\log \\mathbb {E} _ {q \\left(v _ {0} \\mid x\\right)} \\left[ \\frac {p _ {\\theta} \\left(u _ {0} = [ x , v _ {0} ]\\right)}{q \\left(v _ {0} \\mid x\\right)} \\right] (48) \\\\ \\geq \\mathbb {E} _ {q \\left(v _ {0} \\mid x\\right)} \\left[ \\log p _ {\\theta} \\left(u _ {0} = [ x, v _ {0} ]\\right) - \\log q \\left(v _ {0} \\mid x\\right) \\right] (49) \\\\ \\geq \\mathbb {E} _ {q (y | x)} \\left[ \\log \\pi_ {\\theta} (y _ {T}) + \\int_ {0} ^ {T} - \\| s _ {\\theta} \\| _ {g ^ {2}} ^ {2} - \\nabla \\cdot \\left(g ^ {2} s _ {\\theta} - f\\right) d s - \\log q \\left(y _ {0} ^ {v} | x\\right) \\right] (50) \\\\ \\end{array}", + "image_path": "489eda865817daefa3c2b9380b53b59feda776b6d292748649869f4a9dabd0ba.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 294, + 504, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 294, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 294, + 504, + 318 + ], + "type": "text", + "content": "The first inequality holds due to Jensen's inequality and the second due to an application of Theorem 1 from Huang et al. (2021) or Theorem 3 from Song et al. 
(2021) applied to the joint variable " + }, + { + "bbox": [ + 105, + 294, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_0" + }, + { + "bbox": [ + 105, + 294, + 504, + 318 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 331, + 191, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 191, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 191, + 342 + ], + "type": "text", + "content": "F.1 ISM TO DSM" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 352, + 282, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 352, + 282, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 282, + 363 + ], + "type": "text", + "content": "F.1.1 LEMMA: EXPECTATION BY PARTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 371, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 371, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 504, + 396 + ], + "type": "text", + "content": "We will need a form of multivariate integration by parts which gives us for some " + }, + { + "bbox": [ + 105, + 371, + 504, + 396 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 105, + 371, + 504, + 396 + ], + "type": "text", + "content": " and some " + }, + { + "bbox": [ + 105, + 371, + 504, + 396 + ], + "type": "inline_equation", + "content": "q(x)" + }, + { + "bbox": [ + 105, + 371, + 504, + 396 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 105, + 371, + 504, + 396 + ], + "type": "inline_equation", + "content": "E_{q(x)}[\\nabla_x \\cdot f(x)] = -E_{q(x)}[f(x)^\\top \\nabla_x \\log q(x)]" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 410, + 494, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 410, + 494, + 704 + ], + "spans": [ + { + "bbox": [ + 116, + 410, + 494, + 704 + ], + 
"type": "interline_equation", + "content": "\\begin{array}{l} E _ {q (x)} [ \\nabla_ {x} \\cdot f _ {i} (x) ] = \\int q (x) \\sum_ {i = 1} ^ {d} [ \\nabla_ {x _ {i}} f _ {i} (x) ] d x \\\\ = \\int \\sum_ {i = 1} ^ {d} q (x) \\nabla_ {x _ {i}} f _ {i} (x) d x \\\\ = \\sum_ {i = 1} ^ {d} \\int_ {x _ {- i}} \\int_ {x _ {i}} q (x) \\nabla_ {x _ {i}} f _ {i} (x) d x _ {i} d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} \\int \\left[ \\left[ q (x) \\int \\nabla_ {x _ {i}} f _ {i} (x) d x _ {i} \\right] _ {- \\infty} ^ {\\infty} - \\int \\nabla_ {x _ {i}} q (x) \\int \\nabla_ {x _ {i}} f _ {i} (x) d x _ {i} \\right] d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} \\int \\left[ - \\int \\nabla_ {x _ {i}} q (x) f _ {i} (x) d x _ {i} \\right] d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} \\int \\left[ - \\int q (x) \\nabla_ {x _ {i}} \\log q (x) f _ {i} (x) d x _ {i} \\right] d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} - \\int \\int q (x) \\nabla_ {x _ {i}} \\log q (x) f _ {i} (x) d x _ {i} d x _ {- i} \\\\ = \\sum_ {i = 1} ^ {d} - E _ {q (x)} \\left[ \\nabla_ {x _ {i}} \\log q (x) f _ {i} (x) \\right] \\\\ = - E _ {q (x)} [ f (x) ^ {\\top} \\nabla_ {x} \\log q (x) ] \\\\ \\end{array}", + "image_path": "886082783120eacbee45f91ab6b8db4027763aa45d40b5d7e2f647cf2ae2d394.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "type": "text", + "content": "This equality also follows directly from the Stein operator using the generator method to the Langevin diffusion (Barbour, 1988)." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 190, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 190, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 190, + 94 + ], + "type": "text", + "content": "F.1.2 DSM ELBO" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 101, + 279, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 101, + 279, + 114 + ], + "spans": [ + { + "bbox": [ + 105, + 101, + 279, + 114 + ], + "type": "text", + "content": "Using the \"expectation by parts\", we have:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 148, + 118, + 462, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 118, + 462, + 135 + ], + "spans": [ + { + "bbox": [ + 148, + 118, + 462, + 135 + ], + "type": "interline_equation", + "content": "\\mathbb {E} _ {q (u _ {t} | x)} [ \\nabla_ {u _ {t}} \\cdot g ^ {2} (t) s _ {\\theta} (u _ {t}, t) ] = - \\mathbb {E} _ {q (u _ {t} | x)} [ (g ^ {2} (t) s _ {\\theta} (u _ {t}, t)) ^ {\\top} \\nabla_ {u _ {t}} \\log q (u _ {t} | x) ]", + "image_path": "77c3b377beca179e8939782c3013df9b195795dc1f61789f691cb1f8663e26fa.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 138, + 376, + 152 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 138, + 376, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 138, + 376, + 152 + ], + "type": "text", + "content": "Also we have, for " + }, + { + "bbox": [ + 105, + 138, + 376, + 152 + ], + "type": "inline_equation", + "content": "s_{\\theta}" + }, + { + "bbox": [ + 105, + 138, + 376, + 152 + ], + "type": "text", + "content": " evaluated at " + }, + { + "bbox": [ + 105, + 138, + 376, + 152 + ], + "type": "inline_equation", + "content": "(u_t, t)" + }, + { + "bbox": [ + 105, + 138, + 376, + 152 + ], + "type": "text", + "content": ", by completing the square," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 156, + 504, + 179 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 156, + 504, + 179 + ], + "spans": [ + { + "bbox": [ + 108, + 156, + 504, + 179 + ], + "type": "interline_equation", + "content": "- \\frac {1}{2} | | s _ {\\theta} | | _ {g ^ {2} (t)} + s _ {\\theta} ^ {\\top} g ^ {2} (t) \\nabla \\log q (u _ {t} | x) = - \\frac {1}{2} | | s _ {\\theta} - \\nabla \\log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2} +. 
5 | | \\nabla \\log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2}", + "image_path": "a43dcf15676274f0cdd2e949a5f6c83d86c38f1b6186b52e1bc83d84a49f1171.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 182, + 209, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 209, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 209, + 194 + ], + "type": "text", + "content": "The two together give us:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 199, + 504, + 345 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 199, + 504, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 504, + 345 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\log p (x) \\geq \\mathbb {E} _ {q (u _ {T} | x)} \\left[ \\log \\pi \\right] + \\int_ {0} ^ {T} \\left[ \\mathbb {E} _ {q (u _ {t} | x)} \\Big [ - \\nabla \\cdot g ^ {2} s _ {\\theta} - . 5 | | s _ {\\theta} | | _ {g ^ {2} (t)} ^ {2} + \\nabla \\cdot f \\Big ] d t \\right] \\\\ = \\mathbb {E} _ {q (u _ {T} | x)} \\left[ \\log \\pi \\right] + \\int_ {0} ^ {T} \\left[ \\mathbb {E} _ {q (u _ {t} | x)} \\left[ \\left(g ^ {2} s _ {\\theta}\\right) ^ {\\top} \\nabla_ {u _ {t}} \\log q (u _ {t} | x) - . 5 | | s _ {\\theta} | | _ {g ^ {2} (t)} ^ {2} + \\nabla \\cdot f \\right] d t \\right] \\\\ = \\mathbb {E} _ {q (u _ {T} | x)} \\left[ \\log \\pi \\right] + \\int_ {0} ^ {T} \\left[ \\mathbb {E} _ {q (u _ {t} | x)} \\left[ - \\frac {1}{2} | | s _ {\\theta} - \\nabla \\log q (u _ {t} | x) | | _ {g ^ {2} (t)} ^ {2} \\right. \\right. \\\\ \\left. + . 
5 \\left| | \\nabla \\log q \\left(u _ {t} | x\\right) \\right| _ {g ^ {2} (t)} ^ {2} + \\nabla_ {u _ {t}} \\cdot f \\right] \\Biggr ] d t \\tag {51} \\\\ \\end{array}", + "image_path": "80a9fe66e953aab5e933ea07158d212d047a19b7753e4fac87d8cf9680d408a4.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 357, + 214, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 214, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 214, + 369 + ], + "type": "text", + "content": "F.2 NOISE PREDICTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "text", + "content": "We have that for normal " + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mathbf{y}_s;\\mathbf{m}_{s|0},\\pmb{\\Sigma}_{s|0})" + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "text", + "content": ", we can sample " + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s" + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "text", + "content": " with normal noise " + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim \\mathcal{N}(0,I)" + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s = \\mathbf{m}_{s|0} + \\mathbf{L}\\epsilon" + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{L}" + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "text", + "content": " is 
the cholesky decomposition of " + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{s|0}" + }, + { + "bbox": [ + 104, + 377, + 504, + 403 + ], + "type": "text", + "content": ". Then, the score is" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 209, + 407, + 402, + 581 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 407, + 402, + 581 + ], + "spans": [ + { + "bbox": [ + 209, + 407, + 402, + 581 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\nabla_ {\\mathbf {y} _ {s}} \\log q (\\mathbf {y} _ {s} | \\mathbf {y} _ {0}) \\Bigg | _ {\\mathbf {y} _ {s} = \\mathbf {m} _ {s | 0} + \\mathbf {L} \\epsilon} \\\\ = - \\boldsymbol {\\Sigma} _ {s | 0} ^ {- 1} \\left(\\mathbf {y} _ {s} - \\mathbf {m} _ {s | 0}\\right) \\\\ = - \\boldsymbol {\\Sigma} _ {s | 0} ^ {- 1} \\left(\\left[ \\mathbf {m} _ {s | 0} + \\mathbf {L} \\epsilon \\right] - \\mathbf {m} _ {s | 0}\\right) \\\\ = - \\boldsymbol {\\Sigma} _ {s | 0} ^ {- 1} \\left(\\mathbf {L} \\epsilon\\right) \\\\ = - \\left(\\mathbf {L} \\mathbf {L} ^ {\\top}\\right) ^ {- 1} \\left(\\mathbf {L} \\epsilon\\right) \\\\ = - \\left(\\mathbf {L} ^ {\\top}\\right) ^ {- 1} \\mathbf {L} ^ {- 1} \\mathbf {L} \\epsilon \\\\ = - \\left(\\mathbf {L} ^ {\\top}\\right) ^ {- 1} \\epsilon = - \\left(\\mathbf {L} ^ {- 1}\\right) ^ {\\top} \\epsilon = - \\mathbf {L} ^ {\\top , - 1} \\epsilon \\\\ \\end{array}", + "image_path": "b6a093e1277ebfaa1f766926b04056d30a03482d69fc3763535ee13a0365db81.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 592, + 375, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 375, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 375, + 607 + ], + "type": "text", + "content": "Parameterize " + }, + { + "bbox": [ + 105, + 592, + 375, + 607 + ], + "type": "inline_equation", + "content": "s_{\\theta}(\\mathbf{y}_s,s)" + }, + { + "bbox": [ + 
105, + 592, + 375, + 607 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 105, + 592, + 375, + 607 + ], + "type": "inline_equation", + "content": "s_{\\theta}(\\mathbf{y}_s,s) = -\\mathbf{L}^{\\top, -1}\\epsilon_{\\theta}(\\mathbf{y},s)" + }, + { + "bbox": [ + 105, + 592, + 375, + 607 + ], + "type": "text", + "content": ". This gives" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 143, + 612, + 465, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 612, + 465, + 731 + ], + "spans": [ + { + "bbox": [ + 143, + 612, + 465, + 731 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {1}{2} \\| - \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s) \\quad - \\quad - \\mathbf {L} ^ {\\top , - 1} \\epsilon \\| _ {g _ {\\phi} ^ {2} (s)} ^ {2} \\\\ = \\frac {1}{2} \\| \\mathbf {L} ^ {\\top , - 1} \\epsilon \\quad - \\quad \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s) \\| _ {g _ {\\phi} ^ {2} (s)} ^ {2} \\\\ = \\frac {1}{2} \\left(\\mathbf {L} ^ {\\top , - 1} \\epsilon - \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s)\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(\\mathbf {L} ^ {\\top , - 1} \\epsilon - \\mathbf {L} ^ {\\top , - 1} \\epsilon_ {\\theta} (\\mathbf {y}, s)\\right) \\\\ = \\frac {1}{2} \\left(\\mathbf {L} ^ {\\top , - 1} \\left[ \\epsilon - \\epsilon_ {\\theta} (\\mathbf {y}, s) \\right]\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(\\mathbf {L} ^ {\\top , - 1} \\left[ \\epsilon - \\epsilon_ {\\theta} (\\mathbf {y}, s) \\right]\\right) \\\\ \\end{array}", + "image_path": "bb4ce96d8f9189f8f659f2042b8afc83eb98abc094e635d7a0a60af978a1570d.jpg" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", 
+ "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "We can also use this insight to analytically compute the quadratic score term (following is computed per data-dimension, so must be multiplied by " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " when computing the ELBO):" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 113, + 543, + 348 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 113, + 543, + 348 + ], + "spans": [ + { + "bbox": [ + 107, + 113, + 543, + 348 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\frac {1}{2} \\| \\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0}) \\| _ {g _ {\\phi} ^ {2} (s)} ^ {2} \\right] = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\left(\\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0})\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(\\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0})\\right) \\right] \\\\ = \\mathbb {E} _ 
{\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\left(- \\mathbf {L} ^ {\\top , - 1} \\epsilon\\right) ^ {\\top} g _ {\\phi} ^ {2} (s) \\left(- \\mathbf {L} ^ {\\top , - 1} \\epsilon\\right) \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\epsilon^ {\\top} (- \\mathbf {L} ^ {- 1}) g _ {\\phi} ^ {2} (s) (- \\mathbf {L} ^ {\\top , - 1}) \\epsilon \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\mathbf {y} _ {s} | \\mathbf {y} _ {0}} \\left[ \\epsilon^ {\\top} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\epsilon \\right] \\\\ = \\mathbb {E} _ {\\mathbf {y} _ {0}} \\mathbb {E} _ {\\epsilon} \\left[ \\epsilon^ {\\top} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\epsilon \\right] \\\\ = \\mathbb {E} _ {\\epsilon} \\left[ \\epsilon^ {\\top} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\epsilon \\right] \\\\ = \\operatorname {T r a c e} \\left(\\mathbf {L} ^ {- 1} g _ {\\phi} ^ {2} (s) \\mathbf {L} ^ {\\top , - 1}\\right) \\\\ \\end{array}", + "image_path": "9ef71958d2b1d1bf0fe7ddc1f3ba9ac69888b0d24ca3b6c3f5089a3031bf688b.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 363, + 351, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 363, + 351, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 351, + 374 + ], + "type": "text", + "content": "G ELBOS IN STATIONARY PARAMETERIZATION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 388, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 411 + ], + "type": "text", + "content": "We use the stationary parameterization described in appendix C. 
We now specialize the ELBO to the linear stationary parameterization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 417, + 504, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 417, + 504, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 417, + 504, + 441 + ], + "type": "text", + "content": "Recall " + }, + { + "bbox": [ + 104, + 417, + 504, + 441 + ], + "type": "inline_equation", + "content": "f_{\\phi}(\\mathbf{y},s) = -[\\mathbf{Q}_{\\phi}(s) + \\mathbf{D}_{\\phi}(s)]\\mathbf{y}" + }, + { + "bbox": [ + 104, + 417, + 504, + 441 + ], + "type": "text", + "content": ". Recall " + }, + { + "bbox": [ + 104, + 417, + 504, + 441 + ], + "type": "inline_equation", + "content": "g_{\\phi}(s) = \\sqrt{2\\mathbf{D}_{\\phi}(s)}" + }, + { + "bbox": [ + 104, + 417, + 504, + 441 + ], + "type": "text", + "content": " We have " + }, + { + "bbox": [ + 104, + 417, + 504, + 441 + ], + "type": "inline_equation", + "content": "g_{\\phi}^{2}(s) = 2\\mathbf{D}_{\\phi}(s)" + }, + { + "bbox": [ + 104, + 417, + 504, + 441 + ], + "type": "text", + "content": ". 
We can write the MDM ISM ELBO as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 203, + 447, + 504, + 479 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 447, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 203, + 447, + 504, + 479 + ], + "type": "interline_equation", + "content": "\\mathcal {L} ^ {\\text {m i s m}} = \\mathbb {E} _ {v \\sim q _ {\\gamma}} \\left[ \\mathbb {E} _ {s \\sim \\operatorname {U n i f} (0, T)} \\left[ \\ell_ {s} ^ {(i s m)} \\right] + \\ell_ {T} + \\ell_ {q} \\right] \\tag {52}", + "image_path": "37007c089f47c50e885524a1c7a5c6c4c4760ba88ed1ae4a0cfc4ede4b78305f.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 486, + 133, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 486, + 133, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 486, + 133, + 496 + ], + "type": "text", + "content": "where" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 195, + 503, + 316, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 503, + 316, + 544 + ], + "spans": [ + { + "bbox": [ + 195, + 503, + 316, + 544 + ], + "type": "interline_equation", + "content": "\\ell_ {s _ {\\theta}} = - \\frac {1}{2} \\| s _ {\\theta} (\\mathbf {y} _ {s}, s) \\| _ {\\underbrace {2 \\mathbf {D} _ {\\phi} (s)} _ {g _ {\\phi} ^ {2}}} ^ {2}", + "image_path": "f4e3acf4c823a89b6c801872a89345e367fb5112418dfa84e22463d6ec5004fd.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 184, + 546, + 426, + 579 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 546, + 426, + 579 + ], + "spans": [ + { + "bbox": [ + 184, + 546, + 426, + 579 + ], + "type": "interline_equation", + "content": "\\ell_ {\\mathrm {d i v - f g s}} = \\nabla_ {\\mathbf {y} _ {s}} \\cdot \\left[ \\underbrace {- [ \\mathbf {Q} _ {\\phi} (s) + \\mathbf {D} _ {\\phi} (s) ] \\mathbf {y} _ {s}} _ {f _ {\\phi}} - \\underbrace {2 \\mathbf {D} _ 
{\\phi} (s)} _ {g _ {\\phi} ^ {2}} s _ {\\theta} (\\mathbf {y} _ {s}, s) \\right]", + "image_path": "a9515ce032dcc8a71e0f8c1182f0a92462e3e85cbaa51622f5d79a56f6f51aed.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 193, + 579, + 504, + 615 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 579, + 504, + 615 + ], + "spans": [ + { + "bbox": [ + 193, + 579, + 504, + 615 + ], + "type": "interline_equation", + "content": "\\ell_ {s} ^ {\\text {i s m}} = \\mathbb {E} _ {\\substack {q _ {\\phi , s, (x, v)} \\\\ \\text {depends on } Q, D}} \\left[ \\ell_ {s \\theta} + \\ell_ {\\text {div - f g s}} \\right] \\tag{53}", + "image_path": "42ed619dbdbe1b6b1d941d56a9e37e88a78bf9de8f821bab30809b55a248512d.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 197, + 617, + 329, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 617, + 329, + 651 + ], + "spans": [ + { + "bbox": [ + 197, + 617, + 329, + 651 + ], + "type": "interline_equation", + "content": "\\ell_ {T} = \\mathbb {E} _ {\\substack {q _ {\\phi , T}, (x, v) \\\\ \\text{depends on} \\mathbf {Q}, \\mathbf {D}}} \\Big [ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) \\Big ]", + "image_path": "222247b16ffbcb61935ac50b0627a63b69d9c08cfdbb945649709b262da98e46.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 199, + 654, + 276, + 667 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 654, + 276, + 667 + ], + "spans": [ + { + "bbox": [ + 199, + 654, + 276, + 667 + ], + "type": "interline_equation", + "content": "\\ell_ {q} = - \\log q _ {\\gamma} (v | x)", + "image_path": "9666156b1e404a6095390266ab9d01464f55cc0f8639b74a3b0107401b6cdbdb.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 679, + 182, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 182, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 182, + 690 + ], + "type": 
"text", + "content": "For the DSM form," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 202, + 697, + 504, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 697, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 202, + 697, + 504, + 730 + ], + "type": "interline_equation", + "content": "\\mathcal {L} ^ {\\mathrm {m d s m}} = \\mathbb {E} _ {v \\sim q _ {\\gamma}} \\left[ \\mathbb {E} _ {s \\sim \\operatorname {U n i f} (0, T)} \\left[ \\ell_ {s} ^ {(d s m)} \\right] + \\ell_ {T} + \\ell_ {q} \\right] \\tag {54}", + "image_path": "021b6b76aa68baf2c8232c1120871681ccd77e077043432109ad5501dd1d9bb7.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 133, + 92 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 133, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 133, + 92 + ], + "type": "text", + "content": "where" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 186, + 100, + 425, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 100, + 425, + 255 + ], + "spans": [ + { + "bbox": [ + 186, + 100, + 425, + 255 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\ell_ {\\mathrm {d i v - f}} = \\nabla_ {\\mathbf 
{y} _ {s}} \\cdot \\underbrace {- [ \\mathbf {Q} _ {\\phi} (s) + \\mathbf {D} _ {\\phi} (s) ] \\mathbf {y} _ {s}} _ {f _ {\\phi}} \\\\ \\ell_{\\text{fwd - score}} = \\frac{1}{2}\\bigg|\\bigg|\\underbrace{\\nabla_{\\mathbf{y}_{s}}\\log q_{\\phi}(\\mathbf{y}_{s}|\\mathbf{y}_{0})}_{\\text{depends on}\\mathbf{Q},\\mathbf{D}}\\| \\underbrace{^ {2}\\mathbf{D}_{\\phi}(s)}_{g_{\\phi}^{2}} \\\\ \\ell_ {\\text {n e g - s c o r e d i f f}} = - \\frac {1}{2} \\| s _ {\\theta} (\\mathbf {y} _ {s}, s) - \\underbrace {\\nabla_ {\\mathbf {y} _ {s}} \\log q _ {\\phi} (\\mathbf {y} _ {s} | \\mathbf {y} _ {0})} _ {\\text {d e p e n d s o n Q , D}} \\| _ {\\underbrace {2 \\mathbf {D} _ {\\phi} (s)} _ {g _ {\\phi} ^ {2}}} ^ {2} \\\\ \\ell_ {s} ^ {(d s m)} = \\mathbb {E} _ {\\substack {q _ {\\phi , s, (x, v)} \\\\ \\text{depends on} \\mathbf {Q}, \\mathbf {D}}} \\left[ \\ell_ {\\text{neg - score diff}} + \\ell_ {\\text{fwd - score}} + \\ell_ {\\text{div - f}} \\right] \\\\ \\end{array}", + "image_path": "6117e331b5e2769be75852b2fcbf649bdd77a0c6125b2e395bf12a7dc9a98127.jpg" + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 200, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 200, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 200, + 94 + 
], + "type": "text", + "content": "H ALGORITHMS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 269, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 269, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 269, + 118 + ], + "type": "text", + "content": "H.1 GENERIC TRANSITION KERNEL" + } + ] + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 115, + 146, + 469, + 247 + ], + "blocks": [ + { + "bbox": [ + 106, + 131, + 290, + 144 + ], + "lines": [ + { + "bbox": [ + 106, + 131, + 290, + 144 + ], + "spans": [ + { + "bbox": [ + 106, + 131, + 290, + 144 + ], + "type": "text", + "content": "Algorithm 2 Get transition distribution " + }, + { + "bbox": [ + 106, + 131, + 290, + 144 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s|x" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "lines": [ + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "spans": [ + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": "Input: data " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " time " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " A, " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " \ncompute: " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{A}(s)" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "g(s)" 
+ }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " \ncompute: " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_s = \\int_0^s\\mathbf{A}(t)dt" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " (integrated drift) \ncompute: " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{N}_s = \\int_0^s g^2 (t)dt" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " (integrated diffusions squared) \ncompute: " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "\\gamma_{s|0} = \\exp \\left(\\mathbf{M}_s\\right)" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " (mean coefficient) \nset: " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0 = [x,0_1,\\dots ,0_{K - 1}],\\pmb{\\Sigma}_{0,zz} = \\mathbf{0}" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " , and " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{0,zv},\\pmb{\\Sigma}_{0,vv}" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " to chosen initial distribution \ncompute: " + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{s|0} = \\gamma_{s|0}\\mathbf{y}_0" + }, + { + "bbox": [ + 115, + 146, + 469, + 247 + ], + "type": "text", + "content": " (mean) \ncompute:" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "algorithm" + }, + { + "bbox": [ + 178, + 251, + 505, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 251, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 178, + 251, + 505, + 
285 + ], + "type": "interline_equation", + "content": "\\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ \\binom {\\mathbf {M} _ {s}} {\\mathbf {0}} \\binom {\\mathbf {N} _ {s}} {- \\mathbf {M} _ {s} ^ {\\top}} \\right] \\binom {\\boldsymbol {\\Sigma} _ {0}} {\\mathbf {I}} \\quad (\\text {i n g r e d i e n t s f o r c o v .}) \\tag {55}", + "image_path": "729b3786783ca3e9044f33c84f83cb7c164585865ab9c8cd8589c0126d15e96a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 292, + 271, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 292, + 271, + 305 + ], + "spans": [ + { + "bbox": [ + 115, + 292, + 271, + 305 + ], + "type": "text", + "content": "compute: " + }, + { + "bbox": [ + 115, + 292, + 271, + 305 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}" + }, + { + "bbox": [ + 115, + 292, + 271, + 305 + ], + "type": "text", + "content": " (cov.)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 305, + 216, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 305, + 216, + 318 + ], + "spans": [ + { + "bbox": [ + 115, + 305, + 216, + 318 + ], + "type": "text", + "content": "Output: " + }, + { + "bbox": [ + 115, + 305, + 216, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mathbf{m}_{s|0},\\pmb{\\Sigma}_{s|0})" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 340, + 242, + 353 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 340, + 242, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 242, + 353 + ], + "type": "text", + "content": "H.2 TRANSITIONS WITH " + }, + { + "bbox": [ + 105, + 340, + 242, + 353 + ], + "type": "inline_equation", + "content": "Q, D" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "spans": [ + { + "bbox": 
[ + 104, + 361, + 504, + 398 + ], + "type": "text", + "content": "Current param matrices " + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{Q}}_{\\phi},\\tilde{\\mathbf{D}}_{\\phi}" + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "text", + "content": " and along with fixed time-in scalar-out functions " + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "inline_equation", + "content": "b_{q}(s),b_{d}(s)" + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "text", + "content": " and their known integrals " + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "inline_equation", + "content": "B_{q}(s),B_{d}(s)" + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "inline_equation", + "content": "q_{\\gamma}(v_0|z_0 = x)" + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "text", + "content": " taken to be parameterless so that " + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "inline_equation", + "content": "v_{0}\\sim \\mathcal{N}(0,I)" + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "text", + "content": ". Model params are " + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "inline_equation", + "content": "s_\\theta" + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "text", + "content": " fixed " + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 361, + 504, + 398 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "code", + "bbox": [ + 115, + 423, + 392, + 594 + ], + "blocks": [ + { + "bbox": [ + 106, + 408, + 335, + 421 + ], + "lines": [ + { + "bbox": [ + 106, + 408, + 335, + 421 + ], + "spans": [ + { + "bbox": [ + 106, + 408, + 335, + 421 + ], + "type": "text", + "content": "Algorithm 3 Get Q, D and their integrated terms M, N" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "lines": [ + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "spans": [ + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": "Input: time " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " and current params " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " \ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "[b_q]_s = \\int_0^s b_q(\\nu)d\\nu" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " using known integral " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "B_{q}(s) - B_{q}(0)" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " \ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "[b_d]_s = \\int_0^s b_d(\\nu)d\\nu" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " using known integral " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "B_{d}(s) - B_{d}(0)" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": ". 
\ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "[\\mathbf{Q}_{\\phi}]_{s} = [b_{q}]_{s}\\cdot [\\tilde{\\mathbf{Q}}_{\\phi} - \\tilde{\\mathbf{Q}}_{\\phi}^{\\top}]" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " for current params " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{Q}}_{\\phi}" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": ". \ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "[\\mathbf{D}_{\\phi}]_{s} = [b_{d}]_{s}\\cdot [\\tilde{\\mathbf{D}}_{\\phi}\\tilde{\\mathbf{D}}_{\\phi}^{\\top}]" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " for current params " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{D}}_{\\phi}" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": ". 
\ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_s = -((\\mathbf{Q}_{\\phi}]_s + [\\mathbf{D}_{\\phi}]_s)" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " (M just a variable name) \ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{N}_s = [2\\mathbf{D}_{\\phi}]_s = 2\\cdot [\\mathbf{D}_{\\phi}]_s" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " (N just a variable name) \ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_s = b_q(s)\\cdot [\\tilde{\\mathbf{Q}}_{\\phi} - \\tilde{\\mathbf{Q}}_{\\phi}^{\\top}]" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " (not integrated) \ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_s = b_d(s)\\cdot [\\tilde{\\mathbf{D}}_{\\phi}\\tilde{\\mathbf{D}}_{\\phi}^{\\top}]" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " (not integrated) \ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "A_s = -[\\mathbf{Q}_s + \\mathbf{D}_s]" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " (drift coef.) \ncompute: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "g_s^2 = 2\\mathbf{D}_s" + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "text", + "content": " (diffusion coef. 
squared) \nOutput: " + }, + { + "bbox": [ + 115, + 423, + 392, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{A}_s, g_s^2, \\mathbf{M}_s, \\mathbf{N}_s" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "algorithm" + }, + { + "bbox": [ + 105, + 613, + 217, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 217, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 217, + 624 + ], + "type": "text", + "content": "H.3 ELBO ALGORITHMS" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 115, + 95, + 348, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 95, + 348, + 108 + ], + "spans": [ + { + "bbox": [ + 115, + 95, + 348, + 108 + ], + "type": "text", + "content": "Input: Sample " + }, + { + "bbox": [ + 115, + 95, + 348, + 108 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0 = (x, v)" + }, + { + "bbox": [ + 115, + 95, + 348, + 108 + ], + "type": "text", + "content": " and time " + }, + { + "bbox": [ + 115, + 95, + 348, + 108 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 115, + 95, + 348, + 108 + ], + "type": "text", + "content": ". 
Current params " + }, + { + "bbox": [ + 115, + 95, + 348, + 108 + ], + "type": "inline_equation", + "content": "\\phi" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 107, + 262, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 107, + 262, + 118 + ], + "spans": [ + { + "bbox": [ + 116, + 107, + 262, + 118 + ], + "type": "text", + "content": "set: " + }, + { + "bbox": [ + 116, + 107, + 262, + 118 + ], + "type": "inline_equation", + "content": "\\mathbf{A}_s,g_s^2,\\mathbf{M}_s,\\mathbf{N}_s\\gets" + }, + { + "bbox": [ + 116, + 107, + 262, + 118 + ], + "type": "text", + "content": " algorithm 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 116, + 118, + 320, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 118, + 320, + 137 + ], + "spans": [ + { + "bbox": [ + 116, + 118, + 320, + 137 + ], + "type": "text", + "content": "compute: " + }, + { + "bbox": [ + 116, + 118, + 320, + 137 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{s|0} = \\exp \\left(\\mathbf{M}_s\\right)\\mathbf{y}_0" + }, + { + "bbox": [ + 116, + 118, + 320, + 137 + ], + "type": "text", + "content": " (transition mean)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 116, + 137, + 310, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 137, + 310, + 148 + ], + "spans": [ + { + "bbox": [ + 116, + 137, + 310, + 148 + ], + "type": "text", + "content": "compute: ingredients for transition cov. 
matrix:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 229, + 153, + 504, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 153, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 229, + 153, + 504, + 186 + ], + "type": "interline_equation", + "content": "\\binom {\\mathbf {C} _ {s}} {\\mathbf {H} _ {s}} = \\exp \\left[ \\binom {\\mathbf {M} _ {s}} {\\mathbf {0}} \\binom {\\mathbf {N} _ {s}} {- \\mathbf {M} _ {s} ^ {\\top}} \\right] \\binom {\\mathbf {0}} {\\mathbf {I}} \\tag {56}", + "image_path": "5e7e15a1908215f928e4dd91fd1db161ce32231dca6a87711b938a9bd2174799.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 194, + 302, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 194, + 302, + 207 + ], + "spans": [ + { + "bbox": [ + 115, + 194, + 302, + 207 + ], + "type": "text", + "content": "compute: " + }, + { + "bbox": [ + 115, + 194, + 302, + 207 + ], + "type": "inline_equation", + "content": "\\Sigma_{s|0} = \\mathbf{C}_s(\\mathbf{H}_s)^{-1}" + }, + { + "bbox": [ + 115, + 194, + 302, + 207 + ], + "type": "text", + "content": " (transition cov)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 207, + 335, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 207, + 335, + 220 + ], + "spans": [ + { + "bbox": [ + 116, + 207, + 335, + 220 + ], + "type": "text", + "content": "instantiate: " + }, + { + "bbox": [ + 116, + 207, + 335, + 220 + ], + "type": "inline_equation", + "content": "q_{\\phi ,s,(x,v)} = q_{\\phi}(\\mathbf{y}_s|\\mathbf{y}_0) = \\mathcal{N}(\\mathbf{m}_{s|0},\\pmb {\\Sigma}_{s|0})" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 220, + 220, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 220, + 220, + 232 + ], + "spans": [ + { + "bbox": [ + 116, + 220, + 220, + 232 + ], + "type": "text", + "content": "Output: " + }, + { + "bbox": [ + 116, + 220, + 220, + 232 + ], + "type": "inline_equation", + "content": "q_{\\phi ,s,(x,v)},A_s,g_s^2" + } + ] + } + ], + "index": 9 + }, + { + "type": "code", + "bbox": [ + 115, + 264, + 468, + 376 + ], + "blocks": [ + { + "bbox": [ + 107, + 82, + 271, + 94 + ], + "lines": [ + { + "bbox": [ + 107, + 82, + 271, + 94 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 271, + 94 + ], + "type": "text", + "content": "Algorithm 4 Get transition distributions" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 107, + 250, + 291, + 262 + ], + "lines": [ + { + "bbox": [ + 107, + 250, + 291, + 262 + ], + "spans": [ + { + "bbox": [ + 107, + 250, + 291, + 262 + ], + "type": "text", + "content": "Algorithm 5 Compute ELBO with ism or dsm" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "lines": [ + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "spans": [ + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": "input: Data point " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 115, 
+ 264, + 468, + 376 + ], + "type": "text", + "content": " and current params " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "\\theta, \\phi, \\gamma" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " \ndraw: an aux. sample " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "v \\sim q_{\\gamma}(v|x)" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " \ndraw: a sample " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "s \\sim \\mathrm{Unif}(0,T)" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " \nset: " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0 = (x,v)" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " \nset: " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "q_{\\phi,s,\\mathbf{y}_0}, A_s, g_s^2 \\gets \\text{algorithm 4 called on } \\mathbf{y}_0, s, \\phi" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " \ndraw: " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s \\sim q_{\\phi,s,\\mathbf{y}_0}" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " \ncompute: " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "\\ell_s" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "\\mathrm{dsm}(s)" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " (algorithm 6) or " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": 
"inline_equation", + "content": "\\mathrm{ism}(s)" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " (algorithm 7) on " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s, \\theta, A_s, g_s^2, q_{\\phi,s,\\mathbf{y}_0}" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " \nset: " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "q_{\\phi,T,\\mathbf{y}_0}, --, -- \\gets \\text{algorithm 4 called on } \\mathbf{y}_0, T, \\phi" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " \ndraw: " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_T \\sim q_{\\phi,T,\\mathbf{y}_0}" + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "text", + "content": " \noutput: " + }, + { + "bbox": [ + 115, + 264, + 468, + 376 + ], + "type": "inline_equation", + "content": "\\ell_s + \\log \\pi_\\theta(\\mathbf{y}_T) - \\log q_\\gamma(v)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 115, + 408, + 353, + 499 + ], + "blocks": [ + { + "bbox": [ + 107, + 394, + 230, + 406 + ], + "lines": [ + { + "bbox": [ + 107, + 394, + 230, + 406 + ], + "spans": [ + { + "bbox": [ + 107, + 394, + 230, + 406 + ], + "type": "text", + "content": "Algorithm 6 Compute " + }, + { + "bbox": [ + 107, + 394, + 230, + 406 + ], + "type": "inline_equation", + "content": "\\mathrm{dsm}(s)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "lines": [ + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "spans": [ + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "text", + "content": "input: " + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + 
], + "type": "inline_equation", + "content": "\\mathbf{y}_s, \\theta, A_s, g_s^2, q_{\\phi,s,\\mathbf{y}_0}" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "text", + "content": ". \ncompute: fwd-score = " + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{y}_s}" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "text", + "content": " log " + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s|\\mathbf{y}_0)" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "text", + "content": " \ncompute: model-score = " + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "inline_equation", + "content": "s_\\theta(\\mathbf{y}_s, s)" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "text", + "content": " \ncompute: fwd-score-term = " + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "inline_equation", + "content": "\\frac{1}{2}(\\mathrm{fwd-score})^\\top g_s^2" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "text", + "content": " (fwd-score) \ncompute: score-diff = model-score - fwd-score \ncompute: diff-term = " + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "inline_equation", + "content": "-\\frac{1}{2}" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "text", + "content": " score-diff" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{y}_s}" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "text", + "content": " score-diff \ncompute: div-f = " + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{y}_s} \\cdot A_s\\mathbf{y}_s" + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": "text", + "content": " \noutput: " + }, + { + "bbox": [ + 115, + 408, + 353, + 499 + ], + "type": 
"inline_equation", + "content": "\\mathrm{dsm}(s) = \\mathrm{fwd-score-term} + \\mathrm{diff-term} + \\mathrm{div-f}" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "code_body" + } + ], + "index": 13, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 115, + 531, + 347, + 613 + ], + "blocks": [ + { + "bbox": [ + 107, + 517, + 228, + 529 + ], + "lines": [ + { + "bbox": [ + 107, + 517, + 228, + 529 + ], + "spans": [ + { + "bbox": [ + 107, + 517, + 228, + 529 + ], + "type": "text", + "content": "Algorithm 7 Compute " + }, + { + "bbox": [ + 107, + 517, + 228, + 529 + ], + "type": "inline_equation", + "content": "\\operatorname{ism}(s)" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "lines": [ + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "spans": [ + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": "input: " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_s,\\theta ,A_s,g_s^2,q_{\\phi ,s,\\mathbf{y}_0}" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " \ncompute: model-score " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", + "content": "= s_{\\theta}(\\mathbf{y}_{s},s)" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " \ncompute: score-term " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", + "content": "= -\\frac{1}{2}" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " model-score " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", + "content": "\\top g_s^2" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " model-score \ncompute: div-gs " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", 
+ "content": "= \\nabla_{\\mathbf{y}_s}\\cdot g_s^2 s_\\theta (\\mathbf{y}_s,s)" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " \ncompute: div-f " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", + "content": "= \\nabla_{\\mathbf{y}_s}\\cdot A_s\\mathbf{y}_s" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " \ncompute: div-term " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", + "content": "= -" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " div-gs " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", + "content": "^+" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " div-f \noutput: ism(s) " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " score-term " + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "inline_equation", + "content": "^+" + }, + { + "bbox": [ + 115, + 531, + 347, + 613 + ], + "type": "text", + "content": " div-term" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "code_body" + } + ], + "index": 15, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 105, + 638, + 288, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 638, + 288, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 288, + 649 + ], + "type": "text", + "content": "I VALID ELBO WITH TRUNCATION" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "text", + "content": "The integrand in the ELBO and its gradients is not bounded at time 0. 
Therefore, following Sohl-Dickstein et al. (2015) and Song et al. (2021) the integrand in eq. (7) is integrated from " + }, + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "inline_equation", + "content": "[\\epsilon, T]" + }, + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "text", + "content": ", rather than " + }, + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "inline_equation", + "content": "[0, T]" + }, + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "text", + "content": ". However, that integral is not a valid lower bound on " + }, + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\log p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "text", + "content": ". Instead, it can be viewed as a proper lower bound on the prior for a latent variable " + }, + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{\\epsilon}" + }, + { + "bbox": [ + 104, + 665, + 504, + 732 + ], + "type": "text", + "content": ". Therefore, to provide a bound for the data, one can introduce a likelihood and substitute the prior lower bound into a standard variational bound that integrates out the latent." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 104 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 104 + ], + "type": "text", + "content": "To provide a valid lower bound for multivariate diffusions, we extend theorem 6 in Song et al. (2021) from univariate to multivariate diffusions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 505, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 505, + 129 + ], + "type": "text", + "content": "Theorem 3. 
For transition kernel " + }, + { + "bbox": [ + 104, + 106, + 505, + 129 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s \\mid \\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 106, + 505, + 129 + ], + "type": "text", + "content": ", we can compute upper bound the model likelihood at time 0 as follows, for any " + }, + { + "bbox": [ + 104, + 106, + 505, + 129 + ], + "type": "inline_equation", + "content": "\\epsilon > 0" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 125, + 133, + 505, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 133, + 505, + 159 + ], + "spans": [ + { + "bbox": [ + 125, + 133, + 505, + 159 + ], + "type": "interline_equation", + "content": "\\log p _ {\\theta} (x) \\geq \\mathbb {E} _ {q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} \\mid x\\right)} \\mathbb {E} _ {q _ {\\phi} \\left(\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}\\right)} \\left[ \\log \\frac {p _ {\\theta} \\left(\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon}\\right)}{q _ {\\phi} \\left(\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}\\right)} + \\mathcal {L} _ {m d m} (\\mathbf {y} _ {\\epsilon}, \\epsilon) - \\log q _ {\\phi} \\left(\\mathbf {y} _ {0} ^ {v} \\mid x\\right) \\right], \\tag {57}", + "image_path": "eeb1aa8cf0cc6142b05f1b14b889e54c8a7830a3f733d05778220dd13ef592ea.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 162, + 235, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 162, + 235, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 235, + 175 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 162, + 235, + 175 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{mdm}(\\mathbf{y}_{\\epsilon},\\epsilon)" + }, + { + "bbox": [ + 105, + 162, + 235, + 175 + ], + "type": "text", + "content": " is defined as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 129, + 177, + 479, + 210 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 177, + 479, + 210 + ], + "spans": [ + { + "bbox": [ + 129, + 177, + 479, + 210 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {m d m} (\\mathbf {y} _ {\\epsilon}, \\epsilon) = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} _ {> \\epsilon} | \\mathbf {y} _ {\\epsilon})} \\left[ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) - \\int_ {\\epsilon} ^ {T} \\frac {1}{2} \\| s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} - \\frac {1}{2} \\| s _ {\\theta} - s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} + \\nabla \\cdot f _ {\\phi} \\right].", + "image_path": "780018978e14cfb4c58e861a138ffd0f1a37743a8ea97fdc72d5bf8995161017.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "type": "text", + "content": "Proof. For transition kernel " + }, + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_s \\mid \\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "type": "text", + "content": ", we can compute upper bound the model likelihood at time 0 following an application of the variational bound" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 144, + 247, + 462, + 387 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 247, + 462, + 387 + ], + "spans": [ + { + "bbox": [ + 144, + 247, + 462, + 387 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\log p _ {\\theta} (x) = \\log \\int_ {v _ {0}} p _ {\\theta} (\\mathbf {y} _ {0} = [ x, v _ {0} ]) d v _ {0} \\\\ = \\log \\int_ {v _ {0}, \\mathbf {y} _ {\\epsilon}} p _ {\\theta} (\\mathbf {y} _ {0}, \\mathbf {y} _ {\\epsilon}) d v _ {0} d \\mathbf {y} _ {\\epsilon} \\\\ = \\log \\int_ {v _ {0}, \\mathbf {y} _ {\\epsilon}} q _ {\\phi} (\\mathbf 
{y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}) \\frac {q (v _ {0} \\mid x)}{q (v _ {0} \\mid x)} \\frac {p _ {\\theta} (\\mathbf {y} _ {0} , \\mathbf {y} _ {\\epsilon})}{q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0})} d v _ {0} d \\mathbf {y} _ {\\epsilon} \\\\ = \\log \\int_ {v _ {0}, \\mathbf {y} _ {\\epsilon}} q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0}) \\frac {q (v _ {0} \\mid x)}{q (v _ {0} \\mid x)} \\frac {p _ {\\theta} (\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon}) p _ {\\theta} (\\mathbf {y} _ {\\epsilon})}{q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0})} d v _ {0} d \\mathbf {y} _ {\\epsilon} \\\\ \\geq \\mathbb {E} _ {q (v _ {0} | x) q _ {\\phi} (\\mathbf {y} _ {\\epsilon} | \\mathbf {y} _ {0})} \\left[ \\log \\frac {p _ {\\theta} (\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon})}{q _ {\\phi} (\\mathbf {y} _ {\\epsilon} \\mid \\mathbf {y} _ {0})} - \\log q _ {\\phi} (\\mathbf {y} _ {0} ^ {v} \\mid x) + \\log p _ {\\theta} (\\mathbf {y} _ {\\epsilon}) \\right] \\\\ \\end{array}", + "image_path": "351349343802abd6458a5a150def9fc0e0173cf974ee51363932d737cd10c412.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 396, + 447, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 447, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 447, + 407 + ], + "type": "text", + "content": "A lower bound for " + }, + { + "bbox": [ + 105, + 396, + 447, + 407 + ], + "type": "inline_equation", + "content": "\\log p_{\\theta}(\\mathbf{y}_{\\epsilon})" + }, + { + "bbox": [ + 105, + 396, + 447, + 407 + ], + "type": "text", + "content": " can be derived in a similar manner to eq. 
(7), such that" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 410, + 511, + 442 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 410, + 511, + 442 + ], + "spans": [ + { + "bbox": [ + 104, + 410, + 511, + 442 + ], + "type": "interline_equation", + "content": "\\log p _ {\\theta} (\\mathbf {y} _ {\\epsilon}) \\geq \\mathcal {L} _ {\\mathrm {m d m}} (\\mathbf {y} _ {\\epsilon}, \\epsilon) = \\mathbb {E} _ {q _ {\\phi} (\\mathbf {y} _ {> \\epsilon} | \\mathbf {y} _ {\\epsilon})} \\left[ \\log \\pi_ {\\theta} (\\mathbf {y} _ {T}) - \\int_ {\\epsilon} ^ {T} \\frac {1}{2} \\| s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} - \\frac {1}{2} \\| s _ {\\theta} - s _ {\\phi} \\| _ {g _ {\\phi}} ^ {2} + \\nabla \\cdot f _ {\\phi} \\right].", + "image_path": "9d264da8c96eed5ddfbcaa13e3f13b14e58b17595b25f8e6c85fba86f5280d65.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "text", + "content": "The choice of " + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})" + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "text", + "content": " is arbitrary, however following Sohl-Dickstein et al. (2015); Song et al. 
(2021) we let " + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})" + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "text", + "content": " be Gaussian with mean " + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\mu_{p_{\\theta},\\epsilon}" + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "text", + "content": " and covariance " + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\Sigma_{p_{\\theta},\\epsilon}" + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "text", + "content": ". Suppose " + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "inline_equation", + "content": "q_{\\phi}(\\mathbf{y}_{\\epsilon} \\mid \\mathbf{y}_0) = \\mathcal{N}(\\mathbf{y}_{\\epsilon} \\mid \\mathbf{A}\\mathbf{y}_0, \\Sigma)" + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "text", + "content": ", then we select the following mean " + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\mu_{p_{\\theta},\\epsilon}" + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "text", + "content": " and covariance " + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\Sigma_{p_{\\theta},\\epsilon}" + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 445, + 504, + 491 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 235, + 493, + 375, + 507 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 493, + 375, + 507 + ], + "spans": [ + { + "bbox": [ + 235, + 493, + 375, + 507 + ], + "type": "interline_equation", + "content": "\\mu_ 
{p _ {\\theta}, \\epsilon} = \\mathbf {A} ^ {- 1} \\Sigma s _ {\\theta} (\\mathbf {y} _ {\\epsilon}, \\epsilon) + \\mathbf {A} ^ {- 1} \\mathbf {y} _ {\\epsilon}", + "image_path": "5c37f1f91870868fa44256a0e2c2037c1aa956a3dc26769a7cc9975d31e3f602.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 235, + 509, + 320, + 523 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 509, + 320, + 523 + ], + "spans": [ + { + "bbox": [ + 235, + 509, + 320, + 523 + ], + "type": "interline_equation", + "content": "\\Sigma_ {p _ {\\theta}, \\epsilon} = \\mathbf {A} ^ {- 1} \\Sigma \\mathbf {A} ^ {- \\top}", + "image_path": "6e3bc152ceafd448b092a41f9f63d3707884cf3b29b3b64794780afcf925d09c.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 525, + 504, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 504, + 550 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 525, + 504, + 550 + ], + "type": "inline_equation", + "content": "\\mu_{p_{\\theta},\\epsilon},\\Sigma_{p_{\\theta},\\epsilon}" + }, + { + "bbox": [ + 104, + 525, + 504, + 550 + ], + "type": "text", + "content": " are derived using Tweedie's formula (Efron, 2011) by setting " + }, + { + "bbox": [ + 104, + 525, + 504, + 550 + ], + "type": "inline_equation", + "content": "\\mu_{\\epsilon} = \\mathbb{E}[\\mathbf{y}_0\\mid \\mathbf{y}_{\\epsilon}]" + }, + { + "bbox": [ + 104, + 525, + 504, + 550 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 525, + 504, + 550 + ], + "type": "inline_equation", + "content": "\\Sigma_{\\epsilon} = \\mathrm{Var}(\\mathbf{y}_0\\mid \\mathbf{y}_{\\epsilon})" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 560, + 439, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 560, + 439, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 439, + 572 + ], + 
"type": "text", + "content": "We next derive this choice as an approximation of the optimal Gaussian likelihood." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 585, + 241, + 596 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 241, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 241, + 596 + ], + "type": "text", + "content": "I.1 LIKELIHOOD DERIVATION" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0\\sim q_0(\\mathbf{y}_0)" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{\\epsilon}\\sim \\mathcal{N}(\\mathbf{y}_{\\epsilon}\\mid A\\mathbf{y}_0,\\Sigma)" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "A,\\Sigma" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": " are the mean coefficient and covariance derived from the transition kernel at time " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": ". 
We use Tweedie's formula to get the mean and covariance of " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": " given " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{\\epsilon}" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": " under " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": ". This mean and covariance feature the true score " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{y}_{\\epsilon}}\\log q(\\mathbf{y}_{\\epsilon})" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": ". We replace the score with the score model " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "s_\\theta" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": " and then set " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{y}_0|\\mathbf{y}_{\\epsilon})" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": " to have the resulting approximate mean and covariance. 
We make this choice because the optimal " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{y}_0|\\mathbf{y}_{\\epsilon})" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": " equals the true " + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "inline_equation", + "content": "q(\\mathbf{y}_0|\\mathbf{y}_{\\epsilon})" + }, + { + "bbox": [ + 104, + 605, + 505, + 673 + ], + "type": "text", + "content": " as discussed throughout the work." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 677, + 257, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 257, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 257, + 690 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 105, + 677, + 257, + 690 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0 = [\\mathbf{x}_0,\\mathbf{v}_0]" + }, + { + "bbox": [ + 105, + 677, + 257, + 690 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 105, + 677, + 257, + 690 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_0\\sim q_{\\mathrm{data}}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 694, + 504, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 694, + 504, + 716 + ], + "spans": [ + { + "bbox": [ + 104, + 694, + 504, + 716 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 694, + 504, + 716 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 694, + 504, + 716 + ], + "type": "text", + "content": " be the natural parameter for the multivariate Gaussian likelihood " + }, + { + "bbox": [ + 104, + 694, + 504, + 716 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\mathbf{y}_{\\epsilon} \\mid A\\mathbf{y}_0, \\Sigma)" + }, + { + "bbox": [ + 104, + 694, + 504, + 716 + ], + "type": "text", + "content": ". 
Then, Tweedie's formula (Efron, 2011) states that:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 231, + 719, + 377, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 719, + 377, + 734 + ], + "spans": [ + { + "bbox": [ + 231, + 719, + 377, + 734 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\eta \\mid \\mathbf {u} _ {\\epsilon} ] = \\nabla_ {\\mathbf {y} _ {\\epsilon}} l (\\mathbf {y} _ {\\epsilon}) - \\nabla_ {\\mathbf {y} _ {\\epsilon}} l _ {0} (\\mathbf {y} _ {\\epsilon})", + "image_path": "9cb87263418d1a6931bd0e44e895307ad2b10a33d38e9529d0d5e62686453632.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 82, + 490, + 124 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 132, + 82, + 215, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 82, + 215, + 95 + ], + "spans": [ + { + "bbox": [ + 132, + 82, + 215, + 95 + ], + "type": "inline_equation", + "content": "l(\\mathbf{y}_{\\epsilon}) = \\log q(\\mathbf{y}_{\\epsilon})" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 97, + 467, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 97, + 467, + 110 + ], + "spans": [ + { + "bbox": [ + 
132, + 97, + 467, + 110 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 97, + 467, + 110 + ], + "type": "inline_equation", + "content": "s_{\\theta}(\\mathbf{y}_{\\epsilon}, \\epsilon)" + }, + { + "bbox": [ + 132, + 97, + 467, + 110 + ], + "type": "text", + "content": " is taken to be the true score " + }, + { + "bbox": [ + 132, + 97, + 467, + 110 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{y}_{\\epsilon}} \\log q(\\mathbf{y}_{\\epsilon})" + }, + { + "bbox": [ + 132, + 97, + 467, + 110 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 132, + 97, + 467, + 110 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{y}_{\\epsilon}} l(\\mathbf{y}_{\\epsilon}) = s_{\\theta}(\\mathbf{y}_{\\epsilon}, \\epsilon)" + }, + { + "bbox": [ + 132, + 97, + 467, + 110 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 112, + 490, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 112, + 490, + 124 + ], + "spans": [ + { + "bbox": [ + 132, + 112, + 490, + 124 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 112, + 490, + 124 + ], + "type": "inline_equation", + "content": "l_{0}" + }, + { + "bbox": [ + 132, + 112, + 490, + 124 + ], + "type": "text", + "content": " is the log of the base distribution defined in the exponential family parameterization." 
+ } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 133, + 504, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 504, + 157 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 504, + 157 + ], + "type": "text", + "content": "The base distribution is a multivariate Gaussian with mean 0 and covariance " + }, + { + "bbox": [ + 104, + 133, + 504, + 157 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 104, + 133, + 504, + 157 + ], + "type": "text", + "content": ", therefore " + }, + { + "bbox": [ + 104, + 133, + 504, + 157 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{y}_{\\epsilon}}l_{0}(\\mathbf{y}_{\\epsilon}) = -\\Sigma^{-1}\\mathbf{y}_{\\epsilon}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 239, + 162, + 369, + 176 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 162, + 369, + 176 + ], + "spans": [ + { + "bbox": [ + 239, + 162, + 369, + 176 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\eta \\mid \\mathbf {y} _ {\\epsilon} ] = s _ {\\theta} (\\mathbf {y} _ {\\epsilon}, \\epsilon) + \\Sigma^ {- 1} \\mathbf {y} _ {\\epsilon}.", + "image_path": "e5b6d2ca2184262f01d199213650b709104c0b9d1a965f6751e290b459dbbb48.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "text", + "content": "However, Tweedie's formula is not directly applicable since our " + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{\\epsilon}" + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "text", + "content": " is not directly normal with mean " + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "inline_equation", + "content": 
"\\mathbf{y}_0" + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "text", + "content": ". Instead, to derive the conditional mean of " + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_0" + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "text", + "content": " given " + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{\\epsilon}" + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "text", + "content": ", we use the relation " + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "inline_equation", + "content": "\\eta = \\Sigma^{-1}\\mathbf{A}\\mathbf{y}_0" + }, + { + "bbox": [ + 104, + 181, + 505, + 216 + ], + "type": "text", + "content": " and the linearity of conditional expectation to get" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 220, + 220, + 388, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 220, + 388, + 281 + ], + "spans": [ + { + "bbox": [ + 220, + 220, + 388, + 281 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} \\left[ \\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon} \\right] = \\mathbb {E} \\left[ A ^ {- 1} \\Sigma \\eta \\mid \\mathbf {y} _ {\\epsilon} \\right] \\\\ = A ^ {- 1} \\Sigma \\mathbb {E} [ \\eta \\mid \\mathbf {y} _ {\\epsilon} ] \\\\ = A ^ {- 1} \\Sigma \\left(s _ {\\theta} \\left(\\mathbf {y} _ {\\epsilon}, \\epsilon\\right) + \\Sigma^ {- 1} \\mathbf {y} _ {\\epsilon}\\right) \\\\ = A ^ {- 1} \\Sigma s _ {\\theta} \\left(\\mathbf {y} _ {\\epsilon}, \\epsilon\\right) + A ^ {- 1} \\mathbf {y} _ {\\epsilon}. 
\\\\ \\end{array}", + "image_path": "0ded96b3f97ebd7df3a30a3531952920a48b5155e64010bc4b022f32a02c5e2c.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 295, + 448, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 448, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 448, + 308 + ], + "type": "text", + "content": "For the variance, we use the following relation " + }, + { + "bbox": [ + 104, + 295, + 448, + 308 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{\\epsilon} = A\\mathbf{y}_0 + \\sqrt{\\Sigma}\\epsilon" + }, + { + "bbox": [ + 104, + 295, + 448, + 308 + ], + "type": "text", + "content": ", which implies that" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 272, + 313, + 378, + 327 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 313, + 378, + 327 + ], + "spans": [ + { + "bbox": [ + 272, + 313, + 378, + 327 + ], + "type": "interline_equation", + "content": "\\mathbf {y} _ {0} = A ^ {- 1} \\mathbf {y} _ {\\epsilon} - A ^ {- 1} \\sqrt {\\sum} \\epsilon", + "image_path": "0d4f50bfb1f861796b9481e1266b1b7445d6ae810ee421cd9ed431347813f3c4.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 233, + 329, + 347, + 345 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 329, + 347, + 345 + ], + "spans": [ + { + "bbox": [ + 233, + 329, + 347, + 345 + ], + "type": "interline_equation", + "content": "\\operatorname {V a r} \\left(\\mathbf {y} _ {0} \\mid \\mathbf {y} _ {\\epsilon}\\right) = A ^ {- 1} \\Sigma A ^ {- T}.", + "image_path": "5da32c1e444daceb093557557591aa8678856d793e8b8fa099f1a3806f6c59d0.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 355, + 504, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 355, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 355, + 504, + 376 + ], + "type": "text", + "content": "Therefore, for the model posterior 
distribution " + }, + { + "bbox": [ + 104, + 355, + 504, + 376 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathbf{y}_0 \\mid \\mathbf{y}_{\\epsilon})" + }, + { + "bbox": [ + 104, + 355, + 504, + 376 + ], + "type": "text", + "content": " we choose a Normal with mean and covariance" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 235, + 383, + 373, + 414 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 383, + 373, + 414 + ], + "spans": [ + { + "bbox": [ + 235, + 383, + 373, + 414 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mu_ {p _ {\\theta}, \\epsilon} = A ^ {- 1} \\Sigma s _ {\\theta} (\\mathbf {y} _ {\\epsilon}, \\epsilon) + A ^ {- 1} \\mathbf {y} _ {\\epsilon} \\\\ \\Sigma_ {p _ {\\theta}, \\epsilon} = A ^ {- 1} \\Sigma A ^ {- T} \\\\ \\end{array}", + "image_path": "1b9eda1c534534b5196fd3f00232997087bb9c7a3feb65082afd85abd95e1b5d.jpg" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_content_list.json b/2023/Which Layer is Learning Faster_ A Systematic Exploration of 
Layer-wise Convergence Rate for Deep Neural Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..5ebc8ebef271ad62c2de755df407f5da33bd91f6 --- /dev/null +++ b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_content_list.json @@ -0,0 +1,3074 @@ +[ + { + "type": "text", + "text": "WHICH LAYER IS LEARNING FASTER? A SYSTEMATIC EXPLORATION OF LAYER-WISE CONVERGENCE RATE FOR DEEP NEURAL NETWORKS", + "text_level": 1, + "bbox": [ + 171, + 99, + 821, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yixiong Chen $^{1}$ Alan Yuille $^{2}$ Zongwei Zhou $^{2}$", + "bbox": [ + 181, + 194, + 521, + 209 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1The Chinese University of Hong Kong - Shenzhen 2Johns Hopkins University", + "bbox": [ + 181, + 209, + 717, + 224 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "yixiongchen@link.cuhk.edu.cn ayuille1@jhu.edu zzhou82@jh.edu", + "bbox": [ + 181, + 224, + 810, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 275, + 545, + 289 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The deeply hierarchical structures enable deep neural networks (DNNs) to fit extremely complex target functions. However, the complex interaction between layers also makes the learning process of a particular layer poorly understood. This work demonstrates that the shallower layers of DNNs tend to converge faster than the deeper layers. We call this phenomenon Layer Convergence Bias. We also uncover the fundamental reason behind this phenomenon: Flatter local minima of shallower layers make their gradients more stable and predictive, allowing for faster training. 
Another surprising result is that the shallower layers tend to learn the low-frequency components of the target function, while the deeper layers usually learn the high-frequency components. It is consistent with the recent discovery that DNNs learn lower frequency objects faster.", + "bbox": [ + 228, + 309, + 767, + 464 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 498, + 336, + 513 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Over the last decade, breakthrough progress has been made by deep neural networks (DNNs) on a wide range of complicated tasks in computer vision (Krizhevsky et al., 2017), natural language processing (Sutskever et al., 2014), speech recognition (Graves et al., 2013), game playing (Silver et al., 2016), and biomedical prediction (Jumper et al., 2021). Such progress hinged on a number of advances in hardware technology, dataset construction, and model architectural designs. Among them, the invention and application of very-deep network architectures play a decisive role.", + "bbox": [ + 169, + 532, + 823, + 617 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Deepening the network is an effective way to empower its fitting ability. Extensive studies (Delalleau & Bengio, 2011; Eldan & Shamir, 2016; Lu et al., 2017) compared the power between deeper and wider neural networks and showed that the polynomial growth of depth has a similar effect to the exponential growth of width. Therefore, modern DNNs (Simonyan & Zisserman, 2014; He et al., 2016) usually contain tens of layers to ensure their modeling abilities for real-world applications.", + "bbox": [ + 169, + 623, + 823, + 695 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although the practical success of deep architectures is indisputable, they make the learning hardly predictable since complex interaction happens between layers when co-adapting to the target (Yosinski et al., 2014). 
By now, we still have a poor understanding of how different layers learn differently. Currently, a widely accepted view relates to the vanishing gradient problem Hochreiter (1991); Hochreiter et al. (2001). The gradients are getting weaker and weaker as they move back through the hidden layers, making the shallower layers converge more slowly (Nielsen, 2015). Informally, it is reasonable that larger gradient values bring higher learning speed.", + "bbox": [ + 169, + 700, + 823, + 799 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Even though this view somewhat makes sense, we seem to have little concrete evidence supporting it. In particular, it is dubious how higher-level features can be built based on the unstable features extracted by the unconverged shallower layers (Raghu et al., 2017). This paper aims to find a credible answer for the parameters of which layer are learning faster towards the convergence point (defined as the convergence rate in this work) with a systematic exploration. Our results lead to somewhat startling discoveries.", + "bbox": [ + 169, + 805, + 823, + 888 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Our Contributions. Our point of start is illustrating that there does not seem to be a reliable positive correlation between the gradient magnitude and the convergence rate of a particular layer.", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Instead, we find that shallower layers tend to converge faster than the deeper ones, even with smaller gradients. 
The phenomenon is called layer convergence bias in this paper.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We then turn our attention to excavating the underlying mechanism for the faster convergence of shallower layers. Specifically, we find out that the depth of a layer has a fundamental effect on its training: the parameters of shallower layers are usually optimized on flatter landscapes than deeper layers. This finding reveals that the gradients of shallower layers may be more predictive and thus have the potential to allow the larger learning rates (LRs) to be performed, making the convergence faster.", + "bbox": [ + 169, + 138, + 826, + 223 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Finally, we find that the layer convergence bias is also tied to the frequency of the function they are modeling. When fitting a complex target function, the shallower layers tend to fit the low-frequency (usually simpler) components. On the contrary, the deeper layers struggle to fit the remaining high-frequency components. It is a consistent result of the recent discovery that DNNs prioritize learning low-frequency components of the modeling function, while having very low learning speed on high-frequency components that tend to be more complex (Rahaman et al., 2019). This finding provides us with another perspective to understand why deeper layers learn more slowly.", + "bbox": [ + 169, + 229, + 823, + 328 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We believe that understanding the roots of such a fundamental convergence bias can give us a better grasp of the complicated learning process of DNNs. In turn, it can motivate more in-depth algorithmic progress for the deep learning community.", + "bbox": [ + 169, + 334, + 823, + 378 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This paper is organized as follows. 
In Section 2, we introduce our method for measuring convergence speed for different layers, and formally define the layer convergence bias. In Section 3, we examine the relationship between gradient magnitude and convergence rate, and show that the shallower layers tend to converge faster even with smaller gradients. Then in Section 4, we analyze the mechanism behind the layer convergence bias in DNN training. The layer-frequency correspondence is demonstrated in Section 5. The practical significance of layer convergence bias is presented in Section 6. We further discuss the related work in Section 7 and conclude in Section 8.", + "bbox": [ + 169, + 382, + 825, + 481 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 LAYER CONVERGENCE BIAS", + "text_level": 1, + "bbox": [ + 171, + 500, + 446, + 515 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The deep architecture of DNNs is arguably one of the most important factors for their powerful fitting abilities. With the benefit brought by the deep structures, there are also extra complexities in the training process coming into being. So far, we do not have a firm conclusion about whether some layers are learning faster than others.", + "bbox": [ + 169, + 530, + 823, + 588 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For examining the convergence progress for a DNN, a common practice is checking its loss curve. However, this is not applicable for comparing the convergence between different layers. 
In this work, we define a measurement for layer-wise convergence in the following.", + "bbox": [ + 169, + 594, + 823, + 638 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Definition 2.1 (Layer-wise convergence rate) At the training time $t$ , let the deep neural network with $L$ layers $\\{T_{l}^{(t)}\\}_{l=1}^{L}$ be $f(\\pmb{x}) = (T_{L}^{(t)} \\circ T_{L-1}^{(t)} \\circ \\dots \\circ T_{1}^{(t)})(\\pmb{x}) : \\mathbb{R}^{i} \\to \\mathbb{R}^{o}$ , where $i, o$ are the dimension of its inputs and outputs. We use $\\theta_{l}^{(t)}$ to denote the parameters of the $l$ -th layer $T_{l}^{(t)}$ . Assuming that $\\theta_{l}^{(t)}$ can finally converge to its optimal point $\\theta_{l}^{*}$ when $t \\to \\infty$ , we define the convergence rate of $\\theta_{l}$ during the time interval $[t_1, t_2]$ to be", + "bbox": [ + 169, + 643, + 826, + 727 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nC _ {l} ^ {(t _ {1}, t _ {2})} = \\frac {1}{(t _ {2} - t _ {1})} \\cdot \\frac {\\| \\theta_ {l} ^ {(t _ {1})} - \\theta_ {l} ^ {*} \\| _ {2} - \\| \\theta_ {l} ^ {(t _ {2})} - \\theta_ {l} ^ {*} \\| _ {2}}{\\| \\theta_ {l} ^ {(t _ {0})} - \\theta_ {l} ^ {*} \\| _ {2}},\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 742, + 678, + 785 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $t_0$ denotes the time point when the training starts.", + "bbox": [ + 171, + 792, + 542, + 806 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this definition, the numerator $\\| \\theta_l^{(t_1)} - \\theta_l^*\\| _2 - \\| \\theta_l^{(t_2)} - \\theta_l^*\\| _2$ denotes how much the distance of the parameter $\\theta_{l}$ to the optimal point is shortened in the period $[t_1,t_2]$ . 
The denominator $\\| \\theta_l^{(t_0)} - \\theta_l^*\\| _2$ represents the distance between the initial point to the convergence point, whose primary function is to normalize the speed, allowing the convergence of different layers to compare with each other. Thus, the convergence rate of $\\theta_{l}$ can be understood as the ratio of normalized distance to time. Common optimization works (Yi et al., 1999; Nesterov, 2003) defined the rate of convergence for $\\theta$ as $\\lim_{k\\to \\infty}\\frac{\\|\\theta^{(k + 1)} - \\theta^*\\|_2}{\\|\\theta^{(k)} - \\theta^*\\|_2}$ . It focuses on measuring an exponential level convergence when the", + "bbox": [ + 169, + 814, + 826, + 928 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "optimization step goes to infinity. Since the difference in convergence rates between layers usually appears at an early stage of training, and it is not large enough to compare at an exponential level, we define our new convergence metric to present the convergence difference in a clearer way.", + "bbox": [ + 169, + 103, + 823, + 148 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Observation 2.1 (Layer convergence bias). For $l_1 < l_2$ , $\\exists \\tilde{t} > 0$ , such that $C_{l_1}^{(t_1, t_2)} > C_{l_2}^{(t_1, t_2)}$ when $t_1 < t_2 < \\tilde{t}$ .", + "bbox": [ + 169, + 152, + 825, + 189 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Layer convergence bias indicates that at an early training phase $t < \\tilde{t}$ , the parameters $\\theta_{l_1}$ of a shallower layer $l_1$ tend to move to $\\theta_{l_1}^*$ faster than a deeper layer $\\theta_{l_2}$ moving to $\\theta_{l_2}^*$ . 
In the following, we use both synthetic and real datasets to show that the layer convergence bias appears for both fully-connected neural networks (FCNNs) and convolutional neural networks (CNNs).", + "bbox": [ + 169, + 194, + 826, + 251 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 VERIFICATION OF LAYER CONVERGENCE BIAS", + "text_level": 1, + "bbox": [ + 171, + 273, + 599, + 290 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we try to substantiate the central claim of this work. First, we use the FCNNs to show that the shallower layers tend to converge faster than the deeper layers on the regression task, even when the gradient values for shallower layers are smaller. We then use CNNs with modern architectures to verify that layer convergence bias is a common phenomenon in practical applications. All experimental settings in this work can be found in Appendix A.1.", + "bbox": [ + 169, + 306, + 823, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 LAYER CONVERGENCE BIAS IN FULLY-CONNECTED NETWORKS", + "text_level": 1, + "bbox": [ + 171, + 398, + 663, + 412 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For FCNNs, we construct a simple regression task to demonstrate that layers with smaller gradients do not necessarily learn more slowly than layers with larger gradients. The fitting target is $f(x) = \\sin (x) + \\frac{1}{3}\\sin (3x) + \\frac{1}{10}\\sin (10x) + \\frac{1}{30}\\sin (30x)$ , with mean square error loss for training.", + "bbox": [ + 169, + 425, + 823, + 470 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "First, we use the FCNN [1-32-32-32-1] with the Sigmoid activations as a simple example. In the following analysis, the first fully-connected layer (1-32) is named Layer 1, and the subsequent two layers (32-32) are called Hidden layer 1, Hidden layer 2 respectively. The gradient values and the convergence processes for these layers are shown in Fig. 1 (a). 
Two observations can be obtained from the plots: 1) The gradient of Hidden layer 1 is nearly always smaller than the gradient of Hidden layer 2. 2) Although shallower layers have smaller gradients, they seem to converge faster. For the first 50 epochs, the shallower layers are moving faster to their convergence point (e.g., $C_{Layer_1}^{(t_0,t_{50})} \\approx 0.012$ , $C_{Hidden\\_layer_1}^{(t_0,t_{50})} \\approx 0.009$ , $C_{Hidden\\_layer_2}^{(t_0,t_{50})} \\approx 0.006$ ), which is inconsistent with the previous view that higher gradients lead to faster learning (Nielsen, 2015).", + "bbox": [ + 169, + 474, + 825, + 604 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To further validate the above results with a deeper network, we adopt residual connections (He et al., 2016) for the FCNN (deep network fails to be trained in this task without residual connections) and use the ReLU activation function. The FCNN [1-(128-128)-(128-128)-(128-128)-(128-128)-1] with four residual blocks of width 128 shows similar results to the shallow FCNN without residual connection (see Fig. 1 (b)). In this case, the difference in layer-wise convergence rate can be observed even earlier (i.e., $C_{Res - Block_1}^{(t_0,t_5)} \\approx 2C_{Res - Block_4}^{(t_0,t_5)}$ ), which shows that the layer convergence bias also happens for deeper FCNNs with residual connections. 
It is noteworthy that our convergence metric is crucial to observe the layer convergence bias, which is elaborated in Appendix A.2.", + "bbox": [ + 169, + 611, + 826, + 728 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/01daa463fcec291619bed042e058d7cd00a900134e3e6ef5025120b39f7e0c8d.jpg", + "image_caption": [ + "(a) FCNN without residual connection" + ], + "image_footnote": [], + "bbox": [ + 173, + 752, + 493, + 847 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/366ed45631c4991c2eb157aa0417ce6e0d19f38cd21676dff2458c94a8777658.jpg", + "image_caption": [ + "(b) FCNN with four residual blocks", + "Figure 1: Left (a,b): The absolute mean gradient values for different layers for FCNNs w/o residual connections in training. For both networks, deeper layers have larger gradients. Right (a,b): The convergence process of different layers for FCNNs. Shallower layers converge faster." + ], + "image_footnote": [], + "bbox": [ + 500, + 752, + 821, + 845 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/38ce4d83602ce56082f5d9781a1105280b6566c07103a885666d054749025f08.jpg", + "image_caption": [ + "(a) ResNet-50 Val Acc $73.24\\%$" + ], + "image_footnote": [], + "bbox": [ + 240, + 99, + 506, + 210 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a422b587ba3b45adb3388e705d0e42809d28d6d38418adbde044e773073a06b6.jpg", + "image_caption": [ + "(b) VGG-19 Val Acc $71.89\\%$", + "Figure 2: The convergence process of ResNet-50 and VGG-19 on ImageNet. During the first 50 epochs, shallower layers converge much faster than deeper layers. 
After the learning rate decays at the 50th epoch, parameters of deeper layers accelerate to move to their convergence points." + ], + "image_footnote": [], + "bbox": [ + 513, + 101, + 754, + 210 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Clearly, these results cannot reconcile with the previous view that larger gradients bring a higher learning speed for deeper layers, at least for the DNNs used in this work. Instead, from the optimization point of view, the parameters of shallower layers are learning faster to converge.", + "bbox": [ + 169, + 301, + 823, + 345 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 LAYER CONVERGENCE BIAS IN CONVOLUTIONAL NETWORKS", + "text_level": 1, + "bbox": [ + 171, + 362, + 648, + 376 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Real-world datasets are very different from the synthetic data used in our previous experiments. In order to utilize the layer convergence bias to understand and better improve DNNs in real applications, it is important to verify whether the layer convergence bias holds for CNNs on images.", + "bbox": [ + 169, + 388, + 823, + 433 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the following experiments, we examine the layer-wise convergence process on ImageNet (Rusakovsky et al., 2015) dataset with both ResNet-50 (He et al., 2016) and VGG-19 (Simonyan & Zisserman, 2014). We train the CNNs for 120 epochs with learning rate decay at the 50th epoch $(0.1\\rightarrow 0.01)$ and the 100th epoch $(0.01\\to 0.001)$ . The training processes are shown in Fig. 2.", + "bbox": [ + 169, + 438, + 823, + 494 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For ResNet-50, we visualize the learning process of the first convolutional layer and its subsequent four stages. One can easily observe that at the beginning of training, the shallower layers converge much faster than the deeper layers ( $C_{Stage1}^{(t_0,t_{20})} \\approx 3C_{Stage4}^{(t_0,t_{20})}$ ). 
However, after the learning rate decays at the 50th epoch, deeper layers begin to learn effectively and achieve a higher convergence rate than the shallower layers ( $C_{Stage1}^{(t_{50},t_{60})} \\approx 0.5C_{Stage4}^{(t_{50},t_{60})}$ ). We conjecture that the initial learning rate is too large for the deeper layers to learn.", + "bbox": [ + 169, + 501, + 823, + 594 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For VGG-19, we visualize its 1st, 5th, 9th, 13th, and 17th layers. This network shows a more significant convergence difference between layers than ResNet-50. At the first training stage with the initial learning rate, $\\| \\theta_l^{(t_5)} - \\theta_l^*\\| >\\| \\theta_l^{(t_0)} - \\theta_l^*\\|$ for $l\\in \\{5,9,13,17\\}$ , which means that all layers but the first one even slightly diverge. Usually, the divergence appears when the learning rate is too large. This phenomenon confirms that the deeper layers cannot effectively learn with the large learning rate at the beginning.", + "bbox": [ + 169, + 601, + 823, + 688 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The experiments of FCNNs and CNNs verify that layer convergence bias is a common phenomenon for DNNs. In Section 5 and Appendix A.3, A.4, we discuss the factors that would affect the phenomenon, and some in-depth findings they reveal.", + "bbox": [ + 169, + 694, + 823, + 738 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 MECHANISM BEHIND LAYER CONVERGENCE BIAS", + "text_level": 1, + "bbox": [ + 171, + 758, + 627, + 773 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "So far, our investigation shows that the seemingly-right perspective for linking the layer-wise gradient and convergence rate is tenuous, at best. Both FCNNs and CNNs demonstrate an evident bias that shallower layers learn faster. 
Can we explain why this is the case?", + "bbox": [ + 169, + 791, + 823, + 834 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Gradient Predictiveness. Since gradient values cannot determine the convergence rate, we wonder if the directions of the gradients play a more critical role. More chaotic update directions make convergence slower. Here we examine the gradient predictiveness (Santurkar et al., 2018) of different layers. If the gradient behavior is \"predictive\", less change in the gradient directions would appear when 1) the gradients are calculated with different batches of data; 2) the parameters of other layers update. Predictiveness can also be simply understood as the stability of gradient direction.", + "bbox": [ + 169, + 840, + 825, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d8a1116278bfd7475a7dd383c92f1ae3120ea0e0aab2194a5560d1e0174023af.jpg", + "image_caption": [ + "(a) Gradient predictiveness w.r.t. data" + ], + "image_footnote": [], + "bbox": [ + 207, + 102, + 495, + 205 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f49f932ecb10171b0587245b6b5f4bc458ab696742d61826cebd388d3706a4ac.jpg", + "image_caption": [ + "(b) Gradient predictiveness w.r.t. parameters", + "Figure 3: The gradient predictiveness of shallower and deeper layers of FCNN. The learning rate decreases from 0.1 to 0.01 at Epoch 150." + ], + "image_footnote": [], + "bbox": [ + 500, + 102, + 787, + 205 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Definition 4.1 Let $(x^{(t)},y^{(t)})$ be a batch of input-label pairs for the DNN to train at time $t$ , and $(x^{\\prime (t)},y^{\\prime (t)})$ be another batch of data. 
We define the gradient predictiveness of the $l$ th layer at time $t$ w.r.t. data as the cosine similarity $\\begin{array}{r}\\operatorname {sim}(G_{l,t},G_{l,t}^{\\prime}) = \\frac{\\|G_{l,t}G_{l,t}^{\\prime}\\|}{\\|G_{l,t}\\|\\|G_{l,t}^{\\prime}\\|}\\in [-1,1] \\end{array}$ . Likewise, the gradient predictiveness w.r.t. parameters is defined as $\\operatorname {sim}(G_{l,t},G_{l,t}^{\\prime \\prime})$ , where", + "bbox": [ + 169, + 287, + 826, + 359 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} G _ {l, t} = \\nabla_ {\\theta_ {l} ^ {(t)}} L \\left(\\theta_ {1} ^ {(t)}, \\dots , \\theta_ {L} ^ {(t)}; x ^ {(t)}, y ^ {(t)}\\right) \\\\ G _ {l, t} ^ {\\prime} = \\nabla_ {\\theta_ {l} ^ {(t)}} L (\\theta_ {1} ^ {(t)}, \\dots , \\theta_ {L} ^ {(t)}; x ^ {\\prime (t)}, y ^ {\\prime (t)}) \\\\ G _ {l, t} ^ {\\prime \\prime} = \\nabla_ {\\theta_ {l} ^ {(t)}} L (\\theta_ {1} ^ {(t + 1)}, \\dots , \\theta_ {l - 1} ^ {(t + 1)}, \\theta_ {l} ^ {(t)}, \\theta_ {l + 1} ^ {(t + 1)}, \\dots , \\theta_ {L} ^ {(t + 1)}; x ^ {(t)}, y ^ {(t)}) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 279, + 367, + 714, + 444 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, $G_{l,t}$ corresponds to the gradient of $\\theta_l^{(t)}$ . $G_{l,t}^{\\prime}$ is the gradient of this layer with another batch of data, while $G_{l,t}^{\\prime \\prime}$ means the gradient after all the other layers have updated to new values. Therefore, $sim(G_{l,t},G_{l,t}^{\\prime})$ indicates the stability of gradients with different data batches. $sim(G_{l,t},G_{l,t}^{\\prime \\prime})$ reflects whether the currently estimated gradient is in a consistent decreasing direction when the loss landscape is affected by the updating of other layers' parameters. The gradient predictiveness during training is shown in Fig. 
3, where Res-Block 1 has more predictive gradients than Res-Block 4.", + "bbox": [ + 169, + 452, + 823, + 544 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Visualizing the Loss Landscapes. We are curious about why gradients for deeper layers have poorer predictiveness. A hypothesis is that the loss landscapes for deeper layers are more rugged, making the parameters fluctuate more. A straightforward method to validate this hypothesis is plotting the loss landscapes for the parameters. To do this for a particular layer $l$ , one can choose a central point $\\theta_{l}^{*}$ and two direction vectors $d_{l,1}$ , $d_{l,2}$ . Then the loss landscape can be drawn with", + "bbox": [ + 169, + 550, + 823, + 621 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nf \\left(\\beta_ {1}, \\beta_ {2}\\right) = L \\left(\\theta_ {l} ^ {*} + \\beta_ {1} d _ {l, 1} + \\beta_ {2} d _ {l, 2}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 627, + 622, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "in the 3D space with $\\beta_{1},\\beta_{2}$ forming a simplified parameter space. In this work, we generate random Gaussian directions for different layers, and normalize them to obtain the same norm of the corresponding layer. Specifically, we make the replacement $d_{l}\\gets \\frac{d_{l}}{\\|d_{l}\\|}\\| \\theta_{l}^{*}\\|$ for a fully connected layer. For a convolutional layer, we use filter-wise normalization $d_l^k\\gets \\frac{d_l^k}{\\|d_l^k\\|}\\| \\theta_l^{k*}\\|$ as in (Li et al., 2018), where $d_l^k$ represents the $k$ th filter of the $l$ th layer. We set both $\\beta_{1}$ and $\\beta_{2}$ in the domain of $[-1,1]$ .", + "bbox": [ + 169, + 656, + 823, + 744 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "**Landscapes for FCNN.** The loss landscapes for four residual blocks of the FCNN are shown in Fig. 4. 
For the shallower blocks, the surfaces are flatter near the minimizer, meaning that the gradient magnitudes may be small. However, small gradients do not necessarily lead to slow learning speed in this case. Combined with the gradient predictiveness discussed above, a flatter loss landscape may lead to more consistent gradient directions, making the learning more smooth.", + "bbox": [ + 169, + 750, + 823, + 821 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Landscapes for CNNs. The loss landscapes for ResNet-50 and VGG-19 on ImageNet are shown in Fig. 5. It is interesting that deep convolutional networks with/without residual connections present totally different loss landscapes. For ResNet-50, its landscapes near the convergence point $\\theta_l^*$ are smooth and nearly convex, making the neural network easier to train. On the contrary, VGG-19 has much more shattered landscapes, the initial iterations probably lie in the chaotic regions, prohibiting its training (Balduzzi et al., 2017). This may explain the much less efficient convergence towards the optimal point for VGG than ResNet at the initial phase (Fig. 
2).", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/dcf9e20c6e81e7c73e4b19b4206b4afdb783893a1e47d077efe5cca2fb4b1872.jpg", + "image_caption": [ + "(a) Res-Block 1" + ], + "image_footnote": [], + "bbox": [ + 178, + 99, + 326, + 186 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ff6a098118a0a6866965702a9e436500e7a6d9c8c802352f9ca8ab635e08db12.jpg", + "image_caption": [ + "(b) Res-Block 2" + ], + "image_footnote": [], + "bbox": [ + 343, + 99, + 491, + 185 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1787402fa4768622d7529fd2645f10d7054d15e5be80417749af249eb960c844.jpg", + "image_caption": [ + "(c) Res-Block 3" + ], + "image_footnote": [], + "bbox": [ + 511, + 99, + 658, + 185 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/465dae870d469c4cd45fee3ef006b75e4bd48370444c96442e2dc92599785c90.jpg", + "image_caption": [ + "(d) Res-Block 4" + ], + "image_footnote": [], + "bbox": [ + 676, + 101, + 823, + 185 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/eec5cf65633ddf56eafd0ca7308921055aeac035f68d35a2c9b618030e483e68.jpg", + "image_caption": [ + "Figure 4: The loss landscapes of different layers of FCNN. Deeper layers are optimized on more rugged landscapes, slowing down the learning process.", + "(a) ResNet-50 Res-Block 1", + "Figure 5: The loss landscapes of different layers of ResNet-50 (a,b) and VGG-19 (c,d) on ImageNet. The shallower layers for both networks have flatter minima, making them converge faster than the deeper layers. The plots for all layers can be found in Appendix A.5." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 253, + 330, + 342 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/279c1246dcbe7bca3a88308d661c3f202523a38c709bd62867e8e958c5143087.jpg", + "image_caption": [ + "(b) ResNet-50 Res-Block 4" + ], + "image_footnote": [], + "bbox": [ + 336, + 253, + 493, + 342 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3ae53b8f727a9a11ba44cd6c77636a06eed18e34892842dc485e7307f69fe3c3.jpg", + "image_caption": [ + "(c) VGG-19 Layer 1" + ], + "image_footnote": [], + "bbox": [ + 503, + 255, + 658, + 342 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4d6c447583c0f747b2ca42a90a6cd78058ba671f16c8032956bced0511abb923.jpg", + "image_caption": [ + "(d) VGG-19 Layer 13" + ], + "image_footnote": [], + "bbox": [ + 666, + 255, + 821, + 342 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparing different layers in the CNNs, the answer for layer convergence bias becomes clearer. The key difference between different layers' loss landscapes of ResNet-50 is the sharpness of the local minima (Fig. 5 (a,b)). We conjecture it is because of a well-known fact that the shallower layers of CNNs tend to learn general features which are applicable to various datasets and tasks, while the deeper layers usually learn task-specific features (Yosinski et al., 2014). Before our work, (Zeiler & Fergus, 2014) also revealed that the general features in a five-layer CNN stabilized faster than the specific features. Since the general features are more evenly distributed, they usually cause less fluctuation for training, leading to flatter optima. Theoretically, flatter minimizers are easier to be found by SGD optimizers (Pan et al., 2020). For VGG-19, its shallower and deeper layers also have flatter and sharper minima (Fig. 5 (c,d)), respectively. The shattered loss landscape for its deeper layers may also explain its inefficient learning process with a large learning rate (Fig. 
2 (b)).", + "bbox": [ + 169, + 440, + 823, + 595 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here we summarize the mechanism behind layer convergence bias: the parameters of shallower layers are easier to optimize due to their flatter loss landscapes. At a higher level, shallower layers learn general features, which are usually easier.", + "bbox": [ + 169, + 599, + 823, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 DEeper LAYERS FIT THE HIGH-FREQUENCY COMPONENTS", + "text_level": 1, + "bbox": [ + 171, + 662, + 699, + 679 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Recent advances in the learning process of DNNs (Rahaman et al., 2019; Ronen et al., 2019; Xu & Zhou, 2021) revealed that the low-frequency components of the target function are fitted much faster than the high-frequency components. There is a natural question about whether there is some inherent link between layer convergence bias and this result. In this section, we investigate the answer, and surprisingly find that: the low-frequency parts are usually fitted by the shallower layers, while the remaining higher frequencies are mainly learned by the deeper layers. It provides us with an alternative perspective to understand the layer convergence bias.", + "bbox": [ + 169, + 694, + 823, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The Correspondence for FCNN. With the residual structures, we can straightforwardly visualize what each block of a FCNN learns. Considering the FCNN with one input layer $z_0 = T_0(x) : \\mathbb{R}^1 \\to \\mathbb{R}^{128}$ , four residual blocks $z_l = T_l'(z_{l-1}) = T_l(z_{l-1}) + z_{l-1} : \\mathbb{R}^{128} \\to \\mathbb{R}^{128}$ , $l \\in \\{1, 2, 3, 4\\}$ , and an output layer $y = T_5(z_4) : \\mathbb{R}^{128} \\to \\mathbb{R}^1$ . 
The whole network can be expressed as", + "bbox": [ + 169, + 797, + 823, + 854 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\ny = T _ {5} \\left(z _ {1} + T _ {2} \\left(z _ {1}\\right) + T _ {3} \\left(z _ {2}\\right) + T _ {4} \\left(z _ {3}\\right)\\right) = T _ {5} \\left(z _ {1}\\right) + T _ {5} \\left(T _ {2} \\left(z _ {1}\\right)\\right) + T _ {5} \\left(T _ {3} \\left(z _ {2}\\right)\\right) + T _ {5} \\left(T _ {4} \\left(z _ {3}\\right)\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 186, + 859, + 807, + 877 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "if the output layer $T_{5}$ is a linear transformation. The fitting results for each layer are shown in Fig. 6. It can be seen that the deeper layers tend to fit the more complex components of the target function $y = \\sin (x) + \\frac{1}{3}\\sin (3x) + \\frac{1}{10}\\sin (10x) + \\frac{1}{30}\\sin (30x)$ . Besides the curvature, the fitted functions", + "bbox": [ + 169, + 881, + 823, + 926 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/bb78dfff1b269ef8e0ea2ec041cad774391c664cadbe592299e2d6fde9fb54c6.jpg", + "image_caption": [ + "y" + ], + "image_footnote": [], + "bbox": [ + 176, + 99, + 297, + 169 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/30d9145078e0f083998f1660437ed0ce09d7cb65f2eb734b7994195063078aa4.jpg", + "image_caption": [ + "$T_{5}(z_{1})$", + "Figure 6: The visualization of what each residual block of the FCNN learns. From the first to the fourth block, the fitted function becomes more complex with smaller amplitude." 
+ ], + "image_footnote": [], + "bbox": [ + 300, + 101, + 429, + 170 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d6ef0e059f3f0dc1645ae9f7607ec365446cff1498dff72ba12042b3787d4a0d.jpg", + "image_caption": [ + "$T_{5}(T_{2}(z_{1}))$" + ], + "image_footnote": [], + "bbox": [ + 431, + 102, + 558, + 170 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a30a74cbbe2923ef6bbaa39191a64525217c3f262158cad296ab2fe71c630c51.jpg", + "image_caption": [ + "$T_{5}(T_{3}(z_{2}))$" + ], + "image_footnote": [], + "bbox": [ + 560, + 102, + 687, + 170 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/25db2ae0c1966056c496cf9e6d63337ebe2588d47d0006f42ed1f6b9915cde76.jpg", + "image_caption": [ + "$T_{5}(T_{4}(z_{3}))$" + ], + "image_footnote": [], + "bbox": [ + 691, + 102, + 820, + 170 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a1a9026ccad7593f40929b85467635e4d9c29f8acc3f9ec6c9a14cdb9e90f51b.jpg", + "image_caption": [ + "(a) ResNet-50", + "Figure 7: The visualization of response frequencies for CNNs. As the training goes on, deeper layers become more sensitive to perturbations, indicating that they have higher response frequencies." + ], + "image_footnote": [], + "bbox": [ + 238, + 263, + 491, + 358 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/79e1ba213d462d7a60bd8a5e3e789a4ff15bf0cfd18dcfb9ce196cc6f8b6035b.jpg", + "image_caption": [ + "(b) VGG-19" + ], + "image_footnote": [], + "bbox": [ + 504, + 263, + 756, + 358 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "are also consistent with the amplitudes of the components. Specifically, the ranges of the four fitted functions are 2.3, 0.7, 0.5, and 0.06, which are similar to the four components. This result further confirms the relationship between layers and frequencies.", + "bbox": [ + 169, + 443, + 823, + 487 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The Correspondence for CNNs. 
For CNNs, we verify their layer-frequency correspondence through the response frequency (Xu et al., 2019). In a nutshell, if an input-output mapping $f$ possesses significant high frequencies, then a small change in its input induces a large change in the output. We generate standard Gaussian-distributed input $x$ for different residual blocks of ResNet-50 and different layers of VGG-19. At the same time, small Gaussian perturbation $\\Delta x$ is added to the input. A larger change $\\Delta y$ of the layer output means the layer handles higher frequencies. The response frequencies are shown in Fig. 7. At the first 5 epochs of training on ImageNet, different layers for both ResNet-50 and VGG-19 do not show significantly different response frequencies. But after about ten epochs, the response frequencies for deeper layers (e.g., stage 4 for ResNet-50, layer 13 for VGG-19) increase while the shallower layers show lower response frequencies. Therefore, we conclude that the layer-frequency correspondence also holds for CNNs. In addition, it is not an innate nature of the layers, but a result of the training process.", + "bbox": [ + 169, + 492, + 826, + 660 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "How the target frequency affects layer convergence bias? To demonstrate the effect of layer-frequency correspondence on the layer convergence bias, we try fitting simpler targets with less high-frequency components, and see what would happen to the layer-wise convergence rate of FCNN. In Fig. 8 (a-d), we only keep several lowest frequencies of the target, e.g., the target function $y = \\sin(x)$ is named \"Complexity=1\", and $y = \\sin(x) + \\frac{1}{3} \\sin(3x)$ is named \"Complexity=2\", etc. After discarding more and more high-frequency components, the deeper layers converge faster and faster. In this case, the layer convergence bias does not strictly hold anymore. In Fig. 8 (b), the Res-Block 4 converges faster than Res-Block 3 after the 5th epoch. 
In Fig. 8 (c), the Res-Block 4 converges with a similar speed as Res-Block 2, while the Res-Block 3 even learns faster than Res-Block 2. It seems that removing the high-frequency component that corresponds to a deep layer can effectively accelerate its training. For CNNs, we also observe similar phenomena (Fig. 8 (e-h)). On simpler targets (e.g., CIFAR 10), the deeper layers converge faster than on more complex targets (e.g., CIFAR100). An implication of this result is that the data complexity may be too low for the model. In practice, CIFAR datasets only need ResNet-18 to fit well (Wu et al., 2020).", + "bbox": [ + 169, + 666, + 826, + 862 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In fact, (Rahaman et al., 2019) had shown that different layers have some links to different frequencies, but the authors did not provide further insight for this phenomenon. This work verifies the underlying relationship between layers and fitting frequencies, and establishes a connection for this relationship to the layer convergence bias.", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c2703188bafc4178271ae4735f09e3fbe016ef647c53c152323a69e9d1cd12f4.jpg", + "image_caption": [ + "(a) FCNN Complexity=4" + ], + "image_footnote": [], + "bbox": [ + 174, + 99, + 362, + 204 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d49a2ff4a53e499804eb29b593fb5e0e3e0f37fc6cfc4cc3d3fe253fb2182f8b.jpg", + "image_caption": [ + "(b) FCNN Complexity=3" + ], + "image_footnote": [], + "bbox": [ + 366, + 101, + 514, + 204 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": 
"images/5c16f8c07486995949efa3f7a87b00691ce5eee35754ebb1625df16072e9dfeb.jpg", + "image_caption": [ + "(c) FCNN Complexity=2" + ], + "image_footnote": [], + "bbox": [ + 517, + 101, + 666, + 204 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/81e9cbf6884ab5717b67482b33b04616ceaebe7f6af14870434a4e64affab2a3.jpg", + "image_caption": [ + "(d) FCNN Complexity $= 1$" + ], + "image_footnote": [], + "bbox": [ + 669, + 101, + 821, + 204 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7ec69d55de3b60c5c24832ef1d9a12997306db37fae0246e09c0eaf129ea5c42.jpg", + "image_caption": [ + "(e) ResNet-50 CIFAR100" + ], + "image_footnote": [], + "bbox": [ + 173, + 233, + 362, + 335 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c6ee73b224215a5536d2c53e6ac7f83ef3a1e68516fdd3b1772b11f7f499bb1e.jpg", + "image_caption": [ + "(f) ResNet-50 CIFAR10" + ], + "image_footnote": [], + "bbox": [ + 366, + 233, + 514, + 337 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e5c47c8658bab4c2e4e07fda183dc40cb52d0a977494cb3c695f25af9d3165f1.jpg", + "image_caption": [ + "(g) VGG-19 CIFAR100", + "Figure 8: The convergence curves with different learning target complexities. (a-d): Decreasing target complexities for FCNNs. The deeper layers accelerate more than the shallower ones when high-frequency components are removed. (e-h): For CNNs, the deepest layers (i.e., Stage 4 / Layer 17) learn faster on CIFAR10 than on CIFAR100 while the other layers do not change much." 
+ ], + "image_footnote": [], + "bbox": [ + 517, + 233, + 666, + 335 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fd22289d46f02ff40c21c1a9f04c20aa6999d25ce28ec1f07b215a4f637c2573.jpg", + "image_caption": [ + "(h) VGG-19 CIFAR10" + ], + "image_footnote": [], + "bbox": [ + 669, + 233, + 820, + 335 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 PRACTICAL SIGNIFICANCE", + "text_level": 1, + "bbox": [ + 171, + 450, + 429, + 465 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Up to now, we have been analyzing the layer convergence bias from a theoretical perspective. This section discusses its practical use to drive the development of DNN architecture design, and a new explanation for the acceleration effect of transfer learning with the help of layer convergence bias.", + "bbox": [ + 169, + 483, + 823, + 527 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.1 DNN ARCHITECTURE DESIGN", + "text_level": 1, + "bbox": [ + 171, + 546, + 426, + 559 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Modern CNN architectures (He et al., 2016) usually contain layers from narrow to wide (e.g., 64 channels of the first layer to 2048 channels of the last layer). From the perspective of computational complexity, the narrower shallower layers make the corresponding large feature maps less computation-consuming. Considering the layer convergence bias, deeper layers with larger capacities are also beneficial for the corresponding high-frequencies to be learned easier. Although this is a common design for CNNs, Transformers (Dosovitskiy et al., 2020) usually apply the same architecture for all encoders. For a vision Transformer with 12 encoders, we use encoders with width $2/4/8$ to construct three variants. The variants only differ in the arrangement of different encoders, we use $W$ to denote the widths, and $N$ to denote the number of each kind of encoders. 
The configurations are summarized below:", + "bbox": [ + 169, + 573, + 624, + 768 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- deeper encoders wider: $W = (2,4,8)$ , $N = (6,3,3)$", + "- vanilla architecture: $W = (4, 4, 4)$ , $N = (4, 4, 4)$", + "- deeper encoders narrower: $W = (8,4,2)$ , $N = (3,3,6)$" + ], + "bbox": [ + 215, + 781, + 601, + 840 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/67c9db64e8a19b92a2e2742a4ec2cc2a9bfd477ef8ce171a951b58f590686f34.jpg", + "image_caption": [ + "Figure 9: Performance of three variants of ViTs on ImageNet." + ], + "image_footnote": [], + "bbox": [ + 635, + 590, + 823, + 700 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Fig. 9 shows their performances, with the best accuracy of $80.75\\%$ , $78.88\\%$ , and $75.75\\%$ respectively. We find that with the same number of parameters, putting the wider layers deeper results in higher training performance. This finding may serve as an effective way to improve the model capacity. The causal connection between layer complexity distribution and model performance is discussed in Appendix A.6. And layer convergence bias for ViT is analyzed in Appendix A.7.", + "bbox": [ + 169, + 853, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.2 ACCELERATION EFFECT OF TRANSFER LEARNING", + "text_level": 1, + "bbox": [ + 171, + 104, + 565, + 118 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Transfer learning (fine-tuning with the pre-trained models) is a widely-used technique that can accelerate the model convergence (Shao et al., 2018b; a; Liang & Zheng, 2020). 
We show the layer convergence curves w/o transfer learning on the Flowers dataset (Nilsback & Zisserman, 2006). When training from scratch (Fig. 10 (a)), the shallower layers converge faster so that the deeper layers can extract semantic features based on basic features. Local minima of Stage 4 is sharp in this case. However, with transfer learning (Fig. 10 (b)), deeper layers can directly be built on the pre-trained basic features. The Stage 4 shows a much higher convergence rate among all layers, its loss landscape also becomes flatter. Two observations that are not consistent with layer convergence bias are summarized in the following: 1) the pre-trained shallower layers are nearly optimal, so they don't present fast convergence in transfer learning; 2) although the pre-trained deeper layers are not as optimal as the shallower layers do, their loss landscapes are much flatter than training from scratch, which makes them converge much faster.", + "bbox": [ + 169, + 130, + 826, + 297 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/08d76005d97ea52de82dddccf093f0e6f27f636217cbfa31a4e830f349ae5720.jpg", + "image_caption": [ + "(a) Train from scratch" + ], + "image_footnote": [], + "bbox": [ + 173, + 310, + 336, + 401 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/485b491ea971b145d122a9e124cb7177b0d69285d2adc5eed481bf5316c090b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 311, + 495, + 398 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/c6ae2dceb0c9621794a2f3e74785b0e7801adbb91d909ca664c3f582778281c3.jpg", + "image_caption": [ + "Figure 10: Effects of transfer learning on the training process. Left (a,b): The layer convergence process of ResNet-50. Right (a,b): The loss landscapes of Stage 4 w/o transfer learning." 
+ ], + "image_footnote": [], + "bbox": [ + 504, + 311, + 666, + 401 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/44c1782dcb66f53943d0cf57fa8f4482379eff83247c299544dbcd9351ae8b52.jpg", + "image_caption": [ + "(b) Fine-tuning" + ], + "image_footnote": [], + "bbox": [ + 669, + 311, + 821, + 397 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 489, + 346, + 503 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "DNNs with gradient-based training show great potential to fit targets with arbitrary complexities (Hornik et al., 1989; Leshno et al., 1993), given sufficient width. With the advances in the last decade to verify the capability of the depth of universal approximators (Delalleau & Bengio, 2011; Eldan & Shamir, 2016; Lu et al., 2017), practitioners tried to reduce the width of neural networks by adding more layers (Simonyan & Zisserman, 2014; He et al., 2016; Huang et al., 2017). We are also inspired by research on local properties (sharpness/flatness) of loss functions at minima (Keskar et al., 2017; Li et al., 2018) and relationship between convergence rate and generalization (Hardt et al., 2016). Furthermore, LARS optimizer (You et al., 2017) shares some valuable insights on layer convergence, which are discussed in Appendix A.8. In practice, the idea of layer convergence bias had been intuitively applied to accelerate DNN training (Huang et al., 2016; Brock et al., 2017) and mitigating catastrophic forgetting (Ramasesh et al., 2020). 
The arrangement schemes of CNN/Transformer blocks were explored by (Liu et al., 2022b;a).", + "bbox": [ + 169, + 521, + 826, + 689 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "8 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 710, + 318, + 724 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we empirically studied the phenomenon that the shallower layers of DNNs tend to converge faster than the deeper layers, called layer convergence bias. This phenomenon is a natural preference in the process of DNN training: the shallower layers are responsible for extracting low-level features which are more evenly distributed and easier to learn, while deeper layers refine these features to do specific tasks. This makes the loss landscapes for shallower layers flatter than the landscapes for deeper layers, making shallower layers converge faster. In addition, this work established a connection between layers and learned frequencies. By showing deeper layers tend to fit the high-frequency components in the target function, we can understand the layer convergence bias from another perspective. We finally took DNN architecture design and transfer learning as two examples to show how theoretical findings in this work can shed light on the practical applications of deep learning. For progress to continue, a more in-depth understanding of the properties of neural networks is needed. 
We also hope that the layer convergence bias can inspire more practical improvements in the DNNs' architecture design and training schemes.", + "bbox": [ + 169, + 743, + 826, + 924 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 173, + 102, + 356, + 118 + ], + "page_idx": 9 + }, + { + "type": "ref_text", + "text": "This work was supported by the Lustgarten Foundation for Pancreatic Cancer Research and the McGovern Foundation.", + "bbox": [ + 171, + 133, + 823, + 161 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 181, + 287, + 196 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016.", + "David Balduzzi, Marcus Frean, Lennox Leary, JP Lewis, Kurt Wan-Duo Ma, and Brian McWilliams. The shattered gradients problem: If resnets are the answer, then what is the question? In International Conference on Machine Learning, pp. 342-350. PMLR, 2017.", + "Andrew Brock, Theodore Lim, James Millar Ritchie, and Nicholas J Weston. Freezeout: Accelerate training by progressively freezing layers. In NIPS 2017 Workshop on Optimization: 10th NIPS Workshop on Optimization for Machine Learning, 2017.", + "Olivier Delalleau and Yoshua Bengio. Shallow vs. deep sum-product networks. Advances in neural information processing systems, 24, 2011.", + "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 
An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020.", + "Ronen Eldan and Ohad Shamir. The power of depth for feedforward neural networks. In Conference on learning theory, pp. 907-940. PMLR, 2016.", + "Alex Graves, Abdel-rahman Mohamed, and Geoffrey Hinton. Speech recognition with deep recurrent neural networks. In 2013 IEEE international conference on acoustics, speech and signal processing, pp. 6645-6649. IEEE, 2013.", + "Moritz Hardt, Ben Recht, and Yoram Singer. Train faster, generalize better: Stability of stochastic gradient descent. In International conference on machine learning, pp. 1225-1234. PMLR, 2016.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.", + "Sepp Hochreiter. Untersuchungen zu dynamischen neuronalen netzen. Diploma, Technische Universität München, 91(1), 1991.", + "Sepp Hochreiter, Yoshua Bengio, Paolo Frasconi, Jürgen Schmidhuber, et al. Gradient flow in recurrent nets: the difficulty of learning long-term dependencies, 2001.", + "Kurt Hornik, Maxwell Stinchcombe, and Halbert White. Multilayer feedforward networks are universal approximators. Neural networks, 2(5):359-366, 1989.", + "Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pp. 646-661. Springer, 2016.", + "Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4700-4708, 2017.", + "Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning, pp. 448-456. 
PMLR, 2015.", + "John Jumper, Richard Evans, Alexander Pritzel, Tim Green, Michael Figurnov, Olaf Ronneberger, Kathryn Tunyasuvunakool, Russ Bates, Augustin Žídek, Anna Potapenko, et al. Highly accurate protein structure prediction with alphafold. Nature, 596(7873):583-589, 2021." + ], + "bbox": [ + 171, + 205, + 825, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Nitish Shirish Keskar, Jorge Nocedal, Ping Tak Peter Tang, Dheevatsa Mudigere, and Mikhail Smelyanskiy. On large-batch training for deep learning: Generalization gap and sharp minima. In 5th International Conference on Learning Representations, ICLR 2017, 2017.", + "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Communications of the ACM, 60(6):84-90, 2017.", + "Moshe Leshno, Vladimir Ya Lin, Allan Pinkus, and Shimon Schocken. Multilayer feedforward networks with a nonpolynomial activation function can approximate any function. Neural networks, 6(6):861-867, 1993.", + "Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. Advances in neural information processing systems, 31, 2018.", + "Gaobo Liang and Lixin Zheng. A transfer learning method with deep residual network for pediatric pneumonia diagnosis. Computer methods and programs in biomedicine, 187:104964, 2020.", + "Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 
3202-3211, 2022a.", + "Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976-11986, 2022b.", + "Zhou Lu, Hongming Pu, Feicheng Wang, Zhiqiang Hu, and Liwei Wang. The expressive power of neural networks: A view from the width. Advances in neural information processing systems, 30, 2017.", + "Yuri Nesterov. Introductory lectures on convex optimization: A basic course, volume 87. Springer Science & Business Media, 2003.", + "Michael A Nielsen. Neural networks and deep learning, volume 25. Determination press San Francisco, CA, USA, 2015.", + "M-E Nilsback and Andrew Zisserman. A visual vocabulary for flower classification. In CVPR, volume 2, pp. 1447-1454. IEEE, 2006.", + "Zhou Pan, Feng Jiashi, Ma Chao, Xiong Caiming, Chu Hong Hoi Steven, and E Weinan. Towards theoretically understanding why sgd generalizes better than adam in deep learning. In Advances in Neural Information Processing Systems, pp. 21285-21296, 2020.", + "Maithra Raghu, Justin Gilmer, Jason Yosinski, and Jascha Sohl-Dickstein. Svcca: Singular vector canonical correlation analysis for deep learning dynamics and interpretability. Advances in neural information processing systems, 30, 2017.", + "Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In International Conference on Machine Learning, pp. 5301-5310. PMLR, 2019.", + "Vinay V Ramasesh, Ethan Dyer, and Maithra Raghu. Anatomy of catastrophic forgetting: Hidden representations and task semantics. arXiv preprint arXiv:2007.07400, 2020.", + "Basri Ronen, David Jacobs, Yoni Kasten, and Shira Kritchman. The convergence rate of neural networks for learned functions of different frequencies. 
Advances in Neural Information Processing Systems, 32, 2019.", + "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. IJCV, 115(3):211-252, 2015.", + "Shibani Santurkar, Dimitris Tsipras, Andrew Ilyas, and Aleksander Madry. How does batch normalization help optimization? Advances in neural information processing systems, 31, 2018." + ], + "bbox": [ + 171, + 102, + 825, + 925 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kun Shao, Yuanheng Zhu, and Dongbin Zhao. Starcraft micromanagement with reinforcement learning and curriculum transfer learning. IEEE Transactions on Emerging Topics in Computational Intelligence, 3(1):73-84, 2018a.", + "Siyu Shao, Stephen McAleer, Ruqiang Yan, and Pierre Baldi. Highly accurate machine fault diagnosis using deep transfer learning. IEEE Transactions on Industrial Informatics, 15(4):2446-2455, 2018b.", + "David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George Van Den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, et al. Mastering the game of go with deep neural networks and tree search. nature, 529(7587):484-489, 2016.", + "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014.", + "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. Sequence to sequence learning with neural networks. Advances in neural information processing systems, 27, 2014.", + "Pengxiang Wu, Songzhu Zheng, Mayank Goswami, Dimitris Metaxas, and Chao Chen. 
A topological filter for learning with label noise. Advances in neural information processing systems, 33: 21382-21393, 2020.", + "Yuxin Wu and Kaiming He. Group normalization. In Proceedings of the European conference on computer vision (ECCV), pp. 3-19, 2018.", + "Zhi-Qin John Xu, Yaoyu Zhang, Tao Luo, Yanyang Xiao, and Zheng Ma. Frequency principle: Fourier analysis sheds light on deep neural networks. arXiv preprint arXiv:1901.06523, 2019.", + "Zhiqin John Xu and Hanxu Zhou. Deep frequency principle towards understanding why deeper learning is faster. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 10541-10550, 2021.", + "Zhang Yi, Pheng-Ann Heng, and Ada Wai-Chee Fu. Estimate of exponential convergence rate and exponential stability for neural networks. IEEE Transactions on Neural Networks, 10(6):1487-1493, 1999.", + "Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. How transferable are features in deep neural networks? Advances in neural information processing systems, 27, 2014.", + "Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017.", + "Matthew D Zeiler and Rob Fergus. Visualizing and understanding convolutional networks. In European conference on computer vision, pp. 818-833. Springer, 2014." + ], + "bbox": [ + 171, + 102, + 825, + 667 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A APPENDIX", + "text_level": 1, + "bbox": [ + 171, + 102, + 299, + 118 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.1 EXPERIMENTAL SETTINGS", + "text_level": 1, + "bbox": [ + 171, + 133, + 403, + 148 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Datasets. 
The synthetic and real datasets are summarized in the Tab. 1", + "bbox": [ + 171, + 159, + 635, + 174 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/f74c448402e96f1f33bd096b683b53537d61ab7380df54455fc97500f6a2e325.jpg", + "table_caption": [ + "Table 1: Descriptions and statistics of the datasets used in this work." + ], + "table_footnote": [], + "table_body": "
DatasetSize (train/test)ClassesData description
Sine regression5000/5000n/aFunction with four sine components, domain [-2,2]
ImageNet1,281,167/50,0001000Photos of common objects
CIFAR-1050,000/10,00010Photos of common objects, image sizes 32 × 32
CIFAR-10050,000/10,000100Photos of common objects, image sizes 32 × 32
Flowers1,088/27217Find-grained photos of flowers
FGVC Aircraft6,667/3,333100Find-grained photos of aircrafts
Caltech-1013,060/6,084102Photos/paintings/sketches of common objects
CUB-2005,994/5,794200Find-grained photos of birds
DomainNet painting50,416/21,850345Oil Paintings, murals, drawings, tattoos
", + "bbox": [ + 220, + 212, + 777, + 330 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Network Architectures. The FCNNs, CNNs, and Vision Transformers are summarized in the Tab. 2.", + "bbox": [ + 169, + 344, + 823, + 371 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/48a7442661b84f2092086f4e1e109ad4ad97d550a61b13a9781720f6bf1b99fe.jpg", + "table_caption": [ + "Table 2: Complexities and architectures of DNNs used in this work." + ], + "table_footnote": [], + "table_body": "
Model#ParametersMult-addsArchitecture description
FCNN (no res)2k10k4 fc layers [1-32-32-32-1]
FCNN (res)132k390kfc [1-128] → 4 res-blocks [128-128-128] → fc [128-1]
ResNet-5025.6M4.1Gconv → 4 stages with [3,4,6,3] res-blocks → fc
VGG-19143.7M19.8G16 conv layers, 3 fc layers
ViT9.9M77.2M12 Transformer encoder blocks (basic width 256), 1 fc layer
", + "bbox": [ + 218, + 409, + 779, + 487 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Training Hyper-parameters. For the regression task, we train FCNNs with SGD optimizers for 300 epochs. The initial learning rate is 0.1, with a learning rate decay (to 0.01) at the 150th epoch. The batch size is 128, no weight decay ( $L_{2}$ regularization) is conducted.", + "bbox": [ + 169, + 503, + 823, + 546 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For the ImageNet classification task with CNNs, we train ResNet-50 and VGG-19 for 120 epochs with SGD optimizers. The initial learning rate is 0.1, with learning rate decays at the 50th and 100th epoch to 0.01 and 0.001, respectively. The batch size is 256, the input image size is $224^2$ , and the weight decay coefficient is $10^{-4}$ .", + "bbox": [ + 169, + 551, + 823, + 608 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For Vision Transformers on ImageNet dataset, we train them for 200 epochs with Adam optimizers. The peak learning rate is set to 0.0003. We use linear learning rate warm-up for 10,000 iterations, and a subsequent cosine learning rate decay. The batch size is 256, the input image size is $224^2$ , and the weight decay coefficient is $10^{-4}$ .", + "bbox": [ + 169, + 614, + 823, + 672 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For CNN image classification on other datasets, we train models for 100 epochs with SGD optimizers. Initial learning rate of 0.01 and cosine learning rate scheduler are applied. 
The batch size is 128, the input image sizes are $32^2$ (for CIFAR) and $224^2$ (for Flowers, Aircraft, Caltech, CUB, and DomainNet), and the weight decay coefficient is $10^{-4}$ .", + "bbox": [ + 169, + 678, + 826, + 736 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.2 CONVERGENCE MEASUREMENT USING WEIGHT VARIATION", + "text_level": 1, + "bbox": [ + 171, + 103, + 640, + 118 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In Section 2, we have introduced the convergence measurement in this work. This measurement is simple and straightforward, and it can show how each layer in a DNN converges during the whole training process (Fig. 1 for fully connected networks and Fig. 2 for CNNs) by examining the distance between the training parameters and the converged parameters. However, it has not been verified whether calculating the parameter distance variation to the convergence point between two adjacent epochs is necessary. After all, the measurement highly depends on the convergence point, which can only be obtained after the whole training process.", + "bbox": [ + 169, + 128, + 826, + 228 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We come up with a simplified convergence measurement. This method uses weight variation as a metric to examine how fast a layer is learning, and whether this layer reaches a state of convergence. If a layer is learning actively, it is reasonable that its weights vary drastically during training. For the converged layers, their weights usually keep stable. 
So we use $\\|\\theta_l^{(t_k)} - \\theta_l^{(t_{k+1})}\\|_2 / \\|\\theta_l^{(t_k)}\\|_2$ , the normalized weight variation of layer $l$ during epoch $k$ and $k + 1$ , to illustrate how actively it is learning.", + "bbox": [ + 169, + 234, + 826, + 323 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/c14d3d7d8d8b5f86c9a0418fdbfe649e1f98f972535305d86b6a15f72d8e7cea.jpg", + "image_caption": [ + "(a) ResNet-50 Val Acc $73.24\\%$" + ], + "image_footnote": [], + "bbox": [ + 238, + 333, + 511, + 443 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/04c8d0fa6a2eff40a964496ab70bbe2e9626de759594d4b1b1a8fe629a41f40d.jpg", + "image_caption": [ + "(b) VGG-19 Val Acc $71.89\\%$", + "Figure 11: The convergence processes of ResNet-50 and VGG-19 on ImageNet. The results are illustrated with weight variations. The learning rate decays at epoch 50 and epoch 100." + ], + "image_footnote": [], + "bbox": [ + 516, + 333, + 759, + 443 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The results of ResNet-50 and VGG-19 training process on ImageNet are shown in Fig. 11. From this plot, we can see that after learning rate decays at epoch 50 and 100, the weight variations drop evidently. However, the weight variations of each layer do not show apparent decreasing trend when the learning rate keeps stable, which indicates that the training of DNNs do not converge as usual convex optimization problems do (e.g., linear programming). Therefore, it is hard for us to compare the convergence rates of different layers by observing their convergence curves. We cannot find a clear clue like what was given by the convergence measurement in Section 2 to get the layer convergence bias. All in all, we can safely claim that, it is crucial for the convergence metric to consider direction information to measure how fast different layers are learning towards their convergence points. 
Our previous convergence measurement really needs to examine convergence by calculating the parameter distance between the current point to convergence point.", + "bbox": [ + 169, + 513, + 826, + 670 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.3 FACTORS AFFECTING LAYER CONVERGENCE BIAS", + "text_level": 1, + "bbox": [ + 171, + 103, + 571, + 118 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In Section 5, we have shown that the complexity of the datasets is an important factor affecting layer convergence bias. When the fitting target function is complex enough with both low and high frequency components, the shallower layers learn the low low-frequency components while the deeper layers learn the high-frequency components. Here we use the FCNNs with residual connections to show whether some other important factors would affect the layer convergence bias. All following experiments are conducted on the same regression task in Section 3.", + "bbox": [ + 169, + 131, + 823, + 215 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Model Depth. The default architecture used in previous experiments is the four-blocks FCNN, here we try adding more blocks to make the network deeper and see what change will happen. As shown in Fig. 12, all the networks show layer convergence bias. 
With more and more res-blocks, the overall convergence of the network becomes slightly faster.", + "bbox": [ + 169, + 220, + 826, + 279 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/cc2aff774b33d159dddbe57297a41ea623c4b6e1c8238f3210ae1746b98d667a.jpg", + "image_caption": [ + "(a) 4 Res-Blocks" + ], + "image_footnote": [], + "bbox": [ + 173, + 292, + 357, + 396 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/453f84c65a6baabe53e224356ca74b0c776c7f0a8c00e38898063f8c269a97cc.jpg", + "image_caption": [ + "(b) 8 Res-Blocks" + ], + "image_footnote": [], + "bbox": [ + 364, + 294, + 514, + 396 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/6c70da1522273b0ccd7717aad2cbed9966f2fe1ea3415c9334212634b8ac9363.jpg", + "image_caption": [ + "(c) 12 Res-Blocks", + "Figure 12: The convergence process of FCNNs with different number of res-blocks." + ], + "image_footnote": [], + "bbox": [ + 519, + 294, + 668, + 396 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/8f5bd8c36a3e1c63d8619e1feeea44664d4633aa087c1d2b2c8dc2215dda3798.jpg", + "image_caption": [ + "(d) 16 Res-Blocks" + ], + "image_footnote": [], + "bbox": [ + 674, + 294, + 821, + 396 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Learning Rate. The results with different learning rates are shown in Fig. 13. When the learning rate gets smaller, layer convergence bias becomes weaker. This is because the gradient predictiveness w.r.t. parameters of all layers get close to 1 (see Fig. 3 (b,right) for the predictiveness with the learning rate of 0.01). In this case, a layer is less influenced by the updates of parameters in other layers, only the gradient predictiveness w.r.t. data matters for the convergence rate. 
In addition, smaller learning rates are beneficial for the deeper layers to converge because of their sharper minima.", + "bbox": [ + 169, + 458, + 823, + 554 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/9e940352ce29aa0753b43623966f621e44b442ff5f45f01ca7ec52a8799cd859.jpg", + "image_caption": [ + "(a) $\\mathrm{LR} = 0.01$", + "Figure 13: The convergence process of FCNNs with different learning rates." + ], + "image_footnote": [], + "bbox": [ + 254, + 570, + 433, + 670 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/a68e1ef770b3c062097e63279ea52fc7603fb3b14fdd2f5587008ed112684517.jpg", + "image_caption": [ + "(b) LR=0.03" + ], + "image_footnote": [], + "bbox": [ + 439, + 570, + 586, + 670 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/d1508a6d1aeb53f993f1f0855f04bf7469b186772ab04214f180caa548e8eb72.jpg", + "image_caption": [ + "(c) $\\mathrm{LR} = 0.1$" + ], + "image_footnote": [], + "bbox": [ + 594, + 570, + 741, + 670 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Weight Decay. The experiments with FCNNs in previous sections are conducted without weight decay. It is interesting to investigate the sensitivity of the layer-wise model convergence with different weight decay strengths. The results are shown in Fig. 14. We can see that when the weight decay becomes stronger, the residual blocks converge slower in a more and more similar convergence rate. We conjecture the reason is that weight decay dominates the total loss when its coefficient is large. In this way, the layer parameters with similar initialization scales tend to converge in similar speed toward zero. Because the residual blocks have identical architectures, they share the same initial parameter distribution, and converge in the same speed when weight decay is strong.", + "bbox": [ + 169, + 734, + 823, + 848 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Optimizer. 
In Section 4, we have discussed the mechanism behind layer convergence bias. The flatter/sharper minimizers of different layers make SGD learn at different speeds. This is because SGD is more good at finding flatter minimizers (Pan et al., 2020). In Fig. 15, we compare SGD with three adaptive optimizers: Adagrad, RMSprop, and Adam. It is evident that with adaptive optimizers, layer convergence bias does not hold anymore. We conjecture the reason behind this", + "bbox": [ + 169, + 854, + 825, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/8c910a09f36ed47a776ac4c222b9f922099d411872ba29c1a8ddad5b2028fd69.jpg", + "image_caption": [ + "(a) $\\mathrm{WD} = 1\\mathrm{e} - 7$" + ], + "image_footnote": [], + "bbox": [ + 174, + 99, + 356, + 200 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/1be2a755db41c189bba26cb171501af3ca144f7d7cb4a4fb852009ec3e43e59d.jpg", + "image_caption": [ + "(b) $\\mathrm{WD} = 1\\mathrm{e} - 6$" + ], + "image_footnote": [], + "bbox": [ + 356, + 101, + 509, + 200 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/48c54aefd14b8aa698fa80c269dec5b09730df1cb1ce2b04444a89aa91c71723.jpg", + "image_caption": [ + "(c) $\\mathrm{WD} = 1\\mathrm{e} - 5$" + ], + "image_footnote": [], + "bbox": [ + 511, + 101, + 665, + 200 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/c9b32c2ce60e956db6ac87943ab3ce0b46075be910f394f49232fabafa4b331e.jpg", + "image_caption": [ + "(d) $\\mathrm{WD} = 1\\mathrm{e} - 4$" + ], + "image_footnote": [], + "bbox": [ + 666, + 101, + 821, + 200 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/7426eff0147450ebff0521730b61325b7c4e73b9c2627fbe66d33214ec987820.jpg", + 
"image_caption": [ + "Figure 14: The convergence process of FCNNs with different weight decay strengths.", + "(a) SGD", + "Figure 15: The convergence process of FCNNs with different optimizers." + ], + "image_footnote": [], + "bbox": [ + 173, + 257, + 354, + 357 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/e135b9bc895215ddb16dfe5463737e3ca474af3384917dd410758b939cbce60c.jpg", + "image_caption": [ + "(b) Adagrad" + ], + "image_footnote": [], + "bbox": [ + 356, + 258, + 509, + 357 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/aa8a87fcac06a0fd497ed4e36c4b736c2fdf0976d37c37db1d7917240dd929ff.jpg", + "image_caption": [ + "(c) RMSprop" + ], + "image_footnote": [], + "bbox": [ + 511, + 258, + 665, + 357 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/a343fbd451eb81c933889a4dc2ceebfd43a11cf133804cb1fa6cc26406b9771c.jpg", + "image_caption": [ + "(d) Adam" + ], + "image_footnote": [], + "bbox": [ + 666, + 258, + 821, + 357 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "is that the adaptive optimizers heuristically assign different learning rates for different parameters, making their optimization hardly predictable.", + "bbox": [ + 169, + 431, + 823, + 460 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Normalization Methods. Like residual connection, batch normalization Ioffe & Szegedy (2015) is also a common design in modern DNN architectures. As discussed in previous literature, normalization in the neural networks helps to make the layer inputs more stable and make the loss landscapes smoother, thus accelerates the model training Santurkar et al. (2018). In Section 3 and Section 4, we mainly use the FCNNs without normalization to verify and explore the layer convergence bias. Here we investigate how the normalization methods (i.e., batch normalization, layer normalization Ba et al. 
(2016), and group normalization Wu & He (2018)) help the convergence, and whether the shallower layers still converge faster in these cases. As shown in Fig. 16, all layers converge faster when adding batch normalization to them. Particularly, \"Res-Block 1\" accelerates the most and reach a similar convergence rate as \"Layer 1\". The layer convergence bias also holds for batch normalization. For layer normalization and group normalization, the models show a significantly faster convergence rate than the model using batch normalization. All layers show effective convergence at an early stage of training (i.e., the first five epochs). In these two cases, different layers have similar convergence rates, thus no evident layer convergence bias emerges.", + "bbox": [ + 169, + 465, + 826, + 661 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/c18964d8ef4698f80162ae831dd96c2e2a854ed39750a3c40037c9ee297a46f6.jpg", + "image_caption": [ + "(a) Without normalization", + "Figure 16: The convergence process of FCNNs with different normalization methods. When using group normalization, we set the group number to 8." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 674, + 356, + 773 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/5cc61b139c36fdbd843318044c92bc9f08b4928b9a7d11f8640c8bc6d3bcbff4.jpg", + "image_caption": [ + "(b) Batch normalization" + ], + "image_footnote": [], + "bbox": [ + 357, + 674, + 511, + 773 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/048efec3e0d90b64ad0f8eb197f1a9266058a077430180c8f05ad823f565a850.jpg", + "image_caption": [ + "(c) Layer normalization" + ], + "image_footnote": [], + "bbox": [ + 511, + 674, + 665, + 773 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/bec197c0b7083842ddd26fb8f7bc3bcf15f14334ad8cae797d0304729bab6ec6.jpg", + "image_caption": [ + "(d) Group normalization" + ], + "image_footnote": [], + "bbox": [ + 666, + 674, + 821, + 773 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.4 RESULTS ON HARDER DATASETS", + "text_level": 1, + "bbox": [ + 171, + 104, + 449, + 118 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For verifying the layer convergence bias on more datasets, we show more convergence results on four harder image classification datasets (see Fig. 17). Most of the classes in these datasets only have $< 100$ samples, making them harder to learn. Note that the experiments are conducted with the learning rate of 0.01 (learning rate of 0.1 failed in some cases because these datasets have too many classes but not sufficient samples, leading to non-decreasing loss), some deeper layers have quite similar convergence rates because of the small learning rate. 
But roughly speaking, layer convergence bias still holds for these datasets.", + "bbox": [ + 169, + 131, + 826, + 229 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/2d3d1b6de4d187a9f4de51739ebda2bd2438b98ef0df07a2ec2e90e179fb1ac2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 247, + 356, + 361 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/5a38646221862fb698cd2ba10f02054544c4c8e8783063dadc260832cda985eb.jpg", + "image_caption": [ + "(a) ResNet-50" + ], + "image_footnote": [], + "bbox": [ + 362, + 247, + 509, + 359 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/1b8738cead3470357291c6178fcedc909a1b18863b2b10d24230d8adfce0843f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 247, + 665, + 359 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/21cdb294d3de63caa9d5e62609dd7916741247e216ee9e4b576d1a306bf34aab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 673, + 247, + 821, + 359 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/8c7fa7738b52dd3f1b0ec00f850490002e879985c7d932e599678ea0b1ac2450.jpg", + "image_caption": [ + "Figure 17: The convergence process of CNNs on four image classification tasks." 
+ ], + "image_footnote": [], + "bbox": [ + 176, + 383, + 357, + 494 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/1c7ee3bcce9f991f5e31e8c2bc232673e8ef3c56dc6ef8260e33833b5a10e435.jpg", + "image_caption": [ + "(b) VGG-19" + ], + "image_footnote": [], + "bbox": [ + 367, + 385, + 511, + 494 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/d71898cc82aa5829b15c3ade625fc9ab56b6a1a49a1491a20217c627531e45f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 385, + 663, + 494 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/fc3727fa8b46bdfc62303d8fa3e601e021b217453e63b6b086a1c6c8dd903884.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 385, + 821, + 496 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.5 REPEATABILITY OF THE VISUALIZATIONS", + "text_level": 1, + "bbox": [ + 171, + 574, + 509, + 589 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Do different ImageNet trained models produce dramatically different loss landscapes? We plot the loss landscapes of different models with different random seeds in Fig. 18, 19. 
Quite similar patterns of the landscapes for different layers can be observed on both ResNet and VGG with different random seeds.", + "bbox": [ + 169, + 602, + 826, + 659 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/cdc4602f4900831a817c6ba0b16166988b92a5ed27d06b262133d493de8cab68.jpg", + "image_caption": [ + "(a) Stage 1 Seed 1" + ], + "image_footnote": [], + "bbox": [ + 176, + 675, + 331, + 762 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/abd8bef9c0bde4ccf521ae8780f8f963813e3757ad1011194da9d4b3b7754b33.jpg", + "image_caption": [ + "(b) Stage 2 Seed 1" + ], + "image_footnote": [], + "bbox": [ + 341, + 676, + 493, + 762 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ba6b51581f09344b7a5294ad231451ffff407aae50122918db4380b0c9eed752.jpg", + "image_caption": [ + "(c) Stage 3 Seed 1" + ], + "image_footnote": [], + "bbox": [ + 504, + 676, + 658, + 762 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/71814b1519c57e68b5914a4e19460521f39b1313e1a78c9a324836a55c1fb1c4.jpg", + "image_caption": [ + "(c) Stage 4 Seed 1" + ], + "image_footnote": [], + "bbox": [ + 663, + 676, + 818, + 762 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/5a8c2c25bc5f0158bd56ecd9578c8bafb193298aa7b548989b51f1497106dd4c.jpg", + "image_caption": [ + "(d) Stage 1 Seed 2" + ], + "image_footnote": [], + "bbox": [ + 176, + 781, + 331, + 869 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/0f04e3025b3c16525aa272aafd25438907ada7c2fe5faf9090434da866ae0c5c.jpg", + "image_caption": [ + "(e) Stage 2 Seed 2" + ], + "image_footnote": [], + "bbox": [ + 339, + 781, + 493, + 869 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/0c3aa08d1725a94bfc6c869b66063bbc9bc36ecc11014c1fa3bb55756c712189.jpg", + "image_caption": [ + "(f) Stage 3 Seed 2" + ], + "image_footnote": [], + "bbox": [ + 503, + 781, + 656, + 869 + ], + "page_idx": 16 + }, + { + "type": 
"image", + "img_path": "images/80f071771fa09a7bc8bd98ee2b6b0dd524a8816e603accd1cbe259d50f42514c.jpg", + "image_caption": [ + "(g) Stage 4 Seed 2", + "Figure 18: The loss landscapes of different layers of ResNet-50." + ], + "image_footnote": [], + "bbox": [ + 663, + 781, + 818, + 869 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/a1f24b60ab084ff5e91c86c22102eb1c5984cd7693d5367bd8e88c97f0f9b9c6.jpg", + "image_caption": [ + "(a) Layer 1 Seed 1" + ], + "image_footnote": [], + "bbox": [ + 176, + 102, + 333, + 190 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/2826defe3fa06f37e46b41cf27059aad6d783ff328c1d5a830c6867bb075a758.jpg", + "image_caption": [ + "(b) Layer 5 Seed 1" + ], + "image_footnote": [], + "bbox": [ + 339, + 102, + 496, + 190 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/96d8380a41f38818184333786a210af8d272d30b5769c7160da01a5b1ac06baf.jpg", + "image_caption": [ + "(c) Stage 9 Seed 1" + ], + "image_footnote": [], + "bbox": [ + 503, + 102, + 658, + 190 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/f0073c99ef1020a065dc77f78c5fbf0c9f3a31bae17fac5292db87edaaba2b55.jpg", + "image_caption": [ + "(d) Stage 13 Seed 1" + ], + "image_footnote": [], + "bbox": [ + 665, + 102, + 821, + 190 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/5d78a9bcedc51405b94b36804c97cfef494769d5001591f09d66b069e0b0959e.jpg", + "image_caption": [ + "(e) Layer 1 Seed 2" + ], + "image_footnote": [], + "bbox": [ + 176, + 212, + 333, + 297 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/8ae304f21bc0a477f488c981a76b210feeab8251686500b20c9109678f4c0bc6.jpg", + "image_caption": [ + "(f) Layer 5 
Seed 2" + ], + "image_footnote": [], + "bbox": [ + 341, + 212, + 496, + 297 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/aac8258375312fda109c1b7d60b5900e75f4d2163d868f0b185381d13a4693ff.jpg", + "image_caption": [ + "(g) Stage 9 Seed 2", + "Figure 19: The loss landscapes of different layers of VGG-19." + ], + "image_footnote": [], + "bbox": [ + 504, + 212, + 658, + 297 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/1019864b6bee7ce9126cd66adff4dc0edca7f8e353221128bf2ef43d992d1dd4.jpg", + "image_caption": [ + "(h) Stage 13 Seed 2" + ], + "image_footnote": [], + "bbox": [ + 665, + 212, + 821, + 297 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.6 MODELS OBEYING LAYER CONVERGENCE BIAS PERFORM BETTER", + "text_level": 1, + "bbox": [ + 171, + 372, + 686, + 386 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In Section 4 and Section 5, it is discussed that layer convergence bias indicates that the shallower layers are learning low-level features (or low-frequency components of the target function). It is reasonable learning low-level features first have greater potential to reach good model performance, since the model can establish its high-level features based on relatively stable low-level feature spaces.", + "bbox": [ + 169, + 401, + 823, + 470 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To examine whether the fast establishment of low-level features benefits model performance, we train four different FCNN models with the same amount of parameters, but different architectures, to fit the Sine target with four components. This experiment is based on a finding that a residual block with more layers in it tends to converge more slowly. We construct four FCNN models, each of them has four residual blocks (maybe in different sizes). The convergence processes are shown in Fig. 20. We can see that the blocks with the largest complexity always converge the most slowly. 
As the block with depth $= 4$ being placed shallower in the FCNN, the regression MSE loss goes higher. In other words, if a shallower layer converges slowly, the model gets poorer performance. This may due to the vulnerability of deeper layers. If they converge based on changing shallower layers, it is hard for them to learn good features based on their unstable inputs.", + "bbox": [ + 169, + 477, + 823, + 617 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/58ba3b7388ef6af39c999e96716c1e2975a05240b2b63efc7614553ec8c19502.jpg", + "image_caption": [ + "(a) Res-Blocks $= (4,1,1,1)$ Val loss 2.7e-4", + "Figure 20: The convergence process of FCNNs with different residual block sizes and their validation performance on the regression task. Each model has a four-layer residual block and three one-layer residual blocks (e.g., \"Res-Blocks=(4,1,1,1)\" means the first residual block has four layers, and the rest three blocks have only one layer)." + ], + "image_footnote": [], + "bbox": [ + 173, + 636, + 356, + 737 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/986a500e127a3b567a7d483321e402198416d167b8ac976335581d11af8cc7ec.jpg", + "image_caption": [ + "(b) Res-Blocks $= (1,4,1,1)$ Val loss 2.4e-4" + ], + "image_footnote": [], + "bbox": [ + 366, + 636, + 511, + 737 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/05f6f9bb1fbf28a7268b87985221b47d7564cc5546ffb4f70207192a9ffd9168.jpg", + "image_caption": [ + "(c) Res-Blocks $= (1,1,4,1)$ Val loss 1.8e-4" + ], + "image_footnote": [], + "bbox": [ + 521, + 636, + 666, + 737 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/212a7681039b14ab8973ec4b79d85bf91f44408c5bc8b952bc412e7c3ba6b5c7.jpg", + "image_caption": [ + "(d) Res-Blocks $= (1,1,1,4)$ Val loss 1.4e-4" + ], + "image_footnote": [], + "bbox": [ + 676, + 636, + 821, + 737 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The results can also be understood from another 
perspective. If the deeper block contains more parameters (with more fully connected layers in it), it would be helpful for this block to learn the corresponding high-frequency components of the target function. Therefore, the model can reach better performance. A similar observation is obtained in Section 6.1: when putting wider layers of the ViT deeper, the model can reach higher performance.", + "bbox": [ + 169, + 854, + 825, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.7 LAYER CONVERGENCE BIAS FOR VISION TRANSFORMERS", + "text_level": 1, + "bbox": [ + 171, + 104, + 625, + 118 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "As discussed in Section 6, ViT can benefit from distributing more parameters in the deeper layers. This result comes from one of our main findings about layer convergence bias: the deeper layers tend to learn high-frequency components of the target function, thus converge more slowly. So adding more parameters for the deeper layers is beneficial for these layers to learn the high-frequency components which are usually harder.", + "bbox": [ + 169, + 128, + 823, + 200 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "When making this claim, we do not verify the layer convergence bias for the ViT. The main difficulty for verifying layer convergence bias for ViTs is brought by its typical training scheme. ViT needs adaptive optimizers to train, otherwise it converges very slowly. However, adaptive optimizers change the learning rates of different parameters according to their optimization procedures. This leads to unfair convergence comparison between layers, thus affects the layer convergence bias, as shown in Fig 15. 
Therefore, we try both SGD and Adam optimizers for training ViTs on ImageNet, and see whether layer convergence bias holds in some cases. As shown in Fig. 21 (a), the ViT shows a roughly trend of layer convergence bias when optimizing with Adam, where the deepest \"Encoder Block 12\" converges the slowest. However, some other layers do not strictly obey layer convergence bias (e.g., the shallowest \"Patch Embedding\" does not learn fastest among all blocks). When optimizing with SGD, the ViT shows a good layer convergence bias. The results indicate that ViTs approximately share the same rules as FCNNs and CNNs, thus supports the discussions in Section 6.", + "bbox": [ + 169, + 205, + 826, + 387 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/0af94e7b19f8f6f3270ef8ff11d3b3b1c3f0212f5324ea3e4a2fb4ab6b730aba.jpg", + "image_caption": [ + "(a) ViT Adam" + ], + "image_footnote": [], + "bbox": [ + 271, + 398, + 509, + 523 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/f2ab7458058264a1c486e2c09681f0a30cecc2ec3babc56a0515bcaf96f8eb4d.jpg", + "image_caption": [ + "(b) ViT SGD", + "Figure 21: The convergence curves of ViTs on ImageNet with different optimizers. With Adam optimizer, the ViT does not obey the layer convergence bias strictly. While SGD can ensure relatively ideal faster convergence processes of shallower layers." + ], + "image_footnote": [], + "bbox": [ + 511, + 398, + 725, + 523 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.8 CONNECTION TO LARS OPTIMIZATION SCHEME", + "text_level": 1, + "bbox": [ + 171, + 625, + 558, + 638 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "One of the most important factors that affect the optimization procedure is the learning rate. In this work, it is shown that the shallower layers can learn effectively with large learning rates, but the deeper layers only learn fast after learning rate decays. 
Is there any connection between layers and its suitable learning rate?", + "bbox": [ + 169, + 648, + 823, + 707 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "LARS optimizer You et al. (2017) made a significant contribution to training DNNs with huge batch sizes and large learning rates. The key observation in the literature is that the weight-to-gradient ratio highly varies in different layers. If a layer has greater gradients and relatively smaller weights, it would be hard for it to converge due to the vigorous parameter update. So LARS considers the scale of the weights and its gradient norms in each layer and assigns a local learning rate for a layer to make it converge effectively and stably. For FCNNs in our work, its different hidden layers are initialized with the same scale due to their identical architecture, but the deeper layers usually have larger gradients. As a result, the larger gradients may make these layers struggle to converge. Similarly, the CNNs (i.e., ResNet-50 and VGG-19) have wider deeper layers. These layers have smaller initial parameters, so their gradients may lead to drastic weight variations if the learning rate is too large. In this way, we can understand why they cannot get close to their optimal points effectively at the early stage of training. 
It explains layer convergence bias from another perspective.", + "bbox": [ + 169, + 712, + 826, + 881 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_model.json b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_model.json new file mode 100644 index 0000000000000000000000000000000000000000..44ce3480418b687ee160016873658547e0929ce3 --- /dev/null +++ b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_model.json @@ -0,0 +1,4308 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.822, + 0.174 + ], + "angle": 0, + "content": "WHICH LAYER IS LEARNING FASTER? 
A SYSTEMATIC EXPLORATION OF LAYER-WISE CONVERGENCE RATE FOR DEEP NEURAL NETWORKS" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.195, + 0.522, + 0.21 + ], + "angle": 0, + "content": "Yixiong Chen\\(^{1}\\) Alan Yuille\\(^{2}\\) Zongwei Zhou\\(^{2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.21, + 0.718, + 0.225 + ], + "angle": 0, + "content": "1The Chinese University of Hong Kong - Shenzhen 2Johns Hopkins University" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.225, + 0.812, + 0.239 + ], + "angle": 0, + "content": "yixiongchen@link.cuhk.edu.cn ayuille1@jhu.edu zzhou82@jh.edu" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.276, + 0.547, + 0.29 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.31, + 0.768, + 0.465 + ], + "angle": 0, + "content": "The deeply hierarchical structures enable deep neural networks (DNNs) to fit extremely complex target functions. However, the complex interaction between layers also makes the learning process of a particular layer poorly understood. This work demonstrates that the shallower layers of DNNs tend to converge faster than the deeper layers. We call this phenomenon Layer Convergence Bias. We also uncover the fundamental reason behind this phenomenon: Flatter local minima of shallower layers make their gradients more stable and predictive, allowing for faster training. Another surprising result is that the shallower layers tend to learn the low-frequency components of the target function, while the deeper layers usually learn the high-frequency components. It is consistent with the recent discovery that DNNs learn lower frequency objects faster." 
+ }, + { + "type": "title", + "bbox": [ + 0.174, + 0.499, + 0.338, + 0.514 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.534, + 0.825, + 0.618 + ], + "angle": 0, + "content": "Over the last decade, breakthrough progress has been made by deep neural networks (DNNs) on a wide range of complicated tasks in computer vision (Krizhevsky et al., 2017), natural language processing (Sutskever et al., 2014), speech recognition (Graves et al., 2013), game playing (Silver et al., 2016), and biomedical prediction (Jumper et al., 2021). Such progress hinged on a number of advances in hardware technology, dataset construction, and model architectural designs. Among them, the invention and application of very-deep network architectures play a decisive role." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.624, + 0.825, + 0.696 + ], + "angle": 0, + "content": "Deepening the network is an effective way to empower its fitting ability. Extensive studies (Delalleau & Bengio, 2011; Eldan & Shamir, 2016; Lu et al., 2017) compared the power between deeper and wider neural networks and showed that the polynomial growth of depth has a similar effect to the exponential growth of width. Therefore, modern DNNs (Simonyan & Zisserman, 2014; He et al., 2016) usually contain tens of layers to ensure their modeling abilities for real-world applications." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.701, + 0.825, + 0.8 + ], + "angle": 0, + "content": "Although the practical success of deep architectures is indisputable, they make the learning hardly predictable since complex interaction happens between layers when co-adapting to the target (Yosinski et al., 2014). By now, we still have a poor understanding of how different layers learn differently. Currently, a widely accepted view relates to the vanishing gradient problem Hochreiter (1991); Hochreiter et al. (2001). 
The gradients are getting weaker and weaker as they move back through the hidden layers, making the shallower layers converge more slowly (Nielsen, 2015). Informally, it is reasonable that larger gradient values bring higher learning speed." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.806, + 0.825, + 0.89 + ], + "angle": 0, + "content": "Even though this view somewhat makes sense, we seem to have little concrete evidence supporting it. In particular, it is dubious how higher-level features can be built based on the unstable features extracted by the unconverged shallower layers (Raghu et al., 2017). This paper aims to find a credible answer for the parameters of which layer are learning faster towards the convergence point (defined as the convergence rate in this work) with a systematic exploration. Our results lead to somewhat startling discoveries." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Our Contributions. Our point of start is illustrating that there does not seem to be a reliable positive correlation between the gradient magnitude and the convergence rate of a particular layer." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "Instead, we find that shallower layers tend to converge faster than the deeper ones, even with smaller gradients. The phenomenon is called layer convergence bias in this paper." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.224 + ], + "angle": 0, + "content": "We then turn our attention to excavating the underlying mechanism for the faster convergence of shallower layers. 
Specifically, we find out that the depth of a layer has a fundamental effect on its training: the parameters of shallower layers are usually optimized on flatter landscapes than deeper layers. This finding reveals that the gradients of shallower layers may be more predictive and thus have the potential to allow the larger learning rates (LRs) to be performed, making the convergence faster." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.23, + 0.825, + 0.329 + ], + "angle": 0, + "content": "Finally, we find that the layer convergence bias is also tied to the frequency of the function they are modeling. When fitting a complex target function, the shallower layers tend to fit the low-frequency (usually simpler) components. On the contrary, the deeper layers struggle to fit the remaining high-frequency components. It is a consistent result of the recent discovery that DNNs prioritize learning low-frequency components of the modeling function, while having very low learning speed on high-frequency components that tend to be more complex (Rahaman et al., 2019). This finding provides us with another perspective to understand why deeper layers learn more slowly." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.335, + 0.825, + 0.379 + ], + "angle": 0, + "content": "We believe that understanding the roots of such a fundamental convergence bias can give us a better grasp of the complicated learning process of DNNs. In turn, it can motivate more in-depth algorithmic progress for the deep learning community." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.383, + 0.826, + 0.482 + ], + "angle": 0, + "content": "This paper is organized as follows. In Section 2, we introduce our method for measuring convergence speed for different layers, and formally define the layer convergence bias. In Section 3, we examine the relationship between gradient magnitude and convergence rate, and show that the shallower layers tend to converge faster even with smaller gradients. 
Then in Section 4, we analyze the mechanism behind the layer convergence bias in DNN training. The layer-frequency correspondence is demonstrated in Section 5. The practical significance of layer convergence bias is presented in Section 6. We further discuss the related work in Section 7 and conclude in Section 8." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.501, + 0.447, + 0.516 + ], + "angle": 0, + "content": "2 LAYER CONVERGENCE BIAS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.531, + 0.825, + 0.589 + ], + "angle": 0, + "content": "The deep architecture of DNNs is arguably one of the most important factors for their powerful fitting abilities. With the benefit brought by the deep structures, there are also extra complexities in the training process coming into being. So far, we do not have a firm conclusion about whether some layers are learning faster than others." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.595, + 0.825, + 0.639 + ], + "angle": 0, + "content": "For examining the convergence progress for a DNN, a common practice is checking its loss curve. However, this is not applicable for comparing the convergence between different layers. In this work, we define a measurement for layer-wise convergence in the following." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.644, + 0.827, + 0.728 + ], + "angle": 0, + "content": "Definition 2.1 (Layer-wise convergence rate) At the training time \\( t \\), let the deep neural network with \\( L \\) layers \\( \\{T_{l}^{(t)}\\}_{l=1}^{L} \\) be \\( f(\\pmb{x}) = (T_{L}^{(t)} \\circ T_{L-1}^{(t)} \\circ \\dots \\circ T_{1}^{(t)})(\\pmb{x}) : \\mathbb{R}^{i} \\to \\mathbb{R}^{o} \\), where \\( i, o \\) are the dimension of its inputs and outputs. We use \\( \\theta_{l}^{(t)} \\) to denote the parameters of the \\( l \\)-th layer \\( T_{l}^{(t)} \\). 
Assuming that \\( \\theta_{l}^{(t)} \\) can finally converge to its optimal point \\( \\theta_{l}^{*} \\) when \\( t \\to \\infty \\), we define the convergence rate of \\( \\theta_{l} \\) during the time interval \\( [t_1, t_2] \\) to be" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.743, + 0.679, + 0.786 + ], + "angle": 0, + "content": "\\[\nC _ {l} ^ {(t _ {1}, t _ {2})} = \\frac {1}{(t _ {2} - t _ {1})} \\cdot \\frac {\\| \\theta_ {l} ^ {(t _ {1})} - \\theta_ {l} ^ {*} \\| _ {2} - \\| \\theta_ {l} ^ {(t _ {2})} - \\theta_ {l} ^ {*} \\| _ {2}}{\\| \\theta_ {l} ^ {(t _ {0})} - \\theta_ {l} ^ {*} \\| _ {2}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.793, + 0.544, + 0.808 + ], + "angle": 0, + "content": "where \\( t_0 \\) denotes the time point when the training starts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.827, + 0.929 + ], + "angle": 0, + "content": "In this definition, the numerator \\(\\| \\theta_l^{(t_1)} - \\theta_l^*\\| _2 - \\| \\theta_l^{(t_2)} - \\theta_l^*\\| _2\\) denotes how much the distance of the parameter \\(\\theta_{l}\\) to the optimal point is shortened in the period \\([t_1,t_2]\\). The denominator \\(\\| \\theta_l^{(t_0)} - \\theta_l^*\\| _2\\) represents the distance between the initial point to the convergence point, whose primary function is to normalize the speed, allowing the convergence of different layers to compare with each other. Thus, the convergence rate of \\(\\theta_{l}\\) can be understood as the ratio of normalized distance to time. Common optimization works (Yi et al., 1999; Nesterov, 2003) defined the rate of convergence for \\(\\theta\\) as \\(\\lim_{k\\to \\infty}\\frac{\\|\\theta^{(k + 1)} - \\theta^*\\|_2}{\\|\\theta^{(k)} - \\theta^*\\|_2}\\). 
It focuses on measuring an exponential level convergence when the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.149 + ], + "angle": 0, + "content": "optimization step goes to infinity. Since the difference in convergence rates between layers usually appears at an early stage of training, and it is not large enough to compare at an exponential level, we define our new convergence metric to present the convergence difference in a clearer way." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.154, + 0.826, + 0.19 + ], + "angle": 0, + "content": "Observation 2.1 (Layer convergence bias). For \\( l_1 < l_2 \\), \\( \\exists \\tilde{t} > 0 \\), such that \\( C_{l_1}^{(t_1, t_2)} > C_{l_2}^{(t_1, t_2)} \\) when \\( t_1 < t_2 < \\tilde{t} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.827, + 0.252 + ], + "angle": 0, + "content": "Layer convergence bias indicates that at an early training phase \\( t < \\tilde{t} \\), the parameters \\( \\theta_{l_1} \\) of a shallower layer \\( l_1 \\) tend to move to \\( \\theta_{l_1}^* \\) faster than a deeper layer \\( \\theta_{l_2} \\) moving to \\( \\theta_{l_2}^* \\). In the following, we use both synthetic and real datasets to show that the layer convergence bias appears for both fully-connected neural networks (FCNNs) and convolutional neural networks (CNNs)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.275, + 0.6, + 0.291 + ], + "angle": 0, + "content": "3 VERIFICATION OF LAYER CONVERGENCE BIAS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.825, + 0.38 + ], + "angle": 0, + "content": "In this section, we try to substantiate the central claim of this work. 
First, we use the FCNNs to show that the shallower layers tend to converge faster than the deeper layers on the regression task, even when the gradient values for shallower layers are smaller. We then use CNNs with modern architectures to verify that layer convergence bias is a common phenomenon in practical applications. All experimental settings in this work can be found in Appendix A.1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.399, + 0.664, + 0.413 + ], + "angle": 0, + "content": "3.1 LAYER CONVERGENCE BIAS IN FULLY-CONNECTED NETWORKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.426, + 0.825, + 0.472 + ], + "angle": 0, + "content": "For FCNNs, we construct a simple regression task to demonstrate that layers with smaller gradients do not necessarily learn more slowly than layers with larger gradients. The fitting target is \\( f(x) = \\sin (x) + \\frac{1}{3}\\sin (3x) + \\frac{1}{10}\\sin (10x) + \\frac{1}{30}\\sin (30x) \\), with mean square error loss for training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.475, + 0.826, + 0.606 + ], + "angle": 0, + "content": "First, we use the FCNN [1-32-32-32-1] with the Sigmoid activations as a simple example. In the following analysis, the first fully-connected layer (1-32) is named Layer 1, and the subsequent two layers (32-32) are called Hidden layer 1, Hidden layer 2 respectively. The gradient values and the convergence processes for these layers are shown in Fig. 1 (a). Two observations can be obtained from the plots: 1) The gradient of Hidden layer 1 is nearly always smaller than the gradient of Hidden layer 2. 2) Although shallower layers have smaller gradients, they seem to converge faster. 
For the first 50 epochs, the shallower layers are moving faster to their convergence point (e.g., \\( C_{Layer_1}^{(t_0,t_{50})} \\approx 0.012 \\), \\( C_{Hidden\\_layer_1}^{(t_0,t_{50})} \\approx 0.009 \\), \\( C_{Hidden\\_layer_2}^{(t_0,t_{50})} \\approx 0.006 \\)), which is inconsistent with the previous view that higher gradients lead to faster learning (Nielsen, 2015)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.612, + 0.827, + 0.729 + ], + "angle": 0, + "content": "To further validate the above results with a deeper network, we adopt residual connections (He et al., 2016) for the FCNN (deep network fails to be trained in this task without residual connections) and use the ReLU activation function. The FCNN [1-(128-128)-(128-128)-(128-128)-(128-128)-1] with four residual blocks of width 128 shows similar results to the shallow FCNN without residual connection (see Fig. 1 (b)). In this case, the difference in layer-wise convergence rate can be observed even earlier (i.e., \\( C_{Res - Block_1}^{(t_0,t_5)} \\approx 2C_{Res - Block_4}^{(t_0,t_5)} \\)), which shows that the layer convergence bias also happens for deeper FCNNs with residual connections. It is noteworthy that our convergence metric is crucial to observe the layer convergence bias, which is elaborated in Appendix A.2." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.753, + 0.495, + 0.848 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.853, + 0.458, + 0.867 + ], + "angle": 0, + "content": "(a) FCNN without residual connection" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.753, + 0.822, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.562, + 0.853, + 0.775, + 0.867 + ], + "angle": 0, + "content": "(b) FCNN with four residual blocks" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.878, + 0.825, + 0.922 + ], + "angle": 0, + "content": "Figure 1: Left (a,b): The absolute mean gradient values for different layers for FCNNs w/o residual connections in training. For both networks, deeper layers have larger gradients. Right (a,b): The convergence process of different layers for FCNNs. Shallower layers converge faster." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.101, + 0.508, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.219, + 0.472, + 0.231 + ], + "angle": 0, + "content": "(a) ResNet-50 Val Acc \\(73.24\\%\\)" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.102, + 0.756, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.567, + 0.219, + 0.713, + 0.231 + ], + "angle": 0, + "content": "(b) VGG-19 Val Acc \\(71.89\\%\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.242, + 0.825, + 0.285 + ], + "angle": 0, + "content": "Figure 2: The convergence process of ResNet-50 and VGG-19 on ImageNet. 
During the first 50 epochs, shallower layers converge much faster than deeper layers. After the learning rate decays at the 50th epoch, parameters of deeper layers accelerate to move to their convergence points." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.302, + 0.825, + 0.346 + ], + "angle": 0, + "content": "Clearly, these results cannot reconcile with the previous view that larger gradients bring a higher learning speed for deeper layers, at least for the DNNs used in this work. Instead, from the optimization point of view, the parameters of shallower layers are learning faster to converge." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.363, + 0.649, + 0.377 + ], + "angle": 0, + "content": "3.2 LAYER CONVERGENCE BIAS IN CONVOLUTIONAL NETWORKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.825, + 0.434 + ], + "angle": 0, + "content": "Real-world datasets are very different from the synthetic data used in our previous experiments. In order to utilize the layer convergence bias to understand and better improve DNNs in real applications, it is important to verify whether the layer convergence bias holds for CNNs on images." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.439, + 0.825, + 0.496 + ], + "angle": 0, + "content": "In the following experiments, we examine the layer-wise convergence process on ImageNet (Rusakovsky et al., 2015) dataset with both ResNet-50 (He et al., 2016) and VGG-19 (Simonyan & Zisserman, 2014). We train the CNNs for 120 epochs with learning rate decay at the 50th epoch \\((0.1\\rightarrow 0.01)\\) and the 100th epoch \\((0.01\\to 0.001)\\). The training processes are shown in Fig. 2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.502, + 0.825, + 0.595 + ], + "angle": 0, + "content": "For ResNet-50, we visualize the learning process of the first convolutional layer and its subsequent four stages. 
One can easily observe that at the beginning of training, the shallower layers converge much faster than the deeper layers (\\( C_{Stage1}^{(t_0,t_{20})} \\approx 3C_{Stage4}^{(t_0,t_{20})} \\)). However, after the learning rate decays at the 50th epoch, deeper layers begin to learn effectively and achieve a higher convergence rate than the shallower layers (\\( C_{Stage1}^{(t_{50},t_{60})} \\approx 0.5C_{Stage4}^{(t_{50},t_{60})} \\)). We conjecture that the initial learning rate is too large for the deeper layers to learn." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.602, + 0.825, + 0.689 + ], + "angle": 0, + "content": "For VGG-19, we visualize its 1st, 5th, 9th, 13th, and 17th layers. This network shows a more significant convergence difference between layers than ResNet-50. At the first training stage with the initial learning rate, \\(\\| \\theta_l^{(t_5)} - \\theta_l^*\\| >\\| \\theta_l^{(t_0)} - \\theta_l^*\\|\\) for \\(l\\in \\{5,9,13,17\\}\\), which means that all layers but the first one even slightly diverge. Usually, the divergence appears when the learning rate is too large. This phenomenon confirms that the deeper layers cannot effectively learn with the large learning rate at the beginning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.695, + 0.825, + 0.739 + ], + "angle": 0, + "content": "The experiments of FCNNs and CNNs verify that layer convergence bias is a common phenomenon for DNNs. In Section 5 and Appendix A.3, A.4, we discuss the factors that would affect the phenomenon, and some in-depth findings they reveal." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.76, + 0.629, + 0.775 + ], + "angle": 0, + "content": "4 MECHANISM BEHIND LAYER CONVERGENCE BIAS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.792, + 0.825, + 0.835 + ], + "angle": 0, + "content": "So far, our investigation shows that the seemingly-right perspective for linking the layer-wise gradient and convergence rate is tenuous, at best. 
Both FCNNs and CNNs demonstrate an evident bias that shallower layers learn faster. Can we explain why this is the case?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.841, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Gradient Predictiveness. Since gradient values cannot determine the convergence rate, we wonder if the directions of the gradients play a more critical role. More chaotic update directions make convergence slower. Here we examine the gradient predictiveness (Santurkar et al., 2018) of different layers. If the gradient behavior is \"predictive\", less change in the gradient directions would appear when 1) the gradients are calculated with different batches of data; 2) the parameters of other layers update. Predictiveness can also be simply understood as the stability of gradient direction." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.208, + 0.103, + 0.496, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.267, + 0.209, + 0.457, + 0.221 + ], + "angle": 0, + "content": "(a) Gradient predictiveness w.r.t. data" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.103, + 0.788, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.54, + 0.209, + 0.765, + 0.221 + ], + "angle": 0, + "content": "(b) Gradient predictiveness w.r.t. parameters" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.234, + 0.825, + 0.264 + ], + "angle": 0, + "content": "Figure 3: The gradient predictiveness of shallower and deeper layers of FCNN. The learning rate decreases from 0.1 to 0.01 at Epoch 150." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.289, + 0.827, + 0.361 + ], + "angle": 0, + "content": "Definition 4.1 Let \\((x^{(t)},y^{(t)})\\) be a batch of input-label pairs for the DNN to train at time \\(t\\), and \\((x^{\\prime (t)},y^{\\prime (t)})\\) be another batch of data. We define the gradient predictiveness of the \\(l\\)th layer at time \\(t\\) w.r.t. data as the cosine similarity \\(\\begin{array}{r}\\operatorname {sim}(G_{l,t},G_{l,t}^{\\prime}) = \\frac{\\langle G_{l,t},G_{l,t}^{\\prime}\\rangle}{\\|G_{l,t}\\|\\|G_{l,t}^{\\prime}\\|}\\in [-1,1] \\end{array}\\) . Likewise, the gradient predictiveness w.r.t. parameters is defined as \\(\\operatorname {sim}(G_{l,t},G_{l,t}^{\\prime \\prime})\\) , where" + }, + { + "type": "equation", + "bbox": [ + 0.28, + 0.368, + 0.715, + 0.445 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} G _ {l, t} = \\nabla_ {\\theta_ {l} ^ {(t)}} L \\left(\\theta_ {1} ^ {(t)}, \\dots , \\theta_ {L} ^ {(t)}; x ^ {(t)}, y ^ {(t)}\\right) \\\\ G _ {l, t} ^ {\\prime} = \\nabla_ {\\theta_ {l} ^ {(t)}} L (\\theta_ {1} ^ {(t)}, \\dots , \\theta_ {L} ^ {(t)}; x ^ {\\prime (t)}, y ^ {\\prime (t)}) \\\\ G _ {l, t} ^ {\\prime \\prime} = \\nabla_ {\\theta_ {l} ^ {(t)}} L (\\theta_ {1} ^ {(t + 1)}, \\dots , \\theta_ {l - 1} ^ {(t + 1)}, \\theta_ {l} ^ {(t)}, \\theta_ {l + 1} ^ {(t + 1)}, \\dots , \\theta_ {L} ^ {(t + 1)}; x ^ {(t)}, y ^ {(t)}) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.453, + 0.825, + 0.545 + ], + "angle": 0, + "content": "Here, \\( G_{l,t} \\) corresponds to the gradient of \\( \\theta_l^{(t)} \\). \\( G_{l,t}^{\\prime} \\) is the gradient of this layer with another batch of data, while \\( G_{l,t}^{\\prime \\prime} \\) means the gradient after all the other layers have updated to new values. Therefore, \\( sim(G_{l,t},G_{l,t}^{\\prime}) \\) indicates the stability of gradients with different data batches. 
\\( sim(G_{l,t},G_{l,t}^{\\prime \\prime}) \\) reflects whether the currently estimated gradient is in a consistent decreasing direction when the loss landscape is affected by the updating of other layers' parameters. The gradient predictiveness during training is shown in Fig. 3, where Res-Block 1 has more predictive gradients than Res-Block 4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.551, + 0.825, + 0.622 + ], + "angle": 0, + "content": "Visualizing the Loss Landscapes. We are curious about why gradients for deeper layers have poorer predictiveness. A hypothesis is that the loss landscapes for deeper layers are more rugged, making the parameters fluctuate more. A straightforward method to validate this hypothesis is plotting the loss landscapes for the parameters. To do this for a particular layer \\( l \\), one can choose a central point \\( \\theta_{l}^{*} \\) and two direction vectors \\( d_{l,1} \\), \\( d_{l,2} \\). Then the loss landscape can be drawn with" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.628, + 0.624, + 0.644 + ], + "angle": 0, + "content": "\\[\nf \\left(\\beta_ {1}, \\beta_ {2}\\right) = L \\left(\\theta_ {l} ^ {*} + \\beta_ {1} d _ {l, 1} + \\beta_ {2} d _ {l, 2}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.657, + 0.825, + 0.746 + ], + "angle": 0, + "content": "in the 3D space with \\(\\beta_{1},\\beta_{2}\\) forming a simplified parameter space. In this work, we generate random Gaussian directions for different layers, and normalize them to obtain the same norm of the corresponding layer. Specifically, we make the replacement \\(d_{l}\\gets \\frac{d_{l}}{\\|d_{l}\\|}\\| \\theta_{l}^{*}\\|\\) for a fully connected layer. For a convolutional layer, we use filter-wise normalization \\(d_l^k\\gets \\frac{d_l^k}{\\|d_l^k\\|}\\| \\theta_l^{k*}\\|\\) as in (Li et al., 2018), where \\(d_l^k\\) represents the \\(k\\)th filter of the \\(l\\)th layer. 
We set both \\(\\beta_{1}\\) and \\(\\beta_{2}\\) in the domain of \\([-1,1]\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.825, + 0.822 + ], + "angle": 0, + "content": "**Landscapes for FCNN.** The loss landscapes for four residual blocks of the FCNN are shown in Fig. 4. For the shallower blocks, the surfaces are flatter near the minimizer, meaning that the gradient magnitudes may be small. However, small gradients do not necessarily lead to slow learning speed in this case. Combined with the gradient predictiveness discussed above, a flatter loss landscape may lead to more consistent gradient directions, making the learning more smooth." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Landscapes for CNNs. The loss landscapes for ResNet-50 and VGG-19 on ImageNet are shown in Fig. 5. It is interesting that deep convolutional networks with/without residual connections present totally different loss landscapes. For ResNet-50, its landscapes near the convergence point \\(\\theta_l^*\\) are smooth and nearly convex, making the neural network easier to train. On the contrary, VGG-19 has much more shattered landscapes, the initial iterations probably lie in the chaotic regions, prohibiting its training (Balduzzi et al., 2017). This may explain the much less efficient convergence towards the optimal point for VGG than ResNet at the initial phase (Fig. 2)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.101, + 0.327, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.189, + 0.29, + 0.2 + ], + "angle": 0, + "content": "(a) Res-Block 1" + }, + { + "type": "image", + "bbox": [ + 0.344, + 0.101, + 0.492, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.383, + 0.189, + 0.46, + 0.2 + ], + "angle": 0, + "content": "(b) Res-Block 2" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.101, + 0.659, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.189, + 0.626, + 0.2 + ], + "angle": 0, + "content": "(c) Res-Block 3" + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.102, + 0.824, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.713, + 0.189, + 0.789, + 0.2 + ], + "angle": 0, + "content": "(d) Res-Block 4" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.211, + 0.825, + 0.24 + ], + "angle": 0, + "content": "Figure 4: The loss landscapes of different layers of FCNN. Deeper layers are optimized on more rugged landscapes, slowing down the learning process." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.255, + 0.331, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.185, + 0.348, + 0.321, + 0.36 + ], + "angle": 0, + "content": "(a) ResNet-50 Res-Block 1" + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.255, + 0.495, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.348, + 0.348, + 0.486, + 0.36 + ], + "angle": 0, + "content": "(b) ResNet-50 Res-Block 4" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.256, + 0.659, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.531, + 0.349, + 0.633, + 0.36 + ], + "angle": 0, + "content": "(c) VGG-19 Layer 1" + }, + { + "type": "image", + "bbox": [ + 0.667, + 0.256, + 0.822, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.695, + 0.349, + 0.804, + 0.36 + ], + "angle": 0, + "content": "(d) VGG-19 Layer 13" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.373, + 0.825, + 0.416 + ], + "angle": 0, + "content": "Figure 5: The loss landscapes of different layers of ResNet-50 (a,b) and VGG-19 (c,d) on ImageNet. The shallower layers for both networks have flatter minima, making them converge faster than the deeper layers. The plots for all layers can be found in Appendix A.5." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.441, + 0.825, + 0.596 + ], + "angle": 0, + "content": "Comparing different layers in the CNNs, the answer for layer convergence bias becomes clearer. The key difference between different layers' loss landscapes of ResNet-50 is the sharpness of the local minima (Fig. 5 (a,b)). We conjecture it is because of a well-known fact that the shallower layers of CNNs tend to learn general features which are applicable to various datasets and tasks, while the deeper layers usually learn task-specific features (Yosinski et al., 2014). 
Before our work, (Zeiler & Fergus, 2014) also revealed that the general features in a five-layer CNN stabilized faster than the specific features. Since the general features are more evenly distributed, they usually cause less fluctuation for training, leading to flatter optima. Theoretically, flatter minimizers are easier to be found by SGD optimizers (Pan et al., 2020). For VGG-19, its shallower and deeper layers also have flatter and sharper minima (Fig. 5 (c,d)), respectively. The shattered loss landscape for its deeper layers may also explain its inefficient learning process with a large learning rate (Fig. 2 (b))." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.601, + 0.825, + 0.644 + ], + "angle": 0, + "content": "Here we summarize the mechanism behind layer convergence bias: the parameters of shallower layers are easier to optimize due to their flatter loss landscapes. At a higher level, shallower layers learn general features, which are usually easier." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.664, + 0.7, + 0.68 + ], + "angle": 0, + "content": "5 DEEPER LAYERS FIT THE HIGH-FREQUENCY COMPONENTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.695, + 0.825, + 0.793 + ], + "angle": 0, + "content": "Recent advances in the learning process of DNNs (Rahaman et al., 2019; Ronen et al., 2019; Xu & Zhou, 2021) revealed that the low-frequency components of the target function are fitted much faster than the high-frequency components. There is a natural question about whether there is some inherent link between layer convergence bias and this result. In this section, we investigate the answer, and surprisingly find that: the low-frequency parts are usually fitted by the shallower layers, while the remaining higher frequencies are mainly learned by the deeper layers. It provides us with an alternative perspective to understand the layer convergence bias." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.825, + 0.856 + ], + "angle": 0, + "content": "The Correspondence for FCNN. With the residual structures, we can straightforwardly visualize what each block of a FCNN learns. Considering the FCNN with one input layer \\( z_0 = T_0(x) : \\mathbb{R}^1 \\to \\mathbb{R}^{128} \\), four residual blocks \\( z_l = T_l'(z_{l-1}) = T_l(z_{l-1}) + z_{l-1} : \\mathbb{R}^{128} \\to \\mathbb{R}^{128} \\), \\( l \\in \\{1, 2, 3, 4\\} \\), and an output layer \\( y = T_5(z_4) : \\mathbb{R}^{128} \\to \\mathbb{R}^1 \\). The whole network can be expressed as" + }, + { + "type": "equation", + "bbox": [ + 0.187, + 0.861, + 0.808, + 0.878 + ], + "angle": 0, + "content": "\\[\ny = T _ {5} \\left(z _ {1} + T _ {2} \\left(z _ {1}\\right) + T _ {3} \\left(z _ {2}\\right) + T _ {4} \\left(z _ {3}\\right)\\right) = T _ {5} \\left(z _ {1}\\right) + T _ {5} \\left(T _ {2} \\left(z _ {1}\\right)\\right) + T _ {5} \\left(T _ {3} \\left(z _ {2}\\right)\\right) + T _ {5} \\left(T _ {4} \\left(z _ {3}\\right)\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.927 + ], + "angle": 0, + "content": "if the output layer \\( T_{5} \\) is a linear transformation. The fitting results for each layer are shown in Fig. 6. It can be seen that the deeper layers tend to fit the more complex components of the target function \\( y = \\sin (x) + \\frac{1}{3}\\sin (3x) + \\frac{1}{10}\\sin (10x) + \\frac{1}{30}\\sin (30x) \\). 
Besides the curvature, the fitted functions" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.101, + 0.298, + 0.17 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.176, + 0.246, + 0.188 + ], + "angle": 0, + "content": "y" + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.102, + 0.43, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.175, + 0.393, + 0.188 + ], + "angle": 0, + "content": "\\(T_{5}(z_{1})\\)" + }, + { + "type": "image", + "bbox": [ + 0.433, + 0.103, + 0.559, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.47, + 0.174, + 0.534, + 0.188 + ], + "angle": 0, + "content": "\\(T_{5}(T_{2}(z_{1}))\\)" + }, + { + "type": "image", + "bbox": [ + 0.561, + 0.103, + 0.689, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.601, + 0.174, + 0.665, + 0.188 + ], + "angle": 0, + "content": "\\(T_{5}(T_{3}(z_{2}))\\)" + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.103, + 0.821, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.737, + 0.175, + 0.8, + 0.188 + ], + "angle": 0, + "content": "\\(T_{5}(T_{4}(z_{3}))\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.201, + 0.825, + 0.23 + ], + "angle": 0, + "content": "Figure 6: The visualization of what each residual block of the FCNN learns. From the first to the fourth block, the fitted function becomes more complex with smaller amplitude." 
+ }, + { + "type": "image", + "bbox": [ + 0.24, + 0.265, + 0.493, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.364, + 0.408, + 0.375 + ], + "angle": 0, + "content": "(a) ResNet-50" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.265, + 0.758, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.619, + 0.364, + 0.674, + 0.375 + ], + "angle": 0, + "content": "(b) VGG-19" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.385, + 0.825, + 0.415 + ], + "angle": 0, + "content": "Figure 7: The visualization of response frequencies for CNNs. As the training goes on, deeper layers become more sensitive to perturbations, indicating that they have higher response frequencies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.444, + 0.825, + 0.488 + ], + "angle": 0, + "content": "are also consistent with the amplitudes of the components. Specifically, the ranges of the four fitted functions are 2.3, 0.7, 0.5, and 0.06, which are similar to the four components. This result further confirms the relationship between layers and frequencies." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.493, + 0.827, + 0.661 + ], + "angle": 0, + "content": "The Correspondence for CNNs. For CNNs, we verify their layer-frequency correspondence through the response frequency (Xu et al., 2019). In a nutshell, if an input-output mapping \\( f \\) possesses significant high frequencies, then a small change in its input induces a large change in the output. We generate standard Gaussian-distributed input \\( x \\) for different residual blocks of ResNet-50 and different layers of VGG-19. At the same time, small Gaussian perturbation \\( \\Delta x \\) is added to the input. A larger change \\( \\Delta y \\) of the layer output means the layer handles higher frequencies. The response frequencies are shown in Fig. 7. 
At the first 5 epochs of training on ImageNet, different layers for both ResNet-50 and VGG-19 do not show significantly different response frequencies. But after about ten epochs, the response frequencies for deeper layers (e.g., stage 4 for ResNet-50, layer 13 for VGG-19) increase while the shallower layers show lower response frequencies. Therefore, we conclude that the layer-frequency correspondence also holds for CNNs. In addition, it is not an innate nature of the layers, but a result of the training process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.827, + 0.863 + ], + "angle": 0, + "content": "How the target frequency affects layer convergence bias? To demonstrate the effect of layer-frequency correspondence on the layer convergence bias, we try fitting simpler targets with less high-frequency components, and see what would happen to the layer-wise convergence rate of FCNN. In Fig. 8 (a-d), we only keep several lowest frequencies of the target, e.g., the target function \\( y = \\sin(x) \\) is named \"Complexity=1\", and \\( y = \\sin(x) + \\frac{1}{3} \\sin(3x) \\) is named \"Complexity=2\", etc. After discarding more and more high-frequency components, the deeper layers converge faster and faster. In this case, the layer convergence bias does not strictly hold anymore. In Fig. 8 (b), the Res-Block 4 converges faster than Res-Block 3 after the 5th epoch. In Fig. 8 (c), the Res-Block 4 converges with a similar speed as Res-Block 2, while the Res-Block 3 even learns faster than Res-Block 2. It seems that removing the high-frequency component that corresponds to a deep layer can effectively accelerate its training. For CNNs, we also observe similar phenomena (Fig. 8 (e-h)). On simpler targets (e.g., CIFAR 10), the deeper layers converge faster than on more complex targets (e.g., CIFAR100). An implication of this result is that the data complexity may be too low for the model. 
In practice, CIFAR datasets only need ResNet-18 to fit well (Wu et al., 2020)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In fact, (Rahaman et al., 2019) had shown that different layers have some links to different frequencies, but the authors did not provide further insight for this phenomenon. This work verifies the underlying relationship between layers and fitting frequencies, and establishes a connection for this relationship to the layer convergence bias." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.101, + 0.363, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.207, + 0.345, + 0.22 + ], + "angle": 0, + "content": "(a) FCNN Complexity=4" + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.102, + 0.515, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.379, + 0.208, + 0.503, + 0.22 + ], + "angle": 0, + "content": "(b) FCNN Complexity=3" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.102, + 0.668, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.528, + 0.208, + 0.651, + 0.22 + ], + "angle": 0, + "content": "(c) FCNN Complexity=2" + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.102, + 0.822, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.683, + 0.207, + 0.806, + 0.22 + ], + "angle": 0, + "content": "(d) FCNN Complexity \\(= 1\\)" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.234, + 0.363, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.224, + 0.342, + 
0.349, + 0.354 + ], + "angle": 0, + "content": "(e) ResNet-50 CIFAR100" + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.234, + 0.515, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.38, + 0.342, + 0.498, + 0.354 + ], + "angle": 0, + "content": "(f) ResNet-50 CIFAR10" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.234, + 0.668, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.536, + 0.342, + 0.652, + 0.354 + ], + "angle": 0, + "content": "(g) VGG-19 CIFAR100" + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.234, + 0.821, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.693, + 0.342, + 0.804, + 0.354 + ], + "angle": 0, + "content": "(h) VGG-19 CIFAR10" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.366, + 0.825, + 0.423 + ], + "angle": 0, + "content": "Figure 8: The convergence curves with different learning target complexities. (a-d): Decreasing target complexities for FCNNs. The deeper layers accelerate more than the shallower ones when high-frequency components are removed. (e-h): For CNNs, the deepest layers (i.e., Stage 4 / Layer 17) learn faster on CIFAR10 than on CIFAR100 while the other layers do not change much." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.451, + 0.431, + 0.466 + ], + "angle": 0, + "content": "6 PRACTICAL SIGNIFICANCE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.484, + 0.825, + 0.528 + ], + "angle": 0, + "content": "Up to now, we have been analyzing the layer convergence bias from a theoretical perspective. This section discusses its practical use to drive the development of DNN architecture design, and a new explanation for the acceleration effect of transfer learning with the help of layer convergence bias." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.547, + 0.428, + 0.56 + ], + "angle": 0, + "content": "6.1 DNN ARCHITECTURE DESIGN" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.574, + 0.625, + 0.77 + ], + "angle": 0, + "content": "Modern CNN architectures (He et al., 2016) usually contain layers from narrow to wide (e.g., 64 channels of the first layer to 2048 channels of the last layer). From the perspective of computational complexity, the narrower shallower layers make the corresponding large feature maps less computation-consuming. Considering the layer convergence bias, deeper layers with larger capacities are also beneficial for the corresponding high-frequencies to be learned easier. Although this is a common design for CNNs, Transformers (Dosovitskiy et al., 2020) usually apply the same architecture for all encoders. For a vision Transformer with 12 encoders, we use encoders with width \\(2/4/8\\) to construct three variants. The variants only differ in the arrangement of different encoders, we use \\(W\\) to denote the widths, and \\(N\\) to denote the number of each kind of encoders. 
The configures are summarized below:" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.782, + 0.581, + 0.798 + ], + "angle": 0, + "content": "- deeper encoders wider: \\( W = (2,4,8) \\), \\( N = (6,3,3) \\)" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.804, + 0.56, + 0.82 + ], + "angle": 0, + "content": "- vanilla architecture: \\( W = (4, 4, 4) \\), \\( N = (4, 4, 4) \\)" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.826, + 0.602, + 0.842 + ], + "angle": 0, + "content": "- deeper encoders narrower: \\( W = (8,4,2) \\), \\( N = (3,3,6) \\)" + }, + { + "type": "list", + "bbox": [ + 0.216, + 0.782, + 0.602, + 0.842 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.591, + 0.824, + 0.701 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.633, + 0.711, + 0.826, + 0.755 + ], + "angle": 0, + "content": "Figure 9: Performance of three variants of ViTs on ImageNet." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.854, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Fig. 9 shows their performances, with the best accuracy of \\(80.75\\%\\), \\(78.88\\%\\), and \\(75.75\\%\\) respectively. We find that with the same number of parameters, putting the wider layers deeper results in higher training performance. This finding may serve as an effective way to improve the model capacity. The causal connection between layer complexity distribution and model performance is discussed in Appendix A.6. And layer convergence bias for ViT is analyzed in Appendix A.7." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.566, + 0.119 + ], + "angle": 0, + "content": "6.2 ACCELERATION EFFECT OF TRANSFER LEARNING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.131, + 0.827, + 0.298 + ], + "angle": 0, + "content": "Transfer learning (fine-tuning with the pre-trained models) is a widely-used technique that can accelerate the model convergence (Shao et al., 2018b; a; Liang & Zheng, 2020). We show the layer convergence curves w/o transfer learning on the Flowers dataset (Nilsback & Zisserman, 2006). When training from scratch (Fig. 10 (a)), the shallower layers converge faster so that the deeper layers can extract semantic features based on basic features. Local minima of Stage 4 is sharp in this case. However, with transfer learning (Fig. 10 (b)), deeper layers can directly be built on the pre-trained basic features. The Stage 4 shows a much higher convergence rate among all layers, its loss landscape also becomes flatter. Two observations that are not consistent with layer convergence bias are summarized in the following: 1) the pre-trained shallower layers are nearly optimal, so they don't present fast convergence in transfer learning; 2) although the pre-trained deeper layers are not as optimal as the shallower layers do, their loss landscapes are much flatter than training from scratch, which makes them converge much faster." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.311, + 0.338, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.406, + 0.393, + 0.417 + ], + "angle": 0, + "content": "(a) Train from scratch" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.313, + 0.496, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.312, + 0.668, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.312, + 0.822, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.631, + 0.406, + 0.702, + 0.417 + ], + "angle": 0, + "content": "(b) Fine-tuning" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.429, + 0.825, + 0.459 + ], + "angle": 0, + "content": "Figure 10: Effects of transfer learning on the training process. Left (a,b): The layer convergence process of ResNet-50. Right (a,b): The loss landscapes of Stage 4 w/o transfer learning." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.49, + 0.347, + 0.505 + ], + "angle": 0, + "content": "7 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.522, + 0.827, + 0.69 + ], + "angle": 0, + "content": "DNNs with gradient-based training show great potential to fit targets with arbitrary complexities (Hornik et al., 1989; Leshno et al., 1993), given sufficient width. With the advances in the last decade to verify the capability of the depth of universal approximators (Delalleau & Bengio, 2011; Eldan & Shamir, 2016; Lu et al., 2017), practitioners tried to reduce the width of neural networks by adding more layers (Simonyan & Zisserman, 2014; He et al., 2016; Huang et al., 2017). We are also inspired by research on local properties (sharpness/flatness) of loss functions at minima (Keskar et al., 2017; Li et al., 2018) and relationship between convergence rate and generalization (Hardt et al., 2016). 
Furthermore, LARS optimizer (You et al., 2017) shares some valuable insights on layer convergence, which are discussed in Appendix A.8. In practice, the idea of layer convergence bias had been intuitively applied to accelerate DNN training (Huang et al., 2016; Brock et al., 2017) and mitigating catastrophic forgetting (Ramasesh et al., 2020). The arrangement schemes of CNN/Transformer blocks were explored by (Liu et al., 2022b;a)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.712, + 0.32, + 0.726 + ], + "angle": 0, + "content": "8 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.744, + 0.827, + 0.925 + ], + "angle": 0, + "content": "In this work, we empirically studied the phenomenon that the shallower layers of DNNs tend to converge faster than the deeper layers, called layer convergence bias. This phenomenon is a natural preference in the process of DNN training: the shallower layers are responsible for extracting low-level features which are more evenly distributed and easier to learn, while deeper layers refine these features to do specific tasks. This makes the loss landscapes for shallower layers flatter than the landscapes for deeper layers, making shallower layers converge faster. In addition, this work established a connection between layers and learned frequencies. By showing deeper layers tend to fit the high-frequency components in the target function, we can understand the layer convergence bias from another perspective. We finally took DNN architecture design and transfer learning as two examples to show how theoretical findings in this work can shed light on the practical applications of deep learning. For progress to continue, a more in-depth understanding of the properties of neural networks is needed. We also hope that the layer convergence bias can inspire more practical improvements in the DNNs' architecture design and training schemes." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.103, + 0.357, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.134, + 0.825, + 0.162 + ], + "angle": 0, + "content": "This work was supported by the Lustgarten Foundation for Pancreatic Cancer Research and the McGovern Foundation." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.183, + 0.289, + 0.198 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.826, + 0.235 + ], + "angle": 0, + "content": "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.244, + 0.825, + 0.287 + ], + "angle": 0, + "content": "David Balduzzi, Marcus Frean, Lennox Leary, JP Lewis, Kurt Wan-Duo Ma, and Brian McWilliams. The shattered gradients problem: If resnets are the answer, then what is the question? In International Conference on Machine Learning, pp. 342-350. PMLR, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.295, + 0.825, + 0.338 + ], + "angle": 0, + "content": "Andrew Brock, Theodore Lim, James Millar Ritchie, and Nicholas J Weston. Freezeout: Accelerate training by progressively freezing layers. In NIPS 2017 Workshop on Optimization: 10th NIPS Workshop on Optimization for Machine Learning, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.346, + 0.825, + 0.375 + ], + "angle": 0, + "content": "Olivier Delalleau and Yoshua Bengio. Shallow vs. deep sum-product networks. Advances in neural information processing systems, 24, 2011." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.384, + 0.825, + 0.441 + ], + "angle": 0, + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.449, + 0.825, + 0.479 + ], + "angle": 0, + "content": "Ronen Eldan and Ohad Shamir. The power of depth for feedforward neural networks. In Conference on learning theory, pp. 907-940. PMLR, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.487, + 0.825, + 0.53 + ], + "angle": 0, + "content": "Alex Graves, Abdel-rahman Mohamed, and Geoffrey Hinton. Speech recognition with deep recurrent neural networks. In 2013 IEEE international conference on acoustics, speech and signal processing, pp. 6645-6649. IEEE, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.538, + 0.825, + 0.568 + ], + "angle": 0, + "content": "Moritz Hardt, Ben Recht, and Yoram Singer. Train faster, generalize better: Stability of stochastic gradient descent. In International conference on machine learning, pp. 1225-1234. PMLR, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.576, + 0.825, + 0.618 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.627, + 0.825, + 0.656 + ], + "angle": 0, + "content": "Sepp Hochreiter. Untersuchungen zu dynamischen neuronalen netzen. Diploma, Technische Universität München, 91(1), 1991." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.665, + 0.825, + 0.694 + ], + "angle": 0, + "content": "Sepp Hochreiter, Yoshua Bengio, Paolo Frasconi, Jürgen Schmidhuber, et al. Gradient flow in recurrent nets: the difficulty of learning long-term dependencies, 2001." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.702, + 0.825, + 0.732 + ], + "angle": 0, + "content": "Kurt Hornik, Maxwell Stinchcombe, and Halbert White. Multilayer feedforward networks are universal approximators. Neural networks, 2(5):359-366, 1989." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.74, + 0.825, + 0.77 + ], + "angle": 0, + "content": "Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pp. 646-661. Springer, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.777, + 0.825, + 0.821 + ], + "angle": 0, + "content": "Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4700-4708, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.829, + 0.825, + 0.871 + ], + "angle": 0, + "content": "Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning, pp. 448-456. PMLR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.88, + 0.825, + 0.925 + ], + "angle": 0, + "content": "John Jumper, Richard Evans, Alexander Pritzel, Tim Green, Michael Figurnov, Olaf Ronneberger, Kathryn Tunyasuvunakool, Russ Bates, Augustin Žídek, Anna Potapenko, et al. Highly accurate protein structure prediction with alphafold. Nature, 596(7873):583-589, 2021." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.206, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Nitish Shirish Keskar, Jorge Nocedal, Ping Tak Peter Tang, Dheevatsa Mudigere, and Mikhail Smelyanskiy. On large-batch training for deep learning: Generalization gap and sharp minima. In 5th International Conference on Learning Representations, ICLR 2017, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.825, + 0.187 + ], + "angle": 0, + "content": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Communications of the ACM, 60(6):84-90, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.194, + 0.825, + 0.237 + ], + "angle": 0, + "content": "Moshe Leshno, Vladimir Ya Lin, Allan Pinkus, and Shimon Schocken. Multilayer feedforward networks with a nonpolynomial activation function can approximate any function. Neural networks, 6(6):861-867, 1993." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.825, + 0.277 + ], + "angle": 0, + "content": "Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. Advances in neural information processing systems, 31, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.285, + 0.825, + 0.316 + ], + "angle": 0, + "content": "Gaobo Liang and Lixin Zheng. A transfer learning method with deep residual network for pediatric pneumonia diagnosis. Computer methods and programs in biomedicine, 187:104964, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.324, + 0.825, + 0.368 + ], + "angle": 0, + "content": "Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swin transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3202-3211, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.375, + 0.825, + 0.42 + ], + "angle": 0, + "content": "Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976-11986, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.428, + 0.825, + 0.471 + ], + "angle": 0, + "content": "Zhou Lu, Hongming Pu, Feicheng Wang, Zhiqiang Hu, and Liwei Wang. The expressive power of neural networks: A view from the width. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.48, + 0.825, + 0.51 + ], + "angle": 0, + "content": "Yuri Nesterov. Introductory lectures on convex optimization: A basic course, volume 87. Springer Science & Business Media, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.518, + 0.825, + 0.548 + ], + "angle": 0, + "content": "Michael A Nielsen. Neural networks and deep learning, volume 25. Determination press San Francisco, CA, USA, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.557, + 0.825, + 0.587 + ], + "angle": 0, + "content": "M-E Nilsback and Andrew Zisserman. A visual vocabulary for flower classification. In CVPR, volume 2, pp. 1447-1454. IEEE, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.595, + 0.825, + 0.64 + ], + "angle": 0, + "content": "Zhou Pan, Feng Jiashi, Ma Chao, Xiong Caiming, Chu Hong Hoi Steven, and E Weinan. Towards theoretically understanding why sgd generalizes better than adam in deep learning. 
In Advances in Neural Information Processing Systems, pp. 21285-21296, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.647, + 0.825, + 0.692 + ], + "angle": 0, + "content": "Maithra Raghu, Justin Gilmer, Jason Yosinski, and Jascha Sohl-Dickstein. Svcca: Singular vector canonical correlation analysis for deep learning dynamics and interpretability. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.7, + 0.825, + 0.744 + ], + "angle": 0, + "content": "Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In International Conference on Machine Learning, pp. 5301-5310. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.752, + 0.825, + 0.783 + ], + "angle": 0, + "content": "Vinay V Ramasesh, Ethan Dyer, and Maithra Raghu. Anatomy of catastrophic forgetting: Hidden representations and task semantics. arXiv preprint arXiv:2007.07400, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.791, + 0.825, + 0.834 + ], + "angle": 0, + "content": "Basri Ronen, David Jacobs, Yoni Kasten, and Shira Kritchman. The convergence rate of neural networks for learned functions of different frequencies. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.843, + 0.825, + 0.887 + ], + "angle": 0, + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. IJCV, 115(3):211-252, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.895, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Shibani Santurkar, Dimitris Tsipras, Andrew Ilyas, and Aleksander Madry. How does batch normalization help optimization? 
Advances in neural information processing systems, 31, 2018." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Kun Shao, Yuanheng Zhu, and Dongbin Zhao. Starcraft micromanagement with reinforcement learning and curriculum transfer learning. IEEE Transactions on Emerging Topics in Computational Intelligence, 3(1):73-84, 2018a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.197 + ], + "angle": 0, + "content": "Siyu Shao, Stephen McAleer, Ruqiang Yan, and Pierre Baldi. Highly accurate machine fault diagnosis using deep transfer learning. IEEE Transactions on Industrial Informatics, 15(4):2446-2455, 2018b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.826, + 0.251 + ], + "angle": 0, + "content": "David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George Van Den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, et al. Mastering the game of go with deep neural networks and tree search. nature, 529(7587):484-489, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.258, + 0.826, + 0.288 + ], + "angle": 0, + "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.296, + 0.826, + 0.326 + ], + "angle": 0, + "content": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. Sequence to sequence learning with neural networks. 
Advances in neural information processing systems, 27, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.334, + 0.826, + 0.376 + ], + "angle": 0, + "content": "Pengxiang Wu, Songzhu Zheng, Mayank Goswami, Dimitris Metaxas, and Chao Chen. A topological filter for learning with label noise. Advances in neural information processing systems, 33: 21382-21393, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.385, + 0.826, + 0.415 + ], + "angle": 0, + "content": "Yuxin Wu and Kaiming He. Group normalization. In Proceedings of the European conference on computer vision (ECCV), pp. 3-19, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.423, + 0.826, + 0.453 + ], + "angle": 0, + "content": "Zhi-Qin John Xu, Yaoyu Zhang, Tao Luo, Yanyang Xiao, and Zheng Ma. Frequency principle: Fourier analysis sheds light on deep neural networks. arXiv preprint arXiv:1901.06523, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.461, + 0.826, + 0.505 + ], + "angle": 0, + "content": "Zhiqin John Xu and Hanxu Zhou. Deep frequency principle towards understanding why deeper learning is faster. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 10541-10550, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.512, + 0.826, + 0.554 + ], + "angle": 0, + "content": "Zhang Yi, Pheng-Ann Heng, and Ada Wai-Chee Fu. Estimate of exponential convergence rate and exponential stability for neural networks. IEEE Transactions on Neural Networks, 10(6):1487-1493, 1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.564, + 0.826, + 0.594 + ], + "angle": 0, + "content": "Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. How transferable are features in deep neural networks? Advances in neural information processing systems, 27, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.602, + 0.826, + 0.631 + ], + "angle": 0, + "content": "Yang You, Igor Gitman, and Boris Ginsburg. 
Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.639, + 0.826, + 0.669 + ], + "angle": 0, + "content": "Matthew D Zeiler and Rob Fergus. Visualizing and understanding convolutional networks. In European conference on computer vision, pp. 818-833. Springer, 2014." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.669 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.3, + 0.119 + ], + "angle": 0, + "content": "A APPENDIX" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.134, + 0.405, + 0.149 + ], + "angle": 0, + "content": "A.1 EXPERIMENTAL SETTINGS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.16, + 0.637, + 0.175 + ], + "angle": 0, + "content": "Datasets. The synthetic and real datasets are summarized in the Tab. 1" + }, + { + "type": "table_caption", + "bbox": [ + 0.273, + 0.187, + 0.724, + 0.203 + ], + "angle": 0, + "content": "Table 1: Descriptions and statistics of the datasets used in this work." + }, + { + "type": "table", + "bbox": [ + 0.222, + 0.213, + 0.778, + 0.332 + ], + "angle": 0, + "content": "
DatasetSize (train/test)ClassesData description
Sine regression5000/5000n/aFunction with four sine components, domain [-2,2]
ImageNet1,281,167/50,0001000Photos of common objects
CIFAR-1050,000/10,00010Photos of common objects, image sizes 32 × 32
CIFAR-10050,000/10,000100Photos of common objects, image sizes 32 × 32
Flowers1,088/27217Find-grained photos of flowers
FGVC Aircraft6,667/3,333100Fine-grained photos of aircraft
Caltech-1013,060/6,084102Photos/paintings/sketches of common objects
CUB-2005,994/5,794200Find-grained photos of birds
DomainNet painting50,416/21,850345Oil Paintings, murals, drawings, tattoos
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.345, + 0.825, + 0.372 + ], + "angle": 0, + "content": "Network Architectures. The FCNNs, CNNs, and Vision Transformers are summarized in the Tab. 2." + }, + { + "type": "table_caption", + "bbox": [ + 0.274, + 0.384, + 0.722, + 0.399 + ], + "angle": 0, + "content": "Table 2: Complexities and architectures of DNNs used in this work." + }, + { + "type": "table", + "bbox": [ + 0.219, + 0.41, + 0.78, + 0.488 + ], + "angle": 0, + "content": "
Model#ParametersMult-addsArchitecture description
FCNN (no res)2k10k4 fc layers [1-32-32-32-1]
FCNN (res)132k390kfc [1-128] → 4 res-blocks [128-128-128] → fc [128-1]
ResNet-5025.6M4.1Gconv → 4 stages with [3,4,6,3] res-blocks → fc
VGG-19143.7M19.8G16 conv layers, 3 fc layers
ViT9.9M77.2M12 Transformer encoder blocks (basic width 256), 1 fc layer
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.504, + 0.825, + 0.547 + ], + "angle": 0, + "content": "Training Hyper-parameters. For the regression task, we train FCNNs with SGD optimizers for 300 epochs. The initial learning rate is 0.1, with a learning rate decay (to 0.01) at the 150th epoch. The batch size is 128, no weight decay (\\(L_{2}\\) regularization) is conducted." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.553, + 0.825, + 0.609 + ], + "angle": 0, + "content": "For the ImageNet classification task with CNNs, we train ResNet-50 and VGG-19 for 120 epochs with SGD optimizers. The initial learning rate is 0.1, with learning rate decays at the 50th and 100th epoch to 0.01 and 0.001, respectively. The batch size is 256, the input image size is \\(224^2\\), and the weight decay coefficient is \\(10^{-4}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.616, + 0.825, + 0.673 + ], + "angle": 0, + "content": "For Vision Transformers on ImageNet dataset, we train them for 200 epochs with Adam optimizers. The peak learning rate is set to 0.0003. We use linear learning rate warm-up for 10,000 iterations, and a subsequent cosine learning rate decay. The batch size is 256, the input image size is \\(224^2\\), and the weight decay coefficient is \\(10^{-4}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.679, + 0.827, + 0.737 + ], + "angle": 0, + "content": "For CNN image classification on other datasets, we train models for 100 epochs with SGD optimizers. Initial learning rate of 0.01 and cosine learning rate scheduler are applied. The batch size is 128, the input image sizes are \\(32^2\\) (for CIFAR) and \\(224^2\\) (for Flowers, Aircraft, Caltech, CUB, and DomainNet), and the weight decay coefficient is \\(10^{-4}\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.642, + 0.119 + ], + "angle": 0, + "content": "A.2 CONVERGENCE MEASUREMENT USING WEIGHT VARIATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.827, + 0.229 + ], + "angle": 0, + "content": "In Section 2, we have introduced the convergence measurement in this work. This measurement is simple and straightforward, and it can show how each layer in a DNN converges during the whole training process (Fig. 1 for fully connected networks and Fig. 2 for CNNs) by examining the distance between the training parameters and the converged parameters. However, it has not been verified whether calculating the parameter distance variation to the convergence point between two adjacent epochs is necessary. After all, the measurement highly depends on the convergence point, which can only be obtained after the whole training process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.235, + 0.828, + 0.324 + ], + "angle": 0, + "content": "We come up with a simplified convergence measurement. This method uses weight variation as a metric to examine how fast a layer is learning, and whether this layer reaches a state of convergence. If a layer is learning actively, it is reasonable that its weights vary drastically during training. For the converged layers, their weights usually keep stable. So we use \\( \\|\\theta_l^{(t_k)} - \\theta_l^{(t_{k+1})}\\|_2 / \\|\\theta_l^{(t_k)}\\|_2 \\), the normalized weight variation of layer \\( l \\) during epoch \\( k \\) and \\( k + 1 \\), to illustrate how actively it is learning." 
+ }, + { + "type": "image", + "bbox": [ + 0.24, + 0.334, + 0.512, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.326, + 0.449, + 0.48, + 0.46 + ], + "angle": 0, + "content": "(a) ResNet-50 Val Acc \\(73.24\\%\\)" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.334, + 0.761, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.581, + 0.449, + 0.726, + 0.46 + ], + "angle": 0, + "content": "(b) VGG-19 Val Acc \\(71.89\\%\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.471, + 0.825, + 0.501 + ], + "angle": 0, + "content": "Figure 11: The convergence processes of ResNet-50 and VGG-19 on ImageNet. The results are illustrated with weight variations. The learning rate decays at epoch 50 and epoch 100." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.515, + 0.827, + 0.671 + ], + "angle": 0, + "content": "The results of ResNet-50 and VGG-19 training process on ImageNet are shown in Fig. 11. From this plot, we can see that after learning rate decays at epoch 50 and 100, the weight variations drop evidently. However, the weight variations of each layer do not show apparent decreasing trend when the learning rate keeps stable, which indicates that the training of DNNs do not converge as usual convex optimization problems do (e.g., linear programming). Therefore, it is hard for us to compare the convergence rates of different layers by observing their convergence curves. We cannot find a clear clue like what was given by the convergence measurement in Section 2 to get the layer convergence bias. All in all, we can safely claim that, it is crucial for the convergence metric to consider direction information to measure how fast different layers are learning towards their convergence points. Our previous convergence measurement really needs to examine convergence by calculating the parameter distance between the current point to convergence point." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.573, + 0.119 + ], + "angle": 0, + "content": "A.3 FACTORS AFFECTING LAYER CONVERGENCE BIAS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.132, + 0.825, + 0.217 + ], + "angle": 0, + "content": "In Section 5, we have shown that the complexity of the datasets is an important factor affecting layer convergence bias. When the fitting target function is complex enough with both low and high frequency components, the shallower layers learn the low low-frequency components while the deeper layers learn the high-frequency components. Here we use the FCNNs with residual connections to show whether some other important factors would affect the layer convergence bias. All following experiments are conducted on the same regression task in Section 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.222, + 0.827, + 0.28 + ], + "angle": 0, + "content": "Model Depth. The default architecture used in previous experiments is the four-blocks FCNN, here we try adding more blocks to make the network deeper and see what change will happen. As shown in Fig. 12, all the networks show layer convergence bias. With more and more res-blocks, the overall convergence of the network becomes slightly faster." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.294, + 0.359, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.236, + 0.403, + 0.324, + 0.414 + ], + "angle": 0, + "content": "(a) 4 Res-Blocks" + }, + { + "type": "image", + "bbox": [ + 0.365, + 0.295, + 0.515, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.394, + 0.402, + 0.482, + 0.413 + ], + "angle": 0, + "content": "(b) 8 Res-Blocks" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.295, + 0.669, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.548, + 0.402, + 0.642, + 0.413 + ], + "angle": 0, + "content": "(c) 12 Res-Blocks" + }, + { + "type": "image", + "bbox": [ + 0.675, + 0.295, + 0.822, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.702, + 0.402, + 0.797, + 0.413 + ], + "angle": 0, + "content": "(d) 16 Res-Blocks" + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.425, + 0.776, + 0.441 + ], + "angle": 0, + "content": "Figure 12: The convergence process of FCNNs with different number of res-blocks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.459, + 0.825, + 0.555 + ], + "angle": 0, + "content": "Learning Rate. The results with different learning rates are shown in Fig. 13. When the learning rate gets smaller, layer convergence bias becomes weaker. This is because the gradient predictiveness w.r.t. parameters of all layers get close to 1 (see Fig. 3 (b,right) for the predictiveness with the learning rate of 0.01). In this case, a layer is less influenced by the updates of parameters in other layers, only the gradient predictiveness w.r.t. data matters for the convergence rate. In addition, smaller learning rates are beneficial for the deeper layers to converge because of their sharper minima." 
+ }, + { + "type": "image", + "bbox": [ + 0.256, + 0.571, + 0.434, + 0.671 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.329, + 0.678, + 0.386, + 0.689 + ], + "angle": 0, + "content": "(a) \\(\\mathrm{LR} = 0.01\\)" + }, + { + "type": "image", + "bbox": [ + 0.441, + 0.571, + 0.588, + 0.671 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.483, + 0.678, + 0.542, + 0.689 + ], + "angle": 0, + "content": "(b) LR=0.03" + }, + { + "type": "image", + "bbox": [ + 0.596, + 0.571, + 0.743, + 0.671 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.638, + 0.678, + 0.69, + 0.689 + ], + "angle": 0, + "content": "(c) \\(\\mathrm{LR} = 0.1\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.247, + 0.7, + 0.75, + 0.716 + ], + "angle": 0, + "content": "Figure 13: The convergence process of FCNNs with different learning rates." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Weight Decay. The experiments with FCNNs in previous sections are conducted without weight decay. It is interesting to investigate the sensitivity of the layer-wise model convergence with different weight decay strengths. The results are shown in Fig. 14. We can see that when the weight decay becomes stronger, the residual blocks converge slower in a more and more similar convergence rate. We conjecture the reason is that weight decay dominates the total loss when its coefficient is large. In this way, the layer parameters with similar initialization scales tend to converge in similar speed toward zero. Because the residual blocks have identical architectures, they share the same initial parameter distribution, and converge in the same speed when weight decay is strong." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Optimizer. 
In Section 4, we have discussed the mechanism behind layer convergence bias. The flatter/sharper minimizers of different layers make SGD learn at different speeds. This is because SGD is more good at finding flatter minimizers (Pan et al., 2020). In Fig. 15, we compare SGD with three adaptive optimizers: Adagrad, RMSprop, and Adam. It is evident that with adaptive optimizers, layer convergence bias does not hold anymore. We conjecture the reason behind this" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.101, + 0.357, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.207, + 0.317, + 0.22 + ], + "angle": 0, + "content": "(a) \\(\\mathrm{WD} = 1\\mathrm{e} - 7\\)" + }, + { + "type": "image", + "bbox": [ + 0.357, + 0.102, + 0.51, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.207, + 0.473, + 0.22 + ], + "angle": 0, + "content": "(b) \\(\\mathrm{WD} = 1\\mathrm{e} - 6\\)" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.102, + 0.666, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.557, + 0.207, + 0.627, + 0.22 + ], + "angle": 0, + "content": "(c) \\(\\mathrm{WD} = 1\\mathrm{e} - 5\\)" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.102, + 0.822, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.713, + 0.207, + 0.783, + 0.22 + ], + "angle": 0, + "content": "(d) \\(\\mathrm{WD} = 1\\mathrm{e} - 4\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.229, + 0.78, + 0.246 + ], + "angle": 0, + "content": "Figure 14: The convergence process of FCNNs with different 
weight decay strengths." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.258, + 0.356, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.256, + 0.366, + 0.303, + 0.378 + ], + "angle": 0, + "content": "(a) SGD" + }, + { + "type": "image", + "bbox": [ + 0.357, + 0.259, + 0.511, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.366, + 0.468, + 0.379 + ], + "angle": 0, + "content": "(b) Adagrad" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.259, + 0.666, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.555, + 0.366, + 0.628, + 0.379 + ], + "angle": 0, + "content": "(c) RMSprop" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.259, + 0.822, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.722, + 0.366, + 0.774, + 0.378 + ], + "angle": 0, + "content": "(d) Adam" + }, + { + "type": "image_caption", + "bbox": [ + 0.257, + 0.39, + 0.741, + 0.406 + ], + "angle": 0, + "content": "Figure 15: The convergence process of FCNNs with different optimizers." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.432, + 0.825, + 0.462 + ], + "angle": 0, + "content": "is that the adaptive optimizers heuristically assign different learning rates for different parameters, making their optimization hardly predictable." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.467, + 0.827, + 0.662 + ], + "angle": 0, + "content": "Normalization Methods. Like residual connection, batch normalization Ioffe & Szegedy (2015) is also a common design in modern DNN architectures. As discussed in previous literature, normalization in the neural networks helps to make the layer inputs more stable and make the loss landscapes smoother, thus accelerates the model training Santurkar et al. (2018). 
In Section 3 and Section 4, we mainly use the FCNNs without normalization to verify and explore the layer convergence bias. Here we investigate how the normalization methods (i.e., batch normalization, layer normalization Ba et al. (2016), and group normalization Wu & He (2018)) help the convergence, and whether the shallower layers still converge faster in these cases. As shown in Fig. 16, all layers converge faster when adding batch normalization to them. Particularly, \"Res-Block 1\" accelerates the most and reach a similar convergence rate as \"Layer 1\". The layer convergence bias also holds for batch normalization. For layer normalization and group normalization, the models show a significantly faster convergence rate than the model using batch normalization. All layers show effective convergence at an early stage of training (i.e., the first five epochs). In these two cases, different layers have similar convergence rates, thus no evident layer convergence bias emerges." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.675, + 0.357, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.78, + 0.342, + 0.79 + ], + "angle": 0, + "content": "(a) Without normalization" + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.675, + 0.512, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.391, + 0.78, + 0.495, + 0.79 + ], + "angle": 0, + "content": "(b) Batch normalization" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.675, + 0.666, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.543, + 0.78, + 0.646, + 0.79 + ], + "angle": 0, + "content": "(c) Layer normalization" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.675, + 0.823, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.697, + 0.78, + 0.802, + 0.79 + ], + "angle": 0, + "content": "(d) Group normalization" + 
}, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.803, + 0.825, + 0.832 + ], + "angle": 0, + "content": "Figure 16: The convergence process of FCNNs with different normalization methods. When using group normalization, we set the group number to 8." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.45, + 0.119 + ], + "angle": 0, + "content": "A.4 RESULTS ON HARDER DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.132, + 0.828, + 0.231 + ], + "angle": 0, + "content": "For verifying the layer convergence bias on more datasets, we show more convergence results on four harder image classification datasets (see Fig. 17). Most of the classes in these datasets only have \\(< 100\\) samples, making them harder to learn. Note that the experiments are conducted with the learning rate of 0.01 (learning rate of 0.1 failed in some cases because these datasets have too many classes but not sufficient samples, leading to non-decreasing loss), some deeper layers have quite similar convergence rates because of the small learning rate. But roughly speaking, layer convergence bias still holds for these datasets." 
+ }, + { + "type": "image", + "bbox": [ + 0.177, + 0.248, + 0.357, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.364, + 0.248, + 0.511, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.248, + 0.666, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.674, + 0.248, + 0.822, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.475, + 0.365, + 0.553, + 0.377 + ], + "angle": 0, + "content": "(a) ResNet-50" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.385, + 0.358, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.386, + 0.512, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.386, + 0.665, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.386, + 0.822, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.482, + 0.501, + 0.551, + 0.514 + ], + "angle": 0, + "content": "(b) VGG-19" + }, + { + "type": "image_caption", + "bbox": [ + 0.233, + 0.525, + 0.764, + 0.541 + ], + "angle": 0, + "content": "Figure 17: The convergence process of CNNs on four image classification tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.575, + 0.51, + 0.59 + ], + "angle": 0, + "content": "A.5 REPEATABILITY OF THE VISUALIZATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.603, + 0.827, + 0.66 + ], + "angle": 0, + "content": "Do different ImageNet trained models produce dramatically different loss landscapes? We plot the loss landscapes of different models with different random seeds in Fig. 18, 19. Quite similar patterns of the landscapes for different layers can be observed on both ResNet and VGG with different random seeds." 
+ }, + { + "type": "image", + "bbox": [ + 0.177, + 0.676, + 0.332, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.765, + 0.296, + 0.776 + ], + "angle": 0, + "content": "(a) Stage 1 Seed 1" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.677, + 0.495, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.765, + 0.459, + 0.775 + ], + "angle": 0, + "content": "(b) Stage 2 Seed 1" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.677, + 0.659, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.543, + 0.765, + 0.624, + 0.776 + ], + "angle": 0, + "content": "(c) Stage 3 Seed 1" + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.677, + 0.82, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.704, + 0.765, + 0.784, + 0.775 + ], + "angle": 0, + "content": "(c) Stage 4 Seed 1" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.782, + 0.332, + 0.87 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.872, + 0.296, + 0.883 + ], + "angle": 0, + "content": "(d) Stage 1 Seed 2" + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.782, + 0.495, + 0.87 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.872, + 0.459, + 0.882 + ], + "angle": 0, + "content": "(e) Stage 2 Seed 2" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.782, + 0.658, + 0.87 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.543, + 0.871, + 0.623, + 0.882 + ], + "angle": 0, + "content": "(f) Stage 3 Seed 2" + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.782, + 0.82, + 0.87 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.704, + 0.871, + 0.785, + 0.882 + ], + "angle": 0, + "content": "(g) Stage 4 Seed 2" + }, + { + "type": 
"image_caption", + "bbox": [ + 0.286, + 0.894, + 0.712, + 0.91 + ], + "angle": 0, + "content": "Figure 18: The loss landscapes of different layers of ResNet-50." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.103, + 0.334, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.193, + 0.298, + 0.204 + ], + "angle": 0, + "content": "(a) Layer 1 Seed 1" + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.103, + 0.498, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.381, + 0.193, + 0.466, + 0.204 + ], + "angle": 0, + "content": "(b) Layer 5 Seed 1" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.103, + 0.659, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.542, + 0.193, + 0.625, + 0.204 + ], + "angle": 0, + "content": "(c) Stage 9 Seed 1" + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.103, + 0.822, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.704, + 0.193, + 0.795, + 0.204 + ], + "angle": 0, + "content": "(d) Stage 13 Seed 1" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.213, + 0.334, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.302, + 0.299, + 0.313 + ], + "angle": 0, + "content": "(e) Layer 1 Seed 2" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.213, + 0.498, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.381, + 0.302, + 0.465, + 0.313 + ], + "angle": 0, + "content": "(f) Layer 5 Seed 2" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.213, + 0.659, + 
0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.542, + 0.302, + 0.626, + 0.313 + ], + "angle": 0, + "content": "(g) Stage 9 Seed 2" + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.213, + 0.822, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.704, + 0.302, + 0.795, + 0.313 + ], + "angle": 0, + "content": "(h) Stage 13 Seed 2" + }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.325, + 0.701, + 0.34 + ], + "angle": 0, + "content": "Figure 19: The loss landscapes of different layers of VGG-19." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.373, + 0.687, + 0.387 + ], + "angle": 0, + "content": "A.6 MODELS OBEYING LAYER CONVERGENCE BIAS PERFORM BETTER" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.402, + 0.825, + 0.472 + ], + "angle": 0, + "content": "In Section 4 and Section 5, it is discussed that layer convergence bias indicates that the shallower layers are learning low-level features (or low-frequency components of the target function). It is reasonable learning low-level features first have greater potential to reach good model performance, since the model can establish its high-level features based on relatively stable low-level feature spaces." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.478, + 0.825, + 0.618 + ], + "angle": 0, + "content": "To examine whether the fast establishment of low-level features benefits model performance, we train four different FCNN models with the same amount of parameters, but different architectures, to fit the Sine target with four components. This experiment is based on a finding that a residual block with more layers in it tends to converge more slowly. We construct four FCNN models, each of them has four residual blocks (maybe in different sizes). The convergence processes are shown in Fig. 20. We can see that the blocks with the largest complexity always converge the most slowly. 
As the block with depth \\(= 4\\) being placed shallower in the FCNN, the regression MSE loss goes higher. In other words, if a shallower layer converges slowly, the model gets poorer performance. This may due to the vulnerability of deeper layers. If they converge based on changing shallower layers, it is hard for them to learn good features based on their unstable inputs." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.637, + 0.357, + 0.738 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.743, + 0.347, + 0.764 + ], + "angle": 0, + "content": "(a) Res-Blocks \\(= (4,1,1,1)\\) Val loss 2.7e-4" + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.637, + 0.512, + 0.738 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.743, + 0.502, + 0.764 + ], + "angle": 0, + "content": "(b) Res-Blocks \\(= (1,4,1,1)\\) Val loss 2.4e-4" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.637, + 0.667, + 0.738 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.743, + 0.656, + 0.764 + ], + "angle": 0, + "content": "(c) Res-Blocks \\(= (1,1,4,1)\\) Val loss 1.8e-4" + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.637, + 0.822, + 0.738 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.687, + 0.743, + 0.812, + 0.764 + ], + "angle": 0, + "content": "(d) Res-Blocks \\(= (1,1,1,4)\\) Val loss 1.4e-4" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.777, + 0.825, + 0.834 + ], + "angle": 0, + "content": "Figure 20: The convergence process of FCNNs with different residual block sizes and their validation performance on the regression task. Each model has a four-layer residual block and three one-layer residual blocks (e.g., \"Res-Blocks=(4,1,1,1)\" means the first residual block has four layers, and the rest three blocks have only one layer)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.826, + 0.926 + ], + "angle": 0, + "content": "The results can also be understood from another perspective. If the deeper block contains more parameters (with more fully connected layers in it), it would be helpful for this block to learn the corresponding high-frequency components of the target function. Therefore, the model can reach better performance. A similar observation is obtained in Section 6.1: when putting wider layers of the ViT deeper, the model can reach higher performance." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.627, + 0.119 + ], + "angle": 0, + "content": "A.7 LAYER CONVERGENCE BIAS FOR VISION TRANSFORMERS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.825, + 0.201 + ], + "angle": 0, + "content": "As discussed in Section 6, ViT can benefit from distributing more parameters in the deeper layers. This result comes from one of our main findings about layer convergence bias: the deeper layers tend to learn high-frequency components of the target function, thus converge more slowly. So adding more parameters for the deeper layers is beneficial for these layers to learn the high-frequency components which are usually harder." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.207, + 0.827, + 0.388 + ], + "angle": 0, + "content": "When making this claim, we do not verify the layer convergence bias for the ViT. The main difficulty for verifying layer convergence bias for ViTs is brought by its typical training scheme. ViT needs adaptive optimizers to train, otherwise it converges very slowly. 
However, adaptive optimizers change the learning rates of different parameters according to their optimization procedures. This leads to unfair convergence comparison between layers, thus affects the layer convergence bias, as shown in Fig 15. Therefore, we try both SGD and Adam optimizers for training ViTs on ImageNet, and see whether layer convergence bias holds in some cases. As shown in Fig. 21 (a), the ViT shows a roughly trend of layer convergence bias when optimizing with Adam, where the deepest \"Encoder Block 12\" converges the slowest. However, some other layers do not strictly obey layer convergence bias (e.g., the shallowest \"Patch Embedding\" does not learn fastest among all blocks). When optimizing with SGD, the ViT shows a good layer convergence bias. The results indicate that ViTs approximately share the same rules as FCNNs and CNNs, thus supports the discussions in Section 6." + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.4, + 0.51, + 0.524 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.53, + 0.444, + 0.542 + ], + "angle": 0, + "content": "(a) ViT Adam" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.4, + 0.727, + 0.524 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.592, + 0.53, + 0.658, + 0.542 + ], + "angle": 0, + "content": "(b) ViT SGD" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.554, + 0.825, + 0.597 + ], + "angle": 0, + "content": "Figure 21: The convergence curves of ViTs on ImageNet with different optimizers. With Adam optimizer, the ViT does not obey the layer convergence bias strictly. While SGD can ensure relatively ideal faster convergence processes of shallower layers." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.625, + 0.559, + 0.639 + ], + "angle": 0, + "content": "A.8 CONNECTION TO LARS OPTIMIZATION SCHEME" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.65, + 0.825, + 0.708 + ], + "angle": 0, + "content": "One of the most important factors that affect the optimization procedure is the learning rate. In this work, it is shown that the shallower layers can learn effectively with large learning rates, but the deeper layers only learn fast after learning rate decays. Is there any connection between layers and its suitable learning rate?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.713, + 0.827, + 0.882 + ], + "angle": 0, + "content": "LARS optimizer You et al. (2017) made a significant contribution to training DNNs with huge batch sizes and large learning rates. The key observation in the literature is that the weight-to-gradient ratio highly varies in different layers. If a layer has greater gradients and relatively smaller weights, it would be hard for it to converge due to the vigorous parameter update. So LARS considers the scale of the weights and its gradient norms in each layer and assigns a local learning rate for a layer to make it converge effectively and stably. For FCNNs in our work, its different hidden layers are initialized with the same scale due to their identical architecture, but the deeper layers usually have larger gradients. As a result, the larger gradients may make these layers struggle to converge. Similarly, the CNNs (i.e., ResNet-50 and VGG-19) have wider deeper layers. These layers have smaller initial parameters, so their gradients may lead to drastic weight variations if the learning rate is too large. In this way, we can understand why they cannot get close to their optimal points effectively at the early stage of training. It explains layer convergence bias from another perspective." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ] +] \ No newline at end of file diff --git a/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_origin.pdf b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..facb72a953bdf35a0fd72018a5ca2d6022643e0c --- /dev/null +++ b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/38a6c3d5-1c42-41e7-83e7-973b9e617235_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd1c43d21745352e4f46ce87415147710a0ac157f73106cfc34970099d8630d9 +size 3622317 diff --git a/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/full.md b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b5f54a61a2814465a4164b5b5c50a7f5c5668eb7 --- /dev/null +++ b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/full.md @@ -0,0 +1,556 @@ +# WHICH LAYER IS LEARNING FASTER? A SYSTEMATIC EXPLORATION OF LAYER-WISE CONVERGENCE RATE FOR DEEP NEURAL NETWORKS + +Yixiong Chen $^{1}$ Alan Yuille $^{2}$ Zongwei Zhou $^{2}$ + +1The Chinese University of Hong Kong - Shenzhen 2Johns Hopkins University + +yixiongchen@link.cuhk.edu.cn ayuille1@jhu.edu zzhou82@jh.edu + +# ABSTRACT + +The deeply hierarchical structures enable deep neural networks (DNNs) to fit extremely complex target functions. 
However, the complex interaction between layers also makes the learning process of a particular layer poorly understood. This work demonstrates that the shallower layers of DNNs tend to converge faster than the deeper layers. We call this phenomenon Layer Convergence Bias. We also uncover the fundamental reason behind this phenomenon: Flatter local minima of shallower layers make their gradients more stable and predictive, allowing for faster training. Another surprising result is that the shallower layers tend to learn the low-frequency components of the target function, while the deeper layers usually learn the high-frequency components. It is consistent with the recent discovery that DNNs learn lower frequency objects faster. + +# 1 INTRODUCTION + +Over the last decade, breakthrough progress has been made by deep neural networks (DNNs) on a wide range of complicated tasks in computer vision (Krizhevsky et al., 2017), natural language processing (Sutskever et al., 2014), speech recognition (Graves et al., 2013), game playing (Silver et al., 2016), and biomedical prediction (Jumper et al., 2021). Such progress hinged on a number of advances in hardware technology, dataset construction, and model architectural designs. Among them, the invention and application of very-deep network architectures play a decisive role. + +Deepening the network is an effective way to empower its fitting ability. Extensive studies (Delalleau & Bengio, 2011; Eldan & Shamir, 2016; Lu et al., 2017) compared the power between deeper and wider neural networks and showed that the polynomial growth of depth has a similar effect to the exponential growth of width. Therefore, modern DNNs (Simonyan & Zisserman, 2014; He et al., 2016) usually contain tens of layers to ensure their modeling abilities for real-world applications. 
+ +Although the practical success of deep architectures is indisputable, they make the learning hardly predictable since complex interaction happens between layers when co-adapting to the target (Yosinski et al., 2014). By now, we still have a poor understanding of how different layers learn differently. Currently, a widely accepted view relates to the vanishing gradient problem Hochreiter (1991); Hochreiter et al. (2001). The gradients are getting weaker and weaker as they move back through the hidden layers, making the shallower layers converge more slowly (Nielsen, 2015). Informally, it is reasonable that larger gradient values bring higher learning speed. + +Even though this view somewhat makes sense, we seem to have little concrete evidence supporting it. In particular, it is dubious how higher-level features can be built based on the unstable features extracted by the unconverged shallower layers (Raghu et al., 2017). This paper aims to find a credible answer for the parameters of which layer are learning faster towards the convergence point (defined as the convergence rate in this work) with a systematic exploration. Our results lead to somewhat startling discoveries. + +Our Contributions. Our point of start is illustrating that there does not seem to be a reliable positive correlation between the gradient magnitude and the convergence rate of a particular layer. + +Instead, we find that shallower layers tend to converge faster than the deeper ones, even with smaller gradients. The phenomenon is called layer convergence bias in this paper. + +We then turn our attention to excavating the underlying mechanism for the faster convergence of shallower layers. Specifically, we find out that the depth of a layer has a fundamental effect on its training: the parameters of shallower layers are usually optimized on flatter landscapes than deeper layers. 
This finding reveals that the gradients of shallower layers may be more predictive and thus have the potential to allow the larger learning rates (LRs) to be performed, making the convergence faster. + +Finally, we find that the layer convergence bias is also tied to the frequency of the function they are modeling. When fitting a complex target function, the shallower layers tend to fit the low-frequency (usually simpler) components. On the contrary, the deeper layers struggle to fit the remaining high-frequency components. It is a consistent result of the recent discovery that DNNs prioritize learning low-frequency components of the modeling function, while having very low learning speed on high-frequency components that tend to be more complex (Rahaman et al., 2019). This finding provides us with another perspective to understand why deeper layers learn more slowly. + +We believe that understanding the roots of such a fundamental convergence bias can give us a better grasp of the complicated learning process of DNNs. In turn, it can motivate more in-depth algorithmic progress for the deep learning community. + +This paper is organized as follows. In Section 2, we introduce our method for measuring convergence speed for different layers, and formally define the layer convergence bias. In Section 3, we examine the relationship between gradient magnitude and convergence rate, and show that the shallower layers tend to converge faster even with smaller gradients. Then in Section 4, we analyze the mechanism behind the layer convergence bias in DNN training. The layer-frequency correspondence is demonstrated in Section 5. The practical significance of layer convergence bias is presented in Section 6. We further discuss the related work in Section 7 and conclude in Section 8. + +# 2 LAYER CONVERGENCE BIAS + +The deep architecture of DNNs is arguably one of the most important factors for their powerful fitting abilities. 
With the benefit brought by the deep structures, there are also extra complexities in the training process coming into being. So far, we do not have a firm conclusion about whether some layers are learning faster than others. + +For examining the convergence progress for a DNN, a common practice is checking its loss curve. However, this is not applicable for comparing the convergence between different layers. In this work, we define a measurement for layer-wise convergence in the following. + +Definition 2.1 (Layer-wise convergence rate) At the training time $t$ , let the deep neural network with $L$ layers $\{T_{l}^{(t)}\}_{l=1}^{L}$ be $f(\pmb{x}) = (T_{L}^{(t)} \circ T_{L-1}^{(t)} \circ \dots \circ T_{1}^{(t)})(\pmb{x}) : \mathbb{R}^{i} \to \mathbb{R}^{o}$ , where $i, o$ are the dimension of its inputs and outputs. We use $\theta_{l}^{(t)}$ to denote the parameters of the $l$ -th layer $T_{l}^{(t)}$ . Assuming that $\theta_{l}^{(t)}$ can finally converge to its optimal point $\theta_{l}^{*}$ when $t \to \infty$ , we define the convergence rate of $\theta_{l}$ during the time interval $[t_1, t_2]$ to be + +$$ +C _ {l} ^ {(t _ {1}, t _ {2})} = \frac {1}{(t _ {2} - t _ {1})} \cdot \frac {\| \theta_ {l} ^ {(t _ {1})} - \theta_ {l} ^ {*} \| _ {2} - \| \theta_ {l} ^ {(t _ {2})} - \theta_ {l} ^ {*} \| _ {2}}{\| \theta_ {l} ^ {(t _ {0})} - \theta_ {l} ^ {*} \| _ {2}}, +$$ + +where $t_0$ denotes the time point when the training starts. + +In this definition, the numerator $\| \theta_l^{(t_1)} - \theta_l^*\| _2 - \| \theta_l^{(t_2)} - \theta_l^*\| _2$ denotes how much the distance of the parameter $\theta_{l}$ to the optimal point is shortened in the period $[t_1,t_2]$ . The denominator $\| \theta_l^{(t_0)} - \theta_l^*\| _2$ represents the distance between the initial point to the convergence point, whose primary function is to normalize the speed, allowing the convergence of different layers to compare with each other. 
Thus, the convergence rate of $\theta_{l}$ can be understood as the ratio of normalized distance to time. Common optimization works (Yi et al., 1999; Nesterov, 2003) defined the rate of convergence for $\theta$ as $\lim_{k\to \infty}\frac{\|\theta^{(k + 1)} - \theta^*\|_2}{\|\theta^{(k)} - \theta^*\|_2}$ . It focuses on measuring an exponential level convergence when the + +optimization step goes to infinity. Since the difference in convergence rates between layers usually appears at an early stage of training, and it is not large enough to compare at an exponential level, we define our new convergence metric to present the convergence difference in a clearer way. + +Observation 2.1 (Layer convergence bias). For $l_1 < l_2$ , $\exists \tilde{t} > 0$ , such that $C_{l_1}^{(t_1, t_2)} > C_{l_2}^{(t_1, t_2)}$ when $t_1 < t_2 < \tilde{t}$ . + +Layer convergence bias indicates that at an early training phase $t < \tilde{t}$ , the parameters $\theta_{l_1}$ of a shallower layer $l_1$ tend to move to $\theta_{l_1}^*$ faster than a deeper layer $\theta_{l_2}$ moving to $\theta_{l_2}^*$ . In the following, we use both synthetic and real datasets to show that the layer convergence bias appears for both fully-connected neural networks (FCNNs) and convolutional neural networks (CNNs). + +# 3 VERIFICATION OF LAYER CONVERGENCE BIAS + +In this section, we try to substantiate the central claim of this work. First, we use the FCNNs to show that the shallower layers tend to converge faster than the deeper layers on the regression task, even when the gradient values for shallower layers are smaller. We then use CNNs with modern architectures to verify that layer convergence bias is a common phenomenon in practical applications. All experimental settings in this work can be found in Appendix A.1. 
+ +# 3.1 LAYER CONVERGENCE BIAS IN FULLY-CONNECTED NETWORKS + +For FCNNs, we construct a simple regression task to demonstrate that layers with smaller gradients do not necessarily learn more slowly than layers with larger gradients. The fitting target is $f(x) = \sin (x) + \frac{1}{3}\sin (3x) + \frac{1}{10}\sin (10x) + \frac{1}{30}\sin (30x)$ , with mean square error loss for training. + +First, we use the FCNN [1-32-32-32-1] with the Sigmoid activations as a simple example. In the following analysis, the first fully-connected layer (1-32) is named Layer 1, and the subsequent two layers (32-32) are called Hidden layer 1, Hidden layer 2 respectively. The gradient values and the convergence processes for these layers are shown in Fig. 1 (a). Two observations can be obtained from the plots: 1) The gradient of Hidden layer 1 is nearly always smaller than the gradient of Hidden layer 2. 2) Although shallower layers have smaller gradients, they seem to converge faster. For the first 50 epochs, the shallower layers are moving faster to their convergence point (e.g., $C_{Layer_1}^{(t_0,t_{50})} \approx 0.012$ , $C_{Hidden\_layer_1}^{(t_0,t_{50})} \approx 0.009$ , $C_{Hidden\_layer_2}^{(t_0,t_{50})} \approx 0.006$ ), which is inconsistent with the previous view that higher gradients lead to faster learning (Nielsen, 2015). + +To further validate the above results with a deeper network, we adopt residual connections (He et al., 2016) for the FCNN (deep network fails to be trained in this task without residual connections) and use the ReLU activation function. The FCNN [1-(128-128)-(128-128)-(128-128)-(128-128)-1] with four residual blocks of width 128 shows similar results to the shallow FCNN without residual connection (see Fig. 1 (b)). 
In this case, the difference in layer-wise convergence rate can be observed even earlier (i.e., $C_{Res - Block_1}^{(t_0,t_5)} \approx 2C_{Res - Block_4}^{(t_0,t_5)}$ ), which shows that the layer convergence bias also happens for deeper FCNNs with residual connections. It is noteworthy that our convergence metric is crucial to observe the layer convergence bias, which is elaborated in Appendix A.2. + +![](images/01daa463fcec291619bed042e058d7cd00a900134e3e6ef5025120b39f7e0c8d.jpg) +(a) FCNN without residual connection + +![](images/366ed45631c4991c2eb157aa0417ce6e0d19f38cd21676dff2458c94a8777658.jpg) +(b) FCNN with four residual blocks +Figure 1: Left (a,b): The absolute mean gradient values for different layers for FCNNs w/o residual connections in training. For both networks, deeper layers have larger gradients. Right (a,b): The convergence process of different layers for FCNNs. Shallower layers converge faster. + +![](images/38ce4d83602ce56082f5d9781a1105280b6566c07103a885666d054749025f08.jpg) +(a) ResNet-50 Val Acc $73.24\%$ + +![](images/a422b587ba3b45adb3388e705d0e42809d28d6d38418adbde044e773073a06b6.jpg) +(b) VGG-19 Val Acc $71.89\%$ +Figure 2: The convergence process of ResNet-50 and VGG-19 on ImageNet. During the first 50 epochs, shallower layers converge much faster than deeper layers. After the learning rate decays at the 50th epoch, parameters of deeper layers accelerate to move to their convergence points. + +Clearly, these results cannot reconcile with the previous view that larger gradients bring a higher learning speed for deeper layers, at least for the DNNs used in this work. Instead, from the optimization point of view, the parameters of shallower layers are learning faster to converge. + +# 3.2 LAYER CONVERGENCE BIAS IN CONVOLUTIONAL NETWORKS + +Real-world datasets are very different from the synthetic data used in our previous experiments. 
In order to utilize the layer convergence bias to understand and better improve DNNs in real applications, it is important to verify whether the layer convergence bias holds for CNNs on images. + +In the following experiments, we examine the layer-wise convergence process on ImageNet (Rusakovsky et al., 2015) dataset with both ResNet-50 (He et al., 2016) and VGG-19 (Simonyan & Zisserman, 2014). We train the CNNs for 120 epochs with learning rate decay at the 50th epoch $(0.1\rightarrow 0.01)$ and the 100th epoch $(0.01\to 0.001)$ . The training processes are shown in Fig. 2. + +For ResNet-50, we visualize the learning process of the first convolutional layer and its subsequent four stages. One can easily observe that at the beginning of training, the shallower layers converge much faster than the deeper layers ( $C_{Stage1}^{(t_0,t_{20})} \approx 3C_{Stage4}^{(t_0,t_{20})}$ ). However, after the learning rate decays at the 50th epoch, deeper layers begin to learn effectively and achieve a higher convergence rate than the shallower layers ( $C_{Stage1}^{(t_{50},t_{60})} \approx 0.5C_{Stage4}^{(t_{50},t_{60})}$ ). We conjecture that the initial learning rate is too large for the deeper layers to learn. + +For VGG-19, we visualize its 1st, 5th, 9th, 13th, and 17th layers. This network shows a more significant convergence difference between layers than ResNet-50. At the first training stage with the initial learning rate, $\| \theta_l^{(t_5)} - \theta_l^*\| >\| \theta_l^{(t_0)} - \theta_l^*\|$ for $l\in \{5,9,13,17\}$ , which means that all layers but the first one even slightly diverge. Usually, the divergence appears when the learning rate is too large. This phenomenon confirms that the deeper layers cannot effectively learn with the large learning rate at the beginning. + +The experiments of FCNNs and CNNs verify that layer convergence bias is a common phenomenon for DNNs. 
In Section 5 and Appendix A.3, A.4, we discuss the factors that would affect the phenomenon, and some in-depth findings they reveal. + +# 4 MECHANISM BEHIND LAYER CONVERGENCE BIAS + +So far, our investigation shows that the seemingly-right perspective for linking the layer-wise gradient and convergence rate is tenuous, at best. Both FCNNs and CNNs demonstrate an evident bias that shallower layers learn faster. Can we explain why this is the case? + +Gradient Predictiveness. Since gradient values cannot determine the convergence rate, we wonder if the directions of the gradients play a more critical role. More chaotic update directions make convergence slower. Here we examine the gradient predictiveness (Santurkar et al., 2018) of different layers. If the gradient behavior is "predictive", less change in the gradient directions would appear when 1) the gradients are calculated with different batches of data; 2) the parameters of other layers update. Predictiveness can also be simply understood as the stability of gradient direction. + +![](images/d8a1116278bfd7475a7dd383c92f1ae3120ea0e0aab2194a5560d1e0174023af.jpg) +(a) Gradient predictiveness w.r.t. data + +![](images/f49f932ecb10171b0587245b6b5f4bc458ab696742d61826cebd388d3706a4ac.jpg) +(b) Gradient predictiveness w.r.t. parameters +Figure 3: The gradient predictiveness of shallower and deeper layers of FCNN. The learning rate decreases from 0.1 to 0.01 at Epoch 150. + +Definition 4.1 Let $(x^{(t)},y^{(t)})$ be a batch of input-label pairs for the DNN to train at time $t$ , and $(x^{\prime (t)},y^{\prime (t)})$ be another batch of data. We define the gradient predictiveness of the $l$ th layer at time $t$ w.r.t. data as the cosine similarity $\begin{array}{r}\operatorname {sim}(G_{l,t},G_{l,t}^{\prime}) = \frac{\|G_{l,t}G_{l,t}^{\prime}\|}{\|G_{l,t}\|\|G_{l,t}^{\prime}\|}\in [-1,1] \end{array}$ . Likewise, the gradient predictiveness w.r.t. 
parameters is defined as $\operatorname {sim}(G_{l,t},G_{l,t}^{\prime \prime})$ , where + +$$ +\begin{array}{l} G _ {l, t} = \nabla_ {\theta_ {l} ^ {(t)}} L \left(\theta_ {1} ^ {(t)}, \dots , \theta_ {L} ^ {(t)}; x ^ {(t)}, y ^ {(t)}\right) \\ G _ {l, t} ^ {\prime} = \nabla_ {\theta_ {l} ^ {(t)}} L (\theta_ {1} ^ {(t)}, \dots , \theta_ {L} ^ {(t)}; x ^ {\prime (t)}, y ^ {\prime (t)}) \\ G _ {l, t} ^ {\prime \prime} = \nabla_ {\theta_ {l} ^ {(t)}} L (\theta_ {1} ^ {(t + 1)}, \dots , \theta_ {l - 1} ^ {(t + 1)}, \theta_ {l} ^ {(t)}, \theta_ {l + 1} ^ {(t + 1)}, \dots , \theta_ {L} ^ {(t + 1)}; x ^ {(t)}, y ^ {(t)}) \\ \end{array} +$$ + +Here, $G_{l,t}$ corresponds to the gradient of $\theta_l^{(t)}$ . $G_{l,t}^{\prime}$ is the gradient of this layer with another batch of data, while $G_{l,t}^{\prime \prime}$ means the gradient after all the other layers have updated to new values. Therefore, $sim(G_{l,t},G_{l,t}^{\prime})$ indicates the stability of gradients with different data batches. $sim(G_{l,t},G_{l,t}^{\prime \prime})$ reflects whether the currently estimated gradient is in a consistent decreasing direction when the loss landscape is affected by the updating of other layers' parameters. The gradient predictiveness during training is shown in Fig. 3, where Res-Block 1 has more predictive gradients than Res-Block 4. + +Visualizing the Loss Landscapes. We are curious about why gradients for deeper layers have poorer predictiveness. A hypothesis is that the loss landscapes for deeper layers are more rugged, making the parameters fluctuate more. A straightforward method to validate this hypothesis is plotting the loss landscapes for the parameters. To do this for a particular layer $l$ , one can choose a central point $\theta_{l}^{*}$ and two direction vectors $d_{l,1}$ , $d_{l,2}$ . 
Then the loss landscape can be drawn with + +$$ +f \left(\beta_ {1}, \beta_ {2}\right) = L \left(\theta_ {l} ^ {*} + \beta_ {1} d _ {l, 1} + \beta_ {2} d _ {l, 2}\right) +$$ + +in the 3D space with $\beta_{1},\beta_{2}$ forming a simplified parameter space. In this work, we generate random Gaussian directions for different layers, and normalize them to obtain the same norm of the corresponding layer. Specifically, we make the replacement $d_{l}\gets \frac{d_{l}}{\|d_{l}\|}\| \theta_{l}^{*}\|$ for a fully connected layer. For a convolutional layer, we use filter-wise normalization $d_l^k\gets \frac{d_l^k}{\|d_l^k\|}\| \theta_l^{k*}\|$ as in (Li et al., 2018), where $d_l^k$ represents the $k$ th filter of the $l$ th layer. We set both $\beta_{1}$ and $\beta_{2}$ in the domain of $[-1,1]$ . + +**Landscapes for FCNN.** The loss landscapes for four residual blocks of the FCNN are shown in Fig. 4. For the shallower blocks, the surfaces are flatter near the minimizer, meaning that the gradient magnitudes may be small. However, small gradients do not necessarily lead to slow learning speed in this case. Combined with the gradient predictiveness discussed above, a flatter loss landscape may lead to more consistent gradient directions, making the learning more smooth. + +Landscapes for CNNs. The loss landscapes for ResNet-50 and VGG-19 on ImageNet are shown in Fig. 5. It is interesting that deep convolutional networks with/without residual connections present totally different loss landscapes. For ResNet-50, its landscapes near the convergence point $\theta_l^*$ are smooth and nearly convex, making the neural network easier to train. On the contrary, VGG-19 has much more shattered landscapes, the initial iterations probably lie in the chaotic regions, prohibiting its training (Balduzzi et al., 2017). This may explain the much less efficient convergence towards the optimal point for VGG than ResNet at the initial phase (Fig. 2). 
+ +![](images/dcf9e20c6e81e7c73e4b19b4206b4afdb783893a1e47d077efe5cca2fb4b1872.jpg) +(a) Res-Block 1 + +![](images/ff6a098118a0a6866965702a9e436500e7a6d9c8c802352f9ca8ab635e08db12.jpg) +(b) Res-Block 2 + +![](images/1787402fa4768622d7529fd2645f10d7054d15e5be80417749af249eb960c844.jpg) +(c) Res-Block 3 + +![](images/465dae870d469c4cd45fee3ef006b75e4bd48370444c96442e2dc92599785c90.jpg) +(d) Res-Block 4 + +![](images/eec5cf65633ddf56eafd0ca7308921055aeac035f68d35a2c9b618030e483e68.jpg) +Figure 4: The loss landscapes of different layers of FCNN. Deeper layers are optimized on more rugged landscapes, slowing down the learning process. +(a) ResNet-50 Res-Block 1 +Figure 5: The loss landscapes of different layers of ResNet-50 (a,b) and VGG-19 (c,d) on ImageNet. The shallower layers for both networks have flatter minima, making them converge faster than the deeper layers. The plots for all layers can be found in Appendix A.5. + +![](images/279c1246dcbe7bca3a88308d661c3f202523a38c709bd62867e8e958c5143087.jpg) +(b) ResNet-50 Res-Block 4 + +![](images/3ae53b8f727a9a11ba44cd6c77636a06eed18e34892842dc485e7307f69fe3c3.jpg) +(c) VGG-19 Layer 1 + +![](images/4d6c447583c0f747b2ca42a90a6cd78058ba671f16c8032956bced0511abb923.jpg) +(d) VGG-19 Layer 13 + +Comparing different layers in the CNNs, the answer for layer convergence bias becomes clearer. The key difference between different layers' loss landscapes of ResNet-50 is the sharpness of the local minima (Fig. 5 (a,b)). We conjecture it is because of a well-known fact that the shallower layers of CNNs tend to learn general features which are applicable to various datasets and tasks, while the deeper layers usually learn task-specific features (Yosinski et al., 2014). Before our work, (Zeiler & Fergus, 2014) also revealed that the general features in a five-layer CNN stabilized faster than the specific features. 
Since the general features are more evenly distributed, they usually cause less fluctuation for training, leading to flatter optima. Theoretically, flatter minimizers are easier to be found by SGD optimizers (Pan et al., 2020). For VGG-19, its shallower and deeper layers also have flatter and sharper minima (Fig. 5 (c,d)), respectively. The shattered loss landscape for its deeper layers may also explain its inefficient learning process with a large learning rate (Fig. 2 (b)). + +Here we summarize the mechanism behind layer convergence bias: the parameters of shallower layers are easier to optimize due to their flatter loss landscapes. At a higher level, shallower layers learn general features, which are usually easier. + +# 5 DEeper LAYERS FIT THE HIGH-FREQUENCY COMPONENTS + +Recent advances in the learning process of DNNs (Rahaman et al., 2019; Ronen et al., 2019; Xu & Zhou, 2021) revealed that the low-frequency components of the target function are fitted much faster than the high-frequency components. There is a natural question about whether there is some inherent link between layer convergence bias and this result. In this section, we investigate the answer, and surprisingly find that: the low-frequency parts are usually fitted by the shallower layers, while the remaining higher frequencies are mainly learned by the deeper layers. It provides us with an alternative perspective to understand the layer convergence bias. + +The Correspondence for FCNN. With the residual structures, we can straightforwardly visualize what each block of a FCNN learns. Considering the FCNN with one input layer $z_0 = T_0(x) : \mathbb{R}^1 \to \mathbb{R}^{128}$ , four residual blocks $z_l = T_l'(z_{l-1}) = T_l(z_{l-1}) + z_{l-1} : \mathbb{R}^{128} \to \mathbb{R}^{128}$ , $l \in \{1, 2, 3, 4\}$ , and an output layer $y = T_5(z_4) : \mathbb{R}^{128} \to \mathbb{R}^1$ . 
The whole network can be expressed as + +$$ +y = T _ {5} \left(z _ {1} + T _ {2} \left(z _ {1}\right) + T _ {3} \left(z _ {2}\right) + T _ {4} \left(z _ {3}\right)\right) = T _ {5} \left(z _ {1}\right) + T _ {5} \left(T _ {2} \left(z _ {1}\right)\right) + T _ {5} \left(T _ {3} \left(z _ {2}\right)\right) + T _ {5} \left(T _ {4} \left(z _ {3}\right)\right) +$$ + +if the output layer $T_{5}$ is a linear transformation. The fitting results for each layer are shown in Fig. 6. It can be seen that the deeper layers tend to fit the more complex components of the target function $y = \sin (x) + \frac{1}{3}\sin (3x) + \frac{1}{10}\sin (10x) + \frac{1}{30}\sin (30x)$ . Besides the curvature, the fitted functions + +![](images/bb78dfff1b269ef8e0ea2ec041cad774391c664cadbe592299e2d6fde9fb54c6.jpg) +y + +![](images/30d9145078e0f083998f1660437ed0ce09d7cb65f2eb734b7994195063078aa4.jpg) +$T_{5}(z_{1})$ +Figure 6: The visualization of what each residual block of the FCNN learns. From the first to the fourth block, the fitted function becomes more complex with smaller amplitude. + +![](images/d6ef0e059f3f0dc1645ae9f7607ec365446cff1498dff72ba12042b3787d4a0d.jpg) +$T_{5}(T_{2}(z_{1}))$ + +![](images/a30a74cbbe2923ef6bbaa39191a64525217c3f262158cad296ab2fe71c630c51.jpg) +$T_{5}(T_{3}(z_{2}))$ + +![](images/25db2ae0c1966056c496cf9e6d63337ebe2588d47d0006f42ed1f6b9915cde76.jpg) +$T_{5}(T_{4}(z_{3}))$ + +![](images/a1a9026ccad7593f40929b85467635e4d9c29f8acc3f9ec6c9a14cdb9e90f51b.jpg) +(a) ResNet-50 +Figure 7: The visualization of response frequencies for CNNs. As the training goes on, deeper layers become more sensitive to perturbations, indicating that they have higher response frequencies. + +![](images/79e1ba213d462d7a60bd8a5e3e789a4ff15bf0cfd18dcfb9ce196cc6f8b6035b.jpg) +(b) VGG-19 + +are also consistent with the amplitudes of the components. Specifically, the ranges of the four fitted functions are 2.3, 0.7, 0.5, and 0.06, which are similar to the four components. 
This result further confirms the relationship between layers and frequencies. + +The Correspondence for CNNs. For CNNs, we verify their layer-frequency correspondence through the response frequency (Xu et al., 2019). In a nutshell, if an input-output mapping $f$ possesses significant high frequencies, then a small change in its input induces a large change in the output. We generate standard Gaussian-distributed input $x$ for different residual blocks of ResNet-50 and different layers of VGG-19. At the same time, small Gaussian perturbation $\Delta x$ is added to the input. A larger change $\Delta y$ of the layer output means the layer handles higher frequencies. The response frequencies are shown in Fig. 7. At the first 5 epochs of training on ImageNet, different layers for both ResNet-50 and VGG-19 do not show significantly different response frequencies. But after about ten epochs, the response frequencies for deeper layers (e.g., stage 4 for ResNet-50, layer 13 for VGG-19) increase while the shallower layers show lower response frequencies. Therefore, we conclude that the layer-frequency correspondence also holds for CNNs. In addition, it is not an innate nature of the layers, but a result of the training process. + +How the target frequency affects layer convergence bias? To demonstrate the effect of layer-frequency correspondence on the layer convergence bias, we try fitting simpler targets with less high-frequency components, and see what would happen to the layer-wise convergence rate of FCNN. In Fig. 8 (a-d), we only keep several lowest frequencies of the target, e.g., the target function $y = \sin(x)$ is named "Complexity=1", and $y = \sin(x) + \frac{1}{3} \sin(3x)$ is named "Complexity=2", etc. After discarding more and more high-frequency components, the deeper layers converge faster and faster. In this case, the layer convergence bias does not strictly hold anymore. In Fig. 8 (b), the Res-Block 4 converges faster than Res-Block 3 after the 5th epoch. 
In Fig. 8 (c), the Res-Block 4 converges with a similar speed as Res-Block 2, while the Res-Block 3 even learns faster than Res-Block 2. It seems that removing the high-frequency component that corresponds to a deep layer can effectively accelerate its training. For CNNs, we also observe similar phenomena (Fig. 8 (e-h)). On simpler targets (e.g., CIFAR 10), the deeper layers converge faster than on more complex targets (e.g., CIFAR100). An implication of this result is that the data complexity may be too low for the model. In practice, CIFAR datasets only need ResNet-18 to fit well (Wu et al., 2020). + +In fact, (Rahaman et al., 2019) had shown that different layers have some links to different frequencies, but the authors did not provide further insight for this phenomenon. This work verifies the underlying relationship between layers and fitting frequencies, and establishes a connection for this relationship to the layer convergence bias. + +![](images/c2703188bafc4178271ae4735f09e3fbe016ef647c53c152323a69e9d1cd12f4.jpg) +(a) FCNN Complexity=4 + +![](images/d49a2ff4a53e499804eb29b593fb5e0e3e0f37fc6cfc4cc3d3fe253fb2182f8b.jpg) +(b) FCNN Complexity=3 + +![](images/5c16f8c07486995949efa3f7a87b00691ce5eee35754ebb1625df16072e9dfeb.jpg) +(c) FCNN Complexity=2 + +![](images/81e9cbf6884ab5717b67482b33b04616ceaebe7f6af14870434a4e64affab2a3.jpg) +(d) FCNN Complexity $= 1$ + +![](images/7ec69d55de3b60c5c24832ef1d9a12997306db37fae0246e09c0eaf129ea5c42.jpg) +(e) ResNet-50 CIFAR100 + +![](images/c6ee73b224215a5536d2c53e6ac7f83ef3a1e68516fdd3b1772b11f7f499bb1e.jpg) +(f) ResNet-50 CIFAR10 + +![](images/e5c47c8658bab4c2e4e07fda183dc40cb52d0a977494cb3c695f25af9d3165f1.jpg) +(g) VGG-19 CIFAR100 +Figure 8: The convergence curves with different learning target complexities. (a-d): Decreasing target complexities for FCNNs. The deeper layers accelerate more than the shallower ones when high-frequency components are removed. 
(e-h): For CNNs, the deepest layers (i.e., Stage 4 / Layer 17) learn faster on CIFAR10 than on CIFAR100 while the other layers do not change much. + +![](images/fd22289d46f02ff40c21c1a9f04c20aa6999d25ce28ec1f07b215a4f637c2573.jpg) +(h) VGG-19 CIFAR10 + +# 6 PRACTICAL SIGNIFICANCE + +Up to now, we have been analyzing the layer convergence bias from a theoretical perspective. This section discusses its practical use to drive the development of DNN architecture design, and a new explanation for the acceleration effect of transfer learning with the help of layer convergence bias. + +# 6.1 DNN ARCHITECTURE DESIGN + +Modern CNN architectures (He et al., 2016) usually contain layers from narrow to wide (e.g., 64 channels of the first layer to 2048 channels of the last layer). From the perspective of computational complexity, the narrower shallower layers make the corresponding large feature maps less computation-consuming. Considering the layer convergence bias, deeper layers with larger capacities are also beneficial for the corresponding high-frequencies to be learned easier. Although this is a common design for CNNs, Transformers (Dosovitskiy et al., 2020) usually apply the same architecture for all encoders. For a vision Transformer with 12 encoders, we use encoders with width $2/4/8$ to construct three variants. The variants only differ in the arrangement of different encoders, we use $W$ to denote the widths, and $N$ to denote the number of each kind of encoders. The configures are summarized below: + +- deeper encoders wider: $W = (2,4,8)$ , $N = (6,3,3)$ +- vanilla architecture: $W = (4, 4, 4)$ , $N = (4, 4, 4)$ +- deeper encoders narrower: $W = (8,4,2)$ , $N = (3,3,6)$ + +![](images/67c9db64e8a19b92a2e2742a4ec2cc2a9bfd477ef8ce171a951b58f590686f34.jpg) +Figure 9: Performance of three variants of ViTs on ImageNet. + +Fig. 9 shows their performances, with the best accuracy of $80.75\%$ , $78.88\%$ , and $75.75\%$ respectively. 
We find that with the same number of parameters, putting the wider layers deeper results in higher training performance. This finding may serve as an effective way to improve the model capacity. The causal connection between layer complexity distribution and model performance is discussed in Appendix A.6. And layer convergence bias for ViT is analyzed in Appendix A.7. + +# 6.2 ACCELERATION EFFECT OF TRANSFER LEARNING + +Transfer learning (fine-tuning with the pre-trained models) is a widely-used technique that can accelerate the model convergence (Shao et al., 2018b; a; Liang & Zheng, 2020). We show the layer convergence curves w/o transfer learning on the Flowers dataset (Nilsback & Zisserman, 2006). When training from scratch (Fig. 10 (a)), the shallower layers converge faster so that the deeper layers can extract semantic features based on basic features. Local minima of Stage 4 is sharp in this case. However, with transfer learning (Fig. 10 (b)), deeper layers can directly be built on the pre-trained basic features. The Stage 4 shows a much higher convergence rate among all layers, its loss landscape also becomes flatter. Two observations that are not consistent with layer convergence bias are summarized in the following: 1) the pre-trained shallower layers are nearly optimal, so they don't present fast convergence in transfer learning; 2) although the pre-trained deeper layers are not as optimal as the shallower layers do, their loss landscapes are much flatter than training from scratch, which makes them converge much faster. + +![](images/08d76005d97ea52de82dddccf093f0e6f27f636217cbfa31a4e830f349ae5720.jpg) +(a) Train from scratch + +![](images/485b491ea971b145d122a9e124cb7177b0d69285d2adc5eed481bf5316c090b8.jpg) + +![](images/c6ae2dceb0c9621794a2f3e74785b0e7801adbb91d909ca664c3f582778281c3.jpg) +Figure 10: Effects of transfer learning on the training process. Left (a,b): The layer convergence process of ResNet-50. 
Right (a,b): The loss landscapes of Stage 4 w/o transfer learning. + +![](images/44c1782dcb66f53943d0cf57fa8f4482379eff83247c299544dbcd9351ae8b52.jpg) +(b) Fine-tuning + +# 7 RELATED WORK + +DNNs with gradient-based training show great potential to fit targets with arbitrary complexities (Hornik et al., 1989; Leshno et al., 1993), given sufficient width. With the advances in the last decade to verify the capability of the depth of universal approximators (Delalleau & Bengio, 2011; Eldan & Shamir, 2016; Lu et al., 2017), practitioners tried to reduce the width of neural networks by adding more layers (Simonyan & Zisserman, 2014; He et al., 2016; Huang et al., 2017). We are also inspired by research on local properties (sharpness/flatness) of loss functions at minima (Keskar et al., 2017; Li et al., 2018) and relationship between convergence rate and generalization (Hardt et al., 2016). Furthermore, LARS optimizer (You et al., 2017) shares some valuable insights on layer convergence, which are discussed in Appendix A.8. In practice, the idea of layer convergence bias had been intuitively applied to accelerate DNN training (Huang et al., 2016; Brock et al., 2017) and mitigating catastrophic forgetting (Ramasesh et al., 2020). The arrangement schemes of CNN/Transformer blocks were explored by (Liu et al., 2022b;a). + +# 8 CONCLUSION + +In this work, we empirically studied the phenomenon that the shallower layers of DNNs tend to converge faster than the deeper layers, called layer convergence bias. This phenomenon is a natural preference in the process of DNN training: the shallower layers are responsible for extracting low-level features which are more evenly distributed and easier to learn, while deeper layers refine these features to do specific tasks. This makes the loss landscapes for shallower layers flatter than the landscapes for deeper layers, making shallower layers converge faster. 
In addition, this work established a connection between layers and learned frequencies. By showing deeper layers tend to fit the high-frequency components in the target function, we can understand the layer convergence bias from another perspective. We finally took DNN architecture design and transfer learning as two examples to show how theoretical findings in this work can shed light on the practical applications of deep learning. For progress to continue, a more in-depth understanding of the properties of neural networks is needed. We also hope that the layer convergence bias can inspire more practical improvements in the DNNs' architecture design and training schemes. + +# ACKNOWLEDGMENTS + +This work was supported by the Lustgarten Foundation for Pancreatic Cancer Research and the McGovern Foundation. + +# REFERENCES + +Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. +David Balduzzi, Marcus Frean, Lennox Leary, JP Lewis, Kurt Wan-Duo Ma, and Brian McWilliams. The shattered gradients problem: If resnets are the answer, then what is the question? In International Conference on Machine Learning, pp. 342-350. PMLR, 2017. +Andrew Brock, Theodore Lim, James Millar Ritchie, and Nicholas J Weston. Freezeout: Accelerate training by progressively freezing layers. In NIPS 2017 Workshop on Optimization: 10th NIPS Workshop on Optimization for Machine Learning, 2017. +Olivier Delalleau and Yoshua Bengio. Shallow vs. deep sum-product networks. Advances in neural information processing systems, 24, 2011. +Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. +Ronen Eldan and Ohad Shamir. The power of depth for feedforward neural networks. 
In Conference on learning theory, pp. 907-940. PMLR, 2016. +Alex Graves, Abdel-rahman Mohamed, and Geoffrey Hinton. Speech recognition with deep recurrent neural networks. In 2013 IEEE international conference on acoustics, speech and signal processing, pp. 6645-6649. IEEE, 2013. +Moritz Hardt, Ben Recht, and Yoram Singer. Train faster, generalize better: Stability of stochastic gradient descent. In International conference on machine learning, pp. 1225-1234. PMLR, 2016. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016. +Sepp Hochreiter. Untersuchungen zu dynamischen neuronalen netzen. Diploma, Technische Universität München, 91(1), 1991. +Sepp Hochreiter, Yoshua Bengio, Paolo Frasconi, Jürgen Schmidhuber, et al. Gradient flow in recurrent nets: the difficulty of learning long-term dependencies, 2001. +Kurt Hornik, Maxwell Stinchcombe, and Halbert White. Multilayer feedforward networks are universal approximators. Neural networks, 2(5):359-366, 1989. +Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pp. 646-661. Springer, 2016. +Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4700-4708, 2017. +Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning, pp. 448-456. PMLR, 2015. +John Jumper, Richard Evans, Alexander Pritzel, Tim Green, Michael Figurnov, Olaf Ronneberger, Kathryn Tunyasuvunakool, Russ Bates, Augustin Žídek, Anna Potapenko, et al. Highly accurate protein structure prediction with alphafold. Nature, 596(7873):583-589, 2021. 
+ +Nitish Shirish Keskar, Jorge Nocedal, Ping Tak Peter Tang, Dheevatsa Mudigere, and Mikhail Smelyanskiy. On large-batch training for deep learning: Generalization gap and sharp minima. In 5th International Conference on Learning Representations, ICLR 2017, 2017. +Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Communications of the ACM, 60(6):84-90, 2017. +Moshe Leshno, Vladimir Ya Lin, Allan Pinkus, and Shimon Schocken. Multilayer feedforward networks with a nonpolynomial activation function can approximate any function. Neural networks, 6(6):861-867, 1993. +Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. Advances in neural information processing systems, 31, 2018. +Gaobo Liang and Lixin Zheng. A transfer learning method with deep residual network for pediatric pneumonia diagnosis. Computer methods and programs in biomedicine, 187:104964, 2020. +Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3202-3211, 2022a. +Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976-11986, 2022b. +Zhou Lu, Hongming Pu, Feicheng Wang, Zhiqiang Hu, and Liwei Wang. The expressive power of neural networks: A view from the width. Advances in neural information processing systems, 30, 2017. +Yuri Nesterov. Introductory lectures on convex optimization: A basic course, volume 87. Springer Science & Business Media, 2003. +Michael A Nielsen. Neural networks and deep learning, volume 25. Determination press San Francisco, CA, USA, 2015. +M-E Nilsback and Andrew Zisserman. A visual vocabulary for flower classification. In CVPR, volume 2, pp. 1447-1454. 
IEEE, 2006. +Zhou Pan, Feng Jiashi, Ma Chao, Xiong Caiming, Chu Hong Hoi Steven, and E Weinan. Towards theoretically understanding why sgd generalizes better than adam in deep learning. In Advances in Neural Information Processing Systems, pp. 21285-21296, 2020. +Maithra Raghu, Justin Gilmer, Jason Yosinski, and Jascha Sohl-Dickstein. Svcca: Singular vector canonical correlation analysis for deep learning dynamics and interpretability. Advances in neural information processing systems, 30, 2017. +Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In International Conference on Machine Learning, pp. 5301-5310. PMLR, 2019. +Vinay V Ramasesh, Ethan Dyer, and Maithra Raghu. Anatomy of catastrophic forgetting: Hidden representations and task semantics. arXiv preprint arXiv:2007.07400, 2020. +Basri Ronen, David Jacobs, Yoni Kasten, and Shira Kritchman. The convergence rate of neural networks for learned functions of different frequencies. Advances in Neural Information Processing Systems, 32, 2019. +Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. IJCV, 115(3):211-252, 2015. +Shibani Santurkar, Dimitris Tsipras, Andrew Ilyas, and Aleksander Madry. How does batch normalization help optimization? Advances in neural information processing systems, 31, 2018. + +Kun Shao, Yuanheng Zhu, and Dongbin Zhao. Starcraft micromanagement with reinforcement learning and curriculum transfer learning. IEEE Transactions on Emerging Topics in Computational Intelligence, 3(1):73-84, 2018a. +Siyu Shao, Stephen McAleer, Ruqiang Yan, and Pierre Baldi. Highly accurate machine fault diagnosis using deep transfer learning. IEEE Transactions on Industrial Informatics, 15(4):2446-2455, 2018b. 
+David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George Van Den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, et al. Mastering the game of go with deep neural networks and tree search. nature, 529(7587):484-489, 2016. +Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. +Ilya Sutskever, Oriol Vinyals, and Quoc V Le. Sequence to sequence learning with neural networks. Advances in neural information processing systems, 27, 2014. +Pengxiang Wu, Songzhu Zheng, Mayank Goswami, Dimitris Metaxas, and Chao Chen. A topological filter for learning with label noise. Advances in neural information processing systems, 33: 21382-21393, 2020. +Yuxin Wu and Kaiming He. Group normalization. In Proceedings of the European conference on computer vision (ECCV), pp. 3-19, 2018. +Zhi-Qin John Xu, Yaoyu Zhang, Tao Luo, Yanyang Xiao, and Zheng Ma. Frequency principle: Fourier analysis sheds light on deep neural networks. arXiv preprint arXiv:1901.06523, 2019. +Zhiqin John Xu and Hanxu Zhou. Deep frequency principle towards understanding why deeper learning is faster. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 10541-10550, 2021. +Zhang Yi, Pheng-Ann Heng, and Ada Wai-Chee Fu. Estimate of exponential convergence rate and exponential stability for neural networks. IEEE Transactions on Neural Networks, 10(6):1487-1493, 1999. +Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. How transferable are features in deep neural networks? Advances in neural information processing systems, 27, 2014. +Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017. +Matthew D Zeiler and Rob Fergus. Visualizing and understanding convolutional networks. In European conference on computer vision, pp. 818-833. Springer, 2014. 
+ +# A APPENDIX + +# A.1 EXPERIMENTAL SETTINGS + +Datasets. The synthetic and real datasets are summarized in the Tab. 1 + +Table 1: Descriptions and statistics of the datasets used in this work. + +
DatasetSize (train/test)ClassesData description
Sine regression5000/5000n/aFunction with four sine components, domain [-2,2]
ImageNet1,281,167/50,0001000Photos of common objects
CIFAR-1050,000/10,00010Photos of common objects, image sizes 32 × 32
CIFAR-10050,000/10,000100Photos of common objects, image sizes 32 × 32
Flowers1,088/27217Fine-grained photos of flowers
FGVC Aircraft6,667/3,333100Fine-grained photos of aircraft
Caltech-1013,060/6,084102Photos/paintings/sketches of common objects
CUB-2005,994/5,794200Fine-grained photos of birds
DomainNet painting50,416/21,850345Oil Paintings, murals, drawings, tattoos
+ +Network Architectures. The FCNNs, CNNs, and Vision Transformers are summarized in the Tab. 2. + +Table 2: Complexities and architectures of DNNs used in this work. + +
Model#ParametersMult-addsArchitecture description
FCNN (no res)2k10k4 fc layers [1-32-32-32-1]
FCNN (res)132k390kfc [1-128] → 4 res-blocks [128-128-128] → fc [128-1]
ResNet-5025.6M4.1Gconv → 4 stages with [3,4,6,3] res-blocks → fc
VGG-19143.7M19.8G16 conv layers, 3 fc layers
ViT9.9M77.2M12 Transformer encoder blocks (basic width 256), 1 fc layer
+ +Training Hyper-parameters. For the regression task, we train FCNNs with SGD optimizers for 300 epochs. The initial learning rate is 0.1, with a learning rate decay (to 0.01) at the 150th epoch. The batch size is 128, no weight decay ( $L_{2}$ regularization) is conducted. + +For the ImageNet classification task with CNNs, we train ResNet-50 and VGG-19 for 120 epochs with SGD optimizers. The initial learning rate is 0.1, with learning rate decays at the 50th and 100th epoch to 0.01 and 0.001, respectively. The batch size is 256, the input image size is $224^2$ , and the weight decay coefficient is $10^{-4}$ . + +For Vision Transformers on ImageNet dataset, we train them for 200 epochs with Adam optimizers. The peak learning rate is set to 0.0003. We use linear learning rate warm-up for 10,000 iterations, and a subsequent cosine learning rate decay. The batch size is 256, the input image size is $224^2$ , and the weight decay coefficient is $10^{-4}$ . + +For CNN image classification on other datasets, we train models for 100 epochs with SGD optimizers. Initial learning rate of 0.01 and cosine learning rate scheduler are applied. The batch size is 128, the input image sizes are $32^2$ (for CIFAR) and $224^2$ (for Flowers, Aircraft, Caltech, CUB, and DomainNet), and the weight decay coefficient is $10^{-4}$ . + +# A.2 CONVERGENCE MEASUREMENT USING WEIGHT VARIATION + +In Section 2, we have introduced the convergence measurement in this work. This measurement is simple and straightforward, and it can show how each layer in a DNN converges during the whole training process (Fig. 1 for fully connected networks and Fig. 2 for CNNs) by examining the distance between the training parameters and the converged parameters. However, it has not been verified whether calculating the parameter distance variation to the convergence point between two adjacent epochs is necessary. 
After all, the measurement highly depends on the convergence point, which can only be obtained after the whole training process. + +We come up with a simplified convergence measurement. This method uses weight variation as a metric to examine how fast a layer is learning, and whether this layer reaches a state of convergence. If a layer is learning actively, it is reasonable that its weights vary drastically during training. For the converged layers, their weights usually keep stable. So we use $\|\theta_l^{(t_k)} - \theta_l^{(t_{k+1})}\|_2 / \|\theta_l^{(t_k)}\|_2$ , the normalized weight variation of layer $l$ during epoch $k$ and $k + 1$ , to illustrate how actively it is learning. + +![](images/c14d3d7d8d8b5f86c9a0418fdbfe649e1f98f972535305d86b6a15f72d8e7cea.jpg) +(a) ResNet-50 Val Acc $73.24\%$ + +![](images/04c8d0fa6a2eff40a964496ab70bbe2e9626de759594d4b1b1a8fe629a41f40d.jpg) +(b) VGG-19 Val Acc $71.89\%$ +Figure 11: The convergence processes of ResNet-50 and VGG-19 on ImageNet. The results are illustrated with weight variations. The learning rate decays at epoch 50 and epoch 100. + +The results of ResNet-50 and VGG-19 training process on ImageNet are shown in Fig. 11. From this plot, we can see that after learning rate decays at epoch 50 and 100, the weight variations drop evidently. However, the weight variations of each layer do not show apparent decreasing trend when the learning rate keeps stable, which indicates that the training of DNNs do not converge as usual convex optimization problems do (e.g., linear programming). Therefore, it is hard for us to compare the convergence rates of different layers by observing their convergence curves. We cannot find a clear clue like what was given by the convergence measurement in Section 2 to get the layer convergence bias. 
All in all, we can safely claim that, it is crucial for the convergence metric to consider direction information to measure how fast different layers are learning towards their convergence points. Our previous convergence measurement really needs to examine convergence by calculating the parameter distance between the current point and the convergence point. + +# A.3 FACTORS AFFECTING LAYER CONVERGENCE BIAS + +In Section 5, we have shown that the complexity of the datasets is an important factor affecting layer convergence bias. When the fitting target function is complex enough with both low and high frequency components, the shallower layers learn the low-frequency components while the deeper layers learn the high-frequency components. Here we use the FCNNs with residual connections to show whether some other important factors would affect the layer convergence bias. All following experiments are conducted on the same regression task in Section 3. + +Model Depth. The default architecture used in previous experiments is the four-block FCNN; here we try adding more blocks to make the network deeper and see what change will happen. As shown in Fig. 12, all the networks show layer convergence bias. With more and more res-blocks, the overall convergence of the network becomes slightly faster. + +![](images/cc2aff774b33d159dddbe57297a41ea623c4b6e1c8238f3210ae1746b98d667a.jpg) +(a) 4 Res-Blocks + +![](images/453f84c65a6baabe53e224356ca74b0c776c7f0a8c00e38898063f8c269a97cc.jpg) +(b) 8 Res-Blocks + +![](images/6c70da1522273b0ccd7717aad2cbed9966f2fe1ea3415c9334212634b8ac9363.jpg) +(c) 12 Res-Blocks +Figure 12: The convergence process of FCNNs with different number of res-blocks. + +![](images/8f5bd8c36a3e1c63d8619e1feeea44664d4633aa087c1d2b2c8dc2215dda3798.jpg) +(d) 16 Res-Blocks + +Learning Rate. The results with different learning rates are shown in Fig. 13. When the learning rate gets smaller, layer convergence bias becomes weaker. 
This is because the gradient predictiveness w.r.t. parameters of all layers gets close to 1 (see Fig. 3 (b,right) for the predictiveness with the learning rate of 0.01). In this case, a layer is less influenced by the updates of parameters in other layers, only the gradient predictiveness w.r.t. data matters for the convergence rate. In addition, smaller learning rates are beneficial for the deeper layers to converge because of their sharper minima. + +![](images/9e940352ce29a0753b43623966f621e44b442ff5f45f01ca7ec52a8799cd859.jpg) +(a) $\mathrm{LR} = 0.01$ +Figure 13: The convergence process of FCNNs with different learning rates. + +![](images/a68e1ef770b3c062097e63279ea52fc7603fb3b14fdd2f5587008ed112684517.jpg) +(b) LR=0.03 + +![](images/d1508a6d1aeb53f993f1f0855f04bf7469b186772ab04214f180caa548e8eb72.jpg) +(c) $\mathrm{LR} = 0.1$ + +Weight Decay. The experiments with FCNNs in previous sections are conducted without weight decay. It is interesting to investigate the sensitivity of the layer-wise model convergence with different weight decay strengths. The results are shown in Fig. 14. We can see that when the weight decay becomes stronger, the residual blocks converge more slowly at a more and more similar convergence rate. We conjecture the reason is that weight decay dominates the total loss when its coefficient is large. In this way, the layer parameters with similar initialization scales tend to converge at a similar speed toward zero. Because the residual blocks have identical architectures, they share the same initial parameter distribution, and converge at the same speed when weight decay is strong. + +Optimizer. In Section 4, we have discussed the mechanism behind layer convergence bias. The flatter/sharper minimizers of different layers make SGD learn at different speeds. This is because SGD is better at finding flatter minimizers (Pan et al., 2020). In Fig. 15, we compare SGD with three adaptive optimizers: Adagrad, RMSprop, and Adam. 
It is evident that with adaptive optimizers, layer convergence bias does not hold anymore. We conjecture the reason behind this + +![](images/8c910a09f36ed47a776ac4c222b9f922099d411872ba29c1a8ddad5b2028fd69.jpg) +(a) $\mathrm{WD} = 1\mathrm{e} - 7$ + +![](images/1be2a755db41c189bba26cb171501af3ca144f7d7cb4a4fb852009ec3e43e59d.jpg) +(b) $\mathrm{WD} = 1\mathrm{e} - 6$ + +![](images/48c54aefd14b8aa698fa80c269dec5b09730df1cb1ce2b04444a89aa91c71723.jpg) +(c) $\mathrm{WD} = 1\mathrm{e} - 5$ + +![](images/c9b32c2ce60e956db6ac87943ab3ce0b46075be910f394f49232fabafa4b331e.jpg) +(d) $\mathrm{WD} = 1\mathrm{e} - 4$ + +![](images/7426eff0147450ebff0521730b61325b7c4e73b9c2627fbe66d33214ec987820.jpg) +Figure 14: The convergence process of FCNNs with different weight decay strengths. +(a) SGD +Figure 15: The convergence process of FCNNs with different optimizers. + +![](images/e135b9bc895215ddb16dfe5463737e3ca474af3384917dd410758b939cbce60c.jpg) +(b) Adagrad + +![](images/aa8a87fcac06a0fd497ed4e36c4b736c2fdf0976d37c37db1d7917240dd929ff.jpg) +(c) RMSprop + +![](images/a343fbd451eb81c933889a4dc2ceebfd43a11cf133804cb1fa6cc26406b9771c.jpg) +(d) Adam + +is that the adaptive optimizers heuristically assign different learning rates for different parameters, making their optimization hardly predictable. + +Normalization Methods. Like residual connection, batch normalization Ioffe & Szegedy (2015) is also a common design in modern DNN architectures. As discussed in previous literature, normalization in the neural networks helps to make the layer inputs more stable and make the loss landscapes smoother, thus accelerates the model training Santurkar et al. (2018). In Section 3 and Section 4, we mainly use the FCNNs without normalization to verify and explore the layer convergence bias. Here we investigate how the normalization methods (i.e., batch normalization, layer normalization Ba et al. 
(2016), and group normalization Wu & He (2018)) help the convergence, and whether the shallower layers still converge faster in these cases. As shown in Fig. 16, all layers converge faster when adding batch normalization to them. Particularly, "Res-Block 1" accelerates the most and reaches a similar convergence rate as "Layer 1". The layer convergence bias also holds for batch normalization. For layer normalization and group normalization, the models show a significantly faster convergence rate than the model using batch normalization. All layers show effective convergence at an early stage of training (i.e., the first five epochs). In these two cases, different layers have similar convergence rates, thus no evident layer convergence bias emerges. + +![](images/c18964d8ef4698f80162ae831dd96c2e2a854ed39750a3c40037c9ee297a46f6.jpg) +(a) Without normalization +Figure 16: The convergence process of FCNNs with different normalization methods. When using group normalization, we set the group number to 8. + +![](images/5cc61b139c36fdbd843318044c92bc9f08b4928b9a7d11f8640c8bc6d3bcbff4.jpg) +(b) Batch normalization + +![](images/048efec3e0d90b64ad0f8eb197f1a9266058a077430180c8f05ad823f565a850.jpg) +(c) Layer normalization + +![](images/bec197c0b7083842ddd26fb8f7bc3bcf15f14334ad8cae797d0304729bab6ec6.jpg) +(d) Group normalization + +# A.4 RESULTS ON HARDER DATASETS + +To verify the layer convergence bias on more datasets, we show more convergence results on four harder image classification datasets (see Fig. 17). Most of the classes in these datasets only have $< 100$ samples, making them harder to learn. Note that the experiments are conducted with the learning rate of 0.01 (learning rate of 0.1 failed in some cases because these datasets have too many classes but not sufficient samples, leading to non-decreasing loss), some deeper layers have quite similar convergence rates because of the small learning rate. 
But roughly speaking, layer convergence bias still holds for these datasets. + +![](images/2d3d1b6de4d187a9f4de51739ebda2bd2438b98ef0df07a2ec2e90e179fb1ac2.jpg) + +![](images/5a38646221862fb698cd2ba10f02054544c4c8e8783063dadc260832cda985eb.jpg) +(a) ResNet-50 + +![](images/1b8738cead3470357291c6178fcedc909a1b18863b2b10d24230d8adfce0843f.jpg) + +![](images/21cdb294d3de63caa9d5e62609dd7916741247e216ee9e4b576d1a306bf34aab.jpg) + +![](images/8c7fa7738b52dd3f1b0ec00f850490002e879985c7d932e599678ea0b1ac2450.jpg) +Figure 17: The convergence process of CNNs on four image classification tasks. + +![](images/1c7ee3bcce9f991f5e31e8c2bc232673e8ef3c56dc6ef8260e33833b5a10e435.jpg) +(b) VGG-19 + +![](images/d71898cc82aa5829b15c3ade625fc9ab56b6a1a49a1491a20217c627531e45f4.jpg) + +![](images/fc3727fa8b46bdfc62303d8fa3e601e021b217453e63b6b086a1c6c8dd903884.jpg) + +# A.5 REPEATABILITY OF THE VISUALIZATIONS + +Do different ImageNet trained models produce dramatically different loss landscapes? We plot the loss landscapes of different models with different random seeds in Fig. 18, 19. Quite similar patterns of the landscapes for different layers can be observed on both ResNet and VGG with different random seeds. 
+ +![](images/cdc4602f4900831a817c6ba0b16166988b92a5ed27d06b262133d493de8cab68.jpg) +(a) Stage 1 Seed 1 + +![](images/abd8bef9c0bde4ccf521ae8780f8f963813e3757ad1011194da9d4b3b7754b33.jpg) +(b) Stage 2 Seed 1 + +![](images/ba6b51581f09344b7a5294ad231451ffff407aae50122918db4380b0c9eed752.jpg) +(c) Stage 3 Seed 1 + +![](images/71814b1519c57e68b5914a4e19460521f39b1313e1a78c9a324836a55c1fb1c4.jpg) +(c) Stage 4 Seed 1 + +![](images/5a8c2c25bc5f0158bd56ecd9578c8bafb193298aa7b548989b51f1497106dd4c.jpg) +(d) Stage 1 Seed 2 + +![](images/0f04e3025b3c16525aa272aafd25438907ada7c2fe5faf9090434da866ae0c5c.jpg) +(e) Stage 2 Seed 2 + +![](images/0c3aa08d1725a94bfc6c869b66063bbc9bc36ecc11014c1fa3bb55756c712189.jpg) +(f) Stage 3 Seed 2 + +![](images/80f071771fa09a7bc8bd98ee2b6b0dd524a8816e603accd1cbe259d50f42514c.jpg) +(g) Stage 4 Seed 2 +Figure 18: The loss landscapes of different layers of ResNet-50. + +![](images/a1f24b60ab084ff5e91c86c22102eb1c5984cd7693d5367bd8e88c97f0f9b9c6.jpg) +(a) Layer 1 Seed 1 + +![](images/2826defe3fa06f37e46b41cf27059aad6d783ff328c1d5a830c6867bb075a758.jpg) +(b) Layer 5 Seed 1 + +![](images/96d8380a41f38818184333786a210af8d272d30b5769c7160da01a5b1ac06baf.jpg) +(c) Stage 9 Seed 1 + +![](images/f0073c99ef1020a065dc77f78c5fbf0c9f3a31bae17fac5292db87edaaba2b55.jpg) +(d) Stage 13 Seed 1 + +![](images/5d78a9bcedc51405b94b36804c97cfef494769d5001591f09d66b069e0b0959e.jpg) +(e) Layer 1 Seed 2 + +![](images/8ae304f21bc0a477f488c981a76b210feeab8251686500b20c9109678f4c0bc6.jpg) +(f) Layer 5 Seed 2 + +![](images/aac8258375312fda109c1b7d60b5900e75f4d2163d868f0b185381d13a4693ff.jpg) +(g) Stage 9 Seed 2 +Figure 19: The loss landscapes of different layers of VGG-19. 
+ +![](images/1019864b6bee7ce9126cd66adff4dc0edca7f8e353221128bf2ef43d992d1dd4.jpg) +(h) Stage 13 Seed 2 + +# A.6 MODELS OBEYING LAYER CONVERGENCE BIAS PERFORM BETTER + +In Section 4 and Section 5, it is discussed that layer convergence bias indicates that the shallower layers are learning low-level features (or low-frequency components of the target function). It is reasonable that learning low-level features first has greater potential to reach good model performance, since the model can establish its high-level features based on relatively stable low-level feature spaces. + +To examine whether the fast establishment of low-level features benefits model performance, we train four different FCNN models with the same amount of parameters, but different architectures, to fit the Sine target with four components. This experiment is based on a finding that a residual block with more layers in it tends to converge more slowly. We construct four FCNN models, each of them has four residual blocks (maybe in different sizes). The convergence processes are shown in Fig. 20. We can see that the blocks with the largest complexity always converge the most slowly. As the block with depth $= 4$ is placed shallower in the FCNN, the regression MSE loss goes higher. In other words, if a shallower layer converges slowly, the model gets poorer performance. This may be due to the vulnerability of deeper layers. If they converge based on changing shallower layers, it is hard for them to learn good features based on their unstable inputs. + +![](images/58ba3b7388ef6af39c999e96716c1e2975a05240b2b63efc7614553ec8c19502.jpg) +(a) Res-Blocks $= (4,1,1,1)$ Val loss 2.7e-4 +Figure 20: The convergence process of FCNNs with different residual block sizes and their validation performance on the regression task. 
Each model has a four-layer residual block and three one-layer residual blocks (e.g., "Res-Blocks=(4,1,1,1)" means the first residual block has four layers, and the rest three blocks have only one layer). + +![](images/986a500e127a3b567a7d483321e402198416d167b8ac976335581d11af8cc7ec.jpg) +(b) Res-Blocks $= (1,4,1,1)$ Val loss 2.4e-4 + +![](images/05f6f9bb1fbf28a7268b87985221b47d7564cc5546ffb4f70207192a9ffd9168.jpg) +(c) Res-Blocks $= (1,1,4,1)$ Val loss 1.8e-4 + +![](images/212a7681039b14ab8973ec4b79d85bf91f44408c5bc8b952bc412e7c3ba6b5c7.jpg) +(d) Res-Blocks $= (1,1,1,4)$ Val loss 1.4e-4 + +The results can also be understood from another perspective. If the deeper block contains more parameters (with more fully connected layers in it), it would be helpful for this block to learn the corresponding high-frequency components of the target function. Therefore, the model can reach better performance. A similar observation is obtained in Section 6.1: when putting wider layers of the ViT deeper, the model can reach higher performance. + +# A.7 LAYER CONVERGENCE BIAS FOR VISION TRANSFORMERS + +As discussed in Section 6, ViT can benefit from distributing more parameters in the deeper layers. This result comes from one of our main findings about layer convergence bias: the deeper layers tend to learn high-frequency components of the target function, thus converge more slowly. So adding more parameters for the deeper layers is beneficial for these layers to learn the high-frequency components which are usually harder. + +When making this claim, we do not verify the layer convergence bias for the ViT. The main difficulty for verifying layer convergence bias for ViTs is brought by its typical training scheme. ViT needs adaptive optimizers to train, otherwise it converges very slowly. However, adaptive optimizers change the learning rates of different parameters according to their optimization procedures. 
This leads to unfair convergence comparison between layers, thus affects the layer convergence bias, as shown in Fig. 15. Therefore, we try both SGD and Adam optimizers for training ViTs on ImageNet, and see whether layer convergence bias holds in some cases. As shown in Fig. 21 (a), the ViT shows a rough trend of layer convergence bias when optimizing with Adam, where the deepest "Encoder Block 12" converges the slowest. However, some other layers do not strictly obey layer convergence bias (e.g., the shallowest "Patch Embedding" does not learn fastest among all blocks). When optimizing with SGD, the ViT shows a good layer convergence bias. The results indicate that ViTs approximately share the same rules as FCNNs and CNNs, thus supporting the discussions in Section 6. + +![](images/0af94e7b19f8f6f3270ef8ff11d3b3b1c3f0212f5324ea3e4a2fb4ab6b730aba.jpg) +(a) ViT Adam + +![](images/f2ab7458058264a1c486e2c09681f0a30cecc2ec3babc56a0515bcaf96f8eb4d.jpg) +(b) ViT SGD +Figure 21: The convergence curves of ViTs on ImageNet with different optimizers. With Adam optimizer, the ViT does not obey the layer convergence bias strictly. While SGD can ensure relatively ideal faster convergence processes of shallower layers. + +# A.8 CONNECTION TO LARS OPTIMIZATION SCHEME + +One of the most important factors that affect the optimization procedure is the learning rate. In this work, it is shown that the shallower layers can learn effectively with large learning rates, but the deeper layers only learn fast after learning rate decays. Is there any connection between layers and their suitable learning rates? + +LARS optimizer You et al. (2017) made a significant contribution to training DNNs with huge batch sizes and large learning rates. The key observation in the literature is that the weight-to-gradient ratio highly varies in different layers. If a layer has greater gradients and relatively smaller weights, it would be hard for it to converge due to the vigorous parameter update. 
So LARS considers the scale of the weights and its gradient norms in each layer and assigns a local learning rate for a layer to make it converge effectively and stably. For FCNNs in our work, its different hidden layers are initialized with the same scale due to their identical architecture, but the deeper layers usually have larger gradients. As a result, the larger gradients may make these layers struggle to converge. Similarly, the CNNs (i.e., ResNet-50 and VGG-19) have wider deeper layers. These layers have smaller initial parameters, so their gradients may lead to drastic weight variations if the learning rate is too large. In this way, we can understand why they cannot get close to their optimal points effectively at the early stage of training. It explains layer convergence bias from another perspective. \ No newline at end of file diff --git a/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/images.zip b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d55f8972fc2062a48cf536b3fb735b7f8cc2f33b --- /dev/null +++ b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d2f6d7e474b42d9ed2eec2ffdd999fbfa2329c4d124a241a72ce4f734e3bda6 +size 1147432 diff --git a/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/layout.json b/2023/Which Layer is Learning Faster_ A Systematic Exploration of Layer-wise Convergence Rate for Deep Neural Networks/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..dd8fc8c5ebcca0a03f6b1328edc8dc01deb30397 --- /dev/null +++ b/2023/Which Layer is Learning Faster_ A Systematic 
Exploration of Layer-wise Convergence Rate for Deep Neural Networks/layout.json @@ -0,0 +1,16343 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 503, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 503, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 503, + 137 + ], + "type": "text", + "content": "WHICH LAYER IS LEARNING FASTER? A SYSTEMATIC EXPLORATION OF LAYER-WISE CONVERGENCE RATE FOR DEEP NEURAL NETWORKS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 154, + 319, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 154, + 319, + 166 + ], + "spans": [ + { + "bbox": [ + 111, + 154, + 319, + 166 + ], + "type": "text", + "content": "Yixiong Chen" + }, + { + "bbox": [ + 111, + 154, + 319, + 166 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 111, + 154, + 319, + 166 + ], + "type": "text", + "content": " Alan Yuille" + }, + { + "bbox": [ + 111, + 154, + 319, + 166 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 111, + 154, + 319, + 166 + ], + "type": "text", + "content": " Zongwei Zhou" + }, + { + "bbox": [ + 111, + 154, + 319, + 166 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 166, + 439, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 166, + 439, + 178 + ], + "spans": [ + { + "bbox": [ + 111, + 166, + 439, + 178 + ], + "type": "text", + "content": "1The Chinese University of Hong Kong - Shenzhen 2Johns Hopkins University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 178, + 496, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 178, + 496, + 189 + ], + "spans": [ + { + "bbox": [ + 111, + 178, + 496, + 189 + ], + "type": "text", + "content": "yixiongchen@link.cuhk.edu.cn ayuille1@jhu.edu zzhou82@jh.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": 
[ + 276, + 218, + 334, + 229 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 218, + 334, + 229 + ], + "spans": [ + { + "bbox": [ + 276, + 218, + 334, + 229 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 245, + 470, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 245, + 470, + 368 + ], + "spans": [ + { + "bbox": [ + 140, + 245, + 470, + 368 + ], + "type": "text", + "content": "The deeply hierarchical structures enable deep neural networks (DNNs) to fit extremely complex target functions. However, the complex interaction between layers also makes the learning process of a particular layer poorly understood. This work demonstrates that the shallower layers of DNNs tend to converge faster than the deeper layers. We call this phenomenon Layer Convergence Bias. We also uncover the fundamental reason behind this phenomenon: Flatter local minima of shallower layers make their gradients more stable and predictive, allowing for faster training. Another surprising result is that the shallower layers tend to learn the low-frequency components of the target function, while the deeper layers usually learn the high-frequency components. It is consistent with the recent discovery that DNNs learn lower frequency objects faster." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 395, + 206, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 395, + 206, + 407 + ], + "spans": [ + { + "bbox": [ + 106, + 395, + 206, + 407 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 422, + 504, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 489 + ], + "type": "text", + "content": "Over the last decade, breakthrough progress has been made by deep neural networks (DNNs) on a wide range of complicated tasks in computer vision (Krizhevsky et al., 2017), natural language processing (Sutskever et al., 2014), speech recognition (Graves et al., 2013), game playing (Silver et al., 2016), and biomedical prediction (Jumper et al., 2021). Such progress hinged on a number of advances in hardware technology, dataset construction, and model architectural designs. Among them, the invention and application of very-deep network architectures play a decisive role." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 494, + 504, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 494, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 494, + 504, + 551 + ], + "type": "text", + "content": "Deepening the network is an effective way to empower its fitting ability. Extensive studies (Delalleau & Bengio, 2011; Eldan & Shamir, 2016; Lu et al., 2017) compared the power between deeper and wider neural networks and showed that the polynomial growth of depth has a similar effect to the exponential growth of width. Therefore, modern DNNs (Simonyan & Zisserman, 2014; He et al., 2016) usually contain tens of layers to ensure their modeling abilities for real-world applications." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 555, + 504, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 633 + ], + "type": "text", + "content": "Although the practical success of deep architectures is indisputable, they make the learning hardly predictable since complex interaction happens between layers when co-adapting to the target (Yosinski et al., 2014). By now, we still have a poor understanding of how different layers learn differently. Currently, a widely accepted view relates to the vanishing gradient problem Hochreiter (1991); Hochreiter et al. (2001). The gradients are getting weaker and weaker as they move back through the hidden layers, making the shallower layers converge more slowly (Nielsen, 2015). Informally, it is reasonable that larger gradient values bring higher learning speed." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "content": "Even though this view somewhat makes sense, we seem to have little concrete evidence supporting it. In particular, it is dubious how higher-level features can be built based on the unstable features extracted by the unconverged shallower layers (Raghu et al., 2017). This paper aims to find a credible answer for the parameters of which layer are learning faster towards the convergence point (defined as the convergence rate in this work) with a systematic exploration. Our results lead to somewhat startling discoveries." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Our Contributions. Our point of start is illustrating that there does not seem to be a reliable positive correlation between the gradient magnitude and the convergence rate of a particular layer." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "Instead, we find that shallower layers tend to converge faster than the deeper ones, even with smaller gradients. The phenomenon is called layer convergence bias in this paper." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "content": "We then turn our attention to excavating the underlying mechanism for the faster convergence of shallower layers. 
Specifically, we find out that the depth of a layer has a fundamental effect on its training: the parameters of shallower layers are usually optimized on flatter landscapes than deeper layers. This finding reveals that the gradients of shallower layers may be more predictive and thus have the potential to allow the larger learning rates (LRs) to be performed, making the convergence faster." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 504, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 260 + ], + "type": "text", + "content": "Finally, we find that the layer convergence bias is also tied to the frequency of the function they are modeling. When fitting a complex target function, the shallower layers tend to fit the low-frequency (usually simpler) components. On the contrary, the deeper layers struggle to fit the remaining high-frequency components. It is a consistent result of the recent discovery that DNNs prioritize learning low-frequency components of the modeling function, while having very low learning speed on high-frequency components that tend to be more complex (Rahaman et al., 2019). This finding provides us with another perspective to understand why deeper layers learn more slowly." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 265, + 504, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 504, + 300 + ], + "type": "text", + "content": "We believe that understanding the roots of such a fundamental convergence bias can give us a better grasp of the complicated learning process of DNNs. In turn, it can motivate more in-depth algorithmic progress for the deep learning community." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 303, + 505, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 303, + 505, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 303, + 505, + 381 + ], + "type": "text", + "content": "This paper is organized as follows. In Section 2, we introduce our method for measuring convergence speed for different layers, and formally define the layer convergence bias. In Section 3, we examine the relationship between gradient magnitude and convergence rate, and show that the shallower layers tend to converge faster even with smaller gradients. Then in Section 4, we analyze the mechanism behind the layer convergence bias in DNN training. The layer-frequency correspondence is demonstrated in Section 5. The practical significance of layer convergence bias is presented in Section 6. We further discuss the related work in Section 7 and conclude in Section 8." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 396, + 273, + 408 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 273, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 273, + 408 + ], + "type": "text", + "content": "2 LAYER CONVERGENCE BIAS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 420, + 504, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 420, + 504, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 420, + 504, + 466 + ], + "type": "text", + "content": "The deep architecture of DNNs is arguably one of the most important factors for their powerful fitting abilities. With the benefit brought by the deep structures, there are also extra complexities in the training process coming into being. So far, we do not have a firm conclusion about whether some layers are learning faster than others." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 471, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 471, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 471, + 504, + 506 + ], + "type": "text", + "content": "For examining the convergence progress for a DNN, a common practice is checking its loss curve. However, this is not applicable for comparing the convergence between different layers. In this work, we define a measurement for layer-wise convergence in the following." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": "Definition 2.1 (Layer-wise convergence rate) At the training time " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": ", let the deep neural network with " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": " layers " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\{T_{l}^{(t)}\\}_{l=1}^{L}" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": " be " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "f(\\pmb{x}) = (T_{L}^{(t)} \\circ T_{L-1}^{(t)} \\circ \\dots \\circ T_{1}^{(t)})(\\pmb{x}) : \\mathbb{R}^{i} \\to \\mathbb{R}^{o}" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "i, o" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": 
"text", + "content": " are the dimension of its inputs and outputs. We use " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\theta_{l}^{(t)}" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": " to denote the parameters of the " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": "-th layer " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "T_{l}^{(t)}" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": ". Assuming that " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\theta_{l}^{(t)}" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": " can finally converge to its optimal point " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\theta_{l}^{*}" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "t \\to \\infty" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": ", we define the convergence rate of " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "\\theta_{l}" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": " during the time interval " + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "inline_equation", + "content": "[t_1, t_2]" + }, + { + "bbox": [ + 104, + 510, + 506, + 576 + ], + "type": "text", + "content": " to be" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 194, + 588, + 415, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 
588, + 415, + 622 + ], + "spans": [ + { + "bbox": [ + 194, + 588, + 415, + 622 + ], + "type": "interline_equation", + "content": "C _ {l} ^ {(t _ {1}, t _ {2})} = \\frac {1}{(t _ {2} - t _ {1})} \\cdot \\frac {\\| \\theta_ {l} ^ {(t _ {1})} - \\theta_ {l} ^ {*} \\| _ {2} - \\| \\theta_ {l} ^ {(t _ {2})} - \\theta_ {l} ^ {*} \\| _ {2}}{\\| \\theta_ {l} ^ {(t _ {0})} - \\theta_ {l} ^ {*} \\| _ {2}},", + "image_path": "9bab0f0d461122cd9997f920b95a20fac30ebc4ef701e5f81f16503b8225a872.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 628, + 332, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 332, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 332, + 639 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 628, + 332, + 639 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 105, + 628, + 332, + 639 + ], + "type": "text", + "content": " denotes the time point when the training starts." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "text", + "content": "In this definition, the numerator " + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\| \\theta_l^{(t_1)} - \\theta_l^*\\| _2 - \\| \\theta_l^{(t_2)} - \\theta_l^*\\| _2" + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "text", + "content": " denotes how much the distance of the parameter " + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\theta_{l}" + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "text", + "content": " to the optimal point is shortened in the period " + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "inline_equation", + "content": "[t_1,t_2]" + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "text", + "content": ". The denominator " + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\| \\theta_l^{(t_0)} - \\theta_l^*\\| _2" + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "text", + "content": " represents the distance between the initial point to the convergence point, whose primary function is to normalize the speed, allowing the convergence of different layers to compare with each other. Thus, the convergence rate of " + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\theta_{l}" + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "text", + "content": " can be understood as the ratio of normalized distance to time. 
Common optimization works (Yi et al., 1999; Nesterov, 2003) defined the rate of convergence for " + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\lim_{k\\to \\infty}\\frac{\\|\\theta^{(k + 1)} - \\theta^*\\|_2}{\\|\\theta^{(k)} - \\theta^*\\|_2}" + }, + { + "bbox": [ + 104, + 645, + 506, + 735 + ], + "type": "text", + "content": ". It focuses on measuring an exponential level convergence when the" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "optimization step goes to infinity. Since the difference in convergence rates between layers usually appears at an early stage of training, and it is not large enough to compare at an exponential level, we define our new convergence metric to present the convergence difference in a clearer way." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "text", + "content": "Observation 2.1 (Layer convergence bias). For " + }, + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "inline_equation", + "content": "l_1 < l_2" + }, + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "inline_equation", + "content": "\\exists \\tilde{t} > 0" + }, + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "inline_equation", + "content": "C_{l_1}^{(t_1, t_2)} > C_{l_2}^{(t_1, t_2)}" + }, + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "inline_equation", + "content": "t_1 < t_2 < \\tilde{t}" + }, + { + "bbox": [ + 104, + 121, + 505, + 150 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "text", + "content": "Layer convergence bias indicates that at an early training phase " + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "inline_equation", + "content": "t < \\tilde{t}" + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "text", + "content": ", the parameters " + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "inline_equation", + "content": "\\theta_{l_1}" + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "text", + "content": " of a shallower layer " + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "inline_equation", + "content": "l_1" + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "text", + "content": " tend to move to " + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "inline_equation", + "content": "\\theta_{l_1}^*" + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "text", + "content": " faster than a deeper layer " + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "inline_equation", + "content": "\\theta_{l_2}" + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "text", + "content": " moving to " + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "inline_equation", + "content": "\\theta_{l_2}^*" + }, + { + "bbox": [ + 104, + 154, + 506, + 199 + ], + "type": "text", + "content": ". In the following, we use both synthetic and real datasets to show that the layer convergence bias appears for both fully-connected neural networks (FCNNs) and convolutional neural networks (CNNs)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 217, + 367, + 230 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 367, + 230 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 367, + 230 + ], + "type": "text", + "content": "3 VERIFICATION OF LAYER CONVERGENCE BIAS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 243, + 504, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 300 + ], + "type": "text", + "content": "In this section, we try to substantiate the central claim of this work. First, we use the FCNNs to show that the shallower layers tend to converge faster than the deeper layers on the regression task, even when the gradient values for shallower layers are smaller. We then use CNNs with modern architectures to verify that layer convergence bias is a common phenomenon in practical applications. All experimental settings in this work can be found in Appendix A.1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 316, + 406, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 406, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 406, + 327 + ], + "type": "text", + "content": "3.1 LAYER CONVERGENCE BIAS IN FULLY-CONNECTED NETWORKS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 337, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 337, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 337, + 504, + 373 + ], + "type": "text", + "content": "For FCNNs, we construct a simple regression task to demonstrate that layers with smaller gradients do not necessarily learn more slowly than layers with larger gradients. 
The fitting target is " + }, + { + "bbox": [ + 104, + 337, + 504, + 373 + ], + "type": "inline_equation", + "content": "f(x) = \\sin (x) + \\frac{1}{3}\\sin (3x) + \\frac{1}{10}\\sin (10x) + \\frac{1}{30}\\sin (30x)" + }, + { + "bbox": [ + 104, + 337, + 504, + 373 + ], + "type": "text", + "content": ", with mean square error loss for training." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 376, + 505, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 505, + 479 + ], + "type": "text", + "content": "First, we use the FCNN [1-32-32-32-1] with the Sigmoid activations as a simple example. In the following analysis, the first fully-connected layer (1-32) is named Layer 1, and the subsequent two layers (32-32) are called Hidden layer 1, Hidden layer 2 respectively. The gradient values and the convergence processes for these layers are shown in Fig. 1 (a). Two observations can be obtained from the plots: 1) The gradient of Hidden layer 1 is nearly always smaller than the gradient of Hidden layer 2. 2) Although shallower layers have smaller gradients, they seem to converge faster. 
For the first 50 epochs, the shallower layers are moving faster to their convergence point (e.g., " + }, + { + "bbox": [ + 104, + 376, + 505, + 479 + ], + "type": "inline_equation", + "content": "C_{Layer_1}^{(t_0,t_{50})} \\approx 0.012" + }, + { + "bbox": [ + 104, + 376, + 505, + 479 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 376, + 505, + 479 + ], + "type": "inline_equation", + "content": "C_{Hidden\\_layer_1}^{(t_0,t_{50})} \\approx 0.009" + }, + { + "bbox": [ + 104, + 376, + 505, + 479 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 376, + 505, + 479 + ], + "type": "inline_equation", + "content": "C_{Hidden\\_layer_2}^{(t_0,t_{50})} \\approx 0.006" + }, + { + "bbox": [ + 104, + 376, + 505, + 479 + ], + "type": "text", + "content": "), which is inconsistent with the previous view that higher gradients lead to faster learning (Nielsen, 2015)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 484, + 506, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 506, + 577 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 506, + 577 + ], + "type": "text", + "content": "To further validate the above results with a deeper network, we adopt residual connections (He et al., 2016) for the FCNN (deep network fails to be trained in this task without residual connections) and use the ReLU activation function. The FCNN [1-(128-128)-(128-128)-(128-128)-(128-128)-1] with four residual blocks of width 128 shows similar results to the shallow FCNN without residual connection (see Fig. 1 (b)). 
In this case, the difference in layer-wise convergence rate can be observed even earlier (i.e., " + }, + { + "bbox": [ + 104, + 484, + 506, + 577 + ], + "type": "inline_equation", + "content": "C_{Res - Block_1}^{(t_0,t_5)} \\approx 2C_{Res - Block_4}^{(t_0,t_5)}" + }, + { + "bbox": [ + 104, + 484, + 506, + 577 + ], + "type": "text", + "content": "), which shows that the layer convergence bias also happens for deeper FCNNs with residual connections. It is noteworthy that our convergence metric is crucial to observe the layer convergence bias, which is elaborated in Appendix A.2." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 106, + 596, + 302, + 671 + ], + "blocks": [ + { + "bbox": [ + 106, + 596, + 302, + 671 + ], + "lines": [ + { + "bbox": [ + 106, + 596, + 302, + 671 + ], + "spans": [ + { + "bbox": [ + 106, + 596, + 302, + 671 + ], + "type": "image", + "image_path": "01daa463fcec291619bed042e058d7cd00a900134e3e6ef5025120b39f7e0c8d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 675, + 280, + 686 + ], + "lines": [ + { + "bbox": [ + 140, + 675, + 280, + 686 + ], + "spans": [ + { + "bbox": [ + 140, + 675, + 280, + 686 + ], + "type": "text", + "content": "(a) FCNN without residual connection" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 596, + 503, + 670 + ], + "blocks": [ + { + "bbox": [ + 306, + 596, + 503, + 670 + ], + "lines": [ + { + "bbox": [ + 306, + 596, + 503, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 596, + 503, + 670 + ], + "type": "image", + "image_path": "366ed45631c4991c2eb157aa0417ce6e0d19f38cd21676dff2458c94a8777658.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 675, + 474, + 686 + ], + "lines": [ + { + "bbox": [ + 343, + 675, + 474, + 686 + ], + "spans": [ + { + "bbox": [ + 343, + 675, + 474, + 686 + ], + 
"type": "text", + "content": "(b) FCNN with four residual blocks" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 695, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 104, + 695, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 104, + 695, + 504, + 730 + ], + "type": "text", + "content": "Figure 1: Left (a,b): The absolute mean gradient values for different layers for FCNNs w/o residual connections in training. For both networks, deeper layers have larger gradients. Right (a,b): The convergence process of different layers for FCNNs. Shallower layers converge faster." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 147, + 79, + 310, + 167 + ], + "blocks": [ + { + "bbox": [ + 147, + 79, + 310, + 167 + ], + "lines": [ + { + "bbox": [ + 147, + 79, + 310, + 167 + ], + "spans": [ + { + "bbox": [ + 147, + 79, + 310, + 167 + ], + "type": "image", + "image_path": "38ce4d83602ce56082f5d9781a1105280b6566c07103a885666d054749025f08.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 194, + 173, + 288, + 182 + ], + "lines": [ + { + "bbox": [ + 194, + 173, + 288, + 182 + ], + "spans": [ + { + "bbox": [ + 
194, + 173, + 288, + 182 + ], + "type": "text", + "content": "(a) ResNet-50 Val Acc " + }, + { + "bbox": [ + 194, + 173, + 288, + 182 + ], + "type": "inline_equation", + "content": "73.24\\%" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 314, + 80, + 462, + 167 + ], + "blocks": [ + { + "bbox": [ + 314, + 80, + 462, + 167 + ], + "lines": [ + { + "bbox": [ + 314, + 80, + 462, + 167 + ], + "spans": [ + { + "bbox": [ + 314, + 80, + 462, + 167 + ], + "type": "image", + "image_path": "a422b587ba3b45adb3388e705d0e42809d28d6d38418adbde044e773073a06b6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 347, + 173, + 436, + 182 + ], + "lines": [ + { + "bbox": [ + 347, + 173, + 436, + 182 + ], + "spans": [ + { + "bbox": [ + 347, + 173, + 436, + 182 + ], + "type": "text", + "content": "(b) VGG-19 Val Acc " + }, + { + "bbox": [ + 347, + 173, + 436, + 182 + ], + "type": "inline_equation", + "content": "71.89\\%" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 191, + 504, + 225 + ], + "lines": [ + { + "bbox": [ + 104, + 191, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 504, + 225 + ], + "type": "text", + "content": "Figure 2: The convergence process of ResNet-50 and VGG-19 on ImageNet. During the first 50 epochs, shallower layers converge much faster than deeper layers. After the learning rate decays at the 50th epoch, parameters of deeper layers accelerate to move to their convergence points." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "content": "Clearly, these results cannot reconcile with the previous view that larger gradients bring a higher learning speed for deeper layers, at least for the DNNs used in this work. Instead, from the optimization point of view, the parameters of shallower layers are learning faster to converge." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 287, + 397, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 287, + 397, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 397, + 298 + ], + "type": "text", + "content": "3.2 LAYER CONVERGENCE BIAS IN CONVOLUTIONAL NETWORKS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 308, + 504, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 343 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 343 + ], + "type": "text", + "content": "Real-world datasets are very different from the synthetic data used in our previous experiments. In order to utilize the layer convergence bias to understand and better improve DNNs in real applications, it is important to verify whether the layer convergence bias holds for CNNs on images." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 347, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 504, + 392 + ], + "type": "text", + "content": "In the following experiments, we examine the layer-wise convergence process on ImageNet (Rusakovsky et al., 2015) dataset with both ResNet-50 (He et al., 2016) and VGG-19 (Simonyan & Zisserman, 2014). 
We train the CNNs for 120 epochs with learning rate decay at the 50th epoch " + }, + { + "bbox": [ + 104, + 347, + 504, + 392 + ], + "type": "inline_equation", + "content": "(0.1\\rightarrow 0.01)" + }, + { + "bbox": [ + 104, + 347, + 504, + 392 + ], + "type": "text", + "content": " and the 100th epoch " + }, + { + "bbox": [ + 104, + 347, + 504, + 392 + ], + "type": "inline_equation", + "content": "(0.01\\to 0.001)" + }, + { + "bbox": [ + 104, + 347, + 504, + 392 + ], + "type": "text", + "content": ". The training processes are shown in Fig. 2." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 397, + 504, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 471 + ], + "type": "text", + "content": "For ResNet-50, we visualize the learning process of the first convolutional layer and its subsequent four stages. One can easily observe that at the beginning of training, the shallower layers converge much faster than the deeper layers (" + }, + { + "bbox": [ + 104, + 397, + 504, + 471 + ], + "type": "inline_equation", + "content": "C_{Stage1}^{(t_0,t_{20})} \\approx 3C_{Stage4}^{(t_0,t_{20})}" + }, + { + "bbox": [ + 104, + 397, + 504, + 471 + ], + "type": "text", + "content": "). However, after the learning rate decays at the 50th epoch, deeper layers begin to learn effectively and achieve a higher convergence rate than the shallower layers (" + }, + { + "bbox": [ + 104, + 397, + 504, + 471 + ], + "type": "inline_equation", + "content": "C_{Stage1}^{(t_{50},t_{60})} \\approx 0.5C_{Stage4}^{(t_{50},t_{60})}" + }, + { + "bbox": [ + 104, + 397, + 504, + 471 + ], + "type": "text", + "content": "). We conjecture that the initial learning rate is too large for the deeper layers to learn." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 476, + 504, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 476, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 504, + 545 + ], + "type": "text", + "content": "For VGG-19, we visualize its 1st, 5th, 9th, 13th, and 17th layers. This network shows a more significant convergence difference between layers than ResNet-50. At the first training stage with the initial learning rate, " + }, + { + "bbox": [ + 104, + 476, + 504, + 545 + ], + "type": "inline_equation", + "content": "\\| \\theta_l^{(t_5)} - \\theta_l^*\\| >\\| \\theta_l^{(t_0)} - \\theta_l^*\\|" + }, + { + "bbox": [ + 104, + 476, + 504, + 545 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 476, + 504, + 545 + ], + "type": "inline_equation", + "content": "l\\in \\{5,9,13,17\\}" + }, + { + "bbox": [ + 104, + 476, + 504, + 545 + ], + "type": "text", + "content": ", which means that all layers but the first one even slightly diverge. Usually, the divergence appears when the learning rate is too large. This phenomenon confirms that the deeper layers cannot effectively learn with the large learning rate at the beginning." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 550, + 504, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 504, + 585 + ], + "type": "text", + "content": "The experiments of FCNNs and CNNs verify that layer convergence bias is a common phenomenon for DNNs. In Section 5 and Appendix A.3, A.4, we discuss the factors that would affect the phenomenon, and some in-depth findings they reveal." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 601, + 384, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 384, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 384, + 613 + ], + "type": "text", + "content": "4 MECHANISM BEHIND LAYER CONVERGENCE BIAS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 627, + 504, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 627, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 627, + 504, + 661 + ], + "type": "text", + "content": "So far, our investigation shows that the seemingly-right perspective for linking the layer-wise gradient and convergence rate is tenuous, at best. Both FCNNs and CNNs demonstrate an evident bias that shallower layers learn faster. Can we explain why this is the case?" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 666, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 505, + 733 + ], + "type": "text", + "content": "Gradient Predictiveness. Since gradient values cannot determine the convergence rate, we wonder if the directions of the gradients play a more critical role. More chaotic update directions make convergence slower. Here we examine the gradient predictiveness (Santurkar et al., 2018) of different layers. If the gradient behavior is \"predictive\", less change in the gradient directions would appear when 1) the gradients are calculated with different batches of data; 2) the parameters of other layers update. Predictiveness can also be simply understood as the stability of gradient direction." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 127, + 81, + 303, + 163 + ], + "blocks": [ + { + "bbox": [ + 127, + 81, + 303, + 163 + ], + "lines": [ + { + "bbox": [ + 127, + 81, + 303, + 163 + ], + "spans": [ + { + "bbox": [ + 127, + 81, + 303, + 163 + ], + "type": "image", + "image_path": "d8a1116278bfd7475a7dd383c92f1ae3120ea0e0aab2194a5560d1e0174023af.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 163, + 165, + 279, + 175 + ], + "lines": [ + { + "bbox": [ + 163, + 165, + 279, + 175 + ], + "spans": [ + { + "bbox": [ + 163, + 165, + 279, + 175 + ], + "type": "text", + "content": "(a) Gradient predictiveness w.r.t. 
data" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 306, + 81, + 482, + 163 + ], + "blocks": [ + { + "bbox": [ + 306, + 81, + 482, + 163 + ], + "lines": [ + { + "bbox": [ + 306, + 81, + 482, + 163 + ], + "spans": [ + { + "bbox": [ + 306, + 81, + 482, + 163 + ], + "type": "image", + "image_path": "f49f932ecb10171b0587245b6b5f4bc458ab696742d61826cebd388d3706a4ac.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 330, + 165, + 468, + 175 + ], + "lines": [ + { + "bbox": [ + 330, + 165, + 468, + 175 + ], + "spans": [ + { + "bbox": [ + 330, + 165, + 468, + 175 + ], + "type": "text", + "content": "(b) Gradient predictiveness w.r.t. parameters" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 185, + 504, + 209 + ], + "lines": [ + { + "bbox": [ + 104, + 185, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 185, + 504, + 209 + ], + "type": "text", + "content": "Figure 3: The gradient predictiveness of shallower and deeper layers of FCNN. The learning rate decreases from 0.1 to 0.01 at Epoch 150." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "text", + "content": "Definition 4.1 Let " + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "inline_equation", + "content": "(x^{(t)},y^{(t)})" + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "text", + "content": " be a batch of input-label pairs for the DNN to train at time " + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "inline_equation", + "content": "(x^{\\prime (t)},y^{\\prime (t)})" + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "text", + "content": " be another batch of data. We define the gradient predictiveness of the " + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "text", + "content": "th layer at time " + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "text", + "content": " w.r.t. data as the cosine similarity " + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "inline_equation", + "content": "\\begin{array}{r}\\operatorname {sim}(G_{l,t},G_{l,t}^{\\prime}) = \\frac{\\|G_{l,t}G_{l,t}^{\\prime}\\|}{\\|G_{l,t}\\|\\|G_{l,t}^{\\prime}\\|}\\in [-1,1] \\end{array}" + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "text", + "content": " . Likewise, the gradient predictiveness w.r.t. 
parameters is defined as " + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "inline_equation", + "content": "\\operatorname {sim}(G_{l,t},G_{l,t}^{\\prime \\prime})" + }, + { + "bbox": [ + 104, + 228, + 506, + 285 + ], + "type": "text", + "content": " , where" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 171, + 291, + 437, + 352 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 291, + 437, + 352 + ], + "spans": [ + { + "bbox": [ + 171, + 291, + 437, + 352 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} G _ {l, t} = \\nabla_ {\\theta_ {l} ^ {(t)}} L \\left(\\theta_ {1} ^ {(t)}, \\dots , \\theta_ {L} ^ {(t)}; x ^ {(t)}, y ^ {(t)}\\right) \\\\ G _ {l, t} ^ {\\prime} = \\nabla_ {\\theta_ {l} ^ {(t)}} L (\\theta_ {1} ^ {(t)}, \\dots , \\theta_ {L} ^ {(t)}; x ^ {\\prime (t)}, y ^ {\\prime (t)}) \\\\ G _ {l, t} ^ {\\prime \\prime} = \\nabla_ {\\theta_ {l} ^ {(t)}} L (\\theta_ {1} ^ {(t + 1)}, \\dots , \\theta_ {l - 1} ^ {(t + 1)}, \\theta_ {l} ^ {(t)}, \\theta_ {l + 1} ^ {(t + 1)}, \\dots , \\theta_ {L} ^ {(t + 1)}; x ^ {(t)}, y ^ {(t)}) \\\\ \\end{array}", + "image_path": "d82f52ebd532e4ba4f29a20fe9edb903fbc6ae3358662f727f82ef6ef8bcec24.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "inline_equation", + "content": "G_{l,t}" + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "text", + "content": " corresponds to the gradient of " + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\theta_l^{(t)}" + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "inline_equation", + "content": "G_{l,t}^{\\prime}" + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "text", + "content": " is the gradient of this layer with another batch of data, while " + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "inline_equation", + "content": "G_{l,t}^{\\prime \\prime}" + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "text", + "content": " means the gradient after all the other layers have updated to new values. Therefore, " + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "inline_equation", + "content": "sim(G_{l,t},G_{l,t}^{\\prime})" + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "text", + "content": " indicates the stability of gradients with different data batches. " + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "inline_equation", + "content": "sim(G_{l,t},G_{l,t}^{\\prime \\prime})" + }, + { + "bbox": [ + 104, + 358, + 504, + 431 + ], + "type": "text", + "content": " reflects whether the currently estimated gradient is in a consistent decreasing direction when the loss landscape is affected by the updating of other layers' parameters. The gradient predictiveness during training is shown in Fig. 3, where Res-Block 1 has more predictive gradients than Res-Block 4." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "text", + "content": "Visualizing the Loss Landscapes. We are curious about why gradients for deeper layers have poorer predictiveness. A hypothesis is that the loss landscapes for deeper layers are more rugged, making the parameters fluctuate more. A straightforward method to validate this hypothesis is plotting the loss landscapes for the parameters. 
To do this for a particular layer " + }, + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "text", + "content": ", one can choose a central point " + }, + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "inline_equation", + "content": "\\theta_{l}^{*}" + }, + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "text", + "content": " and two direction vectors " + }, + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "inline_equation", + "content": "d_{l,1}" + }, + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "inline_equation", + "content": "d_{l,2}" + }, + { + "bbox": [ + 104, + 436, + 504, + 492 + ], + "type": "text", + "content": ". Then the loss landscape can be drawn with" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 228, + 497, + 381, + 510 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 497, + 381, + 510 + ], + "spans": [ + { + "bbox": [ + 228, + 497, + 381, + 510 + ], + "type": "interline_equation", + "content": "f \\left(\\beta_ {1}, \\beta_ {2}\\right) = L \\left(\\theta_ {l} ^ {*} + \\beta_ {1} d _ {l, 1} + \\beta_ {2} d _ {l, 2}\\right)", + "image_path": "44b56cbd403c6c2c225b8921d00ce80bfeed751862b1db3c68de2c27928b38f7.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": "in the 3D space with " + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\beta_{1},\\beta_{2}" + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": " forming a simplified parameter space. 
In this work, we generate random Gaussian directions for different layers, and normalize them to obtain the same norm of the corresponding layer. Specifically, we make the replacement " + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "inline_equation", + "content": "d_{l}\\gets \\frac{d_{l}}{\\|d_{l}\\|}\\| \\theta_{l}^{*}\\|" + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": " for a fully connected layer. For a convolutional layer, we use filter-wise normalization " + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "inline_equation", + "content": "d_l^k\\gets \\frac{d_l^k}{\\|d_l^k\\|}\\| \\theta_l^{k*}\\|" + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": " as in (Li et al., 2018), where " + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "inline_equation", + "content": "d_l^k" + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": " represents the " + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": "th filter of the " + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": "th layer. 
We set both " + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\beta_{1}" + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\beta_{2}" + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": " in the domain of " + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "inline_equation", + "content": "[-1,1]" + }, + { + "bbox": [ + 104, + 520, + 504, + 590 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 594, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 651 + ], + "type": "text", + "content": "**Landscapes for FCNN.** The loss landscapes for four residual blocks of the FCNN are shown in Fig. 4. For the shallower blocks, the surfaces are flatter near the minimizer, meaning that the gradient magnitudes may be small. However, small gradients do not necessarily lead to slow learning speed in this case. Combined with the gradient predictiveness discussed above, a flatter loss landscape may lead to more consistent gradient directions, making the learning more smooth." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "Landscapes for CNNs. The loss landscapes for ResNet-50 and VGG-19 on ImageNet are shown in Fig. 5. It is interesting that deep convolutional networks with/without residual connections present totally different loss landscapes. 
For ResNet-50, its landscapes near the convergence point " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\theta_l^*" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " are smooth and nearly convex, making the neural network easier to train. On the contrary, VGG-19 has much more shattered landscapes, the initial iterations probably lie in the chaotic regions, prohibiting its training (Balduzzi et al., 2017). This may explain the much less efficient convergence towards the optimal point for VGG than ResNet at the initial phase (Fig. 2)." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 79, + 200, + 148 + ], + "blocks": [ + { + "bbox": [ + 109, + 79, + 200, + 148 + ], + "lines": [ + { + "bbox": [ + 109, + 79, + 200, + 148 + ], + "spans": [ + { + "bbox": [ + 109, + 79, + 200, + 148 + ], + "type": "image", + "image_path": "dcf9e20c6e81e7c73e4b19b4206b4afdb783893a1e47d077efe5cca2fb4b1872.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 149, + 177, + 158 + ], + "lines": [ + { + "bbox": [ + 130, + 149, + 177, + 158 + ], + "spans": [ + { + "bbox": [ + 130, + 149, + 177, + 158 + ], + "type": "text", + "content": "(a) 
Res-Block 1" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 210, + 79, + 301, + 147 + ], + "blocks": [ + { + "bbox": [ + 210, + 79, + 301, + 147 + ], + "lines": [ + { + "bbox": [ + 210, + 79, + 301, + 147 + ], + "spans": [ + { + "bbox": [ + 210, + 79, + 301, + 147 + ], + "type": "image", + "image_path": "ff6a098118a0a6866965702a9e436500e7a6d9c8c802352f9ca8ab635e08db12.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 234, + 149, + 281, + 158 + ], + "lines": [ + { + "bbox": [ + 234, + 149, + 281, + 158 + ], + "spans": [ + { + "bbox": [ + 234, + 149, + 281, + 158 + ], + "type": "text", + "content": "(b) Res-Block 2" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 313, + 79, + 403, + 147 + ], + "blocks": [ + { + "bbox": [ + 313, + 79, + 403, + 147 + ], + "lines": [ + { + "bbox": [ + 313, + 79, + 403, + 147 + ], + "spans": [ + { + "bbox": [ + 313, + 79, + 403, + 147 + ], + "type": "image", + "image_path": "1787402fa4768622d7529fd2645f10d7054d15e5be80417749af249eb960c844.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 149, + 383, + 158 + ], + "lines": [ + { + "bbox": [ + 336, + 149, + 383, + 158 + ], + "spans": [ + { + "bbox": [ + 336, + 149, + 383, + 158 + ], + "type": "text", + "content": "(c) Res-Block 3" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 414, + 80, + 504, + 147 + ], + "blocks": [ + { + "bbox": [ + 414, + 80, + 504, + 147 + ], + "lines": [ + { + "bbox": [ + 414, + 80, + 504, + 147 + ], + "spans": [ + { + "bbox": [ + 414, + 80, + 504, + 147 + ], + "type": "image", + "image_path": "465dae870d469c4cd45fee3ef006b75e4bd48370444c96442e2dc92599785c90.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + 
"type": "image_body" + }, + { + "bbox": [ + 436, + 149, + 482, + 158 + ], + "lines": [ + { + "bbox": [ + 436, + 149, + 482, + 158 + ], + "spans": [ + { + "bbox": [ + 436, + 149, + 482, + 158 + ], + "type": "text", + "content": "(d) Res-Block 4" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 106, + 201, + 202, + 271 + ], + "blocks": [ + { + "bbox": [ + 104, + 167, + 504, + 190 + ], + "lines": [ + { + "bbox": [ + 104, + 167, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 504, + 190 + ], + "type": "text", + "content": "Figure 4: The loss landscapes of different layers of FCNN. Deeper layers are optimized on more rugged landscapes, slowing down the learning process." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 201, + 202, + 271 + ], + "lines": [ + { + "bbox": [ + 106, + 201, + 202, + 271 + ], + "spans": [ + { + "bbox": [ + 106, + 201, + 202, + 271 + ], + "type": "image", + "image_path": "eec5cf65633ddf56eafd0ca7308921055aeac035f68d35a2c9b618030e483e68.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 113, + 275, + 196, + 285 + ], + "lines": [ + { + "bbox": [ + 113, + 275, + 196, + 285 + ], + "spans": [ + { + "bbox": [ + 113, + 275, + 196, + 285 + ], + "type": "text", + "content": "(a) ResNet-50 Res-Block 1" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 295, + 504, + 329 + ], + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 329 + ], + "type": "text", + "content": "Figure 5: The loss landscapes of different layers of ResNet-50 (a,b) and VGG-19 (c,d) on ImageNet. The shallower layers for both networks have flatter minima, making them converge faster than the deeper layers. The plots for all layers can be found in Appendix A.5." 
+ } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 206, + 201, + 302, + 271 + ], + "blocks": [ + { + "bbox": [ + 206, + 201, + 302, + 271 + ], + "lines": [ + { + "bbox": [ + 206, + 201, + 302, + 271 + ], + "spans": [ + { + "bbox": [ + 206, + 201, + 302, + 271 + ], + "type": "image", + "image_path": "279c1246dcbe7bca3a88308d661c3f202523a38c709bd62867e8e958c5143087.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 212, + 275, + 297, + 285 + ], + "lines": [ + { + "bbox": [ + 212, + 275, + 297, + 285 + ], + "spans": [ + { + "bbox": [ + 212, + 275, + 297, + 285 + ], + "type": "text", + "content": "(b) ResNet-50 Res-Block 4" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 308, + 202, + 403, + 271 + ], + "blocks": [ + { + "bbox": [ + 308, + 202, + 403, + 271 + ], + "lines": [ + { + "bbox": [ + 308, + 202, + 403, + 271 + ], + "spans": [ + { + "bbox": [ + 308, + 202, + 403, + 271 + ], + "type": "image", + "image_path": "3ae53b8f727a9a11ba44cd6c77636a06eed18e34892842dc485e7307f69fe3c3.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 276, + 387, + 285 + ], + "lines": [ + { + "bbox": [ + 324, + 276, + 387, + 285 + ], + "spans": [ + { + "bbox": [ + 324, + 276, + 387, + 285 + ], + "type": "text", + "content": "(c) VGG-19 Layer 1" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 408, + 202, + 503, + 271 + ], + "blocks": [ + { + "bbox": [ + 408, + 202, + 503, + 271 + ], + "lines": [ + { + "bbox": [ + 408, + 202, + 503, + 271 + ], + "spans": [ + { + "bbox": [ + 408, + 202, + 503, + 271 + ], + "type": "image", + "image_path": "4d6c447583c0f747b2ca42a90a6cd78058ba671f16c8032956bced0511abb923.jpg" + } + ] + } + ], + "index": 
16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 425, + 276, + 492, + 285 + ], + "lines": [ + { + "bbox": [ + 425, + 276, + 492, + 285 + ], + "spans": [ + { + "bbox": [ + 425, + 276, + 492, + 285 + ], + "type": "text", + "content": "(d) VGG-19 Layer 13" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 349, + 504, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 349, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 504, + 472 + ], + "type": "text", + "content": "Comparing different layers in the CNNs, the answer for layer convergence bias becomes clearer. The key difference between different layers' loss landscapes of ResNet-50 is the sharpness of the local minima (Fig. 5 (a,b)). We conjecture it is because of a well-known fact that the shallower layers of CNNs tend to learn general features which are applicable to various datasets and tasks, while the deeper layers usually learn task-specific features (Yosinski et al., 2014). Before our work, (Zeiler & Fergus, 2014) also revealed that the general features in a five-layer CNN stabilized faster than the specific features. Since the general features are more evenly distributed, they usually cause less fluctuation for training, leading to flatter optima. Theoretically, flatter minimizers are easier to be found by SGD optimizers (Pan et al., 2020). For VGG-19, its shallower and deeper layers also have flatter and sharper minima (Fig. 5 (c,d)), respectively. The shattered loss landscape for its deeper layers may also explain its inefficient learning process with a large learning rate (Fig. 2 (b))." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": "Here we summarize the mechanism behind layer convergence bias: the parameters of shallower layers are easier to optimize due to their flatter loss landscapes. At a higher level, shallower layers learn general features, which are usually easier." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 525, + 428, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 428, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 428, + 538 + ], + "type": "text", + "content": "5 DEeper LAYERS FIT THE HIGH-FREQUENCY COMPONENTS" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 550, + 504, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 504, + 628 + ], + "type": "text", + "content": "Recent advances in the learning process of DNNs (Rahaman et al., 2019; Ronen et al., 2019; Xu & Zhou, 2021) revealed that the low-frequency components of the target function are fitted much faster than the high-frequency components. There is a natural question about whether there is some inherent link between layer convergence bias and this result. In this section, we investigate the answer, and surprisingly find that: the low-frequency parts are usually fitted by the shallower layers, while the remaining higher frequencies are mainly learned by the deeper layers. It provides us with an alternative perspective to understand the layer convergence bias." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "text", + "content": "The Correspondence for FCNN. With the residual structures, we can straightforwardly visualize what each block of a FCNN learns. Considering the FCNN with one input layer " + }, + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "inline_equation", + "content": "z_0 = T_0(x) : \\mathbb{R}^1 \\to \\mathbb{R}^{128}" + }, + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "text", + "content": ", four residual blocks " + }, + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "inline_equation", + "content": "z_l = T_l'(z_{l-1}) = T_l(z_{l-1}) + z_{l-1} : \\mathbb{R}^{128} \\to \\mathbb{R}^{128}" + }, + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "inline_equation", + "content": "l \\in \\{1, 2, 3, 4\\}" + }, + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "text", + "content": ", and an output layer " + }, + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "inline_equation", + "content": "y = T_5(z_4) : \\mathbb{R}^{128} \\to \\mathbb{R}^1" + }, + { + "bbox": [ + 104, + 632, + 504, + 677 + ], + "type": "text", + "content": ". 
The whole network can be expressed as" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 681, + 494, + 695 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 681, + 494, + 695 + ], + "spans": [ + { + "bbox": [ + 114, + 681, + 494, + 695 + ], + "type": "interline_equation", + "content": "y = T _ {5} \\left(z _ {1} + T _ {2} \\left(z _ {1}\\right) + T _ {3} \\left(z _ {2}\\right) + T _ {4} \\left(z _ {3}\\right)\\right) = T _ {5} \\left(z _ {1}\\right) + T _ {5} \\left(T _ {2} \\left(z _ {1}\\right)\\right) + T _ {5} \\left(T _ {3} \\left(z _ {2}\\right)\\right) + T _ {5} \\left(T _ {4} \\left(z _ {3}\\right)\\right)", + "image_path": "e32594909d7259f0a56affa1b95871eb2b213dd6c20ad1262f7e52203d2f2736.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "content": "if the output layer " + }, + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "inline_equation", + "content": "T_{5}" + }, + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "content": " is a linear transformation. The fitting results for each layer are shown in Fig. 6. It can be seen that the deeper layers tend to fit the more complex components of the target function " + }, + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "inline_equation", + "content": "y = \\sin (x) + \\frac{1}{3}\\sin (3x) + \\frac{1}{10}\\sin (10x) + \\frac{1}{30}\\sin (30x)" + }, + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "content": ". 
Besides the curvature, the fitted functions" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 79, + 182, + 134 + ], + "blocks": [ + { + "bbox": [ + 108, + 79, + 182, + 134 + ], + "lines": [ + { + "bbox": [ + 108, + 79, + 182, + 134 + ], + "spans": [ + { + "bbox": [ + 108, + 79, + 182, + 134 + ], + "type": "image", + "image_path": "bb78dfff1b269ef8e0ea2ec041cad774391c664cadbe592299e2d6fde9fb54c6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 139, + 150, + 148 + ], + "lines": [ + { + "bbox": [ + 143, + 139, + 150, + 148 + ], + "spans": [ + { + "bbox": [ + 143, + 139, + 150, + 148 + ], + "type": "text", + "content": "y" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 184, + 80, + 263, + 135 + ], + "blocks": [ + { + "bbox": [ + 184, + 80, + 263, + 135 + ], + "lines": [ + { + "bbox": [ + 184, + 80, + 263, + 135 + ], + "spans": [ + { + "bbox": [ + 184, + 80, + 263, + 135 + ], + "type": "image", + "image_path": "30d9145078e0f083998f1660437ed0ce09d7cb65f2eb734b7994195063078aa4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 138, + 240, + 148 + ], + "lines": [ 
+ { + "bbox": [ + 216, + 138, + 240, + 148 + ], + "spans": [ + { + "bbox": [ + 216, + 138, + 240, + 148 + ], + "type": "inline_equation", + "content": "T_{5}(z_{1})" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 159, + 504, + 182 + ], + "lines": [ + { + "bbox": [ + 104, + 159, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 504, + 182 + ], + "type": "text", + "content": "Figure 6: The visualization of what each residual block of the FCNN learns. From the first to the fourth block, the fitted function becomes more complex with smaller amplitude." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 264, + 81, + 342, + 135 + ], + "blocks": [ + { + "bbox": [ + 264, + 81, + 342, + 135 + ], + "lines": [ + { + "bbox": [ + 264, + 81, + 342, + 135 + ], + "spans": [ + { + "bbox": [ + 264, + 81, + 342, + 135 + ], + "type": "image", + "image_path": "d6ef0e059f3f0dc1645ae9f7607ec365446cff1498dff72ba12042b3787d4a0d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 287, + 137, + 326, + 148 + ], + "lines": [ + { + "bbox": [ + 287, + 137, + 326, + 148 + ], + "spans": [ + { + "bbox": [ + 287, + 137, + 326, + 148 + ], + "type": "inline_equation", + "content": "T_{5}(T_{2}(z_{1}))" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 343, + 81, + 421, + 135 + ], + "blocks": [ + { + "bbox": [ + 343, + 81, + 421, + 135 + ], + "lines": [ + { + "bbox": [ + 343, + 81, + 421, + 135 + ], + "spans": [ + { + "bbox": [ + 343, + 81, + 421, + 135 + ], + "type": "image", + "image_path": "a30a74cbbe2923ef6bbaa39191a64525217c3f262158cad296ab2fe71c630c51.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 367, + 137, + 406, + 148 + ], + "lines": [ + { + "bbox": [ + 367, + 137, + 
406, + 148 + ], + "spans": [ + { + "bbox": [ + 367, + 137, + 406, + 148 + ], + "type": "inline_equation", + "content": "T_{5}(T_{3}(z_{2}))" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 423, + 81, + 502, + 135 + ], + "blocks": [ + { + "bbox": [ + 423, + 81, + 502, + 135 + ], + "lines": [ + { + "bbox": [ + 423, + 81, + 502, + 135 + ], + "spans": [ + { + "bbox": [ + 423, + 81, + 502, + 135 + ], + "type": "image", + "image_path": "25db2ae0c1966056c496cf9e6d63337ebe2588d47d0006f42ed1f6b9915cde76.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 451, + 138, + 489, + 148 + ], + "lines": [ + { + "bbox": [ + 451, + 138, + 489, + 148 + ], + "spans": [ + { + "bbox": [ + 451, + 138, + 489, + 148 + ], + "type": "inline_equation", + "content": "T_{5}(T_{4}(z_{3}))" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 146, + 209, + 301, + 284 + ], + "blocks": [ + { + "bbox": [ + 146, + 209, + 301, + 284 + ], + "lines": [ + { + "bbox": [ + 146, + 209, + 301, + 284 + ], + "spans": [ + { + "bbox": [ + 146, + 209, + 301, + 284 + ], + "type": "image", + "image_path": "a1a9026ccad7593f40929b85467635e4d9c29f8acc3f9ec6c9a14cdb9e90f51b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 288, + 249, + 297 + ], + "lines": [ + { + "bbox": [ + 211, + 288, + 249, + 297 + ], + "spans": [ + { + "bbox": [ + 211, + 288, + 249, + 297 + ], + "type": "text", + "content": "(a) ResNet-50" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "type": "text", + "content": "Figure 7: The visualization of response frequencies for CNNs. 
As the training goes on, deeper layers become more sensitive to perturbations, indicating that they have higher response frequencies." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 309, + 209, + 463, + 284 + ], + "blocks": [ + { + "bbox": [ + 309, + 209, + 463, + 284 + ], + "lines": [ + { + "bbox": [ + 309, + 209, + 463, + 284 + ], + "spans": [ + { + "bbox": [ + 309, + 209, + 463, + 284 + ], + "type": "image", + "image_path": "79e1ba213d462d7a60bd8a5e3e789a4ff15bf0cfd18dcfb9ce196cc6f8b6035b.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 288, + 412, + 297 + ], + "lines": [ + { + "bbox": [ + 378, + 288, + 412, + 297 + ], + "spans": [ + { + "bbox": [ + 378, + 288, + 412, + 297 + ], + "type": "text", + "content": "(b) VGG-19" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 351, + 504, + 386 + ], + "type": "text", + "content": "are also consistent with the amplitudes of the components. Specifically, the ranges of the four fitted functions are 2.3, 0.7, 0.5, and 0.06, which are similar to the four components. This result further confirms the relationship between layers and frequencies." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "text", + "content": "The Correspondence for CNNs. For CNNs, we verify their layer-frequency correspondence through the response frequency (Xu et al., 2019). 
In a nutshell, if an input-output mapping " + }, + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "text", + "content": " possesses significant high frequencies, then a small change in its input induces a large change in the output. We generate standard Gaussian-distributed input " + }, + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "text", + "content": " for different residual blocks of ResNet-50 and different layers of VGG-19. At the same time, small Gaussian perturbation " + }, + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "inline_equation", + "content": "\\Delta x" + }, + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "text", + "content": " is added to the input. A larger change " + }, + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "inline_equation", + "content": "\\Delta y" + }, + { + "bbox": [ + 104, + 390, + 506, + 523 + ], + "type": "text", + "content": " of the layer output means the layer handles higher frequencies. The response frequencies are shown in Fig. 7. At the first 5 epochs of training on ImageNet, different layers for both ResNet-50 and VGG-19 do not show significantly different response frequencies. But after about ten epochs, the response frequencies for deeper layers (e.g., stage 4 for ResNet-50, layer 13 for VGG-19) increase while the shallower layers show lower response frequencies. Therefore, we conclude that the layer-frequency correspondence also holds for CNNs. In addition, it is not an innate nature of the layers, but a result of the training process." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 528, + 506, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 683 + ], + "type": "text", + "content": "How the target frequency affects layer convergence bias? To demonstrate the effect of layer-frequency correspondence on the layer convergence bias, we try fitting simpler targets with less high-frequency components, and see what would happen to the layer-wise convergence rate of FCNN. In Fig. 8 (a-d), we only keep several lowest frequencies of the target, e.g., the target function " + }, + { + "bbox": [ + 104, + 528, + 506, + 683 + ], + "type": "inline_equation", + "content": "y = \\sin(x)" + }, + { + "bbox": [ + 104, + 528, + 506, + 683 + ], + "type": "text", + "content": " is named \"Complexity=1\", and " + }, + { + "bbox": [ + 104, + 528, + 506, + 683 + ], + "type": "inline_equation", + "content": "y = \\sin(x) + \\frac{1}{3} \\sin(3x)" + }, + { + "bbox": [ + 104, + 528, + 506, + 683 + ], + "type": "text", + "content": " is named \"Complexity=2\", etc. After discarding more and more high-frequency components, the deeper layers converge faster and faster. In this case, the layer convergence bias does not strictly hold anymore. In Fig. 8 (b), the Res-Block 4 converges faster than Res-Block 3 after the 5th epoch. In Fig. 8 (c), the Res-Block 4 converges with a similar speed as Res-Block 2, while the Res-Block 3 even learns faster than Res-Block 2. It seems that removing the high-frequency component that corresponds to a deep layer can effectively accelerate its training. For CNNs, we also observe similar phenomena (Fig. 8 (e-h)). On simpler targets (e.g., CIFAR 10), the deeper layers converge faster than on more complex targets (e.g., CIFAR100). An implication of this result is that the data complexity may be too low for the model. 
In practice, CIFAR datasets only need ResNet-18 to fit well (Wu et al., 2020)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "In fact, (Rahaman et al., 2019) had shown that different layers have some links to different frequencies, but the authors did not provide further insight for this phenomenon. This work verifies the underlying relationship between layers and fitting frequencies, and establishes a connection for this relationship to the layer convergence bias." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 79, + 222, + 162 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 222, + 162 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 222, + 162 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 222, + 162 + ], + "type": "image", + "image_path": "c2703188bafc4178271ae4735f09e3fbe016ef647c53c152323a69e9d1cd12f4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 163, + 211, + 174 + ], + "lines": [ + { + "bbox": [ + 135, + 163, + 211, + 174 + ], + "spans": [ + { + "bbox": [ + 
135, + 163, + 211, + 174 + ], + "type": "text", + "content": "(a) FCNN Complexity=4" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 224, + 80, + 315, + 162 + ], + "blocks": [ + { + "bbox": [ + 224, + 80, + 315, + 162 + ], + "lines": [ + { + "bbox": [ + 224, + 80, + 315, + 162 + ], + "spans": [ + { + "bbox": [ + 224, + 80, + 315, + 162 + ], + "type": "image", + "image_path": "d49a2ff4a53e499804eb29b593fb5e0e3e0f37fc6cfc4cc3d3fe253fb2182f8b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 231, + 164, + 307, + 174 + ], + "lines": [ + { + "bbox": [ + 231, + 164, + 307, + 174 + ], + "spans": [ + { + "bbox": [ + 231, + 164, + 307, + 174 + ], + "type": "text", + "content": "(b) FCNN Complexity=3" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 317, + 80, + 408, + 162 + ], + "blocks": [ + { + "bbox": [ + 317, + 80, + 408, + 162 + ], + "lines": [ + { + "bbox": [ + 317, + 80, + 408, + 162 + ], + "spans": [ + { + "bbox": [ + 317, + 80, + 408, + 162 + ], + "type": "image", + "image_path": "5c16f8c07486995949efa3f7a87b00691ce5eee35754ebb1625df16072e9dfeb.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 323, + 164, + 398, + 174 + ], + "lines": [ + { + "bbox": [ + 323, + 164, + 398, + 174 + ], + "spans": [ + { + "bbox": [ + 323, + 164, + 398, + 174 + ], + "type": "text", + "content": "(c) FCNN Complexity=2" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 410, + 80, + 503, + 162 + ], + "blocks": [ + { + "bbox": [ + 410, + 80, + 503, + 162 + ], + "lines": [ + { + "bbox": [ + 410, + 80, + 503, + 162 + ], + "spans": [ + { + "bbox": [ + 410, + 80, + 503, + 162 + ], + "type": "image", + "image_path": 
"81e9cbf6884ab5717b67482b33b04616ceaebe7f6af14870434a4e64affab2a3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 417, + 163, + 493, + 174 + ], + "lines": [ + { + "bbox": [ + 417, + 163, + 493, + 174 + ], + "spans": [ + { + "bbox": [ + 417, + 163, + 493, + 174 + ], + "type": "text", + "content": "(d) FCNN Complexity " + }, + { + "bbox": [ + 417, + 163, + 493, + 174 + ], + "type": "inline_equation", + "content": "= 1" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 106, + 185, + 222, + 266 + ], + "blocks": [ + { + "bbox": [ + 106, + 185, + 222, + 266 + ], + "lines": [ + { + "bbox": [ + 106, + 185, + 222, + 266 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 222, + 266 + ], + "type": "image", + "image_path": "7ec69d55de3b60c5c24832ef1d9a12997306db37fae0246e09c0eaf129ea5c42.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 270, + 213, + 280 + ], + "lines": [ + { + "bbox": [ + 137, + 270, + 213, + 280 + ], + "spans": [ + { + "bbox": [ + 137, + 270, + 213, + 280 + ], + "type": "text", + "content": "(e) ResNet-50 CIFAR100" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 224, + 185, + 315, + 267 + ], + "blocks": [ + { + "bbox": [ + 224, + 185, + 315, + 267 + ], + "lines": [ + { + "bbox": [ + 224, + 185, + 315, + 267 + ], + "spans": [ + { + "bbox": [ + 224, + 185, + 315, + 267 + ], + "type": "image", + "image_path": "c6ee73b224215a5536d2c53e6ac7f83ef3a1e68516fdd3b1772b11f7f499bb1e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 232, + 270, + 304, + 280 + ], + "lines": [ + { + "bbox": [ + 232, + 270, + 304, + 280 + ], + "spans": [ + { + "bbox": [ + 232, + 270, + 304, + 280 + ], + "type": "text", + "content": "(f) ResNet-50 CIFAR10" + } + ] + } + ], + 
"index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 317, + 185, + 408, + 266 + ], + "blocks": [ + { + "bbox": [ + 317, + 185, + 408, + 266 + ], + "lines": [ + { + "bbox": [ + 317, + 185, + 408, + 266 + ], + "spans": [ + { + "bbox": [ + 317, + 185, + 408, + 266 + ], + "type": "image", + "image_path": "e5c47c8658bab4c2e4e07fda183dc40cb52d0a977494cb3c695f25af9d3165f1.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 270, + 399, + 280 + ], + "lines": [ + { + "bbox": [ + 328, + 270, + 399, + 280 + ], + "spans": [ + { + "bbox": [ + 328, + 270, + 399, + 280 + ], + "type": "text", + "content": "(g) VGG-19 CIFAR100" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 289, + 504, + 335 + ], + "lines": [ + { + "bbox": [ + 104, + 289, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 504, + 335 + ], + "type": "text", + "content": "Figure 8: The convergence curves with different learning target complexities. (a-d): Decreasing target complexities for FCNNs. The deeper layers accelerate more than the shallower ones when high-frequency components are removed. (e-h): For CNNs, the deepest layers (i.e., Stage 4 / Layer 17) learn faster on CIFAR10 than on CIFAR100 while the other layers do not change much." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 410, + 185, + 502, + 266 + ], + "blocks": [ + { + "bbox": [ + 410, + 185, + 502, + 266 + ], + "lines": [ + { + "bbox": [ + 410, + 185, + 502, + 266 + ], + "spans": [ + { + "bbox": [ + 410, + 185, + 502, + 266 + ], + "type": "image", + "image_path": "fd22289d46f02ff40c21c1a9f04c20aa6999d25ce28ec1f07b215a4f637c2573.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 424, + 270, + 492, + 280 + ], + "lines": [ + { + "bbox": [ + 424, + 270, + 492, + 280 + ], + "spans": [ + { + "bbox": [ + 424, + 270, + 492, + 280 + ], + "type": "text", + "content": "(h) VGG-19 CIFAR10" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 357, + 263, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 263, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 263, + 369 + ], + "type": "text", + "content": "6 PRACTICAL SIGNIFICANCE" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 383, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 383, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 383, + 504, + 418 + ], + "type": "text", + "content": "Up to now, we have been analyzing the layer convergence bias from a theoretical perspective. This section discusses its practical use to drive the development of DNN architecture design, and a new explanation for the acceleration effect of transfer learning with the help of layer convergence bias." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 433, + 261, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 433, + 261, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 433, + 261, + 443 + ], + "type": "text", + "content": "6.1 DNN ARCHITECTURE DESIGN" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 454, + 382, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 454, + 382, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 454, + 382, + 609 + ], + "type": "text", + "content": "Modern CNN architectures (He et al., 2016) usually contain layers from narrow to wide (e.g., 64 channels of the first layer to 2048 channels of the last layer). From the perspective of computational complexity, the narrower shallower layers make the corresponding large feature maps less computation-consuming. Considering the layer convergence bias, deeper layers with larger capacities are also beneficial for the corresponding high-frequencies to be learned easier. Although this is a common design for CNNs, Transformers (Dosovitskiy et al., 2020) usually apply the same architecture for all encoders. For a vision Transformer with 12 encoders, we use encoders with width " + }, + { + "bbox": [ + 104, + 454, + 382, + 609 + ], + "type": "inline_equation", + "content": "2/4/8" + }, + { + "bbox": [ + 104, + 454, + 382, + 609 + ], + "type": "text", + "content": " to construct three variants. The variants only differ in the arrangement of different encoders, we use " + }, + { + "bbox": [ + 104, + 454, + 382, + 609 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 454, + 382, + 609 + ], + "type": "text", + "content": " to denote the widths, and " + }, + { + "bbox": [ + 104, + 454, + 382, + 609 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 454, + 382, + 609 + ], + "type": "text", + "content": " to denote the number of each kind of encoders. 
The configures are summarized below:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 619, + 368, + 666 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 132, + 619, + 355, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 619, + 355, + 632 + ], + "spans": [ + { + "bbox": [ + 132, + 619, + 355, + 632 + ], + "type": "text", + "content": "- deeper encoders wider: " + }, + { + "bbox": [ + 132, + 619, + 355, + 632 + ], + "type": "inline_equation", + "content": "W = (2,4,8)" + }, + { + "bbox": [ + 132, + 619, + 355, + 632 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 619, + 355, + 632 + ], + "type": "inline_equation", + "content": "N = (6,3,3)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 132, + 636, + 342, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 636, + 342, + 649 + ], + "spans": [ + { + "bbox": [ + 132, + 636, + 342, + 649 + ], + "type": "text", + "content": "- vanilla architecture: " + }, + { + "bbox": [ + 132, + 636, + 342, + 649 + ], + "type": "inline_equation", + "content": "W = (4, 4, 4)" + }, + { + "bbox": [ + 132, + 636, + 342, + 649 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 636, + 342, + 649 + ], + "type": "inline_equation", + "content": "N = (4, 4, 4)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 132, + 654, + 368, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 654, + 368, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 654, + 368, + 666 + ], + "type": "text", + "content": "- deeper encoders narrower: " + }, + { + "bbox": [ + 132, + 654, + 368, + 666 + ], + "type": "inline_equation", + "content": "W = (8,4,2)" + }, + { + "bbox": [ + 132, + 654, + 368, + 666 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 654, + 368, + 666 + ], + "type": "inline_equation", + "content": "N = (3,3,6)" + } + ] + } + ], + "index": 24 + } 
+ ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 389, + 468, + 504, + 555 + ], + "blocks": [ + { + "bbox": [ + 389, + 468, + 504, + 555 + ], + "lines": [ + { + "bbox": [ + 389, + 468, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 389, + 468, + 504, + 555 + ], + "type": "image", + "image_path": "67c9db64e8a19b92a2e2742a4ec2cc2a9bfd477ef8ce171a951b58f590686f34.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 387, + 563, + 505, + 597 + ], + "lines": [ + { + "bbox": [ + 387, + 563, + 505, + 597 + ], + "spans": [ + { + "bbox": [ + 387, + 563, + 505, + 597 + ], + "type": "text", + "content": "Figure 9: Performance of three variants of ViTs on ImageNet." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "text", + "content": "Fig. 9 shows their performances, with the best accuracy of " + }, + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "inline_equation", + "content": "80.75\\%" + }, + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "inline_equation", + "content": "78.88\\%" + }, + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "inline_equation", + "content": "75.75\\%" + }, + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "text", + "content": " respectively. We find that with the same number of parameters, putting the wider layers deeper results in higher training performance. This finding may serve as an effective way to improve the model capacity. 
The causal connection between layer complexity distribution and model performance is discussed in Appendix A.6. And layer convergence bias for ViT is analyzed in Appendix A.7." + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 346, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 346, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 346, + 94 + ], + "type": "text", + "content": "6.2 ACCELERATION EFFECT OF TRANSFER LEARNING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 103, + 506, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 103, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 103, + 506, + 236 + ], + "type": "text", + "content": "Transfer learning (fine-tuning with the pre-trained models) is a widely-used technique that can accelerate the model convergence (Shao et al., 2018b; a; Liang & Zheng, 2020). We show the layer convergence curves w/o transfer learning on the Flowers dataset (Nilsback & Zisserman, 2006). When training from scratch (Fig. 10 (a)), the shallower layers converge faster so that the deeper layers can extract semantic features based on basic features. Local minima of Stage 4 is sharp in this case. However, with transfer learning (Fig. 
10 (b)), deeper layers can directly be built on the pre-trained basic features. The Stage 4 shows a much higher convergence rate among all layers, its loss landscape also becomes flatter. Two observations that are not consistent with layer convergence bias are summarized in the following: 1) the pre-trained shallower layers are nearly optimal, so they don't present fast convergence in transfer learning; 2) although the pre-trained deeper layers are not as optimal as the shallower layers do, their loss landscapes are much flatter than training from scratch, which makes them converge much faster." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 246, + 206, + 318 + ], + "blocks": [ + { + "bbox": [ + 106, + 246, + 206, + 318 + ], + "lines": [ + { + "bbox": [ + 106, + 246, + 206, + 318 + ], + "spans": [ + { + "bbox": [ + 106, + 246, + 206, + 318 + ], + "type": "image", + "image_path": "08d76005d97ea52de82dddccf093f0e6f27f636217cbfa31a4e830f349ae5720.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 321, + 240, + 330 + ], + "lines": [ + { + "bbox": [ + 179, + 321, + 240, + 330 + ], + "spans": [ + { + "bbox": [ + 179, + 321, + 240, + 330 + ], + "type": "text", + "content": "(a) Train from scratch" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 209, + 247, + 303, + 316 + ], + "blocks": [ + { + "bbox": [ + 209, + 247, + 303, + 316 + ], + "lines": [ + { + "bbox": [ + 209, + 247, + 303, + 316 + ], + "spans": [ + { + "bbox": [ + 209, + 247, + 303, + 316 + ], + "type": "image", + "image_path": "485b491ea971b145d122a9e124cb7177b0d69285d2adc5eed481bf5316c090b8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 309, + 247, + 408, + 318 + ], + "blocks": [ + { + "bbox": [ + 309, + 247, + 408, + 318 + ], + "lines": [ + { + "bbox": 
[ + 309, + 247, + 408, + 318 + ], + "spans": [ + { + "bbox": [ + 309, + 247, + 408, + 318 + ], + "type": "image", + "image_path": "c6ae2dceb0c9621794a2f3e74785b0e7801adbb91d909ca664c3f582778281c3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 339, + 504, + 363 + ], + "lines": [ + { + "bbox": [ + 104, + 339, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 504, + 363 + ], + "type": "text", + "content": "Figure 10: Effects of transfer learning on the training process. Left (a,b): The layer convergence process of ResNet-50. Right (a,b): The loss landscapes of Stage 4 w/o transfer learning." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 410, + 247, + 503, + 315 + ], + "blocks": [ + { + "bbox": [ + 410, + 247, + 503, + 315 + ], + "lines": [ + { + "bbox": [ + 410, + 247, + 503, + 315 + ], + "spans": [ + { + "bbox": [ + 410, + 247, + 503, + 315 + ], + "type": "image", + "image_path": "44c1782dcb66f53943d0cf57fa8f4482379eff83247c299544dbcd9351ae8b52.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 386, + 321, + 429, + 330 + ], + "lines": [ + { + "bbox": [ + 386, + 321, + 429, + 330 + ], + "spans": [ + { + "bbox": [ + 386, + 321, + 429, + 330 + ], + "type": "text", + "content": "(b) Fine-tuning" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 388, + 212, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 388, + 212, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 212, + 399 + ], + "type": "text", + "content": "7 RELATED WORK" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 413, + 506, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 413, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 104, + 413, + 506, + 546 + ], 
+ "type": "text", + "content": "DNNs with gradient-based training show great potential to fit targets with arbitrary complexities (Hornik et al., 1989; Leshno et al., 1993), given sufficient width. With the advances in the last decade to verify the capability of the depth of universal approximators (Delalleau & Bengio, 2011; Eldan & Shamir, 2016; Lu et al., 2017), practitioners tried to reduce the width of neural networks by adding more layers (Simonyan & Zisserman, 2014; He et al., 2016; Huang et al., 2017). We are also inspired by research on local properties (sharpness/flatness) of loss functions at minima (Keskar et al., 2017; Li et al., 2018) and relationship between convergence rate and generalization (Hardt et al., 2016). Furthermore, LARS optimizer (You et al., 2017) shares some valuable insights on layer convergence, which are discussed in Appendix A.8. In practice, the idea of layer convergence bias had been intuitively applied to accelerate DNN training (Huang et al., 2016; Brock et al., 2017) and mitigating catastrophic forgetting (Ramasesh et al., 2020). The arrangement schemes of CNN/Transformer blocks were explored by (Liu et al., 2022b;a)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 563, + 195, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 563, + 195, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 195, + 574 + ], + "type": "text", + "content": "8 CONCLUSION" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": "In this work, we empirically studied the phenomenon that the shallower layers of DNNs tend to converge faster than the deeper layers, called layer convergence bias. 
This phenomenon is a natural preference in the process of DNN training: the shallower layers are responsible for extracting low-level features which are more evenly distributed and easier to learn, while deeper layers refine these features to do specific tasks. This makes the loss landscapes for shallower layers flatter than the landscapes for deeper layers, making shallower layers converge faster. In addition, this work established a connection between layers and learned frequencies. By showing deeper layers tend to fit the high-frequency components in the target function, we can understand the layer convergence bias from another perspective. We finally took DNN architecture design and transfer learning as two examples to show how theoretical findings in this work can shed light on the practical applications of deep learning. For progress to continue, a more in-depth understanding of the properties of neural networks is needed. We also hope that the layer convergence bias can inspire more practical improvements in the DNNs' architecture design and training schemes." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 218, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 218, + 94 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 218, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 504, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 504, + 128 + ], + "type": "text", + "content": "This work was supported by the Lustgarten Foundation for Pancreatic Cancer Research and the McGovern Foundation." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 144, + 176, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 144, + 176, + 156 + ], + "spans": [ + { + "bbox": [ + 106, + 144, + 176, + 156 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 163, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 163, + 505, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 186 + ], + "type": "text", + "content": "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 193, + 504, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 193, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 504, + 227 + ], + "type": "text", + "content": "David Balduzzi, Marcus Frean, Lennox Leary, JP Lewis, Kurt Wan-Duo Ma, and Brian McWilliams. The shattered gradients problem: If resnets are the answer, then what is the question? In International Conference on Machine Learning, pp. 342-350. PMLR, 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 233, + 504, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 233, + 504, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 504, + 267 + ], + "type": "text", + "content": "Andrew Brock, Theodore Lim, James Millar Ritchie, and Nicholas J Weston. Freezeout: Accelerate training by progressively freezing layers. In NIPS 2017 Workshop on Optimization: 10th NIPS Workshop on Optimization for Machine Learning, 2017." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 274, + 504, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 274, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 504, + 297 + ], + "type": "text", + "content": "Olivier Delalleau and Yoshua Bengio. Shallow vs. deep sum-product networks. Advances in neural information processing systems, 24, 2011." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 304, + 504, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 504, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 504, + 349 + ], + "type": "text", + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 355, + 504, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 355, + 504, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 504, + 379 + ], + "type": "text", + "content": "Ronen Eldan and Ohad Shamir. The power of depth for feedforward neural networks. In Conference on learning theory, pp. 907-940. PMLR, 2016." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 385, + 504, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 385, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 504, + 419 + ], + "type": "text", + "content": "Alex Graves, Abdel-rahman Mohamed, and Geoffrey Hinton. Speech recognition with deep recurrent neural networks. In 2013 IEEE international conference on acoustics, speech and signal processing, pp. 6645-6649. IEEE, 2013." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 426, + 504, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 504, + 449 + ], + "type": "text", + "content": "Moritz Hardt, Ben Recht, and Yoram Singer. Train faster, generalize better: Stability of stochastic gradient descent. In International conference on machine learning, pp. 1225-1234. PMLR, 2016." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 456, + 504, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 504, + 489 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 496, + 504, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 496, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 504, + 519 + ], + "type": "text", + "content": "Sepp Hochreiter. Untersuchungen zu dynamischen neuronalen netzen. Diploma, Technische Universität München, 91(1), 1991." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 526, + 504, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 504, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 504, + 549 + ], + "type": "text", + "content": "Sepp Hochreiter, Yoshua Bengio, Paolo Frasconi, Jürgen Schmidhuber, et al. Gradient flow in recurrent nets: the difficulty of learning long-term dependencies, 2001." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 555, + 504, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 504, + 579 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 504, + 579 + ], + "type": "text", + "content": "Kurt Hornik, Maxwell Stinchcombe, and Halbert White. Multilayer feedforward networks are universal approximators. Neural networks, 2(5):359-366, 1989." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 586, + 504, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 504, + 609 + ], + "type": "text", + "content": "Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pp. 646-661. Springer, 2016." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 615, + 504, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 615, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 504, + 650 + ], + "type": "text", + "content": "Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4700-4708, 2017." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 656, + 504, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 504, + 689 + ], + "type": "text", + "content": "Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning, pp. 448-456. PMLR, 2015." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 696, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 696, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 696, + 504, + 732 + ], + "type": "text", + "content": "John Jumper, Richard Evans, Alexander Pritzel, Tim Green, Michael Figurnov, Olaf Ronneberger, Kathryn Tunyasuvunakool, Russ Bates, Augustin Žídek, Anna Potapenko, et al. Highly accurate protein structure prediction with alphafold. Nature, 596(7873):583-589, 2021." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 733 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Nitish Shirish Keskar, Jorge Nocedal, Ping Tak Peter Tang, Dheevatsa Mudigere, and Mikhail Smelyanskiy. On large-batch training for deep learning: Generalization gap and sharp minima. In 5th International Conference on Learning Representations, ICLR 2017, 2017." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 504, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 504, + 148 + ], + "type": "text", + "content": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Communications of the ACM, 60(6):84-90, 2017." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 153, + 504, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 153, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 106, + 153, + 504, + 187 + ], + "type": "text", + "content": "Moshe Leshno, Vladimir Ya Lin, Allan Pinkus, and Shimon Schocken. Multilayer feedforward networks with a nonpolynomial activation function can approximate any function. Neural networks, 6(6):861-867, 1993." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 194, + 504, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 504, + 219 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 504, + 219 + ], + "type": "text", + "content": "Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. Advances in neural information processing systems, 31, 2018." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "text", + "content": "Gaobo Liang and Lixin Zheng. A transfer learning method with deep residual network for pediatric pneumonia diagnosis. Computer methods and programs in biomedicine, 187:104964, 2020." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 256, + 504, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 256, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 107, + 256, + 504, + 291 + ], + "type": "text", + "content": "Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3202-3211, 2022a." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 297, + 504, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 297, + 504, + 332 + ], + "spans": [ + { + "bbox": [ + 107, + 297, + 504, + 332 + ], + "type": "text", + "content": "Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976-11986, 2022b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 338, + 504, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 338, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 107, + 338, + 504, + 373 + ], + "type": "text", + "content": "Zhou Lu, Hongming Pu, Feicheng Wang, Zhiqiang Hu, and Liwei Wang. The expressive power of neural networks: A view from the width. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 380, + 504, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 380, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 107, + 380, + 504, + 403 + ], + "type": "text", + "content": "Yuri Nesterov. Introductory lectures on convex optimization: A basic course, volume 87. Springer Science & Business Media, 2003." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 410, + 504, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 410, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 107, + 410, + 504, + 434 + ], + "type": "text", + "content": "Michael A Nielsen. Neural networks and deep learning, volume 25. Determination press San Francisco, CA, USA, 2015." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 441, + 504, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 441, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 107, + 441, + 504, + 464 + ], + "type": "text", + "content": "M-E Nilsback and Andrew Zisserman. A visual vocabulary for flower classification. In CVPR, volume 2, pp. 1447-1454. IEEE, 2006." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 471, + 504, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 471, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 107, + 471, + 504, + 506 + ], + "type": "text", + "content": "Zhou Pan, Feng Jiashi, Ma Chao, Xiong Caiming, Chu Hong Hoi Steven, and E Weinan. Towards theoretically understanding why sgd generalizes better than adam in deep learning. In Advances in Neural Information Processing Systems, pp. 21285-21296, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 512, + 504, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 512, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 107, + 512, + 504, + 548 + ], + "type": "text", + "content": "Maithra Raghu, Justin Gilmer, Jason Yosinski, and Jascha Sohl-Dickstein. Svcca: Singular vector canonical correlation analysis for deep learning dynamics and interpretability. Advances in neural information processing systems, 30, 2017." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 554, + 504, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 554, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 107, + 554, + 504, + 589 + ], + "type": "text", + "content": "Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In International Conference on Machine Learning, pp. 5301-5310. PMLR, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 595, + 504, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 595, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 107, + 595, + 504, + 620 + ], + "type": "text", + "content": "Vinay V Ramasesh, Ethan Dyer, and Maithra Raghu. Anatomy of catastrophic forgetting: Hidden representations and task semantics. arXiv preprint arXiv:2007.07400, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 626, + 504, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 626, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 107, + 626, + 504, + 660 + ], + "type": "text", + "content": "Basri Ronen, David Jacobs, Yoni Kasten, and Shira Kritchman. The convergence rate of neural networks for learned functions of different frequencies. Advances in Neural Information Processing Systems, 32, 2019." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 667, + 504, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 667, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 107, + 667, + 504, + 702 + ], + "type": "text", + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. IJCV, 115(3):211-252, 2015." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 708, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 708, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 107, + 708, + 504, + 733 + ], + "type": "text", + "content": "Shibani Santurkar, Dimitris Tsipras, Andrew Ilyas, and Aleksander Madry. How does batch normalization help optimization? Advances in neural information processing systems, 31, 2018." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 529 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Kun Shao, Yuanheng Zhu, and Dongbin Zhao. Starcraft micromanagement with reinforcement learning and curriculum transfer learning. IEEE Transactions on Emerging Topics in Computational Intelligence, 3(1):73-84, 2018a." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 156 + ], + "type": "text", + "content": "Siyu Shao, Stephen McAleer, Ruqiang Yan, and Pierre Baldi. Highly accurate machine fault diagnosis using deep transfer learning. IEEE Transactions on Industrial Informatics, 15(4):2446-2455, 2018b." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "text", + "content": "David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George Van Den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, et al. Mastering the game of go with deep neural networks and tree search. nature, 529(7587):484-489, 2016." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 204, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 505, + 228 + ], + "type": "text", + "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 234, + 505, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 234, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 505, + 258 + ], + "type": "text", + "content": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. Sequence to sequence learning with neural networks. Advances in neural information processing systems, 27, 2014." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 264, + 505, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 264, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 264, + 505, + 297 + ], + "type": "text", + "content": "Pengxiang Wu, Songzhu Zheng, Mayank Goswami, Dimitris Metaxas, and Chao Chen. A topological filter for learning with label noise. Advances in neural information processing systems, 33: 21382-21393, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 304, + 505, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 505, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 505, + 328 + ], + "type": "text", + "content": "Yuxin Wu and Kaiming He. Group normalization. In Proceedings of the European conference on computer vision (ECCV), pp. 3-19, 2018." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 335, + 505, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 505, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 505, + 358 + ], + "type": "text", + "content": "Zhi-Qin John Xu, Yaoyu Zhang, Tao Luo, Yanyang Xiao, and Zheng Ma. Frequency principle: Fourier analysis sheds light on deep neural networks. arXiv preprint arXiv:1901.06523, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 365, + 505, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 505, + 399 + ], + "type": "text", + "content": "Zhiqin John Xu and Hanxu Zhou. Deep frequency principle towards understanding why deeper learning is faster. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 10541-10550, 2021." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 405, + 505, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 405, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 505, + 438 + ], + "type": "text", + "content": "Zhang Yi, Pheng-Ann Heng, and Ada Wai-Chee Fu. Estimate of exponential convergence rate and exponential stability for neural networks. IEEE Transactions on Neural Networks, 10(6):1487-1493, 1999." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 446, + 505, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 446, + 505, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 446, + 505, + 470 + ], + "type": "text", + "content": "Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. How transferable are features in deep neural networks? Advances in neural information processing systems, 27, 2014." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 476, + 505, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 476, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 476, + 505, + 499 + ], + "type": "text", + "content": "Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 506, + 505, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 505, + 529 + ], + "type": "text", + "content": "Matthew D Zeiler and Rob Fergus. Visualizing and understanding convolutional networks. In European conference on computer vision, pp. 818-833. Springer, 2014." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 183, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 183, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 183, + 94 + ], + "type": "text", + "content": "A APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 247, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 247, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 247, + 118 + ], + "type": "text", + "content": "A.1 EXPERIMENTAL SETTINGS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 126, + 389, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 126, + 389, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 126, + 389, + 138 + ], + "type": "text", + "content": "Datasets. The synthetic and real datasets are summarized in the Tab. 
1" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 135, + 168, + 476, + 262 + ], + "blocks": [ + { + "bbox": [ + 167, + 148, + 443, + 160 + ], + "lines": [ + { + "bbox": [ + 167, + 148, + 443, + 160 + ], + "spans": [ + { + "bbox": [ + 167, + 148, + 443, + 160 + ], + "type": "text", + "content": "Table 1: Descriptions and statistics of the datasets used in this work." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 168, + 476, + 262 + ], + "lines": [ + { + "bbox": [ + 135, + 168, + 476, + 262 + ], + "spans": [ + { + "bbox": [ + 135, + 168, + 476, + 262 + ], + "type": "table", + "html": "
DatasetSize (train/test)ClassesData description
Sine regression5000/5000n/aFunction with four sine components, domain [-2,2]
ImageNet1,281,167/50,0001000Photos of common objects
CIFAR-1050,000/10,00010Photos of common objects, image sizes 32 × 32
CIFAR-10050,000/10,000100Photos of common objects, image sizes 32 × 32
Flowers1,088/27217Find-grained photos of flowers
FGVC Aircraft6,667/3,333100Find-grained photos of aircrafts
Caltech-1013,060/6,084102Photos/paintings/sketches of common objects
CUB-2005,994/5,794200Find-grained photos of birds
DomainNet painting50,416/21,850345Oil Paintings, murals, drawings, tattoos
", + "image_path": "f74c448402e96f1f33bd096b683b53537d61ab7380df54455fc97500f6a2e325.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 273, + 504, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 273, + 504, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 504, + 294 + ], + "type": "text", + "content": "Network Architectures. The FCNNs, CNNs, and Vision Transformers are summarized in the Tab. 2." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 134, + 324, + 477, + 386 + ], + "blocks": [ + { + "bbox": [ + 167, + 304, + 441, + 316 + ], + "lines": [ + { + "bbox": [ + 167, + 304, + 441, + 316 + ], + "spans": [ + { + "bbox": [ + 167, + 304, + 441, + 316 + ], + "type": "text", + "content": "Table 2: Complexities and architectures of DNNs used in this work." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 324, + 477, + 386 + ], + "lines": [ + { + "bbox": [ + 134, + 324, + 477, + 386 + ], + "spans": [ + { + "bbox": [ + 134, + 324, + 477, + 386 + ], + "type": "table", + "html": "
Model#ParametersMult-addsArchitecture description
FCNN (no res)2k10k4 fc layers [1-32-32-32-1]
FCNN (res)132k390kfc [1-128] → 4 res-blocks [128-128-128] → fc [128-1]
ResNet-5025.6M4.1Gconv → 4 stages with [3,4,6,3] res-blocks → fc
VGG-19143.7M19.8G16 conv layers, 3 fc layers
ViT9.9M77.2M12 Transformer encoder blocks (basic width 256), 1 fc layer
", + "image_path": "48a7442661b84f2092086f4e1e109ad4ad97d550a61b13a9781720f6bf1b99fe.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 399, + 504, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 504, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 504, + 433 + ], + "type": "text", + "content": "Training Hyper-parameters. For the regression task, we train FCNNs with SGD optimizers for 300 epochs. The initial learning rate is 0.1, with a learning rate decay (to 0.01) at the 150th epoch. The batch size is 128, no weight decay (" + }, + { + "bbox": [ + 104, + 399, + 504, + 433 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 104, + 399, + 504, + 433 + ], + "type": "text", + "content": " regularization) is conducted." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 437, + 504, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 504, + 482 + ], + "type": "text", + "content": "For the ImageNet classification task with CNNs, we train ResNet-50 and VGG-19 for 120 epochs with SGD optimizers. The initial learning rate is 0.1, with learning rate decays at the 50th and 100th epoch to 0.01 and 0.001, respectively. The batch size is 256, the input image size is " + }, + { + "bbox": [ + 104, + 437, + 504, + 482 + ], + "type": "inline_equation", + "content": "224^2" + }, + { + "bbox": [ + 104, + 437, + 504, + 482 + ], + "type": "text", + "content": ", and the weight decay coefficient is " + }, + { + "bbox": [ + 104, + 437, + 504, + 482 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 104, + 437, + 504, + 482 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 487, + 504, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 533 + ], + "type": "text", + "content": "For Vision Transformers on ImageNet dataset, we train them for 200 epochs with Adam optimizers. The peak learning rate is set to 0.0003. We use linear learning rate warm-up for 10,000 iterations, and a subsequent cosine learning rate decay. The batch size is 256, the input image size is " + }, + { + "bbox": [ + 104, + 487, + 504, + 533 + ], + "type": "inline_equation", + "content": "224^2" + }, + { + "bbox": [ + 104, + 487, + 504, + 533 + ], + "type": "text", + "content": ", and the weight decay coefficient is " + }, + { + "bbox": [ + 104, + 487, + 504, + 533 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 104, + 487, + 504, + 533 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 537, + 506, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 506, + 583 + ], + "type": "text", + "content": "For CNN image classification on other datasets, we train models for 100 epochs with SGD optimizers. Initial learning rate of 0.01 and cosine learning rate scheduler are applied. 
The batch size is 128, the input image sizes are " + }, + { + "bbox": [ + 104, + 537, + 506, + 583 + ], + "type": "inline_equation", + "content": "32^2" + }, + { + "bbox": [ + 104, + 537, + 506, + 583 + ], + "type": "text", + "content": " (for CIFAR) and " + }, + { + "bbox": [ + 104, + 537, + 506, + 583 + ], + "type": "inline_equation", + "content": "224^2" + }, + { + "bbox": [ + 104, + 537, + 506, + 583 + ], + "type": "text", + "content": " (for Flowers, Aircraft, Caltech, CUB, and DomainNet), and the weight decay coefficient is " + }, + { + "bbox": [ + 104, + 537, + 506, + 583 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 104, + 537, + 506, + 583 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 392, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 392, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 392, + 94 + ], + "type": "text", + "content": "A.2 CONVERGENCE MEASUREMENT USING WEIGHT VARIATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 181 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 181 + ], + "type": 
"text", + "content": "In Section 2, we have introduced the convergence measurement in this work. This measurement is simple and straightforward, and it can show how each layer in a DNN converges during the whole training process (Fig. 1 for fully connected networks and Fig. 2 for CNNs) by examining the distance between the training parameters and the converged parameters. However, it has not been verified whether calculating the parameter distance variation to the convergence point between two adjacent epochs is necessary. After all, the measurement highly depends on the convergence point, which can only be obtained after the whole training process." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "text", + "content": "We come up with a simplified convergence measurement. This method uses weight variation as a metric to examine how fast a layer is learning, and whether this layer reaches a state of convergence. If a layer is learning actively, it is reasonable that its weights vary drastically during training. For the converged layers, their weights usually keep stable. 
So we use " + }, + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "inline_equation", + "content": "\\|\\theta_l^{(t_k)} - \\theta_l^{(t_{k+1})}\\|_2 / \\|\\theta_l^{(t_k)}\\|_2" + }, + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "text", + "content": ", the normalized weight variation of layer " + }, + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "text", + "content": " during epoch " + }, + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "inline_equation", + "content": "k + 1" + }, + { + "bbox": [ + 104, + 186, + 506, + 256 + ], + "type": "text", + "content": ", to illustrate how actively it is learning." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 146, + 264, + 313, + 351 + ], + "blocks": [ + { + "bbox": [ + 146, + 264, + 313, + 351 + ], + "lines": [ + { + "bbox": [ + 146, + 264, + 313, + 351 + ], + "spans": [ + { + "bbox": [ + 146, + 264, + 313, + 351 + ], + "type": "image", + "image_path": "c14d3d7d8d8b5f86c9a0418fdbfe649e1f98f972535305d86b6a15f72d8e7cea.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 199, + 355, + 293, + 364 + ], + "lines": [ + { + "bbox": [ + 199, + 355, + 293, + 364 + ], + "spans": [ + { + "bbox": [ + 199, + 355, + 293, + 364 + ], + "type": "text", + "content": "(a) ResNet-50 Val Acc " + }, + { + "bbox": [ + 199, + 355, + 293, + 364 + ], + "type": "inline_equation", + "content": "73.24\\%" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 316, + 264, + 465, + 351 + ], + "blocks": [ + { + "bbox": [ + 316, + 264, + 465, + 351 + ], + "lines": [ + { + "bbox": [ + 
316, + 264, + 465, + 351 + ], + "spans": [ + { + "bbox": [ + 316, + 264, + 465, + 351 + ], + "type": "image", + "image_path": "04c8d0fa6a2eff40a964496ab70bbe2e9626de759594d4b1b1a8fe629a41f40d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 355, + 355, + 444, + 364 + ], + "lines": [ + { + "bbox": [ + 355, + 355, + 444, + 364 + ], + "spans": [ + { + "bbox": [ + 355, + 355, + 444, + 364 + ], + "type": "text", + "content": "(b) VGG-19 Val Acc " + }, + { + "bbox": [ + 355, + 355, + 444, + 364 + ], + "type": "inline_equation", + "content": "71.89\\%" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 373, + 504, + 396 + ], + "lines": [ + { + "bbox": [ + 104, + 373, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 504, + 396 + ], + "type": "text", + "content": "Figure 11: The convergence processes of ResNet-50 and VGG-19 on ImageNet. The results are illustrated with weight variations. The learning rate decays at epoch 50 and epoch 100." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 407, + 506, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 506, + 531 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 506, + 531 + ], + "type": "text", + "content": "The results of ResNet-50 and VGG-19 training process on ImageNet are shown in Fig. 11. From this plot, we can see that after learning rate decays at epoch 50 and 100, the weight variations drop evidently. However, the weight variations of each layer do not show apparent decreasing trend when the learning rate keeps stable, which indicates that the training of DNNs do not converge as usual convex optimization problems do (e.g., linear programming). Therefore, it is hard for us to compare the convergence rates of different layers by observing their convergence curves. 
We cannot find a clear clue like what was given by the convergence measurement in Section 2 to get the layer convergence bias. All in all, we can safely claim that, it is crucial for the convergence metric to consider direction information to measure how fast different layers are learning towards their convergence points. Our previous convergence measurement really needs to examine convergence by calculating the parameter distance between the current point to convergence point." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 350, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 350, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 350, + 94 + ], + "type": "text", + "content": "A.3 FACTORS AFFECTING LAYER CONVERGENCE BIAS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 104, + 504, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 104, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 104, + 104, + 504, + 171 + ], + "type": "text", + "content": "In Section 5, we have shown that the complexity of the datasets is an important factor affecting layer convergence bias. 
When the fitting target function is complex enough with both low and high frequency components, the shallower layers learn the low-frequency components while the deeper layers learn the high-frequency components. Here we use the FCNNs with residual connections to show whether some other important factors would affect the layer convergence bias. All following experiments are conducted on the same regression task in Section 3." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 175, + 506, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 506, + 221 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 506, + 221 + ], + "type": "text", + "content": "Model Depth. The default architecture used in previous experiments is the four-blocks FCNN, here we try adding more blocks to make the network deeper and see what change will happen. As shown in Fig. 12, all the networks show layer convergence bias. With more and more res-blocks, the overall convergence of the network becomes slightly faster."
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 232, + 219, + 314 + ], + "blocks": [ + { + "bbox": [ + 106, + 232, + 219, + 314 + ], + "lines": [ + { + "bbox": [ + 106, + 232, + 219, + 314 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 219, + 314 + ], + "type": "image", + "image_path": "cc2aff774b33d159dddbe57297a41ea623c4b6e1c8238f3210ae1746b98d667a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 144, + 319, + 198, + 327 + ], + "lines": [ + { + "bbox": [ + 144, + 319, + 198, + 327 + ], + "spans": [ + { + "bbox": [ + 144, + 319, + 198, + 327 + ], + "type": "text", + "content": "(a) 4 Res-Blocks" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 223, + 233, + 315, + 314 + ], + "blocks": [ + { + "bbox": [ + 223, + 233, + 315, + 314 + ], + "lines": [ + { + "bbox": [ + 223, + 233, + 315, + 314 + ], + "spans": [ + { + "bbox": [ + 223, + 233, + 315, + 314 + ], + "type": "image", + "image_path": "453f84c65a6baabe53e224356ca74b0c776c7f0a8c00e38898063f8c269a97cc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 241, + 318, + 294, + 327 + ], + "lines": [ + { + "bbox": [ + 241, + 318, + 294, + 327 + ], + "spans": [ + { + "bbox": [ + 241, + 318, + 294, + 327 + ], + "type": "text", + "content": "(b) 8 Res-Blocks" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 318, + 233, + 409, + 314 + ], + "blocks": [ + { + "bbox": [ + 318, + 233, + 409, + 314 + ], + "lines": [ + { + "bbox": [ + 318, + 233, + 409, + 314 + ], + "spans": [ + { + "bbox": [ + 318, + 233, + 409, + 314 + ], + "type": "image", + "image_path": "6c70da1522273b0ccd7717aad2cbed9966f2fe1ea3415c9334212634b8ac9363.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 335, + 318, + 392, + 
327 + ], + "lines": [ + { + "bbox": [ + 335, + 318, + 392, + 327 + ], + "spans": [ + { + "bbox": [ + 335, + 318, + 392, + 327 + ], + "type": "text", + "content": "(c) 12 Res-Blocks" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 135, + 336, + 474, + 349 + ], + "lines": [ + { + "bbox": [ + 135, + 336, + 474, + 349 + ], + "spans": [ + { + "bbox": [ + 135, + 336, + 474, + 349 + ], + "type": "text", + "content": "Figure 12: The convergence process of FCNNs with different number of res-blocks." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 413, + 233, + 503, + 314 + ], + "blocks": [ + { + "bbox": [ + 413, + 233, + 503, + 314 + ], + "lines": [ + { + "bbox": [ + 413, + 233, + 503, + 314 + ], + "spans": [ + { + "bbox": [ + 413, + 233, + 503, + 314 + ], + "type": "image", + "image_path": "8f5bd8c36a3e1c63d8619e1feeea44664d4633aa087c1d2b2c8dc2215dda3798.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 429, + 318, + 487, + 327 + ], + "lines": [ + { + "bbox": [ + 429, + 318, + 487, + 327 + ], + "spans": [ + { + "bbox": [ + 429, + 318, + 487, + 327 + ], + "type": "text", + "content": "(d) 16 Res-Blocks" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 363, + 504, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 504, + 439 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 504, + 439 + ], + "type": "text", + "content": "Learning Rate. The results with different learning rates are shown in Fig. 13. When the learning rate gets smaller, layer convergence bias becomes weaker. This is because the gradient predictiveness w.r.t. parameters of all layers get close to 1 (see Fig. 3 (b,right) for the predictiveness with the learning rate of 0.01). 
In this case, a layer is less influenced by the updates of parameters in other layers, only the gradient predictiveness w.r.t. data matters for the convergence rate. In addition, smaller learning rates are beneficial for the deeper layers to converge because of their sharper minima." + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 156, + 452, + 265, + 531 + ], + "blocks": [ + { + "bbox": [ + 156, + 452, + 265, + 531 + ], + "lines": [ + { + "bbox": [ + 156, + 452, + 265, + 531 + ], + "spans": [ + { + "bbox": [ + 156, + 452, + 265, + 531 + ], + "type": "image", + "image_path": "9e940352ce29aa0753b43623966f621e44b442ff5f45f01ca7ec52a8799cd859.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 201, + 536, + 236, + 545 + ], + "lines": [ + { + "bbox": [ + 201, + 536, + 236, + 545 + ], + "spans": [ + { + "bbox": [ + 201, + 536, + 236, + 545 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 201, + 536, + 236, + 545 + ], + "type": "inline_equation", + "content": "\\mathrm{LR} = 0.01" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 151, + 554, + 459, + 567 + ], + "lines": [ + { + "bbox": [ + 151, + 554, + 459, + 567 + ], + "spans": [ + { + "bbox": [ + 151, + 554, + 459, + 567 + ], + "type": "text", + "content": "Figure 13: The convergence process of FCNNs with different learning rates." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 269, + 452, + 359, + 531 + ], + "blocks": [ + { + "bbox": [ + 269, + 452, + 359, + 531 + ], + "lines": [ + { + "bbox": [ + 269, + 452, + 359, + 531 + ], + "spans": [ + { + "bbox": [ + 269, + 452, + 359, + 531 + ], + "type": "image", + "image_path": "a68e1ef770b3c062097e63279ea52fc7603fb3b14fdd2f5587008ed112684517.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 295, + 536, + 331, + 545 + ], + "lines": [ + { + "bbox": [ + 295, + 536, + 331, + 545 + ], + "spans": [ + { + "bbox": [ + 295, + 536, + 331, + 545 + ], + "type": "text", + "content": "(b) LR=0.03" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 364, + 452, + 454, + 531 + ], + "blocks": [ + { + "bbox": [ + 364, + 452, + 454, + 531 + ], + "lines": [ + { + "bbox": [ + 364, + 452, + 454, + 531 + ], + "spans": [ + { + "bbox": [ + 364, + 452, + 454, + 531 + ], + "type": "image", + "image_path": "d1508a6d1aeb53f993f1f0855f04bf7469b186772ab04214f180caa548e8eb72.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 390, + 536, + 422, + 545 + ], + "lines": [ + { + "bbox": [ + 390, + 536, + 422, + 545 + ], + "spans": [ + { + "bbox": [ + 390, + 536, + 422, + 545 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 390, + 536, + 422, + 545 + ], + "type": "inline_equation", + "content": "\\mathrm{LR} = 0.1" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 582, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 672 + ], + "type": "text", + "content": "Weight Decay. 
The experiments with FCNNs in previous sections are conducted without weight decay. It is interesting to investigate the sensitivity of the layer-wise model convergence with different weight decay strengths. The results are shown in Fig. 14. We can see that when the weight decay becomes stronger, the residual blocks converge slower in a more and more similar convergence rate. We conjecture the reason is that weight decay dominates the total loss when its coefficient is large. In this way, the layer parameters with similar initialization scales tend to converge in similar speed toward zero. Because the residual blocks have identical architectures, they share the same initial parameter distribution, and converge in the same speed when weight decay is strong." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": "Optimizer. In Section 4, we have discussed the mechanism behind layer convergence bias. The flatter/sharper minimizers of different layers make SGD learn at different speeds. This is because SGD is better at finding flatter minimizers (Pan et al., 2020). In Fig. 15, we compare SGD with three adaptive optimizers: Adagrad, RMSprop, and Adam. It is evident that with adaptive optimizers, layer convergence bias does not hold anymore.
We conjecture the reason behind this" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 79, + 218, + 159 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 218, + 159 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 218, + 159 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 218, + 159 + ], + "type": "image", + "image_path": "8c910a09f36ed47a776ac4c222b9f922099d411872ba29c1a8ddad5b2028fd69.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 163, + 194, + 174 + ], + "lines": [ + { + "bbox": [ + 149, + 163, + 194, + 174 + ], + "spans": [ + { + "bbox": [ + 149, + 163, + 194, + 174 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 149, + 163, + 194, + 174 + ], + "type": "inline_equation", + "content": "\\mathrm{WD} = 1\\mathrm{e} - 7" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 218, + 80, + 312, + 159 + ], + "blocks": [ + { + "bbox": [ + 218, + 80, + 312, + 159 + ], + "lines": [ + { + "bbox": [ + 218, + 80, + 312, + 159 + ], + "spans": [ + { + "bbox": [ + 218, + 80, + 312, + 159 + ], + "type": "image", + "image_path": "1be2a755db41c189bba26cb171501af3ca144f7d7cb4a4fb852009ec3e43e59d.jpg" + } + ] 
+ } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 246, + 163, + 289, + 174 + ], + "lines": [ + { + "bbox": [ + 246, + 163, + 289, + 174 + ], + "spans": [ + { + "bbox": [ + 246, + 163, + 289, + 174 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 246, + 163, + 289, + 174 + ], + "type": "inline_equation", + "content": "\\mathrm{WD} = 1\\mathrm{e} - 6" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 313, + 80, + 407, + 159 + ], + "blocks": [ + { + "bbox": [ + 313, + 80, + 407, + 159 + ], + "lines": [ + { + "bbox": [ + 313, + 80, + 407, + 159 + ], + "spans": [ + { + "bbox": [ + 313, + 80, + 407, + 159 + ], + "type": "image", + "image_path": "48c54aefd14b8aa698fa80c269dec5b09730df1cb1ce2b04444a89aa91c71723.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 340, + 163, + 383, + 174 + ], + "lines": [ + { + "bbox": [ + 340, + 163, + 383, + 174 + ], + "spans": [ + { + "bbox": [ + 340, + 163, + 383, + 174 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 340, + 163, + 383, + 174 + ], + "type": "inline_equation", + "content": "\\mathrm{WD} = 1\\mathrm{e} - 5" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 408, + 80, + 503, + 159 + ], + "blocks": [ + { + "bbox": [ + 408, + 80, + 503, + 159 + ], + "lines": [ + { + "bbox": [ + 408, + 80, + 503, + 159 + ], + "spans": [ + { + "bbox": [ + 408, + 80, + 503, + 159 + ], + "type": "image", + "image_path": "c9b32c2ce60e956db6ac87943ab3ce0b46075be910f394f49232fabafa4b331e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 436, + 163, + 479, + 174 + ], + "lines": [ + { + "bbox": [ + 436, + 163, + 479, + 174 + ], + "spans": [ + { + "bbox": [ + 436, + 163, + 479, + 174 + ], + "type": "text", + "content": "(d) " + }, 
+ { + "bbox": [ + 436, + 163, + 479, + 174 + ], + "type": "inline_equation", + "content": "\\mathrm{WD} = 1\\mathrm{e} - 4" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 106, + 204, + 217, + 283 + ], + "blocks": [ + { + "bbox": [ + 132, + 181, + 477, + 194 + ], + "lines": [ + { + "bbox": [ + 132, + 181, + 477, + 194 + ], + "spans": [ + { + "bbox": [ + 132, + 181, + 477, + 194 + ], + "type": "text", + "content": "Figure 14: The convergence process of FCNNs with different weight decay strengths." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 204, + 217, + 283 + ], + "lines": [ + { + "bbox": [ + 106, + 204, + 217, + 283 + ], + "spans": [ + { + "bbox": [ + 106, + 204, + 217, + 283 + ], + "type": "image", + "image_path": "7426eff0147450ebff0521730b61325b7c4e73b9c2627fbe66d33214ec987820.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 156, + 289, + 185, + 299 + ], + "lines": [ + { + "bbox": [ + 156, + 289, + 185, + 299 + ], + "spans": [ + { + "bbox": [ + 156, + 289, + 185, + 299 + ], + "type": "text", + "content": "(a) SGD" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 157, + 308, + 453, + 321 + ], + "lines": [ + { + "bbox": [ + 157, + 308, + 453, + 321 + ], + "spans": [ + { + "bbox": [ + 157, + 308, + 453, + 321 + ], + "type": "text", + "content": "Figure 15: The convergence process of FCNNs with different optimizers." 
+ } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 218, + 205, + 312, + 283 + ], + "blocks": [ + { + "bbox": [ + 218, + 205, + 312, + 283 + ], + "lines": [ + { + "bbox": [ + 218, + 205, + 312, + 283 + ], + "spans": [ + { + "bbox": [ + 218, + 205, + 312, + 283 + ], + "type": "image", + "image_path": "e135b9bc895215ddb16dfe5463737e3ca474af3384917dd410758b939cbce60c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 246, + 289, + 286, + 300 + ], + "lines": [ + { + "bbox": [ + 246, + 289, + 286, + 300 + ], + "spans": [ + { + "bbox": [ + 246, + 289, + 286, + 300 + ], + "type": "text", + "content": "(b) Adagrad" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 313, + 205, + 407, + 283 + ], + "blocks": [ + { + "bbox": [ + 313, + 205, + 407, + 283 + ], + "lines": [ + { + "bbox": [ + 313, + 205, + 407, + 283 + ], + "spans": [ + { + "bbox": [ + 313, + 205, + 407, + 283 + ], + "type": "image", + "image_path": "aa8a87fcac06a0fd497ed4e36c4b736c2fdf0976d37c37db1d7917240dd929ff.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 339, + 289, + 384, + 300 + ], + "lines": [ + { + "bbox": [ + 339, + 289, + 384, + 300 + ], + "spans": [ + { + "bbox": [ + 339, + 289, + 384, + 300 + ], + "type": "text", + "content": "(c) RMSprop" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 408, + 205, + 503, + 283 + ], + "blocks": [ + { + "bbox": [ + 408, + 205, + 503, + 283 + ], + "lines": [ + { + "bbox": [ + 408, + 205, + 503, + 283 + ], + "spans": [ + { + "bbox": [ + 408, + 205, + 503, + 283 + ], + "type": "image", + "image_path": "a343fbd451eb81c933889a4dc2ceebfd43a11cf133804cb1fa6cc26406b9771c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + 
"type": "image_body" + }, + { + "bbox": [ + 441, + 289, + 473, + 299 + ], + "lines": [ + { + "bbox": [ + 441, + 289, + 473, + 299 + ], + "spans": [ + { + "bbox": [ + 441, + 289, + 473, + 299 + ], + "type": "text", + "content": "(d) Adam" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": "is that the adaptive optimizers heuristically assign different learning rates for different parameters, making their optimization hardly predictable." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 369, + 506, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 506, + 524 + ], + "type": "text", + "content": "Normalization Methods. Like residual connection, batch normalization Ioffe & Szegedy (2015) is also a common design in modern DNN architectures. As discussed in previous literature, normalization in the neural networks helps to make the layer inputs more stable and make the loss landscapes smoother, thus accelerates the model training Santurkar et al. (2018). In Section 3 and Section 4, we mainly use the FCNNs without normalization to verify and explore the layer convergence bias. Here we investigate how the normalization methods (i.e., batch normalization, layer normalization Ba et al. (2016), and group normalization Wu & He (2018)) help the convergence, and whether the shallower layers still converge faster in these cases. As shown in Fig. 16, all layers converge faster when adding batch normalization to them. Particularly, \"Res-Block 1\" accelerates the most and reach a similar convergence rate as \"Layer 1\". The layer convergence bias also holds for batch normalization. 
For layer normalization and group normalization, the models show a significantly faster convergence rate than the model using batch normalization. All layers show effective convergence at an early stage of training (i.e., the first five epochs). In these two cases, different layers have similar convergence rates, thus no evident layer convergence bias emerges." + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 106, + 534, + 218, + 613 + ], + "blocks": [ + { + "bbox": [ + 106, + 534, + 218, + 613 + ], + "lines": [ + { + "bbox": [ + 106, + 534, + 218, + 613 + ], + "spans": [ + { + "bbox": [ + 106, + 534, + 218, + 613 + ], + "type": "image", + "image_path": "c18964d8ef4698f80162ae831dd96c2e2a854ed39750a3c40037c9ee297a46f6.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 617, + 209, + 625 + ], + "lines": [ + { + "bbox": [ + 140, + 617, + 209, + 625 + ], + "spans": [ + { + "bbox": [ + 140, + 617, + 209, + 625 + ], + "type": "text", + "content": "(a) Without normalization" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "lines": [ + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "type": "text", + "content": "Figure 16: The convergence process of FCNNs with different normalization methods. When using group normalization, we set the group number to 8." 
+ } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 219, + 534, + 313, + 613 + ], + "blocks": [ + { + "bbox": [ + 219, + 534, + 313, + 613 + ], + "lines": [ + { + "bbox": [ + 219, + 534, + 313, + 613 + ], + "spans": [ + { + "bbox": [ + 219, + 534, + 313, + 613 + ], + "type": "image", + "image_path": "5cc61b139c36fdbd843318044c92bc9f08b4928b9a7d11f8640c8bc6d3bcbff4.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 617, + 302, + 625 + ], + "lines": [ + { + "bbox": [ + 239, + 617, + 302, + 625 + ], + "spans": [ + { + "bbox": [ + 239, + 617, + 302, + 625 + ], + "type": "text", + "content": "(b) Batch normalization" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 313, + 534, + 407, + 613 + ], + "blocks": [ + { + "bbox": [ + 313, + 534, + 407, + 613 + ], + "lines": [ + { + "bbox": [ + 313, + 534, + 407, + 613 + ], + "spans": [ + { + "bbox": [ + 313, + 534, + 407, + 613 + ], + "type": "image", + "image_path": "048efec3e0d90b64ad0f8eb197f1a9266058a077430180c8f05ad823f565a850.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 332, + 617, + 395, + 625 + ], + "lines": [ + { + "bbox": [ + 332, + 617, + 395, + 625 + ], + "spans": [ + { + "bbox": [ + 332, + 617, + 395, + 625 + ], + "type": "text", + "content": "(c) Layer normalization" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 408, + 534, + 503, + 613 + ], + "blocks": [ + { + "bbox": [ + 408, + 534, + 503, + 613 + ], + "lines": [ + { + "bbox": [ + 408, + 534, + 503, + 613 + ], + "spans": [ + { + "bbox": [ + 408, + 534, + 503, + 613 + ], + "type": "image", + "image_path": "bec197c0b7083842ddd26fb8f7bc3bcf15f14334ad8cae797d0304729bab6ec6.jpg" + } + ] + } + ], + 
"index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 426, + 617, + 490, + 625 + ], + "lines": [ + { + "bbox": [ + 426, + 617, + 490, + 625 + ], + "spans": [ + { + "bbox": [ + 426, + 617, + 490, + 625 + ], + "type": "text", + "content": "(d) Group normalization" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 275, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 275, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 275, + 94 + ], + "type": "text", + "content": "A.4 RESULTS ON HARDER DATASETS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 104, + 506, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 104, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 104, + 506, + 182 + ], + "type": "text", + "content": "For verifying the layer convergence bias on more datasets, we show more convergence results on four harder image classification datasets (see Fig. 17). 
Most of the classes in these datasets only have " + }, + { + "bbox": [ + 104, + 104, + 506, + 182 + ], + "type": "inline_equation", + "content": "< 100" + }, + { + "bbox": [ + 104, + 104, + 506, + 182 + ], + "type": "text", + "content": " samples, making them harder to learn. Note that the experiments are conducted with the learning rate of 0.01 (learning rate of 0.1 failed in some cases because these datasets have too many classes but not sufficient samples, leading to non-decreasing loss), some deeper layers have quite similar convergence rates because of the small learning rate. But roughly speaking, layer convergence bias still holds for these datasets." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 196, + 218, + 286 + ], + "blocks": [ + { + "bbox": [ + 108, + 196, + 218, + 286 + ], + "lines": [ + { + "bbox": [ + 108, + 196, + 218, + 286 + ], + "spans": [ + { + "bbox": [ + 108, + 196, + 218, + 286 + ], + "type": "image", + "image_path": "2d3d1b6de4d187a9f4de51739ebda2bd2438b98ef0df07a2ec2e90e179fb1ac2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 222, + 196, + 312, + 285 + ], + "blocks": [ + { + "bbox": [ + 222, + 196, + 312, + 285 + ], + "lines": [ + { + "bbox": [ + 222, + 196, + 312, + 285 + ], + "spans": [ + { + "bbox": [ + 222, + 196, + 312, + 285 + ], + "type": "image", + "image_path": "5a38646221862fb698cd2ba10f02054544c4c8e8783063dadc260832cda985eb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 290, + 289, + 338, + 298 + ], + "lines": [ + { + "bbox": [ + 290, + 289, + 338, + 298 + ], + "spans": [ + { + "bbox": [ + 290, + 289, + 338, + 298 + ], + "type": "text", + "content": "(a) ResNet-50" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 318, + 196, + 407, + 285 + ], + "blocks": [ + { + "bbox": [ + 
318, + 196, + 407, + 285 + ], + "lines": [ + { + "bbox": [ + 318, + 196, + 407, + 285 + ], + "spans": [ + { + "bbox": [ + 318, + 196, + 407, + 285 + ], + "type": "image", + "image_path": "1b8738cead3470357291c6178fcedc909a1b18863b2b10d24230d8adfce0843f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 412, + 196, + 503, + 285 + ], + "blocks": [ + { + "bbox": [ + 412, + 196, + 503, + 285 + ], + "lines": [ + { + "bbox": [ + 412, + 196, + 503, + 285 + ], + "spans": [ + { + "bbox": [ + 412, + 196, + 503, + 285 + ], + "type": "image", + "image_path": "21cdb294d3de63caa9d5e62609dd7916741247e216ee9e4b576d1a306bf34aab.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 108, + 304, + 219, + 392 + ], + "blocks": [ + { + "bbox": [ + 108, + 304, + 219, + 392 + ], + "lines": [ + { + "bbox": [ + 108, + 304, + 219, + 392 + ], + "spans": [ + { + "bbox": [ + 108, + 304, + 219, + 392 + ], + "type": "image", + "image_path": "8c7fa7738b52dd3f1b0ec00f850490002e879985c7d932e599678ea0b1ac2450.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 142, + 415, + 467, + 428 + ], + "lines": [ + { + "bbox": [ + 142, + 415, + 467, + 428 + ], + "spans": [ + { + "bbox": [ + 142, + 415, + 467, + 428 + ], + "type": "text", + "content": "Figure 17: The convergence process of CNNs on four image classification tasks." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 225, + 305, + 313, + 392 + ], + "blocks": [ + { + "bbox": [ + 225, + 305, + 313, + 392 + ], + "lines": [ + { + "bbox": [ + 225, + 305, + 313, + 392 + ], + "spans": [ + { + "bbox": [ + 225, + 305, + 313, + 392 + ], + "type": "image", + "image_path": "1c7ee3bcce9f991f5e31e8c2bc232673e8ef3c56dc6ef8260e33833b5a10e435.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 294, + 396, + 337, + 407 + ], + "lines": [ + { + "bbox": [ + 294, + 396, + 337, + 407 + ], + "spans": [ + { + "bbox": [ + 294, + 396, + 337, + 407 + ], + "type": "text", + "content": "(b) VGG-19" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 320, + 305, + 406, + 392 + ], + "blocks": [ + { + "bbox": [ + 320, + 305, + 406, + 392 + ], + "lines": [ + { + "bbox": [ + 320, + 305, + 406, + 392 + ], + "spans": [ + { + "bbox": [ + 320, + 305, + 406, + 392 + ], + "type": "image", + "image_path": "d71898cc82aa5829b15c3ade625fc9ab56b6a1a49a1491a20217c627531e45f4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 414, + 305, + 503, + 393 + ], + "blocks": [ + { + "bbox": [ + 414, + 305, + 503, + 393 + ], + "lines": [ + { + "bbox": [ + 414, + 305, + 503, + 393 + ], + "spans": [ + { + "bbox": [ + 414, + 305, + 503, + 393 + ], + "type": "image", + "image_path": "fc3727fa8b46bdfc62303d8fa3e601e021b217453e63b6b086a1c6c8dd903884.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 455, + 312, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 455, + 312, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 455, + 312, + 467 + ], + "type": "text", + "content": "A.5 
REPEATABILITY OF THE VISUALIZATIONS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 477, + 506, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 477, + 506, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 477, + 506, + 522 + ], + "type": "text", + "content": "Do different ImageNet trained models produce dramatically different loss landscapes? We plot the loss landscapes of different models with different random seeds in Fig. 18, 19. Quite similar patterns of the landscapes for different layers can be observed on both ResNet and VGG with different random seeds." + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 108, + 535, + 203, + 604 + ], + "blocks": [ + { + "bbox": [ + 108, + 535, + 203, + 604 + ], + "lines": [ + { + "bbox": [ + 108, + 535, + 203, + 604 + ], + "spans": [ + { + "bbox": [ + 108, + 535, + 203, + 604 + ], + "type": "image", + "image_path": "cdc4602f4900831a817c6ba0b16166988b92a5ed27d06b262133d493de8cab68.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 605, + 181, + 614 + ], + "lines": [ + { + "bbox": [ + 130, + 605, + 181, + 614 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 181, + 614 + ], + "type": "text", + "content": "(a) Stage 1 Seed 1" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 209, + 536, + 302, + 604 + ], + "blocks": [ + { + "bbox": [ + 209, + 536, + 302, + 604 + ], + "lines": [ + { + "bbox": [ + 209, + 536, + 302, + 604 + ], + "spans": [ + { + "bbox": [ + 209, + 536, + 302, + 604 + ], + "type": "image", + "image_path": "abd8bef9c0bde4ccf521ae8780f8f963813e3757ad1011194da9d4b3b7754b33.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 605, + 280, + 613 + ], + "lines": [ + { + "bbox": [ + 230, + 605, + 280, + 613 + ], + "spans": [ + { + "bbox": [ + 230, + 605, + 280, + 613 
+ ], + "type": "text", + "content": "(b) Stage 2 Seed 1" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 309, + 536, + 403, + 604 + ], + "blocks": [ + { + "bbox": [ + 309, + 536, + 403, + 604 + ], + "lines": [ + { + "bbox": [ + 309, + 536, + 403, + 604 + ], + "spans": [ + { + "bbox": [ + 309, + 536, + 403, + 604 + ], + "type": "image", + "image_path": "ba6b51581f09344b7a5294ad231451ffff407aae50122918db4380b0c9eed752.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 332, + 605, + 381, + 614 + ], + "lines": [ + { + "bbox": [ + 332, + 605, + 381, + 614 + ], + "spans": [ + { + "bbox": [ + 332, + 605, + 381, + 614 + ], + "type": "text", + "content": "(c) Stage 3 Seed 1" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 406, + 536, + 501, + 604 + ], + "blocks": [ + { + "bbox": [ + 406, + 536, + 501, + 604 + ], + "lines": [ + { + "bbox": [ + 406, + 536, + 501, + 604 + ], + "spans": [ + { + "bbox": [ + 406, + 536, + 501, + 604 + ], + "type": "image", + "image_path": "71814b1519c57e68b5914a4e19460521f39b1313e1a78c9a324836a55c1fb1c4.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 605, + 479, + 613 + ], + "lines": [ + { + "bbox": [ + 430, + 605, + 479, + 613 + ], + "spans": [ + { + "bbox": [ + 430, + 605, + 479, + 613 + ], + "type": "text", + "content": "(c) Stage 4 Seed 1" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 108, + 619, + 203, + 689 + ], + "blocks": [ + { + "bbox": [ + 108, + 619, + 203, + 689 + ], + "lines": [ + { + "bbox": [ + 108, + 619, + 203, + 689 + ], + "spans": [ + { + "bbox": [ + 108, + 619, + 203, + 689 + ], + "type": "image", + "image_path": 
"5a8c2c25bc5f0158bd56ecd9578c8bafb193298aa7b548989b51f1497106dd4c.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 690, + 181, + 699 + ], + "lines": [ + { + "bbox": [ + 130, + 690, + 181, + 699 + ], + "spans": [ + { + "bbox": [ + 130, + 690, + 181, + 699 + ], + "type": "text", + "content": "(d) Stage 1 Seed 2" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 208, + 619, + 302, + 689 + ], + "blocks": [ + { + "bbox": [ + 208, + 619, + 302, + 689 + ], + "lines": [ + { + "bbox": [ + 208, + 619, + 302, + 689 + ], + "spans": [ + { + "bbox": [ + 208, + 619, + 302, + 689 + ], + "type": "image", + "image_path": "0f04e3025b3c16525aa272aafd25438907ada7c2fe5faf9090434da866ae0c5c.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 690, + 280, + 698 + ], + "lines": [ + { + "bbox": [ + 230, + 690, + 280, + 698 + ], + "spans": [ + { + "bbox": [ + 230, + 690, + 280, + 698 + ], + "type": "text", + "content": "(e) Stage 2 Seed 2" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 308, + 619, + 402, + 689 + ], + "blocks": [ + { + "bbox": [ + 308, + 619, + 402, + 689 + ], + "lines": [ + { + "bbox": [ + 308, + 619, + 402, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 619, + 402, + 689 + ], + "type": "image", + "image_path": "0c3aa08d1725a94bfc6c869b66063bbc9bc36ecc11014c1fa3bb55756c712189.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 332, + 689, + 381, + 698 + ], + "lines": [ + { + "bbox": [ + 332, + 689, + 381, + 698 + ], + "spans": [ + { + "bbox": [ + 332, + 689, + 381, + 698 + ], + "type": "text", + "content": "(f) Stage 3 Seed 2" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 28 + }, + { + "type": "image", + 
"bbox": [ + 406, + 619, + 501, + 689 + ], + "blocks": [ + { + "bbox": [ + 406, + 619, + 501, + 689 + ], + "lines": [ + { + "bbox": [ + 406, + 619, + 501, + 689 + ], + "spans": [ + { + "bbox": [ + 406, + 619, + 501, + 689 + ], + "type": "image", + "image_path": "80f071771fa09a7bc8bd98ee2b6b0dd524a8816e603accd1cbe259d50f42514c.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 689, + 480, + 698 + ], + "lines": [ + { + "bbox": [ + 430, + 689, + 480, + 698 + ], + "spans": [ + { + "bbox": [ + 430, + 689, + 480, + 698 + ], + "type": "text", + "content": "(g) Stage 4 Seed 2" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 175, + 708, + 435, + 720 + ], + "lines": [ + { + "bbox": [ + 175, + 708, + 435, + 720 + ], + "spans": [ + { + "bbox": [ + 175, + 708, + 435, + 720 + ], + "type": "text", + "content": "Figure 18: The loss landscapes of different layers of ResNet-50." + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 81, + 204, + 151 + ], + "blocks": [ + { + "bbox": [ + 108, + 81, + 204, + 151 + ], + "lines": [ + { + "bbox": [ + 108, + 81, + 204, + 151 + ], + "spans": [ + { + "bbox": [ + 108, 
+ 81, + 204, + 151 + ], + "type": "image", + "image_path": "a1f24b60ab084ff5e91c86c22102eb1c5984cd7693d5367bd8e88c97f0f9b9c6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 152, + 182, + 161 + ], + "lines": [ + { + "bbox": [ + 130, + 152, + 182, + 161 + ], + "spans": [ + { + "bbox": [ + 130, + 152, + 182, + 161 + ], + "type": "text", + "content": "(a) Layer 1 Seed 1" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 208, + 81, + 304, + 151 + ], + "blocks": [ + { + "bbox": [ + 208, + 81, + 304, + 151 + ], + "lines": [ + { + "bbox": [ + 208, + 81, + 304, + 151 + ], + "spans": [ + { + "bbox": [ + 208, + 81, + 304, + 151 + ], + "type": "image", + "image_path": "2826defe3fa06f37e46b41cf27059aad6d783ff328c1d5a830c6867bb075a758.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 233, + 152, + 285, + 161 + ], + "lines": [ + { + "bbox": [ + 233, + 152, + 285, + 161 + ], + "spans": [ + { + "bbox": [ + 233, + 152, + 285, + 161 + ], + "type": "text", + "content": "(b) Layer 5 Seed 1" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 308, + 81, + 403, + 151 + ], + "blocks": [ + { + "bbox": [ + 308, + 81, + 403, + 151 + ], + "lines": [ + { + "bbox": [ + 308, + 81, + 403, + 151 + ], + "spans": [ + { + "bbox": [ + 308, + 81, + 403, + 151 + ], + "type": "image", + "image_path": "96d8380a41f38818184333786a210af8d272d30b5769c7160da01a5b1ac06baf.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 152, + 382, + 161 + ], + "lines": [ + { + "bbox": [ + 331, + 152, + 382, + 161 + ], + "spans": [ + { + "bbox": [ + 331, + 152, + 382, + 161 + ], + "type": "text", + "content": "(c) Stage 9 Seed 1" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + 
"index": 5 + }, + { + "type": "image", + "bbox": [ + 407, + 81, + 503, + 151 + ], + "blocks": [ + { + "bbox": [ + 407, + 81, + 503, + 151 + ], + "lines": [ + { + "bbox": [ + 407, + 81, + 503, + 151 + ], + "spans": [ + { + "bbox": [ + 407, + 81, + 503, + 151 + ], + "type": "image", + "image_path": "f0073c99ef1020a065dc77f78c5fbf0c9f3a31bae17fac5292db87edaaba2b55.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 152, + 486, + 161 + ], + "lines": [ + { + "bbox": [ + 430, + 152, + 486, + 161 + ], + "spans": [ + { + "bbox": [ + 430, + 152, + 486, + 161 + ], + "type": "text", + "content": "(d) Stage 13 Seed 1" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 108, + 168, + 204, + 236 + ], + "blocks": [ + { + "bbox": [ + 108, + 168, + 204, + 236 + ], + "lines": [ + { + "bbox": [ + 108, + 168, + 204, + 236 + ], + "spans": [ + { + "bbox": [ + 108, + 168, + 204, + 236 + ], + "type": "image", + "image_path": "5d78a9bcedc51405b94b36804c97cfef494769d5001591f09d66b069e0b0959e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 239, + 182, + 247 + ], + "lines": [ + { + "bbox": [ + 130, + 239, + 182, + 247 + ], + "spans": [ + { + "bbox": [ + 130, + 239, + 182, + 247 + ], + "type": "text", + "content": "(e) Layer 1 Seed 2" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 209, + 168, + 304, + 236 + ], + "blocks": [ + { + "bbox": [ + 209, + 168, + 304, + 236 + ], + "lines": [ + { + "bbox": [ + 209, + 168, + 304, + 236 + ], + "spans": [ + { + "bbox": [ + 209, + 168, + 304, + 236 + ], + "type": "image", + "image_path": "8ae304f21bc0a477f488c981a76b210feeab8251686500b20c9109678f4c0bc6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 233, + 239, + 284, + 247 + ], + 
"lines": [ + { + "bbox": [ + 233, + 239, + 284, + 247 + ], + "spans": [ + { + "bbox": [ + 233, + 239, + 284, + 247 + ], + "type": "text", + "content": "(f) Layer 5 Seed 2" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 168, + 403, + 236 + ], + "blocks": [ + { + "bbox": [ + 309, + 168, + 403, + 236 + ], + "lines": [ + { + "bbox": [ + 309, + 168, + 403, + 236 + ], + "spans": [ + { + "bbox": [ + 309, + 168, + 403, + 236 + ], + "type": "image", + "image_path": "aac8258375312fda109c1b7d60b5900e75f4d2163d868f0b185381d13a4693ff.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 239, + 383, + 247 + ], + "lines": [ + { + "bbox": [ + 331, + 239, + 383, + 247 + ], + "spans": [ + { + "bbox": [ + 331, + 239, + 383, + 247 + ], + "type": "text", + "content": "(g) Stage 9 Seed 2" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 179, + 257, + 429, + 269 + ], + "lines": [ + { + "bbox": [ + 179, + 257, + 429, + 269 + ], + "spans": [ + { + "bbox": [ + 179, + 257, + 429, + 269 + ], + "type": "text", + "content": "Figure 19: The loss landscapes of different layers of VGG-19." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 407, + 168, + 503, + 236 + ], + "blocks": [ + { + "bbox": [ + 407, + 168, + 503, + 236 + ], + "lines": [ + { + "bbox": [ + 407, + 168, + 503, + 236 + ], + "spans": [ + { + "bbox": [ + 407, + 168, + 503, + 236 + ], + "type": "image", + "image_path": "1019864b6bee7ce9126cd66adff4dc0edca7f8e353221128bf2ef43d992d1dd4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 239, + 486, + 247 + ], + "lines": [ + { + "bbox": [ + 430, + 239, + 486, + 247 + ], + "spans": [ + { + "bbox": [ + 430, + 239, + 486, + 247 + ], + "type": "text", + "content": "(h) Stage 13 Seed 2" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 295, + 420, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 295, + 420, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 420, + 306 + ], + "type": "text", + "content": "A.6 MODELS OBEYING LAYER CONVERGENCE BIAS PERFORM BETTER" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 318, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 318, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 318, + 504, + 373 + ], + "type": "text", + "content": "In Section 4 and Section 5, it is discussed that layer convergence bias indicates that the shallower layers are learning low-level features (or low-frequency components of the target function). It is reasonable learning low-level features first have greater potential to reach good model performance, since the model can establish its high-level features based on relatively stable low-level feature spaces." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 378, + 504, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 504, + 489 + ], + "type": "text", + "content": "To examine whether the fast establishment of low-level features benefits model performance, we train four different FCNN models with the same amount of parameters, but different architectures, to fit the Sine target with four components. This experiment is based on a finding that a residual block with more layers in it tends to converge more slowly. We construct four FCNN models, each of them has four residual blocks (maybe in different sizes). The convergence processes are shown in Fig. 20. We can see that the blocks with the largest complexity always converge the most slowly. As the block with depth " + }, + { + "bbox": [ + 104, + 378, + 504, + 489 + ], + "type": "inline_equation", + "content": "= 4" + }, + { + "bbox": [ + 104, + 378, + 504, + 489 + ], + "type": "text", + "content": " being placed shallower in the FCNN, the regression MSE loss goes higher. In other words, if a shallower layer converges slowly, the model gets poorer performance. This may due to the vulnerability of deeper layers. If they converge based on changing shallower layers, it is hard for them to learn good features based on their unstable inputs." 
+ } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 106, + 504, + 218, + 584 + ], + "blocks": [ + { + "bbox": [ + 106, + 504, + 218, + 584 + ], + "lines": [ + { + "bbox": [ + 106, + 504, + 218, + 584 + ], + "spans": [ + { + "bbox": [ + 106, + 504, + 218, + 584 + ], + "type": "image", + "image_path": "58ba3b7388ef6af39c999e96716c1e2975a05240b2b63efc7614553ec8c19502.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 133, + 588, + 212, + 605 + ], + "lines": [ + { + "bbox": [ + 133, + 588, + 212, + 605 + ], + "spans": [ + { + "bbox": [ + 133, + 588, + 212, + 605 + ], + "type": "text", + "content": "(a) Res-Blocks " + }, + { + "bbox": [ + 133, + 588, + 212, + 605 + ], + "type": "inline_equation", + "content": "= (4,1,1,1)" + }, + { + "bbox": [ + 133, + 588, + 212, + 605 + ], + "type": "text", + "content": " Val loss 2.7e-4" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 615, + 504, + 660 + ], + "lines": [ + { + "bbox": [ + 104, + 615, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 104, + 615, + 504, + 660 + ], + "type": "text", + "content": "Figure 20: The convergence process of FCNNs with different residual block sizes and their validation performance on the regression task. Each model has a four-layer residual block and three one-layer residual blocks (e.g., \"Res-Blocks=(4,1,1,1)\" means the first residual block has four layers, and the rest three blocks have only one layer)." 
+ } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 224, + 504, + 313, + 584 + ], + "blocks": [ + { + "bbox": [ + 224, + 504, + 313, + 584 + ], + "lines": [ + { + "bbox": [ + 224, + 504, + 313, + 584 + ], + "spans": [ + { + "bbox": [ + 224, + 504, + 313, + 584 + ], + "type": "image", + "image_path": "986a500e127a3b567a7d483321e402198416d167b8ac976335581d11af8cc7ec.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 588, + 307, + 605 + ], + "lines": [ + { + "bbox": [ + 230, + 588, + 307, + 605 + ], + "spans": [ + { + "bbox": [ + 230, + 588, + 307, + 605 + ], + "type": "text", + "content": "(b) Res-Blocks " + }, + { + "bbox": [ + 230, + 588, + 307, + 605 + ], + "type": "inline_equation", + "content": "= (1,4,1,1)" + }, + { + "bbox": [ + 230, + 588, + 307, + 605 + ], + "type": "text", + "content": " Val loss 2.4e-4" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 319, + 504, + 408, + 584 + ], + "blocks": [ + { + "bbox": [ + 319, + 504, + 408, + 584 + ], + "lines": [ + { + "bbox": [ + 319, + 504, + 408, + 584 + ], + "spans": [ + { + "bbox": [ + 319, + 504, + 408, + 584 + ], + "type": "image", + "image_path": "05f6f9bb1fbf28a7268b87985221b47d7564cc5546ffb4f70207192a9ffd9168.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 588, + 401, + 605 + ], + "lines": [ + { + "bbox": [ + 325, + 588, + 401, + 605 + ], + "spans": [ + { + "bbox": [ + 325, + 588, + 401, + 605 + ], + "type": "text", + "content": "(c) Res-Blocks " + }, + { + "bbox": [ + 325, + 588, + 401, + 605 + ], + "type": "inline_equation", + "content": "= (1,1,4,1)" + }, + { + "bbox": [ + 325, + 588, + 401, + 605 + ], + "type": "text", + "content": " Val loss 1.8e-4" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } 
+ ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 414, + 504, + 503, + 584 + ], + "blocks": [ + { + "bbox": [ + 414, + 504, + 503, + 584 + ], + "lines": [ + { + "bbox": [ + 414, + 504, + 503, + 584 + ], + "spans": [ + { + "bbox": [ + 414, + 504, + 503, + 584 + ], + "type": "image", + "image_path": "212a7681039b14ab8973ec4b79d85bf91f44408c5bc8b952bc412e7c3ba6b5c7.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 420, + 588, + 496, + 605 + ], + "lines": [ + { + "bbox": [ + 420, + 588, + 496, + 605 + ], + "spans": [ + { + "bbox": [ + 420, + 588, + 496, + 605 + ], + "type": "text", + "content": "(d) Res-Blocks " + }, + { + "bbox": [ + 420, + 588, + 496, + 605 + ], + "type": "inline_equation", + "content": "= (1,1,1,4)" + }, + { + "bbox": [ + 420, + 588, + 496, + 605 + ], + "type": "text", + "content": " Val loss 1.4e-4" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": "The results can also be understood from another perspective. If the deeper block contains more parameters (with more fully connected layers in it), it would be helpful for this block to learn the corresponding high-frequency components of the target function. Therefore, the model can reach better performance. A similar observation is obtained in Section 6.1: when putting wider layers of the ViT deeper, the model can reach higher performance." 
+ } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 383, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 383, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 383, + 94 + ], + "type": "text", + "content": "A.7 LAYER CONVERGENCE BIAS FOR VISION TRANSFORMERS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "content": "As discussed in Section 6, ViT can benefit from distributing more parameters in the deeper layers. This result comes from one of our main findings about layer convergence bias: the deeper layers tend to learn high-frequency components of the target function, thus converge more slowly. So adding more parameters for the deeper layers is beneficial for these layers to learn the high-frequency components which are usually harder." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 163, + 506, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 163, + 506, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 163, + 506, + 307 + ], + "type": "text", + "content": "When making this claim, we do not verify the layer convergence bias for the ViT. The main difficulty for verifying layer convergence bias for ViTs is brought by its typical training scheme. ViT needs adaptive optimizers to train, otherwise it converges very slowly. However, adaptive optimizers change the learning rates of different parameters according to their optimization procedures. This leads to unfair convergence comparison between layers, thus affects the layer convergence bias, as shown in Fig 15. Therefore, we try both SGD and Adam optimizers for training ViTs on ImageNet, and see whether layer convergence bias holds in some cases. As shown in Fig. 21 (a), the ViT shows a roughly trend of layer convergence bias when optimizing with Adam, where the deepest \"Encoder Block 12\" converges the slowest. However, some other layers do not strictly obey layer convergence bias (e.g., the shallowest \"Patch Embedding\" does not learn fastest among all blocks). When optimizing with SGD, the ViT shows a good layer convergence bias. The results indicate that ViTs approximately share the same rules as FCNNs and CNNs, thus supports the discussions in Section 6." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 166, + 316, + 312, + 415 + ], + "blocks": [ + { + "bbox": [ + 166, + 316, + 312, + 415 + ], + "lines": [ + { + "bbox": [ + 166, + 316, + 312, + 415 + ], + "spans": [ + { + "bbox": [ + 166, + 316, + 312, + 415 + ], + "type": "image", + "image_path": "0af94e7b19f8f6f3270ef8ff11d3b3b1c3f0212f5324ea3e4a2fb4ab6b730aba.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 229, + 419, + 271, + 429 + ], + "lines": [ + { + "bbox": [ + 229, + 419, + 271, + 429 + ], + "spans": [ + { + "bbox": [ + 229, + 419, + 271, + 429 + ], + "type": "text", + "content": "(a) ViT Adam" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 313, + 316, + 444, + 415 + ], + "blocks": [ + { + "bbox": [ + 313, + 316, + 444, + 415 + ], + "lines": [ + { + "bbox": [ + 313, + 316, + 444, + 415 + ], + "spans": [ + { + "bbox": [ + 313, + 316, + 444, + 415 + ], + "type": "image", + "image_path": "f2ab7458058264a1c486e2c09681f0a30cecc2ec3babc56a0515bcaf96f8eb4d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 419, + 402, + 429 + ], + "lines": [ + { + "bbox": [ + 362, + 419, + 402, + 429 + ], + "spans": [ + { + "bbox": [ + 362, + 419, + 402, + 429 + ], + "type": "text", + "content": "(b) ViT SGD" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 438, + 504, + 472 + ], + "lines": [ + { + "bbox": [ + 104, + 438, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 504, + 472 + ], + "type": "text", + "content": "Figure 21: The convergence curves of ViTs on ImageNet with different optimizers. With Adam optimizer, the ViT does not obey the layer convergence bias strictly. While SGD can ensure relatively ideal faster convergence processes of shallower layers." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 495, + 342, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 342, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 342, + 506 + ], + "type": "text", + "content": "A.8 CONNECTION TO LARS OPTIMIZATION SCHEME" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 514, + 504, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 504, + 560 + ], + "type": "text", + "content": "One of the most important factors that affect the optimization procedure is the learning rate. In this work, it is shown that the shallower layers can learn effectively with large learning rates, but the deeper layers only learn fast after learning rate decays. Is there any connection between layers and its suitable learning rate?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 564, + 506, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 564, + 506, + 698 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 506, + 698 + ], + "type": "text", + "content": "LARS optimizer You et al. (2017) made a significant contribution to training DNNs with huge batch sizes and large learning rates. The key observation in the literature is that the weight-to-gradient ratio highly varies in different layers. If a layer has greater gradients and relatively smaller weights, it would be hard for it to converge due to the vigorous parameter update. So LARS considers the scale of the weights and its gradient norms in each layer and assigns a local learning rate for a layer to make it converge effectively and stably. For FCNNs in our work, its different hidden layers are initialized with the same scale due to their identical architecture, but the deeper layers usually have larger gradients. 
As a result, the larger gradients may make these layers struggle to converge. Similarly, the CNNs (i.e., ResNet-50 and VGG-19) have wider deeper layers. These layers have smaller initial parameters, so their gradients may lead to drastic weight variations if the learning rate is too large. In this way, we can understand why they cannot get close to their optimal points effectively at the early stage of training. It explains layer convergence bias from another perspective." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Why (and When) does Local SGD Generalize Better than SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_content_list.json b/2023/Why (and When) does Local SGD Generalize Better than SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1aac46e938d700ed0902cc004d0c407746c93d4e --- /dev/null +++ b/2023/Why (and When) does Local SGD Generalize Better than SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_content_list.json @@ -0,0 +1,15331 @@ +[ + { + "type": "text", + "text": "WHY (AND WHEN) DOES LOCAL SGD GENERALIZE BETTER THAN SGD?", + "text_level": 1, + "bbox": [ + 171, + 98, + 823, + 147 + ], + "page_idx": 0 + }, + { + "type": "text", + 
"text": "Xinran Gu*", + "bbox": [ + 210, + 169, + 299, + 183 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Institute for Interdisciplinary Information Sciences Tsinghua University", + "bbox": [ + 210, + 184, + 545, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "gxr21@mails.tsinghua.edu.cn", + "bbox": [ + 210, + 212, + 478, + 226 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kaifeng Lyu*", + "bbox": [ + 562, + 170, + 658, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Department of Computer Science Princeton University", + "bbox": [ + 562, + 184, + 785, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "klyu@cs.princeton.edu", + "bbox": [ + 562, + 212, + 771, + 226 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Longbo Huang†", + "bbox": [ + 210, + 247, + 326, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Institute for Interdisciplinary Information Sciences", + "bbox": [ + 210, + 261, + 545, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tsinghua University", + "bbox": [ + 210, + 276, + 349, + 289 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "longbohuang@tsinghua.edu.cn", + "bbox": [ + 210, + 289, + 478, + 303 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sanjeev Arora†", + "bbox": [ + 562, + 247, + 671, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Department of Computer Science", + "bbox": [ + 562, + 261, + 785, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Princeton University", + "bbox": [ + 562, + 276, + 700, + 289 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "arora@cs.princeton.edu", + "bbox": [ + 562, + 290, + 779, + 303 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 321, + 545, + 335 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Local SGD is a communication-efficient variant of SGD for 
large-scale training, where multiple GPUs perform SGD independently and average the model parameters periodically. It has been recently observed that Local SGD can not only achieve the design goal of reducing the communication overhead but also lead to higher test accuracy than the corresponding SGD baseline (Lin et al., 2020b), though the training regimes for this to happen are still in debate (Ortiz et al., 2021). This paper aims to understand why (and when) Local SGD generalizes better based on Stochastic Differential Equation (SDE) approximation. The main contributions of this paper include (i) the derivation of an SDE that captures the long-term behavior of Local SGD in the small learning rate regime, showing how noise drives the iterate to drift and diffuse after it has reached close to the manifold of local minima, (ii) a comparison between the SDEs of Local SGD and SGD, showing that Local SGD induces a stronger drift term that can result in a stronger effect of regularization, e.g., a faster reduction of sharpness, and (iii) empirical evidence validating that having a small learning rate and long enough training time enables the generalization improvement over SGD but removing either of the two conditions leads to no improvement.", + "bbox": [ + 228, + 353, + 767, + 590 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 617, + 336, + 631 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As deep models have grown larger, training them with reasonable wall-clock times has led to new distributed environments and new variants of gradient-based training. 
Recall that Stochastic Gradient Descent (SGD) tries to solve $\\min_{\\pmb{\\theta} \\in \\mathbb{R}^d} \\mathbb{E}_{\\xi \\sim \\hat{\\mathcal{D}}}[\\ell(\\pmb{\\theta}; \\xi)]$ , where $\\pmb{\\theta} \\in \\mathbb{R}^d$ is the parameter vector of the model, $\\ell(\\pmb{\\theta}; \\xi)$ is the loss function for a data sample $\\xi$ drawn from the training distribution $\\tilde{\\mathcal{D}}$ , e.g., the uniform distribution over the training set. SGD with learning rate $\\eta$ and batch size $B$ does the following update at each step, using a batch of $B$ independent $\\xi_{t,1}, \\ldots, \\xi_{t,B} \\sim \\tilde{\\mathcal{D}}$ :", + "bbox": [ + 169, + 648, + 826, + 739 + ], + "page_idx": 0 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} _ {t + 1} \\leftarrow \\boldsymbol {\\theta} _ {t} - \\eta \\boldsymbol {g} _ {t}, \\quad \\text {w h e r e} \\quad \\boldsymbol {g} _ {t} = \\frac {1}{B} \\sum_ {i = 1} ^ {B} \\nabla \\ell \\left(\\boldsymbol {\\theta} _ {t}; \\xi_ {t, i}\\right). \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 315, + 747, + 825, + 787 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Parallel SGD tries to improve wall-clock time when the batch size $B$ is large enough. It distributes the gradient computation to $K \\geq 2$ workers, each of whom focuses on a local batch of $B_{\\mathrm{loc}} := B / K$ samples and computes the average gradient over the local batch. 
Finally, $g_{t}$ is obtained by averaging the local gradients over the $K$ workers.", + "bbox": [ + 169, + 795, + 823, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, large-batch training leads to a significant test accuracy drop compared to a small-batch training baseline with the same number of training steps or epochs (Smith et al., 2020; Shallue et al.,", + "bbox": [ + 169, + 858, + 825, + 888 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution", + "bbox": [ + 189, + 896, + 313, + 910 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Corresponding authors", + "bbox": [ + 189, + 910, + 336, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d2ddf20977319359843449176528686667e651b70728681363f7c2b748262ac9.jpg", + "image_caption": [ + "(a) CIFAR-10, $B = 4096$ , ResNet-56." + ], + "image_footnote": [], + "bbox": [ + 228, + 56, + 480, + 176 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5535c2fbc915d92756ec5f3da4bc10bda5e4734ecdc6d2e3b634ab473d1da50c.jpg", + "image_caption": [ + "(b) ImageNet, $B = 8192$ , ResNet-50.", + "Figure 1: Post-Local SGD ( $H > 1$ ) generalizes better than SGD ( $H = 1$ ). We switch to Local SGD at the first learning rate decay (epoch #250) for CIFAR-10 and at the second learning rate decay (epoch #100) for ImageNet. See Appendix M.1 for training details." + ], + "image_footnote": [], + "bbox": [ + 514, + 56, + 767, + 176 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2019; Keskar et al., 2017; Jastrzebski et al., 2017). Reducing this generalization gap is the goal of much subsequent research. 
It was suggested that the generalization gap arises because larger batches lead to a reduction in the level of noise in batch gradient (see Appendix A for more discussion). The Linear Scaling Rule (Krizhevsky, 2014; Goyal et al., 2017; Jastrzebski et al., 2017) tries to fix this by increasing the learning rate in proportion to batch size. This is found to reduce the generalization gap for (parallel) SGD, but does not entirely eliminate it.", + "bbox": [ + 169, + 251, + 823, + 335 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To reduce the generalization gap further, Lin et al. (2020b) discovered that a variant of SGD, called Local SGD (Yu et al., 2019; Wang & Joshi, 2019; Zhou & Cong, 2018), can be used as a strong component. Perhaps surprisingly, Local SGD itself is not designed for improving generalization, but for reducing the high communication cost for synchronization among the workers, which is another important issue that often bottlenecks large-batch training (Seide et al., 2014; Strom, 2015; Chen et al., 2016; Recht et al., 2011). Instead of averaging the local gradients per step as in parallel SGD, Local SGD allows $K$ workers to train their models locally and averages the local model parameters whenever they finish $H$ local steps. Here every worker samples a new batch at each local step, and in this paper we focus on the case where all the workers draw samples with or without replacement from the same training set. See Appendix C for the pseudocode.", + "bbox": [ + 169, + 340, + 826, + 482 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "More specifically, Lin et al. (2020b) proposed Post-local SGD, a hybrid method that starts with parallel SGD (equivalent to Local SGD with $H = 1$ in math) and switches to Local SGD with $H > 1$ after a fixed number of steps $t_0$ . They showed through extensive experiments that Post-local SGD significantly outperforms parallel SGD in test accuracy when $t_0$ is carefully chosen. 
In Figure 1, we reproduce this phenomenon on both CIFAR-10 and ImageNet.", + "bbox": [ + 169, + 487, + 823, + 559 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As suggested by the success of Post-local SGD, Local SGD can improve the generalization of SGD by merely adding more local steps (while fixing the other hyperparameters), at least when the training starts from a model pre-trained by SGD. But the underlying mechanism is not very clear, and there is also controversy about when this phenomenon can happen (see Section 2.1 for a survey). The current paper tries to understand: Why does Local SGD generalize better? Under what general conditions does this generalization benefit arise?", + "bbox": [ + 169, + 564, + 826, + 648 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Previous theoretical research on Local SGD is mainly restricted to the convergence rate for minimizing a convex or non-convex objective (see Appendix A for a survey). A related line of works (Stich, 2018; Yu et al., 2019; Khaled et al., 2020) showed that Local SGD has a slower convergence rate compared with parallel SGD after running the same number of steps/epochs. This convergence result suggests that Local SGD may implicitly regularize the model through insufficient optimization, but this does not explain why parallel SGD with early stopping, which may incur an even higher training loss, still generalizes worse than Post-local SGD.", + "bbox": [ + 169, + 655, + 823, + 753 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our Contributions. In this paper, we provide the first theoretical understanding on why (and when) switching from parallel SGD to Local SGD improves generalization.", + "bbox": [ + 169, + 758, + 823, + 789 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. 
In Section 2.2, we conduct ablation studies on CIFAR-10 and ImageNet and identify a clean setting where adding local steps to SGD consistently improves generalization: if the learning rate is small and the total number of steps is sufficient, Local SGD eventually generalizes better than the corresponding (parallel) SGD baseline.", + "2. In Section 3.2, we derive a special SDE that characterizes the long-term behavior of Local SGD in the small learning rate regime, as inspired by a previous work (Li et al., 2021b) that proposed this type of SDE for modeling SGD. These SDEs can track the dynamics after the iterate has reached close to a manifold of minima. In this regime, the expected gradient is near zero, but the gradient noise can drive the iterate to wander around. In contrast to the conventional SDE (3) for" + ], + "bbox": [ + 179, + 792, + 825, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "SGD, where the drift and diffusion terms are connected respectively to the expected gradient and gradient noise, the SDE we derived for Local SGD has drift and diffusion terms both connected to gradient noise.", + "bbox": [ + 197, + 103, + 823, + 147 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3. Section 3.3 explains the generalization improvement of Local SGD over SGD by comparing the corresponding SDEs: increasing the number of local steps $H$ strengthens the drift term of SDE while keeping the diffusion term untouched. We hypothesize that having a stronger drift term can benefit generalization.", + "4. As a by-product, we provide a new proof technique that can give the first quantitative approximation bound for how well Li et al. 
(2021b)'s SDE approximates SGD." + ], + "bbox": [ + 178, + 154, + 825, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Back to the discussion on the generalization gap between small- and large-batch training, we remark that this gap can occur early in training when the learning rate is very large (Smith et al., 2020) and Local SGD cannot prevent this gap in this phase. Instead, our theory suggests that Local SGD can reduce the gap in late training phases after decaying the learning rate.", + "bbox": [ + 169, + 255, + 823, + 313 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 WHEN DOES LOCAL SGD GENERALIZE BETTER?", + "text_level": 1, + "bbox": [ + 171, + 328, + 620, + 345 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In our motivating example of Post-local SGD, switching from SGD to Local SGD can outperform running SGD alone (i.e., no switching) in test accuracy, but this improvement does not always arise and can depend on the choice of the switching time point. Because of this, a necessary first step for developing a theoretical understanding of Local SGD is to identify under what general conditions Local SGD can improve the generalization of SGD by merely adding local steps.", + "bbox": [ + 169, + 354, + 823, + 426 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 THE DEBATE ON LOCAL SGD", + "text_level": 1, + "bbox": [ + 171, + 441, + 423, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We first summarize a debate in the literature regarding when to switch from SGD to Local SGD in running Post-local SGD, which hints the conditions so that Local SGD can improve upon SGD.", + "bbox": [ + 169, + 463, + 823, + 493 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Local SGD generalizes better than SGD on CIFAR-10. Lin et al. (2020b) empirically observed that Post-local SGD exhibits a better generalization performance than SGD. 
Most of their experiments are conducted on CIFAR-10 and CIFAR-100 with multiple learning rate decays, and the algorithm switches from (parallel) SGD to Local SGD right after the first learning rate decay. We refer to this particular choice of the switching time point as the first-decay switching strategy for short. To justify this strategy, they empirically showed that the generalization improvement can be less significant if starting Local SGD from the beginning or right after the second learning rate decay. It has also been observed by Wang & Joshi (2021) that running Local SGD from the beginning improves generalization, but the test accuracy improvement may not be large enough. A subsequent work by Lin et al. (2020a) showed that adding local steps to Extrap-SGD, a variant of SGD proposed therein, after the first learning rate decay also improves generalization, suggesting that the first-decay switching strategy can also be applied to the post-local variant of other optimizers.", + "bbox": [ + 169, + 498, + 825, + 667 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Does Local SGD exhibit the same generalization benefit on large-scale datasets? Going beyond CIFAR-10, Lin et al. (2020b) conducted a few ImageNet experiments and showed that Post-local SGD with first-decay switching strategy still leads to better generalization than SGD. However, the improvement is sometimes marginal, e.g., $0.1\\%$ for batch size 8192. For the general case, they suggested that the time of switching should be tuned aiming at \"capturing the time when trajectory starts to get into the influence basin of a local minimum\" in a footnote, but no further discussion or experiments are provided to justify this guideline. Ortiz et al. (2021) conducted a more extensive evaluation on ImageNet (with a different set of hyperparameters) and concluded with the opposite: the first-decay switching strategy can hurt the validation accuracy. 
Instead, switching at a later time, such as the second learning rate decay, leads to a better validation accuracy than SGD. $^{1}$ To explain this phenomenon, they conjecture that switching to Local SGD has a regularization effect that is beneficial only in the short-term, so it is always better to switch as late as possible. They further conjecture that this discrepancy between CIFAR-10 and ImageNet is mainly due to the task scale. On TinyImageNet, which is a spatially downscaled subset of ImageNet, the first-decay switching strategy indeed leads to better validation accuracy.", + "bbox": [ + 169, + 672, + 825, + 882 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "This generalization improvement is not mentioned explicitly in (Ortiz et al., 2021) but can be clearly seen from Figures 7 and 8 in their paper.", + "bbox": [ + 169, + 897, + 823, + 925 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f570556fdfaa0f6044b6f240a2cb7d16b8e147d57d421fd6b15c4809f3cddcfc.jpg", + "image_caption": [ + "(a) CIFAR-10, start from random." + ], + "image_footnote": [], + "bbox": [ + 181, + 65, + 372, + 156 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/6461538aea94e321f86ced7d94c6c8a21dd6a9c4cd72c786139df36ceef1178e.jpg", + "image_caption": [ + "(b) CIFAR-10, start from #250." + ], + "image_footnote": [], + "bbox": [ + 403, + 65, + 591, + 155 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/b98462c3f9e7557bf75477fde8fe81b76ca936bbd82faa548f0ffa3271ebb7bc.jpg", + "image_caption": [ + "(c) ImageNet, start from #100." 
+ ], + "image_footnote": [], + "bbox": [ + 625, + 65, + 813, + 155 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/195a334edf2abcfae1bb681d2e115a4eea3b4fcca6268203023bb1ad7ae80af2.jpg", + "image_caption": [ + "(d) ImageNet, first phase $\\eta = 3.2$" + ], + "image_footnote": [], + "bbox": [ + 181, + 186, + 372, + 277 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/68ffb33aa41408a4060ba829d1d7b2754f404172061830c7aa362204cfde84d1.jpg", + "image_caption": [ + "(e) CIFAR-10, test acc v.s. $H$" + ], + "image_footnote": [], + "bbox": [ + 403, + 186, + 591, + 277 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/0c72702dcc36c753b25919680336467916535f7964024c130c3ff1332f48704b.jpg", + "image_caption": [ + "(f) ImageNet, test acc v.s. $H$", + "Figure 2: Ablation studies on $\\eta$ , $H$ and training time in the same setting as Figure 1. For (a)(d), we train from random initialization. For (b)(c)(e)(f), we start training from the checkpoints saved at the switching time points in Figure 1 (epoch #250 for CIFAR-10 and epoch #100 for ImageNet). See Appendix M.2 for training details." + ], + "image_footnote": [], + "bbox": [ + 625, + 186, + 813, + 277 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 KEY FACTORS: SMALL LEARNING RATE AND SUFFICIENT TRAINING TIME", + "text_level": 1, + "bbox": [ + 171, + 349, + 733, + 364 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "All the above papers agree that Post-local/Local SGD improves upon SGD to some extent. However, it is in debate under what conditions the generalization benefit can consistently occur. We now conduct ablation studies to identify the key factors so that adding local steps improves the generalization of SGD. We run parallel SGD and Local SGD with the same learning rate $\\eta$ , local batch size $B_{\\mathrm{loc}}$ , and number of workers $K$ . 
We start training from the same initialization and compare their generalization after the same number of epochs. As Post-local SGD can be viewed as Local SGD starting from an SGD-pretrained model, the initial point in our experiments can be either random or a checkpoint of SGD training. See Appendix C for implementation details and Appendix M.2 for more details about the experimental setup.", + "bbox": [ + 169, + 372, + 823, + 500 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The first observation we have is that the generalization benefits can be reproduced on both CIFAR-10 and ImageNet in our setting (see Figure 1). We remark that Post-local SGD and SGD in Lin et al. (2020b); Ortiz et al. (2021) are implemented with accompanying Nesterov momentum terms. The learning rate also decays a couple of times in training with Local SGD. Nevertheless, our experiments show that the Nesterov momentum and learning rate decay are not necessary for Local SGD to generalize better than SGD. Our main finding after further ablation studies is summarized below:", + "bbox": [ + 169, + 503, + 823, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Finding 2.1. Given a sufficiently small learning rate and a sufficiently long training time, Local SGD exhibits better generalization than SGD, if the number of local steps $H$ per round is tuned properly according to the learning rate. This holds for both training from random initialization and from pre-trained models.", + "bbox": [ + 169, + 593, + 826, + 650 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Now we go through each point of our main finding. See also Appendix F for more plots.", + "bbox": [ + 169, + 655, + 751, + 670 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1). Pretraining is not necessary. 
In contrast to previous works claiming the benefits of Post-local SGD over Local SGD (Lin et al., 2020b; Ortiz et al., 2021), we observe that Local SGD with random initialization also generalizes significantly better than SGD, as long as the learning rate is small and the training time is sufficiently long (Figure 2(a)). Starting from a pretrained model may shorten the time to reach this generalization benefit to show up (Figure 2(b)), but it is not necessary.", + "(2). Learning rate should be small. We experiment with a wide range of learning rates to conclude that setting a small learning rate is necessary. The learning rate is 0.32 for Figures 2(a) and 2(b) and is 0.16 for Figure 2(c). As shown in Figure 2(d), Local SGD encounters optimization difficulty in the first phase where $\\eta$ is large ( $\\eta = 3.2$ ), resulting in inferior final test accuracy. Even for training from a pretrained model, the generalization improvement of Local SGD disappears for large learning rates (e.g., $\\eta = 1.6$ in Figure 5(d)). In contrast, if a longer training time is allowed, reducing the learning rate of Local SGD does not lead to test accuracy drop (Figure 5(c)).", + "(3). Training time should be long enough. To investigate the effect of training time, in Figures 2(b) and 2(c), we extend the training budget for the Post-local SGD experiments in Figure 1 and observe that a longer training time leads to greater generalization improvement upon SGD. On the other hand, Local SGD generalizes worse than SGD in the first few epochs of Figures 2(a) and 2(c); see Figures 5(a) and 5(b) for an enlarged view." + ], + "bbox": [ + 169, + 672, + 823, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(4). 
The number of local steps $H$ should be tuned carefully. The number of local steps $H$ has a complex interplay with the learning rate $\\eta$ , but generally speaking, a smaller $\\eta$ needs a higher $H$ to achieve consistent generalization improvement. For CIFAR-10 with a post-local training budget of 250 epochs (see Figure 2(e)), the test accuracy first rises as $H$ increases, and begins to fall as $H$ exceeds some threshold for relatively large $\\eta$ (e.g., $\\eta \\geq 0.5$ ) while keeps growing for smaller $\\eta$ (e.g., $\\eta < 0.5$ ). For ImageNet with a post-local training budget of 50 epochs (see Figure 2(f)), the test accuracy first increases and then decreases in $H$ for all learning rates.", + "bbox": [ + 169, + 103, + 826, + 202 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reconciling previous works. Our finding can help to settle the debate presented in Section 2.1 to a large extent. Simultaneously requiring a small learning rate and sufficient training time poses a trade-off when learning rate decay is used with a limited training budget: switching to Local SGD earlier may lead to a large learning rate, while switching later makes the generalization improvement of Local SGD less noticeable due to fewer update steps. It is thus unsurprising that first-decay switching strategy is not always the best. The need for sufficient training time does not contradict with Ortiz et al. (2021)'s conjecture that Local SGD only has a \"short-term\" generalization benefit. In their experiments, the generalization improvement usually disappears right after the next learning rate decay (instead of after a fixed amount of time). We suspect that the real reason why the improvement vanishes is that the number of local steps $H$ was kept as a constant, but our finding suggests tuning $H$ after $\\eta$ changes. 
In Figure 5(e), we reproduce this phenomenon and show that increasing $H$ after learning rate decay retains the improvement.", + "bbox": [ + 169, + 208, + 826, + 377 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Generalization performances at the optimal learning rate of SGD. In practice, the learning rate of SGD is usually tuned to achieve the best training loss/Validation accuracy within a fixed training budget. Our finding suggests that when the tuned learning rate is small and the training time is sufficient, Local SGD can offer generalization improvement over SGD. As an example, in our experiments on training from an SGD-pretrained model, the optimal learning rate for SGD is 0.5 on CIFAR-10 (Figure 2(e)) and 0.064 on ImageNet (Figure 2(f)). With the same learning rate as SGD, the test accuracy is improved by $1.1\\%$ on CIFAR-10 and $0.3\\%$ on ImageNet when using Local SGD with $H = 750$ and $H = 26$ respectively. The improvement could become even higher if the learning rate of Local SGD is carefully tuned.", + "bbox": [ + 169, + 381, + 823, + 507 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 THEORETICAL ANALYSIS OF LOCAL SGD: THE SLOW SDE", + "text_level": 1, + "bbox": [ + 171, + 515, + 702, + 530 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we adopt an SDE-based approach to rigorously establish the generalization benefit of Local SGD in a general setting. Below, we first identify the difficulty of adapting the SDE framework to Local SGD. Then, we present our novel SDE characterization of Local SGD around the manifold of minimizers and explain the generalization benefit of Local SGD with our SDE.", + "bbox": [ + 169, + 537, + 823, + 594 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Notations. We follow the notations in Section 1. 
We denote by $\\eta$ the learning rate, $K$ the number of workers, $B$ the (global) batch size, $B_{\\mathrm{loc}}\\coloneqq B / K$ the local batch size, $H$ the number of local steps, $\\ell (\\pmb {\\theta};\\zeta)$ the loss function for a data sample $\\zeta$ , and $\\tilde{\\mathcal{D}}$ the training distribution. Furthermore, we define $\\mathcal{L}(\\pmb {\\theta})\\coloneqq \\mathbb{E}_{\\xi \\sim \\tilde{\\mathcal{D}}}[\\ell (\\pmb {\\theta};\\xi)]$ as the expected loss, $\\Sigma (\\pmb {\\theta})\\coloneqq \\operatorname{Cov}_{\\xi \\sim \\tilde{\\mathcal{D}}}[\\nabla \\ell (\\pmb {\\theta};\\xi)]$ as the noise covariance of gradients at $\\pmb{\\theta}$ . Let $\\{W_t\\}_{t\\geq 0}$ denote the standard Wiener process. For a mapping $F:\\mathbb{R}^d\\to \\mathbb{R}^d$ , denote by $\\partial F(\\pmb {\\theta})$ the Jacobian at $\\pmb{\\theta}$ and $\\partial^2 F(\\pmb {\\theta})$ the second order derivative at $\\pmb{\\theta}$ . Furthermore, for any matrix $M\\in \\mathbb{R}^{d\\times d}$ , $\\partial^2 F(\\pmb {\\theta})[M] = \\sum_{i\\in [d]}\\langle \\frac{\\partial^2F_i}{\\partial\\theta^2},M\\rangle e_i$ where $e_i$ is the $i$ -th vector of the standard basis. We write $\\partial^2 (\\nabla \\mathcal{L})(\\pmb {\\theta})[M]$ as $\\nabla^3\\mathcal{L}(\\pmb {\\theta})[M]$ for short.", + "bbox": [ + 169, + 599, + 823, + 726 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Local SGD. We use the following formulation of Local SGD for theoretical analysis. See also Appendix C for the pseudocode. Local SGD proceeds in multiple rounds of model averaging, where each round produces a global iterate $\\bar{\\theta}^{(s)}$ . In the $(s + 1)$ -th round, every worker $k \\in [K]$ starts with its local copy of the global iterate $\\pmb{\\theta}_{k,0}^{(s)} \\gets \\bar{\\pmb{\\theta}}^{(s)}$ and does $H$ steps of SGD with local batches. 
In the $t$ -th local step of the $k$ -th worker, it draws a local batch of $B_{\\mathrm{loc}} \\coloneqq B / K$ independent samples $\\xi_{k,t,1}^{(s)}, \\dots, \\xi_{k,t,B_{\\mathrm{loc}}}^{(s)}$ from a shared training distribution $\\tilde{\\mathcal{D}}$ and updates as follows:", + "bbox": [ + 169, + 731, + 826, + 828 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} \\leftarrow \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {g} _ {k, t} ^ {(s)}, \\quad \\text {w h e r e} \\quad \\boldsymbol {g} _ {k, t} ^ {(s)} = \\frac {1}{B _ {\\mathrm {l o c}}} \\sum_ {i = 1} ^ {B _ {\\mathrm {l o c}}} \\nabla \\ell \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}; \\xi_ {k, t, i} ^ {(s)}\\right), \\quad t = 0, \\dots , H - 1. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 829, + 823, + 868 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The local updates on different workers are independent of each other as there is no communication. 
After finishing the $H$ local steps, the workers aggregate the resulting local iterates $\\pmb{\\theta}_{k,H}^{(s)}$ and assign the average to the next global iterate: $\\bar{\\pmb{\\theta}}^{(s + 1)}\\gets \\frac{1}{K}\\sum_{k = 1}^{K}\\pmb{\\theta}_{k,H}^{(s)}$ .", + "bbox": [ + 169, + 872, + 823, + 928 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 DIFFICULTY OF ADAPTING THE SDE FRAMEWORK TO LOCAL SGD", + "text_level": 1, + "bbox": [ + 171, + 103, + 683, + 118 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A widely-adopted approach to understanding the dynamics of SGD is to approximate it from a continuous perspective with the following SDE (3), which we call the conventional SDE approximation. Below, we discuss why it cannot be directly adopted to characterize the behavior of Local SGD.", + "bbox": [ + 169, + 125, + 823, + 167 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {X} (t) = - \\nabla \\mathcal {L} (\\boldsymbol {X}) \\mathrm {d} t + \\sqrt {\\frac {\\eta}{B}} \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {X}) \\mathrm {d} \\boldsymbol {W} _ {t}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 171, + 823, + 196 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "It is proved by Li et al. (2019a) that this SDE is a first-order approximation to SGD, where each discrete step corresponds to a continuous time interval of $\\eta$ . 
Several previous works adopt this SDE approximation and connect good generalization to having a large diffusion term $\\sqrt{\\frac{\\eta}{B}} \\Sigma^{1/2} \\mathrm{d}W_t$ in the SDE (Jastrzewski et al., 2017; Smith et al., 2020), because a suitable amount of noise can be necessary for large-batch training to generalize well (see also Appendix A).", + "bbox": [ + 169, + 198, + 825, + 268 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "According to Finding 2.1, it is tempting to consider the limit $\\eta \\to 0$ and see if Local SGD can also be modeled via a variant of the conventional SDE. In this case the typical time length that guarantees a good SDE approximation error is $\\mathcal{O}(\\eta^{-1})$ discrete steps (Li et al., 2019a; 2021a). However, this time scaling is too short for the difference to appear between Local SGD and SGD. Indeed, Theorem 3.1 below shows that they closely track each other for $\\mathcal{O}(\\eta^{-1})$ steps.", + "bbox": [ + 169, + 275, + 823, + 347 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3.1. Assume that the loss function $\\mathcal{L}$ is $\\mathcal{C}^3$ -smooth with bounded second and third order derivatives and that $\\nabla \\ell (\\pmb {\\theta};\\xi)$ is bounded. Let $T > 0$ be a constant, $\\bar{\\pmb{\\theta}}^{(s)}$ be the $s$ -th global iterate of Local SGD and $\\pmb {w}_t$ be the $t$ -th iterate of SGD with the same initialization $\\pmb {w}_0 = \\bar{\\pmb{\\theta}}^{(0)}$ and same $\\eta, B_{\\mathrm{loc}}, K$ . Then for any $H\\leq \\frac{T}{\\eta}$ and $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , it holds with probability at least $1 - \\delta$ that for all $s\\leq \\frac{T}{\\eta H}$ , $\\| \\bar{\\pmb{\\theta}}^{(s)} - \\pmb{w}_{sH}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})$ .", + "bbox": [ + 169, + 347, + 826, + 431 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We defer the proof to Appendix I. See also Appendix D for Lin et al. 
(2020b)'s attempt to model Local SGD with multiple conventional SDEs and discussions on why it does not give much insight.", + "bbox": [ + 169, + 434, + 825, + 464 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 SDE APPROXIMATION NEAR THE MINIMIZER MANIFOLD", + "text_level": 1, + "bbox": [ + 171, + 470, + 612, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Inspired by a recent paper (Li et al., 2021b), our strategy to overcome the shortcomings of the conventional SDE is to design a new SDE that can guarantee a good approximation for $\\mathcal{O}(\\eta^{-2})$ discrete steps, much longer than the $\\mathcal{O}(\\eta^{-1})$ discrete steps for the conventional SDE. Following their setting, we assume the existence of a manifold $\\Gamma$ consisting only of local minimizers and track the global iterate $\\bar{\\theta}^{(s)}$ around $\\Gamma$ after it takes $\\tilde{\\mathcal{O}} (\\eta^{-1})$ steps to approach $\\Gamma$ . Though the expected gradient $\\nabla \\mathcal{L}$ is near zero around $\\Gamma$ , the dynamics are still non-trivial because the noise can drive the iterate to move a significant distance in $\\mathcal{O}(\\eta^{-2})$ steps.", + "bbox": [ + 169, + 492, + 823, + 592 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Assumption 3.1. The loss function $\\mathcal{L}(\\cdot)$ and the matrix square root of the noise covariance $\\Sigma^{1/2}(\\cdot)$ are $\\mathcal{C}^\\infty$ -smooth. Besides, we assume that $\\|\\nabla \\ell(\\boldsymbol{\\theta}; \\xi)\\|_2$ is bounded by a constant for all $\\boldsymbol{\\theta}$ and $\\xi$ .", + "bbox": [ + 169, + 594, + 823, + 625 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Assumption 3.2. $\\Gamma$ is a $\\mathcal{C}^\\infty$ -smooth, $(d - m)$ -dimensional submanifold of $\\mathbb{R}^d$ , where any $\\zeta \\in \\Gamma$ is a local minimizer of $\\mathcal{L}$ . For all $\\zeta \\in \\Gamma$ , $\\mathrm{rank}(\\nabla^2\\mathcal{L}(\\zeta)) = m$ . 
Additionally, there exists an open neighborhood of $\\Gamma$ , denoted as $U$ , such that $\\Gamma = \\arg \\min_{\\pmb{\\theta} \\in U} \\mathcal{L}(\\pmb{\\theta})$ .", + "bbox": [ + 169, + 626, + 825, + 670 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Assumption 3.3. $\\Gamma$ is a compact manifold.", + "bbox": [ + 171, + 672, + 459, + 688 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The smoothness assumption on $\\mathcal{L}$ is generally satisfied when we use smooth activation functions, such as Swish (Ramachandran et al., 2017), softplus and GeLU (Hendrycks & Gimpel, 2016), which work equally well as ReLU in many circumstances. The existence of a minimizer manifold with $\\mathrm{rank}(\\nabla^2\\mathcal{L}(\\zeta)) = m$ has also been made as a key assumption in Fehrman et al. (2020); Li et al. (2021b); Lyu et al. (2022), where $\\mathrm{rank}(\\nabla^2\\mathcal{L}(\\zeta)) = m$ ensures that the Hessian is maximally nondegenerate on the manifold and implies that the tangent space at $\\zeta \\in \\Gamma$ equals the null space of $\\nabla^2\\mathcal{L}(\\zeta)$ . The last assumption is made to prevent the analysis from being too technically involved.", + "bbox": [ + 169, + 691, + 825, + 790 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our SDE for Local SGD characterizes the training dynamics near $\\Gamma$ . For ease of presentation, we define the following projection operators $\\Phi, P_{\\zeta}$ for points and differential forms respectively.", + "bbox": [ + 169, + 796, + 823, + 825 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Definition 3.1 (Gradient Flow Projection). Fix a point $\\theta_{\\mathrm{null}} \\notin \\Gamma$ . For $\\pmb{x} \\in \\mathbb{R}^d$ , consider the gradient flow $\\frac{\\mathrm{d}\\pmb{x}(t)}{\\mathrm{d}t} = -\\nabla \\mathcal{L}(\\pmb{x}(t))$ with $\\pmb{x}(0) = \\pmb{x}$ . We denote the gradient flow projection of $\\pmb{x}$ as $\\Phi(\\pmb{x})$ . 
$\\Phi(\\pmb{x}) := \\lim_{t \\to +\\infty} \\pmb{x}(t)$ if the limit exists and belongs to $\\Gamma$ ; otherwise, $\\Phi(\\pmb{x}) = \\theta_{\\mathrm{null}}$ .", + "bbox": [ + 168, + 830, + 823, + 876 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Definition 3.2. For any $\\zeta \\in \\Gamma$ and any differential form $\\mathbf{AdW}_t + \\mathbf{bdt}$ in Itô calculus, where $\\mathbf{A}$ is a matrix and $\\mathbf{b}$ is a vector, we use $P_{\\zeta}(\\mathbf{AdW}_t + \\mathbf{bdt})$ as a shorthand for the differential form $\\partial \\Phi (\\zeta)\\mathbf{AdW}_t + \\left(\\partial \\Phi (\\zeta)\\mathbf{b} + \\frac{1}{2}\\partial^2\\Phi (\\zeta)[\\mathbf{AA}^\\top ]\\right)\\mathrm{d}t.$", + "bbox": [ + 169, + 878, + 825, + 926 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "See Øksendal (2013) for an introduction to Itô calculus. Here $P_{\\zeta}$ equals $\\Phi (\\zeta +A\\mathrm{d}\\pmb {W}_t + \\pmb {b}\\mathrm{d}t) - \\Phi (\\zeta)$ by Itô calculus, which means that $P_{\\zeta}$ projects an infinitesimal step from $\\zeta$ , so that $\\zeta$ after taking the projected step does not leave the manifold $\\Gamma$ . It can be shown by simple calculus that $\\partial \\Phi (\\zeta)$ equals the projection matrix onto the tangent space of $\\Gamma$ at $\\zeta$ . We decompose the noise covariance $\\Sigma (\\zeta)$ for $\\zeta \\in \\Gamma$ into two parts: the noise in the tangent space $\\Sigma_{\\parallel}(\\zeta)\\coloneqq \\partial \\Phi (\\zeta)\\Sigma (\\zeta)\\partial \\Phi (\\zeta)$ and the noise in the rest $\\Sigma_{\\diamond}(\\zeta)\\coloneqq \\Sigma (\\zeta) - \\Sigma_{\\parallel}(\\zeta)$ . 
Now we are ready to state our SDE for Local SGD.", + "bbox": [ + 169, + 103, + 823, + 189 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Definition 3.3 (Slow SDE for Local SGD). Given $\\eta, H > 0$ and $\\zeta_0 \\in \\Gamma$ , define $\\zeta(t)$ as the solution of the following SDE with initial condition $\\zeta(0) = \\zeta_0$ :", + "bbox": [ + 169, + 191, + 823, + 222 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {(a) d i f f u s i o n} - \\underbrace {\\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(b) d r i f t - I} - \\underbrace {\\frac {K - 1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(c) d r i f t - I I}\\right). 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 224, + 825, + 268 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Here $\\widehat{\\Sigma}_{\\diamond}(\\zeta), \\widehat{\\Psi}(\\zeta) \\in \\mathbb{R}^{d \\times d}$ are defined as", + "bbox": [ + 171, + 273, + 455, + 291 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) := \\sum_ {i, j: (\\lambda_ {i} \\neq 0) \\vee (\\lambda_ {j} \\neq 0)} \\frac {1}{\\lambda_ {i} + \\lambda_ {j}} \\left\\langle \\boldsymbol {\\Sigma} _ {\\diamond} (\\boldsymbol {\\zeta}), \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 294, + 825, + 316 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) := \\sum_ {i, j: (\\lambda_ {i} \\neq 0) \\vee (\\lambda_ {j} \\neq 0)} \\frac {\\psi (\\eta H \\cdot (\\lambda_ {i} + \\lambda_ {j}))}{\\lambda_ {i} + \\lambda_ {j}} \\left\\langle \\boldsymbol {\\Sigma} _ {\\diamond} (\\boldsymbol {\\zeta}), \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 284, + 318, + 823, + 340 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\{\\pmb{v}_i\\}_{i=1}^d$ is a set of eigenvectors of $\\nabla^2\\mathcal{L}(\\zeta)$ that forms an orthonormal eigenbasis, and $\\lambda_1, \\ldots, \\lambda_d$ are the corresponding eigenvalues. Additionally, $\\psi(x) := \\frac{e^{-x} - 1 + x}{x}$ for $x \\neq 0$ and $\\psi(0) = 0$ .", + "bbox": [ + 169, + 345, + 826, + 392 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The use of $P_{\\zeta}$ keeps $\\zeta(t)$ on the manifold $\\Gamma$ through projection. 
$\\Sigma_{\\parallel}^{\\frac{1}{2}}(\\zeta)$ introduces a diffusion term to the SDE in the tangent space. The two drift terms involve $\\widehat{\\Sigma}_{\\diamond}(\\cdot)$ and $\\widehat{\\Psi}(\\cdot)$ , which can be intuitively understood as rescaling the entries of the noise covariance in the eigenbasis of Hessian. In the special case where $\\nabla^{2}\\mathcal{L} = \\mathrm{diag}(\\lambda_{1},\\dots,\\lambda_{d}) \\in \\mathbb{R}^{d\\times d}$ , we have $\\widehat{\\Sigma}_{\\diamond,i,j} = \\frac{1}{\\lambda_i + \\lambda_j}\\Sigma_{\\diamond,i,j}$ and $\\widehat{\\Psi}_{i,j} = \\frac{\\psi(\\eta H(\\lambda_i + \\lambda_j))}{\\lambda_i + \\lambda_j}\\Sigma_{\\diamond,i,j}$ . $\\psi(x)$ is a monotonically increasing function, which goes from 0 to 1 as $x$ goes from 0 to infinity (see Figure 9).", + "bbox": [ + 169, + 398, + 823, + 505 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We name this SDE as the Slow SDE for Local SGD because we will show that each discrete step of Local SGD corresponds to a continuous time interval of $\\eta^2$ instead of an interval of $\\eta$ in the conventional SDE. In this sense, our SDE is \"slower\" than the conventional SDE (and hence can track a longer horizon). This Slow SDE is inspired by Li et al. (2021b). 
Under nearly the same set of assumptions, they proved that SGD can be tracked by an SDE that is essentially equivalent to (4) with $K = 1$ , namely, without the drift-II term.", + "bbox": [ + 169, + 510, + 825, + 594 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} - \\underbrace {\\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(b) d r i f t - I}}\\right), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 597, + 825, + 641 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We refer to (7) as the Slow SDE for SGD. We remark that the drift-II term in (4) is novel and is the key to separate the generalization behaviors of Local SGD and SGD in theory. We will discuss this point later in Section 3.3. Now we present our SDE approximation theorem for Local SGD.", + "bbox": [ + 169, + 643, + 823, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Theorem 3.2. Let Assumptions 3.1 to 3.3 hold. Let $T > 0$ be a constant and $\\zeta(t)$ be the solution to (4) with the initial condition $\\zeta(0) = \\Phi(\\bar{\\theta}^{(0)}) \\in \\Gamma$ . 
If $H$ is set to $\\frac{\\alpha}{\\eta}$ for some constant $\\alpha > 0$ , then for any $\\mathcal{C}^3$ -smooth function $g(\\pmb{\\theta})$ , $\\max_{0 \\leq s \\leq \\frac{T}{H\\eta^2}} \\left| \\mathbb{E}[g(\\Phi(\\bar{\\pmb{\\theta}}^{(s)}))] - \\mathbb{E}[g(\\pmb{\\zeta}(sH\\eta^2))] \\right| = \\tilde{\\mathcal{O}}(\\eta^{0.25})$ , where $\\tilde{\\mathcal{O}}(\\cdot)$ hides log factors and constants that are independent of $\\eta$ but can depend on $g(\\pmb{\\theta})$ .", + "bbox": [ + 169, + 689, + 823, + 761 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Theorem 3.3. For $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ , it holds for all $\\mathcal{O}\\left(\\frac{1}{\\alpha}\\log \\frac{1}{\\eta}\\right)\\leq s\\leq \\frac{T}{\\alpha\\eta}$ that $\\Phi (\\bar{\\pmb{\\theta}}^{(s)})\\in \\Gamma$ and $\\| \\bar{\\pmb{\\theta}}^{(s)} - \\Phi (\\bar{\\pmb{\\theta}}^{(s)})\\| _2 = \\mathcal{O}(\\sqrt{\\alpha\\eta\\log\\frac{\\alpha}{\\eta\\delta}})$ , where $\\mathcal{O}(\\cdot)$ hides constants independent of $\\eta$ , $\\alpha$ and $\\delta$ .", + "bbox": [ + 169, + 761, + 823, + 819 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Theorem 3.2 suggests that the trajectories of the manifold projection and the solution to the Slow SDE (4) are close to each other in the weak approximation sense. That is, $\\{\\Phi (\\bar{\\theta}^{(s)})\\}$ and $\\{\\zeta (t)\\}$ cannot be distinguished by evaluating test functions from a wide function class, including all polynomials. This measurement of closeness between the iterates of stochastic gradient algorithms and their SDE approximations is also adopted by Li et al. (2019a; 2021a); Malladi et al. (2022), but their analyses are for conventional SDEs. 
Theorem 3.3 further states that the iterate $\\bar{\\theta}^{(s)}$ keeps close to its manifold projection after the first few rounds.", + "bbox": [ + 169, + 823, + 825, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Remark 3.1. To connect to Finding 2.1, we remark that our theorems (1) do not require the model to be pre-trained (as long as the gradient flow starting with $\\theta^{(0)}$ converges to $\\Gamma$ ); (2) give better bounds for smaller $\\eta$ ; (3) characterize a long training horizon $\\sim \\eta^{-2}$ . The need for tuning $H$ will be discussed in Section 3.3.3.", + "bbox": [ + 169, + 103, + 826, + 160 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Technical Contribution. The proof technique for Theorem 3.2 is novel and significantly different from the Slow SDE analysis of SGD in Li et al. (2021a). Their analysis uses advanced stochastic calculus and invokes Katzenberger's theorem (Katzenberger, 1991) to show that SGD converges to the Slow SDE in distribution, but no quantitative error bounds are provided. Also, due to the local updates and multiple aggregation steps in Local SGD, it is unclear how to extend Katzenberger's theorem to our case. To overcome this difficulty, we develop a new approach to analyze the Slow SDEs, which is based on the method of moments (Li et al., 2019a) and can provide the quantitative error bound $\\tilde{\\mathcal{O}} (\\eta^{0.25})$ in weak approximation. See Appendix J for our proof outline. 
A by-product of our result is the first quantitative approximation bound for the Slow SDE approximation for SGD, which can be easily obtained by setting $K = 1$ .", + "bbox": [ + 169, + 162, + 826, + 305 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.3 INTERPRETATION OF THE SLOW SDEs", + "text_level": 1, + "bbox": [ + 171, + 313, + 482, + 327 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this subsection, we compare the Slow SDEs for SGD and Local SGD and provide an important insight into why Local SGD generalizes better than SGD: Local SGD strengthens the drift term in the Slow SDE, which makes the implicit regularization of stochastic gradient noise more effective.", + "bbox": [ + 169, + 334, + 823, + 378 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.3.1 INTERPRETATION OF THE SLOW SDE FOR SGD.", + "text_level": 1, + "bbox": [ + 171, + 385, + 563, + 398 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The Slow SDE for SGD (7) consists of the diffusion and drift-I terms. The former injects noise into the dynamics in the tangent space; the latter one drives the dynamics to move along the negative gradient of $\\frac{1}{2B}\\langle \\nabla^2\\mathcal{L}(\\zeta),\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\rangle$ projected onto the tangent space, but ignoring the dependency of $\\widehat{\\Sigma}_{\\diamond}(\\zeta)$ on $\\zeta$ . This can be connected to the class of semi-gradient methods which only compute a part of the gradient (Mnih et al., 2015; Sutton & Barto, 1998; Brandfonbrener & Bruna, 2020). 
In this view, the long-term behavior of SGD is similar to a stochastic semi-gradient method minimizing the implicit regularizer $\\frac{1}{2B}\\langle \\nabla^2\\mathcal{L}(\\zeta),\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\rangle$ on the minimizer manifold of the original loss $\\mathcal{L}$ .", + "bbox": [ + 169, + 406, + 823, + 516 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Though the semi-gradient method may not perfectly optimize its objective, the above argument reveals that SGD has a deterministic trend toward the region with a smaller magnitude of Hessian, which is commonly believed to correlate with better generalization (Hochreiter & Schmidhuber, 1997; Keskar et al., 2017; Neyshabur et al., 2017; Jiang et al., 2020) (see Appendix A for more discussions). In contrast, the diffusion term can be regarded as a random perturbation to this trend, which can impede optimization when the drift-I term is not strong enough.", + "bbox": [ + 169, + 518, + 823, + 604 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Based on this view, we conjecture that strengthening the drift term of the Slow SDE can help SGD to better regularize the model, yielding a better generalization performance. More specifically, we propose the following hypothesis, which compares the generalization performances of the following generalized Slow SDEs. Note that $\\left(\\frac{1}{B},\\frac{1}{2B}\\right)$ -Slow SDE corresponds to the Slow SDE for SGD (7).", + "bbox": [ + 169, + 609, + 823, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Definition 3.4. 
For $\\kappa_{1},\\kappa_{2}\\geq 0$ define $(\\kappa_{1},\\kappa_{2})$ -Slow SDE to be the following:", + "bbox": [ + 169, + 669, + 687, + 684 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\sqrt {\\kappa_ {1}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t} - \\kappa_ {2} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t\\right). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 686, + 823, + 710 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Hypothesis 3.1. Starting at a minimizer $\\zeta_0\\in \\Gamma$ , run $(\\kappa_{1},\\kappa_{2})$ -Slow SDE and $(\\kappa_{1},\\kappa_{2}^{\\prime})$ -Slow SDE respectively for the same amount of time $T > 0$ and obtain $\\zeta (T),\\zeta '(T)$ . If $\\kappa_{2} > \\kappa_{2}^{\\prime}$ , then the expected test accuracy at $\\zeta (T)$ is better than that at $\\zeta '(T)$ .", + "bbox": [ + 169, + 715, + 823, + 760 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Due to the No Free Lunch Theorem, we do not claim that our hypothesis is always true, but we do believe that the hypothesis holds when training usual neural networks (e.g., ResNets, VGGNets) on standard benchmarks (e.g., CIFAR-10, ImageNet).", + "bbox": [ + 169, + 762, + 823, + 805 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Example: Training with Label Noise Regularization. To exemplify the generalization benefit of having a larger drift term, we follow a line of theoretical works (Li et al., 2021b; Blanc et al., 2020; Damian et al., 2021) to study the case of training over-parameterized neural nets with label noise regularization. 
For a $C$ -class classification task, the label noise regularization is as follows: every time we draw a sample from the training set, we keep the true label as it is with probability $1 - p$ and replace it with any other label with equal probability $\\frac{p}{C-1}$ . When we use cross-entropy loss, the Slow SDE for SGD turns out to be a simple deterministic gradient flow on $\\Gamma$ (instead of a semi-gradient method) for minimizing the trace of Hessian: $\\mathrm{d}\\boldsymbol{\\zeta}(t) = -\\frac{1}{4B}\\nabla_{\\Gamma}\\mathrm{tr}(\\nabla^{2}\\mathcal{L}(\\boldsymbol{\\zeta}))\\mathrm{d}t$ , where $\\nabla_{\\Gamma}f$", + "bbox": [ + 169, + 810, + 826, + 926 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "stands for the gradient of the function $f$ projected to the tangent space of $\\Gamma$ . Checking the validity of our hypothesis reduces to the following question: Is minimizing the trace of Hessian beneficial to generalization? Many works prove positive results in concrete settings, including the line of works we just mentioned. We refer the readers to Appendix G for further discussion.", + "bbox": [ + 169, + 103, + 823, + 160 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.3.2 LOCAL SGD STRENGTHENS THE DRIFT TERM IN SLOW SDE.", + "text_level": 1, + "bbox": [ + 171, + 167, + 656, + 181 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Based on Hypothesis 3.1, we argue that Local SGD improves generalization by strengthening the drift term of the Slow SDE. First, it can be seen from (4) that the Slow SDE for Local SGD has an additional drift-II term. 
Similar to the drift-I term of the Slow SDE for SGD, this drift-II term drives the dynamics to move along the negative semi-gradient of $\\frac{K - 1}{2B}\\langle \\nabla^2\\mathcal{L}(\\zeta),\\widehat{\\Psi} (\\zeta)\\rangle$ (with the dependency of $\\widehat{\\Psi} (\\zeta)$ on $\\zeta$ ignored). Combining it with the implicit regularizer induced by the drift-I term, we can see that the long-term behavior of Local SGD is similar to a stochastic semi-gradient method minimizing the implicit regularizer $\\frac{1}{2B}\\langle \\nabla^{2}\\mathcal{L}(\\zeta),\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\rangle +\\frac{K - 1}{2B}\\langle \\nabla^{2}\\mathcal{L}(\\zeta),\\widehat{\\Psi} (\\zeta)\\rangle$ on $\\Gamma$ .", + "bbox": [ + 169, + 188, + 823, + 297 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Comparing the definitions of $\\widehat{\\Sigma}_{\\diamond}(\\zeta)$ (5) and $\\widehat{\\Psi}(\\zeta)$ (6), we can see that $\\widehat{\\Psi}(\\zeta)$ is basically a rescaling of the entries of $\\widehat{\\Sigma}_{\\diamond}(\\zeta)$ in the eigenbasis of Hessian, where the rescaling factor $\\psi(\\eta H \\cdot (\\lambda_i + \\lambda_j))$ for each entry is between 0 and 1 (see Figure 9 for the plot of $\\psi$ ). When $\\eta H$ is small, the rescaling factors should be close to $\\psi(0) = 0$ , then $\\widehat{\\Psi}(\\zeta) \\approx \\mathbf{0}$ , leading to almost no additional regularization. On the other hand, when $\\eta H$ is large, the rescaling factors should be close to $\\psi(+\\infty) = 1$ , so $\\widehat{\\Psi}(\\zeta) \\approx \\widehat{\\Sigma}_{\\diamond}(\\zeta)$ . 
We can then merge the two implicit regularizers as $\\frac{K}{2B} \\langle \\nabla^2 \\mathcal{L}(\\zeta), \\widehat{\\Sigma}_{\\diamond}(\\zeta) \\rangle$ , and (4) becomes the $(\\frac{1}{B}, \\frac{K}{2B})$ -Slow SDE, which is restated below:", + "bbox": [ + 169, + 303, + 823, + 412 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t} - \\frac {K}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t\\right). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 411, + 823, + 431 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "From the above argument we know how the Slow SDE of Local SGD (4) changes as $\\eta H$ transitions from 0 to $+\\infty$ . Initially, when $\\eta H = 0$ , (4) is the same as the $(\\frac{1}{B}, \\frac{1}{2B})$ -Slow SDE for SGD. Then increasing $\\eta H$ strengthens the drift term of (4). As $\\eta H \\to +\\infty$ , (4) transitions to the $(\\frac{1}{B}, \\frac{K}{2B})$ -Slow SDE, where the drift term becomes $K$ times larger.", + "bbox": [ + 169, + 431, + 823, + 489 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "According to Hypothesis 3.1, the $(\\frac{1}{B},\\frac{K}{2B})$ -Slow SDE generalizes better than the $(\\frac{1}{B},\\frac{1}{2B})$ -Slow SDE, so Local SGD with $\\eta H = +\\infty$ should generalize better than SGD. 
When $\\eta H$ is chosen realistically as a finite value, the generalization performance of Local SGD interpolates between these two cases, which results in a worse generalization than $\\eta H = +\\infty$ but should still be better than SGD.", + "bbox": [ + 169, + 494, + 823, + 553 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.3.3 THEORETICAL INSIGHTS INTO TUNING THE NUMBER OF LOCAL STEPS", + "text_level": 1, + "bbox": [ + 171, + 559, + 720, + 573 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Based on our Slow SDE approximations, we now discuss how the number of local steps $H$ affects the generalization of Local SGD. When $\\eta$ is small but finite, tuning $H$ offers a trade-off between regularization strength and SDE approximation quality. Larger $\\alpha \\coloneqq \\eta H$ makes the regularization stronger in the SDE (as discussed in Section 3.3.2), but the SDE itself may lose track of Local SGD, which can be seen from the error bound $\\mathcal{O}(\\sqrt{\\alpha\\eta\\log(\\alpha / \\eta\\delta)})$ in Theorem 3.3. Therefore, we expect the test accuracy to first increase and then decrease as we gradually increase $H$ . Indeed, we observe in Figures 2(e) and 2(f) that the plot of test accuracy versus $H$ is unimodal for each $\\eta$ .", + "bbox": [ + 169, + 580, + 823, + 680 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "It is thus necessary to tune $H$ for the best generalization. When $H$ is tuned together with other hyperparameters, such as learning rate $\\eta$ , our Slow SDE approximation recommends setting $H$ to be at least $\\Omega(\\eta^{-1})$ so that $\\alpha := \\eta H$ does not vanish in the Slow SDE. Since larger $\\alpha$ gives a stronger regularization effect, the optimal $H$ should be set to the largest value so that the Slow SDE does not lose track of Local SGD. 
Indeed, we empirically observed that when $H$ is tuned optimally, $\\alpha$ increases as $\\eta$ decreases, suggesting that the optimal $H$ grows faster than $\\Omega(\\eta^{-1})$ . See Figure 5(f).", + "bbox": [ + 169, + 686, + 823, + 772 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 CONCLUSIONS", + "text_level": 1, + "bbox": [ + 171, + 777, + 330, + 792 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we analyze the long-term generalization behavior of Local SGD in the small learning rate regime by deriving the Slow SDE for Local SGD as a generalization of that for SGD (Li et al., 2021b). We attribute the generalization improvement over SGD to the larger drift term in the SDE for Local SGD. Our empirical validation shows that Local SGD indeed induces generalization benefits with small learning rate and long enough training time. The main limitation of our work is that our analysis does not imply any direct theoretical separation between SGD and Local SGD in test accuracy, which requires a much deeper understanding of the loss landscape and the Slow SDEs and is left for future work. 
Another direction for future work is to design distributed training methods that provably generalize better than SGD based on the theoretical insights obtained from Slow SDEs.", + "bbox": [ + 169, + 797, + 825, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENT AND DISCLOSURE OF FUNDING", + "text_level": 1, + "bbox": [ + 171, + 102, + 627, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The work of Xinran Gu and Longbo Huang is supported by the Technology and Innovation Major Project of the Ministry of Science and Technology of China under Grant 2020AAA0108400 and 2020AAA0108403, the Tsinghua University Initiative Scientific Research Program, and Tsinghua Precision Medicine Foundation 10001020109. The work of Kaifeng Lyu and Sanjeev Arora is supported by funding from NSF, ONR, Simons Foundation, DARPA and SRC.", + "bbox": [ + 171, + 133, + 826, + 204 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 224, + 287, + 239 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kwangjun Ahn, Jingzhao Zhang, and Suvrit Sra. Understanding the unstable convergence of gradient descent. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 247-257. PMLR, 17-23 Jul 2022.", + "Debraj Basu, Deepesh Data, Can Karakus, and Suhas Diggavi. Qsparse-local-SGD: Distributed SGD with quantization, sparsification and local computations. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. 
Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019.", + "Yoshua Bengio. Practical Recommendations for Gradient-Based Training of Deep Architectures, pp. 437-478. Springer Berlin Heidelberg, Berlin, Heidelberg, 2012. ISBN 978-3-642-35289-8. doi: 10.1007/978-3-642-35289-8_26.", + "Guy Blanc, Neha Gupta, Gregory Valiant, and Paul Valiant. Implicit regularization for deep neural networks driven by an Ornstein-uhlenbeck like process. In Jacob Abernethy and Shivani Agarwal (eds.), Proceedings of Thirty Third Conference on Learning Theory, volume 125 of Proceedings of Machine Learning Research, pp. 483–513. PMLR, 09–12 Jul 2020.", + "David Brandfonbrener and Joan Bruna. Geometric insights into the convergence of nonlinear TD learning. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020.", + "Jianmin Chen, Xinghao Pan, Rajat Monga, Samy Bengio, and Rafal Jozefowicz. Revisiting distributed synchronous SGD. arXiv preprint arXiv:1604.00981, 2016.", + "Kai Chen and Qiang Huo. Scalable training of deep learning machines by incremental block training with intra-block parallel optimization and blockwise model-update filtering. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5880-5884, 2016. doi: 10.1109/ICASSP.2016.7472805.", + "Alex Damian, Tengyu Ma, and Jason D. Lee. Label noise SGD provably prefers flat global minimizers. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, 2021.", + "Laurent Dinh, Razvan Pascanu, Samy Bengio, and Yoshua Bengio. Sharp minima can generalize for deep nets. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 1019-1028. PMLR, 06-11 Aug 2017.", + "Aijun Du and JinQiao Duan. 
Invariant manifold reduction for stochastic dynamical systems. Dynamic Systems and Applications, 16:681-696, 2007.", + "KJ Falconer. Differentiation of the limit mapping in a dynamical system. Journal of the London Mathematical Society, 2(2):356-372, 1983.", + "Benjamin Fehrman, Benjamin Gess, and Arnulf Jentzen. Convergence rates for the stochastic gradient descent method for non-convex objective functions. Journal of Machine Learning Research, 21:136, 2020.", + "Damir Filipović. Invariant manifolds for weak solutions to stochastic equations. *Probability theory and related fields*, 118(3):323-341, 2000." + ], + "bbox": [ + 171, + 247, + 825, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. In International Conference on Learning Representations, 2021.", + "Margalit R Glasgow, Honglin Yuan, and Tengyu Ma. Sharp bounds for federated averaging (Local SGD) and continuous perspective. In International Conference on Artificial Intelligence and Statistics, pp. 9050-9090. PMLR, 2022.", + "Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch SGD: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017.", + "Farzin Haddadpour, Mohammad Mahdi Kamani, Mehrdad Mahdavi, and Viveck Cadambe. Local SGD with periodic averaging: Tighter analysis and adaptive synchronization. Advances in Neural Information Processing Systems, 32, 2019.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 
Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. In Proceedings of the IEEE international conference on computer vision, pp. 1026-1034, 2015.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.", + "Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016.", + "Sepp Hochreiter and Jürgen Schmidhuber. Flat minima. Neural computation, 9(1):1-42, 1997.", + "Elad Hoffer, Itay Hubara, and Daniel Soudry. Train longer, generalize better: closing the generalization gap in large batch training of neural networks. Advances in neural information processing systems, 30, 2017.", + "Wenqing Hu, Chris Junchi Li, Lei Li, and Jian-Guo Liu. On the diffusion approximation of nonconvex stochastic gradient descent. arXiv preprint arXiv:1705.07562, 2017.", + "Hikaru Ibayashi and Masaaki Imaizumi. Exponential escape efficiency of SGD from sharp minima in non-stationary regime. arXiv preprint arXiv:2111.04004, 2021.", + "Stanisław Jastrzebski, Zachary Kenton, Devansh Arpit, Nicolas Ballas, Asja Fischer, Yoshua Bengio, and Amos Storkey. Three factors influencing minima in SGD. arXiv preprint arXiv:1711.04623, 2017.", + "Xianyan Jia, Shutao Song, Wei He, Yangzihao Wang, Haidong Rong, Feihu Zhou, Liqiang Xie, Zhenyu Guo, Yuzhou Yang, Liwei Yu, et al. Highly scalable deep learning training system with mixed-precision: Training imagenet in four minutes. Advances in Neural Information Processing Systems, 2018.", + "Yiding Jiang, Behnam Neyshabur, Hossein Mobahi, Dilip Krishnan, and Samy Bengio. 
*Fantastic generalization measures and where to find them.* In International Conference on Learning Representations, 2020.", + "Peter Kairouz, H Brendan McMahan, Brendan Avent, Aurélien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. Advances and open problems in federated learning. Foundations and Trends® in Machine Learning, 14(1-2):1-210, 2021.", + "Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank Reddi, Sebastian Stich, and Ananda Theertha Suresh. Scaffold: Stochastic controlled averaging for federated learning. In International Conference on Machine Learning, pp. 5132-5143. PMLR, 2020.", + "G. S. Katzenberger. Solutions of a stochastic differential equation forced onto a manifold by a large drift. The Annals of Probability, 19(4):1587 - 1628, 1991." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. In International Conference on Learning Representations, 2017.", + "Ahmed Khaled, Konstantin Mishchenko, and Peter Richtárik. Tighter theory for local SGD on identical and heterogeneous data. In International Conference on Artificial Intelligence and Statistics, pp. 4519-4529. PMLR, 2020.", + "Bobby Kleinberg, Yanzhi Li, and Yang Yuan. An alternative view: When does SGD escape local minima? In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 
2698-2707. PMLR, 10-15 Jul 2018.", + "Alex Krizhevsky. One weird trick for parallelizing convolutional neural networks. arXiv preprint arXiv:1404.5997, 2014.", + "Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009.", + "Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/, 2022.", + "Yann A. LeCun, Léon Bottou, Genevieve B. Orr, and Klaus-Robert Müller. Efficient BackProp, pp. 9-48. Springer Berlin Heidelberg, Berlin, Heidelberg, 2012. ISBN 978-3-642-35289-8. doi: 10.1007/978-3-642-35289-8_3.", + "Qianxiao Li, Cheng Tai, and Weinan E. Stochastic modified equations and dynamics of stochastic gradient algorithms i: Mathematical foundations. Journal of Machine Learning Research, 20(40): 1-47, 2019a.", + "Xiang Li, Kaixuan Huang, Wenhao Yang, Shusen Wang, and Zhihua Zhang. On the convergence of fedavg on non-iid data. In International Conference on Learning Representations, 2019b.", + "Zhiyuan Li, Kaifeng Lyu, and Sanjeev Arora. Reconciling modern deep learning with traditional optimization analyses: The intrinsic learning rate. Advances in Neural Information Processing Systems, 33:14544-14555, 2020.", + "Zhiyuan Li, Sadhika Malladi, and Sanjeev Arora. On the validity of modeling SGD with stochastic differential equations (sdes). Advances in Neural Information Processing Systems, 34:12712-12725, 2021a.", + "Zhiyuan Li, Tianhao Wang, and Sanjeev Arora. What happens after SGD reaches zero loss? a mathematical framework. In International Conference on Learning Representations, 2021b.", + "Zhiyuan Li, Tianhao Wang, and Dingli Yu. Fast mixing of stochastic gradient descent with normalization and weight decay. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022.", + "Tao Lin, Lingjing Kong, Sebastian Stich, and Martin Jaggi. Extrapolation for large-batch training in deep learning. 
In Hal Daumé III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 6094-6104. PMLR, 13-18 Jul 2020a.", + "Tao Lin, Sebastian U. Stich, Kumar Kshitij Patel, and Martin Jaggi. Don't use large mini-batches, use Local SGD. In International Conference on Learning Representations, 2020b.", + "Kaifeng Lyu, Zhiyuan Li, and Sanjeev Arora. Understanding the generalization benefit of normalization layers: Sharpness reduction, 2022.", + "Chao Ma and Lexing Ying. On linear stability of SGD and input-smoothness of neural networks. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, volume 34, pp. 16805-16817. Curran Associates, Inc., 2021." + ], + "bbox": [ + 171, + 102, + 825, + 922 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Sadhika Malladi, Kaifeng Lyu, Abhishek Panigrahi, and Sanjeev Arora. On the SDEs and scaling rules for adaptive gradient algorithms. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022.", + "Gideon Mann, Ryan T. McDonald, Mehryar Mohri, Nathan Silberman, and Dan Walker. Efficient large-scale distributed training of conditional maximum entropy models. In Advances in Neural Information Processing Systems 22, pp. 1231-1239, 2009.", + "Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics, pp. 1273-1282. 
PMLR, 2017.", + "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al. Human-level control through deep reinforcement learning. nature, 518(7540):529-533, 2015.", + "Behnam Neyshabur, Srinadh Bhojanapalli, David Mcallester, and Nati Srebro. Exploring generalization in deep learning. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017.", + "Jose Javier Gonzalez Ortiz, Jonathan Frankle, Mike Rabbat, Ari Morcos, and Nicolas Ballas. Trade-offs of Local SGD at scale: An empirical study. arXiv preprint arXiv:2110.08133, 2021.", + "Daniel Povey, Xiaohui Zhang, and Sanjeev Khudanpur. Parallel training of dnns with natural gradient and parameter averaging. arXiv preprint arXiv:1410.7455, 2014.", + "Prajit Ramachandran, Barret Zoph, and Quoc V Le. Searching for activation functions. arXiv preprint arXiv:1710.05941, 2017.", + "Benjamin Recht, Christopher Ré, Stephen J. Wright, and Feng Niu. Hogwild: A lock-free approach to parallelizing stochastic gradient descent. In Advances in Neural Information Processing Systems 24, pp. 693-701, 2011.", + "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 115(3):211-252, 2015. doi: 10.1007/s11263-015-0816-y.", + "Frank Seide, Hao Fu, Jasha Droppo, Gang Li, and Dong Yu. 1-bit stochastic gradient descent and its application to data-parallel distributed training of speech dnns. In Haizhou Li, Helen M. 
Meng, Bin Ma, Engsiong Chng, and Lei Xie (eds.), INTERSPEECH 2014, 15th Annual Conference of the International Speech Communication Association, Singapore, September 14-18, 2014, pp. 1058-1062. ISCA, 2014. URL http://www.isca-speech.org/archive/interspeech_2014/i14_1058.html.", + "Christopher J. Shallue, Jaehoon Lee, Joseph Antognini, Jascha Sohl-Dickstein, Roy Frostig, and George E. Dahl. Measuring the effects of data parallelism on neural network training. Journal of Machine Learning Research, 20(112):1-49, 2019.", + "K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, May 2015.", + "Samuel Smith, Erich Elsen, and Soham De. On the generalization benefit of noise in stochastic gradient descent. In Hal Daumé III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 9058-9067. PMLR, 13-18 Jul 2020.", + "Samuel L Smith, Benoit Dherin, David Barrett, and Soham De. On the origin of implicit regularization in stochastic gradient descent. In International Conference on Learning Representations, 2021.", + "Sebastian U Stich. Local SGD converges fast and communicates little. In International Conference on Learning Representations, 2018." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Nikko Strom. Scalable distributed DNN training using commodity GPU cloud computing. In IN-TERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden, Germany, September 6-10, 2015, pp. 1488-1492. 
ISCA, 2015.", + "Hang Su and Haoyu Chen. Experiments on parallel training of deep neural network using model averaging. arXiv preprint arXiv:1507.01239, 2015.", + "Richard S. Sutton and Andrew G. Barto. Reinforcement learning - an introduction. Adaptive computation and machine learning. MIT Press, 1998. ISBN 978-0-262-19398-6.", + "Jianyu Wang and Gauri Joshi. Adaptive communication strategies to achieve the best error-routine trade-off in local-update SGD. Proceedings of Machine Learning and Systems, 1:212-229, 2019.", + "Jianyu Wang and Gauri Joshi. Cooperative SGD: A unified framework for the design and analysis of local-update SGD algorithms. Journal of Machine Learning Research, 22(213):1-50, 2021.", + "Jianyu Wang, Rudrajit Das, Gauri Joshi, Satyen Kale, Zheng Xu, and Tong Zhang. On the unreasonable effectiveness of federated averaging with heterogeneous data. arXiv preprint arXiv:2206.04723, 2022.", + "Blake Woodworth, Kumar Kshitij Patel, Sebastian Stich, Zhen Dai, Brian Bullins, Brendan Mcmahan, Ohad Shamir, and Nathan Srebro. Is local sgd better than minibatch sgd? In International Conference on Machine Learning, pp. 10334-10343. PMLR, 2020a.", + "Blake E Woodworth, Kumar Kshitij Patel, and Nati Srebro. Minibatch vs Local SGD for heterogeneous distributed learning. Advances in Neural Information Processing Systems, 33:6281-6292, 2020b.", + "Lei Wu, Chao Ma, and Weinan E. How sgd selects the global minima in over-parameterized learning: A dynamical stability perspective. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesà-Bianchi, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018.", + "Zeke Xie, Issei Sato, and Masashi Sugiyama. A diffusion theory for deep learning dynamics: Stochastic gradient descent exponentially favors flat minima. In International Conference on Learning Representations, 2021.", + "Yang You, Zhao Zhang, Cho-Jui Hsieh, James Demmel, and Kurt Keutzer. 
Imagenet training in minutes. In Proceedings of the 47th International Conference on Parallel Processing, pp. 1-10, 2018.", + "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In International Conference on Learning Representations, 2020.", + "Hao Yu, Sen Yang, and Shenghuo Zhu. Parallel restarted SGD with faster convergence and less communication: Demystifying why model averaging works for deep learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 5693-5700, 2019.", + "Jingzhao Zhang, Sai Praneeth Karimireddy, Andreas Veit, Seungyeon Kim, Sashank Reddi, Sanjiv Kumar, and Suvrit Sra. Why are adaptive methods good for attention models? Advances in Neural Information Processing Systems, 33:15383-15393, 2020.", + "Xiaohui Zhang, Jan Trmal, Daniel Povey, and Sanjeev Khudanpur. Improving deep neural network acoustic models using generalized maxout networks. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 215-219, 2014. doi: 10.1109/ICASSP.2014.6853589.", + "Fan Zhou and Guojing Cong. On the convergence properties of a k-step averaging stochastic gradient descent algorithm for nonconvex optimization. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18, pp. 3219-3227. International Joint Conferences on Artificial Intelligence Organization, 7 2018. doi: 10.24963/ijcai.2018/447. URL https://doi.org/10.24963/ijcai.2018/447." 
+ ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zhanxing Zhu, Jingfeng Wu, Bing Yu, Lei Wu, and Jinwen Ma. The anisotropic noise in stochastic gradient descent: Its behavior of escaping from sharp minima and regularization effects. arXiv preprint arXiv:1803.00195, 2018.", + "Martin Zinkevich, Markus Weimer, Lihong Li, and Alex Smola. Parallelized stochastic gradient descent. In J. Lafferty, C. Williams, J. Shawe-Taylor, R. Zemel, and A. Culotta (eds.), Advances in Neural Information Processing Systems, volume 23. Curran Associates, Inc., 2010.", + "Bernt Øksendal. Stochastic differential equations: an introduction with applications. Springer Science & Business Media, 2013." + ], + "bbox": [ + 171, + 102, + 825, + 234 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "CONTENTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 269, + 117 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Introduction 1", + "2 When does Local SGD Generalize Better? 
3" + ], + "bbox": [ + 173, + 138, + 825, + 188 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2.1 The Debate on Local SGD 3", + "2.2 Key Factors: Small Learning Rate and Sufficient Training Time 4" + ], + "bbox": [ + 196, + 194, + 825, + 232 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "3 Theoretical Analysis of Local SGD: The Slow SDE 5", + "bbox": [ + 173, + 251, + 825, + 266 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3.1 Difficulty of Adapting the SDE Framework to Local SGD 6", + "3.2SDE Approximation near the Minimizer Manifold 6", + "3.3 Interpretation of the Slow SDEs 8" + ], + "bbox": [ + 196, + 272, + 825, + 332 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3.3.1 Interpretation of the Slow SDE for SGD. 8", + "3.3.2 Local SGD Strengthens the Drift Term in Slow SDE. 9", + "3.3.3 Theoretical Insights into Tuning the Number of Local Steps 9" + ], + "bbox": [ + 233, + 337, + 825, + 396 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4 Conclusions 9", + "bbox": [ + 173, + 415, + 825, + 430 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A Additional Related Works 18", + "B Additional Discussions 19", + "C Implementation Details of Parallel SGD, Local SGD and Post-local SGD 20", + "D Modeling Local SGD with Multiple Conventional SDEs 23" + ], + "bbox": [ + 173, + 450, + 825, + 571 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "E Additional Interpretation of the Slow SDEs 23", + "bbox": [ + 173, + 590, + 825, + 606 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "E.1 Understanding the Diffusion Term in the Slow SDE 23", + "E.2 The Effect of Global Batch Size on Generalization 24" + ], + "bbox": [ + 196, + 612, + 825, + 648 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + 
"list_items": [ + "F Additional Experimental Results 25", + "G Discussions on Local SGD with Label Noise Regularization 27" + ], + "bbox": [ + 173, + 667, + 825, + 719 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "G.1 The Slow SDE for Local SGD with Label Noise Regularization 27", + "G.2 The Equivalence of Enlarging the Learning Rate and Adding Local Steps 28" + ], + "bbox": [ + 196, + 724, + 825, + 763 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "H Deriving the Slow SDE after Applying the LSR 28", + "I Proof of Theorem 3.1 30", + "J Proof Outline of Main Theorems 33", + "K Proof Details of Main Theorems 33" + ], + "bbox": [ + 173, + 782, + 825, + 902 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "K.1 Additional Notations 34", + "bbox": [ + 197, + 909, + 825, + 924 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "K.2 Computing the Derivatives of the Limiting Mapping 34", + "K.3 Preliminary Lemmas for GD and GF 35", + "K.4 Construction of working zones 38", + "K.5 Phase 1: Iterate Approaching the Manifold 39" + ], + "bbox": [ + 197, + 102, + 825, + 183 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "K.5.1 Additional notations 39", + "K.5.2 Proof for Subphase 1 39", + "K.5.3 Proof for Subphase 2 43" + ], + "bbox": [ + 233, + 188, + 825, + 247 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "K.6 Phase 2: Iterates Staying Close to Manifold 46", + "bbox": [ + 197, + 252, + 825, + 268 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "K.6.1 Additional notations 46", + "K.6.2 
Proof for the High Probability Bounds 46" + ], + "bbox": [ + 233, + 273, + 825, + 311 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "K.7 Summary of the dynamics and Proof of Theorems J.1 and J.2 51", + "K.8 Proof of Theorem 3.3 52", + "K.9 Computing the Moments for One \"Giant Step\" 53" + ], + "bbox": [ + 197, + 316, + 825, + 376 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "K.10 Proof of Weak Approximation 66", + "bbox": [ + 197, + 381, + 825, + 397 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "K.10.1 Preliminaries and additional notations 67", + "K.10.2 Proof of the approximation in our context 68" + ], + "bbox": [ + 233, + 402, + 825, + 439 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "L Deriving the Slow SDE for Label Noise Regularization 72", + "text_level": 1, + "bbox": [ + 171, + 457, + 825, + 473 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "M Experimental Details 74", + "text_level": 1, + "bbox": [ + 171, + 491, + 825, + 507 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "M.1 Post-local SGD Experiments in Section 1 74", + "M.2 Experimental Details for Figures 2 and 5 74", + "M.3 Details for Experiments in Figure 6. 
75", + "M.4 Details for Experiments on the Effect of the Diffusion Term 75", + "M.5 Details for Experiments on the Effect of Global Batch Size 76", + "M.6 Details for Experiments on Label Noise Regularization 76" + ], + "bbox": [ + 197, + 513, + 825, + 635 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A ADDITIONAL RELATED WORKS", + "text_level": 1, + "bbox": [ + 174, + 103, + 470, + 116 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "**Optimization aspect of Local SGD.** Local SGD is a communication-efficient variant of parallel SGD, where multiple workers perform SGD independently and average the model parameters periodically. Dating back to Mann et al. (2009) and Zinkevich et al. (2010), this strategy has been widely adopted to reduce the communication cost and speed up training in both scenarios of data center distributed training (Chen & Huo, 2016; Zhang et al., 2014; Povey et al., 2014; Su & Chen, 2015) and Federated Learning (McMahan et al., 2017; Kairouz et al., 2021). To further accelerate training, Wang & Joshi (2019) and Haddadpour et al. (2019) proposed adaptive schemes for the averaging frequency, and Basu et al. (2019) combined Local SGD with gradient compression. Motivated to theoretically understand the empirical success of Local SGD, a lot of researchers analyzed the convergence rate of Local SGD under various settings, e.g., homogeneous/heterogeneous data and convex/non-convex objective functions. Among them, Yu et al. (2019); Stich (2018); Khaled et al. (2020); Woodworth et al. (2020a) focus on the homogeneous setting where data for each worker are independent and identically distributed (IID). Li et al. (2019b); Karimireddy et al. (2020); Glasgow et al. 
(2022); Woodworth et al. (2020b); Wang et al. (2022) study the heterogeneous setting, where workers have non-IID data and local updates may induce \"client drift\" (Karimireddy et al., 2020) and hurt optimization. The error bound of Local SGD obtained by these works is typically inferior to that of SGD with the same global batch size for fixed number of iterations/epochs and becomes worse as the number of local steps increases, revealing a trade-off between less communication and better optimization. In this paper, we are interested in the generalization aspect of Local SGD in the homogeneous setting, assuming the training loss can be optimized to a small value.", + "bbox": [ + 174, + 200, + 823, + 479 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Gradient noise and generalization. The effect of stochastic gradient noise on generalization has been studied from different aspects, e.g., changing the order of learning different patterns Li et al. (2019a), inducing an implicit regularizer in the second-order SDE approximation Smith et al. (2021); Li et al. (2019a). Our work follows a line of works studying the effect of noise in the lens of sharpness, which is long believed to be related to generalization Hochreiter & Schmidhuber (1997); Neyshabur et al. (2017). Keskar et al. (2017) empirically observed that large-batch training leads to worse generalization and sharper minima than small-batch training. Wu et al. (2018); Hu et al. (2017); Ma & Ying (2021) showed that gradient noise destabilizes the training around sharp minima, and Kleinberg et al. (2018); Zhu et al. (2018); Xie et al. (2021); Ibayashi & Imaizumi (2021) quantitatively characterized how SGD escapes sharp minima. The most related papers are Blanc et al. (2020); Damian et al. (2021); Li et al. (2021b), which focus on the training dynamics near a manifold of minima and study the effect of noise on sharpness (see also Section 3.2). 
Though the mathematical definition of sharpness may be vulnerable to the various symmetries in deep neural nets (Dinh et al., 2017), sharpness still appears to be one of the most promising tools for predicting generalization (Jiang et al., 2020; Foret et al., 2021).", + "bbox": [ + 174, + 486, + 823, + 694 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Improving generalization in large-batch training. The generalization issue of the large-batch (or full-batch) training has been observed as early as (Bengio, 2012; LeCun et al., 2012). As mentioned in Section 1, the generalization issue of large-batch training could be due to the lack of a sufficient amount of stochastic noise. To make up the noise in large-batch training, Krizhevsky (2014); Goyal et al. (2017) empirically discovered the Linear Scaling Rule for SGD, which suggests enlarging the learning rate proportionally to the batch size. Jastrzebski et al. (2017) adopted an SDE-based analysis to justify that this scaling rule indeed retains the same amount of noise as small-batch training (see also Section 3.1). However, the SDE approximation may fail if the learning rate is too large (Li et al., 2021a), especially in the early phase of training before the first learning rate decay (Smith et al., 2020). Shallue et al. (2019) demonstrated that generalization gap between small- and large-batch training can also depend on many other training hyperparameters. Besides enlarging the learning rate, other approaches have also been proposed to reduce the gap, including training longer (Hoffer et al., 2017), learning rate warmup (Goyal et al., 2017), LARS (You et al., 2018), LAMB (You et al., 2020). 
In this paper, we focus on using Local SGD to improve generalization, but adding local steps is a generic training trick that can also be combined with others, e.g., Local LARS (Lin et al., 2020b), Local Extrap-SGD (Lin et al., 2020a).",
+ "bbox": [
+ 174,
+ 702,
+ 823,
+ 922
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2023",
+ "bbox": [
+ 173,
+ 32,
+ 477,
+ 46
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "page_number",
+ "text": "18",
+ "bbox": [
+ 491,
+ 948,
+ 506,
+ 959
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "text",
+ "text": "B ADDITIONAL DISCUSSIONS",
+ "text_level": 1,
+ "bbox": [
+ 171,
+ 102,
+ 439,
+ 118
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "text",
+ "text": "Connection to the conventional wisdom that the diffusion term matters more. As mentioned in Section 3.1, it is believed in the literature that a large diffusion term in the conventional SDE leads to good generalization. One may think that the diffusion term in the Slow SDE corresponds to that in the conventional SDE, and thus enlarging the diffusion term rather than the drift term should lead to better generalization. However, we note that both the diffusion and drift terms in the Slow SDEs result from the long-term effects of the diffusion term in the conventional SDE (Slow SDEs become stationary if $\Sigma = 0$ ). This means our view characterizes the role of gradient noise in more detail, and therefore, goes one step further on the conventional wisdom.",
+ "bbox": [
+ 169,
+ 133,
+ 826,
+ 247
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "text",
+ "text": "Slow SDEs for neural nets with modern training techniques. In modern neural net training, it is common to add normalization layers and weight decay ( $L^2$ -regularization) for better optimization and generalization. However, these techniques lead to violations of our assumptions, e.g., no fixed point exists in the regularized loss (Li et al., 2020; Ahn et al., 2022). 
Still, a minimizer manifold can be expected to exist for the unregularized loss. Li et al. (2022) noted that the drift and diffusion around the manifold proceed faster in this case, and derived a Slow SDE for SGD that captures $\mathcal{O}\left(\frac{1}{\eta} \log \frac{1}{\eta}\right)$ discrete steps instead of $\mathcal{O}\left(\frac{1}{\eta^2}\right)$ . We believe that our analysis can also be extended to this case, and that adding local steps still results in the effect of strengthening the drift term.",
+ "bbox": [
+ 169,
+ 251,
+ 826,
+ 367
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2023",
+ "bbox": [
+ 171,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "page_number",
+ "text": "19",
+ "bbox": [
+ 490,
+ 946,
+ 509,
+ 960
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "text",
+ "text": "C IMPLEMENTATION DETAILS OF PARALLEL SGD, LOCAL SGD AND POST-LOCAL SGD",
+ "text_level": 1,
+ "bbox": [
+ 171,
+ 102,
+ 767,
+ 137
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "text",
+ "text": "In this section, we present the formal procedures for Parallel SGD, Local SGD and Post-local SGD. Given a training dataset and a data augmentation function, Algorithms 1 and 2 show the implementations of distributed samplers for sampling local batches with and without replacement. Then Algorithms 3 to 5 show the implementations of parallel SGD, Local SGD and Post-local SGD that can run with either of the samplers.",
+ "bbox": [
+ 169,
+ 151,
+ 823,
+ 220
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "text",
+ "text": "Sampling with replacement. Our theory analyzes parallel SGD, Local SGD and Post-local SGD when local batches are sampled with replacement (Algorithm 1). That is, local batches consist of IID samples from the same training distribution $\hat{D}$ , where $\hat{D}$ serves as an abstraction of the distribution of an augmented sample drawn from the training dataset. 
The mathematical formulations are given in Section 1.",
+ "bbox": [
+ 169,
+ 227,
+ 826,
+ 299
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "text",
+ "text": "Sampling without replacement. Slightly different from our theory, we use the sampling without replacement (Algorithm 2) in our experiments unless otherwise stated. This sampling scheme is standard in practice: it is used by Goyal et al. (2017) for parallel SGD and by Lin et al. (2020b); Ortiz et al. (2021) for Post-local/Local SGD. This sampling scheme works as follows. At the beginning of every epoch, the whole training dataset is shuffled and evenly partitioned into $K$ shards. Each worker takes one shard and samples batches without replacement. When all workers pass their own shard, the next epoch begins and the whole dataset is reshuffled. An alternative view is that the workers always share the same dataset. For each epoch, they perform local steps by sampling batches of data without replacement until the dataset contains too few data to form a batch. Then another epoch starts with the dataset reloaded to the initial state.",
+ "bbox": [
+ 169,
+ 306,
+ 826,
+ 446
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "text",
+ "text": "Discrepancy in Sampling Schemes. We argue that this discrepancy between theory and experiments on sample schemes is minor. Though sampling without replacement is standard in practice, most previous works, e.g., Wang & Joshi (2019); Li et al. (2021a); Zhang et al. (2020), analyze sampling with replacement for technical simplicity and yield meaningful results.",
+ "bbox": [
+ 169,
+ 452,
+ 823,
+ 508
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "text",
+ "text": "Moreover, even if we change the sampling scheme to with replacement, Local SGD can still improve the generalization of SGD (by merely adding local steps). See Appendix F for the experiments. 
We believe that the reasons for better generalization of Local SGD with either sampling scheme are similar and leave the analysis for sampling without replacement for future work.", + "bbox": [ + 169, + 515, + 826, + 571 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Distributed Sampler on $K$ Workers (Sampling with Replacement)" + ], + "code_body": "Require: shared training dataset $\\mathcal{D}$ data augmentation function $\\mathcal{A}(\\hat{\\xi})$ \nHyperparameters: local batch size $B_{\\mathrm{loc}}$ \nFunction Sample () on worker k: Draw $B_{\\mathrm{loc}}$ IID samples $\\hat{\\xi}_1,\\dots ,\\hat{\\xi}_{B_{\\mathrm{loc}}}$ from $\\mathcal{D}$ with replacement; $\\xi_b\\gets \\mathcal{A}(\\hat{\\xi}_b)$ for all $1\\leq b\\leq B_{\\mathrm{loc}}$ // apply data augmentation", + "bbox": [ + 158, + 126, + 799, + 246 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2: Distributed Sampler on $K$ Workers (Sampling without Replacement)" + ], + "code_body": "Require: shared training dataset $\\mathcal{D}$ data augmentation function $\\mathcal{A}(\\hat{\\xi})$ \nHyperparameters: local batch size $B_{\\mathrm{loc}}$ \nConstant: $N_{\\mathrm{loc}}\\coloneqq \\left\\lfloor \\frac{|D|}{KB_{\\mathrm{loc}}}\\right\\rfloor$ // number of local batches per worker per epoch \nLocal Variables: $c^{(k)}\\gets N_{\\mathrm{loc}}B_{\\mathrm{loc}}$ for worker k // number of samples drawn in this epoch \nFunction Sample () on worker k: \nif $c^{(k)} = N_{\\mathrm{loc}}B_{\\mathrm{loc}}$ then // Now start a new epoch Wait until all the other workers reach this line; // synchronize Draw a random permutation $P$ 
of 1,..., $|D|$ jointly with other workers so that the same permutation is shared among all workers; // reshuffle the dataset $Q_{j}^{(k)}\gets P_{(k - 1)N_{\mathrm{loc}}B_{\mathrm{loc}} + j}$ for all $1\leq j\leq N_{\mathrm{loc}}$ // partition the dataset $c^{(k)}\gets 0$ end \nfor $i = 1,\dots ,B_{\mathrm{loc}}$ do $\hat{\xi}_i\gets$ the $Q_{c^{(k)} + i}^{(k)}$ th data point of $\mathcal{D}$ // sample without replacement $\xi_i\gets \mathcal{A}(\hat{\xi}_i)$ // apply data augmentation \nend \n $c^{(k)}\gets c^{(k)} + B_{\mathrm{loc}}$ . \nreturn $(\xi_1,\ldots ,\xi_{B_{\mathrm{loc}}})$ .",
+ "bbox": [
+ 158,
+ 440,
+ 799,
+ 767
+ ],
+ "page_idx": 20
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2023",
+ "bbox": [
+ 171,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 20
+ },
+ {
+ "type": "page_number",
+ "text": "21",
+ "bbox": [
+ 488,
+ 946,
+ 506,
+ 960
+ ],
+ "page_idx": 20
+ },
+ {
+ "type": "code",
+ "sub_type": "algorithm",
+ "code_caption": [
+ "Algorithm 3: Parallel SGD on $K$ Workers"
+ ],
+ "code_body": "Input: loss function $\ell (\pmb {\theta};\xi)$ , initial parameter $\pmb{\theta}_{0}$ \nHyperparameters: total number of iterations $T$ , learning rate $\eta$ , local batch size $B_{\mathrm{loc}}$ \nfor $t = 0,\dots ,T - 1$ do \nfor each worker k do in parallel \n $(\xi_{k,t,1},\ldots ,\xi_{k,t,B_{\mathrm{loc}}})\gets \mathrm{Sample}()$ // sample a local batch \n $g_{k,t}\gets \frac{1}{B_{\mathrm{loc}}}\sum_{i = 1}^{B_{\mathrm{loc}}}\nabla \ell (\pmb {\theta}_t;\xi_{k,t,i})$ // computing the local gradient \nend \n $g_{t}\gets \frac{1}{K}\sum_{k = 1}^{K}g_{k,t}$ // all-Reduce aggregation of local gradients \n $\pmb{\theta}_{t + 1}\gets \pmb{\theta}_{t} - \eta_{t}\pmb{g}_{t}$ // update the model \nend",
+ "bbox": [
+ 158,
+ 125,
+ 803,
+ 290
+ ],
+ "page_idx": 21
+ },
+ {
+ "type": "code",
+ "sub_type": "algorithm",
+ "code_caption": [
+ "Algorithm 4: 
Local SGD on $K$ Workers"
+ ],
+ "code_body": "Input: loss function $\ell (\pmb {\theta};\xi)$ , initial parameter $\bar{\theta}^{(0)}$ \nHyperparameters: total number of rounds $R$ , number of local steps $H$ per round \nHyperparameters: learning rate $\eta$ , local batch size $B_{\mathrm{loc}}$ \nfor $s = 0,\dots ,R - 1$ do \nfor each worker k do in parallel $\theta_{k,0}^{(s)}\gets \bar{\theta}^{(s)};$ // maintain a local copy of the global iterate \nfor $t = 0,\ldots ,H - 1$ do $(\xi_{k,t,1}^{(s)},\dots ,\xi_{k,t,B_{\mathrm{loc}}}^{(s)})\leftarrow \mathrm{Sample}()$ // sample a local batch \n $g_{k,t}^{(s)}\leftarrow \frac{1}{B_{\mathrm{loc}}}\sum_{i = 1}^{B_{\mathrm{loc}}}\nabla \ell (\pmb{\theta}_{k,t}^{(s)};\xi_{k,t,i}^{(s)})$ // computing the local gradient \n $\theta_{k,t + 1}^{(s)}\gets \theta_{k,t}^{(s)} - \eta g_{k,t}^{(s)}$ // update the local model \nend \nend \n $\bar{\theta}^{(s + 1)}\gets \frac{1}{K}\sum_{k = 1}^{K}\theta_{k,H}^{(s)}$ // all-Reduce aggregation of local iterates \nend",
+ "bbox": [
+ 158,
+ 356,
+ 802,
+ 598
+ ],
+ "page_idx": 21
+ },
+ {
+ "type": "code",
+ "sub_type": "algorithm",
+ "code_caption": [
+ "Algorithm 5: Post-local SGD on $K$ Workers"
+ ],
+ "code_body": "1 Input: loss function $\ell (\pmb {\theta};\xi)$ , initial parameter $\pmb{\theta}_{0}$ \n2 Hyperparameters: total number of iterations $T$ , learning rate $\eta$ , local batch size $B_{\mathrm{loc}}$ \n3 Hyperparameters: switching time point $t_0$ , number of local steps $H$ per round \n4 Ensure: $T - t_0$ is a multiple of $H$ \n5 Starting from $\pmb{\theta}_{0}$ , run Parallel SGD for $t_0$ iterations and obtain $\pmb{\theta}_{t_0}$ . 
\n6 Starting from $\\pmb{\\theta}_{t_0}$ , run Local SGD for $\\frac{1}{H} (T - t_0)$ rounds with $H$ local steps per round; \n7 return the final global iterate of Local SGD ;", + "bbox": [ + 158, + 647, + 743, + 762 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "D MODELING LOCAL SGD WITH MULTIPLE CONVENTIONAL SDES", + "text_level": 1, + "bbox": [ + 169, + 102, + 756, + 118 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Lin et al. (2020b) tried to informally explain the success of Local SGD by adopting the argument that larger diffusion term in the conventional SDE leads to better generalization (see Section 3.1 and appendix A). Basically, they attempted to write multiple SDEs, each of which describes the $H$ -step local training process of each worker in each round (from $\\theta_{k,0}^{(s)}$ to $\\theta_{k,H}^{(s)}$ ). The key difference between each of these SDEs and the SDE for SGD (3) is that the former one has a larger diffusion term because the workers use batch size $B_{\\mathrm{loc}}$ instead of $B$ :", + "bbox": [ + 169, + 132, + 826, + 223 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {X} (t) = - \\nabla \\mathcal {L} (\\boldsymbol {X}) \\mathrm {d} t + \\sqrt {\\frac {\\eta}{B _ {\\mathrm {l o c}}}} \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {X}) \\mathrm {d} \\boldsymbol {W} _ {t}. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 228, + 823, + 261 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Lin et al. (2020b) then argue that the total amount of \"noise\" in the training dynamics of Local SGD is larger than that of SGD. 
However, it is hard to see whether it is indeed larger, since the model averaging step at the end of each round can reduce the variance in training and may cancel the effect of having larger diffusion terms.",
+ "bbox": [
+ 169,
+ 266,
+ 823,
+ 324
+ ],
+ "page_idx": 22
+ },
+ {
+ "type": "text",
+ "text": "More formally, a complete modeling of Local SGD following this idea should view the sequence of global iterates $\{\bar{\theta}^{(s)}\}$ as a Markov process $\{X^{(s)}\}$ . Let $\mathcal{P}_X(x,B,t)$ denote the distribution of $X(t)$ in (3) with initial condition $X(0) = x$ . Then the Markov transition should be $X^{(s + 1)} = \frac{1}{K}\sum_{k = 1}^{K}X_{k,H}^{(s)}$ where $X_{1,H}^{(s)},\ldots ,X_{K,H}^{(s)}$ are $K$ independent samples from $\mathcal{P}_X(X^{(s)},B_{\mathrm{loc}},H\eta)$ , i.e., sampling from (10).",
+ "bbox": [
+ 169,
+ 329,
+ 825,
+ 412
+ ],
+ "page_idx": 22
+ },
+ {
+ "type": "text",
+ "text": "Consider one round of model averaging. It is true that $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B_{\mathrm{loc}}, H\eta)$ may have a larger variance than the corresponding SGD baseline $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B, H\eta)$ because the former one has a smaller batch size. However, it is unclear whether $\mathbf{X}^{(s + 1)}$ also has a larger variance than $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B, H\eta)$ . This is because $\mathbf{X}^{(s + 1)}$ is the average of $K$ samples, which means we have to compare $\frac{1}{K}$ times the variance of $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B_{\mathrm{loc}}, H\eta)$ with the variance of $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B, H\eta)$ . 
Then it is unclear which one is larger.", + "bbox": [ + 169, + 417, + 823, + 512 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In the special case where $H\\eta$ is small, $\\mathcal{P}_X(X^{(s)},B_{\\mathrm{loc}},H\\eta)$ is approximately equal to the following Gaussian distribution:", + "bbox": [ + 169, + 518, + 823, + 547 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {N} \\left(\\boldsymbol {X} ^ {(s)} - \\eta H \\nabla \\mathcal {L} \\left(\\boldsymbol {X} ^ {(s)}\\right), \\frac {\\eta^ {2} H}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} \\left(\\boldsymbol {X} ^ {(s)}\\right)\\right) \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 551, + 823, + 585 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Then averaging over $K$ samples gives", + "bbox": [ + 171, + 590, + 426, + 606 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {N} \\left(\\boldsymbol {X} ^ {(s)} - \\eta H \\nabla \\mathcal {L} \\left(\\boldsymbol {X} ^ {(s)}\\right), \\frac {\\eta^ {2} H}{B} \\boldsymbol {\\Sigma} \\left(\\boldsymbol {X} ^ {(s)}\\right)\\right), \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 343, + 612, + 823, + 647 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "which is exactly the same as the Gaussian approximation of the SGD baseline. This means there do exist certain cases where Lin et al. 
(2020b)'s argument does not give a good separation between Local SGD and SGD.", + "bbox": [ + 169, + 651, + 823, + 694 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Moreover, we do not gain any further insights from this modeling since it is hard to see how model averaging interacts with the SDEs.", + "bbox": [ + 169, + 700, + 823, + 729 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "E ADDITIONAL INTERPRETATION OF THE SLOW SDES", + "text_level": 1, + "bbox": [ + 169, + 750, + 640, + 766 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "E.1 UNDERSTANDING THE DIFFUSION TERM IN THE SLOW SDE", + "text_level": 1, + "bbox": [ + 171, + 780, + 633, + 794 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "So far, we have discussed why adding local steps enlarges the drift term in the Slow SDE and why enlarging the drift term can benefit generalization. Besides this, here we remark that another way to accelerate the corresponding semi-gradient method for minimizing the implicit regularizer is to reduce the diffusion term, so that the trajectory more closely follows the drift term. More formally, we propose the following:", + "bbox": [ + 169, + 806, + 823, + 878 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Hypothesis E.1. Starting at a minimizer $\\zeta_0\\in \\Gamma$ , run $(\\kappa_{1},\\kappa_{2})$ -Slow SDE and $(\\kappa_{1},\\kappa_{2}^{\\prime})$ -Slow SDE respectively for the same amount of time $T > 0$ and obtain $\\zeta (T),\\zeta '(T)$ . 
If $\\pmb{\\Sigma}_{\\parallel}\\neq \\mathbf{0}$ and $\\kappa_{1} < \\kappa_{1}^{\\prime}$ then the expected test accuracy at $\\zeta (T)$ is better than that at $\\zeta^{\\prime}(T)$ .", + "bbox": [ + 169, + 880, + 823, + 925 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/e0c0b984628c8eae8c0add91a8226ad06ea92b62a4d4e5aef542e842730b3d1d.jpg", + "image_caption": [ + "(a) CIFAR-10, $H = 600$ for $K > 1$ .", + "(a) diffusion (unchanged)", + "(b) drift-I (unchanged)" + ], + "image_footnote": [], + "bbox": [ + 228, + 66, + 480, + 186 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/8ff4d6cbc4e8c06b0fb2a0179d56166cdb3698769edde39d3752e98467c422f8.jpg", + "image_caption": [ + "(b) ImageNet, $H = 78$ for $K > 1$ .", + "Figure 3: Reducing the diffusion term of the Slow SDE for Local SGD leads to better generalization. Test accuracy improves as we increase $K$ with fixed $\\eta$ and $H$ to reduce the diffusion term while keeping the drift term untouched. See Appendix M.4 for details.", + "(c) drift-II (rescaled)" + ], + "image_footnote": [], + "bbox": [ + 509, + 66, + 764, + 186 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Here we exclude the case of $\\boldsymbol{\\Sigma}_{\\parallel} \\equiv \\mathbf{0}$ because in this case the diffusion term in the Slow SDE is always zero. To verify Hypothesis E.1, we set the product $\\alpha \\coloneqq \\eta H$ large, keep $H, \\eta$ fixed, increase the number of workers $K$ , and compare the generalization performances after a fixed amount of training steps (but after different numbers of epochs). 
This case corresponds to the $(\frac{1}{KB_{\mathrm{loc}}}, \frac{1}{2B_{\mathrm{loc}}})$ -Slow SDE, so adding more workers should reduce the diffusion term. As shown in Figure 3, a higher test accuracy is indeed achieved for larger $K$ .",
+ "bbox": [
+ 169,
+ 272,
+ 823,
+ 361
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "text",
+ "text": "**Implication:** Enlarging the learning rate is not equally effective as adding local steps. Given that Local SGD improves generalization by strengthening the drift term, it is natural to wonder if enlarging the learning rate of SGD would also lead to similar improvements. While it is true that enlarging the learning rate effectively increases the drift term, it also increases the diffusion term simultaneously, which can hinder the implicit regularization by Hypothesis E.1. In contrast, adding local steps does not change the diffusion term. As shown in Figure 6(a), even when the learning rate of SGD is increased, SGD still underperforms Local SGD by about $2\%$ in test accuracy.",
+ "bbox": [
+ 169,
+ 364,
+ 823,
+ 464
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "text",
+ "text": "On the other hand, in the special case where $\pmb{\Sigma}_{\parallel} \equiv \mathbf{0}$ , Hypothesis E.1 does not hold, and enlarging the learning rate by $\sqrt{K}$ results in the same Slow SDE as adding local steps (see Appendix G for derivation). Then these two actions should produce the same generalization improvement, unless the learning rate is so large that Slow SDE loses track of the training dynamics. 
As an example of such a special case, an experiment with label noise regularization is presented in Figure 8.", + "bbox": [ + 169, + 469, + 823, + 544 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "E.2 THE EFFECT OF GLOBAL BATCH SIZE ON GENERALIZATION", + "text_level": 1, + "bbox": [ + 171, + 560, + 633, + 574 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In this section, we discuss the effect of global batch size on the generalization of Local SGD. Given that the computation power of a single worker is limited, we consider the case where the local batch size $B_{\\mathrm{loc}}$ is fixed and the global batch size $B = KB_{\\mathrm{loc}}$ is tuned by adding or removing the workers. This scenario is relevant to the practice because one may want to know the maximum parallelism possible to train the neural net without causing generalization degradation.", + "bbox": [ + 169, + 585, + 823, + 657 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "For SGD, previous works have proposed the Linear Scaling Rule (LSR) (Krizhevsky, 2014; Goyal et al., 2017; Jastrzebski et al., 2017): scaling the learning rate $\\eta \\mapsto \\kappa \\eta$ linearly with the global batch size $B \\mapsto \\kappa B$ yields the same conventional SDE (3) under a constant epoch budget, hence leading to almost the same generalization performance as long as the SDE approximation does not fail.", + "bbox": [ + 169, + 662, + 823, + 720 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We show in Theorem H.1 that the LSR does not change the Slow SDE of SGD either. Experiments in Figure 4 show that the LSR indeed holds nicely when we continue training with small learning rates from the same CIFAR-10 and ImageNet checkpoints as in Figure 2. Here we choose $K = 16$ and $K = 256$ as the base settings for CIFAR-10 and ImageNet, respectively, and then tune the learning rate to maximize the test accuracy. 
As shown in Figures 4(a) and 4(b), the optimal learning rate turns out to be small enough that the LSR can be applied to scale the global batch size with only a minor change in test accuracy.",
+ "bbox": [
+ 169,
+ 724,
+ 823,
+ 824
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "text",
+ "text": "Now, assuming the learning rate is scaled as LSR, we study how to tune the number of local steps $H$ for Local SGD for better generalization. A natural choice is to tune $H$ in the base settings and keep $\alpha$ unchanged via scaling $H \mapsto H / \kappa$ . Then the following SDE can be derived (see Theorem H.2):",
+ "bbox": [
+ 169,
+ 830,
+ 823,
+ 873
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\zeta} \left(\underbrace {\frac {1}{\sqrt {B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t}} _ {\text {diffusion}} \underbrace {- \frac {1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {drift-I}} \underbrace {- \frac {\kappa K - 1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Psi}} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {drift-II}}\right). \tag {13}\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 205,
+ 877,
+ 823,
+ 906
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2023",
+ "bbox": [
+ 171,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "page_number",
+ "text": "24",
+ "bbox": [
+ 488,
+ 946,
+ 508,
+ 959
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "image",
+ "img_path": "images/3676ff827c60f1a31442e006bbe0b8dc2e7b554d1a323506edaa5e06d9e514d2.jpg",
+ "image_caption": [
+ "(a) CIFAR-10, start from #250." 
+ ], + "image_footnote": [], + "bbox": [ + 236, + 66, + 488, + 186 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/a1e59652e5f629d271daf5ab8ac58410d3e6f780b4b78d3f02453fe297cacbb9.jpg", + "image_caption": [ + "(b) ImageNet, start from #100." + ], + "image_footnote": [], + "bbox": [ + 506, + 65, + 759, + 186 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/16d0dcb2cbba12d9c0d81b5872aaf76706eeae69248c986653a788fee20e9ca3.jpg", + "image_caption": [ + "(c) CIFAR-10, start from #250." + ], + "image_footnote": [], + "bbox": [ + 233, + 214, + 486, + 334 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/d3af3481e36bca72561c65e4fdd5cf3f1fcc7663812a8c6632e27435a132cad4.jpg", + "image_caption": [ + "(d) ImageNet, start from #100.", + "Figure 4: For training from CIFAR-10 and ImageNet checkpoints, Local SGD consistently outperforms SGD $(H = 1)$ across different batch sizes $B$ (fixing $B_{\\mathrm{loc}}$ and varying $K$ ), where the learning rate is scaled by the LSR $\\eta \\propto B$ . Two possible ways of tuning the number of local steps $H$ are considered: (1). Tune $H$ for the best test accuracy for $K = 16$ and $K = 256$ respectively on CIFAR-10 and ImageNet, then scale $H$ as $H \\propto 1 / B$ so that $\\alpha \\coloneqq \\eta H$ is constant; (2). Tune $H$ specifically for each $K$ . See Appendix M.5 for training details." + ], + "image_footnote": [], + "bbox": [ + 506, + 214, + 756, + 335 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Compared with (4), the drift-II term here is rescaled by a positive factor. 
Again, when $\\alpha$ is large, we can follow the argument in Section 3.3.2 to approximate $\\widehat{\\Psi} (\\zeta)\\approx \\widehat{\\Sigma}_{\\diamond}(\\zeta)$ and obtain the following $(\\frac{1}{B},\\frac{\\kappa K}{B})$ -Slow SDE:", + "bbox": [ + 169, + 470, + 823, + 518 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} (t) - \\frac {\\kappa K}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t\\right). \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 532, + 823, + 558 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The drift term of the above SDE is always stronger than SGD (7), as long as there exists more than one worker after the scaling (i.e., $\\kappa K > 1$ ). As expected from Hypothesis 3.1, we observed in the experiments that the generalization performance of Local SGD is always better than or at least comparable to SGD across different batch sizes (see Figures 4(a) and 4(b)).", + "bbox": [ + 169, + 569, + 823, + 626 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Taking a closer look into the drift term in the Slow SDE (14), we can find that it scales linearly with $\\kappa$ . According to Hypothesis 3.1, the SDE is expected to generalize better when adding more workers ( $\\kappa > 1$ ) and to generalize worse when removing some workers ( $\\kappa < 1$ ). For the latter case, we indeed observed that the test accuracy of Local SGD drops when removing workers. 
For the case of adding workers, however, we also need to take into account that the LSR specifies a larger learning rate and causes a larger SDE approximation error for the same $\\alpha$ , which may cancel the generalization improvement brought by strengthening the drift term. In the experiments, we observed that the test accuracy does not rise when adding more workers to the base settings.", + "bbox": [ + 169, + 631, + 823, + 744 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Since $\\alpha$ also controls the regularization strength (Section 3.3.3), it would be beneficial to decrease $\\alpha$ for large batch size so as to better trade-off between regularization strength and approximation quality. In Figures 4(c) and 4(d), we plot the optimal value of $\\alpha$ for each batch size, and we indeed observed that the optimal $\\alpha$ drops as we scale up $K$ . Conversely, a smaller batch size (and hence a smaller learning rate) allows for using a larger $\\alpha$ to enhance regularization while still keeping a low approximation error (Theorem 3.3). The test accuracy curves in Figures 4(a) and 4(b) indeed show that setting a larger $\\alpha$ can compensate for the accuracy drop when reducing the batch size.", + "bbox": [ + 169, + 750, + 826, + 849 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "F ADDITIONAL EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 875, + 534, + 888 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In this section, we present additional experimental results to further verify our finding.", + "bbox": [ + 169, + 909, + 738, + 925 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Supplementary Plot: Training time should be long enough. 
Figures 5(a) and 5(b) show enlarged views for Figures 2(a) and 2(c) respectively, showing that Local SGD can generalize worse than SGD in the first few epochs.", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Supplementary Plot: Learning rate should be small. Figure 5(c) shows that reducing the learning rate from 0.32 to 0.064 does not lead to test accuracy drop for Local SGD on CIFAR-10, if the training time is allowed to be longer and the number of local steps $H$ is set properly. Figure 5(d) presents the case where, with a large learning rate, the generalization improvement of Local SGD disappears even starting from a pre-trained model.", + "bbox": [ + 169, + 152, + 823, + 223 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Supplementary Plot: Reconciling our main finding with Ortiz et al. (2021). In Figure 5(e), the generalization benefit of Local SGD with $H = 24$ becomes less significant after the learning rate decay at epoch 226, which is consistent with the observation by Ortiz et al. (2021) that the generalization benefit of Local SGD usually disappears after the learning rate decay. But we can preserve the improvement by increasing $H$ to 900. Here, we use Local SGD with momentum.", + "bbox": [ + 169, + 229, + 823, + 301 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Supplementary Plot: Optimal $\\alpha$ gets larger for smaller $\\eta$ . In Figure 5(f), we summarize the optimal $\\alpha := \\eta H$ that enables the highest test accuracy for each learning rate in Figure 2(f). We can see that the optimal $\\alpha$ increases as we decrease the learning rate. 
The reason is that the approximation error bound $\\mathcal{O}(\\sqrt{\\alpha\\eta\\log\\frac{\\alpha}{\\eta\\delta}})$ in Theorem 3.3 decreases with $\\eta$ , allowing for a larger value of $\\alpha$ to better regularize the model.", + "bbox": [ + 169, + 306, + 823, + 386 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/4fe5d7371a7100bcba8d8ff41d407fafc7f8d0c675a2f7d7c27b6914f412512b.jpg", + "image_caption": [ + "(a) CIFAR-10, start from random." + ], + "image_footnote": [], + "bbox": [ + 179, + 406, + 370, + 498 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/caa2fbfba3a461f8cfac378f2bf2bfdc47d2fa72a2416418e2b784b3ab4c599e.jpg", + "image_caption": [ + "(b) ImageNet, start from #250." + ], + "image_footnote": [], + "bbox": [ + 401, + 406, + 591, + 498 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/d0d67ab8cec46763de39bf7c882d39a6c278122af06efbe7c944353a4661de2e.jpg", + "image_caption": [ + "(c) CIFAR-10, start from #100." + ], + "image_footnote": [], + "bbox": [ + 625, + 407, + 813, + 498 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/1f251751866deb3e4fbe1cf8e012190ad7fb7e1404930e0e6f5b0b64dffd3e77.jpg", + "image_caption": [ + "(d) ImageNet, start from #100." + ], + "image_footnote": [], + "bbox": [ + 179, + 532, + 370, + 625 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/63f25ecc16e69a31d38a161ee74e36cd48754561db452ccd63c53c38cd528ed1.jpg", + "image_caption": [ + "(e) CIFAR-10, start from #150." + ], + "image_footnote": [], + "bbox": [ + 403, + 532, + 591, + 625 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/80b925b82ab5d697c33f6b926a904856dea02e9328d7cdb3ba25d81786e80019.jpg", + "image_caption": [ + "(f) ImageNet, optimal $\\alpha$ v.s. $\\eta$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 625, + 532, + 813, + 625 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/0e19c6e0ffe575769e7933dcb5dda9fdc3428f58151baedbe2d2e1b3a250b1ed.jpg", + "image_caption": [ + "Figure 5: Additional experimental results about the effect of the learning rate, training time and the number of local steps. See Appendix M.2 for details.", + "(a) SGD with various $\\eta$", + "Figure 6: Additional experimental results on CIFAR-10. See Appendix M.3 for details." + ], + "image_footnote": [], + "bbox": [ + 181, + 710, + 370, + 801 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/61b57afd24323269dec4a5494e2a94ec95720c1ef0b1b6f084d37af55d1289d1.jpg", + "image_caption": [ + "(b) SGD with larger batch sizes." + ], + "image_footnote": [], + "bbox": [ + 403, + 710, + 591, + 801 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/9ab02fb5af9589b4e7d99396f3e0415fad83b4d9248f9232a36feb2295e1c293.jpg", + "image_caption": [ + "(c) Post-local SGD, sampling with replacement." + ], + "image_footnote": [], + "bbox": [ + 625, + 710, + 813, + 801 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "SGD generalizes worse even with extensively tuned learning rates. In Figure 6(a), we run SGD from both random initialization and the pre-trained model for another 3,000 epochs with various learning rates and report the test accuracy. We can see that none of the SGD runs beat Local SGD with the fixed learning rate $\\eta = 0.32$ . 
Therefore, the inferior performance of SGD in Figures 2(a) and 2(b) is not due to the improper learning rate and Local SGD indeed generalizes better.", + "bbox": [ + 169, + 854, + 823, + 925 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "SGD with larger batch sizes performs no better. In Figure 6(b), we enlarge the batch size of SGD and report the test accuracy for various learning rates. We can see that SGD with larger batch sizes performs no better and none of the SGD runs outperform Local SGD with the fixed learning rate $\\eta = 0.32$ . This result is unsurprising since it is well established in the literature (Jastrzebski et al., 2017; Smith et al., 2020; Keskar et al., 2017) that larger batch size typically leads to worse generalization. See Appendix A for a survey of empirical and theoretical works on understanding and resolving this phenomenon.", + "bbox": [ + 169, + 103, + 826, + 202 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Sampling with or without replacement does not matter. Note that there is a slight discrepancy in sampling schemes between our theoretical and experimental setup: the update rules (1) and (2) assume that data are sampled with replacement while most experiments use sampling without replacement (Appendix C). 
To eliminate the effect of this discrepancy, we conduct additional experiments on Post-local SGD using sampling with replacement (see Figure 6(c)) and Post-local SGD significantly outperforms SGD.", + "bbox": [ + 169, + 208, + 823, + 292 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "G DISCUSSIONS ON LOCAL SGD WITH LABEL NOISE REGULARIZATION", + "text_level": 1, + "bbox": [ + 169, + 311, + 794, + 328 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "G.1 THE SLOW SDE FOR LOCAL SGD WITH LABEL NOISE REGULARIZATION", + "text_level": 1, + "bbox": [ + 169, + 343, + 730, + 357 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "In this subsection, we present the Slow SDE for Local SGD in the case of label noise regularization and show that Local SGD indeed induces a stronger regularization term, which presumably leads to better generalization.", + "bbox": [ + 169, + 369, + 823, + 412 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Theorem G.1 (Slow SDE for Local SGD with label noise regularization). For a $C$ -class classification task with cross-entropy loss, the slow SDE of Local SGD with label noise has the following form:", + "bbox": [ + 168, + 415, + 823, + 458 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = - \\frac {1}{4 B} \\nabla_ {\\Gamma} \\left(\\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right) + (K - 1) \\cdot \\frac {\\operatorname {t r} \\left(F \\left(2 H \\eta \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right)\\right)}{2 H \\eta}\\right) \\mathrm {d} t, \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 464, + 823, + 500 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "where $F(x) \\coloneqq \\int_0^x \\psi(y) \\, \\mathrm{d}y$ and is interpreted as a matrix function. 
Additionally, $\\nabla_{\\Gamma} f$ stands for the gradient of a function $f$ projected to the tangent space of $\\Gamma$ .", + "bbox": [ + 169, + 506, + 823, + 537 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Proof. See Appendix L.", + "bbox": [ + 171, + 550, + 334, + 566 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/ce5c716a629e476b0ea86ea3fe76183d34c1f7f8c04e9a6e6b894dd51c2c08d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 551, + 825, + 564 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Note that the magnitude of the RHS in (15) becomes larger as $H$ increases. By letting $H$ to go to infinity, we further have the following theorem.", + "bbox": [ + 169, + 580, + 823, + 609 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Theorem G.2. As the number of local steps $H$ goes to infinity, the slow SDE of Local SGD with label noise (15) can be simplified as:", + "bbox": [ + 169, + 613, + 823, + 643 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = - \\frac {K}{4 B} \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right) \\mathrm {d} t. \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 648, + 823, + 679 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Proof. We obtain the corollary by simply taking the limit. 
By L'Hospital's rule,", + "bbox": [ + 169, + 691, + 696, + 707 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\lim _ {x \\rightarrow + \\infty} \\frac {F (a x)}{x} = \\lim _ {x \\rightarrow + \\infty} \\frac {\\mathrm {d} F (a x)}{\\mathrm {d} x} = \\lim _ {x \\rightarrow + \\infty} a \\psi (a x) = a.\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 712, + 684, + 744 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 750, + 246, + 763 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\lim _ {x \\rightarrow + \\infty} \\frac {\\operatorname {t r} (F (2 H \\eta \\nabla^ {2} \\mathcal {L} (\\zeta)))}{2 H \\eta} = \\operatorname {t r} (\\nabla^ {2} \\mathcal {L} (\\zeta)). \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 348, + 770, + 823, + 804 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Substituting (17) into (15) yields (16).", + "bbox": [ + 169, + 809, + 426, + 825 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/f21d3d40f406238e9caec9860c77af2b568b84405a6757c7b980202a49f88a24.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 810, + 825, + 821 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "As introduced in Section 3.3, the Slow SDE for SGD with label noise regularization has the following form:", + "bbox": [ + 169, + 839, + 823, + 868 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = - \\frac {1}{4 B} \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right) \\mathrm {d} t, \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 875, + 823, + 904 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "which is a deterministic flow that keeps reducing the trace of Hessian.", + "bbox": [ + 169, + 909, + 632, + 924 + ], + "page_idx": 26 + }, + { + "type": "header", + 
"text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/95fa377cc60c341232565c9293256304933bfec9021c461af4ee1f75a97d2d61.jpg", + "image_caption": [ + "(a) ResNet-56 + GroupNorm." + ], + "image_footnote": [], + "bbox": [ + 204, + 109, + 416, + 212 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/4d961518e31054ba64c59978b73d65730b2d0232abf93fea368ff5d0e48d6c9a.jpg", + "image_caption": [ + "(b) VGG-16 w/o normalization.", + "Figure 7: Local SGD with label noise regularization on CIFAR-10 without data augmentation using $K = 32$ , $B_{\\mathrm{loc}} = 128$ . A larger number of local steps indeed enables higher test accuracy. For both architectures, we replace ReLU with Swish. See Appendix M.6 for training details." + ], + "image_footnote": [], + "bbox": [ + 454, + 111, + 776, + 212 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "As the trace of Hessian can be seen as a measure for the sharpness of the local loss landscape, (18) indicates that SGD with label noise regularization has an implicit bias toward flatter minima, which presumably promotes generalization (Hochreiter & Schmidhuber, 1997; Keskar et al., 2017; Neyshabur et al., 2017). More concretely, Blanc et al. (2020) and Li et al. (2021b) connect minimizing the trace of Hessian to finding sparse or low-rank solutions for training two-layer linear nets. Damian et al. (2021) empirically showed that good generalization correlates with a smaller trace of Hessian in training ResNets with label noise. 
Besides, Ma & Ying (2021) connect the trace of Hessian to the smoothness of the function represented by a deep neural net.", + "bbox": [ + 169, + 316, + 826, + 429 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "From Theorems G.1 and G.2, we can conclude that Local SGD accelerates the process of sharpness reduction, thereby leading to better generalization. Furthermore, the regularization effect gets stronger for larger $H$ and is approximately $K$ times that of SGD. We also conduct experiments on non-augmented CIFAR-10 with label noise regularization to verify our conclusion. As shown in Figure 7, increasing the number of local steps indeed gives better generalization performance.", + "bbox": [ + 169, + 434, + 823, + 506 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "G.2 THE EQUIVALENCE OF ENLARGING THE LEARNING RATE AND ADDING LOCAL STEPS", + "text_level": 1, + "bbox": [ + 169, + 523, + 818, + 539 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "In this subsection, we explain in detail why training with label noise regularization is a special case where enlarging the learning rate of SGD can bring the same generalization benefit as adding local steps. When we scale up the learning rate of SGD $\\eta \\mapsto \\kappa \\eta$ (while keeping other hyperparameters unchanged), the corresponding Slow SDE is (18) with time horizon $\\kappa^2 T$ instead of $T$ , where SGD tracks a continuous interval of $\\kappa^2 \\eta^2$ per step instead of $\\eta^2$ . After rescaling the time horizon to $T$ so that SGD tracks a continuous interval of $\\eta^2$ per step, we obtain", + "bbox": [ + 169, + 550, + 823, + 635 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\zeta (t) = - \\frac {\\kappa^ {2}}{4 B} \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\zeta)\\right) \\mathrm {d} t. 
\\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 643, + 823, + 676 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Let $\\kappa = \\sqrt{K}$ in (19) and we obtain the same Slow SDE as (16), which is for Local SGD with a large number of local steps. In Figure 8, we conduct experiments to verify that SGD indeed achieves comparable test accuracy to that of Local SGD with a large $H$ if its learning rate is scaled up by $\\sqrt{K}$ that of Local SGD.", + "bbox": [ + 169, + 684, + 823, + 743 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "H DERIVING THE SLOW SDE AFTER APPLYING THE LSR", + "text_level": 1, + "bbox": [ + 171, + 765, + 666, + 782 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "In this section, we derive the Slow SDEs for SGD and Local SGD after applying the LSR in Appendix E.2. The results are formally summarized in the following theorems.", + "bbox": [ + 169, + 797, + 823, + 827 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Theorem H.1 (Slow SDE for SGD after applying the LSR). Let Assumptions 3.1 to 3.3 hold. Assume that we run SGD with learning rate $\\eta' = \\kappa \\eta$ and the number of workers $K' = \\kappa K$ for some constant $\\kappa > 0$ . Let $T > 0$ be a constant and $\\zeta(t)$ be the solution to (7) with the initial condition $\\zeta(0) = \\Phi(\\theta_0) \\in \\Gamma$ . 
Then for any $\\mathcal{C}^3$ -smooth function $g(\\pmb{\\theta})$ , $\\max_{0 \\leq s \\leq \\frac{\\kappa T}{\\eta'^2}} \\left| \\mathbb{E}[g(\\Phi(\\pmb{\\theta}_s)] - \\mathbb{E}[g(\\pmb{\\zeta}(s\\eta'^2/\\kappa)] \\right| = \\tilde{\\mathcal{O}}(\\eta'^{0.25})$ , where $\\tilde{\\mathcal{O}}(\\cdot)$ hides log factors and constants that are independent of $\\eta'$ but can depend on $g(\\pmb{\\theta})$ .", + "bbox": [ + 169, + 832, + 825, + 925 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/930392f60ac43dce43184eddb2665e2ee62f7f7142ea9db31582d4f43c89203a.jpg", + "image_caption": [ + "Figure 8: Local SGD with label noise regularization on CIFAR-10 without data augmentation using $K = 4$ , $B_{\\mathrm{loc}} = 128$ . SGD ( $H = 1$ ) indeed achieves comparable test accuracy as Local SGD with a large $H$ when we scale up its learning rate to $\\sqrt{K}$ times that of Local SGD. See Appendix M.6 for training details." + ], + "image_footnote": [], + "bbox": [ + 364, + 104, + 632, + 210 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Proof. 
Replacing $B$ with $\\kappa B$ in the original Slow SDE for Local SGD (7) gives the following Slow SDE:", + "bbox": [ + 169, + 311, + 823, + 339 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\underbrace {\\frac {1}{\\sqrt {\\kappa B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} \\underbrace {- \\frac {1}{2 \\kappa B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t}\\right). \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 344, + 823, + 387 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Note that the continuous time horizon for (20) is $\\kappa T$ instead of $T$ since after applying the LSR, SGD tracks a continuous interval of $\\kappa^2\\eta^2$ per step instead of $\\eta^2$ while the total number of steps is scaled down by $\\kappa$ . We can then rescale the time scaling to obtain (7) that holds for $T$ .", + "bbox": [ + 169, + 395, + 823, + 439 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Theorem H.2 (Slow SDE for Local SGD after applying the LSR). Let Assumptions 3.1 to 3.3 hold. Assume that we run Local SGD with learning rate $\\eta' = \\kappa \\eta$ , the number of workers $K' = \\kappa K$ , and the number of local steps $H' = \\frac{\\alpha}{\\kappa \\eta}$ for some constants $\\alpha, \\kappa > 0$ . Let $T > 0$ be a constant and $\\zeta(t)$ be the solution to (21) with the initial condition $\\zeta(0) = \\Phi(\\bar{\\theta}^{(0)}) \\in \\Gamma$ . 
Then for any $\\mathcal{C}^3$ -smooth function $g(\\pmb{\\theta})$ , $\\max_{0 \\leq s \\leq \\frac{\\kappa T}{H' \\eta'^2}} |\\mathbb{E}[g(\\Phi(\\bar{\\theta}^{(s)})] - \\mathbb{E}[g(\\zeta(sH' \\eta'^2 / \\kappa)]| = \\tilde{\\mathcal{O}}(\\eta'^{0.25})$ , where $\\tilde{\\mathcal{O}}(\\cdot)$ hides log factors and constants that are independent of $\\eta'$ but can depend on $g(\\pmb{\\theta})$ .", + "bbox": [ + 169, + 445, + 825, + 544 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {(a) \\text {d i f f u s i o n (u n c h a n g e d)}} \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(b) \\text {d r i f t - I (u n c h a n g e d)}} \\underbrace {- \\frac {\\kappa K - 1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(c) \\text {d r i f t - I I (r e s c a l e d)}}\\right). \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 550, + 823, + 594 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Proof. 
Replacing $B$ with $\\kappa B$ in the original Slow SDE for Local SGD (4) gives the following Slow SDE:", + "bbox": [ + 169, + 608, + 823, + 635 + ], + "page_idx": 28 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {\\kappa B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} \\underbrace {- \\frac {1}{2 \\kappa B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(b) d r i f t - I}} \\underbrace {- \\frac {\\kappa K - 1}{2 \\kappa B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(c) d r i f t - I I}}\\right). \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 642, + 823, + 684 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Note that the continuous time horizon for (22) is $\\kappa T$ instead of $T$ since after applying the LSR, Local SGD tracks a continuous interval of $\\kappa^2\\eta^2$ per step instead of $\\eta^2$ while the total number of steps is scaled down by $\\kappa$ . We can then rescale the time scaling to obtain (21) that holds for $T$ .", + "bbox": [ + 169, + 691, + 826, + 736 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "I PROOF OF THEOREM 3.1", + "text_level": 1, + "bbox": [ + 171, + 102, + 408, + 118 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "This section presents the proof for Theorem 3.1. 
First, we introduce some notations that will be used throughout this section. For the sequence of Local SGD iterates $\\{\\pmb{\\theta}_{k,t}^{(s)}:k\\in [K],0\\leq t\\leq H,s\\geq 0\\}$ , we introduce an auxiliary sequence $\\{\\hat{u}_t\\}_{t\\in \\mathbb{N}}$ , which consists of GD iterates from $\\bar{\\pmb{\\theta}}^{(0)}$ :", + "bbox": [ + 169, + 133, + 823, + 184 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {u}} _ {0} = \\bar {\\boldsymbol {\\theta}} ^ {(0)}, \\qquad \\hat {\\boldsymbol {u}} _ {t + 1} \\leftarrow \\hat {\\boldsymbol {u}} _ {t} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}).\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 191, + 635, + 210 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "For convenience, let $\\hat{\\pmb{u}}_t^{(s)}\\coloneqq \\hat{\\pmb{u}}_{sH + t}$ and $\\pmb {z}_{k,sH + t}\\coloneqq \\pmb{z}_{k,t}^{(s)}$ . We will use $\\hat{\\pmb{u}}_t^{(s)}$ and $\\hat{\\pmb{u}}_{sH + t},\\pmb{z}_{k,t}^{(s)}$ and $\\pmb {z}_{k,sH + t}$ interchangeably. Recall that we have assumed that $\\mathcal{L}$ is $\\mathcal{C}^3$ -smooth with bounded second and third order derivatives. Let $\\nu_{2}\\coloneqq \\sup_{\\pmb {\\theta}\\in \\mathbb{R}^{d}}\\| \\nabla^{2}\\mathcal{L}(\\pmb {\\theta})\\|_{2}$ and $\\nu_{3}\\coloneqq \\sup_{\\pmb {\\theta}\\in \\mathbb{R}^{d}}\\| \\nabla^{3}\\mathcal{L}(\\pmb {\\theta})\\|_{2}$ . Since $\\nabla \\ell (\\pmb {\\theta};\\pmb {\\zeta})$ is bounded, the gradient noise $z_{k,t}^{(s)}$ is also bounded. 
We denote by $\\sigma_{\\mathrm{max}}$ an upper bound such that $\\| z_{k,t}^{(s)}\\| _2\\leq \\sigma_{\\mathrm{max}}$ holds for all $s,k,t$", + "bbox": [ + 169, + 215, + 823, + 306 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "To prove Theorem 3.1, we will show that both Local SGD iterates $\\bar{\\theta}^{(s)}$ and SGD iterates $\\boldsymbol{w}_{sH}$ track GD iterates $\\hat{\\boldsymbol{u}}_{sH}$ closely with high probability. For each client $k$ , define the following sequence $\\{\\hat{Z}_{k,t}:t\\geq 0\\}$ , which will be used in the proof for bounding the overall effect of noise.", + "bbox": [ + 169, + 314, + 823, + 361 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {Z}} _ {k, t} = \\sum_ {\\tau = 0} ^ {t - 1} \\left[ \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\boldsymbol {z} _ {k, \\tau}, \\quad \\hat {\\boldsymbol {Z}} _ {k, 0} = \\boldsymbol {0}, \\quad \\forall k \\in [ K ].\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 368, + 743, + 411 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The following lemma shows that $\\hat{Z}_{k,t}$ is concentrated around the origin.", + "bbox": [ + 169, + 420, + 645, + 436 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Lemma I.1 (Concentration property of $\\{\\hat{Z}_{k,t}\\}$ ). 
With probability at least $1 - \\delta$ , the following holds simultaneously for all $k \\in [K]$ , $0 \\leq t < \\left\\lfloor \\frac{T}{\\eta} \\right\\rfloor$ :", + "bbox": [ + 169, + 441, + 823, + 477 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\| _ {2} \\leq \\hat {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 T}{\\eta} \\log \\frac {2 T K}{\\delta \\eta}},\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 484, + 619, + 526 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "where $\\hat{C}_1\\coloneqq \\exp (T\\nu_2)$.", + "bbox": [ + 171, + 534, + 333, + 551 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Proof. For each $\\hat{\\mathbf{Z}}_{k,t}$ , construct a sequence $\\{\\hat{\\mathbf{Z}}_{k,t,t'}\\}_{t'=0}^t$ :", + "bbox": [ + 169, + 566, + 553, + 585 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {Z}} _ {k, t, t ^ {\\prime}} := \\sum_ {\\tau = 0} ^ {t ^ {\\prime} - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l}))\\right) \\boldsymbol {z} _ {k, \\tau}, \\qquad \\hat {\\boldsymbol {Z}} _ {k, t, 0} = \\boldsymbol {0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 284, + 592, + 710, + 635 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Since $\\| \\nabla^2\\mathcal{L}(\\hat{\\boldsymbol{u}}_l)\\| _2\\leq \\nu_2$ for all $l\\geq 0$ , the following holds for all $0\\leq \\tau < t - 1$ and $0 < t < \\lfloor \\frac{T}{\\eta}\\rfloor$ :", + "bbox": [ + 169, + 643, + 818, + 662 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right\\| _ {2} \\leq (1 + \\nu_ {2} \\eta) ^ {t} \\leq \\exp (T \\nu_ {2}) = \\hat {C} _ {1}.\n$$\n", + "text_format": "latex", + 
"bbox": [ + 294, + 669, + 699, + 714 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "So $\\{\\hat{Z}_{k,t,t'}\\}_{t' = 0}^t$ is a martingale with $\\| \\hat{Z}_{k,t,t'} - \\hat{Z}_{k,t,t' - 1}\\| _2\\leq \\hat{C}_1\\sigma_{\\max}$ . Since $\\hat{Z}_{k,t} = \\hat{Z}_{k,t,t}$ , by Azuma-Hoeffding's inequality,", + "bbox": [ + 169, + 720, + 823, + 753 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {P} (\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\| _ {2} \\geq \\epsilon^ {\\prime}) \\leq 2 \\exp \\left(\\frac {- \\epsilon^ {\\prime 2}}{2 t \\left(\\hat {C} _ {1} \\sigma_ {\\max }\\right) ^ {2}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 758, + 651, + 816 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Taking union bound on all $k \\in [K]$ and $0 \\leq t \\leq \\left\\lfloor \\frac{T}{\\eta} \\right\\rfloor$ , we can conclude that with probability at least $1 - \\delta$ ,", + "bbox": [ + 169, + 823, + 823, + 854 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\| _ {2} \\leq \\hat {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 T}{\\eta} \\log \\frac {2 T K}{\\delta \\eta}}, \\quad \\forall 0 \\leq t < \\left\\lfloor \\frac {T}{\\eta} \\right\\rfloor , k \\in [ K ].\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 862, + 720, + 902 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/449f7d06a1a0e66247d3a67f4b5f904d041aed7497481a0dab7af257988683a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 909, + 823, + 921 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The following lemma states that, with high probability, Local SGD 
iterates $\\theta_{k,t}^{(s)}$ and $\\bar{\\theta}^{(s)}$ closely track the gradient descent iterates $\\hat{\\pmb{u}}_{sH}$ for $\\lfloor \\frac{T}{H\\eta}\\rfloor$ rounds.", + "bbox": [ + 169, + 102, + 823, + 140 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Lemma I.2. For $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , the following inequalities hold with probability at least $1 - \\delta$ :", + "bbox": [ + 169, + 143, + 810, + 160 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\hat {\\boldsymbol {u}} _ {s H + t} \\| _ {2} \\leq \\hat {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq s < \\left\\lfloor \\frac {T}{H \\eta} \\right\\rfloor , 0 \\leq t \\leq H,\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 167, + 767, + 203 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 169, + 210, + 202, + 223 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\hat {\\boldsymbol {u}} _ {s H} \\| _ {2} \\leq \\hat {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s \\leq \\left\\lfloor \\frac {T}{H \\eta} \\right\\rfloor ,\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 231, + 687, + 266 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "where $\\hat{C}_3$ is a constant independent of $\\eta$ and $H$ .", + "bbox": [ + 169, + 275, + 488, + 291 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Proof. Let $\\hat{\\Delta}_{k,t}^{(s)}\\coloneqq \\pmb{\\theta}_{k,t}^{(s)} - \\hat{\\pmb{u}}_t^{(s)}$ and $\\bar{\\Delta}^{(s)}\\coloneqq \\bar{\\pmb{\\theta}}^{(s)} - \\hat{\\pmb{u}}_{0}^{(s)}$ be the differences between the Local SGD and GD iterates. 
According to the update rule for $\\pmb{\\theta}_{k,t}^{(s)}$ and $\\hat{\\pmb{u}}_t^{(s)}$ ,", + "bbox": [ + 169, + 310, + 823, + 349 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} = \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 358, + 823, + 381 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {u}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t} ^ {(s)}\\right). \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 383, + 823, + 404 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Subtracting (23) by (24) gives", + "bbox": [ + 169, + 410, + 372, + 426 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {\\Delta}} _ {k, t + 1} ^ {(s)} = \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta (\\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) - \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t} ^ {(s)})) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\\\ = \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\eta \\hat {\\boldsymbol {v}} _ {k, t} ^ {(s)}, \\tag {25} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 434, + 823, + 479 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "where $\\hat{\\pmb{v}}_{k,t}^{(s)}$ is a remainder term with norm $\\| \\hat{\\pmb{v}}_{k,t}^{(s)}\\| _2\\leq \\frac{\\nu_3}{2}\\| \\hat{\\pmb{\\Delta}}_{k,t}^{(s)}\\| _2^2$ . 
For the $s$ -th round of Local SGD, we can apply (25) $t$ times to obtain the following:", + "bbox": [ + 169, + 488, + 823, + 522 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} = \\left[ \\prod_ {\\tau = 0} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {\\tau} ^ {(s)}\\right)\\right) \\right] \\hat {\\boldsymbol {\\Delta}} _ {k, 0} ^ {(s)} - \\eta \\underbrace {\\sum_ {\\tau = 0} ^ {t - 1} \\left[ \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\right] \\boldsymbol {z} _ {k , \\tau} ^ {(s)}} _ {\\mathcal {T}} \\tag {26} \\\\ + \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\prod_ {l = \\tau + 1} ^ {t - 1} (\\pmb {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\pmb {u}} _ {l} ^ {(s)})) \\hat {\\pmb {v}} _ {k, \\tau} ^ {(s)}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 529, + 823, + 637 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Here, $\\mathcal{T}$ can be expressed in the following form:", + "bbox": [ + 169, + 645, + 491, + 660 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} = \\hat {\\boldsymbol {Z}} _ {k, s H + t} - \\left[ \\prod_ {l = s H} ^ {s H + t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {Z}} _ {k, s H}.\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 667, + 671, + 710 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Substituting in $t = H$ and taking the average, we derive the following recursion:", + "bbox": [ + 169, + 718, + 700, + 733 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\bar {\\boldsymbol {\\Delta}} ^ {(s + 1)} = \\frac {1}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {\\Delta}} _ 
{k, H} ^ {(s)} \\\\ = \\left[ \\prod_ {\\tau = 0} ^ {H - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {\\tau} ^ {(s)})\\right) \\right] \\bar {\\boldsymbol {\\Delta}} ^ {(s)} \\\\ - \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {Z}} _ {k, (s + 1) H} + \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\left[ \\prod_ {l = s H} ^ {(s + 1) H - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})) \\right] \\hat {\\boldsymbol {Z}} _ {k, s H} \\\\ + \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\sum_ {\\tau = 0} ^ {H - 1} \\prod_ {l = \\tau + 1} ^ {H - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\hat {\\boldsymbol {v}} _ {k, \\tau} ^ {(s)}. \\tag {27} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 741, + 823, + 921 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Applying (27) $s$ times yields", + "bbox": [ + 171, + 103, + 364, + 119 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\boldsymbol {\\Delta}} ^ {(s)} = - \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {Z}} _ {k, s H} + \\frac {\\eta}{K} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\left[ \\prod_ {l = r H + \\tau + 1} ^ {s H} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {v}} _ {k, \\tau} ^ {(r)}. 
\\tag {28}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 123, + 823, + 167 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Substitute (28) into (26) and we have", + "bbox": [ + 171, + 171, + 419, + 186 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} = - \\frac {\\eta}{K} \\sum_ {k ^ {\\prime} \\in [ K ]} \\hat {\\boldsymbol {Z}} _ {k ^ {\\prime}, s H} - \\eta \\hat {\\boldsymbol {Z}} _ {k, s H + t} + \\eta \\left[ \\prod_ {l = s H} ^ {s H + t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})) \\right] \\hat {\\boldsymbol {Z}} _ {k, s H} \\\\ + \\frac {\\eta}{K} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\sum_ {k ^ {\\prime} \\in [ K ]} \\left[ \\prod_ {l = r H + \\tau + 1} ^ {s H + t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {v}} _ {k ^ {\\prime}, \\tau} ^ {(r)} \\\\ + \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\left[ \\prod_ {l = s H + \\tau + 1} ^ {s H + t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {v}} _ {k, \\tau} ^ {(s)}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 191, + 759, + 327 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "By Cauchy-Schwarz inequality and triangle inequality, we have", + "bbox": [ + 171, + 332, + 596, + 347 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\frac {\\eta}{K} \\left(\\sum_ {k ^ {\\prime} \\in [ K ]} \\left\\| \\hat {\\boldsymbol {Z}} _ {k ^ {\\prime}, s H} \\right\\| _ {2}\\right) + \\eta \\left\\| \\hat {\\boldsymbol {Z}} _ {k, s H + t} \\right\\| _ {2} + \\eta \\hat {C} _ {1} \\left\\| \\hat {\\boldsymbol {Z}} _ {k, s H} \\right\\| _ {2} \\tag {29} \\\\ + \\frac {\\eta \\hat {C} _ {1} \\nu_ {3}}{2 K} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\sum_ {k ^ {\\prime} \\in [ K ]} \\| \\hat {\\boldsymbol {\\Delta}} _ {k ^ {\\prime}, \\tau} ^ {(r)} \\| _ {2} ^ {2} + \\frac {\\eta \\hat {C} _ {1} \\nu_ {3}}{2} \\sum_ {\\tau = 0} ^ {t - 1} \\| \\hat {\\boldsymbol {\\Delta}} _ {k, \\tau} ^ {(s)} \\| _ {2} ^ {2}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 263, + 351, + 823, + 446 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "where $\\hat{C}_1 = \\exp (\\nu_2T)$.", + "bbox": [ + 171, + 452, + 328, + 469 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Below we prove by induction that for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , if", + "bbox": [ + 171, + 474, + 553, + 491 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\right\\| _ {2} \\leq \\hat {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 T}{\\eta} \\log \\frac {2 T K}{\\eta \\delta}}, \\quad \\forall 0 \\leq t < \\left\\lfloor \\frac {T}{\\eta} \\right\\rfloor , k \\in [ K ], \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 494, + 823, + 537 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "then there 
exists a constant $\\hat{C}_2$ such that for all $k\\in [K],0\\leq s < \\left\\lfloor \\frac{T}{\\eta H}\\right\\rfloor$ and $0\\leq t\\leq H$", + "bbox": [ + 171, + 541, + 750, + 561 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\hat {\\Delta} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\hat {C} _ {2} \\sqrt {\\eta \\log \\frac {2 T K}{\\eta \\delta}}. \\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 566, + 823, + 606 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "First, for all $k \\in [K]$ , $\\| \\hat{\\Delta}_{k,0}^{(0)}\\|_2 = 0$ and hence (31) holds. Assuming that (31) holds for all $\\hat{\\Delta}_{k',\\tau}^{(r)}$ where $k' \\in [K]$ , $0 \\leq r < s$ , $0 \\leq \\tau \\leq H$ and $r = s$ , $0 \\leq \\tau < t$ , then by (29), for all $k \\in [K]$ , the following holds:", + "bbox": [ + 169, + 612, + 823, + 660 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} \\| _ {2} \\leq 3 \\hat {C} _ {1} ^ {2} \\sigma_ {\\max} \\sqrt {2 T \\eta \\log \\frac {2 T K}{\\eta \\delta}} + \\hat {C} _ {1} \\hat {C} _ {2} ^ {2} T \\eta \\nu_ {3} \\log \\frac {2 T K}{\\eta \\delta}.\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 665, + 709, + 705 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Let $\\hat{C}_2 \\geq 6\\hat{C}_1^2\\sigma_{\\max}\\sqrt{2T}$ . Then for sufficiently small $\\eta$ , (31) holds. By Lemma I.1, (30) holds with probability at least $1 - \\delta$ . Furthermore, notice that $\\bar{\\pmb{\\theta}}^{(s)} - \\hat{\\pmb{u}}_{sH} = \\frac{1}{K}\\sum_{k\\in [K]}\\hat{\\pmb{\\Delta}}_{k,H}^{(s - 1)}$ . Hence we have the lemma.", + "bbox": [ + 169, + 710, + 826, + 761 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The iterates of standard SGD can be viewed as the local iterates on a single client with the number of local steps $\\left\\lfloor \\frac{T}{\\eta} \\right\\rfloor$ . 
Therefore, we can directly apply Lemma I.2 and obtain the following lemma about the SGD iterates $\\boldsymbol{w}_t$ .", + "bbox": [ + 169, + 775, + 823, + 821 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Corollary I.1. For $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , the following holds with probability at least $1 - \\delta$ :", + "bbox": [ + 171, + 823, + 751, + 840 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {w} _ {s H} - \\hat {\\boldsymbol {u}} _ {s H} \\| _ {2} \\leq \\hat {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s \\leq \\frac {T}{H \\eta},\n$$\n", + "text_format": "latex", + "bbox": [ + 315, + 844, + 679, + 878 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "where $\\hat{C}_3$ is the same constant as in Lemma I.2.", + "bbox": [ + 171, + 883, + 488, + 898 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Applying Lemma I.2 and Corollary I.1 and taking the union bound, we have Theorem 3.1.", + "bbox": [ + 171, + 909, + 764, + 925 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "J PROOF OUTLINE OF MAIN THEOREMS", + "text_level": 1, + "bbox": [ + 171, + 102, + 524, + 118 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "We adopt the general framework proposed by Li et al. (2019a) to bound the closeness of discrete algorithms and SDE solutions via the method of moments. However, their framework is not directly applicable to our case since they provide approximation guarantees for discrete algorithms with learning rate $\\eta$ for $\\mathcal{O}(\\eta^{-1})$ steps while we want to capture Local SGD for $\\mathcal{O}(\\eta^{-2})$ steps. 
To overcome this difficulty, we treat $R_{\\mathrm{grp}} := \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}} \\right\\rfloor$ rounds as a \"giant step\" of Local SGD with an \"effective\" learning rate $\\eta^{1 - \\beta}$ , where $\\beta$ is a constant in $(0,1)$ , and derive the recursive formulas to compute the moments for the change in every step, every round, and every $R_{\\mathrm{grp}}$ rounds. The formulation of the recursions requires a detailed analysis of the limiting dynamics of the iterate and careful control of approximation errors.", + "bbox": [ + 169, + 133, + 826, + 263 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The dynamics of the iterate can be divided into two phases: the approaching phase (Phase 1) and the drift phase (Phase 2). The approaching phase only lasts for $\\mathcal{O}(\\log \\frac{1}{\\eta})$ rounds, during which the iterate is quickly driven to the minimizer manifold by the negative gradient and ends up within only $\\tilde{\\mathcal{O}} (\\sqrt{\\eta})$ from $\\Gamma$ (see Appendix K.5). After that, the iterate enters the drifting phase and moves in the tangent space of $\\Gamma$ while staying close to $\\Gamma$ (see Appendix K.6). The closeness of the iterates (local and global) and $\\Gamma$ is summarized in the following theorem.", + "bbox": [ + 169, + 268, + 823, + 358 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Theorem J.1 (Closeness of the iterates and $\\Gamma$ ). 
For $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ , for all $\\mathcal{O}(\\log \\frac{1}{\\eta}) \\leq s \\leq \\lfloor T / (H\\eta^2) \\rfloor$ ,", + "bbox": [ + 166, + 359, + 823, + 391 + ], + "page_idx": 32 + }, + { + "type": "equation", + "text": "\n$$\n\\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\in \\Gamma , \\quad \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\| _ {2} = \\mathcal {O} \\left(\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 397, + 692, + 431 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Also, for all $\\mathcal{O}(\\log \\frac{1}{\\eta}) \\leq s < \\lfloor T / (H\\eta^2) \\rfloor$ , $k \\in [K]$ and $0 \\leq t \\leq H$ ,", + "bbox": [ + 169, + 435, + 617, + 454 + ], + "page_idx": 32 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\pmb {\\theta} _ {k, t} ^ {(s)} - \\Phi (\\bar {\\pmb {\\theta}} ^ {(s)}) \\| _ {2} = \\mathcal {O} \\left(\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 459, + 640, + 494 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Here, $\\mathcal{O}(\\cdot)$ hides constants independent of $\\eta$ and $\\delta$ .", + "bbox": [ + 169, + 497, + 508, + 512 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "To control the approximation errors, we also provide a high probability bound for the change of the manifold projection within $R_{\\mathrm{grp}}$ rounds.", + "bbox": [ + 169, + 523, + 823, + 551 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Theorem J.2 (High probability bound for the change of manifold projection). 
For $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ with probability at least $1 - \\delta$ , for all $0 \\leq s \\leq \\lfloor T / (H\\eta^2) \\rfloor - R_{\\mathrm{grp}}$ and $0 \\leq r \\leq R_{\\mathrm{grp}}$ ,", + "bbox": [ + 169, + 553, + 823, + 585 + ], + "page_idx": 32 + }, + { + "type": "equation", + "text": "\n$$\n\\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}), \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s + r)}) \\in \\Gamma , \\quad \\| \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s + r)}) - \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\| _ {2} = \\mathcal {O} \\left(\\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 588, + 774, + 623 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "where $\\mathcal{O}(\\cdot)$ hides constants independent of $\\eta$ and $\\delta$ .", + "bbox": [ + 171, + 626, + 511, + 642 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The proof of Theorems J.1 and J.2 is based on the analysis of the dynamics of the iterate and presented in Appendix K.7.", + "bbox": [ + 169, + 651, + 823, + 681 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Utilizing Theorems J.1 and J.2, we move on to estimate the first and second moments of the change of the manifold projection every $R_{\\mathrm{grp}}$ rounds. However, the randomness during training might drive the iterate far from the manifold (with a low probability, though), making the dynamics intractable. To tackle this issue, we construct a well-behaved auxiliary sequence $\\{\\hat{\\pmb{\\theta}}_{k,t}^{(s)}\\}$ , which is constrained to the neighborhood of $\\Gamma$ and equals the original sequence $\\{\\pmb{\\theta}_{k,t}^{(s)}\\}$ with high probability (see Definition K.5). Then we can formulate recursions for the change of manifold projection of the auxiliary sequence using the nice properties near $\\Gamma$ . 
The estimate of moments is summarized in Theorem K.2.", + "bbox": [ + 169, + 686, + 825, + 797 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Finally, based on the moment estimates, we apply the framework in Li et al. (2019a) to show that the manifold projection and the SDE solution are weak approximations of each other in Appendix K.10.", + "bbox": [ + 169, + 801, + 823, + 832 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "K PROOF DETAILS OF MAIN THEOREMS", + "text_level": 1, + "bbox": [ + 171, + 849, + 527, + 866 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The detailed proof is organized as follows. In Appendix K.1, we introduce the notations that will be used throughout the proof. To establish preliminary knowledge, Appendix K.2 provides explicit expression for the projection operator $\\Phi (\\cdot)$ , and Appendix K.3 presents lemmas about gradient descent", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "(GD) and gradient flow (GF). Based on the preliminary knowledge, we construct a nested working zone to characterize the closeness of the iterate and $\\Gamma$ in Appendix K.4. Appendices K.5 to K.10 make up the main body of the proof. Specifically, Appendices K.5 and K.6 analyze the dynamics of Local SGD iterates for phases 1 and 2, respectively. Utilizing these analyses, we provide the proof of Theorems J.1 and J.2 in Appendix K.7 and the proof of Theorem 3.3 in Appendix K.8. Then we derive the estimation for the first and second moments of one \"giant step\" $\\Phi (\\bar{\\theta}^{(s + R_{\\mathrm{grp}})}) - \\Phi (\\bar{\\theta}^{(s)})$ in Appendix K.9. 
Finally, we prove the approximation theorem 3.2 in Appendix K.10.", + "bbox": [ + 169, + 103, + 826, + 203 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "K.1 ADDITIONAL NOTATIONS", + "text_level": 1, + "bbox": [ + 171, + 219, + 395, + 233 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Let $R_{\\mathrm{tot}} \\coloneqq \\left\\lfloor \\frac{T}{H\\eta^2} \\right\\rfloor$ be the total number of rounds. Denote by $\\phi^{(s)}$ the manifold projection of the global iterate at the beginning of round $s$ . Let $\\pmb{x}_{k,t}^{(s)} \\coloneqq \\pmb{\\theta}_{k,t}^{(s)} - \\phi^{(s)}$ be the difference between the local iterate and the manifold projection of the global iterate. Also define $\\bar{\\pmb{x}}_H^{(s)} \\coloneqq \\frac{1}{K}\\sum_{k\\in [K]}\\pmb{x}_{k,H}^{(s)}$ and $\\bar{\\pmb{x}}_0^{(s)} \\coloneqq \\frac{1}{K}\\sum_{k\\in [K]}\\pmb{x}_{k,0}^{(s)}$ which is the average of $\\pmb{x}_{k,t}^{(s)}$ among $K$ workers at step 0 and $H$ . Then for all $k\\in [K]$ , $\\pmb{x}_{k,0}^{(s)} = \\bar{\\pmb{x}}_0^{(s)} = \\bar{\\pmb{\\theta}}^{(s)} - \\phi^{(s)}$ . Finally, Since $\\nabla \\ell(\\pmb{\\theta};\\pmb{\\zeta})$ is bounded, the gradient noise $z_{k,t}^{(s)}$ is also bounded and we denote by $\\sigma_{\\max}$ the upper bound such that $\\| z_{k,t}^{(s)}\\|_2 \\leq \\sigma_{\\max}, \\forall s,k,t$ .", + "bbox": [ + 169, + 244, + 823, + 363 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "We first introduce the notion of $\\mu$ -PL. We will later show that there exists a neighborhood of the minimizer manifold $\\Gamma$ where $\\mathcal{L}$ satisfies $\\mu$ -PL.", + "bbox": [ + 169, + 368, + 823, + 398 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Definition K.1 (Polyak-Lojasiewicz Condition). 
For $\\mu > 0$ , we say a function $\\mathcal{L}(\\cdot)$ satisfies $\\mu$ -Polyak-Lojasiewicz condition (abbreviated as $\\mu$ -PL) on set $U$ if", + "bbox": [ + 169, + 401, + 823, + 431 + ], + "page_idx": 33 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{2} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta}) \\| _ {2} ^ {2} \\geq \\mu (\\mathcal {L} (\\boldsymbol {\\theta}) - \\inf _ {\\boldsymbol {\\theta} ^ {\\prime} \\in U} \\mathcal {L} (\\boldsymbol {\\theta} ^ {\\prime})).\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 436, + 629, + 465 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "We then introduce the definitions of the $\\epsilon$ -ball at a point and the $\\epsilon$ -neighborhood of a set. For $\\pmb{\\theta} \\in \\mathbb{R}^d$ and $\\epsilon > 0$ , $B^{\\epsilon}(\\pmb{\\theta}) \\coloneqq \\{\\pmb{\\theta}' : \\| \\pmb{\\theta}' - \\pmb{\\theta}\\|_2 < \\epsilon\\}$ is the open $\\epsilon$ -ball centered at $\\pmb{\\theta}$ . For a set $\\mathcal{Z} \\subseteq \\mathbb{R}^d$ , $\\mathcal{Z}^{\\epsilon} \\coloneqq \\bigcup_{\\pmb{\\theta} \\in \\mathcal{Z}} B^{\\epsilon}(\\pmb{\\theta})$ is the $\\epsilon$ -neighborhood of $\\mathcal{Z}$ .", + "bbox": [ + 169, + 479, + 823, + 525 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "K.2 COMPUTING THE DERIVATIVES OF THE LIMITING MAPPING", + "text_level": 1, + "bbox": [ + 171, + 540, + 633, + 554 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "In this subsection, we present lemmas that relate the derivatives of the limiting mapping $\\Phi(\\cdot)$ to the derivatives of the loss function $\\mathcal{L}(\\cdot)$ . We first introduce the operator $\\mathcal{V}_{\\mathbf{H}}$ .", + "bbox": [ + 169, + 566, + 823, + 595 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Definition K.2. 
For a semi-definite symmetric matrix $\\mathbf{H} \\in \\mathbb{R}^{d \\times d}$ , let $\\lambda_j$ , $\\mathbf{v}_j$ be the $j$ -th eigenvalue and eigenvector and $\\mathbf{v}_j$ 's form an orthonormal basis of $\\mathbb{R}^d$ . Then, define the operator $\\mathcal{V}_{\\mathbf{H}}: \\mathbb{R}^{d \\times d} \\to \\mathbb{R}^{d \\times d}$ as", + "bbox": [ + 169, + 598, + 823, + 641 + ], + "page_idx": 33 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {V} _ {\\boldsymbol {H}} (\\boldsymbol {M}) := \\sum_ {i, j: \\lambda_ {i} \\neq 0 \\vee \\lambda_ {j} \\neq 0} \\frac {1}{\\lambda_ {i} + \\lambda_ {j}} \\left\\langle \\boldsymbol {M}, \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top}, \\forall \\boldsymbol {M} \\in \\mathbb {R} ^ {d \\times d}.\n$$\n", + "text_format": "latex", + "bbox": [ + 271, + 641, + 723, + 678 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Intuitively, this operator projects $M$ to the base matrix $\\mathbf{v}_i\\mathbf{v}_j^\\top$ and sums up the projections with weights $\\frac{1}{\\lambda_i + \\lambda_j}$ .", + "bbox": [ + 169, + 683, + 823, + 720 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Additionally, for $\\theta \\in \\Gamma$ , denote by $T_{\\theta}$ and $T_{\\theta}^{\\perp}$ the tangent and normal space of $\\Gamma$ at $\\theta$ respectively. Lemmas K.1 to K.4 are from Li et al. (2021b). We include them to make the paper self-contained.", + "bbox": [ + 169, + 729, + 823, + 760 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Lemma K.1 (Lemma C.1 of Li et al. (2021b)). For any $\\pmb{\\theta} \\in \\Gamma$ and any $\\pmb{v} \\in T_{\\pmb{\\theta}}(\\Gamma)$ , it holds that $\\nabla^2 \\mathcal{L}(\\pmb{\\theta}) \\pmb{v} = \\mathbf{0}$ .", + "bbox": [ + 169, + 763, + 823, + 792 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Lemma K.2 (Lemma 4.3 of Li et al. (2021b)). 
For any $\\pmb{\\theta} \\in \\Gamma$ , $\\partial \\Phi(\\pmb{\\theta}) \\in \\mathbb{R}^{d \\times d}$ is the projection matrix onto the tangent space $T_{\\pmb{\\theta}}(\\Gamma)$ .", + "bbox": [ + 169, + 795, + 823, + 824 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Lemma K.3 (Lemma C.4 of Li et al. (2021b)). For any $\\pmb{\\theta} \\in \\Gamma$ , $\\pmb{u} \\in \\mathbb{R}^d$ and $\\pmb{v} \\in T_{\\pmb{\\theta}}(\\Gamma)$ , it holds that", + "bbox": [ + 169, + 827, + 823, + 854 + ], + "page_idx": 33 + }, + { + "type": "equation", + "text": "\n$$\n\\partial^ {2} \\Phi (\\boldsymbol {\\theta}) [ \\boldsymbol {v}, \\boldsymbol {u} ] = - \\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) [ \\boldsymbol {v}, \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) ^ {+} \\boldsymbol {u} ] - \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) ^ {+} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) [ \\boldsymbol {v}, \\partial \\Phi (\\boldsymbol {\\theta}) \\boldsymbol {u} ].\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 861, + 772, + 880 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Lemma K.4 (Lemma C.6 of Li et al. (2021b)). 
For any $\\pmb{\\theta} \\in \\Gamma$ and $\\pmb{\\Sigma} \\in \\operatorname{span}\\{\\pmb{u}\\pmb{u}^{\\top} \\mid \\pmb{u} \\in T_{\\pmb{\\theta}}^{\\perp}(\\Gamma)\\}$ ,", + "bbox": [ + 169, + 883, + 823, + 902 + ], + "page_idx": 33 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\langle \\partial^ {2} \\Phi (\\boldsymbol {\\theta}), \\boldsymbol {\\Sigma} \\right\\rangle = - \\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) \\left[ \\mathcal {V} _ {\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta})} (\\boldsymbol {\\Sigma}) \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 907, + 656, + 926 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Lemma K.5. For all $\\theta \\in \\Gamma$ , $\\pmb{u}, \\pmb{v} \\in T_{\\theta}(\\Gamma)$ , it holds that", + "bbox": [ + 169, + 103, + 545, + 119 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} [ \\boldsymbol {v} \\boldsymbol {u} ^ {\\top} ] = \\mathbf {0}. \\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 125, + 823, + 142 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Proof. This proof is inspired by Lemma C.4 of Li et al. (2021b). For any $\\pmb{\\theta} \\in \\Gamma$ , consider a parameterized smooth curve $\\pmb{v}(t), t \\geq 0$ on $\\Gamma$ such that $\\pmb{v}(0) = \\pmb{\\theta}$ and $\\pmb{v}'(0) = \\pmb{v}$ . Let $P_{\\parallel}(t) = \\partial \\Phi(\\pmb{v}(t))$ , $P_{\\perp}(t) = I - \\partial \\Phi(\\pmb{v}(t))$ and $\\pmb{H}(t) = \\nabla^2 \\mathcal{L}(\\pmb{v}(t))$ . By Lemma C.1 and 4.3 in Li et al. 
(2021b),", + "bbox": [ + 169, + 156, + 826, + 214 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {H} (t) = \\boldsymbol {P} _ {\\perp} (t) \\boldsymbol {H} (t).\n$$\n", + "text_format": "latex", + "bbox": [ + 424, + 220, + 571, + 237 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Take the derivative with respect to $t$ on both sides,", + "bbox": [ + 169, + 242, + 504, + 257 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\boldsymbol {H} ^ {\\prime} (t) = \\boldsymbol {P} _ {\\perp} (t) \\boldsymbol {H} ^ {\\prime} (t) + \\boldsymbol {P} _ {\\perp} ^ {\\prime} (t) \\boldsymbol {H} (t) \\\\ \\Rightarrow \\boldsymbol {P} _ {\\parallel} (t) \\boldsymbol {H} ^ {\\prime} (t) = \\boldsymbol {P} _ {\\perp} ^ {\\prime} (t) \\boldsymbol {H} (t) = - \\boldsymbol {P} _ {\\parallel} ^ {\\prime} (t) \\boldsymbol {H} (t). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 262, + 653, + 301 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "At $t = 0$ , we have", + "bbox": [ + 171, + 306, + 295, + 319 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {P} _ {\\parallel} (0) \\boldsymbol {H} ^ {\\prime} (0) = - \\boldsymbol {P} _ {\\parallel} ^ {\\prime} (0) \\boldsymbol {H} (0). \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 325, + 823, + 344 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "WLOG let $H(0) = \\mathrm{diag}(\\lambda_1, \\dots, \\lambda_d) \\in \\mathbb{R}^{d \\times d}$ , where $\\lambda_i = 0$ for all $m < i \\leq d$ . Therefore $P_{\\perp}(0) = \\begin{bmatrix} I_m & 0 \\\\ 0 & 0 \\end{bmatrix}$ , $P_{\\parallel}(0) = \\begin{bmatrix} 0 & 0 \\\\ 0 & I_{d - m} \\end{bmatrix}$ . 
Decompose $P_{\\parallel}'(0)$ , $H(0)$ and $H'(0)$ as follows.", + "bbox": [ + 169, + 349, + 823, + 398 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {P} _ {\\parallel} ^ {\\prime} (0) = \\left[ \\begin{array}{c c} \\boldsymbol {P} _ {\\parallel , 1 1} ^ {\\prime} (0) & \\boldsymbol {P} _ {\\parallel , 1 2} ^ {\\prime} (0) \\\\ \\boldsymbol {P} _ {\\parallel , 2 1} ^ {\\prime} (0) & \\boldsymbol {P} _ {\\parallel , 2 2} ^ {\\prime} (0) \\end{array} \\right], \\boldsymbol {H} (0) = \\left[ \\begin{array}{c c} \\boldsymbol {H} _ {1 1} (0) & \\boldsymbol {0} \\\\ \\boldsymbol {0} & \\boldsymbol {0} \\end{array} \\right], \\boldsymbol {H} ^ {\\prime} (0) = \\left[ \\begin{array}{c c} \\boldsymbol {H} _ {1 1} ^ {\\prime} (0) & \\boldsymbol {H} _ {1 2} ^ {\\prime} (0) \\\\ \\boldsymbol {H} _ {2 1} ^ {\\prime} (0) & \\boldsymbol {H} _ {2 2} ^ {\\prime} (0) \\end{array} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 402, + 795, + 439 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Substituting the decomposition into (33), we have", + "bbox": [ + 169, + 443, + 503, + 458 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\begin{array}{c c} \\mathbf {0} & \\mathbf {0} \\\\ \\mathbf {H} _ {2 1} ^ {\\prime} (0) & \\mathbf {H} _ {2 2} ^ {\\prime} (0) \\end{array} \\right] = - \\left[ \\begin{array}{c c} \\mathbf {P} _ {\\parallel , 1 1} ^ {\\prime} (0) \\mathbf {H} _ {1 1} (0) & \\mathbf {0} \\\\ \\mathbf {P} _ {\\parallel , 2 1} ^ {\\prime} (0) \\mathbf {H} _ {1 1} (0) & \\mathbf {0} \\end{array} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 463, + 665, + 500 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Therefore, $H_{22}'(0) = 0$ and", + "bbox": [ + 169, + 503, + 359, + 520 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {P} _ {\\parallel} (0) \\boldsymbol {H} ^ {\\prime} (0) = - \\boldsymbol {P} _ {\\parallel} ^ 
{\\prime} (0) \\boldsymbol {H} (0) = - \\left[ \\begin{array}{c c} \\boldsymbol {0} & \\boldsymbol {0} \\\\ \\boldsymbol {H} _ {2 1} ^ {\\prime} (0) & \\boldsymbol {0} \\end{array} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 523, + 666, + 559 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Any $\\pmb{u} \\in T_{\\pmb{\\theta}}(\\Gamma)$ can be decomposed as $\\pmb{u} = [\\pmb{0}, \\pmb{u}_2]^\\top$ where $\\pmb{u}_2 \\in \\mathbb{R}^{d - m}$ . With this decomposition, we have $\\pmb{P}_{\\parallel}(0)\\pmb{H}'(0)\\pmb{u} = \\pmb{0}$ . Also, note that $\\pmb{H}'(0) = \\nabla^3\\mathcal{L}(\\pmb{\\theta})[\\pmb{v}]$ . Hence,", + "bbox": [ + 169, + 565, + 823, + 599 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) [ \\boldsymbol {v} \\boldsymbol {u} ^ {T} ] = \\boldsymbol {0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 604, + 588, + 623 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/42ce0cb502203a7079070b686c3695160cc407ba9340f5f62f1024cbd5760322.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 628, + 823, + 641 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "K.3 PRELIMINARY LEMMAS FOR GD AND GF", + "text_level": 1, + "bbox": [ + 171, + 659, + 508, + 672 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "In this subsection, we introduce a few useful preliminary lemmas about gradient descent and gradient flow. Before presenting the lemmas, we introduce some notations and assumptions that will be used in this subsection.", + "bbox": [ + 169, + 685, + 826, + 727 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Assume that the loss function $\\mathcal{L}(\\pmb{\\theta})$ is $\\rho$ -smooth and $\\mu$ -PL in an open, convex neighborhood $U$ of a local minimizer $\\pmb{\\theta}^*$ . 
Denote by $\\mathcal{L}^* := \\mathcal{L}(\\pmb{\\theta}^*)$ the minimum value for simplicity. Let $\\epsilon'$ be the radius of the open $\\epsilon'$ -ball centered at $\\pmb{\\theta}^*$ such that $B^{\\epsilon'}(\\pmb{\\theta}^*) \\subseteq U$ . We also define a potential function $\\tilde{\\Psi}(\\pmb{\\theta}) := \\sqrt{\\mathcal{L}(\\pmb{\\theta}) - \\mathcal{L}^*}$ .", + "bbox": [ + 169, + 734, + 825, + 797 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Consider gradient descent iterates $\\{\\hat{u}_t\\}_{t\\in \\mathbb{N}}$ following the update rule $\\hat{\\pmb{u}}_{t + 1} = \\hat{\\pmb{u}}_t - \\eta \\nabla \\mathcal{L}(\\hat{\\pmb{u}}_t)$ . We first introduce the descent lemma for gradient descent.", + "bbox": [ + 169, + 801, + 823, + 830 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Lemma K.6 (Descent lemma for GD). If $\\hat{\\boldsymbol{u}}_t\\in U$ and $\\eta \\leq \\frac{1}{\\rho}$ , then", + "bbox": [ + 169, + 832, + 612, + 852 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\eta}{2} \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2} \\leq \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1}),\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 857, + 617, + 883 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 888, + 202, + 901 + ], + "page_idx": 34 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t + 1}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) \\left(\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t}\\right) - \\mathcal {L} ^ {*}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 907, + 635, + 926 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": 
"35", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Proof. By $\\rho$ -smoothness,", + "bbox": [ + 171, + 104, + 343, + 119 + ], + "page_idx": 35 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1}) \\leq \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) + \\langle \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}), \\hat {\\boldsymbol {u}} _ {t + 1} - \\hat {\\boldsymbol {u}} _ {t} \\rangle + \\frac {\\rho \\eta^ {2}}{2} \\| \\hat {\\boldsymbol {u}} _ {t + 1} - \\hat {\\boldsymbol {u}} _ {t} \\| _ {2} ^ {2} \\\\ = \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\eta (1 - \\frac {\\rho \\eta}{2}) \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2} \\\\ \\leq \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\frac {\\eta}{2} \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 125, + 715, + 212 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "By the definition of $\\mu$ -PL, we have", + "bbox": [ + 171, + 218, + 403, + 233 + ], + "page_idx": 35 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t + 1}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) \\left(\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t}\\right) - \\mathcal {L} ^ {*}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 239, + 637, + 257 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/7dd68ab7f70f27aac1d64197a58e4432d5d5ec93b7e237eb54a00ccbc8150af3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 263, + 823, + 276 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Then we prove the Lipschitzness of $\\tilde{\\Psi} (\\pmb {\\theta})$", + "bbox": [ + 169, + 292, + 450, + 310 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": 
"Lemma K.7 (Lipschitzness of $\\tilde{\\Psi}(\\pmb{\\theta})$ ). $\\tilde{\\Psi}(\\pmb{\\theta})$ is $\\sqrt{2\\rho}$ -Lipschitz for $\\pmb{\\theta} \\in U$ . That is, for any $\\pmb{\\theta}_1, \\pmb{\\theta}_2 \\in U$ ,", + "bbox": [ + 169, + 314, + 823, + 344 + ], + "page_idx": 35 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) \\right| \\leq \\sqrt {2 \\rho} \\| \\boldsymbol {\\theta} _ {1} - \\boldsymbol {\\theta} _ {2} \\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 349, + 625, + 369 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Proof. Fix $\\pmb{\\theta}_{1}$ and $\\pmb{\\theta}_{2}$ . Denote by $\\pmb{\\theta}(t) \\coloneqq (1 - t)\\pmb{\\theta}_{1} + t\\pmb{\\theta}_{2}$ the convex combination of $\\pmb{\\theta}_{1}$ and $\\pmb{\\theta}_{2}$ where $t \\in [0,1]$ . Further define $f(t) \\coloneqq \\tilde{\\Psi}(\\pmb{\\theta}(t))$ . Below we consider two cases.", + "bbox": [ + 169, + 383, + 823, + 416 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Case 1. 
If $\\forall t\\in (0,1)$ $f(t) > 0$ , then $f(t)$ is differentiable on $(0,1)$", + "bbox": [ + 169, + 430, + 635, + 446 + ], + "page_idx": 35 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| = | f (1) - f (0) | \\\\ = \\left| \\int_ {0} ^ {1} f ^ {\\prime} (t) \\mathrm {d} t \\right| \\\\ = \\left| \\int_ {0} ^ {1} \\left\\langle \\nabla \\tilde {\\Psi} (\\boldsymbol {\\theta} (t)), \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\right\\rangle \\mathrm {d} t \\right| \\\\ = \\left| \\int_ {0} ^ {1} \\frac {\\langle \\nabla \\mathcal {L} (\\boldsymbol {\\theta} (t)) , \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\rangle}{\\sqrt {\\mathcal {L} (\\boldsymbol {\\theta} (t)) - \\mathcal {L} ^ {*}}} \\mathrm {d} t \\right| \\\\ \\leq \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2} \\int_ {0} ^ {1} \\frac {\\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta} (t)) \\| _ {2}}{\\sqrt {\\mathcal {L} (\\boldsymbol {\\theta} (t)) - \\mathcal {L} ^ {*}}} \\mathrm {d} t. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 453, + 684, + 627 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "By $\\rho$ -smoothness of $\\mathcal{L}$ , for all $\\pmb{\\theta} \\in U$ ,", + "bbox": [ + 171, + 633, + 421, + 648 + ], + "page_idx": 35 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta}) \\right\\| _ {2} ^ {2} \\leq 2 \\rho (\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}).\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 654, + 601, + 672 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Since $\\sqrt{\\mathcal{L}(\\pmb{\\theta}(t)) - \\mathcal{L}^*} > 0$ for all $t \\in (0,1)$ , $\\frac{\\|\\nabla\\mathcal{L}(\\pmb{\\theta}(t))\\|_2}{\\sqrt{\\mathcal{L}(\\pmb{\\theta}(t)) - \\mathcal{L}^*}} \\leq \\sqrt{2\\rho}$ . Therefore,", + "bbox": [ + 169, + 678, + 686, + 705 + ], + "page_idx": 35 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| \\leq \\sqrt {2 \\rho_ {2}} \\left\\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\right\\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 712, + 627, + 733 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Case 2. 
If $\\exists t' \\in (0,1)$ such that $f(t') = 0$ , then", + "bbox": [ + 169, + 744, + 501, + 762 + ], + "page_idx": 35 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| = | f (1) - f (0) | \\\\ = \\left| (1 - t ^ {\\prime}) \\frac {f (1) - f (t ^ {\\prime})}{1 - t ^ {\\prime}} + t ^ {\\prime} \\left(\\frac {f (t ^ {\\prime}) - f (0)}{t ^ {\\prime}}\\right) \\right| \\\\ \\leq \\max \\left(\\frac {f (1)}{1 - t ^ {\\prime}}, \\frac {f (0)}{t ^ {\\prime}}\\right). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 281, + 767, + 714, + 858 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Since $\\pmb{\\theta}(t')$ minimizes $\\mathcal{L}$ in an open set, $\\nabla \\mathcal{L}(\\pmb{\\theta}(t')) = \\mathbf{0}$ . By $\\rho$ -smoothness of $\\mathcal{L}$ , for all $\\pmb{\\theta} \\in U$ ,", + "bbox": [ + 169, + 864, + 794, + 881 + ], + "page_idx": 35 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (\\boldsymbol {\\theta}) \\leq \\mathcal {L} ^ {*} + \\frac {\\rho}{2} \\| \\boldsymbol {\\theta} - \\boldsymbol {\\theta} (t ^ {\\prime}) \\| _ {2} ^ {2} \\quad \\Rightarrow \\quad \\tilde {\\Psi} (\\boldsymbol {\\theta}) \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} - \\boldsymbol {\\theta} (t ^ {\\prime}) \\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 281, + 888, + 712, + 921 + ], + "page_idx": 35 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 948, + 509, + 960 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 104, + 243, + 116 + ], + "page_idx": 36 + }, + { + "type": "equation", + "text": "\n$$\nf (1) \\leq \\sqrt {\\frac {\\rho}{2}} \\| 
\\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} \\left(t ^ {\\prime}\\right) \\| _ {2} = \\left(1 - t ^ {\\prime}\\right) \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 126, + 674, + 157 + ], + "page_idx": 36 + }, + { + "type": "equation", + "text": "\n$$\nf (0) \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {1} - \\boldsymbol {\\theta} \\left(t ^ {\\prime}\\right) \\| _ {2} = t ^ {\\prime} \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 161, + 638, + 193 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Then we have", + "bbox": [ + 171, + 199, + 267, + 213 + ], + "page_idx": 36 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 219, + 622, + 252 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Combining case 1 and case 2, we conclude the proof.", + "bbox": [ + 169, + 258, + 524, + 273 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/7c229c0422d5cc0825d4812c31a06ccbaea051b1488dc6642b0e75cb1a27b46c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 258, + 823, + 270 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Below we introduce a lemma that relates the movement of one step gradient descent to the change of the potential function.", + "bbox": [ + 169, + 287, + 823, + 316 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Lemma K.8 (Lemma G.1 in Lyu et al. (2022)). 
If $\\hat{\\pmb{u}}_t\\in U$ and $\\eta \\leq 1 / \\rho_{2}$ then", + "bbox": [ + 169, + 320, + 684, + 335 + ], + "page_idx": 36 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t}) - \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t + 1}) \\geq \\frac {\\sqrt {2 \\mu}}{4} \\eta \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 343, + 633, + 373 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Proof.", + "bbox": [ + 171, + 387, + 217, + 401 + ], + "page_idx": 36 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t}) - \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t + 1}) = \\frac {\\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1})}{\\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t}) + \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t + 1})} \\\\ \\geq \\frac {\\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1}) - \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t})}{2 \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t})} \\\\ \\geq \\frac {\\eta (1 - \\rho_ {2} \\eta / 2) \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2}}{2 \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t})}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 407, + 658, + 515 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "where the two inequalities use Lemma K.6. By $\\mu$ -PL, $\\tilde{\\Psi}(\\hat{\\boldsymbol{u}}_t) \\leq \\frac{1}{\\sqrt{2\\mu}} \\|\\nabla \\mathcal{L}(\\hat{\\boldsymbol{u}}_t)\\|_2$ . 
Therefore, we have $\\tilde{\\Psi}(\\hat{\\boldsymbol{u}}_t) - \\tilde{\\Psi}(\\hat{\\boldsymbol{u}}_{t+1}) \\geq \\frac{\\sqrt{2\\mu}}{2}(1 - \\eta\\rho/2)\\eta \\|\\nabla \\mathcal{L}(\\hat{\\boldsymbol{u}}_t)\\|_2 \\geq \\frac{\\sqrt{2\\mu}}{4}\\eta \\|\\nabla \\mathcal{L}(\\hat{\\boldsymbol{u}}_t)\\|_2$ .", + "bbox": [ + 169, + 523, + 823, + 563 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Based on Lemma K.8, we have the following lemma that bounds the movement of GD over multiple steps.", + "bbox": [ + 169, + 575, + 823, + 604 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Lemma K.9 (Bounding the movement of GD). If $\\hat{\\pmb{u}}_0$ is initialized such that $\\| \\hat{\\pmb{u}}_0 - \\pmb{\\theta}^*\\| _2\\leq \\frac{1}{4}\\sqrt{\\frac{\\mu}{\\rho}}\\epsilon '$ , then for all $t\\geq 0$ , $\\hat{\\pmb{u}}_t\\in B^{\\epsilon '}(\\pmb {\\theta}^*)$ and", + "bbox": [ + 169, + 609, + 823, + 650 + ], + "page_idx": 36 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\hat {\\boldsymbol {u}} _ {t} - \\hat {\\boldsymbol {u}} _ {0} \\right\\| _ {2} \\leq \\sqrt {\\frac {8}{\\mu}} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}).\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 657, + 589, + 691 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Proof. We prove the proposition by induction. When $t = 0$ , it trivially holds. Assume that the proposition holds for $\\hat{\\pmb{u}}_{\\tau}$ , $0 \\leq \\tau < t$ . 
For step $t$ , since $\\hat{\\pmb{u}}_{\\tau} \\in B^{\\epsilon'}(\\pmb{\\theta}^{*})$ , we apply Lemma K.8 and obtain", + "bbox": [ + 169, + 704, + 823, + 747 + ], + "page_idx": 36 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\hat {\\boldsymbol {u}} _ {t} - \\hat {\\boldsymbol {u}} _ {0} \\| _ {2} \\leq \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {\\tau}) \\| _ {2} \\leq \\sqrt {\\frac {8}{\\mu}} \\left(\\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}) - \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t})\\right) \\leq \\sqrt {\\frac {8}{\\mu}} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}).\n$$\n", + "text_format": "latex", + "bbox": [ + 245, + 753, + 750, + 794 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Further by $\\rho$ -smoothness of $\\mathcal{L}(\\cdot)$ ,", + "bbox": [ + 169, + 801, + 393, + 816 + ], + "page_idx": 36 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\hat {\\boldsymbol {u}} _ {t} - \\hat {\\boldsymbol {u}} _ {0} \\right\\| _ {2} \\leq \\sqrt {\\frac {8}{\\mu}} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}) \\leq 2 \\sqrt {\\frac {\\rho}{\\mu}} \\left\\| \\hat {\\boldsymbol {u}} _ {0} - \\boldsymbol {\\theta} ^ {*} \\right\\| _ {2} \\leq \\frac {1}{2} \\epsilon^ {\\prime}.\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 823, + 679, + 858 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Therefore, $\\| \\hat{\\pmb{u}}_t - \\pmb{\\theta}^* \\|_2 \\leq \\| \\hat{\\pmb{u}}_t - \\hat{\\pmb{u}}_0 \\|_2 + \\| \\hat{\\pmb{u}}_0 - \\pmb{\\theta}^* \\|_2 < \\epsilon'$ , which concludes the proof.", + "bbox": [ + 169, + 864, + 741, + 881 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/f7898de853e2fec26ec19d41b0d08e55c88c36513bdf75804aa359d44976016a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 866, + 823, + 878 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Finally, we introduce a lemma 
adapted from Thm. D.4 of which bounds the movement of GF. Lyu et al. (2022).", + "bbox": [ + 169, + 895, + 823, + 922 + ], + "page_idx": 36 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Lemma K.10. Assume that $\\| \\pmb{\\theta}_0 - \\pmb{\\theta}^*\\|_2 < \\sqrt{\\frac{\\mu}{\\rho}}\\epsilon'$ . The gradient flow $\\pmb{\\theta}(t) = -\\frac{\\mathrm{d}\\mathcal{L}(\\pmb{\\theta}(t))}{\\mathrm{d}t}$ starting at $\\pmb{\\theta}_0$ converges to a point in $U$ and", + "bbox": [ + 169, + 101, + 823, + 138 + ], + "page_idx": 37 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\boldsymbol {\\theta} _ {0} - \\lim _ {t \\rightarrow + \\infty} \\boldsymbol {\\theta} (t) \\right\\| _ {2} \\leq \\sqrt {\\frac {2}{\\mu}} \\sqrt {\\mathcal {L} (\\boldsymbol {\\theta} _ {0}) - \\mathcal {L} ^ {*}} \\leq \\sqrt {\\frac {\\rho}{\\mu}} \\| \\boldsymbol {\\theta} _ {0} - \\boldsymbol {\\theta} ^ {*} \\| _ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 143, + 707, + 180 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Proof. Let $T \\coloneqq \\inf \\{t : \\theta \\notin U\\}$ . 
Then for all $t < T$ ,", + "bbox": [ + 169, + 191, + 522, + 208 + ], + "page_idx": 37 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {\\mathrm {d}}{\\mathrm {d} t} \\left(\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}\\right) ^ {1 / 2} = \\frac {1}{2} \\left(\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}\\right) ^ {- 1 / 2} \\cdot \\left\\langle \\nabla \\mathcal {L} (\\boldsymbol {\\theta}), \\frac {\\mathrm {d} \\boldsymbol {\\theta}}{\\mathrm {d} t} \\right\\rangle \\\\ = - \\frac {1}{2} (\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}) ^ {- 1 / 2} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta}) \\| _ {2} \\| \\frac {\\mathrm {d} \\boldsymbol {\\theta}}{\\mathrm {d} t} \\| _ {2}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 212, + 704, + 277 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "By $\\mu$ -PL, $\\|\\nabla \\mathcal{L}(\\pmb{\\theta})\\|_2 \\geq \\sqrt{2\\mu(\\mathcal{L}(\\pmb{\\theta}) - \\mathcal{L}^*)}$ . 
Hence,", + "bbox": [ + 169, + 282, + 509, + 300 + ], + "page_idx": 37 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\mathrm {d}}{\\mathrm {d} t} \\left(\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}\\right) ^ {1 / 2} \\leq - \\frac {\\sqrt {2 \\mu}}{2} \\| \\frac {\\mathrm {d} \\boldsymbol {\\theta}}{\\mathrm {d} t} \\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 304, + 622, + 335 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Integrating both sides, we have", + "bbox": [ + 169, + 339, + 380, + 353 + ], + "page_idx": 37 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {0} ^ {T} \\| \\frac {\\mathrm {d} \\boldsymbol {\\theta} (\\tau)}{\\mathrm {d} \\tau} \\| \\mathrm {d} \\tau \\leq \\frac {2}{\\sqrt {2 \\mu}} \\left(\\mathcal {L} \\left(\\boldsymbol {\\theta} _ {0}\\right) - \\mathcal {L} ^ {*}\\right) ^ {1 / 2} \\leq \\sqrt {\\frac {\\rho}{\\mu}} \\| \\boldsymbol {\\theta} _ {0} - \\boldsymbol {\\theta} ^ {*} \\| _ {2} < \\epsilon^ {\\prime},\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 357, + 723, + 393 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "where the second inequality uses $\\rho$ -smoothness of $\\mathcal{L}$ . Therefore, $T = +\\infty$ and $\\pmb{\\theta}(t)$ converges to some point in $U$ .", + "bbox": [ + 169, + 397, + 826, + 428 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "K.4 CONSTRUCTION OF WORKING ZONES", + "text_level": 1, + "bbox": [ + 171, + 443, + 477, + 455 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "We construct four nested working zones $(\\Gamma^{\\epsilon_0},\\Gamma^{\\epsilon_1},\\Gamma^{\\epsilon_2},\\Gamma^{\\epsilon_3})$ in the neighborhood of $\\Gamma$ . Later we will show that the local iterates $\\pmb{\\theta}_{k,t}^{(s)}\\in \\Gamma^{\\epsilon_2}$ and the global iterates $\\bar{\\pmb{\\theta}}^{(s)}\\in \\Gamma^{\\epsilon_0}$ with high probability after $\\mathcal{O}(\\log \\frac{1}{\\eta})$ rounds. 
The following lemma illustrates the properties the working zones should satisfy.", + "bbox": [ + 169, + 468, + 823, + 521 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Lemma K.11 (Working zone lemma). There exists constants $\\epsilon_0 < \\epsilon_1 < \\epsilon_2 < \\epsilon_3$ such that $(\\Gamma^{\\epsilon_0}, \\Gamma^{\\epsilon_1}, \\Gamma^{\\epsilon_2}, \\Gamma^{\\epsilon_3})$ satisfy the following properties:", + "bbox": [ + 169, + 523, + 825, + 554 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. $\\mathcal{L}$ satisfies $\\mu$ -PL in $\\Gamma^{\\epsilon_3}$ for some $\\mu > 0$ .", + "2. Any gradient flow starting in $\\Gamma^{\\epsilon_2}$ converges to some point in $\\Gamma$ . Then, by Falconer (1983), $\\Phi(\\cdot)$ is $\\mathcal{C}^\\infty$ in $\\Gamma^{\\epsilon_2}$ .", + "3. Any $\\pmb{\\theta} \\in \\Gamma^{\\epsilon_1}$ has an $\\epsilon_1$ -neighborhood $B^{\\epsilon_1}(\\pmb{\\theta})$ such that $B^{\\epsilon_1}(\\pmb{\\theta}) \\subseteq \\Gamma^{\\epsilon_2}$ .", + "4. Any gradient descent starting in $\\Gamma^{\\epsilon_0}$ with sufficiently small learning rate will stay in $\\Gamma^{\\epsilon_1}$ ." + ], + "bbox": [ + 178, + 561, + 823, + 662 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Proof. Let $\\bar{\\theta}^{(0)}$ be initialized such that $\\Phi (\\bar{\\theta}^{(0)})\\in \\Gamma$ . Let $\\mathcal{Z}$ be the set of all points on the gradient flow trajectory starting from $\\bar{\\theta}^{(0)}$ and $\\mathcal{Z}^{\\epsilon}$ be the $\\epsilon$ -neighborhood of $\\mathcal{Z}$ , where $\\epsilon$ is a positive constant. Since the gradient flow converges to $\\phi^{(0)}$ , $\\mathcal{Z}$ and $\\mathcal{Z}^{\\epsilon}$ are bounded.", + "bbox": [ + 169, + 676, + 823, + 724 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "We construct four nested working zones. By Lemma H.3 in Lyu et al. 
(2022), there exists an $\\epsilon_3$ -neighborhood of $\\Gamma$ , $\\Gamma^{\\epsilon_3}$ , such that $\\mathcal{L}$ satisfies $\\mu$ -PL for some $\\mu > 0$ . Let $\\mathcal{M}$ be the convex hull of $\\Gamma^{\\epsilon_3} \\cup \\mathcal{Z}^\\epsilon$ and $\\mathcal{M}^{\\epsilon_4}$ be the $\\epsilon_4$ -neighborhood of $\\mathcal{M}$ where $\\epsilon_4$ is a positive constant. Then $\\mathcal{M}^{\\epsilon_4}$ is bounded.", + "bbox": [ + 169, + 729, + 826, + 786 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Define $\\rho_{2} = \\sup_{\\pmb{\\theta}\\in \\mathcal{M}^{\\epsilon_{4}}}\\| \\nabla^{2}\\mathcal{L}(\\pmb {\\theta})\\|_{2}$ and $\\rho_{3} = \\sup_{\\mathcal{M}^{\\epsilon_{4}}}\\| \\nabla^{3}\\mathcal{L}(\\pmb {\\theta})\\|_{2}$ . By Lemma K.10, we can construct an $\\epsilon_{2}$ -neighborhood of $\\Gamma$ where $\\epsilon_{2} < \\sqrt{\\frac{\\mu}{\\rho_{2}}}\\epsilon_{3}$ such that all GF starting in $\\Gamma^{\\epsilon_2}$ converges to $\\Gamma$ . By Falconer (1983), $\\Phi (\\cdot)$ is $\\mathcal{C}^2$ in $\\Gamma^{\\epsilon_3}$ . Define $\\nu_{1} = \\sup_{\\pmb {\\theta}\\in \\Gamma^{\\epsilon_{3}}}\\| \\partial \\Phi (\\pmb {\\theta})\\|_{2}$ and $\\nu_{2} = \\sup_{\\pmb {\\theta}\\in \\Gamma^{\\epsilon_{3}}}\\| \\partial^{2}\\Phi (\\pmb {\\theta})\\|_{2}$ . We also construct an $\\epsilon_{1}$ neighborhood of $\\Gamma$ , $\\Gamma^{\\epsilon_1}$ , where $\\epsilon_{1}\\leq \\frac{1}{2}\\epsilon_{2} < \\frac{1}{2}\\sqrt{\\frac{\\mu}{\\rho_{2}}}\\epsilon_{3}$ such that all $\\pmb {\\theta}\\in \\Gamma^{\\epsilon_1}$ has an $\\epsilon_{1}$ neighborhood where $\\Phi$ is well defined. 
Finally, by Lemma K.9, there exists an $\\epsilon_0$ -neighborhood of $\\Gamma$ where $\\epsilon_0\\leq \\frac{1}{4}\\sqrt{\\frac{\\mu}{\\rho_2}}\\epsilon_1$ such that all gradient descent iterates starting in $\\Gamma^{\\epsilon_0}$ with $\\eta \\leq \\frac{1}{\\rho_2}$ will stay in $\\Gamma^{\\epsilon_1}$ .", + "bbox": [ + 169, + 791, + 826, + 928 + ], + "page_idx": 37 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Note that the notions of $\\mathcal{Z}^{\\epsilon}$ , $\\mathcal{M}^{\\epsilon_4}$ , $\\rho_2$ , $\\rho_3$ , $\\nu_{1}$ , and $\\nu_{2}$ defined in the proof will be useful in the remaining part of this section. When analyzing the limiting dynamics of Local SGD, we will show that all $\\pmb{\\theta}_{k,t}^{(s)}$ stays in $\\Gamma^{\\epsilon_2}$ , $\\tilde{\\pmb{u}}_t^{(s)} \\in \\Gamma^{\\epsilon_1}$ , $\\tilde{\\pmb{\\theta}}^{(s)} \\in \\Gamma^{\\epsilon_0}$ with high probability after $\\mathcal{O}(\\log \\frac{1}{\\eta})$ rounds.", + "bbox": [ + 169, + 103, + 823, + 154 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "K.5 PHASE 1:ITERATE APPROACHING THE MANIFOLD", + "text_level": 1, + "bbox": [ + 171, + 170, + 571, + 186 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "The approaching phase can be further divided into two subphases. In the first subphase, $\\bar{\\theta}^{(0)}$ is initialized such that $\\phi^{(0)}\\in \\Gamma$ . We will show that after a constant number of rounds $s_0$ , $\\bar{\\theta}^{(s_0)}$ goes to the inner part of $\\Gamma^{\\epsilon_0}$ such that $\\| \\bar{\\theta}^{(s_0)} - \\phi^{(0)}\\| _2\\leq c\\epsilon_0$ with high probability, where $0 < c < 1$ and the constants will be specified later (see Appendix K.5.2). 
In the second subphase, we show that the iterate can reach within $\\tilde{\\mathcal{O}} (\\sqrt{\\eta})$ distance from $\\Gamma$ after $\\mathcal{O}(\\log \\frac{1}{\\eta})$ rounds with high probability (see Appendix K.5.3).", + "bbox": [ + 169, + 196, + 826, + 289 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "K.5.1 ADDITIONAL NOTATIONS", + "text_level": 1, + "bbox": [ + 171, + 305, + 406, + 319 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Consider an auxiliary sequence $\\{\\tilde{u}_t^{(s)}\\}$ where $\\tilde{\\pmb{u}}_0^{(s)} = \\bar{\\pmb{\\theta}}^{(s)}$ and $\\tilde{\\pmb{u}}_{t + 1}^{(s)} = \\tilde{\\pmb{u}}_t^{(s)} - \\eta \\nabla \\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})$ , $0\\leq t\\leq H - 1$ . Define $\\tilde{\\Delta}_{k,t}^{(s)}\\coloneqq \\pmb{\\theta}_{k,t}^{(s)} - \\tilde{\\pmb{u}}_t^{(s)}$ to be the difference between the local iterate and the gradient descent iterate. Notice that $\\tilde{\\Delta}_{k,0}^{(s)} = 0$ , for all $k$ and $s$ .", + "bbox": [ + 169, + 327, + 823, + 387 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Consider a gradient flow $\\{\\pmb{u}(t)\\}_{t\\geq 0}$ with the initial condition $\\pmb{u}(0) = \\bar{\\pmb{\\theta}}^{(0)}$ and converges to $\\phi^{(0)}\\in \\Gamma$ . For simplicity, let $\\pmb{u}_t^{(s)}\\coloneqq \\pmb {u}(s\\alpha +t\\eta)$ be the gradient flow after $s$ rounds plus $t$ steps. Let $s_0$ be the smallest number such that $\\| \\pmb{u}_0^{(s_0)} - \\pmb{\\phi}^{(0)}\\| _2\\leq \\frac{1}{4}\\sqrt{\\frac{\\mu}{\\rho_2}}\\epsilon_0$ . 
Note that $s_0$ is a constant independent of $\\eta$ .", + "bbox": [ + 169, + 393, + 823, + 465 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "In this subsection, the minimum value of the loss in Appendix K.3 corresponds to the loss value on $\\Gamma$ , i.e., $\\mathcal{L}^{*} = \\mathcal{L}(\\phi), \\forall \\phi \\in \\Gamma$ .", + "bbox": [ + 169, + 470, + 823, + 501 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "We also define the following sequence $\\{\\tilde{\\mathbf{Z}}_{k,t}^{(s)}\\}_{t = 0}^{H}$ that will be used in the proof. Define", + "bbox": [ + 171, + 507, + 746, + 527 + ], + "page_idx": 38 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}))\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\tilde {\\boldsymbol {Z}} _ {k, 0} ^ {(s)} = \\boldsymbol {0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 537, + 705, + 580 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "K.5.2 PROOF FOR SUBPHASE 1", + "text_level": 1, + "bbox": [ + 171, + 595, + 403, + 609 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "First, we have the following lemma about the concentration of $\\tilde{\\mathbf{Z}}_{k,t}^{(s)}$ .", + "bbox": [ + 169, + 619, + 619, + 640 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Lemma K.12 (Concentration property of $\\{\\tilde{\\mathbf{Z}}_{k,t}^{(s)}\\}_{t = 0}^{H}$ ). 
Given $\\bar{\\theta}^{(s)}$ such that $\\tilde{\\boldsymbol{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon$ for all $0\\leq t\\leq H$ , then with probability at least $1 - \\delta$ ,", + "bbox": [ + 169, + 645, + 826, + 679 + ], + "page_idx": 38 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 H K}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n$$\n", + "text_format": "latex", + "bbox": [ + 284, + 688, + 710, + 722 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "where $\\tilde{C}_1\\coloneqq \\exp (\\alpha \\rho_2)$", + "bbox": [ + 171, + 731, + 333, + 750 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Proof. For each $\\tilde{\\mathbf{Z}}_{k,t}^{(s)}$ , construct a sequence $\\{\\tilde{\\mathbf{Z}}_{k,t,t'}^{(s)}\\}_{t'=0}^t$ :", + "bbox": [ + 169, + 770, + 553, + 792 + ], + "page_idx": 38 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {Z}} _ {k, t, t ^ {\\prime}} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t ^ {\\prime} - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}))\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\tilde {\\boldsymbol {Z}} _ {k, t, 0} ^ {(s)} = \\boldsymbol {0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 279, + 801, + 715, + 845 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Since $\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon$ , we have $\\| \\nabla^2\\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})\\| _2\\leq \\rho_2$ for all $0\\leq t\\leq H$ . 
Then, for all $\\tau$ and $t$ ,", + "bbox": [ + 169, + 856, + 790, + 875 + ], + "page_idx": 38 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\right\\| _ {2} \\leq (1 + \\rho_ {2} \\eta) ^ {H} \\leq \\exp (\\alpha \\rho_ {2}) = \\tilde {C} _ {1}.\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 883, + 709, + 928 + ], + "page_idx": 38 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Notice that for all $0 \\leq t \\leq H$ , $\\{\\tilde{Z}_{k,t,t'}^{(s)}\\}_{t'=0}^t$ is a martingale with $\\| \\tilde{Z}_{k,t,t'}^{(s)} - \\tilde{Z}_{k,t,t'-1}^{(s)} \\|_2 \\leq \\tilde{C}_1 \\sigma_{\\max}$ . 
By Azuma-Hoeffding's inequality,", + "bbox": [ + 169, + 101, + 823, + 136 + ], + "page_idx": 39 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {P} (\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\geq \\epsilon^ {\\prime}) \\leq 2 \\exp \\left(\\frac {- \\epsilon^ {\\prime 2}}{2 t (\\tilde {C} _ {1} \\sigma_ {\\max}) ^ {2}}\\right) \\leq 2 \\exp \\left(\\frac {- \\epsilon^ {\\prime 2}}{2 H (\\tilde {C} _ {1} \\sigma_ {\\max}) ^ {2}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 141, + 751, + 200 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Taking a union bound on all $k \\in [K]$ and $0 \\leq t \\leq H$ , we can conclude that with probability at least $1 - \\delta$ ,", + "bbox": [ + 169, + 204, + 823, + 234 + ], + "page_idx": 39 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 H K}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ].\n$$\n", + "text_format": "latex", + "bbox": [ + 284, + 239, + 712, + 273 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/79f43c4c7290c0e6a1cbcb998982f7f4fc2061b5a5c08015aef42898ad274bc7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 280, + 823, + 292 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "The following lemma states that the gradient descent iterates will closely track the gradient flow with the same initial point.", + "bbox": [ + 169, + 310, + 823, + 339 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Lemma K.13. Denote $G \\coloneqq \\sup_{t \\geq 0} \\| \\nabla \\mathcal{L}(\\boldsymbol{u}(t)) \\|_2$ as the upper bound of the gradient on the gradient flow trajectory. 
If $\\| \\tilde{\\boldsymbol{u}}_t^{(s)} - \\boldsymbol{u}_t^{(s)} \\|_2 = \\mathcal{O}(\\sqrt{\\eta})$ , then for all $0 \\leq t \\leq H$ , the closeness of $\\tilde{\\boldsymbol{u}}_t^{(s)}$ and $\\boldsymbol{u}_t^{(s)}$ is bounded by", + "bbox": [ + 169, + 342, + 825, + 395 + ], + "page_idx": 39 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\tilde {C} _ {1} \\eta G,\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 402, + 653, + 424 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "where $\\tilde{C}_1 = \\exp (\\alpha \\rho_2)$", + "bbox": [ + 171, + 431, + 328, + 449 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Proof. We prove by induction that", + "bbox": [ + 171, + 462, + 403, + 478 + ], + "page_idx": 39 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\right\\| _ {2} \\leq (1 + \\rho_ {2} \\eta) ^ {t} \\left\\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\right\\| _ {2} + \\rho_ {2} \\eta^ {2} G \\sum_ {\\tau = 0} ^ {t - 1} (1 + \\rho_ {2} \\eta) ^ {\\tau}. \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 484, + 825, + 526 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "When $t = 0$ , (34) holds trivially. 
Assume that (34) holds for $0 \\leq \\tau \\leq t$ , then", + "bbox": [ + 171, + 532, + 676, + 547 + ], + "page_idx": 39 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)} - \\boldsymbol {u} _ {t + 1} ^ {(s)} = \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\left(\\boldsymbol {u} _ {t} - \\int_ {s \\alpha + t \\eta} ^ {s \\alpha + (t + 1) \\eta} \\nabla \\mathcal {L} (\\boldsymbol {u} (v)) d v\\right) \\\\ = \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} - \\eta (\\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)})) \\\\ - \\int_ {s \\alpha + t \\eta} ^ {s \\alpha + (t + 1) \\eta} \\left(\\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)}) - \\nabla \\mathcal {L} (\\boldsymbol {u} (v))\\right) d v. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 554, + 740, + 662 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "By smoothness of $\\mathcal{L}$", + "bbox": [ + 171, + 667, + 313, + 683 + ], + "page_idx": 39 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)}) - \\nabla \\mathcal {L} (\\boldsymbol {u} (v)) \\| _ {2} \\leq \\rho_ {2} \\| \\boldsymbol {u} _ {t} ^ {(s)} - \\boldsymbol {u} (v) \\| _ {2} \\\\ \\leq \\rho_ {2} \\int_ {s \\alpha + t \\eta} ^ {v} \\| \\nabla \\mathcal {L} (\\boldsymbol {u} (w)) \\| _ {2} d w \\\\ \\leq \\rho_ {2} \\eta G. 
\\\\ \end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 689, + 692, + 765 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Since $\\rho_2\\eta^2 G\\sum_{\\tau = 0}^{t - 1}(1 + \\rho_2\\eta)^\\tau \\leq \\eta G(1 + \\rho_2\\eta)^t\\leq \\exp (\\alpha \\rho_2)\\eta G$ , then $\\| \\tilde{\\pmb{u}}_t^{(s)} - \\pmb {u}_t^{(s)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta})$ which implies that $\\tilde{\\pmb{u}}_t^{(s)}\\in \\mathcal{M}^{\\epsilon_4}$ . Hence, $\\| \\nabla \\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)}) - \\nabla \\mathcal{L}(\\pmb {u}_t^{(s)})\\| _2\\leq \\rho_2\\| \\tilde{\\pmb{u}}_t^{(s)} - \\pmb {u}_t^{(s)}\\| _2.$", + "bbox": [ + 169, + 773, + 823, + 811 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "By triangle inequality,", + "bbox": [ + 171, + 816, + 323, + 832 + ], + "page_idx": 39 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)} - \\boldsymbol {u} _ {t + 1} ^ {(s)} \\right\\| _ {2} \\leq (1 + \\rho_ {2} \\eta) \\left\\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\right\\| _ {2} + \\rho_ {2} \\eta^ {2} G \\\\ \\leq \\left(1 + \\rho_ {2} \\eta\\right) ^ {t + 1} \\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\rho_ {2} \\eta^ {2} G \\sum_ {\\tau = 0} ^ {t} (1 + \\rho_ {2} \\eta) ^ {\\tau}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 837, + 743, + 902 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "which concludes the induction step. 
Applying $1 + \\rho_{2}\\eta \\leq \\exp (\\rho_{2}\\eta)$ , we have the lemma.", + "bbox": [ + 171, + 909, + 746, + 925 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/12b0357f5a598edb572a3e0dbbcd1ac182759da65bdd92f6d2610597a7c3f138.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 909, + 823, + 921 + ], + "page_idx": 39 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Utilizing the concentration probability of $\\{\\tilde{Z}_{k,t}^{(s)}\\}$ , we can obtain the following lemma which implies that the Local SGD iterates will closely track the gradient descent iterates with high probability.", + "bbox": [ + 169, + 102, + 823, + 136 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Lemma K.14. 
Given $\\bar{\\theta}^{(s)}$ such that $\\tilde{u}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon$ for all $0\\leq t\\leq H$ , then for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ with probability at least $1 - \\delta$ , there exists a constant $\\tilde{C}_3$ such that", + "bbox": [ + 169, + 138, + 823, + 172 + ], + "page_idx": 40 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 176, + 691, + 209 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 169, + 212, + 202, + 226 + ], + "page_idx": 40 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 229, + 617, + 263 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Proof. Since $\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon$ for all $0\\leq t\\leq H$ , we have $\\| \\nabla^2\\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})\\| _2\\leq \\rho_2$ . 
According to the update rule for $\\theta_{k,t}^{(s)}$ and $\\tilde{\\pmb{u}}_t^{(s)}$ ,", + "bbox": [ + 169, + 277, + 823, + 315 + ], + "page_idx": 40 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} = \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)}, \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 319, + 823, + 340 + ], + "page_idx": 40 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)} = \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right). \\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 343, + 823, + 364 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Subtracting (36) from (35) gives", + "bbox": [ + 169, + 367, + 388, + 382 + ], + "page_idx": 40 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\tilde {\\boldsymbol {\\Delta}} _ {k, t + 1} ^ {(s)} = \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta (\\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) - \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)})) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\\\ = \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\eta \\tilde {\\boldsymbol {v}} _ {k, t} ^ {(s)}. 
\\tag {37} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 385, + 823, + 431 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Here, $\\tilde{\\pmb{v}}_{k,t}^{(s)} = (1 - \\beta_{k,t}^{(s)})\\pmb{\\theta}_{k,t}^{(s)} + \\beta_{k,t}^{(s)}\\tilde{\\pmb{u}}_{k,t}^{(s)}$ , where $\\beta_{k,t}^{(s)} \\in (0,1)$ depends on $\\pmb{\\theta}_{k,t}^{(s)}$ and $\\tilde{\\pmb{u}}_t^{(s)}$ . Therefore, $\\| \\tilde{\\pmb{v}}_{k,t}^{(s)}\\| _2 \\leq \\frac{\\rho_3}{2}\\| \\tilde{\\pmb{\\Delta}}_{k,t}^{(s)}\\| _2^2$ if $\\pmb{\\theta}_{k,t}^{(s)} \\in \\mathcal{M}^{\\epsilon_4}$ . Applying (37) $t$ times, we have", + "bbox": [ + 169, + 435, + 823, + 476 + ], + "page_idx": 40 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} = \\left[ \\prod_ {\\tau = 0} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {\\tau} ^ {(s)})) \\right] \\tilde {\\boldsymbol {\\Delta}} _ {k, 0} ^ {(s)} - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)})) \\boldsymbol {z} _ {k, \\tau} ^ {(s)} \\\\ + \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\tilde {\\boldsymbol {v}} _ {k, \\tau} ^ {(s)}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 479, + 751, + 568 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "By Cauchy-Schwartz inequality, triangle inequality and the definition of $\\tilde{\\pmb{Z}}_{k,t}^{(s)}$ , if for all $0 \\leq \\tau \\leq t - 1$ and $k \\in [K]$ , $\\pmb{\\theta}_{k,\\tau}^{(s)} \\in \\mathcal{M}^{\\epsilon_4}$ , then we have", + "bbox": [ + 169, + 575, + 823, + 614 + ], + "page_idx": 40 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\eta \\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} + \\frac {1}{2} \\eta \\rho_ {3} \\sum_ {\\tau = 0} ^ {t - 1} \\tilde {C} _ {1} \\left\\| \\tilde {\\boldsymbol {\\Delta}} _ {k, \\tau} ^ {(s)} \\right\\| _ {2} ^ {2}. \\tag {38}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 619, + 823, + 660 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Applying Lemma K.12 and substituting in the value of $H$ , we have that with probability at least $1 - \\delta$ ,", + "bbox": [ + 169, + 662, + 823, + 691 + ], + "page_idx": 40 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in K, 0 \\leq t \\leq H. 
\\tag {39}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 696, + 823, + 736 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Now we show by induction that for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , when (39) holds, there exists a constant $\\tilde{C}_2 > 2\\sigma_{\\max}\\sqrt{2\\alpha}\\tilde{C}_1$ such that $\\| \\tilde{\\Delta}_{k,t}^{(s)}\\|_2 \\leq \\tilde{C}_2\\sqrt{\\eta\\log\\frac{2\\alpha K}{\\eta\\delta}}$ .", + "bbox": [ + 169, + 739, + 823, + 780 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "When $t = 0$ , $\\tilde{\\Delta}_{k,0}^{(s)} = 0$ . Assume that $\\| \\tilde{\\Delta}_{k,\\tau}^{(s)} \\|_2 \\leq \\tilde{C}_2 \\sqrt{\\eta \\log \\frac{2\\alpha K}{\\eta \\delta}}$ , for all $k \\in [K]$ , $0 \\leq \\tau \\leq t - 1$ . Then for all $0 \\leq \\tau \\leq t - 1$ , $\\pmb{\\theta}_{k,\\tau}^{(s)} \\in \\mathcal{M}^{\\epsilon_4}$ . Therefore, we can apply (38) and obtain", + "bbox": [ + 169, + 787, + 823, + 832 + ], + "page_idx": 40 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\tilde {\\Delta} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\eta \\| \\tilde {Z} _ {k, t} ^ {(s)} \\| _ {2} + \\frac {1}{2} \\eta \\rho_ {3} \\sum_ {\\tau = 0} ^ {t - 1} \\tilde {C} _ {1} \\| \\tilde {\\Delta} _ {k, \\tau} ^ {(s)} \\| _ {2} ^ {2} \\\\ \\leq \\tilde {C} _ {1} \\sigma_ {\\max} \\sqrt {2 \\alpha \\eta \\log \\frac {2 \\alpha K}{\\eta \\delta}} + \\frac {1}{2} \\tilde {C} _ {1} \\tilde {C} _ {2} ^ {2} \\sigma_ {\\max} ^ {2} \\alpha \\rho_ {3} \\eta \\log \\frac {2 \\alpha K}{\\eta \\delta}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 837, + 723, + 922 + ], + "page_idx": 40 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Given that $\\tilde{C}_2 \\geq 2\\sigma_{\\max}\\sqrt{2\\alpha}\\tilde{C}_1$ and $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , when $\\eta$ is sufficiently small, $\\|\\tilde{\\Delta}_{k,t}^{(s)}\\|_2 \\leq \\tilde{C}_2\\sqrt{\\eta\\log\\frac{2\\alpha K}{\\eta\\delta}}$ .", + "bbox": [ + 169, + 101, + 823, + 145 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "To sum up, for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ , $\\| \\tilde{\\Delta}_{k,t}^{(s)} \\|_2 \\leq \\tilde{C}_2 \\sqrt{\\eta \\log \\frac{2\\alpha K}{\\eta \\delta}}$ for all $k \\in [K]$ , $0 \\leq t \\leq H$ . 
By triangle inequality,", + "bbox": [ + 169, + 152, + 825, + 191 + ], + "page_idx": 41 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\mathbf {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\frac {1}{K} \\sum_ {k \\in [ K ]} \\| \\tilde {\\mathbf {\\Delta}} _ {k, H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {2} \\sqrt {\\eta \\log \\frac {2 \\alpha K}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 196, + 699, + 242 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/a563d52b12a9a29f592334f6cdd7f71cbaef23261e24e30e5d89320540334e5b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 244, + 823, + 258 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "The combination of Lemma K.13 and Lemma K.14 leads to the following lemma, which states that the Local SGD iterate will enter $\\Gamma^{\\epsilon_1}$ after $s_0$ rounds with high probability.", + "bbox": [ + 169, + 273, + 823, + 303 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Lemma K.15. Given $\\bar{\\theta}^{(0)}$ such that $\\Phi (\\bar{\\theta}^{(0)})\\in \\Gamma$ , then for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , there exists a positive constant $\\tilde{C}_4$ such that with probability at least $1 - \\delta$ ,", + "bbox": [ + 169, + 306, + 825, + 339 + ], + "page_idx": 41 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {0})} - \\boldsymbol {\\phi} ^ {(0)} \\| _ {2} \\leq \\frac {1}{4} \\sqrt {\\frac {\\mu}{\\rho_ {2}}} \\epsilon_ {0} + \\tilde {C} _ {4} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 344, + 653, + 378 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Proof. 
First, we prove by induction that for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , when", + "bbox": [ + 171, + 392, + 617, + 407 + ], + "page_idx": 41 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 H K s _ {0}}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < s _ {0}, \\tag {40}\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 412, + 825, + 446 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "the closeness of $\\bar{\\pmb{\\theta}}^{(s)}$ and $\\pmb{u}_0^{(s)}$ is bounded by", + "bbox": [ + 169, + 450, + 468, + 470 + ], + "page_idx": 41 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\right\\| _ {2} \\leq \\sum_ {l = 1} ^ {s} \\tilde {C} _ {1} ^ {l} \\left(\\eta G + \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {0}}{\\eta \\delta}}\\right), \\quad \\forall 0 \\leq s \\leq s _ {0}. \\tag {41}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 474, + 825, + 513 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "When $s = 0$ , $\\bar{\\theta}^{(0)} = \\pmb{u}_0^{(0)}$ . Assume that (41) holds for round $s$ . 
Then by Lemma K.13, for all $0 \\leq t \\leq H$ ,", + "bbox": [ + 169, + 518, + 823, + 550 + ], + "page_idx": 41 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\tilde {C} _ {1} \\eta G \\\\ = \\tilde {C} _ {1} \\| \\bar {\\boldsymbol {\\theta}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\tilde {C} _ {1} \\eta G \\\\ \\leq \\sum_ {l = 1} ^ {s} \\tilde {C} _ {1} ^ {l + 1} \\left(\\eta G + \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {0}}{\\eta \\delta}}\\right) + \\tilde {C} _ {1} \\eta G. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 553, + 704, + 637 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Therefore, for sufficiently small $\\eta$ , $\\tilde{\\pmb{u}}_t^{(s)} \\in \\mathcal{Z}^\\epsilon$ , $\\forall 0 \\leq t \\leq H$ . 
Combining the above inequality with Lemma K.14, we have", + "bbox": [ + 169, + 645, + 823, + 674 + ], + "page_idx": 41 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\boldsymbol {u} _ {0} ^ {(s + 1)} \\right\\| _ {2} = \\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\boldsymbol {u} _ {H} ^ {(s)} \\right\\| _ {2} \\\\ \\leq \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} + \\| \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} - \\boldsymbol {u} _ {H} ^ {(s)} \\| _ {2} \\\\ \\leq \\sum_ {l = 1} ^ {s + 1} \\tilde {C} _ {1} ^ {l + 1} \\left(\\eta G + \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {0}}{\\eta \\delta}}\\right), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 678, + 694, + 763 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "which concludes the induction.", + "bbox": [ + 171, + 768, + 380, + 782 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Therefore, when (40) holds, there exists a positive constant $\\tilde{C}_4$ such that", + "bbox": [ + 171, + 789, + 648, + 804 + ], + "page_idx": 41 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {0})} - \\boldsymbol {u} _ {0} ^ {(s _ {0})} \\right\\| _ {2} \\leq \\tilde {C} _ {4} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 809, + 616, + 843 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "By definition of $\\pmb{u}_0^{(s_0)}$ ,", + "bbox": [ + 171, + 847, + 323, + 867 + ], + "page_idx": 41 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {0})} - \\phi^ {(0)} \\right\\| _ {2} \\leq \\frac {1}{4} \\sqrt {\\frac {\\mu}{\\rho_ {2}}} \\epsilon_ {0} + \\tilde {C} _ {4} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 871, + 653, + 
906 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Finally, according to Lemma K.12, (40) holds with probability at least $1 - \\delta$ .", + "bbox": [ + 171, + 907, + 678, + 924 + ], + "page_idx": 41 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "K.5.3 PROOF FOR SUBPHASE 2", + "text_level": 1, + "bbox": [ + 171, + 103, + 405, + 118 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "In subphase 2, we show that the iterate can reach within $\\tilde{\\mathcal{O}} (\\sqrt{\\eta})$ distance from $\\Gamma$ after $\\mathcal{O}(\\log \\frac{1}{\\eta})$ rounds with high probability. The following lemma manifests how the potential function $\\tilde{\\Psi} (\\bar{\\pmb{\\theta}}^{(s)})$ evolves after one round.", + "bbox": [ + 169, + 126, + 823, + 174 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Lemma K.16. 
Given $\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}$ , for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$", + "bbox": [ + 169, + 176, + 740, + 194 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} _ {k, t} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) \\leq \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H\n$$\n", + "text_format": "latex", + "bbox": [ + 245, + 202, + 750, + 234 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 169, + 241, + 202, + 253 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 260, + 728, + 294 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "where $\\tilde{C}_5$ is a positive constant.", + "bbox": [ + 169, + 301, + 383, + 316 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Proof. Since $\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}$ , then for all $0\\leq t\\leq H$ , $\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_1}$ by the definition of the working zone. 
By Lemma K.6, for $\\eta \\leq \\frac{1}{\\rho_2}$ ,", + "bbox": [ + 169, + 334, + 823, + 368 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) ^ {t} \\left(\\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}\\right) \\leq \\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}, \\quad \\forall 0 \\leq t \\leq H.\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 375, + 754, + 400 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Specially, for $t = H$", + "bbox": [ + 169, + 406, + 315, + 421 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) ^ {\\frac {\\alpha}{\\eta}} \\left(\\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}\\right) \\leq \\exp (- \\alpha \\mu) \\left(\\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 428, + 746, + 452 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 459, + 245, + 473 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 479, + 609, + 500 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "According to the proof of Lemma K.14, for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , when", + "bbox": [ + 171, + 506, + 616, + 522 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} 
\\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, \\tag {42}\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 529, + 823, + 570 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "there exists a constant $\\tilde{C}_3$ such that", + "bbox": [ + 169, + 577, + 406, + 593 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 599, + 689, + 633 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 638, + 200, + 652 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 657, + 617, + 691 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Since $\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_1},\\forall 0\\leq t\\leq H,\\bar{\\pmb{\\theta}}^{(s + 1)}\\in \\Gamma^{\\epsilon_2}$ and $\\bar{\\pmb{\\theta}}_{k,t}^{(s)}\\in \\Gamma^{\\epsilon_2},\\forall 0\\leq t\\leq H,k\\in [K]$", + "bbox": [ + 169, + 698, + 727, + 720 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "By Lemma K.7, $\\tilde{\\Psi}(\\cdot)$ is $\\sqrt{2\\rho_2}$ -Lipschitz in $\\mathcal{M}^{\\epsilon_4}$ . 
Therefore, when (42) holds, there exists a constant $\\tilde{C}_5 := \\sqrt{2\\rho_2}\\tilde{C}_3$ such that", + "bbox": [ + 169, + 726, + 823, + 758 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq \\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) + \\sqrt {2 \\rho_ {2}} \\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\right\\| _ {2} \\\\ \\leq \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 765, + 643, + 823 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 829, + 200, + 840 + ], + "page_idx": 42 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) \\leq \\tilde {\\Psi} (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) + \\sqrt {2 \\rho_ {2}} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\\\ \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 847, + 673, + 902 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Finally, by Lemma K.12, (42) holds with probability at least $1 - \\delta$ .", + "bbox": [ + 169, + 909, + 612, + 924 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/84816759e8f94b41e6aec8648bc3a6fe2aa13567683cabc44b1d77d48be46554.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 909, + 823, + 921 + ], + "page_idx": 42 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "We are thus led to the following lemma which characterizes the evolution of the potential $\\tilde{\\Psi} (\\bar{\\theta}^{(s)})$ and $\\tilde{\\Psi} (\\pmb{\\theta}_{k,t}^{(s)})$ over multiple rounds.", + "bbox": [ + 169, + 102, + 823, + 137 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Lemma K.17. Given $\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\| _2\\leq \\frac{1}{2}\\sqrt{\\frac{\\mu}{\\rho_2}}\\epsilon_0,$ for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ and any integer $1\\le R\\le$ $R_{\\mathrm{tot}}$ , with probability at least $1 - \\delta$", + "bbox": [ + 169, + 143, + 825, + 180 + ], + "page_idx": 43 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\boldsymbol {\\theta}} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {0}}, \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq \\exp (- \\alpha \\mu s / 2) \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(0)}\\right) + \\frac {1}{1 - \\exp (- \\alpha \\mu / 2)} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}, \\forall 0 \\leq s \\leq R. 
\tag {43}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 184, + 825, + 239 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Furthermore,", + "bbox": [ + 171, + 244, + 264, + 258 + ], + "page_idx": 43 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, 0 \\leq s < R, k \\in [ K ]. \\tag {44}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 262, + 825, + 304 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Proof. We prove by induction that for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , when", + "bbox": [ + 171, + 316, + 560, + 333 + ], + "page_idx": 43 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 R \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, 0 \\leq s < R, \\tag {45}\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 339, + 825, + 378 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "then for all $0 \\leq s \\leq R$ , (43) and (44) hold.", + "bbox": [ + 169, + 383, + 455, + 398 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "When $s = 0$ , $\\bar{\\theta}^{(0)} \\in \\Gamma^{\\epsilon_0}$ and (43) trivially holds. By Lemma K.16, (44) holds. Assume that (43) and (44) hold for round $s - 1$ . 
Then for round $s$ , by Lemma K.16, $\\bar{\\theta}^{(s)} \\in \\Gamma^{\\epsilon_2}$ and", + "bbox": [ + 169, + 402, + 823, + 434 + ], + "page_idx": 43 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\Psi (\\bar {\\pmb {\\theta}} ^ {(s)}) \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(s - 1)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}} \\\\ \\leq \\exp (- \\alpha \\mu s / 2) \\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(0)}) + \\frac {1}{1 - \\exp (- \\alpha \\mu / 2)} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 439, + 728, + 523 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "where the second inequality comes from the induction hypothesis. By Lemma K.10,", + "bbox": [ + 171, + 527, + 725, + 542 + ], + "page_idx": 43 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {\\phi} ^ {(s)} \\| _ {2} \\leq \\frac {2}{\\sqrt {2 \\mu}} \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\\\ \\leq \\frac {2}{\\sqrt {2 \\mu}} \\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(0)}) + \\frac {2}{\\sqrt {2 \\mu} (1 - \\exp (- \\alpha \\mu / 2))} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}} \\\\ \\leq \\frac {1}{2} \\epsilon_ {0} + \\frac {2}{\\sqrt {2 \\mu} (1 - \\exp (- \\alpha \\mu / 2))} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 546, + 741, + 664 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Here, the last inequality uses $\\tilde{\\Psi} (\\bar{\\pmb{\\theta}}^{(0)})\\leq \\sqrt{\\frac{\\rho_2}{2}}\\| \\bar{\\pmb{\\theta}}^{(s)} - \\phi^{(0)}\\| _2\\leq \\frac{1}{2}\\sqrt{\\frac{\\mu}{2}}\\epsilon_0$ . 
Hence, when $\\eta$ is sufficiently small, $\\bar{\\pmb{\\theta}}^{(s)}\\in \\Gamma^{\\epsilon_0}$ . Still by Lemma K.16, $\\bar{\\pmb{\\theta}}_{k,t}^{(s)}\\in \\Gamma^{\\epsilon_2}$ and", + "bbox": [ + 169, + 669, + 823, + 707 + ], + "page_idx": 43 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 712, + 620, + 753 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Finally, according to Lemma K.12, (45) holds with probability at least $1 - \\delta$ .", + "bbox": [ + 171, + 756, + 676, + 772 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/eb002efdbfca18cd279a314f92e736b1ccc68740b225b627fcac84867376f49e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 779, + 823, + 791 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "The following corollary is a direct consequence of Lemma K.17 and Lemma K.10.", + "bbox": [ + 169, + 808, + 715, + 823 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Corollary K.1. Let $s_1 \\coloneqq \\lceil \\frac{20}{\\alpha \\mu} \\log \\frac{1}{\\eta} \\rceil$ . 
Given $\\| \\bar{\\pmb{\\theta}}^{(0)} - \\pmb{\\phi}^{(0)} \\|_2 \\leq \\frac{1}{2} \\sqrt{\\frac{\\mu}{\\rho_2}} \\epsilon_0$ , for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$", + "bbox": [ + 169, + 827, + 823, + 864 + ], + "page_idx": 43 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s _ {1})}\\right) \\leq \\tilde {C} _ {6} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {1})} - \\phi^ {(s _ {1})} \\| _ {2} \\leq \\tilde {C} _ {6} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\tag {46}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 869, + 825, + 902 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "where $\\tilde{C}_6$ is a constant.", + "bbox": [ + 171, + 907, + 328, + 924 + ], + "page_idx": 43 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Proof. Substituting in $R = s_1$ to Lemma K.17 and applying $\\| \\bar{\\pmb{\\theta}}^{(s_1)} - \\phi^{(s)}\\|_2 \\leq \\sqrt{\\frac{2}{\\mu}}\\tilde{\\Psi}(\\bar{\\pmb{\\theta}}^{(s_1)})$ for $\\bar{\\pmb{\\theta}}^{(s_1)} \\in \\Gamma^{\\epsilon_0}$ , we have the lemma.", + "bbox": [ + 169, + 102, + 826, + 142 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Finally, we provide a high probability bound for the change of the projection on the manifold after $s_1$ rounds $\\| \\phi^{(s_1)} - \\phi^{(0)} \\|_2$ .", + "bbox": [ + 169, + 157, + 823, + 189 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lemma K.18. Let $s_1 \\coloneqq \\lceil \\frac{20}{\\alpha \\mu} \\log \\frac{1}{\\eta} \\rceil$ . 
Given $\\| \\bar{\\theta}^{(0)} - \\phi^{(0)} \\|_2 \\leq \\frac{1}{2} \\sqrt{\\frac{\\mu}{\\rho_2}} \\epsilon_0$ . For $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ ,", + "bbox": [ + 169, + 194, + 823, + 232 + ], + "page_idx": 44 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\phi^ {(s _ {1})} - \\phi^ {(0)} \\right\\| _ {2} \\leq \\tilde {C} _ {8} \\log \\frac {1}{\\eta} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 239, + 633, + 273 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Proof. From Lemma K.17, for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , when", + "bbox": [ + 169, + 289, + 534, + 306 + ], + "page_idx": 44 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 s _ {1} \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, 0 \\leq s < s _ {1}, \\tag {47}\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 313, + 825, + 353 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "then $\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}$ , for all $0\\leq s\\leq s_{1}$ . By the definition of $\\Gamma^{\\epsilon_0}$ , $\\tilde{u}_t^{(s)}\\in \\Gamma^{\\epsilon_1}$ , for all $0\\leq t\\leq H,0\\leq s\\leq s_{1}$ . 
By triangle inequality, $\\| \\phi^{(s_1)} - \\phi^{(0)}\\| _2$ can be decomposed as follows.", + "bbox": [ + 169, + 363, + 823, + 397 + ], + "page_idx": 44 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\phi^ {(s _ {1})} - \\phi^ {(0)} \\| _ {2} \\leq \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\phi^ {(s + 1)} - \\phi^ {(s)} \\| _ {2} \\\\ \\leq \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) - \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}\\right) \\| _ {2} + \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\Phi \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}\\right) - \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) \\| _ {2}. \\tag {48} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 404, + 825, + 489 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "By Lemma K.14, when (47) hold, then for all $0 \\leq s < s_1 - 1$ ,", + "bbox": [ + 171, + 496, + 589, + 511 + ], + "page_idx": 44 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\pmb {\\theta}} ^ {(s + 1)} - \\tilde {\\pmb {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {1}}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 518, + 617, + 554 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "This implies that $\\bar{\\pmb{\\theta}}^{(s + 1)}\\in B^{\\epsilon_1}(\\tilde{\\pmb{u}}_H^{(s)})$ . Since for all $\\pmb {\\theta}\\in \\Gamma^{\\epsilon_2}$ , $\\| \\partial \\Phi (\\pmb {\\theta})\\| _2\\leq \\nu_1$ , then $\\Phi (\\cdot)$ is $\\nu_{1}$ -Lipschitz in $B^{\\epsilon_1}(\\tilde{\\pmb{u}}_H^{(s)})$ . 
This gives", + "bbox": [ + 169, + 561, + 823, + 598 + ], + "page_idx": 44 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) - \\Phi (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) \\| _ {2} \\leq \\nu_ {1} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\\\ \\leq \\nu_ {1} \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {1}}{\\eta \\delta}}. \\tag {49} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 604, + 825, + 662 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Then we analyze $\\| \\bar{\\pmb{\\theta}}^{(s + 1)} - \\tilde{\\pmb{u}}_H^{(s)}\\| _2$ . By Lemma K.9 and the definition of $\\Gamma^{\\epsilon_0}$ and $\\Gamma^{\\epsilon_1}$ , there exists $\\phi \\in \\Gamma$ such that $\\tilde{\\pmb{u}}_t^{(s)}\\in B^{\\epsilon_1}(\\phi),\\forall 0\\leq t\\leq H$ . Therefore, we can expand $\\Phi (\\tilde{\\pmb{u}}_{t + 1}^{(s)})$ as follows:", + "bbox": [ + 169, + 671, + 823, + 708 + ], + "page_idx": 44 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)}\\right) = \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\\\ = \\Phi (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\eta \\partial \\Phi (\\tilde {\\boldsymbol {u}} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)}) + \\frac {\\eta^ {2}}{2} \\partial^ {2} \\Phi (\\hat {\\boldsymbol {u}} _ {t} ^ {(s)}) [ \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}), \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) ] \\\\ = \\Phi (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) + \\frac {\\eta^ {2}}{2} \\partial^ {2} \\Phi \\left(c _ {t} ^ {(s)} \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} + (1 - c _ {t} ^ {(s)}) \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)}\\right) [ 
\\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}), \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) ], \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 220, + 715, + 774, + 801 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "where $c_t^{(s)} \\in (0,1)$ . Then we have", + "bbox": [ + 171, + 809, + 403, + 828 + ], + "page_idx": 44 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\Phi (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) - \\Phi (\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}) \\| _ {2} \\leq \\frac {\\eta^ {2}}{2} \\sum_ {t = 0} ^ {H - 1} \\| \\partial^ {2} \\Phi \\left(\\left(c _ {t} ^ {(s)} \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} + (1 - c _ {t} ^ {(s)}) \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)}\\right)\\right) [ \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} ^ {(s)}), \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) ] \\| _ {2} \\\\ \\leq \\frac {\\eta^ {2}}{2} \\nu_ {2} \\sum_ {t = 0} ^ {H - 1} \\| \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) \\| _ {2} ^ {2}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 835, + 813, + 921 + ], + "page_idx": 44 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "By Lemma K.6, $\\frac{\\eta}{2}\\| \\nabla \\mathcal{L}(\\tilde{\\boldsymbol{u}}_t^{(s)})\\| _2^2\\leq \\mathcal{L}(\\tilde{\\boldsymbol{u}}_t^{(s)}) - \\mathcal{L}(\\tilde{\\boldsymbol{u}}_{t + 1}^{(s)})$ . 
Therefore,", + "bbox": [ + 169, + 101, + 614, + 121 + ], + "page_idx": 45 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) - \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}\\right) \\right\\| _ {2} \\leq \\eta \\nu_ {2} \\left(\\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}\\right) - \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right)\\right) \\\\ \\leq \\eta \\nu_ {2} \\left[ \\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\right] ^ {2} \\\\ \\leq \\nu_ {2} \\eta \\left[ 2 \\exp (- \\alpha s \\mu) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(0)}) + \\frac {\\tilde {C} _ {5} ^ {2} \\eta}{(1 - \\exp (- \\alpha \\mu / 2)) ^ {2}} \\log \\frac {s _ {1}}{\\eta \\delta} \\right], \\tag {50} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 127, + 823, + 212 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "where the last inequality uses Cauchy-Schwartz inequality and Lemma K.17. Summing up (50), we obtain", + "bbox": [ + 169, + 217, + 823, + 244 + ], + "page_idx": 45 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\Phi (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) - \\Phi (\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}) \\| _ {2} \\leq \\nu_ {2} \\eta \\left[ 2 \\tilde {\\Psi} (\\tilde {\\boldsymbol {\\theta}} ^ {(0)}) \\sum_ {s = 0} ^ {s _ {1} - 1} \\exp (- \\alpha \\mu s) + \\frac {s _ {1} \\tilde {C} _ {5} ^ {2} \\eta}{(1 - \\exp (- \\alpha \\mu / 2)) ^ {2}} \\log \\frac {s _ {1}}{\\eta \\delta} \\right] \\\\ \\leq \\tilde {C} _ {7} \\eta \\log \\frac {1}{\\eta} \\log \\frac {1}{\\eta \\delta}, \\tag {51} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 250, + 823, + 325 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "where $\\tilde{C}_7$ is a constant. 
Substituting (49) and (51) into (48), for sufficiently small $\\eta$ , we have", + "bbox": [ + 169, + 332, + 781, + 349 + ], + "page_idx": 45 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\phi^ {(s _ {1})} - \\phi^ {(0)} \\right\\| _ {2} \\leq \\nu_ {1} \\tilde {C} _ {3} s _ {1} \\sqrt {\\eta \\log \\frac {s _ {1}}{\\eta \\delta}} + \\tilde {C} _ {7} \\eta \\log \\frac {1}{\\eta} \\log \\frac {1}{\\eta \\delta} \\\\ \\leq \\tilde {C} _ {8} \\log \\frac {1}{\\eta} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 354, + 691, + 425 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "where $\\tilde{C}_8$ is a constant. Finally, according to Lemma K.12, (47) holds with probability at least $1 - \\delta$ .", + "bbox": [ + 169, + 431, + 823, + 460 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "K.6 PHASE 2:ITERATES STAYING CLOSE TO MANIFOLD", + "text_level": 1, + "bbox": [ + 171, + 479, + 581, + 493 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "In this subsection, we show that $\\| \\pmb{x}_{k,t}^{(s)}\\| _2 = \\tilde{\\mathcal{O}} (\\sqrt{\\eta})$ and $\\| \\bar{\\pmb{\\theta}}^{(s + r)} - \\bar{\\pmb{\\theta}}^{(s)}\\| _2 = \\tilde{\\mathcal{O}} (\\eta^{0.5 - 0.5\\beta}),\\forall 0\\leq r\\leq R_{\\mathrm{grp}}$ with high probability.", + "bbox": [ + 169, + 502, + 823, + 536 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "K.6.1 ADDITIONAL NOTATIONS", + "text_level": 1, + "bbox": [ + 171, + 550, + 406, + 563 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Before presenting the lemmas, we define the following martingale $\\{\\pmb{m}_{k,t}^{(s)}\\}_{t = 0}^{H}$ that will be useful in the proof:", + "bbox": [ + 169, + 571, + 823, + 603 + ], + "page_idx": 45 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {m} _ {k, t} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t - 1} \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\quad 
\\boldsymbol {m} _ {k, 0} = \\mathbf {0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 604, + 604, + 643 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "We also define $\\tilde{P}:\\mathbb{R}^d\\to \\mathbb{R}^{d\\times d}$ as an extension of $\\partial \\Phi$", + "bbox": [ + 171, + 648, + 537, + 664 + ], + "page_idx": 45 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {P}} (\\boldsymbol {\\theta}) := \\left\\{ \\begin{array}{l l} \\partial \\Phi (\\boldsymbol {\\theta}), & \\text {i f} \\boldsymbol {\\theta} \\in \\Gamma^ {\\epsilon_ {2}}, \\\\ \\mathbf {0}, & \\text {o t h e r w i s e}. \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 670, + 604, + 704 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Finally, we define a martingale $\\{Z_t^{(s)}: s \\geq 0, 0 \\leq t \\leq H\\}$ :", + "bbox": [ + 169, + 713, + 563, + 731 + ], + "page_idx": 45 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {Z} _ {t} ^ {(s)} := \\frac {1}{K} \\sum_ {k \\in [ K ]} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\tilde {\\boldsymbol {P}} (\\bar {\\boldsymbol {\\theta}} ^ {(r)}) \\boldsymbol {z} _ {k, t} ^ {(r)} + \\frac {1}{K} \\sum_ {k \\in [ K ]} \\sum_ {\\tau = 0} ^ {t - 1} \\tilde {\\boldsymbol {P}} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\boldsymbol {z} _ {k, t} ^ {(s)}, \\quad \\boldsymbol {Z} _ {0} ^ {(0)} = \\mathbf {0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 738, + 766, + 781 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "K.6.2 PROOF FOR THE HIGH PROBABILITY BOUNDS", + "text_level": 1, + "bbox": [ + 171, + 796, + 550, + 809 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "A direct application of Azuma-Hoeffding's inequality yields the following lemma.", + "bbox": [ + 169, + 820, + 712, + 835 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Lemma K.19 (Concentration property of $m_{k,t}^{(s)}$ ). 
With probability at least $1 - \\delta$ , the following holds:", + "bbox": [ + 169, + 840, + 823, + 859 + ], + "page_idx": 45 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {9} \\sqrt {\\frac {1}{\\eta} \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < R _ {\\mathrm {g r p}},\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 867, + 720, + 901 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "where $\\tilde{C}_9$ is a constant.", + "bbox": [ + 171, + 907, + 328, + 924 + ], + "page_idx": 45 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Proof. Notice that $\\| \\pmb{m}_{k,t + 1}^{(s)} - \\pmb{m}_{k,t}^{(s)}\\| _2\\leq \\sigma_{\\max}$ . 
Then by Azuma-Hoeffding's inequality,
With probability at least $1 - \\delta$ , the following inequality holds:", + "bbox": [ + 169, + 330, + 823, + 362 + ], + "page_idx": 46 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {Z} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1 2} \\eta^ {- 0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 369, + 681, + 405 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Proof. Notice that $\\| \\mathbf{Z}_{t + 1}^{(s)} - \\mathbf{Z}_t^{(s)}\\| _2\\leq \\nu_2\\sigma_{\\max},\\forall 0\\leq t\\leq H - 1$ and $\\| \\mathbf{Z}_0^{(s + 1)} - \\mathbf{Z}_H^{(s)}\\| _2\\leq \\nu_2\\sigma_{\\max}$ . By Azuma-Hoeffding's inequality,", + "bbox": [ + 169, + 425, + 823, + 458 + ], + "page_idx": 46 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {P} (\\| \\pmb {Z} _ {t} ^ {(s)} \\| _ {2} \\geq \\epsilon^ {\\prime}) \\leq 2 \\exp \\left(- \\frac {\\epsilon^ {\\prime 2}}{2 (s H + t) \\nu_ {2} ^ {2} \\sigma_ {\\mathrm {m a x}} ^ {2}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 465, + 669, + 501 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Taking union bound on $R_{\\mathrm{grp}}$ rounds, we obtain that the following inequality holds with probability at least $1 - \\delta$ :", + "bbox": [ + 169, + 508, + 823, + 536 + ], + "page_idx": 46 + }, + { + "type": "equation", + "text": "\n$$\n\\| Z _ {H} ^ {(s)} \\| _ {2} \\leq \\sigma_ {\\max } \\nu_ {2} \\sqrt {2 H R _ {\\mathrm {g r p}} \\log \\frac {2 R _ {\\mathrm {g r p}}}{\\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 544, + 699, + 578 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Substituting in $H = \\frac{\\alpha}{\\eta}$ and $R_{\\mathrm{gpr}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}}\\right\\rfloor$ yields the lemma.", + "bbox": [ + 169, + 587, + 571, + 606 
+ ], + "page_idx": 46 + }, + { + "type": "image", + "img_path": "images/623b23c9acf1b65ac7da7488d501bfbee0de06fd8b00de1bb3ce750aa4f27220.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 588, + 823, + 601 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "We proceed to present a direct corollary of Lemma K.17 which provides a bound for the potential function over $R_{\\mathrm{grp}}$ rounds.", + "bbox": [ + 169, + 626, + 823, + 656 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Lemma K.21. Given $\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\| _2\\leq C_0\\sqrt{\\eta\\log\\frac{1}{\\eta}}$ where $C_0$ is a constant, then for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ ,", + "bbox": [ + 169, + 662, + 823, + 700 + ], + "page_idx": 46 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\boldsymbol {\\theta}} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {0}}, \\quad \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, \\tag {52}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 709, + 823, + 744 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 751, + 202, + 763 + ], + "page_idx": 46 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, 0 \\leq t \\leq H, k \\in [ K ], \\tag {53}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 772, + 825, + 806 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "where $C_1$ is a constant that can depend on $C_0$ .", + "bbox": [ + 169, + 814, + 480, + 829 + ], + "page_idx": 46 + }, + { + "type": "text", + 
"text": "Furthermore,", + "bbox": [ + 171, + 840, + 263, + 854 + ], + "page_idx": 46 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(R _ {\\mathrm {g r p}})}) \\leq \\tilde {C} _ {1 0} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},\n$$\n", + "text_format": "latex", + "bbox": [ + 397, + 864, + 599, + 898 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "where $\\tilde{C}_9$ is a constant independent of $C_0$ .", + "bbox": [ + 171, + 907, + 452, + 925 + ], + "page_idx": 46 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Proof. By $\\rho_{2}$ -smoothness of $\\mathcal{L}$ , $\\tilde{\\Psi}(\\bar{\\pmb{\\theta}}^{(0)}) \\leq C_0 \\sqrt{\\frac{\\eta \\rho_2}{2} \\log \\frac{1}{\\eta}}$ . 
Substituting $R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha \\eta^{\\beta}} \\right\\rfloor$ and $\\tilde{\\Psi}(\\bar{\\pmb{\\theta}}^{(0)}) \\leq C_0 \\sqrt{\\frac{\\eta \\rho_2}{2} \\log \\frac{1}{\\eta}}$ into Lemma K.17, for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ , (52) and (53) where $C_1$ is a constant that can depend on $C_0$ .", + "bbox": [ + 169, + 102, + 823, + 165 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Furthermore, for round $\\bar{\\theta}^{(R_{\\mathrm{grp}})}$", + "bbox": [ + 171, + 169, + 382, + 185 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(R _ {\\mathrm {g r p}})}) \\leq \\exp (- \\mathcal {O} (\\eta^ {- \\beta})) + \\frac {1}{1 - \\exp (- \\alpha \\mu / 2)} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R _ {\\mathrm {g r p}}}{\\eta \\delta}} \\leq \\tilde {C} _ {1 0} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 194, + 785, + 234 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "where $\\tilde{C}_9$ is a constant independent of $C_0$ .", + "bbox": [ + 171, + 243, + 452, + 260 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/3fbad9ac8622b8a91201b5a489176323ab972ffbf93a6adb7ee011279b012a48.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 244, + 823, + 257 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Lemma K.22. 
Given $\\| \\bar{\\theta}^{(0)} - \\phi^{(0)} \\|_2 \\leq C_0 \\sqrt{\\eta \\log \\frac{1}{\\eta}}$ where $C_0$ is a constant, then for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ , for all $0 \\leq s_0 < R_{\\mathrm{grp}}, 0 \\leq t \\leq H, k \\in [K]$ ,", + "bbox": [ + 169, + 272, + 823, + 311 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\boldsymbol {x} _ {k, t} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\| \\bar {\\boldsymbol {x}} _ {H} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\\\ \\| \\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 318, + 730, + 388 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "where $C_2$ is a constant that can depend $C_0$ . Furthermore,", + "bbox": [ + 171, + 395, + 552, + 410 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\pmb {\\theta}} ^ {(R _ {\\mathrm {g r p}})} - \\pmb {\\phi} ^ {(R _ {\\mathrm {g r p}})} \\| _ {2} \\leq \\tilde {C} _ {1 1} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 417, + 633, + 452 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "where $\\tilde{C}_{11}$ is a constant independent of $C_0$", + "bbox": [ + 171, + 460, + 457, + 478 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Proof. 
Decomposing $\\boldsymbol{x}_{k,t}^{(s)}$ by triangle inequality, we have", + "bbox": [ + 171, + 494, + 553, + 513 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {x} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} + \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {\\phi} ^ {(s)} \\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 348, + 523, + 645, + 545 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "We first bound $\\| \\bar{\\theta}^{(s)} - \\phi^{(s)} \\|_2$ . By Lemma K.21, for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\frac{\\delta}{2}$ ,", + "bbox": [ + 169, + 553, + 823, + 587 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {2}{\\eta \\delta}}, \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, \\tag {54}\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 595, + 823, + 628 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {2}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, 0 \\leq t \\leq H, \\tag {55}\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 631, + 823, + 665 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 672, + 200, + 685 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {\\left(R _ {\\mathrm {g r p}}\\right)}\\right) \\leq \\tilde {C} _ {1 0} \\sqrt {\\eta \\log \\frac {2}{\\eta \\delta}}, \\tag {56}\n$$\n", + "text_format": "latex", + "bbox": [ + 397, + 693, + 823, + 727 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "where $C_2$ is a constant that 
may depend on $C_0$ and $\\tilde{C}_{10}$ is a constant independent of $C_0$ . When (54) and (56) hold, by Lemma K.10,", + "bbox": [ + 169, + 734, + 823, + 765 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\phi^ {(s)} \\right\\| _ {2} \\leq \\sqrt {\\frac {2}{\\mu}} \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\frac {2 \\eta}{\\mu} \\log \\frac {2}{\\eta \\delta}}, \\tag {57}\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 773, + 823, + 806 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {\\left(R _ {\\mathrm {g r p}}\\right)} - \\phi^ {\\left(R _ {\\mathrm {g r p}}\\right)} \\right\\| _ {2} \\leq \\sqrt {\\frac {2}{\\mu}} \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {\\left(R _ {\\mathrm {g r p}}\\right)}\\right) \\leq \\tilde {C} _ {1 0} \\sqrt {\\frac {2 \\eta}{\\mu} \\log \\frac {2}{\\eta \\delta}}. \\tag {58}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 809, + 823, + 843 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Then we bound $\\| \\pmb{\\theta}_{k,t}^{(s)} - \\bar{\\pmb{\\theta}}^{(s)}\\| _2$ . 
By the update rule, we have", + "bbox": [ + 171, + 851, + 571, + 872 + ], + "page_idx": 47 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} _ {k, t} ^ {(s)} = \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}) - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\boldsymbol {z} _ {k, \\tau} ^ {(s)} = \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}) - \\eta \\boldsymbol {m} _ {k, t} ^ {(s)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 880, + 761, + 921 + ], + "page_idx": 47 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Still by triangle inequality, we have", + "bbox": [ + 171, + 103, + 410, + 119 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\leq \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}) \\| _ {2} + \\eta \\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 128, + 669, + 170 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Due to $\\rho_{2}$ -smoothness of $\\mathcal{L}$ , when (55) holds,", + "bbox": [ + 171, + 179, + 473, + 194 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}\\right) \\right\\| _ {2} \\leq \\sqrt {2 \\rho_ {2}} \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {2 \\rho_ {2} \\eta \\log \\frac {2}{\\eta 
\\delta}}. \\tag {59}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 204, + 823, + 238 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "By Lemma K.19, with probability at least $1 - \\frac{\\delta}{2}$ ,", + "bbox": [ + 171, + 246, + 496, + 266 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {9} \\sqrt {\\frac {1}{\\eta} \\log \\frac {2}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < R _ {\\mathrm {g r p}}. \\tag {60}\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 275, + 823, + 310 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Combining (59) and (60), when (55) and (56) hold simultaneously, there exists a constant $C_3$ which can depend on $C_0$ such that", + "bbox": [ + 169, + 316, + 823, + 347 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\right\\| _ {2} \\leq C _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H. 
\\tag {61}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 356, + 825, + 390 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "By triangle inequality,", + "bbox": [ + 171, + 398, + 323, + 414 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\pmb {\\theta}} ^ {(s + 1)} - \\bar {\\pmb {\\theta}} ^ {(s)} \\| _ {2} \\leq C _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 424, + 617, + 458 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Combining (57), (58) and (61), we complete the proof.", + "bbox": [ + 171, + 465, + 535, + 481 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/81f1985b57526f1bc28ab22f11b85687c90ead9c450b4c1809bd0f9183a00e5a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 465, + 823, + 478 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Then we provide high probability bounds for the movement of $\\phi^{(s)}$ within $R_{\\mathrm{grp}}$ rounds.", + "bbox": [ + 169, + 505, + 750, + 522 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Lemma K.23. Given $\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\|_2 \\leq C_0\\sqrt{\\eta\\log\\frac{1}{\\eta}}$ where $C_0$ is a constant, then for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ ,", + "bbox": [ + 169, + 527, + 823, + 566 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\phi^ {(s)} - \\phi^ {(0)} \\| _ {2} \\leq C _ {4} \\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 1 \\leq s \\leq R _ {\\mathrm {g r p}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 575, + 697, + 609 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "where $C_4$ is a constant that can depend on $C_0$ .", + "bbox": [ + 171, + 618, + 480, + 633 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Proof. 
By the update rule of Local SGD,", + "bbox": [ + 171, + 657, + 444, + 672 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\pmb {\\theta} _ {k, H} ^ {(s)} = \\bar {\\pmb {\\theta}} ^ {(s)} - \\eta \\sum_ {t = 0} ^ {H - 1} \\nabla \\mathcal {L} (\\pmb {\\theta} _ {k, t} ^ {(s)}) - \\eta \\sum_ {t = 0} ^ {H - 1} \\pmb {z} _ {k, t} ^ {(s)}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 681, + 651, + 724 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Averaging among $K$ clients gives", + "bbox": [ + 171, + 732, + 397, + 748 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} = \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\frac {\\eta}{K} \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) - \\frac {\\eta}{K} \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\boldsymbol {z} _ {k, t} ^ {(s)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 757, + 704, + 801 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "By Lemma K.22, for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , the following holds with probability at least $1 - \\delta / 3$ ,", + "bbox": [ + 171, + 811, + 779, + 828 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\right\\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {3}{\\eta \\delta}}, \\quad \\boldsymbol {\\theta} _ {k, t} ^ {(s)} \\in B ^ {\\epsilon_ {0}} \\left(\\phi^ {(s)}\\right), \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, 0 \\leq t \\leq H, k \\in [ K ], \\tag {62}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 838, + 823, + 883 + ], + "page_idx": 48 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\right\\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac 
{3}{\\eta \\delta}}, \\quad \\bar {\\boldsymbol {\\theta}} ^ {(s)}, \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} \\in B ^ {\\epsilon_ {0}} \\left(\\phi^ {(s)}\\right), \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}. \\tag {63}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 887, + 823, + 922 + ], + "page_idx": 48 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 48 + }, + { + "type": "page_number", + "text": "49", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "When (62) and (63) hold, we can expand $\\Phi (\\bar{\\theta}^{(s + 1)})$ as follows:", + "bbox": [ + 171, + 102, + 591, + 119 + ], + "page_idx": 49 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\phi^ {(s + 1)} = \\phi^ {(s)} + \\partial \\Phi (\\bar {\\theta} ^ {(s)}) (\\bar {\\theta} ^ {(s + 1)} - \\bar {\\theta} ^ {(s)}) + \\frac {1}{2} \\partial^ {2} \\Phi (\\tilde {\\theta} ^ {(s)}) [ \\bar {\\theta} ^ {(s + 1)} - \\bar {\\theta} ^ {(s)}, \\bar {\\theta} ^ {(s + 1)} - \\bar {\\theta} ^ {(s)} ] \\\\ = \\phi^ {(s)} \\underbrace {- \\frac {\\eta}{K} \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\partial \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k , t} ^ {(s)})} _ {\\mathcal {T} _ {1} ^ {(s)}} \\underbrace {- \\frac {\\eta}{K} \\partial \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} z _ {k , t} ^ {(s)}} _ {\\mathcal {T} _ {2} ^ {(s)}} \\\\ + \\underbrace {\\frac {1}{2} \\partial^ {2} \\Phi (a ^ {(s)} \\bar {\\boldsymbol {\\theta}} ^ {(s)} + (1 - a ^ {(s)}) \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) [ \\boldsymbol {\\theta} ^ {(s + 1)} - \\boldsymbol {\\theta} ^ {(s)} , \\boldsymbol {\\theta} ^ {(s + 1)} - \\boldsymbol {\\theta} ^ {(s)} ]} _ {\\mathcal {T} _ {3} ^ {(s)}}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ 
+ 210, + 128, + 785, + 282 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "where $a^{(s)}\\in (0,1)$ . Telescoping from round 0 to $s - 1$ , we have", + "bbox": [ + 171, + 292, + 599, + 309 + ], + "page_idx": 49 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\phi^ {(s)} - \\phi^ {(0)} \\| _ {2} = \\sum_ {r = 0} ^ {s - 1} \\mathcal {T} _ {1} ^ {(r)} + \\sum_ {r = 0} ^ {s - 1} \\mathcal {T} _ {2} ^ {(r)} + \\sum_ {r = 0} ^ {s - 1} \\mathcal {T} _ {3} ^ {(r)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 319, + 669, + 359 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "From (63), we can bound $\\| \\mathcal{T}_3^{(s)}\\| _2$ by $\\| \\mathcal{T}_3^{(s)}\\| _2\\leq \\frac{1}{2}\\nu_2C_2^2\\eta \\log \\frac{3}{\\eta\\delta}$ . We proceed to bound $\\| \\mathcal{T}_1^{(s)}\\| _2$ . When (62) and (63) hold, we have", + "bbox": [ + 169, + 378, + 823, + 411 + ], + "page_idx": 49 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) = \\partial \\Phi (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) + \\partial^ {2} \\Phi (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) [ \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)}, \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) ] \\\\ = \\partial^ {2} \\Phi (b _ {k, t} ^ {(s)} \\bar {\\boldsymbol {\\theta}} ^ {(s)} + (1 - b _ {k, t} ^ {(s)}) \\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) [ \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)}, \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) ], \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 421, + 754, + 465 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "where $b_{k,t}^{(s)} \\in (0,1)$ . 
By Lemma K.17, with probability at least $1 - \\delta /3$ , the following holds:", + "bbox": [ + 171, + 476, + 777, + 494 + ], + "page_idx": 49 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\right\\| _ {2} \\leq \\sqrt {2 \\rho_ {2}} \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {2 \\rho_ {2} \\eta \\log \\frac {3}{\\eta \\delta}}, \\forall k \\in [ K ], 0 \\leq t \\leq H, 0 \\leq s < R _ {\\mathrm {g r p}}. \\tag {64}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 507, + 823, + 539 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "When (62), (63) and (64) hold simultaneously, we have for all $0 \\leq s < R_{\\mathrm{grp}}$", + "bbox": [ + 171, + 547, + 679, + 564 + ], + "page_idx": 49 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\mathcal {T} _ {1} ^ {(s)} \\| _ {2} \\leq \\frac {\\eta \\nu_ {2}}{K} \\sum_ {t = 0} ^ {H - 1} \\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) \\| _ {2} \\\\ \\leq \\frac {\\alpha \\nu_ {2} \\sqrt {2 \\rho_ {2}} C _ {1} C _ {2}}{K} \\eta \\log \\frac {3}{\\eta \\delta}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 575, + 658, + 650 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Finally, we bound $\\| \\sum_{r = 0}^{s - 1}\\mathcal{T}_2^{(r)}\\| _2$ . By Lemma K.20, the following inequality holds with probability at least $1 - \\delta /3$ :", + "bbox": [ + 169, + 667, + 823, + 700 + ], + "page_idx": 49 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\boldsymbol {Z} _ {H} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1 2} \\eta^ {- 0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {3}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}. 
\\tag {65}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 710, + 823, + 743 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "When (62), (63) and (65) hold simultaneously, we have", + "bbox": [ + 171, + 751, + 537, + 766 + ], + "page_idx": 49 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\sum_ {r = 0} ^ {s} \\mathcal {T} _ {2} ^ {(r)} \\| _ {2} = \\eta \\| \\boldsymbol {Z} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1 2} \\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {3}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 777, + 730, + 814 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Combining the bounds for $\\| \\mathcal{T}_1^{(s)}\\| _2, \\| \\sum_{r = 0}^s\\mathcal{T}_2^{(r)}\\| _2$ and $\\| \\mathcal{T}_3^{(s)}\\| _2$ and taking union bound, we obtain that for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , the following inequality holds with probability at least $1 - \\delta$ :", + "bbox": [ + 169, + 825, + 823, + 858 + ], + "page_idx": 49 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {\\phi} ^ {(s)} - \\boldsymbol {\\phi} ^ {(0)} \\| _ {2} \\leq C _ {4} \\eta^ {0. 5 - 0. 
5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 1 \\leq s \\leq R _ {\\mathrm {g r p}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 868, + 696, + 901 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "where $C_4$ is a constant that can depend on $C_0$ .", + "bbox": [ + 171, + 909, + 477, + 924 + ], + "page_idx": 49 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 49 + }, + { + "type": "page_number", + "text": "50", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "K.7 SUMMARY OF THE DYNAMICS AND PROOF OF THEOREMS J.1 AND J.2", + "text_level": 1, + "bbox": [ + 171, + 103, + 702, + 118 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Based on the results in Appendix K.5 and Appendix K.6, we summarize the dynamics of Local SGD iterates and then present the proof of Theorems J.1 and J.2 in this subsection. For convenience, we first introduce the definition of global step and $\\delta$ -good step.", + "bbox": [ + 169, + 128, + 823, + 172 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Definition K.3 (Global step). Define $\\mathcal{I}$ as the index set $\\{(s,t):s\\geq 0,0\\leq t\\leq H\\}$ with lexicographical order, which means $(s_1,t_1)\\preceq (s_2,t_2)$ if and only if $s_1 < s_2$ or $(s_{1} = s_{2}$ and $t_1\\leq t_2)$ . A global step is indexed by $(s,t)$ corresponding to the $t$ -th local step at round $s$ .", + "bbox": [ + 169, + 172, + 823, + 217 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Definition K.4 ( $\\delta$ -good step). 
In the training process of Local SGD, we say the global step $(s,t) \\preceq (R_{\\mathrm{tot}},0)$ is $\\delta$ -good if the following inequalities hold:", + "bbox": [ + 169, + 218, + 823, + 248 + ], + "page_idx": 50 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\tilde {\\mathbf {Z}} _ {k, \\tau} ^ {(r)} \\| _ {2} \\leq \\exp (\\alpha \\rho_ {2}) \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {6 H R _ {\\operatorname* {t o t}} K}{\\delta}}, \\quad \\forall k \\in [ K ], (r, \\tau) \\preceq (s, t),\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 250, + 774, + 284 + ], + "page_idx": 50 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {m} _ {k, \\tau} ^ {(r)} \\| _ {2} \\leq \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {6 K H R _ {\\mathrm {t o t}}}{\\delta}}, \\quad \\forall k \\in [ K ], (r, \\tau) \\preceq (s, t),\n$$\n", + "text_format": "latex", + "bbox": [ + 222, + 286, + 774, + 319 + ], + "page_idx": 50 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {Z} _ {H} ^ {(r)} \\| _ {2} \\leq \\sigma_ {\\max } \\nu_ {2} \\sqrt {2 H R _ {\\mathrm {g r p}} \\log \\frac {2 R _ {\\mathrm {t o t}}}{\\delta}}, \\quad \\forall 0 \\leq r < s.\n$$\n", + "text_format": "latex", + "bbox": [ + 227, + 321, + 774, + 354 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Applying the concentration properties of $\\tilde{\\pmb{Z}}_{k,\\tau}^{(r)},\\pmb{m}_{k,\\tau}^{(r)}$ and $\\pmb{Z}_H^{(r)}$ (Lemmas K.20, K.19 and K.12) yields the following theorem.", + "bbox": [ + 169, + 366, + 823, + 398 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Theorem K.1. 
For $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ , all global steps $(s,t) \\preceq (R_{\\mathrm{tot}},0)$ are $\\delta$ -good.", + "bbox": [ + 171, + 400, + 823, + 430 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "In the remainder of this subsection, we use $\\mathcal{O}(\\cdot)$ notation to hide constants independent of $\\delta$ and $\\eta$ .", + "bbox": [ + 169, + 438, + 818, + 455 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Below we present a summary of the dynamics of Local SGD when $\\bar{\\theta}^{(0)}$ is initialized such that $\\Phi (\\bar{\\theta}^{(0)})\\in \\Gamma$ and all global steps are $\\delta$ -good. Phase 1 lasts for $s_0 + s_1 = \\mathcal{O}(\\log \\frac{1}{\\eta})$ rounds. At the end of phase 1, the iterate reaches within $\\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})$ from $\\Gamma$ , i.e., $\\| \\bar{\\pmb{\\theta}}^{(s_0 + s_1)} - \\pmb {\\phi}^{(s_0 + s_1)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})$ . The change of the projection on manifold over $s_0 + s_1$ rounds, $\\| \\phi^{(s_1 + s_0)} - \\phi^{(0)}\\| _2$ is bounded by $\\mathcal{O}(\\log \\frac{1}{\\eta}\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})$ .", + "bbox": [ + 169, + 460, + 823, + 568 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "After $s_0 + s_1$ rounds, the dynamic enters phase 2 when the iterates stay close to $\\Gamma$ with $\\bar{\\theta}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}}$ and $\\pmb{\\theta}_{k,t}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall k \\in [K], (s_0 + s_1, 0) \\preceq (s,t) \\preceq (R_{\\mathrm{tot}}, 0)$ . 
Furthermore, $\\| \\pmb{x}_{k,t}^{(s)} \\|_2$ and $\\| \\bar{\\pmb{x}}_H^{(s)} \\|_2$ satisfy the following equations:", + "bbox": [ + 169, + 575, + 823, + 630 + ], + "page_idx": 50 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {x} _ {k, t} ^ {(s)} \\| _ {2} = \\mathcal {O} (\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}), \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, s _ {0} + s _ {1} \\leq s < R _ {\\mathrm {t o t}},\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 633, + 759, + 659 + ], + "page_idx": 50 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\tilde {\\boldsymbol {x}} _ {H} ^ {(s)} \\| _ {2} = \\mathcal {O} (\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}), \\quad \\forall s _ {0} + s _ {1} \\leq s < R _ {\\mathrm {t o t}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 662, + 759, + 686 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Moreover, for $s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}} - R_{\\mathrm{grp}}$ , the change of the manifold projection within $R_{\\mathrm{grp}}$ rounds can be bounded as follows:", + "bbox": [ + 169, + 689, + 823, + 715 + ], + "page_idx": 50 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\phi^ {(s + r)} - \\phi^ {(s)} \\| _ {2} = \\mathcal {O} (\\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}), \\quad \\forall 1 \\leq r \\leq R _ {\\mathrm {g r p}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 719, + 707, + 751 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "After combing through the dynamics of Local SGD iterates during the approaching and drift phase, we are ready to present the proof of Theorems J.1 and J.2, which are direct consequences of the lemmas in Appendix K.5 and K.6.", + "bbox": [ + 169, + 752, + 823, + 796 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Proof of Theorem J.1. 
By Lemmas K.15, K.22 and Corollary K.1, for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , when all global steps are $\\delta$ -good, $\\bar{\\pmb{\\theta}}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}}$ and $\\pmb{\\theta}_{k,t}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall k \\in [K], (s_0 + s_1, 0) \\preceq (s,t) \\preceq (R_{\\mathrm{tot}}, 0)$ and $\\| \\pmb{x}_{k,t}^{(s)} \\|_2, \\| \\bar{\\pmb{x}}_H^{(s)} \\|_2$ satisfy the following equations:", + "bbox": [ + 169, + 809, + 825, + 864 + ], + "page_idx": 50 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol{x}_{k,t}^{(s)}\\|_{2} = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}}),\\quad \\forall k\\in [K],0\\leq t\\leq H,s_{0} + s_{1}\\leq s < R_{\\text{tot}},\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 868, + 759, + 892 + ], + "page_idx": 50 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\boldsymbol {x}} _ {H} ^ {(s)} \\| _ {2} = \\mathcal {O} (\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}), \\quad \\forall s _ {0} + s _ {1} \\leq s < R _ {\\mathrm {t o t}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 896, + 759, + 921 + ], + "page_idx": 50 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 50 + }, + { + "type": "page_number", + "text": "51", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Hence $\\| \\tilde{\\pmb{x}}_0^{(R_{\\mathrm{tot}})}\\| _2 = \\mathcal{O}(\\tilde{\\Psi} (\\bar{\\pmb{\\theta}}^{(R_{\\mathrm{tot}})})) = \\mathcal{O}(\\| \\tilde{\\pmb{x}}_H^{(R_{\\mathrm{tot}} - 1)}\\| _2) = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})$ by smoothness of $\\mathcal{L}$ and Lemma K.10. 
According to Theorem K.1, with probability at least $1 - \\delta$ , all global steps are $\\delta$ -good, thus completing the proof.", + "bbox": [ + 169, + 101, + 826, + 156 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Proof of Theorem J.2. By Lemma K.23, for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , when all global steps are $\\delta$ -good, then $\\forall s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}} - R_{\\mathrm{grp}}$ ,", + "bbox": [ + 169, + 175, + 823, + 205 + ], + "page_idx": 51 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\phi^ {(s + r)} - \\phi^ {(s)} \\| _ {2} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - 0. 5 \\beta}), \\quad \\forall 0 \\leq r \\leq R _ {\\mathrm {g r p}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 215, + 678, + 234 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Also, by Lemma K.18, when all global steps are $\\delta$ -good, the change of projection on manifold over $s_0 + s_1$ rounds (i.e., Phase 1), $\\| \\phi^{(s_0 + s_1)} - \\phi^{(0)} \\|_2$ is bounded by $\\tilde{\\mathcal{O}}(\\sqrt{\\eta})$ . According to Theorem K.1, with probability at least $1 - \\delta$ , all global steps are $\\delta$ -good, thus completing the proof.", + "bbox": [ + 169, + 242, + 825, + 287 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "K.8 PROOF OF THEOREM 3.3", + "text_level": 1, + "bbox": [ + 171, + 306, + 390, + 320 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "In this subsection, we explicitly derive the dependency of the approximation error on $\\alpha$ . The proofs are quite similar to those in Appendix K.5 and hence we only state the key proof idea for brevity. With the same method as the proofs in Appendix K.5.2, we can show that with high probability, $\\| \\bar{\\theta}^{(s)} - \\phi^{(s)}\\|_2 \\leq \\frac{1}{2}\\sqrt{\\frac{\\mu}{\\rho_2}}$ after $s_0' = \\mathcal{O}(1)$ rounds. Below we focus on the dynamics of Local SGD thereafter. 
We first remind the readers of the definition of $\\{\\tilde{Z}_{k,t}^s\\}$ :", + "bbox": [ + 169, + 333, + 823, + 417 + ], + "page_idx": 51 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right)\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\tilde {\\boldsymbol {Z}} _ {k, 0} ^ {(s)} = \\boldsymbol {0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 428, + 704, + 470 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "We have the following lemma that controls the norm of the matrix product $\\prod_{l = \\tau +1}^{t - 1}(\\boldsymbol {I} - \\eta \\nabla^2\\mathcal{L}(\\tilde{\\boldsymbol{u}}_l^{(s)}))$", + "bbox": [ + 169, + 481, + 823, + 518 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Lemma K.24. Given $\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}$ , then there exists a positive constant $C_3^\\prime$ independent of $\\alpha$ such that for all $0\\leq \\tau < t\\leq H$", + "bbox": [ + 169, + 523, + 825, + 555 + ], + "page_idx": 51 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)})) \\right\\| _ {2} \\leq C _ {3} ^ {\\prime}.\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 563, + 617, + 608 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Proof. Since $\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}$ , then $\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_1}$ for all $0\\leq t\\leq H$ . We first bound the minimum eigenvalue of $\\nabla^2\\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})$ . 
Due to the PL condition, by Lemma K.6, for $\\eta \\leq \\frac{1}{\\rho_2}$", + "bbox": [ + 169, + 631, + 823, + 670 + ], + "page_idx": 51 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) ^ {t} \\left(\\mathcal {L} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) - \\mathcal {L} ^ {*}\\right) \\leq \\exp (- \\mu t \\eta) (\\mathcal {L} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) - \\mathcal {L} ^ {*}), \\quad \\forall 0 \\leq t \\leq H.\n$$\n", + "text_format": "latex", + "bbox": [ + 196, + 678, + 800, + 705 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 712, + 243, + 726 + ], + "page_idx": 51 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) \\leq \\exp (- \\mu t \\eta / 2) \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 736, + 611, + 756 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Let $C_1^\\prime = \\rho_3\\sqrt{\\frac{\\rho_2}{\\mu}}$ . By Weyl's inequality,", + "bbox": [ + 169, + 767, + 442, + 792 + ], + "page_idx": 51 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left| \\lambda_ {\\min } \\left(\\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\right| = \\left| \\lambda_ {\\min } \\left(\\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) - \\lambda_ {\\min } \\left(\\nabla^ {2} \\mathcal {L} \\left(\\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\right. 
\\right| \\\\ \\leq \\rho_ {3} \\| \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) - \\nabla^ {2} \\mathcal {L} \\left(\\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\| _ {2} \\\\ \\leq \\rho_ {3} \\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\Phi (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) \\| _ {2} \\\\ \\leq \\rho_ {3} \\sqrt {\\frac {2}{\\mu}} \\exp (- \\mu t \\eta / 2) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\\\ \\leq C _ {1} ^ {\\prime} \\exp (- \\mu t \\eta / 2) \\epsilon_ {0}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 284, + 803, + 714, + 920 + ], + "page_idx": 51 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 51 + }, + { + "type": "page_number", + "text": "52", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "where the last two inequalities use Lemmas K.10 and K.7 respectively. Therefore, for all $0 \\leq t \\leq H$ and $0 \\leq \\tau \\leq t - 1$ ,", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 52 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\| \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\| _ {2} \\leq \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(1 + \\eta \\left| \\lambda_ {\\min } \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right) \\right|\\right) \\\\ \\leq \\prod_ {l = 0} ^ {\\infty} (1 + \\eta | \\lambda_ {\\min } \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}) |) \\\\ \\leq \\exp \\left(\\eta \\epsilon_ {0} C _ {1} ^ {\\prime} \\sum_ {l = 0} ^ {\\infty} \\exp (- \\mu l \\eta / 2)\\right). 
\\tag {66} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 289, + 137, + 823, + 263 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "For sufficiently small $\\eta$ , there exists a constant $C_2'$ such that", + "bbox": [ + 171, + 268, + 566, + 286 + ], + "page_idx": 52 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {l = 0} ^ {\\infty} \\exp (- \\mu l \\eta / 2)) = \\frac {1}{1 - \\exp (- \\mu \\eta / 2)} \\leq \\frac {C _ {2} ^ {\\prime}}{\\eta}. \\tag {67}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 291, + 823, + 330 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Substituting (67) into (66), we obtain the lemma.", + "bbox": [ + 171, + 335, + 496, + 351 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/339c2726326801c8e83e538fd68f4467abc476dbd7ac53fcc8d671f5ae3c1c12.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 335, + 823, + 348 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Based on Lemma K.24, we obtain the following lemma about the concentration property of $\\tilde{Z}_{k,t}^{(s)}$ , which can be derived in the same way as Lemma K.12.", + "bbox": [ + 169, + 367, + 823, + 400 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Lemma K.25. 
Given $\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}$ , then with probability at least $1 - \\delta$", + "bbox": [ + 171, + 402, + 635, + 419 + ], + "page_idx": 52 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq C _ {3} ^ {\\prime} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 \\alpha K}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 425, + 709, + 465 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "where $C_3^\\prime$ is defined in Lemma K.24.", + "bbox": [ + 171, + 470, + 413, + 486 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "The following lemma can be derived analogously to Lemma K.14 but the error bound is tighter in terms of its dependency on $\\alpha$ .", + "bbox": [ + 169, + 496, + 823, + 526 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Lemma K.26. Given $\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_1}$ , then for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ , with probability at least $1 - \\delta$ , there exists a constant $C_4^\\prime$ independent of $\\alpha$ such that", + "bbox": [ + 169, + 529, + 825, + 559 + ], + "page_idx": 52 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\| _ {2} \\leq C _ {4} ^ {\\prime} \\sqrt {\\alpha \\eta \\log \\frac {\\alpha}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 565, + 697, + 598 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 604, + 202, + 616 + ], + "page_idx": 52 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq C _ {4} ^ {\\prime} \\sqrt {\\alpha \\eta \\log \\frac {\\alpha}{\\eta \\delta}}.\n$$\n", + 
"text_format": "latex", + "bbox": [ + 374, + 622, + 622, + 655 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Then, similar to Lemma K.17, we can show that for $\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))$ and simultaneously all $s\\geq s_0^{\\prime} + s_1^{\\prime}$ where $s_1^\\prime = \\mathcal{O}(\\frac{1}{\\alpha}\\log \\frac{1}{\\eta})$ , it holds with probability at least $1 - \\delta$ that $\\| \\bar{\\pmb{\\theta}}^{(s)} - \\phi^{(s)}\\| _2 = \\mathcal{O}(\\sqrt{\\alpha\\eta\\log\\frac{\\alpha}{\\eta\\delta}})$ . Note that to eliminate the dependency of the second term's denominator on $\\alpha$ in (44), we can discuss the cases of $\\alpha >c_{0}$ and $\\alpha < c_{0}$ respectively where $c_{0}$ can be an arbitrary positive constant independent of $\\alpha$ . For the case of $\\alpha < c_{0}$ group $\\lceil \\frac{c_0}{\\alpha}\\rceil$ rounds together and repeat the arguments in this subsection to analyze the closeness between Local SGD and GD iterates as well as the evolution of loss.", + "bbox": [ + 169, + 667, + 826, + 779 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "K.9 COMPUTING THE MOMENTS FOR ONE \"GIANT STEP\"", + "text_level": 1, + "bbox": [ + 171, + 796, + 589, + 810 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "In this subsection, we compute the first and second moments for the change of manifold projection every $R_{\\mathrm{grp}}$ rounds of Local SGD. Since the randomness in training might drive the iterate out of the working zone, making the dynamic intractable, we analyze a more well-behaved sequence $\\{\\hat{\\pmb{\\theta}}_{k,t}^{(s)}: (s,t) \\preceq (R_{\\mathrm{tot}},0), k \\in [K]\\}$ which is equal to $\\{\\pmb{\\theta}_{k,t}^{(s)}\\}$ with high probability. Specifically, $\\hat{\\pmb{\\theta}}_{k,t}^{(s)}$ equal to $\\pmb{\\theta}_{k,t}^{(s)}$ if the global step $(s,t)$ is $\\eta^{100}$ -good and is set as a point $\\phi_{\\mathrm{null}} \\in \\Gamma$ otherwise. 
The formal definition is as follows.", + "bbox": [ + 169, + 821, + 826, + 924 + ], + "page_idx": 52 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 52 + }, + { + "type": "page_number", + "text": "53", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/ffc695507921c80a14b163e44d2b9a7edaf3e6efd525e42e284333a0590fabc9.jpg", + "image_caption": [ + "Figure 9: A plot of $\\psi (x)$" + ], + "image_footnote": [], + "bbox": [ + 401, + 85, + 581, + 186 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Definition K.5 (Well-behaved sequence). Denote by $\\mathcal{E}_t^{(s)}$ the event $\\{\\text{global step } (s, t) \\text{ is } \\eta^{100} \\text{-good}\\}$ . Define a well-behaved sequence $\\hat{\\pmb{\\theta}}_{k,t}^{(s)} := \\pmb{\\theta}_{k,t}^{(s)}\\mathbb{1}_{\\mathcal{E}_t^{(s)}} + \\phi_{\\mathrm{null}}\\mathbb{1}_{\\bar{\\mathcal{E}}_t^{(s)}}$ , which satisfies the following update rule:", + "bbox": [ + 169, + 242, + 826, + 294 + ], + "page_idx": 53 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {\\theta}} _ {k, t + 1} ^ {(s)} = \\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} \\mathbb {1} _ {\\mathcal {E} _ {t + 1} ^ {(s)}} + \\phi_ {\\text {n u l l}} \\mathbb {1} _ {\\bar {\\mathcal {E}} _ {t + 1} ^ {(s)}} (68) \\\\ = \\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\underbrace {- \\mathbb {1} _ {\\bar {\\mathcal {E}} _ {t + 1} ^ {(s)}} (\\hat {\\boldsymbol {\\theta}} _ {k , t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k , t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k , t} ^ {(s)}) + \\mathbb {1} _ {\\bar {\\mathcal {E}} _ {t + 1} ^ {(s)}} \\phi_ {\\mathrm {n u l l}}} _ {:= \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)}}. 
(69) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 296, + 825, + 375 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "By Theorem K.1, with probability at least $1 - \\eta^{100}$ , $\\hat{\\pmb{\\theta}}_{k,t}^{(s)} = \\pmb{\\theta}_{k,t}^{(s)}, \\forall k \\in [K], (s,t) \\preceq (R_{\\mathrm{tot}},0)$ . Similar to $\\{\\pmb{\\theta}_{k,t}^{(s)}\\}$ , we define the following variables with respect to $\\{\\hat{\\pmb{\\theta}}_{k,t}^{(s)}\\}$ :", + "bbox": [ + 169, + 386, + 823, + 425 + ], + "page_idx": 53 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {\\theta}} _ {\\mathrm {a v g}} ^ {(s + 1)} := \\frac {1}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {\\theta}} _ {k, H} ^ {(s)}, \\quad \\hat {\\boldsymbol {\\phi}} ^ {(s)} := \\Phi (\\hat {\\boldsymbol {\\theta}} _ {\\mathrm {a v g}} ^ {(s)}),\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 428, + 545, + 467 + ], + "page_idx": 53 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\pmb {x}} _ {k, t} ^ {(s)} := \\hat {\\pmb {\\theta}} _ {k, t} ^ {(s)} - \\hat {\\pmb {\\phi}} ^ {(s)}, \\quad \\hat {\\pmb {x}} _ {\\mathrm {a v g}, 0} ^ {(s)} := \\hat {\\pmb {\\theta}} _ {\\mathrm {a v g}} ^ {(s)} - \\hat {\\pmb {\\phi}} ^ {(s)}, \\quad \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} := \\frac {1}{K} \\sum_ {k \\in [ K ]} \\hat {\\pmb {x}} _ {k, H} ^ {(s)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 469, + 754, + 507 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Notice that $\\hat{\\pmb{x}}_{k,0}^{(s)} = \\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s)}$ for all $k\\in [K]$ . Finally, we introduce the following mapping $\\Psi (\\pmb {\\theta}):$ $\\Gamma \\to \\mathbb{R}^{d\\times d}$ , which is closely related to $\\widehat{\\pmb{\\Psi}}$ defined in Theorem 3.2.", + "bbox": [ + 169, + 512, + 823, + 547 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Definition K.6. 
For $\\pmb{\\theta} \\in \\Gamma$ , we define the mapping $\\Psi(\\pmb{\\theta}) : \\Gamma \\to \\mathbb{R}^{d \\times d}$ :", + "bbox": [ + 169, + 549, + 640, + 565 + ], + "page_idx": 53 + }, + { + "type": "equation", + "text": "\n$$\n\\Psi (\\boldsymbol {\\theta}) = \\sum_ {i, j \\in [ d ]} \\psi \\left(\\eta H \\left(\\lambda_ {i} + \\lambda_ {j}\\right)\\right) \\left\\langle \\boldsymbol {\\Sigma} (\\boldsymbol {\\theta}), \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top},\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 568, + 674, + 602 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "where $\\lambda_{i},\\pmb{v}_{i}$ are the $i$ -th eigenvalue and eigenvector of $\\nabla^2\\mathcal{L}(\\pmb {\\theta})$ and $\\pmb {v}_i$ 's form an orthonormal basis of $\\mathbb{R}^d$ . Additionally, $\\psi (x)\\coloneqq \\frac{e^{-x} - 1 + x}{x}$ and $\\psi (0) = 0$ ; see Figure 9 for a plot.", + "bbox": [ + 169, + 606, + 823, + 640 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Remark K.1. Intuitively, $\\Psi(\\pmb{\\theta})$ rescales the entries of $\\pmb{\\Sigma}(\\pmb{\\theta})$ in the eigenbasis of $\\nabla^2\\mathcal{L}(\\pmb{\\theta})$ . When $\\nabla^2\\mathcal{L}(\\pmb{\\theta}) = \\mathrm{diag}(\\lambda_1,\\dots ,\\lambda_d)\\in \\mathbb{R}^{d\\times d}$ , where $\\lambda_{i} = 0$ for all $m < i\\leq d$ , $\\Psi (\\pmb{\\Sigma}_0)_{i,j} = \\psi (\\eta H(\\lambda_i + \\lambda_j))\\Sigma_{0,i,j}$ . 
Note that $\\Psi (\\pmb{\\theta})$ can also be written as", + "bbox": [ + 169, + 642, + 823, + 686 + ], + "page_idx": 53 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {v e c} (\\boldsymbol {\\Psi} (\\boldsymbol {\\theta})) = \\psi (\\eta H (\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) \\oplus \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}))) \\operatorname {v e c} (\\boldsymbol {\\Sigma} (\\boldsymbol {\\theta})),\n$$\n", + "text_format": "latex", + "bbox": [ + 315, + 688, + 679, + 705 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "where $\\oplus$ denotes the Kronecker sum $A\\oplus B = A\\otimes I_d + I_d\\otimes B$ , $\\operatorname{vec}(\\cdot)$ is the vectorization operator of a matrix and $\\psi (\\cdot)$ is interpreted as a matrix function.", + "bbox": [ + 169, + 708, + 823, + 738 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Now we are ready to present the result about the moments of $\\hat{\\phi}^{(s + R_{\\mathrm{grp}})} - \\hat{\\phi}^{(s)}$", + "bbox": [ + 169, + 747, + 694, + 763 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Theorem K.2. For $s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}} - R_{\\mathrm{grp}}$ and $0 < \\beta < 0.5$ , the first and second moments of $\\hat{\\phi}^{(s + R_{\\mathrm{grp}})} - \\hat{\\phi}^{(s)}$ are as follows:", + "bbox": [ + 169, + 766, + 826, + 797 + ], + "page_idx": 53 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} \\left[ \\hat {\\phi} ^ {(s + R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(s)} \\mid \\hat {\\phi} ^ {(s)}, \\mathcal {E} _ {0} ^ {(s)} \\right] = \\frac {\\eta^ {1 - \\beta}}{2 B} \\partial^ {2} \\Phi \\left(\\hat {\\phi} ^ {(s)}\\right) \\left[ \\boldsymbol {\\Sigma} \\left(\\hat {\\phi} ^ {(s)}\\right) + (K - 1) \\Psi \\left(\\hat {\\phi} ^ {(s)}\\right) \\right] \\tag {70} \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - 2 \\beta}) + \\tilde {\\mathcal {O}} (\\eta), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 800, + 823, + 852 + ], + "page_idx": 53 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\left(\\hat {\\phi} ^ {(s + R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(s)}\\right) \\left(\\hat {\\phi} ^ {(s + R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(s)}\\right) ^ {\\top} \\mid \\hat {\\phi} ^ {(s)}, \\mathcal {E} _ {0} ^ {(s)} \\right] = \\frac {\\eta^ {1 - \\beta}}{B} \\Sigma_ {\\|} \\left(\\hat {\\phi} ^ {(s)}\\right) + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 2 \\beta}\\right) + \\tilde {\\mathcal {O}} (\\eta), \\tag {71}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 858, + 823, + 902 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "where $\\tilde{\\mathcal{O}} (\\cdot)$ hides log terms and constants independent of $\\eta$", + "bbox": [ + 169, + 907, + 565, + 925 + ], + "page_idx": 53 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 53 + }, + { + "type": "page_number", + "text": "54", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Remark K.2. By Theorem K.1 and the definition of $\\hat{\\pmb{\\theta}}_{k,t}^{(s)}$ , (70) and (71) still hold when we replace $\\hat{\\phi}^{(s)}$ with $\\phi^{(s)}$ and replace $\\hat{\\phi}^{(s + R_{\\mathrm{grp}})}$ with $\\phi^{(s + R_{\\mathrm{grp}})}$ .", + "bbox": [ + 169, + 102, + 823, + 138 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "We shall have Theorem K.2 if we prove the following theorem, which directly gives Theorem K.2 with a simple shift of index. 
For brevity, denote by $\\Delta \\hat{\\phi}^{(s)}\\coloneqq \\hat{\\phi}^{(s)} - \\hat{\\phi}^{(0)}$ $\\Sigma_0\\coloneqq \\Sigma (\\hat{\\phi}^{(0)})$ $\\Sigma_{0,\\parallel}\\coloneqq \\Sigma_{\\parallel}(\\hat{\\phi}^{(0)})$", + "bbox": [ + 169, + 148, + 823, + 198 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Theorem K.3. Given $\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\phi}^{(0)}\\|_2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})$ , for $0 < \\beta < 0.5$ , the first and second moments of $\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})}$ are as follows:", + "bbox": [ + 169, + 203, + 825, + 244 + ], + "page_idx": 54 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\Delta \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})} ] = \\frac {\\eta^ {1 - \\beta}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {\\Sigma} _ {0} + (K - 1) \\pmb {\\Psi} (\\hat {\\phi} ^ {(0)}) ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 2 \\beta}) + \\tilde {\\mathcal {O}} (\\eta),\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 250, + 803, + 281 + ], + "page_idx": 54 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\Delta \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})} \\Delta \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}}) ^ {\\top}} ] = \\frac {\\eta^ {1 - \\beta}}{B} \\Sigma_ {0, \\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 1. 5 \\beta}) + \\tilde {\\mathcal {O}} (\\eta).\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 282, + 609, + 313 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "We will prove Theorem K.3 in the remainder of this subsection. For convenience, we introduce more notations that will be used throughout the proof. Let $\\pmb{H}_0 \\coloneqq \\nabla^2\\mathcal{L}(\\hat{\\phi}^{(0)})$ . By Assumption 3.2, $\\mathrm{rank}(H_0) = m$ . 
WLOG, assume $H_0 = \\mathrm{diag}(\\lambda_1,\\dots ,\\lambda_d)\\in \\mathbb{R}^{d\\times d}$ , where $\\lambda_{i} = 0$ for all $m < i\\leq d$ and $\\lambda_{1}\\geq \\lambda_{2}\\dots \\geq \\lambda_{m}$ . By Lemma K.2, $\\partial \\Phi (\\hat{\\phi}^{(0)})$ is the projection matrix onto the tangent space $T_{\\hat{\\phi}^{(0)}}(\\Gamma)$ (i.e. the null space of $\\nabla^2\\mathcal{L}(\\hat{\\phi}^{(0)})$ ) and therefore, $\\partial \\Phi (\\hat{\\phi}^{(0)}) = \\left[ \\begin{array}{cc}0 & 0\\\\ 0 & I_{d - m} \\end{array} \\right]$ . Let $P_{\\parallel}\\coloneqq \\partial \\Phi (\\hat{\\phi}^{(0)})$ and $P_{\\perp}\\coloneqq I_d - P_{\\parallel}$ .", + "bbox": [ + 169, + 325, + 823, + 436 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Let $\\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s)} := \\mathbb{E}[\\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)}\\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)\\top}], \\hat{\\pmb{q}}_t^{(s)} := \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}]$ and $\\hat{\\pmb{B}}_t^{(s)} := \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}]$ . The latter two notations are independent of $k$ since $\\hat{\\pmb{\\theta}}_{1,t}^{(s)}, \\dots, \\hat{\\pmb{\\theta}}_{K,t}^{(s)}$ are identically distributed. The following lemma computes the first and second moments of the change of manifold projection every round.", + "bbox": [ + 169, + 443, + 823, + 497 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Lemma K.27. 
Given $\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\pmb{\\phi}}^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})$ , for $0\\leq s < R_{\\mathrm{grp}}$ , the first and second moments of $\\hat{\\phi}^{(s + 1)} - \\hat{\\phi}^{(s)}$ are as follows:", + "bbox": [ + 169, + 502, + 823, + 542 + ], + "page_idx": 54 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} \\right] = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\tag {72}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 547, + 823, + 575 + ], + "page_idx": 54 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\left(\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}\\right) \\left(\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}\\right) ^ {\\top} \\right] = P _ {\\|} \\hat {A} _ {\\text {a v g}} ^ {(s)} P _ {\\|} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right). \\tag {73}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 578, + 823, + 598 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Proof. 
By Taylor expansion, we have", + "bbox": [ + 171, + 611, + 423, + 626 + ], + "page_idx": 54 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\phi} ^ {(s + 1)} = \\Phi (\\hat {\\phi} ^ {(s)} + \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)}) \\\\ = \\hat {\\phi} ^ {(s)} + \\partial \\Phi (\\hat {\\phi} ^ {(s)}) \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(s)}) [ \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s) \\top} ] + \\mathcal {O} (\\| \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}) \\\\ = \\hat {\\phi} ^ {(s)} + \\partial \\Phi (\\hat {\\phi} ^ {(0)} + \\Delta \\hat {\\phi} ^ {(s)}) \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)} + \\Delta \\hat {\\phi} ^ {(s)}) [ \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s) \\top} ] \\\\ + \\mathcal {O} (\\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}) \\\\ = \\hat {\\phi} ^ {(s)} + P _ {\\parallel} \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s)} \\Delta \\hat {\\phi} ^ {(s) \\top} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s) \\top} ] \\\\ + \\mathcal {O} (\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} + \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} + \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 633, + 790, + 796 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Rearrange the terms and we obtain:", + "bbox": [ + 171, + 801, + 406, + 816 + ], + "page_idx": 54 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\Delta \\hat {\\phi} ^ {(s) \\top} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s) \\top} ] \\tag {74} \\\\ + \\mathcal {O} \\left(\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} + \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} + \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}\\right). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 824, + 823, + 875 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Moreover,", + "bbox": [ + 171, + 878, + 243, + 893 + ], + "page_idx": 54 + }, + { + "type": "equation", + "text": "\n$$\n(\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ^ {\\top} = P _ {\\|} \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s) \\top} P _ {\\|} + \\mathcal {O} (\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2}). 
\\tag {75}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 900, + 823, + 922 + ], + "page_idx": 54 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 54 + }, + { + "type": "page_number", + "text": "55", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Noticing that $\\hat{\\pmb{x}}_{k,H}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}$ are identically distributed for all $k\\in [K]$ , we have $\\mathbb{E}[\\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}] = \\frac{1}{K}\\sum_{k\\in [K]}\\mathbb{E}[\\hat{\\pmb{x}}_{k,H}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}] = \\hat{\\pmb{B}}_H^{(s)}$ . Then taking expectation of both sides of (74) gives", + "bbox": [ + 169, + 101, + 823, + 142 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] \\\\ + \\mathcal {O} (\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ] + \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} ] + \\mathbb {E} [ \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3} ]). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 154, + 812, + 205 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Again taking expectation of both sides of (75) yields", + "bbox": [ + 171, + 214, + 524, + 231 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ^ {\\top} ] = P _ {\\parallel} \\hat {A} _ {\\mathrm {a v g}} ^ {(s)} P _ {\\parallel} + \\mathcal {O} (\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} ]).\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 239, + 779, + 263 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "By Lemmas K.22 and K.23, the following holds simultaneously with probability at least $1 - \\eta^{100}$ :", + "bbox": [ + 169, + 272, + 816, + 289 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - 0. 5 \\beta}), \\quad \\| \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5}).\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 299, + 674, + 321 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Furthermore, since for all $k \\in [K]$ and $(s,t) \\preceq (R_{\\mathrm{tot}},0)$ , $\\hat{\\pmb{\\theta}}_{k,t}^{(s)}$ stays in $\\Gamma^{\\epsilon_2}$ which is a bounded set, $\\| \\Delta \\hat{\\phi}^{(s)}\\| _2$ and $\\| \\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)}\\| _2$ are also bounded. Therefore, we have", + "bbox": [ + 169, + 334, + 823, + 375 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}), \\tag {76}\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 385, + 823, + 405 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\tag {77}\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 407, + 823, + 429 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5}), \\tag {78}\n$$\n", + "text_format": "latex", + "bbox": [ + 428, + 431, + 823, + 453 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "which concludes the proof.", + "bbox": [ + 171, + 462, + 352, + 478 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "We compute $\\hat{A}_{\\mathrm{avg}}^{(s)}, \\hat{q}_t^{(s)}$ and $\\hat{B}_t^{(s)}$ by solving a set of recursions, which is formulated in the following lemma. Additionally, define $\\hat{A}_t^{(s)} \\coloneqq \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}\\hat{\\pmb{x}}_{k,t}^{(s)\\top}]$ and $\\hat{M}_t^{(s)} \\coloneqq \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}\\hat{\\pmb{x}}_{k,l}^{(s)}], (k \\neq l)$ .", + "bbox": [ + 169, + 505, + 823, + 542 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Lemma K.28. 
Given $\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\phi}^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})$ , for $0\\leq s < R_{\\mathrm{grp}}$ and $0\\leq t < H$ , we have the following recursions.", + "bbox": [ + 169, + 549, + 826, + 585 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {q}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {q}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {q}} _ {t} ^ {(s)} - \\eta \\nabla^ {3} \\mathcal {L} \\left(\\phi^ {(0)}\\right) \\left[ \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} \\right] - \\frac {\\eta}{2} \\nabla^ {3} \\mathcal {L} \\left(\\phi^ {(0)}\\right) \\left[ \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\right] + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - \\beta}\\right), \\tag {79}\n$$\n", + "text_format": "latex", + "bbox": [ + 217, + 597, + 823, + 622 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {A}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} _ {0} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}), \\tag {80}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 625, + 823, + 657 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {M}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - 0. 
5 \\beta}\\right), \\tag {81}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 660, + 823, + 679 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {B}} _ {t + 1} ^ {(s)} = \\left(\\boldsymbol {I} - \\eta \\boldsymbol {H} _ {0}\\right) \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - \\beta}\\right). \\tag {82}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 683, + 823, + 702 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Moreover,", + "bbox": [ + 171, + 712, + 243, + 724 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} = \\frac {1}{K} \\hat {\\boldsymbol {A}} _ {H} ^ {(s)} + (1 - \\frac {1}{K}) \\hat {\\boldsymbol {M}} _ {H} ^ {(s)}, \\tag {83}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 737, + 823, + 766 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {M}} _ {0} ^ {(s + 1)} = \\hat {\\boldsymbol {A}} _ {0} ^ {(s + 1)} = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} \\boldsymbol {P} _ {\\perp} + \\mathcal {O} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right), \\tag {84}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 768, + 823, + 787 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {q}} _ {0} ^ {(s + 1)} = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} - \\partial^ {2} \\Phi (\\phi^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\partial^ {2} \\Phi (\\phi^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}), \\tag {85}\n$$\n", + "text_format": "latex", + "bbox": [ + 258, + 790, + 823, + 818 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {B}} _ {0} ^ {(s + 1)} = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} + \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {\\text {a v g}} ^ {(s)} \\boldsymbol {P} _ {\\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\tag {86}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 821, + 823, + 840 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Proof. We first derive the recursion for $\\hat{\\pmb{q}}_t^{(s)}$ . Recall the update rule for $\\hat{\\pmb{\\theta}}_{k,t}^{(s)}$ :", + "bbox": [ + 171, + 868, + 673, + 888 + ], + "page_idx": 55 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {\\theta}} _ {k, t + 1} ^ {(s)} = \\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 898, + 645, + 921 + ], + "page_idx": 55 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 55 + }, + { + "type": "page_number", + "text": "56", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Subtracting $\\hat{\\phi}^{(s)}$ from both sides gives", + "bbox": [ + 171, + 102, + 429, + 119 + ], + "page_idx": 56 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {x}} _ {k, t + 1} ^ {(s)} = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}) \\\\ = \\hat {\\boldsymbol {x}} _ 
{k, t} ^ {(s)} - \\eta \\left(\\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {\\phi}} ^ {(s)}) \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} + \\frac {1}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\boldsymbol {\\phi}} ^ {(s)}) [ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s) \\top} ] + \\mathcal {O} (\\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3})\\right) \\\\ - \\eta z _ {k, t} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}) \\\\ = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\left(\\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {\\phi}} ^ {(0)}\\right) + \\nabla^ {3} \\mathcal {L} \\left(\\hat {\\boldsymbol {\\phi}} ^ {(0)}\\right) \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\mathcal {O} \\left(\\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| ^ {2}\\right)\\right) \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\\\ - \\frac {\\eta}{2} \\left(\\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) + \\mathcal {O} \\left(\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2}\\right)\\right) \\left[ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k t} ^ {(s) \\top} \\right] - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\mathcal {O} \\left(\\eta \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}\\right) \\\\ = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\Delta \\hat {\\phi} ^ {(s) \\top} ] - \\frac {\\eta}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s) \\top} ] - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\\\ + \\mathcal {O} (\\eta \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} + \\eta \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} 
\\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} + \\eta \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}), \\tag {87} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 128, + 821, + 315 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "where the second and third equality perform Taylor expansion. Taking expectation on both sides gives", + "bbox": [ + 169, + 324, + 823, + 353 + ], + "page_idx": 56 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\pmb {q}} _ {t + 1} ^ {(s)} = (\\pmb {I} - \\eta \\pmb {H} _ {0}) \\hat {\\pmb {q}} _ {t} ^ {(s)} - \\eta \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {q}} _ {t} ^ {(s)} ] - \\frac {\\eta}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {A}} _ {t} ^ {(s)} ] \\\\ + \\mathcal {O} \\left(\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} ] + \\eta \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} ] + \\eta \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ] + \\mathbb {E} [ \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]\\right). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 363, + 803, + 415 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "By Theorem K.1, with probability at least $1 - \\eta^{100}$ , $\\hat{e}_{k,t}^{(s)} = \\mathbf{0}, \\forall k \\in [K], (s,t) \\preceq (R_{\\mathrm{grp}},0)$ . Also notice that both $\\hat{\\theta}_{k,t}^{(s)}$ and $\\phi_{\\mathrm{null}}$ belong to the bounded set $\\Gamma^{\\epsilon_2}$ . Therefore, $\\| \\hat{e}_{k,t}^{(s)} \\|_2$ is bounded and we have $\\mathbb{E}[\\| \\hat{e}_{k,t}^{(s)} \\|_2] = \\mathcal{O}(\\eta^{100})$ . 
Combining this with (76) to (78) yields (79).", + "bbox": [ + 169, + 426, + 823, + 484 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Secondly, we derive the recursion for $\\hat{B}_t^{(s)}$ . Multiplying both sides of (87) by $\\Delta \\hat{\\phi}^{(s)\\top}$ and taking expectation, we have", + "bbox": [ + 169, + 494, + 823, + 525 + ], + "page_idx": 56 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {B}} _ {t + 1} ^ {(s)} = (\\boldsymbol {I} - \\eta \\boldsymbol {H} _ {0}) \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} + \\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]).\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 534, + 782, + 555 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Still by Theorem K.1 and (76) to (78), we have (82).", + "bbox": [ + 171, + 564, + 517, + 579 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Thirdly, we derive the recursion for $\\hat{A}_t^{(s)}$ . 
By (87), we have", + "bbox": [ + 171, + 587, + 565, + 604 + ], + "page_idx": 56 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {A}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} _ {0} + \\mathcal {O} (\\eta^ {2} \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ]) \\\\ + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} + \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]) \\\\ = (\\boldsymbol {I} - \\eta \\boldsymbol {H} _ {0}) \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} _ {0} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 614, + 772, + 704 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "which establishes (80).", + "bbox": [ + 171, + 712, + 326, + 727 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Fourthly, we derive the recursion for $\\hat{M}_t^{(s)}$ . 
Multiplying both sides of (87) by $\\hat{\\pmb{x}}_{l,t + 1}^{(s)}$ and taking expectation, $l\\neq k$ , we obtain", + "bbox": [ + 169, + 734, + 823, + 767 + ], + "page_idx": 56 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {M}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} \\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| _ {2} ]) \\\\ + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 777, + 759, + 821 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "By a similar argument to the proof of Lemma K.27, we have", + "bbox": [ + 171, + 830, + 571, + 845 + ], + "page_idx": 56 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5}),\n$$\n", + "text_format": "latex", + "bbox": [ + 411, + 857, + 614, + 876 + ], + "page_idx": 56 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\right] = \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 0. 
5 \\beta}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 880, + 648, + 898 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "which yields (81).", + "bbox": [ + 171, + 909, + 294, + 924 + ], + "page_idx": 56 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 56 + }, + { + "type": "page_number", + "text": "57", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 56 + }, + { + "type": "text", + "text": "Now we proceed to prove (83) to (86). By definition of $\\hat{A}_{\\mathrm{avg}}^{(s)}$", + "bbox": [ + 169, + 101, + 578, + 119 + ], + "page_idx": 57 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} = \\frac {1}{K ^ {2}} \\mathbb {E} [ (\\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)}) (\\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)}) ^ {\\top} ] \\\\ = \\frac {1}{K ^ {2}} \\sum_ {k \\in [ K ]} \\mathbb {E} \\left[ \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s) \\top} \\right] + \\frac {1}{K ^ {2}} \\sum_ {k, l \\in [ K ], k \\neq l} \\mathbb {E} \\left[ \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {l, H} ^ {(s) \\top} \\right] \\\\ = \\frac {1}{K} \\hat {\\boldsymbol {A}} _ {H} ^ {(s)} + (1 - \\frac {1}{K}) \\hat {\\boldsymbol {M}} _ {H} ^ {(s)}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 281, + 133, + 714, + 244 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "which demonstrates (83). Then we derive (84). 
By definition of $\\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s + 1)}$", + "bbox": [ + 171, + 258, + 643, + 281 + ], + "page_idx": 57 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} = \\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} - \\Phi \\big (\\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)}\\big) \\\\ = \\hat {\\phi} ^ {(s)} + \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} - \\left(\\hat {\\phi} ^ {(s)} + \\partial \\Phi (\\hat {\\phi} ^ {(s)}) \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ^ {2})\\right) \\\\ = \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} - \\left(\\pmb {P} _ {\\parallel} + \\mathcal {O} (\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2})\\right) \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2}) \\\\ = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} + \\mathcal {O} \\left(\\left\\| \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\right\\| _ {2} ^ {2} + \\left\\| \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\right\\| _ {2} \\left\\| \\Delta \\hat {\\phi} ^ {(s)} \\right\\| _ {2}\\right). 
\\tag {88} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 294, + 823, + 396 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "Hence,", + "bbox": [ + 171, + 407, + 222, + 422 + ], + "page_idx": 57 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {M}} _ {0} ^ {(s + 1)} = \\hat {\\boldsymbol {A}} _ {0} ^ {(s + 1)} = \\mathbb {E} [ \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s)} \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s) \\top} ] \\\\ = \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\perp} + \\mathcal {O} (\\mathbb {E} [ \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3} + \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ]). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 436, + 725, + 482 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "By (76) and (78), we obtain (84). By (74),", + "bbox": [ + 171, + 494, + 452, + 511 + ], + "page_idx": 57 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} = P _ {\\|} \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} + \\mathcal {O} \\left(\\left\\| \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\right\\| _ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} + \\| \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ^ {2}\\right). 
\\tag {89}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 525, + 823, + 547 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "Combining (88) and (89) gives", + "bbox": [ + 171, + 561, + 380, + 577 + ], + "page_idx": 57 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\hat {\\pmb {x}} _ {\\mathrm {a v g}, 0} ^ {(s)} (\\hat {\\pmb {\\phi}} ^ {(s + 1)} - \\hat {\\pmb {\\phi}} ^ {(s)}) ^ {\\top} ] = \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 299, + 590, + 696, + 613 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 625, + 245, + 640 + ], + "page_idx": 57 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\boldsymbol {B}} _ {0} ^ {(s + 1)} = \\mathbb {E} [ \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s + 1) \\top} ] = \\mathbb {E} [ \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} (\\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\hat {\\boldsymbol {\\phi}} ^ {(s + 1)} - \\hat {\\boldsymbol {\\phi}} ^ {(s)}) ^ {\\top} ] \\\\ = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} + \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} \\boldsymbol {P} _ {\\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 654, + 733, + 699 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "Finally, we apply Lemma K.27 to derive (85).", + "bbox": [ + 171, + 710, + 475, + 727 + ], + "page_idx": 57 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {\\pmb {q}} _ {0} ^ {(s + 1)} = \\mathbb {E} [ \\hat {\\pmb {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} ] = \\mathbb {E} [ \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} - (\\hat {\\pmb {\\phi}} ^ {(s + 1)} - \\hat {\\pmb {\\phi}} ^ {(s)}) ] \\\\ = \\hat {\\pmb {q}} _ {H} ^ {(s)} - \\pmb {P} _ {\\parallel} \\hat {\\pmb {q}} _ {H} ^ {(s)} - \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = \\pmb {P} _ {\\perp} \\hat {\\pmb {q}} _ {H} ^ {(s)} - \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 741, + 761, + 824 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "which concludes the proof.", + "bbox": [ + 171, + 838, + 354, + 853 + ], + "page_idx": 57 + }, + { + "type": "image", + "img_path": "images/d4dcbf8df32884e9d32fcdd59e3a618e31cb9e1d7d85998673d0924bf5a9fe95.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 838, + 825, + 849 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "With the assumption that the hessian at $\\hat{\\phi}^{(0)}$ is diagonal, we have the following corollary that formulates the recursions for each matrix element.", + "bbox": [ + 169, + 893, + 823, + 922 + ], + "page_idx": 57 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 57 + }, + { + "type": "page_number", + "text": "58", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "Corollary K.2. Given $\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\phi}^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})$ , for $0\\leq s < R_{\\mathrm{grp}}$ and $0\\leq t < H$ , we have the following elementwise recursions.", + "bbox": [ + 169, + 102, + 823, + 138 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {t + 1, i, j} ^ {(s)} = \\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) \\hat {A} _ {t, i, j} ^ {(s)} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - 0. 
5 \\beta}\\right), \\tag {90}\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 152, + 825, + 184 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {M} _ {t + 1, i, j} ^ {(s)} = \\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) \\hat {M} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - 0. 5 \\beta}\\right), \\tag {91}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 186, + 823, + 207 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {t + 1, i, j} ^ {(s)} = \\left(1 - \\lambda_ {i} \\eta\\right) \\hat {B} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - \\beta}\\right), \\tag {92}\n$$\n", + "text_format": "latex", + "bbox": [ + 253, + 210, + 823, + 231 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {\\text {a v g}, i, j} ^ {(s)} = \\frac {1}{K} \\left(\\hat {A} _ {H, i, j} ^ {(s)} - \\hat {M} _ {H, i, j} ^ {(s)}\\right) + \\hat {M} _ {H, i, j} ^ {(s)}, \\tag {93}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 233, + 823, + 261 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {M} _ {0, i, j} ^ {(s + 1)} = \\hat {A} _ {0, i, j} ^ {(s + 1)} = \\left\\{ \\begin{array}{l l} \\hat {A} _ {\\text {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), & 1 \\leq i \\leq m, 1 \\leq j \\leq m, \\\\ \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {94}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 263, + 823, + 303 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {0, i, j} ^ {(s + 1)} = \\left\\{ \\begin{array}{l l} \\hat {B} _ {H, i, j} ^ {(s)} + \\hat {A} _ {\\text {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 
5 - \\beta}\\right), & 1 \\leq i \\leq m, m < j \\leq d, \\\\ \\hat {B} _ {H, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & 1 \\leq i \\leq m, 1 \\leq j \\leq m, \\\\ \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & m < i \\leq d. \\end{array} \\right. \\tag {95}\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 306, + 823, + 362 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "Having formulated the recursions, we are ready to solve out the explicit expressions. We will split each matrix into four parts and compute them one by one. Specifically, a matrix $M$ can be split into $P_{\\parallel}MP_{\\parallel}$ in the tangent space of $\\Gamma$ at $\\hat{\\phi}^{(0)}, P_{\\perp}MP_{\\perp}$ in the normal space, along with $P_{\\parallel}MP_{\\perp}$ and $P_{\\perp}MP_{\\parallel}$ across both spaces.", + "bbox": [ + 169, + 382, + 823, + 444 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "We first compute the elements of $P_{\\perp}\\hat{A}_{t}^{(s)}P_{\\perp}$ and $P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}$ .", + "bbox": [ + 171, + 449, + 588, + 470 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "Lemma K.29 (General formula for $P_{\\perp}\\hat{A}_{t}^{(s)}P_{\\perp}$ and $P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}$ ). Let $R_0 \\coloneqq \\lceil \\frac{10}{\\lambda_m\\alpha}\\log \\frac{1}{\\eta}\\rceil$ . Then for $1\\leq i\\leq m,1\\leq j\\leq m$ and $R_0\\leq s < R_{\\mathrm{grp}}$", + "bbox": [ + 166, + 474, + 823, + 511 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 196, + 521, + 545, + 555 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {(1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\frac {\\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 556, + 799, + 592 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "For $s < R_0$ , $\\hat{A}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)$ and $\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)$ .", + "bbox": [ + 171, + 603, + 501, + 625 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "Proof. For $1 \\leq i \\leq m, 1 \\leq j \\leq m, \\lambda_i > 0, \\lambda_j > 0$ . By (90),", + "bbox": [ + 169, + 657, + 573, + 674 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {A} _ {t, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\sum_ {\\tau = 0} ^ {t - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {\\tau} \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} \\\\ + \\tilde {\\mathcal {O}} (\\sum_ {\\tau = 0} ^ {t - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {\\tau} \\eta^ {2. 5 - 0. 5 \\beta}) \\\\ = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 686, + 771, + 808 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "where the second inequality uses $\\sum_{\\tau = 0}^{t - 1}(1 - (\\lambda_i + \\lambda_j)\\eta)^\\tau = \\frac{1 - (1 - (\\lambda_i + \\lambda_j)\\eta)^t}{(\\lambda_i + \\lambda_j)\\eta}\\leq \\frac{1}{(\\lambda_i + \\lambda_j)\\eta}$ . By (91),", + "bbox": [ + 171, + 820, + 823, + 845 + ], + "page_idx": 58 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {M} _ {t, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {M} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\sum_ {\\tau = 0} ^ {t - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {\\tau} \\eta^ {2. 5 - 0. 5 \\beta}\\right) \\\\ = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 857, + 736, + 922 + ], + "page_idx": 58 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 58 + }, + { + "type": "page_number", + "text": "59", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 58 + }, + { + "type": "text", + "text": "where the second equality uses $M_0^{(s + 1)} = A_0^{(s + 1)}$ . By (93) and (94),", + "bbox": [ + 169, + 99, + 629, + 119 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H} \\hat {A} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 209, + 123, + 785, + 157 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {A} _ {0, i, j} ^ {(s + 1)} = \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}) \\\\ = \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H} \\hat {A} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 159, + 785, + 217 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Then we obtain", + "bbox": [ + 171, + 220, + 277, + 233 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {A} _ {0, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H} \\hat {A} _ {0, i, j} ^ {(0)} + \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} \\sum_ {r = 0} ^ {s - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H} \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta} \\sum_ {r = R _ {0}} ^ {s - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 237, + 805, + 321 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Notice that $|1 - (\\lambda_i + \\lambda_j)\\eta | < 1$ and", + "bbox": [ + 171, + 325, + 419, + 342 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) ^ {H} \\leq \\exp \\left(- \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta H\\right) = \\exp \\left(- \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\alpha\\right). 
\\tag {96}\n$$\n", + "text_format": "latex", + "bbox": [ + 277, + 345, + 823, + 364 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 367, + 243, + 380 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {r = 0} ^ {s - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H} = \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H}}{1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}} \\leq \\frac {1}{1 - \\exp (- (\\lambda_ {i} + \\lambda_ {j}) \\alpha)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 385, + 764, + 425 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Then we have", + "bbox": [ + 171, + 429, + 267, + 441 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {0, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H} \\hat {A} _ {0, i, j} ^ {(0)} + \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 209, + 444, + 785, + 479 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Finally, we demonstrate that for $s \\geq R_0$ , $\\hat{A}_{0,i,j}^{(s)}$ and $\\hat{A}_{\\mathrm{avg},i,j}^{(s)}$ are approximately equal to $\\frac{\\eta}{(\\lambda_i + \\lambda_j)KB_{\\mathrm{loc}}}\\Sigma_{0,i,j}$ . By (96), when $s \\geq R_0$ , $(1 - (\\lambda_i + \\lambda_j)\\eta)^{sH} = \\mathcal{O}(\\eta^{10})$ , which gives", + "bbox": [ + 169, + 484, + 823, + 523 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 196, + 525, + 545, + 556 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {(1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\frac {\\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 559, + 797, + 594 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "For $s < R_0$ , since $\\hat{A}_0^{(s)} = \\mathbb{E}[\\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s)}\\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s)\\top}] = \\tilde{\\mathcal{O}} (\\eta)$ , we have $\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)$ and $\\hat{A}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)$ .", + "bbox": [ + 169, + 595, + 823, + 619 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Secondly, we compute $P_{\\perp}\\hat{A}_t^{(s)}P_{\\parallel}$ and $P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel}$ .", + "bbox": [ + 169, + 633, + 513, + 654 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Lemma K.30 (General formula for $P_{\\perp}\\hat{A}_{t}^{(s)}P_{\\parallel}$ and $P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel})$ . For $1\\leq i\\leq m,m < j\\leq d,$", + "bbox": [ + 169, + 656, + 792, + 676 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {t, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 679, + 663, + 712 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 713, + 669, + 747 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Proof. Note that for $1 \\leq i \\leq m, m < j \\leq d$ and $\\lambda_i > 0, \\lambda_j = 0$ . By (90) and (94),", + "bbox": [ + 169, + 760, + 717, + 776 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {A} _ {t, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}) \\\\ = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 779, + 718, + 845 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "By (91) and (94), $\\hat{M}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{1.5 - 0.5\\beta})$ . Then,", + "bbox": [ + 171, + 849, + 491, + 869 + ], + "page_idx": 59 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 873, + 669, + 907 + ], + "page_idx": 59 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 59 + }, + { + "type": "page_number", + "text": "60", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 59 + }, + { + "type": "text", + "text": "Similar to Lemma K.30, we have the following lemma for the general formula of $P_{\\parallel} \\hat{A}_t^{(s)} P_{\\perp}$ and $P_{\\parallel} \\hat{A}_{\\mathrm{avg}}^{(s)} P_{\\perp}$ .", + "bbox": [ + 169, + 102, + 823, + 140 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Lemma K.31 (General formula for $P_{\\parallel}\\hat{A}_t^{(s)}P_\\perp$ and $P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}$ ). For $m < i \\leq d$ and $1 \\leq j \\leq m$ ,", + "bbox": [ + 169, + 143, + 818, + 165 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {t, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {j} \\eta) ^ {t}}{\\lambda_ {j} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 170, + 666, + 204 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {j} \\eta) ^ {H}}{\\lambda_ {j} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 205, + 671, + 242 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Finally, we derive the general formula for $P_{\\parallel}\\hat{A}_t^{(s)}P_{\\parallel}$ and $P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel}$ .", + "bbox": [ + 169, + 256, + 630, + 277 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Lemma K.32 (General formula for $P_{\\parallel}\\hat{A}_t^{(s)}P_{\\parallel}$ and $P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel}$ ). For $m < i \\leq d$ and $m < j \\leq d$ ,", + "bbox": [ + 169, + 280, + 813, + 301 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {H \\eta^ {2}}{K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 306, + 616, + 339 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {t, i, j} ^ {(s)} = \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {t \\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 340, + 656, + 375 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Proof. Note that for $m < i \\leq d$ , $m < j \\leq d$ and $\\lambda_i = \\lambda_j = 0$ . (90) is then simplified as", + "bbox": [ + 169, + 387, + 751, + 405 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {t + 1, i, j} ^ {(s)} = \\hat {A} _ {t, i, j} ^ {(s)} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 
5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 411, + 658, + 444 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 449, + 246, + 464 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {t, i, j} ^ {(s)} = \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {t \\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right). \\tag {97}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 470, + 823, + 503 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "According to (91), $\\hat{M}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{1.5 - 0.5\\beta})$ for $m < i\\leq d$ and $m < j\\leq d$ . Combining (91), (94) and (97) yields", + "bbox": [ + 169, + 511, + 823, + 542 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {H \\eta^ {2}}{K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 549, + 635, + 583 + ], + "page_idx": 60 + }, + { + "type": "image", + "img_path": "images/4f9db5562bba8d982c04450bdefd155a9b7b4bbdeb34727f4888a7895dc935c3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 589, + 823, + 601 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Now, we move on to compute the general formula for $\\hat{B}_t^{(s)}$ .", + "bbox": [ + 169, + 616, + 566, + 635 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Lemma K.33 (The general formula for $P_{\\perp} \\hat{B}_t^{(s)} P_{\\parallel}$ ). 
Note that for $1 \\leq i \\leq m$ and $m < j \\leq d$ , when $R_0 := \\lceil \\frac{10}{\\lambda_m \\alpha} \\log \\frac{1}{\\eta} \\rceil \\leq s < R_{\\mathrm{grp}}$ ,", + "bbox": [ + 169, + 638, + 823, + 676 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {t, i, j} ^ {(s)} = \\frac {(1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 683, + 637, + 715 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "For $s < R_0$ , $\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)$ .", + "bbox": [ + 171, + 722, + 357, + 744 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Proof. Note that for $1 \\leq i \\leq m$ , $\\lambda_i > 0$ . By (92),", + "bbox": [ + 169, + 757, + 501, + 773 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {t + 1, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) \\hat {B} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 779, + 635, + 801 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Hence,", + "bbox": [ + 171, + 806, + 223, + 820 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {t, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {t} \\hat {B} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 827, + 630, + 849 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "According to (95),", + "bbox": [ + 171, + 854, + 297, + 869 + ], + "page_idx": 60 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {B} _ {0, i, j} ^ {(s + 1)} = \\hat {B} _ {H, i, j} ^ {(s)} + \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 
5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {H} \\hat {B} _ {0, i, j} ^ {(s)} + \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 875, + 674, + 922 + ], + "page_idx": 60 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 60 + }, + { + "type": "page_number", + "text": "61", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 60 + }, + { + "type": "text", + "text": "Then we have", + "bbox": [ + 171, + 104, + 269, + 116 + ], + "page_idx": 61 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {B} _ {0, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} \\sum_ {r = 0} ^ {s - 1} (1 - \\lambda_ {i} \\eta) ^ {r H} + \\tilde {\\mathcal {O}} (\\sum_ {r = 0} ^ {s - 1} (1 - \\lambda_ {i} \\eta) ^ {r H} \\eta^ {1. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {s H}}{1 - (1 - \\lambda_ {i} \\eta) ^ {H}} \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {s H}}{1 - (1 - \\lambda_ {i} \\eta) ^ {H}} \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 121, + 779, + 234 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "where the second equality uses (96) and the last equality uses $\\hat{B}_0^{(0)} = \\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(0)}\\Delta \\hat{\\phi}^{(0)} = \\mathbf{0}$ . 
For $s\\geq R_0$ , $\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\frac{1 - (1 - \\lambda_i\\eta)^H}{\\lambda_iKB_{\\mathrm{loc}}} \\eta \\Sigma_{0,i,j} + \\tilde{\\mathcal{O}} (\\eta^{1.5 - 0.5\\beta})$ , which gives", + "bbox": [ + 169, + 239, + 826, + 282 + ], + "page_idx": 61 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {0, i, j} ^ {(s)} = \\frac {\\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 286, + 627, + 315 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 319, + 246, + 332 + ], + "page_idx": 61 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {t, i, j} ^ {(s)} = \\frac {(1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 337, + 637, + 369 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "For $s < R_0$ , $\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)$ and therefore, $\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)$ .", + "bbox": [ + 169, + 375, + 570, + 397 + ], + "page_idx": 61 + }, + { + "type": "image", + "img_path": "images/32b84bff80b5f374e13d7d220662f423fc6d255fd2589d0d27ef5bad9a6bcc3f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 378, + 825, + 391 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Lemma K.34 (General formula for the elements of $P_{\\perp} \\hat{B}_t^{(s)} P_{\\perp}$ ). For $1 \\leq i \\leq m$ and $1 \\leq j \\leq m$ , $\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}}(\\eta^{1.5 - \\beta})$ .", + "bbox": [ + 169, + 406, + 823, + 444 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Proof. Note that for $1 \\leq i \\leq m$ , $\\lambda_i > 0$ . 
By (92),", + "bbox": [ + 169, + 455, + 503, + 470 + ], + "page_idx": 61 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {t + 1, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) \\hat {B} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 476, + 635, + 498 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Hence,", + "bbox": [ + 171, + 501, + 223, + 515 + ], + "page_idx": 61 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {t, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {t} \\hat {B} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 520, + 630, + 542 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "By (95),", + "bbox": [ + 171, + 544, + 230, + 559 + ], + "page_idx": 61 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {B} _ {0, i, j} ^ {(s + 1)} = \\hat {B} _ {H, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {H} \\hat {B} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\tilde {\\mathcal {O}} (\\sum_ {r = 0} ^ {s - 1} (1 - \\lambda_ {i} \\eta) ^ {r H} \\eta^ {1. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 564, + 692, + 696 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "where the last equality uses $\\hat{B}_0^{(0)} = 0$.", + "bbox": [ + 169, + 703, + 441, + 722 + ], + "page_idx": 61 + }, + { + "type": "image", + "img_path": "images/52eb9e3b58cb8b8311c94d5e68838c18fdd54aa2bca9b33745bd0a2ca977a4ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 705, + 825, + 719 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Lemma K.35 (General formula for $P_{\\parallel}\\hat{B}_t^{(s)}$ ). For $m < i \\leq d$ , $\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{1.5 - \\beta})$", + "bbox": [ + 169, + 729, + 718, + 750 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Proof. Note that $\\lambda_{i} = 0$ for $m < i\\leq d$ . By (92) and (95),", + "bbox": [ + 169, + 763, + 557, + 779 + ], + "page_idx": 61 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {t + 1} ^ {(s)} = \\hat {B} _ {t} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}), \\quad \\hat {B} _ {0} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 782, + 665, + 804 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 808, + 246, + 821 + ], + "page_idx": 61 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {B} _ {t} ^ {(s)} = t \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}) + \\hat {B} _ {0} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 827, + 638, + 848 + ], + "page_idx": 61 + }, + { + "type": "image", + "img_path": "images/b78b55b4a4a343eca118a828771d231d6eabb1f549175c74f9751c0664dd16df.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 852, + 825, + 864 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Having obtained the expressions for $\\hat{B}_t^{(s)}$ , $\\hat{A}_t^{(s)}$ and $\\hat{A}_{\\mathrm{avg}}^{(s)}$ , we now provide explicit expressions for the first and second moments of the change of manifold projection every round in the following two lemmas.", + "bbox": [ + 169, + 878, + 826, + 922 + ], + "page_idx": 61 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 61 + }, + { + "type": "page_number", + "text": "62", + "bbox": [ + 488, + 948, + 509, + 960 + ], + "page_idx": 61 + }, + { + "type": "text", + "text": "Lemma K.36. The expectation of the change of manifold projection every round is", + "bbox": [ + 169, + 103, + 720, + 119 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} \\right] = \\left\\{ \\begin{array}{l l} \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {\\Sigma} _ {0} + \\boldsymbol {\\Psi} \\left(\\hat {\\phi} ^ {(0)}\\right) \\right] + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & R _ {0} < s < R _ {\\mathrm {g r p}}, \\\\ \\tilde {\\mathcal {O}} (\\eta), & s \\leq R _ {0} \\end{array} , \\right. 
\\tag {98}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 125, + 823, + 167 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "where $R_0 \\coloneqq \\left\\lceil \\frac{10}{\\lambda_m \\alpha} \\log \\frac{1}{\\eta} \\right\\rceil$ .", + "bbox": [ + 171, + 172, + 354, + 193 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Proof. We first compute $\\mathbb{E}[\\hat{\\phi}^{(s + 1)} - \\hat{\\phi}^{(s)}]$ . By (72), we only need to compute $P_{\\parallel}\\hat{q}_H^{(s)}$ by relating it to these matrices. Multiplying both sides of (79) by $P_{\\parallel}$ gives", + "bbox": [ + 169, + 208, + 826, + 242 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {t + 1} ^ {(s)} = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {t} ^ {(s)} - \\eta \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} ] - \\frac {\\eta}{2} \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}). \\tag {99}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 248, + 825, + 273 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Similarly, according to (85), we have", + "bbox": [ + 171, + 279, + 419, + 294 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {0} ^ {(s + 1)} = - \\boldsymbol {P} _ {\\parallel} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\boldsymbol {P} _ {\\parallel} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\tag {100}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 301, + 825, + 330 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Combining (99) and (100) yields", + "bbox": [ + 171, + 335, + 392, + 351 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} = - \\frac {1}{2} \\boldsymbol {P} _ {\\parallel} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\text {a v g}} ^ {(s - 1)} ] - \\frac {\\eta}{2} \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\sum_ {t = 0} ^ {H - 1} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} ] \\tag {101} \\\\ - \\eta P _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t} ^ {(s)} ] - P _ {\\|} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {B} _ {H} ^ {(s - 1)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 217, + 358, + 823, + 443 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "By Lemmas K.29, K.32 and K.30, for $s \\leq R_0 = \\left\\lfloor \\frac{10}{\\lambda \\alpha} \\log \\frac{1}{\\eta} \\right\\rfloor$ , $\\hat{\\pmb{A}}_t^{(s)} = \\tilde{\\mathcal{O}}(\\eta)$ , $\\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s)} = \\tilde{\\mathcal{O}}(\\eta)$ and $\\hat{\\pmb{B}}_t^{(s)} = \\tilde{\\mathcal{O}}(\\eta)$ . Therefore, $\\mathbb{E}[\\hat{\\phi}^{(s+1)} - \\hat{\\phi}^{(s)}] = \\tilde{\\mathcal{O}}(\\eta)$ . For $s > R_0$ , $\\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s-1)} = \\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s)} + \\tilde{\\mathcal{O}}(\\eta^{1.5-0.5\\beta})$ . 
Substituting (101) into (72) gives", + "bbox": [ + 169, + 450, + 826, + 503 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{r l} & {\\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] = \\underbrace {\\frac {1}{2} P _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {A} _ {\\mathrm {a v g}} ^ {(s)} ] + P _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {B} _ {H} ^ {(s)} ]} _ {\\mathcal {T} _ {1}}} \\\\ & {\\qquad \\overbrace {- \\eta P _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\underbrace {\\frac {1}{2} \\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t} ^ {(s)} + \\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t} ^ {(s)} ]} _ {\\mathcal {T} _ {3}} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).} \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 508, + 754, + 641 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Below we compute $\\mathcal{T}_1$ and $\\mathcal{T}_2$ for $s > R_0$ respectively. 
By Lemma K.3,", + "bbox": [ + 171, + 646, + 642, + 662 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} ] = \\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\perp} ] = \\mathbf {0},\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 667, + 707, + 688 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} ] = \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} ].\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 689, + 651, + 710 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "By Lemma K.4,", + "bbox": [ + 171, + 715, + 282, + 729 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\perp} ] = \\mathbf {0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 737, + 609, + 757 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "Therefore, for $s > R_0$ ,", + "bbox": [ + 171, + 762, + 325, + 776 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] = \\frac {H \\eta^ {2}}{2 K B _ {\\mathrm {l o c}}} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) \\Phi [ \\boldsymbol {\\Sigma} _ {0, \\parallel} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 782, + 725, + 816 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "where we apply Lemma K.32. Similarly, for $s > R_0$", + "bbox": [ + 171, + 821, + 522, + 835 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {B}} _ {H} ^ {(s)} ] = \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {B}} _ {H} ^ {(s)} \\pmb {P} _ {\\parallel} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 843, + 702, + 864 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "where we apply Lemma K.35. Hence,", + "bbox": [ + 171, + 869, + 426, + 883 + ], + "page_idx": 62 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} _ {1} = \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0, \\parallel} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\tag {102}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 891, + 825, + 922 + ], + "page_idx": 62 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 62 + }, + { + "type": "page_number", + "text": "63", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 62 + }, + { + "type": "text", + "text": "We move on to show that", + "bbox": [ + 171, + 104, + 343, + 117 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} _ {2} = \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0} - \\boldsymbol {\\Sigma} _ {0, \\parallel} + (K - 1) \\boldsymbol {\\Psi} (\\hat {\\phi} ^ {(0)}) ]. 
\\tag {103}\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 122, + 825, + 154 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Similar to the way we compute $\\hat{A}_t^{(s)}$ , $\\hat{A}_{\\mathrm{avg}}^{(s)}$ and $\\hat{B}_t^{(s)}$ , we compute $\\mathcal{T}_2$ by splitting $\\mathcal{T}_3$ into four matrices and then substituting them into the linear operator $-\\eta P_{\\parallel}\\nabla^3\\mathcal{L}(\\hat{\\phi}^{(0)})[\\cdot ]$ one by one. First, we show that", + "bbox": [ + 169, + 161, + 826, + 205 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} - \\eta \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {P} _ {\\perp} \\mathcal {T} _ {3} \\boldsymbol {P} _ {\\perp} \\right] = \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {\\Sigma} _ {0, \\perp} + (K - 1) \\psi \\left(\\boldsymbol {\\Sigma} _ {0, \\perp}\\right) \\right] \\tag {104} \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 212, + 825, + 262 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "where $\\psi (\\cdot)$ is interpreted as an elementwise matrix function here. By Lemmas K.29 and K.34, for $1\\leq i\\leq m$ $1\\le j\\le m$ and $s > R_0$", + "bbox": [ + 169, + 268, + 826, + 297 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {(1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\frac {\\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\\\ \\hat {B} _ {t, i, j} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 303, + 794, + 361 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 366, + 246, + 378 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) ^ {2} B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\frac {H \\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{K \\left(\\lambda_ {i} + \\lambda_ {j}\\right) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} \\\\ + \\left(1 - \\frac {1}{K}\\right) \\frac {H \\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\underbrace {\\left[ 1 - \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{H \\eta (\\lambda_ {i} + \\lambda_ {j})} \\right]} _ {\\mathcal {T} _ {4}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 386, + 812, + 518 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t, i, j} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 520, + 354, + 561 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Then we simplify $\\mathcal{T}_4$ . 
Notice that", + "bbox": [ + 171, + 566, + 395, + 583 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) ^ {H} = \\exp \\left(- H \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) \\left[ 1 + \\mathcal {O} \\left(H \\eta^ {2}\\right) \\right] \\\\ = \\exp (- H (\\lambda_ {i} + \\lambda_ {j}) \\eta) + \\mathcal {O} (\\eta). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 588, + 689, + 625 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 171, + 630, + 246, + 643 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} _ {4} = \\psi \\left(\\left(\\lambda_ {i} + \\lambda_ {j}\\right) H \\eta\\right) + \\mathcal {O} (\\eta).\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 651, + 604, + 667 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Substituting $\\mathcal{T}_4$ back into the expression for $\\sum_{t=0}^{H-1} \\hat{A}_{t,i,j}^{(s)}$ gives", + "bbox": [ + 169, + 675, + 589, + 696 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t, i, j} ^ {(s)} = \\frac {H \\eta}{K (\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\left(1 - \\frac {1}{K}\\right) \\frac {H \\eta \\psi ((\\lambda_ {i} + \\lambda_ {j}) H \\eta)}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 
5 - \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 703, + 803, + 744 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Combining the elementwise results, we obtain the following matrix form expression:", + "bbox": [ + 169, + 750, + 730, + 765 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} - \\eta \\pmb {P} _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\perp} \\mathcal {T} _ {3} \\pmb {P} _ {\\perp} ] = - \\frac {H \\eta^ {2}}{2 B} \\pmb {P} _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\mathcal {V} _ {\\pmb {H} _ {0}} (\\pmb {\\Sigma} _ {0, \\perp} + (K - 1) \\psi (\\pmb {\\Sigma} _ {0, \\perp})) ] \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 770, + 784, + 821 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "By Lemma K.4, we have (104).", + "bbox": [ + 171, + 827, + 383, + 842 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "Secondly, we show that for $s > R_0$", + "bbox": [ + 169, + 849, + 411, + 863 + ], + "page_idx": 63 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} - \\eta P _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ P _ {\\perp} \\mathcal {T} _ {3} P _ {\\|} + P _ {\\|} \\mathcal {T} _ {3} P _ {\\perp} ] \\\\ = \\frac {H \\eta^ {2}}{B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel} + (K - 1) \\psi (\\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel}) ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}), \\tag {105} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 869, + 823, + 922 + ], + "page_idx": 63 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 63 + }, + { + "type": "page_number", + "text": "64", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 63 + }, + { + "type": "text", + "text": "where $\\psi (\\cdot)$ is interpreted as an elementwise matrix function here. By symmetry of $\\hat{A}_t^{(s)}$ 's and $\\nabla^3\\mathcal{L}(\\hat{\\phi}^{(0)})$", + "bbox": [ + 169, + 102, + 823, + 136 + ], + "page_idx": 64 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) \\left[ \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\parallel} + \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\perp} \\right] = \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) \\left[ \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\parallel} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 142, + 782, + 185 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Therefore, we only have to evaluate", + "bbox": [ + 171, + 188, + 410, + 203 + ], + "page_idx": 64 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) \\left[ \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} (\\hat {\\boldsymbol {A}} _ {t} ^ {(s)} + \\hat {\\boldsymbol {B}} _ {t} ^ {(s)}) \\boldsymbol {P} _ {\\parallel} + \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\perp} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 210, + 699, + 252 + ], + "page_idx": 64 + }, + { + "type": "text", 
+ "text": "To compute the elements of $\\sum_{t=0}^{H-1} P_{\\perp} (\\hat{A}_t^{(s)} + \\hat{B}_t^{(s)}) P_{\\parallel}$ , we combine Lemmas K.30 and K.33 to obtain that for $1 \\leq i \\leq m$ and $m < j \\leq d$ ,", + "bbox": [ + 169, + 258, + 823, + 291 + ], + "page_idx": 64 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t, i, j} ^ {(s)} = \\sum_ {t = 0} ^ {H - 1} \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} - \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} ^ {2} B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\left(1 - \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} H \\eta}\\right) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\psi (\\lambda_ {i} H \\eta) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 279, + 297, + 714, + 444 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 171, + 449, + 200, + 462 + ], + "page_idx": 64 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t, i, j} ^ {(s)} = \\sum_ {t = 0} ^ {H - 1} \\frac {\\left(1 - \\lambda_ {i} \\eta\\right) ^ {t}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} ^ {2} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 
5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} - \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\left(1 - \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} H \\eta}\\right) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} - \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\psi (\\lambda_ {i} H \\eta) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 467, + 782, + 614 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Therefore, the matrix form of $\\sum_{t=0}^{H-1} P_{\\perp} (\\hat{A}_t^{(s)} + \\hat{B}_t^{(s)}) P_{\\parallel}$ is", + "bbox": [ + 171, + 619, + 576, + 641 + ], + "page_idx": 64 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} (\\hat {\\boldsymbol {A}} _ {t} ^ {(s)} + \\hat {\\boldsymbol {B}} _ {t} ^ {(s)}) \\boldsymbol {P} _ {\\parallel} = \\frac {H \\eta}{B} \\mathcal {V} _ {\\boldsymbol {H} _ {0}} \\left(\\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel} + (K - 1) \\psi (\\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel})\\right) + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}),\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 647, + 776, + 686 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "where $\\psi (\\cdot)$ is interpreted as an elementwise matrix function here. Furthermore, by Lemma K.35, $\\sum_{t = 0}^{H - 1}\\hat{B}_t^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{0.5 - \\beta})$ . Applying Lemma K.3, we have (105). 
Finally, directly applying Lemma K.5, we have", + "bbox": [ + 169, + 694, + 823, + 739 + ], + "page_idx": 64 + }, + { + "type": "equation", + "text": "\n$$\n- \\eta \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {P} _ {\\parallel} \\mathcal {T} _ {3} \\boldsymbol {P} _ {\\parallel} \\right] = \\boldsymbol {0}. \\tag {106}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 746, + 823, + 766 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Notice that $\\psi(\\Sigma_{0,||}) = 0$ where $\\psi(\\cdot)$ operates on each element. Combining (104), (105) and (106), we obtain (103). By (102) and (103), we have (98).", + "bbox": [ + 169, + 771, + 823, + 801 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Lemma K.37. The second moment of the change of manifold projection every round is", + "bbox": [ + 169, + 808, + 746, + 823 + ], + "page_idx": 64 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ (\\hat {\\boldsymbol {\\phi}} ^ {(s + 1)} - \\hat {\\boldsymbol {\\phi}} ^ {(s)}) (\\hat {\\boldsymbol {\\phi}} ^ {(s + 1)} - \\hat {\\boldsymbol {\\phi}} ^ {(s)}) ^ {\\top} ] = \\left\\{ \\begin{array}{l l} \\frac {H \\eta^ {2}}{B} \\pmb {\\Sigma} _ {0, \\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), & R _ {0} \\leq s < R _ {\\mathrm {g r p}} \\\\ \\tilde {\\mathcal {O}} (\\eta), & s < R _ {0} \\end{array} \\right.,\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 830, + 790, + 869 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "where $R_0 \\coloneqq \\left\\lceil \\frac{10}{\\lambda_m \\alpha} \\log \\frac{1}{\\eta} \\right\\rceil$ .", + "bbox": [ + 171, + 875, + 352, + 896 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "Proof. 
Directly apply Lemma K.32 and Lemma K.27 and we have the lemma.", + "bbox": [ + 171, + 909, + 687, + 924 + ], + "page_idx": 64 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 64 + }, + { + "type": "page_number", + "text": "65", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 64 + }, + { + "type": "text", + "text": "With Lemmas K.36 and K.37, we are ready to prove Theorem K.3.", + "bbox": [ + 171, + 103, + 612, + 119 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Proof of Theorem K.3. We first derive $\\mathbb{E}[\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})}]$ . Recall that $R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}} \\right\\rfloor = \\frac{1}{H\\eta^{1 + \\beta}} + o(1)$ where $0 < \\beta < 0.5$ . By Lemma K.36,", + "bbox": [ + 169, + 133, + 823, + 167 + ], + "page_idx": 65 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} [ \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(0)} ] = \\sum_ {s = 0} ^ {R _ {0}} \\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] + \\sum_ {s = R _ {0} + 1} ^ {R _ {\\mathrm {g r p}} - 1} \\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] \\\\ = \\frac {\\eta^ {1 - \\beta}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {\\Sigma} _ {0} + \\pmb {\\Psi} (\\hat {\\phi} ^ {(0)}) ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 2 \\beta}) + \\tilde {\\mathcal {O}} (\\eta). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 172, + 756, + 252 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Then we compute $\\mathbb{E}[\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})}\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})^{\\top}}]$ .", + "bbox": [ + 171, + 257, + 457, + 273 + ], + "page_idx": 65 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} \\left[ \\left(\\sum_ {s = 0} ^ {R _ {\\mathrm {g r p}} - 1} (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)})\\right) \\left(\\sum_ {s = 0} ^ {R _ {\\mathrm {g r p}} - 1} (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)})\\right) ^ {\\top} \\right] \\\\ = \\sum_ {s = 0} ^ {R _ {\\mathrm {g r p}} - 1} \\mathbb {E} [ (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ^ {\\top} ] + \\sum_ {s \\neq s ^ {\\prime}} \\mathbb {E} [ (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ] \\mathbb {E} [ (\\hat {\\phi} ^ {(s ^ {\\prime} + 1)} - \\hat {\\phi} ^ {(s ^ {\\prime})}) ^ {\\top} ] \\\\ = \\frac {\\eta^ {1 - \\beta}}{B} \\Sigma_ {0, \\parallel} + \\tilde {\\mathcal {O}} (\\eta) + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 1. 
5 \\beta}), \\ \end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 186, + 281, + 818, + 417 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "where the last equality uses $\mathbb{E}[(\hat{\phi}^{(s + 1)} - \hat{\phi}^{(s)})]\mathbb{E}[(\hat{\phi}^{(s' + 1)} - \hat{\phi}^{(s')})^\top ] = \tilde{\mathcal{O}} (\eta^2)$", + "bbox": [ + 171, + 422, + 712, + 441 + ], + "page_idx": 65 + }, + { + "type": "image", + "img_path": "images/78b29f9c05159682ca3d886a219bb7e9cb859ab59189ae22eecd9c91e12a2eba.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 425, + 825, + 436 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "K.10 PROOF OF WEAK APPROXIMATION", + "text_level": 1, + "bbox": [ + 171, + 455, + 470, + 469 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "We are now in a position to utilize the estimate of moments obtained in previous subsections to prove the closeness of the sequence $\{\phi^{(s)}\}_{s = 0}^{\lfloor T / (H\eta^2)\rfloor}$ and the SDE solution $\{\zeta(t) : t\in [0,T]\}$ in the sense of weak approximation. 
Recall the SDE that we expect the manifold projection $\\{\\Phi (\\bar{\\theta}^{(s)})\\}_{s = 0}^{\\lfloor T / (H\\eta^2)\\rfloor}$ to track:", + "bbox": [ + 169, + 482, + 826, + 547 + ], + "page_idx": 65 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(b) d r i f t - I}}} _ {\\text {(c) d r i f t - I I}} - \\underbrace {- \\frac {K - 1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(d) d r i f t - I I}}\\right), \\tag {107}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 551, + 825, + 595 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "According to Lemma K.3 and Lemma K.4, the drift term in total can be written as the following form:", + "bbox": [ + 169, + 601, + 823, + 628 + ], + "page_idx": 65 + }, + { + "type": "equation", + "text": "\n$$\n(\\mathbf {b}) + (\\mathbf {c}) = \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) ].\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 633, + 658, + 662 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Then by definition of $P_{\\zeta}$ , (107) is equivalent to the following SDE:", + "bbox": [ + 171, + 666, + 612, + 681 + ], + "page_idx": 65 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = \\frac {1}{\\sqrt {B}} \\partial \\Phi (\\boldsymbol {\\zeta}) \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} 
\\boldsymbol {W} _ {t} + \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t. \\tag {108}\n$$\n", + "text_format": "latex", + "bbox": [ + 245, + 686, + 825, + 720 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Therefore, we only have to show that $\\phi^{(s)}$ closely tracks $\\{\\zeta(t)\\}$ satisfying Equation (108). By Lemma K.11, there exists an $\\epsilon_3$ neighborhood of $\\Gamma$ , $\\Gamma^{\\epsilon_3}$ , where $\\Phi(\\cdot)$ is $\\mathcal{C}^\\infty$ -smooth. Due to compactness of $\\Gamma$ , $\\Gamma^{\\epsilon_3}$ is bounded and the mappings $\\partial^2\\Phi(\\cdot)$ , $\\partial\\Phi(\\cdot)$ , $\\Sigma^{1/2}(\\cdot)$ , $\\Sigma(\\cdot)$ and $\\Psi(\\cdot)$ are all Lipschitz in $\\Gamma^{\\epsilon_3}$ . By Kirschbraun theorem, both the drift and diffusion term of (108) can be extended as Lipschitz functions on $\\mathbb{R}^d$ . Therefore, the solution to the extended SDE exists and is unique. We further show that the solution, if initialized as a point on $\\Gamma$ , always stays on the manifold almost surely.", + "bbox": [ + 169, + 726, + 826, + 827 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "As a preparation, we first show that $\\Gamma$ has no boundary.", + "bbox": [ + 171, + 833, + 537, + 848 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Lemma K.38. Under Assumptions 3.1 to 3.3, $\\Gamma$ has no boundary.", + "bbox": [ + 171, + 851, + 607, + 867 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Proof. We prove by contradiction. 
If $\\Gamma$ has boundary $\\partial \\Gamma$ , WLOG, for a point $\\pmb{p} \\in \\partial \\Gamma$ , let the Hessian at $\\pmb{p}$ be diagonal with the form $\\nabla^2 \\mathcal{L}(\\pmb{p}) = \\mathrm{diag}(\\lambda_1, \\dots, \\lambda_d)$ where $\\lambda_i > 0$ for $1 \\leq i \\leq m$ and $\\lambda_i = 0$ for $m < i \\leq d$ .", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 65 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 65 + }, + { + "type": "page_number", + "text": "66", + "bbox": [ + 488, + 948, + 509, + 960 + ], + "page_idx": 65 + }, + { + "type": "text", + "text": "Denote by $\\pmb{x}_{i:j} := (x_i, x_{i+1}, \\dots, x_j)$ ( $i \\leq j$ ) the $(j - i + 1)$ -dimensional vector formed by the $i$ -th to $j$ -th coordinates of $\\pmb{x}$ . Since $\\frac{\\partial(\\nabla\\mathcal{L}(\\pmb{p}))}{\\partial\\pmb{p}_{1:m}} = \\mathrm{diag}(\\lambda_1, \\dots, \\lambda_m)$ is invertible, by the implicit function theorem, there exists an open neighborhood $V$ of $\\pmb{p}_{m+1:d}$ such that $\\nabla\\mathcal{L}(\\pmb{v}) = \\mathbf{0}$ , $\\forall \\pmb{v} \\in V$ . Then, $\\mathcal{L}(\\pmb{v}) = \\mathcal{L}(\\pmb{p}) = \\min_{\\pmb{\\theta} \\in U} \\mathcal{L}(\\pmb{\\theta})$ and hence $V \\subset \\Gamma$ , which contradicts with $\\pmb{p} \\in \\partial \\Gamma$ .", + "bbox": [ + 169, + 103, + 823, + 167 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Therefore, $\\Gamma$ is a closed manifold (i.e., compact and without boundary). Then we have the following lemma stating that $\\Gamma$ is invariant for (108).", + "bbox": [ + 169, + 181, + 823, + 210 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Lemma K.39. Let $\\zeta(t)$ be the solution to (108) with $\\zeta(0) \\in \\Gamma$ , then $\\zeta(t) \\in \\Gamma$ for all $t \\geq 0$ . 
In other words, $\\Gamma$ is invariant for (108).", + "bbox": [ + 169, + 212, + 825, + 242 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Proof. According to Filipovic (2000) and Du & Duan (2007), for a closed manifold $\\mathcal{M}$ to be viable for the SDE $\\mathrm{d}\\pmb {X}(t) = F(\\pmb {X}(t))\\mathrm{d}t + \\pmb {B}(\\pmb {X}(t))\\mathrm{d}\\pmb{W}_t$ where $F:\\mathbb{R}^d\\to \\mathbb{R}^d$ and $\\pmb {B}:\\mathbb{R}^d\\rightarrow \\mathbb{R}^d$ are locally Lipschitz, we only have to verify the following Nagumo type consistency condition:", + "bbox": [ + 169, + 256, + 823, + 301 + ], + "page_idx": 66 + }, + { + "type": "equation", + "text": "\n$$\n\\mu (\\pmb {x}) := F (\\pmb {x}) - \\frac {1}{2} \\sum_ {j} \\mathrm {D} [ B _ {j} (\\pmb {x}) ] B _ {j} (\\pmb {x}) \\in T _ {\\pmb {x}} (\\mathcal {M}), \\quad B _ {j} (\\pmb {x}) \\in T _ {\\pmb {x}} (\\mathcal {M}),\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 303, + 740, + 339 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "where $\\mathrm{D}[\\cdot ]$ is the Jacobian operator and $B_{j}(\\pmb {x})$ denotes the $j$ -th column of $\\pmb {B}(\\pmb {x})$ .", + "bbox": [ + 169, + 344, + 702, + 361 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "In our context, since for $\\phi \\in \\Gamma$ , $\\partial \\Phi(\\phi)$ is a projection matrix onto $T_{\\phi}(\\Gamma)$ , each column of $\\partial \\Phi(\\phi)\\Sigma^{1/2}(\\phi)$ belongs to $T_{\\phi}(\\Gamma)$ , verifying the second condition. Denote by $P_{\\perp}(\\phi) := I_d - \\partial \\Phi(\\phi)$ the projection onto the normal space of $\\Gamma$ at $\\phi$ . To verify the first condition, it suffices to show that $P_{\\perp}(\\phi)\\mu(\\phi) = 0$ . 
We evaluate $\\sum_{j} P_{\\perp}(\\phi)\\mathrm{D}[B_j(\\phi)]B_j(\\phi)$ as follows.", + "bbox": [ + 169, + 364, + 825, + 426 + ], + "page_idx": 66 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sum_ {j} \\boldsymbol {P} _ {\\perp} (\\phi) \\mathrm {D} [ B _ {j} (\\phi) ] B _ {j} (\\phi) = \\frac {1}{B} \\sum_ {j} \\mathrm {D} [ \\partial \\Phi (\\phi) \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi) ] \\partial \\Phi (\\phi) \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi) \\\\ = \\frac {1}{B} P _ {\\perp} (\\phi) \\sum_ {j} \\partial^ {2} \\Phi (\\phi) [ \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi), \\partial \\Phi (\\phi) \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi) ] \\\\ = - \\frac {1}{B} \\nabla^ {2} \\mathcal {L} (\\phi) ^ {+} \\nabla^ {3} \\mathcal {L} (\\phi) [ \\boldsymbol {\\Sigma} _ {\\parallel} (\\phi) ], \\tag {109} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 431, + 823, + 539 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "where the last inequality uses Lemma K.3. Again applying Lemma K.3, we have", + "bbox": [ + 171, + 541, + 702, + 556 + ], + "page_idx": 66 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {P} _ {\\perp} (\\phi) F (\\phi) = - \\frac {1}{2 B} \\nabla^ {2} \\mathcal {L} (\\phi) ^ {+} \\nabla^ {3} \\mathcal {L} (\\phi) [ \\boldsymbol {\\Sigma} _ {\\parallel} (\\phi) ]. \\tag {110}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 560, + 823, + 589 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Combining (109) and (110), we can verify the first condition.", + "bbox": [ + 169, + 590, + 575, + 606 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "In order to establish Theorem 3.2, it suffices to prove the following theorem, which captures the closeness of $\\phi^{(s)}$ and $\\zeta(t)$ every $R_{\\mathrm{grp}}$ rounds.", + "bbox": [ + 169, + 621, + 823, + 652 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "Theorem K.4. 
If $\\| \\bar{\\pmb{\\theta}}^{(0)} - \\pmb{\\phi}^{(0)}\\|_2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})$ and $\\zeta (0) = \\phi^{(0)}\\in \\Gamma$ , then for $R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{0.75}}\\right\\rfloor$ every test function $g\\in \\mathcal{C}^3$ ,", + "bbox": [ + 171, + 656, + 823, + 698 + ], + "page_idx": 66 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {n = 0, \\dots , \\lfloor T / \\eta^ {0. 7 5} \\rfloor} \\left| \\mathbb {E} g (\\boldsymbol {\\phi} ^ {(n R _ {\\mathrm {g r p}})}) - \\mathbb {E} g (\\boldsymbol {\\zeta} (n \\eta^ {0. 7 5})) \\right| \\leq C _ {g} \\eta^ {0. 2 5} (\\log \\frac {1}{\\eta}) ^ {b},\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 702, + 727, + 732 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "where $C_g > 0$ is a constant independent of $\\eta$ but can depend on $g(\\cdot)$ and $b > 0$ is a constant independent of $\\eta$ and $g(\\cdot)$ .", + "bbox": [ + 169, + 734, + 823, + 766 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "K.10.1 PRELIMINARIES AND ADDITIONAL NOTATIONS", + "text_level": 1, + "bbox": [ + 171, + 779, + 563, + 792 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "We first introduce a general formulation for stochastic gradient algorithms (SGAs) and then specify the components of this formulation in our context. 
Consider the following SGA:", + "bbox": [ + 169, + 803, + 823, + 832 + ], + "page_idx": 66 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {n + 1} = \\boldsymbol {x} _ {n} + \\eta_ {\\mathrm {e}} \\boldsymbol {h} (\\boldsymbol {x} _ {n}, \\boldsymbol {\\xi} _ {n}),\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 835, + 593, + 852 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "where $\\pmb{x}_n \\in \\mathbb{R}^d$ is the parameter, $\\eta_{\\mathrm{e}}$ is the learning rate, $h(\\cdot, \\cdot)$ is the update which depends on $\\pmb{x}_n$ and a random vector $\\pmb{\\xi}_n$ sampled from some distribution $\\Xi(\\pmb{x}_n)$ . Also, consider the following Stochastic Differential Equation (SDE).", + "bbox": [ + 169, + 857, + 825, + 901 + ], + "page_idx": 66 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {X} (t) = \\boldsymbol {b} (\\boldsymbol {X} (t)) \\mathrm {d} t + \\boldsymbol {\\sigma} (\\boldsymbol {X} (t)) \\mathrm {d} \\boldsymbol {W} _ {t},\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 904, + 627, + 921 + ], + "page_idx": 66 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 66 + }, + { + "type": "page_number", + "text": "67", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 66 + }, + { + "type": "text", + "text": "where $\\pmb {b}(\\cdot):\\mathbb{R}^d\\to \\mathbb{R}^d$ is the drift function and $\\sigma (\\cdot):\\mathbb{R}^{d\\times d}\\rightarrow \\mathbb{R}^{d\\times d}$ is the diffusion matrix.", + "bbox": [ + 169, + 102, + 777, + 119 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Denote by $\\mathcal{P}_X(\\pmb {x},s,t)$ the distribution of $X(t)$ with the initial condition $X(s) = x$ .Define", + "bbox": [ + 169, + 125, + 771, + 140 + ], + "page_idx": 67 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {\\Delta}} (\\boldsymbol {x}, n) := 
\boldsymbol {X} _ {(n + 1) \eta_ {\mathrm {e}}} - \boldsymbol {x}, \quad \text {w h e r e} \boldsymbol {X} _ {(n + 1) \eta_ {\mathrm {e}}} \sim \mathcal {P} _ {\boldsymbol {X}} (\boldsymbol {x}, n \eta_ {\mathrm {e}}, (n + 1) \eta_ {\mathrm {e}}),\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 147, + 767, + 166 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "which characterizes the update in one step.", + "bbox": [ + 169, + 171, + 455, + 186 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "In our context, we view the change of manifold projection over $R_{\mathrm{grp}} \coloneqq \left\lfloor \frac{1}{\alpha\eta^{1 - \beta}} \right\rfloor (\beta \in (0, 0.5))$ rounds as one \"giant step\". Hence $\phi^{(nR_{\mathrm{grp}})}$ corresponds to the discrete time random variable $x_{n}$, and $\zeta(t)$ corresponds to the continuous time random variable $X_{t}$. According to Theorem K.2, we set", + "bbox": [ + 169, + 191, + 823, + 253 + ], + "page_idx": 67 + }, + { + "type": "equation", + "text": "\n$$\n\eta_ {\mathrm {e}} = \eta^ {1 - \beta}, \quad \boldsymbol {b} (\boldsymbol {\zeta}) = \frac {1}{2 B} \partial^ {2} \Phi (\boldsymbol {\zeta}) \left[ \boldsymbol {\Sigma} (\boldsymbol {\zeta}) + (K - 1) \boldsymbol {\Psi} (\boldsymbol {\zeta}) \right], \quad \boldsymbol {\sigma} (\boldsymbol {\zeta}) = \frac {1}{\sqrt {B}} \partial \Phi (\boldsymbol {\zeta}) \boldsymbol {\Sigma} ^ {1 / 2} (\boldsymbol {\zeta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 258, + 799, + 290 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Due to compactness of $\Gamma$ , $b(\cdot)$ and $\sigma(\cdot)$ are Lipschitz on $\Gamma$ .", + "bbox": [ + 169, + 296, + 563, + 313 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "As for the update in one step, $\tilde{\Delta} (\cdot ,\cdot)$ is defined in our context as:", + "bbox": [ + 169, + 
319, + 599, + 335 + ], + "page_idx": 67 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\Delta} (\\phi , n) := \\zeta_ {(n + 1) \\eta_ {\\mathrm {e}}} - \\phi , \\qquad \\text {w h e r e} \\zeta_ {(n + 1) \\eta_ {\\mathrm {e}}} \\sim \\mathcal {P} _ {\\zeta} (\\phi , n \\eta_ {\\mathrm {e}}, (n + 1) \\eta_ {\\mathrm {e}}) \\text {a n d} \\phi \\in \\Gamma .\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 342, + 785, + 362 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "For convenience, we further define", + "bbox": [ + 169, + 366, + 403, + 380 + ], + "page_idx": 67 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Delta} ^ {(n)} := \\hat {\\phi} ^ {((n + 1) R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(n R _ {\\mathrm {g r p}})}, \\qquad \\qquad \\tilde {\\boldsymbol {\\Delta}} ^ {(n)} := \\tilde {\\boldsymbol {\\Delta}} (\\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})}, n),\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 385, + 740, + 405 + ], + "page_idx": 67 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {b} ^ {(n)} := \\boldsymbol {b} (\\hat {\\boldsymbol {\\phi}} ^ {(n R _ {\\mathrm {g r p}})}), \\qquad \\qquad \\boldsymbol {\\sigma} ^ {(n)} := \\boldsymbol {\\sigma} (\\hat {\\boldsymbol {\\phi}} ^ {(n R _ {\\mathrm {g r p}})}).\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 407, + 720, + 426 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "We use $C_{g,i}$ to denote constants that can depend on the test function $g$ and independent of $\\eta_{\\mathrm{e}}$ . The following lemma relates the moments of $\\tilde{\\Delta}(\\phi, n)$ to $b(\\phi)$ and $\\sigma(\\phi)$ .", + "bbox": [ + 169, + 431, + 823, + 464 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Lemma K.40. 
There exists a positive constant $C_0$ independent of $\\eta_{\\mathrm{e}}$ and $g$ such that for all $\\phi \\in \\Gamma$", + "bbox": [ + 169, + 467, + 821, + 483 + ], + "page_idx": 67 + }, + { + "type": "equation", + "text": "\n$$\n| \\mathbb {E} [ \\tilde {\\Delta} _ {i} (\\phi , n) ] - \\eta_ {\\mathrm {e}} b _ {i} (\\phi) | \\leq C _ {0} \\eta_ {\\mathrm {e}} ^ {2}, \\quad \\forall 1 \\leq i \\leq d,\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 488, + 779, + 508 + ], + "page_idx": 67 + }, + { + "type": "equation", + "text": "\n$$\n| \\mathbb {E} [ \\tilde {\\Delta} _ {i} (\\phi , n) \\tilde {\\Delta} _ {j} (\\pmb {x}, n) ] - \\eta_ {\\mathrm {e}} \\sum_ {l = 1} ^ {d} \\sigma_ {i, l} (\\phi) \\sigma_ {l, j} (\\phi) | \\leq C _ {0} \\eta_ {\\mathrm {e}} ^ {2}, \\quad \\forall 1 \\leq i, j \\leq d,\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 511, + 779, + 551 + ], + "page_idx": 67 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\tilde {\\Delta} _ {i _ {s}} (\\phi , n) \\right| \\right] \\leq C _ {0} \\eta_ {\\mathrm {e}} ^ {3}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d.\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 554, + 777, + 595 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "The lemma below states that the expectation of the test function is smooth with respect to the initial value.", + "bbox": [ + 169, + 601, + 823, + 628 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Proof. Noticing that (i) the solution to (108) always stays on $\\Gamma$ almost surely if its initial value $\\zeta(0)$ belongs to $\\Gamma$ , (ii) $b(\\cdot)$ and $\\sigma(\\cdot)$ are $\\mathcal{C}^\\infty$ and (iii) $\\Gamma$ is compact, we can directly apply Lemma B.3 in Malladi et al. (2022) and Lemma 26 in Li et al. 
(2019a) to obtain the above lemma.", + "bbox": [ + 169, + 645, + 825, + 686 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "The following lemma states that the expectation of $g(\\zeta(t))$ for $g \\in \\mathcal{C}^3$ is smooth with respect to the initial value of the SDE solution.", + "bbox": [ + 169, + 702, + 823, + 729 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Lemma K.41. Let $s \\in [0, T]$ , $\\phi \\in \\Gamma$ and $g \\in \\mathcal{C}^3$ . For $t \\in [s, T]$ , define", + "bbox": [ + 169, + 734, + 640, + 750 + ], + "page_idx": 67 + }, + { + "type": "equation", + "text": "\n$$\nu (\\phi , s, t) := \\mathbb {E} _ {\\zeta_ {t} \\sim \\mathcal {P} _ {\\zeta} (\\phi , s, t)} [ g (\\zeta_ {t}) ].\n$$\n", + "text_format": "latex", + "bbox": [ + 382, + 756, + 611, + 773 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Then $u(\\cdot ,s,t)\\in \\mathcal{C}^3$ uniformly in $s,t$", + "bbox": [ + 169, + 781, + 416, + 796 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Proof. A slight modification of Lemma B.4 in Malladi et al. (2022) will give the above lemma.", + "bbox": [ + 169, + 811, + 795, + 825 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "K.10.2 PROOF OF THE APPROXIMATION IN OUR CONTEXT", + "text_level": 1, + "bbox": [ + 169, + 840, + 586, + 854 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "For $\\beta \\in (0, 0.5)$ , define $\\gamma_1 \\coloneqq \\frac{1.5 - 2\\beta}{1 - \\beta}$ , $\\gamma_2 \\coloneqq \\frac{1}{1 - \\beta}$ , and then $1 < \\gamma_1 < 1.5$ , $1 < \\gamma_2 < 2$ . We introduce the following lemma which serves as a key step to control the approximation error. 
Specifically, this lemma bounds the difference in one step change between the discrete process and the continuous one as well as the product of higher orders.", + "bbox": [ + 169, + 864, + 823, + 924 + ], + "page_idx": 67 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 67 + }, + { + "type": "page_number", + "text": "68", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 67 + }, + { + "type": "text", + "text": "Lemma K.42. If $\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})$ , then there exist positive constants $C_1$ and $b$ independent of $\\eta_{\\mathrm{e}}$ and $g$ such that for all $0\\leq n < \\lfloor T / \\eta_{\\mathrm{e}}\\rfloor$", + "bbox": [ + 169, + 103, + 826, + 142 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "1.", + "bbox": [ + 179, + 148, + 196, + 162 + ], + "page_idx": 68 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\mathbb {E} \\left[ \\Delta_ {i} ^ {(n)} - \\tilde {\\Delta} _ {i} ^ {(n)} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} + C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\quad \\forall 1 \\leq i \\leq d, \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 162, + 815, + 186 + ], + "page_idx": 68 + }, + { + "type": "equation", + "text": "\n$$\n| \\mathbb {E} [ \\Delta_ {i} ^ {(n)} \\Delta_ {j} ^ {(n)} - \\tilde {\\Delta} _ {i} ^ {(n)} \\tilde {\\Delta} _ {j} ^ {(n)} | \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} | \\leq C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b} + C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b}, \\forall 1 \\leq i, j \\leq d.\n$$\n", + 
"text_format": "latex", + "bbox": [ + 209, + 189, + 815, + 212 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "2.", + "bbox": [ + 179, + 220, + 196, + 234 + ], + "page_idx": 68 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\Delta_ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {1} ^ {2} \\eta_ {\\mathrm {e}} ^ {2 \\gamma_ {1}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {2 b}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d,\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 234, + 767, + 277 + ], + "page_idx": 68 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\tilde {\\Delta} _ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {1} ^ {2} \\eta_ {\\mathrm {e}} ^ {2 \\gamma_ {1}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {2 b}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d.\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 280, + 767, + 321 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "Proof. 
According to Appendix K.7, we have", + "bbox": [ + 171, + 335, + 470, + 349 + ], + "page_idx": 68 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\Delta_ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] = \\tilde {\\mathcal {O}} (\\eta^ {3 - 3 \\beta}).\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 352, + 633, + 393 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "Since $\\gamma_{1} < 1.5$ and $\\gamma_{2} < 2$ , we can utilize Theorem K.3 and conclude that there exist positive constants $C_2$ and $b$ independent of $\\eta_{\\mathrm{e}}$ and $g$ such that", + "bbox": [ + 169, + 397, + 823, + 425 + ], + "page_idx": 68 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\mathbb {E} \\left[ \\Delta_ {i} ^ {(n)} - \\eta_ {\\mathrm {e}} b _ {i} ^ {(n)} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| \\leq C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} + C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\forall 1 \\leq i \\leq d, \\tag {111}\n$$\n", + "text_format": "latex", + "bbox": [ + 277, + 428, + 823, + 476 + ], + "page_idx": 68 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\mathbb {E} \\left[ \\Delta_ {i} ^ {(n)} \\Delta_ {j} ^ {(n)} - \\eta_ {\\mathrm {e}} \\sum_ {l = 1} ^ {d} \\sigma_ {i, l} ^ {(n)} \\sigma_ {l, j} ^ {(n)} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| \\leq C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} + C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\forall 1 \\leq i, j \\leq d, \\tag {112}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 479, + 823, + 536 + ], + "page_idx": 68 + }, + { + "type": "equation", + 
"text": "\n$$\n\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\Delta_ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {2} ^ {2} \\eta_ {\\mathrm {e}} ^ {2 \\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {2 b}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d. \\tag {113}\n$$\n", + "text_format": "latex", + "bbox": [ + 299, + 539, + 825, + 580 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "Combining (111) - (113) with Lemma K.40 gives the above lemma.", + "bbox": [ + 169, + 583, + 619, + 598 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "Lemma K.43. For a test function $g \\in \\mathcal{C}^3$ , let $u_{l,n}(\\phi) \\coloneqq u(\\phi, l\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}}) = \\mathbb{E}_{\\zeta_t \\sim \\mathcal{P}_{\\zeta}(\\phi, l\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}})}[g(\\zeta_t)]$ . If $\\|\\bar{\\pmb{\\theta}}^{(0)} - \\pmb{\\phi}^{(0)}\\|_2 = \\mathcal{O}(\\sqrt{\\eta \\log \\frac{1}{\\eta}})$ , then for all $0 \\leq l \\leq n-1$ and $1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor$ ,", + "bbox": [ + 169, + 604, + 823, + 646 + ], + "page_idx": 68 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\mathbb {E} [ u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\pmb {\\Delta} ^ {(l)}) - u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\pmb {\\Delta}} ^ {(l + 1)}) \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} ] \\right| \\leq C _ {g, 1} (\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}) \\log (\\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b},\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 648, + 823, + 675 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "where $C_{g,1}$ is a positive constant independent of $\\eta$ and $\\hat{\\phi}^{(lR_{\\mathrm{grp}})}$ but can depend on $g$ .", + "bbox": [ + 171, + 680, + 730, + 698 + ], + "page_idx": 
68 + }, + { + "type": "text", + "text": "Proof. By Lemma K.41, $u_{l,n}(\\phi) \\in \\mathcal{C}^3$ for all $l$ and $n$ . That is, there exists $K(\\cdot) \\in G$ such that for all $l, n, u_{l,n}(\\phi)$ and its partial derivatives up to the third order are bounded by $K(\\phi)$ .", + "bbox": [ + 169, + 710, + 826, + 742 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "By the law of total expectation and triangle inequality,", + "bbox": [ + 171, + 747, + 532, + 763 + ], + "page_idx": 68 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left| \\mathbb {E} [ u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\pmb {\\Delta} ^ {(l)}) - u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\pmb {\\Delta}} ^ {(l)}) ] \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} \\right| \\\\ \\leq \\underbrace {\\left| \\mathbb {E} \\left[ u _ {l + 1 , n} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\boldsymbol {\\Delta} ^ {(l)}\\right) - u _ {l + 1 , n} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\boldsymbol {\\Delta}} ^ {(l)}\\right) \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} , \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} \\right] \\right|} _ {\\mathcal {A} _ {1}} \\\\ + \\underbrace {\\eta^ {1 0 0} \\mathbb {E} \\left[ \\left| u _ {l + 1 , n} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\boldsymbol {\\Delta} ^ {(l)}\\right) \\right| \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\bar {\\mathcal {E}} _ {0} ^ {(l R _ {\\mathrm {g r p}})} \\right]} _ {\\mathcal {A} _ {2}} \\\\ + \\underbrace {\\eta^ {1 0 0} \\mathbb {E} [ | u _ {l + 1 , n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\Delta} ^ {(l)}) | | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} , \\bar {\\mathcal {E}} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ]} _ {\\mathcal {A} _ {3}}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 243, + 765, + 759, + 921 + ], + "page_idx": 68 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 68 + }, + { + "type": "page_number", + "text": "69", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 68 + }, + { + "type": "text", + "text": "We first bound $\\mathcal{A}_2$ and $\\mathcal{A}_3$ . Since $\\hat{\\phi}^{(lR_{\\mathrm{grp}})} \\in \\Gamma$ , both $\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\pmb{\\Delta}^{(l)}$ and $\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\tilde{\\Delta}^{(l)}$ belong to $\\Gamma$ . Due to compactness of $\\Gamma$ and smoothness of $u_{l+1,n}(\\cdot)$ on $\\Gamma$ , there exist a positive constant $C_{g,2}$ such that $\\mathcal{A}_2 + \\mathcal{A}_3 \\leq C_{g,2}\\eta^{100}$ .", + "bbox": [ + 169, + 101, + 823, + 148 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "We proceed to bound $\\mathcal{A}_1$ . 
Expanding $u_{l + 1,n}(\\cdot)$ at $\\hat{\\phi}^{(lR_{\\mathrm{grp}})}$ and by triangle inequality,", + "bbox": [ + 171, + 155, + 732, + 172 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {A} _ {1} ^ {(s)} \\leq \\underbrace {\\sum_ {i = 1} ^ {d} \\left| \\mathbb {E} \\big [ \\frac {\\partial u _ {l + 1 , n}}{\\partial \\phi_ {i}} (\\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}) \\left(\\Delta_ {i} ^ {(l)} - \\tilde {\\Delta} _ {i} ^ {(l)}\\right) | \\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} \\right|} _ {\\mathcal {B} _ {1}} \\\\ + \\underbrace {\\frac {1}{2} \\sum_ {1 \\leq i , j \\leq d} \\left| \\mathbb {E} [ \\frac {\\partial^ {2} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j}} (\\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}) (\\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} - \\tilde {\\Delta} _ {i} ^ {(l)} \\tilde {\\Delta} _ {j} ^ {(l)}) | \\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ] \\right|} _ {\\mathcal {B} _ {2}} \\\\ + | \\mathcal {R} | + | \\tilde {\\mathcal {R}} |, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 176, + 789, + 318 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "where the remainders $\\mathcal{R}$ and $\\tilde{\\mathcal{R}}$ are", + "bbox": [ + 171, + 321, + 405, + 337 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {R} = \\frac {1}{6} \\sum_ {1 \\leq i, j, p \\leq d} \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\boldsymbol {\\Delta} ^ {(l)}) \\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} | \\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ],\n$$\n", + 
"text_format": "latex", + "bbox": [ + 217, + 338, + 750, + 378 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\mathcal {R}} = \\frac {1}{6} \\sum_ {1 \\leq i, j, p \\leq d} \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\theta} \\tilde {\\Delta} ^ {(l)}) \\tilde {\\Delta} _ {i} ^ {(l)} \\tilde {\\Delta} _ {j} ^ {(l)} \\tilde {\\Delta} _ {p} ^ {(l)} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ],\n$$\n", + "text_format": "latex", + "bbox": [ + 217, + 380, + 777, + 419 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "for some $\\theta, \\tilde{\\theta} \\in (0,1)$ . Since $\\hat{\\phi}^{(LR_{\\mathrm{grp}})}$ belongs to $\\Gamma$ which is compact, there exists a constant $C_{g,3}$ such that for all $1 \\leq i,j \\leq d, 0 \\leq l \\leq n-1, 1 \\leq n \\leq \\lfloor T/\\eta_{\\mathrm{e}} \\rfloor$ ,", + "bbox": [ + 169, + 422, + 823, + 455 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n| \\frac {\\partial u _ {l + 1 , n}}{\\partial \\phi_ {i}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}) | \\leq C _ {g, 3}, \\qquad | \\frac {\\partial^ {2} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}) | \\leq C _ {g, 3}.\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 457, + 709, + 491 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "By Lemma K.42,", + "bbox": [ + 171, + 492, + 290, + 507 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {B} _ {1} \\leq d C _ {g, 3} C _ {1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\quad \\mathcal {B} _ {2} \\leq \\frac {d ^ {2}}{2} C _ {g, 3} C _ {1} \\left(\\eta_ {\\mathrm {e}} ^ 
{\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}.\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 508, + 761, + 540 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Now we bound the remainders. By Cauchy-Schwartz inequality,", + "bbox": [ + 171, + 541, + 596, + 556 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left| \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\pmb {\\Delta} ^ {(l)}) \\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} \\Delta_ {p} ^ {(l)} \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ] \\right| \\\\ \\leq \\left(\\mathbb {E} \\left[ \\left(\\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\boldsymbol {\\Delta} ^ {(l)})\\right) ^ {2} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right]\\right) ^ {1 / 2} \\times \\\\ \\left(\\mathbb {E} [ (\\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} \\Delta_ {p} ^ {(l)}) ^ {2} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} ]\\right) ^ {1 / 2}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 558, + 741, + 670 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Since $\\hat{\\phi}^{(lR_{\\mathrm{grp}})}$ and $\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\Delta^{(l)}$ both belong to $\\Gamma$ which is compact, there exists a constant $C_{g,4}$ such that for all $1 \\leq i, j, p \\leq d, 0 \\leq l \\leq n - 1$ and $1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor$ ,", + "bbox": [ + 169, + 680, + 823, + 712 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n\\left. \\right.\\left(\\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\Delta^ {(l)}\\right)\\right) ^ {2} \\leq C _ {g, 4} ^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 713, + 637, + 741 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Combining the above inequality with Lemma K.42, we have", + "bbox": [ + 171, + 743, + 571, + 758 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\pmb {\\Delta} ^ {(l)}) \\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} \\Delta_ {p} ^ {(l)} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ] \\right| \\leq C _ {g, 4} C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\log (\\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b}.\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 760, + 803, + 795 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Hence, for all $1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor, 0 \\leq l \\leq n - 1$", + "bbox": [ + 171, + 796, + 486, + 811 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n| \\mathcal {R} | \\leq \\frac {d ^ 
{3}}{6} C _ {g, 4} C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\log \\left(\\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}.\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 815, + 599, + 845 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Similarly, we can show that there exists a constant $C_{g,5}$ such that for all $1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor$ , $0 \\leq l \\leq n - 1$ ,", + "bbox": [ + 169, + 847, + 823, + 875 + ], + "page_idx": 69 + }, + { + "type": "equation", + "text": "\n$$\n| \\tilde {\\mathcal {R}} | \\leq \\frac {d ^ {3}}{6} C _ {g, 5} C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\log \\left(\\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}.\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 877, + 599, + 907 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Combining the bounds on $\\mathcal{A}_1$ to $\\mathcal{A}_3$ , we have the lemma.", + "bbox": [ + 171, + 909, + 549, + 924 + ], + "page_idx": 69 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 69 + }, + { + "type": "page_number", + "text": "70", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 69 + }, + { + "type": "text", + "text": "Finally, we prove Theorem K.4.", + "bbox": [ + 171, + 103, + 383, + 118 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Proof. For $0 \\leq l \\leq n$ , define the random variable $\\hat{\\zeta}_{l,n}$ which follows the distribution $\\mathcal{P}_{\\zeta}(\\hat{\\phi}^{(lR_{\\mathrm{grp}})}, l, n)$ conditioned on $\\hat{\\phi}^{(lR_{\\mathrm{grp}})}$ . Therefore, $\\mathbb{P}(\\hat{\\zeta}_{n,n} = \\hat{\\phi}^{(nR_{\\mathrm{grp}})}) = 1$ and $\\hat{\\zeta}_{0,n} \\sim \\zeta_{n\\eta_{\\mathrm{e}}}$ . 
Denote by $u(\\phi, s, t) \\coloneqq \\mathbb{E}_{\\zeta_t \\sim \\mathcal{P}_{\\zeta}(\\phi, s, t)}[g(\\zeta_t)]$ and $\\mathcal{T}_{l+1,n} \\coloneqq u_{l+1,n}(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\Delta^{(l)}, (l+1)\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}}) - u_{l+1,n}(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\tilde{\\Delta}^{(l)}, (l+1)\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}})$ .", + "bbox": [ + 169, + 133, + 825, + 204 + ], + "page_idx": 70 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left| \\mathbb {E} \\left[ g \\left(\\boldsymbol {\\phi} ^ {\\left(n R _ {\\mathrm {g r p}}\\right)}\\right) \\right] - \\mathbb {E} \\left[ g \\left(\\boldsymbol {\\zeta} \\left(n \\eta_ {\\mathrm {e}}\\right)\\right) \\right] \\right| \\\\ \\leq \\left| \\mathbb {E} \\left[ g \\left(\\hat {\\zeta} _ {n, n}\\right) - g \\left(\\hat {\\zeta} _ {0, n}\\right) \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}) \\\\ \\leq \\sum_ {l = 0} ^ {n - 1} \\left| \\mathbb {E} \\left[ g \\left(\\hat {\\zeta} _ {l + 1, n}\\right) - g \\left(\\hat {\\zeta} _ {l, n}\\right) \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}) \\\\ = \\sum_ {l = 0} ^ {n - 1} \\left| \\mathbb {E} \\left[ u \\left(\\hat {\\phi} ^ {\\left((l + 1) R _ {\\mathrm {g r p}}\\right)}, (l + 1) \\eta_ {\\mathrm {e}}, n \\eta_ {\\mathrm {e}}\\right) - u \\left(\\hat {\\zeta} _ {l, l + 1}, (l + 1) \\eta_ {\\mathrm {e}}, n \\eta_ {\\mathrm {e}}\\right) \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}) \\\\ = \\sum_ {l = 0} ^ {n - 1} \\left| \\mathbb {E} \\left[ \\mathcal {T} _ {l + 1, n} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 210, + 800, + 395 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Noticing that $\\mathbb{E}[\\mathcal{T}_{l + 1,n}\\mid \\mathcal{E}_0^{(nR_{\\mathrm{grp}})}] = \\mathbb{E}[\\mathbb{E}[\\mathcal{T}_{l + 1,n}\\mid \\hat{\\phi}^{(lR_{\\mathrm{grp}})},\\mathcal{E}_0^{(lR_{\\mathrm{grp}})}]\\mid \\mathcal{E}_0^{(nR_{\\mathrm{grp}})}]$ , we can apply Lemma K.43 and obtain that for all $0\\leq n\\leq \\lfloor T / \\eta_{\\mathrm{e}}\\rfloor$", + "bbox": [ + 169, + 402, + 823, + 436 + ], + "page_idx": 70 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left| \\mathbb {E} \\left[ g \\left(\\phi^ {\\left(n R _ {\\mathrm {g r p}}\\right)}\\right) \\right] - \\mathbb {E} \\left[ g \\left(\\zeta \\left(n \\eta_ {\\mathrm {e}}\\right)\\right) \\right] \\right| \\leq n C _ {g, 1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} \\\\ \\leq T C _ {g, 1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1} - 1} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2} - 1}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 444, + 725, + 491 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Notice that $\\eta_{\\mathrm{e}}^{\\gamma_1} + \\eta_{\\mathrm{e}}^{\\gamma_2} = \\eta^{0.5 - \\beta} + \\eta^\\beta$ and $T, C_{g,1}$ are both constants that are independent of $\\eta_{\\mathrm{e}}$ . Let $\\beta = 0.25$ and we have Theorem K.4.", + "bbox": [ + 169, + 500, + 823, + 529 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Having established Theorem K.4, we are thus led to prove Theorem 3.2.", + "bbox": [ + 171, + 542, + 645, + 559 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Proof of Theorem 3.2. 
Denote by $s_{\\mathrm{cls}} = s_0 + s_1 = \\mathcal{O}(\\log \\frac{1}{\\eta})$ , which is the time the global iterate $\\bar{\\theta}^{(s)}$ will reach within $\\tilde{\\mathcal{O}} (\\eta)$ from $\\Gamma$ with high probability. Define $\\tilde{\\zeta} (t)$ to be the solution to the limiting SDE (108) conditioned on $\\mathcal{E}_0^{(s_{\\mathrm{cls}})}$ and $\\tilde{\\zeta}(0) = \\phi^{(s_{\\mathrm{cls}})}$ . By Theorem K.4, we have", + "bbox": [ + 169, + 573, + 823, + 626 + ], + "page_idx": 70 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {n = 0, \\dots , \\lfloor T / \\eta^ {0. 7 5} \\rfloor} \\left| \\mathbb {E} [ g (\\phi^ {(n R _ {\\mathrm {g r p}} + s _ {\\mathrm {c l s}})}) - g (\\tilde {\\zeta} (n \\eta^ {0. 7 5})) | \\phi^ {(s _ {\\mathrm {c l s}})}, \\mathcal {E} _ {0} ^ {(s _ {\\mathrm {c l s}})} ] \\right| \\leq C _ {g} \\eta^ {0. 2 5} (\\log \\frac {1}{\\eta}) ^ {b},\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 633, + 792, + 662 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "where $R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{0.75}}\\right\\rfloor$ . 
Noticing that (i) $g\\in \\mathcal{C}^3$ (ii) $\\pmb {b},\\pmb {\\sigma}\\in \\mathcal{C}^{\\infty}$ and (iii) $\\zeta (t),\\tilde{\\zeta} (t)\\in \\Gamma ,t\\in [0,\\infty)$ almost surely, we can conclude that given $\\mathcal{E}_0^{(s_{\\mathrm{cls}})}$", + "bbox": [ + 169, + 670, + 823, + 709 + ], + "page_idx": 70 + }, + { + "type": "equation", + "text": "\n$$\n\\| \\boldsymbol {\\zeta} (t) - \\tilde {\\boldsymbol {\\zeta}} (t) \\| _ {2} = \\tilde {\\mathcal {O}} (\\sqrt {\\eta}), \\quad \\forall t \\in [ 0, T ].\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 715, + 633, + 734 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Then there exists positive constant $b'$ independent of $\\eta$ and $g$ , and $C_g'$ which is independent of $\\eta$ but can depend on $g$ such that", + "bbox": [ + 169, + 739, + 823, + 768 + ], + "page_idx": 70 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {n = 0, \\dots , \\lfloor T / \\eta^ {0. 7 5} \\rfloor} \\left| \\mathbb {E} \\left[ g \\left(\\phi^ {\\left(n R _ {\\mathrm {g r p}} + s _ {\\mathrm {c l s}}\\right)}\\right) - g \\left(\\zeta \\left(n \\eta^ {0. 7 5} + s _ {\\mathrm {c l s}} H \\eta^ {2}\\right)\\right) \\right] \\right| \\leq C _ {g} ^ {\\prime} \\eta^ {0. 2 5} (\\log \\frac {1}{\\eta}) ^ {b ^ {\\prime}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 776, + 779, + 805 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "We can view the random variable pairs $\\{(\\phi^{(nR_{\\mathrm{grp}} + s_{\\mathrm{cls}})},\\zeta_{n\\eta^{0.75} + s_{\\mathrm{cls}}\\alpha \\eta}):n = 0,\\dots ,\\lfloor T / \\eta^{0.75}\\rfloor \\}$ as reference points and then approximate the value of $g(\\phi^{(s)})$ and $g(\\zeta (sH\\eta^2))$ with the value at the nearest reference points. 
By Lemmas K.18 and K.23, for $0\\leq r\\leq R_{\\mathrm{grp}}$ and $0\\leq s\\leq R_{\\mathrm{tot}} - r$", + "bbox": [ + 169, + 813, + 823, + 861 + ], + "page_idx": 70 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\| \\phi^ {(s + r)} - \\phi^ {(s)} \\| _ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {0. 3 7 5}).\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 867, + 612, + 886 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "Since the values of $\\phi^{(s)}$ and $\\zeta$ are restricted to a bounded set, $g(\\cdot)$ is Lipschitz on that set. Therefore, we have the theorem.", + "bbox": [ + 169, + 893, + 823, + 922 + ], + "page_idx": 70 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 70 + }, + { + "type": "page_number", + "text": "71", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 70 + }, + { + "type": "text", + "text": "L DERIVING THE SLOW SDE FOR LABEL NOISE REGULARIZATION", + "text_level": 1, + "bbox": [ + 169, + 102, + 750, + 118 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "In this section, we formulate how label noise regularization works and provide a detailed derivation of the theoretical results in Appendix G.", + "bbox": [ + 169, + 133, + 823, + 162 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Consider training a model for $C$ -class classification on dataset $\\mathcal{D} = \\{(x_i, y_i)\\}_{i=1}^N$ , where $x_i$ denotes the input and $y_i \\in [C]$ denotes the label. Denote by $\\Delta_+^{C-1}$ the $(C-1)$ -open simplex. Let $f(\\theta; x) \\in \\Delta_+^{C-1}$ be the model output on input $x$ with parameter $\\theta$ , whose $j$ -th coordinate $f_j(\\theta; x)$ stands for the probability of $x$ belonging to class $j$ . 
Let $\\ell(\\theta; x, y)$ be the cross entropy loss given input $x$ and label $y$ , i.e., $\\ell(\\theta; x, y) = -\\log f_y(\\theta; x)$ .", + "bbox": [ + 169, + 167, + 823, + 246 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Adding label noise means replacing the true label $y$ with a fresh noisy label $\\hat{y}$ every time we access the sample. Specifically, $\\hat{y}$ is set as the true label $y$ with probability $1 - p$ and as any other label with probability $\\frac{p}{C - 1}$ , where $p$ is the fixed corruption probability. The training loss is defined as $\\mathcal{L}(\\boldsymbol{\\theta}) = \\frac{1}{N}\\sum_{i=1}^{N}\\mathbb{E}[\\ell(\\boldsymbol{\\theta};\\boldsymbol{x}_i,\\hat{y}_i)]$ , where the expectation is taken over the stochasticity of $\\hat{y}_i$ . Notice that given a sample $(x,y)$ ,", + "bbox": [ + 169, + 250, + 825, + 325 + ], + "page_idx": 71 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) ] = - (1 - p) \\log f _ {y} (\\boldsymbol {\\theta}; \\boldsymbol {x}) - \\frac {p}{C - 1} \\sum_ {j \\neq y} \\log f _ {j} (\\boldsymbol {\\theta}; \\boldsymbol {x}). \\tag {114}\n$$\n", + "text_format": "latex", + "bbox": [ + 279, + 330, + 825, + 364 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "By the property of cross-entropy loss, (114) attains its global minimum if and only if $f_{j} = \\frac{p}{C - 1}$ , for all $j \\in [C], j \\neq y$ and $f_{y} = 1 - p$ . Due to the large expressiveness of modern deep learning models, there typically exists a set $S^{*} := \\{\\pmb{\\theta} \\mid f_{i}(\\pmb{\\theta}) = \\mathbb{E}[\\hat{y}_{i}], \\forall i \\in [N]\\}$ such that all elements of $S^{*}$ minimize $\\mathcal{L}(\\pmb{\\theta})$ . Then, the manifold $\\Gamma$ is a subset of $S^{*}$ . 
The following lemma relates the noise covariance $\\pmb{\\Sigma}(\\pmb{\\theta}) := \\frac{1}{N}\\sum_{i \\in [N]}\\mathbb{E}[(\\nabla\\ell(\\pmb{\\theta};\\pmb{x}_{i},\\hat{y}_{i}) - \\nabla\\mathcal{L}(\\pmb{\\theta}))(\\nabla\\ell(\\pmb{\\theta};\\pmb{x}_{i},\\hat{y}_{i}) - \\nabla\\mathcal{L}(\\pmb{\\theta}))^{\\top}]$ to the hessian $\\nabla^{2}\\mathcal{L}(\\pmb{\\theta})$ for all $\\pmb{\\theta} \\in S^{*}$ .", + "bbox": [ + 169, + 369, + 823, + 462 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Lemma L.1. If $f(\\pmb{\\theta}; \\pmb{x}_i, \\hat{y}_i)$ is $\\mathcal{C}^2$ -smooth on $\\mathbb{R}^d$ given any $i \\in [N]$ , $\\hat{y}_i \\in [C]$ and $\\mathcal{S}^* \\neq \\emptyset$ , then for all $\\pmb{\\theta} \\in \\mathcal{S}^*$ , $\\pmb{\\Sigma}(\\pmb{\\theta}) = \\nabla^2 \\mathcal{L}(\\pmb{\\theta})$ .", + "bbox": [ + 169, + 464, + 823, + 494 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Proof. Since $\\mathcal{L}(\\cdot)$ is $\\mathcal{C}_2$ -smooth, $\\nabla \\mathcal{L}(\\pmb{\\theta}) = \\mathbf{0}$ for all $\\pmb{\\theta} \\in S^*$ . To prove the above lemma, it suffices to show that $\\forall i \\in [N]$ , $\\mathbb{E}[\\nabla \\ell(\\pmb{\\theta}; \\pmb{x}_i, \\hat{y}_i) \\nabla \\ell(\\pmb{\\theta}; \\pmb{x}_i, \\hat{y}_i)^\\top] = \\nabla^2 \\mathcal{L}(\\pmb{\\theta})$ . 
W.L.O.G, let $y = 1$ and therefore for all $\\pmb{\\theta} \\in S^*$ ,", + "bbox": [ + 169, + 508, + 823, + 551 + ], + "page_idx": 71 + }, + { + "type": "equation", + "text": "\n$$\nf _ {1} (\\boldsymbol {\\theta}; \\boldsymbol {x}) = 1 - p =: a _ {1},\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 556, + 521, + 571 + ], + "page_idx": 71 + }, + { + "type": "equation", + "text": "\n$$\nf _ {j} (\\boldsymbol {\\theta}; \\boldsymbol {x}) = \\frac {p}{C - 1} =: a _ {2}, \\forall j > 1, j \\in [ C ].\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 574, + 638, + 601 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Additionally, let $h(x) \\coloneqq -\\log (x), x \\in \\mathbb{R}^{+}$ . The stochastic gradient $\\nabla \\ell(\\pmb{\\theta}; \\pmb{x}, \\hat{y})$ follows the distribution:", + "bbox": [ + 169, + 606, + 823, + 633 + ], + "page_idx": 71 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) = \\left\\{ \\begin{array}{l l} h ^ {\\prime} (a _ {1}) \\frac {\\partial f _ {1}}{\\partial \\boldsymbol {\\theta}} & \\text {w . p .} 1 - p, \\\\ h ^ {\\prime} (a _ {2}) \\frac {\\partial f _ {j}}{\\partial \\boldsymbol {\\theta}}, & \\text {w . p .} \\frac {p}{C - 1}, \\forall j \\in [ C ], j > 1. 
\\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 637, + 689, + 676 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Then the covariance of the gradient noise is:", + "bbox": [ + 171, + 681, + 465, + 695 + ], + "page_idx": 71 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} [ \\nabla \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) \\nabla \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) ^ {\\top} ] = (1 - p) \\left(h ^ {\\prime} \\left(a _ {1}\\right)\\right) ^ {2} \\frac {\\partial f _ {1} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}} \\left(\\frac {\\partial f _ {1} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}}\\right) ^ {\\top} \\\\ + \\frac {p \\left(h ^ {\\prime} \\left(a _ {2}\\right)\\right) ^ {2}}{C - 1} \\sum_ {j > 1} \\frac {\\partial f _ {j} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}} \\left(\\frac {\\partial f _ {j} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}}\\right) ^ {\\top}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 700, + 741, + 779 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "And the hessian is:", + "bbox": [ + 171, + 784, + 300, + 797 + ], + "page_idx": 71 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) = (1 - p) h ^ {\\prime} (a _ {1}) \\frac {\\partial^ {2} f _ {1}}{\\partial \\boldsymbol {\\theta} ^ {2}} + \\frac {p h ^ {\\prime} (a _ {2})}{C - 1} \\sum_ {j > 1} \\frac {\\partial^ {2} f _ {j}}{\\partial \\boldsymbol {\\theta} ^ {2}} \\\\ \\underbrace {\\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad} _ {\\mathcal {T}} \\\\ + (1 - p) h ^ {\\prime \\prime} (a _ {1}) \\frac {\\partial f _ {1}}{\\partial \\boldsymbol {\\theta}} \\left(\\frac {\\partial f _ {1}}{\\partial \\boldsymbol {\\theta}}\\right) ^ {\\top} + \\frac {p h ^ {\\prime \\prime} (a _ {2})}{C - 1} \\sum_ {j > 1} \\frac {\\partial f _ {j}}{\\partial \\boldsymbol {\\theta}} \\left(\\frac {\\partial f _ {j} (\\boldsymbol {\\theta})}{\\partial \\boldsymbol {\\theta}}\\right) ^ {\\top}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 800, + 756, + 901 + ], + "page_idx": 71 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 71 + }, + { + "type": "page_number", + "text": "72", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 71 + }, + { + "type": "text", + "text": "Since $\\sum_{j\\in [C]}f_i = 1$", + "bbox": [ + 171, + 102, + 318, + 122 + ], + "page_idx": 72 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\partial^ {2} f _ {1}}{\\partial \\boldsymbol {\\theta} ^ {2}} = - \\sum_ {j > 1} \\frac {\\partial^ {2} f _ {j}}{\\partial \\boldsymbol {\\theta} ^ {2}}. \\tag {115}\n$$\n", + "text_format": "latex", + "bbox": [ + 426, + 128, + 823, + 167 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Also, notice that $h^\\prime (x) = -\\frac{1}{x}$ . Therefore,", + "bbox": [ + 171, + 175, + 449, + 194 + ], + "page_idx": 72 + }, + { + "type": "equation", + "text": "\n$$\n(1 - p) h ^ {\\prime} \\left(a _ {1}\\right) = \\frac {p h ^ {\\prime} \\left(a _ {2}\\right)}{C - 1}. 
\\tag {116}\n$$\n", + "text_format": "latex", + "bbox": [ + 408, + 200, + 825, + 231 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Substituting (115) and (116) into the expression of $\\mathcal{T}$ gives $\\mathcal{T} = \\mathbf{0}$ , which simplifies $\\nabla^2\\mathcal{L}(\\pmb{\\theta})$ as the following form:", + "bbox": [ + 169, + 238, + 823, + 268 + ], + "page_idx": 72 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla^ {2} \\mathcal {L} (\\pmb {\\theta}) = (1 - p) h ^ {\\prime \\prime} (a _ {1}) \\frac {\\partial f _ {1}}{\\partial \\pmb {\\theta}} \\left(\\frac {\\partial f _ {j} (\\pmb {\\theta})}{\\partial \\pmb {\\theta}}\\right) ^ {\\top} + \\frac {p h ^ {\\prime \\prime} (a _ {2})}{C - 1} \\sum_ {j > 1} \\frac {\\partial f _ {j}}{\\partial \\pmb {\\theta}} \\left(\\frac {\\partial f _ {j} (\\pmb {\\theta})}{\\partial \\pmb {\\theta}}\\right) ^ {\\top}.\n$$\n", + "text_format": "latex", + "bbox": [ + 233, + 273, + 761, + 316 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Again notice that $h''(x) = h'(x)$ for all $x \\in \\mathbb{R}^+$ . Therefore, $\\nabla^2\\mathcal{L}(\\pmb{\\theta}) = \\pmb{\\Sigma}(\\pmb{\\theta})$ .", + "bbox": [ + 171, + 343, + 687, + 359 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/c34ca2c7035c13ce87c8c0a9518312e2649ffe323b4770bd03aac6d8f4a67397.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 345, + 823, + 357 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "With the property $\\pmb{\\Sigma}(\\pmb{\\theta}) = \\nabla^2\\mathcal{L}(\\pmb{\\theta})$ , we are ready to prove Theorem G.1.", + "bbox": [ + 171, + 375, + 653, + 390 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Proof of Theorem G.1. 
Recall the general form of the slow SDE:", + "bbox": [ + 171, + 404, + 599, + 419 + ], + "page_idx": 72 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = \\frac {1}{\\sqrt {B}} \\partial \\Phi (\\boldsymbol {\\zeta}) \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} (t) + \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t, \\tag {117}\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 425, + 825, + 458 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "where $\\Psi$ is defined in Definition K.6. Since for $\\zeta \\in \\Gamma$ , $\\Sigma(\\zeta) = \\nabla^2\\mathcal{L}(\\zeta)$ , then", + "bbox": [ + 171, + 465, + 686, + 482 + ], + "page_idx": 72 + }, + { + "type": "equation", + "text": "\n$$\n\\partial \\Phi (\\zeta) \\Sigma^ {1 / 2} (\\zeta) = \\mathbf {0}. \\tag {118}\n$$\n", + "text_format": "latex", + "bbox": [ + 426, + 489, + 825, + 507 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Now we show that", + "bbox": [ + 171, + 513, + 299, + 527 + ], + "page_idx": 72 + }, + { + "type": "equation", + "text": "\n$$\n\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) ] = - \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right). \\tag {119}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 534, + 825, + 553 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Since $\\nabla^2\\mathcal{L}(\\zeta) = \\Sigma (\\zeta)$ , $\\mathcal{V}_{\\nabla^2\\mathcal{L}(\\zeta)}[\\Sigma ] = \\frac{1}{2}\\pmb {I}$ . 
By Lemma K.4,", + "bbox": [ + 169, + 559, + 571, + 578 + ], + "page_idx": 72 + }, + { + "type": "equation", + "text": "\n$$\n\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) ] = - \\frac {1}{2} \\partial \\Phi (\\boldsymbol {\\zeta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\boldsymbol {I} ] = - \\frac {1}{2} \\nabla_ {\\Gamma} \\mathrm {t r} (\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})).\n$$\n", + "text_format": "latex", + "bbox": [ + 289, + 584, + 705, + 614 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Finally, we show that", + "bbox": [ + 171, + 619, + 316, + 633 + ], + "page_idx": 72 + }, + { + "type": "equation", + "text": "\n$$\n\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\Psi (\\boldsymbol {\\zeta}) ] = - \\nabla_ {\\Gamma} \\frac {1}{2 H \\eta} \\operatorname {t r} (F (2 H \\eta \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta}))). \\tag {120}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 641, + 825, + 672 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "Define $\\hat{\\psi}(x) \\coloneqq x\\psi(x) = e^{-x} - 1 + x$ . By definition of $\\Psi(\\zeta)$ , when $\\Sigma(\\zeta) = \\nabla^2\\mathcal{L}(\\zeta)$ , $\\Psi(\\zeta) = \\hat{\\psi}(2\\eta H\\nabla^2\\mathcal{L}(\\zeta))$ , where $\\hat{\\psi}(\\cdot)$ is interpreted as a matrix function. 
Since $\\psi(2\\eta H\\nabla^2\\mathcal{L}(\\zeta)) \\in \\operatorname{span}\\{\\pmb{u}\\pmb{u}^\\top \\mid \\pmb{u} \\in T_\\zeta^\\perp(\\Gamma)\\}$ , by Lemma K.4,", + "bbox": [ + 169, + 680, + 823, + 729 + ], + "page_idx": 72 + }, + { + "type": "equation", + "text": "\n$$\n\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\Psi (\\boldsymbol {\\zeta}) ] = - \\frac {1}{2} \\partial \\Phi (\\boldsymbol {\\zeta}) \\mathrm {t r} \\psi (2 \\eta H \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})).\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 736, + 656, + 765 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "By the chain rule, we have (120). Combining (118),(119) and (120) gives the theorem.", + "bbox": [ + 171, + 771, + 741, + 787 + ], + "page_idx": 72 + }, + { + "type": "image", + "img_path": "images/96cdd3309715f5f0e273dec7f88f6f3d099b9cc141fe0c1bcc7856562356a5e3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 772, + 823, + 784 + ], + "page_idx": 72 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 72 + }, + { + "type": "page_number", + "text": "73", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 72 + }, + { + "type": "text", + "text": "M EXPERIMENTAL DETAILS", + "text_level": 1, + "bbox": [ + 171, + 102, + 426, + 118 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "In this section, we specify the experimental details that are omitted in the main text. Our experiments are conducted on CIFAR-10 (Krizhevsky et al., 2009) and ImageNet Russakovsky et al. (2015). Our code is available at https://github.com/hmgxr128/Local-SGD. Our implementation of ResNet-56 (He et al., 2016) and VGG-16 (Simonyan & Zisserman, 2015) is based on the high-starred repository by Wei Yang $^{2}$ and we use the implementation of ResNet-50 from torchvision 0.3.1. 
We run all CIFAR-10 experiments with $B_{\\mathrm{loc}} = 128$ on 8 NVIDIA Tesla P100 GPUs while ImageNet experiments are run on 8 NVIDIA A5000 GPU with $B_{\\mathrm{loc}} = 32$ . All ImageNet experiments are trained with ResNet-50.", + "bbox": [ + 169, + 133, + 826, + 247 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "We generally adopt the following training strategies. We do not add any momentum unless otherwise stated. We follow the suggestions by Jia et al. (2018) and do not add weight decay to the bias and learnable parameters in the normalization layers. For all models with BatchNorm layers, we go through 100 batches of data with batch size $B_{\\mathrm{loc}}$ to estimate the running mean and variance before evaluation. Experiments on both datasets follow the standard data augmentation pipeline in He et al. (2016) except the label noise experiments. Additionally, we use FFCV (Leclerc et al., 2022) to accelerate data loading for ImageNet training.", + "bbox": [ + 169, + 252, + 826, + 353 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "Slightly different from the update rule of Local SGD in Section 1, we use sampling without replacement unless otherwise stated. See Appendix C for implementation details and discussion.", + "bbox": [ + 169, + 357, + 823, + 387 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "M.1 POST-LOCAL SGD EXPERIMENTS IN SECTION 1", + "text_level": 1, + "bbox": [ + 171, + 404, + 553, + 419 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "CIFAR-10 experiments. We simulate 32 clients with $B = 4096$ . We follow the linear scaling rule and linear learning rate warmup strategy suggested by Goyal et al. (2017). We first run 250 epochs of SGD with the learning rate gradually ramping up from 0.1 to 3.2 for the first 50 epochs. Resuming from the model obtained at epoch 250, we run Local SGD with $\\eta = 0.32$ . 
Note that we conduct grid search for the initial learning rate among $\\{0.005, 0.01, 0.05, 0.1, 0.15, 0.2\\}$ and choose the learning rate with which parallel SGD $(H = 1)$ achieves the best test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. The weight decay $\\lambda$ is set as $5 \\times 10^{-4}$ . As for the initialization scheme, we follow Lin et al. (2020b) and Goyal et al. (2017). Specifically, we use Kaiming Normal (He et al., 2015) for the weights of convolutional layers and initialize the weights of fully-connected layers by a Gaussian distribution with mean zero and standard deviation 0.01. The weights for normalization layers are initialized as one. All bias parameters are initialized as zero. We report the mean and standard deviation over 5 runs.", + "bbox": [ + 169, + 430, + 826, + 599 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "ImageNet experiments. We simulate 256 workers with $B = 8192$ . We follow the linear scaling rule and linear learning rate warmup strategy suggested by Goyal et al. (2017). We first run 100 epochs of SGD where the learning rate linearly ramps up from 0.5 to 16 for the first 5 epochs and then decays by a factor of 0.1 at epoch 50. Resuming from epoch 100, we run Local SGD with $\\eta = 0.16$ . Note that we conduct grid search for the initial learning rate among $\\{0.05, 0.1, 0.5, 1\\}$ and choose the learning rate with which parallel SGD $(H = 1)$ achieves the best test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. The weight decay $\\lambda$ is set as $1 \\times 10^{-4}$ and we do not add any momentum. The initialization scheme follows the implementation of torchvision 0.3.1. 
We report the mean and standard deviation over 3 runs.", + "bbox": [ + 169, + 614, + 826, + 742 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "M.2 EXPERIMENTAL DETAILS FOR FIGURES 2 AND 5", + "text_level": 1, + "bbox": [ + 171, + 758, + 555, + 772 + ], + "page_idx": 73 + }, + { + "type": "text", + "text": "CIFAR-10 experiments. We use ResNet-56 for all CIFAR-10 experiments in the two figures. We simulate 32 workers with $B = 4096$ and set the weight decay as $5 \\times 10^{-4}$ . For Figures 2(a) and 2(b), we set $\\eta = 0.32$ , which is the same as the learning rate after decay in Figure 1(a). For Figure 2(a), we adopt the same initialization scheme introduced in the corresponding paragraph in Appendix M.1. For Figures 2(b), 2(e) and 5(c), we use the model at epoch 250 in Figure 1(a) as the pre-trained model. Additionally, we use a training budget of 250 epochs for Figure 2(e). In Figure 5(e), we use Local SGD with momentum 0.9, where the momentum buffer is kept locally and never averaged. We run SGD with momentum 0.9 for 150 epochs to obtain the pre-trained model, where the learning", + "bbox": [ + 169, + 785, + 826, + 898 + ], + "page_idx": 73 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 73 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://github.com/bearpaw/pytorch-classification", + "bbox": [ + 191, + 909, + 493, + 924 + ], + "page_idx": 73 + }, + { + "type": "page_number", + "text": "74", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 73 + }, + { + "type": "image", + "img_path": "images/612528912af565b1974a5b0405841ebf3de8c57a1667022b0675b195111bc322.jpg", + "image_caption": [ + "(a) CIFAR-10, start from #250." 
+ ], + "image_footnote": [], + "bbox": [ + 228, + 65, + 480, + 186 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/2c96fc87eb556badec6bbbfd81dc240c15e15b91feb73206c8d82f2bd1f576f8.jpg", + "image_caption": [ + "(b) ImageNet, start from #100." + ], + "image_footnote": [], + "bbox": [ + 516, + 66, + 767, + 186 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/15b9a9acdf8d649523f76a01f91a745314bc26039553bf831b31cfae87f772b9.jpg", + "image_caption": [ + "(c) CIFAR-10, start from #250, optimal $H$ ." + ], + "image_footnote": [], + "bbox": [ + 227, + 222, + 478, + 344 + ], + "page_idx": 74 + }, + { + "type": "image", + "img_path": "images/e69a09c2ea1ac40840af919aaed448f9ff983a5c9933c7c9ba82086774a9ec9f.jpg", + "image_caption": [ + "(d) ImageNet, start from #100, optimal $H$ .", + "Figure 10: The learning curves for experiments in Figure 4." + ], + "image_footnote": [], + "bbox": [ + 514, + 223, + 766, + 344 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "rate ramps up from 0.05 to 1.6 linearly in the first 150 epochs. Note that we conduct grid search for the initial learning rate among $\\{0.01, 0.05, 0.1, 0.15, 0.2\\}$ and choose the learning rate with which parallel SGD $(H = 1)$ achieves the highest test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. Resuming from epoch 150, we run Local SGD $H = 1$ (i.e., SGD) and 24 with $\\eta = 0.16$ and decay $\\eta$ by 0.1 at epoch 226. For Local SGD $H = 900$ , we resume from the model at epoch 226 of $H = 24$ with $\\eta = 0.016$ . We report the mean and standard deviation over 3 runs for Figures 2(a), 2(b) and 5(c), and over 5 runs for Figure 2(e).", + "bbox": [ + 169, + 404, + 826, + 503 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "ImageNet experiments. We simulate 256 clients with $B = 8192$ and set the weight decay as $1 \\times 10^{-4}$ . 
In Figure 2(d), both Local SGD and SGD start from the same random initialization. We warm up the learning rate from 0.1 to 3.2 in the first 5 epochs and decay the learning rate by a factor of 0.1 at epochs 50 and 100. For Figures 2(c), 2(f) and 5(d), we use the model at epoch 100 in Figure 1(b) as the pre-trained model. In Figure 2(c), we set the learning rate as 0.16, which is the same as the learning rate after epoch 100 in Figure 1(b). Finally, in Figures 2(c), 2(f), 5(b) and 5(d), we report the mean and average over 3 runs.", + "bbox": [ + 169, + 516, + 826, + 616 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "M.3 DETAILS FOR EXPERIMENTS IN FIGURE 6", + "text_level": 1, + "bbox": [ + 171, + 631, + 511, + 645 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "For all experiments in Figure 6, we train a ResNet-56 model on CIFAR-10. We report mean test accuracy over three runs and the shaded area reflects the standard deviation. For Figure 6(a), we use the same setup as Figures 2(a) and 2(b) for training from random initialization and from a pre-trained model respectively except the learning rate. For Figure 6(b), we resume from the model obtained at epoch 250 in Figure 1(a) and train for another 250 epochs. For Figure 6(c), we follow the same procedure as Figure 1(a) except that we use sampling with replacement. We also ensure that the total numbers of iterations in Figures 1(a) and 6(c) are the same.", + "bbox": [ + 169, + 656, + 826, + 755 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "M.4 DETAILS FOR EXPERIMENTS ON THE EFFECT OF THE DIFFUSION TERM", + "text_level": 1, + "bbox": [ + 171, + 771, + 714, + 785 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "CIFAR-10 experiments. The model we use is ResNet-56. For Figure 3(a), we first run SGD with batch size 128 and learning rate $\\eta = 0.5$ for 250 epochs to obtain the pre-trained model. 
The initialization scheme is the same as the corresponding paragraph in Appendix M.1. Resuming from epoch 250 with $\\eta = 0.05$ , we run Local SGD with $K = 16$ until epoch 6000 and run all other setups for the same number of iterations. We report the mean and standard deviation over 3 runs.", + "bbox": [ + 169, + 796, + 823, + 868 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "ImageNet experiments. For Figures 3(b) and 4(b), we start from the model obtained at epoch 100 in Figure 1(b). In Figure 3(b), we run Local SGD with $K = 256$ for another 150 epochs with $\\eta = 0.032$ . We run all other setups for the same number of iterations with the same learning rate.", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 74 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 74 + }, + { + "type": "page_number", + "text": "75", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 74 + }, + { + "type": "text", + "text": "M.5 DETAILS FOR EXPERIMENTS ON THE EFFECT OF GLOBAL BATCH SIZE", + "text_level": 1, + "bbox": [ + 171, + 103, + 709, + 118 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "CIFAR-10 experiments. The model we use is ResNet-56. We resume from the model obtained in Figure 1(a) at epoch 250 and train for another 250 epochs. The local batch size for all runs is $B_{\\mathrm{loc}} = 128$ . We first make grid search of $\\eta$ for SGD with $K = 16$ among $\\{0.04, 0.08, 0.16, 0.32, 0.64\\}$ and find that the final test accuracy varies little across different learning rates (within $0.1\\%$ ). Then we choose $\\eta = 0.32$ . For the green curve in Figure 4(a), we search for the optimal $H$ for $K = 16$ and keep $\\alpha$ fixed when scaling $\\eta$ with $K$ . 
For the red curve in Figure 4(a), we search for the optimal $H$ for each $K$ among $\\{6, 12, 60, 120, 300, 750, 1500, 3000, 6000, 12000, 24000\\}$ and also make sure that $H$ does not exceed the total number of iterations for 250 epochs. The learning curves for constant and optimal $\\alpha$ are visualized in Figures 10(a) and 10(c) respectively. We report the mean and standard deviation over three runs.", + "bbox": [ + 169, + 128, + 826, + 268 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "ImageNet experiments. We start from the model obtained at epoch 100 in Figure 1(b) and train for another 50 epochs. The local batch size for all runs is $B_{\\mathrm{loc}} = 32$ . We first make grid search among $\\{0.032, 0.064, 0.16, 0.32\\}$ for $H = 1$ to achieve the best test accuracy and choose $H = 0.064$ . For the orange curve in Figure 4(b), we search $H$ among $\\{2, 4, 6, 13, 26, 52, 78, 156\\}$ for $K = 256$ to achieve the optimal test accuracy and the keep $\\alpha$ constant as we scale $\\eta$ with $K$ . To obtain the optimal $H$ for each $K$ , we search among $\\{6240, 7800, 10400, 12480, 15600, 20800, 24960, 31200\\}$ for $K = 16$ , $\\{1600, 3120, 4160, 5200, 6240, 7800, 10400\\}$ for $K = 32$ , $\\{312, 480, 520, 624, 800, 975, 1040, 1248, 1560, 1950\\}$ for $K = 64$ , and $\\{1, 2, 3, 6, 13\\}$ for $K = 512$ . The learning curves for constant and optimal $\\alpha$ are visualized in Figures 10(b) and 10(d) respectively. We report the mean and standard deviation over three runs.", + "bbox": [ + 169, + 284, + 826, + 422 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "M.6 DETAILS FOR EXPERIMENTS ON LABEL NOISE REGULARIZATION", + "text_level": 1, + "bbox": [ + 171, + 440, + 674, + 455 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "For all label noise experiments, we do not use data augmentation, use sampling with replacement, and set the corruption probability as 0.1. 
We simulate 32 workers with $B = 4096$ in Figure 7 and 4 workers with $B = 512$ in Figure 8. We use ResNet-56 with GroupNorm with the number of groups 8 for Figure 7(a) and VGG-16 without normalization for Figures 7(b) and 8. Below we list the training details for ResNet-56 and VGG-16 respectively.", + "bbox": [ + 169, + 465, + 826, + 537 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "ResNet-56. As for the model architecture, we replace the batch normalization layer in Yang's implementation with group normalization such that the training loss is independent of the sampling order. We also use Swish activation (Ramachandran et al., 2017) in place of ReLU to ensure the smoothness of the loss function. We generate the pre-trained model by running label noise SGD with corruption probability $p = 0.1$ for 500 epochs (6,000 iterations). We initialize the model by the same strategy introduced in the first paragraph of Appendix M.1. Applying the linear warmup scheme proposed by Goyal et al. (2017), we gradually ramp up the learning rate $\\eta$ from 0.1 to 3.2 for the first 20 epochs and multiply the learning rate by 0.1 at epoch 250. All subsequent experiments in Figure 7(a) (a) use learning rate 0.1. The weight decay $\\lambda$ is set as $5 \\times 10^{-4}$ . Note that adding weight decay in the presence of normalization accelerates the limiting dynamics and will not affect the implicit regularization on the original loss function (Li et al., 2022).", + "bbox": [ + 169, + 551, + 826, + 705 + ], + "page_idx": 75 + }, + { + "type": "text", + "text": "VGG-16. We follow Yang's implementation of the model architecture except that we replace maximum pooling with average pooling and use Swish activation (Ramachandran et al., 2017) to make the training loss smooth. We initialize all weight parameters by Kaiming Normal and all bias parameters as zero. 
The pre-trained model is obtained by running label noise SGD with total batch size 4096 and corruption probability $p = 0.1$ for 6000 iterations. We use a linear learning rate warmup from 0.1 to 0.5 in the first 500 iterations. All runs in Figures 7(b) and 8 resume from the model obtained by SGD with label noise. In Figure 7(b), we use learning rate $\\eta = 0.1$ . In Figure 8, we set $\\eta = 0.005$ for $H = 97,000$ and $\\eta = 0.01$ for SGD $(H = 1)$ . The weight decay $\\lambda$ is set as zero.", + "bbox": [ + 169, + 719, + 826, + 832 + ], + "page_idx": 75 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 75 + }, + { + "type": "page_number", + "text": "76", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 75 + } +] \ No newline at end of file diff --git a/2023/Why (and When) does Local SGD Generalize Better than SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_model.json b/2023/Why (and When) does Local SGD Generalize Better than SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e1e625530a3ce13e5f89f3e350dc805e2310f8eb --- /dev/null +++ b/2023/Why (and When) does Local SGD Generalize Better than SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_model.json @@ -0,0 +1,16698 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.825, + 0.148 + ], + "angle": 0, + "content": "WHY (AND WHEN) DOES LOCAL SGD GENERALIZE BETTER THAN SGD?" 
+ }, + { + "type": "text", + "bbox": [ + 0.212, + 0.17, + 0.3, + 0.184 + ], + "angle": 0, + "content": "Xinran Gu*" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.185, + 0.547, + 0.213 + ], + "angle": 0, + "content": "Institute for Interdisciplinary Information Sciences Tsinghua University" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.213, + 0.48, + 0.227 + ], + "angle": 0, + "content": "gxr21@mails.tsinghua.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.563, + 0.171, + 0.66, + 0.185 + ], + "angle": 0, + "content": "Kaifeng Lyu*" + }, + { + "type": "text", + "bbox": [ + 0.563, + 0.185, + 0.786, + 0.213 + ], + "angle": 0, + "content": "Department of Computer Science Princeton University" + }, + { + "type": "text", + "bbox": [ + 0.563, + 0.213, + 0.772, + 0.227 + ], + "angle": 0, + "content": "klyu@cs.princeton.edu" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.248, + 0.327, + 0.262 + ], + "angle": 0, + "content": "Longbo Huang†" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.262, + 0.547, + 0.277 + ], + "angle": 0, + "content": "Institute for Interdisciplinary Information Sciences" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.277, + 0.35, + 0.29 + ], + "angle": 0, + "content": "Tsinghua University" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.29, + 0.48, + 0.304 + ], + "angle": 0, + "content": "longbohuang@tsinghua.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.563, + 0.248, + 0.673, + 0.262 + ], + "angle": 0, + "content": "Sanjeev Arora†" + }, + { + "type": "text", + "bbox": [ + 0.563, + 0.262, + 0.786, + 0.276 + ], + "angle": 0, + "content": "Department of Computer Science" + }, + { + "type": "text", + "bbox": [ + 0.563, + 0.277, + 0.702, + 0.29 + ], + "angle": 0, + "content": "Princeton University" + }, + { + "type": "text", + "bbox": [ + 0.563, + 0.291, + 0.781, + 0.304 + ], + "angle": 0, + "content": "arora@cs.princeton.edu" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.323, + 0.547, + 0.337 + ], + "angle": 0, 
+ "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.354, + 0.769, + 0.592 + ], + "angle": 0, + "content": "Local SGD is a communication-efficient variant of SGD for large-scale training, where multiple GPUs perform SGD independently and average the model parameters periodically. It has been recently observed that Local SGD can not only achieve the design goal of reducing the communication overhead but also lead to higher test accuracy than the corresponding SGD baseline (Lin et al., 2020b), though the training regimes for this to happen are still in debate (Ortiz et al., 2021). This paper aims to understand why (and when) Local SGD generalizes better based on Stochastic Differential Equation (SDE) approximation. The main contributions of this paper include (i) the derivation of an SDE that captures the long-term behavior of Local SGD in the small learning rate regime, showing how noise drives the iterate to drift and diffuse after it has reached close to the manifold of local minima, (ii) a comparison between the SDEs of Local SGD and SGD, showing that Local SGD induces a stronger drift term that can result in a stronger effect of regularization, e.g., a faster reduction of sharpness, and (iii) empirical evidence validating that having a small learning rate and long enough training time enables the generalization improvement over SGD but removing either of the two conditions leads to no improvement." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.618, + 0.338, + 0.632 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.649, + 0.828, + 0.74 + ], + "angle": 0, + "content": "As deep models have grown larger, training them with reasonable wall-clock times has led to new distributed environments and new variants of gradient-based training. 
Recall that Stochastic Gradient Descent (SGD) tries to solve \\(\\min_{\\pmb{\\theta} \\in \\mathbb{R}^d} \\mathbb{E}_{\\xi \\sim \\hat{\\mathcal{D}}}[\\ell(\\pmb{\\theta}; \\xi)]\\), where \\(\\pmb{\\theta} \\in \\mathbb{R}^d\\) is the parameter vector of the model, \\(\\ell(\\pmb{\\theta}; \\xi)\\) is the loss function for a data sample \\(\\xi\\) drawn from the training distribution \\(\\tilde{\\mathcal{D}}\\), e.g., the uniform distribution over the training set. SGD with learning rate \\(\\eta\\) and batch size \\(B\\) does the following update at each step, using a batch of \\(B\\) independent \\(\\xi_{t,1}, \\ldots, \\xi_{t,B} \\sim \\tilde{\\mathcal{D}}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.316, + 0.748, + 0.826, + 0.789 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} _ {t + 1} \\leftarrow \\boldsymbol {\\theta} _ {t} - \\eta \\boldsymbol {g} _ {t}, \\quad \\text {w h e r e} \\quad \\boldsymbol {g} _ {t} = \\frac {1}{B} \\sum_ {i = 1} ^ {B} \\nabla \\ell \\left(\\boldsymbol {\\theta} _ {t}; \\xi_ {t, i}\\right). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.796, + 0.825, + 0.853 + ], + "angle": 0, + "content": "Parallel SGD tries to improve wall-clock time when the batch size \\( B \\) is large enough. It distributes the gradient computation to \\( K \\geq 2 \\) workers, each of whom focuses on a local batch of \\( B_{\\mathrm{loc}} := B / K \\) samples and computes the average gradient over the local batch. Finally, \\( g_{t} \\) is obtained by averaging the local gradients over the \\( K \\) workers." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.859, + 0.826, + 0.889 + ], + "angle": 0, + "content": "However, large-batch training leads to a significant test accuracy drop compared to a small-batch training baseline with the same number of training steps or epochs (Smith et al., 2020; Shallue et al.," + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.897, + 0.314, + 0.911 + ], + "angle": 0, + "content": "*Equal contribution" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.911, + 0.338, + 0.925 + ], + "angle": 0, + "content": "†Corresponding authors" + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.897, + 0.338, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.229, + 0.058, + 0.481, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.235, + 0.185, + 0.466, + 0.199 + ], + "angle": 0, + "content": "(a) CIFAR-10, \\(B = 4096\\) , ResNet-56." + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.057, + 0.768, + 0.177 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.186, + 0.751, + 0.199 + ], + "angle": 0, + "content": "(b) ImageNet, \\(B = 8192\\), ResNet-50." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.202, + 0.825, + 0.242 + ], + "angle": 0, + "content": "Figure 1: Post-Local SGD (\\(H > 1\\)) generalizes better than SGD (\\(H = 1\\)). We switch to Local SGD at the first learning rate decay (epoch #250) for CIFAR-10 and at the second learning rate decay (epoch #100) for ImageNet. See Appendix M.1 for training details." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.252, + 0.825, + 0.336 + ], + "angle": 0, + "content": "2019; Keskar et al., 2017; Jastrzebski et al., 2017). Reducing this generalization gap is the goal of much subsequent research. It was suggested that the generalization gap arises because larger batches lead to a reduction in the level of noise in batch gradient (see Appendix A for more discussion). The Linear Scaling Rule (Krizhevsky, 2014; Goyal et al., 2017; Jastrzebski et al., 2017) tries to fix this by increasing the learning rate in proportion to batch size. This is found to reduce the generalization gap for (parallel) SGD, but does not entirely eliminate it." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.342, + 0.827, + 0.483 + ], + "angle": 0, + "content": "To reduce the generalization gap further, Lin et al. (2020b) discovered that a variant of SGD, called Local SGD (Yu et al., 2019; Wang & Joshi, 2019; Zhou & Cong, 2018), can be used as a strong component. Perhaps surprisingly, Local SGD itself is not designed for improving generalization, but for reducing the high communication cost for synchronization among the workers, which is another important issue that often bottlenecks large-batch training (Seide et al., 2014; Strom, 2015; Chen et al., 2016; Recht et al., 2011). Instead of averaging the local gradients per step as in parallel SGD, Local SGD allows \\( K \\) workers to train their models locally and averages the local model parameters whenever they finish \\( H \\) local steps. Here every worker samples a new batch at each local step, and in this paper we focus on the case where all the workers draw samples with or without replacement from the same training set. See Appendix C for the pseudocode." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.488, + 0.825, + 0.56 + ], + "angle": 0, + "content": "More specifically, Lin et al. 
(2020b) proposed Post-local SGD, a hybrid method that starts with parallel SGD (equivalent to Local SGD with \\( H = 1 \\) in math) and switches to Local SGD with \\( H > 1 \\) after a fixed number of steps \\( t_0 \\). They showed through extensive experiments that Post-local SGD significantly outperforms parallel SGD in test accuracy when \\( t_0 \\) is carefully chosen. In Figure 1, we reproduce this phenomenon on both CIFAR-10 and ImageNet." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.565, + 0.827, + 0.65 + ], + "angle": 0, + "content": "As suggested by the success of Post-local SGD, Local SGD can improve the generalization of SGD by merely adding more local steps (while fixing the other hyperparameters), at least when the training starts from a model pre-trained by SGD. But the underlying mechanism is not very clear, and there is also controversy about when this phenomenon can happen (see Section 2.1 for a survey). The current paper tries to understand: Why does Local SGD generalize better? Under what general conditions does this generalization benefit arise?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.656, + 0.825, + 0.754 + ], + "angle": 0, + "content": "Previous theoretical research on Local SGD is mainly restricted to the convergence rate for minimizing a convex or non-convex objective (see Appendix A for a survey). A related line of works (Stich, 2018; Yu et al., 2019; Khaled et al., 2020) showed that Local SGD has a slower convergence rate compared with parallel SGD after running the same number of steps/epochs. This convergence result suggests that Local SGD may implicitly regularize the model through insufficient optimization, but this does not explain why parallel SGD with early stopping, which may incur an even higher training loss, still generalizes worse than Post-local SGD." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.76, + 0.825, + 0.79 + ], + "angle": 0, + "content": "Our Contributions. 
In this paper, we provide the first theoretical understanding on why (and when) switching from parallel SGD to Local SGD improves generalization." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.793, + 0.825, + 0.849 + ], + "angle": 0, + "content": "1. In Section 2.2, we conduct ablation studies on CIFAR-10 and ImageNet and identify a clean setting where adding local steps to SGD consistently improves generalization: if the learning rate is small and the total number of steps is sufficient, Local SGD eventually generalizes better than the corresponding (parallel) SGD baseline." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.854, + 0.826, + 0.926 + ], + "angle": 0, + "content": "2. In Section 3.2, we derive a special SDE that characterizes the long-term behavior of Local SGD in the small learning rate regime, as inspired by a previous work (Li et al., 2021b) that proposed this type of SDE for modeling SGD. These SDEs can track the dynamics after the iterate has reached close to a manifold of minima. In this regime, the expected gradient is near zero, but the gradient noise can drive the iterate to wander around. In contrast to the conventional SDE (3) for" + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.793, + 0.826, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "SGD, where the drift and diffusion terms are connected respectively to the expected gradient and gradient noise, the SDE we derived for Local SGD has drift and diffusion terms both connected to gradient noise." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.155, + 0.826, + 0.212 + ], + "angle": 0, + "content": "3. 
Section 3.3 explains the generalization improvement of Local SGD over SGD by comparing the corresponding SDEs: increasing the number of local steps \\( H \\) strengthens the drift term of SDE while keeping the diffusion term untouched. We hypothesize that having a stronger drift term can benefit generalization." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.221, + 0.825, + 0.25 + ], + "angle": 0, + "content": "4. As a by-product, we provide a new proof technique that can give the first quantitative approximation bound for how well Li et al. (2021b)'s SDE approximates SGD." + }, + { + "type": "list", + "bbox": [ + 0.179, + 0.155, + 0.826, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.256, + 0.825, + 0.314 + ], + "angle": 0, + "content": "Back to the discussion on the generalization gap between small- and large-batch training, we remark that this gap can occur early in training when the learning rate is very large (Smith et al., 2020) and Local SGD cannot prevent this gap in this phase. Instead, our theory suggests that Local SGD can reduce the gap in late training phases after decaying the learning rate." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.329, + 0.621, + 0.347 + ], + "angle": 0, + "content": "2 WHEN DOES LOCAL SGD GENERALIZE BETTER?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.355, + 0.825, + 0.427 + ], + "angle": 0, + "content": "In our motivating example of Post-local SGD, switching from SGD to Local SGD can outperform running SGD alone (i.e., no switching) in test accuracy, but this improvement does not always arise and can depend on the choice of the switching time point. Because of this, a necessary first step for developing a theoretical understanding of Local SGD is to identify under what general conditions Local SGD can improve the generalization of SGD by merely adding local steps." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.442, + 0.424, + 0.456 + ], + "angle": 0, + "content": "2.1 THE DEBATE ON LOCAL SGD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.464, + 0.825, + 0.494 + ], + "angle": 0, + "content": "We first summarize a debate in the literature regarding when to switch from SGD to Local SGD in running Post-local SGD, which hints the conditions so that Local SGD can improve upon SGD." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.499, + 0.826, + 0.668 + ], + "angle": 0, + "content": "Local SGD generalizes better than SGD on CIFAR-10. Lin et al. (2020b) empirically observed that Post-local SGD exhibits a better generalization performance than SGD. Most of their experiments are conducted on CIFAR-10 and CIFAR-100 with multiple learning rate decays, and the algorithm switches from (parallel) SGD to Local SGD right after the first learning rate decay. We refer to this particular choice of the switching time point as the first-decay switching strategy for short. To justify this strategy, they empirically showed that the generalization improvement can be less significant if starting Local SGD from the beginning or right after the second learning rate decay. It has also been observed by Wang & Joshi (2021) that running Local SGD from the beginning improves generalization, but the test accuracy improvement may not be large enough. A subsequent work by Lin et al. (2020a) showed that adding local steps to Extrap-SGD, a variant of SGD proposed therein, after the first learning rate decay also improves generalization, suggesting that the first-decay switching strategy can also be applied to the post-local variant of other optimizers." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.673, + 0.826, + 0.883 + ], + "angle": 0, + "content": "Does Local SGD exhibit the same generalization benefit on large-scale datasets? Going beyond CIFAR-10, Lin et al. 
(2020b) conducted a few ImageNet experiments and showed that Post-local SGD with first-decay switching strategy still leads to better generalization than SGD. However, the improvement is sometimes marginal, e.g., \\(0.1\\%\\) for batch size 8192. For the general case, they suggested that the time of switching should be tuned aiming at \"capturing the time when trajectory starts to get into the influence basin of a local minimum\" in a footnote, but no further discussion or experiments are provided to justify this guideline. Ortiz et al. (2021) conducted a more extensive evaluation on ImageNet (with a different set of hyperparameters) and concluded with the opposite: the first-decay switching strategy can hurt the validation accuracy. Instead, switching at a later time, such as the second learning rate decay, leads to a better validation accuracy than SGD.\\(^{1}\\) To explain this phenomenon, they conjecture that switching to Local SGD has a regularization effect that is beneficial only in the short-term, so it is always better to switch as late as possible. They further conjecture that this discrepancy between CIFAR-10 and ImageNet is mainly due to the task scale. On TinyImageNet, which is a spatially downscaled subset of ImageNet, the first-decay switching strategy indeed leads to better validation accuracy." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.898, + 0.825, + 0.926 + ], + "angle": 0, + "content": "This generalization improvement is not mentioned explicitly in (Ortiz et al., 2021) but can be clearly seen from Figures 7 and 8 in their paper." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.066, + 0.373, + 0.157 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.175, + 0.163, + 0.379, + 0.177 + ], + "angle": 0, + "content": "(a) CIFAR-10, start from random." + }, + { + "type": "image", + "bbox": [ + 0.405, + 0.066, + 0.593, + 0.156 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.405, + 0.163, + 0.594, + 0.177 + ], + "angle": 0, + "content": "(b) CIFAR-10, start from #250." + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.066, + 0.815, + 0.156 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.629, + 0.163, + 0.813, + 0.177 + ], + "angle": 0, + "content": "(c) ImageNet, start from #100." + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.187, + 0.373, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.174, + 0.284, + 0.379, + 0.298 + ], + "angle": 0, + "content": "(d) ImageNet, first phase \\(\\eta = 3.2\\)" + }, + { + "type": "image", + "bbox": [ + 0.405, + 0.187, + 0.593, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.408, + 0.284, + 0.59, + 0.297 + ], + "angle": 0, + "content": "(e) CIFAR-10, test acc v.s. \\(H\\)" + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.187, + 0.815, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.633, + 0.284, + 0.808, + 0.297 + ], + "angle": 0, + "content": "(f) ImageNet, test acc v.s. 
\\(H\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.302, + 0.825, + 0.343 + ], + "angle": 0, + "content": "Figure 2: Ablation studies on \\(\\eta\\), \\(H\\) and training time in the same setting as Figure 1. For (a)(d), we train from random initialization. For (b)(c)(e)(f), we start training from the checkpoints saved at the switching time points in Figure 1 (epoch #250 for CIFAR-10 and epoch #100 for ImageNet). See Appendix M.2 for training details." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.351, + 0.735, + 0.366 + ], + "angle": 0, + "content": "2.2 KEY FACTORS: SMALL LEARNING RATE AND SUFFICIENT TRAINING TIME" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.825, + 0.501 + ], + "angle": 0, + "content": "All the above papers agree that Post-local/Local SGD improves upon SGD to some extent. However, it is in debate under what conditions the generalization benefit can consistently occur. We now conduct ablation studies to identify the key factors so that adding local steps improves the generalization of SGD. We run parallel SGD and Local SGD with the same learning rate \\(\\eta\\), local batch size \\(B_{\\mathrm{loc}}\\), and number of workers \\(K\\). We start training from the same initialization and compare their generalization after the same number of epochs. As Post-local SGD can be viewed as Local SGD starting from an SGD-pretrained model, the initial point in our experiments can be either random or a checkpoint of SGD training. See Appendix C for implementation details and Appendix M.2 for more details about the experimental setup." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.505, + 0.825, + 0.59 + ], + "angle": 0, + "content": "The first observation we have is that the generalization benefits can be reproduced on both CIFAR-10 and ImageNet in our setting (see Figure 1). We remark that Post-local SGD and SGD in Lin et al. (2020b); Ortiz et al. (2021) are implemented with accompanying Nesterov momentum terms. 
The learning rate also decays a couple of times in training with Local SGD. Nevertheless, our experiments show that the Nesterov momentum and learning rate decay are not necessary for Local SGD to generalize better than SGD. Our main finding after further ablation studies is summarized below:" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.594, + 0.827, + 0.651 + ], + "angle": 0, + "content": "Finding 2.1. Given a sufficiently small learning rate and a sufficiently long training time, Local SGD exhibits better generalization than SGD, if the number of local steps \\( H \\) per round is tuned properly according to the learning rate. This holds for both training from random initialization and from pre-trained models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.656, + 0.753, + 0.671 + ], + "angle": 0, + "content": "Now we go through each point of our main finding. See also Appendix F for more plots." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.673, + 0.825, + 0.744 + ], + "angle": 0, + "content": "(1). Pretraining is not necessary. In contrast to previous works claiming the benefits of Post-local SGD over Local SGD (Lin et al., 2020b; Ortiz et al., 2021), we observe that Local SGD with random initialization also generalizes significantly better than SGD, as long as the learning rate is small and the training time is sufficiently long (Figure 2(a)). Starting from a pretrained model may shorten the time to reach this generalization benefit to show up (Figure 2(b)), but it is not necessary." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.825, + 0.849 + ], + "angle": 0, + "content": "(2). Learning rate should be small. We experiment with a wide range of learning rates to conclude that setting a small learning rate is necessary. The learning rate is 0.32 for Figures 2(a) and 2(b) and is 0.16 for Figure 2(c). 
As shown in Figure 2(d), Local SGD encounters optimization difficulty in the first phase where \\(\\eta\\) is large (\\(\\eta = 3.2\\)), resulting in inferior final test accuracy. Even for training from a pretrained model, the generalization improvement of Local SGD disappears for large learning rates (e.g., \\(\\eta = 1.6\\) in Figure 5(d)). In contrast, if a longer training time is allowed, reducing the learning rate of Local SGD does not lead to test accuracy drop (Figure 5(c))." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.825, + 0.926 + ], + "angle": 0, + "content": "(3). Training time should be long enough. To investigate the effect of training time, in Figures 2(b) and 2(c), we extend the training budget for the Post-local SGD experiments in Figure 1 and observe that a longer training time leads to greater generalization improvement upon SGD. On the other hand, Local SGD generalizes worse than SGD in the first few epochs of Figures 2(a) and 2(c); see Figures 5(a) and 5(b) for an enlarged view." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.673, + 0.825, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.203 + ], + "angle": 0, + "content": "(4). The number of local steps \\( H \\) should be tuned carefully. The number of local steps \\( H \\) has a complex interplay with the learning rate \\( \\eta \\), but generally speaking, a smaller \\( \\eta \\) needs a higher \\( H \\) to achieve consistent generalization improvement. 
For CIFAR-10 with a post-local training budget of 250 epochs (see Figure 2(e)), the test accuracy first rises as \\( H \\) increases, and begins to fall as \\( H \\) exceeds some threshold for relatively large \\( \\eta \\) (e.g., \\( \\eta \\geq 0.5 \\)) while keeps growing for smaller \\( \\eta \\) (e.g., \\( \\eta < 0.5 \\)). For ImageNet with a post-local training budget of 50 epochs (see Figure 2(f)), the test accuracy first increases and then decreases in \\( H \\) for all learning rates." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.209, + 0.828, + 0.378 + ], + "angle": 0, + "content": "Reconciling previous works. Our finding can help to settle the debate presented in Section 2.1 to a large extent. Simultaneously requiring a small learning rate and sufficient training time poses a trade-off when learning rate decay is used with a limited training budget: switching to Local SGD earlier may lead to a large learning rate, while switching later makes the generalization improvement of Local SGD less noticeable due to fewer update steps. It is thus unsurprising that first-decay switching strategy is not always the best. The need for sufficient training time does not contradict with Ortiz et al. (2021)'s conjecture that Local SGD only has a \"short-term\" generalization benefit. In their experiments, the generalization improvement usually disappears right after the next learning rate decay (instead of after a fixed amount of time). We suspect that the real reason why the improvement vanishes is that the number of local steps \\(H\\) was kept as a constant, but our finding suggests tuning \\(H\\) after \\(\\eta\\) changes. In Figure 5(e), we reproduce this phenomenon and show that increasing \\(H\\) after learning rate decay retains the improvement." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.382, + 0.825, + 0.508 + ], + "angle": 0, + "content": "Generalization performances at the optimal learning rate of SGD. 
In practice, the learning rate of SGD is usually tuned to achieve the best training loss/Validation accuracy within a fixed training budget. Our finding suggests that when the tuned learning rate is small and the training time is sufficient, Local SGD can offer generalization improvement over SGD. As an example, in our experiments on training from an SGD-pretrained model, the optimal learning rate for SGD is 0.5 on CIFAR-10 (Figure 2(e)) and 0.064 on ImageNet (Figure 2(f)). With the same learning rate as SGD, the test accuracy is improved by \\(1.1\\%\\) on CIFAR-10 and \\(0.3\\%\\) on ImageNet when using Local SGD with \\(H = 750\\) and \\(H = 26\\) respectively. The improvement could become even higher if the learning rate of Local SGD is carefully tuned." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.516, + 0.704, + 0.531 + ], + "angle": 0, + "content": "3 THEORETICAL ANALYSIS OF LOCAL SGD: THE SLOW SDE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.538, + 0.825, + 0.595 + ], + "angle": 0, + "content": "In this section, we adopt an SDE-based approach to rigorously establish the generalization benefit of Local SGD in a general setting. Below, we first identify the difficulty of adapting the SDE framework to Local SGD. Then, we present our novel SDE characterization of Local SGD around the manifold of minimizers and explain the generalization benefit of Local SGD with our SDE." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.6, + 0.825, + 0.727 + ], + "angle": 0, + "content": "Notations. We follow the notations in Section 1. We denote by \\(\\eta\\) the learning rate, \\(K\\) the number of workers, \\(B\\) the (global) batch size, \\(B_{\\mathrm{loc}}\\coloneqq B / K\\) the local batch size, \\(H\\) the number of local steps, \\(\\ell (\\pmb {\\theta};\\zeta)\\) the loss function for a data sample \\(\\zeta\\), and \\(\\tilde{\\mathcal{D}}\\) the training distribution. 
Furthermore, we define \\(\\mathcal{L}(\\pmb {\\theta})\\coloneqq \\mathbb{E}_{\\xi \\sim \\tilde{\\mathcal{D}}}[\\ell (\\pmb {\\theta};\\xi)]\\) as the expected loss, \\(\\Sigma (\\pmb {\\theta})\\coloneqq \\operatorname{Cov}_{\\xi \\sim \\tilde{\\mathcal{D}}}[\\nabla \\ell (\\pmb {\\theta};\\xi)]\\) as the noise covariance of gradients at \\(\\pmb{\\theta}\\). Let \\(\\{W_t\\}_{t\\geq 0}\\) denote the standard Wiener process. For a mapping \\(F:\\mathbb{R}^d\\to \\mathbb{R}^d\\), denote by \\(\\partial F(\\pmb {\\theta})\\) the Jacobian at \\(\\pmb{\\theta}\\) and \\(\\partial^2 F(\\pmb {\\theta})\\) the second order derivative at \\(\\pmb{\\theta}\\). Furthermore, for any matrix \\(M\\in \\mathbb{R}^{d\\times d}\\), \\(\\partial^2 F(\\pmb {\\theta})[M] = \\sum_{i\\in [d]}\\langle \\frac{\\partial^2F_i}{\\partial\\theta^2},M\\rangle e_i\\) where \\(e_i\\) is the \\(i\\)-th vector of the standard basis. We write \\(\\partial^2 (\\nabla \\mathcal{L})(\\pmb {\\theta})[M]\\) as \\(\\nabla^3\\mathcal{L}(\\pmb {\\theta})[M]\\) for short." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.732, + 0.827, + 0.829 + ], + "angle": 0, + "content": "Local SGD. We use the following formulation of Local SGD for theoretical analysis. See also Appendix C for the pseudocode. Local SGD proceeds in multiple rounds of model averaging, where each round produces a global iterate \\(\\bar{\\theta}^{(s)}\\). In the \\((s + 1)\\)-th round, every worker \\(k \\in [K]\\) starts with its local copy of the global iterate \\(\\pmb{\\theta}_{k,0}^{(s)} \\gets \\bar{\\pmb{\\theta}}^{(s)}\\) and does \\(H\\) steps of SGD with local batches. 
In the \\(t\\)-th local step of the \\(k\\)-th worker, it draws a local batch of \\(B_{\\mathrm{loc}} \\coloneqq B / K\\) independent samples \\(\\xi_{k,t,1}^{(s)}, \\dots, \\xi_{k,t,B_{\\mathrm{loc}}}^{(s)}\\) from a shared training distribution \\(\\tilde{\\mathcal{D}}\\) and updates as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.193, + 0.83, + 0.825, + 0.869 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} \\leftarrow \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {g} _ {k, t} ^ {(s)}, \\quad \\text {w h e r e} \\quad \\boldsymbol {g} _ {k, t} ^ {(s)} = \\frac {1}{B _ {\\mathrm {l o c}}} \\sum_ {i = 1} ^ {B _ {\\mathrm {l o c}}} \\nabla \\ell \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}; \\xi_ {k, t, i} ^ {(s)}\\right), \\quad t = 0, \\dots , H - 1. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.873, + 0.825, + 0.929 + ], + "angle": 0, + "content": "The local updates on different workers are independent of each other as there is no communication. After finishing the \\(H\\) local steps, the workers aggregate the resulting local iterates \\(\\pmb{\\theta}_{k,H}^{(s)}\\) and assign the average to the next global iterate: \\(\\bar{\\pmb{\\theta}}^{(s + 1)}\\gets \\frac{1}{K}\\sum_{k = 1}^{K}\\pmb{\\theta}_{k,H}^{(s)}\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.684, + 0.119 + ], + "angle": 0, + "content": "3.1 DIFFICULTY OF ADAPTING THE SDE FRAMEWORK TO LOCAL SGD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.126, + 0.825, + 0.169 + ], + "angle": 0, + "content": "A widely-adopted approach to understanding the dynamics of SGD is to approximate it from a continuous perspective with the following SDE (3), which we call the conventional SDE approximation. Below, we discuss why it cannot be directly adopted to characterize the behavior of Local SGD." + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.172, + 0.825, + 0.197 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {X} (t) = - \\nabla \\mathcal {L} (\\boldsymbol {X}) \\mathrm {d} t + \\sqrt {\\frac {\\eta}{B}} \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {X}) \\mathrm {d} \\boldsymbol {W} _ {t}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.199, + 0.826, + 0.27 + ], + "angle": 0, + "content": "It is proved by Li et al. (2019a) that this SDE is a first-order approximation to SGD, where each discrete step corresponds to a continuous time interval of \\(\\eta\\). Several previous works adopt this SDE approximation and connect good generalization to having a large diffusion term \\(\\sqrt{\\frac{\\eta}{B}} \\Sigma^{1/2} \\mathrm{d}W_t\\) in the SDE (Jastrzewski et al., 2017; Smith et al., 2020), because a suitable amount of noise can be necessary for large-batch training to generalize well (see also Appendix A)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.276, + 0.825, + 0.348 + ], + "angle": 0, + "content": "According to Finding 2.1, it is tempting to consider the limit \\(\\eta \\to 0\\) and see if Local SGD can also be modeled via a variant of the conventional SDE. In this case the typical time length that guarantees a good SDE approximation error is \\(\\mathcal{O}(\\eta^{-1})\\) discrete steps (Li et al., 2019a; 2021a). However, this time scaling is too short for the difference to appear between Local SGD and SGD. Indeed, Theorem 3.1 below shows that they closely track each other for \\(\\mathcal{O}(\\eta^{-1})\\) steps." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.348, + 0.827, + 0.433 + ], + "angle": 0, + "content": "Theorem 3.1. Assume that the loss function \\(\\mathcal{L}\\) is \\(\\mathcal{C}^3\\)-smooth with bounded second and third order derivatives and that \\(\\nabla \\ell (\\pmb {\\theta};\\xi)\\) is bounded. Let \\(T > 0\\) be a constant, \\(\\bar{\\pmb{\\theta}}^{(s)}\\) be the \\(s\\)-th global iterate of Local SGD and \\(\\pmb {w}_t\\) be the \\(t\\)-th iterate of SGD with the same initialization \\(\\pmb {w}_0 = \\bar{\\pmb{\\theta}}^{(0)}\\) and same \\(\\eta, B_{\\mathrm{loc}}, K\\). Then for any \\(H\\leq \\frac{T}{\\eta}\\) and \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), it holds with probability at least \\(1 - \\delta\\) that for all \\(s\\leq \\frac{T}{\\eta H}\\), \\(\\| \\bar{\\pmb{\\theta}}^{(s)} - \\pmb{w}_{sH}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.435, + 0.826, + 0.465 + ], + "angle": 0, + "content": "We defer the proof to Appendix I. See also Appendix D for Lin et al. (2020b)'s attempt to model Local SGD with multiple conventional SDEs and discussions on why it does not give much insight." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.471, + 0.613, + 0.485 + ], + "angle": 0, + "content": "3.2 SDE APPROXIMATION NEAR THE MINIMIZER MANIFOLD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.493, + 0.825, + 0.593 + ], + "angle": 0, + "content": "Inspired by a recent paper (Li et al., 2021b), our strategy to overcome the shortcomings of the conventional SDE is to design a new SDE that can guarantee a good approximation for \\(\\mathcal{O}(\\eta^{-2})\\) discrete steps, much longer than the \\(\\mathcal{O}(\\eta^{-1})\\) discrete steps for the conventional SDE. Following their setting, we assume the existence of a manifold \\(\\Gamma\\) consisting only of local minimizers and track the global iterate \\(\\bar{\\theta}^{(s)}\\) around \\(\\Gamma\\) after it takes \\(\\tilde{\\mathcal{O}} (\\eta^{-1})\\) steps to approach \\(\\Gamma\\). Though the expected gradient \\(\\nabla \\mathcal{L}\\) is near zero around \\(\\Gamma\\), the dynamics are still non-trivial because the noise can drive the iterate to move a significant distance in \\(\\mathcal{O}(\\eta^{-2})\\) steps." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.595, + 0.825, + 0.625 + ], + "angle": 0, + "content": "Assumption 3.1. The loss function \\(\\mathcal{L}(\\cdot)\\) and the matrix square root of the noise covariance \\(\\Sigma^{1/2}(\\cdot)\\) are \\(\\mathcal{C}^\\infty\\)-smooth. Besides, we assume that \\(\\|\\nabla \\ell(\\boldsymbol{\\theta}; \\xi)\\|_2\\) is bounded by a constant for all \\(\\boldsymbol{\\theta}\\) and \\(\\xi\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.826, + 0.671 + ], + "angle": 0, + "content": "Assumption 3.2. \\(\\Gamma\\) is a \\(\\mathcal{C}^\\infty\\)-smooth, \\((d - m)\\)-dimensional submanifold of \\(\\mathbb{R}^d\\), where any \\(\\zeta \\in \\Gamma\\) is a local minimizer of \\(\\mathcal{L}\\). For all \\(\\zeta \\in \\Gamma\\), \\(\\mathrm{rank}(\\nabla^2\\mathcal{L}(\\zeta)) = m\\). 
Additionally, there exists an open neighborhood of \\(\\Gamma\\), denoted as \\(U\\), such that \\(\\Gamma = \\arg \\min_{\\pmb{\\theta} \\in U} \\mathcal{L}(\\pmb{\\theta})\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.674, + 0.46, + 0.689 + ], + "angle": 0, + "content": "Assumption 3.3. \\(\\Gamma\\) is a compact manifold." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.693, + 0.826, + 0.791 + ], + "angle": 0, + "content": "The smoothness assumption on \\(\\mathcal{L}\\) is generally satisfied when we use smooth activation functions, such as Swish (Ramachandran et al., 2017), softplus and GeLU (Hendrycks & Gimpel, 2016), which work equally well as ReLU in many circumstances. The existence of a minimizer manifold with \\(\\mathrm{rank}(\\nabla^2\\mathcal{L}(\\zeta)) = m\\) has also been made as a key assumption in Fehrman et al. (2020); Li et al. (2021b); Lyu et al. (2022), where \\(\\mathrm{rank}(\\nabla^2\\mathcal{L}(\\zeta)) = m\\) ensures that the Hessian is maximally nondegenerate on the manifold and implies that the tangent space at \\(\\zeta \\in \\Gamma\\) equals the null space of \\(\\nabla^2\\mathcal{L}(\\zeta)\\). The last assumption is made to prevent the analysis from being too technically involved." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.797, + 0.825, + 0.827 + ], + "angle": 0, + "content": "Our SDE for Local SGD characterizes the training dynamics near \\(\\Gamma\\). For ease of presentation, we define the following projection operators \\(\\Phi, P_{\\zeta}\\) for points and differential forms respectively." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.831, + 0.825, + 0.877 + ], + "angle": 0, + "content": "Definition 3.1 (Gradient Flow Projection). Fix a point \\(\\theta_{\\mathrm{null}} \\notin \\Gamma\\). For \\(\\pmb{x} \\in \\mathbb{R}^d\\), consider the gradient flow \\(\\frac{\\mathrm{d}\\pmb{x}(t)}{\\mathrm{d}t} = -\\nabla \\mathcal{L}(\\pmb{x}(t))\\) with \\(\\pmb{x}(0) = \\pmb{x}\\). 
We denote the gradient flow projection of \\(\\pmb{x}\\) as \\(\\Phi(\\pmb{x})\\). \\(\\Phi(\\pmb{x}) := \\lim_{t \\to +\\infty} \\pmb{x}(t)\\) if the limit exists and belongs to \\(\\Gamma\\); otherwise, \\(\\Phi(\\pmb{x}) = \\theta_{\\mathrm{null}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.88, + 0.826, + 0.927 + ], + "angle": 0, + "content": "Definition 3.2. For any \\(\\zeta \\in \\Gamma\\) and any differential form \\(\\mathbf{AdW}_t + \\mathbf{bdt}\\) in Itô calculus, where \\(\\mathbf{A}\\) is a matrix and \\(\\mathbf{b}\\) is a vector, we use \\(P_{\\zeta}(\\mathbf{AdW}_t + \\mathbf{bdt})\\) as a shorthand for the differential form \\(\\partial \\Phi (\\zeta)\\mathbf{AdW}_t + \\left(\\partial \\Phi (\\zeta)\\mathbf{b} + \\frac{1}{2}\\partial^2\\Phi (\\zeta)[\\mathbf{AA}^\\top ]\\right)\\mathrm{d}t.\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.19 + ], + "angle": 0, + "content": "See Øksendal (2013) for an introduction to Itô calculus. Here \\(P_{\\zeta}\\) equals \\(\\Phi (\\zeta +A\\mathrm{d}\\pmb {W}_t + \\pmb {b}\\mathrm{d}t) - \\Phi (\\zeta)\\) by Itô calculus, which means that \\(P_{\\zeta}\\) projects an infinitesimal step from \\(\\zeta\\) , so that \\(\\zeta\\) after taking the projected step does not leave the manifold \\(\\Gamma\\) . It can be shown by simple calculus that \\(\\partial \\Phi (\\zeta)\\) equals the projection matrix onto the tangent space of \\(\\Gamma\\) at \\(\\zeta\\) . 
We decompose the noise covariance \\(\\Sigma (\\zeta)\\) for \\(\\zeta \\in \\Gamma\\) into two parts: the noise in the tangent space \\(\\Sigma_{\\parallel}(\\zeta)\\coloneqq \\partial \\Phi (\\zeta)\\Sigma (\\zeta)\\partial \\Phi (\\zeta)\\) and the noise in the rest \\(\\Sigma_{\\diamond}(\\zeta)\\coloneqq \\Sigma (\\zeta) - \\Sigma_{\\parallel}(\\zeta)\\) . Now we are ready to state our SDE for Local SGD." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.193, + 0.825, + 0.223 + ], + "angle": 0, + "content": "Definition 3.3 (Slow SDE for Local SGD). Given \\(\\eta, H > 0\\) and \\(\\zeta_0 \\in \\Gamma\\), define \\(\\zeta(t)\\) as the solution of the following SDE with initial condition \\(\\zeta(0) = \\zeta_0\\):" + }, + { + "type": "equation", + "bbox": [ + 0.226, + 0.226, + 0.826, + 0.269 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {(a) d i f f u s i o n} - \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(b) d r i f t - I} - \\underbrace {- \\frac {K - 1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(c) d r i f t - I I}\\right). 
\\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.274, + 0.457, + 0.292 + ], + "angle": 0, + "content": "Here \\(\\widehat{\\Sigma}_{\\diamond}(\\zeta), \\widehat{\\Psi}(\\zeta) \\in \\mathbb{R}^{d \\times d}\\) are defined as" + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.295, + 0.826, + 0.317 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) := \\sum_ {i, j: (\\lambda_ {i} \\neq 0) \\vee (\\lambda_ {j} \\neq 0)} \\frac {1}{\\lambda_ {i} + \\lambda_ {j}} \\left\\langle \\boldsymbol {\\Sigma} _ {\\diamond} (\\boldsymbol {\\zeta}), \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top}, \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.285, + 0.319, + 0.825, + 0.342 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) := \\sum_ {i, j: (\\lambda_ {i} \\neq 0) \\vee (\\lambda_ {j} \\neq 0)} \\frac {\\psi (\\eta H \\cdot (\\lambda_ {i} + \\lambda_ {j}))}{\\lambda_ {i} + \\lambda_ {j}} \\left\\langle \\boldsymbol {\\Sigma} _ {\\diamond} (\\boldsymbol {\\zeta}), \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.346, + 0.827, + 0.393 + ], + "angle": 0, + "content": "where \\(\\{\\pmb{v}_i\\}_{i=1}^d\\) is a set of eigenvectors of \\(\\nabla^2\\mathcal{L}(\\zeta)\\) that forms an orthonormal eigenbasis, and \\(\\lambda_1, \\ldots, \\lambda_d\\) are the corresponding eigenvalues. Additionally, \\(\\psi(x) := \\frac{e^{-x} - 1 + x}{x}\\) for \\(x \\neq 0\\) and \\(\\psi(0) = 0\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.399, + 0.825, + 0.506 + ], + "angle": 0, + "content": "The use of \\(P_{\\zeta}\\) keeps \\(\\zeta(t)\\) on the manifold \\(\\Gamma\\) through projection. 
\\(\\Sigma_{\\parallel}^{\\frac{1}{2}}(\\zeta)\\) introduces a diffusion term to the SDE in the tangent space. The two drift terms involve \\(\\widehat{\\Sigma}_{\\diamond}(\\cdot)\\) and \\(\\widehat{\\Psi}(\\cdot)\\), which can be intuitively understood as rescaling the entries of the noise covariance in the eigenbasis of Hessian. In the special case where \\(\\nabla^{2}\\mathcal{L} = \\mathrm{diag}(\\lambda_{1},\\dots,\\lambda_{d}) \\in \\mathbb{R}^{d\\times d}\\), we have \\(\\widehat{\\Sigma}_{\\diamond,i,j} = \\frac{1}{\\lambda_i + \\lambda_j}\\Sigma_{\\diamond,i,j}\\). \\(\\widehat{\\Psi}_{i,j} = \\frac{\\psi(\\eta H(\\lambda_i + \\lambda_j))}{\\lambda_i + \\lambda_j}\\Sigma_{\\diamond,i,j}\\). \\(\\psi(x)\\) is a monotonically increasing function, which goes from 0 to 1 as \\(x\\) goes from 0 to infinity (see Figure 9)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.511, + 0.826, + 0.595 + ], + "angle": 0, + "content": "We name this SDE as the Slow SDE for Local SGD because we will show that each discrete step of Local SGD corresponds to a continuous time interval of \\(\\eta^2\\) instead of an interval of \\(\\eta\\) in the conventional SDE. In this sense, our SDE is \"slower\" than the conventional SDE (and hence can track a longer horizon). This Slow SDE is inspired by Li et al. (2021b). Under nearly the same set of assumptions, they proved that SGD can be tracked by an SDE that is essentially equivalent to (4) with \\(K = 1\\), namely, without the drift-II term." 
+ }, + { + "type": "equation", + "bbox": [ + 0.307, + 0.598, + 0.826, + 0.642 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} - \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(b) d r i f t - I}}\\right), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.645, + 0.825, + 0.688 + ], + "angle": 0, + "content": "We refer to (7) as the Slow SDE for SGD. We remark that the drfit-II term in (4) is novel and is the key to separate the generalization behaviors of Local SGD and SGD in theory. We will discuss this point later in Section 3.3. Now we present our SDE approximation theorem for Local SGD." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.69, + 0.825, + 0.762 + ], + "angle": 0, + "content": "Theorem 3.2. Let Assumptions 3.1 to 3.3 hold. Let \\( T > 0 \\) be a constant and \\( \\zeta(t) \\) be the solution to (4) with the initial condition \\( \\zeta(0) = \\Phi(\\bar{\\theta}^{(0)}) \\in \\Gamma \\). If \\( H \\) is set to \\( \\frac{\\alpha}{\\eta} \\) for some constant \\( \\alpha > 0 \\), then for any \\( \\mathcal{C}^3 \\)-smooth function \\( g(\\pmb{\\theta}) \\), \\( \\max_{0 \\leq s \\leq \\frac{T}{H\\eta^2}} \\left| \\mathbb{E}[g(\\Phi(\\bar{\\pmb{\\theta}}^{(s)}))] - \\mathbb{E}[g(\\pmb{\\zeta}(sH\\eta^2)] \\right| = \\tilde{\\mathcal{O}}(\\eta^{0.25}) \\), where \\( \\tilde{\\mathcal{O}}(\\cdot) \\) hides log factors and constants that are independent of \\( \\eta \\) but can depend on \\( g(\\pmb{\\theta}) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.762, + 0.825, + 0.82 + ], + "angle": 0, + "content": "Theorem 3.3. 
For \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), with probability at least \\(1 - \\delta\\), it holds for all \\(\\mathcal{O}\\left(\\frac{1}{\\alpha}\\log \\frac{1}{\\eta}\\right)\\leq s\\leq \\frac{T}{\\alpha\\eta}\\) that \\(\\Phi (\\bar{\\pmb{\\theta}}^{(s)})\\in \\Gamma\\) and \\(\\| \\bar{\\pmb{\\theta}}^{(s)} - \\Phi (\\bar{\\pmb{\\theta}}^{(s)})\\| _2 = \\mathcal{O}(\\sqrt{\\alpha\\eta\\log\\frac{\\alpha}{\\eta\\delta}})\\), where \\(\\mathcal{O}(\\cdot)\\) hides constants independent of \\(\\eta\\), \\(\\alpha\\) and \\(\\delta\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.824, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Theorem 3.2 suggests that the trajectories of the manifold projection and the solution to the Slow SDE (4) are close to each other in the weak approximation sense. That is, \\(\\{\\Phi (\\bar{\\theta}^{(s)})\\}\\) and \\(\\{\\zeta (t)\\}\\) cannot be distinguished by evaluating test functions from a wide function class, including all polynomials. This measurement of closeness between the iterates of stochastic gradient algorithms and their SDE approximations is also adopted by Li et al. (2019a; 2021a); Malladi et al. (2022), but their analyses are for conventional SDEs. Theorem 3.3 further states that the iterate \\(\\bar{\\theta}^{(s)}\\) keeps close to its manifold projection after the first few rounds." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.161 + ], + "angle": 0, + "content": "Remark 3.1. 
To connect to Finding 2.1, we remark that our theorems (1) do not require the model to be pre-trained (as long as the gradient flow starting with \\(\\theta^{(0)}\\) converges to \\(\\Gamma\\)); (2) give better bounds for smaller \\(\\eta\\); (3) characterize a long training horizon \\(\\sim \\eta^{-2}\\). The need for tuning \\(H\\) will be discussed in Section 3.3.3." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.164, + 0.828, + 0.306 + ], + "angle": 0, + "content": "Technical Contribution. The proof technique for Theorem 3.2 is novel and significantly different from the Slow SDE analysis of SGD in Li et al. (2021b). Their analysis uses advanced stochastic calculus and invokes Katzenberger's theorem (Katzenberger, 1991) to show that SGD converges to the Slow SDE in distribution, but no quantitative error bounds are provided. Also, due to the local updates and multiple aggregation steps in Local SGD, it is unclear how to extend Katzenberger's theorem to our case. To overcome this difficulty, we develop a new approach to analyze the Slow SDEs, which is based on the method of moments (Li et al., 2019a) and can provide the quantitative error bound \\(\\tilde{\\mathcal{O}} (\\eta^{0.25})\\) in weak approximation. See Appendix J for our proof outline. A by-product of our result is the first quantitative approximation bound for the Slow SDE approximation for SGD, which can be easily obtained by setting \\(K = 1\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.314, + 0.483, + 0.328 + ], + "angle": 0, + "content": "3.3 INTERPRETATION OF THE SLOW SDEs" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.335, + 0.825, + 0.379 + ], + "angle": 0, + "content": "In this subsection, we compare the Slow SDEs for SGD and Local SGD and provide an important insight into why Local SGD generalizes better than SGD: Local SGD strengthens the drift term in the Slow SDE, which makes the implicit regularization of stochastic gradient noise more effective." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.386, + 0.564, + 0.4 + ], + "angle": 0, + "content": "3.3.1 INTERPRETATION OF THE SLOW SDE FOR SGD." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.407, + 0.825, + 0.517 + ], + "angle": 0, + "content": "The Slow SDE for SGD (7) consists of the diffusion and drift-I terms. The former injects noise into the dynamics in the tangent space; the latter one drives the dynamics to move along the negative gradient of \\(\\frac{1}{2B}\\langle \\nabla^2\\mathcal{L}(\\zeta),\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\rangle\\) projected onto the tangent space, but ignoring the dependency of \\(\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\) on \\(\\zeta\\). This can be connected to the class of semi-gradient methods which only computes a part of the gradient (Mnih et al., 2015; Sutton & Barto, 1998; Brandonbrener & Bruna, 2020). In this view, the long-term behavior of SGD is similar to a stochastic semi-gradient method minimizing the implicit regularizer \\(\\frac{1}{2B}\\langle \\nabla^2\\mathcal{L}(\\zeta),\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\rangle\\) on the minimizer manifold of the original loss \\(\\mathcal{L}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.52, + 0.825, + 0.605 + ], + "angle": 0, + "content": "Though the semi-gradient method may not perfectly optimize its objective, the above argument reveals that SGD has a deterministic trend toward the region with a smaller magnitude of Hessian, which is commonly believed to correlate with better generalization (Hochreiter & Schmidhuber, 1997; Keskar et al., 2017; Neyshabur et al., 2017; Jiang et al., 2020) (see Appendix A for more discussions). In contrast, the diffusion term can be regarded as a random perturbation to this trend, which can impede optimization when the drift-I term is not strong enough." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.61, + 0.825, + 0.669 + ], + "angle": 0, + "content": "Based on this view, we conjecture that strengthening the drift term of the Slow SDE can help SGD to better regularize the model, yielding a better generalization performance. More specifically, we propose the following hypothesis, which compares the generalization performances of the following generalized Slow SDEs. Note that \\(\\left(\\frac{1}{B},\\frac{1}{2B}\\right)\\)-Slow SDE corresponds to the Slow SDE for SGD (7)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.67, + 0.688, + 0.685 + ], + "angle": 0, + "content": "Definition 3.4. For \\(\\kappa_{1},\\kappa_{2}\\geq 0\\) define \\((\\kappa_{1},\\kappa_{2})\\) -Slow SDE to be the following:" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.687, + 0.825, + 0.711 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\xi}} \\left(\\sqrt {\\kappa_ {1}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t} - \\kappa_ {2} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t\\right). \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.716, + 0.825, + 0.761 + ], + "angle": 0, + "content": "Hypothesis 3.1. Starting at a minimizer \\(\\zeta_0\\in \\Gamma\\), run \\((\\kappa_{1},\\kappa_{2})\\)-Slow SDE and \\((\\kappa_{1},\\kappa_{2}^{\\prime})\\)-Slow SDE respectively for the same amount of time \\(T > 0\\) and obtain \\(\\zeta (T),\\zeta '(T)\\). If \\(\\kappa_{2} > \\kappa_{2}^{\\prime}\\), then the expected test accuracy at \\(\\zeta (T)\\) is better than that at \\(\\zeta '(T)\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.763, + 0.825, + 0.806 + ], + "angle": 0, + "content": "Due to the No Free Lunch Theorem, we do not claim that our hypothesis is always true, but we do believe that the hypothesis holds when training usual neural networks (e.g., ResNets, VGGNets) on standard benchmarks (e.g., CIFAR-10, ImageNet)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.811, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Example: Training with Label Noise Regularization. To exemplify the generalization benefit of having a larger drift term, we follow a line of theoretical works (Li et al., 2021b; Blanc et al., 2020; Damian et al., 2021) to study the case of training over-parameterized neural nets with label noise regularization. For a \\(C\\)-class classification task, the label noise regularization is as follows: every time we draw a sample from the training set, we make the true label as it is with probability \\(1 - p\\) and replace it with any other label with equal probability \\(\\frac{p}{C-1}\\). When we use cross-entropy loss, the Slow SDE for SGD turns out to be a simple deterministic gradient flow on \\(\\Gamma\\) (instead of a semigroup method) for minimizing the trace of Hessian: \\(\\mathrm{d}\\boldsymbol{\\zeta}(t) = -\\frac{1}{4B}\\nabla_{\\Gamma}\\mathrm{tr}(\\nabla^{2}\\mathcal{L}(\\boldsymbol{\\zeta}))\\mathrm{d}t\\), where \\(\\nabla_{\\Gamma}f\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "stands for the gradient of the function \\( f \\) projected to the tangent space of \\( \\Gamma \\). 
Checking the validity of our hypothesis reduces to the following question: Is minimizing the trace of Hessian beneficial to generalization? Many works prove positive results in concrete settings, including the line of works we just mentioned. We refer the readers to Appendix G for further discussion." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.168, + 0.658, + 0.182 + ], + "angle": 0, + "content": "3.3.2 LOCAL SGD STRENGTHENS THE DRIFT TERM IN SLOW SDE." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.189, + 0.825, + 0.299 + ], + "angle": 0, + "content": "Based on Hypothesis 3.1, we argue that Local SGD improves generalization by strengthening the drift term of the Slow SDE. First, it can be seen from (4) that the Slow SDE for Local SGD has an additional drift-II term. Similar to the drift-I term of the Slow SDE for SGD, this drift-II term drives the dynamics to move along the negative semi-gradient of \\(\\frac{K - 1}{2B}\\langle \\nabla^2\\mathcal{L}(\\zeta),\\widehat{\\Psi} (\\zeta)\\rangle\\) (with the dependency of \\(\\widehat{\\Psi} (\\zeta)\\) on \\(\\zeta\\) ignored). Combining it with the implicit regularizer induced by the drift-I term, we can see that the long-term behavior of Local SGD is similar to a stochastic semi-gradient method minimizing the implicit regularizer \\(\\frac{1}{2B}\\langle \\nabla^{2}\\mathcal{L}(\\zeta),\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\rangle +\\frac{K - 1}{2B}\\langle \\nabla^{2}\\mathcal{L}(\\zeta),\\widehat{\\Psi} (\\zeta)\\rangle\\) on \\(\\Gamma\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.304, + 0.825, + 0.413 + ], + "angle": 0, + "content": "Comparing the definitions of \\(\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\) (5) and \\(\\widehat{\\Psi}(\\zeta)\\) (6), we can see that \\(\\widehat{\\Psi}(\\zeta)\\) is basically a rescaling of the entries of \\(\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\) in the eigenbasis of Hessian, where the rescaling factor \\(\\psi(\\eta H \\cdot (\\lambda_i + \\lambda_j))\\) for each entry is between 0 and 1 (see Figure 9 for the plot of \\(\\psi\\)). When \\(\\eta H\\) is small, the rescaling factors should be close to \\(\\psi(0) = 0\\), then \\(\\widehat{\\Psi}(\\zeta) \\approx \\mathbf{0}\\), leading to almost no additional regularization. On the other hand, when \\(\\eta H\\) is large, the rescaling factors should be close to \\(\\psi(+\\infty) = 1\\), so \\(\\widehat{\\Psi}(\\zeta) \\approx \\widehat{\\Sigma}_{\\diamond}(\\zeta)\\). We can then merge the two implicit regularizers as \\(\\frac{K}{2B} \\langle \\nabla^2 \\mathcal{L}(\\zeta), \\widehat{\\Sigma}_{\\diamond}(\\zeta) \\rangle\\), and (4) becomes the \\((\\frac{1}{B}, \\frac{K}{2B})\\)-Slow SDE, which is restated below:" + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.412, + 0.825, + 0.433 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t} - \\frac {K}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t\\right). \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.432, + 0.825, + 0.491 + ], + "angle": 0, + "content": "From the above argument we know how the Slow SDE of Local SGD (4) changes as \\(\\eta H\\) transitions from 0 to \\(+\\infty\\). 
Initially, when \\(\\eta H = 0\\), (4) is the same as the \\((\\frac{1}{B}, \\frac{1}{2B})\\)-Slow SDE for SGD. Then increasing \\(\\eta H\\) strengthens the drift term of (4). As \\(\\eta H \\to +\\infty\\), (4) transitions to the \\((\\frac{1}{B}, \\frac{K}{2B})\\)-Slow SDE, where the drift term becomes \\(K\\) times larger." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.825, + 0.554 + ], + "angle": 0, + "content": "According to Hypothesis 3.1, the \\((\\frac{1}{B},\\frac{K}{2B})\\)-Slow SDE generalizes better than the \\((\\frac{1}{B},\\frac{1}{2B})\\)-Slow SDE, so Local SGD with \\(\\eta H = +\\infty\\) should generalize better than SGD. When \\(\\eta H\\) is chosen realistically as a finite value, the generalization performance of Local SGD interpolates between these two cases, which results in a worse generalization than \\(\\eta H = +\\infty\\) but should still be better than SGD." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.56, + 0.722, + 0.574 + ], + "angle": 0, + "content": "3.3.3 THEORETICAL INSIGHTS INTO TUNING THE NUMBER OF LOCAL STEPS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.581, + 0.825, + 0.681 + ], + "angle": 0, + "content": "Based on our Slow SDE approximations, we now discuss how the number of local steps \\( H \\) affects the generalization of Local SGD. When \\( \\eta \\) is small but finite, tuning \\( H \\) offers a trade-off between regularization strength and SDE approximation quality. Larger \\( \\alpha \\coloneqq \\eta H \\) makes the regularization stronger in the SDE (as discussed in Section 3.3.2), but the SDE itself may lose track of Local SGD, which can be seen from the error bound \\( \\mathcal{O}(\\sqrt{\\alpha\\eta\\log(\\alpha / \\eta\\delta)}) \\) in Theorem 3.3. Therefore, we expect the test accuracy to first increase and then decrease as we gradually increase \\( H \\). 
Indeed, we observe in Figures 2(e) and 2(f) that the plot of test accuracy versus \\( H \\) is unimodal for each \\( \\eta \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.688, + 0.825, + 0.773 + ], + "angle": 0, + "content": "It is thus necessary to tune \\( H \\) for the best generalization. When \\( H \\) is tuned together with other hyperparameters, such as learning rate \\( \\eta \\), our Slow SDE approximation recommends setting \\( H \\) to be at least \\( \\Omega(\\eta^{-1}) \\) so that \\( \\alpha := \\eta H \\) does not vanish in the Slow SDE. Since larger \\( \\alpha \\) gives a stronger regularization effect, the optimal \\( H \\) should be set to the largest value so that the Slow SDE does not lose track of Local SGD. Indeed, we empirically observed that when \\( H \\) is tuned optimally, \\( \\alpha \\) increases as \\( \\eta \\) decreases, suggesting that the optimal \\( H \\) grows faster than \\( \\Omega(\\eta^{-1}) \\). See Figure 5(f)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.778, + 0.331, + 0.793 + ], + "angle": 0, + "content": "4 CONCLUSIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.826, + 0.926 + ], + "angle": 0, + "content": "In this paper, we analyze the long-term generalization behavior of Local SGD in the small learning rate regime by deriving the Slow SDE for Local SGD as a generalization of that for SGD (Li et al., 2021b). We attribute the generalization improvement over SGD to the larger drift term in the SDE for Local SGD. Our empirical validation shows that Local SGD indeed induces generalization benefits with small learning rate and long enough training time. The main limitation of our work is that our analysis does not imply any direct theoretical separation between SGD and Local SGD in test accuracy, which requires a much deeper understanding of the loss landscape and the Slow SDEs and is left for future work. 
Another direction for future work is to design distributed training methods that provably generalize better than SGD based on the theoretical insights obtained from Slow SDEs." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.628, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENT AND DISCLOSURE OF FUNDING" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.134, + 0.827, + 0.205 + ], + "angle": 0, + "content": "The work of Xinran Gu and Longbo Huang is supported by the Technology and Innovation Major Project of the Ministry of Science and Technology of China under Grant 2020AAA0108400 and 2020AAA0108403, the Tsinghua University Initiative Scientific Research Program, and Tsinghua Precision Medicine Foundation 10001020109. The work of Kaifeng Lyu and Sanjeev Arora is supported by funding from NSF, ONR, Simons Foundation, DARPA and SRC." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.225, + 0.289, + 0.24 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.248, + 0.826, + 0.306 + ], + "angle": 0, + "content": "Kwangjun Ahn, Jingzhao Zhang, and Suvrit Sra. Understanding the unstable convergence of gradient descent. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 247-257. PMLR, 17-23 Jul 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.313, + 0.826, + 0.371 + ], + "angle": 0, + "content": "Debraj Basu, Deepesh Data, Can Karakus, and Suhas Diggavi. 
Qsparse-local-SGD: Distributed SGD with quantization, sparsification and local computations. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.379, + 0.826, + 0.422 + ], + "angle": 0, + "content": "Yoshua Bengio. Practical Recommendations for Gradient-Based Training of Deep Architectures, pp. 437-478. Springer Berlin Heidelberg, Berlin, Heidelberg, 2012. ISBN 978-3-642-35289-8. doi: 10.1007/978-3-642-35289-8_26." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.43, + 0.826, + 0.487 + ], + "angle": 0, + "content": "Guy Blanc, Neha Gupta, Gregory Valiant, and Paul Valiant. Implicit regularization for deep neural networks driven by an Ornstein-uhlenbeck like process. In Jacob Abernethy and Shivani Agarwal (eds.), Proceedings of Thirty Third Conference on Learning Theory, volume 125 of Proceedings of Machine Learning Research, pp. 483–513. PMLR, 09–12 Jul 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.496, + 0.826, + 0.54 + ], + "angle": 0, + "content": "David Brandfonbrener and Joan Bruna. Geometric insights into the convergence of nonlinear TD learning. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.548, + 0.826, + 0.577 + ], + "angle": 0, + "content": "Jianmin Chen, Xinghao Pan, Rajat Monga, Samy Bengio, and Rafal Jozefowicz. Revisiting distributed synchronous SGD. arXiv preprint arXiv:1604.00981, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.586, + 0.826, + 0.642 + ], + "angle": 0, + "content": "Kai Chen and Qiang Huo. Scalable training of deep learning machines by incremental block training with intra-block parallel optimization and blockwise model-update filtering. 
In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5880-5884, 2016. doi: 10.1109/ICASSP.2016.7472805." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.651, + 0.826, + 0.695 + ], + "angle": 0, + "content": "Alex Damian, Tengyu Ma, and Jason D. Lee. Label noise SGD provably prefers flat global minimizers. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.703, + 0.826, + 0.76 + ], + "angle": 0, + "content": "Laurent Dinh, Razvan Pascanu, Samy Bengio, and Yoshua Bengio. Sharp minima can generalize for deep nets. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 1019-1028. PMLR, 06-11 Aug 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.768, + 0.826, + 0.797 + ], + "angle": 0, + "content": "Aijun Du and JinQiao Duan. Invariant manifold reduction for stochastic dynamical systems. Dynamic Systems and Applications, 16:681-696, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.806, + 0.826, + 0.836 + ], + "angle": 0, + "content": "KJ Falconer. Differentiation of the limit mapping in a dynamical system. Journal of the London Mathematical Society, 2(2):356-372, 1983." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.844, + 0.826, + 0.886 + ], + "angle": 0, + "content": "Benjamin Fehrman, Benjamin Gess, and Arnulf Jentzen. Convergence rates for the stochastic gradient descent method for non-convex objective functions. Journal of Machine Learning Research, 21:136, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Damir Filipović. Invariant manifolds for weak solutions to stochastic equations. *Probability theory and related fields*, 118(3):323-341, 2000." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.248, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Margalit R Glasgow, Honglin Yuan, and Tengyu Ma. Sharp bounds for federated averaging (Local SGD) and continuous perspective. In International Conference on Artificial Intelligence and Statistics, pp. 9050-9090. PMLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.826, + 0.252 + ], + "angle": 0, + "content": "Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch SGD: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.259, + 0.826, + 0.304 + ], + "angle": 0, + "content": "Farzin Haddadpour, Mohammad Mahdi Kamani, Mehrdad Mahdavi, and Viveck Cadambe. Local SGD with periodic averaging: Tighter analysis and adaptive synchronization. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.311, + 0.826, + 0.356 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. 
In Proceedings of the IEEE international conference on computer vision, pp. 1026-1034, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.364, + 0.826, + 0.407 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.416, + 0.826, + 0.445 + ], + "angle": 0, + "content": "Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.454, + 0.795, + 0.471 + ], + "angle": 0, + "content": "Sepp Hochreiter and Jürgen Schmidhuber. Flat minima. Neural computation, 9(1):1-42, 1997." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.478, + 0.826, + 0.522 + ], + "angle": 0, + "content": "Elad Hoffer, Itay Hubara, and Daniel Soudry. Train longer, generalize better: closing the generalization gap in large batch training of neural networks. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.53, + 0.826, + 0.561 + ], + "angle": 0, + "content": "Wenqing Hu, Chris Junchi Li, Lei Li, and Jian-Guo Liu. On the diffusion approximation of nonconvex stochastic gradient descent. arXiv preprint arXiv:1705.07562, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.569, + 0.826, + 0.599 + ], + "angle": 0, + "content": "Hikaru Ibayashi and Masaaki Imaizumi. Exponential escape efficiency of SGD from sharp minima in non-stationary regime. arXiv preprint arXiv:2111.04004, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.607, + 0.826, + 0.651 + ], + "angle": 0, + "content": "Stanisław Jastrzebski, Zachary Kenton, Devansh Arpit, Nicolas Ballas, Asja Fischer, Yoshua Bengio, and Amos Storkey. Three factors influencing minima in SGD. 
arXiv preprint arXiv:1711.04623, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.659, + 0.826, + 0.716 + ], + "angle": 0, + "content": "Xianyan Jia, Shutao Song, Wei He, Yangzihao Wang, Haidong Rong, Feihu Zhou, Liqiang Xie, Zhenyu Guo, Yuzhou Yang, Liwei Yu, et al. Highly scalable deep learning training system with mixed-precision: Training imagenet in four minutes. Advances in Neural Information Processing Systems, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.725, + 0.826, + 0.769 + ], + "angle": 0, + "content": "Yiding Jiang, Behnam Neyshabur, Hossein Mobahi, Dilip Krishnan, and Samy Bengio. *Fantastic generalization measures and where to find them.* In International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.777, + 0.826, + 0.834 + ], + "angle": 0, + "content": "Peter Kairouz, H Brendan McMahan, Brendan Avent, Aurélien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. Advances and open problems in federated learning. Foundations and Trends® in Machine Learning, 14(1-2):1-210, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.843, + 0.826, + 0.887 + ], + "angle": 0, + "content": "Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank Reddi, Sebastian Stich, and Ananda Theertha Suresh. Scaffold: Stochastic controlled averaging for federated learning. In International Conference on Machine Learning, pp. 5132-5143. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.826, + 0.925 + ], + "angle": 0, + "content": "G. S. Katzenberger. Solutions of a stochastic differential equation forced onto a manifold by a large drift. The Annals of Probability, 19(4):1587 - 1628, 1991." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. In International Conference on Learning Representations, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Ahmed Khaled, Konstantin Mishchenko, and Peter Richtárik. Tighter theory for local SGD on identical and heterogeneous data. In International Conference on Artificial Intelligence and Statistics, pp. 4519-4529. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.209, + 0.826, + 0.267 + ], + "angle": 0, + "content": "Bobby Kleinberg, Yanzhi Li, and Yang Yuan. An alternative view: When does SGD escape local minima? In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 2698-2707. PMLR, 10-15 Jul 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.276, + 0.826, + 0.306 + ], + "angle": 0, + "content": "Alex Krizhevsky. One weird trick for parallelizing convolutional neural networks. arXiv preprint arXiv:1404.5997, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.315, + 0.725, + 0.332 + ], + "angle": 0, + "content": "Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.341, + 0.826, + 0.371 + ], + "angle": 0, + "content": "Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.38, + 0.826, + 0.422 + ], + "angle": 0, + "content": "Yann A. LeCun, Léon Bottou, Genevieve B. Orr, and Klaus-Robert Müller. Efficient BackProp, pp. 9-48. Springer Berlin Heidelberg, Berlin, Heidelberg, 2012. ISBN 978-3-642-35289-8. doi: 10.1007/978-3-642-35289-8_3." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.432, + 0.826, + 0.475 + ], + "angle": 0, + "content": "Qianxiao Li, Cheng Tai, and Weinan E. Stochastic modified equations and dynamics of stochastic gradient algorithms i: Mathematical foundations. Journal of Machine Learning Research, 20(40): 1-47, 2019a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.485, + 0.826, + 0.516 + ], + "angle": 0, + "content": "Xiang Li, Kaixuan Huang, Wenhao Yang, Shusen Wang, and Zhihua Zhang. On the convergence of fedavg on non-iid data. In International Conference on Learning Representations, 2019b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.525, + 0.826, + 0.568 + ], + "angle": 0, + "content": "Zhiyuan Li, Kaifeng Lyu, and Sanjeev Arora. Reconciling modern deep learning with traditional optimization analyses: The intrinsic learning rate. Advances in Neural Information Processing Systems, 33:14544-14555, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.578, + 0.826, + 0.621 + ], + "angle": 0, + "content": "Zhiyuan Li, Sadhika Malladi, and Sanjeev Arora. On the validity of modeling SGD with stochastic differential equations (sdes). Advances in Neural Information Processing Systems, 34:12712-12725, 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.63, + 0.826, + 0.661 + ], + "angle": 0, + "content": "Zhiyuan Li, Tianhao Wang, and Sanjeev Arora. 
What happens after SGD reaches zero loss? a mathematical framework. In International Conference on Learning Representations, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.67, + 0.826, + 0.713 + ], + "angle": 0, + "content": "Zhiyuan Li, Tianhao Wang, and Dingli Yu. Fast mixing of stochastic gradient descent with normalization and weight decay. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.722, + 0.826, + 0.78 + ], + "angle": 0, + "content": "Tao Lin, Lingjing Kong, Sebastian Stich, and Martin Jaggi. Extrapolation for large-batch training in deep learning. In Hal Daumé III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 6094-6104. PMLR, 13-18 Jul 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.789, + 0.826, + 0.82 + ], + "angle": 0, + "content": "Tao Lin, Sebastian U. Stich, Kumar Kshitij Patel, and Martin Jaggi. Don't use large mini-batches, use Local SGD. In International Conference on Learning Representations, 2020b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.828, + 0.826, + 0.859 + ], + "angle": 0, + "content": "Kaifeng Lyu, Zhiyuan Li, and Sanjeev Arora. Understanding the generalization benefit of normalization layers: Sharpness reduction, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.826, + 0.924 + ], + "angle": 0, + "content": "Chao Ma and Lexing Ying. On linear stability of SGD and input-smoothness of neural networks. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, volume 34, pp. 16805-16817. Curran Associates, Inc., 2021." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Sadhika Malladi, Kaifeng Lyu, Abhishek Panigrahi, and Sanjeev Arora. On the SDEs and scaling rules for adaptive gradient algorithms. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.154, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Gideon Mann, Ryan T. McDonald, Mehryar Mohri, Nathan Silberman, and Dan Walker. Efficient large-scale distributed training of conditional maximum entropy models. In Advances in Neural Information Processing Systems 22, pp. 1231-1239, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.205, + 0.824, + 0.249 + ], + "angle": 0, + "content": "Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics, pp. 1273-1282. PMLR, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.255, + 0.825, + 0.301 + ], + "angle": 0, + "content": "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al. Human-level control through deep reinforcement learning. nature, 518(7540):529-533, 2015." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.306, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Behnam Neyshabur, Srinadh Bhojanapalli, David Mcallester, and Nati Srebro. Exploring generalization in deep learning. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.371, + 0.824, + 0.402 + ], + "angle": 0, + "content": "Jose Javier Gonzalez Ortiz, Jonathan Frankle, Mike Rabbat, Ari Morcos, and Nicolas Ballas. Trade-offs of Local SGD at scale: An empirical study. arXiv preprint arXiv:2110.08133, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.408, + 0.824, + 0.44 + ], + "angle": 0, + "content": "Daniel Povey, Xiaohui Zhang, and Sanjeev Khudanpur. Parallel training of dnns with natural gradient and parameter averaging. arXiv preprint arXiv:1410.7455, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.446, + 0.825, + 0.476 + ], + "angle": 0, + "content": "Prajit Ramachandran, Barret Zoph, and Quoc V Le. Searching for activation functions. arXiv preprint arXiv:1710.05941, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.483, + 0.825, + 0.527 + ], + "angle": 0, + "content": "Benjamin Recht, Christopher Ré, Stephen J. Wright, and Feng Niu. Hogwild: A lock-free approach to parallelizing stochastic gradient descent. In Advances in Neural Information Processing Systems 24, pp. 693-701, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.534, + 0.825, + 0.592 + ], + "angle": 0, + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 115(3):211-252, 2015. 
doi: 10.1007/s11263-015-0816-y." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.598, + 0.825, + 0.683 + ], + "angle": 0, + "content": "Frank Seide, Hao Fu, Jasha Droppo, Gang Li, and Dong Yu. 1-bit stochastic gradient descent and its application to data-parallel distributed training of speech dnns. In Haizhou Li, Helen M. Meng, Bin Ma, Engsiong Chng, and Lei Xie (eds.), INTERSPEECH 2014, 15th Annual Conference of the International Speech Communication Association, Singapore, September 14-18, 2014, pp. 1058-1062. ISCA, 2014. URL http://www.isca-speech.org/archive/interspeech_2014/i14_1058.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.691, + 0.827, + 0.736 + ], + "angle": 0, + "content": "Christopher J. Shallue, Jaehoon Lee, Joseph Antognini, Jascha Sohl-Dickstein, Roy Frostig, and George E. Dahl. Measuring the effects of data parallelism on neural network training. Journal of Machine Learning Research, 20(112):1-49, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.742, + 0.825, + 0.772 + ], + "angle": 0, + "content": "K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, May 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.779, + 0.825, + 0.837 + ], + "angle": 0, + "content": "Samuel Smith, Erich Elsen, and Soham De. On the generalization benefit of noise in stochastic gradient descent. In Hal Daumé III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 9058-9067. PMLR, 13-18 Jul 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.844, + 0.825, + 0.887 + ], + "angle": 0, + "content": "Samuel L Smith, Benoit Dherin, David Barrett, and Soham De. On the origin of implicit regularization in stochastic gradient descent. In International Conference on Learning Representations, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Sebastian U Stich. Local SGD converges fast and communicates little. In International Conference on Learning Representations, 2018." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Nikko Strom. Scalable distributed DNN training using commodity GPU cloud computing. In INTERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden, Germany, September 6-10, 2015, pp. 1488-1492. ISCA, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.154, + 0.825, + 0.185 + ], + "angle": 0, + "content": "Hang Su and Haoyu Chen. Experiments on parallel training of deep neural network using model averaging. arXiv preprint arXiv:1507.01239, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.191, + 0.825, + 0.222 + ], + "angle": 0, + "content": "Richard S. Sutton and Andrew G. Barto. Reinforcement learning - an introduction. Adaptive computation and machine learning. MIT Press, 1998. ISBN 978-0-262-19398-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.228, + 0.825, + 0.259 + ], + "angle": 0, + "content": "Jianyu Wang and Gauri Joshi. Adaptive communication strategies to achieve the best error-runtime trade-off in local-update SGD. Proceedings of Machine Learning and Systems, 1:212-229, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.266, + 0.825, + 0.296 + ], + "angle": 0, + "content": "Jianyu Wang and Gauri Joshi. 
Cooperative SGD: A unified framework for the design and analysis of local-update SGD algorithms. Journal of Machine Learning Research, 22(213):1-50, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.303, + 0.825, + 0.346 + ], + "angle": 0, + "content": "Jianyu Wang, Rudrajit Das, Gauri Joshi, Satyen Kale, Zheng Xu, and Tong Zhang. On the unreasonable effectiveness of federated averaging with heterogeneous data. arXiv preprint arXiv:2206.04723, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.354, + 0.825, + 0.398 + ], + "angle": 0, + "content": "Blake Woodworth, Kumar Kshitij Patel, Sebastian Stich, Zhen Dai, Brian Bullins, Brendan Mcmahan, Ohad Shamir, and Nathan Srebro. Is local sgd better than minibatch sgd? In International Conference on Machine Learning, pp. 10334-10343. PMLR, 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.404, + 0.825, + 0.448 + ], + "angle": 0, + "content": "Blake E Woodworth, Kumar Kshitij Patel, and Nati Srebro. Minibatch vs Local SGD for heterogeneous distributed learning. Advances in Neural Information Processing Systems, 33:6281-6292, 2020b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.455, + 0.825, + 0.513 + ], + "angle": 0, + "content": "Lei Wu, Chao Ma, and Weinan E. How sgd selects the global minima in over-parameterized learning: A dynamical stability perspective. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesà-Bianchi, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.52, + 0.825, + 0.564 + ], + "angle": 0, + "content": "Zeke Xie, Issei Sato, and Masashi Sugiyama. A diffusion theory for deep learning dynamics: Stochastic gradient descent exponentially favors flat minima. In International Conference on Learning Representations, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.571, + 0.825, + 0.614 + ], + "angle": 0, + "content": "Yang You, Zhao Zhang, Cho-Jui Hsieh, James Demmel, and Kurt Keutzer. Imagenet training in minutes. In Proceedings of the 47th International Conference on Parallel Processing, pp. 1-10, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.622, + 0.825, + 0.679 + ], + "angle": 0, + "content": "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.687, + 0.825, + 0.731 + ], + "angle": 0, + "content": "Hao Yu, Sen Yang, and Shenghuo Zhu. Parallel restarted SGD with faster convergence and less communication: Demystifying why model averaging works for deep learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 5693-5700, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.738, + 0.825, + 0.782 + ], + "angle": 0, + "content": "Jingzhao Zhang, Sai Praneeth Karimireddy, Andreas Veit, Seungyeon Kim, Sashank Reddi, Sanjiv Kumar, and Suvrit Sra. Why are adaptive methods good for attention models? Advances in Neural Information Processing Systems, 33:15383-15393, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.788, + 0.825, + 0.845 + ], + "angle": 0, + "content": "Xiaohui Zhang, Jan Trmal, Daniel Povey, and Sanjeev Khudanpur. Improving deep neural network acoustic models using generalized maxout networks. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 215-219, 2014. doi: 10.1109/ICASSP.2014.6853589." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.853, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Fan Zhou and Guojing Cong. 
On the convergence properties of a k-step averaging stochastic gradient descent algorithm for nonconvex optimization. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18, pp. 3219-3227. International Joint Conferences on Artificial Intelligence Organization, 7 2018. doi: 10.24963/ijcai.2018/447. URL https://doi.org/10.24963/ijcai.2018/447." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Zhanxing Zhu, Jingfeng Wu, Bing Yu, Lei Wu, and Jinwen Ma. The anisotropic noise in stochastic gradient descent: Its behavior of escaping from sharp minima and regularization effects. arXiv preprint arXiv:1803.00195, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Martin Zinkevich, Markus Weimer, Lihong Li, and Alex Smola. Parallelized stochastic gradient descent. In J. Lafferty, C. Williams, J. Shawe-Taylor, R. Zemel, and A. Culotta (eds.), Advances in Neural Information Processing Systems, volume 23. Curran Associates, Inc., 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.826, + 0.235 + ], + "angle": 0, + "content": "Bernt Øksendal. Stochastic differential equations: an introduction with applications. Springer Science & Business Media, 2013." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.27, + 0.118 + ], + "angle": 0, + "content": "CONTENTS" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.139, + 0.826, + 0.154 + ], + "angle": 0, + "content": "1 Introduction 1" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.173, + 0.826, + 0.189 + ], + "angle": 0, + "content": "2 When does Local SGD Generalize Better? 3" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.139, + 0.826, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.195, + 0.826, + 0.211 + ], + "angle": 0, + "content": "2.1 The Debate on Local SGD 3" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.217, + 0.826, + 0.233 + ], + "angle": 0, + "content": "2.2 Key Factors: Small Learning Rate and Sufficient Training Time 4" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.195, + 0.826, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.252, + 0.826, + 0.267 + ], + "angle": 0, + "content": "3 Theoretical Analysis of Local SGD: The Slow SDE 5" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.273, + 0.826, + 0.29 + ], + "angle": 0, + "content": "3.1 Difficulty of Adapting the SDE Framework to Local SGD 6" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.295, + 0.826, + 0.311 + ], + "angle": 0, + "content": "3.2 SDE Approximation near the Minimizer Manifold 6" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.317, + 0.826, + 0.333 + ], + "angle": 0, + "content": "3.3 Interpretation of the Slow SDEs 8" + }, + { + "type": "list", + "bbox": [ + 0.197, 
+ 0.273, + 0.826, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.338, + 0.826, + 0.354 + ], + "angle": 0, + "content": "3.3.1 Interpretation of the Slow SDE for SGD. 8" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.36, + 0.826, + 0.376 + ], + "angle": 0, + "content": "3.3.2 Local SGD Strengthens the Drift Term in Slow SDE. 9" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.381, + 0.826, + 0.397 + ], + "angle": 0, + "content": "3.3.3 Theoretical Insights into Tuning the Number of Local Steps 9" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.338, + 0.826, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.416, + 0.826, + 0.431 + ], + "angle": 0, + "content": "4 Conclusions 9" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.451, + 0.826, + 0.467 + ], + "angle": 0, + "content": "A Additional Related Works 18" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.486, + 0.826, + 0.501 + ], + "angle": 0, + "content": "B Additional Discussions 19" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.521, + 0.826, + 0.537 + ], + "angle": 0, + "content": "C Implementation Details of Parallel SGD, Local SGD and Post-local SGD 20" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.556, + 0.826, + 0.572 + ], + "angle": 0, + "content": "D Modeling Local SGD with Multiple Conventional SDEs 23" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.451, + 0.826, + 0.572 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.591, + 0.826, + 0.607 + ], + "angle": 0, + "content": "E Additional Interpretation of the Slow SDEs 23" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.613, + 0.826, + 0.629 + ], + "angle": 0, + "content": "E.1 Understanding the Diffusion Term in the Slow SDE 23" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.635, + 0.826, + 0.65 + ], + "angle": 0, + "content": "E.2 The Effect of Global Batch Size on 
Generalization 24" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.613, + 0.826, + 0.65 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.669, + 0.826, + 0.685 + ], + "angle": 0, + "content": "F Additional Experimental Results 25" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.704, + 0.826, + 0.72 + ], + "angle": 0, + "content": "G Discussions on Local SGD with Label Noise Regularization 27" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.669, + 0.826, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.726, + 0.826, + 0.742 + ], + "angle": 0, + "content": "G.1 The Slow SDE for Local SGD with Label Noise Regularization 27" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.748, + 0.826, + 0.764 + ], + "angle": 0, + "content": "G.2 The Equivalence of Enlarging the Learning Rate and Adding Local Steps 28" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.726, + 0.826, + 0.764 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.783, + 0.826, + 0.799 + ], + "angle": 0, + "content": "H Deriving the Slow SDE after Applying the LSR 28" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.818, + 0.826, + 0.833 + ], + "angle": 0, + "content": "I Proof of Theorem 3.1 30" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.853, + 0.826, + 0.868 + ], + "angle": 0, + "content": "J Proof Outline of Main Theorems 33" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.888, + 0.826, + 0.903 + ], + "angle": 0, + "content": "K Proof Details of Main Theorems 33" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.783, + 0.826, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.91, + 0.826, + 0.925 + ], + "angle": 0, + "content": "K.1 Additional Notations 34" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": 
"header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.103, + 0.826, + 0.121 + ], + "angle": 0, + "content": "K.2 Computing the Derivatives of the Limiting Mapping 34" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.125, + 0.826, + 0.141 + ], + "angle": 0, + "content": "K.3 Preliminary Lemmas for GD and GF 35" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.146, + 0.826, + 0.163 + ], + "angle": 0, + "content": "K.4 Construction of working zones 38" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.167, + 0.826, + 0.184 + ], + "angle": 0, + "content": "K.5 Phase 1: Iterate Approaching the Manifold 39" + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.103, + 0.826, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.189, + 0.826, + 0.205 + ], + "angle": 0, + "content": "K.5.1 Additional notations 39" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.21, + 0.826, + 0.226 + ], + "angle": 0, + "content": "K.5.2 Proof for Subphase 1 39" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.232, + 0.826, + 0.248 + ], + "angle": 0, + "content": "K.5.3 Proof for Subphase 2 43" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.189, + 0.826, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.253, + 0.826, + 0.27 + ], + "angle": 0, + "content": "K.6 Phase 2: Iterates Staying Close to Manifold 46" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.275, + 0.826, + 0.291 + ], + "angle": 0, + "content": "K.6.1 Additional notations 46" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.296, + 0.826, + 0.312 + ], + "angle": 0, + "content": "K.6.2 Proof for the High Probability Bounds 46" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.275, + 0.826, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 
0.318, + 0.826, + 0.334 + ], + "angle": 0, + "content": "K.7 Summary of the dynamics and Proof of Theorems J.1 and J.2 51" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.339, + 0.826, + 0.355 + ], + "angle": 0, + "content": "K.8 Proof of Theorem 3.3 52" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.361, + 0.826, + 0.377 + ], + "angle": 0, + "content": "K.9 Computing the Moments for One \"Giant Step\" 53" + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.318, + 0.826, + 0.377 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.382, + 0.826, + 0.398 + ], + "angle": 0, + "content": "K.10 Proof of Weak Approximation 66" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.403, + 0.826, + 0.418 + ], + "angle": 0, + "content": "K.10.1 Preliminaries and additional notations 67" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.424, + 0.826, + 0.44 + ], + "angle": 0, + "content": "K.10.2 Proof of the approximation in our context 68" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.403, + 0.826, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.458, + 0.826, + 0.474 + ], + "angle": 0, + "content": "L Deriving the Slow SDE for Label Noise Regularization 72" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.492, + 0.826, + 0.508 + ], + "angle": 0, + "content": "M Experimental Details 74" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.514, + 0.826, + 0.529 + ], + "angle": 0, + "content": "M.1 Post-local SGD Experiments in Section 1 74" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.535, + 0.826, + 0.551 + ], + "angle": 0, + "content": "M.2 Experimental Details for Figures 2 and 5 74" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.556, + 0.826, + 0.572 + ], + "angle": 0, + "content": "M.3 Details for Experiments in Figure 6. 
75" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.578, + 0.826, + 0.593 + ], + "angle": 0, + "content": "M.4 Details for Experiments on the Effect of the Diffusion Term 75" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.6, + 0.826, + 0.615 + ], + "angle": 0, + "content": "M.5 Details for Experiments on the Effect of Global Batch Size 76" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.62, + 0.826, + 0.636 + ], + "angle": 0, + "content": "M.6 Details for Experiments on Label Noise Regularization 76" + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.514, + 0.826, + 0.636 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.104, + 0.472, + 0.117 + ], + "angle": 0, + "content": "A ADDITIONAL RELATED WORKS" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.202, + 0.824, + 0.48 + ], + "angle": 0, + "content": "**Optimization aspect of Local SGD.** Local SGD is a communication-efficient variant of parallel SGD, where multiple workers perform SGD independently and average the model parameters periodically. Dating back to Mann et al. (2009) and Zinkevich et al. (2010), this strategy has been widely adopted to reduce the communication cost and speed up training in both scenarios of data center distributed training (Chen & Huo, 2016; Zhang et al., 2014; Povey et al., 2014; Su & Chen, 2015) and Federated Learning (McMahan et al., 2017; Kairouz et al., 2021). To further accelerate training, Wang & Joshi (2019) and Haddadpour et al. (2019) proposed adaptive schemes for the averaging frequency, and Basu et al. (2019) combined Local SGD with gradient compression. 
Motivated to theoretically understand the empirical success of Local SGD, a lot of researchers analyzed the convergence rate of Local SGD under various settings, e.g., homogeneous/heterogeneous data and convex/non-convex objective functions. Among them, Yu et al. (2019); Stich (2018); Khaled et al. (2020); Woodworth et al. (2020a) focus on the homogeneous setting where data for each worker are independent and identically distributed (IID). Li et al. (2019b); Karimireddy et al. (2020); Glasgow et al. (2022); Woodworth et al. (2020b); Wang et al. (2022) study the heterogeneous setting, where workers have non-IID data and local updates may induce \"client drift\" (Karimireddy et al., 2020) and hurt optimization. The error bound of Local SGD obtained by these works is typically inferior to that of SGD with the same global batch size for fixed number of iterations/epochs and becomes worse as the number of local steps increases, revealing a trade-off between less communication and better optimization. In this paper, we are interested in the generalization aspect of Local SGD in the homogeneous setting, assuming the training loss can be optimized to a small value." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.487, + 0.824, + 0.695 + ], + "angle": 0, + "content": "Gradient noise and generalization. The effect of stochastic gradient noise on generalization has been studied from different aspects, e.g., changing the order of learning different patterns Li et al. (2019a), inducing an implicit regularizer in the second-order SDE approximation Smith et al. (2021); Li et al. (2019a). Our work follows a line of works studying the effect of noise in the lens of sharpness, which is long believed to be related to generalization Hochreiter & Schmidhuber (1997); Neyshabur et al. (2017). Keskar et al. (2017) empirically observed that large-batch training leads to worse generalization and sharper minima than small-batch training. Wu et al. (2018); Hu et al. 
(2017); Ma & Ying (2021) showed that gradient noise destabilizes the training around sharp minima, and Kleinberg et al. (2018); Zhu et al. (2018); Xie et al. (2021); Ibayashi & Imaizumi (2021) quantitatively characterized how SGD escapes sharp minima. The most related papers are Blanc et al. (2020); Damian et al. (2021); Li et al. (2021b), which focus on the training dynamics near a manifold of minima and study the effect of noise on sharpness (see also Section 3.2). Though the mathematical definition of sharpness may be vulnerable to the various symmetries in deep neural nets (Dinh et al., 2017), sharpness still appears to be one of the most promising tools for predicting generalization (Jiang et al., 2020; Foret et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.703, + 0.824, + 0.923 + ], + "angle": 0, + "content": "Improving generalization in large-batch training. The generalization issue of the large-batch (or full-batch) training has been observed as early as (Bengio, 2012; LeCun et al., 2012). As mentioned in Section 1, the generalization issue of large-batch training could be due to the lack of a sufficient amount of stochastic noise. To make up the noise in large-batch training, Krizhevsky (2014); Goyal et al. (2017) empirically discovered the Linear Scaling Rule for SGD, which suggests enlarging the learning rate proportionally to the batch size. Jastrzebski et al. (2017) adopted an SDE-based analysis to justify that this scaling rule indeed retains the same amount of noise as small-batch training (see also Section 3.1). However, the SDE approximation may fail if the learning rate is too large (Li et al., 2021a), especially in the early phase of training before the first learning rate decay (Smith et al., 2020). Shallue et al. (2019) demonstrated that generalization gap between small- and large-batch training can also depend on many other training hyperparameters. 
Besides enlarging the learning rate, other approaches have also been proposed to reduce the gap, including training longer (Hoffer et al., 2017), learning rate warmup (Goyal et al., 2017), LARS (You et al., 2018), LAMB (You et al., 2020). In this paper, we focus on using Local SGD to improve generalization, but adding local steps is a generic training trick that can also be combined with others, e.g., Local LARS (Lin et al., 2020b), Local Extrap-SGD (Lin et al., 2020a)." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.949, + 0.507, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.44, + 0.119 + ], + "angle": 0, + "content": "B ADDITIONAL DISCUSSIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.248 + ], + "angle": 0, + "content": "Connection to the conventional wisdom that the diffusion term matters more. As mentioned in Section 3.1, it is believed in the literature that a large diffusion term in the conventional SDE leads to good generalization. One may think that the diffusion term in the Slow SDE corresponds to that in the conventional SDE, and thus enlarging the diffusion term rather than the drift term should lead to better generalization. However, we note that both the diffusion and drift terms in the Slow SDEs result from the long-term effects of the diffusion term in the conventional SDE (Slow SDEs become stationary if \\(\\Sigma = 0\\)). This means our view characterizes the role of gradient noise in more detail, and therefore, goes one step beyond the conventional wisdom." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.252, + 0.828, + 0.368 + ], + "angle": 0, + "content": "Slow SDEs for neural nets with modern training techniques. 
In modern neural net training, it is common to add normalization layers and weight decay (\\(L^2\\)-regularization) for better optimization and generalization. However, these techniques lead to violations of our assumptions, e.g., no fixed point exists in the regularized loss (Li et al., 2020; Ahn et al., 2022). Still, a minimizer manifold can be expected to exist for the unregularized loss. Li et al. (2022) noted that the drift and diffusion around the manifold proceeds faster in this case, and derived a Slow SDE for SGD that captures \\(\\mathcal{O}\\left(\\frac{1}{\\eta} \\log \\frac{1}{\\eta}\\right)\\) discrete steps instead of \\(\\mathcal{O}\\left(\\frac{1}{\\eta^2}\\right)\\). We believe that our analysis can also be extended to this case, and that adding local steps still results in the effect of strengthening the drift term." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.768, + 0.138 + ], + "angle": 0, + "content": "C IMPLEMENTATION DETAILS OF PARALLEL SGD, LOCAL SGD AND POST-LOCAL SGD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.152, + 0.825, + 0.222 + ], + "angle": 0, + "content": "In this section, we present the formal procedures for Parallel SGD, Local SGD and Post-local SGD. Given a training dataset and a data augmentation function, Algorithms 1 and 2 show the implementations of distributed samplers for sampling local batches with and without replacement. Then Algorithms 3 to 5 show the implementations of parallel SGD, Local SGD and Post-local SGD that can run with either of the samplers." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.228, + 0.827, + 0.3 + ], + "angle": 0, + "content": "Sampling with replacement. 
Our theory analyzes parallel SGD, Local SGD and Post-local SGD when local batches are sampled with replacement (Algorithm 1). That is, local batches consist of IID samples from the same training distribution \\(\\hat{D}\\), where \\(\\hat{D}\\) serves as an abstraction of the distribution of an augmented sample drawn from the training dataset. The mathematical formulations are given in Section 1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.307, + 0.827, + 0.448 + ], + "angle": 0, + "content": "Sampling without replacement. Slightly different from our theory, we use the sampling without replacement (Algorithm 2) in our experiments unless otherwise stated. This sampling scheme is standard in practice: it is used by Goyal et al. (2017) for parallel SGD and by Lin et al. (2020b); Ortiz et al. (2021) for Post-local/Local SGD. This sampling scheme works as follows. At the beginning of every epoch, the whole training dataset is shuffled and evenly partitioned into \\( K \\) shards. Each worker takes one shard and samples batches without replacement. When all workers pass their own shard, the next epoch begins and the whole dataset is reshuffled. An alternative view is that the workers always share the same dataset. For each epoch, they perform local steps by sampling batches of data without replacement until the dataset contains too few data to form a batch. Then another epoch starts with the dataset reloaded to the initial state." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.453, + 0.825, + 0.51 + ], + "angle": 0, + "content": "Discrepancy in Sampling Schemes. We argue that this discrepancy between theory and experiments on sample schemes is minor. Though sampling without replacement is standard in practice, most previous works, e.g., Wang & Joshi (2019); Li et al. (2021a); Zhang et al. (2020), analyze sampling with replacement for technical simplicity and yields meaningful results." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.516, + 0.827, + 0.573 + ], + "angle": 0, + "content": "Moreover, even if we change the sampling scheme to with replacement, Local SGD can still improve the generalization of SGD (by merely adding local steps). See Appendix F for the experiments. We believe that the reasons for better generalization of Local SGD with either sampling scheme are similar and leave the analysis for sampling without replacement for future work." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code_caption", + "bbox": [ + 0.173, + 0.107, + 0.7, + 0.124 + ], + "angle": 0, + "content": "Algorithm 1: Distributed Sampler on \\( K \\) Workers (Sampling with Replacement)" + }, + { + "type": "algorithm", + "bbox": [ + 0.159, + 0.127, + 0.8, + 0.247 + ], + "angle": 0, + "content": "Require: shared training dataset \\(\\mathcal{D}\\) data augmentation function \\(\\mathcal{A}(\\hat{\\xi})\\) \nHyperparameters: local batch size \\(B_{\\mathrm{loc}}\\) \nFunction Sample () on worker k: Draw \\(B_{\\mathrm{loc}}\\) IID samples \\(\\hat{\\xi}_1,\\dots ,\\hat{\\xi}_{B_{\\mathrm{loc}}}\\) from \\(\\mathcal{D}\\) with replacement; \\(\\xi_b\\gets \\mathcal{A}(\\hat{\\xi}_b)\\) for all \\(1\\leq b\\leq B_{\\mathrm{loc}}\\) // apply data augmentation" + }, + { + "type": "code_caption", + "bbox": [ + 0.173, + 0.422, + 0.722, + 0.438 + ], + "angle": 0, + "content": "Algorithm 2: Distributed Sampler on \\( K \\) Workers (Sampling without Replacement)" + }, + { + "type": "algorithm", + "bbox": [ + 0.159, + 0.441, + 0.8, + 0.768 + ], + "angle": 0, + "content": "Require: shared training dataset \\(\\mathcal{D}\\) data augmentation function \\(\\mathcal{A}(\\hat{\\xi})\\) \nHyperparameters: local batch size 
\\(B_{\\mathrm{loc}}\\) \nConstant: \\(N_{\\mathrm{loc}}\\coloneqq \\left\\lfloor \\frac{|D|}{KB_{\\mathrm{loc}}}\\right\\rfloor\\) // number of local batches per worker per epoch \nLocal Variables: \\(c^{(k)}\\gets N_{\\mathrm{loc}}B_{\\mathrm{loc}}\\) for worker k // number of samples drawn in this epoch \nFunction Sample () on worker k: \nif \\(c^{(k)} = N_{\\mathrm{loc}}B_{\\mathrm{loc}}\\) then // Now start a new epoch Wait until all the other workers reach this line; // synchronize Draw a random permutation \\(P\\) of 1,..., \\(|D|\\) jointly with other workers so that the same permutation is shared among all workers; // reshuffle the dataset \\(Q_{j}^{(k)}\\gets P_{(k - 1)N_{\\mathrm{loc}}B_{\\mathrm{loc}} + j}\\) for all \\(1\\leq j\\leq N_{\\mathrm{loc}}\\) // partition the dataset \\(c^{(k)}\\gets 0\\) end \nfor \\(i = 1,\\dots ,B_{\\mathrm{loc}}\\) do \\(\\hat{\\xi}_i\\gets \\hat{\\xi}_i\\) the \\(Q_{c^{(k)} + i}^{(k)}\\) th data point of \\(\\mathcal{D}\\) // sample without replacement \\(\\xi_i\\gets \\mathcal{A}(\\hat{\\xi}_i)\\) // apply data augmentation \nend \n\\(c^{(k)}\\gets c^{(k)} + B_{\\mathrm{loc}}\\) . \nreturn \\((\\xi_1,\\ldots ,\\xi_{B_{\\mathrm{loc}}})\\) .." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code_caption", + "bbox": [ + 0.173, + 0.107, + 0.457, + 0.123 + ], + "angle": 0, + "content": "Algorithm 3: Parallel SGD on \\( K \\) Workers" + }, + { + "type": "algorithm", + "bbox": [ + 0.159, + 0.126, + 0.805, + 0.291 + ], + "angle": 0, + "content": "Input: loss function \\(\\ell (\\pmb {\\theta};\\xi)\\) , initial parameter \\(\\pmb{\\theta}_{0}\\) \nHyperparameters: total number of iterations \\(T\\) , learning rate \\(\\eta\\) , local batch size \\(B_{\\mathrm{loc}}\\) \nfor \\(t = 0,\\dots ,T - 1\\) do \nfor each worker k do in parallel \n\\((\\xi_{k,t,1},\\ldots ,\\xi_{k,t,B_{\\mathrm{loc}}})\\gets \\mathrm{Sample}()\\) // sample a local batch \n\\(g_{k,t}\\gets \\frac{1}{B_{\\mathrm{loc}}}\\sum_{i = 1}^{B_{\\mathrm{loc}}}\\nabla \\ell (\\pmb {\\theta}_t;\\xi_{k,t,i})\\) // computing the local gradient \nend \n\\(g_{t}\\gets \\frac{1}{K}\\sum_{k = 1}^{K}g_{k,t}\\) // all-Reduce aggregation of local gradients \n\\(\\pmb{\\theta}_{t + 1}\\gets \\pmb{\\theta}_{t} - \\eta_{t}\\pmb{g}_{t}\\) // update the model \nend" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.338, + 0.444, + 0.354 + ], + "angle": 0, + "content": "Algorithm 4: Local SGD on \\( K \\) Workers" + }, + { + "type": "algorithm", + "bbox": [ + 0.159, + 0.357, + 0.803, + 0.599 + ], + "angle": 0, + "content": "Input: loss function \\(\\ell (\\pmb {\\theta};\\xi)\\) , initial parameter \\(\\bar{\\theta}^{(0)}\\) \nHyperparameters: total number of rounds \\(R\\) , number of local steps \\(H\\) per round \nHyperparameters: learning rate \\(\\eta\\) , local batch size \\(B_{\\mathrm{loc}}\\) \nfor \\(s = 0,\\dots ,R - 1\\) do \nfor each worker k do in parallel \\(\\theta_{k,0}^{(s)}\\gets 
\\bar{\\theta}^{(0)};\\) // maintain a local copy of the global iterate \nfor \\(t = 0,\\ldots ,H - 1\\) do \\((\\xi_{k,t,1}^{(s)},\\dots ,\\xi_{k,t,B_{\\mathrm{loc}}}^{(s)})\\leftarrow \\mathrm{Sample}()\\) // sample a local batch \n\\(g_{k,t}^{(s)}\\leftarrow \\frac{1}{B_{\\mathrm{loc}}}\\sum_{i = 1}^{B_{\\mathrm{loc}}}\\nabla \\ell (\\pmb{\\theta}_{k,t}^{(s)};\\xi_{k,t,i}^{(s)})\\) // computing the local gradient \n\\(\\theta_{k,t + 1}^{(s)}\\gets \\theta_{k,t}^{(s)} - \\eta g_{k,t}^{(s)}\\) // update the local model \nend \nend \n\\(\\bar{\\theta}^{(s + 1)}\\gets \\frac{1}{K}\\sum_{k = 1}^{K}\\theta_{k,H}^{(s)}\\) // all-Reduce aggregation of local iterates \nend" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.631, + 0.472, + 0.645 + ], + "angle": 0, + "content": "Algorithm 5: Post-local SGD on \\( K \\) Workers" + }, + { + "type": "algorithm", + "bbox": [ + 0.16, + 0.648, + 0.745, + 0.763 + ], + "angle": 0, + "content": "1 Input: loss function \\(\\ell (\\pmb {\\theta};\\xi)\\) , initial parameter \\(\\pmb{\\theta}_{0}\\) \n2 Hyperparameters: total number of iterations \\(T\\) , learning rate \\(\\eta\\) , local batch size \\(B_{\\mathrm{loc}}\\) \n3 Hyperparameters: switching time point \\(t_0\\) , number of local steps \\(H\\) per round \n4 Ensure: \\(T - t_0\\) is a multiple of \\(H\\) \n5 Starting from \\(\\pmb{\\theta}_{0}\\) , run Parallel SGD for \\(t_0\\) iterations and obtain \\(\\pmb{\\theta}_{t_0}\\) . 
\n6 Starting from \\(\\pmb{\\theta}_{t_0}\\) , run Local SGD for \\(\\frac{1}{H} (T - t_0)\\) rounds with \\(H\\) local steps per round; \n7 return the final global iterate of Local SGD ;" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.103, + 0.757, + 0.119 + ], + "angle": 0, + "content": "D MODELING LOCAL SGD WITH MULTIPLE CONVENTIONAL SDES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.133, + 0.827, + 0.224 + ], + "angle": 0, + "content": "Lin et al. (2020b) tried to informally explain the success of Local SGD by adopting the argument that larger diffusion term in the conventional SDE leads to better generalization (see Section 3.1 and appendix A). Basically, they attempted to write multiple SDEs, each of which describes the \\(H\\)-step local training process of each worker in each round (from \\(\\theta_{k,0}^{(s)}\\) to \\(\\theta_{k,H}^{(s)}\\)). The key difference between each of these SDEs and the SDE for SGD (3) is that the former one has a larger diffusion term because the workers use batch size \\(B_{\\mathrm{loc}}\\) instead of \\(B\\):" + }, + { + "type": "equation", + "bbox": [ + 0.338, + 0.229, + 0.825, + 0.262 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {X} (t) = - \\nabla \\mathcal {L} (\\boldsymbol {X}) \\mathrm {d} t + \\sqrt {\\frac {\\eta}{B _ {\\mathrm {l o c}}}} \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {X}) \\mathrm {d} \\boldsymbol {W} _ {t}. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.267, + 0.825, + 0.325 + ], + "angle": 0, + "content": "Lin et al. (2020b) then argue that the total amount of \"noise\" in the training dynamics of Local SGD is larger than that of SGD. 
However, it is hard to see whether it is indeed larger, since the model averaging step at the end of each round can reduce the variance in training and may cancel the effect of having larger diffusion terms." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.33, + 0.826, + 0.414 + ], + "angle": 0, + "content": "More formally, a complete modeling of Local SGD following this idea should view the sequence of global iterates \(\{\bar{\theta}^{(s)}\}\) as a Markov process \(\{X^{(s)}\}\). Let \(\mathcal{P}_X(x,B,t)\) be the distribution of \(X(t)\) in (3) with initial condition \(X(0) = x\). Then the Markov transition should be \(X^{(s + 1)} = \frac{1}{K}\sum_{k = 1}^{K}X_{k,H}^{(s)}\) where \(X_{1,H}^{(s)},\ldots ,X_{K,H}^{(s)}\) are \(K\) independent samples from \(\mathcal{P}_X(X^{(s)},B_{\mathrm{loc}},H\eta)\), i.e., sampling from (10)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.419, + 0.825, + 0.513 + ], + "angle": 0, + "content": "Consider one round of model averaging. It is true that \(\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B_{\mathrm{loc}}, H\eta)\) may have a larger variance than the corresponding SGD baseline \(\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B, H\eta)\) because the former one has a smaller batch size. However, it is unclear whether \(\mathbf{X}^{(s + 1)}\) also has a larger variance than \(\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B, H\eta)\). This is because \(\mathbf{X}^{(s + 1)}\) is the average of \(K\) samples, which means we have to compare \(\frac{1}{K}\) times the variance of \(\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B_{\mathrm{loc}}, H\eta)\) with the variance of \(\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B, H\eta)\). Then it is unclear which one is larger." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.519, + 0.825, + 0.548 + ], + "angle": 0, + "content": "In the special case where \\(H\\eta\\) is small, \\(\\mathcal{P}_X(X^{(s)},B_{\\mathrm{loc}},H\\eta)\\) is approximately equal to the following Gaussian distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.552, + 0.825, + 0.587 + ], + "angle": 0, + "content": "\\[\n\\mathcal {N} \\left(\\boldsymbol {X} ^ {(s)} - \\eta H \\nabla \\mathcal {L} \\left(\\boldsymbol {X} ^ {(s)}\\right), \\frac {\\eta^ {2} H}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} \\left(\\boldsymbol {X} ^ {(s)}\\right)\\right) \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.592, + 0.427, + 0.607 + ], + "angle": 0, + "content": "Then averaging over \\(K\\) samples gives" + }, + { + "type": "equation", + "bbox": [ + 0.344, + 0.613, + 0.825, + 0.648 + ], + "angle": 0, + "content": "\\[\n\\mathcal {N} \\left(\\boldsymbol {X} ^ {(s)} - \\eta H \\nabla \\mathcal {L} \\left(\\boldsymbol {X} ^ {(s)}\\right), \\frac {\\eta^ {2} H}{B} \\boldsymbol {\\Sigma} \\left(\\boldsymbol {X} ^ {(s)}\\right)\\right), \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.652, + 0.825, + 0.695 + ], + "angle": 0, + "content": "which is exactly the same as the Gaussian approximation of the SGD baseline. This means there do exist certain cases where Lin et al. (2020b)'s argument does not give a good separation between Local SGD and SGD." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.701, + 0.825, + 0.731 + ], + "angle": 0, + "content": "Moreover, we do not gain any further insights from this modeling since it is hard to see how model averaging interacts with the SDEs." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.75, + 0.642, + 0.767 + ], + "angle": 0, + "content": "E ADDITIONAL INTERPRETATION OF THE SLOW SDES" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.781, + 0.634, + 0.795 + ], + "angle": 0, + "content": "E.1 UNDERSTANDING THE DIFFUSION TERM IN THE SLOW SDE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.807, + 0.825, + 0.879 + ], + "angle": 0, + "content": "So far, we have discussed why adding local steps enlarges the drift term in the Slow SDE and why enlarging the drift term can benefit generalization. Besides this, here we remark that another way to accelerate the corresponding semi-gradient method for minimizing the implicit regularizer is to reduce the diffusion term, so that the trajectory more closely follows the drift term. More formally, we propose the following:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.881, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Hypothesis E.1. Starting at a minimizer \(\zeta_0\in \Gamma\), run \((\kappa_{1},\kappa_{2})\)-Slow SDE and \((\kappa_{1}^{\prime},\kappa_{2})\)-Slow SDE respectively for the same amount of time \(T > 0\) and obtain \(\zeta (T),\zeta '(T)\). If \(\pmb{\Sigma}_{\parallel}\neq \mathbf{0}\) and \(\kappa_{1} < \kappa_{1}^{\prime}\) then the expected test accuracy at \(\zeta (T)\) is better than that at \(\zeta^{\prime}(T)\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.23, + 0.067, + 0.481, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.243, + 0.195, + 0.465, + 0.209 + ], + "angle": 0, + "content": "(a) CIFAR-10, \( H = 600 \) for \( K > 1 \)." 
+ }, + { + "type": "image", + "bbox": [ + 0.511, + 0.067, + 0.765, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.534, + 0.195, + 0.743, + 0.209 + ], + "angle": 0, + "content": "(b) ImageNet, \\( H = 78 \\) for \\( K > 1 \\)." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.215, + 0.825, + 0.259 + ], + "angle": 0, + "content": "Figure 3: Reducing the diffusion term of the Slow SDE for Local SGD leads to better generalization. Test accuracy improves as we increase \\( K \\) with fixed \\( \\eta \\) and \\( H \\) to reduce the diffusion term while keeping the drift term untouched. See Appendix M.4 for details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.362 + ], + "angle": 0, + "content": "Here we exclude the case of \\(\\boldsymbol{\\Sigma}_{\\parallel} \\equiv \\mathbf{0}\\) because in this case the diffusion term in the Slow SDE is always zero. To verify Hypothesis E.1, we set the product \\(\\alpha \\coloneqq \\eta H\\) large, keep \\(H, \\eta\\) fixed, increase the number of workers \\(K\\), and compare the generalization performances after a fixed amount of training steps (but after different numbers of epochs). This case corresponds to the \\((\\frac{1}{KB_{\\mathrm{loc}}}, \\frac{1}{2B_{\\mathrm{loc}}})\\)-Slow SDE, so adding more workers should reduce the diffusion term. As shown in Figure 3, a higher test accuracy is indeed achieved for larger \\(K\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.366, + 0.825, + 0.465 + ], + "angle": 0, + "content": "**Implication:** Enlarging the learning rate is not equally effective as adding local steps. Given that Local SGD improves generalization by strengthening the drift term, it is natural to wonder if enlarging the learning rate of SGD would also lead to similar improvements. 
While it is true that enlarging the learning rate effectively increases the drift term, it also increases the diffusion term simultaneously, which can hinder the implicit regularization by Hypothesis E.1. In contrast, adding local steps does not change the diffusion term. As shown in Figure 6(a), even when the learning rate of SGD is increased, SGD still underperforms Local SGD by about \\(2\\%\\) in test accuracy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.47, + 0.825, + 0.545 + ], + "angle": 0, + "content": "On the other hand, in the special case of where \\(\\pmb{\\Sigma}_{\\parallel} \\equiv \\mathbf{0}\\), Hypothesis E.1 does not hold, and enlarging the learning rate by \\(\\sqrt{K}\\) results in the same Slow SDE as adding local steps (see Appendix G for derivation). Then these two actions should produce the same generalization improvement, unless the learning rate is so large that Slow SDE loses track of the training dynamics. As an example of such a special case, an experiment with label noise regularization is presented in Figure 8." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.561, + 0.634, + 0.575 + ], + "angle": 0, + "content": "E.2 THE EFFECT OF GLOBAL BATCH SIZE ON GENERALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.587, + 0.825, + 0.658 + ], + "angle": 0, + "content": "In this section, we discuss the effect of global batch size on the generalization of Local SGD. Given that the computation power of a single worker is limited, we consider the case where the local batch size \\( B_{\\mathrm{loc}} \\) is fixed and the global batch size \\( B = KB_{\\mathrm{loc}} \\) is tuned by adding or removing the workers. This scenario is relevant to the practice because one may want to know the maximum parallelism possible to train the neural net without causing generalization degradation." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.663, + 0.825, + 0.721 + ], + "angle": 0, + "content": "For SGD, previous works have proposed the Linear Scaling Rule (LSR) (Krizhevsky, 2014; Goyal et al., 2017; Jastrzebski et al., 2017): scaling the learning rate \\(\\eta \\mapsto \\kappa \\eta\\) linearly with the global batch size \\(B \\mapsto \\kappa B\\) yields the same conventional SDE (3) under a constant epoch budget, hence leading to almost the same generalization performance as long as the SDE approximation does not fail." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.726, + 0.825, + 0.825 + ], + "angle": 0, + "content": "We show in Theorem H.1 that the LSR does not change the Slow SDE of SGD either. Experiments in Figure 4 show that the LSR indeed holds nicely when we continue training with small learning rates from the same CIFAR-10 and ImageNet checkpoints as in Figure 2. Here we choose \\( K = 16 \\) and \\( K = 256 \\) as the base settings for CIFAR-10 and ImageNet, respectively, and then tune the learning rate to maximize the test accuracy. As shown in Figures 4(a) and 4(b), the optimal learning rate turns out to be small enough that the LSR can be applied to scale the global batch size with only a minor change in test accuracy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.831, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Now, assuming the learning rate is scaled as LSR, we study how to tune the number of local steps \\( H \\) for Local SGD for better generalization. A natural choice is to tune \\( H \\) in the base settings and keep \\( \\alpha \\) unchanged via scaling \\( H \\mapsto H / \\kappa \\). 
Then the following SDE can be derived (see Theorem H.2):" + }, + { + "type": "equation", + "bbox": [ + 0.206, + 0.878, + 0.825, + 0.907 + ], + "angle": 0, + "content": "\[\n\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\zeta} \left(\underbrace {\frac {1}{\sqrt {B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t}} _ {\text {diffusion}} \underbrace {- \frac {1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {drift-I}} \underbrace {- \frac {\kappa K - 1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Psi}} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {drift-II}}\right). \tag {13}\n\]" + }, + { + "type": "image_caption", + "bbox": [ + 0.298, + 0.91, + 0.418, + 0.921 + ], + "angle": 0, + "content": "(a) diffusion (unchanged)" + }, + { + "type": "image_caption", + "bbox": [ + 0.444, + 0.907, + 0.55, + 0.919 + ], + "angle": 0, + "content": "(b) drift-I (unchanged)" + }, + { + "type": "image_caption", + "bbox": [ + 0.612, + 0.907, + 0.71, + 0.918 + ], + "angle": 0, + "content": "(c) drift-II (rescaled)" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.237, + 0.067, + 0.49, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.195, + 0.457, + 0.209 + ], + "angle": 0, + "content": "(a) CIFAR-10, start from #250." 
+ }, + { + "type": "image", + "bbox": [ + 0.507, + 0.066, + 0.761, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.541, + 0.195, + 0.727, + 0.209 + ], + "angle": 0, + "content": "(b) ImageNet, start from #100." + }, + { + "type": "image", + "bbox": [ + 0.235, + 0.215, + 0.487, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.267, + 0.343, + 0.456, + 0.357 + ], + "angle": 0, + "content": "(c) CIFAR-10, start from #250." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.215, + 0.758, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.539, + 0.343, + 0.725, + 0.357 + ], + "angle": 0, + "content": "(d) ImageNet, start from #100." + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.364, + 0.825, + 0.449 + ], + "angle": 0, + "content": "Figure 4: For training from CIFAR-10 and ImageNet checkpoints, Local SGD consistently outperforms SGD \\((H = 1)\\) across different batch sizes \\(B\\) (fixing \\(B_{\\mathrm{loc}}\\) and varying \\(K\\)), where the learning rate is scaled by the LSR \\(\\eta \\propto B\\). Two possible ways of tuning the number of local steps \\(H\\) are considered: (1). Tune \\(H\\) for the best test accuracy for \\(K = 16\\) and \\(K = 256\\) respectively on CIFAR-10 and ImageNet, then scale \\(H\\) as \\(H \\propto 1 / B\\) so that \\(\\alpha \\coloneqq \\eta H\\) is constant; (2). Tune \\(H\\) specifically for each \\(K\\). See Appendix M.5 for training details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.471, + 0.825, + 0.52 + ], + "angle": 0, + "content": "Compared with (4), the drift-II term here is rescaled by a positive factor. 
Again, when \\(\\alpha\\) is large, we can follow the argument in Section 3.3.2 to approximate \\(\\widehat{\\Psi} (\\zeta)\\approx \\widehat{\\Sigma}_{\\diamond}(\\zeta)\\) and obtain the following \\((\\frac{1}{B},\\frac{\\kappa K}{B})\\)-Slow SDE:" + }, + { + "type": "equation", + "bbox": [ + 0.299, + 0.533, + 0.825, + 0.559 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} (t) - \\frac {\\kappa K}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t\\right). \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.57, + 0.825, + 0.627 + ], + "angle": 0, + "content": "The drift term of the above SDE is always stronger than SGD (7), as long as there exists more than one worker after the scaling (i.e., \\(\\kappa K > 1\\)). As expected from Hypothesis 3.1, we observed in the experiments that the generalization performance of Local SGD is always better than or at least comparable to SGD across different batch sizes (see Figures 4(a) and 4(b))." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.632, + 0.825, + 0.746 + ], + "angle": 0, + "content": "Taking a closer look into the drift term in the Slow SDE (14), we can find that it scales linearly with \\(\\kappa\\). According to Hypothesis 3.1, the SDE is expected to generalize better when adding more workers (\\(\\kappa > 1\\)) and to generalize worse when removing some workers (\\(\\kappa < 1\\)). For the latter case, we indeed observed that the test accuracy of Local SGD drops when removing workers. 
For the case of adding workers, however, we also need to take into account that the LSR specifies a larger learning rate and causes a larger SDE approximation error for the same \\(\\alpha\\), which may cancel the generalization improvement brought by strengthening the drift term. In the experiments, we observed that the test accuracy does not rise when adding more workers to the base settings." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.751, + 0.827, + 0.85 + ], + "angle": 0, + "content": "Since \\(\\alpha\\) also controls the regularization strength (Section 3.3.3), it would be beneficial to decrease \\(\\alpha\\) for large batch size so as to better trade-off between regularization strength and approximation quality. In Figures 4(c) and 4(d), we plot the optimal value of \\(\\alpha\\) for each batch size, and we indeed observed that the optimal \\(\\alpha\\) drops as we scale up \\(K\\). Conversely, a smaller batch size (and hence a smaller learning rate) allows for using a larger \\(\\alpha\\) to enhance regularization while still keeping a low approximation error (Theorem 3.3). The test accuracy curves in Figures 4(a) and 4(b) indeed show that setting a larger \\(\\alpha\\) can compensate for the accuracy drop when reducing the batch size." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.875, + 0.535, + 0.89 + ], + "angle": 0, + "content": "F ADDITIONAL EXPERIMENTAL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.91, + 0.739, + 0.926 + ], + "angle": 0, + "content": "In this section, we present additional experimental results to further verify our finding." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "Supplementary Plot: Training time should be long enough. Figures 5(a) and 5(b) show enlarged views for Figures 2(a) and 2(c) respectively, showing that Local SGD can generalize worse than SGD in the first few epochs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.153, + 0.825, + 0.224 + ], + "angle": 0, + "content": "Supplementary Plot: Learning rate should be small. Figure 5(c) shows that reducing the learning rate from 0.32 to 0.064 does not lead to test accuracy drop for Local SGD on CIFAR-10, if the training time is allowed to be longer and the number of local steps \\( H \\) is set properly. Figure 5(d) presents the case where, with a large learning rate, the generalization improvement of Local SGD disappears even starting from a pre-trained model." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.23, + 0.825, + 0.302 + ], + "angle": 0, + "content": "Supplementary Plot: Reconciling our main finding with Ortiz et al. (2021). In Figure 5(e), the generalization benefit of Local SGD with \\( H = 24 \\) becomes less significant after the learning rate decay at epoch 226, which is consistent with the observation by Ortiz et al. (2021) that the generalization benefit of Local SGD usually disappears after the learning rate decay. But we can preserve the improvement by increasing \\( H \\) to 900. Here, we use Local SGD with momentum." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.307, + 0.825, + 0.387 + ], + "angle": 0, + "content": "Supplementary Plot: Optimal \\(\\alpha\\) gets larger for smaller \\(\\eta\\). 
In Figure 5(f), we summarize the optimal \\(\\alpha := \\eta H\\) that enables the highest test accuracy for each learning rate in Figure 2(f). We can see that the optimal \\(\\alpha\\) increases as we decrease the learning rate. The reason is that the approximation error bound \\(\\mathcal{O}(\\sqrt{\\alpha\\eta\\log\\frac{\\alpha}{\\eta\\delta}})\\) in Theorem 3.3 decreases with \\(\\eta\\), allowing for a larger value of \\(\\alpha\\) to better regularize the model." + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.407, + 0.372, + 0.499 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.174, + 0.505, + 0.379, + 0.519 + ], + "angle": 0, + "content": "(a) CIFAR-10, start from random." + }, + { + "type": "image", + "bbox": [ + 0.403, + 0.407, + 0.593, + 0.499 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.406, + 0.505, + 0.592, + 0.519 + ], + "angle": 0, + "content": "(b) ImageNet, start from #250." + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.408, + 0.815, + 0.499 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.627, + 0.505, + 0.815, + 0.519 + ], + "angle": 0, + "content": "(c) CIFAR-10, start from #100." + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.533, + 0.372, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.184, + 0.631, + 0.37, + 0.644 + ], + "angle": 0, + "content": "(d) ImageNet, start from #100." + }, + { + "type": "image", + "bbox": [ + 0.404, + 0.533, + 0.593, + 0.625 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.405, + 0.631, + 0.593, + 0.644 + ], + "angle": 0, + "content": "(e) CIFAR-10, start from #150." 
+ }, + { + "type": "image", + "bbox": [ + 0.626, + 0.533, + 0.815, + 0.625 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.628, + 0.631, + 0.813, + 0.645 + ], + "angle": 0, + "content": "(f) ImageNet, optimal \\(\\alpha\\) v.s. \\(\\eta\\)." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.666, + 0.825, + 0.696 + ], + "angle": 0, + "content": "Figure 5: Additional experimental results about the effect of the learning rate, training time and the number of local steps. See Appendix M.2 for details." + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.711, + 0.372, + 0.803 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.203, + 0.808, + 0.35, + 0.823 + ], + "angle": 0, + "content": "(a) SGD with various \\(\\eta\\)" + }, + { + "type": "image", + "bbox": [ + 0.404, + 0.711, + 0.593, + 0.803 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.808, + 0.596, + 0.822 + ], + "angle": 0, + "content": "(b) SGD with larger batch sizes." + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.711, + 0.815, + 0.802 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.618, + 0.808, + 0.824, + 0.835 + ], + "angle": 0, + "content": "(c) Post-local SGD, sampling with replacement." + }, + { + "type": "image_caption", + "bbox": [ + 0.21, + 0.838, + 0.786, + 0.853 + ], + "angle": 0, + "content": "Figure 6: Additional experimental results on CIFAR-10. See Appendix M.3 for details." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.825, + 0.926 + ], + "angle": 0, + "content": "SGD generalizes worse even with extensively tuned learning rates. In Figure 6(a), we run SGD from both random initialization and the pre-trained model for another 3,000 epochs with various learning rates and report the test accuracy. We can see that none of the SGD runs beat Local SGD with the fixed learning rate \\(\\eta = 0.32\\). 
Therefore, the inferior performance of SGD in Figures 2(a) and 2(b) is not due to the improper learning rate and Local SGD indeed generalizes better." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.203 + ], + "angle": 0, + "content": "SGD with larger batch sizes performs no better. In Figure 6(b), we enlarge the batch size of SGD and report the test accuracy for various learning rates. We can see that SGD with larger batch sizes performs no better and none of the SGD runs outperform Local SGD with the fixed learning rate \\(\\eta = 0.32\\). This result is unsurprising since it is well established in the literature (Jastrzebski et al., 2017; Smith et al., 2020; Keskar et al., 2017) that larger batch size typically leads to worse generalization. See Appendix A for a survey of empirical and theoretical works on understanding and resolving this phenomenon." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.209, + 0.825, + 0.294 + ], + "angle": 0, + "content": "Sampling with or without replacement does not matter. Note that there is a slight discrepancy in sampling schemes between our theoretical and experimental setup: the update rules (1) and (2) assume that data are sampled with replacement while most experiments use sampling without replacement (Appendix C). To eliminate the effect of this discrepancy, we conduct additional experiments on Post-local SGD using sampling with replacement (see Figure 6(c)) and Post-local SGD significantly outperforms SGD." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.313, + 0.795, + 0.329 + ], + "angle": 0, + "content": "G DISCUSSIONS ON LOCAL SGD WITH LABEL NOISE REGULARIZATION" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.344, + 0.731, + 0.358 + ], + "angle": 0, + "content": "G.1 THE SLOW SDE FOR LOCAL SGD WITH LABEL NOISE REGULARIZATION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.37, + 0.825, + 0.413 + ], + "angle": 0, + "content": "In this subsection, we present the Slow SDE for Local SGD in the case of label noise regularization and show that Local SGD indeed induces a stronger regularization term, which presumably leads to better generalization." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.416, + 0.825, + 0.459 + ], + "angle": 0, + "content": "Theorem G.1 (Slow SDE for Local SGD with label noise regularization). For a \\(C\\)-class classification task with cross-entropy loss, the slow SDE of Local SGD with label noise has the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.257, + 0.465, + 0.825, + 0.501 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = - \\frac {1}{4 B} \\nabla_ {\\Gamma} \\left(\\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right) + (K - 1) \\cdot \\frac {\\operatorname {t r} \\left(F \\left(2 H \\eta \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right)\\right)}{2 H \\eta}\\right) \\mathrm {d} t, \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.507, + 0.825, + 0.539 + ], + "angle": 0, + "content": "where \\( F(x) \\coloneqq \\int_0^x \\psi(y) \\, \\mathrm{d}y \\) and is interpreted as a matrix function. Additionally, \\( \\nabla_{\\Gamma} f \\) stands for the gradient of a function \\( f \\) projected to the tangent space of \\( \\Gamma \\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.551, + 0.336, + 0.567 + ], + "angle": 0, + "content": "Proof. See Appendix L." 
+ }, + { + "type": "image", + "bbox": [ + 0.808, + 0.552, + 0.826, + 0.565 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.581, + 0.825, + 0.611 + ], + "angle": 0, + "content": "Note that the magnitude of the RHS in (15) becomes larger as \\( H \\) increases. By letting \\( H \\) to go to infinity, we further have the following theorem." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.614, + 0.825, + 0.644 + ], + "angle": 0, + "content": "Theorem G.2. As the number of local steps \\( H \\) goes to infinity, the slow SDE of Local SGD with label noise (15) can be simplified as:" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.649, + 0.825, + 0.68 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = - \\frac {K}{4 B} \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right) \\mathrm {d} t. \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.692, + 0.697, + 0.708 + ], + "angle": 0, + "content": "Proof. We obtain the corollary by simply taking the limit. By L'Hospital's rule," + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.713, + 0.686, + 0.745 + ], + "angle": 0, + "content": "\\[\n\\lim _ {x \\rightarrow + \\infty} \\frac {F (a x)}{x} = \\lim _ {x \\rightarrow + \\infty} \\frac {\\mathrm {d} F (a x)}{\\mathrm {d} x} = \\lim _ {x \\rightarrow + \\infty} a \\psi (a x) = a.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.751, + 0.247, + 0.765 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.349, + 0.771, + 0.825, + 0.805 + ], + "angle": 0, + "content": "\\[\n\\lim _ {x \\rightarrow + \\infty} \\frac {\\operatorname {t r} (F (2 H \\eta \\nabla^ {2} \\mathcal {L} (\\zeta)))}{2 H \\eta} = \\operatorname {t r} (\\nabla^ {2} \\mathcal {L} (\\zeta)). 
\\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.81, + 0.427, + 0.826 + ], + "angle": 0, + "content": "Substituting (17) into (15) yields (16)." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.811, + 0.826, + 0.823 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.825, + 0.869 + ], + "angle": 0, + "content": "As introduced in Section 3.3, the Slow SDE for SGD with label noise regularization has the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.876, + 0.825, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = - \\frac {1}{4 B} \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right) \\mathrm {d} t, \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.91, + 0.633, + 0.925 + ], + "angle": 0, + "content": "which is a deterministic flow that keeps reducing the trace of Hessian." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.205, + 0.11, + 0.418, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.223, + 0.22, + 0.402, + 0.234 + ], + "angle": 0, + "content": "(a) ResNet-56 + GroupNorm." + }, + { + "type": "image", + "bbox": [ + 0.455, + 0.112, + 0.777, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.22, + 0.719, + 0.234 + ], + "angle": 0, + "content": "(b) VGG-16 w/o normalization." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.241, + 0.825, + 0.285 + ], + "angle": 0, + "content": "Figure 7: Local SGD with label noise regularization on CIFAR-10 without data augmentation using \\( K = 32 \\), \\( B_{\\mathrm{loc}} = 128 \\). A larger number of local steps indeed enables higher test accuracy. For both architectures, we replace ReLU with Swish. See Appendix M.6 for training details." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.317, + 0.827, + 0.43 + ], + "angle": 0, + "content": "As the trace of Hessian can be seen as a measure for the sharpness of the local loss landscape, (18) indicates that SGD with label noise regularization has an implicit bias toward flatter minima, which presumably promotes generalization (Hochreiter & Schmidhuber, 1997; Keskar et al., 2017; Neyshabur et al., 2017). More concretely, Blanc et al. (2020) and Li et al. (2021b) connect minimizing the trace of Hessian to finding sparse or low-rank solutions for training two-layer linear nets. Damian et al. (2021) empirically showed that good generalization correlates with a smaller trace of Hessian in training ResNets with label noise. Besides, Ma & Ying (2021) connect the trace of Hessian to the smoothness of the function represented by a deep neural net." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.435, + 0.825, + 0.507 + ], + "angle": 0, + "content": "From Theorems G.1 and G.2, we can conclude that Local SGD accelerates the process of sharpness reduction, thereby leading to better generalization. Furthermore, the regularization effect gets stronger for larger \\( H \\) and is approximately \\( K \\) times that of SGD. We also conduct experiments on non-augmented CIFAR-10 with label noise regularization to verify our conclusion. As shown in Figure 7, increasing the number of local steps indeed gives better generalization performance." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.525, + 0.819, + 0.54 + ], + "angle": 0, + "content": "G.2 THE EQUIVALENCE OF ENLARGING THE LEARNING RATE AND ADDING LOCAL STEPS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.551, + 0.825, + 0.636 + ], + "angle": 0, + "content": "In this subsection, we explain in detail why training with label noise regularization is a special case where enlarging the learning rate of SGD can bring the same generalization benefit as adding local steps. When we scale up the learning rate of SGD \\(\\eta \\mapsto \\kappa \\eta\\) (while keeping other hyperparameters unchanged), the corresponding Slow SDE is (18) with time horizon \\(\\kappa^2 T\\) instead of \\(T\\), where SGD tracks a continuous interval of \\(\\kappa^2 \\eta^2\\) per step instead of \\(\\eta^2\\). After rescaling the time horizon to \\(T\\) so that SGD tracks a continuous interval of \\(\\eta^2\\) per step, we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.644, + 0.825, + 0.677 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\zeta (t) = - \\frac {\\kappa^ {2}}{4 B} \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\zeta)\\right) \\mathrm {d} t. \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.685, + 0.825, + 0.744 + ], + "angle": 0, + "content": "Let \\(\\kappa = \\sqrt{K}\\) in (19) and we obtain the same Slow SDE as (16), which is for Local SGD with a large number of local steps. In Figure 8, we conduct experiments to verify that SGD indeed achieves comparable test accuracy to that of Local SGD with a large \\(H\\) if its learning rate is scaled up by \\(\\sqrt{K}\\) that of Local SGD." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.766, + 0.667, + 0.783 + ], + "angle": 0, + "content": "H DERIVING THE SLOW SDE AFTER APPLYING THE LSR" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.825, + 0.828 + ], + "angle": 0, + "content": "In this section, we derive the Slow SDEs for SGD and Local SGD after applying the LSR in Appendix E.2. The results are formally summarized in the following theorems." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Theorem H.1 (Slow SDE for SGD after applying the LSR). Let Assumptions 3.1 to 3.3 hold. Assume that we run SGD with learning rate \\(\\eta' = \\kappa \\eta\\) and the number of workers \\(K' = \\kappa K\\) for some constant \\(\\kappa > 0\\). Let \\(T > 0\\) be a constant and \\(\\zeta(t)\\) be the solution to (7) with the initial condition \\(\\zeta(0) = \\Phi(\\theta_0) \\in \\Gamma\\). Then for any \\(\\mathcal{C}^3\\)-smooth function \\(g(\\pmb{\\theta})\\), \\(\\max_{0 \\leq s \\leq \\frac{\\kappa T}{\\eta'^2}} \\left| \\mathbb{E}[g(\\Phi(\\pmb{\\theta}_s))] - \\mathbb{E}[g(\\pmb{\\zeta}(s\\eta'^2/\\kappa))] \\right| = \\tilde{\\mathcal{O}}(\\eta'^{0.25})\\), where \\(\\tilde{\\mathcal{O}}(\\cdot)\\) hides log factors and constants that are independent of \\(\\eta'\\) but can depend on \\(g(\\pmb{\\theta})\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.365, + 0.105, + 0.633, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.222, + 0.828, + 0.281 + ], + "angle": 0, + "content": "Figure 8: Local SGD with label noise regularization on CIFAR-10 without data augmentation using \\( K = 4 \\), \\( B_{\\mathrm{loc}} = 128 \\). SGD (\\( H = 1 \\)) indeed achieves comparable test accuracy as Local SGD with a large \\( H \\) when we scale up its learning rate to \\( \\sqrt{K} \\) times that of Local SGD. See Appendix M.6 for training details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.825, + 0.34 + ], + "angle": 0, + "content": "Proof. Replacing \\(B\\) with \\(\\kappa B\\) in the original Slow SDE for Local SGD (7) gives the following Slow SDE:" + }, + { + "type": "equation", + "bbox": [ + 0.299, + 0.345, + 0.825, + 0.388 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\underbrace {\\frac {1}{\\sqrt {\\kappa B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} \\underbrace {- \\frac {1}{2 \\kappa B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t}\\right). 
\\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.396, + 0.825, + 0.44 + ], + "angle": 0, + "content": "Note that the continuous time horizon for (20) is \\(\\kappa T\\) instead of \\(T\\) since after applying the LSR, SGD tracks a continuous interval of \\(\\kappa^2\\eta^2\\) per step instead of \\(\\eta^2\\) while the total number of steps is scaled down by \\(\\kappa\\). We can then rescale the time scaling to obtain (7) that holds for \\(T\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.446, + 0.826, + 0.545 + ], + "angle": 0, + "content": "Theorem H.2 (Slow SDE for Local SGD after applying the LSR). Let Assumptions 3.1 to 3.3 hold. Assume that we run Local SGD with learning rate \\(\\eta' = \\kappa \\eta\\), the number of workers \\(K' = \\kappa K\\), and the number of local steps \\(H' = \\frac{\\alpha}{\\kappa \\eta}\\) for some constants \\(\\alpha, \\kappa > 0\\). Let \\(T > 0\\) be a constant and \\(\\zeta(t)\\) be the solution to (21) with the initial condition \\(\\zeta(0) = \\Phi(\\bar{\\theta}^{(0)}) \\in \\Gamma\\). Then for any \\(\\mathcal{C}^3\\)-smooth function \\(g(\\pmb{\\theta})\\), \\(\\max_{0 \\leq s \\leq \\frac{\\kappa T}{H' \\eta'^2}} |\\mathbb{E}[g(\\Phi(\\bar{\\theta}^{(s)}))] - \\mathbb{E}[g(\\zeta(sH' \\eta'^2 / \\kappa))]| = \\tilde{\\mathcal{O}}(\\eta'^{0.25})\\), where \\(\\tilde{\\mathcal{O}}(\\cdot)\\) hides log factors and constants that are independent of \\(\\eta'\\) but can depend on \\(g(\\pmb{\\theta})\\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.551, + 0.825, + 0.595 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {(a) \\text {d i f f u s i o n (u n c h a n g e d)}} \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(b) \\text {d r i f t - I (u n c h a n g e d)}} \\underbrace {- \\frac {\\kappa K - 1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(c) \\text {d r i f t - I I (r e s c a l e d)}}\\right). \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.609, + 0.825, + 0.636 + ], + "angle": 0, + "content": "Proof. Replacing \\(B\\) with \\(\\kappa B\\) in the original Slow SDE for Local SGD (4) gives the following Slow SDE:" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.643, + 0.825, + 0.685 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {\\kappa B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} \\underbrace {- \\frac {1}{2 \\kappa B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(b) d r i f t - I}} \\underbrace {- \\frac {\\kappa K - 1}{2 \\kappa B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(c) d r i f t - I I}}\\right). 
\\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.693, + 0.827, + 0.737 + ], + "angle": 0, + "content": "Note that the continuous time horizon for (22) is \\(\\kappa T\\) instead of \\(T\\) since after applying the LSR, Local SGD tracks a continuous interval of \\(\\kappa^2\\eta^2\\) per step instead of \\(\\eta^2\\) while the total number of steps is scaled down by \\(\\kappa\\). We can then rescale the time scaling to obtain (21) that holds for \\(T\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.409, + 0.119 + ], + "angle": 0, + "content": "I PROOF OF THEOREM 3.1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.825, + 0.185 + ], + "angle": 0, + "content": "This section presents the proof for Theorem 3.1. First, we introduce some notations that will be used throughout this section. For the sequence of Local SGD iterates \\(\\{\\pmb{\\theta}_{k,t}^{(s)}:k\\in [K],0\\leq t\\leq H,s\\geq 0\\}\\), we introduce an auxiliary sequence \\(\\{\\hat{u}_t\\}_{t\\in \\mathbb{N}}\\), which consists of GD iterates from \\(\\bar{\\pmb{\\theta}}^{(0)}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.192, + 0.637, + 0.211 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {u}} _ {0} = \\bar {\\boldsymbol {\\theta}} ^ {(0)}, \\qquad \\hat {\\boldsymbol {u}} _ {t + 1} \\leftarrow \\hat {\\boldsymbol {u}} _ {t} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.217, + 0.825, + 0.308 + ], + "angle": 0, + "content": "For convenience, let \\(\\hat{\\pmb{u}}_t^{(s)}\\coloneqq \\hat{\\pmb{u}}_{sH + t}\\) and \\(\\pmb {z}_{k,sH + t}\\coloneqq \\pmb{z}_{k,t}^{(s)}\\) . 
We will use \\(\\hat{\\pmb{u}}_t^{(s)}\\) and \\(\\hat{\\pmb{u}}_{sH + t},\\pmb{z}_{k,t}^{(s)}\\) and \\(\\pmb {z}_{k,sH + t}\\) interchangeably. Recall that we have assumed that \\(\\mathcal{L}\\) is \\(\\mathcal{C}^3\\) -smooth with bounded second and third order derivatives. Let \\(\\nu_{2}\\coloneqq \\sup_{\\pmb {\\theta}\\in \\mathbb{R}^{d}}\\| \\nabla^{2}\\mathcal{L}(\\pmb {\\theta})\\|_{2}\\) and \\(\\nu_{3}\\coloneqq \\sup_{\\pmb {\\theta}\\in \\mathbb{R}^{d}}\\| \\nabla^{3}\\mathcal{L}(\\pmb {\\theta})\\|_{2}\\) . Since \\(\\nabla \\ell (\\pmb {\\theta};\\pmb {\\zeta})\\) is bounded, the gradient noise \\(z_{k,t}^{(s)}\\) is also bounded. We denote by \\(\\sigma_{\\mathrm{max}}\\) an upper bound such that \\(\\| z_{k,t}^{(s)}\\| _2\\leq \\sigma_{\\mathrm{max}}\\) holds for all \\(s,k,t\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.315, + 0.825, + 0.362 + ], + "angle": 0, + "content": "To prove Theorem 3.1, we will show that both Local SGD iterates \\(\\bar{\\theta}^{(s)}\\) and SGD iterates \\(\\boldsymbol{w}_{sH}\\) track GD iterates \\(\\hat{\\boldsymbol{u}}_{sH}\\) closely with high probability. For each client \\(k\\), define the following sequence \\(\\{\\hat{Z}_{k,t}:t\\geq 0\\}\\), which will be used in the proof for bounding the overall effect of noise." + }, + { + "type": "equation", + "bbox": [ + 0.252, + 0.369, + 0.744, + 0.412 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {Z}} _ {k, t} = \\sum_ {\\tau = 0} ^ {t - 1} \\left[ \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\boldsymbol {z} _ {k, \\tau}, \\quad \\hat {\\boldsymbol {Z}} _ {k, 0} = \\boldsymbol {0}, \\quad \\forall k \\in [ K ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.421, + 0.647, + 0.437 + ], + "angle": 0, + "content": "The following lemma shows that \\(\\hat{Z}_{k,t}\\) is concentrated around the origin." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.443, + 0.825, + 0.478 + ], + "angle": 0, + "content": "Lemma I.1 (Concentration property of \\(\\{\\hat{Z}_{k,t}\\}\\)). With probability at least \\(1 - \\delta\\), the following holds simultaneously for all \\(k \\in [K]\\), \\(0 \\leq t < \\left\\lfloor \\frac{T}{\\eta} \\right\\rfloor\\):" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.486, + 0.62, + 0.527 + ], + "angle": 0, + "content": "\\[\n\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\| _ {2} \\leq \\hat {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 T}{\\eta} \\log \\frac {2 T K}{\\delta \\eta}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.535, + 0.334, + 0.552 + ], + "angle": 0, + "content": "where \\(\\hat{C}_1\\coloneqq \\exp (T\\nu_2)\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.567, + 0.554, + 0.586 + ], + "angle": 0, + "content": "Proof. For each \\(\\hat{\\mathbf{Z}}_{k,t}\\), construct a sequence \\(\\{\\hat{\\mathbf{Z}}_{k,t,t'}\\}_{t'=0}^t\\):" + }, + { + "type": "equation", + "bbox": [ + 0.285, + 0.593, + 0.712, + 0.636 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {Z}} _ {k, t, t ^ {\\prime}} := \\sum_ {\\tau = 0} ^ {t ^ {\\prime} - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l}))\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\tilde {\\boldsymbol {Z}} _ {k, t, 0} ^ {(s)} = \\boldsymbol {0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.644, + 0.82, + 0.664 + ], + "angle": 0, + "content": "Since \\(\\| \\nabla^2\\mathcal{L}(\\hat{\\boldsymbol{u}}_l)\\| _2\\leq \\nu_2\\) for all \\(l\\geq 0\\), the following holds for all \\(0\\leq \\tau < t - 1\\) and \\(0 < t < \\lfloor \\frac{T}{\\eta}\\rfloor\\):" + }, + { + "type": "equation", + "bbox": [ + 0.295, + 0.67, + 0.7, + 0.715 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} 
\\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right\\| _ {2} \\leq (1 + \\nu_ {2} \\eta) ^ {t} \\leq \\exp (T \\nu_ {2}) = \\hat {C} _ {1}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.722, + 0.825, + 0.754 + ], + "angle": 0, + "content": "So \\(\\{\\hat{Z}_{k,t,t'}\\}_{t' = 0}^t\\) is a martingale with \\(\\| \\hat{Z}_{k,t,t'} - \\hat{Z}_{k,t,t' - 1}\\| _2\\leq \\hat{C}_1\\sigma_{\\max}\\). Since \\(\\hat{Z}_{k,t} = \\hat{Z}_{k,t,t}\\), by Azuma-Hoeffding's inequality," + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.76, + 0.652, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\mathbb {P} (\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\| _ {2} \\geq \\epsilon^ {\\prime}) \\leq 2 \\exp \\left(\\frac {- \\epsilon^ {\\prime 2}}{2 t \\left(\\hat {C} _ {1} \\sigma_ {\\max }\\right) ^ {2}}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.824, + 0.825, + 0.856 + ], + "angle": 0, + "content": "Taking union bound on all \\( k \\in [K] \\) and \\( 0 \\leq t \\leq \\left\\lfloor \\frac{T}{\\eta} \\right\\rfloor \\), we can conclude that with probability at least \\( 1 - \\delta \\)," + }, + { + "type": "equation", + "bbox": [ + 0.274, + 0.863, + 0.721, + 0.903 + ], + "angle": 0, + "content": "\\[\n\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\| _ {2} \\leq \\hat {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 T}{\\eta} \\log \\frac {2 T K}{\\delta \\eta}}, \\quad \\forall 0 \\leq t < \\left\\lfloor \\frac {T}{\\eta} \\right\\rfloor , k \\in [ K ].\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.91, + 0.825, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.141 + ], + "angle": 0, + "content": 
"The following lemma states that, with high probability, Local SGD iterates \\(\\theta_{k,t}^{(s)}\\) and \\(\\bar{\\theta}^{(s)}\\) closely track the gradient descent iterates \\(\\hat{\\pmb{u}}_{sH}\\) for \\(\\lfloor \\frac{T}{H\\eta}\\rfloor\\) rounds." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.145, + 0.812, + 0.161 + ], + "angle": 0, + "content": "Lemma I.2. For \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), the following inequalities hold with probability at least \\(1 - \\delta\\):" + }, + { + "type": "equation", + "bbox": [ + 0.232, + 0.169, + 0.768, + 0.204 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\hat {\\boldsymbol {u}} _ {s H + t} \\| _ {2} \\leq \\hat {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq s < \\left\\lfloor \\frac {T}{H \\eta} \\right\\rfloor , 0 \\leq t \\leq H,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.211, + 0.204, + 0.224 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.309, + 0.232, + 0.689, + 0.267 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\hat {\\boldsymbol {u}} _ {s H} \\| _ {2} \\leq \\hat {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s \\leq \\left\\lfloor \\frac {T}{H \\eta} \\right\\rfloor ,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.276, + 0.49, + 0.292 + ], + "angle": 0, + "content": "where \\(\\hat{C}_3\\) is a constant independent of \\(\\eta\\) and \\(H\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.311, + 0.825, + 0.351 + ], + "angle": 0, + "content": "Proof. Let \\(\\hat{\\Delta}_{k,t}^{(s)}\\coloneqq \\pmb{\\theta}_{k,t}^{(s)} - \\hat{\\pmb{u}}_t^{(s)}\\) and \\(\\bar{\\Delta}^{(s)}\\coloneqq \\bar{\\pmb{\\theta}}^{(s)} - \\hat{\\pmb{u}}_{0}^{(s)}\\) be the differences between the Local SGD and GD iterates. 
According to the update rule for \\(\\pmb{\\theta}_{k,t}^{(s)}\\) and \\(\\hat{\\pmb{u}}_t^{(s)}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.359, + 0.825, + 0.382 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} = \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\tag {23}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.385, + 0.825, + 0.405 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {u}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t} ^ {(s)}\\right). \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.411, + 0.374, + 0.427 + ], + "angle": 0, + "content": "Subtracting (23) by (24) gives" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.435, + 0.825, + 0.481 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {\\Delta}} _ {k, t + 1} ^ {(s)} = \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta (\\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) - \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t} ^ {(s)})) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\\\ = \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\eta \\hat {\\boldsymbol {v}} _ {k, t} ^ {(s)}, \\tag {25} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.489, + 0.825, + 0.523 + ], + "angle": 0, + "content": "where \\(\\hat{\\pmb{v}}_{k,t}^{(s)}\\) is a remainder term with norm \\(\\| \\hat{\\pmb{v}}_{k,t}^{(s)}\\| _2\\leq \\frac{\\nu_3}{2}\\| \\hat{\\pmb{\\Delta}}_{k,t}^{(s)}\\| _2^2\\). 
For the \\(s\\)-th round of Local SGD, we can apply (25) \\(t\\) times to obtain the following:" + }, + { + "type": "equation", + "bbox": [ + 0.231, + 0.53, + 0.825, + 0.638 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} = \\left[ \\prod_ {\\tau = 0} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {\\tau} ^ {(s)}\\right)\\right) \\right] \\hat {\\boldsymbol {\\Delta}} _ {k, 0} ^ {(s)} - \\eta \\underbrace {\\sum_ {\\tau = 0} ^ {t - 1} \\left[ \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\right] \\boldsymbol {z} _ {k , \\tau} ^ {(s)}} _ {\\tau} \\tag {26} \\\\ + \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\prod_ {l = \\tau + 1} ^ {t - 1} (\\pmb {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\pmb {u}} _ {l} ^ {(s)})) \\hat {\\pmb {v}} _ {k, \\tau} ^ {(s)}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.492, + 0.661 + ], + "angle": 0, + "content": "Here, \\(\\mathcal{T}\\) can be expressed in the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.669, + 0.672, + 0.711 + ], + "angle": 0, + "content": "\\[\n\\mathcal {T} = \\hat {\\boldsymbol {Z}} _ {k, s H + t} - \\left[ \\prod_ {l = s H} ^ {s H + t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {Z}} _ {k, s H}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.719, + 0.701, + 0.734 + ], + "angle": 0, + "content": "Substituting in \\( t = H \\) and taking the average, we derive the following recursion:" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.742, + 0.825, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\bar {\\boldsymbol {\\Delta}} ^ {(s + 1)} = \\frac {1}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {\\Delta}} _ {k, H} ^ 
{(s)} \\\\ = \\left[ \\prod_ {\\tau = 0} ^ {H - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {\\tau} ^ {(s)})\\right) \\right] \\bar {\\boldsymbol {\\Delta}} ^ {(s)} \\\\ - \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {Z}} _ {k, (s + 1) H} + \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\left[ \\prod_ {l = s H} ^ {(s + 1) H - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})) \\right] \\hat {\\boldsymbol {Z}} _ {k, s H} \\\\ + \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\sum_ {\\tau = 0} ^ {H - 1} \\prod_ {l = \\tau + 1} ^ {H - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\hat {\\boldsymbol {v}} _ {k, \\tau} ^ {(s)}. \\tag {27} \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.365, + 0.12 + ], + "angle": 0, + "content": "Applying (27) \\(s\\) times yields" + }, + { + "type": "equation", + "bbox": [ + 0.227, + 0.124, + 0.825, + 0.168 + ], + "angle": 0, + "content": "\\[\n\\bar {\\boldsymbol {\\Delta}} ^ {(s)} = - \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {Z}} _ {k, s H} + \\frac {\\eta}{K} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\left[ \\prod_ {l = r H + \\tau + 1} ^ {s H} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {v}} _ {k, \\tau} ^ {(r)}. 
\\tag {28}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.172, + 0.421, + 0.187 + ], + "angle": 0, + "content": "Substitute (28) into (26) and we have" + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.192, + 0.76, + 0.328 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} = - \\frac {\\eta}{K} \\sum_ {k ^ {\\prime} \\in [ K ]} \\hat {\\boldsymbol {Z}} _ {k ^ {\\prime}, s H} - \\eta \\hat {\\boldsymbol {Z}} _ {k, s H + t} + \\eta \\left[ \\prod_ {l = s H} ^ {s H + t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})) \\right] \\hat {\\boldsymbol {Z}} _ {k, s H} \\\\ + \\frac {\\eta}{K} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\sum_ {k ^ {\\prime} \\in [ K ]} \\left[ \\prod_ {l = r H + \\tau + 1} ^ {s H + t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {v}} _ {k ^ {\\prime}, \\tau} ^ {(r)} \\\\ + \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\left[ \\prod_ {l = s H + \\tau + 1} ^ {s H + t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {v}} _ {k, \\tau} ^ {(s)}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.333, + 0.597, + 0.348 + ], + "angle": 0, + "content": "By Cauchy-Schwartz inequality and triangle inequality, we have" + }, + { + "type": "equation", + "bbox": [ + 0.264, + 0.352, + 0.824, + 0.447 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\frac {\\eta}{K} \\left(\\sum_ {k ^ {\\prime} \\in [ K ]} \\left\\| \\hat {\\boldsymbol {Z}} _ {k ^ {\\prime}, s H} \\right\\| _ {2}\\right) + \\eta \\left\\| \\hat {\\boldsymbol {Z}} _ {k, s H + t} \\right\\| _ {2} + \\eta \\hat {C} _ {1} \\left\\| \\hat {\\boldsymbol {Z}} _ {k, s H} \\right\\| _ {2} \\tag {29} \\\\ + \\frac {\\eta \\hat {C} _ {1} \\nu_ {3}}{2 K} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\sum_ {k ^ {\\prime} \\in [ K ]} \\| \\hat {\\mathbf {A}} _ {k ^ {\\prime}, \\tau} ^ {(r)} \\| _ {2} ^ {2} + \\frac {\\eta \\hat {C} _ {1} \\nu_ {3}}{2} \\sum_ {\\tau = 0} ^ {t - 1} \\| \\hat {\\mathbf {A}} _ {k, \\tau} ^ {(r)} \\| _ {2} ^ {2}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.453, + 0.33, + 0.47 + ], + "angle": 0, + "content": "where \\(\\hat{C}_1 = \\exp (\\nu_2T)\\)" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.475, + 0.554, + 0.492 + ], + "angle": 0, + "content": "Below we prove by induction that for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), if" + }, + { + "type": "equation", + "bbox": [ + 0.283, + 0.496, + 0.825, + 0.538 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\right\\| _ {2} \\leq \\hat {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 T}{\\eta} \\log \\frac {2 T K}{\\eta \\delta}}, \\quad \\forall 0 \\leq t < \\left\\lfloor \\frac {T}{\\eta} \\right\\rfloor , k \\in [ K ], \\tag {30}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.542, + 0.75, + 0.562 + ], + "angle": 0, + "content": "then there exists a constant \\(\\hat{C}_2\\) such 
that for all \\(k\\in [K],0\\leq s < \\left\\lfloor \\frac{T}{\\eta H}\\right\\rfloor\\) and \\(0\\leq t\\leq H\\)" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.567, + 0.825, + 0.607 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\hat {\\Delta} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\hat {C} _ {2} \\sqrt {\\eta \\log \\frac {2 T K}{\\eta \\delta}}. \\tag {31}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.613, + 0.825, + 0.661 + ], + "angle": 0, + "content": "First, for all \\( k \\in [K] \\), \\( \\| \\hat{\\Delta}_{k,0}^{(0)}\\|_2 = 0 \\) and hence (31) holds. Assuming that (31) holds for all \\( \\hat{\\Delta}_{k',\\tau}^{(r)} \\) where \\( k' \\in [K] \\), \\( 0 \\leq r < s \\), \\( 0 \\leq \\tau \\leq H \\) and \\( r = s \\), \\( 0 \\leq \\tau < t \\), then by (29), for all \\( k \\in [K] \\), the following holds:" + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.666, + 0.71, + 0.707 + ], + "angle": 0, + "content": "\\[\n\\| \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} \\| _ {2} \\leq 3 \\hat {C} _ {1} ^ {2} \\sigma_ {\\max} \\sqrt {2 T \\eta \\log \\frac {2 T K}{\\eta \\delta}} + \\hat {C} _ {1} \\hat {C} _ {2} ^ {2} T \\eta \\nu_ {3} \\log \\frac {2 T K}{\\eta \\delta}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.712, + 0.827, + 0.762 + ], + "angle": 0, + "content": "Let \\(\\hat{C}_2 \\geq 6\\hat{C}_1^2\\sigma_{\\max}\\sqrt{2T}\\). Then for sufficiently small \\(\\eta\\), (31) holds. By Lemma I.1, (30) holds with probability at least \\(1 - \\delta\\). Furthermore, notice that \\(\\bar{\\pmb{\\theta}}^{(s)} - \\hat{\\pmb{u}}_{sH} = \\frac{1}{K}\\sum_{k\\in [K]}\\hat{\\pmb{\\Delta}}_{k,H}^{(s - 1)}\\). Hence we have the lemma." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.776, + 0.825, + 0.822 + ], + "angle": 0, + "content": "The iterates of standard SGD can be viewed as the local iterates on a single client with the number of local steps \\(\\left\\lfloor \\frac{T}{\\eta} \\right\\rfloor\\). 
Therefore, we can directly apply Lemma I.2 and obtain the following lemma about the SGD iterates \\(\\boldsymbol{w}_t\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.824, + 0.753, + 0.841 + ], + "angle": 0, + "content": "Corollary 1.1. For \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), the following holds with probability at least \\(1 - \\delta\\):" + }, + { + "type": "equation", + "bbox": [ + 0.316, + 0.845, + 0.68, + 0.88 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {w} _ {s H} - \\hat {\\boldsymbol {u}} _ {s H} \\| _ {2} \\leq \\hat {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s \\leq \\frac {T}{H \\eta},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.884, + 0.489, + 0.9 + ], + "angle": 0, + "content": "where \\(\\hat{C}_3\\) is the same constant as in Lemma I.2." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.766, + 0.926 + ], + "angle": 0, + "content": "Applying Lemma I.2 and Corollary I.1 and taking the union bound, we have Theorem 3.1." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.525, + 0.119 + ], + "angle": 0, + "content": "J PROOF OUTLINE OF MAIN THEOREMS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.265 + ], + "angle": 0, + "content": "We adopt the general framework proposed by Li et al. (2019a) to bound the closeness of discrete algorithms and SDE solutions via the method of moments. 
However, their framework is not directly applicable to our case since they provide approximation guarantees for discrete algorithms with learning rate \\(\\eta\\) for \\(\\mathcal{O}(\\eta^{-1})\\) steps while we want to capture Local SGD for \\(\\mathcal{O}(\\eta^{-2})\\) steps. To overcome this difficulty, we treat \\(R_{\\mathrm{grp}} := \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}} \\right\\rfloor\\) rounds as a \"giant step\" of Local SGD with an \"effective\" learning rate \\(\\eta^{1 - \\beta}\\), where \\(\\beta\\) is a constant in \\((0,1)\\), and derive the recursive formulas to compute the moments for the change in every step, every round, and every \\(R_{\\mathrm{grp}}\\) rounds. The formulation of the recursions requires a detailed analysis of the limiting dynamics of the iterate and careful control of approximation errors." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.27, + 0.825, + 0.359 + ], + "angle": 0, + "content": "The dynamics of the iterate can be divided into two phases: the approaching phase (Phase 1) and the drift phase (Phase 2). The approaching phase only lasts for \\(\\mathcal{O}(\\log \\frac{1}{\\eta})\\) rounds, during which the iterate is quickly driven to the minimizer manifold by the negative gradient and ends up within only \\(\\tilde{\\mathcal{O}} (\\sqrt{\\eta})\\) from \\(\\Gamma\\) (see Appendix K.5). After that, the iterate enters the drifting phase and moves in the tangent space of \\(\\Gamma\\) while staying close to \\(\\Gamma\\) (see Appendix K.6). The closeness of the iterates (local and global) and \\(\\Gamma\\) is summarized in the following theorem." + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.36, + 0.825, + 0.392 + ], + "angle": 0, + "content": "Theorem J.1 (Closeness of the iterates and \\(\\Gamma\\)). 
For \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), with probability at least \\(1 - \\delta\\), for all \\(\\mathcal{O}(\\log \\frac{1}{\\eta}) \\leq s \\leq \\lfloor T / (H\\eta^2) \\rfloor\\)," + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.398, + 0.693, + 0.432 + ], + "angle": 0, + "content": "\\[\n\\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\in \\Gamma , \\quad \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\| _ {2} = \\mathcal {O} \\left(\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.436, + 0.619, + 0.455 + ], + "angle": 0, + "content": "Also, for all \\(\\mathcal{O}(\\log \\frac{1}{\\eta}) \\leq s < \\lfloor T / (H\\eta^2) \\rfloor\\), \\(k \\in [K]\\) and \\(0 \\leq t \\leq H\\)," + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.46, + 0.641, + 0.495 + ], + "angle": 0, + "content": "\\[\n\\| \\pmb {\\theta} _ {k, t} ^ {(s)} - \\Phi (\\bar {\\pmb {\\theta}} ^ {(s)}) \\| _ {2} = \\mathcal {O} \\left(\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.498, + 0.509, + 0.513 + ], + "angle": 0, + "content": "Here, \\(\\mathcal{O}(\\cdot)\\) hides constants independent of \\(\\eta\\) and \\(\\delta\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.524, + 0.825, + 0.553 + ], + "angle": 0, + "content": "To control the approximation errors, we also provide a high probability bound for the change of the manifold projection within \\( R_{\\mathrm{grp}} \\) rounds." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.554, + 0.825, + 0.586 + ], + "angle": 0, + "content": "Theorem J.2 (High probability bound for the change of manifold projection). 
For \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\) with probability at least \\(1 - \\delta\\), for all \\(0 \\leq s \\leq \\lfloor T / (H\\eta^2) \\rfloor - R_{\\mathrm{grp}}\\) and \\(0 \\leq r \\leq R_{\\mathrm{grp}}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.589, + 0.776, + 0.624 + ], + "angle": 0, + "content": "\\[\n\\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}), \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s + r)}) \\in \\Gamma , \\quad \\| \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s + r)}) - \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\| _ {2} = \\mathcal {O} \\left(\\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.627, + 0.513, + 0.643 + ], + "angle": 0, + "content": "where \\(\\mathcal{O}(\\cdot)\\) hides constants independent of \\(\\eta\\) and \\(\\delta\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.652, + 0.825, + 0.682 + ], + "angle": 0, + "content": "The proof of Theorems J.1 and J.2 is based on the analysis of the dynamics of the iterate and presented in Appendix K.7." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.688, + 0.826, + 0.798 + ], + "angle": 0, + "content": "Utilizing Theorems J.1 and J.2, we move on to estimate the first and second moments of the change of the manifold projection every \\( R_{\\mathrm{grp}} \\) rounds. However, the randomness during training might drive the iterate far from the manifold (with a low probability, though), making the dynamics intractable. To tackle this issue, we construct a well-behaved auxiliary sequence \\( \\{\\hat{\\pmb{\\theta}}_{k,t}^{(s)}\\} \\), which is constrained to the neighborhood of \\( \\Gamma \\) and equals the original sequence \\( \\{\\pmb{\\theta}_{k,t}^{(s)}\\} \\) with high probability (see Definition K.5). Then we can formulate recursions for the change of manifold projection of the auxiliary sequence using the nice properties near \\( \\Gamma \\). 
The estimate of moments is summarized in Theorem K.2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.803, + 0.825, + 0.833 + ], + "angle": 0, + "content": "Finally, based on the moment estimates, we apply the framework in Li et al. (2019a) to show that the manifold projection and the SDE solution are weak approximations of each other in Appendix K.10." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.851, + 0.528, + 0.867 + ], + "angle": 0, + "content": "K PROOF DETAILS OF MAIN THEOREMS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "The detailed proof is organized as follows. In Appendix K.1, we introduce the notations that will be used throughout the proof. To establish preliminary knowledge, Appendix K.2 provides explicit expression for the projection operator \\(\\Phi (\\cdot)\\), and Appendix K.3 presents lemmas about gradient descent" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.204 + ], + "angle": 0, + "content": "(GD) and gradient flow (GF). Based on the preliminary knowledge, we construct a nested working zone to characterize the closeness of the iterate and \\(\\Gamma\\) in Appendix K.4. Appendices K.5 to K.10 make up the main body of the proof. Specifically, Appendices K.5 and K.6 analyze the dynamics of Local SGD iterates for phases 1 and 2, respectively. Utilizing these analyses, we provide the proof of Theorems J.1 and J.2 in Appendix K.7 and the proof of Theorem 3.3 in Appendix K.8. Then we derive the estimation for the first and second moments of one \"giant step\" \\(\\Phi (\\bar{\\theta}^{(s + R_{\\mathrm{grp}})}) - \\Phi (\\bar{\\theta}^{(s)})\\) in Appendix K.9. 
Finally, we prove the approximation theorem 3.2 in Appendix K.10." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.22, + 0.396, + 0.234 + ], + "angle": 0, + "content": "K.1 ADDITIONAL NOTATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.245, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Let \\( R_{\\mathrm{tot}} \\coloneqq \\left\\lfloor \\frac{T}{H\\eta^2} \\right\\rfloor \\) be the total number of rounds. Denote by \\( \\phi^{(s)} \\) the manifold projection of the global iterate at the beginning of round \\( s \\). Let \\( \\pmb{x}_{k,t}^{(s)} \\coloneqq \\pmb{\\theta}_{k,t}^{(s)} - \\phi^{(s)} \\) be the difference between the local iterate and the manifold projection of the global iterate. Also define \\( \\bar{\\pmb{x}}_H^{(s)} \\coloneqq \\frac{1}{K}\\sum_{k\\in [K]}\\pmb{x}_{k,H}^{(s)} \\) and \\( \\bar{\\pmb{x}}_0^{(s)} \\coloneqq \\frac{1}{K}\\sum_{k\\in [K]}\\pmb{x}_{k,0}^{(s)} \\) which is the average of \\( \\pmb{x}_{k,t}^{(s)} \\) among \\( K \\) workers at step 0 and \\( H \\). Then for all \\( k\\in [K] \\), \\( \\pmb{x}_{k,0}^{(s)} = \\bar{\\pmb{x}}_0^{(s)} = \\bar{\\pmb{\\theta}}^{(s)} - \\phi^{(s)} \\). Finally, Since \\( \\nabla \\ell(\\pmb{\\theta};\\pmb{\\zeta}) \\) is bounded, the gradient noise \\( z_{k,t}^{(s)} \\) is also bounded and we denote by \\( \\sigma_{\\max} \\) the upper bound such that \\( \\| z_{k,t}^{(s)}\\|_2 \\leq \\sigma_{\\max}, \\forall s,k,t \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.369, + 0.825, + 0.399 + ], + "angle": 0, + "content": "We first introduce the notion of \\(\\mu\\)-PL. We will later show that there exists a neighborhood of the minimizer manifold \\(\\Gamma\\) where \\(\\mathcal{L}\\) satisfies \\(\\mu\\)-PL." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.402, + 0.825, + 0.432 + ], + "angle": 0, + "content": "Definition K.1 (Polyak-Lojasiewicz Condition). 
For \\(\\mu > 0\\), we say a function \\(\\mathcal{L}(\\cdot)\\) satisfies \\(\\mu\\)-Polyak-Lojasiewicz condition (abbreviated as \\(\\mu\\)-PL) on set \\(U\\) if" + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.437, + 0.63, + 0.466 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{2} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta}) \\| _ {2} ^ {2} \\geq \\mu (\\mathcal {L} (\\boldsymbol {\\theta}) - \\inf _ {\\boldsymbol {\\theta} ^ {\\prime} \\in U} \\mathcal {L} (\\boldsymbol {\\theta} ^ {\\prime})).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.481, + 0.825, + 0.526 + ], + "angle": 0, + "content": "We then introduce the definitions of the \\(\\epsilon\\)-ball at a point and the \\(\\epsilon\\)-neighborhood of a set. For \\(\\pmb{\\theta} \\in \\mathbb{R}^d\\) and \\(\\epsilon > 0\\), \\(B^{\\epsilon}(\\pmb{\\theta}) \\coloneqq \\{\\pmb{\\theta}' : \\| \\pmb{\\theta}' - \\pmb{\\theta}\\|_2 < \\epsilon\\}\\) is the open \\(\\epsilon\\)-ball centered at \\(\\pmb{\\theta}\\). For a set \\(\\mathcal{Z} \\subseteq \\mathbb{R}^d\\), \\(\\mathcal{Z}^{\\epsilon} \\coloneqq \\bigcup_{\\pmb{\\theta} \\in \\mathcal{Z}} B^{\\epsilon}(\\pmb{\\theta})\\) is the \\(\\epsilon\\)-neighborhood of \\(\\mathcal{Z}\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.541, + 0.635, + 0.555 + ], + "angle": 0, + "content": "K.2 COMPUTING THE DERIVATIVES OF THE LIMITING MAPPING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.567, + 0.825, + 0.597 + ], + "angle": 0, + "content": "In subsection, we present lemmas that relate the derivatives of the limiting mapping \\(\\Phi(\\cdot)\\) to the derivatives of the loss function \\(\\mathcal{L}(\\cdot)\\). We first introduce the operator \\(\\nu_{H}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.599, + 0.825, + 0.642 + ], + "angle": 0, + "content": "Definition K.2. 
For a semi-definite symmetric matrix \\( \\mathbf{H} \\in \\mathbb{R}^{d \\times d} \\), let \\( \\lambda_j \\), \\( \\mathbf{v}_j \\) be the \\( j \\)-th eigenvalue and eigenvector and \\( \\mathbf{v}_j \\)'s form an orthonormal basis of \\( \\mathbb{R}^d \\). Then, define the operator \\( \\mathcal{V}_{\\mathbf{H}}: \\mathbb{R}^{d \\times d} \\to \\mathbb{R}^{d \\times d} \\) as" + }, + { + "type": "equation", + "bbox": [ + 0.272, + 0.642, + 0.724, + 0.679 + ], + "angle": 0, + "content": "\\[\n\\mathcal {V} _ {\\boldsymbol {H}} (\\boldsymbol {M}) := \\sum_ {i, j: \\lambda_ {i} \\neq 0 \\vee \\lambda_ {j} \\neq 0} \\frac {1}{\\lambda_ {i} + \\lambda_ {j}} \\left\\langle \\boldsymbol {M}, \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top}, \\forall \\boldsymbol {M} \\in \\mathbb {R} ^ {d \\times d}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.825, + 0.721 + ], + "angle": 0, + "content": "Intuitively, this operator projects \\( M \\) to the base matrix \\( \\mathbf{v}_i\\mathbf{v}_j^\\top \\) and sums up the projections with weights \\( \\frac{1}{\\lambda_i + \\lambda_j} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.825, + 0.761 + ], + "angle": 0, + "content": "Additionally, for \\(\\theta \\in \\Gamma\\), denote by \\(T_{\\theta}\\) and \\(T_{\\theta}^{\\perp}\\) the tangent and normal space of \\(\\Gamma\\) at \\(\\theta\\) respectively. Lemmas K.1 to K.4 are from Li et al. (2021b). We include them to make the paper self-contained." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.825, + 0.793 + ], + "angle": 0, + "content": "Lemma K.1 (Lemma C.1 of Li et al. (2021b)). For any \\(\\pmb{\\theta} \\in \\Gamma\\) and any \\(\\pmb{v} \\in T_{\\pmb{\\theta}}(\\Gamma)\\), it holds that \\(\\nabla^2 \\mathcal{L}(\\pmb{\\theta}) \\pmb{v} = \\mathbf{0}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.796, + 0.825, + 0.825 + ], + "angle": 0, + "content": "Lemma K.2 (Lemma 4.3 of Li et al. (2021b)). For any \\(\\pmb{\\theta} \\in \\Gamma\\), \\(\\partial \\Phi(\\pmb{\\theta}) \\in \\mathbb{R}^{d \\times d}\\) is the projection matrix onto the tangent space \\(T_{\\pmb{\\theta}}(\\Gamma)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.825, + 0.856 + ], + "angle": 0, + "content": "Lemma K.3 (Lemma C.4 of Li et al. (2021b)). For any \\(\\pmb{\\theta} \\in \\Gamma\\), \\(\\pmb{u} \\in \\mathbb{R}^d\\) and \\(\\pmb{v} \\in T_{\\pmb{\\theta}}(\\Gamma)\\), it holds that" + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.862, + 0.774, + 0.881 + ], + "angle": 0, + "content": "\\[\n\\partial^ {2} \\Phi (\\boldsymbol {\\theta}) [ \\boldsymbol {v}, \\boldsymbol {u} ] = - \\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) [ \\boldsymbol {v}, \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) ^ {+} \\boldsymbol {u} ] - \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) ^ {+} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) [ \\boldsymbol {v}, \\partial \\Phi (\\boldsymbol {\\theta}) \\boldsymbol {u} ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.885, + 0.825, + 0.903 + ], + "angle": 0, + "content": "Lemma K.4 (Lemma C.6 of Li et al. (2021b)). 
For any \\(\\pmb{\\theta} \\in \\Gamma\\) and \\(\\pmb{\\Sigma} \\in \\operatorname{span}\\{\\pmb{u}\\pmb{u}^{\\top} \\mid \\pmb{u} \\in T_{\\pmb{\\theta}}^{\\perp}(\\Gamma)\\}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.339, + 0.908, + 0.657, + 0.927 + ], + "angle": 0, + "content": "\\[\n\\left\\langle \\partial^ {2} \\Phi (\\boldsymbol {\\theta}), \\boldsymbol {\\Sigma} \\right\\rangle = - \\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) \\left[ \\mathcal {V} _ {\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta})} (\\boldsymbol {\\Sigma}) \\right].\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.547, + 0.12 + ], + "angle": 0, + "content": "Lemma K.5. For all \\(\\theta \\in \\Gamma\\), \\(\\pmb{u}, \\pmb{v} \\in T_{\\theta}(\\Gamma)\\), it holds that" + }, + { + "type": "equation", + "bbox": [ + 0.42, + 0.125, + 0.825, + 0.143 + ], + "angle": 0, + "content": "\\[\n\\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} [ \\boldsymbol {v} \\boldsymbol {u} ^ {\\top} ] = \\mathbf {0}. \\tag {32}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.157, + 0.827, + 0.215 + ], + "angle": 0, + "content": "Proof. This proof is inspired by Lemma C.4 of Li et al. (2021b). For any \\(\\pmb{\\theta} \\in \\Gamma\\), consider a parameterized smooth curve \\(\\pmb{v}(t), t \\geq 0\\) on \\(\\Gamma\\) such that \\(\\pmb{v}(0) = \\pmb{\\theta}\\) and \\(\\pmb{v}'(0) = \\pmb{v}\\). Let \\(P_{\\parallel}(t) = \\partial \\Phi(\\pmb{v}(t))\\), \\(P_{\\perp}(t) = I - \\partial \\Phi(\\pmb{v}(t))\\) and \\(\\pmb{H}(t) = \\nabla^2 \\mathcal{L}(\\pmb{v}(t))\\). By Lemma C.1 and 4.3 in Li et al. 
(2021b)," + }, + { + "type": "equation", + "bbox": [ + 0.426, + 0.222, + 0.572, + 0.238 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {H} (t) = \\boldsymbol {P} _ {\\perp} (t) \\boldsymbol {H} (t).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.243, + 0.506, + 0.258 + ], + "angle": 0, + "content": "Take the derivative with respect to \\(t\\) on both sides," + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.263, + 0.655, + 0.302 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\boldsymbol {H} ^ {\\prime} (t) = \\boldsymbol {P} _ {\\perp} (t) \\boldsymbol {H} ^ {\\prime} (t) + \\boldsymbol {P} _ {\\perp} ^ {\\prime} (t) \\boldsymbol {H} (t) \\\\ \\Rightarrow \\boldsymbol {P} _ {\\parallel} (t) \\boldsymbol {H} ^ {\\prime} (t) = \\boldsymbol {P} _ {\\perp} ^ {\\prime} (t) \\boldsymbol {H} (t) = - \\boldsymbol {P} _ {\\parallel} ^ {\\prime} (t) \\boldsymbol {H} (t). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.307, + 0.296, + 0.32 + ], + "angle": 0, + "content": "At \\(t = 0\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.396, + 0.326, + 0.825, + 0.345 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {P} _ {\\parallel} (0) \\boldsymbol {H} ^ {\\prime} (0) = - \\boldsymbol {P} _ {\\parallel} ^ {\\prime} (0) \\boldsymbol {H} (0). \\tag {33}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.351, + 0.825, + 0.399 + ], + "angle": 0, + "content": "WLOG let \\( H(0) = \\mathrm{diag}(\\lambda_1, \\dots, \\lambda_d), \\in \\mathbb{R}^{d \\times d} \\), where \\( \\lambda_i = 0 \\) for all \\( m < i \\leq d \\). Therefore \\( P_{\\perp}(0) = \\begin{bmatrix} I_m & 0 \\\\ 0 & 0 \\end{bmatrix} \\), \\( P_{\\parallel}(0) = \\begin{bmatrix} 0 & 0 \\\\ 0 & I_{d - m} \\end{bmatrix} \\). Decompose \\( P_{\\parallel}'(0) \\), \\( H(0) \\) and \\( H'(0) \\) as follows." 
+ }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.404, + 0.796, + 0.44 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {P} _ {\\parallel} ^ {\\prime} (0) = \\left[ \\begin{array}{c c} \\boldsymbol {P} _ {\\parallel , 1 1} ^ {\\prime} (0) & \\boldsymbol {P} _ {\\parallel , 1 2} ^ {\\prime} (0) \\\\ \\boldsymbol {P} _ {\\parallel , 2 1} ^ {\\prime} (0) & \\boldsymbol {P} _ {\\parallel , 2 2} ^ {\\prime} (0) \\end{array} \\right], \\boldsymbol {H} (0) = \\left[ \\begin{array}{c c} \\boldsymbol {H} _ {1 1} (0) & \\boldsymbol {0} \\\\ \\boldsymbol {0} & \\boldsymbol {0} \\end{array} \\right], \\boldsymbol {H} ^ {\\prime} (0) = \\left[ \\begin{array}{c c} \\boldsymbol {H} _ {1 1} ^ {\\prime} (0) & \\boldsymbol {H} _ {1 2} ^ {\\prime} (0) \\\\ \\boldsymbol {H} _ {2 1} ^ {\\prime} (0) & \\boldsymbol {H} _ {2 2} ^ {\\prime} (0) \\end{array} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.444, + 0.504, + 0.459 + ], + "angle": 0, + "content": "Substituting the decomposition into (33), we have" + }, + { + "type": "equation", + "bbox": [ + 0.332, + 0.464, + 0.666, + 0.5 + ], + "angle": 0, + "content": "\\[\n\\left[ \\begin{array}{c c} \\mathbf {0} & \\mathbf {0} \\\\ \\mathbf {H} _ {2 1} ^ {\\prime} (0) & \\mathbf {H} _ {2 2} ^ {\\prime} (0) \\end{array} \\right] = - \\left[ \\begin{array}{c c} \\mathbf {P} _ {\\parallel , 1 1} ^ {\\prime} (0) \\mathbf {H} _ {1 1} (0) & \\mathbf {0} \\\\ \\mathbf {P} _ {\\parallel , 2 1} ^ {\\prime} (0) \\mathbf {H} _ {1 1} (0) & \\mathbf {0} \\end{array} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.504, + 0.361, + 0.521 + ], + "angle": 0, + "content": "Therefore, \\( H_{22}'(0) = 0 \\) and" + }, + { + "type": "equation", + "bbox": [ + 0.329, + 0.525, + 0.668, + 0.56 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {P} _ {\\parallel} (0) \\boldsymbol {H} ^ {\\prime} (0) = - \\boldsymbol {P} _ {\\parallel} ^ {\\prime} (0) \\boldsymbol {H} (0) = - \\left[ \\begin{array}{c c} \\boldsymbol {0} & 
\\boldsymbol {0} \\\\ \\boldsymbol {H} _ {2 1} ^ {\\prime} (0) & \\boldsymbol {0} \\end{array} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.566, + 0.825, + 0.6 + ], + "angle": 0, + "content": "Any \\(\\pmb{u} \\in T_{\\pmb{\\theta}}(\\Gamma)\\) can be decomposed as \\(\\pmb{u} = [\\pmb{0}, \\pmb{u}_2]^\\top\\) where \\(\\pmb{u}_2 \\in \\mathbb{R}^{d - m}\\). With this decomposition, we have \\(\\pmb{P}_{\\parallel}(0)\\pmb{H}'(0)\\pmb{u} = \\pmb{0}\\). Also, note that \\(\\pmb{H}'(0) = \\nabla^3\\mathcal{L}(\\pmb{\\theta})[\\pmb{v}]\\). Hence," + }, + { + "type": "equation", + "bbox": [ + 0.408, + 0.606, + 0.589, + 0.624 + ], + "angle": 0, + "content": "\\[\n\\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) [ \\boldsymbol {v} \\boldsymbol {u} ^ {T} ] = \\boldsymbol {0}.\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.629, + 0.825, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.66, + 0.509, + 0.674 + ], + "angle": 0, + "content": "K.3 PRELIMINARY LEMMAS FOR GD AND GF" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.686, + 0.827, + 0.728 + ], + "angle": 0, + "content": "In this subsection, we introduce a few useful preliminary lemmas about gradient descent and gradient flow. Before presenting the lemmas, we introduce some notations and assumptions that will be used in this subsection." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.826, + 0.798 + ], + "angle": 0, + "content": "Assume that the loss function \\(\\mathcal{L}(\\pmb{\\theta})\\) is \\(\\rho\\)-smooth and \\(\\mu\\)-PL in an open, convex neighborhood \\(U\\) of a local minimizer \\(\\pmb{\\theta}^*\\). Denote by \\(\\mathcal{L}^* := \\mathcal{L}(\\pmb{\\theta}^*)\\) the minimum value for simplicity. Let \\(\\epsilon'\\) be the radius of the open \\(\\epsilon'\\)-ball centered at \\(\\pmb{\\theta}^*\\) such that \\(B^{\\epsilon'}(\\pmb{\\theta}^*) \\subseteq U\\). 
We also define a potential function \\(\\tilde{\\Psi}(\\pmb{\\theta}) := \\sqrt{\\mathcal{L}(\\pmb{\\theta}) - \\mathcal{L}^*}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.802, + 0.825, + 0.832 + ], + "angle": 0, + "content": "Consider gradient descent iterates \\(\\{\\hat{u}_t\\}_{t\\in \\mathbb{N}}\\) following the update rule \\(\\hat{\\pmb{u}}_{t + 1} = \\hat{\\pmb{u}}_t - \\eta \\nabla \\mathcal{L}(\\hat{\\pmb{u}}_t)\\). We first introduce the descent lemma for gradient descent." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.613, + 0.853 + ], + "angle": 0, + "content": "Lemma K.6 (Descent lemma for GD). If \\(\\hat{\\boldsymbol{u}}_t\\in U\\) and \\(\\eta \\leq \\frac{1}{\\rho}\\), then" + }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.858, + 0.619, + 0.885 + ], + "angle": 0, + "content": "\\[\n\\frac {\\eta}{2} \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2} \\leq \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.89, + 0.204, + 0.902 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.909, + 0.637, + 0.927 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t + 1}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) \\left(\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t}\\right) - \\mathcal {L} ^ {*}\\right).\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.344, + 0.12 + ], + "angle": 0, + "content": "Proof. 
By \\(\\rho\\)-smoothness," + }, + { + "type": "equation", + "bbox": [ + 0.283, + 0.126, + 0.717, + 0.213 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1}) \\leq \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) + \\langle \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}), \\hat {\\boldsymbol {u}} _ {t + 1} - \\hat {\\boldsymbol {u}} _ {t} \\rangle + \\frac {\\rho \\eta^ {2}}{2} \\| \\hat {\\boldsymbol {u}} _ {t + 1} - \\hat {\\boldsymbol {u}} _ {t} \\| _ {2} ^ {2} \\\\ = \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\eta (1 - \\frac {\\rho \\eta}{2}) \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2} \\\\ \\leq \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\frac {\\eta}{2} \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.219, + 0.405, + 0.234 + ], + "angle": 0, + "content": "By the definition of \\(\\mu\\)-PL, we have" + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.24, + 0.638, + 0.258 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t + 1}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) \\left(\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t}\\right) - \\mathcal {L} ^ {*}\\right).\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.265, + 0.825, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.294, + 0.452, + 0.311 + ], + "angle": 0, + "content": "Then we prove the Lipschitzness of \\(\\tilde{\\Psi} (\\pmb {\\theta})\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.315, + 0.825, + 0.345 + ], + "angle": 0, + "content": "Lemma K.7 (Lipschitzness of \\(\\tilde{\\Psi}(\\pmb{\\theta})\\)). \\(\\tilde{\\Psi}(\\pmb{\\theta})\\) is \\(\\sqrt{2\\rho}\\)-Lipschitz for \\(\\pmb{\\theta} \\in U\\). 
That is, for any \\(\\pmb{\\theta}_1, \\pmb{\\theta}_2 \\in U\\)," + }, + { + "type": "equation", + "bbox": [ + 0.372, + 0.351, + 0.626, + 0.371 + ], + "angle": 0, + "content": "\\[\n\\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) \\right| \\leq \\sqrt {2 \\rho} \\| \\boldsymbol {\\theta} _ {1} - \\boldsymbol {\\theta} _ {2} \\| _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.385, + 0.825, + 0.417 + ], + "angle": 0, + "content": "Proof. Fix \\(\\pmb{\\theta}_{1}\\) and \\(\\pmb{\\theta}_{2}\\). Denote by \\(\\pmb{\\theta}(t) \\coloneqq (1 - t)\\pmb{\\theta}_{1} + t\\pmb{\\theta}_{2}\\) the convex combination of \\(\\pmb{\\theta}_{1}\\) and \\(\\pmb{\\theta}_{2}\\) where \\(t \\in [0,1]\\). Further define \\(f(t) \\coloneqq \\tilde{\\Psi}(\\pmb{\\theta}(t))\\). Below we consider two cases." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.431, + 0.636, + 0.447 + ], + "angle": 0, + "content": "Case 1. 
If \\(\\forall t\\in (0,1)\\) \\(f(t) > 0\\) , then \\(f(t)\\) is differentiable on \\((0,1)\\)" + }, + { + "type": "equation", + "bbox": [ + 0.311, + 0.454, + 0.686, + 0.628 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| = | f (1) - f (0) | \\\\ = \\left| \\int_ {0} ^ {1} f ^ {\\prime} (t) \\mathrm {d} t \\right| \\\\ = \\left| \\int_ {0} ^ {1} \\left\\langle \\nabla \\tilde {\\Psi} (\\boldsymbol {\\theta} (t)), \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\right\\rangle \\mathrm {d} t \\right| \\\\ = \\left| \\int_ {0} ^ {1} \\frac {\\langle \\nabla \\mathcal {L} (\\boldsymbol {\\theta} (t)) , \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\rangle}{\\sqrt {\\mathcal {L} (\\boldsymbol {\\theta} (t)) - \\mathcal {L} ^ {*}}} \\mathrm {d} t \\right| \\\\ \\leq \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2} \\int_ {0} ^ {1} \\frac {\\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta} (t)) \\| _ {2}}{\\sqrt {\\mathcal {L} (\\boldsymbol {\\theta} (t)) - \\mathcal {L} ^ {*}}} \\mathrm {d} t. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.634, + 0.422, + 0.65 + ], + "angle": 0, + "content": "By \\(\\rho\\)-smoothness of \\(\\mathcal{L}\\), for all \\(\\pmb{\\theta} \\in U\\)," + }, + { + "type": "equation", + "bbox": [ + 0.394, + 0.655, + 0.602, + 0.674 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta}) \\right\\| _ {2} ^ {2} \\leq 2 \\rho (\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.679, + 0.687, + 0.707 + ], + "angle": 0, + "content": "Since \\(\\sqrt{\\mathcal{L}(\\pmb{\\theta}(t)) - \\mathcal{L}^*} > 0\\) for all \\(t \\in (0,1)\\), \\(\\frac{\\|\\nabla\\mathcal{L}(\\pmb{\\theta}(t))\\|_2}{\\sqrt{\\mathcal{L}(\\pmb{\\theta}(t)) - \\mathcal{L}^*}} \\leq \\sqrt{2\\rho}\\). Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.713, + 0.629, + 0.734 + ], + "angle": 0, + "content": "\\[\n\\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| \\leq \\sqrt {2 \\rho} \\left\\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\right\\| _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.746, + 0.502, + 0.763 + ], + "angle": 0, + "content": "Case 2. If \\(\\exists t' \\in (0,1)\\) such that \\(f(t') = 0\\), then" + }, + { + "type": "equation", + "bbox": [ + 0.282, + 0.768, + 0.715, + 0.859 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| = | f (1) - f (0) | \\\\ = \\left| (1 - t ^ {\\prime}) \\frac {f (1) - f (t ^ {\\prime})}{1 - t ^ {\\prime}} + t ^ {\\prime} \\left(\\frac {f (t ^ {\\prime}) - f (0)}{t ^ {\\prime}}\\right) \\right| \\\\ \\leq \\max \\left(\\frac {f (1)}{1 - t ^ {\\prime}}, \\frac {f (0)}{t ^ {\\prime}}\\right). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.865, + 0.795, + 0.882 + ], + "angle": 0, + "content": "Since \\(\\pmb{\\theta}(t')\\) minimizes \\(\\mathcal{L}\\) in an open set, \\(\\nabla \\mathcal{L}(\\pmb{\\theta}(t')) = \\mathbf{0}\\). By \\(\\rho\\)-smoothness of \\(\\mathcal{L}\\), for all \\(\\pmb{\\theta} \\in U\\)," + }, + { + "type": "equation", + "bbox": [ + 0.282, + 0.889, + 0.714, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (\\boldsymbol {\\theta}) \\leq \\mathcal {L} ^ {*} + \\frac {\\rho}{2} \\| \\boldsymbol {\\theta} - \\boldsymbol {\\theta} (t ^ {\\prime}) \\| _ {2} ^ {2} \\quad \\Rightarrow \\quad \\tilde {\\Psi} (\\boldsymbol {\\theta}) \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} - \\boldsymbol {\\theta} (t ^ {\\prime}) \\| _ {2}.\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.51, + 0.961 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.245, + 0.117 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.323, + 0.127, + 0.675, + 0.158 + ], + "angle": 0, + "content": "\\[\nf (1) \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} \\left(t ^ {\\prime}\\right) \\| _ {2} = \\left(1 - t ^ {\\prime}\\right) \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.162, + 0.639, + 0.194 + ], + "angle": 0, + "content": "\\[\nf (0) \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {1} - \\boldsymbol {\\theta} \\left(t ^ {\\prime}\\right) \\| _ {2} = t ^ {\\prime} \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2}.\n\\]" + }, 
+ { + "type": "text", + "bbox": [ + 0.172, + 0.2, + 0.269, + 0.214 + ], + "angle": 0, + "content": "Then we have" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.22, + 0.623, + 0.253 + ], + "angle": 0, + "content": "\\[\n\\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.259, + 0.525, + 0.274 + ], + "angle": 0, + "content": "Combining case 1 and case 2, we conclude the proof." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.259, + 0.824, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.289, + 0.825, + 0.317 + ], + "angle": 0, + "content": "Below we introduce a lemma that relates the movement of one step gradient descent to the change of the potential function." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.321, + 0.685, + 0.337 + ], + "angle": 0, + "content": "Lemma K.8 (Lemma G.1 in Lyu et al. (2022)). If \\(\\hat{\\pmb{u}}_t\\in U\\) and \\(\\eta \\leq 1 / \\rho_{2}\\) then" + }, + { + "type": "equation", + "bbox": [ + 0.361, + 0.344, + 0.635, + 0.374 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t}) - \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t + 1}) \\geq \\frac {\\sqrt {2 \\mu}}{4} \\eta \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.388, + 0.218, + 0.402 + ], + "angle": 0, + "content": "Proof." 
+ }, + { + "type": "equation", + "bbox": [ + 0.339, + 0.409, + 0.659, + 0.516 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t}) - \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t + 1}) = \\frac {\\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1})}{\\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t}) + \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t + 1})} \\\\ \\geq \\frac {\\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1}) - \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t})}{2 \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t})} \\\\ \\geq \\frac {\\eta (1 - \\rho_ {2} \\eta / 2) \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2}}{2 \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t})}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.525, + 0.825, + 0.564 + ], + "angle": 0, + "content": "where the two inequalities use Lemma K.6. By \\(\\mu\\)-PL, \\(\\tilde{\\Psi}(\\hat{\\boldsymbol{u}}_t) \\leq \\frac{1}{\\sqrt{2\\mu}} \\|\\nabla \\mathcal{L}(\\hat{\\boldsymbol{u}}_t)\\|_2\\). Therefore, we have \\(\\tilde{\\Psi}(\\hat{\\boldsymbol{u}}_t) - \\tilde{\\Psi}(\\hat{\\boldsymbol{u}}_{t+1}) \\geq \\frac{\\sqrt{2\\mu}}{2}(1 - \\eta\\rho/2)\\eta \\|\\nabla \\mathcal{L}(\\hat{\\boldsymbol{u}}_t)\\|_2 \\geq \\frac{\\sqrt{2\\mu}}{4}\\eta \\|\\nabla \\mathcal{L}(\\hat{\\boldsymbol{u}}_t)\\|_2\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.577, + 0.825, + 0.606 + ], + "angle": 0, + "content": "Based on Lemma K.8, we have the following lemma that bounds the movement of GD over multiple steps." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.61, + 0.825, + 0.651 + ], + "angle": 0, + "content": "Lemma K.9 (Bounding the movement of GD). 
If \\(\\hat{\\pmb{u}}_0\\) is initialized such that \\(\\| \\hat{\\pmb{u}}_0 - \\pmb{\\theta}^*\\| _2\\leq \\frac{1}{4}\\sqrt{\\frac{\\mu}{\\rho}}\\epsilon '\\), then for all \\(t\\geq 0\\), \\(\\hat{\\pmb{u}}_t\\in B^{\\epsilon '}(\\pmb {\\theta}^*)\\) and" + }, + { + "type": "equation", + "bbox": [ + 0.407, + 0.658, + 0.59, + 0.692 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\hat {\\boldsymbol {u}} _ {t} - \\hat {\\boldsymbol {u}} _ {0} \\right\\| _ {2} \\leq \\sqrt {\\frac {8}{\\mu}} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.705, + 0.825, + 0.748 + ], + "angle": 0, + "content": "Proof. We prove the proposition by induction. When \\( t = 0 \\), it trivially holds. Assume that the proposition holds for \\( \\hat{\\pmb{u}}_{\\tau} \\), \\( 0 \\leq \\tau < t \\). For step \\( t \\), since \\( \\hat{\\pmb{u}}_{\\tau} \\in B^{\\epsilon'}(\\pmb{\\theta}^{*}) \\), we apply Lemma K.8 and obtain" + }, + { + "type": "equation", + "bbox": [ + 0.246, + 0.755, + 0.75, + 0.795 + ], + "angle": 0, + "content": "\\[\n\\| \\hat {\\boldsymbol {u}} _ {t} - \\hat {\\boldsymbol {u}} _ {0} \\| _ {2} \\leq \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {\\tau}) \\| _ {2} \\leq \\sqrt {\\frac {8}{\\mu}} \\left(\\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}) - \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t})\\right) \\leq \\sqrt {\\frac {8}{\\mu}} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.802, + 0.394, + 0.817 + ], + "angle": 0, + "content": "Further by \\(\\rho\\)-smoothness of \\(\\mathcal{L}(\\cdot)\\)," + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.824, + 0.681, + 0.859 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\hat {\\boldsymbol {u}} _ {t} - \\hat {\\boldsymbol {u}} _ {0} \\right\\| _ {2} \\leq \\sqrt {\\frac {8}{\\mu}} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}) \\leq 2 \\sqrt {\\frac 
{\\rho}{\\mu}} \\left\\| \\hat {\\boldsymbol {u}} _ {0} - \\boldsymbol {\\theta} ^ {*} \\right\\| _ {2} \\leq \\frac {1}{2} \\epsilon^ {\\prime}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.865, + 0.743, + 0.882 + ], + "angle": 0, + "content": "Therefore, \\( \\| \\hat{\\pmb{u}}_t - \\pmb{\\theta}^* \\|_2 \\leq \\| \\hat{\\pmb{u}}_t - \\hat{\\pmb{u}}_0 \\|_2 + \\| \\hat{\\pmb{u}}_0 - \\pmb{\\theta}^* \\|_2 < \\epsilon' \\), which concludes the proof." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.867, + 0.824, + 0.88 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Finally, we introduce a lemma adapted from Thm. D.4 of Lyu et al. (2022), which bounds the movement of GF." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.102, + 0.825, + 0.14 + ], + "angle": 0, + "content": "Lemma K.10. Assume that \\(\\| \\pmb{\\theta}_0 - \\pmb{\\theta}^*\\|_2 < \\sqrt{\\frac{\\mu}{\\rho}}\\epsilon'\\). 
The gradient flow \\(\\frac{\\mathrm{d}\\pmb{\\theta}(t)}{\\mathrm{d}t} = -\\nabla\\mathcal{L}(\\pmb{\\theta}(t))\\) starting at \\(\\pmb{\\theta}_0\\) converges to a point in \\(U\\) and" + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.144, + 0.709, + 0.181 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\boldsymbol {\\theta} _ {0} - \\lim _ {t \\rightarrow + \\infty} \\boldsymbol {\\theta} (t) \\right\\| _ {2} \\leq \\sqrt {\\frac {2}{\\mu}} \\sqrt {\\mathcal {L} (\\boldsymbol {\\theta} _ {0}) - \\mathcal {L} ^ {*}} \\leq \\sqrt {\\frac {\\rho}{\\mu}} \\| \\boldsymbol {\\theta} _ {0} - \\boldsymbol {\\theta} ^ {*} \\| _ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.193, + 0.523, + 0.209 + ], + "angle": 0, + "content": "Proof. Let \\( T \\coloneqq \\inf \\{t : \\theta \\notin U\\} \\). Then for all \\( t < T \\)," + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.213, + 0.705, + 0.279 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {\\mathrm {d}}{\\mathrm {d} t} \\left(\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}\\right) ^ {1 / 2} = \\frac {1}{2} \\left(\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}\\right) ^ {- 1 / 2} \\cdot \\left\\langle \\nabla \\mathcal {L} (\\boldsymbol {\\theta}), \\frac {\\mathrm {d} \\boldsymbol {\\theta}}{\\mathrm {d} t} \\right\\rangle \\\\ = - \\frac {1}{2} (\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}) ^ {- 1 / 2} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta}) \\| _ {2} \\| \\frac {\\mathrm {d} \\boldsymbol {\\theta}}{\\mathrm {d} t} \\| _ {2}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.283, + 0.51, + 0.301 + ], + "angle": 0, + "content": "By \\(\\mu\\)-PL, \\(\\|\\nabla \\mathcal{L}(\\pmb{\\theta})\\|_2 \\geq \\sqrt{2\\mu(\\mathcal{L}(\\pmb{\\theta}) - \\mathcal{L}^*)}\\). 
Hence," + }, + { + "type": "equation", + "bbox": [ + 0.372, + 0.305, + 0.624, + 0.336 + ], + "angle": 0, + "content": "\\[\n\\frac {\\mathrm {d}}{\\mathrm {d} t} \\left(\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}\\right) ^ {1 / 2} \\leq - \\frac {\\sqrt {2 \\mu}}{2} \\| \\frac {\\mathrm {d} \\boldsymbol {\\theta}}{\\mathrm {d} t} \\| _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.381, + 0.354 + ], + "angle": 0, + "content": "Integrating both sides, we have" + }, + { + "type": "equation", + "bbox": [ + 0.273, + 0.358, + 0.724, + 0.395 + ], + "angle": 0, + "content": "\\[\n\\int_ {0} ^ {T} \\| \\frac {\\mathrm {d} \\boldsymbol {\\theta} (\\tau)}{\\mathrm {d} \\tau} \\| \\mathrm {d} \\tau \\leq \\frac {2}{\\sqrt {2 \\mu}} \\left(\\mathcal {L} \\left(\\boldsymbol {\\theta} _ {0}\\right) - \\mathcal {L} ^ {*}\\right) ^ {1 / 2} \\leq \\sqrt {\\frac {\\rho}{\\mu}} \\| \\boldsymbol {\\theta} _ {0} - \\boldsymbol {\\theta} ^ {*} \\| _ {2} < \\epsilon^ {\\prime},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.398, + 0.827, + 0.429 + ], + "angle": 0, + "content": "where the second inequality uses \\(\\rho\\)-smoothness of \\(\\mathcal{L}\\). Therefore, \\(T = +\\infty\\) and \\(\\pmb{\\theta}(t)\\) converges to some point in \\(U\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.444, + 0.478, + 0.457 + ], + "angle": 0, + "content": "K.4 CONSTRUCTION OF WORKING ZONES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.469, + 0.825, + 0.522 + ], + "angle": 0, + "content": "We construct four nested working zones \\((\\Gamma^{\\epsilon_0},\\Gamma^{\\epsilon_1},\\Gamma^{\\epsilon_2},\\Gamma^{\\epsilon_3})\\) in the neighborhood of \\(\\Gamma\\). Later we will show that the local iterates \\(\\pmb{\\theta}_{k,t}^{(s)}\\in \\Gamma^{\\epsilon_2}\\) and the global iterates \\(\\bar{\\pmb{\\theta}}^{(s)}\\in \\Gamma^{\\epsilon_0}\\) with high probability after \\(\\mathcal{O}(\\log \\frac{1}{\\eta})\\) rounds. 
The following lemma illustrates the properties the working zones should satisfy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.524, + 0.826, + 0.555 + ], + "angle": 0, + "content": "Lemma K.11 (Working zone lemma). There exists constants \\(\\epsilon_0 < \\epsilon_1 < \\epsilon_2 < \\epsilon_3\\) such that \\((\\Gamma^{\\epsilon_0}, \\Gamma^{\\epsilon_1}, \\Gamma^{\\epsilon_2}, \\Gamma^{\\epsilon_3})\\) satisfy the following properties:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.563, + 0.462, + 0.578 + ], + "angle": 0, + "content": "1. \\(\\mathcal{L}\\) satisfies \\(\\mu\\)-PL in \\(\\Gamma^{\\epsilon_3}\\) for some \\(\\mu > 0\\)." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.586, + 0.825, + 0.615 + ], + "angle": 0, + "content": "2. Any gradient flow starting in \\(\\Gamma^{\\epsilon_2}\\) converges to some point in \\(\\Gamma\\). Then, by Falconer (1983), \\(\\Phi(\\cdot)\\) is \\(\\mathcal{C}^\\infty\\) in \\(\\Gamma^{\\epsilon_2}\\)." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.624, + 0.664, + 0.64 + ], + "angle": 0, + "content": "3. Any \\(\\pmb{\\theta} \\in \\Gamma^{\\epsilon_1}\\) has an \\(\\epsilon_1\\)-neighborhood \\(B^{\\epsilon_1}(\\pmb{\\theta})\\) such that \\(B^{\\epsilon_1}(\\pmb{\\theta}) \\subseteq \\Gamma^{\\epsilon_2}\\)." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.648, + 0.78, + 0.664 + ], + "angle": 0, + "content": "4. Any gradient descent starting in \\(\\Gamma^{\\epsilon_0}\\) with sufficiently small learning rate will stay in \\(\\Gamma^{\\epsilon_1}\\)." + }, + { + "type": "list", + "bbox": [ + 0.179, + 0.563, + 0.825, + 0.664 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.677, + 0.825, + 0.725 + ], + "angle": 0, + "content": "Proof. Let \\(\\bar{\\theta}^{(0)}\\) be initialized such that \\(\\Phi (\\bar{\\theta}^{(0)})\\in \\Gamma\\). 
Let \\(\\mathcal{Z}\\) be the set of all points on the gradient flow trajectory starting from \\(\\bar{\\theta}^{(0)}\\) and \\(\\mathcal{Z}^{\\epsilon}\\) be the \\(\\epsilon\\)-neighborhood of \\(\\mathcal{Z}\\), where \\(\\epsilon\\) is a positive constant. Since the gradient flow converges to \\(\\phi^{(0)}\\), \\(\\mathcal{Z}\\) and \\(\\mathcal{Z}^{\\epsilon}\\) are bounded." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.73, + 0.827, + 0.787 + ], + "angle": 0, + "content": "We construct four nested working zones. By Lemma H.3 in Lyu et al. (2022), there exists an \\(\\epsilon_3\\)-neighborhood of \\(\\Gamma\\), \\(\\Gamma^{\\epsilon_3}\\), such that \\(\\mathcal{L}\\) satisfies \\(\\mu\\)-PL for some \\(\\mu > 0\\). Let \\(\\mathcal{M}\\) be the convex hull of \\(\\Gamma^{\\epsilon_3} \\cup \\mathcal{Z}^\\epsilon\\) and \\(\\mathcal{M}^{\\epsilon_4}\\) be the \\(\\epsilon_4\\)-neighborhood of \\(\\mathcal{M}\\) where \\(\\epsilon_4\\) is a positive constant. Then \\(\\mathcal{M}^{\\epsilon_4}\\) is bounded." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.792, + 0.827, + 0.929 + ], + "angle": 0, + "content": "Define \\(\\rho_{2} = \\sup_{\\pmb{\\theta}\\in \\mathcal{M}^{\\epsilon_{4}}}\\| \\nabla^{2}\\mathcal{L}(\\pmb {\\theta})\\|_{2}\\) and \\(\\rho_{3} = \\sup_{\\mathcal{M}^{\\epsilon_{4}}}\\| \\nabla^{3}\\mathcal{L}(\\pmb {\\theta})\\|_{2}\\). By Lemma K.10, we can construct an \\(\\epsilon_{2}\\)-neighborhood of \\(\\Gamma\\) where \\(\\epsilon_{2} < \\sqrt{\\frac{\\mu}{\\rho_{2}}}\\epsilon_{3}\\) such that all GF starting in \\(\\Gamma^{\\epsilon_2}\\) converges to \\(\\Gamma\\). By Falconer (1983), \\(\\Phi (\\cdot)\\) is \\(\\mathcal{C}^2\\) in \\(\\Gamma^{\\epsilon_3}\\). Define \\(\\nu_{1} = \\sup_{\\pmb {\\theta}\\in \\Gamma^{\\epsilon_{3}}}\\| \\partial \\Phi (\\pmb {\\theta})\\|_{2}\\) and \\(\\nu_{2} = \\sup_{\\pmb {\\theta}\\in \\Gamma^{\\epsilon_{3}}}\\| \\partial^{2}\\Phi (\\pmb {\\theta})\\|_{2}\\). 
We also construct an \\(\\epsilon_{1}\\) neighborhood of \\(\\Gamma\\), \\(\\Gamma^{\\epsilon_1}\\), where \\(\\epsilon_{1}\\leq \\frac{1}{2}\\epsilon_{2} < \\frac{1}{2}\\sqrt{\\frac{\\mu}{\\rho_{2}}}\\epsilon_{3}\\) such that all \\(\\pmb {\\theta}\\in \\Gamma^{\\epsilon_1}\\) has an \\(\\epsilon_{1}\\) neighborhood where \\(\\Phi\\) is well defined. Finally, by Lemma K.9, there exists an \\(\\epsilon_0\\)-neighborhood of \\(\\Gamma\\) where \\(\\epsilon_0\\leq \\frac{1}{4}\\sqrt{\\frac{\\mu}{\\rho_2}}\\epsilon_1\\) such that all gradient descent iterates starting in \\(\\Gamma^{\\epsilon_0}\\) with \\(\\eta \\leq \\frac{1}{\\rho_2}\\) will stay in \\(\\Gamma^{\\epsilon_1}\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.155 + ], + "angle": 0, + "content": "Note that the notions of \\(\\mathcal{Z}^{\\epsilon}\\), \\(\\mathcal{M}^{\\epsilon_4}\\), \\(\\rho_2\\), \\(\\rho_3\\), \\(\\nu_{1}\\), and \\(\\nu_{2}\\) defined in the proof will be useful in the remaining part of this section. When analyzing the limiting dynamics of Local SGD, we will show that all \\(\\pmb{\\theta}_{k,t}^{(s)}\\) stays in \\(\\Gamma^{\\epsilon_2}\\), \\(\\tilde{\\pmb{u}}_t^{(s)} \\in \\Gamma^{\\epsilon_1}\\), \\(\\tilde{\\pmb{\\theta}}^{(s)} \\in \\Gamma^{\\epsilon_0}\\) with high probability after \\(\\mathcal{O}(\\log \\frac{1}{\\eta})\\) rounds." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.171, + 0.573, + 0.187 + ], + "angle": 0, + "content": "K.5 PHASE 1:ITERATE APPROACHING THE MANIFOLD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.197, + 0.827, + 0.29 + ], + "angle": 0, + "content": "The approaching phase can be further divided into two subphases. 
In the first subphase, \\(\\bar{\\theta}^{(0)}\\) is initialized such that \\(\\phi^{(0)}\\in \\Gamma\\). We will show that after a constant number of rounds \\(s_0\\), \\(\\bar{\\theta}^{(s_0)}\\) goes to the inner part of \\(\\Gamma^{\\epsilon_0}\\) such that \\(\\| \\bar{\\theta}^{(s_0)} - \\phi^{(0)}\\| _2\\leq c\\epsilon_0\\) with high probability, where \\(0 < c < 1\\) and the constants will be specified later (see Appendix K.5.2). In the second subphase, we show that the iterate can reach within \\(\\tilde{\\mathcal{O}} (\\sqrt{\\eta})\\) distance from \\(\\Gamma\\) after \\(\\mathcal{O}(\\log \\frac{1}{\\eta})\\) rounds with high probability (see Appendix K.5.3)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.306, + 0.408, + 0.32 + ], + "angle": 0, + "content": "K.5.1 ADDITIONAL NOTATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.328, + 0.825, + 0.388 + ], + "angle": 0, + "content": "Consider an auxiliary sequence \\(\\{\\tilde{u}_t^{(s)}\\}\\) where \\(\\tilde{\\pmb{u}}_0^{(s)} = \\bar{\\pmb{\\theta}}^{(s)}\\) and \\(\\tilde{\\pmb{u}}_{t + 1}^{(s)} = \\tilde{\\pmb{u}}_t^{(s)} - \\eta \\nabla \\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})\\), \\(0\\leq t\\leq H - 1\\). Define \\(\\tilde{\\Delta}_{k,t}^{(s)}\\coloneqq \\pmb{\\theta}_{k,t}^{(s)} - \\tilde{\\pmb{u}}_t^{(s)}\\) to be the difference between the local iterate and the gradient descent iterate. Notice that \\(\\tilde{\\Delta}_{k,0}^{(s)} = 0\\), for all \\(k\\) and \\(s\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.394, + 0.825, + 0.466 + ], + "angle": 0, + "content": "Consider a gradient flow \\(\\{\\pmb{u}(t)\\}_{t\\geq 0}\\) with the initial condition \\(\\pmb{u}(0) = \\bar{\\pmb{\\theta}}^{(0)}\\) and converges to \\(\\phi^{(0)}\\in \\Gamma\\). For simplicity, let \\(\\pmb{u}_t^{(s)}\\coloneqq \\pmb {u}(s\\alpha +t\\eta)\\) be the gradient flow after \\(s\\) rounds plus \\(t\\) steps. 
Let \\(s_0\\) be the smallest number such that \\(\\| \\pmb{u}_0^{(s_0)} - \\pmb{\\phi}^{(0)}\\| _2\\leq \\frac{1}{4}\\sqrt{\\frac{\\mu}{\\rho_2}}\\epsilon_0\\). Note that \\(s_0\\) is a constant independent of \\(\\eta\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.472, + 0.825, + 0.502 + ], + "angle": 0, + "content": "In this subsection, the minimum value of the loss in Appendix K.3 corresponds to the loss value on \\(\\Gamma\\), i.e., \\(\\mathcal{L}^{*} = \\mathcal{L}(\\phi), \\forall \\phi \\in \\Gamma\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.508, + 0.747, + 0.528 + ], + "angle": 0, + "content": "We also define the following sequence \\(\\{\\tilde{\\mathbf{Z}}_{k,t}^{(s)}\\}_{t = 0}^{H}\\) that will be used in the proof. Define" + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.538, + 0.706, + 0.581 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}))\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\tilde {\\boldsymbol {Z}} _ {k, 0} ^ {(s)} = \\boldsymbol {0}.\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.597, + 0.404, + 0.611 + ], + "angle": 0, + "content": "K.5.2 PROOF FOR SUBPHASE 1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.62, + 0.62, + 0.641 + ], + "angle": 0, + "content": "First, we have the following lemma about the concentration of \\(\\tilde{\\mathbf{Z}}_{k,t}^{(s)}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.827, + 0.68 + ], + "angle": 0, + "content": "Lemma K.12 (Concentration property of \\(\\{\\tilde{\\mathbf{Z}}_{k,t}^{(s)}\\}_{t = 0}^{H}\\)). 
Given \\(\\bar{\\theta}^{(s)}\\) such that \\(\\tilde{\\boldsymbol{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon\\) for all \\(0\\leq t\\leq H\\), then with probability at least \\(1 - \\delta\\)," + }, + { + "type": "equation", + "bbox": [ + 0.285, + 0.689, + 0.712, + 0.723 + ], + "angle": 0, + "content": "\\[\n\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 H K}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.732, + 0.334, + 0.75 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_1\\coloneqq \\exp (\\alpha \\rho_2)\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.771, + 0.554, + 0.793 + ], + "angle": 0, + "content": "Proof. For each \\(\\tilde{\\mathbf{Z}}_{k,t}^{(s)}\\), construct a sequence \\(\\{\\tilde{\\mathbf{Z}}_{k,t,t'}^{(s)}\\}_{t'=0}^t\\):" + }, + { + "type": "equation", + "bbox": [ + 0.28, + 0.803, + 0.717, + 0.847 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {Z}} _ {k, t, t ^ {\\prime}} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t ^ {\\prime} - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}))\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\tilde {\\boldsymbol {Z}} _ {k, t, 0} ^ {(s)} = \\boldsymbol {0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.857, + 0.791, + 0.876 + ], + "angle": 0, + "content": "Since \\(\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon\\), we have \\(\\| \\nabla^2\\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})\\| _2\\leq \\rho_2\\) for all \\(0\\leq t\\leq H\\). 
Then, for all \\(\\tau\\) and \\(t\\)," + }, + { + "type": "equation", + "bbox": [ + 0.287, + 0.884, + 0.71, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\right\\| _ {2} \\leq (1 + \\rho_ {2} \\eta) ^ {H} \\leq \\exp (\\alpha \\rho_ {2}) = \\tilde {C} _ {1}.\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.102, + 0.825, + 0.137 + ], + "angle": 0, + "content": "Notice that for all \\(0 \\leq t \\leq H\\), \\(\\{\\tilde{Z}_{k,t,t'}^{(s)}\\}_{t'=0}^t\\) is a martingale with \\(\\| \\tilde{Z}_{k,t,t'}^{(s)} - \\tilde{Z}_{k,t,t'-1}^{(s)} \\|_2 \\leq \\tilde{C}_1 \\sigma_{\\max}\\). 
By Azuma-Hoeffding's inequality," + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.142, + 0.753, + 0.201 + ], + "angle": 0, + "content": "\\[\n\\mathbb {P} (\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\geq \\epsilon^ {\\prime}) \\leq 2 \\exp \\left(\\frac {- \\epsilon^ {\\prime 2}}{2 t (\\tilde {C} _ {1} \\sigma_ {\\max}) ^ {2}}\\right) \\leq 2 \\exp \\left(\\frac {- \\epsilon^ {\\prime 2}}{2 H (\\tilde {C} _ {1} \\sigma_ {\\max}) ^ {2}}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.205, + 0.825, + 0.235 + ], + "angle": 0, + "content": "Taking a union bound on all \\( k \\in [K] \\) and \\( 0 \\leq t \\leq H \\), we can conclude that with probability at least \\( 1 - \\delta \\)," + }, + { + "type": "equation", + "bbox": [ + 0.285, + 0.241, + 0.714, + 0.275 + ], + "angle": 0, + "content": "\\[\n\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 H K}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ].\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.281, + 0.824, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.311, + 0.825, + 0.34 + ], + "angle": 0, + "content": "The following lemma states that the gradient descent iterates will closely track the gradient flow with the same initial point." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.343, + 0.826, + 0.396 + ], + "angle": 0, + "content": "Lemma K.13. Denote \\(G \\coloneqq \\sup_{t \\geq 0} \\| \\nabla \\mathcal{L}(\\boldsymbol{u}(t)) \\|_2\\) as the upper bound of the gradient on the gradient flow trajectory. 
If \\(\\| \\tilde{\\boldsymbol{u}}_t^{(s)} - \\boldsymbol{u}_t^{(s)} \\|_2 = \\mathcal{O}(\\sqrt{\\eta})\\), then for all \\(0 \\leq t \\leq H\\), the closeness of \\(\\tilde{\\boldsymbol{u}}_t^{(s)}\\) and \\(\\boldsymbol{u}_t^{(s)}\\) is bounded by" + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.403, + 0.655, + 0.425 + ], + "angle": 0, + "content": "\\[\n\\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\tilde {C} _ {1} \\eta G,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.432, + 0.329, + 0.45 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_1 = \\exp (\\alpha \\rho_2)\\)" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.463, + 0.405, + 0.479 + ], + "angle": 0, + "content": "Proof. We prove by induction that" + }, + { + "type": "equation", + "bbox": [ + 0.265, + 0.485, + 0.826, + 0.527 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\right\\| _ {2} \\leq (1 + \\rho_ {2} \\eta) ^ {t} \\left\\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\right\\| _ {2} + \\rho_ {2} \\eta^ {2} G \\sum_ {\\tau = 0} ^ {t - 1} (1 + \\rho_ {2} \\eta) ^ {\\tau}. \\tag {34}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.533, + 0.678, + 0.549 + ], + "angle": 0, + "content": "When \\( t = 0 \\), (34) holds trivially. 
Assume that (34) holds for \\( 0 \\leq \\tau \\leq t \\), then" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.555, + 0.741, + 0.663 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)} - \\boldsymbol {u} _ {t + 1} ^ {(s)} = \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\left(\\boldsymbol {u} _ {t} - \\int_ {s \\alpha + t \\eta} ^ {s \\alpha + (t + 1) \\eta} \\nabla \\mathcal {L} (\\boldsymbol {u} (v)) d v\\right) \\\\ = \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} - \\eta (\\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)})) \\\\ - \\int_ {s \\alpha + t \\eta} ^ {s \\alpha + (t + 1) \\eta} \\left(\\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)}) - \\nabla \\mathcal {L} (\\boldsymbol {u} (v))\\right) d v. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.669, + 0.314, + 0.684 + ], + "angle": 0, + "content": "By smoothness of \\(\\mathcal{L}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.69, + 0.693, + 0.766 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)}) - \\nabla \\mathcal {L} (\\boldsymbol {u} (v)) \\| _ {2} \\leq \\rho_ {2} \\| \\boldsymbol {u} _ {t} ^ {(s)} - \\boldsymbol {u} (v) \\| _ {2} \\\\ \\leq \\rho_ {2} \\int_ {s \\alpha + t \\eta} ^ {v} \\| \\nabla \\mathcal {L} (\\boldsymbol {u} (w)) \\| _ {2} d w \\\\ \\leq \\rho_ {2} \\eta G. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.774, + 0.825, + 0.813 + ], + "angle": 0, + "content": "Since \\(\\rho_2\\eta^2 G\\sum_{\\tau = 0}^{t - 1}(1 + \\rho_2\\eta)^\\tau \\leq \\eta G(1 + \\rho_2\\eta)^t\\leq \\exp (\\alpha \\rho_2)\\eta G\\), then \\(\\| \\tilde{\\pmb{u}}_t^{(s)} - \\pmb {u}_t^{(s)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta})\\), which implies that \\(\\tilde{\\pmb{u}}_t^{(s)}\\in \\mathcal{M}^{\\epsilon_4}\\). Hence, \\(\\| \\nabla \\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)}) - \\nabla \\mathcal{L}(\\pmb {u}_t^{(s)})\\| _2\\leq \\rho_2\\| \\tilde{\\pmb{u}}_t^{(s)} - \\pmb {u}_t^{(s)}\\| _2.\\)" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.817, + 0.324, + 0.833 + ], + "angle": 0, + "content": "By triangle inequality," + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.838, + 0.745, + 0.903 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)} - \\boldsymbol {u} _ {t + 1} ^ {(s)} \\right\\| _ {2} \\leq (1 + \\rho_ {2} \\eta) \\left\\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\right\\| _ {2} + \\rho_ {2} \\eta^ {2} G \\\\ \\leq \\left(1 + \\rho_ {2} \\eta\\right) ^ {t + 1} \\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\rho_ {2} \\eta^ {2} G \\sum_ {\\tau = 0} ^ {t} (1 + \\rho_ {2} \\eta) ^ {\\tau}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.747, + 0.926 + ], + "angle": 0, + "content": "which concludes the induction step. Applying \\(1 + \\rho_{2}\\eta \\leq \\exp (\\rho_{2}\\eta)\\), we have the lemma." 
+ }, + { + "type": "image", + "bbox": [ + 0.808, + 0.91, + 0.824, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.137 + ], + "angle": 0, + "content": "Utilizing the concentration probability of \\(\\{\\tilde{Z}_{k,t}^{(s)}\\}\\), we can obtain the following lemma which implies that the Local SGD iterates will closely track the gradient descent iterates with high probability." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.139, + 0.825, + 0.173 + ], + "angle": 0, + "content": "Lemma K.14. Given \\(\\bar{\\theta}^{(s)}\\) such that \\(\\tilde{u}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon\\) for all \\(0\\leq t\\leq H\\), then for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\) with probability at least \\(1 - \\delta\\), there exists a constant \\(\\tilde{C}_3\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.305, + 0.177, + 0.692, + 0.21 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.213, + 0.204, + 0.227 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.23, + 0.619, + 0.264 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.279, + 0.825, + 0.316 + ], + 
"angle": 0, + "content": "Proof. Since \\(\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon\\) for all \\(0\\leq t\\leq H\\), we have \\(\\| \\nabla^2\\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})\\| _2\\leq \\rho_2\\). According to the update rule for \\(\\theta_{k,t}^{(s)}\\) and \\(\\tilde{\\pmb{u}}_t^{(s)}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.32, + 0.825, + 0.342 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} = \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)}, \\tag {35}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.384, + 0.344, + 0.825, + 0.365 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)} = \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right). \\tag {36}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.368, + 0.39, + 0.383 + ], + "angle": 0, + "content": "Subtracting (36) from (35) gives" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.386, + 0.825, + 0.432 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\tilde {\\boldsymbol {\\Delta}} _ {k, t + 1} ^ {(s)} = \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta (\\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) - \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)})) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\\\ = \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\eta \\tilde {\\boldsymbol {v}} _ {k, t} ^ {(s)}. 
\\tag {37} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.436, + 0.825, + 0.477 + ], + "angle": 0, + "content": "Here, \\(\\tilde{\\pmb{v}}_{k,t}^{(s)} = (1 - \\beta_{k,t}^{(s)})\\pmb{\\theta}_{k,t}^{(s)} + \\beta_{k,t}^{(s)}\\tilde{\\pmb{u}}_{k,t}^{(s)}\\), where \\(\\beta_{k,t}^{(s)} \\in (0,1)\\) depends on \\(\\pmb{\\theta}_{k,t}^{(s)}\\) and \\(\\tilde{\\pmb{u}}_t^{(s)}\\). Therefore, \\(\\| \\tilde{\\pmb{v}}_{k,t}^{(s)}\\| _2 \\leq \\frac{\\rho_3}{2}\\| \\tilde{\\pmb{\\Delta}}_{k,t}^{(s)}\\| _2^2\\) if \\(\\pmb{\\theta}_{k,t}^{(s)} \\in \\mathcal{M}^{\\epsilon_4}\\). Applying (37) \\(t\\) times, we have" + }, + { + "type": "equation", + "bbox": [ + 0.242, + 0.481, + 0.753, + 0.569 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} = \\left[ \\prod_ {\\tau = 0} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {\\tau} ^ {(s)})) \\right] \\tilde {\\boldsymbol {\\Delta}} _ {k, 0} ^ {(s)} - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)})) \\boldsymbol {z} _ {k, \\tau} ^ {(s)} \\\\ + \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\tilde {\\boldsymbol {v}} _ {k, \\tau} ^ {(s)}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.576, + 0.825, + 0.615 + ], + "angle": 0, + "content": "By Cauchy-Schwartz inequality, triangle inequality and the definition of \\(\\tilde{\\pmb{Z}}_{k,t}^{(s)}\\), if for all \\(0 \\leq \\tau \\leq t - 1\\) and \\(k \\in [K]\\), \\(\\pmb{\\theta}_{k,\\tau}^{(s)} \\in \\mathcal{M}^{\\epsilon_4}\\), then we have" + }, + { + "type": "equation", + "bbox": [ + 0.341, + 0.62, + 0.825, + 0.661 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\eta \\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} + \\frac {1}{2} \\eta \\rho_ {3} \\sum_ {\\tau = 0} ^ {t - 1} \\tilde {C} _ {1} \\left\\| \\tilde {\\boldsymbol {\\Delta}} _ {k, \\tau} ^ {(s)} \\right\\| _ {2} ^ {2}. \\tag {38}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.664, + 0.825, + 0.692 + ], + "angle": 0, + "content": "Applying Lemma K.12 and substituting in the value of \\(H\\), we have that with probability at least \\(1 - \\delta\\)," + }, + { + "type": "equation", + "bbox": [ + 0.292, + 0.697, + 0.825, + 0.737 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in K, 0 \\leq t \\leq H. \\tag {39}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.741, + 0.825, + 0.781 + ], + "angle": 0, + "content": "Now we show by induction that for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), when (39) holds, there exists a constant \\(\\tilde{C}_2 > 2\\sigma_{\\max}\\sqrt{2\\alpha}\\tilde{C}_1\\) such that \\(\\| \\tilde{\\Delta}_{k,t}^{(s)}\\|_2 \\leq \\tilde{C}_2\\sqrt{\\eta\\log\\frac{2\\alpha K}{\\eta\\delta}}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.789, + 0.825, + 0.833 + ], + "angle": 0, + "content": "When \\(t = 0\\), \\(\\tilde{\\Delta}_{k,0}^{(s)} = 0\\). Assume that \\(\\| \\tilde{\\Delta}_{k,\\tau}^{(s)} \\|_2 \\leq \\tilde{C}_2 \\sqrt{\\eta \\log \\frac{2\\alpha K}{\\eta \\delta}}\\), for all \\(k \\in [K]\\), \\(0 \\leq \\tau \\leq t - 1\\). Then for all \\(0 \\leq \\tau \\leq t - 1\\), \\(\\pmb{\\theta}_{k,\\tau}^{(s)} \\in \\mathcal{M}^{\\epsilon_4}\\). Therefore, we can apply (38) and obtain" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.838, + 0.724, + 0.924 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\tilde {\\Delta} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\eta \\| \\tilde {Z} _ {k, t} ^ {(s)} \\| _ {2} + \\frac {1}{2} \\eta \\rho_ {3} \\sum_ {\\tau = 0} ^ {t - 1} \\tilde {C} _ {1} \\| \\tilde {\\Delta} _ {k, \\tau} ^ {(s)} \\| _ {2} ^ {2} \\\\ \\leq \\tilde {C} _ {1} \\sigma_ {\\max} \\sqrt {2 \\alpha \\eta \\log \\frac {2 \\alpha K}{\\eta \\delta}} + \\frac {1}{2} \\tilde {C} _ {1} \\tilde {C} _ {2} ^ {2} \\sigma_ {\\max} ^ {2} \\alpha \\rho_ {3} \\eta \\log \\frac {2 \\alpha K}{\\eta \\delta}. \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.102, + 0.825, + 0.146 + ], + "angle": 0, + "content": "Given that \\(\\tilde{C}_2 \\geq 2\\sigma_{\\max}\\sqrt{2\\alpha}\\tilde{C}_1\\) and \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), when \\(\\eta\\) is sufficiently small, \\(\\|\\tilde{\\Delta}_{k,t}^{(s)}\\|_2 \\leq \\tilde{C}_2\\sqrt{\\eta\\log\\frac{2\\alpha K}{\\eta\\delta}}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.154, + 0.826, + 0.193 + ], + "angle": 0, + "content": "To sum up, for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), with probability at least \\(1 - \\delta\\), \\(\\| \\tilde{\\Delta}_{k,t}^{(s)} \\|_2 \\leq \\tilde{C}_2 \\sqrt{\\eta \\log \\frac{2\\alpha K}{\\eta \\delta}}\\) for all \\(k \\in [K]\\), \\(0 \\leq t \\leq H\\). By triangle inequality," + }, + { + "type": "equation", + "bbox": [ + 0.299, + 0.198, + 0.7, + 0.243 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\frac {1}{K} \\sum_ {k \\in [ K ]} \\| \\tilde {\\boldsymbol {\\Delta}} _ {k, H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {2} \\sqrt {\\eta \\log \\frac {2 \\alpha K}{\\eta \\delta}}.\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.246, + 0.825, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.275, + 0.825, + 0.304 + ], + "angle": 0, + "content": "The combination of Lemma K.13 and Lemma K.14 leads to the following lemma, which states that the Local SGD iterate will enter \\(\\Gamma^{\\epsilon_1}\\) after \\(s_0\\) rounds with high probability." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.826, + 0.34 + ], + "angle": 0, + "content": "Lemma K.15. 
Given \\(\\bar{\\theta}^{(0)}\\) such that \\(\\Phi (\\bar{\\theta}^{(0)})\\in \\Gamma\\), then for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), there exists a positive constant \\(\\tilde{C}_4\\) such that with probability at least \\(1 - \\delta\\)," + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.345, + 0.655, + 0.379 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {0})} - \\boldsymbol {\\phi} ^ {(0)} \\| _ {2} \\leq \\frac {1}{4} \\sqrt {\\frac {\\mu}{\\rho_ {2}}} \\epsilon_ {0} + \\tilde {C} _ {4} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.393, + 0.618, + 0.409 + ], + "angle": 0, + "content": "Proof. First, we prove by induction that for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), when" + }, + { + "type": "equation", + "bbox": [ + 0.237, + 0.414, + 0.826, + 0.448 + ], + "angle": 0, + "content": "\\[\n\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 H K s _ {0}}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < s _ {0}, \\tag {40}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.452, + 0.469, + 0.471 + ], + "angle": 0, + "content": "the closeness of \\(\\bar{\\pmb{\\theta}}^{(s)}\\) and \\(\\pmb{u}_0^{(s)}\\) is bounded by" + }, + { + "type": "equation", + "bbox": [ + 0.269, + 0.476, + 0.826, + 0.515 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\right\\| _ {2} \\leq \\sum_ {l = 1} ^ {s} \\tilde {C} _ {1} ^ {l} \\left(\\eta G + \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {0}}{\\eta \\delta}}\\right), \\quad \\forall 0 \\leq s \\leq s _ {0}. \\tag {41}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.52, + 0.825, + 0.551 + ], + "angle": 0, + "content": "When \\(s = 0\\), \\(\\bar{\\theta}^{(0)} = \\pmb{u}_0^{(0)}\\). 
Assume that (41) holds for round \\(s\\). Then by Lemma K.13, for all \\(0 \\leq t \\leq H\\)," + }, + { + "type": "equation", + "bbox": [ + 0.293, + 0.554, + 0.705, + 0.638 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\tilde {C} _ {1} \\eta G \\\\ = \\tilde {C} _ {1} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\tilde {C} _ {1} \\eta G \\\\ \\leq \\sum_ {l = 1} ^ {s} \\tilde {C} _ {1} ^ {l + 1} \\left(\\eta G + \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {0}}{\\eta \\delta}}\\right) + \\tilde {C} _ {1} \\eta G. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.825, + 0.675 + ], + "angle": 0, + "content": "Therefore, for sufficiently small \\(\\eta\\), \\(\\tilde{\\pmb{u}}_t^{(s)} \\in \\mathcal{Z}^\\epsilon\\), \\(\\forall 0 \\leq t \\leq H\\). Combining the above inequality with Lemma K.14, we have" + }, + { + "type": "equation", + "bbox": [ + 0.301, + 0.679, + 0.695, + 0.765 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\boldsymbol {u} _ {0} ^ {(s + 1)} \\right\\| _ {2} = \\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\boldsymbol {u} _ {H} ^ {(s)} \\right\\| _ {2} \\\\ \\leq \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} + \\| \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} - \\boldsymbol {u} _ {H} ^ {(s)} \\| _ {2} \\\\ \\leq \\sum_ {l = 1} ^ {s + 1} \\tilde {C} _ {1} ^ {l} \\left(\\eta G + \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {0}}{\\eta \\delta}}\\right), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.77, + 0.381, + 0.783 + ], + "angle": 0, + "content": "which concludes the induction." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.79, + 0.649, + 0.805 + ], + "angle": 0, + "content": "Therefore, when (40) holds, there exists a positive constant \\(\\tilde{C}_4\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.81, + 0.617, + 0.844 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {0})} - \\boldsymbol {u} _ {0} ^ {(s _ {0})} \\right\\| _ {2} \\leq \\tilde {C} _ {4} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.848, + 0.324, + 0.868 + ], + "angle": 0, + "content": "By definition of \\(\\pmb{u}_0^{(s_0)}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.872, + 0.654, + 0.907 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {0})} - \\phi^ {(0)} \\right\\| _ {2} \\leq \\frac {1}{4} \\sqrt {\\frac {\\mu}{\\rho_ {2}}} \\epsilon_ {0} + \\tilde {C} _ {4} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.909, + 0.679, + 0.925 + ], + "angle": 0, + "content": "Finally, according to Lemma K.12, (40) holds with probability at least \\( 1 - \\delta \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.406, + 0.119 + ], + "angle": 0, + "content": "K.5.3 PROOF FOR SUBPHASE 2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.127, + 0.825, + 0.175 + ], + "angle": 0, + "content": "In subphase 2, we show that the iterate can reach within \\(\\tilde{\\mathcal{O}} (\\sqrt{\\eta})\\) distance from \\(\\Gamma\\) after \\(\\mathcal{O}(\\log \\frac{1}{\\eta})\\) rounds with high probability. 
The following lemma manifests how the potential function \\(\\tilde{\\Psi} (\\bar{\\pmb{\\theta}}^{(s)})\\) evolves after one round." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.178, + 0.741, + 0.195 + ], + "angle": 0, + "content": "Lemma K.16. Given \\(\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}\\), for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), with probability at least \\(1 - \\delta\\)" + }, + { + "type": "equation", + "bbox": [ + 0.246, + 0.203, + 0.75, + 0.236 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} _ {k, t} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) \\leq \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.242, + 0.204, + 0.255 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.261, + 0.73, + 0.295 + ], + "angle": 0, + "content": "\\[\n\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.302, + 0.384, + 0.318 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_5\\) is a positive constant." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.335, + 0.825, + 0.369 + ], + "angle": 0, + "content": "Proof. Since \\(\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}\\), then for all \\(0\\leq t\\leq H\\), \\(\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_1}\\) by the definition of the working zone. 
By Lemma K.6, for \\(\\eta \\leq \\frac{1}{\\rho_2}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.376, + 0.756, + 0.401 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) ^ {t} \\left(\\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}\\right) \\leq \\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}, \\quad \\forall 0 \\leq t \\leq H.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.407, + 0.316, + 0.422 + ], + "angle": 0, + "content": "Specially, for \\(t = H\\)" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.429, + 0.748, + 0.453 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) ^ {\\frac {\\alpha}{\\eta}} \\left(\\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}\\right) \\leq \\exp (- \\alpha \\mu) \\left(\\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.46, + 0.246, + 0.474 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.481, + 0.61, + 0.501 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.507, + 0.617, + 0.523 + ], + "angle": 0, + "content": "According to the proof of Lemma K.14, for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), when" + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.53, + 0.825, + 0.571 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde 
{C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, \\tag {42}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.578, + 0.408, + 0.594 + ], + "angle": 0, + "content": "there exists a constant \\(\\tilde{C}_3\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.601, + 0.69, + 0.635 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.64, + 0.202, + 0.653 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.659, + 0.619, + 0.693 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.699, + 0.728, + 0.721 + ], + "angle": 0, + "content": "Since \\(\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_1},\\forall 0\\leq t\\leq H,\\bar{\\pmb{\\theta}}^{(s + 1)}\\in \\Gamma^{\\epsilon_2}\\) and \\(\\bar{\\pmb{\\theta}}_{k,t}^{(s)}\\in \\Gamma^{\\epsilon_2},\\forall 0\\leq t\\leq H,k\\in [K]\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.825, + 0.76 + ], + "angle": 0, + "content": "By Lemma K.7, \\(\\tilde{\\Psi}(\\cdot)\\) is \\(\\sqrt{2\\rho_2}\\)-Lipschitz in \\(\\mathcal{M}^{\\epsilon_4}\\). 
Therefore, when (42) holds, there exists a constant \\(\\tilde{C}_5 := \\sqrt{2\\rho_2}\\tilde{C}_3\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.351, + 0.766, + 0.644, + 0.824 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq \\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) + \\sqrt {2 \\rho_ {2}} \\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\right\\| _ {2} \\\\ \\leq \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.83, + 0.202, + 0.842 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.323, + 0.848, + 0.674, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) \\leq \\tilde {\\Psi} (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) + \\sqrt {2 \\rho_ {2}} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\\\ \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.91, + 0.614, + 0.925 + ], + "angle": 0, + "content": "Finally, by Lemma K.12, (42) holds with probability at least \\( 1 - \\delta \\)." 
+ }, + { + "type": "image", + "bbox": [ + 0.808, + 0.91, + 0.825, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.138 + ], + "angle": 0, + "content": "We are thus led to the following lemma which characterizes the evolution of the potential \\(\\tilde{\\Psi} (\\bar{\\theta}^{(s)})\\) and \\(\\tilde{\\Psi} (\\pmb{\\theta}_{k,t}^{(s)})\\) over multiple rounds." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.144, + 0.826, + 0.181 + ], + "angle": 0, + "content": "Lemma K.17. Given \\(\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\| _2\\leq \\frac{1}{2}\\sqrt{\\frac{\\mu}{\\rho_2}}\\epsilon_0,\\) for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\) and any integer \\(1\\le R\\le\\) \\(R_{\\mathrm{tot}}\\) , with probability at least \\(1 - \\delta\\)" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.185, + 0.826, + 0.24 + ], + "angle": 0, + "content": "\\[\n\\bar {\\boldsymbol {\\theta}} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {0}}, \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq \\exp (- \\alpha \\mu s / 2) \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(0)}\\right) + \\frac {1}{1 - \\exp (- \\alpha \\mu / 2)} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}, \\forall 0 \\leq s \\leq R. 
\\tag {43}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.245, + 0.265, + 0.259 + ], + "angle": 0, + "content": "Furthermore," + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.263, + 0.826, + 0.305 + ], + "angle": 0, + "content": "\\[\n\\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, 0 \\leq s < R, k \\in [ K ]. \\tag {44}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.318, + 0.561, + 0.334 + ], + "angle": 0, + "content": "Proof. We prove by induction that for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), when" + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.34, + 0.826, + 0.38 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 R \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, 0 \\leq s < R, \\tag {45}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.384, + 0.457, + 0.399 + ], + "angle": 0, + "content": "then for all \\(0 \\leq s \\leq R\\), (43) and (44) hold." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.404, + 0.825, + 0.435 + ], + "angle": 0, + "content": "When \\(s = 0\\), \\(\\bar{\\theta}^{(0)} \\in \\Gamma^{\\epsilon_0}\\) and (43) trivially holds. By Lemma K.16, (44) holds. Assume that (43) and (44) hold for round \\(s - 1\\). 
Then for round \\(s\\), by Lemma K.16, \\(\\bar{\\theta}^{(s)} \\in \\Gamma^{\\epsilon_2}\\) and" + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.44, + 0.729, + 0.524 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\Psi (\\bar {\\pmb {\\theta}} ^ {(s)}) \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(s - 1)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}} \\\\ \\leq \\exp (- \\alpha \\mu s / 2) \\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(0)}) + \\frac {1}{1 - \\exp (- \\alpha \\mu / 2)} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.528, + 0.726, + 0.543 + ], + "angle": 0, + "content": "where the second inequality comes from the induction hypothesis. By Lemma K.10," + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.547, + 0.743, + 0.665 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {\\phi} ^ {(s)} \\| _ {2} \\leq \\frac {2}{\\sqrt {2 \\mu}} \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\\\ \\leq \\frac {2}{\\sqrt {2 \\mu}} \\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(0)}) + \\frac {2}{\\sqrt {2 \\mu} (1 - \\exp (- \\alpha \\mu / 2))} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}} \\\\ \\leq \\frac {1}{2} \\epsilon_ {0} + \\frac {2}{\\sqrt {2 \\mu} (1 - \\exp (- \\alpha \\mu / 2))} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.67, + 0.825, + 0.708 + ], + "angle": 0, + "content": "Here, the last inequality uses \\(\\tilde{\\Psi} (\\bar{\\pmb{\\theta}}^{(0)})\\leq \\sqrt{\\frac{\\rho_2}{2}}\\| \\bar{\\pmb{\\theta}}^{(s)} - \\phi^{(0)}\\| _2\\leq \\frac{1}{2}\\sqrt{\\frac{\\mu}{2}}\\epsilon_0\\). Hence, when \\(\\eta\\) is sufficiently small, \\(\\bar{\\pmb{\\theta}}^{(s)}\\in \\Gamma^{\\epsilon_0}\\). 
Still by Lemma K.16, \\(\\bar{\\pmb{\\theta}}_{k,t}^{(s)}\\in \\Gamma^{\\epsilon_2}\\) and" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.713, + 0.622, + 0.754 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.757, + 0.678, + 0.773 + ], + "angle": 0, + "content": "Finally, according to Lemma K.12, (45) holds with probability at least \\( 1 - \\delta \\)." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.78, + 0.825, + 0.792 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.809, + 0.716, + 0.824 + ], + "angle": 0, + "content": "The following corollary is a direct consequence of Lemma K.17 and Lemma K.10." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.825, + 0.866 + ], + "angle": 0, + "content": "Corollary K.1. Let \\( s_1 \\coloneqq \\lceil \\frac{20}{\\alpha \\mu} \\log \\frac{1}{\\eta} \\rceil \\). Given \\( \\| \\bar{\\pmb{\\theta}}^{(0)} - \\pmb{\\phi}^{(0)} \\|_2 \\leq \\frac{1}{2} \\sqrt{\\frac{\\mu}{\\rho_2}} \\epsilon_0 \\), for \\( \\delta = \\mathcal{O}(\\mathrm{poly}(\\eta)) \\), with probability at least \\( 1 - \\delta \\)" + }, + { + "type": "equation", + "bbox": [ + 0.283, + 0.87, + 0.826, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s _ {1})}\\right) \\leq \\tilde {C} _ {6} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {1})} - \\phi^ {(s _ {1})} \\| _ {2} \\leq \\tilde {C} _ {6} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\tag {46}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.908, + 0.33, + 0.925 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_6\\) is a constant." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.827, + 0.143 + ], + "angle": 0, + "content": "Proof. Substituting in \\( R = s_1 \\) to Lemma K.17 and applying \\( \\| \\bar{\\pmb{\\theta}}^{(s_1)} - \\phi^{(s)}\\|_2 \\leq \\sqrt{\\frac{2}{\\mu}}\\tilde{\\Psi}(\\bar{\\pmb{\\theta}}^{(s_1)}) \\) for \\( \\bar{\\pmb{\\theta}}^{(s_1)} \\in \\Gamma^{\\epsilon_0} \\), we have the lemma." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.159, + 0.825, + 0.19 + ], + "angle": 0, + "content": "Finally, we provide a high probability bound for the change of the projection on the manifold after \\( s_1 \\) rounds \\( \\| \\phi^{(s_1)} - \\phi^{(0)} \\|_2 \\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.195, + 0.825, + 0.233 + ], + "angle": 0, + "content": "Lemma K.18. Let \\(s_1 \\coloneqq \\lceil \\frac{20}{\\alpha \\mu} \\log \\frac{1}{\\eta} \\rceil\\). Given \\(\\| \\bar{\\theta}^{(0)} - \\phi^{(0)} \\|_2 \\leq \\frac{1}{2} \\sqrt{\\frac{\\mu}{\\rho_2}} \\epsilon_0\\). For \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), with probability at least \\(1 - \\delta\\)," + }, + { + "type": "equation", + "bbox": [ + 0.364, + 0.24, + 0.635, + 0.274 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\phi^ {(s _ {1})} - \\phi^ {(0)} \\right\\| _ {2} \\leq \\tilde {C} _ {8} \\log \\frac {1}{\\eta} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.29, + 0.535, + 0.307 + ], + "angle": 0, + "content": "Proof. 
From Lemma K.17, for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), when" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.314, + 0.826, + 0.354 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 s _ {1} \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, 0 \\leq s < s _ {1}, \\tag {47}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.364, + 0.825, + 0.398 + ], + "angle": 0, + "content": "then \\(\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}\\), for all \\(0\\leq s\\leq s_{1}\\). By the definition of \\(\\Gamma^{\\epsilon_0}\\), \\(\\tilde{u}_t^{(s)}\\in \\Gamma^{\\epsilon_1}\\), for all \\(0\\leq t\\leq H,0\\leq s\\leq s_{1}\\). By triangle inequality, \\(\\| \\phi^{(s_1)} - \\phi^{(0)}\\| _2\\) can be decomposed as follows." + }, + { + "type": "equation", + "bbox": [ + 0.231, + 0.405, + 0.826, + 0.49 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\phi^ {(s _ {1})} - \\phi^ {(0)} \\| _ {2} \\leq \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\phi^ {(s + 1)} - \\phi^ {(s)} \\| _ {2} \\\\ \\leq \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) - \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}\\right) \\| _ {2} + \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\Phi \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}\\right) - \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) \\| _ {2}. 
\\tag {48} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.497, + 0.591, + 0.512 + ], + "angle": 0, + "content": "By Lemma K.14, when (47) hold, then for all \\(0 \\leq s < s_1 - 1\\)," + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.52, + 0.619, + 0.555 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\pmb {\\theta}} ^ {(s + 1)} - \\tilde {\\pmb {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {1}}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.563, + 0.825, + 0.599 + ], + "angle": 0, + "content": "This implies that \\(\\bar{\\pmb{\\theta}}^{(s + 1)}\\in B^{\\epsilon_1}(\\tilde{\\pmb{u}}_H^{(s)})\\). Since for all \\(\\pmb {\\theta}\\in \\Gamma^{\\epsilon_2}\\), \\(\\| \\partial \\Phi (\\pmb {\\theta})\\| _2\\leq \\nu_1\\), then \\(\\Phi (\\cdot)\\) is \\(\\nu_{1}\\)-Lipschitz in \\(B^{\\epsilon_1}(\\tilde{\\pmb{u}}_H^{(s)})\\). This gives" + }, + { + "type": "equation", + "bbox": [ + 0.337, + 0.606, + 0.826, + 0.664 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) - \\Phi (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) \\| _ {2} \\leq \\nu_ {1} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\\\ \\leq \\nu_ {1} \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {1}}{\\eta \\delta}}. \\tag {49} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.672, + 0.825, + 0.709 + ], + "angle": 0, + "content": "Then we analyze \\(\\| \\bar{\\pmb{\\theta}}^{(s + 1)} - \\tilde{\\pmb{u}}_H^{(s)}\\| _2\\). By Lemma K.9 and the definition of \\(\\Gamma^{\\epsilon_0}\\) and \\(\\Gamma^{\\epsilon_1}\\), there exists \\(\\phi \\in \\Gamma\\) such that \\(\\tilde{\\pmb{u}}_t^{(s)}\\in B^{\\epsilon_1}(\\phi),\\forall 0\\leq t\\leq H\\). 
Therefore, we can expand \\(\\Phi (\\tilde{\\pmb{u}}_{t + 1}^{(s)})\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.221, + 0.716, + 0.775, + 0.802 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)}\\right) = \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\\\ = \\Phi (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\eta \\partial \\Phi (\\tilde {\\boldsymbol {u}} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)}) + \\frac {\\eta^ {2}}{2} \\partial^ {2} \\Phi (\\hat {\\boldsymbol {u}} _ {t} ^ {(s)}) [ \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}), \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) ] \\\\ = \\Phi (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) + \\frac {\\eta^ {2}}{2} \\partial^ {2} \\Phi \\left(c _ {t} ^ {(s)} \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} + (1 - c _ {t} ^ {(s)}) \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)}\\right) [ \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}), \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) ], \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.81, + 0.405, + 0.829 + ], + "angle": 0, + "content": "where \\(c_t^{(s)} \\in (0,1)\\). 
Then we have" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.837, + 0.815, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\Phi (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) - \\Phi (\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}) \\| _ {2} \\leq \\frac {\\eta^ {2}}{2} \\sum_ {t = 0} ^ {H - 1} \\| \\partial^ {2} \\Phi \\left(\\left(c _ {t} ^ {(s)} \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} + (1 - c _ {t} ^ {(s)}) \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)}\\right)\\right) [ \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} ^ {(s)}), \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) ] \\| _ {2} \\\\ \\leq \\frac {\\eta^ {2}}{2} \\nu_ {2} \\sum_ {t = 0} ^ {H - 1} \\| \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) \\| _ {2} ^ {2}. \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "45" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.102, + 0.616, + 0.122 + ], + "angle": 0, + "content": "By Lemma K.6, \\(\\frac{\\eta}{2}\\| \\nabla \\mathcal{L}(\\tilde{\\boldsymbol{u}}_t^{(s)})\\| _2^2\\leq \\mathcal{L}(\\tilde{\\boldsymbol{u}}_t^{(s)}) - \\mathcal{L}(\\tilde{\\boldsymbol{u}}_{t + 1}^{(s)})\\) . 
Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.128, + 0.824, + 0.213 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) - \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}\\right) \\right\\| _ {2} \\leq \\eta \\nu_ {2} \\left(\\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}\\right) - \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right)\\right) \\\\ \\leq \\eta \\nu_ {2} \\left[ \\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\right] ^ {2} \\\\ \\leq \\nu_ {2} \\eta \\left[ 2 \\exp (- \\alpha s \\mu) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(0)}) + \\frac {\\tilde {C} _ {5} ^ {2} \\eta}{(1 - \\exp (- \\alpha \\mu / 2)) ^ {2}} \\log \\frac {s _ {1}}{\\eta \\delta} \\right], \\tag {50} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.218, + 0.825, + 0.246 + ], + "angle": 0, + "content": "where the last inequality uses Cauchy-Schwartz inequality and Lemma K.17. Summing up (50), we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.251, + 0.824, + 0.326 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\Phi (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) - \\Phi (\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}) \\| _ {2} \\leq \\nu_ {2} \\eta \\left[ 2 \\tilde {\\Psi} (\\tilde {\\boldsymbol {\\theta}} ^ {(0)}) \\sum_ {s = 0} ^ {s _ {1} - 1} \\exp (- \\alpha \\mu s) + \\frac {s _ {1} \\tilde {C} _ {5} ^ {2} \\eta}{(1 - \\exp (- \\alpha \\mu / 2)) ^ {2}} \\log \\frac {s _ {1}}{\\eta \\delta} \\right] \\\\ \\leq \\tilde {C} _ {7} \\eta \\log \\frac {1}{\\eta} \\log \\frac {1}{\\eta \\delta}, \\tag {51} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.333, + 0.782, + 0.35 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_7\\) is a constant. 
Substituting (49) and (51) into (48), for sufficiently small \\(\\eta\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.304, + 0.356, + 0.692, + 0.426 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\phi^ {(s _ {1})} - \\phi^ {(0)} \\right\\| _ {2} \\leq \\nu_ {1} \\tilde {C} _ {3} s _ {1} \\sqrt {\\eta \\log \\frac {s _ {1}}{\\eta \\delta}} + \\tilde {C} _ {7} \\eta \\log \\frac {1}{\\eta} \\log \\frac {1}{\\eta \\delta} \\\\ \\leq \\tilde {C} _ {8} \\log \\frac {1}{\\eta} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.825, + 0.462 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_8\\) is a constant. Finally, according to Lemma K.12, (47) holds with probability at least \\(1 - \\delta\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.48, + 0.582, + 0.494 + ], + "angle": 0, + "content": "K.6 PHASE 2:ITERATES STAYING CLOSE TO MANIFOLD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.503, + 0.825, + 0.537 + ], + "angle": 0, + "content": "In this subsection, we show that \\(\\| \\pmb{x}_{k,t}^{(s)}\\| _2 = \\tilde{\\mathcal{O}} (\\sqrt{\\eta})\\) and \\(\\| \\bar{\\pmb{\\theta}}^{(s + r)} - \\bar{\\pmb{\\theta}}^{(s)}\\| _2 = \\tilde{\\mathcal{O}} (\\eta^{0.5 - 0.5\\beta}),\\forall 0\\leq r\\leq R_{\\mathrm{grp}}\\) with high probability." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.551, + 0.408, + 0.564 + ], + "angle": 0, + "content": "K.6.1 ADDITIONAL NOTATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.573, + 0.825, + 0.604 + ], + "angle": 0, + "content": "Before presenting the lemmas, we define the following martingale \\(\\{\\pmb{m}_{k,t}^{(s)}\\}_{t = 0}^{H}\\) that will be useful in the proof:" + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.605, + 0.606, + 0.645 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {m} _ {k, t} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t - 1} \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\quad \\boldsymbol {m} _ {k, 0} = \\mathbf {0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.649, + 0.538, + 0.665 + ], + "angle": 0, + "content": "We also define \\(\\tilde{P}:\\mathbb{R}^d\\to \\mathbb{R}^{d\\times d}\\) as an extension of \\(\\partial \\Phi\\)" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.671, + 0.605, + 0.705 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {P}} (\\boldsymbol {\\theta}) := \\left\\{ \\begin{array}{l l} \\partial \\Phi (\\boldsymbol {\\theta}), & \\text {if } \\boldsymbol {\\theta} \\in \\Gamma^ {\\epsilon_ {2}}, \\\\ \\mathbf {0}, & \\text {otherwise}. 
\\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.714, + 0.564, + 0.732 + ], + "angle": 0, + "content": "Finally, we define a martingale \\(\\{Z_t^{(s)}: s \\geq 0, 0 \\leq t \\leq H\\}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.739, + 0.767, + 0.782 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {Z} _ {t} ^ {(s)} := \\frac {1}{K} \\sum_ {k \\in [ K ]} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\tilde {\\boldsymbol {P}} (\\bar {\\boldsymbol {\\theta}} ^ {(r)}) \\boldsymbol {z} _ {k, t} ^ {(r)} + \\frac {1}{K} \\sum_ {k \\in [ K ]} \\sum_ {\\tau = 0} ^ {t - 1} \\tilde {\\boldsymbol {P}} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\boldsymbol {z} _ {k, t} ^ {(s)}, \\quad \\boldsymbol {Z} _ {0} ^ {(0)} = \\mathbf {0}.\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.797, + 0.552, + 0.81 + ], + "angle": 0, + "content": "K.6.2 PROOF FOR THE HIGH PROBABILITY BOUNDS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.821, + 0.714, + 0.836 + ], + "angle": 0, + "content": "A direct application of Azuma-Hoeffding's inequality yields the following lemma." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.841, + 0.825, + 0.86 + ], + "angle": 0, + "content": "Lemma K.19 (Concentration property of \\( m_{k,t}^{(s)} \\)). With probability at least \\( 1 - \\delta \\), the following holds:" + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.868, + 0.721, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {9} \\sqrt {\\frac {1}{\\eta} \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < R _ {\\mathrm {g r p}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.908, + 0.329, + 0.925 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_9\\) is a constant." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "46" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.102, + 0.75, + 0.123 + ], + "angle": 0, + "content": "Proof. Notice that \\(\\| \\pmb{m}_{k,t + 1}^{(s)} - \\pmb{m}_{k,t}^{(s)}\\| _2\\leq \\sigma_{\\max}\\). Then by Azuma-Hoeffding's inequality," + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.131, + 0.635, + 0.166 + ], + "angle": 0, + "content": "\\[\n\\mathbb {P} \\left(\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2} \\geq \\epsilon^ {\\prime}\\right) \\leq 2 \\exp \\left(- \\frac {\\epsilon^ {\\prime 2}}{2 t \\sigma_ {\\max } ^ {2}}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.173, + 0.825, + 0.203 + ], + "angle": 0, + "content": "Taking union bound on \\( K \\) clients, \\( H \\) local steps and \\( R_{\\mathrm{grp}} \\) rounds, we obtain that the following inequality holds with probability at least \\( 1 - \\delta \\):" + }, + { + "type": "equation", + "bbox": [ + 0.235, + 0.211, + 0.761, + 0.245 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 K H R _ {\\mathrm {g r p}}}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < R _ {\\mathrm {g r p}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.254, + 0.573, + 0.274 + ], + "angle": 0, + "content": "Substituting in \\(H = \\frac{\\alpha}{\\eta}\\) and \\(R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}}\\right\\rfloor\\) yields the lemma." 
+ }, + { + "type": "image", + "bbox": [ + 0.808, + 0.255, + 0.824, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.293, + 0.825, + 0.326 + ], + "angle": 0, + "content": "Again applying Azuma-Hoeffding's inequality, we have the following lemma about the concentration property of \\( Z_{t}^{(s)} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.332, + 0.825, + 0.363 + ], + "angle": 0, + "content": "Lemma K.20 (Concentration property of \\( Z_{t}^{(s)} \\)). With probability at least \\( 1 - \\delta \\), the following inequality holds:" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.371, + 0.682, + 0.406 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {Z} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1 2} \\eta^ {- 0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.426, + 0.825, + 0.459 + ], + "angle": 0, + "content": "Proof. Notice that \\(\\| \\mathbf{Z}_{t + 1}^{(s)} - \\mathbf{Z}_t^{(s)}\\| _2\\leq \\nu_2\\sigma_{\\max},\\forall 0\\leq t\\leq H - 1\\) and \\(\\| \\mathbf{Z}_0^{(s + 1)} - \\mathbf{Z}_H^{(s)}\\| _2\\leq \\nu_2\\sigma_{\\max}\\). 
By Azuma-Hoeffding's inequality," + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.467, + 0.67, + 0.502 + ], + "angle": 0, + "content": "\\[\n\\mathbb {P} (\\| \\pmb {Z} _ {t} ^ {(s)} \\| _ {2} \\geq \\epsilon^ {\\prime}) \\leq 2 \\exp \\left(- \\frac {\\epsilon^ {\\prime 2}}{2 (s H + t) \\nu_ {2} ^ {2} \\sigma_ {\\mathrm {m a x}} ^ {2}}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.509, + 0.825, + 0.537 + ], + "angle": 0, + "content": "Taking union bound on \\( R_{\\mathrm{grp}} \\) rounds, we obtain that the following inequality holds with probability at least \\( 1 - \\delta \\):" + }, + { + "type": "equation", + "bbox": [ + 0.298, + 0.545, + 0.7, + 0.579 + ], + "angle": 0, + "content": "\\[\n\\| Z _ {H} ^ {(s)} \\| _ {2} \\leq \\sigma_ {\\max } \\nu_ {2} \\sqrt {2 H R _ {\\mathrm {g r p}} \\log \\frac {2 R _ {\\mathrm {g r p}}}{\\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.588, + 0.573, + 0.607 + ], + "angle": 0, + "content": "Substituting in \\(H = \\frac{\\alpha}{\\eta}\\) and \\(R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}}\\right\\rfloor\\) yields the lemma." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.589, + 0.824, + 0.602 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.825, + 0.657 + ], + "angle": 0, + "content": "We proceed to present a direct corollary of Lemma K.17 which provides a bound for the potential function over \\( R_{\\mathrm{grp}} \\) rounds." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.663, + 0.825, + 0.702 + ], + "angle": 0, + "content": "Lemma K.21. 
Given \\(\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\| _2\\leq C_0\\sqrt{\\eta\\log\\frac{1}{\\eta}}\\) where \\(C_0\\) is a constant, then for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), with probability at least \\(1 - \\delta\\)," + }, + { + "type": "equation", + "bbox": [ + 0.301, + 0.71, + 0.825, + 0.745 + ], + "angle": 0, + "content": "\\[\n\\bar {\\boldsymbol {\\theta}} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {0}}, \\quad \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, \\tag {52}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.752, + 0.204, + 0.765 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.232, + 0.773, + 0.826, + 0.808 + ], + "angle": 0, + "content": "\\[\n\\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, 0 \\leq t \\leq H, k \\in [ K ], \\tag {53}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.481, + 0.83 + ], + "angle": 0, + "content": "where \\(C_1\\) is a constant that can depend on \\(C_0\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.842, + 0.264, + 0.856 + ], + "angle": 0, + "content": "Furthermore," + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.865, + 0.6, + 0.9 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(R _ {\\mathrm {g r p}})}) \\leq \\tilde {C} _ {1 0} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.908, + 0.454, + 0.926 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_{10}\\) is a constant independent of \\(C_0\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "47" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.166 + ], + "angle": 0, + "content": "Proof. By \\(\\rho_{2}\\)-smoothness of \\(\\mathcal{L}\\), \\(\\tilde{\\Psi}(\\bar{\\pmb{\\theta}}^{(0)}) \\leq C_0 \\sqrt{\\frac{\\eta \\rho_2}{2} \\log \\frac{1}{\\eta}}\\). Substituting \\(R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha \\eta^{\\beta}} \\right\\rfloor\\) and \\(\\tilde{\\Psi}(\\bar{\\pmb{\\theta}}^{(0)}) \\leq C_0 \\sqrt{\\frac{\\eta \\rho_2}{2} \\log \\frac{1}{\\eta}}\\) into Lemma K.17, for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), with probability at least \\(1 - \\delta\\), (52) and (53) hold, where \\(C_1\\) is a constant that can depend on \\(C_0\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.17, + 0.383, + 0.186 + ], + "angle": 0, + "content": "Furthermore, for round \\(\\bar{\\theta}^{(R_{\\mathrm{grp}})}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.195, + 0.787, + 0.236 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(R _ {\\mathrm {g r p}})}) \\leq \\exp (- \\mathcal {O} (\\eta^ {- \\beta})) + \\frac {1}{1 - \\exp (- \\alpha \\mu / 2)} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R _ {\\mathrm {g r p}}}{\\eta \\delta}} \\leq \\tilde {C} _ {1 0} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.244, + 0.453, + 0.261 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_{10}\\) is a constant independent of \\(C_0\\)." 
+ }, + { + "type": "image", + "bbox": [ + 0.808, + 0.246, + 0.824, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.312 + ], + "angle": 0, + "content": "Lemma K.22. Given \\( \\| \\bar{\\theta}^{(0)} - \\phi^{(0)} \\|_2 \\leq C_0 \\sqrt{\\eta \\log \\frac{1}{\\eta}} \\) where \\( C_0 \\) is a constant, then for \\( \\delta = \\mathcal{O}(\\mathrm{poly}(\\eta)) \\), with probability at least \\( 1 - \\delta \\), for all \\( 0 \\leq s < R_{\\mathrm{grp}}, 0 \\leq t \\leq H, k \\in [K] \\)," + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.319, + 0.732, + 0.389 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\boldsymbol {x} _ {k, t} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\| \\bar {\\boldsymbol {x}} _ {H} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\\\ \\| \\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.396, + 0.553, + 0.411 + ], + "angle": 0, + "content": "where \\(C_2\\) is a constant that can depend on \\(C_0\\). 
Furthermore," + }, + { + "type": "equation", + "bbox": [ + 0.364, + 0.419, + 0.635, + 0.453 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\pmb {\\theta}} ^ {(R _ {\\mathrm {g r p}})} - \\pmb {\\phi} ^ {(R _ {\\mathrm {g r p}})} \\| _ {2} \\leq \\tilde {C} _ {1 1} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.461, + 0.459, + 0.479 + ], + "angle": 0, + "content": "where \\(\\tilde{C}_{11}\\) is a constant independent of \\(C_0\\)" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.496, + 0.554, + 0.515 + ], + "angle": 0, + "content": "Proof. Decomposing \\( \\boldsymbol{x}_{k,t}^{(s)} \\) by triangle inequality, we have" + }, + { + "type": "equation", + "bbox": [ + 0.349, + 0.524, + 0.647, + 0.546 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {x} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} + \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {\\phi} ^ {(s)} \\| _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.554, + 0.825, + 0.588 + ], + "angle": 0, + "content": "We first bound \\( \\| \\bar{\\theta}^{(s)} - \\phi^{(s)} \\|_2 \\). 
By Lemma K.21, for \\( \\delta = \\mathcal{O}(\\mathrm{poly}(\\eta)) \\), with probability at least \\( 1 - \\frac{\\delta}{2} \\)," + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.596, + 0.825, + 0.629 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {2}{\\eta \\delta}}, \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, \\tag {54}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.311, + 0.632, + 0.825, + 0.666 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {2}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, 0 \\leq t \\leq H, \\tag {55}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.673, + 0.202, + 0.686 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.694, + 0.825, + 0.728 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {\\left(R _ {\\mathrm {g r p}}\\right)}\\right) \\leq \\tilde {C} _ {1 0} \\sqrt {\\eta \\log \\frac {2}{\\eta \\delta}}, \\tag {56}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.825, + 0.766 + ], + "angle": 0, + "content": "where \\( C_2 \\) is a constant that may depend on \\( C_0 \\) and \\( \\tilde{C}_{10} \\) is a constant independent of \\( C_0 \\). 
When (54) and (56) hold, by Lemma K.10," + }, + { + "type": "equation", + "bbox": [ + 0.338, + 0.774, + 0.825, + 0.808 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\phi^ {(s)} \\right\\| _ {2} \\leq \\sqrt {\\frac {2}{\\mu}} \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\frac {2 \\eta}{\\mu} \\log \\frac {2}{\\eta \\delta}}, \\tag {57}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.81, + 0.825, + 0.844 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {\\left(R _ {\\mathrm {g r p}}\\right)} - \\phi^ {\\left(R _ {\\mathrm {g r p}}\\right)} \\right\\| _ {2} \\leq \\sqrt {\\frac {2}{\\mu}} \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {\\left(R _ {\\mathrm {g r p}}\\right)}\\right) \\leq \\tilde {C} _ {1 0} \\sqrt {\\frac {2 \\eta}{\\mu} \\log \\frac {2}{\\eta \\delta}}. \\tag {58}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.852, + 0.573, + 0.873 + ], + "angle": 0, + "content": "Then we bound \\(\\| \\pmb{\\theta}_{k,t}^{(s)} - \\bar{\\pmb{\\theta}}^{(s)}\\| _2\\) . 
By the update rule, we have" + }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.881, + 0.763, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} _ {k, t} ^ {(s)} = \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}) - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\boldsymbol {z} _ {k, \\tau} ^ {(s)} = \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}) - \\eta \\boldsymbol {m} _ {k, t} ^ {(s)}.\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "48" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.411, + 0.12 + ], + "angle": 0, + "content": "Still by triangle inequality, we have" + }, + { + "type": "equation", + "bbox": [ + 0.328, + 0.129, + 0.67, + 0.171 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\leq \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}) \\| _ {2} + \\eta \\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.18, + 0.474, + 0.195 + ], + "angle": 0, + "content": "Due to \\(\\rho_{2}\\)-smoothness of \\(\\mathcal{L}\\), when (55) holds," + }, + { + "type": "equation", + "bbox": [ + 0.323, + 0.205, + 0.825, + 0.239 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}\\right) \\right\\| _ {2} \\leq \\sqrt {2 \\rho_ {2}} \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {2 \\rho_ {2} \\eta \\log \\frac {2}{\\eta 
\\delta}}. \\tag {59}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.247, + 0.497, + 0.267 + ], + "angle": 0, + "content": "By Lemma K.19, with probability at least \\( 1 - \\frac{\\delta}{2} \\)," + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.276, + 0.825, + 0.311 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {9} \\sqrt {\\frac {1}{\\eta} \\log \\frac {2}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < R _ {\\mathrm {g r p}}. \\tag {60}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.318, + 0.825, + 0.348 + ], + "angle": 0, + "content": "Combining (59) and (60), when (55) and (56) hold simultaneously, there exists a constant \\( C_3 \\) which can depend on \\( C_0 \\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.307, + 0.357, + 0.826, + 0.391 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\right\\| _ {2} \\leq C _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H. \\tag {61}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.399, + 0.324, + 0.415 + ], + "angle": 0, + "content": "By triangle inequality," + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.425, + 0.619, + 0.459 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\pmb {\\theta}} ^ {(s + 1)} - \\bar {\\pmb {\\theta}} ^ {(s)} \\| _ {2} \\leq C _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.466, + 0.536, + 0.482 + ], + "angle": 0, + "content": "Combining (57), (58) and (61), we complete the proof." 
+ }, + { + "type": "image", + "bbox": [ + 0.808, + 0.467, + 0.825, + 0.479 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.506, + 0.75, + 0.523 + ], + "angle": 0, + "content": "Then we provide high probability bounds for the movement of \\(\\phi^{(s)}\\) within \\(R_{\\mathrm{grp}}\\) rounds." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.529, + 0.825, + 0.567 + ], + "angle": 0, + "content": "Lemma K.23. Given \\( \\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\|_2 \\leq C_0\\sqrt{\\eta\\log\\frac{1}{\\eta}} \\) where \\( C_0 \\) is a constant, then for \\( \\delta = \\mathcal{O}(\\mathrm{poly}(\\eta)) \\), with probability at least \\( 1 - \\delta \\)," + }, + { + "type": "equation", + "bbox": [ + 0.299, + 0.577, + 0.698, + 0.611 + ], + "angle": 0, + "content": "\\[\n\\| \\phi^ {(s)} - \\phi^ {(0)} \\| _ {2} \\leq C _ {4} \\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 1 \\leq s \\leq R _ {\\mathrm {g r p}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.619, + 0.481, + 0.634 + ], + "angle": 0, + "content": "where \\(C_4\\) is a constant that can depend on \\(C_0\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.658, + 0.446, + 0.673 + ], + "angle": 0, + "content": "Proof. 
By the update rule of Local SGD," + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.683, + 0.652, + 0.725 + ], + "angle": 0, + "content": "\\[\n\\pmb {\\theta} _ {k, H} ^ {(s)} = \\bar {\\pmb {\\theta}} ^ {(s)} - \\eta \\sum_ {t = 0} ^ {H - 1} \\nabla \\mathcal {L} (\\pmb {\\theta} _ {k, t} ^ {(s)}) - \\eta \\sum_ {t = 0} ^ {H - 1} \\pmb {z} _ {k, t} ^ {(s)}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.733, + 0.398, + 0.749 + ], + "angle": 0, + "content": "Averaging among \\(K\\) clients gives" + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.758, + 0.705, + 0.803 + ], + "angle": 0, + "content": "\\[\n\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} = \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\frac {\\eta}{K} \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) - \\frac {\\eta}{K} \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\boldsymbol {z} _ {k, t} ^ {(s)}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.812, + 0.78, + 0.829 + ], + "angle": 0, + "content": "By Lemma K.22, for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), the following holds with probability at least \\(1 - \\delta / 3\\)," + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.839, + 0.824, + 0.885 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\right\\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {3}{\\eta \\delta}}, \\quad \\boldsymbol {\\theta} _ {k, t} ^ {(s)} \\in B ^ {\\epsilon_ {0}} \\left(\\phi^ {(s)}\\right), \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, 0 \\leq t \\leq H, k \\in [ K ], \\tag {62}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.888, + 0.825, + 0.923 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\right\\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {3}{\\eta \\delta}}, \\quad \\bar 
{\\boldsymbol {\\theta}} ^ {(s)}, \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} \\in B ^ {\\epsilon_ {0}} \\left(\\phi^ {(s)}\\right), \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}. \\tag {63}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "49" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.103, + 0.592, + 0.12 + ], + "angle": 0, + "content": "When (62) and (63) hold, we can expand \\(\\Phi (\\bar{\\theta}^{(s + 1)})\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.13, + 0.786, + 0.283 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\phi^ {(s + 1)} = \\phi^ {(s)} + \\partial \\Phi (\\bar {\\theta} ^ {(s)}) (\\bar {\\theta} ^ {(s + 1)} - \\bar {\\theta} ^ {(s)}) + \\frac {1}{2} \\partial^ {2} \\Phi (\\tilde {\\theta} ^ {(s)}) [ \\bar {\\theta} ^ {(s + 1)} - \\bar {\\theta} ^ {(s)}, \\bar {\\theta} ^ {(s + 1)} - \\bar {\\theta} ^ {(s)} ] \\\\ = \\phi^ {(s)} \\underbrace {- \\frac {\\eta}{K} \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\partial \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k , t} ^ {(s)})} _ {\\mathcal {T} _ {1} ^ {(s)}} \\underbrace {- \\frac {\\eta}{K} \\partial \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} z _ {k , t} ^ {(s)}} _ {\\mathcal {T} _ {2} ^ {(s)}} \\\\ + \\underbrace {\\frac {1}{2} \\partial^ {2} \\Phi (a ^ {(s)} \\bar {\\boldsymbol {\\theta}} ^ {(s)} + (1 - a ^ {(s)}) \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) [ \\boldsymbol {\\theta} ^ {(s + 1)} - \\boldsymbol {\\theta} ^ {(s)} , \\boldsymbol {\\theta} ^ {(s + 1)} - \\boldsymbol {\\theta} ^ {(s)} ]} _ {\\mathcal {T} _ {3} ^ {(s)}}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.294, + 0.6, + 
0.31 + ], + "angle": 0, + "content": "where \\(a^{(s)}\\in (0,1)\\). Telescoping from round 0 to \\(s - 1\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.328, + 0.32, + 0.67, + 0.361 + ], + "angle": 0, + "content": "\\[\n\\| \\phi^ {(s)} - \\phi^ {(0)} \\| _ {2} = \\sum_ {r = 0} ^ {s - 1} \\mathcal {T} _ {1} ^ {(r)} + \\sum_ {r = 0} ^ {s - 1} \\mathcal {T} _ {2} ^ {(r)} + \\sum_ {r = 0} ^ {s - 1} \\mathcal {T} _ {3} ^ {(r)}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.379, + 0.825, + 0.412 + ], + "angle": 0, + "content": "From (63), we can bound \\(\\| \\mathcal{T}_3^{(s)}\\| _2\\) by \\(\\| \\mathcal{T}_3^{(s)}\\| _2\\leq \\frac{1}{2}\\nu_2C_2^2\\eta \\log \\frac{3}{\\eta\\delta}\\). We proceed to bound \\(\\| \\mathcal{T}_1^{(s)}\\| _2\\). When (62) and (63) hold, we have" + }, + { + "type": "equation", + "bbox": [ + 0.242, + 0.422, + 0.755, + 0.466 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) = \\partial \\Phi (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) + \\partial^ {2} \\Phi (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) [ \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)}, \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) ] \\\\ = \\partial^ {2} \\Phi (b _ {k, t} ^ {(s)} \\bar {\\boldsymbol {\\theta}} ^ {(s)} + (1 - b _ {k, t} ^ {(s)}) \\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) [ \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)}, \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) ], \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.477, + 0.778, + 0.496 + ], + "angle": 0, + "content": "where \\( b_{k,t}^{(s)} \\in (0,1) \\). 
By Lemma K.17, with probability at least \\( 1 - \\delta /3 \\), the following holds:" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.508, + 0.824, + 0.54 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\right\\| _ {2} \\leq \\sqrt {2 \\rho_ {2}} \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {2 \\rho_ {2} \\eta \\log \\frac {3}{\\eta \\delta}}, \\forall k \\in [ K ], 0 \\leq t \\leq H, 0 \\leq s < R _ {\\mathrm {g r p}}. \\tag {64}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.548, + 0.68, + 0.565 + ], + "angle": 0, + "content": "When (62), (63) and (64) hold simultaneously, we have for all \\(0 \\leq s < R_{\\mathrm{grp}}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.339, + 0.576, + 0.66, + 0.651 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\mathcal {T} _ {1} ^ {(s)} \\| _ {2} \\leq \\frac {\\eta \\nu_ {2}}{K} \\sum_ {t = 0} ^ {H - 1} \\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) \\| _ {2} \\\\ \\leq \\frac {\\alpha \\nu_ {2} \\sqrt {2 \\rho_ {2}} C _ {1} C _ {2}}{K} \\eta \\log \\frac {3}{\\eta \\delta}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.669, + 0.825, + 0.701 + ], + "angle": 0, + "content": "Finally, we bound \\(\\| \\sum_{r = 0}^{s - 1}\\mathcal{T}_2^{(r)}\\| _2\\). By Lemma K.20, the following inequality holds with probability at least \\(1 - \\delta /3\\):" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.711, + 0.824, + 0.744 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\boldsymbol {Z} _ {H} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1 2} \\eta^ {- 0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {3}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}. 
\\tag {65}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.752, + 0.538, + 0.767 + ], + "angle": 0, + "content": "When (62), (63) and (65) hold simultaneously, we have" + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.778, + 0.731, + 0.815 + ], + "angle": 0, + "content": "\\[\n\\| \\sum_ {r = 0} ^ {s} \\mathcal {T} _ {2} ^ {(r)} \\| _ {2} = \\eta \\| \\boldsymbol {Z} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1 2} \\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {3}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.826, + 0.825, + 0.859 + ], + "angle": 0, + "content": "Combining the bounds for \\( \\| \\mathcal{T}_1^{(s)}\\| _2, \\| \\sum_{r = 0}^s\\mathcal{T}_2^{(r)}\\| _2 \\) and \\( \\| \\mathcal{T}_3^{(s)}\\| _2 \\) and taking union bound, we obtain that for \\( \\delta = \\mathcal{O}(\\mathrm{poly}(\\eta)) \\), the following inequality holds with probability at least \\( 1 - \\delta \\):" + }, + { + "type": "equation", + "bbox": [ + 0.302, + 0.869, + 0.697, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {\\phi} ^ {(s)} - \\boldsymbol {\\phi} ^ {(0)} \\| _ {2} \\leq C _ {4} \\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 1 \\leq s \\leq R _ {\\mathrm {g r p}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.478, + 0.925 + ], + "angle": 0, + "content": "where \\(C_4\\) is a constant that can depend on \\(C_0\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "50" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.704, + 0.119 + ], + "angle": 0, + "content": "K.7 SUMMARY OF THE DYNAMICS AND PROOF OF THEOREMS J.1 AND J.2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.825, + 0.174 + ], + "angle": 0, + "content": "Based on the results in Appendix K.5 and Appendix K.6, we summarize the dynamics of Local SGD iterates and then present the proof of Theorems J.1 and J.2 in this subsection. For convenience, we first introduce the definition of global step and \\(\\delta\\)-good step." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.174, + 0.825, + 0.218 + ], + "angle": 0, + "content": "Definition K.3 (Global step). Define \\(\\mathcal{I}\\) as the index set \\(\\{(s,t):s\\geq 0,0\\leq t\\leq H\\}\\) with lexicographical order, which means \\((s_1,t_1)\\preceq (s_2,t_2)\\) if and only if \\(s_1 < s_2\\) or \\((s_{1} = s_{2}\\) and \\(t_1\\leq t_2)\\). A global step is indexed by \\((s,t)\\) corresponding to the \\(t\\)-th local step at round \\(s\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.219, + 0.825, + 0.249 + ], + "angle": 0, + "content": "Definition K.4 (\\(\\delta\\)-good step). 
In the training process of Local SGD, we say the global step \\((s,t) \\preceq (R_{\\mathrm{tot}},0)\\) is \\(\\delta\\)-good if the following inequalities hold:" + }, + { + "type": "equation", + "bbox": [ + 0.226, + 0.251, + 0.776, + 0.285 + ], + "angle": 0, + "content": "\\[\n\\| \\tilde {\\mathbf {Z}} _ {k, \\tau} ^ {(r)} \\| _ {2} \\leq \\exp (\\alpha \\rho_ {2}) \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {6 H R _ {\\operatorname* {t o t}} K}{\\delta}}, \\quad \\forall k \\in [ K ], (r, \\tau) \\preceq (s, t),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.223, + 0.287, + 0.776, + 0.32 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {m} _ {k, \\tau} ^ {(r)} \\| _ {2} \\leq \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {6 K H R _ {\\mathrm {t o t}}}{\\delta}}, \\quad \\forall k \\in [ K ], (r, \\tau) \\preceq (s, t),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.228, + 0.322, + 0.776, + 0.356 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {Z} _ {H} ^ {(r)} \\| _ {2} \\leq \\sigma_ {\\max } \\nu_ {2} \\sqrt {2 H R _ {\\mathrm {g r p}} \\log \\frac {2 R _ {\\mathrm {t o t}}}{\\delta}}, \\quad \\forall 0 \\leq r < s.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.367, + 0.825, + 0.4 + ], + "angle": 0, + "content": "Applying the concentration properties of \\(\\tilde{\\pmb{Z}}_{k,\\tau}^{(r)},\\pmb{m}_{k,\\tau}^{(r)}\\) and \\(\\pmb{Z}_H^{(r)}\\) (Lemmas K.20, K.19 and K.12) yields the following theorem." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.401, + 0.825, + 0.431 + ], + "angle": 0, + "content": "Theorem K.1. For \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), with probability at least \\(1 - \\delta\\), all global steps \\((s,t) \\preceq (R_{\\mathrm{tot}},0)\\) are \\(\\delta\\)-good." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.439, + 0.82, + 0.457 + ], + "angle": 0, + "content": "In the remainder of this subsection, we use \\(\\mathcal{O}(\\cdot)\\) notation to hide constants independent of \\(\\delta\\) and \\(\\eta\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.462, + 0.825, + 0.569 + ], + "angle": 0, + "content": "Below we present a summary of the dynamics of Local SGD when \\(\\bar{\\theta}^{(0)}\\) is initialized such that \\(\\Phi (\\bar{\\theta}^{(0)})\\in \\Gamma\\) and all global steps are \\(\\delta\\)-good. Phase 1 lasts for \\(s_0 + s_1 = \\mathcal{O}(\\log \\frac{1}{\\eta})\\) rounds. At the end of phase 1, the iterate reaches within \\(\\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})\\) from \\(\\Gamma\\), i.e., \\(\\| \\bar{\\pmb{\\theta}}^{(s_0 + s_1)} - \\pmb {\\phi}^{(s_0 + s_1)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})\\). The change of the projection on manifold over \\(s_0 + s_1\\) rounds, \\(\\| \\phi^{(s_1 + s_0)} - \\phi^{(0)}\\| _2\\) is bounded by \\(\\mathcal{O}(\\log \\frac{1}{\\eta}\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.576, + 0.825, + 0.631 + ], + "angle": 0, + "content": "After \\(s_0 + s_1\\) rounds, the dynamic enters phase 2 when the iterates stay close to \\(\\Gamma\\) with \\(\\bar{\\theta}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}}\\) and \\(\\pmb{\\theta}_{k,t}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall k \\in [K], (s_0 + s_1, 0) \\preceq (s,t) \\preceq (R_{\\mathrm{tot}}, 0)\\). 
Furthermore, \\(\\| \\pmb{x}_{k,t}^{(s)} \\|_2\\) and \\(\\| \\bar{\\pmb{x}}_H^{(s)} \\|_2\\) satisfy the following equations:" + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.635, + 0.76, + 0.66 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {x} _ {k, t} ^ {(s)} \\| _ {2} = \\mathcal {O} (\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}), \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, s _ {0} + s _ {1} \\leq s < R _ {\\mathrm {t o t}},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.664, + 0.76, + 0.688 + ], + "angle": 0, + "content": "\\[\n\\| \\tilde {\\boldsymbol {x}} _ {H} ^ {(s)} \\| _ {2} = \\mathcal {O} (\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}), \\quad \\forall s _ {0} + s _ {1} \\leq s < R _ {\\mathrm {t o t}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.69, + 0.825, + 0.717 + ], + "angle": 0, + "content": "Moreover, for \\( s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}} - R_{\\mathrm{grp}} \\), the change of the manifold projection within \\( R_{\\mathrm{grp}} \\) rounds can be bounded as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.72, + 0.709, + 0.752 + ], + "angle": 0, + "content": "\\[\n\\| \\phi^ {(s + r)} - \\phi^ {(s)} \\| _ {2} = \\mathcal {O} (\\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}), \\quad \\forall 1 \\leq r \\leq R _ {\\mathrm {g r p}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.825, + 0.797 + ], + "angle": 0, + "content": "After combing through the dynamics of Local SGD iterates during the approaching and drift phase, we are ready to present the proof of Theorems J.1 and J.2, which are direct consequences of the lemmas in Appendix K.5 and K.6." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.81, + 0.826, + 0.866 + ], + "angle": 0, + "content": "Proof of Theorem J.1. 
By Lemmas K.15, K.22 and Corollary K.1, for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), when all global steps are \\(\\delta\\)-good, \\(\\bar{\\pmb{\\theta}}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}}\\) and \\(\\pmb{\\theta}_{k,t}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall k \\in [K], (s_0 + s_1, 0) \\preceq (s,t) \\preceq (R_{\\mathrm{tot}}, 0)\\) and \\(\\| \\pmb{x}_{k,t}^{(s)} \\|_2, \\| \\bar{\\pmb{x}}_H^{(s)} \\|_2\\) satisfy the following equations:" + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.869, + 0.76, + 0.893 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol{x}_{k,t}^{(s)}\\|_{2} = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}}),\\quad \\forall k\\in [K],0\\leq t\\leq H,s_{0} + s_{1}\\leq s < R_{\\text{tot}},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.897, + 0.76, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\boldsymbol {x}} _ {H} ^ {(s)} \\| _ {2} = \\mathcal {O} (\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}), \\quad \\forall s _ {0} + s _ {1} \\leq s < R _ {\\mathrm {t o t}}.\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "51" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.102, + 0.827, + 0.157 + ], + "angle": 0, + "content": "Hence \\(\\| \\tilde{\\pmb{x}}_0^{(R_{\\mathrm{tot}})}\\| _2 = \\mathcal{O}(\\tilde{\\Psi} (\\bar{\\pmb{\\theta}}^{(R_{\\mathrm{tot}})})) = \\mathcal{O}(\\| \\tilde{\\pmb{x}}_H^{(R_{\\mathrm{tot}} - 1)}\\| _2) = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})\\) by smoothness of \\(\\mathcal{L}\\) and Lemma K.10. According to Theorem K.1, with probability at least \\(1 - \\delta\\) , all global steps are \\(\\delta\\) -good, thus completing the proof." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.176, + 0.825, + 0.207 + ], + "angle": 0, + "content": "Proof of Theorem J.2. By Lemma K.23, for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), when all global steps are \\(\\delta\\)-good, then \\(\\forall s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}} - R_{\\mathrm{grp}}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.216, + 0.679, + 0.236 + ], + "angle": 0, + "content": "\\[\n\\| \\phi^ {(s + r)} - \\phi^ {(s)} \\| _ {2} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - 0. 5 \\beta}), \\quad \\forall 0 \\leq r \\leq R _ {\\mathrm {g r p}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.243, + 0.826, + 0.289 + ], + "angle": 0, + "content": "Also, by Lemma K.18, when all global steps are \\(\\delta\\)-good, the change of projection on manifold over \\(s_0 + s_1\\) rounds (i.e., Phase 1), \\(\\| \\phi^{(s_0 + s_1)} - \\phi^{(0)} \\|_2\\) is bounded by \\(\\tilde{\\mathcal{O}}(\\sqrt{\\eta})\\). According to Theorem K.1, with probability at least \\(1 - \\delta\\), all global steps are \\(\\delta\\)-good, thus completing the proof." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.307, + 0.391, + 0.321 + ], + "angle": 0, + "content": "K.8 PROOF OF THEOREM 3.3" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.334, + 0.825, + 0.419 + ], + "angle": 0, + "content": "In this subsection, we explicitly derive the dependency of the approximation error on \\(\\alpha\\). The proofs are quite similar to those in Appendix K.5 and hence we only state the key proof idea for brevity. With the same method as the proofs in Appendix K.5.2, we can show that with high probability, \\(\\| \\bar{\\theta}^{(s)} - \\phi^{(s)}\\|_2 \\leq \\frac{1}{2}\\sqrt{\\frac{\\mu}{\\rho_2}}\\) after \\(s_0' = \\mathcal{O}(1)\\) rounds. Below we focus on the dynamics of Local SGD thereafter. 
We first remind the readers of the definition of \\(\\{\\tilde{Z}_{k,t}^s\\}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.429, + 0.705, + 0.471 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right)\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\tilde {\\boldsymbol {Z}} _ {k, 0} ^ {(s)} = \\boldsymbol {0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.482, + 0.825, + 0.52 + ], + "angle": 0, + "content": "We have the following lemma that controls the norm of the matrix product \\(\\prod_{l = \\tau +1}^{t - 1}(\\boldsymbol {I} - \\eta \\nabla^2\\mathcal{L}(\\tilde{\\boldsymbol{u}}_l^{(s)}))\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.525, + 0.826, + 0.556 + ], + "angle": 0, + "content": "Lemma K.24. Given \\(\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}\\), then there exists a positive constant \\(C_3^\\prime\\) independent of \\(\\alpha\\) such that for all \\(0\\leq \\tau < t\\leq H\\)" + }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.564, + 0.619, + 0.609 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)})) \\right\\| _ {2} \\leq C _ {3} ^ {\\prime}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.632, + 0.825, + 0.671 + ], + "angle": 0, + "content": "Proof. Since \\(\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}\\), then \\(\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_1}\\) for all \\(0\\leq t\\leq H\\). We first bound the minimum eigenvalue of \\(\\nabla^2\\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})\\). 
Due to the PL condition, by Lemma K.6, for \\(\\eta \\leq \\frac{1}{\\rho_2}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.679, + 0.801, + 0.706 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) ^ {t} \\left(\\mathcal {L} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) - \\mathcal {L} ^ {*}\\right) \\leq \\exp (- \\mu t \\eta) (\\mathcal {L} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) - \\mathcal {L} ^ {*}), \\quad \\forall 0 \\leq t \\leq H.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.713, + 0.245, + 0.727 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.384, + 0.737, + 0.612, + 0.757 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) \\leq \\exp (- \\mu t \\eta / 2) \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.768, + 0.443, + 0.793 + ], + "angle": 0, + "content": "Let \\(C_1^\\prime = \\rho_3\\sqrt{\\frac{\\rho_2}{\\mu}}\\) . By Weyl's inequality," + }, + { + "type": "equation", + "bbox": [ + 0.285, + 0.804, + 0.715, + 0.921 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left| \\lambda_ {\\min } \\left(\\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\right| = \\left| \\lambda_ {\\min } \\left(\\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) - \\lambda_ {\\min } \\left(\\nabla^ {2} \\mathcal {L} \\left(\\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\right. 
\\right| \\\\ \\leq \\rho_ {3} \\| \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) - \\nabla^ {2} \\mathcal {L} \\left(\\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\| _ {2} \\\\ \\leq \\rho_ {3} \\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\Phi (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) \\| _ {2} \\\\ \\leq \\rho_ {3} \\sqrt {\\frac {2}{\\mu}} \\exp (- \\mu t \\eta / 2) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\\\ \\leq C _ {1} ^ {\\prime} \\exp (- \\mu t \\eta / 2) \\epsilon_ {0}, \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "52" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "where the last two inequalities use Lemmas K.10 and K.7 respectively. Therefore, for all \\( 0 \\leq t \\leq H \\) and \\( 0 \\leq \\tau \\leq t - 1 \\)," + }, + { + "type": "equation", + "bbox": [ + 0.29, + 0.138, + 0.824, + 0.264 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\| \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\| _ {2} \\leq \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(1 + \\eta \\left| \\lambda_ {\\min } \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right) \\right|\\right) \\\\ \\leq \\prod_ {l = 0} ^ {\\infty} (1 + \\eta | \\lambda_ {\\min } \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}) |) \\\\ \\leq \\exp \\left(\\eta \\epsilon_ {0} C _ {1} ^ {\\prime} \\sum_ {l = 0} ^ {\\infty} \\exp (- \\mu l \\eta / 2)\\right). 
\\tag {66} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.27, + 0.568, + 0.287 + ], + "angle": 0, + "content": "For sufficiently small \\(\\eta\\), there exists a constant \\(C_2'\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.292, + 0.825, + 0.331 + ], + "angle": 0, + "content": "\\[\n\\sum_ {l = 0} ^ {\\infty} \\exp (- \\mu l \\eta / 2)) = \\frac {1}{1 - \\exp (- \\mu \\eta / 2)} \\leq \\frac {C _ {2} ^ {\\prime}}{\\eta}. \\tag {67}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.337, + 0.497, + 0.352 + ], + "angle": 0, + "content": "Substituting (67) into (66), we obtain the lemma." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.337, + 0.824, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.368, + 0.825, + 0.401 + ], + "angle": 0, + "content": "Based on Lemma K.24, we obtain the following lemma about the concentration property of \\(\\tilde{Z}_{k,t}^{(s)}\\), which can be derived in the same way as Lemma K.12." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.404, + 0.637, + 0.42 + ], + "angle": 0, + "content": "Lemma K.25. Given \\(\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}\\), then with probability at least \\(1 - \\delta\\)" + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.426, + 0.71, + 0.466 + ], + "angle": 0, + "content": "\\[\n\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq C _ {3} ^ {\\prime} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 \\alpha K}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.471, + 0.414, + 0.487 + ], + "angle": 0, + "content": "where \\(C_3^\\prime\\) is defined in Lemma K.24." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.497, + 0.825, + 0.527 + ], + "angle": 0, + "content": "The following lemma can be derived analogously to Lemma K.14 but the error bound is tighter in terms of its dependency on \\(\\alpha\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.53, + 0.826, + 0.56 + ], + "angle": 0, + "content": "Lemma K.26. Given \\(\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_1}\\), then for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\), with probability at least \\(1 - \\delta\\), there exists a constant \\(C_4^\\prime\\) independent of \\(\\alpha\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.301, + 0.566, + 0.698, + 0.599 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\| _ {2} \\leq C _ {4} ^ {\\prime} \\sqrt {\\alpha \\eta \\log \\frac {\\alpha}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.605, + 0.204, + 0.617 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.623, + 0.624, + 0.656 + ], + "angle": 0, + "content": "\\[\n\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq C _ {4} ^ {\\prime} \\sqrt {\\alpha \\eta \\log \\frac {\\alpha}{\\eta \\delta}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.668, + 0.827, + 0.78 + ], + "angle": 0, + "content": "Then, similar to Lemma K.17, we can show that for \\(\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))\\) and simultaneously all \\(s\\geq s_0^{\\prime} + s_1^{\\prime}\\) where \\(s_1^\\prime = \\mathcal{O}(\\frac{1}{\\alpha}\\log \\frac{1}{\\eta})\\), it holds with probability at least \\(1 - \\delta\\) that \\(\\| \\bar{\\pmb{\\theta}}^{(s)} - \\phi^{(s)}\\| _2 = \\mathcal{O}(\\sqrt{\\alpha\\eta\\log\\frac{\\alpha}{\\eta\\delta}})\\). 
Note that to eliminate the dependency of the second term's denominator on \\(\\alpha\\) in (44), we can discuss the cases of \\(\\alpha >c_{0}\\) and \\(\\alpha < c_{0}\\) respectively where \\(c_{0}\\) can be an arbitrary positive constant independent of \\(\\alpha\\). For the case of \\(\\alpha < c_{0}\\) group \\(\\lceil \\frac{c_0}{\\alpha}\\rceil\\) rounds together and repeat the arguments in this subsection to analyze the closeness between Local SGD and GD iterates as well as the evolution of loss." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.797, + 0.591, + 0.811 + ], + "angle": 0, + "content": "K.9 COMPUTING THE MOMENTS FOR ONE \"GIANT STEP\"" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.823, + 0.827, + 0.925 + ], + "angle": 0, + "content": "In this subsection, we compute the first and second moments for the change of manifold projection every \\(R_{\\mathrm{grp}}\\) rounds of Local SGD. Since the randomness in training might drive the iterate out of the working zone, making the dynamic intractable, we analyze a more well-behaved sequence \\(\\{\\hat{\\pmb{\\theta}}_{k,t}^{(s)}: (s,t) \\preceq (R_{\\mathrm{tot}},0), k \\in [K]\\}\\) which is equal to \\(\\{\\pmb{\\theta}_{k,t}^{(s)}\\}\\) with high probability. Specifically, \\(\\hat{\\pmb{\\theta}}_{k,t}^{(s)}\\) equal to \\(\\pmb{\\theta}_{k,t}^{(s)}\\) if the global step \\((s,t)\\) is \\(\\eta^{100}\\)-good and is set as a point \\(\\phi_{\\mathrm{null}} \\in \\Gamma\\) otherwise. The formal definition is as follows." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "53" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.402, + 0.086, + 0.583, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.413, + 0.202, + 0.584, + 0.217 + ], + "angle": 0, + "content": "Figure 9: A plot of \\(\\psi (x)\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.243, + 0.828, + 0.295 + ], + "angle": 0, + "content": "Definition K.5 (Well-behaved sequence). Denote by \\(\\mathcal{E}_t^{(s)}\\) the event \\(\\{\\text{global step } (s, t) \\text{ is } \\eta^{100} \\text{-good}\\}\\). Define a well-behaved sequence \\(\\hat{\\pmb{\\theta}}_{k,t}^{(s)} := \\pmb{\\theta}_{k,t}^{(s)}\\mathbb{1}_{\\mathcal{E}_t^{(s)}} + \\phi_{\\mathrm{null}}\\mathbb{1}_{\\bar{\\mathcal{E}}_t^{(s)}}\\), which satisfies the following update rule:" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.297, + 0.826, + 0.375 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {\\theta}} _ {k, t + 1} ^ {(s)} = \\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} \\mathbb {1} _ {\\mathcal {E} _ {t + 1} ^ {(s)}} + \\phi_ {\\text {n u l l}} \\mathbb {1} _ {\\bar {\\mathcal {E}} _ {t + 1} ^ {(s)}} (68) \\\\ = \\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\underbrace {- \\mathbb {1} _ {\\bar {\\mathcal {E}} _ {t + 1} ^ {(s)}} (\\hat {\\boldsymbol {\\theta}} _ {k , t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k , t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k , t} ^ {(s)}) + \\mathbb {1} _ {\\bar {\\mathcal {E}} _ {t + 1} ^ {(s)}} \\phi_ {\\mathrm {n u l l}}} _ {:= \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)}}. 
(69) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.426 + ], + "angle": 0, + "content": "By Theorem K.1, with probability at least \\( 1 - \\eta^{100} \\), \\( \\hat{\\pmb{\\theta}}_{k,t}^{(s)} = \\pmb{\\theta}_{k,t}^{(s)}, \\forall k \\in [K], (s,t) \\preceq (R_{\\mathrm{tot}},0) \\). Similar to \\( \\{\\pmb{\\theta}_{k,t}^{(s)}\\} \\), we define the following variables with respect to \\( \\{\\hat{\\pmb{\\theta}}_{k,t}^{(s)}\\} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.429, + 0.547, + 0.468 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {\\theta}} _ {\\mathrm {a v g}} ^ {(s + 1)} := \\frac {1}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {\\theta}} _ {k, H} ^ {(s)}, \\quad \\hat {\\boldsymbol {\\phi}} ^ {(s)} := \\Phi (\\hat {\\boldsymbol {\\theta}} _ {\\mathrm {a v g}} ^ {(s)}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.257, + 0.47, + 0.756, + 0.508 + ], + "angle": 0, + "content": "\\[\n\\hat {\\pmb {x}} _ {k, t} ^ {(s)} := \\hat {\\pmb {\\theta}} _ {k, t} ^ {(s)} - \\hat {\\pmb {\\phi}} ^ {(s)}, \\quad \\hat {\\pmb {x}} _ {\\mathrm {a v g}, 0} ^ {(s)} := \\hat {\\pmb {\\theta}} _ {\\mathrm {a v g}} ^ {(s)} - \\hat {\\pmb {\\phi}} ^ {(s)}, \\quad \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} := \\frac {1}{K} \\sum_ {k \\in [ K ]} \\hat {\\pmb {x}} _ {k, H} ^ {(s)}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.513, + 0.825, + 0.548 + ], + "angle": 0, + "content": "Notice that \\(\\hat{\\pmb{x}}_{k,0}^{(s)} = \\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s)}\\) for all \\(k\\in [K]\\). Finally, we introduce the following mapping \\(\\Psi (\\pmb {\\theta}):\\) \\(\\Gamma \\to \\mathbb{R}^{d\\times d}\\), which is closely related to \\(\\widehat{\\pmb{\\Psi}}\\) defined in Theorem 3.2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.55, + 0.641, + 0.566 + ], + "angle": 0, + "content": "Definition K.6. 
For \\(\\pmb{\\theta} \\in \\Gamma\\), we define the mapping \\(\\Psi(\\pmb{\\theta}) : \\Gamma \\to \\mathbb{R}^{d \\times d}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.569, + 0.676, + 0.603 + ], + "angle": 0, + "content": "\\[\n\\Psi (\\boldsymbol {\\theta}) = \\sum_ {i, j \\in [ d ]} \\psi \\left(\\eta H \\left(\\lambda_ {i} + \\lambda_ {j}\\right)\\right) \\left\\langle \\boldsymbol {\\Sigma} (\\boldsymbol {\\theta}), \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.607, + 0.825, + 0.641 + ], + "angle": 0, + "content": "where \\(\\lambda_{i},\\pmb{v}_{i}\\) are the \\(i\\) -th eigenvalue and eigenvector of \\(\\nabla^2\\mathcal{L}(\\pmb {\\theta})\\) and \\(\\pmb {v}_i\\) 's form an orthonormal basis of \\(\\mathbb{R}^d\\) . Additionally, \\(\\psi (x)\\coloneqq \\frac{e^{-x} - 1 + x}{x}\\) and \\(\\psi (0) = 0\\) ; see Figure 9 for a plot." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.643, + 0.825, + 0.687 + ], + "angle": 0, + "content": "Remark K.1. Intuitively, \\(\\Psi(\\pmb{\\theta})\\) rescales the entries of \\(\\pmb{\\Sigma}(\\pmb{\\theta})\\) in the eigenbasis of \\(\\nabla^2\\mathcal{L}(\\pmb{\\theta})\\). When \\(\\nabla^2\\mathcal{L}(\\pmb{\\theta}) = \\mathrm{diag}(\\lambda_1,\\dots ,\\lambda_d)\\in \\mathbb{R}^{d\\times d}\\), where \\(\\lambda_{i} = 0\\) for all \\(m < i\\leq d\\), \\(\\Psi (\\pmb{\\Sigma}_0)_{i,j} = \\psi (\\eta H(\\lambda_i + \\lambda_j))\\Sigma_{0,i,j}\\). 
Note that \\(\\Psi (\\pmb{\\theta})\\) can also be written as" + }, + { + "type": "equation", + "bbox": [ + 0.316, + 0.689, + 0.68, + 0.707 + ], + "angle": 0, + "content": "\\[\n\\operatorname {v e c} (\\boldsymbol {\\Psi} (\\boldsymbol {\\theta})) = \\psi (\\eta H (\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) \\oplus \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}))) \\operatorname {v e c} (\\boldsymbol {\\Sigma} (\\boldsymbol {\\theta})),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.709, + 0.825, + 0.739 + ], + "angle": 0, + "content": "where \\(\\oplus\\) denotes the Kronecker sum \\(A\\oplus B = A\\otimes I_d + I_d\\otimes B\\), \\(\\operatorname{vec}(\\cdot)\\) is the vectorization operator of a matrix and \\(\\psi (\\cdot)\\) is interpreted as a matrix function." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.748, + 0.696, + 0.765 + ], + "angle": 0, + "content": "Now we are ready to present the result about the moments of \\(\\hat{\\phi}^{(s + R_{\\mathrm{grp}})} - \\hat{\\phi}^{(s)}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.767, + 0.827, + 0.799 + ], + "angle": 0, + "content": "Theorem K.2. For \\( s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}} - R_{\\mathrm{grp}} \\) and \\( 0 < \\beta < 0.5 \\), the first and second moments of \\( \\hat{\\phi}^{(s + R_{\\mathrm{grp}})} - \\hat{\\phi}^{(s)} \\) are as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.227, + 0.801, + 0.825, + 0.853 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} \\left[ \\hat {\\phi} ^ {(s + R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(s)} \\mid \\hat {\\phi} ^ {(s)}, \\mathcal {E} _ {0} ^ {(s)} \\right] = \\frac {\\eta^ {1 - \\beta}}{2 B} \\partial^ {2} \\Phi \\left(\\hat {\\phi} ^ {(s)}\\right) \\left[ \\boldsymbol {\\Sigma} \\left(\\hat {\\phi} ^ {(s)}\\right) + (K - 1) \\Psi \\left(\\hat {\\phi} ^ {(s)}\\right) \\right] \\tag {70} \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - 2 \\beta}) + \\tilde {\\mathcal {O}} (\\eta), \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.176, + 0.859, + 0.825, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\left(\\hat {\\phi} ^ {(s + R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(s)}\\right) \\left(\\hat {\\phi} ^ {(s + R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(s)}\\right) ^ {\\top} \\mid \\hat {\\phi} ^ {(s)}, \\mathcal {E} _ {0} ^ {(s)} \\right] = \\frac {\\eta^ {1 - \\beta}}{B} \\Sigma_ {\\|} \\left(\\hat {\\phi} ^ {(s)}\\right) + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 2 \\beta}\\right) + \\tilde {\\mathcal {O}} (\\eta), \\tag {71}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.909, + 0.566, + 0.926 + ], + "angle": 0, + "content": "where \\(\\tilde{\\mathcal{O}} (\\cdot)\\) hides log terms and constants independent of \\(\\eta\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "54" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.14 + ], + "angle": 0, + "content": "Remark K.2. By Theorem K.1 and the definition of \\(\\hat{\\pmb{\\theta}}_{k,t}^{(s)}\\), (70) and (71) still hold when we replace \\(\\hat{\\phi}^{(s)}\\) with \\(\\phi^{(s)}\\) and replace \\(\\hat{\\phi}^{(s + R_{\\mathrm{grp}})}\\) with \\(\\phi^{(s + R_{\\mathrm{grp}})}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.149, + 0.825, + 0.199 + ], + "angle": 0, + "content": "We shall have Theorem K.2 if we prove the following theorem, which directly gives Theorem K.2 with a simple shift of index. 
For brevity, denote by \\(\\Delta \\hat{\\phi}^{(s)}\\coloneqq \\hat{\\phi}^{(s)} - \\hat{\\phi}^{(0)}\\) \\(\\Sigma_0\\coloneqq \\Sigma (\\hat{\\phi}^{(0)})\\) \\(\\Sigma_{0,\\parallel}\\coloneqq \\Sigma_{\\parallel}(\\hat{\\phi}^{(0)})\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.204, + 0.826, + 0.245 + ], + "angle": 0, + "content": "Theorem K.3. Given \\(\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\phi}^{(0)}\\|_2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})\\), for \\(0 < \\beta < 0.5\\), the first and second moments of \\(\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})}\\) are as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.269, + 0.251, + 0.804, + 0.282 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\Delta \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})} ] = \\frac {\\eta^ {1 - \\beta}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {\\Sigma} _ {0} + (K - 1) \\pmb {\\Psi} (\\hat {\\phi} ^ {(0)}) ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 2 \\beta}) + \\tilde {\\mathcal {O}} (\\eta),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.284, + 0.611, + 0.314 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\Delta \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})} \\Delta \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}}) ^ {\\top}} ] = \\frac {\\eta^ {1 - \\beta}}{B} \\Sigma_ {0, \\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 1. 5 \\beta}) + \\tilde {\\mathcal {O}} (\\eta).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.327, + 0.825, + 0.438 + ], + "angle": 0, + "content": "We will prove Theorem K.3 in the remainder of this subsection. For convenience, we introduce more notations that will be used throughout the proof. Let \\( \\pmb{H}_0 \\coloneqq \\nabla^2\\mathcal{L}(\\hat{\\phi}^{(0)}) \\). By Assumption 3.2, \\( \\mathrm{rank}(H_0) = m \\). 
WLOG, assume \\( H_0 = \\mathrm{diag}(\\lambda_1,\\dots ,\\lambda_d)\\in \\mathbb{R}^{d\\times d} \\), where \\( \\lambda_{i} = 0 \\) for all \\( m < i\\leq d \\) and \\( \\lambda_{1}\\geq \\lambda_{2}\\dots \\geq \\lambda_{m} \\). By Lemma K.2, \\( \\partial \\Phi (\\hat{\\phi}^{(0)}) \\) is the projection matrix onto the tangent space \\( T_{\\hat{\\phi}^{(0)}}(\\Gamma) \\) (i.e. the null space of \\( \\nabla^2\\mathcal{L}(\\hat{\\phi}^{(0)}) \\)) and therefore, \\( \\partial \\Phi (\\hat{\\phi}^{(0)}) = \\left[ \\begin{array}{cc}0 & 0\\\\ 0 & I_{d - m} \\end{array} \\right] \\). Let \\( P_{\\parallel}\\coloneqq \\partial \\Phi (\\hat{\\phi}^{(0)}) \\) and \\( P_{\\perp}\\coloneqq I_d - P_{\\parallel} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.444, + 0.825, + 0.498 + ], + "angle": 0, + "content": "Let \\(\\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s)} := \\mathbb{E}[\\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)}\\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)\\top}], \\hat{\\pmb{q}}_t^{(s)} := \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}]\\) and \\(\\hat{\\pmb{B}}_t^{(s)} := \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}]\\). The latter two notations are independent of \\(k\\) since \\(\\hat{\\pmb{\\theta}}_{1,t}^{(s)}, \\dots, \\hat{\\pmb{\\theta}}_{K,t}^{(s)}\\) are identically distributed. The following lemma computes the first and second moments of the change of manifold projection every round." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.503, + 0.825, + 0.543 + ], + "angle": 0, + "content": "Lemma K.27. 
Given \\(\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\pmb{\\phi}}^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})\\), for \\(0\\leq s < R_{\\mathrm{grp}}\\), the first and second moments of \\(\\hat{\\phi}^{(s + 1)} - \\hat{\\phi}^{(s)}\\) are as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.549, + 0.825, + 0.577 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} \\right] = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\tag {72}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.579, + 0.825, + 0.599 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\left(\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}\\right) \\left(\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}\\right) ^ {\\top} \\right] = P _ {\\|} \\hat {A} _ {\\text {a v g}} ^ {(s)} P _ {\\|} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right). \\tag {73}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.612, + 0.424, + 0.627 + ], + "angle": 0, + "content": "Proof. 
By Taylor expansion, we have" + }, + { + "type": "equation", + "bbox": [ + 0.207, + 0.634, + 0.791, + 0.797 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\phi} ^ {(s + 1)} = \\Phi (\\hat {\\phi} ^ {(s)} + \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)}) \\\\ = \\hat {\\phi} ^ {(s)} + \\partial \\Phi (\\hat {\\phi} ^ {(s)}) \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(s)}) [ \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s) \\top} ] + \\mathcal {O} (\\| \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}) \\\\ = \\hat {\\phi} ^ {(s)} + \\partial \\Phi (\\hat {\\phi} ^ {(0)} + \\Delta \\hat {\\phi} ^ {(s)}) \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)} + \\Delta \\hat {\\phi} ^ {(s)}) [ \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s) \\top} ] \\\\ + \\mathcal {O} (\\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}) \\\\ = \\hat {\\phi} ^ {(s)} + P _ {\\parallel} \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s)} \\Delta \\hat {\\phi} ^ {(s) \\top} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s) \\top} ] \\\\ + \\mathcal {O} (\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} + \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} + \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.802, + 0.408, + 0.817 + ], + "angle": 0, + "content": "Rearrange the terms and we obtain:" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.825, + 0.824, + 0.875 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\Delta \\hat {\\phi} ^ {(s) \\top} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s) \\top} ] \\tag {74} \\\\ + \\mathcal {O} \\left(\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} + \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} + \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}\\right). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.88, + 0.245, + 0.894 + ], + "angle": 0, + "content": "Moreover," + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.901, + 0.825, + 0.923 + ], + "angle": 0, + "content": "\\[\n(\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ^ {\\top} = P _ {\\|} \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s) \\top} P _ {\\|} + \\mathcal {O} (\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2}). 
\\tag {75}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "55" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.102, + 0.825, + 0.143 + ], + "angle": 0, + "content": "Noticing that \\(\\hat{\\pmb{x}}_{k,H}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}\\) are identically distributed for all \\(k\\in [K]\\), we have \\(\\mathbb{E}[\\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}] = \\frac{1}{K}\\sum_{k\\in [K]}\\mathbb{E}[\\hat{\\pmb{x}}_{k,H}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}] = \\hat{\\pmb{B}}_H^{(s)}\\). Then taking expectation of both sides of (74) gives" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.155, + 0.813, + 0.207 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] \\\\ + \\mathcal {O} (\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ] + \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} ] + \\mathbb {E} [ \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3} ]). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.215, + 0.525, + 0.232 + ], + "angle": 0, + "content": "Again taking expectation of both sides of (75) yields" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.241, + 0.78, + 0.264 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) (\\hat {\\phi} ^ {(s + 1)} - \\Delta \\hat {\\phi} ^ {(s) \\top}) ] = P _ {\\parallel} \\hat {A} _ {\\mathrm {a v g}} ^ {(s)} P _ {\\parallel} + \\mathcal {O} (\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} ]).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.817, + 0.29 + ], + "angle": 0, + "content": "By Lemmas K.22 and K.23, the following holds simultaneously with probability at least \\(1 - \\eta^{100}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.322, + 0.3, + 0.675, + 0.322 + ], + "angle": 0, + "content": "\\[\n\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - 0. 5 \\beta}), \\quad \\| \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.335, + 0.825, + 0.375 + ], + "angle": 0, + "content": "Furthermore, since for all \\( k \\in [K] \\) and \\( (s,t) \\preceq (R_{\\mathrm{tot}},0) \\), \\( \\hat{\\pmb{\\theta}}_{k,t}^{(s)} \\) stays in \\( \\Gamma^{\\epsilon_2} \\) which is a bounded set, \\( \\| \\Delta \\hat{\\phi}^{(s)}\\| _2 \\) and \\( \\| \\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)}\\| _2 \\) are also bounded. Therefore, we have" + }, + { + "type": "equation", + "bbox": [ + 0.364, + 0.386, + 0.825, + 0.406 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}), \\tag {76}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.364, + 0.409, + 0.825, + 0.43 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\tag {77}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.429, + 0.433, + 0.825, + 0.454 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5}), \\tag {78}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.463, + 0.354, + 0.479 + ], + "angle": 0, + "content": "which concludes the proof." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.506, + 0.825, + 0.543 + ], + "angle": 0, + "content": "We compute \\(\\hat{A}_{\\mathrm{avg}}^{(s)}, \\hat{q}_t^{(s)}\\) and \\(\\hat{B}_t^{(s)}\\) by solving a set of recursions, which is formulated in the following lemma. Additionally, define \\(\\hat{A}_t^{(s)} \\coloneqq \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}\\hat{\\pmb{x}}_{k,t}^{(s)\\top}]\\) and \\(\\hat{M}_t^{(s)} \\coloneqq \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}\\hat{\\pmb{x}}_{k,l}^{(s)}], (k \\neq l)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.55, + 0.827, + 0.587 + ], + "angle": 0, + "content": "Lemma K.28. Given \\(\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\phi}^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})\\), for \\(0\\leq s < R_{\\mathrm{grp}}\\) and \\(0\\leq t < H\\), we have the following recursions." 
+ }, + { + "type": "equation", + "bbox": [ + 0.218, + 0.598, + 0.825, + 0.623 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {q}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {q}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {q}} _ {t} ^ {(s)} - \\eta \\nabla^ {3} \\mathcal {L} \\left(\\phi^ {(0)}\\right) \\left[ \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} \\right] - \\frac {\\eta}{2} \\nabla^ {3} \\mathcal {L} \\left(\\phi^ {(0)}\\right) \\left[ \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\right] + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - \\beta}\\right), \\tag {79}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.626, + 0.825, + 0.658 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {A}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} _ {0} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}), \\tag {80}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.661, + 0.825, + 0.68 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {M}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - 0. 5 \\beta}\\right), \\tag {81}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.684, + 0.825, + 0.703 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {B}} _ {t + 1} ^ {(s)} = \\left(\\boldsymbol {I} - \\eta \\boldsymbol {H} _ {0}\\right) \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - \\beta}\\right). 
\\tag {82}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.713, + 0.245, + 0.726 + ], + "angle": 0, + "content": "Moreover," + }, + { + "type": "equation", + "bbox": [ + 0.269, + 0.738, + 0.825, + 0.767 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} = \\frac {1}{K} \\hat {\\boldsymbol {A}} _ {H} ^ {(s)} + (1 - \\frac {1}{K}) \\hat {\\boldsymbol {M}} _ {H} ^ {(s)}, \\tag {83}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.25, + 0.769, + 0.825, + 0.789 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {M}} _ {0} ^ {(s + 1)} = \\hat {\\boldsymbol {A}} _ {0} ^ {(s + 1)} = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} \\boldsymbol {P} _ {\\perp} + \\mathcal {O} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right), \\tag {84}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.259, + 0.791, + 0.825, + 0.819 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {q}} _ {0} ^ {(s + 1)} = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} - \\partial^ {2} \\Phi (\\phi^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\partial^ {2} \\Phi (\\phi^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\tag {85}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.822, + 0.825, + 0.842 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {B}} _ {0} ^ {(s + 1)} = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} + \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {\\text {a v g}} ^ {(s)} \\boldsymbol {P} _ {\\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\tag {86}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.869, + 0.674, + 0.89 + ], + "angle": 0, + "content": "Proof. We first derive the recursion for \\(\\hat{\\pmb{q}}_t^{(s)}\\). 
Recall the update rule for \\(\\hat{\\pmb{\\theta}}_{k,t}^{(s)}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.35, + 0.9, + 0.646, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {\\theta}} _ {k, t + 1} ^ {(s)} = \\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)}.\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "56" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.103, + 0.43, + 0.12 + ], + "angle": 0, + "content": "Subtracting \\(\\hat{\\phi}^{(s)}\\) from both sides gives" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.129, + 0.823, + 0.316 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {x}} _ {k, t + 1} ^ {(s)} = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}) \\\\ = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\left(\\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {\\phi}} ^ {(s)}) \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} + \\frac {1}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\boldsymbol {\\phi}} ^ {(s)}) [ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s) \\top} ] + \\mathcal {O} (\\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3})\\right) \\\\ - \\eta z _ {k, t} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}) \\\\ = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\left(\\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {\\phi}} ^ 
{(0)}\\right) + \\nabla^ {3} \\mathcal {L} \\left(\\hat {\\boldsymbol {\\phi}} ^ {(0)}\\right) \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\mathcal {O} \\left(\\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| ^ {2}\\right)\\right) \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\\\ - \\frac {\\eta}{2} \\left(\\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) + \\mathcal {O} \\left(\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2}\\right)\\right) \\left[ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k t} ^ {(s) \\top} \\right] - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\mathcal {O} \\left(\\eta \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}\\right) \\\\ = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\Delta \\hat {\\phi} ^ {(s) \\top} ] - \\frac {\\eta}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s) \\top} ] - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\\\ + \\mathcal {O} (\\eta \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} + \\eta \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} + \\eta \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}), \\tag {87} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.325, + 0.825, + 0.354 + ], + "angle": 0, + "content": "where the second and third equality perform Taylor expansion. 
Taking expectation on both sides gives" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.364, + 0.805, + 0.416 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\pmb {q}} _ {t + 1} ^ {(s)} = (\\pmb {I} - \\eta \\pmb {H} _ {0}) \\hat {\\pmb {q}} _ {t} ^ {(s)} - \\eta \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {q}} _ {t} ^ {(s)} ] - \\frac {\\eta}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {A}} _ {t} ^ {(s)} ] \\\\ + \\mathcal {O} \\left(\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} ] + \\eta \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} ] + \\eta \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ] + \\mathbb {E} [ \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]\\right). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.428, + 0.825, + 0.486 + ], + "angle": 0, + "content": "By Theorem K.1, with probability at least \\( 1 - \\eta^{100} \\), \\( \\hat{e}_{k,t}^{(s)} = \\mathbf{0}, \\forall k \\in [K], (s,t) \\preceq (R_{\\mathrm{grp}},0) \\). Also notice that both \\( \\hat{\\theta}_{k,t}^{(s)} \\) and \\( \\phi_{\\mathrm{null}} \\) belong to the bounded set \\( \\Gamma^{\\epsilon_2} \\). Therefore, \\( \\| \\hat{e}_{k,t}^{(s)} \\|_2 \\) is bounded and we have \\( \\mathbb{E}[\\| \\hat{e}_{k,t}^{(s)} \\|_2] = \\mathcal{O}(\\eta^{100}) \\). Combining this with (76) to (78) yields (79)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.825, + 0.526 + ], + "angle": 0, + "content": "Secondly, we derive the recursion for \\(\\hat{B}_t^{(s)}\\). 
Multiplying both sides of (87) by \\(\\Delta \\hat{\\phi}^{(s)\\top}\\) and taking expectation, we have" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.535, + 0.784, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {B}} _ {t + 1} ^ {(s)} = (\\boldsymbol {I} - \\eta \\boldsymbol {H} _ {0}) \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} + \\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.565, + 0.519, + 0.58 + ], + "angle": 0, + "content": "Still by Theorem K.1 and (76) to (78), we have (82)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.588, + 0.566, + 0.606 + ], + "angle": 0, + "content": "Thirdly, we derive the recursion for \\(\\hat{A}_t^{(s)}\\). 
By (87), we have" + }, + { + "type": "equation", + "bbox": [ + 0.225, + 0.616, + 0.773, + 0.705 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {A}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} _ {0} + \\mathcal {O} (\\eta^ {2} \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ]) \\\\ + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} + \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]) \\\\ = (\\boldsymbol {I} - \\eta \\boldsymbol {H} _ {0}) \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} _ {0} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.713, + 0.327, + 0.728 + ], + "angle": 0, + "content": "which establishes (80)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.825, + 0.768 + ], + "angle": 0, + "content": "Fourthly, we derive the recursion for \\(\\hat{M}_t^{(s)}\\). 
Multiplying both sides of (87) by \\(\\hat{\\pmb{x}}_{l,t + 1}^{(s)\\top}\\) and taking expectation, \\(l\\neq k\\), we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.778, + 0.76, + 0.822 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {M}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} \\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| _ {2} ]) \\\\ + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.832, + 0.572, + 0.846 + ], + "angle": 0, + "content": "By a similar argument to the proof of Lemma K.27, we have" + }, + { + "type": "equation", + "bbox": [ + 0.413, + 0.858, + 0.616, + 0.877 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.881, + 0.65, + 0.9 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\right] = \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.295, + 0.925 + ], + "angle": 0, + "content": "which yields (81)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "57" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.102, + 0.58, + 0.121 + ], + "angle": 0, + "content": "Now we proceed to prove (83) to (86). By definition of \\(\\hat{A}_{\\mathrm{avg}}^{(s)}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.282, + 0.135, + 0.715, + 0.246 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} = \\frac {1}{K ^ {2}} \\mathbb {E} [ (\\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)}) (\\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)}) ^ {\\top} ] \\\\ = \\frac {1}{K ^ {2}} \\sum_ {k \\in [ K ]} \\mathbb {E} \\left[ \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s) \\top} \\right] + \\frac {1}{K ^ {2}} \\sum_ {k, l \\in [ K ], k \\neq l} \\mathbb {E} \\left[ \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {l, H} ^ {(s) \\top} \\right] \\\\ = \\frac {1}{K} \\hat {\\boldsymbol {A}} _ {H} ^ {(s)} + (1 - \\frac {1}{K}) \\hat {\\boldsymbol {M}} _ {H} ^ {(s)}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.26, + 0.644, + 0.282 + ], + "angle": 0, + "content": "which demonstrates (83). Then we derive (84). 
By definition of \\(\\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s + 1)}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.295, + 0.824, + 0.397 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} = \\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} - \\Phi \\big (\\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)}\\big) \\\\ = \\hat {\\phi} ^ {(s)} + \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} - \\left(\\hat {\\phi} ^ {(s)} + \\partial \\Phi (\\hat {\\phi} ^ {(s)}) \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ^ {2})\\right) \\\\ = \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} - \\left(\\pmb {P} _ {\\parallel} + \\mathcal {O} (\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2})\\right) \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2}) \\\\ = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} + \\mathcal {O} \\left(\\left\\| \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\right\\| _ {2} ^ {2} + \\left\\| \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\right\\| _ {2} \\left\\| \\Delta \\hat {\\phi} ^ {(s)} \\right\\| _ {2}\\right). 
\\tag {88} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.409, + 0.223, + 0.423 + ], + "angle": 0, + "content": "Hence," + }, + { + "type": "equation", + "bbox": [ + 0.27, + 0.437, + 0.726, + 0.483 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {M}} _ {0} ^ {(s + 1)} = \\hat {\\boldsymbol {A}} _ {0} ^ {(s + 1)} = \\mathbb {E} [ \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s)} \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s) \\top} ] \\\\ = \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\perp} + \\mathcal {O} (\\mathbb {E} [ \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3} + \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ]). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.496, + 0.454, + 0.512 + ], + "angle": 0, + "content": "By (76) and (78), we obtain (84). By (74)," + }, + { + "type": "equation", + "bbox": [ + 0.266, + 0.526, + 0.825, + 0.548 + ], + "angle": 0, + "content": "\\[\n\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} = P _ {\\|} \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} + \\mathcal {O} \\left(\\left\\| \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\right\\| _ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} + \\| \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ^ {2}\\right). 
\\tag {89}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.562, + 0.381, + 0.578 + ], + "angle": 0, + "content": "Combining (88) and (89) gives" + }, + { + "type": "equation", + "bbox": [ + 0.3, + 0.592, + 0.697, + 0.614 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\hat {\\pmb {x}} _ {\\mathrm {a v g}, 0} ^ {(s)} (\\hat {\\pmb {\\phi}} ^ {(s + 1)} - \\hat {\\pmb {\\phi}} ^ {(s)}) ^ {\\top} ] = \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.626, + 0.246, + 0.641 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.261, + 0.655, + 0.735, + 0.7 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\boldsymbol {B}} _ {0} ^ {(s + 1)} = \\mathbb {E} [ \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s + 1) \\top} ] = \\mathbb {E} [ \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} (\\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\hat {\\boldsymbol {\\phi}} ^ {(s + 1)} - \\hat {\\boldsymbol {\\phi}} ^ {(s)}) ^ {\\top} ] \\\\ = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} + \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} \\boldsymbol {P} _ {\\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.712, + 0.477, + 0.728 + ], + "angle": 0, + "content": "Finally, we apply Lemma K.27 to derive (85)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.742, + 0.763, + 0.825 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {\\pmb {q}} _ {0} ^ {(s + 1)} = \\mathbb {E} [ \\hat {\\pmb {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} ] = \\mathbb {E} [ \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} - (\\hat {\\pmb {\\phi}} ^ {(s + 1)} - \\hat {\\pmb {\\phi}} ^ {(s)}) ] \\\\ = \\hat {\\pmb {q}} _ {H} ^ {(s)} - \\pmb {P} _ {\\parallel} \\hat {\\pmb {q}} _ {H} ^ {(s)} - \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = \\pmb {P} _ {\\perp} \\hat {\\pmb {q}} _ {H} ^ {(s)} - \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.839, + 0.355, + 0.854 + ], + "angle": 0, + "content": "which concludes the proof." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.839, + 0.826, + 0.851 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.895, + 0.825, + 0.924 + ], + "angle": 0, + "content": "With the assumption that the hessian at \\(\\hat{\\phi}^{(0)}\\) is diagonal, we have the following corollary that formulates the recursions for each matrix element." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "58" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.14 + ], + "angle": 0, + "content": "Corollary K.2. 
Given \\(\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\phi}^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})\\), for \\(0\\leq s < R_{\\mathrm{grp}}\\) and \\(0\\leq t < H\\), we have the following elementwise recursions." + }, + { + "type": "equation", + "bbox": [ + 0.253, + 0.153, + 0.826, + 0.185 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {t + 1, i, j} ^ {(s)} = \\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) \\hat {A} _ {t, i, j} ^ {(s)} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - 0. 5 \\beta}\\right), \\tag {90}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.25, + 0.187, + 0.825, + 0.208 + ], + "angle": 0, + "content": "\\[\n\\hat {M} _ {t + 1, i, j} ^ {(s)} = \\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) \\hat {M} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - 0. 5 \\beta}\\right), \\tag {91}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.254, + 0.211, + 0.825, + 0.232 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {t + 1, i, j} ^ {(s)} = \\left(1 - \\lambda_ {i} \\eta\\right) \\hat {B} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - \\beta}\\right), \\tag {92}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.234, + 0.825, + 0.262 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {\\text {a v g}, i, j} ^ {(s)} = \\frac {1}{K} \\left(\\hat {A} _ {H, i, j} ^ {(s)} - \\hat {M} _ {H, i, j} ^ {(s)}\\right) + \\hat {M} _ {H, i, j} ^ {(s)}, \\tag {93}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.264, + 0.825, + 0.304 + ], + "angle": 0, + "content": "\\[\n\\hat {M} _ {0, i, j} ^ {(s + 1)} = \\hat {A} _ {0, i, j} ^ {(s + 1)} = \\left\\{ \\begin{array}{l l} \\hat {A} _ {\\text {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), & 1 \\leq i \\leq m, 1 \\leq j \\leq m, \\\\ \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - 0. 5 \\beta}), & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {94}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.307, + 0.825, + 0.363 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {0, i, j} ^ {(s + 1)} = \\left\\{ \\begin{array}{l l} \\hat {B} _ {H, i, j} ^ {(s)} + \\hat {A} _ {\\text {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & 1 \\leq i \\leq m, m < j \\leq d, \\\\ \\hat {B} _ {H, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & 1 \\leq i \\leq m, 1 \\leq j \\leq m, \\\\ \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & m < i \\leq d. \\end{array} \\right. \\tag {95}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.383, + 0.825, + 0.445 + ], + "angle": 0, + "content": "Having formulated the recursions, we are ready to solve out the explicit expressions. We will split each matrix into four parts and solve them one by one. Specifically, a matrix \\(M\\) can be split into \\(P_{\\parallel}MP_{\\parallel}\\) in the tangent space of \\(\\Gamma\\) at \\(\\hat{\\phi}^{(0)}, P_{\\perp}MP_{\\perp}\\) in the normal space, along with \\(P_{\\parallel}MP_{\\perp}\\) and \\(P_{\\perp}MP_{\\parallel}\\) across both spaces." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.45, + 0.589, + 0.471 + ], + "angle": 0, + "content": "We first compute the elements of \\(P_{\\perp}\\hat{A}_{t}^{(s)}P_{\\perp}\\) and \\(P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}\\)." + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.476, + 0.825, + 0.512 + ], + "angle": 0, + "content": "Lemma K.29 (General formula for \\(P_{\\perp}\\hat{A}_{t}^{(s)}P_{\\perp}\\) and \\(P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}\\)). Let \\(R_0 \\coloneqq \\lceil \\frac{10}{\\lambda_m\\alpha}\\log \\frac{1}{\\eta}\\rceil\\). 
Then for \\(1\\leq i\\leq m,1\\leq j\\leq m\\) and \\(R_0\\leq s < R_{\\mathrm{grp}}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.522, + 0.547, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.557, + 0.8, + 0.593 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {(1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\frac {\\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.604, + 0.502, + 0.626 + ], + "angle": 0, + "content": "For \\(s < R_0\\), \\(\\hat{A}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)\\) and \\(\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.658, + 0.575, + 0.675 + ], + "angle": 0, + "content": "Proof. For \\(1 \\leq i \\leq m, 1 \\leq j \\leq m, \\lambda_i > 0, \\lambda_j > 0\\). By (90)," + }, + { + "type": "equation", + "bbox": [ + 0.226, + 0.688, + 0.772, + 0.809 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {A} _ {t, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\sum_ {\\tau = 0} ^ {t - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {\\tau} \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} \\\\ + \\tilde {\\mathcal {O}} (\\sum_ {\\tau = 0} ^ {t - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {\\tau} \\eta^ {2. 5 - 0. 
5 \\beta}) \\\\ = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.821, + 0.825, + 0.846 + ], + "angle": 0, + "content": "where the second inequality uses \\(\\sum_{\\tau = 0}^{t - 1}(1 - (\\lambda_i + \\lambda_j)\\eta)^\\tau = \\frac{1 - (1 - (\\lambda_i + \\lambda_j)\\eta)^t}{(\\lambda_i + \\lambda_j)\\eta}\\leq \\frac{1}{(\\lambda_i + \\lambda_j)\\eta}\\). By (91)," + }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.858, + 0.737, + 0.923 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {M} _ {t, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {M} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\sum_ {\\tau = 0} ^ {t - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {\\tau} \\eta^ {2. 5 - 0. 5 \\beta}\\right) \\\\ = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "59" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.101, + 0.63, + 0.121 + ], + "angle": 0, + "content": "where the second equality uses \\( M_0^{(s + 1)} = A_0^{(s + 1)} \\). 
By (93) and (94)," + }, + { + "type": "equation", + "bbox": [ + 0.21, + 0.124, + 0.787, + 0.159 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H} \\hat {A} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.16, + 0.786, + 0.218 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {A} _ {0, i, j} ^ {(s + 1)} = \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}) \\\\ = \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H} \\hat {A} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.221, + 0.279, + 0.234 + ], + "angle": 0, + "content": "Then we obtain" + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.238, + 0.806, + 0.323 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {A} _ {0, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H} \\hat {A} _ {0, i, j} ^ {(0)} + \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} \\sum_ {r = 0} ^ {s - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H} \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta} \\sum_ {r = R _ {0}} ^ {s - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H}). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.327, + 0.421, + 0.343 + ], + "angle": 0, + "content": "Notice that \\(|1 - (\\lambda_i + \\lambda_j)\\eta | < 1\\) and" + }, + { + "type": "equation", + "bbox": [ + 0.278, + 0.346, + 0.824, + 0.365 + ], + "angle": 0, + "content": "\\[\n\\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) ^ {H} \\leq \\exp \\left(- \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta H\\right) = \\exp \\left(- \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\alpha\\right). \\tag {96}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.368, + 0.245, + 0.381 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.386, + 0.765, + 0.426 + ], + "angle": 0, + "content": "\\[\n\\sum_ {r = 0} ^ {s - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H} = \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H}}{1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}} \\leq \\frac {1}{1 - \\exp (- (\\lambda_ {i} + \\lambda_ {j}) \\alpha)}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.43, + 0.269, + 0.442 + ], + "angle": 0, + "content": "Then we have" + }, + { + "type": "equation", + "bbox": [ + 0.21, + 0.445, + 0.787, + 0.48 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {0, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H} \\hat {A} _ {0, i, j} ^ {(0)} + \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.485, + 0.825, + 0.524 + ], + "angle": 0, + "content": "Finally, we demonstrate that for \\(s \\geq R_0\\), \\(\\hat{A}_{0,i,j}^{(s)}\\) and \\(\\hat{A}_{\\mathrm{avg},i,j}^{(s)}\\) are approximately equal to \\(\\frac{\\eta}{(\\lambda_i + \\lambda_j)KB_{\\mathrm{loc}}}\\Sigma_{0,i,j}\\). 
By (96), when \\(s \\geq R_0\\), \\((1 - (\\lambda_i + \\lambda_j)\\eta)^{sH} = \\mathcal{O}(\\eta^{10})\\), which gives" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.526, + 0.546, + 0.558 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.56, + 0.799, + 0.595 + ], + "angle": 0, + "content": "\\[\nA _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {(1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\frac {\\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.597, + 0.825, + 0.621 + ], + "angle": 0, + "content": "For \\(s < R_0\\), since \\(\\hat{A}_0^{(0)} = \\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s)}\\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s)\\top} = \\tilde{\\mathcal{O}} (\\eta)\\), we have \\(\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)\\) and \\(\\hat{A}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.635, + 0.514, + 0.655 + ], + "angle": 0, + "content": "Secondly, we compute \\(P_{\\parallel}\\hat{A}_t^{(s)}P_\\perp\\) and \\(P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.657, + 0.794, + 0.677 + ], + "angle": 0, + "content": "Lemma K.30 (General formula for \\(P_{\\perp}\\hat{A}_{t}^{(s)}P_{\\parallel}\\) and \\(P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel})\\) . 
For \\(1\\leq i\\leq m,m < j\\leq d,\\)" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.68, + 0.664, + 0.713 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {t, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.328, + 0.714, + 0.67, + 0.748 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.761, + 0.718, + 0.777 + ], + "angle": 0, + "content": "Proof. Note that for \\(1 \\leq i \\leq m, m < j \\leq d\\) and \\(\\lambda_i > 0, \\lambda_j = 0\\). By (90) and (94)," + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.78, + 0.72, + 0.847 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {A} _ {t, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}) \\\\ = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.851, + 0.492, + 0.871 + ], + "angle": 0, + "content": "By (91) and (94), \\(\\hat{M}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{1.5 - 0.5\\beta})\\). 
Then," + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.874, + 0.671, + 0.908 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "60" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.141 + ], + "angle": 0, + "content": "Similar to Lemma K.30, we have the following lemma for the general formula of \\( P_{\\parallel} \\hat{A}_t^{(s)} P_{\\perp} \\) and \\( P_{\\parallel} \\hat{A}_{\\mathrm{avg}}^{(s)} P_{\\perp} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.144, + 0.82, + 0.166 + ], + "angle": 0, + "content": "Lemma K.31 (General formula for \\(P_{\\parallel}\\hat{A}_t^{(s)}P_\\perp\\) and \\(P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}\\)). For \\(m < i \\leq d\\) and \\(1 \\leq j \\leq m\\)," + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.171, + 0.667, + 0.205 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {t, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {j} \\eta) ^ {t}}{\\lambda_ {j} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.327, + 0.207, + 0.672, + 0.243 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {j} \\eta) ^ {H}}{\\lambda_ {j} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.257, + 0.632, + 0.278 + ], + "angle": 0, + "content": "Finally, we derive the general formula for \\(P_{\\parallel}\\hat{A}_t^{(s)}P_{\\parallel}\\) and \\(P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.281, + 0.815, + 0.302 + ], + "angle": 0, + "content": "Lemma K.32 (General formula for \\(P_{\\parallel}\\hat{A}_t^{(s)}P_{\\parallel}\\) and \\(P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel}\\)). For \\(m < i \\leq d\\) and \\(m < j \\leq d\\)," + }, + { + "type": "equation", + "bbox": [ + 0.338, + 0.308, + 0.617, + 0.34 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {H \\eta^ {2}}{K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.355, + 0.342, + 0.657, + 0.375 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {t, i, j} ^ {(s)} = \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {t \\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.388, + 0.752, + 0.406 + ], + "angle": 0, + "content": "Proof. Note that for \\(m < i \\leq d\\), \\(m < j \\leq d\\) and \\(\\lambda_i = \\lambda_j = 0\\). (90) is then simplified as" + }, + { + "type": "equation", + "bbox": [ + 0.339, + 0.412, + 0.659, + 0.445 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {t + 1, i, j} ^ {(s)} = \\hat {A} _ {t, i, j} ^ {(s)} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 
5 \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.45, + 0.248, + 0.465 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.346, + 0.471, + 0.825, + 0.505 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {t, i, j} ^ {(s)} = \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {t \\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right). \\tag {97}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.512, + 0.825, + 0.543 + ], + "angle": 0, + "content": "According to (91), \\(\\hat{M}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{1.5 - 0.5\\beta})\\) for \\(m < i\\leq d\\) and \\(m < j\\leq d\\). Combining (91), (94) and (97) yields" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.55, + 0.637, + 0.584 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {H \\eta^ {2}}{K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.59, + 0.825, + 0.602 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.617, + 0.568, + 0.636 + ], + "angle": 0, + "content": "Now, we move on to compute the general formula for \\(\\hat{B}_t^{(s)}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.64, + 0.825, + 0.677 + ], + "angle": 0, + "content": "Lemma K.33 (The general formula for \\( P_{\\perp} \\hat{B}_t^{(s)} P_{\\parallel} \\)). 
Note that for \\( 1 \\leq i \\leq m \\) and \\( m < j \\leq d \\), when \\( R_0 := \\lceil \\frac{10}{\\lambda_m \\alpha} \\log \\frac{1}{\\eta} \\rceil \\leq s < R_{\\mathrm{grp}} \\)," + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.684, + 0.638, + 0.717 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {t, i, j} ^ {(s)} = \\frac {(1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.723, + 0.359, + 0.745 + ], + "angle": 0, + "content": "For \\(s < R_0\\), \\(\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.758, + 0.502, + 0.775 + ], + "angle": 0, + "content": "Proof. Note that for \\(1 \\leq i \\leq m\\), \\(\\lambda_i > 0\\). By (92)," + }, + { + "type": "equation", + "bbox": [ + 0.361, + 0.78, + 0.637, + 0.803 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {t + 1, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) \\hat {B} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.807, + 0.224, + 0.821 + ], + "angle": 0, + "content": "Hence," + }, + { + "type": "equation", + "bbox": [ + 0.366, + 0.828, + 0.631, + 0.851 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {t, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {t} \\hat {B} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.855, + 0.299, + 0.87 + ], + "angle": 0, + "content": "According to (95)," + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.876, + 0.676, + 0.924 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {B} _ {0, i, j} ^ {(s + 1)} = \\hat {B} _ {H, i, j} ^ {(s)} + \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 
5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {H} \\hat {B} _ {0, i, j} ^ {(s)} + \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "61" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.27, + 0.117 + ], + "angle": 0, + "content": "Then we have" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.122, + 0.78, + 0.236 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {B} _ {0, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} \\sum_ {r = 0} ^ {s - 1} (1 - \\lambda_ {i} \\eta) ^ {r H} + \\tilde {\\mathcal {O}} (\\sum_ {r = 0} ^ {s - 1} (1 - \\lambda_ {i} \\eta) ^ {r H} \\eta^ {1. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {s H}}{1 - (1 - \\lambda_ {i} \\eta) ^ {H}} \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {s H}}{1 - (1 - \\lambda_ {i} \\eta) ^ {H}} \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.241, + 0.827, + 0.283 + ], + "angle": 0, + "content": "where the second equality uses (96) and the last inequality uses \\(\\hat{B}_0^{(0)} = \\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(0)}\\Delta \\hat{\\phi}^{(0)} = \\mathbf{0}\\). 
For \\(s\\geq R_0\\), \\(\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\frac{1 - (1 - \\lambda_i\\eta)^H}{\\lambda_iKB_{\\mathrm{loc}}} \\eta \\Sigma_{0,i,j} + \\tilde{\\mathcal{O}} (\\eta^{1.5 - 0.5\\beta})\\), which gives" + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.287, + 0.629, + 0.316 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {0, i, j} ^ {(s)} = \\frac {\\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.32, + 0.248, + 0.333 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.338, + 0.638, + 0.371 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {t, i, j} ^ {(s)} = \\frac {(1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.376, + 0.571, + 0.398 + ], + "angle": 0, + "content": "For \\(s < R_0\\), \\(\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)\\) and therefore, \\(\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)\\)." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.379, + 0.826, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.407, + 0.825, + 0.445 + ], + "angle": 0, + "content": "Lemma K.34 (General formula for the elements of \\( P_{\\perp} \\hat{B}_t^{(s)} P_{\\perp} \\)). For \\( 1 \\leq i \\leq m \\) and \\( 1 \\leq j \\leq m \\), \\( \\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}}(\\eta^{1.5 - \\beta}) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.457, + 0.504, + 0.472 + ], + "angle": 0, + "content": "Proof. Note that for \\(1 \\leq i \\leq m\\), \\(\\lambda_i > 0\\). 
By (92)," + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.477, + 0.637, + 0.499 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {t + 1, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) \\hat {B} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.502, + 0.224, + 0.516 + ], + "angle": 0, + "content": "Hence," + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.521, + 0.632, + 0.543 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {t, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {t} \\hat {B} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.545, + 0.232, + 0.56 + ], + "angle": 0, + "content": "By (95)," + }, + { + "type": "equation", + "bbox": [ + 0.302, + 0.565, + 0.694, + 0.697 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {B} _ {0, i, j} ^ {(s + 1)} = \\hat {B} _ {H, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {H} \\hat {B} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\tilde {\\mathcal {O}} (\\sum_ {r = 0} ^ {s - 1} (1 - \\lambda_ {i} \\eta) ^ {r H} \\eta^ {1. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.704, + 0.442, + 0.723 + ], + "angle": 0, + "content": "where the last inequality uses \\(\\hat{B}_0^{(0)} = 0\\)." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.707, + 0.826, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.73, + 0.72, + 0.751 + ], + "angle": 0, + "content": "Lemma K.35 (General formula for \\(P_{\\parallel}\\hat{B}_t^{(s)}\\)). 
For \\(m < i \\leq d\\), \\(\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{1.5 - \\beta})\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.558, + 0.78 + ], + "angle": 0, + "content": "Proof. Note that \\(\\lambda_{i} = 0\\) for \\(m < i\\leq d\\). By (92) and (95)," + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.784, + 0.666, + 0.805 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {t + 1} ^ {(s)} = \\hat {B} _ {t} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}), \\quad \\hat {B} _ {0} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.809, + 0.247, + 0.822 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.828, + 0.639, + 0.849 + ], + "angle": 0, + "content": "\\[\n\\hat {B} _ {t} ^ {(s)} = t \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}) + \\hat {B} _ {0} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.853, + 0.826, + 0.865 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.88, + 0.827, + 0.924 + ], + "angle": 0, + "content": "Having obtained the expressions for \\(\\hat{B}_t^{(s)}\\), \\(\\hat{A}_t^{(s)}\\) and \\(\\hat{A}_{\\mathrm{avg}}^{(s)}\\), we now provide explicit expressions for the first and second moments of the change of manifold projection every round in the following two lemmas." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.51, + 0.961 + ], + "angle": 0, + "content": "62" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.722, + 0.12 + ], + "angle": 0, + "content": "Lemma K.36. 
The expectation of the change of manifold projection every round is" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.126, + 0.825, + 0.168 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} \\right] = \\left\\{ \\begin{array}{l l} \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {\\Sigma} _ {0} + \\boldsymbol {\\Psi} \\left(\\hat {\\phi} ^ {(0)}\\right) \\right] + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & R _ {0} < s < R _ {\\mathrm {g r p}}, \\\\ \\tilde {\\mathcal {O}} (\\eta), & s \\leq R _ {0} \\end{array} , \\right. \\tag {98}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.173, + 0.355, + 0.194 + ], + "angle": 0, + "content": "where \\(R_0 \\coloneqq \\left\\lceil \\frac{10}{\\lambda_m \\alpha} \\log \\frac{1}{\\eta} \\right\\rceil\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.209, + 0.827, + 0.243 + ], + "angle": 0, + "content": "Proof. We first compute \\(\\mathbb{E}[\\hat{\\phi}^{(s + 1)} - \\hat{\\phi}^{(s)}]\\). By (72), we only need to compute \\(P_{\\parallel}\\hat{q}_H^{(s)}\\) by relating it to these matrices. Multiplying both sides of (79) by \\(P_{\\parallel}\\) gives" + }, + { + "type": "equation", + "bbox": [ + 0.211, + 0.249, + 0.826, + 0.275 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {t + 1} ^ {(s)} = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {t} ^ {(s)} - \\eta \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} ] - \\frac {\\eta}{2} \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}). 
\\tag {99}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.28, + 0.421, + 0.295 + ], + "angle": 0, + "content": "Similarly, according to (85), we have" + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.302, + 0.826, + 0.331 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {0} ^ {(s + 1)} = - \\boldsymbol {P} _ {\\parallel} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\boldsymbol {P} _ {\\parallel} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\tag {100}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.336, + 0.393, + 0.352 + ], + "angle": 0, + "content": "Combining (99) and (100) yields" + }, + { + "type": "equation", + "bbox": [ + 0.218, + 0.359, + 0.825, + 0.444 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} = - \\frac {1}{2} \\boldsymbol {P} _ {\\parallel} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\text {a v g}} ^ {(s - 1)} ] - \\frac {\\eta}{2} \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\sum_ {t = 0} ^ {H - 1} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} ] \\tag {101} \\\\ - \\eta P _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t} ^ {(s)} ] - P _ {\\|} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {B} _ {H} ^ {(s - 1)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.451, + 0.827, + 0.505 + ], + "angle": 0, + "content": "By Lemmas K.29, K.32 and K.30, for \\(s \\leq R_0 = \\left\\lfloor \\frac{10}{\\lambda \\alpha} \\log \\frac{1}{\\eta} \\right\\rfloor\\), \\(\\hat{\\pmb{A}}_t^{(s)} = \\tilde{\\mathcal{O}}(\\eta)\\), \\(\\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s)} = \\tilde{\\mathcal{O}}(\\eta)\\) and \\(\\hat{\\pmb{B}}_t^{(s)} = \\tilde{\\mathcal{O}}(\\eta)\\). Therefore, \\(\\mathbb{E}[\\hat{\\phi}^{(s+1)} - \\hat{\\phi}^{(s)}] = \\tilde{\\mathcal{O}}(\\eta)\\). For \\(s > R_0\\), \\(\\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s-1)} = \\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s)} + \\tilde{\\mathcal{O}}(\\eta^{1.5-0.5\\beta})\\). Substituting (101) into (72) gives" + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.51, + 0.756, + 0.642 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{r l} & {\\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] = \\underbrace {\\frac {1}{2} P _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {A} _ {\\mathrm {a v g}} ^ {(s)} ] + P _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {B} _ {H} ^ {(s)} ]} _ {\\mathcal {T} _ {1}}} \\\\ & {\\qquad \\overbrace {- \\eta P _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\underbrace {\\frac {1}{2} \\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t} ^ {(s)} + \\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t} ^ {(s)} ]} _ {\\mathcal {T} _ {3}} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).} \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.647, + 0.643, + 0.663 + ], + "angle": 0, + "content": "Below we compute \\(\\mathcal{T}_1\\) and \\(\\mathcal{T}_2\\) for \\(s > R_0\\) respectively. 
By Lemma K.3," + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.668, + 0.709, + 0.689 + ], + "angle": 0, + "content": "\\[\n\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} ] = \\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\perp} ] = \\mathbf {0},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.293, + 0.69, + 0.652, + 0.712 + ], + "angle": 0, + "content": "\\[\n\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} ] = \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.716, + 0.283, + 0.731 + ], + "angle": 0, + "content": "By Lemma K.4," + }, + { + "type": "equation", + "bbox": [ + 0.387, + 0.738, + 0.61, + 0.758 + ], + "angle": 0, + "content": "\\[\n\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\perp} ] = \\mathbf {0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.763, + 0.326, + 0.777 + ], + "angle": 0, + "content": "Therefore, for \\( s > R_0 \\)," + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.783, + 0.726, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] = \\frac {H \\eta^ {2}}{2 K B _ {\\mathrm {l o c}}} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0, \\parallel} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.822, + 0.524, + 0.837 + ], + "angle": 0, + "content": "where we apply Lemma K.32. Similarly, for \\(s > R_0\\)" + }, + { + "type": "equation", + "bbox": [ + 0.294, + 0.844, + 0.704, + 0.865 + ], + "angle": 0, + "content": "\\[\n\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {B}} _ {H} ^ {(s)} ] = \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {B}} _ {H} ^ {(s)} \\pmb {P} _ {\\parallel} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.87, + 0.427, + 0.885 + ], + "angle": 0, + "content": "where we apply Lemma K.35. Hence," + }, + { + "type": "equation", + "bbox": [ + 0.355, + 0.892, + 0.826, + 0.923 + ], + "angle": 0, + "content": "\\[\n\\mathcal {T} _ {1} = \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0, \\parallel} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\tag {102}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "63" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.344, + 0.118 + ], + "angle": 0, + "content": "We move on to show that" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.123, + 0.826, + 0.155 + ], + "angle": 0, + "content": "\\[\n\\mathcal {T} _ {2} = \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0} - \\boldsymbol {\\Sigma} _ {0, \\parallel} + (K - 1) \\boldsymbol {\\Psi} (\\hat {\\phi} ^ {(0)}) ]. 
\\tag {103}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.162, + 0.827, + 0.207 + ], + "angle": 0, + "content": "Similar to the way we compute \\(\\hat{A}_t^{(s)}\\), \\(\\hat{A}_{\\mathrm{avg}}^{(s)}\\) and \\(\\hat{B}_t^{(s)}\\), we compute \\(\\mathcal{T}_2\\) by splitting \\(\\mathcal{T}_3\\) into four matrices and then substituting them into the linear operator \\(-\\eta P_{\\parallel}\\nabla^3\\mathcal{L}(\\hat{\\phi}^{(0)})[\\cdot ]\\) one by one. First, we show that" + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.213, + 0.826, + 0.263 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} - \\eta \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {P} _ {\\perp} \\mathcal {T} _ {3} \\boldsymbol {P} _ {\\perp} \\right] = \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {\\Sigma} _ {0, \\perp} + (K - 1) \\psi \\left(\\boldsymbol {\\Sigma} _ {0, \\perp}\\right) \\right] \\tag {104} \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.27, + 0.827, + 0.299 + ], + "angle": 0, + "content": "where \\(\\psi (\\cdot)\\) is interpreted as an elementwise matrix function here. By Lemmas K.29 and K.34, for \\(1\\leq i\\leq m\\) \\(1\\le j\\le m\\) and \\(s > R_0\\)" + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.304, + 0.795, + 0.362 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {(1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\frac {\\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\\\ \\hat {B} _ {t, i, j} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.367, + 0.248, + 0.38 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.387, + 0.813, + 0.519 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) ^ {2} B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\frac {H \\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{K \\left(\\lambda_ {i} + \\lambda_ {j}\\right) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} \\\\ + \\left(1 - \\frac {1}{K}\\right) \\frac {H \\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\underbrace {\\left[ 1 - \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{H \\eta (\\lambda_ {i} + \\lambda_ {j})} \\right]} _ {\\mathcal {T} _ {4}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}). \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.186, + 0.521, + 0.356, + 0.562 + ], + "angle": 0, + "content": "\\[\n\\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t, i, j} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.568, + 0.396, + 0.584 + ], + "angle": 0, + "content": "Then we simplify \\(\\mathcal{T}_4\\). Notice that" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.589, + 0.69, + 0.626 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) ^ {H} = \\exp \\left(- H \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) \\left[ 1 + \\mathcal {O} \\left(H \\eta^ {2}\\right) \\right] \\\\ = \\exp (- H (\\lambda_ {i} + \\lambda_ {j}) \\eta) + \\mathcal {O} (\\eta). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.631, + 0.247, + 0.644 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.652, + 0.605, + 0.669 + ], + "angle": 0, + "content": "\\[\n\\mathcal {T} _ {4} = \\psi \\left(\\left(\\lambda_ {i} + \\lambda_ {j}\\right) H \\eta\\right) + \\mathcal {O} (\\eta).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.676, + 0.591, + 0.698 + ], + "angle": 0, + "content": "Substituting \\(\\mathcal{T}_4\\) back into the expression for \\(\\sum_{t=0}^{H-1} \\hat{A}_{t,i,j}^{(s)}\\) gives" + }, + { + "type": "equation", + "bbox": [ + 0.191, + 0.704, + 0.805, + 0.745 + ], + "angle": 0, + "content": "\\[\n\\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t, i, j} ^ {(s)} = \\frac {H \\eta}{K (\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\left(1 - \\frac {1}{K}\\right) \\frac {H \\eta \\psi ((\\lambda_ {i} + \\lambda_ {j}) H \\eta)}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.731, + 0.766 + ], + "angle": 0, + "content": "Combining the elementwise results, we obtain the following matrix form expression:" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.771, + 0.785, + 0.822 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} - \\eta \\pmb {P} _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\perp} \\mathcal {T} _ {3} \\pmb {P} _ {\\perp} ] = - \\frac {H \\eta^ {2}}{2 B} \\pmb {P} _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\mathcal {V} _ {\\pmb {H} _ {0}} (\\pmb {\\Sigma} _ {0, \\perp} + (K - 1) \\psi (\\pmb {\\Sigma} _ {0, \\perp})) ] \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.828, + 0.384, + 0.843 + ], + "angle": 0, + "content": "By Lemma K.4, we have (104)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.85, + 0.412, + 0.864 + ], + "angle": 0, + "content": "Secondly, we show that for \\( s > R_0 \\)" + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.87, + 0.824, + 0.923 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} - \\eta P _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ P _ {\\perp} \\mathcal {T} _ {3} P _ {\\|} + P _ {\\|} \\mathcal {T} _ {3} P _ {\\perp} ] \\\\ = \\frac {H \\eta^ {2}}{B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel} + (K - 1) \\psi (\\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel}) ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\tag {105} \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "64" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.137 + ], + "angle": 0, + "content": "where \\(\\psi (\\cdot)\\) is interpreted as an elementwise matrix function here. 
By symmetry of \\(\\hat{A}_t^{(s)}\\)'s and \\(\\nabla^3\\mathcal{L}(\\hat{\\phi}^{(0)})\\)" + }, + { + "type": "equation", + "bbox": [ + 0.213, + 0.143, + 0.784, + 0.186 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) \\left[ \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\parallel} + \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\perp} \\right] = \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) \\left[ \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\parallel} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.189, + 0.411, + 0.204 + ], + "angle": 0, + "content": "Therefore, we only have to evaluate" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.211, + 0.7, + 0.253 + ], + "angle": 0, + "content": "\\[\n\\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) \\left[ \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} (\\hat {\\boldsymbol {A}} _ {t} ^ {(s)} + \\hat {\\boldsymbol {B}} _ {t} ^ {(s)}) \\boldsymbol {P} _ {\\parallel} + \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\perp} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.259, + 0.825, + 0.292 + ], + "angle": 0, + "content": "To compute the elements of \\(\\sum_{t=0}^{H-1} P_{\\perp} (\\hat{A}_t^{(s)} + \\hat{B}_t^{(s)}) P_{\\parallel}\\), we combine Lemmas K.30 and K.33 to obtain that for \\(1 \\leq i \\leq m\\) and \\(m < j \\leq d\\)," + }, + { + "type": "equation", + "bbox": [ + 0.281, + 0.298, + 0.715, + 0.445 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t, i, j} ^ {(s)} = \\sum_ {t = 0} ^ {H - 1} \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta 
\\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} - \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} ^ {2} B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\left(1 - \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} H \\eta}\\right) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\psi (\\lambda_ {i} H \\eta) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.45, + 0.202, + 0.463 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.468, + 0.783, + 0.615 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t, i, j} ^ {(s)} = \\sum_ {t = 0} ^ {H - 1} \\frac {\\left(1 - \\lambda_ {i} \\eta\\right) ^ {t}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} ^ {2} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} - \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\left(1 - \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} H \\eta}\\right) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} - \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\psi (\\lambda_ {i} H \\eta) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.621, + 0.578, + 0.642 + ], + "angle": 0, + "content": "Therefore, the matrix form of \\(\\sum_{t=0}^{H-1} P_{\\perp} (\\hat{A}_t^{(s)} + \\hat{B}_t^{(s)}) P_{\\parallel}\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.648, + 0.777, + 0.688 + ], + "angle": 0, + "content": "\\[\n\\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} (\\hat {\\boldsymbol {A}} _ {t} ^ {(s)} + \\hat {\\boldsymbol {B}} _ {t} ^ {(s)}) \\boldsymbol {P} _ {\\parallel} = \\frac {H \\eta}{B} \\mathcal {V} _ {\\boldsymbol {H} _ {0}} \\left(\\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel} + (K - 1) \\psi (\\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel})\\right) + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.695, + 0.825, + 0.741 + ], + "angle": 0, + "content": "where \\(\\psi (\\cdot)\\) is interpreted as an elementwise matrix function here. Furthermore, by Lemma K.35, \\(\\sum_{t = 0}^{H - 1}\\hat{B}_t^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{0.5 - \\beta})\\). Applying Lemma K.3, we have (105). Finally, directly applying Lemma K.5, we have" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.747, + 0.825, + 0.767 + ], + "angle": 0, + "content": "\\[\n- \\eta \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {P} _ {\\parallel} \\mathcal {T} _ {3} \\boldsymbol {P} _ {\\parallel} \\right] = \\boldsymbol {0}. \\tag {106}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.825, + 0.802 + ], + "angle": 0, + "content": "Notice that \\(\\psi(\\Sigma_{0,||}) = 0\\) where \\(\\psi(\\cdot)\\) operates on each element. Combining (104), (105) and (106), we obtain (103). By (102) and (103), we have (98)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.809, + 0.747, + 0.824 + ], + "angle": 0, + "content": "Lemma K.37. 
The second moment of the change of manifold projection every round is" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.831, + 0.791, + 0.871 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ (\\hat {\\boldsymbol {\\phi}} ^ {(s + 1)} - \\hat {\\boldsymbol {\\phi}} ^ {(s)}) (\\hat {\\boldsymbol {\\phi}} ^ {(s + 1)} - \\hat {\\boldsymbol {\\phi}} ^ {(s)}) ^ {\\top} ] = \\left\\{ \\begin{array}{l l} \\frac {H \\eta^ {2}}{B} \\pmb {\\Sigma} _ {0, \\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), & R _ {0} \\leq s < R _ {\\mathrm {g r p}} \\\\ \\tilde {\\mathcal {O}} (\\eta), & s < R _ {0} \\end{array} \\right.,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.876, + 0.353, + 0.897 + ], + "angle": 0, + "content": "where \\(R_0 \\coloneqq \\left\\lceil \\frac{10}{\\lambda_m \\alpha} \\log \\frac{1}{\\eta} \\right\\rceil\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.688, + 0.925 + ], + "angle": 0, + "content": "Proof. Directly apply Lemma K.32 and Lemma K.27 and we have the lemma." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "65" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.614, + 0.12 + ], + "angle": 0, + "content": "With Lemmas K.36 and K.37, we are ready to prove Theorem K.3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.825, + 0.168 + ], + "angle": 0, + "content": "Proof of Theorem K.3. We first derive \\(\\mathbb{E}[\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})}]\\). Recall that \\(R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}} \\right\\rfloor = \\frac{1}{H\\eta^{1 + \\beta}} + o(1)\\) where \\(0 < \\beta < 0.5\\). 
By Lemma K.36," + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.174, + 0.757, + 0.253 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} [ \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(0)} ] = \\sum_ {s = 0} ^ {R _ {0}} \\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] + \\sum_ {s = R _ {0} + 1} ^ {R _ {\\mathrm {g r p}} - 1} \\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] \\\\ = \\frac {\\eta^ {1 - \\beta}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {\\Sigma} _ {0} + \\pmb {\\Psi} (\\hat {\\phi} ^ {(0)}) ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 2 \\beta}) + \\tilde {\\mathcal {O}} (\\eta). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.258, + 0.459, + 0.275 + ], + "angle": 0, + "content": "Then we compute \\(\\mathbb{E}[\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})}\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})^{\\top}}]\\)." + }, + { + "type": "equation", + "bbox": [ + 0.187, + 0.282, + 0.819, + 0.418 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} \\left[ \\left(\\sum_ {s = 0} ^ {R _ {\\mathrm {g r p}} - 1} (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)})\\right) \\left(\\sum_ {s = 0} ^ {R _ {\\mathrm {g r p}} - 1} (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)})\\right) ^ {\\top} \\right] \\\\ = \\sum_ {s = 0} ^ {R _ {\\mathrm {g r p}} - 1} \\mathbb {E} [ (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ^ {\\top} ] + \\sum_ {s \\neq s ^ {\\prime}} \\mathbb {E} [ (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ] \\mathbb {E} [ (\\hat {\\phi} ^ {(s ^ {\\prime} + 1)} - \\hat {\\phi} ^ {(s ^ {\\prime})}) ^ {\\top} ] \\\\ = \\frac {\\eta^ {1 - \\beta}}{B} \\Sigma_ {0, \\parallel} + \\tilde {\\mathcal {O}} (\\eta) + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 1. 
5 \\beta}), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.423, + 0.714, + 0.443 + ], + "angle": 0, + "content": "where the last inequality uses \\(\\mathbb{E}[(\\hat{\\phi}^{(s + 1)} - \\hat{\\phi}^{(s)})]\\mathbb{E}[(\\hat{\\phi}^{(s' + 1)} - \\hat{\\phi}^{(s')})^\\top ] = \\tilde{\\mathcal{O}} (\\eta^2)\\)" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.426, + 0.826, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.457, + 0.471, + 0.47 + ], + "angle": 0, + "content": "K.10 PROOF OF WEAK APPROXIMATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.483, + 0.827, + 0.548 + ], + "angle": 0, + "content": "We are now in a position to utilize the estimate of moments obtained in previous subsections to prove the closeness of the sequence \\(\\{\\phi^{(s)}\\}_{s = 0}^{\\lfloor T / (H\\eta^2)\\rfloor}\\) and the SDE solution \\(\\{\\zeta (t):t\\in [0,T]\\}\\) in the sense of weak approximation. Recall the SDE that we expect the manifold projection \\(\\{\\Phi (\\bar{\\theta}^{(s)})\\}_{s = 0}^{\\lfloor T / (H\\eta^2)\\rfloor}\\) to track:" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.553, + 0.826, + 0.596 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(b) d r i f t - I}} \\underbrace {- \\frac {K - 1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(c) d r i f t - I I}}\\right), \\tag {107}\n\\]" + }, + { + "type": "text", + "bbox": [ 
+ 0.171, + 0.602, + 0.825, + 0.629 + ], + "angle": 0, + "content": "According to Lemma K.3 and Lemma K.4, the drift term in total can be written as the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.337, + 0.634, + 0.659, + 0.663 + ], + "angle": 0, + "content": "\\[\n(\\mathbf {b}) + (\\mathbf {c}) = \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.667, + 0.614, + 0.683 + ], + "angle": 0, + "content": "Then by definition of \\(P_{\\zeta}\\), (107) is equivalent to the following SDE:" + }, + { + "type": "equation", + "bbox": [ + 0.246, + 0.688, + 0.826, + 0.721 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = \\frac {1}{\\sqrt {B}} \\partial \\Phi (\\boldsymbol {\\zeta}) \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t} + \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t. \\tag {108}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.827, + 0.828 + ], + "angle": 0, + "content": "Therefore, we only have to show that \\(\\phi^{(s)}\\) closely tracks \\(\\{\\zeta(t)\\}\\) satisfying Equation (108). By Lemma K.11, there exists an \\(\\epsilon_3\\) neighborhood of \\(\\Gamma\\), \\(\\Gamma^{\\epsilon_3}\\), where \\(\\Phi(\\cdot)\\) is \\(\\mathcal{C}^\\infty\\)-smooth. Due to compactness of \\(\\Gamma\\), \\(\\Gamma^{\\epsilon_3}\\) is bounded and the mappings \\(\\partial^2\\Phi(\\cdot)\\), \\(\\partial\\Phi(\\cdot)\\), \\(\\Sigma^{1/2}(\\cdot)\\), \\(\\Sigma(\\cdot)\\) and \\(\\Psi(\\cdot)\\) are all Lipschitz in \\(\\Gamma^{\\epsilon_3}\\). 
By the Kirszbraun theorem, both the drift and diffusion term of (108) can be extended as Lipschitz functions on \\(\\mathbb{R}^d\\). Therefore, the solution to the extended SDE exists and is unique. We further show that the solution, if initialized as a point on \\(\\Gamma\\), always stays on the manifold almost surely." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.834, + 0.538, + 0.849 + ], + "angle": 0, + "content": "As a preparation, we first show that \\(\\Gamma\\) has no boundary." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.852, + 0.609, + 0.868 + ], + "angle": 0, + "content": "Lemma K.38. Under Assumptions 3.1 to 3.3, \\(\\Gamma\\) has no boundary." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Proof. We prove by contradiction. If \\(\\Gamma\\) has boundary \\(\\partial \\Gamma\\), WLOG, for a point \\(\\pmb{p} \\in \\partial \\Gamma\\), let the Hessian at \\(\\pmb{p}\\) be diagonal with the form \\(\\nabla^2 \\mathcal{L}(\\pmb{p}) = \\mathrm{diag}(\\lambda_1, \\dots, \\lambda_d)\\) where \\(\\lambda_i > 0\\) for \\(1 \\leq i \\leq m\\) and \\(\\lambda_i = 0\\) for \\(m < i \\leq d\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.51, + 0.961 + ], + "angle": 0, + "content": "66" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.169 + ], + "angle": 0, + "content": "Denote by \\( \\pmb{x}_{i:j} := (x_i, x_{i+1}, \\dots, x_j) \\) (\\( i \\leq j \\)) the \\( (j - i + 1) \\)-dimensional vector formed by the \\( i \\)-th to \\( j \\)-th coordinates of \\( \\pmb{x} \\). 
Since \\( \\frac{\\partial(\\nabla\\mathcal{L}(\\pmb{p}))}{\\partial\\pmb{p}_{1:m}} = \\mathrm{diag}(\\lambda_1, \\dots, \\lambda_m) \\) is invertible, by the implicit function theorem, there exists an open neighborhood \\( V \\) of \\( \\pmb{p}_{m+1:d} \\) such that \\( \\nabla\\mathcal{L}(\\pmb{v}) = \\mathbf{0} \\), \\( \\forall \\pmb{v} \\in V \\). Then, \\( \\mathcal{L}(\\pmb{v}) = \\mathcal{L}(\\pmb{p}) = \\min_{\\pmb{\\theta} \\in U} \\mathcal{L}(\\pmb{\\theta}) \\) and hence \\( V \\subset \\Gamma \\), which contradicts with \\( \\pmb{p} \\in \\partial \\Gamma \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.182, + 0.825, + 0.211 + ], + "angle": 0, + "content": "Therefore, \\(\\Gamma\\) is a closed manifold (i.e., compact and without boundary). Then we have the following lemma stating that \\(\\Gamma\\) is invariant for (108)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.213, + 0.826, + 0.243 + ], + "angle": 0, + "content": "Lemma K.39. Let \\(\\zeta(t)\\) be the solution to (108) with \\(\\zeta(0) \\in \\Gamma\\), then \\(\\zeta(t) \\in \\Gamma\\) for all \\(t \\geq 0\\). In other words, \\(\\Gamma\\) is invariant for (108)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.257, + 0.825, + 0.302 + ], + "angle": 0, + "content": "Proof. 
According to Filipovic (2000) and Du & Duan (2007), for a closed manifold \\(\\mathcal{M}\\) to be viable for the SDE \\(\\mathrm{d}\\pmb {X}(t) = F(\\pmb {X}(t))\\mathrm{d}t + \\pmb {B}(\\pmb {X}(t))\\mathrm{d}\\pmb{W}_t\\) where \\(F:\\mathbb{R}^d\\to \\mathbb{R}^d\\) and \\(\\pmb {B}:\\mathbb{R}^d\\rightarrow \\mathbb{R}^d\\) are locally Lipschitz, we only have to verify the following Nagumo type consistency condition:" + }, + { + "type": "equation", + "bbox": [ + 0.257, + 0.304, + 0.741, + 0.34 + ], + "angle": 0, + "content": "\\[\n\\mu (\\pmb {x}) := F (\\pmb {x}) - \\frac {1}{2} \\sum_ {j} \\mathrm {D} [ B _ {j} (\\pmb {x}) ] B _ {j} (\\pmb {x}) \\in T _ {\\pmb {x}} (\\mathcal {M}), \\quad B _ {j} (\\pmb {x}) \\in T _ {\\pmb {x}} (\\mathcal {M}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.345, + 0.704, + 0.362 + ], + "angle": 0, + "content": "where \\(\\mathrm{D}[\\cdot ]\\) is the Jacobian operator and \\(B_{j}(\\pmb {x})\\) denotes the \\(j\\)-th column of \\(\\pmb {B}(\\pmb {x})\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.366, + 0.826, + 0.428 + ], + "angle": 0, + "content": "In our context, since for \\(\\phi \\in \\Gamma\\), \\(\\partial \\Phi(\\phi)\\) is a projection matrix onto \\(T_{\\phi}(\\Gamma)\\), each column of \\(\\partial \\Phi(\\phi)\\Sigma^{1/2}(\\phi)\\) belongs to \\(T_{\\phi}(\\Gamma)\\), verifying the second condition. Denote by \\(P_{\\perp}(\\phi) := I_d - \\partial \\Phi(\\phi)\\) the projection onto the normal space of \\(\\Gamma\\) at \\(\\phi\\). To verify the first condition, it suffices to show that \\(P_{\\perp}(\\phi)\\mu(\\phi) = 0\\). We evaluate \\(\\sum_{j} P_{\\perp}(\\phi)\\mathrm{D}[B_j(\\phi)]B_j(\\phi)\\) as follows." 
+ }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.433, + 0.825, + 0.54 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sum_ {j} \\boldsymbol {P} _ {\\perp} (\\phi) \\mathrm {D} [ B _ {j} (\\phi) ] B _ {j} (\\phi) = \\frac {1}{B} \\sum_ {j} \\mathrm {D} [ \\partial \\Phi (\\phi) \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi) ] \\partial \\Phi (\\phi) \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi) \\\\ = \\frac {1}{B} P _ {\\perp} (\\phi) \\sum_ {j} \\partial^ {2} \\Phi (\\phi) [ \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi), \\partial \\Phi (\\phi) \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi) ] \\\\ = - \\frac {1}{B} \\nabla^ {2} \\mathcal {L} (\\phi) ^ {+} \\nabla^ {3} \\mathcal {L} (\\phi) [ \\boldsymbol {\\Sigma} _ {\\parallel} (\\phi) ], \\tag {109} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.542, + 0.704, + 0.558 + ], + "angle": 0, + "content": "where the last inequality uses Lemma K.3. Again applying Lemma K.3, we have" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.561, + 0.825, + 0.59 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {P} _ {\\perp} (\\phi) F (\\phi) = - \\frac {1}{2 B} \\nabla^ {2} \\mathcal {L} (\\phi) ^ {+} \\nabla^ {3} \\mathcal {L} (\\phi) [ \\boldsymbol {\\Sigma} _ {\\parallel} (\\phi) ]. \\tag {110}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.592, + 0.576, + 0.607 + ], + "angle": 0, + "content": "Combining (109) and (110), we can verify the first condition." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.622, + 0.825, + 0.654 + ], + "angle": 0, + "content": "In order to establish Theorem 3.2, it suffices to prove the following theorem, which captures the closeness of \\(\\phi^{(s)}\\) and \\(\\zeta(t)\\) every \\(R_{\\mathrm{grp}}\\) rounds." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.657, + 0.825, + 0.699 + ], + "angle": 0, + "content": "Theorem K.4. 
If \\(\\| \\bar{\\pmb{\\theta}}^{(0)} - \\pmb{\\phi}^{(0)}\\|_2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})\\) and \\(\\zeta (0) = \\phi^{(0)}\\in \\Gamma\\), then for \\(R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{0.75}}\\right\\rfloor\\) every test function \\(g\\in \\mathcal{C}^3\\)," + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.703, + 0.728, + 0.733 + ], + "angle": 0, + "content": "\\[\n\\max _ {n = 0, \\dots , \\lfloor T / \\eta^ {0. 7 5} \\rfloor} \\left| \\mathbb {E} g (\\boldsymbol {\\phi} ^ {(n R _ {\\mathrm {g r p}})}) - \\mathbb {E} g (\\boldsymbol {\\zeta} (n \\eta^ {0. 7 5})) \\right| \\leq C _ {g} \\eta^ {0. 2 5} (\\log \\frac {1}{\\eta}) ^ {b},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.825, + 0.767 + ], + "angle": 0, + "content": "where \\( C_g > 0 \\) is a constant independent of \\( \\eta \\) but can depend on \\( g(\\cdot) \\) and \\( b > 0 \\) is a constant independent of \\( \\eta \\) and \\( g(\\cdot) \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.78, + 0.564, + 0.794 + ], + "angle": 0, + "content": "K.10.1 PRELIMINARIES AND ADDITIONAL NOTATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.804, + 0.825, + 0.833 + ], + "angle": 0, + "content": "We first introduce a general formulation for stochastic gradient algorithms (SGAs) and then specify the components of this formulation in our context. 
Consider the following SGA:" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.837, + 0.594, + 0.853 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} _ {n + 1} = \\boldsymbol {x} _ {n} + \\eta_ {\\mathrm {e}} \\boldsymbol {h} (\\boldsymbol {x} _ {n}, \\boldsymbol {\\xi} _ {n}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.858, + 0.826, + 0.902 + ], + "angle": 0, + "content": "where \\( \\pmb{x}_n \\in \\mathbb{R}^d \\) is the parameter, \\( \\eta_{\\mathrm{e}} \\) is the learning rate, \\( h(\\cdot, \\cdot) \\) is the update which depends on \\( \\pmb{x}_n \\) and a random vector \\( \\pmb{\\xi}_n \\) sampled from some distribution \\( \\Xi(\\pmb{x}_n) \\). Also, consider the following Stochastic Differential Equation (SDE)." + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.905, + 0.629, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {X} (t) = \\boldsymbol {b} (\\boldsymbol {X} (t)) \\mathrm {d} t + \\boldsymbol {\\sigma} (\\boldsymbol {X} (t)) \\mathrm {d} \\boldsymbol {W} _ {t},\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "67" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.778, + 0.12 + ], + "angle": 0, + "content": "where \\(\\pmb {b}(\\cdot):\\mathbb{R}^d\\to \\mathbb{R}^d\\) is the drift function and \\(\\sigma (\\cdot):\\mathbb{R}^{d}\\rightarrow \\mathbb{R}^{d\\times d}\\) is the diffusion matrix." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.125, + 0.772, + 0.141 + ], + "angle": 0, + "content": "Denote by \\(\\mathcal{P}_X(\\pmb {x},s,t)\\) the distribution of \\(X(t)\\) with the initial condition \\(X(s) = x\\) .Define" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.148, + 0.768, + 0.167 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {\\Delta}} (\\boldsymbol {x}, n) := \\boldsymbol {X} _ {(n + 1) \\eta_ {\\mathrm {e}}} - \\boldsymbol {x}, \\quad \\text {w h e r e} \\boldsymbol {X} _ {(n + 1) \\eta_ {\\mathrm {e}}} \\sim \\mathcal {P} _ {\\boldsymbol {X}} (\\boldsymbol {x}, n \\eta_ {\\mathrm {e}}, (n + 1) \\eta_ {\\mathrm {e}}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.172, + 0.457, + 0.187 + ], + "angle": 0, + "content": "which characterizes the update in one step." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.192, + 0.825, + 0.254 + ], + "angle": 0, + "content": "In our context, we view the change of manifold projection over \\( R_{\\mathrm{grp}} \\coloneqq \\left\\lfloor \\frac{1}{\\alpha\\eta^{1 - \\beta}} \\right\\rfloor (\\beta \\in (0, 0.5)) \\) rounds as one \"giant step\". Hence the \\( \\phi^{(nR_{\\mathrm{grp}})} \\) corresponds to the discrete time random variable \\( x_{n} \\) corresponds to and \\( \\zeta(t) \\) corresponds to the continuous time random variable \\( X_{t} \\). 
According to Theorem K.2, we set" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.259, + 0.8, + 0.291 + ], + "angle": 0, + "content": "\\[\n\\eta_ {\\mathrm {e}} = \\eta^ {1 - \\beta}, \\quad \\boldsymbol {b} (\\boldsymbol {\\zeta}) = \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) \\left[ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) \\right], \\quad \\boldsymbol {\\sigma} (\\boldsymbol {\\zeta}) = \\frac {1}{\\sqrt {B}} \\partial \\Phi (\\boldsymbol {\\zeta}) \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {\\zeta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.297, + 0.564, + 0.314 + ], + "angle": 0, + "content": "Due to compactness of \\(\\Gamma\\), \\(b(\\cdot)\\) and \\(\\sigma(\\cdot)\\) are Lipschitz on \\(\\Gamma\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.32, + 0.601, + 0.336 + ], + "angle": 0, + "content": "As for the update in one step, \\(\\tilde{\\Delta} (\\cdot ,\\cdot)\\) is defined in our context as:" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.343, + 0.787, + 0.363 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\Delta} (\\phi , n) := \\zeta_ {(n + 1) \\eta_ {\\mathrm {e}}} - \\phi , \\qquad \\text {w h e r e} \\zeta_ {(n + 1) \\eta_ {\\mathrm {e}}} \\sim \\mathcal {P} _ {\\zeta} (\\phi , n \\eta_ {\\mathrm {e}}, (n + 1) \\eta_ {\\mathrm {e}}) \\text {a n d} \\phi \\in \\Gamma .\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.367, + 0.405, + 0.381 + ], + "angle": 0, + "content": "For convenience, we further define" + }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.386, + 0.741, + 0.406 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Delta} ^ {(n)} := \\hat {\\phi} ^ {((n + 1) R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(n R _ {\\mathrm {g r p}})}, \\qquad \\qquad \\tilde {\\boldsymbol {\\Delta}} ^ {(n)} := \\tilde {\\boldsymbol {\\Delta}} (\\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})}, n),\n\\]" + }, + { + "type": 
"equation", + "bbox": [ + 0.258, + 0.408, + 0.722, + 0.428 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {b} ^ {(n)} := \\boldsymbol {b} (\\hat {\\boldsymbol {\\phi}} ^ {(n R _ {\\mathrm {g r p}})}), \\qquad \\qquad \\boldsymbol {\\sigma} ^ {(n)} := \\boldsymbol {\\sigma} (\\hat {\\boldsymbol {\\phi}} ^ {(n R _ {\\mathrm {g r p}})}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.825, + 0.465 + ], + "angle": 0, + "content": "We use \\( C_{g,i} \\) to denote constants that can depend on the test function \\( g \\) and independent of \\( \\eta_{\\mathrm{e}} \\). The following lemma relates the moments of \\( \\tilde{\\Delta}(\\phi, n) \\) to \\( b(\\phi) \\) and \\( \\sigma(\\phi) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.468, + 0.823, + 0.484 + ], + "angle": 0, + "content": "Lemma K.40. There exists a positive constant \\(C_0\\) independent of \\(\\eta_{\\mathrm{e}}\\) and \\(g\\) such that for all \\(\\phi \\in \\Gamma\\)" + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.489, + 0.78, + 0.509 + ], + "angle": 0, + "content": "\\[\n| \\mathbb {E} [ \\tilde {\\Delta} _ {i} (\\phi , n) ] - \\eta_ {\\mathrm {e}} b _ {i} (\\phi) | \\leq C _ {0} \\eta_ {\\mathrm {e}} ^ {2}, \\quad \\forall 1 \\leq i \\leq d,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.512, + 0.78, + 0.552 + ], + "angle": 0, + "content": "\\[\n| \\mathbb {E} [ \\tilde {\\Delta} _ {i} (\\phi , n) \\tilde {\\Delta} _ {j} (\\pmb {x}, n) ] - \\eta_ {\\mathrm {e}} \\sum_ {l = 1} ^ {d} \\sigma_ {i, l} (\\phi) \\sigma_ {l, j} (\\phi) | \\leq C _ {0} \\eta_ {\\mathrm {e}} ^ {2}, \\quad \\forall 1 \\leq i, j \\leq d,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.555, + 0.779, + 0.597 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\tilde {\\Delta} _ {i _ {s}} (\\phi , n) \\right| \\right] \\leq C _ {0} \\eta_ {\\mathrm {e}} ^ {3}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d.\n\\]" 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.602, + 0.825, + 0.629 + ], + "angle": 0, + "content": "The lemma below states that the expectation of the test function is smooth with respect to the initial value." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.826, + 0.688 + ], + "angle": 0, + "content": "Proof. Noticing that (i) the solution to (108) always stays on \\(\\Gamma\\) almost surely if its initial value \\(\\zeta(0)\\) belongs to \\(\\Gamma\\), (ii) \\(b(\\cdot)\\) and \\(\\sigma(\\cdot)\\) are \\(\\mathcal{C}^\\infty\\) and (iii) \\(\\Gamma\\) is compact, we can directly apply Lemma B.3 in Malladi et al. (2022) and Lemma 26 in Li et al. (2019a) to obtain the above lemma." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.703, + 0.825, + 0.731 + ], + "angle": 0, + "content": "The following lemma states that the expectation of \\( g(\\zeta(t)) \\) for \\( g \\in \\mathcal{C}^3 \\) is smooth with respect to the initial value of the SDE solution." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.641, + 0.751 + ], + "angle": 0, + "content": "Lemma K.41. Let \\( s \\in [0, T] \\), \\( \\phi \\in \\Gamma \\) and \\( g \\in \\mathcal{C}^3 \\). For \\( t \\in [s, T] \\), define" + }, + { + "type": "equation", + "bbox": [ + 0.383, + 0.757, + 0.612, + 0.775 + ], + "angle": 0, + "content": "\\[\nu (\\phi , s, t) := \\mathbb {E} _ {\\zeta_ {t} \\sim \\mathcal {P} _ {\\zeta} (\\phi , s, t)} [ g (\\zeta_ {t}) ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.782, + 0.418, + 0.797 + ], + "angle": 0, + "content": "Then \\(u(\\cdot ,s,t)\\in \\mathcal{C}^3\\) uniformly in \\(s,t\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.812, + 0.796, + 0.827 + ], + "angle": 0, + "content": "Proof. A slight modification of Lemma B.4 in Malladi et al. (2022) will give the above lemma." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.842, + 0.588, + 0.856 + ], + "angle": 0, + "content": "K.10.2 PROOF OF THE APPROXIMATION IN OUR CONTEXT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.865, + 0.825, + 0.925 + ], + "angle": 0, + "content": "For \\(\\beta \\in (0, 0.5)\\), define \\(\\gamma_1 \\coloneqq \\frac{1.5 - 2\\beta}{1 - \\beta}\\), \\(\\gamma_2 \\coloneqq \\frac{1}{1 - \\beta}\\), and then \\(1 < \\gamma_1 < 1.5\\), \\(1 < \\gamma_2 < 2\\). We introduce the following lemma which serves as a key step to control the approximation error. Specifically, this lemma bounds the difference in one step change between the discrete process and the continuous one as well as the product of higher orders." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "68" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.143 + ], + "angle": 0, + "content": "Lemma K.42. If \\(\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})\\), then there exist positive constants \\(C_1\\) and \\(b\\) independent of \\(\\eta_{\\mathrm{e}}\\) and \\(g\\) such that for all \\(0\\leq n < \\lfloor T / \\eta_{\\mathrm{e}}\\rfloor\\)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.15, + 0.197, + 0.163 + ], + "angle": 0, + "content": "1." 
+ }, + { + "type": "equation", + "bbox": [ + 0.274, + 0.164, + 0.816, + 0.188 + ], + "angle": 0, + "content": "\\[\n\\left| \\mathbb {E} \\left[ \\Delta_ {i} ^ {(n)} - \\tilde {\\Delta} _ {i} ^ {(n)} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| \\leq C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} + C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\quad \\forall 1 \\leq i \\leq d,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.21, + 0.19, + 0.816, + 0.213 + ], + "angle": 0, + "content": "\\[\n| \\mathbb {E} [ \\Delta_ {i} ^ {(n)} \\Delta_ {j} ^ {(n)} - \\tilde {\\Delta} _ {i} ^ {(n)} \\tilde {\\Delta} _ {j} ^ {(n)} | \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} | \\leq C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b} + C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b}, \\forall 1 \\leq i, j \\leq d.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.222, + 0.197, + 0.235 + ], + "angle": 0, + "content": "2." 
+ }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.236, + 0.769, + 0.279 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\Delta_ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {1} ^ {2} \\eta_ {\\mathrm {e}} ^ {2 \\gamma_ {1}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {2 b}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.257, + 0.281, + 0.768, + 0.323 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\tilde {\\Delta} _ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {1} ^ {2} \\eta_ {\\mathrm {e}} ^ {2 \\gamma_ {1}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {2 b}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.336, + 0.471, + 0.351 + ], + "angle": 0, + "content": "Proof. 
According to Appendix K.7, we have" + }, + { + "type": "equation", + "bbox": [ + 0.363, + 0.353, + 0.635, + 0.395 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\Delta_ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] = \\tilde {\\mathcal {O}} (\\eta^ {3 - 3 \\beta}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.398, + 0.825, + 0.426 + ], + "angle": 0, + "content": "Since \\(\\gamma_{1} < 1.5\\) and \\(\\gamma_{2} < 2\\), we can utilize Theorem K.3 and conclude that there exist positive constants \\(C_2\\) and \\(b\\) independent of \\(\\eta_{\\mathrm{e}}\\) and \\(g\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.279, + 0.429, + 0.825, + 0.477 + ], + "angle": 0, + "content": "\\[\n\\left| \\mathbb {E} \\left[ \\Delta_ {i} ^ {(n)} - \\eta_ {\\mathrm {e}} b _ {i} ^ {(n)} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| \\leq C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} + C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\forall 1 \\leq i \\leq d, \\tag {111}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.186, + 0.48, + 0.825, + 0.537 + ], + "angle": 0, + "content": "\\[\n\\left| \\mathbb {E} \\left[ \\Delta_ {i} ^ {(n)} \\Delta_ {j} ^ {(n)} - \\eta_ {\\mathrm {e}} \\sum_ {l = 1} ^ {d} \\sigma_ {i, l} ^ {(n)} \\sigma_ {l, j} ^ {(n)} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| \\leq C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} + C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\forall 1 \\leq i, j \\leq d, \\tag {112}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.3, + 0.54, + 0.826, + 0.582 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} 
\\left[ \\left| \\prod_ {s = 1} ^ {6} \\Delta_ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {2} ^ {2} \\eta_ {\\mathrm {e}} ^ {2 \\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {2 b}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d. \\tag {113}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.584, + 0.62, + 0.599 + ], + "angle": 0, + "content": "Combining (111) - (113) with Lemma K.40 gives the above lemma." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.605, + 0.825, + 0.647 + ], + "angle": 0, + "content": "Lemma K.43. For a test function \\(g \\in \\mathcal{C}^3\\), let \\(u_{l,n}(\\phi) \\coloneqq u(\\phi, l\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}}) = \\mathbb{E}_{\\zeta_t \\sim \\mathcal{P}_{\\zeta}(\\phi, l\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}})}[g(\\zeta_t)]\\). If \\(\\|\\bar{\\pmb{\\theta}}^{(0)} - \\pmb{\\phi}^{(0)}\\|_2 = \\mathcal{O}(\\sqrt{\\eta \\log \\frac{1}{\\eta}})\\), then for all \\(0 \\leq l \\leq n-1\\) and \\(1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor\\)," + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.65, + 0.825, + 0.676 + ], + "angle": 0, + "content": "\\[\n\\left| \\mathbb {E} [ u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\pmb {\\Delta} ^ {(l)}) - u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\pmb {\\Delta}} ^ {(l + 1)}) \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} ] \\right| \\leq C _ {g, 1} (\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}) \\log (\\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.681, + 0.731, + 0.699 + ], + "angle": 0, + "content": "where \\(C_{g,1}\\) is a positive constant independent of \\(\\eta\\) and \\(\\hat{\\phi}^{(lR_{\\mathrm{grp}})}\\) but can depend on \\(g\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.712, + 0.827, + 0.743 + ], + "angle": 0, + "content": "Proof. By Lemma K.41, \\( u_{l,n}(\\phi) \\in \\mathcal{C}^3 \\) for all \\( l \\) and \\( n \\). That is, there exists \\( K(\\cdot) \\in G \\) such that for all \\( l, n, u_{l,n}(\\phi) \\) and its partial derivatives up to the third order are bounded by \\( K(\\phi) \\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.748, + 0.533, + 0.764 + ], + "angle": 0, + "content": "By the law of total expectation and triangle inequality," + }, + { + "type": "equation", + "bbox": [ + 0.245, + 0.766, + 0.76, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left| \\mathbb {E} [ u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\pmb {\\Delta} ^ {(l)}) - u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\pmb {\\Delta}} ^ {(l)}) ] \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} \\right| \\\\ \\leq \\underbrace {\\left| \\mathbb {E} \\left[ u _ {l + 1 , n} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\boldsymbol {\\Delta} ^ {(l)}\\right) - u _ {l + 1 , n} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\boldsymbol {\\Delta}} ^ {(l)}\\right) \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} , \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} \\right] \\right|} _ {\\mathcal {A} _ {1}} \\\\ + \\underbrace {\\eta^ {1 0 0} \\mathbb {E} \\left[ \\left| u _ {l + 1 , n} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\boldsymbol {\\Delta} ^ {(l)}\\right) \\right| \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\bar {\\mathcal {E}} _ {0} ^ {(l R _ {\\mathrm {g r p}})} \\right]} _ {\\mathcal {A} _ {2}} \\\\ + \\underbrace {\\eta^ {1 0 0} \\mathbb {E} [ | u _ {l + 1 , n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\Delta} ^ {(l)}) | | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} , \\bar {\\mathcal {E}} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ]} _ {\\mathcal {A} _ {3}}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "69" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.102, + 0.825, + 0.15 + ], + "angle": 0, + "content": "We first bound \\(\\mathcal{A}_2\\) and \\(\\mathcal{A}_3\\). Since \\(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} \\in \\Gamma\\), both \\(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\pmb{\\Delta}^{(l)}\\) and \\(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\tilde{\\Delta}^{(l)}\\) belong to \\(\\Gamma\\). Due to compactness of \\(\\Gamma\\) and smoothness of \\(u_{l+1,n}(\\cdot)\\) on \\(\\Gamma\\), there exist a positive constant \\(C_{g,2}\\) such that \\(\\mathcal{A}_2 + \\mathcal{A}_3 \\leq C_{g,2}\\eta^{100}\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.156, + 0.733, + 0.174 + ], + "angle": 0, + "content": "We proceed to bound \\(\\mathcal{A}_1\\). 
Expanding \\(u_{l + 1,n}(\\cdot)\\) at \\(\\hat{\\phi}^{(lR_{\\mathrm{grp}})}\\) and by triangle inequality," + }, + { + "type": "equation", + "bbox": [ + 0.206, + 0.177, + 0.79, + 0.319 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {A} _ {1} ^ {(s)} \\leq \\underbrace {\\sum_ {i = 1} ^ {d} \\left| \\mathbb {E} \\big [ \\frac {\\partial u _ {l + 1 , n}}{\\partial \\phi_ {i}} (\\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}) \\left(\\Delta_ {i} ^ {(l)} - \\tilde {\\Delta} _ {i} ^ {(l)}\\right) | \\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} \\right|} _ {\\mathcal {B} _ {1}} \\\\ + \\underbrace {\\frac {1}{2} \\sum_ {1 \\leq i , j \\leq d} \\left| \\mathbb {E} [ \\frac {\\partial^ {2} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j}} (\\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}) (\\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} - \\tilde {\\Delta} _ {i} ^ {(l)} \\tilde {\\Delta} _ {j} ^ {(l)}) | \\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ] \\right|} _ {\\mathcal {B} _ {2}} \\\\ + | \\mathcal {R} | + | \\tilde {\\mathcal {R}} |, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.323, + 0.406, + 0.338 + ], + "angle": 0, + "content": "where the remainders \\(\\mathcal{R}\\) and \\(\\tilde{\\mathcal{R}}\\) are" + }, + { + "type": "equation", + "bbox": [ + 0.218, + 0.339, + 0.75, + 0.379 + ], + "angle": 0, + "content": "\\[\n\\mathcal {R} = \\frac {1}{6} \\sum_ {1 \\leq i, j, p \\leq d} \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\boldsymbol {\\Delta} ^ {(l)}) \\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} | \\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} 
],\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.218, + 0.381, + 0.779, + 0.42 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\mathcal {R}} = \\frac {1}{6} \\sum_ {1 \\leq i, j, p \\leq d} \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\theta} \\tilde {\\Delta} ^ {(l)}) \\tilde {\\Delta} _ {i} ^ {(l)} \\tilde {\\Delta} _ {j} ^ {(l)} \\tilde {\\Delta} _ {p} ^ {(l)} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.424, + 0.825, + 0.456 + ], + "angle": 0, + "content": "for some \\(\\theta, \\tilde{\\theta} \\in (0,1)\\). Since \\(\\hat{\\phi}^{(LR_{\\mathrm{grp}})}\\) belongs to \\(\\Gamma\\) which is compact, there exists a constant \\(C_{g,3}\\) such that for all \\(1 \\leq i,j \\leq d, 0 \\leq l \\leq n-1, 1 \\leq n \\leq \\lfloor T/\\eta_{\\mathrm{e}} \\rfloor\\)," + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.458, + 0.71, + 0.492 + ], + "angle": 0, + "content": "\\[\n| \\frac {\\partial u _ {l + 1 , n}}{\\partial \\phi_ {i}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}) | \\leq C _ {g, 3}, \\qquad | \\frac {\\partial^ {2} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}) | \\leq C _ {g, 3}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.493, + 0.291, + 0.508 + ], + "angle": 0, + "content": "By Lemma K.42," + }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.51, + 0.763, + 0.541 + ], + "angle": 0, + "content": "\\[\n\\mathcal {B} _ {1} \\leq d C _ {g, 3} C _ {1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\quad \\mathcal {B} _ {2} \\leq \\frac {d ^ {2}}{2} C _ {g, 3} C _ {1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} + 
\\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.542, + 0.598, + 0.557 + ], + "angle": 0, + "content": "Now we bound the remainders. By Cauchy-Schwartz inequality," + }, + { + "type": "equation", + "bbox": [ + 0.26, + 0.559, + 0.743, + 0.671 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left| \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\pmb {\\Delta} ^ {(l)}) \\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} \\Delta_ {p} ^ {(l)} \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ] \\right| \\\\ \\leq \\left(\\mathbb {E} \\left[ \\left(\\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\boldsymbol {\\Delta} ^ {(l)})\\right) ^ {2} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right]\\right) ^ {1 / 2} \\times \\\\ \\left(\\mathbb {E} [ (\\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} \\Delta_ {p} ^ {(l)}) ^ {2} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} ]\\right) ^ {1 / 2}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.681, + 0.825, + 0.713 + ], + "angle": 0, + "content": "Since \\(\\hat{\\phi}^{(lR_{\\mathrm{grp}})}\\) and \\(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\Delta^{(l)}\\) both belong to \\(\\Gamma\\) which is compact, there exists a constant \\(C_{g,4}\\) such that for all \\(1 \\leq i, j, p \\leq d, 0 \\leq l \\leq n - 1\\) and \\(1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor\\)," + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.714, + 0.638, + 0.742 + ], + "angle": 0, + "content": "\\[\n\\left. 
\\right.\\left(\\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\Delta^ {(l)}\\right)\\right) ^ {2} \\leq C _ {g, 4} ^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.744, + 0.573, + 0.759 + ], + "angle": 0, + "content": "Combining the above inequality with Lemma K.42, we have" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.761, + 0.805, + 0.796 + ], + "angle": 0, + "content": "\\[\n\\left| \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\pmb {\\Delta} ^ {(l)}) \\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} \\Delta_ {p} ^ {(l)} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ] \\right| \\leq C _ {g, 4} C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\log (\\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.797, + 0.487, + 0.813 + ], + "angle": 0, + "content": "Hence, for all \\(1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor, 0 \\leq l \\leq n - 1\\)" + }, + { + "type": "equation", + "bbox": [ + 0.396, + 0.816, + 0.601, + 0.846 + ], + "angle": 0, + "content": "\\[\n| \\mathcal {R} | \\leq \\frac {d ^ {3}}{6} C _ {g, 4} C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\log \\left(\\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.848, + 0.825, + 0.876 + ], + "angle": 0, + "content": "Similarly, we can show that there exists a constant \\( C_{g,5} \\) such that for all \\( 1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor \\), \\( 0 \\leq l \\leq n - 1 \\)," + }, + { + "type": "equation", + "bbox": [ + 0.396, + 0.878, + 0.601, + 0.909 + ], + "angle": 0, + "content": "\\[\n| \\tilde {\\mathcal {R}} | \\leq \\frac {d ^ {3}}{6} C _ {g, 5} C 
_ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\log \\left(\\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.55, + 0.925 + ], + "angle": 0, + "content": "Combining the bounds on \\(\\mathcal{A}_1\\) to \\(\\mathcal{A}_3\\), we have the lemma." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "70" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.385, + 0.119 + ], + "angle": 0, + "content": "Finally, we prove Theorem K.4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.826, + 0.205 + ], + "angle": 0, + "content": "Proof. For \\(0 \\leq l \\leq n\\), define the random variable \\(\\hat{\\zeta}_{l,n}\\) which follows the distribution \\(\\mathcal{P}_{\\zeta}(\\hat{\\phi}^{(lR_{\\mathrm{grp}})}, l, n)\\) conditioned on \\(\\hat{\\phi}^{(lR_{\\mathrm{grp}})}\\). Therefore, \\(\\mathbb{P}(\\hat{\\zeta}_{n,n} = \\hat{\\phi}^{(nR_{\\mathrm{grp}})}) = 1\\) and \\(\\hat{\\zeta}_{0,n} \\sim \\zeta_{n\\eta_{\\mathrm{e}}}\\). Denote by \\(u(\\phi, s, t) \\coloneqq \\mathbb{E}_{\\zeta_t \\sim \\mathcal{P}_{\\zeta}(\\phi, s, t)}[g(\\zeta_t)]\\) and \\(\\mathcal{T}_{l+1,n} \\coloneqq u_{l+1,n}(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\Delta^{(l)}, (l+1)\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}}) - u_{l+1,n}(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\tilde{\\Delta}^{(l)}, (l+1)\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}})\\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.212, + 0.802, + 0.396 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left| \\mathbb {E} \\left[ g \\left(\\boldsymbol {\\phi} ^ {\\left(n R _ {\\mathrm {g r p}}\\right)}\\right) \\right] - \\mathbb {E} \\left[ g \\left(\\boldsymbol {\\zeta} \\left(n \\eta_ {\\mathrm {e}}\\right)\\right) \\right] \\right| \\\\ \\leq \\left| \\mathbb {E} \\left[ g \\left(\\hat {\\zeta} _ {n, n}\\right) - g \\left(\\hat {\\zeta} _ {0, n}\\right) \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}) \\\\ \\leq \\sum_ {l = 0} ^ {n - 1} \\left| \\mathbb {E} \\left[ g \\left(\\hat {\\zeta} _ {l + 1, n}\\right) - g \\left(\\hat {\\zeta} _ {l, n}\\right) \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}) \\\\ = \\sum_ {l = 0} ^ {n - 1} \\left| \\mathbb {E} \\left[ u \\left(\\hat {\\phi} ^ {\\left((l + 1) R _ {\\mathrm {g r p}}\\right)}, (l + 1) \\eta_ {\\mathrm {e}}, n \\eta_ {\\mathrm {e}}\\right) - u \\left(\\hat {\\zeta} _ {l, l + 1}, (l + 1) \\eta_ {\\mathrm {e}}, n \\eta_ {\\mathrm {e}}\\right) \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}) \\\\ = \\sum_ {l = 0} ^ {n - 1} \\left| \\mathbb {E} \\left[ \\mathcal {T} _ {l + 1, n} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}). 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.404, + 0.825, + 0.438 + ], + "angle": 0, + "content": "Noticing that \\(\\mathbb{E}[\\mathcal{T}_{l + 1,n}\\mid \\mathcal{E}_0^{(nR_{\\mathrm{grp}})}] = \\mathbb{E}[\\mathbb{E}[\\mathcal{T}_{l + 1,n}\\mid \\hat{\\phi}^{(lR_{\\mathrm{grp}})},\\mathcal{E}_0^{(lR_{\\mathrm{grp}})}]\\mid \\mathcal{E}_0^{(nR_{\\mathrm{grp}})}]\\), we can apply Lemma K.43 and obtain that for all \\(0\\leq n\\leq \\lfloor T / \\eta_{\\mathrm{e}}\\rfloor\\)" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.445, + 0.726, + 0.492 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left| \\mathbb {E} \\left[ g \\left(\\phi^ {\\left(n R _ {\\mathrm {g r p}}\\right)}\\right) \\right] - \\mathbb {E} \\left[ g \\left(\\zeta \\left(n \\eta_ {\\mathrm {e}}\\right)\\right) \\right] \\right| \\leq n C _ {g, 1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} \\\\ \\leq T C _ {g, 1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1} - 1} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2} - 1}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.5, + 0.825, + 0.53 + ], + "angle": 0, + "content": "Notice that \\(\\eta_{\\mathrm{e}}^{\\gamma_1} + \\eta_{\\mathrm{e}}^{\\gamma_2} = \\eta^{0.5 - \\beta} + \\eta^\\beta\\) and \\(T, C_{g,1}\\) are both constants that are independent of \\(\\eta_{\\mathrm{e}}\\). Let \\(\\beta = 0.25\\) and we have Theorem K.4." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.544, + 0.647, + 0.56 + ], + "angle": 0, + "content": "Having established Theorem K.4, we are thus led to prove Theorem 3.2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.574, + 0.825, + 0.627 + ], + "angle": 0, + "content": "Proof of Theorem 3.2. 
Denote by \\( s_{\\mathrm{cls}} = s_0 + s_1 = \\mathcal{O}(\\log \\frac{1}{\\eta}) \\), which is the time the global iterate \\( \\bar{\\theta}^{(s)} \\) will reach within \\( \\tilde{\\mathcal{O}} (\\eta) \\) from \\( \\Gamma \\) with high probability. Define \\( \\tilde{\\zeta} (t) \\) to be the solution to the limiting SDE (108) conditioned on \\( \\mathcal{E}_0^{(s_{\\mathrm{cls}})} \\) and \\( \\tilde{\\zeta}(0) = \\phi^{(s_{\\mathrm{cls}})} \\). By Theorem K.4, we have" + }, + { + "type": "equation", + "bbox": [ + 0.203, + 0.634, + 0.794, + 0.663 + ], + "angle": 0, + "content": "\\[\n\\max _ {n = 0, \\dots , \\lfloor T / \\eta^ {0. 7 5} \\rfloor} \\left| \\mathbb {E} [ g (\\phi^ {(n R _ {\\mathrm {g r p}} + s _ {\\mathrm {c l s}})}) - g (\\tilde {\\zeta} (n \\eta^ {0. 7 5})) | \\phi^ {(s _ {\\mathrm {c l s}})}, \\mathcal {E} _ {0} ^ {(s _ {\\mathrm {c l s}})} ] \\right| \\leq C _ {g} \\eta^ {0. 2 5} (\\log \\frac {1}{\\eta}) ^ {b},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.671, + 0.825, + 0.71 + ], + "angle": 0, + "content": "where \\(R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{0.75}}\\right\\rfloor\\). 
Noticing that (i) \\(g\\in \\mathcal{C}^3\\) (ii) \\(\\pmb {b},\\pmb {\\sigma}\\in \\mathcal{C}^{\\infty}\\) and (iii) \\(\\zeta (t),\\tilde{\\zeta} (t)\\in \\Gamma ,t\\in [0,\\infty)\\) almost surely, we can conclude that given \\(\\mathcal{E}_0^{(s_{\\mathrm{cls}})}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.716, + 0.635, + 0.736 + ], + "angle": 0, + "content": "\\[\n\\| \\boldsymbol {\\zeta} (t) - \\tilde {\\boldsymbol {\\zeta}} (t) \\| _ {2} = \\tilde {\\mathcal {O}} (\\sqrt {\\eta}), \\quad \\forall t \\in [ 0, T ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.741, + 0.825, + 0.77 + ], + "angle": 0, + "content": "Then there exists positive constant \\( b' \\) independent of \\( \\eta \\) and \\( g \\), and \\( C_g' \\) which is independent of \\( \\eta \\) but can depend on \\( g \\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.777, + 0.78, + 0.806 + ], + "angle": 0, + "content": "\\[\n\\max _ {n = 0, \\dots , \\lfloor T / \\eta^ {0. 7 5} \\rfloor} \\left| \\mathbb {E} \\left[ g \\left(\\phi^ {\\left(n R _ {\\mathrm {g r p}} + s _ {\\mathrm {c l s}}\\right)}\\right) - g \\left(\\zeta \\left(n \\eta^ {0. 7 5} + s _ {\\mathrm {c l s}} H \\eta^ {2}\\right)\\right) \\right] \\right| \\leq C _ {g} ^ {\\prime} \\eta^ {0. 2 5} (\\log \\frac {1}{\\eta}) ^ {b ^ {\\prime}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.814, + 0.825, + 0.862 + ], + "angle": 0, + "content": "We can view the random variable pairs \\(\\{(\\phi^{(nR_{\\mathrm{grp}} + s_{\\mathrm{cls}})},\\zeta_{n\\eta^{0.75} + s_{\\mathrm{cls}}\\alpha \\eta}):n = 0,\\dots ,\\lfloor T / \\eta^{0.75}\\rfloor \\}\\) as reference points and then approximate the value of \\(g(\\phi^{(s)})\\) and \\(g(\\zeta (sH\\eta^2))\\) with the value at the nearest reference points. 
By Lemmas K.18 and K.23, for \\(0\\leq r\\leq R_{\\mathrm{grp}}\\) and \\(0\\leq s\\leq R_{\\mathrm{tot}} - r\\)" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.868, + 0.614, + 0.887 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\| \\phi^ {(s + r)} - \\phi^ {(s)} \\| _ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {0. 3 7 5}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.895, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Since the values of \\(\\phi^{(s)}\\) and \\(\\zeta\\) are restricted to a bounded set, \\(g(\\cdot)\\) is Lipschitz on that set. Therefore, we have the theorem." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "71" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.103, + 0.75, + 0.119 + ], + "angle": 0, + "content": "L DERIVING THE SLOW SDE FOR LABEL NOISE REGULARIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.825, + 0.163 + ], + "angle": 0, + "content": "In this section, we formulate how label noise regularization works and provide a detailed derivation of the theoretical results in Appendix G." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.168, + 0.825, + 0.247 + ], + "angle": 0, + "content": "Consider training a model for \\(C\\)-class classification on dataset \\(\\mathcal{D} = \\{(x_i, y_i)\\}_{i=1}^N\\), where \\(x_i\\) denotes the input and \\(y_i \\in [C]\\) denotes the label. Denote by \\(\\Delta_+^{C-1}\\) the \\((C-1)\\)-open simplex. Let \\(f(\\theta; x) \\in \\Delta_+^{C-1}\\) be the model output on input \\(x\\) with parameter \\(\\theta\\), whose \\(j\\)-th coordinate \\(f_j(\\theta; x)\\) stands for the probability of \\(x\\) belonging to class \\(j\\). 
Let \\(\\ell(\\theta; x, y)\\) be the cross entropy loss given input \\(x\\) and label \\(y\\), i.e., \\(\\ell(\\theta; x, y) = -\\log f_y(\\theta; x)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.251, + 0.826, + 0.327 + ], + "angle": 0, + "content": "Adding label noise means replacing the true label \\(y\\) with a fresh noisy label \\(\\hat{y}\\) every time we access the sample. Specifically, \\(\\hat{y}\\) is set as the true label \\(y\\) with probability \\(1 - p\\) and as any other label with probability \\(\\frac{p}{C - 1}\\), where \\(p\\) is the fixed corruption probability. The training loss is defined as \\(\\mathcal{L}(\\boldsymbol{\\theta}) = \\frac{1}{N}\\sum_{i=1}^{N}\\mathbb{E}[\\ell(\\boldsymbol{\\theta};\\boldsymbol{x}_i,\\hat{y}_i)]\\), where the expectation is taken over the stochasticity of \\(\\hat{y}_i\\). Notice that given a sample \\((x,y)\\)," + }, + { + "type": "equation", + "bbox": [ + 0.281, + 0.331, + 0.826, + 0.366 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) ] = - (1 - p) \\log f _ {y} (\\boldsymbol {\\theta}; \\boldsymbol {x}) - \\frac {p}{C - 1} \\sum_ {j \\neq y} \\log f _ {j} (\\boldsymbol {\\theta}; \\boldsymbol {x}). \\tag {114}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.37, + 0.825, + 0.463 + ], + "angle": 0, + "content": "By the property of cross-entropy loss, (114) attains its global minimum if and only if \\( f_{j} = \\frac{p}{C - 1} \\), for all \\( j \\in [C], j \\neq y \\) and \\( f_{y} = 1 - p \\). Due to the large expressiveness of modern deep learning models, there typically exists a set \\( S^{*} := \\{\\pmb{\\theta} \\mid f_{i}(\\pmb{\\theta}) = \\mathbb{E}[\\hat{y}_{i}], \\forall i \\in [N]\\} \\) such that all elements of \\( S^{*} \\) minimize \\( \\mathcal{L}(\\pmb{\\theta}) \\). Then, the manifold \\( \\Gamma \\) is a subset of \\( S^{*} \\). 
The following lemma relates the noise covariance \( \pmb{\Sigma}(\pmb{\theta}) := \frac{1}{N}\sum_{i \in [N]}\mathbb{E}[(\nabla\ell(\pmb{\theta};\pmb{x}_{i},\hat{y}_{i}) - \nabla\mathcal{L}(\pmb{\theta}))(\nabla\ell(\pmb{\theta};\pmb{x}_{i},\hat{y}_{i}) - \nabla\mathcal{L}(\pmb{\theta}))^{\top}] \) to the Hessian \( \nabla^{2}\mathcal{L}(\pmb{\theta}) \) for all \( \pmb{\theta} \in S^{*} \)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.465, + 0.825, + 0.496 + ], + "angle": 0, + "content": "Lemma L.1. If \(f(\pmb{\theta}; \pmb{x}_i, \hat{y}_i)\) is \(\mathcal{C}^2\)-smooth on \(\mathbb{R}^d\) given any \(i \in [N]\), \(\hat{y}_i \in [C]\) and \(\mathcal{S}^* \neq \emptyset\), then for all \(\pmb{\theta} \in \mathcal{S}^*\), \(\pmb{\Sigma}(\pmb{\theta}) = \nabla^2 \mathcal{L}(\pmb{\theta})\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.51, + 0.825, + 0.552 + ], + "angle": 0, + "content": "Proof. Since \(\mathcal{L}(\cdot)\) is \(\mathcal{C}^2\)-smooth, \(\nabla \mathcal{L}(\pmb{\theta}) = \mathbf{0}\) for all \(\pmb{\theta} \in S^*\). To prove the above lemma, it suffices to show that \(\forall i \in [N]\), \(\mathbb{E}[\nabla \ell(\pmb{\theta}; \pmb{x}_i, \hat{y}_i) \nabla \ell(\pmb{\theta}; \pmb{x}_i, \hat{y}_i)^\top] = \nabla^2 \mathcal{L}(\pmb{\theta})\). 
W.L.O.G, let \\(y = 1\\) and therefore for all \\(\\pmb{\\theta} \\in S^*\\)," + }, + { + "type": "equation", + "bbox": [ + 0.354, + 0.557, + 0.522, + 0.573 + ], + "angle": 0, + "content": "\\[\nf _ {1} (\\boldsymbol {\\theta}; \\boldsymbol {x}) = 1 - p =: a _ {1},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.575, + 0.64, + 0.602 + ], + "angle": 0, + "content": "\\[\nf _ {j} (\\boldsymbol {\\theta}; \\boldsymbol {x}) = \\frac {p}{C - 1} =: a _ {2}, \\forall j > 1, j \\in [ C ].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.607, + 0.825, + 0.634 + ], + "angle": 0, + "content": "Additionally, let \\( h(x) \\coloneqq -\\log (x), x \\in \\mathbb{R}^{+} \\). The stochastic gradient \\( \\nabla \\ell(\\pmb{\\theta}; \\pmb{x}, \\hat{y}) \\) follows the distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.638, + 0.69, + 0.678 + ], + "angle": 0, + "content": "\\[\n\\nabla \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) = \\left\\{ \\begin{array}{l l} h ^ {\\prime} (a _ {1}) \\frac {\\partial f _ {1}}{\\partial \\boldsymbol {\\theta}} & \\text {w . p .} 1 - p, \\\\ h ^ {\\prime} (a _ {2}) \\frac {\\partial f _ {j}}{\\partial \\boldsymbol {\\theta}}, & \\text {w . p .} \\frac {p}{C - 1}, \\forall j \\in [ C ], j > 1. 
\\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.682, + 0.466, + 0.696 + ], + "angle": 0, + "content": "Then the covariance of the gradient noise is:" + }, + { + "type": "equation", + "bbox": [ + 0.252, + 0.701, + 0.743, + 0.78 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} [ \\nabla \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) \\nabla \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) ^ {\\top} ] = (1 - p) \\left(h ^ {\\prime} \\left(a _ {1}\\right)\\right) ^ {2} \\frac {\\partial f _ {1} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}} \\left(\\frac {\\partial f _ {1} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}}\\right) ^ {\\top} \\\\ + \\frac {p \\left(h ^ {\\prime} \\left(a _ {2}\\right)\\right) ^ {2}}{C - 1} \\sum_ {j > 1} \\frac {\\partial f _ {j} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}} \\left(\\frac {\\partial f _ {j} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}}\\right) ^ {\\top}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.785, + 0.301, + 0.798 + ], + "angle": 0, + "content": "And the Hessian is:" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.801, + 0.757, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) = \\underbrace {(1 - p) h ^ {\\prime} (a _ {1}) \\frac {\\partial^ {2} f _ {1}}{\\partial \\boldsymbol {\\theta} ^ {2}} + \\frac {p h ^ {\\prime} (a _ {2})}{C - 1} \\sum_ {j > 1} \\frac {\\partial^ {2} f _ {j}}{\\partial \\boldsymbol {\\theta} ^ {2}}} _ {\\mathcal {T}} \\\\ + (1 - p) h ^ {\\prime \\prime} (a _ {1}) \\frac {\\partial f _ {1}}{\\partial \\boldsymbol {\\theta}} \\left(\\frac {\\partial f _ {1}}{\\partial \\boldsymbol {\\theta}}\\right) ^ {\\top} + \\frac {p h ^ {\\prime \\prime} (a _ {2})}{C - 1} \\sum_ {j > 1} \\frac {\\partial f _ {j}}{\\partial \\boldsymbol {\\theta}} \\left(\\frac {\\partial f _ {j} (\\boldsymbol {\\theta})}{\\partial \\boldsymbol {\\theta}}\\right) ^ {\\top}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "72" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.103, + 0.32, + 0.123 + ], + "angle": 0, + "content": "Since \\(\\sum_{j\\in [C]}f_i = 1\\)" + }, + { + "type": "equation", + "bbox": [ + 0.428, + 0.129, + 0.825, + 0.168 + ], + "angle": 0, + "content": "\\[\n\\frac {\\partial^ {2} f _ {1}}{\\partial \\boldsymbol {\\theta} ^ {2}} = - \\sum_ {j > 1} \\frac {\\partial^ {2} f _ {j}}{\\partial \\boldsymbol {\\theta} ^ {2}}. \\tag {115}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.176, + 0.45, + 0.195 + ], + "angle": 0, + "content": "Also, notice that \\( h^\\prime (x) = -\\frac{1}{x} \\). Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.41, + 0.201, + 0.826, + 0.232 + ], + "angle": 0, + "content": "\\[\n(1 - p) h ^ {\\prime} \\left(a _ {1}\\right) = \\frac {p h ^ {\\prime} \\left(a _ {2}\\right)}{C - 1}. 
\\tag {116}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.239, + 0.825, + 0.269 + ], + "angle": 0, + "content": "Substituting (115) and (116) into the expression of \\(\\mathcal{T}\\) gives \\(\\mathcal{T} = \\mathbf{0}\\), which simplifies \\(\\nabla^2\\mathcal{L}(\\pmb{\\theta})\\) as the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.235, + 0.274, + 0.763, + 0.318 + ], + "angle": 0, + "content": "\\[\n\\nabla^ {2} \\mathcal {L} (\\pmb {\\theta}) = (1 - p) h ^ {\\prime \\prime} (a _ {1}) \\frac {\\partial f _ {1}}{\\partial \\pmb {\\theta}} \\left(\\frac {\\partial f _ {j} (\\pmb {\\theta})}{\\partial \\pmb {\\theta}}\\right) ^ {\\top} + \\frac {p h ^ {\\prime \\prime} (a _ {2})}{C - 1} \\sum_ {j > 1} \\frac {\\partial f _ {j}}{\\partial \\pmb {\\theta}} \\left(\\frac {\\partial f _ {j} (\\pmb {\\theta})}{\\partial \\pmb {\\theta}}\\right) ^ {\\top}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.344, + 0.688, + 0.361 + ], + "angle": 0, + "content": "Again notice that \\( h''(x) = h'(x) \\) for all \\( x \\in \\mathbb{R}^+ \\). Therefore, \\( \\nabla^2\\mathcal{L}(\\pmb{\\theta}) = \\pmb{\\Sigma}(\\pmb{\\theta}) \\)." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.346, + 0.825, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.375, + 0.654, + 0.391 + ], + "angle": 0, + "content": "With the property \\(\\pmb{\\Sigma}(\\pmb{\\theta}) = \\nabla^2\\mathcal{L}(\\pmb{\\theta})\\), we are ready to prove Theorem G.1." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.405, + 0.6, + 0.42 + ], + "angle": 0, + "content": "Proof of Theorem G.1. 
Recall the general form of the slow SDE:" + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.426, + 0.826, + 0.459 + ], + "angle": 0, + "content": "\\[\n\\mathrm {d} \\boldsymbol {\\zeta} (t) = \\frac {1}{\\sqrt {B}} \\partial \\Phi (\\boldsymbol {\\zeta}) \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} (t) + \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t, \\tag {117}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.466, + 0.687, + 0.483 + ], + "angle": 0, + "content": "where \\(\\Psi\\) is defined in Definition K.6. Since for \\(\\zeta \\in \\Gamma\\), \\(\\Sigma(\\zeta) = \\nabla^2\\mathcal{L}(\\zeta)\\), then" + }, + { + "type": "equation", + "bbox": [ + 0.428, + 0.49, + 0.826, + 0.508 + ], + "angle": 0, + "content": "\\[\n\\partial \\Phi (\\zeta) \\Sigma^ {1 / 2} (\\zeta) = \\mathbf {0}. \\tag {118}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.515, + 0.3, + 0.528 + ], + "angle": 0, + "content": "Now we show that" + }, + { + "type": "equation", + "bbox": [ + 0.378, + 0.535, + 0.826, + 0.554 + ], + "angle": 0, + "content": "\\[\n\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) ] = - \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right). \\tag {119}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.56, + 0.572, + 0.579 + ], + "angle": 0, + "content": "Since \\(\\nabla^2\\mathcal{L}(\\zeta) = \\Sigma (\\zeta)\\), \\(\\mathcal{V}_{\\nabla^2\\mathcal{L}(\\zeta)}[\\Sigma ] = \\frac{1}{2}\\pmb {I}\\). 
By Lemma K.4," + }, + { + "type": "equation", + "bbox": [ + 0.29, + 0.585, + 0.706, + 0.615 + ], + "angle": 0, + "content": "\\[\n\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) ] = - \\frac {1}{2} \\partial \\Phi (\\boldsymbol {\\zeta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\boldsymbol {I} ] = - \\frac {1}{2} \\nabla_ {\\Gamma} \\mathrm {t r} (\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.62, + 0.318, + 0.635 + ], + "angle": 0, + "content": "Finally, we show that" + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.642, + 0.826, + 0.673 + ], + "angle": 0, + "content": "\\[\n\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\Psi (\\boldsymbol {\\zeta}) ] = - \\nabla_ {\\Gamma} \\frac {1}{2 H \\eta} \\operatorname {t r} (F (2 H \\eta \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta}))). \\tag {120}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.681, + 0.825, + 0.731 + ], + "angle": 0, + "content": "Define \\(\\hat{\\psi}(x) \\coloneqq x\\psi(x) = e^{-x} - 1 + x\\). By definition of \\(\\Psi(\\zeta)\\), when \\(\\Sigma(\\zeta) = \\nabla^2\\mathcal{L}(\\zeta)\\), \\(\\Psi(\\zeta) = \\hat{\\psi}(2\\eta H\\nabla^2\\mathcal{L}(\\zeta))\\), where \\(\\hat{\\psi}(\\cdot)\\) is interpreted as a matrix function. 
Since \\(\\psi(2\\eta H\\nabla^2\\mathcal{L}(\\zeta)) \\in \\operatorname{span}\\{\\pmb{u}\\pmb{u}^\\top \\mid \\pmb{u} \\in T_\\zeta^\\perp(\\Gamma)\\}\\), by Lemma K.4," + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.737, + 0.657, + 0.766 + ], + "angle": 0, + "content": "\\[\n\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\Psi (\\boldsymbol {\\zeta}) ] = - \\frac {1}{2} \\partial \\Phi (\\boldsymbol {\\zeta}) \\mathrm {t r} \\psi (2 \\eta H \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.772, + 0.742, + 0.788 + ], + "angle": 0, + "content": "By the chain rule, we have (120). Combining (118),(119) and (120) gives the theorem." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.773, + 0.825, + 0.785 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "73" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.427, + 0.119 + ], + "angle": 0, + "content": "M EXPERIMENTAL DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.827, + 0.248 + ], + "angle": 0, + "content": "In this section, we specify the experimental details that are omitted in the main text. Our experiments are conducted on CIFAR-10 (Krizhevsky et al., 2009) and ImageNet Russakovsky et al. (2015). Our code is available at https://github.com/hmgxr128/Local-SGD. Our implementation of ResNet-56 (He et al., 2016) and VGG-16 (Simonyan & Zisserman, 2015) is based on the high-starred repository by Wei Yang\\(^{2}\\) and we use the implementation of ResNet-50 from torchvision 0.3.1. 
We run all CIFAR-10 experiments with \\( B_{\\mathrm{loc}} = 128 \\) on 8 NVIDIA Tesla P100 GPUs while ImageNet experiments are run on 8 NVIDIA A5000 GPU with \\( B_{\\mathrm{loc}} = 32 \\). All ImageNet experiments are trained with ResNet-50." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.253, + 0.828, + 0.354 + ], + "angle": 0, + "content": "We generally adopt the following training strategies. We do not add any momentum unless otherwise stated. We follow the suggestions by Jia et al. (2018) and do not add weight decay to the bias and learnable parameters in the normalization layers. For all models with BatchNorm layers, we go through 100 batches of data with batch size \\( B_{\\mathrm{loc}} \\) to estimate the running mean and variance before evaluation. Experiments on both datasets follow the standard data augmentation pipeline in He et al. (2016) except the label noise experiments. Additionally, we use FFCV (Leclerc et al., 2022) to accelerate data loading for ImageNet training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.358, + 0.825, + 0.388 + ], + "angle": 0, + "content": "Slightly different from the update rule of Local SGD in Section 1, we use sampling without replacement unless otherwise stated. See Appendix C for implementation details and discussion." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.405, + 0.554, + 0.42 + ], + "angle": 0, + "content": "M.1 POST-LOCAL SGD EXPERIMENTS IN SECTION 1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.431, + 0.827, + 0.6 + ], + "angle": 0, + "content": "CIFAR-10 experiments. We simulate 32 clients with \\( B = 4096 \\). We follow the linear scaling rule and linear learning rate warmup strategy suggested by Goyal et al. (2017). We first run 250 epochs of SGD with the learning rate gradually ramping up from 0.1 to 3.2 for the first 50 epochs. Resuming from the model obtained at epoch 250, we run Local SGD with \\( \\eta = 0.32 \\). 
Note that we conduct grid search for the initial learning rate among \\( \\{0.005, 0.01, 0.05, 0.1, 0.15, 0.2\\} \\) and choose the learning rate with which parallel SGD \\( (H = 1) \\) achieves the best test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. The weight decay \\( \\lambda \\) is set as \\( 5 \\times 10^{-4} \\). As for the initialization scheme, we follow Lin et al. (2020b) and Goyal et al. (2017). Specifically, we use Kaiming Normal (He et al., 2015) for the weights of convolutional layers and initialize the weights of fully-connected layers by a Gaussian distribution with mean zero and standard deviation 0.01. The weights for normalization layers are initialized as one. All bias parameters are initialized as zero. We report the mean and standard deviation over 5 runs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.615, + 0.827, + 0.743 + ], + "angle": 0, + "content": "ImageNet experiments. We simulate 256 workers with \\( B = 8192 \\). We follow the linear scaling rule and linear learning rate warmup strategy suggested by Goyal et al. (2017). We first run 100 epochs of SGD where the learning rate linearly ramps up from 0.5 to 16 for the first 5 epochs and then decays by a factor of 0.1 at epoch 50. Resuming from epoch 100, we run Local SGD with \\( \\eta = 0.16 \\). Note that we conduct grid search for the initial learning rate among \\( \\{0.05, 0.1, 0.5, 1\\} \\) and choose the learning rate with which parallel SGD \\( (H = 1) \\) achieves the best test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. The weight decay \\( \\lambda \\) is set as \\( 1 \\times 10^{-4} \\) and we do not add any momentum. The initialization scheme follows the implementation of torchvision 0.3.1. We report the mean and standard deviation over 3 runs." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.759, + 0.557, + 0.773 + ], + "angle": 0, + "content": "M.2 EXPERIMENTAL DETAILS FOR FIGURES 2 AND 5" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.786, + 0.827, + 0.9 + ], + "angle": 0, + "content": "CIFAR-10 experiments. We use ResNet-56 for all CIFAR-10 experiments in the two figures. We simulate 32 workers with \\( B = 4096 \\) and set the weight decay as \\( 5 \\times 10^{-4} \\). For Figures 2(a) and 2(b), we set \\( \\eta = 0.32 \\), which is the same as the learning rate after decay in Figure 1(a). For Figure 2(a), we adopt the same initialization scheme introduced in the corresponding paragraph in Appendix M.1. For Figures 2(b), 2(e) and 5(c), we use the model at epoch 250 in Figure 1(a) as the pre-trained model. Additionally, we use a training budget of 250 epochs for Figure 2(e). In Figure 5(e), we use Local SGD with momentum 0.9, where the momentum buffer is kept locally and never averaged. We run SGD with momentum 0.9 for 150 epochs to obtain the pre-trained model, where the learning" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.495, + 0.925 + ], + "angle": 0, + "content": "\\(^{2}\\)https://github.com/bearpaw/pytorch-classification" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "74" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.229, + 0.066, + 0.482, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.262, + 0.195, + 0.45, + 0.209 + ], + "angle": 0, + "content": "(a) CIFAR-10, start from #250." 
+ }, + { + "type": "image", + "bbox": [ + 0.517, + 0.067, + 0.768, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.195, + 0.735, + 0.209 + ], + "angle": 0, + "content": "(b) ImageNet, start from #100." + }, + { + "type": "image", + "bbox": [ + 0.228, + 0.223, + 0.48, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.352, + 0.483, + 0.366 + ], + "angle": 0, + "content": "(c) CIFAR-10, start from #250, optimal \\( H \\)." + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.224, + 0.767, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.352, + 0.767, + 0.366 + ], + "angle": 0, + "content": "(d) ImageNet, start from #100, optimal \\( H \\)." + }, + { + "type": "image_caption", + "bbox": [ + 0.296, + 0.373, + 0.696, + 0.389 + ], + "angle": 0, + "content": "Figure 10: The learning curves for experiments in Figure 4." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.405, + 0.827, + 0.504 + ], + "angle": 0, + "content": "rate ramps up from 0.05 to 1.6 linearly in the first 150 epochs. Note that we conduct grid search for the initial learning rate among \\(\\{0.01, 0.05, 0.1, 0.15, 0.2\\}\\) and choose the learning rate with which parallel SGD \\((H = 1)\\) achieves the highest test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. Resuming from epoch 150, we run Local SGD \\(H = 1\\) (i.e., SGD) and 24 with \\(\\eta = 0.16\\) and decay \\(\\eta\\) by 0.1 at epoch 226. For Local SGD \\(H = 900\\), we resume from the model at epoch 226 of \\(H = 24\\) with \\(\\eta = 0.016\\). We report the mean and standard deviation over 3 runs for Figures 2(a), 2(b) and 5(c), and over 5 runs for Figure 2(e)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.517, + 0.828, + 0.617 + ], + "angle": 0, + "content": "ImageNet experiments. 
We simulate 256 clients with \\( B = 8192 \\) and set the weight decay as \\( 1 \\times 10^{-4} \\). In Figure 2(d), both Local SGD and SGD start from the same random initialization. We warm up the learning rate from 0.1 to 3.2 in the first 5 epochs and decay the learning rate by a factor of 0.1 at epochs 50 and 100. For Figures 2(c), 2(f) and 5(d), we use the model at epoch 100 in Figure 1(b) as the pre-trained model. In Figure 2(c), we set the learning rate as 0.16, which is the same as the learning rate after epoch 100 in Figure 1(b). Finally, in Figures 2(c), 2(f), 5(b) and 5(d), we report the mean and average over 3 runs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.632, + 0.512, + 0.646 + ], + "angle": 0, + "content": "M.3 DETAILS FOR EXPERIMENTS IN FIGURE 6" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.657, + 0.827, + 0.756 + ], + "angle": 0, + "content": "For all experiments in Figure 6, we train a ResNet-56 model on CIFAR-10. We report mean test accuracy over three runs and the shaded area reflects the standard deviation. For Figure 6(a), we use the same setup as Figures 2(a) and 2(b) for training from random initialization and from a pre-trained model respectively except the learning rate. For Figure 6(b), we resume from the model obtained at epoch 250 in Figure 1(a) and train for another 250 epochs. For Figure 6(c), we follow the same procedure as Figure 1(a) except that we use sampling with replacement. We also ensure that the total numbers of iterations in Figures 1(a) and 6(c) are the same." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.772, + 0.715, + 0.786 + ], + "angle": 0, + "content": "M.4 DETAILS FOR EXPERIMENTS ON THE EFFECT OF THE DIFFUSION TERM" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.797, + 0.825, + 0.869 + ], + "angle": 0, + "content": "CIFAR-10 experiments. The model we use is ResNet-56. 
For Figure 3(a), we first run SGD with batch size 128 and learning rate \\(\\eta = 0.5\\) for 250 epochs to obtain the pre-trained model. The initialization scheme is the same as the corresponding paragraph in Appendix M.1. Resuming from epoch 250 with \\(\\eta = 0.05\\), we run Local SGD with \\(K = 16\\) until epoch 6000 and run all other setups for the same number of iterations. We report the mean and standard deviation over 3 runs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "ImageNet experiments. For Figures 3(b) and 4(b), we start from the model obtained at epoch 100 in Figure 1(b). In Figure 3(b), we run Local SGD with \\( K = 256 \\) for another 150 epochs with \\( \\eta = 0.032 \\). We run all other setups for the same number of iterations with the same learning rate." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "75" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.71, + 0.119 + ], + "angle": 0, + "content": "M.5 DETAILS FOR EXPERIMENTS ON THE EFFECT OF GLOBAL BATCH SIZE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.827, + 0.27 + ], + "angle": 0, + "content": "CIFAR-10 experiments. The model we use is ResNet-56. We resume from the model obtained in Figure 1(a) at epoch 250 and train for another 250 epochs. The local batch size for all runs is \\( B_{\\mathrm{loc}} = 128 \\). We first make grid search of \\( \\eta \\) for SGD with \\( K = 16 \\) among \\( \\{0.04, 0.08, 0.16, 0.32, 0.64\\} \\) and find that the final test accuracy varies little across different learning rates (within \\( 0.1\\% \\)). Then we choose \\( \\eta = 0.32 \\). 
For the green curve in Figure 4(a), we search for the optimal \\( H \\) for \\( K = 16 \\) and keep \\( \\alpha \\) fixed when scaling \\( \\eta \\) with \\( K \\). For the red curve in Figure 4(a), we search for the optimal \\( H \\) for each \\( K \\) among \\( \\{6, 12, 60, 120, 300, 750, 1500, 3000, 6000, 12000, 24000\\} \\) and also make sure that \\( H \\) does not exceed the total number of iterations for 250 epochs. The learning curves for constant and optimal \\( \\alpha \\) are visualized in Figures 10(a) and 10(c) respectively. We report the mean and standard deviation over three runs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.285, + 0.828, + 0.424 + ], + "angle": 0, + "content": "ImageNet experiments. We start from the model obtained at epoch 100 in Figure 1(b) and train for another 50 epochs. The local batch size for all runs is \\( B_{\\mathrm{loc}} = 32 \\). We first make grid search among \\( \\{0.032, 0.064, 0.16, 0.32\\} \\) for \\( H = 1 \\) to achieve the best test accuracy and choose \\( H = 0.064 \\). For the orange curve in Figure 4(b), we search \\( H \\) among \\( \\{2, 4, 6, 13, 26, 52, 78, 156\\} \\) for \\( K = 256 \\) to achieve the optimal test accuracy and the keep \\( \\alpha \\) constant as we scale \\( \\eta \\) with \\( K \\). To obtain the optimal \\( H \\) for each \\( K \\), we search among \\( \\{6240, 7800, 10400, 12480, 15600, 20800, 24960, 31200\\} \\) for \\( K = 16 \\), \\( \\{1600, 3120, 4160, 5200, 6240, 7800, 10400\\} \\) for \\( K = 32 \\), \\( \\{312, 480, 520, 624, 800, 975, 1040, 1248, 1560, 1950\\} \\) for \\( K = 64 \\), and \\( \\{1, 2, 3, 6, 13\\} \\) for \\( K = 512 \\). The learning curves for constant and optimal \\( \\alpha \\) are visualized in Figures 10(b) and 10(d) respectively. We report the mean and standard deviation over three runs." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.441, + 0.676, + 0.456 + ], + "angle": 0, + "content": "M.6 DETAILS FOR EXPERIMENTS ON LABEL NOISE REGULARIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.467, + 0.827, + 0.538 + ], + "angle": 0, + "content": "For all label noise experiments, we do not use data augmentation, use sampling with replacement, and set the corruption probability as 0.1. We simulate 32 workers with \\( B = 4096 \\) in Figure 7 and 4 workers with \\( B = 512 \\) in Figure 8. We use ResNet-56 with GroupNorm with the number of groups 8 for Figure 7(a) and VGG-16 without normalization for Figures 7(b) and 8. Below we list the training details for ResNet-56 and VGG-16 respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.552, + 0.827, + 0.706 + ], + "angle": 0, + "content": "ResNet-56. As for the model architecture, we replace the batch normalization layer in Yang's implementation with group normalization such that the training loss is independent of the sampling order. We also use Swish activation (Ramachandran et al., 2017) in place of ReLU to ensure the smoothness of the loss function. We generate the pre-trained model by running label noise SGD with corruption probability \\( p = 0.1 \\) for 500 epochs (6,000 iterations). We initialize the model by the same strategy introduced in the first paragraph of Appendix M.1. Applying the linear warmup scheme proposed by Goyal et al. (2017), we gradually ramp up the learning rate \\( \\eta \\) from 0.1 to 3.2 for the first 20 epochs and multiply the learning rate by 0.1 at epoch 250. All subsequent experiments in Figure 7(a) (a) use learning rate 0.1. The weight decay \\( \\lambda \\) is set as \\( 5 \\times 10^{-4} \\). Note that adding weight decay in the presence of normalization accelerates the limiting dynamics and will not affect the implicit regularization on the original loss function (Li et al., 2022)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.72, + 0.827, + 0.833 + ], + "angle": 0, + "content": "VGG-16. We follow Yang's implementation of the model architecture except that we replace maximum pooling with average pooling and use Swish activation (Ramachandran et al., 2017) to make the training loss smooth. We initialize all weight parameters by Kaiming Normal and all bias parameters as zero. The pre-trained model is obtained by running label noise SGD with total batch size 4096 and corruption probability \\( p = 0.1 \\) for 6000 iterations. We use a linear learning rate warmup from 0.1 to 0.5 in the first 500 iterations. All runs in Figures 7(b) and 8 resume from the model obtained by SGD with label noise. In Figure 7(b), we use learning rate \\( \\eta = 0.1 \\). In Figure 8, we set \\( \\eta = 0.005 \\) for \\( H = 97,000 \\) and \\( \\eta = 0.01 \\) for SGD \\( (H = 1) \\). The weight decay \\( \\lambda \\) is set as zero." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "76" + } + ] +] \ No newline at end of file diff --git a/2023/Why (and When) does Local SGD Generalize Better than SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_origin.pdf b/2023/Why (and When) does Local SGD Generalize Better than SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a7757e9033560d5af18282f27fa7fa1fb664235f --- /dev/null +++ b/2023/Why (and When) does Local SGD Generalize Better than SGD_/306d38ac-f98a-4b3c-97a7-4af7a2c739ce_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ecd37097dec23102b32bfe6b0133c908c09ff03a02812b4d0d773bd7985e729 +size 1441417 diff --git a/2023/Why (and When) does Local SGD Generalize Better than SGD_/full.md b/2023/Why (and When) does Local SGD Generalize Better than SGD_/full.md new file mode 100644 index 
0000000000000000000000000000000000000000..47cb89f639024365c393b11e344f3fbad40f2803 --- /dev/null +++ b/2023/Why (and When) does Local SGD Generalize Better than SGD_/full.md @@ -0,0 +1,3259 @@ +# WHY (AND WHEN) DOES LOCAL SGD GENERALIZE BETTER THAN SGD? + +Xinran Gu* + +Institute for Interdisciplinary Information Sciences Tsinghua University + +gxr21@mails.tsinghua.edu.cn + +Kaifeng Lyu* + +Department of Computer Science Princeton University + +klyu@cs.princeton.edu + +Longbo Huang† + +Institute for Interdisciplinary Information Sciences + +Tsinghua University + +longbohuang@tsinghua.edu.cn + +Sanjeev Arora† + +Department of Computer Science + +Princeton University + +arora@cs.princeton.edu + +# ABSTRACT + +Local SGD is a communication-efficient variant of SGD for large-scale training, where multiple GPUs perform SGD independently and average the model parameters periodically. It has been recently observed that Local SGD can not only achieve the design goal of reducing the communication overhead but also lead to higher test accuracy than the corresponding SGD baseline (Lin et al., 2020b), though the training regimes for this to happen are still in debate (Ortiz et al., 2021). This paper aims to understand why (and when) Local SGD generalizes better based on Stochastic Differential Equation (SDE) approximation. 
The main contributions of this paper include (i) the derivation of an SDE that captures the long-term behavior of Local SGD in the small learning rate regime, showing how noise drives the iterate to drift and diffuse after it has reached close to the manifold of local minima, (ii) a comparison between the SDEs of Local SGD and SGD, showing that Local SGD induces a stronger drift term that can result in a stronger effect of regularization, e.g., a faster reduction of sharpness, and (iii) empirical evidence validating that having a small learning rate and long enough training time enables the generalization improvement over SGD but removing either of the two conditions leads to no improvement. + +# 1 INTRODUCTION + +As deep models have grown larger, training them with reasonable wall-clock times has led to new distributed environments and new variants of gradient-based training. Recall that Stochastic Gradient Descent (SGD) tries to solve $\min_{\pmb{\theta} \in \mathbb{R}^d} \mathbb{E}_{\xi \sim \hat{\mathcal{D}}}[\ell(\pmb{\theta}; \xi)]$ , where $\pmb{\theta} \in \mathbb{R}^d$ is the parameter vector of the model, $\ell(\pmb{\theta}; \xi)$ is the loss function for a data sample $\xi$ drawn from the training distribution $\tilde{\mathcal{D}}$ , e.g., the uniform distribution over the training set. SGD with learning rate $\eta$ and batch size $B$ does the following update at each step, using a batch of $B$ independent $\xi_{t,1}, \ldots, \xi_{t,B} \sim \tilde{\mathcal{D}}$ : + +$$ +\boldsymbol {\theta} _ {t + 1} \leftarrow \boldsymbol {\theta} _ {t} - \eta \boldsymbol {g} _ {t}, \quad \text {w h e r e} \quad \boldsymbol {g} _ {t} = \frac {1}{B} \sum_ {i = 1} ^ {B} \nabla \ell \left(\boldsymbol {\theta} _ {t}; \xi_ {t, i}\right). \tag {1} +$$ + +Parallel SGD tries to improve wall-clock time when the batch size $B$ is large enough. 
It distributes the gradient computation to $K \geq 2$ workers, each of whom focuses on a local batch of $B_{\mathrm{loc}} := B / K$ samples and computes the average gradient over the local batch. Finally, $g_{t}$ is obtained by averaging the local gradients over the $K$ workers. + +However, large-batch training leads to a significant test accuracy drop compared to a small-batch training baseline with the same number of training steps or epochs (Smith et al., 2020; Shallue et al., + +![](images/d2ddf20977319359843449176528686667e651b70728681363f7c2b748262ac9.jpg) +(a) CIFAR-10, $B = 4096$ , ResNet-56. + +![](images/5535c2fbc915d92756ec5f3da4bc10bda5e4734ecdc6d2e3b634ab473d1da50c.jpg) +(b) ImageNet, $B = 8192$ , ResNet-50. +Figure 1: Post-Local SGD ( $H > 1$ ) generalizes better than SGD ( $H = 1$ ). We switch to Local SGD at the first learning rate decay (epoch #250) for CIFAR-10 and at the second learning rate decay (epoch #100) for ImageNet. See Appendix M.1 for training details. + +2019; Keskar et al., 2017; Jastrzebski et al., 2017). Reducing this generalization gap is the goal of much subsequent research. It was suggested that the generalization gap arises because larger batches lead to a reduction in the level of noise in batch gradient (see Appendix A for more discussion). The Linear Scaling Rule (Krizhevsky, 2014; Goyal et al., 2017; Jastrzebski et al., 2017) tries to fix this by increasing the learning rate in proportion to batch size. This is found to reduce the generalization gap for (parallel) SGD, but does not entirely eliminate it. + +To reduce the generalization gap further, Lin et al. (2020b) discovered that a variant of SGD, called Local SGD (Yu et al., 2019; Wang & Joshi, 2019; Zhou & Cong, 2018), can be used as a strong component. 
Perhaps surprisingly, Local SGD itself is not designed for improving generalization, but for reducing the high communication cost for synchronization among the workers, which is another important issue that often bottlenecks large-batch training (Seide et al., 2014; Strom, 2015; Chen et al., 2016; Recht et al., 2011). Instead of averaging the local gradients per step as in parallel SGD, Local SGD allows $K$ workers to train their models locally and averages the local model parameters whenever they finish $H$ local steps. Here every worker samples a new batch at each local step, and in this paper we focus on the case where all the workers draw samples with or without replacement from the same training set. See Appendix C for the pseudocode. + +More specifically, Lin et al. (2020b) proposed Post-local SGD, a hybrid method that starts with parallel SGD (equivalent to Local SGD with $H = 1$ in math) and switches to Local SGD with $H > 1$ after a fixed number of steps $t_0$ . They showed through extensive experiments that Post-local SGD significantly outperforms parallel SGD in test accuracy when $t_0$ is carefully chosen. In Figure 1, we reproduce this phenomenon on both CIFAR-10 and ImageNet. + +As suggested by the success of Post-local SGD, Local SGD can improve the generalization of SGD by merely adding more local steps (while fixing the other hyperparameters), at least when the training starts from a model pre-trained by SGD. But the underlying mechanism is not very clear, and there is also controversy about when this phenomenon can happen (see Section 2.1 for a survey). The current paper tries to understand: Why does Local SGD generalize better? Under what general conditions does this generalization benefit arise? + +Previous theoretical research on Local SGD is mainly restricted to the convergence rate for minimizing a convex or non-convex objective (see Appendix A for a survey). 
A related line of works (Stich, 2018; Yu et al., 2019; Khaled et al., 2020) showed that Local SGD has a slower convergence rate compared with parallel SGD after running the same number of steps/epochs. This convergence result suggests that Local SGD may implicitly regularize the model through insufficient optimization, but this does not explain why parallel SGD with early stopping, which may incur an even higher training loss, still generalizes worse than Post-local SGD. + +Our Contributions. In this paper, we provide the first theoretical understanding on why (and when) switching from parallel SGD to Local SGD improves generalization. + +1. In Section 2.2, we conduct ablation studies on CIFAR-10 and ImageNet and identify a clean setting where adding local steps to SGD consistently improves generalization: if the learning rate is small and the total number of steps is sufficient, Local SGD eventually generalizes better than the corresponding (parallel) SGD baseline. +2. In Section 3.2, we derive a special SDE that characterizes the long-term behavior of Local SGD in the small learning rate regime, as inspired by a previous work (Li et al., 2021b) that proposed this type of SDE for modeling SGD. These SDEs can track the dynamics after the iterate has reached close to a manifold of minima. In this regime, the expected gradient is near zero, but the gradient noise can drive the iterate to wander around. In contrast to the conventional SDE (3) for + +SGD, where the drift and diffusion terms are connected respectively to the expected gradient and gradient noise, the SDE we derived for Local SGD has drift and diffusion terms both connected to gradient noise. + +3. Section 3.3 explains the generalization improvement of Local SGD over SGD by comparing the corresponding SDEs: increasing the number of local steps $H$ strengthens the drift term of SDE while keeping the diffusion term untouched. We hypothesize that having a stronger drift term can benefit generalization. +4. 
As a by-product, we provide a new proof technique that can give the first quantitative approximation bound for how well Li et al. (2021b)'s SDE approximates SGD. + +Back to the discussion on the generalization gap between small- and large-batch training, we remark that this gap can occur early in training when the learning rate is very large (Smith et al., 2020) and Local SGD cannot prevent this gap in this phase. Instead, our theory suggests that Local SGD can reduce the gap in late training phases after decaying the learning rate. + +# 2 WHEN DOES LOCAL SGD GENERALIZE BETTER? + +In our motivating example of Post-local SGD, switching from SGD to Local SGD can outperform running SGD alone (i.e., no switching) in test accuracy, but this improvement does not always arise and can depend on the choice of the switching time point. Because of this, a necessary first step for developing a theoretical understanding of Local SGD is to identify under what general conditions Local SGD can improve the generalization of SGD by merely adding local steps. + +# 2.1 THE DEBATE ON LOCAL SGD + +We first summarize a debate in the literature regarding when to switch from SGD to Local SGD in running Post-local SGD, which hints the conditions so that Local SGD can improve upon SGD. + +Local SGD generalizes better than SGD on CIFAR-10. Lin et al. (2020b) empirically observed that Post-local SGD exhibits a better generalization performance than SGD. Most of their experiments are conducted on CIFAR-10 and CIFAR-100 with multiple learning rate decays, and the algorithm switches from (parallel) SGD to Local SGD right after the first learning rate decay. We refer to this particular choice of the switching time point as the first-decay switching strategy for short. To justify this strategy, they empirically showed that the generalization improvement can be less significant if starting Local SGD from the beginning or right after the second learning rate decay. 
It has also been observed by Wang & Joshi (2021) that running Local SGD from the beginning improves generalization, but the test accuracy improvement may not be large enough. A subsequent work by Lin et al. (2020a) showed that adding local steps to Extrap-SGD, a variant of SGD proposed therein, after the first learning rate decay also improves generalization, suggesting that the first-decay switching strategy can also be applied to the post-local variant of other optimizers. + +Does Local SGD exhibit the same generalization benefit on large-scale datasets? Going beyond CIFAR-10, Lin et al. (2020b) conducted a few ImageNet experiments and showed that Post-local SGD with first-decay switching strategy still leads to better generalization than SGD. However, the improvement is sometimes marginal, e.g., $0.1\%$ for batch size 8192. For the general case, they suggested that the time of switching should be tuned aiming at "capturing the time when trajectory starts to get into the influence basin of a local minimum" in a footnote, but no further discussion or experiments are provided to justify this guideline. Ortiz et al. (2021) conducted a more extensive evaluation on ImageNet (with a different set of hyperparameters) and concluded with the opposite: the first-decay switching strategy can hurt the validation accuracy. Instead, switching at a later time, such as the second learning rate decay, leads to a better validation accuracy than SGD. $^{1}$ To explain this phenomenon, they conjecture that switching to Local SGD has a regularization effect that is beneficial only in the short-term, so it is always better to switch as late as possible. They further conjecture that this discrepancy between CIFAR-10 and ImageNet is mainly due to the task scale. On TinyImageNet, which is a spatially downscaled subset of ImageNet, the first-decay switching strategy indeed leads to better validation accuracy. 
+ +![](images/f570556fdfaa0f6044b6f240a2cb7d16b8e147d57d421fd6b15c4809f3cddcfc.jpg) +(a) CIFAR-10, start from random. + +![](images/6461538aea94e321f86ced7d94c6c8a21dd6a9c4cd72c786139df36ceef1178e.jpg) +(b) CIFAR-10, start from #250. + +![](images/b98462c3f9e7557bf75477fde8fe81b76ca936bbd82faa548f0ffa3271ebb7bc.jpg) +(c) ImageNet, start from #100. + +![](images/195a334edf2abcfae1bb681d2e115a4eea3b4fcca6268203023bb1ad7ae80af2.jpg) +(d) ImageNet, first phase $\eta = 3.2$ + +![](images/68ffb33aa41408a4060ba829d1d7b2754f404172061830c7aa362204cfde84d1.jpg) +(e) CIFAR-10, test acc v.s. $H$ + +![](images/0c72702dcc36c753b25919680336467916535f7964024c130c3ff1332f48704b.jpg) +(f) ImageNet, test acc v.s. $H$ +Figure 2: Ablation studies on $\eta$ , $H$ and training time in the same setting as Figure 1. For (a)(d), we train from random initialization. For (b)(c)(e)(f), we start training from the checkpoints saved at the switching time points in Figure 1 (epoch #250 for CIFAR-10 and epoch #100 for ImageNet). See Appendix M.2 for training details. + +# 2.2 KEY FACTORS: SMALL LEARNING RATE AND SUFFICIENT TRAINING TIME + +All the above papers agree that Post-local/Local SGD improves upon SGD to some extent. However, it is in debate under what conditions the generalization benefit can consistently occur. We now conduct ablation studies to identify the key factors so that adding local steps improves the generalization of SGD. We run parallel SGD and Local SGD with the same learning rate $\eta$ , local batch size $B_{\mathrm{loc}}$ , and number of workers $K$ . We start training from the same initialization and compare their generalization after the same number of epochs. As Post-local SGD can be viewed as Local SGD starting from an SGD-pretrained model, the initial point in our experiments can be either random or a checkpoint of SGD training. See Appendix C for implementation details and Appendix M.2 for more details about the experimental setup. 
+ +The first observation we have is that the generalization benefits can be reproduced on both CIFAR-10 and ImageNet in our setting (see Figure 1). We remark that Post-local SGD and SGD in Lin et al. (2020b); Ortiz et al. (2021) are implemented with accompanying Nesterov momentum terms. The learning rate also decays a couple of times in training with Local SGD. Nevertheless, our experiments show that the Nesterov momentum and learning rate decay are not necessary for Local SGD to generalize better than SGD. Our main finding after further ablation studies is summarized below: + +Finding 2.1. Given a sufficiently small learning rate and a sufficiently long training time, Local SGD exhibits better generalization than SGD, if the number of local steps $H$ per round is tuned properly according to the learning rate. This holds for both training from random initialization and from pre-trained models. + +Now we go through each point of our main finding. See also Appendix F for more plots. + +(1). Pretraining is not necessary. In contrast to previous works claiming the benefits of Post-local SGD over Local SGD (Lin et al., 2020b; Ortiz et al., 2021), we observe that Local SGD with random initialization also generalizes significantly better than SGD, as long as the learning rate is small and the training time is sufficiently long (Figure 2(a)). Starting from a pretrained model may shorten the time for this generalization benefit to show up (Figure 2(b)), but it is not necessary. +(2). Learning rate should be small. We experiment with a wide range of learning rates to conclude that setting a small learning rate is necessary. The learning rate is 0.32 for Figures 2(a) and 2(b) and is 0.16 for Figure 2(c). As shown in Figure 2(d), Local SGD encounters optimization difficulty in the first phase where $\eta$ is large ( $\eta = 3.2$ ), resulting in inferior final test accuracy.
Even for training from a pretrained model, the generalization improvement of Local SGD disappears for large learning rates (e.g., $\eta = 1.6$ in Figure 5(d)). In contrast, if a longer training time is allowed, reducing the learning rate of Local SGD does not lead to test accuracy drop (Figure 5(c)). +(3). Training time should be long enough. To investigate the effect of training time, in Figures 2(b) and 2(c), we extend the training budget for the Post-local SGD experiments in Figure 1 and observe that a longer training time leads to greater generalization improvement upon SGD. On the other hand, Local SGD generalizes worse than SGD in the first few epochs of Figures 2(a) and 2(c); see Figures 5(a) and 5(b) for an enlarged view. + +(4). The number of local steps $H$ should be tuned carefully. The number of local steps $H$ has a complex interplay with the learning rate $\eta$ , but generally speaking, a smaller $\eta$ needs a higher $H$ to achieve consistent generalization improvement. For CIFAR-10 with a post-local training budget of 250 epochs (see Figure 2(e)), the test accuracy first rises as $H$ increases, and begins to fall as $H$ exceeds some threshold for relatively large $\eta$ (e.g., $\eta \geq 0.5$ ) while it keeps growing for smaller $\eta$ (e.g., $\eta < 0.5$ ). For ImageNet with a post-local training budget of 50 epochs (see Figure 2(f)), the test accuracy first increases and then decreases in $H$ for all learning rates. + +Reconciling previous works. Our finding can help to settle the debate presented in Section 2.1 to a large extent. Simultaneously requiring a small learning rate and sufficient training time poses a trade-off when learning rate decay is used with a limited training budget: switching to Local SGD earlier may lead to a large learning rate, while switching later makes the generalization improvement of Local SGD less noticeable due to fewer update steps. It is thus unsurprising that the first-decay switching strategy is not always the best.
The need for sufficient training time does not contradict Ortiz et al. (2021)'s conjecture that Local SGD only has a "short-term" generalization benefit. In their experiments, the generalization improvement usually disappears right after the next learning rate decay (instead of after a fixed amount of time). We suspect that the real reason why the improvement vanishes is that the number of local steps $H$ was kept as a constant, but our finding suggests tuning $H$ after $\eta$ changes. In Figure 5(e), we reproduce this phenomenon and show that increasing $H$ after learning rate decay retains the improvement. + +Generalization performances at the optimal learning rate of SGD. In practice, the learning rate of SGD is usually tuned to achieve the best training loss/validation accuracy within a fixed training budget. Our finding suggests that when the tuned learning rate is small and the training time is sufficient, Local SGD can offer generalization improvement over SGD. As an example, in our experiments on training from an SGD-pretrained model, the optimal learning rate for SGD is 0.5 on CIFAR-10 (Figure 2(e)) and 0.064 on ImageNet (Figure 2(f)). With the same learning rate as SGD, the test accuracy is improved by $1.1\%$ on CIFAR-10 and $0.3\%$ on ImageNet when using Local SGD with $H = 750$ and $H = 26$ respectively. The improvement could become even higher if the learning rate of Local SGD is carefully tuned. + +# 3 THEORETICAL ANALYSIS OF LOCAL SGD: THE SLOW SDE + +In this section, we adopt an SDE-based approach to rigorously establish the generalization benefit of Local SGD in a general setting. Below, we first identify the difficulty of adapting the SDE framework to Local SGD. Then, we present our novel SDE characterization of Local SGD around the manifold of minimizers and explain the generalization benefit of Local SGD with our SDE. + +Notations. We follow the notations in Section 1.
We denote by $\eta$ the learning rate, $K$ the number of workers, $B$ the (global) batch size, $B_{\mathrm{loc}}\coloneqq B / K$ the local batch size, $H$ the number of local steps, $\ell (\pmb {\theta};\xi)$ the loss function for a data sample $\xi$ , and $\tilde{\mathcal{D}}$ the training distribution. Furthermore, we define $\mathcal{L}(\pmb {\theta})\coloneqq \mathbb{E}_{\xi \sim \tilde{\mathcal{D}}}[\ell (\pmb {\theta};\xi)]$ as the expected loss, $\Sigma (\pmb {\theta})\coloneqq \operatorname{Cov}_{\xi \sim \tilde{\mathcal{D}}}[\nabla \ell (\pmb {\theta};\xi)]$ as the noise covariance of gradients at $\pmb{\theta}$ . Let $\{W_t\}_{t\geq 0}$ denote the standard Wiener process. For a mapping $F:\mathbb{R}^d\to \mathbb{R}^d$ , denote by $\partial F(\pmb {\theta})$ the Jacobian at $\pmb{\theta}$ and $\partial^2 F(\pmb {\theta})$ the second order derivative at $\pmb{\theta}$ . Furthermore, for any matrix $M\in \mathbb{R}^{d\times d}$ , $\partial^2 F(\pmb {\theta})[M] = \sum_{i\in [d]}\langle \frac{\partial^2F_i}{\partial\theta^2},M\rangle e_i$ where $e_i$ is the $i$ -th vector of the standard basis. We write $\partial^2 (\nabla \mathcal{L})(\pmb {\theta})[M]$ as $\nabla^3\mathcal{L}(\pmb {\theta})[M]$ for short. + +Local SGD. We use the following formulation of Local SGD for theoretical analysis. See also Appendix C for the pseudocode. Local SGD proceeds in multiple rounds of model averaging, where each round produces a global iterate $\bar{\theta}^{(s)}$ . In the $(s + 1)$ -th round, every worker $k \in [K]$ starts with its local copy of the global iterate $\pmb{\theta}_{k,0}^{(s)} \gets \bar{\pmb{\theta}}^{(s)}$ and does $H$ steps of SGD with local batches.
In the $t$ -th local step of the $k$ -th worker, it draws a local batch of $B_{\mathrm{loc}} \coloneqq B / K$ independent samples $\xi_{k,t,1}^{(s)}, \dots, \xi_{k,t,B_{\mathrm{loc}}}^{(s)}$ from a shared training distribution $\tilde{\mathcal{D}}$ and updates as follows: + +$$ +\boldsymbol {\theta} _ {k, t + 1} ^ {(s)} \leftarrow \boldsymbol {\theta} _ {k, t} ^ {(s)} - \eta \boldsymbol {g} _ {k, t} ^ {(s)}, \quad \text {w h e r e} \quad \boldsymbol {g} _ {k, t} ^ {(s)} = \frac {1}{B _ {\mathrm {l o c}}} \sum_ {i = 1} ^ {B _ {\mathrm {l o c}}} \nabla \ell \left(\boldsymbol {\theta} _ {k, t} ^ {(s)}; \xi_ {k, t, i} ^ {(s)}\right), \quad t = 0, \dots , H - 1. \tag {2} +$$ + +The local updates on different workers are independent of each other as there is no communication. After finishing the $H$ local steps, the workers aggregate the resulting local iterates $\pmb{\theta}_{k,H}^{(s)}$ and assign the average to the next global iterate: $\bar{\pmb{\theta}}^{(s + 1)}\gets \frac{1}{K}\sum_{k = 1}^{K}\pmb{\theta}_{k,H}^{(s)}$ . + +# 3.1 DIFFICULTY OF ADAPTING THE SDE FRAMEWORK TO LOCAL SGD + +A widely-adopted approach to understanding the dynamics of SGD is to approximate it from a continuous perspective with the following SDE (3), which we call the conventional SDE approximation. Below, we discuss why it cannot be directly adopted to characterize the behavior of Local SGD. + +$$ +\mathrm {d} \boldsymbol {X} (t) = - \nabla \mathcal {L} (\boldsymbol {X}) \mathrm {d} t + \sqrt {\frac {\eta}{B}} \boldsymbol {\Sigma} ^ {1 / 2} (\boldsymbol {X}) \mathrm {d} \boldsymbol {W} _ {t}. \tag {3} +$$ + +It is proved by Li et al. (2019a) that this SDE is a first-order approximation to SGD, where each discrete step corresponds to a continuous time interval of $\eta$ . 
Several previous works adopt this SDE approximation and connect good generalization to having a large diffusion term $\sqrt{\frac{\eta}{B}} \Sigma^{1/2} \mathrm{d}W_t$ in the SDE (Jastrzewski et al., 2017; Smith et al., 2020), because a suitable amount of noise can be necessary for large-batch training to generalize well (see also Appendix A). + +According to Finding 2.1, it is tempting to consider the limit $\eta \to 0$ and see if Local SGD can also be modeled via a variant of the conventional SDE. In this case the typical time length that guarantees a good SDE approximation error is $\mathcal{O}(\eta^{-1})$ discrete steps (Li et al., 2019a; 2021a). However, this time scaling is too short for the difference to appear between Local SGD and SGD. Indeed, Theorem 3.1 below shows that they closely track each other for $\mathcal{O}(\eta^{-1})$ steps. + +Theorem 3.1. Assume that the loss function $\mathcal{L}$ is $\mathcal{C}^3$ -smooth with bounded second and third order derivatives and that $\nabla \ell (\pmb {\theta};\xi)$ is bounded. Let $T > 0$ be a constant, $\bar{\pmb{\theta}}^{(s)}$ be the $s$ -th global iterate of Local SGD and $\pmb {w}_t$ be the $t$ -th iterate of SGD with the same initialization $\pmb {w}_0 = \bar{\pmb{\theta}}^{(0)}$ and same $\eta, B_{\mathrm{loc}}, K$ . Then for any $H\leq \frac{T}{\eta}$ and $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , it holds with probability at least $1 - \delta$ that for all $s\leq \frac{T}{\eta H}$ , $\| \bar{\pmb{\theta}}^{(s)} - \pmb{w}_{sH}\| _2 = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta\delta}})$ . + +We defer the proof to Appendix I. See also Appendix D for Lin et al. (2020b)'s attempt to model Local SGD with multiple conventional SDEs and discussions on why it does not give much insight. 
+ +# 3.2 SDE APPROXIMATION NEAR THE MINIMIZER MANIFOLD + +Inspired by a recent paper (Li et al., 2021b), our strategy to overcome the shortcomings of the conventional SDE is to design a new SDE that can guarantee a good approximation for $\mathcal{O}(\eta^{-2})$ discrete steps, much longer than the $\mathcal{O}(\eta^{-1})$ discrete steps for the conventional SDE. Following their setting, we assume the existence of a manifold $\Gamma$ consisting only of local minimizers and track the global iterate $\bar{\theta}^{(s)}$ around $\Gamma$ after it takes $\tilde{\mathcal{O}} (\eta^{-1})$ steps to approach $\Gamma$ . Though the expected gradient $\nabla \mathcal{L}$ is near zero around $\Gamma$ , the dynamics are still non-trivial because the noise can drive the iterate to move a significant distance in $\mathcal{O}(\eta^{-2})$ steps. + +Assumption 3.1. The loss function $\mathcal{L}(\cdot)$ and the matrix square root of the noise covariance $\Sigma^{1/2}(\cdot)$ are $\mathcal{C}^\infty$ -smooth. Besides, we assume that $\|\nabla \ell(\boldsymbol{\theta}; \xi)\|_2$ is bounded by a constant for all $\boldsymbol{\theta}$ and $\xi$ . + +Assumption 3.2. $\Gamma$ is a $\mathcal{C}^\infty$ -smooth, $(d - m)$ -dimensional submanifold of $\mathbb{R}^d$ , where any $\zeta \in \Gamma$ is a local minimizer of $\mathcal{L}$ . For all $\zeta \in \Gamma$ , $\mathrm{rank}(\nabla^2\mathcal{L}(\zeta)) = m$ . Additionally, there exists an open neighborhood of $\Gamma$ , denoted as $U$ , such that $\Gamma = \arg \min_{\pmb{\theta} \in U} \mathcal{L}(\pmb{\theta})$ . + +Assumption 3.3. $\Gamma$ is a compact manifold. + +The smoothness assumption on $\mathcal{L}$ is generally satisfied when we use smooth activation functions, such as Swish (Ramachandran et al., 2017), softplus and GeLU (Hendrycks & Gimpel, 2016), which work equally well as ReLU in many circumstances. 
The existence of a minimizer manifold with $\mathrm{rank}(\nabla^2\mathcal{L}(\zeta)) = m$ has also been made as a key assumption in Fehrman et al. (2020); Li et al. (2021b); Lyu et al. (2022), where $\mathrm{rank}(\nabla^2\mathcal{L}(\zeta)) = m$ ensures that the Hessian is maximally nondegenerate on the manifold and implies that the tangent space at $\zeta \in \Gamma$ equals the null space of $\nabla^2\mathcal{L}(\zeta)$ . The last assumption is made to prevent the analysis from being too technically involved. + +Our SDE for Local SGD characterizes the training dynamics near $\Gamma$ . For ease of presentation, we define the following projection operators $\Phi, P_{\zeta}$ for points and differential forms respectively. + +Definition 3.1 (Gradient Flow Projection). Fix a point $\theta_{\mathrm{null}} \notin \Gamma$ . For $\pmb{x} \in \mathbb{R}^d$ , consider the gradient flow $\frac{\mathrm{d}\pmb{x}(t)}{\mathrm{d}t} = -\nabla \mathcal{L}(\pmb{x}(t))$ with $\pmb{x}(0) = \pmb{x}$ . We denote the gradient flow projection of $\pmb{x}$ as $\Phi(\pmb{x})$ . $\Phi(\pmb{x}) := \lim_{t \to +\infty} \pmb{x}(t)$ if the limit exists and belongs to $\Gamma$ ; otherwise, $\Phi(\pmb{x}) = \theta_{\mathrm{null}}$ . + +Definition 3.2. For any $\zeta \in \Gamma$ and any differential form $\mathbf{AdW}_t + \mathbf{bdt}$ in Itô calculus, where $\mathbf{A}$ is a matrix and $\mathbf{b}$ is a vector, we use $P_{\zeta}(\mathbf{AdW}_t + \mathbf{bdt})$ as a shorthand for the differential form $\partial \Phi (\zeta)\mathbf{AdW}_t + \left(\partial \Phi (\zeta)\mathbf{b} + \frac{1}{2}\partial^2\Phi (\zeta)[\mathbf{AA}^\top ]\right)\mathrm{d}t.$ + +See Øksendal (2013) for an introduction to Itô calculus. Here $P_{\zeta}$ equals $\Phi (\zeta +A\mathrm{d}\pmb {W}_t + \pmb {b}\mathrm{d}t) - \Phi (\zeta)$ by Itô calculus, which means that $P_{\zeta}$ projects an infinitesimal step from $\zeta$ , so that $\zeta$ after taking the projected step does not leave the manifold $\Gamma$ . 
It can be shown by simple calculus that $\partial \Phi (\zeta)$ equals the projection matrix onto the tangent space of $\Gamma$ at $\zeta$ . We decompose the noise covariance $\Sigma (\zeta)$ for $\zeta \in \Gamma$ into two parts: the noise in the tangent space $\Sigma_{\parallel}(\zeta)\coloneqq \partial \Phi (\zeta)\Sigma (\zeta)\partial \Phi (\zeta)$ and the noise in the rest $\Sigma_{\diamond}(\zeta)\coloneqq \Sigma (\zeta) - \Sigma_{\parallel}(\zeta)$ . Now we are ready to state our SDE for Local SGD. + +Definition 3.3 (Slow SDE for Local SGD). Given $\eta, H > 0$ and $\zeta_0 \in \Gamma$ , define $\zeta(t)$ as the solution of the following SDE with initial condition $\zeta(0) = \zeta_0$ : + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\boldsymbol {\zeta}} \left(\underbrace {\frac {1}{\sqrt {B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t}} _ {(a)\ \mathrm{diffusion}} \underbrace {- \frac {1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {(b)\ \mathrm{drift\text{-}I}} \underbrace {- \frac {K - 1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Psi}} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {(c)\ \mathrm{drift\text{-}II}}\right). 
\tag {4} +$$ + +Here $\widehat{\Sigma}_{\diamond}(\zeta), \widehat{\Psi}(\zeta) \in \mathbb{R}^{d \times d}$ are defined as + +$$ +\widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) := \sum_ {i, j: (\lambda_ {i} \neq 0) \vee (\lambda_ {j} \neq 0)} \frac {1}{\lambda_ {i} + \lambda_ {j}} \left\langle \boldsymbol {\Sigma} _ {\diamond} (\boldsymbol {\zeta}), \boldsymbol {v} _ {i} \boldsymbol {v} _ {j} ^ {\top} \right\rangle \boldsymbol {v} _ {i} \boldsymbol {v} _ {j} ^ {\top}, \tag {5} +$$ + +$$ +\widehat {\boldsymbol {\Psi}} (\boldsymbol {\zeta}) := \sum_ {i, j: (\lambda_ {i} \neq 0) \vee (\lambda_ {j} \neq 0)} \frac {\psi (\eta H \cdot (\lambda_ {i} + \lambda_ {j}))}{\lambda_ {i} + \lambda_ {j}} \left\langle \boldsymbol {\Sigma} _ {\diamond} (\boldsymbol {\zeta}), \boldsymbol {v} _ {i} \boldsymbol {v} _ {j} ^ {\top} \right\rangle \boldsymbol {v} _ {i} \boldsymbol {v} _ {j} ^ {\top}, \tag {6} +$$ + +where $\{\pmb{v}_i\}_{i=1}^d$ is a set of eigenvectors of $\nabla^2\mathcal{L}(\zeta)$ that forms an orthonormal eigenbasis, and $\lambda_1, \ldots, \lambda_d$ are the corresponding eigenvalues. Additionally, $\psi(x) := \frac{e^{-x} - 1 + x}{x}$ for $x \neq 0$ and $\psi(0) = 0$ . + +The use of $P_{\zeta}$ keeps $\zeta(t)$ on the manifold $\Gamma$ through projection. $\Sigma_{\parallel}^{\frac{1}{2}}(\zeta)$ introduces a diffusion term to the SDE in the tangent space. The two drift terms involve $\widehat{\Sigma}_{\diamond}(\cdot)$ and $\widehat{\Psi}(\cdot)$ , which can be intuitively understood as rescaling the entries of the noise covariance in the eigenbasis of Hessian. In the special case where $\nabla^{2}\mathcal{L} = \mathrm{diag}(\lambda_{1},\dots,\lambda_{d}) \in \mathbb{R}^{d\times d}$ , we have $\widehat{\Sigma}_{\diamond,i,j} = \frac{1}{\lambda_i + \lambda_j}\Sigma_{\diamond,i,j}$ and $\widehat{\Psi}_{i,j} = \frac{\psi(\eta H(\lambda_i + \lambda_j))}{\lambda_i + \lambda_j}\Sigma_{\diamond,i,j}$ . 
$\psi(x)$ is a monotonically increasing function, which goes from 0 to 1 as $x$ goes from 0 to infinity (see Figure 9). + +We name this SDE as the Slow SDE for Local SGD because we will show that each discrete step of Local SGD corresponds to a continuous time interval of $\eta^2$ instead of an interval of $\eta$ in the conventional SDE. In this sense, our SDE is "slower" than the conventional SDE (and hence can track a longer horizon). This Slow SDE is inspired by Li et al. (2021b). Under nearly the same set of assumptions, they proved that SGD can be tracked by an SDE that is essentially equivalent to (4) with $K = 1$ , namely, without the drift-II term. + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\boldsymbol {\zeta}} \left(\underbrace {\frac {1}{\sqrt {B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t}} _ {\text {(a) diffusion}} \underbrace {- \frac {1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {(b) drift-I}}\right). \tag {7} +$$ + +We refer to (7) as the Slow SDE for SGD. We remark that the drift-II term in (4) is novel and is the key to separate the generalization behaviors of Local SGD and SGD in theory. We will discuss this point later in Section 3.3. Now we present our SDE approximation theorem for Local SGD. + +Theorem 3.2. Let Assumptions 3.1 to 3.3 hold. Let $T > 0$ be a constant and $\zeta(t)$ be the solution to (4) with the initial condition $\zeta(0) = \Phi(\bar{\theta}^{(0)}) \in \Gamma$ . 
If $H$ is set to $\frac{\alpha}{\eta}$ for some constant $\alpha > 0$ , then for any $\mathcal{C}^3$ -smooth function $g(\pmb{\theta})$ , $\max_{0 \leq s \leq \frac{T}{H\eta^2}} \left| \mathbb{E}[g(\Phi(\bar{\pmb{\theta}}^{(s)}))] - \mathbb{E}[g(\pmb{\zeta}(sH\eta^2)] \right| = \tilde{\mathcal{O}}(\eta^{0.25})$ , where $\tilde{\mathcal{O}}(\cdot)$ hides log factors and constants that are independent of $\eta$ but can depend on $g(\pmb{\theta})$ . + +Theorem 3.3. For $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , it holds for all $\mathcal{O}\left(\frac{1}{\alpha}\log \frac{1}{\eta}\right)\leq s\leq \frac{T}{\alpha\eta}$ that $\Phi (\bar{\pmb{\theta}}^{(s)})\in \Gamma$ and $\| \bar{\pmb{\theta}}^{(s)} - \Phi (\bar{\pmb{\theta}}^{(s)})\| _2 = \mathcal{O}(\sqrt{\alpha\eta\log\frac{\alpha}{\eta\delta}})$ , where $\mathcal{O}(\cdot)$ hides constants independent of $\eta$ , $\alpha$ and $\delta$ . + +Theorem 3.2 suggests that the trajectories of the manifold projection and the solution to the Slow SDE (4) are close to each other in the weak approximation sense. That is, $\{\Phi (\bar{\theta}^{(s)})\}$ and $\{\zeta (t)\}$ cannot be distinguished by evaluating test functions from a wide function class, including all polynomials. This measurement of closeness between the iterates of stochastic gradient algorithms and their SDE approximations is also adopted by Li et al. (2019a; 2021a); Malladi et al. (2022), but their analyses are for conventional SDEs. Theorem 3.3 further states that the iterate $\bar{\theta}^{(s)}$ keeps close to its manifold projection after the first few rounds. + +Remark 3.1. To connect to Finding 2.1, we remark that our theorems (1) do not require the model to be pre-trained (as long as the gradient flow starting with $\theta^{(0)}$ converges to $\Gamma$ ); (2) give better bounds for smaller $\eta$ ; (3) characterize a long training horizon $\sim \eta^{-2}$ . 
The need for tuning $H$ will be discussed in Section 3.3.3. + +Technical Contribution. The proof technique for Theorem 3.2 is novel and significantly different from the Slow SDE analysis of SGD in Li et al. (2021b). Their analysis uses advanced stochastic calculus and invokes Katzenberger's theorem (Katzenberger, 1991) to show that SGD converges to the Slow SDE in distribution, but no quantitative error bounds are provided. Also, due to the local updates and multiple aggregation steps in Local SGD, it is unclear how to extend Katzenberger's theorem to our case. To overcome this difficulty, we develop a new approach to analyze the Slow SDEs, which is based on the method of moments (Li et al., 2019a) and can provide the quantitative error bound $\tilde{\mathcal{O}} (\eta^{0.25})$ in weak approximation. See Appendix J for our proof outline. A by-product of our result is the first quantitative approximation bound for the Slow SDE approximation for SGD, which can be easily obtained by setting $K = 1$ . + +# 3.3 INTERPRETATION OF THE SLOW SDEs + +In this subsection, we compare the Slow SDEs for SGD and Local SGD and provide an important insight into why Local SGD generalizes better than SGD: Local SGD strengthens the drift term in the Slow SDE, which makes the implicit regularization of stochastic gradient noise more effective. + +# 3.3.1 INTERPRETATION OF THE SLOW SDE FOR SGD. + +The Slow SDE for SGD (7) consists of the diffusion and drift-I terms. The former injects noise into the dynamics in the tangent space; the latter one drives the dynamics to move along the negative gradient of $\frac{1}{2B}\langle \nabla^2\mathcal{L}(\zeta),\widehat{\Sigma}_{\diamond}(\zeta)\rangle$ projected onto the tangent space, but ignoring the dependency of $\widehat{\Sigma}_{\diamond}(\zeta)$ on $\zeta$ . This can be connected to the class of semi-gradient methods which only computes a part of the gradient (Mnih et al., 2015; Sutton & Barto, 1998; Brandfonbrener & Bruna, 2020). 
In this view, the long-term behavior of SGD is similar to a stochastic semi-gradient method minimizing the implicit regularizer $\frac{1}{2B}\langle \nabla^2\mathcal{L}(\zeta),\widehat{\Sigma}_{\diamond}(\zeta)\rangle$ on the minimizer manifold of the original loss $\mathcal{L}$ . + +Though the semi-gradient method may not perfectly optimize its objective, the above argument reveals that SGD has a deterministic trend toward the region with a smaller magnitude of Hessian, which is commonly believed to correlate with better generalization (Hochreiter & Schmidhuber, 1997; Keskar et al., 2017; Neyshabur et al., 2017; Jiang et al., 2020) (see Appendix A for more discussions). In contrast, the diffusion term can be regarded as a random perturbation to this trend, which can impede optimization when the drift-I term is not strong enough. + +Based on this view, we conjecture that strengthening the drift term of the Slow SDE can help SGD to better regularize the model, yielding a better generalization performance. More specifically, we propose the following hypothesis, which compares the generalization performances of the following generalized Slow SDEs. Note that $\left(\frac{1}{B},\frac{1}{2B}\right)$ -Slow SDE corresponds to the Slow SDE for SGD (7). + +Definition 3.4. For $\kappa_{1},\kappa_{2}\geq 0$ , define $(\kappa_{1},\kappa_{2})$ -Slow SDE to be the following: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\boldsymbol {\zeta}} \left(\sqrt {\kappa_ {1}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t} - \kappa_ {2} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t\right). \tag {8} +$$ + +Hypothesis 3.1. Starting at a minimizer $\zeta_0\in \Gamma$ , run $(\kappa_{1},\kappa_{2})$ -Slow SDE and $(\kappa_{1},\kappa_{2}^{\prime})$ -Slow SDE respectively for the same amount of time $T > 0$ and obtain $\zeta (T),\zeta '(T)$ . 
If $\kappa_{2} > \kappa_{2}^{\prime}$ , then the expected test accuracy at $\zeta (T)$ is better than that at $\zeta '(T)$ . + +Due to the No Free Lunch Theorem, we do not claim that our hypothesis is always true, but we do believe that the hypothesis holds when training usual neural networks (e.g., ResNets, VGGNets) on standard benchmarks (e.g., CIFAR-10, ImageNet). + +Example: Training with Label Noise Regularization. To exemplify the generalization benefit of having a larger drift term, we follow a line of theoretical works (Li et al., 2021b; Blanc et al., 2020; Damian et al., 2021) to study the case of training over-parameterized neural nets with label noise regularization. For a $C$ -class classification task, the label noise regularization is as follows: every time we draw a sample from the training set, we keep the true label as it is with probability $1 - p$ and replace it with any other label with equal probability $\frac{p}{C-1}$ . When we use cross-entropy loss, the Slow SDE for SGD turns out to be a simple deterministic gradient flow on $\Gamma$ (instead of a semi-gradient method) for minimizing the trace of Hessian: $\mathrm{d}\boldsymbol{\zeta}(t) = -\frac{1}{4B}\nabla_{\Gamma}\mathrm{tr}(\nabla^{2}\mathcal{L}(\boldsymbol{\zeta}))\mathrm{d}t$ , where $\nabla_{\Gamma}f$ + +stands for the gradient of the function $f$ projected to the tangent space of $\Gamma$ . Checking the validity of our hypothesis reduces to the following question: Is minimizing the trace of Hessian beneficial to generalization? Many works prove positive results in concrete settings, including the line of works we just mentioned. We refer the readers to Appendix G for further discussion. + +# 3.3.2 LOCAL SGD STRENGTHENS THE DRIFT TERM IN SLOW SDE. + +Based on Hypothesis 3.1, we argue that Local SGD improves generalization by strengthening the drift term of the Slow SDE. First, it can be seen from (4) that the Slow SDE for Local SGD has an additional drift-II term. 
Similar to the drift-I term of the Slow SDE for SGD, this drift-II term drives the dynamics to move along the negative semi-gradient of $\frac{K - 1}{2B}\langle \nabla^2\mathcal{L}(\zeta),\widehat{\Psi} (\zeta)\rangle$ (with the dependency of $\widehat{\Psi} (\zeta)$ on $\zeta$ ignored). Combining it with the implicit regularizer induced by the drift-I term, we can see that the long-term behavior of Local SGD is similar to a stochastic semi-gradient method minimizing the implicit regularizer $\frac{1}{2B}\langle \nabla^{2}\mathcal{L}(\zeta),\widehat{\Sigma}_{\diamond}(\zeta)\rangle +\frac{K - 1}{2B}\langle \nabla^{2}\mathcal{L}(\zeta),\widehat{\Psi} (\zeta)\rangle$ on $\Gamma$ . + +Comparing the definitions of $\widehat{\Sigma}_{\diamond}(\zeta)$ (5) and $\widehat{\Psi}(\zeta)$ (6), we can see that $\widehat{\Psi}(\zeta)$ is basically a rescaling of the entries of $\widehat{\Sigma}_{\diamond}(\zeta)$ in the eigenbasis of Hessian, where the rescaling factor $\psi(\eta H \cdot (\lambda_i + \lambda_j))$ for each entry is between 0 and 1 (see Figure 9 for the plot of $\psi$ ). When $\eta H$ is small, the rescaling factors should be close to $\psi(0) = 0$ , then $\widehat{\Psi}(\zeta) \approx \mathbf{0}$ , leading to almost no additional regularization. On the other hand, when $\eta H$ is large, the rescaling factors should be close to $\psi(+\infty) = 1$ , so $\widehat{\Psi}(\zeta) \approx \widehat{\Sigma}_{\diamond}(\zeta)$ . 
We can then merge the two implicit regularizers as $\frac{K}{2B} \langle \nabla^2 \mathcal{L}(\zeta), \widehat{\Sigma}_{\diamond}(\zeta) \rangle$ , and (4) becomes the $(\frac{1}{B}, \frac{K}{2B})$ -Slow SDE, which is restated below: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\boldsymbol {\zeta}} \left(\frac {1}{\sqrt {B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t} - \frac {K}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t\right). \tag {9} +$$ + +From the above argument we know how the Slow SDE of Local SGD (4) changes as $\eta H$ transitions from 0 to $+\infty$ . Initially, when $\eta H = 0$ , (4) is the same as the $(\frac{1}{B}, \frac{1}{2B})$ -Slow SDE for SGD. Then increasing $\eta H$ strengthens the drift term of (4). As $\eta H \to +\infty$ , (4) transitions to the $(\frac{1}{B}, \frac{K}{2B})$ -Slow SDE, where the drift term becomes $K$ times larger. + +According to Hypothesis 3.1, the $(\frac{1}{B},\frac{K}{2B})$ -Slow SDE generalizes better than the $(\frac{1}{B},\frac{1}{2B})$ -Slow SDE, so Local SGD with $\eta H = +\infty$ should generalize better than SGD. When $\eta H$ is chosen realistically as a finite value, the generalization performance of Local SGD interpolates between these two cases, which results in a worse generalization than $\eta H = +\infty$ but should still be better than SGD. + +# 3.3.3 THEORETICAL INSIGHTS INTO TUNING THE NUMBER OF LOCAL STEPS + +Based on our Slow SDE approximations, we now discuss how the number of local steps $H$ affects the generalization of Local SGD. When $\eta$ is small but finite, tuning $H$ offers a trade-off between regularization strength and SDE approximation quality. 
Larger $\alpha \coloneqq \eta H$ makes the regularization stronger in the SDE (as discussed in Section 3.3.2), but the SDE itself may lose track of Local SGD, which can be seen from the error bound $\mathcal{O}(\sqrt{\alpha\eta\log(\alpha / \eta\delta)})$ in Theorem 3.3. Therefore, we expect the test accuracy to first increase and then decrease as we gradually increase $H$ . Indeed, we observe in Figures 2(e) and 2(f) that the plot of test accuracy versus $H$ is unimodal for each $\eta$ . + +It is thus necessary to tune $H$ for the best generalization. When $H$ is tuned together with other hyperparameters, such as learning rate $\eta$ , our Slow SDE approximation recommends setting $H$ to be at least $\Omega(\eta^{-1})$ so that $\alpha := \eta H$ does not vanish in the Slow SDE. Since larger $\alpha$ gives a stronger regularization effect, the optimal $H$ should be set to the largest value so that the Slow SDE does not lose track of Local SGD. Indeed, we empirically observed that when $H$ is tuned optimally, $\alpha$ increases as $\eta$ decreases, suggesting that the optimal $H$ grows faster than $\Omega(\eta^{-1})$ . See Figure 5(f). + +# 4 CONCLUSIONS + +In this paper, we analyze the long-term generalization behavior of Local SGD in the small learning rate regime by deriving the Slow SDE for Local SGD as a generalization of that for SGD (Li et al., 2021b). We attribute the generalization improvement over SGD to the larger drift term in the SDE for Local SGD. Our empirical validation shows that Local SGD indeed induces generalization benefits with small learning rate and long enough training time. The main limitation of our work is that our analysis does not imply any direct theoretical separation between SGD and Local SGD in test accuracy, which requires a much deeper understanding of the loss landscape and the Slow SDEs and is left for future work. 
Another direction for future work is to design distributed training methods that provably generalize better than SGD based on the theoretical insights obtained from Slow SDEs. + +# ACKNOWLEDGEMENT AND DISCLOSURE OF FUNDING + +The work of Xinran Gu and Longbo Huang is supported by the Technology and Innovation Major Project of the Ministry of Science and Technology of China under Grant 2020AAA0108400 and 2020AAA0108403, the Tsinghua University Initiative Scientific Research Program, and Tsinghua Precision Medicine Foundation 10001020109. The work of Kaifeng Lyu and Sanjeev Arora is supported by funding from NSF, ONR, Simons Foundation, DARPA and SRC. + +# REFERENCES + +Kwangjun Ahn, Jingzhao Zhang, and Suvrit Sra. Understanding the unstable convergence of gradient descent. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 247-257. PMLR, 17-23 Jul 2022. +Debraj Basu, Deepesh Data, Can Karakus, and Suhas Diggavi. Qsparse-local-SGD: Distributed SGD with quantization, sparsification and local computations. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. +Yoshua Bengio. Practical Recommendations for Gradient-Based Training of Deep Architectures, pp. 437-478. Springer Berlin Heidelberg, Berlin, Heidelberg, 2012. ISBN 978-3-642-35289-8. doi: 10.1007/978-3-642-35289-8_26. +Guy Blanc, Neha Gupta, Gregory Valiant, and Paul Valiant. Implicit regularization for deep neural networks driven by an Ornstein-uhlenbeck like process. In Jacob Abernethy and Shivani Agarwal (eds.), Proceedings of Thirty Third Conference on Learning Theory, volume 125 of Proceedings of Machine Learning Research, pp. 483–513. PMLR, 09–12 Jul 2020. +David Brandfonbrener and Joan Bruna. 
Geometric insights into the convergence of nonlinear TD learning. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020. +Jianmin Chen, Xinghao Pan, Rajat Monga, Samy Bengio, and Rafal Jozefowicz. Revisiting distributed synchronous SGD. arXiv preprint arXiv:1604.00981, 2016. +Kai Chen and Qiang Huo. Scalable training of deep learning machines by incremental block training with intra-block parallel optimization and blockwise model-update filtering. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5880-5884, 2016. doi: 10.1109/ICASSP.2016.7472805. +Alex Damian, Tengyu Ma, and Jason D. Lee. Label noise SGD provably prefers flat global minimizers. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, 2021. +Laurent Dinh, Razvan Pascanu, Samy Bengio, and Yoshua Bengio. Sharp minima can generalize for deep nets. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 1019-1028. PMLR, 06-11 Aug 2017. +Aijun Du and JinQiao Duan. Invariant manifold reduction for stochastic dynamical systems. Dynamic Systems and Applications, 16:681-696, 2007. +KJ Falconer. Differentiation of the limit mapping in a dynamical system. Journal of the London Mathematical Society, 2(2):356-372, 1983. +Benjamin Fehrman, Benjamin Gess, and Arnulf Jentzen. Convergence rates for the stochastic gradient descent method for non-convex objective functions. Journal of Machine Learning Research, 21:136, 2020. +Damir Filipović. Invariant manifolds for weak solutions to stochastic equations. *Probability theory and related fields*, 118(3):323-341, 2000. + +Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. 
In International Conference on Learning Representations, 2021. +Margalit R Glasgow, Honglin Yuan, and Tengyu Ma. Sharp bounds for federated averaging (Local SGD) and continuous perspective. In International Conference on Artificial Intelligence and Statistics, pp. 9050-9090. PMLR, 2022. +Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch SGD: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017. +Farzin Haddadpour, Mohammad Mahdi Kamani, Mehrdad Mahdavi, and Viveck Cadambe. Local SGD with periodic averaging: Tighter analysis and adaptive synchronization. Advances in Neural Information Processing Systems, 32, 2019. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. In Proceedings of the IEEE international conference on computer vision, pp. 1026-1034, 2015. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016. +Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. +Sepp Hochreiter and Jürgen Schmidhuber. Flat minima. Neural computation, 9(1):1-42, 1997. +Elad Hoffer, Itay Hubara, and Daniel Soudry. Train longer, generalize better: closing the generalization gap in large batch training of neural networks. Advances in neural information processing systems, 30, 2017. +Wenqing Hu, Chris Junchi Li, Lei Li, and Jian-Guo Liu. On the diffusion approximation of nonconvex stochastic gradient descent. arXiv preprint arXiv:1705.07562, 2017. +Hikaru Ibayashi and Masaaki Imaizumi. Exponential escape efficiency of SGD from sharp minima in non-stationary regime. arXiv preprint arXiv:2111.04004, 2021. 
+Stanisław Jastrzebski, Zachary Kenton, Devansh Arpit, Nicolas Ballas, Asja Fischer, Yoshua Bengio, and Amos Storkey. Three factors influencing minima in SGD. arXiv preprint arXiv:1711.04623, 2017. +Xianyan Jia, Shutao Song, Wei He, Yangzihao Wang, Haidong Rong, Feihu Zhou, Liqiang Xie, Zhenyu Guo, Yuzhou Yang, Liwei Yu, et al. Highly scalable deep learning training system with mixed-precision: Training imagenet in four minutes. Advances in Neural Information Processing Systems, 2018. +Yiding Jiang, Behnam Neyshabur, Hossein Mobahi, Dilip Krishnan, and Samy Bengio. *Fantastic generalization measures and where to find them.* In International Conference on Learning Representations, 2020. +Peter Kairouz, H Brendan McMahan, Brendan Avent, Aurélien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. Advances and open problems in federated learning. Foundations and Trends® in Machine Learning, 14(1-2):1-210, 2021. +Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank Reddi, Sebastian Stich, and Ananda Theertha Suresh. Scaffold: Stochastic controlled averaging for federated learning. In International Conference on Machine Learning, pp. 5132-5143. PMLR, 2020. +G. S. Katzenberger. Solutions of a stochastic differential equation forced onto a manifold by a large drift. The Annals of Probability, 19(4):1587 - 1628, 1991. + +Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. In International Conference on Learning Representations, 2017. +Ahmed Khaled, Konstantin Mishchenko, and Peter Richtárik. Tighter theory for local SGD on identical and heterogeneous data. In International Conference on Artificial Intelligence and Statistics, pp. 4519-4529. PMLR, 2020. +Bobby Kleinberg, Yanzhi Li, and Yang Yuan. An alternative view: When does SGD escape local minima? 
In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 2698-2707. PMLR, 10-15 Jul 2018. +Alex Krizhevsky. One weird trick for parallelizing convolutional neural networks. arXiv preprint arXiv:1404.5997, 2014. +Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009. +Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/, 2022. +Yann A. LeCun, Léon Bottou, Genevieve B. Orr, and Klaus-Robert Müller. Efficient BackProp, pp. 9-48. Springer Berlin Heidelberg, Berlin, Heidelberg, 2012. ISBN 978-3-642-35289-8. doi: 10.1007/978-3-642-35289-8_3. +Qianxiao Li, Cheng Tai, and Weinan E. Stochastic modified equations and dynamics of stochastic gradient algorithms i: Mathematical foundations. Journal of Machine Learning Research, 20(40): 1-47, 2019a. +Xiang Li, Kaixuan Huang, Wenhao Yang, Shusen Wang, and Zhihua Zhang. On the convergence of fedavg on non-iid data. In International Conference on Learning Representations, 2019b. +Zhiyuan Li, Kaifeng Lyu, and Sanjeev Arora. Reconciling modern deep learning with traditional optimization analyses: The intrinsic learning rate. Advances in Neural Information Processing Systems, 33:14544-14555, 2020. +Zhiyuan Li, Sadhika Malladi, and Sanjeev Arora. On the validity of modeling SGD with stochastic differential equations (sdes). Advances in Neural Information Processing Systems, 34:12712-12725, 2021a. +Zhiyuan Li, Tianhao Wang, and Sanjeev Arora. What happens after SGD reaches zero loss? a mathematical framework. In International Conference on Learning Representations, 2021b. +Zhiyuan Li, Tianhao Wang, and Dingli Yu. Fast mixing of stochastic gradient descent with normalization and weight decay. In Alice H. 
Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. +Tao Lin, Lingjing Kong, Sebastian Stich, and Martin Jaggi. Extrapolation for large-batch training in deep learning. In Hal Daumé III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 6094-6104. PMLR, 13-18 Jul 2020a. +Tao Lin, Sebastian U. Stich, Kumar Kshitij Patel, and Martin Jaggi. Don't use large mini-batches, use Local SGD. In International Conference on Learning Representations, 2020b. +Kaifeng Lyu, Zhiyuan Li, and Sanjeev Arora. Understanding the generalization benefit of normalization layers: Sharpness reduction, 2022. +Chao Ma and Lexing Ying. On linear stability of SGD and input-smoothness of neural networks. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, volume 34, pp. 16805-16817. Curran Associates, Inc., 2021. + +Sadhika Malladi, Kaifeng Lyu, Abhishek Panigrahi, and Sanjeev Arora. On the SDEs and scaling rules for adaptive gradient algorithms. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. +Gideon Mann, Ryan T. McDonald, Mehryar Mohri, Nathan Silberman, and Dan Walker. Efficient large-scale distributed training of conditional maximum entropy models. In Advances in Neural Information Processing Systems 22, pp. 1231-1239, 2009. +Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics, pp. 1273-1282. PMLR, 2017. +Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al. 
Human-level control through deep reinforcement learning. nature, 518(7540):529-533, 2015. +Behnam Neyshabur, Srinadh Bhojanapalli, David Mcallester, and Nati Srebro. Exploring generalization in deep learning. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. +Jose Javier Gonzalez Ortiz, Jonathan Frankle, Mike Rabbat, Ari Morcos, and Nicolas Ballas. Trade-offs of Local SGD at scale: An empirical study. arXiv preprint arXiv:2110.08133, 2021. +Daniel Povey, Xiaohui Zhang, and Sanjeev Khudanpur. Parallel training of dnns with natural gradient and parameter averaging. arXiv preprint arXiv:1410.7455, 2014. +Prajit Ramachandran, Barret Zoph, and Quoc V Le. Searching for activation functions. arXiv preprint arXiv:1710.05941, 2017. +Benjamin Recht, Christopher Ré, Stephen J. Wright, and Feng Niu. Hogwild: A lock-free approach to parallelizing stochastic gradient descent. In Advances in Neural Information Processing Systems 24, pp. 693-701, 2011. +Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 115(3):211-252, 2015. doi: 10.1007/s11263-015-0816-y. +Frank Seide, Hao Fu, Jasha Droppo, Gang Li, and Dong Yu. 1-bit stochastic gradient descent and its application to data-parallel distributed training of speech dnns. In Haizhou Li, Helen M. Meng, Bin Ma, Engsiong Chng, and Lei Xie (eds.), INTERSPEECH 2014, 15th Annual Conference of the International Speech Communication Association, Singapore, September 14-18, 2014, pp. 1058-1062. ISCA, 2014. URL http://www.isca-speech.org/archive/interspeech_2014/i14_1058.html. +Christopher J. 
Shallue, Jaehoon Lee, Joseph Antognini, Jascha Sohl-Dickstein, Roy Frostig, and George E. Dahl. Measuring the effects of data parallelism on neural network training. Journal of Machine Learning Research, 20(112):1-49, 2019. +K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, May 2015. +Samuel Smith, Erich Elsen, and Soham De. On the generalization benefit of noise in stochastic gradient descent. In Hal Daumé III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 9058-9067. PMLR, 13-18 Jul 2020. +Samuel L Smith, Benoit Dherin, David Barrett, and Soham De. On the origin of implicit regularization in stochastic gradient descent. In International Conference on Learning Representations, 2021. +Sebastian U Stich. Local SGD converges fast and communicates little. In International Conference on Learning Representations, 2018. + +Nikko Strom. Scalable distributed DNN training using commodity GPU cloud computing. In IN-TERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden, Germany, September 6-10, 2015, pp. 1488-1492. ISCA, 2015. +Hang Su and Haoyu Chen. Experiments on parallel training of deep neural network using model averaging. arXiv preprint arXiv:1507.01239, 2015. +Richard S. Sutton and Andrew G. Barto. Reinforcement learning - an introduction. Adaptive computation and machine learning. MIT Press, 1998. ISBN 978-0-262-19398-6. +Jianyu Wang and Gauri Joshi. Adaptive communication strategies to achieve the best error-routine trade-off in local-update SGD. Proceedings of Machine Learning and Systems, 1:212-229, 2019. +Jianyu Wang and Gauri Joshi. Cooperative SGD: A unified framework for the design and analysis of local-update SGD algorithms. Journal of Machine Learning Research, 22(213):1-50, 2021. 
+Jianyu Wang, Rudrajit Das, Gauri Joshi, Satyen Kale, Zheng Xu, and Tong Zhang. On the unreasonable effectiveness of federated averaging with heterogeneous data. arXiv preprint arXiv:2206.04723, 2022. +Blake Woodworth, Kumar Kshitij Patel, Sebastian Stich, Zhen Dai, Brian Bullins, Brendan Mcmahan, Ohad Shamir, and Nathan Srebro. Is local sgd better than minibatch sgd? In International Conference on Machine Learning, pp. 10334-10343. PMLR, 2020a. +Blake E Woodworth, Kumar Kshitij Patel, and Nati Srebro. Minibatch vs Local SGD for heterogeneous distributed learning. Advances in Neural Information Processing Systems, 33:6281-6292, 2020b. +Lei Wu, Chao Ma, and Weinan E. How sgd selects the global minima in over-parameterized learning: A dynamical stability perspective. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesà-Bianchi, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. +Zeke Xie, Issei Sato, and Masashi Sugiyama. A diffusion theory for deep learning dynamics: Stochastic gradient descent exponentially favors flat minima. In International Conference on Learning Representations, 2021. +Yang You, Zhao Zhang, Cho-Jui Hsieh, James Demmel, and Kurt Keutzer. Imagenet training in minutes. In Proceedings of the 47th International Conference on Parallel Processing, pp. 1-10, 2018. +Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In International Conference on Learning Representations, 2020. +Hao Yu, Sen Yang, and Shenghuo Zhu. Parallel restarted SGD with faster convergence and less communication: Demystifying why model averaging works for deep learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 5693-5700, 2019. 
+Jingzhao Zhang, Sai Praneeth Karimireddy, Andreas Veit, Seungyeon Kim, Sashank Reddi, Sanjiv Kumar, and Suvrit Sra. Why are adaptive methods good for attention models? Advances in Neural Information Processing Systems, 33:15383-15393, 2020. +Xiaohui Zhang, Jan Trmal, Daniel Povey, and Sanjeev Khudanpur. Improving deep neural network acoustic models using generalized maxout networks. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 215-219, 2014. doi: 10.1109/ICASSP.2014.6853589. +Fan Zhou and Guojing Cong. On the convergence properties of a k-step averaging stochastic gradient descent algorithm for nonconvex optimization. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18, pp. 3219-3227. International Joint Conferences on Artificial Intelligence Organization, 7 2018. doi: 10.24963/ijcai.2018/447. URL https://doi.org/10.24963/ijcai.2018/447. + +Zhanxing Zhu, Jingfeng Wu, Bing Yu, Lei Wu, and Jinwen Ma. The anisotropic noise in stochastic gradient descent: Its behavior of escaping from sharp minima and regularization effects. arXiv preprint arXiv:1803.00195, 2018. +Martin Zinkevich, Markus Weimer, Lihong Li, and Alex Smola. Parallelized stochastic gradient descent. In J. Lafferty, C. Williams, J. Shawe-Taylor, R. Zemel, and A. Culotta (eds.), Advances in Neural Information Processing Systems, volume 23. Curran Associates, Inc., 2010. +Bernt Øksendal. Stochastic differential equations: an introduction with applications. Springer Science & Business Media, 2013. + +# CONTENTS + +1 Introduction 1 +2 When does Local SGD Generalize Better? 
3 + +2.1 The Debate on Local SGD 3 +2.2 Key Factors: Small Learning Rate and Sufficient Training Time 4 + +3 Theoretical Analysis of Local SGD: The Slow SDE 5 + +3.1 Difficulty of Adapting the SDE Framework to Local SGD 6 +3.2SDE Approximation near the Minimizer Manifold 6 +3.3 Interpretation of the Slow SDEs 8 + +3.3.1 Interpretation of the Slow SDE for SGD. 8 +3.3.2 Local SGD Strengthens the Drift Term in Slow SDE. 9 +3.3.3 Theoretical Insights into Tuning the Number of Local Steps 9 + +4 Conclusions 9 + +A Additional Related Works 18 +B Additional Discussions 19 +C Implementation Details of Parallel SGD, Local SGD and Post-local SGD 20 +D Modeling Local SGD with Multiple Conventional SDEs 23 + +E Additional Interpretation of the Slow SDEs 23 + +E.1 Understanding the Diffusion Term in the Slow SDE 23 +E.2 The Effect of Global Batch Size on Generalization 24 + +F Additional Experimental Results 25 +G Discussions on Local SGD with Label Noise Regularization 27 + +G.1 The Slow SDE for Local SGD with Label Noise Regularization 27 +G.2 The Equivalence of Enlarging the Learning Rate and Adding Local Steps 28 + +H Deriving the Slow SDE after Applying the LSR 28 +I Proof of Theorem 3.1 30 +J Proof Outline of Main Theorems 33 +K Proof Details of Main Theorems 33 + +K.1 Additional Notations 34 + +K.2 Computing the Derivatives of the Limiting Mapping 34 +K.3 Preliminary Lemmas for GD and GF 35 +K.4 Construction of working zones 38 +K.5 Phase 1: Iterate Approaching the Manifold 39 + +K.5.1 Additional notations 39 +K.5.2 Proof for Subphase 1 39 +K.5.3 Proof for Subphase 2 43 + +K.6 Phase 2: Iterates Staying Close to Manifold 46 + +K.6.1 Additional notations 46 +K.6.2 Proof for the High Probability Bounds 46 + +K.7 Summary of the dynamics and Proof of Theorems J.1 and J.2 51 +K.8 Proof of Theorem 3.3 52 +K.9 Computing the Moments for One "Giant Step" 53 + +K.10 Proof of Weak Approximation 66 + +K.10.1 Preliminaries and additional notations 67 +K.10.2 Proof of the approximation 
in our context 68 + +# L Deriving the Slow SDE for Label Noise Regularization 72 + +# M Experimental Details 74 + +M.1 Post-local SGD Experiments in Section 1 74 +M.2 Experimental Details for Figures 2 and 5 74 +M.3 Details for Experiments in Figure 6. 75 +M.4 Details for Experiments on the Effect of the Diffusion Term 75 +M.5 Details for Experiments on the Effect of Global Batch Size 76 +M.6 Details for Experiments on Label Noise Regularization 76 + +# A ADDITIONAL RELATED WORKS + +**Optimization aspect of Local SGD.** Local SGD is a communication-efficient variant of parallel SGD, where multiple workers perform SGD independently and average the model parameters periodically. Dating back to Mann et al. (2009) and Zinkevich et al. (2010), this strategy has been widely adopted to reduce the communication cost and speed up training in both scenarios of data center distributed training (Chen & Huo, 2016; Zhang et al., 2014; Povey et al., 2014; Su & Chen, 2015) and Federated Learning (McMahan et al., 2017; Kairouz et al., 2021). To further accelerate training, Wang & Joshi (2019) and Haddadpour et al. (2019) proposed adaptive schemes for the averaging frequency, and Basu et al. (2019) combined Local SGD with gradient compression. Motivated to theoretically understand the empirical success of Local SGD, a lot of researchers analyzed the convergence rate of Local SGD under various settings, e.g., homogeneous/heterogeneous data and convex/non-convex objective functions. Among them, Yu et al. (2019); Stich (2018); Khaled et al. (2020); Woodworth et al. (2020a) focus on the homogeneous setting where data for each worker are independent and identically distributed (IID). Li et al. (2019b); Karimireddy et al. (2020); Glasgow et al. (2022); Woodworth et al. (2020b); Wang et al. (2022) study the heterogeneous setting, where workers have non-IID data and local updates may induce "client drift" (Karimireddy et al., 2020) and hurt optimization. 
The error bound of Local SGD obtained by these works is typically inferior to that of SGD with the same global batch size for fixed number of iterations/epochs and becomes worse as the number of local steps increases, revealing a trade-off between less communication and better optimization. In this paper, we are interested in the generalization aspect of Local SGD in the homogeneous setting, assuming the training loss can be optimized to a small value. + +Gradient noise and generalization. The effect of stochastic gradient noise on generalization has been studied from different aspects, e.g., changing the order of learning different patterns Li et al. (2019a), inducing an implicit regularizer in the second-order SDE approximation Smith et al. (2021); Li et al. (2019a). Our work follows a line of works studying the effect of noise in the lens of sharpness, which is long believed to be related to generalization Hochreiter & Schmidhuber (1997); Neyshabur et al. (2017). Keskar et al. (2017) empirically observed that large-batch training leads to worse generalization and sharper minima than small-batch training. Wu et al. (2018); Hu et al. (2017); Ma & Ying (2021) showed that gradient noise destabilizes the training around sharp minima, and Kleinberg et al. (2018); Zhu et al. (2018); Xie et al. (2021); Ibayashi & Imaizumi (2021) quantitatively characterized how SGD escapes sharp minima. The most related papers are Blanc et al. (2020); Damian et al. (2021); Li et al. (2021b), which focus on the training dynamics near a manifold of minima and study the effect of noise on sharpness (see also Section 3.2). Though the mathematical definition of sharpness may be vulnerable to the various symmetries in deep neural nets (Dinh et al., 2017), sharpness still appears to be one of the most promising tools for predicting generalization (Jiang et al., 2020; Foret et al., 2021). + +Improving generalization in large-batch training. 
The generalization issue of the large-batch (or full-batch) training has been observed as early as (Bengio, 2012; LeCun et al., 2012). As mentioned in Section 1, the generalization issue of large-batch training could be due to the lack of a sufficient amount of stochastic noise. To make up the noise in large-batch training, Krizhevsky (2014); Goyal et al. (2017) empirically discovered the Linear Scaling Rule for SGD, which suggests enlarging the learning rate proportionally to the batch size. Jastrzebski et al. (2017) adopted an SDE-based analysis to justify that this scaling rule indeed retains the same amount of noise as small-batch training (see also Section 3.1). However, the SDE approximation may fail if the learning rate is too large (Li et al., 2021a), especially in the early phase of training before the first learning rate decay (Smith et al., 2020). Shallue et al. (2019) demonstrated that generalization gap between small- and large-batch training can also depend on many other training hyperparameters. Besides enlarging the learning rate, other approaches have also been proposed to reduce the gap, including training longer (Hoffer et al., 2017), learning rate warmup (Goyal et al., 2017), LARS (You et al., 2018), LAMB (You et al., 2020). In this paper, we focus on using Local SGD to improve generalization, but adding local steps is a generic training trick that can also be combined with others, e.g., Local LARS (Lin et al., 2020b), Local Extrap-SGD (Lin et al., 2020a). + +# B ADDITIONAL DISCUSSIONS + +Connection to the conventional wisdom that the diffusion term matters more. As mentioned in Section 3.1, it is believed in the literature is that a large diffusion term in the conventional SDE leads to good generalization. One may think that the diffusion term in the Slow SDE corresponds to that in the conventional SDE, and thus enlarging the diffusion term rather than the drift term should lead to better generalization. 
However, we note that both the diffusion and drift terms in the Slow SDEs result from the long-term effects of the diffusion term in the conventional SDE (Slow SDEs become stationary if $\Sigma = 0$ ). This means our view characterizes the role of gradient noise in more detail, and therefore, goes one step further on the conventional wisdom. + +Slow SDEs for neural nets with modern training techniques. In modern neural net training, it is common to add normalization layers and weight decay ( $L^2$ -regularization) for better optimization and generalization. However, these techniques lead to violations of our assumptions, e.g., no fixed point exists in the regularized loss (Li et al., 2020; Ahn et al., 2022). Still, a minimizer manifold can be expected to exist for the unregularized loss. Li et al. (2022) noted that the drift and diffusion around the manifold proceeds faster in this case, and derived a Slow SDE for SGD that captures $\mathcal{O}\left(\frac{1}{\eta} \log \frac{1}{\eta}\right)$ discrete steps instead of $\mathcal{O}\left(\frac{1}{\eta^2}\right)$ . We believe that our analysis can also be extended to this case, and that adding local steps still results in the effect of strengthening the drift term. + +# C IMPLEMENTATION DETAILS OF PARALLEL SGD, LOCAL SGD AND POST-LOCAL SGD + +In this section, we present the formal procedures for Parallel SGD, Local SGD and Post-local SGD. Given a training dataset and a data augmentation function, Algorithms 1 and 2 show the implementations of distributed samplers for sampling local batches with and without replacement. Then Algorithms 3 to 5 show the implementations of parallel SGD, Local SGD and Post-local SGD that can run with either of the samplers. + +Sampling with replacement. Our theory analyzes parallel SGD, Local SGD and Post-local SGD when local batches are sampled with replacement (Algorithm 1). 
That is, local batches consist of IID samples from the same training distribution $\hat{D}$ , where $\hat{D}$ serves as an abstraction of the distribution of an augmented sample drawn from the training dataset. The mathematical formulations are given in Section 1. + +Sampling without replacement. Slightly different from our theory, we use the sampling without replacement (Algorithm 2) in our experiments unless otherwise stated. This sampling scheme is standard in practice: it is used by Goyal et al. (2017) for parallel SGD and by Lin et al. (2020b); Ortiz et al. (2021) for Post-local/Local SGD. This sampling scheme works as follows. At the beginning of every epoch, the whole training dataset is shuffled and evenly partitioned into $K$ shards. Each worker takes one shard and samples batches without replacement. When all workers pass their own shard, the next epoch begins and the whole dataset is reshuffled. An alternative view is that the workers always share the same dataset. For each epoch, they perform local steps by sampling batches of data without replacement until the dataset contains too few data to form a batch. Then another epoch starts with the dataset reloaded to the initial state. + +Discrepancy in Sampling Schemes. We argue that this discrepancy between theory and experiments on sample schemes is minor. Though sampling without replacement is standard in practice, most previous works, e.g., Wang & Joshi (2019); Li et al. (2021a); Zhang et al. (2020), analyze sampling with replacement for technical simplicity and yields meaningful results. + +Moreover, even if we change the sampling scheme to with replacement, Local SGD can still improve the generalization of SGD (by merely adding local steps). See Appendix F for the experiments. We believe that the reasons for better generalization of Local SGD with either sampling scheme are similar and leave the analysis for sampling without replacement for future work. 
+ +Algorithm 1: Distributed Sampler on $K$ Workers (Sampling with Replacement) +Require: shared training dataset $\mathcal{D}$ , data augmentation function $\mathcal{A}(\hat{\xi})$ +Hyperparameters: local batch size $B_{\mathrm{loc}}$ +Function Sample () on worker k: Draw $B_{\mathrm{loc}}$ IID samples $\hat{\xi}_1,\dots ,\hat{\xi}_{B_{\mathrm{loc}}}$ from $\mathcal{D}$ with replacement; $\xi_b\gets \mathcal{A}(\hat{\xi}_b)$ for all $1\leq b\leq B_{\mathrm{loc}}$ // apply data augmentation + +Algorithm 2: Distributed Sampler on $K$ Workers (Sampling without Replacement) +Require: shared training dataset $\mathcal{D}$ , data augmentation function $\mathcal{A}(\hat{\xi})$ +Hyperparameters: local batch size $B_{\mathrm{loc}}$ +Constant: $N_{\mathrm{loc}}\coloneqq \left\lfloor \frac{|D|}{KB_{\mathrm{loc}}}\right\rfloor$ // number of local batches per worker per epoch +Local Variables: $c^{(k)}\gets N_{\mathrm{loc}}B_{\mathrm{loc}}$ for worker k // number of samples drawn in this epoch +Function Sample () on worker k: +if $c^{(k)} = N_{\mathrm{loc}}B_{\mathrm{loc}}$ then // Now start a new epoch Wait until all the other workers reach this line; // synchronize Draw a random permutation $P$ of 1,..., $|D|$ jointly with other workers so that the same permutation is shared among all workers; // reshuffle the dataset $Q_{j}^{(k)}\gets P_{(k - 1)N_{\mathrm{loc}}B_{\mathrm{loc}} + j}$ for all $1\leq j\leq N_{\mathrm{loc}}B_{\mathrm{loc}}$ // partition the dataset $c^{(k)}\gets 0$ end +for $i = 1,\dots ,B_{\mathrm{loc}}$ do $\hat{\xi}_i\gets$ the $Q_{c^{(k)} + i}^{(k)}$ th data point of $\mathcal{D}$ // sample without replacement $\xi_i\gets \mathcal{A}(\hat{\xi}_i)$ // apply data augmentation +end + $c^{(k)}\gets c^{(k)} + B_{\mathrm{loc}}$ . +return $(\xi_1,\ldots ,\xi_{B_{\mathrm{loc}}})$ . 
+ +Algorithm 3: Parallel SGD on $K$ Workers +Input: loss function $\ell (\pmb {\theta};\xi)$ , initial parameter $\pmb{\theta}_{0}$ +Hyperparameters: total number of iterations $T$ , learning rate $\eta$ , local batch size $B_{\mathrm{loc}}$ +for $t = 0,\dots ,T - 1$ do +for each worker k do in parallel + $(\xi_{k,t,1},\ldots ,\xi_{k,t,B_{\mathrm{loc}}})\gets \mathrm{Sample}()$ // sample a local batch + $g_{k,t}\gets \frac{1}{B_{\mathrm{loc}}}\sum_{i = 1}^{B_{\mathrm{loc}}}\nabla \ell (\pmb {\theta}_t;\xi_{k,t,i})$ // computing the local gradient +end + $g_{t}\gets \frac{1}{K}\sum_{k = 1}^{K}g_{k,t}$ // all-Reduce aggregation of local gradients + $\pmb{\theta}_{t + 1}\gets \pmb{\theta}_{t} - \eta \pmb{g}_{t}$ // update the model +end + +Algorithm 4: Local SGD on $K$ Workers +Input: loss function $\ell (\pmb {\theta};\xi)$ , initial parameter $\bar{\theta}^{(0)}$ +Hyperparameters: total number of rounds $R$ , number of local steps $H$ per round +Hyperparameters: learning rate $\eta$ , local batch size $B_{\mathrm{loc}}$ +for $s = 0,\dots ,R - 1$ do +for each worker k do in parallel $\theta_{k,0}^{(s)}\gets \bar{\theta}^{(s)};$ // maintain a local copy of the global iterate +for $t = 0,\ldots ,H - 1$ do $(\xi_{k,t,1}^{(s)},\dots ,\xi_{k,t,B_{\mathrm{loc}}}^{(s)})\leftarrow \mathrm{Sample}()$ // sample a local batch + $g_{k,t}^{(s)}\leftarrow \frac{1}{B_{\mathrm{loc}}}\sum_{i = 1}^{B_{\mathrm{loc}}}\nabla \ell (\pmb{\theta}_{k,t}^{(s)};\xi_{k,t,i}^{(s)})$ // computing the local gradient + $\theta_{k,t + 1}^{(s)}\gets \theta_{k,t}^{(s)} - \eta g_{k,t}^{(s)}$ // update the local model +end +end + $\bar{\theta}^{(s + 1)}\gets \frac{1}{K}\sum_{k = 1}^{K}\theta_{k,H}^{(s)}$ // all-Reduce aggregation of local iterates +end + +Algorithm 5: Post-local SGD on $K$ Workers +1 Input: loss function $\ell (\pmb {\theta};\xi)$ , initial parameter $\pmb{\theta}_{0}$ +2 Hyperparameters: total number of iterations $T$ , learning rate $\eta$ , local batch size $B_{\mathrm{loc}}$ +3 
Hyperparameters: switching time point $t_0$ , number of local steps $H$ per round +4 Ensure: $T - t_0$ is a multiple of $H$ +5 Starting from $\pmb{\theta}_{0}$ , run Parallel SGD for $t_0$ iterations and obtain $\pmb{\theta}_{t_0}$ . +6 Starting from $\pmb{\theta}_{t_0}$ , run Local SGD for $\frac{1}{H} (T - t_0)$ rounds with $H$ local steps per round; +7 return the final global iterate of Local SGD ; + +# D MODELING LOCAL SGD WITH MULTIPLE CONVENTIONAL SDES + +Lin et al. (2020b) tried to informally explain the success of Local SGD by adopting the argument that a larger diffusion term in the conventional SDE leads to better generalization (see Section 3.1 and Appendix A). Basically, they attempted to write multiple SDEs, each of which describes the $H$ -step local training process of each worker in each round (from $\theta_{k,0}^{(s)}$ to $\theta_{k,H}^{(s)}$ ). The key difference between each of these SDEs and the SDE for SGD (3) is that the former one has a larger diffusion term because the workers use batch size $B_{\mathrm{loc}}$ instead of $B$ : + +$$ +\mathrm {d} \boldsymbol {X} (t) = - \nabla \mathcal {L} (\boldsymbol {X}) \mathrm {d} t + \sqrt {\frac {\eta}{B _ {\mathrm {l o c}}}} \boldsymbol {\Sigma} ^ {1 / 2} (\boldsymbol {X}) \mathrm {d} \boldsymbol {W} _ {t}. \tag {10} +$$ + +Lin et al. (2020b) then argue that the total amount of "noise" in the training dynamics of Local SGD is larger than that of SGD. However, it is hard to see whether it is indeed larger, since the model averaging step at the end of each round can reduce the variance in training and may cancel the effect of having larger diffusion terms. + +More formally, a complete modeling of Local SGD following this idea should view the sequence of global iterates $\{\bar{\theta}^{(s)}\}$ as a Markov process $\{X^{(s)}\}$ . Let $\mathcal{P}_X(x,B,t)$ denote the distribution of $X(t)$ in (3) with initial condition $X(0) = x$ . 
Then the Markov transition should be $X^{(s + 1)} = \frac{1}{K}\sum_{k = 1}^{K}X_{k,H}^{(s)}$ where $X_{1,H}^{(s)},\ldots ,X_{K,H}^{(s)}$ are $K$ independent samples from $\mathcal{P}_X(X^{(s)},B_{\mathrm{loc}},H\eta)$ , i.e., sampling from (10). + +Consider one round of model averaging. It is true that $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B_{\mathrm{loc}}, H\eta)$ may have a larger variance than the corresponding SGD baseline $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B, H\eta)$ because the former one has a smaller batch size. However, it is unclear whether $\mathbf{X}^{(s + 1)}$ also has a larger variance than $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B, H\eta)$ . This is because $\mathbf{X}^{(s + 1)}$ is the average of $K$ samples, which means we have to compare $\frac{1}{K}$ times the variance of $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B_{\mathrm{loc}}, H\eta)$ with the variance of $\mathcal{P}_{\mathbf{X}}(\mathbf{X}^{(s)}, B, H\eta)$ . Then it is unclear which one is larger. + +In the special case where $H\eta$ is small, $\mathcal{P}_X(X^{(s)},B_{\mathrm{loc}},H\eta)$ is approximately equal to the following Gaussian distribution: + +$$ +\mathcal {N} \left(\boldsymbol {X} ^ {(s)} - \eta H \nabla \mathcal {L} \left(\boldsymbol {X} ^ {(s)}\right), \frac {\eta^ {2} H}{B _ {\mathrm {l o c}}} \boldsymbol {\Sigma} \left(\boldsymbol {X} ^ {(s)}\right)\right) \tag {11} +$$ + +Then averaging over $K$ samples gives + +$$ +\mathcal {N} \left(\boldsymbol {X} ^ {(s)} - \eta H \nabla \mathcal {L} \left(\boldsymbol {X} ^ {(s)}\right), \frac {\eta^ {2} H}{B} \boldsymbol {\Sigma} \left(\boldsymbol {X} ^ {(s)}\right)\right), \tag {12} +$$ + +which is exactly the same as the Gaussian approximation of the SGD baseline. This means there do exist certain cases where Lin et al. (2020b)'s argument does not give a good separation between Local SGD and SGD. 
+ +Moreover, we do not gain any further insights from this modeling since it is hard to see how model averaging interacts with the SDEs. + +# E ADDITIONAL INTERPRETATION OF THE SLOW SDES + +# E.1 UNDERSTANDING THE DIFFUSION TERM IN THE SLOW SDE + +So far, we have discussed why adding local steps enlarges the drift term in the Slow SDE and why enlarging the drift term can benefit generalization. Besides this, here we remark that another way to accelerate the corresponding semi-gradient method for minimizing the implicit regularizer is to reduce the diffusion term, so that the trajectory more closely follows the drift term. More formally, we propose the following: + +Hypothesis E.1. Starting at a minimizer $\zeta_0\in \Gamma$ , run $(\kappa_{1},\kappa_{2})$ -Slow SDE and $(\kappa_{1}^{\prime},\kappa_{2})$ -Slow SDE respectively for the same amount of time $T > 0$ and obtain $\zeta (T),\zeta '(T)$ . If $\pmb{\Sigma}_{\parallel}\neq \mathbf{0}$ and $\kappa_{1} < \kappa_{1}^{\prime}$ , then the expected test accuracy at $\zeta (T)$ is better than that at $\zeta^{\prime}(T)$ . + +![](images/e0c0b984628c8eae8c0add91a8226ad06ea92b62a4d4e5aef542e842730b3d1d.jpg) +(a) CIFAR-10, $H = 600$ for $K > 1$ . +(a) diffusion (unchanged) +(b) drift-I (unchanged) + +![](images/8ff4d6cbc4e8c06b0fb2a0179d56166cdb3698769edde39d3752e98467c422f8.jpg) +(b) ImageNet, $H = 78$ for $K > 1$ . +Figure 3: Reducing the diffusion term of the Slow SDE for Local SGD leads to better generalization. Test accuracy improves as we increase $K$ with fixed $\eta$ and $H$ to reduce the diffusion term while keeping the drift term untouched. See Appendix M.4 for details. +(c) drift-II (rescaled) + +Here we exclude the case of $\boldsymbol{\Sigma}_{\parallel} \equiv \mathbf{0}$ because in this case the diffusion term in the Slow SDE is always zero. 
To verify Hypothesis E.1, we set the product $\alpha \coloneqq \eta H$ large, keep $H, \eta$ fixed, increase the number of workers $K$ , and compare the generalization performances after a fixed amount of training steps (but after different numbers of epochs). This case corresponds to the $(\frac{1}{KB_{\mathrm{loc}}}, \frac{1}{2B_{\mathrm{loc}}})$ -Slow SDE, so adding more workers should reduce the diffusion term. As shown in Figure 3, a higher test accuracy is indeed achieved for larger $K$ . + +**Implication:** Enlarging the learning rate is not equally effective as adding local steps. Given that Local SGD improves generalization by strengthening the drift term, it is natural to wonder if enlarging the learning rate of SGD would also lead to similar improvements. While it is true that enlarging the learning rate effectively increases the drift term, it also increases the diffusion term simultaneously, which can hinder the implicit regularization by Hypothesis E.1. In contrast, adding local steps does not change the diffusion term. As shown in Figure 6(a), even when the learning rate of SGD is increased, SGD still underperforms Local SGD by about $2\%$ in test accuracy. + +On the other hand, in the special case of where $\pmb{\Sigma}_{\parallel} \equiv \mathbf{0}$ , Hypothesis E.1 does not hold, and enlarging the learning rate by $\sqrt{K}$ results in the same Slow SDE as adding local steps (see Appendix G for derivation). Then these two actions should produce the same generalization improvement, unless the learning rate is so large that Slow SDE loses track of the training dynamics. As an example of such a special case, an experiment with label noise regularization is presented in Figure 8. + +# E.2 THE EFFECT OF GLOBAL BATCH SIZE ON GENERALIZATION + +In this section, we discuss the effect of global batch size on the generalization of Local SGD. 
Given that the computation power of a single worker is limited, we consider the case where the local batch size $B_{\mathrm{loc}}$ is fixed and the global batch size $B = KB_{\mathrm{loc}}$ is tuned by adding or removing the workers. This scenario is relevant to the practice because one may want to know the maximum parallelism possible to train the neural net without causing generalization degradation. + +For SGD, previous works have proposed the Linear Scaling Rule (LSR) (Krizhevsky, 2014; Goyal et al., 2017; Jastrzebski et al., 2017): scaling the learning rate $\eta \mapsto \kappa \eta$ linearly with the global batch size $B \mapsto \kappa B$ yields the same conventional SDE (3) under a constant epoch budget, hence leading to almost the same generalization performance as long as the SDE approximation does not fail. + +We show in Theorem H.1 that the LSR does not change the Slow SDE of SGD either. Experiments in Figure 4 show that the LSR indeed holds nicely when we continue training with small learning rates from the same CIFAR-10 and ImageNet checkpoints as in Figure 2. Here we choose $K = 16$ and $K = 256$ as the base settings for CIFAR-10 and ImageNet, respectively, and then tune the learning rate to maximize the test accuracy. As shown in Figures 4(a) and 4(b), the optimal learning rate turns out to be small enough that the LSR can be applied to scale the global batch size with only a minor change in test accuracy. + +Now, assuming the learning rate is scaled as LSR, we study how to tune the number of local steps $H$ for Local SGD for better generalization. A natural choice is to tune $H$ in the base settings and keep $\alpha$ unchanged via scaling $H \mapsto H / \kappa$ . 
Then the following SDE can be derived (see Theorem H.2): + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\zeta} \left(\underbrace {\frac {1}{\sqrt {B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t}} _ {(a) \text {diffusion (unchanged)}} \underbrace {- \frac {1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {(b) \text {drift-I (unchanged)}} \underbrace {- \frac {\kappa K - 1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Psi}} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {(c) \text {drift-II (rescaled)}}\right). \tag {13} +$$ + +![](images/3676ff827c60f1a31442e006bbe0b8dc2e7b554d1a323506edaa5e06d9e514d2.jpg) +(a) CIFAR-10, start from #250. + +![](images/a1e59652e5f629d271daf5ab8ac58410d3e6f780b4b78d3f02453fe297cacbb9.jpg) +(b) ImageNet, start from #100. + +![](images/16d0dcb2cbba12d9c0d81b5872aaf76706eeae69248c986653a788fee20e9ca3.jpg) +(c) CIFAR-10, start from #250. + +![](images/d3af3481e36bca72561c65e4fdd5cf3f1fcc7663812a8c6632e27435a132cad4.jpg) +(d) ImageNet, start from #100. +Figure 4: For training from CIFAR-10 and ImageNet checkpoints, Local SGD consistently outperforms SGD $(H = 1)$ across different batch sizes $B$ (fixing $B_{\mathrm{loc}}$ and varying $K$ ), where the learning rate is scaled by the LSR $\eta \propto B$ . Two possible ways of tuning the number of local steps $H$ are considered: (1). Tune $H$ for the best test accuracy for $K = 16$ and $K = 256$ respectively on CIFAR-10 and ImageNet, then scale $H$ as $H \propto 1 / B$ so that $\alpha \coloneqq \eta H$ is constant; (2). Tune $H$ specifically for each $K$ . See Appendix M.5 for training details. + +Compared with (4), the drift-II term here is rescaled by a positive factor. 
Again, when $\alpha$ is large, we can follow the argument in Section 3.3.2 to approximate $\widehat{\Psi} (\zeta)\approx \widehat{\Sigma}_{\diamond}(\zeta)$ and obtain the following $(\frac{1}{B},\frac{\kappa K}{B})$ -Slow SDE: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\boldsymbol {\zeta}} \left(\frac {1}{\sqrt {B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} (t) - \frac {\kappa K}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t\right). \tag {14} +$$ + +The drift term of the above SDE is always stronger than SGD (7), as long as there exists more than one worker after the scaling (i.e., $\kappa K > 1$ ). As expected from Hypothesis 3.1, we observed in the experiments that the generalization performance of Local SGD is always better than or at least comparable to SGD across different batch sizes (see Figures 4(a) and 4(b)). + +Taking a closer look into the drift term in the Slow SDE (14), we can find that it scales linearly with $\kappa$ . According to Hypothesis 3.1, the SDE is expected to generalize better when adding more workers ( $\kappa > 1$ ) and to generalize worse when removing some workers ( $\kappa < 1$ ). For the latter case, we indeed observed that the test accuracy of Local SGD drops when removing workers. For the case of adding workers, however, we also need to take into account that the LSR specifies a larger learning rate and causes a larger SDE approximation error for the same $\alpha$ , which may cancel the generalization improvement brought by strengthening the drift term. In the experiments, we observed that the test accuracy does not rise when adding more workers to the base settings. 
+ +Since $\alpha$ also controls the regularization strength (Section 3.3.3), it would be beneficial to decrease $\alpha$ for large batch size so as to better trade-off between regularization strength and approximation quality. In Figures 4(c) and 4(d), we plot the optimal value of $\alpha$ for each batch size, and we indeed observed that the optimal $\alpha$ drops as we scale up $K$ . Conversely, a smaller batch size (and hence a smaller learning rate) allows for using a larger $\alpha$ to enhance regularization while still keeping a low approximation error (Theorem 3.3). The test accuracy curves in Figures 4(a) and 4(b) indeed show that setting a larger $\alpha$ can compensate for the accuracy drop when reducing the batch size. + +# F ADDITIONAL EXPERIMENTAL RESULTS + +In this section, we present additional experimental results to further verify our finding. + +Supplementary Plot: Training time should be long enough. Figures 5(a) and 5(b) show enlarged views for Figures 2(a) and 2(c) respectively, showing that Local SGD can generalize worse than SGD in the first few epochs. + +Supplementary Plot: Learning rate should be small. Figure 5(c) shows that reducing the learning rate from 0.32 to 0.064 does not lead to test accuracy drop for Local SGD on CIFAR-10, if the training time is allowed to be longer and the number of local steps $H$ is set properly. Figure 5(d) presents the case where, with a large learning rate, the generalization improvement of Local SGD disappears even starting from a pre-trained model. + +Supplementary Plot: Reconciling our main finding with Ortiz et al. (2021). In Figure 5(e), the generalization benefit of Local SGD with $H = 24$ becomes less significant after the learning rate decay at epoch 226, which is consistent with the observation by Ortiz et al. (2021) that the generalization benefit of Local SGD usually disappears after the learning rate decay. But we can preserve the improvement by increasing $H$ to 900. 
Here, we use Local SGD with momentum. + +Supplementary Plot: Optimal $\alpha$ gets larger for smaller $\eta$ . In Figure 5(f), we summarize the optimal $\alpha := \eta H$ that enables the highest test accuracy for each learning rate in Figure 2(f). We can see that the optimal $\alpha$ increases as we decrease the learning rate. The reason is that the approximation error bound $\mathcal{O}(\sqrt{\alpha\eta\log\frac{\alpha}{\eta\delta}})$ in Theorem 3.3 decreases with $\eta$ , allowing for a larger value of $\alpha$ to better regularize the model. + +![](images/4fe5d7371a7100bcba8d8ff41d407fafc7f8d0c675a2f7d7c27b6914f412512b.jpg) +(a) CIFAR-10, start from random. + +![](images/caa2fbfba3a461f8cfac378f2bf2bfdc47d2fa72a2416418e2b784b3ab4c599e.jpg) +(b) ImageNet, start from #250. + +![](images/d0d67ab8cec46763de39bf7c882d39a6c278122af06efbe7c944353a4661de2e.jpg) +(c) CIFAR-10, start from #100. + +![](images/1f251751866deb3e4fbe1cf8e012190ad7fb7e1404930e0e6f5b0b64dffd3e77.jpg) +(d) ImageNet, start from #100. + +![](images/63f25ecc16e69a31d38a161ee74e36cd48754561db452ccd63c53c38cd528ed1.jpg) +(e) CIFAR-10, start from #150. + +![](images/80b925b82ab5d697c33f6b926a904856dea02e9328d7cdb3ba25d81786e80019.jpg) +(f) ImageNet, optimal $\alpha$ v.s. $\eta$ . + +![](images/0e19c6e0ffe575769e7933dcb5dda9fdc3428f58151baedbe2d2e1b3a250b1ed.jpg) +Figure 5: Additional experimental results about the effect of the learning rate, training time and the number of local steps. See Appendix M.2 for details. +(a) SGD with various $\eta$ +Figure 6: Additional experimental results on CIFAR-10. See Appendix M.3 for details. + +![](images/61b57afd24323269dec4a5494e2a94ec95720c1ef0b1b6f084d37af55d1289d1.jpg) +(b) SGD with larger batch sizes. + +![](images/9ab02fb5af9589b4e7d99396f3e0415fad83b4d9248f9232a36feb2295e1c293.jpg) +(c) Post-local SGD, sampling with replacement. + +SGD generalizes worse even with extensively tuned learning rates. 
In Figure 6(a), we run SGD from both random initialization and the pre-trained model for another 3,000 epochs with various learning rates and report the test accuracy. We can see that none of the SGD runs beat Local SGD with the fixed learning rate $\eta = 0.32$ . Therefore, the inferior performance of SGD in Figures 2(a) and 2(b) is not due to the improper learning rate and Local SGD indeed generalizes better. + +SGD with larger batch sizes performs no better. In Figure 6(b), we enlarge the batch size of SGD and report the test accuracy for various learning rates. We can see that SGD with larger batch sizes performs no better and none of the SGD runs outperform Local SGD with the fixed learning rate $\eta = 0.32$ . This result is unsurprising since it is well established in the literature (Jastrzebski et al., 2017; Smith et al., 2020; Keskar et al., 2017) that larger batch size typically leads to worse generalization. See Appendix A for a survey of empirical and theoretical works on understanding and resolving this phenomenon. + +Sampling with or without replacement does not matter. Note that there is a slight discrepancy in sampling schemes between our theoretical and experimental setup: the update rules (1) and (2) assume that data are sampled with replacement while most experiments use sampling without replacement (Appendix C). To eliminate the effect of this discrepancy, we conduct additional experiments on Post-local SGD using sampling with replacement (see Figure 6(c)) and Post-local SGD significantly outperforms SGD. + +# G DISCUSSIONS ON LOCAL SGD WITH LABEL NOISE REGULARIZATION + +# G.1 THE SLOW SDE FOR LOCAL SGD WITH LABEL NOISE REGULARIZATION + +In this subsection, we present the Slow SDE for Local SGD in the case of label noise regularization and show that Local SGD indeed induces a stronger regularization term, which presumably leads to better generalization. + +Theorem G.1 (Slow SDE for Local SGD with label noise regularization). 
For a $C$ -class classification task with cross-entropy loss, the slow SDE of Local SGD with label noise has the following form: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = - \frac {1}{4 B} \nabla_ {\Gamma} \left(\operatorname {t r} \left(\nabla^ {2} \mathcal {L} (\boldsymbol {\zeta})\right) + (K - 1) \cdot \frac {\operatorname {t r} \left(F \left(2 H \eta \nabla^ {2} \mathcal {L} (\boldsymbol {\zeta})\right)\right)}{2 H \eta}\right) \mathrm {d} t, \tag {15} +$$ + +where $F(x) \coloneqq \int_0^x \psi(y) \, \mathrm{d}y$ and is interpreted as a matrix function. Additionally, $\nabla_{\Gamma} f$ stands for the gradient of a function $f$ projected to the tangent space of $\Gamma$ . + +Proof. See Appendix L. + +![](images/ce5c716a629e476b0ea86ea3fe76183d34c1f7f8c04e9a6e6b894dd51c2c08d7.jpg) + +Note that the magnitude of the RHS in (15) becomes larger as $H$ increases. By letting $H$ to go to infinity, we further have the following theorem. + +Theorem G.2. As the number of local steps $H$ goes to infinity, the slow SDE of Local SGD with label noise (15) can be simplified as: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = - \frac {K}{4 B} \nabla_ {\Gamma} \operatorname {t r} \left(\nabla^ {2} \mathcal {L} (\boldsymbol {\zeta})\right) \mathrm {d} t. \tag {16} +$$ + +Proof. We obtain the corollary by simply taking the limit. By L'Hospital's rule, + +$$ +\lim _ {x \rightarrow + \infty} \frac {F (a x)}{x} = \lim _ {x \rightarrow + \infty} \frac {\mathrm {d} F (a x)}{\mathrm {d} x} = \lim _ {x \rightarrow + \infty} a \psi (a x) = a. +$$ + +Therefore, + +$$ +\lim _ {x \rightarrow + \infty} \frac {\operatorname {t r} (F (2 H \eta \nabla^ {2} \mathcal {L} (\zeta)))}{2 H \eta} = \operatorname {t r} (\nabla^ {2} \mathcal {L} (\zeta)). \tag {17} +$$ + +Substituting (17) into (15) yields (16). 
+ +![](images/f21d3d40f406238e9caec9860c77af2b568b84405a6757c7b980202a49f88a24.jpg) + +As introduced in Section 3.3, the Slow SDE for SGD with label noise regularization has the following form: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = - \frac {1}{4 B} \nabla_ {\Gamma} \operatorname {t r} \left(\nabla^ {2} \mathcal {L} (\boldsymbol {\zeta})\right) \mathrm {d} t, \tag {18} +$$ + +which is a deterministic flow that keeps reducing the trace of Hessian. + +![](images/95fa377cc60c341232565c9293256304933bfec9021c461af4ee1f75a97d2d61.jpg) +(a) ResNet-56 + GroupNorm. + +![](images/4d961518e31054ba64c59978b73d65730b2d0232abf93fea368ff5d0e48d6c9a.jpg) +(b) VGG-16 w/o normalization. +Figure 7: Local SGD with label noise regularization on CIFAR-10 without data augmentation using $K = 32$ , $B_{\mathrm{loc}} = 128$ . A larger number of local steps indeed enables higher test accuracy. For both architectures, we replace ReLU with Swish. See Appendix M.6 for training details. + +As the trace of Hessian can be seen as a measure for the sharpness of the local loss landscape, (18) indicates that SGD with label noise regularization has an implicit bias toward flatter minima, which presumably promotes generalization (Hochreiter & Schmidhuber, 1997; Keskar et al., 2017; Neyshabur et al., 2017). More concretely, Blanc et al. (2020) and Li et al. (2021b) connect minimizing the trace of Hessian to finding sparse or low-rank solutions for training two-layer linear nets. Damian et al. (2021) empirically showed that good generalization correlates with a smaller trace of Hessian in training ResNets with label noise. Besides, Ma & Ying (2021) connect the trace of Hessian to the smoothness of the function represented by a deep neural net. + +From Theorems G.1 and G.2, we can conclude that Local SGD accelerates the process of sharpness reduction, thereby leading to better generalization. 
Furthermore, the regularization effect gets stronger for larger $H$ and is approximately $K$ times that of SGD. We also conduct experiments on non-augmented CIFAR-10 with label noise regularization to verify our conclusion. As shown in Figure 7, increasing the number of local steps indeed gives better generalization performance. + +# G.2 THE EQUIVALENCE OF ENLARGING THE LEARNING RATE AND ADDING LOCAL STEPS + +In this subsection, we explain in detail why training with label noise regularization is a special case where enlarging the learning rate of SGD can bring the same generalization benefit as adding local steps. When we scale up the learning rate of SGD $\eta \mapsto \kappa \eta$ (while keeping other hyperparameters unchanged), the corresponding Slow SDE is (18) with time horizon $\kappa^2 T$ instead of $T$ , where SGD tracks a continuous interval of $\kappa^2 \eta^2$ per step instead of $\eta^2$ . After rescaling the time horizon to $T$ so that SGD tracks a continuous interval of $\eta^2$ per step, we obtain + +$$ +\mathrm {d} \zeta (t) = - \frac {\kappa^ {2}}{4 B} \nabla_ {\Gamma} \operatorname {t r} \left(\nabla^ {2} \mathcal {L} (\zeta)\right) \mathrm {d} t. \tag {19} +$$ + +Let $\kappa = \sqrt{K}$ in (19) and we obtain the same Slow SDE as (16), which is for Local SGD with a large number of local steps. In Figure 8, we conduct experiments to verify that SGD indeed achieves comparable test accuracy to that of Local SGD with a large $H$ if its learning rate is scaled up to $\sqrt{K}$ times that of Local SGD. + +# H DERIVING THE SLOW SDE AFTER APPLYING THE LSR + +In this section, we derive the Slow SDEs for SGD and Local SGD after applying the LSR in Appendix E.2. The results are formally summarized in the following theorems. + +Theorem H.1 (Slow SDE for SGD after applying the LSR). Let Assumptions 3.1 to 3.3 hold. Assume that we run SGD with learning rate $\eta' = \kappa \eta$ and the number of workers $K' = \kappa K$ for some constant $\kappa > 0$ . 
Let $T > 0$ be a constant and $\zeta(t)$ be the solution to (7) with the initial condition $\zeta(0) = \Phi(\theta_0) \in \Gamma$ . Then for any $\mathcal{C}^3$ -smooth function $g(\pmb{\theta})$ , $\max_{0 \leq s \leq \frac{\kappa T}{\eta'^2}} \left| \mathbb{E}[g(\Phi(\pmb{\theta}_s))] - \mathbb{E}[g(\pmb{\zeta}(s\eta'^2/\kappa))] \right| = \tilde{\mathcal{O}}(\eta'^{0.25})$ , where $\tilde{\mathcal{O}}(\cdot)$ hides log factors and constants that are independent of $\eta'$ but can depend on $g(\pmb{\theta})$ . + +![](images/930392f60ac43dce43184eddb2665e2ee62f7f7142ea9db31582d4f43c89203a.jpg) +Figure 8: Local SGD with label noise regularization on CIFAR-10 without data augmentation using $K = 4$ , $B_{\mathrm{loc}} = 128$ . SGD ( $H = 1$ ) indeed achieves comparable test accuracy as Local SGD with a large $H$ when we scale up its learning rate to $\sqrt{K}$ times that of Local SGD. See Appendix M.6 for training details. + +Proof. Replacing $B$ with $\kappa B$ in the original Slow SDE for SGD (7) gives the following Slow SDE: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\boldsymbol {\zeta}} \left(\underbrace {\frac {1}{\sqrt {\kappa B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t}} _ {\text {(a) diffusion}} \underbrace {- \frac {1}{2 \kappa B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {(b) drift}}\right). \tag {20} +$$ + +Note that the continuous time horizon for (20) is $\kappa T$ instead of $T$ since after applying the LSR, SGD tracks a continuous interval of $\kappa^2\eta^2$ per step instead of $\eta^2$ while the total number of steps is scaled down by $\kappa$ . We can then rescale the time scaling to obtain (7) that holds for $T$ . + +Theorem H.2 (Slow SDE for Local SGD after applying the LSR). Let Assumptions 3.1 to 3.3 hold. 
Assume that we run Local SGD with learning rate $\eta' = \kappa \eta$ , the number of workers $K' = \kappa K$ , and the number of local steps $H' = \frac{\alpha}{\kappa \eta}$ for some constants $\alpha, \kappa > 0$ . Let $T > 0$ be a constant and $\zeta(t)$ be the solution to (21) with the initial condition $\zeta(0) = \Phi(\bar{\theta}^{(0)}) \in \Gamma$ . Then for any $\mathcal{C}^3$ -smooth function $g(\pmb{\theta})$ , $\max_{0 \leq s \leq \frac{\kappa T}{H' \eta'^2}} |\mathbb{E}[g(\Phi(\bar{\theta}^{(s)})] - \mathbb{E}[g(\zeta(sH' \eta'^2 / \kappa)]| = \tilde{\mathcal{O}}(\eta'^{0.25})$ , where $\tilde{\mathcal{O}}(\cdot)$ hides log factors and constants that are independent of $\eta'$ but can depend on $g(\pmb{\theta})$ . + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\zeta} \left(\underbrace {\frac {1}{\sqrt {B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t}} _ {(a) \text {d i f f u s i o n (u n c h a n g e d)}} \underbrace {- \frac {1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {(b) \text {d r i f t - I (u n c h a n g e d)}} \underbrace {- \frac {\kappa K - 1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Psi}} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {(c) \text {d r i f t - I I (r e s c a l e d)}}\right). \tag {21} +$$ + +Proof. 
Replacing $B$ with $\kappa B$ in the original Slow SDE for Local SGD (4) gives the following Slow SDE: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\zeta} \left(\underbrace {\frac {1}{\sqrt {\kappa B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t}} _ {\text {(a) d i f f u s i o n}} \underbrace {- \frac {1}{2 \kappa B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {(b) d r i f t - I}} \underbrace {- \frac {\kappa K - 1}{2 \kappa B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Psi}} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {(c) d r i f t - I I}}\right). \tag {22} +$$ + +Note that the continuous time horizon for (22) is $\kappa T$ instead of $T$ since after applying the LSR, Local SGD tracks a continuous interval of $\kappa^2\eta^2$ per step instead of $\eta^2$ while the total number of steps is scaled down by $\kappa$ . We can then rescale the time scaling to obtain (21) that holds for $T$ . + +# I PROOF OF THEOREM 3.1 + +This section presents the proof for Theorem 3.1. First, we introduce some notations that will be used throughout this section. For the sequence of Local SGD iterates $\{\pmb{\theta}_{k,t}^{(s)}:k\in [K],0\leq t\leq H,s\geq 0\}$ , we introduce an auxiliary sequence $\{\hat{u}_t\}_{t\in \mathbb{N}}$ , which consists of GD iterates from $\bar{\pmb{\theta}}^{(0)}$ : + +$$ +\hat {\boldsymbol {u}} _ {0} = \bar {\boldsymbol {\theta}} ^ {(0)}, \qquad \hat {\boldsymbol {u}} _ {t + 1} \leftarrow \hat {\boldsymbol {u}} _ {t} - \eta \nabla \mathcal {L} (\hat {\boldsymbol {u}} _ {t}). +$$ + +For convenience, let $\hat{\pmb{u}}_t^{(s)}\coloneqq \hat{\pmb{u}}_{sH + t}$ and $\pmb {z}_{k,sH + t}\coloneqq \pmb{z}_{k,t}^{(s)}$ . We will use $\hat{\pmb{u}}_t^{(s)}$ and $\hat{\pmb{u}}_{sH + t},\pmb{z}_{k,t}^{(s)}$ and $\pmb {z}_{k,sH + t}$ interchangeably. 
Recall that we have assumed that $\mathcal{L}$ is $\mathcal{C}^3$ -smooth with bounded second and third order derivatives. Let $\nu_{2}\coloneqq \sup_{\pmb {\theta}\in \mathbb{R}^{d}}\| \nabla^{2}\mathcal{L}(\pmb {\theta})\|_{2}$ and $\nu_{3}\coloneqq \sup_{\pmb {\theta}\in \mathbb{R}^{d}}\| \nabla^{3}\mathcal{L}(\pmb {\theta})\|_{2}$ . Since $\nabla \ell (\pmb {\theta};\pmb {\zeta})$ is bounded, the gradient noise $z_{k,t}^{(s)}$ is also bounded. We denote by $\sigma_{\mathrm{max}}$ an upper bound such that $\| z_{k,t}^{(s)}\| _2\leq \sigma_{\mathrm{max}}$ holds for all $s,k,t$ + +To prove Theorem 3.1, we will show that both Local SGD iterates $\bar{\theta}^{(s)}$ and SGD iterates $\boldsymbol{w}_{sH}$ track GD iterates $\hat{\boldsymbol{u}}_{sH}$ closely with high probability. For each client $k$ , define the following sequence $\{\hat{Z}_{k,t}:t\geq 0\}$ , which will be used in the proof for bounding the overall effect of noise. + +$$ +\hat {\boldsymbol {Z}} _ {k, t} = \sum_ {\tau = 0} ^ {t - 1} \left[ \prod_ {l = \tau + 1} ^ {t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {l})\right) \right] \boldsymbol {z} _ {k, \tau}, \quad \hat {\boldsymbol {Z}} _ {k, 0} = \boldsymbol {0}, \quad \forall k \in [ K ]. +$$ + +The following lemma shows that $\hat{Z}_{k,t}$ is concentrated around the origin. + +Lemma I.1 (Concentration property of $\{\hat{Z}_{k,t}\}$ ). With probability at least $1 - \delta$ , the following holds simultaneously for all $k \in [K]$ , $0 \leq t < \left\lfloor \frac{T}{\eta} \right\rfloor$ : + +$$ +\| \hat {\boldsymbol {Z}} _ {k, t} \| _ {2} \leq \hat {C} _ {1} \sigma_ {\max } \sqrt {\frac {2 T}{\eta} \log \frac {2 T K}{\delta \eta}}, +$$ + +where $\hat{C}_1\coloneqq \exp (T\nu_2)$ + +Proof. 
For each $\hat{\mathbf{Z}}_{k,t}$ , construct a sequence $\{\hat{\mathbf{Z}}_{k,t,t'}\}_{t'=0}^t$ : + +$$
\hat {\boldsymbol {Z}} _ {k, t, t ^ {\prime}} := \sum_ {\tau = 0} ^ {t ^ {\prime} - 1} \left(\prod_ {l = \tau + 1} ^ {t - 1} (\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {l}))\right) \boldsymbol {z} _ {k, \tau}, \qquad \hat {\boldsymbol {Z}} _ {k, t, 0} = \boldsymbol {0}.
$$ + +Since $\| \nabla^2\mathcal{L}(\hat{\boldsymbol{u}}_l)\| _2\leq \nu_2$ for all $l\geq 0$ , the following holds for all $0\leq \tau < t - 1$ and $0 < t < \lfloor \frac{T}{\eta}\rfloor$ : + +$$
\left\| \prod_ {l = \tau + 1} ^ {t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {l})\right) \right\| _ {2} \leq (1 + \nu_ {2} \eta) ^ {t} \leq \exp (T \nu_ {2}) = \hat {C} _ {1}.
$$ + +So $\{\hat{Z}_{k,t,t'}\}_{t' = 0}^t$ is a martingale with $\| \hat{Z}_{k,t,t'} - \hat{Z}_{k,t,t' - 1}\| _2\leq \hat{C}_1\sigma_{\max}$ . Since $\hat{Z}_{k,t} = \hat{Z}_{k,t,t}$ , by Azuma-Hoeffding's inequality, + +$$
\mathbb {P} (\| \hat {\boldsymbol {Z}} _ {k, t} \| _ {2} \geq \epsilon^ {\prime}) \leq 2 \exp \left(\frac {- \epsilon^ {\prime 2}}{2 t \left(\hat {C} _ {1} \sigma_ {\max }\right) ^ {2}}\right).
$$ + +Taking union bound on all $k \in [K]$ and $0 \leq t < \left\lfloor \frac{T}{\eta} \right\rfloor$ , we can conclude that with probability at least $1 - \delta$ , + +$$
\| \hat {\boldsymbol {Z}} _ {k, t} \| _ {2} \leq \hat {C} _ {1} \sigma_ {\max } \sqrt {\frac {2 T}{\eta} \log \frac {2 T K}{\delta \eta}}, \quad \forall 0 \leq t < \left\lfloor \frac {T}{\eta} \right\rfloor , k \in [ K ].
$$ + +![](images/449f7d06a1a0e66247d3a67f4b5f904d041aed7497481a0dab7af257988683a6.jpg) + +The following lemma states that, with high probability, Local SGD iterates $\theta_{k,t}^{(s)}$ and $\bar{\theta}^{(s)}$ closely track the gradient descent iterates $\hat{\pmb{u}}_{sH}$ for $\lfloor \frac{T}{H\eta}\rfloor$ rounds. 
+ +Lemma I.2. For $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , the following inequalities hold with probability at least $1 - \delta$ : + +$$ +\| \boldsymbol {\theta} _ {k, t} ^ {(s)} - \hat {\boldsymbol {u}} _ {s H + t} \| _ {2} \leq \hat {C} _ {3} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \forall k \in [ K ], 0 \leq s < \left\lfloor \frac {T}{H \eta} \right\rfloor , 0 \leq t \leq H, +$$ + +and + +$$ +\| \bar {\boldsymbol {\theta}} ^ {(s)} - \hat {\boldsymbol {u}} _ {s H} \| _ {2} \leq \hat {C} _ {3} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \forall 0 \leq s \leq \left\lfloor \frac {T}{H \eta} \right\rfloor , +$$ + +where $\hat{C}_3$ is a constant independent of $\eta$ and $H$ . + +Proof. Let $\hat{\Delta}_{k,t}^{(s)}\coloneqq \pmb{\theta}_{k,t}^{(s)} - \hat{\pmb{u}}_t^{(s)}$ and $\bar{\Delta}^{(s)}\coloneqq \bar{\pmb{\theta}}^{(s)} - \hat{\pmb{u}}_{0}^{(s)}$ be the differences between the Local SGD and GD iterates. According to the update rule for $\pmb{\theta}_{k,t}^{(s)}$ and $\hat{\pmb{u}}_t^{(s)}$ , + +$$ +\boldsymbol {\theta} _ {k, t + 1} ^ {(s)} = \boldsymbol {\theta} _ {k, t} ^ {(s)} - \eta \nabla \mathcal {L} \left(\boldsymbol {\theta} _ {k, t} ^ {(s)}\right) - \eta \boldsymbol {z} _ {k, t} ^ {(s)} \tag {23} +$$ + +$$ +\hat {\boldsymbol {u}} _ {t + 1} ^ {(s)} = \hat {\boldsymbol {u}} _ {t} ^ {(s)} - \eta \nabla \mathcal {L} \left(\hat {\boldsymbol {u}} _ {t} ^ {(s)}\right). 
\tag {24}
$$ + +Subtracting (24) from (23) gives + +$$
\begin{array}{l} \hat {\boldsymbol {\Delta}} _ {k, t + 1} ^ {(s)} = \hat {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} - \eta (\nabla \mathcal {L} (\boldsymbol {\theta} _ {k, t} ^ {(s)}) - \nabla \mathcal {L} (\hat {\boldsymbol {u}} _ {t} ^ {(s)})) - \eta \boldsymbol {z} _ {k, t} ^ {(s)} \\ = \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} \left(\hat {\boldsymbol {u}} _ {t} ^ {(s)}\right)\right) \hat {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} - \eta \boldsymbol {z} _ {k, t} ^ {(s)} + \eta \hat {\boldsymbol {v}} _ {k, t} ^ {(s)}, \tag {25} \\ \end{array}
$$ + +where $\hat{\pmb{v}}_{k,t}^{(s)}$ is a remainder term with norm $\| \hat{\pmb{v}}_{k,t}^{(s)}\| _2\leq \frac{\nu_3}{2}\| \hat{\pmb{\Delta}}_{k,t}^{(s)}\| _2^2$ . For the $s$ -th round of Local SGD, we can apply (25) $t$ times to obtain the following: + +$$
\begin{array}{l} \hat {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} = \left[ \prod_ {\tau = 0} ^ {t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} \left(\hat {\boldsymbol {u}} _ {\tau} ^ {(s)}\right)\right) \right] \hat {\boldsymbol {\Delta}} _ {k, 0} ^ {(s)} - \eta \underbrace {\sum_ {\tau = 0} ^ {t - 1} \left[ \prod_ {l = \tau + 1} ^ {t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} \left(\hat {\boldsymbol {u}} _ {l} ^ {(s)}\right)\right) \right] \boldsymbol {z} _ {k , \tau} ^ {(s)}} _ {\mathcal {T}} \tag {26} \\ + \eta \sum_ {\tau = 0} ^ {t - 1} \prod_ {l = \tau + 1} ^ {t - 1} (\pmb {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\pmb {u}} _ {l} ^ {(s)})) \hat {\pmb {v}} _ {k, \tau} ^ {(s)}. \\ \end{array}
$$ + +Here, $\mathcal{T}$ can be expressed in the following form: + +$$
\mathcal {T} = \hat {\boldsymbol {Z}} _ {k, s H + t} - \left[ \prod_ {l = s H} ^ {s H + t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {l})\right) \right] \hat {\boldsymbol {Z}} _ {k, s H}. 
+$$ + +Substituting in $t = H$ and taking the average, we derive the following recursion: + +$$ +\begin{array}{l} \bar {\boldsymbol {\Delta}} ^ {(s + 1)} = \frac {1}{K} \sum_ {k \in [ K ]} \hat {\boldsymbol {\Delta}} _ {k, H} ^ {(s)} \\ = \left[ \prod_ {\tau = 0} ^ {H - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {\tau} ^ {(s)})\right) \right] \bar {\boldsymbol {\Delta}} ^ {(s)} \\ - \frac {\eta}{K} \sum_ {k \in [ K ]} \hat {\boldsymbol {Z}} _ {k, (s + 1) H} + \frac {\eta}{K} \sum_ {k \in [ K ]} \left[ \prod_ {l = s H} ^ {(s + 1) H - 1} (\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {l})) \right] \hat {\boldsymbol {Z}} _ {k, s H} \\ + \frac {\eta}{K} \sum_ {k \in [ K ]} \sum_ {\tau = 0} ^ {H - 1} \prod_ {l = \tau + 1} ^ {H - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} \left(\hat {\boldsymbol {u}} _ {l} ^ {(s)}\right)\right) \hat {\boldsymbol {v}} _ {k, \tau} ^ {(s)}. \tag {27} \\ \end{array} +$$ + +Applying (27) $s$ times yields + +$$ +\bar {\boldsymbol {\Delta}} ^ {(s)} = - \frac {\eta}{K} \sum_ {k \in [ K ]} \hat {\boldsymbol {Z}} _ {k, s H} + \frac {\eta}{K} \sum_ {r = 0} ^ {s - 1} \sum_ {\tau = 0} ^ {H - 1} \sum_ {k \in [ K ]} \left[ \prod_ {l = r H + \tau + 1} ^ {s H} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {l})\right) \right] \hat {\boldsymbol {v}} _ {k, \tau} ^ {(r)}. 
\tag {28}
$$ + +Substitute (28) into (26) and we have + +$$
\begin{array}{l} \hat {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} = - \frac {\eta}{K} \sum_ {k ^ {\prime} \in [ K ]} \hat {\boldsymbol {Z}} _ {k ^ {\prime}, s H} - \eta \hat {\boldsymbol {Z}} _ {k, s H + t} + \eta \left[ \prod_ {l = s H} ^ {s H + t - 1} (\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {l})) \right] \hat {\boldsymbol {Z}} _ {k, s H} \\ + \frac {\eta}{K} \sum_ {r = 0} ^ {s - 1} \sum_ {\tau = 0} ^ {H - 1} \sum_ {k ^ {\prime} \in [ K ]} \left[ \prod_ {l = r H + \tau + 1} ^ {s H + t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {l})\right) \right] \hat {\boldsymbol {v}} _ {k ^ {\prime}, \tau} ^ {(r)} \\ + \eta \sum_ {\tau = 0} ^ {t - 1} \left[ \prod_ {l = s H + \tau + 1} ^ {s H + t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\hat {\boldsymbol {u}} _ {l})\right) \right] \hat {\boldsymbol {v}} _ {k, \tau} ^ {(s)}. \\ \end{array}
$$ + +By the Cauchy-Schwarz inequality and triangle inequality, we have + +$$
\begin{array}{l} \left\| \hat {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} \right\| _ {2} \leq \frac {\eta}{K} \left(\sum_ {k ^ {\prime} \in [ K ]} \left\| \hat {\boldsymbol {Z}} _ {k ^ {\prime}, s H} \right\| _ {2}\right) + \eta \left\| \hat {\boldsymbol {Z}} _ {k, s H + t} \right\| _ {2} + \eta \hat {C} _ {1} \left\| \hat {\boldsymbol {Z}} _ {k, s H} \right\| _ {2} \tag {29} \\ + \frac {\eta \hat {C} _ {1} \nu_ {3}}{2 K} \sum_ {r = 0} ^ {s - 1} \sum_ {\tau = 0} ^ {H - 1} \sum_ {k ^ {\prime} \in [ K ]} \| \hat {\boldsymbol {\Delta}} _ {k ^ {\prime}, \tau} ^ {(r)} \| _ {2} ^ {2} + \frac {\eta \hat {C} _ {1} \nu_ {3}}{2} \sum_ {\tau = 0} ^ {t - 1} \| \hat {\boldsymbol {\Delta}} _ {k, \tau} ^ {(s)} \| _ {2} ^ {2}, \\ \end{array}
$$ + +where $\hat{C}_1 = \exp (\nu_2T)$ . + +Below we prove by induction that for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , if + +$$
\left\| \hat {\boldsymbol {Z}} _ {k, t} \right\| _ {2} \leq \hat {C} _ {1} 
\sigma_ {\max } \sqrt {\frac {2 T}{\eta} \log \frac {2 T K}{\eta \delta}}, \quad \forall 0 \leq t < \left\lfloor \frac {T}{\eta} \right\rfloor , k \in [ K ], \tag {30}
$$ + +then there exists a constant $\hat{C}_2$ such that for all $k\in [K],0\leq s < \left\lfloor \frac{T}{\eta H}\right\rfloor$ and $0\leq t\leq H$ + +$$
\left\| \hat {\Delta} _ {k, t} ^ {(s)} \right\| _ {2} \leq \hat {C} _ {2} \sqrt {\eta \log \frac {2 T K}{\eta \delta}}. \tag {31}
$$ + +First, for all $k \in [K]$ , $\| \hat{\Delta}_{k,0}^{(0)}\|_2 = 0$ and hence (31) holds. Assuming that (31) holds for all $\hat{\Delta}_{k',\tau}^{(r)}$ with $k' \in [K]$ and either $0 \leq r < s$ , $0 \leq \tau \leq H$ , or $r = s$ , $0 \leq \tau < t$ , then by (29), for all $k \in [K]$ , the following holds: + +$$
\| \hat {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} \| _ {2} \leq 3 \hat {C} _ {1} ^ {2} \sigma_ {\max} \sqrt {2 T \eta \log \frac {2 T K}{\eta \delta}} + \hat {C} _ {1} \hat {C} _ {2} ^ {2} T \eta \nu_ {3} \log \frac {2 T K}{\eta \delta}.
$$ + +Let $\hat{C}_2 \geq 6\hat{C}_1^2\sigma_{\max}\sqrt{2T}$ . Then for sufficiently small $\eta$ , (31) holds. By Lemma I.1, (30) holds with probability at least $1 - \delta$ . Furthermore, notice that $\bar{\pmb{\theta}}^{(s)} - \hat{\pmb{u}}_{sH} = \frac{1}{K}\sum_{k\in [K]}\hat{\pmb{\Delta}}_{k,H}^{(s - 1)}$ . Hence we have the lemma. + +The iterates of standard SGD can be viewed as the local iterates on a single client with the number of local steps $\left\lfloor \frac{T}{\eta} \right\rfloor$ . Therefore, we can directly apply Lemma I.2 and obtain the following lemma about the SGD iterates $\boldsymbol{w}_t$ . + +Corollary I.1. 
For $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , the following holds with probability at least $1 - \delta$ : + +$$ +\| \boldsymbol {w} _ {s H} - \hat {\boldsymbol {u}} _ {s H} \| _ {2} \leq \hat {C} _ {3} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \forall 0 \leq s \leq \frac {T}{H \eta}, +$$ + +where $\hat{C}_3$ is the same constant as in Lemma I.2. + +Applying Lemma I.2 and Corollary I.1 and taking the union bound, we have Theorem 3.1. + +# J PROOF OUTLINE OF MAIN THEOREMS + +We adopt the general framework proposed by Li et al. (2019a) to bound the closeness of discrete algorithms and SDE solutions via the method of moments. However, their framework is not directly applicable to our case since they provide approximation guarantees for discrete algorithms with learning rate $\eta$ for $\mathcal{O}(\eta^{-1})$ steps while we want to capture Local SGD for $\mathcal{O}(\eta^{-2})$ steps. To overcome this difficulty, we treat $R_{\mathrm{grp}} := \left\lfloor \frac{1}{\alpha\eta^{\beta}} \right\rfloor$ rounds as a "giant step" of Local SGD with an "effective" learning rate $\eta^{1 - \beta}$ , where $\beta$ is a constant in $(0,1)$ , and derive the recursive formulas to compute the moments for the change in every step, every round, and every $R_{\mathrm{grp}}$ rounds. The formulation of the recursions requires a detailed analysis of the limiting dynamics of the iterate and careful control of approximation errors. + +The dynamics of the iterate can be divided into two phases: the approaching phase (Phase 1) and the drift phase (Phase 2). The approaching phase only lasts for $\mathcal{O}(\log \frac{1}{\eta})$ rounds, during which the iterate is quickly driven to the minimizer manifold by the negative gradient and ends up within only $\tilde{\mathcal{O}} (\sqrt{\eta})$ from $\Gamma$ (see Appendix K.5). After that, the iterate enters the drifting phase and moves in the tangent space of $\Gamma$ while staying close to $\Gamma$ (see Appendix K.6). 
The closeness of the iterates (local and global) and $\Gamma$ is summarized in the following theorem. + +Theorem J.1 (Closeness of the iterates and $\Gamma$ ). For $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , for all $\mathcal{O}(\log \frac{1}{\eta}) \leq s \leq \lfloor T / (H\eta^2) \rfloor$ , + +$$ +\Phi (\bar {\boldsymbol {\theta}} ^ {(s)}) \in \Gamma , \quad \| \bar {\boldsymbol {\theta}} ^ {(s)} - \Phi (\bar {\boldsymbol {\theta}} ^ {(s)}) \| _ {2} = \mathcal {O} \left(\sqrt {\eta \log \frac {1}{\eta \delta}}\right). +$$ + +Also, for all $\mathcal{O}(\log \frac{1}{\eta}) \leq s < \lfloor T / (H\eta^2) \rfloor$ , $k \in [K]$ and $0 \leq t \leq H$ , + +$$ +\| \pmb {\theta} _ {k, t} ^ {(s)} - \Phi (\bar {\pmb {\theta}} ^ {(s)}) \| _ {2} = \mathcal {O} \left(\sqrt {\eta \log \frac {1}{\eta \delta}}\right). +$$ + +Here, $\mathcal{O}(\cdot)$ hides constants independent of $\eta$ and $\delta$ . + +To control the approximation errors, we also provide a high probability bound for the change of the manifold projection within $R_{\mathrm{grp}}$ rounds. + +Theorem J.2 (High probability bound for the change of manifold projection). For $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ with probability at least $1 - \delta$ , for all $0 \leq s \leq \lfloor T / (H\eta^2) \rfloor - R_{\mathrm{grp}}$ and $0 \leq r \leq R_{\mathrm{grp}}$ , + +$$ +\Phi (\bar {\boldsymbol {\theta}} ^ {(s)}), \Phi (\bar {\boldsymbol {\theta}} ^ {(s + r)}) \in \Gamma , \quad \| \Phi (\bar {\boldsymbol {\theta}} ^ {(s + r)}) - \Phi (\bar {\boldsymbol {\theta}} ^ {(s)}) \| _ {2} = \mathcal {O} \left(\eta^ {0. 5 - 0. 5 \beta} \sqrt {\log \frac {1}{\eta \delta}}\right), +$$ + +where $\mathcal{O}(\cdot)$ hides constants independent of $\eta$ and $\delta$ . + +The proof of Theorems J.1 and J.2 is based on the analysis of the dynamics of the iterate and presented in Appendix K.7. 
+ +Utilizing Theorems J.1 and J.2, we move on to estimate the first and second moments of the change of the manifold projection every $R_{\mathrm{grp}}$ rounds. However, the randomness during training might drive the iterate far from the manifold (with a low probability, though), making the dynamics intractable. To tackle this issue, we construct a well-behaved auxiliary sequence $\{\hat{\pmb{\theta}}_{k,t}^{(s)}\}$ , which is constrained to the neighborhood of $\Gamma$ and equals the original sequence $\{\pmb{\theta}_{k,t}^{(s)}\}$ with high probability (see Definition K.5). Then we can formulate recursions for the change of manifold projection of the auxiliary sequence using the nice properties near $\Gamma$ . The estimate of moments is summarized in Theorem K.2. + +Finally, based on the moment estimates, we apply the framework in Li et al. (2019a) to show that the manifold projection and the SDE solution are weak approximations of each other in Appendix K.10. + +# K PROOF DETAILS OF MAIN THEOREMS + +The detailed proof is organized as follows. In Appendix K.1, we introduce the notations that will be used throughout the proof. To establish preliminary knowledge, Appendix K.2 provides explicit expression for the projection operator $\Phi (\cdot)$ , and Appendix K.3 presents lemmas about gradient descent + +(GD) and gradient flow (GF). Based on the preliminary knowledge, we construct a nested working zone to characterize the closeness of the iterate and $\Gamma$ in Appendix K.4. Appendices K.5 to K.10 make up the main body of the proof. Specifically, Appendices K.5 and K.6 analyze the dynamics of Local SGD iterates for phases 1 and 2, respectively. Utilizing these analyses, we provide the proof of Theorems J.1 and J.2 in Appendix K.7 and the proof of Theorem 3.3 in Appendix K.8. Then we derive the estimation for the first and second moments of one "giant step" $\Phi (\bar{\theta}^{(s + R_{\mathrm{grp}})}) - \Phi (\bar{\theta}^{(s)})$ in Appendix K.9. 
Finally, we prove the approximation Theorem 3.2 in Appendix K.10. + +# K.1 ADDITIONAL NOTATIONS + +Let $R_{\mathrm{tot}} \coloneqq \left\lfloor \frac{T}{H\eta^2} \right\rfloor$ be the total number of rounds. Denote by $\phi^{(s)}$ the manifold projection of the global iterate at the beginning of round $s$ . Let $\pmb{x}_{k,t}^{(s)} \coloneqq \pmb{\theta}_{k,t}^{(s)} - \phi^{(s)}$ be the difference between the local iterate and the manifold projection of the global iterate. Also define $\bar{\pmb{x}}_H^{(s)} \coloneqq \frac{1}{K}\sum_{k\in [K]}\pmb{x}_{k,H}^{(s)}$ and $\bar{\pmb{x}}_0^{(s)} \coloneqq \frac{1}{K}\sum_{k\in [K]}\pmb{x}_{k,0}^{(s)}$ , which are the averages of $\pmb{x}_{k,t}^{(s)}$ among the $K$ workers at steps $H$ and $0$ , respectively. Then for all $k\in [K]$ , $\pmb{x}_{k,0}^{(s)} = \bar{\pmb{x}}_0^{(s)} = \bar{\pmb{\theta}}^{(s)} - \phi^{(s)}$ . Finally, since $\nabla \ell(\pmb{\theta};\pmb{\zeta})$ is bounded, the gradient noise $z_{k,t}^{(s)}$ is also bounded and we denote by $\sigma_{\max}$ the upper bound such that $\| z_{k,t}^{(s)}\|_2 \leq \sigma_{\max}, \forall s,k,t$ . + +We first introduce the notion of $\mu$ -PL. We will later show that there exists a neighborhood of the minimizer manifold $\Gamma$ where $\mathcal{L}$ satisfies $\mu$ -PL. + +Definition K.1 (Polyak-Lojasiewicz Condition). For $\mu > 0$ , we say a function $\mathcal{L}(\cdot)$ satisfies $\mu$ -Polyak-Lojasiewicz condition (abbreviated as $\mu$ -PL) on set $U$ if + +$$
\frac {1}{2} \| \nabla \mathcal {L} (\boldsymbol {\theta}) \| _ {2} ^ {2} \geq \mu (\mathcal {L} (\boldsymbol {\theta}) - \inf _ {\boldsymbol {\theta} ^ {\prime} \in U} \mathcal {L} (\boldsymbol {\theta} ^ {\prime})).
$$ + +We then introduce the definitions of the $\epsilon$ -ball at a point and the $\epsilon$ -neighborhood of a set. 
For $\pmb{\theta} \in \mathbb{R}^d$ and $\epsilon > 0$ , $B^{\epsilon}(\pmb{\theta}) \coloneqq \{\pmb{\theta}' : \| \pmb{\theta}' - \pmb{\theta}\|_2 < \epsilon\}$ is the open $\epsilon$ -ball centered at $\pmb{\theta}$ . For a set $\mathcal{Z} \subseteq \mathbb{R}^d$ , $\mathcal{Z}^{\epsilon} \coloneqq \bigcup_{\pmb{\theta} \in \mathcal{Z}} B^{\epsilon}(\pmb{\theta})$ is the $\epsilon$ -neighborhood of $\mathcal{Z}$ . + +# K.2 COMPUTING THE DERIVATIVES OF THE LIMITING MAPPING + +In this subsection, we present lemmas that relate the derivatives of the limiting mapping $\Phi(\cdot)$ to the derivatives of the loss function $\mathcal{L}(\cdot)$ . We first introduce the operator $\mathcal{V}_{\boldsymbol{H}}$ . + +Definition K.2. For a semi-definite symmetric matrix $\mathbf{H} \in \mathbb{R}^{d \times d}$ , let $\lambda_j$ , $\mathbf{v}_j$ be the $j$ -th eigenvalue and eigenvector and $\mathbf{v}_j$ 's form an orthonormal basis of $\mathbb{R}^d$ . Then, define the operator $\mathcal{V}_{\mathbf{H}}: \mathbb{R}^{d \times d} \to \mathbb{R}^{d \times d}$ as + +$$
\mathcal {V} _ {\boldsymbol {H}} (\boldsymbol {M}) := \sum_ {i, j: \lambda_ {i} \neq 0 \vee \lambda_ {j} \neq 0} \frac {1}{\lambda_ {i} + \lambda_ {j}} \left\langle \boldsymbol {M}, \boldsymbol {v} _ {i} \boldsymbol {v} _ {j} ^ {\top} \right\rangle \boldsymbol {v} _ {i} \boldsymbol {v} _ {j} ^ {\top}, \forall \boldsymbol {M} \in \mathbb {R} ^ {d \times d}.
$$ + +Intuitively, this operator projects $M$ to the base matrix $\mathbf{v}_i\mathbf{v}_j^\top$ and sums up the projections with weights $\frac{1}{\lambda_i + \lambda_j}$ . + +Additionally, for $\theta \in \Gamma$ , denote by $T_{\theta}$ and $T_{\theta}^{\perp}$ the tangent and normal space of $\Gamma$ at $\theta$ respectively. Lemmas K.1 to K.4 are from Li et al. (2021b). We include them to make the paper self-contained. + +Lemma K.1 (Lemma C.1 of Li et al. (2021b)). 
For any $\pmb{\theta} \in \Gamma$ and any $\pmb{v} \in T_{\pmb{\theta}}(\Gamma)$ , it holds that $\nabla^2 \mathcal{L}(\pmb{\theta}) \pmb{v} = \mathbf{0}$ . + +Lemma K.2 (Lemma 4.3 of Li et al. (2021b)). For any $\pmb{\theta} \in \Gamma$ , $\partial \Phi(\pmb{\theta}) \in \mathbb{R}^{d \times d}$ is the projection matrix onto the tangent space $T_{\pmb{\theta}}(\Gamma)$ . + +Lemma K.3 (Lemma C.4 of Li et al. (2021b)). For any $\pmb{\theta} \in \Gamma$ , $\pmb{u} \in \mathbb{R}^d$ and $\pmb{v} \in T_{\pmb{\theta}}(\Gamma)$ , it holds that + +$$ +\partial^ {2} \Phi (\boldsymbol {\theta}) [ \boldsymbol {v}, \boldsymbol {u} ] = - \partial \Phi (\boldsymbol {\theta}) \nabla^ {3} \mathcal {L} (\boldsymbol {\theta}) [ \boldsymbol {v}, \nabla^ {2} \mathcal {L} (\boldsymbol {\theta}) ^ {+} \boldsymbol {u} ] - \nabla^ {2} \mathcal {L} (\boldsymbol {\theta}) ^ {+} \nabla^ {3} \mathcal {L} (\boldsymbol {\theta}) [ \boldsymbol {v}, \partial \Phi (\boldsymbol {\theta}) \boldsymbol {u} ]. +$$ + +Lemma K.4 (Lemma C.6 of Li et al. (2021b)). For any $\pmb{\theta} \in \Gamma$ and $\pmb{\Sigma} \in \operatorname{span}\{\pmb{u}\pmb{u}^{\top} \mid \pmb{u} \in T_{\pmb{\theta}}^{\perp}(\Gamma)\}$ , + +$$ +\left\langle \partial^ {2} \Phi (\boldsymbol {\theta}), \boldsymbol {\Sigma} \right\rangle = - \partial \Phi (\boldsymbol {\theta}) \nabla^ {3} \mathcal {L} (\boldsymbol {\theta}) \left[ \mathcal {V} _ {\nabla^ {2} \mathcal {L} (\boldsymbol {\theta})} (\boldsymbol {\Sigma}) \right]. +$$ + +Lemma K.5. For all $\theta \in \Gamma$ , $\pmb{u}, \pmb{v} \in T_{\theta}(\Gamma)$ , it holds that + +$$ +\partial \Phi (\boldsymbol {\theta}) \nabla^ {3} \mathcal {L} [ \boldsymbol {v} \boldsymbol {u} ^ {\top} ] = \mathbf {0}. \tag {32} +$$ + +Proof. This proof is inspired by Lemma C.4 of Li et al. (2021b). For any $\pmb{\theta} \in \Gamma$ , consider a parameterized smooth curve $\pmb{v}(t), t \geq 0$ on $\Gamma$ such that $\pmb{v}(0) = \pmb{\theta}$ and $\pmb{v}'(0) = \pmb{v}$ . 
Let $P_{\parallel}(t) = \partial \Phi(\pmb{v}(t))$ , $P_{\perp}(t) = I - \partial \Phi(\pmb{v}(t))$ and $\pmb{H}(t) = \nabla^2 \mathcal{L}(\pmb{v}(t))$ . By Lemma C.1 and 4.3 in Li et al. (2021b), + +$$ +\boldsymbol {H} (t) = \boldsymbol {P} _ {\perp} (t) \boldsymbol {H} (t). +$$ + +Take the derivative with respect to $t$ on both sides, + +$$ +\begin{array}{l} \boldsymbol {H} ^ {\prime} (t) = \boldsymbol {P} _ {\perp} (t) \boldsymbol {H} ^ {\prime} (t) + \boldsymbol {P} _ {\perp} ^ {\prime} (t) \boldsymbol {H} (t) \\ \Rightarrow \boldsymbol {P} _ {\parallel} (t) \boldsymbol {H} ^ {\prime} (t) = \boldsymbol {P} _ {\perp} ^ {\prime} (t) \boldsymbol {H} (t) = - \boldsymbol {P} _ {\parallel} ^ {\prime} (t) \boldsymbol {H} (t). \\ \end{array} +$$ + +At $t = 0$ , we have + +$$ +\boldsymbol {P} _ {\parallel} (0) \boldsymbol {H} ^ {\prime} (0) = - \boldsymbol {P} _ {\parallel} ^ {\prime} (0) \boldsymbol {H} (0). \tag {33} +$$ + +WLOG let $H(0) = \mathrm{diag}(\lambda_1, \dots, \lambda_d), \in \mathbb{R}^{d \times d}$ , where $\lambda_i = 0$ for all $m < i \leq d$ . Therefore $P_{\perp}(0) = \begin{bmatrix} I_m & 0 \\ 0 & 0 \end{bmatrix}$ , $P_{\parallel}(0) = \begin{bmatrix} 0 & 0 \\ 0 & I_{d - m} \end{bmatrix}$ . Decompose $P_{\parallel}'(0)$ , $H(0)$ and $H'(0)$ as follows. + +$$ +\boldsymbol {P} _ {\parallel} ^ {\prime} (0) = \left[ \begin{array}{c c} \boldsymbol {P} _ {\parallel , 1 1} ^ {\prime} (0) & \boldsymbol {P} _ {\parallel , 1 2} ^ {\prime} (0) \\ \boldsymbol {P} _ {\parallel , 2 1} ^ {\prime} (0) & \boldsymbol {P} _ {\parallel , 2 2} ^ {\prime} (0) \end{array} \right], \boldsymbol {H} (0) = \left[ \begin{array}{c c} \boldsymbol {H} _ {1 1} (0) & \boldsymbol {0} \\ \boldsymbol {0} & \boldsymbol {0} \end{array} \right], \boldsymbol {H} ^ {\prime} (0) = \left[ \begin{array}{c c} \boldsymbol {H} _ {1 1} ^ {\prime} (0) & \boldsymbol {H} _ {1 2} ^ {\prime} (0) \\ \boldsymbol {H} _ {2 1} ^ {\prime} (0) & \boldsymbol {H} _ {2 2} ^ {\prime} (0) \end{array} \right]. 
+$$ + +Substituting the decomposition into (33), we have + +$$ +\left[ \begin{array}{c c} \mathbf {0} & \mathbf {0} \\ \mathbf {H} _ {2 1} ^ {\prime} (0) & \mathbf {H} _ {2 2} ^ {\prime} (0) \end{array} \right] = - \left[ \begin{array}{c c} \mathbf {P} _ {\parallel , 1 1} ^ {\prime} (0) \mathbf {H} _ {1 1} (0) & \mathbf {0} \\ \mathbf {P} _ {\parallel , 2 1} ^ {\prime} (0) \mathbf {H} _ {1 1} (0) & \mathbf {0} \end{array} \right]. +$$ + +Therefore, $H_{22}'(0) = 0$ and + +$$ +\boldsymbol {P} _ {\parallel} (0) \boldsymbol {H} ^ {\prime} (0) = - \boldsymbol {P} _ {\parallel} ^ {\prime} (0) \boldsymbol {H} (0) = - \left[ \begin{array}{c c} \boldsymbol {0} & \boldsymbol {0} \\ \boldsymbol {H} _ {2 1} ^ {\prime} (0) & \boldsymbol {0} \end{array} \right]. +$$ + +Any $\pmb{u} \in T_{\pmb{\theta}}(\Gamma)$ can be decomposed as $\pmb{u} = [\pmb{0}, \pmb{u}_2]^\top$ where $\pmb{u}_2 \in \mathbb{R}^{d - m}$ . With this decomposition, we have $\pmb{P}_{\parallel}(0)\pmb{H}'(0)\pmb{u} = \pmb{0}$ . Also, note that $\pmb{H}'(0) = \nabla^3\mathcal{L}(\pmb{\theta})[\pmb{v}]$ . Hence, + +$$ +\partial \Phi (\boldsymbol {\theta}) \nabla^ {3} \mathcal {L} (\boldsymbol {\theta}) [ \boldsymbol {v} \boldsymbol {u} ^ {T} ] = \boldsymbol {0}. +$$ + +![](images/42ce0cb502203a7079070b686c3695160cc407ba9340f5f62f1024cbd5760322.jpg) + +# K.3 PRELIMINARY LEMMAS FOR GD AND GF + +In this subsection, we introduce a few useful preliminary lemmas about gradient descent and gradient flow. Before presenting the lemmas, we introduce some notations and assumptions that will be used in this subsection. + +Assume that the loss function $\mathcal{L}(\pmb{\theta})$ is $\rho$ -smooth and $\mu$ -PL in an open, convex neighborhood $U$ of a local minimizer $\pmb{\theta}^*$ . Denote by $\mathcal{L}^* := \mathcal{L}(\pmb{\theta}^*)$ the minimum value for simplicity. Let $\epsilon'$ be the radius of the open $\epsilon'$ -ball centered at $\pmb{\theta}^*$ such that $B^{\epsilon'}(\pmb{\theta}^*) \subseteq U$ . 
We also define a potential function $\tilde{\Psi}(\pmb{\theta}) := \sqrt{\mathcal{L}(\pmb{\theta}) - \mathcal{L}^*}$ . + +Consider gradient descent iterates $\{\hat{u}_t\}_{t\in \mathbb{N}}$ following the update rule $\hat{\pmb{u}}_{t + 1} = \hat{\pmb{u}}_t - \eta \nabla \mathcal{L}(\hat{\pmb{u}}_t)$ . We first introduce the descent lemma for gradient descent. + +Lemma K.6 (Descent lemma for GD). If $\hat{\boldsymbol{u}}_t\in U$ and $\eta \leq \frac{1}{\rho}$ , then + +$$
\frac {\eta}{2} \| \nabla \mathcal {L} (\hat {\boldsymbol {u}} _ {t}) \| _ {2} ^ {2} \leq \mathcal {L} (\hat {\boldsymbol {u}} _ {t}) - \mathcal {L} (\hat {\boldsymbol {u}} _ {t + 1}),
$$ + +and + +$$
\mathcal {L} \left(\hat {\boldsymbol {u}} _ {t + 1}\right) - \mathcal {L} ^ {*} \leq (1 - \mu \eta) \left(\mathcal {L} \left(\hat {\boldsymbol {u}} _ {t}\right) - \mathcal {L} ^ {*}\right).
$$ + +Proof. By $\rho$ -smoothness, + +$$
\begin{array}{l} \mathcal {L} (\hat {\boldsymbol {u}} _ {t + 1}) \leq \mathcal {L} (\hat {\boldsymbol {u}} _ {t}) + \langle \nabla \mathcal {L} (\hat {\boldsymbol {u}} _ {t}), \hat {\boldsymbol {u}} _ {t + 1} - \hat {\boldsymbol {u}} _ {t} \rangle + \frac {\rho}{2} \| \hat {\boldsymbol {u}} _ {t + 1} - \hat {\boldsymbol {u}} _ {t} \| _ {2} ^ {2} \\ = \mathcal {L} (\hat {\boldsymbol {u}} _ {t}) - \eta (1 - \frac {\rho \eta}{2}) \| \nabla \mathcal {L} (\hat {\boldsymbol {u}} _ {t}) \| _ {2} ^ {2} \\ \leq \mathcal {L} (\hat {\boldsymbol {u}} _ {t}) - \frac {\eta}{2} \| \nabla \mathcal {L} (\hat {\boldsymbol {u}} _ {t}) \| _ {2} ^ {2} \\ \end{array}
$$ + +By the definition of $\mu$ -PL, we have + +$$
\mathcal {L} \left(\hat {\boldsymbol {u}} _ {t + 1}\right) - \mathcal {L} ^ {*} \leq (1 - \mu \eta) \left(\mathcal {L} \left(\hat {\boldsymbol {u}} _ {t}\right) - \mathcal {L} ^ {*}\right). 
+$$ + +![](images/7dd68ab7f70f27aac1d64197a58e4432d5d5ec93b7e237eb54a00ccbc8150af3.jpg) + +Then we prove the Lipschitzness of $\tilde{\Psi} (\pmb {\theta})$ + +Lemma K.7 (Lipschitzness of $\tilde{\Psi}(\pmb{\theta})$ ). $\tilde{\Psi}(\pmb{\theta})$ is $\sqrt{2\rho}$ -Lipschitz for $\pmb{\theta} \in U$ . That is, for any $\pmb{\theta}_1, \pmb{\theta}_2 \in U$ , + +$$ +\left| \tilde {\Psi} \left(\boldsymbol {\theta} _ {1}\right) - \tilde {\Psi} \left(\boldsymbol {\theta} _ {2}\right) \right| \leq \sqrt {2 \rho} \| \boldsymbol {\theta} _ {1} - \boldsymbol {\theta} _ {2} \| _ {2}. +$$ + +Proof. Fix $\pmb{\theta}_{1}$ and $\pmb{\theta}_{2}$ . Denote by $\pmb{\theta}(t) \coloneqq (1 - t)\pmb{\theta}_{1} + t\pmb{\theta}_{2}$ the convex combination of $\pmb{\theta}_{1}$ and $\pmb{\theta}_{2}$ where $t \in [0,1]$ . Further define $f(t) \coloneqq \tilde{\Psi}(\pmb{\theta}(t))$ . Below we consider two cases. + +Case 1. If $\forall t\in (0,1)$ $f(t) > 0$ , then $f(t)$ is differentiable on $(0,1)$ + +$$ +\begin{array}{l} \left| \tilde {\Psi} \left(\boldsymbol {\theta} _ {2}\right) - \tilde {\Psi} \left(\boldsymbol {\theta} _ {1}\right) \right| = | f (1) - f (0) | \\ = \left| \int_ {0} ^ {1} f ^ {\prime} (t) \mathrm {d} t \right| \\ = \left| \int_ {0} ^ {1} \left\langle \nabla \tilde {\Psi} (\boldsymbol {\theta} (t)), \boldsymbol {\theta} _ {2} - \boldsymbol {\theta} _ {1} \right\rangle \mathrm {d} t \right| \\ = \left| \int_ {0} ^ {1} \frac {\langle \nabla \mathcal {L} (\boldsymbol {\theta} (t)) , \boldsymbol {\theta} _ {2} - \boldsymbol {\theta} _ {1} \rangle}{\sqrt {\mathcal {L} (\boldsymbol {\theta} (t)) - \mathcal {L} ^ {*}}} \mathrm {d} t \right| \\ \leq \| \boldsymbol {\theta} _ {2} - \boldsymbol {\theta} _ {1} \| _ {2} \int_ {0} ^ {1} \frac {\| \nabla \mathcal {L} (\boldsymbol {\theta} (t)) \| _ {2}}{\sqrt {\mathcal {L} (\boldsymbol {\theta} (t)) - \mathcal {L} ^ {*}}} \mathrm {d} t. 
\\ \end{array} +$$ + +By $\rho$ -smoothness of $\mathcal{L}$ , for all $\pmb{\theta} \in U$ , + +$$ +\left\| \nabla \mathcal {L} (\boldsymbol {\theta}) \right\| _ {2} ^ {2} \leq 2 \rho (\mathcal {L} (\boldsymbol {\theta}) - \mathcal {L} ^ {*}). +$$ + +Since $\sqrt{\mathcal{L}(\pmb{\theta}(t)) - \mathcal{L}^*} > 0$ for all $t \in (0,1)$ , $\frac{\|\nabla\mathcal{L}(\pmb{\theta}(t))\|_2}{\sqrt{\mathcal{L}(\pmb{\theta}(t)) - \mathcal{L}^*}} \leq \sqrt{2\rho}$ . Therefore, + +$$ +\left| \tilde {\Psi} \left(\boldsymbol {\theta} _ {2}\right) - \tilde {\Psi} \left(\boldsymbol {\theta} _ {1}\right) \right| \leq \sqrt {2 \rho_ {2}} \left\| \boldsymbol {\theta} _ {2} - \boldsymbol {\theta} _ {1} \right\| _ {2}. +$$ + +Case 2. If $\exists t' \in (0,1)$ such that $f(t') = 0$ , then + +$$ +\begin{array}{l} \left| \tilde {\Psi} \left(\boldsymbol {\theta} _ {2}\right) - \tilde {\Psi} \left(\boldsymbol {\theta} _ {1}\right) \right| = | f (1) - f (0) | \\ = \left| (1 - t ^ {\prime}) \frac {f (1) - f (t ^ {\prime})}{1 - t ^ {\prime}} + t ^ {\prime} \left(\frac {f (t ^ {\prime}) - f (0)}{t ^ {\prime}}\right) \right| \\ \leq \max \left(\frac {f (1)}{1 - t ^ {\prime}}, \frac {f (0)}{t ^ {\prime}}\right). \\ \end{array} +$$ + +Since $\pmb{\theta}(t')$ minimizes $\mathcal{L}$ in an open set, $\nabla \mathcal{L}(\pmb{\theta}(t')) = \mathbf{0}$ . By $\rho$ -smoothness of $\mathcal{L}$ , for all $\pmb{\theta} \in U$ , + +$$ +\mathcal {L} (\boldsymbol {\theta}) \leq \mathcal {L} ^ {*} + \frac {\rho}{2} \| \boldsymbol {\theta} - \boldsymbol {\theta} (t ^ {\prime}) \| _ {2} ^ {2} \quad \Rightarrow \quad \tilde {\Psi} (\boldsymbol {\theta}) \leq \sqrt {\frac {\rho}{2}} \| \boldsymbol {\theta} - \boldsymbol {\theta} (t ^ {\prime}) \| _ {2}. 
+$$ + +Therefore, + +$$ +f (1) \leq \sqrt {\frac {\rho}{2}} \| \boldsymbol {\theta} _ {2} - \boldsymbol {\theta} \left(t ^ {\prime}\right) \| _ {2} = \left(1 - t ^ {\prime}\right) \sqrt {\frac {\rho}{2}} \| \boldsymbol {\theta} _ {2} - \boldsymbol {\theta} _ {1} \| _ {2} +$$ + +$$ +f (0) \leq \sqrt {\frac {\rho}{2}} \| \boldsymbol {\theta} _ {1} - \boldsymbol {\theta} \left(t ^ {\prime}\right) \| _ {2} = t ^ {\prime} \sqrt {\frac {\rho}{2}} \| \boldsymbol {\theta} _ {2} - \boldsymbol {\theta} _ {1} \| _ {2}. +$$ + +Then we have + +$$ +\left| \tilde {\Psi} \left(\boldsymbol {\theta} _ {2}\right) - \tilde {\Psi} \left(\boldsymbol {\theta} _ {1}\right) \right| \leq \sqrt {\frac {\rho}{2}} \| \boldsymbol {\theta} _ {2} - \boldsymbol {\theta} _ {1} \| _ {2}. +$$ + +Combining case 1 and case 2, we conclude the proof. + +![](images/7c229c0422d5cc0825d4812c31a06ccbaea051b1488dc6642b0e75cb1a27b46c.jpg) + +Below we introduce a lemma that relates the movement of one step gradient descent to the change of the potential function. + +Lemma K.8 (Lemma G.1 in Lyu et al. (2022)). If $\hat{\pmb{u}}_t\in U$ and $\eta \leq 1 / \rho_{2}$ then + +$$ +\tilde {\Psi} (\hat {\boldsymbol {u}} _ {t}) - \tilde {\Psi} (\hat {\boldsymbol {u}} _ {t + 1}) \geq \frac {\sqrt {2 \mu}}{4} \eta \| \nabla \mathcal {L} (\hat {\boldsymbol {u}} _ {t}) \| _ {2}. +$$ + +Proof. 
\begin{array}{l} \tilde {\Psi} (\hat {\boldsymbol {u}} _ {t}) - \tilde {\Psi} (\hat {\boldsymbol {u}} _ {t + 1}) = \frac {\mathcal {L} (\hat {\boldsymbol {u}} _ {t}) - \mathcal {L} (\hat {\boldsymbol {u}} _ {t + 1})}{\tilde {\Psi} (\hat {\boldsymbol {u}} _ {t}) + \tilde {\Psi} (\hat {\boldsymbol {u}} _ {t + 1})} \\ \geq \frac {\mathcal {L} (\hat {\boldsymbol {u}} _ {t}) - \mathcal {L} (\hat {\boldsymbol {u}} _ {t + 1})}{2 \tilde {\Psi} (\hat {\boldsymbol {u}} _ {t})} \\ \geq \frac {\eta (1 - \rho \eta / 2) \| \nabla \mathcal {L} (\hat {\boldsymbol {u}} _ {t}) \| _ {2} ^ {2}}{2 \tilde {\Psi} (\hat {\boldsymbol {u}} _ {t})}, \\ \end{array}
For step $t$ , since $\hat{\pmb{u}}_{\tau} \in B^{\epsilon'}(\pmb{\theta}^{*})$ , we apply Lemma K.8 and obtain + +$$ +\| \hat {\boldsymbol {u}} _ {t} - \hat {\boldsymbol {u}} _ {0} \| _ {2} \leq \eta \sum_ {\tau = 0} ^ {t - 1} \| \nabla \mathcal {L} (\hat {\boldsymbol {u}} _ {\tau}) \| _ {2} \leq \sqrt {\frac {8}{\mu}} \left(\tilde {\Psi} (\hat {\boldsymbol {u}} _ {0}) - \tilde {\Psi} (\hat {\boldsymbol {u}} _ {t})\right) \leq \sqrt {\frac {8}{\mu}} \tilde {\Psi} (\hat {\boldsymbol {u}} _ {0}). +$$ + +Further by $\rho$ -smoothness of $\mathcal{L}(\cdot)$ , + +$$ +\left\| \hat {\boldsymbol {u}} _ {t} - \hat {\boldsymbol {u}} _ {0} \right\| _ {2} \leq \sqrt {\frac {8}{\mu}} \tilde {\Psi} (\hat {\boldsymbol {u}} _ {0}) \leq 2 \sqrt {\frac {\rho}{\mu}} \left\| \hat {\boldsymbol {u}} _ {0} - \boldsymbol {\theta} ^ {*} \right\| _ {2} \leq \frac {1}{2} \epsilon^ {\prime}. +$$ + +Therefore, $\| \hat{\pmb{u}}_t - \pmb{\theta}^* \|_2 \leq \| \hat{\pmb{u}}_t - \hat{\pmb{u}}_0 \|_2 + \| \hat{\pmb{u}}_0 - \pmb{\theta}^* \|_2 < \epsilon'$ , which concludes the proof. + +![](images/f7898de853e2fec26ec19d41b0d08e55c88c36513bdf75804aa359d44976016a.jpg) + +Finally, we introduce a lemma adapted from Thm. D.4 of which bounds the movement of GF. Lyu et al. (2022). + +Lemma K.10. Assume that $\| \pmb{\theta}_0 - \pmb{\theta}^*\|_2 < \sqrt{\frac{\mu}{\rho}}\epsilon'$ . The gradient flow $\pmb{\theta}(t) = -\frac{\mathrm{d}\mathcal{L}(\pmb{\theta}(t))}{\mathrm{d}t}$ starting at $\pmb{\theta}_0$ converges to a point in $U$ and + +$$ +\left\| \boldsymbol {\theta} _ {0} - \lim _ {t \rightarrow + \infty} \boldsymbol {\theta} (t) \right\| _ {2} \leq \sqrt {\frac {2}{\mu}} \sqrt {\mathcal {L} (\boldsymbol {\theta} _ {0}) - \mathcal {L} ^ {*}} \leq \sqrt {\frac {\rho}{\mu}} \| \boldsymbol {\theta} _ {0} - \boldsymbol {\theta} ^ {*} \| _ {2} +$$ + +Proof. Let $T \coloneqq \inf \{t : \theta \notin U\}$ . 
Then for all $t < T$ , + +$$ +\begin{array}{l} \frac {\mathrm {d}}{\mathrm {d} t} \left(\mathcal {L} (\boldsymbol {\theta}) - \mathcal {L} ^ {*}\right) ^ {1 / 2} = \frac {1}{2} \left(\mathcal {L} (\boldsymbol {\theta}) - \mathcal {L} ^ {*}\right) ^ {- 1 / 2} \cdot \left\langle \nabla \mathcal {L} (\boldsymbol {\theta}), \frac {\mathrm {d} \boldsymbol {\theta}}{\mathrm {d} t} \right\rangle \\ = - \frac {1}{2} (\mathcal {L} (\boldsymbol {\theta}) - \mathcal {L} ^ {*}) ^ {- 1 / 2} \| \nabla \mathcal {L} (\boldsymbol {\theta}) \| _ {2} \| \frac {\mathrm {d} \boldsymbol {\theta}}{\mathrm {d} t} \| _ {2}. \\ \end{array} +$$ + +By $\mu$ -PL, $\|\nabla \mathcal{L}(\pmb{\theta})\|_2 \geq \sqrt{2\mu(\mathcal{L}(\pmb{\theta}) - \mathcal{L}^*)}$ . Hence, + +$$ +\frac {\mathrm {d}}{\mathrm {d} t} \left(\mathcal {L} (\boldsymbol {\theta}) - \mathcal {L} ^ {*}\right) ^ {1 / 2} \leq - \frac {\sqrt {2 \mu}}{2} \| \frac {\mathrm {d} \boldsymbol {\theta}}{\mathrm {d} t} \| _ {2}. +$$ + +Integrating both sides, we have + +$$ +\int_ {0} ^ {T} \| \frac {\mathrm {d} \boldsymbol {\theta} (\tau)}{\mathrm {d} \tau} \| \mathrm {d} \tau \leq \frac {2}{\sqrt {2 \mu}} \left(\mathcal {L} \left(\boldsymbol {\theta} _ {0}\right) - \mathcal {L} ^ {*}\right) ^ {1 / 2} \leq \sqrt {\frac {\rho}{\mu}} \| \boldsymbol {\theta} _ {0} - \boldsymbol {\theta} ^ {*} \| _ {2} < \epsilon^ {\prime}, +$$ + +where the second inequality uses $\rho$ -smoothness of $\mathcal{L}$ . Therefore, $T = +\infty$ and $\pmb{\theta}(t)$ converges to some point in $U$ . + +# K.4 CONSTRUCTION OF WORKING ZONES + +We construct four nested working zones $(\Gamma^{\epsilon_0},\Gamma^{\epsilon_1},\Gamma^{\epsilon_2},\Gamma^{\epsilon_3})$ in the neighborhood of $\Gamma$ . Later we will show that the local iterates $\pmb{\theta}_{k,t}^{(s)}\in \Gamma^{\epsilon_2}$ and the global iterates $\bar{\pmb{\theta}}^{(s)}\in \Gamma^{\epsilon_0}$ with high probability after $\mathcal{O}(\log \frac{1}{\eta})$ rounds. 
The following lemma illustrates the properties the working zones should satisfy. + +Lemma K.11 (Working zone lemma). There exists constants $\epsilon_0 < \epsilon_1 < \epsilon_2 < \epsilon_3$ such that $(\Gamma^{\epsilon_0}, \Gamma^{\epsilon_1}, \Gamma^{\epsilon_2}, \Gamma^{\epsilon_3})$ satisfy the following properties: + +1. $\mathcal{L}$ satisfies $\mu$ -PL in $\Gamma^{\epsilon_3}$ for some $\mu > 0$ . +2. Any gradient flow starting in $\Gamma^{\epsilon_2}$ converges to some point in $\Gamma$ . Then, by Falconer (1983), $\Phi(\cdot)$ is $\mathcal{C}^\infty$ in $\Gamma^{\epsilon_2}$ . +3. Any $\pmb{\theta} \in \Gamma^{\epsilon_1}$ has an $\epsilon_1$ -neighborhood $B^{\epsilon_1}(\pmb{\theta})$ such that $B^{\epsilon_1}(\pmb{\theta}) \subseteq \Gamma^{\epsilon_2}$ . +4. Any gradient descent starting in $\Gamma^{\epsilon_0}$ with sufficiently small learning rate will stay in $\Gamma^{\epsilon_1}$ . + +Proof. Let $\bar{\theta}^{(0)}$ be initialized such that $\Phi (\bar{\theta}^{(0)})\in \Gamma$ . Let $\mathcal{Z}$ be the set of all points on the gradient flow trajectory starting from $\bar{\theta}^{(0)}$ and $\mathcal{Z}^{\epsilon}$ be the $\epsilon$ -neighborhood of $\mathcal{Z}$ , where $\epsilon$ is a positive constant. Since the gradient flow converges to $\phi^{(0)}$ , $\mathcal{Z}$ and $\mathcal{Z}^{\epsilon}$ are bounded. + +We construct four nested working zones. By Lemma H.3 in Lyu et al. (2022), there exists an $\epsilon_3$ -neighborhood of $\Gamma$ , $\Gamma^{\epsilon_3}$ , such that $\mathcal{L}$ satisfies $\mu$ -PL for some $\mu > 0$ . Let $\mathcal{M}$ be the convex hull of $\Gamma^{\epsilon_3} \cup \mathcal{Z}^\epsilon$ and $\mathcal{M}^{\epsilon_4}$ be the $\epsilon_4$ -neighborhood of $\mathcal{M}$ where $\epsilon_4$ is a positive constant. Then $\mathcal{M}^{\epsilon_4}$ is bounded. 
+ +Define $\rho_{2} = \sup_{\pmb{\theta}\in \mathcal{M}^{\epsilon_{4}}}\| \nabla^{2}\mathcal{L}(\pmb {\theta})\|_{2}$ and $\rho_{3} = \sup_{\mathcal{M}^{\epsilon_{4}}}\| \nabla^{3}\mathcal{L}(\pmb {\theta})\|_{2}$ . By Lemma K.10, we can construct an $\epsilon_{2}$ -neighborhood of $\Gamma$ where $\epsilon_{2} < \sqrt{\frac{\mu}{\rho_{2}}}\epsilon_{3}$ such that all GF starting in $\Gamma^{\epsilon_2}$ converges to $\Gamma$ . By Falconer (1983), $\Phi (\cdot)$ is $\mathcal{C}^2$ in $\Gamma^{\epsilon_3}$ . Define $\nu_{1} = \sup_{\pmb {\theta}\in \Gamma^{\epsilon_{3}}}\| \partial \Phi (\pmb {\theta})\|_{2}$ and $\nu_{2} = \sup_{\pmb {\theta}\in \Gamma^{\epsilon_{3}}}\| \partial^{2}\Phi (\pmb {\theta})\|_{2}$ . We also construct an $\epsilon_{1}$ neighborhood of $\Gamma$ , $\Gamma^{\epsilon_1}$ , where $\epsilon_{1}\leq \frac{1}{2}\epsilon_{2} < \frac{1}{2}\sqrt{\frac{\mu}{\rho_{2}}}\epsilon_{3}$ such that all $\pmb {\theta}\in \Gamma^{\epsilon_1}$ has an $\epsilon_{1}$ neighborhood where $\Phi$ is well defined. Finally, by Lemma K.9, there exists an $\epsilon_0$ -neighborhood of $\Gamma$ where $\epsilon_0\leq \frac{1}{4}\sqrt{\frac{\mu}{\rho_2}}\epsilon_1$ such that all gradient descent iterates starting in $\Gamma^{\epsilon_0}$ with $\eta \leq \frac{1}{\rho_2}$ will stay in $\Gamma^{\epsilon_1}$ . + +Note that the notions of $\mathcal{Z}^{\epsilon}$ , $\mathcal{M}^{\epsilon_4}$ , $\rho_2$ , $\rho_3$ , $\nu_{1}$ , and $\nu_{2}$ defined in the proof will be useful in the remaining part of this section. When analyzing the limiting dynamics of Local SGD, we will show that all $\pmb{\theta}_{k,t}^{(s)}$ stays in $\Gamma^{\epsilon_2}$ , $\tilde{\pmb{u}}_t^{(s)} \in \Gamma^{\epsilon_1}$ , $\tilde{\pmb{\theta}}^{(s)} \in \Gamma^{\epsilon_0}$ with high probability after $\mathcal{O}(\log \frac{1}{\eta})$ rounds. + +# K.5 PHASE 1:ITERATE APPROACHING THE MANIFOLD + +The approaching phase can be further divided into two subphases. 
In the first subphase, $\bar{\theta}^{(0)}$ is initialized such that $\phi^{(0)}\in \Gamma$ . We will show that after a constant number of rounds $s_0$ , $\bar{\theta}^{(s_0)}$ goes to the inner part of $\Gamma^{\epsilon_0}$ such that $\| \bar{\theta}^{(s_0)} - \phi^{(0)}\| _2\leq c\epsilon_0$ with high probability, where $0 < c < 1$ and the constants will be specified later (see Appendix K.5.2). In the second subphase, we show that the iterate can reach within $\tilde{\mathcal{O}} (\sqrt{\eta})$ distance from $\Gamma$ after $\mathcal{O}(\log \frac{1}{\eta})$ rounds with high probability (see Appendix K.5.3). + +# K.5.1 ADDITIONAL NOTATIONS + +Consider an auxiliary sequence $\{\tilde{u}_t^{(s)}\}$ where $\tilde{\pmb{u}}_0^{(s)} = \bar{\pmb{\theta}}^{(s)}$ and $\tilde{\pmb{u}}_{t + 1}^{(s)} = \tilde{\pmb{u}}_t^{(s)} - \eta \nabla \mathcal{L}(\tilde{\pmb{u}}_t^{(s)})$ , $0\leq t\leq H - 1$ . Define $\tilde{\Delta}_{k,t}^{(s)}\coloneqq \pmb{\theta}_{k,t}^{(s)} - \tilde{\pmb{u}}_t^{(s)}$ to be the difference between the local iterate and the gradient descent iterate. Notice that $\tilde{\Delta}_{k,0}^{(s)} = 0$ , for all $k$ and $s$ . + +Consider a gradient flow $\{\pmb{u}(t)\}_{t\geq 0}$ with the initial condition $\pmb{u}(0) = \bar{\pmb{\theta}}^{(0)}$ and converges to $\phi^{(0)}\in \Gamma$ . For simplicity, let $\pmb{u}_t^{(s)}\coloneqq \pmb {u}(s\alpha +t\eta)$ be the gradient flow after $s$ rounds plus $t$ steps. Let $s_0$ be the smallest number such that $\| \pmb{u}_0^{(s_0)} - \pmb{\phi}^{(0)}\| _2\leq \frac{1}{4}\sqrt{\frac{\mu}{\rho_2}}\epsilon_0$ . Note that $s_0$ is a constant independent of $\eta$ . + +In this subsection, the minimum value of the loss in Appendix K.3 corresponds to the loss value on $\Gamma$ , i.e., $\mathcal{L}^{*} = \mathcal{L}(\phi), \forall \phi \in \Gamma$ . + +We also define the following sequence $\{\tilde{\mathbf{Z}}_{k,t}^{(s)}\}_{t = 0}^{H}$ that will be used in the proof. 
Define + +$$ +\tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} := \sum_ {\tau = 0} ^ {t - 1} \left(\prod_ {l = \tau + 1} ^ {t - 1} (\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\tilde {\boldsymbol {u}} _ {l} ^ {(s)}))\right) \boldsymbol {z} _ {k, \tau} ^ {(s)}, \qquad \tilde {\boldsymbol {Z}} _ {k, 0} ^ {(s)} = \boldsymbol {0}. +$$ + +# K.5.2 PROOF FOR SUBPHASE 1 + +First, we have the following lemma about the concentration of $\tilde{\mathbf{Z}}_{k,t}^{(s)}$ . + +Lemma K.12 (Concentration property of $\{\tilde{\mathbf{Z}}_{k,t}^{(s)}\}_{t = 0}^{H}$ ). Given $\bar{\theta}^{(s)}$ such that $\tilde{\boldsymbol{u}}_t^{(s)}\in \Gamma^{\epsilon_3}\cup \mathcal{Z}^\epsilon$ for all $0\leq t\leq H$ , then with probability at least $1 - \delta$ , + +$$ +\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \| _ {2} \leq \tilde {C} _ {1} \sigma_ {\max } \sqrt {2 H \log \frac {2 H K}{\delta}}, \quad \forall 0 \leq t \leq H, k \in [ K ], +$$ + +where $\tilde{C}_1\coloneqq \exp (\alpha \rho_2)$ + +Proof. For each $\tilde{\mathbf{Z}}_{k,t}^{(s)}$ , construct a sequence $\{\tilde{\mathbf{Z}}_{k,t,t'}^{(s)}\}_{t'=0}^t$ : + +$$ +\tilde {\boldsymbol {Z}} _ {k, t, t ^ {\prime}} ^ {(s)} := \sum_ {\tau = 0} ^ {t ^ {\prime} - 1} \left(\prod_ {l = \tau + 1} ^ {t - 1} (\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\tilde {\boldsymbol {u}} _ {l} ^ {(s)}))\right) \boldsymbol {z} _ {k, \tau} ^ {(s)}, \qquad \tilde {\boldsymbol {Z}} _ {k, t, 0} ^ {(s)} = \boldsymbol {0}. +$$ + +Since $\tilde{\pmb{u}}_t^{(s)}\in \Gamma^{\epsilon_3}\cup \mathcal{Z}^\epsilon$ , we have $\| \nabla^2\mathcal{L}(\tilde{\pmb{u}}_t^{(s)})\| _2\leq \rho_2$ for all $0\leq t\leq H$ . Then, for all $\tau$ and $t$ , + +$$ +\left\| \prod_ {l = \tau + 1} ^ {t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {l} ^ {(s)}\right)\right) \right\| _ {2} \leq (1 + \rho_ {2} \eta) ^ {H} \leq \exp (\alpha \rho_ {2}) = \tilde {C} _ {1}. 
+$$ + +Notice that for all $0 \leq t \leq H$ , $\{\tilde{Z}_{k,t,t'}^{(s)}\}_{t'=0}^t$ is a martingale with $\| \tilde{Z}_{k,t,t'}^{(s)} - \tilde{Z}_{k,t,t'-1}^{(s)} \|_2 \leq \tilde{C}_1 \sigma_{\max}$ . By Azuma-Hoeffding's inequality, + +$$ +\mathbb {P} (\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \| _ {2} \geq \epsilon^ {\prime}) \leq 2 \exp \left(\frac {- \epsilon^ {\prime 2}}{2 t (\tilde {C} _ {1} \sigma_ {\max}) ^ {2}}\right) \leq 2 \exp \left(\frac {- \epsilon^ {\prime 2}}{2 H (\tilde {C} _ {1} \sigma_ {\max}) ^ {2}}\right). +$$ + +Taking a union bound on all $k \in [K]$ and $0 \leq t \leq H$ , we can conclude that with probability at least $1 - \delta$ , + +$$ +\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \| _ {2} \leq \tilde {C} _ {1} \sigma_ {\max } \sqrt {2 H \log \frac {2 H K}{\delta}}, \quad \forall 0 \leq t \leq H, k \in [ K ]. +$$ + +![](images/79f43c4c7290c0e6a1cbcb998982f7f4fc2061b5a5c08015aef42898ad274bc7.jpg) + +The following lemma states that the gradient descent iterates will closely track the gradient flow with the same initial point. + +Lemma K.13. Denote $G \coloneqq \sup_{t \geq 0} \| \nabla \mathcal{L}(\boldsymbol{u}(t)) \|_2$ as the upper bound of the gradient on the gradient flow trajectory. If $\| \tilde{\boldsymbol{u}}_t^{(s)} - \boldsymbol{u}_t^{(s)} \|_2 = \mathcal{O}(\sqrt{\eta})$ , then for all $0 \leq t \leq H$ , the closeness of $\tilde{\boldsymbol{u}}_t^{(s)}$ and $\boldsymbol{u}_t^{(s)}$ is bounded by + +$$ +\| \tilde {\boldsymbol {u}} _ {t} ^ {(s)} - \boldsymbol {u} _ {t} ^ {(s)} \| _ {2} \leq \tilde {C} _ {1} \| \tilde {\boldsymbol {u}} _ {0} ^ {(s)} - \boldsymbol {u} _ {0} ^ {(s)} \| _ {2} + \tilde {C} _ {1} \eta G, +$$ + +where $\tilde{C}_1 = \exp (\alpha \rho_2)$ + +Proof. 
We prove by induction that + +$$ +\left\| \tilde {\boldsymbol {u}} _ {t} ^ {(s)} - \boldsymbol {u} _ {t} ^ {(s)} \right\| _ {2} \leq (1 + \rho_ {2} \eta) ^ {t} \left\| \tilde {\boldsymbol {u}} _ {0} ^ {(s)} - \boldsymbol {u} _ {0} ^ {(s)} \right\| _ {2} + \rho_ {2} \eta^ {2} G \sum_ {\tau = 0} ^ {t - 1} (1 + \rho_ {2} \eta) ^ {\tau}. \tag {34} +$$ + +When $t = 0$ , (34) holds trivially. Assume that (34) holds for $0 \leq \tau \leq t$ , then + +$$ +\begin{array}{l} \tilde {\boldsymbol {u}} _ {t + 1} ^ {(s)} - \boldsymbol {u} _ {t + 1} ^ {(s)} = \tilde {\boldsymbol {u}} _ {t} ^ {(s)} - \eta \nabla \mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) - \left(\boldsymbol {u} _ {t} - \int_ {s \alpha + t \eta} ^ {s \alpha + (t + 1) \eta} \nabla \mathcal {L} (\boldsymbol {u} (v)) d v\right) \\ = \tilde {\boldsymbol {u}} _ {t} ^ {(s)} - \boldsymbol {u} _ {t} - \eta (\nabla \mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) - \nabla \mathcal {L} (\boldsymbol {u} _ {t} ^ {(s)})) \\ - \int_ {s \alpha + t \eta} ^ {s \alpha + (t + 1) \eta} \left(\nabla \mathcal {L} (\boldsymbol {u} _ {t} ^ {(s)}) - \nabla \mathcal {L} (\boldsymbol {u} (v))\right) d v. \\ \end{array} +$$ + +By smoothness of $\mathcal{L}$ + +$$ +\begin{array}{l} \| \nabla \mathcal {L} (\boldsymbol {u} _ {t} ^ {(s)}) - \nabla \mathcal {L} (\boldsymbol {u} (v)) \| _ {2} \leq \rho_ {2} \| \boldsymbol {u} _ {t} ^ {(s)} - \boldsymbol {u} (v) \| _ {2} \\ \leq \rho_ {2} \int_ {s \alpha + t \eta} ^ {v} \| \nabla \mathcal {L} (\boldsymbol {u} (w)) \| _ {2} d w \\ \leq \rho_ {2} \eta G. \\ \end{array} +$$ + +Since $\rho_2^2\eta^2 G\sum_{\tau = 0}^{t - 1}(1 + \rho_2\eta)^\tau \leq \eta G(1 + \rho_2\eta)^t\leq \exp (\alpha \rho_2)\eta G$ , then $\| \tilde{\pmb{u}}_t^{(s)} - \pmb {u}_t^{(s)}\| _2 = \mathcal{O}(\sqrt{\eta})$ which implies that $\tilde{\pmb{u}}_t^{(s)}\in \mathcal{M}^{\epsilon_4}$ . 
Hence, $\| \nabla \mathcal{L}(\tilde{\pmb{u}}_t^{(s)}) - \nabla \mathcal{L}(\pmb {u}_t^{(s)})\| _2\leq \rho_2\| \tilde{\pmb{u}}_t^{(s)} - \pmb {u}_t^{(s)}\| _2.$
According to the update rule for $\theta_{k,t}^{(s)}$ and $\tilde{\pmb{u}}_t^{(s)}$ , + +$$ +\boldsymbol {\theta} _ {k, t + 1} ^ {(s)} = \boldsymbol {\theta} _ {k, t} ^ {(s)} - \eta \nabla \mathcal {L} \left(\boldsymbol {\theta} _ {k, t} ^ {(s)}\right) - \eta \boldsymbol {z} _ {k, t} ^ {(s)}, \tag {35} +$$ + +$$ +\tilde {\boldsymbol {u}} _ {t + 1} ^ {(s)} = \tilde {\boldsymbol {u}} _ {t} ^ {(s)} - \eta \nabla \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right). \tag {36} +$$ + +Subtracting (36) from (35) gives + +$$ +\begin{array}{l} \tilde {\boldsymbol {\Delta}} _ {k, t + 1} ^ {(s)} = \tilde {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} - \eta (\nabla \mathcal {L} (\boldsymbol {\theta} _ {k, t} ^ {(s)}) - \nabla \mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)})) - \eta \boldsymbol {z} _ {k, t} ^ {(s)} \\ = \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right)\right) \tilde {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} - \eta \boldsymbol {z} _ {k, t} ^ {(s)} + \eta \tilde {\boldsymbol {v}} _ {k, t} ^ {(s)}. \tag {37} \\ \end{array} +$$ + +Here, $\tilde{\pmb{v}}_{k,t}^{(s)} = (1 - \beta_{k,t}^{(s)})\pmb{\theta}_{k,t}^{(s)} + \beta_{k,t}^{(s)}\tilde{\pmb{u}}_{k,t}^{(s)}$ , where $\beta_{k,t}^{(s)} \in (0,1)$ depends on $\pmb{\theta}_{k,t}^{(s)}$ and $\tilde{\pmb{u}}_t^{(s)}$ . Therefore, $\| \tilde{\pmb{v}}_{k,t}^{(s)}\| _2 \leq \frac{\rho_3}{2}\| \tilde{\pmb{\Delta}}_{k,t}^{(s)}\| _2^2$ if $\pmb{\theta}_{k,t}^{(s)} \in \mathcal{M}^{\epsilon_4}$ . 
Applying (37) $t$ times, we have + +$$ +\begin{array}{l} \tilde {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} = \left[ \prod_ {\tau = 0} ^ {t - 1} (\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\tilde {\boldsymbol {u}} _ {\tau} ^ {(s)})) \right] \tilde {\boldsymbol {\Delta}} _ {k, 0} ^ {(s)} - \eta \sum_ {\tau = 0} ^ {t - 1} \prod_ {l = \tau + 1} ^ {t - 1} (\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\tilde {\boldsymbol {u}} _ {l} ^ {(s)})) \boldsymbol {z} _ {k, \tau} ^ {(s)} \\ + \eta \sum_ {\tau = 0} ^ {t - 1} \prod_ {l = \tau + 1} ^ {t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {l} ^ {(s)}\right)\right) \tilde {\boldsymbol {v}} _ {k, \tau} ^ {(s)}. \\ \end{array} +$$ + +By Cauchy-Schwartz inequality, triangle inequality and the definition of $\tilde{\pmb{Z}}_{k,t}^{(s)}$ , if for all $0 \leq \tau \leq t - 1$ and $k \in [K]$ , $\pmb{\theta}_{k,\tau}^{(s)} \in \mathcal{M}^{\epsilon_4}$ , then we have + +$$ +\left\| \tilde {\boldsymbol {\Delta}} _ {k, t} ^ {(s)} \right\| _ {2} \leq \eta \left\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \right\| _ {2} + \frac {1}{2} \eta \rho_ {3} \sum_ {\tau = 0} ^ {t - 1} \tilde {C} _ {1} \left\| \tilde {\boldsymbol {\Delta}} _ {k, \tau} ^ {(s)} \right\| _ {2} ^ {2}. \tag {38} +$$ + +Applying Lemma K.12 and substituting in the value of $H$ , we have that with probability at least $1 - \delta$ , + +$$ +\left\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \right\| _ {2} \leq \tilde {C} _ {1} \sigma_ {\max } \sqrt {\frac {2 \alpha}{\eta} \log \frac {2 \alpha K}{\eta \delta}}, \quad \forall k \in K, 0 \leq t \leq H. \tag {39} +$$ + +Now we show by induction that for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , when (39) holds, there exists a constant $\tilde{C}_2 > 2\sigma_{\max}\sqrt{2\alpha}\tilde{C}_1$ such that $\| \tilde{\Delta}_{k,t}^{(s)}\|_2 \leq \tilde{C}_2\sqrt{\eta\log\frac{2\alpha K}{\eta\delta}}$ . + +When $t = 0$ , $\tilde{\Delta}_{k,0}^{(s)} = 0$ . 
Assume that $\| \tilde{\Delta}_{k,\tau}^{(s)} \|_2 \leq \tilde{C}_2 \sqrt{\eta \log \frac{2\alpha K}{\eta \delta}}$ , for all $k \in [K]$ , $0 \leq \tau \leq t - 1$ . Then for all $0 \leq \tau \leq t - 1$ , $\pmb{\theta}_{k,\tau}^{(s)} \in \mathcal{M}^{\epsilon_4}$ . Therefore, we can apply (38) and obtain + +$$ +\begin{array}{l} \| \tilde {\Delta} _ {k, t} ^ {(s)} \| _ {2} \leq \eta \| \tilde {Z} _ {k, t} ^ {(s)} \| _ {2} + \frac {1}{2} \eta \rho_ {3} \sum_ {\tau = 0} ^ {t - 1} \tilde {C} _ {1} \| \tilde {\Delta} _ {k, \tau} ^ {(s)} \| _ {2} ^ {2} \\ \leq \tilde {C} _ {1} \sigma_ {\max} \sqrt {2 \alpha \eta \log \frac {2 \alpha K}{\eta \delta}} + \frac {1}{2} \tilde {C} _ {1} \tilde {C} _ {2} ^ {2} \sigma_ {\max} ^ {2} \alpha \rho_ {3} \eta \log \frac {2 \alpha K}{\eta \delta}. \\ \end{array} +$$ + +Given that $\tilde{C}_2 \geq 2\sigma_{\max}\sqrt{2\alpha}\tilde{C}_1$ and $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , when $\eta$ is sufficiently small, $\|\tilde{\Delta}_{k,t}^{(s)}\|_2 \leq \tilde{C}_2\sqrt{\eta\log\frac{2\alpha K}{\eta\delta}}$ . + +To sum up, for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , $\| \tilde{\Delta}_{k,t}^{(s)} \|_2 \leq \tilde{C}_2 \sqrt{\eta \log \frac{2\alpha K}{\eta \delta}}$ for all $k \in [K]$ , $0 \leq t \leq H$ . By triangle inequality, + +$$ +\| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \tilde {\mathbf {u}} _ {H} ^ {(s)} \| _ {2} \leq \frac {1}{K} \sum_ {k \in [ K ]} \| \tilde {\mathbf {A}} _ {k, H} ^ {(s)} \| _ {2} \leq \tilde {C} _ {2} \sqrt {\eta \log \frac {2 \alpha K}{\eta \delta}}. +$$ + +![](images/a563d52b12a9a29f592334f6cdd7f71cbaef23261e24e30e5d89320540334e5b.jpg) + +The combination of Lemma K.13 and Lemma K.14 leads to the following lemma, which states that the Local SGD iterate will enter $\Gamma^{\epsilon_1}$ after $s_0$ rounds with high probability. + +Lemma K.15. 
Given $\bar{\theta}^{(0)}$ such that $\Phi (\bar{\theta}^{(0)})\in \Gamma$ , then for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , there exists a positive constant $\tilde{C}_4$ such that with probability at least $1 - \delta$ , + +$$ +\| \bar {\boldsymbol {\theta}} ^ {(s _ {0})} - \boldsymbol {\phi} ^ {(0)} \| _ {2} \leq \frac {1}{4} \sqrt {\frac {\mu}{\rho_ {2}}} \epsilon_ {0} + \tilde {C} _ {4} \sqrt {\eta \log \frac {1}{\eta \delta}}. +$$ + +Proof. First, we prove by induction that for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , when + +$$ +\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \| _ {2} \leq \tilde {C} _ {1} \sigma_ {\max } \sqrt {2 H \log \frac {2 H K s _ {0}}{\delta}}, \quad \forall 0 \leq t \leq H, k \in [ K ], 0 \leq s < s _ {0}, \tag {40} +$$ + +the closeness of $\bar{\pmb{\theta}}^{(s)}$ and $\pmb{u}_0^{(s)}$ is bounded by + +$$ +\left\| \bar {\boldsymbol {\theta}} ^ {(s)} - \boldsymbol {u} _ {0} ^ {(s)} \right\| _ {2} \leq \sum_ {l = 1} ^ {s} \tilde {C} _ {1} ^ {l} \left(\eta G + \tilde {C} _ {3} \sqrt {\eta \log \frac {s _ {0}}{\eta \delta}}\right), \quad \forall 0 \leq s \leq s _ {0}. \tag {41} +$$ + +When $s = 0$ , $\bar{\theta}^{(0)} = \pmb{u}_0^{(0)}$ . Assume that (41) holds for round $s$ . Then by Lemma K.13, for all $0 \leq t \leq H$ , + +$$ +\begin{array}{l} \| \tilde {\boldsymbol {u}} _ {t} ^ {(s)} - \boldsymbol {u} _ {t} ^ {(s)} \| _ {2} \leq \tilde {C} _ {1} \| \tilde {\boldsymbol {u}} _ {0} ^ {(s)} - \boldsymbol {u} _ {0} ^ {(s)} \| _ {2} + \tilde {C} _ {1} \eta G \\ = \tilde {C} _ {1} \| \bar {\boldsymbol {\theta}} _ {0} ^ {(s)} - \boldsymbol {u} _ {0} ^ {(s)} \| _ {2} + \tilde {C} _ {1} \eta G \\ \leq \sum_ {l = 1} ^ {s} \tilde {C} _ {1} ^ {l + 1} \left(\eta G + \tilde {C} _ {3} \sqrt {\eta \log \frac {s _ {0}}{\eta \delta}}\right) + \tilde {C} _ {1} \eta G. \\ \end{array} +$$ + +Therefore, for sufficiently small $\eta$ , $\tilde{\pmb{u}}_t^{(s)} \in \mathcal{Z}^\epsilon$ , $\forall 0 \leq t \leq H$ . 
Combining the above inequality with Lemma K.14, we have + +$$ +\begin{array}{l} \left\| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \boldsymbol {u} _ {0} ^ {(s + 1)} \right\| _ {2} = \left\| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \boldsymbol {u} _ {H} ^ {(s)} \right\| _ {2} \\ \leq \| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \tilde {\boldsymbol {u}} _ {H} ^ {(s)} \| _ {2} + \| \tilde {\boldsymbol {u}} _ {H} ^ {(s)} - \boldsymbol {u} _ {H} ^ {(s)} \| _ {2} \\ \leq \sum_ {l = 1} ^ {s + 1} \tilde {C} _ {1} ^ {l + 1} \left(\eta G + \tilde {C} _ {3} \sqrt {\eta \log \frac {s _ {0}}{\eta \delta}}\right), \\ \end{array} +$$ + +which concludes the induction. + +Therefore, when (40) holds, there exists a positive constant $\tilde{C}_4$ such that + +$$ +\left\| \bar {\boldsymbol {\theta}} ^ {(s _ {0})} - \boldsymbol {u} _ {0} ^ {(s _ {0})} \right\| _ {2} \leq \tilde {C} _ {4} \sqrt {\eta \log \frac {1}{\eta \delta}}. +$$ + +By definition of $\pmb{u}_0^{(s_0)}$ , + +$$ +\left\| \bar {\boldsymbol {\theta}} ^ {(s _ {0})} - \phi^ {(0)} \right\| _ {2} \leq \frac {1}{4} \sqrt {\frac {\mu}{\rho_ {2}}} \epsilon_ {0} + \tilde {C} _ {4} \sqrt {\eta \log \frac {1}{\eta \delta}}. +$$ + +Finally, according to Lemma K.12, (40) holds with probability at least $1 - \delta$ . + +# K.5.3 PROOF FOR SUBPHASE 2 + +In subphase 2, we show that the iterate can reach within $\tilde{\mathcal{O}} (\sqrt{\eta})$ distance from $\Gamma$ after $\mathcal{O}(\log \frac{1}{\eta})$ rounds with high probability. The following lemma manifests how the potential function $\tilde{\Psi} (\bar{\pmb{\theta}}^{(s)})$ evolves after one round. + +Lemma K.16. 
Given $\bar{\theta}^{(s)}\in \Gamma^{\epsilon_0}$ , for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ + +$$ +\boldsymbol {\theta} _ {k, t} ^ {(s)} \in \Gamma^ {\epsilon_ {2}}, \quad \tilde {\Psi} (\boldsymbol {\theta} _ {k, t} ^ {(s)}) \leq \tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(s)}) + \tilde {C} _ {5} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \forall k \in [ K ], 0 \leq t \leq H +$$ + +and + +$$ +\bar {\boldsymbol {\theta}} ^ {(s + 1)} \in \Gamma^ {\epsilon_ {2}}, \quad \tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(s + 1)}) \leq \exp (- \alpha \mu / 2) \tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(s)}) + \tilde {C} _ {5} \sqrt {\eta \log \frac {1}{\eta \delta}}, +$$ + +where $\tilde{C}_5$ is a positive constant. + +Proof. Since $\bar{\theta}^{(s)}\in \Gamma^{\epsilon_0}$ , then for all $0\leq t\leq H$ , $\tilde{\pmb{u}}_t^{(s)}\in \Gamma^{\epsilon_1}$ by the definition of the working zone. By Lemma K.6, for $\eta \leq \frac{1}{\rho_2}$ , + +$$ +\mathcal {L} \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right) - \mathcal {L} ^ {*} \leq (1 - \mu \eta) ^ {t} \left(\mathcal {L} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) - \mathcal {L} ^ {*}\right) \leq \mathcal {L} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) - \mathcal {L} ^ {*}, \quad \forall 0 \leq t \leq H. +$$ + +Specially, for $t = H$ + +$$ +\mathcal {L} \left(\tilde {\boldsymbol {u}} _ {H} ^ {(s)}\right) - \mathcal {L} ^ {*} \leq (1 - \mu \eta) ^ {\frac {\alpha}{\eta}} \left(\mathcal {L} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) - \mathcal {L} ^ {*}\right) \leq \exp (- \alpha \mu) \left(\mathcal {L} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) - \mathcal {L} ^ {*}\right). +$$ + +Therefore, + +$$ +\tilde {\Psi} \left(\tilde {\boldsymbol {u}} _ {H} ^ {(s)}\right) \leq \exp (- \alpha \mu / 2) \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right). 
+$$ + +According to the proof of Lemma K.14, for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , when + +$$ +\left\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \right\| _ {2} \leq \tilde {C} _ {1} \sigma_ {\max } \sqrt {\frac {2 \alpha}{\eta} \log \frac {2 \alpha K}{\eta \delta}}, \quad \forall k \in [ K ], 0 \leq t \leq H, \tag {42} +$$ + +there exists a constant $\tilde{C}_3$ such that + +$$ +\left\| \boldsymbol {\theta} _ {k, t} ^ {(s)} - \tilde {\boldsymbol {u}} _ {t} ^ {(s)} \right\| _ {2} \leq \tilde {C} _ {3} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \forall 0 \leq t \leq H, k \in [ K ], +$$ + +and + +$$ +\| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \tilde {\boldsymbol {u}} _ {H} ^ {(s)} \| _ {2} \leq \tilde {C} _ {3} \sqrt {\eta \log \frac {1}{\eta \delta}}. +$$ + +Since $\tilde{\pmb{u}}_t^{(s)}\in \Gamma^{\epsilon_1},\forall 0\leq t\leq H,\bar{\pmb{\theta}}^{(s + 1)}\in \Gamma^{\epsilon_2}$ and $\bar{\pmb{\theta}}_{k,t}^{(s)}\in \Gamma^{\epsilon_2},\forall 0\leq t\leq H,k\in [K]$ + +By Lemma K.7, $\tilde{\Psi}(\cdot)$ is $\sqrt{2\rho_2}$ -Lipschitz in $\mathcal{M}^{\epsilon_4}$ . 
Therefore, when (42) holds, there exists a constant $\tilde{C}_5 := \sqrt{2\rho_2}\tilde{C}_3$ such that + +$$ +\begin{array}{l} \tilde {\Psi} \left(\boldsymbol {\theta} _ {k, t} ^ {(s)}\right) \leq \tilde {\Psi} \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right) + \sqrt {2 \rho_ {2}} \left\| \boldsymbol {\theta} _ {k, t} ^ {(s)} - \tilde {\boldsymbol {u}} _ {t} ^ {(s)} \right\| _ {2} \\ \leq \tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(s)}) + \tilde {C} _ {5} \sqrt {\eta \log \frac {1}{\eta \delta}}, \\ \end{array} +$$ + +and + +$$ +\begin{array}{l} \tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(s + 1)}) \leq \tilde {\Psi} (\tilde {\boldsymbol {u}} _ {H} ^ {(s)}) + \sqrt {2 \rho_ {2}} \| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \tilde {\boldsymbol {u}} _ {H} ^ {(s)} \| _ {2} \\ \leq \exp (- \alpha \mu / 2) \tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(s)}) + \tilde {C} _ {5} \sqrt {\eta \log \frac {1}{\eta \delta}}. \\ \end{array} +$$ + +Finally, by Lemma K.12, (42) holds with probability at least $1 - \delta$ . + +![](images/84816759e8f94b41e6aec8648bc3a6fe2aa13567683cabc44b1d77d48be46554.jpg) + +We are thus led to the following lemma which characterizes the evolution of the potential $\tilde{\Psi} (\bar{\theta}^{(s)})$ and $\tilde{\Psi} (\pmb{\theta}_{k,t}^{(s)})$ over multiple rounds. + +Lemma K.17. Given $\| \bar{\theta}^{(0)} - \phi^{(0)}\| _2\leq \frac{1}{2}\sqrt{\frac{\mu}{\rho_2}}\epsilon_0,$ for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ and any integer $1\le R\le$ $R_{\mathrm{tot}}$ , with probability at least $1 - \delta$ + +$$ +\bar {\boldsymbol {\theta}} ^ {(s)} \in \Gamma^ {\epsilon_ {0}}, \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) \leq \exp (- \alpha \mu s / 2) \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(0)}\right) + \frac {1}{1 - \exp (- \alpha \mu / 2)} \tilde {C} _ {5} \sqrt {\eta \log \frac {R}{\eta \delta}}, \forall 0 \leq s \leq R. 
\tag {43} +$$ + +Furthermore, + +$$ +\bar {\boldsymbol {\theta}} _ {k, t} ^ {(s)} \in \Gamma^ {\epsilon_ {2}}, \quad \tilde {\Psi} \left(\boldsymbol {\theta} _ {k, t} ^ {(s)}\right) \leq \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) + \tilde {C} _ {5} \sqrt {\eta \log \frac {R}{\eta \delta}}, \quad \forall 0 \leq t \leq H, 0 \leq s < R, k \in [ K ]. \tag {44} +$$ + +Proof. We prove by induction that for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , when + +$$ +\left\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \right\| _ {2} \leq \tilde {C} _ {1} \sigma_ {\max } \sqrt {\frac {2 \alpha}{\eta} \log \frac {2 R \alpha K}{\eta \delta}}, \quad \forall k \in [ K ], 0 \leq t \leq H, 0 \leq s < R, \tag {45} +$$ + +then for all $0 \leq s \leq R$ , (43) and (44) hold. + +When $s = 0$ , $\bar{\theta}^{(0)} \in \Gamma^{\epsilon_0}$ and (43) trivially holds. By Lemma K.16, (44) holds. Assume that (43) and (44) hold for round $s - 1$ . Then for round $s$ , by Lemma K.16, $\bar{\theta}^{(s)} \in \Gamma^{\epsilon_2}$ and + +$$ +\begin{array}{l} \tilde {\Psi} (\bar {\pmb {\theta}} ^ {(s)}) \leq \exp (- \alpha \mu / 2) \tilde {\Psi} (\bar {\pmb {\theta}} ^ {(s - 1)}) + \tilde {C} _ {5} \sqrt {\eta \log \frac {R}{\eta \delta}} \\ \leq \exp (- \alpha \mu s / 2) \tilde {\Psi} (\bar {\pmb {\theta}} ^ {(0)}) + \frac {1}{1 - \exp (- \alpha \mu / 2)} \tilde {C} _ {5} \sqrt {\eta \log \frac {R}{\eta \delta}}, \\ \end{array} +$$ + +where the second inequality comes from the induction hypothesis. 
By Lemma K.10, + +$$ +\begin{array}{l} \| \bar {\boldsymbol {\theta}} ^ {(s)} - \boldsymbol {\phi} ^ {(s)} \| _ {2} \leq \frac {2}{\sqrt {2 \mu}} \tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(s)}) \\ \leq \frac {2}{\sqrt {2 \mu}} \tilde {\Psi} (\bar {\pmb {\theta}} ^ {(0)}) + \frac {2}{\sqrt {2 \mu} (1 - \exp (- \alpha \mu / 2))} \tilde {C} _ {5} \sqrt {\eta \log \frac {R}{\eta \delta}} \\ \leq \frac {1}{2} \epsilon_ {0} + \frac {2}{\sqrt {2 \mu} (1 - \exp (- \alpha \mu / 2))} \tilde {C} _ {5} \sqrt {\eta \log \frac {R}{\eta \delta}}. \\ \end{array} +$$ + +Here, the last inequality uses $\tilde{\Psi} (\bar{\pmb{\theta}}^{(0)})\leq \sqrt{\frac{\rho_2}{2}}\| \bar{\pmb{\theta}}^{(s)} - \phi^{(0)}\| _2\leq \frac{1}{2}\sqrt{\frac{\mu}{2}}\epsilon_0$ . Hence, when $\eta$ is sufficiently small, $\bar{\pmb{\theta}}^{(s)}\in \Gamma^{\epsilon_0}$ . Still by Lemma K.16, $\bar{\pmb{\theta}}_{k,t}^{(s)}\in \Gamma^{\epsilon_2}$ and + +$$ +\tilde {\Psi} \left(\boldsymbol {\theta} _ {k, t} ^ {(s)}\right) \leq \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) + \tilde {C} _ {5} \sqrt {\eta \log \frac {R}{\eta \delta}}. +$$ + +Finally, according to Lemma K.12, (45) holds with probability at least $1 - \delta$ . + +![](images/eb002efdbfca18cd279a314f92e736b1ccc68740b225b627fcac84867376f49e.jpg) + +The following corollary is a direct consequence of Lemma K.17 and Lemma K.10. + +Corollary K.1. Let $s_1 \coloneqq \lceil \frac{20}{\alpha \mu} \log \frac{1}{\eta} \rceil$ . 
Given $\| \bar{\pmb{\theta}}^{(0)} - \pmb{\phi}^{(0)} \|_2 \leq \frac{1}{2} \sqrt{\frac{\mu}{\rho_2}} \epsilon_0$ , for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ + +$$ +\tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(s _ {1})}\right) \leq \tilde {C} _ {6} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \| \bar {\boldsymbol {\theta}} ^ {(s _ {1})} - \phi^ {(s _ {1})} \| _ {2} \leq \tilde {C} _ {6} \sqrt {\eta \log \frac {1}{\eta \delta}}, \tag {46} +$$ + +where $\tilde{C}_6$ is a constant. + +Proof. Substituting in $R = s_1$ to Lemma K.17 and applying $\| \bar{\pmb{\theta}}^{(s_1)} - \phi^{(s_1)}\|_2 \leq \sqrt{\frac{2}{\mu}}\tilde{\Psi}(\bar{\pmb{\theta}}^{(s_1)})$ for $\bar{\pmb{\theta}}^{(s_1)} \in \Gamma^{\epsilon_0}$ , we obtain the corollary. + +Finally, we provide a high probability bound for the change of the projection on the manifold after $s_1$ rounds $\| \phi^{(s_1)} - \phi^{(0)} \|_2$ . + +Lemma K.18. Let $s_1 \coloneqq \lceil \frac{20}{\alpha \mu} \log \frac{1}{\eta} \rceil$ . Given $\| \bar{\theta}^{(0)} - \phi^{(0)} \|_2 \leq \frac{1}{2} \sqrt{\frac{\mu}{\rho_2}} \epsilon_0$ . For $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , + +$$ +\left\| \phi^ {(s _ {1})} - \phi^ {(0)} \right\| _ {2} \leq \tilde {C} _ {8} \log \frac {1}{\eta} \sqrt {\eta \log \frac {1}{\eta \delta}}. +$$ + +Proof. From Lemma K.17, for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , when + +$$ +\left\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \right\| _ {2} \leq \tilde {C} _ {1} \sigma_ {\max } \sqrt {\frac {2 \alpha}{\eta} \log \frac {2 s _ {1} \alpha K}{\eta \delta}}, \quad \forall k \in [ K ], 0 \leq t \leq H, 0 \leq s < s _ {1}, \tag {47} +$$ + +then $\bar{\theta}^{(s)}\in \Gamma^{\epsilon_0}$ , for all $0\leq s\leq s_{1}$ . By the definition of $\Gamma^{\epsilon_0}$ , $\tilde{u}_t^{(s)}\in \Gamma^{\epsilon_1}$ , for all $0\leq t\leq H,0\leq s\leq s_{1}$ . 
By triangle inequality, $\| \phi^{(s_1)} - \phi^{(0)}\| _2$ can be decomposed as follows. + +$$ +\begin{array}{l} \| \phi^ {(s _ {1})} - \phi^ {(0)} \| _ {2} \leq \sum_ {s = 0} ^ {s _ {1} - 1} \| \phi^ {(s + 1)} - \phi^ {(s)} \| _ {2} \\ \leq \sum_ {s = 0} ^ {s _ {1} - 1} \| \Phi \left(\tilde {\boldsymbol {u}} _ {H} ^ {(s)}\right) - \Phi \left(\tilde {\boldsymbol {u}} _ {0} ^ {(s)}\right) \| _ {2} + \sum_ {s = 0} ^ {s _ {1} - 1} \| \Phi \left(\bar {\boldsymbol {\theta}} ^ {(s + 1)}\right) - \Phi \left(\tilde {\boldsymbol {u}} _ {H} ^ {(s)}\right) \| _ {2}. \tag {48} \\ \end{array} +$$ + +By Lemma K.14, when (47) hold, then for all $0 \leq s < s_1 - 1$ , + +$$ +\| \bar {\pmb {\theta}} ^ {(s + 1)} - \tilde {\pmb {u}} _ {H} ^ {(s)} \| _ {2} \leq \tilde {C} _ {3} \sqrt {\eta \log \frac {s _ {1}}{\eta \delta}}. +$$ + +This implies that $\bar{\pmb{\theta}}^{(s + 1)}\in B^{\epsilon_1}(\tilde{\pmb{u}}_H^{(s)})$ . Since for all $\pmb {\theta}\in \Gamma^{\epsilon_2}$ , $\| \partial \Phi (\pmb {\theta})\| _2\leq \nu_1$ , then $\Phi (\cdot)$ is $\nu_{1}$ -Lipschitz in $B^{\epsilon_1}(\tilde{\pmb{u}}_H^{(s)})$ . This gives + +$$ +\begin{array}{l} \| \Phi (\bar {\boldsymbol {\theta}} ^ {(s + 1)}) - \Phi (\tilde {\boldsymbol {u}} _ {H} ^ {(s)}) \| _ {2} \leq \nu_ {1} \| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \tilde {\boldsymbol {u}} _ {H} ^ {(s)} \| _ {2} \\ \leq \nu_ {1} \tilde {C} _ {3} \sqrt {\eta \log \frac {s _ {1}}{\eta \delta}}. \tag {49} \\ \end{array} +$$ + +Then we analyze $\| \bar{\pmb{\theta}}^{(s + 1)} - \tilde{\pmb{u}}_H^{(s)}\| _2$ . By Lemma K.9 and the definition of $\Gamma^{\epsilon_0}$ and $\Gamma^{\epsilon_1}$ , there exists $\phi \in \Gamma$ such that $\tilde{\pmb{u}}_t^{(s)}\in B^{\epsilon_1}(\phi),\forall 0\leq t\leq H$ . 
Therefore, we can expand $\Phi (\tilde{\pmb{u}}_{t + 1}^{(s)})$ as follows: + +$$ +\begin{array}{l} \Phi \left(\tilde {\boldsymbol {u}} _ {t + 1} ^ {(s)}\right) = \Phi \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)} - \eta \nabla \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right)\right) \\ = \Phi (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) - \eta \partial \Phi (\tilde {\boldsymbol {u}} ^ {(s)}) \nabla \mathcal {L} (\boldsymbol {u} _ {t} ^ {(s)}) + \frac {\eta^ {2}}{2} \partial^ {2} \Phi (\hat {\boldsymbol {u}} _ {t} ^ {(s)}) [ \nabla \mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}), \nabla \mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) ] \\ = \Phi (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) + \frac {\eta^ {2}}{2} \partial^ {2} \Phi \left(c _ {t} ^ {(s)} \tilde {\boldsymbol {u}} _ {t} ^ {(s)} + (1 - c _ {t} ^ {(s)}) \tilde {\boldsymbol {u}} _ {t + 1} ^ {(s)}\right) [ \nabla \mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}), \nabla \mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) ], \\ \end{array} +$$ + +where $c_t^{(s)} \in (0,1)$ . Then we have + +$$ +\begin{array}{l} \| \Phi (\tilde {\boldsymbol {u}} _ {H} ^ {(s)}) - \Phi (\tilde {\boldsymbol {u}} _ {0} ^ {(s)}) \| _ {2} \leq \frac {\eta^ {2}}{2} \sum_ {t = 0} ^ {H - 1} \| \partial^ {2} \Phi \left(\left(c _ {t} ^ {(s)} \tilde {\boldsymbol {u}} _ {t} ^ {(s)} + (1 - c _ {t} ^ {(s)}) \tilde {\boldsymbol {u}} _ {t + 1} ^ {(s)}\right)\right) [ \nabla \mathcal {L} (\tilde {\boldsymbol {u}} ^ {(s)}), \nabla \mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) ] \| _ {2} \\ \leq \frac {\eta^ {2}}{2} \nu_ {2} \sum_ {t = 0} ^ {H - 1} \| \nabla \mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) \| _ {2} ^ {2}. \\ \end{array} +$$ + +By Lemma K.6, $\frac{\eta}{2}\| \nabla \mathcal{L}(\tilde{\boldsymbol{u}}_t^{(s)})\| _2^2\leq \mathcal{L}(\tilde{\boldsymbol{u}}_t^{(s)}) - \mathcal{L}(\tilde{\boldsymbol{u}}_{t + 1}^{(s)})$ . 
Therefore, + +$$ +\begin{array}{l} \left\| \Phi \left(\tilde {\boldsymbol {u}} _ {H} ^ {(s)}\right) - \Phi \left(\tilde {\boldsymbol {u}} _ {0} ^ {(s)}\right) \right\| _ {2} \leq \eta \nu_ {2} \left(\mathcal {L} \left(\tilde {\boldsymbol {u}} _ {0} ^ {(s)}\right) - \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {H} ^ {(s)}\right)\right) \\ \leq \eta \nu_ {2} \left[ \tilde {\Psi} \left(\tilde {\boldsymbol {\theta}} ^ {(s)}\right) \right] ^ {2} \\ \leq \nu_ {2} \eta \left[ 2 \exp (- \alpha s \mu) \tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(0)}) + \frac {\tilde {C} _ {5} ^ {2} \eta}{(1 - \exp (- \alpha \mu / 2)) ^ {2}} \log \frac {s _ {1}}{\eta \delta} \right], \tag {50} \\ \end{array} +$$ + +where the last inequality uses Cauchy-Schwartz inequality and Lemma K.17. Summing up (50), we obtain + +$$ +\begin{array}{l} \sum_ {s = 0} ^ {s _ {1} - 1} \| \Phi (\tilde {\boldsymbol {u}} _ {H} ^ {(s)}) - \Phi (\tilde {\boldsymbol {u}} _ {0} ^ {(s)}) \| _ {2} \leq \nu_ {2} \eta \left[ 2 \tilde {\Psi} (\tilde {\boldsymbol {\theta}} ^ {(0)}) \sum_ {s = 0} ^ {s _ {1} - 1} \exp (- \alpha \mu s) + \frac {s _ {1} \tilde {C} _ {5} ^ {2} \eta}{(1 - \exp (- \alpha \mu / 2)) ^ {2}} \log \frac {s _ {1}}{\eta \delta} \right] \\ \leq \tilde {C} _ {7} \eta \log \frac {1}{\eta} \log \frac {1}{\eta \delta}, \tag {51} \\ \end{array} +$$ + +where $\tilde{C}_7$ is a constant. Substituting (49) and (51) into (48), for sufficiently small $\eta$ , we have + +$$ +\begin{array}{l} \left\| \phi^ {(s _ {1})} - \phi^ {(0)} \right\| _ {2} \leq \nu_ {1} \tilde {C} _ {3} s _ {1} \sqrt {\eta \log \frac {s _ {1}}{\eta \delta}} + \tilde {C} _ {7} \eta \log \frac {1}{\eta} \log \frac {1}{\eta \delta} \\ \leq \tilde {C} _ {8} \log \frac {1}{\eta} \sqrt {\eta \log \frac {1}{\eta \delta}}, \\ \end{array} +$$ + +where $\tilde{C}_8$ is a constant. Finally, according to Lemma K.12, (47) holds with probability at least $1 - \delta$ . 
+ +# K.6 PHASE 2: ITERATES STAYING CLOSE TO MANIFOLD + +In this subsection, we show that $\| \pmb{x}_{k,t}^{(s)}\| _2 = \tilde{\mathcal{O}} (\sqrt{\eta})$ and $\| \bar{\pmb{\theta}}^{(s + r)} - \bar{\pmb{\theta}}^{(s)}\| _2 = \tilde{\mathcal{O}} (\eta^{0.5 - 0.5\beta}),\forall 0\leq r\leq R_{\mathrm{grp}}$ with high probability. + +# K.6.1 ADDITIONAL NOTATIONS + +Before presenting the lemmas, we define the following martingale $\{\pmb{m}_{k,t}^{(s)}\}_{t = 0}^{H}$ that will be useful in the proof: + +$$ +\boldsymbol {m} _ {k, t} ^ {(s)} := \sum_ {\tau = 0} ^ {t - 1} \boldsymbol {z} _ {k, \tau} ^ {(s)}, \quad \boldsymbol {m} _ {k, 0} ^ {(s)} = \mathbf {0}. +$$ + +We also define $\tilde{P}:\mathbb{R}^d\to \mathbb{R}^{d\times d}$ as an extension of $\partial \Phi$ + +$$ +\tilde {\boldsymbol {P}} (\boldsymbol {\theta}) := \left\{ \begin{array}{l l} \partial \Phi (\boldsymbol {\theta}), & \text {i f} \ \boldsymbol {\theta} \in \Gamma^ {\epsilon_ {2}}, \\ \mathbf {0}, & \text {o t h e r w i s e}. \end{array} \right. +$$ + +Finally, we define a martingale $\{Z_t^{(s)}: s \geq 0, 0 \leq t \leq H\}$ : + +$$ +\boldsymbol {Z} _ {t} ^ {(s)} := \frac {1}{K} \sum_ {k \in [ K ]} \sum_ {r = 0} ^ {s - 1} \sum_ {\tau = 0} ^ {H - 1} \tilde {\boldsymbol {P}} (\bar {\boldsymbol {\theta}} ^ {(r)}) \boldsymbol {z} _ {k, \tau} ^ {(r)} + \frac {1}{K} \sum_ {k \in [ K ]} \sum_ {\tau = 0} ^ {t - 1} \tilde {\boldsymbol {P}} (\bar {\boldsymbol {\theta}} ^ {(s)}) \boldsymbol {z} _ {k, \tau} ^ {(s)}, \quad \boldsymbol {Z} _ {0} ^ {(0)} = \mathbf {0}. +$$ + +# K.6.2 PROOF FOR THE HIGH PROBABILITY BOUNDS + +A direct application of Azuma-Hoeffding's inequality yields the following lemma. + +Lemma K.19 (Concentration property of $m_{k,t}^{(s)}$ ). 
With probability at least $1 - \delta$ , the following holds: + +$$ +\| \boldsymbol {m} _ {k, t} ^ {(s)} \| _ {2} \leq \tilde {C} _ {9} \sqrt {\frac {1}{\eta} \log \frac {1}{\eta \delta}}, \quad \forall 0 \leq t \leq H, k \in [ K ], 0 \leq s < R _ {\mathrm {g r p}}, +$$ + +where $\tilde{C}_9$ is a constant. + +Proof. Notice that $\| \pmb{m}_{k,t + 1}^{(s)} - \pmb{m}_{k,t}^{(s)}\| _2\leq \sigma_{\max}$ . Then by Azuma-Hoeffdings inequality, + +$$ +\mathbb {P} \left(\| \boldsymbol {m} _ {k, t} ^ {(s)} \| _ {2} \geq \epsilon^ {\prime}\right) \leq 2 \exp \left(- \frac {\epsilon^ {\prime 2}}{2 t \sigma_ {\max } ^ {2}}\right). +$$ + +Taking union bound on $K$ clients, $H$ local steps and $R_{\mathrm{grp}}$ rounds, we obtain that the following inequality holds with probability at least $1 - \delta$ : + +$$ +\| \boldsymbol {m} _ {k, t} ^ {(s)} \| _ {2} \leq \sigma_ {\max } \sqrt {2 H \log \frac {2 K H R _ {\mathrm {g r p}}}{\delta}}, \quad \forall 0 \leq t \leq H, k \in [ K ], 0 \leq s < R _ {\mathrm {g r p}}. +$$ + +Substituting in $H = \frac{\alpha}{\eta}$ and $R_{\mathrm{grp}} = \left\lfloor \frac{1}{\alpha\eta^{\beta}}\right\rfloor$ yields the lemma. + +![](images/5605f695561744585b52e30a41f9e93710d68c24a2fb59f618e73162bc199eac.jpg) + +Again applying Azuma-Hoeffding's inequality, we have the following lemma about the concentration property of $Z_{t}^{(s)}$ . + +Lemma K.20 (Concentration property of $Z_{t}^{(s)}$ ). With probability at least $1 - \delta$ , the following inequality holds: + +$$ +\| \boldsymbol {Z} _ {H} ^ {(s)} \| _ {2} \leq \tilde {C} _ {1 2} \eta^ {- 0. 5 - 0. 5 \beta} \sqrt {\log \frac {1}{\eta \delta}}, \quad \forall 0 \leq s < R _ {\mathrm {g r p}}. +$$ + +Proof. Notice that $\| \mathbf{Z}_{t + 1}^{(s)} - \mathbf{Z}_t^{(s)}\| _2\leq \nu_2\sigma_{\max},\forall 0\leq t\leq H - 1$ and $\| \mathbf{Z}_0^{(s + 1)} - \mathbf{Z}_H^{(s)}\| _2\leq \nu_2\sigma_{\max}$ . 
By Azuma-Hoeffding's inequality, + +$$ +\mathbb {P} (\| \pmb {Z} _ {t} ^ {(s)} \| _ {2} \geq \epsilon^ {\prime}) \leq 2 \exp \left(- \frac {\epsilon^ {\prime 2}}{2 (s H + t) \nu_ {2} ^ {2} \sigma_ {\mathrm {m a x}} ^ {2}}\right). +$$ + +Taking union bound on $R_{\mathrm{grp}}$ rounds, we obtain that the following inequality holds with probability at least $1 - \delta$ : + +$$ +\| Z _ {H} ^ {(s)} \| _ {2} \leq \sigma_ {\max } \nu_ {2} \sqrt {2 H R _ {\mathrm {g r p}} \log \frac {2 R _ {\mathrm {g r p}}}{\delta}}, \quad \forall 0 \leq s < R _ {\mathrm {g r p}}. +$$ + +Substituting in $H = \frac{\alpha}{\eta}$ and $R_{\mathrm{grp}} = \left\lfloor \frac{1}{\alpha\eta^{\beta}}\right\rfloor$ yields the lemma. + +![](images/623b23c9acf1b65ac7da7488d501bfbee0de06fd8b00de1bb3ce750aa4f27220.jpg) + +We proceed to present a direct corollary of Lemma K.17 which provides a bound for the potential function over $R_{\mathrm{grp}}$ rounds. + +Lemma K.21. Given $\| \bar{\theta}^{(0)} - \phi^{(0)}\| _2\leq C_0\sqrt{\eta\log\frac{1}{\eta}}$ where $C_0$ is a constant, then for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , + +$$ +\bar {\boldsymbol {\theta}} ^ {(s)} \in \Gamma^ {\epsilon_ {0}}, \quad \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) \leq C _ {1} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \forall 0 \leq s < R _ {\mathrm {g r p}}, \tag {52} +$$ + +and + +$$ +\bar {\boldsymbol {\theta}} _ {k, t} ^ {(s)} \in \Gamma^ {\epsilon_ {2}}, \quad \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} _ {k, t} ^ {(s)}\right) \leq C _ {1} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \forall 0 \leq s < R _ {\mathrm {g r p}}, 0 \leq t \leq H, k \in [ K ], \tag {53} +$$ + +where $C_1$ is a constant that can depend on $C_0$ . 
+ +Furthermore, + +$$ +\tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(R _ {\mathrm {g r p}})}) \leq \tilde {C} _ {1 0} \sqrt {\eta \log \frac {1}{\eta \delta}}, +$$ + +where $\tilde{C}_{10}$ is a constant independent of $C_0$ . + +Proof. By $\rho_{2}$ -smoothness of $\mathcal{L}$ , $\tilde{\Psi}(\bar{\pmb{\theta}}^{(0)}) \leq C_0 \sqrt{\frac{\eta \rho_2}{2} \log \frac{1}{\eta}}$ . Substituting $R_{\mathrm{grp}} = \left\lfloor \frac{1}{\alpha \eta^{\beta}} \right\rfloor$ and $\tilde{\Psi}(\bar{\pmb{\theta}}^{(0)}) \leq C_0 \sqrt{\frac{\eta \rho_2}{2} \log \frac{1}{\eta}}$ into Lemma K.17, for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , (52) and (53) hold, where $C_1$ is a constant that can depend on $C_0$ . + +Furthermore, for round $\bar{\theta}^{(R_{\mathrm{grp}})}$ + +$$ +\tilde {\Psi} (\bar {\pmb {\theta}} ^ {(R _ {\mathrm {g r p}})}) \leq \exp (- \mathcal {O} (\eta^ {- \beta})) + \frac {1}{1 - \exp (- \alpha \mu / 2)} \tilde {C} _ {5} \sqrt {\eta \log \frac {R _ {\mathrm {g r p}}}{\eta \delta}} \leq \tilde {C} _ {1 0} \sqrt {\eta \log \frac {1}{\eta \delta}}, +$$ + +where $\tilde{C}_{10}$ is a constant independent of $C_0$ . + +![](images/3fbad9ac8622b8a91201b5a489176323ab972ffbf93a6adb7ee011279b012a48.jpg) + +Lemma K.22. 
Given $\| \bar{\theta}^{(0)} - \phi^{(0)} \|_2 \leq C_0 \sqrt{\eta \log \frac{1}{\eta}}$ where $C_0$ is a constant, then for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , for all $0 \leq s < R_{\mathrm{grp}}, 0 \leq t \leq H, k \in [K]$ , + +$$ +\begin{array}{l} \| \boldsymbol {x} _ {k, t} ^ {(s)} \| _ {2} \leq C _ {2} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \| \bar {\boldsymbol {x}} _ {H} ^ {(s)} \| _ {2} \leq C _ {2} \sqrt {\eta \log \frac {1}{\eta \delta}}, \\ \| \bar {\boldsymbol {\theta}} _ {k, t} ^ {(s)} - \bar {\boldsymbol {\theta}} ^ {(s)} \| _ {2} \leq C _ {2} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \bar {\boldsymbol {\theta}} ^ {(s)} \| _ {2} \leq C _ {2} \sqrt {\eta \log \frac {1}{\eta \delta}}. \\ \end{array} +$$ + +where $C_2$ is a constant that can depend on $C_0$ . Furthermore, + +$$ +\| \bar {\pmb {\theta}} ^ {(R _ {\mathrm {g r p}})} - \pmb {\phi} ^ {(R _ {\mathrm {g r p}})} \| _ {2} \leq \tilde {C} _ {1 1} \sqrt {\eta \log \frac {1}{\eta \delta}}, +$$ + +where $\tilde{C}_{11}$ is a constant independent of $C_0$ . + +Proof. Decomposing $\boldsymbol{x}_{k,t}^{(s)}$ by triangle inequality, we have + +$$ +\| \boldsymbol {x} _ {k, t} ^ {(s)} \| _ {2} \leq \| \boldsymbol {\theta} _ {k, t} ^ {(s)} - \bar {\boldsymbol {\theta}} ^ {(s)} \| _ {2} + \| \bar {\boldsymbol {\theta}} ^ {(s)} - \boldsymbol {\phi} ^ {(s)} \| _ {2}. +$$ + +We first bound $\| \bar{\theta}^{(s)} - \phi^{(s)} \|_2$ . 
By Lemma K.21, for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \frac{\delta}{2}$ , + +$$ +\tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) \leq C _ {1} \sqrt {\eta \log \frac {2}{\eta \delta}}, \forall 0 \leq s < R _ {\mathrm {g r p}}, \tag {54} +$$ + +$$ +\tilde {\Psi} \left(\boldsymbol {\theta} _ {k, t} ^ {(s)}\right) \leq C _ {1} \sqrt {\eta \log \frac {2}{\eta \delta}}, \quad \forall 0 \leq s < R _ {\mathrm {g r p}}, 0 \leq t \leq H, \tag {55} +$$ + +and + +$$ +\tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {\left(R _ {\mathrm {g r p}}\right)}\right) \leq \tilde {C} _ {1 0} \sqrt {\eta \log \frac {2}{\eta \delta}}, \tag {56} +$$ + +where $C_2$ is a constant that may depend on $C_0$ and $\tilde{C}_{10}$ is a constant independent of $C_0$ . When (54) and (56) hold, by Lemma K.10, + +$$ +\left\| \bar {\boldsymbol {\theta}} ^ {(s)} - \phi^ {(s)} \right\| _ {2} \leq \sqrt {\frac {2}{\mu}} \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right) \leq C _ {1} \sqrt {\frac {2 \eta}{\mu} \log \frac {2}{\eta \delta}}, \tag {57} +$$ + +$$ +\left\| \bar {\boldsymbol {\theta}} ^ {\left(R _ {\mathrm {g r p}}\right)} - \phi^ {\left(R _ {\mathrm {g r p}}\right)} \right\| _ {2} \leq \sqrt {\frac {2}{\mu}} \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {\left(R _ {\mathrm {g r p}}\right)}\right) \leq \tilde {C} _ {1 0} \sqrt {\frac {2 \eta}{\mu} \log \frac {2}{\eta \delta}}. \tag {58} +$$ + +Then we bound $\| \pmb{\theta}_{k,t}^{(s)} - \bar{\pmb{\theta}}^{(s)}\| _2$ . By the update rule, we have + +$$ +\boldsymbol {\theta} _ {k, t} ^ {(s)} = \bar {\boldsymbol {\theta}} ^ {(s)} - \eta \sum_ {\tau = 0} ^ {t - 1} \nabla \mathcal {L} (\boldsymbol {\theta} _ {k, \tau} ^ {(s)}) - \eta \sum_ {\tau = 0} ^ {t - 1} \boldsymbol {z} _ {k, \tau} ^ {(s)} = \bar {\boldsymbol {\theta}} ^ {(s)} - \eta \sum_ {\tau = 0} ^ {t - 1} \nabla \mathcal {L} (\boldsymbol {\theta} _ {k, \tau} ^ {(s)}) - \eta \boldsymbol {m} _ {k, t} ^ {(s)}. 
+$$ + +Still by triangle inequality, we have + +$$ +\| \boldsymbol {\theta} _ {k, t} ^ {(s)} - \bar {\boldsymbol {\theta}} ^ {(s)} \| _ {2} \leq \eta \sum_ {\tau = 0} ^ {t - 1} \| \nabla \mathcal {L} (\boldsymbol {\theta} _ {k, \tau} ^ {(s)}) \| _ {2} + \eta \| \boldsymbol {m} _ {k, t} ^ {(s)} \| _ {2}. +$$ + +Due to $\rho_{2}$ -smoothness of $\mathcal{L}$ , when (55) holds, + +$$ +\left\| \nabla \mathcal {L} \left(\boldsymbol {\theta} _ {k, \tau} ^ {(s)}\right) \right\| _ {2} \leq \sqrt {2 \rho_ {2}} \tilde {\Psi} \left(\boldsymbol {\theta} _ {k, \tau} ^ {(s)}\right) \leq C _ {1} \sqrt {2 \rho_ {2} \eta \log \frac {2}{\eta \delta}}. \tag {59} +$$ + +By Lemma K.19, with probability at least $1 - \frac{\delta}{2}$ , + +$$ +\left\| \boldsymbol {m} _ {k, t} ^ {(s)} \right\| _ {2} \leq \tilde {C} _ {9} \sqrt {\frac {1}{\eta} \log \frac {2}{\eta \delta}}, \quad \forall 0 \leq t \leq H, k \in [ K ], 0 \leq s < R _ {\mathrm {g r p}}. \tag {60} +$$ + +Combining (59) and (60), when (55) and (56) hold simultaneously, there exists a constant $C_3$ which can depend on $C_0$ such that + +$$ +\left\| \boldsymbol {\theta} _ {k, t} ^ {(s)} - \bar {\boldsymbol {\theta}} ^ {(s)} \right\| _ {2} \leq C _ {3} \sqrt {\eta \log \frac {1}{\eta \delta}}, \quad \forall k \in [ K ], 0 \leq t \leq H. \tag {61} +$$ + +By triangle inequality, + +$$ +\| \bar {\pmb {\theta}} ^ {(s + 1)} - \bar {\pmb {\theta}} ^ {(s)} \| _ {2} \leq C _ {3} \sqrt {\eta \log \frac {1}{\eta \delta}}. +$$ + +Combining (57), (58) and (61), we complete the proof. + +![](images/81f1985b57526f1bc28ab22f11b85687c90ead9c450b4c1809bd0f9183a00e5a.jpg) + +Then we provide high probability bounds for the movement of $\phi^{(s)}$ within $R_{\mathrm{grp}}$ rounds. + +Lemma K.23. 
Given $\| \bar{\theta}^{(0)} - \phi^{(0)}\|_2 \leq C_0\sqrt{\eta\log\frac{1}{\eta}}$ where $C_0$ is a constant, then for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , + +$$ +\| \phi^ {(s)} - \phi^ {(0)} \| _ {2} \leq C _ {4} \eta^ {0. 5 - 0. 5 \beta} \sqrt {\log \frac {1}{\eta \delta}}, \quad \forall 1 \leq s \leq R _ {\mathrm {g r p}}. +$$ + +where $C_4$ is a constant that can depend on $C_0$ . + +Proof. By the update rule of Local SGD, + +$$ +\pmb {\theta} _ {k, H} ^ {(s)} = \bar {\pmb {\theta}} ^ {(s)} - \eta \sum_ {t = 0} ^ {H - 1} \nabla \mathcal {L} (\pmb {\theta} _ {k, t} ^ {(s)}) - \eta \sum_ {t = 0} ^ {H - 1} \pmb {z} _ {k, t} ^ {(s)} +$$ + +Averaging among $K$ clients gives + +$$ +\bar {\boldsymbol {\theta}} ^ {(s + 1)} = \bar {\boldsymbol {\theta}} ^ {(s)} - \frac {\eta}{K} \sum_ {t = 0} ^ {H - 1} \sum_ {k \in [ K ]} \nabla \mathcal {L} (\boldsymbol {\theta} _ {k, t} ^ {(s)}) - \frac {\eta}{K} \sum_ {t = 0} ^ {H - 1} \sum_ {k \in [ K ]} \boldsymbol {z} _ {k, t} ^ {(s)}. +$$ + +By Lemma K.22, for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , the following holds with probability at least $1 - \delta / 3$ , + +$$ +\left\| \boldsymbol {\theta} _ {k, t} ^ {(s)} - \bar {\boldsymbol {\theta}} ^ {(s)} \right\| _ {2} \leq C _ {2} \sqrt {\eta \log \frac {3}{\eta \delta}}, \quad \boldsymbol {\theta} _ {k, t} ^ {(s)} \in B ^ {\epsilon_ {0}} \left(\phi^ {(s)}\right), \forall 0 \leq s < R _ {\mathrm {g r p}}, 0 \leq t \leq H, k \in [ K ], \tag {62} +$$ + +$$ +\left\| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \bar {\boldsymbol {\theta}} ^ {(s)} \right\| _ {2} \leq C _ {2} \sqrt {\eta \log \frac {3}{\eta \delta}}, \quad \bar {\boldsymbol {\theta}} ^ {(s)}, \bar {\boldsymbol {\theta}} ^ {(s + 1)} \in B ^ {\epsilon_ {0}} \left(\phi^ {(s)}\right), \quad \forall 0 \leq s < R _ {\mathrm {g r p}}. 
\tag {63} +$$ + +When (62) and (63) hold, we can expand $\Phi (\bar{\theta}^{(s + 1)})$ as follows: + +$$ +\begin{array}{l} \phi^ {(s + 1)} = \phi^ {(s)} + \partial \Phi (\bar {\theta} ^ {(s)}) (\bar {\theta} ^ {(s + 1)} - \bar {\theta} ^ {(s)}) + \frac {1}{2} \partial^ {2} \Phi (\tilde {\theta} ^ {(s)}) [ \bar {\theta} ^ {(s + 1)} - \bar {\theta} ^ {(s)}, \bar {\theta} ^ {(s + 1)} - \bar {\theta} ^ {(s)} ] \\ = \phi^ {(s)} \underbrace {- \frac {\eta}{K} \sum_ {t = 0} ^ {H - 1} \sum_ {k \in [ K ]} \partial \Phi (\bar {\boldsymbol {\theta}} ^ {(s)}) \nabla \mathcal {L} (\boldsymbol {\theta} _ {k , t} ^ {(s)})} _ {\mathcal {T} _ {1} ^ {(s)}} \underbrace {- \frac {\eta}{K} \partial \Phi (\bar {\boldsymbol {\theta}} ^ {(s)}) \sum_ {t = 0} ^ {H - 1} \sum_ {k \in [ K ]} z _ {k , t} ^ {(s)}} _ {\mathcal {T} _ {2} ^ {(s)}} \\ + \underbrace {\frac {1}{2} \partial^ {2} \Phi (a ^ {(s)} \bar {\boldsymbol {\theta}} ^ {(s)} + (1 - a ^ {(s)}) \bar {\boldsymbol {\theta}} ^ {(s + 1)}) [ \boldsymbol {\theta} ^ {(s + 1)} - \boldsymbol {\theta} ^ {(s)} , \boldsymbol {\theta} ^ {(s + 1)} - \boldsymbol {\theta} ^ {(s)} ]} _ {\mathcal {T} _ {3} ^ {(s)}}, \\ \end{array} +$$ + +where $a^{(s)}\in (0,1)$ . Telescoping from round 0 to $s - 1$ , we have + +$$ +\| \phi^ {(s)} - \phi^ {(0)} \| _ {2} = \sum_ {r = 0} ^ {s - 1} \mathcal {T} _ {1} ^ {(r)} + \sum_ {r = 0} ^ {s - 1} \mathcal {T} _ {2} ^ {(r)} + \sum_ {r = 0} ^ {s - 1} \mathcal {T} _ {3} ^ {(r)}. +$$ + +From (63), we can bound $\| \mathcal{T}_3^{(s)}\| _2$ by $\| \mathcal{T}_3^{(s)}\| _2\leq \frac{1}{2}\nu_2C_2^2\eta \log \frac{3}{\eta\delta}$ . We proceed to bound $\| \mathcal{T}_1^{(s)}\| _2$ . 
When (62) and (63) hold, we have + +$$ +\begin{array}{l} \partial \Phi (\bar {\boldsymbol {\theta}} ^ {(s)}) \nabla \mathcal {L} (\boldsymbol {\theta} _ {k, t} ^ {(s)}) = \partial \Phi (\boldsymbol {\theta} _ {k, t} ^ {(s)}) \nabla \mathcal {L} (\boldsymbol {\theta} _ {k, t} ^ {(s)}) + \partial^ {2} \Phi (\hat {\boldsymbol {\theta}} _ {k, t} ^ {(s)}) [ \boldsymbol {\theta} _ {k, t} ^ {(s)} - \bar {\boldsymbol {\theta}} ^ {(s)}, \nabla \mathcal {L} (\boldsymbol {\theta} _ {k, t} ^ {(s)}) ] \\ = \partial^ {2} \Phi (b _ {k, t} ^ {(s)} \bar {\boldsymbol {\theta}} ^ {(s)} + (1 - b _ {k, t} ^ {(s)}) \hat {\boldsymbol {\theta}} _ {k, t} ^ {(s)}) [ \boldsymbol {\theta} _ {k, t} ^ {(s)} - \bar {\boldsymbol {\theta}} ^ {(s)}, \nabla \mathcal {L} (\boldsymbol {\theta} _ {k, t} ^ {(s)}) ], \\ \end{array} +$$ + +where $b_{k,t}^{(s)} \in (0,1)$ . By Lemma K.17, with probability at least $1 - \delta /3$ , the following holds: + +$$ +\left\| \nabla \mathcal {L} \left(\boldsymbol {\theta} _ {k, t} ^ {(s)}\right) \right\| _ {2} \leq \sqrt {2 \rho_ {2}} \tilde {\Psi} \left(\boldsymbol {\theta} _ {k, t} ^ {(s)}\right) \leq C _ {1} \sqrt {2 \rho_ {2} \eta \log \frac {3}{\eta \delta}}, \forall k \in [ K ], 0 \leq t \leq H, 0 \leq s < R _ {\mathrm {g r p}}. \tag {64} +$$ + +When (62), (63) and (64) hold simultaneously, we have for all $0 \leq s < R_{\mathrm{grp}}$ + +$$ +\begin{array}{l} \| \mathcal {T} _ {1} ^ {(s)} \| _ {2} \leq \frac {\eta \nu_ {2}}{K} \sum_ {t = 0} ^ {H - 1} \| \boldsymbol {\theta} _ {k, t} ^ {(s)} - \bar {\boldsymbol {\theta}} ^ {(s)} \| _ {2} \| \nabla \mathcal {L} (\boldsymbol {\theta} _ {k, t} ^ {(s)}) \| _ {2} \\ \leq \frac {\alpha \nu_ {2} \sqrt {2 \rho_ {2}} C _ {1} C _ {2}}{K} \eta \log \frac {3}{\eta \delta}. \\ \end{array} +$$ + +Finally, we bound $\| \sum_{r = 0}^{s - 1}\mathcal{T}_2^{(r)}\| _2$ . 
By Lemma K.20, the following inequality holds with probability at least $1 - \delta /3$ : + +$$ +\left\| \boldsymbol {Z} _ {H} ^ {(s)} \right\| _ {2} \leq \tilde {C} _ {1 2} \eta^ {- 0. 5 - 0. 5 \beta} \sqrt {\log \frac {3}{\eta \delta}}, \quad \forall 0 \leq s < R _ {\mathrm {g r p}}. \tag {65} +$$ + +When (62), (63) and (65) hold simultaneously, we have + +$$ +\| \sum_ {r = 0} ^ {s} \mathcal {T} _ {2} ^ {(r)} \| _ {2} = \eta \| \boldsymbol {Z} _ {H} ^ {(s)} \| _ {2} \leq \tilde {C} _ {1 2} \eta^ {0. 5 - 0. 5 \beta} \sqrt {\log \frac {3}{\eta \delta}}, \quad \forall 0 \leq s < R _ {\mathrm {g r p}} +$$ + +Combining the bounds for $\| \mathcal{T}_1^{(s)}\| _2, \| \sum_{r = 0}^s\mathcal{T}_2^{(r)}\| _2$ and $\| \mathcal{T}_3^{(s)}\| _2$ and taking union bound, we obtain that for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , the following inequality holds with probability at least $1 - \delta$ : + +$$ +\| \boldsymbol {\phi} ^ {(s)} - \boldsymbol {\phi} ^ {(0)} \| _ {2} \leq C _ {4} \eta^ {0. 5 - 0. 5 \beta} \sqrt {\log \frac {1}{\eta \delta}}, \quad \forall 1 \leq s \leq R _ {\mathrm {g r p}}. +$$ + +where $C_4$ is a constant that can depend on $C_0$ . + +# K.7 SUMMARY OF THE DYNAMICS AND PROOF OF THEOREMS J.1 AND J.2 + +Based on the results in Appendix K.5 and Appendix K.6, we summarize the dynamics of Local SGD iterates and then present the proof of Theorems J.1 and J.2 in this subsection. For convenience, we first introduce the definition of global step and $\delta$ -good step. + +Definition K.3 (Global step). Define $\mathcal{I}$ as the index set $\{(s,t):s\geq 0,0\leq t\leq H\}$ with lexicographical order, which means $(s_1,t_1)\preceq (s_2,t_2)$ if and only if $s_1 < s_2$ or $(s_{1} = s_{2}$ and $t_1\leq t_2)$ . A global step is indexed by $(s,t)$ corresponding to the $t$ -th local step at round $s$ . + +Definition K.4 ( $\delta$ -good step). 
In the training process of Local SGD, we say the global step $(s,t) \preceq (R_{\mathrm{tot}},0)$ is $\delta$ -good if the following inequalities hold: + +$$ +\| \tilde {\mathbf {Z}} _ {k, \tau} ^ {(r)} \| _ {2} \leq \exp (\alpha \rho_ {2}) \sigma_ {\max } \sqrt {2 H \log \frac {6 H R _ {\operatorname* {t o t}} K}{\delta}}, \quad \forall k \in [ K ], (r, \tau) \preceq (s, t), +$$ + +$$ +\| \boldsymbol {m} _ {k, \tau} ^ {(r)} \| _ {2} \leq \sigma_ {\max } \sqrt {2 H \log \frac {6 K H R _ {\mathrm {t o t}}}{\delta}}, \quad \forall k \in [ K ], (r, \tau) \preceq (s, t), +$$ + +$$ +\| \boldsymbol {Z} _ {H} ^ {(r)} \| _ {2} \leq \sigma_ {\max } \nu_ {2} \sqrt {2 H R _ {\mathrm {g r p}} \log \frac {2 R _ {\mathrm {t o t}}}{\delta}}, \quad \forall 0 \leq r < s. +$$ + +Applying the concentration properties of $\tilde{\pmb{Z}}_{k,\tau}^{(r)},\pmb{m}_{k,\tau}^{(r)}$ and $\pmb{Z}_H^{(r)}$ (Lemmas K.20, K.19 and K.12) yields the following theorem. + +Theorem K.1. For $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , all global steps $(s,t) \preceq (R_{\mathrm{tot}},0)$ are $\delta$ -good. + +In the remainder of this subsection, we use $\mathcal{O}(\cdot)$ notation to hide constants independent of $\delta$ and $\eta$ . + +Below we present a summary of the dynamics of Local SGD when $\bar{\theta}^{(0)}$ is initialized such that $\Phi (\bar{\theta}^{(0)})\in \Gamma$ and all global steps are $\delta$ -good. Phase 1 lasts for $s_0 + s_1 = \mathcal{O}(\log \frac{1}{\eta})$ rounds. At the end of phase 1, the iterate reaches within $\mathcal{O}(\sqrt{\eta\log\frac{1}{\eta\delta}})$ from $\Gamma$ , i.e., $\| \bar{\pmb{\theta}}^{(s_0 + s_1)} - \pmb {\phi}^{(s_0 + s_1)}\| _2 = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta\delta}})$ . The change of the projection on manifold over $s_0 + s_1$ rounds, $\| \phi^{(s_1 + s_0)} - \phi^{(0)}\| _2$ is bounded by $\mathcal{O}(\log \frac{1}{\eta}\sqrt{\eta\log\frac{1}{\eta\delta}})$ . 
+ +After $s_0 + s_1$ rounds, the dynamic enters phase 2 when the iterates stay close to $\Gamma$ with $\bar{\theta}^{(s)} \in \Gamma^{\epsilon_2}, \forall s_0 + s_1 \leq s \leq R_{\mathrm{tot}}$ and $\pmb{\theta}_{k,t}^{(s)} \in \Gamma^{\epsilon_2}, \forall k \in [K], (s_0 + s_1, 0) \preceq (s,t) \preceq (R_{\mathrm{tot}}, 0)$. Furthermore, $\| \pmb{x}_{k,t}^{(s)} \|_2$ and $\| \bar{\pmb{x}}_H^{(s)} \|_2$ satisfy the following equations: + +$$ +\| \boldsymbol {x} _ {k, t} ^ {(s)} \| _ {2} = \mathcal {O} (\sqrt {\eta \log \frac {1}{\eta \delta}}), \quad \forall k \in [ K ], 0 \leq t \leq H, s _ {0} + s _ {1} \leq s < R _ {\mathrm {t o t}}, +$$ + +$$ +\| \bar {\boldsymbol {x}} _ {H} ^ {(s)} \| _ {2} = \mathcal {O} (\sqrt {\eta \log \frac {1}{\eta \delta}}), \quad \forall s _ {0} + s _ {1} \leq s < R _ {\mathrm {t o t}}. +$$ + +Moreover, for $s_0 + s_1 \leq s \leq R_{\mathrm{tot}} - R_{\mathrm{grp}}$ , the change of the manifold projection within $R_{\mathrm{grp}}$ rounds can be bounded as follows: + +$$ +\| \phi^ {(s + r)} - \phi^ {(s)} \| _ {2} = \mathcal {O} (\eta^ {0. 5 - 0. 5 \beta} \sqrt {\log \frac {1}{\eta \delta}}), \quad \forall 1 \leq r \leq R _ {\mathrm {g r p}}. +$$ + +After combing through the dynamics of Local SGD iterates during the approaching and drift phase, we are ready to present the proof of Theorems J.1 and J.2, which are direct consequences of the lemmas in Appendix K.5 and K.6. + +Proof of Theorem J.1. 
By Lemmas K.15, K.22 and Corollary K.1, for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , when all global steps are $\delta$ -good, $\bar{\pmb{\theta}}^{(s)} \in \Gamma^{\epsilon_2}, \forall s_0 + s_1 \leq s \leq R_{\mathrm{tot}}$ and $\pmb{\theta}_{k,t}^{(s)} \in \Gamma^{\epsilon_2}, \forall k \in [K], (s_0 + s_1, 0) \preceq (s,t) \preceq (R_{\mathrm{tot}}, 0)$ and $\| \pmb{x}_{k,t}^{(s)} \|_2, \| \bar{\pmb{x}}_H^{(s)} \|_2$ satisfy the following equations: + +$$ +\| \boldsymbol{x}_{k,t}^{(s)}\|_{2} = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta\delta}}),\quad \forall k\in [K],0\leq t\leq H,s_{0} + s_{1}\leq s < R_{\text{tot}}, +$$ + +$$ +\| \bar {\boldsymbol {x}} _ {H} ^ {(s)} \| _ {2} = \mathcal {O} (\sqrt {\eta \log \frac {1}{\eta \delta}}), \quad \forall s _ {0} + s _ {1} \leq s < R _ {\mathrm {t o t}}. +$$ + +Hence $\| \tilde{\pmb{x}}_0^{(R_{\mathrm{tot}})}\| _2 = \mathcal{O}(\tilde{\Psi} (\bar{\pmb{\theta}}^{(R_{\mathrm{tot}})})) = \mathcal{O}(\| \tilde{\pmb{x}}_H^{(R_{\mathrm{tot}} - 1)}\| _2) = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta\delta}})$ by smoothness of $\mathcal{L}$ and Lemma K.10. According to Theorem K.1, with probability at least $1 - \delta$ , all global steps are $\delta$ -good, thus completing the proof. + +Proof of Theorem J.2. By Lemma K.23, for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , when all global steps are $\delta$ -good, then $\forall s_0 + s_1 \leq s \leq R_{\mathrm{tot}} - R_{\mathrm{grp}}$ , + +$$ +\| \phi^ {(s + r)} - \phi^ {(s)} \| _ {2} = \tilde {\mathcal {O}} (\eta^ {0. 5 - 0. 5 \beta}), \quad \forall 0 \leq r \leq R _ {\mathrm {g r p}}. +$$ + +Also, by Lemma K.18, when all global steps are $\delta$ -good, the change of projection on manifold over $s_0 + s_1$ rounds (i.e., Phase 1), $\| \phi^{(s_0 + s_1)} - \phi^{(0)} \|_2$ is bounded by $\tilde{\mathcal{O}}(\sqrt{\eta})$ . According to Theorem K.1, with probability at least $1 - \delta$ , all global steps are $\delta$ -good, thus completing the proof. 
+ +# K.8 PROOF OF THEOREM 3.3 + +In this subsection, we explicitly derive the dependency of the approximation error on $\alpha$ . The proofs are quite similar to those in Appendix K.5 and hence we only state the key proof idea for brevity. With the same method as the proofs in Appendix K.5.2, we can show that with high probability, $\| \bar{\theta}^{(s)} - \phi^{(s)}\|_2 \leq \frac{1}{2}\sqrt{\frac{\mu}{\rho_2}}$ after $s_0' = \mathcal{O}(1)$ rounds. Below we focus on the dynamics of Local SGD thereafter. We first remind the readers of the definition of $\{\tilde{Z}_{k,t}^s\}$ : + +$$ +\tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} := \sum_ {\tau = 0} ^ {t - 1} \left(\prod_ {l = \tau + 1} ^ {t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {l} ^ {(s)}\right)\right)\right) \boldsymbol {z} _ {k, \tau} ^ {(s)}, \qquad \tilde {\boldsymbol {Z}} _ {k, 0} ^ {(s)} = \boldsymbol {0}. +$$ + +We have the following lemma that controls the norm of the matrix product $\prod_{l = \tau +1}^{t - 1}(\boldsymbol {I} - \eta \nabla^2\mathcal{L}(\tilde{\boldsymbol{u}}_l^{(s)}))$ + +Lemma K.24. Given $\bar{\theta}^{(s)}\in \Gamma^{\epsilon_0}$ , then there exists a positive constant $C_3^\prime$ independent of $\alpha$ such that for all $0\leq \tau < t\leq H$ + +$$ +\left\| \prod_ {l = \tau + 1} ^ {t - 1} (\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} (\tilde {\boldsymbol {u}} _ {l} ^ {(s)})) \right\| _ {2} \leq C _ {3} ^ {\prime}. +$$ + +Proof. Since $\bar{\theta}^{(s)}\in \Gamma^{\epsilon_0}$ , then $\tilde{\pmb{u}}_t^{(s)}\in \Gamma^{\epsilon_1}$ for all $0\leq t\leq H$ . We first bound the minimum eigenvalue of $\nabla^2\mathcal{L}(\tilde{\pmb{u}}_t^{(s)})$ . 
Due to the PL condition, by Lemma K.6, for $\eta \leq \frac{1}{\rho_2}$ + +$$ +\mathcal {L} (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) - \mathcal {L} ^ {*} \leq (1 - \mu \eta) ^ {t} \left(\mathcal {L} (\bar {\boldsymbol {\theta}} ^ {(s)}) - \mathcal {L} ^ {*}\right) \leq \exp (- \mu t \eta) (\mathcal {L} (\bar {\boldsymbol {\theta}} ^ {(s)}) - \mathcal {L} ^ {*}), \quad \forall 0 \leq t \leq H. +$$ + +Therefore, + +$$ +\tilde {\Psi} \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right) \leq \exp (- \mu t \eta / 2) \tilde {\Psi} \left(\bar {\boldsymbol {\theta}} ^ {(s)}\right). +$$ + +Let $C_1^\prime = \rho_3\sqrt{\frac{\rho_2}{\mu}}$ . By Weyl's inequality, + +$$ +\begin{array}{l} \left| \lambda_ {\min } \left(\nabla^ {2} \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right)\right) \right| = \left| \lambda_ {\min } \left(\nabla^ {2} \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right)\right) - \lambda_ {\min } \left(\nabla^ {2} \mathcal {L} \left(\Phi \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right)\right)\right) \right| \\ \leq \| \nabla^ {2} \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right) - \nabla^ {2} \mathcal {L} \left(\Phi \left(\tilde {\boldsymbol {u}} _ {t} ^ {(s)}\right)\right) \| _ {2} \\ \leq \rho_ {3} \| \tilde {\boldsymbol {u}} _ {t} ^ {(s)} - \Phi (\tilde {\boldsymbol {u}} _ {t} ^ {(s)}) \| _ {2} \\ \leq \rho_ {3} \sqrt {\frac {2}{\mu}} \exp (- \mu t \eta / 2) \tilde {\Psi} (\bar {\boldsymbol {\theta}} ^ {(s)}) \\ \leq C _ {1} ^ {\prime} \exp (- \mu t \eta / 2) \epsilon_ {0}, \\ \end{array} +$$ + +where the last two inequalities use Lemmas K.10 and K.7 respectively. 
Therefore, for all $0 \leq t \leq H$ and $0 \leq \tau \leq t - 1$ , + +$$ +\begin{array}{l} \| \prod_ {l = \tau + 1} ^ {t - 1} \left(\boldsymbol {I} - \eta \nabla^ {2} \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {l} ^ {(s)}\right)\right) \| _ {2} \leq \prod_ {l = \tau + 1} ^ {t - 1} \left(1 + \eta \left| \lambda_ {\min } \nabla^ {2} \mathcal {L} \left(\tilde {\boldsymbol {u}} _ {l} ^ {(s)}\right) \right|\right) \\ \leq \prod_ {l = 0} ^ {\infty} (1 + \eta | \lambda_ {\min } \nabla^ {2} \mathcal {L} (\tilde {\boldsymbol {u}} _ {l} ^ {(s)}) |) \\ \leq \exp \left(\eta \epsilon_ {0} C _ {1} ^ {\prime} \sum_ {l = 0} ^ {\infty} \exp (- \mu l \eta / 2)\right). \tag {66} \\ \end{array} +$$ + +For sufficiently small $\eta$ , there exists a constant $C_2'$ such that + +$$ +\sum_ {l = 0} ^ {\infty} \exp (- \mu l \eta / 2) = \frac {1}{1 - \exp (- \mu \eta / 2)} \leq \frac {C _ {2} ^ {\prime}}{\eta}. \tag {67} +$$ + +Substituting (67) into (66), we obtain the lemma. + +![](images/339c2726326801c8e83e538fd68f4467abc476dbd7ac53fcc8d671f5ae3c1c12.jpg) + +Based on Lemma K.24, we obtain the following lemma about the concentration property of $\tilde{Z}_{k,t}^{(s)}$ , which can be derived in the same way as Lemma K.12. + +Lemma K.25. Given $\bar{\theta}^{(s)}\in \Gamma^{\epsilon_0}$ , then with probability at least $1 - \delta$ + +$$ +\| \tilde {\boldsymbol {Z}} _ {k, t} ^ {(s)} \| _ {2} \leq C _ {3} ^ {\prime} \sigma_ {\max } \sqrt {\frac {2 \alpha}{\eta} \log \frac {2 \alpha K}{\eta \delta}}, \quad \forall 0 \leq t \leq H, k \in [ K ], +$$ + +where $C_3^\prime$ is defined in Lemma K.24. + +The following lemma can be derived analogously to Lemma K.14 but the error bound is tighter in terms of its dependency on $\alpha$ . + +Lemma K.26. 
Given $\bar{\theta}^{(s)}\in \Gamma^{\epsilon_1}$ , then for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ , with probability at least $1 - \delta$ , there exists a constant $C_4^\prime$ independent of $\alpha$ such that + +$$ +\| \boldsymbol {\theta} _ {k, t} ^ {(s)} - \tilde {\boldsymbol {u}} _ {t} ^ {(s)} \| _ {2} \leq C _ {4} ^ {\prime} \sqrt {\alpha \eta \log \frac {\alpha}{\eta \delta}}, \quad \forall 0 \leq t \leq H, k \in [ K ], +$$ + +and + +$$ +\| \bar {\boldsymbol {\theta}} ^ {(s + 1)} - \tilde {\boldsymbol {u}} _ {H} ^ {(s)} \| _ {2} \leq C _ {4} ^ {\prime} \sqrt {\alpha \eta \log \frac {\alpha}{\eta \delta}}. +$$ + +Then, similar to Lemma K.17, we can show that for $\delta = \mathcal{O}(\mathrm{poly}(\eta))$ and simultaneously all $s\geq s_0^{\prime} + s_1^{\prime}$ where $s_1^\prime = \mathcal{O}(\frac{1}{\alpha}\log \frac{1}{\eta})$ , it holds with probability at least $1 - \delta$ that $\| \bar{\pmb{\theta}}^{(s)} - \phi^{(s)}\| _2 = \mathcal{O}(\sqrt{\alpha\eta\log\frac{\alpha}{\eta\delta}})$ . Note that to eliminate the dependency of the second term's denominator on $\alpha$ in (44), we can discuss the cases of $\alpha >c_{0}$ and $\alpha < c_{0}$ respectively, where $c_{0}$ can be an arbitrary positive constant independent of $\alpha$ . For the case of $\alpha < c_{0}$ , group $\lceil \frac{c_0}{\alpha}\rceil$ rounds together and repeat the arguments in this subsection to analyze the closeness between Local SGD and GD iterates as well as the evolution of loss. + +# K.9 COMPUTING THE MOMENTS FOR ONE "GIANT STEP" + +In this subsection, we compute the first and second moments for the change of manifold projection every $R_{\mathrm{grp}}$ rounds of Local SGD. 
Since the randomness in training might drive the iterate out of the working zone, making the dynamic intractable, we analyze a more well-behaved sequence $\{\hat{\pmb{\theta}}_{k,t}^{(s)}: (s,t) \preceq (R_{\mathrm{tot}},0), k \in [K]\}$ which is equal to $\{\pmb{\theta}_{k,t}^{(s)}\}$ with high probability. Specifically, $\hat{\pmb{\theta}}_{k,t}^{(s)}$ is equal to $\pmb{\theta}_{k,t}^{(s)}$ if the global step $(s,t)$ is $\eta^{100}$ -good and is set as a point $\phi_{\mathrm{null}} \in \Gamma$ otherwise. The formal definition is as follows. + +![](images/ffc695507921c80a14b163e44d2b9a7edaf3e6efd525e42e284333a0590fabc9.jpg) +Figure 9: A plot of $\psi (x)$ + +Definition K.5 (Well-behaved sequence). Denote by $\mathcal{E}_t^{(s)}$ the event $\{\text{global step } (s, t) \text{ is } \eta^{100} \text{-good}\}$ . Define a well-behaved sequence $\hat{\pmb{\theta}}_{k,t}^{(s)} := \pmb{\theta}_{k,t}^{(s)}\mathbb{1}_{\mathcal{E}_t^{(s)}} + \phi_{\mathrm{null}}\mathbb{1}_{\bar{\mathcal{E}}_t^{(s)}}$ , which satisfies the following update rule: + +$$ +\begin{array}{l} \hat {\boldsymbol {\theta}} _ {k, t + 1} ^ {(s)} = \boldsymbol {\theta} _ {k, t + 1} ^ {(s)} \mathbb {1} _ {\mathcal {E} _ {t + 1} ^ {(s)}} + \phi_ {\text {n u l l}} \mathbb {1} _ {\bar {\mathcal {E}} _ {t + 1} ^ {(s)}} \tag {68} \\ = \hat {\boldsymbol {\theta}} _ {k, t} ^ {(s)} - \eta \nabla \mathcal {L} (\hat {\boldsymbol {\theta}} _ {k, t} ^ {(s)}) - \eta \boldsymbol {z} _ {k, t} ^ {(s)} \underbrace {- \mathbb {1} _ {\bar {\mathcal {E}} _ {t + 1} ^ {(s)}} (\hat {\boldsymbol {\theta}} _ {k , t} ^ {(s)} - \eta \nabla \mathcal {L} (\hat {\boldsymbol {\theta}} _ {k , t} ^ {(s)}) - \eta \boldsymbol {z} _ {k , t} ^ {(s)}) + \mathbb {1} _ {\bar {\mathcal {E}} _ {t + 1} ^ {(s)}} \phi_ {\mathrm {n u l l}}} _ {:= \hat {\boldsymbol {e}} _ {k, t} ^ {(s)}}. 
(69) \\ \end{array} +$$ + +By Theorem K.1, with probability at least $1 - \eta^{100}$ , $\hat{\pmb{\theta}}_{k,t}^{(s)} = \pmb{\theta}_{k,t}^{(s)}, \forall k \in [K], (s,t) \preceq (R_{\mathrm{tot}},0)$ . Similar to $\{\pmb{\theta}_{k,t}^{(s)}\}$ , we define the following variables with respect to $\{\hat{\pmb{\theta}}_{k,t}^{(s)}\}$ : + +$$ +\hat {\boldsymbol {\theta}} _ {\mathrm {a v g}} ^ {(s + 1)} := \frac {1}{K} \sum_ {k \in [ K ]} \hat {\boldsymbol {\theta}} _ {k, H} ^ {(s)}, \quad \hat {\boldsymbol {\phi}} ^ {(s)} := \Phi (\hat {\boldsymbol {\theta}} _ {\mathrm {a v g}} ^ {(s)}), +$$ + +$$ +\hat {\pmb {x}} _ {k, t} ^ {(s)} := \hat {\pmb {\theta}} _ {k, t} ^ {(s)} - \hat {\pmb {\phi}} ^ {(s)}, \quad \hat {\pmb {x}} _ {\mathrm {a v g}, 0} ^ {(s)} := \hat {\pmb {\theta}} _ {\mathrm {a v g}} ^ {(s)} - \hat {\pmb {\phi}} ^ {(s)}, \quad \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} := \frac {1}{K} \sum_ {k \in [ K ]} \hat {\pmb {x}} _ {k, H} ^ {(s)}. +$$ + +Notice that $\hat{\pmb{x}}_{k,0}^{(s)} = \hat{\pmb{x}}_{\mathrm{avg},0}^{(s)}$ for all $k\in [K]$ . Finally, we introduce the following mapping $\Psi (\pmb {\theta}):$ $\Gamma \to \mathbb{R}^{d\times d}$ , which is closely related to $\widehat{\pmb{\Psi}}$ defined in Theorem 3.2. + +Definition K.6. For $\pmb{\theta} \in \Gamma$ , we define the mapping $\Psi(\pmb{\theta}) : \Gamma \to \mathbb{R}^{d \times d}$ : + +$$ +\Psi (\boldsymbol {\theta}) = \sum_ {i, j \in [ d ]} \psi \left(\eta H \left(\lambda_ {i} + \lambda_ {j}\right)\right) \left\langle \boldsymbol {\Sigma} (\boldsymbol {\theta}), \boldsymbol {v} _ {i} \boldsymbol {v} _ {j} ^ {\top} \right\rangle \boldsymbol {v} _ {i} \boldsymbol {v} _ {j} ^ {\top}, +$$ + +where $\lambda_{i},\pmb{v}_{i}$ are the $i$ -th eigenvalue and eigenvector of $\nabla^2\mathcal{L}(\pmb {\theta})$ and $\pmb {v}_i$ 's form an orthonormal basis of $\mathbb{R}^d$ . Additionally, $\psi (x)\coloneqq \frac{e^{-x} - 1 + x}{x}$ and $\psi (0) = 0$ ; see Figure 9 for a plot. 
+ +Remark K.1. Intuitively, $\Psi(\pmb{\theta})$ rescales the entries of $\pmb{\Sigma}(\pmb{\theta})$ in the eigenbasis of $\nabla^2\mathcal{L}(\pmb{\theta})$ . When $\nabla^2\mathcal{L}(\pmb{\theta}) = \mathrm{diag}(\lambda_1,\dots ,\lambda_d)\in \mathbb{R}^{d\times d}$ , where $\lambda_{i} = 0$ for all $m < i\leq d$ , $\Psi (\pmb{\Sigma}_0)_{i,j} = \psi (\eta H(\lambda_i + \lambda_j))\Sigma_{0,i,j}$ . Note that $\Psi (\pmb{\theta})$ can also be written as + +$$ +\operatorname {v e c} (\boldsymbol {\Psi} (\boldsymbol {\theta})) = \psi (\eta H (\nabla^ {2} \mathcal {L} (\boldsymbol {\theta}) \oplus \nabla^ {2} \mathcal {L} (\boldsymbol {\theta}))) \operatorname {v e c} (\boldsymbol {\Sigma} (\boldsymbol {\theta})), +$$ + +where $\oplus$ denotes the Kronecker sum $A\oplus B = A\otimes I_d + I_d\otimes B$ , $\operatorname{vec}(\cdot)$ is the vectorization operator of a matrix and $\psi (\cdot)$ is interpreted as a matrix function. + +Now we are ready to present the result about the moments of $\hat{\phi}^{(s + R_{\mathrm{grp}})} - \hat{\phi}^{(s)}$ + +Theorem K.2. For $s_0 + s_1 \leq s \leq R_{\mathrm{tot}} - R_{\mathrm{grp}}$ and $0 < \beta < 0.5$ , the first and second moments of $\hat{\phi}^{(s + R_{\mathrm{grp}})} - \hat{\phi}^{(s)}$ are as follows: + +$$ +\begin{array}{l} \mathbb {E} \left[ \hat {\phi} ^ {(s + R _ {\mathrm {g r p}})} - \hat {\phi} ^ {(s)} \mid \hat {\phi} ^ {(s)}, \mathcal {E} _ {0} ^ {(s)} \right] = \frac {\eta^ {1 - \beta}}{2 B} \partial^ {2} \Phi \left(\hat {\phi} ^ {(s)}\right) \left[ \boldsymbol {\Sigma} \left(\hat {\phi} ^ {(s)}\right) + (K - 1) \Psi \left(\hat {\phi} ^ {(s)}\right) \right] \tag {70} \\ + \tilde {\mathcal {O}} (\eta^ {1. 
5 - 2 \beta}) + \tilde {\mathcal {O}} (\eta), \\ \end{array} +$$ + +$$ +\mathbb {E} \left[ \left(\hat {\phi} ^ {(s + R _ {\mathrm {g r p}})} - \hat {\phi} ^ {(s)}\right) \left(\hat {\phi} ^ {(s + R _ {\mathrm {g r p}})} - \hat {\phi} ^ {(s)}\right) ^ {\top} \mid \hat {\phi} ^ {(s)}, \mathcal {E} _ {0} ^ {(s)} \right] = \frac {\eta^ {1 - \beta}}{B} \Sigma_ {\|} \left(\hat {\phi} ^ {(s)}\right) + \tilde {\mathcal {O}} \left(\eta^ {1. 5 - 2 \beta}\right) + \tilde {\mathcal {O}} (\eta), \tag {71} +$$ + +where $\tilde{\mathcal{O}} (\cdot)$ hides log terms and constants independent of $\eta$ + +Remark K.2. By Theorem K.1 and the definition of $\hat{\pmb{\theta}}_{k,t}^{(s)}$ , (70) and (71) still hold when we replace $\hat{\phi}^{(s)}$ with $\phi^{(s)}$ and replace $\hat{\phi}^{(s + R_{\mathrm{grp}})}$ with $\phi^{(s + R_{\mathrm{grp}})}$ . + +We shall have Theorem K.2 if we prove the following theorem, which directly gives Theorem K.2 with a simple shift of index. For brevity, denote by $\Delta \hat{\phi}^{(s)}\coloneqq \hat{\phi}^{(s)} - \hat{\phi}^{(0)}$ $\Sigma_0\coloneqq \Sigma (\hat{\phi}^{(0)})$ $\Sigma_{0,\parallel}\coloneqq \Sigma_{\parallel}(\hat{\phi}^{(0)})$ + +Theorem K.3. Given $\| \hat{\pmb{\theta}}_{\mathrm{avg}}^{(0)} - \hat{\phi}^{(0)}\|_2 = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta}})$ , for $0 < \beta < 0.5$ , the first and second moments of $\Delta \hat{\phi}^{(R_{\mathrm{grp}})}$ are as follows: + +$$ +\mathbb {E} [ \Delta \hat {\phi} ^ {(R _ {\mathrm {g r p}})} ] = \frac {\eta^ {1 - \beta}}{2 B} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \pmb {\Sigma} _ {0} + (K - 1) \pmb {\Psi} (\hat {\phi} ^ {(0)}) ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - 2 \beta}) + \tilde {\mathcal {O}} (\eta), +$$ + +$$ +\mathbb {E} [ \Delta \hat {\phi} ^ {(R _ {\mathrm {g r p}})} \Delta \hat {\phi} ^ {(R _ {\mathrm {g r p}}) ^ {\top}} ] = \frac {\eta^ {1 - \beta}}{B} \Sigma_ {0, \parallel} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 1. 5 \beta}) + \tilde {\mathcal {O}} (\eta). 
+$$ + +We will prove Theorem K.3 in the remainder of this subsection. For convenience, we introduce more notations that will be used throughout the proof. Let $\pmb{H}_0 \coloneqq \nabla^2\mathcal{L}(\hat{\phi}^{(0)})$ . By Assumption 3.2, $\mathrm{rank}(H_0) = m$ . WLOG, assume $H_0 = \mathrm{diag}(\lambda_1,\dots ,\lambda_d)\in \mathbb{R}^{d\times d}$ , where $\lambda_{i} = 0$ for all $m < i\leq d$ and $\lambda_{1}\geq \lambda_{2}\dots \geq \lambda_{m}$ . By Lemma K.2, $\partial \Phi (\hat{\phi}^{(0)})$ is the projection matrix onto the tangent space $T_{\hat{\phi}^{(0)}}(\Gamma)$ (i.e. the null space of $\nabla^2\mathcal{L}(\hat{\phi}^{(0)})$ ) and therefore, $\partial \Phi (\hat{\phi}^{(0)}) = \left[ \begin{array}{cc}0 & 0\\ 0 & I_{d - m} \end{array} \right]$ . Let $P_{\parallel}\coloneqq \partial \Phi (\hat{\phi}^{(0)})$ and $P_{\perp}\coloneqq I_d - P_{\parallel}$ . + +Let $\hat{\pmb{A}}_{\mathrm{avg}}^{(s)} := \mathbb{E}[\hat{\pmb{x}}_{\mathrm{avg},H}^{(s)}\hat{\pmb{x}}_{\mathrm{avg},H}^{(s)\top}], \hat{\pmb{q}}_t^{(s)} := \mathbb{E}[\hat{\pmb{x}}_{k,t}^{(s)}]$ and $\hat{\pmb{B}}_t^{(s)} := \mathbb{E}[\hat{\pmb{x}}_{k,t}^{(s)}\Delta \hat{\phi}^{(s)\top}]$ . The latter two notations are independent of $k$ since $\hat{\pmb{\theta}}_{1,t}^{(s)}, \dots, \hat{\pmb{\theta}}_{K,t}^{(s)}$ are identically distributed. The following lemma computes the first and second moments of the change of manifold projection every round. + +Lemma K.27. 
Given $\| \hat{\pmb{\theta}}_{\mathrm{avg}}^{(0)} - \hat{\pmb{\phi}}^{(0)}\| _2 = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta}})$ , for $0\leq s < R_{\mathrm{grp}}$ , the first and second moments of $\hat{\phi}^{(s + 1)} - \hat{\phi}^{(s)}$ are as follows: + +$$ +\mathbb {E} \left[ \hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)} \right] = \boldsymbol {P} _ {\parallel} \hat {\boldsymbol {q}} _ {H} ^ {(s)} + \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {B}} _ {H} ^ {(s)} ] + \frac {1}{2} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {A}} _ {\mathrm {a v g}} ^ {(s)} ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), \tag {72} +$$ + +$$ +\mathbb {E} \left[ \left(\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)}\right) \left(\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)}\right) ^ {\top} \right] = P _ {\|} \hat {A} _ {\text {a v g}} ^ {(s)} P _ {\|} + \tilde {\mathcal {O}} \left(\eta^ {1. 5 - 0. 5 \beta}\right). \tag {73} +$$ + +Proof. By Taylor expansion, we have + +$$ +\begin{array}{l} \hat {\phi} ^ {(s + 1)} = \Phi (\hat {\phi} ^ {(s)} + \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)}) \\ = \hat {\phi} ^ {(s)} + \partial \Phi (\hat {\phi} ^ {(s)}) \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s)} + \frac {1}{2} \partial^ {2} \Phi (\hat {\phi} ^ {(s)}) [ \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s)} \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s) \top} ] + \mathcal {O} (\| \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {3}) \\ = \hat {\phi} ^ {(s)} + \partial \Phi (\hat {\phi} ^ {(0)} + \Delta \hat {\phi} ^ {(s)}) \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s)} + \frac {1}{2} \partial^ {2} \Phi (\hat {\phi} ^ {(0)} + \Delta \hat {\phi} ^ {(s)}) [ \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s)} \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s) \top} ] \\ + \mathcal {O} (\| \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {3}) \\ = \hat {\phi} ^ {(s)} + P _ {\parallel} \hat {x} _ {\mathrm {a v g}, H} ^ {(s)} 
+ \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {x} _ {\mathrm {a v g}, H} ^ {(s)} \Delta \hat {\phi} ^ {(s) \top} ] + \frac {1}{2} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {x} _ {\mathrm {a v g}, H} ^ {(s)} \hat {x} _ {\mathrm {a v g}, H} ^ {(s) \top} ] \\ + \mathcal {O} (\| \Delta \hat {\phi} ^ {(s)} \| _ {2} ^ {2} \| \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} + \| \Delta \hat {\phi} ^ {(s)} \| _ {2} \| \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {2} + \| \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {3}). \\ \end{array} +$$ + +Rearrange the terms and we obtain: + +$$ +\begin{array}{l} \hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)} = \boldsymbol {P} _ {\parallel} \hat {\boldsymbol {x}} _ {\text {a v g}, H} ^ {(s)} + \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {x}} _ {\text {a v g}, H} ^ {(s)} \Delta \hat {\phi} ^ {(s) \top} ] + \frac {1}{2} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {x}} _ {\text {a v g}, H} ^ {(s)} \hat {\boldsymbol {x}} _ {\text {a v g}, H} ^ {(s) \top} ] \tag {74} \\ + \mathcal {O} \left(\| \Delta \hat {\phi} ^ {(s)} \| _ {2} ^ {2} \| \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} + \| \Delta \hat {\phi} ^ {(s)} \| _ {2} \| \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {2} + \| \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {3}\right). \\ \end{array} +$$ + +Moreover, + +$$ +(\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)}) (\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)}) ^ {\top} = P _ {\|} \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s) \top} P _ {\|} + \mathcal {O} (\| \Delta \hat {\phi} ^ {(s)} \| _ {2} \| \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {2}). 
\tag {75} +$$ + +Noticing that $\hat{\pmb{x}}_{k,H}^{(s)}\Delta \hat{\phi}^{(s)\top}$ are identically distributed for all $k\in [K]$ , we have $\mathbb{E}[\hat{\pmb{x}}_{\mathrm{avg},H}^{(s)}\Delta \hat{\phi}^{(s)\top}] = \frac{1}{K}\sum_{k\in [K]}\mathbb{E}[\hat{\pmb{x}}_{k,H}^{(s)}\Delta \hat{\phi}^{(s)\top}] = \hat{\pmb{B}}_H^{(s)}$ . Then taking expectation of both sides of (74) gives + +$$ +\begin{array}{l} \mathbb {E} [ \hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)} ] = \boldsymbol {P} _ {\parallel} \hat {\boldsymbol {q}} _ {H} ^ {(s)} + \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {B}} _ {H} ^ {(s)} ] + \frac {1}{2} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {A}} _ {\mathrm {a v g}} ^ {(s)} ] \\ + \mathcal {O} (\mathbb {E} [ \| \Delta \hat {\phi} ^ {(s)} \| _ {2} ^ {2} \| \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ] + \mathbb {E} [ \| \Delta \hat {\phi} ^ {(s)} \| _ {2} \| \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {2} ] + \mathbb {E} [ \| \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {3} ]). \\ \end{array} +$$ + +Again taking expectation of both sides of (75) yields + +$$ +\mathbb {E} [ (\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)}) (\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)}) ^ {\top} ] = P _ {\parallel} \hat {A} _ {\mathrm {a v g}} ^ {(s)} P _ {\parallel} + \mathcal {O} (\mathbb {E} [ \| \Delta \hat {\phi} ^ {(s)} \| _ {2} \| \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {2} ]). +$$ + +By Lemmas K.22 and K.23, the following holds simultaneously with probability at least $1 - \eta^{100}$ : + +$$ +\| \Delta \hat {\phi} ^ {(s)} \| _ {2} = \tilde {\mathcal {O}} (\eta^ {0. 5 - 0. 5 \beta}), \quad \| \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} = \tilde {\mathcal {O}} (\eta^ {0. 5}). 
+$$ + +Furthermore, since for all $k \in [K]$ and $(s,t) \preceq (R_{\mathrm{tot}},0)$ , $\hat{\pmb{\theta}}_{k,t}^{(s)}$ stays in $\Gamma^{\epsilon_2}$ which is a bounded set, $\| \Delta \hat{\phi}^{(s)}\| _2$ and $\| \hat{\pmb{x}}_{\mathrm{avg},H}^{(s)}\| _2$ are also bounded. Therefore, we have + +$$ +\mathbb {E} [ \| \Delta \hat {\phi} ^ {(s)} \| _ {2} ^ {2} \| \hat {\boldsymbol {x}} _ {\operatorname {a v g}, H} ^ {(s)} \| _ {2} ] = \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), \tag {76} +$$ + +$$ +\mathbb {E} [ \| \Delta \hat {\phi} ^ {(s)} \| _ {2} \| \hat {\boldsymbol {x}} _ {\operatorname {a v g}, H} ^ {(s)} \| _ {2} ^ {2} ] = \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), \tag {77} +$$ + +$$ +\mathbb {E} [ \| \hat {\boldsymbol {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {3} ] = \tilde {\mathcal {O}} (\eta^ {1. 5}), \tag {78} +$$ + +which concludes the proof. + +We compute $\hat{A}_{\mathrm{avg}}^{(s)}, \hat{q}_t^{(s)}$ and $\hat{B}_t^{(s)}$ by solving a set of recursions, which is formulated in the following lemma. Additionally, define $\hat{A}_t^{(s)} \coloneqq \mathbb{E}[\hat{\pmb{x}}_{k,t}^{(s)}\hat{\pmb{x}}_{k,t}^{(s)\top}]$ and $\hat{M}_t^{(s)} \coloneqq \mathbb{E}[\hat{\pmb{x}}_{k,t}^{(s)}\hat{\pmb{x}}_{l,t}^{(s)\top}], (k \neq l)$ . + +Lemma K.28. Given $\| \hat{\pmb{\theta}}_{\mathrm{avg}}^{(0)} - \hat{\phi}^{(0)}\| _2 = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta}})$ , for $0\leq s < R_{\mathrm{grp}}$ and $0\leq t < H$ , we have the following recursions. + +$$ +\hat {\boldsymbol {q}} _ {t + 1} ^ {(s)} = \hat {\boldsymbol {q}} _ {t} ^ {(s)} - \eta \boldsymbol {H} _ {0} \hat {\boldsymbol {q}} _ {t} ^ {(s)} - \eta \nabla^ {3} \mathcal {L} \left(\phi^ {(0)}\right) \left[ \hat {\boldsymbol {B}} _ {t} ^ {(s)} \right] - \frac {\eta}{2} \nabla^ {3} \mathcal {L} \left(\phi^ {(0)}\right) \left[ \hat {\boldsymbol {A}} _ {t} ^ {(s)} \right] + \tilde {\mathcal {O}} \left(\eta^ {2. 
5 - \beta}\right), \tag {79} +$$ + +$$ +\hat {\boldsymbol {A}} _ {t + 1} ^ {(s)} = \hat {\boldsymbol {A}} _ {t} ^ {(s)} - \eta \boldsymbol {H} _ {0} \hat {\boldsymbol {A}} _ {t} ^ {(s)} - \eta \hat {\boldsymbol {A}} _ {t} ^ {(s)} \boldsymbol {H} _ {0} + \frac {\eta^ {2}}{B _ {\mathrm {l o c}}} \boldsymbol {\Sigma} _ {0} + \tilde {\mathcal {O}} (\eta^ {2. 5 - 0. 5 \beta}), \tag {80} +$$ + +$$ +\hat {\boldsymbol {M}} _ {t + 1} ^ {(s)} = \hat {\boldsymbol {M}} _ {t} ^ {(s)} - \eta \boldsymbol {H} _ {0} \hat {\boldsymbol {M}} _ {t} ^ {(s)} - \eta \hat {\boldsymbol {M}} _ {t} ^ {(s)} \boldsymbol {H} _ {0} + \tilde {\mathcal {O}} \left(\eta^ {2. 5 - 0. 5 \beta}\right), \tag {81} +$$ + +$$ +\hat {\boldsymbol {B}} _ {t + 1} ^ {(s)} = \left(\boldsymbol {I} - \eta \boldsymbol {H} _ {0}\right) \hat {\boldsymbol {B}} _ {t} ^ {(s)} + \tilde {\mathcal {O}} \left(\eta^ {2. 5 - \beta}\right). \tag {82} +$$ + +Moreover, + +$$ +\hat {\boldsymbol {A}} _ {\mathrm {a v g}} ^ {(s)} = \frac {1}{K} \hat {\boldsymbol {A}} _ {H} ^ {(s)} + (1 - \frac {1}{K}) \hat {\boldsymbol {M}} _ {H} ^ {(s)}, \tag {83} +$$ + +$$ +\hat {\boldsymbol {M}} _ {0} ^ {(s + 1)} = \hat {\boldsymbol {A}} _ {0} ^ {(s + 1)} = \boldsymbol {P} _ {\perp} \hat {\boldsymbol {A}} _ {\mathrm {a v g}} ^ {(s)} \boldsymbol {P} _ {\perp} + \mathcal {O} \left(\eta^ {1. 5 - 0. 5 \beta}\right), \tag {84} +$$ + +$$ +\hat {\boldsymbol {q}} _ {0} ^ {(s + 1)} = \boldsymbol {P} _ {\perp} \hat {\boldsymbol {q}} _ {H} ^ {(s)} - \partial^ {2} \Phi (\phi^ {(0)}) [ \hat {\boldsymbol {B}} _ {H} ^ {(s)} ] - \frac {1}{2} \partial^ {2} \Phi (\phi^ {(0)}) [ \hat {\boldsymbol {A}} _ {\mathrm {a v g}} ^ {(s)} ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), \tag {85} +$$ + +$$ +\hat {\boldsymbol {B}} _ {0} ^ {(s + 1)} = \boldsymbol {P} _ {\perp} \hat {\boldsymbol {B}} _ {H} ^ {(s)} + \boldsymbol {P} _ {\perp} \hat {\boldsymbol {A}} _ {\text {a v g}} ^ {(s)} \boldsymbol {P} _ {\parallel} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). 
\tag {86} +$$ + +Proof. We first derive the recursion for $\hat{\pmb{q}}_t^{(s)}$ . Recall the update rule for $\hat{\pmb{\theta}}_{k,t}^{(s)}$ : + +$$ +\hat {\boldsymbol {\theta}} _ {k, t + 1} ^ {(s)} = \hat {\boldsymbol {\theta}} _ {k, t} ^ {(s)} - \eta \nabla \mathcal {L} (\hat {\boldsymbol {\theta}} _ {k, t} ^ {(s)}) - \eta \boldsymbol {z} _ {k, t} ^ {(s)} + \hat {\boldsymbol {e}} _ {k, t} ^ {(s)}. +$$ + +Subtracting $\hat{\phi}^{(s)}$ from both sides gives + +$$ +\begin{array}{l} \hat {\boldsymbol {x}} _ {k, t + 1} ^ {(s)} = \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} - \eta \nabla \mathcal {L} (\hat {\boldsymbol {\theta}} _ {k, t} ^ {(s)}) - \eta \boldsymbol {z} _ {k, t} ^ {(s)} + \mathcal {O} (\| \hat {\boldsymbol {e}} _ {k, t} ^ {(s)} \| _ {2}) \\ = \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} - \eta \left(\nabla^ {2} \mathcal {L} (\hat {\boldsymbol {\phi}} ^ {(s)}) \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} + \frac {1}{2} \nabla^ {3} \mathcal {L} (\hat {\boldsymbol {\phi}} ^ {(s)}) [ \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \hat {\boldsymbol {x}} _ {k, t} ^ {(s) \top} ] + \mathcal {O} (\| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {3})\right) \\ - \eta z _ {k, t} ^ {(s)} + \mathcal {O} (\| \hat {\boldsymbol {e}} _ {k, t} ^ {(s)} \| _ {2}) \\ = \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} - \eta \left(\nabla^ {2} \mathcal {L} \left(\hat {\boldsymbol {\phi}} ^ {(0)}\right) + \nabla^ {3} \mathcal {L} \left(\hat {\boldsymbol {\phi}} ^ {(0)}\right) \Delta \hat {\boldsymbol {\phi}} ^ {(s)} + \mathcal {O} \left(\| \Delta \hat {\boldsymbol {\phi}} ^ {(s)} \| ^ {2}\right)\right) \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \\ - \frac {\eta}{2} \left(\nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) + \mathcal {O} \left(\| \Delta \hat {\phi} ^ {(s)} \| _ {2}\right)\right) \left[ \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \hat {\boldsymbol {x}} _ {k t} ^ {(s) \top} \right] - \eta \boldsymbol {z} _ {k, t} ^ {(s)} + \mathcal {O} \left(\eta \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ 
{2} ^ {3} + \| \hat {\boldsymbol {e}} _ {k, t} ^ {(s)} \| _ {2}\right) \\ = \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} - \eta \boldsymbol {H} _ {0} \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} - \eta \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \Delta \hat {\phi} ^ {(s) \top} ] - \frac {\eta}{2} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \hat {\boldsymbol {x}} _ {k, t} ^ {(s) \top} ] - \eta \boldsymbol {z} _ {k, t} ^ {(s)} \\ + \mathcal {O} (\eta \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {3} + \eta \| \Delta \hat {\phi} ^ {(s)} \| _ {2} \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {2} + \eta \| \Delta \hat {\phi} ^ {(s)} \| _ {2} ^ {2} \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} + \| \hat {\boldsymbol {e}} _ {k, t} ^ {(s)} \| _ {2}), \tag {87} \\ \end{array} +$$ + +where the second and third equality perform Taylor expansion. Taking expectation on both sides gives + +$$ +\begin{array}{l} \hat {\pmb {q}} _ {t + 1} ^ {(s)} = (\pmb {I} - \eta \pmb {H} _ {0}) \hat {\pmb {q}} _ {t} ^ {(s)} - \eta \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \hat {\pmb {q}} _ {t} ^ {(s)} ] - \frac {\eta}{2} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \hat {\pmb {A}} _ {t} ^ {(s)} ] \\ + \mathcal {O} \left(\eta \mathbb {E} [ \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {3} ] + \eta \mathbb {E} [ \| \Delta \hat {\phi} ^ {(s)} \| _ {2} \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {2} ] + \eta \mathbb {E} [ \| \Delta \hat {\phi} ^ {(s)} \| _ {2} ^ {2} \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ] + \mathbb {E} [ \| \hat {\boldsymbol {e}} _ {k, t} ^ {(s)} \| _ {2} ]\right). \\ \end{array} +$$ + +By Theorem K.1, with probability at least $1 - \eta^{100}$ , $\hat{e}_{k,t}^{(s)} = \mathbf{0}, \forall k \in [K], (s,t) \preceq (R_{\mathrm{grp}},0)$ . 
Also notice that both $\hat{\theta}_{k,t}^{(s)}$ and $\phi_{\mathrm{null}}$ belong to the bounded set $\Gamma^{\epsilon_2}$ . Therefore, $\| \hat{e}_{k,t}^{(s)} \|_2$ is bounded and we have $\mathbb{E}[\| \hat{e}_{k,t}^{(s)} \|_2] = \mathcal{O}(\eta^{100})$ . Combining this with (76) to (78) yields (79). + +Secondly, we derive the recursion for $\hat{B}_t^{(s)}$ . Multiplying both sides of (87) by $\Delta \hat{\phi}^{(s)\top}$ and taking expectation, we have + +$$ +\hat {\boldsymbol {B}} _ {t + 1} ^ {(s)} = (\boldsymbol {I} - \eta \boldsymbol {H} _ {0}) \hat {\boldsymbol {B}} _ {t} ^ {(s)} + \mathcal {O} (\eta \mathbb {E} [ \| \Delta \hat {\boldsymbol {\phi}} ^ {(s)} \| _ {2} \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {2} + \| \Delta \hat {\boldsymbol {\phi}} ^ {(s)} \| _ {2} ^ {2} \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} + \| \hat {\boldsymbol {e}} _ {k, t} ^ {(s)} \| _ {2} ]). +$$ + +Still by Theorem K.1 and (76) to (78), we have (82). + +Thirdly, we derive the recursion for $\hat{A}_t^{(s)}$ . By (87), we have + +$$ +\begin{array}{l} \hat {\boldsymbol {A}} _ {t + 1} ^ {(s)} = \hat {\boldsymbol {A}} _ {t} ^ {(s)} - \eta \boldsymbol {H} _ {0} \hat {\boldsymbol {A}} _ {t} ^ {(s)} - \eta \hat {\boldsymbol {A}} _ {t} ^ {(s)} \boldsymbol {H} _ {0} + \frac {\eta^ {2}}{B _ {\mathrm {l o c}}} \boldsymbol {\Sigma} _ {0} + \mathcal {O} (\eta^ {2} \mathbb {E} [ \| \Delta \hat {\phi} ^ {(s)} \| _ {2} + \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ]) \\ + \mathcal {O} (\eta \mathbb {E} [ \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {3} + \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {2} \| \Delta \hat {\phi} ^ {(s)} \| _ {2} + \| \hat {\boldsymbol {e}} _ {k, t} ^ {(s)} \| _ {2} ]) \\ = (\boldsymbol {I} - \eta \boldsymbol {H} _ {0}) \hat {\boldsymbol {A}} _ {t} ^ {(s)} + \frac {\eta^ {2}}{B _ {\mathrm {l o c}}} \boldsymbol {\Sigma} _ {0} + \tilde {\mathcal {O}} (\eta^ {2. 5 - 0. 
5 \beta}), \\ \end{array} +$$ + +which establishes (80). + +Fourthly, we derive the recursion for $\hat{M}_t^{(s)}$ . Multiplying both sides of (87) by $\hat{\pmb{x}}_{l,t + 1}^{(s)}$ and taking expectation, $l\neq k$ , we obtain + +$$ +\begin{array}{l} \hat {\boldsymbol {M}} _ {t + 1} ^ {(s)} = \hat {\boldsymbol {M}} _ {t} ^ {(s)} - \eta \boldsymbol {H} _ {0} \hat {\boldsymbol {M}} _ {t} ^ {(s)} - \eta \hat {\boldsymbol {M}} _ {t} ^ {(s)} \boldsymbol {H} _ {0} + \mathcal {O} (\eta \mathbb {E} [ \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} \| \hat {\boldsymbol {x}} _ {l, t} ^ {(s)} \| _ {2} \| \Delta \hat {\boldsymbol {\phi}} ^ {(s)} \| _ {2} ]) \\ + \mathcal {O} (\eta \mathbb {E} [ \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {2} \| \hat {\boldsymbol {x}} _ {l, t} ^ {(s)} \| _ {2} + \| \hat {\boldsymbol {e}} _ {k, t} ^ {(s)} \| _ {2} ]). \\ \end{array} +$$ + +By a similar argument to the proof of Lemma K.27, we have + +$$ +\mathbb {E} [ \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} ^ {2} \| \hat {\boldsymbol {x}} _ {l, t} ^ {(s)} \| _ {2} ] = \tilde {\mathcal {O}} (\eta^ {1. 5}), +$$ + +$$ +\mathbb {E} \left[ \| \hat {\boldsymbol {x}} _ {k, t} ^ {(s)} \| _ {2} \| \hat {\boldsymbol {x}} _ {l, t} ^ {(s)} \| _ {2} \| \Delta \hat {\phi} ^ {(s)} \| _ {2} \right] = \tilde {\mathcal {O}} \left(\eta^ {1. 5 - 0. 5 \beta}\right), +$$ + +which yields (81). + +Now we proceed to prove (83) to (86). 
By definition of $\hat{A}_{\mathrm{avg}}^{(s)}$ + +$$ +\begin{array}{l} \hat {\boldsymbol {A}} _ {\mathrm {a v g}} ^ {(s)} = \frac {1}{K ^ {2}} \mathbb {E} [ (\sum_ {k \in [ K ]} \hat {\boldsymbol {x}} _ {k, H} ^ {(s)}) (\sum_ {k \in [ K ]} \hat {\boldsymbol {x}} _ {k, H} ^ {(s)}) ^ {\top} ] \\ = \frac {1}{K ^ {2}} \sum_ {k \in [ K ]} \mathbb {E} \left[ \hat {\boldsymbol {x}} _ {k, H} ^ {(s)} \hat {\boldsymbol {x}} _ {k, H} ^ {(s) \top} \right] + \frac {1}{K ^ {2}} \sum_ {k, l \in [ K ], k \neq l} \mathbb {E} \left[ \hat {\boldsymbol {x}} _ {k, H} ^ {(s)} \hat {\boldsymbol {x}} _ {l, H} ^ {(s) \top} \right] \\ = \frac {1}{K} \hat {\boldsymbol {A}} _ {H} ^ {(s)} + (1 - \frac {1}{K}) \hat {\boldsymbol {M}} _ {H} ^ {(s)}, \\ \end{array} +$$ + +which demonstrates (83). Then we derive (84). By definition of $\hat{\pmb{x}}_{\mathrm{avg},0}^{(s + 1)}$ + +$$ +\begin{array}{l} \hat {\mathbf {x}} _ {\mathrm {a v g}, 0} ^ {(s + 1)} = \hat {\boldsymbol {\phi}} ^ {(s)} + \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s)} - \Phi \big (\hat {\boldsymbol {\phi}} ^ {(s)} + \hat {\mathbf {x}} _ {\mathrm {a v g}, H} ^ {(s)} \big) \\ = \hat {\phi} ^ {(s)} + \hat {\mathbf {x}} _ {\operatorname {a v g}, H} ^ {(s)} - \left(\hat {\phi} ^ {(s)} + \partial \Phi (\hat {\phi} ^ {(s)}) \hat {\mathbf {x}} _ {\operatorname {a v g}, H} ^ {(s)} + \mathcal {O} (\| \hat {\mathbf {x}} _ {\operatorname {a v g}, H} ^ {(s)} \| _ {2} ^ {2})\right) \\ = \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} - \left(\pmb {P} _ {\parallel} + \mathcal {O} (\| \Delta \hat {\phi} ^ {(s)} \| _ {2})\right) \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} + \mathcal {O} (\| \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {2}) \\ = \boldsymbol {P} _ {\perp} \hat {\boldsymbol {x}} _ {\text {a v g}, H} ^ {(s)} + \mathcal {O} \left(\left\| \hat {\boldsymbol {x}} _ {\text {a v g}, H} ^ {(s)} \right\| _ {2} ^ {2} + \left\| \hat {\boldsymbol {x}} _ {\text {a v g}, H} ^ {(s)} \right\| _ {2} \left\| \Delta \hat {\phi} ^ {(s)} 
\right\| _ {2}\right). \tag {88} \\ \end{array} +$$ + +Hence, + +$$ +\begin{array}{l} \hat {\boldsymbol {M}} _ {0} ^ {(s + 1)} = \hat {\boldsymbol {A}} _ {0} ^ {(s + 1)} = \mathbb {E} [ \hat {\boldsymbol {x}} _ {\mathrm {a v g}, 0} ^ {(s)} \hat {\boldsymbol {x}} _ {\mathrm {a v g}, 0} ^ {(s) \top} ] \\ = \pmb {P} _ {\perp} \hat {\pmb {A}} _ {\mathrm {a v g}} ^ {(s)} \pmb {P} _ {\perp} + \mathcal {O} (\mathbb {E} [ \| \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {3} + \| \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} \| _ {2} ^ {2} \| \Delta \hat {\phi} ^ {(s)} \| _ {2} ]). \\ \end{array} +$$ + +By (76) and (78), we obtain (84). By (74), + +$$ +\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)} = P _ {\|} \hat {\mathbf {x}} _ {\operatorname {a v g}, H} ^ {(s)} + \mathcal {O} \left(\left\| \hat {\mathbf {x}} _ {\operatorname {a v g}, H} ^ {(s)} \right\| _ {2} \| \Delta \hat {\phi} ^ {(s)} \| _ {2} + \| \hat {\mathbf {x}} _ {\operatorname {a v g}, H} ^ {(s)} \| _ {2} ^ {2}\right). \tag {89} +$$ + +Combining (88) and (89) gives + +$$ +\mathbb {E} [ \hat {\pmb {x}} _ {\mathrm {a v g}, 0} ^ {(s)} (\hat {\pmb {\phi}} ^ {(s + 1)} - \hat {\pmb {\phi}} ^ {(s)}) ^ {\top} ] = \pmb {P} _ {\perp} \hat {\pmb {A}} _ {\mathrm {a v g}} ^ {(s)} \pmb {P} _ {\parallel} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). +$$ + +Therefore, + +$$ +\begin{array}{l} \hat {\boldsymbol {B}} _ {0} ^ {(s + 1)} = \mathbb {E} [ \hat {\boldsymbol {x}} _ {\mathrm {a v g}, 0} ^ {(s + 1)} \Delta \hat {\boldsymbol {\phi}} ^ {(s + 1) \top} ] = \mathbb {E} [ \hat {\boldsymbol {x}} _ {\mathrm {a v g}, 0} ^ {(s + 1)} (\Delta \hat {\boldsymbol {\phi}} ^ {(s)} + \hat {\boldsymbol {\phi}} ^ {(s + 1)} - \hat {\boldsymbol {\phi}} ^ {(s)}) ^ {\top} ] \\ = \boldsymbol {P} _ {\perp} \hat {\boldsymbol {B}} _ {H} ^ {(s)} + \boldsymbol {P} _ {\perp} \hat {\boldsymbol {A}} _ {\mathrm {a v g}} ^ {(s)} \boldsymbol {P} _ {\parallel} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). 
\\ \end{array} +$$ + +Finally, we apply Lemma K.27 to derive (85). + +$$ +\begin{array}{l} \hat {\pmb {q}} _ {0} ^ {(s + 1)} = \mathbb {E} [ \hat {\pmb {x}} _ {\mathrm {a v g}, 0} ^ {(s + 1)} ] = \mathbb {E} [ \hat {\pmb {x}} _ {\mathrm {a v g}, H} ^ {(s)} - (\hat {\pmb {\phi}} ^ {(s + 1)} - \hat {\pmb {\phi}} ^ {(s)}) ] \\ = \hat {\pmb {q}} _ {H} ^ {(s)} - \pmb {P} _ {\parallel} \hat {\pmb {q}} _ {H} ^ {(s)} - \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\pmb {B}} _ {H} ^ {(s)} ] - \frac {1}{2} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\pmb {A}} _ {\mathrm {a v g}} ^ {(s)} ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}) \\ = \pmb {P} _ {\perp} \hat {\pmb {q}} _ {H} ^ {(s)} - \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\pmb {B}} _ {H} ^ {(s)} ] - \frac {1}{2} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\pmb {A}} _ {\mathrm {a v g}} ^ {(s)} ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), \\ \end{array} +$$ + +which concludes the proof. + +![](images/d4dcbf8df32884e9d32fcdd59e3a618e31cb9e1d7d85998673d0924bf5a9fe95.jpg) + +With the assumption that the hessian at $\hat{\phi}^{(0)}$ is diagonal, we have the following corollary that formulates the recursions for each matrix element. + +Corollary K.2. Given $\| \hat{\pmb{\theta}}_{\mathrm{avg}}^{(0)} - \hat{\phi}^{(0)}\| _2 = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta}})$ , for $0\leq s < R_{\mathrm{grp}}$ and $0\leq t < H$ , we have the following elementwise recursions. + +$$ +\hat {A} _ {t + 1, i, j} ^ {(s)} = \left(1 - \left(\lambda_ {i} + \lambda_ {j}\right) \eta\right) \hat {A} _ {t, i, j} ^ {(s)} + \frac {\eta^ {2}}{B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} \left(\eta^ {2. 5 - 0. 5 \beta}\right), \tag {90} +$$ + +$$ +\hat {M} _ {t + 1, i, j} ^ {(s)} = \left(1 - \left(\lambda_ {i} + \lambda_ {j}\right) \eta\right) \hat {M} _ {t, i, j} ^ {(s)} + \tilde {\mathcal {O}} \left(\eta^ {2. 5 - 0. 
5 \beta}\right), \tag {91} +$$ + +$$ +\hat {B} _ {t + 1, i, j} ^ {(s)} = \left(1 - \lambda_ {i} \eta\right) \hat {B} _ {t, i, j} ^ {(s)} + \tilde {\mathcal {O}} \left(\eta^ {2. 5 - \beta}\right), \tag {92} +$$ + +$$ +\hat {A} _ {\text {a v g}, i, j} ^ {(s)} = \frac {1}{K} \left(\hat {A} _ {H, i, j} ^ {(s)} - \hat {M} _ {H, i, j} ^ {(s)}\right) + \hat {M} _ {H, i, j} ^ {(s)}, \tag {93} +$$ + +$$ +\hat {M} _ {0, i, j} ^ {(s + 1)} = \hat {A} _ {0, i, j} ^ {(s + 1)} = \left\{ \begin{array}{l l} \hat {A} _ {\text {a v g}, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), & 1 \leq i \leq m, 1 \leq j \leq m, \\ \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), & \text {o t h e r w i s e .} \end{array} \right. \tag {94} +$$ + +$$ +\hat {B} _ {0, i, j} ^ {(s + 1)} = \left\{ \begin{array}{l l} \hat {B} _ {H, i, j} ^ {(s)} + \hat {A} _ {\text {a v g}, i, j} ^ {(s)} + \tilde {\mathcal {O}} \left(\eta^ {1. 5 - \beta}\right), & 1 \leq i \leq m, m < j \leq d, \\ \hat {B} _ {H, i, j} ^ {(s)} + \tilde {\mathcal {O}} \left(\eta^ {1. 5 - \beta}\right), & 1 \leq i \leq m, 1 \leq j \leq m, \\ \tilde {\mathcal {O}} \left(\eta^ {1. 5 - \beta}\right), & m < i \leq d. \end{array} \right. \tag {95} +$$ + +Having formulated the recursions, we are ready to solve out the explicit expressions. We will split each matrix into four parts and compute them one by one. Specifically, a matrix $M$ can be split into $P_{\parallel}MP_{\parallel}$ in the tangent space of $\Gamma$ at $\hat{\phi}^{(0)}, P_{\perp}MP_{\perp}$ in the normal space, along with $P_{\parallel}MP_{\perp}$ and $P_{\perp}MP_{\parallel}$ across both spaces. + +We first compute the elements of $P_{\perp}\hat{A}_{t}^{(s)}P_{\perp}$ and $P_{\perp}\hat{A}_{\mathrm{avg}}^{(s)}P_{\perp}$ . + +Lemma K.29 (General formula for $P_{\perp}\hat{A}_{t}^{(s)}P_{\perp}$ and $P_{\perp}\hat{A}_{\mathrm{avg}}^{(s)}P_{\perp}$ ). Let $R_0 \coloneqq \lceil \frac{10}{\lambda_m\alpha}\log \frac{1}{\eta}\rceil$ . 
Then for $1\leq i\leq m,1\leq j\leq m$ and $R_0\leq s < R_{\mathrm{grp}}$ + +$$ +\hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} = \frac {1}{(\lambda_ {i} + \lambda_ {j}) K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), +$$ + +$$ +\hat {A} _ {t, i, j} ^ {(s)} = - \left(1 - \frac {1}{K}\right) \frac {(1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {t}}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \frac {\eta}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). +$$ + +For $s < R_0$ , $\hat{A}_{t,i,j}^{(s)} = \tilde{\mathcal{O}} (\eta)$ and $\hat{A}_{\mathrm{avg},i,j}^{(s)} = \tilde{\mathcal{O}} (\eta)$ . + +Proof. For $1 \leq i \leq m, 1 \leq j \leq m, \lambda_i > 0, \lambda_j > 0$ . By (90), + +$$ +\begin{array}{l} \hat {A} _ {t, i, j} ^ {(s)} = (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {t} \hat {A} _ {0, i, j} ^ {(s)} + \sum_ {\tau = 0} ^ {t - 1} (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {\tau} \frac {\eta^ {2}}{B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} \\ + \tilde {\mathcal {O}} (\sum_ {\tau = 0} ^ {t - 1} (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {\tau} \eta^ {2. 5 - 0. 5 \beta}) \\ = (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {t} \hat {A} _ {0, i, j} ^ {(s)} + \frac {1 - (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {t}}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), \\ \end{array} +$$ + +where the second inequality uses $\sum_{\tau = 0}^{t - 1}(1 - (\lambda_i + \lambda_j)\eta)^\tau = \frac{1 - (1 - (\lambda_i + \lambda_j)\eta)^t}{(\lambda_i + \lambda_j)\eta}\leq \frac{1}{(\lambda_i + \lambda_j)\eta}$ . 
By (91), + +$$ +\begin{array}{l} \hat {M} _ {t, i, j} ^ {(s)} = (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {t} \hat {M} _ {0, i, j} ^ {(s)} + \tilde {\mathcal {O}} \left(\sum_ {\tau = 0} ^ {t - 1} (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {\tau} \eta^ {2. 5 - 0. 5 \beta}\right) \\ = (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {t} \hat {A} _ {0, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), \\ \end{array} +$$ + +where the second equality uses $M_0^{(s + 1)} = A_0^{(s + 1)}$ . By (93) and (94), + +$$ +\hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} = \frac {1 - (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {H}}{(\lambda_ {i} + \lambda_ {j}) K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {H} \hat {A} _ {0, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), +$$ + +$$ +\begin{array}{l} \hat {A} _ {0, i, j} ^ {(s + 1)} = \hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {2. 5 - 0. 5 \beta}) \\ = \frac {1 - (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {H}}{(\lambda_ {i} + \lambda_ {j}) K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {H} \hat {A} _ {0, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). \\ \end{array} +$$ + +Then we obtain + +$$ +\begin{array}{l} \hat {A} _ {0, i, j} ^ {(s)} = (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {s H} \hat {A} _ {0, i, j} ^ {(0)} + \frac {1 - (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {H}}{(\lambda_ {i} + \lambda_ {j}) K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} \sum_ {r = 0} ^ {s - 1} (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {r H} \\ + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta} \sum_ {r = R _ {0}} ^ {s - 1} (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {r H}). 
\\ \end{array} +$$ + +Notice that $|1 - (\lambda_i + \lambda_j)\eta | < 1$ and + +$$ +\left(1 - \left(\lambda_ {i} + \lambda_ {j}\right) \eta\right) ^ {H} \leq \exp \left(- \left(\lambda_ {i} + \lambda_ {j}\right) \eta H\right) = \exp \left(- \left(\lambda_ {i} + \lambda_ {j}\right) \alpha\right). \tag {96} +$$ + +Therefore, + +$$ +\sum_ {r = 0} ^ {s - 1} (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {r H} = \frac {1 - (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {s H}}{1 - (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {H}} \leq \frac {1}{1 - \exp (- (\lambda_ {i} + \lambda_ {j}) \alpha)}. +$$ + +Then we have + +$$ +\hat {A} _ {0, i, j} ^ {(s)} = (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {s H} \hat {A} _ {0, i, j} ^ {(0)} + \frac {1 - (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {s H}}{(\lambda_ {i} + \lambda_ {j}) K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). +$$ + +Finally, we demonstrate that for $s \geq R_0$ , $\hat{A}_{0,i,j}^{(s)}$ and $\hat{A}_{\mathrm{avg},i,j}^{(s)}$ are approximately equal to $\frac{\eta}{(\lambda_i + \lambda_j)KB_{\mathrm{loc}}}\Sigma_{0,i,j}$ . By (96), when $s \geq R_0$ , $(1 - (\lambda_i + \lambda_j)\eta)^{sH} = \mathcal{O}(\eta^{10})$ , which gives + +$$ +\hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} = \frac {1}{(\lambda_ {i} + \lambda_ {j}) K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), +$$ + +$$ +\hat {A} _ {t, i, j} ^ {(s)} = - \left(1 - \frac {1}{K}\right) \frac {(1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {t}}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \frac {\eta}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). 
+$$ + +For $s < R_0$ , since $\hat{A}_0^{(0)} = \hat{\pmb{x}}_{\mathrm{avg},0}^{(0)}\hat{\pmb{x}}_{\mathrm{avg},0}^{(0)\top} = \tilde{\mathcal{O}} (\eta)$ , we have $\hat{A}_{\mathrm{avg},i,j}^{(s)} = \tilde{\mathcal{O}} (\eta)$ and $\hat{A}_{t,i,j}^{(s)} = \tilde{\mathcal{O}} (\eta)$ . + +Secondly, we compute $P_{\perp}\hat{A}_t^{(s)}P_\parallel$ and $P_{\perp}\hat{A}_{\mathrm{avg}}^{(s)}P_{\parallel}$ . + +Lemma K.30 (General formula for $P_{\perp}\hat{A}_{t}^{(s)}P_{\parallel}$ and $P_{\perp}\hat{A}_{\mathrm{avg}}^{(s)}P_{\parallel})$ . For $1\leq i\leq m,m < j\leq d,$ + +$$ +\hat {A} _ {t, i, j} ^ {(s)} = \frac {1 - (1 - \lambda_ {i} \eta) ^ {t}}{\lambda_ {i} B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), +$$ + +$$ +\hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} = \frac {1 - (1 - \lambda_ {i} \eta) ^ {H}}{\lambda_ {i} K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). +$$ + +Proof. Note that for $1 \leq i \leq m, m < j \leq d$ and $\lambda_i > 0, \lambda_j = 0$ . By (90) and (94), + +$$ +\begin{array}{l} \hat {A} _ {t, i, j} ^ {(s)} = (1 - \lambda_ {i} \eta) ^ {t} \hat {A} _ {0, i, j} ^ {(s)} + \frac {1 - (1 - \lambda_ {i} \eta) ^ {t}}{\lambda_ {i} B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}) \\ = \frac {1 - (1 - \lambda_ {i} \eta) ^ {t}}{\lambda_ {i} B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). \\ \end{array} +$$ + +By (91) and (94), $\hat{M}_{t,i,j}^{(s)} = \tilde{\mathcal{O}} (\eta^{1.5 - 0.5\beta})$ . Then, + +$$ +\hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} = \frac {1 - (1 - \lambda_ {i} \eta) ^ {H}}{\lambda_ {i} K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). 
+$$ + +Similar to Lemma K.30, we have the following lemma for the general formula of $P_{\parallel} \hat{A}_t^{(s)} P_{\perp}$ and $P_{\parallel} \hat{A}_{\mathrm{avg}}^{(s)} P_{\perp}$ . + +Lemma K.31 (General formula for $P_{\parallel}\hat{A}_t^{(s)}P_\perp$ and $P_{\parallel}\hat{A}_{\mathrm{avg}}^{(s)}P_{\perp}$ ). For $m < i \leq d$ and $1 \leq j \leq m$ , + +$$ +\hat {A} _ {t, i, j} ^ {(s)} = \frac {1 - (1 - \lambda_ {j} \eta) ^ {t}}{\lambda_ {j} B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), +$$ + +$$ +\hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} = \frac {1 - (1 - \lambda_ {j} \eta) ^ {H}}{\lambda_ {j} K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). +$$ + +Finally, we derive the general formula for $P_{\parallel}\hat{A}_t^{(s)}P_{\parallel}$ and $P_{\parallel}\hat{A}_{\mathrm{avg}}^{(s)}P_{\parallel}$ . + +Lemma K.32 (General formula for $P_{\parallel}\hat{A}_t^{(s)}P_{\parallel}$ and $P_{\parallel}\hat{A}_{\mathrm{avg}}^{(s)}P_{\parallel}$ ). For $m < i \leq d$ and $m < j \leq d$ , + +$$ +\hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} = \frac {H \eta^ {2}}{K B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), +$$ + +$$ +\hat {A} _ {t, i, j} ^ {(s)} = \hat {A} _ {0, i, j} ^ {(s)} + \frac {t \eta^ {2}}{B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). +$$ + +Proof. Note that for $m < i \leq d$ , $m < j \leq d$ and $\lambda_i = \lambda_j = 0$ . (90) is then simplified as + +$$ +\hat {A} _ {t + 1, i, j} ^ {(s)} = \hat {A} _ {t, i, j} ^ {(s)} + \frac {\eta^ {2}}{B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {2. 5 - 0. 5 \beta}). +$$ + +Therefore, + +$$ +\hat {A} _ {t, i, j} ^ {(s)} = \hat {A} _ {0, i, j} ^ {(s)} + \frac {t \eta^ {2}}{B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} \left(\eta^ {1. 5 - 0. 5 \beta}\right). 
\tag {97} +$$ + +According to (91), $\hat{M}_{t,i,j}^{(s)} = \tilde{\mathcal{O}} (\eta^{1.5 - 0.5\beta})$ for $m < i\leq d$ and $m < j\leq d$ . Combining (91), (94) and (97) yields + +$$ +\hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} = \frac {H \eta^ {2}}{K B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}). +$$ + +![](images/4f9db5562bba8d982c04450bdefd155a9b7b4bbdeb34727f4888a7895dc935c3.jpg) + +Now, we move on to compute the general formula for $\hat{B}_t^{(s)}$ . + +Lemma K.33 (The general formula for $P_{\perp} \hat{B}_t^{(s)} P_{\parallel}$ ). Note that for $1 \leq i \leq m$ and $m < j \leq d$ , when $R_0 := \lceil \frac{10}{\lambda_m \alpha} \log \frac{1}{\eta} \rceil \leq s < R_{\mathrm{grp}}$ , + +$$ +\hat {B} _ {t, i, j} ^ {(s)} = \frac {(1 - \lambda_ {i} \eta) ^ {t}}{\lambda_ {i} K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). +$$ + +For $s < R_0$ , $\hat{B}_{t,i,j}^{(s)} = \tilde{\mathcal{O}} (\eta)$ . + +Proof. Note that for $1 \leq i \leq m$ , $\lambda_i > 0$ . By (92), + +$$ +\hat {B} _ {t + 1, i, j} ^ {(s)} = (1 - \lambda_ {i} \eta) \hat {B} _ {t, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {2. 5 - \beta}). +$$ + +Hence, + +$$ +\hat {B} _ {t, i, j} ^ {(s)} = (1 - \lambda_ {i} \eta) ^ {t} \hat {B} _ {0, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). +$$ + +According to (95), + +$$ +\begin{array}{l} \hat {B} _ {0, i, j} ^ {(s + 1)} = \hat {B} _ {H, i, j} ^ {(s)} + \hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {2. 5 - \beta}) \\ = (1 - \lambda_ {i} \eta) ^ {H} \hat {B} _ {0, i, j} ^ {(s)} + \hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). 
\\ \end{array} +$$ + +Then we have + +$$ +\begin{array}{l} \hat {B} _ {0, i, j} ^ {(s)} = (1 - \lambda_ {i} \eta) ^ {s H} \hat {B} _ {0, i, j} ^ {(0)} + \hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} \sum_ {r = 0} ^ {s - 1} (1 - \lambda_ {i} \eta) ^ {r H} + \tilde {\mathcal {O}} (\sum_ {r = 0} ^ {s - 1} (1 - \lambda_ {i} \eta) ^ {r H} \eta^ {1. 5 - \beta}) \\ = (1 - \lambda_ {i} \eta) ^ {s H} \hat {B} _ {0, i, j} ^ {(0)} + \frac {1 - (1 - \lambda_ {i} \eta) ^ {s H}}{1 - (1 - \lambda_ {i} \eta) ^ {H}} \hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}) \\ = \frac {1 - (1 - \lambda_ {i} \eta) ^ {s H}}{1 - (1 - \lambda_ {i} \eta) ^ {H}} \hat {A} _ {\mathrm {a v g}, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), \\ \end{array} +$$ + +where the second equality uses (96) and the last equality uses $\hat{B}_0^{(0)} = \hat{\pmb{x}}_{\mathrm{avg},0}^{(0)}\Delta \hat{\phi}^{(0)} = \mathbf{0}$ . For $s\geq R_0$ , $\hat{A}_{\mathrm{avg},i,j}^{(s)} = \frac{1 - (1 - \lambda_i\eta)^H}{\lambda_iKB_{\mathrm{loc}}} \eta \Sigma_{0,i,j} + \tilde{\mathcal{O}} (\eta^{1.5 - 0.5\beta})$ , which gives + +$$ +\hat {B} _ {0, i, j} ^ {(s)} = \frac {\eta}{\lambda_ {i} K B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). +$$ + +Therefore, + +$$ +\hat {B} _ {t, i, j} ^ {(s)} = \frac {(1 - \lambda_ {i} \eta) ^ {t}}{\lambda_ {i} K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). +$$ + +For $s < R_0$ , $\hat{A}_{\mathrm{avg},i,j}^{(s)} = \tilde{\mathcal{O}} (\eta)$ and therefore, $\hat{B}_{t,i,j}^{(s)} = \tilde{\mathcal{O}} (\eta)$ . + +![](images/32b84bff80b5f374e13d7d220662f423fc6d255fd2589d0d27ef5bad9a6bcc3f.jpg) + +Lemma K.34 (General formula for the elements of $P_{\perp} \hat{B}_t^{(s)} P_{\perp}$ ). For $1 \leq i \leq m$ and $1 \leq j \leq m$ , $\hat{B}_{t,i,j}^{(s)} = \tilde{\mathcal{O}}(\eta^{1.5 - \beta})$ . + +Proof. 
Note that for $1 \leq i \leq m$ , $\lambda_i > 0$ . By (92), + +$$ +\hat {B} _ {t + 1, i, j} ^ {(s)} = (1 - \lambda_ {i} \eta) \hat {B} _ {t, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {2. 5 - \beta}). +$$ + +Hence, + +$$ +\hat {B} _ {t, i, j} ^ {(s)} = (1 - \lambda_ {i} \eta) ^ {t} \hat {B} _ {0, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). +$$ + +By (95), + +$$ +\begin{array}{l} \hat {B} _ {0, i, j} ^ {(s + 1)} = \hat {B} _ {H, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {2. 5 - \beta}) \\ = (1 - \lambda_ {i} \eta) ^ {H} \hat {B} _ {0, i, j} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}) \\ = (1 - \lambda_ {i} \eta) ^ {s H} \hat {B} _ {0, i, j} ^ {(0)} + \tilde {\mathcal {O}} (\sum_ {r = 0} ^ {s - 1} (1 - \lambda_ {i} \eta) ^ {r H} \eta^ {1. 5 - \beta}) \\ = (1 - \lambda_ {i} \eta) ^ {s H} \hat {B} _ {0, i, j} ^ {(0)} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}) \\ = \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), \\ \end{array} +$$ + +where the last inequality uses $\hat{B}_0^{(0)} = 0$ . + +![](images/52eb9e3b58cb8b8311c94d5e68838c18fdd54aa2bca9b33745bd0a2ca977a4ff.jpg) + +Lemma K.35 (General formula for $P_{\parallel}\hat{B}_t^{(s)}$ ). For $m < i \leq d$ , $\hat{B}_{t,i,j}^{(s)} = \tilde{\mathcal{O}} (\eta^{1.5 - \beta})$ + +Proof. Note that $\lambda_{i} = 0$ for $m < i\leq d$ . By (92) and (95), + +$$ +\hat {B} _ {t + 1} ^ {(s)} = \hat {B} _ {t} ^ {(s)} + \tilde {\mathcal {O}} (\eta^ {2. 5 - \beta}), \quad \hat {B} _ {0} ^ {(s)} = \tilde {\mathcal {O}} (\eta^ {2. 5 - \beta}). +$$ + +Therefore, + +$$ +\hat {B} _ {t} ^ {(s)} = t \tilde {\mathcal {O}} (\eta^ {2. 5 - \beta}) + \hat {B} _ {0} ^ {(s)} = \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). 
+$$ + +![](images/b78b55b4a4a343eca118a828771d231d6eabb1f549175c74f9751c0664dd16df.jpg) + +Having obtained the expressions for $\hat{B}_t^{(s)}$ , $\hat{A}_t^{(s)}$ and $\hat{A}_{\mathrm{avg}}^{(s)}$ , we now provide explicit expressions for the first and second moments of the change of manifold projection every round in the following two lemmas. + +Lemma K.36. The expectation of the change of manifold projection every round is + +$$ +\mathbb {E} \left[ \hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)} \right] = \left\{ \begin{array}{l l} \frac {H \eta^ {2}}{2 B} \partial^ {2} \Phi \left(\hat {\phi} ^ {(0)}\right) \left[ \boldsymbol {\Sigma} _ {0} + \boldsymbol {\Psi} \left(\hat {\phi} ^ {(0)}\right) \right] + \tilde {\mathcal {O}} \left(\eta^ {1. 5 - \beta}\right), & R _ {0} < s < R _ {\mathrm {g r p}}, \\ \tilde {\mathcal {O}} (\eta), & s \leq R _ {0} \end{array} , \right. \tag {98} +$$ + +where $R_0 \coloneqq \left\lceil \frac{10}{\lambda_m \alpha} \log \frac{1}{\eta} \right\rceil$ . + +Proof. We first compute $\mathbb{E}[\hat{\phi}^{(s + 1)} - \hat{\phi}^{(s)}]$ . By (72), we only need to compute $P_{\parallel}\hat{q}_H^{(s)}$ by relating it to these matrices. Multiplying both sides of (79) by $P_{\parallel}$ gives + +$$ +\boldsymbol {P} _ {\parallel} \hat {\boldsymbol {q}} _ {t + 1} ^ {(s)} = \boldsymbol {P} _ {\parallel} \hat {\boldsymbol {q}} _ {t} ^ {(s)} - \eta \boldsymbol {P} _ {\parallel} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {B}} _ {t} ^ {(s)} ] - \frac {\eta}{2} \boldsymbol {P} _ {\parallel} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {A}} _ {t} ^ {(s)} ] + \tilde {\mathcal {O}} (\eta^ {2. 5 - \beta}). 
\tag {99} +$$ + +Similarly, according to (85), we have + +$$ +\boldsymbol {P} _ {\parallel} \hat {\boldsymbol {q}} _ {0} ^ {(s + 1)} = - \boldsymbol {P} _ {\parallel} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {B}} _ {H} ^ {(s)} ] - \frac {1}{2} \boldsymbol {P} _ {\parallel} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {A}} _ {\mathrm {a v g}} ^ {(s)} ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). \tag {100} +$$ + +Combining (99) and (100) yields + +$$ +\begin{array}{l} \boldsymbol {P} _ {\parallel} \hat {\boldsymbol {q}} _ {H} ^ {(s)} = - \frac {1}{2} \boldsymbol {P} _ {\parallel} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {A}} _ {\text {a v g}} ^ {(s - 1)} ] - \frac {\eta}{2} \boldsymbol {P} _ {\parallel} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \sum_ {t = 0} ^ {H - 1} \hat {\boldsymbol {A}} _ {t} ^ {(s)} ] \tag {101} \\ - \eta P _ {\|} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \sum_ {t = 0} ^ {H - 1} \hat {B} _ {t} ^ {(s)} ] - P _ {\|} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {B} _ {H} ^ {(s - 1)} ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). \\ \end{array} +$$ + +By Lemmas K.29, K.32 and K.30, for $s \leq R_0 = \left\lceil \frac{10}{\lambda_m \alpha} \log \frac{1}{\eta} \right\rceil$ , $\hat{\pmb{A}}_t^{(s)} = \tilde{\mathcal{O}}(\eta)$ , $\hat{\pmb{A}}_{\mathrm{avg}}^{(s)} = \tilde{\mathcal{O}}(\eta)$ and $\hat{\pmb{B}}_t^{(s)} = \tilde{\mathcal{O}}(\eta)$ . Therefore, $\mathbb{E}[\hat{\phi}^{(s+1)} - \hat{\phi}^{(s)}] = \tilde{\mathcal{O}}(\eta)$ . For $s > R_0$ , $\hat{\pmb{A}}_{\mathrm{avg}}^{(s-1)} = \hat{\pmb{A}}_{\mathrm{avg}}^{(s)} + \tilde{\mathcal{O}}(\eta^{1.5-0.5\beta})$ . 
Substituting (101) into (72) gives + +$$ +\begin{array}{r l} & {\mathbb {E} [ \hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)} ] = \underbrace {\frac {1}{2} P _ {\perp} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {A} _ {\mathrm {a v g}} ^ {(s)} ] + P _ {\perp} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {B} _ {H} ^ {(s)} ]} _ {\mathcal {T} _ {1}}} \\ & {\qquad \underbrace {- \eta P _ {\parallel} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \underbrace {\frac {1}{2} \sum_ {t = 0} ^ {H - 1} \hat {A} _ {t} ^ {(s)} + \sum_ {t = 0} ^ {H - 1} \hat {B} _ {t} ^ {(s)}} _ {\mathcal {T} _ {3}} ]} _ {\mathcal {T} _ {2}} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}).} \end{array} +$$ + +Below we compute $\mathcal{T}_1$ and $\mathcal{T}_2$ for $s > R_0$ respectively. By Lemma K.3, + +$$ +\pmb {P} _ {\perp} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \pmb {P} _ {\perp} \hat {\pmb {A}} _ {\mathrm {a v g}} ^ {(s)} \pmb {P} _ {\parallel} ] = \pmb {P} _ {\perp} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \pmb {P} _ {\parallel} \hat {\pmb {A}} _ {\mathrm {a v g}} ^ {(s)} \pmb {P} _ {\perp} ] = \mathbf {0}, +$$ + +$$ +\pmb {P} _ {\perp} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \pmb {P} _ {\parallel} \hat {\pmb {A}} _ {\mathrm {a v g}} ^ {(s)} \pmb {P} _ {\parallel} ] = \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \pmb {P} _ {\parallel} \hat {\pmb {A}} _ {\mathrm {a v g}} ^ {(s)} \pmb {P} _ {\parallel} ]. +$$ + +By Lemma K.4, + +$$ +\pmb {P} _ {\perp} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \pmb {P} _ {\perp} \hat {\pmb {A}} _ {\mathrm {a v g}} ^ {(s)} \pmb {P} _ {\perp} ] = \mathbf {0}. +$$ + +Therefore, for $s > R_0$ , + +$$ +\boldsymbol {P} _ {\perp} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\boldsymbol {A}} _ {\mathrm {a v g}} ^ {(s)} ] = \frac {H \eta^ {2}}{2 K B _ {\mathrm {l o c}}} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \boldsymbol {\Sigma} _ {0, \parallel} ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), +$$ + +where we apply Lemma K.32. 
Similarly, for $s > R_0$ + +$$ +\pmb {P} _ {\perp} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \hat {\pmb {B}} _ {H} ^ {(s)} ] = \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \pmb {P} _ {\parallel} \hat {\pmb {B}} _ {H} ^ {(s)} \pmb {P} _ {\parallel} ] = \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), +$$ + +where we apply Lemma K.35. Hence, + +$$ +\mathcal {T} _ {1} = \frac {H \eta^ {2}}{2 B} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \boldsymbol {\Sigma} _ {0, \parallel} ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). \tag {102} +$$ + +We move on to show that + +$$ +\mathcal {T} _ {2} = \frac {H \eta^ {2}}{2 B} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \boldsymbol {\Sigma} _ {0} - \boldsymbol {\Sigma} _ {0, \parallel} + (K - 1) \boldsymbol {\Psi} (\hat {\phi} ^ {(0)}) ]. \tag {103} +$$ + +Similar to the way we compute $\hat{A}_t^{(s)}$ , $\hat{A}_{\mathrm{avg}}^{(s)}$ and $\hat{B}_t^{(s)}$ , we compute $\mathcal{T}_2$ by splitting $\mathcal{T}_3$ into four matrices and then substituting them into the linear operator $-\eta P_{\parallel}\nabla^3\mathcal{L}(\hat{\phi}^{(0)})[\cdot ]$ one by one. First, we show that + +$$ +\begin{array}{l} - \eta \boldsymbol {P} _ {\parallel} \nabla^ {3} \mathcal {L} \left(\hat {\phi} ^ {(0)}\right) \left[ \boldsymbol {P} _ {\perp} \mathcal {T} _ {3} \boldsymbol {P} _ {\perp} \right] = \frac {H \eta^ {2}}{2 B} \partial^ {2} \Phi \left(\hat {\phi} ^ {(0)}\right) \left[ \boldsymbol {\Sigma} _ {0, \perp} + (K - 1) \psi \left(\boldsymbol {\Sigma} _ {0, \perp}\right) \right] \tag {104} \\ + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), \\ \end{array} +$$ + +where $\psi (\cdot)$ is interpreted as an elementwise matrix function here. 
By Lemmas K.29 and K.34, for $1\leq i\leq m$ , $1\le j\le m$ and $s > R_0$ + +$$ +\begin{array}{l} \hat {A} _ {t, i, j} ^ {(s)} = - \left(1 - \frac {1}{K}\right) \frac {(1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {t}}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \frac {\eta}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), \\ \hat {B} _ {t, i, j} ^ {(s)} = \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). \\ \end{array} +$$ + +Therefore, + +$$ +\begin{array}{l} \sum_ {t = 0} ^ {H - 1} \hat {A} _ {t, i, j} ^ {(s)} = - \left(1 - \frac {1}{K}\right) \frac {1 - (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {H}}{(\lambda_ {i} + \lambda_ {j}) ^ {2} B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \frac {H \eta}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}) \\ = \frac {H \eta}{K \left(\lambda_ {i} + \lambda_ {j}\right) B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} \\ + \left(1 - \frac {1}{K}\right) \frac {H \eta}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \underbrace {\left[ 1 - \frac {1 - (1 - (\lambda_ {i} + \lambda_ {j}) \eta) ^ {H}}{H \eta (\lambda_ {i} + \lambda_ {j})} \right]} _ {\mathcal {T} _ {4}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}). \\ \end{array} +$$ + +$$ +\sum_ {t = 0} ^ {H - 1} \hat {B} _ {t, i, j} ^ {(s)} = \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}), +$$ + +Then we simplify $\mathcal{T}_4$ . Notice that + +$$ +\begin{array}{l} \left(1 - \left(\lambda_ {i} + \lambda_ {j}\right) \eta\right) ^ {H} = \exp \left(- H \left(\lambda_ {i} + \lambda_ {j}\right) \eta\right) \left[ 1 + \mathcal {O} \left(H \eta^ {2}\right) \right] \\ = \exp (- H (\lambda_ {i} + \lambda_ {j}) \eta) + \mathcal {O} (\eta). \\ \end{array} +$$ + +Therefore, + +$$ +\mathcal {T} _ {4} = \psi \left(\left(\lambda_ {i} + \lambda_ {j}\right) H \eta\right) + \mathcal {O} (\eta). 
+$$ + +Substituting $\mathcal{T}_4$ back into the expression for $\sum_{t=0}^{H-1} \hat{A}_{t,i,j}^{(s)}$ gives + +$$ +\sum_ {t = 0} ^ {H - 1} \hat {A} _ {t, i, j} ^ {(s)} = \frac {H \eta}{K (\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \left(1 - \frac {1}{K}\right) \frac {H \eta \psi ((\lambda_ {i} + \lambda_ {j}) H \eta)}{(\lambda_ {i} + \lambda_ {j}) B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}). +$$ + +Combining the elementwise results, we obtain the following matrix form expression: + +$$ +\begin{array}{l} - \eta \pmb {P} _ {\|} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \pmb {P} _ {\perp} \mathcal {T} _ {3} \pmb {P} _ {\perp} ] = - \frac {H \eta^ {2}}{2 B} \pmb {P} _ {\|} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ \mathcal {V} _ {\pmb {H} _ {0}} (\pmb {\Sigma} _ {0, \perp} + (K - 1) \psi (\pmb {\Sigma} _ {0, \perp})) ] \\ + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}). \\ \end{array} +$$ + +By Lemma K.4, we have (104). + +Secondly, we show that for $s > R_0$ + +$$ +\begin{array}{l} - \eta P _ {\|} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) [ P _ {\perp} \mathcal {T} _ {3} P _ {\|} + P _ {\|} \mathcal {T} _ {3} P _ {\perp} ] \\ = \frac {H \eta^ {2}}{B} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \boldsymbol {\Sigma} _ {0, \perp , \parallel} + (K - 1) \psi (\boldsymbol {\Sigma} _ {0, \perp , \parallel}) ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), \tag {105} \\ \end{array} +$$ + +where $\psi (\cdot)$ is interpreted as an elementwise matrix function here. 
By symmetry of $\hat{A}_t^{(s)}$ 's and $\nabla^3\mathcal{L}(\hat{\phi}^{(0)})$ + +$$ +\frac {1}{2} \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) \left[ \sum_ {t = 0} ^ {H - 1} \boldsymbol {P} _ {\perp} \hat {\boldsymbol {A}} _ {t} ^ {(s)} \boldsymbol {P} _ {\parallel} + \sum_ {t = 0} ^ {H - 1} \boldsymbol {P} _ {\parallel} \hat {\boldsymbol {A}} _ {t} ^ {(s)} \boldsymbol {P} _ {\perp} \right] = \nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) \left[ \sum_ {t = 0} ^ {H - 1} \boldsymbol {P} _ {\perp} \hat {\boldsymbol {A}} _ {t} ^ {(s)} \boldsymbol {P} _ {\parallel} \right]. +$$ + +Therefore, we only have to evaluate + +$$ +\nabla^ {3} \mathcal {L} (\hat {\phi} ^ {(0)}) \left[ \sum_ {t = 0} ^ {H - 1} \boldsymbol {P} _ {\perp} (\hat {\boldsymbol {A}} _ {t} ^ {(s)} + \hat {\boldsymbol {B}} _ {t} ^ {(s)}) \boldsymbol {P} _ {\parallel} + \sum_ {t = 0} ^ {H - 1} \boldsymbol {P} _ {\parallel} \hat {\boldsymbol {B}} _ {t} ^ {(s)} \boldsymbol {P} _ {\perp} \right]. +$$ + +To compute the elements of $\sum_{t=0}^{H-1} P_{\perp} (\hat{A}_t^{(s)} + \hat{B}_t^{(s)}) P_{\parallel}$ , we combine Lemmas K.30 and K.33 to obtain that for $1 \leq i \leq m$ and $m < j \leq d$ , + +$$ +\begin{array}{l} \sum_ {t = 0} ^ {H - 1} \hat {A} _ {t, i, j} ^ {(s)} = \sum_ {t = 0} ^ {H - 1} \frac {1 - (1 - \lambda_ {i} \eta) ^ {t}}{\lambda_ {i} B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}) \\ = \frac {H \eta}{\lambda_ {i} B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} - \frac {1 - (1 - \lambda_ {i} \eta) ^ {H}}{\lambda_ {i} ^ {2} B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}) \\ = \frac {H \eta}{\lambda_ {i} B _ {\mathrm {l o c}}} \left(1 - \frac {1 - (1 - \lambda_ {i} \eta) ^ {H}}{\lambda_ {i} H \eta}\right) \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}) \\ = \frac {H \eta}{\lambda_ {i} B _ {\mathrm {l o c}}} \psi (\lambda_ {i} H \eta) \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 
5 - \beta}), \\ \end{array} +$$ + +and + +$$ +\begin{array}{l} \sum_ {t = 0} ^ {H - 1} \hat {B} _ {t, i, j} ^ {(s)} = \sum_ {t = 0} ^ {H - 1} \frac {\left(1 - \lambda_ {i} \eta\right) ^ {t}}{\lambda_ {i} K B _ {\mathrm {l o c}}} \eta \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {1. 5 - \beta}), \\ = \frac {1 - (1 - \lambda_ {i} \eta) ^ {H}}{\lambda_ {i} ^ {2} K B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}) \\ = \frac {H \eta}{\lambda_ {i} K B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} - \frac {H \eta}{\lambda_ {i} K B _ {\mathrm {l o c}}} \left(1 - \frac {1 - (1 - \lambda_ {i} \eta) ^ {H}}{\lambda_ {i} H \eta}\right) \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}) \\ = \frac {H \eta}{\lambda_ {i} K B _ {\mathrm {l o c}}} \Sigma_ {0, i, j} - \frac {H \eta}{\lambda_ {i} K B _ {\mathrm {l o c}}} \psi (\lambda_ {i} H \eta) \Sigma_ {0, i, j} + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}). \\ \end{array} +$$ + +Therefore, the matrix form of $\sum_{t=0}^{H-1} P_{\perp} (\hat{A}_t^{(s)} + \hat{B}_t^{(s)}) P_{\parallel}$ is + +$$ +\sum_ {t = 0} ^ {H - 1} \boldsymbol {P} _ {\perp} (\hat {\boldsymbol {A}} _ {t} ^ {(s)} + \hat {\boldsymbol {B}} _ {t} ^ {(s)}) \boldsymbol {P} _ {\parallel} = \frac {H \eta}{B} \mathcal {V} _ {\boldsymbol {H} _ {0}} \left(\boldsymbol {\Sigma} _ {0, \perp , \parallel} + (K - 1) \psi (\boldsymbol {\Sigma} _ {0, \perp , \parallel})\right) + \tilde {\mathcal {O}} (\eta^ {0. 5 - \beta}), +$$ + +where $\psi (\cdot)$ is interpreted as an elementwise matrix function here. Furthermore, by Lemma K.35, $\sum_{t = 0}^{H - 1}\hat{B}_t^{(s)} = \tilde{\mathcal{O}} (\eta^{0.5 - \beta})$ . Applying Lemma K.3, we have (105). Finally, directly applying Lemma K.5, we have + +$$ +- \eta \boldsymbol {P} _ {\parallel} \nabla^ {3} \mathcal {L} \left(\hat {\phi} ^ {(0)}\right) \left[ \boldsymbol {P} _ {\parallel} \mathcal {T} _ {3} \boldsymbol {P} _ {\parallel} \right] = \boldsymbol {0}. 
\tag {106} +$$ + +Notice that $\psi(\Sigma_{0,||}) = 0$ where $\psi(\cdot)$ operates on each element. Combining (104), (105) and (106), we obtain (103). By (102) and (103), we have (98). + +Lemma K.37. The second moment of the change of manifold projection every round is + +$$ +\mathbb {E} [ (\hat {\boldsymbol {\phi}} ^ {(s + 1)} - \hat {\boldsymbol {\phi}} ^ {(s)}) (\hat {\boldsymbol {\phi}} ^ {(s + 1)} - \hat {\boldsymbol {\phi}} ^ {(s)}) ^ {\top} ] = \left\{ \begin{array}{l l} \frac {H \eta^ {2}}{B} \pmb {\Sigma} _ {0, \parallel} + \tilde {\mathcal {O}} (\eta^ {1. 5 - 0. 5 \beta}), & R _ {0} \leq s < R _ {\mathrm {g r p}} \\ \tilde {\mathcal {O}} (\eta), & s < R _ {0} \end{array} \right., +$$ + +where $R_0 \coloneqq \left\lceil \frac{10}{\lambda_m \alpha} \log \frac{1}{\eta} \right\rceil$ . + +Proof. Directly apply Lemma K.32 and Lemma K.27 and we have the lemma. + +With Lemmas K.36 and K.37, we are ready to prove Theorem K.3. + +Proof of Theorem K.3. We first derive $\mathbb{E}[\Delta \hat{\phi}^{(R_{\mathrm{grp}})}]$ . Recall that $R_{\mathrm{grp}} = \left\lfloor \frac{1}{\alpha\eta^{\beta}} \right\rfloor = \frac{1}{H\eta^{1 + \beta}} + o(1)$ where $0 < \beta < 0.5$ . By Lemma K.36, + +$$ +\begin{array}{l} \mathbb {E} [ \hat {\phi} ^ {(R _ {\mathrm {g r p}})} - \hat {\phi} ^ {(0)} ] = \sum_ {s = 0} ^ {R _ {0}} \mathbb {E} [ \hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)} ] + \sum_ {s = R _ {0} + 1} ^ {R _ {\mathrm {g r p}} - 1} \mathbb {E} [ \hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)} ] \\ = \frac {\eta^ {1 - \beta}}{2 B} \partial^ {2} \Phi (\hat {\phi} ^ {(0)}) [ \pmb {\Sigma} _ {0} + \pmb {\Psi} (\hat {\phi} ^ {(0)}) ] + \tilde {\mathcal {O}} (\eta^ {1. 5 - 2 \beta}) + \tilde {\mathcal {O}} (\eta). \\ \end{array} +$$ + +Then we compute $\mathbb{E}[\Delta \hat{\phi}^{(R_{\mathrm{grp}})}\Delta \hat{\phi}^{(R_{\mathrm{grp}})^{\top}}]$ . 
+ +$$ +\begin{array}{l} \mathbb {E} \left[ \left(\sum_ {s = 0} ^ {R _ {\mathrm {g r p}} - 1} (\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)})\right) \left(\sum_ {s = 0} ^ {R _ {\mathrm {g r p}} - 1} (\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)})\right) ^ {\top} \right] \\ = \sum_ {s = 0} ^ {R _ {\mathrm {g r p}} - 1} \mathbb {E} [ (\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)}) (\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)}) ^ {\top} ] + \sum_ {s \neq s ^ {\prime}} \mathbb {E} [ (\hat {\phi} ^ {(s + 1)} - \hat {\phi} ^ {(s)}) ] \mathbb {E} [ (\hat {\phi} ^ {(s ^ {\prime} + 1)} - \hat {\phi} ^ {(s ^ {\prime})}) ^ {\top} ] \\ = \frac {\eta^ {1 - \beta}}{B} \Sigma_ {0, \parallel} + \tilde {\mathcal {O}} (\eta) + \tilde {\mathcal {O}} (\eta^ {1. 5 - 1. 5 \beta}), \\ \end{array} +$$ + +where the last equality uses $\mathbb{E}[(\hat{\phi}^{(s + 1)} - \hat{\phi}^{(s)})]\mathbb{E}[(\hat{\phi}^{(s' + 1)} - \hat{\phi}^{(s')})^\top ] = \tilde{\mathcal{O}} (\eta^2)$ + +![](images/78b29f9c05159682ca3d886a219bb7e9cb859ab59189ae22eecd9c91e12a2eba.jpg) + +# K.10 PROOF OF WEAK APPROXIMATION + +We are now in a position to utilize the estimate of moments obtained in previous subsections to prove the closeness of the sequence $\{\phi^{(s)}\}_{s = 0}^{\lfloor T / (H\eta^2)\rfloor}$ and the SDE solution $\{\zeta(t) : t\in [0,T]\}$ in the sense of weak approximation. 
Recall the SDE that we expect the manifold projection $\{\Phi (\bar{\theta}^{(s)})\}_{s = 0}^{\lfloor T / (H\eta^2)\rfloor}$ to track: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = P _ {\zeta} \left(\underbrace {\frac {1}{\sqrt {B}} \boldsymbol {\Sigma} _ {\parallel} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t}} _ {\text {(a) diffusion}} \underbrace {- \frac {1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Sigma}} _ {\diamond} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {(b) drift-I}} \underbrace {- \frac {K - 1}{2 B} \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \widehat {\boldsymbol {\Psi}} (\boldsymbol {\zeta}) ] \mathrm {d} t} _ {\text {(c) drift-II}}\right), \tag {107} +$$ + +According to Lemma K.3 and Lemma K.4, the drift term in total can be written as the following form: + +$$ +(\mathbf {b}) + (\mathbf {c}) = \frac {1}{2 B} \partial^ {2} \Phi (\boldsymbol {\zeta}) [ \boldsymbol {\Sigma} (\boldsymbol {\zeta}) + (K - 1) \boldsymbol {\Psi} (\boldsymbol {\zeta}) ]. +$$ + +Then by definition of $P_{\zeta}$ , (107) is equivalent to the following SDE: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = \frac {1}{\sqrt {B}} \partial \Phi (\boldsymbol {\zeta}) \boldsymbol {\Sigma} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} _ {t} + \frac {1}{2 B} \partial^ {2} \Phi (\boldsymbol {\zeta}) [ \boldsymbol {\Sigma} (\boldsymbol {\zeta}) + (K - 1) \boldsymbol {\Psi} (\boldsymbol {\zeta}) ] \mathrm {d} t. \tag {108} +$$ + +Therefore, we only have to show that $\phi^{(s)}$ closely tracks $\{\zeta(t)\}$ satisfying Equation (108). By Lemma K.11, there exists an $\epsilon_3$ neighborhood of $\Gamma$ , $\Gamma^{\epsilon_3}$ , where $\Phi(\cdot)$ is $\mathcal{C}^\infty$ -smooth. 
Due to compactness of $\Gamma$ , $\Gamma^{\epsilon_3}$ is bounded and the mappings $\partial^2\Phi(\cdot)$ , $\partial\Phi(\cdot)$ , $\Sigma^{1/2}(\cdot)$ , $\Sigma(\cdot)$ and $\Psi(\cdot)$ are all Lipschitz in $\Gamma^{\epsilon_3}$ . By Kirschbraun theorem, both the drift and diffusion term of (108) can be extended as Lipschitz functions on $\mathbb{R}^d$ . Therefore, the solution to the extended SDE exists and is unique. We further show that the solution, if initialized as a point on $\Gamma$ , always stays on the manifold almost surely. + +As a preparation, we first show that $\Gamma$ has no boundary. + +Lemma K.38. Under Assumptions 3.1 to 3.3, $\Gamma$ has no boundary. + +Proof. We prove by contradiction. If $\Gamma$ has boundary $\partial \Gamma$ , WLOG, for a point $\pmb{p} \in \partial \Gamma$ , let the Hessian at $\pmb{p}$ be diagonal with the form $\nabla^2 \mathcal{L}(\pmb{p}) = \mathrm{diag}(\lambda_1, \dots, \lambda_d)$ where $\lambda_i > 0$ for $1 \leq i \leq m$ and $\lambda_i = 0$ for $m < i \leq d$ . + +Denote by $\pmb{x}_{i:j} := (x_i, x_{i+1}, \dots, x_j)$ ( $i \leq j$ ) the $(j - i + 1)$ -dimensional vector formed by the $i$ -th to $j$ -th coordinates of $\pmb{x}$ . Since $\frac{\partial(\nabla\mathcal{L}(\pmb{p}))}{\partial\pmb{p}_{1:m}} = \mathrm{diag}(\lambda_1, \dots, \lambda_m)$ is invertible, by the implicit function theorem, there exists an open neighborhood $V$ of $\pmb{p}_{m+1:d}$ such that $\nabla\mathcal{L}(\pmb{v}) = \mathbf{0}$ , $\forall \pmb{v} \in V$ . Then, $\mathcal{L}(\pmb{v}) = \mathcal{L}(\pmb{p}) = \min_{\pmb{\theta} \in U} \mathcal{L}(\pmb{\theta})$ and hence $V \subset \Gamma$ , which contradicts with $\pmb{p} \in \partial \Gamma$ . + +Therefore, $\Gamma$ is a closed manifold (i.e., compact and without boundary). Then we have the following lemma stating that $\Gamma$ is invariant for (108). + +Lemma K.39. Let $\zeta(t)$ be the solution to (108) with $\zeta(0) \in \Gamma$ , then $\zeta(t) \in \Gamma$ for all $t \geq 0$ . 
In other words, $\Gamma$ is invariant for (108). + +Proof. According to Filipovic (2000) and Du & Duan (2007), for a closed manifold $\mathcal{M}$ to be viable for the SDE $\mathrm{d}\pmb {X}(t) = F(\pmb {X}(t))\mathrm{d}t + \pmb {B}(\pmb {X}(t))\mathrm{d}\pmb{W}_t$ where $F:\mathbb{R}^d\to \mathbb{R}^d$ and $\pmb {B}:\mathbb{R}^d\rightarrow \mathbb{R}^d$ are locally Lipschitz, we only have to verify the following Nagumo type consistency condition: + +$$ +\mu (\pmb {x}) := F (\pmb {x}) - \frac {1}{2} \sum_ {j} \mathrm {D} [ B _ {j} (\pmb {x}) ] B _ {j} (\pmb {x}) \in T _ {\pmb {x}} (\mathcal {M}), \quad B _ {j} (\pmb {x}) \in T _ {\pmb {x}} (\mathcal {M}), +$$ + +where $\mathrm{D}[\cdot ]$ is the Jacobian operator and $B_{j}(\pmb {x})$ denotes the $j$ -th column of $\pmb {B}(\pmb {x})$ . + +In our context, since for $\phi \in \Gamma$ , $\partial \Phi(\phi)$ is a projection matrix onto $T_{\phi}(\Gamma)$ , each column of $\partial \Phi(\phi)\Sigma^{1/2}(\phi)$ belongs to $T_{\phi}(\Gamma)$ , verifying the second condition. Denote by $P_{\perp}(\phi) := I_d - \partial \Phi(\phi)$ the projection onto the normal space of $\Gamma$ at $\phi$ . To verify the first condition, it suffices to show that $P_{\perp}(\phi)\mu(\phi) = 0$ . We evaluate $\sum_{j} P_{\perp}(\phi)\mathrm{D}[B_j(\phi)]B_j(\phi)$ as follows. 
+ +$$ +\begin{array}{l} \sum_ {j} \boldsymbol {P} _ {\perp} (\phi) \mathrm {D} [ B _ {j} (\phi) ] B _ {j} (\phi) = \frac {1}{B} \sum_ {j} \mathrm {D} [ \partial \Phi (\phi) \boldsymbol {\Sigma} _ {j} ^ {1 / 2} (\phi) ] \partial \Phi (\phi) \boldsymbol {\Sigma} _ {j} ^ {1 / 2} (\phi) \\ = \frac {1}{B} P _ {\perp} (\phi) \sum_ {j} \partial^ {2} \Phi (\phi) [ \boldsymbol {\Sigma} _ {j} ^ {1 / 2} (\phi), \partial \Phi (\phi) \boldsymbol {\Sigma} _ {j} ^ {1 / 2} (\phi) ] \\ = - \frac {1}{B} \nabla^ {2} \mathcal {L} (\phi) ^ {+} \nabla^ {3} \mathcal {L} (\phi) [ \boldsymbol {\Sigma} _ {\parallel} (\phi) ], \tag {109} \\ \end{array} +$$ + +where the last inequality uses Lemma K.3. Again applying Lemma K.3, we have + +$$ +\boldsymbol {P} _ {\perp} (\phi) F (\phi) = - \frac {1}{2 B} \nabla^ {2} \mathcal {L} (\phi) ^ {+} \nabla^ {3} \mathcal {L} (\phi) [ \boldsymbol {\Sigma} _ {\parallel} (\phi) ]. \tag {110} +$$ + +Combining (109) and (110), we can verify the first condition. + +In order to establish Theorem 3.2, it suffices to prove the following theorem, which captures the closeness of $\phi^{(s)}$ and $\zeta(t)$ every $R_{\mathrm{grp}}$ rounds. + +Theorem K.4. If $\| \bar{\pmb{\theta}}^{(0)} - \pmb{\phi}^{(0)}\|_2 = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta}})$ and $\zeta (0) = \phi^{(0)}\in \Gamma$ , then for $R_{\mathrm{grp}} = \left\lfloor \frac{1}{\alpha\eta^{0.75}}\right\rfloor$ every test function $g\in \mathcal{C}^3$ , + +$$ +\max _ {n = 0, \dots , \lfloor T / \eta^ {0. 7 5} \rfloor} \left| \mathbb {E} g (\boldsymbol {\phi} ^ {(n R _ {\mathrm {g r p}})}) - \mathbb {E} g (\boldsymbol {\zeta} (n \eta^ {0. 7 5})) \right| \leq C _ {g} \eta^ {0. 2 5} (\log \frac {1}{\eta}) ^ {b}, +$$ + +where $C_g > 0$ is a constant independent of $\eta$ but can depend on $g(\cdot)$ and $b > 0$ is a constant independent of $\eta$ and $g(\cdot)$ . 
+ +# K.10.1 PRELIMINARIES AND ADDITIONAL NOTATIONS + +We first introduce a general formulation for stochastic gradient algorithms (SGAs) and then specify the components of this formulation in our context. Consider the following SGA: + +$$ +\boldsymbol {x} _ {n + 1} = \boldsymbol {x} _ {n} + \eta_ {\mathrm {e}} \boldsymbol {h} (\boldsymbol {x} _ {n}, \boldsymbol {\xi} _ {n}), +$$ + +where $\pmb{x}_n \in \mathbb{R}^d$ is the parameter, $\eta_{\mathrm{e}}$ is the learning rate, $h(\cdot, \cdot)$ is the update which depends on $\pmb{x}_n$ and a random vector $\pmb{\xi}_n$ sampled from some distribution $\Xi(\pmb{x}_n)$ . Also, consider the following Stochastic Differential Equation (SDE). + +$$ +\mathrm {d} \boldsymbol {X} (t) = \boldsymbol {b} (\boldsymbol {X} (t)) \mathrm {d} t + \boldsymbol {\sigma} (\boldsymbol {X} (t)) \mathrm {d} \boldsymbol {W} _ {t}, +$$ + +where $\pmb {b}(\cdot):\mathbb{R}^d\to \mathbb{R}^d$ is the drift function and $\sigma (\cdot):\mathbb{R}^{d}\rightarrow \mathbb{R}^{d\times d}$ is the diffusion matrix. + +Denote by $\mathcal{P}_X(\pmb {x},s,t)$ the distribution of $X(t)$ with the initial condition $X(s) = x$ . Define + +$$ +\tilde {\boldsymbol {\Delta}} (\boldsymbol {x}, n) := \boldsymbol {X} _ {(n + 1) \eta_ {\mathrm {e}}} - \boldsymbol {x}, \quad \text {w h e r e} \boldsymbol {X} _ {(n + 1) \eta_ {\mathrm {e}}} \sim \mathcal {P} _ {\boldsymbol {X}} (\boldsymbol {x}, n \eta_ {\mathrm {e}}, (n + 1) \eta_ {\mathrm {e}}), +$$ + +which characterizes the update in one step. + +In our context, we view the change of manifold projection over $R_{\mathrm{grp}} \coloneqq \left\lfloor \frac{1}{\alpha\eta^{1 - \beta}} \right\rfloor (\beta \in (0, 0.5))$ rounds as one "giant step". Hence $\phi^{(nR_{\mathrm{grp}})}$ corresponds to the discrete-time random variable $x_{n}$ , and $\zeta(t)$ corresponds to the continuous-time random variable $X_{t}$ . 
According to Theorem K.2, we set + +$$ +\eta_ {\mathrm {e}} = \eta^ {1 - \beta}, \quad \boldsymbol {b} (\boldsymbol {\zeta}) = \frac {1}{2 B} \partial^ {2} \Phi (\boldsymbol {\zeta}) \left[ \boldsymbol {\Sigma} (\boldsymbol {\zeta}) + (K - 1) \boldsymbol {\Psi} (\boldsymbol {\zeta}) \right], \quad \boldsymbol {\sigma} (\boldsymbol {\zeta}) = \frac {1}{\sqrt {B}} \partial \Phi (\boldsymbol {\zeta}) \boldsymbol {\Sigma} ^ {1 / 2} (\boldsymbol {\zeta}). +$$ + +Due to compactness of $\Gamma$ , $b(\cdot)$ and $\sigma(\cdot)$ are Lipschitz on $\Gamma$ . + +As for the update in one step, $\tilde{\Delta} (\cdot ,\cdot)$ is defined in our context as: + +$$ +\tilde {\Delta} (\phi , n) := \zeta_ {(n + 1) \eta_ {\mathrm {e}}} - \phi , \qquad \text {w h e r e} \zeta_ {(n + 1) \eta_ {\mathrm {e}}} \sim \mathcal {P} _ {\zeta} (\phi , n \eta_ {\mathrm {e}}, (n + 1) \eta_ {\mathrm {e}}) \text {a n d} \phi \in \Gamma . +$$ + +For convenience, we further define + +$$ +\boldsymbol {\Delta} ^ {(n)} := \hat {\phi} ^ {((n + 1) R _ {\mathrm {g r p}})} - \hat {\phi} ^ {(n R _ {\mathrm {g r p}})}, \qquad \qquad \tilde {\boldsymbol {\Delta}} ^ {(n)} := \tilde {\boldsymbol {\Delta}} (\hat {\phi} ^ {(R _ {\mathrm {g r p}})}, n), +$$ + +$$ +\boldsymbol {b} ^ {(n)} := \boldsymbol {b} (\hat {\boldsymbol {\phi}} ^ {(n R _ {\mathrm {g r p}})}), \qquad \qquad \boldsymbol {\sigma} ^ {(n)} := \boldsymbol {\sigma} (\hat {\boldsymbol {\phi}} ^ {(n R _ {\mathrm {g r p}})}). +$$ + +We use $C_{g,i}$ to denote constants that can depend on the test function $g$ and independent of $\eta_{\mathrm{e}}$ . The following lemma relates the moments of $\tilde{\Delta}(\phi, n)$ to $b(\phi)$ and $\sigma(\phi)$ . + +Lemma K.40. 
There exists a positive constant $C_0$ independent of $\eta_{\mathrm{e}}$ and $g$ such that for all $\phi \in \Gamma$ + +$$ +| \mathbb {E} [ \tilde {\Delta} _ {i} (\phi , n) ] - \eta_ {\mathrm {e}} b _ {i} (\phi) | \leq C _ {0} \eta_ {\mathrm {e}} ^ {2}, \quad \forall 1 \leq i \leq d, +$$ + +$$ +| \mathbb {E} [ \tilde {\Delta} _ {i} (\phi , n) \tilde {\Delta} _ {j} (\pmb {x}, n) ] - \eta_ {\mathrm {e}} \sum_ {l = 1} ^ {d} \sigma_ {i, l} (\phi) \sigma_ {l, j} (\phi) | \leq C _ {0} \eta_ {\mathrm {e}} ^ {2}, \quad \forall 1 \leq i, j \leq d, +$$ + +$$ +\mathbb {E} \left[ \left| \prod_ {s = 1} ^ {6} \tilde {\Delta} _ {i _ {s}} (\phi , n) \right| \right] \leq C _ {0} \eta_ {\mathrm {e}} ^ {3}, \quad \forall 1 \leq i _ {1}, \dots , i _ {6} \leq d. +$$ + +The lemma below states that the expectation of the test function is smooth with respect to the initial value. + +Proof. Noticing that (i) the solution to (108) always stays on $\Gamma$ almost surely if its initial value $\zeta(0)$ belongs to $\Gamma$ , (ii) $b(\cdot)$ and $\sigma(\cdot)$ are $\mathcal{C}^\infty$ and (iii) $\Gamma$ is compact, we can directly apply Lemma B.3 in Malladi et al. (2022) and Lemma 26 in Li et al. (2019a) to obtain the above lemma. + +The following lemma states that the expectation of $g(\zeta(t))$ for $g \in \mathcal{C}^3$ is smooth with respect to the initial value of the SDE solution. + +Lemma K.41. Let $s \in [0, T]$ , $\phi \in \Gamma$ and $g \in \mathcal{C}^3$ . For $t \in [s, T]$ , define + +$$ +u (\phi , s, t) := \mathbb {E} _ {\zeta_ {t} \sim \mathcal {P} _ {\zeta} (\phi , s, t)} [ g (\zeta_ {t}) ]. +$$ + +Then $u(\cdot ,s,t)\in \mathcal{C}^3$ uniformly in $s,t$ + +Proof. A slight modification of Lemma B.4 in Malladi et al. (2022) will give the above lemma. 
+ +# K.10.2 PROOF OF THE APPROXIMATION IN OUR CONTEXT + +For $\beta \in (0, 0.5)$ , define $\gamma_1 \coloneqq \frac{1.5 - 2\beta}{1 - \beta}$ , $\gamma_2 \coloneqq \frac{1}{1 - \beta}$ , and then $1 < \gamma_1 < 1.5$ , $1 < \gamma_2 < 2$ . We introduce the following lemma which serves as a key step to control the approximation error. Specifically, this lemma bounds the difference in one step change between the discrete process and the continuous one as well as the product of higher orders. + +Lemma K.42. If $\| \bar{\theta}^{(0)} - \phi^{(0)}\| _2 = \mathcal{O}(\sqrt{\eta\log\frac{1}{\eta}})$ , then there exist positive constants $C_1$ and $b$ independent of $\eta_{\mathrm{e}}$ and $g$ such that for all $0\leq n < \lfloor T / \eta_{\mathrm{e}}\rfloor$ + +1. + +$$ +\left| \mathbb {E} \left[ \Delta_ {i} ^ {(n)} - \tilde {\Delta} _ {i} ^ {(n)} \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \right| \leq C _ {1} \eta_ {\mathrm {e}} ^ {\gamma_ {1}} \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b} + C _ {1} \eta_ {\mathrm {e}} ^ {\gamma_ {2}} \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b}, \quad \forall 1 \leq i \leq d, +$$ + +$$ +\left| \mathbb {E} [ \Delta_ {i} ^ {(n)} \Delta_ {j} ^ {(n)} - \tilde {\Delta} _ {i} ^ {(n)} \tilde {\Delta} _ {j} ^ {(n)} \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} ] \right| \leq C _ {1} \eta_ {\mathrm {e}} ^ {\gamma_ {1}} (\log \frac {1}{\eta_ {\mathrm {e}}}) ^ {b} + C _ {1} \eta_ {\mathrm {e}} ^ {\gamma_ {2}} (\log \frac {1}{\eta_ {\mathrm {e}}}) ^ {b}, \forall 1 \leq i, j \leq d. +$$ + +2. 
+ +$$ +\mathbb {E} \left[ \left| \prod_ {s = 1} ^ {6} \Delta_ {i _ {s}} ^ {(n)} \right| \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \leq C _ {1} ^ {2} \eta_ {\mathrm {e}} ^ {2 \gamma_ {1}} (\log \frac {1}{\eta_ {\mathrm {e}}}) ^ {2 b}, \quad \forall 1 \leq i _ {1}, \dots , i _ {6} \leq d, +$$ + +$$ +\mathbb {E} \left[ \left| \prod_ {s = 1} ^ {6} \tilde {\Delta} _ {i _ {s}} ^ {(n)} \right| \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \leq C _ {1} ^ {2} \eta_ {\mathrm {e}} ^ {2 \gamma_ {1}} (\log \frac {1}{\eta_ {\mathrm {e}}}) ^ {2 b}, \quad \forall 1 \leq i _ {1}, \dots , i _ {6} \leq d. +$$ + +Proof. According to Appendix K.7, we have + +$$ +\mathbb {E} \left[ \left| \prod_ {s = 1} ^ {6} \Delta_ {i _ {s}} ^ {(n)} \right| \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] = \tilde {\mathcal {O}} (\eta^ {3 - 3 \beta}). +$$ + +Since $\gamma_{1} < 1.5$ and $\gamma_{2} < 2$ , we can utilize Theorem K.3 and conclude that there exist positive constants $C_2$ and $b$ independent of $\eta_{\mathrm{e}}$ and $g$ such that + +$$ +\left| \mathbb {E} \left[ \Delta_ {i} ^ {(n)} - \eta_ {\mathrm {e}} b _ {i} ^ {(n)} \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \right| \leq C _ {2} \eta_ {\mathrm {e}} ^ {\gamma_ {1}} \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b} + C _ {2} \eta_ {\mathrm {e}} ^ {\gamma_ {2}} \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b}, \forall 1 \leq i \leq d, \tag {111} +$$ + +$$ +\left| \mathbb {E} \left[ \Delta_ {i} ^ {(n)} \Delta_ {j} ^ {(n)} - \eta_ {\mathrm {e}} \sum_ {l = 1} ^ {d} \sigma_ {i, l} ^ {(n)} \sigma_ {l, j} ^ {(n)} \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \right| \leq C _ {2} \eta_ {\mathrm {e}} ^ {\gamma_ {1}} \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b} + C _ {2} \eta_ {\mathrm {e}} ^ {\gamma_ {2}} \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b}, \forall 1 \leq i, j \leq d, \tag {112} +$$ + +$$ +\mathbb {E} \left[ \left| 
\prod_ {s = 1} ^ {6} \Delta_ {i _ {s}} ^ {(n)} \right| \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \leq C _ {2} ^ {2} \eta_ {\mathrm {e}} ^ {2 \gamma_ {1}} \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {2 b}, \quad \forall 1 \leq i _ {1}, \dots , i _ {6} \leq d. \tag {113} +$$ + +Combining (111) - (113) with Lemma K.40 gives the above lemma. + +Lemma K.43. For a test function $g \in \mathcal{C}^3$ , let $u_{l,n}(\phi) \coloneqq u(\phi, l\eta_{\mathrm{e}}, n\eta_{\mathrm{e}}) = \mathbb{E}_{\zeta_t \sim \mathcal{P}_{\zeta}(\phi, l\eta_{\mathrm{e}}, n\eta_{\mathrm{e}})}[g(\zeta_t)]$ . If $\|\bar{\pmb{\theta}}^{(0)} - \pmb{\phi}^{(0)}\|_2 = \mathcal{O}(\sqrt{\eta \log \frac{1}{\eta}})$ , then for all $0 \leq l \leq n-1$ and $1 \leq n \leq \lfloor T / \eta_{\mathrm{e}} \rfloor$ , + +$$ +\left| \mathbb {E} [ u _ {l + 1, n} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \pmb {\Delta} ^ {(l)}) - u _ {l + 1, n} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \tilde {\pmb {\Delta}} ^ {(l + 1)}) \mid \hat {\phi} ^ {(l R _ {\mathrm {g r p}})} ] \right| \leq C _ {g, 1} (\eta_ {\mathrm {e}} ^ {\gamma_ {1}} + \eta_ {\mathrm {e}} ^ {\gamma_ {2}}) \log (\frac {1}{\eta_ {\mathrm {e}}}) ^ {b}, +$$ + +where $C_{g,1}$ is a positive constant independent of $\eta$ and $\hat{\phi}^{(lR_{\mathrm{grp}})}$ but can depend on $g$ . + +Proof. By Lemma K.41, $u_{l,n}(\phi) \in \mathcal{C}^3$ for all $l$ and $n$ . That is, there exists $K(\cdot) \in G$ such that for all $l, n, u_{l,n}(\phi)$ and its partial derivatives up to the third order are bounded by $K(\phi)$ . 
+ +By the law of total expectation and triangle inequality, + +$$ +\begin{array}{l} \left| \mathbb {E} [ u _ {l + 1, n} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \pmb {\Delta} ^ {(l)}) - u _ {l + 1, n} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \tilde {\pmb {\Delta}} ^ {(l)}) ] \mid \hat {\phi} ^ {(l R _ {\mathrm {g r p}})} \right| \\ \leq \underbrace {\left| \mathbb {E} \left[ u _ {l + 1 , n} \left(\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \boldsymbol {\Delta} ^ {(l)}\right) - u _ {l + 1 , n} \left(\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \tilde {\boldsymbol {\Delta}} ^ {(l)}\right) \mid \hat {\phi} ^ {(l R _ {\mathrm {g r p}})} , \mathcal {E} _ {0} ^ {(l R _ {\mathrm {g r p}})} \right] \right|} _ {\mathcal {A} _ {1}} \\ + \underbrace {\eta^ {1 0 0} \mathbb {E} \left[ \left| u _ {l + 1 , n} \left(\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \boldsymbol {\Delta} ^ {(l)}\right) \right| \mid \hat {\phi} ^ {(l R _ {\mathrm {g r p}})}, \bar {\mathcal {E}} _ {0} ^ {(l R _ {\mathrm {g r p}})} \right]} _ {\mathcal {A} _ {2}} \\ + \underbrace {\eta^ {1 0 0} \mathbb {E} [ | u _ {l + 1 , n} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \tilde {\Delta} ^ {(l)}) | | \hat {\phi} ^ {(l R _ {\mathrm {g r p}})} , \bar {\mathcal {E}} _ {0} ^ {(l R _ {\mathrm {g r p}})} ]} _ {\mathcal {A} _ {3}}. \\ \end{array} +$$ + +We first bound $\mathcal{A}_2$ and $\mathcal{A}_3$ . Since $\hat{\phi}^{(lR_{\mathrm{grp}})} \in \Gamma$ , both $\hat{\phi}^{(lR_{\mathrm{grp}})} + \pmb{\Delta}^{(l)}$ and $\hat{\phi}^{(lR_{\mathrm{grp}})} + \tilde{\Delta}^{(l)}$ belong to $\Gamma$ . Due to compactness of $\Gamma$ and smoothness of $u_{l+1,n}(\cdot)$ on $\Gamma$ , there exist a positive constant $C_{g,2}$ such that $\mathcal{A}_2 + \mathcal{A}_3 \leq C_{g,2}\eta^{100}$ . + +We proceed to bound $\mathcal{A}_1$ . 
Expanding $u_{l + 1,n}(\cdot)$ at $\hat{\phi}^{(lR_{\mathrm{grp}})}$ and by triangle inequality, + +$$ +\begin{array}{l} \mathcal {A} _ {1} ^ {(s)} \leq \underbrace {\sum_ {i = 1} ^ {d} \left| \mathbb {E} \big [ \frac {\partial u _ {l + 1 , n}}{\partial \phi_ {i}} (\hat {\boldsymbol {\phi}} ^ {(l R _ {\mathrm {g r p}})}) \left(\Delta_ {i} ^ {(l)} - \tilde {\Delta} _ {i} ^ {(l)}\right) | \hat {\boldsymbol {\phi}} ^ {(l R _ {\mathrm {g r p}})}, \mathcal {E} _ {0} ^ {(l R _ {\mathrm {g r p}})} \right|} _ {\mathcal {B} _ {1}} \\ + \underbrace {\frac {1}{2} \sum_ {1 \leq i , j \leq d} \left| \mathbb {E} [ \frac {\partial^ {2} u _ {l + 1 , n}}{\partial \phi_ {i} \partial \phi_ {j}} (\hat {\boldsymbol {\phi}} ^ {(l R _ {\mathrm {g r p}})}) (\Delta_ {i} ^ {(l)} \Delta_ {j} ^ {(l)} - \tilde {\Delta} _ {i} ^ {(l)} \tilde {\Delta} _ {j} ^ {(l)}) | \hat {\boldsymbol {\phi}} ^ {(l R _ {\mathrm {g r p}})}, \mathcal {E} _ {0} ^ {(l R _ {\mathrm {g r p}})} ] \right|} _ {\mathcal {B} _ {2}} \\ + | \mathcal {R} | + | \tilde {\mathcal {R}} |, \\ \end{array} +$$ + +where the remainders $\mathcal{R}$ and $\tilde{\mathcal{R}}$ are + +$$ +\mathcal {R} = \frac {1}{6} \sum_ {1 \leq i, j, p \leq d} \mathbb {E} [ \frac {\partial^ {3} u _ {l + 1 , n}}{\partial \phi_ {i} \partial \phi_ {j} \partial \phi_ {p}} (\hat {\boldsymbol {\phi}} ^ {(l R _ {\mathrm {g r p}})} + \theta \boldsymbol {\Delta} ^ {(l)}) \Delta_ {i} ^ {(l)} \Delta_ {j} ^ {(l)} | \hat {\boldsymbol {\phi}} ^ {(l R _ {\mathrm {g r p}})}, \mathcal {E} _ {0} ^ {(l R _ {\mathrm {g r p}})} ], +$$ + +$$ +\tilde {\mathcal {R}} = \frac {1}{6} \sum_ {1 \leq i, j, p \leq d} \mathbb {E} [ \frac {\partial^ {3} u _ {l + 1 , n}}{\partial \phi_ {i} \partial \phi_ {j} \partial \phi_ {p}} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \tilde {\theta} \tilde {\Delta} ^ {(l)}) \tilde {\Delta} _ {i} ^ {(l)} \tilde {\Delta} _ {j} ^ {(l)} \tilde {\Delta} _ {p} ^ {(l)} | \hat {\phi} ^ {(l R _ {\mathrm {g r p}})}, \mathcal {E} _ {0} ^ {(l R _ {\mathrm {g 
r p}})} ], +$$ + +for some $\theta, \tilde{\theta} \in (0,1)$ . Since $\hat{\phi}^{(LR_{\mathrm{grp}})}$ belongs to $\Gamma$ which is compact, there exists a constant $C_{g,3}$ such that for all $1 \leq i,j \leq d, 0 \leq l \leq n-1, 1 \leq n \leq \lfloor T/\eta_{\mathrm{e}} \rfloor$ , + +$$ +| \frac {\partial u _ {l + 1 , n}}{\partial \phi_ {i}} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})}) | \leq C _ {g, 3}, \qquad | \frac {\partial^ {2} u _ {l + 1 , n}}{\partial \phi_ {i} \partial \phi_ {j}} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})}) | \leq C _ {g, 3}. +$$ + +By Lemma K.42, + +$$ +\mathcal {B} _ {1} \leq d C _ {g, 3} C _ {1} \left(\eta_ {\mathrm {e}} ^ {\gamma_ {1}} + \eta_ {\mathrm {e}} ^ {\gamma_ {2}}\right) \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b}, \quad \mathcal {B} _ {2} \leq \frac {d ^ {2}}{2} C _ {g, 3} C _ {1} \left(\eta_ {\mathrm {e}} ^ {\gamma_ {1}} + \eta_ {\mathrm {e}} ^ {\gamma_ {2}}\right) \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b}. +$$ + +Now we bound the remainders. By Cauchy-Schwartz inequality, + +$$ +\begin{array}{l} \left| \mathbb {E} [ \frac {\partial^ {3} u _ {l + 1 , n}}{\partial \phi_ {i} \partial \phi_ {j} \partial \phi_ {p}} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \theta \pmb {\Delta} ^ {(l)}) \Delta_ {i} ^ {(l)} \Delta_ {j} ^ {(l)} \Delta_ {p} ^ {(l)} \mid \hat {\phi} ^ {(l R _ {\mathrm {g r p}})}, \mathcal {E} _ {0} ^ {(l R _ {\mathrm {g r p}})} ] \right| \\ \leq \left(\mathbb {E} \left[ \left(\frac {\partial^ {3} u _ {l + 1 , n}}{\partial \phi_ {i} \partial \phi_ {j} \partial \phi_ {p}} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \theta \boldsymbol {\Delta} ^ {(l)})\right) ^ {2} | \hat {\phi} ^ {(l R _ {\mathrm {g r p}})}, \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right]\right) ^ {1 / 2} \times \\ \left(\mathbb {E} [ (\Delta_ {i} ^ {(l)} \Delta_ {j} ^ {(l)} \Delta_ {p} ^ {(l)}) ^ {2} | \hat {\phi} ^ {(l R _ {\mathrm {g r p}})}, \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} ]\right) ^ {1 / 2}. 
\\ \end{array} +$$ + +Since $\hat{\phi}^{(lR_{\mathrm{grp}})}$ and $\hat{\phi}^{(lR_{\mathrm{grp}})} + \Delta^{(l)}$ both belong to $\Gamma$ which is compact, there exists a constant $C_{g,4}$ such that for all $1 \leq i, j, p \leq d, 0 \leq l \leq n - 1$ and $1 \leq n \leq \lfloor T / \eta_{\mathrm{e}} \rfloor$ , + +$$ +\left. \right.\left(\frac {\partial^ {3} u _ {l + 1 , n}}{\partial \phi_ {i} \partial \phi_ {j} \partial \phi_ {p}} \left(\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \theta \Delta^ {(l)}\right)\right) ^ {2} \leq C _ {g, 4} ^ {2}. +$$ + +Combining the above inequality with Lemma K.42, we have + +$$ +\left| \mathbb {E} [ \frac {\partial^ {3} u _ {l + 1 , n}}{\partial \phi_ {i} \partial \phi_ {j} \partial \phi_ {p}} (\hat {\phi} ^ {(l R _ {\mathrm {g r p}})} + \theta \pmb {\Delta} ^ {(l)}) \Delta_ {i} ^ {(l)} \Delta_ {j} ^ {(l)} \Delta_ {p} ^ {(l)} | \hat {\phi} ^ {(l R _ {\mathrm {g r p}})}, \mathcal {E} _ {0} ^ {(l R _ {\mathrm {g r p}})} ] \right| \leq C _ {g, 4} C _ {1} \eta_ {\mathrm {e}} ^ {\gamma_ {1}} \log (\frac {1}{\eta_ {\mathrm {e}}}) ^ {b}. +$$ + +Hence, for all $1 \leq n \leq \lfloor T / \eta_{\mathrm{e}} \rfloor, 0 \leq l \leq n - 1$ + +$$ +| \mathcal {R} | \leq \frac {d ^ {3}}{6} C _ {g, 4} C _ {1} \eta_ {\mathrm {e}} ^ {\gamma_ {1}} \log \left(\frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b}. +$$ + +Similarly, we can show that there exists a constant $C_{g,5}$ such that for all $1 \leq n \leq \lfloor T / \eta_{\mathrm{e}} \rfloor$ , $0 \leq l \leq n - 1$ , + +$$ +| \tilde {\mathcal {R}} | \leq \frac {d ^ {3}}{6} C _ {g, 5} C _ {1} \eta_ {\mathrm {e}} ^ {\gamma_ {1}} \log \left(\frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b}. +$$ + +Combining the bounds on $\mathcal{A}_1$ to $\mathcal{A}_3$ , we have the lemma. + +Finally, we prove Theorem K.4. + +Proof. 
For $0 \leq l \leq n$ , define the random variable $\hat{\zeta}_{l,n}$ which follows the distribution $\mathcal{P}_{\zeta}(\hat{\phi}^{(lR_{\mathrm{grp}})}, l, n)$ conditioned on $\hat{\phi}^{(lR_{\mathrm{grp}})}$ . Therefore, $\mathbb{P}(\hat{\zeta}_{n,n} = \hat{\phi}^{(nR_{\mathrm{grp}})}) = 1$ and $\hat{\zeta}_{0,n} \sim \zeta_{n\eta_{\mathrm{e}}}$ . Denote by $u(\phi, s, t) \coloneqq \mathbb{E}_{\zeta_t \sim \mathcal{P}_{\zeta}(\phi, s, t)}[g(\zeta_t)]$ and $\mathcal{T}_{l+1,n} \coloneqq u_{l+1,n}(\hat{\phi}^{(lR_{\mathrm{grp}})} + \Delta^{(l)}, (l+1)\eta_{\mathrm{e}}, n\eta_{\mathrm{e}}) - u_{l+1,n}(\hat{\phi}^{(lR_{\mathrm{grp}})} + \tilde{\Delta}^{(l)}, (l+1)\eta_{\mathrm{e}}, n\eta_{\mathrm{e}})$ . + +$$ +\begin{array}{l} \left| \mathbb {E} \left[ g \left(\boldsymbol {\phi} ^ {\left(n R _ {\mathrm {g r p}}\right)}\right) \right] - \mathbb {E} \left[ g \left(\boldsymbol {\zeta} \left(n \eta_ {\mathrm {e}}\right)\right) \right] \right| \\ \leq \left| \mathbb {E} \left[ g \left(\hat {\zeta} _ {n, n}\right) - g \left(\hat {\zeta} _ {0, n}\right) \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \right| + \mathcal {O} (\eta^ {1 0 0}) \\ \leq \sum_ {l = 0} ^ {n - 1} \left| \mathbb {E} \left[ g \left(\hat {\zeta} _ {l + 1, n}\right) - g \left(\hat {\zeta} _ {l, n}\right) \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \right| + \mathcal {O} (\eta^ {1 0 0}) \\ = \sum_ {l = 0} ^ {n - 1} \left| \mathbb {E} \left[ u \left(\hat {\phi} ^ {\left((l + 1) R _ {\mathrm {g r p}}\right)}, (l + 1) \eta_ {\mathrm {e}}, n \eta_ {\mathrm {e}}\right) - u \left(\hat {\zeta} _ {l, l + 1}, (l + 1) \eta_ {\mathrm {e}}, n \eta_ {\mathrm {e}}\right) \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \right| + \mathcal {O} (\eta^ {1 0 0}) \\ = \sum_ {l = 0} ^ {n - 1} \left| \mathbb {E} \left[ \mathcal {T} _ {l + 1, n} \mid \mathcal {E} _ {0} ^ {(n R _ {\mathrm {g r p}})} \right] \right| + \mathcal {O} (\eta^ {1 0 0}). 
\\ \end{array} +$$ + +Noticing that $\mathbb{E}[\mathcal{T}_{l + 1,n}\mid \mathcal{E}_0^{(nR_{\mathrm{grp}})}] = \mathbb{E}[\mathbb{E}[\mathcal{T}_{l + 1,n}\mid \hat{\phi}^{(lR_{\mathrm{grp}})},\mathcal{E}_0^{(lR_{\mathrm{grp}})}]\mid \mathcal{E}_0^{(nR_{\mathrm{grp}})}]$ , we can apply Lemma K.43 and obtain that for all $0\leq n\leq \lfloor T / \eta_{\mathrm{e}}\rfloor$ + +$$ +\begin{array}{l} \left| \mathbb {E} \left[ g \left(\phi^ {\left(n R _ {\mathrm {g r p}}\right)}\right) \right] - \mathbb {E} \left[ g \left(\zeta \left(n \eta_ {\mathrm {e}}\right)\right) \right] \right| \leq n C _ {g, 1} \left(\eta_ {\mathrm {e}} ^ {\gamma_ {1}} + \eta_ {\mathrm {e}} ^ {\gamma_ {2}}\right) \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b} \\ \leq T C _ {g, 1} \left(\eta_ {\mathrm {e}} ^ {\gamma_ {1} - 1} + \eta_ {\mathrm {e}} ^ {\gamma_ {2} - 1}\right) \left(\log \frac {1}{\eta_ {\mathrm {e}}}\right) ^ {b}. \\ \end{array} +$$ + +Notice that $\eta_{\mathrm{e}}^{\gamma_1} + \eta_{\mathrm{e}}^{\gamma_2} = \eta^{0.5 - \beta} + \eta^\beta$ and $T, C_{g,1}$ are both constants that are independent of $\eta_{\mathrm{e}}$ . Let $\beta = 0.25$ and we have Theorem K.4. + +Having established Theorem K.4, we are thus led to prove Theorem 3.2. + +Proof of Theorem 3.2. Denote by $s_{\mathrm{cls}} = s_0 + s_1 = \mathcal{O}(\log \frac{1}{\eta})$ , which is the time the global iterate $\bar{\theta}^{(s)}$ will reach within $\tilde{\mathcal{O}} (\eta)$ from $\Gamma$ with high probability. Define $\tilde{\zeta} (t)$ to be the solution to the limiting SDE (108) conditioned on $\mathcal{E}_0^{(s_{\mathrm{cls}})}$ and $\tilde{\zeta}(0) = \phi^{(s_{\mathrm{cls}})}$ . By Theorem K.4, we have + +$$ +\max _ {n = 0, \dots , \lfloor T / \eta^ {0. 7 5} \rfloor} \left| \mathbb {E} [ g (\phi^ {(n R _ {\mathrm {g r p}} + s _ {\mathrm {c l s}})}) - g (\tilde {\zeta} (n \eta^ {0. 7 5})) | \phi^ {(s _ {\mathrm {c l s}})}, \mathcal {E} _ {0} ^ {(s _ {\mathrm {c l s}})} ] \right| \leq C _ {g} \eta^ {0. 
2 5} (\log \frac {1}{\eta}) ^ {b}, +$$ + +where $R_{\mathrm{grp}} = \left\lfloor \frac{1}{\alpha\eta^{0.75}}\right\rfloor$ . Noticing that (i) $g\in \mathcal{C}^3$ (ii) $\pmb {b},\pmb {\sigma}\in \mathcal{C}^{\infty}$ and (iii) $\zeta (t),\tilde{\zeta} (t)\in \Gamma ,t\in [0,\infty)$ almost surely, we can conclude that given $\mathcal{E}_0^{(s_{\mathrm{cls}})}$ + +$$ +\| \boldsymbol {\zeta} (t) - \tilde {\boldsymbol {\zeta}} (t) \| _ {2} = \tilde {\mathcal {O}} (\sqrt {\eta}), \quad \forall t \in [ 0, T ]. +$$ + +Then there exists positive constant $b'$ independent of $\eta$ and $g$ , and $C_g'$ which is independent of $\eta$ but can depend on $g$ such that + +$$ +\max _ {n = 0, \dots , \lfloor T / \eta^ {0. 7 5} \rfloor} \left| \mathbb {E} \left[ g \left(\phi^ {\left(n R _ {\mathrm {g r p}} + s _ {\mathrm {c l s}}\right)}\right) - g \left(\zeta \left(n \eta^ {0. 7 5} + s _ {\mathrm {c l s}} H \eta^ {2}\right)\right) \right] \right| \leq C _ {g} ^ {\prime} \eta^ {0. 2 5} (\log \frac {1}{\eta}) ^ {b ^ {\prime}}. +$$ + +We can view the random variable pairs $\{(\phi^{(nR_{\mathrm{grp}} + s_{\mathrm{cls}})},\zeta_{n\eta^{0.75} + s_{\mathrm{cls}}\alpha \eta}):n = 0,\dots ,\lfloor T / \eta^{0.75}\rfloor \}$ as reference points and then approximate the value of $g(\phi^{(s)})$ and $g(\zeta (sH\eta^2))$ with the value at the nearest reference points. By Lemmas K.18 and K.23, for $0\leq r\leq R_{\mathrm{grp}}$ and $0\leq s\leq R_{\mathrm{tot}} - r$ + +$$ +\mathbb {E} [ \| \phi^ {(s + r)} - \phi^ {(s)} \| _ {2} ] = \tilde {\mathcal {O}} (\eta^ {0. 3 7 5}). +$$ + +Since the values of $\phi^{(s)}$ and $\zeta$ are restricted to a bounded set, $g(\cdot)$ is Lipschitz on that set. Therefore, we have the theorem. + +# L DERIVING THE SLOW SDE FOR LABEL NOISE REGULARIZATION + +In this section, we formulate how label noise regularization works and provide a detailed derivation of the theoretical results in Appendix G. 
+ +Consider training a model for $C$ -class classification on dataset $\mathcal{D} = \{(x_i, y_i)\}_{i=1}^N$ , where $x_i$ denotes the input and $y_i \in [C]$ denotes the label. Denote by $\Delta_+^{C-1}$ the $(C-1)$ -open simplex. Let $f(\theta; x) \in \Delta_+^{C-1}$ be the model output on input $x$ with parameter $\theta$ , whose $j$ -th coordinate $f_j(\theta; x)$ stands for the probability of $x$ belonging to class $j$ . Let $\ell(\theta; x, y)$ be the cross entropy loss given input $x$ and label $y$ , i.e., $\ell(\theta; x, y) = -\log f_y(\theta; x)$ . + +Adding label noise means replacing the true label $y$ with a fresh noisy label $\hat{y}$ every time we access the sample. Specifically, $\hat{y}$ is set as the true label $y$ with probability $1 - p$ and as any other label with probability $\frac{p}{C - 1}$ , where $p$ is the fixed corruption probability. The training loss is defined as $\mathcal{L}(\boldsymbol{\theta}) = \frac{1}{N}\sum_{i=1}^{N}\mathbb{E}[\ell(\boldsymbol{\theta};\boldsymbol{x}_i,\hat{y}_i)]$ , where the expectation is taken over the stochasticity of $\hat{y}_i$ . Notice that given a sample $(x,y)$ , + +$$ +\mathbb {E} [ \ell (\boldsymbol {\theta}; \boldsymbol {x}, \hat {y}) ] = - (1 - p) \log f _ {y} (\boldsymbol {\theta}; \boldsymbol {x}) - \frac {p}{C - 1} \sum_ {j \neq y} \log f _ {j} (\boldsymbol {\theta}; \boldsymbol {x}). \tag {114} +$$ + +By the property of cross-entropy loss, (114) attains its global minimum if and only if $f_{j} = \frac{p}{C - 1}$ , for all $j \in [C], j \neq y$ and $f_{y} = 1 - p$ . Due to the large expressiveness of modern deep learning models, there typically exists a set $S^{*} := \{\pmb{\theta} \mid f_{i}(\pmb{\theta}) = \mathbb{E}[\hat{y}_{i}], \forall i \in [N]\}$ such that all elements of $S^{*}$ minimize $\mathcal{L}(\pmb{\theta})$ . Then, the manifold $\Gamma$ is a subset of $S^{*}$ . 
The following lemma relates the noise covariance $\pmb{\Sigma}(\pmb{\theta}) := \frac{1}{N}\sum_{i \in [N]}\mathbb{E}[(\nabla\ell(\pmb{\theta};\pmb{x}_{i},\hat{y}_{i}) - \nabla\mathcal{L}(\pmb{\theta}))(\nabla\ell(\pmb{\theta};\pmb{x}_{i},\hat{y}_{i}) - \nabla\mathcal{L}(\pmb{\theta}))^{\top}]$ to the Hessian $\nabla^{2}\mathcal{L}(\pmb{\theta})$ for all $\pmb{\theta} \in S^{*}$ . + +Lemma L.1. If $f(\pmb{\theta}; \pmb{x}_i, \hat{y}_i)$ is $\mathcal{C}^2$ -smooth on $\mathbb{R}^d$ given any $i \in [N]$ , $\hat{y}_i \in [C]$ and $\mathcal{S}^* \neq \emptyset$ , then for all $\pmb{\theta} \in \mathcal{S}^*$ , $\pmb{\Sigma}(\pmb{\theta}) = \nabla^2 \mathcal{L}(\pmb{\theta})$ . + +Proof. Since $\mathcal{L}(\cdot)$ is $\mathcal{C}^2$ -smooth, $\nabla \mathcal{L}(\pmb{\theta}) = \mathbf{0}$ for all $\pmb{\theta} \in S^*$ . To prove the above lemma, it suffices to show that $\forall i \in [N]$ , $\mathbb{E}[\nabla \ell(\pmb{\theta}; \pmb{x}_i, \hat{y}_i) \nabla \ell(\pmb{\theta}; \pmb{x}_i, \hat{y}_i)^\top] = \nabla^2 \mathcal{L}(\pmb{\theta})$ . W.L.O.G., let $y = 1$ and therefore for all $\pmb{\theta} \in S^*$ , + +$$ +f _ {1} (\boldsymbol {\theta}; \boldsymbol {x}) = 1 - p =: a _ {1}, +$$ + +$$ +f _ {j} (\boldsymbol {\theta}; \boldsymbol {x}) = \frac {p}{C - 1} =: a _ {2}, \forall j > 1, j \in [ C ]. +$$ + +Additionally, let $h(x) \coloneqq -\log (x), x \in \mathbb{R}^{+}$ . The stochastic gradient $\nabla \ell(\pmb{\theta}; \pmb{x}, \hat{y})$ follows the distribution: + +$$ +\nabla \ell (\boldsymbol {\theta}; \boldsymbol {x}, \hat {y}) = \left\{ \begin{array}{l l} h ^ {\prime} (a _ {1}) \frac {\partial f _ {1}}{\partial \boldsymbol {\theta}} & \text {w . p .} 1 - p, \\ h ^ {\prime} (a _ {2}) \frac {\partial f _ {j}}{\partial \boldsymbol {\theta}}, & \text {w . p .} \frac {p}{C - 1}, \forall j \in [ C ], j > 1. \end{array} \right. 
+$$ + +Then the covariance of the gradient noise is: + +$$ +\begin{array}{l} \mathbb {E} [ \nabla \ell (\boldsymbol {\theta}; \boldsymbol {x}, \hat {y}) \nabla \ell (\boldsymbol {\theta}; \boldsymbol {x}, \hat {y}) ^ {\top} ] = (1 - p) \left(h ^ {\prime} \left(a _ {1}\right)\right) ^ {2} \frac {\partial f _ {1} \left(\boldsymbol {\theta} ^ {*}\right)}{\partial \boldsymbol {\theta} ^ {*}} \left(\frac {\partial f _ {1} \left(\boldsymbol {\theta} ^ {*}\right)}{\partial \boldsymbol {\theta} ^ {*}}\right) ^ {\top} \\ + \frac {p \left(h ^ {\prime} \left(a _ {2}\right)\right) ^ {2}}{C - 1} \sum_ {j > 1} \frac {\partial f _ {j} \left(\boldsymbol {\theta} ^ {*}\right)}{\partial \boldsymbol {\theta} ^ {*}} \left(\frac {\partial f _ {j} \left(\boldsymbol {\theta} ^ {*}\right)}{\partial \boldsymbol {\theta} ^ {*}}\right) ^ {\top}. \\ \end{array} +$$ + +And the hessian is: + +$$ +\begin{array}{l} \nabla^ {2} \mathcal {L} (\boldsymbol {\theta}) = (1 - p) h ^ {\prime} (a _ {1}) \frac {\partial^ {2} f _ {1}}{\partial \boldsymbol {\theta} ^ {2}} + \frac {p h ^ {\prime} (a _ {2})}{C - 1} \sum_ {j > 1} \frac {\partial^ {2} f _ {j}}{\partial \boldsymbol {\theta} ^ {2}} \\ \underbrace {\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad} _ {\mathcal {T}} \\ + (1 - p) h ^ {\prime \prime} (a _ {1}) \frac {\partial f _ {1}}{\partial \boldsymbol {\theta}} \left(\frac {\partial f _ {1}}{\partial \boldsymbol {\theta}}\right) ^ {\top} + \frac {p h ^ {\prime \prime} (a _ {2})}{C - 1} \sum_ {j > 1} \frac {\partial f _ {j}}{\partial \boldsymbol {\theta}} \left(\frac {\partial f _ {j} (\boldsymbol {\theta})}{\partial \boldsymbol {\theta}}\right) ^ {\top}. 
\\ \end{array} +$$ + +Since $\sum_{j\in [C]}f_j = 1$ , + +$$ +\frac {\partial^ {2} f _ {1}}{\partial \boldsymbol {\theta} ^ {2}} = - \sum_ {j > 1} \frac {\partial^ {2} f _ {j}}{\partial \boldsymbol {\theta} ^ {2}}. \tag {115} +$$ + +Also, notice that $h^\prime (x) = -\frac{1}{x}$ . Therefore, + +$$ +(1 - p) h ^ {\prime} \left(a _ {1}\right) = \frac {p h ^ {\prime} \left(a _ {2}\right)}{C - 1}. \tag {116} +$$ + +Substituting (115) and (116) into the expression of $\mathcal{T}$ gives $\mathcal{T} = \mathbf{0}$ , which simplifies $\nabla^2\mathcal{L}(\pmb{\theta})$ as the following form: + +$$ +\nabla^ {2} \mathcal {L} (\pmb {\theta}) = (1 - p) h ^ {\prime \prime} (a _ {1}) \frac {\partial f _ {1}}{\partial \pmb {\theta}} \left(\frac {\partial f _ {1} (\pmb {\theta})}{\partial \pmb {\theta}}\right) ^ {\top} + \frac {p h ^ {\prime \prime} (a _ {2})}{C - 1} \sum_ {j > 1} \frac {\partial f _ {j}}{\partial \pmb {\theta}} \left(\frac {\partial f _ {j} (\pmb {\theta})}{\partial \pmb {\theta}}\right) ^ {\top}. +$$ + +Again notice that $h''(x) = (h'(x))^2$ for all $x \in \mathbb{R}^+$ . Therefore, $\nabla^2\mathcal{L}(\pmb{\theta}) = \pmb{\Sigma}(\pmb{\theta})$ . + +![](images/c34ca2c7035c13ce87c8c0a9518312e2649ffe323b4770bd03aac6d8f4a67397.jpg) + +With the property $\pmb{\Sigma}(\pmb{\theta}) = \nabla^2\mathcal{L}(\pmb{\theta})$ , we are ready to prove Theorem G.1. + +Proof of Theorem G.1. Recall the general form of the slow SDE: + +$$ +\mathrm {d} \boldsymbol {\zeta} (t) = \frac {1}{\sqrt {B}} \partial \Phi (\boldsymbol {\zeta}) \boldsymbol {\Sigma} ^ {1 / 2} (\boldsymbol {\zeta}) \mathrm {d} \boldsymbol {W} (t) + \frac {1}{2 B} \partial^ {2} \Phi (\boldsymbol {\zeta}) [ \boldsymbol {\Sigma} (\boldsymbol {\zeta}) + (K - 1) \boldsymbol {\Psi} (\boldsymbol {\zeta}) ] \mathrm {d} t, \tag {117} +$$ + +where $\Psi$ is defined in Definition K.6. 
Since for $\zeta \in \Gamma$ , $\Sigma(\zeta) = \nabla^2\mathcal{L}(\zeta)$ , then + +$$ +\partial \Phi (\zeta) \Sigma^ {1 / 2} (\zeta) = \mathbf {0}. \tag {118} +$$ + +Now we show that + +$$ +\partial^ {2} \Phi (\boldsymbol {\zeta}) [ \boldsymbol {\Sigma} (\boldsymbol {\zeta}) ] = - \nabla_ {\Gamma} \operatorname {t r} \left(\nabla^ {2} \mathcal {L} (\boldsymbol {\zeta})\right). \tag {119} +$$ + +Since $\nabla^2\mathcal{L}(\zeta) = \Sigma (\zeta)$ , $\mathcal{V}_{\nabla^2\mathcal{L}(\zeta)}[\Sigma ] = \frac{1}{2}\pmb {I}$ . By Lemma K.4, + +$$ +\partial^ {2} \Phi (\boldsymbol {\zeta}) [ \boldsymbol {\Sigma} (\boldsymbol {\zeta}) ] = - \frac {1}{2} \partial \Phi (\boldsymbol {\zeta}) \nabla^ {3} \mathcal {L} (\boldsymbol {\zeta}) [ \boldsymbol {I} ] = - \frac {1}{2} \nabla_ {\Gamma} \mathrm {t r} (\nabla^ {2} \mathcal {L} (\boldsymbol {\zeta})). +$$ + +Finally, we show that + +$$ +\partial^ {2} \Phi (\boldsymbol {\zeta}) [ \Psi (\boldsymbol {\zeta}) ] = - \nabla_ {\Gamma} \frac {1}{2 H \eta} \operatorname {t r} (F (2 H \eta \nabla^ {2} \mathcal {L} (\boldsymbol {\zeta}))). \tag {120} +$$ + +Define $\hat{\psi}(x) \coloneqq x\psi(x) = e^{-x} - 1 + x$ . By definition of $\Psi(\zeta)$ , when $\Sigma(\zeta) = \nabla^2\mathcal{L}(\zeta)$ , $\Psi(\zeta) = \hat{\psi}(2\eta H\nabla^2\mathcal{L}(\zeta))$ , where $\hat{\psi}(\cdot)$ is interpreted as a matrix function. Since $\psi(2\eta H\nabla^2\mathcal{L}(\zeta)) \in \operatorname{span}\{\pmb{u}\pmb{u}^\top \mid \pmb{u} \in T_\zeta^\perp(\Gamma)\}$ , by Lemma K.4, + +$$ +\partial^ {2} \Phi (\boldsymbol {\zeta}) [ \Psi (\boldsymbol {\zeta}) ] = - \frac {1}{2} \partial \Phi (\boldsymbol {\zeta}) \mathrm {t r} \psi (2 \eta H \nabla^ {2} \mathcal {L} (\boldsymbol {\zeta})). +$$ + +By the chain rule, we have (120). Combining (118),(119) and (120) gives the theorem. 
+ +![](images/96cdd3309715f5f0e273dec7f88f6f3d099b9cc141fe0c1bcc7856562356a5e3.jpg) + +# M EXPERIMENTAL DETAILS + +In this section, we specify the experimental details that are omitted in the main text. Our experiments are conducted on CIFAR-10 (Krizhevsky et al., 2009) and ImageNet (Russakovsky et al., 2015). Our code is available at https://github.com/hmgxr128/Local-SGD. Our implementation of ResNet-56 (He et al., 2016) and VGG-16 (Simonyan & Zisserman, 2015) is based on the high-starred repository by Wei Yang $^{2}$ and we use the implementation of ResNet-50 from torchvision 0.3.1. We run all CIFAR-10 experiments with $B_{\mathrm{loc}} = 128$ on 8 NVIDIA Tesla P100 GPUs while ImageNet experiments are run on 8 NVIDIA A5000 GPUs with $B_{\mathrm{loc}} = 32$ . All ImageNet experiments are trained with ResNet-50. + +We generally adopt the following training strategies. We do not add any momentum unless otherwise stated. We follow the suggestions by Jia et al. (2018) and do not add weight decay to the bias and learnable parameters in the normalization layers. For all models with BatchNorm layers, we go through 100 batches of data with batch size $B_{\mathrm{loc}}$ to estimate the running mean and variance before evaluation. Experiments on both datasets follow the standard data augmentation pipeline in He et al. (2016) except the label noise experiments. Additionally, we use FFCV (Leclerc et al., 2022) to accelerate data loading for ImageNet training. + +Slightly different from the update rule of Local SGD in Section 1, we use sampling without replacement unless otherwise stated. See Appendix C for implementation details and discussion. + +# M.1 POST-LOCAL SGD EXPERIMENTS IN SECTION 1 + +CIFAR-10 experiments. We simulate 32 clients with $B = 4096$ . We follow the linear scaling rule and linear learning rate warmup strategy suggested by Goyal et al. (2017). We first run 250 epochs of SGD with the learning rate gradually ramping up from 0.1 to 3.2 for the first 50 epochs. 
Resuming from the model obtained at epoch 250, we run Local SGD with $\eta = 0.32$ . Note that we conduct grid search for the initial learning rate among $\{0.005, 0.01, 0.05, 0.1, 0.15, 0.2\}$ and choose the learning rate with which parallel SGD $(H = 1)$ achieves the best test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. The weight decay $\lambda$ is set as $5 \times 10^{-4}$ . As for the initialization scheme, we follow Lin et al. (2020b) and Goyal et al. (2017). Specifically, we use Kaiming Normal (He et al., 2015) for the weights of convolutional layers and initialize the weights of fully-connected layers by a Gaussian distribution with mean zero and standard deviation 0.01. The weights for normalization layers are initialized as one. All bias parameters are initialized as zero. We report the mean and standard deviation over 5 runs. + +ImageNet experiments. We simulate 256 workers with $B = 8192$ . We follow the linear scaling rule and linear learning rate warmup strategy suggested by Goyal et al. (2017). We first run 100 epochs of SGD where the learning rate linearly ramps up from 0.5 to 16 for the first 5 epochs and then decays by a factor of 0.1 at epoch 50. Resuming from epoch 100, we run Local SGD with $\eta = 0.16$ . Note that we conduct grid search for the initial learning rate among $\{0.05, 0.1, 0.5, 1\}$ and choose the learning rate with which parallel SGD $(H = 1)$ achieves the best test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. The weight decay $\lambda$ is set as $1 \times 10^{-4}$ and we do not add any momentum. The initialization scheme follows the implementation of torchvision 0.3.1. We report the mean and standard deviation over 3 runs. + +# M.2 EXPERIMENTAL DETAILS FOR FIGURES 2 AND 5 + +CIFAR-10 experiments. We use ResNet-56 for all CIFAR-10 experiments in the two figures. 
We simulate 32 workers with $B = 4096$ and set the weight decay as $5 \times 10^{-4}$ . For Figures 2(a) and 2(b), we set $\eta = 0.32$ , which is the same as the learning rate after decay in Figure 1(a). For Figure 2(a), we adopt the same initialization scheme introduced in the corresponding paragraph in Appendix M.1. For Figures 2(b), 2(e) and 5(c), we use the model at epoch 250 in Figure 1(a) as the pre-trained model. Additionally, we use a training budget of 250 epochs for Figure 2(e). In Figure 5(e), we use Local SGD with momentum 0.9, where the momentum buffer is kept locally and never averaged. We run SGD with momentum 0.9 for 150 epochs to obtain the pre-trained model, where the learning + +![](images/612528912af565b1974a5b0405841ebf3de8c57a1667022b0675b195111bc322.jpg) +(a) CIFAR-10, start from #250. + +![](images/2c96fc87eb556badec6bbbfd81dc240c15e15b91feb73206c8d82f2bd1f576f8.jpg) +(b) ImageNet, start from #100. + +![](images/15b9a9acdf8d649523f76a01f91a745314bc26039553bf831b31cfae87f772b9.jpg) +(c) CIFAR-10, start from #250, optimal $H$ . + +![](images/e69a09c2ea1ac40840af919aaed448f9ff983a5c9933c7c9ba82086774a9ec9f.jpg) +(d) ImageNet, start from #100, optimal $H$ . +Figure 10: The learning curves for experiments in Figure 4. + +rate ramps up from 0.05 to 1.6 linearly in the first 150 epochs. Note that we conduct grid search for the initial learning rate among $\{0.01, 0.05, 0.1, 0.15, 0.2\}$ and choose the learning rate with which parallel SGD $(H = 1)$ achieves the highest test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. Resuming from epoch 150, we run Local SGD $H = 1$ (i.e., SGD) and 24 with $\eta = 0.16$ and decay $\eta$ by 0.1 at epoch 226. For Local SGD $H = 900$ , we resume from the model at epoch 226 of $H = 24$ with $\eta = 0.016$ . We report the mean and standard deviation over 3 runs for Figures 2(a), 2(b) and 5(c), and over 5 runs for Figure 2(e). + +ImageNet experiments. 
We simulate 256 clients with $B = 8192$ and set the weight decay as $1 \times 10^{-4}$ . In Figure 2(d), both Local SGD and SGD start from the same random initialization. We warm up the learning rate from 0.1 to 3.2 in the first 5 epochs and decay the learning rate by a factor of 0.1 at epochs 50 and 100. For Figures 2(c), 2(f) and 5(d), we use the model at epoch 100 in Figure 1(b) as the pre-trained model. In Figure 2(c), we set the learning rate as 0.16, which is the same as the learning rate after epoch 100 in Figure 1(b). Finally, in Figures 2(c), 2(f), 5(b) and 5(d), we report the mean and standard deviation over 3 runs. + +# M.3 DETAILS FOR EXPERIMENTS IN FIGURE 6 + +For all experiments in Figure 6, we train a ResNet-56 model on CIFAR-10. We report mean test accuracy over three runs and the shaded area reflects the standard deviation. For Figure 6(a), we use the same setup as Figures 2(a) and 2(b) for training from random initialization and from a pre-trained model respectively except the learning rate. For Figure 6(b), we resume from the model obtained at epoch 250 in Figure 1(a) and train for another 250 epochs. For Figure 6(c), we follow the same procedure as Figure 1(a) except that we use sampling with replacement. We also ensure that the total numbers of iterations in Figures 1(a) and 6(c) are the same. + +# M.4 DETAILS FOR EXPERIMENTS ON THE EFFECT OF THE DIFFUSION TERM + +CIFAR-10 experiments. The model we use is ResNet-56. For Figure 3(a), we first run SGD with batch size 128 and learning rate $\eta = 0.5$ for 250 epochs to obtain the pre-trained model. The initialization scheme is the same as the corresponding paragraph in Appendix M.1. Resuming from epoch 250 with $\eta = 0.05$ , we run Local SGD with $K = 16$ until epoch 6000 and run all other setups for the same number of iterations. We report the mean and standard deviation over 3 runs. + +ImageNet experiments. For Figures 3(b) and 4(b), we start from the model obtained at epoch 100 in Figure 1(b). 
In Figure 3(b), we run Local SGD with $K = 256$ for another 150 epochs with $\eta = 0.032$ . We run all other setups for the same number of iterations with the same learning rate. + +# M.5 DETAILS FOR EXPERIMENTS ON THE EFFECT OF GLOBAL BATCH SIZE + +CIFAR-10 experiments. The model we use is ResNet-56. We resume from the model obtained in Figure 1(a) at epoch 250 and train for another 250 epochs. The local batch size for all runs is $B_{\mathrm{loc}} = 128$ . We first make grid search of $\eta$ for SGD with $K = 16$ among $\{0.04, 0.08, 0.16, 0.32, 0.64\}$ and find that the final test accuracy varies little across different learning rates (within $0.1\%$ ). Then we choose $\eta = 0.32$ . For the green curve in Figure 4(a), we search for the optimal $H$ for $K = 16$ and keep $\alpha$ fixed when scaling $\eta$ with $K$ . For the red curve in Figure 4(a), we search for the optimal $H$ for each $K$ among $\{6, 12, 60, 120, 300, 750, 1500, 3000, 6000, 12000, 24000\}$ and also make sure that $H$ does not exceed the total number of iterations for 250 epochs. The learning curves for constant and optimal $\alpha$ are visualized in Figures 10(a) and 10(c) respectively. We report the mean and standard deviation over three runs. + +ImageNet experiments. We start from the model obtained at epoch 100 in Figure 1(b) and train for another 50 epochs. The local batch size for all runs is $B_{\mathrm{loc}} = 32$ . We first make grid search among $\{0.032, 0.064, 0.16, 0.32\}$ for $H = 1$ to achieve the best test accuracy and choose $\eta = 0.064$ . For the orange curve in Figure 4(b), we search $H$ among $\{2, 4, 6, 13, 26, 52, 78, 156\}$ for $K = 256$ to achieve the optimal test accuracy and then keep $\alpha$ constant as we scale $\eta$ with $K$ . 
To obtain the optimal $H$ for each $K$ , we search among $\{6240, 7800, 10400, 12480, 15600, 20800, 24960, 31200\}$ for $K = 16$ , $\{1600, 3120, 4160, 5200, 6240, 7800, 10400\}$ for $K = 32$ , $\{312, 480, 520, 624, 800, 975, 1040, 1248, 1560, 1950\}$ for $K = 64$ , and $\{1, 2, 3, 6, 13\}$ for $K = 512$ . The learning curves for constant and optimal $\alpha$ are visualized in Figures 10(b) and 10(d) respectively. We report the mean and standard deviation over three runs. + +# M.6 DETAILS FOR EXPERIMENTS ON LABEL NOISE REGULARIZATION + +For all label noise experiments, we do not use data augmentation, use sampling with replacement, and set the corruption probability as 0.1. We simulate 32 workers with $B = 4096$ in Figure 7 and 4 workers with $B = 512$ in Figure 8. We use ResNet-56 with GroupNorm with the number of groups 8 for Figure 7(a) and VGG-16 without normalization for Figures 7(b) and 8. Below we list the training details for ResNet-56 and VGG-16 respectively. + +ResNet-56. As for the model architecture, we replace the batch normalization layer in Yang's implementation with group normalization such that the training loss is independent of the sampling order. We also use Swish activation (Ramachandran et al., 2017) in place of ReLU to ensure the smoothness of the loss function. We generate the pre-trained model by running label noise SGD with corruption probability $p = 0.1$ for 500 epochs (6,000 iterations). We initialize the model by the same strategy introduced in the first paragraph of Appendix M.1. Applying the linear warmup scheme proposed by Goyal et al. (2017), we gradually ramp up the learning rate $\eta$ from 0.1 to 3.2 for the first 20 epochs and multiply the learning rate by 0.1 at epoch 250. All subsequent experiments in Figure 7(a) use learning rate 0.1. The weight decay $\lambda$ is set as $5 \times 10^{-4}$ . 
Note that adding weight decay in the presence of normalization accelerates the limiting dynamics and will not affect the implicit regularization on the original loss function (Li et al., 2022). + +VGG-16. We follow Yang's implementation of the model architecture except that we replace maximum pooling with average pooling and use Swish activation (Ramachandran et al., 2017) to make the training loss smooth. We initialize all weight parameters by Kaiming Normal and all bias parameters as zero. The pre-trained model is obtained by running label noise SGD with total batch size 4096 and corruption probability $p = 0.1$ for 6000 iterations. We use a linear learning rate warmup from 0.1 to 0.5 in the first 500 iterations. All runs in Figures 7(b) and 8 resume from the model obtained by SGD with label noise. In Figure 7(b), we use learning rate $\eta = 0.1$ . In Figure 8, we set $\eta = 0.005$ for $H = 97,000$ and $\eta = 0.01$ for SGD $(H = 1)$ . The weight decay $\lambda$ is set as zero. \ No newline at end of file diff --git a/2023/Why (and When) does Local SGD Generalize Better than SGD_/images.zip b/2023/Why (and When) does Local SGD Generalize Better than SGD_/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..2ca737db822e537be2250162b5974cc81e223dbb --- /dev/null +++ b/2023/Why (and When) does Local SGD Generalize Better than SGD_/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad79793d4c9fc0e8ba59877228ecbf5db245e85343fac194ef14131a3d5055f5 +size 4249603 diff --git a/2023/Why (and When) does Local SGD Generalize Better than SGD_/layout.json b/2023/Why (and When) does Local SGD Generalize Better than SGD_/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..11c7c0ecc737c305e7e836a8249bd55ee2156230 --- /dev/null +++ b/2023/Why (and When) does Local SGD Generalize Better than SGD_/layout.json @@ -0,0 +1,90222 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 
78, + 504, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 504, + 117 + ], + "type": "text", + "content": "WHY (AND WHEN) DOES LOCAL SGD GENERALIZE BETTER THAN SGD?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 129, + 134, + 183, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 134, + 183, + 145 + ], + "spans": [ + { + "bbox": [ + 129, + 134, + 183, + 145 + ], + "type": "text", + "content": "Xinran Gu*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 129, + 146, + 334, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 146, + 334, + 168 + ], + "spans": [ + { + "bbox": [ + 129, + 146, + 334, + 168 + ], + "type": "text", + "content": "Institute for Interdisciplinary Information Sciences Tsinghua University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 129, + 168, + 293, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 168, + 293, + 179 + ], + "spans": [ + { + "bbox": [ + 129, + 168, + 293, + 179 + ], + "type": "text", + "content": "gxr21@mails.tsinghua.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 344, + 135, + 403, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 135, + 403, + 146 + ], + "spans": [ + { + "bbox": [ + 344, + 135, + 403, + 146 + ], + "type": "text", + "content": "Kaifeng Lyu*" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 344, + 146, + 481, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 146, + 481, + 168 + ], + "spans": [ + { + "bbox": [ + 344, + 146, + 481, + 168 + ], + "type": "text", + "content": "Department of Computer Science Princeton University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 344, + 168, + 472, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 168, + 472, + 179 + ], + "spans": [ + { + "bbox": [ + 
344, + 168, + 472, + 179 + ], + "type": "text", + "content": "klyu@cs.princeton.edu" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 129, + 196, + 200, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 196, + 200, + 207 + ], + "spans": [ + { + "bbox": [ + 129, + 196, + 200, + 207 + ], + "type": "text", + "content": "Longbo Huang†" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 129, + 207, + 334, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 207, + 334, + 219 + ], + "spans": [ + { + "bbox": [ + 129, + 207, + 334, + 219 + ], + "type": "text", + "content": "Institute for Interdisciplinary Information Sciences" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 219, + 214, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 219, + 214, + 229 + ], + "spans": [ + { + "bbox": [ + 129, + 219, + 214, + 229 + ], + "type": "text", + "content": "Tsinghua University" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 129, + 229, + 293, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 229, + 293, + 240 + ], + "spans": [ + { + "bbox": [ + 129, + 229, + 293, + 240 + ], + "type": "text", + "content": "longbohuang@tsinghua.edu.cn" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 344, + 196, + 411, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 196, + 411, + 207 + ], + "spans": [ + { + "bbox": [ + 344, + 196, + 411, + 207 + ], + "type": "text", + "content": "Sanjeev Arora†" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 344, + 207, + 481, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 207, + 481, + 218 + ], + "spans": [ + { + "bbox": [ + 344, + 207, + 481, + 218 + ], + "type": "text", + "content": "Department of Computer Science" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 344, + 219, + 429, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + 
{ + "bbox": [ + 344, + 219, + 429, + 229 + ], + "spans": [ + { + "bbox": [ + 344, + 219, + 429, + 229 + ], + "type": "text", + "content": "Princeton University" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 344, + 230, + 477, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 230, + 477, + 240 + ], + "spans": [ + { + "bbox": [ + 344, + 230, + 477, + 240 + ], + "type": "text", + "content": "arora@cs.princeton.edu" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 276, + 255, + 334, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 255, + 334, + 266 + ], + "spans": [ + { + "bbox": [ + 276, + 255, + 334, + 266 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 140, + 280, + 470, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 280, + 470, + 468 + ], + "spans": [ + { + "bbox": [ + 140, + 280, + 470, + 468 + ], + "type": "text", + "content": "Local SGD is a communication-efficient variant of SGD for large-scale training, where multiple GPUs perform SGD independently and average the model parameters periodically. It has been recently observed that Local SGD can not only achieve the design goal of reducing the communication overhead but also lead to higher test accuracy than the corresponding SGD baseline (Lin et al., 2020b), though the training regimes for this to happen are still in debate (Ortiz et al., 2021). This paper aims to understand why (and when) Local SGD generalizes better based on Stochastic Differential Equation (SDE) approximation. 
The main contributions of this paper include (i) the derivation of an SDE that captures the long-term behavior of Local SGD in the small learning rate regime, showing how noise drives the iterate to drift and diffuse after it has reached close to the manifold of local minima, (ii) a comparison between the SDEs of Local SGD and SGD, showing that Local SGD induces a stronger drift term that can result in a stronger effect of regularization, e.g., a faster reduction of sharpness, and (iii) empirical evidence validating that having a small learning rate and long enough training time enables the generalization improvement over SGD but removing either of the two conditions leads to no improvement." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 489, + 206, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 489, + 206, + 500 + ], + "spans": [ + { + "bbox": [ + 106, + 489, + 206, + 500 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": "As deep models have grown larger, training them with reasonable wall-clock times has led to new distributed environments and new variants of gradient-based training. 
Recall that Stochastic Gradient Descent (SGD) tries to solve " + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\min_{\\pmb{\\theta} \\in \\mathbb{R}^d} \\mathbb{E}_{\\xi \\sim \\hat{\\mathcal{D}}}[\\ell(\\pmb{\\theta}; \\xi)]" + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": " is the parameter vector of the model, " + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\ell(\\pmb{\\theta}; \\xi)" + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": " is the loss function for a data sample " + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": " drawn from the training distribution " + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{D}}" + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": ", e.g., the uniform distribution over the training set. 
SGD with learning rate " + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": " and batch size " + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": " does the following update at each step, using a batch of " + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": " independent " + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "inline_equation", + "content": "\\xi_{t,1}, \\ldots, \\xi_{t,B} \\sim \\tilde{\\mathcal{D}}" + }, + { + "bbox": [ + 104, + 514, + 506, + 586 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 193, + 592, + 505, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 592, + 505, + 624 + ], + "spans": [ + { + "bbox": [ + 193, + 592, + 505, + 624 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {t + 1} \\leftarrow \\boldsymbol {\\theta} _ {t} - \\eta \\boldsymbol {g} _ {t}, \\quad \\text {w h e r e} \\quad \\boldsymbol {g} _ {t} = \\frac {1}{B} \\sum_ {i = 1} ^ {B} \\nabla \\ell \\left(\\boldsymbol {\\theta} _ {t}; \\xi_ {t, i}\\right). 
\\tag {1}", + "image_path": "afedaff5b92fea1289938e8298f2e788d724fea214cb4d24e609201f91e04472.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "text", + "content": "Parallel SGD tries to improve wall-clock time when the batch size " + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "text", + "content": " is large enough. It distributes the gradient computation to " + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "inline_equation", + "content": "K \\geq 2" + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "text", + "content": " workers, each of whom focuses on a local batch of " + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}} := B / K" + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "text", + "content": " samples and computes the average gradient over the local batch. Finally, " + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "inline_equation", + "content": "g_{t}" + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "text", + "content": " is obtained by averaging the local gradients over the " + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 630, + 504, + 675 + ], + "type": "text", + "content": " workers." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 680, + 505, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 680, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 680, + 505, + 704 + ], + "type": "text", + "content": "However, large-batch training leads to a significant test accuracy drop compared to a small-batch training baseline with the same number of training steps or epochs (Smith et al., 2020; Shallue et al.," + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 710, + 192, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 710, + 192, + 721 + ], + "spans": [ + { + "bbox": [ + 116, + 710, + 192, + 721 + ], + "type": "text", + "content": "*Equal contribution" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 116, + 721, + 206, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 721, + 206, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 721, + 206, + 732 + ], + "type": "text", + "content": "†Corresponding authors" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 140, + 45, + 294, + 140 + ], + "blocks": [ + { + "bbox": [ + 140, + 45, + 294, + 140 + ], + "lines": [ + { + "bbox": [ + 140, + 45, + 294, + 140 + 
], + "spans": [ + { + "bbox": [ + 140, + 45, + 294, + 140 + ], + "type": "image", + "image_path": "d2ddf20977319359843449176528686667e651b70728681363f7c2b748262ac9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 146, + 285, + 157 + ], + "lines": [ + { + "bbox": [ + 143, + 146, + 285, + 157 + ], + "spans": [ + { + "bbox": [ + 143, + 146, + 285, + 157 + ], + "type": "text", + "content": "(a) CIFAR-10, " + }, + { + "bbox": [ + 143, + 146, + 285, + 157 + ], + "type": "inline_equation", + "content": "B = 4096" + }, + { + "bbox": [ + 143, + 146, + 285, + 157 + ], + "type": "text", + "content": " , ResNet-56." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 315, + 45, + 470, + 140 + ], + "blocks": [ + { + "bbox": [ + 315, + 45, + 470, + 140 + ], + "lines": [ + { + "bbox": [ + 315, + 45, + 470, + 140 + ], + "spans": [ + { + "bbox": [ + 315, + 45, + 470, + 140 + ], + "type": "image", + "image_path": "5535c2fbc915d92756ec5f3da4bc10bda5e4734ecdc6d2e3b634ab473d1da50c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 147, + 459, + 157 + ], + "lines": [ + { + "bbox": [ + 321, + 147, + 459, + 157 + ], + "spans": [ + { + "bbox": [ + 321, + 147, + 459, + 157 + ], + "type": "text", + "content": "(b) ImageNet, " + }, + { + "bbox": [ + 321, + 147, + 459, + 157 + ], + "type": "inline_equation", + "content": "B = 8192" + }, + { + "bbox": [ + 321, + 147, + 459, + 157 + ], + "type": "text", + "content": ", ResNet-50." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 159, + 504, + 191 + ], + "lines": [ + { + "bbox": [ + 104, + 159, + 504, + 191 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 504, + 191 + ], + "type": "text", + "content": "Figure 1: Post-Local SGD (" + }, + { + "bbox": [ + 104, + 159, + 504, + 191 + ], + "type": "inline_equation", + "content": "H > 1" + }, + { + "bbox": [ + 104, + 159, + 504, + 191 + ], + "type": "text", + "content": ") generalizes better than SGD (" + }, + { + "bbox": [ + 104, + 159, + 504, + 191 + ], + "type": "inline_equation", + "content": "H = 1" + }, + { + "bbox": [ + 104, + 159, + 504, + 191 + ], + "type": "text", + "content": "). We switch to Local SGD at the first learning rate decay (epoch #250) for CIFAR-10 and at the second learning rate decay (epoch #100) for ImageNet. See Appendix M.1 for training details." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 199, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 199, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 199, + 504, + 266 + ], + "type": "text", + "content": "2019; Keskar et al., 2017; Jastrzebski et al., 2017). Reducing this generalization gap is the goal of much subsequent research. It was suggested that the generalization gap arises because larger batches lead to a reduction in the level of noise in batch gradient (see Appendix A for more discussion). The Linear Scaling Rule (Krizhevsky, 2014; Goyal et al., 2017; Jastrzebski et al., 2017) tries to fix this by increasing the learning rate in proportion to batch size. This is found to reduce the generalization gap for (parallel) SGD, but does not entirely eliminate it." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 270, + 506, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 382 + ], + "type": "text", + "content": "To reduce the generalization gap further, Lin et al. (2020b) discovered that a variant of SGD, called Local SGD (Yu et al., 2019; Wang & Joshi, 2019; Zhou & Cong, 2018), can be used as a strong component. Perhaps surprisingly, Local SGD itself is not designed for improving generalization, but for reducing the high communication cost for synchronization among the workers, which is another important issue that often bottlenecks large-batch training (Seide et al., 2014; Strom, 2015; Chen et al., 2016; Recht et al., 2011). Instead of averaging the local gradients per step as in parallel SGD, Local SGD allows " + }, + { + "bbox": [ + 104, + 270, + 506, + 382 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 270, + 506, + 382 + ], + "type": "text", + "content": " workers to train their models locally and averages the local model parameters whenever they finish " + }, + { + "bbox": [ + 104, + 270, + 506, + 382 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 270, + 506, + 382 + ], + "type": "text", + "content": " local steps. Here every worker samples a new batch at each local step, and in this paper we focus on the case where all the workers draw samples with or without replacement from the same training set. See Appendix C for the pseudocode." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "text", + "content": "More specifically, Lin et al. 
(2020b) proposed Post-local SGD, a hybrid method that starts with parallel SGD (equivalent to Local SGD with " + }, + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "inline_equation", + "content": "H = 1" + }, + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "text", + "content": " in math) and switches to Local SGD with " + }, + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "inline_equation", + "content": "H > 1" + }, + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "text", + "content": " after a fixed number of steps " + }, + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "text", + "content": ". They showed through extensive experiments that Post-local SGD significantly outperforms parallel SGD in test accuracy when " + }, + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 104, + 386, + 504, + 443 + ], + "type": "text", + "content": " is carefully chosen. In Figure 1, we reproduce this phenomenon on both CIFAR-10 and ImageNet." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 447, + 506, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 447, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 104, + 447, + 506, + 514 + ], + "type": "text", + "content": "As suggested by the success of Post-local SGD, Local SGD can improve the generalization of SGD by merely adding more local steps (while fixing the other hyperparameters), at least when the training starts from a model pre-trained by SGD. But the underlying mechanism is not very clear, and there is also controversy about when this phenomenon can happen (see Section 2.1 for a survey). The current paper tries to understand: Why does Local SGD generalize better? Under what general conditions does this generalization benefit arise?" 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 519, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 504, + 597 + ], + "type": "text", + "content": "Previous theoretical research on Local SGD is mainly restricted to the convergence rate for minimizing a convex or non-convex objective (see Appendix A for a survey). A related line of works (Stich, 2018; Yu et al., 2019; Khaled et al., 2020) showed that Local SGD has a slower convergence rate compared with parallel SGD after running the same number of steps/epochs. This convergence result suggests that Local SGD may implicitly regularize the model through insufficient optimization, but this does not explain why parallel SGD with early stopping, which may incur an even higher training loss, still generalizes worse than Post-local SGD." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 601, + 504, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 504, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 504, + 625 + ], + "type": "text", + "content": "Our Contributions. In this paper, we provide the first theoretical understanding on why (and when) switching from parallel SGD to Local SGD improves generalization." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 628, + 505, + 733 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 110, + 628, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 628, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 110, + 628, + 504, + 672 + ], + "type": "text", + "content": "1. 
In Section 2.2, we conduct ablation studies on CIFAR-10 and ImageNet and identify a clean setting where adding local steps to SGD consistently improves generalization: if the learning rate is small and the total number of steps is sufficient, Local SGD eventually generalizes better than the corresponding (parallel) SGD baseline." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 110, + 676, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 676, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 110, + 676, + 505, + 733 + ], + "type": "text", + "content": "2. In Section 3.2, we derive a special SDE that characterizes the long-term behavior of Local SGD in the small learning rate regime, as inspired by a previous work (Li et al., 2021b) that proposed this type of SDE for modeling SGD. These SDEs can track the dynamics after the iterate has reached close to a manifold of minima. In this regime, the expected gradient is near zero, but the gradient noise can drive the iterate to wander around. 
In contrast to the conventional SDE (3) for" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 121, + 82, + 504, + 117 + ], + "type": "text", + "content": "SGD, where the drift and diffusion terms are connected respectively to the expected gradient and gradient noise, the SDE we derived for Local SGD has drift and diffusion terms both connected to gradient noise." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 109, + 122, + 505, + 198 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 109, + 122, + 505, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 122, + 505, + 167 + ], + "spans": [ + { + "bbox": [ + 109, + 122, + 505, + 167 + ], + "type": "text", + "content": "3. 
Section 3.3 explains the generalization improvement of Local SGD over SGD by comparing the corresponding SDEs: increasing the number of local steps " + }, + { + "bbox": [ + 109, + 122, + 505, + 167 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 109, + 122, + 505, + 167 + ], + "type": "text", + "content": " strengthens the drift term of SDE while keeping the diffusion term untouched. We hypothesize that having a stronger drift term can benefit generalization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 109, + 175, + 504, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 175, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 109, + 175, + 504, + 198 + ], + "type": "text", + "content": "4. As a by-product, we provide a new proof technique that can give the first quantitative approximation bound for how well Li et al. (2021b)'s SDE approximates SGD." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 202, + 504, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 202, + 504, + 248 + ], + "spans": [ + { + "bbox": [ + 104, + 202, + 504, + 248 + ], + "type": "text", + "content": "Back to the discussion on the generalization gap between small- and large-batch training, we remark that this gap can occur early in training when the learning rate is very large (Smith et al., 2020) and Local SGD cannot prevent this gap in this phase. Instead, our theory suggests that Local SGD can reduce the gap in late training phases after decaying the learning rate." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 260, + 380, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 260, + 380, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 380, + 274 + ], + "type": "text", + "content": "2 WHEN DOES LOCAL SGD GENERALIZE BETTER?" 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 281, + 504, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 504, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 504, + 338 + ], + "type": "text", + "content": "In our motivating example of Post-local SGD, switching from SGD to Local SGD can outperform running SGD alone (i.e., no switching) in test accuracy, but this improvement does not always arise and can depend on the choice of the switching time point. Because of this, a necessary first step for developing a theoretical understanding of Local SGD is to identify under what general conditions Local SGD can improve the generalization of SGD by merely adding local steps." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 350, + 259, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 350, + 259, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 259, + 361 + ], + "type": "text", + "content": "2.1 THE DEBATE ON LOCAL SGD" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "spans": [ + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "text", + "content": "We first summarize a debate in the literature regarding when to switch from SGD to Local SGD in running Post-local SGD, which hints the conditions so that Local SGD can improve upon SGD." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 395, + 505, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 395, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 395, + 505, + 529 + ], + "type": "text", + "content": "Local SGD generalizes better than SGD on CIFAR-10. Lin et al. (2020b) empirically observed that Post-local SGD exhibits a better generalization performance than SGD. 
Most of their experiments are conducted on CIFAR-10 and CIFAR-100 with multiple learning rate decays, and the algorithm switches from (parallel) SGD to Local SGD right after the first learning rate decay. We refer to this particular choice of the switching time point as the first-decay switching strategy for short. To justify this strategy, they empirically showed that the generalization improvement can be less significant if starting Local SGD from the beginning or right after the second learning rate decay. It has also been observed by Wang & Joshi (2021) that running Local SGD from the beginning improves generalization, but the test accuracy improvement may not be large enough. A subsequent work by Lin et al. (2020a) showed that adding local steps to Extrap-SGD, a variant of SGD proposed therein, after the first learning rate decay also improves generalization, suggesting that the first-decay switching strategy can also be applied to the post-local variant of other optimizers." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 533, + 505, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 505, + 699 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 505, + 699 + ], + "type": "text", + "content": "Does Local SGD exhibit the same generalization benefit on large-scale datasets? Going beyond CIFAR-10, Lin et al. (2020b) conducted a few ImageNet experiments and showed that Post-local SGD with first-decay switching strategy still leads to better generalization than SGD. However, the improvement is sometimes marginal, e.g., " + }, + { + "bbox": [ + 104, + 533, + 505, + 699 + ], + "type": "inline_equation", + "content": "0.1\\%" + }, + { + "bbox": [ + 104, + 533, + 505, + 699 + ], + "type": "text", + "content": " for batch size 8192. 
For the general case, they suggested that the time of switching should be tuned aiming at \"capturing the time when trajectory starts to get into the influence basin of a local minimum\" in a footnote, but no further discussion or experiments are provided to justify this guideline. Ortiz et al. (2021) conducted a more extensive evaluation on ImageNet (with a different set of hyperparameters) and concluded with the opposite: the first-decay switching strategy can hurt the validation accuracy. Instead, switching at a later time, such as the second learning rate decay, leads to a better validation accuracy than SGD." + }, + { + "bbox": [ + 104, + 533, + 505, + 699 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 104, + 533, + 505, + 699 + ], + "type": "text", + "content": " To explain this phenomenon, they conjecture that switching to Local SGD has a regularization effect that is beneficial only in the short-term, so it is always better to switch as late as possible. They further conjecture that this discrepancy between CIFAR-10 and ImageNet is mainly due to the task scale. On TinyImageNet, which is a spatially downscaled subset of ImageNet, the first-decay switching strategy indeed leads to better validation accuracy." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "type": "text", + "content": "This generalization improvement is not mentioned explicitly in (Ortiz et al., 2021) but can be clearly seen from Figures 7 and 8 in their paper." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 52, + 228, + 124 + ], + "blocks": [ + { + "bbox": [ + 111, + 52, + 228, + 124 + ], + "lines": [ + { + "bbox": [ + 111, + 52, + 228, + 124 + ], + "spans": [ + { + "bbox": [ + 111, + 52, + 228, + 124 + ], + "type": "image", + "image_path": "f570556fdfaa0f6044b6f240a2cb7d16b8e147d57d421fd6b15c4809f3cddcfc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 107, + 129, + 231, + 140 + ], + "lines": [ + { + "bbox": [ + 107, + 129, + 231, + 140 + ], + "spans": [ + { + "bbox": [ + 107, + 129, + 231, + 140 + ], + "type": "text", + "content": "(a) CIFAR-10, start from random." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 247, + 52, + 362, + 123 + ], + "blocks": [ + { + "bbox": [ + 247, + 52, + 362, + 123 + ], + "lines": [ + { + "bbox": [ + 247, + 52, + 362, + 123 + ], + "spans": [ + { + "bbox": [ + 247, + 52, + 362, + 123 + ], + "type": "image", + "image_path": "6461538aea94e321f86ced7d94c6c8a21dd6a9c4cd72c786139df36ceef1178e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 247, + 129, + 363, + 140 + ], + "lines": [ + { + "bbox": [ + 247, + 129, + 363, + 140 + ], + "spans": [ + { + "bbox": [ + 247, + 129, + 363, + 140 + ], + "type": "text", + "content": "(b) CIFAR-10, start from #250." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 383, + 52, + 498, + 123 + ], + "blocks": [ + { + "bbox": [ + 383, + 52, + 498, + 123 + ], + "lines": [ + { + "bbox": [ + 383, + 52, + 498, + 123 + ], + "spans": [ + { + "bbox": [ + 383, + 52, + 498, + 123 + ], + "type": "image", + "image_path": "b98462c3f9e7557bf75477fde8fe81b76ca936bbd82faa548f0ffa3271ebb7bc.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 384, + 129, + 497, + 140 + ], + "lines": [ + { + "bbox": [ + 384, + 129, + 497, + 140 + ], + "spans": [ + { + "bbox": [ + 384, + 129, + 497, + 140 + ], + "type": "text", + "content": "(c) ImageNet, start from #100." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 111, + 148, + 228, + 220 + ], + "blocks": [ + { + "bbox": [ + 111, + 148, + 228, + 220 + ], + "lines": [ + { + "bbox": [ + 111, + 148, + 228, + 220 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 228, + 220 + ], + "type": "image", + "image_path": "195a334edf2abcfae1bb681d2e115a4eea3b4fcca6268203023bb1ad7ae80af2.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 106, + 224, + 231, + 236 + ], + "lines": [ + { + "bbox": [ + 106, + 224, + 231, + 236 + ], + "spans": [ + { + "bbox": [ + 106, + 224, + 231, + 236 + ], + "type": "text", + "content": "(d) ImageNet, first phase " + }, + { + "bbox": [ + 106, + 224, + 231, + 236 + ], + "type": "inline_equation", + "content": "\\eta = 3.2" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 247, + 148, + 362, + 220 + ], + "blocks": [ + { + "bbox": [ + 247, + 148, + 362, + 220 + ], + "lines": [ + { + "bbox": [ + 247, + 148, + 362, + 220 + ], + "spans": [ + { + "bbox": [ + 247, + 148, + 362, + 220 + ], + "type": "image", + "image_path": "68ffb33aa41408a4060ba829d1d7b2754f404172061830c7aa362204cfde84d1.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 249, + 224, + 361, + 235 + ], + "lines": [ + { + "bbox": [ + 249, + 224, + 361, + 235 + ], + "spans": [ + { + "bbox": [ + 249, + 224, + 361, + 235 + ], + "type": "text", + "content": "(e) CIFAR-10, test acc v.s. 
" + }, + { + "bbox": [ + 249, + 224, + 361, + 235 + ], + "type": "inline_equation", + "content": "H" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 383, + 148, + 498, + 220 + ], + "blocks": [ + { + "bbox": [ + 383, + 148, + 498, + 220 + ], + "lines": [ + { + "bbox": [ + 383, + 148, + 498, + 220 + ], + "spans": [ + { + "bbox": [ + 383, + 148, + 498, + 220 + ], + "type": "image", + "image_path": "0c72702dcc36c753b25919680336467916535f7964024c130c3ff1332f48704b.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 387, + 224, + 494, + 235 + ], + "lines": [ + { + "bbox": [ + 387, + 224, + 494, + 235 + ], + "spans": [ + { + "bbox": [ + 387, + 224, + 494, + 235 + ], + "type": "text", + "content": "(f) ImageNet, test acc v.s. " + }, + { + "bbox": [ + 387, + 224, + 494, + 235 + ], + "type": "inline_equation", + "content": "H" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 239, + 504, + 271 + ], + "lines": [ + { + "bbox": [ + 104, + 239, + 504, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 504, + 271 + ], + "type": "text", + "content": "Figure 2: Ablation studies on " + }, + { + "bbox": [ + 104, + 239, + 504, + 271 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 239, + 504, + 271 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 239, + 504, + 271 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 239, + 504, + 271 + ], + "type": "text", + "content": " and training time in the same setting as Figure 1. For (a)(d), we train from random initialization. For (b)(c)(e)(f), we start training from the checkpoints saved at the switching time points in Figure 1 (epoch #250 for CIFAR-10 and epoch #100 for ImageNet). See Appendix M.2 for training details." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 277, + 449, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 449, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 449, + 289 + ], + "type": "text", + "content": "2.2 KEY FACTORS: SMALL LEARNING RATE AND SUFFICIENT TRAINING TIME" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 295, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 396 + ], + "type": "text", + "content": "All the above papers agree that Post-local/Local SGD improves upon SGD to some extent. However, it is in debate under what conditions the generalization benefit can consistently occur. We now conduct ablation studies to identify the key factors so that adding local steps improves the generalization of SGD. We run parallel SGD and Local SGD with the same learning rate " + }, + { + "bbox": [ + 104, + 295, + 504, + 396 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 295, + 504, + 396 + ], + "type": "text", + "content": ", local batch size " + }, + { + "bbox": [ + 104, + 295, + 504, + 396 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 104, + 295, + 504, + 396 + ], + "type": "text", + "content": ", and number of workers " + }, + { + "bbox": [ + 104, + 295, + 504, + 396 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 295, + 504, + 396 + ], + "type": "text", + "content": ". We start training from the same initialization and compare their generalization after the same number of epochs. As Post-local SGD can be viewed as Local SGD starting from an SGD-pretrained model, the initial point in our experiments can be either random or a checkpoint of SGD training. 
See Appendix C for implementation details and Appendix M.2 for more details about the experimental setup." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": "The first observation we have is that the generalization benefits can be reproduced on both CIFAR-10 and ImageNet in our setting (see Figure 1). We remark that Post-local SGD and SGD in Lin et al. (2020b); Ortiz et al. (2021) are implemented with accompanying Nesterov momentum terms. The learning rate also decays a couple of times in training with Local SGD. Nevertheless, our experiments show that the Nesterov momentum and learning rate decay are not necessary for Local SGD to generalize better than SGD. Our main finding after further ablation studies is summarized below:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 470, + 506, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 470, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 470, + 506, + 515 + ], + "type": "text", + "content": "Finding 2.1. Given a sufficiently small learning rate and a sufficiently long training time, Local SGD exhibits better generalization than SGD, if the number of local steps " + }, + { + "bbox": [ + 104, + 470, + 506, + 515 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 470, + 506, + 515 + ], + "type": "text", + "content": " per round is tuned properly according to the learning rate. This holds for both training from random initialization and from pre-trained models." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 519, + 460, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 460, + 531 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 460, + 531 + ], + "type": "text", + "content": "Now we go through each point of our main finding. See also Appendix F for more plots." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 533, + 504, + 733 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 104, + 533, + 504, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 504, + 589 + ], + "type": "text", + "content": "(1). Pretraining is not necessary. In contrast to previous works claiming the benefits of Post-local SGD over Local SGD (Lin et al., 2020b; Ortiz et al., 2021), we observe that Local SGD with random initialization also generalizes significantly better than SGD, as long as the learning rate is small and the training time is sufficiently long (Figure 2(a)). Starting from a pretrained model may shorten the time to reach this generalization benefit to show up (Figure 2(b)), but it is not necessary." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 594, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 672 + ], + "type": "text", + "content": "(2). Learning rate should be small. We experiment with a wide range of learning rates to conclude that setting a small learning rate is necessary. The learning rate is 0.32 for Figures 2(a) and 2(b) and is 0.16 for Figure 2(c). 
As shown in Figure 2(d), Local SGD encounters optimization difficulty in the first phase where " + }, + { + "bbox": [ + 104, + 594, + 504, + 672 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 594, + 504, + 672 + ], + "type": "text", + "content": " is large (" + }, + { + "bbox": [ + 104, + 594, + 504, + 672 + ], + "type": "inline_equation", + "content": "\\eta = 3.2" + }, + { + "bbox": [ + 104, + 594, + 504, + 672 + ], + "type": "text", + "content": "), resulting in inferior final test accuracy. Even for training from a pretrained model, the generalization improvement of Local SGD disappears for large learning rates (e.g., " + }, + { + "bbox": [ + 104, + 594, + 504, + 672 + ], + "type": "inline_equation", + "content": "\\eta = 1.6" + }, + { + "bbox": [ + 104, + 594, + 504, + 672 + ], + "type": "text", + "content": " in Figure 5(d)). In contrast, if a longer training time is allowed, reducing the learning rate of Local SGD does not lead to test accuracy drop (Figure 5(c))." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": "(3). Training time should be long enough. To investigate the effect of training time, in Figures 2(b) and 2(c), we extend the training budget for the Post-local SGD experiments in Figure 1 and observe that a longer training time leads to greater generalization improvement upon SGD. On the other hand, Local SGD generalizes worse than SGD in the first few epochs of Figures 2(a) and 2(c); see Figures 5(a) and 5(b) for an enlarged view." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": "(4). The number of local steps " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " should be tuned carefully. 
The number of local steps " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " has a complex interplay with the learning rate " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": ", but generally speaking, a smaller " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " needs a higher " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " to achieve consistent generalization improvement. For CIFAR-10 with a post-local training budget of 250 epochs (see Figure 2(e)), the test accuracy first rises as " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " increases, and begins to fall as " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " exceeds some threshold for relatively large " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " (e.g., " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\eta \\geq 0.5" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": ") while keeps growing for smaller " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + 
"content": "\\eta" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " (e.g., " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\eta < 0.5" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": "). For ImageNet with a post-local training budget of 50 epochs (see Figure 2(f)), the test accuracy first increases and then decreases in " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " for all learning rates." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "text", + "content": "Reconciling previous works. Our finding can help to settle the debate presented in Section 2.1 to a large extent. Simultaneously requiring a small learning rate and sufficient training time poses a trade-off when learning rate decay is used with a limited training budget: switching to Local SGD earlier may lead to a large learning rate, while switching later makes the generalization improvement of Local SGD less noticeable due to fewer update steps. It is thus unsurprising that first-decay switching strategy is not always the best. The need for sufficient training time does not contradict with Ortiz et al. (2021)'s conjecture that Local SGD only has a \"short-term\" generalization benefit. In their experiments, the generalization improvement usually disappears right after the next learning rate decay (instead of after a fixed amount of time). 
We suspect that the real reason why the improvement vanishes is that the number of local steps " + }, + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "text", + "content": " was kept as a constant, but our finding suggests tuning " + }, + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "text", + "content": " after " + }, + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "text", + "content": " changes. In Figure 5(e), we reproduce this phenomenon and show that increasing " + }, + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 165, + 506, + 299 + ], + "type": "text", + "content": " after learning rate decay retains the improvement." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "text", + "content": "Generalization performances at the optimal learning rate of SGD. In practice, the learning rate of SGD is usually tuned to achieve the best training loss/Validation accuracy within a fixed training budget. Our finding suggests that when the tuned learning rate is small and the training time is sufficient, Local SGD can offer generalization improvement over SGD. As an example, in our experiments on training from an SGD-pretrained model, the optimal learning rate for SGD is 0.5 on CIFAR-10 (Figure 2(e)) and 0.064 on ImageNet (Figure 2(f)). 
With the same learning rate as SGD, the test accuracy is improved by " + }, + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "inline_equation", + "content": "1.1\\%" + }, + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "text", + "content": " on CIFAR-10 and " + }, + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "inline_equation", + "content": "0.3\\%" + }, + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "text", + "content": " on ImageNet when using Local SGD with " + }, + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "inline_equation", + "content": "H = 750" + }, + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "inline_equation", + "content": "H = 26" + }, + { + "bbox": [ + 104, + 302, + 504, + 402 + ], + "type": "text", + "content": " respectively. The improvement could become even higher if the learning rate of Local SGD is carefully tuned." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 408, + 430, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 408, + 430, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 430, + 420 + ], + "type": "text", + "content": "3 THEORETICAL ANALYSIS OF LOCAL SGD: THE SLOW SDE" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 426, + 504, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 471 + ], + "type": "text", + "content": "In this section, we adopt an SDE-based approach to rigorously establish the generalization benefit of Local SGD in a general setting. Below, we first identify the difficulty of adapting the SDE framework to Local SGD. Then, we present our novel SDE characterization of Local SGD around the manifold of minimizers and explain the generalization benefit of Local SGD with our SDE." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": "Notations. We follow the notations in Section 1. We denote by " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " the learning rate, " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " the number of workers, " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " the (global) batch size, " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}\\coloneqq B / K" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " the local batch size, " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " the number of local steps, " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\ell (\\pmb {\\theta};\\zeta)" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " the loss function for a data sample " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{D}}" + }, + { + 
"bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " the training distribution. Furthermore, we define " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\pmb {\\theta})\\coloneqq \\mathbb{E}_{\\xi \\sim \\tilde{\\mathcal{D}}}[\\ell (\\pmb {\\theta};\\xi)]" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " as the expected loss, " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\Sigma (\\pmb {\\theta})\\coloneqq \\operatorname{Cov}_{\\xi \\sim \\tilde{\\mathcal{D}}}[\\nabla \\ell (\\pmb {\\theta};\\xi)]" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " as the noise covariance of gradients at " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\{W_t\\}_{t\\geq 0}" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " denote the standard Wiener process. 
For a mapping " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "F:\\mathbb{R}^d\\to \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": ", denote by " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\partial F(\\pmb {\\theta})" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " the Jacobian at " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\partial^2 F(\\pmb {\\theta})" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " the second order derivative at " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": ". 
Furthermore, for any matrix " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "M\\in \\mathbb{R}^{d\\times d}" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\partial^2 F(\\pmb {\\theta})[M] = \\sum_{i\\in [d]}\\langle \\frac{\\partial^2F_i}{\\partial\\theta^2},M\\rangle e_i" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "e_i" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": "-th vector of the standard basis. We write " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\partial^2 (\\nabla \\mathcal{L})(\\pmb {\\theta})[M]" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\nabla^3\\mathcal{L}(\\pmb {\\theta})[M]" + }, + { + "bbox": [ + 104, + 475, + 504, + 575 + ], + "type": "text", + "content": " for short." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": "Local SGD. We use the following formulation of Local SGD for theoretical analysis. See also Appendix C for the pseudocode. 
Local SGD proceeds in multiple rounds of model averaging, where each round produces a global iterate " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": ". In the " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "(s + 1)" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": "-th round, every worker " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": " starts with its local copy of the global iterate " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,0}^{(s)} \\gets \\bar{\\pmb{\\theta}}^{(s)}" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": " and does " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": " steps of SGD with local batches. 
In the " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": "-th local step of the " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": "-th worker, it draws a local batch of " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}} \\coloneqq B / K" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": " independent samples " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "\\xi_{k,t,1}^{(s)}, \\dots, \\xi_{k,t,B_{\\mathrm{loc}}}^{(s)}" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": " from a shared training distribution " + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{D}}" + }, + { + "bbox": [ + 104, + 579, + 506, + 656 + ], + "type": "text", + "content": " and updates as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 118, + 657, + 504, + 688 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 657, + 504, + 688 + ], + "spans": [ + { + "bbox": [ + 118, + 657, + 504, + 688 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} \\leftarrow \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {g} _ {k, t} ^ {(s)}, \\quad \\text {w h e r e} \\quad \\boldsymbol {g} _ {k, t} ^ {(s)} = \\frac {1}{B _ {\\mathrm {l o c}}} \\sum_ {i = 1} ^ {B _ {\\mathrm {l o c}}} \\nabla \\ell \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}; \\xi_ {k, t, i} ^ {(s)}\\right), \\quad t = 0, \\dots , H - 1. 
\\tag {2}", + "image_path": "074e49d31cf5149b0e75fc122b835e86925259d1f958f9823c731469babb79d9.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 691, + 504, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 691, + 504, + 735 + ], + "spans": [ + { + "bbox": [ + 104, + 691, + 504, + 735 + ], + "type": "text", + "content": "The local updates on different workers are independent of each other as there is no communication. After finishing the " + }, + { + "bbox": [ + 104, + 691, + 504, + 735 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 691, + 504, + 735 + ], + "type": "text", + "content": " local steps, the workers aggregate the resulting local iterates " + }, + { + "bbox": [ + 104, + 691, + 504, + 735 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,H}^{(s)}" + }, + { + "bbox": [ + 104, + 691, + 504, + 735 + ], + "type": "text", + "content": " and assign the average to the next global iterate: " + }, + { + "bbox": [ + 104, + 691, + 504, + 735 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}^{(s + 1)}\\gets \\frac{1}{K}\\sum_{k = 1}^{K}\\pmb{\\theta}_{k,H}^{(s)}" + }, + { + "bbox": [ + 104, + 691, + 504, + 735 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 418, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 418, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 418, + 94 + ], + "type": "text", + "content": "3.1 DIFFICULTY OF ADAPTING THE SDE FRAMEWORK TO LOCAL SGD" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 99, + 504, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 99, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 99, + 504, + 133 + ], + "type": "text", + "content": "A widely-adopted approach to understanding the dynamics of SGD is to approximate it from a continuous perspective with the following SDE (3), which we call the conventional SDE approximation. Below, we discuss why it cannot be directly adopted to characterize the behavior of Local SGD." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 212, + 136, + 504, + 156 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 136, + 504, + 156 + ], + "spans": [ + { + "bbox": [ + 212, + 136, + 504, + 156 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {X} (t) = - \\nabla \\mathcal {L} (\\boldsymbol {X}) \\mathrm {d} t + \\sqrt {\\frac {\\eta}{B}} \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {X}) \\mathrm {d} \\boldsymbol {W} _ {t}. \\tag {3}", + "image_path": "883d89dd47b8643151f5c6dc061b555c34552aee52b7df8d967d6753b6b62762.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 157, + 505, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 157, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 157, + 505, + 213 + ], + "type": "text", + "content": "It is proved by Li et al. (2019a) that this SDE is a first-order approximation to SGD, where each discrete step corresponds to a continuous time interval of " + }, + { + "bbox": [ + 104, + 157, + 505, + 213 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 157, + 505, + 213 + ], + "type": "text", + "content": ". Several previous works adopt this SDE approximation and connect good generalization to having a large diffusion term " + }, + { + "bbox": [ + 104, + 157, + 505, + 213 + ], + "type": "inline_equation", + "content": "\\sqrt{\\frac{\\eta}{B}} \\Sigma^{1/2} \\mathrm{d}W_t" + }, + { + "bbox": [ + 104, + 157, + 505, + 213 + ], + "type": "text", + "content": " in the SDE (Jastrzewski et al., 2017; Smith et al., 2020), because a suitable amount of noise can be necessary for large-batch training to generalize well (see also Appendix A)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "text", + "content": "According to Finding 2.1, it is tempting to consider the limit " + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "inline_equation", + "content": "\\eta \\to 0" + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "text", + "content": " and see if Local SGD can also be modeled via a variant of the conventional SDE. In this case the typical time length that guarantees a good SDE approximation error is " + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\eta^{-1})" + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "text", + "content": " discrete steps (Li et al., 2019a; 2021a). However, this time scaling is too short for the difference to appear between Local SGD and SGD. Indeed, Theorem 3.1 below shows that they closely track each other for " + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\eta^{-1})" + }, + { + "bbox": [ + 104, + 218, + 504, + 275 + ], + "type": "text", + "content": " steps." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": "Theorem 3.1. 
Assume that the loss function " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": "-smooth with bounded second and third order derivatives and that " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\nabla \\ell (\\pmb {\\theta};\\xi)" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": " is bounded. Let " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "T > 0" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": " be a constant, " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}^{(s)}" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": " be the " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": "-th global iterate of Local SGD and " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\pmb {w}_t" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": " be the " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": "-th iterate of SGD with the same initialization " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\pmb {w}_0 = \\bar{\\pmb{\\theta}}^{(0)}" + }, + { + "bbox": [ + 104, + 275, + 
506, + 342 + ], + "type": "text", + "content": " and same " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\eta, B_{\\mathrm{loc}}, K" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": ". Then for any " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "H\\leq \\frac{T}{\\eta}" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": ", it holds with probability at least " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": " that for all " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "s\\leq \\frac{T}{\\eta H}" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{\\theta}}^{(s)} - \\pmb{w}_{sH}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})" + }, + { + "bbox": [ + 104, + 275, + 506, + 342 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 344, + 505, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 344, + 505, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 505, + 368 + ], + "type": "text", + "content": "We defer the proof to Appendix I. See also Appendix D for Lin et al. (2020b)'s attempt to model Local SGD with multiple conventional SDEs and discussions on why it does not give much insight." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 373, + 375, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 375, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 375, + 384 + ], + "type": "text", + "content": "3.2 SDE APPROXIMATION NEAR THE MINIMIZER MANIFOLD" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": "Inspired by a recent paper (Li et al., 2021b), our strategy to overcome the shortcomings of the conventional SDE is to design a new SDE that can guarantee a good approximation for " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\eta^{-2})" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": " discrete steps, much longer than the " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\eta^{-1})" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": " discrete steps for the conventional SDE. 
Following their setting, we assume the existence of a manifold " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": " consisting only of local minimizers and track the global iterate " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": " around " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": " after it takes " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}} (\\eta^{-1})" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": " steps to approach " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": ". Though the expected gradient " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\nabla \\mathcal{L}" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": " is near zero around " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": ", the dynamics are still non-trivial because the noise can drive the iterate to move a significant distance in " + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\eta^{-2})" + }, + { + "bbox": [ + 104, + 390, + 504, + 469 + ], + "type": "text", + "content": " steps." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "text", + "content": "Assumption 3.1. The loss function " + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot)" + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "text", + "content": " and the matrix square root of the noise covariance " + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\Sigma^{1/2}(\\cdot)" + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^\\infty" + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "text", + "content": "-smooth. Besides, we assume that " + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\|\\nabla \\ell(\\boldsymbol{\\theta}; \\xi)\\|_2" + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "text", + "content": " is bounded by a constant for all " + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\theta}" + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 104, + 471, + 504, + 495 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": "Assumption 3.2. 
" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^\\infty" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": "-smooth, " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "(d - m)" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": "-dimensional submanifold of " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": ", where any " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "\\zeta \\in \\Gamma" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": " is a local minimizer of " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": ". For all " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "\\zeta \\in \\Gamma" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "\\mathrm{rank}(\\nabla^2\\mathcal{L}(\\zeta)) = m" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": ". 
Additionally, there exists an open neighborhood of " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "inline_equation", + "content": "\\Gamma = \\arg \\min_{\\pmb{\\theta} \\in U} \\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 496, + 505, + 531 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 533, + 281, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 533, + 281, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 281, + 545 + ], + "type": "text", + "content": "Assumption 3.3. " + }, + { + "bbox": [ + 105, + 533, + 281, + 545 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 105, + 533, + 281, + 545 + ], + "type": "text", + "content": " is a compact manifold." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "text", + "content": "The smoothness assumption on " + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "text", + "content": " is generally satisfied when we use smooth activation functions, such as Swish (Ramachandran et al., 2017), softplus and GeLU (Hendrycks & Gimpel, 2016), which work equally well as ReLU in many circumstances. 
The existence of a minimizer manifold with " + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "inline_equation", + "content": "\\mathrm{rank}(\\nabla^2\\mathcal{L}(\\zeta)) = m" + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "text", + "content": " has also been made as a key assumption in Fehrman et al. (2020); Li et al. (2021b); Lyu et al. (2022), where " + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "inline_equation", + "content": "\\mathrm{rank}(\\nabla^2\\mathcal{L}(\\zeta)) = m" + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "text", + "content": " ensures that the Hessian is maximally nondegenerate on the manifold and implies that the tangent space at " + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "inline_equation", + "content": "\\zeta \\in \\Gamma" + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "text", + "content": " equals the null space of " + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\zeta)" + }, + { + "bbox": [ + 104, + 548, + 505, + 626 + ], + "type": "text", + "content": ". The last assumption is made to prevent the analysis from being too technically involved." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "type": "text", + "content": "Our SDE for Local SGD characterizes the training dynamics near " + }, + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "type": "text", + "content": ". 
For ease of presentation, we define the following projection operators " + }, + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "type": "inline_equation", + "content": "\\Phi, P_{\\zeta}" + }, + { + "bbox": [ + 104, + 631, + 504, + 654 + ], + "type": "text", + "content": " for points and differential forms respectively." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": "Definition 3.1 (Gradient Flow Projection). Fix a point " + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{null}} \\notin \\Gamma" + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": ". For " + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": ", consider the gradient flow " + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "inline_equation", + "content": "\\frac{\\mathrm{d}\\pmb{x}(t)}{\\mathrm{d}t} = -\\nabla \\mathcal{L}(\\pmb{x}(t))" + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "inline_equation", + "content": "\\pmb{x}(0) = \\pmb{x}" + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": ". 
We denote the gradient flow projection of " + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "inline_equation", + "content": "\\Phi(\\pmb{x})" + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "inline_equation", + "content": "\\Phi(\\pmb{x}) := \\lim_{t \\to +\\infty} \\pmb{x}(t)" + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": " if the limit exists and belongs to " + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": "; otherwise, " + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "inline_equation", + "content": "\\Phi(\\pmb{x}) = \\theta_{\\mathrm{null}}" + }, + { + "bbox": [ + 103, + 658, + 504, + 694 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": "Definition 3.2. 
For any " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\zeta \\in \\Gamma" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": " and any differential form " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{AdW}_t + \\mathbf{bdt}" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": " in Itô calculus, where " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": " is a matrix and " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{b}" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": " is a vector, we use " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "P_{\\zeta}(\\mathbf{AdW}_t + \\mathbf{bdt})" + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "text", + "content": " as a shorthand for the differential form " + }, + { + "bbox": [ + 104, + 696, + 505, + 734 + ], + "type": "inline_equation", + "content": "\\partial \\Phi (\\zeta)\\mathbf{AdW}_t + \\left(\\partial \\Phi (\\zeta)\\mathbf{b} + \\frac{1}{2}\\partial^2\\Phi (\\zeta)[\\mathbf{AA}^\\top ]\\right)\\mathrm{d}t." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": "See Øksendal (2013) for an introduction to Itô calculus. 
Here " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "P_{\\zeta}" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " equals " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\Phi (\\zeta +A\\mathrm{d}\\pmb {W}_t + \\pmb {b}\\mathrm{d}t) - \\Phi (\\zeta)" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " by Itô calculus, which means that " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "P_{\\zeta}" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " projects an infinitesimal step from " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " , so that " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " after taking the projected step does not leave the manifold " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " . 
It can be shown by simple calculus that " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\partial \\Phi (\\zeta)" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " equals the projection matrix onto the tangent space of " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " . We decompose the noise covariance " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\Sigma (\\zeta)" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\zeta \\in \\Gamma" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " into two parts: the noise in the tangent space " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\Sigma_{\\parallel}(\\zeta)\\coloneqq \\partial \\Phi (\\zeta)\\Sigma (\\zeta)\\partial \\Phi (\\zeta)" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " and the noise in the rest " + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\Sigma_{\\diamond}(\\zeta)\\coloneqq \\Sigma (\\zeta) - \\Sigma_{\\parallel}(\\zeta)" + }, + { + "bbox": [ + 104, + 82, + 504, + 150 + ], + "type": "text", + "content": " . Now we are ready to state our SDE for Local SGD." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "text", + "content": "Definition 3.3 (Slow SDE for Local SGD). Given " + }, + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\eta, H > 0" + }, + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\zeta_0 \\in \\Gamma" + }, + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "text", + "content": ", define " + }, + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\zeta(t)" + }, + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "text", + "content": " as the solution of the following SDE with initial condition " + }, + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\zeta(0) = \\zeta_0" + }, + { + "bbox": [ + 104, + 152, + 504, + 176 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 178, + 505, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 178, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 138, + 178, + 505, + 213 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {(a) d i f f u s i o n} - \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(b) d r i f t - I} - \\underbrace {- \\frac {K - 1}{2 B} \\nabla^ {3} 
\\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(c) d r i f t - I I}\\right). \\tag {4}", + "image_path": "9e2762b57386cb08656ef47901552a38b60f359efbb770a8ccc8ca5e06980d56.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 217, + 279, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 279, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 279, + 231 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 105, + 217, + 279, + 231 + ], + "type": "inline_equation", + "content": "\\widehat{\\Sigma}_{\\diamond}(\\zeta), \\widehat{\\Psi}(\\zeta) \\in \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 105, + 217, + 279, + 231 + ], + "type": "text", + "content": " are defined as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 168, + 233, + 505, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 233, + 505, + 251 + ], + "spans": [ + { + "bbox": [ + 168, + 233, + 505, + 251 + ], + "type": "interline_equation", + "content": "\\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) := \\sum_ {i, j: (\\lambda_ {i} \\neq 0) \\vee (\\lambda_ {j} \\neq 0)} \\frac {1}{\\lambda_ {i} + \\lambda_ {j}} \\left\\langle \\boldsymbol {\\Sigma} _ {\\diamond} (\\boldsymbol {\\zeta}), \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top}, \\tag {5}", + "image_path": "9234ec704e2efc2cf26dbb194d604de93abf29ea445930d08ed35bb70d0905de.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 174, + 252, + 504, + 270 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 252, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 174, + 252, + 504, + 270 + ], + "type": "interline_equation", + "content": "\\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) := \\sum_ {i, j: (\\lambda_ {i} \\neq 
0) \\vee (\\lambda_ {j} \\neq 0)} \\frac {\\psi (\\eta H \\cdot (\\lambda_ {i} + \\lambda_ {j}))}{\\lambda_ {i} + \\lambda_ {j}} \\left\\langle \\boldsymbol {\\Sigma} _ {\\diamond} (\\boldsymbol {\\zeta}), \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top}, \\tag {6}", + "image_path": "57db29171d3616efe1377a49a0d98e409c7689796815f8ce156f46a1288e70dd.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "inline_equation", + "content": "\\{\\pmb{v}_i\\}_{i=1}^d" + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "text", + "content": " is a set of eigenvectors of " + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\zeta)" + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "text", + "content": " that forms an orthonormal eigenbasis, and " + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "inline_equation", + "content": "\\lambda_1, \\ldots, \\lambda_d" + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "text", + "content": " are the corresponding eigenvalues. 
Additionally, " + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "inline_equation", + "content": "\\psi(x) := \\frac{e^{-x} - 1 + x}{x}" + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "inline_equation", + "content": "x \\neq 0" + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "inline_equation", + "content": "\\psi(0) = 0" + }, + { + "bbox": [ + 104, + 274, + 506, + 311 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": "The use of " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "P_{\\zeta}" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": " keeps " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\zeta(t)" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": " on the manifold " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": " through projection. " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\Sigma_{\\parallel}^{\\frac{1}{2}}(\\zeta)" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": " introduces a diffusion term to the SDE in the tangent space. 
The two drift terms involve " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\widehat{\\Sigma}_{\\diamond}(\\cdot)" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\widehat{\\Psi}(\\cdot)" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": ", which can be intuitively understood as rescaling the entries of the noise covariance in the eigenbasis of Hessian. In the special case where " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\nabla^{2}\\mathcal{L} = \\mathrm{diag}(\\lambda_{1},\\dots,\\lambda_{d}) \\in \\mathbb{R}^{d\\times d}" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\widehat{\\Sigma}_{\\diamond,i,j} = \\frac{1}{\\lambda_i + \\lambda_j}\\Sigma_{0,i,j}" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\widehat{\\Psi}_{i,j} = \\frac{\\psi(\\eta H(\\lambda_i + \\lambda_j))}{\\lambda_i + \\lambda_j}\\Sigma_{0,i,j}" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "\\psi(x)" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": " is a monotonically increasing function, which goes from 0 to 1 as " + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 316, + 504, + 400 + ], + "type": "text", + "content": " goes from 0 to infinity (see Figure 9)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 404, + 505, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 404, + 505, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 404, + 505, + 471 + ], + "type": "text", + "content": "We name this SDE as the Slow SDE for Local SGD because we will show that each discrete step of Local SGD corresponds to a continuous time interval of " + }, + { + "bbox": [ + 104, + 404, + 505, + 471 + ], + "type": "inline_equation", + "content": "\\eta^2" + }, + { + "bbox": [ + 104, + 404, + 505, + 471 + ], + "type": "text", + "content": " instead of an interval of " + }, + { + "bbox": [ + 104, + 404, + 505, + 471 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 404, + 505, + 471 + ], + "type": "text", + "content": " in the conventional SDE. In this sense, our SDE is \"slower\" than the conventional SDE (and hence can track a longer horizon). This Slow SDE is inspired by Li et al. (2021b). Under nearly the same set of assumptions, they proved that SGD can be tracked by an SDE that is essentially equivalent to (4) with " + }, + { + "bbox": [ + 104, + 404, + 505, + 471 + ], + "type": "inline_equation", + "content": "K = 1" + }, + { + "bbox": [ + 104, + 404, + 505, + 471 + ], + "type": "text", + "content": ", namely, without the drift-II term." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 187, + 473, + 505, + 508 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 473, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 187, + 473, + 505, + 508 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} - \\underbrace {\\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(b) d r i f t - I}}\\right), \\tag {7}", + "image_path": "887bb005720cbad800036f0437f24af3e33e882152e8824216b84167c700df66.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 510, + 504, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 504, + 544 + ], + "type": "text", + "content": "We refer to (7) as the Slow SDE for SGD. We remark that the drift-II term in (4) is novel and is the key to separate the generalization behaviors of Local SGD and SGD in theory. We will discuss this point later in Section 3.3. Now we present our SDE approximation theorem for Local SGD." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "spans": [ + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": "Theorem 3.2. Let Assumptions 3.1 to 3.3 hold. 
Let " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "T > 0" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": " be a constant and " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\zeta(t)" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": " be the solution to (4) with the initial condition " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\zeta(0) = \\Phi(\\bar{\\theta}^{(0)}) \\in \\Gamma" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": ". If " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": " is set to " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\frac{\\alpha}{\\eta}" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": " for some constant " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\alpha > 0" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": ", then for any " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": "-smooth function " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "g(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\max_{0 \\leq s \\leq \\frac{T}{H\\eta^2}} \\left| \\mathbb{E}[g(\\Phi(\\bar{\\pmb{\\theta}}^{(s)}))] - \\mathbb{E}[g(\\pmb{\\zeta}(sH\\eta^2))] 
\\right| = \\tilde{\\mathcal{O}}(\\eta^{0.25})" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}}(\\cdot)" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": " hides log factors and constants that are independent of " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": " but can depend on " + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "inline_equation", + "content": "g(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 546, + 504, + 603 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": "Theorem 3.3. 
For " + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": ", it holds for all " + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "inline_equation", + "content": "\\mathcal{O}\\left(\\frac{1}{\\alpha}\\log \\frac{1}{\\eta}\\right)\\leq s\\leq \\frac{T}{\\alpha\\eta}" + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": " that " + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "inline_equation", + "content": "\\Phi (\\bar{\\pmb{\\theta}}^{(s)})\\in \\Gamma" + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{\\theta}}^{(s)} - \\Phi (\\bar{\\pmb{\\theta}}^{(s)})\\| _2 = \\mathcal{O}(\\sqrt{\\alpha\\eta\\log\\frac{\\alpha}{\\eta\\delta}})" + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\cdot)" + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": " hides constants independent of " + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 
104, + 603, + 504, + 649 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 603, + 504, + 649 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 652, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 652, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 652, + 505, + 733 + ], + "type": "text", + "content": "Theorem 3.2 suggests that the trajectories of the manifold projection and the solution to the Slow SDE (4) are close to each other in the weak approximation sense. That is, " + }, + { + "bbox": [ + 104, + 652, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\{\\Phi (\\bar{\\theta}^{(s)})\\}" + }, + { + "bbox": [ + 104, + 652, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 652, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\{\\zeta (t)\\}" + }, + { + "bbox": [ + 104, + 652, + 505, + 733 + ], + "type": "text", + "content": " cannot be distinguished by evaluating test functions from a wide function class, including all polynomials. This measurement of closeness between the iterates of stochastic gradient algorithms and their SDE approximations is also adopted by Li et al. (2019a; 2021a); Malladi et al. (2022), but their analyses are for conventional SDEs. Theorem 3.3 further states that the iterate " + }, + { + "bbox": [ + 104, + 652, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}" + }, + { + "bbox": [ + 104, + 652, + 505, + 733 + ], + "type": "text", + "content": " keeps close to its manifold projection after the first few rounds." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "content": "Remark 3.1. To connect to Finding 2.1, we remark that our theorems (1) do not require the model to be pre-trained (as long as the gradient flow starting with " + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "inline_equation", + "content": "\\theta^{(0)}" + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "content": " converges to " + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "content": "); (2) give better bounds for smaller " + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "content": "; (3) characterize a long training horizon " + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "inline_equation", + "content": "\\sim \\eta^{-2}" + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "content": ". 
The need for tuning " + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 82, + 506, + 127 + ], + "type": "text", + "content": " will be discussed in Section 3.3.3." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 129, + 506, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 129, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 104, + 129, + 506, + 242 + ], + "type": "text", + "content": "Technical Contribution. The proof technique for Theorem 3.2 is novel and significantly different from the Slow SDE analysis of SGD in Li et al. (2021a). Their analysis uses advanced stochastic calculus and invokes Katzenberger's theorem (Katzenberger, 1991) to show that SGD converges to the Slow SDE in distribution, but no quantitative error bounds are provided. Also, due to the local updates and multiple aggregation steps in Local SGD, it is unclear how to extend Katzenberger's theorem to our case. To overcome this difficulty, we develop a new approach to analyze the Slow SDEs, which is based on the method of moments (Li et al., 2019a) and can provide the quantitative error bound " + }, + { + "bbox": [ + 104, + 129, + 506, + 242 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}} (\\eta^{0.25})" + }, + { + "bbox": [ + 104, + 129, + 506, + 242 + ], + "type": "text", + "content": " in weak approximation. See Appendix J for our proof outline. A by-product of our result is the first quantitative approximation bound for the Slow SDE approximation for SGD, which can be easily obtained by setting " + }, + { + "bbox": [ + 104, + 129, + 506, + 242 + ], + "type": "inline_equation", + "content": "K = 1" + }, + { + "bbox": [ + 104, + 129, + 506, + 242 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 248, + 295, + 259 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 248, + 295, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 295, + 259 + ], + "type": "text", + "content": "3.3 INTERPRETATION OF THE SLOW SDEs" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 265, + 504, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 504, + 300 + ], + "type": "text", + "content": "In this subsection, we compare the Slow SDEs for SGD and Local SGD and provide an important insight into why Local SGD generalizes better than SGD: Local SGD strengthens the drift term in the Slow SDE, which makes the implicit regularization of stochastic gradient noise more effective." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 305, + 345, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 305, + 345, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 345, + 316 + ], + "type": "text", + "content": "3.3.1 INTERPRETATION OF THE SLOW SDE FOR SGD." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "text", + "content": "The Slow SDE for SGD (7) consists of the diffusion and drift-I terms. 
The former injects noise into the dynamics in the tangent space; the latter one drives the dynamics to move along the negative gradient of " + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "inline_equation", + "content": "\\frac{1}{2B}\\langle \\nabla^2\\mathcal{L}(\\zeta),\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\rangle" + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "text", + "content": " projected onto the tangent space, but ignoring the dependency of " + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "inline_equation", + "content": "\\widehat{\\Sigma}_{\\diamond}(\\zeta)" + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "text", + "content": ". This can be connected to the class of semi-gradient methods which only computes a part of the gradient (Mnih et al., 2015; Sutton & Barto, 1998; Brandfonbrener & Bruna, 2020). In this view, the long-term behavior of SGD is similar to a stochastic semi-gradient method minimizing the implicit regularizer " + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "inline_equation", + "content": "\\frac{1}{2B}\\langle \\nabla^2\\mathcal{L}(\\zeta),\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\rangle" + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "text", + "content": " on the minimizer manifold of the original loss " + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 322, + 504, + 409 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 411, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 411, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 504, + 479 + ], + "type": "text", + "content": "Though the semi-gradient method may not perfectly optimize its objective, the above argument reveals that SGD has a deterministic trend toward the region with a smaller magnitude of Hessian, which is commonly believed to correlate with better generalization (Hochreiter & Schmidhuber, 1997; Keskar et al., 2017; Neyshabur et al., 2017; Jiang et al., 2020) (see Appendix A for more discussions). In contrast, the diffusion term can be regarded as a random perturbation to this trend, which can impede optimization when the drift-I term is not strong enough." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "text", + "content": "Based on this view, we conjecture that strengthening the drift term of the Slow SDE can help SGD to better regularize the model, yielding a better generalization performance. More specifically, we propose the following hypothesis, which compares the generalization performances of the following generalized Slow SDEs. Note that " + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "inline_equation", + "content": "\\left(\\frac{1}{B},\\frac{1}{2B}\\right)" + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "text", + "content": "-Slow SDE corresponds to the Slow SDE for SGD (7)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 530, + 421, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 421, + 542 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 421, + 542 + ], + "type": "text", + "content": "Definition 3.4. 
For " + }, + { + "bbox": [ + 104, + 530, + 421, + 542 + ], + "type": "inline_equation", + "content": "\\kappa_{1},\\kappa_{2}\\geq 0" + }, + { + "bbox": [ + 104, + 530, + 421, + 542 + ], + "type": "text", + "content": " define " + }, + { + "bbox": [ + 104, + 530, + 421, + 542 + ], + "type": "inline_equation", + "content": "(\\kappa_{1},\\kappa_{2})" + }, + { + "bbox": [ + 104, + 530, + 421, + 542 + ], + "type": "text", + "content": " -Slow SDE to be the following:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 187, + 544, + 504, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 544, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 187, + 544, + 504, + 563 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\xi}} \\left(\\sqrt {\\kappa_ {1}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t} - \\kappa_ {2} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t\\right). \\tag {8}", + "image_path": "5bbcff488ffecf065c2fa40f37ba394bffdce31e4e7ac2f5c058996b25a85417.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "content": "Hypothesis 3.1. 
Starting at a minimizer " + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "inline_equation", + "content": "\\zeta_0\\in \\Gamma" + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "content": ", run " + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "inline_equation", + "content": "(\\kappa_{1},\\kappa_{2})" + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "content": "-Slow SDE and " + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "inline_equation", + "content": "(\\kappa_{1},\\kappa_{2}^{\\prime})" + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "content": "-Slow SDE respectively for the same amount of time " + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "inline_equation", + "content": "T > 0" + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "content": " and obtain " + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "inline_equation", + "content": "\\zeta (T),\\zeta '(T)" + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "content": ". If " + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "inline_equation", + "content": "\\kappa_{2} > \\kappa_{2}^{\\prime}" + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "content": ", then the expected test accuracy at " + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "inline_equation", + "content": "\\zeta (T)" + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "content": " is better than that at " + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "inline_equation", + "content": "\\zeta '(T)" + }, + { + "bbox": [ + 104, + 567, + 504, + 602 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 604, + 504, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 604, + 504, + 638 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 504, + 638 + ], + "type": "text", + "content": "Due to the No Free Lunch Theorem, we do not claim that our hypothesis is always true, but we do believe that the hypothesis holds when training usual neural networks (e.g., ResNets, VGGNets) on standard benchmarks (e.g., CIFAR-10, ImageNet)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "text", + "content": "Example: Training with Label Noise Regularization. To exemplify the generalization benefit of having a larger drift term, we follow a line of theoretical works (Li et al., 2021b; Blanc et al., 2020; Damian et al., 2021) to study the case of training over-parameterized neural nets with label noise regularization. For a " + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "text", + "content": "-class classification task, the label noise regularization is as follows: every time we draw a sample from the training set, we make the true label as it is with probability " + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "inline_equation", + "content": "1 - p" + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "text", + "content": " and replace it with any other label with equal probability " + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\frac{p}{C-1}" + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "text", + "content": ". 
When we use cross-entropy loss, the Slow SDE for SGD turns out to be a simple deterministic gradient flow on " + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "text", + "content": " (instead of a semi-gradient method) for minimizing the trace of Hessian: " + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathrm{d}\\boldsymbol{\\zeta}(t) = -\\frac{1}{4B}\\nabla_{\\Gamma}\\mathrm{tr}(\\nabla^{2}\\mathcal{L}(\\boldsymbol{\\zeta}))\\mathrm{d}t" + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 642, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\nabla_{\\Gamma}f" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "stands for the gradient of the function " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", 
+ "content": " projected to the tangent space of " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": ". Checking the validity of our hypothesis reduces to the following question: Is minimizing the trace of Hessian beneficial to generalization? Many works prove positive results in concrete settings, including the line of works we just mentioned. We refer the readers to Appendix G for further discussion." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 402, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 402, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 402, + 144 + ], + "type": "text", + "content": "3.3.2 LOCAL SGD STRENGTHENS THE DRIFT TERM IN SLOW SDE." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "text", + "content": "Based on Hypothesis 3.1, we argue that Local SGD improves generalization by strengthening the drift term of the Slow SDE. First, it can be seen from (4) that the Slow SDE for Local SGD has an additional drift-II term. 
Similar to the drift-I term of the Slow SDE for SGD, this drift-II term drives the dynamics to move along the negative semi-gradient of " + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "inline_equation", + "content": "\\frac{K - 1}{2B}\\langle \\nabla^2\\mathcal{L}(\\zeta),\\widehat{\\Psi} (\\zeta)\\rangle" + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "text", + "content": " (with the dependency of " + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "inline_equation", + "content": "\\widehat{\\Psi} (\\zeta)" + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "text", + "content": " ignored). Combining it with the implicit regularizer induced by the drift-I term, we can see that the long-term behavior of Local SGD is similar to a stochastic semi-gradient method minimizing the implicit regularizer " + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "inline_equation", + "content": "\\frac{1}{2B}\\langle \\nabla^{2}\\mathcal{L}(\\zeta),\\widehat{\\Sigma}_{\\diamond}(\\zeta)\\rangle +\\frac{K - 1}{2B}\\langle \\nabla^{2}\\mathcal{L}(\\zeta),\\widehat{\\Psi} (\\zeta)\\rangle" + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 149, + 504, + 236 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": "Comparing the definitions of " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\widehat{\\Sigma}_{\\diamond}(\\zeta)" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": " (5) and " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\widehat{\\Psi}(\\zeta)" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": " (6), we can see that " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\widehat{\\Psi}(\\zeta)" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": " is basically a rescaling of the entries of " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\widehat{\\Sigma}_{\\diamond}(\\zeta)" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": " in the eigenbasis of Hessian, where the rescaling factor " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\psi(\\eta H \\cdot (\\lambda_i + \\lambda_j))" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": " for each entry is between 0 and 1 (see Figure 9 for the plot of " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": "). 
When " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\eta H" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": " is small, the rescaling factors should be close to " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\psi(0) = 0" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\widehat{\\Psi}(\\zeta) \\approx \\mathbf{0}" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": ", leading to almost no additional regularization. On the other hand, when " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\eta H" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": " is large, the rescaling factors should be close to " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\psi(+\\infty) = 1" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": ", so " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\widehat{\\Psi}(\\zeta) \\approx \\widehat{\\Sigma}_{\\diamond}(\\zeta)" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": ". 
We can then merge the two implicit regularizers as " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "\\frac{K}{2B} \\langle \\nabla^2 \\mathcal{L}(\\zeta), \\widehat{\\Sigma}_{\\diamond}(\\zeta) \\rangle" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": ", and (4) becomes the " + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "inline_equation", + "content": "(\\frac{1}{B}, \\frac{K}{2B})" + }, + { + "bbox": [ + 104, + 240, + 504, + 327 + ], + "type": "text", + "content": "-Slow SDE, which is restated below:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 189, + 326, + 504, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 326, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 189, + 326, + 504, + 342 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t} - \\frac {K}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t\\right). 
\\tag {9}", + "image_path": "b993b4b2ad6890155dc2cc347b6156a9e132b6ba9524786801da32f87737c2b7.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "content": "From the above argument we know how the Slow SDE of Local SGD (4) changes as " + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\eta H" + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "content": " transitions from 0 to " + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "inline_equation", + "content": "+\\infty" + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "content": ". Initially, when " + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\eta H = 0" + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "content": ", (4) is the same as the " + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "inline_equation", + "content": "(\\frac{1}{B}, \\frac{1}{2B})" + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "content": "-Slow SDE for SGD. Then increasing " + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\eta H" + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "content": " strengthens the drift term of (4). 
As " + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "inline_equation", + "content": "\\eta H \\to +\\infty" + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "content": ", (4) transitions to the " + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "inline_equation", + "content": "(\\frac{1}{B}, \\frac{K}{2B})" + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "content": "-Slow SDE, where the drift term becomes " + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 342, + 504, + 388 + ], + "type": "text", + "content": " times larger." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "text", + "content": "According to Hypothesis 3.1, the " + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "inline_equation", + "content": "(\\frac{1}{B},\\frac{K}{2B})" + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "text", + "content": "-Slow SDE generalizes better than the " + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "inline_equation", + "content": "(\\frac{1}{B},\\frac{1}{2B})" + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "text", + "content": "-Slow SDE, so Local SGD with " + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "inline_equation", + "content": "\\eta H = +\\infty" + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "text", + "content": " should generalize better than SGD. 
When " + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "inline_equation", + "content": "\\eta H" + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "text", + "content": " is chosen realistically as a finite value, the generalization performance of Local SGD interpolates between these two cases, which results in a worse generalization than " + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "inline_equation", + "content": "\\eta H = +\\infty" + }, + { + "bbox": [ + 104, + 392, + 504, + 438 + ], + "type": "text", + "content": " but should still be better than SGD." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 443, + 441, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 443, + 441, + 454 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 441, + 454 + ], + "type": "text", + "content": "3.3.3 THEORETICAL INSIGHTS INTO TUNING THE NUMBER OF LOCAL STEPS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "content": "Based on our Slow SDE approximations, we now discuss how the number of local steps " + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "content": " affects the generalization of Local SGD. 
When " + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "content": " is small but finite, tuning " + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "content": " offers a trade-off between regularization strength and SDE approximation quality. Larger " + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "inline_equation", + "content": "\\alpha \\coloneqq \\eta H" + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "content": " makes the regularization stronger in the SDE (as discussed in Section 3.3.2), but the SDE itself may lose track of Local SGD, which can be seen from the error bound " + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\sqrt{\\alpha\\eta\\log(\\alpha / \\eta\\delta)})" + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "content": " in Theorem 3.3. Therefore, we expect the test accuracy to first increase and then decrease as we gradually increase " + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "content": ". Indeed, we observe in Figures 2(e) and 2(f) that the plot of test accuracy versus " + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "content": " is unimodal for each " + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 460, + 504, + 539 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": "It is thus necessary to tune " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " for the best generalization. When " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " is tuned together with other hyperparameters, such as learning rate " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": ", our Slow SDE approximation recommends setting " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " to be at least " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\Omega(\\eta^{-1})" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\alpha := \\eta H" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " does not vanish in the Slow SDE. 
Since larger " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " gives a stronger regularization effect, the optimal " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " should be set to the largest value so that the Slow SDE does not lose track of Local SGD. Indeed, we empirically observed that when " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " is tuned optimally, " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " increases as " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " decreases, suggesting that the optimal " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": " grows faster than " + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\Omega(\\eta^{-1})" + }, + { + "bbox": [ + 104, + 544, + 504, + 612 + ], + "type": "text", + "content": ". See Figure 5(f)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 616, + 202, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 202, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 202, + 628 + ], + "type": "text", + "content": "4 CONCLUSIONS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 632, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 505, + 733 + ], + "type": "text", + "content": "In this paper, we analyze the long-term generalization behavior of Local SGD in the small learning rate regime by deriving the Slow SDE for Local SGD as a generalization of that for SGD (Li et al., 2021b). We attribute the generalization improvement over SGD to the larger drift term in the SDE for Local SGD. Our empirical validation shows that Local SGD indeed induces generalization benefits with small learning rate and long enough training time. The main limitation of our work is that our analysis does not imply any direct theoretical separation between SGD and Local SGD in test accuracy, which requires a much deeper understanding of the loss landscape and the Slow SDEs and is left for future work. Another direction for future work is to design distributed training methods that provably generalize better than SGD based on the theoretical insights obtained from Slow SDEs." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 384, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 384, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 384, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGEMENT AND DISCLOSURE OF FUNDING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 506, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 506, + 162 + ], + "type": "text", + "content": "The work of Xinran Gu and Longbo Huang is supported by the Technology and Innovation Major Project of the Ministry of Science and Technology of China under Grant 2020AAA0108400 and 2020AAA0108403, the Tsinghua University Initiative Scientific Research Program, and Tsinghua Precision Medicine Foundation 10001020109. The work of Kaifeng Lyu and Sanjeev Arora is supported by funding from NSF, ONR, Simons Foundation, DARPA and SRC." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 178, + 176, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 178, + 176, + 190 + ], + "spans": [ + { + "bbox": [ + 106, + 178, + 176, + 190 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 196, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 196, + 505, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 196, + 505, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 505, + 242 + ], + "type": "text", + "content": "Kwangjun Ahn, Jingzhao Zhang, and Suvrit Sra. Understanding the unstable convergence of gradient descent. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 247-257. PMLR, 17-23 Jul 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 247, + 505, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 505, + 293 + ], + "type": "text", + "content": "Debraj Basu, Deepesh Data, Can Karakus, and Suhas Diggavi. Qsparse-local-SGD: Distributed SGD with quantization, sparsification and local computations. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 300, + 505, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 505, + 334 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 505, + 334 + ], + "type": "text", + "content": "Yoshua Bengio. 
Practical Recommendations for Gradient-Based Training of Deep Architectures, pp. 437-478. Springer Berlin Heidelberg, Berlin, Heidelberg, 2012. ISBN 978-3-642-35289-8. doi: 10.1007/978-3-642-35289-8_26." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 340, + 505, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 340, + 505, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 505, + 385 + ], + "type": "text", + "content": "Guy Blanc, Neha Gupta, Gregory Valiant, and Paul Valiant. Implicit regularization for deep neural networks driven by an Ornstein-uhlenbeck like process. In Jacob Abernethy and Shivani Agarwal (eds.), Proceedings of Thirty Third Conference on Learning Theory, volume 125 of Proceedings of Machine Learning Research, pp. 483–513. PMLR, 09–12 Jul 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 392, + 505, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 392, + 505, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 505, + 427 + ], + "type": "text", + "content": "David Brandfonbrener and Joan Bruna. Geometric insights into the convergence of nonlinear TD learning. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 434, + 505, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 434, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 505, + 456 + ], + "type": "text", + "content": "Jianmin Chen, Xinghao Pan, Rajat Monga, Samy Bengio, and Rafal Jozefowicz. Revisiting distributed synchronous SGD. arXiv preprint arXiv:1604.00981, 2016." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 464, + 505, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 505, + 508 + ], + "type": "text", + "content": "Kai Chen and Qiang Huo. Scalable training of deep learning machines by incremental block training with intra-block parallel optimization and blockwise model-update filtering. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5880-5884, 2016. doi: 10.1109/ICASSP.2016.7472805." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 515, + 505, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 505, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 505, + 550 + ], + "type": "text", + "content": "Alex Damian, Tengyu Ma, and Jason D. Lee. Label noise SGD provably prefers flat global minimizers. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 556, + 505, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 556, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 505, + 601 + ], + "type": "text", + "content": "Laurent Dinh, Razvan Pascanu, Samy Bengio, and Yoshua Bengio. Sharp minima can generalize for deep nets. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 1019-1028. PMLR, 06-11 Aug 2017." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 608, + 505, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 608, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 505, + 631 + ], + "type": "text", + "content": "Aijun Du and JinQiao Duan. 
Invariant manifold reduction for stochastic dynamical systems. Dynamic Systems and Applications, 16:681-696, 2007." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 638, + 505, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 638, + 505, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 505, + 662 + ], + "type": "text", + "content": "KJ Falconer. Differentiation of the limit mapping in a dynamical system. Journal of the London Mathematical Society, 2(2):356-372, 1983." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 668, + 505, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 505, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 505, + 701 + ], + "type": "text", + "content": "Benjamin Fehrman, Benjamin Gess, and Arnulf Jentzen. Convergence rates for the stochastic gradient descent method for non-convex objective functions. Journal of Machine Learning Research, 21:136, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "text", + "content": "Damir Filipović. Invariant manifolds for weak solutions to stochastic equations. *Probability theory and related fields*, 118(3):323-341, 2000." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "text", + "content": "Margalit R Glasgow, Honglin Yuan, and Tengyu Ma. Sharp bounds for federated averaging (Local SGD) and continuous perspective. In International Conference on Artificial Intelligence and Statistics, pp. 9050-9090. PMLR, 2022." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 199 + ], + "type": "text", + "content": "Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch SGD: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 205, + 505, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 205, + 505, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 505, + 240 + ], + "type": "text", + "content": "Farzin Haddadpour, Mohammad Mahdi Kamani, Mehrdad Mahdavi, and Viveck Cadambe. Local SGD with periodic averaging: Tighter analysis and adaptive synchronization. Advances in Neural Information Processing Systems, 32, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 246, + 505, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 246, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 505, + 281 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. In Proceedings of the IEEE international conference on computer vision, pp. 1026-1034, 2015." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 288, + 505, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 288, + 505, + 322 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 505, + 322 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 329, + 505, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 329, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 329, + 505, + 352 + ], + "type": "text", + "content": "Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 359, + 486, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 359, + 486, + 373 + ], + "spans": [ + { + "bbox": [ + 105, + 359, + 486, + 373 + ], + "type": "text", + "content": "Sepp Hochreiter and Jürgen Schmidhuber. Flat minima. Neural computation, 9(1):1-42, 1997." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 378, + 505, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 378, + 505, + 413 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 505, + 413 + ], + "type": "text", + "content": "Elad Hoffer, Itay Hubara, and Daniel Soudry. Train longer, generalize better: closing the generalization gap in large batch training of neural networks. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 419, + 505, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 419, + 505, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 505, + 444 + ], + "type": "text", + "content": "Wenqing Hu, Chris Junchi Li, Lei Li, and Jian-Guo Liu. On the diffusion approximation of nonconvex stochastic gradient descent. arXiv preprint arXiv:1705.07562, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 450, + 505, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 450, + 505, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 505, + 474 + ], + "type": "text", + "content": "Hikaru Ibayashi and Masaaki Imaizumi. 
Exponential escape efficiency of SGD from sharp minima in non-stationary regime. arXiv preprint arXiv:2111.04004, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 480, + 505, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 480, + 505, + 515 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 505, + 515 + ], + "type": "text", + "content": "Stanisław Jastrzebski, Zachary Kenton, Devansh Arpit, Nicolas Ballas, Asja Fischer, Yoshua Bengio, and Amos Storkey. Three factors influencing minima in SGD. arXiv preprint arXiv:1711.04623, 2017." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 521, + 505, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 505, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 505, + 567 + ], + "type": "text", + "content": "Xianyan Jia, Shutao Song, Wei He, Yangzihao Wang, Haidong Rong, Feihu Zhou, Liqiang Xie, Zhenyu Guo, Yuzhou Yang, Liwei Yu, et al. Highly scalable deep learning training system with mixed-precision: Training imagenet in four minutes. Advances in Neural Information Processing Systems, 2018." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 574, + 505, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 574, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 505, + 609 + ], + "type": "text", + "content": "Yiding Jiang, Behnam Neyshabur, Hossein Mobahi, Dilip Krishnan, and Samy Bengio. *Fantastic generalization measures and where to find them.* In International Conference on Learning Representations, 2020." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 615, + 505, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 615, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 505, + 660 + ], + "type": "text", + "content": "Peter Kairouz, H Brendan McMahan, Brendan Avent, Aurélien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. Advances and open problems in federated learning. Foundations and Trends® in Machine Learning, 14(1-2):1-210, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 667, + 505, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 505, + 702 + ], + "type": "text", + "content": "Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank Reddi, Sebastian Stich, and Ananda Theertha Suresh. Scaffold: Stochastic controlled averaging for federated learning. In International Conference on Machine Learning, pp. 5132-5143. PMLR, 2020." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 732 + ], + "type": "text", + "content": "G. S. Katzenberger. Solutions of a stochastic differential equation forced onto a manifold by a large drift. The Annals of Probability, 19(4):1587 - 1628, 1991." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. In International Conference on Learning Representations, 2017." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 505, + 158 + ], + "type": "text", + "content": "Ahmed Khaled, Konstantin Mishchenko, and Peter Richtárik. Tighter theory for local SGD on identical and heterogeneous data. In International Conference on Artificial Intelligence and Statistics, pp. 4519-4529. PMLR, 2020." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 165, + 505, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 165, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 505, + 211 + ], + "type": "text", + "content": "Bobby Kleinberg, Yanzhi Li, and Yang Yuan. An alternative view: When does SGD escape local minima? In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 2698-2707. PMLR, 10-15 Jul 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 218, + 505, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 218, + 505, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 505, + 242 + ], + "type": "text", + "content": "Alex Krizhevsky. One weird trick for parallelizing convolutional neural networks. arXiv preprint arXiv:1404.5997, 2014." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 249, + 443, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 249, + 443, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 249, + 443, + 262 + ], + "type": "text", + "content": "Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 270, + 505, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 505, + 293 + ], + "type": "text", + "content": "Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/, 2022." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 300, + 505, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 505, + 334 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 505, + 334 + ], + "type": "text", + "content": "Yann A. LeCun, Léon Bottou, Genevieve B. Orr, and Klaus-Robert Müller. Efficient BackProp, pp. 9-48. Springer Berlin Heidelberg, Berlin, Heidelberg, 2012. ISBN 978-3-642-35289-8. doi: 10.1007/978-3-642-35289-8_3." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 342, + 505, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 505, + 376 + ], + "type": "text", + "content": "Qianxiao Li, Cheng Tai, and Weinan E. Stochastic modified equations and dynamics of stochastic gradient algorithms i: Mathematical foundations. Journal of Machine Learning Research, 20(40): 1-47, 2019a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 384, + 505, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 505, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 505, + 408 + ], + "type": "text", + "content": "Xiang Li, Kaixuan Huang, Wenhao Yang, Shusen Wang, and Zhihua Zhang. On the convergence of fedavg on non-iid data. In International Conference on Learning Representations, 2019b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 415, + 505, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 505, + 449 + ], + "type": "text", + "content": "Zhiyuan Li, Kaifeng Lyu, and Sanjeev Arora. Reconciling modern deep learning with traditional optimization analyses: The intrinsic learning rate. Advances in Neural Information Processing Systems, 33:14544-14555, 2020." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 457, + 505, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 505, + 491 + ], + "type": "text", + "content": "Zhiyuan Li, Sadhika Malladi, and Sanjeev Arora. On the validity of modeling SGD with stochastic differential equations (sdes). Advances in Neural Information Processing Systems, 34:12712-12725, 2021a." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 498, + 505, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 498, + 505, + 523 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 505, + 523 + ], + "type": "text", + "content": "Zhiyuan Li, Tianhao Wang, and Sanjeev Arora. What happens after SGD reaches zero loss? a mathematical framework. In International Conference on Learning Representations, 2021b." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 530, + 505, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 530, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 505, + 564 + ], + "type": "text", + "content": "Zhiyuan Li, Tianhao Wang, and Dingli Yu. Fast mixing of stochastic gradient descent with normalization and weight decay. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 571, + 505, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 505, + 617 + ], + "type": "text", + "content": "Tao Lin, Lingjing Kong, Sebastian Stich, and Martin Jaggi. Extrapolation for large-batch training in deep learning. 
In Hal Daumé III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 6094-6104. PMLR, 13-18 Jul 2020a." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 624, + 505, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 505, + 649 + ], + "type": "text", + "content": "Tao Lin, Sebastian U. Stich, Kumar Kshitij Patel, and Martin Jaggi. Don't use large mini-batches, use Local SGD. In International Conference on Learning Representations, 2020b." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 655, + 505, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 505, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 505, + 680 + ], + "type": "text", + "content": "Kaifeng Lyu, Zhiyuan Li, and Sanjeev Arora. Understanding the generalization benefit of normalization layers: Sharpness reduction, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 687, + 505, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 505, + 731 + ], + "type": "text", + "content": "Chao Ma and Lexing Ying. On linear stability of SGD and input-smoothness of neural networks. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, volume 34, pp. 16805-16817. Curran Associates, Inc., 2021." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Sadhika Malladi, Kaifeng Lyu, Abhishek Panigrahi, and Sanjeev Arora. On the SDEs and scaling rules for adaptive gradient algorithms. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 505, + 157 + ], + "type": "text", + "content": "Gideon Mann, Ryan T. McDonald, Mehryar Mohri, Nathan Silberman, and Dan Walker. Efficient large-scale distributed training of conditional maximum entropy models. In Advances in Neural Information Processing Systems 22, pp. 1231-1239, 2009." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 162, + 504, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 162, + 504, + 197 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 504, + 197 + ], + "type": "text", + "content": "Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics, pp. 1273-1282. PMLR, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 201, + 504, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 504, + 238 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 504, + 238 + ], + "type": "text", + "content": "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al. Human-level control through deep reinforcement learning. nature, 518(7540):529-533, 2015." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 242, + 504, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 504, + 288 + ], + "type": "text", + "content": "Behnam Neyshabur, Srinadh Bhojanapalli, David Mcallester, and Nati Srebro. Exploring generalization in deep learning. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 293, + 504, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 504, + 318 + ], + "type": "text", + "content": "Jose Javier Gonzalez Ortiz, Jonathan Frankle, Mike Rabbat, Ari Morcos, and Nicolas Ballas. 
Trade-offs of Local SGD at scale: An empirical study. arXiv preprint arXiv:2110.08133, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 323, + 504, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 504, + 348 + ], + "type": "text", + "content": "Daniel Povey, Xiaohui Zhang, and Sanjeev Khudanpur. Parallel training of dnns with natural gradient and parameter averaging. arXiv preprint arXiv:1410.7455, 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 353, + 504, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 353, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 504, + 376 + ], + "type": "text", + "content": "Prajit Ramachandran, Barret Zoph, and Quoc V Le. Searching for activation functions. arXiv preprint arXiv:1710.05941, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 382, + 504, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 382, + 504, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 382, + 504, + 417 + ], + "type": "text", + "content": "Benjamin Recht, Christopher Ré, Stephen J. Wright, and Feng Niu. Hogwild: A lock-free approach to parallelizing stochastic gradient descent. In Advances in Neural Information Processing Systems 24, pp. 693-701, 2011." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 422, + 504, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 504, + 468 + ], + "type": "text", + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 115(3):211-252, 2015. 
doi: 10.1007/s11263-015-0816-y." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 473, + 504, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 473, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 504, + 540 + ], + "type": "text", + "content": "Frank Seide, Hao Fu, Jasha Droppo, Gang Li, and Dong Yu. 1-bit stochastic gradient descent and its application to data-parallel distributed training of speech dnns. In Haizhou Li, Helen M. Meng, Bin Ma, Engsiong Chng, and Lei Xie (eds.), INTERSPEECH 2014, 15th Annual Conference of the International Speech Communication Association, Singapore, September 14-18, 2014, pp. 1058-1062. ISCA, 2014. URL http://www.isca-speech.org/archive/interspeech_2014/i14_1058.html." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 547, + 506, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 547, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 547, + 506, + 582 + ], + "type": "text", + "content": "Christopher J. Shallue, Jaehoon Lee, Joseph Antognini, Jascha Sohl-Dickstein, Roy Frostig, and George E. Dahl. Measuring the effects of data parallelism on neural network training. Journal of Machine Learning Research, 20(112):1-49, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 587, + 504, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 587, + 504, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 587, + 504, + 611 + ], + "type": "text", + "content": "K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, May 2015." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 616, + 504, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 504, + 662 + ], + "type": "text", + "content": "Samuel Smith, Erich Elsen, and Soham De. On the generalization benefit of noise in stochastic gradient descent. In Hal Daumé III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 9058-9067. PMLR, 13-18 Jul 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 668, + 504, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 504, + 702 + ], + "type": "text", + "content": "Samuel L Smith, Benoit Dherin, David Barrett, and Soham De. On the origin of implicit regularization in stochastic gradient descent. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "text", + "content": "Sebastian U Stich. Local SGD converges fast and communicates little. In International Conference on Learning Representations, 2018." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "text", + "content": "Nikko Strom. Scalable distributed DNN training using commodity GPU cloud computing. In IN-TERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden, Germany, September 6-10, 2015, pp. 1488-1492. ISCA, 2015." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 504, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 504, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 504, + 146 + ], + "type": "text", + "content": "Hang Su and Haoyu Chen. Experiments on parallel training of deep neural network using model averaging. arXiv preprint arXiv:1507.01239, 2015." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 151, + 504, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 151, + 504, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 151, + 504, + 175 + ], + "type": "text", + "content": "Richard S. Sutton and Andrew G. Barto. Reinforcement learning - an introduction. Adaptive computation and machine learning. MIT Press, 1998. ISBN 978-0-262-19398-6." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 180, + 504, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 180, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 504, + 205 + ], + "type": "text", + "content": "Jianyu Wang and Gauri Joshi. Adaptive communication strategies to achieve the best error-routine trade-off in local-update SGD. Proceedings of Machine Learning and Systems, 1:212-229, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 210, + 504, + 234 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 210, + 504, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 504, + 234 + ], + "type": "text", + "content": "Jianyu Wang and Gauri Joshi. Cooperative SGD: A unified framework for the design and analysis of local-update SGD algorithms. Journal of Machine Learning Research, 22(213):1-50, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 239, + 504, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 504, + 274 + ], + "type": "text", + "content": "Jianyu Wang, Rudrajit Das, Gauri Joshi, Satyen Kale, Zheng Xu, and Tong Zhang. On the unreasonable effectiveness of federated averaging with heterogeneous data. arXiv preprint arXiv:2206.04723, 2022." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 280, + 504, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 280, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 504, + 315 + ], + "type": "text", + "content": "Blake Woodworth, Kumar Kshitij Patel, Sebastian Stich, Zhen Dai, Brian Bullins, Brendan Mcmahan, Ohad Shamir, and Nathan Srebro. Is local sgd better than minibatch sgd? In International Conference on Machine Learning, pp. 10334-10343. PMLR, 2020a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 319, + 504, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 504, + 354 + ], + "type": "text", + "content": "Blake E Woodworth, Kumar Kshitij Patel, and Nati Srebro. Minibatch vs Local SGD for heterogeneous distributed learning. Advances in Neural Information Processing Systems, 33:6281-6292, 2020b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 360, + 504, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 360, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 360, + 504, + 406 + ], + "type": "text", + "content": "Lei Wu, Chao Ma, and Weinan E. How sgd selects the global minima in over-parameterized learning: A dynamical stability perspective. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesà-Bianchi, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 411, + 504, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 504, + 446 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 504, + 446 + ], + "type": "text", + "content": "Zeke Xie, Issei Sato, and Masashi Sugiyama. 
A diffusion theory for deep learning dynamics: Stochastic gradient descent exponentially favors flat minima. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 452, + 504, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 452, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 452, + 504, + 486 + ], + "type": "text", + "content": "Yang You, Zhao Zhang, Cho-Jui Hsieh, James Demmel, and Kurt Keutzer. Imagenet training in minutes. In Proceedings of the 47th International Conference on Parallel Processing, pp. 1-10, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 492, + 504, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 504, + 537 + ], + "type": "text", + "content": "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 544, + 504, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 504, + 578 + ], + "type": "text", + "content": "Hao Yu, Sen Yang, and Shenghuo Zhu. Parallel restarted SGD with faster convergence and less communication: Demystifying why model averaging works for deep learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 5693-5700, 2019." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 584, + 504, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 504, + 619 + ], + "type": "text", + "content": "Jingzhao Zhang, Sai Praneeth Karimireddy, Andreas Veit, Seungyeon Kim, Sashank Reddi, Sanjiv Kumar, and Suvrit Sra. Why are adaptive methods good for attention models? Advances in Neural Information Processing Systems, 33:15383-15393, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 624, + 504, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 504, + 669 + ], + "type": "text", + "content": "Xiaohui Zhang, Jan Trmal, Daniel Povey, and Sanjeev Khudanpur. Improving deep neural network acoustic models using generalized maxout networks. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 215-219, 2014. doi: 10.1109/ICASSP.2014.6853589." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 675, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 675, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 504, + 732 + ], + "type": "text", + "content": "Fan Zhou and Guojing Cong. On the convergence properties of a k-step averaging stochastic gradient descent algorithm for nonconvex optimization. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18, pp. 3219-3227. International Joint Conferences on Artificial Intelligence Organization, 7 2018. doi: 10.24963/ijcai.2018/447. URL https://doi.org/10.24963/ijcai.2018/447." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 186 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Zhanxing Zhu, Jingfeng Wu, Bing Yu, Lei Wu, and Jinwen Ma. The anisotropic noise in stochastic gradient descent: Its behavior of escaping from sharp minima and regularization effects. arXiv preprint arXiv:1803.00195, 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "Martin Zinkevich, Markus Weimer, Lihong Li, and Alex Smola. Parallelized stochastic gradient descent. In J. Lafferty, C. Williams, J. Shawe-Taylor, R. Zemel, and A. Culotta (eds.), Advances in Neural Information Processing Systems, volume 23. Curran Associates, Inc., 2010." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 186 + ], + "type": "text", + "content": "Bernt Øksendal. Stochastic differential equations: an introduction with applications. Springer Science & Business Media, 2013." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 165, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 165, + 93 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 165, + 93 + ], + "type": "text", + "content": "CONTENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 110, + 505, + 149 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 107, + 110, + 505, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 110, + 505, + 121 + ], + "spans": [ + { + "bbox": [ + 107, + 110, + 505, + 121 + ], + "type": "text", + "content": "1 Introduction 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 137, + 505, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 137, + 505, + 149 + ], + "spans": [ + { 
+ "bbox": [ + 106, + 137, + 505, + 149 + ], + "type": "text", + "content": "2 When does Local SGD Generalize Better? 3" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 120, + 154, + 505, + 184 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 120, + 154, + 505, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 154, + 505, + 167 + ], + "spans": [ + { + "bbox": [ + 120, + 154, + 505, + 167 + ], + "type": "text", + "content": "2.1 The Debate on Local SGD 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 171, + 505, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 171, + 505, + 184 + ], + "spans": [ + { + "bbox": [ + 121, + 171, + 505, + 184 + ], + "type": "text", + "content": "2.2 Key Factors: Small Learning Rate and Sufficient Training Time 4" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "type": "text", + "content": "3 Theoretical Analysis of Local SGD: The Slow SDE 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 120, + 216, + 505, + 263 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 120, + 216, + 505, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 216, + 505, + 229 + ], + "spans": [ + { + "bbox": [ + 120, + 216, + 505, + 229 + ], + "type": "text", + "content": "3.1 Difficulty of Adapting the SDE Framework to Local SGD 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 233, + 505, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 233, + 505, + 246 + ], + "spans": [ + { + "bbox": [ + 121, + 233, + 505, + 246 + ], + "type": "text", + "content": "3.2SDE Approximation near the Minimizer 
Manifold 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 251, + 505, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 251, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 121, + 251, + 505, + 263 + ], + "type": "text", + "content": "3.3 Interpretation of the Slow SDEs 8" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 143, + 267, + 505, + 314 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 143, + 267, + 505, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 267, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 143, + 267, + 505, + 280 + ], + "type": "text", + "content": "3.3.1 Interpretation of the Slow SDE for SGD. 8" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 143, + 285, + 505, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 285, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 143, + 285, + 505, + 297 + ], + "type": "text", + "content": "3.3.2 Local SGD Strengthens the Drift Term in Slow SDE. 
9" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 143, + 301, + 505, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 301, + 505, + 314 + ], + "spans": [ + { + "bbox": [ + 143, + 301, + 505, + 314 + ], + "type": "text", + "content": "3.3.3 Theoretical Insights into Tuning the Number of Local Steps 9" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 329, + 505, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 329, + 505, + 341 + ], + "spans": [ + { + "bbox": [ + 106, + 329, + 505, + 341 + ], + "type": "text", + "content": "4 Conclusions 9" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 357, + 505, + 453 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 106, + 357, + 505, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 357, + 505, + 369 + ], + "spans": [ + { + "bbox": [ + 106, + 357, + 505, + 369 + ], + "type": "text", + "content": "A Additional Related Works 18" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 384, + 505, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 384, + 505, + 396 + ], + "spans": [ + { + "bbox": [ + 106, + 384, + 505, + 396 + ], + "type": "text", + "content": "B Additional Discussions 19" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 412, + 505, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 412, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 106, + 412, + 505, + 425 + ], + "type": "text", + "content": "C Implementation Details of Parallel SGD, Local SGD and Post-local SGD 20" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 440, + 505, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 440, + 505, + 453 + ], + "spans": [ + { + "bbox": [ + 106, + 440, + 505, + 453 + ], + "type": "text", + "content": "D Modeling Local 
SGD with Multiple Conventional SDEs 23" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 468, + 505, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 468, + 505, + 480 + ], + "spans": [ + { + "bbox": [ + 106, + 468, + 505, + 480 + ], + "type": "text", + "content": "E Additional Interpretation of the Slow SDEs 23" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 120, + 485, + 505, + 514 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 120, + 485, + 505, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 485, + 505, + 498 + ], + "spans": [ + { + "bbox": [ + 120, + 485, + 505, + 498 + ], + "type": "text", + "content": "E.1 Understanding the Diffusion Term in the Slow SDE 23" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 121, + 502, + 505, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 502, + 505, + 514 + ], + "spans": [ + { + "bbox": [ + 121, + 502, + 505, + 514 + ], + "type": "text", + "content": "E.2 The Effect of Global Batch Size on Generalization 24" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 529, + 505, + 570 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 106, + 529, + 505, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 529, + 505, + 542 + ], + "spans": [ + { + "bbox": [ + 106, + 529, + 505, + 542 + ], + "type": "text", + "content": "F Additional Experimental Results 25" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 557, + 505, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 557, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 106, + 557, + 505, + 570 + ], + "type": "text", + "content": "G Discussions on Local SGD with Label Noise Regularization 27" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, 
+ { + "bbox": [ + 120, + 574, + 505, + 605 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 120, + 574, + 505, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 574, + 505, + 587 + ], + "spans": [ + { + "bbox": [ + 120, + 574, + 505, + 587 + ], + "type": "text", + "content": "G.1 The Slow SDE for Local SGD with Label Noise Regularization 27" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 121, + 592, + 505, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 592, + 505, + 605 + ], + "spans": [ + { + "bbox": [ + 121, + 592, + 505, + 605 + ], + "type": "text", + "content": "G.2 The Equivalence of Enlarging the Learning Rate and Adding Local Steps 28" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 620, + 505, + 715 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 106, + 620, + 505, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 620, + 505, + 632 + ], + "spans": [ + { + "bbox": [ + 106, + 620, + 505, + 632 + ], + "type": "text", + "content": "H Deriving the Slow SDE after Applying the LSR 28" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 647, + 505, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 647, + 505, + 659 + ], + "spans": [ + { + "bbox": [ + 106, + 647, + 505, + 659 + ], + "type": "text", + "content": "I Proof of Theorem 3.1 30" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 675, + 505, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 675, + 505, + 687 + ], + "spans": [ + { + "bbox": [ + 106, + 675, + 505, + 687 + ], + "type": "text", + "content": "J Proof Outline of Main Theorems 33" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 703, + 505, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 703, + 505, + 715 + ], 
+ "spans": [ + { + "bbox": [ + 106, + 703, + 505, + 715 + ], + "type": "text", + "content": "K Proof Details of Main Theorems 33" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 720, + 505, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 720, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 121, + 720, + 505, + 732 + ], + "type": "text", + "content": "K.1 Additional Notations 34" + } + ] + } + ], + "index": 38 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 39 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 81, + 505, + 145 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 121, + 81, + 505, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 81, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 121, + 81, + 505, + 95 + ], + "type": "text", + "content": "K.2 Computing the Derivatives of the Limiting Mapping 34" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 99, + 505, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 99, + 505, + 111 + ], + "spans": [ + { + "bbox": [ + 121, + 99, + 505, + 111 + ], + "type": "text", + "content": "K.3 Preliminary Lemmas for GD and GF 35" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 115, + 505, + 129 + ], + "type": "text", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 115, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 121, + 115, + 505, + 129 + ], + "type": "text", + "content": "K.4 Construction of working zones 38" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 132, + 505, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 132, + 505, + 145 + ], + "spans": [ + { + "bbox": [ + 121, + 132, + 505, + 145 + ], + "type": "text", + "content": "K.5 Phase 1: Iterate Approaching the Manifold 39" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 143, + 149, + 505, + 196 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 143, + 149, + 505, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 149, + 505, + 162 + ], + "spans": [ + { + "bbox": [ + 143, + 149, + 505, + 162 + ], + "type": "text", + "content": "K.5.1 Additional notations 39" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 166, + 505, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 166, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 143, + 166, + 505, + 178 + ], + "type": "text", + "content": "K.5.2 Proof for Subphase 1 39" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 183, + 505, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 183, + 505, + 196 + ], + "spans": [ + { + "bbox": [ + 143, + 183, + 505, + 196 + ], + "type": "text", + "content": "K.5.3 Proof for Subphase 2 43" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 200, + 505, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 200, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 121, + 200, + 505, + 213 + ], + "type": "text", + "content": "K.6 Phase 2: Iterates Staying Close to Manifold 46" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 143, + 217, + 505, + 247 + ], + 
"type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 143, + 217, + 505, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 217, + 505, + 230 + ], + "spans": [ + { + "bbox": [ + 143, + 217, + 505, + 230 + ], + "type": "text", + "content": "K.6.1 Additional notations 46" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 143, + 234, + 505, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 234, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 143, + 234, + 505, + 247 + ], + "type": "text", + "content": "K.6.2 Proof for the High Probability Bounds 46" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 251, + 505, + 298 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 121, + 251, + 505, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 251, + 505, + 264 + ], + "spans": [ + { + "bbox": [ + 121, + 251, + 505, + 264 + ], + "type": "text", + "content": "K.7 Summary of the dynamics and Proof of Theorems J.1 and J.2 51" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 268, + 505, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 268, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 121, + 268, + 505, + 281 + ], + "type": "text", + "content": "K.8 Proof of Theorem 3.3 52" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 285, + 505, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 285, + 505, + 298 + ], + "spans": [ + { + "bbox": [ + 121, + 285, + 505, + 298 + ], + "type": "text", + "content": "K.9 Computing the Moments for One \"Giant Step\" 53" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 302, + 505, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 302, + 505, + 315 + ], + "spans": [ + { + "bbox": [ + 121, + 302, + 505, + 
315 + ], + "type": "text", + "content": "K.10 Proof of Weak Approximation 66" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 143, + 319, + 505, + 348 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 143, + 319, + 505, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 319, + 505, + 331 + ], + "spans": [ + { + "bbox": [ + 143, + 319, + 505, + 331 + ], + "type": "text", + "content": "K.10.1 Preliminaries and additional notations 67" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 143, + 335, + 505, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 335, + 505, + 348 + ], + "spans": [ + { + "bbox": [ + 143, + 335, + 505, + 348 + ], + "type": "text", + "content": "K.10.2 Proof of the approximation in our context 68" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 362, + 505, + 375 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 362, + 505, + 375 + ], + "spans": [ + { + "bbox": [ + 105, + 362, + 505, + 375 + ], + "type": "text", + "content": "L Deriving the Slow SDE for Label Noise Regularization 72" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 389, + 505, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 389, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 505, + 402 + ], + "type": "text", + "content": "M Experimental Details 74" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 121, + 407, + 505, + 503 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 121, + 407, + 505, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 407, + 505, + 418 + ], + "spans": [ + { + "bbox": [ + 121, + 407, + 505, + 418 + ], + "type": "text", + "content": "M.1 Post-local SGD Experiments in Section 1 74" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 121, + 423, + 505, + 436 + ], 
+ "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 423, + 505, + 436 + ], + "spans": [ + { + "bbox": [ + 121, + 423, + 505, + 436 + ], + "type": "text", + "content": "M.2 Experimental Details for Figures 2 and 5 74" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 121, + 440, + 505, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 440, + 505, + 453 + ], + "spans": [ + { + "bbox": [ + 121, + 440, + 505, + 453 + ], + "type": "text", + "content": "M.3 Details for Experiments in Figure 6. 75" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 121, + 457, + 505, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 457, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 121, + 457, + 505, + 469 + ], + "type": "text", + "content": "M.4 Details for Experiments on the Effect of the Diffusion Term 75" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 121, + 475, + 505, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 475, + 505, + 487 + ], + "spans": [ + { + "bbox": [ + 121, + 475, + 505, + 487 + ], + "type": "text", + "content": "M.5 Details for Experiments on the Effect of Global Batch Size 76" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 121, + 491, + 505, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 491, + 505, + 503 + ], + "spans": [ + { + "bbox": [ + 121, + 491, + 505, + 503 + ], + "type": "text", + "content": "M.6 Details for Experiments on Label Noise Regularization 76" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, 
+ 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 82, + 288, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 288, + 92 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 288, + 92 + ], + "type": "text", + "content": "A ADDITIONAL RELATED WORKS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 159, + 504, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 159, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 107, + 159, + 504, + 380 + ], + "type": "text", + "content": "**Optimization aspect of Local SGD.** Local SGD is a communication-efficient variant of parallel SGD, where multiple workers perform SGD independently and average the model parameters periodically. Dating back to Mann et al. (2009) and Zinkevich et al. (2010), this strategy has been widely adopted to reduce the communication cost and speed up training in both scenarios of data center distributed training (Chen & Huo, 2016; Zhang et al., 2014; Povey et al., 2014; Su & Chen, 2015) and Federated Learning (McMahan et al., 2017; Kairouz et al., 2021). To further accelerate training, Wang & Joshi (2019) and Haddadpour et al. (2019) proposed adaptive schemes for the averaging frequency, and Basu et al. (2019) combined Local SGD with gradient compression. Motivated to theoretically understand the empirical success of Local SGD, a lot of researchers analyzed the convergence rate of Local SGD under various settings, e.g., homogeneous/heterogeneous data and convex/non-convex objective functions. Among them, Yu et al. (2019); Stich (2018); Khaled et al. (2020); Woodworth et al. 
(2020a) focus on the homogeneous setting where data for each worker are independent and identically distributed (IID). Li et al. (2019b); Karimireddy et al. (2020); Glasgow et al. (2022); Woodworth et al. (2020b); Wang et al. (2022) study the heterogeneous setting, where workers have non-IID data and local updates may induce \"client drift\" (Karimireddy et al., 2020) and hurt optimization. The error bound of Local SGD obtained by these works is typically inferior to that of SGD with the same global batch size for fixed number of iterations/epochs and becomes worse as the number of local steps increases, revealing a trade-off between less communication and better optimization. In this paper, we are interested in the generalization aspect of Local SGD in the homogeneous setting, assuming the training loss can be optimized to a small value." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 385, + 504, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 385, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 107, + 385, + 504, + 550 + ], + "type": "text", + "content": "Gradient noise and generalization. The effect of stochastic gradient noise on generalization has been studied from different aspects, e.g., changing the order of learning different patterns Li et al. (2019a), inducing an implicit regularizer in the second-order SDE approximation Smith et al. (2021); Li et al. (2019a). Our work follows a line of works studying the effect of noise in the lens of sharpness, which is long believed to be related to generalization Hochreiter & Schmidhuber (1997); Neyshabur et al. (2017). Keskar et al. (2017) empirically observed that large-batch training leads to worse generalization and sharper minima than small-batch training. Wu et al. (2018); Hu et al. (2017); Ma & Ying (2021) showed that gradient noise destabilizes the training around sharp minima, and Kleinberg et al. (2018); Zhu et al. (2018); Xie et al. 
(2021); Ibayashi & Imaizumi (2021) quantitatively characterized how SGD escapes sharp minima. The most related papers are Blanc et al. (2020); Damian et al. (2021); Li et al. (2021b), which focus on the training dynamics near a manifold of minima and study the effect of noise on sharpness (see also Section 3.2). Though the mathematical definition of sharpness may be vulnerable to the various symmetries in deep neural nets (Dinh et al., 2017), sharpness still appears to be one of the most promising tools for predicting generalization (Jiang et al., 2020; Foret et al., 2021)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 556, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 556, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 556, + 504, + 731 + ], + "type": "text", + "content": "Improving generalization in large-batch training. The generalization issue of the large-batch (or full-batch) training has been observed as early as (Bengio, 2012; LeCun et al., 2012). As mentioned in Section 1, the generalization issue of large-batch training could be due to the lack of a sufficient amount of stochastic noise. To make up the noise in large-batch training, Krizhevsky (2014); Goyal et al. (2017) empirically discovered the Linear Scaling Rule for SGD, which suggests enlarging the learning rate proportionally to the batch size. Jastrzebski et al. (2017) adopted an SDE-based analysis to justify that this scaling rule indeed retains the same amount of noise as small-batch training (see also Section 3.1). However, the SDE approximation may fail if the learning rate is too large (Li et al., 2021a), especially in the early phase of training before the first learning rate decay (Smith et al., 2020). Shallue et al. (2019) demonstrated that generalization gap between small- and large-batch training can also depend on many other training hyperparameters. 
Besides enlarging the learning rate, other approaches have also been proposed to reduce the gap, including training longer (Hoffer et al., 2017), learning rate warmup (Goyal et al., 2017), LARS (You et al., 2018), LAMB (You et al., 2020). In this paper, we focus on using Local SGD to improve generalization, but adding local steps is a generic training trick that can also be combined with others, e.g., Local LARS (Lin et al., 2020b), Local Extrap-SGD (Lin et al., 2020a)." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 269, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 269, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 269, + 94 + ], + "type": "text", + "content": "B ADDITIONAL DISCUSSIONS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "content": "Connection to the conventional wisdom that the diffusion term matters more. As mentioned in Section 3.1, it is believed in the literature is that a large diffusion term in the conventional SDE leads to good generalization. 
One may think that the diffusion term in the Slow SDE corresponds to that in the conventional SDE, and thus enlarging the diffusion term rather than the drift term should lead to better generalization. However, we note that both the diffusion and drift terms in the Slow SDEs result from the long-term effects of the diffusion term in the conventional SDE (Slow SDEs become stationary if " + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\Sigma = 0" + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "content": "). This means our view characterizes the role of gradient noise in more detail, and therefore, goes one step further on the conventional wisdom." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 199, + 506, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 199, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 104, + 199, + 506, + 291 + ], + "type": "text", + "content": "Slow SDEs for neural nets with modern training techniques. In modern neural net training, it is common to add normalization layers and weight decay (" + }, + { + "bbox": [ + 104, + 199, + 506, + 291 + ], + "type": "inline_equation", + "content": "L^2" + }, + { + "bbox": [ + 104, + 199, + 506, + 291 + ], + "type": "text", + "content": "-regularization) for better optimization and generalization. However, these techniques lead to violations of our assumptions, e.g., no fixed point exists in the regularized loss (Li et al., 2020; Ahn et al., 2022). Still, a minimizer manifold can be expected to exist for the unregularized loss. Li et al. 
(2022) noted that the drift and diffusion around the manifold proceeds faster in this case, and derived a Slow SDE for SGD that captures " + }, + { + "bbox": [ + 104, + 199, + 506, + 291 + ], + "type": "inline_equation", + "content": "\\mathcal{O}\\left(\\frac{1}{\\eta} \\log \\frac{1}{\\eta}\\right)" + }, + { + "bbox": [ + 104, + 199, + 506, + 291 + ], + "type": "text", + "content": " discrete steps instead of " + }, + { + "bbox": [ + 104, + 199, + 506, + 291 + ], + "type": "inline_equation", + "content": "\\mathcal{O}\\left(\\frac{1}{\\eta^2}\\right)" + }, + { + "bbox": [ + 104, + 199, + 506, + 291 + ], + "type": "text", + "content": ". We believe that our analysis can also be extended to this case, and that adding local steps still results in the effect of strengthening the drift term." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 470, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 470, + 109 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 470, + 109 + ], + "type": "text", + "content": "C IMPLEMENTATION DETAILS OF PARALLEL SGD, LOCAL SGD AND POST-LOCAL SGD" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 120, + 504, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 
120, + 504, + 175 + ], + "spans": [ + { + "bbox": [ + 104, + 120, + 504, + 175 + ], + "type": "text", + "content": "In this section, we present the formal procedures for Parallel SGD, Local SGD and Post-local SGD. Given a training dataset and a data augmentation function, Algorithms 1 and 2 show the implementations of distributed samplers for sampling local batches with and without replacement. Then Algorithms 3 to 5 show the implementations of parallel SGD, Local SGD and Post-local SGD that can run with either of the samplers." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 180, + 506, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 180, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 180, + 506, + 237 + ], + "type": "text", + "content": "Sampling with replacement. Our theory analyzes parallel SGD, Local SGD and Post-local SGD when local batches are sampled with replacement (Algorithm 1). That is, local batches consist of IID samples from the same training distribution " + }, + { + "bbox": [ + 104, + 180, + 506, + 237 + ], + "type": "inline_equation", + "content": "\\hat{D}" + }, + { + "bbox": [ + 104, + 180, + 506, + 237 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 180, + 506, + 237 + ], + "type": "inline_equation", + "content": "\\hat{D}" + }, + { + "bbox": [ + 104, + 180, + 506, + 237 + ], + "type": "text", + "content": " serves as an abstraction of the distribution of an augmented sample drawn from the training dataset. The mathematical formulations are given in Section 1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 243, + 506, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 354 + ], + "type": "text", + "content": "Sampling without replacement. 
Slightly different from our theory, we use the sampling without replacement (Algorithm 2) in our experiments unless otherwise stated. This sampling scheme is standard in practice: it is used by Goyal et al. (2017) for parallel SGD and by Lin et al. (2020b); Ortiz et al. (2021) for Post-local/Local SGD. This sampling scheme works as follows. At the beginning of every epoch, the whole training dataset is shuffled and evenly partitioned into " + }, + { + "bbox": [ + 104, + 243, + 506, + 354 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 243, + 506, + 354 + ], + "type": "text", + "content": " shards. Each worker takes one shard and samples batches without replacement. When all workers pass their own shard, the next epoch begins and the whole dataset is reshuffled. An alternative view is that the workers always share the same dataset. For each epoch, they perform local steps by sampling batches of data without replacement until the dataset contains too few data to form a batch. Then another epoch starts with the dataset reloaded to the initial state." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 358, + 504, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 358, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 504, + 403 + ], + "type": "text", + "content": "Discrepancy in Sampling Schemes. We argue that this discrepancy between theory and experiments on sample schemes is minor. Though sampling without replacement is standard in practice, most previous works, e.g., Wang & Joshi (2019); Li et al. (2021a); Zhang et al. (2020), analyze sampling with replacement for technical simplicity and yields meaningful results." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 408, + 506, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 408, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 408, + 506, + 453 + ], + "type": "text", + "content": "Moreover, even if we change the sampling scheme to with replacement, Local SGD can still improve the generalization of SGD (by merely adding local steps). See Appendix F for the experiments. We believe that the reasons for better generalization of Local SGD with either sampling scheme are similar and leave the analysis for sampling without replacement for future work." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 97, + 100, + 489, + 195 + ], + "blocks": [ + { + "bbox": [ + 105, + 84, + 428, + 98 + ], + "lines": [ + { + "bbox": [ + 105, + 84, + 428, + 98 + ], + "spans": [ + { + "bbox": [ + 105, + 84, + 428, + 98 + ], + "type": "text", + "content": "Algorithm 1: Distributed Sampler on " + }, + { + "bbox": [ + 105, + 84, + 428, + 98 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 105, + 84, + 428, + 98 + ], + "type": "text", + "content": " Workers (Sampling with Replacement)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": 
[ + 97, + 100, + 489, + 195 + ], + "lines": [ + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "spans": [ + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "text", + "content": "Require: shared training dataset " + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "text", + "content": " data augmentation function " + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(\\hat{\\xi})" + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "text", + "content": " \nHyperparameters: local batch size " + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "text", + "content": " \nFunction Sample () on worker k: Draw " + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "text", + "content": " IID samples " + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "inline_equation", + "content": "\\hat{\\xi}_1,\\dots ,\\hat{\\xi}_{B_{\\mathrm{loc}}}" + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "text", + "content": " with replacement; " + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "inline_equation", + "content": "\\xi_b\\gets \\mathcal{A}(\\hat{\\xi}_b)" + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 97, + 100, + 489, + 195 + ], + "type": "inline_equation", + "content": "1\\leq b\\leq B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 100, + 489, + 
195 + ], + "type": "text", + "content": " // apply data augmentation" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 97, + 349, + 489, + 608 + ], + "blocks": [ + { + "bbox": [ + 105, + 334, + 441, + 346 + ], + "lines": [ + { + "bbox": [ + 105, + 334, + 441, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 441, + 346 + ], + "type": "text", + "content": "Algorithm 2: Distributed Sampler on " + }, + { + "bbox": [ + 105, + 334, + 441, + 346 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 105, + 334, + 441, + 346 + ], + "type": "text", + "content": " Workers (Sampling without Replacement)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "lines": [ + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "spans": [ + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": "Require: shared training dataset " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " data augmentation function " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(\\hat{\\xi})" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " \nHyperparameters: local batch size " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " \nConstant: " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "N_{\\mathrm{loc}}\\coloneqq \\left\\lfloor \\frac{|D|}{KB_{\\mathrm{loc}}}\\right\\rfloor" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " // 
number of local batches per worker per epoch \nLocal Variables: " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "c^{(k)}\\gets N_{\\mathrm{loc}}B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " for worker k // number of samples drawn in this epoch \nFunction Sample () on worker k: \nif " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "c^{(k)} = N_{\\mathrm{loc}}B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " then // Now start a new epoch Wait until all the other workers reach this line; // synchronize Draw a random permutation " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " of 1,..., " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "|D|" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " jointly with other workers so that the same permutation is shared among all workers; // reshuffle the dataset " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "Q_{j}^{(k)}\\gets P_{(k - 1)N_{\\mathrm{loc}}B_{\\mathrm{loc}} + j}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "1\\leq j\\leq N_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " // partition the dataset " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "c^{(k)}\\gets 0" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " end \nfor " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + 
"type": "inline_equation", + "content": "i = 1,\\dots ,B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " do " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "\\hat{\\xi}_i\\gets \\hat{\\xi}_i" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " the " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "Q_{c^{(k)} + i}^{(k)}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " th data point of " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " // sample without replacement " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "\\xi_i\\gets \\mathcal{A}(\\hat{\\xi}_i)" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " // apply data augmentation \nend \n" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "c^{(k)}\\gets c^{(k)} + B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " . \nreturn " + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "inline_equation", + "content": "(\\xi_1,\\ldots ,\\xi_{B_{\\mathrm{loc}}})" + }, + { + "bbox": [ + 97, + 349, + 489, + 608 + ], + "type": "text", + "content": " .." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "algorithm" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 97, + 99, + 492, + 230 + ], + "blocks": [ + { + "bbox": [ + 105, + 84, + 279, + 97 + ], + "lines": [ + { + "bbox": [ + 105, + 84, + 279, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 84, + 279, + 97 + ], + "type": "text", + "content": "Algorithm 3: Parallel SGD on " + }, + { + "bbox": [ + 105, + 84, + 279, + 97 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 105, + 84, + 279, + 97 + ], + "type": "text", + "content": " Workers" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "lines": [ + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "spans": [ + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": "Input: loss function " + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "\\ell (\\pmb {\\theta};\\xi)" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": " , initial parameter " + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{0}" + }, + { + "bbox": [ + 97, + 99, + 492, + 
230 + ], + "type": "text", + "content": " \nHyperparameters: total number of iterations " + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": " , learning rate " + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": " , local batch size " + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": " \nfor " + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "t = 0,\\dots ,T - 1" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": " do \nfor each worker k do in parallel \n" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "(\\xi_{k,t,1},\\ldots ,\\xi_{k,t,B_{\\mathrm{loc}}})\\gets \\mathrm{Sample}()" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": " // sample a local batch \n" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "g_{k,t}\\gets \\frac{1}{B_{\\mathrm{loc}}}\\sum_{i = 1}^{B_{\\mathrm{loc}}}\\nabla \\ell (\\pmb {\\theta}_t;\\xi_{k,t,i})" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": " // computing the local gradient \nend \n" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "g_{t}\\gets \\frac{1}{K}\\sum_{k = 1}^{K}g_{k,t}" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": " // all-Reduce aggregation of local gradients \n" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{t + 1}\\gets \\pmb{\\theta}_{t} - 
\\eta_{t}\\pmb{g}_{t}" + }, + { + "bbox": [ + 97, + 99, + 492, + 230 + ], + "type": "text", + "content": " // update the model \nend" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 97, + 282, + 491, + 474 + ], + "blocks": [ + { + "bbox": [ + 106, + 267, + 271, + 280 + ], + "lines": [ + { + "bbox": [ + 106, + 267, + 271, + 280 + ], + "spans": [ + { + "bbox": [ + 106, + 267, + 271, + 280 + ], + "type": "text", + "content": "Algorithm 4: Local SGD on " + }, + { + "bbox": [ + 106, + 267, + 271, + 280 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 106, + 267, + 271, + 280 + ], + "type": "text", + "content": " Workers" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "lines": [ + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "spans": [ + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": "Input: loss function " + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "\\ell (\\pmb {\\theta};\\xi)" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " , initial parameter " + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(0)}" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " \nHyperparameters: total number of rounds " + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " , number of local steps " + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " per round \nHyperparameters: learning rate " + }, + { + "bbox": [ + 97, + 
282, + 491, + 474 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " , local batch size " + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " \nfor " + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "s = 0,\\dots ,R - 1" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " do \nfor each worker k do in parallel " + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "\\theta_{k,0}^{(s)}\\gets \\bar{\\theta}^{(0)};" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " // maintain a local copy of the global iterate \nfor " + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "t = 0,\\ldots ,H - 1" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " do " + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "(\\xi_{k,t,1}^{(s)},\\dots ,\\xi_{k,t,B_{\\mathrm{loc}}}^{(s)})\\leftarrow \\mathrm{Sample}()" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " // sample a local batch \n" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "g_{k,t}^{(s)}\\leftarrow \\frac{1}{B_{\\mathrm{loc}}}\\sum_{i = 1}^{B_{\\mathrm{loc}}}\\nabla \\ell (\\pmb{\\theta}_{k,t}^{(s)};\\xi_{k,t,i}^{(s)})" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " // computing the local gradient \n" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "\\theta_{k,t + 1}^{(s)}\\gets \\theta_{k,t}^{(s)} - \\eta g_{k,t}^{(s)}" + }, + { + "bbox": [ + 97, + 282, + 491, + 
474 + ], + "type": "text", + "content": " // update the local model \nend \nend \n" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s + 1)}\\gets \\frac{1}{K}\\sum_{k = 1}^{K}\\theta_{k,H}^{(s)}" + }, + { + "bbox": [ + 97, + 282, + 491, + 474 + ], + "type": "text", + "content": " // all-Reduce aggregation of local iterates \nend" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 97, + 513, + 455, + 604 + ], + "blocks": [ + { + "bbox": [ + 106, + 499, + 288, + 510 + ], + "lines": [ + { + "bbox": [ + 106, + 499, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 106, + 499, + 288, + 510 + ], + "type": "text", + "content": "Algorithm 5: Post-local SGD on " + }, + { + "bbox": [ + 106, + 499, + 288, + 510 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 106, + 499, + 288, + 510 + ], + "type": "text", + "content": " Workers" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "lines": [ + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "spans": [ + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": "1 Input: loss function " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "\\ell (\\pmb {\\theta};\\xi)" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " , initial parameter " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{0}" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " \n2 Hyperparameters: total number of iterations " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " , learning 
rate " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " , local batch size " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " \n3 Hyperparameters: switching time point " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " , number of local steps " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " per round \n4 Ensure: " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "T - t_0" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " is a multiple of " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " \n5 Starting from " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{0}" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " , run Parallel SGD for " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " iterations and obtain " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{t_0}" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " . 
\n6 Starting from " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{t_0}" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " , run Local SGD for " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "\\frac{1}{H} (T - t_0)" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " rounds with " + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 97, + 513, + 455, + 604 + ], + "type": "text", + "content": " local steps per round; \n7 return the final global iterate of Local SGD ;" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "algorithm" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 463, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 463, + 94 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 463, + 94 + ], + "type": "text", + "content": "D MODELING LOCAL SGD WITH MULTIPLE CONVENTIONAL SDES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 105, + 506, + 177 + 
], + "spans": [ + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "text", + "content": "Lin et al. (2020b) tried to informally explain the success of Local SGD by adopting the argument that larger diffusion term in the conventional SDE leads to better generalization (see Section 3.1 and appendix A). Basically, they attempted to write multiple SDEs, each of which describes the " + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "text", + "content": "-step local training process of each worker in each round (from " + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\theta_{k,0}^{(s)}" + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\theta_{k,H}^{(s)}" + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "text", + "content": "). 
The key difference between each of these SDEs and the SDE for SGD (3) is that the former one has a larger diffusion term because the workers use batch size " + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 105, + 506, + 177 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 206, + 181, + 504, + 207 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 181, + 504, + 207 + ], + "spans": [ + { + "bbox": [ + 206, + 181, + 504, + 207 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {X} (t) = - \\nabla \\mathcal {L} (\\boldsymbol {X}) \\mathrm {d} t + \\sqrt {\\frac {\\eta}{B _ {\\mathrm {l o c}}}} \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {X}) \\mathrm {d} \\boldsymbol {W} _ {t}. \\tag {10}", + "image_path": "92f35ffab084661c5d7c6fbd6c38e4c74980575db8f837feb337d46d90d1bc65.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 211, + 504, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 211, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 211, + 504, + 257 + ], + "type": "text", + "content": "Lin et al. (2020b) then argue that the total amount of \"noise\" in the training dynamics of Local SGD is larger than that of SGD. However, it is hard to see whether it is indeed larger, since the model averaging step at the end of each round can reduce the variance in training and may cancel the effect of having larger diffusion terms." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": "More formally, a complete modeling of Local SGD following this idea should view the sequence of global iterates " + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "inline_equation", + "content": "\\{\\bar{\\theta}^{(s)}\\}" + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": " as a Markov process " + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "inline_equation", + "content": "\\{X^{(s)}\\}" + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_X(x,B,t)" + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": " the distribution of " + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "inline_equation", + "content": "X(t)" + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": " in (3) with initial condition " + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "inline_equation", + "content": "X(0) = x" + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": ". 
Then the Markov transition should be " + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "inline_equation", + "content": "X^{(s + 1)} = \\frac{1}{K}\\sum_{k = 1}^{K}X_{k,H}^{(s)}" + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "inline_equation", + "content": "X_{1,H}^{(s)},\\ldots ,X_{K,H}^{(s)}" + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": " independent samples from " + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_X(X^{(s)},B_{\\mathrm{loc}},H\\eta)" + }, + { + "bbox": [ + 104, + 261, + 505, + 327 + ], + "type": "text", + "content": ", i.e., sampling from (10)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": "Consider one round of model averaging. It is true that " + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathbf{X}}(\\mathbf{X}^{(s)}, B_{\\mathrm{loc}}, H\\eta)" + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": " may have a larger variance than the corresponding SGD baseline " + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathbf{X}}(\\mathbf{X}^{(s)}, B, H\\eta)" + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": " because the former one has a smaller batch size. 
However, it is unclear whether " + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{(s + 1)}" + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": " also has a larger variance than " + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathbf{X}}(\\mathbf{X}^{(s)}, B, H\\eta)" + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": ". This is because " + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{(s + 1)}" + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": " is the average of " + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": " samples, which means we have to compare " + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\frac{1}{K}" + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": " times the variance of " + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathbf{X}}(\\mathbf{X}^{(s)}, B_{\\mathrm{loc}}, H\\eta)" + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": " with the variance of " + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathbf{X}}(\\mathbf{X}^{(s)}, B, H\\eta)" + }, + { + "bbox": [ + 104, + 331, + 504, + 406 + ], + "type": "text", + "content": ". Then it is unclear which one is larger." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 411, + 504, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 411, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 504, + 434 + ], + "type": "text", + "content": "In the special case where " + }, + { + "bbox": [ + 104, + 411, + 504, + 434 + ], + "type": "inline_equation", + "content": "H\\eta" + }, + { + "bbox": [ + 104, + 411, + 504, + 434 + ], + "type": "text", + "content": " is small, " + }, + { + "bbox": [ + 104, + 411, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_X(X^{(s)},B_{\\mathrm{loc}},H\\eta)" + }, + { + "bbox": [ + 104, + 411, + 504, + 434 + ], + "type": "text", + "content": " is approximately equal to the following Gaussian distribution:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 212, + 437, + 504, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 437, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 212, + 437, + 504, + 464 + ], + "type": "interline_equation", + "content": "\\mathcal {N} \\left(\\boldsymbol {X} ^ {(s)} - \\eta H \\nabla \\mathcal {L} \\left(\\boldsymbol {X} ^ {(s)}\\right), \\frac {\\eta^ {2} H}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} \\left(\\boldsymbol {X} ^ {(s)}\\right)\\right) \\tag {11}", + "image_path": "a9a4c1b8c8e9c5adc4b6b336cefefde5ee9f40c6064e60a6a2e2b7b1e68d4d2d.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 468, + 261, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 261, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 261, + 480 + ], + "type": "text", + "content": "Then averaging over " + }, + { + "bbox": [ + 105, + 468, + 261, + 480 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 105, + 468, + 261, + 480 + ], + "type": "text", + "content": " samples gives" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 210, + 485, + 504, + 513 + 
], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 485, + 504, + 513 + ], + "spans": [ + { + "bbox": [ + 210, + 485, + 504, + 513 + ], + "type": "interline_equation", + "content": "\\mathcal {N} \\left(\\boldsymbol {X} ^ {(s)} - \\eta H \\nabla \\mathcal {L} \\left(\\boldsymbol {X} ^ {(s)}\\right), \\frac {\\eta^ {2} H}{B} \\boldsymbol {\\Sigma} \\left(\\boldsymbol {X} ^ {(s)}\\right)\\right), \\tag {12}", + "image_path": "f3e89d434abbd1f78829aec478fc3ae6bc6aa21e3b15bc082be95dc47aead048.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 516, + 504, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 504, + 550 + ], + "type": "text", + "content": "which is exactly the same as the Gaussian approximation of the SGD baseline. This means there do exist certain cases where Lin et al. (2020b)'s argument does not give a good separation between Local SGD and SGD." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 555, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 578 + ], + "type": "text", + "content": "Moreover, we do not gain any further insights from this modeling since it is hard to see how model averaging interacts with the SDEs." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 594, + 392, + 607 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 392, + 607 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 392, + 607 + ], + "type": "text", + "content": "E ADDITIONAL INTERPRETATION OF THE SLOW SDES" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 618, + 388, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 618, + 388, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 388, + 629 + ], + "type": "text", + "content": "E.1 UNDERSTANDING THE DIFFUSION TERM IN THE SLOW SDE" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 639, + 504, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 504, + 696 + ], + "type": "text", + "content": "So far, we have discussed why adding local steps enlarges the drift term in the Slow SDE and why enlarging the drift term can benefit generalization. Besides this, here we remark that another way to accelerate the corresponding semi-gradient method for minimizing the implicit regularizer is to reduce the diffusion term, so that the trajectory more closely follows the drift term. More formally, we propose the following:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": "Hypothesis E.1. 
Starting at a minimizer " + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\zeta_0\\in \\Gamma" + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": ", run " + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "inline_equation", + "content": "(\\kappa_{1},\\kappa_{2})" + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": "-Slow SDE and " + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "inline_equation", + "content": "(\\kappa_{1},\\kappa_{2}^{\\prime})" + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": "-Slow SDE respectively for the same amount of time " + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "inline_equation", + "content": "T > 0" + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": " and obtain " + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\zeta (T),\\zeta '(T)" + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": ". 
If " + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{\\parallel}\\neq \\mathbf{0}" + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\kappa_{1} < \\kappa_{1}^{\\prime}" + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": " then the expected test accuracy at " + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\zeta (T)" + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": " is better than that at " + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\zeta^{\\prime}(T)" + }, + { + "bbox": [ + 104, + 697, + 504, + 733 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 140, + 53, + 294, + 148 + ], + "blocks": [ + { + "bbox": [ + 140, + 53, + 294, + 148 + ], + "lines": [ + { + "bbox": [ + 140, + 53, + 294, + 148 + ], + "spans": [ + { + "bbox": [ + 140, + 53, + 294, + 148 + ], + "type": "image", + "image_path": 
"e0c0b984628c8eae8c0add91a8226ad06ea92b62a4d4e5aef542e842730b3d1d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 148, + 154, + 284, + 165 + ], + "lines": [ + { + "bbox": [ + 148, + 154, + 284, + 165 + ], + "spans": [ + { + "bbox": [ + 148, + 154, + 284, + 165 + ], + "type": "text", + "content": "(a) CIFAR-10, " + }, + { + "bbox": [ + 148, + 154, + 284, + 165 + ], + "type": "inline_equation", + "content": "H = 600" + }, + { + "bbox": [ + 148, + 154, + 284, + 165 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 148, + 154, + 284, + 165 + ], + "type": "inline_equation", + "content": "K > 1" + }, + { + "bbox": [ + 148, + 154, + 284, + 165 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 182, + 720, + 255, + 729 + ], + "lines": [ + { + "bbox": [ + 182, + 720, + 255, + 729 + ], + "spans": [ + { + "bbox": [ + 182, + 720, + 255, + 729 + ], + "type": "text", + "content": "(a) diffusion (unchanged)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 271, + 718, + 336, + 727 + ], + "lines": [ + { + "bbox": [ + 271, + 718, + 336, + 727 + ], + "spans": [ + { + "bbox": [ + 271, + 718, + 336, + 727 + ], + "type": "text", + "content": "(b) drift-I (unchanged)" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 312, + 53, + 468, + 148 + ], + "blocks": [ + { + "bbox": [ + 312, + 53, + 468, + 148 + ], + "lines": [ + { + "bbox": [ + 312, + 53, + 468, + 148 + ], + "spans": [ + { + "bbox": [ + 312, + 53, + 468, + 148 + ], + "type": "image", + "image_path": "8ff4d6cbc4e8c06b0fb2a0179d56166cdb3698769edde39d3752e98467c422f8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 326, + 154, + 454, + 165 + ], + "lines": [ + { + "bbox": [ + 326, + 154, + 454, + 165 + ], + 
"spans": [ + { + "bbox": [ + 326, + 154, + 454, + 165 + ], + "type": "text", + "content": "(b) ImageNet, " + }, + { + "bbox": [ + 326, + 154, + 454, + 165 + ], + "type": "inline_equation", + "content": "H = 78" + }, + { + "bbox": [ + 326, + 154, + 454, + 165 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 326, + 154, + 454, + 165 + ], + "type": "inline_equation", + "content": "K > 1" + }, + { + "bbox": [ + 326, + 154, + 454, + 165 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 170, + 504, + 205 + ], + "lines": [ + { + "bbox": [ + 104, + 170, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 504, + 205 + ], + "type": "text", + "content": "Figure 3: Reducing the diffusion term of the Slow SDE for Local SGD leads to better generalization. Test accuracy improves as we increase " + }, + { + "bbox": [ + 104, + 170, + 504, + 205 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 170, + 504, + 205 + ], + "type": "text", + "content": " with fixed " + }, + { + "bbox": [ + 104, + 170, + 504, + 205 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 170, + 504, + 205 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 170, + 504, + 205 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 170, + 504, + 205 + ], + "type": "text", + "content": " to reduce the diffusion term while keeping the drift term untouched. See Appendix M.4 for details." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 374, + 718, + 434, + 727 + ], + "lines": [ + { + "bbox": [ + 374, + 718, + 434, + 727 + ], + "spans": [ + { + "bbox": [ + 374, + 718, + 434, + 727 + ], + "type": "text", + "content": "(c) drift-II (rescaled)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "text", + "content": "Here we exclude the case of " + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}_{\\parallel} \\equiv \\mathbf{0}" + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "text", + "content": " because in this case the diffusion term in the Slow SDE is always zero. To verify Hypothesis E.1, we set the product " + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "inline_equation", + "content": "\\alpha \\coloneqq \\eta H" + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "text", + "content": " large, keep " + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "inline_equation", + "content": "H, \\eta" + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "text", + "content": " fixed, increase the number of workers " + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "text", + "content": ", and compare the generalization performances after a fixed amount of training steps (but after different numbers of epochs). 
This case corresponds to the " + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "inline_equation", + "content": "(\\frac{1}{KB_{\\mathrm{loc}}}, \\frac{1}{2B_{\\mathrm{loc}}})" + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "text", + "content": "-Slow SDE, so adding more workers should reduce the diffusion term. As shown in Figure 3, a higher test accuracy is indeed achieved for larger " + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 216, + 504, + 286 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 289, + 504, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 504, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 504, + 368 + ], + "type": "text", + "content": "**Implication:** Enlarging the learning rate is not equally effective as adding local steps. Given that Local SGD improves generalization by strengthening the drift term, it is natural to wonder if enlarging the learning rate of SGD would also lead to similar improvements. While it is true that enlarging the learning rate effectively increases the drift term, it also increases the diffusion term simultaneously, which can hinder the implicit regularization by Hypothesis E.1. In contrast, adding local steps does not change the diffusion term. As shown in Figure 6(a), even when the learning rate of SGD is increased, SGD still underperforms Local SGD by about " + }, + { + "bbox": [ + 104, + 289, + 504, + 368 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 104, + 289, + 504, + 368 + ], + "type": "text", + "content": " in test accuracy." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 372, + 504, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 504, + 431 + ], + "type": "text", + "content": "On the other hand, in the special case of where " + }, + { + "bbox": [ + 104, + 372, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}_{\\parallel} \\equiv \\mathbf{0}" + }, + { + "bbox": [ + 104, + 372, + 504, + 431 + ], + "type": "text", + "content": ", Hypothesis E.1 does not hold, and enlarging the learning rate by " + }, + { + "bbox": [ + 104, + 372, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\sqrt{K}" + }, + { + "bbox": [ + 104, + 372, + 504, + 431 + ], + "type": "text", + "content": " results in the same Slow SDE as adding local steps (see Appendix G for derivation). Then these two actions should produce the same generalization improvement, unless the learning rate is so large that Slow SDE loses track of the training dynamics. As an example of such a special case, an experiment with label noise regularization is presented in Figure 8." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 444, + 388, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 444, + 388, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 388, + 455 + ], + "type": "text", + "content": "E.2 THE EFFECT OF GLOBAL BATCH SIZE ON GENERALIZATION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "type": "text", + "content": "In this section, we discuss the effect of global batch size on the generalization of Local SGD. 
Given that the computation power of a single worker is limited, we consider the case where the local batch size " + }, + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "type": "text", + "content": " is fixed and the global batch size " + }, + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "type": "inline_equation", + "content": "B = KB_{\\mathrm{loc}}" + }, + { + "bbox": [ + 104, + 464, + 504, + 521 + ], + "type": "text", + "content": " is tuned by adding or removing the workers. This scenario is relevant to the practice because one may want to know the maximum parallelism possible to train the neural net without causing generalization degradation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "type": "text", + "content": "For SGD, previous works have proposed the Linear Scaling Rule (LSR) (Krizhevsky, 2014; Goyal et al., 2017; Jastrzebski et al., 2017): scaling the learning rate " + }, + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "type": "inline_equation", + "content": "\\eta \\mapsto \\kappa \\eta" + }, + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "type": "text", + "content": " linearly with the global batch size " + }, + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "type": "inline_equation", + "content": "B \\mapsto \\kappa B" + }, + { + "bbox": [ + 104, + 525, + 504, + 571 + ], + "type": "text", + "content": " yields the same conventional SDE (3) under a constant epoch budget, hence leading to almost the same generalization performance as long as the SDE approximation does not fail." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 574, + 504, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 504, + 653 + ], + "type": "text", + "content": "We show in Theorem H.1 that the LSR does not change the Slow SDE of SGD either. Experiments in Figure 4 show that the LSR indeed holds nicely when we continue training with small learning rates from the same CIFAR-10 and ImageNet checkpoints as in Figure 2. Here we choose " + }, + { + "bbox": [ + 104, + 574, + 504, + 653 + ], + "type": "inline_equation", + "content": "K = 16" + }, + { + "bbox": [ + 104, + 574, + 504, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 574, + 504, + 653 + ], + "type": "inline_equation", + "content": "K = 256" + }, + { + "bbox": [ + 104, + 574, + 504, + 653 + ], + "type": "text", + "content": " as the base settings for CIFAR-10 and ImageNet, respectively, and then tune the learning rate to maximize the test accuracy. As shown in Figures 4(a) and 4(b), the optimal learning rate turns out to be small enough that the LSR can be applied to scale the global batch size with only a minor change in test accuracy." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "text", + "content": "Now, assuming the learning rate is scaled as LSR, we study how to tune the number of local steps " + }, + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "text", + "content": " for Local SGD for better generalization. 
A natural choice is to tune " + }, + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "text", + "content": " in the base settings and keep " + }, + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "text", + "content": " unchanged via scaling " + }, + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "inline_equation", + "content": "H \\mapsto H / \\kappa" + }, + { + "bbox": [ + 104, + 658, + 504, + 692 + ], + "type": "text", + "content": ". Then the following SDE can be derived (see Theorem H.2):" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 126, + 695, + 504, + 718 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 695, + 504, + 718 + ], + "spans": [ + { + "bbox": [ + 126, + 695, + 504, + 718 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {一}} \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {一}} \\underbrace {- \\frac {\\kappa K - 1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t}\\right). 
\\tag {13}", + "image_path": "f3f61440f523701e7db9bb05bd39889cfdb2b9d74b5ab8cbe85b47d6f39d1af1.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 145, + 53, + 299, + 148 + ], + "blocks": [ + { + "bbox": [ + 145, + 53, + 299, + 148 + ], + "lines": [ + { + "bbox": [ + 145, + 53, + 299, + 148 + ], + "spans": [ + { + "bbox": [ + 145, + 53, + 299, + 148 + ], + "type": "image", + "image_path": "3676ff827c60f1a31442e006bbe0b8dc2e7b554d1a323506edaa5e06d9e514d2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 154, + 279, + 165 + ], + "lines": [ + { + "bbox": [ + 164, + 154, + 279, + 165 + ], + "spans": [ + { + "bbox": [ + 164, + 154, + 279, + 165 + ], + "type": "text", + "content": "(a) CIFAR-10, start from #250." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 310, + 52, + 465, + 148 + ], + "blocks": [ + { + "bbox": [ + 310, + 52, + 465, + 148 + ], + "lines": [ + { + "bbox": [ + 310, + 52, + 465, + 148 + ], + "spans": [ + { + "bbox": [ + 310, + 52, + 465, + 148 + ], + "type": "image", + "image_path": "a1e59652e5f629d271daf5ab8ac58410d3e6f780b4b78d3f02453fe297cacbb9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 154, + 444, + 165 + ], + "lines": [ + { + "bbox": [ + 331, + 154, + 444, + 165 + ], + "spans": [ + { + "bbox": [ + 331, + 154, + 444, + 165 + ], + "type": "text", + "content": "(b) ImageNet, start from #100." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 143, + 170, + 298, + 265 + ], + "blocks": [ + { + "bbox": [ + 143, + 170, + 298, + 265 + ], + "lines": [ + { + "bbox": [ + 143, + 170, + 298, + 265 + ], + "spans": [ + { + "bbox": [ + 143, + 170, + 298, + 265 + ], + "type": "image", + "image_path": "16d0dcb2cbba12d9c0d81b5872aaf76706eeae69248c986653a788fee20e9ca3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 163, + 271, + 279, + 282 + ], + "lines": [ + { + "bbox": [ + 163, + 271, + 279, + 282 + ], + "spans": [ + { + "bbox": [ + 163, + 271, + 279, + 282 + ], + "type": "text", + "content": "(c) CIFAR-10, start from #250." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 170, + 463, + 266 + ], + "blocks": [ + { + "bbox": [ + 310, + 170, + 463, + 266 + ], + "lines": [ + { + "bbox": [ + 310, + 170, + 463, + 266 + ], + "spans": [ + { + "bbox": [ + 310, + 170, + 463, + 266 + ], + "type": "image", + "image_path": "d3af3481e36bca72561c65e4fdd5cf3f1fcc7663812a8c6632e27435a132cad4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 329, + 271, + 443, + 282 + ], + "lines": [ + { + "bbox": [ + 329, + 271, + 443, + 282 + ], + "spans": [ + { + "bbox": [ + 329, + 271, + 443, + 282 + ], + "type": "text", + "content": "(d) ImageNet, start from #100." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "lines": [ + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": "Figure 4: For training from CIFAR-10 and ImageNet checkpoints, Local SGD consistently outperforms SGD " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "(H = 1)" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " across different batch sizes " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " (fixing " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " and varying " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": "), where the learning rate is scaled by 
the LSR " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\eta \\propto B" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": ". Two possible ways of tuning the number of local steps " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " are considered: (1). Tune " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " for the best test accuracy for " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "K = 16" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "K = 256" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " respectively on CIFAR-10 and ImageNet, then scale " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "H \\propto 1 / B" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\alpha \\coloneqq \\eta H" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " is constant; (2). 
Tune " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": " specifically for each " + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 288, + 504, + 355 + ], + "type": "text", + "content": ". See Appendix M.5 for training details." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 373, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 504, + 411 + ], + "type": "text", + "content": "Compared with (4), the drift-II term here is rescaled by a positive factor. Again, when " + }, + { + "bbox": [ + 104, + 373, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 373, + 504, + 411 + ], + "type": "text", + "content": " is large, we can follow the argument in Section 3.3.2 to approximate " + }, + { + "bbox": [ + 104, + 373, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\widehat{\\Psi} (\\zeta)\\approx \\widehat{\\Sigma}_{\\diamond}(\\zeta)" + }, + { + "bbox": [ + 104, + 373, + 504, + 411 + ], + "type": "text", + "content": " and obtain the following " + }, + { + "bbox": [ + 104, + 373, + 504, + 411 + ], + "type": "inline_equation", + "content": "(\\frac{1}{B},\\frac{\\kappa K}{B})" + }, + { + "bbox": [ + 104, + 373, + 504, + 411 + ], + "type": "text", + "content": "-Slow SDE:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 182, + 422, + 504, + 442 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 422, + 504, + 442 + ], + "spans": [ + { + "bbox": [ + 182, + 422, + 504, + 442 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol 
{\\zeta}} \\left(\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} (t) - \\frac {\\kappa K}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t\\right). \\tag {14}", + "image_path": "e1ba7dbcd85d3248fca8de005df07fc96f5beb20b6c8a86b289dcecdd186d9f7.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 451, + 504, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 504, + 496 + ], + "type": "text", + "content": "The drift term of the above SDE is always stronger than SGD (7), as long as there exists more than one worker after the scaling (i.e., " + }, + { + "bbox": [ + 104, + 451, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\kappa K > 1" + }, + { + "bbox": [ + 104, + 451, + 504, + 496 + ], + "type": "text", + "content": "). As expected from Hypothesis 3.1, we observed in the experiments that the generalization performance of Local SGD is always better than or at least comparable to SGD across different batch sizes (see Figures 4(a) and 4(b))." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "text", + "content": "Taking a closer look into the drift term in the Slow SDE (14), we can find that it scales linearly with " + }, + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "text", + "content": ". 
According to Hypothesis 3.1, the SDE is expected to generalize better when adding more workers (" + }, + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\kappa > 1" + }, + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "text", + "content": ") and to generalize worse when removing some workers (" + }, + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\kappa < 1" + }, + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "text", + "content": "). For the latter case, we indeed observed that the test accuracy of Local SGD drops when removing workers. For the case of adding workers, however, we also need to take into account that the LSR specifies a larger learning rate and causes a larger SDE approximation error for the same " + }, + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 500, + 504, + 590 + ], + "type": "text", + "content": ", which may cancel the generalization improvement brought by strengthening the drift term. In the experiments, we observed that the test accuracy does not rise when adding more workers to the base settings." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "text", + "content": " also controls the regularization strength (Section 3.3.3), it would be beneficial to decrease " + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "text", + "content": " for large batch size so as to better trade-off between regularization strength and approximation quality. In Figures 4(c) and 4(d), we plot the optimal value of " + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "text", + "content": " for each batch size, and we indeed observed that the optimal " + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "text", + "content": " drops as we scale up " + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "text", + "content": ". Conversely, a smaller batch size (and hence a smaller learning rate) allows for using a larger " + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "text", + "content": " to enhance regularization while still keeping a low approximation error (Theorem 3.3). 
The test accuracy curves in Figures 4(a) and 4(b) indeed show that setting a larger " + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 594, + 506, + 673 + ], + "type": "text", + "content": " can compensate for the accuracy drop when reducing the batch size." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 693, + 327, + 704 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 693, + 327, + 704 + ], + "spans": [ + { + "bbox": [ + 105, + 693, + 327, + 704 + ], + "type": "text", + "content": "F ADDITIONAL EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 720, + 452, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 720, + 452, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 720, + 452, + 733 + ], + "type": "text", + "content": "In this section, we present additional experimental results to further verify our finding." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "Supplementary Plot: Training time should be long enough. Figures 5(a) and 5(b) show enlarged views for Figures 2(a) and 2(c) respectively, showing that Local SGD can generalize worse than SGD in the first few epochs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 504, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 504, + 177 + ], + "type": "text", + "content": "Supplementary Plot: Learning rate should be small. Figure 5(c) shows that reducing the learning rate from 0.32 to 0.064 does not lead to test accuracy drop for Local SGD on CIFAR-10, if the training time is allowed to be longer and the number of local steps " + }, + { + "bbox": [ + 104, + 121, + 504, + 177 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 121, + 504, + 177 + ], + "type": "text", + "content": " is set properly. 
Figure 5(d) presents the case where, with a large learning rate, the generalization improvement of Local SGD disappears even starting from a pre-trained model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 239 + ], + "type": "text", + "content": "Supplementary Plot: Reconciling our main finding with Ortiz et al. (2021). In Figure 5(e), the generalization benefit of Local SGD with " + }, + { + "bbox": [ + 104, + 182, + 504, + 239 + ], + "type": "inline_equation", + "content": "H = 24" + }, + { + "bbox": [ + 104, + 182, + 504, + 239 + ], + "type": "text", + "content": " becomes less significant after the learning rate decay at epoch 226, which is consistent with the observation by Ortiz et al. (2021) that the generalization benefit of Local SGD usually disappears after the learning rate decay. But we can preserve the improvement by increasing " + }, + { + "bbox": [ + 104, + 182, + 504, + 239 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 182, + 504, + 239 + ], + "type": "text", + "content": " to 900. Here, we use Local SGD with momentum." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "text", + "content": "Supplementary Plot: Optimal " + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "text", + "content": " gets larger for smaller " + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "text", + "content": ". 
In Figure 5(f), we summarize the optimal " + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "inline_equation", + "content": "\\alpha := \\eta H" + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "text", + "content": " that enables the highest test accuracy for each learning rate in Figure 2(f). We can see that the optimal " + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "text", + "content": " increases as we decrease the learning rate. The reason is that the approximation error bound " + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\sqrt{\\alpha\\eta\\log\\frac{\\alpha}{\\eta\\delta}})" + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "text", + "content": " in Theorem 3.3 decreases with " + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "text", + "content": ", allowing for a larger value of " + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 243, + 504, + 306 + ], + "type": "text", + "content": " to better regularize the model." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 110, + 322, + 227, + 395 + ], + "blocks": [ + { + "bbox": [ + 110, + 322, + 227, + 395 + ], + "lines": [ + { + "bbox": [ + 110, + 322, + 227, + 395 + ], + "spans": [ + { + "bbox": [ + 110, + 322, + 227, + 395 + ], + "type": "image", + "image_path": "4fe5d7371a7100bcba8d8ff41d407fafc7f8d0c675a2f7d7c27b6914f412512b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 106, + 399, + 231, + 411 + ], + "lines": [ + { + "bbox": [ + 106, + 399, + 231, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 399, + 231, + 411 + ], + "type": "text", + "content": "(a) CIFAR-10, start from random." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 246, + 322, + 362, + 395 + ], + "blocks": [ + { + "bbox": [ + 246, + 322, + 362, + 395 + ], + "lines": [ + { + "bbox": [ + 246, + 322, + 362, + 395 + ], + "spans": [ + { + "bbox": [ + 246, + 322, + 362, + 395 + ], + "type": "image", + "image_path": "caa2fbfba3a461f8cfac378f2bf2bfdc47d2fa72a2416418e2b784b3ab4c599e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 248, + 399, + 362, + 411 + ], + "lines": [ + { + "bbox": [ + 248, + 399, + 362, + 411 + ], + "spans": [ + { + "bbox": [ + 248, + 399, + 362, + 411 + ], + "type": "text", + "content": "(b) ImageNet, start from #250." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 383, + 323, + 498, + 395 + ], + "blocks": [ + { + "bbox": [ + 383, + 323, + 498, + 395 + ], + "lines": [ + { + "bbox": [ + 383, + 323, + 498, + 395 + ], + "spans": [ + { + "bbox": [ + 383, + 323, + 498, + 395 + ], + "type": "image", + "image_path": "d0d67ab8cec46763de39bf7c882d39a6c278122af06efbe7c944353a4661de2e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 383, + 399, + 498, + 411 + ], + "lines": [ + { + "bbox": [ + 383, + 399, + 498, + 411 + ], + "spans": [ + { + "bbox": [ + 383, + 399, + 498, + 411 + ], + "type": "text", + "content": "(c) CIFAR-10, start from #100." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 110, + 422, + 227, + 495 + ], + "blocks": [ + { + "bbox": [ + 110, + 422, + 227, + 495 + ], + "lines": [ + { + "bbox": [ + 110, + 422, + 227, + 495 + ], + "spans": [ + { + "bbox": [ + 110, + 422, + 227, + 495 + ], + "type": "image", + "image_path": "1f251751866deb3e4fbe1cf8e012190ad7fb7e1404930e0e6f5b0b64dffd3e77.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 112, + 499, + 226, + 510 + ], + "lines": [ + { + "bbox": [ + 112, + 499, + 226, + 510 + ], + "spans": [ + { + "bbox": [ + 112, + 499, + 226, + 510 + ], + "type": "text", + "content": "(d) ImageNet, start from #100." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 247, + 422, + 362, + 495 + ], + "blocks": [ + { + "bbox": [ + 247, + 422, + 362, + 495 + ], + "lines": [ + { + "bbox": [ + 247, + 422, + 362, + 495 + ], + "spans": [ + { + "bbox": [ + 247, + 422, + 362, + 495 + ], + "type": "image", + "image_path": "63f25ecc16e69a31d38a161ee74e36cd48754561db452ccd63c53c38cd528ed1.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 247, + 499, + 362, + 510 + ], + "lines": [ + { + "bbox": [ + 247, + 499, + 362, + 510 + ], + "spans": [ + { + "bbox": [ + 247, + 499, + 362, + 510 + ], + "type": "text", + "content": "(e) CIFAR-10, start from #150." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 383, + 422, + 498, + 495 + ], + "blocks": [ + { + "bbox": [ + 383, + 422, + 498, + 495 + ], + "lines": [ + { + "bbox": [ + 383, + 422, + 498, + 495 + ], + "spans": [ + { + "bbox": [ + 383, + 422, + 498, + 495 + ], + "type": "image", + "image_path": "80b925b82ab5d697c33f6b926a904856dea02e9328d7cdb3ba25d81786e80019.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 384, + 499, + 497, + 510 + ], + "lines": [ + { + "bbox": [ + 384, + 499, + 497, + 510 + ], + "spans": [ + { + "bbox": [ + 384, + 499, + 497, + 510 + ], + "type": "text", + "content": "(f) ImageNet, optimal " + }, + { + "bbox": [ + 384, + 499, + 497, + 510 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 384, + 499, + 497, + 510 + ], + "type": "text", + "content": " v.s. " + }, + { + "bbox": [ + 384, + 499, + 497, + 510 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 384, + 499, + 497, + 510 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 111, + 563, + 227, + 635 + ], + "blocks": [ + { + "bbox": [ + 104, + 527, + 504, + 551 + ], + "lines": [ + { + "bbox": [ + 104, + 527, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 527, + 504, + 551 + ], + "type": "text", + "content": "Figure 5: Additional experimental results about the effect of the learning rate, training time and the number of local steps. See Appendix M.2 for details." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 111, + 563, + 227, + 635 + ], + "lines": [ + { + "bbox": [ + 111, + 563, + 227, + 635 + ], + "spans": [ + { + "bbox": [ + 111, + 563, + 227, + 635 + ], + "type": "image", + "image_path": "0e19c6e0ffe575769e7933dcb5dda9fdc3428f58151baedbe2d2e1b3a250b1ed.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 124, + 639, + 214, + 651 + ], + "lines": [ + { + "bbox": [ + 124, + 639, + 214, + 651 + ], + "spans": [ + { + "bbox": [ + 124, + 639, + 214, + 651 + ], + "type": "text", + "content": "(a) SGD with various " + }, + { + "bbox": [ + 124, + 639, + 214, + 651 + ], + "type": "inline_equation", + "content": "\\eta" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 128, + 663, + 481, + 675 + ], + "lines": [ + { + "bbox": [ + 128, + 663, + 481, + 675 + ], + "spans": [ + { + "bbox": [ + 128, + 663, + 481, + 675 + ], + "type": "text", + "content": "Figure 6: Additional experimental results on CIFAR-10. See Appendix M.3 for details." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 247, + 563, + 362, + 635 + ], + "blocks": [ + { + "bbox": [ + 247, + 563, + 362, + 635 + ], + "lines": [ + { + "bbox": [ + 247, + 563, + 362, + 635 + ], + "spans": [ + { + "bbox": [ + 247, + 563, + 362, + 635 + ], + "type": "image", + "image_path": "61b57afd24323269dec4a5494e2a94ec95720c1ef0b1b6f084d37af55d1289d1.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 246, + 639, + 364, + 651 + ], + "lines": [ + { + "bbox": [ + 246, + 639, + 364, + 651 + ], + "spans": [ + { + "bbox": [ + 246, + 639, + 364, + 651 + ], + "type": "text", + "content": "(b) SGD with larger batch sizes." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 383, + 563, + 498, + 635 + ], + "blocks": [ + { + "bbox": [ + 383, + 563, + 498, + 635 + ], + "lines": [ + { + "bbox": [ + 383, + 563, + 498, + 635 + ], + "spans": [ + { + "bbox": [ + 383, + 563, + 498, + 635 + ], + "type": "image", + "image_path": "9ab02fb5af9589b4e7d99396f3e0415fad83b4d9248f9232a36feb2295e1c293.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 639, + 504, + 661 + ], + "lines": [ + { + "bbox": [ + 378, + 639, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 378, + 639, + 504, + 661 + ], + "type": "text", + "content": "(c) Post-local SGD, sampling with replacement." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": "SGD generalizes worse even with extensively tuned learning rates. 
In Figure 6(a), we run SGD from both random initialization and the pre-trained model for another 3,000 epochs with various learning rates and report the test accuracy. We can see that none of the SGD runs beat Local SGD with the fixed learning rate " + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\eta = 0.32" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": ". Therefore, the inferior performance of SGD in Figures 2(a) and 2(b) is not due to the improper learning rate and Local SGD indeed generalizes better." + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": "SGD with larger batch sizes performs no better. In Figure 6(b), we enlarge the batch size of SGD and report the test accuracy for various learning rates. 
We can see that SGD with larger batch sizes performs no better and none of the SGD runs outperform Local SGD with the fixed learning rate " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\eta = 0.32" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": ". This result is unsurprising since it is well established in the literature (Jastrzebski et al., 2017; Smith et al., 2020; Keskar et al., 2017) that larger batch size typically leads to worse generalization. See Appendix A for a survey of empirical and theoretical works on understanding and resolving this phenomenon." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 165, + 504, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 504, + 232 + ], + "type": "text", + "content": "Sampling with or without replacement does not matter. Note that there is a slight discrepancy in sampling schemes between our theoretical and experimental setup: the update rules (1) and (2) assume that data are sampled with replacement while most experiments use sampling without replacement (Appendix C). To eliminate the effect of this discrepancy, we conduct additional experiments on Post-local SGD using sampling with replacement (see Figure 6(c)) and Post-local SGD significantly outperforms SGD." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 247, + 486, + 260 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 486, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 486, + 260 + ], + "type": "text", + "content": "G DISCUSSIONS ON LOCAL SGD WITH LABEL NOISE REGULARIZATION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 272, + 447, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 447, + 283 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 447, + 283 + ], + "type": "text", + "content": "G.1 THE SLOW SDE FOR LOCAL SGD WITH LABEL NOISE REGULARIZATION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": "In this subsection, we present the Slow SDE for Local SGD in the case of label noise regularization and show that Local SGD indeed induces a stronger regularization term, which presumably leads to better generalization." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 103, + 329, + 504, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 329, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 103, + 329, + 504, + 363 + ], + "type": "text", + "content": "Theorem G.1 (Slow SDE for Local SGD with label noise regularization). 
For a " + }, + { + "bbox": [ + 103, + 329, + 504, + 363 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 103, + 329, + 504, + 363 + ], + "type": "text", + "content": "-class classification task with cross-entropy loss, the slow SDE of Local SGD with label noise has the following form:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 157, + 368, + 504, + 396 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 368, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 157, + 368, + 504, + 396 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = - \\frac {1}{4 B} \\nabla_ {\\Gamma} \\left(\\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right) + (K - 1) \\cdot \\frac {\\operatorname {t r} \\left(F \\left(2 H \\eta \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right)\\right)}{2 H \\eta}\\right) \\mathrm {d} t, \\tag {15}", + "image_path": "d167d056cbd93387c33a40274948bfc6535fb9b3c25d55f52794663f733b0465.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "inline_equation", + "content": "F(x) \\coloneqq \\int_0^x \\psi(y) \\, \\mathrm{d}y" + }, + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "text", + "content": " and is interpreted as a matrix function. 
Additionally, " + }, + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\nabla_{\\Gamma} f" + }, + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "text", + "content": " stands for the gradient of a function " + }, + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "text", + "content": " projected to the tangent space of " + }, + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 401, + 504, + 426 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 436, + 205, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 205, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 205, + 449 + ], + "type": "text", + "content": "Proof. See Appendix L." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 494, + 437, + 505, + 447 + ], + "blocks": [ + { + "bbox": [ + 494, + 437, + 505, + 447 + ], + "lines": [ + { + "bbox": [ + 494, + 437, + 505, + 447 + ], + "spans": [ + { + "bbox": [ + 494, + 437, + 505, + 447 + ], + "type": "image", + "image_path": "ce5c716a629e476b0ea86ea3fe76183d34c1f7f8c04e9a6e6b894dd51c2c08d7.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 460, + 504, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 504, + 483 + ], + "type": "text", + "content": "Note that the magnitude of the RHS in (15) becomes larger as " + }, + { + "bbox": [ + 104, + 460, + 504, + 483 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 460, + 504, + 483 + ], + "type": "text", + "content": " increases. 
By letting " + }, + { + "bbox": [ + 104, + 460, + 504, + 483 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 460, + 504, + 483 + ], + "type": "text", + "content": " to go to infinity, we further have the following theorem." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 486, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 486, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 486, + 504, + 510 + ], + "type": "text", + "content": "Theorem G.2. As the number of local steps " + }, + { + "bbox": [ + 104, + 486, + 504, + 510 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 486, + 504, + 510 + ], + "type": "text", + "content": " goes to infinity, the slow SDE of Local SGD with label noise (15) can be simplified as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 235, + 514, + 504, + 538 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 514, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 235, + 514, + 504, + 538 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = - \\frac {K}{4 B} \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right) \\mathrm {d} t. \\tag {16}", + "image_path": "53115fe10505f9b8cb987b9c3d90eb1c0ec4637902a3db99072245a8ebb572c8.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 548, + 426, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 426, + 560 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 426, + 560 + ], + "type": "text", + "content": "Proof. We obtain the corollary by simply taking the limit. 
By L'Hospital's rule," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 189, + 564, + 419, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 564, + 419, + 590 + ], + "spans": [ + { + "bbox": [ + 189, + 564, + 419, + 590 + ], + "type": "interline_equation", + "content": "\\lim _ {x \\rightarrow + \\infty} \\frac {F (a x)}{x} = \\lim _ {x \\rightarrow + \\infty} \\frac {\\mathrm {d} F (a x)}{\\mathrm {d} x} = \\lim _ {x \\rightarrow + \\infty} a \\psi (a x) = a.", + "image_path": "8f4ada21ca335d20c15fd6736084258569bfa1e40b001f648a53221313b96c92.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 594, + 151, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 151, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 151, + 605 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 213, + 610, + 504, + 637 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 610, + 504, + 637 + ], + "spans": [ + { + "bbox": [ + 213, + 610, + 504, + 637 + ], + "type": "interline_equation", + "content": "\\lim _ {x \\rightarrow + \\infty} \\frac {\\operatorname {t r} (F (2 H \\eta \\nabla^ {2} \\mathcal {L} (\\zeta)))}{2 H \\eta} = \\operatorname {t r} (\\nabla^ {2} \\mathcal {L} (\\zeta)). \\tag {17}", + "image_path": "a6ef94146b7f85a1b56b89065623ea212e2d9958b213fd7a0380c95783be14a8.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 641, + 261, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 641, + 261, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 261, + 654 + ], + "type": "text", + "content": "Substituting (17) into (15) yields (16)." 
+ } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 494, + 642, + 505, + 651 + ], + "blocks": [ + { + "bbox": [ + 494, + 642, + 505, + 651 + ], + "lines": [ + { + "bbox": [ + 494, + 642, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 494, + 642, + 505, + 651 + ], + "type": "image", + "image_path": "f21d3d40f406238e9caec9860c77af2b568b84405a6757c7b980202a49f88a24.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 665, + 504, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 688 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 688 + ], + "type": "text", + "content": "As introduced in Section 3.3, the Slow SDE for SGD with label noise regularization has the following form:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 235, + 693, + 504, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 693, + 504, + 716 + ], + "spans": [ + { + "bbox": [ + 235, + 693, + 504, + 716 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = - \\frac {1}{4 B} \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right) \\mathrm {d} t, \\tag {18}", + "image_path": "33d103a4055b7116deea04bcbc163e74f8243039e72c293a9ff76841270fdfbf.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 720, + 387, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 720, + 387, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 720, + 387, + 732 + ], + "type": "text", + "content": "which is a deterministic flow that keeps reducing the trace of Hessian." 
+ } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 125, + 87, + 255, + 168 + ], + "blocks": [ + { + "bbox": [ + 125, + 87, + 255, + 168 + ], + "lines": [ + { + "bbox": [ + 125, + 87, + 255, + 168 + ], + "spans": [ + { + "bbox": [ + 125, + 87, + 255, + 168 + ], + "type": "image", + "image_path": "95fa377cc60c341232565c9293256304933bfec9021c461af4ee1f75a97d2d61.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 136, + 174, + 246, + 185 + ], + "lines": [ + { + "bbox": [ + 136, + 174, + 246, + 185 + ], + "spans": [ + { + "bbox": [ + 136, + 174, + 246, + 185 + ], + "type": "text", + "content": "(a) ResNet-56 + GroupNorm." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 278, + 88, + 475, + 168 + ], + "blocks": [ + { + "bbox": [ + 278, + 88, + 475, + 168 + ], + "lines": [ + { + "bbox": [ + 278, + 88, + 475, + 168 + ], + "spans": [ + { + "bbox": [ + 278, + 88, + 475, + 168 + ], + "type": "image", + "image_path": "4d961518e31054ba64c59978b73d65730b2d0232abf93fea368ff5d0e48d6c9a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 322, + 174, + 440, + 185 + ], + "lines": [ + { + "bbox": [ + 322, + 174, + 440, + 185 + ], + "spans": [ + { + "bbox": [ + 322, + 174, + 440, + 185 + ], + "type": "text", + "content": "(b) VGG-16 w/o normalization." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 190, + 504, + 225 + ], + "lines": [ + { + "bbox": [ + 104, + 190, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 104, + 190, + 504, + 225 + ], + "type": "text", + "content": "Figure 7: Local SGD with label noise regularization on CIFAR-10 without data augmentation using " + }, + { + "bbox": [ + 104, + 190, + 504, + 225 + ], + "type": "inline_equation", + "content": "K = 32" + }, + { + "bbox": [ + 104, + 190, + 504, + 225 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 190, + 504, + 225 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}} = 128" + }, + { + "bbox": [ + 104, + 190, + 504, + 225 + ], + "type": "text", + "content": ". A larger number of local steps indeed enables higher test accuracy. For both architectures, we replace ReLU with Swish. See Appendix M.6 for training details." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 251, + 506, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 251, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 251, + 506, + 340 + ], + "type": "text", + "content": "As the trace of Hessian can be seen as a measure for the sharpness of the local loss landscape, (18) indicates that SGD with label noise regularization has an implicit bias toward flatter minima, which presumably promotes generalization (Hochreiter & Schmidhuber, 1997; Keskar et al., 2017; Neyshabur et al., 2017). More concretely, Blanc et al. (2020) and Li et al. (2021b) connect minimizing the trace of Hessian to finding sparse or low-rank solutions for training two-layer linear nets. Damian et al. (2021) empirically showed that good generalization correlates with a smaller trace of Hessian in training ResNets with label noise. Besides, Ma & Ying (2021) connect the trace of Hessian to the smoothness of the function represented by a deep neural net." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 344, + 504, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 344, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 504, + 401 + ], + "type": "text", + "content": "From Theorems G.1 and G.2, we can conclude that Local SGD accelerates the process of sharpness reduction, thereby leading to better generalization. Furthermore, the regularization effect gets stronger for larger " + }, + { + "bbox": [ + 104, + 344, + 504, + 401 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 344, + 504, + 401 + ], + "type": "text", + "content": " and is approximately " + }, + { + "bbox": [ + 104, + 344, + 504, + 401 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 344, + 504, + 401 + ], + "type": "text", + "content": " times that of SGD. 
We also conduct experiments on non-augmented CIFAR-10 with label noise regularization to verify our conclusion. As shown in Figure 7, increasing the number of local steps indeed gives better generalization performance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 415, + 501, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 501, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 501, + 427 + ], + "type": "text", + "content": "G.2 THE EQUIVALENCE OF ENLARGING THE LEARNING RATE AND ADDING LOCAL STEPS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "text", + "content": "In this subsection, we explain in detail why training with label noise regularization is a special case where enlarging the learning rate of SGD can bring the same generalization benefit as adding local steps. 
TWhen we scale up the learning rate of SGD " + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "inline_equation", + "content": "\\eta \\mapsto \\kappa \\eta" + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "text", + "content": " (while keeping other hyperparameters unchanged), the corresponding Slow SDE is (18) with time horizon " + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "inline_equation", + "content": "\\kappa^2 T" + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "text", + "content": ", where SGD tracks a continuous interval of " + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "inline_equation", + "content": "\\kappa^2 \\eta^2" + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "text", + "content": " per step instead of " + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "inline_equation", + "content": "\\eta^2" + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "text", + "content": ". 
After rescaling the time horizon to " + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "text", + "content": " so that SGD tracks a continuous interval of " + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "inline_equation", + "content": "\\eta^2" + }, + { + "bbox": [ + 104, + 436, + 504, + 503 + ], + "type": "text", + "content": " per step, we obtain" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 235, + 510, + 504, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 510, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 235, + 510, + 504, + 536 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\zeta (t) = - \\frac {\\kappa^ {2}}{4 B} \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\zeta)\\right) \\mathrm {d} t. \\tag {19}", + "image_path": "a4107afbfc40d00a4d3100daa18fdddcccadccdf422428323dad7f5f09bc721f.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 542, + 504, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 504, + 589 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 542, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\kappa = \\sqrt{K}" + }, + { + "bbox": [ + 104, + 542, + 504, + 589 + ], + "type": "text", + "content": " in (19) and we obtain the same Slow SDE as (16), which is for Local SGD with a large number of local steps. 
In Figure 8, we conduct experiments to verify that SGD indeed achieves comparable test accuracy to that of Local SGD with a large " + }, + { + "bbox": [ + 104, + 542, + 504, + 589 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 542, + 504, + 589 + ], + "type": "text", + "content": " if its learning rate is scaled up by " + }, + { + "bbox": [ + 104, + 542, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\sqrt{K}" + }, + { + "bbox": [ + 104, + 542, + 504, + 589 + ], + "type": "text", + "content": " that of Local SGD." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 606, + 408, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 408, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 408, + 620 + ], + "type": "text", + "content": "H DERIVING THE SLOW SDE AFTER APPLYING THE LSR" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 632, + 504, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 655 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 655 + ], + "type": "text", + "content": "In this section, we derive the Slow SDEs for SGD and Local SGD after applying the LSR in Appendix E.2. The results are formally summarized in the following theorems." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": "Theorem H.1 (Slow SDE for SGD after applying the LSR). Let Assumptions 3.1 to 3.3 hold. 
Assume that we run SGD with learning rate " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\eta' = \\kappa \\eta" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": " and the number of workers " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "K' = \\kappa K" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": " for some constant " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\kappa > 0" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "T > 0" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": " be a constant and " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\zeta(t)" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": " be the solution to (7) with the initial condition " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\zeta(0) = \\Phi(\\theta_0) \\in \\Gamma" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": ". 
Then for any " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": "-smooth function " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "g(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\max_{0 \\leq s \\leq \\frac{\\kappa T}{\\eta'^2}} \\left| \\mathbb{E}[g(\\Phi(\\pmb{\\theta}_s)] - \\mathbb{E}[g(\\pmb{\\zeta}(s\\eta'^2/\\kappa)] \\right| = \\tilde{\\mathcal{O}}(\\eta'^{0.25})" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}}(\\cdot)" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": " hides log factors and constants that are independent of " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\eta'" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": " but can depend on " + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "inline_equation", + "content": "g(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 659, + 505, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 223, + 83, + 387, + 167 + ], + "blocks": [ + { + "bbox": [ + 223, + 83, + 387, + 167 + ], + "lines": [ + { + "bbox": [ + 223, + 83, + 387, + 167 + ], + "spans": [ + { + "bbox": [ + 223, + 83, + 387, + 167 + ], + "type": "image", + "image_path": "930392f60ac43dce43184eddb2665e2ee62f7f7142ea9db31582d4f43c89203a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "lines": [ + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "text", + "content": "Figure 8: Local SGD with label noise regularization on CIFAR-10 without data augmentation using " + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "inline_equation", + "content": "K = 4" + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}} = 128" + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "text", + "content": ". 
SGD (" + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "inline_equation", + "content": "H = 1" + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "text", + "content": ") indeed achieves comparable test accuracy as Local SGD with a large " + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "text", + "content": " when we scale up its learning rate to " + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "inline_equation", + "content": "\\sqrt{K}" + }, + { + "bbox": [ + 104, + 175, + 506, + 222 + ], + "type": "text", + "content": " times that of Local SGD. See Appendix M.6 for training details." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 247, + 504, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 269 + ], + "type": "text", + "content": "Proof. 
Replacing " + }, + { + "bbox": [ + 104, + 247, + 504, + 269 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 247, + 504, + 269 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 247, + 504, + 269 + ], + "type": "inline_equation", + "content": "\\kappa B" + }, + { + "bbox": [ + 104, + 247, + 504, + 269 + ], + "type": "text", + "content": " in the original Slow SDE for Local SGD (7) gives the following Slow SDE:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 182, + 273, + 504, + 307 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 273, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 182, + 273, + 504, + 307 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\boldsymbol {\\zeta}} \\left(\\underbrace {\\frac {1}{\\sqrt {\\kappa B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} \\underbrace {- \\frac {1}{2 \\kappa B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t}\\right). 
\\tag {20}", + "image_path": "dc4f1a755c73898759a171cfccf10525fbeb90807aea42a0e8c374d5cc2af632.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "content": "Note that the continuous time horizon for (20) is " + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "inline_equation", + "content": "\\kappa T" + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "content": " since after applying the LSR, SGD tracks a continuous interval of " + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "inline_equation", + "content": "\\kappa^2\\eta^2" + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "content": " per step instead of " + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "inline_equation", + "content": "\\eta^2" + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "content": " while the total number of steps is scaled down by " + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "content": ". We can then rescale the time scaling to obtain (7) that holds for " + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 313, + 504, + 348 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": "Theorem H.2 (Slow SDE for Local SGD after applying the LSR). Let Assumptions 3.1 to 3.3 hold. Assume that we run Local SGD with learning rate " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "\\eta' = \\kappa \\eta" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": ", the number of workers " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "K' = \\kappa K" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": ", and the number of local steps " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "H' = \\frac{\\alpha}{\\kappa \\eta}" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": " for some constants " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "\\alpha, \\kappa > 0" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "T > 0" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": " be a constant and " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "\\zeta(t)" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": " be the solution to (21) with the initial condition " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "\\zeta(0) = \\Phi(\\bar{\\theta}^{(0)}) \\in \\Gamma" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": ". Then for any " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": "-smooth function " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "g(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "\\max_{0 \\leq s \\leq \\frac{\\kappa T}{H' \\eta'^2}} |\\mathbb{E}[g(\\Phi(\\bar{\\theta}^{(s)})] - \\mathbb{E}[g(\\zeta(sH' \\eta'^2 / \\kappa)]| = \\tilde{\\mathcal{O}}(\\eta'^{0.25})" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}}(\\cdot)" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": " hides log factors and constants that are independent of " + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "\\eta'" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": " but can depend on " + }, + 
{ + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "inline_equation", + "content": "g(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 353, + 505, + 431 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 125, + 436, + 504, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 436, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 125, + 436, + 504, + 471 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {(a) \\text {d i f f u s i o n (u n c h a n g e d)}} \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(b) \\text {d r i f t - I (u n c h a n g e d)}} \\underbrace {- \\frac {\\kappa K - 1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {(c) \\text {d r i f t - I I (r e s c a l e d)}}\\right). \\tag {21}", + "image_path": "3e747779652ae549e3d4b3c6215ff9925e7b2f71e88d27a78e916dd7bc011025.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 482, + 504, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 482, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 482, + 504, + 503 + ], + "type": "text", + "content": "Proof. 
Replacing " + }, + { + "bbox": [ + 104, + 482, + 504, + 503 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 482, + 504, + 503 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 482, + 504, + 503 + ], + "type": "inline_equation", + "content": "\\kappa B" + }, + { + "bbox": [ + 104, + 482, + 504, + 503 + ], + "type": "text", + "content": " in the original Slow SDE for Local SGD (4) gives the following Slow SDE:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 123, + 509, + 504, + 542 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 509, + 504, + 542 + ], + "spans": [ + { + "bbox": [ + 123, + 509, + 504, + 542 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {\\kappa B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} \\underbrace {- \\frac {1}{2 \\kappa B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(b) d r i f t - I}} \\underbrace {- \\frac {\\kappa K - 1}{2 \\kappa B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(c) d r i f t - I I}}\\right). 
\\tag {22}", + "image_path": "519665d76e506103a945a21ee7ae73151b8b950fe226cdf1f588d6ee6d7f896c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "text", + "content": "Note that the continuous time horizon for (22) is " + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "inline_equation", + "content": "\\kappa T" + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "text", + "content": " since after applying the LSR, Local SGD tracks a continuous interval of " + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "inline_equation", + "content": "\\kappa^2\\eta^2" + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "text", + "content": " per step instead of " + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "inline_equation", + "content": "\\eta^2" + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "text", + "content": " while the total number of steps is scaled down by " + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "text", + "content": ". We can then rescale the time scaling to obtain (21) that holds for " + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 548, + 506, + 583 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 250, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 250, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 250, + 94 + ], + "type": "text", + "content": "I PROOF OF THEOREM 3.1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 504, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 504, + 146 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 504, + 146 + ], + "type": "text", + "content": "This section presents the proof for Theorem 3.1. First, we introduce some notations that will be used throughout this section. 
For the sequence of Local SGD iterates " + }, + { + "bbox": [ + 104, + 106, + 504, + 146 + ], + "type": "inline_equation", + "content": "\\{\\pmb{\\theta}_{k,t}^{(s)}:k\\in [K],0\\leq t\\leq H,s\\geq 0\\}" + }, + { + "bbox": [ + 104, + 106, + 504, + 146 + ], + "type": "text", + "content": ", we introduce an auxiliary sequence " + }, + { + "bbox": [ + 104, + 106, + 504, + 146 + ], + "type": "inline_equation", + "content": "\\{\\hat{u}_t\\}_{t\\in \\mathbb{N}}" + }, + { + "bbox": [ + 104, + 106, + 504, + 146 + ], + "type": "text", + "content": ", which consists of GD iterates from " + }, + { + "bbox": [ + 104, + 106, + 504, + 146 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}^{(0)}" + }, + { + "bbox": [ + 104, + 106, + 504, + 146 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 220, + 152, + 389, + 167 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 152, + 389, + 167 + ], + "spans": [ + { + "bbox": [ + 220, + 152, + 389, + 167 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {u}} _ {0} = \\bar {\\boldsymbol {\\theta}} ^ {(0)}, \\qquad \\hat {\\boldsymbol {u}} _ {t + 1} \\leftarrow \\hat {\\boldsymbol {u}} _ {t} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}).", + "image_path": "489fba74bd6528b35a2243e7098952e3b837afbe6283ad88f4f97923adfb2198.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": "For convenience, let " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_t^{(s)}\\coloneqq \\hat{\\pmb{u}}_{sH + t}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + 
], + "type": "inline_equation", + "content": "\\pmb {z}_{k,sH + t}\\coloneqq \\pmb{z}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " . We will use " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_t^{(s)}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_{sH + t},\\pmb{z}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\pmb {z}_{k,sH + t}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " interchangeably. Recall that we have assumed that " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " -smooth with bounded second and third order derivatives. Let " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\nu_{2}\\coloneqq \\sup_{\\pmb {\\theta}\\in \\mathbb{R}^{d}}\\| \\nabla^{2}\\mathcal{L}(\\pmb {\\theta})\\|_{2}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\nu_{3}\\coloneqq \\sup_{\\pmb {\\theta}\\in \\mathbb{R}^{d}}\\| \\nabla^{3}\\mathcal{L}(\\pmb {\\theta})\\|_{2}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " . 
Since " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\nabla \\ell (\\pmb {\\theta};\\pmb {\\zeta})" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " is bounded, the gradient noise " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "z_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " is also bounded. We denote by " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\sigma_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " an upper bound such that " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\| z_{k,t}^{(s)}\\| _2\\leq \\sigma_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "text", + "content": " holds for all " + }, + { + "bbox": [ + 104, + 171, + 504, + 243 + ], + "type": "inline_equation", + "content": "s,k,t" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "text", + "content": "To prove Theorem 3.1, we will show that both Local SGD iterates " + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}" + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "text", + "content": " and SGD iterates " + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_{sH}" + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "text", + "content": " track GD iterates " + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "inline_equation", + "content": "\\hat{\\boldsymbol{u}}_{sH}" + 
}, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "text", + "content": " closely with high probability. For each client " + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "text", + "content": ", define the following sequence " + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "inline_equation", + "content": "\\{\\hat{Z}_{k,t}:t\\geq 0\\}" + }, + { + "bbox": [ + 104, + 249, + 504, + 286 + ], + "type": "text", + "content": ", which will be used in the proof for bounding the overall effect of noise." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 154, + 292, + 455, + 326 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 292, + 455, + 326 + ], + "spans": [ + { + "bbox": [ + 154, + 292, + 455, + 326 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {Z}} _ {k, t} = \\sum_ {\\tau = 0} ^ {t - 1} \\left[ \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\boldsymbol {z} _ {k, \\tau}, \\quad \\hat {\\boldsymbol {Z}} _ {k, 0} = \\boldsymbol {0}, \\quad \\forall k \\in [ K ].", + "image_path": "3c848fbc901c513a4252e9f3ca2e678cf7133da6e8f35d5ade1b69d46316014f.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 333, + 395, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 333, + 395, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 333, + 395, + 346 + ], + "type": "text", + "content": "The following lemma shows that " + }, + { + "bbox": [ + 104, + 333, + 395, + 346 + ], + "type": "inline_equation", + "content": "\\hat{Z}_{k,t}" + }, + { + "bbox": [ + 104, + 333, + 395, + 346 + ], + "type": "text", + "content": " is concentrated around the origin." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "text", + "content": "Lemma I.1 (Concentration property of " + }, + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "inline_equation", + "content": "\\{\\hat{Z}_{k,t}\\}" + }, + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "text", + "content": "). With probability at least " + }, + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "text", + "content": ", the following holds simultaneously for all " + }, + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "inline_equation", + "content": "0 \\leq t < \\left\\lfloor \\frac{T}{\\eta} \\right\\rfloor" + }, + { + "bbox": [ + 104, + 350, + 504, + 378 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 230, + 384, + 379, + 417 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 384, + 379, + 417 + ], + "spans": [ + { + "bbox": [ + 230, + 384, + 379, + 417 + ], + "type": "interline_equation", + "content": "\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\| _ {2} \\leq \\hat {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 T}{\\eta} \\log \\frac {2 T K}{\\delta \\eta}},", + "image_path": "8ed28e86b941eba6ee5624e17374c8542e93b0a927d93a745ff124c41e4ccc52.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 423, + 204, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 423, + 204, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 423, + 204, + 437 + 
], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 423, + 204, + 437 + ], + "type": "inline_equation", + "content": "\\hat{C}_1\\coloneqq \\exp (T\\nu_2)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 449, + 339, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 339, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 339, + 464 + ], + "type": "text", + "content": "Proof. For each " + }, + { + "bbox": [ + 104, + 449, + 339, + 464 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{Z}}_{k,t}" + }, + { + "bbox": [ + 104, + 449, + 339, + 464 + ], + "type": "text", + "content": ", construct a sequence " + }, + { + "bbox": [ + 104, + 449, + 339, + 464 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\mathbf{Z}}_{k,t,t'}\\}_{t'=0}^t" + }, + { + "bbox": [ + 104, + 449, + 339, + 464 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 174, + 469, + 435, + 503 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 469, + 435, + 503 + ], + "spans": [ + { + "bbox": [ + 174, + 469, + 435, + 503 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {Z}} _ {k, t, t ^ {\\prime}} := \\sum_ {\\tau = 0} ^ {t ^ {\\prime} - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l}))\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\hat {\\boldsymbol {Z}} _ {k, t, 0} = \\boldsymbol {0}.", + "image_path": "31c0c777f082579896f74281d73d3e08b5ae1a726b5b691b8dc502eac1b5541f.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": 
"inline_equation", + "content": "\\| \\nabla^2\\mathcal{L}(\\hat{\\boldsymbol{u}}_l)\\| _2\\leq \\nu_2" + }, + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": "inline_equation", + "content": "l\\geq 0" + }, + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": "text", + "content": ", the following holds for all " + }, + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": "inline_equation", + "content": "0\\leq \\tau < t - 1" + }, + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": "inline_equation", + "content": "0 < t < \\lfloor \\frac{T}{\\eta}\\rfloor" + }, + { + "bbox": [ + 104, + 510, + 501, + 525 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 180, + 530, + 428, + 566 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 530, + 428, + 566 + ], + "spans": [ + { + "bbox": [ + 180, + 530, + 428, + 566 + ], + "type": "interline_equation", + "content": "\\left\\| \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right\\| _ {2} \\leq (1 + \\nu_ {2} \\eta) ^ {t} \\leq \\exp (T \\nu_ {2}) = \\hat {C} _ {1}.", + "image_path": "14b1ea3aa8ac1adc61f8d54e47622f22bc8baee88f2854f9ad6b962640ecf6fe.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 571, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 504, + 597 + ], + "type": "text", + "content": "So " + }, + { + "bbox": [ + 104, + 571, + 504, + 597 + ], + "type": "inline_equation", + "content": "\\{\\hat{Z}_{k,t,t'}\\}_{t' = 0}^t" + }, + { + "bbox": [ + 104, + 571, + 504, + 597 + ], + "type": "text", + "content": " is a martingale 
with " + }, + { + "bbox": [ + 104, + 571, + 504, + 597 + ], + "type": "inline_equation", + "content": "\\| \\hat{Z}_{k,t,t'} - \\hat{Z}_{k,t,t' - 1}\\| _2\\leq \\hat{C}_1\\sigma_{\\max}" + }, + { + "bbox": [ + 104, + 571, + 504, + 597 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 104, + 571, + 504, + 597 + ], + "type": "inline_equation", + "content": "\\hat{Z}_{k,t} = \\hat{Z}_{k,t,t}" + }, + { + "bbox": [ + 104, + 571, + 504, + 597 + ], + "type": "text", + "content": ", by Azuma-Hoeffding's inequality," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 209, + 601, + 399, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 601, + 399, + 647 + ], + "spans": [ + { + "bbox": [ + 209, + 601, + 399, + 647 + ], + "type": "interline_equation", + "content": "\\mathbb {P} (\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\| _ {2} \\geq \\epsilon^ {\\prime}) \\leq 2 \\exp \\left(\\frac {- \\epsilon^ {\\prime 2}}{2 t \\left(\\hat {C} _ {1} \\sigma_ {\\max }\\right) ^ {2}}\\right).", + "image_path": "76931e6aceda9834ffe90bcdfa5a5fce927067a966bc186e0bb4a2aa5e2bb991.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 652, + 504, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 652, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 652, + 504, + 677 + ], + "type": "text", + "content": "Taking union bound on all " + }, + { + "bbox": [ + 104, + 652, + 504, + 677 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 652, + 504, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 652, + 504, + 677 + ], + "type": "inline_equation", + "content": "0 \\leq t \\leq \\left\\lfloor \\frac{T}{\\eta} \\right\\rfloor" + }, + { + "bbox": [ + 104, + 652, + 504, + 677 + ], + "type": "text", + "content": ", we can conclude that with probability at least " + }, + { + "bbox": [ + 104, + 652, + 504, + 677 + ], + "type": 
"inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 652, + 504, + 677 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 167, + 683, + 441, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 683, + 441, + 715 + ], + "spans": [ + { + "bbox": [ + 167, + 683, + 441, + 715 + ], + "type": "interline_equation", + "content": "\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\| _ {2} \\leq \\hat {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 T}{\\eta} \\log \\frac {2 T K}{\\delta \\eta}}, \\quad \\forall 0 \\leq t < \\left\\lfloor \\frac {T}{\\eta} \\right\\rfloor , k \\in [ K ].", + "image_path": "7b55b85380b21242cc332123459c25a1aed2fd432653fdd260f5a9a10b92e633.jpg" + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 494, + 720, + 504, + 730 + ], + "blocks": [ + { + "bbox": [ + 494, + 720, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 494, + 720, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 494, + 720, + 504, + 730 + ], + "type": "image", + "image_path": "449f7d06a1a0e66247d3a67f4b5f904d041aed7497481a0dab7af257988683a6.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 
111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "text", + "content": "The following lemma states that, with high probability, Local SGD iterates " + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "inline_equation", + "content": "\\theta_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}" + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "text", + "content": " closely track the gradient descent iterates " + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_{sH}" + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "inline_equation", + "content": "\\lfloor \\frac{T}{H\\eta}\\rfloor" + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "text", + "content": " rounds." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 114, + 496, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 496, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 496, + 127 + ], + "type": "text", + "content": "Lemma I.2. 
For " + }, + { + "bbox": [ + 104, + 114, + 496, + 127 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 114, + 496, + 127 + ], + "type": "text", + "content": ", the following inequalities hold with probability at least " + }, + { + "bbox": [ + 104, + 114, + 496, + 127 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 114, + 496, + 127 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 133, + 470, + 161 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 133, + 470, + 161 + ], + "spans": [ + { + "bbox": [ + 141, + 133, + 470, + 161 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\hat {\\boldsymbol {u}} _ {s H + t} \\| _ {2} \\leq \\hat {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq s < \\left\\lfloor \\frac {T}{H \\eta} \\right\\rfloor , 0 \\leq t \\leq H,", + "image_path": "6a57174b8ba82dc19d4656a876f388451bbce9bd3caabcbb07d16eaed38e2f0a.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 167, + 124, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 124, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 124, + 177 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 189, + 183, + 421, + 211 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 183, + 421, + 211 + ], + "spans": [ + { + "bbox": [ + 189, + 183, + 421, + 211 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\hat {\\boldsymbol {u}} _ {s H} \\| _ {2} \\leq \\hat {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s \\leq \\left\\lfloor \\frac {T}{H \\eta} \\right\\rfloor ,", + "image_path": 
"d23538f910cbd52781c58358f636ea3bff9e44c36da4515d4837ff1da1294a70.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 218, + 299, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 218, + 299, + 231 + ], + "spans": [ + { + "bbox": [ + 104, + 218, + 299, + 231 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 218, + 299, + 231 + ], + "type": "inline_equation", + "content": "\\hat{C}_3" + }, + { + "bbox": [ + 104, + 218, + 299, + 231 + ], + "type": "text", + "content": " is a constant independent of " + }, + { + "bbox": [ + 104, + 218, + 299, + 231 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 218, + 299, + 231 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 218, + 299, + 231 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 218, + 299, + 231 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "text", + "content": "Proof. Let " + }, + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "inline_equation", + "content": "\\hat{\\Delta}_{k,t}^{(s)}\\coloneqq \\pmb{\\theta}_{k,t}^{(s)} - \\hat{\\pmb{u}}_t^{(s)}" + }, + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "inline_equation", + "content": "\\bar{\\Delta}^{(s)}\\coloneqq \\bar{\\pmb{\\theta}}^{(s)} - \\hat{\\pmb{u}}_{0}^{(s)}" + }, + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "text", + "content": " be the differences between the Local SGD and GD iterates. 
According to the update rule for " + }, + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_t^{(s)}" + }, + { + "bbox": [ + 104, + 246, + 504, + 277 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 229, + 284, + 504, + 302 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 284, + 504, + 302 + ], + "spans": [ + { + "bbox": [ + 229, + 284, + 504, + 302 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} = \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\tag {23}", + "image_path": "c4f630288aa9439c43f138b01fcdec911b2947706c093fe449aa2b4ad9387828.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 235, + 304, + 504, + 320 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 304, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 235, + 304, + 504, + 320 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {u}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t} ^ {(s)}\\right). 
\\tag {24}", + "image_path": "aaa79920eb8391ebcfc33ee8da26d5961390d990628666df75eec0bb45fe2e93.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 325, + 228, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 325, + 228, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 325, + 228, + 338 + ], + "type": "text", + "content": "Subtracting (23) by (24) gives" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 195, + 344, + 504, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 344, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 195, + 344, + 504, + 380 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {\\Delta}} _ {k, t + 1} ^ {(s)} = \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta (\\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) - \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t} ^ {(s)})) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\\\ = \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\eta \\hat {\\boldsymbol {v}} _ {k, t} ^ {(s)}, \\tag {25} \\\\ \\end{array}", + "image_path": "d7ee922e2613f62a73a771298d8778beb1bec11abd906a9abbf326315d9b236d.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{v}}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "text", + "content": " is a remainder term with norm " + }, + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "inline_equation", + 
"content": "\\| \\hat{\\pmb{v}}_{k,t}^{(s)}\\| _2\\leq \\frac{\\nu_3}{2}\\| \\hat{\\pmb{\\Delta}}_{k,t}^{(s)}\\| _2^2" + }, + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "text", + "content": ". For the " + }, + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "text", + "content": "-th round of Local SGD, we can apply (25) " + }, + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 387, + 504, + 414 + ], + "type": "text", + "content": " times to obtain the following:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 419, + 504, + 505 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 419, + 504, + 505 + ], + "spans": [ + { + "bbox": [ + 141, + 419, + 504, + 505 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} = \\left[ \\prod_ {\\tau = 0} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {\\tau} ^ {(s)}\\right)\\right) \\right] \\hat {\\boldsymbol {\\Delta}} _ {k, 0} ^ {(s)} - \\eta \\underbrace {\\sum_ {\\tau = 0} ^ {t - 1} \\left[ \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\right] \\boldsymbol {z} _ {k , \\tau} ^ {(s)}} _ {\\mathcal {T}} \\tag {26} \\\\ + \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\prod_ {l = \\tau + 1} ^ {t - 1} (\\pmb {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\pmb {u}} _ {l} ^ {(s)})) \\hat {\\pmb {v}} _ {k, \\tau} ^ {(s)}. 
\\\\ \\end{array}", + "image_path": "0cd7b32e067ce39585b5b103685ef30f1025a340bbbf3fb30b36329c110b284b.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 511, + 301, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 301, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 301, + 523 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 511, + 301, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 511, + 301, + 523 + ], + "type": "text", + "content": " can be expressed in the following form:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 198, + 529, + 411, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 529, + 411, + 563 + ], + "spans": [ + { + "bbox": [ + 198, + 529, + 411, + 563 + ], + "type": "interline_equation", + "content": "\\mathcal {T} = \\hat {\\boldsymbol {Z}} _ {k, s H + t} - \\left[ \\prod_ {l = s H} ^ {s H + t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {Z}} _ {k, s H}.", + "image_path": "6106ee87b6f6a37d266bf21ddc46af731c26118399d7c070b0f468feb9e31fa6.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 569, + 429, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 569, + 429, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 569, + 429, + 581 + ], + "type": "text", + "content": "Substituting in " + }, + { + "bbox": [ + 104, + 569, + 429, + 581 + ], + "type": "inline_equation", + "content": "t = H" + }, + { + "bbox": [ + 104, + 569, + 429, + 581 + ], + "type": "text", + "content": " and taking the average, we derive the following recursion:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 140, + 587, + 504, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 587, + 504, + 730 + ], + "spans": [ + { 
+ "bbox": [ + 140, + 587, + 504, + 730 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\bar {\\boldsymbol {\\Delta}} ^ {(s + 1)} = \\frac {1}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {\\Delta}} _ {k, H} ^ {(s)} \\\\ = \\left[ \\prod_ {\\tau = 0} ^ {H - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {\\tau} ^ {(s)})\\right) \\right] \\bar {\\boldsymbol {\\Delta}} ^ {(s)} \\\\ - \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {Z}} _ {k, (s + 1) H} + \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\left[ \\prod_ {l = s H} ^ {(s + 1) H - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})) \\right] \\hat {\\boldsymbol {Z}} _ {k, s H} \\\\ + \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\sum_ {\\tau = 0} ^ {H - 1} \\prod_ {l = \\tau + 1} ^ {H - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\hat {\\boldsymbol {v}} _ {k, \\tau} ^ {(s)}. 
\\tag {27} \\\\ \\end{array}", + "image_path": "13d19cced69e731998932e90d7058cfd2f78c588e46d7dd376aa7f64f53314b9.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 223, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 223, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 223, + 95 + ], + "type": "text", + "content": "Applying (27) " + }, + { + "bbox": [ + 105, + 82, + 223, + 95 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 105, + 82, + 223, + 95 + ], + "type": "text", + "content": " times yields" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 138, + 98, + 504, + 133 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 98, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 138, + 98, + 504, + 133 + ], + "type": "interline_equation", + "content": "\\bar {\\boldsymbol {\\Delta}} ^ {(s)} = - \\frac {\\eta}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {Z}} _ {k, s H} + \\frac {\\eta}{K} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\left[ \\prod_ {l = r H + \\tau + 1} ^ {s H} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {v}} _ {k, \\tau} 
^ {(r)}. \\tag {28}", + "image_path": "98ec5acfb869999cfbbb0dbe250b40f0d5ebc1d83e72b8a5716e42f2f7137a3a.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 136, + 257, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 136, + 257, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 136, + 257, + 148 + ], + "type": "text", + "content": "Substitute (28) into (26) and we have" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 144, + 152, + 465, + 259 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 152, + 465, + 259 + ], + "spans": [ + { + "bbox": [ + 144, + 152, + 465, + 259 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} = - \\frac {\\eta}{K} \\sum_ {k ^ {\\prime} \\in [ K ]} \\hat {\\boldsymbol {Z}} _ {k ^ {\\prime}, s H} - \\eta \\hat {\\boldsymbol {Z}} _ {k, s H + t} + \\eta \\left[ \\prod_ {l = s H} ^ {s H + t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})) \\right] \\hat {\\boldsymbol {Z}} _ {k, s H} \\\\ + \\frac {\\eta}{K} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\sum_ {k ^ {\\prime} \\in [ K ]} \\left[ \\prod_ {l = r H + \\tau + 1} ^ {s H + t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {v}} _ {k ^ {\\prime}, \\tau} ^ {(r)} \\\\ + \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\left[ \\prod_ {l = s H + \\tau + 1} ^ {s H + t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {l})\\right) \\right] \\hat {\\boldsymbol {v}} _ {k, \\tau} ^ {(s)}. 
\\\\ \\end{array}", + "image_path": "9a34265f4bad12a9fba29173c3328003f048cf206007b35fb9206be4555c6aac.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 263, + 365, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 263, + 365, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 365, + 275 + ], + "type": "text", + "content": "By Cauchy-Schwartz inequality and triangle inequality, we have" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 161, + 278, + 504, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 278, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 161, + 278, + 504, + 354 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\frac {\\eta}{K} \\left(\\sum_ {k ^ {\\prime} \\in [ K ]} \\left\\| \\hat {\\boldsymbol {Z}} _ {k ^ {\\prime}, s H} \\right\\| _ {2}\\right) + \\eta \\left\\| \\hat {\\boldsymbol {Z}} _ {k, s H + t} \\right\\| _ {2} + \\eta \\hat {C} _ {1} \\left\\| \\hat {\\boldsymbol {Z}} _ {k, s H} \\right\\| _ {2} \\tag {29} \\\\ + \\frac {\\eta \\hat {C} _ {1} \\nu_ {3}}{2 K} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\sum_ {k ^ {\\prime} \\in [ K ]} \\| \\hat {\\boldsymbol {\\Delta}} _ {k ^ {\\prime}, \\tau} ^ {(r)} \\| _ {2} ^ {2} + \\frac {\\eta \\hat {C} _ {1} \\nu_ {3}}{2} \\sum_ {\\tau = 0} ^ {t - 1} \\| \\hat {\\boldsymbol {\\Delta}} _ {k, \\tau} ^ {(s)} \\| _ {2} ^ {2}, \\\\ \\end{array}", + "image_path": "1a7a4478ec4fc3cd9f8f0094ccb2e3d64fa144509cab8798cab8ece8c6a42427.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 358, + 201, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 358, + 201, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 201, + 372 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 358, + 201, + 372 + ], + "type": "inline_equation", + "content": "\\hat{C}_1 = 
\\exp (\\nu_2T)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 376, + 339, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 376, + 339, + 389 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 339, + 389 + ], + "type": "text", + "content": "Below we prove by induction that for " + }, + { + "bbox": [ + 105, + 376, + 339, + 389 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 105, + 376, + 339, + 389 + ], + "type": "text", + "content": ", if" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 173, + 392, + 504, + 426 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 392, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 173, + 392, + 504, + 426 + ], + "type": "interline_equation", + "content": "\\left\\| \\hat {\\boldsymbol {Z}} _ {k, t} \\right\\| _ {2} \\leq \\hat {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 T}{\\eta} \\log \\frac {2 T K}{\\eta \\delta}}, \\quad \\forall 0 \\leq t < \\left\\lfloor \\frac {T}{\\eta} \\right\\rfloor , k \\in [ K ], \\tag {30}", + "image_path": "db2d16660ca301902c9a4c0976e41a3f0702050ea18704711206aa869ff2ef71.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 429, + 459, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 429, + 459, + 445 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 459, + 445 + ], + "type": "text", + "content": "then there exists a constant " + }, + { + "bbox": [ + 105, + 429, + 459, + 445 + ], + "type": "inline_equation", + "content": "\\hat{C}_2" + }, + { + "bbox": [ + 105, + 429, + 459, + 445 + ], + "type": "text", + "content": " such that for all " + }, + { + "bbox": [ + 105, + 429, + 459, + 445 + ], + "type": "inline_equation", + "content": "k\\in [K],0\\leq s < \\left\\lfloor \\frac{T}{\\eta H}\\right\\rfloor" + }, + { + "bbox": [ + 105, + 429, + 459, + 445 + ], + "type": "text", + "content": " and " + }, + { + 
"bbox": [ + 105, + 429, + 459, + 445 + ], + "type": "inline_equation", + "content": "0\\leq t\\leq H" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 244, + 449, + 504, + 480 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 449, + 504, + 480 + ], + "spans": [ + { + "bbox": [ + 244, + 449, + 504, + 480 + ], + "type": "interline_equation", + "content": "\\left\\| \\hat {\\Delta} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\hat {C} _ {2} \\sqrt {\\eta \\log \\frac {2 T K}{\\eta \\delta}}. \\tag {31}", + "image_path": "f0525c5052a315d7287332e3d97e85a56097666d7c44299db4d566405fc38388.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": "First, for all " + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "inline_equation", + "content": "\\| \\hat{\\Delta}_{k,0}^{(0)}\\|_2 = 0" + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": " and hence (31) holds. 
Assuming that (31) holds for all " + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "inline_equation", + "content": "\\hat{\\Delta}_{k',\\tau}^{(r)}" + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "inline_equation", + "content": "k' \\in [K]" + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "inline_equation", + "content": "0 \\leq r < s" + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "inline_equation", + "content": "0 \\leq \\tau \\leq H" + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "inline_equation", + "content": "r = s" + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "inline_equation", + "content": "0 \\leq \\tau < t" + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": ", then by (29), for all " + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 485, + 504, + 523 + ], + "type": "text", + "content": ", the following holds:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 176, + 527, + 434, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 527, + 434, + 559 + ], + "spans": [ + { + "bbox": [ + 176, + 527, + 434, + 559 + ], + "type": "interline_equation", + "content": "\\| \\hat {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} \\| _ {2} \\leq 3 \\hat {C} _ {1} ^ {2} \\sigma_ {\\max} \\sqrt {2 T \\eta \\log \\frac {2 T K}{\\eta \\delta}} + \\hat {C} _ {1} \\hat {C} _ {2} ^ 
{2} T \\eta \\nu_ {3} \\log \\frac {2 T K}{\\eta \\delta}.", + "image_path": "5fcc87fd2221fff659251e2ba8a85ef4578c4eb5022313930b1a48bbb22576c3.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "inline_equation", + "content": "\\hat{C}_2 \\geq 6\\hat{C}_1^2\\sigma_{\\max}\\sqrt{2T}" + }, + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "text", + "content": ". Then for sufficiently small " + }, + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "text", + "content": ", (31) holds. By Lemma I.1, (30) holds with probability at least " + }, + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "text", + "content": ". Furthermore, notice that " + }, + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}^{(s)} - \\hat{\\pmb{u}}_{sH} = \\frac{1}{K}\\sum_{k\\in [K]}\\hat{\\pmb{\\Delta}}_{k,H}^{(s - 1)}" + }, + { + "bbox": [ + 104, + 563, + 506, + 603 + ], + "type": "text", + "content": ". Hence we have the lemma." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 614, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 614, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 614, + 504, + 651 + ], + "type": "text", + "content": "The iterates of standard SGD can be viewed as the local iterates on a single client with the number of local steps " + }, + { + "bbox": [ + 104, + 614, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\left\\lfloor \\frac{T}{\\eta} \\right\\rfloor" + }, + { + "bbox": [ + 104, + 614, + 504, + 651 + ], + "type": "text", + "content": ". Therefore, we can directly apply Lemma I.2 and obtain the following lemma about the SGD iterates " + }, + { + "bbox": [ + 104, + 614, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_t" + }, + { + "bbox": [ + 104, + 614, + 504, + 651 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 652, + 460, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 652, + 460, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 652, + 460, + 666 + ], + "type": "text", + "content": "Corollary 1.1. 
For " + }, + { + "bbox": [ + 105, + 652, + 460, + 666 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 105, + 652, + 460, + 666 + ], + "type": "text", + "content": ", the following holds with probability at least " + }, + { + "bbox": [ + 105, + 652, + 460, + 666 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 105, + 652, + 460, + 666 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 193, + 669, + 416, + 696 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 669, + 416, + 696 + ], + "spans": [ + { + "bbox": [ + 193, + 669, + 416, + 696 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {w} _ {s H} - \\hat {\\boldsymbol {u}} _ {s H} \\| _ {2} \\leq \\hat {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s \\leq \\frac {T}{H \\eta},", + "image_path": "1195d6ae0ca7e897ca116e1ab4256e87b6a6d87e83d5c253a0945e952a2b1568.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 700, + 299, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 700, + 299, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 299, + 712 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 700, + 299, + 712 + ], + "type": "inline_equation", + "content": "\\hat{C}_3" + }, + { + "bbox": [ + 105, + 700, + 299, + 712 + ], + "type": "text", + "content": " is the same constant as in Lemma I.2." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 720, + 468, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 468, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 468, + 733 + ], + "type": "text", + "content": "Applying Lemma I.2 and Corollary I.1 and taking the union bound, we have Theorem 3.1." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 321, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 321, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 321, + 94 + ], + "type": "text", + "content": "J PROOF OUTLINE OF MAIN THEOREMS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "content": "We adopt the general framework proposed by Li et al. (2019a) to bound the closeness of discrete algorithms and SDE solutions via the method of moments. 
However, their framework is not directly applicable to our case since they provide approximation guarantees for discrete algorithms with learning rate " + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\eta^{-1})" + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "content": " steps while we want to capture Local SGD for " + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\eta^{-2})" + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "content": " steps. To overcome this difficulty, we treat " + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}} := \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}} \\right\\rfloor" + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "content": " rounds as a \"giant step\" of Local SGD with an \"effective\" learning rate " + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\eta^{1 - \\beta}" + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "content": " is a constant in " + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "inline_equation", + "content": "(0,1)" + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "content": ", and derive the recursive formulas to compute the moments for the change in every step, every round, and every " + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": 
"inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 209 + ], + "type": "text", + "content": " rounds. The formulation of the recursions requires a detailed analysis of the limiting dynamics of the iterate and careful control of approximation errors." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "text", + "content": "The dynamics of the iterate can be divided into two phases: the approaching phase (Phase 1) and the drift phase (Phase 2). The approaching phase only lasts for " + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log \\frac{1}{\\eta})" + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "text", + "content": " rounds, during which the iterate is quickly driven to the minimizer manifold by the negative gradient and ends up within only " + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}} (\\sqrt{\\eta})" + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "text", + "content": " (see Appendix K.5). 
After that, the iterate enters the drifting phase and moves in the tangent space of " + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "text", + "content": " while staying close to " + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "text", + "content": " (see Appendix K.6). The closeness of the iterates (local and global) and " + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 213, + 504, + 284 + ], + "type": "text", + "content": " is summarized in the following theorem." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "text", + "content": "Theorem J.1 (Closeness of the iterates and " + }, + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "text", + "content": "). 
For " + }, + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log \\frac{1}{\\eta}) \\leq s \\leq \\lfloor T / (H\\eta^2) \\rfloor" + }, + { + "bbox": [ + 102, + 285, + 504, + 310 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 185, + 315, + 424, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 315, + 424, + 342 + ], + "spans": [ + { + "bbox": [ + 185, + 315, + 424, + 342 + ], + "type": "interline_equation", + "content": "\\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\in \\Gamma , \\quad \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\| _ {2} = \\mathcal {O} \\left(\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}\\right).", + "image_path": "c9a14461b8f045eb3e79a368ac85e77776a52c1ff5f148f2933d4d716b6a77dc.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 345, + 378, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 345, + 378, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 378, + 360 + ], + "type": "text", + "content": "Also, for all " + }, + { + "bbox": [ + 104, + 345, + 378, + 360 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log \\frac{1}{\\eta}) \\leq s < \\lfloor T / (H\\eta^2) \\rfloor" + }, + { + "bbox": [ + 104, + 345, + 378, + 360 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 345, + 378, + 360 + ], + "type": 
"inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 345, + 378, + 360 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 345, + 378, + 360 + ], + "type": "inline_equation", + "content": "0 \\leq t \\leq H" + }, + { + "bbox": [ + 104, + 345, + 378, + 360 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 228, + 364, + 392, + 392 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 364, + 392, + 392 + ], + "spans": [ + { + "bbox": [ + 228, + 364, + 392, + 392 + ], + "type": "interline_equation", + "content": "\\| \\pmb {\\theta} _ {k, t} ^ {(s)} - \\Phi (\\bar {\\pmb {\\theta}} ^ {(s)}) \\| _ {2} = \\mathcal {O} \\left(\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}\\right).", + "image_path": "a01f1cf1d10f89538a00e4b8e28c40aa2259aac05f77f29dbbea3a6bf6088d99.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 394, + 311, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 394, + 311, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 394, + 311, + 406 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 394, + 311, + 406 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\cdot)" + }, + { + "bbox": [ + 104, + 394, + 311, + 406 + ], + "type": "text", + "content": " hides constants independent of " + }, + { + "bbox": [ + 104, + 394, + 311, + 406 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 394, + 311, + 406 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 394, + 311, + 406 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 394, + 311, + 406 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 415, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 504, + 437 + ], + "type": "text", + "content": "To control the approximation errors, we also provide a high probability bound for the change of the manifold projection within " + }, + { + "bbox": [ + 104, + 415, + 504, + 437 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 415, + 504, + 437 + ], + "type": "text", + "content": " rounds." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "text", + "content": "Theorem J.2 (High probability bound for the change of manifold projection). For " + }, + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "text", + "content": " with probability at least " + }, + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "inline_equation", + "content": "0 \\leq s \\leq \\lfloor T / (H\\eta^2) \\rfloor - R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "inline_equation", + "content": "0 \\leq r \\leq R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 438, + 504, + 464 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 134, + 466, + 474, + 494 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 466, + 474, + 494 + ], + "spans": [ + { + "bbox": [ + 134, + 466, + 474, + 494 + ], + "type": "interline_equation", + "content": "\\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}), \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s + r)}) \\in \\Gamma , \\quad \\| \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s + r)}) - \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\| _ {2} = \\mathcal {O} \\left(\\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}\\right),", + "image_path": "e3e8e0cf1be6bfb573c26827ddf069045642a7dffc99f8faf3a4818635877eaa.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 496, + 313, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 496, + 313, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 313, + 509 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 496, + 313, + 509 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\cdot)" + }, + { + "bbox": [ + 105, + 496, + 313, + 509 + ], + "type": "text", + "content": " hides constants independent of " + }, + { + "bbox": [ + 105, + 496, + 313, + 509 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 105, + 496, + 313, + 509 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 496, + 313, + 509 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 105, + 496, + 313, + 509 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 516, + 504, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 504, + 540 + ], + "type": "text", + "content": "The proof of Theorems J.1 and J.2 is based on the analysis of the dynamics of the iterate and presented in Appendix K.7." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "text", + "content": "Utilizing Theorems J.1 and J.2, we move on to estimate the first and second moments of the change of the manifold projection every " + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "text", + "content": " rounds. However, the randomness during training might drive the iterate far from the manifold (with a low probability, though), making the dynamics intractable. To tackle this issue, we construct a well-behaved auxiliary sequence " + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\pmb{\\theta}}_{k,t}^{(s)}\\}" + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "text", + "content": ", which is constrained to the neighborhood of " + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "text", + "content": " and equals the original sequence " + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\{\\pmb{\\theta}_{k,t}^{(s)}\\}" + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "text", + "content": " with high probability (see Definition K.5). Then we can formulate recursions for the change of manifold projection of the auxiliary sequence using the nice properties near " + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 544, + 505, + 632 + ], + "type": "text", + "content": ". The estimate of moments is summarized in Theorem K.2." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 635, + 504, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 504, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 504, + 659 + ], + "type": "text", + "content": "Finally, based on the moment estimates, we apply the framework in Li et al. (2019a) to show that the manifold projection and the SDE solution are weak approximations of each other in Appendix K.10." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 673, + 323, + 686 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 673, + 323, + 686 + ], + "spans": [ + { + "bbox": [ + 105, + 673, + 323, + 686 + ], + "type": "text", + "content": "K PROOF DETAILS OF MAIN THEOREMS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "The detailed proof is organized as follows. In Appendix K.1, we introduce the notations that will be used throughout the proof. 
To establish preliminary knowledge, Appendix K.2 provides explicit expression for the projection operator " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\Phi (\\cdot)" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": ", and Appendix K.3 presents lemmas about gradient descent" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": "(GD) and gradient flow (GF). Based on the preliminary knowledge, we construct a nested working zone to characterize the closeness of the iterate and " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": " in Appendix K.4. Appendices K.5 to K.10 make up the main body of the proof. Specifically, Appendices K.5 and K.6 analyze the dynamics of Local SGD iterates for phases 1 and 2, respectively. Utilizing these analyses, we provide the proof of Theorems J.1 and J.2 in Appendix K.7 and the proof of Theorem 3.3 in Appendix K.8. 
Then we derive the estimation for the first and second moments of one \"giant step\" " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\Phi (\\bar{\\theta}^{(s + R_{\\mathrm{grp}})}) - \\Phi (\\bar{\\theta}^{(s)})" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": " in Appendix K.9. Finally, we prove the approximation theorem 3.2 in Appendix K.10." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 174, + 242, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 174, + 242, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 174, + 242, + 185 + ], + "type": "text", + "content": "K.1 ADDITIONAL NOTATIONS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{tot}} \\coloneqq \\left\\lfloor \\frac{T}{H\\eta^2} \\right\\rfloor" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " be the total number of rounds. Denote by " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\phi^{(s)}" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " the manifold projection of the global iterate at the beginning of round " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{k,t}^{(s)} \\coloneqq \\pmb{\\theta}_{k,t}^{(s)} - \\phi^{(s)}" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " be the difference between the local iterate and the manifold projection of the global iterate. Also define " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{x}}_H^{(s)} \\coloneqq \\frac{1}{K}\\sum_{k\\in [K]}\\pmb{x}_{k,H}^{(s)}" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{x}}_0^{(s)} \\coloneqq \\frac{1}{K}\\sum_{k\\in [K]}\\pmb{x}_{k,0}^{(s)}" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " which is the average of " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " among " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " workers at step 0 and " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": ". 
Then for all " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "k\\in [K]" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{k,0}^{(s)} = \\bar{\\pmb{x}}_0^{(s)} = \\bar{\\pmb{\\theta}}^{(s)} - \\phi^{(s)}" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": ". Finally, Since " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\nabla \\ell(\\pmb{\\theta};\\pmb{\\zeta})" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " is bounded, the gradient noise " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "z_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " is also bounded and we denote by " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\sigma_{\\max}" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": " the upper bound such that " + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "inline_equation", + "content": "\\| z_{k,t}^{(s)}\\|_2 \\leq \\sigma_{\\max}, \\forall s,k,t" + }, + { + "bbox": [ + 104, + 194, + 504, + 288 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "text", + "content": "We first introduce the notion of " + }, + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "text", + "content": "-PL. 
We will later show that there exists a neighborhood of the minimizer manifold " + }, + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "text", + "content": " satisfies " + }, + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 292, + 504, + 316 + ], + "type": "text", + "content": "-PL." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "text", + "content": "Definition K.1 (Polyak-Lojasiewicz Condition). For " + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "inline_equation", + "content": "\\mu > 0" + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "text", + "content": ", we say a function " + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot)" + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "text", + "content": " satisfies " + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "text", + "content": "-Polyak-Lojasiewicz condition (abbreviated as " + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "text", + "content": "-PL) on set " + }, + { + "bbox": [ + 104, + 318, + 504, + 342 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 104, + 
318, + 504, + 342 + ], + "type": "text", + "content": " if" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 224, + 346, + 385, + 369 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 346, + 385, + 369 + ], + "spans": [ + { + "bbox": [ + 224, + 346, + 385, + 369 + ], + "type": "interline_equation", + "content": "\\frac {1}{2} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta}) \\| _ {2} ^ {2} \\geq \\mu (\\mathcal {L} (\\boldsymbol {\\theta}) - \\inf _ {\\boldsymbol {\\theta} ^ {\\prime} \\in U} \\mathcal {L} (\\boldsymbol {\\theta} ^ {\\prime})).", + "image_path": "d1c58ab2dc88666f1fd07de0228cd6a530cee514020f0b2eb5611e953cf6dbc5.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": "We then introduce the definitions of the " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": "-ball at a point and the " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": "-neighborhood of a set. 
For " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\epsilon > 0" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "B^{\\epsilon}(\\pmb{\\theta}) \\coloneqq \\{\\pmb{\\theta}' : \\| \\pmb{\\theta}' - \\pmb{\\theta}\\|_2 < \\epsilon\\}" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": " is the open " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": "-ball centered at " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": ". 
For a set " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{Z} \\subseteq \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^{\\epsilon} \\coloneqq \\bigcup_{\\pmb{\\theta} \\in \\mathcal{Z}} B^{\\epsilon}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": "-neighborhood of " + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 380, + 504, + 416 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 428, + 388, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 428, + 388, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 388, + 439 + ], + "type": "text", + "content": "K.2 COMPUTING THE DERIVATIVES OF THE LIMITING MAPPING" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 449, + 504, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 504, + 472 + ], + "type": "text", + "content": "In this subsection, we present lemmas that relate the derivatives of the limiting mapping " + }, + { + "bbox": [ + 104, + 449, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\Phi(\\cdot)" + }, + { + "bbox": [ + 104, + 449, + 504, + 472 + ], + "type": "text", + "content": " to the derivatives of the loss function " + }, + { + "bbox": [ + 104, + 449, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot)" + }, + { + 
"bbox": [ + 104, + 449, + 504, + 472 + ], + "type": "text", + "content": ". We first introduce the operator " + }, + { + "bbox": [ + 104, + 449, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_{\\mathbf{H}}" + }, + { + "bbox": [ + 104, + 449, + 504, + 472 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "text", + "content": "Definition K.2. For a semi-definite symmetric matrix " + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{H} \\in \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "inline_equation", + "content": "\\lambda_j" + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_j" + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "text", + "content": " be the " + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "text", + "content": "-th eigenvalue and eigenvector and " + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_j" + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "text", + "content": "'s form an orthonormal basis of " + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "text", + "content": ". 
Then, define the operator " + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_{\\mathbf{H}}: \\mathbb{R}^{d \\times d} \\to \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 104, + 474, + 504, + 508 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 166, + 508, + 443, + 537 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 508, + 443, + 537 + ], + "spans": [ + { + "bbox": [ + 166, + 508, + 443, + 537 + ], + "type": "interline_equation", + "content": "\\mathcal {V} _ {\\boldsymbol {H}} (\\boldsymbol {M}) := \\sum_ {i, j: \\lambda_ {i} \\neq 0 \\vee \\lambda_ {j} \\neq 0} \\frac {1}{\\lambda_ {i} + \\lambda_ {j}} \\left\\langle \\boldsymbol {M}, \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top}, \\forall \\boldsymbol {M} \\in \\mathbb {R} ^ {d \\times d}.", + "image_path": "e03cea7ff68cf11fa605caa9f8ae8ef55e5eac68ef950b64431dc59276cb47b2.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 541, + 504, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 504, + 571 + ], + "type": "text", + "content": "Intuitively, this operator projects " + }, + { + "bbox": [ + 104, + 541, + 504, + 571 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 541, + 504, + 571 + ], + "type": "text", + "content": " to the base matrix " + }, + { + "bbox": [ + 104, + 541, + 504, + 571 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_i\\mathbf{v}_j^\\top" + }, + { + "bbox": [ + 104, + 541, + 504, + 571 + ], + "type": "text", + "content": " and sums up the projections with weights " + }, + { + "bbox": [ + 104, + 541, + 504, + 571 + ], + "type": "inline_equation", + "content": "\\frac{1}{\\lambda_i + \\lambda_j}" + }, + { + "bbox": [ 
+ 104, + 541, + 504, + 571 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "text", + "content": "Additionally, for " + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "inline_equation", + "content": "\\theta \\in \\Gamma" + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "text", + "content": ", denote by " + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "inline_equation", + "content": "T_{\\theta}" + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "inline_equation", + "content": "T_{\\theta}^{\\perp}" + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "text", + "content": " the tangent and normal space of " + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 578, + 504, + 602 + ], + "type": "text", + "content": " respectively. Lemmas K.1 to K.4 are from Li et al. (2021b). We include them to make the paper self-contained." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "text", + "content": "Lemma K.1 (Lemma C.1 of Li et al. (2021b)). 
For any " + }, + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\Gamma" + }, + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "text", + "content": " and any " + }, + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "inline_equation", + "content": "\\pmb{v} \\in T_{\\pmb{\\theta}}(\\Gamma)" + }, + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "text", + "content": ", it holds that " + }, + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "inline_equation", + "content": "\\nabla^2 \\mathcal{L}(\\pmb{\\theta}) \\pmb{v} = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 605, + 504, + 628 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 630, + 504, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 630, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 630, + 504, + 653 + ], + "type": "text", + "content": "Lemma K.2 (Lemma 4.3 of Li et al. (2021b)). For any " + }, + { + "bbox": [ + 104, + 630, + 504, + 653 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\Gamma" + }, + { + "bbox": [ + 104, + 630, + 504, + 653 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 630, + 504, + 653 + ], + "type": "inline_equation", + "content": "\\partial \\Phi(\\pmb{\\theta}) \\in \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 104, + 630, + 504, + 653 + ], + "type": "text", + "content": " is the projection matrix onto the tangent space " + }, + { + "bbox": [ + 104, + 630, + 504, + 653 + ], + "type": "inline_equation", + "content": "T_{\\pmb{\\theta}}(\\Gamma)" + }, + { + "bbox": [ + 104, + 630, + 504, + 653 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 655, + 504, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 504, + 677 + ], + "type": "text", + "content": "Lemma K.3 (Lemma C.4 of Li et al. (2021b)). For any " + }, + { + "bbox": [ + 104, + 655, + 504, + 677 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\Gamma" + }, + { + "bbox": [ + 104, + 655, + 504, + 677 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 655, + 504, + 677 + ], + "type": "inline_equation", + "content": "\\pmb{u} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 655, + 504, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 655, + 504, + 677 + ], + "type": "inline_equation", + "content": "\\pmb{v} \\in T_{\\pmb{\\theta}}(\\Gamma)" + }, + { + "bbox": [ + 104, + 655, + 504, + 677 + ], + "type": "text", + "content": ", it holds that" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 134, + 682, + 473, + 697 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 682, + 473, + 697 + ], + "spans": [ + { + "bbox": [ + 134, + 682, + 473, + 697 + ], + "type": "interline_equation", + "content": "\\partial^ {2} \\Phi (\\boldsymbol {\\theta}) [ \\boldsymbol {v}, \\boldsymbol {u} ] = - \\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) [ \\boldsymbol {v}, \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) ^ {+} \\boldsymbol {u} ] - \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) ^ {+} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) [ \\boldsymbol {v}, \\partial \\Phi (\\boldsymbol {\\theta}) \\boldsymbol {u} ].", + "image_path": "0bffe6f0c5b74508952e37d4e38a6d513dbbe0ebfd8b12faa21dd258dfd54bfd.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 700, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 
+ 104, + 700, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 715 + ], + "type": "text", + "content": "Lemma K.4 (Lemma C.6 of Li et al. (2021b)). For any " + }, + { + "bbox": [ + 104, + 700, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\Gamma" + }, + { + "bbox": [ + 104, + 700, + 504, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 700, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma} \\in \\operatorname{span}\\{\\pmb{u}\\pmb{u}^{\\top} \\mid \\pmb{u} \\in T_{\\pmb{\\theta}}^{\\perp}(\\Gamma)\\}" + }, + { + "bbox": [ + 104, + 700, + 504, + 715 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 207, + 719, + 402, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 719, + 402, + 734 + ], + "spans": [ + { + "bbox": [ + 207, + 719, + 402, + 734 + ], + "type": "interline_equation", + "content": "\\left\\langle \\partial^ {2} \\Phi (\\boldsymbol {\\theta}), \\boldsymbol {\\Sigma} \\right\\rangle = - \\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) \\left[ \\mathcal {V} _ {\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta})} (\\boldsymbol {\\Sigma}) \\right].", + "image_path": "cb88be7bad0df86c16f73b4e4b5286c61b04c7024eb2a1889316beb150d78d03.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + 
"type": "text", + "content": "34" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 334, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 334, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 334, + 95 + ], + "type": "text", + "content": "Lemma K.5. For all " + }, + { + "bbox": [ + 104, + 82, + 334, + 95 + ], + "type": "inline_equation", + "content": "\\theta \\in \\Gamma" + }, + { + "bbox": [ + 104, + 82, + 334, + 95 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 334, + 95 + ], + "type": "inline_equation", + "content": "\\pmb{u}, \\pmb{v} \\in T_{\\theta}(\\Gamma)" + }, + { + "bbox": [ + 104, + 82, + 334, + 95 + ], + "type": "text", + "content": ", it holds that" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 257, + 99, + 504, + 113 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 99, + 504, + 113 + ], + "spans": [ + { + "bbox": [ + 257, + 99, + 504, + 113 + ], + "type": "interline_equation", + "content": "\\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} [ \\boldsymbol {v} \\boldsymbol {u} ^ {\\top} ] = \\mathbf {0}. \\tag {32}", + "image_path": "4edf0c81073f0d7e850212c812ed50b5dbbf2c10ce12d3aa4fc07123838463aa.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "content": "Proof. This proof is inspired by Lemma C.4 of Li et al. (2021b). 
For any " + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\Gamma" + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "content": ", consider a parameterized smooth curve " + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "inline_equation", + "content": "\\pmb{v}(t), t \\geq 0" + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "inline_equation", + "content": "\\pmb{v}(0) = \\pmb{\\theta}" + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "inline_equation", + "content": "\\pmb{v}'(0) = \\pmb{v}" + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "inline_equation", + "content": "P_{\\parallel}(t) = \\partial \\Phi(\\pmb{v}(t))" + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "inline_equation", + "content": "P_{\\perp}(t) = I - \\partial \\Phi(\\pmb{v}(t))" + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "inline_equation", + "content": "\\pmb{H}(t) = \\nabla^2 \\mathcal{L}(\\pmb{v}(t))" + }, + { + "bbox": [ + 104, + 124, + 506, + 170 + ], + "type": "text", + "content": ". By Lemma C.1 and 4.3 in Li et al. 
(2021b)," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 260, + 175, + 350, + 188 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 175, + 350, + 188 + ], + "spans": [ + { + "bbox": [ + 260, + 175, + 350, + 188 + ], + "type": "interline_equation", + "content": "\\boldsymbol {H} (t) = \\boldsymbol {P} _ {\\perp} (t) \\boldsymbol {H} (t).", + "image_path": "3790764ef45c68d2a8bffd6c6863d585cb955b26c69073595cb307cc9c2bd383.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 192, + 309, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 192, + 309, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 309, + 204 + ], + "type": "text", + "content": "Take the derivative with respect to " + }, + { + "bbox": [ + 104, + 192, + 309, + 204 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 192, + 309, + 204 + ], + "type": "text", + "content": " on both sides," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 209, + 208, + 400, + 239 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 208, + 400, + 239 + ], + "spans": [ + { + "bbox": [ + 209, + 208, + 400, + 239 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\boldsymbol {H} ^ {\\prime} (t) = \\boldsymbol {P} _ {\\perp} (t) \\boldsymbol {H} ^ {\\prime} (t) + \\boldsymbol {P} _ {\\perp} ^ {\\prime} (t) \\boldsymbol {H} (t) \\\\ \\Rightarrow \\boldsymbol {P} _ {\\parallel} (t) \\boldsymbol {H} ^ {\\prime} (t) = \\boldsymbol {P} _ {\\perp} ^ {\\prime} (t) \\boldsymbol {H} (t) = - \\boldsymbol {P} _ {\\parallel} ^ {\\prime} (t) \\boldsymbol {H} (t). 
\\\\ \\end{array}", + "image_path": "b6948cc9b088c748c046927f37e598f47e86d7210691dc1f8bee1bbaab382c27.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 243, + 181, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 243, + 181, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 181, + 253 + ], + "type": "text", + "content": "At " + }, + { + "bbox": [ + 105, + 243, + 181, + 253 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 105, + 243, + 181, + 253 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 242, + 258, + 504, + 273 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 258, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 242, + 258, + 504, + 273 + ], + "type": "interline_equation", + "content": "\\boldsymbol {P} _ {\\parallel} (0) \\boldsymbol {H} ^ {\\prime} (0) = - \\boldsymbol {P} _ {\\parallel} ^ {\\prime} (0) \\boldsymbol {H} (0). 
\tag {33}", + "image_path": "7753043374a66f3f838379b666d864e1d1f808e1beb838f90f8bd5fd0cd58b90.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "content": "WLOG let " + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "inline_equation", + "content": "H(0) = \\mathrm{diag}(\\lambda_1, \\dots, \\lambda_d) \\in \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "inline_equation", + "content": "\\lambda_i = 0" + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "inline_equation", + "content": "m < i \\leq d" + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "content": ". Therefore " + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "inline_equation", + "content": "P_{\\perp}(0) = \\begin{bmatrix} I_m & 0 \\\\ 0 & 0 \\end{bmatrix}" + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "inline_equation", + "content": "P_{\\parallel}(0) = \\begin{bmatrix} 0 & 0 \\\\ 0 & I_{d - m} \\end{bmatrix}" + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "content": ". 
Decompose " + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "inline_equation", + "content": "P_{\\parallel}'(0)" + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "inline_equation", + "content": "H(0)" + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "inline_equation", + "content": "H'(0)" + }, + { + "bbox": [ + 104, + 277, + 504, + 316 + ], + "type": "text", + "content": " as follows." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 319, + 487, + 348 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 319, + 487, + 348 + ], + "spans": [ + { + "bbox": [ + 123, + 319, + 487, + 348 + ], + "type": "interline_equation", + "content": "\\boldsymbol {P} _ {\\parallel} ^ {\\prime} (0) = \\left[ \\begin{array}{c c} \\boldsymbol {P} _ {\\parallel , 1 1} ^ {\\prime} (0) & \\boldsymbol {P} _ {\\parallel , 1 2} ^ {\\prime} (0) \\\\ \\boldsymbol {P} _ {\\parallel , 2 1} ^ {\\prime} (0) & \\boldsymbol {P} _ {\\parallel , 2 2} ^ {\\prime} (0) \\end{array} \\right], \\boldsymbol {H} (0) = \\left[ \\begin{array}{c c} \\boldsymbol {H} _ {1 1} (0) & \\boldsymbol {0} \\\\ \\boldsymbol {0} & \\boldsymbol {0} \\end{array} \\right], \\boldsymbol {H} ^ {\\prime} (0) = \\left[ \\begin{array}{c c} \\boldsymbol {H} _ {1 1} ^ {\\prime} (0) & \\boldsymbol {H} _ {1 2} ^ {\\prime} (0) \\\\ \\boldsymbol {H} _ {2 1} ^ {\\prime} (0) & \\boldsymbol {H} _ {2 2} ^ {\\prime} (0) \\end{array} \\right].", + "image_path": "9c63c502bc6f0211c3082e734681c9b7bdb6ff4c72ad3e6ca0628b02c0184573.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 351, + 308, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 351, + 308, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 351, + 308, + 363 + ], + "type": 
"text", + "content": "Substituting the decomposition into (33), we have" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 203, + 367, + 407, + 396 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 367, + 407, + 396 + ], + "spans": [ + { + "bbox": [ + 203, + 367, + 407, + 396 + ], + "type": "interline_equation", + "content": "\\left[ \\begin{array}{c c} \\mathbf {0} & \\mathbf {0} \\\\ \\mathbf {H} _ {2 1} ^ {\\prime} (0) & \\mathbf {H} _ {2 2} ^ {\\prime} (0) \\end{array} \\right] = - \\left[ \\begin{array}{c c} \\mathbf {P} _ {\\parallel , 1 1} ^ {\\prime} (0) \\mathbf {H} _ {1 1} (0) & \\mathbf {0} \\\\ \\mathbf {P} _ {\\parallel , 2 1} ^ {\\prime} (0) \\mathbf {H} _ {1 1} (0) & \\mathbf {0} \\end{array} \\right].", + "image_path": "75268ace057b19a9adbce70c9d7b6cdf003b1b4bf6f58a0a92120df33659f945.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 399, + 220, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 220, + 412 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 220, + 412 + ], + "type": "text", + "content": "Therefore, " + }, + { + "bbox": [ + 104, + 399, + 220, + 412 + ], + "type": "inline_equation", + "content": "H_{22}'(0) = 0" + }, + { + "bbox": [ + 104, + 399, + 220, + 412 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 201, + 415, + 408, + 443 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 415, + 408, + 443 + ], + "spans": [ + { + "bbox": [ + 201, + 415, + 408, + 443 + ], + "type": "interline_equation", + "content": "\\boldsymbol {P} _ {\\parallel} (0) \\boldsymbol {H} ^ {\\prime} (0) = - \\boldsymbol {P} _ {\\parallel} ^ {\\prime} (0) \\boldsymbol {H} (0) = - \\left[ \\begin{array}{c c} \\boldsymbol {0} & \\boldsymbol {0} \\\\ \\boldsymbol {H} _ {2 1} ^ {\\prime} (0) & \\boldsymbol {0} \\end{array} \\right].", + "image_path": 
"87e1495a3005e7d17b5bf198bc206f7701bd1a8c2fc6535156b96494d43fc315.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "text", + "content": "Any " + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\pmb{u} \\in T_{\\pmb{\\theta}}(\\Gamma)" + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "text", + "content": " can be decomposed as " + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\pmb{u} = [\\pmb{0}, \\pmb{u}_2]^\\top" + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\pmb{u}_2 \\in \\mathbb{R}^{d - m}" + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "text", + "content": ". With this decomposition, we have " + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\pmb{P}_{\\parallel}(0)\\pmb{H}'(0)\\pmb{u} = \\pmb{0}" + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "text", + "content": ". Also, note that " + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\pmb{H}'(0) = \\nabla^3\\mathcal{L}(\\pmb{\\theta})[\\pmb{v}]" + }, + { + "bbox": [ + 104, + 448, + 504, + 475 + ], + "type": "text", + "content": ". 
Hence," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 249, + 479, + 360, + 494 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 479, + 360, + 494 + ], + "spans": [ + { + "bbox": [ + 249, + 479, + 360, + 494 + ], + "type": "interline_equation", + "content": "\\partial \\Phi (\\boldsymbol {\\theta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\theta}) [ \\boldsymbol {v} \\boldsymbol {u} ^ {T} ] = \\boldsymbol {0}.", + "image_path": "37f002be5427cf00cbf977756fb7abb9aadee729607082d657783aa87901926a.jpg" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 494, + 498, + 504, + 508 + ], + "blocks": [ + { + "bbox": [ + 494, + 498, + 504, + 508 + ], + "lines": [ + { + "bbox": [ + 494, + 498, + 504, + 508 + ], + "spans": [ + { + "bbox": [ + 494, + 498, + 504, + 508 + ], + "type": "image", + "image_path": "42ce0cb502203a7079070b686c3695160cc407ba9340f5f62f1024cbd5760322.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 522, + 311, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 522, + 311, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 311, + 533 + ], + "type": "text", + "content": "K.3 PRELIMINARY LEMMAS FOR GD AND GF" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 543, + 506, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 543, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 104, + 543, + 506, + 576 + ], + "type": "text", + "content": "In this subsection, we introduce a few useful preliminary lemmas about gradient descent and gradient flow. Before presenting the lemmas, we introduce some notations and assumptions that will be used in this subsection." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": "Assume that the loss function " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": "-smooth and " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": "-PL in an open, convex neighborhood " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": " of a local minimizer " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}^*" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": ". Denote by " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^* := \\mathcal{L}(\\pmb{\\theta}^*)" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": " the minimum value for simplicity. 
Let " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\epsilon'" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": " be the radius of the open " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\epsilon'" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": "-ball centered at " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}^*" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "B^{\\epsilon'}(\\pmb{\\theta}^*) \\subseteq U" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": ". We also define a potential function " + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi}(\\pmb{\\theta}) := \\sqrt{\\mathcal{L}(\\pmb{\\theta}) - \\mathcal{L}^*}" + }, + { + "bbox": [ + 104, + 582, + 505, + 632 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "type": "text", + "content": "Consider gradient descent iterates " + }, + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "type": "inline_equation", + "content": "\\{\\hat{u}_t\\}_{t\\in \\mathbb{N}}" + }, + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "type": "text", + "content": " following the update rule " + }, + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_{t + 1} = \\hat{\\pmb{u}}_t - \\eta \\nabla \\mathcal{L}(\\hat{\\pmb{u}}_t)" + }, + { + "bbox": [ + 104, + 635, + 504, + 658 + ], + "type": "text", + "content": ". We first introduce the descent lemma for gradient descent." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 659, + 375, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 375, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 375, + 675 + ], + "type": "text", + "content": "Lemma K.6 (Descent lemma for GD). 
If " + }, + { + "bbox": [ + 104, + 659, + 375, + 675 + ], + "type": "inline_equation", + "content": "\\hat{\\boldsymbol{u}}_t\\in U" + }, + { + "bbox": [ + 104, + 659, + 375, + 675 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 659, + 375, + 675 + ], + "type": "inline_equation", + "content": "\\eta \\leq \\frac{1}{\\rho}" + }, + { + "bbox": [ + 104, + 659, + 375, + 675 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 231, + 679, + 378, + 700 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 679, + 378, + 700 + ], + "spans": [ + { + "bbox": [ + 231, + 679, + 378, + 700 + ], + "type": "interline_equation", + "content": "\\frac {\\eta}{2} \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2} \\leq \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1}),", + "image_path": "671ad99214dbbd3c481009da939d23c62e2522c88c9b3e8b20f8f7ef4c9c899e.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 704, + 124, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 704, + 124, + 714 + ], + "spans": [ + { + "bbox": [ + 105, + 704, + 124, + 714 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 219, + 719, + 389, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 719, + 389, + 734 + ], + "spans": [ + { + "bbox": [ + 219, + 719, + 389, + 734 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t + 1}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) \\left(\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t}\\right) - \\mathcal {L} ^ {*}\\right).", + "image_path": "5dd13b480dc4e01e615ca96346d88f3857b02492892118404fe65f7f164e512b.jpg" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, 
+ 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 210, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 210, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 210, + 95 + ], + "type": "text", + "content": "Proof. By " + }, + { + "bbox": [ + 105, + 83, + 210, + 95 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 105, + 83, + 210, + 95 + ], + "type": "text", + "content": "-smoothness," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 173, + 99, + 438, + 168 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 99, + 438, + 168 + ], + "spans": [ + { + "bbox": [ + 173, + 99, + 438, + 168 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1}) \\leq \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) + \\langle \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}), \\hat {\\boldsymbol {u}} _ {t + 1} - \\hat {\\boldsymbol {u}} _ {t} \\rangle + \\frac {\\rho \\eta^ {2}}{2} \\| \\hat {\\boldsymbol {u}} _ {t + 1} - \\hat {\\boldsymbol {u}} _ {t} \\| _ {2} ^ {2} \\\\ = \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\eta (1 - \\frac {\\rho \\eta}{2}) \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2} \\\\ \\leq \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\frac 
{\\eta}{2} \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2} \\\\ \\end{array}", + "image_path": "9b4288e1099fddbec9a5214c8c1eb0d0d1d7cca44d31fab97461839436ebe284.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 173, + 247, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 173, + 247, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 247, + 185 + ], + "type": "text", + "content": "By the definition of " + }, + { + "bbox": [ + 105, + 173, + 247, + 185 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 105, + 173, + 247, + 185 + ], + "type": "text", + "content": "-PL, we have" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 219, + 190, + 390, + 204 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 190, + 390, + 204 + ], + "spans": [ + { + "bbox": [ + 219, + 190, + 390, + 204 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t + 1}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) \\left(\\mathcal {L} \\left(\\hat {\\boldsymbol {u}} _ {t}\\right) - \\mathcal {L} ^ {*}\\right).", + "image_path": "f9c2e799672defcc376c199d0661ea9297032f69870abcbdfa53e558a8208e31.jpg" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 494, + 209, + 504, + 219 + ], + "blocks": [ + { + "bbox": [ + 494, + 209, + 504, + 219 + ], + "lines": [ + { + "bbox": [ + 494, + 209, + 504, + 219 + ], + "spans": [ + { + "bbox": [ + 494, + 209, + 504, + 219 + ], + "type": "image", + "image_path": "7dd68ab7f70f27aac1d64197a58e4432d5d5ec93b7e237eb54a00ccbc8150af3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 232, + 276, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 276, + 246 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 276, + 246 + ], + "type": "text", + "content": 
"Then we prove the Lipschitzness of " + }, + { + "bbox": [ + 104, + 232, + 276, + 246 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi} (\\pmb {\\theta})" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "content": "Lemma K.7 (Lipschitzness of " + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "content": "). " + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "inline_equation", + "content": "\\sqrt{2\\rho}" + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "content": "-Lipschitz for " + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in U" + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "content": ". 
That is, for any " + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_1, \\pmb{\\theta}_2 \\in U" + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 227, + 277, + 383, + 293 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 277, + 383, + 293 + ], + "spans": [ + { + "bbox": [ + 227, + 277, + 383, + 293 + ], + "type": "interline_equation", + "content": "\\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) \\right| \\leq \\sqrt {2 \\rho} \\| \\boldsymbol {\\theta} _ {1} - \\boldsymbol {\\theta} _ {2} \\| _ {2}.", + "image_path": "526bcfdad974b40be96d2ea5cf6e431587d5204763f57ea79737fbd39b63f3bb.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "text", + "content": "Proof. Fix " + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{1}" + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{2}" + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "text", + "content": ". 
Denote by " + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}(t) \\coloneqq (1 - t)\\pmb{\\theta}_{1} + t\\pmb{\\theta}_{2}" + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "text", + "content": " the convex combination of " + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{1}" + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{2}" + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "inline_equation", + "content": "t \\in [0,1]" + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "text", + "content": ". Further define " + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "inline_equation", + "content": "f(t) \\coloneqq \\tilde{\\Psi}(\\pmb{\\theta}(t))" + }, + { + "bbox": [ + 104, + 304, + 504, + 330 + ], + "type": "text", + "content": ". Below we consider two cases." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 341, + 389, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 389, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 389, + 354 + ], + "type": "text", + "content": "Case 1. 
If " + }, + { + "bbox": [ + 104, + 341, + 389, + 354 + ], + "type": "inline_equation", + "content": "\\forall t\\in (0,1)" + }, + { + "bbox": [ + 104, + 341, + 389, + 354 + ], + "type": "inline_equation", + "content": "f(t) > 0" + }, + { + "bbox": [ + 104, + 341, + 389, + 354 + ], + "type": "text", + "content": " , then " + }, + { + "bbox": [ + 104, + 341, + 389, + 354 + ], + "type": "inline_equation", + "content": "f(t)" + }, + { + "bbox": [ + 104, + 341, + 389, + 354 + ], + "type": "text", + "content": " is differentiable on " + }, + { + "bbox": [ + 104, + 341, + 389, + 354 + ], + "type": "inline_equation", + "content": "(0,1)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 190, + 359, + 419, + 497 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 359, + 419, + 497 + ], + "spans": [ + { + "bbox": [ + 190, + 359, + 419, + 497 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| = | f (1) - f (0) | \\\\ = \\left| \\int_ {0} ^ {1} f ^ {\\prime} (t) \\mathrm {d} t \\right| \\\\ = \\left| \\int_ {0} ^ {1} \\left\\langle \\nabla \\tilde {\\Psi} (\\boldsymbol {\\theta} (t)), \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\right\\rangle \\mathrm {d} t \\right| \\\\ = \\left| \\int_ {0} ^ {1} \\frac {\\langle \\nabla \\mathcal {L} (\\boldsymbol {\\theta} (t)) , \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\rangle}{\\sqrt {\\mathcal {L} (\\boldsymbol {\\theta} (t)) - \\mathcal {L} ^ {*}}} \\mathrm {d} t \\right| \\\\ \\leq \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2} \\int_ {0} ^ {1} \\frac {\\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta} (t)) \\| _ {2}}{\\sqrt {\\mathcal {L} (\\boldsymbol {\\theta} (t)) - \\mathcal {L} ^ {*}}} \\mathrm {d} t. 
\\\\ \\end{array}", + "image_path": "fe3d3e7043028a288382c52a36bf564ff712e0f26923299fec43cd3de1bf6d76.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 502, + 258, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 258, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 258, + 514 + ], + "type": "text", + "content": "By " + }, + { + "bbox": [ + 105, + 502, + 258, + 514 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 105, + 502, + 258, + 514 + ], + "type": "text", + "content": "-smoothness of " + }, + { + "bbox": [ + 105, + 502, + 258, + 514 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 105, + 502, + 258, + 514 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 105, + 502, + 258, + 514 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in U" + }, + { + "bbox": [ + 105, + 502, + 258, + 514 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 241, + 518, + 368, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 518, + 368, + 533 + ], + "spans": [ + { + "bbox": [ + 241, + 518, + 368, + 533 + ], + "type": "interline_equation", + "content": "\\left\\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta}) \\right\\| _ {2} ^ {2} \\leq 2 \\rho (\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}).", + "image_path": "c30c4214595271edc14cd8744108920bba093f7c60b5c81b17bbec1a346127d0.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 537, + 420, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 420, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 420, + 559 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 537, + 420, + 559 + ], + "type": "inline_equation", + "content": "\\sqrt{\\mathcal{L}(\\pmb{\\theta}(t)) - \\mathcal{L}^*} > 0" + }, + { + 
"bbox": [ + 104, + 537, + 420, + 559 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 537, + 420, + 559 + ], + "type": "inline_equation", + "content": "t \\in (0,1)" + }, + { + "bbox": [ + 104, + 537, + 420, + 559 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 537, + 420, + 559 + ], + "type": "inline_equation", + "content": "\\frac{\\|\\nabla\\mathcal{L}(\\pmb{\\theta}(t))\\|_2}{\\sqrt{\\mathcal{L}(\\pmb{\\theta}(t)) - \\mathcal{L}^*}} \\leq \\sqrt{2\\rho}" + }, + { + "bbox": [ + 104, + 537, + 420, + 559 + ], + "type": "text", + "content": ". Therefore," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 225, + 564, + 384, + 581 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 564, + 384, + 581 + ], + "spans": [ + { + "bbox": [ + 225, + 564, + 384, + 581 + ], + "type": "interline_equation", + "content": "\\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| \\leq \\sqrt {2 \\rho_ {2}} \\left\\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\right\\| _ {2}.", + "image_path": "d080e29bb45e2bbada93d1b78f4494aba1e3b5b5bacb39b90df30239b00f63ec.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 590, + 307, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 307, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 307, + 604 + ], + "type": "text", + "content": "Case 2. 
If " + }, + { + "bbox": [ + 104, + 590, + 307, + 604 + ], + "type": "inline_equation", + "content": "\\exists t' \\in (0,1)" + }, + { + "bbox": [ + 104, + 590, + 307, + 604 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 590, + 307, + 604 + ], + "type": "inline_equation", + "content": "f(t') = 0" + }, + { + "bbox": [ + 104, + 590, + 307, + 604 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 172, + 608, + 437, + 680 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 608, + 437, + 680 + ], + "spans": [ + { + "bbox": [ + 172, + 608, + 437, + 680 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| = | f (1) - f (0) | \\\\ = \\left| (1 - t ^ {\\prime}) \\frac {f (1) - f (t ^ {\\prime})}{1 - t ^ {\\prime}} + t ^ {\\prime} \\left(\\frac {f (t ^ {\\prime}) - f (0)}{t ^ {\\prime}}\\right) \\right| \\\\ \\leq \\max \\left(\\frac {f (1)}{1 - t ^ {\\prime}}, \\frac {f (0)}{t ^ {\\prime}}\\right). 
\\\\ \\end{array}", + "image_path": "42479d699f28753339950a34d67c96b061b206ceb220ec1f30e6a2fddc245c2a.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}(t')" + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "text", + "content": " minimizes " + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "text", + "content": " in an open set, " + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "inline_equation", + "content": "\\nabla \\mathcal{L}(\\pmb{\\theta}(t')) = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "text", + "content": ". 
By " + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "text", + "content": "-smoothness of " + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in U" + }, + { + "bbox": [ + 104, + 685, + 486, + 698 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 172, + 704, + 436, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 704, + 436, + 730 + ], + "spans": [ + { + "bbox": [ + 172, + 704, + 436, + 730 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\boldsymbol {\\theta}) \\leq \\mathcal {L} ^ {*} + \\frac {\\rho}{2} \\| \\boldsymbol {\\theta} - \\boldsymbol {\\theta} (t ^ {\\prime}) \\| _ {2} ^ {2} \\quad \\Rightarrow \\quad \\tilde {\\Psi} (\\boldsymbol {\\theta}) \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} - \\boldsymbol {\\theta} (t ^ {\\prime}) \\| _ {2}.", + "image_path": "d48af87b8b10bef6fd1182a9b0337c051a4e7c4b9e43312eac6c10429a83ee60.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "text", + "content": "36" + } + ] + } + ], + 
"index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 149, + 92 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 149, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 149, + 92 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 197, + 100, + 413, + 125 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 100, + 413, + 125 + ], + "spans": [ + { + "bbox": [ + 197, + 100, + 413, + 125 + ], + "type": "interline_equation", + "content": "f (1) \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} \\left(t ^ {\\prime}\\right) \\| _ {2} = \\left(1 - t ^ {\\prime}\\right) \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2}", + "image_path": "e8a9bb4a68760393ca0f81397e2e2f1ecbf79ae85054152592d42483da4ee122.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 198, + 128, + 391, + 153 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 128, + 391, + 153 + ], + "spans": [ + { + "bbox": [ + 198, + 128, + 391, + 153 + ], + "type": "interline_equation", + "content": "f (0) \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {1} - \\boldsymbol {\\theta} \\left(t ^ {\\prime}\\right) \\| _ {2} = t ^ {\\prime} \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2}.", + "image_path": "cd81b71fe2c3a1a2cedb84197db96c0f85b3a61844fd1fc3c0c0418e41b86866.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 158, + 164, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 158, + 164, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 164, + 169 + ], + "type": "text", + "content": "Then we have" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 229, + 174, + 381, + 200 
+ ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 174, + 381, + 200 + ], + "spans": [ + { + "bbox": [ + 229, + 174, + 381, + 200 + ], + "type": "interline_equation", + "content": "\\left| \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {2}\\right) - \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {1}\\right) \\right| \\leq \\sqrt {\\frac {\\rho}{2}} \\| \\boldsymbol {\\theta} _ {2} - \\boldsymbol {\\theta} _ {1} \\| _ {2}.", + "image_path": "4fdb33c6cd1f8216e72f6cbaee2fa80fd9d31008ed10607fc81aa965a9c484cb.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 205, + 321, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 321, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 321, + 217 + ], + "type": "text", + "content": "Combining case 1 and case 2, we conclude the proof." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 494, + 205, + 504, + 214 + ], + "blocks": [ + { + "bbox": [ + 494, + 205, + 504, + 214 + ], + "lines": [ + { + "bbox": [ + 494, + 205, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 494, + 205, + 504, + 214 + ], + "type": "image", + "image_path": "7c229c0422d5cc0825d4812c31a06ccbaea051b1488dc6642b0e75cb1a27b46c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 228, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 504, + 251 + ], + "type": "text", + "content": "Below we introduce a lemma that relates the movement of one step gradient descent to the change of the potential function." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 254, + 419, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 254, + 419, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 254, + 419, + 266 + ], + "type": "text", + "content": "Lemma K.8 (Lemma G.1 in Lyu et al. (2022)). If " + }, + { + "bbox": [ + 104, + 254, + 419, + 266 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_t\\in U" + }, + { + "bbox": [ + 104, + 254, + 419, + 266 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 254, + 419, + 266 + ], + "type": "inline_equation", + "content": "\\eta \\leq 1 / \\rho_{2}" + }, + { + "bbox": [ + 104, + 254, + 419, + 266 + ], + "type": "text", + "content": " then" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 220, + 272, + 388, + 296 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 272, + 388, + 296 + ], + "spans": [ + { + "bbox": [ + 220, + 272, + 388, + 296 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t}) - \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t + 1}) \\geq \\frac {\\sqrt {2 \\mu}}{4} \\eta \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2}.", + "image_path": "01a2efdc2dd962b03eb3e563ace8b5a7da135aedf0f40b7b2fb7e2251e321346.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 307, + 133, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 307, + 133, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 133, + 318 + ], + "type": "text", + "content": "Proof." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 207, + 323, + 403, + 408 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 323, + 403, + 408 + ], + "spans": [ + { + "bbox": [ + 207, + 323, + 403, + 408 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t}) - \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t + 1}) = \\frac {\\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) - \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1})}{\\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t}) + \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t + 1})} \\\\ \\geq \\frac {\\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t + 1}) - \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t})}{2 \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t})} \\\\ \\geq \\frac {\\eta (1 - \\rho_ {2} \\eta / 2) \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {t}) \\| _ {2} ^ {2}}{2 \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t})}, \\\\ \\end{array}", + "image_path": "9dedd5eb0d22e5fc0db3d13b03486ece7041ec3727c32601939b296e24367e19.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 415, + 504, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 504, + 446 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 504, + 446 + ], + "type": "text", + "content": "where the two inequalities use Lemma K.6. By " + }, + { + "bbox": [ + 104, + 415, + 504, + 446 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 415, + 504, + 446 + ], + "type": "text", + "content": "-PL, " + }, + { + "bbox": [ + 104, + 415, + 504, + 446 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi}(\\hat{\\boldsymbol{u}}_t) \\leq \\frac{1}{\\sqrt{2\\mu}} \\|\\nabla \\mathcal{L}(\\hat{\\boldsymbol{u}}_t)\\|_2" + }, + { + "bbox": [ + 104, + 415, + 504, + 446 + ], + "type": "text", + "content": ". 
Therefore, we have " + }, + { + "bbox": [ + 104, + 415, + 504, + 446 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi}(\\hat{\\boldsymbol{u}}_t) - \\tilde{\\Psi}(\\hat{\\boldsymbol{u}}_{t+1}) \\geq \\frac{\\sqrt{2\\mu}}{2}(1 - \\eta\\rho/2)\\eta \\|\\nabla \\mathcal{L}(\\hat{\\boldsymbol{u}}_t)\\|_2 \\geq \\frac{\\sqrt{2\\mu}}{4}\\eta \\|\\nabla \\mathcal{L}(\\hat{\\boldsymbol{u}}_t)\\|_2" + }, + { + "bbox": [ + 104, + 415, + 504, + 446 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 456, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 479 + ], + "type": "text", + "content": "Based on Lemma K.8, we have the following lemma that bounds the movement of GD over multiple steps." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "text", + "content": "Lemma K.9 (Bounding the movement of GD). 
If " + }, + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_0" + }, + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "text", + "content": " is initialized such that " + }, + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "inline_equation", + "content": "\\| \\hat{\\pmb{u}}_0 - \\pmb{\\theta}^*\\| _2\\leq \\frac{1}{4}\\sqrt{\\frac{\\mu}{\\rho}}\\epsilon '" + }, + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "text", + "content": ", then for all " + }, + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "inline_equation", + "content": "t\\geq 0" + }, + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_t\\in B^{\\epsilon '}(\\pmb {\\theta}^*)" + }, + { + "bbox": [ + 104, + 483, + 504, + 515 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 249, + 521, + 361, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 521, + 361, + 548 + ], + "spans": [ + { + "bbox": [ + 249, + 521, + 361, + 548 + ], + "type": "interline_equation", + "content": "\\left\\| \\hat {\\boldsymbol {u}} _ {t} - \\hat {\\boldsymbol {u}} _ {0} \\right\\| _ {2} \\leq \\sqrt {\\frac {8}{\\mu}} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}).", + "image_path": "829ad7551f25797506588e861cfee43f53ce019c8a49b9f7006a5afa68ec6d28.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "text", + "content": "Proof. We prove the proposition by induction. 
When " + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "text", + "content": ", it trivially holds. Assume that the proposition holds for " + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_{\\tau}" + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "inline_equation", + "content": "0 \\leq \\tau < t" + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "text", + "content": ". For step " + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "text", + "content": ", since " + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_{\\tau} \\in B^{\\epsilon'}(\\pmb{\\theta}^{*})" + }, + { + "bbox": [ + 104, + 558, + 504, + 592 + ], + "type": "text", + "content": ", we apply Lemma K.8 and obtain" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 150, + 597, + 459, + 629 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 597, + 459, + 629 + ], + "spans": [ + { + "bbox": [ + 150, + 597, + 459, + 629 + ], + "type": "interline_equation", + "content": "\\| \\hat {\\boldsymbol {u}} _ {t} - \\hat {\\boldsymbol {u}} _ {0} \\| _ {2} \\leq \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\| \\nabla \\mathcal {L} (\\hat {\\boldsymbol {u}} _ {\\tau}) \\| _ {2} \\leq \\sqrt {\\frac {8}{\\mu}} \\left(\\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}) - \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {t})\\right) \\leq \\sqrt {\\frac {8}{\\mu}} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}).", + "image_path": "11cc95b2e4d3b2e7d8cff922c33182134bbd11d9109ace2b80df2d7484940070.jpg" + } + ] + } + ], + "index": 18 + }, + 
{ + "bbox": [ + 104, + 635, + 241, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 241, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 241, + 647 + ], + "type": "text", + "content": "Further by " + }, + { + "bbox": [ + 104, + 635, + 241, + 647 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 635, + 241, + 647 + ], + "type": "text", + "content": "-smoothness of " + }, + { + "bbox": [ + 104, + 635, + 241, + 647 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot)" + }, + { + "bbox": [ + 104, + 635, + 241, + 647 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 192, + 652, + 416, + 680 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 652, + 416, + 680 + ], + "spans": [ + { + "bbox": [ + 192, + 652, + 416, + 680 + ], + "type": "interline_equation", + "content": "\\left\\| \\hat {\\boldsymbol {u}} _ {t} - \\hat {\\boldsymbol {u}} _ {0} \\right\\| _ {2} \\leq \\sqrt {\\frac {8}{\\mu}} \\tilde {\\Psi} (\\hat {\\boldsymbol {u}} _ {0}) \\leq 2 \\sqrt {\\frac {\\rho}{\\mu}} \\left\\| \\hat {\\boldsymbol {u}} _ {0} - \\boldsymbol {\\theta} ^ {*} \\right\\| _ {2} \\leq \\frac {1}{2} \\epsilon^ {\\prime}.", + "image_path": "ed4efe6c0eae1d603ddf1c221a556565a36a6bd9a95926ff13da426dc86162ed.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 685, + 454, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 685, + 454, + 698 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 454, + 698 + ], + "type": "text", + "content": "Therefore, " + }, + { + "bbox": [ + 104, + 685, + 454, + 698 + ], + "type": "inline_equation", + "content": "\\| \\hat{\\pmb{u}}_t - \\pmb{\\theta}^* \\|_2 \\leq \\| \\hat{\\pmb{u}}_t - \\hat{\\pmb{u}}_0 \\|_2 + \\| \\hat{\\pmb{u}}_0 - \\pmb{\\theta}^* \\|_2 < \\epsilon'" + }, + { + "bbox": [ + 104, + 685, + 454, + 698 + ], + "type": "text", + 
"content": ", which concludes the proof." + } + ] + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 494, + 686, + 504, + 696 + ], + "blocks": [ + { + "bbox": [ + 494, + 686, + 504, + 696 + ], + "lines": [ + { + "bbox": [ + 494, + 686, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 494, + 686, + 504, + 696 + ], + "type": "image", + "image_path": "f7898de853e2fec26ec19d41b0d08e55c88c36513bdf75804aa359d44976016a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "type": "text", + "content": "Finally, we introduce a lemma adapted from Thm. D.4 of which bounds the movement of GF. Lyu et al. (2022)." + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "text", + "content": "Lemma K.10. 
Assume that " + }, + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "inline_equation", + "content": "\\| \\pmb{\\theta}_0 - \\pmb{\\theta}^*\\|_2 < \\sqrt{\\frac{\\mu}{\\rho}}\\epsilon'" + }, + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "text", + "content": ". The gradient flow " + }, + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}(t) = -\\frac{\\mathrm{d}\\mathcal{L}(\\pmb{\\theta}(t))}{\\mathrm{d}t}" + }, + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "text", + "content": " starting at " + }, + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_0" + }, + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "text", + "content": " converges to a point in " + }, + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 104, + 80, + 504, + 110 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 178, + 114, + 433, + 143 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 114, + 433, + 143 + ], + "spans": [ + { + "bbox": [ + 178, + 114, + 433, + 143 + ], + "type": "interline_equation", + "content": "\\left\\| \\boldsymbol {\\theta} _ {0} - \\lim _ {t \\rightarrow + \\infty} \\boldsymbol {\\theta} (t) \\right\\| _ {2} \\leq \\sqrt {\\frac {2}{\\mu}} \\sqrt {\\mathcal {L} (\\boldsymbol {\\theta} _ {0}) - \\mathcal {L} ^ {*}} \\leq \\sqrt {\\frac {\\rho}{\\mu}} \\| \\boldsymbol {\\theta} _ {0} - \\boldsymbol {\\theta} ^ {*} \\| _ {2}", + "image_path": "65c97212401b852842f054082b7453e041acd49f4c3fe479ed49544d3f3ac854.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 152, + 320, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 320, + 165 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 320, + 165 + ], + "type": "text", + "content": "Proof. 
Let " + }, + { + "bbox": [ + 104, + 152, + 320, + 165 + ], + "type": "inline_equation", + "content": "T \\coloneqq \\inf \\{t : \\theta \\notin U\\}" + }, + { + "bbox": [ + 104, + 152, + 320, + 165 + ], + "type": "text", + "content": ". Then for all " + }, + { + "bbox": [ + 104, + 152, + 320, + 165 + ], + "type": "inline_equation", + "content": "t < T" + }, + { + "bbox": [ + 104, + 152, + 320, + 165 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 178, + 168, + 431, + 220 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 168, + 431, + 220 + ], + "spans": [ + { + "bbox": [ + 178, + 168, + 431, + 220 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {\\mathrm {d}}{\\mathrm {d} t} \\left(\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}\\right) ^ {1 / 2} = \\frac {1}{2} \\left(\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}\\right) ^ {- 1 / 2} \\cdot \\left\\langle \\nabla \\mathcal {L} (\\boldsymbol {\\theta}), \\frac {\\mathrm {d} \\boldsymbol {\\theta}}{\\mathrm {d} t} \\right\\rangle \\\\ = - \\frac {1}{2} (\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}) ^ {- 1 / 2} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta}) \\| _ {2} \\| \\frac {\\mathrm {d} \\boldsymbol {\\theta}}{\\mathrm {d} t} \\| _ {2}. 
\\\\ \\end{array}", + "image_path": "787a143bb6a4440d505a677eceff09e3921eb2f578da06a8c09018c406ef681e.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 224, + 312, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 224, + 312, + 238 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 312, + 238 + ], + "type": "text", + "content": "By " + }, + { + "bbox": [ + 104, + 224, + 312, + 238 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 224, + 312, + 238 + ], + "type": "text", + "content": "-PL, " + }, + { + "bbox": [ + 104, + 224, + 312, + 238 + ], + "type": "inline_equation", + "content": "\\|\\nabla \\mathcal{L}(\\pmb{\\theta})\\|_2 \\geq \\sqrt{2\\mu(\\mathcal{L}(\\pmb{\\theta}) - \\mathcal{L}^*)}" + }, + { + "bbox": [ + 104, + 224, + 312, + 238 + ], + "type": "text", + "content": ". Hence," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 227, + 241, + 381, + 266 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 241, + 381, + 266 + ], + "spans": [ + { + "bbox": [ + 227, + 241, + 381, + 266 + ], + "type": "interline_equation", + "content": "\\frac {\\mathrm {d}}{\\mathrm {d} t} \\left(\\mathcal {L} (\\boldsymbol {\\theta}) - \\mathcal {L} ^ {*}\\right) ^ {1 / 2} \\leq - \\frac {\\sqrt {2 \\mu}}{2} \\| \\frac {\\mathrm {d} \\boldsymbol {\\theta}}{\\mathrm {d} t} \\| _ {2}.", + "image_path": "a416e73c01570182529d6de0d54fe748bbe54bb18e5e45fc70ff811f1ad15610.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 269, + 233, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 233, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 233, + 280 + ], + "type": "text", + "content": "Integrating both sides, we have" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 167, + 283, + 443, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 283, + 443, + 312 + ], + 
"spans": [ + { + "bbox": [ + 167, + 283, + 443, + 312 + ], + "type": "interline_equation", + "content": "\\int_ {0} ^ {T} \\| \\frac {\\mathrm {d} \\boldsymbol {\\theta} (\\tau)}{\\mathrm {d} \\tau} \\| \\mathrm {d} \\tau \\leq \\frac {2}{\\sqrt {2 \\mu}} \\left(\\mathcal {L} \\left(\\boldsymbol {\\theta} _ {0}\\right) - \\mathcal {L} ^ {*}\\right) ^ {1 / 2} \\leq \\sqrt {\\frac {\\rho}{\\mu}} \\| \\boldsymbol {\\theta} _ {0} - \\boldsymbol {\\theta} ^ {*} \\| _ {2} < \\epsilon^ {\\prime},", + "image_path": "f3d287a527c84cbee692499270d485807751ef92a03a9d9d384896b5f81211ab.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "text", + "content": "where the second inequality uses " + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "text", + "content": "-smoothness of " + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "text", + "content": ". Therefore, " + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "inline_equation", + "content": "T = +\\infty" + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}(t)" + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "text", + "content": " converges to some point in " + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 104, + 315, + 506, + 339 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 351, + 292, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 351, + 292, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 292, + 361 + ], + "type": "text", + "content": "K.4 CONSTRUCTION OF WORKING ZONES" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "text", + "content": "We construct four nested working zones " + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "inline_equation", + "content": "(\\Gamma^{\\epsilon_0},\\Gamma^{\\epsilon_1},\\Gamma^{\\epsilon_2},\\Gamma^{\\epsilon_3})" + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "text", + "content": " in the neighborhood of " + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "text", + "content": ". Later we will show that the local iterates " + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,t}^{(s)}\\in \\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "text", + "content": " and the global iterates " + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}^{(s)}\\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "text", + "content": " with high probability after " + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log \\frac{1}{\\eta})" + }, + { + "bbox": [ + 104, + 371, + 504, + 413 + ], + "type": "text", + "content": " rounds. The following lemma illustrates the properties the working zones should satisfy." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": "Lemma K.11 (Working zone lemma). There exists constants " + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "inline_equation", + "content": "\\epsilon_0 < \\epsilon_1 < \\epsilon_2 < \\epsilon_3" + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "inline_equation", + "content": "(\\Gamma^{\\epsilon_0}, \\Gamma^{\\epsilon_1}, \\Gamma^{\\epsilon_2}, \\Gamma^{\\epsilon_3})" + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": " satisfy the following properties:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 109, + 445, + 504, + 525 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "spans": [ + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "text", + "content": "1. 
" + }, + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "text", + "content": " satisfies " + }, + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "text", + "content": "-PL in " + }, + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_3}" + }, + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "inline_equation", + "content": "\\mu > 0" + }, + { + "bbox": [ + 110, + 445, + 282, + 457 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "spans": [ + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "text", + "content": "2. Any gradient flow starting in " + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "text", + "content": " converges to some point in " + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "text", + "content": ". 
Then, by Falconer (1983), " + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "inline_equation", + "content": "\\Phi(\\cdot)" + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^\\infty" + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 109, + 464, + 504, + 487 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "spans": [ + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "text", + "content": "3. Any " + }, + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "text", + "content": " has an " + }, + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "inline_equation", + "content": "\\epsilon_1" + }, + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "text", + "content": "-neighborhood " + }, + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "inline_equation", + "content": "B^{\\epsilon_1}(\\pmb{\\theta})" + }, + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "inline_equation", + "content": "B^{\\epsilon_1}(\\pmb{\\theta}) \\subseteq \\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 109, + 494, + 406, + 506 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 109, + 513, + 477, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 513, + 477, + 525 + ], + "spans": [ + { + "bbox": [ + 109, + 513, + 477, + 525 + ], + "type": "text", + "content": "4. Any gradient descent starting in " + }, + { + "bbox": [ + 109, + 513, + 477, + 525 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 109, + 513, + 477, + 525 + ], + "type": "text", + "content": " with sufficiently small learning rate will stay in " + }, + { + "bbox": [ + 109, + 513, + 477, + 525 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 109, + 513, + 477, + 525 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": "Proof. Let " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(0)}" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": " be initialized such that " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\Phi (\\bar{\\theta}^{(0)})\\in \\Gamma" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": " be the set of all points on the gradient flow trajectory starting from " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(0)}" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^{\\epsilon}" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": " be the " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": "-neighborhood of " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": " is a positive constant. Since the gradient flow converges to " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\phi^{(0)}" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^{\\epsilon}" + }, + { + "bbox": [ + 104, + 536, + 504, + 574 + ], + "type": "text", + "content": " are bounded." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": "We construct four nested working zones. By Lemma H.3 in Lyu et al. (2022), there exists an " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\epsilon_3" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": "-neighborhood of " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_3}" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": " satisfies " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": "-PL for some " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\mu > 0" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": " be the convex hull of " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_3} \\cup \\mathcal{Z}^\\epsilon" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\mathcal{M}^{\\epsilon_4}" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": " be the " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\epsilon_4" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": "-neighborhood of " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\epsilon_4" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": " is a positive constant. Then " + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "inline_equation", + "content": "\\mathcal{M}^{\\epsilon_4}" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": " is bounded." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "spans": [ + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": "Define " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\rho_{2} = \\sup_{\\pmb{\\theta}\\in \\mathcal{M}^{\\epsilon_{4}}}\\| \\nabla^{2}\\mathcal{L}(\\pmb {\\theta})\\|_{2}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\rho_{3} = \\sup_{\\mathcal{M}^{\\epsilon_{4}}}\\| \\nabla^{3}\\mathcal{L}(\\pmb {\\theta})\\|_{2}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": ". By Lemma K.10, we can construct an " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\epsilon_{2}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": "-neighborhood of " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\epsilon_{2} < \\sqrt{\\frac{\\mu}{\\rho_{2}}}\\epsilon_{3}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " such that all GF starting in " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " converges to " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + 
"content": ". By Falconer (1983), " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Phi (\\cdot)" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^2" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_3}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": ". Define " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\nu_{1} = \\sup_{\\pmb {\\theta}\\in \\Gamma^{\\epsilon_{3}}}\\| \\partial \\Phi (\\pmb {\\theta})\\|_{2}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\nu_{2} = \\sup_{\\pmb {\\theta}\\in \\Gamma^{\\epsilon_{3}}}\\| \\partial^{2}\\Phi (\\pmb {\\theta})\\|_{2}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": ". 
We also construct an " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\epsilon_{1}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " neighborhood of " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\epsilon_{1}\\leq \\frac{1}{2}\\epsilon_{2} < \\frac{1}{2}\\sqrt{\\frac{\\mu}{\\rho_{2}}}\\epsilon_{3}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " such that all " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\pmb {\\theta}\\in \\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " has an " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\epsilon_{1}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " neighborhood where " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " is well defined. 
Finally, by Lemma K.9, there exists an " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\epsilon_0" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": "-neighborhood of " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\epsilon_0\\leq \\frac{1}{4}\\sqrt{\\frac{\\mu}{\\rho_2}}\\epsilon_1" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " such that all gradient descent iterates starting in " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\eta \\leq \\frac{1}{\\rho_2}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": " will stay in " + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 627, + 506, + 735 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": "Note that the notions of " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^{\\epsilon}" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\mathcal{M}^{\\epsilon_4}" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\rho_2" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\rho_3" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\nu_{1}" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": ", and " 
+ }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\nu_{2}" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": " defined in the proof will be useful in the remaining part of this section. When analyzing the limiting dynamics of Local SGD, we will show that all " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": " stays in " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)} \\in \\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{\\theta}}^{(s)} \\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": " with high probability after " + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log \\frac{1}{\\eta})" + }, + { + "bbox": [ + 104, + 82, + 504, + 122 + ], + "type": "text", + "content": " rounds." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 135, + 350, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 135, + 350, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 135, + 350, + 148 + ], + "type": "text", + "content": "K.5 PHASE 1:ITERATE APPROACHING THE MANIFOLD" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": "The approaching phase can be further divided into two subphases. In the first subphase, " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(0)}" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": " is initialized such that " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\phi^{(0)}\\in \\Gamma" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": ". 
We will show that after a constant number of rounds " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "s_0" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s_0)}" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": " goes to the inner part of " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\theta}^{(s_0)} - \\phi^{(0)}\\| _2\\leq c\\epsilon_0" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": " with high probability, where " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "0 < c < 1" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": " and the constants will be specified later (see Appendix K.5.2). 
In the second subphase, we show that the iterate can reach within " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}} (\\sqrt{\\eta})" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": " distance from " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": " after " + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log \\frac{1}{\\eta})" + }, + { + "bbox": [ + 104, + 156, + 506, + 229 + ], + "type": "text", + "content": " rounds with high probability (see Appendix K.5.3)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 242, + 249, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 249, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 249, + 253 + ], + "type": "text", + "content": "K.5.1 ADDITIONAL NOTATIONS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "content": "Consider an auxiliary sequence " + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "inline_equation", + "content": "\\{\\tilde{u}_t^{(s)}\\}" + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_0^{(s)} = \\bar{\\pmb{\\theta}}^{(s)}" + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_{t + 1}^{(s)} = \\tilde{\\pmb{u}}_t^{(s)} - 
\\eta \\nabla \\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})" + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "inline_equation", + "content": "0\\leq t\\leq H - 1" + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "content": ". Define " + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "inline_equation", + "content": "\\tilde{\\Delta}_{k,t}^{(s)}\\coloneqq \\pmb{\\theta}_{k,t}^{(s)} - \\tilde{\\pmb{u}}_t^{(s)}" + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "content": " to be the difference between the local iterate and the gradient descent iterate. Notice that " + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "inline_equation", + "content": "\\tilde{\\Delta}_{k,0}^{(s)} = 0" + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 259, + 504, + 307 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": "Consider a gradient flow " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\{\\pmb{u}(t)\\}_{t\\geq 0}" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": " with the initial condition " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\pmb{u}(0) = \\bar{\\pmb{\\theta}}^{(0)}" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": " and converges to " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\phi^{(0)}\\in \\Gamma" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": ". For simplicity, let " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\pmb{u}_t^{(s)}\\coloneqq \\pmb {u}(s\\alpha +t\\eta)" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": " be the gradient flow after " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": " rounds plus " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": " steps. 
Let " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "s_0" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": " be the smallest number such that " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\| \\pmb{u}_0^{(s_0)} - \\pmb{\\phi}^{(0)}\\| _2\\leq \\frac{1}{4}\\sqrt{\\frac{\\mu}{\\rho_2}}\\epsilon_0" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": ". Note that " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "s_0" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": " is a constant independent of " + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 312, + 504, + 369 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "text", + "content": "In this subsection, the minimum value of the loss in Appendix K.3 corresponds to the loss value on " + }, + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "inline_equation", + "content": "\\mathcal{L}^{*} = \\mathcal{L}(\\phi), \\forall \\phi \\in \\Gamma" + }, + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 402, + 457, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 402, + 457, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 457, + 418 + ], + "type": "text", + "content": "We also define the following sequence " + }, + { + "bbox": [ + 105, + 402, + 457, + 418 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{Z}}_{k,t}^{(s)}\\}_{t = 0}^{H}" + }, + { + "bbox": [ + 105, + 402, + 457, + 418 + ], + "type": "text", + "content": " that will be used in the proof. Define" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 178, + 426, + 432, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 426, + 432, + 460 + ], + "spans": [ + { + "bbox": [ + 178, + 426, + 432, + 460 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}))\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\tilde {\\boldsymbol {Z}} _ {k, 0} ^ {(s)} = \\boldsymbol {0}.", + "image_path": "07fd7f8f342ad59ebdf826672daa7aa4792ced37b8181cc37c44a6034184ac35.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 472, + 247, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 247, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 247, + 483 + ], + "type": "text", + "content": "K.5.2 PROOF FOR SUBPHASE 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 491, + 379, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 379, + 507 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 379, + 507 + ], + "type": "text", + "content": "First, we have the following lemma about the concentration of " + }, + { + "bbox": [ + 104, + 491, + 379, + 507 + ], + "type": 
"inline_equation", + "content": "\\tilde{\\mathbf{Z}}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 491, + 379, + 507 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "text", + "content": "Lemma K.12 (Concentration property of " + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{Z}}_{k,t}^{(s)}\\}_{t = 0}^{H}" + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "text", + "content": "). Given " + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}" + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "inline_equation", + "content": "\\tilde{\\boldsymbol{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon" + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "inline_equation", + "content": "0\\leq t\\leq H" + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "text", + "content": ", then with probability at least " + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 511, + 506, + 538 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 174, + 545, + 435, + 572 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 545, + 435, + 572 + ], + "spans": [ + { + "bbox": [ + 174, + 545, + 435, + 572 + ], + "type": "interline_equation", + "content": "\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq 
\\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 H K}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],", + "image_path": "b2cc5151ec6822cfc73dbbf9c93c27932f9fb2687913ddb01c597fd6107b3c07.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 579, + 204, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 579, + 204, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 204, + 594 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 579, + 204, + 594 + ], + "type": "inline_equation", + "content": "\\tilde{C}_1\\coloneqq \\exp (\\alpha \\rho_2)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 610, + 339, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 610, + 339, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 610, + 339, + 628 + ], + "type": "text", + "content": "Proof. For each " + }, + { + "bbox": [ + 104, + 610, + 339, + 628 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{Z}}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 610, + 339, + 628 + ], + "type": "text", + "content": ", construct a sequence " + }, + { + "bbox": [ + 104, + 610, + 339, + 628 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathbf{Z}}_{k,t,t'}^{(s)}\\}_{t'=0}^t" + }, + { + "bbox": [ + 104, + 610, + 339, + 628 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 171, + 635, + 438, + 670 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 635, + 438, + 670 + ], + "spans": [ + { + "bbox": [ + 171, + 635, + 438, + 670 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {Z}} _ {k, t, t ^ {\\prime}} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t ^ {\\prime} - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}))\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad 
\\tilde {\\boldsymbol {Z}} _ {k, t, 0} ^ {(s)} = \\boldsymbol {0}.", + "image_path": "3a90d42a41e2b31c1044916cb04e3c7fe9f597086eab6d04ff35873dafb385ef.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon" + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "inline_equation", + "content": "\\| \\nabla^2\\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})\\| _2\\leq \\rho_2" + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "inline_equation", + "content": "0\\leq t\\leq H" + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "text", + "content": ". 
Then, for all " + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 678, + 484, + 693 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 175, + 700, + 434, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 700, + 434, + 735 + ], + "spans": [ + { + "bbox": [ + 175, + 700, + 434, + 735 + ], + "type": "interline_equation", + "content": "\\left\\| \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\right\\| _ {2} \\leq (1 + \\rho_ {2} \\eta) ^ {H} \\leq \\exp (\\alpha \\rho_ {2}) = \\tilde {C} _ {1}.", + "image_path": "98fd5d41721dfc1298b6a9598be559bcf8f71cc91ebd1eaae0be6ef3340f733c.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 504, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 504, + 108 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 504, + 108 + ], + 
"type": "text", + "content": "Notice that for all " + }, + { + "bbox": [ + 104, + 80, + 504, + 108 + ], + "type": "inline_equation", + "content": "0 \\leq t \\leq H" + }, + { + "bbox": [ + 104, + 80, + 504, + 108 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 80, + 504, + 108 + ], + "type": "inline_equation", + "content": "\\{\\tilde{Z}_{k,t,t'}^{(s)}\\}_{t'=0}^t" + }, + { + "bbox": [ + 104, + 80, + 504, + 108 + ], + "type": "text", + "content": " is a martingale with " + }, + { + "bbox": [ + 104, + 80, + 504, + 108 + ], + "type": "inline_equation", + "content": "\\| \\tilde{Z}_{k,t,t'}^{(s)} - \\tilde{Z}_{k,t,t'-1}^{(s)} \\|_2 \\leq \\tilde{C}_1 \\sigma_{\\max}" + }, + { + "bbox": [ + 104, + 80, + 504, + 108 + ], + "type": "text", + "content": ". By Azuma-Hoeffding's inequality," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 148, + 112, + 460, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 112, + 460, + 159 + ], + "spans": [ + { + "bbox": [ + 148, + 112, + 460, + 159 + ], + "type": "interline_equation", + "content": "\\mathbb {P} (\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\geq \\epsilon^ {\\prime}) \\leq 2 \\exp \\left(\\frac {- \\epsilon^ {\\prime 2}}{2 t (\\tilde {C} _ {1} \\sigma_ {\\max}) ^ {2}}\\right) \\leq 2 \\exp \\left(\\frac {- \\epsilon^ {\\prime 2}}{2 H (\\tilde {C} _ {1} \\sigma_ {\\max}) ^ {2}}\\right).", + "image_path": "892074ee0a57279397edd66c3f50340c10896ee05ce0fa5f9cc2ef2d624dd37e.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 162, + 504, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 162, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 104, + 162, + 504, + 186 + ], + "type": "text", + "content": "Taking a union bound on all " + }, + { + "bbox": [ + 104, + 162, + 504, + 186 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 162, + 504, + 186 + ], + "type": 
"text", + "content": " and " + }, + { + "bbox": [ + 104, + 162, + 504, + 186 + ], + "type": "inline_equation", + "content": "0 \\leq t \\leq H" + }, + { + "bbox": [ + 104, + 162, + 504, + 186 + ], + "type": "text", + "content": ", we can conclude that with probability at least " + }, + { + "bbox": [ + 104, + 162, + 504, + 186 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 162, + 504, + 186 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 174, + 190, + 436, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 190, + 436, + 217 + ], + "spans": [ + { + "bbox": [ + 174, + 190, + 436, + 217 + ], + "type": "interline_equation", + "content": "\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 H K}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ].", + "image_path": "362a20561b30eae572ea8d029159b55deab6793b88ec9afa1142172f24082975.jpg" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 494, + 222, + 504, + 232 + ], + "blocks": [ + { + "bbox": [ + 494, + 222, + 504, + 232 + ], + "lines": [ + { + "bbox": [ + 494, + 222, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 494, + 222, + 504, + 232 + ], + "type": "image", + "image_path": "79f43c4c7290c0e6a1cbcb998982f7f4fc2061b5a5c08015aef42898ad274bc7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 246, + 504, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 246, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 246, + 504, + 269 + ], + "type": "text", + "content": "The following lemma states that the gradient descent iterates will closely track the gradient flow with the same initial point." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "text", + "content": "Lemma K.13. Denote " + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "inline_equation", + "content": "G \\coloneqq \\sup_{t \\geq 0} \\| \\nabla \\mathcal{L}(\\boldsymbol{u}(t)) \\|_2" + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "text", + "content": " as the upper bound of the gradient on the gradient flow trajectory. If " + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "inline_equation", + "content": "\\| \\tilde{\\boldsymbol{u}}_t^{(s)} - \\boldsymbol{u}_t^{(s)} \\|_2 = \\mathcal{O}(\\sqrt{\\eta})" + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "text", + "content": ", then for all " + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "inline_equation", + "content": "0 \\leq t \\leq H" + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "text", + "content": ", the closeness of " + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "inline_equation", + "content": "\\tilde{\\boldsymbol{u}}_t^{(s)}" + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "inline_equation", + "content": "\\boldsymbol{u}_t^{(s)}" + }, + { + "bbox": [ + 104, + 271, + 505, + 313 + ], + "type": "text", + "content": " is bounded by" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 209, + 319, + 400, + 336 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 319, + 400, + 336 + ], + "spans": [ + { + "bbox": [ + 209, + 319, + 400, + 336 + ], + "type": "interline_equation", + "content": "\\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} 
\\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\tilde {C} _ {1} \\eta G,", + "image_path": "b2a5f525d47a9f257dc2034eab8e768e8fee943463fd2c38e2dec13356503be1.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 342, + 201, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 201, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 201, + 356 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 342, + 201, + 356 + ], + "type": "inline_equation", + "content": "\\tilde{C}_1 = \\exp (\\alpha \\rho_2)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 366, + 247, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 366, + 247, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 247, + 379 + ], + "type": "text", + "content": "Proof. We prove by induction that" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 162, + 384, + 505, + 417 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 384, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 162, + 384, + 505, + 417 + ], + "type": "interline_equation", + "content": "\\left\\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\right\\| _ {2} \\leq (1 + \\rho_ {2} \\eta) ^ {t} \\left\\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\right\\| _ {2} + \\rho_ {2} \\eta^ {2} G \\sum_ {\\tau = 0} ^ {t - 1} (1 + \\rho_ {2} \\eta) ^ {\\tau}. 
\\tag {34}", + "image_path": "4a2d0fa3d0af0f78753d339dd3de611b089cfe52ad12c2f92ee313ddb5df26f1.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 422, + 414, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 414, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 414, + 434 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 105, + 422, + 414, + 434 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 105, + 422, + 414, + 434 + ], + "type": "text", + "content": ", (34) holds trivially. Assume that (34) holds for " + }, + { + "bbox": [ + 105, + 422, + 414, + 434 + ], + "type": "inline_equation", + "content": "0 \\leq \\tau \\leq t" + }, + { + "bbox": [ + 105, + 422, + 414, + 434 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 156, + 439, + 453, + 525 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 439, + 453, + 525 + ], + "spans": [ + { + "bbox": [ + 156, + 439, + 453, + 525 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)} - \\boldsymbol {u} _ {t + 1} ^ {(s)} = \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\left(\\boldsymbol {u} _ {t} - \\int_ {s \\alpha + t \\eta} ^ {s \\alpha + (t + 1) \\eta} \\nabla \\mathcal {L} (\\boldsymbol {u} (v)) d v\\right) \\\\ = \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} - \\eta (\\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)})) \\\\ - \\int_ {s \\alpha + t \\eta} ^ {s \\alpha + (t + 1) \\eta} \\left(\\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)}) - \\nabla \\mathcal {L} (\\boldsymbol {u} (v))\\right) d v. 
\\\\ \\end{array}", + "image_path": "620bbafce574e68a753980834e0f7013e9e39b5ab99ea38f76d3f97bbe5641c9.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 529, + 192, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 192, + 541 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 192, + 541 + ], + "type": "text", + "content": "By smoothness of " + }, + { + "bbox": [ + 105, + 529, + 192, + 541 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 186, + 546, + 424, + 606 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 546, + 424, + 606 + ], + "spans": [ + { + "bbox": [ + 186, + 546, + 424, + 606 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)}) - \\nabla \\mathcal {L} (\\boldsymbol {u} (v)) \\| _ {2} \\leq \\rho_ {2} \\| \\boldsymbol {u} _ {t} ^ {(s)} - \\boldsymbol {u} (v) \\| _ {2} \\\\ \\leq \\rho_ {2} \\int_ {s \\alpha + t \\eta} ^ {v} \\| \\nabla \\mathcal {L} (\\boldsymbol {u} (w)) \\| _ {2} d w \\\\ \\leq \\rho_ {2} \\eta G. 
\\\\ \\end{array}", + "image_path": "b788ea9bae77c487221918dc466144283055c891bd98dfd5b41d48433700ad06.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "type": "inline_equation", + "content": "\\rho_2^2\\eta^2 G\\sum_{\\tau = 0}^{t - 1}(1 + \\rho_2\\eta)^\\tau \\leq \\eta G(1 + \\rho_2\\eta)^t\\leq \\exp (\\alpha \\rho_2)\\eta G" + }, + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "type": "text", + "content": " , then " + }, + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "type": "inline_equation", + "content": "\\| \\tilde{\\pmb{u}}_t^{(s)} - \\pmb {u}_t^{(s)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta})" + }, + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "type": "text", + "content": " which implies that " + }, + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)}\\in \\mathcal{M}^{\\epsilon_4}" + }, + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "type": "text", + "content": " . Hence, " + }, + { + "bbox": [ + 104, + 613, + 504, + 643 + ], + "type": "inline_equation", + "content": "\\| \\nabla \\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)}) - \\mathcal{L}(\\pmb {u}_t^{(s)})\\| _2\\leq \\rho_2\\| \\tilde{\\pmb{u}}_t^{(s)} - \\pmb {u}_t^{(s)}\\| _2." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 647, + 198, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 198, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 198, + 659 + ], + "type": "text", + "content": "By triangle inequality," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 153, + 663, + 455, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 663, + 455, + 715 + ], + "spans": [ + { + "bbox": [ + 153, + 663, + 455, + 715 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)} - \\boldsymbol {u} _ {t + 1} ^ {(s)} \\right\\| _ {2} \\leq (1 + \\rho_ {2} \\eta) \\left\\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\right\\| _ {2} + \\rho_ {2} \\eta^ {2} G \\\\ \\leq \\left(1 + \\rho_ {2} \\eta\\right) ^ {t + 1} \\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\| _ {2} + \\rho_ {2} \\eta^ {2} G \\sum_ {\\tau = 0} ^ {t} (1 + \\rho_ {2} \\eta) ^ {\\tau}, \\\\ \\end{array}", + "image_path": "eb22f251eee7d94eb02c6f4a6d7ac14e3210a50aff629bca2b8ddddf115fd659.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 720, + 457, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 457, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 457, + 733 + ], + "type": "text", + "content": "which concludes the induction step. Applying " + }, + { + "bbox": [ + 105, + 720, + 457, + 733 + ], + "type": "inline_equation", + "content": "1 + \\rho_{2}\\eta \\leq \\exp (\\rho_{2}\\eta)" + }, + { + "bbox": [ + 105, + 720, + 457, + 733 + ], + "type": "text", + "content": ", we have the lemma." 
+ } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 494, + 720, + 504, + 730 + ], + "blocks": [ + { + "bbox": [ + 494, + 720, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 494, + 720, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 494, + 720, + 504, + 730 + ], + "type": "image", + "image_path": "12b0357f5a598edb572a3e0dbbcd1ac182759da65bdd92f6d2610597a7c3f138.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "text", + "content": "Utilizing the concentration probability of " + }, + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "inline_equation", + "content": "\\{\\tilde{Z}_{k,t}^{(s)}\\}" + }, + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "text", + "content": ", we can obtain the following lemma which implies that the Local SGD iterates will closely track the gradient descent iterates with high probability." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "text", + "content": "Lemma K.14. Given " + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}" + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "inline_equation", + "content": "\\tilde{u}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon" + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "inline_equation", + "content": "0\\leq t\\leq H" + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "text", + "content": ", then for " + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "text", + "content": " with probability at least " + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "text", + "content": ", there exists a constant " + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "inline_equation", + "content": "\\tilde{C}_3" + }, + { + "bbox": [ + 104, + 110, + 504, + 137 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 186, + 140, + 423, + 166 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 140, + 423, + 166 + ], + "spans": [ + { + "bbox": [ + 186, + 140, + 423, + 166 + ], + "type": "interline_equation", + "content": "\\left\\| \\boldsymbol {\\theta} _ 
{k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],", + "image_path": "b0e7ce40e526acc47816e64cdb593a5e5cd96f8d8eb93ddd5777671b75ed9965.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 168, + 124, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 168, + 124, + 179 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 124, + 179 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 232, + 182, + 378, + 209 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 182, + 378, + 209 + ], + "spans": [ + { + "bbox": [ + 232, + 182, + 378, + 209 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.", + "image_path": "d72ad05f7746e01e96f1adfbbe658b87edeb4f5b517309be2b000439caa40db4.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "text", + "content": "Proof. 
Since " + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_3}\\cup \\mathcal{Z}^\\epsilon" + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "inline_equation", + "content": "0\\leq t\\leq H" + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "inline_equation", + "content": "\\| \\nabla^2\\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})\\| _2\\leq \\rho_2" + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "text", + "content": ". According to the update rule for " + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "inline_equation", + "content": "\\theta_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)}" + }, + { + "bbox": [ + 104, + 220, + 504, + 250 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 228, + 253, + 504, + 270 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 253, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 228, + 253, + 504, + 270 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} = \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)}, \\tag {35}", + "image_path": "be169699096048c45766614b597ed29c695fe6ecd7c776d0e9a083f6e0995718.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 235, + 272, + 504, + 289 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 272, + 504, + 289 + 
], + "spans": [ + { + "bbox": [ + 235, + 272, + 504, + 289 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)} = \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right). \\tag {36}", + "image_path": "f03d365d3f4c2ed06dc80b13f1e3d9b054e3fa98025b062b0a81512bdf8336fd.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 291, + 238, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 291, + 238, + 303 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 238, + 303 + ], + "type": "text", + "content": "Subtracting (36) from (35) gives" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 195, + 305, + 504, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 305, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 195, + 305, + 504, + 342 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\tilde {\\boldsymbol {\\Delta}} _ {k, t + 1} ^ {(s)} = \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta (\\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) - \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)})) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\\\ = \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\eta \\tilde {\\boldsymbol {v}} _ {k, t} ^ {(s)}. 
\\tag {37} \\\\ \\end{array}", + "image_path": "1722296fc6533b311820561ab59a72dc168e8a249411f25c63709ac60b69932a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{v}}_{k,t}^{(s)} = (1 - \\beta_{k,t}^{(s)})\\pmb{\\theta}_{k,t}^{(s)} + \\beta_{k,t}^{(s)}\\tilde{\\pmb{u}}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "inline_equation", + "content": "\\beta_{k,t}^{(s)} \\in (0,1)" + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "text", + "content": " depends on " + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)}" + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "text", + "content": ". Therefore, " + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "inline_equation", + "content": "\\| \\tilde{\\pmb{v}}_{k,t}^{(s)}\\| _2 \\leq \\frac{\\rho_3}{2}\\| \\tilde{\\pmb{\\Delta}}_{k,t}^{(s)}\\| _2^2" + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,t}^{(s)} \\in \\mathcal{M}^{\\epsilon_4}" + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "text", + "content": ". 
Applying (37) " + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 345, + 504, + 377 + ], + "type": "text", + "content": " times, we have" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 148, + 380, + 460, + 450 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 380, + 460, + 450 + ], + "spans": [ + { + "bbox": [ + 148, + 380, + 460, + 450 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} = \\left[ \\prod_ {\\tau = 0} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {\\tau} ^ {(s)})) \\right] \\tilde {\\boldsymbol {\\Delta}} _ {k, 0} ^ {(s)} - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)})) \\boldsymbol {z} _ {k, \\tau} ^ {(s)} \\\\ + \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\tilde {\\boldsymbol {v}} _ {k, \\tau} ^ {(s)}. 
\\\\ \\end{array}", + "image_path": "7f62129b3d9c8a63bfe982d1ce84a5fa367778601ea19a171c94d88e7ba2d005.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "text", + "content": "By Cauchy-Schwartz inequality, triangle inequality and the definition of " + }, + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{Z}}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "text", + "content": ", if for all " + }, + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "inline_equation", + "content": "0 \\leq \\tau \\leq t - 1" + }, + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,\\tau}^{(s)} \\in \\mathcal{M}^{\\epsilon_4}" + }, + { + "bbox": [ + 104, + 456, + 504, + 487 + ], + "type": "text", + "content": ", then we have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 208, + 491, + 504, + 523 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 491, + 504, + 523 + ], + "spans": [ + { + "bbox": [ + 208, + 491, + 504, + 523 + ], + "type": "interline_equation", + "content": "\\left\\| \\tilde {\\boldsymbol {\\Delta}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\eta \\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} + \\frac {1}{2} \\eta \\rho_ {3} \\sum_ {\\tau = 0} ^ {t - 1} \\tilde {C} _ {1} \\left\\| \\tilde {\\boldsymbol {\\Delta}} _ {k, \\tau} ^ {(s)} \\right\\| _ {2} ^ {2}. 
\\tag {38}", + "image_path": "3feb711065b7fe2cda4b5da02e6590ceffe94c1777ec6640c30c716a9a1c5170.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 525, + 504, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 504, + 548 + ], + "type": "text", + "content": "Applying Lemma K.12 and substituting in the value of " + }, + { + "bbox": [ + 104, + 525, + 504, + 548 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 525, + 504, + 548 + ], + "type": "text", + "content": ", we have that with probability at least " + }, + { + "bbox": [ + 104, + 525, + 504, + 548 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 525, + 504, + 548 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 178, + 552, + 504, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 552, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 178, + 552, + 504, + 583 + ], + "type": "interline_equation", + "content": "\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in K, 0 \\leq t \\leq H. 
\\tag {39}", + "image_path": "5f0e94496e73869c878a6e4b11a4e3ec73d8ce1f92af802622e8bc5444153351.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 586, + 504, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 504, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 504, + 618 + ], + "type": "text", + "content": "Now we show by induction that for " + }, + { + "bbox": [ + 104, + 586, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 586, + 504, + 618 + ], + "type": "text", + "content": ", when (39) holds, there exists a constant " + }, + { + "bbox": [ + 104, + 586, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\tilde{C}_2 > 2\\sigma_{\\max}\\sqrt{2\\alpha}\\tilde{C}_1" + }, + { + "bbox": [ + 104, + 586, + 504, + 618 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 586, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\| \\tilde{\\Delta}_{k,t}^{(s)}\\|_2 \\leq \\tilde{C}_2\\sqrt{\\eta\\log\\frac{2\\alpha K}{\\eta\\delta}}" + }, + { + "bbox": [ + 104, + 586, + 504, + 618 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "inline_equation", + "content": "\\tilde{\\Delta}_{k,0}^{(s)} = 0" + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "content": ". 
Assume that " + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "inline_equation", + "content": "\\| \\tilde{\\Delta}_{k,\\tau}^{(s)} \\|_2 \\leq \\tilde{C}_2 \\sqrt{\\eta \\log \\frac{2\\alpha K}{\\eta \\delta}}" + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "inline_equation", + "content": "0 \\leq \\tau \\leq t - 1" + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "content": ". Then for all " + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "inline_equation", + "content": "0 \\leq \\tau \\leq t - 1" + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,\\tau}^{(s)} \\in \\mathcal{M}^{\\epsilon_4}" + }, + { + "bbox": [ + 104, + 624, + 504, + 659 + ], + "type": "text", + "content": ". 
Therefore, we can apply (38) and obtain" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 165, + 663, + 443, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 663, + 443, + 731 + ], + "spans": [ + { + "bbox": [ + 165, + 663, + 443, + 731 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\tilde {\\Delta} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\eta \\| \\tilde {Z} _ {k, t} ^ {(s)} \\| _ {2} + \\frac {1}{2} \\eta \\rho_ {3} \\sum_ {\\tau = 0} ^ {t - 1} \\tilde {C} _ {1} \\| \\tilde {\\Delta} _ {k, \\tau} ^ {(s)} \\| _ {2} ^ {2} \\\\ \\leq \\tilde {C} _ {1} \\sigma_ {\\max} \\sqrt {2 \\alpha \\eta \\log \\frac {2 \\alpha K}{\\eta \\delta}} + \\frac {1}{2} \\tilde {C} _ {1} \\tilde {C} _ {2} ^ {2} \\sigma_ {\\max} ^ {2} \\alpha \\rho_ {3} \\eta \\log \\frac {2 \\alpha K}{\\eta \\delta}. \\\\ \\end{array}", + "image_path": "6504402cc75aac896562b49d093e18c035eefa288b6f68176454d3ef9cc91c20.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "type": "text", + "content": "Given that " + }, + { + "bbox": [ + 104, + 80, + 504, + 115 
+ ], + "type": "inline_equation", + "content": "\\tilde{C}_2 \\geq 2\\sigma_{\\max}\\sqrt{2\\alpha}\\tilde{C}_1" + }, + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "type": "text", + "content": ", when " + }, + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "type": "text", + "content": " is sufficiently small, " + }, + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "type": "inline_equation", + "content": "\\|\\tilde{\\Delta}_{k,t}^{(s)}\\|_2 \\leq \\tilde{C}_2\\sqrt{\\eta\\log\\frac{2\\alpha K}{\\eta\\delta}}" + }, + { + "bbox": [ + 104, + 80, + 504, + 115 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "text", + "content": "To sum up, for " + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "inline_equation", + "content": "\\| \\tilde{\\Delta}_{k,t}^{(s)} \\|_2 \\leq \\tilde{C}_2 \\sqrt{\\eta \\log \\frac{2\\alpha K}{\\eta \\delta}}" + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "text", + "content": " for all " + }, + { + 
"bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "inline_equation", + "content": "0 \\leq t \\leq H" + }, + { + "bbox": [ + 104, + 121, + 505, + 152 + ], + "type": "text", + "content": ". By triangle inequality," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 182, + 156, + 428, + 192 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 156, + 428, + 192 + ], + "spans": [ + { + "bbox": [ + 182, + 156, + 428, + 192 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\mathbf {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\frac {1}{K} \\sum_ {k \\in [ K ]} \\| \\tilde {\\mathbf {A}} _ {k, H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {2} \\sqrt {\\eta \\log \\frac {2 \\alpha K}{\\eta \\delta}}.", + "image_path": "b381d8d51dbf2921797548f0e7f065526449ccb0caacf80b3ac1e16ba7f0c74c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 494, + 194, + 504, + 205 + ], + "blocks": [ + { + "bbox": [ + 494, + 194, + 504, + 205 + ], + "lines": [ + { + "bbox": [ + 494, + 194, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 494, + 194, + 504, + 205 + ], + "type": "image", + "image_path": "a563d52b12a9a29f592334f6cdd7f71cbaef23261e24e30e5d89320540334e5b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 217, + 504, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 217, + 504, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 504, + 240 + ], + "type": "text", + "content": "The combination of Lemma K.13 and Lemma K.14 leads to the following lemma, which states that the Local SGD iterate will enter " + }, + { + "bbox": [ + 104, + 217, + 504, + 240 + ], + "type": 
"inline_equation", + "content": "\\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 217, + 504, + 240 + ], + "type": "text", + "content": " after " + }, + { + "bbox": [ + 104, + 217, + 504, + 240 + ], + "type": "inline_equation", + "content": "s_0" + }, + { + "bbox": [ + 104, + 217, + 504, + 240 + ], + "type": "text", + "content": " rounds with high probability." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "text", + "content": "Lemma K.15. Given " + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(0)}" + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "inline_equation", + "content": "\\Phi (\\bar{\\theta}^{(0)})\\in \\Gamma" + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "text", + "content": ", then for " + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "text", + "content": ", there exists a positive constant " + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "inline_equation", + "content": "\\tilde{C}_4" + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "text", + "content": " such that with probability at least " + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 243, + 505, + 269 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 209, + 273, + 400, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 273, + 400, + 300 + ], + 
"spans": [ + { + "bbox": [ + 209, + 273, + 400, + 300 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {0})} - \\boldsymbol {\\phi} ^ {(0)} \\| _ {2} \\leq \\frac {1}{4} \\sqrt {\\frac {\\mu}{\\rho_ {2}}} \\epsilon_ {0} + \\tilde {C} _ {4} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.", + "image_path": "442822f41ba97ba818f9305d7c4d1f900ca97f2f9e99f91b6978368be6438042.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 311, + 378, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 311, + 378, + 323 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 378, + 323 + ], + "type": "text", + "content": "Proof. First, we prove by induction that for " + }, + { + "bbox": [ + 105, + 311, + 378, + 323 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 105, + 311, + 378, + 323 + ], + "type": "text", + "content": ", when" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 145, + 327, + 505, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 327, + 505, + 354 + ], + "spans": [ + { + "bbox": [ + 145, + 327, + 505, + 354 + ], + "type": "interline_equation", + "content": "\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 H K s _ {0}}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < s _ {0}, \\tag {40}", + "image_path": "ad77f7312cf3a0a120c7b25c69c96795a1d45ee2164396104a9350fa0795e717.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 357, + 287, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 287, + 373 + ], + "type": "text", + "content": "the closeness of " + }, + { + "bbox": [ + 104, + 357, + 287, + 373 + ], + "type": "inline_equation", + "content": 
"\\bar{\\pmb{\\theta}}^{(s)}" + }, + { + "bbox": [ + 104, + 357, + 287, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 357, + 287, + 373 + ], + "type": "inline_equation", + "content": "\\pmb{u}_0^{(s)}" + }, + { + "bbox": [ + 104, + 357, + 287, + 373 + ], + "type": "text", + "content": " is bounded by" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 164, + 376, + 505, + 407 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 376, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 164, + 376, + 505, + 407 + ], + "type": "interline_equation", + "content": "\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\right\\| _ {2} \\leq \\sum_ {l = 1} ^ {s} \\tilde {C} _ {1} ^ {l} \\left(\\eta G + \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {0}}{\\eta \\delta}}\\right), \\quad \\forall 0 \\leq s \\leq s _ {0}. \\tag {41}", + "image_path": "89a147ffe86ad7e7957e2f688a55d8c4175cbe7e47dee7f5491a81e4de739b48.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "inline_equation", + "content": "s = 0" + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(0)} = \\pmb{u}_0^{(0)}" + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "content": ". Assume that (41) holds for round " + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "content": ". 
Then by Lemma K.13, for all " + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "inline_equation", + "content": "0 \\leq t \\leq H" + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 179, + 438, + 431, + 505 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 438, + 431, + 505 + ], + "spans": [ + { + "bbox": [ + 179, + 438, + 431, + 505 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\boldsymbol {u} _ {t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1} \\| \\tilde {\\boldsymbol {u}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\tilde {C} _ {1} \\eta G \\\\ = \\tilde {C} _ {1} \\| \\bar {\\boldsymbol {\\theta}} _ {0} ^ {(s)} - \\boldsymbol {u} _ {0} ^ {(s)} \\| _ {2} + \\tilde {C} _ {1} \\eta G \\\\ \\leq \\sum_ {l = 1} ^ {s} \\tilde {C} _ {1} ^ {l + 1} \\left(\\eta G + \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {0}}{\\eta \\delta}}\\right) + \\tilde {C} _ {1} \\eta G. 
\\\\ \\end{array}", + "image_path": "e803cfdf463311a586e70b8737351a2f46140f21632249e357c6e82273dbb1c3.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 511, + 504, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 504, + 534 + ], + "type": "text", + "content": "Therefore, for sufficiently small " + }, + { + "bbox": [ + 104, + 511, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 511, + 504, + 534 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 511, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)} \\in \\mathcal{Z}^\\epsilon" + }, + { + "bbox": [ + 104, + 511, + 504, + 534 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 511, + 504, + 534 + ], + "type": "inline_equation", + "content": "\\forall 0 \\leq t \\leq H" + }, + { + "bbox": [ + 104, + 511, + 504, + 534 + ], + "type": "text", + "content": ". 
Combining the above inequality with Lemma K.14, we have" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 184, + 537, + 425, + 605 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 537, + 425, + 605 + ], + "spans": [ + { + "bbox": [ + 184, + 537, + 425, + 605 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\boldsymbol {u} _ {0} ^ {(s + 1)} \\right\\| _ {2} = \\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\boldsymbol {u} _ {H} ^ {(s)} \\right\\| _ {2} \\\\ \\leq \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} + \\| \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} - \\boldsymbol {u} _ {H} ^ {(s)} \\| _ {2} \\\\ \\leq \\sum_ {l = 1} ^ {s + 1} \\tilde {C} _ {1} ^ {l + 1} \\left(\\eta G + \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {0}}{\\eta \\delta}}\\right), \\\\ \\end{array}", + "image_path": "ea89f0d6784f50849ab61fcf4ea7e8648c639c336045ccbbe33246020c411122.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 609, + 233, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 233, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 233, + 620 + ], + "type": "text", + "content": "which concludes the induction." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 625, + 397, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 397, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 397, + 637 + ], + "type": "text", + "content": "Therefore, when (40) holds, there exists a positive constant " + }, + { + "bbox": [ + 105, + 625, + 397, + 637 + ], + "type": "inline_equation", + "content": "\\tilde{C}_4" + }, + { + "bbox": [ + 105, + 625, + 397, + 637 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 233, + 641, + 377, + 668 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 641, + 377, + 668 + ], + "spans": [ + { + "bbox": [ + 233, + 641, + 377, + 668 + ], + "type": "interline_equation", + "content": "\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {0})} - \\boldsymbol {u} _ {0} ^ {(s _ {0})} \\right\\| _ {2} \\leq \\tilde {C} _ {4} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.", + "image_path": "0d10616e685b987f710add2289545c6c6093ce5664a17d5d6aa4fa0819201935.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 671, + 198, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 671, + 198, + 687 + ], + "spans": [ + { + "bbox": [ + 105, + 671, + 198, + 687 + ], + "type": "text", + "content": "By definition of " + }, + { + "bbox": [ + 105, + 671, + 198, + 687 + ], + "type": "inline_equation", + "content": "\\pmb{u}_0^{(s_0)}" + }, + { + "bbox": [ + 105, + 671, + 198, + 687 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 209, + 690, + 400, + 718 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 690, + 400, + 718 + ], + "spans": [ + { + "bbox": [ + 209, + 690, + 400, + 718 + ], + "type": "interline_equation", + "content": "\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {0})} - \\phi^ {(0)} \\right\\| _ {2} \\leq 
\\frac {1}{4} \\sqrt {\\frac {\\mu}{\\rho_ {2}}} \\epsilon_ {0} + \\tilde {C} _ {4} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.", + "image_path": "5987dc485a477534a15c6c260d5f78d74d077c40539b583146e219052d1ae8f5.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 719, + 415, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 719, + 415, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 415, + 732 + ], + "type": "text", + "content": "Finally, according to Lemma K.12, (40) holds with probability at least " + }, + { + "bbox": [ + 105, + 719, + 415, + 732 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 105, + 719, + 415, + 732 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 248, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 248, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 248, + 94 + ], + "type": "text", + "content": "K.5.3 PROOF FOR SUBPHASE 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + 
"type": "text", + "content": "In subphase 2, we show that the iterate can reach within " + }, + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}} (\\sqrt{\\eta})" + }, + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "type": "text", + "content": " distance from " + }, + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "type": "text", + "content": " after " + }, + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log \\frac{1}{\\eta})" + }, + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "type": "text", + "content": " rounds with high probability. The following lemma manifests how the potential function " + }, + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi} (\\bar{\\pmb{\\theta}}^{(s)})" + }, + { + "bbox": [ + 104, + 100, + 504, + 138 + ], + "type": "text", + "content": " evolves after one round." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 140, + 453, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 140, + 453, + 154 + ], + "spans": [ + { + "bbox": [ + 104, + 140, + 453, + 154 + ], + "type": "text", + "content": "Lemma K.16. 
Given " + }, + { + "bbox": [ + 104, + 140, + 453, + 154 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 140, + 453, + 154 + ], + "type": "text", + "content": ", for " + }, + { + "bbox": [ + 104, + 140, + 453, + 154 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 140, + 453, + 154 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 140, + 453, + 154 + ], + "type": "inline_equation", + "content": "1 - \\delta" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 150, + 160, + 459, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 160, + 459, + 186 + ], + "spans": [ + { + "bbox": [ + 150, + 160, + 459, + 186 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {k, t} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) \\leq \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H", + "image_path": "31796a09329bfbb872987281b7ea123938c9c42e55b926a9fd06b9e3c707d3b2.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 191, + 124, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 191, + 124, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 124, + 201 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 164, + 206, + 446, + 233 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 206, + 446, + 233 + ], + "spans": [ + { + "bbox": [ + 164, + 206, + 446, + 233 + ], + "type": "interline_equation", + "content": "\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} (\\bar 
{\\boldsymbol {\\theta}} ^ {(s + 1)}) \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},", + "image_path": "852a552afdaca58bc088828f5310a850531ed6515fd25c099e3246d8ab8e21a8.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 239, + 235, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 235, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 235, + 251 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 239, + 235, + 251 + ], + "type": "inline_equation", + "content": "\\tilde{C}_5" + }, + { + "bbox": [ + 104, + 239, + 235, + 251 + ], + "type": "text", + "content": " is a positive constant." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "text", + "content": "Proof. Since " + }, + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "text", + "content": ", then for all " + }, + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "inline_equation", + "content": "0\\leq t\\leq H" + }, + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "text", + "content": " by the definition of the working zone. 
By Lemma K.6, for " + }, + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\eta \\leq \\frac{1}{\\rho_2}" + }, + { + "bbox": [ + 104, + 265, + 504, + 292 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 147, + 297, + 462, + 317 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 297, + 462, + 317 + ], + "spans": [ + { + "bbox": [ + 147, + 297, + 462, + 317 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) ^ {t} \\left(\\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}\\right) \\leq \\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}, \\quad \\forall 0 \\leq t \\leq H.", + "image_path": "853c6eed5e04039b2fc670c80d59db103bab747e6607dc0b5db0fdd5f485275e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 322, + 193, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 193, + 334 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 193, + 334 + ], + "type": "text", + "content": "Specially, for " + }, + { + "bbox": [ + 104, + 322, + 193, + 334 + ], + "type": "inline_equation", + "content": "t = H" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 151, + 339, + 457, + 358 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 339, + 457, + 358 + ], + "spans": [ + { + "bbox": [ + 151, + 339, + 457, + 358 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) ^ {\\frac {\\alpha}{\\eta}} \\left(\\mathcal {L} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}\\right) \\leq \\exp (- \\alpha \\mu) \\left(\\mathcal {L} \\left(\\bar {\\boldsymbol 
{\\theta}} ^ {(s)}\\right) - \\mathcal {L} ^ {*}\\right).", + "image_path": "430a4cf300a6b1ac31b9be3e91de0022244339b4e6053d9ba3deedfbb1d6d941.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 364, + 150, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 364, + 150, + 375 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 150, + 375 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 236, + 380, + 373, + 396 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 380, + 373, + 396 + ], + "spans": [ + { + "bbox": [ + 236, + 380, + 373, + 396 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right).", + "image_path": "5dff8f3d50b6aebad6d42dbd2c772edb1e65f2eedf62e616b0c83ea93d58aeb1.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 401, + 377, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 401, + 377, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 377, + 414 + ], + "type": "text", + "content": "According to the proof of Lemma K.14, for " + }, + { + "bbox": [ + 105, + 401, + 377, + 414 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 105, + 401, + 377, + 414 + ], + "type": "text", + "content": ", when" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 176, + 419, + 504, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 419, + 504, + 452 + ], + "spans": [ + { + "bbox": [ + 176, + 419, + 504, + 452 + ], + "type": "interline_equation", + "content": "\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac 
{2 \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, \\tag {42}", + "image_path": "90ac41446cef613745fba26d3ceda3cf497922d50ab85cf20460bf6d15251ef8.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 457, + 249, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 249, + 470 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 249, + 470 + ], + "type": "text", + "content": "there exists a constant " + }, + { + "bbox": [ + 104, + 457, + 249, + 470 + ], + "type": "inline_equation", + "content": "\\tilde{C}_3" + }, + { + "bbox": [ + 104, + 457, + 249, + 470 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 187, + 475, + 422, + 502 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 475, + 422, + 502 + ], + "spans": [ + { + "bbox": [ + 187, + 475, + 422, + 502 + ], + "type": "interline_equation", + "content": "\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],", + "image_path": "5d5a3219d96647d92a073e070b7baba71e983efef544ec4eb61c2b4129ff9eab.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 506, + 123, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 123, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 123, + 517 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 233, + 521, + 378, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 521, + 378, + 548 + ], + "spans": [ + { + "bbox": [ + 233, + 521, + 378, + 548 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {3} 
\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.", + "image_path": "c79adb12d7d95ee2405b16cde4deeec4ca0039cb1e4808a10a046a212c7559d0.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 553, + 445, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 553, + 445, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 553, + 445, + 571 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 553, + 445, + 571 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_1},\\forall 0\\leq t\\leq H,\\bar{\\pmb{\\theta}}^{(s + 1)}\\in \\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 104, + 553, + 445, + 571 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 553, + 445, + 571 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}_{k,t}^{(s)}\\in \\Gamma^{\\epsilon_2},\\forall 0\\leq t\\leq H,k\\in [K]" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "text", + "content": "By Lemma K.7, " + }, + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi}(\\cdot)" + }, + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "inline_equation", + "content": "\\sqrt{2\\rho_2}" + }, + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "text", + "content": "-Lipschitz in " + }, + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "inline_equation", + "content": "\\mathcal{M}^{\\epsilon_4}" + }, + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "text", + "content": ". 
Therefore, when (42) holds, there exists a constant " + }, + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "inline_equation", + "content": "\\tilde{C}_5 := \\sqrt{2\\rho_2}\\tilde{C}_3" + }, + { + "bbox": [ + 104, + 575, + 504, + 601 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 214, + 606, + 394, + 652 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 606, + 394, + 652 + ], + "spans": [ + { + "bbox": [ + 214, + 606, + 394, + 652 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq \\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) + \\sqrt {2 \\rho_ {2}} \\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\right\\| _ {2} \\\\ \\leq \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\\\ \\end{array}", + "image_path": "6bd67989d0a922fece77e6f4bedacb9afbc5d868b5c75644a350551a71a4edd5.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 657, + 123, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 123, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 123, + 666 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 197, + 671, + 412, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 671, + 412, + 715 + ], + "spans": [ + { + "bbox": [ + 197, + 671, + 412, + 715 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) \\leq \\tilde {\\Psi} (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) + \\sqrt {2 \\rho_ {2}} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\\\ \\leq \\exp (- 
\\alpha \\mu / 2) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}. \\\\ \\end{array}", + "image_path": "885eddeba3c1e1b120c4fdf72481af8fc8fc75d1bbf4d843647276d9421ea458.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 720, + 375, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 720, + 375, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 720, + 375, + 732 + ], + "type": "text", + "content": "Finally, by Lemma K.12, (42) holds with probability at least " + }, + { + "bbox": [ + 104, + 720, + 375, + 732 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 720, + 375, + 732 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 494, + 720, + 504, + 730 + ], + "blocks": [ + { + "bbox": [ + 494, + 720, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 494, + 720, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 494, + 720, + 504, + 730 + ], + "type": "image", + "image_path": "84816759e8f94b41e6aec8648bc3a6fe2aa13567683cabc44b1d77d48be46554.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 
81, + 504, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 109 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 109 + ], + "type": "text", + "content": "We are thus led to the following lemma which characterizes the evolution of the potential " + }, + { + "bbox": [ + 104, + 81, + 504, + 109 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi} (\\bar{\\theta}^{(s)})" + }, + { + "bbox": [ + 104, + 81, + 504, + 109 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 81, + 504, + 109 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi} (\\pmb{\\theta}_{k,t}^{(s)})" + }, + { + "bbox": [ + 104, + 81, + 504, + 109 + ], + "type": "text", + "content": " over multiple rounds." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "text", + "content": "Lemma K.17. 
Given " + }, + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\| _2\\leq \\frac{1}{2}\\sqrt{\\frac{\\mu}{\\rho_2}}\\epsilon_0," + }, + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "text", + "content": " and any integer " + }, + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "inline_equation", + "content": "1\\le R\\le" + }, + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{tot}}" + }, + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "text", + "content": " , with probability at least " + }, + { + "bbox": [ + 104, + 114, + 505, + 143 + ], + "type": "inline_equation", + "content": "1 - \\delta" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 146, + 505, + 190 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 146, + 505, + 190 + ], + "spans": [ + { + "bbox": [ + 113, + 146, + 505, + 190 + ], + "type": "interline_equation", + "content": "\\bar {\\boldsymbol {\\theta}} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {0}}, \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq \\exp (- \\alpha \\mu s / 2) \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(0)}\\right) + \\frac {1}{1 - \\exp (- \\alpha \\mu / 2)} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}, \\forall 0 \\leq s \\leq R. 
\\tag {43}", + "image_path": "efc2e27e52f834a14c7c8804c517bf3f019f9713cfbe3c41391d6ce146df51d4.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 194, + 162, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 162, + 205 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 162, + 205 + ], + "type": "text", + "content": "Furthermore," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 208, + 505, + 241 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 208, + 505, + 241 + ], + "spans": [ + { + "bbox": [ + 117, + 208, + 505, + 241 + ], + "type": "interline_equation", + "content": "\\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, 0 \\leq s < R, k \\in [ K ]. \\tag {44}", + "image_path": "012bcedf423ea7021ad55e4714864adf49f058e87989fa6db221f2c2015a5d12.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 251, + 343, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 343, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 343, + 264 + ], + "type": "text", + "content": "Proof. 
We prove induction that for " + }, + { + "bbox": [ + 105, + 251, + 343, + 264 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 105, + 251, + 343, + 264 + ], + "type": "text", + "content": ", when" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 147, + 269, + 505, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 269, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 147, + 269, + 505, + 300 + ], + "type": "interline_equation", + "content": "\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 R \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, 0 \\leq s < R, \\tag {45}", + "image_path": "65797cceaa7bb4e1292318c03fc7c5e207ef87362067eeaa1d414b0a37a3fcfe.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 304, + 279, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 279, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 279, + 316 + ], + "type": "text", + "content": "then for all " + }, + { + "bbox": [ + 104, + 304, + 279, + 316 + ], + "type": "inline_equation", + "content": "0 \\leq s \\leq R" + }, + { + "bbox": [ + 104, + 304, + 279, + 316 + ], + "type": "text", + "content": ", (43) and (44) hold." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "inline_equation", + "content": "s = 0" + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(0)} \\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "text", + "content": " and (43) trivially holds. By Lemma K.16, (44) holds. Assume that (43) and (44) hold for round " + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "inline_equation", + "content": "s - 1" + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "text", + "content": ". Then for round " + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "text", + "content": ", by Lemma K.16, " + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)} \\in \\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 104, + 319, + 504, + 344 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 164, + 348, + 446, + 415 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 348, + 446, + 415 + ], + "spans": [ + { + "bbox": [ + 164, + 348, + 446, + 415 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\Psi (\\bar {\\pmb {\\theta}} ^ {(s)}) \\leq \\exp (- \\alpha \\mu / 2) \\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(s - 1)}) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}} \\\\ \\leq \\exp (- \\alpha \\mu s / 2) \\tilde 
{\\Psi} (\\bar {\\pmb {\\theta}} ^ {(0)}) + \\frac {1}{1 - \\exp (- \\alpha \\mu / 2)} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}, \\\\ \\end{array}", + "image_path": "67b2625498ebe48a99feeb3c9cc961678a1b0dbde60b0d65c69a331caa154f7b.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 418, + 444, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 418, + 444, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 418, + 444, + 430 + ], + "type": "text", + "content": "where the second inequality comes from the induction hypothesis. By Lemma K.10," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 156, + 433, + 454, + 526 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 433, + 454, + 526 + ], + "spans": [ + { + "bbox": [ + 156, + 433, + 454, + 526 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {\\phi} ^ {(s)} \\| _ {2} \\leq \\frac {2}{\\sqrt {2 \\mu}} \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\\\ \\leq \\frac {2}{\\sqrt {2 \\mu}} \\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(0)}) + \\frac {2}{\\sqrt {2 \\mu} (1 - \\exp (- \\alpha \\mu / 2))} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}} \\\\ \\leq \\frac {1}{2} \\epsilon_ {0} + \\frac {2}{\\sqrt {2 \\mu} (1 - \\exp (- \\alpha \\mu / 2))} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}. 
\\\\ \\end{array}", + "image_path": "66bf14cddef4a6344bdbc6c917adf2038acb5abc613db189b60de8c249ba9884.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "text", + "content": "Here, the last inequality uses " + }, + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi} (\\bar{\\pmb{\\theta}}^{(0)})\\leq \\sqrt{\\frac{\\rho_2}{2}}\\| \\bar{\\pmb{\\theta}}^{(s)} - \\phi^{(0)}\\| _2\\leq \\frac{1}{2}\\sqrt{\\frac{\\mu}{2}}\\epsilon_0" + }, + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "text", + "content": ". Hence, when " + }, + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "text", + "content": " is sufficiently small, " + }, + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}^{(s)}\\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "text", + "content": ". 
Still by Lemma K.16, " + }, + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}_{k,t}^{(s)}\\in \\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 104, + 530, + 504, + 560 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 229, + 564, + 380, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 564, + 380, + 597 + ], + "spans": [ + { + "bbox": [ + 229, + 564, + 380, + 597 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) + \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R}{\\eta \\delta}}.", + "image_path": "5811d47cce694bc6491ab8e12aa85a64eacacaa61c8c81290173b2af82daafe1.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 599, + 414, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 599, + 414, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 414, + 612 + ], + "type": "text", + "content": "Finally, according to Lemma K.12, (45) holds with probability at least " + }, + { + "bbox": [ + 105, + 599, + 414, + 612 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 105, + 599, + 414, + 612 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 494, + 617, + 504, + 627 + ], + "blocks": [ + { + "bbox": [ + 494, + 617, + 504, + 627 + ], + "lines": [ + { + "bbox": [ + 494, + 617, + 504, + 627 + ], + "spans": [ + { + "bbox": [ + 494, + 617, + 504, + 627 + ], + "type": "image", + "image_path": "eb002efdbfca18cd279a314f92e736b1ccc68740b225b627fcac84867376f49e.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 640, + 438, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 640, + 438, + 652 + ], + "spans": [ + { + "bbox": [ + 104, + 640, + 438, + 652 + ], + "type": "text", + "content": "The following corollary is a direct consequence of Lemma K.17 and Lemma K.10." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "type": "text", + "content": "Corollary K.1. Let " + }, + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "type": "inline_equation", + "content": "s_1 \\coloneqq \\lceil \\frac{20}{\\alpha \\mu} \\log \\frac{1}{\\eta} \\rceil" + }, + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "type": "text", + "content": ". 
Given " + }, + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{\\theta}}^{(0)} - \\pmb{\\phi}^{(0)} \\|_2 \\leq \\frac{1}{2} \\sqrt{\\frac{\\mu}{\\rho_2}} \\epsilon_0" + }, + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "type": "text", + "content": ", for " + }, + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 655, + 504, + 685 + ], + "type": "inline_equation", + "content": "1 - \\delta" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 173, + 689, + 505, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 689, + 505, + 715 + ], + "spans": [ + { + "bbox": [ + 173, + 689, + 505, + 715 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s _ {1})}\\right) \\leq \\tilde {C} _ {6} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\| \\bar {\\boldsymbol {\\theta}} ^ {(s _ {1})} - \\phi^ {(s _ {1})} \\| _ {2} \\leq \\tilde {C} _ {6} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\tag {46}", + "image_path": "b5f400fdf9bcb2d6e6a227be16980a4298d03117ba0e6d6ee09d6c6cdaface5c.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "type": "inline_equation", + "content": "\\tilde{C}_6" + }, + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "type": "text", + "content": " is a constant." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 113 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 113 + ], + "type": "text", + "content": "Proof. Substituting in " + }, + { + "bbox": [ + 104, + 81, + 506, + 113 + ], + "type": "inline_equation", + "content": "R = s_1" + }, + { + "bbox": [ + 104, + 81, + 506, + 113 + ], + "type": "text", + "content": " to Lemma K.17 and applying " + }, + { + "bbox": [ + 104, + 81, + 506, + 113 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{\\theta}}^{(s_1)} - \\phi^{(s)}\\|_2 \\leq \\sqrt{\\frac{2}{\\mu}}\\tilde{\\Psi}(\\bar{\\pmb{\\theta}}^{(s_1)})" + }, + { + "bbox": [ + 104, + 81, + 506, + 113 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 81, + 506, + 113 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}^{(s_1)} \\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 81, + 506, + 113 + ], + "type": "text", + "content": ", we have the lemma." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 125, + 504, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 125, + 504, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 504, + 150 + ], + "type": "text", + "content": "Finally, we provide a high probability bound for the change of the projection on the manifold after " + }, + { + "bbox": [ + 104, + 125, + 504, + 150 + ], + "type": "inline_equation", + "content": "s_1" + }, + { + "bbox": [ + 104, + 125, + 504, + 150 + ], + "type": "text", + "content": " rounds " + }, + { + "bbox": [ + 104, + 125, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\| \\phi^{(s_1)} - \\phi^{(0)} \\|_2" + }, + { + "bbox": [ + 104, + 125, + 504, + 150 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "text", + "content": "Lemma K.18. Let " + }, + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "inline_equation", + "content": "s_1 \\coloneqq \\lceil \\frac{20}{\\alpha \\mu} \\log \\frac{1}{\\eta} \\rceil" + }, + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "text", + "content": ". Given " + }, + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\theta}^{(0)} - \\phi^{(0)} \\|_2 \\leq \\frac{1}{2} \\sqrt{\\frac{\\mu}{\\rho_2}} \\epsilon_0" + }, + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "text", + "content": ". 
For " + }, + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 154, + 504, + 184 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 222, + 190, + 388, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 190, + 388, + 217 + ], + "spans": [ + { + "bbox": [ + 222, + 190, + 388, + 217 + ], + "type": "interline_equation", + "content": "\\left\\| \\phi^ {(s _ {1})} - \\phi^ {(0)} \\right\\| _ {2} \\leq \\tilde {C} _ {8} \\log \\frac {1}{\\eta} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.", + "image_path": "04f1e64a1b5c28a56e681fda1c977ead1ceaf306cc8c9dcf4a82d53755036eef.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 229, + 327, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 229, + 327, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 229, + 327, + 243 + ], + "type": "text", + "content": "Proof. 
From Lemma K.17, for " + }, + { + "bbox": [ + 104, + 229, + 327, + 243 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 229, + 327, + 243 + ], + "type": "text", + "content": ", when" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 146, + 248, + 505, + 280 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 248, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 146, + 248, + 505, + 280 + ], + "type": "interline_equation", + "content": "\\left\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 s _ {1} \\alpha K}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, 0 \\leq s < s _ {1}, \\tag {47}", + "image_path": "401e098936594bb4558b1032bd29ef0ac7e54ddc01dcd008d61744b2fc0245bd.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "text", + "content": "then " + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "inline_equation", + "content": "0\\leq s\\leq s_{1}" + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "text", + "content": ". 
By the definition of " + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "inline_equation", + "content": "\\tilde{u}_t^{(s)}\\in \\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "inline_equation", + "content": "0\\leq t\\leq H,0\\leq s\\leq s_{1}" + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "text", + "content": ". By triangle inequality, " + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "inline_equation", + "content": "\\| \\phi^{(s_1)} - \\phi^{(0)}\\| _2" + }, + { + "bbox": [ + 104, + 288, + 504, + 315 + ], + "type": "text", + "content": " can be decomposed as follows." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 320, + 505, + 388 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 320, + 505, + 388 + ], + "spans": [ + { + "bbox": [ + 141, + 320, + 505, + 388 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\phi^ {(s _ {1})} - \\phi^ {(0)} \\| _ {2} \\leq \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\phi^ {(s + 1)} - \\phi^ {(s)} \\| _ {2} \\\\ \\leq \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) - \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}\\right) \\| _ {2} + \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\Phi \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}\\right) - \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) \\| _ {2}. 
\\tag {48} \\\\ \\end{array}", + "image_path": "16e8fb61b0a3068cd5e723cf877ff71dde4b67ef661d0cfe86074f69c8565821.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 393, + 361, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 393, + 361, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 393, + 361, + 405 + ], + "type": "text", + "content": "By Lemma K.14, when (47) hold, then for all " + }, + { + "bbox": [ + 105, + 393, + 361, + 405 + ], + "type": "inline_equation", + "content": "0 \\leq s < s_1 - 1" + }, + { + "bbox": [ + 105, + 393, + 361, + 405 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 233, + 411, + 378, + 439 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 411, + 378, + 439 + ], + "spans": [ + { + "bbox": [ + 233, + 411, + 378, + 439 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\pmb {\\theta}} ^ {(s + 1)} - \\tilde {\\pmb {u}} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {1}}{\\eta \\delta}}.", + "image_path": "06f9caed432fd4fb5da9df12bfc53346445502acbaff914c0c49c9e6d5982923.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "text", + "content": "This implies that " + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}^{(s + 1)}\\in B^{\\epsilon_1}(\\tilde{\\pmb{u}}_H^{(s)})" + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "text", + "content": ". 
Since for all " + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\pmb {\\theta}\\in \\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\| \\partial \\Phi (\\pmb {\\theta})\\| _2\\leq \\nu_1" + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\Phi (\\cdot)" + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\nu_{1}" + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "text", + "content": "-Lipschitz in " + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "inline_equation", + "content": "B^{\\epsilon_1}(\\tilde{\\pmb{u}}_H^{(s)})" + }, + { + "bbox": [ + 104, + 445, + 504, + 474 + ], + "type": "text", + "content": ". This gives" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 206, + 479, + 505, + 525 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 479, + 505, + 525 + ], + "spans": [ + { + "bbox": [ + 206, + 479, + 505, + 525 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) - \\Phi (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) \\| _ {2} \\leq \\nu_ {1} \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\\\ \\leq \\nu_ {1} \\tilde {C} _ {3} \\sqrt {\\eta \\log \\frac {s _ {1}}{\\eta \\delta}}. 
\\tag {49} \\\\ \\end{array}", + "image_path": "2bb99ad33f2ed0b9af3a116a2048d9f0c5fc1b200726cf8f0478b0695fcc4db0.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "text", + "content": "Then we analyze " + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{\\theta}}^{(s + 1)} - \\tilde{\\pmb{u}}_H^{(s)}\\| _2" + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "text", + "content": ". By Lemma K.9 and the definition of " + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "text", + "content": ", there exists " + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "inline_equation", + "content": "\\phi \\in \\Gamma" + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)}\\in B^{\\epsilon_1}(\\phi),\\forall 0\\leq t\\leq H" + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "text", + "content": ". 
Therefore, we can expand " + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "inline_equation", + "content": "\\Phi (\\tilde{\\pmb{u}}_{t + 1}^{(s)})" + }, + { + "bbox": [ + 104, + 532, + 504, + 561 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 135, + 567, + 474, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 567, + 474, + 635 + ], + "spans": [ + { + "bbox": [ + 135, + 567, + 474, + 635 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)}\\right) = \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\eta \\nabla \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\\\ = \\Phi (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\eta \\partial \\Phi (\\tilde {\\boldsymbol {u}} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {u} _ {t} ^ {(s)}) + \\frac {\\eta^ {2}}{2} \\partial^ {2} \\Phi (\\hat {\\boldsymbol {u}} _ {t} ^ {(s)}) [ \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}), \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) ] \\\\ = \\Phi (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) + \\frac {\\eta^ {2}}{2} \\partial^ {2} \\Phi \\left(c _ {t} ^ {(s)} \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} + (1 - c _ {t} ^ {(s)}) \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)}\\right) [ \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}), \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) ], \\\\ \\end{array}", + "image_path": "83e614f0110ae92b9d525bd7c7e18c1a37d804cabd7525194316121e94f39a7c.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 641, + 247, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 641, + 247, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 641, + 247, + 656 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 641, + 247, + 
656 + ], + "type": "inline_equation", + "content": "c_t^{(s)} \\in (0,1)" + }, + { + "bbox": [ + 105, + 641, + 247, + 656 + ], + "type": "text", + "content": ". Then we have" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 662, + 498, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 498, + 730 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 498, + 730 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\Phi (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) - \\Phi (\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}) \\| _ {2} \\leq \\frac {\\eta^ {2}}{2} \\sum_ {t = 0} ^ {H - 1} \\| \\partial^ {2} \\Phi \\left(\\left(c _ {t} ^ {(s)} \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} + (1 - c _ {t} ^ {(s)}) \\tilde {\\boldsymbol {u}} _ {t + 1} ^ {(s)}\\right)\\right) [ \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} ^ {(s)}), \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) ] \\| _ {2} \\\\ \\leq \\frac {\\eta^ {2}}{2} \\nu_ {2} \\sum_ {t = 0} ^ {H - 1} \\| \\nabla \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) \\| _ {2} ^ {2}. 
\\\\ \\end{array}", + "image_path": "c36e40373e9989c0f8b58fc7b80b208c595b4faff17da4b88381399eea5add27.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 376, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 376, + 96 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 376, + 96 + ], + "type": "text", + "content": "By Lemma K.6, " + }, + { + "bbox": [ + 104, + 80, + 376, + 96 + ], + "type": "inline_equation", + "content": "\\frac{\\eta}{2}\\| \\nabla \\mathcal{L}(\\tilde{\\boldsymbol{u}}_t^{(s)})\\| _2^2\\leq \\mathcal{L}(\\tilde{\\boldsymbol{u}}_t^{(s)}) - \\mathcal{L}(\\tilde{\\boldsymbol{u}}_{t + 1}^{(s)})" + }, + { + "bbox": [ + 104, + 80, + 376, + 96 + ], + "type": "text", + "content": " . 
Therefore," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 119, + 101, + 504, + 168 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 101, + 504, + 168 + ], + "spans": [ + { + "bbox": [ + 119, + 101, + 504, + 168 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right) - \\Phi \\left(\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}\\right) \\right\\| _ {2} \\leq \\eta \\nu_ {2} \\left(\\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}\\right) - \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}\\right)\\right) \\\\ \\leq \\eta \\nu_ {2} \\left[ \\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\right] ^ {2} \\\\ \\leq \\nu_ {2} \\eta \\left[ 2 \\exp (- \\alpha s \\mu) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(0)}) + \\frac {\\tilde {C} _ {5} ^ {2} \\eta}{(1 - \\exp (- \\alpha \\mu / 2)) ^ {2}} \\log \\frac {s _ {1}}{\\eta \\delta} \\right], \\tag {50} \\\\ \\end{array}", + "image_path": "f3c4e96af5d4c67190fdb24ae463c54955e649472abf99deb0ff998825bc2418.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 172, + 504, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 172, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 172, + 504, + 194 + ], + "type": "text", + "content": "where the last inequality uses Cauchy-Schwartz inequality and Lemma K.17. 
Summing up (50), we obtain" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 198, + 504, + 258 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 198, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 111, + 198, + 504, + 258 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sum_ {s = 0} ^ {s _ {1} - 1} \\| \\Phi (\\tilde {\\boldsymbol {u}} _ {H} ^ {(s)}) - \\Phi (\\tilde {\\boldsymbol {u}} _ {0} ^ {(s)}) \\| _ {2} \\leq \\nu_ {2} \\eta \\left[ 2 \\tilde {\\Psi} (\\tilde {\\boldsymbol {\\theta}} ^ {(0)}) \\sum_ {s = 0} ^ {s _ {1} - 1} \\exp (- \\alpha \\mu s) + \\frac {s _ {1} \\tilde {C} _ {5} ^ {2} \\eta}{(1 - \\exp (- \\alpha \\mu / 2)) ^ {2}} \\log \\frac {s _ {1}}{\\eta \\delta} \\right] \\\\ \\leq \\tilde {C} _ {7} \\eta \\log \\frac {1}{\\eta} \\log \\frac {1}{\\eta \\delta}, \\tag {51} \\\\ \\end{array}", + "image_path": "14723dd7e3eb53fc8c80b40b6aa6d8ce0620e6ff1dd622a56ae49850f63f2d37.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 263, + 478, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 263, + 478, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 478, + 277 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 263, + 478, + 277 + ], + "type": "inline_equation", + "content": "\\tilde{C}_7" + }, + { + "bbox": [ + 104, + 263, + 478, + 277 + ], + "type": "text", + "content": " is a constant. 
Substituting (49) and (51) into (48), for sufficiently small " + }, + { + "bbox": [ + 104, + 263, + 478, + 277 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 263, + 478, + 277 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 186, + 281, + 423, + 337 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 281, + 423, + 337 + ], + "spans": [ + { + "bbox": [ + 186, + 281, + 423, + 337 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\phi^ {(s _ {1})} - \\phi^ {(0)} \\right\\| _ {2} \\leq \\nu_ {1} \\tilde {C} _ {3} s _ {1} \\sqrt {\\eta \\log \\frac {s _ {1}}{\\eta \\delta}} + \\tilde {C} _ {7} \\eta \\log \\frac {1}{\\eta} \\log \\frac {1}{\\eta \\delta} \\\\ \\leq \\tilde {C} _ {8} \\log \\frac {1}{\\eta} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\\\ \\end{array}", + "image_path": "3972a62e5a43da6f846320d54828788f967e5ee74fda859e35fa46b3bc8d7593.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\tilde{C}_8" + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": " is a constant. Finally, according to Lemma K.12, (47) holds with probability at least " + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 380, + 356, + 391 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 356, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 356, + 391 + ], + "type": "text", + "content": "K.6 PHASE 2:ITERATES STAYING CLOSE TO MANIFOLD" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 398, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 398, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 398, + 504, + 425 + ], + "type": "text", + "content": "In this subsection, we show that " + }, + { + "bbox": [ + 104, + 398, + 504, + 425 + ], + "type": "inline_equation", + "content": "\\| \\pmb{x}_{k,t}^{(s)}\\| _2 = \\tilde{\\mathcal{O}} (\\sqrt{\\eta})" + }, + { + "bbox": [ + 104, + 398, + 504, + 425 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 398, + 504, + 425 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{\\theta}}^{(s + r)} - \\bar{\\pmb{\\theta}}^{(s)}\\| _2 = \\tilde{\\mathcal{O}} (\\eta^{0.5 - 0.5\\beta}),\\forall 0\\leq r\\leq R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 398, + 504, + 425 + ], + "type": "text", + "content": " with high probability." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 436, + 249, + 446 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 249, + 446 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 249, + 446 + ], + "type": "text", + "content": "K.6.1 ADDITIONAL NOTATIONS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": "Before presenting the lemmas, we define the following martingale " + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "inline_equation", + "content": "\\{\\pmb{m}_{k,t}^{(s)}\\}_{t = 0}^{H}" + }, + { + "bbox": [ + 104, + 453, + 504, + 478 + ], + "type": "text", + "content": " that will be useful in the proof:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 238, + 479, + 370, + 510 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 479, + 370, + 510 + ], + "spans": [ + { + "bbox": [ + 238, + 479, + 370, + 510 + ], + "type": "interline_equation", + "content": "\\boldsymbol {m} _ {k, t} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t - 1} \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\quad \\boldsymbol {m} _ {k, 0} = \\mathbf {0}.", + "image_path": "7defed94a9f63f542944bfaa842d9decdc2defc9d973d7c1f1d43791b7b574fe.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 514, + 329, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 329, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 329, + 526 + ], + "type": "text", + "content": "We also define " + }, + { + "bbox": [ + 105, + 514, + 329, + 526 + ], + "type": "inline_equation", + "content": "\\tilde{P}:\\mathbb{R}^d\\to \\mathbb{R}^{d\\times d}" + }, + { + "bbox": [ + 105, + 514, + 329, + 526 + ], + "type": "text", + "content": " as an extension of " + }, + { + "bbox": [ 
+ 105, + 514, + 329, + 526 + ], + "type": "inline_equation", + "content": "\\partial \\Phi" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 238, + 531, + 370, + 558 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 531, + 370, + 558 + ], + "spans": [ + { + "bbox": [ + 238, + 531, + 370, + 558 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {P}} (\\boldsymbol {\\theta}) := \\left\\{ \\begin{array}{l l} \\partial \\Phi (\\boldsymbol {\\theta}), & \\text {i f} \\boldsymbol {\\theta} \\in \\Gamma^ {\\epsilon_ {2}}, \\\\ \\mathbf {0}, & \\text {o t h e r w i s e}. \\end{array} \\right.", + "image_path": "2f0db1c226f8021c16e7fbd7f0610bc058e20112f7f7d10697f42d6d772215dd.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 565, + 345, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 565, + 345, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 565, + 345, + 579 + ], + "type": "text", + "content": "Finally, we define a martingale " + }, + { + "bbox": [ + 104, + 565, + 345, + 579 + ], + "type": "inline_equation", + "content": "\\{Z_t^{(s)}: s \\geq 0, 0 \\leq t \\leq H\\}" + }, + { + "bbox": [ + 104, + 565, + 345, + 579 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 140, + 585, + 469, + 619 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 585, + 469, + 619 + ], + "spans": [ + { + "bbox": [ + 140, + 585, + 469, + 619 + ], + "type": "interline_equation", + "content": "\\boldsymbol {Z} _ {t} ^ {(s)} := \\frac {1}{K} \\sum_ {k \\in [ K ]} \\sum_ {r = 0} ^ {s - 1} \\sum_ {\\tau = 0} ^ {H - 1} \\tilde {\\boldsymbol {P}} (\\bar {\\boldsymbol {\\theta}} ^ {(r)}) \\boldsymbol {z} _ {k, t} ^ {(r)} + \\frac {1}{K} \\sum_ {k \\in [ K ]} \\sum_ {\\tau = 0} ^ {t - 1} \\tilde {\\boldsymbol {P}} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\boldsymbol {z} _ {k, t} ^ {(s)}, \\quad \\boldsymbol {Z} 
_ {0} ^ {(0)} = \\mathbf {0}.", + "image_path": "1b7ea7673f5b79b09f0c97483b65d4a15bb85d228784079f439a30c6920583ab.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 631, + 337, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 631, + 337, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 337, + 641 + ], + "type": "text", + "content": "K.6.2 PROOF FOR THE HIGH PROBABILITY BOUNDS" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 650, + 436, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 650, + 436, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 650, + 436, + 662 + ], + "type": "text", + "content": "A direct application of Azuma-Hoeffding's inequality yields the following lemma." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 666, + 504, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 681 + ], + "type": "text", + "content": "Lemma K.19 (Concentration property of " + }, + { + "bbox": [ + 104, + 666, + 504, + 681 + ], + "type": "inline_equation", + "content": "m_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 666, + 504, + 681 + ], + "type": "text", + "content": "). 
With probability at least " + }, + { + "bbox": [ + 104, + 666, + 504, + 681 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 666, + 504, + 681 + ], + "type": "text", + "content": ", the following holds:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 168, + 687, + 441, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 687, + 441, + 714 + ], + "spans": [ + { + "bbox": [ + 168, + 687, + 441, + 714 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {9} \\sqrt {\\frac {1}{\\eta} \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < R _ {\\mathrm {g r p}},", + "image_path": "88822907e5c25861979fd0d6f2c0592c4d1ede533753a8ecadd2289c052c66a7.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "type": "inline_equation", + "content": "\\tilde{C}_9" + }, + { + "bbox": [ + 105, + 719, + 201, + 732 + ], + "type": "text", + "content": " is a constant." 
+ } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 459, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 459, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 459, + 97 + ], + "type": "text", + "content": "Proof. Notice that " + }, + { + "bbox": [ + 104, + 80, + 459, + 97 + ], + "type": "inline_equation", + "content": "\\| \\pmb{m}_{k,t + 1}^{(s)} - \\pmb{m}_{k,t}^{(s)}\\| _2\\leq \\sigma_{\\max}" + }, + { + "bbox": [ + 104, + 80, + 459, + 97 + ], + "type": "text", + "content": ". 
Then by Azuma-Hoeffdings inequality," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 220, + 103, + 388, + 131 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 103, + 388, + 131 + ], + "spans": [ + { + "bbox": [ + 220, + 103, + 388, + 131 + ], + "type": "interline_equation", + "content": "\\mathbb {P} \\left(\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2} \\geq \\epsilon^ {\\prime}\\right) \\leq 2 \\exp \\left(- \\frac {\\epsilon^ {\\prime 2}}{2 t \\sigma_ {\\max } ^ {2}}\\right).", + "image_path": "f6abd710eae8137f947e066bffd1ac7d9f5aa096e81bf36c3bda1ad4effa4b7a.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "text", + "content": "Taking union bound on " + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "text", + "content": " clients, " + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "text", + "content": " local steps and " + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "text", + "content": " rounds, we obtain that the following inequality holds with probability at least " + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 167, + 465, + 194 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 167, + 465, + 194 + ], + "spans": [ + { 
+ "bbox": [ + 143, + 167, + 465, + 194 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {2 K H R _ {\\mathrm {g r p}}}{\\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < R _ {\\mathrm {g r p}}.", + "image_path": "54836b9ad7182c271309b2e3a4732bae781c9ef6af72ca9036957a0a2b172eda.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 201, + 350, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 201, + 350, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 350, + 217 + ], + "type": "text", + "content": "Substituting in " + }, + { + "bbox": [ + 104, + 201, + 350, + 217 + ], + "type": "inline_equation", + "content": "H = \\frac{\\alpha}{\\eta}" + }, + { + "bbox": [ + 104, + 201, + 350, + 217 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 201, + 350, + 217 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}}\\right\\rfloor" + }, + { + "bbox": [ + 104, + 201, + 350, + 217 + ], + "type": "text", + "content": " yields the lemma." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 494, + 201, + 504, + 212 + ], + "blocks": [ + { + "bbox": [ + 494, + 201, + 504, + 212 + ], + "lines": [ + { + "bbox": [ + 494, + 201, + 504, + 212 + ], + "spans": [ + { + "bbox": [ + 494, + 201, + 504, + 212 + ], + "type": "image", + "image_path": "5605f695561744585b52e30a41f9e93710d68c24a2fb59f618e73162bc199eac.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 232, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 504, + 258 + ], + "type": "text", + "content": "Again applying Azuma-Hoeffding's inequality, we have the following lemma about the concentration property of " + }, + { + "bbox": [ + 104, + 232, + 504, + 258 + ], + "type": "inline_equation", + "content": "Z_{t}^{(s)}" + }, + { + "bbox": [ + 104, + 232, + 504, + 258 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 262, + 504, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 287 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 287 + ], + "type": "text", + "content": "Lemma K.20 (Concentration property of " + }, + { + "bbox": [ + 104, + 262, + 504, + 287 + ], + "type": "inline_equation", + "content": "Z_{t}^{(s)}" + }, + { + "bbox": [ + 104, + 262, + 504, + 287 + ], + "type": "text", + "content": "). 
With probability at least " + }, + { + "bbox": [ + 104, + 262, + 504, + 287 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 262, + 504, + 287 + ], + "type": "text", + "content": ", the following inequality holds:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 192, + 293, + 417, + 321 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 293, + 417, + 321 + ], + "spans": [ + { + "bbox": [ + 192, + 293, + 417, + 321 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {Z} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1 2} \\eta^ {- 0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}.", + "image_path": "40efd4168e292406b70ac61fd092697f02fc19906bb51bd566a390617f742c57.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 337, + 504, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 337, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 337, + 504, + 363 + ], + "type": "text", + "content": "Proof. Notice that " + }, + { + "bbox": [ + 104, + 337, + 504, + 363 + ], + "type": "inline_equation", + "content": "\\| \\mathbf{Z}_{t + 1}^{(s)} - \\mathbf{Z}_t^{(s)}\\| _2\\leq \\nu_2\\sigma_{\\max},\\forall 0\\leq t\\leq H - 1" + }, + { + "bbox": [ + 104, + 337, + 504, + 363 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 337, + 504, + 363 + ], + "type": "inline_equation", + "content": "\\| \\mathbf{Z}_0^{(s + 1)} - \\mathbf{Z}_H^{(s)}\\| _2\\leq \\nu_2\\sigma_{\\max}" + }, + { + "bbox": [ + 104, + 337, + 504, + 363 + ], + "type": "text", + "content": ". 
By Azuma-Hoeffding's inequality," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 199, + 369, + 410, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 369, + 410, + 397 + ], + "spans": [ + { + "bbox": [ + 199, + 369, + 410, + 397 + ], + "type": "interline_equation", + "content": "\\mathbb {P} (\\| \\pmb {Z} _ {t} ^ {(s)} \\| _ {2} \\geq \\epsilon^ {\\prime}) \\leq 2 \\exp \\left(- \\frac {\\epsilon^ {\\prime 2}}{2 (s H + t) \\nu_ {2} ^ {2} \\sigma_ {\\mathrm {m a x}} ^ {2}}\\right).", + "image_path": "58d5743aa4640f4d1ed7ecc01064b878381759bc83538d550b6557b052e38336.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "type": "text", + "content": "Taking union bound on " + }, + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "type": "text", + "content": " rounds, we obtain that the following inequality holds with probability at least " + }, + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 403, + 504, + 425 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 182, + 431, + 428, + 458 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 431, + 428, + 458 + ], + "spans": [ + { + "bbox": [ + 182, + 431, + 428, + 458 + ], + "type": "interline_equation", + "content": "\\| Z _ {H} ^ {(s)} \\| _ {2} \\leq \\sigma_ {\\max } \\nu_ {2} \\sqrt {2 H R _ {\\mathrm {g r p}} \\log \\frac {2 R _ {\\mathrm {g r p}}}{\\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}.", + "image_path": "096015807500f6fc1aa9ea97e5f0ebb846ec380d0ff8b1fb6420424bc68e105f.jpg" + } + ] 
+ } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 465, + 350, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 465, + 350, + 480 + ], + "spans": [ + { + "bbox": [ + 104, + 465, + 350, + 480 + ], + "type": "text", + "content": "Substituting in " + }, + { + "bbox": [ + 104, + 465, + 350, + 480 + ], + "type": "inline_equation", + "content": "H = \\frac{\\alpha}{\\eta}" + }, + { + "bbox": [ + 104, + 465, + 350, + 480 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 465, + 350, + 480 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{gpr}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}}\\right\\rfloor" + }, + { + "bbox": [ + 104, + 465, + 350, + 480 + ], + "type": "text", + "content": " yields the lemma." + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 494, + 466, + 504, + 476 + ], + "blocks": [ + { + "bbox": [ + 494, + 466, + 504, + 476 + ], + "lines": [ + { + "bbox": [ + 494, + 466, + 504, + 476 + ], + "spans": [ + { + "bbox": [ + 494, + 466, + 504, + 476 + ], + "type": "image", + "image_path": "623b23c9acf1b65ac7da7488d501bfbee0de06fd8b00de1bb3ce750aa4f27220.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "type": "text", + "content": "We proceed to present a direct corollary of Lemma K.17 which provides a bound for the potential function over " + }, + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "type": "text", + "content": " rounds." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "text", + "content": "Lemma K.21. Given " + }, + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\| _2\\leq C_0\\sqrt{\\eta\\log\\frac{1}{\\eta}}" + }, + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "text", + "content": " is a constant, then for " + }, + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 525, + 504, + 555 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 184, + 562, + 504, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 562, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 184, + 562, + 504, + 590 + ], + "type": "interline_equation", + "content": "\\bar {\\boldsymbol {\\theta}} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {0}}, \\quad \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, \\tag {52}", + "image_path": "0c674b02ac28e397071a8c8f2f1ac777f609802635482efd0bd3ece241f4597a.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 595, + 124, + 605 + ], + "type": "text", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 595, + 124, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 124, + 605 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 612, + 505, + 639 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 612, + 505, + 639 + ], + "spans": [ + { + "bbox": [ + 141, + 612, + 505, + 639 + ], + "type": "interline_equation", + "content": "\\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} \\in \\Gamma^ {\\epsilon_ {2}}, \\quad \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, 0 \\leq t \\leq H, k \\in [ K ], \\tag {53}", + "image_path": "74f6068665a6c3a30fcafef76e04537c4d97132b44856de8e201ae15c395f760.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 645, + 294, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 294, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 294, + 657 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 645, + 294, + 657 + ], + "type": "inline_equation", + "content": "C_1" + }, + { + "bbox": [ + 104, + 645, + 294, + 657 + ], + "type": "text", + "content": " is a constant that can depend on " + }, + { + "bbox": [ + 104, + 645, + 294, + 657 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 104, + 645, + 294, + 657 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 666, + 161, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 161, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 161, + 677 + ], + "type": "text", + "content": "Furthermore," + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 243, + 685, + 367, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 685, + 367, + 712 + ], + "spans": [ + { + "bbox": [ + 243, + 685, + 367, + 712 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(R _ {\\mathrm {g r p}})}) \\leq \\tilde {C} _ {1 0} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},", + "image_path": "167c306d422324aa4d627fb4db7e2ddc91b05712b92eb9e690299e4915c4a4c7.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 719, + 277, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 719, + 277, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 277, + 733 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 719, + 277, + 733 + ], + "type": "inline_equation", + "content": "\\tilde{C}_9" + }, + { + "bbox": [ + 105, + 719, + 277, + 733 + ], + "type": "text", + "content": " is a constant independent of " + }, + { + "bbox": [ + 105, + 719, + 277, + 733 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 105, + 719, + 277, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": "Proof. By " + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "inline_equation", + "content": "\\rho_{2}" + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": "-smoothness of " + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi}(\\bar{\\pmb{\\theta}}^{(0)}) \\leq C_0 \\sqrt{\\frac{\\eta \\rho_2}{2} \\log \\frac{1}{\\eta}}" + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": ". 
Substituting " + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha \\eta^{\\beta}} \\right\\rfloor" + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "inline_equation", + "content": "\\tilde{\\Psi}(\\bar{\\pmb{\\theta}}^{(0)}) \\leq C_0 \\sqrt{\\frac{\\eta \\rho_2}{2} \\log \\frac{1}{\\eta}}" + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": " into Lemma K.17, for " + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": ", (52) and (53) where " + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "inline_equation", + "content": "C_1" + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": " is a constant that can depend on " + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 104, + 81, + 504, + 131 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 134, + 234, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 134, + 234, + 147 + ], + "spans": [ + { + "bbox": [ + 105, + 134, + 234, + 147 + ], + "type": "text", + "content": "Furthermore, for round " + }, + { + "bbox": [ + 105, + 134, + 234, + 147 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(R_{\\mathrm{grp}})}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 129, + 154, + 481, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 154, + 481, + 186 + ], + "spans": [ + { + "bbox": [ + 129, + 154, + 481, + 186 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} (\\bar {\\pmb {\\theta}} ^ {(R _ {\\mathrm {g r p}})}) \\leq \\exp (- \\mathcal {O} (\\eta^ {- \\beta})) + \\frac {1}{1 - \\exp (- \\alpha \\mu / 2)} \\tilde {C} _ {5} \\sqrt {\\eta \\log \\frac {R _ {\\mathrm {g r p}}}{\\eta \\delta}} \\leq \\tilde {C} _ {1 0} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},", + "image_path": "680ff1f460c73231b9e797de055531041b71d2d64c10e634b12d720428e8b471.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 193, + 277, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 193, + 277, + 206 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 277, + 206 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 193, + 277, + 206 + ], + "type": "inline_equation", + "content": "\\tilde{C}_9" + }, + { + "bbox": [ + 105, + 193, + 277, + 206 + ], + "type": "text", + "content": " is a constant independent of " + }, + { + "bbox": [ + 105, + 193, + 277, + 206 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 105, + 193, + 277, + 206 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 494, + 194, + 504, + 204 + ], + "blocks": [ + { + "bbox": [ + 494, + 194, + 504, + 204 + ], + "lines": [ + { + "bbox": [ + 494, + 194, + 504, + 204 + ], + "spans": [ + { + "bbox": [ + 494, + 194, + 504, + 204 + ], + "type": "image", + "image_path": "3fbad9ac8622b8a91201b5a489176323ab972ffbf93a6adb7ee011279b012a48.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "text", + "content": "Lemma K.22. Given " + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\theta}^{(0)} - \\phi^{(0)} \\|_2 \\leq C_0 \\sqrt{\\eta \\log \\frac{1}{\\eta}}" + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "text", + "content": " is a constant, then for " + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "inline_equation", + "content": "0 \\leq s_0 < R_{\\mathrm{grp}}, 0 \\leq t \\leq H, k \\in [K]" + }, + { + "bbox": [ + 104, + 216, + 504, + 247 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ 
+ 164, + 252, + 447, + 308 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 252, + 447, + 308 + ], + "spans": [ + { + "bbox": [ + 164, + 252, + 447, + 308 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\boldsymbol {x} _ {k, t} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\| \\bar {\\boldsymbol {x}} _ {H} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\\\ \\| \\bar {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}. \\\\ \\end{array}", + "image_path": "419454b2bfa6649b818fe3b9d8915adf090c748516ede67c7addacb03e30caa8.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 313, + 338, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 313, + 338, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 338, + 325 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 313, + 338, + 325 + ], + "type": "inline_equation", + "content": "C_2" + }, + { + "bbox": [ + 105, + 313, + 338, + 325 + ], + "type": "text", + "content": " is a constant that can depend " + }, + { + "bbox": [ + 105, + 313, + 338, + 325 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 105, + 313, + 338, + 325 + ], + "type": "text", + "content": ". 
Furthermore," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 222, + 331, + 388, + 358 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 331, + 388, + 358 + ], + "spans": [ + { + "bbox": [ + 222, + 331, + 388, + 358 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\pmb {\\theta}} ^ {(R _ {\\mathrm {g r p}})} - \\pmb {\\phi} ^ {(R _ {\\mathrm {g r p}})} \\| _ {2} \\leq \\tilde {C} _ {1 1} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}},", + "image_path": "3ee1dc375950d62c1c3fcd37273cff80c38666b0e086e4e25ccb22b527f5b663.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 365, + 280, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 280, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 280, + 379 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 365, + 280, + 379 + ], + "type": "inline_equation", + "content": "\\tilde{C}_{11}" + }, + { + "bbox": [ + 105, + 365, + 280, + 379 + ], + "type": "text", + "content": " is a constant independent of " + }, + { + "bbox": [ + 105, + 365, + 280, + 379 + ], + "type": "inline_equation", + "content": "C_0" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 392, + 339, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 392, + 339, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 339, + 407 + ], + "type": "text", + "content": "Proof. 
Decomposing " + }, + { + "bbox": [ + 105, + 392, + 339, + 407 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{k,t}^{(s)}" + }, + { + "bbox": [ + 105, + 392, + 339, + 407 + ], + "type": "text", + "content": " by triangle inequality, we have" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 213, + 415, + 395, + 432 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 415, + 395, + 432 + ], + "spans": [ + { + "bbox": [ + 213, + 415, + 395, + 432 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {x} _ {k, t} ^ {(s)} \\| _ {2} \\leq \\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} + \\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\boldsymbol {\\phi} ^ {(s)} \\| _ {2}.", + "image_path": "85f4e0fd9e318dc03d9d4dcceb53e83f70714cd619a23e4095c57fab3fab63db.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 438, + 504, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 438, + 504, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 504, + 465 + ], + "type": "text", + "content": "We first bound " + }, + { + "bbox": [ + 104, + 438, + 504, + 465 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\theta}^{(s)} - \\phi^{(s)} \\|_2" + }, + { + "bbox": [ + 104, + 438, + 504, + 465 + ], + "type": "text", + "content": ". 
By Lemma K.21, for " + }, + { + "bbox": [ + 104, + 438, + 504, + 465 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 438, + 504, + 465 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 438, + 504, + 465 + ], + "type": "inline_equation", + "content": "1 - \\frac{\\delta}{2}" + }, + { + "bbox": [ + 104, + 438, + 504, + 465 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 189, + 472, + 504, + 498 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 472, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 189, + 472, + 504, + 498 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {2}{\\eta \\delta}}, \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, \\tag {54}", + "image_path": "1777bcb0647a1e09ff6bf6212f566a1e7cbe3bdd424bb7180fc96fe8bfac6bf2.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 190, + 500, + 504, + 527 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 500, + 504, + 527 + ], + "spans": [ + { + "bbox": [ + 190, + 500, + 504, + 527 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\eta \\log \\frac {2}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, 0 \\leq t \\leq H, \\tag {55}", + "image_path": "a5c99c91c59d99fa9a5989c450b27be66f640c34dd5dbd6a176dce57868c0373.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 533, + 123, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 533, + 123, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 123, + 543 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 243, + 
549, + 504, + 576 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 549, + 504, + 576 + ], + "spans": [ + { + "bbox": [ + 243, + 549, + 504, + 576 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {\\left(R _ {\\mathrm {g r p}}\\right)}\\right) \\leq \\tilde {C} _ {1 0} \\sqrt {\\eta \\log \\frac {2}{\\eta \\delta}}, \\tag {56}", + "image_path": "0952c5145079eb09c8d230cb14b279a5483acaa8bdc69fe193a2772d64de5df9.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "inline_equation", + "content": "C_2" + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "content": " is a constant that may depend on " + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\tilde{C}_{10}" + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "content": " is a constant independent of " + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 104, + 582, + 504, + 606 + ], + "type": "text", + "content": ". 
When (54) and (56) hold, by Lemma K.10," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 206, + 613, + 504, + 639 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 613, + 504, + 639 + ], + "spans": [ + { + "bbox": [ + 206, + 613, + 504, + 639 + ], + "type": "interline_equation", + "content": "\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\phi^ {(s)} \\right\\| _ {2} \\leq \\sqrt {\\frac {2}{\\mu}} \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {\\frac {2 \\eta}{\\mu} \\log \\frac {2}{\\eta \\delta}}, \\tag {57}", + "image_path": "c286f71e4d38155d3f35a80551ab642f111d41904235a0f39c8a777f19da454f.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 181, + 641, + 504, + 668 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 641, + 504, + 668 + ], + "spans": [ + { + "bbox": [ + 181, + 641, + 504, + 668 + ], + "type": "interline_equation", + "content": "\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {\\left(R _ {\\mathrm {g r p}}\\right)} - \\phi^ {\\left(R _ {\\mathrm {g r p}}\\right)} \\right\\| _ {2} \\leq \\sqrt {\\frac {2}{\\mu}} \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {\\left(R _ {\\mathrm {g r p}}\\right)}\\right) \\leq \\tilde {C} _ {1 0} \\sqrt {\\frac {2 \\eta}{\\mu} \\log \\frac {2}{\\eta \\delta}}. 
\\tag {58}", + "image_path": "d3193cce002669c9610cf0109c4f4b4800f873a7241099f3acb7a45118b9c738.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 674, + 350, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 674, + 350, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 674, + 350, + 691 + ], + "type": "text", + "content": "Then we bound " + }, + { + "bbox": [ + 105, + 674, + 350, + 691 + ], + "type": "inline_equation", + "content": "\\| \\pmb{\\theta}_{k,t}^{(s)} - \\bar{\\pmb{\\theta}}^{(s)}\\| _2" + }, + { + "bbox": [ + 105, + 674, + 350, + 691 + ], + "type": "text", + "content": " . By the update rule, we have" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 142, + 697, + 466, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 697, + 466, + 730 + ], + "spans": [ + { + "bbox": [ + 142, + 697, + 466, + 730 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {k, t} ^ {(s)} = \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}) - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\boldsymbol {z} _ {k, \\tau} ^ {(s)} = \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}) - \\eta \\boldsymbol {m} _ {k, t} ^ {(s)}.", + "image_path": "3bbabc51db4918b7a22fdf4d9681a65e1c4535e8c9e08666d1a71b1bffab40a9.jpg" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 
299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 251, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 251, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 251, + 95 + ], + "type": "text", + "content": "Still by triangle inequality, we have" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 200, + 102, + 410, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 102, + 410, + 135 + ], + "spans": [ + { + "bbox": [ + 200, + 102, + 410, + 135 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\leq \\eta \\sum_ {\\tau = 0} ^ {t - 1} \\| \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}) \\| _ {2} + \\eta \\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\| _ {2}.", + "image_path": "9afc32384f52a618b10d0bd071c98d4f7f10a5f664dd9a04f6349f33134509ff.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 142, + 290, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 142, + 290, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 290, + 154 + ], + "type": "text", + "content": "Due to " + }, + { + "bbox": [ + 105, + 142, + 290, + 154 + ], + "type": "inline_equation", + "content": "\\rho_{2}" + }, + { + "bbox": [ + 105, + 142, + 290, + 154 + ], + "type": "text", + "content": "-smoothness of " + }, + { + "bbox": [ + 105, + 142, + 290, + 154 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 105, + 142, + 290, + 154 + ], + "type": "text", + "content": ", when (55) holds," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 197, + 162, + 504, + 189 + ], + "type": "interline_equation", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 197, + 162, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 197, + 162, + 504, + 189 + ], + "type": "interline_equation", + "content": "\\left\\| \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}\\right) \\right\\| _ {2} \\leq \\sqrt {2 \\rho_ {2}} \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, \\tau} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {2 \\rho_ {2} \\eta \\log \\frac {2}{\\eta \\delta}}. \\tag {59}", + "image_path": "3dfc2ec9575ae9583f7d95ae7ee55142476bd30eda437dcb22ffea34e2cff6dc.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 195, + 304, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 195, + 304, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 195, + 304, + 211 + ], + "type": "text", + "content": "By Lemma K.19, with probability at least " + }, + { + "bbox": [ + 105, + 195, + 304, + 211 + ], + "type": "inline_equation", + "content": "1 - \\frac{\\delta}{2}" + }, + { + "bbox": [ + 105, + 195, + 304, + 211 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 168, + 218, + 504, + 246 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 218, + 504, + 246 + ], + "spans": [ + { + "bbox": [ + 168, + 218, + 504, + 246 + ], + "type": "interline_equation", + "content": "\\left\\| \\boldsymbol {m} _ {k, t} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {9} \\sqrt {\\frac {1}{\\eta} \\log \\frac {2}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ], 0 \\leq s < R _ {\\mathrm {g r p}}. 
\\tag {60}", + "image_path": "51e506b4b32a9acbf90bc9c9979f44e9e467dbb44f0e2f0b74b37b091dfc76ec.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 251, + 504, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 251, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 251, + 504, + 275 + ], + "type": "text", + "content": "Combining (59) and (60), when (55) and (56) hold simultaneously, there exists a constant " + }, + { + "bbox": [ + 104, + 251, + 504, + 275 + ], + "type": "inline_equation", + "content": "C_3" + }, + { + "bbox": [ + 104, + 251, + 504, + 275 + ], + "type": "text", + "content": " which can depend on " + }, + { + "bbox": [ + 104, + 251, + 504, + 275 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 104, + 251, + 504, + 275 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 187, + 282, + 505, + 309 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 282, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 187, + 282, + 505, + 309 + ], + "type": "interline_equation", + "content": "\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\right\\| _ {2} \\leq C _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}, \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H. 
\\tag {61}", + "image_path": "e8b36111b9ea18b87c0c63f278b4ea6eca5c457a02a3db940f9cd10732c4a40a.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 316, + 198, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 198, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 198, + 328 + ], + "type": "text", + "content": "By triangle inequality," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 233, + 336, + 378, + 363 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 336, + 378, + 363 + ], + "spans": [ + { + "bbox": [ + 233, + 336, + 378, + 363 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\pmb {\\theta}} ^ {(s + 1)} - \\bar {\\pmb {\\theta}} ^ {(s)} \\| _ {2} \\leq C _ {3} \\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}.", + "image_path": "af96c88354c5f35fb13a1a28946b0f584c35a62c17ab12787933c785ef540801.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 369, + 328, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 369, + 328, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 328, + 381 + ], + "type": "text", + "content": "Combining (57), (58) and (61), we complete the proof." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 494, + 369, + 504, + 379 + ], + "blocks": [ + { + "bbox": [ + 494, + 369, + 504, + 379 + ], + "lines": [ + { + "bbox": [ + 494, + 369, + 504, + 379 + ], + "spans": [ + { + "bbox": [ + 494, + 369, + 504, + 379 + ], + "type": "image", + "image_path": "81f1985b57526f1bc28ab22f11b85687c90ead9c450b4c1809bd0f9183a00e5a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 400, + 459, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 400, + 459, + 414 + ], + "spans": [ + { + "bbox": [ + 104, + 400, + 459, + 414 + ], + "type": "text", + "content": "Then we provide high probability bounds for the movement of " + }, + { + "bbox": [ + 104, + 400, + 459, + 414 + ], + "type": "inline_equation", + "content": "\\phi^{(s)}" + }, + { + "bbox": [ + 104, + 400, + 459, + 414 + ], + "type": "text", + "content": " within " + }, + { + "bbox": [ + 104, + 400, + 459, + 414 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 400, + 459, + 414 + ], + "type": "text", + "content": " rounds." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "text", + "content": "Lemma K.23. 
Given " + }, + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\|_2 \\leq C_0\\sqrt{\\eta\\log\\frac{1}{\\eta}}" + }, + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "text", + "content": " is a constant, then for " + }, + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 418, + 504, + 449 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 182, + 456, + 427, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 456, + 427, + 483 + ], + "spans": [ + { + "bbox": [ + 182, + 456, + 427, + 483 + ], + "type": "interline_equation", + "content": "\\| \\phi^ {(s)} - \\phi^ {(0)} \\| _ {2} \\leq C _ {4} \\eta^ {0. 5 - 0. 
5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 1 \\leq s \\leq R _ {\\mathrm {g r p}}.", + "image_path": "c0d9aded438b94561eb0311a4fe0e16f53b0f9eb4d4643cd621f0177704271f0.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 490, + 294, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 490, + 294, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 294, + 502 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 490, + 294, + 502 + ], + "type": "inline_equation", + "content": "C_4" + }, + { + "bbox": [ + 105, + 490, + 294, + 502 + ], + "type": "text", + "content": " is a constant that can depend on " + }, + { + "bbox": [ + 105, + 490, + 294, + 502 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 105, + 490, + 294, + 502 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 521, + 272, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 272, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 272, + 533 + ], + "type": "text", + "content": "Proof. 
By the update rule of Local SGD," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 212, + 540, + 399, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 540, + 399, + 574 + ], + "spans": [ + { + "bbox": [ + 212, + 540, + 399, + 574 + ], + "type": "interline_equation", + "content": "\\pmb {\\theta} _ {k, H} ^ {(s)} = \\bar {\\pmb {\\theta}} ^ {(s)} - \\eta \\sum_ {t = 0} ^ {H - 1} \\nabla \\mathcal {L} (\\pmb {\\theta} _ {k, t} ^ {(s)}) - \\eta \\sum_ {t = 0} ^ {H - 1} \\pmb {z} _ {k, t} ^ {(s)}", + "image_path": "79e525f64a29dbfca5382fad7944a42a9e17e7fdd1c6491c3b14fd04623221b4.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 580, + 243, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 580, + 243, + 593 + ], + "spans": [ + { + "bbox": [ + 105, + 580, + 243, + 593 + ], + "type": "text", + "content": "Averaging among " + }, + { + "bbox": [ + 105, + 580, + 243, + 593 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 105, + 580, + 243, + 593 + ], + "type": "text", + "content": " clients gives" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 178, + 600, + 431, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 600, + 431, + 635 + ], + "spans": [ + { + "bbox": [ + 178, + 600, + 431, + 635 + ], + "type": "interline_equation", + "content": "\\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} = \\bar {\\boldsymbol {\\theta}} ^ {(s)} - \\frac {\\eta}{K} \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) - \\frac {\\eta}{K} \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\boldsymbol {z} _ {k, t} ^ {(s)}.", + "image_path": "348ea1e107fe087e8311189827255fb8d63b6bd97e1b6560f8321b9bcaa004b7.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 643, + 477, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 643, + 477, + 
656 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 477, + 656 + ], + "type": "text", + "content": "By Lemma K.22, for " + }, + { + "bbox": [ + 105, + 643, + 477, + 656 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 105, + 643, + 477, + 656 + ], + "type": "text", + "content": ", the following holds with probability at least " + }, + { + "bbox": [ + 105, + 643, + 477, + 656 + ], + "type": "inline_equation", + "content": "1 - \\delta / 3" + }, + { + "bbox": [ + 105, + 643, + 477, + 656 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 127, + 664, + 504, + 700 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 664, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 127, + 664, + 504, + 700 + ], + "type": "interline_equation", + "content": "\\left\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\right\\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {3}{\\eta \\delta}}, \\quad \\boldsymbol {\\theta} _ {k, t} ^ {(s)} \\in B ^ {\\epsilon_ {0}} \\left(\\phi^ {(s)}\\right), \\forall 0 \\leq s < R _ {\\mathrm {g r p}}, 0 \\leq t \\leq H, k \\in [ K ], \\tag {62}", + "image_path": "544cf4605aecccbc4773a7f00b27b4d9223b4bc113e51d6f17cea7c95ec52399.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 118, + 703, + 504, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 703, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 703, + 504, + 731 + ], + "type": "interline_equation", + "content": "\\left\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\right\\| _ {2} \\leq C _ {2} \\sqrt {\\eta \\log \\frac {3}{\\eta \\delta}}, \\quad \\bar {\\boldsymbol {\\theta}} ^ {(s)}, \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} \\in B ^ {\\epsilon_ {0}} \\left(\\phi^ {(s)}\\right), \\quad \\forall 0 \\leq s < R _ 
{\\mathrm {g r p}}. \\tag {63}", + "image_path": "fe9c91149c1dafe15562e0b9766cebd31c278fb801621908d56d1b7884ff5e58.jpg" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "49" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 48 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 362, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 362, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 362, + 95 + ], + "type": "text", + "content": "When (62) and (63) hold, we can expand " + }, + { + "bbox": [ + 105, + 81, + 362, + 95 + ], + "type": "inline_equation", + "content": "\\Phi (\\bar{\\theta}^{(s + 1)})" + }, + { + "bbox": [ + 105, + 81, + 362, + 95 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 129, + 102, + 481, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 102, + 481, + 224 + ], + "spans": [ + { + "bbox": [ + 129, + 102, + 481, + 224 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\phi^ {(s + 1)} = \\phi^ {(s)} + \\partial \\Phi (\\bar {\\theta} ^ {(s)}) (\\bar {\\theta} ^ {(s + 1)} - \\bar {\\theta} ^ {(s)}) + \\frac {1}{2} \\partial^ {2} \\Phi (\\tilde {\\theta} ^ {(s)}) [ \\bar {\\theta} ^ {(s + 1)} - \\bar {\\theta} ^ {(s)}, \\bar {\\theta} ^ {(s + 1)} - \\bar {\\theta} ^ {(s)} ] \\\\ = \\phi^ {(s)} 
\\underbrace {- \\frac {\\eta}{K} \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} \\partial \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k , t} ^ {(s)})} _ {\\mathcal {T} _ {1} ^ {(s)}} \\underbrace {- \\frac {\\eta}{K} \\partial \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\sum_ {t = 0} ^ {H - 1} \\sum_ {k \\in [ K ]} z _ {k , t} ^ {(s)}} _ {\\mathcal {T} _ {2} ^ {(s)}} \\\\ + \\underbrace {\\frac {1}{2} \\partial^ {2} \\Phi (a ^ {(s)} \\bar {\\boldsymbol {\\theta}} ^ {(s)} + (1 - a ^ {(s)}) \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)}) [ \\boldsymbol {\\theta} ^ {(s + 1)} - \\boldsymbol {\\theta} ^ {(s)} , \\boldsymbol {\\theta} ^ {(s + 1)} - \\boldsymbol {\\theta} ^ {(s)} ]} _ {\\mathcal {T} _ {3} ^ {(s)}}, \\\\ \\end{array}", + "image_path": "71ad611756b0a6790e106165c34abf423c594fb8e822e09f01898b2078b32f3f.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 232, + 367, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 232, + 367, + 245 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 367, + 245 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 232, + 367, + 245 + ], + "type": "inline_equation", + "content": "a^{(s)}\\in (0,1)" + }, + { + "bbox": [ + 105, + 232, + 367, + 245 + ], + "type": "text", + "content": ". 
Telescoping from round 0 to " + }, + { + "bbox": [ + 105, + 232, + 367, + 245 + ], + "type": "inline_equation", + "content": "s - 1" + }, + { + "bbox": [ + 105, + 232, + 367, + 245 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 200, + 253, + 410, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 253, + 410, + 285 + ], + "spans": [ + { + "bbox": [ + 200, + 253, + 410, + 285 + ], + "type": "interline_equation", + "content": "\\| \\phi^ {(s)} - \\phi^ {(0)} \\| _ {2} = \\sum_ {r = 0} ^ {s - 1} \\mathcal {T} _ {1} ^ {(r)} + \\sum_ {r = 0} ^ {s - 1} \\mathcal {T} _ {2} ^ {(r)} + \\sum_ {r = 0} ^ {s - 1} \\mathcal {T} _ {3} ^ {(r)}.", + "image_path": "2698814a6173859a6bef2498427a8fca7344e602b09ed5a6bfc448c51f93aeb7.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 300, + 504, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 504, + 326 + ], + "type": "text", + "content": "From (63), we can bound " + }, + { + "bbox": [ + 104, + 300, + 504, + 326 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{T}_3^{(s)}\\| _2" + }, + { + "bbox": [ + 104, + 300, + 504, + 326 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 104, + 300, + 504, + 326 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{T}_3^{(s)}\\| _2\\leq \\frac{1}{2}\\nu_2C_2^2\\eta \\log \\frac{3}{\\eta\\delta}" + }, + { + "bbox": [ + 104, + 300, + 504, + 326 + ], + "type": "text", + "content": ". We proceed to bound " + }, + { + "bbox": [ + 104, + 300, + 504, + 326 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{T}_1^{(s)}\\| _2" + }, + { + "bbox": [ + 104, + 300, + 504, + 326 + ], + "type": "text", + "content": ". 
When (62) and (63) hold, we have" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 148, + 334, + 462, + 369 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 334, + 462, + 369 + ], + "spans": [ + { + "bbox": [ + 148, + 334, + 462, + 369 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial \\Phi (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) = \\partial \\Phi (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) + \\partial^ {2} \\Phi (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) [ \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)}, \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) ] \\\\ = \\partial^ {2} \\Phi (b _ {k, t} ^ {(s)} \\bar {\\boldsymbol {\\theta}} ^ {(s)} + (1 - b _ {k, t} ^ {(s)}) \\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) [ \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)}, \\nabla \\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) ], \\\\ \\end{array}", + "image_path": "6cffd17798e3af2ac4976ede91f67ff93968f3b74db393fd6f704afbc31fda65.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 377, + 476, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 377, + 476, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 377, + 476, + 392 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 377, + 476, + 392 + ], + "type": "inline_equation", + "content": "b_{k,t}^{(s)} \\in (0,1)" + }, + { + "bbox": [ + 105, + 377, + 476, + 392 + ], + "type": "text", + "content": ". 
By Lemma K.17, with probability at least " + }, + { + "bbox": [ + 105, + 377, + 476, + 392 + ], + "type": "inline_equation", + "content": "1 - \\delta /3" + }, + { + "bbox": [ + 105, + 377, + 476, + 392 + ], + "type": "text", + "content": ", the following holds:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 117, + 402, + 504, + 427 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 402, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 117, + 402, + 504, + 427 + ], + "type": "interline_equation", + "content": "\\left\\| \\nabla \\mathcal {L} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\right\\| _ {2} \\leq \\sqrt {2 \\rho_ {2}} \\tilde {\\Psi} \\left(\\boldsymbol {\\theta} _ {k, t} ^ {(s)}\\right) \\leq C _ {1} \\sqrt {2 \\rho_ {2} \\eta \\log \\frac {3}{\\eta \\delta}}, \\forall k \\in [ K ], 0 \\leq t \\leq H, 0 \\leq s < R _ {\\mathrm {g r p}}. \\tag {64}", + "image_path": "adac2f89ead450d12c2ccb7c0698a911da63135278d9d1fbad85a4828727ec1c.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 434, + 416, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 434, + 416, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 416, + 447 + ], + "type": "text", + "content": "When (62), (63) and (64) hold simultaneously, we have for all " + }, + { + "bbox": [ + 105, + 434, + 416, + 447 + ], + "type": "inline_equation", + "content": "0 \\leq s < R_{\\mathrm{grp}}" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 207, + 456, + 403, + 515 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 456, + 403, + 515 + ], + "spans": [ + { + "bbox": [ + 207, + 456, + 403, + 515 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\mathcal {T} _ {1} ^ {(s)} \\| _ {2} \\leq \\frac {\\eta \\nu_ {2}}{K} \\sum_ {t = 0} ^ {H - 1} \\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\bar {\\boldsymbol {\\theta}} ^ {(s)} \\| _ {2} \\| \\nabla 
\\mathcal {L} (\\boldsymbol {\\theta} _ {k, t} ^ {(s)}) \\| _ {2} \\\\ \\leq \\frac {\\alpha \\nu_ {2} \\sqrt {2 \\rho_ {2}} C _ {1} C _ {2}}{K} \\eta \\log \\frac {3}{\\eta \\delta}. \\\\ \\end{array}", + "image_path": "f2dcf2b917250fe60f1a59644c4842e427d335947bd19cd457eb52bbd286bede.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 529, + 504, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 504, + 555 + ], + "type": "text", + "content": "Finally, we bound " + }, + { + "bbox": [ + 104, + 529, + 504, + 555 + ], + "type": "inline_equation", + "content": "\\| \\sum_{r = 0}^{s - 1}\\mathcal{T}_2^{(r)}\\| _2" + }, + { + "bbox": [ + 104, + 529, + 504, + 555 + ], + "type": "text", + "content": ". By Lemma K.20, the following inequality holds with probability at least " + }, + { + "bbox": [ + 104, + 529, + 504, + 555 + ], + "type": "inline_equation", + "content": "1 - \\delta /3" + }, + { + "bbox": [ + 104, + 529, + 504, + 555 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 194, + 563, + 504, + 589 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 563, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 194, + 563, + 504, + 589 + ], + "type": "interline_equation", + "content": "\\left\\| \\boldsymbol {Z} _ {H} ^ {(s)} \\right\\| _ {2} \\leq \\tilde {C} _ {1 2} \\eta^ {- 0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {3}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}. 
\\tag {65}", + "image_path": "55304aa4395328e9c8c6ed82c38cf898d0eec82030e47506ede7f8c21d427e94.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 595, + 329, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 595, + 329, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 329, + 607 + ], + "type": "text", + "content": "When (62), (63) and (65) hold simultaneously, we have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 164, + 616, + 447, + 645 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 616, + 447, + 645 + ], + "spans": [ + { + "bbox": [ + 164, + 616, + 447, + 645 + ], + "type": "interline_equation", + "content": "\\| \\sum_ {r = 0} ^ {s} \\mathcal {T} _ {2} ^ {(r)} \\| _ {2} = \\eta \\| \\boldsymbol {Z} _ {H} ^ {(s)} \\| _ {2} \\leq \\tilde {C} _ {1 2} \\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {3}{\\eta \\delta}}, \\quad \\forall 0 \\leq s < R _ {\\mathrm {g r p}}", + "image_path": "81a3f8aef7f24e7f6a4289d85050e5e1f72bbcfa81de5776002fee6db3b6bd12.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "text", + "content": "Combining the bounds for " + }, + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{T}_1^{(s)}\\| _2, \\| \\sum_{r = 0}^s\\mathcal{T}_2^{(r)}\\| _2" + }, + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{T}_3^{(s)}\\| _2" + }, + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "text", + "content": " and taking union bound, we obtain that for " + }, + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "inline_equation", + "content": 
"\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "text", + "content": ", the following inequality holds with probability at least " + }, + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 654, + 504, + 680 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 184, + 688, + 426, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 688, + 426, + 714 + ], + "spans": [ + { + "bbox": [ + 184, + 688, + 426, + 714 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {\\phi} ^ {(s)} - \\boldsymbol {\\phi} ^ {(0)} \\| _ {2} \\leq C _ {4} \\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}, \\quad \\forall 1 \\leq s \\leq R _ {\\mathrm {g r p}}.", + "image_path": "146410b390290caa9918dd861aae92fc9ea4543e26bedd5b3a39b5f3a5ffdf8b.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 720, + 292, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 292, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 292, + 732 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 720, + 292, + 732 + ], + "type": "inline_equation", + "content": "C_4" + }, + { + "bbox": [ + 105, + 720, + 292, + 732 + ], + "type": "text", + "content": " is a constant that can depend on " + }, + { + "bbox": [ + 105, + 720, + 292, + 732 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 105, + 720, + 292, + 732 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "50" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 49 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 430, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 430, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 430, + 94 + ], + "type": "text", + "content": "K.7 SUMMARY OF THE DYNAMICS AND PROOF OF THEOREMS J.1 AND J.2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 504, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 504, + 137 + ], + "type": "text", + "content": "Based on the results in Appendix K.5 and Appendix K.6, we summarize the dynamics of Local SGD iterates and then present the proof of Theorems J.1 and J.2 in this subsection. For convenience, we first introduce the definition of global step and " + }, + { + "bbox": [ + 104, + 102, + 504, + 137 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 102, + 504, + 137 + ], + "type": "text", + "content": "-good step." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": "Definition K.3 (Global step). Define " + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": " as the index set " + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "inline_equation", + "content": "\\{(s,t):s\\geq 0,0\\leq t\\leq H\\}" + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": " with lexicographical order, which means " + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "inline_equation", + "content": "(s_1,t_1)\\preceq (s_2,t_2)" + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": " if and only if " + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "inline_equation", + "content": "s_1 < s_2" + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "inline_equation", + "content": "(s_{1} = s_{2}" + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "inline_equation", + "content": "t_1\\leq t_2)" + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": ". 
A global step is indexed by " + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "inline_equation", + "content": "(s,t)" + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": " corresponding to the " + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": "-th local step at round " + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 137, + 504, + 172 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 173, + 504, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 173, + 504, + 197 + ], + "spans": [ + { + "bbox": [ + 104, + 173, + 504, + 197 + ], + "type": "text", + "content": "Definition K.4 (" + }, + { + "bbox": [ + 104, + 173, + 504, + 197 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 173, + 504, + 197 + ], + "type": "text", + "content": "-good step). 
In the training process of Local SGD, we say the global step " + }, + { + "bbox": [ + 104, + 173, + 504, + 197 + ], + "type": "inline_equation", + "content": "(s,t) \\preceq (R_{\\mathrm{tot}},0)" + }, + { + "bbox": [ + 104, + 173, + 504, + 197 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 173, + 504, + 197 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 173, + 504, + 197 + ], + "type": "text", + "content": "-good if the following inequalities hold:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 198, + 474, + 225 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 198, + 474, + 225 + ], + "spans": [ + { + "bbox": [ + 138, + 198, + 474, + 225 + ], + "type": "interline_equation", + "content": "\\| \\tilde {\\mathbf {Z}} _ {k, \\tau} ^ {(r)} \\| _ {2} \\leq \\exp (\\alpha \\rho_ {2}) \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {6 H R _ {\\operatorname* {t o t}} K}{\\delta}}, \\quad \\forall k \\in [ K ], (r, \\tau) \\preceq (s, t),", + "image_path": "24061fbc640dc42250d269af5c7f2d288ac6d34109c25486d23599b03a9f44ed.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 136, + 227, + 474, + 253 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 227, + 474, + 253 + ], + "spans": [ + { + "bbox": [ + 136, + 227, + 474, + 253 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {m} _ {k, \\tau} ^ {(r)} \\| _ {2} \\leq \\sigma_ {\\max } \\sqrt {2 H \\log \\frac {6 K H R _ {\\mathrm {t o t}}}{\\delta}}, \\quad \\forall k \\in [ K ], (r, \\tau) \\preceq (s, t),", + "image_path": "6872e86dbb8749b1c0b7bb0ac0a733e2c7288a6bc8f20f9218a7bfbc4fad9a70.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 139, + 255, + 474, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 255, + 474, + 281 + ], + "spans": [ + { + "bbox": [ + 139, + 255, + 474, + 281 + ], + "type": 
"interline_equation", + "content": "\\| \\boldsymbol {Z} _ {H} ^ {(r)} \\| _ {2} \\leq \\sigma_ {\\max } \\nu_ {2} \\sqrt {2 H R _ {\\mathrm {g r p}} \\log \\frac {2 R _ {\\mathrm {t o t}}}{\\delta}}, \\quad \\forall 0 \\leq r < s.", + "image_path": "4a3a09743d4477797dfb786ef38f394f38e74f377d15a0f2a7ff664edbd90954.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 290, + 504, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 290, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 290, + 504, + 316 + ], + "type": "text", + "content": "Applying the concentration properties of " + }, + { + "bbox": [ + 104, + 290, + 504, + 316 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{Z}}_{k,\\tau}^{(r)},\\pmb{m}_{k,\\tau}^{(r)}" + }, + { + "bbox": [ + 104, + 290, + 504, + 316 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 290, + 504, + 316 + ], + "type": "inline_equation", + "content": "\\pmb{Z}_H^{(r)}" + }, + { + "bbox": [ + 104, + 290, + 504, + 316 + ], + "type": "text", + "content": " (Lemmas K.20, K.19 and K.12) yields the following theorem." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "text", + "content": "Theorem K.1. 
For " + }, + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "text", + "content": ", all global steps " + }, + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "inline_equation", + "content": "(s,t) \\preceq (R_{\\mathrm{tot}},0)" + }, + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 105, + 317, + 504, + 341 + ], + "type": "text", + "content": "-good." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 347, + 501, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 501, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 501, + 361 + ], + "type": "text", + "content": "In the remainder of this subsection, we use " + }, + { + "bbox": [ + 104, + 347, + 501, + 361 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\cdot)" + }, + { + "bbox": [ + 104, + 347, + 501, + 361 + ], + "type": "text", + "content": " notation to hide constants independent of " + }, + { + "bbox": [ + 104, + 347, + 501, + 361 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 347, + 501, + 361 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 347, + 501, + 361 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 347, + 501, + 361 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": "Below we present a summary of the dynamics of Local SGD when " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(0)}" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": " is initialized such that " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\Phi (\\bar{\\theta}^{(0)})\\in \\Gamma" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": " and all global steps are " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": "-good. Phase 1 lasts for " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "s_0 + s_1 = \\mathcal{O}(\\log \\frac{1}{\\eta})" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": " rounds. 
At the end of phase 1, the iterate reaches within " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{\\theta}}^{(s_0 + s_1)} - \\pmb {\\phi}^{(s_0 + s_1)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": ". The change of the projection on manifold over " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "s_0 + s_1" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": " rounds, " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\| \\phi^{(s_1 + s_0)} - \\phi^{(0)}\\| _2" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": " is bounded by " + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(\\log \\frac{1}{\\eta}\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})" + }, + { + "bbox": [ + 104, + 365, + 504, + 450 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "text", + "content": "After " + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "inline_equation", + "content": "s_0 + s_1" + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "text", + "content": " rounds, the dynamic enters phase 2 when the iterates stay close to " + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}}" + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,t}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall k \\in [K], (s_0 + s_1, 0) \\preceq (s,t) \\preceq (R_{\\mathrm{tot}}, 0)" + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "text", + "content": ". 
Furthermore, " + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "inline_equation", + "content": "\\| \\pmb{x}_{k,t}^{(s)} \\|_2" + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{x}}_H^{(s)} \\|_2" + }, + { + "bbox": [ + 104, + 456, + 504, + 499 + ], + "type": "text", + "content": " satisfy the following equations:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 144, + 502, + 465, + 522 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 502, + 465, + 522 + ], + "spans": [ + { + "bbox": [ + 144, + 502, + 465, + 522 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {x} _ {k, t} ^ {(s)} \\| _ {2} = \\mathcal {O} (\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}), \\quad \\forall k \\in [ K ], 0 \\leq t \\leq H, s _ {0} + s _ {1} \\leq s < R _ {\\mathrm {t o t}},", + "image_path": "ac582aa1467fc82f8653e042655848361ca5385a83b4fb26dad3c2a63164f1ec.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 145, + 525, + 465, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 525, + 465, + 544 + ], + "spans": [ + { + "bbox": [ + 145, + 525, + 465, + 544 + ], + "type": "interline_equation", + "content": "\\| \\tilde {\\boldsymbol {x}} _ {H} ^ {(s)} \\| _ {2} = \\mathcal {O} (\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}), \\quad \\forall s _ {0} + s _ {1} \\leq s < R _ {\\mathrm {t o t}}.", + "image_path": "c7f258d15940c5e1be8332eaa13873055daa46d8a78089acf8af94c9aea95e23.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 546, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 546, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 546, + 504, + 567 + ], + "type": "text", + "content": "Moreover, for " + }, + { + "bbox": [ + 104, + 546, + 504, + 567 + 
], + "type": "inline_equation", + "content": "s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}} - R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 546, + 504, + 567 + ], + "type": "text", + "content": ", the change of the manifold projection within " + }, + { + "bbox": [ + 104, + 546, + 504, + 567 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 546, + 504, + 567 + ], + "type": "text", + "content": " rounds can be bounded as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 176, + 570, + 433, + 595 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 570, + 433, + 595 + ], + "spans": [ + { + "bbox": [ + 176, + 570, + 433, + 595 + ], + "type": "interline_equation", + "content": "\\| \\phi^ {(s + r)} - \\phi^ {(s)} \\| _ {2} = \\mathcal {O} (\\eta^ {0. 5 - 0. 5 \\beta} \\sqrt {\\log \\frac {1}{\\eta \\delta}}), \\quad \\forall 1 \\leq r \\leq R _ {\\mathrm {g r p}}.", + "image_path": "608c428c47e2792cece3b5ed2f24dc9fe5f0d4cfa4a2a345368f4e7318682ef0.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 631 + ], + "type": "text", + "content": "After combing through the dynamics of Local SGD iterates during the approaching and drift phase, we are ready to present the proof of Theorems J.1 and J.2, which are direct consequences of the lemmas in Appendix K.5 and K.6." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "text", + "content": "Proof of Theorem J.1. 
By Lemmas K.15, K.22 and Corollary K.1, for " + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "text", + "content": ", when all global steps are " + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "text", + "content": "-good, " + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "inline_equation", + "content": "\\bar{\\pmb{\\theta}}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}}" + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,t}^{(s)} \\in \\Gamma^{\\epsilon_2}, \\forall k \\in [K], (s_0 + s_1, 0) \\preceq (s,t) \\preceq (R_{\\mathrm{tot}}, 0)" + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "inline_equation", + "content": "\\| \\pmb{x}_{k,t}^{(s)} \\|_2, \\| \\bar{\\pmb{x}}_H^{(s)} \\|_2" + }, + { + "bbox": [ + 104, + 641, + 505, + 685 + ], + "type": "text", + "content": " satisfy the following equations:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 144, + 688, + 465, + 707 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 688, + 465, + 707 + ], + "spans": [ + { + "bbox": [ + 144, + 688, + 465, + 707 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol{x}_{k,t}^{(s)}\\|_{2} = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}}),\\quad \\forall k\\in [K],0\\leq t\\leq H,s_{0} + s_{1}\\leq s < R_{\\text{tot}},", + "image_path": "9f5b73f045193db638501f57a1e139d192cce246b260ae4ee3950c6ad636d063.jpg" + } + ] + } + ], + 
"index": 19 + }, + { + "bbox": [ + 145, + 710, + 465, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 710, + 465, + 730 + ], + "spans": [ + { + "bbox": [ + 145, + 710, + 465, + 730 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\boldsymbol {x}} _ {H} ^ {(s)} \\| _ {2} = \\mathcal {O} (\\sqrt {\\eta \\log \\frac {1}{\\eta \\delta}}), \\quad \\forall s _ {0} + s _ {1} \\leq s < R _ {\\mathrm {t o t}}.", + "image_path": "fdcad84288c06085d07fed1b5cc175be0e4140075fe168ab053da649b8d62ed8.jpg" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "51" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 50 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "text", + "content": "Hence " + }, + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "inline_equation", + "content": "\\| \\tilde{\\pmb{x}}_0^{(R_{\\mathrm{tot}})}\\| _2 = \\mathcal{O}(\\tilde{\\Psi} (\\bar{\\pmb{\\theta}}^{(R_{\\mathrm{tot}})})) = \\mathcal{O}(\\| \\tilde{\\pmb{x}}_H^{(R_{\\mathrm{tot}} - 1)}\\| _2) = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta\\delta}})" + }, + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "text", + "content": " by smoothness of " + }, + 
{ + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "text", + "content": " and Lemma K.10. According to Theorem K.1, with probability at least " + }, + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "text", + "content": " , all global steps are " + }, + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 80, + 506, + 124 + ], + "type": "text", + "content": " -good, thus completing the proof." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 139, + 504, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 139, + 504, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 139, + 504, + 163 + ], + "type": "text", + "content": "Proof of Theorem J.2. By Lemma K.23, for " + }, + { + "bbox": [ + 104, + 139, + 504, + 163 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 139, + 504, + 163 + ], + "type": "text", + "content": ", when all global steps are " + }, + { + "bbox": [ + 104, + 139, + 504, + 163 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 139, + 504, + 163 + ], + "type": "text", + "content": "-good, then " + }, + { + "bbox": [ + 104, + 139, + 504, + 163 + ], + "type": "inline_equation", + "content": "\\forall s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}} - R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 139, + 504, + 163 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 194, + 171, + 415, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 171, + 415, + 186 + ], + "spans": [ + { + "bbox": [ + 194, + 171, + 415, + 186 + ], + "type": 
"interline_equation", + "content": "\\| \\phi^ {(s + r)} - \\phi^ {(s)} \\| _ {2} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - 0. 5 \\beta}), \\quad \\forall 0 \\leq r \\leq R _ {\\mathrm {g r p}}.", + "image_path": "6cd84eef98472e1b8dd4e3be865f78082371fe4b82159d4c9c3fa6ee8c218e21.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "text", + "content": "Also, by Lemma K.18, when all global steps are " + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "text", + "content": "-good, the change of projection on manifold over " + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "inline_equation", + "content": "s_0 + s_1" + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "text", + "content": " rounds (i.e., Phase 1), " + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "inline_equation", + "content": "\\| \\phi^{(s_0 + s_1)} - \\phi^{(0)} \\|_2" + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "text", + "content": " is bounded by " + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}}(\\sqrt{\\eta})" + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "text", + "content": ". 
According to Theorem K.1, with probability at least " + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "text", + "content": ", all global steps are " + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 192, + 505, + 228 + ], + "type": "text", + "content": "-good, thus completing the proof." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 243, + 239, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 243, + 239, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 239, + 254 + ], + "type": "text", + "content": "K.8 PROOF OF THEOREM 3.3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "text", + "content": "In this subsection, we explicitly derive the dependency of the approximation error on " + }, + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "text", + "content": ". The proofs are quite similar to those in Appendix K.5 and hence we only state the key proof idea for brevity. 
With the same method as the proofs in Appendix K.5.2, we can show that with high probability, " + }, + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\theta}^{(s)} - \\phi^{(s)}\\|_2 \\leq \\frac{1}{2}\\sqrt{\\frac{\\mu}{\\rho_2}}" + }, + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "text", + "content": " after " + }, + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "inline_equation", + "content": "s_0' = \\mathcal{O}(1)" + }, + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "text", + "content": " rounds. Below we focus on the dynamics of Local SGD thereafter. We first remind the readers of the definition of " + }, + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "inline_equation", + "content": "\\{\\tilde{Z}_{k,t}^s\\}" + }, + { + "bbox": [ + 104, + 264, + 504, + 331 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 178, + 339, + 431, + 373 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 339, + 431, + 373 + ], + "spans": [ + { + "bbox": [ + 178, + 339, + 431, + 373 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} := \\sum_ {\\tau = 0} ^ {t - 1} \\left(\\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right)\\right) \\boldsymbol {z} _ {k, \\tau} ^ {(s)}, \\qquad \\tilde {\\boldsymbol {Z}} _ {k, 0} ^ {(s)} = \\boldsymbol {0}.", + "image_path": "bb7e1831fc2a8f8ce76f09155a974063fdb72024ec22f688dcbd699effd5f931.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 381, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 381, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 381, + 504, + 411 + ], + "type": "text", + "content": "We have the following lemma that controls the norm of the matrix product " 
+ }, + { + "bbox": [ + 104, + 381, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\prod_{l = \\tau +1}^{t - 1}(\\boldsymbol {I} - \\eta \\nabla^2\\mathcal{L}(\\tilde{\\boldsymbol{u}}_l^{(s)}))" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "type": "text", + "content": "Lemma K.24. Given " + }, + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "type": "text", + "content": ", then there exists a positive constant " + }, + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "type": "inline_equation", + "content": "C_3^\\prime" + }, + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "type": "text", + "content": " independent of " + }, + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "type": "text", + "content": " such that for all " + }, + { + "bbox": [ + 104, + 415, + 505, + 440 + ], + "type": "inline_equation", + "content": "0\\leq \\tau < t\\leq H" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 231, + 446, + 378, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 446, + 378, + 482 + ], + "spans": [ + { + "bbox": [ + 231, + 446, + 378, + 482 + ], + "type": "interline_equation", + "content": "\\left\\| \\prod_ {l = \\tau + 1} ^ {t - 1} (\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)})) \\right\\| _ {2} \\leq C _ {3} ^ {\\prime}.", + "image_path": "dbe52f10e2c4e4e4e31cd37b63cb5c5b4cb55dbaea57a7d6bdf7b3902e8ac71e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "text", + "angle": 0, 
+ "lines": [ + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "spans": [ + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "text", + "content": "Proof. Since " + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "inline_equation", + "content": "\\tilde{\\pmb{u}}_t^{(s)}\\in \\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "inline_equation", + "content": "0\\leq t\\leq H" + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "text", + "content": ". We first bound the minimum eigenvalue of " + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\tilde{\\pmb{u}}_t^{(s)})" + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "text", + "content": ". 
Due to the PL condition, by Lemma K.6, for " + }, + { + "bbox": [ + 104, + 500, + 504, + 531 + ], + "type": "inline_equation", + "content": "\\eta \\leq \\frac{1}{\\rho_2}" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 120, + 537, + 490, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 537, + 490, + 559 + ], + "spans": [ + { + "bbox": [ + 120, + 537, + 490, + 559 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) - \\mathcal {L} ^ {*} \\leq (1 - \\mu \\eta) ^ {t} \\left(\\mathcal {L} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) - \\mathcal {L} ^ {*}\\right) \\leq \\exp (- \\mu t \\eta) (\\mathcal {L} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) - \\mathcal {L} ^ {*}), \\quad \\forall 0 \\leq t \\leq H.", + "image_path": "2cd48a466b1214618145c33a3aa4ad1c0f4f66d32d08964d7a4b94239b208df5.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 564, + 149, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 564, + 149, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 149, + 575 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 235, + 583, + 374, + 599 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 583, + 374, + 599 + ], + "spans": [ + { + "bbox": [ + 235, + 583, + 374, + 599 + ], + "type": "interline_equation", + "content": "\\tilde {\\Psi} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) \\leq \\exp (- \\mu t \\eta / 2) \\tilde {\\Psi} \\left(\\bar {\\boldsymbol {\\theta}} ^ {(s)}\\right).", + "image_path": "ef1ada50923611695724226796a405988181132a1db3c3cbb35e83d72371a736.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 608, + 271, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 608, + 271, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 608, + 271, + 628 + ], + "type": 
"text", + "content": "Let " + }, + { + "bbox": [ + 104, + 608, + 271, + 628 + ], + "type": "inline_equation", + "content": "C_1^\\prime = \\rho_3\\sqrt{\\frac{\\rho_2}{\\mu}}" + }, + { + "bbox": [ + 104, + 608, + 271, + 628 + ], + "type": "text", + "content": " . By Weyl's inequality," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 174, + 636, + 437, + 729 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 636, + 437, + 729 + ], + "spans": [ + { + "bbox": [ + 174, + 636, + 437, + 729 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left| \\lambda_ {\\min } \\left(\\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\right| = \\left| \\lambda_ {\\min } \\left(\\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) - \\lambda_ {\\min } \\left(\\nabla^ {2} \\mathcal {L} \\left(\\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\right. 
\\right| \\\\ \\leq \\rho_ {3} \\| \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right) - \\nabla^ {2} \\mathcal {L} \\left(\\Phi \\left(\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}\\right)\\right) \\| _ {2} \\\\ \\leq \\rho_ {3} \\| \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} - \\Phi (\\tilde {\\boldsymbol {u}} _ {t} ^ {(s)}) \\| _ {2} \\\\ \\leq \\rho_ {3} \\sqrt {\\frac {2}{\\mu}} \\exp (- \\mu t \\eta / 2) \\tilde {\\Psi} (\\bar {\\boldsymbol {\\theta}} ^ {(s)}) \\\\ \\leq C _ {1} ^ {\\prime} \\exp (- \\mu t \\eta / 2) \\epsilon_ {0}, \\\\ \\end{array}", + "image_path": "81123c479fc5820472079c9240b3aea103c1be4c54d21b1b292452bb7e02024c.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "52" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 51 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "where the last two inequalities use Lemmas K.10 and K.7 respectively. 
Therefore, for all " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "0 \\leq t \\leq H" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "0 \\leq \\tau \\leq t - 1" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 177, + 109, + 504, + 209 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 109, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 177, + 109, + 504, + 209 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\| \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(\\boldsymbol {I} - \\eta \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right)\\right) \\| _ {2} \\leq \\prod_ {l = \\tau + 1} ^ {t - 1} \\left(1 + \\eta \\left| \\lambda_ {\\min } \\nabla^ {2} \\mathcal {L} \\left(\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}\\right) \\right|\\right) \\\\ \\leq \\prod_ {l = 0} ^ {\\infty} (1 + \\eta | \\lambda_ {\\min } \\nabla^ {2} \\mathcal {L} (\\tilde {\\boldsymbol {u}} _ {l} ^ {(s)}) |) \\\\ \\leq \\exp \\left(\\eta \\epsilon_ {0} C _ {1} ^ {\\prime} \\sum_ {l = 0} ^ {\\infty} \\exp (- \\mu l \\eta / 2)\\right). 
\\tag {66} \\\\ \\end{array}", + "image_path": "ee43fdef6b56db89db2761438150103b0abf9159ca8114a5251c9eab0f53e177.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 213, + 347, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 347, + 227 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 347, + 227 + ], + "type": "text", + "content": "For sufficiently small " + }, + { + "bbox": [ + 105, + 213, + 347, + 227 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 105, + 213, + 347, + 227 + ], + "type": "text", + "content": ", there exists a constant " + }, + { + "bbox": [ + 105, + 213, + 347, + 227 + ], + "type": "inline_equation", + "content": "C_2'" + }, + { + "bbox": [ + 105, + 213, + 347, + 227 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 208, + 231, + 504, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 231, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 208, + 231, + 504, + 262 + ], + "type": "interline_equation", + "content": "\\sum_ {l = 0} ^ {\\infty} \\exp (- \\mu l \\eta / 2)) = \\frac {1}{1 - \\exp (- \\mu \\eta / 2)} \\leq \\frac {C _ {2} ^ {\\prime}}{\\eta}. \\tag {67}", + "image_path": "6723da45d6b0b88f25060143ecea3a629950bea9a6a80f42f39096042a5d2b02.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 266, + 304, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 266, + 304, + 278 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 304, + 278 + ], + "type": "text", + "content": "Substituting (67) into (66), we obtain the lemma." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 494, + 266, + 504, + 276 + ], + "blocks": [ + { + "bbox": [ + 494, + 266, + 504, + 276 + ], + "lines": [ + { + "bbox": [ + 494, + 266, + 504, + 276 + ], + "spans": [ + { + "bbox": [ + 494, + 266, + 504, + 276 + ], + "type": "image", + "image_path": "339c2726326801c8e83e538fd68f4467abc476dbd7ac53fcc8d671f5ae3c1c12.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 291, + 504, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 291, + 504, + 317 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 504, + 317 + ], + "type": "text", + "content": "Based on Lemma K.24, we obtain the following lemma about the concentration property of " + }, + { + "bbox": [ + 104, + 291, + 504, + 317 + ], + "type": "inline_equation", + "content": "\\tilde{Z}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 291, + 504, + 317 + ], + "type": "text", + "content": ", which can be derived in the same way as Lemma K.12." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 319, + 389, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 389, + 332 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 389, + 332 + ], + "type": "text", + "content": "Lemma K.25. 
Given " + }, + { + "bbox": [ + 105, + 319, + 389, + 332 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_0}" + }, + { + "bbox": [ + 105, + 319, + 389, + 332 + ], + "type": "text", + "content": ", then with probability at least " + }, + { + "bbox": [ + 105, + 319, + 389, + 332 + ], + "type": "inline_equation", + "content": "1 - \\delta" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 176, + 337, + 434, + 369 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 337, + 434, + 369 + ], + "spans": [ + { + "bbox": [ + 176, + 337, + 434, + 369 + ], + "type": "interline_equation", + "content": "\\| \\tilde {\\boldsymbol {Z}} _ {k, t} ^ {(s)} \\| _ {2} \\leq C _ {3} ^ {\\prime} \\sigma_ {\\max } \\sqrt {\\frac {2 \\alpha}{\\eta} \\log \\frac {2 \\alpha K}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],", + "image_path": "da7ea37d92391a43c15593ae08ab11eb51ff9dd93808ea96b3155c1eb0f8f048.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 373, + 253, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 253, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 253, + 385 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 373, + 253, + 385 + ], + "type": "inline_equation", + "content": "C_3^\\prime" + }, + { + "bbox": [ + 105, + 373, + 253, + 385 + ], + "type": "text", + "content": " is defined in Lemma K.24." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 393, + 504, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 393, + 504, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 393, + 504, + 417 + ], + "type": "text", + "content": "The following lemma can be derived analogously to Lemma K.14 but the error bound is tighter in terms of its dependency on " + }, + { + "bbox": [ + 104, + 393, + 504, + 417 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 393, + 504, + 417 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "text", + "content": "Lemma K.26. Given " + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}\\in \\Gamma^{\\epsilon_1}" + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "text", + "content": ", then for " + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "text", + "content": ", there exists a constant " + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "inline_equation", + "content": "C_4^\\prime" + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "text", + "content": " independent of " + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 419, + 505, + 443 + ], + "type": "text", + "content": 
" such that" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 184, + 448, + 427, + 474 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 448, + 427, + 474 + ], + "spans": [ + { + "bbox": [ + 184, + 448, + 427, + 474 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {\\theta} _ {k, t} ^ {(s)} - \\tilde {\\boldsymbol {u}} _ {t} ^ {(s)} \\| _ {2} \\leq C _ {4} ^ {\\prime} \\sqrt {\\alpha \\eta \\log \\frac {\\alpha}{\\eta \\delta}}, \\quad \\forall 0 \\leq t \\leq H, k \\in [ K ],", + "image_path": "74c835fa15a56d2e016e458e5b7e1498bd48d685a86c8e3cb273925de3821581.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 479, + 124, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 479, + 124, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 124, + 488 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 229, + 493, + 381, + 519 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 493, + 381, + 519 + ], + "spans": [ + { + "bbox": [ + 229, + 493, + 381, + 519 + ], + "type": "interline_equation", + "content": "\\| \\bar {\\boldsymbol {\\theta}} ^ {(s + 1)} - \\tilde {\\boldsymbol {u}} _ {H} ^ {(s)} \\| _ {2} \\leq C _ {4} ^ {\\prime} \\sqrt {\\alpha \\eta \\log \\frac {\\alpha}{\\eta \\delta}}.", + "image_path": "31ddd3437ab0fc848fee7357467cdb64192d3f5e59fb43f52b34c9c8eb525bfd.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": "Then, similar to Lemma K.17, we can show that for " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\delta = \\mathcal{O}(\\mathrm{poly}(\\eta))" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + 
], + "type": "text", + "content": " and simultaneously all " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "s\\geq s_0^{\\prime} + s_1^{\\prime}" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "s_1^\\prime = \\mathcal{O}(\\frac{1}{\\alpha}\\log \\frac{1}{\\eta})" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": ", it holds with probability at least " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": " that " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{\\theta}}^{(s)} - \\phi^{(s)}\\| _2 = \\mathcal{O}(\\sqrt{\\alpha\\eta\\log\\frac{\\alpha}{\\eta\\delta}})" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": ". 
Note that to eliminate the dependency of the second term's denominator on " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": " in (44), we can discuss the cases of " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\alpha >c_{0}" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\alpha < c_{0}" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": " respectively where " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "c_{0}" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": " can be an arbitrary positive constant independent of " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": ". For the case of " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\alpha < c_{0}" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": " group " + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\lceil \\frac{c_0}{\\alpha}\\rceil" + }, + { + "bbox": [ + 104, + 529, + 506, + 617 + ], + "type": "text", + "content": " rounds together and repeat the arguments in this subsection to analyze the closeness between Local SGD and GD iterates as well as the evolution of loss." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 631, + 361, + 642 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 631, + 361, + 642 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 361, + 642 + ], + "type": "text", + "content": "K.9 COMPUTING THE MOMENTS FOR ONE \"GIANT STEP\"" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "content": "In this subsection, we compute the first and second moments for the change of manifold projection every " + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "content": " rounds of Local SGD. Since the randomness in training might drive the iterate out of the working zone, making the dynamic intractable, we analyze a more well-behaved sequence " + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\pmb{\\theta}}_{k,t}^{(s)}: (s,t) \\preceq (R_{\\mathrm{tot}},0), k \\in [K]\\}" + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "content": " which is equal to " + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\{\\pmb{\\theta}_{k,t}^{(s)}\\}" + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "content": " with high probability. 
Specifically, " + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{\\theta}}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "content": " equal to " + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "content": " if the global step " + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "inline_equation", + "content": "(s,t)" + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\eta^{100}" + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "content": "-good and is set as a point " + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{null}} \\in \\Gamma" + }, + { + "bbox": [ + 104, + 651, + 506, + 732 + ], + "type": "text", + "content": " otherwise. The formal definition is as follows." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "53" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 52 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 246, + 68, + 356, + 148 + ], + "blocks": [ + { + "bbox": [ + 246, + 68, + 356, + 148 + ], + "lines": [ + { + "bbox": [ + 246, + 68, + 356, + 148 + ], + "spans": [ + { + "bbox": [ + 246, + 68, + 356, + 148 + ], + "type": "image", + "image_path": "ffc695507921c80a14b163e44d2b9a7edaf3e6efd525e42e284333a0590fabc9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 252, + 159, + 357, + 171 + ], + "lines": [ + { + "bbox": [ + 252, + 159, + 357, + 171 + ], + "spans": [ + { + "bbox": [ + 252, + 159, + 357, + 171 + ], + "type": "text", + "content": "Figure 9: A plot of " + }, + { + "bbox": [ + 252, + 159, + 357, + 171 + ], + "type": "inline_equation", + "content": "\\psi (x)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 192, + 506, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 192, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 506, + 233 + ], + "type": "text", + "content": "Definition K.5 (Well-behaved sequence). 
Denote by " + }, + { + "bbox": [ + 104, + 192, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_t^{(s)}" + }, + { + "bbox": [ + 104, + 192, + 506, + 233 + ], + "type": "text", + "content": " the event " + }, + { + "bbox": [ + 104, + 192, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\{\\text{global step } (s, t) \\text{ is } \\eta^{100} \\text{-good}\\}" + }, + { + "bbox": [ + 104, + 192, + 506, + 233 + ], + "type": "text", + "content": ". Define a well-behaved sequence " + }, + { + "bbox": [ + 104, + 192, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{\\theta}}_{k,t}^{(s)} := \\pmb{\\theta}_{k,t}^{(s)}\\mathbb{1}_{\\mathcal{E}_t^{(s)}} + \\phi_{\\mathrm{null}}\\mathbb{1}_{\\bar{\\mathcal{E}}_t^{(s)}}" + }, + { + "bbox": [ + 104, + 192, + 506, + 233 + ], + "type": "text", + "content": ", which satisfies the following update rule:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 235, + 505, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 235, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 117, + 235, + 505, + 297 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {\\theta}} _ {k, t + 1} ^ {(s)} = \\boldsymbol {\\theta} _ {k, t + 1} ^ {(s)} \\mathbb {1} _ {\\mathcal {E} _ {t + 1} ^ {(s)}} + \\phi_ {\\text {n u l l}} \\mathbb {1} _ {\\bar {\\mathcal {E}} _ {t + 1} ^ {(s)}} (68) \\\\ = \\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\underbrace {- \\mathbb {1} _ {\\bar {\\mathcal {E}} _ {t + 1} ^ {(s)}} (\\hat {\\boldsymbol {\\theta}} _ {k , t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k , t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k , t} ^ {(s)}) + \\mathbb {1} _ {\\bar {\\mathcal {E}} _ {t + 1} ^ {(s)}} \\phi_ {\\mathrm {n u l l}}} _ {:= \\hat 
{\\boldsymbol {e}} _ {k, t} ^ {(s)}}. (69) \\\\ \\end{array}", + "image_path": "f3c1144b8ab03b94d44f5db8e9790a137fbd4961747f1076138057784c04e401.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "text", + "content": "By Theorem K.1, with probability at least " + }, + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "inline_equation", + "content": "1 - \\eta^{100}" + }, + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{\\theta}}_{k,t}^{(s)} = \\pmb{\\theta}_{k,t}^{(s)}, \\forall k \\in [K], (s,t) \\preceq (R_{\\mathrm{tot}},0)" + }, + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "text", + "content": ". Similar to " + }, + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "inline_equation", + "content": "\\{\\pmb{\\theta}_{k,t}^{(s)}\\}" + }, + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "text", + "content": ", we define the following variables with respect to " + }, + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\pmb{\\theta}}_{k,t}^{(s)}\\}" + }, + { + "bbox": [ + 104, + 306, + 504, + 337 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 146, + 339, + 334, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 339, + 334, + 370 + ], + "spans": [ + { + "bbox": [ + 146, + 339, + 334, + 370 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {\\theta}} _ {\\mathrm {a v g}} ^ {(s + 1)} := \\frac {1}{K} \\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {\\theta}} _ {k, H} ^ {(s)}, \\quad \\hat {\\boldsymbol {\\phi}} ^ {(s)} := \\Phi (\\hat 
{\\boldsymbol {\\theta}} _ {\\mathrm {a v g}} ^ {(s)}),", + "image_path": "c6e1b3528e119d90f53209153bf3df9e1d91596abaa03a13ecbf0fae0a1e2bbe.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 157, + 372, + 462, + 402 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 372, + 462, + 402 + ], + "spans": [ + { + "bbox": [ + 157, + 372, + 462, + 402 + ], + "type": "interline_equation", + "content": "\\hat {\\pmb {x}} _ {k, t} ^ {(s)} := \\hat {\\pmb {\\theta}} _ {k, t} ^ {(s)} - \\hat {\\pmb {\\phi}} ^ {(s)}, \\quad \\hat {\\pmb {x}} _ {\\mathrm {a v g}, 0} ^ {(s)} := \\hat {\\pmb {\\theta}} _ {\\mathrm {a v g}} ^ {(s)} - \\hat {\\pmb {\\phi}} ^ {(s)}, \\quad \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} := \\frac {1}{K} \\sum_ {k \\in [ K ]} \\hat {\\pmb {x}} _ {k, H} ^ {(s)}.", + "image_path": "645eabf429a0364286a086334bac995fddab4b9feb8d19a4dde4c367e2d79567.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "text", + "content": "Notice that " + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}_{k,0}^{(s)} = \\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s)}" + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "inline_equation", + "content": "k\\in [K]" + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "text", + "content": ". 
Finally, we introduce the following mapping " + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\Psi (\\pmb {\\theta}):" + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\Gamma \\to \\mathbb{R}^{d\\times d}" + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "text", + "content": ", which is closely related to " + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "inline_equation", + "content": "\\widehat{\\pmb{\\Psi}}" + }, + { + "bbox": [ + 104, + 406, + 504, + 434 + ], + "type": "text", + "content": " defined in Theorem 3.2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 435, + 392, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 392, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 392, + 448 + ], + "type": "text", + "content": "Definition K.6. For " + }, + { + "bbox": [ + 104, + 435, + 392, + 448 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\Gamma" + }, + { + "bbox": [ + 104, + 435, + 392, + 448 + ], + "type": "text", + "content": ", we define the mapping " + }, + { + "bbox": [ + 104, + 435, + 392, + 448 + ], + "type": "inline_equation", + "content": "\\Psi(\\pmb{\\theta}) : \\Gamma \\to \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 104, + 435, + 392, + 448 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 195, + 450, + 413, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 450, + 413, + 477 + ], + "spans": [ + { + "bbox": [ + 195, + 450, + 413, + 477 + ], + "type": "interline_equation", + "content": "\\Psi (\\boldsymbol {\\theta}) = \\sum_ {i, j \\in [ d ]} \\psi \\left(\\eta H \\left(\\lambda_ {i} + \\lambda_ {j}\\right)\\right) \\left\\langle \\boldsymbol {\\Sigma} (\\boldsymbol {\\theta}), \\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top} \\right\\rangle 
\\boldsymbol {v} _ {i} \\boldsymbol {v} _ {j} ^ {\\top},", + "image_path": "715929d2e39b0ff3366350bbce363e18e744cd48ff1057f742b7707249132f85.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "spans": [ + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "inline_equation", + "content": "\\lambda_{i},\\pmb{v}_{i}" + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "text", + "content": " are the " + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "text", + "content": " -th eigenvalue and eigenvector of " + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\pmb {\\theta})" + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "inline_equation", + "content": "\\pmb {v}_i" + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "text", + "content": " 's form an orthonormal basis of " + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "text", + "content": " . Additionally, " + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "inline_equation", + "content": "\\psi (x)\\coloneqq \\frac{e^{-x} - 1 + x}{x}" + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "inline_equation", + "content": "\\psi (0) = 0" + }, + { + "bbox": [ + 104, + 480, + 504, + 507 + ], + "type": "text", + "content": " ; see Figure 9 for a plot." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "content": "Remark K.1. Intuitively, " + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\Psi(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "content": " rescales the entries of " + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "content": " in the eigenbasis of " + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "content": ". When " + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\pmb{\\theta}) = \\mathrm{diag}(\\lambda_1,\\dots ,\\lambda_d)\\in \\mathbb{R}^{d\\times d}" + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\lambda_{i} = 0" + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "inline_equation", + "content": "m < i\\leq d" + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\Psi (\\pmb{\\Sigma}_0)_{i,j} = \\psi (\\eta H(\\lambda_i + \\lambda_j))\\Sigma_{0,i,j}" + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "content": ". 
Note that " + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "inline_equation", + "content": "\\Psi (\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 509, + 504, + 544 + ], + "type": "text", + "content": " can also be written as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 193, + 545, + 416, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 545, + 416, + 559 + ], + "spans": [ + { + "bbox": [ + 193, + 545, + 416, + 559 + ], + "type": "interline_equation", + "content": "\\operatorname {v e c} (\\boldsymbol {\\Psi} (\\boldsymbol {\\theta})) = \\psi (\\eta H (\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) \\oplus \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}))) \\operatorname {v e c} (\\boldsymbol {\\Sigma} (\\boldsymbol {\\theta})),", + "image_path": "091456904f8bfd8fbc3558e054018b24312a2b5b3c2432323936ab54a586e476.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "text", + "content": " denotes the Kronecker sum " + }, + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "inline_equation", + "content": "A\\oplus B = A\\otimes I_d + I_d\\otimes B" + }, + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "inline_equation", + "content": "\\operatorname{vec}(\\cdot)" + }, + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "text", + "content": " is the vectorization operator of a matrix and " + }, + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "inline_equation", + "content": "\\psi 
(\\cdot)" + }, + { + "bbox": [ + 104, + 561, + 504, + 585 + ], + "type": "text", + "content": " is interpreted as a matrix function." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 592, + 425, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 592, + 425, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 592, + 425, + 605 + ], + "type": "text", + "content": "Now we are ready to present the result about the moments of " + }, + { + "bbox": [ + 104, + 592, + 425, + 605 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(s + R_{\\mathrm{grp}})} - \\hat{\\phi}^{(s)}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 607, + 506, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 607, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 506, + 632 + ], + "type": "text", + "content": "Theorem K.2. For " + }, + { + "bbox": [ + 104, + 607, + 506, + 632 + ], + "type": "inline_equation", + "content": "s_0 + s_1 \\leq s \\leq R_{\\mathrm{tot}} - R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 607, + 506, + 632 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 607, + 506, + 632 + ], + "type": "inline_equation", + "content": "0 < \\beta < 0.5" + }, + { + "bbox": [ + 104, + 607, + 506, + 632 + ], + "type": "text", + "content": ", the first and second moments of " + }, + { + "bbox": [ + 104, + 607, + 506, + 632 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(s + R_{\\mathrm{grp}})} - \\hat{\\phi}^{(s)}" + }, + { + "bbox": [ + 104, + 607, + 506, + 632 + ], + "type": "text", + "content": " are as follows:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 634, + 504, + 675 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 634, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 138, + 634, + 504, + 675 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} \\left[ 
\\hat {\\phi} ^ {(s + R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(s)} \\mid \\hat {\\phi} ^ {(s)}, \\mathcal {E} _ {0} ^ {(s)} \\right] = \\frac {\\eta^ {1 - \\beta}}{2 B} \\partial^ {2} \\Phi \\left(\\hat {\\phi} ^ {(s)}\\right) \\left[ \\boldsymbol {\\Sigma} \\left(\\hat {\\phi} ^ {(s)}\\right) + (K - 1) \\Psi \\left(\\hat {\\phi} ^ {(s)}\\right) \\right] \\tag {70} \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 2 \\beta}) + \\tilde {\\mathcal {O}} (\\eta), \\\\ \\end{array}", + "image_path": "6157f24a2c1fb77d594ba8f5a2ed943510dadc15e58d25b83fecab7a17efcff9.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 680, + 504, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 680, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 107, + 680, + 504, + 715 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\left(\\hat {\\phi} ^ {(s + R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(s)}\\right) \\left(\\hat {\\phi} ^ {(s + R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(s)}\\right) ^ {\\top} \\mid \\hat {\\phi} ^ {(s)}, \\mathcal {E} _ {0} ^ {(s)} \\right] = \\frac {\\eta^ {1 - \\beta}}{B} \\Sigma_ {\\|} \\left(\\hat {\\phi} ^ {(s)}\\right) + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 
5 - 2 \\beta}\\right) + \\tilde {\\mathcal {O}} (\\eta), \\tag {71}", + "image_path": "5080b48168cbd1e8ad26901e0ee9124a1a20f930f98578955411d57c53b5123b.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 719, + 346, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 719, + 346, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 719, + 346, + 733 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 719, + 346, + 733 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}} (\\cdot)" + }, + { + "bbox": [ + 104, + 719, + 346, + 733 + ], + "type": "text", + "content": " hides log terms and constants independent of " + }, + { + "bbox": [ + 104, + 719, + 346, + 733 + ], + "type": "inline_equation", + "content": "\\eta" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "54" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 53 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": "Remark K.2. 
By Theorem K.1 and the definition of " + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{\\theta}}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": ", (70) and (71) still hold when we replace " + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(s)}" + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "inline_equation", + "content": "\\phi^{(s)}" + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": " and replace " + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(s + R_{\\mathrm{grp}})}" + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "inline_equation", + "content": "\\phi^{(s + R_{\\mathrm{grp}})}" + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 118, + 504, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 118, + 504, + 157 + ], + "spans": [ + { + "bbox": [ + 104, + 118, + 504, + 157 + ], + "type": "text", + "content": "We shall have Theorem K.2 if we prove the following theorem, which directly gives Theorem K.2 with a simple shift of index. 
For brevity, denote by " + }, + { + "bbox": [ + 104, + 118, + 504, + 157 + ], + "type": "inline_equation", + "content": "\\Delta \\hat{\\phi}^{(s)}\\coloneqq \\hat{\\phi}^{(s)} - \\hat{\\phi}^{(0)}" + }, + { + "bbox": [ + 104, + 118, + 504, + 157 + ], + "type": "inline_equation", + "content": "\\Sigma_0\\coloneqq \\Sigma (\\hat{\\phi}^{(0)})" + }, + { + "bbox": [ + 104, + 118, + 504, + 157 + ], + "type": "inline_equation", + "content": "\\Sigma_{0,\\parallel}\\coloneqq \\Sigma_{\\parallel}(\\hat{\\phi}^{(0)})" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 161, + 505, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 161, + 505, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 161, + 505, + 194 + ], + "type": "text", + "content": "Theorem K.3. Given " + }, + { + "bbox": [ + 104, + 161, + 505, + 194 + ], + "type": "inline_equation", + "content": "\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\phi}^{(0)}\\|_2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})" + }, + { + "bbox": [ + 104, + 161, + 505, + 194 + ], + "type": "text", + "content": ", for " + }, + { + "bbox": [ + 104, + 161, + 505, + 194 + ], + "type": "inline_equation", + "content": "0 < \\beta < 0.5" + }, + { + "bbox": [ + 104, + 161, + 505, + 194 + ], + "type": "text", + "content": ", the first and second moments of " + }, + { + "bbox": [ + 104, + 161, + 505, + 194 + ], + "type": "inline_equation", + "content": "\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})}" + }, + { + "bbox": [ + 104, + 161, + 505, + 194 + ], + "type": "text", + "content": " are as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 164, + 198, + 492, + 223 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 198, + 492, + 223 + ], + "spans": [ + { + "bbox": [ + 164, + 198, + 492, + 223 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\Delta \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})} ] = \\frac {\\eta^ {1 - \\beta}}{2 B} 
\\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {\\Sigma} _ {0} + (K - 1) \\pmb {\\Psi} (\\hat {\\phi} ^ {(0)}) ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 2 \\beta}) + \\tilde {\\mathcal {O}} (\\eta),", + "image_path": "36876e4c567148e867ccf7385b6e704c0d8d295202270c97a3dc065ff905c277.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 119, + 224, + 373, + 248 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 224, + 373, + 248 + ], + "spans": [ + { + "bbox": [ + 119, + 224, + 373, + 248 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\Delta \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})} \\Delta \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}}) ^ {\\top}} ] = \\frac {\\eta^ {1 - \\beta}}{B} \\Sigma_ {0, \\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 1. 5 \\beta}) + \\tilde {\\mathcal {O}} (\\eta).", + "image_path": "0b15c0561344f4dce735e550325c4e1343e529dca1fd9c70a67ec897e653d63c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": "We will prove Theorem K.3 in the remainder of this subsection. For convenience, we introduce more notations that will be used throughout the proof. Let " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\pmb{H}_0 \\coloneqq \\nabla^2\\mathcal{L}(\\hat{\\phi}^{(0)})" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": ". By Assumption 3.2, " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\mathrm{rank}(H_0) = m" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": ". 
WLOG, assume " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "H_0 = \\mathrm{diag}(\\lambda_1,\\dots ,\\lambda_d)\\in \\mathbb{R}^{d\\times d}" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\lambda_{i} = 0" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "m < i\\leq d" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\lambda_{1}\\geq \\lambda_{2}\\dots \\geq \\lambda_{m}" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": ". By Lemma K.2, " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\partial \\Phi (\\hat{\\phi}^{(0)})" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": " is the projection matrix onto the tangent space " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "T_{\\hat{\\phi}^{(0)}}(\\Gamma)" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": " (i.e. 
the null space of " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\hat{\\phi}^{(0)})" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": ") and therefore, " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\partial \\Phi (\\hat{\\phi}^{(0)}) = \\left[ \\begin{array}{cc}0 & 0\\\\ 0 & I_{d - m} \\end{array} \\right]" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\coloneqq \\partial \\Phi (\\hat{\\phi}^{(0)})" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "inline_equation", + "content": "P_{\\perp}\\coloneqq I_d - P_{\\parallel}" + }, + { + "bbox": [ + 104, + 258, + 504, + 346 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "spans": [ + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s)} := \\mathbb{E}[\\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)}\\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)\\top}], \\hat{\\pmb{q}}_t^{(s)} := \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}]" + }, + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{B}}_t^{(s)} := \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}]" + }, + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "text", + "content": ". 
The latter two notations are independent of " + }, + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "text", + "content": " since " + }, + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{\\theta}}_{1,t}^{(s)}, \\dots, \\hat{\\pmb{\\theta}}_{K,t}^{(s)}" + }, + { + "bbox": [ + 104, + 351, + 504, + 394 + ], + "type": "text", + "content": " are identically distributed. The following lemma computes the first and second moments of the change of manifold projection every round." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 398, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 398, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 398, + 504, + 430 + ], + "type": "text", + "content": "Lemma K.27. Given " + }, + { + "bbox": [ + 104, + 398, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\pmb{\\phi}}^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})" + }, + { + "bbox": [ + 104, + 398, + 504, + 430 + ], + "type": "text", + "content": ", for " + }, + { + "bbox": [ + 104, + 398, + 504, + 430 + ], + "type": "inline_equation", + "content": "0\\leq s < R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 398, + 504, + 430 + ], + "type": "text", + "content": ", the first and second moments of " + }, + { + "bbox": [ + 104, + 398, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(s + 1)} - \\hat{\\phi}^{(s)}" + }, + { + "bbox": [ + 104, + 398, + 504, + 430 + ], + "type": "text", + "content": " are as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 127, + 434, + 504, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 434, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 127, + 434, + 504, + 456 + ], + 
"type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} \\right] = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\tag {72}", + "image_path": "86e82b7bd63c6c7953c8eaddcd994581beb3651b009329f6c193fba8cb682a15.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 127, + 458, + 504, + 474 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 458, + 504, + 474 + ], + "spans": [ + { + "bbox": [ + 127, + 458, + 504, + 474 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\left(\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}\\right) \\left(\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}\\right) ^ {\\top} \\right] = P _ {\\|} \\hat {A} _ {\\text {a v g}} ^ {(s)} P _ {\\|} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right). \\tag {73}", + "image_path": "4c7559b18d0f1d03b5e63f04bd81053396d8900ca1d8a3c84d8e1b58b8da3dc9.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 484, + 259, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 484, + 259, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 259, + 496 + ], + "type": "text", + "content": "Proof. 
By Taylor expansion, we have" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 126, + 502, + 484, + 631 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 502, + 484, + 631 + ], + "spans": [ + { + "bbox": [ + 126, + 502, + 484, + 631 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\phi} ^ {(s + 1)} = \\Phi (\\hat {\\phi} ^ {(s)} + \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)}) \\\\ = \\hat {\\phi} ^ {(s)} + \\partial \\Phi (\\hat {\\phi} ^ {(s)}) \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(s)}) [ \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s) \\top} ] + \\mathcal {O} (\\| \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}) \\\\ = \\hat {\\phi} ^ {(s)} + \\partial \\Phi (\\hat {\\phi} ^ {(0)} + \\Delta \\hat {\\phi} ^ {(s)}) \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)} + \\Delta \\hat {\\phi} ^ {(s)}) [ \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s) \\top} ] \\\\ + \\mathcal {O} (\\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}) \\\\ = \\hat {\\phi} ^ {(s)} + P _ {\\parallel} \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s)} \\Delta \\hat {\\phi} ^ {(s) \\top} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {x} _ {\\mathrm {a v g}, H} ^ {(s) \\top} ] \\\\ + \\mathcal {O} (\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} + \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} + \\| \\hat {\\boldsymbol {x}} _ 
{\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}). \\\\ \\end{array}", + "image_path": "0155b9159a276017cbda1a228991289448863a89788b2f9b6bd3d8b171d3d6d5.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 635, + 249, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 249, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 249, + 647 + ], + "type": "text", + "content": "Rearrange the terms and we obtain:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 653, + 504, + 693 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 653, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 118, + 653, + 504, + 693 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\Delta \\hat {\\phi} ^ {(s) \\top} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s) \\top} ] \\tag {74} \\\\ + \\mathcal {O} \\left(\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} + \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} + \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3}\\right). 
\\\\ \\end{array}", + "image_path": "3746691eedb39bf474e3174a71c81502519c87ce9e1ea3b9b6b6cbd183a6f016.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 696, + 149, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 696, + 149, + 708 + ], + "spans": [ + { + "bbox": [ + 105, + 696, + 149, + 708 + ], + "type": "text", + "content": "Moreover," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 124, + 713, + 504, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 713, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 124, + 713, + 504, + 731 + ], + "type": "interline_equation", + "content": "(\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ^ {\\top} = P _ {\\|} \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s) \\top} P _ {\\|} + \\mathcal {O} (\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2}). 
\\tag {75}", + "image_path": "0920bd26bac49e442dce92db6b6659c7c91212bb01700e0d6dcfb687ba54d5a6.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "55" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 54 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 504, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 504, + 113 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 504, + 113 + ], + "type": "text", + "content": "Noticing that " + }, + { + "bbox": [ + 104, + 80, + 504, + 113 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}_{k,H}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}" + }, + { + "bbox": [ + 104, + 80, + 504, + 113 + ], + "type": "text", + "content": " are identically distributed for all " + }, + { + "bbox": [ + 104, + 80, + 504, + 113 + ], + "type": "inline_equation", + "content": "k\\in [K]" + }, + { + "bbox": [ + 104, + 80, + 504, + 113 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 104, + 80, + 504, + 113 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}] = \\frac{1}{K}\\sum_{k\\in [K]}\\mathbb{E}[\\hat{\\pmb{x}}_{k,H}^{(s)}\\Delta \\hat{\\phi}^{(s)\\top}] = \\hat{\\pmb{B}}_H^{(s)}" + }, + { + "bbox": [ + 104, + 80, + 504, + 113 + ], + "type": "text", + "content": ". 
Then taking expectation of both sides of (74) gives" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 122, + 497, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 122, + 497, + 163 + ], + "spans": [ + { + "bbox": [ + 115, + 122, + 497, + 163 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} + \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] + \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] \\\\ + \\mathcal {O} (\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ] + \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} ] + \\mathbb {E} [ \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3} ]). 
\\\\ \\end{array}", + "image_path": "d4f62f7f53c88d7423d83c24f4ea60615557078ec3f941d4d7848466ba3566c6.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 170, + 321, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 170, + 321, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 321, + 183 + ], + "type": "text", + "content": "Again taking expectation of both sides of (75) yields" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 190, + 477, + 209 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 190, + 477, + 209 + ], + "spans": [ + { + "bbox": [ + 132, + 190, + 477, + 209 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) (\\hat {\\phi} ^ {(s + 1)} - \\Delta \\hat {\\phi} ^ {(s) \\top}) ] = P _ {\\parallel} \\hat {A} _ {\\mathrm {a v g}} ^ {(s)} P _ {\\parallel} + \\mathcal {O} (\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} ]).", + "image_path": "f1a3744688f82a6a9f43c7b4344e0787dd8787fca592fc0908745a526189dd83.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 216, + 500, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 500, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 500, + 229 + ], + "type": "text", + "content": "By Lemmas K.22 and K.23, the following holds simultaneously with probability at least " + }, + { + "bbox": [ + 104, + 216, + 500, + 229 + ], + "type": "inline_equation", + "content": "1 - \\eta^{100}" + }, + { + "bbox": [ + 104, + 216, + 500, + 229 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 197, + 237, + 413, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 237, + 413, + 255 + ], + "spans": [ + { + "bbox": [ + 197, + 237, + 413, + 255 + ], + "type": 
"interline_equation", + "content": "\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - 0. 5 \\beta}), \\quad \\| \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5}).", + "image_path": "4b7263e70ce99a7f6f5ff9b58fc7b573e70870ac23bb1a0f5818dda10969578d.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "text", + "content": "Furthermore, since for all " + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "inline_equation", + "content": "k \\in [K]" + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "inline_equation", + "content": "(s,t) \\preceq (R_{\\mathrm{tot}},0)" + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{\\theta}}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "text", + "content": " stays in " + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "text", + "content": " which is a bounded set, " + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "inline_equation", + "content": "\\| \\Delta \\hat{\\phi}^{(s)}\\| _2" + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "inline_equation", + "content": "\\| \\hat{\\pmb{x}}_{\\mathrm{avg},H}^{(s)}\\| _2" + }, + { + "bbox": [ + 104, + 265, + 504, + 297 + ], + "type": "text", + "content": " are also bounded. 
Therefore, we have" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 222, + 305, + 504, + 321 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 305, + 504, + 321 + ], + "spans": [ + { + "bbox": [ + 222, + 305, + 504, + 321 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\tag {76}", + "image_path": "c59ce5bd4bef684a763cd3f11566af73152e8b58cf6156879778cc07c4fa5172.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 222, + 323, + 504, + 340 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 323, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 222, + 323, + 504, + 340 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\tag {77}", + "image_path": "44e6515b610e0df846f6ad6dc70865de860e53940ce9c765fb0e5cc66f04ab45.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 262, + 342, + 504, + 359 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 342, + 504, + 359 + ], + "spans": [ + { + "bbox": [ + 262, + 342, + 504, + 359 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 
5}), \\tag {78}", + "image_path": "454c52975faa5ad60a4f4d3f3a102a6a05c21ddd1f8a07e8cd4347ea461ef7c3.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 366, + 216, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 366, + 216, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 216, + 379 + ], + "type": "text", + "content": "which concludes the proof." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "text", + "content": "We compute " + }, + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\hat{A}_{\\mathrm{avg}}^{(s)}, \\hat{q}_t^{(s)}" + }, + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\hat{B}_t^{(s)}" + }, + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "text", + "content": " by solving a set of recursions, which is formulated in the following lemma. Additionally, define " + }, + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\hat{A}_t^{(s)} \\coloneqq \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}\\hat{\\pmb{x}}_{k,t}^{(s)\\top}]" + }, + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\hat{M}_t^{(s)} \\coloneqq \\mathbb{E}[\\hat{\\pmb{x}}_{k,t}^{(s)}\\hat{\\pmb{x}}_{k,l}^{(s)}], (k \\neq l)" + }, + { + "bbox": [ + 104, + 400, + 504, + 430 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 435, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 506, + 464 + ], + "type": "text", + "content": "Lemma K.28. Given " + }, + { + "bbox": [ + 104, + 435, + 506, + 464 + ], + "type": "inline_equation", + "content": "\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\phi}^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})" + }, + { + "bbox": [ + 104, + 435, + 506, + 464 + ], + "type": "text", + "content": ", for " + }, + { + "bbox": [ + 104, + 435, + 506, + 464 + ], + "type": "inline_equation", + "content": "0\\leq s < R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 435, + 506, + 464 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 435, + 506, + 464 + ], + "type": "inline_equation", + "content": "0\\leq t < H" + }, + { + "bbox": [ + 104, + 435, + 506, + 464 + ], + "type": "text", + "content": ", we have the following recursions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 133, + 473, + 504, + 493 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 473, + 504, + 493 + ], + "spans": [ + { + "bbox": [ + 133, + 473, + 504, + 493 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {q}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {q}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {q}} _ {t} ^ {(s)} - \\eta \\nabla^ {3} \\mathcal {L} \\left(\\phi^ {(0)}\\right) \\left[ \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} \\right] - \\frac {\\eta}{2} \\nabla^ {3} \\mathcal {L} \\left(\\phi^ {(0)}\\right) \\left[ \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\right] + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 
5 - \\beta}\\right), \\tag {79}", + "image_path": "a36a045e262b41126bd5a36def7e3bc9803c186afef621291497b8de232410f4.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 495, + 504, + 521 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 495, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 130, + 495, + 504, + 521 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {A}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} _ {0} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}), \\tag {80}", + "image_path": "0214e245c80c2ac75180fa1bf785e91c712e7cf341f2fea124d7212d90810cc8.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 127, + 523, + 504, + 538 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 523, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 127, + 523, + 504, + 538 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {M}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - 0. 
5 \\beta}\\right), \\tag {81}", + "image_path": "18de3d788144ffcf12de3ac04e3186142f1819ccf21e2fd6d1a6c76d68651a1b.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 541, + 504, + 556 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 541, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 129, + 541, + 504, + 556 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {B}} _ {t + 1} ^ {(s)} = \\left(\\boldsymbol {I} - \\eta \\boldsymbol {H} _ {0}\\right) \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - \\beta}\\right). \\tag {82}", + "image_path": "fe139cdbbe962f53bfc987ffac831f4fcad165718b96a1349a5b3f7a40b27dd2.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 564, + 149, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 564, + 149, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 149, + 574 + ], + "type": "text", + "content": "Moreover," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 164, + 584, + 504, + 607 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 584, + 504, + 607 + ], + "spans": [ + { + "bbox": [ + 164, + 584, + 504, + 607 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} = \\frac {1}{K} \\hat {\\boldsymbol {A}} _ {H} ^ {(s)} + (1 - \\frac {1}{K}) \\hat {\\boldsymbol {M}} _ {H} ^ {(s)}, \\tag {83}", + "image_path": "203d8d1616da93357ead8bc497545cc65c76a65d3b81b7cf8541464a8e4b3bd0.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 153, + 609, + 504, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 609, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 153, + 609, + 504, + 624 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {M}} _ {0} ^ {(s + 1)} = \\hat {\\boldsymbol {A}} _ {0} ^ {(s + 1)} = \\boldsymbol {P} _ {\\perp} 
\\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} \\boldsymbol {P} _ {\\perp} + \\mathcal {O} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right), \\tag {84}", + "image_path": "a482d8034590d55f4d01791aca42489051c09dc104ce0972cd1055902ba1a2f2.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 158, + 626, + 504, + 648 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 626, + 504, + 648 + ], + "spans": [ + { + "bbox": [ + 158, + 626, + 504, + 648 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {q}} _ {0} ^ {(s + 1)} = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} - \\partial^ {2} \\Phi (\\phi^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\partial^ {2} \\Phi (\\phi^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\tag {85}", + "image_path": "042203d38e0c83511372bde18f9c8c869925d6758e39f9d08eed4fcc6fda1bd6.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 156, + 651, + 504, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 651, + 504, + 666 + ], + "spans": [ + { + "bbox": [ + 156, + 651, + 504, + 666 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {B}} _ {0} ^ {(s + 1)} = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} + \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {\\text {a v g}} ^ {(s)} \\boldsymbol {P} _ {\\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\tag {86}", + "image_path": "6b1bca55a8be315a75a0520e0192b4db6442ab9c4e40b35d6e58be990e257730.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 688, + 412, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 688, + 412, + 704 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 412, + 704 + ], + "type": "text", + "content": "Proof. 
We first derive the recursion for " + }, + { + "bbox": [ + 105, + 688, + 412, + 704 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{q}}_t^{(s)}" + }, + { + "bbox": [ + 105, + 688, + 412, + 704 + ], + "type": "text", + "content": ". Recall the update rule for " + }, + { + "bbox": [ + 105, + 688, + 412, + 704 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{\\theta}}_{k,t}^{(s)}" + }, + { + "bbox": [ + 105, + 688, + 412, + 704 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 214, + 712, + 395, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 712, + 395, + 730 + ], + "spans": [ + { + "bbox": [ + 214, + 712, + 395, + 730 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {\\theta}} _ {k, t + 1} ^ {(s)} = \\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)}.", + "image_path": "fad7e5cf86c594c0f56c624b56b4f07dce1a8ad2a95adc42dd3be4e6e5e0514f.jpg" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "56" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 55 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 263, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
105, + 81, + 263, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 263, + 95 + ], + "type": "text", + "content": "Subtracting " + }, + { + "bbox": [ + 105, + 81, + 263, + 95 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(s)}" + }, + { + "bbox": [ + 105, + 81, + 263, + 95 + ], + "type": "text", + "content": " from both sides gives" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 119, + 102, + 503, + 250 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 102, + 503, + 250 + ], + "spans": [ + { + "bbox": [ + 119, + 102, + 503, + 250 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {x}} _ {k, t + 1} ^ {(s)} = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\nabla \\mathcal {L} (\\hat {\\boldsymbol {\\theta}} _ {k, t} ^ {(s)}) - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}) \\\\ = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\left(\\nabla^ {2} \\mathcal {L} (\\hat {\\boldsymbol {\\phi}} ^ {(s)}) \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} + \\frac {1}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\boldsymbol {\\phi}} ^ {(s)}) [ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s) \\top} ] + \\mathcal {O} (\\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3})\\right) \\\\ - \\eta z _ {k, t} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}) \\\\ = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\left(\\nabla^ {2} \\mathcal {L} \\left(\\hat {\\boldsymbol {\\phi}} ^ {(0)}\\right) + \\nabla^ {3} \\mathcal {L} \\left(\\hat {\\boldsymbol {\\phi}} ^ {(0)}\\right) \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\mathcal {O} \\left(\\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| ^ {2}\\right)\\right) \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\\\ - \\frac {\\eta}{2} \\left(\\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) + 
\\mathcal {O} \\left(\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2}\\right)\\right) \\left[ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k t} ^ {(s) \\top} \\right] - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} + \\mathcal {O} \\left(\\eta \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}\\right) \\\\ = \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} - \\eta \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\Delta \\hat {\\phi} ^ {(s) \\top} ] - \\frac {\\eta}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s) \\top} ] - \\eta \\boldsymbol {z} _ {k, t} ^ {(s)} \\\\ + \\mathcal {O} (\\eta \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} + \\eta \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} + \\eta \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2}), \\tag {87} \\\\ \\end{array}", + "image_path": "0eb0dac513f33725469fa8b02ae5d622bdedb3d9966665fec76d48ae6dfc9c83.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 257, + 504, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 257, + 504, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 504, + 280 + ], + "type": "text", + "content": "where the second and third equality perform Taylor expansion. 
Taking expectation on both sides gives" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 288, + 492, + 329 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 288, + 492, + 329 + ], + "spans": [ + { + "bbox": [ + 118, + 288, + 492, + 329 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\pmb {q}} _ {t + 1} ^ {(s)} = (\\pmb {I} - \\eta \\pmb {H} _ {0}) \\hat {\\pmb {q}} _ {t} ^ {(s)} - \\eta \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {q}} _ {t} ^ {(s)} ] - \\frac {\\eta}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {A}} _ {t} ^ {(s)} ] \\\\ + \\mathcal {O} \\left(\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} ] + \\eta \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} ] + \\eta \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ] + \\mathbb {E} [ \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]\\right). 
\\\\ \\end{array}", + "image_path": "2107fce48fbf40d41059a5abe97293caa140c1484315f60dddfcf9f04eae4392.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "content": "By Theorem K.1, with probability at least " + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "inline_equation", + "content": "1 - \\eta^{100}" + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\hat{e}_{k,t}^{(s)} = \\mathbf{0}, \\forall k \\in [K], (s,t) \\preceq (R_{\\mathrm{grp}},0)" + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "content": ". Also notice that both " + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\hat{\\theta}_{k,t}^{(s)}" + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\phi_{\\mathrm{null}}" + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "content": " belong to the bounded set " + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_2}" + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "content": ". 
Therefore, " + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\| \\hat{e}_{k,t}^{(s)} \\|_2" + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "content": " is bounded and we have " + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\| \\hat{e}_{k,t}^{(s)} \\|_2] = \\mathcal{O}(\\eta^{100})" + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "content": ". Combining this with (76) to (78) yields (79)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 392, + 504, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 416 + ], + "type": "text", + "content": "Secondly, we derive the recursion for " + }, + { + "bbox": [ + 104, + 392, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\hat{B}_t^{(s)}" + }, + { + "bbox": [ + 104, + 392, + 504, + 416 + ], + "type": "text", + "content": ". 
Multiplying both sides of (87) by " + }, + { + "bbox": [ + 104, + 392, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\Delta \\hat{\\phi}^{(s)\\top}" + }, + { + "bbox": [ + 104, + 392, + 504, + 416 + ], + "type": "text", + "content": " and taking expectation, we have" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 129, + 423, + 479, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 423, + 479, + 440 + ], + "spans": [ + { + "bbox": [ + 129, + 423, + 479, + 440 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {B}} _ {t + 1} ^ {(s)} = (\\boldsymbol {I} - \\eta \\boldsymbol {H} _ {0}) \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} + \\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]).", + "image_path": "dc9124ab0c584ab910a7604acbd06564c88c13e2fa120e28b986a752c223fdd3.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 447, + 317, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 447, + 317, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 317, + 459 + ], + "type": "text", + "content": "Still by Theorem K.1 and (76) to (78), we have (82)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 465, + 346, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 465, + 346, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 465, + 346, + 479 + ], + "type": "text", + "content": "Thirdly, we derive the recursion for " + }, + { + "bbox": [ + 105, + 465, + 346, + 479 + ], + "type": "inline_equation", + "content": "\\hat{A}_t^{(s)}" + }, + { + "bbox": [ + 105, + 465, + 346, + 479 + ], + "type": "text", + "content": ". 
By (87), we have" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 137, + 487, + 473, + 558 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 487, + 473, + 558 + ], + "spans": [ + { + "bbox": [ + 137, + 487, + 473, + 558 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {A}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} _ {0} + \\mathcal {O} (\\eta^ {2} \\mathbb {E} [ \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ]) \\\\ + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {3} + \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]) \\\\ = (\\boldsymbol {I} - \\eta \\boldsymbol {H} _ {0}) \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\boldsymbol {\\Sigma} _ {0} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}), \\\\ \\end{array}", + "image_path": "50005493561984c93fa892b02a10847f08a9458360ec5030d7f63656dda13d4b.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 564, + 200, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 564, + 200, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 200, + 576 + ], + "type": "text", + "content": "which establishes (80)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 582, + 504, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 608 + ], + "type": "text", + "content": "Fourthly, we derive the recursion for " + }, + { + "bbox": [ + 104, + 582, + 504, + 608 + ], + "type": "inline_equation", + "content": "\\hat{M}_t^{(s)}" + }, + { + "bbox": [ + 104, + 582, + 504, + 608 + ], + "type": "text", + "content": ". Multiplying both sides of (87) by " + }, + { + "bbox": [ + 104, + 582, + 504, + 608 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}_{l,t + 1}^{(s)}" + }, + { + "bbox": [ + 104, + 582, + 504, + 608 + ], + "type": "text", + "content": " and taking expectation, " + }, + { + "bbox": [ + 104, + 582, + 504, + 608 + ], + "type": "inline_equation", + "content": "l\\neq k" + }, + { + "bbox": [ + 104, + 582, + 504, + 608 + ], + "type": "text", + "content": ", we obtain" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 145, + 616, + 465, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 616, + 465, + 651 + ], + "spans": [ + { + "bbox": [ + 145, + 616, + 465, + 651 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {M}} _ {t + 1} ^ {(s)} = \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\boldsymbol {H} _ {0} \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} - \\eta \\hat {\\boldsymbol {M}} _ {t} ^ {(s)} \\boldsymbol {H} _ {0} + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} \\| \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} \\| _ {2} ]) \\\\ + \\mathcal {O} (\\eta \\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} + \\| \\hat {\\boldsymbol {e}} _ {k, t} ^ {(s)} \\| _ {2} ]). 
\\\\ \\end{array}", + "image_path": "3f94fbb7ed67f6424fbd74dd142034b1d250e7f26df4028f5cc60b69840c3c81.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 658, + 350, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 350, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 350, + 670 + ], + "type": "text", + "content": "By a similar argument to the proof of Lemma K.27, we have" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 252, + 679, + 376, + 694 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 679, + 376, + 694 + ], + "spans": [ + { + "bbox": [ + 252, + 679, + 376, + 694 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\| \\hat {\\boldsymbol {x}} _ {k, (s)} ^ {(s)} \\| _ {2} ^ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} ] = \\bar {\\mathcal {O}} (\\eta^ {1. 5}),", + "image_path": "7862ed34a8355266ecf2e01c61bca64786a24896cf6cf801e804ceb1ea16fbbd.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 212, + 697, + 397, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 697, + 397, + 712 + ], + "spans": [ + { + "bbox": [ + 212, + 697, + 397, + 712 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\| \\hat {\\boldsymbol {x}} _ {k, t} ^ {(s)} \\| _ {2} \\| \\hat {\\boldsymbol {x}} _ {l, t} ^ {(s)} \\| _ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} \\right] = \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right),", + "image_path": "3d57a0897f6a72691dcff4d6efff5b4455f6508e3f95ebacdac414541b0133c9.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 720, + 180, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 180, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 180, + 732 + ], + "type": "text", + "content": "which yields (81)." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "57" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 56 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 354, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 354, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 354, + 95 + ], + "type": "text", + "content": "Now we proceed to prove (83) to (86). 
By definition of " + }, + { + "bbox": [ + 104, + 80, + 354, + 95 + ], + "type": "inline_equation", + "content": "\\hat{A}_{\\mathrm{avg}}^{(s)}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 172, + 106, + 437, + 194 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 106, + 437, + 194 + ], + "spans": [ + { + "bbox": [ + 172, + 106, + 437, + 194 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} = \\frac {1}{K ^ {2}} \\mathbb {E} [ (\\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)}) (\\sum_ {k \\in [ K ]} \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)}) ^ {\\top} ] \\\\ = \\frac {1}{K ^ {2}} \\sum_ {k \\in [ K ]} \\mathbb {E} \\left[ \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s) \\top} \\right] + \\frac {1}{K ^ {2}} \\sum_ {k, l \\in [ K ], k \\neq l} \\mathbb {E} \\left[ \\hat {\\boldsymbol {x}} _ {k, H} ^ {(s)} \\hat {\\boldsymbol {x}} _ {l, H} ^ {(s) \\top} \\right] \\\\ = \\frac {1}{K} \\hat {\\boldsymbol {A}} _ {H} ^ {(s)} + (1 - \\frac {1}{K}) \\hat {\\boldsymbol {M}} _ {H} ^ {(s)}, \\\\ \\end{array}", + "image_path": "3e61e9d1e65b0dcf414cb6e5307a0748f01c845ed0d3d011498a41ec4ec24d50.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 205, + 394, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 205, + 394, + 223 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 394, + 223 + ], + "type": "text", + "content": "which demonstrates (83). Then we derive (84). 
By definition of " + }, + { + "bbox": [ + 105, + 205, + 394, + 223 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s + 1)}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 157, + 233, + 504, + 314 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 233, + 504, + 314 + ], + "spans": [ + { + "bbox": [ + 157, + 233, + 504, + 314 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} = \\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} - \\Phi \\big (\\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\hat {\\mathbf {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\\\ = \\hat {\\phi} ^ {(s)} + \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} - \\left(\\hat {\\phi} ^ {(s)} + \\partial \\Phi (\\hat {\\phi} ^ {(s)}) \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ^ {2})\\right) \\\\ = \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} - \\left(\\pmb {P} _ {\\parallel} + \\mathcal {O} (\\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2})\\right) \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} + \\mathcal {O} (\\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2}) \\\\ = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} + \\mathcal {O} \\left(\\left\\| \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\right\\| _ {2} ^ {2} + \\left\\| \\hat {\\boldsymbol {x}} _ {\\text {a v g}, H} ^ {(s)} \\right\\| _ {2} \\left\\| \\Delta \\hat {\\phi} ^ {(s)} \\right\\| _ {2}\\right). 
\\tag {88} \\\\ \\end{array}", + "image_path": "606c62a00c6fca1ae28e69e551659cc9d42b91c5d2fdf5128cdc0cc87e69ec68.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 323, + 136, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 136, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 136, + 335 + ], + "type": "text", + "content": "Hence," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 165, + 346, + 444, + 382 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 346, + 444, + 382 + ], + "spans": [ + { + "bbox": [ + 165, + 346, + 444, + 382 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {M}} _ {0} ^ {(s + 1)} = \\hat {\\boldsymbol {A}} _ {0} ^ {(s + 1)} = \\mathbb {E} [ \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s)} \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s) \\top} ] \\\\ = \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\perp} + \\mathcal {O} (\\mathbb {E} [ \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {3} + \\| \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} \\| _ {2} ^ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} ]). \\\\ \\end{array}", + "image_path": "1d4be361a98abe186e7a48716e550c71e479522e2b32171e17cb29a46937226f.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 392, + 277, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 392, + 277, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 277, + 405 + ], + "type": "text", + "content": "By (76) and (78), we obtain (84). 
By (74)," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 162, + 416, + 504, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 416, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 162, + 416, + 504, + 434 + ], + "type": "interline_equation", + "content": "\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} = P _ {\\|} \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} + \\mathcal {O} \\left(\\left\\| \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\right\\| _ {2} \\| \\Delta \\hat {\\phi} ^ {(s)} \\| _ {2} + \\| \\hat {\\mathbf {x}} _ {\\operatorname {a v g}, H} ^ {(s)} \\| _ {2} ^ {2}\\right). \\tag {89}", + "image_path": "0e7057d8d7a6c55ab2f8d0ab066d8fc634cfe9d3849ee7384bdfc3dcde4915d3.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 445, + 233, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 233, + 457 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 233, + 457 + ], + "type": "text", + "content": "Combining (88) and (89) gives" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 183, + 468, + 426, + 486 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 468, + 426, + 486 + ], + "spans": [ + { + "bbox": [ + 183, + 468, + 426, + 486 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\hat {\\pmb {x}} _ {\\mathrm {a v g}, 0} ^ {(s)} (\\hat {\\pmb {\\phi}} ^ {(s + 1)} - \\hat {\\pmb {\\phi}} ^ {(s)}) ^ {\\top} ] = \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}).", + "image_path": "23aa407b79ee243efb56179d7967aa69c5916877428df3e23f9e73d66d550980.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 495, + 150, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 150, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 150, + 507 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 159, + 518, + 449, + 554 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 518, + 449, + 554 + ], + "spans": [ + { + "bbox": [ + 159, + 518, + 449, + 554 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\boldsymbol {B}} _ {0} ^ {(s + 1)} = \\mathbb {E} [ \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} \\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s + 1) \\top} ] = \\mathbb {E} [ \\hat {\\boldsymbol {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} (\\Delta \\hat {\\boldsymbol {\\phi}} ^ {(s)} + \\hat {\\boldsymbol {\\phi}} ^ {(s + 1)} - \\hat {\\boldsymbol {\\phi}} ^ {(s)}) ^ {\\top} ] \\\\ = \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} + \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} \\boldsymbol {P} _ {\\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}", + "image_path": "1d9a5718b9584e1900b3bbe3507c3d4627ad3218d7d4e0c21e02d451cf6ccc1f.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 563, + 291, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 563, + 291, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 291, + 576 + ], + "type": "text", + "content": "Finally, we apply Lemma K.27 to derive (85)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 142, + 587, + 466, + 653 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 587, + 466, + 653 + ], + "spans": [ + { + "bbox": [ + 142, + 587, + 466, + 653 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {\\pmb {q}} _ {0} ^ {(s + 1)} = \\mathbb {E} [ \\hat {\\pmb {x}} _ {\\mathrm {a v g}, 0} ^ {(s + 1)} ] = \\mathbb {E} [ \\hat {\\pmb {x}} _ {\\mathrm {a v g}, H} ^ {(s)} - (\\hat {\\pmb {\\phi}} ^ {(s + 1)} - \\hat {\\pmb {\\phi}} ^ {(s)}) ] \\\\ = \\hat {\\pmb {q}} _ {H} ^ {(s)} - \\pmb {P} _ {\\parallel} \\hat {\\pmb {q}} _ {H} ^ {(s)} - \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = \\pmb {P} _ {\\perp} \\hat {\\pmb {q}} _ {H} ^ {(s)} - \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ \\end{array}", + "image_path": "1ae4e1a5842731ee1325453f975a0195851bcf712a9c623ef06b0623d7b6269a.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 664, + 217, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 664, + 217, + 676 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 217, + 676 + ], + "type": "text", + "content": "which concludes the proof." 
+ } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 494, + 664, + 505, + 673 + ], + "blocks": [ + { + "bbox": [ + 494, + 664, + 505, + 673 + ], + "lines": [ + { + "bbox": [ + 494, + 664, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 494, + 664, + 505, + 673 + ], + "type": "image", + "image_path": "d4dcbf8df32884e9d32fcdd59e3a618e31cb9e1d7d85998673d0924bf5a9fe95.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "content": "With the assumption that the hessian at " + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(0)}" + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "content": " is diagonal, we have the following corollary that formulates the recursions for each matrix element." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "58" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 57 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": "Corollary K.2. Given " + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "inline_equation", + "content": "\\| \\hat{\\pmb{\\theta}}_{\\mathrm{avg}}^{(0)} - \\hat{\\phi}^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})" + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": ", for " + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "inline_equation", + "content": "0\\leq s < R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "inline_equation", + "content": "0\\leq t < H" + }, + { + "bbox": [ + 104, + 81, + 504, + 110 + ], + "type": "text", + "content": ", we have the following elementwise recursions." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 154, + 121, + 505, + 146 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 121, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 154, + 121, + 505, + 146 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {t + 1, i, j} ^ {(s)} = \\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) \\hat {A} _ {t, i, j} ^ {(s)} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - 0. 5 \\beta}\\right), \\tag {90}", + "image_path": "d1dc8ae920a126fecbcff46e14d287896adbff40f30589d1de70251f8a91ed61.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 153, + 148, + 504, + 164 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 148, + 504, + 164 + ], + "spans": [ + { + "bbox": [ + 153, + 148, + 504, + 164 + ], + "type": "interline_equation", + "content": "\\hat {M} _ {t + 1, i, j} ^ {(s)} = \\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) \\hat {M} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 5 - 0. 5 \\beta}\\right), \\tag {91}", + "image_path": "7909640db2f5448ebdc7d821b6af12a75512fd5d345c7d3e49a71efe2c5c6c14.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 155, + 167, + 504, + 183 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 167, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 155, + 167, + 504, + 183 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {t + 1, i, j} ^ {(s)} = \\left(1 - \\lambda_ {i} \\eta\\right) \\hat {B} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {2. 
5 - \\beta}\\right), \\tag {92}", + "image_path": "0b4709adb6d546da982488c9250ebef34764bc2988126f293f25663149c5ac51.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 156, + 185, + 504, + 207 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 185, + 504, + 207 + ], + "spans": [ + { + "bbox": [ + 156, + 185, + 504, + 207 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {\\text {a v g}, i, j} ^ {(s)} = \\frac {1}{K} \\left(\\hat {A} _ {H, i, j} ^ {(s)} - \\hat {M} _ {H, i, j} ^ {(s)}\\right) + \\hat {M} _ {H, i, j} ^ {(s)}, \\tag {93}", + "image_path": "c9396939316d6304d97fb295a918fada746e6c79e32206eaacaa43f9e800553e.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 156, + 209, + 504, + 240 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 209, + 504, + 240 + ], + "spans": [ + { + "bbox": [ + 156, + 209, + 504, + 240 + ], + "type": "interline_equation", + "content": "\\hat {M} _ {0, i, j} ^ {(s + 1)} = \\hat {A} _ {0, i, j} ^ {(s + 1)} = \\left\\{ \\begin{array}{l l} \\hat {A} _ {\\text {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), & 1 \\leq i \\leq m, 1 \\leq j \\leq m, \\\\ \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {94}", + "image_path": "70551756adae8afbb37c92c1be79eb27733eb7664592934a208a284d07a93703.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 157, + 243, + 504, + 287 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 243, + 504, + 287 + ], + "spans": [ + { + "bbox": [ + 157, + 243, + 504, + 287 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {0, i, j} ^ {(s + 1)} = \\left\\{ \\begin{array}{l l} \\hat {B} _ {H, i, j} ^ {(s)} + \\hat {A} _ {\\text {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 
5 - \\beta}\\right), & 1 \\leq i \\leq m, m < j \\leq d, \\\\ \\hat {B} _ {H, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & 1 \\leq i \\leq m, 1 \\leq j \\leq m, \\\\ \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & m < i \\leq d. \\end{array} \\right. \\tag {95}", + "image_path": "2460a63e8aacb110bea89d58e04815fc67fe86d4b11677708f4c31fea90b4d68.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "text", + "content": "Having formulated the recursions, we are ready to solve out the explicit expressions. We will split each matrix into four parts and solve them one by one. Specifically, a matrix " + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "text", + "content": " can be split into " + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "inline_equation", + "content": "P_{\\parallel}MP_{\\parallel}" + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "text", + "content": " in the tangent space of " + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(0)}, P_{\\perp}MP_{\\perp}" + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "text", + "content": " in the normal space, along with " + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "inline_equation", + "content": "P_{\\parallel}MP_{\\perp}" + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 303, + 
504, + 352 + ], + "type": "inline_equation", + "content": "P_{\\perp}MP_{\\parallel}" + }, + { + "bbox": [ + 104, + 303, + 504, + 352 + ], + "type": "text", + "content": " across both spaces." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 356, + 360, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 356, + 360, + 373 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 360, + 373 + ], + "type": "text", + "content": "We first compute the elements of " + }, + { + "bbox": [ + 105, + 356, + 360, + 373 + ], + "type": "inline_equation", + "content": "P_{\\perp}\\hat{A}_{t}^{(s)}P_{\\perp}" + }, + { + "bbox": [ + 105, + 356, + 360, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 356, + 360, + 373 + ], + "type": "inline_equation", + "content": "P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}" + }, + { + "bbox": [ + 105, + 356, + 360, + 373 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "spans": [ + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "text", + "content": "Lemma K.29 (General formula for " + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "inline_equation", + "content": "P_{\\perp}\\hat{A}_{t}^{(s)}P_{\\perp}" + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "inline_equation", + "content": "P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}" + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "text", + "content": "). Let " + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "inline_equation", + "content": "R_0 \\coloneqq \\lceil \\frac{10}{\\lambda_m\\alpha}\\log \\frac{1}{\\eta}\\rceil" + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "text", + "content": ". 
Then for " + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "inline_equation", + "content": "1\\leq i\\leq m,1\\leq j\\leq m" + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 102, + 376, + 504, + 405 + ], + "type": "inline_equation", + "content": "R_0\\leq s < R_{\\mathrm{grp}}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 120, + 413, + 334, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 413, + 334, + 440 + ], + "spans": [ + { + "bbox": [ + 120, + 413, + 334, + 440 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),", + "image_path": "c129bbee871621c38bd7fcfa0fae7a2448026269fddb078e6ce198de5a4213ea.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 441, + 489, + 469 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 441, + 489, + 469 + ], + "spans": [ + { + "bbox": [ + 130, + 441, + 489, + 469 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {(1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\frac {\\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}).", + "image_path": "25535c84f43b3baf20a5c03ef4faf35d40ab2813fd5886001777284f6c1ef9c1.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 478, + 307, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 478, + 307, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 307, + 495 + ], + "type": "text", + "content": "For " + }, + { + "bbox": [ + 105, + 478, + 307, + 495 + ], + "type": "inline_equation", + "content": "s < R_0" + }, + { + "bbox": [ + 105, + 478, + 307, + 495 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 105, + 478, + 307, + 495 + ], + "type": "inline_equation", + "content": "\\hat{A}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)" + }, + { + "bbox": [ + 105, + 478, + 307, + 495 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 478, + 307, + 495 + ], + "type": "inline_equation", + "content": "\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)" + }, + { + "bbox": [ + 105, + 478, + 307, + 495 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 521, + 351, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 521, + 351, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 351, + 534 + ], + "type": "text", + "content": "Proof. For " + }, + { + "bbox": [ + 104, + 521, + 351, + 534 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq m, 1 \\leq j \\leq m, \\lambda_i > 0, \\lambda_j > 0" + }, + { + "bbox": [ + 104, + 521, + 351, + 534 + ], + "type": "text", + "content": ". 
By (90)," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 544, + 472, + 640 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 544, + 472, + 640 + ], + "spans": [ + { + "bbox": [ + 138, + 544, + 472, + 640 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {A} _ {t, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\sum_ {\\tau = 0} ^ {t - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {\\tau} \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} \\\\ + \\tilde {\\mathcal {O}} (\\sum_ {\\tau = 0} ^ {t - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {\\tau} \\eta^ {2. 5 - 0. 5 \\beta}) \\\\ = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\\\ \\end{array}", + "image_path": "87102816fc7ff70186dba80f44c6e2425f8ea19eb7fdd72546e7d8f4c1e1acc6.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 650, + 504, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 650, + 504, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 504, + 670 + ], + "type": "text", + "content": "where the second inequality uses " + }, + { + "bbox": [ + 105, + 650, + 504, + 670 + ], + "type": "inline_equation", + "content": "\\sum_{\\tau = 0}^{t - 1}(1 - (\\lambda_i + \\lambda_j)\\eta)^\\tau = \\frac{1 - (1 - (\\lambda_i + \\lambda_j)\\eta)^t}{(\\lambda_i + \\lambda_j)\\eta}\\leq \\frac{1}{(\\lambda_i + \\lambda_j)\\eta}" + }, + { + "bbox": [ + 105, + 650, + 504, + 670 + ], + "type": "text", + "content": ". 
By (91)," + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 157, + 679, + 451, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 679, + 451, + 731 + ], + "spans": [ + { + "bbox": [ + 157, + 679, + 451, + 731 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {M} _ {t, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {M} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} \\left(\\sum_ {\\tau = 0} ^ {t - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {\\tau} \\eta^ {2. 5 - 0. 5 \\beta}\\right) \\\\ = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\\\ \\end{array}", + "image_path": "995c19f71e2e31d155024bb8eabda10d3408202148f73b189dfeb1bfc99790b2.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "59" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 58 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 79, + 385, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 79, + 385, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 385, + 95 + ], + "type": "text", + "content": "where the second equality uses " + }, + { + "bbox": [ + 104, + 79, + 385, + 95 + ], + "type": "inline_equation", + "content": "M_0^{(s + 1)} = A_0^{(s + 1)}" + }, + { + "bbox": [ + 104, + 
79, + 385, + 95 + ], + "type": "text", + "content": ". By (93) and (94)," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 128, + 98, + 481, + 125 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 98, + 481, + 125 + ], + "spans": [ + { + "bbox": [ + 128, + 98, + 481, + 125 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H} \\hat {A} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),", + "image_path": "1387e8cdfb3f9a70432fa07f2da9b5207b8851eb6d3633c35924eed890059891.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 126, + 481, + 172 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 126, + 481, + 172 + ], + "spans": [ + { + "bbox": [ + 132, + 126, + 481, + 172 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {A} _ {0, i, j} ^ {(s + 1)} = \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}) \\\\ = \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H} \\hat {A} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}). 
\\\\ \\end{array}", + "image_path": "9dc2b57e3bd7efcb101ee13104f1db5c25e75312bff7b6790a6e2bf4ae19afff.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 175, + 170, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 170, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 170, + 185 + ], + "type": "text", + "content": "Then we obtain" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 116, + 188, + 493, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 188, + 493, + 255 + ], + "spans": [ + { + "bbox": [ + 116, + 188, + 493, + 255 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {A} _ {0, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H} \\hat {A} _ {0, i, j} ^ {(0)} + \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} \\sum_ {r = 0} ^ {s - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H} \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta} \\sum_ {r = R _ {0}} ^ {s - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H}). 
\\\\ \\end{array}", + "image_path": "703dc53b6e94ce86d712fc582b374bacef8ccdd70c988b030d54aa3fa11b18b2.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 258, + 257, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 258, + 257, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 257, + 271 + ], + "type": "text", + "content": "Notice that " + }, + { + "bbox": [ + 105, + 258, + 257, + 271 + ], + "type": "inline_equation", + "content": "|1 - (\\lambda_i + \\lambda_j)\\eta | < 1" + }, + { + "bbox": [ + 105, + 258, + 257, + 271 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 170, + 274, + 504, + 289 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 274, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 170, + 274, + 504, + 289 + ], + "type": "interline_equation", + "content": "\\left(1 - \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) ^ {H} \\leq \\exp \\left(- \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta H\\right) = \\exp \\left(- \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\alpha\\right). 
\\tag {96}", + "image_path": "596a5fe7057c2a61ca242850844d194384d400e4f8b3ff3c2cc40bfeec3452cc.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 291, + 149, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 291, + 149, + 301 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 149, + 301 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 142, + 305, + 468, + 337 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 305, + 468, + 337 + ], + "spans": [ + { + "bbox": [ + 142, + 305, + 468, + 337 + ], + "type": "interline_equation", + "content": "\\sum_ {r = 0} ^ {s - 1} (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H} = \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {r H}}{1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}} \\leq \\frac {1}{1 - \\exp (- (\\lambda_ {i} + \\lambda_ {j}) \\alpha)}.", + "image_path": "e37f0419ce0d4d04b823cc26d2f81d487581a2325682b060c2f8f2005756b9cb.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 340, + 164, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 340, + 164, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 164, + 350 + ], + "type": "text", + "content": "Then we have" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 128, + 352, + 481, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 352, + 481, + 380 + ], + "spans": [ + { + "bbox": [ + 128, + 352, + 481, + 380 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {0, i, j} ^ {(s)} = (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H} \\hat {A} _ {0, i, j} ^ {(0)} + \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {s H}}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}).", + "image_path": "b3472513faaac9cb76116ec2ee9a7364e3d902d61c6a2c5892b2fe6189c260c8.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "text", + "content": "Finally, we demonstrate that for " + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "inline_equation", + "content": "s \\geq R_0" + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "inline_equation", + "content": "\\hat{A}_{0,i,j}^{(s)}" + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "inline_equation", + "content": "\\hat{A}_{\\mathrm{avg},i,j}^{(s)}" + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "text", + "content": " is approximately equal to " + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "inline_equation", + "content": "\\frac{\\eta}{(\\lambda_i + \\lambda_j)KB_{\\mathrm{loc}}}\\Sigma_{0,i,j}" + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "text", + "content": ". 
By (96), when " + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "inline_equation", + "content": "s \\geq R_0" + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "inline_equation", + "content": "(1 - (\\lambda_i + \\lambda_j)\\eta)^{sH} = \\mathcal{O}(\\eta^{10})" + }, + { + "bbox": [ + 104, + 384, + 504, + 415 + ], + "type": "text", + "content": ", which gives" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 120, + 416, + 334, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 416, + 334, + 441 + ], + "spans": [ + { + "bbox": [ + 120, + 416, + 334, + 441 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1}{(\\lambda_ {i} + \\lambda_ {j}) K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),", + "image_path": "bb14472a98c128c8f00eda285584bce3bdee1380fc99fd5a44c4023916a30615.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 129, + 443, + 488, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 443, + 488, + 471 + ], + "spans": [ + { + "bbox": [ + 129, + 443, + 488, + 471 + ], + "type": "interline_equation", + "content": "A _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {(1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\frac {\\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}).", + "image_path": "8444f670b8a353d038f1ac97a9b0fae462e3d227554404b4f1353bc37aa7d329.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "text", + "content": "For " + }, + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "inline_equation", + "content": "s < R_0" + }, + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "text", + "content": ", since " + }, + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\hat{A}_0^{(0)} = \\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s)}\\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(s)\\top} = \\tilde{\\mathcal{O}} (\\eta)" + }, + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)" + }, + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\hat{A}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)" + }, + { + "bbox": [ + 104, + 472, + 504, + 491 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 502, + 314, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 502, + 314, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 314, + 518 + ], + "type": "text", + "content": "Secondly, we compute " + }, + { + "bbox": [ + 104, + 502, + 314, + 518 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{A}_t^{(s)}P_\\perp" + }, + { + "bbox": [ + 104, + 502, + 314, + 518 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 502, + 314, + 518 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 520, + 485, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 485, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 485, + 536 + ], + "type": "text", + "content": "Lemma K.30 (General formula for " + }, + { + "bbox": [ + 104, + 520, + 485, + 536 + ], + "type": "inline_equation", + "content": "P_{\\perp}\\hat{A}_{t}^{(s)}P_{\\parallel}" + }, + { + "bbox": [ + 104, + 520, + 485, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 520, + 485, + 536 + ], + "type": "inline_equation", + "content": "P_{\\perp}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel})" + }, + { + "bbox": [ + 104, + 520, + 485, + 536 + ], + "type": "text", + "content": " . 
For " + }, + { + "bbox": [ + 104, + 520, + 485, + 536 + ], + "type": "inline_equation", + "content": "1\\leq i\\leq m,m < j\\leq d," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 208, + 538, + 406, + 564 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 538, + 406, + 564 + ], + "spans": [ + { + "bbox": [ + 208, + 538, + 406, + 564 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {t, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),", + "image_path": "c7bf5f58cff22cfe1c008f6a97e442a04fa67d7add251087875ec3546ebfe575.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 200, + 565, + 410, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 565, + 410, + 592 + ], + "spans": [ + { + "bbox": [ + 200, + 565, + 410, + 592 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).", + "image_path": "d8ae5b1582b45e6dfc43e792cbed902973205d56bfba09c8207dbc070d7f7501.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 602, + 439, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 602, + 439, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 602, + 439, + 615 + ], + "type": "text", + "content": "Proof. 
Note that for " + }, + { + "bbox": [ + 104, + 602, + 439, + 615 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq m, m < j \\leq d" + }, + { + "bbox": [ + 104, + 602, + 439, + 615 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 602, + 439, + 615 + ], + "type": "inline_equation", + "content": "\\lambda_i > 0, \\lambda_j = 0" + }, + { + "bbox": [ + 104, + 602, + 439, + 615 + ], + "type": "text", + "content": ". By (90) and (94)," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 168, + 617, + 440, + 670 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 617, + 440, + 670 + ], + "spans": [ + { + "bbox": [ + 168, + 617, + 440, + 670 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {A} _ {t, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {t} \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}) \\\\ = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}", + "image_path": "bec7bd401245e0ef193da4f73eabc9f6c2f6ac38944b97b71a06a948392d6c75.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 673, + 301, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 673, + 301, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 673, + 301, + 689 + ], + "type": "text", + "content": "By (91) and (94), " + }, + { + "bbox": [ + 105, + 673, + 301, + 689 + ], + "type": "inline_equation", + "content": "\\hat{M}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{1.5 - 0.5\\beta})" + }, + { + "bbox": [ + 105, + 673, + 301, + 689 + ], + "type": "text", + "content": ". 
Then," + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 199, + 692, + 410, + 719 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 692, + 410, + 719 + ], + "spans": [ + { + "bbox": [ + 199, + 692, + 410, + 719 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).", + "image_path": "cae40e5bdc1a0ccee69d6cef9c49cf47b6e531f79cfeedf002aefba67001fbed.jpg" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "60" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 59 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "text", + "content": "Similar to Lemma K.30, we have the following lemma for the general formula of " + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "inline_equation", + "content": "P_{\\parallel} \\hat{A}_t^{(s)} P_{\\perp}" + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "inline_equation", + "content": "P_{\\parallel} 
\\hat{A}_{\\mathrm{avg}}^{(s)} P_{\\perp}" + }, + { + "bbox": [ + 104, + 81, + 504, + 111 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "text", + "content": "Lemma K.31 (General formula for " + }, + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{A}_t^{(s)}P_\\perp" + }, + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\perp}" + }, + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "text", + "content": "). For " + }, + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "inline_equation", + "content": "m < i \\leq d" + }, + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "inline_equation", + "content": "1 \\leq j \\leq m" + }, + { + "bbox": [ + 104, + 114, + 501, + 131 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 208, + 135, + 408, + 162 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 135, + 408, + 162 + ], + "spans": [ + { + "bbox": [ + 208, + 135, + 408, + 162 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {t, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {j} \\eta) ^ {t}}{\\lambda_ {j} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}),", + "image_path": "19b147a191583777c258cf515141fae7803b75f67e50061bf82df5f26b7532a3.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 200, + 163, + 411, + 192 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 163, + 411, + 192 + ], + "spans": [ + { + "bbox": [ + 200, + 163, + 411, + 192 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {1 - (1 - \\lambda_ {j} \\eta) ^ {H}}{\\lambda_ {j} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).", + "image_path": "6d89a18ebc4e2e889dcbff35e2db3909ad609415498f2f9cf061295b4882e8ed.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 203, + 386, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 203, + 386, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 203, + 386, + 220 + ], + "type": "text", + "content": "Finally, we derive the general formula for " + }, + { + "bbox": [ + 104, + 203, + 386, + 220 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{A}_t^{(s)}P_{\\parallel}" + }, + { + "bbox": [ + 104, + 203, + 386, + 220 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 203, + 386, + 220 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel}" + }, + { + "bbox": [ + 104, + 203, + 386, + 220 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "text", + "content": "Lemma K.32 (General formula for " + }, + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{A}_t^{(s)}P_{\\parallel}" + }, + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{A}_{\\mathrm{avg}}^{(s)}P_{\\parallel}" + }, + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "text", + "content": "). For " + }, + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "inline_equation", + "content": "m < i \\leq d" + }, + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "inline_equation", + "content": "m < j \\leq d" + }, + { + "bbox": [ + 104, + 222, + 498, + 239 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 206, + 243, + 377, + 269 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 243, + 377, + 269 + ], + "spans": [ + { + "bbox": [ + 206, + 243, + 377, + 269 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {H \\eta^ {2}}{K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}),", + "image_path": "97e985607520fcf89f54c554b36df654441126baaee34bc64f475756aff1754e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 217, + 270, + 402, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 270, + 402, + 297 + ], + "spans": [ + { + "bbox": [ + 217, + 270, + 402, + 297 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {t, i, j} ^ {(s)} = \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {t \\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}).", + "image_path": "d6195e6d3360f209e13ba6a058ee383bfc0c8a8ec7db675f8f5a7e33fdad728a.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 307, + 460, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 307, + 460, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 460, + 321 + ], + "type": "text", + "content": "Proof. Note that for " + }, + { + "bbox": [ + 104, + 307, + 460, + 321 + ], + "type": "inline_equation", + "content": "m < i \\leq d" + }, + { + "bbox": [ + 104, + 307, + 460, + 321 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 307, + 460, + 321 + ], + "type": "inline_equation", + "content": "m < j \\leq d" + }, + { + "bbox": [ + 104, + 307, + 460, + 321 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 307, + 460, + 321 + ], + "type": "inline_equation", + "content": "\\lambda_i = \\lambda_j = 0" + }, + { + "bbox": [ + 104, + 307, + 460, + 321 + ], + "type": "text", + "content": ". 
(90) is then simplified as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 207, + 326, + 403, + 352 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 326, + 403, + 352 + ], + "spans": [ + { + "bbox": [ + 207, + 326, + 403, + 352 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {t + 1, i, j} ^ {(s)} = \\hat {A} _ {t, i, j} ^ {(s)} + \\frac {\\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - 0. 5 \\beta}).", + "image_path": "37793d0412153af99eeff938bd6baf80e97b69b9ef53004db63939da67ce6892.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 356, + 151, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 356, + 151, + 368 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 151, + 368 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 211, + 373, + 504, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 373, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 211, + 373, + 504, + 399 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {t, i, j} ^ {(s)} = \\hat {A} _ {0, i, j} ^ {(s)} + \\frac {t \\eta^ {2}}{B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - 0. 5 \\beta}\\right). 
\\tag {97}", + "image_path": "dcdb8d91039ed36a965a7241c0aaa244ce7a03aac00fab15dc308703f1e7e42f.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 405, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 405, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 405, + 504, + 430 + ], + "type": "text", + "content": "According to (91), " + }, + { + "bbox": [ + 104, + 405, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\hat{M}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{1.5 - 0.5\\beta})" + }, + { + "bbox": [ + 104, + 405, + 504, + 430 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 405, + 504, + 430 + ], + "type": "inline_equation", + "content": "m < i\\leq d" + }, + { + "bbox": [ + 104, + 405, + 504, + 430 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 405, + 504, + 430 + ], + "type": "inline_equation", + "content": "m < j\\leq d" + }, + { + "bbox": [ + 104, + 405, + 504, + 430 + ], + "type": "text", + "content": ". Combining (91), (94) and (97) yields" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 220, + 435, + 389, + 462 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 435, + 389, + 462 + ], + "spans": [ + { + "bbox": [ + 220, + 435, + 389, + 462 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} = \\frac {H \\eta^ {2}}{K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}).", + "image_path": "26fdc23934cd44f14ba46971a7073efcf09c120321a56843ac50942763b725ca.jpg" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 494, + 467, + 504, + 476 + ], + "blocks": [ + { + "bbox": [ + 494, + 467, + 504, + 476 + ], + "lines": [ + { + "bbox": [ + 494, + 467, + 504, + 476 + ], + "spans": [ + { + "bbox": [ + 494, + 467, + 504, + 476 + ], + "type": "image", + "image_path": "4f9db5562bba8d982c04450bdefd155a9b7b4bbdeb34727f4888a7895dc935c3.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 488, + 347, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 488, + 347, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 488, + 347, + 503 + ], + "type": "text", + "content": "Now, we move on to compute the general formula for " + }, + { + "bbox": [ + 104, + 488, + 347, + 503 + ], + "type": "inline_equation", + "content": "\\hat{B}_t^{(s)}" + }, + { + "bbox": [ + 104, + 488, + 347, + 503 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "text", + "content": "Lemma K.33 (The general formula for " + }, + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "inline_equation", + "content": "P_{\\perp} \\hat{B}_t^{(s)} P_{\\parallel}" + }, + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "text", + "content": "). 
Note that for " + }, + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq m" + }, + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "inline_equation", + "content": "m < j \\leq d" + }, + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "text", + "content": ", when " + }, + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "inline_equation", + "content": "R_0 := \\lceil \\frac{10}{\\lambda_m \\alpha} \\log \\frac{1}{\\eta} \\rceil \\leq s < R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 506, + 504, + 536 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 219, + 541, + 390, + 567 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 541, + 390, + 567 + ], + "spans": [ + { + "bbox": [ + 219, + 541, + 390, + 567 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {t, i, j} ^ {(s)} = \\frac {(1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}).", + "image_path": "30bbbc74eb49fbc2277c1938871593e5a6095d067b20ef42859ab509d9ee0fe3.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 572, + 219, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 572, + 219, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 219, + 590 + ], + "type": "text", + "content": "For " + }, + { + "bbox": [ + 105, + 572, + 219, + 590 + ], + "type": "inline_equation", + "content": "s < R_0" + }, + { + "bbox": [ + 105, + 572, + 219, + 590 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 105, + 572, + 219, + 590 + ], + "type": "inline_equation", + "content": "\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)" + }, + { + "bbox": [ + 105, + 572, + 219, + 590 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 600, + 307, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 307, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 307, + 613 + ], + "type": "text", + "content": "Proof. Note that for " + }, + { + "bbox": [ + 104, + 600, + 307, + 613 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq m" + }, + { + "bbox": [ + 104, + 600, + 307, + 613 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 600, + 307, + 613 + ], + "type": "inline_equation", + "content": "\\lambda_i > 0" + }, + { + "bbox": [ + 104, + 600, + 307, + 613 + ], + "type": "text", + "content": ". By (92)," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 220, + 617, + 389, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 617, + 389, + 635 + ], + "spans": [ + { + "bbox": [ + 220, + 617, + 389, + 635 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {t + 1, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) \\hat {B} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 
5 - \\beta}).", + "image_path": "54521e43536b9bdc76de685663eedd97dfc9b29d47ce434e8c097fc540c06330.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 639, + 137, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 137, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 137, + 650 + ], + "type": "text", + "content": "Hence," + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 223, + 655, + 386, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 655, + 386, + 673 + ], + "spans": [ + { + "bbox": [ + 223, + 655, + 386, + 673 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {t, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {t} \\hat {B} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).", + "image_path": "1fa87fdc4e3a4933d7908ea9158e8e169a67273e2a34713bef08a74316b6ce3f.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 677, + 182, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 182, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 182, + 689 + ], + "type": "text", + "content": "According to (95)," + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 195, + 693, + 413, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 693, + 413, + 731 + ], + "spans": [ + { + "bbox": [ + 195, + 693, + 413, + 731 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {B} _ {0, i, j} ^ {(s + 1)} = \\hat {B} _ {H, i, j} ^ {(s)} + \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {H} \\hat {B} _ {0, i, j} ^ {(s)} + \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\\\ \\end{array}", + "image_path": "6f37169ad9f3185606a93126c6a906ebd8582d6103e12a5adafc4220b6a64a3f.jpg" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "61" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 60 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 165, + 92 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 165, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 165, + 92 + ], + "type": "text", + "content": "Then we have" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 96, + 477, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 96, + 477, + 186 + ], + "spans": [ + { + "bbox": [ + 132, + 96, + 477, + 186 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {B} _ {0, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} \\sum_ {r = 0} ^ {s - 1} (1 - \\lambda_ {i} \\eta) ^ {r H} + \\tilde {\\mathcal {O}} (\\sum_ {r = 0} ^ {s - 1} (1 - \\lambda_ {i} \\eta) ^ {r H} \\eta^ {1. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {s H}}{1 - (1 - \\lambda_ {i} \\eta) ^ {H}} \\hat {A} _ {\\mathrm {a v g}, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}) \\\\ = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {s H}}{1 - (1 - \\lambda_ {i} \\eta) ^ {H}} \\hat {A} _ {\\mathrm {a v g},, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}", + "image_path": "6df4b217728f1b996128c3e5aaf563b64762d84335d476d571bed9d200d43b29.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "text", + "content": "where the second equality uses (96) and the last inequality uses " + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\hat{B}_0^{(0)} = \\hat{\\pmb{x}}_{\\mathrm{avg},0}^{(0)}\\Delta \\hat{\\phi}^{(0)} = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "text", + "content": ". For " + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "inline_equation", + "content": "s\\geq R_0" + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\frac{1 - (1 - \\lambda_i\\eta)^H}{\\lambda_iKB_{\\mathrm{loc}}} \\eta \\Sigma_{0,i,j} + \\tilde{\\mathcal{O}} (\\eta^{1.5 - 0.5\\beta})" + }, + { + "bbox": [ + 104, + 190, + 506, + 224 + ], + "type": "text", + "content": ", which gives" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 225, + 227, + 384, + 250 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 227, + 384, + 250 + ], + "spans": [ + { + "bbox": [ + 225, + 227, + 384, + 250 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {0, i, j} ^ {(s)} = \\frac {\\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}).", + "image_path": "10e01d05a2500a74836d14d1ffc31d8b25a1796a9ddd32020a1b1a41d47c58f6.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 253, + 151, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 253, + 151, + 263 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 151, + 263 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 219, + 267, + 390, + 293 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 267, + 390, + 293 + ], + "spans": [ + { + "bbox": [ + 219, + 267, + 390, + 293 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {t, i, j} ^ {(s)} = \\frac {(1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}).", + "image_path": "aef5379d7edf86c525bf94b89c0c7000fac526a835ddfc9311a19f08e5ee5b49.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 297, + 349, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 349, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 349, + 315 + ], + "type": "text", + "content": "For " + }, + { + "bbox": [ + 104, + 297, + 349, + 315 + ], + "type": "inline_equation", + "content": "s < R_0" + }, + { + "bbox": [ + 104, + 297, + 349, + 315 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 297, + 349, + 315 + ], + "type": "inline_equation", + "content": "\\hat{A}_{\\mathrm{avg},i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)" + }, + { + "bbox": [ + 104, + 297, + 349, + 315 + ], + "type": "text", + "content": " and therefore, " + }, + { + "bbox": [ + 104, + 297, + 349, + 315 + ], + "type": "inline_equation", + "content": "\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta)" + }, + { + "bbox": [ + 104, + 297, + 349, + 315 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 494, + 300, + 505, + 310 + ], + "blocks": [ + { + "bbox": [ + 494, + 300, + 505, + 310 + ], + "lines": [ + { + "bbox": [ + 494, + 300, + 505, + 310 + ], + "spans": [ + { + "bbox": [ + 494, + 300, + 505, + 310 + ], + "type": "image", + "image_path": "32b84bff80b5f374e13d7d220662f423fc6d255fd2589d0d27ef5bad9a6bcc3f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "text", + "content": "Lemma K.34 (General formula for the elements of " + }, + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "inline_equation", + "content": "P_{\\perp} \\hat{B}_t^{(s)} P_{\\perp}" + }, + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "text", + "content": "). For " + }, + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq m" + }, + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "inline_equation", + "content": "1 \\leq j \\leq m" + }, + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "inline_equation", + "content": "\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}}(\\eta^{1.5 - \\beta})" + }, + { + "bbox": [ + 104, + 322, + 504, + 352 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 361, + 308, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 308, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 308, + 373 + ], + "type": "text", + "content": "Proof. 
Note that for " + }, + { + "bbox": [ + 104, + 361, + 308, + 373 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq m" + }, + { + "bbox": [ + 104, + 361, + 308, + 373 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 361, + 308, + 373 + ], + "type": "inline_equation", + "content": "\\lambda_i > 0" + }, + { + "bbox": [ + 104, + 361, + 308, + 373 + ], + "type": "text", + "content": ". By (92)," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 220, + 377, + 389, + 395 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 377, + 389, + 395 + ], + "spans": [ + { + "bbox": [ + 220, + 377, + 389, + 395 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {t + 1, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) \\hat {B} _ {t, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}).", + "image_path": "0b4f521bc2caba166f9186a61d09909948119c53a49e549fbb8f7ce8d5e150e8.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 397, + 137, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 397, + 137, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 397, + 137, + 408 + ], + "type": "text", + "content": "Hence," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 223, + 412, + 386, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 412, + 386, + 430 + ], + "spans": [ + { + "bbox": [ + 223, + 412, + 386, + 430 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {t, i, j} ^ {(s)} = (1 - \\lambda_ {i} \\eta) ^ {t} \\hat {B} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}).", + "image_path": "44f242cfb7460aa9fa681810f82ad2653c005903297474d0f1d8a5109350fc63.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 431, + 141, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 431, + 141, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 431, + 141, + 443 + ], + "type": "text", + "content": "By (95)," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 184, + 447, + 424, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 447, + 424, + 552 + ], + "spans": [ + { + "bbox": [ + 184, + 447, + 424, + 552 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {B} _ {0, i, j} ^ {(s + 1)} = \\hat {B} _ {H, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {H} \\hat {B} _ {0, i, j} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\tilde {\\mathcal {O}} (\\sum_ {r = 0} ^ {s - 1} (1 - \\lambda_ {i} \\eta) ^ {r H} \\eta^ {1. 5 - \\beta}) \\\\ = (1 - \\lambda_ {i} \\eta) ^ {s H} \\hat {B} _ {0, i, j} ^ {(0)} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}) \\\\ = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ \\end{array}", + "image_path": "675adea062c14632212e19bf7fb349eab04abe914bebf13ff06733e12a003e5f.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 557, + 270, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 270, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 270, + 572 + ], + "type": "text", + "content": "where the last inequality uses " + }, + { + "bbox": [ + 104, + 557, + 270, + 572 + ], + "type": "inline_equation", + "content": "\\hat{B}_0^{(0)} = 0" + }, + { + "bbox": [ + 104, + 557, + 270, + 572 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 494, + 559, + 505, + 570 + ], + "blocks": [ + { + "bbox": [ + 494, + 559, + 505, + 570 + ], + "lines": [ + { + "bbox": [ + 494, + 559, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 494, + 559, + 505, + 570 + ], + "type": "image", + "image_path": "52eb9e3b58cb8b8311c94d5e68838c18fdd54aa2bca9b33745bd0a2ca977a4ff.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 578, + 440, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 440, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 440, + 594 + ], + "type": "text", + "content": "Lemma K.35 (General formula for " + }, + { + "bbox": [ + 104, + 578, + 440, + 594 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{B}_t^{(s)}" + }, + { + "bbox": [ + 104, + 578, + 440, + 594 + ], + "type": "text", + "content": "). For " + }, + { + "bbox": [ + 104, + 578, + 440, + 594 + ], + "type": "inline_equation", + "content": "m < i \\leq d" + }, + { + "bbox": [ + 104, + 578, + 440, + 594 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 578, + 440, + 594 + ], + "type": "inline_equation", + "content": "\\hat{B}_{t,i,j}^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{1.5 - \\beta})" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 605, + 341, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 341, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 341, + 617 + ], + "type": "text", + "content": "Proof. 
Note that " + }, + { + "bbox": [ + 104, + 605, + 341, + 617 + ], + "type": "inline_equation", + "content": "\\lambda_{i} = 0" + }, + { + "bbox": [ + 104, + 605, + 341, + 617 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 605, + 341, + 617 + ], + "type": "inline_equation", + "content": "m < i\\leq d" + }, + { + "bbox": [ + 104, + 605, + 341, + 617 + ], + "type": "text", + "content": ". By (92) and (95)," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 202, + 620, + 407, + 637 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 620, + 407, + 637 + ], + "spans": [ + { + "bbox": [ + 202, + 620, + 407, + 637 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {t + 1} ^ {(s)} = \\hat {B} _ {t} ^ {(s)} + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}), \\quad \\hat {B} _ {0} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}).", + "image_path": "9e9aca4b4e2c93d241bb465dabb4854480af658a16c585478f571d114acf5efa.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 640, + 151, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 640, + 151, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 640, + 151, + 651 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 219, + 655, + 391, + 672 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 655, + 391, + 672 + ], + "spans": [ + { + "bbox": [ + 219, + 655, + 391, + 672 + ], + "type": "interline_equation", + "content": "\\hat {B} _ {t} ^ {(s)} = t \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}) + \\hat {B} _ {0} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}).", + "image_path": "419eeca36cb6163051731894be42a0032cd256b8928c15e8e1fdeee8c69945ea.jpg" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 494, + 675, + 505, + 685 + ], + "blocks": [ + { + "bbox": [ + 494, + 675, + 505, + 685 + ], + "lines": [ + { + "bbox": [ + 494, + 675, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 494, + 675, + 505, + 685 + ], + "type": "image", + "image_path": "b78b55b4a4a343eca118a828771d231d6eabb1f549175c74f9751c0664dd16df.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 696, + 506, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 696, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 696, + 506, + 731 + ], + "type": "text", + "content": "Having obtained the expressions for " + }, + { + "bbox": [ + 104, + 696, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\hat{B}_t^{(s)}" + }, + { + "bbox": [ + 104, + 696, + 506, + 731 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 696, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\hat{A}_t^{(s)}" + }, + { + "bbox": [ + 104, + 696, + 506, + 731 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 696, + 506, + 731 + ], + "type": "inline_equation", + "content": "\\hat{A}_{\\mathrm{avg}}^{(s)}" + }, + { + "bbox": [ + 104, + 696, + 506, + 731 + ], + "type": "text", + "content": ", we now provide explicit expressions for the first and second moments of the change of manifold projection every round in the following two lemmas." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "text", + "content": "62" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 61 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 441, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 441, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 441, + 95 + ], + "type": "text", + "content": "Lemma K.36. The expectation of the change of manifold projection every round is" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 117, + 99, + 504, + 133 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 99, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 117, + 99, + 504, + 133 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} \\right] = \\left\\{ \\begin{array}{l l} \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {\\Sigma} _ {0} + \\boldsymbol {\\Psi} \\left(\\hat {\\phi} ^ {(0)}\\right) \\right] + \\tilde {\\mathcal {O}} \\left(\\eta^ {1. 5 - \\beta}\\right), & R _ {0} < s < R _ {\\mathrm {g r p}}, \\\\ \\tilde {\\mathcal {O}} (\\eta), & s \\leq R _ {0} \\end{array} , \\right. 
\\tag {98}", + "image_path": "d438108744b31e9ec652455bb0c957db40bb8eefe44b5447a6a9d08fc48c05d2.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 137, + 217, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 137, + 217, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 217, + 153 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 137, + 217, + 153 + ], + "type": "inline_equation", + "content": "R_0 \\coloneqq \\left\\lceil \\frac{10}{\\lambda_m \\alpha} \\log \\frac{1}{\\eta} \\right\\rceil" + }, + { + "bbox": [ + 105, + 137, + 217, + 153 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 165, + 506, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 506, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 506, + 192 + ], + "type": "text", + "content": "Proof. We first compute " + }, + { + "bbox": [ + 104, + 165, + 506, + 192 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\hat{\\phi}^{(s + 1)} - \\hat{\\phi}^{(s)}]" + }, + { + "bbox": [ + 104, + 165, + 506, + 192 + ], + "type": "text", + "content": ". By (72), we only need to compute " + }, + { + "bbox": [ + 104, + 165, + 506, + 192 + ], + "type": "inline_equation", + "content": "P_{\\parallel}\\hat{q}_H^{(s)}" + }, + { + "bbox": [ + 104, + 165, + 506, + 192 + ], + "type": "text", + "content": " by relating it to these matrices. 
Multiplying both sides of (79) by " + }, + { + "bbox": [ + 104, + 165, + 506, + 192 + ], + "type": "inline_equation", + "content": "P_{\\parallel}" + }, + { + "bbox": [ + 104, + 165, + 506, + 192 + ], + "type": "text", + "content": " gives" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 129, + 197, + 505, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 197, + 505, + 217 + ], + "spans": [ + { + "bbox": [ + 129, + 197, + 505, + 217 + ], + "type": "interline_equation", + "content": "\\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {t + 1} ^ {(s)} = \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {t} ^ {(s)} - \\eta \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} ] - \\frac {\\eta}{2} \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {2. 5 - \\beta}). 
\\tag {99}", + "image_path": "77c0b0a6647ff7289c77bf2bea8519db34b1cd5e51c1dd40c21d96a57943fa3f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 221, + 257, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 221, + 257, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 221, + 257, + 233 + ], + "type": "text", + "content": "Similarly, according to (85), we have" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 153, + 239, + 505, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 239, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 153, + 239, + 505, + 262 + ], + "type": "interline_equation", + "content": "\\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {0} ^ {(s + 1)} = - \\boldsymbol {P} _ {\\parallel} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {B}} _ {H} ^ {(s)} ] - \\frac {1}{2} \\boldsymbol {P} _ {\\parallel} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\tag {100}", + "image_path": "96100f11b99dabbeafc929df86a15ae9dbaebdaeccffb5fa958962805425f892.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 266, + 240, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 266, + 240, + 278 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 240, + 278 + ], + "type": "text", + "content": "Combining (99) and (100) yields" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 133, + 284, + 504, + 351 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 284, + 504, + 351 + ], + "spans": [ + { + "bbox": [ + 133, + 284, + 504, + 351 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {q}} _ {H} ^ {(s)} = - \\frac {1}{2} \\boldsymbol {P} _ {\\parallel} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\text {a v g}} ^ {(s - 1)} ] - \\frac {\\eta}{2} \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\sum_ {t = 0} ^ {H - 1} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} ] \\tag {101} \\\\ - \\eta P _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t} ^ {(s)} ] - P _ {\\|} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {B} _ {H} ^ {(s - 1)} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\\\ \\end{array}", + "image_path": "ede88c8adf5575de329794a4fce9387773a87170a299a4514e05ea47b16c0cc4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "text", + "content": "By Lemmas K.29, K.32 and K.30, for " + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "inline_equation", + "content": "s \\leq R_0 = \\left\\lfloor \\frac{10}{\\lambda \\alpha} \\log \\frac{1}{\\eta} \\right\\rfloor" + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{A}}_t^{(s)} = \\tilde{\\mathcal{O}}(\\eta)" + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s)} = \\tilde{\\mathcal{O}}(\\eta)" + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{B}}_t^{(s)} = \\tilde{\\mathcal{O}}(\\eta)" + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "text", + "content": ". Therefore, " + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\hat{\\phi}^{(s+1)} - \\hat{\\phi}^{(s)}] = \\tilde{\\mathcal{O}}(\\eta)" + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "text", + "content": ". 
For " + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "inline_equation", + "content": "s > R_0" + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s-1)} = \\hat{\\pmb{A}}_{\\mathrm{avg}}^{(s)} + \\tilde{\\mathcal{O}}(\\eta^{1.5-0.5\\beta})" + }, + { + "bbox": [ + 104, + 357, + 506, + 399 + ], + "type": "text", + "content": ". Substituting (101) into (72) gives" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 147, + 403, + 462, + 508 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 403, + 462, + 508 + ], + "spans": [ + { + "bbox": [ + 147, + 403, + 462, + 508 + ], + "type": "interline_equation", + "content": "\\begin{array}{r l} & {\\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] = \\underbrace {\\frac {1}{2} P _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {A} _ {\\mathrm {a v g}} ^ {(s)} ] + P _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {B} _ {H} ^ {(s)} ]} _ {\\mathcal {T} _ {1}}} \\\\ & {\\qquad \\overbrace {- \\eta P _ {\\parallel} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\underbrace {\\frac {1}{2} \\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t} ^ {(s)} + \\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t} ^ {(s)} ]} _ {\\mathcal {T} _ {3}} + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}).} \\end{array}", + "image_path": "0f7ef739357eeae9cb612b4b2202a0720da2556752f8b38d9c08961388119993.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 512, + 393, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 393, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 393, + 525 + ], + "type": "text", + "content": "Below we compute " + }, + { + "bbox": [ + 105, + 512, + 393, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_1" + }, + { + "bbox": [ + 105, + 512, + 393, + 525 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 512, + 393, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_2" + }, + { + "bbox": [ + 105, + 512, + 393, + 525 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 105, + 512, + 393, + 525 + ], + "type": "inline_equation", + "content": "s > R_0" + }, + { + "bbox": [ + 105, + 512, + 393, + 525 + ], + "type": "text", + "content": " respectively. 
By Lemma K.3," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 176, + 529, + 433, + 545 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 529, + 433, + 545 + ], + "spans": [ + { + "bbox": [ + 176, + 529, + 433, + 545 + ], + "type": "interline_equation", + "content": "\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} ] = \\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\perp} ] = \\mathbf {0},", + "image_path": "a3383a3b09be0dd9161553d61263e6e0112f23029fa55bc5cb81e61ffb2faff2.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 179, + 546, + 399, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 546, + 399, + 563 + ], + "spans": [ + { + "bbox": [ + 179, + 546, + 399, + 563 + ], + "type": "interline_equation", + "content": "\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} ] = \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\parallel} ].", + "image_path": "e066b0ec303c6e414a3e292e6528bfe22e606fd966195b3c786567643ca8e2de.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 567, + 173, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 173, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 173, + 578 + ], + "type": "text", + "content": "By Lemma K.4," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 236, + 584, + 373, + 600 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 584, + 373, + 600 + ], + "spans": [ + { + "bbox": [ + 236, + 584, + 373, + 600 + ], + "type": 
"interline_equation", + "content": "\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\perp} \\hat {\\pmb {A}} _ {\\mathrm {a v g}} ^ {(s)} \\pmb {P} _ {\\perp} ] = \\mathbf {0}.", + "image_path": "2661867bbfc115faa74f454052ab938a5b0256551c9a7eb8a7dc4b8d99107c31.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 604, + 199, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 604, + 199, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 199, + 615 + ], + "type": "text", + "content": "Therefore, for " + }, + { + "bbox": [ + 105, + 604, + 199, + 615 + ], + "type": "inline_equation", + "content": "s > R_0" + }, + { + "bbox": [ + 105, + 604, + 199, + 615 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 165, + 620, + 444, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 620, + 444, + 647 + ], + "spans": [ + { + "bbox": [ + 165, + 620, + 444, + 647 + ], + "type": "interline_equation", + "content": "\\boldsymbol {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\boldsymbol {A}} _ {\\mathrm {a v g}} ^ {(s)} ] = \\frac {H \\eta^ {2}}{2 K B _ {\\mathrm {l o c}}} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) \\Phi [ \\boldsymbol {\\Sigma} _ {0, \\parallel} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}),", + "image_path": "8d0190cc91e187e90c6685bdc00ad2c111fe8e226e4e3b6edf61954911f24d49.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 651, + 320, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 651, + 320, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 320, + 662 + ], + "type": "text", + "content": "where we apply Lemma K.32. 
Similarly, for " + }, + { + "bbox": [ + 105, + 651, + 320, + 662 + ], + "type": "inline_equation", + "content": "s > R_0" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 179, + 668, + 430, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 668, + 430, + 685 + ], + "spans": [ + { + "bbox": [ + 179, + 668, + 430, + 685 + ], + "type": "interline_equation", + "content": "\\pmb {P} _ {\\perp} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\hat {\\pmb {B}} _ {H} ^ {(s)} ] = \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\parallel} \\hat {\\pmb {B}} _ {H} ^ {(s)} \\pmb {P} _ {\\parallel} ] = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}),", + "image_path": "4807a8b71af00fa2d935530acf35f530e7767ac79ce466faa16ccb127b5a804f.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 689, + 261, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 261, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 261, + 700 + ], + "type": "text", + "content": "where we apply Lemma K.35. Hence," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 217, + 706, + 505, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 706, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 217, + 706, + 505, + 731 + ], + "type": "interline_equation", + "content": "\\mathcal {T} _ {1} = \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0, \\parallel} ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\tag {102}", + "image_path": "6506517b94e640f2eea1d12d67c3f5ffa311415cfefae2dc203c9e63499d07a4.jpg" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "63" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 62 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 210, + 93 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 210, + 93 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 210, + 93 + ], + "type": "text", + "content": "We move on to show that" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 192, + 97, + 505, + 122 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 97, + 505, + 122 + ], + "spans": [ + { + "bbox": [ + 192, + 97, + 505, + 122 + ], + "type": "interline_equation", + "content": "\\mathcal {T} _ {2} = \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0} - \\boldsymbol {\\Sigma} _ {0, \\parallel} + (K - 1) \\boldsymbol {\\Psi} (\\hat {\\phi} ^ {(0)}) ]. 
\\tag {103}", + "image_path": "a7ecf91ed1f65871bf0ccab8bd6294c85230199584775c9667038711ac4986ba.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "content": "Similar to the way we compute " + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "inline_equation", + "content": "\\hat{A}_t^{(s)}" + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "inline_equation", + "content": "\\hat{A}_{\\mathrm{avg}}^{(s)}" + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "inline_equation", + "content": "\\hat{B}_t^{(s)}" + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "content": ", we compute " + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_2" + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "content": " by splitting " + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_3" + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "content": " into four matrices and then substituting them into the linear operator " + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "inline_equation", + "content": "-\\eta P_{\\parallel}\\nabla^3\\mathcal{L}(\\hat{\\phi}^{(0)})[\\cdot ]" + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "content": " one by one. 
First, we show that" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 153, + 168, + 505, + 208 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 168, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 153, + 168, + 505, + 208 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} - \\eta \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {P} _ {\\perp} \\mathcal {T} _ {3} \\boldsymbol {P} _ {\\perp} \\right] = \\frac {H \\eta^ {2}}{2 B} \\partial^ {2} \\Phi \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {\\Sigma} _ {0, \\perp} + (K - 1) \\psi \\left(\\boldsymbol {\\Sigma} _ {0, \\perp}\\right) \\right] \\tag {104} \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ \\end{array}", + "image_path": "6655d574fb1963fc2e3fe6f77d62ff0be5137e46ccaa1b3f4f758d23b9699532.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 213, + 506, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 506, + 236 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 213, + 506, + 236 + ], + "type": "inline_equation", + "content": "\\psi (\\cdot)" + }, + { + "bbox": [ + 104, + 213, + 506, + 236 + ], + "type": "text", + "content": " is interpreted as an elementwise matrix function here. 
By Lemmas K.29 and K.34, for " + }, + { + "bbox": [ + 104, + 213, + 506, + 236 + ], + "type": "inline_equation", + "content": "1\\leq i\\leq m" + }, + { + "bbox": [ + 104, + 213, + 506, + 236 + ], + "type": "inline_equation", + "content": "1\\le j\\le m" + }, + { + "bbox": [ + 104, + 213, + 506, + 236 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 213, + 506, + 236 + ], + "type": "inline_equation", + "content": "s > R_0" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 124, + 240, + 486, + 286 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 240, + 486, + 286 + ], + "spans": [ + { + "bbox": [ + 124, + 240, + 486, + 286 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {(1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {t}}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\frac {\\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 5 \\beta}), \\\\ \\hat {B} _ {t, i, j} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). 
\\\\ \\end{array}", + "image_path": "2733dcc4cfa23c17767ab3ec15e4b1087bb524ceba76a5bb7e96623f17421867.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 290, + 151, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 290, + 151, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 151, + 300 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 306, + 497, + 411 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 306, + 497, + 411 + ], + "spans": [ + { + "bbox": [ + 113, + 306, + 497, + 411 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t, i, j} ^ {(s)} = - \\left(1 - \\frac {1}{K}\\right) \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{(\\lambda_ {i} + \\lambda_ {j}) ^ {2} B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\frac {H \\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0., i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{K \\left(\\lambda_ {i} + \\lambda_ {j}\\right) B _ {\\mathrm {l o c}}} \\Sigma_ {0.., i, j} \\\\ + \\left(1 - \\frac {1}{K}\\right) \\frac {H \\eta}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\underbrace {\\left[ 1 - \\frac {1 - (1 - (\\lambda_ {i} + \\lambda_ {j}) \\eta) ^ {H}}{H \\eta (\\lambda_ {i} + \\lambda_ {j})} \\right]} _ {\\tau_ {4}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}). 
\\\\ \\end{array}", + "image_path": "ec8e33a4a1a98a6460f77322883182f5cabd1112058888b57f6b6669edb68435.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 412, + 217, + 445 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 412, + 217, + 445 + ], + "spans": [ + { + "bbox": [ + 113, + 412, + 217, + 445 + ], + "type": "interline_equation", + "content": "\\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t, i, j} ^ {(s)} = \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}),", + "image_path": "97d0ae29bdf0873cfeb69a197ca628e94f726980b3b08fd63a8a995c533007b0.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 449, + 242, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 242, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 242, + 462 + ], + "type": "text", + "content": "Then we simplify " + }, + { + "bbox": [ + 105, + 449, + 242, + 462 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_4" + }, + { + "bbox": [ + 105, + 449, + 242, + 462 + ], + "type": "text", + "content": ". Notice that" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 187, + 466, + 422, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 466, + 422, + 495 + ], + "spans": [ + { + "bbox": [ + 187, + 466, + 422, + 495 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left(1 - \\left(\\lambda_ {i} + \\lambda_ {i}\\right) \\eta\\right) ^ {H} = \\exp \\left(- H \\left(\\lambda_ {i} + \\lambda_ {j}\\right) \\eta\\right) \\left[ 1 + \\mathcal {O} \\left(H \\eta^ {2}\\right) \\right] \\\\ = \\exp (- H (\\lambda_ {i} + \\lambda_ {j}) \\eta) + \\mathcal {O} (\\eta). 
\\\\ \\end{array}", + "image_path": "fb5d3e01d503bf013bdbf3e65a75c192169f94ac05fa1aef66f7b819423dab43.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 499, + 151, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 499, + 151, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 151, + 510 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 239, + 516, + 370, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 516, + 370, + 529 + ], + "spans": [ + { + "bbox": [ + 239, + 516, + 370, + 529 + ], + "type": "interline_equation", + "content": "\\mathcal {T} _ {4} = \\psi \\left(\\left(\\lambda_ {i} + \\lambda_ {j}\\right) H \\eta\\right) + \\mathcal {O} (\\eta).", + "image_path": "e19c5a795cecad24108f3751e1ffa9261c813019d478ac56d583b122ae2c93e6.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 535, + 361, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 535, + 361, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 535, + 361, + 552 + ], + "type": "text", + "content": "Substituting " + }, + { + "bbox": [ + 104, + 535, + 361, + 552 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_4" + }, + { + "bbox": [ + 104, + 535, + 361, + 552 + ], + "type": "text", + "content": " back into the expression for " + }, + { + "bbox": [ + 104, + 535, + 361, + 552 + ], + "type": "inline_equation", + "content": "\\sum_{t=0}^{H-1} \\hat{A}_{t,i,j}^{(s)}" + }, + { + "bbox": [ + 104, + 535, + 361, + 552 + ], + "type": "text", + "content": " gives" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 116, + 557, + 492, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 557, + 492, + 590 + ], + "spans": [ + { + "bbox": [ + 116, + 557, + 492, + 590 + ], + "type": "interline_equation", + "content": "\\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t, i, j} ^ {(s)} = 
\\frac {H \\eta}{K (\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0.., i, j} + \\left(1 - \\frac {1}{K}\\right) \\frac {H \\eta \\psi ((\\lambda_ {i} + \\lambda_ {j}) H \\eta)}{(\\lambda_ {i} + \\lambda_ {j}) B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}).", + "image_path": "1cc8265f0134b709b0dc2813f2615a5f3d5253aa4c6946e1a3c02711dab484fb.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 594, + 447, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 447, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 447, + 606 + ], + "type": "text", + "content": "Combining the elementwise results, we obtain the following matrix form expression:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 610, + 480, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 610, + 480, + 651 + ], + "spans": [ + { + "bbox": [ + 129, + 610, + 480, + 651 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} - \\eta \\pmb {P} _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\pmb {P} _ {\\perp} \\mathcal {T} _ {3} \\pmb {P} _ {\\perp} ] = - \\frac {H \\eta^ {2}}{2 B} \\pmb {P} _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ \\mathcal {V} _ {\\pmb {H} _ {0}} (\\pmb {\\Sigma} _ {0, \\perp} + (K - 1) \\psi (\\pmb {\\Sigma} _ {0, \\perp})) ] \\\\ + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}). \\\\ \\end{array}", + "image_path": "5cc7ed2709f739450445250a1a39f9486c1377ef1bc6b0ed79ed57d262a0bed6.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 655, + 235, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 235, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 235, + 667 + ], + "type": "text", + "content": "By Lemma K.4, we have (104)." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 673, + 252, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 673, + 252, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 673, + 252, + 684 + ], + "type": "text", + "content": "Secondly, we show that for " + }, + { + "bbox": [ + 104, + 673, + 252, + 684 + ], + "type": "inline_equation", + "content": "s > R_0" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 178, + 689, + 504, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 689, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 178, + 689, + 504, + 731 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} - \\eta P _ {\\|} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) [ P _ {\\perp} \\mathcal {T} _ {3} P _ {\\|} + P _ {\\|} \\mathcal {T} _ {3} P _ {\\perp} ] \\\\ = \\frac {H \\eta^ {2}}{B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel} + (K - 1) \\psi (\\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel}) ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 
5 - \\beta}), \\tag {105} \\\\ \\end{array}", + "image_path": "1aaa12ca7454d490a4f3ea3d8dc632d33e8ed3a6a83f9b200be5b432ff970935.jpg" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "64" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 63 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "inline_equation", + "content": "\\psi (\\cdot)" + }, + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "text", + "content": " is interpreted as an elementwise matrix function here. 
By symmetry of " + }, + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "inline_equation", + "content": "\\hat{A}_t^{(s)}" + }, + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "text", + "content": "'s and " + }, + { + "bbox": [ + 104, + 81, + 504, + 108 + ], + "type": "inline_equation", + "content": "\\nabla^3\\mathcal{L}(\\hat{\\phi}^{(0)})" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 130, + 113, + 479, + 147 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 113, + 479, + 147 + ], + "spans": [ + { + "bbox": [ + 130, + 113, + 479, + 147 + ], + "type": "interline_equation", + "content": "\\frac {1}{2} \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) \\left[ \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\parallel} + \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\perp} \\right] = \\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) \\left[ \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} \\hat {\\boldsymbol {A}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\parallel} \\right].", + "image_path": "f815fd4b1999eb9664389873981c8aecba887ed418a3d889a99c9839df567e9d.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 149, + 251, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 149, + 251, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 149, + 251, + 161 + ], + "type": "text", + "content": "Therefore, we only have to evaluate" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 181, + 167, + 428, + 200 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 167, + 428, + 200 + ], + "spans": [ + { + "bbox": [ + 181, + 167, + 428, + 200 + ], + "type": "interline_equation", + "content": "\\nabla^ {3} \\mathcal {L} (\\hat {\\phi} ^ {(0)}) \\left[ \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} (\\hat 
{\\boldsymbol {A}} _ {t} ^ {(s)} + \\hat {\\boldsymbol {B}} _ {t} ^ {(s)}) \\boldsymbol {P} _ {\\parallel} + \\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\parallel} \\hat {\\boldsymbol {B}} _ {t} ^ {(s)} \\boldsymbol {P} _ {\\perp} \\right].", + "image_path": "c2774e4142a829c0209fc30d60526bcbf36572e3cf781f14ab4bade0f08b0085.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 205, + 504, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 504, + 231 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 504, + 231 + ], + "type": "text", + "content": "To compute the elements of " + }, + { + "bbox": [ + 104, + 205, + 504, + 231 + ], + "type": "inline_equation", + "content": "\\sum_{t=0}^{H-1} P_{\\perp} (\\hat{A}_t^{(s)} + \\hat{B}_t^{(s)}) P_{\\parallel}" + }, + { + "bbox": [ + 104, + 205, + 504, + 231 + ], + "type": "text", + "content": ", we combine Lemmas K.30 and K.33 to obtain that for " + }, + { + "bbox": [ + 104, + 205, + 504, + 231 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq m" + }, + { + "bbox": [ + 104, + 205, + 504, + 231 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 205, + 504, + 231 + ], + "type": "inline_equation", + "content": "m < j \\leq d" + }, + { + "bbox": [ + 104, + 205, + 504, + 231 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 171, + 236, + 437, + 352 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 236, + 437, + 352 + ], + "spans": [ + { + "bbox": [ + 171, + 236, + 437, + 352 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sum_ {t = 0} ^ {H - 1} \\hat {A} _ {t, i, j} ^ {(s)} = \\sum_ {t = 0} ^ {H - 1} \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {t}}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 
5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} - \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} ^ {2} B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\left(1 - \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} H \\eta}\\right) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} B _ {\\mathrm {l o c}}} \\psi (\\lambda_ {i} H \\eta) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}), \\\\ \\end{array}", + "image_path": "ce2ba6d305c3fb5816ca1951e1295957e77ae9cd827840f7c007cfd1c1a2f8d0.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 356, + 123, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 356, + 123, + 366 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 123, + 366 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 370, + 479, + 487 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 370, + 479, + 487 + ], + "spans": [ + { + "bbox": [ + 130, + 370, + 479, + 487 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sum_ {t = 0} ^ {H - 1} \\hat {B} _ {t, i, j} ^ {(s)} = \\sum_ {t = 0} ^ {H - 1} \\frac {\\left(1 - \\lambda_ {i} \\eta\\right) ^ {t}}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\eta \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - \\beta}), \\\\ = \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} ^ {2} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 
5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} - \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\left(1 - \\frac {1 - (1 - \\lambda_ {i} \\eta) ^ {H}}{\\lambda_ {i} H \\eta}\\right) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}) \\\\ = \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\Sigma_ {0, i, j} - \\frac {H \\eta}{\\lambda_ {i} K B _ {\\mathrm {l o c}}} \\psi (\\lambda_ {i} H \\eta) \\Sigma_ {0, i, j} + \\tilde {\\mathcal {O}} (\\eta^ {0. 5 - \\beta}). \\\\ \\end{array}", + "image_path": "222f41bf6d77285fa4980c867c77c6b810307030c2e6d0176c0bb48589a596d3.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 491, + 353, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 353, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 353, + 508 + ], + "type": "text", + "content": "Therefore, the matrix form of " + }, + { + "bbox": [ + 105, + 491, + 353, + 508 + ], + "type": "inline_equation", + "content": "\\sum_{t=0}^{H-1} P_{\\perp} (\\hat{A}_t^{(s)} + \\hat{B}_t^{(s)}) P_{\\parallel}" + }, + { + "bbox": [ + 105, + 491, + 353, + 508 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 134, + 513, + 475, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 513, + 475, + 544 + ], + "spans": [ + { + "bbox": [ + 134, + 513, + 475, + 544 + ], + "type": "interline_equation", + "content": "\\sum_ {t = 0} ^ {H - 1} \\boldsymbol {P} _ {\\perp} (\\hat {\\boldsymbol {A}} _ {t} ^ {(s)} + \\hat {\\boldsymbol {B}} _ {t} ^ {(s)}) \\boldsymbol {P} _ {\\parallel} = \\frac {H \\eta}{B} \\mathcal {V} _ {\\boldsymbol {H} _ {0}} \\left(\\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel} + (K - 1) \\psi (\\boldsymbol {\\Sigma} _ {0, \\perp , \\parallel})\\right) + \\tilde {\\mathcal {O}} (\\eta^ {0. 
5 - \\beta}),", + "image_path": "bbd46a94b3a7411c892ece1bf84b78733f75bd688ae2dfd3cb549fae1707aad3.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 550, + 504, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 504, + 586 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 550, + 504, + 586 + ], + "type": "inline_equation", + "content": "\\psi (\\cdot)" + }, + { + "bbox": [ + 104, + 550, + 504, + 586 + ], + "type": "text", + "content": " is interpreted as an elementwise matrix function here. Furthermore, by Lemma K.35, " + }, + { + "bbox": [ + 104, + 550, + 504, + 586 + ], + "type": "inline_equation", + "content": "\\sum_{t = 0}^{H - 1}\\hat{B}_t^{(s)} = \\tilde{\\mathcal{O}} (\\eta^{0.5 - \\beta})" + }, + { + "bbox": [ + 104, + 550, + 504, + 586 + ], + "type": "text", + "content": ". Applying Lemma K.3, we have (105). Finally, directly applying Lemma K.5, we have" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 238, + 591, + 504, + 607 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 591, + 504, + 607 + ], + "spans": [ + { + "bbox": [ + 238, + 591, + 504, + 607 + ], + "type": "interline_equation", + "content": "- \\eta \\boldsymbol {P} _ {\\parallel} \\nabla^ {3} \\mathcal {L} \\left(\\hat {\\phi} ^ {(0)}\\right) \\left[ \\boldsymbol {P} _ {\\parallel} \\mathcal {T} _ {3} \\boldsymbol {P} _ {\\parallel} \\right] = \\boldsymbol {0}. 
\\tag {106}", + "image_path": "de2ae8ef76684a696a2c40db8e3b14f2ccd832b09b8424485ef7e209710b1a37.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 611, + 504, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 504, + 635 + ], + "type": "text", + "content": "Notice that " + }, + { + "bbox": [ + 104, + 611, + 504, + 635 + ], + "type": "inline_equation", + "content": "\\psi(\\Sigma_{0,||}) = 0" + }, + { + "bbox": [ + 104, + 611, + 504, + 635 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 611, + 504, + 635 + ], + "type": "inline_equation", + "content": "\\psi(\\cdot)" + }, + { + "bbox": [ + 104, + 611, + 504, + 635 + ], + "type": "text", + "content": " operates on each element. Combining (104), (105) and (106), we obtain (103). By (102) and (103), we have (98)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 640, + 457, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 640, + 457, + 652 + ], + "spans": [ + { + "bbox": [ + 104, + 640, + 457, + 652 + ], + "type": "text", + "content": "Lemma K.37. The second moment of the change of manifold projection every round is" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 125, + 658, + 484, + 689 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 658, + 484, + 689 + ], + "spans": [ + { + "bbox": [ + 125, + 658, + 484, + 689 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ (\\hat {\\boldsymbol {\\phi}} ^ {(s + 1)} - \\hat {\\boldsymbol {\\phi}} ^ {(s)}) (\\hat {\\boldsymbol {\\phi}} ^ {(s + 1)} - \\hat {\\boldsymbol {\\phi}} ^ {(s)}) ^ {\\top} ] = \\left\\{ \\begin{array}{l l} \\frac {H \\eta^ {2}}{B} \\pmb {\\Sigma} _ {0, \\parallel} + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 0. 
5 \\beta}), & R _ {0} \\leq s < R _ {\\mathrm {g r p}} \\\\ \\tilde {\\mathcal {O}} (\\eta), & s < R _ {0} \\end{array} \\right.,", + "image_path": "96f95a32f8f20841e900fb704c966c2c45dd0dac00baaf429a2eb1ee6d176336.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 693, + 216, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 693, + 216, + 710 + ], + "spans": [ + { + "bbox": [ + 105, + 693, + 216, + 710 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 693, + 216, + 710 + ], + "type": "inline_equation", + "content": "R_0 \\coloneqq \\left\\lceil \\frac{10}{\\lambda_m \\alpha} \\log \\frac{1}{\\eta} \\right\\rceil" + }, + { + "bbox": [ + 105, + 693, + 216, + 710 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 720, + 421, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 421, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 421, + 732 + ], + "type": "text", + "content": "Proof. Directly apply Lemma K.32 and Lemma K.27 and we have the lemma." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "65" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 64 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 375, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 375, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 375, + 95 + ], + "type": "text", + "content": "With Lemmas K.36 and K.37, we are ready to prove Theorem K.3." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 504, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 504, + 133 + ], + "type": "text", + "content": "Proof of Theorem K.3. We first derive " + }, + { + "bbox": [ + 104, + 106, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})}]" + }, + { + "bbox": [ + 104, + 106, + 504, + 133 + ], + "type": "text", + "content": ". 
Recall that " + }, + { + "bbox": [ + 104, + 106, + 504, + 133 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{\\beta}} \\right\\rfloor = \\frac{1}{H\\eta^{1 + \\beta}} + o(1)" + }, + { + "bbox": [ + 104, + 106, + 504, + 133 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 106, + 504, + 133 + ], + "type": "inline_equation", + "content": "0 < \\beta < 0.5" + }, + { + "bbox": [ + 104, + 106, + 504, + 133 + ], + "type": "text", + "content": ". By Lemma K.36," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 147, + 137, + 463, + 200 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 137, + 463, + 200 + ], + "spans": [ + { + "bbox": [ + 147, + 137, + 463, + 200 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} [ \\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(0)} ] = \\sum_ {s = 0} ^ {R _ {0}} \\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] + \\sum_ {s = R _ {0} + 1} ^ {R _ {\\mathrm {g r p}} - 1} \\mathbb {E} [ \\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)} ] \\\\ = \\frac {\\eta^ {1 - \\beta}}{2 B} \\partial^ {2} \\Phi (\\hat {\\phi} ^ {(0)}) [ \\pmb {\\Sigma} _ {0} + \\pmb {\\Psi} (\\hat {\\phi} ^ {(0)}) ] + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 2 \\beta}) + \\tilde {\\mathcal {O}} (\\eta). 
\\\\ \\end{array}", + "image_path": "e5082d932e4a4f90e50e126bf1b0f93ec82a5d1b8b6353ae3ac91c324938e2a7.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 204, + 280, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 280, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 280, + 217 + ], + "type": "text", + "content": "Then we compute " + }, + { + "bbox": [ + 105, + 204, + 280, + 217 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})}\\Delta \\hat{\\phi}^{(R_{\\mathrm{grp}})^{\\top}}]" + }, + { + "bbox": [ + 105, + 204, + 280, + 217 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 114, + 223, + 501, + 331 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 223, + 501, + 331 + ], + "spans": [ + { + "bbox": [ + 114, + 223, + 501, + 331 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} \\left[ \\left(\\sum_ {s = 0} ^ {R _ {\\mathrm {g r p}} - 1} (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)})\\right) \\left(\\sum_ {s = 0} ^ {R _ {\\mathrm {g r p}} - 1} (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)})\\right) ^ {\\top} \\right] \\\\ = \\sum_ {s = 0} ^ {R _ {\\mathrm {g r p}} - 1} \\mathbb {E} [ (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ^ {\\top} ] + \\sum_ {s \\neq s ^ {\\prime}} \\mathbb {E} [ (\\hat {\\phi} ^ {(s + 1)} - \\hat {\\phi} ^ {(s)}) ] \\mathbb {E} [ (\\hat {\\phi} ^ {(s ^ {\\prime} + 1)} - \\hat {\\phi} ^ {(s ^ {\\prime})}) ^ {\\top} ] \\\\ = \\frac {\\eta^ {1 - \\beta}}{B} \\Sigma_ {0, \\parallel} + \\tilde {\\mathcal {O}} (\\eta) + \\tilde {\\mathcal {O}} (\\eta^ {1. 5 - 1. 
5 \\beta}), \\\\ \\end{array}", + "image_path": "98bddb819379f1d7e36eb1d7aa1bed7d08b5a24d3e58a10387c80eec2afd50d9.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 335, + 436, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 436, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 436, + 350 + ], + "type": "text", + "content": "where the last inequality uses " + }, + { + "bbox": [ + 105, + 335, + 436, + 350 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[(\\hat{\\phi}^{(s + 1)} - \\hat{\\phi}^{(s)})]\\mathbb{E}[(\\hat{\\phi}^{(s' + 1)} - \\hat{\\phi}^{(s')})^\\top ] = \\tilde{\\mathcal{O}} (\\eta^2)" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 494, + 337, + 505, + 346 + ], + "blocks": [ + { + "bbox": [ + 494, + 337, + 505, + 346 + ], + "lines": [ + { + "bbox": [ + 494, + 337, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 494, + 337, + 505, + 346 + ], + "type": "image", + "image_path": "78b29f9c05159682ca3d886a219bb7e9cb859ab59189ae22eecd9c91e12a2eba.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 361, + 288, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 288, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 288, + 372 + ], + "type": "text", + "content": "K.10 PROOF OF WEAK APPROXIMATION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 382, + 506, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 382, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 382, + 506, + 434 + ], + "type": "text", + "content": "We are now in a position to utilize the estimate of moments obtained in previous subsections to prove the closeness of the sequence " + }, + { + "bbox": [ + 104, + 382, + 506, + 434 + ], + "type": "inline_equation", + "content": "\\{\\phi^{(s)}\\}_{s = 0}^{\\lfloor T / (H\\eta^2)\\rfloor}" + }, 
+ { + "bbox": [ + 104, + 382, + 506, + 434 + ], + "type": "text", + "content": " and the SDE solution " + }, + { + "bbox": [ + 104, + 382, + 506, + 434 + ], + "type": "inline_equation", + "content": "\\{\\zeta :t\\in [0,T]\\}" + }, + { + "bbox": [ + 104, + 382, + 506, + 434 + ], + "type": "text", + "content": " in the sense of weak approximation. Recall the SDE that we expect the manifold projection " + }, + { + "bbox": [ + 104, + 382, + 506, + 434 + ], + "type": "inline_equation", + "content": "\\{\\Phi (\\bar{\\theta}^{(s)})\\}_{s = 0}^{\\lfloor T / (H\\eta^2)\\rfloor}" + }, + { + "bbox": [ + 104, + 382, + 506, + 434 + ], + "type": "text", + "content": " to track:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 127, + 437, + 505, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 437, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 127, + 437, + 505, + 472 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = P _ {\\zeta} \\left(\\underbrace {\\frac {1}{\\sqrt {B}} \\boldsymbol {\\Sigma} _ {\\parallel} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t}} _ {\\text {(a) d i f f u s i o n}} \\underbrace {- \\frac {1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Sigma}} _ {\\diamond} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(b) d r i f t - I}}} _ {\\text {(c) d r i f t - I I}} - \\underbrace {- \\frac {K - 1}{2 B} \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\widehat {\\boldsymbol {\\Psi}} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t} _ {\\text {(d) d r i f t - I I}}\\right), \\tag {107}", + "image_path": "3ce3f03db3979fc12125793d3d8265d3423531be3e0c64db3d6c42cf180f209c.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 476, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 476, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 504, + 498 + 
], + "type": "text", + "content": "According to Lemma K.3 and Lemma K.4, the drift term in total can be written as the following form:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 206, + 502, + 403, + 525 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 502, + 403, + 525 + ], + "spans": [ + { + "bbox": [ + 206, + 502, + 403, + 525 + ], + "type": "interline_equation", + "content": "(\\mathbf {b}) + (\\mathbf {c}) = \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) ].", + "image_path": "0fcba67cfc9e726b4c9923073f45dccb8a835bb91593619f75505269e0b6baba.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 528, + 375, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 528, + 375, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 375, + 540 + ], + "type": "text", + "content": "Then by definition of " + }, + { + "bbox": [ + 105, + 528, + 375, + 540 + ], + "type": "inline_equation", + "content": "P_{\\zeta}" + }, + { + "bbox": [ + 105, + 528, + 375, + 540 + ], + "type": "text", + "content": ", (107) is equivalent to the following SDE:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 150, + 544, + 505, + 571 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 544, + 505, + 571 + ], + "spans": [ + { + "bbox": [ + 150, + 544, + 505, + 571 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = \\frac {1}{\\sqrt {B}} \\partial \\Phi (\\boldsymbol {\\zeta}) \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} _ {t} + \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t. 
\\tag {108}", + "image_path": "ea4dde0a10b23235de6df2aba60ddbec61eeaadac59eab262a5c4c796adc6c12.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": "Therefore, we only have to show that " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\phi^{(s)}" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": " closely tracks " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\{\\zeta(t)\\}" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": " satisfying Equation (108). By Lemma K.11, there exists an " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\epsilon_3" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": " neighborhood of " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_3}" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Phi(\\cdot)" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^\\infty" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": "-smooth. 
Due to compactness of " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_3}" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": " is bounded and the mappings " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\partial^2\\Phi(\\cdot)" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\partial\\Phi(\\cdot)" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Sigma^{1/2}(\\cdot)" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Sigma(\\cdot)" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Psi(\\cdot)" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": " are all Lipschitz in " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Gamma^{\\epsilon_3}" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": ". 
By Kirschbraun theorem, both the drift and diffusion term of (108) can be extended as Lipschitz functions on " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": ". Therefore, the solution to the extended SDE exists and is unique. We further show that the solution, if initialized as a point on " + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 575, + 506, + 655 + ], + "type": "text", + "content": ", always stays on the manifold almost surely." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 660, + 329, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 660, + 329, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 329, + 672 + ], + "type": "text", + "content": "As a preparation, we first show that " + }, + { + "bbox": [ + 105, + 660, + 329, + 672 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 105, + 660, + 329, + 672 + ], + "type": "text", + "content": " has no boundary." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 674, + 372, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 674, + 372, + 687 + ], + "spans": [ + { + "bbox": [ + 105, + 674, + 372, + 687 + ], + "type": "text", + "content": "Lemma K.38. Under Assumptions 3.1 to 3.3, " + }, + { + "bbox": [ + 105, + 674, + 372, + 687 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 105, + 674, + 372, + 687 + ], + "type": "text", + "content": " has no boundary." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "Proof. 
We prove by contradiction. If " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " has boundary " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\partial \\Gamma" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": ", WLOG, for a point " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\partial \\Gamma" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": ", let the Hessian at " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{p}" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " be diagonal with the form " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\nabla^2 \\mathcal{L}(\\pmb{p}) = \\mathrm{diag}(\\lambda_1, \\dots, \\lambda_d)" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\lambda_i > 0" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq m" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\lambda_i = 0" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "m < i \\leq d" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": 
"." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "text", + "content": "66" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 65 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": "Denote by " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{i:j} := (x_i, x_{i+1}, \\dots, x_j)" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "i \\leq j" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": ") the " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "(j - i + 1)" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": "-dimensional vector formed by the " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": "-th to " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": 
"text", + "content": "-th coordinates of " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\frac{\\partial(\\nabla\\mathcal{L}(\\pmb{p}))}{\\partial\\pmb{p}_{1:m}} = \\mathrm{diag}(\\lambda_1, \\dots, \\lambda_m)" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": " is invertible, by the implicit function theorem, there exists an open neighborhood " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\pmb{p}_{m+1:d}" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\nabla\\mathcal{L}(\\pmb{v}) = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\forall \\pmb{v} \\in V" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": ". 
Then, " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\pmb{v}) = \\mathcal{L}(\\pmb{p}) = \\min_{\\pmb{\\theta} \\in U} \\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": " and hence " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "V \\subset \\Gamma" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": ", which contradicts with " + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\partial \\Gamma" + }, + { + "bbox": [ + 104, + 82, + 504, + 133 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "text", + "content": "Therefore, " + }, + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "text", + "content": " is a closed manifold (i.e., compact and without boundary). Then we have the following lemma stating that " + }, + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 144, + 504, + 167 + ], + "type": "text", + "content": " is invariant for (108)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "text", + "content": "Lemma K.39. 
Let " + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "inline_equation", + "content": "\\zeta(t)" + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "text", + "content": " be the solution to (108) with " + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "inline_equation", + "content": "\\zeta(0) \\in \\Gamma" + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "inline_equation", + "content": "\\zeta(t) \\in \\Gamma" + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "inline_equation", + "content": "t \\geq 0" + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "text", + "content": ". In other words, " + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 168, + 505, + 192 + ], + "type": "text", + "content": " is invariant for (108)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "text", + "content": "Proof. 
According to Filipovic (2000) and Du & Duan (2007), for a closed manifold " + }, + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "text", + "content": " to be viable for the SDE " + }, + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "inline_equation", + "content": "\\mathrm{d}\\pmb {X}(t) = F(\\pmb {X}(t))\\mathrm{d}t + \\pmb {B}(\\pmb {X}(t))\\mathrm{d}\\pmb{W}_t" + }, + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "inline_equation", + "content": "F:\\mathbb{R}^d\\to \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "inline_equation", + "content": "\\pmb {B}:\\mathbb{R}^d\\rightarrow \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 203, + 504, + 239 + ], + "type": "text", + "content": " are locally Lipschitz, we only have to verify the following Nagumo type consistency condition:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 157, + 240, + 453, + 269 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 240, + 453, + 269 + ], + "spans": [ + { + "bbox": [ + 157, + 240, + 453, + 269 + ], + "type": "interline_equation", + "content": "\\mu (\\pmb {x}) := F (\\pmb {x}) - \\frac {1}{2} \\sum_ {j} \\mathrm {D} [ B _ {j} (\\pmb {x}) ] B _ {j} (\\pmb {x}) \\in T _ {\\pmb {x}} (\\mathcal {M}), \\quad B _ {j} (\\pmb {x}) \\in T _ {\\pmb {x}} (\\mathcal {M}),", + "image_path": "23a010d8f41795cf5319d151f8c89adda21f0cb5443b25c4a379f5af09cf983c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": 
"text", + "content": "where " + }, + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": "inline_equation", + "content": "\\mathrm{D}[\\cdot ]" + }, + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": "text", + "content": " is the Jacobian operator and " + }, + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": "inline_equation", + "content": "B_{j}(\\pmb {x})" + }, + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": "text", + "content": "-th column of " + }, + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": "inline_equation", + "content": "\\pmb {B}(\\pmb {x})" + }, + { + "bbox": [ + 104, + 273, + 430, + 286 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": "In our context, since for " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "\\phi \\in \\Gamma" + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "\\partial \\Phi(\\phi)" + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": " is a projection matrix onto " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "T_{\\phi}(\\Gamma)" + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": ", each column of " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "\\partial \\Phi(\\phi)\\Sigma^{1/2}(\\phi)" + }, + { + 
"bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": " belongs to " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "T_{\\phi}(\\Gamma)" + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": ", verifying the second condition. Denote by " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "P_{\\perp}(\\phi) := I_d - \\partial \\Phi(\\phi)" + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": " the projection onto the normal space of " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": ". To verify the first condition, it suffices to show that " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "P_{\\perp}(\\phi)\\mu(\\phi) = 0" + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": ". We evaluate " + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "inline_equation", + "content": "\\sum_{j} P_{\\perp}(\\phi)\\mathrm{D}[B_j(\\phi)]B_j(\\phi)" + }, + { + "bbox": [ + 104, + 289, + 505, + 338 + ], + "type": "text", + "content": " as follows." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 142, + 342, + 504, + 427 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 342, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 142, + 342, + 504, + 427 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sum_ {j} \\boldsymbol {P} _ {\\perp} (\\phi) \\mathrm {D} [ B _ {j} (\\phi) ] B _ {j} (\\phi) = \\frac {1}{B} \\sum_ {j} \\mathrm {D} [ \\partial \\Phi (\\phi) \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi) ] \\partial \\Phi (\\phi) \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi) \\\\ = \\frac {1}{B} P _ {\\perp} (\\phi) \\sum_ {j} \\partial^ {2} \\Phi (\\phi) [ \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi), \\partial \\Phi (\\phi) \\boldsymbol {\\Sigma} _ {j} ^ {1 / 2} (\\phi) ] \\\\ = - \\frac {1}{B} \\nabla^ {2} \\mathcal {L} (\\phi) ^ {+} \\nabla^ {3} \\mathcal {L} (\\phi) [ \\boldsymbol {\\Sigma} _ {\\parallel} (\\phi) ], \\tag {109} \\\\ \\end{array}", + "image_path": "e0542511692cf4973ced967487d373f311592b339e09c0113301c3d081b78104.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 429, + 430, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 429, + 430, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 430, + 441 + ], + "type": "text", + "content": "where the last inequality uses Lemma K.3. Again applying Lemma K.3, we have" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 203, + 444, + 504, + 467 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 444, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 203, + 444, + 504, + 467 + ], + "type": "interline_equation", + "content": "\\boldsymbol {P} _ {\\perp} (\\phi) F (\\phi) = - \\frac {1}{2 B} \\nabla^ {2} \\mathcal {L} (\\phi) ^ {+} \\nabla^ {3} \\mathcal {L} (\\phi) [ \\boldsymbol {\\Sigma} _ {\\parallel} (\\phi) ]. 
\\tag {110}", + "image_path": "cfc573140af8da1fdb0bc35bd6bc353de3a26d7e4433720a9395277faceeb5c5.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 468, + 352, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 352, + 480 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 352, + 480 + ], + "type": "text", + "content": "Combining (109) and (110), we can verify the first condition." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 492, + 504, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 504, + 517 + ], + "type": "text", + "content": "In order to establish Theorem 3.2, it suffices to prove the following theorem, which captures the closeness of " + }, + { + "bbox": [ + 104, + 492, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\phi^{(s)}" + }, + { + "bbox": [ + 104, + 492, + 504, + 517 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 492, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\zeta(t)" + }, + { + "bbox": [ + 104, + 492, + 504, + 517 + ], + "type": "text", + "content": " every " + }, + { + "bbox": [ + 104, + 492, + 504, + 517 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 492, + 504, + 517 + ], + "type": "text", + "content": " rounds." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "text", + "content": "Theorem K.4. 
If " + }, + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\pmb{\\theta}}^{(0)} - \\pmb{\\phi}^{(0)}\\|_2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})" + }, + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "inline_equation", + "content": "\\zeta (0) = \\phi^{(0)}\\in \\Gamma" + }, + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "text", + "content": ", then for " + }, + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{0.75}}\\right\\rfloor" + }, + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "text", + "content": " every test function " + }, + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "inline_equation", + "content": "g\\in \\mathcal{C}^3" + }, + { + "bbox": [ + 105, + 520, + 504, + 553 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 164, + 556, + 445, + 580 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 556, + 445, + 580 + ], + "spans": [ + { + "bbox": [ + 164, + 556, + 445, + 580 + ], + "type": "interline_equation", + "content": "\\max _ {n = 0, \\dots , \\lfloor T / \\eta^ {0. 7 5} \\rfloor} \\left| \\mathbb {E} g (\\boldsymbol {\\phi} ^ {(n R _ {\\mathrm {g r p}})}) - \\mathbb {E} g (\\boldsymbol {\\zeta} (n \\eta^ {0. 7 5})) \\right| \\leq C _ {g} \\eta^ {0. 
2 5} (\\log \\frac {1}{\\eta}) ^ {b},", + "image_path": "e84a5f6f82d6683a8328730364140a1cb790e566ac57aebaca439d9fd6ea196c.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "inline_equation", + "content": "C_g > 0" + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "text", + "content": " is a constant independent of " + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "text", + "content": " but can depend on " + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "inline_equation", + "content": "b > 0" + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "text", + "content": " is a constant independent of " + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 104, + 582, + 504, + 607 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 617, + 345, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 617, + 345, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 345, + 628 + ], + "type": "text", + "content": "K.10.1 PRELIMINARIES AND ADDITIONAL NOTATIONS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 636, + 504, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 636, + 504, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 636, + 504, + 659 + ], + "type": "text", + "content": "We first introduce a general formulation for stochastic gradient algorithms (SGAs) and then specify the components of this formulation in our context. Consider the following SGA:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 246, + 662, + 363, + 675 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 662, + 363, + 675 + ], + "spans": [ + { + "bbox": [ + 246, + 662, + 363, + 675 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {n + 1} = \\boldsymbol {x} _ {n} + \\eta_ {\\mathrm {e}} \\boldsymbol {h} (\\boldsymbol {x} _ {n}, \\boldsymbol {\\xi} _ {n}),", + "image_path": "00c4c0d4bc5f961a5ed43ccf576c2b5534b4d683822dc6b3e6c725de8bea73c8.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "inline_equation", + "content": "\\pmb{x}_n \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "text", + "content": " is the parameter, " + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "inline_equation", + "content": "\\eta_{\\mathrm{e}}" + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "text", 
+ "content": " is the learning rate, " + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "inline_equation", + "content": "h(\\cdot, \\cdot)" + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "text", + "content": " is the update which depends on " + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "inline_equation", + "content": "\\pmb{x}_n" + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "text", + "content": " and a random vector " + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "inline_equation", + "content": "\\pmb{\\xi}_n" + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "text", + "content": " sampled from some distribution " + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "inline_equation", + "content": "\\Xi(\\pmb{x}_n)" + }, + { + "bbox": [ + 104, + 679, + 505, + 714 + ], + "type": "text", + "content": ". Also, consider the following Stochastic Differential Equation (SDE)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 225, + 716, + 384, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 716, + 384, + 730 + ], + "spans": [ + { + "bbox": [ + 225, + 716, + 384, + 730 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {X} (t) = \\boldsymbol {b} (\\boldsymbol {X} (t)) \\mathrm {d} t + \\boldsymbol {\\sigma} (\\boldsymbol {X} (t)) \\mathrm {d} \\boldsymbol {W} _ {t},", + "image_path": "900aebf7c01971233f9c68a6e67b2e45aaa0ce6c8b9cd3bdbe8b710d6ce3fa2d.jpg" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": 
"page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "67" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 66 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 476, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 476, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 476, + 95 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 81, + 476, + 95 + ], + "type": "inline_equation", + "content": "\\pmb {b}(\\cdot):\\mathbb{R}^d\\to \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 81, + 476, + 95 + ], + "type": "text", + "content": " is the drift function and " + }, + { + "bbox": [ + 104, + 81, + 476, + 95 + ], + "type": "inline_equation", + "content": "\\sigma (\\cdot):\\mathbb{R}^{d\\times d}\\rightarrow \\mathbb{R}^{d\\times d}" + }, + { + "bbox": [ + 104, + 81, + 476, + 95 + ], + "type": "text", + "content": " is the diffusion matrix." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 99, + 472, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 99, + 472, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 99, + 472, + 111 + ], + "type": "text", + "content": "Denote by " + }, + { + "bbox": [ + 104, + 99, + 472, + 111 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_X(\\pmb {x},s,t)" + }, + { + "bbox": [ + 104, + 99, + 472, + 111 + ], + "type": "text", + "content": " the distribution of " + }, + { + "bbox": [ + 104, + 99, + 472, + 111 + ], + "type": "inline_equation", + "content": "X(t)" + }, + { + "bbox": [ + 104, + 99, + 472, + 111 + ], + "type": "text", + "content": " with the initial condition " + }, + { + "bbox": [ + 104, + 99, + 472, + 111 + ], + "type": "inline_equation", + "content": "X(s) = x" + }, + { + "bbox": [ + 104, + 99, + 472, + 111 + ], + "type": "text", + "content": " .Define" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 117, + 470, + 132 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 117, + 470, + 132 + ], + "spans": [ + { + "bbox": [ + 140, + 117, + 470, + 132 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {\\Delta}} (\\boldsymbol {x}, n) := \\boldsymbol {X} _ {(n + 1) \\eta_ {\\mathrm {e}}} - \\boldsymbol {x}, \\quad \\text {w h e r e} \\boldsymbol {X} _ {(n + 1) \\eta_ {\\mathrm {e}}} \\sim \\mathcal {P} _ {\\boldsymbol {X}} (\\boldsymbol {x}, n \\eta_ {\\mathrm {e}}, (n + 1) \\eta_ {\\mathrm {e}}),", + "image_path": "3208f2a1931ae5684de766e3f04388743faefaf3f81300f8d4b309c39445a055.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 136, + 279, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 136, + 279, + 148 + ], + "spans": [ + { + "bbox": [ + 104, + 136, + 279, + 148 + ], + "type": "text", + "content": "which characterizes the update in one step." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "text", + "content": "In our context, we view the change of manifold projection over " + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}} \\coloneqq \\left\\lfloor \\frac{1}{\\alpha\\eta^{1 - \\beta}} \\right\\rfloor (\\beta \\in (0, 0.5))" + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "text", + "content": " rounds as one \"giant step\". Hence the " + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "inline_equation", + "content": "\\phi^{(nR_{\\mathrm{grp}})}" + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "text", + "content": " corresponds to the discrete time random variable " + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "inline_equation", + "content": "x_{n}" + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "text", + "content": " corresponds to and " + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "inline_equation", + "content": "\\zeta(t)" + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "text", + "content": " corresponds to the continuous time random variable " + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "inline_equation", + "content": "X_{t}" + }, + { + "bbox": [ + 104, + 152, + 504, + 201 + ], + "type": "text", + "content": ". 
According to Theorem K.2, we set" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 119, + 205, + 489, + 230 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 205, + 489, + 230 + ], + "spans": [ + { + "bbox": [ + 119, + 205, + 489, + 230 + ], + "type": "interline_equation", + "content": "\\eta_ {\\mathrm {e}} = \\eta^ {1 - \\beta}, \\quad \\boldsymbol {b} (\\boldsymbol {\\zeta}) = \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) \\left[ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) \\right], \\quad \\boldsymbol {\\sigma} (\\boldsymbol {\\zeta}) = \\frac {1}{\\sqrt {B}} \\partial \\Phi (\\boldsymbol {\\zeta}) \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {\\zeta}).", + "image_path": "4b16ab09b31410a79e4f4b8a42b112514f773841ad00b10a884bf27def7e25a5.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "spans": [ + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "text", + "content": "Due to compactness of " + }, + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "inline_equation", + "content": "b(\\cdot)" + }, + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "inline_equation", + "content": "\\sigma(\\cdot)" + }, + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "text", + "content": " are Lipschitz on " + }, + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 235, + 345, + 248 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 253, + 367, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 253, + 367, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 253, + 367, + 266 + ], + "type": "text", + "content": "As for the update in one step, " + }, + { + "bbox": [ + 104, + 253, + 367, + 266 + ], + "type": "inline_equation", + "content": "\\tilde{\\Delta} (\\cdot ,\\cdot)" + }, + { + "bbox": [ + 104, + 253, + 367, + 266 + ], + "type": "text", + "content": " is defined in our context as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 127, + 271, + 481, + 287 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 271, + 481, + 287 + ], + "spans": [ + { + "bbox": [ + 127, + 271, + 481, + 287 + ], + "type": "interline_equation", + "content": "\\tilde {\\Delta} (\\phi , n) := \\zeta_ {(n + 1) \\eta_ {\\mathrm {e}}} - \\phi , \\qquad \\text {w h e r e} \\zeta_ {(n + 1) \\eta_ {\\mathrm {e}}} \\sim \\mathcal {P} _ {\\zeta} (\\phi , n \\eta_ {\\mathrm {e}}, (n + 1) \\eta_ {\\mathrm {e}}) \\text {a n d} \\phi \\in \\Gamma .", + "image_path": "f5082b97503ff5420aa764e84f375d205ac04bd1ff788664d6026f0223530da3.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 290, + 247, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 290, + 247, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 290, + 247, + 301 + ], + "type": "text", + "content": "For convenience, we further define" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 157, + 305, + 453, + 321 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 305, + 453, + 321 + ], + "spans": [ + { + "bbox": [ + 157, + 305, + 453, + 321 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\Delta} ^ {(n)} := \\hat {\\phi} ^ {((n + 1) R _ {\\mathrm {g r p}})} - \\hat {\\phi} ^ {(n R _ {\\mathrm {g r p}})}, \\qquad \\qquad \\tilde {\\boldsymbol 
{\\Delta}} ^ {(n)} := \\tilde {\\boldsymbol {\\Delta}} (\\hat {\\phi} ^ {(R _ {\\mathrm {g r p}})}, n),", + "image_path": "e6986ae49e1b5c543f80532de896157a5911aae2dd97342f6ab4135b7c60e3ea.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 157, + 323, + 441, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 323, + 441, + 338 + ], + "spans": [ + { + "bbox": [ + 157, + 323, + 441, + 338 + ], + "type": "interline_equation", + "content": "\\boldsymbol {b} ^ {(n)} := \\boldsymbol {b} (\\hat {\\boldsymbol {\\phi}} ^ {(n R _ {\\mathrm {g r p}})}), \\qquad \\qquad \\boldsymbol {\\sigma} ^ {(n)} := \\boldsymbol {\\sigma} (\\hat {\\boldsymbol {\\phi}} ^ {(n R _ {\\mathrm {g r p}})}).", + "image_path": "f457b6e7c75766ff2dfe199654d21743382826855a3e3f25215182f3e5f880b2.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "text", + "content": "We use " + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "inline_equation", + "content": "C_{g,i}" + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "text", + "content": " to denote constants that can depend on the test function " + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "text", + "content": " and independent of " + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "inline_equation", + "content": "\\eta_{\\mathrm{e}}" + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "text", + "content": ". 
The following lemma relates the moments of " + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "inline_equation", + "content": "\\tilde{\\Delta}(\\phi, n)" + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "inline_equation", + "content": "b(\\phi)" + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "inline_equation", + "content": "\\sigma(\\phi)" + }, + { + "bbox": [ + 104, + 342, + 504, + 368 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "spans": [ + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "type": "text", + "content": "Lemma K.40. There exists a positive constant " + }, + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "type": "inline_equation", + "content": "C_0" + }, + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "type": "text", + "content": " independent of " + }, + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "type": "inline_equation", + "content": "\\eta_{\\mathrm{e}}" + }, + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "type": "text", + "content": " such that for all " + }, + { + "bbox": [ + 104, + 370, + 503, + 383 + ], + "type": "inline_equation", + "content": "\\phi \\in \\Gamma" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 225, + 387, + 477, + 403 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 387, + 477, + 403 + ], + "spans": [ + { + "bbox": [ + 225, + 387, + 477, + 403 + ], + "type": "interline_equation", + 
"content": "| \\mathbb {E} [ \\tilde {\\Delta} _ {i} (\\phi , n) ] - \\eta_ {\\mathrm {e}} b _ {i} (\\phi) | \\leq C _ {0} \\eta_ {\\mathrm {e}} ^ {2}, \\quad \\forall 1 \\leq i \\leq d,", + "image_path": "553b0fd1ffbeab40392becaad773b0510854234160b12a1e7cc2476f5164897b.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 134, + 405, + 477, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 405, + 477, + 437 + ], + "spans": [ + { + "bbox": [ + 134, + 405, + 477, + 437 + ], + "type": "interline_equation", + "content": "| \\mathbb {E} [ \\tilde {\\Delta} _ {i} (\\phi , n) \\tilde {\\Delta} _ {j} (\\pmb {x}, n) ] - \\eta_ {\\mathrm {e}} \\sum_ {l = 1} ^ {d} \\sigma_ {i, l} (\\phi) \\sigma_ {l, j} (\\phi) | \\leq C _ {0} \\eta_ {\\mathrm {e}} ^ {2}, \\quad \\forall 1 \\leq i, j \\leq d,", + "image_path": "77c3d0d7a1d4f08f9352a50082ad52cf3bc558663c682d5deaa9ff2462fae41e.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 239, + 439, + 476, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 439, + 476, + 472 + ], + "spans": [ + { + "bbox": [ + 239, + 439, + 476, + 472 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\tilde {\\Delta} _ {i _ {s}} (\\phi , n) \\right| \\right] \\leq C _ {0} \\eta_ {\\mathrm {e}} ^ {3}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d.", + "image_path": "356bd3f1b532f445aace002cc90f62c691df740cb71a47ed6c98e9475aca14e0.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 476, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 476, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 504, + 498 + ], + "type": "text", + "content": "The lemma below states that the expectation of the test function is smooth with respect to the initial value." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "text", + "content": "Proof. Noticing that (i) the solution to (108) always stays on " + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "text", + "content": " almost surely if its initial value " + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "inline_equation", + "content": "\\zeta(0)" + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "text", + "content": " belongs to " + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "text", + "content": ", (ii) " + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "inline_equation", + "content": "b(\\cdot)" + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "inline_equation", + "content": "\\sigma(\\cdot)" + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^\\infty" + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "text", + "content": " and (iii) " + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 511, + 505, + 544 + ], + "type": "text", + "content": " is compact, we can directly apply Lemma B.3 in Malladi et al. (2022) and Lemma 26 in Li et al. (2019a) to obtain the above lemma." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "type": "text", + "content": "The following lemma states that the expectation of " + }, + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "type": "inline_equation", + "content": "g(\\zeta(t))" + }, + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "type": "inline_equation", + "content": "g \\in \\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 556, + 504, + 578 + ], + "type": "text", + "content": " is smooth with respect to the initial value of the SDE solution." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "text", + "content": "Lemma K.41. Let " + }, + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "inline_equation", + "content": "s \\in [0, T]" + }, + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "inline_equation", + "content": "\\phi \\in \\Gamma" + }, + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "inline_equation", + "content": "g \\in \\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "text", + "content": ". 
For " + }, + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "inline_equation", + "content": "t \\in [s, T]" + }, + { + "bbox": [ + 104, + 582, + 392, + 594 + ], + "type": "text", + "content": ", define" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 234, + 599, + 374, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 599, + 374, + 613 + ], + "spans": [ + { + "bbox": [ + 234, + 599, + 374, + 613 + ], + "type": "interline_equation", + "content": "u (\\phi , s, t) := \\mathbb {E} _ {\\zeta_ {t} \\sim \\mathcal {P} _ {\\zeta} (\\phi , s, t)} [ g (\\zeta_ {t}) ].", + "image_path": "7cdf3395fcedc5df58e545bf1c6c49be1b8287919fec5e313d8306411151b846.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 619, + 255, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 619, + 255, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 255, + 631 + ], + "type": "text", + "content": "Then " + }, + { + "bbox": [ + 104, + 619, + 255, + 631 + ], + "type": "inline_equation", + "content": "u(\\cdot ,s,t)\\in \\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 619, + 255, + 631 + ], + "type": "text", + "content": " uniformly in " + }, + { + "bbox": [ + 104, + 619, + 255, + 631 + ], + "type": "inline_equation", + "content": "s,t" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 643, + 487, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 487, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 487, + 654 + ], + "type": "text", + "content": "Proof. A slight modification of Lemma B.4 in Malladi et al. (2022) will give the above lemma." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 666, + 359, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 359, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 359, + 677 + ], + "type": "text", + "content": "K.10.2 PROOF OF THE APPROXIMATION IN OUR CONTEXT" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "text", + "content": "For " + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\beta \\in (0, 0.5)" + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "text", + "content": ", define " + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\gamma_1 \\coloneqq \\frac{1.5 - 2\\beta}{1 - \\beta}" + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\gamma_2 \\coloneqq \\frac{1}{1 - \\beta}" + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "text", + "content": ", and then " + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "inline_equation", + "content": "1 < \\gamma_1 < 1.5" + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "inline_equation", + "content": "1 < \\gamma_2 < 2" + }, + { + "bbox": [ + 104, + 685, + 504, + 732 + ], + "type": "text", + "content": ". We introduce the following lemma which serves as a key step to control the approximation error. Specifically, this lemma bounds the difference in one step change between the discrete process and the continuous one as well as the product of higher orders." 
+ } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "68" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 67 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "text", + "content": "Lemma K.42. If " + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "inline_equation", + "content": "\\| \\bar{\\theta}^{(0)} - \\phi^{(0)}\\| _2 = \\mathcal{O}(\\sqrt{\\eta\\log\\frac{1}{\\eta}})" + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "text", + "content": ", then there exist positive constants " + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "inline_equation", + "content": "C_1" + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "text", + "content": " independent of " + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "inline_equation", + "content": "\\eta_{\\mathrm{e}}" + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "inline_equation", + "content": "g" 
+ }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "text", + "content": " such that for all " + }, + { + "bbox": [ + 104, + 82, + 506, + 113 + ], + "type": "inline_equation", + "content": "0\\leq n < \\lfloor T / \\eta_{\\mathrm{e}}\\rfloor" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 118, + 120, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 118, + 120, + 129 + ], + "spans": [ + { + "bbox": [ + 110, + 118, + 120, + 129 + ], + "type": "text", + "content": "1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 167, + 129, + 499, + 148 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 129, + 499, + 148 + ], + "spans": [ + { + "bbox": [ + 167, + 129, + 499, + 148 + ], + "type": "interline_equation", + "content": "\\left| \\mathbb {E} \\left[ \\Delta_ {i} ^ {(n)} - \\tilde {\\Delta} _ {i} ^ {(n)} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} + C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\quad \\forall 1 \\leq i \\leq d, \\right.", + "image_path": "f763d88727a654cd90f4fc059509d81bbb10cdab16ac2af9fa0234e2c70fb927.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 128, + 150, + 499, + 168 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 150, + 499, + 168 + ], + "spans": [ + { + "bbox": [ + 128, + 150, + 499, + 168 + ], + "type": "interline_equation", + "content": "| \\mathbb {E} [ \\Delta_ {i} ^ {(n)} \\Delta_ {j} ^ {(n)} - \\tilde {\\Delta} _ {i} ^ {(n)} \\tilde {\\Delta} _ {j} ^ {(n)} | \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} | \\leq C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b} + C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) 
^ {b}, \\forall 1 \\leq i, j \\leq d.", + "image_path": "08850bbfc0a45f9bb61fd93e65bf3a982d1f536e82add6120bc5a199c4fbeb31.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 175, + 120, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 175, + 120, + 186 + ], + "spans": [ + { + "bbox": [ + 110, + 175, + 120, + 186 + ], + "type": "text", + "content": "2." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 156, + 186, + 470, + 220 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 186, + 470, + 220 + ], + "spans": [ + { + "bbox": [ + 156, + 186, + 470, + 220 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\Delta_ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {1} ^ {2} \\eta_ {\\mathrm {e}} ^ {2 \\gamma_ {1}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {2 b}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d,", + "image_path": "4eb131279c30f6ea92dff9aab3db47f07eac453bbbe23cf0527e6118989f1a15.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 157, + 222, + 470, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 222, + 470, + 255 + ], + "spans": [ + { + "bbox": [ + 157, + 222, + 470, + 255 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\tilde {\\Delta} _ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {1} ^ {2} \\eta_ {\\mathrm {e}} ^ {2 \\gamma_ {1}} (\\log \\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {2 b}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d.", + "image_path": "7a158eaf97764e2f6cb6b3a2bafa1f8d66dd4ca4bcbbbe31996b2f6af180a576.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 266, + 288, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 266, + 288, + 
277 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 288, + 277 + ], + "type": "text", + "content": "Proof. According to Appendix K.7, we have" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 222, + 279, + 388, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 279, + 388, + 312 + ], + "spans": [ + { + "bbox": [ + 222, + 279, + 388, + 312 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\Delta_ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] = \\tilde {\\mathcal {O}} (\\eta^ {3 - 3 \\beta}).", + "image_path": "bda04878caac264b2d30f84a02496e701d78009413d1eed3a9b4878e65835d52.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "inline_equation", + "content": "\\gamma_{1} < 1.5" + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "inline_equation", + "content": "\\gamma_{2} < 2" + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": ", we can utilize Theorem K.3 and conclude that there exist positive constants " + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "inline_equation", + "content": "C_2" + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": " independent of " + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "inline_equation", + "content": 
"\\eta_{\\mathrm{e}}" + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 170, + 339, + 504, + 377 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 339, + 504, + 377 + ], + "spans": [ + { + "bbox": [ + 170, + 339, + 504, + 377 + ], + "type": "interline_equation", + "content": "\\left| \\mathbb {E} \\left[ \\Delta_ {i} ^ {(n)} - \\eta_ {\\mathrm {e}} b _ {i} ^ {(n)} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| \\leq C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} + C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\forall 1 \\leq i \\leq d, \\tag {111}", + "image_path": "9b1fdb93171dbeba759613acebbd6220a65af66c7dfee6a7c8b0a4629ff1f10e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 113, + 380, + 504, + 425 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 380, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 113, + 380, + 504, + 425 + ], + "type": "interline_equation", + "content": "\\left| \\mathbb {E} \\left[ \\Delta_ {i} ^ {(n)} \\Delta_ {j} ^ {(n)} - \\eta_ {\\mathrm {e}} \\sum_ {l = 1} ^ {d} \\sigma_ {i, l} ^ {(n)} \\sigma_ {l, j} ^ {(n)} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| \\leq C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} + C _ {2} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\forall 1 \\leq i, j \\leq d, \\tag {112}", + "image_path": 
"1be86f732045fc346a9c0dcd8b63af9024dc6a8b23954dcb3d0d1f646b97c89e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 183, + 427, + 505, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 427, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 183, + 427, + 505, + 460 + ], + "type": "interline_equation", + "content": "\\mathbb {E} \\left[ \\left| \\prod_ {s = 1} ^ {6} \\Delta_ {i _ {s}} ^ {(n)} \\right| \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\leq C _ {2} ^ {2} \\eta_ {\\mathrm {e}} ^ {2 \\gamma_ {1}} \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {2 b}, \\quad \\forall 1 \\leq i _ {1}, \\dots , i _ {6} \\leq d. \\tag {113}", + "image_path": "2ab435a79610b7fa2c401e67b63f2232ff1881776178aa2ec6dee586dfaa0168.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 462, + 379, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 379, + 474 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 379, + 474 + ], + "type": "text", + "content": "Combining (111) - (113) with Lemma K.40 gives the above lemma." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "text", + "content": "Lemma K.43. 
For a test function " + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "inline_equation", + "content": "g \\in \\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "inline_equation", + "content": "u_{l,n}(\\phi) \\coloneqq u(\\phi, l\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}}) = \\mathbb{E}_{\\zeta_t \\sim \\mathcal{P}_{\\zeta}(\\phi, l\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}})}[g(\\zeta_t)]" + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "text", + "content": ". If " + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "inline_equation", + "content": "\\|\\bar{\\pmb{\\theta}}^{(0)} - \\pmb{\\phi}^{(0)}\\|_2 = \\mathcal{O}(\\sqrt{\\eta \\log \\frac{1}{\\eta}})" + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "text", + "content": ", then for all " + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "inline_equation", + "content": "0 \\leq l \\leq n-1" + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "inline_equation", + "content": "1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor" + }, + { + "bbox": [ + 104, + 479, + 504, + 512 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 514, + 504, + 535 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 514, + 504, + 535 + ], + "spans": [ + { + "bbox": [ + 106, + 514, + 504, + 535 + ], + "type": "interline_equation", + "content": "\\left| \\mathbb {E} [ u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\pmb {\\Delta} ^ {(l)}) - u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\pmb {\\Delta}} ^ {(l + 1)}) \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} ] \\right| \\leq C _ {g, 1} (\\eta_ {\\mathrm {e}} ^ 
{\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}) \\log (\\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b},", + "image_path": "a1e838f6299585b7eac4de590636e6c853c81e7598dea829588bdbafea51f424.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "inline_equation", + "content": "C_{g,1}" + }, + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "text", + "content": " is a positive constant independent of " + }, + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(lR_{\\mathrm{grp}})}" + }, + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "text", + "content": " but can depend on " + }, + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 105, + 539, + 447, + 553 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "text", + "content": "Proof. 
By Lemma K.41, " + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "inline_equation", + "content": "u_{l,n}(\\phi) \\in \\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "text", + "content": ". That is, there exists " + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "inline_equation", + "content": "K(\\cdot) \\in G" + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "text", + "content": " such that for all " + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "inline_equation", + "content": "l, n, u_{l,n}(\\phi)" + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "text", + "content": " and its partial derivatives up to the third order are bounded by " + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "inline_equation", + "content": "K(\\phi)" + }, + { + "bbox": [ + 104, + 563, + 506, + 588 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 592, + 326, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 326, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 326, + 605 + ], + "type": "text", + "content": "By the law of total expectation and triangle inequality," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 149, + 606, + 465, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 606, + 465, + 730 + ], + "spans": [ + { + "bbox": [ + 149, + 606, + 465, + 730 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left| \\mathbb {E} [ u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\pmb {\\Delta} ^ {(l)}) - u _ {l + 1, n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\pmb {\\Delta}} ^ {(l)}) ] \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} \\right| \\\\ \\leq \\underbrace {\\left| \\mathbb {E} \\left[ u _ {l + 1 , n} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\boldsymbol {\\Delta} ^ {(l)}\\right) - u _ {l + 1 , n} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\boldsymbol {\\Delta}} ^ {(l)}\\right) \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} , \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} \\right] \\right|} _ {\\mathcal {A} _ {1}} \\\\ + \\underbrace {\\eta^ {1 0 0} \\mathbb {E} \\left[ \\left| u _ {l + 1 , n} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\boldsymbol {\\Delta} ^ {(l)}\\right) \\right| \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\bar {\\mathcal {E}} _ {0} ^ {(l R _ {\\mathrm {g r p}})} \\right]} _ {\\mathcal {A} _ {2}} \\\\ + \\underbrace {\\eta^ {1 0 0} \\mathbb {E} [ | u _ {l + 1 , n} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\Delta} ^ {(l)}) | | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} , \\bar {\\mathcal {E}} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ]} _ {\\mathcal {A} _ {3}}. 
\\\\ \\end{array}", + "image_path": "69d6536fe3b8be21f2b6807a51c7fb4a0907c6cd4311ce5a9e29ec5758a69c4f.jpg" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "69" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 68 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": "We first bound " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_2" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_3" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": ". 
Since " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(lR_{\\mathrm{grp}})} \\in \\Gamma" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": ", both " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\pmb{\\Delta}^{(l)}" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\tilde{\\Delta}^{(l)}" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": " belong to " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": ". Due to compactness of " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": " and smoothness of " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "u_{l+1,n}(\\cdot)" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": ", there exist a positive constant " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "C_{g,2}" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_2 + \\mathcal{A}_3 \\leq C_{g,2}\\eta^{100}" + }, + { + "bbox": [ + 104, + 80, + 504, + 118 + 
], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 123, + 448, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 123, + 448, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 448, + 137 + ], + "type": "text", + "content": "We proceed to bound " + }, + { + "bbox": [ + 105, + 123, + 448, + 137 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_1" + }, + { + "bbox": [ + 105, + 123, + 448, + 137 + ], + "type": "text", + "content": ". Expanding " + }, + { + "bbox": [ + 105, + 123, + 448, + 137 + ], + "type": "inline_equation", + "content": "u_{l + 1,n}(\\cdot)" + }, + { + "bbox": [ + 105, + 123, + 448, + 137 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 105, + 123, + 448, + 137 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(lR_{\\mathrm{grp}})}" + }, + { + "bbox": [ + 105, + 123, + 448, + 137 + ], + "type": "text", + "content": " and by triangle inequality," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 126, + 140, + 483, + 252 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 140, + 483, + 252 + ], + "spans": [ + { + "bbox": [ + 126, + 140, + 483, + 252 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {A} _ {1} ^ {(s)} \\leq \\underbrace {\\sum_ {i = 1} ^ {d} \\left| \\mathbb {E} \\big [ \\frac {\\partial u _ {l + 1 , n}}{\\partial \\phi_ {i}} (\\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}) \\left(\\Delta_ {i} ^ {(l)} - \\tilde {\\Delta} _ {i} ^ {(l)}\\right) | \\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} \\right|} _ {\\mathcal {B} _ {1}} \\\\ + \\underbrace {\\frac {1}{2} \\sum_ {1 \\leq i , j \\leq d} \\left| \\mathbb {E} [ \\frac {\\partial^ {2} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j}} (\\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}) (\\Delta_ {i} ^ 
{(l)} \\Delta_ {j} ^ {(l)} - \\tilde {\\Delta} _ {i} ^ {(l)} \\tilde {\\Delta} _ {j} ^ {(l)}) | \\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ] \\right|} _ {\\mathcal {B} _ {2}} \\\\ + | \\mathcal {R} | + | \\tilde {\\mathcal {R}} |, \\\\ \\end{array}", + "image_path": "0eef18584e43a3164c052ae9dd7eee20d1368e507ac889d57cbe8841a879759f.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 255, + 248, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 248, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 248, + 267 + ], + "type": "text", + "content": "where the remainders " + }, + { + "bbox": [ + 105, + 255, + 248, + 267 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 105, + 255, + 248, + 267 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 255, + 248, + 267 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{R}}" + }, + { + "bbox": [ + 105, + 255, + 248, + 267 + ], + "type": "text", + "content": " are" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 268, + 459, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 268, + 459, + 300 + ], + "spans": [ + { + "bbox": [ + 133, + 268, + 459, + 300 + ], + "type": "interline_equation", + "content": "\\mathcal {R} = \\frac {1}{6} \\sum_ {1 \\leq i, j, p \\leq d} \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\boldsymbol {\\Delta} ^ {(l)}) \\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} | \\hat {\\boldsymbol {\\phi}} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ],", + "image_path": "a537ba7a12d1200eefc3fee0df6f5490174cf5dfb7afd3d7d28d5027bc6c93a4.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 133, 
+ 301, + 476, + 332 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 301, + 476, + 332 + ], + "spans": [ + { + "bbox": [ + 133, + 301, + 476, + 332 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathcal {R}} = \\frac {1}{6} \\sum_ {1 \\leq i, j, p \\leq d} \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\tilde {\\theta} \\tilde {\\Delta} ^ {(l)}) \\tilde {\\Delta} _ {i} ^ {(l)} \\tilde {\\Delta} _ {j} ^ {(l)} \\tilde {\\Delta} _ {p} ^ {(l)} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ],", + "image_path": "87f49a622e0d051314ab64e31018bd2e9027878e2f9d448dc6202811fb16b919.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "text", + "content": "for some " + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "inline_equation", + "content": "\\theta, \\tilde{\\theta} \\in (0,1)" + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "text", + "content": ". 
Since " + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(LR_{\\mathrm{grp}})}" + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "text", + "content": " belongs to " + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "text", + "content": " which is compact, there exists a constant " + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "inline_equation", + "content": "C_{g,3}" + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "text", + "content": " such that for all " + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "inline_equation", + "content": "1 \\leq i,j \\leq d, 0 \\leq l \\leq n-1, 1 \\leq n \\leq \\lfloor T/\\eta_{\\mathrm{e}} \\rfloor" + }, + { + "bbox": [ + 104, + 335, + 504, + 361 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 176, + 362, + 434, + 389 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 362, + 434, + 389 + ], + "spans": [ + { + "bbox": [ + 176, + 362, + 434, + 389 + ], + "type": "interline_equation", + "content": "| \\frac {\\partial u _ {l + 1 , n}}{\\partial \\phi_ {i}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}) | \\leq C _ {g, 3}, \\qquad | \\frac {\\partial^ {2} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}) | \\leq C _ {g, 3}.", + "image_path": "4899f82a76f02bce5055aebdbe913602a928903a1fabbe3d524ceda75c68afd1.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 390, + 178, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 178, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 178, + 402 + ], + "type": "text", + "content": "By Lemma K.42," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 142, + 403, + 466, + 
428 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 403, + 466, + 428 + ], + "spans": [ + { + "bbox": [ + 142, + 403, + 466, + 428 + ], + "type": "interline_equation", + "content": "\\mathcal {B} _ {1} \\leq d C _ {g, 3} C _ {1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}, \\quad \\mathcal {B} _ {2} \\leq \\frac {d ^ {2}}{2} C _ {g, 3} C _ {1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}.", + "image_path": "fa1501a6fa2d228cccd1bf1ac42cc229ec0fcf9a073e3bdedae9ddc4f8776b1a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 429, + 365, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 429, + 365, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 365, + 441 + ], + "type": "text", + "content": "Now we bound the remainders. 
By Cauchy-Schwartz inequality," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 159, + 442, + 454, + 531 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 442, + 454, + 531 + ], + "spans": [ + { + "bbox": [ + 159, + 442, + 454, + 531 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left| \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\pmb {\\Delta} ^ {(l)}) \\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} \\Delta_ {p} ^ {(l)} \\mid \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ] \\right| \\\\ \\leq \\left(\\mathbb {E} \\left[ \\left(\\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\boldsymbol {\\Delta} ^ {(l)})\\right) ^ {2} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right]\\right) ^ {1 / 2} \\times \\\\ \\left(\\mathbb {E} [ (\\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} \\Delta_ {p} ^ {(l)}) ^ {2} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} ]\\right) ^ {1 / 2}. 
\\\\ \\end{array}", + "image_path": "897fe2d7232d4db58c19c351bb0764fe60fbd7a1b74376fc49cf1c94eb4a3d42.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(lR_{\\mathrm{grp}})}" + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\Delta^{(l)}" + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "text", + "content": " both belong to " + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "text", + "content": " which is compact, there exists a constant " + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "inline_equation", + "content": "C_{g,4}" + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "text", + "content": " such that for all " + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "inline_equation", + "content": "1 \\leq i, j, p \\leq d, 0 \\leq l \\leq n - 1" + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "inline_equation", + "content": "1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor" + }, + { + "bbox": [ + 104, + 539, + 504, + 564 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 220, + 565, + 390, + 587 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 565, + 390, + 587 + ], + "spans": [ + { + 
"bbox": [ + 220, + 565, + 390, + 587 + ], + "type": "interline_equation", + "content": "\\left. \\right.\\left(\\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} \\left(\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\Delta^ {(l)}\\right)\\right) ^ {2} \\leq C _ {g, 4} ^ {2}.", + "image_path": "b919ad351edbfd6582d18f2c9c48354b74d5fa4ea69395602ace070de644257d.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 589, + 350, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 589, + 350, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 589, + 350, + 601 + ], + "type": "text", + "content": "Combining the above inequality with Lemma K.42, we have" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 118, + 602, + 492, + 630 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 602, + 492, + 630 + ], + "spans": [ + { + "bbox": [ + 118, + 602, + 492, + 630 + ], + "type": "interline_equation", + "content": "\\left| \\mathbb {E} [ \\frac {\\partial^ {3} u _ {l + 1 , n}}{\\partial \\phi_ {i} \\partial \\phi_ {j} \\partial \\phi_ {p}} (\\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})} + \\theta \\pmb {\\Delta} ^ {(l)}) \\Delta_ {i} ^ {(l)} \\Delta_ {j} ^ {(l)} \\Delta_ {p} ^ {(l)} | \\hat {\\phi} ^ {(l R _ {\\mathrm {g r p}})}, \\mathcal {E} _ {0} ^ {(l R _ {\\mathrm {g r p}})} ] \\right| \\leq C _ {g, 4} C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\log (\\frac {1}{\\eta_ {\\mathrm {e}}}) ^ {b}.", + "image_path": "44eff6ce85696a8298d0cf13bbeaf91f1fdc2d8b27c1538f414891b001f56fb0.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 631, + 298, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 631, + 298, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 298, + 643 + ], + "type": "text", + "content": "Hence, for all " + }, + { + "bbox": [ + 105, + 631, + 298, + 643 + ], + "type": 
"inline_equation", + "content": "1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor, 0 \\leq l \\leq n - 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 242, + 646, + 367, + 670 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 646, + 367, + 670 + ], + "spans": [ + { + "bbox": [ + 242, + 646, + 367, + 670 + ], + "type": "interline_equation", + "content": "| \\mathcal {R} | \\leq \\frac {d ^ {3}}{6} C _ {g, 4} C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\log \\left(\\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}.", + "image_path": "3d2d79100de756ac1f64ac974a06f8246ce20357f6062cc38684c8d423e35430.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 671, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 671, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 671, + 504, + 693 + ], + "type": "text", + "content": "Similarly, we can show that there exists a constant " + }, + { + "bbox": [ + 104, + 671, + 504, + 693 + ], + "type": "inline_equation", + "content": "C_{g,5}" + }, + { + "bbox": [ + 104, + 671, + 504, + 693 + ], + "type": "text", + "content": " such that for all " + }, + { + "bbox": [ + 104, + 671, + 504, + 693 + ], + "type": "inline_equation", + "content": "1 \\leq n \\leq \\lfloor T / \\eta_{\\mathrm{e}} \\rfloor" + }, + { + "bbox": [ + 104, + 671, + 504, + 693 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 671, + 504, + 693 + ], + "type": "inline_equation", + "content": "0 \\leq l \\leq n - 1" + }, + { + "bbox": [ + 104, + 671, + 504, + 693 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 242, + 695, + 367, + 719 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 695, + 367, + 719 + ], + "spans": [ + { + "bbox": [ + 242, + 695, + 367, + 719 + ], + "type": "interline_equation", + "content": "| \\tilde {\\mathcal {R}} | \\leq \\frac {d ^ 
{3}}{6} C _ {g, 5} C _ {1} \\eta_ {\\mathrm {e}} ^ {\\gamma_ {1}} \\log \\left(\\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}.", + "image_path": "36dd9b090597bde08ecc1b6771adc04798e228e07acf956a113873b72dd3800e.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 720, + 336, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 336, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 336, + 732 + ], + "type": "text", + "content": "Combining the bounds on " + }, + { + "bbox": [ + 105, + 720, + 336, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_1" + }, + { + "bbox": [ + 105, + 720, + 336, + 732 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 105, + 720, + 336, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_3" + }, + { + "bbox": [ + 105, + 720, + 336, + 732 + ], + "type": "text", + "content": ", we have the lemma." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "70" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 69 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 235, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 235, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 235, + 94 + ], + "type": "text", + "content": "Finally, we prove Theorem K.4." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "content": "Proof. For " + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "inline_equation", + "content": "0 \\leq l \\leq n" + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "content": ", define the random variable " + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "inline_equation", + "content": "\\hat{\\zeta}_{l,n}" + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "content": " which follows the distribution " + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\zeta}(\\hat{\\phi}^{(lR_{\\mathrm{grp}})}, l, n)" + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "content": " conditioned on " + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "inline_equation", + "content": "\\hat{\\phi}^{(lR_{\\mathrm{grp}})}" + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "content": ". Therefore, " + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "inline_equation", + "content": "\\mathbb{P}(\\hat{\\zeta}_{n,n} = \\hat{\\phi}^{(nR_{\\mathrm{grp}})}) = 1" + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "inline_equation", + "content": "\\hat{\\zeta}_{0,n} \\sim \\zeta_{n\\eta_{\\mathrm{e}}}" + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "content": ". 
Denote by " + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "inline_equation", + "content": "u(\\phi, s, t) \\coloneqq \\mathbb{E}_{\\zeta_t \\sim \\mathcal{P}_{\\zeta}(\\phi, s, t)}[g(\\zeta_t)]" + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{l+1,n} \\coloneqq u_{l+1,n}(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\Delta^{(l)}, (l+1)\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}}) - u_{l+1,n}(\\hat{\\phi}^{(lR_{\\mathrm{grp}})} + \\tilde{\\Delta}^{(l)}, (l+1)\\eta_{\\mathrm{e}}, n\\eta_{\\mathrm{e}})" + }, + { + "bbox": [ + 104, + 106, + 505, + 162 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 123, + 167, + 490, + 313 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 167, + 490, + 313 + ], + "spans": [ + { + "bbox": [ + 123, + 167, + 490, + 313 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left| \\mathbb {E} \\left[ g \\left(\\boldsymbol {\\phi} ^ {\\left(n R _ {\\mathrm {g r p}}\\right)}\\right) \\right] - \\mathbb {E} \\left[ g \\left(\\boldsymbol {\\zeta} \\left(n \\eta_ {\\mathrm {e}}\\right)\\right) \\right] \\right| \\\\ \\leq \\left| \\mathbb {E} \\left[ g \\left(\\hat {\\zeta} _ {n, n}\\right) - g \\left(\\hat {\\zeta} _ {0, n}\\right) \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}) \\\\ \\leq \\sum_ {l = 0} ^ {n - 1} \\left| \\mathbb {E} \\left[ g \\left(\\hat {\\zeta} _ {l + 1, n}\\right) - g \\left(\\hat {\\zeta} _ {l, n}\\right) \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}) \\\\ = \\sum_ {l = 0} ^ {n - 1} \\left| \\mathbb {E} \\left[ u \\left(\\hat {\\phi} ^ {\\left((l + 1) R _ {\\mathrm {g r p}}\\right)}, (l + 1) \\eta_ {\\mathrm {e}}, n \\eta_ {\\mathrm {e}}\\right) 
- u \\left(\\hat {\\zeta} _ {l, l + 1}, (l + 1) \\eta_ {\\mathrm {e}}, n \\eta_ {\\mathrm {e}}\\right) \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}) \\\\ = \\sum_ {l = 0} ^ {n - 1} \\left| \\mathbb {E} \\left[ \\mathcal {T} _ {l + 1, n} \\mid \\mathcal {E} _ {0} ^ {(n R _ {\\mathrm {g r p}})} \\right] \\right| + \\mathcal {O} (\\eta^ {1 0 0}). \\\\ \\end{array}", + "image_path": "8af566337969f95e0067a1525dbae28eabadda49856fc0099c91b6cd3715f355.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 319, + 504, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 504, + 346 + ], + "type": "text", + "content": "Noticing that " + }, + { + "bbox": [ + 104, + 319, + 504, + 346 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\mathcal{T}_{l + 1,n}\\mid \\mathcal{E}_0^{(nR_{\\mathrm{grp}})}] = \\mathbb{E}[\\mathbb{E}[\\mathcal{T}_{l + 1,n}\\mid \\hat{\\phi}^{(lR_{\\mathrm{grp}})},\\mathcal{E}_0^{(lR_{\\mathrm{grp}})}]\\mid \\mathcal{E}_0^{(nR_{\\mathrm{grp}})}]" + }, + { + "bbox": [ + 104, + 319, + 504, + 346 + ], + "type": "text", + "content": ", we can apply Lemma K.43 and obtain that for all " + }, + { + "bbox": [ + 104, + 319, + 504, + 346 + ], + "type": "inline_equation", + "content": "0\\leq n\\leq \\lfloor T / \\eta_{\\mathrm{e}}\\rfloor" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 165, + 352, + 444, + 389 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 352, + 444, + 389 + ], + "spans": [ + { + "bbox": [ + 165, + 352, + 444, + 389 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left| \\mathbb {E} \\left[ g \\left(\\phi^ {\\left(n R _ {\\mathrm {g r p}}\\right)}\\right) \\right] - \\mathbb {E} \\left[ g \\left(\\zeta \\left(n \\eta_ {\\mathrm {e}}\\right)\\right) \\right] \\right| \\leq n C _ {g, 1} \\left(\\eta_ 
{\\mathrm {e}} ^ {\\gamma_ {1}} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2}}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b} \\\\ \\leq T C _ {g, 1} \\left(\\eta_ {\\mathrm {e}} ^ {\\gamma_ {1} - 1} + \\eta_ {\\mathrm {e}} ^ {\\gamma_ {2} - 1}\\right) \\left(\\log \\frac {1}{\\eta_ {\\mathrm {e}}}\\right) ^ {b}. \\\\ \\end{array}", + "image_path": "a5270edaed5ed65da73aa0b377c57f477e21a4cdcd063fe7b601fa32415f425e.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "text", + "content": "Notice that " + }, + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "inline_equation", + "content": "\\eta_{\\mathrm{e}}^{\\gamma_1} + \\eta_{\\mathrm{e}}^{\\gamma_2} = \\eta^{0.5 - \\beta} + \\eta^\\beta" + }, + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "inline_equation", + "content": "T, C_{g,1}" + }, + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "text", + "content": " are both constants that are independent of " + }, + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "inline_equation", + "content": "\\eta_{\\mathrm{e}}" + }, + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "inline_equation", + "content": "\\beta = 0.25" + }, + { + "bbox": [ + 104, + 396, + 504, + 419 + ], + "type": "text", + "content": " and we have Theorem K.4." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 430, + 395, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 395, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 395, + 443 + ], + "type": "text", + "content": "Having established Theorem K.4, we are thus led to prove Theorem 3.2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "text", + "content": "Proof of Theorem 3.2. Denote by " + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "inline_equation", + "content": "s_{\\mathrm{cls}} = s_0 + s_1 = \\mathcal{O}(\\log \\frac{1}{\\eta})" + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "text", + "content": ", which is the time the global iterate " + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\bar{\\theta}^{(s)}" + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "text", + "content": " will reach within " + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{O}} (\\eta)" + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "text", + "content": " with high probability. 
Define " + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\tilde{\\zeta} (t)" + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "text", + "content": " to be the solution to the limiting SDE (108) conditioned on " + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_0^{(s_{\\mathrm{cls}})}" + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\tilde{\\zeta}(0) = \\phi^{(s_{\\mathrm{cls}})}" + }, + { + "bbox": [ + 104, + 454, + 504, + 496 + ], + "type": "text", + "content": ". By Theorem K.4, we have" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 124, + 502, + 485, + 525 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 502, + 485, + 525 + ], + "spans": [ + { + "bbox": [ + 124, + 502, + 485, + 525 + ], + "type": "interline_equation", + "content": "\\max _ {n = 0, \\dots , \\lfloor T / \\eta^ {0. 7 5} \\rfloor} \\left| \\mathbb {E} [ g (\\phi^ {(n R _ {\\mathrm {g r p}} + s _ {\\mathrm {c l s}})}) - g (\\tilde {\\zeta} (n \\eta^ {0. 7 5})) | \\phi^ {(s _ {\\mathrm {c l s}})}, \\mathcal {E} _ {0} ^ {(s _ {\\mathrm {c l s}})} ] \\right| \\leq C _ {g} \\eta^ {0. 
2 5} (\\log \\frac {1}{\\eta}) ^ {b},", + "image_path": "77d0ddcec68d05ca5a82dbde4073abd1935171a3792fb48e121927dcef53da0c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{grp}} = \\left\\lfloor \\frac{1}{\\alpha\\eta^{0.75}}\\right\\rfloor" + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "text", + "content": ". Noticing that (i) " + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "inline_equation", + "content": "g\\in \\mathcal{C}^3" + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "text", + "content": " (ii) " + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\pmb {b},\\pmb {\\sigma}\\in \\mathcal{C}^{\\infty}" + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "text", + "content": " and (iii) " + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\zeta (t),\\tilde{\\zeta} (t)\\in \\Gamma ,t\\in [0,\\infty)" + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "text", + "content": " almost surely, we can conclude that given " + }, + { + "bbox": [ + 104, + 531, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_0^{(s_{\\mathrm{cls}})}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 221, + 567, + 388, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 567, + 388, + 582 + ], + "spans": [ + { + "bbox": [ + 221, + 567, + 388, + 582 + ], + "type": "interline_equation", + "content": "\\| \\boldsymbol {\\zeta} (t) - \\tilde {\\boldsymbol {\\zeta}} (t) \\| _ {2} = \\tilde {\\mathcal {O}} (\\sqrt {\\eta}), \\quad 
\\forall t \\in [ 0, T ].", + "image_path": "1d2d42d8203a1281c0350180f9da34f061b399a1e3693bd88900e6bf891a90ac.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "text", + "content": "Then there exists positive constant " + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "inline_equation", + "content": "b'" + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "text", + "content": " independent of " + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "inline_equation", + "content": "C_g'" + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "text", + "content": " which is independent of " + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "text", + "content": " but can depend on " + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 586, + 504, + 609 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 615, + 477, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 615, + 477, + 638 + ], + "spans": [ + { + "bbox": [ + 132, + 615, + 477, + 638 + ], + "type": "interline_equation", + "content": "\\max _ {n = 0, \\dots , \\lfloor T / \\eta^ {0. 
7 5} \\rfloor} \\left| \\mathbb {E} \\left[ g \\left(\\phi^ {\\left(n R _ {\\mathrm {g r p}} + s _ {\\mathrm {c l s}}\\right)}\\right) - g \\left(\\zeta \\left(n \\eta^ {0. 7 5} + s _ {\\mathrm {c l s}} H \\eta^ {2}\\right)\\right) \\right] \\right| \\leq C _ {g} ^ {\\prime} \\eta^ {0. 2 5} (\\log \\frac {1}{\\eta}) ^ {b ^ {\\prime}}.", + "image_path": "f301bb529c92358b091e1e5f8b5dfb48fad26c8acdfacdb025e1137cdb6da0d9.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "text", + "content": "We can view the random variable pairs " + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "inline_equation", + "content": "\\{(\\phi^{(nR_{\\mathrm{grp}} + s_{\\mathrm{cls}})},\\zeta_{n\\eta^{0.75} + s_{\\mathrm{cls}}\\alpha \\eta}):n = 0,\\dots ,\\lfloor T / \\eta^{0.75}\\rfloor \\}" + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "text", + "content": " as reference points and then approximate the value of " + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "inline_equation", + "content": "g(\\phi^{(s)})" + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "inline_equation", + "content": "g(\\zeta (sH\\eta^2))" + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "text", + "content": " with the value at the nearest reference points. 
By Lemmas K.18 and K.23, for " + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "inline_equation", + "content": "0\\leq r\\leq R_{\\mathrm{grp}}" + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 644, + 504, + 682 + ], + "type": "inline_equation", + "content": "0\\leq s\\leq R_{\\mathrm{tot}} - r" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 233, + 687, + 375, + 702 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 687, + 375, + 702 + ], + "spans": [ + { + "bbox": [ + 233, + 687, + 375, + 702 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\| \\phi^ {(s + r)} - \\phi^ {(s)} \\| _ {2} ] = \\tilde {\\mathcal {O}} (\\eta^ {0. 3 7 5}).", + "image_path": "57228d3440c4d0a61629cbbbcd77482c38ceef11b97ef7c46d66b9480476df09.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "content": "Since the values of " + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "inline_equation", + "content": "\\phi^{(s)}" + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "content": " are restricted to a bounded set, " + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "content": " is Lipschitz on that set. Therefore, we have the theorem." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "71" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 70 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 459, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 459, + 94 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 459, + 94 + ], + "type": "text", + "content": "L DERIVING THE SLOW SDE FOR LABEL NOISE REGULARIZATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 504, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 504, + 129 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 504, + 129 + ], + "type": "text", + "content": "In this section, we formulate how label noise regularization works and provide a detailed derivation of the theoretical results in Appendix G." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": "Consider training a model for " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": "-class classification on dataset " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{(x_i, y_i)\\}_{i=1}^N" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": " denotes the input and " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "y_i \\in [C]" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": " denotes the label. Denote by " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "\\Delta_+^{C-1}" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": " the " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "(C-1)" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": "-open simplex. 
Let " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "f(\\theta; x) \\in \\Delta_+^{C-1}" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": " be the model output on input " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": " with parameter " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": ", whose " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": "-th coordinate " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "f_j(\\theta; x)" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": " stands for the probability of " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": " belonging to class " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "\\ell(\\theta; x, y)" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": " be the cross entropy loss given input " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": " and label " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "inline_equation", + "content": "\\ell(\\theta; x, y) = -\\log f_y(\\theta; x)" + }, + { + "bbox": [ + 104, + 133, + 504, + 195 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": "Adding label noise means replacing the true label " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": " with a fresh noisy label " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "\\hat{y}" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": " every time we access the sample. 
Specifically, " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "\\hat{y}" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": " is set as the true label " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": " with probability " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "1 - p" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": " and as any other label with probability " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "\\frac{p}{C - 1}" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": " is the fixed corruption probability. The training loss is defined as " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\boldsymbol{\\theta}) = \\frac{1}{N}\\sum_{i=1}^{N}\\mathbb{E}[\\ell(\\boldsymbol{\\theta};\\boldsymbol{x}_i,\\hat{y}_i)]" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": ", where the expectation is taken over the stochasticity of " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "\\hat{y}_i" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": ". 
Notice that given a sample " + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 104, + 198, + 505, + 258 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 171, + 262, + 505, + 289 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 262, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 171, + 262, + 505, + 289 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) ] = - (1 - p) \\log f _ {y} (\\boldsymbol {\\theta}; \\boldsymbol {x}) - \\frac {p}{C - 1} \\sum_ {j \\neq y} \\log f _ {j} (\\boldsymbol {\\theta}; \\boldsymbol {x}). \\tag {114}", + "image_path": "cae642d74be2be1dda8c3d57e7625adca46af2a6b9892db42199e51be4212451.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": "By the property of cross-entropy loss, (114) attains its global minimum if and only if " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "f_{j} = \\frac{p}{C - 1}" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": ", for all " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "j \\in [C], j \\neq y" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "f_{y} = 1 - p" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": ". 
Due to the large expressiveness of modern deep learning models, there typically exists a set " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "S^{*} := \\{\\pmb{\\theta} \\mid f_{i}(\\pmb{\\theta}) = \\mathbb{E}[\\hat{y}_{i}], \\forall i \\in [N]\\}" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": " such that all elements of " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "S^{*}" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": " minimize " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": ". Then, the manifold " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": " is a subset of " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "S^{*}" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": ". 
The following lemma relates the noise covariance " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}(\\pmb{\\theta}) := \\frac{1}{N}\\sum_{i \\in [N]}\\mathbb{E}[(\\nabla\\ell(\\pmb{\\theta};\\pmb{x}_{i},\\hat{y}_{i}) - \\nabla\\mathcal{L}(\\pmb{\\theta}))(\\nabla\\ell(\\pmb{\\theta};\\pmb{x}_{i},\\hat{y}_{i}) - \\nabla\\mathcal{L}(\\pmb{\\theta}))^{\\top}]" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": " to the hessian " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "\\nabla^{2}\\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in S^{*}" + }, + { + "bbox": [ + 104, + 293, + 504, + 366 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "content": "Lemma L.1. 
If " + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "inline_equation", + "content": "f(\\pmb{\\theta}; \\pmb{x}_i, \\hat{y}_i)" + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\mathcal{C}^2" + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "content": "-smooth on " + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "content": " given any " + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "inline_equation", + "content": "i \\in [N]" + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\hat{y}_i \\in [C]" + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\mathcal{S}^* \\neq \\emptyset" + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "content": ", then for all " + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in \\mathcal{S}^*" + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}(\\pmb{\\theta}) = \\nabla^2 \\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 368, + 504, + 392 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "content": "Proof. Since " + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot)" + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_2" + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "content": "-smooth, " + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\nabla \\mathcal{L}(\\pmb{\\theta}) = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in S^*" + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "content": ". To prove the above lemma, it suffices to show that " + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\forall i \\in [N]" + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\nabla \\ell(\\pmb{\\theta}; \\pmb{x}_i, \\hat{y}_i) \\nabla \\ell(\\pmb{\\theta}; \\pmb{x}_i, \\hat{y}_i)^\\top] = \\nabla^2 \\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "content": ". 
W.L.O.G, let " + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "inline_equation", + "content": "y = 1" + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "content": " and therefore for all " + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} \\in S^*" + }, + { + "bbox": [ + 104, + 403, + 504, + 437 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 216, + 441, + 319, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 441, + 319, + 453 + ], + "spans": [ + { + "bbox": [ + 216, + 441, + 319, + 453 + ], + "type": "interline_equation", + "content": "f _ {1} (\\boldsymbol {\\theta}; \\boldsymbol {x}) = 1 - p =: a _ {1},", + "image_path": "6c9be3d72a1501a8b951e8e9985ba6d58e2cf6922cffe1d6f9b70ad80aa2cb08.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 219, + 455, + 391, + 476 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 455, + 391, + 476 + ], + "spans": [ + { + "bbox": [ + 219, + 455, + 391, + 476 + ], + "type": "interline_equation", + "content": "f _ {j} (\\boldsymbol {\\theta}; \\boldsymbol {x}) = \\frac {p}{C - 1} =: a _ {2}, \\forall j > 1, j \\in [ C ].", + "image_path": "2b7f9472db524d66169bf53f6b9f9603299c01d5675f15ef9888440c7801438e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 480, + 504, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 480, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 480, + 504, + 502 + ], + "type": "text", + "content": "Additionally, let " + }, + { + "bbox": [ + 104, + 480, + 504, + 502 + ], + "type": "inline_equation", + "content": "h(x) \\coloneqq -\\log (x), x \\in \\mathbb{R}^{+}" + }, + { + "bbox": [ + 104, + 480, + 504, + 502 + ], + "type": "text", + "content": ". 
The stochastic gradient " + }, + { + "bbox": [ + 104, + 480, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\nabla \\ell(\\pmb{\\theta}; \\pmb{x}, \\hat{y})" + }, + { + "bbox": [ + 104, + 480, + 504, + 502 + ], + "type": "text", + "content": " follows the distribution:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 185, + 505, + 422, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 505, + 422, + 536 + ], + "spans": [ + { + "bbox": [ + 185, + 505, + 422, + 536 + ], + "type": "interline_equation", + "content": "\\nabla \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) = \\left\\{ \\begin{array}{l l} h ^ {\\prime} (a _ {1}) \\frac {\\partial f _ {1}}{\\partial \\boldsymbol {\\theta}} & \\text {w . p .} 1 - p, \\\\ h ^ {\\prime} (a _ {2}) \\frac {\\partial f _ {j}}{\\partial \\boldsymbol {\\theta}}, & \\text {w . p .} \\frac {p}{C - 1}, \\forall j \\in [ C ], j > 1. \\end{array} \\right.", + "image_path": "5dad5338da5519bc29c5d28b61cbc58d3ac3cfd921305e52e865cf11b6cc632a.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 540, + 285, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 540, + 285, + 551 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 285, + 551 + ], + "type": "text", + "content": "Then the covariance of the gradient noise is:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 154, + 555, + 454, + 617 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 555, + 454, + 617 + ], + "spans": [ + { + "bbox": [ + 154, + 555, + 454, + 617 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} [ \\nabla \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) \\nabla \\ell (\\boldsymbol {\\theta}; \\boldsymbol {x}, \\hat {y}) ^ {\\top} ] = (1 - p) \\left(h ^ {\\prime} \\left(a _ {1}\\right)\\right) ^ {2} \\frac {\\partial f _ {1} \\left(\\boldsymbol {\\theta} ^ 
{*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}} \\left(\\frac {\\partial f _ {1} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}}\\right) ^ {\\top} \\\\ + \\frac {p \\left(h ^ {\\prime} \\left(a _ {2}\\right)\\right) ^ {2}}{C - 1} \\sum_ {j > 1} \\frac {\\partial f _ {j} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}} \\left(\\frac {\\partial f _ {j} \\left(\\boldsymbol {\\theta} ^ {*}\\right)}{\\partial \\boldsymbol {\\theta} ^ {*}}\\right) ^ {\\top}. \\\\ \\end{array}", + "image_path": "c733141ef0574e9e84a52063feb066142d0788b19e41a65760e8511027e5cee2.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 621, + 184, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 621, + 184, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 184, + 632 + ], + "type": "text", + "content": "And the hessian is:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 146, + 634, + 463, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 634, + 463, + 714 + ], + "spans": [ + { + "bbox": [ + 146, + 634, + 463, + 714 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\theta}) = (1 - p) h ^ {\\prime} (a _ {1}) \\frac {\\partial^ {2} f _ {1}}{\\partial \\boldsymbol {\\theta} ^ {2}} + \\frac {p h ^ {\\prime} (a _ {2})}{C - 1} \\sum_ {j > 1} \\frac {\\partial^ {2} f _ {j}}{\\partial \\boldsymbol {\\theta} ^ {2}} \\\\ \\underbrace {\\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad} _ {\\mathcal {T}} \\\\ + (1 - p) h ^ {\\prime \\prime} (a _ {1}) \\frac {\\partial f 
_ {1}}{\\partial \\boldsymbol {\\theta}} \\left(\\frac {\\partial f _ {1}}{\\partial \\boldsymbol {\\theta}}\\right) ^ {\\top} + \\frac {p h ^ {\\prime \\prime} (a _ {2})}{C - 1} \\sum_ {j > 1} \\frac {\\partial f _ {j}}{\\partial \\boldsymbol {\\theta}} \\left(\\frac {\\partial f _ {j} (\\boldsymbol {\\theta})}{\\partial \\boldsymbol {\\theta}}\\right) ^ {\\top}. \\\\ \\end{array}", + "image_path": "4bdd582291730097d467003ae3bec4239dd1c94b4ec2c4746182e1c97577a1ef.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "72" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 71 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 195, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 195, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 195, + 97 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 105, + 81, + 195, + 97 + ], + "type": "inline_equation", + "content": "\\sum_{j\\in [C]}f_i = 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 261, + 102, + 504, + 133 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 102, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 261, + 102, + 504, + 133 + ], + "type": "interline_equation", + "content": "\\frac {\\partial^ {2} f _ {1}}{\\partial \\boldsymbol {\\theta} ^ {2}} = - \\sum_ {j > 1} \\frac {\\partial^ {2} f _ {j}}{\\partial 
\\boldsymbol {\\theta} ^ {2}}. \\tag {115}", + "image_path": "55699e3be01b06299477d2acd793924fdbe3cf7d45dee9ed6f03f40cdaca71b7.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 139, + 275, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 139, + 275, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 139, + 275, + 154 + ], + "type": "text", + "content": "Also, notice that " + }, + { + "bbox": [ + 105, + 139, + 275, + 154 + ], + "type": "inline_equation", + "content": "h^\\prime (x) = -\\frac{1}{x}" + }, + { + "bbox": [ + 105, + 139, + 275, + 154 + ], + "type": "text", + "content": ". Therefore," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 250, + 159, + 505, + 183 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 159, + 505, + 183 + ], + "spans": [ + { + "bbox": [ + 250, + 159, + 505, + 183 + ], + "type": "interline_equation", + "content": "(1 - p) h ^ {\\prime} \\left(a _ {1}\\right) = \\frac {p h ^ {\\prime} \\left(a _ {2}\\right)}{C - 1}. 
\\tag {116}", + "image_path": "7581f4522a3aa63e7f7f124030ee35750079f9071dd23372d85a6f25399359e3.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "type": "text", + "content": "Substituting (115) and (116) into the expression of " + }, + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "type": "text", + "content": " gives " + }, + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "type": "text", + "content": ", which simplifies " + }, + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 104, + 189, + 504, + 213 + ], + "type": "text", + "content": " as the following form:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 217, + 466, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 217, + 466, + 251 + ], + "spans": [ + { + "bbox": [ + 143, + 217, + 466, + 251 + ], + "type": "interline_equation", + "content": "\\nabla^ {2} \\mathcal {L} (\\pmb {\\theta}) = (1 - p) h ^ {\\prime \\prime} (a _ {1}) \\frac {\\partial f _ {1}}{\\partial \\pmb {\\theta}} \\left(\\frac {\\partial f _ {j} (\\pmb {\\theta})}{\\partial \\pmb {\\theta}}\\right) ^ {\\top} + \\frac {p h ^ {\\prime \\prime} (a _ {2})}{C - 1} \\sum_ {j > 1} \\frac {\\partial f _ {j}}{\\partial \\pmb {\\theta}} \\left(\\frac {\\partial f _ {j} (\\pmb {\\theta})}{\\partial \\pmb {\\theta}}\\right) ^ {\\top}.", + "image_path": "5698da94993ba34333660f43dffcb727d8b5007e8eee2808a41e883c788b94cd.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 
105, + 272, + 421, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 272, + 421, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 421, + 285 + ], + "type": "text", + "content": "Again notice that " + }, + { + "bbox": [ + 105, + 272, + 421, + 285 + ], + "type": "inline_equation", + "content": "h''(x) = h'(x)" + }, + { + "bbox": [ + 105, + 272, + 421, + 285 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 105, + 272, + 421, + 285 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^+" + }, + { + "bbox": [ + 105, + 272, + 421, + 285 + ], + "type": "text", + "content": ". Therefore, " + }, + { + "bbox": [ + 105, + 272, + 421, + 285 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\pmb{\\theta}) = \\pmb{\\Sigma}(\\pmb{\\theta})" + }, + { + "bbox": [ + 105, + 272, + 421, + 285 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 494, + 274, + 504, + 283 + ], + "blocks": [ + { + "bbox": [ + 494, + 274, + 504, + 283 + ], + "lines": [ + { + "bbox": [ + 494, + 274, + 504, + 283 + ], + "spans": [ + { + "bbox": [ + 494, + 274, + 504, + 283 + ], + "type": "image", + "image_path": "c34ca2c7035c13ce87c8c0a9518312e2649ffe323b4770bd03aac6d8f4a67397.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 297, + 400, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 400, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 400, + 309 + ], + "type": "text", + "content": "With the property " + }, + { + "bbox": [ + 105, + 297, + 400, + 309 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}(\\pmb{\\theta}) = \\nabla^2\\mathcal{L}(\\pmb{\\theta})" + }, + { + "bbox": [ + 105, + 297, + 400, + 309 + ], + "type": "text", + "content": ", we are ready to prove Theorem G.1." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 320, + 367, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 320, + 367, + 332 + ], + "spans": [ + { + "bbox": [ + 105, + 320, + 367, + 332 + ], + "type": "text", + "content": "Proof of Theorem G.1. Recall the general form of the slow SDE:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 134, + 337, + 505, + 363 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 337, + 505, + 363 + ], + "spans": [ + { + "bbox": [ + 134, + 337, + 505, + 363 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\boldsymbol {\\zeta} (t) = \\frac {1}{\\sqrt {B}} \\partial \\Phi (\\boldsymbol {\\zeta}) \\boldsymbol {\\Sigma} ^ {1 / 2} (\\boldsymbol {\\zeta}) \\mathrm {d} \\boldsymbol {W} (t) + \\frac {1}{2 B} \\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) + (K - 1) \\boldsymbol {\\Psi} (\\boldsymbol {\\zeta}) ] \\mathrm {d} t, \\tag {117}", + "image_path": "398e7640efa1403b1f11c57e54df0cc99ecd2efafec6f1bf27327b75679b077e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 369, + 420, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 369, + 420, + 382 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 420, + 382 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 369, + 420, + 382 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 105, + 369, + 420, + 382 + ], + "type": "text", + "content": " is defined in Definition K.6. 
Since for " + }, + { + "bbox": [ + 105, + 369, + 420, + 382 + ], + "type": "inline_equation", + "content": "\\zeta \\in \\Gamma" + }, + { + "bbox": [ + 105, + 369, + 420, + 382 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 105, + 369, + 420, + 382 + ], + "type": "inline_equation", + "content": "\\Sigma(\\zeta) = \\nabla^2\\mathcal{L}(\\zeta)" + }, + { + "bbox": [ + 105, + 369, + 420, + 382 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 261, + 388, + 505, + 402 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 388, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 261, + 388, + 505, + 402 + ], + "type": "interline_equation", + "content": "\\partial \\Phi (\\zeta) \\Sigma^ {1 / 2} (\\zeta) = \\mathbf {0}. \\tag {118}", + "image_path": "9b42a33f2e99e67eabd34b702df395819803e14ed37a40479bdceac7b97d848f.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 407, + 183, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 407, + 183, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 407, + 183, + 418 + ], + "type": "text", + "content": "Now we show that" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 231, + 423, + 505, + 438 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 423, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 231, + 423, + 505, + 438 + ], + "type": "interline_equation", + "content": "\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) ] = - \\nabla_ {\\Gamma} \\operatorname {t r} \\left(\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})\\right). 
\\tag {119}", + "image_path": "ff9277c0a1bc3c14235a7d1052808810b7ba4adf5706b52da12eec084cf0e790.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 443, + 350, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 443, + 350, + 458 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 350, + 458 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 104, + 443, + 350, + 458 + ], + "type": "inline_equation", + "content": "\\nabla^2\\mathcal{L}(\\zeta) = \\Sigma (\\zeta)" + }, + { + "bbox": [ + 104, + 443, + 350, + 458 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 443, + 350, + 458 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_{\\nabla^2\\mathcal{L}(\\zeta)}[\\Sigma ] = \\frac{1}{2}\\pmb {I}" + }, + { + "bbox": [ + 104, + 443, + 350, + 458 + ], + "type": "text", + "content": ". By Lemma K.4," + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 177, + 463, + 432, + 487 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 463, + 432, + 487 + ], + "spans": [ + { + "bbox": [ + 177, + 463, + 432, + 487 + ], + "type": "interline_equation", + "content": "\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\boldsymbol {\\Sigma} (\\boldsymbol {\\zeta}) ] = - \\frac {1}{2} \\partial \\Phi (\\boldsymbol {\\zeta}) \\nabla^ {3} \\mathcal {L} (\\boldsymbol {\\zeta}) [ \\boldsymbol {I} ] = - \\frac {1}{2} \\nabla_ {\\Gamma} \\mathrm {t r} (\\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})).", + "image_path": "2d3da907b33edc76ec33f5757fcfc14ba35575990d57d0bc1f7183bb96527293.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 491, + 194, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 194, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 194, + 502 + ], + "type": "text", + "content": "Finally, we show that" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 201, + 508, + 505, + 533 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 508, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 201, + 508, + 505, + 533 + ], + "type": "interline_equation", + "content": "\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\Psi (\\boldsymbol {\\zeta}) ] = - \\nabla_ {\\Gamma} \\frac {1}{2 H \\eta} \\operatorname {t r} (F (2 H \\eta \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta}))). \\tag {120}", + "image_path": "b1bf443b35576e028f60aba918205e1f29e9cdcdd1465ce1fe918fc5b338abbd.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "text", + "content": "Define " + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\hat{\\psi}(x) \\coloneqq x\\psi(x) = e^{-x} - 1 + x" + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "text", + "content": ". By definition of " + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\Psi(\\zeta)" + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "text", + "content": ", when " + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\Sigma(\\zeta) = \\nabla^2\\mathcal{L}(\\zeta)" + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\Psi(\\zeta) = \\hat{\\psi}(2\\eta H\\nabla^2\\mathcal{L}(\\zeta))" + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\hat{\\psi}(\\cdot)" + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "text", + "content": " is interpreted as a matrix function. 
Since " + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\psi(2\\eta H\\nabla^2\\mathcal{L}(\\zeta)) \\in \\operatorname{span}\\{\\pmb{u}\\pmb{u}^\\top \\mid \\pmb{u} \\in T_\\zeta^\\perp(\\Gamma)\\}" + }, + { + "bbox": [ + 104, + 539, + 504, + 578 + ], + "type": "text", + "content": ", by Lemma K.4," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 208, + 583, + 402, + 606 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 583, + 402, + 606 + ], + "spans": [ + { + "bbox": [ + 208, + 583, + 402, + 606 + ], + "type": "interline_equation", + "content": "\\partial^ {2} \\Phi (\\boldsymbol {\\zeta}) [ \\Psi (\\boldsymbol {\\zeta}) ] = - \\frac {1}{2} \\partial \\Phi (\\boldsymbol {\\zeta}) \\mathrm {t r} \\psi (2 \\eta H \\nabla^ {2} \\mathcal {L} (\\boldsymbol {\\zeta})).", + "image_path": "dbaa01d80f76d63085967b726ff358eca0f042f8158192ccb59b445cefae3afc.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 611, + 454, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 454, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 454, + 624 + ], + "type": "text", + "content": "By the chain rule, we have (120). Combining (118),(119) and (120) gives the theorem." 
+ } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 494, + 612, + 504, + 621 + ], + "blocks": [ + { + "bbox": [ + 494, + 612, + 504, + 621 + ], + "lines": [ + { + "bbox": [ + 494, + 612, + 504, + 621 + ], + "spans": [ + { + "bbox": [ + 494, + 612, + 504, + 621 + ], + "type": "image", + "image_path": "96cdd3309715f5f0e273dec7f88f6f3d099b9cc141fe0c1bcc7856562356a5e3.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "73" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 72 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 261, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 261, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 261, + 94 + ], + "type": "text", + "content": "M EXPERIMENTAL DETAILS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "content": "In this section, we specify the experimental details that are omitted in the main text. Our experiments are conducted on CIFAR-10 (Krizhevsky et al., 2009) and ImageNet Russakovsky et al. (2015). Our code is available at https://github.com/hmgxr128/Local-SGD. 
Our implementation of ResNet-56 (He et al., 2016) and VGG-16 (Simonyan & Zisserman, 2015) is based on the high-starred repository by Wei Yang" + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "content": " and we use the implementation of ResNet-50 from torchvision 0.3.1. We run all CIFAR-10 experiments with " + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}} = 128" + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "content": " on 8 NVIDIA Tesla P100 GPUs while ImageNet experiments are run on 8 NVIDIA A5000 GPU with " + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}} = 32" + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "content": ". All ImageNet experiments are trained with ResNet-50." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 200, + 506, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 200, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 200, + 506, + 280 + ], + "type": "text", + "content": "We generally adopt the following training strategies. We do not add any momentum unless otherwise stated. We follow the suggestions by Jia et al. (2018) and do not add weight decay to the bias and learnable parameters in the normalization layers. For all models with BatchNorm layers, we go through 100 batches of data with batch size " + }, + { + "bbox": [ + 104, + 200, + 506, + 280 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}}" + }, + { + "bbox": [ + 104, + 200, + 506, + 280 + ], + "type": "text", + "content": " to estimate the running mean and variance before evaluation. Experiments on both datasets follow the standard data augmentation pipeline in He et al. (2016) except the label noise experiments. 
Additionally, we use FFCV (Leclerc et al., 2022) to accelerate data loading for ImageNet training." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 283, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 283, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 283, + 504, + 307 + ], + "type": "text", + "content": "Slightly different from the update rule of Local SGD in Section 1, we use sampling without replacement unless otherwise stated. See Appendix C for implementation details and discussion." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 320, + 339, + 332 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 320, + 339, + 332 + ], + "spans": [ + { + "bbox": [ + 105, + 320, + 339, + 332 + ], + "type": "text", + "content": "M.1 POST-LOCAL SGD EXPERIMENTS IN SECTION 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "text", + "content": "CIFAR-10 experiments. We simulate 32 clients with " + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "inline_equation", + "content": "B = 4096" + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "text", + "content": ". We follow the linear scaling rule and linear learning rate warmup strategy suggested by Goyal et al. (2017). We first run 250 epochs of SGD with the learning rate gradually ramping up from 0.1 to 3.2 for the first 50 epochs. Resuming from the model obtained at epoch 250, we run Local SGD with " + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "inline_equation", + "content": "\\eta = 0.32" + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "text", + "content": ". 
Note that we conduct grid search for the initial learning rate among " + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "inline_equation", + "content": "\\{0.005, 0.01, 0.05, 0.1, 0.15, 0.2\\}" + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "text", + "content": " and choose the learning rate with which parallel SGD " + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "inline_equation", + "content": "(H = 1)" + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "text", + "content": " achieves the best test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. The weight decay " + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "text", + "content": " is set as " + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 341, + 506, + 475 + ], + "type": "text", + "content": ". As for the initialization scheme, we follow Lin et al. (2020b) and Goyal et al. (2017). Specifically, we use Kaiming Normal (He et al., 2015) for the weights of convolutional layers and initialize the weights of fully-connected layers by a Gaussian distribution with mean zero and standard deviation 0.01. The weights for normalization layers are initialized as one. All bias parameters are initialized as zero. We report the mean and standard deviation over 5 runs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "text", + "content": "ImageNet experiments. 
We simulate 256 workers with " + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "inline_equation", + "content": "B = 8192" + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "text", + "content": ". We follow the linear scaling rule and linear learning rate warmup strategy suggested by Goyal et al. (2017). We first run 100 epochs of SGD where the learning rate linearly ramps up from 0.5 to 16 for the first 5 epochs and then decays by a factor of 0.1 at epoch 50. Resuming from epoch 100, we run Local SGD with " + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\eta = 0.16" + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "text", + "content": ". Note that we conduct grid search for the initial learning rate among " + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\{0.05, 0.1, 0.5, 1\\}" + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "text", + "content": " and choose the learning rate with which parallel SGD " + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "inline_equation", + "content": "(H = 1)" + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "text", + "content": " achieves the best test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. The weight decay " + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "text", + "content": " is set as " + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 487, + 506, + 588 + ], + "type": "text", + "content": " and we do not add any momentum. The initialization scheme follows the implementation of torchvision 0.3.1. We report the mean and standard deviation over 3 runs." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 601, + 340, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 340, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 340, + 612 + ], + "type": "text", + "content": "M.2 EXPERIMENTAL DETAILS FOR FIGURES 2 AND 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 622, + 506, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 622, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 622, + 506, + 712 + ], + "type": "text", + "content": "CIFAR-10 experiments. We use ResNet-56 for all CIFAR-10 experiments in the two figures. We simulate 32 workers with " + }, + { + "bbox": [ + 104, + 622, + 506, + 712 + ], + "type": "inline_equation", + "content": "B = 4096" + }, + { + "bbox": [ + 104, + 622, + 506, + 712 + ], + "type": "text", + "content": " and set the weight decay as " + }, + { + "bbox": [ + 104, + 622, + 506, + 712 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 622, + 506, + 712 + ], + "type": "text", + "content": ". For Figures 2(a) and 2(b), we set " + }, + { + "bbox": [ + 104, + 622, + 506, + 712 + ], + "type": "inline_equation", + "content": "\\eta = 0.32" + }, + { + "bbox": [ + 104, + 622, + 506, + 712 + ], + "type": "text", + "content": ", which is the same as the learning rate after decay in Figure 1(a). For Figure 2(a), we adopt the same initialization scheme introduced in the corresponding paragraph in Appendix M.1. For Figures 2(b), 2(e) and 5(c), we use the model at epoch 250 in Figure 1(a) as the pre-trained model. Additionally, we use a training budget of 250 epochs for Figure 2(e). In Figure 5(e), we use Local SGD with momentum 0.9, where the momentum buffer is kept locally and never averaged. 
We run SGD with momentum 0.9 for 150 epochs to obtain the pre-trained model, where the learning" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 302, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 302, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 302, + 732 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 117, + 720, + 302, + 732 + ], + "type": "text", + "content": "https://github.com/bearpaw/pytorch-classification" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "74" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 73 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 140, + 52, + 294, + 148 + ], + "blocks": [ + { + "bbox": [ + 140, + 52, + 294, + 148 + ], + "lines": [ + { + "bbox": [ + 140, + 52, + 294, + 148 + ], + "spans": [ + { + "bbox": [ + 140, + 52, + 294, + 148 + ], + "type": "image", + "image_path": "612528912af565b1974a5b0405841ebf3de8c57a1667022b0675b195111bc322.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 160, + 154, + 275, + 165 + ], + "lines": [ + { + "bbox": [ + 160, + 154, + 275, + 165 + ], + "spans": [ + { + "bbox": [ + 160, + 154, + 275, + 165 + ], + "type": "text", + "content": "(a) CIFAR-10, start from #250." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 316, + 53, + 470, + 148 + ], + "blocks": [ + { + "bbox": [ + 316, + 53, + 470, + 148 + ], + "lines": [ + { + "bbox": [ + 316, + 53, + 470, + 148 + ], + "spans": [ + { + "bbox": [ + 316, + 53, + 470, + 148 + ], + "type": "image", + "image_path": "2c96fc87eb556badec6bbbfd81dc240c15e15b91feb73206c8d82f2bd1f576f8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 154, + 449, + 165 + ], + "lines": [ + { + "bbox": [ + 336, + 154, + 449, + 165 + ], + "spans": [ + { + "bbox": [ + 336, + 154, + 449, + 165 + ], + "type": "text", + "content": "(b) ImageNet, start from #100." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 139, + 176, + 293, + 273 + ], + "blocks": [ + { + "bbox": [ + 139, + 176, + 293, + 273 + ], + "lines": [ + { + "bbox": [ + 139, + 176, + 293, + 273 + ], + "spans": [ + { + "bbox": [ + 139, + 176, + 293, + 273 + ], + "type": "image", + "image_path": "15b9a9acdf8d649523f76a01f91a745314bc26039553bf831b31cfae87f772b9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 278, + 295, + 289 + ], + "lines": [ + { + "bbox": [ + 137, + 278, + 295, + 289 + ], + "spans": [ + { + "bbox": [ + 137, + 278, + 295, + 289 + ], + "type": "text", + "content": "(c) CIFAR-10, start from #250, optimal " + }, + { + "bbox": [ + 137, + 278, + 295, + 289 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 137, + 278, + 295, + 289 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 315, + 177, + 469, + 273 + ], + "blocks": [ + { + "bbox": [ + 315, + 177, + 469, + 273 + ], + "lines": [ + { + "bbox": [ + 315, + 177, + 469, + 273 + ], + "spans": [ + { + "bbox": [ + 315, + 177, + 469, + 273 + ], + "type": "image", + "image_path": "e69a09c2ea1ac40840af919aaed448f9ff983a5c9933c7c9ba82086774a9ec9f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 278, + 469, + 289 + ], + "lines": [ + { + "bbox": [ + 313, + 278, + 469, + 289 + ], + "spans": [ + { + "bbox": [ + 313, + 278, + 469, + 289 + ], + "type": "text", + "content": "(d) ImageNet, start from #100, optimal " + }, + { + "bbox": [ + 313, + 278, + 469, + 289 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 313, + 278, + 469, + 289 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 181, + 295, + 425, + 308 + ], + "lines": [ + { + "bbox": [ + 181, + 295, + 425, + 308 + ], + "spans": [ + { + "bbox": [ + 181, + 295, + 425, + 308 + ], + "type": "text", + "content": "Figure 10: The learning curves for experiments in Figure 4." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "content": "rate ramps up from 0.05 to 1.6 linearly in the first 150 epochs. 
Note that we conduct grid search for the initial learning rate among " + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "inline_equation", + "content": "\\{0.01, 0.05, 0.1, 0.15, 0.2\\}" + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "content": " and choose the learning rate with which parallel SGD " + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "inline_equation", + "content": "(H = 1)" + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "content": " achieves the highest test accuracy. We also make sure that the optimal learning rate resides in the middle of the set. Resuming from epoch 150, we run Local SGD " + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "inline_equation", + "content": "H = 1" + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "content": " (i.e., SGD) and 24 with " + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "inline_equation", + "content": "\\eta = 0.16" + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "content": " and decay " + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "content": " by 0.1 at epoch 226. For Local SGD " + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "inline_equation", + "content": "H = 900" + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "content": ", we resume from the model at epoch 226 of " + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "inline_equation", + "content": "H = 24" + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "inline_equation", + "content": "\\eta = 0.016" + }, + { + "bbox": [ + 104, + 320, + 506, + 399 + ], + "type": "text", + "content": ". 
We report the mean and standard deviation over 3 runs for Figures 2(a), 2(b) and 5(c), and over 5 runs for Figure 2(e)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 409, + 506, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 409, + 506, + 488 + ], + "spans": [ + { + "bbox": [ + 104, + 409, + 506, + 488 + ], + "type": "text", + "content": "ImageNet experiments. We simulate 256 clients with " + }, + { + "bbox": [ + 104, + 409, + 506, + 488 + ], + "type": "inline_equation", + "content": "B = 8192" + }, + { + "bbox": [ + 104, + 409, + 506, + 488 + ], + "type": "text", + "content": " and set the weight decay as " + }, + { + "bbox": [ + 104, + 409, + 506, + 488 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 409, + 506, + 488 + ], + "type": "text", + "content": ". In Figure 2(d), both Local SGD and SGD start from the same random initialization. We warm up the learning rate from 0.1 to 3.2 in the first 5 epochs and decay the learning rate by a factor of 0.1 at epochs 50 and 100. For Figures 2(c), 2(f) and 5(d), we use the model at epoch 100 in Figure 1(b) as the pre-trained model. In Figure 2(c), we set the learning rate as 0.16, which is the same as the learning rate after epoch 100 in Figure 1(b). Finally, in Figures 2(c), 2(f), 5(b) and 5(d), we report the mean and average over 3 runs." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 500, + 313, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 500, + 313, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 500, + 313, + 511 + ], + "type": "text", + "content": "M.3 DETAILS FOR EXPERIMENTS IN FIGURE 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 520, + 506, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 506, + 598 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 506, + 598 + ], + "type": "text", + "content": "For all experiments in Figure 6, we train a ResNet-56 model on CIFAR-10. We report mean test accuracy over three runs and the shaded area reflects the standard deviation. For Figure 6(a), we use the same setup as Figures 2(a) and 2(b) for training from random initialization and from a pre-trained model respectively except the learning rate. For Figure 6(b), we resume from the model obtained at epoch 250 in Figure 1(a) and train for another 250 epochs. For Figure 6(c), we follow the same procedure as Figure 1(a) except that we use sampling with replacement. We also ensure that the total numbers of iterations in Figures 1(a) and 6(c) are the same." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 611, + 437, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 437, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 437, + 622 + ], + "type": "text", + "content": "M.4 DETAILS FOR EXPERIMENTS ON THE EFFECT OF THE DIFFUSION TERM" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 631, + 504, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 631, + 504, + 688 + ], + "spans": [ + { + "bbox": [ + 104, + 631, + 504, + 688 + ], + "type": "text", + "content": "CIFAR-10 experiments. The model we use is ResNet-56. 
For Figure 3(a), we first run SGD with batch size 128 and learning rate " + }, + { + "bbox": [ + 104, + 631, + 504, + 688 + ], + "type": "inline_equation", + "content": "\\eta = 0.5" + }, + { + "bbox": [ + 104, + 631, + 504, + 688 + ], + "type": "text", + "content": " for 250 epochs to obtain the pre-trained model. The initialization scheme is the same as the corresponding paragraph in Appendix M.1. Resuming from epoch 250 with " + }, + { + "bbox": [ + 104, + 631, + 504, + 688 + ], + "type": "inline_equation", + "content": "\\eta = 0.05" + }, + { + "bbox": [ + 104, + 631, + 504, + 688 + ], + "type": "text", + "content": ", we run Local SGD with " + }, + { + "bbox": [ + 104, + 631, + 504, + 688 + ], + "type": "inline_equation", + "content": "K = 16" + }, + { + "bbox": [ + 104, + 631, + 504, + 688 + ], + "type": "text", + "content": " until epoch 6000 and run all other setups for the same number of iterations. We report the mean and standard deviation over 3 runs." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "ImageNet experiments. For Figures 3(b) and 4(b), we start from the model obtained at epoch 100 in Figure 1(b). In Figure 3(b), we run Local SGD with " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "K = 256" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": " for another 150 epochs with " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\eta = 0.032" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": ". We run all other setups for the same number of iterations with the same learning rate." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "75" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 74 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 434, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 434, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 434, + 94 + ], + "type": "text", + "content": "M.5 DETAILS FOR EXPERIMENTS ON THE EFFECT OF GLOBAL BATCH SIZE" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": "CIFAR-10 experiments. The model we use is ResNet-56. We resume from the model obtained in Figure 1(a) at epoch 250 and train for another 250 epochs. The local batch size for all runs is " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}} = 128" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": ". 
We first make grid search of " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " for SGD with " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "K = 16" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " among " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\{0.04, 0.08, 0.16, 0.32, 0.64\\}" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " and find that the final test accuracy varies little across different learning rates (within " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "0.1\\%" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": "). Then we choose " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\eta = 0.32" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": ". 
For the green curve in Figure 4(a), we search for the optimal " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "K = 16" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " and keep " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " fixed when scaling " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": ". 
For the red curve in Figure 4(a), we search for the optimal " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " for each " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " among " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\{6, 12, 60, 120, 300, 750, 1500, 3000, 6000, 12000, 24000\\}" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " and also make sure that " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " does not exceed the total number of iterations for 250 epochs. The learning curves for constant and optimal " + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 102, + 506, + 213 + ], + "type": "text", + "content": " are visualized in Figures 10(a) and 10(c) respectively. We report the mean and standard deviation over three runs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": "ImageNet experiments. We start from the model obtained at epoch 100 in Figure 1(b) and train for another 50 epochs. The local batch size for all runs is " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{loc}} = 32" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": ". 
We first make grid search among " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\{0.032, 0.064, 0.16, 0.32\\}" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "H = 1" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " to achieve the best test accuracy and choose " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "H = 0.064" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": ". For the orange curve in Figure 4(b), we search " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " among " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\{2, 4, 6, 13, 26, 52, 78, 156\\}" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "K = 256" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " to achieve the optimal test accuracy and the keep " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " constant as we scale " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": ". 
To obtain the optimal " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " for each " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": ", we search among " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\{6240, 7800, 10400, 12480, 15600, 20800, 24960, 31200\\}" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "K = 16" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\{1600, 3120, 4160, 5200, 6240, 7800, 10400\\}" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "K = 32" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\{312, 480, 520, 624, 800, 975, 1040, 1248, 1560, 1950\\}" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "K = 64" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\{1, 2, 3, 6, 13\\}" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + 
], + "type": "inline_equation", + "content": "K = 512" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": ". The learning curves for constant and optimal " + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 225, + 506, + 335 + ], + "type": "text", + "content": " are visualized in Figures 10(b) and 10(d) respectively. We report the mean and standard deviation over three runs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 349, + 413, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 349, + 413, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 349, + 413, + 361 + ], + "type": "text", + "content": "M.6 DETAILS FOR EXPERIMENTS ON LABEL NOISE REGULARIZATION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 369, + 506, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 506, + 426 + ], + "type": "text", + "content": "For all label noise experiments, we do not use data augmentation, use sampling with replacement, and set the corruption probability as 0.1. We simulate 32 workers with " + }, + { + "bbox": [ + 104, + 369, + 506, + 426 + ], + "type": "inline_equation", + "content": "B = 4096" + }, + { + "bbox": [ + 104, + 369, + 506, + 426 + ], + "type": "text", + "content": " in Figure 7 and 4 workers with " + }, + { + "bbox": [ + 104, + 369, + 506, + 426 + ], + "type": "inline_equation", + "content": "B = 512" + }, + { + "bbox": [ + 104, + 369, + 506, + 426 + ], + "type": "text", + "content": " in Figure 8. We use ResNet-56 with GroupNorm with the number of groups 8 for Figure 7(a) and VGG-16 without normalization for Figures 7(b) and 8. Below we list the training details for ResNet-56 and VGG-16 respectively." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "text", + "content": "ResNet-56. As for the model architecture, we replace the batch normalization layer in Yang's implementation with group normalization such that the training loss is independent of the sampling order. We also use Swish activation (Ramachandran et al., 2017) in place of ReLU to ensure the smoothness of the loss function. We generate the pre-trained model by running label noise SGD with corruption probability " + }, + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "inline_equation", + "content": "p = 0.1" + }, + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "text", + "content": " for 500 epochs (6,000 iterations). We initialize the model by the same strategy introduced in the first paragraph of Appendix M.1. Applying the linear warmup scheme proposed by Goyal et al. (2017), we gradually ramp up the learning rate " + }, + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "text", + "content": " from 0.1 to 3.2 for the first 20 epochs and multiply the learning rate by 0.1 at epoch 250. All subsequent experiments in Figure 7(a) (a) use learning rate 0.1. The weight decay " + }, + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "text", + "content": " is set as " + }, + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 437, + 506, + 559 + ], + "type": "text", + "content": ". 
Note that adding weight decay in the presence of normalization accelerates the limiting dynamics and will not affect the implicit regularization on the original loss function (Li et al., 2022)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "text", + "content": "VGG-16. We follow Yang's implementation of the model architecture except that we replace maximum pooling with average pooling and use Swish activation (Ramachandran et al., 2017) to make the training loss smooth. We initialize all weight parameters by Kaiming Normal and all bias parameters as zero. The pre-trained model is obtained by running label noise SGD with total batch size 4096 and corruption probability " + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "inline_equation", + "content": "p = 0.1" + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "text", + "content": " for 6000 iterations. We use a linear learning rate warmup from 0.1 to 0.5 in the first 500 iterations. All runs in Figures 7(b) and 8 resume from the model obtained by SGD with label noise. In Figure 7(b), we use learning rate " + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "inline_equation", + "content": "\\eta = 0.1" + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "text", + "content": ". 
In Figure 8, we set " + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "inline_equation", + "content": "\\eta = 0.005" + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "inline_equation", + "content": "H = 97,000" + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "inline_equation", + "content": "\\eta = 0.01" + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "text", + "content": " for SGD " + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "inline_equation", + "content": "(H = 1)" + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "text", + "content": ". The weight decay " + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 570, + 506, + 659 + ], + "type": "text", + "content": " is set as zero." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "76" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 75 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Why adversarial training can hurt robust accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_content_list.json b/2023/Why adversarial training can hurt robust accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..72028fcbaddf16202fec9ea84db3ec4bf9eaf9ec --- /dev/null +++ b/2023/Why adversarial training can hurt robust accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_content_list.json @@ -0,0 +1,4954 @@ +[ + { + "type": "text", + "text": "WHY ADVERSARIAL TRAINING CAN HURT ROBUST ACCURACY", + "text_level": 1, + "bbox": [ + 171, + 99, + 679, + 147 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jacob Clarysse1, Julia Hörrmann2, Fanny Yang1", + "bbox": [ + 181, + 167, + 524, + 184 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Department of Computer Science, ETH Zürich", + "2. 
Department of Mathematics, ETH Zürich" + ], + "bbox": [ + 183, + 185, + 509, + 210 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{jacob.clarysse;fan.yang}@inf.ethz.ch;", + "bbox": [ + 183, + 212, + 550, + 226 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{julia.hoerrmann}@stat.math.ethz.ch", + "bbox": [ + 183, + 226, + 524, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 276, + 547, + 291 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Machine learning classifiers with high test accuracy often perform poorly under adversarial perturbations. It is commonly believed that adversarial training alleviates this issue. In this paper, we demonstrate that, surprisingly, the opposite can be true for a natural class of perceptible perturbations — even though adversarial training helps when enough data is available, it may in fact hurt robust generalization in the small sample size regime. We first prove this phenomenon for a high-dimensional linear classification setting with noiseless observations. Using intuitive insights from the proof, we could find perturbations on standard image datasets for which this behavior persists. Specifically, it occurs for perceptible perturbations that effectively reduce class information such as object occlusions or corruptions.", + "bbox": [ + 228, + 305, + 769, + 446 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 460, + 338, + 476 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Today's best-performing classifiers are vulnerable to adversarial attacks Goodfellow et al. (2015); Szegedy et al. (2014) and exhibit high robust error: for many inputs, their predictions change under adversarial perturbations, even though the true class stays the same. 
Such content-preserving (Gilmer et al., 2018), consistent (Raghunathan et al., 2020) attacks can be either perceptible or imperceptible. For image datasets, most work to date studies imperceptible attacks that are based on perturbations with limited strength or attack budget. These include bounded $\\ell_p$ -norm perturbations (Goodfellow et al., 2015; Madry et al., 2018; Moosavi-Dezfooli et al., 2016), small transformations using image processing techniques (Ghiasi et al., 2019; Zhao et al., 2020; Laidlaw et al., 2021; Luo et al., 2018) or nearby samples on the data manifold (Lin et al., 2020; Zhou et al., 2020). Even though they do not visibly change the image by definition, imperceptible attacks can often successfully fool a learned classifier.", + "bbox": [ + 169, + 486, + 553, + 736 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/db053b4d669625641de60d177fdcb522d14ad763796fe72449396ecab37d64da.jpg", + "image_caption": [ + "Figure 1: On the Waterbirds dataset attacked by the adversarial illumination attack, adversarial training (yellow) yields higher robust error than standard training (blue) when the sample size is small, even though it helps for large sample sizes and in a setting where the standard error of standard training is small. (see App. D for details)." + ], + "image_footnote": [], + "bbox": [ + 563, + 494, + 823, + 604 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "On the other hand, perturbations that naturally occur and are physically realizable are commonly perceptible. Some perceptible perturbations specifically target the object to be recognized: these include occlusions (e.g. stickers placed on traffic signs (Eykholt et al., 2018) or masks of different sizes that cover important features of human faces (Wu et al., 2020)) or corruptions that are caused by the image capturing process (animals that move faster than the shutter speed or objects that are not well-lit, see Figure 2). 
Others transform the whole image and are not confined to the object itself, such as rotations, translations or corruptions Engstrom et al. (2019); Kang et al. (2019). In this paper, we refer to such perceptible attacks as directed attacks. In contrast to other attacks, they effectively reduce useful class information in the input for any model, without necessarily changing the true label - we say that they are directed and consistent, more formally defined in Section 2. For example, a stop sign with a small sticker could partially cover the text without losing its semantic meaning. Similarly, a flying bird captured with a long exposure time can induce motion blur in the final image without becoming unrecognizable to the observer.", + "bbox": [ + 168, + 742, + 828, + 924 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/20e801aeba267110296db82a839e1ddf1a7cf57b7ffc5ae8cbe7af8b242fe422.jpg", + "image_caption": [ + "(a) Masks" + ], + "image_footnote": [], + "bbox": [ + 191, + 119, + 277, + 186 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/173687da0046c68a3fb5cdcf13b159e5479d8229046b575ce294cf9d7e57370a.jpg", + "image_caption": [ + "(b) Original" + ], + "image_footnote": [], + "bbox": [ + 279, + 119, + 364, + 186 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ed5c3950496d37e51c824df4b0ece66fb6baded47681d50b5abf1f39ae0d3bd7.jpg", + "image_caption": [ + "(c) Lighting", + "Figure 2: Examples of directed attacks on CIFAR10 and the Waterbirds dataset. In Figure 2a, we corrupt the image with a black mask of size $2 \\times 2$ and in Figure 2c and 2d we change the lighting conditions (darkening) and apply motion blur on the bird in the image respectively. 
All perturbations reduce the information about the class in the images: they are the result of directed attacks. (e) Directed attacks are a subset of perceptible attacks." + ], + "image_footnote": [], + "bbox": [ + 367, + 119, + 454, + 186 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bead8d81be1b2f47ff023313e31b2aa7d1560cadc72723872a6c2696a8f3890a.jpg", + "image_caption": [ + "(d) Blur" + ], + "image_footnote": [], + "bbox": [ + 455, + 119, + 542, + 186 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0df3fd1cecceec31c77a870c31c54cfecf5c5ced125048a3ff1b76430c1686c9.jpg", + "image_caption": [ + "(e) Classification of perturbations" + ], + "image_footnote": [], + "bbox": [ + 544, + 119, + 671, + 186 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d0acf40bcb6c92928c178ce679671e9bff4201863a077344aa30cefc755687d2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 674, + 119, + 803, + 186 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the literature so far, it is widely acknowledged that adversarial training with the same perturbation type and budget as during test time often achieves significantly lower robust error than standard training (Madry et al., 2018; Zhang et al., 2019; Bai et al., 2021).", + "bbox": [ + 169, + 281, + 823, + 324 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In contrast, we show that adversarial training not only increases standard error (Zhang et al., 2019; Tsipras et al., 2019; Stutz et al., 2019; Raghunathan et al., 2020), but surprisingly, in the low sample regime,", + "bbox": [ + 169, + 330, + 825, + 372 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "adversarial training may even increase the robust error compared to standard training!", + "bbox": [ + 209, + 383, + 785, + 398 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Figure 1 illustrates the main message of our paper on the Waterbirds dataset: 
Although adversarial training with directed attacks outperforms standard training when enough training samples are available, it is inferior when the sample size is small (but still large enough to obtain a small standard test error).", + "bbox": [ + 169, + 407, + 823, + 463 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions are as follows:", + "bbox": [ + 171, + 470, + 390, + 484 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We prove that, almost surely, adversarially training a linear classifier on separable data yields a monotonically increasing robust error as the perturbation budget grows. We further establish high-probability non-asymptotic lower bounds on the robust error gap between adversarial and standard training.", + "- Our proof provides intuition for why this lower bound on the gap is particularly large for directed attacks in the low sample regime.", + "- We observe empirically for different directed attacks on real-world image datasets that this behavior persists: adversarial training for directed attacks hurts robust accuracy when the sample size is small." + ], + "bbox": [ + 215, + 494, + 823, + 628 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 ROBUST CLASSIFICATION", + "text_level": 1, + "bbox": [ + 171, + 648, + 418, + 664 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We first introduce our robust classification setting more formally by defining the notions of adversarial robustness, directed attacks and adversarial training used throughout the paper.", + "bbox": [ + 169, + 680, + 823, + 709 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Adversarially robust classifiers For inputs $x \\in \\mathbb{R}^d$ , we consider multi-class classifiers associated with parameterized functions $f_{\\theta}: \\mathbb{R}^d \\to \\mathbb{R}^K$ if $K > 2$ and $f_{\\theta}: \\mathbb{R}^d \\to \\mathbb{R}$ if $K = 2$ , where $K$ is the number of labels. 
For example, $f_{\\theta}(x)$ could be a linear model (as in Section 3) or a neural network (as in Section 4). The output label predictions are obtained by $h(f_{\\theta}(x)) = \\mathrm{sign}(f_{\\theta}(x))$ for $K = 2$ and $h(f_{\\theta}(x)) = \\arg \\max_{k \\in \\{1, \\dots, K\\}} f_{\\theta}(x)_k$ for $K > 2$ .", + "bbox": [ + 169, + 715, + 823, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In order to convince practitioners to use machine learning models in the wild, it is key to demonstrate that they exhibit robustness. One kind of robustness is that they do not change prediction when the input is subject to consistent perturbations, which are small class-preserving perturbations. Mathematically speaking, for the underlying joint data distribution $\\mathbb{P}$ , the model should have a small $\\epsilon_{te}$ -robust error, defined as", + "bbox": [ + 169, + 792, + 825, + 863 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {E r r} (\\theta ; \\epsilon_ {\\mathrm {t e}}) := \\mathbb {E} _ {(x, y) \\sim \\mathbb {P}} \\max _ {x ^ {\\prime} \\in T (x; \\epsilon_ {\\mathrm {t e}})} \\ell \\left(f _ {\\theta} \\left(x ^ {\\prime}\\right), y\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 866, + 823, + 890 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\ell$ is 0 if the class determined by $h(f_{\\theta}(x))$ is equal to $y$ and 1 otherwise. Further, $T(x;\\epsilon_{te})$ indicates a perturbation set around $x$ of a certain transformation type with size $\\epsilon_{test}$ . 
Note that", + "bbox": [ + 169, + 895, + 825, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the (standard) error $\\mathbb{E}_{(x,y)\\sim \\mathbb{P}}\\ell (f_{\\theta}(x),y)$ of a classifier corresponds to $\\mathrm{Err}(\\theta ;0)$ - the robust error evaluated at $\\epsilon_{\\mathrm{te}} = 0$", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Directed attacks The inner maximization in Equation 1 is often called the adversarial attack of the input $x$ for the model $f_{\\theta}$ and the corresponding solution is referred to as the adversarial example. In this paper, we consider directed attacks that effectively reduce the information about the true classes, with image-based examples depicted in Figure 2. For linear classification, we analyze directed attacks in the form of additive perturbations that are constrained to the direction of the optimal decision boundary (see details in Section 3.1). 
In particular, note that the set of directed perturbations is restricted to directions attacking the Bayes optimal classifier.", + "bbox": [ + 169, + 140, + 826, + 238 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Adversarial training A common approach to obtain classifiers with a good robust accuracy is to minimize the training objective $\\mathcal{L}_{\\epsilon_{\\mathrm{tr}}}$ with a surrogate robust classification loss $L$", + "bbox": [ + 169, + 246, + 823, + 275 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\epsilon_ {\\mathrm {t r}}} (\\theta) := \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\max _ {x _ {i} ^ {\\prime} \\in T \\left(x _ {i}; \\epsilon_ {\\mathrm {t r}}\\right)} L \\left(f _ {\\theta} \\left(x _ {i} ^ {\\prime}\\right) y _ {i}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 279, + 823, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "also called adversarial training. In practice, we often use the cross entropy loss $L(z) = \\log (1 + e^{-z})$ and minimize the robust objective by using first order optimization methods such as (stochastic) gradient descent. SGD is also the algorithm that we focus on in both the theoretical and experimental sections. When the desired type of robustness is known in advance, it is standard practice to use the same perturbation set for training as for testing, i.e. $T(x;\\epsilon_{\\mathrm{tr}}) = T(x;\\epsilon_{\\mathrm{te}})$ . For example, Madry et al. (2018) show that the robust error sharply increases for $\\epsilon_{\\mathrm{tr}} < \\epsilon_{\\mathrm{te}}$ . 
In this paper, we demonstrate that for directed attacks in the small sample size regime, in fact, the opposite is true.", + "bbox": [ + 169, + 321, + 825, + 420 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 THEORETICAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 439, + 405, + 454 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we prove for linear functions $f_{\\theta}(x) = \\theta^{\\top}x$ that in the case of directed attacks, robust generalization deteriorates with increasing $\\epsilon_{\\mathrm{tr}}$ . The proof, albeit in a simple setting, provides explanations for why adversarial training fails in the high-dimensional regime for such attacks.", + "bbox": [ + 169, + 469, + 823, + 513 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 SETTING", + "text_level": 1, + "bbox": [ + 171, + 527, + 277, + 541 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We now introduce the precise linear setting used in our theoretical results.", + "bbox": [ + 171, + 554, + 656, + 569 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data model We assume that the ground truth and hypothesis class are given by linear functions $f_{\\theta}(x) = \\theta^{\\top}x$ and the sample size $n$ is lower than the ambient dimension $d$ minus one. The generative distribution $\\mathbb{P}_r$ is similar to (Tsipras et al., 2019; Nagarajan & Kolter, 2019): The label $y \\in \\{+1, -1\\}$ is drawn with equal probability and the covariate vector is sampled as $x = [y_{\\frac{r}{2}}, \\tilde{x}]$ with the random vector $\\tilde{x} \\in \\mathbb{R}^{d-1}$ drawn from a standard normal distribution, i.e. $\\tilde{x} \\sim \\mathcal{N}(0, \\sigma^2 I_{d-1})$ . We would like to learn a classifier that has low robust error by using a dataset $D = (x_i, y_i)_{i=1}^n$ with $n$ i.i.d. samples from $\\mathbb{P}_r$ . 
Intuitively, the separation distance $r$ reflects the signal strength of the data distribution.", + "bbox": [ + 169, + 577, + 823, + 678 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Notice that the distribution $\\mathbb{P}_r$ is noiseless: for a given input $x$ , the label $y = \\mathrm{sign}(x_{[1]})$ is deterministic. Further, the Bayes optimal linear classifier (also referred to as the ground truth) is parameterized by the first standard coordinate vector, $\\theta^{\\star} = e_1$ . By definition, the ground truth is robust against all perturbations that do not change the sign in the first coordinate of the sample, i.e. consistent perturbations, and hence so is the optimal robust classifier.", + "bbox": [ + 169, + 683, + 826, + 753 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Directed attacks In this paper, we focus on consistent directed attacks that by definition efficiently concentrate their attack budget to reduce the class information. For our linear setting this information lies in the first entry. Hence, we can model such attacks by additive perturbations in the first dimension", + "bbox": [ + 169, + 762, + 823, + 804 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nT (x; \\epsilon) = \\left\\{x ^ {\\prime} = x + \\delta \\mid \\delta = \\beta e _ {1} \\text {a n d} - \\epsilon \\leq \\beta \\leq \\epsilon \\right\\}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 806, + 823, + 824 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Note that this attack is always in the direction of the signal dimension, i.e. the Bayes optimal classifier or equivalently the ground truth. Furthermore, when $\\epsilon < \\frac{r}{2}$ , it is a consistent directed attack. 
Observe how this is different from $\\ell_p$ -attacks — an $\\ell_p$ attack, depending on the model, may add a perturbation that only has a very small component in the signal direction.", + "bbox": [ + 169, + 827, + 823, + 882 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "1Note that the result more generally holds for non-sparse models that are not axis aligned by way of a simple rotation $z = Ux$ . In that case the distribution is characterized by $\\theta^{\\star} = u_{1}$ , where $u_{1}$ is the first column vector of $U$ , and a rotated Gaussian in the $d - 1$ dimensions orthogonal to $\\theta^{\\star}$ .", + "bbox": [ + 169, + 883, + 825, + 924 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0119c40cdabdd7186fc2459084cebce9704f5969cd0ff3776576c1b7ada73052.jpg", + "image_caption": [ + "(a) Robust error increase with $\\epsilon_{\\mathrm{tr}}$ (b) Standard-adversarial training (c) Effect of overparameterization" + ], + "image_footnote": [], + "bbox": [ + 202, + 99, + 397, + 191 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f3fa5b762288fef2b2c93a37e2bb3c5345351eff1a6e13f6490d5a706dae9852.jpg", + "image_caption": [ + "Figure 3: Experimental verification of Theorem 3.1. (a) We set $d = 1000$ , $r = 12$ and $n = 50$ . The robust error gap between standard and adversarial training as a function of the adversarial budget $\\epsilon_{\\mathrm{tr}} = 5$ independent experiments (blue) and the lower bound given in Theorem 3.1 (gray). In (b) and (c), we set $d = 10000$ and vary the number of samples $n$ . (b) The robust error of standard and adversarial training with $\\epsilon_{\\mathrm{tr}} = 4.5$ . (c) The error gap and the lower bound of Theorem 3.1. 
For more experimental details see Appendix C." + ], + "image_footnote": [], + "bbox": [ + 400, + 99, + 596, + 191 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a0e1558dbd11522644d5a3300bc9272536b8d0027c140ae0e2b01ff2764679d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 598, + 99, + 792, + 191 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Robust max- $\\ell_2$ -margin classifier We study a classifier that is the solution of running gradient descent on the adversarial logistic loss. A long line of work (Soudry et al., 2018; Ji & Telgarsky, 2019; Chizat & Bach, 2020; Nacson et al., 2019; Liu et al., 2020) studies the implicit bias of (S)GD on the (standard) logistic loss and separable data. In particular, they show directional convergence to the max-margin solution. For the adversarial logistic loss and linear models in particular, (S)GD converges to the robust max- $\\ell_2$ -margin solution (Li et al., 2020),", + "bbox": [ + 169, + 295, + 823, + 380 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} := \\underset {\\| \\theta \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ], x _ {i} ^ {\\prime} \\in T (x _ {i}; \\epsilon_ {\\mathrm {t r}})} {\\min } y _ {i} \\theta^ {\\top} x _ {i} ^ {\\prime}. 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 386, + 823, + 416 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Even though our result is proven for the max- $\\ell_2$ -margin classifier, it can easily be extended to other interpolators.", + "bbox": [ + 169, + 422, + 823, + 452 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 MAIN RESULTS", + "text_level": 1, + "bbox": [ + 171, + 468, + 321, + 481 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We are now ready to characterize the $\\epsilon_{\\mathrm{te}}$ -robust error as a function of $\\epsilon_{\\mathrm{tr}}$ , the separation $r$ , the dimension $d$ and sample size $n$ of the data. In the theorem statement we use the following quantities", + "bbox": [ + 169, + 493, + 823, + 523 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\varphi_ {\\min } = \\frac {\\sigma}{r / 2 - \\epsilon_ {\\mathrm {t e}}} \\left(\\sqrt {\\frac {d - 1}{n}} - \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right)\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 527, + 679, + 570 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\varphi_ {\\max } = \\frac {\\sigma}{r / 2 - \\epsilon_ {\\mathrm {t e}}} \\left(\\sqrt {\\frac {d - 1}{n}} + \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right)\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 571, + 679, + 613 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "that arise from concentration bounds for the singular values of the random data matrix. Further, let $\\tilde{\\epsilon} := \\frac{r}{2} - \\frac{\\varphi_{\\max}}{\\sqrt{2}}$ and denote by $\\Phi$ the cumulative distribution function of a standard normal.", + "bbox": [ + 169, + 618, + 823, + 650 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Theorem 3.1. Assume $d - 1 > n$ . 
For test samples from $\\mathbb{P}_r$ , perturbation set type $T$ as in Equation 3 and any $0 \\leq \\epsilon_{te} < \\frac{r}{2}$ , the following holds for the $\\epsilon_{te}$ -robust error of the classifier (Equation 1) resulting from $\\epsilon_{tr}$ -adversarial training:", + "bbox": [ + 169, + 654, + 826, + 696 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1. The $\\epsilon_{te}$ -robust error of the $\\epsilon_{tr}$ -robust max-margin estimator reads", + "bbox": [ + 210, + 708, + 658, + 722 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {E r r} \\left(\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}\\right) = \\Phi \\left(- \\frac {\\left(\\frac {r}{2} - \\epsilon_ {t r}\\right)}{\\tilde {\\varphi}}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 729, + 823, + 770 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "for a random quantity $\\tilde{\\varphi} > 0$ depending on $\\sigma, r, \\epsilon_{te}$ and is hence strictly increasing in the adversarial training budget $\\epsilon_{tr}$ .", + "bbox": [ + 225, + 776, + 823, + 805 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2. With probability at least $1 - \\delta$ , we further have $\\varphi_{\\min} \\leq \\tilde{\\varphi} \\leq \\varphi_{\\max}$ and the following lower bound on the robust error increase by adversarially training with size $\\epsilon_{tr}$", + "bbox": [ + 209, + 813, + 823, + 842 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {E r r} \\left(\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}\\right) - \\operatorname {E r r} \\left(\\widehat {\\theta} ^ {0}; \\epsilon_ {t e}\\right) \\geq \\Phi \\left(\\frac {r / 2}{\\varphi_ {\\min }}\\right) - \\Phi \\left(\\frac {r / 2 - \\min \\left\\{\\epsilon_ {t r} , \\widetilde {\\epsilon} \\right\\}}{\\varphi_ {\\min }}\\right). 
\\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 848, + 823, + 883 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proof can be found in Appendix A and primarily relies on estimation of singular values of high-dimensional matrices. Note that the theorem holds for any $0 \\leq \\epsilon_{\\mathrm{te}} < \\frac{r}{2}$ and hence also directly", + "bbox": [ + 169, + 895, + 825, + 926 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/11a7371c179794f387e78dbb1dffa7b6a78670bc7a9813b49ce7cad1a08c10b2.jpg", + "image_caption": [ + "(a) Robust error vs $\\epsilon_{\\mathrm{tr}}$" + ], + "image_footnote": [], + "bbox": [ + 181, + 117, + 390, + 205 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/39bc6a2d0b245932b78657cad1591dc252b093589a70d83d2dc4e67a1653351f.jpg", + "image_caption": [ + "(b) Robust error decomposition" + ], + "image_footnote": [], + "bbox": [ + 395, + 117, + 602, + 205 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/40c9459235c3e2eac9b86560c410a3b8b2f3ccc2441903d056f733d89252a7ed.jpg", + "image_caption": [ + "(c) Intuition in 2D", + "Figure 4: (a) We set $d = 1000$ and $r = 12$ . The robust error as a function of the adversarial training budget $\\epsilon_{\\mathrm{tr}}$ for different $d / n$ . (b) The robust error decomposition into susceptibility and standard error as a function of the adversarial budget $\\epsilon_{\\mathrm{tr}}$ . Full experimental details can be found in Section C. (c) 2D illustration providing intuition for the linear setting. 
The effect of adversarial training with directed attacks is captured in the yellow dotted lines: adversarially perturbed training points move closer to the true boundary which in turn tilts the decision boundary more heavily in the wrong direction." + ], + "image_footnote": [], + "bbox": [ + 604, + 99, + 815, + 207 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "applies to the standard error by setting $\epsilon_{\mathrm{te}} = 0$. In Figure 3, we empirically confirm the statements of Theorem 3.1 by performing multiple experiments on synthetic datasets as described in Subsection 3.1 with different choices of $d / n$ and $\epsilon_{\mathrm{tr}}$. In the first statement, we prove that for small sample-size $(n < d - 1)$ noiseless data, almost surely, the robust error increases monotonically with adversarial training budget $\epsilon_{\mathrm{tr}} > 0$. In Figure 3a, we plot the robust error gap between standard and adversarial logistic regression as a function of the adversarial training budget $\epsilon_{\mathrm{tr}}$ for 5 runs.", + "bbox": [ + 169, + 321, + 823, + 407 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The second statement establishes a simplified lower bound on the robust error increase for adversarial training (for a fixed $\epsilon_{\mathrm{tr}} = \epsilon_{\mathrm{te}}$ ) compared to standard training. In Figures 3a and 3c, we show how the lower bound closely predicts the robust error gap in our synthetic experiments. Furthermore, by the dependence of $\varphi_{\mathrm{min}}$ on the overparameterization ratio $d / n$ , the lower bound on the robust error gap is amplified for large $d / n$ . Indeed, Figure 3c shows how the error gap increases with $d / n$ both theoretically and experimentally. 
However, when $d / n$ increases above a certain threshold, the gap decreases again, as standard training fails to learn the signal and yields a high error (see Figure 3b).", + "bbox": [ + 169, + 412, + 823, + 512 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 PROOF INTUITION", + "text_level": 1, + "bbox": [ + 171, + 523, + 341, + 537 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The reason that adversarial training hurts robust generalization is based on an extreme robust vs. standard error trade-off. We now provide intuition for the effect of directed attacks and the low sample regime on the $\\epsilon_{\\mathrm{tr}}$ -robust max- $\\ell_2$ -margin solution by decomposing the robust error $\\mathrm{Err}(\\theta; \\epsilon_{\\mathrm{te}})$ . Notice that $\\epsilon_{\\mathrm{te}}$ -robust error $\\mathrm{Err}(\\theta; \\epsilon_{\\mathrm{te}})$ can be written as the probability of the union of two events: the event that the classifier based on $\\theta$ is wrong and the event that the classifier is susceptible to attacks:", + "bbox": [ + 169, + 546, + 826, + 617 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {E r r} (\\theta ; \\epsilon_ {\\mathrm {t e}}) = \\mathbb {E} _ {x, y \\sim \\mathbb {P}} \\left[ \\mathbb {I} \\left\\{y f _ {\\theta} (x) < 0 \\right\\} \\vee \\max _ {x ^ {\\prime} \\in T (x; \\epsilon_ {\\mathrm {t e}})} \\mathbb {I} \\left\\{f _ {\\theta} (x) f _ {\\theta} \\left(x ^ {\\prime}\\right) < 0 \\right\\} \\right] \\leq \\operatorname {E r r} (\\theta ; 0) + \\operatorname {S u s c} (\\theta ; \\epsilon_ {\\mathrm {t e}}) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 622, + 823, + 667 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathrm{Susc}(\\theta ;\\epsilon_{\\mathrm{te}})$ is the expectation of the maximization term in Equation 7. 
$\\mathrm{Susc}(\\theta ;\\epsilon_{\\mathrm{te}})$ represents the $\\epsilon_{\\mathrm{te}}$ -attack-susceptibility of a classifier induced by $\\theta$ and $\\mathrm{Err}(\\theta ;0)$ its standard error. In our linear setting, we can lower bound Equation 7 by $\\mathrm{Err}(\\theta ;0) + \\frac{1}{2}\\mathrm{Susc}(\\theta ;\\epsilon_{\\mathrm{te}})$ . Hence, Equation 7 suggests that the robust error can only be small if both the standard error and susceptibility are small. In Figure 4b, we plot the decomposition of the robust error in standard error and susceptibility for adversarial logistic regression with increasing $\\epsilon_{\\mathrm{tr}}$ . We observe that increasing $\\epsilon_{\\mathrm{tr}}$ increases the standard error too drastically compared to the decrease in susceptibility, leading to a drop in robust accuracy. For completeness, in Appendix B, we provide upper and lower bounds for the susceptibility score. We now explain why, in the small-sample size regime, adversarial training with directed attacks 3 may increase standard error to the extent that it dominates the decrease in susceptibility.", + "bbox": [ + 169, + 667, + 826, + 808 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A key observation is that the robust max- $\\ell_2$ -margin solution of a dataset $D = \\{(x_i, y_i)\\}_{i=1}^n$ maximizes the minimum margin that reads $\\min_{i \\in [n]} y_i \\theta^\\top (x_i - y_i \\epsilon_{\\mathrm{tr}} | \\theta_{[1]}| e_1)$ , where $\\theta_{[i]}$ refers to the $i$ -th entry of vector $\\theta$ . Therefore, it simply corresponds to the max- $\\ell_2$ -margin solution of the dataset shifted towards the decision boundary $D_{\\epsilon_{\\mathrm{tr}}} = \\{(x_i - y_i \\epsilon_{\\mathrm{tr}} | \\widehat{\\theta}_{[1]}^{\\epsilon_{\\mathrm{tr}}} | e_1, y_i)\\}_{i=1}^n$ . 
Using this fact, we obtain a closed-form expression of the (normalized) max-margin solution 4 as a function of $\epsilon_{\mathrm{tr}}$ that reads", + "bbox": [ + 169, + 813, + 823, + 891 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}} = \frac {1}{\sqrt {(r - 2 \epsilon_ {\mathrm {t r}}) ^ {2} + 4 \tilde {\gamma} ^ {2}}} \left[ r - 2 \epsilon_ {\mathrm {t r}}, 2 \tilde {\gamma} \tilde {\theta} \right], \tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 896, + 823, + 929 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\| \tilde{\theta} \|_2 = 1$ and $\tilde{\gamma} > 0$ is a random quantity associated with the max- $\ell_2$ -margin solution of the $d - 1$ dimensional Gaussian inputs orthogonal to the signal direction (see Lemma A.1 in Section A).", + "bbox": [ + 169, + 102, + 826, + 133 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In high dimensions, with high probability any two Gaussian random vectors are far apart – in our distributional setting, this corresponds to the vectors being far apart in the non-signal directions. In Figure 4c, we illustrate the phenomenon using a 2D cartoon, where the few samples in the dataset are all far apart in the non-signal direction. We see how shifting the dataset closer to the true decision boundary, may result in a max-margin solution (yellow) that aligns much worse with the ground truth (gray), compared to the estimator learned from the original points (blue). Even though the new (robust max-margin) classifier (yellow) is less susceptible to attacks in the signal dimension, it also uses the signal dimension less. 
Mathematically, this is reflected in the expression of the max-margin solution in Equation 8: We see that the first (signal) dimension is used less as $\\epsilon_{\\mathrm{tr}}$ increases.", + "bbox": [ + 169, + 138, + 826, + 267 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 GENERALITY OF THE RESULTS", + "text_level": 1, + "bbox": [ + 171, + 277, + 429, + 291 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section we discuss how Theorem 3.1 might generalize to other perturbation sets and models.", + "bbox": [ + 169, + 300, + 823, + 316 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Signal direction is known The type of additive perturbations used in Theorem 3.1, defined in Equation 3, is explicitly constrained to the direction of the true signal. This choice is reminiscent of corruptions where every possible perturbation in the set is directly targeted at the object to be recognized, such as motion blur of moving objects. Such corruptions are also studied in the context of domain generalization and adaptation (Schneider et al., 2020).", + "bbox": [ + 169, + 321, + 823, + 393 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Directed attacks in general, however, may also consist of perturbation sets that are only strongly biased towards the true signal direction. They may find the true signal direction only when the inner maximization is exact. The following corollary extends Theorem 3.1 to small $\\ell_1$ -perturbations", + "bbox": [ + 169, + 400, + 823, + 444 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nT (x; \\epsilon) = \\left\\{x ^ {\\prime} = x + \\delta \\mid \\| \\delta \\| _ {1} \\leq \\epsilon \\right\\}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 449, + 823, + 467 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "for $0 < \\epsilon < \\frac{r}{2}$ that reflect such attacks. 
We state the corollary here and give the proof in Appendix A.", + "bbox": [ + 169, + 472, + 826, + 489 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Corollary 3.2. Theorem 3.1 also holds for Equation 4 with perturbation sets defined in Equation 9.", + "bbox": [ + 169, + 491, + 705, + 507 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The proof uses the fact that the inner maximization effectively results in a sparse perturbation equivalent to the attack resulting from the perturbation set defined in Equation 3.", + "bbox": [ + 169, + 511, + 823, + 541 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Other models Motivated by the implicit bias results of (stochastic) gradient descent on the logistic loss, Theorem 3.1 is proven for the max- $\ell_2$ -margin solution. We would like to conjecture that for the data distribution in Section 3, adversarial training can hurt robust generalization also for other models with zero training error (interpolators in short). For example, Adaboost is a widely used algorithm that converges to the max- $\ell_1$ -margin classifier (Telgarsky, 2013). One might argue that for a sparse ground truth, the max- $\ell_1$ -margin classifier should (at least in the noiseless case) have the right inductive bias to alleviate large bias in high dimensions. Hence, in many cases the (sparse) max- $\ell_1$ -margin solution might align with the ground truth for a given dataset. However, we conjecture that even in this case, the robust max- $\ell_1$ -margin solution would be misled to choose a wrong sparse solution. 
This can be seen with the help of the cartoon illustration in Figure 4c.", + "bbox": [ + 169, + 547, + 826, + 688 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 REAL-WORLD EXPERIMENTS", + "text_level": 1, + "bbox": [ + 169, + 700, + 447, + 715 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we demonstrate that the proof intuition of the linear case may generalize to more complex models. Specifically, the insights from Section 3 helped us to identify realistic directed attacks on standard image datasets for which adversarial training hurts robust accuracy in the low sample regime. In what follows, we present experimental results for corruption attacks on the Waterbirds dataset. Due to space constraints, results on the mask attacks on CIFAR-10 can be found in Appendix E. The corresponding experimental details and more results on other additional image datasets (such as the hand gestures dataset) can be found in Appendices D, E and F.", + "bbox": [ + 169, + 727, + 826, + 827 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 DATASETS AND MODELS", + "text_level": 1, + "bbox": [ + 169, + 842, + 385, + 856 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We consider three datasets: the Waterbirds dataset, CIFAR-10 and a hand gesture dataset. Due to space constraints, we describe CIFAR-10 and the hand gesture dataset in Appendix E and F. 
Apart from CIFAR-10 and the hand gesture dataset, we build a new version of the Waterbirds dataset, consisting of images of water- and landbirds of size $256 \\times 256$ and labels that distinguish the two", + "bbox": [ + 169, + 867, + 826, + 926 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 960 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2f1792755900aa0a2ae6a16b916feebafc887c18064a2ec4876c18540ace6f1e.jpg", + "image_caption": [ + "(a) Robust error with increasing $\\epsilon_{\\mathrm{tr}}$" + ], + "image_footnote": [], + "bbox": [ + 176, + 99, + 390, + 200 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2e512a26d35f101675ce39afd985a02b24707f099404378131e77f7f20e80434.jpg", + "image_caption": [ + "Figure 5: Experiments on the Waterbirds dataset considering the adversarial illumination attack with $\\epsilon_{\\mathrm{te}} = 0.3$ . We plot the mean and standard deviation of the mean of several independent experiments. (a) The robust error increases with larger $\\epsilon_{\\mathrm{tr}}$ in the low sample size regime. (b) We set $n = 20$ and plot the robust error decomposition as in Equation 7 with increasing $\\epsilon_{\\mathrm{tr}}$ . While the susceptibility decreases slightly, the increase in standard error is much more severe, resulting in an increase in robust error. (c) The robust error of standard training and adversarial training as a function of the number of samples, where the smallest sample size still yields small ( $< 10\\%$ ) standard test error for standard training. While adversarial training hurts for small sample sizes, it helps for larger sample sizes. For more experimental details see App. D." 
+ ], + "image_footnote": [], + "bbox": [ + 395, + 102, + 602, + 200 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2bf92660f987cf1228c27d11c2f82a9521dc4cd942f53ba6f8942dbe360b164d.jpg", + "image_caption": [ + "(b) Robust error decomposition", + "(c) Robust error vs. #samples" + ], + "image_footnote": [], + "bbox": [ + 607, + 108, + 820, + 200 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "types of birds. Using code provided by Sagawa et al. (2020), we construct the dataset as follows: First, we sample equally many water- and landbirds from the CUB-200 dataset (Welinder et al., 2010). Then, we segment the birds and paste them onto a background image that is randomly sampled (without replacement) from the Places-256 dataset (Zhou et al., 2017). Also, following the choice of Sagawa et al. (2020), we use as models a ResNet50 and a ResNet18 that were both pretrained on ImageNet and achieve near perfect standard accuracy. In Appendix D, we complement the results of this section by reporting the results of similar experiments with different architectures.", + "bbox": [ + 169, + 340, + 826, + 439 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 IMPLEMENTATION OF THE DIRECTED ATTACKS", + "text_level": 1, + "bbox": [ + 171, + 455, + 539, + 468 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we consider two attacks on the Waterbirds dataset: motion blur and adversarial illumination as depicted in Figure 2. In Appendix E, we also discuss the mask attack, which should mimic occlusions of objects in images that are physically realizable (Eykholt et al., 2018; Wu et al., 2020). On the other hand, motion blur may arise naturally when photographing fast moving objects with a slow shutter speed. Lastly, adversarial illumination may result from adversarial lighting conditions. 
Next, we describe the motion blur and adversarial illumination attacks in more detail.", + "bbox": [ + 169, + 479, + 826, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Motion blur For the Waterbirds dataset we can implement motion blur attacks on the object (the bird) specifically, a natural corruption that could occur if birds move at speeds that are faster than the shutter speed. The aim is to be robust against all motion blur severity levels up to $M_{max} = 15$ . To simulate motion blur, we apply a motion blur filter with a kernel of size $M$ on the segmented bird before we paste it onto the background image. We can change the severity level of the motion blur by increasing the kernel size of the filter. See Appendix D for concrete expressions of the motion blur kernel. Intuitively the worst attack should be the most severe blur, rendering a search over a range of severity superfluous. However, similar to rotations, this is not necessarily true in practice since the training loss on neural networks is generally nonconvex. Therefore, for an exact evaluation of the robust error at test time, we perform a full grid search over all kernel sizes in $[1,2,\\dots,M_{max}]$ . We refer to Figure 2d and Section D for an illustration of our motion blur attack. During training time, we perform an approximate search over kernels with sizes $2i$ for $i = 1,\\dots,M_{max}/2$ .", + "bbox": [ + 169, + 571, + 826, + 741 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Adversarial illumination As a second attack on the Waterbirds dataset, we consider adversarial illumination. The adversary can darken or brighten the bird without corrupting the background of the image. The attack aims to model images where the object at interest is hidden in shadows or placed against bright light. 
To compute the adversarial illumination attack, we modify the brightness of the segmented bird by adding a constant $a \\in [-\\epsilon_{\\mathrm{te}}, \\epsilon_{\\mathrm{te}}]$ to all pixel values, before pasting the bird onto the background image. With an analogous argument as for the adversarial search for motion blur, the exact evaluation requires an actual search over the interval $[- \\epsilon_{\\mathrm{te}}, \\epsilon_{\\mathrm{te}}]$ . We find the most adversarial lighting level, i.e. the value of $a$ , by equidistantly partitioning the interval $[- \\epsilon_{\\mathrm{te}}, \\epsilon_{\\mathrm{te}}]$ in $K$ steps and performing a full list-search over all steps. See Figure 2c and Appendix D for an illustration of the adversarial illumination attack. We choose $K = 65, 33$ during test and training time respectively.", + "bbox": [ + 169, + 747, + 826, + 888 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Adversarial training For all datasets and attacks, we run SGD until convergence on the robust cross-entropy loss 2. In each iteration, we search for an adversarial example as described above and", + "bbox": [ + 169, + 895, + 825, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9b0fbfe233ca438fe0d94437618e0cef512148bc02efadc540b13a3ead58cd5d.jpg", + "image_caption": [ + "(a) Robust error with increasing $\\epsilon_{\\mathrm{tr}}$" + ], + "image_footnote": [], + "bbox": [ + 183, + 99, + 390, + 199 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6cd0e52c6637acdb28d0b7b8c2632185837ce80d86fca17c63afd52390a4e5b2.jpg", + "image_caption": [ + "Figure 6: Experiments on the (subsampled) Waterbirds dataset using the motion blur attack. 
(a) Even though adversarial training hurts robust generalization for low sample size ( $n = 20$ ), it helps for $n = 50$ . (b) For $n = 20$ , the decomposition of the robust error in standard error and susceptibility as a function of adversarial budget $\\epsilon_{\\mathrm{tr}}$ . The increase in standard error is more severe than the drop in susceptibility, leading to a slight increase in robust error. (c) The robust error of standard and adversarial training on settings where the test error after standard training is small as a function of the number of samples. While adversarial training hurts for small sample sizes, it helps for larger sample sizes. For each experiment we plot the mean and standard deviation of the mean of independent experiments. For more experimental details see App. D." + ], + "image_footnote": [], + "bbox": [ + 395, + 99, + 602, + 199 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a05f6749a5966589bd482fc2270139528f0e355f0c79e9729cedbf98242a810c.jpg", + "image_caption": [ + "(b) Robust error decomposition", + "(c) Robust error vs. #samples" + ], + "image_footnote": [], + "bbox": [ + 607, + 101, + 812, + 199 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "update the weights using a gradient with respect to the resulting perturbed example (Goodfellow et al., 2015; Madry et al., 2018). For every experiment, we choose the learning rate and weight decay parameters that minimize the robust error on a hold-out dataset.", + "bbox": [ + 169, + 344, + 823, + 387 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 ADVERSARIAL TRAINING CAN HURT ROBUST GENERALIZATION", + "text_level": 1, + "bbox": [ + 171, + 402, + 653, + 415 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We now present our experimental results on the Waterbirds dataset for both motion blur and adversarial illumination attacks. 
First of all, Figure 5a and 6a show that the phenomenon characterized in the linear setting by Theorem 3.1 also occurs for directed attacks on the Waterbirds dataset: as we increase the adversarial training budget $\\epsilon_{\\mathrm{tr}}$ starting from zero (standard training), the robust error monotonically increases.", + "bbox": [ + 169, + 424, + 823, + 494 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Furthermore, to gain intuition as described in Section 3.3, we also plot the robust error decomposition (Equation 7) consisting of the standard error and susceptibility in Figure 5b and 6b. Recall that we measure susceptibility as the fraction of data points in the test set for which the classifier predicts a different class under an adversarial attack. As in our linear example, we observe an increase in robust error despite a slight drop in susceptibility, because of the more severe increase in standard error. Moreover, Figures 1 and 6c show that analogous to our linear example, this phenomenon is specific to the low sample regime: for large sample size adversarial training outperforms standard training as expected. Note again that even the smallest sample size is large enough to yield a standard test error $< 10\\%$ for standard training. Similar experiments for CIFAR-10 can be found in Appendix E. 
Finally, we empirically confirm in Appendix D.8 that our phenomenon is specific to directed attacks: for undirected attacks such as bounded $\\ell_{\\infty}$ and $\\ell_{2}$ -ball perturbations, adversarial training helps robust generalization also in the low sample size regime.", + "bbox": [ + 169, + 501, + 826, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 DISCUSSION", + "text_level": 1, + "bbox": [ + 171, + 683, + 303, + 696 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We now discuss how different algorithmic choices, motivated by related work, might affect how adversarial training hurts robust generalization.", + "bbox": [ + 169, + 705, + 823, + 734 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Catastrophic overfitting Often the worst-case perturbation during adversarial training is found using an approximate algorithm such as SGD. It is common belief that using the strongest attack (in the motion blur case, full grid search) during training also results in better robust generalization. In particular, the literature on catastrophic overfitting shows that weaker attacks during training lead to bad performance on stronger attacks during testing (Wong et al., 2020; Andriushchenko & Flammarion, 2020; Li et al., 2021). Our results suggest the opposite in the low sample size regime for directed attacks: the weaker the attack during training, the better adversarial training performs.", + "bbox": [ + 169, + 744, + 826, + 844 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Robust overfitting Recent work observes empirically (Rice et al., 2020) and theoretically (Sanyal et al., 2020; Donhauser et al., 2021), that perfectly minimizing the adversarial loss during training might in fact be suboptimal for robust generalization; that is, classical regularization techniques might lead to higher robust accuracy. This phenomenon is often referred to as robust overfitting. 
May the phenomenon be mitigated using standard regularization techniques? In Appendix D we shed light", + "bbox": [ + 169, + 854, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "on this question and show that adversarial training hurts robust generalization even when standard regularization methods such as early stopping are used.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 152, + 341, + 167 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Robust and non-robust useful features In the words of Ilyas et al. (2019) and Springer et al. (2021) we can describe the intuition behind \"our phenomenon\" as follows: for directed attacks, all robust features become less useful, but adversarial training uses robust features more. In the small sample-size regime, $n < d - 1$ in particular, robust learning assigns too much weight on the robust (possibly non-useful) features that then dominate the non-robust (but useful) features. Even though they define these concepts, they don't make our statement, but show that adversarial training reduces the reliance on non-robust but possibly useful features.", + "bbox": [ + 169, + 176, + 826, + 273 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Small sample size and robustness A direct consequence of Theorem 3.1 is that in order to achieve the same robust error as standard training, adversarial training requires more samples. This statement might remind the reader of sample complexity results for robust generalization in Schmidt et al. (2018); Yin et al. (2019); Khim & Loh (2018). 
While those results compare sample complexity bounds for standard vs. robust error, our theorem statement compares two algorithms, standard vs. adversarial training, with respect to the robust error.", + "bbox": [ + 169, + 282, + 826, + 367 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Trade-off between standard and robust error Many papers observed that even though adversarial training decreases robust error compared to standard training, it may lead to an increase in standard test error Madry et al. (2018); Zhang et al. (2019). For example, Tsipras et al. (2019); Zhang et al. (2019); Javanmard et al. (2020); Dobriban et al. (2020); Chen et al. (2020) study settings where the Bayes optimal robust classifier is not equal to the Bayes optimal (standard) classifier (i.e. the perturbations are inconsistent or the dataset is non-separable). Raghunathan et al. (2020) study consistent perturbations, as in our paper, and prove that for small sample size, fitting adversarial examples can increase standard error even in the absence of noise. Empirically, Dong et al. (2021); Mendonça et al. (2022) show that for $\\ell_p$ -attacks low-quality data might be the main cause of the trade-off. While aforementioned works focus on the decrease in standard error, we prove that for directed attacks, in the small sample regime adversarial training may in fact increase robust error.", + "bbox": [ + 169, + 375, + 826, + 527 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Mitigation of the trade-off A long line of work has proposed procedures to mitigate the trade-off between robust and standard accuracy. For example Alayrac et al. (2019); Carmon et al. (2019); Zhai et al. (2019); Raghunathan et al. (2020) study robust self training, which leverages a large set of unlabelled data, while Lee et al. (2020); Lamb et al. (2019); Xu et al. (2020) use data augmentation by interpolation. Ding et al. (2020); Balaji et al. (2019); Cheng et al. 
(2020) on the other hand propose to use adaptive perturbation budgets $\\epsilon_{\\mathrm{tr}}$ that vary across inputs. The intuition behind our theoretical analysis suggests that the standard mitigation procedures for imperceptible perturbations may not work for perceptible directed attacks, because all relevant features are non-robust. We leave a thorough empirical study as interesting future work.", + "bbox": [ + 169, + 535, + 826, + 660 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 SUMMARY AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 171, + 672, + 465, + 688 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This paper aims to caution the practitioner against blindly following current widespread practices to increase the robust performance of machine learning models. Specifically, adversarial training is currently recognized to be one of the most effective defense mechanisms for $\\ell_p$ -perturbations, significantly outperforming robust performance of standard training. However, we prove that in the low sample size regime this common wisdom is not applicable for consistent directed attacks, which efficiently focus their attack budget to target the ground truth class information. In terms of follow-up work on directed attacks in the low sample regime, there are some concrete questions that would be interesting to explore. For example, as discussed in Section 5, it would be useful to test whether some methods to mitigate the standard accuracy vs. robustness trade-off would also relieve the perils of adversarial training for directed attacks. Further, we hypothesize that when few samples are available, one should avoid training with attacks that may heavily reduce class information, independently of the attacks at test time. 
If this hypothesis were confirmed, it would break with yet another general rule that the best defense perturbation type should always match the attack during evaluation.", + "bbox": [ + 169, + 704, + 826, + 885 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENT", + "text_level": 1, + "bbox": [ + 173, + 102, + 361, + 118 + ], + "page_idx": 9 + }, + { + "type": "ref_text", + "text": "Supported by the Hasler Foundation grant number 21050.", + "bbox": [ + 171, + 132, + 553, + 148 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 167, + 287, + 183 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jean-Baptiste Alayrac, Jonathan Uesato, Po-Sen Huang, Alhussein Fawzi, Robert Stanforth, and Pushmeet Kohli. Are labels required for improving adversarial robustness? Advances in Neural Information Processing Systems, pp. 12214-12223, 2019.", + "Maksym Andriushchenko and Nicolas Flammarion. Understanding and improving fast adversarial training. Advances in Neural Information Processing Systems, 2020.", + "Tao Bai, Jinqi Luo, Jun Zhao, Bihan Wen, and Qian Wang. Recent advances in adversarial training for adversarial robustness. In Zhi-Hua Zhou (ed.), The 30th International Joint Conference on Artificial Intelligence, pp. 4312-4321. International Joint Conferences on Artificial Intelligence Organization, 2021.", + "Yogesh Balaji, Tom Goldstein, and Judy Hoffman. Instance adaptive adversarial training: Improved accuracy tradeoffs in neural nets. arXiv preprint arXiv:1910.08051, 2019.", + "G. Bradski. The OpenCV Library. Dr. 
Dobb's Journal of Software Tools, 2000.", + "Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, Percy Liang, and John C Duchi. Unlabeled data improves adversarial robustness. In The 33rd International Conference on Neural Information Processing Systems, pp. 11192-11203, 2019.", + "Lin Chen, Yifei Min, Mingrui Zhang, and Amin Karbasi. More data can expand the generalization gap between adversarially robust and standard models. In The 36th International Conference on Machine Learning, pp. 1670-1680, 2020.", + "Minhao Cheng, Qi Lei, Pin-Yu Chen, Inderjit Dhillon, and Cho-Jui Hsieh. Cat: Customized adversarial training for improved robustness. arXiv preprint arXiv:2002.06789, 2020.", + "Lenaic Chizat and Francis Bach. Implicit bias of gradient descent for wide two-layer neural networks trained with the logistic loss. In The 7th International Conference on Learning Theory, pp. 1305-1338, 2020.", + "Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In The 37th International Conference on Machine Learning, pp. 2206-2216, 2020.", + "Gavin Weiguang Ding, Yash Sharma, Kry Yik Chau Lui, and Ruitong Huang. Mma training: Direct input space margin maximization through adversarial training. In The 8th International Conference on Learning Representations, 2020.", + "Edgar Dobriban, Hamed Hassani, David Hong, and Alexander Robey. Provable tradeoffs in adversarially robust classification. arXiv preprint arXiv:2006.05161, 2020.", + "Chengyu Dong, Liyuan Liu, and Jingbo Shang. Data quality matters for adversarial training: An empirical study, 2021.", + "Konstantin Donhauser, Alexandru Tifrea, Michael Aerni, Reinhard Heckel, and Fanny Yang. Interpolation can hurt robust generalization even when there is no noise. The 36th conference on Advances in Neural Information Processing Systems, 2021.", + "Logan Engstrom, Brandon Tran, Dimitris Tsipras, Ludwig Schmidt, and Aleksander Madry. 
Exploring the landscape of spatial robustness. In The 36th International Conference on Machine Learning, pp. 1802-1811, 2019.", + "Kevin Eykholt, Ivan Evtimov, Earlence Fernandes, Bo Li, Amir Rahmati, Chaowei Xiao, Atul Prakash, Tadayoshi Kohno, and Dawn Song. Robust physical-world attacks on deep learning visual classification. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1625-1634, 2018." + ], + "bbox": [ + 171, + 189, + 826, + 922 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 508, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Amin Ghiasi, Ali Shafahi, and Tom Goldstein. Breaking certified defenses: semantic adversarial examples with spoofed robustness certificates. In The 6th International Conference on Learning Representations, 2019.", + "Justin Gilmer, Ryan P Adams, Ian Goodfellow, David Andersen, and George E Dahl. Motivating the rules of the game for adversarial example research. arXiv preprint arXiv:1807.06732, 2018.", + "Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. In The 3th International Conference on Learning Representations, pp. 1-10, 2015.", + "Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Logan Engstrom, Brandon Tran, and Aleksander Madry. Adversarial examples are not bugs, they are features. In The 33rd conference on Advances in Neural Information Processing Systems, pp. 125-136, 2019.", + "Adel Javanmard, Mahdi Soltanolkotabi, and Hamed Hassani. Precise tradeoffs in adversarial training for linear regression. In Conference on Learning Theory, pp. 2034-2078, 2020.", + "Ziwei Ji and Matus Telgarsky. The implicit bias of gradient descent on nonseparable data. In *The 32nd Conference on Learning Theory*, pp. 
1772-1798, 2019.", + "Daniel Kang, Yi Sun, Tom Brown, Dan Hendrycks, and Jacob Steinhardt. Transfer of adversarial robustness between perturbation types. arXiv e-prints, pp. arXiv-1905, 2019.", + "Justin Khim and Po-Ling Loh. Adversarial risk bounds via function transformation. arXiv preprint arXiv:1810.09519, 2018.", + "Cassidy Laidlaw, Sahil Singla, and Soheil Feizi. Perceptual adversarial robustness: Defense against unseen threat models. In The 9th International Conference on Learning Representation, 2021.", + "Alex Lamb, Vikas Verma, Juho Kannala, and Yoshua Bengio. Interpolated adversarial training: Achieving robust neural networks without sacrificing too much accuracy. In The 12th ACM Workshop on Artificial Intelligence and Security, pp. 95-103, 2019.", + "Saehyung Lee, Hyungyu Lee, and Sungroh Yoon. Adversarial vertex mixup: Toward better adversarily robust generalization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020.", + "Bai Li, Shiqi Wang, Suman Jana, and Lawrence Carin. Towards understanding fast adversarial training. arXiv preprint arXiv:2006.03089, 2021.", + "Yan Li, Ethan X.Fang, Huan Xu, and Tuo Zhao. Implicit bias of gradient descent based adversarial training on separable data. In The 8th International Conference on Learning Representations, 2020.", + "Wei-An Lin, Chun Pong Lau, Alexander Levine, Rama Chellappa, and Soheil Feizi. Dual manifold adversarial robustness: Defense against lp and non-lp adversarial attacks. In The 34th conference on Advances in Neural Information Processing Systems, pp. 3487-3498, 2020.", + "Chen Liu, Mathieu Salzmann, Tao Lin, Ryota Tomioka, and Sabine Susstrunk. On the loss landscape of adversarial training: Identifying challenges and how to overcome them. In The 35th conference on Advances in Neural Information Processing Systems, pp. 21476-21487, 2020.", + "Bo Luo, Yannan Liu, Lingxiao Wei, and Qiang Xu. Towards imperceptible and robust adversarial example attacks against neural networks. 
In The 32nd AAAI Conference on Artificial Intelligence and Thirtieth Innovative Applications of Artificial Intelligence Conference and Eighth AAAI Symposium on Educational Advances in Artificial Intelligence, 2018.", + "Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In The 6th International Conference on Learning Representations, 2018.", + "Tomás Mantecón, Carlos R. del Blanco, Fernando Jaureguizar, and Narciso García. A real-time gesture recognition system using near-infrared imagery. PLOS ONE, pp. 1-17, 2019." + ], + "bbox": [ + 171, + 102, + 826, + 925 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Marcele OK Mendonça, Javier Maroto, Pascal Frossard, and Paulo SR Diniz. Adversarial training with informed data selection. In The 30th European Signal Processing Conference (EUSIPCO), pp. 608-612, 2022.", + "Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, and Pascal Frossard. Deepfool: a simple and accurate method to fool deep neural networks. In The IEEE conference on computer vision and pattern recognition (CVPR), pp. 2574-2582, 2016.", + "Abdullah Mujahid, Mazhar Javed Awan, Awais Yasin, Mazin Abed Mohammed, Robertas Damaševićius, Rytis Maskeliūnas, and Karrar Hameed Abdulkareem. Real-time hand gesture recognition based on deep learning yolov3 model. Applied Sciences, 2021.", + "Mor Shpigel Nacson, Nathan Srebro, and Daniel Soudry. Stochastic gradient descent on separable data: Exact convergence with a fixed learning rate. In The 22nd International Conference on Artificial Intelligence and Statistics, pp. 3051-3059, 2019.", + "Vaishnavh Nagarajan and J. 
Zico Kolter. Uniform convergence may be unable to explain generalization in deep learning. In The 33d conference on Advances in Neural Information Processing Systems, pp. 11611-11622, 2019.", + "Munir Oudah, Ali Al-Naji, and Javaan Chahl. Hand gesture recognition based on computer vision: A review of techniques. Journal of Imaging, 2020.", + "Huy Phan. huyvnphan/pytorch_cifar10, 1 2021.", + "Aditi Raghunathan, Sang Michael Xie, Fanny Yang, John Duchi, and Percy Liang. Understanding and mitigating the tradeoff between robustness and accuracy. In The 37th International Conference on Machine Learning, pp. 7909-7919, 2020.", + "Leslie Rice, Eric Wong, and Zico Kolter. Overfitting in adversarially robust deep learning. In The 37th International Conference on Machine Learning, pp. 8093-8104, 2020.", + "Shiori Sagawa, Pang Wei Koh, Tatsunori B. Hashimoto, and Percy Liang. Distributionally robust neural networks. In The 7th International Conference on Learning Representations, 2020.", + "Amartya Sanyal, Puneet K Dokania, Varun Kanade, and Philip Torr. How benign is benign overfitting? In The 8th International Conference on Learning Representations, 2020.", + "Ludwig Schmidt, Shibani Santurkar, Dimitris Tsipras, Kunal Talwar, and Aleksander Madry. Adversarily robust generalization requires more data. In The 32nd conference Advances in Neural Information Processing Systems, 2018.", + "Steffen Schneider, Evgenia Rusak, Luisa Eck, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Improving robustness against common corruptions by covariate shift adaptation. In The 34th conference on Advances in Neural Information Processing Systems, pp. 11539-11551, 2020.", + "Daniel Soudry, Elad Hoffer, Mor Shpigel Nacson, Suriya Gunasekar, and Nathan Srebro. The implicit bias of gradient descent on separable data. Journal of Machine Learning Research, pp. 1-57, 2018.", + "Jacob M Springer, Melanie Mitchell, and Garrett T Kenyon. 
Adversarial perturbations are not so weird: Entanglement of robust and non-robust features in neural network classifiers. arXiv preprint arXiv:2102.05110, 2021.", + "David Stutz, Matthias Hein, and Bernt Schiele. Disentangling adversarial robustness and generalization. In The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 6967-6987, 2019.", + "Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In The 2nd International Conference on Learning Representations, 2014.", + "Matus Telgarsky. Margins, shrinkage, and boosting. In The 30th International Conference on Machine Learning, pp. 307-315, 2013." + ], + "bbox": [ + 171, + 102, + 826, + 925 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. Robustness may be at odds with accuracy. In The 7th International Conference on Learning Representations, 2019.", + "Roman Vershynin. Introduction to the non-asymptotic analysis of random matrices. arXiv preprint arXiv:1011.3027, 2010.", + "P. Welinder, S. Branson, T. Mita, C. Wah, F. Schroff, S. Belongie, and P. Perona. Caltech-UCSD Birds 200. Technical report, California Institute of Technology, 2010.", + "Eric Wong, Leslie Rice, and J. Zico Kolter. Fast is better than free: Revisiting adversarial training. In The 8th International Conference on Learning Representations, 2020.", + "Tong Wu, Liang Tong, and Yevgeniy Vorobeychik. Defending against physically realizable attacks on image classification. 
In The 8th International Conference on Learning Representations, 2020.", + "Minghao Xu, Jian Zhang, Bingbing Ni, Teng Li, Chengjie Wang, Qi Tian, and Wenjun Zhang. Adversarial domain adaptation with domain mixup. In The AAAI Conference on Artificial Intelligence, pp. 6502-6509, 2020.", + "Shuai Yang, Prashan Premaratne, and Peter Vial. Hand gesture recognition: An overview. In The 5th IEEE International Conference on Broadband Network Multimedia Technology, pp. 63-69, 2013.", + "Dong Yin, Ramchandran Kannan, and Peter Bartlett. Rademacher complexity for adversarially robust generalization. In The 36th International conference on machine learning, pp. 7085-7094, 2019.", + "Runtian Zhai, Tianle Cai, Di He, Chen Dan, Kun He, John Hopcroft, and Liwei Wang. Adversarily robust generalization just requires more unlabeled data. arXiv preprint arXiv:1906.00555, 2019.", + "Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric Xing, Laurent El Ghaoui, and Michael Jordan. Theoretically principled trade-off between robustness and accuracy. In *The 36th International Conference on Machine Learning*, pp. 7472-7482, 2019.", + "Zhengyu Zhao, Zhuoran Liu, and Martha Larson. Towards large yet imperceptible adversarial image perturbations with perceptual color distance. In The IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1039-1048, 2020.", + "Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017.", + "Jianli Zhou, Chao Liang, and Jun Chen. Manifold projection for adversarial defense on face recognition. In The 16th European Conference on Computer Vision, pp. 288-305, 2020." 
+ ], + "bbox": [ + 171, + 102, + 826, + 656 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A THEORETICAL STATEMENTS FOR THE LINEAR MODEL", + "text_level": 1, + "bbox": [ + 171, + 102, + 656, + 118 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Before we present the proof of the theorem, we introduce two lemmas are of separate interest that are used throughout the proof of Theorem 1. Recall that the definition of the (standard normalized) maximum- $\\ell_2$ -margin solution (max-margin solution in short) of a dataset $D = \\{(x_i, y_i)\\}_{i=1}^n$ corresponds to", + "bbox": [ + 169, + 132, + 826, + 188 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\theta} := \\underset {\\| \\theta \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ]} {\\min } y _ {i} \\theta^ {\\top} x _ {i}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 405, + 188, + 823, + 215 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "by simply setting $\\epsilon_{\\mathrm{tr}} = 0$ in Equation 4. The $\\ell_2$ -margin of $\\widehat{\\theta}$ then reads $\\min_{i\\in [n]}y_i\\widehat{\\theta}^\\top x_i$ . 
Furthermore for a dataset $D = \\{(x_{i},y_{i})\\}_{i = 1}^{n}$ we refer to the induced dataset $\\widetilde{D}$ as the dataset with covariate vectors stripped of the first element, i.e.", + "bbox": [ + 169, + 219, + 823, + 266 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {D} = \\left\\{\\left(\\tilde {x} _ {i}, y _ {i}\\right) \\right\\} _ {i = 1} ^ {n} := \\left\\{\\left(\\left(x _ {i}\\right) _ {[ 2: d ]}, y _ {i}\\right) \\right\\} _ {i = 1} ^ {n}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 268, + 823, + 287 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $(x_{i})_{[2:d]}$ refers to the last $d - 1$ elements of the vector $x_{i}$ . Furthermore, remember that for any vector $z$ , $z_{[j]}$ refers to the $j$ -th element of $z$ and $e_j$ denotes the $j$ -th canonical basis vector. Further, recall the distribution $\\mathbb{P}_r$ as defined in Section 3.1: the label $y \\in \\{+1, -1\\}$ is drawn with equal probability and the covariate vector is sampled as $x = [y_{\\frac{r}{2}}, \\tilde{x}]$ where $\\tilde{x} \\in \\mathbb{R}^{d-1}$ is a random vector drawn from a standard normal distribution, i.e. $\\tilde{x} \\sim \\mathcal{N}(0, \\sigma^2 I_{d-1})$ . We generally allow $r$ , used to sample the training data, to differ from $r_{\\mathrm{test}}$ , which is used during test time.", + "bbox": [ + 169, + 289, + 826, + 377 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The following lemma derives a closed-form expression for the normalized max-margin solution for any dataset with fixed separation $r$ in the signal component, and that is linearly separable in the last $d - 1$ coordinates with margin $\\tilde{\\gamma}$ .", + "bbox": [ + 169, + 382, + 823, + 424 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Lemma A.1. 
Let $D = \\{(x_i, y_i)\\}_{i=1}^n$ be a dataset that consists of points $(x, y) \\in \\mathbb{R}^d \\times \\{\\pm 1\\}$ and $x_{[1]} = y_{\\frac{r}{2}}$ , i.e. the covariates $x_i$ are deterministic in their first coordinate given $y_i$ with separation distance $r$ . Furthermore, let the induced dataset $\\widetilde{D}$ also be linearly separable by the normalized max- $\\ell_2$ -margin solution $\\tilde{\\theta}$ with an $\\ell_2$ -margin $\\tilde{\\gamma}$ . Then, the normalized max-margin solution of the original dataset $D$ is given by", + "bbox": [ + 169, + 426, + 825, + 502 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\theta} = \\frac {1}{\\sqrt {r ^ {2} + 4 \\tilde {\\gamma} ^ {2}}} \\left[ r, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right]. \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 405, + 502, + 823, + 534 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Further, the standard accuracy of $\\widehat{\\theta}$ for data drawn from $\\mathbb{P}_{r_{test}}$ reads", + "bbox": [ + 169, + 536, + 616, + 554 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {P} _ {r _ {\\text {t e s t}}} \\left(Y \\widehat {\\theta} ^ {\\top} X > 0\\right) = \\Phi \\left(\\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\tilde {\\gamma}}\\right). \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 555, + 823, + 587 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The proof can be found in Section A.3. The next lemma provides high probability upper and lower bounds for the margin $\\tilde{\\gamma}$ of $\\widetilde{D}$ when $\\tilde{x}_i$ are drawn from the normal distribution.", + "bbox": [ + 169, + 595, + 823, + 626 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Lemma A.2. 
Let $\\widetilde{D} = \\{(\\tilde{x}_i, y_i)\\}_{i=1}^n$ be a random dataset where $y_i \\in \\{\\pm 1\\}$ are equally distributed and $\\tilde{x}_i \\sim \\mathcal{N}(0, \\sigma I_{d-1})$ for all $i$ , and $\\tilde{\\gamma}$ is the maximum $\\ell_2$ margin that can be written as", + "bbox": [ + 169, + 628, + 825, + 659 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde{\\gamma} = \\max_{\\| \\tilde{\\theta}\\|_{2}\\leq 1}\\min_{i\\in [n]}y_{i}\\tilde{\\theta}^{\\top}\\tilde{x}_{i}.\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 660, + 580, + 688 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Then, for any $t \\geq 0$ , with probability greater than $1 - 2e^{-\\frac{t^2}{2}}$ , we have $\\tilde{\\gamma}_{\\min}(t) \\leq \\tilde{\\gamma} \\leq \\tilde{\\gamma}_{\\max}(t)$ where", + "bbox": [ + 169, + 691, + 823, + 710 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\gamma} _ {\\mathrm {m a x}} (t) = \\sigma \\left(\\sqrt {\\frac {d - 1}{n}} + 1 + \\frac {t}{\\sqrt {n}}\\right), \\tilde {\\gamma} _ {\\mathrm {m i n}} (t) = \\sigma \\left(\\sqrt {\\frac {d - 1}{n}} - 1 - \\frac {t}{\\sqrt {n}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 712, + 754, + 753 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 PROOF OF THEOREM 3.1", + "text_level": 1, + "bbox": [ + 171, + 766, + 390, + 780 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Given a dataset $D = \\{(x_{i},y_{i})\\}$ drawn from $\\mathbb{P}_r$ , it is easy to see that the (normalized) $\\epsilon_{\\mathrm{tr}}$ -robust max-margin solution 4 of $D$ with respect to signal-attacking perturbations $T(\\epsilon_{\\mathrm{tr}};x_i)$ as defined in Equation 3, can be written as", + "bbox": [ + 169, + 792, + 825, + 834 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}} = \\arg \\max_{\\| \\theta \\|_{2}\\leq 
1}\\min_{i\\in [n],x_{i}^{\\prime}\\in T(x_{i};\\epsilon_{\\mathrm{tr}})}y_{i}\\theta^{\\top}x_{i}^{\\prime} \\\\ = \\operatorname *{arg max}_{\\| \\theta \\|_{2}\\leq 1}\\min_{i\\in [n],|\\beta |\\leq \\epsilon_{\\mathrm{tr}}}y_{i}\\theta^{\\top}(x_{i} + \\beta e_{1}) \\\\ = \\operatorname *{arg max}_{\\| \\theta \\|_{2}\\leq 1}\\min_{i\\in [n]}y_{i}\\theta^{\\top}(x_{i} - y_{i}\\epsilon_{\\mathrm{tr}}\\operatorname {sign}(\\theta_{[1]})e_{1}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 835, + 661, + 926 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Note that by definition, it is equivalent to the (standard normalized) max-margin solution $\\widehat{\\theta}$ of the shifted dataset $D_{\\epsilon_{\\mathrm{tr}}} = \\{(x_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\theta_{[1]})e_1,y_i)\\}_{i = 1}^n$ . Since $D_{\\epsilon_{\\mathrm{tr}}}$ satisfies the assumptions of Lemma A.1, it then follows directly that the normalized $\\epsilon_{\\mathrm{tr}}$ -robust max-margin solution reads", + "bbox": [ + 169, + 102, + 826, + 148 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{\\sqrt {(r - 2 \\epsilon_ {\\mathrm {t r}}) ^ {2} + 4 \\tilde {\\gamma} ^ {2}}} \\left[ r - 2 \\epsilon_ {\\mathrm {t r}}, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right], \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 152, + 823, + 186 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "by replacing $r$ by $r - 2\\epsilon_{\\mathrm{tr}}$ in Equation 12. 
Similar to above, $\\tilde{\\theta} \\in R^{d-1}$ is the (standard normalized) max-margin solution of $\\{(\\tilde{x}_i, y_i)\\}_{i=1}^n$ and $\\tilde{\\gamma}$ the corresponding margin.", + "bbox": [ + 169, + 194, + 825, + 226 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Proof of 1. We can now compute the $\\epsilon_{\\mathrm{te}}$ -robust accuracy of the $\\epsilon_{\\mathrm{tr}}$ -robust max-margin estimator $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ for a given dataset $D$ as a function of $\\tilde{\\gamma}$ . Note that in the expression of $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ , all values are fixed for a fixed dataset, while $0 \\leq \\epsilon_{\\mathrm{tr}} \\leq r - 2\\tilde{\\gamma}_{\\mathrm{max}}$ can be chosen. First note that for a test distribution $\\mathbb{P}_r$ , the $\\epsilon_{\\mathrm{te}}$ -robust accuracy, defined as one minus the robust error (Equation 1), for a classifier associated with a vector $\\theta$ , can be written as", + "bbox": [ + 169, + 241, + 826, + 315 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\operatorname {A c c} (\\theta ; \\epsilon_ {\\mathrm {t e}}) = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\left\\{\\min _ {x ^ {\\prime} \\in T (X; \\epsilon_ {\\mathrm {t e}})} Y \\theta^ {\\top} x ^ {\\prime} > 0 \\right\\} \\right] \\tag {15} \\\\ = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\{Y \\theta^ {\\top} X - \\epsilon_ {\\mathrm {t e}} \\theta_ {[ 1 ]} > 0 \\} \\right] = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\{Y \\theta^ {\\top} (X - Y \\epsilon_ {\\mathrm {t e}} \\operatorname {s i g n} (\\theta_ {[ 1 ]}) e _ {1}) > 0 \\} \\right] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 320, + 839, + 376 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Now, recall that by Equation 14 and the assumption in the theorem, we have $r - 
2\\epsilon_{\\mathrm{tr}} > 0$ , so that $\\mathrm{sign}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}) = 1$ . Further, using the definition of the $T(\\epsilon_{\\mathrm{tr}};x)$ in Equation 3 and by definition of the distribution $\\mathbb{P}_r$ , we have $X_{[1]} = Y\\frac{r}{2}$ . Plugging into Equation 15 then yields", + "bbox": [ + 169, + 380, + 823, + 426 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\operatorname {A c c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\left\\{Y \\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} ^ {\\top} \\left(X - Y \\epsilon_ {\\mathrm {t e}} e _ {1}\\right) > 0 \\right\\} \\right] \\\\ = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\left\\{Y \\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}} \\top} \\left(X _ {- 1} + Y \\left(\\frac {r}{2} - \\epsilon_ {\\mathrm {t e}}\\right) e _ {1}\\right) > 0 \\right\\} \\right] \\\\ = \\mathbb {P} _ {r - 2 \\epsilon_ {\\mathrm {t e}}} (Y \\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}} \\top} X > 0) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 434, + 718, + 508 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $X_{-1}$ is a shorthand for the random vector $X_{-1} = (0; X_{[2]}, \\ldots, X_{[d]})$ . The assumptions in Lemma A.1 ( $D_{\\epsilon_{\\mathrm{tr}}}$ is linearly separable) are satisfied whenever the $n < d - 1$ samples are distinct, i.e. with probability one. 
Hence applying Lemma A.1 with $r_{\\mathrm{test}} = r - 2\\epsilon_{\\mathrm{te}}$ and $r = r - 2\\epsilon_{\\mathrm{tr}}$ yields", + "bbox": [ + 169, + 515, + 826, + 558 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {A c c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = \\Phi \\left(\\frac {r (r - 2 \\epsilon_ {\\mathrm {t e}})}{4 \\sigma \\tilde {\\gamma}} - \\epsilon_ {\\mathrm {t r}} \\frac {r - 2 \\epsilon_ {\\mathrm {t e}}}{2 \\sigma \\tilde {\\gamma}}\\right). \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 564, + 823, + 597 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Theorem statement a) then follows by noting that $\\Phi$ is a monotonically decreasing function in $\\epsilon_{\\mathrm{tr}}$ . The expression for the robust error then follows by noting that $1 - \\Phi(-z) = \\Phi(z)$ for any $z \\in \\mathbb{R}$ and defining", + "bbox": [ + 169, + 603, + 823, + 643 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\varphi} = \\frac {\\sigma \\tilde {\\gamma}}{r / 2 - \\epsilon_ {\\mathrm {t e}}}. \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 444, + 645, + 823, + 676 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Proof of 2. First define $\\varphi_{\\mathrm{min}},\\varphi_{\\mathrm{max}}$ using $\\tilde{\\gamma}_{\\mathrm{min}},\\tilde{\\gamma}_{\\mathrm{max}}$ as in Equation 17. 
Then we have by Equation 16", + "bbox": [ + 169, + 689, + 826, + 715 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\operatorname {E r r} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) - \\operatorname {E r r} \\left(\\widehat {\\theta} ^ {0}; \\epsilon_ {\\mathrm {t e}}\\right) = \\operatorname {A c c} \\left(\\widehat {\\theta} ^ {0}; \\epsilon_ {\\mathrm {t e}}\\right) - \\operatorname {A c c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) \\\\ = \\Phi \\left(\\frac {r / 2}{\\tilde {\\varphi}}\\right) - \\Phi \\left(\\frac {r / 2 - \\epsilon_ {\\mathrm {t r}}}{\\tilde {\\varphi}}\\right) \\\\ = \\int_ {r / 2 - \\epsilon_ {\\mathrm {t r}}} ^ {r / 2} \\frac {1}{\\sqrt {2 \\pi} \\bar {\\varphi}} \\mathbb {E} ^ {- \\frac {x ^ {2}}{\\bar {\\varphi} ^ {2}}} d x \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 720, + 686, + 816 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "By plugging in $t = \\sqrt{\\frac{2\\log 2 / \\delta}{n}}$ in Lemma A.2, we obtain that with probability at least $1 - \\delta$ we have", + "bbox": [ + 169, + 832, + 823, + 854 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\gamma} _ {\\min } := \\sigma \\left[ \\sqrt {\\frac {d - 1}{n}} - \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right) \\right] \\leq \\tilde {\\gamma} \\leq \\sigma \\left[ \\sqrt {\\frac {d - 1}{n}} + \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right) \\right] =: \\tilde {\\gamma} _ {\\max }\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 862, + 846, + 905 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "and equivalently $\\varphi_{\\mathrm{min}}\\leq \\tilde{\\varphi}\\leq \\varphi_{\\mathrm{max}}$", + "bbox": [ + 171, + 909, + 406, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper 
at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Now note the general fact that for all $\\tilde{\\varphi} \\leq \\sqrt{2} x$ the density function $f(\\tilde{\\varphi};x) = \\frac{1}{\\sqrt{2\\pi}\\tilde{\\varphi}}\\mathbb{E}^{-\\frac{x^2}{\\tilde{\\varphi}^2}}$ is monotonically increasing in $\\tilde{\\varphi}$ .", + "bbox": [ + 169, + 101, + 823, + 140 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "By assumption of the theorem, $\\tilde{\\varphi} \\leq \\sqrt{2}(r/2 - \\epsilon_{\\mathrm{tr}})(r/2 - \\epsilon_{\\mathrm{te}})$ so that $f(\\tilde{\\varphi}; x) \\geq f(\\varphi_{\\min}; x)$ for all $x \\in [r/2 - \\epsilon_{\\mathrm{tr}}, r/2]$ and therefore", + "bbox": [ + 169, + 147, + 825, + 178 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\int_ {r / 2 - \\epsilon_ {\\mathrm {t r}}} ^ {r / 2} \\frac {1}{\\sqrt {2 \\pi} \\tilde {\\varphi}} \\mathbb {E} ^ {- \\frac {x ^ {2}}{\\tilde {\\varphi} ^ {2}}} d x \\geq \\int_ {r / 2 - \\epsilon_ {\\mathrm {t r}}} ^ {r / 2} \\frac {1}{\\sqrt {2 \\pi} \\varphi_ {\\mathrm {m i n}}} \\mathbb {E} ^ {- \\frac {x ^ {2}}{\\tilde {\\varphi} ^ {2}}} d x = \\Phi \\left(\\frac {r / 2}{\\varphi_ {\\mathrm {m i n}}}\\right) - \\Phi \\left(\\frac {r / 2 - \\epsilon_ {\\mathrm {t r}}}{\\varphi_ {\\mathrm {m i n}}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 181, + 789, + 219 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "and the statement is proved.", + "bbox": [ + 171, + 223, + 359, + 238 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.2 PROOF OF COROLLARY 3.2", + "text_level": 1, + "bbox": [ + 171, + 253, + 408, + 268 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We now show that Theorem 3.1 also holds for $\\ell_1$ -ball perturbations with at most radius $\\epsilon$ 
. Following similar steps as in Equation 14, the $\\epsilon_{\\mathrm{tr}}$ -robust max-margin solution for $\\ell_1$ -perturbations can be written as", + "bbox": [ + 169, + 279, + 823, + 319 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} := \\underset {\\| \\theta \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ]} {\\min } y _ {i} \\theta^ {\\top} \\left(x _ {i} - y _ {i} \\epsilon_ {\\mathrm {t r}} \\operatorname {s i g n} \\left(\\theta_ {[ j ^ {\\star} (\\theta) ]}\\right) e _ {j ^ {\\star} (\\theta)}\\right) \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 319, + 825, + 347 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $j^{\\star}(\\theta) \\coloneqq \\arg \\max_{j} |\\theta_{j}|$ is the index of the maximum absolute value of $\\theta$ . We now prove by contradiction that the robust max-margin solution for this perturbation set 9 is equivalent to the solution 14 for the perturbation set 3. We start by assuming that $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ does not solve Equation 14, which is equivalent to assuming $1 \\notin j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})$ by definition. We now show how this assumption leads to a contradiction.", + "bbox": [ + 169, + 349, + 826, + 425 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Define the shorthand $j^{\\star} \\coloneqq j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}) - 1$ . Since $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ is the solution of 18, by definition, we have that $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ is also the max-margin solution of the shifted dataset $D_{\\epsilon_{\\mathrm{tr}}} \\coloneqq (x_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\theta_{[j^{\\star} + 1]}e_{j^{\\star} + 1},y_i)$ . 
Further, note that by the assumption that $1 \\notin j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})$ , this dataset $D_{\\epsilon_{\\mathrm{tr}}}$ consists of input vectors $x_i = (y_i\\frac{r}{2},\\tilde{x}_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\theta_{[j^{\\star} + 1]}e_{j^{\\star} + 1})$ . Hence via Lemma A.1, $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ can be written as", + "bbox": [ + 169, + 430, + 825, + 501 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{\\sqrt {r ^ {2} - 4 \\left(\\tilde {\\gamma} ^ {\\epsilon_ {\\mathrm {t r}}}\\right) ^ {2}}} [ r, 2 \\tilde {\\gamma} ^ {\\epsilon_ {\\mathrm {t r}}} \\tilde {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} ], \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 503, + 825, + 539 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ is the normalized max-margin solution of $\\widetilde{D} := (\\tilde{x}_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\tilde{\\theta}_{[j^\\star ]})e_{j^\\star},y_i)$ .", + "bbox": [ + 169, + 544, + 751, + 564 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We now characterize $\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ . Note that by assumption, $j^{\\star} = j^{\\star}(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}) = \\arg \\max_{j}|\\tilde{\\theta}_{[j]}^{\\epsilon_{\\mathrm{tr}}}|$ . 
Hence, the normalized max-margin solution $\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ is the solution of", + "bbox": [ + 169, + 569, + 823, + 604 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} := \\underset {\\| \\tilde {\\theta} \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ]} {\\min } y _ {i} \\tilde {\\theta} ^ {\\top} \\tilde {x} _ {i} - \\epsilon_ {\\mathrm {t r}} | \\tilde {\\theta} _ {[ j ^ {\\star} ]} | \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 608, + 825, + 641 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Observe that the minimum margin of this estimator $\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}} = \\min_{i\\in [n]}y_i(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}})^\\top \\tilde{x}_i - \\epsilon_{\\mathrm{tr}}|\\tilde{\\theta}_{[j^* ]}^{\\epsilon_{\\mathrm{tr}}}$ decreases with $\\epsilon_{\\mathrm{tr}}$ as the problem becomes harder $\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}}\\leq \\tilde{\\gamma}$ , where the latter is equivalent to the margin of $\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ for $\\epsilon_{\\mathrm{tr}} = 0$ . Since $r > 2\\tilde{\\gamma}_{\\max}$ by assumption in the Theorem, by Lemma A.2 with probability at least $1 - 2\\mathbb{E}^{-\\frac{\\alpha^2(d - 1)}{n}}$ , we then have that $r > 2\\tilde{\\gamma}\\geq 2\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}}$ . Given the closed form of $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ in Equation 19, it directly follows that $\\widehat{\\theta}_{[1]}^{\\epsilon_{\\mathrm{tr}}} = r > 2\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}}\\| \\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\| _2 = \\| \\widehat{\\theta}_{[2:d]}^{\\epsilon_{\\mathrm{tr}}}\\| _2$ and hence $1\\in j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})$ . 
This contradicts the original assumption $1\\notin j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})$ and hence we established that $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ for the $\\ell_1$ -perturbation set 9 has the same closed form 14 as for the perturbation set 3.", + "bbox": [ + 169, + 646, + 826, + 766 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The final statement is proved by using the analogous steps as in the proof of 1. and 2. to obtain the closed form of the robust accuracy of $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ .", + "bbox": [ + 169, + 772, + 823, + 804 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.3 PROOF OF LEMMA A.1", + "text_level": 1, + "bbox": [ + 171, + 820, + 377, + 834 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We start by proving that $\\widehat{\\theta}$ is of the form", + "bbox": [ + 171, + 844, + 439, + 861 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\theta} = \\left[ a _ {1}, a _ {2} \\tilde {\\theta} \\right], \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 444, + 864, + 825, + 891 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "for $a_1, a_2 > 0$ . Denote by $\\mathcal{H}(\\theta)$ the plane through the origin with normal $\\theta$ . We define $d((x,y), \\mathcal{H}(\\theta))$ as the signed euclidean distance from the point $(x,y) \\in D \\sim \\mathbb{P}_r$ to the plane $\\mathcal{H}(\\theta)$ . 
The signed", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "euclidean distance is the defined as the euclidean distance from $x$ to the plane if the point $(x, y)$ is correctly predicted by $\\theta$ , and the negative euclidean distance from $x$ to the plane otherwise. We rewrite the definition of the max $l_{2}$ -margin classifier. It is the classifier induced by the normalized vector $\\widehat{\\theta}$ , such that", + "bbox": [ + 169, + 103, + 826, + 162 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {\\theta \\in \\mathbb {R} ^ {d}} \\min _ {(x, y) \\in D} d \\left((x, y), \\mathcal {H} (\\theta)\\right) = \\min _ {(x, y) \\in D} d \\left(\\left(x, y\\right), \\mathcal {H} (\\widehat {\\theta})\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 167, + 692, + 198 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We use that $D$ is deterministic in its first coordinate and get", + "bbox": [ + 171, + 202, + 566, + 218 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\max _ {\\theta} \\min _ {(x, y) \\in D} d \\left((x, y), \\mathcal {H} (\\theta)\\right) = \\max _ {\\theta} \\min _ {(x, y) \\in D} y \\left(\\theta_ {[ 1 ]} x _ {[ 1 ]} + \\tilde {\\theta} ^ {\\top} \\tilde {x}\\right) \\\\ = \\max _ {\\theta} \\theta_ {1} \\frac {r}{2} + \\min _ {(x, y) \\in D} y \\tilde {\\theta} ^ {\\top} \\tilde {x}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 289, + 223, + 705, + 280 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Because $r > 0$ , the maximum over all $\\theta$ has $\\widehat{\\theta}_{[1]} \\geq 0$ . 
Take any $a > 0$ such that $\\| \\widetilde{\\theta} \\|_2 = a$ . By definition the max $l_2$ -margin classifier, $\\widetilde{\\theta}$ , maximizes $\\min_{(x,y) \\in D} d((x,y), \\mathcal{H}(\\theta))$ . Therefore, $\\widehat{\\theta}$ is of the form of Equation 21.", + "bbox": [ + 169, + 287, + 826, + 339 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Note that all classifiers induced by vectors of the form of Equation 21 classify $D$ correctly. Next, we aim to find expressions for $a_1$ and $a_2$ such that Equation 21 is the normalized max $l_2$ -margin classifier. The distance from any $x \\in D$ to $\\mathcal{H}(\\widehat{\\theta})$ is", + "bbox": [ + 169, + 344, + 826, + 391 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nd \\left(x, \\mathcal {H} (\\widehat {\\theta})\\right) = \\left| a _ {1} x _ {[ 1 ]} + a _ {2} \\tilde {\\theta} ^ {\\top} \\tilde {x} \\right|.\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 396, + 614, + 422 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Using that $x_{[1]} = y^{\\frac{r}{2}}$ and that the second term equals $a_2 d\\left(x, \\mathcal{H}(\\tilde{\\theta})\\right)$ , we get", + "bbox": [ + 171, + 429, + 676, + 454 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left. d \\left(x, \\mathcal {H} (\\hat {\\theta})\\right) = \\left| a _ {1} \\frac {r}{2} + a _ {2} d \\left(x, \\mathcal {H} (\\tilde {\\theta})\\right) \\right| = a _ {1} \\frac {r}{2} + \\sqrt {1 - a _ {1} ^ {2}} d \\left(x, \\mathcal {H} (\\tilde {\\theta})\\right). \\right. \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 459, + 823, + 488 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Let $(\\tilde{x},y)\\in \\widetilde{D}$ be the point closest in Euclidean distance to $\\tilde{\\theta}$ . 
This point is also the closest point in Euclidean distance to $\\mathcal{H}(\\widehat{\\theta})$ , because by Equation 22 $d\\left(x,\\mathcal{H}(\\widehat{\\theta})\\right)$ is strictly decreasing for decreasing $d\\left(x,\\mathcal{H}(\\tilde{\\theta})\\right)$ . We maximize the minimum margin $d\\left(x,\\mathcal{H}(\\widehat{\\theta})\\right)$ with respect to $a_1$ . Define the vectors $a = [a_1,a_2]$ and $v = \\left[\\frac{r}{2},d\\left(x,\\mathcal{H}(\\tilde{\\theta})\\right)\\right]$ . We find using the dual norm that", + "bbox": [ + 169, + 494, + 823, + 583 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\na = \\frac {v}{\\| v \\| _ {2}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 459, + 588, + 535, + 619 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Plugging the expression of $a$ into Equation 21 yields that $\\widehat{\\theta}$ is given by", + "bbox": [ + 171, + 625, + 637, + 642 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\theta} = \\frac {1}{\\sqrt {r ^ {2} + 4 \\widetilde {\\gamma} ^ {2}}} \\left[ r, 2 \\widetilde {\\gamma} \\widetilde {\\theta} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 647, + 589, + 681 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For the second part of the lemma we first decompose", + "bbox": [ + 171, + 693, + 522, + 709 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {P} _ {r _ {\\mathrm {t e s t}}} (Y \\widehat {\\theta} ^ {\\top} X > 0) = \\frac {1}{2} \\mathbb {P} _ {r _ {\\mathrm {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X > 0 \\mid Y = 1 \\right] + \\frac {1}{2} \\mathbb {P} _ {r _ {\\mathrm {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X < 0 \\mid Y = - 1 \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 714, + 767, + 744 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We can further write", + "bbox": [ + 171, + 747, + 310, + 762 + ], + 
"page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X > 0 \\mid Y = 1 \\right] = \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ \\sum_ {i = 2} ^ {d} \\widehat {\\theta} _ {[ i ]} X _ {[ i ]} > - \\widehat {\\theta} _ {[ 1 ]} X _ {[ 1 ]} \\mid Y = 1 \\right] \\tag {23} \\\\ = \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ 2 \\tilde {\\gamma} \\sum_ {i = 1} ^ {d - 1} \\tilde {\\theta} _ {[ i ]} X _ {[ i ]} > - r \\frac {r _ {\\text {t e s t}}}{2} \\mid Y = 1 \\right] \\\\ = 1 - \\Phi \\left(- \\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\tilde {\\gamma}}\\right) = \\Phi \\left(\\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\tilde {\\gamma}}\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 263, + 766, + 823, + 888 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $\\Phi$ is the cumulative distribution function. The second equality follows by multiplying by the normalization constant on both sides and the third equality is due to the fact that $\\sum_{i=1}^{d-1} \\tilde{\\theta}_{[i]} X_{[i]}$ is", + "bbox": [ + 169, + 892, + 826, + 928 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "a zero-mean Gaussian with variance $\\sigma^2\\|\\tilde{\\theta}\\|_2^2 = \\sigma^2$ since $\\tilde{\\theta}$ is normalized. 
Correspondingly we can write", + "bbox": [ + 169, + 102, + 823, + 132 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X < 0 \\mid Y = - 1 \\right] = \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ 2 \\widetilde {\\gamma} \\sum_ {i = 1} ^ {d - 1} \\tilde {\\theta} _ {[ i ]} X _ {[ i ]} < - r \\left(- \\frac {r _ {\\text {t e s t}}}{2}\\right) \\mid Y = - 1 \\right] = \\Phi \\left(\\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\widetilde {\\gamma}}\\right) \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 138, + 825, + 195 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "so that we can combine 23 and 23 and 24 to obtain $\\mathbb{P}_{r_{\\mathrm{test}}}(Y\\widehat{\\theta}^{\\top}X > 0) = \\Phi \\left(\\frac{r r_{\\mathrm{test}}}{4\\sigma\\widetilde{\\gamma}}\\right)$ . This concludes the proof of the lemma.", + "bbox": [ + 169, + 207, + 823, + 244 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.4 PROOF OF LEMMA A.2", + "text_level": 1, + "bbox": [ + 171, + 261, + 380, + 275 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The proof plan is as follows. We start from the definition of the max $\\ell_2$ -margin of a dataset. Then, we rewrite the max $\\ell_2$ -margin as an expression that includes a random matrix with independent standard normal entries. 
This allows us to prove the upper and lower bounds for the max- $\\ell_2$ -margin in Sections A.4.1 and A.4.2 respectively, using non-asymptotic estimates on the singular values of Gaussian random matrices.", + "bbox": [ + 169, + 287, + 826, + 358 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Given the dataset $\\widetilde{D} = \\{(\\tilde{x}_i, y_i)\\}_{i=1}^n$ , we define the random matrix", + "bbox": [ + 169, + 364, + 612, + 381 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nX = \\left( \\begin{array}{c} \\tilde {x} _ {1} ^ {\\top} \\\\ \\tilde {x} _ {2} ^ {\\top} \\\\ \\dots \\\\ \\tilde {x} _ {n} ^ {\\top} \\end{array} \\right). \\tag {25}\n$$\n", + "text_format": "latex", + "bbox": [ + 447, + 388, + 825, + 450 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\tilde{x}_i\\sim \\mathcal{N}(0,\\sigma I_{d - 1})$ . Let $\\mathcal{V}$ be the class of all perfect predictors of $\\widetilde{D}$ . For a matrix $A$ and vector $b$ we also denote by $|Ab|$ the vector whose entries correspond to the absolute values of the entries of $Ab$ . 
Then, by definition", + "bbox": [ + 169, + 460, + 826, + 505 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\gamma} = \\max _ {v \\in \\mathcal {V}, \\| v \\| _ {2} = 1} \\min _ {j \\in [ n ]} | X v | _ {[ j ]} = \\max _ {v \\in \\mathcal {V}, \\| v \\| _ {2} = 1} \\min _ {j \\in [ n ]} \\sigma | Q v | _ {[ j ]}, \\tag {26}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 513, + 825, + 539 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $Q = \\frac{1}{\\sigma} X$ is the scaled data matrix.", + "bbox": [ + 169, + 547, + 449, + 565 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In the sequel we will use the operator norm of a matrix $A \\in \\mathbb{R}^{n \\times d - 1}$ .", + "bbox": [ + 169, + 571, + 630, + 588 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\| A \\| _ {2} = \\sup _ {v \\in \\mathbb {R} ^ {d - 1} | \\| v \\| _ {2} = 1} \\| A v \\| _ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 595, + 599, + 623 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "and denote the maximum singular value of a matrix $A$ as $s_{\\max}(A)$ and the minimum singular value as $s_{\\min}(A)$ .", + "bbox": [ + 169, + 632, + 823, + 662 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.4.1 UPPERBOUND", + "text_level": 1, + "bbox": [ + 171, + 678, + 334, + 691 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Given the maximality of the operator norm and since the minimum entry of the vector $|Qv|$ must be smaller than $\\frac{\\|Q\\|_2}{\\sqrt{n}}$ , we can upper bound $\\tilde{\\gamma}$ by", + "bbox": [ + 169, + 703, + 823, + 739 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\gamma} \\leq \\sigma \\frac {1}{\\sqrt {n}} \\| Q \\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 439, + 747, + 555, + 780 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Taking the expectation on both sides with 
respect to the draw of $\\widetilde{D}$ and noting $\\| Q\\| _2\\leq s_{\\max}(Q)$ , it follows from Corollary 5.35 of Vershynin (2010) that for all $t\\geq 0$", + "bbox": [ + 169, + 790, + 826, + 821 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {P} \\left[ \\sqrt {d - 1} + \\sqrt {n} + t \\geq s _ {\\max } (Q) \\right] \\geq 1 - 2 e ^ {- \\frac {t ^ {2}}{2}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 333, + 828, + 661, + 856 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Therefore, with a probability greater than $1 - 2e^{-\\frac{t^2}{2}}$", + "bbox": [ + 169, + 864, + 524, + 883 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\gamma} \\leq \\sigma \\left(1 + \\frac {t + \\sqrt {d - 1}}{\\sqrt {n}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 893, + 593, + 929 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.4.2 LOWERBOUND", + "text_level": 1, + "bbox": [ + 171, + 104, + 341, + 118 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "By the definition in Equation 26, if we find a vector $v \\in \\mathcal{V}$ with $\\| v \\|_2 = 1$ such that for an $a > 0$ , it holds that $\\min_{j \\in n} \\sigma |Xv|_{[j]} > a$ , then $\\tilde{\\gamma} > a$ .", + "bbox": [ + 169, + 127, + 826, + 160 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Recall the definition of the max- $\\ell_2$ -margin as in Equation 25. As $n < d - 1$ , the random matrix $Q$ is a wide matrix, i.e. there are more columns than rows and therefore the minimal singular value is 0. 
Furthermore, $Q$ has rank $n$ almost surely and hence for all $c > 0$ , there exists a $v \\in \\mathbb{R}^{d-1}$ such that", + "bbox": [ + 169, + 165, + 826, + 208 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma Q v = 1 _ {\\{n \\}} c > 0, \\tag {27}\n$$\n", + "text_format": "latex", + "bbox": [ + 431, + 215, + 825, + 232 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "where $1_{\\{n\\}}$ denotes the all ones vector of dimension $n$ . The smallest non-zero singular value of $Q$ , $s_{\\min, \\text{nonzero}}(Q)$ , equals the smallest non-zero singular value of its transpose $Q^{\\top}$ . Therefore, there also exists a $v \\in \\mathcal{V}$ with $\\| v \\|_2 = 1$ such that", + "bbox": [ + 169, + 238, + 826, + 284 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\gamma} \\geq \\min _ {j \\in [ n ]} \\sigma | Q v | _ {[ j ]} \\geq \\sigma s _ {\\min , \\text {n o n z e r o s}} \\left(Q ^ {\\top}\\right) \\frac {1}{\\sqrt {n}}, \\tag {28}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 290, + 825, + 321 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "where we used the fact that any vector $v$ in the span of non-zero eigenvectors satisfies $\\| Qv\\| _2\\geq s_{\\min, \\text{nonzeros}}(Q)$ and the existence of a solution $v$ for any right-hand side as in Equation 27. Taking the expectation on both sides, Corollary 5.35 of Vershynin (2010) yields that with a probability greater than $1 - 2e^{-\\frac{t^2}{2}}$ , $t\\geq 0$ we have", + "bbox": [ + 169, + 329, + 825, + 390 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\gamma} \\geq \\sigma \\left(\\frac {\\sqrt {d - 1} - t}{\\sqrt {n}} - 1\\right). 
\\tag {29}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 398, + 825, + 433 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B BOUNDS ON THE SUSCEPTIBILITY SCORE", + "text_level": 1, + "bbox": [ + 171, + 449, + 553, + 465 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In Theorem 3.1, we give non-asymptotic bounds on the robust and standard error of a linear classifier trained with adversarial logistic regression. Moreover, we use the robust error decomposition in susceptibility and standard error to gain intuition about how adversarial training may hurt robust generalization. In this section, we complete the result of Theorem 3.1 by also deriving non-asymptotic bounds on the susceptibility score of the max $\\ell_2$ -margin classifier.", + "bbox": [ + 169, + 479, + 823, + 551 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Using the results in Appendix A, we can prove following Corollary B.1, which gives non-asymptotic bounds on the susceptibility score.", + "bbox": [ + 169, + 556, + 823, + 585 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Corollary B.1. Assume $d - 1 > n$ . 
For the $\\epsilon_{te}$ -susceptibility on test samples from $\\mathbb{P}_r$ with $2\\epsilon_{te} < r$ and perturbation sets in Equation equation 3 and equation 9 the following holds:", + "bbox": [ + 169, + 589, + 823, + 619 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "For $\\epsilon_{tr} < \\frac{r}{2} - \\tilde{\\gamma}_{\\mathrm{max}}$ , with probability at least $1 - 2\\mathbb{E}^{-\\frac{\\alpha^2(d - 1)}{2}}$ for any $0 < \\alpha < 1$ , over the draw of a dataset $D$ with $n$ samples from $\\mathbb{P}_r$ , the $\\epsilon_{te}$ -susceptibility is upper and lower bounded by", + "bbox": [ + 169, + 625, + 826, + 660 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {S u s c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}\\right) \\leq \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (\\epsilon_ {t e} - \\frac {r}{2})}{2 \\widetilde {\\gamma} _ {\\max } \\sigma}\\right) - \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (- \\epsilon_ {t e} - \\frac {r}{2})}{2 \\widetilde {\\gamma} _ {\\min } \\sigma}\\right) \\tag {30}\n$$\n", + "text_format": "latex", + "bbox": [ + 258, + 665, + 823, + 705 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nS u s c (\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}) \\geq \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (\\epsilon_ {t e} - \\frac {r}{2})}{2 \\tilde {\\gamma} _ {\\min} \\sigma}\\right) - \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (- \\epsilon_ {t e} - \\frac {r}{2})}{2 \\tilde {\\gamma} _ {\\max} \\sigma}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 702, + 733, + 736 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We give the proof in Subsection B.1. Observe that the bounds on the susceptibility score in Corollary B.1 consist of two terms each, where the second term decreases with $\\epsilon_{\\mathrm{tr}}$ , but the first term increases. 
We recognise the following two regimes:
Hence, we can recognize both regimes and validate that, indeed, the second regime dominates in the low sample size setting.", + "bbox": [ + 169, + 103, + 826, + 161 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "B.1 PROOF OF COROLLARY B.1", + "text_level": 1, + "bbox": [ + 171, + 176, + 408, + 190 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We proof the statement by bounding the robustness of a linear classifier. Recall that the robustness of a classifier is the probability that a classifier does not change its prediction under an adversarial attack. The susceptibility score is then given by", + "bbox": [ + 169, + 202, + 823, + 244 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {S u s c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = 1 - \\operatorname {R o b} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right). \\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 382, + 251, + 823, + 270 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The proof idea is as follows: since the perturbations are along the first basis direction, $e_1$ , we compute the distance from the robust $l_2$ -max margin $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ to a point $(X,Y)\\sim \\mathbb{P}$ . Then, we note that the robustness of $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ is given by the probability that the distance along $e_1$ , from $X$ to the decision plane induced by $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ is greater than $\\epsilon_{\\mathrm{te}}$ . 
Lastly, we use the non-asymptotic bounds of Lemma A.2.", + "bbox": [ + 169, + 282, + 823, + 348 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Recall, by Lemma A.1, the max $l_{2}$ -margin classifier is of the form of", + "bbox": [ + 171, + 354, + 627, + 369 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{\\sqrt {(r - 2 \\epsilon_ {\\mathrm {t r}}) ^ {2} + 4 \\tilde {\\gamma} ^ {2}}} \\left[ r - 2 \\epsilon_ {\\mathrm {t r}}, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right]. \\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 375, + 821, + 409 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Let $(X,Y)\\sim \\mathbb{P}$ . The distance along $e_1$ from $X$ to the decision plane induced by $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ , $\\mathcal{H}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})$ , is given by", + "bbox": [ + 169, + 415, + 823, + 446 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nd _ {e _ {1}} (X, \\mathcal {H} (\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}})) = \\left| X _ {[ 1 ]} + \\frac {1}{\\widehat {\\theta} _ {[ 0 ]} ^ {\\epsilon_ {\\mathrm {t r}}}} \\sum_ {i = 2} ^ {d} \\widehat {\\theta} _ {[ i ]} ^ {\\epsilon_ {\\mathrm {t r}}} X _ {[ i ]} \\right|.\n$$\n", + "text_format": "latex", + "bbox": [ + 348, + 446, + 648, + 488 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Substituting the expression of $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ in Equation 32 yields", + "bbox": [ + 171, + 493, + 539, + 508 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nd _ {e _ {1}} (X, \\mathcal {H} (\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}})) = \\left| X _ {[ 1 ]} + 2 \\tilde {\\gamma} \\frac {1}{(r - \\epsilon_ {\\mathrm {t r}})} \\sum_ {i = 2} ^ {d} \\tilde {\\theta} _ {[ i ]} X _ {[ i ]} \\right|.\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 516, 
+ 671, + 556 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Let $N$ be a standard normal distributed random variable. By definition $\\| \\tilde{\\theta}\\| _2^2 = 1$ and using that a sum of Gaussian random variables is again a Gaussian random variable, we can write", + "bbox": [ + 169, + 565, + 823, + 594 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nd _ {e _ {1}} \\left(X, \\mathcal {H} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}\\right)\\right) = \\left| X _ {[ 1 ]} + 2 \\widetilde {\\gamma} \\frac {\\sigma}{\\left(r - \\epsilon_ {\\mathrm {t r}}\\right)} N \\right|.\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 599, + 640, + 633 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The robustness of $\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}$ is given by the probability that $d_{e_1}(X,\\mathcal{H}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})) > \\epsilon_{\\mathrm{te}}$ . Hence, using that $X_{1} = \\pm \\frac{r}{2}$ with probability $\\frac{1}{2}$ , we get", + "bbox": [ + 169, + 640, + 823, + 672 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R o b} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = P \\left[ \\frac {r}{2} + 2 \\widetilde {\\gamma} \\frac {\\sigma}{(r - 2 \\epsilon_ {\\mathrm {t r}})} N > \\epsilon_ {\\mathrm {t e}} \\right] + P \\left[ \\frac {r}{2} + 2 \\widetilde {\\gamma} \\frac {\\sigma}{(r - \\epsilon_ {\\mathrm {t r}})} N < - \\epsilon_ {\\mathrm {t e}} \\right]. 
\\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 678, + 823, + 714 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/9f8cac3f607343a7145a010ec398b9228e234301d7eb198f39a7b1e1cb4989d2.jpg", + "image_caption": [ + "(a) Susceptibility score decreases with $\\epsilon_{\\mathrm{tr}}$" + ], + "image_footnote": [], + "bbox": [ + 236, + 734, + 496, + 854 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/02ed5f16994099a05932f6d9ffce85ac74e4f13a1e9351bd0d56039b5cbc740e.jpg", + "image_caption": [ + "(b) Robust error decomposition", + "Figure 7: We set $r = 6$ , $d = 1000$ , $n = 50$ and $\\epsilon_{\\mathrm{te}} = 2.5$ . (a) The average susceptibility score and the standard deviation over 5 independent experiments. Note how the bounds closely predict the susceptibility score. (b) For comparison, we also plot the robust error decomposition in susceptibility and standard error. Even though the susceptibility decreases, the robust error increases with increasing adversarial budget $\\epsilon_{\\mathrm{tr}}$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 734, + 761, + 856 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We can rewrite Equation 33 in the form", + "bbox": [ + 171, + 103, + 436, + 119 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R o b} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = P \\left[ N > \\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) \\left(\\epsilon_ {\\mathrm {t e}} - \\frac {r}{2}\\right)}{2 \\widetilde {\\gamma} \\sigma} \\right] + P \\left[ N < \\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) \\left(- \\epsilon_ {\\mathrm {t e}} - \\frac {r}{2}\\right)}{2 \\widetilde {\\gamma} \\sigma} \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 227, + 128, + 769, + 162 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Recall, that $N$ is a standard normal distributed random variable and denote by $\\Phi$ the cumulative standard normal density. 
By definition of the cumulative density function, we find that", + "bbox": [ + 169, + 172, + 823, + 200 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {R o b} (\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}) = 1 - \\Phi \\left(\\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) (\\epsilon_ {\\mathrm {t e}} - \\frac {r}{2})}{2 \\widetilde {\\gamma} \\sigma}\\right) + \\Phi \\left(\\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) (- \\epsilon_ {\\mathrm {t e}} - \\frac {r}{2})}{2 \\widetilde {\\gamma} \\sigma}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 243, + 210, + 751, + 246 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Substituting the bounds on $\\tilde{\\gamma}$ of Lemma A.2 gives us the non-asymptotic bounds on the robustness score and by Equation 31 also on the susceptibility score.", + "bbox": [ + 169, + 253, + 823, + 284 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C EXPERIMENTAL DETAILS ON THE LINEAR MODEL", + "text_level": 1, + "bbox": [ + 171, + 306, + 620, + 321 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In this section, we provide detailed experimental details to Figures 3 and 4.", + "bbox": [ + 169, + 340, + 666, + 356 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We implement adversarial logistic regression using stochastic gradient descent with a learning rate of 0.01. Note that logistic regression converges logarithmically to the robust max $l_{2}$ -margin solution. As a consequence of the slow convergence, we train for up to $10^{7}$ epochs. Both during training and test time we solve $\\max_{x_i' \\in T(x_i; \\epsilon_{\\mathrm{tr}})} L(f_\\theta(x_i') y_i)$ exactly. Hence, we exactly measure the robust error. 
Unless specified otherwise, we set $\\sigma = 1$ , $r = 12$ and $\\epsilon_{\\mathrm{te}} = 4$ .", + "bbox": [ + 169, + 361, + 826, + 434 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Experimental details on Figure 3 (a) We draw 5 datasets with $n = 50$ samples and input dimension $d = 1000$ from the distribution $\\mathbb{P}$ . We then run adversarial logistic regression on all 5 datasets with adversarial training budgets, $\\epsilon_{\\mathrm{tr}} = 1$ to 5. To compute the resulting robust error gap of all the obtained classifiers, we use a test set of size $10^{6}$ . Lastly, we compute the lower bound given in part 2. of Theorem 3.1. (b) We draw 5 datasets with different sizes $n$ between 50 and $10^{4}$ . We take an input dimension of $d = 10^{4}$ and plot the mean and standard deviation of the robust error after adversarial and standard logistic regression over the 5 samples.(c) We again draw 5 datasets for each $d / n$ constellation and compute the robust error gap for each dataset.", + "bbox": [ + 169, + 450, + 826, + 563 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Experimental details on Figure 4 For both (a) and (b) we set $d = 1000$ , $\\epsilon_{\\mathrm{te}} = 4$ , and vary the adversarial training budget $(\\epsilon_{\\mathrm{tr}})$ from 1 to 5. For every constellation of $n$ and $\\epsilon_{\\mathrm{tr}}$ , we draw 10 datasets and show the average and standard deviation of the resulting robust errors. In (b), we set $n = 50$ .", + "bbox": [ + 169, + 580, + 823, + 625 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "D EXPERIMENTAL DETAILS ON THE WATERBIRDS DATASET", + "text_level": 1, + "bbox": [ + 171, + 647, + 683, + 664 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In this section, we discuss the experimental details and construction of the Waterbirds dataset in more detail. 
We also provide ablation studies of attack parameters such as the size of the motion blur kernel, plots of the robust error decomposition with increasing $n$ , and some experiments using early stopping.", + "bbox": [ + 169, + 680, + 825, + 739 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "D.1 THE WATERBIRDS DATASET", + "text_level": 1, + "bbox": [ + 171, + 757, + 410, + 771 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "To build the Waterbirds dataset, we use the CUB-200 dataset Welinder et al. (2010), which contains images and labels of 200 bird species, and 4 background classes (forest, jungle/bamboo, water ocean, water lake natural) of the Places dataset Zhou et al. (2017). The aim is to recognize whether or not the bird, in a given image, is a waterbird (e.g. an albatros) or a landbird (e.g. a woodpecker). To create the dataset, we randomly sample equally many water- as landbirds from the CUB-200 dataset. Thereafter, we sample for each bird image a random background image. Then, we use the segmentation provided in the CUB-200 dataset to segment the birds from their original images and paste them onto the randomly sampled backgrounds. The resulting images have a size of $256 \\times 256$ . Moreover, we also resize the segmentations such that we have the correct segmentation profiles of the birds in the new dataset as well. For the concrete implementation, we use the code provided by Sagawa et al. 
(2020).", + "bbox": [ + 169, + 784, + 828, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/308fa5b96594d839d7a3f2b5ad9586a1aa7a275e5d7209cfcdef6747d2d50ec9.jpg", + "image_caption": [ + "(a) Original" + ], + "image_footnote": [], + "bbox": [ + 181, + 99, + 305, + 196 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/3214b31a4dbf850b77bead8e8eefad8b9b38d23cc6c5b73e696f707a259ce557.jpg", + "image_caption": [ + "(b) $M = 5$" + ], + "image_footnote": [], + "bbox": [ + 307, + 99, + 434, + 196 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/20d37da60f42bee0071ee398b1cf254b7295c8d32f8e0b65adbcfd1aea602368.jpg", + "image_caption": [ + "(c) $M = 10$", + "Figure 8: An ablation study of the motion blur kernel size, which corresponds to the severity level of the blur. For increasing $M$ , the severity of the motion blur increases. In particular, note that for $M = 15$ and even $M = 20$ , the bird remains recognizable: we do not semantically change the class, i.e. the perturbations are consistent." 
+ ], + "image_footnote": [], + "bbox": [ + 436, + 99, + 560, + 196 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/1946f37d32cd5f414b8f93df16b46e47268ff322809cfafd6be88aa9f758013b.jpg", + "image_caption": [ + "(d) $M = 15$" + ], + "image_footnote": [], + "bbox": [ + 562, + 101, + 687, + 196 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/dd41066b90a2b7ef8df0dacd0b2e195f8a7cb2eb31d7ceb7e710e01291e23dd7.jpg", + "image_caption": [ + "(e) $M = 20$" + ], + "image_footnote": [], + "bbox": [ + 689, + 101, + 813, + 196 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "D.2 EXPERIMENTAL TRAINING DETAILS", + "text_level": 1, + "bbox": [ + 171, + 323, + 455, + 337 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Following the example of Sagawa et al. (2020), we use a ResNet50 or ResNet18 pretrained on the ImageNet dataset for all experiments in the main text, a weight-decay of $10^{-4}$ , and train for 300 epochs using the Adam optimizer. Extensive fine-tuning of the learning rate resulted in an optimal learning rate of 0.006 for all experiments using the adversarial illumination attack and a pretrained ResNet50. For the experiments considering the adversarial illumination attack using a pretrained VGG19 or Densenet121 network, we found optimal learning rates of 0.001 and 0.002 respectively. Lastly, we found that for all experiments using the motion blur attack a learning rate of 0.0011 was optimal. Adversarial training is implemented as suggested in Madry et al. (2018): at each iteration we find the worst case perturbation with an exact or approximate method. In all our experiments, the resulting classifier interpolates the training set. 
We plot the mean over all runs and the standard deviation of the mean.", + "bbox": [ + 169, + 356, + 826, + 508 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "D.3 SPECIFICS TO THE MOTION BLUR ATTACK", + "text_level": 1, + "bbox": [ + 171, + 545, + 504, + 559 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Fast moving objects or animals are hard to photograph due to motion blur. Hence, when trying to classify or detect moving objects from images, it is imperative that the classifier is robust against reasonable levels of motion blur. We implement the attack as follows. First, we segment the bird from the original image, then use a blur filter and lastly, we paste the blurred bird back onto the background. We are able to apply more severe blur, by enlarging the kernel of the filter. See Figure 8 for an ablation study of the kernel size.", + "bbox": [ + 169, + 579, + 823, + 662 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The motion blur filter is implemented as follows. We use a kernel of size $M \\times M$ and build the filter as follows: we fill the row $(M - 1)/2$ of the kernel with the value $1 / M$ . Thereafter, we use the 2D convolution implementation of OpenCV (filter2D) Bradski (2000) to convolve the kernel with the image. Note that applying a rotation before the convolution to the kernel, changes the direction of the resulting motion blur. Lastly, we find the most detrimental level of motion blur using a list-search over all levels up to $M_{max}$ .", + "bbox": [ + 169, + 669, + 823, + 753 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/f614e660b88a78c7880f2ce5e90593675e5a477d53b9332bedc7d55c309f8c42.jpg", + "image_caption": [ + "(a) $\\epsilon = -0.3$", + "Figure 9: An ablation study of the different lighting changes of the adversarial illumination attack. Even though the directed attack perturbs the signal component in the image, the bird remains recognizable in all cases." 
+ ], + "image_footnote": [], + "bbox": [ + 176, + 792, + 267, + 863 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/b58a0da6bfacfca42a7873a0c8f5585149d0f514f1621f23d9e4363f6ece74e7.jpg", + "image_caption": [ + "(b) $\\epsilon = -0.2$" + ], + "image_footnote": [], + "bbox": [ + 267, + 792, + 357, + 863 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/6c7f4b7890ebdbafdb0111110a3ef64cba4d583f402b03b817f39789467e475d.jpg", + "image_caption": [ + "(c) $\\epsilon = -0.1$" + ], + "image_footnote": [], + "bbox": [ + 361, + 792, + 450, + 863 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/22d8287926a9871b28773de529a69f7e4249eea73095cb27a7325ea0fc2af986.jpg", + "image_caption": [ + "(d) Original" + ], + "image_footnote": [], + "bbox": [ + 452, + 792, + 542, + 863 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/193de382e84891a3e7b1626ab841a78eb9eb103df9cc6234bd3c3bc6827efcf9.jpg", + "image_caption": [ + "(e) $\\epsilon = 0.1$" + ], + "image_footnote": [], + "bbox": [ + 544, + 792, + 635, + 863 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/8e257db8389ee975e1a3b8730315c3d4fc6b9c07b23338ffc622353aa83da60f.jpg", + "image_caption": [ + "(f) $\\epsilon = 0.2$" + ], + "image_footnote": [], + "bbox": [ + 637, + 792, + 727, + 863 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/7e2e802520cbd90b9078ef4c8d1940a88b0111080a9d57a6b25547991daf347c.jpg", + "image_caption": [ + "(g) $\\epsilon = 0.3$" + ], + "image_footnote": [], + "bbox": [ + 728, + 792, + 818, + 863 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": 
"images/072ea61b93e47129f9eec199a253eb05855f7b3f005bae8ff79fa5d5b3a77b9e.jpg", + "image_caption": [ + "(a) Robust error" + ], + "image_footnote": [], + "bbox": [ + 183, + 99, + 390, + 198 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/7996db4272d7089d55bdfb310602478efd147e10a8f85b3c1c51e28552951112.jpg", + "image_caption": [ + "(b) Standard error", + "Figure 10: The robust error decomposition of the experiments depicted in Figure 10a. The plots depict the mean and standard deviation of the mean over several independent experiments. We see that, in comparison to standard training, the reduction in susceptibility for adversarial training is minimal in the low sample size regime. Moreover, the increase in standard error of adversarial training is quite severe, leading to an overall increase in robust error in the low sample size regime." + ], + "image_footnote": [], + "bbox": [ + 395, + 108, + 599, + 198 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/5ceff4dafc9a91a71f450435d67582f561781041746756b7cda66dc823313a9d.jpg", + "image_caption": [ + "(c) Susceptibility" + ], + "image_footnote": [], + "bbox": [ + 607, + 101, + 812, + 196 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.4 SPECIFICS TO THE ADVERSARIAL ILLUMINATION ATTACK", + "text_level": 1, + "bbox": [ + 171, + 320, + 614, + 333 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "An adversary can hide objects using poor lightning conditions, which can for example arise from shadows or bright spots. To model poor lighting conditions on the object only (or targeted to the object), we use the adversarial illumination attack. The attack is constructed as follows: First, we segment the bird from their background. Then we apply an additive constant $\\epsilon$ to the bird, where the absolute size of the constant satisfies $|\\epsilon| < \\epsilon_{\\mathrm{te}} = 0.3$ . 
Thereafter, we clip the values of the bird images to $[0,1]$ , and lastly, we paste the bird back onto the background. See Figure 9 for an ablation of the parameter $\\epsilon$ of the attack. It is non-trivial how to (approximately) find the worst perturbation. We find an approximate solution by searching over all perturbations with increments of size $\\epsilon_{\\mathrm{te}} / K_{\\mathrm{max}}$ . Denote by seg, the segmentation profile of the image $x$ . We consider all perturbed images in the form of", + "bbox": [ + 169, + 345, + 823, + 472 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\nx _ {p e r t} = (1 - \\operatorname {s e g}) x + \\operatorname {s e g} \\left(x + \\epsilon \\frac {K}{K _ {\\max}} 1 _ {2 5 5 \\times 2 5 5}\\right), K \\in [ - K _ {\\max }, K _ {\\max } ].\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 478, + 740, + 510 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "During training time we set $K_{max} = 16$ and therefore search over 33 possible images. During test time we search over 65 images ( $K_{max} = 32$ ).", + "bbox": [ + 169, + 515, + 823, + 545 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.5 EARLY STOPPING", + "text_level": 1, + "bbox": [ + 171, + 560, + 339, + 574 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In all our experiments on the Waterbirds dataset, a parameter search lead to an optimal weight-decay and learning rate of $10^{-4}$ and 0.006 respectively. Another common regularization technique is early stopping, where one stops training on the epoch where the classifier achieves minimal robust error on a hold-out dataset. To understand if early stopping can mitigate the effect of adversarial training aggregating robust generalization in comparison to standard training, we perform the following experiment. 
On the Waterbirds dataset of size $n = 20$ and considering the adversarial illumination attack, we compare standard training with early stopping and adversarial training $(\\epsilon_{\\mathrm{tr}} = \\epsilon_{\\mathrm{te}} = 0.3)$ with early stopping. Considering several independent experiments, early stopped adversarial training has an average robust error of 33.5 a early stopped standard training 29.1. Hence, early stopping does decrease the robust error gap, but does not close it.", + "bbox": [ + 169, + 585, + 823, + 726 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.6 ERROR DECOMPOSITION WITH INCREASING $n$", + "text_level": 1, + "bbox": [ + 171, + 743, + 534, + 757 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In Figure 10a and 11a, we see that adversarial training hurts robust generalization in the small sample size regime. For completeness, we plot the robust error composition for adversarial and standard training in Figure 10. We see that in the low sample size regime, the drop in susceptibility that adversarial training achieves in comparison to standard training, is much lower than the increase in standard error. 
Conversely, in the high sample regime, the drop of susceptibility from adversarial training over standard training is much bigger than the increase in standard error.", + "bbox": [ + 169, + 768, + 823, + 853 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.7 DIFFERENT ARCHITECTURES", + "text_level": 1, + "bbox": [ + 171, + 869, + 419, + 883 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "For completeness, we also performed similar experiments on the waterbirds dataset using the adversarial illumination attack with different network architectures as with the pretrained ResNet50", + "bbox": [ + 169, + 895, + 825, + 924 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/1fa6f9a850b1bbc638d3bfaf586a74d1012c61196e4406d743a533a694153e24.jpg", + "image_caption": [ + "(a) Robust error" + ], + "image_footnote": [], + "bbox": [ + 183, + 99, + 390, + 196 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/97732f1c42e0117a6f40b172ac296a32fcc2bedddc50450cbf46cff242faf72d.jpg", + "image_caption": [ + "(b) Standard error", + "Figure 11: The robust error decomposition of the experiments depicted in Figure 6. The plots depict the mean and standard deviation of the mean over several independent experiments. We see that, in comparison to standard training, the reduction in susceptibility for adversarial training is minimal in the low sample size regime. Moreover, the increase in standard error of adversarial training is quite severe, leading to an overall increase in robust error in the low sample size regime." 
+ ], + "image_footnote": [], + "bbox": [ + 395, + 99, + 599, + 198 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/4f86b66d0ee15c4dc58a455fbc261eda21a0b72a5331db55da3b0fc8a4a05f8e.jpg", + "image_caption": [ + "(c) Susceptibility" + ], + "image_footnote": [], + "bbox": [ + 607, + 102, + 812, + 196 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "network. In particular, we considered the following pretrained network architectures: VGG19 and Densenet121. See Figure 12 for the results. We observe that accuracies, adversarial training hurts in the low sample size regime, but helps when enough data is available.", + "bbox": [ + 169, + 321, + 823, + 364 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/c2545b9ee64e06e61529db60614c35b0feef3a5b2579bf760b328868e3263af0.jpg", + "image_caption": [ + "(a) VGG19", + "Figure 12: The robust error of adversarial training and standard training with increasing sample size using the adversarial illumination attack with $\\epsilon_{\\mathrm{te}} = 0.3$ . We depict the mean and the standard deviation of the mean for multiple runs. Observe that across models, adversarial training hurts in the low sample size regime, but helps when enough samples are available." + ], + "image_footnote": [], + "bbox": [ + 236, + 376, + 496, + 500 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/4a1d525f26b278bc0cc15364a502d8f4b8a3789e35eb9f03eeb528ad3d61a44f.jpg", + "image_caption": [ + "(b) Densenet121" + ], + "image_footnote": [], + "bbox": [ + 501, + 376, + 759, + 500 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "D.8 UNDIRECTED ATTACKS ON THE WATERBIRDS DATASET", + "text_level": 1, + "bbox": [ + 171, + 609, + 598, + 623 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In this section, we analyse adversarial training for $\\ell_2$ -and $\\ell_{\\infty}$ -ball perturbations in the small sample size regime. 
We observe that while adversarial training hurts standard generalization, it helps robust generalization.", + "bbox": [ + 169, + 636, + 823, + 678 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Adversarial training with $\\ell_2$ -balls We train and test with small $\\ell_2$ -balls, $\\epsilon_{\\mathrm{te}} = 0.2$ , such that the networks trained with standard training achieve a non-zero robust accuracy and the networks trained with adversarial training achieve non-trivial standard accuracy. We see in Figure 13, that adversarial training with $\\ell_2$ -balls hurts standard generalization while increasing robust generalization. Moreover, in Figure 14, we see that also in the very small sample size regime, adversarial training with increasing $\\epsilon_{\\mathrm{tr}}$ increases the standard error, but reduces the susceptibility.", + "bbox": [ + 169, + 695, + 826, + 779 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Adversarial training with $\\ell_{\\infty}$ -balls We also consider $\\ell_{\\infty}$ -ball perturbation. We see in Figure 15 that even the smallest perturbation budget $\\epsilon_{\\mathrm{te}} = \\frac{2}{255}$ , standard training has robust error of 100 percent. On the other hand, adversarial training achieves low, but non-zero robust error.", + "bbox": [ + 169, + 795, + 826, + 838 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Experimental details We use an ImageNet pretrained ResNet34 and train for 300 epochs. Moreover, for reliable robust error and susceptibility evaluation of the attacks we use AutoAttack Croce & Hein (2020). All networks were trained such that the network interpolates the training dataset and has low robust error with non-trivial standard error. 
For the networks trained using standard training we use a learning rate of 0.006 and for the networks trained with adversarial training we used a learning", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/4813d8c68584c6fd6f0d091879855b4edf8e67863a56e9c420fecb0e9845185d.jpg", + "image_caption": [ + "(a) Robust error" + ], + "image_footnote": [], + "bbox": [ + 183, + 99, + 390, + 196 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/2bf552c9eac55f28ee8b4c7e6b56d38077f2fe4ec3e53879d3f3f376fb952996.jpg", + "image_caption": [ + "(b) Standard error" + ], + "image_footnote": [], + "bbox": [ + 395, + 99, + 602, + 198 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/612ea7ac44cfeb101c06298a654da19a8af7b97a6933a613ea07a3d154a71d38.jpg", + "image_caption": [ + "(c) Susceptibility" + ], + "image_footnote": [], + "bbox": [ + 607, + 99, + 813, + 198 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/0bebaf17241c57e738fd9ae7f28a94ccdfccb03e957d2362bfbbbf5c4f468af2.jpg", + "image_caption": [ + "Figure 13: The robust error decomposition of adversarial training with $\\ell_2$ -balls of size $\\epsilon_{\\mathrm{tr}} = 0.2$ and test adversaries with $\\ell_2$ -balls of size $\\epsilon_{\\mathrm{te}} = 0.2$ . The plots depict the mean and standard deviation of the mean over several independent experiments. We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers." 
+ ], + "image_footnote": [], + "bbox": [ + 395, + 300, + 604, + 398 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/9650e61ba70fd559ddb12196d5584aa791956547d67ee4f0ccf561562bdbebd1.jpg", + "image_caption": [ + "Figure 14: The robust error decomposition of adversarial training in function of $\\epsilon_{\\mathrm{tr}}$ in the small sample size regime $n = 20$ . We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers with increasing $\\epsilon_{\\mathrm{tr}}$ . We take $n = 20$ and consider test adversaries with $\\ell_2$ -balls of size $\\epsilon_{\\mathrm{te}} = 0.2$ . The plots depict the mean and standard deviation of the mean over several independent experiments.", + "(a) Robust error", + "Figure 15: The robust error decomposition of adversarial training with $\\ell_{\\infty}$ -balls of size $\\epsilon_{\\mathrm{tr}} = \\frac{2}{255}$ and test adversaries with $\\ell_{\\infty}$ -balls of size $\\epsilon_{\\mathrm{te}} = \\frac{2}{255}$ . The plots depict the mean and standard deviation of the mean over several independent experiments. We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers." 
+ ], + "image_footnote": [], + "bbox": [ + 183, + 494, + 390, + 593 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/50d23d820785208d59d85c8b949f3c9a2b3001572c97f9839ac28daad5a20dd6.jpg", + "image_caption": [ + "(b) Standard error" + ], + "image_footnote": [], + "bbox": [ + 395, + 494, + 601, + 592 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/d94a65abce4a6df74a6e0c2e27cda3fa545e574980cce73fe85a371e9d165da7.jpg", + "image_caption": [ + "(c) Susceptibility" + ], + "image_footnote": [], + "bbox": [ + 607, + 494, + 813, + 592 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "rate of $5 \\cdot 10^{-4}$ . We also trained with a weight decay of $10^{-4}$ , a batch size of 8 and a momentum of 0.9 for all networks. We train at least 3 networks for all settings and report the mean and standard deviation of the mean of the standard error, robust error and susceptibility over the three runs.", + "bbox": [ + 169, + 710, + 823, + 753 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "EXPERIMENTAL DETAILS ON CIFAR-10", + "text_level": 1, + "bbox": [ + 171, + 780, + 542, + 796 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In this section, we give the experimental details on the CIFAR-10-based experiments shown in Figures 1 and 17.", + "bbox": [ + 169, + 816, + 823, + 845 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Subsampling CIFAR-10 In all our experiments we subsample CIFAR-10 to simulate the low sample size regime. We ensure that for all subsampled versions the number of samples of each class are equal. 
Hence, if we subsample to 500 training images, then each class has exactly 50 images, which are drawn uniformly from the $5k$ training images of the respective class.", + "bbox": [ + 169, + 867, + 825, + 925 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Mask perturbation on CIFAR-10 On CIFAR-10, we consider the square black mask attack where the adversary can mask a square in the image of size $\\epsilon_{\\mathrm{te}} \\times \\epsilon_{\\mathrm{te}}$ by setting the pixel values zero. To ensure that the mask cannot cover all the information about the true class in the image, we restrict the size of the masks to be at most $2 \\times 2$ , while allowing for all possible locations of the mask in the targeted image. For exact robust error evaluation, we perform a full grid search over all possible locations during test time. We show an example of a black-mask attack on each of the classes in CIFAR-10 in Figure 16.", + "bbox": [ + 169, + 103, + 826, + 202 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "During training, a full grid search is computationally intractable so that we use an approximate attack similar to Wu et al. (2020) during training time: by identifying the $K = 16$ most promising mask locations with a heuristic as follows. First, we identify promising mask locations by analyzing the gradient, $\\nabla_{x}L(f_{\\theta}(x),y)$ , of the cross-entropy loss with respect to the input. Masks that cover part of the image where the gradient is large, are more likely to increase the loss. 
Hence, we compute the $K$ mask locations $(i,j)$ , where $\\| \\nabla_{x}L(f_{\\theta}(x),y)_{[i:i + 2,j:j + 2]}\\| _1$ is the largest and take using a full list-search the mask that incurs the highest loss. Our intuition from the theory predicts that higher $K$ , and hence a more exact \"defense\", only increases the robust error of adversarial training, since the mask could then more efficiently cover important information about the class.", + "bbox": [ + 169, + 208, + 826, + 335 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/8f3c05007626d1ca15fe61a8198f820712a7ea9791907a7525cb5d4a8fec1bb8.jpg", + "image_caption": [ + "Figure 16: We show an example of a mask perturbation for all 10 classes of CIFAR-10. Even though the attack occludes part of the images, a human can still easily classify all images correctly." + ], + "image_footnote": [], + "bbox": [ + 236, + 344, + 759, + 386 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Experimental training details For all our experiments on CIFAR-10, we adjusted the code provided by Phan (2021). As typically done for CIFAR-10, we augment the data with random cropping and horizontal flipping. For the experiments with results depicted in Figures 1 and 17, we use a ResNet18 network and train for 100 epochs. We tune the parameters learning rate and weight decay for low robust error. For standard standard training, we use a learning rate of 0.01 with equal weight decay. For adversarial training, we use a learning rate of 0.015 and a weight decay of $10^{-4}$ . We run each experiment three times for every dataset with different initialization seeds, and plot the average and standard deviation over the runs.", + "bbox": [ + 169, + 445, + 552, + 627 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "For the experiments in Figure 1 and 18 we use an attack strength of $K = 4$ . 
Recall that we perform a full grid search at test time and hence have a good approximation of the robust accuracy and susceptibility score.", + "bbox": [ + 169, + 632, + 550, + 690 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/962a80bd4b81e3e400f347ea5d098f7e0eb92be644e217c37d36dc6863740e53.jpg", + "image_caption": [ + "Figure 17: The robust error decomposition in standard error and susceptibility for varying attack strengths $K$ . We see that the larger $K$ , the lower the susceptibility, but the higher the standard error." + ], + "image_footnote": [], + "bbox": [ + 562, + 460, + 821, + 583 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Increasing training attack strength We investigate the influence of the attack strength $K$ on the robust error for adversarial training. We take $\\epsilon_{\\mathrm{tr}} = 2$ and $n = 500$ and vary $K$ . The results are depicted in Figure 17. We see that for increasing $K$ , the susceptibility decreases, but the standard error increases more severely, resulting in an increasing robust error.", + "bbox": [ + 169, + 703, + 823, + 762 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Robust error decomposition In Figure 1, we see that the robust error increases for adversarial training compared to standard training in the low sample size regime, but the opposite holds when enough samples are available. For completeness, we provide a full decomposition of the robust error in standard error and susceptibility for standard and adversarial training. 
We plot the decomposition in Figure 18.", + "bbox": [ + 169, + 773, + 825, + 845 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "F STATIC HAND GESTURE RECOGNITION", + "text_level": 1, + "bbox": [ + 171, + 864, + 526, + 878 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The goal of static hand gesture or posture recognition is to recognize hand gestures such as a pointing index finger or the okay-sign based on static data such as images Oudah et al. (2020); Yang et al.", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "(2013). The current use of hand gesture recognition is primarily in the interaction between computers and humans Oudah et al. (2020). More specifically, typical practical applications can be found in the environment of games, assisted living, and virtual reality Mujahid et al. (2021). In the following, we conduct experiments on a hand gesture recognition dataset constructed by Mantecón et al. (2019), which consists of near-infrared stereo images obtained using the Leap Motion device. First, we crop or segment the images after which we use logistic regression for classification. We see that adversarial logistic regression deteriorates robust generalization with increasing $\\epsilon_{\\mathrm{tr}}$ .", + "bbox": [ + 169, + 103, + 826, + 203 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Static hand-gesture dataset We use the dataset made available by Mantecón et al. (2019). This dataset consists of near-infrared stereo images taken with the Leap Motion device and provides detailed skeleton data. We base our analysis on the images only. The size of the images is $640 \\times 240$ pixels. 
The dataset consists of 16 classes of hand poses taken by 25 different people. We note that the variety between the different people is relatively wide; there are men and women with different posture and hand sizes. However, the different samples taken by the same person are alike.", + "bbox": [ + 169, + 220, + 823, + 305 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "We consider binary classification between the index-pose and L-pose, and take as a training set 30 images of the users 16 to 25. This results in a training dataset of 300 samples. We show two examples of the training dataset in Figure 19, each corresponding to a different class. Observe that the near-infrared images darken the background and successfully highlight the hand-pose. As a test dataset, we take 10 images of each of the two classes from the users 1 to 10 resulting in a test dataset of size 200.", + "bbox": [ + 169, + 311, + 823, + 393 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "**Cropping the dataset** To speed up training and ease the classification problem, we crop the images from a size of $640 \\times 240$ to a size of $200 \\times 200$ . We crop the images using a basic image segmentation technique to stay as close as possible to real-world applications. The aim is to crop the images such that the hand gesture is centered within the cropped image.", + "bbox": [ + 169, + 415, + 823, + 470 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "For every user in the training set, we crop an image of the L-pose and the index pose by hand. We call these images the training masks $\\{\\mathrm{masks}_i\\}_{i=1}^{20}$ . We note that the more a particular window of an image resembles a mask, the more likely that the window captures the hand gesture correctly. Moreover, the near-infrared images are such that the hands of a person are brighter than the surroundings of the person itself. 
Based on these two observations, we define the best segment or window, defined by the upper left coordinates $(i,j)$ , for an image $x$ as the solution to the following optimization problem:", + "bbox": [ + 169, + 477, + 823, + 561 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {i \\in [ 4 4 0 ], j \\in [ 4 0 ]} {\\arg \\min } \\sum_ {l = 1} ^ {2 0} \\| \\operatorname {m a s k s} _ {l} - x _ {\\{i: i + 2 0 0, j: j + 2 0 0 \\}} \\| _ {2} ^ {2} - \\frac {1}{2} \\| x _ {\\{i + w, j + h \\}} \\| _ {1}. \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 585, + 823, + 626 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Equation 34 is solved using a full grid search. We use the result to crop both training and test images. Upon manual inspection of the cropped images, close to all images were perfectly cropped. We replace the handful poorly cropped training images with hand-cropped counterparts.", + "bbox": [ + 169, + 635, + 826, + 678 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Square-mask perturbations Since we use logistic regression, we perform a full grid search to find the best adversarial perturbation at training and test time. For completeness, the upper left coordinates", + "bbox": [ + 169, + 696, + 823, + 726 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/4502c1bf228648f40c15268a303921271876dbb44477d6fcaac2679eca5b0995.jpg", + "image_caption": [ + "(a) Robust error" + ], + "image_footnote": [], + "bbox": [ + 184, + 758, + 387, + 849 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/38e471ba42dd37e14e3a8db65922e6c39a88c3e2c12371f52024872c3b996834.jpg", + "image_caption": [ + "(b) Standard error", + "Figure 18: The robust error decomposition in standard error and susceptibility of the subsampled datasets of CIFAR-10 after adversarial and standard training. 
For small sample size, adversarial training has higher robust error then standard training." + ], + "image_footnote": [], + "bbox": [ + 395, + 758, + 599, + 849 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/33c9a5b0e9ca0d43691b68e683d8fe948c15802f0bf95bc6a8e39725fdb9dcd8.jpg", + "image_caption": [ + "(c) Susceptibility" + ], + "image_footnote": [], + "bbox": [ + 604, + 752, + 810, + 849 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/ad02e8b4c078764e1ca3fd16302c513719a6c229139042de1270d9e805555f92.jpg", + "image_caption": [ + "(a) L pose" + ], + "image_footnote": [], + "bbox": [ + 178, + 99, + 436, + 233 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/1f696bdcbdb4525ecbc4b630198f092d4ac936b3045ca4db67441f2d59f0637d.jpg", + "image_caption": [ + "(b) Index pose" + ], + "image_footnote": [], + "bbox": [ + 498, + 99, + 756, + 233 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/5f5121a185f800d9707d726c94c5020fec1d2515474efd7d4b24c21c092db34f.jpg", + "image_caption": [ + "Figure 19: Examples of the original images of the considered hand-gestures. We recognize the \"L\"-sign in Figure 19a and the index sign in Figure 19b. Observe that the near-infrared images highlight the hand pose well and blends out much of the non-useful or noisy background.", + "(a) Cropped L pose", + "Figure 20: Examples of the cropped hand-gesture images. We see that the hands are centered and the images have a size of $200 \\times 200$ . In Figure 20c we show an example of the square black-mask perturbation." 
+ ], + "image_footnote": [], + "bbox": [ + 210, + 316, + 377, + 445 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/2b48acfec7de0b523a300cf6494ea3830a1e9c2682c157b0c206d41136172bb1.jpg", + "image_caption": [ + "(b) Cropped index pose" + ], + "image_footnote": [], + "bbox": [ + 415, + 316, + 581, + 444 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/cf12fa372fc46b7868a4a707ff18f18ec6208d99c94c4a6883f7601f03b44ae6.jpg", + "image_caption": [ + "(c) Black-mask perturbation" + ], + "image_footnote": [], + "bbox": [ + 620, + 316, + 785, + 443 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "of the optimal black-mask perturbation of size $\\epsilon_{\\mathrm{tr}} \\times \\epsilon_{\\mathrm{tr}}$ can be found as the solution to", + "bbox": [ + 169, + 527, + 736, + 544 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\arg \\max _ {i \\in [ 2 0 0 - \\epsilon_ {\\mathrm {t r}} ], j \\in [ 2 0 0 - \\epsilon_ {\\mathrm {t r}} ]} \\sum_ {l, m \\in [ \\epsilon_ {\\mathrm {t r}} ]} \\theta_ {[ i: i + l, j: j + m ]}. \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 343, + 551, + 823, + 585 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "The algorithm is rather slow as we iterate over all possible windows. We show a black-mask perturbation on an $L$ -pose image in Figure 20c.", + "bbox": [ + 169, + 592, + 823, + 621 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Results We run adversarial logistic regression with square-mask perturbations on the cropped dataset and vary the adversarial training budget and plot the result in Figure 21. We observe attack that adversarial logistic regression deteriorates robust generalization.", + "bbox": [ + 169, + 636, + 823, + 679 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Because we use adversarial logistic regression, we are able to visualize the classifier. 
Given the classifier induced by $\\theta$ , we can visualize how it classifies the images by plotting $\\frac{\\theta - \\min_{i\\in[d]}\\theta_{[i]}}{\\max_{i\\in[d]}\\theta_{[i]}}\\in [0,1]^d$ . Recall that the class-prediction of our predictor for a data point $(x,y)$ is given by $\\mathrm{sign}(\\theta^{\\top}x)\\in \\{\\pm 1\\}$ . The lighter parts of the resulting image correspond to the class with label 1 and the darker patches with the class corresponding to label $-1$ .", + "bbox": [ + 169, + 685, + 823, + 765 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We plot the classifiers obtained by standard logistic regression and adversarial logistic regression with training adversarial budgets $\\epsilon_{\\mathrm{tr}}$ of 10 and 25 in Figure 22. The darker parts in the classifier correspond to patches that are typically bright for the $L$ -pose. Complementary, the lighter patches in the classifier correspond to patches that are typically bright for the index pose. We see that in the case of adversarial logistic regression, the background noise is much higher than for standard logistic regression. In other words, adversarial logistic regression puts more weight on non-signal parts in the images to classify the", + "bbox": [ + 169, + 771, + 550, + 925 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/43ce2a07cf4257b4a504149373536b2d67bd2154d79ebfc09d771c9ee0b09f31.jpg", + "image_caption": [ + "Figure 21: The standard error and robust error for varying adversarial training budget $\\epsilon_{\\mathrm{tr}}$ . We see that the larger $\\epsilon_{\\mathrm{tr}}$ the higher the robust error." 
+ ], + "image_footnote": [], + "bbox": [ + 563, + 787, + 821, + 909 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "training dataset and hence exhibits worse performance on the test dataset.", + "bbox": [ + 169, + 104, + 550, + 132 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/6e7a5f7e308b622e6cd348122f3d35085268ab1cd1c252c0673785d1da3282bd.jpg", + "image_caption": [ + "(a) $\\epsilon_{\\mathrm{tr}} = 0$" + ], + "image_footnote": [], + "bbox": [ + 210, + 142, + 379, + 268 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/41bbadd545f0831fc85c2431e3c32f8936ddf77a80b40627dd559a109c253ed3.jpg", + "image_caption": [ + "(b) $\\epsilon_{\\mathrm{tr}} = 10$", + "Figure 22: We visualize the logistic regression solutions. In Figure 22a we plot the vector that induces the classifier obtained after standard training. In Figure 22b and Figure 22c we plot the vector obtained after training with square-mask perturbations of size 10 and 25, respectively. We note the non-signal enhanced background correlations at the parts highlighted with the red circles in the image projection of the adversarially trained classifiers." 
+ ], + "image_footnote": [], + "bbox": [ + 415, + 142, + 581, + 268 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/7b392b5b988645f9de0e728e4d94808ddd76150a02d046121f5f86949513ebda.jpg", + "image_caption": [ + "(c) $\\epsilon_{\\mathrm{tr}} = 25$" + ], + "image_footnote": [], + "bbox": [ + 622, + 142, + 787, + 268 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 28 + } +] \ No newline at end of file diff --git a/2023/Why adversarial training can hurt robust accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_model.json b/2023/Why adversarial training can hurt robust accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_model.json new file mode 100644 index 0000000000000000000000000000000000000000..303f6ff60a39c25aa5b8fdb5b2ae34a28c597f1f --- /dev/null +++ b/2023/Why adversarial training can hurt robust accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_model.json @@ -0,0 +1,6176 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.681, + 0.148 + ], + "angle": 0, + "content": "WHY ADVERSARIAL TRAINING CAN HURT ROBUST ACCURACY" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.169, + 0.525, + 0.185 + ], + "angle": 0, + "content": "Jacob Clarysse1, Julia Hörrmann2, Fanny Yang1" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.186, + 0.51, + 0.199 + ], + "angle": 0, + "content": "1. Department of Computer Science, ETH Zürich" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.199, + 0.475, + 0.212 + ], + "angle": 0, + "content": "2. 
Department of Mathematics, ETH Zürich" + }, + { + "type": "list", + "bbox": [ + 0.184, + 0.186, + 0.51, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.213, + 0.551, + 0.227 + ], + "angle": 0, + "content": "{jacob.clarysse;fan.yang}@inf.ethz.ch;" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.227, + 0.526, + 0.241 + ], + "angle": 0, + "content": "{julia.hoerrmann}@stat.math.ethz.ch" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.277, + 0.548, + 0.292 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.306, + 0.771, + 0.448 + ], + "angle": 0, + "content": "Machine learning classifiers with high test accuracy often perform poorly under adversarial perturbations. It is commonly believed that adversarial training alleviates this issue. In this paper, we demonstrate that, surprisingly, the opposite can be true for a natural class of perceptible perturbations — even though adversarial training helps when enough data is available, it may in fact hurt robust generalization in the small sample size regime. We first prove this phenomenon for a high-dimensional linear classification setting with noiseless observations. Using intuitive insights from the proof, we could find perturbations on standard image datasets for which this behavior persists. Specifically, it occurs for perceptible perturbations that effectively reduce class information such as object occlusions or corruptions." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.461, + 0.339, + 0.477 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.487, + 0.554, + 0.737 + ], + "angle": 0, + "content": "Today's best-performing classifiers are vulnerable to adversarial attacks Goodfellow et al. (2015); Szegedy et al. (2014) and exhibit high robust error: for many inputs, their predictions change under adversarial perturbations, even though the true class stays the same. 
Such content-preserving (Gilmer et al., 2018), consistent (Raghunathan et al., 2020) attacks can be either perceptible or imperceptible. For image datasets, most work to date studies imperceptible attacks that are based on perturbations with limited strength or attack budget. These include bounded \\(\\ell_p\\)-norm perturbations (Goodfellow et al., 2015; Madry et al., 2018; Moosavi-Dezfooli et al., 2016), small transformations using image processing techniques (Ghiasi et al., 2019; Zhao et al., 2020; Laidlaw et al., 2021; Luo et al., 2018) or nearby samples on the data manifold (Lin et al., 2020; Zhou et al., 2020). Even though they do not visibly change the image by definition, imperceptible attacks can often successfully fool a learned classifier." + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.495, + 0.825, + 0.606 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.559, + 0.616, + 0.828, + 0.72 + ], + "angle": 0, + "content": "Figure 1: On the Waterbirds dataset attacked by the adversarial illumination attack, adversarial training (yellow) yields higher robust error than standard training (blue) when the sample size is small, even though it helps for large sample sizes and in a setting where the standard error of standard training is small. (see App. D for details)." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.743, + 0.829, + 0.925 + ], + "angle": 0, + "content": "On the other hand, perturbations that naturally occur and are physically realizable are commonly perceptible. Some perceptible perturbations specifically target the object to be recognized: these include occlusions (e.g. stickers placed on traffic signs (Eykholt et al., 2018) or masks of different sizes that cover important features of human faces (Wu et al., 2020)) or corruptions that are caused by the image capturing process (animals that move faster than the shutter speed or objects that are not well-lit, see Figure 2). 
Others transform the whole image and are not confined to the object itself, such as rotations, translations or corruptions Engstrom et al. (2019); Kang et al. (2019). In this paper, we refer to such perceptible attacks as directed attacks. In contrast to other attacks, they effectively reduce useful class information in the input for any model, without necessarily changing the true label - we say that they are directed and consistent, more formally defined in Section 2. For example, a stop sign with a small sticker could partially cover the text without losing its semantic meaning. Similarly, a flying bird captured with a long exposure time can induce motion blur in the final image without becoming unrecognizable to the observer." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.12, + 0.278, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.205, + 0.193, + 0.266, + 0.205 + ], + "angle": 0, + "content": "(a) Masks" + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.12, + 0.366, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.288, + 0.193, + 0.361, + 0.206 + ], + "angle": 0, + "content": "(b) Original" + }, + { + "type": "image", + "bbox": [ + 0.369, + 0.12, + 0.455, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.193, + 0.448, + 0.206 + ], + "angle": 0, + "content": "(c) Lighting" + }, + { + "type": "image", + "bbox": [ + 0.457, + 0.12, + 0.544, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.475, + 0.193, + 0.525, + 0.206 + ], + "angle": 0, + "content": "(d) Blur" + }, + { + "type": 
"image", + "bbox": [ + 0.545, + 0.12, + 0.673, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.576, + 0.193, + 0.775, + 0.206 + ], + "angle": 0, + "content": "(e) Classification of perturbations" + }, + { + "type": "image", + "bbox": [ + 0.675, + 0.12, + 0.805, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.218, + 0.825, + 0.271 + ], + "angle": 0, + "content": "Figure 2: Examples of directed attacks on CIFAR10 and the Waterbirds dataset. In Figure 2a, we corrupt the image with a black mask of size \\(2 \\times 2\\) and in Figure 2c and 2d we change the lighting conditions (darkening) and apply motion blur on the bird in the image respectively. All perturbations reduce the information about the class in the images: they are the result of directed attacks. (e) Directed attacks are a subset of perceptible attacks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.282, + 0.825, + 0.325 + ], + "angle": 0, + "content": "In the literature so far, it is widely acknowledged that adversarial training with the same perturbation type and budget as during test time often achieves significantly lower robust error than standard training (Madry et al., 2018; Zhang et al., 2019; Bai et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.331, + 0.826, + 0.373 + ], + "angle": 0, + "content": "In contrast, we show that adversarial training not only increases standard error (Zhang et al., 2019; Tsipras et al., 2019; Stutz et al., 2019; Raghunathan et al., 2020), but surprisingly, in the low sample regime," + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.384, + 0.787, + 0.399 + ], + "angle": 0, + "content": "adversarial training may even increase the robust error compared to standard training!" 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.408, + 0.825, + 0.464 + ], + "angle": 0, + "content": "Figure 1 illustrates the main message of our paper on the Waterbirds dataset: Although adversarial training with directed attacks outperforms standard training when enough training samples are available, it is inferior when the sample size is small (but still large enough to obtain a small standard test error)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.471, + 0.391, + 0.485 + ], + "angle": 0, + "content": "Our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.496, + 0.825, + 0.553 + ], + "angle": 0, + "content": "- We prove that, almost surely, adversarially training a linear classifier on separable data yields a monotonically increasing robust error as the perturbation budget grows. We further establish high-probability non-asymptotic lower bounds on the robust error gap between adversarial and standard training." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.556, + 0.825, + 0.584 + ], + "angle": 0, + "content": "- Our proof provides intuition for why this lower bound on the gap is particularly large for directed attacks in the low sample regime." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.588, + 0.825, + 0.63 + ], + "angle": 0, + "content": "- We observe empirically for different directed attacks on real-world image datasets that this behavior persists: adversarial training for directed attacks hurts robust accuracy when the sample size is small." 
+ }, + { + "type": "list", + "bbox": [ + 0.217, + 0.496, + 0.825, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.65, + 0.419, + 0.665 + ], + "angle": 0, + "content": "2 ROBUST CLASSIFICATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.681, + 0.825, + 0.71 + ], + "angle": 0, + "content": "We first introduce our robust classification setting more formally by defining the notions of adversarial robustness, directed attacks and adversarial training used throughout the paper." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.717, + 0.825, + 0.789 + ], + "angle": 0, + "content": "Adversarially robust classifiers For inputs \\(x \\in \\mathbb{R}^d\\), we consider multi-class classifiers associated with parameterized functions \\(f_{\\theta}: \\mathbb{R}^d \\to \\mathbb{R}^K\\) if \\(K > 2\\) and \\(f_{\\theta}: \\mathbb{R}^d \\to \\mathbb{R}\\) if \\(K = 2\\), where \\(K\\) is the number of labels. For example, \\(f_{\\theta}(x)\\) could be a linear model (as in Section 3) or a neural network (as in Section 4). The output label predictions are obtained by \\(h(f_{\\theta}(x)) = \\mathrm{sign}(f_{\\theta}(x))\\) for \\(K = 2\\) and \\(h(f_{\\theta}(x)) = \\arg \\max_{k \\in \\{1, \\dots, K\\}} f_{\\theta}(x)_k\\) for \\(K > 2\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.826, + 0.864 + ], + "angle": 0, + "content": "In order to convince practitioners to use machine learning models in the wild, it is key to demonstrate that they exhibit robustness. One kind of robustness is that they do not change prediction when the input is subject to consistent perturbations, which are small class-preserving perturbations. 
Mathematically speaking, for the underlying joint data distribution \\(\\mathbb{P}\\), the model should have a small \\(\\epsilon_{te}\\)-robust error, defined as" + }, + { + "type": "equation", + "bbox": [ + 0.345, + 0.867, + 0.825, + 0.891 + ], + "angle": 0, + "content": "\\[\n\\operatorname {E r r} (\\theta ; \\epsilon_ {\\mathrm {t e}}) := \\mathbb {E} _ {(x, y) \\sim \\mathbb {P}} \\max _ {x ^ {\\prime} \\in T (x; \\epsilon_ {\\mathrm {t e}})} \\ell \\left(f _ {\\theta} \\left(x ^ {\\prime}\\right), y\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "where \\(\\ell\\) is 0 if the class determined by \\(h(f_{\\theta}(x))\\) is equal to \\(y\\) and 1 otherwise. Further, \\(T(x;\\epsilon_{te})\\) indicates a perturbation set around \\(x\\) of a certain transformation type with size \\(\\epsilon_{test}\\). Note that" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "the (standard) error \\(\\mathbb{E}_{(x,y)\\sim \\mathbb{P}}\\ell (f_{\\theta}(x),y)\\) of a classifier corresponds to \\(\\mathrm{Err}(\\theta ;0)\\) - the robust error evaluated at \\(\\epsilon_{\\mathrm{te}} = 0\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.141, + 0.827, + 0.239 + ], + "angle": 0, + "content": "Directed attacks The inner maximization in Equation 1 is often called the adversarial attack of the input \\( x \\) for the model \\( f_{\\theta} \\) and the corresponding solution is referred to as the adversarial example. 
In this paper, we consider directed attacks that effectively reduce the information about the true classes, with image-based examples depicted in Figure 2. For linear classification, we analyze directed attacks in the form of additive perturbations that are constrained to the direction of the optimal decision boundary (see details in Section 3.1). In particular, note that the set of directed perturbations is restricted to directions attacking the Bayes optimal classifier." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.247, + 0.825, + 0.276 + ], + "angle": 0, + "content": "Adversarial training A common approach to obtain classifiers with a good robust accuracy is to minimize the training objective \\(\\mathcal{L}_{\\epsilon_{\\mathrm{tr}}}\\) with a surrogate robust classification loss \\(L\\)" + }, + { + "type": "equation", + "bbox": [ + 0.361, + 0.28, + 0.825, + 0.318 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\epsilon_ {\\mathrm {t r}}} (\\theta) := \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\max _ {x _ {i} ^ {\\prime} \\in T \\left(x _ {i}; \\epsilon_ {\\mathrm {t r}}\\right)} L \\left(f _ {\\theta} \\left(x _ {i} ^ {\\prime}\\right) y _ {i}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.322, + 0.826, + 0.421 + ], + "angle": 0, + "content": "also called adversarial training. In practice, we often use the cross entropy loss \\( L(z) = \\log (1 + e^{-z}) \\) and minimize the robust objective by using first order optimization methods such as (stochastic) gradient descent. SGD is also the algorithm that we focus on in both the theoretical and experimental sections. When the desired type of robustness is known in advance, it is standard practice to use the same perturbation set for training as for testing, i.e. \\( T(x;\\epsilon_{\\mathrm{tr}}) = T(x;\\epsilon_{\\mathrm{te}}) \\). For example, Madry et al. (2018) show that the robust error sharply increases for \\( \\epsilon_{\\mathrm{tr}} < \\epsilon_{\\mathrm{te}} \\). 
In this paper, we demonstrate that for directed attacks in the small sample size regime, in fact, the opposite is true." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.44, + 0.406, + 0.455 + ], + "angle": 0, + "content": "3 THEORETICAL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.47, + 0.825, + 0.514 + ], + "angle": 0, + "content": "In this section, we prove for linear functions \\( f_{\\theta}(x) = \\theta^{\\top}x \\) that in the case of directed attacks, robust generalization deteriorates with increasing \\( \\epsilon_{\\mathrm{tr}} \\). The proof, albeit in a simple setting, provides explanations for why adversarial training fails in the high-dimensional regime for such attacks." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.529, + 0.279, + 0.542 + ], + "angle": 0, + "content": "3.1 SETTING" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.555, + 0.658, + 0.57 + ], + "angle": 0, + "content": "We now introduce the precise linear setting used in our theoretical results." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.578, + 0.825, + 0.679 + ], + "angle": 0, + "content": "Data model We assume that the ground truth and hypothesis class are given by linear functions \\( f_{\\theta}(x) = \\theta^{\\top}x \\) and the sample size \\( n \\) is lower than the ambient dimension \\( d \\) minus one. The generative distribution \\( \\mathbb{P}_r \\) is similar to (Tsipras et al., 2019; Nagarajan & Kolter, 2019): The label \\( y \\in \\{+1, -1\\} \\) is drawn with equal probability and the covariate vector is sampled as \\( x = [y_{\\frac{r}{2}}, \\tilde{x}] \\) with the random vector \\( \\tilde{x} \\in \\mathbb{R}^{d-1} \\) drawn from a standard normal distribution, i.e. \\( \\tilde{x} \\sim \\mathcal{N}(0, \\sigma^2 I_{d-1}) \\). We would like to learn a classifier that has low robust error by using a dataset \\( D = (x_i, y_i)_{i=1}^n \\) with \\( n \\) i.i.d. samples from \\( \\mathbb{P}_r \\). 
Intuitively, the separation distance \\( r \\) reflects the signal strength of the data distribution." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.827, + 0.755 + ], + "angle": 0, + "content": "Notice that the distribution \\(\\mathbb{P}_r\\) is noiseless: for a given input \\(x\\), the label \\(y = \\mathrm{sign}(x_{[1]})\\) is deterministic. Further, the Bayes optimal linear classifier (also referred to as the ground truth) is parameterized by the first standard coordinate vector, \\(\\theta^{\\star} = e_1\\). By definition, the ground truth is robust against all perturbations that do not change the sign in the first coordinate of the sample, i.e. consistent perturbations, and hence so is the optimal robust classifier." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.763, + 0.825, + 0.805 + ], + "angle": 0, + "content": "Directed attacks In this paper, we focus on consistent directed attacks that by definition efficiently concentrate their attack budget to reduce the class information. For our linear setting this information lies in the first entry. Hence, we can model such attacks by additive perturbations in the first dimension" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.808, + 0.825, + 0.825 + ], + "angle": 0, + "content": "\\[\nT (x; \\epsilon) = \\left\\{x ^ {\\prime} = x + \\delta \\mid \\delta = \\beta e _ {1} \\text {a n d} - \\epsilon \\leq \\beta \\leq \\epsilon \\right\\}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.825, + 0.883 + ], + "angle": 0, + "content": "Note that this attack is always in the direction of the signal dimension, i.e. the Bayes optimal classifier or equivalently the ground truth. Furthermore, when \\(\\epsilon < \\frac{r}{2}\\), it is a consistent directed attack. Observe how this is different from \\(\\ell_p\\)-attacks — an \\(\\ell_p\\) attack, depending on the model, may add a perturbation that only has a very small component in the signal direction." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.885, + 0.826, + 0.925 + ], + "angle": 0, + "content": "1Note that the result more generally holds for non-sparse models that are not axis aligned by way of a simple rotation \\( z = Ux \\). In that case the distribution is characterized by \\( \\theta^{\\star} = u_{1} \\), where \\( u_{1} \\) is the first column vector of \\( U \\), and a rotated Gaussian in the \\( d - 1 \\) dimensions orthogonal to \\( \\theta^{\\star} \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.203, + 0.101, + 0.398, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.101, + 0.597, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.599, + 0.101, + 0.794, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.202, + 0.198, + 0.796, + 0.213 + ], + "angle": 0, + "content": "(a) Robust error increase with \\(\\epsilon_{\\mathrm{tr}}\\) (b) Standard-adversarial training (c) Effect of overparameterization" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.224, + 0.825, + 0.289 + ], + "angle": 0, + "content": "Figure 3: Experimental verification of Theorem 3.1. (a) We set \\( d = 1000 \\), \\( r = 12 \\) and \\( n = 50 \\). The robust error gap between standard and adversarial training as a function of the adversarial budget \\( \\epsilon_{\\mathrm{tr}} = 5 \\) independent experiments (blue) and the lower bound given in Theorem 3.1 (gray). In (b) and (c), we set \\( d = 10000 \\) and vary the number of samples \\( n \\). (b) The robust error of standard and adversarial training with \\( \\epsilon_{\\mathrm{tr}} = 4.5 \\). 
(c) The error gap and the lower bound of Theorem 3.1. For more experimental details see Appendix C." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.296, + 0.825, + 0.381 + ], + "angle": 0, + "content": "Robust max-\\(\\ell_2\\)-margin classifier We study a classifier that is the solution of running gradient descent on the adversarial logistic loss. A long line of work (Soudry et al., 2018; Ji & Telgarsky, 2019; Chizat & Bach, 2020; Nacson et al., 2019; Liu et al., 2020) studies the implicit bias of (S)GD on the (standard) logistic loss and separable data. In particular, they show directional convergence to the max-margin solution. For the adversarial logistic loss and linear models in particular, (S)GD converges to the robust max-\\(\\ell_2\\)-margin solution (Li et al., 2020)," + }, + { + "type": "equation", + "bbox": [ + 0.363, + 0.387, + 0.825, + 0.417 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} := \\underset {\\| \\theta \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ], x _ {i} ^ {\\prime} \\in T (x _ {i}; \\epsilon_ {\\mathrm {t r}})} {\\min } y _ {i} \\theta^ {\\top} x _ {i} ^ {\\prime}. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.424, + 0.825, + 0.453 + ], + "angle": 0, + "content": "Even though our result is proven for the max-\\(\\ell_2\\)-margin classifier, it can easily be extended to other interpolators." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.469, + 0.322, + 0.482 + ], + "angle": 0, + "content": "3.2 MAIN RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.494, + 0.825, + 0.524 + ], + "angle": 0, + "content": "We are now ready to characterize the \\(\\epsilon_{\\mathrm{te}}\\)-robust error as a function of \\(\\epsilon_{\\mathrm{tr}}\\), the separation \\(r\\), the dimension \\(d\\) and sample size \\(n\\) of the data. 
In the theorem statement we use the following quantities" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.529, + 0.68, + 0.571 + ], + "angle": 0, + "content": "\\[\n\\varphi_ {\\min } = \\frac {\\sigma}{r / 2 - \\epsilon_ {\\mathrm {t e}}} \\left(\\sqrt {\\frac {d - 1}{n}} - \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right)\\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.573, + 0.68, + 0.614 + ], + "angle": 0, + "content": "\\[\n\\varphi_ {\\max } = \\frac {\\sigma}{r / 2 - \\epsilon_ {\\mathrm {t e}}} \\left(\\sqrt {\\frac {d - 1}{n}} + \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right)\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.619, + 0.825, + 0.651 + ], + "angle": 0, + "content": "that arise from concentration bounds for the singular values of the random data matrix. Further, let \\(\\tilde{\\epsilon} := \\frac{r}{2} - \\frac{\\varphi_{\\max}}{\\sqrt{2}}\\) and denote by \\(\\Phi\\) the cumulative distribution function of a standard normal." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.655, + 0.827, + 0.698 + ], + "angle": 0, + "content": "Theorem 3.1. Assume \\( d - 1 > n \\). For test samples from \\( \\mathbb{P}_r \\), perturbation set type \\( T \\) as in Equation 3 and any \\( 0 \\leq \\epsilon_{te} < \\frac{r}{2} \\), the following holds for the \\( \\epsilon_{te} \\)-robust error of the classifier (Equation 1) resulting from \\( \\epsilon_{tr} \\)-adversarial training:" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.709, + 0.66, + 0.723 + ], + "angle": 0, + "content": "1. 
The \\(\\epsilon_{te}\\)-robust error of the \\(\\epsilon_{tr}\\)-robust max-margin estimator reads" + }, + { + "type": "equation", + "bbox": [ + 0.415, + 0.73, + 0.825, + 0.771 + ], + "angle": 0, + "content": "\\[\n\\operatorname {E r r} \\left(\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}\\right) = \\Phi \\left(- \\frac {\\left(\\frac {r}{2} - \\epsilon_ {t r}\\right)}{\\tilde {\\varphi}}\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.777, + 0.825, + 0.806 + ], + "angle": 0, + "content": "for a random quantity \\(\\tilde{\\varphi} > 0\\) depending on \\(\\sigma, r, \\epsilon_{te}\\) and is hence strictly increasing in the adversarial training budget \\(\\epsilon_{tr}\\)." + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.814, + 0.825, + 0.843 + ], + "angle": 0, + "content": "2. With probability at least \\(1 - \\delta\\), we further have \\(\\varphi_{\\min} \\leq \\tilde{\\varphi} \\leq \\varphi_{\\max}\\) and the following lower bound on the robust error increase by adversarially training with size \\(\\epsilon_{tr}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.299, + 0.849, + 0.825, + 0.884 + ], + "angle": 0, + "content": "\\[\n\\operatorname {E r r} \\left(\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}\\right) - \\operatorname {E r r} \\left(\\widehat {\\theta} ^ {0}; \\epsilon_ {t e}\\right) \\geq \\Phi \\left(\\frac {r / 2}{\\varphi_ {\\min }}\\right) - \\Phi \\left(\\frac {r / 2 - \\min \\left\\{\\epsilon_ {t r} , \\widetilde {\\epsilon} \\right\\}}{\\varphi_ {\\min }}\\right). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.927 + ], + "angle": 0, + "content": "The proof can be found in Appendix A and primarily relies on estimation of singular values of high-dimensional matrices. 
Note that the theorem holds for any \\(0 \\leq \\epsilon_{\\mathrm{te}} < \\frac{r}{2}\\) and hence also directly" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.118, + 0.391, + 0.207 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.223, + 0.213, + 0.353, + 0.226 + ], + "angle": 0, + "content": "(a) Robust error vs \\(\\epsilon_{\\mathrm{tr}}\\)" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.118, + 0.603, + 0.207 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.406, + 0.213, + 0.593, + 0.226 + ], + "angle": 0, + "content": "(b) Robust error decomposition" + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.101, + 0.816, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.655, + 0.213, + 0.766, + 0.226 + ], + "angle": 0, + "content": "(c) Intuition in 2D" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.238, + 0.825, + 0.317 + ], + "angle": 0, + "content": "Figure 4: (a) We set \\( d = 1000 \\) and \\( r = 12 \\). The robust error as a function of the adversarial training budget \\( \\epsilon_{\\mathrm{tr}} \\) for different \\( d / n \\). (b) The robust error decomposition into susceptibility and standard error as a function of the adversarial budget \\( \\epsilon_{\\mathrm{tr}} \\). Full experimental details can be found in Section C. (c) 2D illustration providing intuition for the linear setting. The effect of adversarial training with directed attacks is captured in the yellow dotted lines: adversarily perturbed training points move closer to the true boundary which in turn tilts the decision boundary more heavily in the wrong direction." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.323, + 0.825, + 0.409 + ], + "angle": 0, + "content": "applies to the standard error by setting \\(\\epsilon_{\\mathrm{te}} = 0\\). In Figure 3, we empirically confirm the statements of Theorem 3.1 by performing multiple experiments on synthetic datasets as described in Subsection 3.1 with different choices of \\(d / n\\) and \\(\\epsilon_{\\mathrm{tr}}\\). In the first statement, we prove that for small sample-size \\((n < d - 1)\\) noiseless data, almost surely, the robust error increases monotonically with adversarial training budget \\(\\epsilon_{\\mathrm{tr}} > 0\\). In Figure 3a, we plot the robust error gap between standard and adversarial logistic regression as a function of the adversarial training budget \\(\\epsilon_{\\mathrm{tr}}\\) for 5 runs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.414, + 0.825, + 0.513 + ], + "angle": 0, + "content": "The second statement establishes a simplified lower bound on the robust error increase for adversarial training (for a fixed \\(\\epsilon_{\\mathrm{tr}} = \\epsilon_{\\mathrm{te}}\\)) compared to standard training. In Figures 3a and 3c, we show how the lower bound closely predicts the robust error gap in our synthetic experiments. Furthermore, by the dependence of \\(\\varphi_{\\mathrm{min}}\\) on the overparameterization ratio \\(d / n\\), the lower bound on the robust error gap is amplified for large \\(d / n\\). Indeed, Figure 3c shows how the error gap increases with \\(d / n\\) both theoretically and experimentally. However, when \\(d / n\\) increases above a certain threshold, the gap decreases again, as standard training fails to learn the signal and yields a high error (see Figure 3b)." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.525, + 0.342, + 0.538 + ], + "angle": 0, + "content": "3.3 PROOF INTUITION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.547, + 0.827, + 0.618 + ], + "angle": 0, + "content": "The reason that adversarial training hurts robust generalization is based on an extreme robust vs. standard error trade-off. We now provide intuition for the effect of directed attacks and the low sample regime on the \\(\\epsilon_{\\mathrm{tr}}\\)-robust max-\\(\\ell_2\\)-margin solution by decomposing the robust error \\(\\mathrm{Err}(\\theta; \\epsilon_{\\mathrm{te}})\\). Notice that \\(\\epsilon_{\\mathrm{te}}\\)-robust error \\(\\mathrm{Err}(\\theta; \\epsilon_{\\mathrm{te}})\\) can be written as the probability of the union of two events: the event that the classifier based on \\(\\theta\\) is wrong and the event that the classifier is susceptible to attacks:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.623, + 0.825, + 0.668 + ], + "angle": 0, + "content": "\\[\n\\operatorname {E r r} (\\theta ; \\epsilon_ {\\mathrm {t e}}) = \\mathbb {E} _ {x, y \\sim \\mathbb {P}} \\left[ \\mathbb {I} \\left\\{y f _ {\\theta} (x) < 0 \\right\\} \\vee \\max _ {x ^ {\\prime} \\in T (x; \\epsilon_ {\\mathrm {t e}})} \\mathbb {I} \\left\\{f _ {\\theta} (x) f _ {\\theta} \\left(x ^ {\\prime}\\right) < 0 \\right\\} \\right] \\leq \\operatorname {E r r} (\\theta ; 0) + \\operatorname {S u s c} (\\theta ; \\epsilon_ {\\mathrm {t e}}) \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.668, + 0.827, + 0.809 + ], + "angle": 0, + "content": "where \\(\\mathrm{Susc}(\\theta ;\\epsilon_{\\mathrm{te}})\\) is the expectation of the maximization term in Equation 7. \\(\\mathrm{Susc}(\\theta ;\\epsilon_{\\mathrm{te}})\\) represents the \\(\\epsilon_{\\mathrm{te}}\\)-attack-susceptibility of a classifier induced by \\(\\theta\\) and \\(\\mathrm{Err}(\\theta ;0)\\) its standard error. 
In our linear setting, we can lower bound Equation 7 by \\(\\mathrm{Err}(\\theta ;0) + \\frac{1}{2}\\mathrm{Susc}(\\theta ;\\epsilon_{\\mathrm{te}})\\). Hence, Equation 7 suggests that the robust error can only be small if both the standard error and susceptibility are small. In Figure 4b, we plot the decomposition of the robust error in standard error and susceptibility for adversarial logistic regression with increasing \\(\\epsilon_{\\mathrm{tr}}\\). We observe that increasing \\(\\epsilon_{\\mathrm{tr}}\\) increases the standard error too drastically compared to the decrease in susceptibility, leading to a drop in robust accuracy. For completeness, in Appendix B, we provide upper and lower bounds for the susceptibility score. We now explain why, in the small-sample size regime, adversarial training with directed attacks 3 may increase standard error to the extent that it dominates the decrease in susceptibility." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.814, + 0.825, + 0.892 + ], + "angle": 0, + "content": "A key observation is that the robust max-\\(\\ell_2\\)-margin solution of a dataset \\(D = \\{(x_i, y_i)\\}_{i=1}^n\\) maximizes the minimum margin that reads \\(\\min_{i \\in [n]} y_i \\theta^\\top (x_i - y_i \\epsilon_{\\mathrm{tr}} | \\theta_{[1]}| e_1)\\), where \\(\\theta_{[i]}\\) refers to the \\(i\\)-th entry of vector \\(\\theta\\). Therefore, it simply corresponds to the max-\\(\\ell_2\\)-margin solution of the dataset shifted towards the decision boundary \\(D_{\\epsilon_{\\mathrm{tr}}} = \\{(x_i - y_i \\epsilon_{\\mathrm{tr}} | \\widehat{\\theta}_{[1]}^{\\epsilon_{\\mathrm{tr}}} | e_1, y_i)\\}_{i=1}^n\\). 
Using this fact, we obtain a closed-form expression of the (normalized) max-margin solution 4 as a function of \\(\\epsilon_{\\mathrm{tr}}\\) that reads" + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.897, + 0.825, + 0.93 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{(r - 2 \\epsilon_ {\\mathrm {t r}}) ^ {2} + 4 \\tilde {\\gamma} ^ {2}} \\left[ r - 2 \\epsilon_ {\\mathrm {t r}}, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right], \\tag {8}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.827, + 0.135 + ], + "angle": 0, + "content": "where \\( \\| \\tilde{\\theta} \\|_2 = 1 \\) and \\( \\tilde{\\gamma} > 0 \\) is a random quantity associated with the max- \\( \\ell_2 \\)-margin solution of the \\( d - 1 \\) dimensional Gaussian inputs orthogonal to the signal direction (see Lemma A.1 in Section A)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.268 + ], + "angle": 0, + "content": "In high dimensions, with high probability any two Gaussian random vectors are far apart – in our distributional setting, this corresponds to the vectors being far apart in the non-signal directions. In Figure 4c, we illustrate the phenomenon using a 2D cartoon, where the few samples in the dataset are all far apart in the non-signal direction. We see how shifting the dataset closer to the true decision boundary, may result in a max-margin solution (yellow) that aligns much worse with the ground truth (gray), compared to the estimator learned from the original points (blue). 
Even though the new (robust max-margin) classifier (yellow) is less susceptible to attacks in the signal dimension, it also uses the signal dimension less. Mathematically, this is reflected in the expression of the max-margin solution in Equation 8: We see that the first (signal) dimension is used less as \\(\\epsilon_{\\mathrm{tr}}\\) increases." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.278, + 0.43, + 0.292 + ], + "angle": 0, + "content": "3.4 GENERALITY OF THE RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.301, + 0.825, + 0.317 + ], + "angle": 0, + "content": "In this section we discuss how Theorem 3.1 might generalize to other perturbation sets and models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.323, + 0.825, + 0.395 + ], + "angle": 0, + "content": "Signal direction is known The type of additive perturbations used in Theorem 3.1, defined in Equation 3, is explicitly constrained to the direction of the true signal. This choice is reminiscent of corruptions where every possible perturbation in the set is directly targeted at the object to be recognized, such as motion blur of moving objects. Such corruptions are also studied in the context of domain generalization and adaptation (Schneider et al., 2020)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.401, + 0.825, + 0.445 + ], + "angle": 0, + "content": "Directed attacks in general, however, may also consist of perturbation sets that are only strongly biased towards the true signal direction. They may find the true signal direction only when the inner maximization is exact. 
The following corollary extends Theorem 3.1 to small \\(\\ell_1\\)-perturbations" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.45, + 0.825, + 0.468 + ], + "angle": 0, + "content": "\\[\nT (x; \\epsilon) = \\left\\{x ^ {\\prime} = x + \\delta \\mid \\| \\delta \\| _ {1} \\leq \\epsilon \\right\\}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.473, + 0.827, + 0.49 + ], + "angle": 0, + "content": "for \\(0 < \\epsilon < \\frac{r}{2}\\) that reflect such attacks. We state the corollary here and give the proof in Appendix A." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.492, + 0.706, + 0.508 + ], + "angle": 0, + "content": "Corollary 3.2. Theorem 3.1 also holds for 4 with perturbation sets defined in 9." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.512, + 0.825, + 0.542 + ], + "angle": 0, + "content": "The proof uses the fact that the inner maximization effectively results in a sparse perturbation equivalent to the attack resulting from the perturbation set defined in Equation 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.827, + 0.689 + ], + "angle": 0, + "content": "Other models Motivated by the implicit bias results of (stochastic) gradient descent on the logistic loss, Theorem 3.1 is proven for the max-\\(\\ell_2\\)-margin solution. We would like to conjecture that for the data distribution in Section 3, adversarial training can hurt robust generalization also for other models with zero training error (interpolators in short). For example, Adaboost is a widely used algorithm that converges to the max-\\(\\ell_1\\)-margin classifier (Telgarsky, 2013). One might argue that for a sparse ground truth, the max-\\(\\ell_1\\)-margin classifier should (at least in the noiseless case) have the right inductive bias to alleviate large bias in high dimensions. Hence, in many cases the (sparse) max-\\(\\ell_1\\)-margin solution might align with the ground truth for a given dataset. 
However, we conjecture that even in this case, the robust max-\\(\\ell_1\\)-margin solution would be misled to choose a wrong sparse solution. This can be seen with the help of the cartoon illustration in Figure 4c." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.701, + 0.449, + 0.716 + ], + "angle": 0, + "content": "4 REAL-WORLD EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.728, + 0.827, + 0.828 + ], + "angle": 0, + "content": "In this section, we demonstrate that the proof intuition of the linear case may generalize to more complex models. Specifically, the insights from Section 3 helped us to identify realistic directed attacks on standard image datasets for which adversarial training hurts robust accuracy in the low sample regime. In what follows, we present experimental results for corruption attacks on the Waterbirds dataset. Due to space constraints, results on the mask attacks on CIFAR-10 can be found in Appendix E. The corresponding experimental details and more results on other additional image datasets (such as the hand gestures dataset) can be found in Appendices D, E and F." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.843, + 0.387, + 0.857 + ], + "angle": 0, + "content": "4.1 DATASETS AND MODELS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.927 + ], + "angle": 0, + "content": "We consider three datasets: the Waterbirds dataset, CIFAR-10 and a hand gesture dataset. Due to space constraints, we describe CIFAR-10 and the hand gesture dataset in Appendix E and F. 
Apart from CIFAR-10 and the hand gesture dataset, we build a new version of the Waterbirds dataset, consisting of images of water- and landbirds of size \\(256 \\times 256\\) and labels that distinguish the two" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.961 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.101, + 0.391, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.207, + 0.388, + 0.222 + ], + "angle": 0, + "content": "(a) Robust error with increasing \\(\\epsilon_{\\mathrm{tr}}\\)" + }, + { + "type": "image", + "bbox": [ + 0.397, + 0.103, + 0.603, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.405, + 0.207, + 0.593, + 0.221 + ], + "angle": 0, + "content": "(b) Robust error decomposition" + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.109, + 0.821, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.207, + 0.802, + 0.221 + ], + "angle": 0, + "content": "(c) Robust error vs. #samples" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.233, + 0.828, + 0.336 + ], + "angle": 0, + "content": "Figure 5: Experiments on the Waterbirds dataset considering the adversarial illumination attack with \\(\\epsilon_{\\mathrm{te}} = 0.3\\). We plot the mean and standard deviation of the mean of several independent experiments. (a) The robust error increases with larger \\(\\epsilon_{\\mathrm{tr}}\\) in the low sample size regime. (b) We set \\(n = 20\\) and plot the robust error decomposition as in Equation 7 with increasing \\(\\epsilon_{\\mathrm{tr}}\\). 
While the susceptibility decreases slightly, the increase in standard error is much more severe, resulting in an increase in robust error. (c) The robust error of standard training and adversarial training as a function of the number of samples, where the smallest sample size still yields small (\\(< 10\\%\\)) standard test error for standard training. While adversarial training hurts for small sample sizes, it helps for larger sample sizes. For more experimental details see App. D." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.341, + 0.828, + 0.44 + ], + "angle": 0, + "content": "types of birds. Using code provided by Sagawa et al. (2020), we construct the dataset as follows: First, we sample equally many water- and landbirds from the CUB-200 dataset (Welinder et al., 2010). Then, we segment the birds and paste them onto a background image that is randomly sampled (without replacement) from the Places-256 dataset (Zhou et al., 2017). Also, following the choice of Sagawa et al. (2020), we use as models a ResNet50 and a ResNet18 that were both pretrained on ImageNet and achieve near perfect standard accuracy. In Appendix D, we complement the results of this section by reporting the results of similar experiments with different architectures." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.456, + 0.54, + 0.469 + ], + "angle": 0, + "content": "4.2 IMPLEMENTATION OF THE DIRECTED ATTACKS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.481, + 0.827, + 0.566 + ], + "angle": 0, + "content": "In this section, we consider two attacks on the Waterbirds dataset: motion blur and adversarial illumination as depicted in Figure 2. In Appendix E, we also discuss the mask attack, which should mimic occlusions of objects in images that are physically realizable (Eykholt et al., 2018; Wu et al., 2020). On the other hand, motion blur may arise naturally when photographing fast moving objects with a slow shutter speed. 
Lastly, adversarial illumination may result from adversarial lighting conditions. Next, we describe the motion blur and adversarial illumination attacks in more detail." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.573, + 0.827, + 0.742 + ], + "angle": 0, + "content": "Motion blur For the Waterbirds dataset we can implement motion blur attacks on the object (the bird) specifically, a natural corruption that could occur if birds move at speeds that are faster than the shutter speed. The aim is to be robust against all motion blur severity levels up to \\( M_{max} = 15 \\). To simulate motion blur, we apply a motion blur filter with a kernel of size \\( M \\) on the segmented bird before we paste it onto the background image. We can change the severity level of the motion blur by increasing the kernel size of the filter. See Appendix D for concrete expressions of the motion blur kernel. Intuitively the worst attack should be the most severe blur, rendering a search over a range of severity superfluous. However, similar to rotations, this is not necessarily true in practice since the training loss on neural networks is generally nonconvex. Therefore, for an exact evaluation of the robust error at test time, we perform a full grid search over all kernel sizes in \\( [1,2,\\dots,M_{max}] \\). We refer to Figure 2d and Section D for an illustration of our motion blur attack. During training time, we perform an approximate search over kernels with sizes \\( 2i \\) for \\( i = 1,\\dots,M_{max}/2 \\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.748, + 0.827, + 0.889 + ], + "angle": 0, + "content": "Adversarial illumination As a second attack on the Waterbirds dataset, we consider adversarial illumination. The adversary can darken or brighten the bird without corrupting the background of the image. The attack aims to model images where the object at interest is hidden in shadows or placed against bright light. 
To compute the adversarial illumination attack, we modify the brightness of the segmented bird by adding a constant \\(a \\in [-\\epsilon_{\\mathrm{te}}, \\epsilon_{\\mathrm{te}}]\\) to all pixel values, before pasting the bird onto the background image. With an analogous argument as for the adversarial search for motion blur, the exact evaluation requires an actual search over the interval \\([- \\epsilon_{\\mathrm{te}}, \\epsilon_{\\mathrm{te}}]\\). We find the most adversarial lighting level, i.e. the value of \\(a\\), by equidistantly partitioning the interval \\([- \\epsilon_{\\mathrm{te}}, \\epsilon_{\\mathrm{te}}]\\) in \\(K\\) steps and performing a full list-search over all steps. See Figure 2c and Appendix D for an illustration of the adversarial illumination attack. We choose \\(K = 65, 33\\) during test and training time respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Adversarial training For all datasets and attacks, we run SGD until convergence on the robust cross-entropy loss 2. 
In each iteration, we search for an adversarial example as described above and" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.101, + 0.391, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.184, + 0.205, + 0.391, + 0.22 + ], + "angle": 0, + "content": "(a) Robust error with increasing \\(\\epsilon_{\\mathrm{tr}}\\)" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.101, + 0.603, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.405, + 0.205, + 0.593, + 0.22 + ], + "angle": 0, + "content": "(b) Robust error decomposition" + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.102, + 0.813, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.622, + 0.205, + 0.798, + 0.22 + ], + "angle": 0, + "content": "(c) Robust error vs. #samples" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.231, + 0.827, + 0.334 + ], + "angle": 0, + "content": "Figure 6: Experiments on the (subsampled) Waterbirds dataset using the motion blur attack. (a) Even though adversarial training hurts robust generalization for low sample size (\\(n = 20\\)), it helps for \\(n = 50\\). (b) For \\(n = 20\\), the decomposition of the robust error in standard error and susceptibility as a function of adversarial budget \\(\\epsilon_{\\mathrm{tr}}\\). The increase in standard error is more severe than the drop in susceptibility, leading to a slight increase in robust error. (c) The robust error of standard and adversarial training on settings where the test error after standard training is small as a function of the number of samples. 
While adversarial training hurts for small sample sizes, it helps for larger sample sizes. For each experiment we plot the mean and standard deviation of the mean of independent experiments. For more experimental details see App. D." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.345, + 0.825, + 0.388 + ], + "angle": 0, + "content": "update the weights using a gradient with respect to the resulting perturbed example (Goodfellow et al., 2015; Madry et al., 2018). For every experiment, we choose the learning rate and weight decay parameters that minimize the robust error on a hold-out dataset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.403, + 0.655, + 0.416 + ], + "angle": 0, + "content": "4.3 ADVERSARIAL TRAINING CAN HURT ROBUST GENERALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.425, + 0.825, + 0.495 + ], + "angle": 0, + "content": "We now present our experimental results on the Waterbirds dataset for both motion blur and adversarial illumination attacks. First of all, Figure 5a and 6a show that the phenomenon characterized in the linear setting by Theorem 3.1 also occurs for directed attacks on the Waterbirds dataset: as we increase the adversarial training budget \\(\\epsilon_{\\mathrm{tr}}\\) starting from zero (standard training), the robust error monotonically increases." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.502, + 0.827, + 0.669 + ], + "angle": 0, + "content": "Furthermore, to gain intuition as described in Section 3.3, we also plot the robust error decomposition (Equation 7) consisting of the standard error and susceptibility in Figure 5b and 6b. Recall that we measure susceptibility as the fraction of data points in the test set for which the classifier predicts a different class under an adversarial attack. As in our linear example, we observe an increase in robust error despite a slight drop in susceptibility, because of the more severe increase in standard error. 
Moreover, Figures 1 and 6c show that analogous to our linear example, this phenomenon is specific to the low sample regime: for large sample size adversarial training outperforms standard training as expected. Note again that even the smallest sample size is large enough to yield a standard test error \\(< 10\\%\\) for standard training. Similar experiments for CIFAR-10 can be found in Appendix E. Finally, we empirically confirm in Appendix D.8 that our phenomenon is specific to directed attacks: for undirected attacks such as bounded \\(\\ell_{\\infty}\\) and \\(\\ell_{2}\\)-ball perturbations, adversarial training helps robust generalization also in the low sample size regime." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.684, + 0.304, + 0.697 + ], + "angle": 0, + "content": "4.4 DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.706, + 0.825, + 0.736 + ], + "angle": 0, + "content": "We now discuss how different algorithmic choices, motivated by related work, might affect how adversarial training hurts robust generalization." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.746, + 0.827, + 0.845 + ], + "angle": 0, + "content": "Catastrophic overfitting Often the worst-case perturbation during adversarial training is found using an approximate algorithm such as SGD. It is common belief that using the strongest attack (in the motion blur case, full grid search) during training also results in better robust generalization. In particular, the literature on catastrophic overfitting shows that weaker attacks during training lead to bad performance on stronger attacks during testing (Wong et al., 2020; Andriushchenko & Flammarion, 2020; Li et al., 2021). Our results suggest the opposite in the low sample size regime for directed attacks: the weaker the attack during training, the better adversarial training performs." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Robust overfitting Recent work observes empirically (Rice et al., 2020) and theoretically (Sanyal et al., 2020; Donhauser et al., 2021), that perfectly minimizing the adversarial loss during training might in fact be suboptimal for robust generalization; that is, classical regularization techniques might lead to higher robust accuracy. This phenomenon is often referred to as robust overfitting. May the phenomenon be mitigated using standard regularization techniques? In Appendix D we shed light" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "on this question and show that adversarial training hurts robust generalization even when standard regularization methods such as early stopping are used." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.154, + 0.343, + 0.168 + ], + "angle": 0, + "content": "5 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.177, + 0.827, + 0.275 + ], + "angle": 0, + "content": "Robust and non-robust useful features In the words of Ilyas et al. (2019) and Springer et al. (2021) we can describe the intuition behind \"our phenomenon\" as follows: for directed attacks, all robust features become less useful, but adversarial training uses robust features more. In the small sample-size regime, \\( n < d - 1 \\) in particular, robust learning assigns too much weight on the robust (possibly non-useful) features that then dominate the non-robust (but useful) features. 
Even though they define these concepts, they don't make our statement, but show that adversarial training reduces the reliance on non-robust but possibly useful features." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.283, + 0.828, + 0.368 + ], + "angle": 0, + "content": "Small sample size and robustness A direct consequence of Theorem 3.1 is that in order to achieve the same robust error as standard training, adversarial training requires more samples. This statement might remind the reader of sample complexity results for robust generalization in Schmidt et al. (2018); Yin et al. (2019); Khim & Loh (2018). While those results compare sample complexity bounds for standard vs. robust error, our theorem statement compares two algorithms, standard vs. adversarial training, with respect to the robust error." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.375, + 0.827, + 0.529 + ], + "angle": 0, + "content": "Trade-off between standard and robust error Many papers observed that even though adversarial training decreases robust error compared to standard training, it may lead to an increase in standard test error Madry et al. (2018); Zhang et al. (2019). For example, Tsipras et al. (2019); Zhang et al. (2019); Javanmard et al. (2020); Dobriban et al. (2020); Chen et al. (2020) study settings where the Bayes optimal robust classifier is not equal to the Bayes optimal (standard) classifier (i.e. the perturbations are inconsistent or the dataset is non-separable). Raghunathan et al. (2020) study consistent perturbations, as in our paper, and prove that for small sample size, fitting adversarial examples can increase standard error even in the absence of noise. Empirically, Dong et al. (2021); Mendonça et al. (2022) show that for \\(\\ell_p\\)-attacks low-quality data might be the main cause of the trade-off. 
While aforementioned works focus on the decrease in standard error, we prove that for directed attacks, in the small sample regime adversarial training may in fact increase robust error." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.536, + 0.827, + 0.661 + ], + "angle": 0, + "content": "Mitigation of the trade-off A long line of work has proposed procedures to mitigate the trade-off between robust and standard accuracy. For example Alayrac et al. (2019); Carmon et al. (2019); Zhai et al. (2019); Raghunathan et al. (2020) study robust self training, which leverages a large set of unlabelled data, while Lee et al. (2020); Lamb et al. (2019); Xu et al. (2020) use data augmentation by interpolation. Ding et al. (2020); Balaji et al. (2019); Cheng et al. (2020) on the other hand propose to use adaptive perturbation budgets \\(\\epsilon_{\\mathrm{tr}}\\) that vary across inputs. The intuition behind our theoretical analysis suggests that the standard mitigation procedures for imperceptible perturbations may not work for perceptible directed attacks, because all relevant features are non-robust. We leave a thorough empirical study as interesting future work." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.674, + 0.466, + 0.689 + ], + "angle": 0, + "content": "6 SUMMARY AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.705, + 0.827, + 0.886 + ], + "angle": 0, + "content": "This paper aims to caution the practitioner against blindly following current widespread practices to increase the robust performance of machine learning models. Specifically, adversarial training is currently recognized to be one of the most effective defense mechanisms for \\(\\ell_p\\)-perturbations, significantly outperforming robust performance of standard training. 
However, we prove that in the low sample size regime this common wisdom is not applicable for consistent directed attacks, which efficiently focus their attack budget to target the ground truth class information. In terms of follow-up work on directed attacks in the low sample regime, there are some concrete questions that would be interesting to explore. For example, as discussed in Section 5, it would be useful to test whether some methods to mitigate the standard accuracy vs. robustness trade-off would also relieve the perils of adversarial training for directed attacks. Further, we hypothesize that when few samples are available, one should avoid training with attacks that may heavily reduce class information, independently of the attacks at test time. If this hypothesis were confirmed, it would break with yet another general rule that the best defense perturbation type should always match the attack during evaluation." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.103, + 0.362, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENT" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.133, + 0.554, + 0.15 + ], + "angle": 0, + "content": "Supported by the Hasler Foundation grant number 21050." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.168, + 0.289, + 0.184 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.19, + 0.828, + 0.235 + ], + "angle": 0, + "content": "Jean-Baptiste Alayrac, Jonathan Uesato, Po-Sen Huang, Alhussein Fawzi, Robert Stanforth, and Pushmeet Kohli. Are labels required for improving adversarial robustness? Advances in Neural Information Processing Systems, pp. 12214-12223, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.24, + 0.826, + 0.272 + ], + "angle": 0, + "content": "Maksym Andriushchenko and Nicolas Flammarion. Understanding and improving fast adversarial training. Advances in Neural Information Processing Systems, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.278, + 0.827, + 0.336 + ], + "angle": 0, + "content": "Tao Bai, Jinqi Luo, Jun Zhao, Bihan Wen, and Qian Wang. Recent advances in adversarial training for adversarial robustness. In Zhi-Hua Zhou (ed.), The 30th International Joint Conference on Artificial Intelligence, pp. 4312-4321. International Joint Conferences on Artificial Intelligence Organization, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.342, + 0.825, + 0.373 + ], + "angle": 0, + "content": "Yogesh Balaji, Tom Goldstein, and Judy Hoffman. Instance adaptive adversarial training: Improved accuracy tradeoffs in neural nets. arXiv preprint arXiv:1910.08051, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.379, + 0.698, + 0.397 + ], + "angle": 0, + "content": "G. Bradski. The OpenCV Library. Dr. Dobb's Journal of Software Tools, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.402, + 0.826, + 0.446 + ], + "angle": 0, + "content": "Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, Percy Liang, and John C Duchi. Unlabeled data improves adversarial robustness. In The 33rd International Conference on Neural Information Processing Systems, pp. 11192-11203, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.453, + 0.826, + 0.497 + ], + "angle": 0, + "content": "Lin Chen, Yifei Min, Mingrui Zhang, and Amin Karbasi. More data can expand the generalization gap between adversarially robust and standard models. In The 36th International Conference on Machine Learning, pp. 1670-1680, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.503, + 0.825, + 0.534 + ], + "angle": 0, + "content": "Minhao Cheng, Qi Lei, Pin-Yu Chen, Inderjit Dhillon, and Cho-Jui Hsieh. Cat: Customized adversarial training for improved robustness. arXiv preprint arXiv:2002.06789, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.54, + 0.827, + 0.584 + ], + "angle": 0, + "content": "Lenaic Chizat and Francis Bach. Implicit bias of gradient descent for wide two-layer neural networks trained with the logistic loss. In The 7th International Conference on Learning Theory, pp. 1305-1338, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.591, + 0.828, + 0.635 + ], + "angle": 0, + "content": "Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In The 37th International Conference on Machine Learning, pp. 2206-2216, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.642, + 0.826, + 0.686 + ], + "angle": 0, + "content": "Gavin Weiguang Ding, Yash Sharma, Kry Yik Chau Lui, and Ruitong Huang. Mma training: Direct input space margin maximization through adversarial training. In The 8th International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.692, + 0.827, + 0.723 + ], + "angle": 0, + "content": "Edgar Dobriban, Hamed Hassani, David Hong, and Alexander Robey. Provable tradeoffs in adversarially robust classification. arXiv preprint arXiv:2006.05161, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.729, + 0.825, + 0.761 + ], + "angle": 0, + "content": "Chengyu Dong, Liyuan Liu, and Jingbo Shang. Data quality matters for adversarial training: An empirical study, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.766, + 0.827, + 0.81 + ], + "angle": 0, + "content": "Konstantin Donhauser, Alexandru Tifrea, Michael Aerni, Reinhard Heckel, and Fanny Yang. 
Interpolation can hurt robust generalization even when there is no noise. The 36th conference on Advances in Neural Information Processing Systems, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.817, + 0.827, + 0.861 + ], + "angle": 0, + "content": "Logan Engstrom, Brandon Tran, Dimitris Tsipras, Ludwig Schmidt, and Aleksander Madry. Exploring the landscape of spatial robustness. In The 36th International Conference on Machine Learning, pp. 1802-1811, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.867, + 0.828, + 0.924 + ], + "angle": 0, + "content": "Kevin Eykholt, Ivan Evtimov, Earlence Fernandes, Bo Li, Amir Rahmati, Chaowei Xiao, Atul Prakash, Tadayoshi Kohno, and Dawn Song. Robust physical-world attacks on deep learning visual classification. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1625-1634, 2018." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.19, + 0.828, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Amin Ghiasi, Ali Shafahi, and Tom Goldstein. Breaking certified defenses: semantic adversarial examples with spoofed robustness certificates. In The 6th International Conference on Learning Representations, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.825, + 0.187 + ], + "angle": 0, + "content": "Justin Gilmer, Ryan P Adams, Ian Goodfellow, David Andersen, and George E Dahl. Motivating the rules of the game for adversarial example research. arXiv preprint arXiv:1807.06732, 2018." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.194, + 0.825, + 0.226 + ], + "angle": 0, + "content": "Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. In The 3th International Conference on Learning Representations, pp. 1-10, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.233, + 0.825, + 0.276 + ], + "angle": 0, + "content": "Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Logan Engstrom, Brandon Tran, and Aleksander Madry. Adversarial examples are not bugs, they are features. In The 33rd conference on Advances in Neural Information Processing Systems, pp. 125-136, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.285, + 0.825, + 0.316 + ], + "angle": 0, + "content": "Adel Javanmard, Mahdi Soltanolkotabi, and Hamed Hassani. Precise tradeoffs in adversarial training for linear regression. In Conference on Learning Theory, pp. 2034-2078, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.324, + 0.825, + 0.354 + ], + "angle": 0, + "content": "Ziwei Ji and Matus Telgarsky. The implicit bias of gradient descent on nonseparable data. In *The 32nd Conference on Learning Theory*, pp. 1772-1798, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.362, + 0.825, + 0.393 + ], + "angle": 0, + "content": "Daniel Kang, Yi Sun, Tom Brown, Dan Hendrycks, and Jacob Steinhardt. Transfer of adversarial robustness between perturbation types. arXiv e-prints, pp. arXiv-1905, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.4, + 0.825, + 0.43 + ], + "angle": 0, + "content": "Justin Khim and Po-Ling Loh. Adversarial risk bounds via function transformation. arXiv preprint arXiv:1810.09519, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.438, + 0.825, + 0.469 + ], + "angle": 0, + "content": "Cassidy Laidlaw, Sahil Singla, and Soheil Feizi. Perceptual adversarial robustness: Defense against unseen threat models. 
In The 9th International Conference on Learning Representation, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.477, + 0.825, + 0.521 + ], + "angle": 0, + "content": "Alex Lamb, Vikas Verma, Juho Kannala, and Yoshua Bengio. Interpolated adversarial training: Achieving robust neural networks without sacrificing too much accuracy. In The 12th ACM Workshop on Artificial Intelligence and Security, pp. 95-103, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.529, + 0.827, + 0.572 + ], + "angle": 0, + "content": "Saehyung Lee, Hyungyu Lee, and Sungroh Yoon. Adversarial vertex mixup: Toward better adversarily robust generalization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.581, + 0.825, + 0.611 + ], + "angle": 0, + "content": "Bai Li, Shiqi Wang, Suman Jana, and Lawrence Carin. Towards understanding fast adversarial training. arXiv preprint arXiv:2006.03089, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.62, + 0.827, + 0.662 + ], + "angle": 0, + "content": "Yan Li, Ethan X.Fang, Huan Xu, and Tuo Zhao. Implicit bias of gradient descent based adversarial training on separable data. In The 8th International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.673, + 0.825, + 0.716 + ], + "angle": 0, + "content": "Wei-An Lin, Chun Pong Lau, Alexander Levine, Rama Chellappa, and Soheil Feizi. Dual manifold adversarial robustness: Defense against lp and non-lp adversarial attacks. In The 34th conference on Advances in Neural Information Processing Systems, pp. 3487-3498, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.725, + 0.825, + 0.768 + ], + "angle": 0, + "content": "Chen Liu, Mathieu Salzmann, Tao Lin, Ryota Tomioka, and Sabine Susstrunk. On the loss landscape of adversarial training: Identifying challenges and how to overcome them. 
In The 35th conference on Advances in Neural Information Processing Systems, pp. 21476-21487, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.777, + 0.825, + 0.834 + ], + "angle": 0, + "content": "Bo Luo, Yannan Liu, Lingxiao Wei, and Qiang Xu. Towards imperceptible and robust adversarial example attacks against neural networks. In The 32nd AAAI Conference on Artificial Intelligence and Thirtieth Innovative Applications of Artificial Intelligence Conference and Eighth AAAI Symposium on Educational Advances in Artificial Intelligence, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.843, + 0.825, + 0.886 + ], + "angle": 0, + "content": "Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In The 6th International Conference on Learning Representations, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.895, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Tomás Mantecón, Carlos R. del Blanco, Fernando Jaureguizar, and Narciso García. A real-time gesture recognition system using near-infrared imagery. PLOS ONE, pp. 1-17, 2019." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.148 + ], + "angle": 0, + "content": "Marcele OK Mendonça, Javier Maroto, Pascal Frossard, and Paulo SR Diniz. Adversarial training with informed data selection. In The 30th European Signal Processing Conference (EUSIPCO), pp. 608-612, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, and Pascal Frossard. Deepfool: a simple and accurate method to fool deep neural networks. In The IEEE conference on computer vision and pattern recognition (CVPR), pp. 2574-2582, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.208, + 0.825, + 0.253 + ], + "angle": 0, + "content": "Abdullah Mujahid, Mazhar Javed Awan, Awais Yasin, Mazin Abed Mohammed, Robertas Damaševićius, Rytis Maskeliūnas, and Karrar Hameed Abdulkareem. Real-time hand gesture recognition based on deep learning yolov3 model. Applied Sciences, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.261, + 0.825, + 0.305 + ], + "angle": 0, + "content": "Mor Shpigel Nacson, Nathan Srebro, and Daniel Soudry. Stochastic gradient descent on separable data: Exact convergence with a fixed learning rate. In The 22nd International Conference on Artificial Intelligence and Statistics, pp. 3051-3059, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.313, + 0.826, + 0.357 + ], + "angle": 0, + "content": "Vaishnavh Nagarajan and J. Zico Kolter. Uniform convergence may be unable to explain generalization in deep learning. In The 33d conference on Advances in Neural Information Processing Systems, pp. 11611-11622, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.365, + 0.825, + 0.396 + ], + "angle": 0, + "content": "Munir Oudah, Ali Al-Naji, and Javaan Chahl. Hand gesture recognition based on computer vision: A review of techniques. Journal of Imaging, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.404, + 0.487, + 0.42 + ], + "angle": 0, + "content": "Huy Phan. huyvnphan/pytorch_cifar10, 1 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.428, + 0.825, + 0.472 + ], + "angle": 0, + "content": "Aditi Raghunathan, Sang Michael Xie, Fanny Yang, John Duchi, and Percy Liang. 
Understanding and mitigating the tradeoff between robustness and accuracy. In The 37th International Conference on Machine Learning, pp. 7909-7919, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.481, + 0.825, + 0.511 + ], + "angle": 0, + "content": "Leslie Rice, Eric Wong, and Zico Kolter. Overfitting in adversarially robust deep learning. In The 37th International Conference on Machine Learning, pp. 8093-8104, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.518, + 0.825, + 0.55 + ], + "angle": 0, + "content": "Shiori Sagawa, Pang Wei Koh, Tatsunori B. Hashimoto, and Percy Liang. Distributionally robust neural networks. In The 7th International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.557, + 0.826, + 0.587 + ], + "angle": 0, + "content": "Amartya Sanyal, Puneet K Dokania, Varun Kanade, and Philip Torr. How benign is benign overfitting? In The 8th International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.595, + 0.826, + 0.64 + ], + "angle": 0, + "content": "Ludwig Schmidt, Shibani Santurkar, Dimitris Tsipras, Kunal Talwar, and Aleksander Madry. Adversarily robust generalization requires more data. In The 32nd conference Advances in Neural Information Processing Systems, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.647, + 0.826, + 0.692 + ], + "angle": 0, + "content": "Steffen Schneider, Evgenia Rusak, Luisa Eck, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Improving robustness against common corruptions by covariate shift adaptation. In The 34th conference on Advances in Neural Information Processing Systems, pp. 11539-11551, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.7, + 0.826, + 0.731 + ], + "angle": 0, + "content": "Daniel Soudry, Elad Hoffer, Mor Shpigel Nacson, Suriya Gunasekar, and Nathan Srebro. The implicit bias of gradient descent on separable data. 
Journal of Machine Learning Research, pp. 1-57, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.739, + 0.826, + 0.782 + ], + "angle": 0, + "content": "Jacob M Springer, Melanie Mitchell, and Garrett T Kenyon. Adversarial perturbations are not so weird: Entanglement of robust and non-robust features in neural network classifiers. arXiv preprint arXiv:2102.05110, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.791, + 0.826, + 0.834 + ], + "angle": 0, + "content": "David Stutz, Matthias Hein, and Bernt Schiele. Disentangling adversarial robustness and generalization. In The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 6967-6987, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.843, + 0.826, + 0.887 + ], + "angle": 0, + "content": "Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In The 2nd International Conference on Learning Representations, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Matus Telgarsky. Margins, shrinkage, and boosting. In The 30th International Conference on Machine Learning, pp. 307-315, 2013." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. Robustness may be at odds with accuracy. In The 7th International Conference on Learning Representations, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.185 + ], + "angle": 0, + "content": "Roman Vershynin. Introduction to the non-asymptotic analysis of random matrices. arXiv preprint arXiv:1011.3027, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.193, + 0.825, + 0.224 + ], + "angle": 0, + "content": "P. Welinder, S. Branson, T. Mita, C. Wah, F. Schroff, S. Belongie, and P. Perona. Caltech-UCSD Birds 200. Technical report, California Institute of Technology, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.231, + 0.825, + 0.261 + ], + "angle": 0, + "content": "Eric Wong, Leslie Rice, and J. Zico Kolter. Fast is better than free: Revisiting adversarial training. In The 8th International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.269, + 0.825, + 0.299 + ], + "angle": 0, + "content": "Tong Wu, Liang Tong, and Yevgeniy Vorobeychik. Defending against physically realizable attacks on image classification. In The 8th International Conference on Learning Representations, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.306, + 0.827, + 0.351 + ], + "angle": 0, + "content": "Minghao Xu, Jian Zhang, Bingbing Ni, Teng Li, Chengjie Wang, Qi Tian, and Wenjun Zhang. Adversarial domain adaptation with domain mixup. In The AAAI Conference on Artificial Intelligence, pp. 6502-6509, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.358, + 0.827, + 0.389 + ], + "angle": 0, + "content": "Shuai Yang, Prashan Premaratne, and Peter Vial. Hand gesture recognition: An overview. In The 5th IEEE International Conference on Broadband Network Multimedia Technology, pp. 63-69, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.396, + 0.825, + 0.427 + ], + "angle": 0, + "content": "Dong Yin, Ramchandran Kannan, and Peter Bartlett. Rademacher complexity for adversarially robust generalization. 
In The 36th International conference on machine learning, pp. 7085-7094, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.433, + 0.825, + 0.464 + ], + "angle": 0, + "content": "Runtian Zhai, Tianle Cai, Di He, Chen Dan, Kun He, John Hopcroft, and Liwei Wang. Adversarily robust generalization just requires more unlabeled data. arXiv preprint arXiv:1906.00555, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.471, + 0.827, + 0.515 + ], + "angle": 0, + "content": "Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric Xing, Laurent El Ghaoui, and Michael Jordan. Theoretically principled trade-off between robustness and accuracy. In *The 36th International Conference on Machine Learning*, pp. 7472-7482, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.523, + 0.825, + 0.567 + ], + "angle": 0, + "content": "Zhengyu Zhao, Zhuoran Liu, and Martha Larson. Towards large yet imperceptible adversarial image perturbations with perceptual color distance. In The IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1039-1048, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.574, + 0.825, + 0.617 + ], + "angle": 0, + "content": "Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.626, + 0.825, + 0.657 + ], + "angle": 0, + "content": "Jianli Zhou, Chao Liang, and Jun Chen. Manifold projection for adversarial defense on face recognition. In The 16th European Conference on Computer Vision, pp. 288-305, 2020." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.657, + 0.119 + ], + "angle": 0, + "content": "A THEORETICAL STATEMENTS FOR THE LINEAR MODEL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.133, + 0.827, + 0.189 + ], + "angle": 0, + "content": "Before we present the proof of the theorem, we introduce two lemmas are of separate interest that are used throughout the proof of Theorem 1. Recall that the definition of the (standard normalized) maximum-\\(\\ell_2\\)-margin solution (max-margin solution in short) of a dataset \\(D = \\{(x_i, y_i)\\}_{i=1}^n\\) corresponds to" + }, + { + "type": "equation", + "bbox": [ + 0.406, + 0.189, + 0.825, + 0.217 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} := \\underset {\\| \\theta \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ]} {\\min } y _ {i} \\theta^ {\\top} x _ {i}, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.22, + 0.825, + 0.267 + ], + "angle": 0, + "content": "by simply setting \\(\\epsilon_{\\mathrm{tr}} = 0\\) in Equation 4. The \\(\\ell_2\\)-margin of \\(\\widehat{\\theta}\\) then reads \\(\\min_{i\\in [n]}y_i\\widehat{\\theta}^\\top x_i\\). Furthermore for a dataset \\(D = \\{(x_{i},y_{i})\\}_{i = 1}^{n}\\) we refer to the induced dataset \\(\\widetilde{D}\\) as the dataset with covariate vectors stripped of the first element, i.e." 
+ }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.27, + 0.825, + 0.288 + ], + "angle": 0, + "content": "\\[\n\\widetilde {D} = \\left\\{\\left(\\tilde {x} _ {i}, y _ {i}\\right) \\right\\} _ {i = 1} ^ {n} := \\left\\{\\left(\\left(x _ {i}\\right) _ {[ 2: d ]}, y _ {i}\\right) \\right\\} _ {i = 1} ^ {n}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.29, + 0.827, + 0.378 + ], + "angle": 0, + "content": "where \\((x_{i})_{[2:d]}\\) refers to the last \\(d - 1\\) elements of the vector \\(x_{i}\\). Furthermore, remember that for any vector \\(z\\), \\(z_{[j]}\\) refers to the \\(j\\)-th element of \\(z\\) and \\(e_j\\) denotes the \\(j\\)-th canonical basis vector. Further, recall the distribution \\(\\mathbb{P}_r\\) as defined in Section 3.1: the label \\(y \\in \\{+1, -1\\}\\) is drawn with equal probability and the covariate vector is sampled as \\(x = [y_{\\frac{r}{2}}, \\tilde{x}]\\) where \\(\\tilde{x} \\in \\mathbb{R}^{d-1}\\) is a random vector drawn from a standard normal distribution, i.e. \\(\\tilde{x} \\sim \\mathcal{N}(0, \\sigma^2 I_{d-1})\\). We generally allow \\(r\\), used to sample the training data, to differ from \\(r_{\\mathrm{test}}\\), which is used during test time." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.383, + 0.825, + 0.425 + ], + "angle": 0, + "content": "The following lemma derives a closed-form expression for the normalized max-margin solution for any dataset with fixed separation \\( r \\) in the signal component, and that is linearly separable in the last \\( d - 1 \\) coordinates with margin \\( \\tilde{\\gamma} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.427, + 0.826, + 0.503 + ], + "angle": 0, + "content": "Lemma A.1. Let \\( D = \\{(x_i, y_i)\\}_{i=1}^n \\) be a dataset that consists of points \\( (x, y) \\in \\mathbb{R}^d \\times \\{\\pm 1\\} \\) and \\( x_{[1]} = y_{\\frac{r}{2}} \\), i.e. 
the covariates \\( x_i \\) are deterministic in their first coordinate given \\( y_i \\) with separation distance \\( r \\). Furthermore, let the induced dataset \\( \\widetilde{D} \\) also be linearly separable by the normalized max- \\( \\ell_2 \\)-margin solution \\( \\tilde{\\theta} \\) with an \\( \\ell_2 \\)-margin \\( \\tilde{\\gamma} \\). Then, the normalized max-margin solution of the original dataset \\( D \\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.406, + 0.503, + 0.825, + 0.535 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} = \\frac {1}{\\sqrt {r ^ {2} + 4 \\tilde {\\gamma} ^ {2}}} \\left[ r, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right]. \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.537, + 0.617, + 0.555 + ], + "angle": 0, + "content": "Further, the standard accuracy of \\(\\widehat{\\theta}\\) for data drawn from \\(\\mathbb{P}_{r_{test}}\\) reads" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.556, + 0.825, + 0.588 + ], + "angle": 0, + "content": "\\[\n\\mathbb {P} _ {r _ {\\text {t e s t}}} \\left(Y \\widehat {\\theta} ^ {\\top} X > 0\\right) = \\Phi \\left(\\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\tilde {\\gamma}}\\right). \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.596, + 0.825, + 0.627 + ], + "angle": 0, + "content": "The proof can be found in Section A.3. The next lemma provides high probability upper and lower bounds for the margin \\(\\tilde{\\gamma}\\) of \\(\\widetilde{D}\\) when \\(\\tilde{x}_i\\) are drawn from the normal distribution." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.63, + 0.826, + 0.66 + ], + "angle": 0, + "content": "Lemma A.2. 
Let \\(\\widetilde{D} = \\{(\\tilde{x}_i, y_i)\\}_{i=1}^n\\) be a random dataset where \\(y_i \\in \\{\\pm 1\\}\\) are equally distributed and \\(\\tilde{x}_i \\sim \\mathcal{N}(0, \\sigma I_{d-1})\\) for all \\(i\\), and \\(\\tilde{\\gamma}\\) is the maximum \\(\\ell_2\\) margin that can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.415, + 0.661, + 0.581, + 0.689 + ], + "angle": 0, + "content": "\\[\n\\tilde{\\gamma} = \\max_{\\| \\tilde{\\theta}\\|_{2}\\leq 1}\\min_{i\\in [n]}y_{i}\\tilde{\\theta}^{\\top}\\tilde{x}_{i}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.693, + 0.825, + 0.712 + ], + "angle": 0, + "content": "Then, for any \\(t \\geq 0\\), with probability greater than \\(1 - 2e^{-\\frac{t^2}{2}}\\), we have \\(\\tilde{\\gamma}_{\\min}(t) \\leq \\tilde{\\gamma} \\leq \\tilde{\\gamma}_{\\max}(t)\\) where" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.713, + 0.756, + 0.754 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\gamma} _ {\\mathrm {m a x}} (t) = \\sigma \\left(\\sqrt {\\frac {d - 1}{n}} + 1 + \\frac {t}{\\sqrt {n}}\\right), \\tilde {\\gamma} _ {\\mathrm {m i n}} (t) = \\sigma \\left(\\sqrt {\\frac {d - 1}{n}} - 1 - \\frac {t}{\\sqrt {n}}\\right).\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.767, + 0.391, + 0.781 + ], + "angle": 0, + "content": "A.1 PROOF OF THEOREM 3.1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.793, + 0.826, + 0.835 + ], + "angle": 0, + "content": "Given a dataset \\( D = \\{(x_{i},y_{i})\\} \\) drawn from \\( \\mathbb{P}_r \\), it is easy to see that the (normalized) \\( \\epsilon_{\\mathrm{tr}} \\)-robust max-margin solution 4 of \\( D \\) with respect to signal-attacking perturbations \\( T(\\epsilon_{\\mathrm{tr}};x_i) \\) as defined in Equation 3, can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.837, + 0.663, + 0.928 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}} = \\arg \\max_{\\| 
\\theta \\|_{2}\\leq 1}\\min_{i\\in [n],x_{i}^{\\prime}\\in T(x_{i};\\epsilon_{\\mathrm{tr}})}y_{i}\\theta^{\\top}x_{i}^{\\prime} \\\\ = \\operatorname *{arg max}_{\\| \\theta \\|_{2}\\leq 1}\\min_{i\\in [n],|\\beta |\\leq \\epsilon_{\\mathrm{tr}}}y_{i}\\theta^{\\top}(x_{i} + \\beta e_{1}) \\\\ = \\operatorname *{arg max}_{\\| \\theta \\|_{2}\\leq 1}\\min_{i\\in [n]}y_{i}\\theta^{\\top}(x_{i} - y_{i}\\epsilon_{\\mathrm{tr}}\\operatorname {sign}(\\theta_{[1]})e_{1}). \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.827, + 0.149 + ], + "angle": 0, + "content": "Note that by definition, it is equivalent to the (standard normalized) max-margin solution \\(\\widehat{\\theta}\\) of the shifted dataset \\(D_{\\epsilon_{\\mathrm{tr}}} = \\{(x_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\theta_{[1]})e_1,y_i)\\}_{i = 1}^n\\). Since \\(D_{\\epsilon_{\\mathrm{tr}}}\\) satisfies the assumptions of Lemma A.1, it then follows directly that the normalized \\(\\epsilon_{\\mathrm{tr}}\\)-robust max-margin solution reads" + }, + { + "type": "equation", + "bbox": [ + 0.351, + 0.154, + 0.825, + 0.188 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{\\sqrt {(r - 2 \\epsilon_ {\\mathrm {t r}}) ^ {2} + 4 \\tilde {\\gamma} ^ {2}}} \\left[ r - 2 \\epsilon_ {\\mathrm {t r}}, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right], \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.826, + 0.227 + ], + "angle": 0, + "content": "by replacing \\(r\\) by \\(r - 2\\epsilon_{\\mathrm{tr}}\\) in Equation 12. 
Similar to above, \\(\\tilde{\\theta} \\in R^{d-1}\\) is the (standard normalized) max-margin solution of \\(\\{(\\tilde{x}_i, y_i)\\}_{i=1}^n\\) and \\(\\tilde{\\gamma}\\) the corresponding margin." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.242, + 0.827, + 0.316 + ], + "angle": 0, + "content": "Proof of 1. We can now compute the \\(\\epsilon_{\\mathrm{te}}\\)-robust accuracy of the \\(\\epsilon_{\\mathrm{tr}}\\)-robust max-margin estimator \\(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) for a given dataset \\(D\\) as a function of \\(\\tilde{\\gamma}\\). Note that in the expression of \\(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\), all values are fixed for a fixed dataset, while \\(0 \\leq \\epsilon_{\\mathrm{tr}} \\leq r - 2\\tilde{\\gamma}_{\\mathrm{max}}\\) can be chosen. First note that for a test distribution \\(\\mathbb{P}_r\\), the \\(\\epsilon_{\\mathrm{te}}\\)-robust accuracy, defined as one minus the robust error (Equation 1), for a classifier associated with a vector \\(\\theta\\), can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.321, + 0.841, + 0.377 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\operatorname {A c c} (\\theta ; \\epsilon_ {\\mathrm {t e}}) = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\left\\{\\min _ {x ^ {\\prime} \\in T (X; \\epsilon_ {\\mathrm {t e}})} Y \\theta^ {\\top} x ^ {\\prime} > 0 \\right\\} \\right] \\tag {15} \\\\ = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\{Y \\theta^ {\\top} X - \\epsilon_ {\\mathrm {t e}} \\theta_ {[ 1 ]} > 0 \\} \\right] = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\{Y \\theta^ {\\top} (X - Y \\epsilon_ {\\mathrm {t e}} \\operatorname {s i g n} (\\theta_ {[ 1 ]}) e _ {1}) > 0 \\} \\right] \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.825, + 0.428 + ], + "angle": 0, + "content": "Now, recall that by Equation 14 and the assumption in 
the theorem, we have \\(r - 2\\epsilon_{\\mathrm{tr}} > 0\\), so that \\(\\mathrm{sign}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}) = 1\\). Further, using the definition of the \\(T(\\epsilon_{\\mathrm{tr}};x)\\) in Equation 3 and by definition of the distribution \\(\\mathbb{P}_r\\), we have \\(X_{[1]} = Y\\frac{r}{2}\\). Plugging into Equation 15 then yields" + }, + { + "type": "equation", + "bbox": [ + 0.273, + 0.435, + 0.72, + 0.51 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\operatorname {A c c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\left\\{Y \\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} ^ {\\top} \\left(X - Y \\epsilon_ {\\mathrm {t e}} e _ {1}\\right) > 0 \\right\\} \\right] \\\\ = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\left\\{Y \\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}} \\top} \\left(X _ {- 1} + Y \\left(\\frac {r}{2} - \\epsilon_ {\\mathrm {t e}}\\right) e _ {1}\\right) > 0 \\right\\} \\right] \\\\ = \\mathbb {P} _ {r - 2 \\epsilon_ {\\mathrm {t e}}} (Y \\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}} \\top} X > 0) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.516, + 0.827, + 0.559 + ], + "angle": 0, + "content": "where \\(X_{-1}\\) is a shorthand for the random vector \\(X_{-1} = (0; X_{[2]}, \\ldots, X_{[d]})\\). The assumptions in Lemma A.1 (\\(D_{\\epsilon_{\\mathrm{tr}}}\\) is linearly separable) are satisfied whenever the \\(n < d - 1\\) samples are distinct, i.e. with probability one. 
Hence applying Lemma A.1 with \\(r_{\\mathrm{test}} = r - 2\\epsilon_{\\mathrm{te}}\\) and \\(r = r - 2\\epsilon_{\\mathrm{tr}}\\) yields" + }, + { + "type": "equation", + "bbox": [ + 0.338, + 0.565, + 0.825, + 0.598 + ], + "angle": 0, + "content": "\\[\n\\operatorname {A c c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = \\Phi \\left(\\frac {r (r - 2 \\epsilon_ {\\mathrm {t e}})}{4 \\sigma \\tilde {\\gamma}} - \\epsilon_ {\\mathrm {t r}} \\frac {r - 2 \\epsilon_ {\\mathrm {t e}}}{2 \\sigma \\tilde {\\gamma}}\\right). \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.604, + 0.825, + 0.645 + ], + "angle": 0, + "content": "Theorem statement a) then follows by noting that \\(\\Phi\\) is a monotonically decreasing function in \\(\\epsilon_{\\mathrm{tr}}\\). The expression for the robust error then follows by noting that \\(1 - \\Phi(-z) = \\Phi(z)\\) for any \\(z \\in \\mathbb{R}\\) and defining" + }, + { + "type": "equation", + "bbox": [ + 0.446, + 0.646, + 0.825, + 0.677 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\varphi} = \\frac {\\sigma \\tilde {\\gamma}}{r / 2 - \\epsilon_ {\\mathrm {t e}}}. \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.69, + 0.827, + 0.717 + ], + "angle": 0, + "content": "Proof of 2. First define \\(\\varphi_{\\mathrm{min}},\\varphi_{\\mathrm{max}}\\) using \\(\\tilde{\\gamma}_{\\mathrm{min}},\\tilde{\\gamma}_{\\mathrm{max}}\\) as in Equation 17. 
Then we have by Equation 16" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.722, + 0.687, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\operatorname {E r r} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) - \\operatorname {E r r} \\left(\\widehat {\\theta} ^ {0}; \\epsilon_ {\\mathrm {t e}}\\right) = \\operatorname {A c c} \\left(\\widehat {\\theta} ^ {0}; \\epsilon_ {\\mathrm {t e}}\\right) - \\operatorname {A c c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) \\\\ = \\Phi \\left(\\frac {r / 2}{\\tilde {\\varphi}}\\right) - \\Phi \\left(\\frac {r / 2 - \\epsilon_ {\\mathrm {t r}}}{\\tilde {\\varphi}}\\right) \\\\ = \\int_ {r / 2 - \\epsilon_ {\\mathrm {t r}}} ^ {r / 2} \\frac {1}{\\sqrt {2 \\pi} \\bar {\\varphi}} \\mathbb {E} ^ {- \\frac {x ^ {2}}{\\bar {\\varphi} ^ {2}}} d x \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.856 + ], + "angle": 0, + "content": "By plugging in \\( t = \\sqrt{\\frac{2\\log 2 / \\delta}{n}} \\) in Lemma A.2, we obtain that with probability at least \\( 1 - \\delta \\) we have" + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.863, + 0.848, + 0.906 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\gamma} _ {\\min } := \\sigma \\left[ \\sqrt {\\frac {d - 1}{n}} - \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right) \\right] \\leq \\tilde {\\gamma} \\leq \\sigma \\left[ \\sqrt {\\frac {d - 1}{n}} + \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right) \\right] =: \\tilde {\\gamma} _ {\\max }\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.91, + 0.407, + 0.926 + ], + "angle": 0, + "content": "and equivalently \\(\\varphi_{\\mathrm{min}}\\leq \\tilde{\\varphi}\\leq \\varphi_{\\mathrm{max}}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": 
"header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.102, + 0.825, + 0.141 + ], + "angle": 0, + "content": "Now note the general fact that for all \\(\\tilde{\\varphi} \\leq \\sqrt{2} x\\) the density function \\(f(\\tilde{\\varphi};x) = \\frac{1}{\\sqrt{2\\pi}\\tilde{\\varphi}}\\mathbb{E}^{-\\frac{x^2}{\\tilde{\\varphi}^2}}\\) is monotonically increasing in \\(\\tilde{\\varphi}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.148, + 0.826, + 0.179 + ], + "angle": 0, + "content": "By assumption of the theorem, \\(\\tilde{\\varphi} \\leq \\sqrt{2}(r/2 - \\epsilon_{\\mathrm{tr}})(r/2 - \\epsilon_{\\mathrm{te}})\\) so that \\(f(\\tilde{\\varphi}; x) \\geq f(\\varphi_{\\min}; x)\\) for all \\(x \\in [r/2 - \\epsilon_{\\mathrm{tr}}, r/2]\\) and therefore" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.183, + 0.79, + 0.22 + ], + "angle": 0, + "content": "\\[\n\\int_ {r / 2 - \\epsilon_ {\\mathrm {t r}}} ^ {r / 2} \\frac {1}{\\sqrt {2 \\pi} \\tilde {\\varphi}} \\mathbb {E} ^ {- \\frac {x ^ {2}}{\\tilde {\\varphi} ^ {2}}} d x \\geq \\int_ {r / 2 - \\epsilon_ {\\mathrm {t r}}} ^ {r / 2} \\frac {1}{\\sqrt {2 \\pi} \\varphi_ {\\mathrm {m i n}}} \\mathbb {E} ^ {- \\frac {x ^ {2}}{\\tilde {\\varphi} ^ {2}}} d x = \\Phi \\left(\\frac {r / 2}{\\varphi_ {\\mathrm {m i n}}}\\right) - \\Phi \\left(\\frac {r / 2 - \\epsilon_ {\\mathrm {t r}}}{\\varphi_ {\\mathrm {m i n}}}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.224, + 0.361, + 0.239 + ], + "angle": 0, + "content": "and the statement is proved." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.255, + 0.41, + 0.269 + ], + "angle": 0, + "content": "A.2 PROOF OF COROLLARY 3.2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.28, + 0.825, + 0.32 + ], + "angle": 0, + "content": "We now show that Theorem 3.1 also holds for \\(\\ell_1\\)-ball perturbations with at most radius \\(\\epsilon\\). Following similar steps as in Equation 14, the \\(\\epsilon_{\\mathrm{tr}}\\)-robust max-margin solution for \\(\\ell_1\\)-perturbations can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.32, + 0.826, + 0.348 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} := \\underset {\\| \\theta \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ]} {\\min } y _ {i} \\theta^ {\\top} \\left(x _ {i} - y _ {i} \\epsilon_ {\\mathrm {t r}} \\operatorname {s i g n} \\left(\\theta_ {[ j ^ {\\star} (\\theta) ]}\\right) e _ {j ^ {\\star} (\\theta)}\\right) \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.35, + 0.827, + 0.426 + ], + "angle": 0, + "content": "where \\( j^{\\star}(\\theta) \\coloneqq \\arg \\max_{j} |\\theta_{j}| \\) is the index of the maximum absolute value of \\( \\theta \\). We now prove by contradiction that the robust max-margin solution for this perturbation set 9 is equivalent to the solution 14 for the perturbation set 3. We start by assuming that \\( \\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}} \\) does not solve Equation 14, which is equivalent to assuming \\( 1 \\notin j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}) \\) by definition. We now show how this assumption leads to a contradiction." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.431, + 0.826, + 0.502 + ], + "angle": 0, + "content": "Define the shorthand \\( j^{\\star} \\coloneqq j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}) - 1 \\). 
Since \\( \\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}} \\) is the solution of 18, by definition, we have that \\( \\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}} \\) is also the max-margin solution of the shifted dataset \\( D_{\\epsilon_{\\mathrm{tr}}} \\coloneqq (x_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\theta_{[j^{\\star} + 1]})e_{j^{\\star} + 1},y_i) \\). Further, note that by the assumption that \\( 1 \\notin j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}) \\), this dataset \\( D_{\\epsilon_{\\mathrm{tr}}} \\) consists of input vectors \\( x_i = (y_i\\frac{r}{2},\\tilde{x}_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\theta_{[j^{\\star} + 1]})e_{j^{\\star} + 1}) \\). Hence via Lemma A.1, \\( \\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}} \\) can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.505, + 0.826, + 0.54 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{\\sqrt {r ^ {2} + 4 \\left(\\tilde {\\gamma} ^ {\\epsilon_ {\\mathrm {t r}}}\\right) ^ {2}}} [ r, 2 \\tilde {\\gamma} ^ {\\epsilon_ {\\mathrm {t r}}} \\tilde {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} ], \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.545, + 0.752, + 0.565 + ], + "angle": 0, + "content": "where \\(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) is the normalized max-margin solution of \\(\\widetilde{D} := (\\tilde{x}_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\tilde{\\theta}_{[j^\\star ]})e_{j^\\star},y_i)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.57, + 0.825, + 0.606 + ], + "angle": 0, + "content": "We now characterize \\(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\). Note that by assumption, \\(j^{\\star} = j^{\\star}(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}) = \\arg \\max_{j}|\\tilde{\\theta}_{[j]}^{\\epsilon_{\\mathrm{tr}}}|\\). 
Hence, the normalized max-margin solution \\(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) is the solution of" + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.609, + 0.826, + 0.642 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} := \\underset {\\| \\tilde {\\theta} \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ]} {\\min } y _ {i} \\tilde {\\theta} ^ {\\top} \\tilde {x} _ {i} - \\epsilon_ {\\mathrm {t r}} | \\tilde {\\theta} _ {[ j ^ {\\star} ]} | \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.647, + 0.827, + 0.767 + ], + "angle": 0, + "content": "Observe that the minimum margin of this estimator \\(\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}} = \\min_{i\\in [n]}y_i(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}})^\\top \\tilde{x}_i - \\epsilon_{\\mathrm{tr}}|\\tilde{\\theta}_{[j^* ]}^{\\epsilon_{\\mathrm{tr}}}\\) decreases with \\(\\epsilon_{\\mathrm{tr}}\\) as the problem becomes harder \\(\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}}\\leq \\tilde{\\gamma}\\), where the latter is equivalent to the margin of \\(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) for \\(\\epsilon_{\\mathrm{tr}} = 0\\). Since \\(r > 2\\tilde{\\gamma}_{\\max}\\) by assumption in the Theorem, by Lemma A.2 with probability at least \\(1 - 2\\mathbb{E}^{-\\frac{\\alpha^2(d - 1)}{n}}\\), we then have that \\(r > 2\\tilde{\\gamma}\\geq 2\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}}\\). Given the closed form of \\(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) in Equation 19, it directly follows that \\(\\widehat{\\theta}_{[1]}^{\\epsilon_{\\mathrm{tr}}} = r > 2\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}}\\| \\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\| _2 = \\| \\widehat{\\theta}_{[2:d]}^{\\epsilon_{\\mathrm{tr}}}\\| _2\\) and hence \\(1\\in j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})\\). 
This contradicts the original assumption \\(1\\notin j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})\\) and hence we established that \\(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) for the \\(\\ell_1\\)-perturbation set 9 has the same closed form 14 as for the perturbation set 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.825, + 0.805 + ], + "angle": 0, + "content": "The final statement is proved by using the analogous steps as in the proof of 1. and 2. to obtain the closed form of the robust accuracy of \\(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.821, + 0.379, + 0.835 + ], + "angle": 0, + "content": "A.3 PROOF OF LEMMA A.1" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.845, + 0.441, + 0.862 + ], + "angle": 0, + "content": "We start by proving that \\(\\widehat{\\theta}\\) is of the form" + }, + { + "type": "equation", + "bbox": [ + 0.445, + 0.865, + 0.826, + 0.892 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} = \\left[ a _ {1}, a _ {2} \\tilde {\\theta} \\right], \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.926 + ], + "angle": 0, + "content": "for \\(a_1, a_2 > 0\\). Denote by \\(\\mathcal{H}(\\theta)\\) the plane through the origin with normal \\(\\theta\\). We define \\(d((x,y), \\mathcal{H}(\\theta))\\) as the signed euclidean distance from the point \\((x,y) \\in D \\sim \\mathbb{P}_r\\) to the plane \\(\\mathcal{H}(\\theta)\\). 
The signed" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.163 + ], + "angle": 0, + "content": "euclidean distance is the defined as the euclidean distance from \\( x \\) to the plane if the point \\( (x, y) \\) is correctly predicted by \\( \\theta \\), and the negative euclidean distance from \\( x \\) to the plane otherwise. We rewrite the definition of the max \\( l_{2} \\)-margin classifier. It is the classifier induced by the normalized vector \\( \\widehat{\\theta} \\), such that" + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.168, + 0.693, + 0.199 + ], + "angle": 0, + "content": "\\[\n\\max _ {\\theta \\in \\mathbb {R} ^ {d}} \\min _ {(x, y) \\in D} d \\left((x, y), \\mathcal {H} (\\theta)\\right) = \\min _ {(x, y) \\in D} d \\left(\\left(x, y\\right), \\mathcal {H} (\\widehat {\\theta})\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.203, + 0.568, + 0.219 + ], + "angle": 0, + "content": "We use that \\(D\\) is deterministic in its first coordinate and get" + }, + { + "type": "equation", + "bbox": [ + 0.29, + 0.224, + 0.707, + 0.281 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\max _ {\\theta} \\min _ {(x, y) \\in D} d \\left((x, y), \\mathcal {H} (\\theta)\\right) = \\max _ {\\theta} \\min _ {(x, y) \\in D} y \\left(\\theta_ {[ 1 ]} x _ {[ 1 ]} + \\tilde {\\theta} ^ {\\top} \\tilde {x}\\right) \\\\ = \\max _ {\\theta} \\theta_ {1} \\frac {r}{2} + \\min _ {(x, y) \\in D} y \\tilde {\\theta} ^ {\\top} \\tilde {x}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.289, + 0.827, + 0.34 + ], + "angle": 0, + "content": "Because \\( r > 0 \\), the maximum over all \\( \\theta \\) has \\( \\widehat{\\theta}_{[1]} \\geq 0 \\). Take any \\( a > 0 \\) such that \\( \\| \\widetilde{\\theta} \\|_2 = a \\). By definition the max \\( l_2 \\)-margin classifier, \\( \\widetilde{\\theta} \\), maximizes \\( \\min_{(x,y) \\in D} d((x,y), \\mathcal{H}(\\theta)) \\). Therefore, \\( \\widehat{\\theta} \\) is of the form of Equation 21." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.345, + 0.827, + 0.392 + ], + "angle": 0, + "content": "Note that all classifiers induced by vectors of the form of Equation 21 classify \\( D \\) correctly. Next, we aim to find expressions for \\( a_1 \\) and \\( a_2 \\) such that Equation 21 is the normalized max \\( l_2 \\)-margin classifier. The distance from any \\( x \\in D \\) to \\( \\mathcal{H}(\\widehat{\\theta}) \\) is" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.397, + 0.615, + 0.423 + ], + "angle": 0, + "content": "\\[\nd \\left(x, \\mathcal {H} (\\widehat {\\theta})\\right) = \\left| a _ {1} x _ {[ 1 ]} + a _ {2} \\tilde {\\theta} ^ {\\top} \\tilde {x} \\right|.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.43, + 0.678, + 0.455 + ], + "angle": 0, + "content": "Using that \\( x_{[1]} = y^{\\frac{r}{2}} \\) and that the second term equals \\( a_2 d\\left(x, \\mathcal{H}(\\tilde{\\theta})\\right) \\), we get" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.46, + 0.825, + 0.489 + ], + "angle": 0, + "content": "\\[\n\\left. d \\left(x, \\mathcal {H} (\\hat {\\theta})\\right) = \\left| a _ {1} \\frac {r}{2} + a _ {2} d \\left(x, \\mathcal {H} (\\tilde {\\theta})\\right) \\right| = a _ {1} \\frac {r}{2} + \\sqrt {1 - a _ {1} ^ {2}} d \\left(x, \\mathcal {H} (\\tilde {\\theta})\\right). \\right. 
\\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.825, + 0.584 + ], + "angle": 0, + "content": "Let \\((\\tilde{x},y)\\in \\widetilde{D}\\) be the point closest in Euclidean distance to \\(\\tilde{\\theta}\\). This point is also the closest point in Euclidean distance to \\(\\mathcal{H}(\\widehat{\\theta})\\), because by Equation 22 \\(d\\left(x,\\mathcal{H}(\\widehat{\\theta})\\right)\\) is strictly decreasing for decreasing \\(d\\left(x,\\mathcal{H}(\\tilde{\\theta})\\right)\\). We maximize the minimum margin \\(d\\left(x,\\mathcal{H}(\\widehat{\\theta})\\right)\\) with respect to \\(a_1\\). Define the vectors \\(a = [a_1,a_2]\\) and \\(v = \\left[\\frac{r}{2},d\\left(x,\\mathcal{H}(\\tilde{\\theta})\\right)\\right]\\). We find using the dual norm that" + }, + { + "type": "equation", + "bbox": [ + 0.46, + 0.589, + 0.537, + 0.62 + ], + "angle": 0, + "content": "\\[\na = \\frac {v}{\\| v \\| _ {2}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.626, + 0.638, + 0.643 + ], + "angle": 0, + "content": "Plugging the expression of \\(a\\) into Equation 21 yields that \\(\\widehat{\\theta}\\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.407, + 0.648, + 0.591, + 0.682 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} = \\frac {1}{\\sqrt {r ^ {2} + 4 \\widetilde {\\gamma} ^ {2}}} \\left[ r, 2 \\widetilde {\\gamma} \\widetilde {\\theta} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.694, + 0.524, + 0.71 + ], + "angle": 0, + "content": "For the second part of the lemma we first decompose" + }, + { + "type": "equation", + "bbox": [ + 0.229, + 0.715, + 0.768, + 0.745 + ], + "angle": 0, + "content": "\\[\n\\mathbb {P} _ {r _ {\\mathrm {t e s t}}} (Y \\widehat {\\theta} ^ {\\top} X > 0) = \\frac {1}{2} \\mathbb {P} _ {r _ {\\mathrm {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X > 0 \\mid Y = 1 \\right] + \\frac {1}{2} \\mathbb {P} _ {r _ {\\mathrm {t e s t}}} \\left[ \\widehat {\\theta} ^ 
{\\top} X < 0 \\mid Y = - 1 \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.748, + 0.312, + 0.763 + ], + "angle": 0, + "content": "We can further write" + }, + { + "type": "equation", + "bbox": [ + 0.264, + 0.767, + 0.825, + 0.889 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X > 0 \\mid Y = 1 \\right] = \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ \\sum_ {i = 2} ^ {d} \\widehat {\\theta} _ {[ i ]} X _ {[ i ]} > - \\widehat {\\theta} _ {[ 1 ]} X _ {[ 1 ]} \\mid Y = 1 \\right] \\tag {23} \\\\ = \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ 2 \\tilde {\\gamma} \\sum_ {i = 1} ^ {d - 1} \\tilde {\\theta} _ {[ i ]} X _ {[ i ]} > - r \\frac {r _ {\\text {t e s t}}}{2} \\mid Y = 1 \\right] \\\\ = 1 - \\Phi \\left(- \\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\tilde {\\gamma}}\\right) = \\Phi \\left(\\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\tilde {\\gamma}}\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.893, + 0.827, + 0.929 + ], + "angle": 0, + "content": "where \\(\\Phi\\) is the cumulative distribution function. The second equality follows by multiplying by the normalization constant on both sides and the third equality is due to the fact that \\(\\sum_{i=1}^{d-1} \\tilde{\\theta}_{[i]} X_{[i]}\\) is" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.133 + ], + "angle": 0, + "content": "a zero-mean Gaussian with variance \\(\\sigma^2\\|\\tilde{\\theta}\\|_2^2 = \\sigma^2\\) since \\(\\tilde{\\theta}\\) is normalized. 
Correspondingly we can write" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.14, + 0.826, + 0.196 + ], + "angle": 0, + "content": "\\[\n\\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X < 0 \\mid Y = - 1 \\right] = \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ 2 \\widetilde {\\gamma} \\sum_ {i = 1} ^ {d - 1} \\tilde {\\theta} _ {[ i ]} X _ {[ i ]} < - r \\left(- \\frac {r _ {\\text {t e s t}}}{2}\\right) \\mid Y = - 1 \\right] = \\Phi \\left(\\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\widetilde {\\gamma}}\\right) \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.208, + 0.825, + 0.245 + ], + "angle": 0, + "content": "so that we can combine 23 and 24 to obtain \\(\\mathbb{P}_{r_{\\mathrm{test}}}(Y\\widehat{\\theta}^{\\top}X > 0) = \\Phi \\left(\\frac{r r_{\\mathrm{test}}}{4\\sigma\\widetilde{\\gamma}}\\right)\\). This concludes the proof of the lemma." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.262, + 0.381, + 0.276 + ], + "angle": 0, + "content": "A.4 PROOF OF LEMMA A.2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.289, + 0.827, + 0.359 + ], + "angle": 0, + "content": "The proof plan is as follows. We start from the definition of the max \\(\\ell_2\\)-margin of a dataset. Then, we rewrite the max \\(\\ell_2\\)-margin as an expression that includes a random matrix with independent standard normal entries. This allows us to prove the upper and lower bounds for the max-\\(\\ell_2\\)-margin in Sections A.4.1 and A.4.2 respectively, using non-asymptotic estimates on the singular values of Gaussian random matrices." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.365, + 0.613, + 0.382 + ], + "angle": 0, + "content": "Given the dataset \\(\\widetilde{D} = \\{(\\tilde{x}_i, y_i)\\}_{i=1}^n\\), we define the random matrix" + }, + { + "type": "equation", + "bbox": [ + 0.449, + 0.389, + 0.826, + 0.451 + ], + "angle": 0, + "content": "\\[\nX = \\left( \\begin{array}{c} \\tilde {x} _ {1} ^ {\\top} \\\\ \\tilde {x} _ {2} ^ {\\top} \\\\ \\dots \\\\ \\tilde {x} _ {n} ^ {\\top} \\end{array} \\right). \\tag {25}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.462, + 0.827, + 0.506 + ], + "angle": 0, + "content": "where \\(\\tilde{x}_i\\sim \\mathcal{N}(0,\\sigma I_{d - 1})\\). Let \\(\\mathcal{V}\\) be the class of all perfect predictors of \\(\\widetilde{D}\\). For a matrix \\(A\\) and vector \\(b\\) we also denote by \\(|Ab|\\) the vector whose entries correspond to the absolute values of the entries of \\(Ab\\). Then, by definition" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.514, + 0.826, + 0.54 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\gamma} = \\max _ {v \\in \\mathcal {V}, \\| v \\| _ {2} = 1} \\min _ {j \\in [ n ]} | X v | _ {[ j ]} = \\max _ {v \\in \\mathcal {V}, \\| v \\| _ {2} = 1} \\min _ {j \\in [ n ]} \\sigma | Q v | _ {[ j ]}, \\tag {26}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.45, + 0.566 + ], + "angle": 0, + "content": "where \\(Q = \\frac{1}{\\sigma} X\\) is the scaled data matrix." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.572, + 0.631, + 0.589 + ], + "angle": 0, + "content": "In the sequel we will use the operator norm of a matrix \\(A \\in \\mathbb{R}^{n \\times d - 1}\\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.596, + 0.6, + 0.624 + ], + "angle": 0, + "content": "\\[\n\\| A \\| _ {2} = \\sup _ {v \\in \\mathbb {R} ^ {d - 1} | \\| v \\| _ {2} = 1} \\| A v \\| _ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.633, + 0.825, + 0.663 + ], + "angle": 0, + "content": "and denote the maximum singular value of a matrix \\( A \\) as \\( s_{\\max}(A) \\) and the minimum singular value as \\( s_{\\min}(A) \\)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.679, + 0.336, + 0.693 + ], + "angle": 0, + "content": "A.4.1 UPPERBOUND" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.704, + 0.825, + 0.741 + ], + "angle": 0, + "content": "Given the maximality of the operator norm and since the minimum entry of the vector \\( |Qv| \\) must be smaller than \\( \\frac{\\|Q\\|_2}{\\sqrt{n}} \\), we can upper bound \\( \\tilde{\\gamma} \\) by" + }, + { + "type": "equation", + "bbox": [ + 0.44, + 0.748, + 0.556, + 0.781 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\gamma} \\leq \\sigma \\frac {1}{\\sqrt {n}} \\| Q \\| _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.791, + 0.827, + 0.822 + ], + "angle": 0, + "content": "Taking the expectation on both sides with respect to the draw of \\(\\widetilde{D}\\) and noting \\(\\| Q\\| _2\\leq s_{\\max}(Q)\\), it follows from Corollary 5.35 of Vershynin (2010) that for all \\(t\\geq 0\\)" + }, + { + "type": "equation", + "bbox": [ + 0.334, + 0.829, + 0.662, + 0.857 + ], + "angle": 0, + "content": "\\[\n\\mathbb {P} \\left[ \\sqrt {d - 1} + \\sqrt {n} + t \\geq s _ {\\max } (Q) \\right] \\geq 1 - 2 e ^ {- \\frac {t ^ {2}}{2}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.866, + 0.526, + 0.885 + ], + "angle": 0, + "content": "Therefore, with a probability greater than \\(1 - 2e^{-\\frac{t^2}{2}}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.403, + 0.894, + 0.594, + 0.93 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\gamma} \\leq \\sigma 
\\left(1 + \\frac {t + \\sqrt {d - 1}}{\\sqrt {n}}\\right).\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.342, + 0.119 + ], + "angle": 0, + "content": "A.4.2 LOWERBOUND" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.128, + 0.827, + 0.161 + ], + "angle": 0, + "content": "By the definition in Equation 26, if we find a vector \\( v \\in \\mathcal{V} \\) with \\( \\| v \\|_2 = 1 \\) such that for an \\( a > 0 \\), it holds that \\( \\min_{j \\in n} \\sigma |Xv|_{[j]} > a \\), then \\( \\tilde{\\gamma} > a \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.166, + 0.828, + 0.209 + ], + "angle": 0, + "content": "Recall the definition of the max-\\(\\ell_2\\)-margin as in Equation 25. As \\(n < d - 1\\), the random matrix \\(Q\\) is a wide matrix, i.e. there are more columns than rows and therefore the minimal singular value is 0. Furthermore, \\(Q\\) has rank \\(n\\) almost surely and hence for all \\(c > 0\\), there exists a \\(v \\in \\mathbb{R}^{d-1}\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.433, + 0.216, + 0.826, + 0.233 + ], + "angle": 0, + "content": "\\[\n\\sigma Q v = 1 _ {\\{n \\}} c > 0, \\tag {27}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.239, + 0.828, + 0.285 + ], + "angle": 0, + "content": "where \\(1_{\\{n\\}}\\) denotes the all ones vector of dimension \\(n\\). The smallest non-zero singular value of \\(Q\\), \\(s_{\\min, \\text{nonzero}}(Q)\\), equals the smallest non-zero singular value of its transpose \\(Q^{\\top}\\). 
Therefore, there also exists a \\(v \\in \\mathcal{V}\\) with \\(\\| v \\|_2 = 1\\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.346, + 0.291, + 0.826, + 0.323 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\gamma} \\geq \\min _ {j \\in [ n ]} \\sigma | Q v | _ {[ j ]} \\geq \\sigma s _ {\\min , \\text {n o n z e r o s}} \\left(Q ^ {\\top}\\right) \\frac {1}{\\sqrt {n}}, \\tag {28}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.33, + 0.826, + 0.391 + ], + "angle": 0, + "content": "where we used the fact that any vector \\( v \\) in the span of non-zero eigenvectors satisfies \\( \\| Qv\\| _2\\geq s_{\\min, \\text{nonzeros}}(Q) \\) and the existence of a solution \\( v \\) for any right-hand side as in Equation 27. Taking the expectation on both sides, Corollary 5.35 of Vershynin (2010) yields that with a probability greater than \\( 1 - 2e^{-\\frac{t^2}{2}} \\), \\( t\\geq 0 \\) we have" + }, + { + "type": "equation", + "bbox": [ + 0.403, + 0.399, + 0.826, + 0.434 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\gamma} \\geq \\sigma \\left(\\frac {\\sqrt {d - 1} - t}{\\sqrt {n}} - 1\\right). \\tag {29}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.45, + 0.555, + 0.466 + ], + "angle": 0, + "content": "B BOUNDS ON THE SUSCEPTIBILITY SCORE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.481, + 0.825, + 0.552 + ], + "angle": 0, + "content": "In Theorem 3.1, we give non-asymptotic bounds on the robust and standard error of a linear classifier trained with adversarial logistic regression. Moreover, we use the robust error decomposition in susceptibility and standard error to gain intuition about how adversarial training may hurt robust generalization. In this section, we complete the result of Theorem 3.1 by also deriving non-asymptotic bounds on the susceptibility score of the max \\(\\ell_2\\)-margin classifier." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.558, + 0.825, + 0.587 + ], + "angle": 0, + "content": "Using the results in Appendix A, we can prove the following Corollary B.1, which gives non-asymptotic bounds on the susceptibility score." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.59, + 0.825, + 0.62 + ], + "angle": 0, + "content": "Corollary B.1. Assume \\( d - 1 > n \\). For the \\( \\epsilon_{te} \\)-susceptibility on test samples from \\( \\mathbb{P}_r \\) with \\( 2\\epsilon_{te} < r \\) and perturbation sets in Equations 3 and 9 the following holds:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.626, + 0.827, + 0.661 + ], + "angle": 0, + "content": "For \\(\\epsilon_{tr} < \\frac{r}{2} - \\tilde{\\gamma}_{\\mathrm{max}}\\), with probability at least \\(1 - 2e^{-\\frac{\\alpha^2(d - 1)}{2}}\\) for any \\(0 < \\alpha < 1\\), over the draw of a dataset \\(D\\) with \\(n\\) samples from \\(\\mathbb{P}_r\\), the \\(\\epsilon_{te}\\)-susceptibility is upper and lower bounded by" + }, + { + "type": "equation", + "bbox": [ + 0.259, + 0.666, + 0.825, + 0.707 + ], + "angle": 0, + "content": "\\[\n\\operatorname {S u s c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}\\right) \\leq \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (\\epsilon_ {t e} - \\frac {r}{2})}{2 \\widetilde {\\gamma} _ {\\max } \\sigma}\\right) - \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (- \\epsilon_ {t e} - \\frac {r}{2})}{2 \\widetilde {\\gamma} _ {\\min } \\sigma}\\right) \\tag {30}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.262, + 0.703, + 0.734, + 0.737 + ], + "angle": 0, + "content": "\\[\nS u s c (\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}) \\geq \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (\\epsilon_ {t e} - \\frac {r}{2})}{2 \\tilde {\\gamma} _ {\\min} \\sigma}\\right) - \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (- \\epsilon_ {t e} - \\frac {r}{2})}{2 \\tilde {\\gamma} _ {\\max} 
\\sigma}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.828, + 0.89 + ], + "angle": 0, + "content": "We give the proof in Subsection B.1. Observe that the bounds on the susceptibility score in Corollary B.1 consist of two terms each, where the second term decreases with \\(\\epsilon_{\\mathrm{tr}}\\), but the first term increases. We recognise following two regimes: the max \\(\\ell_2\\)-margin classifier is close to the ground truth \\(e_1\\) or not. Clearly, the ground truth classifier has zero susceptibility and hence classifiers close to the ground truth also have low susceptibility. On the other hand, if the max \\(l_2\\)-margin classifier is not close to the ground truth, then putting less weight on the first coordinate increases invariance to the perturbations along the first direction. Recall that by Lemma A.1, increasing \\(\\epsilon_{\\mathrm{tr}}\\), decreases the weight on the first coordinate of the max \\(\\ell_2\\)-margin classifier. Furthermore, in the low sample size regime, we are likely not close to the ground truth. Therefore, the regime where the susceptibility decreases with increasing \\(\\epsilon_{\\mathrm{tr}}\\) dominates in the low sample size regime." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "To confirm the result of Corollary B.1, we plot the mean and standard deviation of the susceptibility score of 5 independent experiments. The results are depicted in Figure 7. 
We see that for low standard" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.162 + ], + "angle": 0, + "content": "error, when the classifier is reasonably close to the optimal classifier, the susceptibility increases slightly with increasing adversarial budget. However, increasing the adversarial training budget, \\(\\epsilon_{\\mathrm{tr}}\\), further, causes the susceptibility score to drop greatly. Hence, we can recognize both regimes and validate that, indeed, the second regime dominates in the low sample size setting." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.177, + 0.41, + 0.191 + ], + "angle": 0, + "content": "B.1 PROOF OF COROLLARY B.1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.203, + 0.825, + 0.246 + ], + "angle": 0, + "content": "We prove the statement by bounding the robustness of a linear classifier. Recall that the robustness of a classifier is the probability that a classifier does not change its prediction under an adversarial attack. The susceptibility score is then given by" + }, + { + "type": "equation", + "bbox": [ + 0.383, + 0.252, + 0.825, + 0.271 + ], + "angle": 0, + "content": "\\[\n\\operatorname {S u s c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = 1 - \\operatorname {R o b} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right). 
\\tag {31}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.284, + 0.825, + 0.349 + ], + "angle": 0, + "content": "The proof idea is as follows: since the perturbations are along the first basis direction, \\(e_1\\), we compute the distance from the robust \\(l_2\\)-max margin \\(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) to a point \\((X,Y)\\sim \\mathbb{P}\\). Then, we note that the robustness of \\(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) is given by the probability that the distance along \\(e_1\\), from \\(X\\) to the decision plane induced by \\(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) is greater than \\(\\epsilon_{\\mathrm{te}}\\). Lastly, we use the non-asymptotic bounds of Lemma A.2." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.355, + 0.628, + 0.37 + ], + "angle": 0, + "content": "Recall, by Lemma A.1, the max \\(l_{2}\\)-margin classifier is of the form of" + }, + { + "type": "equation", + "bbox": [ + 0.351, + 0.375, + 0.823, + 0.41 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{\\sqrt {(r - 2 \\epsilon_ {\\mathrm {t r}}) ^ {2} + 4 \\tilde {\\gamma} ^ {2}}} \\left[ r - 2 \\epsilon_ {\\mathrm {t r}}, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right]. \\tag {32}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.416, + 0.825, + 0.447 + ], + "angle": 0, + "content": "Let \\((X,Y)\\sim \\mathbb{P}\\). 
The distance along \(e_1\) from \(X\) to the decision plane induced by \(\widehat{\theta}^{\epsilon_{\mathrm{tr}}}\), \(\mathcal{H}(\widehat{\theta}^{\epsilon_{\mathrm{tr}}})\), is given by" + }, + { + "type": "equation", + "bbox": [ + 0.349, + 0.447, + 0.649, + 0.489 + ], + "angle": 0, + "content": "\[\nd _ {e _ {1}} (X, \mathcal {H} (\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}})) = \left| X _ {[ 1 ]} + \frac {1}{\widehat {\theta} _ {[ 0 ]} ^ {\epsilon_ {\mathrm {t r}}}} \sum_ {i = 2} ^ {d} \widehat {\theta} _ {[ i ]} ^ {\epsilon_ {\mathrm {t r}}} X _ {[ i ]} \right|.\n\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.494, + 0.54, + 0.51 + ], + "angle": 0, + "content": "Substituting the expression of \(\widehat{\theta}^{\epsilon_{\mathrm{tr}}}\) in Equation 32 yields" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.517, + 0.673, + 0.558 + ], + "angle": 0, + "content": "\[\nd _ {e _ {1}} (X, \mathcal {H} (\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}})) = \left| X _ {[ 1 ]} + 2 \tilde {\gamma} \frac {1}{(r - 2 \epsilon_ {\mathrm {t r}})} \sum_ {i = 2} ^ {d} \tilde {\theta} _ {[ i ]} X _ {[ i ]} \right|.\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.566, + 0.825, + 0.595 + ], + "angle": 0, + "content": "Let \(N\) be a standard normal distributed random variable. 
By definition \\(\\| \\tilde{\\theta}\\| _2^2 = 1\\) and using that a sum of Gaussian random variables is again a Gaussian random variable, we can write" + }, + { + "type": "equation", + "bbox": [ + 0.354, + 0.601, + 0.641, + 0.635 + ], + "angle": 0, + "content": "\\[\nd _ {e _ {1}} \\left(X, \\mathcal {H} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}\\right)\\right) = \\left| X _ {[ 1 ]} + 2 \\widetilde {\\gamma} \\frac {\\sigma}{\\left(r - \\epsilon_ {\\mathrm {t r}}\\right)} N \\right|.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.641, + 0.825, + 0.674 + ], + "angle": 0, + "content": "The robustness of \\(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\) is given by the probability that \\(d_{e_1}(X,\\mathcal{H}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})) > \\epsilon_{\\mathrm{te}}\\). Hence, using that \\(X_{1} = \\pm \\frac{r}{2}\\) with probability \\(\\frac{1}{2}\\), we get" + }, + { + "type": "equation", + "bbox": [ + 0.226, + 0.679, + 0.825, + 0.715 + ], + "angle": 0, + "content": "\\[\n\\operatorname {R o b} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = P \\left[ \\frac {r}{2} + 2 \\widetilde {\\gamma} \\frac {\\sigma}{(r - 2 \\epsilon_ {\\mathrm {t r}})} N > \\epsilon_ {\\mathrm {t e}} \\right] + P \\left[ \\frac {r}{2} + 2 \\widetilde {\\gamma} \\frac {\\sigma}{(r - \\epsilon_ {\\mathrm {t r}})} N < - \\epsilon_ {\\mathrm {t e}} \\right]. 
\\tag {33}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.238, + 0.736, + 0.497, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.862, + 0.49, + 0.875 + ], + "angle": 0, + "content": "(a) Susceptibility score decreases with \\(\\epsilon_{\\mathrm{tr}}\\)" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.735, + 0.763, + 0.857 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.537, + 0.862, + 0.724, + 0.875 + ], + "angle": 0, + "content": "(b) Robust error decomposition" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.887, + 0.826, + 0.94 + ], + "angle": 0, + "content": "Figure 7: We set \\( r = 6 \\), \\( d = 1000 \\), \\( n = 50 \\) and \\( \\epsilon_{\\mathrm{te}} = 2.5 \\). (a) The average susceptibility score and the standard deviation over 5 independent experiments. Note how the bounds closely predict the susceptibility score. (b) For comparison, we also plot the robust error decomposition in susceptibility and standard error. Even though the susceptibility decreases, the robust error increases with increasing adversarial budget \\( \\epsilon_{\\mathrm{tr}} \\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.437, + 0.12 + ], + "angle": 0, + "content": "We can rewrite Equation 33 in the form" + }, + { + "type": "equation", + "bbox": [ + 0.228, + 0.129, + 0.77, + 0.164 + ], + "angle": 0, + "content": "\\[\n\\operatorname {R o b} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = P \\left[ N > \\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) \\left(\\epsilon_ {\\mathrm {t e}} - \\frac {r}{2}\\right)}{2 \\widetilde {\\gamma} \\sigma} \\right] + P \\left[ N < \\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) \\left(- \\epsilon_ {\\mathrm {t e}} - \\frac {r}{2}\\right)}{2 \\widetilde {\\gamma} \\sigma} \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.173, + 0.825, + 0.202 + ], + "angle": 0, + "content": "Recall, that \\(N\\) is a standard normal distributed random variable and denote by \\(\\Phi\\) the cumulative standard normal density. 
By definition of the cumulative density function, we find that" + }, + { + "type": "equation", + "bbox": [ + 0.244, + 0.211, + 0.753, + 0.247 + ], + "angle": 0, + "content": "\\[\n\\mathrm {R o b} (\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}) = 1 - \\Phi \\left(\\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) (\\epsilon_ {\\mathrm {t e}} - \\frac {r}{2})}{2 \\widetilde {\\gamma} \\sigma}\\right) + \\Phi \\left(\\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) (- \\epsilon_ {\\mathrm {t e}} - \\frac {r}{2})}{2 \\widetilde {\\gamma} \\sigma}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.255, + 0.825, + 0.285 + ], + "angle": 0, + "content": "Substituting the bounds on \\(\\tilde{\\gamma}\\) of Lemma A.2 gives us the non-asymptotic bounds on the robustness score and by Equation 31 also on the susceptibility score." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.308, + 0.621, + 0.323 + ], + "angle": 0, + "content": "C EXPERIMENTAL DETAILS ON THE LINEAR MODEL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.341, + 0.668, + 0.357 + ], + "angle": 0, + "content": "In this section, we provide detailed experimental details to Figures 3 and 4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.362, + 0.828, + 0.435 + ], + "angle": 0, + "content": "We implement adversarial logistic regression using stochastic gradient descent with a learning rate of 0.01. Note that logistic regression converges logarithmically to the robust max \\( l_{2} \\)-margin solution. As a consequence of the slow convergence, we train for up to \\( 10^{7} \\) epochs. Both during training and test time we solve \\( \\max_{x_i' \\in T(x_i; \\epsilon_{\\mathrm{tr}})} L(f_\\theta(x_i') y_i) \\) exactly. Hence, we exactly measure the robust error. Unless specified otherwise, we set \\( \\sigma = 1 \\), \\( r = 12 \\) and \\( \\epsilon_{\\mathrm{te}} = 4 \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.451, + 0.828, + 0.564 + ], + "angle": 0, + "content": "Experimental details on Figure 3 (a) We draw 5 datasets with \\( n = 50 \\) samples and input dimension \\( d = 1000 \\) from the distribution \\( \\mathbb{P} \\). We then run adversarial logistic regression on all 5 datasets with adversarial training budgets, \\( \\epsilon_{\\mathrm{tr}} = 1 \\) to 5. To compute the resulting robust error gap of all the obtained classifiers, we use a test set of size \\( 10^{6} \\). Lastly, we compute the lower bound given in part 2. of Theorem 3.1. (b) We draw 5 datasets with different sizes \\( n \\) between 50 and \\( 10^{4} \\). We take an input dimension of \\( d = 10^{4} \\) and plot the mean and standard deviation of the robust error after adversarial and standard logistic regression over the 5 samples.(c) We again draw 5 datasets for each \\( d / n \\) constellation and compute the robust error gap for each dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.581, + 0.825, + 0.625 + ], + "angle": 0, + "content": "Experimental details on Figure 4 For both (a) and (b) we set \\( d = 1000 \\), \\( \\epsilon_{\\mathrm{te}} = 4 \\), and vary the adversarial training budget \\( (\\epsilon_{\\mathrm{tr}}) \\) from 1 to 5. For every constellation of \\( n \\) and \\( \\epsilon_{\\mathrm{tr}} \\), we draw 10 datasets and show the average and standard deviation of the resulting robust errors. In (b), we set \\( n = 50 \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.648, + 0.684, + 0.665 + ], + "angle": 0, + "content": "D EXPERIMENTAL DETAILS ON THE WATERBIRDS DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.681, + 0.826, + 0.74 + ], + "angle": 0, + "content": "In this section, we discuss the experimental details and construction of the Waterbirds dataset in more detail. 
We also provide ablation studies of attack parameters such as the size of the motion blur kernel, plots of the robust error decomposition with increasing \\( n \\), and some experiments using early stopping." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.758, + 0.411, + 0.772 + ], + "angle": 0, + "content": "D.1 THE WATERBIRDS DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.829, + 0.926 + ], + "angle": 0, + "content": "To build the Waterbirds dataset, we use the CUB-200 dataset Welinder et al. (2010), which contains images and labels of 200 bird species, and 4 background classes (forest, jungle/bamboo, water ocean, water lake natural) of the Places dataset Zhou et al. (2017). The aim is to recognize whether or not the bird, in a given image, is a waterbird (e.g. an albatros) or a landbird (e.g. a woodpecker). To create the dataset, we randomly sample equally many water- as landbirds from the CUB-200 dataset. Thereafter, we sample for each bird image a random background image. Then, we use the segmentation provided in the CUB-200 dataset to segment the birds from their original images and paste them onto the randomly sampled backgrounds. The resulting images have a size of \\(256 \\times 256\\). Moreover, we also resize the segmentations such that we have the correct segmentation profiles of the birds in the new dataset as well. For the concrete implementation, we use the code provided by Sagawa et al. (2020)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.101, + 0.306, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.209, + 0.203, + 0.281, + 0.217 + ], + "angle": 0, + "content": "(a) Original" + }, + { + "type": "image", + "bbox": [ + 0.308, + 0.101, + 0.435, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.338, + 0.203, + 0.406, + 0.216 + ], + "angle": 0, + "content": "(b) \\(M = 5\\)" + }, + { + "type": "image", + "bbox": [ + 0.437, + 0.101, + 0.561, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.203, + 0.536, + 0.216 + ], + "angle": 0, + "content": "(c) \\(M = 10\\)" + }, + { + "type": "image", + "bbox": [ + 0.563, + 0.102, + 0.688, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.589, + 0.203, + 0.663, + 0.216 + ], + "angle": 0, + "content": "(d) \\(M = 15\\)" + }, + { + "type": "image", + "bbox": [ + 0.691, + 0.102, + 0.815, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.716, + 0.203, + 0.791, + 0.216 + ], + "angle": 0, + "content": "(e) \\(M = 20\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.229, + 0.825, + 0.281 + ], + "angle": 0, + "content": "Figure 8: An ablation study of the motion blur kernel size, which corresponds to the severity level of the blur. For increasing \\( M \\), the severity of the motion blur increases. In particular, note that for \\( M = 15 \\) and even \\( M = 20 \\), the bird remains recognizable: we do not semantically change the class, i.e. the perturbations are consistent." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.324, + 0.456, + 0.338 + ], + "angle": 0, + "content": "D.2 EXPERIMENTAL TRAINING DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.357, + 0.827, + 0.51 + ], + "angle": 0, + "content": "Following the example of Sagawa et al. (2020), we use a ResNet50 or ResNet18 pretrained on the ImageNet dataset for all experiments in the main text, a weight-decay of \\(10^{-4}\\), and train for 300 epochs using the Adam optimizer. Extensive fine-tuning of the learning rate resulted in an optimal learning rate of 0.006 for all experiments using the adversarial illumination attack and a pretrained ResNet50. For the experiments considering the adversarial illumination attack using a pretrained VGG19 or Densenet121 network, we found optimal learning rates of 0.001 and 0.002 respectively. Lastly, we found that for all experiments using the motion blur attack a learning rate of 0.0011 was optimal. Adversarial training is implemented as suggested in Madry et al. (2018): at each iteration we find the worst case perturbation with an exact or approximate method. In all our experiments, the resulting classifier interpolates the training set. We plot the mean over all runs and the standard deviation of the mean." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.546, + 0.506, + 0.56 + ], + "angle": 0, + "content": "D.3 SPECIFICS TO THE MOTION BLUR ATTACK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.58, + 0.825, + 0.664 + ], + "angle": 0, + "content": "Fast moving objects or animals are hard to photograph due to motion blur. Hence, when trying to classify or detect moving objects from images, it is imperative that the classifier is robust against reasonable levels of motion blur. We implement the attack as follows. First, we segment the bird from the original image, then use a blur filter and lastly, we paste the blurred bird back onto the background. 
We are able to apply more severe blur, by enlarging the kernel of the filter. See Figure 8 for an ablation study of the kernel size." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.67, + 0.825, + 0.755 + ], + "angle": 0, + "content": "The motion blur filter is implemented as follows. We use a kernel of size \\( M \\times M \\) and build the filter as follows: we fill the row \\( (M - 1)/2 \\) of the kernel with the value \\( 1 / M \\). Thereafter, we use the 2D convolution implementation of OpenCV (filter2D) Bradski (2000) to convolve the kernel with the image. Note that applying a rotation before the convolution to the kernel, changes the direction of the resulting motion blur. Lastly, we find the most detrimental level of motion blur using a list-search over all levels up to \\( M_{max} \\)." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.793, + 0.268, + 0.864 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.868, + 0.262, + 0.881 + ], + "angle": 0, + "content": "(a) \\(\\epsilon = -0.3\\)" + }, + { + "type": "image", + "bbox": [ + 0.268, + 0.794, + 0.359, + 0.864 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.275, + 0.869, + 0.355, + 0.881 + ], + "angle": 0, + "content": "(b) \\(\\epsilon = -0.2\\)" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.794, + 0.451, + 0.864 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.368, + 0.869, + 0.447, + 0.882 + ], + "angle": 0, + "content": "(c) \\(\\epsilon = -0.1\\)" + }, + { + "type": "image", + "bbox": [ + 0.453, + 0.793, + 0.544, + 0.864 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.869, + 0.535, + 0.882 + ], + "angle": 0, + "content": "(d) Original" + }, + { + "type": "image", + "bbox": [ + 0.545, + 0.793, + 0.636, + 0.864 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.558, + 0.869, 
+ 0.623, + 0.881 + ], + "angle": 0, + "content": "(e) \\(\\epsilon = 0.1\\)" + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.793, + 0.728, + 0.864 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.65, + 0.869, + 0.716, + 0.881 + ], + "angle": 0, + "content": "(f) \\(\\epsilon = 0.2\\)" + }, + { + "type": "image", + "bbox": [ + 0.729, + 0.793, + 0.82, + 0.864 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.741, + 0.869, + 0.81, + 0.882 + ], + "angle": 0, + "content": "(g) \\(\\epsilon = 0.3\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.894, + 0.825, + 0.922 + ], + "angle": 0, + "content": "Figure 9: An ablation study of the different lighting changes of the adversarial illumination attack. Even though the directed attack perturbs the signal component in the image, the bird remains recognizable in all cases." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.101, + 0.391, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.205, + 0.337, + 0.218 + ], + "angle": 0, + "content": "(a) Robust error" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.109, + 0.601, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.445, + 0.204, + 0.554, + 0.218 + ], + "angle": 0, + "content": "(b) Standard error" + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.102, + 0.813, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.658, + 0.204, + 0.762, + 0.218 + ], + "angle": 0, + "content": "(c) Susceptibility" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, 
+ 0.23, + 0.825, + 0.295 + ], + "angle": 0, + "content": "Figure 10: The robust error decomposition of the experiments depicted in Figure 10a. The plots depict the mean and standard deviation of the mean over several independent experiments. We see that, in comparison to standard training, the reduction in susceptibility for adversarial training is minimal in the low sample size regime. Moreover, the increase in standard error of adversarial training is quite severe, leading to an overall increase in robust error in the low sample size regime." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.321, + 0.615, + 0.334 + ], + "angle": 0, + "content": "D.4 SPECIFICS TO THE ADVERSARIAL ILLUMINATION ATTACK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.347, + 0.825, + 0.473 + ], + "angle": 0, + "content": "An adversary can hide objects using poor lighting conditions, which can for example arise from shadows or bright spots. To model poor lighting conditions on the object only (or targeted to the object), we use the adversarial illumination attack. The attack is constructed as follows: First, we segment the bird from their background. Then we apply an additive constant \(\epsilon\) to the bird, where the absolute size of the constant satisfies \(|\epsilon| < \epsilon_{\mathrm{te}} = 0.3\). Thereafter, we clip the values of the bird images to \([0,1]\), and lastly, we paste the bird back onto the background. See Figure 9 for an ablation of the parameter \(\epsilon\) of the attack. It is non-trivial how to (approximately) find the worst perturbation. We find an approximate solution by searching over all perturbations with increments of size \(\epsilon_{\mathrm{te}} / K_{\mathrm{max}}\). Denote by seg, the segmentation profile of the image \(x\). 
We consider all perturbed images in the form of" + }, + { + "type": "equation", + "bbox": [ + 0.253, + 0.479, + 0.741, + 0.511 + ], + "angle": 0, + "content": "\[\nx _ {p e r t} = (1 - \operatorname {s e g}) x + \operatorname {s e g} \left(x + \epsilon \frac {K}{K _ {\max}} 1 _ {2 5 5 \times 2 5 5}\right), K \in [ - K _ {\max }, K _ {\max } ].\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.516, + 0.825, + 0.546 + ], + "angle": 0, + "content": "During training time we set \( K_{max} = 16 \) and therefore search over 33 possible images. During test time we search over 65 images (\( K_{max} = 32 \))." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.561, + 0.341, + 0.575 + ], + "angle": 0, + "content": "D.5 EARLY STOPPING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.587, + 0.825, + 0.727 + ], + "angle": 0, + "content": "In all our experiments on the Waterbirds dataset, a parameter search led to an optimal weight-decay and learning rate of \(10^{-4}\) and 0.006 respectively. Another common regularization technique is early stopping, where one stops training on the epoch where the classifier achieves minimal robust error on a hold-out dataset. To understand if early stopping can mitigate the effect of adversarial training aggravating robust generalization in comparison to standard training, we perform the following experiment. On the Waterbirds dataset of size \(n = 20\) and considering the adversarial illumination attack, we compare standard training with early stopping and adversarial training \((\epsilon_{\mathrm{tr}} = \epsilon_{\mathrm{te}} = 0.3)\) with early stopping. Considering several independent experiments, early stopped adversarial training has an average robust error of 33.5 and early stopped standard training 29.1. Hence, early stopping does decrease the robust error gap, but does not close it." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.744, + 0.535, + 0.758 + ], + "angle": 0, + "content": "D.6 ERROR DECOMPOSITION WITH INCREASING \\( n \\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.77, + 0.825, + 0.854 + ], + "angle": 0, + "content": "In Figure 10a and 11a, we see that adversarial training hurts robust generalization in the small sample size regime. For completeness, we plot the robust error composition for adversarial and standard training in Figure 10. We see that in the low sample size regime, the drop in susceptibility that adversarial training achieves in comparison to standard training, is much lower than the increase in standard error. Conversely, in the high sample regime, the drop of susceptibility from adversarial training over standard training is much bigger than the increase in standard error." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.421, + 0.884 + ], + "angle": 0, + "content": "D.7 DIFFERENT ARCHITECTURES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.925 + ], + "angle": 0, + "content": "For completeness, we also performed similar experiments on the waterbirds dataset using the adversarial illumination attack with different network architectures as with the pretrained ResNet50" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.101, + 0.391, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.205, + 0.337, + 0.218 + ], + "angle": 0, + "content": "(a) Robust error" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.101, + 0.601, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.445, + 0.204, + 0.554, + 
0.218 + ], + "angle": 0, + "content": "(b) Standard error" + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.103, + 0.813, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.658, + 0.204, + 0.762, + 0.218 + ], + "angle": 0, + "content": "(c) Susceptibility" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.23, + 0.825, + 0.294 + ], + "angle": 0, + "content": "Figure 11: The robust error decomposition of the experiments depicted in Figure 6. The plots depict the mean and standard deviation of the mean over several independent experiments. We see that, in comparison to standard training, the reduction in susceptibility for adversarial training is minimal in the low sample size regime. Moreover, the increase in standard error of adversarial training is quite severe, leading to an overall increase in robust error in the low sample size regime." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.322, + 0.825, + 0.365 + ], + "angle": 0, + "content": "network. In particular, we considered the following pretrained network architectures: VGG19 and Densenet121. See Figure 12 for the results. We observe that accuracies, adversarial training hurts in the low sample size regime, but helps when enough data is available." 
+ }, + { + "type": "image", + "bbox": [ + 0.238, + 0.377, + 0.497, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.333, + 0.505, + 0.402, + 0.518 + ], + "angle": 0, + "content": "(a) VGG19" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.377, + 0.761, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.58, + 0.505, + 0.679, + 0.517 + ], + "angle": 0, + "content": "(b) Densenet121" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.531, + 0.825, + 0.582 + ], + "angle": 0, + "content": "Figure 12: The robust error of adversarial training and standard training with increasing sample size using the adversarial illumination attack with \\(\\epsilon_{\\mathrm{te}} = 0.3\\). We depict the mean and the standard deviation of the mean for multiple runs. Observe that across models, adversarial training hurts in the low sample size regime, but helps when enough samples are available." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.61, + 0.599, + 0.624 + ], + "angle": 0, + "content": "D.8 UNDIRECTED ATTACKS ON THE WATERBIRDS DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.637, + 0.825, + 0.679 + ], + "angle": 0, + "content": "In this section, we analyse adversarial training for \\(\\ell_2\\)-and \\(\\ell_{\\infty}\\)-ball perturbations in the small sample size regime. We observe that while adversarial training hurts standard generalization, it helps robust generalization." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.696, + 0.827, + 0.78 + ], + "angle": 0, + "content": "Adversarial training with \\(\\ell_2\\)-balls We train and test with small \\(\\ell_2\\)-balls, \\(\\epsilon_{\\mathrm{te}} = 0.2\\), such that the networks trained with standard training achieve a non-zero robust accuracy and the networks trained with adversarial training achieve non-trivial standard accuracy. 
We see in Figure 13, that adversarial training with \\(\\ell_2\\)-balls hurts standard generalization while increasing robust generalization. Moreover, in Figure 14, we see that also in the very small sample size regime, adversarial training with increasing \\(\\epsilon_{\\mathrm{tr}}\\) increases the standard error, but reduces the susceptibility." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.796, + 0.827, + 0.839 + ], + "angle": 0, + "content": "Adversarial training with \\(\\ell_{\\infty}\\)-balls We also consider \\(\\ell_{\\infty}\\)-ball perturbation. We see in Figure 15 that even the smallest perturbation budget \\(\\epsilon_{\\mathrm{te}} = \\frac{2}{255}\\), standard training has robust error of 100 percent. On the other hand, adversarial training achieves low, but non-zero robust error." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Experimental details We use an ImageNet pretrained ResNet34 and train for 300 epochs. Moreover, for reliable robust error and susceptibility evaluation of the attacks we use AutoAttack Croce & Hein (2020). All networks were trained such that the network interpolates the training dataset and has low robust error with non-trivial standard error. 
For the networks trained using standard training we use a learning rate of 0.006 and for the networks trained with adversarial training we used a learning" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.101, + 0.391, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.239, + 0.205, + 0.337, + 0.218 + ], + "angle": 0, + "content": "(a) Robust error" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.101, + 0.603, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.445, + 0.204, + 0.554, + 0.218 + ], + "angle": 0, + "content": "(b) Standard error" + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.101, + 0.815, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.658, + 0.204, + 0.762, + 0.218 + ], + "angle": 0, + "content": "(c) Susceptibility" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.23, + 0.828, + 0.281 + ], + "angle": 0, + "content": "Figure 13: The robust error decomposition of adversarial training with \\(\\ell_2\\)-balls of size \\(\\epsilon_{\\mathrm{tr}} = 0.2\\) and test adversaries with \\(\\ell_2\\)-balls of size \\(\\epsilon_{\\mathrm{te}} = 0.2\\). The plots depict the mean and standard deviation of the mean over several independent experiments. We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers." 
+ }, + { + "type": "image", + "bbox": [ + 0.396, + 0.301, + 0.606, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.411, + 0.825, + 0.476 + ], + "angle": 0, + "content": "Figure 14: The robust error decomposition of adversarial training in function of \\(\\epsilon_{\\mathrm{tr}}\\) in the small sample size regime \\(n = 20\\). We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers with increasing \\(\\epsilon_{\\mathrm{tr}}\\). We take \\(n = 20\\) and consider test adversaries with \\(\\ell_2\\)-balls of size \\(\\epsilon_{\\mathrm{te}} = 0.2\\). The plots depict the mean and standard deviation of the mean over several independent experiments." + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.495, + 0.391, + 0.594 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.6, + 0.336, + 0.612 + ], + "angle": 0, + "content": "(a) Robust error" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.496, + 0.602, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.445, + 0.6, + 0.553, + 0.612 + ], + "angle": 0, + "content": "(b) Standard error" + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.496, + 0.815, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.659, + 0.599, + 0.762, + 0.613 + ], + "angle": 0, + "content": "(c) Susceptibility" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.624, + 0.825, + 0.679 + ], + "angle": 0, + "content": "Figure 15: The robust error decomposition of adversarial training with \\(\\ell_{\\infty}\\)-balls of size \\(\\epsilon_{\\mathrm{tr}} = \\frac{2}{255}\\) and test adversaries with \\(\\ell_{\\infty}\\)-balls of size \\(\\epsilon_{\\mathrm{te}} = \\frac{2}{255}\\). 
The plots depict the mean and standard deviation of the mean over several independent experiments. We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.711, + 0.825, + 0.755 + ], + "angle": 0, + "content": "rate of \\(5 \\cdot 10^{-4}\\). We also trained with a weight decay of \\(10^{-4}\\), a batch size of 8 and a momentum of 0.9 for all networks. We train at least 3 networks for all settings and report the mean and standard deviation of the mean of the standard error, robust error and susceptibility over the three runs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.781, + 0.543, + 0.797 + ], + "angle": 0, + "content": "EXPERIMENTAL DETAILS ON CIFAR-10" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.817, + 0.825, + 0.846 + ], + "angle": 0, + "content": "In this section, we give the experimental details on the CIFAR-10-based experiments shown in Figures 1 and 17." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Subsampling CIFAR-10 In all our experiments we subsample CIFAR-10 to simulate the low sample size regime. We ensure that for all subsampled versions the number of samples of each class are equal. Hence, if we subsample to 500 training images, then each class has exactly 50 images, which are drawn uniformly from the \\(5k\\) training images of the respective class." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.203 + ], + "angle": 0, + "content": "Mask perturbation on CIFAR-10 On CIFAR-10, we consider the square black mask attack where the adversary can mask a square in the image of size \\(\\epsilon_{\\mathrm{te}} \\times \\epsilon_{\\mathrm{te}}\\) by setting the pixel values zero. To ensure that the mask cannot cover all the information about the true class in the image, we restrict the size of the masks to be at most \\(2 \\times 2\\), while allowing for all possible locations of the mask in the targeted image. For exact robust error evaluation, we perform a full grid search over all possible locations during test time. We show an example of a black-mask attack on each of the classes in CIFAR-10 in Figure 16." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.209, + 0.828, + 0.336 + ], + "angle": 0, + "content": "During training, a full grid search is computationally intractable so that we use an approximate attack similar to Wu et al. (2020) during training time: by identifying the \\( K = 16 \\) most promising mask locations with a heuristic as follows. First, we identify promising mask locations by analyzing the gradient, \\( \\nabla_{x}L(f_{\\theta}(x),y) \\), of the cross-entropy loss with respect to the input. Masks that cover part of the image where the gradient is large, are more likely to increase the loss. Hence, we compute the \\( K \\) mask locations \\( (i,j) \\), where \\( \\| \\nabla_{x}L(f_{\\theta}(x),y)_{[i:i + 2,j:j + 2]}\\| _1 \\) is the largest and take using a full list-search the mask that incurs the highest loss. 
Our intuition from the theory predicts that higher \\( K \\), and hence a more exact \"defense\", only increases the robust error of adversarial training, since the mask could then more efficiently cover important information about the class." + }, + { + "type": "image", + "bbox": [ + 0.238, + 0.345, + 0.761, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.397, + 0.825, + 0.425 + ], + "angle": 0, + "content": "Figure 16: We show an example of a mask perturbation for all 10 classes of CIFAR-10. Even though the attack occludes part of the images, a human can still easily classify all images correctly." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.446, + 0.553, + 0.628 + ], + "angle": 0, + "content": "Experimental training details For all our experiments on CIFAR-10, we adjusted the code provided by Phan (2021). As typically done for CIFAR-10, we augment the data with random cropping and horizontal flipping. For the experiments with results depicted in Figures 1 and 17, we use a ResNet18 network and train for 100 epochs. We tune the parameters learning rate and weight decay for low robust error. For standard training, we use a learning rate of 0.01 with equal weight decay. For adversarial training, we use a learning rate of 0.015 and a weight decay of \\(10^{-4}\\). We run each experiment three times for every dataset with different initialization seeds, and plot the average and standard deviation over the runs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.633, + 0.552, + 0.691 + ], + "angle": 0, + "content": "For the experiments in Figure 1 and 18 we use an attack strength of \\( K = 4 \\). Recall that we perform a full grid search at test time and hence have a good approximation of the robust accuracy and susceptibility score."
+ }, + { + "type": "image", + "bbox": [ + 0.563, + 0.462, + 0.822, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.56, + 0.595, + 0.827, + 0.658 + ], + "angle": 0, + "content": "Figure 17: The robust error decomposition in standard error and susceptibility for varying attack strengths \\( K \\). We see that the larger \\( K \\), the lower the susceptibility, but the higher the standard error." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.704, + 0.825, + 0.763 + ], + "angle": 0, + "content": "Increasing training attack strength We investigate the influence of the attack strength \\( K \\) on the robust error for adversarial training. We take \\( \\epsilon_{\\mathrm{tr}} = 2 \\) and \\( n = 500 \\) and vary \\( K \\). The results are depicted in Figure 17. We see that for increasing \\( K \\), the susceptibility decreases, but the standard error increases more severely, resulting in an increasing robust error." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.775, + 0.826, + 0.846 + ], + "angle": 0, + "content": "Robust error decomposition In Figure 1, we see that the robust error increases for adversarial training compared to standard training in the low sample size regime, but the opposite holds when enough samples are available. For completeness, we provide a full decomposition of the robust error in standard error and susceptibility for standard and adversarial training. We plot the decomposition in Figure 18." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.865, + 0.527, + 0.88 + ], + "angle": 0, + "content": "F STATIC HAND GESTURE RECOGNITION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.926 + ], + "angle": 0, + "content": "The goal of static hand gesture or posture recognition is to recognize hand gestures such as a pointing index finger or the okay-sign based on static data such as images Oudah et al. (2020); Yang et al." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.204 + ], + "angle": 0, + "content": "(2013). The current use of hand gesture recognition is primarily in the interaction between computers and humans Oudah et al. (2020). More specifically, typical practical applications can be found in the environment of games, assisted living, and virtual reality Mujahid et al. (2021). In the following, we conduct experiments on a hand gesture recognition dataset constructed by Mantecón et al. (2019), which consists of near-infrared stereo images obtained using the Leap Motion device. First, we crop or segment the images after which we use logistic regression for classification. We see that adversarial logistic regression deteriorates robust generalization with increasing \\(\\epsilon_{\\mathrm{tr}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.221, + 0.825, + 0.306 + ], + "angle": 0, + "content": "Static hand-gesture dataset We use the dataset made available by Mantecón et al. (2019). This dataset consists of near-infrared stereo images taken with the Leap Motion device and provides detailed skeleton data. We base our analysis on the images only. The size of the images is \\(640 \\times 240\\) pixels. The dataset consists of 16 classes of hand poses taken by 25 different people. We note that the variety between the different people is relatively wide; there are men and women with different posture and hand sizes. However, the different samples taken by the same person are alike." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.312, + 0.825, + 0.395 + ], + "angle": 0, + "content": "We consider binary classification between the index-pose and L-pose, and take as a training set 30 images of the users 16 to 25. This results in a training dataset of 300 samples. We show two examples of the training dataset in Figure 19, each corresponding to a different class. Observe that the near-infrared images darken the background and successfully highlight the hand-pose. As a test dataset, we take 10 images of each of the two classes from the users 1 to 10 resulting in a test dataset of size 200." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.416, + 0.825, + 0.472 + ], + "angle": 0, + "content": "**Cropping the dataset** To speed up training and ease the classification problem, we crop the images from a size of \\(640 \\times 240\\) to a size of \\(200 \\times 200\\). We crop the images using a basic image segmentation technique to stay as close as possible to real-world applications. The aim is to crop the images such that the hand gesture is centered within the cropped image." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.478, + 0.825, + 0.563 + ], + "angle": 0, + "content": "For every user in the training set, we crop an image of the L-pose and the index pose by hand. We call these images the training masks \\(\\{\\mathrm{masks}_i\\}_{i=1}^{20}\\). We note that the more a particular window of an image resembles a mask, the more likely that the window captures the hand gesture correctly. Moreover, the near-infrared images are such that the hands of a person are brighter than the surroundings of the person itself. 
Based on these two observations, we define the best segment or window, defined by the upper left coordinates \\((i,j)\\), for an image \\(x\\) as the solution to the following optimization problem:" + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.586, + 0.825, + 0.627 + ], + "angle": 0, + "content": "\\[\n\\underset {i \\in [ 4 4 0 ], j \\in [ 4 0 ]} {\\arg \\min } \\sum_ {l = 1} ^ {2 0} \\| \\operatorname {m a s k s} _ {l} - x _ {\\{i: i + 2 0 0, j: j + 2 0 0 \\}} \\| _ {2} ^ {2} - \\frac {1}{2} \\| x _ {\\{i + w, j + h \\}} \\| _ {1}. \\tag {34}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.636, + 0.827, + 0.679 + ], + "angle": 0, + "content": "Equation 34 is solved using a full grid search. We use the result to crop both training and test images. Upon manual inspection of the cropped images, close to all images were perfectly cropped. We replace the handful poorly cropped training images with hand-cropped counterparts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.698, + 0.825, + 0.727 + ], + "angle": 0, + "content": "Square-mask perturbations Since we use logistic regression, we perform a full grid search to find the best adversarial perturbation at training and test time. 
For completeness, the upper left coordinates" + }, + { + "type": "image", + "bbox": [ + 0.185, + 0.759, + 0.388, + 0.85 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.856, + 0.336, + 0.869 + ], + "angle": 0, + "content": "(a) Robust error" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.759, + 0.601, + 0.85 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.445, + 0.856, + 0.553, + 0.869 + ], + "angle": 0, + "content": "(b) Standard error" + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.753, + 0.812, + 0.85 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.658, + 0.856, + 0.762, + 0.87 + ], + "angle": 0, + "content": "(c) Susceptibility" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.882, + 0.825, + 0.92 + ], + "angle": 0, + "content": "Figure 18: The robust error decomposition in standard error and susceptibility of the subsampled datasets of CIFAR-10 after adversarial and standard training. For small sample size, adversarial training has higher robust error than standard training."
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.101, + 0.437, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.307, + 0.239, + 0.371, + 0.253 + ], + "angle": 0, + "content": "(a) L pose" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.101, + 0.758, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.616, + 0.239, + 0.704, + 0.253 + ], + "angle": 0, + "content": "(b) Index pose" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.264, + 0.825, + 0.304 + ], + "angle": 0, + "content": "Figure 19: Examples of the original images of the considered hand-gestures. We recognize the \"L\"-sign in Figure 19a and the index sign in Figure 19b. Observe that the near-infrared images highlight the hand pose well and blends out much of the non-useful or noisy background." 
+ }, + { + "type": "image", + "bbox": [ + 0.212, + 0.317, + 0.379, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.236, + 0.45, + 0.353, + 0.464 + ], + "angle": 0, + "content": "(a) Cropped L pose" + }, + { + "type": "image", + "bbox": [ + 0.416, + 0.317, + 0.583, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.428, + 0.45, + 0.57, + 0.464 + ], + "angle": 0, + "content": "(b) Cropped index pose" + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.318, + 0.787, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.62, + 0.449, + 0.788, + 0.464 + ], + "angle": 0, + "content": "(c) Black-mask perturbation" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.475, + 0.825, + 0.503 + ], + "angle": 0, + "content": "Figure 20: Examples of the cropped hand-gesture images. We see that the hands are centered and the images have a size of \\(200 \\times 200\\). In Figure 20c we show an example of the square black-mask perturbation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.529, + 0.738, + 0.545 + ], + "angle": 0, + "content": "of the optimal black-mask perturbation of size \\(\\epsilon_{\\mathrm{tr}} \\times \\epsilon_{\\mathrm{tr}}\\) can be found as the solution to" + }, + { + "type": "equation", + "bbox": [ + 0.344, + 0.552, + 0.825, + 0.587 + ], + "angle": 0, + "content": "\\[\n\\arg \\max _ {i \\in [ 2 0 0 - \\epsilon_ {\\mathrm {t r}} ], j \\in [ 2 0 0 - \\epsilon_ {\\mathrm {t r}} ]} \\sum_ {l, m \\in [ \\epsilon_ {\\mathrm {t r}} ]} \\theta_ {[ i: i + l, j: j + m ]}. \\tag {35}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.593, + 0.825, + 0.622 + ], + "angle": 0, + "content": "The algorithm is rather slow as we iterate over all possible windows. We show a black-mask perturbation on an \\(L\\)-pose image in Figure 20c." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.637, + 0.825, + 0.68 + ], + "angle": 0, + "content": "Results We run adversarial logistic regression with square-mask perturbations on the cropped dataset and vary the adversarial training budget and plot the result in Figure 21. We observe that adversarial logistic regression deteriorates robust generalization." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.686, + 0.825, + 0.766 + ], + "angle": 0, + "content": "Because we use adversarial logistic regression, we are able to visualize the classifier. Given the classifier induced by \\(\\theta\\), we can visualize how it classifies the images by plotting \\(\\frac{\\theta - \\min_{i\\in[d]}\\theta_{[i]}}{\\max_{i\\in[d]}\\theta_{[i]}}\\in [0,1]^d\\). Recall that the class-prediction of our predictor for a data point \\((x,y)\\) is given by \\(\\mathrm{sign}(\\theta^{\\top}x)\\in \\{\\pm 1\\}\\). The lighter parts of the resulting image correspond to the class with label 1 and the darker patches with the class corresponding to label \\(-1\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.551, + 0.926 + ], + "angle": 0, + "content": "We plot the classifiers obtained by standard logistic regression and adversarial logistic regression with training adversarial budgets \\(\\epsilon_{\\mathrm{tr}}\\) of 10 and 25 in Figure 22. The darker parts in the classifier correspond to patches that are typically bright for the \\(L\\)-pose. Complementary, the lighter patches in the classifier correspond to patches that are typically bright for the index pose. We see that in the case of adversarial logistic regression, the background noise is much higher than for standard logistic regression.
In other words, adversarial logistic regression puts more weight on non-signal parts in the images to classify the" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.788, + 0.822, + 0.91 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.559, + 0.92, + 0.826, + 0.971 + ], + "angle": 0, + "content": "Figure 21: The standard error and robust error for varying adversarial training budget \\(\\epsilon_{\\mathrm{tr}}\\). We see that the larger \\(\\epsilon_{\\mathrm{tr}}\\) the higher the robust error." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.105, + 0.551, + 0.133 + ], + "angle": 0, + "content": "training dataset and hence exhibits worse performance on the test dataset." + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.143, + 0.38, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.263, + 0.274, + 0.327, + 0.288 + ], + "angle": 0, + "content": "(a) \\(\\epsilon_{\\mathrm{tr}} = 0\\)" + }, + { + "type": "image", + "bbox": [ + 0.416, + 0.143, + 0.583, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.274, + 0.536, + 0.288 + ], + "angle": 0, + "content": "(b) \\(\\epsilon_{\\mathrm{tr}} = 10\\)" + }, + { + "type": "image", + "bbox": [ + 0.623, + 0.143, + 0.788, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.669, + 0.275, + 0.741, + 0.288 + ], + "angle": 0, + "content": "(c) \\(\\epsilon_{\\mathrm{tr}} = 25\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.3, + 0.828, + 0.364 + ], + "angle": 0, + "content": "Figure 22: We visualize the logistic regression solutions. 
In Figure 22a we plot the vector that induces the classifier obtained after standard training. In Figure 22b and Figure 22c we plot the vector obtained after training with square-mask perturbations of size 10 and 25, respectively. We note the non-signal enhanced background correlations at the parts highlighted with the red circles in the image projection of the adversarially trained classifiers." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "29" + } + ] +] \ No newline at end of file diff --git a/2023/Why adversarial training can hurt robust accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_origin.pdf b/2023/Why adversarial training can hurt robust accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9ac4c4bdb9f3d7652feebf3c39938941ed1547a4 --- /dev/null +++ b/2023/Why adversarial training can hurt robust accuracy/b15d9063-140c-4e2d-a2bd-fd12553144a4_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f6d09f110ff6fbdeb57363e1dbe3654d3302f7dcff09c066afdfc9ef564e831 +size 19920812 diff --git a/2023/Why adversarial training can hurt robust accuracy/full.md b/2023/Why adversarial training can hurt robust accuracy/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ed30bb17266dd6058cccc181dcdeade42b62e2a4 --- /dev/null +++ b/2023/Why adversarial training can hurt robust accuracy/full.md @@ -0,0 +1,965 @@ +# WHY ADVERSARIAL TRAINING CAN HURT ROBUST ACCURACY + +Jacob Clarysse1, Julia Hörrmann2, Fanny Yang1 + +1. Department of Computer Science, ETH Zürich +2. Department of Mathematics, ETH Zürich + +{jacob.clarysse;fan.yang}@inf.ethz.ch; + +{julia.hoerrmann}@stat.math.ethz.ch + +# ABSTRACT + +Machine learning classifiers with high test accuracy often perform poorly under adversarial perturbations. It is commonly believed that adversarial training alleviates this issue. 
In this paper, we demonstrate that, surprisingly, the opposite can be true for a natural class of perceptible perturbations — even though adversarial training helps when enough data is available, it may in fact hurt robust generalization in the small sample size regime. We first prove this phenomenon for a high-dimensional linear classification setting with noiseless observations. Using intuitive insights from the proof, we could find perturbations on standard image datasets for which this behavior persists. Specifically, it occurs for perceptible perturbations that effectively reduce class information such as object occlusions or corruptions. + +# 1 INTRODUCTION + +Today's best-performing classifiers are vulnerable to adversarial attacks Goodfellow et al. (2015); Szegedy et al. (2014) and exhibit high robust error: for many inputs, their predictions change under adversarial perturbations, even though the true class stays the same. Such content-preserving (Gilmer et al., 2018), consistent (Raghunathan et al., 2020) attacks can be either perceptible or imperceptible. For image datasets, most work to date studies imperceptible attacks that are based on perturbations with limited strength or attack budget. These include bounded $\ell_p$ -norm perturbations (Goodfellow et al., 2015; Madry et al., 2018; Moosavi-Dezfooli et al., 2016), small transformations using image processing techniques (Ghiasi et al., 2019; Zhao et al., 2020; Laidlaw et al., 2021; Luo et al., 2018) or nearby samples on the data manifold (Lin et al., 2020; Zhou et al., 2020). Even though they do not visibly change the image by definition, imperceptible attacks can often successfully fool a learned classifier. 
+ +![](images/db053b4d669625641de60d177fdcb522d14ad763796fe72449396ecab37d64da.jpg) +Figure 1: On the Waterbirds dataset attacked by the adversarial illumination attack, adversarial training (yellow) yields higher robust error than standard training (blue) when the sample size is small, even though it helps for large sample sizes and in a setting where the standard error of standard training is small. (see App. D for details). + +On the other hand, perturbations that naturally occur and are physically realizable are commonly perceptible. Some perceptible perturbations specifically target the object to be recognized: these include occlusions (e.g. stickers placed on traffic signs (Eykholt et al., 2018) or masks of different sizes that cover important features of human faces (Wu et al., 2020)) or corruptions that are caused by the image capturing process (animals that move faster than the shutter speed or objects that are not well-lit, see Figure 2). Others transform the whole image and are not confined to the object itself, such as rotations, translations or corruptions Engstrom et al. (2019); Kang et al. (2019). In this paper, we refer to such perceptible attacks as directed attacks. In contrast to other attacks, they effectively reduce useful class information in the input for any model, without necessarily changing the true label - we say that they are directed and consistent, more formally defined in Section 2. For example, a stop sign with a small sticker could partially cover the text without losing its semantic meaning. Similarly, a flying bird captured with a long exposure time can induce motion blur in the final image without becoming unrecognizable to the observer. 
+ +![](images/20e801aeba267110296db82a839e1ddf1a7cf57b7ffc5ae8cbe7af8b242fe422.jpg) +(a) Masks + +![](images/173687da0046c68a3fb5cdcf13b159e5479d8229046b575ce294cf9d7e57370a.jpg) +(b) Original + +![](images/ed5c3950496d37e51c824df4b0ece66fb6baded47681d50b5abf1f39ae0d3bd7.jpg) +(c) Lighting +Figure 2: Examples of directed attacks on CIFAR10 and the Waterbirds dataset. In Figure 2a, we corrupt the image with a black mask of size $2 \times 2$ and in Figure 2c and 2d we change the lighting conditions (darkening) and apply motion blur on the bird in the image respectively. All perturbations reduce the information about the class in the images: they are the result of directed attacks. (e) Directed attacks are a subset of perceptible attacks. + +![](images/bead8d81be1b2f47ff023313e31b2aa7d1560cadc72723872a6c2696a8f3890a.jpg) +(d) Blur + +![](images/0df3fd1cecceec31c77a870c31c54cfecf5c5ced125048a3ff1b76430c1686c9.jpg) +(e) Classification of perturbations + +![](images/d0acf40bcb6c92928c178ce679671e9bff4201863a077344aa30cefc755687d2.jpg) + +In the literature so far, it is widely acknowledged that adversarial training with the same perturbation type and budget as during test time often achieves significantly lower robust error than standard training (Madry et al., 2018; Zhang et al., 2019; Bai et al., 2021). + +In contrast, we show that adversarial training not only increases standard error (Zhang et al., 2019; Tsipras et al., 2019; Stutz et al., 2019; Raghunathan et al., 2020), but surprisingly, in the low sample regime, + +adversarial training may even increase the robust error compared to standard training! + +Figure 1 illustrates the main message of our paper on the Waterbirds dataset: Although adversarial training with directed attacks outperforms standard training when enough training samples are available, it is inferior when the sample size is small (but still large enough to obtain a small standard test error). 
+ +Our contributions are as follows: + +- We prove that, almost surely, adversarially training a linear classifier on separable data yields a monotonically increasing robust error as the perturbation budget grows. We further establish high-probability non-asymptotic lower bounds on the robust error gap between adversarial and standard training. +- Our proof provides intuition for why this lower bound on the gap is particularly large for directed attacks in the low sample regime. +- We observe empirically for different directed attacks on real-world image datasets that this behavior persists: adversarial training for directed attacks hurts robust accuracy when the sample size is small. + +# 2 ROBUST CLASSIFICATION + +We first introduce our robust classification setting more formally by defining the notions of adversarial robustness, directed attacks and adversarial training used throughout the paper. + +Adversarially robust classifiers For inputs $x \in \mathbb{R}^d$ , we consider multi-class classifiers associated with parameterized functions $f_{\theta}: \mathbb{R}^d \to \mathbb{R}^K$ if $K > 2$ and $f_{\theta}: \mathbb{R}^d \to \mathbb{R}$ if $K = 2$ , where $K$ is the number of labels. For example, $f_{\theta}(x)$ could be a linear model (as in Section 3) or a neural network (as in Section 4). The output label predictions are obtained by $h(f_{\theta}(x)) = \mathrm{sign}(f_{\theta}(x))$ for $K = 2$ and $h(f_{\theta}(x)) = \arg \max_{k \in \{1, \dots, K\}} f_{\theta}(x)_k$ for $K > 2$ . + +In order to convince practitioners to use machine learning models in the wild, it is key to demonstrate that they exhibit robustness. One kind of robustness is that they do not change prediction when the input is subject to consistent perturbations, which are small class-preserving perturbations. 
Mathematically speaking, for the underlying joint data distribution $\mathbb{P}$ , the model should have a small $\epsilon_{te}$ -robust error, defined as + +$$ +\operatorname {E r r} (\theta ; \epsilon_ {\mathrm {t e}}) := \mathbb {E} _ {(x, y) \sim \mathbb {P}} \max _ {x ^ {\prime} \in T (x; \epsilon_ {\mathrm {t e}})} \ell \left(f _ {\theta} \left(x ^ {\prime}\right), y\right), \tag {1} +$$ + +where $\ell$ is 0 if the class determined by $h(f_{\theta}(x))$ is equal to $y$ and 1 otherwise. Further, $T(x;\epsilon_{te})$ indicates a perturbation set around $x$ of a certain transformation type with size $\epsilon_{test}$ . Note that + +the (standard) error $\mathbb{E}_{(x,y)\sim \mathbb{P}}\ell (f_{\theta}(x),y)$ of a classifier corresponds to $\mathrm{Err}(\theta ;0)$ - the robust error evaluated at $\epsilon_{\mathrm{te}} = 0$ + +Directed attacks The inner maximization in Equation 1 is often called the adversarial attack of the input $x$ for the model $f_{\theta}$ and the corresponding solution is referred to as the adversarial example. In this paper, we consider directed attacks that effectively reduce the information about the true classes, with image-based examples depicted in Figure 2. For linear classification, we analyze directed attacks in the form of additive perturbations that are constrained to the direction of the optimal decision boundary (see details in Section 3.1). In particular, note that the set of directed perturbations is restricted to directions attacking the Bayes optimal classifier. 
+ +Adversarial training A common approach to obtain classifiers with a good robust accuracy is to minimize the training objective $\mathcal{L}_{\epsilon_{\mathrm{tr}}}$ with a surrogate robust classification loss $L$ + +$$ +\mathcal {L} _ {\epsilon_ {\mathrm {t r}}} (\theta) := \frac {1}{n} \sum_ {i = 1} ^ {n} \max _ {x _ {i} ^ {\prime} \in T \left(x _ {i}; \epsilon_ {\mathrm {t r}}\right)} L \left(f _ {\theta} \left(x _ {i} ^ {\prime}\right) y _ {i}\right), \tag {2} +$$ + +also called adversarial training. In practice, we often use the cross entropy loss $L(z) = \log (1 + e^{-z})$ and minimize the robust objective by using first order optimization methods such as (stochastic) gradient descent. SGD is also the algorithm that we focus on in both the theoretical and experimental sections. When the desired type of robustness is known in advance, it is standard practice to use the same perturbation set for training as for testing, i.e. $T(x;\epsilon_{\mathrm{tr}}) = T(x;\epsilon_{\mathrm{te}})$ . For example, Madry et al. (2018) show that the robust error sharply increases for $\epsilon_{\mathrm{tr}} < \epsilon_{\mathrm{te}}$ . In this paper, we demonstrate that for directed attacks in the small sample size regime, in fact, the opposite is true. + +# 3 THEORETICAL RESULTS + +In this section, we prove for linear functions $f_{\theta}(x) = \theta^{\top}x$ that in the case of directed attacks, robust generalization deteriorates with increasing $\epsilon_{\mathrm{tr}}$ . The proof, albeit in a simple setting, provides explanations for why adversarial training fails in the high-dimensional regime for such attacks. + +# 3.1 SETTING + +We now introduce the precise linear setting used in our theoretical results. + +Data model We assume that the ground truth and hypothesis class are given by linear functions $f_{\theta}(x) = \theta^{\top}x$ and the sample size $n$ is lower than the ambient dimension $d$ minus one. 
The generative distribution $\mathbb{P}_r$ is similar to (Tsipras et al., 2019; Nagarajan & Kolter, 2019): The label $y \in \{+1, -1\}$ is drawn with equal probability and the covariate vector is sampled as $x = [y\frac{r}{2}, \tilde{x}]$ with the random vector $\tilde{x} \in \mathbb{R}^{d-1}$ drawn from a zero-mean normal distribution, i.e. $\tilde{x} \sim \mathcal{N}(0, \sigma^2 I_{d-1})$ . We would like to learn a classifier that has low robust error by using a dataset $D = (x_i, y_i)_{i=1}^n$ with $n$ i.i.d. samples from $\mathbb{P}_r$ . Intuitively, the separation distance $r$ reflects the signal strength of the data distribution. + +Notice that the distribution $\mathbb{P}_r$ is noiseless: for a given input $x$ , the label $y = \mathrm{sign}(x_{[1]})$ is deterministic. Further, the Bayes optimal linear classifier (also referred to as the ground truth) is parameterized by the first standard coordinate vector, $\theta^{\star} = e_1$ . By definition, the ground truth is robust against all perturbations that do not change the sign in the first coordinate of the sample, i.e. consistent perturbations, and hence so is the optimal robust classifier. + +Directed attacks In this paper, we focus on consistent directed attacks that by definition efficiently concentrate their attack budget to reduce the class information. For our linear setting this information lies in the first entry. Hence, we can model such attacks by additive perturbations in the first dimension + +$$ +T (x; \epsilon) = \left\{x ^ {\prime} = x + \delta \mid \delta = \beta e _ {1} \text{ and } - \epsilon \leq \beta \leq \epsilon \right\}. \tag {3} +$$ + +Note that this attack is always in the direction of the signal dimension, i.e. the Bayes optimal classifier or equivalently the ground truth. Furthermore, when $\epsilon < \frac{r}{2}$ , it is a consistent directed attack. 
Observe how this is different from $\ell_p$ -attacks — an $\ell_p$ attack, depending on the model, may add a perturbation that only has a very small component in the signal direction. + +1Note that the result more generally holds for non-sparse models that are not axis aligned by way of a simple rotation $z = Ux$ . In that case the distribution is characterized by $\theta^{\star} = u_{1}$ , where $u_{1}$ is the first column vector of $U$ , and a rotated Gaussian in the $d - 1$ dimensions orthogonal to $\theta^{\star}$ . + +![](images/0119c40cdabdd7186fc2459084cebce9704f5969cd0ff3776576c1b7ada73052.jpg) +(a) Robust error increase with $\epsilon_{\mathrm{tr}}$ (b) Standard-adversarial training (c) Effect of overparameterization + +![](images/f3fa5b762288fef2b2c93a37e2bb3c5345351eff1a6e13f6490d5a706dae9852.jpg) +Figure 3: Experimental verification of Theorem 3.1. (a) We set $d = 1000$ , $r = 12$ and $n = 50$ . The robust error gap between standard and adversarial training as a function of the adversarial budget $\epsilon_{\mathrm{tr}}$ for 5 independent experiments (blue) and the lower bound given in Theorem 3.1 (gray). In (b) and (c), we set $d = 10000$ and vary the number of samples $n$ . (b) The robust error of standard and adversarial training with $\epsilon_{\mathrm{tr}} = 4.5$ . (c) The error gap and the lower bound of Theorem 3.1. For more experimental details see Appendix C. + +![](images/a0e1558dbd11522644d5a3300bc9272536b8d0027c140ae0e2b01ff2764679d9.jpg) + +Robust max- $\ell_2$ -margin classifier We study a classifier that is the solution of running gradient descent on the adversarial logistic loss. A long line of work (Soudry et al., 2018; Ji & Telgarsky, 2019; Chizat & Bach, 2020; Nacson et al., 2019; Liu et al., 2020) studies the implicit bias of (S)GD on the (standard) logistic loss and separable data. In particular, they show directional convergence to the max-margin solution. 
For the adversarial logistic loss and linear models in particular, (S)GD converges to the robust max- $\ell_2$ -margin solution (Li et al., 2020), + +$$ +\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}} := \underset {\| \theta \| _ {2} \leq 1} {\arg \max } \underset {i \in [ n ], x _ {i} ^ {\prime} \in T (x _ {i}; \epsilon_ {\mathrm {t r}})} {\min } y _ {i} \theta^ {\top} x _ {i} ^ {\prime}. \tag {4} +$$ + +Even though our result is proven for the max- $\ell_2$ -margin classifier, it can easily be extended to other interpolators. + +# 3.2 MAIN RESULTS + +We are now ready to characterize the $\epsilon_{\mathrm{te}}$ -robust error as a function of $\epsilon_{\mathrm{tr}}$ , the separation $r$ , the dimension $d$ and sample size $n$ of the data. In the theorem statement we use the following quantities + +$$ +\varphi_ {\min } = \frac {\sigma}{r / 2 - \epsilon_ {\mathrm {t e}}} \left(\sqrt {\frac {d - 1}{n}} - \left(1 + \sqrt {\frac {2 \log (2 / \delta)}{n}}\right)\right) +$$ + +$$ +\varphi_ {\max } = \frac {\sigma}{r / 2 - \epsilon_ {\mathrm {t e}}} \left(\sqrt {\frac {d - 1}{n}} + \left(1 + \sqrt {\frac {2 \log (2 / \delta)}{n}}\right)\right) +$$ + +that arise from concentration bounds for the singular values of the random data matrix. Further, let $\tilde{\epsilon} := \frac{r}{2} - \frac{\varphi_{\max}}{\sqrt{2}}$ and denote by $\Phi$ the cumulative distribution function of a standard normal. + +Theorem 3.1. Assume $d - 1 > n$ . For test samples from $\mathbb{P}_r$ , perturbation set type $T$ as in Equation 3 and any $0 \leq \epsilon_{te} < \frac{r}{2}$ , the following holds for the $\epsilon_{te}$ -robust error of the classifier (Equation 1) resulting from $\epsilon_{tr}$ -adversarial training: + +1. 
The $\epsilon_{te}$ -robust error of the $\epsilon_{tr}$ -robust max-margin estimator reads + +$$ +\operatorname {E r r} \left(\widehat {\theta} ^ {\epsilon_ {t r}}; \epsilon_ {t e}\right) = \Phi \left(- \frac {\left(\frac {r}{2} - \epsilon_ {t r}\right)}{\tilde {\varphi}}\right) \tag {5} +$$ + +for a random quantity $\tilde{\varphi} > 0$ depending on $\sigma, r, \epsilon_{te}$ ; the robust error is hence strictly increasing in the adversarial training budget $\epsilon_{tr}$ . + +2. With probability at least $1 - \delta$ , we further have $\varphi_{\min} \leq \tilde{\varphi} \leq \varphi_{\max}$ and the following lower bound on the robust error increase by adversarially training with size $\epsilon_{tr}$ + +$$ +\operatorname {E r r} \left(\widehat {\theta} ^ {\epsilon_ {t r}}; \epsilon_ {t e}\right) - \operatorname {E r r} \left(\widehat {\theta} ^ {0}; \epsilon_ {t e}\right) \geq \Phi \left(\frac {r / 2}{\varphi_ {\min }}\right) - \Phi \left(\frac {r / 2 - \min \left\{\epsilon_ {t r} , \widetilde {\epsilon} \right\}}{\varphi_ {\min }}\right). \tag {6} +$$ + +The proof can be found in Appendix A and primarily relies on estimation of singular values of high-dimensional matrices. Note that the theorem holds for any $0 \leq \epsilon_{\mathrm{te}} < \frac{r}{2}$ and hence also directly + +![](images/11a7371c179794f387e78dbb1dffa7b6a78670bc7a9813b49ce7cad1a08c10b2.jpg) +(a) Robust error vs $\epsilon_{\mathrm{tr}}$ + +![](images/39bc6a2d0b245932b78657cad1591dc252b093589a70d83d2dc4e67a1653351f.jpg) +(b) Robust error decomposition + +![](images/40c9459235c3e2eac9b86560c410a3b8b2f3ccc2441903d056f733d89252a7ed.jpg) +(c) Intuition in 2D +Figure 4: (a) We set $d = 1000$ and $r = 12$ . The robust error as a function of the adversarial training budget $\epsilon_{\mathrm{tr}}$ for different $d / n$ . (b) The robust error decomposition into susceptibility and standard error as a function of the adversarial budget $\epsilon_{\mathrm{tr}}$ . Full experimental details can be found in Section C. 
(c) 2D illustration providing intuition for the linear setting. The effect of adversarial training with directed attacks is captured in the yellow dotted lines: adversarially perturbed training points move closer to the true boundary which in turn tilts the decision boundary more heavily in the wrong direction. + +applies to the standard error by setting $\epsilon_{\mathrm{te}} = 0$ . In Figure 3, we empirically confirm the statements of Theorem 3.1 by performing multiple experiments on synthetic datasets as described in Subsection 3.1 with different choices of $d / n$ and $\epsilon_{\mathrm{tr}}$ . In the first statement, we prove that for small sample-size $(n < d - 1)$ noiseless data, almost surely, the robust error increases monotonically with adversarial training budget $\epsilon_{\mathrm{tr}} > 0$ . In Figure 3a, we plot the robust error gap between standard and adversarial logistic regression as a function of the adversarial training budget $\epsilon_{\mathrm{tr}}$ for 5 runs. + +The second statement establishes a simplified lower bound on the robust error increase for adversarial training (for a fixed $\epsilon_{\mathrm{tr}} = \epsilon_{\mathrm{te}}$ ) compared to standard training. In Figures 3a and 3c, we show how the lower bound closely predicts the robust error gap in our synthetic experiments. Furthermore, by the dependence of $\varphi_{\mathrm{min}}$ on the overparameterization ratio $d / n$ , the lower bound on the robust error gap is amplified for large $d / n$ . Indeed, Figure 3c shows how the error gap increases with $d / n$ both theoretically and experimentally. However, when $d / n$ increases above a certain threshold, the gap decreases again, as standard training fails to learn the signal and yields a high error (see Figure 3b). + +# 3.3 PROOF INTUITION + +The reason that adversarial training hurts robust generalization is based on an extreme robust vs. standard error trade-off. 
We now provide intuition for the effect of directed attacks and the low sample regime on the $\epsilon_{\mathrm{tr}}$ -robust max- $\ell_2$ -margin solution by decomposing the robust error $\mathrm{Err}(\theta; \epsilon_{\mathrm{te}})$ . Notice that $\epsilon_{\mathrm{te}}$ -robust error $\mathrm{Err}(\theta; \epsilon_{\mathrm{te}})$ can be written as the probability of the union of two events: the event that the classifier based on $\theta$ is wrong and the event that the classifier is susceptible to attacks: + +$$ +\operatorname {E r r} (\theta ; \epsilon_ {\mathrm {t e}}) = \mathbb {E} _ {x, y \sim \mathbb {P}} \left[ \mathbb {I} \left\{y f _ {\theta} (x) < 0 \right\} \vee \max _ {x ^ {\prime} \in T (x; \epsilon_ {\mathrm {t e}})} \mathbb {I} \left\{f _ {\theta} (x) f _ {\theta} \left(x ^ {\prime}\right) < 0 \right\} \right] \leq \operatorname {E r r} (\theta ; 0) + \operatorname {S u s c} (\theta ; \epsilon_ {\mathrm {t e}}) \tag {7} +$$ + +where $\mathrm{Susc}(\theta ;\epsilon_{\mathrm{te}})$ is the expectation of the maximization term in Equation 7. $\mathrm{Susc}(\theta ;\epsilon_{\mathrm{te}})$ represents the $\epsilon_{\mathrm{te}}$ -attack-susceptibility of a classifier induced by $\theta$ and $\mathrm{Err}(\theta ;0)$ its standard error. In our linear setting, we can lower bound Equation 7 by $\mathrm{Err}(\theta ;0) + \frac{1}{2}\mathrm{Susc}(\theta ;\epsilon_{\mathrm{te}})$ . Hence, Equation 7 suggests that the robust error can only be small if both the standard error and susceptibility are small. In Figure 4b, we plot the decomposition of the robust error in standard error and susceptibility for adversarial logistic regression with increasing $\epsilon_{\mathrm{tr}}$ . We observe that increasing $\epsilon_{\mathrm{tr}}$ increases the standard error too drastically compared to the decrease in susceptibility, leading to a drop in robust accuracy. For completeness, in Appendix B, we provide upper and lower bounds for the susceptibility score. 
We now explain why, in the small-sample size regime, adversarial training with directed attacks 3 may increase standard error to the extent that it dominates the decrease in susceptibility. + +A key observation is that the robust max- $\ell_2$ -margin solution of a dataset $D = \{(x_i, y_i)\}_{i=1}^n$ maximizes the minimum margin that reads $\min_{i \in [n]} y_i \theta^\top (x_i - y_i \epsilon_{\mathrm{tr}} | \theta_{[1]}| e_1)$ , where $\theta_{[i]}$ refers to the $i$ -th entry of vector $\theta$ . Therefore, it simply corresponds to the max- $\ell_2$ -margin solution of the dataset shifted towards the decision boundary $D_{\epsilon_{\mathrm{tr}}} = \{(x_i - y_i \epsilon_{\mathrm{tr}} | \widehat{\theta}_{[1]}^{\epsilon_{\mathrm{tr}}} | e_1, y_i)\}_{i=1}^n$ . Using this fact, we obtain a closed-form expression of the (normalized) max-margin solution 4 as a function of $\epsilon_{\mathrm{tr}}$ that reads + +$$ +\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}} = \frac {1}{\sqrt {(r - 2 \epsilon_ {\mathrm {t r}}) ^ {2} + 4 \tilde {\gamma} ^ {2}}} \left[ r - 2 \epsilon_ {\mathrm {t r}}, 2 \tilde {\gamma} \tilde {\theta} \right], \tag {8} +$$ + +where $\| \tilde{\theta} \|_2 = 1$ and $\tilde{\gamma} > 0$ is a random quantity associated with the max- $\ell_2$ -margin solution of the $d - 1$ dimensional Gaussian inputs orthogonal to the signal direction (see Lemma A.1 in Section A). + +In high dimensions, with high probability any two Gaussian random vectors are far apart – in our distributional setting, this corresponds to the vectors being far apart in the non-signal directions. In Figure 4c, we illustrate the phenomenon using a 2D cartoon, where the few samples in the dataset are all far apart in the non-signal direction. We see how shifting the dataset closer to the true decision boundary, may result in a max-margin solution (yellow) that aligns much worse with the ground truth (gray), compared to the estimator learned from the original points (blue). 
Even though the new (robust max-margin) classifier (yellow) is less susceptible to attacks in the signal dimension, it also uses the signal dimension less. Mathematically, this is reflected in the expression of the max-margin solution in Equation 8: We see that the first (signal) dimension is used less as $\epsilon_{\mathrm{tr}}$ increases. + +# 3.4 GENERALITY OF THE RESULTS + +In this section we discuss how Theorem 3.1 might generalize to other perturbation sets and models. + +Signal direction is known The type of additive perturbations used in Theorem 3.1, defined in Equation 3, is explicitly constrained to the direction of the true signal. This choice is reminiscent of corruptions where every possible perturbation in the set is directly targeted at the object to be recognized, such as motion blur of moving objects. Such corruptions are also studied in the context of domain generalization and adaptation (Schneider et al., 2020). + +Directed attacks in general, however, may also consist of perturbation sets that are only strongly biased towards the true signal direction. They may find the true signal direction only when the inner maximization is exact. The following corollary extends Theorem 3.1 to small $\ell_1$ -perturbations + +$$ +T (x; \epsilon) = \left\{x ^ {\prime} = x + \delta \mid \| \delta \| _ {1} \leq \epsilon \right\}, \tag {9} +$$ + +for $0 < \epsilon < \frac{r}{2}$ that reflect such attacks. We state the corollary here and give the proof in Appendix A. + +Corollary 3.2. Theorem 3.1 also holds for Equation 4 with perturbation sets defined in Equation 9. + +The proof uses the fact that the inner maximization effectively results in a sparse perturbation equivalent to the attack resulting from the perturbation set defined in Equation 3. + +Other models Motivated by the implicit bias results of (stochastic) gradient descent on the logistic loss, Theorem 3.1 is proven for the max- $\ell_2$ -margin solution. 
We would like to conjecture that for the data distribution in Section 3, adversarial training can hurt robust generalization also for other models with zero training error (interpolators in short). For example, Adaboost is a widely used algorithm that converges to the max- $\ell_1$ -margin classifier (Telgarsky, 2013). One might argue that for a sparse ground truth, the max- $\ell_1$ -margin classifier should (at least in the noiseless case) have the right inductive bias to alleviate large bias in high dimensions. Hence, in many cases the (sparse) max- $\ell_1$ -margin solution might align with the ground truth for a given dataset. However, we conjecture that even in this case, the robust max- $\ell_1$ -margin solution would be misled to choose a wrong sparse solution. This can be seen with the help of the cartoon illustration in Figure 4c. + +# 4 REAL-WORLD EXPERIMENTS + +In this section, we demonstrate that the proof intuition of the linear case may generalize to more complex models. Specifically, the insights from Section 3 helped us to identify realistic directed attacks on standard image datasets for which adversarial training hurts robust accuracy in the low sample regime. In what follows, we present experimental results for corruption attacks on the Waterbirds dataset. Due to space constraints, results on the mask attacks on CIFAR-10 can be found in Appendix E. The corresponding experimental details and more results on other additional image datasets (such as the hand gestures dataset) can be found in Appendices D, E and F. + +# 4.1 DATASETS AND MODELS + +We consider three datasets: the Waterbirds dataset, CIFAR-10 and a hand gesture dataset. Due to space constraints, we describe CIFAR-10 and the hand gesture dataset in Appendix E and F. 
Apart from CIFAR-10 and the hand gesture dataset, we build a new version of the Waterbirds dataset, consisting of images of water- and landbirds of size $256 \times 256$ and labels that distinguish the two + +![](images/2f1792755900aa0a2ae6a16b916feebafc887c18064a2ec4876c18540ace6f1e.jpg) +(a) Robust error with increasing $\epsilon_{\mathrm{tr}}$ + +![](images/2e512a26d35f101675ce39afd985a02b24707f099404378131e77f7f20e80434.jpg) +Figure 5: Experiments on the Waterbirds dataset considering the adversarial illumination attack with $\epsilon_{\mathrm{te}} = 0.3$ . We plot the mean and standard deviation of the mean of several independent experiments. (a) The robust error increases with larger $\epsilon_{\mathrm{tr}}$ in the low sample size regime. (b) We set $n = 20$ and plot the robust error decomposition as in Equation 7 with increasing $\epsilon_{\mathrm{tr}}$ . While the susceptibility decreases slightly, the increase in standard error is much more severe, resulting in an increase in robust error. (c) The robust error of standard training and adversarial training as a function of the number of samples, where the smallest sample size still yields small ( $< 10\%$ ) standard test error for standard training. While adversarial training hurts for small sample sizes, it helps for larger sample sizes. For more experimental details see App. D. + +![](images/2bf92660f987cf1228c27d11c2f82a9521dc4cd942f53ba6f8942dbe360b164d.jpg) +(b) Robust error decomposition +(c) Robust error vs. #samples + +types of birds. Using code provided by Sagawa et al. (2020), we construct the dataset as follows: First, we sample equally many water- and landbirds from the CUB-200 dataset (Welinder et al., 2010). Then, we segment the birds and paste them onto a background image that is randomly sampled (without replacement) from the Places-256 dataset (Zhou et al., 2017). Also, following the choice of Sagawa et al. 
(2020), we use as models a ResNet50 and a ResNet18 that were both pretrained on ImageNet and achieve near perfect standard accuracy. In Appendix D, we complement the results of this section by reporting the results of similar experiments with different architectures. + +# 4.2 IMPLEMENTATION OF THE DIRECTED ATTACKS + +In this section, we consider two attacks on the Waterbirds dataset: motion blur and adversarial illumination as depicted in Figure 2. In Appendix E, we also discuss the mask attack, which should mimic occlusions of objects in images that are physically realizable (Eykholt et al., 2018; Wu et al., 2020). On the other hand, motion blur may arise naturally when photographing fast moving objects with a slow shutter speed. Lastly, adversarial illumination may result from adversarial lighting conditions. Next, we describe the motion blur and adversarial illumination attacks in more detail. + +Motion blur For the Waterbirds dataset we can implement motion blur attacks on the object (the bird) specifically, a natural corruption that could occur if birds move at speeds that are faster than the shutter speed. The aim is to be robust against all motion blur severity levels up to $M_{max} = 15$ . To simulate motion blur, we apply a motion blur filter with a kernel of size $M$ on the segmented bird before we paste it onto the background image. We can change the severity level of the motion blur by increasing the kernel size of the filter. See Appendix D for concrete expressions of the motion blur kernel. Intuitively the worst attack should be the most severe blur, rendering a search over a range of severity superfluous. However, similar to rotations, this is not necessarily true in practice since the training loss on neural networks is generally nonconvex. Therefore, for an exact evaluation of the robust error at test time, we perform a full grid search over all kernel sizes in $[1,2,\dots,M_{max}]$ . 
We refer to Figure 2d and Section D for an illustration of our motion blur attack. During training time, we perform an approximate search over kernels with sizes $2i$ for $i = 1,\dots,M_{max}/2$ . + +Adversarial illumination As a second attack on the Waterbirds dataset, we consider adversarial illumination. The adversary can darken or brighten the bird without corrupting the background of the image. The attack aims to model images where the object at interest is hidden in shadows or placed against bright light. To compute the adversarial illumination attack, we modify the brightness of the segmented bird by adding a constant $a \in [-\epsilon_{\mathrm{te}}, \epsilon_{\mathrm{te}}]$ to all pixel values, before pasting the bird onto the background image. With an analogous argument as for the adversarial search for motion blur, the exact evaluation requires an actual search over the interval $[- \epsilon_{\mathrm{te}}, \epsilon_{\mathrm{te}}]$ . We find the most adversarial lighting level, i.e. the value of $a$ , by equidistantly partitioning the interval $[- \epsilon_{\mathrm{te}}, \epsilon_{\mathrm{te}}]$ in $K$ steps and performing a full list-search over all steps. See Figure 2c and Appendix D for an illustration of the adversarial illumination attack. We choose $K = 65, 33$ during test and training time respectively. + +Adversarial training For all datasets and attacks, we run SGD until convergence on the robust cross-entropy loss 2. In each iteration, we search for an adversarial example as described above and + +![](images/9b0fbfe233ca438fe0d94437618e0cef512148bc02efadc540b13a3ead58cd5d.jpg) +(a) Robust error with increasing $\epsilon_{\mathrm{tr}}$ + +![](images/6cd0e52c6637acdb28d0b7b8c2632185837ce80d86fca17c63afd52390a4e5b2.jpg) +Figure 6: Experiments on the (subsampled) Waterbirds dataset using the motion blur attack. (a) Even though adversarial training hurts robust generalization for low sample size ( $n = 20$ ), it helps for $n = 50$ . 
(b) For $n = 20$ , the decomposition of the robust error in standard error and susceptibility as a function of adversarial budget $\epsilon_{\mathrm{tr}}$ . The increase in standard error is more severe than the drop in susceptibility, leading to a slight increase in robust error. (c) The robust error of standard and adversarial training on settings where the test error after standard training is small as a function of the number of samples. While adversarial training hurts for small sample sizes, it helps for larger sample sizes. For each experiment we plot the mean and standard deviation of the mean of independent experiments. For more experimental details see App. D. + +![](images/a05f6749a5966589bd482fc2270139528f0e355f0c79e9729cedbf98242a810c.jpg) +(b) Robust error decomposition +(c) Robust error vs. #samples + +update the weights using a gradient with respect to the resulting perturbed example (Goodfellow et al., 2015; Madry et al., 2018). For every experiment, we choose the learning rate and weight decay parameters that minimize the robust error on a hold-out dataset. + +# 4.3 ADVERSARIAL TRAINING CAN HURT ROBUST GENERALIZATION + +We now present our experimental results on the Waterbirds dataset for both motion blur and adversarial illumination attacks. First of all, Figure 5a and 6a show that the phenomenon characterized in the linear setting by Theorem 3.1 also occurs for directed attacks on the Waterbirds dataset: as we increase the adversarial training budget $\epsilon_{\mathrm{tr}}$ starting from zero (standard training), the robust error monotonically increases. + +Furthermore, to gain intuition as described in Section 3.3, we also plot the robust error decomposition (Equation 7) consisting of the standard error and susceptibility in Figure 5b and 6b. Recall that we measure susceptibility as the fraction of data points in the test set for which the classifier predicts a different class under an adversarial attack. 
As in our linear example, we observe an increase in robust error despite a slight drop in susceptibility, because of the more severe increase in standard error. Moreover, Figures 1 and 6c show that analogous to our linear example, this phenomenon is specific to the low sample regime: for large sample size adversarial training outperforms standard training as expected. Note again that even the smallest sample size is large enough to yield a standard test error $< 10\%$ for standard training. Similar experiments for CIFAR-10 can be found in Appendix E. Finally, we empirically confirm in Appendix D.8 that our phenomenon is specific to directed attacks: for undirected attacks such as bounded $\ell_{\infty}$ and $\ell_{2}$ -ball perturbations, adversarial training helps robust generalization also in the low sample size regime. + +# 4.4 DISCUSSION + +We now discuss how different algorithmic choices, motivated by related work, might affect how adversarial training hurts robust generalization. + +Catastrophic overfitting Often the worst-case perturbation during adversarial training is found using an approximate algorithm such as SGD. It is common belief that using the strongest attack (in the motion blur case, full grid search) during training also results in better robust generalization. In particular, the literature on catastrophic overfitting shows that weaker attacks during training lead to bad performance on stronger attacks during testing (Wong et al., 2020; Andriushchenko & Flammarion, 2020; Li et al., 2021). Our results suggest the opposite in the low sample size regime for directed attacks: the weaker the attack during training, the better adversarial training performs. 
 + +Robust overfitting Recent work observes empirically (Rice et al., 2020) and theoretically (Sanyal et al., 2020; Donhauser et al., 2021), that perfectly minimizing the adversarial loss during training might in fact be suboptimal for robust generalization; that is, classical regularization techniques might lead to higher robust accuracy. This phenomenon is often referred to as robust overfitting. May the phenomenon be mitigated using standard regularization techniques? In Appendix D we shed light + +on this question and show that adversarial training hurts robust generalization even when standard regularization methods such as early stopping are used. + +# 5 RELATED WORK + +Robust and non-robust useful features In the words of Ilyas et al. (2019) and Springer et al. (2021) we can describe the intuition behind "our phenomenon" as follows: for directed attacks, all robust features become less useful, but adversarial training uses robust features more. In the small sample-size regime, $n < d - 1$ in particular, robust learning assigns too much weight to the robust (possibly non-useful) features that then dominate the non-robust (but useful) features. Even though they define these concepts, they don't make our statement, but show that adversarial training reduces the reliance on non-robust but possibly useful features. + +Small sample size and robustness A direct consequence of Theorem 3.1 is that in order to achieve the same robust error as standard training, adversarial training requires more samples. This statement might remind the reader of sample complexity results for robust generalization in Schmidt et al. (2018); Yin et al. (2019); Khim & Loh (2018). While those results compare sample complexity bounds for standard vs. robust error, our theorem statement compares two algorithms, standard vs. adversarial training, with respect to the robust error. 
+ +Trade-off between standard and robust error Many papers observed that even though adversarial training decreases robust error compared to standard training, it may lead to an increase in standard test error Madry et al. (2018); Zhang et al. (2019). For example, Tsipras et al. (2019); Zhang et al. (2019); Javanmard et al. (2020); Dobriban et al. (2020); Chen et al. (2020) study settings where the Bayes optimal robust classifier is not equal to the Bayes optimal (standard) classifier (i.e. the perturbations are inconsistent or the dataset is non-separable). Raghunathan et al. (2020) study consistent perturbations, as in our paper, and prove that for small sample size, fitting adversarial examples can increase standard error even in the absence of noise. Empirically, Dong et al. (2021); Mendonça et al. (2022) show that for $\ell_p$ -attacks low-quality data might be the main cause of the trade-off. While aforementioned works focus on the decrease in standard error, we prove that for directed attacks, in the small sample regime adversarial training may in fact increase robust error. + +Mitigation of the trade-off A long line of work has proposed procedures to mitigate the trade-off between robust and standard accuracy. For example Alayrac et al. (2019); Carmon et al. (2019); Zhai et al. (2019); Raghunathan et al. (2020) study robust self training, which leverages a large set of unlabelled data, while Lee et al. (2020); Lamb et al. (2019); Xu et al. (2020) use data augmentation by interpolation. Ding et al. (2020); Balaji et al. (2019); Cheng et al. (2020) on the other hand propose to use adaptive perturbation budgets $\epsilon_{\mathrm{tr}}$ that vary across inputs. The intuition behind our theoretical analysis suggests that the standard mitigation procedures for imperceptible perturbations may not work for perceptible directed attacks, because all relevant features are non-robust. We leave a thorough empirical study as interesting future work. 
+ +# 6 SUMMARY AND FUTURE WORK + +This paper aims to caution the practitioner against blindly following current widespread practices to increase the robust performance of machine learning models. Specifically, adversarial training is currently recognized to be one of the most effective defense mechanisms for $\ell_p$ -perturbations, significantly outperforming robust performance of standard training. However, we prove that in the low sample size regime this common wisdom is not applicable for consistent directed attacks, which efficiently focus their attack budget to target the ground truth class information. In terms of follow-up work on directed attacks in the low sample regime, there are some concrete questions that would be interesting to explore. For example, as discussed in Section 5, it would be useful to test whether some methods to mitigate the standard accuracy vs. robustness trade-off would also relieve the perils of adversarial training for directed attacks. Further, we hypothesize that when few samples are available, one should avoid training with attacks that may heavily reduce class information, independently of the attacks at test time. If this hypothesis were confirmed, it would break with yet another general rule that the best defense perturbation type should always match the attack during evaluation. + +# ACKNOWLEDGEMENT + +Supported by the Hasler Foundation grant number 21050. + +# REFERENCES + +Jean-Baptiste Alayrac, Jonathan Uesato, Po-Sen Huang, Alhussein Fawzi, Robert Stanforth, and Pushmeet Kohli. Are labels required for improving adversarial robustness? Advances in Neural Information Processing Systems, pp. 12214-12223, 2019. +Maksym Andriushchenko and Nicolas Flammarion. Understanding and improving fast adversarial training. Advances in Neural Information Processing Systems, 2020. +Tao Bai, Jinqi Luo, Jun Zhao, Bihan Wen, and Qian Wang. Recent advances in adversarial training for adversarial robustness. 
In Zhi-Hua Zhou (ed.), The 30th International Joint Conference on Artificial Intelligence, pp. 4312-4321. International Joint Conferences on Artificial Intelligence Organization, 2021. +Yogesh Balaji, Tom Goldstein, and Judy Hoffman. Instance adaptive adversarial training: Improved accuracy tradeoffs in neural nets. arXiv preprint arXiv:1910.08051, 2019. +G. Bradski. The OpenCV Library. Dr. Dobb's Journal of Software Tools, 2000. +Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, Percy Liang, and John C Duchi. Unlabeled data improves adversarial robustness. In The 33rd International Conference on Neural Information Processing Systems, pp. 11192-11203, 2019. +Lin Chen, Yifei Min, Mingrui Zhang, and Amin Karbasi. More data can expand the generalization gap between adversarially robust and standard models. In The 36th International Conference on Machine Learning, pp. 1670-1680, 2020. +Minhao Cheng, Qi Lei, Pin-Yu Chen, Inderjit Dhillon, and Cho-Jui Hsieh. Cat: Customized adversarial training for improved robustness. arXiv preprint arXiv:2002.06789, 2020. +Lenaic Chizat and Francis Bach. Implicit bias of gradient descent for wide two-layer neural networks trained with the logistic loss. In The 7th International Conference on Learning Theory, pp. 1305-1338, 2020. +Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In The 37th International Conference on Machine Learning, pp. 2206-2216, 2020. +Gavin Weiguang Ding, Yash Sharma, Kry Yik Chau Lui, and Ruitong Huang. Mma training: Direct input space margin maximization through adversarial training. In The 8th International Conference on Learning Representations, 2020. +Edgar Dobriban, Hamed Hassani, David Hong, and Alexander Robey. Provable tradeoffs in adversarially robust classification. arXiv preprint arXiv:2006.05161, 2020. +Chengyu Dong, Liyuan Liu, and Jingbo Shang. Data quality matters for adversarial training: An empirical study, 2021. 
+Konstantin Donhauser, Alexandru Tifrea, Michael Aerni, Reinhard Heckel, and Fanny Yang. Interpolation can hurt robust generalization even when there is no noise. The 36th conference on Advances in Neural Information Processing Systems, 2021. +Logan Engstrom, Brandon Tran, Dimitris Tsipras, Ludwig Schmidt, and Aleksander Madry. Exploring the landscape of spatial robustness. In The 36th International Conference on Machine Learning, pp. 1802-1811, 2019. +Kevin Eykholt, Ivan Evtimov, Earlence Fernandes, Bo Li, Amir Rahmati, Chaowei Xiao, Atul Prakash, Tadayoshi Kohno, and Dawn Song. Robust physical-world attacks on deep learning visual classification. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1625-1634, 2018. + +Amin Ghiasi, Ali Shafahi, and Tom Goldstein. Breaking certified defenses: semantic adversarial examples with spoofed robustness certificates. In The 6th International Conference on Learning Representations, 2019. +Justin Gilmer, Ryan P Adams, Ian Goodfellow, David Andersen, and George E Dahl. Motivating the rules of the game for adversarial example research. arXiv preprint arXiv:1807.06732, 2018. +Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. In The 3rd International Conference on Learning Representations, pp. 1-10, 2015. +Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Logan Engstrom, Brandon Tran, and Aleksander Madry. Adversarial examples are not bugs, they are features. In The 33rd conference on Advances in Neural Information Processing Systems, pp. 125-136, 2019. +Adel Javanmard, Mahdi Soltanolkotabi, and Hamed Hassani. Precise tradeoffs in adversarial training for linear regression. In Conference on Learning Theory, pp. 2034-2078, 2020. +Ziwei Ji and Matus Telgarsky. The implicit bias of gradient descent on nonseparable data. In *The 32nd Conference on Learning Theory*, pp. 1772-1798, 2019. +Daniel Kang, Yi Sun, Tom Brown, Dan Hendrycks, and Jacob Steinhardt. 
Transfer of adversarial robustness between perturbation types. arXiv e-prints, pp. arXiv-1905, 2019. +Justin Khim and Po-Ling Loh. Adversarial risk bounds via function transformation. arXiv preprint arXiv:1810.09519, 2018. +Cassidy Laidlaw, Sahil Singla, and Soheil Feizi. Perceptual adversarial robustness: Defense against unseen threat models. In The 9th International Conference on Learning Representations, 2021. +Alex Lamb, Vikas Verma, Juho Kannala, and Yoshua Bengio. Interpolated adversarial training: Achieving robust neural networks without sacrificing too much accuracy. In The 12th ACM Workshop on Artificial Intelligence and Security, pp. 95-103, 2019. +Saehyung Lee, Hyungyu Lee, and Sungroh Yoon. Adversarial vertex mixup: Toward better adversarially robust generalization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020. +Bai Li, Shiqi Wang, Suman Jana, and Lawrence Carin. Towards understanding fast adversarial training. arXiv preprint arXiv:2006.03089, 2021. +Yan Li, Ethan X. Fang, Huan Xu, and Tuo Zhao. Implicit bias of gradient descent based adversarial training on separable data. In The 8th International Conference on Learning Representations, 2020. +Wei-An Lin, Chun Pong Lau, Alexander Levine, Rama Chellappa, and Soheil Feizi. Dual manifold adversarial robustness: Defense against lp and non-lp adversarial attacks. In The 34th conference on Advances in Neural Information Processing Systems, pp. 3487-3498, 2020. +Chen Liu, Mathieu Salzmann, Tao Lin, Ryota Tomioka, and Sabine Susstrunk. On the loss landscape of adversarial training: Identifying challenges and how to overcome them. In The 35th conference on Advances in Neural Information Processing Systems, pp. 21476-21487, 2020. +Bo Luo, Yannan Liu, Lingxiao Wei, and Qiang Xu. Towards imperceptible and robust adversarial example attacks against neural networks. 
In The 32nd AAAI Conference on Artificial Intelligence and Thirtieth Innovative Applications of Artificial Intelligence Conference and Eighth AAAI Symposium on Educational Advances in Artificial Intelligence, 2018. +Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In The 6th International Conference on Learning Representations, 2018. +Tomás Mantecón, Carlos R. del Blanco, Fernando Jaureguizar, and Narciso García. A real-time gesture recognition system using near-infrared imagery. PLOS ONE, pp. 1-17, 2019. + +Marcele OK Mendonça, Javier Maroto, Pascal Frossard, and Paulo SR Diniz. Adversarial training with informed data selection. In The 30th European Signal Processing Conference (EUSIPCO), pp. 608-612, 2022. +Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, and Pascal Frossard. Deepfool: a simple and accurate method to fool deep neural networks. In The IEEE conference on computer vision and pattern recognition (CVPR), pp. 2574-2582, 2016. +Abdullah Mujahid, Mazhar Javed Awan, Awais Yasin, Mazin Abed Mohammed, Robertas Damaševićius, Rytis Maskeliūnas, and Karrar Hameed Abdulkareem. Real-time hand gesture recognition based on deep learning yolov3 model. Applied Sciences, 2021. +Mor Shpigel Nacson, Nathan Srebro, and Daniel Soudry. Stochastic gradient descent on separable data: Exact convergence with a fixed learning rate. In The 22nd International Conference on Artificial Intelligence and Statistics, pp. 3051-3059, 2019. +Vaishnavh Nagarajan and J. Zico Kolter. Uniform convergence may be unable to explain generalization in deep learning. In The 33rd conference on Advances in Neural Information Processing Systems, pp. 11611-11622, 2019. +Munir Oudah, Ali Al-Naji, and Javaan Chahl. Hand gesture recognition based on computer vision: A review of techniques. Journal of Imaging, 2020. +Huy Phan. huyvnphan/pytorch_cifar10, 1 2021. 
+Aditi Raghunathan, Sang Michael Xie, Fanny Yang, John Duchi, and Percy Liang. Understanding and mitigating the tradeoff between robustness and accuracy. In The 37th International Conference on Machine Learning, pp. 7909-7919, 2020. +Leslie Rice, Eric Wong, and Zico Kolter. Overfitting in adversarially robust deep learning. In The 37th International Conference on Machine Learning, pp. 8093-8104, 2020. +Shiori Sagawa, Pang Wei Koh, Tatsunori B. Hashimoto, and Percy Liang. Distributionally robust neural networks. In The 7th International Conference on Learning Representations, 2020. +Amartya Sanyal, Puneet K Dokania, Varun Kanade, and Philip Torr. How benign is benign overfitting? In The 8th International Conference on Learning Representations, 2020. +Ludwig Schmidt, Shibani Santurkar, Dimitris Tsipras, Kunal Talwar, and Aleksander Madry. Adversarially robust generalization requires more data. In The 32nd conference on Advances in Neural Information Processing Systems, 2018. +Steffen Schneider, Evgenia Rusak, Luisa Eck, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Improving robustness against common corruptions by covariate shift adaptation. In The 34th conference on Advances in Neural Information Processing Systems, pp. 11539-11551, 2020. +Daniel Soudry, Elad Hoffer, Mor Shpigel Nacson, Suriya Gunasekar, and Nathan Srebro. The implicit bias of gradient descent on separable data. Journal of Machine Learning Research, pp. 1-57, 2018. +Jacob M Springer, Melanie Mitchell, and Garrett T Kenyon. Adversarial perturbations are not so weird: Entanglement of robust and non-robust features in neural network classifiers. arXiv preprint arXiv:2102.05110, 2021. +David Stutz, Matthias Hein, and Bernt Schiele. Disentangling adversarial robustness and generalization. In The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 6967-6987, 2019. +Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. 
Intriguing properties of neural networks. In The 2nd International Conference on Learning Representations, 2014. +Matus Telgarsky. Margins, shrinkage, and boosting. In The 30th International Conference on Machine Learning, pp. 307-315, 2013. + +Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. Robustness may be at odds with accuracy. In The 7th International Conference on Learning Representations, 2019. +Roman Vershynin. Introduction to the non-asymptotic analysis of random matrices. arXiv preprint arXiv:1011.3027, 2010. +P. Welinder, S. Branson, T. Mita, C. Wah, F. Schroff, S. Belongie, and P. Perona. Caltech-UCSD Birds 200. Technical report, California Institute of Technology, 2010. +Eric Wong, Leslie Rice, and J. Zico Kolter. Fast is better than free: Revisiting adversarial training. In The 8th International Conference on Learning Representations, 2020. +Tong Wu, Liang Tong, and Yevgeniy Vorobeychik. Defending against physically realizable attacks on image classification. In The 8th International Conference on Learning Representations, 2020. +Minghao Xu, Jian Zhang, Bingbing Ni, Teng Li, Chengjie Wang, Qi Tian, and Wenjun Zhang. Adversarial domain adaptation with domain mixup. In The AAAI Conference on Artificial Intelligence, pp. 6502-6509, 2020. +Shuai Yang, Prashan Premaratne, and Peter Vial. Hand gesture recognition: An overview. In The 5th IEEE International Conference on Broadband Network Multimedia Technology, pp. 63-69, 2013. +Dong Yin, Ramchandran Kannan, and Peter Bartlett. Rademacher complexity for adversarially robust generalization. In The 36th International conference on machine learning, pp. 7085-7094, 2019. +Runtian Zhai, Tianle Cai, Di He, Chen Dan, Kun He, John Hopcroft, and Liwei Wang. Adversarially robust generalization just requires more unlabeled data. arXiv preprint arXiv:1906.00555, 2019. +Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric Xing, Laurent El Ghaoui, and Michael Jordan. 
Theoretically principled trade-off between robustness and accuracy. In *The 36th International Conference on Machine Learning*, pp. 7472-7482, 2019. +Zhengyu Zhao, Zhuoran Liu, and Martha Larson. Towards large yet imperceptible adversarial image perturbations with perceptual color distance. In The IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1039-1048, 2020. +Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017. +Jianli Zhou, Chao Liang, and Jun Chen. Manifold projection for adversarial defense on face recognition. In The 16th European Conference on Computer Vision, pp. 288-305, 2020. + +# A THEORETICAL STATEMENTS FOR THE LINEAR MODEL + +Before we present the proof of the theorem, we introduce two lemmas of separate interest that are used throughout the proof of Theorem 1. Recall that the definition of the (standard normalized) maximum- $\ell_2$ -margin solution (max-margin solution in short) of a dataset $D = \{(x_i, y_i)\}_{i=1}^n$ corresponds to + +$$ +\widehat {\theta} := \underset {\| \theta \| _ {2} \leq 1} {\arg \max } \underset {i \in [ n ]} {\min } y _ {i} \theta^ {\top} x _ {i}, \tag {10} +$$ + +by simply setting $\epsilon_{\mathrm{tr}} = 0$ in Equation 4. The $\ell_2$ -margin of $\widehat{\theta}$ then reads $\min_{i\in [n]}y_i\widehat{\theta}^\top x_i$ . Furthermore for a dataset $D = \{(x_{i},y_{i})\}_{i = 1}^{n}$ we refer to the induced dataset $\widetilde{D}$ as the dataset with covariate vectors stripped of the first element, i.e. + +$$ +\widetilde {D} = \left\{\left(\tilde {x} _ {i}, y _ {i}\right) \right\} _ {i = 1} ^ {n} := \left\{\left(\left(x _ {i}\right) _ {[ 2: d ]}, y _ {i}\right) \right\} _ {i = 1} ^ {n}, \tag {11} +$$ + +where $(x_{i})_{[2:d]}$ refers to the last $d - 1$ elements of the vector $x_{i}$ . 
Furthermore, remember that for any vector $z$ , $z_{[j]}$ refers to the $j$ -th element of $z$ and $e_j$ denotes the $j$ -th canonical basis vector. Further, recall the distribution $\mathbb{P}_r$ as defined in Section 3.1: the label $y \in \{+1, -1\}$ is drawn with equal probability and the covariate vector is sampled as $x = [y\frac{r}{2}, \tilde{x}]$ where $\tilde{x} \in \mathbb{R}^{d-1}$ is a random vector drawn from a standard normal distribution, i.e. $\tilde{x} \sim \mathcal{N}(0, \sigma^2 I_{d-1})$ . We generally allow $r$ , used to sample the training data, to differ from $r_{\mathrm{test}}$ , which is used during test time. + +The following lemma derives a closed-form expression for the normalized max-margin solution for any dataset with fixed separation $r$ in the signal component, and that is linearly separable in the last $d - 1$ coordinates with margin $\tilde{\gamma}$ . + +Lemma A.1. Let $D = \{(x_i, y_i)\}_{i=1}^n$ be a dataset that consists of points $(x, y) \in \mathbb{R}^d \times \{\pm 1\}$ and $x_{[1]} = y\frac{r}{2}$ , i.e. the covariates $x_i$ are deterministic in their first coordinate given $y_i$ with separation distance $r$ . Furthermore, let the induced dataset $\widetilde{D}$ also be linearly separable by the normalized max- $\ell_2$ -margin solution $\tilde{\theta}$ with an $\ell_2$ -margin $\tilde{\gamma}$ . Then, the normalized max-margin solution of the original dataset $D$ is given by + +$$ +\widehat {\theta} = \frac {1}{\sqrt {r ^ {2} + 4 \tilde {\gamma} ^ {2}}} \left[ r, 2 \tilde {\gamma} \tilde {\theta} \right]. \tag {12} +$$ + +Further, the standard accuracy of $\widehat{\theta}$ for data drawn from $\mathbb{P}_{r_{test}}$ reads + +$$ +\mathbb {P} _ {r _ {\text {t e s t}}} \left(Y \widehat {\theta} ^ {\top} X > 0\right) = \Phi \left(\frac {r r _ {\text {t e s t}}}{4 \sigma \tilde {\gamma}}\right). \tag {13} +$$ + +The proof can be found in Section A.3. 
The next lemma provides high probability upper and lower bounds for the margin $\tilde{\gamma}$ of $\widetilde{D}$ when $\tilde{x}_i$ are drawn from the normal distribution. + +Lemma A.2. Let $\widetilde{D} = \{(\tilde{x}_i, y_i)\}_{i=1}^n$ be a random dataset where $y_i \in \{\pm 1\}$ are equally distributed and $\tilde{x}_i \sim \mathcal{N}(0, \sigma^2 I_{d-1})$ for all $i$ , and $\tilde{\gamma}$ is the maximum $\ell_2$ margin that can be written as + +$$ +\tilde{\gamma} = \max_{\| \tilde{\theta}\|_{2}\leq 1}\min_{i\in [n]}y_{i}\tilde{\theta}^{\top}\tilde{x}_{i}. +$$ + +Then, for any $t \geq 0$ , with probability greater than $1 - 2e^{-\frac{t^2}{2}}$ , we have $\tilde{\gamma}_{\min}(t) \leq \tilde{\gamma} \leq \tilde{\gamma}_{\max}(t)$ where + +$$ +\tilde {\gamma} _ {\mathrm {m a x}} (t) = \sigma \left(\sqrt {\frac {d - 1}{n}} + 1 + \frac {t}{\sqrt {n}}\right), \tilde {\gamma} _ {\mathrm {m i n}} (t) = \sigma \left(\sqrt {\frac {d - 1}{n}} - 1 - \frac {t}{\sqrt {n}}\right). +$$ + +# A.1 PROOF OF THEOREM 3.1 + +Given a dataset $D = \{(x_{i},y_{i})\}$ drawn from $\mathbb{P}_r$ , it is easy to see that the (normalized) $\epsilon_{\mathrm{tr}}$ -robust max-margin solution 4 of $D$ with respect to signal-attacking perturbations $T(\epsilon_{\mathrm{tr}};x_i)$ as defined in Equation 3, can be written as + +$$ +\begin{array}{l} \widehat{\theta}^{\epsilon_{\mathrm{tr}}} = \arg \max_{\| \theta \|_{2}\leq 1}\min_{i\in [n],x_{i}^{\prime}\in T(x_{i};\epsilon_{\mathrm{tr}})}y_{i}\theta^{\top}x_{i}^{\prime} \\ = \operatorname *{arg max}_{\| \theta \|_{2}\leq 1}\min_{i\in [n],|\beta |\leq \epsilon_{\mathrm{tr}}}y_{i}\theta^{\top}(x_{i} + \beta e_{1}) \\ = \operatorname *{arg max}_{\| \theta \|_{2}\leq 1}\min_{i\in [n]}y_{i}\theta^{\top}(x_{i} - y_{i}\epsilon_{\mathrm{tr}}\operatorname {sign}(\theta_{[1]})e_{1}). 
\\ \end{array} +$$ + +Note that by definition, it is equivalent to the (standard normalized) max-margin solution $\widehat{\theta}$ of the shifted dataset $D_{\epsilon_{\mathrm{tr}}} = \{(x_i - y_i\epsilon_{\mathrm{tr}}\mathrm{sign}(\theta_{[1]})e_1,y_i)\}_{i = 1}^n$ . Since $D_{\epsilon_{\mathrm{tr}}}$ satisfies the assumptions of Lemma A.1, it then follows directly that the normalized $\epsilon_{\mathrm{tr}}$ -robust max-margin solution reads + +$$ +\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}} = \frac {1}{\sqrt {(r - 2 \epsilon_ {\mathrm {t r}}) ^ {2} + 4 \tilde {\gamma} ^ {2}}} \left[ r - 2 \epsilon_ {\mathrm {t r}}, 2 \tilde {\gamma} \tilde {\theta} \right], \tag {14} +$$ + +by replacing $r$ by $r - 2\epsilon_{\mathrm{tr}}$ in Equation 12. Similar to above, $\tilde{\theta} \in \mathbb{R}^{d-1}$ is the (standard normalized) max-margin solution of $\{(\tilde{x}_i, y_i)\}_{i=1}^n$ and $\tilde{\gamma}$ the corresponding margin. + +Proof of 1. We can now compute the $\epsilon_{\mathrm{te}}$ -robust accuracy of the $\epsilon_{\mathrm{tr}}$ -robust max-margin estimator $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ for a given dataset $D$ as a function of $\tilde{\gamma}$ . Note that in the expression of $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ , all values are fixed for a fixed dataset, while $0 \leq \epsilon_{\mathrm{tr}} \leq r - 2\tilde{\gamma}_{\mathrm{max}}$ can be chosen. 
First note that for a test distribution $\mathbb{P}_r$ , the $\epsilon_{\mathrm{te}}$ -robust accuracy, defined as one minus the robust error (Equation 1), for a classifier associated with a vector $\theta$ , can be written as + +$$ +\begin{array}{l} \operatorname {A c c} (\theta ; \epsilon_ {\mathrm {t e}}) = \mathbb {E} _ {X, Y \sim \mathbb {P} _ {r}} \left[ \mathbb {I} \left\{\min _ {x ^ {\prime} \in T (X; \epsilon_ {\mathrm {t e}})} Y \theta^ {\top} x ^ {\prime} > 0 \right\} \right] \tag {15} \\ = \mathbb {E} _ {X, Y \sim \mathbb {P} _ {r}} \left[ \mathbb {I} \{Y \theta^ {\top} X - \epsilon_ {\mathrm {t e}} | \theta_ {[ 1 ]} | > 0 \} \right] = \mathbb {E} _ {X, Y \sim \mathbb {P} _ {r}} \left[ \mathbb {I} \{Y \theta^ {\top} (X - Y \epsilon_ {\mathrm {t e}} \operatorname {s i g n} (\theta_ {[ 1 ]}) e _ {1}) > 0 \} \right] \\ \end{array} +$$ + +Now, recall that by Equation 14 and the assumption in the theorem, we have $r - 2\epsilon_{\mathrm{tr}} > 0$ , so that $\mathrm{sign}(\widehat{\theta}_{[1]}^{\epsilon_{\mathrm{tr}}}) = 1$ . Further, using the definition of $T(\epsilon_{\mathrm{te}};x)$ in Equation 3 and by definition of the distribution $\mathbb{P}_r$ , we have $X_{[1]} = Y\frac{r}{2}$ . 
Plugging into Equation 15 then yields + +$$ +\begin{array}{l} \operatorname {A c c} \left(\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}; \epsilon_ {\mathrm {t e}}\right) = \mathbb {E} _ {X, Y \sim \mathbb {P} _ {r}} \left[ \mathbb {I} \left\{Y \widehat {\theta} ^ {\epsilon_ {\mathrm {t r}} \top} \left(X - Y \epsilon_ {\mathrm {t e}} e _ {1}\right) > 0 \right\} \right] \\ = \mathbb {E} _ {X, Y \sim \mathbb {P} _ {r}} \left[ \mathbb {I} \left\{Y \widehat {\theta} ^ {\epsilon_ {\mathrm {t r}} \top} \left(X _ {- 1} + Y \left(\frac {r}{2} - \epsilon_ {\mathrm {t e}}\right) e _ {1}\right) > 0 \right\} \right] \\ = \mathbb {P} _ {r - 2 \epsilon_ {\mathrm {t e}}} (Y \widehat {\theta} ^ {\epsilon_ {\mathrm {t r}} \top} X > 0) \\ \end{array} +$$ + +where $X_{-1}$ is a shorthand for the random vector $X_{-1} = (0; X_{[2]}, \ldots, X_{[d]})$ . The assumptions in Lemma A.1 ( $D_{\epsilon_{\mathrm{tr}}}$ is linearly separable) are satisfied whenever the $n < d - 1$ samples are distinct, i.e. with probability one. Hence applying Lemma A.1 with $r_{\mathrm{test}} = r - 2\epsilon_{\mathrm{te}}$ and $r = r - 2\epsilon_{\mathrm{tr}}$ yields + +$$ +\operatorname {A c c} \left(\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}; \epsilon_ {\mathrm {t e}}\right) = \Phi \left(\frac {r (r - 2 \epsilon_ {\mathrm {t e}})}{4 \sigma \tilde {\gamma}} - \epsilon_ {\mathrm {t r}} \frac {r - 2 \epsilon_ {\mathrm {t e}}}{2 \sigma \tilde {\gamma}}\right). \tag {16} +$$ + +Theorem statement a) then follows by noting that $\Phi$ is a monotonically decreasing function in $\epsilon_{\mathrm{tr}}$ . The expression for the robust error then follows by noting that $1 - \Phi(-z) = \Phi(z)$ for any $z \in \mathbb{R}$ and defining + +$$ +\tilde {\varphi} = \frac {\sigma \tilde {\gamma}}{r / 2 - \epsilon_ {\mathrm {t e}}}. \tag {17} +$$ + +Proof of 2. First define $\varphi_{\mathrm{min}},\varphi_{\mathrm{max}}$ using $\tilde{\gamma}_{\mathrm{min}},\tilde{\gamma}_{\mathrm{max}}$ as in Equation 17. 
Then we have by Equation 16 + +$$ +\begin{array}{l} \operatorname {E r r} \left(\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}; \epsilon_ {\mathrm {t e}}\right) - \operatorname {E r r} \left(\widehat {\theta} ^ {0}; \epsilon_ {\mathrm {t e}}\right) = \operatorname {A c c} \left(\widehat {\theta} ^ {0}; \epsilon_ {\mathrm {t e}}\right) - \operatorname {A c c} \left(\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}; \epsilon_ {\mathrm {t e}}\right) \\ = \Phi \left(\frac {r / 2}{\tilde {\varphi}}\right) - \Phi \left(\frac {r / 2 - \epsilon_ {\mathrm {t r}}}{\tilde {\varphi}}\right) \\ = \int_ {r / 2 - \epsilon_ {\mathrm {t r}}} ^ {r / 2} \frac {1}{\sqrt {2 \pi} \tilde {\varphi}} e ^ {- \frac {x ^ {2}}{\tilde {\varphi} ^ {2}}} d x \\ \end{array} +$$ + +By plugging in $t = \sqrt{\frac{2\log 2 / \delta}{n}}$ in Lemma A.2, we obtain that with probability at least $1 - \delta$ we have + +$$ +\tilde {\gamma} _ {\min } := \sigma \left[ \sqrt {\frac {d - 1}{n}} - \left(1 + \sqrt {\frac {2 \log (2 / \delta)}{n}}\right) \right] \leq \tilde {\gamma} \leq \sigma \left[ \sqrt {\frac {d - 1}{n}} + \left(1 + \sqrt {\frac {2 \log (2 / \delta)}{n}}\right) \right] =: \tilde {\gamma} _ {\max } +$$ + +and equivalently $\varphi_{\mathrm{min}}\leq \tilde{\varphi}\leq \varphi_{\mathrm{max}}$ . + +Now note the general fact that for all $\tilde{\varphi} \leq \sqrt{2} x$ the density function $f(\tilde{\varphi};x) = \frac{1}{\sqrt{2\pi}\tilde{\varphi}}e^{-\frac{x^2}{\tilde{\varphi}^2}}$ is monotonically increasing in $\tilde{\varphi}$ . 
+ +By assumption of the theorem, $\tilde{\varphi} \leq \sqrt{2}(r/2 - \epsilon_{\mathrm{tr}})(r/2 - \epsilon_{\mathrm{te}})$ so that $f(\tilde{\varphi}; x) \geq f(\varphi_{\min}; x)$ for all $x \in [r/2 - \epsilon_{\mathrm{tr}}, r/2]$ and therefore + +$$ +\int_ {r / 2 - \epsilon_ {\mathrm {t r}}} ^ {r / 2} \frac {1}{\sqrt {2 \pi} \tilde {\varphi}} e ^ {- \frac {x ^ {2}}{\tilde {\varphi} ^ {2}}} d x \geq \int_ {r / 2 - \epsilon_ {\mathrm {t r}}} ^ {r / 2} \frac {1}{\sqrt {2 \pi} \varphi_ {\mathrm {m i n}}} e ^ {- \frac {x ^ {2}}{\varphi_ {\mathrm {m i n}} ^ {2}}} d x = \Phi \left(\frac {r / 2}{\varphi_ {\mathrm {m i n}}}\right) - \Phi \left(\frac {r / 2 - \epsilon_ {\mathrm {t r}}}{\varphi_ {\mathrm {m i n}}}\right). +$$ + +and the statement is proved. + +# A.2 PROOF OF COROLLARY 3.2 + +We now show that Theorem 3.1 also holds for $\ell_1$ -ball perturbations with at most radius $\epsilon$ . Following similar steps as in Equation 14, the $\epsilon_{\mathrm{tr}}$ -robust max-margin solution for $\ell_1$ -perturbations can be written as + +$$ +\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}} := \underset {\| \theta \| _ {2} \leq 1} {\arg \max } \underset {i \in [ n ]} {\min } y _ {i} \theta^ {\top} \left(x _ {i} - y _ {i} \epsilon_ {\mathrm {t r}} \operatorname {s i g n} \left(\theta_ {[ j ^ {\star} (\theta) ]}\right) e _ {j ^ {\star} (\theta)}\right) \tag {18} +$$ + +where $j^{\star}(\theta) \coloneqq \arg \max_{j} |\theta_{j}|$ is the index of the maximum absolute value of $\theta$ . We now prove by contradiction that the robust max-margin solution for this perturbation set 9 is equivalent to the solution 14 for the perturbation set 3. We start by assuming that $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ does not solve Equation 14, which is equivalent to assuming $1 \notin j^{\star}(\widehat{\theta}^{\epsilon_{\mathrm{tr}}})$ by definition. We now show how this assumption leads to a contradiction. 
+ +Define the shorthand $j^{\star} \coloneqq j^{\star}(\widehat{\theta}^{\epsilon_{\mathrm{tr}}}) - 1$. Since $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ is the solution of 18, by definition, we have that $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ is also the max-margin solution of the shifted dataset $D_{\epsilon_{\mathrm{tr}}} \coloneqq \{(x_i - y_i\epsilon_{\mathrm{tr}}\mathrm{sign}(\theta_{[j^{\star} + 1]})e_{j^{\star} + 1},y_i)\}_{i=1}^n$ . Further, note that by the assumption that $1 \notin j^{\star}(\widehat{\theta}^{\epsilon_{\mathrm{tr}}})$ , this dataset $D_{\epsilon_{\mathrm{tr}}}$ consists of input vectors $x_i = (y_i\frac{r}{2},\tilde{x}_i - y_i\epsilon_{\mathrm{tr}}\mathrm{sign}(\theta_{[j^{\star} + 1]})e_{j^{\star} + 1})$ . Hence via Lemma A.1, $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ can be written as + +$$ +\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}} = \frac {1}{\sqrt {r ^ {2} + 4 \left(\tilde {\gamma} ^ {\epsilon_ {\mathrm {t r}}}\right) ^ {2}}} [ r, 2 \tilde {\gamma} ^ {\epsilon_ {\mathrm {t r}}} \tilde {\theta} ^ {\epsilon_ {\mathrm {t r}}} ], \tag {19} +$$ + +where $\tilde{\theta}^{\epsilon_{\mathrm{tr}}}$ is the normalized max-margin solution of $\widetilde{D} := \{(\tilde{x}_i - y_i\epsilon_{\mathrm{tr}}\mathrm{sign}(\tilde{\theta}_{[j^\star ]})e_{j^\star},y_i)\}_{i=1}^n$ . + +We now characterize $\tilde{\theta}^{\epsilon_{\mathrm{tr}}}$ . Note that by assumption, $j^{\star} = j^{\star}(\tilde{\theta}^{\epsilon_{\mathrm{tr}}}) = \arg \max_{j}|\tilde{\theta}_{[j]}^{\epsilon_{\mathrm{tr}}}|$ . 
Hence, the normalized max-margin solution $\tilde{\theta}^{\epsilon_{\mathrm{tr}}}$ is the solution of + +$$ +\tilde {\theta} ^ {\epsilon_ {\mathrm {t r}}} := \underset {\| \tilde {\theta} \| _ {2} \leq 1} {\arg \max } \underset {i \in [ n ]} {\min } y _ {i} \tilde {\theta} ^ {\top} \tilde {x} _ {i} - \epsilon_ {\mathrm {t r}} | \tilde {\theta} _ {[ j ^ {\star} ]} | \tag {20} +$$ + +Observe that the minimum margin of this estimator $\tilde{\gamma}^{\epsilon_{\mathrm{tr}}} = \min_{i\in [n]}y_i(\tilde{\theta}^{\epsilon_{\mathrm{tr}}})^\top \tilde{x}_i - \epsilon_{\mathrm{tr}}|\tilde{\theta}_{[j^* ]}^{\epsilon_{\mathrm{tr}}}|$ decreases with $\epsilon_{\mathrm{tr}}$ as the problem becomes harder $\tilde{\gamma}^{\epsilon_{\mathrm{tr}}}\leq \tilde{\gamma}$ , where the latter is equivalent to the margin of $\tilde{\theta}^{\epsilon_{\mathrm{tr}}}$ for $\epsilon_{\mathrm{tr}} = 0$ . Since $r > 2\tilde{\gamma}_{\max}$ by assumption in the Theorem, by Lemma A.2 with probability at least $1 - 2e^{-\frac{\alpha^2(d - 1)}{n}}$ , we then have that $r > 2\tilde{\gamma}\geq 2\tilde{\gamma}^{\epsilon_{\mathrm{tr}}}$ . Given the closed form of $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ in Equation 19, it directly follows that $\widehat{\theta}_{[1]}^{\epsilon_{\mathrm{tr}}} = r > 2\tilde{\gamma}^{\epsilon_{\mathrm{tr}}}\| \tilde{\theta}^{\epsilon_{\mathrm{tr}}}\| _2 = \| \widehat{\theta}_{[2:d]}^{\epsilon_{\mathrm{tr}}}\| _2$ and hence $1\in j^{\star}(\widehat{\theta}^{\epsilon_{\mathrm{tr}}})$ . This contradicts the original assumption $1\notin j^{\star}(\widehat{\theta}^{\epsilon_{\mathrm{tr}}})$ and hence we established that $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ for the $\ell_1$ -perturbation set 9 has the same closed form 14 as for the perturbation set 3. + +The final statement is proved by using the analogous steps as in the proof of 1. and 2. to obtain the closed form of the robust accuracy of $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ . 
+ +# A.3 PROOF OF LEMMA A.1 + +We start by proving that $\widehat{\theta}$ is of the form + +$$ +\widehat {\theta} = \left[ a _ {1}, a _ {2} \tilde {\theta} \right], \tag {21} +$$ + +for $a_1, a_2 > 0$ . Denote by $\mathcal{H}(\theta)$ the plane through the origin with normal $\theta$ . We define $d((x,y), \mathcal{H}(\theta))$ as the signed euclidean distance from the point $(x,y) \in D \sim \mathbb{P}_r$ to the plane $\mathcal{H}(\theta)$ . The signed + +euclidean distance is defined as the euclidean distance from $x$ to the plane if the point $(x, y)$ is correctly predicted by $\theta$ , and the negative euclidean distance from $x$ to the plane otherwise. We rewrite the definition of the max $l_{2}$ -margin classifier. It is the classifier induced by the normalized vector $\widehat{\theta}$ , such that + +$$ +\max _ {\theta \in \mathbb {R} ^ {d}} \min _ {(x, y) \in D} d \left((x, y), \mathcal {H} (\theta)\right) = \min _ {(x, y) \in D} d \left(\left(x, y\right), \mathcal {H} (\widehat {\theta})\right). +$$ + +We use that $D$ is deterministic in its first coordinate and get + +$$ +\begin{array}{l} \max _ {\theta} \min _ {(x, y) \in D} d \left((x, y), \mathcal {H} (\theta)\right) = \max _ {\theta} \min _ {(x, y) \in D} y \left(\theta_ {[ 1 ]} x _ {[ 1 ]} + \tilde {\theta} ^ {\top} \tilde {x}\right) \\ = \max _ {\theta} \theta_ {1} \frac {r}{2} + \min _ {(x, y) \in D} y \tilde {\theta} ^ {\top} \tilde {x}. \\ \end{array} +$$ + +Because $r > 0$ , the maximum over all $\theta$ has $\widehat{\theta}_{[1]} \geq 0$ . Take any $a > 0$ such that $\| \widetilde{\theta} \|_2 = a$ . By definition the max $l_2$ -margin classifier, $\widetilde{\theta}$ , maximizes $\min_{(x,y) \in D} d((x,y), \mathcal{H}(\theta))$ . Therefore, $\widehat{\theta}$ is of the form of Equation 21. + +Note that all classifiers induced by vectors of the form of Equation 21 classify $D$ correctly. 
Next, we aim to find expressions for $a_1$ and $a_2$ such that Equation 21 is the normalized max $l_2$ -margin classifier. The distance from any $x \in D$ to $\mathcal{H}(\widehat{\theta})$ is + +$$ +d \left(x, \mathcal {H} (\widehat {\theta})\right) = \left| a _ {1} x _ {[ 1 ]} + a _ {2} \tilde {\theta} ^ {\top} \tilde {x} \right|. +$$ + +Using that $x_{[1]} = y\frac{r}{2}$ and that the second term equals $a_2 d\left(x, \mathcal{H}(\tilde{\theta})\right)$ , we get + +$$ +d \left(x, \mathcal {H} (\hat {\theta})\right) = \left| a _ {1} \frac {r}{2} + a _ {2} d \left(x, \mathcal {H} (\tilde {\theta})\right) \right| = a _ {1} \frac {r}{2} + \sqrt {1 - a _ {1} ^ {2}} d \left(x, \mathcal {H} (\tilde {\theta})\right). \tag {22} +$$ + +Let $(\tilde{x},y)\in \widetilde{D}$ be the point closest in Euclidean distance to $\tilde{\theta}$ . This point is also the closest point in Euclidean distance to $\mathcal{H}(\widehat{\theta})$ , because by Equation 22 $d\left(x,\mathcal{H}(\widehat{\theta})\right)$ is strictly decreasing for decreasing $d\left(x,\mathcal{H}(\tilde{\theta})\right)$ . We maximize the minimum margin $d\left(x,\mathcal{H}(\widehat{\theta})\right)$ with respect to $a_1$ . Define the vectors $a = [a_1,a_2]$ and $v = \left[\frac{r}{2},d\left(x,\mathcal{H}(\tilde{\theta})\right)\right]$ . We find using the dual norm that + +$$ +a = \frac {v}{\| v \| _ {2}}. +$$ + +Plugging the expression of $a$ into Equation 21 yields that $\widehat{\theta}$ is given by + +$$ +\widehat {\theta} = \frac {1}{\sqrt {r ^ {2} + 4 \widetilde {\gamma} ^ {2}}} \left[ r, 2 \widetilde {\gamma} \widetilde {\theta} \right]. 
+$$ + +For the second part of the lemma we first decompose + +$$ +\mathbb {P} _ {r _ {\mathrm {t e s t}}} (Y \widehat {\theta} ^ {\top} X > 0) = \frac {1}{2} \mathbb {P} _ {r _ {\mathrm {t e s t}}} \left[ \widehat {\theta} ^ {\top} X > 0 \mid Y = 1 \right] + \frac {1}{2} \mathbb {P} _ {r _ {\mathrm {t e s t}}} \left[ \widehat {\theta} ^ {\top} X < 0 \mid Y = - 1 \right] +$$ + +We can further write + +$$ +\begin{array}{l} \mathbb {P} _ {r _ {\text {t e s t}}} \left[ \widehat {\theta} ^ {\top} X > 0 \mid Y = 1 \right] = \mathbb {P} _ {r _ {\text {t e s t}}} \left[ \sum_ {i = 2} ^ {d} \widehat {\theta} _ {[ i ]} X _ {[ i ]} > - \widehat {\theta} _ {[ 1 ]} X _ {[ 1 ]} \mid Y = 1 \right] \tag {23} \\ = \mathbb {P} _ {r _ {\text {t e s t}}} \left[ 2 \tilde {\gamma} \sum_ {i = 1} ^ {d - 1} \tilde {\theta} _ {[ i ]} X _ {[ i ]} > - r \frac {r _ {\text {t e s t}}}{2} \mid Y = 1 \right] \\ = 1 - \Phi \left(- \frac {r r _ {\text {t e s t}}}{4 \sigma \tilde {\gamma}}\right) = \Phi \left(\frac {r r _ {\text {t e s t}}}{4 \sigma \tilde {\gamma}}\right) \\ \end{array} +$$ + +where $\Phi$ is the cumulative distribution function. The second equality follows by multiplying by the normalization constant on both sides and the third equality is due to the fact that $\sum_{i=1}^{d-1} \tilde{\theta}_{[i]} X_{[i]}$ is + +a zero-mean Gaussian with variance $\sigma^2\|\tilde{\theta}\|_2^2 = \sigma^2$ since $\tilde{\theta}$ is normalized. 
Correspondingly we can write + +$$ +\mathbb {P} _ {r _ {\text {t e s t}}} \left[ \widehat {\theta} ^ {\top} X < 0 \mid Y = - 1 \right] = \mathbb {P} _ {r _ {\text {t e s t}}} \left[ 2 \widetilde {\gamma} \sum_ {i = 1} ^ {d - 1} \tilde {\theta} _ {[ i ]} X _ {[ i ]} < - r \left(- \frac {r _ {\text {t e s t}}}{2}\right) \mid Y = - 1 \right] = \Phi \left(\frac {r r _ {\text {t e s t}}}{4 \sigma \widetilde {\gamma}}\right) \tag {24} +$$ + +so that we can combine 23 and 23 and 24 to obtain $\mathbb{P}_{r_{\mathrm{test}}}(Y\widehat{\theta}^{\top}X > 0) = \Phi \left(\frac{r r_{\mathrm{test}}}{4\sigma\widetilde{\gamma}}\right)$ . This concludes the proof of the lemma. + +# A.4 PROOF OF LEMMA A.2 + +The proof plan is as follows. We start from the definition of the max $\ell_2$ -margin of a dataset. Then, we rewrite the max $\ell_2$ -margin as an expression that includes a random matrix with independent standard normal entries. This allows us to prove the upper and lower bounds for the max- $\ell_2$ -margin in Sections A.4.1 and A.4.2 respectively, using non-asymptotic estimates on the singular values of Gaussian random matrices. + +Given the dataset $\widetilde{D} = \{(\tilde{x}_i, y_i)\}_{i=1}^n$ , we define the random matrix + +$$ +X = \left( \begin{array}{c} \tilde {x} _ {1} ^ {\top} \\ \tilde {x} _ {2} ^ {\top} \\ \dots \\ \tilde {x} _ {n} ^ {\top} \end{array} \right). \tag {25} +$$ + +where $\tilde{x}_i\sim \mathcal{N}(0,\sigma I_{d - 1})$ . Let $\mathcal{V}$ be the class of all perfect predictors of $\widetilde{D}$ . For a matrix $A$ and vector $b$ we also denote by $|Ab|$ the vector whose entries correspond to the absolute values of the entries of $Ab$ . Then, by definition + +$$ +\tilde {\gamma} = \max _ {v \in \mathcal {V}, \| v \| _ {2} = 1} \min _ {j \in [ n ]} | X v | _ {[ j ]} = \max _ {v \in \mathcal {V}, \| v \| _ {2} = 1} \min _ {j \in [ n ]} \sigma | Q v | _ {[ j ]}, \tag {26} +$$ + +where $Q = \frac{1}{\sigma} X$ is the scaled data matrix. 
+ +In the sequel we will use the operator norm of a matrix $A \in \mathbb{R}^{n \times d - 1}$ . + +$$ +\| A \| _ {2} = \sup _ {v \in \mathbb {R} ^ {d - 1} | \| v \| _ {2} = 1} \| A v \| _ {2} +$$ + +and denote the maximum singular value of a matrix $A$ as $s_{\max}(A)$ and the minimum singular value as $s_{\min}(A)$ . + +# A.4.1 UPPERBOUND + +Given the maximality of the operator norm and since the minimum entry of the vector $|Qv|$ must be smaller than $\frac{\|Q\|_2}{\sqrt{n}}$ , we can upper bound $\tilde{\gamma}$ by + +$$ +\tilde {\gamma} \leq \sigma \frac {1}{\sqrt {n}} \| Q \| _ {2}. +$$ + +Taking the expectation on both sides with respect to the draw of $\widetilde{D}$ and noting $\| Q\| _2\leq s_{\max}(Q)$ , it follows from Corollary 5.35 of Vershynin (2010) that for all $t\geq 0$ + +$$ +\mathbb {P} \left[ \sqrt {d - 1} + \sqrt {n} + t \geq s _ {\max } (Q) \right] \geq 1 - 2 e ^ {- \frac {t ^ {2}}{2}}. +$$ + +Therefore, with a probability greater than $1 - 2e^{-\frac{t^2}{2}}$ + +$$ +\tilde {\gamma} \leq \sigma \left(1 + \frac {t + \sqrt {d - 1}}{\sqrt {n}}\right). +$$ + +# A.4.2 LOWERBOUND + +By the definition in Equation 26, if we find a vector $v \in \mathcal{V}$ with $\| v \|_2 = 1$ such that for an $a > 0$ , it holds that $\min_{j \in n} \sigma |Xv|_{[j]} > a$ , then $\tilde{\gamma} > a$ . + +Recall the definition of the max- $\ell_2$ -margin as in Equation 25. As $n < d - 1$ , the random matrix $Q$ is a wide matrix, i.e. there are more columns than rows and therefore the minimal singular value is 0. Furthermore, $Q$ has rank $n$ almost surely and hence for all $c > 0$ , there exists a $v \in \mathbb{R}^{d-1}$ such that + +$$ +\sigma Q v = 1 _ {\{n \}} c > 0, \tag {27} +$$ + +where $1_{\{n\}}$ denotes the all ones vector of dimension $n$ . The smallest non-zero singular value of $Q$ , $s_{\min, \text{nonzero}}(Q)$ , equals the smallest non-zero singular value of its transpose $Q^{\top}$ . 
Therefore, there also exists a $v \in \mathcal{V}$ with $\| v \|_2 = 1$ such that + +$$ +\tilde {\gamma} \geq \min _ {j \in [ n ]} \sigma | Q v | _ {[ j ]} \geq \sigma s _ {\min , \text {n o n z e r o s}} \left(Q ^ {\top}\right) \frac {1}{\sqrt {n}}, \tag {28} +$$ + +where we used the fact that any vector $v$ in the span of non-zero eigenvectors satisfies $\| Qv\| _2\geq s_{\min, \text{nonzeros}}(Q)$ and the existence of a solution $v$ for any right-hand side as in Equation 27. Taking the expectation on both sides, Corollary 5.35 of Vershynin (2010) yields that with a probability greater than $1 - 2e^{-\frac{t^2}{2}}$ , $t\geq 0$ we have + +$$ +\tilde {\gamma} \geq \sigma \left(\frac {\sqrt {d - 1} - t}{\sqrt {n}} - 1\right). \tag {29} +$$ + +# B BOUNDS ON THE SUSCEPTIBILITY SCORE + +In Theorem 3.1, we give non-asymptotic bounds on the robust and standard error of a linear classifier trained with adversarial logistic regression. Moreover, we use the robust error decomposition in susceptibility and standard error to gain intuition about how adversarial training may hurt robust generalization. In this section, we complete the result of Theorem 3.1 by also deriving non-asymptotic bounds on the susceptibility score of the max $\ell_2$ -margin classifier. + +Using the results in Appendix A, we can prove following Corollary B.1, which gives non-asymptotic bounds on the susceptibility score. + +Corollary B.1. Assume $d - 1 > n$ . 
For the $\epsilon_{te}$ -susceptibility on test samples from $\mathbb{P}_r$ with $2\epsilon_{te} < r$ and perturbation sets in Equation equation 3 and equation 9 the following holds: + +For $\epsilon_{tr} < \frac{r}{2} - \tilde{\gamma}_{\mathrm{max}}$ , with probability at least $1 - 2\mathbb{E}^{-\frac{\alpha^2(d - 1)}{2}}$ for any $0 < \alpha < 1$ , over the draw of a dataset $D$ with $n$ samples from $\mathbb{P}_r$ , the $\epsilon_{te}$ -susceptibility is upper and lower bounded by + +$$ +\operatorname {S u s c} \left(\widehat {\theta} ^ {\epsilon_ {t r}}; \epsilon_ {t e}\right) \leq \Phi \left(\frac {(r - 2 \epsilon_ {t r}) (\epsilon_ {t e} - \frac {r}{2})}{2 \widetilde {\gamma} _ {\max } \sigma}\right) - \Phi \left(\frac {(r - 2 \epsilon_ {t r}) (- \epsilon_ {t e} - \frac {r}{2})}{2 \widetilde {\gamma} _ {\min } \sigma}\right) \tag {30} +$$ + +$$ +S u s c (\widehat {\theta} ^ {\epsilon_ {t r}}; \epsilon_ {t e}) \geq \Phi \left(\frac {(r - 2 \epsilon_ {t r}) (\epsilon_ {t e} - \frac {r}{2})}{2 \tilde {\gamma} _ {\min} \sigma}\right) - \Phi \left(\frac {(r - 2 \epsilon_ {t r}) (- \epsilon_ {t e} - \frac {r}{2})}{2 \tilde {\gamma} _ {\max} \sigma}\right) +$$ + +We give the proof in Subsection B.1. Observe that the bounds on the susceptibility score in Corollary B.1 consist of two terms each, where the second term decreases with $\epsilon_{\mathrm{tr}}$ , but the first term increases. We recognise following two regimes: the max $\ell_2$ -margin classifier is close to the ground truth $e_1$ or not. Clearly, the ground truth classifier has zero susceptibility and hence classifiers close to the ground truth also have low susceptibility. On the other hand, if the max $l_2$ -margin classifier is not close to the ground truth, then putting less weight on the first coordinate increases invariance to the perturbations along the first direction. 
Recall that by Lemma A.1, increasing $\epsilon_{\mathrm{tr}}$ , decreases the weight on the first coordinate of the max $\ell_2$ -margin classifier. Furthermore, in the low sample size regime, we are likely not close to the ground truth. Therefore, the regime where the susceptibility decreases with increasing $\epsilon_{\mathrm{tr}}$ dominates in the low sample size regime. + +To confirm the result of Corollary B.1, we plot the mean and standard deviation of the susceptibility score of 5 independent experiments. The results are depicted in Figure 7. We see that for low standard + +error, when the classifier is reasonably close to the optimal classifier, the susceptibility increases slightly with increasing adversarial budget. However, increasing the adversarial training budget, $\epsilon_{\mathrm{tr}}$ , further, causes the susceptibility score to drop greatly. Hence, we can recognize both regimes and validate that, indeed, the second regime dominates in the low sample size setting. + +# B.1 PROOF OF COROLLARY B.1 + +We proof the statement by bounding the robustness of a linear classifier. Recall that the robustness of a classifier is the probability that a classifier does not change its prediction under an adversarial attack. The susceptibility score is then given by + +$$ +\operatorname {S u s c} \left(\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}; \epsilon_ {\mathrm {t e}}\right) = 1 - \operatorname {R o b} \left(\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}; \epsilon_ {\mathrm {t e}}\right). \tag {31} +$$ + +The proof idea is as follows: since the perturbations are along the first basis direction, $e_1$ , we compute the distance from the robust $l_2$ -max margin $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ to a point $(X,Y)\sim \mathbb{P}$ . 
Then, we note that the robustness of $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ is given by the probability that the distance along $e_1$ , from $X$ to the decision plane induced by $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ is greater than $\epsilon_{\mathrm{te}}$ . Lastly, we use the non-asymptotic bounds of Lemma A.2. + +Recall, by Lemma A.1, the max $l_{2}$ -margin classifier is of the form of + +$$ +\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}} = \frac {1}{\sqrt {(r - 2 \epsilon_ {\mathrm {t r}}) ^ {2} + 4 \tilde {\gamma} ^ {2}}} \left[ r - 2 \epsilon_ {\mathrm {t r}}, 2 \tilde {\gamma} \tilde {\theta} \right]. \tag {32} +$$ + +Let $(X,Y)\sim \mathbb{P}$ . The distance along $e_1$ from $X$ to the decision plane induced by $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ , $\mathcal{H}(\widehat{\theta}^{\epsilon_{\mathrm{tr}}})$ , is given by + +$$ +d _ {e _ {1}} (X, \mathcal {H} (\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}})) = \left| X _ {[ 1 ]} + \frac {1}{\widehat {\theta} _ {[ 0 ]} ^ {\epsilon_ {\mathrm {t r}}}} \sum_ {i = 2} ^ {d} \widehat {\theta} _ {[ i ]} ^ {\epsilon_ {\mathrm {t r}}} X _ {[ i ]} \right|. +$$ + +Substituting the expression of $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ in Equation 32 yields + +$$ +d _ {e _ {1}} (X, \mathcal {H} (\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}})) = \left| X _ {[ 1 ]} + 2 \tilde {\gamma} \frac {1}{(r - \epsilon_ {\mathrm {t r}})} \sum_ {i = 2} ^ {d} \tilde {\theta} _ {[ i ]} X _ {[ i ]} \right|. +$$ + +Let $N$ be a standard normal distributed random variable. By definition $\| \tilde{\theta}\| _2^2 = 1$ and using that a sum of Gaussian random variables is again a Gaussian random variable, we can write + +$$ +d _ {e _ {1}} \left(X, \mathcal {H} \left(\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}\right)\right) = \left| X _ {[ 1 ]} + 2 \widetilde {\gamma} \frac {\sigma}{\left(r - \epsilon_ {\mathrm {t r}}\right)} N \right|. 
+$$ + +The robustness of $\widehat{\theta}^{\epsilon_{\mathrm{tr}}}$ is given by the probability that $d_{e_1}(X,\mathcal{H}(\widehat{\theta}^{\epsilon_{\mathrm{tr}}})) > \epsilon_{\mathrm{te}}$ . Hence, using that $X_{1} = \pm \frac{r}{2}$ with probability $\frac{1}{2}$ , we get + +$$ +\operatorname {R o b} \left(\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}; \epsilon_ {\mathrm {t e}}\right) = P \left[ \frac {r}{2} + 2 \widetilde {\gamma} \frac {\sigma}{(r - 2 \epsilon_ {\mathrm {t r}})} N > \epsilon_ {\mathrm {t e}} \right] + P \left[ \frac {r}{2} + 2 \widetilde {\gamma} \frac {\sigma}{(r - \epsilon_ {\mathrm {t r}})} N < - \epsilon_ {\mathrm {t e}} \right]. \tag {33} +$$ + +![](images/9f8cac3f607343a7145a010ec398b9228e234301d7eb198f39a7b1e1cb4989d2.jpg) +(a) Susceptibility score decreases with $\epsilon_{\mathrm{tr}}$ + +![](images/02ed5f16994099a05932f6d9ffce85ac74e4f13a1e9351bd0d56039b5cbc740e.jpg) +(b) Robust error decomposition +Figure 7: We set $r = 6$ , $d = 1000$ , $n = 50$ and $\epsilon_{\mathrm{te}} = 2.5$ . (a) The average susceptibility score and the standard deviation over 5 independent experiments. Note how the bounds closely predict the susceptibility score. (b) For comparison, we also plot the robust error decomposition in susceptibility and standard error. Even though the susceptibility decreases, the robust error increases with increasing adversarial budget $\epsilon_{\mathrm{tr}}$ . + +We can rewrite Equation 33 in the form + +$$ +\operatorname {R o b} \left(\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}; \epsilon_ {\mathrm {t e}}\right) = P \left[ N > \frac {(r - 2 \epsilon_ {\mathrm {t r}}) \left(\epsilon_ {\mathrm {t e}} - \frac {r}{2}\right)}{2 \widetilde {\gamma} \sigma} \right] + P \left[ N < \frac {(r - 2 \epsilon_ {\mathrm {t r}}) \left(- \epsilon_ {\mathrm {t e}} - \frac {r}{2}\right)}{2 \widetilde {\gamma} \sigma} \right]. 
+$$ + +Recall, that $N$ is a standard normal distributed random variable and denote by $\Phi$ the cumulative standard normal density. By definition of the cumulative density function, we find that + +$$ +\mathrm {R o b} (\widehat {\theta} ^ {\epsilon_ {\mathrm {t r}}}; \epsilon_ {\mathrm {t e}}) = 1 - \Phi \left(\frac {(r - 2 \epsilon_ {\mathrm {t r}}) (\epsilon_ {\mathrm {t e}} - \frac {r}{2})}{2 \widetilde {\gamma} \sigma}\right) + \Phi \left(\frac {(r - 2 \epsilon_ {\mathrm {t r}}) (- \epsilon_ {\mathrm {t e}} - \frac {r}{2})}{2 \widetilde {\gamma} \sigma}\right). +$$ + +Substituting the bounds on $\tilde{\gamma}$ of Lemma A.2 gives us the non-asymptotic bounds on the robustness score and by Equation 31 also on the susceptibility score. + +# C EXPERIMENTAL DETAILS ON THE LINEAR MODEL + +In this section, we provide detailed experimental details to Figures 3 and 4. + +We implement adversarial logistic regression using stochastic gradient descent with a learning rate of 0.01. Note that logistic regression converges logarithmically to the robust max $l_{2}$ -margin solution. As a consequence of the slow convergence, we train for up to $10^{7}$ epochs. Both during training and test time we solve $\max_{x_i' \in T(x_i; \epsilon_{\mathrm{tr}})} L(f_\theta(x_i') y_i)$ exactly. Hence, we exactly measure the robust error. Unless specified otherwise, we set $\sigma = 1$ , $r = 12$ and $\epsilon_{\mathrm{te}} = 4$ . + +Experimental details on Figure 3 (a) We draw 5 datasets with $n = 50$ samples and input dimension $d = 1000$ from the distribution $\mathbb{P}$ . We then run adversarial logistic regression on all 5 datasets with adversarial training budgets, $\epsilon_{\mathrm{tr}} = 1$ to 5. To compute the resulting robust error gap of all the obtained classifiers, we use a test set of size $10^{6}$ . Lastly, we compute the lower bound given in part 2. of Theorem 3.1. (b) We draw 5 datasets with different sizes $n$ between 50 and $10^{4}$ . 
We take an input dimension of $d = 10^{4}$ and plot the mean and standard deviation of the robust error after adversarial and standard logistic regression over the 5 samples.(c) We again draw 5 datasets for each $d / n$ constellation and compute the robust error gap for each dataset. + +Experimental details on Figure 4 For both (a) and (b) we set $d = 1000$ , $\epsilon_{\mathrm{te}} = 4$ , and vary the adversarial training budget $(\epsilon_{\mathrm{tr}})$ from 1 to 5. For every constellation of $n$ and $\epsilon_{\mathrm{tr}}$ , we draw 10 datasets and show the average and standard deviation of the resulting robust errors. In (b), we set $n = 50$ . + +# D EXPERIMENTAL DETAILS ON THE WATERBIRDS DATASET + +In this section, we discuss the experimental details and construction of the Waterbirds dataset in more detail. We also provide ablation studies of attack parameters such as the size of the motion blur kernel, plots of the robust error decomposition with increasing $n$ , and some experiments using early stopping. + +# D.1 THE WATERBIRDS DATASET + +To build the Waterbirds dataset, we use the CUB-200 dataset Welinder et al. (2010), which contains images and labels of 200 bird species, and 4 background classes (forest, jungle/bamboo, water ocean, water lake natural) of the Places dataset Zhou et al. (2017). The aim is to recognize whether or not the bird, in a given image, is a waterbird (e.g. an albatros) or a landbird (e.g. a woodpecker). To create the dataset, we randomly sample equally many water- as landbirds from the CUB-200 dataset. Thereafter, we sample for each bird image a random background image. Then, we use the segmentation provided in the CUB-200 dataset to segment the birds from their original images and paste them onto the randomly sampled backgrounds. The resulting images have a size of $256 \times 256$ . Moreover, we also resize the segmentations such that we have the correct segmentation profiles of the birds in the new dataset as well. 
For the concrete implementation, we use the code provided by Sagawa et al. (2020). + +![](images/308fa5b96594d839d7a3f2b5ad9586a1aa7a275e5d7209cfcdef6747d2d50ec9.jpg) +(a) Original + +![](images/3214b31a4dbf850b77bead8e8eefad8b9b38d23cc6c5b73e696f707a259ce557.jpg) +(b) $M = 5$ + +![](images/20d37da60f42bee0071ee398b1cf254b7295c8d32f8e0b65adbcfd1aea602368.jpg) +(c) $M = 10$ +Figure 8: An ablation study of the motion blur kernel size, which corresponds to the severity level of the blur. For increasing $M$ , the severity of the motion blur increases. In particular, note that for $M = 15$ and even $M = 20$ , the bird remains recognizable: we do not semantically change the class, i.e. the perturbations are consistent. + +![](images/1946f37d32cd5f414b8f93df16b46e47268ff322809cfafd6be88aa9f758013b.jpg) +(d) $M = 15$ + +![](images/dd41066b90a2b7ef8df0dacd0b2e195f8a7cb2eb31d7ceb7e710e01291e23dd7.jpg) +(e) $M = 20$ + +# D.2 EXPERIMENTAL TRAINING DETAILS + +Following the example of Sagawa et al. (2020), we use a ResNet50 or ResNet18 pretrained on the ImageNet dataset for all experiments in the main text, a weight-decay of $10^{-4}$ , and train for 300 epochs using the Adam optimizer. Extensive fine-tuning of the learning rate resulted in an optimal learning rate of 0.006 for all experiments using the adversarial illumination attack and a pretrained ResNet50. For the experiments considering the adversarial illumination attack using a pretrained VGG19 or Densenet121 network, we found optimal learning rates of 0.001 and 0.002 respectively. Lastly, we found that for all experiments using the motion blur attack a learning rate of 0.0011 was optimal. Adversarial training is implemented as suggested in Madry et al. (2018): at each iteration we find the worst case perturbation with an exact or approximate method. In all our experiments, the resulting classifier interpolates the training set. We plot the mean over all runs and the standard deviation of the mean. 
+ +# D.3 SPECIFICS TO THE MOTION BLUR ATTACK + +Fast moving objects or animals are hard to photograph due to motion blur. Hence, when trying to classify or detect moving objects from images, it is imperative that the classifier is robust against reasonable levels of motion blur. We implement the attack as follows. First, we segment the bird from the original image, then use a blur filter and lastly, we paste the blurred bird back onto the background. We are able to apply more severe blur, by enlarging the kernel of the filter. See Figure 8 for an ablation study of the kernel size. + +The motion blur filter is implemented as follows. We use a kernel of size $M \times M$ and build the filter as follows: we fill the row $(M - 1)/2$ of the kernel with the value $1 / M$ . Thereafter, we use the 2D convolution implementation of OpenCV (filter2D) Bradski (2000) to convolve the kernel with the image. Note that applying a rotation before the convolution to the kernel, changes the direction of the resulting motion blur. Lastly, we find the most detrimental level of motion blur using a list-search over all levels up to $M_{max}$ . + +![](images/f614e660b88a78c7880f2ce5e90593675e5a477d53b9332bedc7d55c309f8c42.jpg) +(a) $\epsilon = -0.3$ +Figure 9: An ablation study of the different lighting changes of the adversarial illumination attack. Even though the directed attack perturbs the signal component in the image, the bird remains recognizable in all cases. 
+ +![](images/b58a0da6bfacfca42a7873a0c8f5585149d0f514f1621f23d9e4363f6ece74e7.jpg) +(b) $\epsilon = -0.2$ + +![](images/6c7f4b7890ebdbafdb0111110a3ef64cba4d583f402b03b817f39789467e475d.jpg) +(c) $\epsilon = -0.1$ + +![](images/22d8287926a9871b28773de529a69f7e4249eea73095cb27a7325ea0fc2af986.jpg) +(d) Original + +![](images/193de382e84891a3e7b1626ab841a78eb9eb103df9cc6234bd3c3bc6827efcf9.jpg) +(e) $\epsilon = 0.1$ + +![](images/8e257db8389ee975e1a3b8730315c3d4fc6b9c07b23338ffc622353aa83da60f.jpg) +(f) $\epsilon = 0.2$ + +![](images/7e2e802520cbd90b9078ef4c8d1940a88b0111080a9d57a6b25547991daf347c.jpg) +(g) $\epsilon = 0.3$ + +![](images/072ea61b93e47129f9eec199a253eb05855f7b3f005bae8ff79fa5d5b3a77b9e.jpg) +(a) Robust error + +![](images/7996db4272d7089d55bdfb310602478efd147e10a8f85b3c1c51e28552951112.jpg) +(b) Standard error +Figure 10: The robust error decomposition of the experiments depicted in Figure 10a. The plots depict the mean and standard deviation of the mean over several independent experiments. We see that, in comparison to standard training, the reduction in susceptibility for adversarial training is minimal in the low sample size regime. Moreover, the increase in standard error of adversarial training is quite severe, leading to an overall increase in robust error in the low sample size regime. + +![](images/5ceff4dafc9a91a71f450435d67582f561781041746756b7cda66dc823313a9d.jpg) +(c) Susceptibility + +# D.4 SPECIFICS TO THE ADVERSARIAL ILLUMINATION ATTACK + +An adversary can hide objects using poor lightning conditions, which can for example arise from shadows or bright spots. To model poor lighting conditions on the object only (or targeted to the object), we use the adversarial illumination attack. The attack is constructed as follows: First, we segment the bird from their background. Then we apply an additive constant $\epsilon$ to the bird, where the absolute size of the constant satisfies $|\epsilon| < \epsilon_{\mathrm{te}} = 0.3$ . 
Thereafter, we clip the values of the bird images to $[0,1]$ , and lastly, we paste the bird back onto the background. See Figure 9 for an ablation of the parameter $\epsilon$ of the attack. It is non-trivial how to (approximately) find the worst perturbation. We find an approximate solution by searching over all perturbations with increments of size $\epsilon_{\mathrm{te}} / K_{\mathrm{max}}$ . Denote by seg, the segmentation profile of the image $x$ . We consider all perturbed images in the form of + +$$ +x _ {p e r t} = (1 - \operatorname {s e g}) x + \operatorname {s e g} \left(x + \epsilon \frac {K}{K _ {\max}} 1 _ {2 5 5 \times 2 5 5}\right), K \in [ - K _ {\max }, K _ {\max } ]. +$$ + +During training time we set $K_{max} = 16$ and therefore search over 33 possible images. During test time we search over 65 images ( $K_{max} = 32$ ). + +# D.5 EARLY STOPPING + +In all our experiments on the Waterbirds dataset, a parameter search lead to an optimal weight-decay and learning rate of $10^{-4}$ and 0.006 respectively. Another common regularization technique is early stopping, where one stops training on the epoch where the classifier achieves minimal robust error on a hold-out dataset. To understand if early stopping can mitigate the effect of adversarial training aggregating robust generalization in comparison to standard training, we perform the following experiment. On the Waterbirds dataset of size $n = 20$ and considering the adversarial illumination attack, we compare standard training with early stopping and adversarial training $(\epsilon_{\mathrm{tr}} = \epsilon_{\mathrm{te}} = 0.3)$ with early stopping. Considering several independent experiments, early stopped adversarial training has an average robust error of 33.5 a early stopped standard training 29.1. Hence, early stopping does decrease the robust error gap, but does not close it. 
+ +# D.6 ERROR DECOMPOSITION WITH INCREASING $n$ + +In Figure 10a and 11a, we see that adversarial training hurts robust generalization in the small sample size regime. For completeness, we plot the robust error composition for adversarial and standard training in Figure 10. We see that in the low sample size regime, the drop in susceptibility that adversarial training achieves in comparison to standard training, is much lower than the increase in standard error. Conversely, in the high sample regime, the drop of susceptibility from adversarial training over standard training is much bigger than the increase in standard error. + +# D.7 DIFFERENT ARCHITECTURES + +For completeness, we also performed similar experiments on the waterbirds dataset using the adversarial illumination attack with different network architectures as with the pretrained ResNet50 + +![](images/1fa6f9a850b1bbc638d3bfaf586a74d1012c61196e4406d743a533a694153e24.jpg) +(a) Robust error + +![](images/97732f1c42e0117a6f40b172ac296a32fcc2bedddc50450cbf46cff242faf72d.jpg) +(b) Standard error +Figure 11: The robust error decomposition of the experiments depicted in Figure 6. The plots depict the mean and standard deviation of the mean over several independent experiments. We see that, in comparison to standard training, the reduction in susceptibility for adversarial training is minimal in the low sample size regime. Moreover, the increase in standard error of adversarial training is quite severe, leading to an overall increase in robust error in the low sample size regime. + +![](images/4f86b66d0ee15c4dc58a455fbc261eda21a0b72a5331db55da3b0fc8a4a05f8e.jpg) +(c) Susceptibility + +network. In particular, we considered the following pretrained network architectures: VGG19 and Densenet121. See Figure 12 for the results. We observe that accuracies, adversarial training hurts in the low sample size regime, but helps when enough data is available. 
+ +![](images/c2545b9ee64e06e61529db60614c35b0feef3a5b2579bf760b328868e3263af0.jpg) +(a) VGG19 +Figure 12: The robust error of adversarial training and standard training with increasing sample size using the adversarial illumination attack with $\epsilon_{\mathrm{te}} = 0.3$ . We depict the mean and the standard deviation of the mean for multiple runs. Observe that across models, adversarial training hurts in the low sample size regime, but helps when enough samples are available. + +![](images/4a1d525f26b278bc0cc15364a502d8f4b8a3789e35eb9f03eeb528ad3d61a44f.jpg) +(b) Densenet121 + +# D.8 UNDIRECTED ATTACKS ON THE WATERBIRDS DATASET + +In this section, we analyse adversarial training for $\ell_2$ -and $\ell_{\infty}$ -ball perturbations in the small sample size regime. We observe that while adversarial training hurts standard generalization, it helps robust generalization. + +Adversarial training with $\ell_2$ -balls We train and test with small $\ell_2$ -balls, $\epsilon_{\mathrm{te}} = 0.2$ , such that the networks trained with standard training achieve a non-zero robust accuracy and the networks trained with adversarial training achieve non-trivial standard accuracy. We see in Figure 13, that adversarial training with $\ell_2$ -balls hurts standard generalization while increasing robust generalization. Moreover, in Figure 14, we see that also in the very small sample size regime, adversarial training with increasing $\epsilon_{\mathrm{tr}}$ increases the standard error, but reduces the susceptibility. + +Adversarial training with $\ell_{\infty}$ -balls We also consider $\ell_{\infty}$ -ball perturbation. We see in Figure 15 that even the smallest perturbation budget $\epsilon_{\mathrm{te}} = \frac{2}{255}$ , standard training has robust error of 100 percent. On the other hand, adversarial training achieves low, but non-zero robust error. + +Experimental details We use an ImageNet pretrained ResNet34 and train for 300 epochs. 
Moreover, for reliable robust error and susceptibility evaluation of the attacks we use AutoAttack Croce & Hein (2020). All networks were trained such that the network interpolates the training dataset and has low robust error with non-trivial standard error. For the networks trained using standard training we use a learning rate of 0.006 and for the networks trained with adversarial training we used a learning + +![](images/4813d8c68584c6fd6f0d091879855b4edf8e67863a56e9c420fecb0e9845185d.jpg) +(a) Robust error + +![](images/2bf552c9eac55f28ee8b4c7e6b56d38077f2fe4ec3e53879d3f3f376fb952996.jpg) +(b) Standard error + +![](images/612ea7ac44cfeb101c06298a654da19a8af7b97a6933a613ea07a3d154a71d38.jpg) +(c) Susceptibility + +![](images/0bebaf17241c57e738fd9ae7f28a94ccdfccb03e957d2362bfbbbf5c4f468af2.jpg) +Figure 13: The robust error decomposition of adversarial training with $\ell_2$ -balls of size $\epsilon_{\mathrm{tr}} = 0.2$ and test adversaries with $\ell_2$ -balls of size $\epsilon_{\mathrm{te}} = 0.2$ . The plots depict the mean and standard deviation of the mean over several independent experiments. We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers. + +![](images/9650e61ba70fd559ddb12196d5584aa791956547d67ee4f0ccf561562bdbebd1.jpg) +Figure 14: The robust error decomposition of adversarial training in function of $\epsilon_{\mathrm{tr}}$ in the small sample size regime $n = 20$ . We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers with increasing $\epsilon_{\mathrm{tr}}$ . We take $n = 20$ and consider test adversaries with $\ell_2$ -balls of size $\epsilon_{\mathrm{te}} = 0.2$ . The plots depict the mean and standard deviation of the mean over several independent experiments. 
+(a) Robust error +Figure 15: The robust error decomposition of adversarial training with $\ell_{\infty}$ -balls of size $\epsilon_{\mathrm{tr}} = \frac{2}{255}$ and test adversaries with $\ell_{\infty}$ -balls of size $\epsilon_{\mathrm{te}} = \frac{2}{255}$ . The plots depict the mean and standard deviation of the mean over several independent experiments. We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers. + +![](images/50d23d820785208d59d85c8b949f3c9a2b3001572c97f9839ac28daad5a20dd6.jpg) +(b) Standard error + +![](images/d94a65abce4a6df74a6e0c2e27cda3fa545e574980cce73fe85a371e9d165da7.jpg) +(c) Susceptibility + +rate of $5 \cdot 10^{-4}$ . We also trained with a weight decay of $10^{-4}$ , a batch size of 8 and a momentum of 0.9 for all networks. We train at least 3 networks for all settings and report the mean and standard deviation of the mean of the standard error, robust error and susceptibility over the three runs. + +# EXPERIMENTAL DETAILS ON CIFAR-10 + +In this section, we give the experimental details on the CIFAR-10-based experiments shown in Figures 1 and 17. + +Subsampling CIFAR-10 In all our experiments we subsample CIFAR-10 to simulate the low sample size regime. We ensure that for all subsampled versions the number of samples of each class are equal. Hence, if we subsample to 500 training images, then each class has exactly 50 images, which are drawn uniformly from the $5k$ training images of the respective class. + +Mask perturbation on CIFAR-10 On CIFAR-10, we consider the square black mask attack where the adversary can mask a square in the image of size $\epsilon_{\mathrm{te}} \times \epsilon_{\mathrm{te}}$ by setting the pixel values zero. 
To ensure that the mask cannot cover all the information about the true class in the image, we restrict the size of the masks to be at most $2 \times 2$ , while allowing for all possible locations of the mask in the targeted image. For exact robust error evaluation, we perform a full grid search over all possible locations during test time. We show an example of a black-mask attack on each of the classes in CIFAR-10 in Figure 16. + +During training, a full grid search is computationally intractable so that we use an approximate attack similar to Wu et al. (2020) during training time: by identifying the $K = 16$ most promising mask locations with a heuristic as follows. First, we identify promising mask locations by analyzing the gradient, $\nabla_{x}L(f_{\theta}(x),y)$ , of the cross-entropy loss with respect to the input. Masks that cover part of the image where the gradient is large, are more likely to increase the loss. Hence, we compute the $K$ mask locations $(i,j)$ , where $\| \nabla_{x}L(f_{\theta}(x),y)_{[i:i + 2,j:j + 2]}\| _1$ is the largest and take using a full list-search the mask that incurs the highest loss. Our intuition from the theory predicts that higher $K$ , and hence a more exact "defense", only increases the robust error of adversarial training, since the mask could then more efficiently cover important information about the class. + +![](images/8f3c05007626d1ca15fe61a8198f820712a7ea9791907a7525cb5d4a8fec1bb8.jpg) +Figure 16: We show an example of a mask perturbation for all 10 classes of CIFAR-10. Even though the attack occludes part of the images, a human can still easily classify all images correctly. + +Experimental training details For all our experiments on CIFAR-10, we adjusted the code provided by Phan (2021). As typically done for CIFAR-10, we augment the data with random cropping and horizontal flipping. For the experiments with results depicted in Figures 1 and 17, we use a ResNet18 network and train for 100 epochs. 
We tune the parameters learning rate and weight decay for low robust error. For standard training, we use a learning rate of 0.01 with equal weight decay. For adversarial training, we use a learning rate of 0.015 and a weight decay of $10^{-4}$ . We run each experiment three times for every dataset with different initialization seeds, and plot the average and standard deviation over the runs. + +For the experiments in Figure 1 and 18 we use an attack strength of $K = 4$ . Recall that we perform a full grid search at test time and hence have a good approximation of the robust accuracy and susceptibility score. + +![](images/962a80bd4b81e3e400f347ea5d098f7e0eb92be644e217c37d36dc6863740e53.jpg) +Figure 17: The robust error decomposition in standard error and susceptibility for varying attack strengths $K$ . We see that the larger $K$ , the lower the susceptibility, but the higher the standard error. + +Increasing training attack strength We investigate the influence of the attack strength $K$ on the robust error for adversarial training. We take $\epsilon_{\mathrm{tr}} = 2$ and $n = 500$ and vary $K$ . The results are depicted in Figure 17. We see that for increasing $K$ , the susceptibility decreases, but the standard error increases more severely, resulting in an increasing robust error. + +Robust error decomposition In Figure 1, we see that the robust error increases for adversarial training compared to standard training in the low sample size regime, but the opposite holds when enough samples are available. For completeness, we provide a full decomposition of the robust error in standard error and susceptibility for standard and adversarial training. We plot the decomposition in Figure 18. + +# F STATIC HAND GESTURE RECOGNITION + +The goal of static hand gesture or posture recognition is to recognize hand gestures such as a pointing index finger or the okay-sign based on static data such as images Oudah et al. (2020); Yang et al. + +(2013). 
The current use of hand gesture recognition is primarily in the interaction between computers and humans Oudah et al. (2020). More specifically, typical practical applications can be found in the environment of games, assisted living, and virtual reality Mujahid et al. (2021). In the following, we conduct experiments on a hand gesture recognition dataset constructed by Mantecón et al. (2019), which consists of near-infrared stereo images obtained using the Leap Motion device. First, we crop or segment the images after which we use logistic regression for classification. We see that adversarial logistic regression deteriorates robust generalization with increasing $\epsilon_{\mathrm{tr}}$ . + +Static hand-gesture dataset We use the dataset made available by Mantecón et al. (2019). This dataset consists of near-infrared stereo images taken with the Leap Motion device and provides detailed skeleton data. We base our analysis on the images only. The size of the images is $640 \times 240$ pixels. The dataset consists of 16 classes of hand poses taken by 25 different people. We note that the variety between the different people is relatively wide; there are men and women with different posture and hand sizes. However, the different samples taken by the same person are alike. + +We consider binary classification between the index-pose and L-pose, and take as a training set 30 images of the users 16 to 25. This results in a training dataset of 300 samples. We show two examples of the training dataset in Figure 19, each corresponding to a different class. Observe that the near-infrared images darken the background and successfully highlight the hand-pose. As a test dataset, we take 10 images of each of the two classes from the users 1 to 10 resulting in a test dataset of size 200. + +**Cropping the dataset** To speed up training and ease the classification problem, we crop the images from a size of $640 \times 240$ to a size of $200 \times 200$ . 
We crop the images using a basic image segmentation technique to stay as close as possible to real-world applications. The aim is to crop the images such that the hand gesture is centered within the cropped image. + +For every user in the training set, we crop an image of the L-pose and the index pose by hand. We call these images the training masks $\{\mathrm{masks}_i\}_{i=1}^{20}$ . We note that the more a particular window of an image resembles a mask, the more likely that the window captures the hand gesture correctly. Moreover, the near-infrared images are such that the hands of a person are brighter than the surroundings of the person itself. Based on these two observations, we define the best segment or window, defined by the upper left coordinates $(i,j)$ , for an image $x$ as the solution to the following optimization problem: + +$$ +\underset{i \in [440],\, j \in [40]}{\arg\min} \sum_{l=1}^{20} \|\mathrm{masks}_{l} - x_{\{i:i+200,\, j:j+200\}}\|_{2}^{2} - \frac{1}{2}\|x_{\{i+w,\, j+h\}}\|_{1}. \tag{34} +$$ + +Equation 34 is solved using a full grid search. We use the result to crop both training and test images. Upon manual inspection of the cropped images, close to all images were perfectly cropped. We replace the handful of poorly cropped training images with hand-cropped counterparts. + +Square-mask perturbations Since we use logistic regression, we perform a full grid search to find the best adversarial perturbation at training and test time. For completeness, the upper left coordinates + +![](images/4502c1bf228648f40c15268a303921271876dbb44477d6fcaac2679eca5b0995.jpg) +(a) Robust error + +![](images/38e471ba42dd37e14e3a8db65922e6c39a88c3e2c12371f52024872c3b996834.jpg) +(b) Standard error +Figure 18: The robust error decomposition in standard error and susceptibility of the subsampled datasets of CIFAR-10 after adversarial and standard training. 
For small sample size, adversarial training has higher robust error than standard training. + +![](images/33c9a5b0e9ca0d43691b68e683d8fe948c15802f0bf95bc6a8e39725fdb9dcd8.jpg) +(c) Susceptibility + +![](images/ad02e8b4c078764e1ca3fd16302c513719a6c229139042de1270d9e805555f92.jpg) +(a) L pose + +![](images/1f696bdcbdb4525ecbc4b630198f092d4ac936b3045ca4db67441f2d59f0637d.jpg) +(b) Index pose + +![](images/5f5121a185f800d9707d726c94c5020fec1d2515474efd7d4b24c21c092db34f.jpg) +Figure 19: Examples of the original images of the considered hand-gestures. We recognize the "L"-sign in Figure 19a and the index sign in Figure 19b. Observe that the near-infrared images highlight the hand pose well and blend out much of the non-useful or noisy background. +(a) Cropped L pose +Figure 20: Examples of the cropped hand-gesture images. We see that the hands are centered and the images have a size of $200 \times 200$ . In Figure 20c we show an example of the square black-mask perturbation. + +![](images/2b48acfec7de0b523a300cf6494ea3830a1e9c2682c157b0c206d41136172bb1.jpg) +(b) Cropped index pose + +![](images/cf12fa372fc46b7868a4a707ff18f18ec6208d99c94c4a6883f7601f03b44ae6.jpg) +(c) Black-mask perturbation + +of the optimal black-mask perturbation of size $\epsilon_{\mathrm{tr}} \times \epsilon_{\mathrm{tr}}$ can be found as the solution to + +$$ +\arg\max_{i \in [200-\epsilon_{\mathrm{tr}}],\, j \in [200-\epsilon_{\mathrm{tr}}]} \sum_{l, m \in [\epsilon_{\mathrm{tr}}]} \theta_{[i:i+l,\, j:j+m]}. \tag{35} +$$ + +The algorithm is rather slow as we iterate over all possible windows. We show a black-mask perturbation on an $L$ -pose image in Figure 20c. + +Results We run adversarial logistic regression with square-mask perturbations on the cropped dataset and vary the adversarial training budget and plot the result in Figure 21. We observe that adversarial logistic regression deteriorates robust generalization. 
+ +Because we use adversarial logistic regression, we are able to visualize the classifier. Given the classifier induced by $\theta$ , we can visualize how it classifies the images by plotting $\frac{\theta - \min_{i\in[d]}\theta_{[i]}}{\max_{i\in[d]}\theta_{[i]}}\in [0,1]^d$ . Recall that the class-prediction of our predictor for a data point $(x,y)$ is given by $\mathrm{sign}(\theta^{\top}x)\in \{\pm 1\}$ . The lighter parts of the resulting image correspond to the class with label 1 and the darker patches with the class corresponding to label $-1$ . + +We plot the classifiers obtained by standard logistic regression and adversarial logistic regression with training adversarial budgets $\epsilon_{\mathrm{tr}}$ of 10 and 25 in Figure 22. The darker parts in the classifier correspond to patches that are typically bright for the $L$ -pose. Complementary, the lighter patches in the classifier correspond to patches that are typically bright for the index pose. We see that in the case of adversarial logistic regression, the background noise is much higher than for standard logistic regression. In other words, adversarial logistic regression puts more weight on non-signal parts in the images to classify the + +![](images/43ce2a07cf4257b4a504149373536b2d67bd2154d79ebfc09d771c9ee0b09f31.jpg) +Figure 21: The standard error and robust error for varying adversarial training budget $\epsilon_{\mathrm{tr}}$ . We see that the larger $\epsilon_{\mathrm{tr}}$ the higher the robust error. + +training dataset and hence exhibits worse performance on the test dataset. + +![](images/6e7a5f7e308b622e6cd348122f3d35085268ab1cd1c252c0673785d1da3282bd.jpg) +(a) $\epsilon_{\mathrm{tr}} = 0$ + +![](images/41bbadd545f0831fc85c2431e3c32f8936ddf77a80b40627dd559a109c253ed3.jpg) +(b) $\epsilon_{\mathrm{tr}} = 10$ +Figure 22: We visualize the logistic regression solutions. In Figure 22a we plot the vector that induces the classifier obtained after standard training. 
In Figure 22b and Figure 22c we plot the vector obtained after training with square-mask perturbations of size 10 and 25, respectively. We note the non-signal enhanced background correlations at the parts highlighted with the red circles in the image projection of the adversarially trained classifiers. + +![](images/7b392b5b988645f9de0e728e4d94808ddd76150a02d046121f5f86949513ebda.jpg) +(c) $\epsilon_{\mathrm{tr}} = 25$ \ No newline at end of file diff --git a/2023/Why adversarial training can hurt robust accuracy/images.zip b/2023/Why adversarial training can hurt robust accuracy/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..0d3a2226db7bbd93a15e32822359985cc1986e25 --- /dev/null +++ b/2023/Why adversarial training can hurt robust accuracy/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20621b758bbc884e5e68204d34d98214819542ca8e38573a9b37e1d80b5985fa +size 1089551 diff --git a/2023/Why adversarial training can hurt robust accuracy/layout.json b/2023/Why adversarial training can hurt robust accuracy/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a7c23fb39b6313d6db9f97684204c88a6e0cdb7d --- /dev/null +++ b/2023/Why adversarial training can hurt robust accuracy/layout.json @@ -0,0 +1,30704 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 416, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 416, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 416, + 117 + ], + "type": "text", + "content": "WHY ADVERSARIAL TRAINING CAN HURT ROBUST ACCURACY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 133, + 321, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 133, + 321, + 146 + ], + "spans": [ + { + "bbox": [ + 111, + 133, + 321, + 146 + ], + "type": "text", + "content": "Jacob Clarysse1, Julia Hörrmann2, Fanny Yang1" + } + ] + } + ], + "index": 2 + }, + { + 
"bbox": [ + 112, + 147, + 312, + 167 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 112, + 147, + 312, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 147, + 312, + 157 + ], + "spans": [ + { + "bbox": [ + 112, + 147, + 312, + 157 + ], + "type": "text", + "content": "1. Department of Computer Science, ETH Zürich" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 157, + 290, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 157, + 290, + 167 + ], + "spans": [ + { + "bbox": [ + 112, + 157, + 290, + 167 + ], + "type": "text", + "content": "2. Department of Mathematics, ETH Zürich" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 112, + 168, + 337, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 168, + 337, + 179 + ], + "spans": [ + { + "bbox": [ + 112, + 168, + 337, + 179 + ], + "type": "text", + "content": "{jacob.clarysse;fan.yang}@inf.ethz.ch;" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 179, + 321, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 179, + 321, + 190 + ], + "spans": [ + { + "bbox": [ + 112, + 179, + 321, + 190 + ], + "type": "text", + "content": "{julia.hoerrmann}@stat.math.ethz.ch" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 276, + 219, + 335, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 219, + 335, + 231 + ], + "spans": [ + { + "bbox": [ + 276, + 219, + 335, + 231 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 242, + 471, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 242, + 471, + 354 + ], + "spans": [ + { + "bbox": [ + 140, + 242, + 471, + 354 + ], + "type": "text", + "content": "Machine learning classifiers with high test accuracy often perform poorly under adversarial perturbations. 
It is commonly believed that adversarial training alleviates this issue. In this paper, we demonstrate that, surprisingly, the opposite can be true for a natural class of perceptible perturbations — even though adversarial training helps when enough data is available, it may in fact hurt robust generalization in the small sample size regime. We first prove this phenomenon for a high-dimensional linear classification setting with noiseless observations. Using intuitive insights from the proof, we could find perturbations on standard image datasets for which this behavior persists. Specifically, it occurs for perceptible perturbations that effectively reduce class information such as object occlusions or corruptions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 365, + 207, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 365, + 207, + 377 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 207, + 377 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 385, + 339, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 339, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 339, + 583 + ], + "type": "text", + "content": "Today's best-performing classifiers are vulnerable to adversarial attacks Goodfellow et al. (2015); Szegedy et al. (2014) and exhibit high robust error: for many inputs, their predictions change under adversarial perturbations, even though the true class stays the same. Such content-preserving (Gilmer et al., 2018), consistent (Raghunathan et al., 2020) attacks can be either perceptible or imperceptible. For image datasets, most work to date studies imperceptible attacks that are based on perturbations with limited strength or attack budget. 
These include bounded " + }, + { + "bbox": [ + 104, + 385, + 339, + 583 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 104, + 385, + 339, + 583 + ], + "type": "text", + "content": "-norm perturbations (Goodfellow et al., 2015; Madry et al., 2018; Moosavi-Dezfooli et al., 2016), small transformations using image processing techniques (Ghiasi et al., 2019; Zhao et al., 2020; Laidlaw et al., 2021; Luo et al., 2018) or nearby samples on the data manifold (Lin et al., 2020; Zhou et al., 2020). Even though they do not visibly change the image by definition, imperceptible attacks can often successfully fool a learned classifier." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 345, + 392, + 504, + 479 + ], + "blocks": [ + { + "bbox": [ + 345, + 392, + 504, + 479 + ], + "lines": [ + { + "bbox": [ + 345, + 392, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 345, + 392, + 504, + 479 + ], + "type": "image", + "image_path": "db053b4d669625641de60d177fdcb522d14ad763796fe72449396ecab37d64da.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 487, + 506, + 570 + ], + "lines": [ + { + "bbox": [ + 342, + 487, + 506, + 570 + ], + "spans": [ + { + "bbox": [ + 342, + 487, + 506, + 570 + ], + "type": "text", + "content": "Figure 1: On the Waterbirds dataset attacked by the adversarial illumination attack, adversarial training (yellow) yields higher robust error than standard training (blue) when the sample size is small, even though it helps for large sample sizes and in a setting where the standard error of standard training is small. (see App. D for details)." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 103, + 588, + 507, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 588, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 103, + 588, + 507, + 732 + ], + "type": "text", + "content": "On the other hand, perturbations that naturally occur and are physically realizable are commonly perceptible. Some perceptible perturbations specifically target the object to be recognized: these include occlusions (e.g. stickers placed on traffic signs (Eykholt et al., 2018) or masks of different sizes that cover important features of human faces (Wu et al., 2020)) or corruptions that are caused by the image capturing process (animals that move faster than the shutter speed or objects that are not well-lit, see Figure 2). Others transform the whole image and are not confined to the object itself, such as rotations, translations or corruptions Engstrom et al. (2019); Kang et al. (2019). In this paper, we refer to such perceptible attacks as directed attacks. In contrast to other attacks, they effectively reduce useful class information in the input for any model, without necessarily changing the true label - we say that they are directed and consistent, more formally defined in Section 2. For example, a stop sign with a small sticker could partially cover the text without losing its semantic meaning. Similarly, a flying bird captured with a long exposure time can induce motion blur in the final image without becoming unrecognizable to the observer." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 117, + 95, + 170, + 148 + ], + "blocks": [ + { + "bbox": [ + 117, + 95, + 170, + 148 + ], + "lines": [ + { + "bbox": [ + 117, + 95, + 170, + 148 + ], + "spans": [ + { + "bbox": [ + 117, + 95, + 170, + 148 + ], + "type": "image", + "image_path": "20e801aeba267110296db82a839e1ddf1a7cf57b7ffc5ae8cbe7af8b242fe422.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 125, + 152, + 162, + 162 + ], + "lines": [ + { + "bbox": [ + 125, + 152, + 162, + 162 + ], + "spans": [ + { + "bbox": [ + 125, + 152, + 162, + 162 + ], + "type": "text", + "content": "(a) Masks" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 171, + 95, + 223, + 148 + ], + "blocks": [ + { + "bbox": [ + 171, + 95, + 223, + 148 + ], + "lines": [ + { + "bbox": [ + 171, + 95, + 223, + 148 + ], + "spans": [ + { + "bbox": [ + 171, + 95, + 223, + 148 + ], + "type": "image", + "image_path": "173687da0046c68a3fb5cdcf13b159e5479d8229046b575ce294cf9d7e57370a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 176, + 152, + 220, + 163 + ], + "lines": [ + { + "bbox": [ + 176, + 152, + 220, 
+ 163 + ], + "spans": [ + { + "bbox": [ + 176, + 152, + 220, + 163 + ], + "type": "text", + "content": "(b) Original" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 225, + 95, + 278, + 148 + ], + "blocks": [ + { + "bbox": [ + 225, + 95, + 278, + 148 + ], + "lines": [ + { + "bbox": [ + 225, + 95, + 278, + 148 + ], + "spans": [ + { + "bbox": [ + 225, + 95, + 278, + 148 + ], + "type": "image", + "image_path": "ed5c3950496d37e51c824df4b0ece66fb6baded47681d50b5abf1f39ae0d3bd7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 152, + 274, + 163 + ], + "lines": [ + { + "bbox": [ + 230, + 152, + 274, + 163 + ], + "spans": [ + { + "bbox": [ + 230, + 152, + 274, + 163 + ], + "type": "text", + "content": "(c) Lighting" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 172, + 504, + 214 + ], + "lines": [ + { + "bbox": [ + 104, + 172, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 104, + 172, + 504, + 214 + ], + "type": "text", + "content": "Figure 2: Examples of directed attacks on CIFAR10 and the Waterbirds dataset. In Figure 2a, we corrupt the image with a black mask of size " + }, + { + "bbox": [ + 104, + 172, + 504, + 214 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 104, + 172, + 504, + 214 + ], + "type": "text", + "content": " and in Figure 2c and 2d we change the lighting conditions (darkening) and apply motion blur on the bird in the image respectively. All perturbations reduce the information about the class in the images: they are the result of directed attacks. (e) Directed attacks are a subset of perceptible attacks." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 279, + 95, + 332, + 148 + ], + "blocks": [ + { + "bbox": [ + 279, + 95, + 332, + 148 + ], + "lines": [ + { + "bbox": [ + 279, + 95, + 332, + 148 + ], + "spans": [ + { + "bbox": [ + 279, + 95, + 332, + 148 + ], + "type": "image", + "image_path": "bead8d81be1b2f47ff023313e31b2aa7d1560cadc72723872a6c2696a8f3890a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 290, + 152, + 321, + 163 + ], + "lines": [ + { + "bbox": [ + 290, + 152, + 321, + 163 + ], + "spans": [ + { + "bbox": [ + 290, + 152, + 321, + 163 + ], + "type": "text", + "content": "(d) Blur" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 333, + 95, + 411, + 148 + ], + "blocks": [ + { + "bbox": [ + 333, + 95, + 411, + 148 + ], + "lines": [ + { + "bbox": [ + 333, + 95, + 411, + 148 + ], + "spans": [ + { + "bbox": [ + 333, + 95, + 411, + 148 + ], + "type": "image", + "image_path": "0df3fd1cecceec31c77a870c31c54cfecf5c5ced125048a3ff1b76430c1686c9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 352, + 152, + 474, + 163 + ], + "lines": [ + { + "bbox": [ + 352, + 152, + 474, + 163 + ], + "spans": [ + { + "bbox": [ + 352, + 152, + 474, + 163 + ], + "type": "text", + "content": "(e) Classification of perturbations" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 413, + 95, + 492, + 148 + ], + "blocks": [ + { + "bbox": [ + 413, + 95, + 492, + 148 + ], + "lines": [ + { + "bbox": [ + 413, + 95, + 492, + 148 + ], + "spans": [ + { + "bbox": [ + 413, + 95, + 492, + 148 + ], + "type": "image", + "image_path": "d0acf40bcb6c92928c178ce679671e9bff4201863a077344aa30cefc755687d2.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 223, + 504, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 223, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 504, + 257 + ], + "type": "text", + "content": "In the literature so far, it is widely acknowledged that adversarial training with the same perturbation type and budget as during test time often achieves significantly lower robust error than standard training (Madry et al., 2018; Zhang et al., 2019; Bai et al., 2021)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 262, + 505, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 505, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 505, + 295 + ], + "type": "text", + "content": "In contrast, we show that adversarial training not only increases standard error (Zhang et al., 2019; Tsipras et al., 2019; Stutz et al., 2019; Raghunathan et al., 2020), but surprisingly, in the low sample regime," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 128, + 304, + 481, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 304, + 481, + 316 + ], + "spans": [ + { + "bbox": [ + 128, + 304, + 481, + 316 + ], + "type": "text", + "content": "adversarial training may even increase the robust error compared to standard training!" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 323, + 504, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 323, + 504, + 367 + ], + "spans": [ + { + "bbox": [ + 104, + 323, + 504, + 367 + ], + "type": "text", + "content": "Figure 1 illustrates the main message of our paper on the Waterbirds dataset: Although adversarial training with directed attacks outperforms standard training when enough training samples are available, it is inferior when the sample size is small (but still large enough to obtain a small standard test error)." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 373, + 239, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 239, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 239, + 384 + ], + "type": "text", + "content": "Our contributions are as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 392, + 504, + 498 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 132, + 392, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 392, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 132, + 392, + 504, + 437 + ], + "type": "text", + "content": "- We prove that, almost surely, adversarially training a linear classifier on separable data yields a monotonically increasing robust error as the perturbation budget grows. We further establish high-probability non-asymptotic lower bounds on the robust error gap between adversarial and standard training." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 440, + 504, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 440, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 132, + 440, + 504, + 462 + ], + "type": "text", + "content": "- Our proof provides intuition for why this lower bound on the gap is particularly large for directed attacks in the low sample regime." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 465, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 465, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 132, + 465, + 504, + 498 + ], + "type": "text", + "content": "- We observe empirically for different directed attacks on real-world image datasets that this behavior persists: adversarial training for directed attacks hurts robust accuracy when the sample size is small." 
+ } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 514, + 256, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 256, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 256, + 526 + ], + "type": "text", + "content": "2 ROBUST CLASSIFICATION" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 539, + 504, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 562 + ], + "type": "text", + "content": "We first introduce our robust classification setting more formally by defining the notions of adversarial robustness, directed attacks and adversarial training used throughout the paper." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": "Adversarially robust classifiers For inputs " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": ", we consider multi-class classifiers associated with parameterized functions " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "f_{\\theta}: \\mathbb{R}^d \\to \\mathbb{R}^K" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "K > 2" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "f_{\\theta}: \\mathbb{R}^d \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 567, + 504, + 
624 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "K = 2" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": " is the number of labels. For example, " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "f_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": " could be a linear model (as in Section 3) or a neural network (as in Section 4). The output label predictions are obtained by " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "h(f_{\\theta}(x)) = \\mathrm{sign}(f_{\\theta}(x))" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "K = 2" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "h(f_{\\theta}(x)) = \\arg \\max_{k \\in \\{1, \\dots, K\\}} f_{\\theta}(x)_k" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "inline_equation", + "content": "K > 2" + }, + { + "bbox": [ + 104, + 567, + 504, + 624 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 628, + 505, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 505, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 505, + 684 + ], + "type": "text", + "content": "In order to convince practitioners to use machine learning models in the wild, it is key to demonstrate that they exhibit robustness. One kind of robustness is that they do not change prediction when the input is subject to consistent perturbations, which are small class-preserving perturbations. Mathematically speaking, for the underlying joint data distribution " + }, + { + "bbox": [ + 104, + 628, + 505, + 684 + ], + "type": "inline_equation", + "content": "\\mathbb{P}" + }, + { + "bbox": [ + 104, + 628, + 505, + 684 + ], + "type": "text", + "content": ", the model should have a small " + }, + { + "bbox": [ + 104, + 628, + 505, + 684 + ], + "type": "inline_equation", + "content": "\\epsilon_{te}" + }, + { + "bbox": [ + 104, + 628, + 505, + 684 + ], + "type": "text", + "content": "-robust error, defined as" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 211, + 686, + 504, + 705 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 686, + 504, + 705 + ], + "spans": [ + { + "bbox": [ + 211, + 686, + 504, + 705 + ], + "type": "interline_equation", + "content": "\\operatorname {E r r} (\\theta ; \\epsilon_ {\\mathrm {t e}}) := \\mathbb {E} _ {(x, y) \\sim \\mathbb {P}} \\max _ {x ^ {\\prime} \\in T (x; \\epsilon_ {\\mathrm {t e}})} \\ell \\left(f _ {\\theta} \\left(x ^ {\\prime}\\right), y\\right), \\tag {1}", + "image_path": "8ada2936e4875eb1f6167ff98d0028070047311172708ca119be42c0163b2efa.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": 
"where " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": " is 0 if the class determined by " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "h(f_{\\theta}(x))" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": " is equal to " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": " and 1 otherwise. Further, " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "T(x;\\epsilon_{te})" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": " indicates a perturbation set around " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": " of a certain transformation type with size " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\epsilon_{test}" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": ". 
Note that" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "the (standard) error " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{(x,y)\\sim \\mathbb{P}}\\ell (f_{\\theta}(x),y)" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " of a classifier corresponds to " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\mathrm{Err}(\\theta ;0)" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " - the robust error evaluated at " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 0" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 111, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 111, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 111, + 506, + 189 + ], + "type": "text", + "content": "Directed attacks The inner maximization in Equation 1 is often called the adversarial 
attack of the input " + }, + { + "bbox": [ + 104, + 111, + 506, + 189 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 111, + 506, + 189 + ], + "type": "text", + "content": " for the model " + }, + { + "bbox": [ + 104, + 111, + 506, + 189 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 104, + 111, + 506, + 189 + ], + "type": "text", + "content": " and the corresponding solution is referred to as the adversarial example. In this paper, we consider directed attacks that effectively reduce the information about the true classes, with image-based examples depicted in Figure 2. For linear classification, we analyze directed attacks in the form of additive perturbations that are constrained to the direction of the optimal decision boundary (see details in Section 3.1). In particular, note that the set of directed perturbations is restricted to directions attacking the Bayes optimal classifier." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 195, + 504, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 195, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 104, + 195, + 504, + 218 + ], + "type": "text", + "content": "Adversarial training A common approach to obtain classifiers with a good robust accuracy is to minimize the training objective " + }, + { + "bbox": [ + 104, + 195, + 504, + 218 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 195, + 504, + 218 + ], + "type": "text", + "content": " with a surrogate robust classification loss " + }, + { + "bbox": [ + 104, + 195, + 504, + 218 + ], + "type": "inline_equation", + "content": "L" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 220, + 221, + 504, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 221, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 220, + 221, + 504, + 251 + ], + "type": 
"interline_equation", + "content": "\\mathcal {L} _ {\\epsilon_ {\\mathrm {t r}}} (\\theta) := \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\max _ {x _ {i} ^ {\\prime} \\in T \\left(x _ {i}; \\epsilon_ {\\mathrm {t r}}\\right)} L \\left(f _ {\\theta} \\left(x _ {i} ^ {\\prime}\\right) y _ {i}\\right), \\tag {2}", + "image_path": "1f0660484e7940f320901b1ae03db035e8667c7756745053fc4ffe99c877f31d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 255, + 505, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 505, + 333 + ], + "type": "text", + "content": "also called adversarial training. In practice, we often use the cross entropy loss " + }, + { + "bbox": [ + 104, + 255, + 505, + 333 + ], + "type": "inline_equation", + "content": "L(z) = \\log (1 + e^{-z})" + }, + { + "bbox": [ + 104, + 255, + 505, + 333 + ], + "type": "text", + "content": " and minimize the robust objective by using first order optimization methods such as (stochastic) gradient descent. SGD is also the algorithm that we focus on in both the theoretical and experimental sections. When the desired type of robustness is known in advance, it is standard practice to use the same perturbation set for training as for testing, i.e. " + }, + { + "bbox": [ + 104, + 255, + 505, + 333 + ], + "type": "inline_equation", + "content": "T(x;\\epsilon_{\\mathrm{tr}}) = T(x;\\epsilon_{\\mathrm{te}})" + }, + { + "bbox": [ + 104, + 255, + 505, + 333 + ], + "type": "text", + "content": ". For example, Madry et al. (2018) show that the robust error sharply increases for " + }, + { + "bbox": [ + 104, + 255, + 505, + 333 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} < \\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 255, + 505, + 333 + ], + "type": "text", + "content": ". 
In this paper, we demonstrate that for directed attacks in the small sample size regime, in fact, the opposite is true." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 348, + 248, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 348, + 248, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 248, + 360 + ], + "type": "text", + "content": "3 THEORETICAL RESULTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 372, + 504, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 504, + 407 + ], + "type": "text", + "content": "In this section, we prove for linear functions " + }, + { + "bbox": [ + 104, + 372, + 504, + 407 + ], + "type": "inline_equation", + "content": "f_{\\theta}(x) = \\theta^{\\top}x" + }, + { + "bbox": [ + 104, + 372, + 504, + 407 + ], + "type": "text", + "content": " that in the case of directed attacks, robust generalization deteriorates with increasing " + }, + { + "bbox": [ + 104, + 372, + 504, + 407 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 372, + 504, + 407 + ], + "type": "text", + "content": ". The proof, albeit in a simple setting, provides explanations for why adversarial training fails in the high-dimensional regime for such attacks." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 418, + 170, + 429 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 418, + 170, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 418, + 170, + 429 + ], + "type": "text", + "content": "3.1 SETTING" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 439, + 402, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 439, + 402, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 402, + 451 + ], + "type": "text", + "content": "We now introduce the precise linear setting used in our theoretical results." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": "Data model We assume that the ground truth and hypothesis class are given by linear functions " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "f_{\\theta}(x) = \\theta^{\\top}x" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " and the sample size " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " is lower than the ambient dimension " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " minus one. 
The generative distribution " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " is similar to (Tsipras et al., 2019; Nagarajan & Kolter, 2019): The label " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "y \\in \\{+1, -1\\}" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " is drawn with equal probability and the covariate vector is sampled as " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "x = [y_{\\frac{r}{2}}, \\tilde{x}]" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " with the random vector " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\tilde{x} \\in \\mathbb{R}^{d-1}" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " drawn from a standard normal distribution, i.e. " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\tilde{x} \\sim \\mathcal{N}(0, \\sigma^2 I_{d-1})" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": ". We would like to learn a classifier that has low robust error by using a dataset " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "D = (x_i, y_i)_{i=1}^n" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " i.i.d. 
samples from " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": ". Intuitively, the separation distance " + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 457, + 504, + 537 + ], + "type": "text", + "content": " reflects the signal strength of the data distribution." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "text", + "content": "Notice that the distribution " + }, + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "text", + "content": " is noiseless: for a given input " + }, + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "text", + "content": ", the label " + }, + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "inline_equation", + "content": "y = \\mathrm{sign}(x_{[1]})" + }, + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "text", + "content": " is deterministic. Further, the Bayes optimal linear classifier (also referred to as the ground truth) is parameterized by the first standard coordinate vector, " + }, + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "inline_equation", + "content": "\\theta^{\\star} = e_1" + }, + { + "bbox": [ + 104, + 541, + 506, + 597 + ], + "type": "text", + "content": ". By definition, the ground truth is robust against all perturbations that do not change the sign in the first coordinate of the sample, i.e. 
consistent perturbations, and hence so is the optimal robust classifier." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 604, + 504, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 604, + 504, + 637 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 504, + 637 + ], + "type": "text", + "content": "Directed attacks In this paper, we focus on consistent directed attacks that by definition efficiently concentrate their attack budget to reduce the class information. For our linear setting this information lies in the first entry. Hence, we can model such attacks by additive perturbations in the first dimension" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 195, + 639, + 504, + 653 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 639, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 195, + 639, + 504, + 653 + ], + "type": "interline_equation", + "content": "T (x; \\epsilon) = \\left\\{x ^ {\\prime} = x + \\delta \\mid \\delta = \\beta e _ {1} \\text {a n d} - \\epsilon \\leq \\beta \\leq \\epsilon \\right\\}. \\tag {3}", + "image_path": "1f1ce6d1f54673d1f7c918b6eab2c70dec47e886dd68c7d9e01febb61709d2da.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 655, + 504, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 504, + 699 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 504, + 699 + ], + "type": "text", + "content": "Note that this attack is always in the direction of the signal dimension, i.e. the Bayes optimal classifier or equivalently the ground truth. Furthermore, when " + }, + { + "bbox": [ + 104, + 655, + 504, + 699 + ], + "type": "inline_equation", + "content": "\\epsilon < \\frac{r}{2}" + }, + { + "bbox": [ + 104, + 655, + 504, + 699 + ], + "type": "text", + "content": ", it is a consistent directed attack. 
Observe how this is different from " + }, + { + "bbox": [ + 104, + 655, + 504, + 699 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 104, + 655, + 504, + 699 + ], + "type": "text", + "content": "-attacks — an " + }, + { + "bbox": [ + 104, + 655, + 504, + 699 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 104, + 655, + 504, + 699 + ], + "type": "text", + "content": " attack, depending on the model, may add a perturbation that only has a very small component in the signal direction." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "text", + "content": "1Note that the result more generally holds for non-sparse models that are not axis aligned by way of a simple rotation " + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "inline_equation", + "content": "z = Ux" + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "text", + "content": ". 
In that case the distribution is characterized by " + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "inline_equation", + "content": "\\theta^{\\star} = u_{1}" + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "inline_equation", + "content": "u_{1}" + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "text", + "content": " is the first column vector of " + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "text", + "content": ", and a rotated Gaussian in the " + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "inline_equation", + "content": "d - 1" + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "text", + "content": " dimensions orthogonal to " + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "inline_equation", + "content": "\\theta^{\\star}" + }, + { + "bbox": [ + 104, + 700, + 505, + 732 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 124, + 79, + 243, + 152 + ], + "blocks": [ + { + "bbox": [ + 124, + 79, + 243, + 152 + ], + "lines": [ + { + "bbox": [ + 124, + 79, + 243, + 152 + ], + "spans": [ + { + "bbox": [ + 124, + 79, + 243, + 152 + ], + "type": "image", + "image_path": "0119c40cdabdd7186fc2459084cebce9704f5969cd0ff3776576c1b7ada73052.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 156, + 487, + 168 + ], + "lines": [ + { + "bbox": [ + 123, + 156, + 487, + 168 + ], + "spans": [ + { + "bbox": [ + 123, + 156, + 487, + 168 + ], + "type": "text", + "content": "(a) Robust error increase with " + }, + { + "bbox": [ + 123, + 156, + 487, + 168 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 123, + 156, + 487, + 168 + ], + "type": "text", + "content": " (b) Standard-adversarial training (c) Effect of overparameterization" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 245, + 79, + 365, + 152 + ], + "blocks": [ + { + "bbox": [ + 245, + 79, + 365, + 152 + ], + "lines": [ + { + "bbox": [ + 245, + 79, + 365, + 152 + ], + "spans": [ + { + "bbox": [ + 245, + 
79, + 365, + 152 + ], + "type": "image", + "image_path": "f3fa5b762288fef2b2c93a37e2bb3c5345351eff1a6e13f6490d5a706dae9852.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "lines": [ + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "text", + "content": "Figure 3: Experimental verification of Theorem 3.1. (a) We set " + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "inline_equation", + "content": "d = 1000" + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "inline_equation", + "content": "r = 12" + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "inline_equation", + "content": "n = 50" + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "text", + "content": ". The robust error gap between standard and adversarial training as a function of the adversarial budget " + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 5" + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "text", + "content": " independent experiments (blue) and the lower bound given in Theorem 3.1 (gray). In (b) and (c), we set " + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "inline_equation", + "content": "d = 10000" + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "text", + "content": " and vary the number of samples " + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "text", + "content": ". 
(b) The robust error of standard and adversarial training with " + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 4.5" + }, + { + "bbox": [ + 104, + 177, + 504, + 228 + ], + "type": "text", + "content": ". (c) The error gap and the lower bound of Theorem 3.1. For more experimental details see Appendix C." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 366, + 79, + 485, + 152 + ], + "blocks": [ + { + "bbox": [ + 366, + 79, + 485, + 152 + ], + "lines": [ + { + "bbox": [ + 366, + 79, + 485, + 152 + ], + "spans": [ + { + "bbox": [ + 366, + 79, + 485, + 152 + ], + "type": "image", + "image_path": "a0e1558dbd11522644d5a3300bc9272536b8d0027c140ae0e2b01ff2764679d9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 234, + 504, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 504, + 301 + ], + "type": "text", + "content": "Robust max-" + }, + { + "bbox": [ + 104, + 234, + 504, + 301 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 234, + 504, + 301 + ], + "type": "text", + "content": "-margin classifier We study a classifier that is the solution of running gradient descent on the adversarial logistic loss. A long line of work (Soudry et al., 2018; Ji & Telgarsky, 2019; Chizat & Bach, 2020; Nacson et al., 2019; Liu et al., 2020) studies the implicit bias of (S)GD on the (standard) logistic loss and separable data. In particular, they show directional convergence to the max-margin solution. 
For the adversarial logistic loss and linear models in particular, (S)GD converges to the robust max-" + }, + { + "bbox": [ + 104, + 234, + 504, + 301 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 234, + 504, + 301 + ], + "type": "text", + "content": "-margin solution (Li et al., 2020)," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 222, + 306, + 504, + 330 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 306, + 504, + 330 + ], + "spans": [ + { + "bbox": [ + 222, + 306, + 504, + 330 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} := \\underset {\\| \\theta \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ], x _ {i} ^ {\\prime} \\in T (x _ {i}; \\epsilon_ {\\mathrm {t r}})} {\\min } y _ {i} \\theta^ {\\top} x _ {i} ^ {\\prime}. \\tag {4}", + "image_path": "7fe1d6efab2f445c21f838484c0fd658518ede1fd9344cd4a3f0a3a4f305ef14.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 335, + 504, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 335, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 504, + 358 + ], + "type": "text", + "content": "Even though our result is proven for the max-" + }, + { + "bbox": [ + 104, + 335, + 504, + 358 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 335, + 504, + 358 + ], + "type": "text", + "content": "-margin classifier, it can easily be extended to other interpolators." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 371, + 197, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 371, + 197, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 197, + 381 + ], + "type": "text", + "content": "3.2 MAIN RESULTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "text", + "content": "We are now ready to characterize the " + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "text", + "content": "-robust error as a function of " + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "text", + "content": ", the separation " + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "text", + "content": ", the dimension " + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "text", + "content": " and sample size " + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 391, + 504, + 415 + ], + "type": "text", + "content": " of the data. 
In the theorem statement we use the following quantities" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 192, + 418, + 416, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 418, + 416, + 452 + ], + "spans": [ + { + "bbox": [ + 192, + 418, + 416, + 452 + ], + "type": "interline_equation", + "content": "\\varphi_ {\\min } = \\frac {\\sigma}{r / 2 - \\epsilon_ {\\mathrm {t e}}} \\left(\\sqrt {\\frac {d - 1}{n}} - \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right)\\right)", + "image_path": "ce1591083ab42673c5d378ee368c695c8da16ecb616e0216f47014cae9921ee7.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 192, + 453, + 416, + 486 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 453, + 416, + 486 + ], + "spans": [ + { + "bbox": [ + 192, + 453, + 416, + 486 + ], + "type": "interline_equation", + "content": "\\varphi_ {\\max } = \\frac {\\sigma}{r / 2 - \\epsilon_ {\\mathrm {t e}}} \\left(\\sqrt {\\frac {d - 1}{n}} + \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right)\\right)", + "image_path": "95c717897efa653d034fbd10b83f901536175ff1939d178233cd4d1685caee67.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 490, + 504, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 504, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 504, + 515 + ], + "type": "text", + "content": "that arise from concentration bounds for the singular values of the random data matrix. 
Further, let " + }, + { + "bbox": [ + 104, + 490, + 504, + 515 + ], + "type": "inline_equation", + "content": "\\tilde{\\epsilon} := \\frac{r}{2} - \\frac{\\varphi_{\\max}}{\\sqrt{2}}" + }, + { + "bbox": [ + 104, + 490, + 504, + 515 + ], + "type": "text", + "content": " and denote by " + }, + { + "bbox": [ + 104, + 490, + 504, + 515 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 104, + 490, + 504, + 515 + ], + "type": "text", + "content": " the cumulative distribution function of a standard normal." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "text", + "content": "Theorem 3.1. Assume " + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "inline_equation", + "content": "d - 1 > n" + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "text", + "content": ". 
For test samples from " + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "text", + "content": ", perturbation set type " + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "text", + "content": " as in Equation 3 and any " + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "inline_equation", + "content": "0 \\leq \\epsilon_{te} < \\frac{r}{2}" + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "text", + "content": ", the following holds for the " + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "inline_equation", + "content": "\\epsilon_{te}" + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "text", + "content": "-robust error of the classifier (Equation 1) resulting from " + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "inline_equation", + "content": "\\epsilon_{tr}" + }, + { + "bbox": [ + 104, + 518, + 506, + 552 + ], + "type": "text", + "content": "-adversarial training:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 561, + 403, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 561, + 403, + 572 + ], + "spans": [ + { + "bbox": [ + 129, + 561, + 403, + 572 + ], + "type": "text", + "content": "1. 
The " + }, + { + "bbox": [ + 129, + 561, + 403, + 572 + ], + "type": "inline_equation", + "content": "\\epsilon_{te}" + }, + { + "bbox": [ + 129, + 561, + 403, + 572 + ], + "type": "text", + "content": "-robust error of the " + }, + { + "bbox": [ + 129, + 561, + 403, + 572 + ], + "type": "inline_equation", + "content": "\\epsilon_{tr}" + }, + { + "bbox": [ + 129, + 561, + 403, + 572 + ], + "type": "text", + "content": "-robust max-margin estimator reads" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 253, + 578, + 504, + 610 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 578, + 504, + 610 + ], + "spans": [ + { + "bbox": [ + 253, + 578, + 504, + 610 + ], + "type": "interline_equation", + "content": "\\operatorname {E r r} \\left(\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}\\right) = \\Phi \\left(- \\frac {\\left(\\frac {r}{2} - \\epsilon_ {t r}\\right)}{\\tilde {\\varphi}}\\right) \\tag {5}", + "image_path": "2a179768c62241d50721cfe653ce257a94606eb7d686f5cfd2eaee49387df7ba.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 615, + 504, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 615, + 504, + 638 + ], + "spans": [ + { + "bbox": [ + 138, + 615, + 504, + 638 + ], + "type": "text", + "content": "for a random quantity " + }, + { + "bbox": [ + 138, + 615, + 504, + 638 + ], + "type": "inline_equation", + "content": "\\tilde{\\varphi} > 0" + }, + { + "bbox": [ + 138, + 615, + 504, + 638 + ], + "type": "text", + "content": " depending on " + }, + { + "bbox": [ + 138, + 615, + 504, + 638 + ], + "type": "inline_equation", + "content": "\\sigma, r, \\epsilon_{te}" + }, + { + "bbox": [ + 138, + 615, + 504, + 638 + ], + "type": "text", + "content": " and is hence strictly increasing in the adversarial training budget " + }, + { + "bbox": [ + 138, + 615, + 504, + 638 + ], + "type": "inline_equation", + "content": "\\epsilon_{tr}" + }, + { + "bbox": [ + 138, + 615, + 
504, + 638 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 128, + 644, + 504, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 644, + 504, + 667 + ], + "spans": [ + { + "bbox": [ + 128, + 644, + 504, + 667 + ], + "type": "text", + "content": "2. With probability at least " + }, + { + "bbox": [ + 128, + 644, + 504, + 667 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 128, + 644, + 504, + 667 + ], + "type": "text", + "content": ", we further have " + }, + { + "bbox": [ + 128, + 644, + 504, + 667 + ], + "type": "inline_equation", + "content": "\\varphi_{\\min} \\leq \\tilde{\\varphi} \\leq \\varphi_{\\max}" + }, + { + "bbox": [ + 128, + 644, + 504, + 667 + ], + "type": "text", + "content": " and the following lower bound on the robust error increase by adversarially training with size " + }, + { + "bbox": [ + 128, + 644, + 504, + 667 + ], + "type": "inline_equation", + "content": "\\epsilon_{tr}" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 182, + 672, + 504, + 700 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 672, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 182, + 672, + 504, + 700 + ], + "type": "interline_equation", + "content": "\\operatorname {E r r} \\left(\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}\\right) - \\operatorname {E r r} \\left(\\widehat {\\theta} ^ {0}; \\epsilon_ {t e}\\right) \\geq \\Phi \\left(\\frac {r / 2}{\\varphi_ {\\min }}\\right) - \\Phi \\left(\\frac {r / 2 - \\min \\left\\{\\epsilon_ {t r} , \\widetilde {\\epsilon} \\right\\}}{\\varphi_ {\\min }}\\right). 
\\tag {6}", + "image_path": "4ebd251934ae52742c220323ba450651d4bd680cdf4c38ffc6562ae6dc67d316.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 709, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 734 + ], + "type": "text", + "content": "The proof can be found in Appendix A and primarily relies on estimation of singular values of high-dimensional matrices. Note that the theorem holds for any " + }, + { + "bbox": [ + 104, + 709, + 505, + 734 + ], + "type": "inline_equation", + "content": "0 \\leq \\epsilon_{\\mathrm{te}} < \\frac{r}{2}" + }, + { + "bbox": [ + 104, + 709, + 505, + 734 + ], + "type": "text", + "content": " and hence also directly" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 93, + 239, + 163 + ], + "blocks": [ + { + "bbox": [ + 111, + 93, + 239, + 163 + ], + "lines": [ + { + "bbox": [ + 111, + 93, + 239, + 163 + ], + "spans": [ + { + "bbox": [ + 111, + 93, + 239, + 163 + ], + "type": "image", + "image_path": "11a7371c179794f387e78dbb1dffa7b6a78670bc7a9813b49ce7cad1a08c10b2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 136, + 168, + 216, + 178 + ], + 
"lines": [ + { + "bbox": [ + 136, + 168, + 216, + 178 + ], + "spans": [ + { + "bbox": [ + 136, + 168, + 216, + 178 + ], + "type": "text", + "content": "(a) Robust error vs " + }, + { + "bbox": [ + 136, + 168, + 216, + 178 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 93, + 369, + 163 + ], + "blocks": [ + { + "bbox": [ + 242, + 93, + 369, + 163 + ], + "lines": [ + { + "bbox": [ + 242, + 93, + 369, + 163 + ], + "spans": [ + { + "bbox": [ + 242, + 93, + 369, + 163 + ], + "type": "image", + "image_path": "39bc6a2d0b245932b78657cad1591dc252b093589a70d83d2dc4e67a1653351f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 248, + 168, + 362, + 178 + ], + "lines": [ + { + "bbox": [ + 248, + 168, + 362, + 178 + ], + "spans": [ + { + "bbox": [ + 248, + 168, + 362, + 178 + ], + "type": "text", + "content": "(b) Robust error decomposition" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 370, + 79, + 499, + 164 + ], + "blocks": [ + { + "bbox": [ + 370, + 79, + 499, + 164 + ], + "lines": [ + { + "bbox": [ + 370, + 79, + 499, + 164 + ], + "spans": [ + { + "bbox": [ + 370, + 79, + 499, + 164 + ], + "type": "image", + "image_path": "40c9459235c3e2eac9b86560c410a3b8b2f3ccc2441903d056f733d89252a7ed.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 400, + 168, + 468, + 178 + ], + "lines": [ + { + "bbox": [ + 400, + 168, + 468, + 178 + ], + "spans": [ + { + "bbox": [ + 400, + 168, + 468, + 178 + ], + "type": "text", + "content": "(c) Intuition in 2D" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "spans": [ 
+ { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "text", + "content": "Figure 4: (a) We set " + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "inline_equation", + "content": "d = 1000" + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "inline_equation", + "content": "r = 12" + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "text", + "content": ". The robust error as a function of the adversarial training budget " + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "text", + "content": " for different " + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "inline_equation", + "content": "d / n" + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "text", + "content": ". (b) The robust error decomposition into susceptibility and standard error as a function of the adversarial budget " + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 188, + 504, + 251 + ], + "type": "text", + "content": ". Full experimental details can be found in Section C. (c) 2D illustration providing intuition for the linear setting. The effect of adversarial training with directed attacks is captured in the yellow dotted lines: adversarily perturbed training points move closer to the true boundary which in turn tilts the decision boundary more heavily in the wrong direction." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "text", + "content": "applies to the standard error by setting " + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 0" + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "text", + "content": ". In Figure 3, we empirically confirm the statements of Theorem 3.1 by performing multiple experiments on synthetic datasets as described in Subsection 3.1 with different choices of " + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "inline_equation", + "content": "d / n" + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "text", + "content": ". In the first statement, we prove that for small sample-size " + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "inline_equation", + "content": "(n < d - 1)" + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "text", + "content": " noiseless data, almost surely, the robust error increases monotonically with adversarial training budget " + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} > 0" + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "text", + "content": ". 
In Figure 3a, we plot the robust error gap between standard and adversarial logistic regression as a function of the adversarial training budget " + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 255, + 504, + 323 + ], + "type": "text", + "content": " for 5 runs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "text", + "content": "The second statement establishes a simplified lower bound on the robust error increase for adversarial training (for a fixed " + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = \\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "text", + "content": ") compared to standard training. In Figures 3a and 3c, we show how the lower bound closely predicts the robust error gap in our synthetic experiments. Furthermore, by the dependence of " + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "inline_equation", + "content": "\\varphi_{\\mathrm{min}}" + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "text", + "content": " on the overparameterization ratio " + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "inline_equation", + "content": "d / n" + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "text", + "content": ", the lower bound on the robust error gap is amplified for large " + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "inline_equation", + "content": "d / n" + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "text", + "content": ". 
Indeed, Figure 3c shows how the error gap increases with " + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "inline_equation", + "content": "d / n" + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "text", + "content": " both theoretically and experimentally. However, when " + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "inline_equation", + "content": "d / n" + }, + { + "bbox": [ + 104, + 327, + 504, + 406 + ], + "type": "text", + "content": " increases above a certain threshold, the gap decreases again, as standard training fails to learn the signal and yields a high error (see Figure 3b)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 415, + 209, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 209, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 209, + 426 + ], + "type": "text", + "content": "3.3 PROOF INTUITION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "text", + "content": "The reason that adversarial training hurts robust generalization is based on an extreme robust vs. standard error trade-off. 
We now provide intuition for the effect of directed attacks and the low sample regime on the " + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "text", + "content": "-robust max-" + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "text", + "content": "-margin solution by decomposing the robust error " + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{Err}(\\theta; \\epsilon_{\\mathrm{te}})" + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "text", + "content": ". Notice that " + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "text", + "content": "-robust error " + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{Err}(\\theta; \\epsilon_{\\mathrm{te}})" + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "text", + "content": " can be written as the probability of the union of two events: the event that the classifier based on " + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 433, + 506, + 489 + ], + "type": "text", + "content": " is wrong and the event that the classifier is susceptible to attacks:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 493, + 504, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 493, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 106, + 493, + 504, + 529 + ], + "type": "interline_equation", + "content": "\\operatorname {E r r} (\\theta ; \\epsilon_ {\\mathrm {t e}}) = \\mathbb {E} 
_ {x, y \\sim \\mathbb {P}} \\left[ \\mathbb {I} \\left\\{y f _ {\\theta} (x) < 0 \\right\\} \\vee \\max _ {x ^ {\\prime} \\in T (x; \\epsilon_ {\\mathrm {t e}})} \\mathbb {I} \\left\\{f _ {\\theta} (x) f _ {\\theta} \\left(x ^ {\\prime}\\right) < 0 \\right\\} \\right] \\leq \\operatorname {E r r} (\\theta ; 0) + \\operatorname {S u s c} (\\theta ; \\epsilon_ {\\mathrm {t e}}) \\tag {7}", + "image_path": "5bf5c5b87a040d75f3b3049eddd5893b633d5fb8f2fa28b79e8dab86373bb115.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\mathrm{Susc}(\\theta ;\\epsilon_{\\mathrm{te}})" + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "content": " is the expectation of the maximization term in Equation 7. " + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\mathrm{Susc}(\\theta ;\\epsilon_{\\mathrm{te}})" + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "content": " represents the " + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "content": "-attack-susceptibility of a classifier induced by " + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\mathrm{Err}(\\theta ;0)" + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "content": " its standard error. 
In our linear setting, we can lower bound Equation 7 by " + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\mathrm{Err}(\\theta ;0) + \\frac{1}{2}\\mathrm{Susc}(\\theta ;\\epsilon_{\\mathrm{te}})" + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "content": ". Hence, Equation 7 suggests that the robust error can only be small if both the standard error and susceptibility are small. In Figure 4b, we plot the decomposition of the robust error in standard error and susceptibility for adversarial logistic regression with increasing " + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "content": ". We observe that increasing " + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 529, + 506, + 640 + ], + "type": "text", + "content": " increases the standard error too drastically compared to the decrease in susceptibility, leading to a drop in robust accuracy. For completeness, in Appendix B, we provide upper and lower bounds for the susceptibility score. We now explain why, in the small-sample size regime, adversarial training with directed attacks 3 may increase standard error to the extent that it dominates the decrease in susceptibility." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": "A key observation is that the robust max-" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": "-margin solution of a dataset " + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "inline_equation", + "content": "D = \\{(x_i, y_i)\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": " maximizes the minimum margin that reads " + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\min_{i \\in [n]} y_i \\theta^\\top (x_i - y_i \\epsilon_{\\mathrm{tr}} | \\theta_{[1]}| e_1)" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\theta_{[i]}" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": " refers to the " + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": "-th entry of vector " + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": ". 
Therefore, it simply corresponds to the max-" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": "-margin solution of the dataset shifted towards the decision boundary " + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "inline_equation", + "content": "D_{\\epsilon_{\\mathrm{tr}}} = \\{(x_i - y_i \\epsilon_{\\mathrm{tr}} | \\widehat{\\theta}_{[1]}^{\\epsilon_{\\mathrm{tr}}} | e_1, y_i)\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": ". Using this fact, we obtain a closed-form expression of the (normalized) max-margin solution 4 as a function of " + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 644, + 504, + 706 + ], + "type": "text", + "content": " that reads" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 219, + 710, + 504, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 710, + 504, + 736 + ], + "spans": [ + { + "bbox": [ + 219, + 710, + 504, + 736 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{(r - 2 \\epsilon_ {\\mathrm {t r}}) ^ {2} + 4 \\tilde {\\gamma} ^ {2}} \\left[ r - 2 \\epsilon_ {\\mathrm {t r}}, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right], \\tag {8}", + "image_path": "f3075b56451d0461e5e24b40fdc43382ebb7be9d25b2c655177f0b4c8fa8fa52.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 
751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "inline_equation", + "content": "\\| \\tilde{\\theta} \\|_2 = 1" + }, + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma} > 0" + }, + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "text", + "content": " is a random quantity associated with the max- " + }, + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "text", + "content": "-margin solution of the " + }, + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "inline_equation", + "content": "d - 1" + }, + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "text", + "content": " dimensional Gaussian inputs orthogonal to the signal direction (see Lemma A.1 in Section A)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 212 + ], + "type": "text", + "content": "In high dimensions, with high probability any two Gaussian random vectors are far apart – in our distributional setting, this corresponds to the vectors being far apart in the non-signal directions. In Figure 4c, we illustrate the phenomenon using a 2D cartoon, where the few samples in the dataset are all far apart in the non-signal direction. We see how shifting the dataset closer to the true decision boundary, may result in a max-margin solution (yellow) that aligns much worse with the ground truth (gray), compared to the estimator learned from the original points (blue). Even though the new (robust max-margin) classifier (yellow) is less susceptible to attacks in the signal dimension, it also uses the signal dimension less. Mathematically, this is reflected in the expression of the max-margin solution in Equation 8: We see that the first (signal) dimension is used less as " + }, + { + "bbox": [ + 104, + 110, + 506, + 212 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 110, + 506, + 212 + ], + "type": "text", + "content": " increases." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 220, + 263, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 263, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 263, + 231 + ], + "type": "text", + "content": "3.4 GENERALITY OF THE RESULTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 238, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 238, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 238, + 504, + 251 + ], + "type": "text", + "content": "In this section we discuss how Theorem 3.1 might generalize to other perturbation sets and models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "text", + "content": "Signal direction is known The type of additive perturbations used in Theorem 3.1, defined in Equation 3, is explicitly constrained to the direction of the true signal. This choice is reminiscent of corruptions where every possible perturbation in the set is directly targeted at the object to be recognized, such as motion blur of moving objects. Such corruptions are also studied in the context of domain generalization and adaptation (Schneider et al., 2020)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 317, + 504, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 317, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 317, + 504, + 352 + ], + "type": "text", + "content": "Directed attacks in general, however, may also consist of perturbation sets that are only strongly biased towards the true signal direction. They may find the true signal direction only when the inner maximization is exact. 
The following corollary extends Theorem 3.1 to small " + }, + { + "bbox": [ + 104, + 317, + 504, + 352 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 104, + 317, + 504, + 352 + ], + "type": "text", + "content": "-perturbations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 230, + 356, + 504, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 356, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 230, + 356, + 504, + 370 + ], + "type": "interline_equation", + "content": "T (x; \\epsilon) = \\left\\{x ^ {\\prime} = x + \\delta \\mid \\| \\delta \\| _ {1} \\leq \\epsilon \\right\\}, \\tag {9}", + "image_path": "a54d0e5414950804c084ea082f7c12a6b37969f37841c113af77348d96b1be14.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 374, + 506, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 506, + 388 + ], + "type": "text", + "content": "for " + }, + { + "bbox": [ + 104, + 374, + 506, + 388 + ], + "type": "inline_equation", + "content": "0 < \\epsilon < \\frac{r}{2}" + }, + { + "bbox": [ + 104, + 374, + 506, + 388 + ], + "type": "text", + "content": " that reflect such attacks. We state the corollary here and give the proof in Appendix A." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 389, + 432, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 389, + 432, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 389, + 432, + 402 + ], + "type": "text", + "content": "Corollary 3.2. Theorem 3.1 also holds for 4 with perturbation sets defined in 9." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 405, + 504, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 405, + 504, + 429 + ], + "spans": [ + { + "bbox": [ + 104, + 405, + 504, + 429 + ], + "type": "text", + "content": "The proof uses the fact that the inner maximization effectively results in a sparse perturbation equivalent to the attack resulting from the perturbation set defined in Equation 3." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "text", + "content": "Other models Motivated by the implicit bias results of (stochastic) gradient descent on the logistic loss, Theorem 3.1 is proven for the max-" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "text", + "content": "-margin solution. We would like to conjecture that for the data distribution in Section 3, adversarial training can hurt robust generalization also for other models with zero training error (interpolators in short). For example, Adaboost is a widely used algorithm that converges to the max-" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "text", + "content": "-margin classifier (Telgarsky, 2013). One might argue that for a sparse ground truth, the max-" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "text", + "content": "-margin classifier should (at least in the noiseless case) have the right inductive bias to alleviate large bias in high dimensions. 
Hence, in many cases the (sparse) max-" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "text", + "content": "-margin solution might align with the ground truth for a given dataset. However, we conjecture that even in this case, the robust max-" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 104, + 434, + 506, + 545 + ], + "type": "text", + "content": "-margin solution would be misled to choose a wrong sparse solution. This can be seen with the help of the cartoon illustration in Figure 4c." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 555, + 274, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 274, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 274, + 567 + ], + "type": "text", + "content": "4 REAL-WORLD EXPERIMENTS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 576, + 506, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 506, + 655 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 506, + 655 + ], + "type": "text", + "content": "In this section, we demonstrate that the proof intuition of the linear case may generalize to more complex models. Specifically, the insights from Section 3 helped us to identify realistic directed attacks on standard image datasets for which adversarial training hurts robust accuracy in the low sample regime. In what follows, we present experimental results for corruption attacks on the Waterbirds dataset. Due to space constraints, results on the mask attacks on CIFAR-10 can be found in Appendix E. The corresponding experimental details and more results on other additional image datasets (such as the hand gestures dataset) can be found in Appendices D, E and F." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 667, + 236, + 678 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 236, + 678 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 236, + 678 + ], + "type": "text", + "content": "4.1 DATASETS AND MODELS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "type": "text", + "content": "We consider three datasets: the Waterbirds dataset, CIFAR-10 and a hand gesture dataset. Due to space constraints, we describe CIFAR-10 and the hand gesture dataset in Appendix E and F. Apart from CIFAR-10 and the hand gesture dataset, we build a new version of the Waterbirds dataset, consisting of images of water- and landbirds of size " + }, + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "type": "text", + "content": " and labels that distinguish the two" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 79, + 239, + 159 + ], + "blocks": [ + { + "bbox": [ + 108, + 79, + 239, + 
159 + ], + "lines": [ + { + "bbox": [ + 108, + 79, + 239, + 159 + ], + "spans": [ + { + "bbox": [ + 108, + 79, + 239, + 159 + ], + "type": "image", + "image_path": "2f1792755900aa0a2ae6a16b916feebafc887c18064a2ec4876c18540ace6f1e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 163, + 237, + 175 + ], + "lines": [ + { + "bbox": [ + 111, + 163, + 237, + 175 + ], + "spans": [ + { + "bbox": [ + 111, + 163, + 237, + 175 + ], + "type": "text", + "content": "(a) Robust error with increasing " + }, + { + "bbox": [ + 111, + 163, + 237, + 175 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 81, + 369, + 159 + ], + "blocks": [ + { + "bbox": [ + 242, + 81, + 369, + 159 + ], + "lines": [ + { + "bbox": [ + 242, + 81, + 369, + 159 + ], + "spans": [ + { + "bbox": [ + 242, + 81, + 369, + 159 + ], + "type": "image", + "image_path": "2e512a26d35f101675ce39afd985a02b24707f099404378131e77f7f20e80434.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "lines": [ + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "text", + "content": "Figure 5: Experiments on the Waterbirds dataset considering the adversarial illumination attack with " + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 0.3" + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "text", + "content": ". We plot the mean and standard deviation of the mean of several independent experiments. 
(a) The robust error increases with larger " + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "text", + "content": " in the low sample size regime. (b) We set " + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "inline_equation", + "content": "n = 20" + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "text", + "content": " and plot the robust error decomposition as in Equation 7 with increasing " + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "text", + "content": ". While the susceptibility decreases slightly, the increase in standard error is much more severe, resulting in an increase in robust error. (c) The robust error of standard training and adversarial training as a function of the number of samples, where the smallest sample size still yields small (" + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "inline_equation", + "content": "< 10\\%" + }, + { + "bbox": [ + 104, + 184, + 506, + 266 + ], + "type": "text", + "content": ") standard test error for standard training. While adversarial training hurts for small sample sizes, it helps for larger sample sizes. For more experimental details see App. D." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 372, + 86, + 502, + 159 + ], + "blocks": [ + { + "bbox": [ + 247, + 163, + 362, + 175 + ], + "lines": [ + { + "bbox": [ + 247, + 163, + 362, + 175 + ], + "spans": [ + { + "bbox": [ + 247, + 163, + 362, + 175 + ], + "type": "text", + "content": "(b) Robust error decomposition" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 372, + 86, + 502, + 159 + ], + "lines": [ + { + "bbox": [ + 372, + 86, + 502, + 159 + ], + "spans": [ + { + "bbox": [ + 372, + 86, + 502, + 159 + ], + "type": "image", + "image_path": "2bf92660f987cf1228c27d11c2f82a9521dc4cd942f53ba6f8942dbe360b164d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 382, + 163, + 490, + 175 + ], + "lines": [ + { + "bbox": [ + 382, + 163, + 490, + 175 + ], + "spans": [ + { + "bbox": [ + 382, + 163, + 490, + 175 + ], + "type": "text", + "content": "(c) Robust error vs. #samples" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 270, + 506, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 348 + ], + "type": "text", + "content": "types of birds. Using code provided by Sagawa et al. (2020), we construct the dataset as follows: First, we sample equally many water- and landbirds from the CUB-200 dataset (Welinder et al., 2010). Then, we segment the birds and paste them onto a background image that is randomly sampled (without replacement) from the Places-256 dataset (Zhou et al., 2017). Also, following the choice of Sagawa et al. (2020), we use as models a ResNet50 and a ResNet18 that were both pretrained on ImageNet and achieve near perfect standard accuracy. 
In Appendix D, we complement the results of this section by reporting the results of similar experiments with different architectures." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 361, + 330, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 330, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 330, + 371 + ], + "type": "text", + "content": "4.2 IMPLEMENTATION OF THE DIRECTED ATTACKS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 380, + 506, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 448 + ], + "type": "text", + "content": "In this section, we consider two attacks on the Waterbirds dataset: motion blur and adversarial illumination as depicted in Figure 2. In Appendix E, we also discuss the mask attack, which should mimic occlusions of objects in images that are physically realizable (Eykholt et al., 2018; Wu et al., 2020). On the other hand, motion blur may arise naturally when photographing fast moving objects with a slow shutter speed. Lastly, adversarial illumination may result from adversarial lighting conditions. Next, we describe the motion blur and adversarial illumination attacks in more detail." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "text", + "content": "Motion blur For the Waterbirds dataset we can implement motion blur attacks on the object (the bird) specifically, a natural corruption that could occur if birds move at speeds that are faster than the shutter speed. 
The aim is to be robust against all motion blur severity levels up to " + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "inline_equation", + "content": "M_{max} = 15" + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "text", + "content": ". To simulate motion blur, we apply a motion blur filter with a kernel of size " + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "text", + "content": " on the segmented bird before we paste it onto the background image. We can change the severity level of the motion blur by increasing the kernel size of the filter. See Appendix D for concrete expressions of the motion blur kernel. Intuitively the worst attack should be the most severe blur, rendering a search over a range of severity superfluous. However, similar to rotations, this is not necessarily true in practice since the training loss on neural networks is generally nonconvex. Therefore, for an exact evaluation of the robust error at test time, we perform a full grid search over all kernel sizes in " + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "inline_equation", + "content": "[1,2,\\dots,M_{max}]" + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "text", + "content": ". We refer to Figure 2d and Section D for an illustration of our motion blur attack. During training time, we perform an approximate search over kernels with sizes " + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "inline_equation", + "content": "2i" + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "inline_equation", + "content": "i = 1,\\dots,M_{max}/2" + }, + { + "bbox": [ + 104, + 453, + 506, + 587 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": "Adversarial illumination As a second attack on the Waterbirds dataset, we consider adversarial illumination. The adversary can darken or brighten the bird without corrupting the background of the image. The attack aims to model images where the object at interest is hidden in shadows or placed against bright light. To compute the adversarial illumination attack, we modify the brightness of the segmented bird by adding a constant " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "a \\in [-\\epsilon_{\\mathrm{te}}, \\epsilon_{\\mathrm{te}}]" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " to all pixel values, before pasting the bird onto the background image. With an analogous argument as for the adversarial search for motion blur, the exact evaluation requires an actual search over the interval " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "[- \\epsilon_{\\mathrm{te}}, \\epsilon_{\\mathrm{te}}]" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": ". We find the most adversarial lighting level, i.e. 
the value of " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": ", by equidistantly partitioning the interval " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "[- \\epsilon_{\\mathrm{te}}, \\epsilon_{\\mathrm{te}}]" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " steps and performing a full list-search over all steps. See Figure 2c and Appendix D for an illustration of the adversarial illumination attack. We choose " + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "inline_equation", + "content": "K = 65, 33" + }, + { + "bbox": [ + 104, + 592, + 506, + 704 + ], + "type": "text", + "content": " during test and training time respectively." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": "Adversarial training For all datasets and attacks, we run SGD until convergence on the robust cross-entropy loss 2. 
In each iteration, we search for an adversarial example as described above and" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 79, + 239, + 158 + ], + "blocks": [ + { + "bbox": [ + 112, + 79, + 239, + 158 + ], + "lines": [ + { + "bbox": [ + 112, + 79, + 239, + 158 + ], + "spans": [ + { + "bbox": [ + 112, + 79, + 239, + 158 + ], + "type": "image", + "image_path": "9b0fbfe233ca438fe0d94437618e0cef512148bc02efadc540b13a3ead58cd5d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 112, + 162, + 239, + 174 + ], + "lines": [ + { + "bbox": [ + 112, + 162, + 239, + 174 + ], + "spans": [ + { + "bbox": [ + 112, + 162, + 239, + 174 + ], + "type": "text", + "content": "(a) Robust error with increasing " + }, + { + "bbox": [ + 112, + 162, + 239, + 174 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 79, + 369, + 158 + ], + "blocks": [ + { + "bbox": [ + 242, + 79, + 369, + 158 + ], + "lines": [ + { + "bbox": [ + 242, + 79, + 369, + 158 + ], + "spans": [ + { + "bbox": [ + 242, + 79, + 369, + 158 + ], + "type": "image", + "image_path": 
"6cd0e52c6637acdb28d0b7b8c2632185837ce80d86fca17c63afd52390a4e5b2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "type": "text", + "content": "Figure 6: Experiments on the (subsampled) Waterbirds dataset using the motion blur attack. (a) Even though adversarial training hurts robust generalization for low sample size (" + }, + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "type": "inline_equation", + "content": "n = 20" + }, + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "type": "text", + "content": "), it helps for " + }, + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "type": "inline_equation", + "content": "n = 50" + }, + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "type": "text", + "content": ". (b) For " + }, + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "type": "inline_equation", + "content": "n = 20" + }, + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "type": "text", + "content": ", the decomposition of the robust error in standard error and susceptibility as a function of adversarial budget " + }, + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 182, + 506, + 264 + ], + "type": "text", + "content": ". The increase in standard error is more severe than the drop in susceptibility, leading to a slight increase in robust error. (c) The robust error of standard and adversarial training on settings where the test error after standard training is small as a function of the number of samples. While adversarial training hurts for small sample sizes, it helps for larger sample sizes. For each experiment we plot the mean and standard deviation of the mean of independent experiments. For more experimental details see App. D." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 372, + 80, + 497, + 158 + ], + "blocks": [ + { + "bbox": [ + 247, + 162, + 362, + 174 + ], + "lines": [ + { + "bbox": [ + 247, + 162, + 362, + 174 + ], + "spans": [ + { + "bbox": [ + 247, + 162, + 362, + 174 + ], + "type": "text", + "content": "(b) Robust error decomposition" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 372, + 80, + 497, + 158 + ], + "lines": [ + { + "bbox": [ + 372, + 80, + 497, + 158 + ], + "spans": [ + { + "bbox": [ + 372, + 80, + 497, + 158 + ], + "type": "image", + "image_path": "a05f6749a5966589bd482fc2270139528f0e355f0c79e9729cedbf98242a810c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 162, + 488, + 174 + ], + "lines": [ + { + "bbox": [ + 380, + 162, + 488, + 174 + ], + "spans": [ + { + "bbox": [ + 380, + 162, + 488, + 174 + ], + "type": "text", + "content": "(c) Robust error vs. #samples" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 273, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 273, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 504, + 307 + ], + "type": "text", + "content": "update the weights using a gradient with respect to the resulting perturbed example (Goodfellow et al., 2015; Madry et al., 2018). For every experiment, we choose the learning rate and weight decay parameters that minimize the robust error on a hold-out dataset." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 319, + 400, + 329 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 400, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 400, + 329 + ], + "type": "text", + "content": "4.3 ADVERSARIAL TRAINING CAN HURT ROBUST GENERALIZATION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "content": "We now present our experimental results on the Waterbirds dataset for both motion blur and adversarial illumination attacks. First of all, Figure 5a and 6a show that the phenomenon characterized in the linear setting by Theorem 3.1 also occurs for directed attacks on the Waterbirds dataset: as we increase the adversarial training budget " + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "content": " starting from zero (standard training), the robust error monotonically increases." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 397, + 506, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 506, + 529 + ], + "type": "text", + "content": "Furthermore, to gain intuition as described in Section 3.3, we also plot the robust error decomposition (Equation 7) consisting of the standard error and susceptibility in Figure 5b and 6b. Recall that we measure susceptibility as the fraction of data points in the test set for which the classifier predicts a different class under an adversarial attack. 
As in our linear example, we observe an increase in robust error despite a slight drop in susceptibility, because of the more severe increase in standard error. Moreover, Figures 1 and 6c show that analogous to our linear example, this phenomenon is specific to the low sample regime: for large sample size adversarial training outperforms standard training as expected. Note again that even the smallest sample size is large enough to yield a standard test error " + }, + { + "bbox": [ + 104, + 397, + 506, + 529 + ], + "type": "inline_equation", + "content": "< 10\\%" + }, + { + "bbox": [ + 104, + 397, + 506, + 529 + ], + "type": "text", + "content": " for standard training. Similar experiments for CIFAR-10 can be found in Appendix E. Finally, we empirically confirm in Appendix D.8 that our phenomenon is specific to directed attacks: for undirected attacks such as bounded " + }, + { + "bbox": [ + 104, + 397, + 506, + 529 + ], + "type": "inline_equation", + "content": "\\ell_{\\infty}" + }, + { + "bbox": [ + 104, + 397, + 506, + 529 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 397, + 506, + 529 + ], + "type": "inline_equation", + "content": "\\ell_{2}" + }, + { + "bbox": [ + 104, + 397, + 506, + 529 + ], + "type": "text", + "content": "-ball perturbations, adversarial training helps robust generalization also in the low sample size regime." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 541, + 186, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 541, + 186, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 541, + 186, + 552 + ], + "type": "text", + "content": "4.4 DISCUSSION" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 559, + 504, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 504, + 582 + ], + "type": "text", + "content": "We now discuss how different algorithmic choices, motivated by related work, might affect how adversarial training hurts robust generalization." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 590, + 506, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 506, + 669 + ], + "type": "text", + "content": "Catastrophic overfitting Often the worst-case perturbation during adversarial training is found using an approximate algorithm such as SGD. It is common belief that using the strongest attack (in the motion blur case, full grid search) during training also results in better robust generalization. In particular, the literature on catastrophic overfitting shows that weaker attacks during training lead to bad performance on stronger attacks during testing (Wong et al., 2020; Andriushchenko & Flammarion, 2020; Li et al., 2021). Our results suggest the opposite in the low sample size regime for directed attacks: the weaker the attack during training, the better adversarial training performs." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": "Robust overfitting Recent work observes empirically (Rice et al., 2020) and theoretically (Sanyal et al., 2020; Donhauser et al., 2021), that perfectly minimizing the adversarial loss during training might in fact be suboptimal for robust generalization; that is, classical regularization techniques might lead to higher robust accuracy. This phenomenon is often referred to as robust overfitting. May the phenomenon be mitigated using standard regularization techniques? In Appendix D we shed light" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "on this question and show that adversarial training hurts robust generalization even when standard regularization methods such as early stopping are used." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 209, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 209, + 133 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 209, + 133 + ], + "type": "text", + "content": "5 RELATED WORK" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 140, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 140, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 140, + 506, + 217 + ], + "type": "text", + "content": "Robust and non-robust useful features In the words of Ilyas et al. (2019) and Springer et al. (2021) we can describe the intuition behind \"our phenomenon\" as follows: for directed attacks, all robust features become less useful, but adversarial training uses robust features more. In the small sample-size regime, " + }, + { + "bbox": [ + 104, + 140, + 506, + 217 + ], + "type": "inline_equation", + "content": "n < d - 1" + }, + { + "bbox": [ + 104, + 140, + 506, + 217 + ], + "type": "text", + "content": " in particular, robust learning assigns too much weight on the robust (possibly non-useful) features that then dominate the non-robust (but useful) features. Even though they define these concepts, they don't make our statement, but show that adversarial training reduces the reliance on non-robust but possibly useful features." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 224, + 506, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 224, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 506, + 291 + ], + "type": "text", + "content": "Small sample size and robustness A direct consequence of Theorem 3.1 is that in order to achieve the same robust error as standard training, adversarial training requires more samples. This statement might remind the reader of sample complexity results for robust generalization in Schmidt et al. (2018); Yin et al. 
(2019); Khim & Loh (2018). While those results compare sample complexity bounds for standard vs. robust error, our theorem statement compares two algorithms, standard vs. adversarial training, with respect to the robust error." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 297, + 506, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 506, + 418 + ], + "type": "text", + "content": "Trade-off between standard and robust error Many papers observed that even though adversarial training decreases robust error compared to standard training, it may lead to an increase in standard test error Madry et al. (2018); Zhang et al. (2019). For example, Tsipras et al. (2019); Zhang et al. (2019); Javanmard et al. (2020); Dobriban et al. (2020); Chen et al. (2020) study settings where the Bayes optimal robust classifier is not equal to the Bayes optimal (standard) classifier (i.e. the perturbations are inconsistent or the dataset is non-separable). Raghunathan et al. (2020) study consistent perturbations, as in our paper, and prove that for small sample size, fitting adversarial examples can increase standard error even in the absence of noise. Empirically, Dong et al. (2021); Mendonça et al. (2022) show that for " + }, + { + "bbox": [ + 104, + 297, + 506, + 418 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 104, + 297, + 506, + 418 + ], + "type": "text", + "content": "-attacks low-quality data might be the main cause of the trade-off. While aforementioned works focus on the decrease in standard error, we prove that for directed attacks, in the small sample regime adversarial training may in fact increase robust error." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 424, + 506, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 424, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 424, + 506, + 523 + ], + "type": "text", + "content": "Mitigation of the trade-off A long line of work has proposed procedures to mitigate the trade-off between robust and standard accuracy. For example Alayrac et al. (2019); Carmon et al. (2019); Zhai et al. (2019); Raghunathan et al. (2020) study robust self training, which leverages a large set of unlabelled data, while Lee et al. (2020); Lamb et al. (2019); Xu et al. (2020) use data augmentation by interpolation. Ding et al. (2020); Balaji et al. (2019); Cheng et al. (2020) on the other hand propose to use adaptive perturbation budgets " + }, + { + "bbox": [ + 104, + 424, + 506, + 523 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 424, + 506, + 523 + ], + "type": "text", + "content": " that vary across inputs. The intuition behind our theoretical analysis suggests that the standard mitigation procedures for imperceptible perturbations may not work for perceptible directed attacks, because all relevant features are non-robust. We leave a thorough empirical study as interesting future work." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 533, + 285, + 545 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 533, + 285, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 285, + 545 + ], + "type": "text", + "content": "6 SUMMARY AND FUTURE WORK" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 558, + 506, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 558, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 506, + 701 + ], + "type": "text", + "content": "This paper aims to caution the practitioner against blindly following current widespread practices to increase the robust performance of machine learning models. Specifically, adversarial training is currently recognized to be one of the most effective defense mechanisms for " + }, + { + "bbox": [ + 104, + 558, + 506, + 701 + ], + "type": "inline_equation", + "content": "\\ell_p" + }, + { + "bbox": [ + 104, + 558, + 506, + 701 + ], + "type": "text", + "content": "-perturbations, significantly outperforming robust performance of standard training. However, we prove that in the low sample size regime this common wisdom is not applicable for consistent directed attacks, which efficiently focus their attack budget to target the ground truth class information. In terms of follow-up work on directed attacks in the low sample regime, there are some concrete questions that would be interesting to explore. For example, as discussed in Section 5, it would be useful to test whether some methods to mitigate the standard accuracy vs. robustness trade-off would also relieve the perils of adversarial training for directed attacks. Further, we hypothesize that when few samples are available, one should avoid training with attacks that may heavily reduce class information, independently of the attacks at test time. 
If this hypothesis were confirmed, it would break with yet another general rule that the best defense perturbation type should always match the attack during evaluation." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 221, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 221, + 94 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 221, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 339, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 339, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 339, + 118 + ], + "type": "text", + "content": "Supported by the Hasler Foundation grant number 21050." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 133, + 176, + 145 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 133, + 176, + 145 + ], + "spans": [ + { + "bbox": [ + 106, + 133, + 176, + 145 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 150, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 150, + 506, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 150, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 506, + 186 + ], + "type": "text", + "content": "Jean-Baptiste Alayrac, Jonathan Uesato, Po-Sen Huang, Alhussein Fawzi, Robert Stanforth, and Pushmeet Kohli. Are labels required for improving adversarial robustness? Advances in Neural Information Processing Systems, pp. 12214-12223, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 190, + 505, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 190, + 505, + 215 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 505, + 215 + ], + "type": "text", + "content": "Maksym Andriushchenko and Nicolas Flammarion. Understanding and improving fast adversarial training. Advances in Neural Information Processing Systems, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 220, + 506, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 220, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 106, + 220, + 506, + 266 + ], + "type": "text", + "content": "Tao Bai, Jinqi Luo, Jun Zhao, Bihan Wen, and Qian Wang. Recent advances in adversarial training for adversarial robustness. In Zhi-Hua Zhou (ed.), The 30th International Joint Conference on Artificial Intelligence, pp. 4312-4321. International Joint Conferences on Artificial Intelligence Organization, 2021." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 270, + 504, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 504, + 295 + ], + "type": "text", + "content": "Yogesh Balaji, Tom Goldstein, and Judy Hoffman. Instance adaptive adversarial training: Improved accuracy tradeoffs in neural nets. arXiv preprint arXiv:1910.08051, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 300, + 427, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 427, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 427, + 314 + ], + "type": "text", + "content": "G. Bradski. The OpenCV Library. Dr. Dobb's Journal of Software Tools, 2000." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 318, + 505, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 318, + 505, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 505, + 353 + ], + "type": "text", + "content": "Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, Percy Liang, and John C Duchi. Unlabeled data improves adversarial robustness. In The 33rd International Conference on Neural Information Processing Systems, pp. 11192-11203, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 358, + 505, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 358, + 505, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 505, + 393 + ], + "type": "text", + "content": "Lin Chen, Yifei Min, Mingrui Zhang, and Amin Karbasi. More data can expand the generalization gap between adversarially robust and standard models. In The 36th International Conference on Machine Learning, pp. 1670-1680, 2020." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 398, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 398, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 398, + 504, + 422 + ], + "type": "text", + "content": "Minhao Cheng, Qi Lei, Pin-Yu Chen, Inderjit Dhillon, and Cho-Jui Hsieh. Cat: Customized adversarial training for improved robustness. arXiv preprint arXiv:2002.06789, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 427, + 506, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 427, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 427, + 506, + 462 + ], + "type": "text", + "content": "Lenaic Chizat and Francis Bach. Implicit bias of gradient descent for wide two-layer neural networks trained with the logistic loss. In The 7th International Conference on Learning Theory, pp. 1305-1338, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 468, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 506, + 502 + ], + "type": "text", + "content": "Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In The 37th International Conference on Machine Learning, pp. 2206-2216, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 508, + 505, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 508, + 505, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 505, + 543 + ], + "type": "text", + "content": "Gavin Weiguang Ding, Yash Sharma, Kry Yik Chau Lui, and Ruitong Huang. Mma training: Direct input space margin maximization through adversarial training. In The 8th International Conference on Learning Representations, 2020." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 548, + 506, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 506, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 506, + 572 + ], + "type": "text", + "content": "Edgar Dobriban, Hamed Hassani, David Hong, and Alexander Robey. Provable tradeoffs in adversarially robust classification. arXiv preprint arXiv:2006.05161, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 577, + 504, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 577, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 504, + 602 + ], + "type": "text", + "content": "Chengyu Dong, Liyuan Liu, and Jingbo Shang. Data quality matters for adversarial training: An empirical study, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 606, + 506, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 506, + 641 + ], + "type": "text", + "content": "Konstantin Donhauser, Alexandru Tifrea, Michael Aerni, Reinhard Heckel, and Fanny Yang. Interpolation can hurt robust generalization even when there is no noise. The 36th conference on Advances in Neural Information Processing Systems, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 647, + 506, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 506, + 681 + ], + "type": "text", + "content": "Logan Engstrom, Brandon Tran, Dimitris Tsipras, Ludwig Schmidt, and Aleksander Madry. Exploring the landscape of spatial robustness. In The 36th International Conference on Machine Learning, pp. 1802-1811, 2019." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 686, + 506, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 686, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 506, + 731 + ], + "type": "text", + "content": "Kevin Eykholt, Ivan Evtimov, Earlence Fernandes, Bo Li, Amir Rahmati, Chaowei Xiao, Atul Prakash, Tadayoshi Kohno, and Dawn Song. Robust physical-world attacks on deep learning visual classification. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1625-1634, 2018." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Amin Ghiasi, Ali Shafahi, and Tom Goldstein. Breaking certified defenses: semantic adversarial examples with spoofed robustness certificates. In The 6th International Conference on Learning Representations, 2019." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 504, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 504, + 148 + ], + "type": "text", + "content": "Justin Gilmer, Ryan P Adams, Ian Goodfellow, David Andersen, and George E Dahl. Motivating the rules of the game for adversarial example research. arXiv preprint arXiv:1807.06732, 2018." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 153, + 504, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 153, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 107, + 153, + 504, + 178 + ], + "type": "text", + "content": "Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. In The 3th International Conference on Learning Representations, pp. 1-10, 2015." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 184, + 504, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 184, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 107, + 184, + 504, + 218 + ], + "type": "text", + "content": "Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Logan Engstrom, Brandon Tran, and Aleksander Madry. Adversarial examples are not bugs, they are features. In The 33rd conference on Advances in Neural Information Processing Systems, pp. 125-136, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "text", + "content": "Adel Javanmard, Mahdi Soltanolkotabi, and Hamed Hassani. Precise tradeoffs in adversarial training for linear regression. In Conference on Learning Theory, pp. 2034-2078, 2020." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 256, + 504, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 256, + 504, + 280 + ], + "spans": [ + { + "bbox": [ + 107, + 256, + 504, + 280 + ], + "type": "text", + "content": "Ziwei Ji and Matus Telgarsky. The implicit bias of gradient descent on nonseparable data. In *The 32nd Conference on Learning Theory*, pp. 1772-1798, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 286, + 504, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 286, + 504, + 311 + ], + "spans": [ + { + "bbox": [ + 107, + 286, + 504, + 311 + ], + "type": "text", + "content": "Daniel Kang, Yi Sun, Tom Brown, Dan Hendrycks, and Jacob Steinhardt. Transfer of adversarial robustness between perturbation types. arXiv e-prints, pp. arXiv-1905, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 316, + 504, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 504, + 340 + ], + "type": "text", + "content": "Justin Khim and Po-Ling Loh. Adversarial risk bounds via function transformation. arXiv preprint arXiv:1810.09519, 2018." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 346, + 504, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 346, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 504, + 371 + ], + "type": "text", + "content": "Cassidy Laidlaw, Sahil Singla, and Soheil Feizi. Perceptual adversarial robustness: Defense against unseen threat models. In The 9th International Conference on Learning Representation, 2021." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 377, + 504, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 377, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 107, + 377, + 504, + 412 + ], + "type": "text", + "content": "Alex Lamb, Vikas Verma, Juho Kannala, and Yoshua Bengio. Interpolated adversarial training: Achieving robust neural networks without sacrificing too much accuracy. In The 12th ACM Workshop on Artificial Intelligence and Security, pp. 95-103, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 418, + 506, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 418, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 107, + 418, + 506, + 453 + ], + "type": "text", + "content": "Saehyung Lee, Hyungyu Lee, and Sungroh Yoon. Adversarial vertex mixup: Toward better adversarily robust generalization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 460, + 504, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 460, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 107, + 460, + 504, + 483 + ], + "type": "text", + "content": "Bai Li, Shiqi Wang, Suman Jana, and Lawrence Carin. Towards understanding fast adversarial training. arXiv preprint arXiv:2006.03089, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 491, + 506, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 491, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 107, + 491, + 506, + 524 + ], + "type": "text", + "content": "Yan Li, Ethan X.Fang, Huan Xu, and Tuo Zhao. Implicit bias of gradient descent based adversarial training on separable data. In The 8th International Conference on Learning Representations, 2020." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 533, + 504, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 533, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 107, + 533, + 504, + 567 + ], + "type": "text", + "content": "Wei-An Lin, Chun Pong Lau, Alexander Levine, Rama Chellappa, and Soheil Feizi. Dual manifold adversarial robustness: Defense against lp and non-lp adversarial attacks. In The 34th conference on Advances in Neural Information Processing Systems, pp. 3487-3498, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 574, + 504, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 574, + 504, + 608 + ], + "spans": [ + { + "bbox": [ + 107, + 574, + 504, + 608 + ], + "type": "text", + "content": "Chen Liu, Mathieu Salzmann, Tao Lin, Ryota Tomioka, and Sabine Susstrunk. On the loss landscape of adversarial training: Identifying challenges and how to overcome them. In The 35th conference on Advances in Neural Information Processing Systems, pp. 21476-21487, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 615, + 504, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 615, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 107, + 615, + 504, + 660 + ], + "type": "text", + "content": "Bo Luo, Yannan Liu, Lingxiao Wei, and Qiang Xu. Towards imperceptible and robust adversarial example attacks against neural networks. In The 32nd AAAI Conference on Artificial Intelligence and Thirtieth Innovative Applications of Artificial Intelligence Conference and Eighth AAAI Symposium on Educational Advances in Artificial Intelligence, 2018." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 667, + 504, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 667, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 107, + 667, + 504, + 701 + ], + "type": "text", + "content": "Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In The 6th International Conference on Learning Representations, 2018." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 708, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 708, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 107, + 708, + 504, + 733 + ], + "type": "text", + "content": "Tomás Mantecón, Carlos R. del Blanco, Fernando Jaureguizar, and Narciso García. A real-time gesture recognition system using near-infrared imagery. PLOS ONE, pp. 1-17, 2019." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 117 + ], + "spans": [ + { + "bbox": 
[ + 105, + 81, + 506, + 117 + ], + "type": "text", + "content": "Marcele OK Mendonça, Javier Maroto, Pascal Frossard, and Paulo SR Diniz. Adversarial training with informed data selection. In The 30th European Signal Processing Conference (EUSIPCO), pp. 608-612, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "text", + "content": "Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, and Pascal Frossard. Deepfool: a simple and accurate method to fool deep neural networks. In The IEEE conference on computer vision and pattern recognition (CVPR), pp. 2574-2582, 2016." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 164, + 504, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 164, + 504, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 504, + 200 + ], + "type": "text", + "content": "Abdullah Mujahid, Mazhar Javed Awan, Awais Yasin, Mazin Abed Mohammed, Robertas Damaševićius, Rytis Maskeliūnas, and Karrar Hameed Abdulkareem. Real-time hand gesture recognition based on deep learning yolov3 model. Applied Sciences, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 206, + 504, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 206, + 504, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 206, + 504, + 241 + ], + "type": "text", + "content": "Mor Shpigel Nacson, Nathan Srebro, and Daniel Soudry. Stochastic gradient descent on separable data: Exact convergence with a fixed learning rate. In The 22nd International Conference on Artificial Intelligence and Statistics, pp. 3051-3059, 2019." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 247, + 505, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 505, + 282 + ], + "type": "text", + "content": "Vaishnavh Nagarajan and J. Zico Kolter. Uniform convergence may be unable to explain generalization in deep learning. In The 33d conference on Advances in Neural Information Processing Systems, pp. 11611-11622, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 289, + 504, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 289, + 504, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 504, + 313 + ], + "type": "text", + "content": "Munir Oudah, Ali Al-Naji, and Javaan Chahl. Hand gesture recognition based on computer vision: A review of techniques. Journal of Imaging, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 319, + 298, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 298, + 332 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 298, + 332 + ], + "type": "text", + "content": "Huy Phan. huyvnphan/pytorch_cifar10, 1 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 338, + 504, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 338, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 504, + 373 + ], + "type": "text", + "content": "Aditi Raghunathan, Sang Michael Xie, Fanny Yang, John Duchi, and Percy Liang. Understanding and mitigating the tradeoff between robustness and accuracy. In The 37th International Conference on Machine Learning, pp. 7909-7919, 2020." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 380, + 504, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 504, + 404 + ], + "type": "text", + "content": "Leslie Rice, Eric Wong, and Zico Kolter. Overfitting in adversarially robust deep learning. In The 37th International Conference on Machine Learning, pp. 8093-8104, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 410, + 504, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 504, + 435 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 504, + 435 + ], + "type": "text", + "content": "Shiori Sagawa, Pang Wei Koh, Tatsunori B. Hashimoto, and Percy Liang. Distributionally robust neural networks. In The 7th International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 441, + 505, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 505, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 505, + 464 + ], + "type": "text", + "content": "Amartya Sanyal, Puneet K Dokania, Varun Kanade, and Philip Torr. How benign is benign overfitting? In The 8th International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 471, + 505, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 471, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 505, + 506 + ], + "type": "text", + "content": "Ludwig Schmidt, Shibani Santurkar, Dimitris Tsipras, Kunal Talwar, and Aleksander Madry. Adversarily robust generalization requires more data. In The 32nd conference Advances in Neural Information Processing Systems, 2018." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 512, + 505, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 505, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 505, + 548 + ], + "type": "text", + "content": "Steffen Schneider, Evgenia Rusak, Luisa Eck, Oliver Bringmann, Wieland Brendel, and Matthias Bethge. Improving robustness against common corruptions by covariate shift adaptation. In The 34th conference on Advances in Neural Information Processing Systems, pp. 11539-11551, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 554, + 505, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 554, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 554, + 505, + 578 + ], + "type": "text", + "content": "Daniel Soudry, Elad Hoffer, Mor Shpigel Nacson, Suriya Gunasekar, and Nathan Srebro. The implicit bias of gradient descent on separable data. Journal of Machine Learning Research, pp. 1-57, 2018." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 585, + 505, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 505, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 505, + 619 + ], + "type": "text", + "content": "Jacob M Springer, Melanie Mitchell, and Garrett T Kenyon. Adversarial perturbations are not so weird: Entanglement of robust and non-robust features in neural network classifiers. arXiv preprint arXiv:2102.05110, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 626, + 505, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 626, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 505, + 660 + ], + "type": "text", + "content": "David Stutz, Matthias Hein, and Bernt Schiele. Disentangling adversarial robustness and generalization. In The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 6967-6987, 2019." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 667, + 505, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 505, + 702 + ], + "type": "text", + "content": "Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In The 2nd International Conference on Learning Representations, 2014." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 708, + 505, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 733 + ], + "type": "text", + "content": "Matus Telgarsky. Margins, shrinkage, and boosting. In The 30th International Conference on Machine Learning, pp. 307-315, 2013." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 520 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 116 + ], + "type": 
"text", + "content": "Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. Robustness may be at odds with accuracy. In The 7th International Conference on Learning Representations, 2019." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "type": "text", + "content": "Roman Vershynin. Introduction to the non-asymptotic analysis of random matrices. arXiv preprint arXiv:1011.3027, 2010." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 504, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 504, + 177 + ], + "type": "text", + "content": "P. Welinder, S. Branson, T. Mita, C. Wah, F. Schroff, S. Belongie, and P. Perona. Caltech-UCSD Birds 200. Technical report, California Institute of Technology, 2010." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 182, + 504, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 504, + 206 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 504, + 206 + ], + "type": "text", + "content": "Eric Wong, Leslie Rice, and J. Zico Kolter. Fast is better than free: Revisiting adversarial training. In The 8th International Conference on Learning Representations, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 213, + 504, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 504, + 236 + ], + "type": "text", + "content": "Tong Wu, Liang Tong, and Yevgeniy Vorobeychik. Defending against physically realizable attacks on image classification. In The 8th International Conference on Learning Representations, 2020." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 242, + 506, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 506, + 277 + ], + "type": "text", + "content": "Minghao Xu, Jian Zhang, Bingbing Ni, Teng Li, Chengjie Wang, Qi Tian, and Wenjun Zhang. Adversarial domain adaptation with domain mixup. In The AAAI Conference on Artificial Intelligence, pp. 6502-6509, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 283, + 506, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 283, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 506, + 308 + ], + "type": "text", + "content": "Shuai Yang, Prashan Premaratne, and Peter Vial. Hand gesture recognition: An overview. In The 5th IEEE International Conference on Broadband Network Multimedia Technology, pp. 63-69, 2013." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 313, + 504, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 313, + 504, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 504, + 338 + ], + "type": "text", + "content": "Dong Yin, Ramchandran Kannan, and Peter Bartlett. Rademacher complexity for adversarially robust generalization. In The 36th International conference on machine learning, pp. 7085-7094, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 342, + 504, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 504, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 504, + 367 + ], + "type": "text", + "content": "Runtian Zhai, Tianle Cai, Di He, Chen Dan, Kun He, John Hopcroft, and Liwei Wang. Adversarily robust generalization just requires more unlabeled data. arXiv preprint arXiv:1906.00555, 2019." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 373, + 506, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 506, + 407 + ], + "type": "text", + "content": "Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric Xing, Laurent El Ghaoui, and Michael Jordan. Theoretically principled trade-off between robustness and accuracy. In *The 36th International Conference on Machine Learning*, pp. 7472-7482, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 414, + 504, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 414, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 504, + 449 + ], + "type": "text", + "content": "Zhengyu Zhao, Zhuoran Liu, and Martha Larson. Towards large yet imperceptible adversarial image perturbations with perceptual color distance. In The IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1039-1048, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 454, + 504, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 454, + 504, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 454, + 504, + 488 + ], + "type": "text", + "content": "Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 495, + 504, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 504, + 520 + ], + "type": "text", + "content": "Jianli Zhou, Chao Liang, and Jun Chen. Manifold projection for adversarial defense on face recognition. In The 16th European Conference on Computer Vision, pp. 288-305, 2020." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 402, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 402, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 402, + 94 + ], + "type": "text", + "content": "A THEORETICAL STATEMENTS FOR THE LINEAR MODEL" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 105, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 105, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 105, + 506, + 149 + ], + "type": "text", + "content": "Before we present the proof of the theorem, we introduce two lemmas are of separate interest that are used throughout the proof of Theorem 1. 
Recall that the definition of the (standard normalized) maximum-" + }, + { + "bbox": [ + 104, + 105, + 506, + 149 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 105, + 506, + 149 + ], + "type": "text", + "content": "-margin solution (max-margin solution in short) of a dataset " + }, + { + "bbox": [ + 104, + 105, + 506, + 149 + ], + "type": "inline_equation", + "content": "D = \\{(x_i, y_i)\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 105, + 506, + 149 + ], + "type": "text", + "content": " corresponds to" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 248, + 149, + 504, + 171 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 149, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 248, + 149, + 504, + 171 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} := \\underset {\\| \\theta \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ]} {\\min } y _ {i} \\theta^ {\\top} x _ {i}, \\tag {10}", + "image_path": "7bac964466c15c4e6aede9b42fe73285247fe4eb1198f17a38cf9a0c3c1201f4.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "spans": [ + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "text", + "content": "by simply setting " + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 0" + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "text", + "content": " in Equation 4. 
The " + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "text", + "content": "-margin of " + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}" + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "text", + "content": " then reads " + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "inline_equation", + "content": "\\min_{i\\in [n]}y_i\\widehat{\\theta}^\\top x_i" + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "text", + "content": ". Furthermore for a dataset " + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "inline_equation", + "content": "D = \\{(x_{i},y_{i})\\}_{i = 1}^{n}" + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "text", + "content": " we refer to the induced dataset " + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "inline_equation", + "content": "\\widetilde{D}" + }, + { + "bbox": [ + 104, + 174, + 504, + 211 + ], + "type": "text", + "content": " as the dataset with covariate vectors stripped of the first element, i.e." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 219, + 213, + 504, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 213, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 219, + 213, + 504, + 228 + ], + "type": "interline_equation", + "content": "\\widetilde {D} = \\left\\{\\left(\\tilde {x} _ {i}, y _ {i}\\right) \\right\\} _ {i = 1} ^ {n} := \\left\\{\\left(\\left(x _ {i}\\right) _ {[ 2: d ]}, y _ {i}\\right) \\right\\} _ {i = 1} ^ {n}, \\tag {11}", + "image_path": "174745dff0e6d2d1f8c74e8a5de8b6f936a7f8e5940d2f5b80a5eded34267c18.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "(x_{i})_{[2:d]}" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": " refers to the last " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "d - 1" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": " elements of the vector " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": ". 
Furthermore, remember that for any vector " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "z_{[j]}" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": " refers to the " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": "-th element of " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "e_j" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": "-th canonical basis vector. 
Further, recall the distribution " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": " as defined in Section 3.1: the label " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "y \\in \\{+1, -1\\}" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": " is drawn with equal probability and the covariate vector is sampled as " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "x = [y_{\\frac{r}{2}}, \\tilde{x}]" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\tilde{x} \\in \\mathbb{R}^{d-1}" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": " is a random vector drawn from a standard normal distribution, i.e. " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\tilde{x} \\sim \\mathcal{N}(0, \\sigma^2 I_{d-1})" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": ". We generally allow " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": ", used to sample the training data, to differ from " + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "inline_equation", + "content": "r_{\\mathrm{test}}" + }, + { + "bbox": [ + 104, + 229, + 506, + 299 + ], + "type": "text", + "content": ", which is used during test time." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 303, + 504, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 303, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 303, + 504, + 336 + ], + "type": "text", + "content": "The following lemma derives a closed-form expression for the normalized max-margin solution for any dataset with fixed separation " + }, + { + "bbox": [ + 104, + 303, + 504, + 336 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 303, + 504, + 336 + ], + "type": "text", + "content": " in the signal component, and that is linearly separable in the last " + }, + { + "bbox": [ + 104, + 303, + 504, + 336 + ], + "type": "inline_equation", + "content": "d - 1" + }, + { + "bbox": [ + 104, + 303, + 504, + 336 + ], + "type": "text", + "content": " coordinates with margin " + }, + { + "bbox": [ + 104, + 303, + 504, + 336 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}" + }, + { + "bbox": [ + 104, + 303, + 504, + 336 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": "Lemma A.1. 
Let " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "D = \\{(x_i, y_i)\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": " be a dataset that consists of points " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "(x, y) \\in \\mathbb{R}^d \\times \\{\\pm 1\\}" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "x_{[1]} = y_{\\frac{r}{2}}" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": ", i.e. the covariates " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": " are deterministic in their first coordinate given " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": " with separation distance " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": ". 
Furthermore, let the induced dataset " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "\\widetilde{D}" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": " also be linearly separable by the normalized max- " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": "-margin solution " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": " with an " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": "-margin " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": ". Then, the normalized max-margin solution of the original dataset " + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 338, + 505, + 398 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 248, + 398, + 504, + 423 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 398, + 504, + 423 + ], + "spans": [ + { + "bbox": [ + 248, + 398, + 504, + 423 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} = \\frac {1}{\\sqrt {r ^ {2} + 4 \\tilde {\\gamma} ^ {2}}} \\left[ r, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right]. 
\\tag {12}", + "image_path": "e5a10add755ed929703629d124060bbf163b18019faf16516c19279fd9bd8781.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 425, + 377, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 425, + 377, + 439 + ], + "spans": [ + { + "bbox": [ + 104, + 425, + 377, + 439 + ], + "type": "text", + "content": "Further, the standard accuracy of " + }, + { + "bbox": [ + 104, + 425, + 377, + 439 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}" + }, + { + "bbox": [ + 104, + 425, + 377, + 439 + ], + "type": "text", + "content": " for data drawn from " + }, + { + "bbox": [ + 104, + 425, + 377, + 439 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_{r_{test}}" + }, + { + "bbox": [ + 104, + 425, + 377, + 439 + ], + "type": "text", + "content": " reads" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 233, + 440, + 504, + 465 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 440, + 504, + 465 + ], + "spans": [ + { + "bbox": [ + 233, + 440, + 504, + 465 + ], + "type": "interline_equation", + "content": "\\mathbb {P} _ {r _ {\\text {t e s t}}} \\left(Y \\widehat {\\theta} ^ {\\top} X > 0\\right) = \\Phi \\left(\\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\tilde {\\gamma}}\\right). \\tag {13}", + "image_path": "c3d21ad1d651dc5959dc7058a0ed5170f4cfb4e064560b0844d6e2646c70b3e2.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 472, + 504, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 496 + ], + "type": "text", + "content": "The proof can be found in Section A.3. 
The next lemma provides high probability upper and lower bounds for the margin " + }, + { + "bbox": [ + 104, + 472, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}" + }, + { + "bbox": [ + 104, + 472, + 504, + 496 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 472, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\widetilde{D}" + }, + { + "bbox": [ + 104, + 472, + 504, + 496 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 472, + 504, + 496 + ], + "type": "inline_equation", + "content": "\\tilde{x}_i" + }, + { + "bbox": [ + 104, + 472, + 504, + 496 + ], + "type": "text", + "content": " are drawn from the normal distribution." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "text", + "content": "Lemma A.2. 
Let " + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "inline_equation", + "content": "\\widetilde{D} = \\{(\\tilde{x}_i, y_i)\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "text", + "content": " be a random dataset where " + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "inline_equation", + "content": "y_i \\in \\{\\pm 1\\}" + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "text", + "content": " are equally distributed and " + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "inline_equation", + "content": "\\tilde{x}_i \\sim \\mathcal{N}(0, \\sigma I_{d-1})" + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}" + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "text", + "content": " is the maximum " + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 498, + 505, + 522 + ], + "type": "text", + "content": " margin that can be written as" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 253, + 523, + 355, + 545 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 523, + 355, + 545 + ], + "spans": [ + { + "bbox": [ + 253, + 523, + 355, + 545 + ], + "type": "interline_equation", + "content": "\\tilde{\\gamma} = \\max_{\\| \\tilde{\\theta}\\|_{2}\\leq 1}\\min_{i\\in [n]}y_{i}\\tilde{\\theta}^{\\top}\\tilde{x}_{i}.", + "image_path": "9d6df5ed1d91654adf90336a1f7d6883b13b0d88cf30ad74e8627eaa2d341b81.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 548, + 504, + 563 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 563 + ], + "type": "text", + "content": "Then, for any " + }, + { + "bbox": [ + 104, + 548, + 504, + 563 + ], + "type": "inline_equation", + "content": "t \\geq 0" + }, + { + "bbox": [ + 104, + 548, + 504, + 563 + ], + "type": "text", + "content": ", with probability greater than " + }, + { + "bbox": [ + 104, + 548, + 504, + 563 + ], + "type": "inline_equation", + "content": "1 - 2e^{-\\frac{t^2}{2}}" + }, + { + "bbox": [ + 104, + 548, + 504, + 563 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 104, + 548, + 504, + 563 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}_{\\min}(t) \\leq \\tilde{\\gamma} \\leq \\tilde{\\gamma}_{\\max}(t)" + }, + { + "bbox": [ + 104, + 548, + 504, + 563 + ], + "type": "text", + "content": " where" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 146, + 564, + 462, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 564, + 462, + 597 + ], + "spans": [ + { + "bbox": [ + 146, + 564, + 462, + 597 + ], + "type": "interline_equation", + "content": "\\tilde {\\gamma} _ {\\mathrm {m a x}} (t) = \\sigma \\left(\\sqrt {\\frac {d - 1}{n}} + 1 + \\frac {t}{\\sqrt {n}}\\right), \\tilde {\\gamma} _ {\\mathrm {m i n}} (t) = \\sigma \\left(\\sqrt {\\frac {d - 1}{n}} - 1 - \\frac {t}{\\sqrt {n}}\\right).", + "image_path": "1f55278399cb5761daecaca4486097b3f36508fd8cd35b4970fc0eb6ebcee6ef.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 607, + 239, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 607, + 239, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 607, + 239, + 618 + ], + "type": "text", + "content": "A.1 PROOF OF THEOREM 3.1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 
505, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "text", + "content": "Given a dataset " + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "inline_equation", + "content": "D = \\{(x_{i},y_{i})\\}" + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "text", + "content": " drawn from " + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "text", + "content": ", it is easy to see that the (normalized) " + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "text", + "content": "-robust max-margin solution 4 of " + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "text", + "content": " with respect to signal-attacking perturbations " + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "inline_equation", + "content": "T(\\epsilon_{\\mathrm{tr}};x_i)" + }, + { + "bbox": [ + 104, + 628, + 505, + 661 + ], + "type": "text", + "content": " as defined in Equation 3, can be written as" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 202, + 662, + 405, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 662, + 405, + 734 + ], + "spans": [ + { + "bbox": [ + 202, + 662, + 405, + 734 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}} = \\arg \\max_{\\| \\theta \\|_{2}\\leq 1}\\min_{i\\in [n],x_{i}^{\\prime}\\in T(x_{i};\\epsilon_{\\mathrm{tr}})}y_{i}\\theta^{\\top}x_{i}^{\\prime} \\\\ = \\operatorname *{arg max}_{\\| \\theta \\|_{2}\\leq 1}\\min_{i\\in [n],|\\beta |\\leq \\epsilon_{\\mathrm{tr}}}y_{i}\\theta^{\\top}(x_{i} + \\beta e_{1}) \\\\ 
= \\operatorname *{arg max}_{\\| \\theta \\|_{2}\\leq 1}\\min_{i\\in [n]}y_{i}\\theta^{\\top}(x_{i} - y_{i}\\epsilon_{\\mathrm{tr}}\\operatorname {sign}(\\theta_{[1]})e_{1}). \\\\ \\end{array}", + "image_path": "821024a950d34bfc8a1e698c3283f87b821411ec857f0f4854bd2d76f299aef2.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "text", + "content": "Note that by definition, it is equivalent to the (standard normalized) max-margin solution " + }, + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}" + }, + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "text", + "content": " of the shifted dataset " + }, + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "inline_equation", + "content": "D_{\\epsilon_{\\mathrm{tr}}} = \\{(x_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\theta_{[1]})e_1,y_i)\\}_{i = 1}^n" + }, + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "text", + "content": ". 
Since " + }, + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "inline_equation", + "content": "D_{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "text", + "content": " satisfies the assumptions of Lemma A.1, it then follows directly that the normalized " + }, + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "text", + "content": "-robust max-margin solution reads" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 214, + 121, + 504, + 148 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 121, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 214, + 121, + 504, + 148 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{\\sqrt {(r - 2 \\epsilon_ {\\mathrm {t r}}) ^ {2} + 4 \\tilde {\\gamma} ^ {2}}} \\left[ r - 2 \\epsilon_ {\\mathrm {t r}}, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right], \\tag {14}", + "image_path": "f721e262e74cf1ef55c1bf7ec37470da9bd3a8c8745f4233668a4d0fe9ba290c.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "text", + "content": "by replacing " + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "inline_equation", + "content": "r - 2\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "text", + "content": " in Equation 12. 
Similar to above, " + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta} \\in R^{d-1}" + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "text", + "content": " is the (standard normalized) max-margin solution of " + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "inline_equation", + "content": "\\{(\\tilde{x}_i, y_i)\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}" + }, + { + "bbox": [ + 104, + 154, + 505, + 179 + ], + "type": "text", + "content": " the corresponding margin." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": "Proof of 1. 
We can now compute the " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": "-robust accuracy of the " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": "-robust max-margin estimator " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": " for a given dataset " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": " as a function of " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": ". Note that in the expression of " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": ", all values are fixed for a fixed dataset, while " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "0 \\leq \\epsilon_{\\mathrm{tr}} \\leq r - 2\\tilde{\\gamma}_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": " can be chosen. 
First note that for a test distribution " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": ", the " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": "-robust accuracy, defined as one minus the robust error (Equation 1), for a classifier associated with a vector " + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 191, + 506, + 250 + ], + "type": "text", + "content": ", can be written as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 254, + 514, + 298 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 254, + 514, + 298 + ], + "spans": [ + { + "bbox": [ + 106, + 254, + 514, + 298 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\operatorname {A c c} (\\theta ; \\epsilon_ {\\mathrm {t e}}) = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\left\\{\\min _ {x ^ {\\prime} \\in T (X; \\epsilon_ {\\mathrm {t e}})} Y \\theta^ {\\top} x ^ {\\prime} > 0 \\right\\} \\right] \\tag {15} \\\\ = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\{Y \\theta^ {\\top} X - \\epsilon_ {\\mathrm {t e}} \\theta_ {[ 1 ]} > 0 \\} \\right] = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\{Y \\theta^ {\\top} (X - Y \\epsilon_ {\\mathrm {t e}} \\operatorname {s i g n} (\\theta_ {[ 1 ]}) e _ {1}) > 0 \\} \\right] \\\\ \\end{array}", + "image_path": "b8d08d68de76b036c4aa8202fbc1d09123cc3c202209f59a230e7418ebb93c4c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 504, 
+ 338 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "text", + "content": "Now, recall that by Equation 14 and the assumption in the theorem, we have " + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "inline_equation", + "content": "r - 2\\epsilon_{\\mathrm{tr}} > 0" + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "text", + "content": ", so that " + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "inline_equation", + "content": "\\mathrm{sign}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}) = 1" + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "text", + "content": ". Further, using the definition of the " + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "inline_equation", + "content": "T(\\epsilon_{\\mathrm{tr}};x)" + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "text", + "content": " in Equation 3 and by definition of the distribution " + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "inline_equation", + "content": "X_{[1]} = Y\\frac{r}{2}" + }, + { + "bbox": [ + 104, + 301, + 504, + 338 + ], + "type": "text", + "content": ". 
Plugging into Equation 15 then yields" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 167, + 344, + 440, + 403 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 344, + 440, + 403 + ], + "spans": [ + { + "bbox": [ + 167, + 344, + 440, + 403 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\operatorname {A c c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\left\\{Y \\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} ^ {\\top} \\left(X - Y \\epsilon_ {\\mathrm {t e}} e _ {1}\\right) > 0 \\right\\} \\right] \\\\ = \\mathbb {E} _ {X, Y \\sim \\mathbb {P} _ {r}} \\left[ \\mathbb {I} \\left\\{Y \\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}} \\top} \\left(X _ {- 1} + Y \\left(\\frac {r}{2} - \\epsilon_ {\\mathrm {t e}}\\right) e _ {1}\\right) > 0 \\right\\} \\right] \\\\ = \\mathbb {P} _ {r - 2 \\epsilon_ {\\mathrm {t e}}} (Y \\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}} \\top} X > 0) \\\\ \\end{array}", + "image_path": "a0bdcf7380a7a3bb329ad999a6207202128f2e103f090a0a5eb4c4968e89620d.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "spans": [ + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "inline_equation", + "content": "X_{-1}" + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "text", + "content": " is a shorthand for the random vector " + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "inline_equation", + "content": "X_{-1} = (0; X_{[2]}, \\ldots, X_{[d]})" + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "text", + "content": ". 
The assumptions in Lemma A.1 (" + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "inline_equation", + "content": "D_{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "text", + "content": " is linearly separable) are satisfied whenever the " + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "inline_equation", + "content": "n < d - 1" + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "text", + "content": " samples are distinct, i.e. with probability one. Hence applying Lemma A.1 with " + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "inline_equation", + "content": "r_{\\mathrm{test}} = r - 2\\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "inline_equation", + "content": "r = r - 2\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 408, + 506, + 442 + ], + "type": "text", + "content": " yields" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 206, + 447, + 504, + 473 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 447, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 206, + 447, + 504, + 473 + ], + "type": "interline_equation", + "content": "\\operatorname {A c c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = \\Phi \\left(\\frac {r (r - 2 \\epsilon_ {\\mathrm {t e}})}{4 \\sigma \\tilde {\\gamma}} - \\epsilon_ {\\mathrm {t r}} \\frac {r - 2 \\epsilon_ {\\mathrm {t e}}}{2 \\sigma \\tilde {\\gamma}}\\right). 
\\tag {16}", + "image_path": "4391e9fe94fe504d90829587c17a4859caef8b5c290292d4450f1993cd554729.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "text", + "content": "Theorem statement a) then follows by noting that " + }, + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "text", + "content": " is a monotonically decreasing function in " + }, + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "text", + "content": ". The expression for the robust error then follows by noting that " + }, + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "inline_equation", + "content": "1 - \\Phi(-z) = \\Phi(z)" + }, + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "inline_equation", + "content": "z \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 478, + 504, + 510 + ], + "type": "text", + "content": " and defining" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 272, + 511, + 504, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 511, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 272, + 511, + 504, + 536 + ], + "type": "interline_equation", + "content": "\\tilde {\\varphi} = \\frac {\\sigma \\tilde {\\gamma}}{r / 2 - \\epsilon_ {\\mathrm {t e}}}. 
\\tag {17}", + "image_path": "41e08beccdfed999f6b1f7919db174aa86c087fa62e64560940c985a76964f38.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 546, + 506, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 546, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 546, + 506, + 567 + ], + "type": "text", + "content": "Proof of 2. First define " + }, + { + "bbox": [ + 104, + 546, + 506, + 567 + ], + "type": "inline_equation", + "content": "\\varphi_{\\mathrm{min}},\\varphi_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 546, + 506, + 567 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 104, + 546, + 506, + 567 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}_{\\mathrm{min}},\\tilde{\\gamma}_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 546, + 506, + 567 + ], + "type": "text", + "content": " as in Equation 17. Then we have by Equation 16" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 187, + 571, + 420, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 571, + 420, + 647 + ], + "spans": [ + { + "bbox": [ + 187, + 571, + 420, + 647 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\operatorname {E r r} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) - \\operatorname {E r r} \\left(\\widehat {\\theta} ^ {0}; \\epsilon_ {\\mathrm {t e}}\\right) = \\operatorname {A c c} \\left(\\widehat {\\theta} ^ {0}; \\epsilon_ {\\mathrm {t e}}\\right) - \\operatorname {A c c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) \\\\ = \\Phi \\left(\\frac {r / 2}{\\tilde {\\varphi}}\\right) - \\Phi \\left(\\frac {r / 2 - \\epsilon_ {\\mathrm {t r}}}{\\tilde {\\varphi}}\\right) \\\\ = \\int_ {r / 2 - \\epsilon_ {\\mathrm {t r}}} ^ {r / 2} \\frac {1}{\\sqrt {2 \\pi} \\bar {\\varphi}} \\mathbb {E} ^ {- \\frac {x ^ {2}}{\\bar {\\varphi} ^ {2}}} 
d x \\\\ \\end{array}", + "image_path": "cb8e95c8e5a402609690e325c40a70e6c7a0935a2005e3a065b700b7dc9aa84d.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 659, + 504, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 677 + ], + "type": "text", + "content": "By plugging in " + }, + { + "bbox": [ + 104, + 659, + 504, + 677 + ], + "type": "inline_equation", + "content": "t = \\sqrt{\\frac{2\\log 2 / \\delta}{n}}" + }, + { + "bbox": [ + 104, + 659, + 504, + 677 + ], + "type": "text", + "content": " in Lemma A.2, we obtain that with probability at least " + }, + { + "bbox": [ + 104, + 659, + 504, + 677 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 659, + 504, + 677 + ], + "type": "text", + "content": " we have" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 683, + 518, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 683, + 518, + 717 + ], + "spans": [ + { + "bbox": [ + 104, + 683, + 518, + 717 + ], + "type": "interline_equation", + "content": "\\tilde {\\gamma} _ {\\min } := \\sigma \\left[ \\sqrt {\\frac {d - 1}{n}} - \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right) \\right] \\leq \\tilde {\\gamma} \\leq \\sigma \\left[ \\sqrt {\\frac {d - 1}{n}} + \\left(1 + \\sqrt {\\frac {2 \\log (2 / \\delta)}{n}}\\right) \\right] =: \\tilde {\\gamma} _ {\\max }", + "image_path": "591ece2e3a46aeedd30624fd34ef5ae30feae9f3605446cf9b85e2503ad7a669.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 720, + 249, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 720, + 249, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 249, + 733 + ], + "type": "text", + "content": "and equivalently " + }, + { + "bbox": [ + 105, + 720, + 249, + 733 + ], + "type": "inline_equation", + "content": 
"\\varphi_{\\mathrm{min}}\\leq \\tilde{\\varphi}\\leq \\varphi_{\\mathrm{max}}" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 504, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 504, + 111 + ], + "type": "text", + "content": "Now note the general fact that for all " + }, + { + "bbox": [ + 104, + 80, + 504, + 111 + ], + "type": "inline_equation", + "content": "\\tilde{\\varphi} \\leq \\sqrt{2} x" + }, + { + "bbox": [ + 104, + 80, + 504, + 111 + ], + "type": "text", + "content": " the density function " + }, + { + "bbox": [ + 104, + 80, + 504, + 111 + ], + "type": "inline_equation", + "content": "f(\\tilde{\\varphi};x) = \\frac{1}{\\sqrt{2\\pi}\\tilde{\\varphi}}\\mathbb{E}^{-\\frac{x^2}{\\tilde{\\varphi}^2}}" + }, + { + "bbox": [ + 104, + 80, + 504, + 111 + ], + "type": "text", + "content": " is monotonically increasing in " + }, + { + "bbox": [ + 104, + 80, + 504, + 111 + ], + "type": "inline_equation", + "content": "\\tilde{\\varphi}" + }, + { + "bbox": [ + 104, + 80, + 504, + 111 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 117, + 505, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 117, + 505, + 141 + ], + "spans": [ + { + "bbox": [ + 104, + 117, + 505, + 141 + ], + "type": "text", + "content": "By assumption of the theorem, " + }, + { + "bbox": [ + 104, + 117, + 505, + 141 + ], + "type": "inline_equation", + "content": "\\tilde{\\varphi} \\leq \\sqrt{2}(r/2 - \\epsilon_{\\mathrm{tr}})(r/2 - \\epsilon_{\\mathrm{te}})" + }, + { + "bbox": [ + 104, + 117, + 505, + 141 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 104, + 117, + 505, + 141 + ], + "type": "inline_equation", + "content": "f(\\tilde{\\varphi}; x) \\geq f(\\varphi_{\\min}; x)" + }, + { + "bbox": [ + 104, + 117, + 505, + 141 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 117, + 505, + 141 + ], + "type": "inline_equation", + "content": "x \\in [r/2 - \\epsilon_{\\mathrm{tr}}, r/2]" + }, + { + "bbox": [ + 104, + 117, + 505, + 141 + ], + "type": "text", + "content": " and therefore" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 127, + 144, + 483, + 174 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 144, + 483, + 174 + ], + "spans": [ + { + "bbox": [ + 127, + 144, + 483, + 174 + ], + "type": "interline_equation", + "content": "\\int_ {r / 2 - \\epsilon_ {\\mathrm {t r}}} ^ {r / 2} \\frac {1}{\\sqrt {2 \\pi} \\tilde {\\varphi}} e ^ {- \\frac {x ^ {2}}{\\tilde {\\varphi} ^ {2}}} d x \\geq \\int_ {r / 2 - \\epsilon_ {\\mathrm {t r}}} ^ {r / 2} \\frac {1}{\\sqrt {2 \\pi} \\varphi_ {\\mathrm {m i n}}} e ^ {- \\frac {x ^ {2}}{\\tilde {\\varphi} ^ {2}}} d x = \\Phi \\left(\\frac {r / 2}{\\varphi_ {\\mathrm {m i n}}}\\right) - \\Phi \\left(\\frac {r / 2 - \\epsilon_ {\\mathrm {t r}}}{\\varphi_ {\\mathrm {m i n}}}\\right).", + "image_path": "caa9b3cfd4d5139b634c67e7469cf3a88564e9ba876165149d791ecce9b3257c.jpg" + } + ] + } + 
], + "index": 3 + }, + { + "bbox": [ + 105, + 177, + 220, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 177, + 220, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 177, + 220, + 189 + ], + "type": "text", + "content": "and the statement is proved." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 201, + 250, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 250, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 250, + 213 + ], + "type": "text", + "content": "A.2 PROOF OF COROLLARY 3.2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "text", + "content": "We now show that Theorem 3.1 also holds for " + }, + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "text", + "content": "-ball perturbations with at most radius " + }, + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "text", + "content": ". 
Following similar steps as in Equation 14, the " + }, + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "text", + "content": "-robust max-margin solution for " + }, + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 104, + 221, + 504, + 253 + ], + "type": "text", + "content": "-perturbations can be written as" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 188, + 253, + 505, + 275 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 253, + 505, + 275 + ], + "spans": [ + { + "bbox": [ + 188, + 253, + 505, + 275 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} := \\underset {\\| \\theta \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ]} {\\min } y _ {i} \\theta^ {\\top} \\left(x _ {i} - y _ {i} \\epsilon_ {\\mathrm {t r}} \\operatorname {s i g n} \\left(\\theta_ {[ j ^ {\\star} (\\theta) ]}\\right) e _ {j ^ {\\star} (\\theta)}\\right) \\tag {18}", + "image_path": "15ea2d9b06d49b26a1fa82e25358f179a6ff570af76209de03878d9875cb6e9b.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "inline_equation", + "content": "j^{\\star}(\\theta) \\coloneqq \\arg \\max_{j} |\\theta_{j}|" + }, + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "text", + "content": " is the index of the maximum absolute value of " + }, + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "text", + 
"content": ". We now prove by contradiction that the robust max-margin solution for this perturbation set 9 is equivalent to the solution 14 for the perturbation set 3. We start by assuming that " + }, + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "text", + "content": " does not solve Equation 14, which is equivalent to assuming " + }, + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "inline_equation", + "content": "1 \\notin j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})" + }, + { + "bbox": [ + 104, + 277, + 506, + 337 + ], + "type": "text", + "content": " by definition. We now show how this assumption leads to a contradiction." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "content": "Define the shorthand " + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "inline_equation", + "content": "j^{\\star} \\coloneqq j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}) - 1" + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "content": ". 
Since " + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "content": " is the solution of 18, by definition, we have that " + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "content": " is also the max-margin solution of the shifted dataset " + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "inline_equation", + "content": "D_{\\epsilon_{\\mathrm{tr}}} \\coloneqq (x_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\theta_{[j^{\\star} + 1]})e_{j^{\\star} + 1},y_i)" + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "content": ". Further, note that by the assumption that " + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "inline_equation", + "content": "1 \\notin j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})" + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "content": ", this dataset " + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "inline_equation", + "content": "D_{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "content": " consists of input vectors " + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "inline_equation", + "content": "x_i = (y_i\\frac{r}{2},\\tilde{x}_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\theta_{[j^{\\star} + 1]})e_{j^{\\star} + 1})" + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "content": ". 
Hence via Lemma A.1, " + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 341, + 505, + 397 + ], + "type": "text", + "content": " can be written as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 233, + 399, + 505, + 427 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 399, + 505, + 427 + ], + "spans": [ + { + "bbox": [ + 233, + 399, + 505, + 427 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{\\sqrt {r ^ {2} + 4 \\left(\\tilde {\\gamma} ^ {\\epsilon_ {\\mathrm {t r}}}\\right) ^ {2}}} [ r, 2 \\tilde {\\gamma} ^ {\\epsilon_ {\\mathrm {t r}}} \\tilde {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} ], \\tag {19}", + "image_path": "f4fb4a5763f3c2c582def4fad8a0b1d1232671fe8fd1126293f005851ac51a53.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 431, + 460, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 431, + 460, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 431, + 460, + 447 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 431, + 460, + 447 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 431, + 460, + 447 + ], + "type": "text", + "content": " is the normalized max-margin solution of " + }, + { + "bbox": [ + 104, + 431, + 460, + 447 + ], + "type": "inline_equation", + "content": "\\widetilde{D} := (\\tilde{x}_i - y_i\\epsilon_{\\mathrm{tr}}\\mathrm{sign}(\\tilde{\\theta}_{[j^\\star ]})e_{j^\\star},y_i)" + }, + { + "bbox": [ + 104, + 431, + 460, + 447 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 451, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 504, + 479 + ], + "type": "text", + "content": "We now characterize " + }, + { + "bbox": [ + 104, + 451, + 504, + 479 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 451, + 504, + 479 + ], + "type": "text", + "content": ". Note that by assumption, " + }, + { + "bbox": [ + 104, + 451, + 504, + 479 + ], + "type": "inline_equation", + "content": "j^{\\star} = j^{\\star}(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}) = \\arg \\max_{j}|\\tilde{\\theta}_{[j]}^{\\epsilon_{\\mathrm{tr}}}|" + }, + { + "bbox": [ + 104, + 451, + 504, + 479 + ], + "type": "text", + "content": ". Hence, the normalized max-margin solution " + }, + { + "bbox": [ + 104, + 451, + 504, + 479 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 451, + 504, + 479 + ], + "type": "text", + "content": " is the solution of" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 224, + 482, + 505, + 508 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 482, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 224, + 482, + 505, + 508 + ], + "type": "interline_equation", + "content": "\\tilde {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} := \\underset {\\| \\tilde {\\theta} \\| _ {2} \\leq 1} {\\arg \\max } \\underset {i \\in [ n ]} {\\min } y _ {i} \\tilde {\\theta} ^ {\\top} \\tilde {x} _ {i} - \\epsilon_ {\\mathrm {t r}} | \\tilde {\\theta} _ {[ j ^ {\\star} ]} | \\tag {20}", + "image_path": "1a7157e8d5d073e5670bc5018ad4102402e92c9112df1db4fcdf0dcd38598563.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 512, 
+ 506, + 607 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": "Observe that the minimum margin of this estimator " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}} = \\min_{i\\in [n]}y_i(\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}})^\\top \\tilde{x}_i - \\epsilon_{\\mathrm{tr}}|\\tilde{\\theta}_{[j^* ]}^{\\epsilon_{\\mathrm{tr}}}|" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": " decreases with " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": " as the problem becomes harder " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}}\\leq \\tilde{\\gamma}" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": ", where the latter is equivalent to the margin of " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 0" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": ". 
Since " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "r > 2\\tilde{\\gamma}_{\\max}" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": " by assumption in the Theorem, by Lemma A.2 with probability at least " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "1 - 2e^{-\\frac{\\alpha^2(d - 1)}{n}}" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": ", we then have that " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "r > 2\\tilde{\\gamma}\\geq 2\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": ". Given the closed form of " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": " in Equation 19, it directly follows that " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}_{[1]}^{\\epsilon_{\\mathrm{tr}}} = r > 2\\tilde{\\gamma}^{\\epsilon_{\\mathrm{tr}}}\\| \\tilde{\\theta}^{\\epsilon_{\\mathrm{tr}}}\\| _2 = \\| \\widehat{\\theta}_{[2:d]}^{\\epsilon_{\\mathrm{tr}}}\\| _2" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": " and hence " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "1\\in j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": ". 
This contradicts the original assumption " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "1\\notin j^{\\star}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": " and hence we established that " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": " for the " + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 104, + 512, + 506, + 607 + ], + "type": "text", + "content": "-perturbation set 9 has the same closed form 14 as for the perturbation set 3." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 612, + 504, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 504, + 637 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 504, + 637 + ], + "type": "text", + "content": "The final statement is proved by using the analogous steps as in the proof of 1. and 2. to obtain the closed form of the robust accuracy of " + }, + { + "bbox": [ + 104, + 612, + 504, + 637 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 612, + 504, + 637 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 650, + 231, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 650, + 231, + 661 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 231, + 661 + ], + "type": "text", + "content": "A.3 PROOF OF LEMMA A.1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 669, + 269, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 669, + 269, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 669, + 269, + 682 + ], + "type": "text", + "content": "We start by proving that " + }, + { + "bbox": [ + 105, + 669, + 269, + 682 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}" + }, + { + "bbox": [ + 105, + 669, + 269, + 682 + ], + "type": "text", + "content": " is of the form" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 272, + 685, + 505, + 706 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 685, + 505, + 706 + ], + "spans": [ + { + "bbox": [ + 272, + 685, + 505, + 706 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} = \\left[ a _ {1}, a _ {2} \\tilde {\\theta} \\right], \\tag {21}", + "image_path": "5e2c35d19417b700ade151c7bad1c9a807b2c662ec531570413521db731a2af8.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "for " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "a_1, a_2 > 0" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": ". 
Denote by " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{H}(\\theta)" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " the plane through the origin with normal " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": ". We define " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "d((x,y), \\mathcal{H}(\\theta))" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " as the signed euclidean distance from the point " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "(x,y) \\in D \\sim \\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " to the plane " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{H}(\\theta)" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": ". 
The signed" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": "euclidean distance is the defined as the euclidean distance from " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": " to the plane if the point " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "(x, y)" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": " is correctly predicted by " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": ", and the negative euclidean distance from " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": " to the plane otherwise. 
We rewrite the definition of the max " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": "-margin classifier. It is the classifier induced by the normalized vector " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": ", such that" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 185, + 133, + 424, + 157 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 133, + 424, + 157 + ], + "spans": [ + { + "bbox": [ + 185, + 133, + 424, + 157 + ], + "type": "interline_equation", + "content": "\\max _ {\\theta \\in \\mathbb {R} ^ {d}} \\min _ {(x, y) \\in D} d \\left((x, y), \\mathcal {H} (\\theta)\\right) = \\min _ {(x, y) \\in D} d \\left(\\left(x, y\\right), \\mathcal {H} (\\widehat {\\theta})\\right).", + "image_path": "92a72dac71d0ce057aa352c86277d0c41819e524dcf7f3a502a214d830daf57a.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 160, + 347, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 160, + 347, + 173 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 347, + 173 + ], + "type": "text", + "content": "We use that " + }, + { + "bbox": [ + 105, + 160, + 347, + 173 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 105, + 160, + 347, + 173 + ], + "type": "text", + "content": " is deterministic in its first coordinate and get" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 177, + 177, + 432, + 222 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 177, + 432, + 222 + ], + "spans": [ + { + "bbox": [ + 177, + 177, + 432, + 222 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\max _ {\\theta} \\min _ {(x, y) \\in D} d 
\\left((x, y), \\mathcal {H} (\\theta)\\right) = \\max _ {\\theta} \\min _ {(x, y) \\in D} y \\left(\\theta_ {[ 1 ]} x _ {[ 1 ]} + \\tilde {\\theta} ^ {\\top} \\tilde {x}\\right) \\\\ = \\max _ {\\theta} \\theta_ {1} \\frac {r}{2} + \\min _ {(x, y) \\in D} y \\tilde {\\theta} ^ {\\top} \\tilde {x}. \\\\ \\end{array}", + "image_path": "ff1533ed0c1f6ada2c66115ca8fcbcee52021937cfef2e6c7bbeb04467cd3b3d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": "Because " + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "inline_equation", + "content": "r > 0" + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": ", the maximum over all " + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": " has " + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}_{[1]} \\geq 0" + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": ". Take any " + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "inline_equation", + "content": "a > 0" + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "inline_equation", + "content": "\\| \\widetilde{\\theta} \\|_2 = a" + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": ". 
By definition the max " + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "inline_equation", + "content": "l_2" + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": "-margin classifier, " + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "inline_equation", + "content": "\\widetilde{\\theta}" + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": ", maximizes " + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "inline_equation", + "content": "\\min_{(x,y) \\in D} d((x,y), \\mathcal{H}(\\theta))" + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": ". Therefore, " + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}" + }, + { + "bbox": [ + 104, + 228, + 506, + 269 + ], + "type": "text", + "content": " is of the form of Equation 21." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "text", + "content": "Note that all classifiers induced by vectors of the form of Equation 21 classify " + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "text", + "content": " correctly. 
Next, we aim to find expressions for " + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "inline_equation", + "content": "a_1" + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "inline_equation", + "content": "a_2" + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "text", + "content": " such that Equation 21 is the normalized max " + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "inline_equation", + "content": "l_2" + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "text", + "content": "-margin classifier. The distance from any " + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "inline_equation", + "content": "x \\in D" + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "inline_equation", + "content": "\\mathcal{H}(\\widehat{\\theta})" + }, + { + "bbox": [ + 104, + 273, + 506, + 310 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 233, + 314, + 376, + 335 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 314, + 376, + 335 + ], + "spans": [ + { + "bbox": [ + 233, + 314, + 376, + 335 + ], + "type": "interline_equation", + "content": "d \\left(x, \\mathcal {H} (\\widehat {\\theta})\\right) = \\left| a _ {1} x _ {[ 1 ]} + a _ {2} \\tilde {\\theta} ^ {\\top} \\tilde {x} \\right|.", + "image_path": "4aeddecb6629559d9d22165b1ccba42831cad1605319430abb52e89d6e30f4db.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 340, + 414, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 340, + 414, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 414, + 360 + ], + "type": "text", + "content": "Using that " + }, + { + "bbox": [ + 105, + 340, + 414, + 360 + ], + "type": 
"inline_equation", + "content": "x_{[1]} = y^{\\frac{r}{2}}" + }, + { + "bbox": [ + 105, + 340, + 414, + 360 + ], + "type": "text", + "content": " and that the second term equals " + }, + { + "bbox": [ + 105, + 340, + 414, + 360 + ], + "type": "inline_equation", + "content": "a_2 d\\left(x, \\mathcal{H}(\\tilde{\\theta})\\right)" + }, + { + "bbox": [ + 105, + 340, + 414, + 360 + ], + "type": "text", + "content": ", we get" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 156, + 364, + 504, + 387 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 364, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 156, + 364, + 504, + 387 + ], + "type": "interline_equation", + "content": "\\left. d \\left(x, \\mathcal {H} (\\hat {\\theta})\\right) = \\left| a _ {1} \\frac {r}{2} + a _ {2} d \\left(x, \\mathcal {H} (\\tilde {\\theta})\\right) \\right| = a _ {1} \\frac {r}{2} + \\sqrt {1 - a _ {1} ^ {2}} d \\left(x, \\mathcal {H} (\\tilde {\\theta})\\right). \\right. \\tag {22}", + "image_path": "7c199869e1ca2e6a99b6171957c1d44f2d4c65f830b35dd474f78239f17b063e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "inline_equation", + "content": "(\\tilde{x},y)\\in \\widetilde{D}" + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": " be the point closest in Euclidean distance to " + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}" + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": ". 
This point is also the closest point in Euclidean distance to " + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "inline_equation", + "content": "\\mathcal{H}(\\widehat{\\theta})" + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": ", because by Equation 22 " + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "inline_equation", + "content": "d\\left(x,\\mathcal{H}(\\widehat{\\theta})\\right)" + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": " is strictly decreasing for decreasing " + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "inline_equation", + "content": "d\\left(x,\\mathcal{H}(\\tilde{\\theta})\\right)" + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": ". We maximize the minimum margin " + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "inline_equation", + "content": "d\\left(x,\\mathcal{H}(\\widehat{\\theta})\\right)" + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": " with respect to " + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "inline_equation", + "content": "a_1" + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": ". Define the vectors " + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "inline_equation", + "content": "a = [a_1,a_2]" + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "inline_equation", + "content": "v = \\left[\\frac{r}{2},d\\left(x,\\mathcal{H}(\\tilde{\\theta})\\right)\\right]" + }, + { + "bbox": [ + 104, + 392, + 504, + 462 + ], + "type": "text", + "content": ". 
We find using the dual norm that" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 281, + 466, + 328, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 466, + 328, + 491 + ], + "spans": [ + { + "bbox": [ + 281, + 466, + 328, + 491 + ], + "type": "interline_equation", + "content": "a = \\frac {v}{\\| v \\| _ {2}}.", + "image_path": "97c2151da435e631613af7fdaa1045d34839339f07dbd98eebb21a1bf4fe433f.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 495, + 390, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 390, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 390, + 509 + ], + "type": "text", + "content": "Plugging the expression of " + }, + { + "bbox": [ + 105, + 495, + 390, + 509 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 105, + 495, + 390, + 509 + ], + "type": "text", + "content": " into Equation 21 yields that " + }, + { + "bbox": [ + 105, + 495, + 390, + 509 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}" + }, + { + "bbox": [ + 105, + 495, + 390, + 509 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 249, + 513, + 361, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 513, + 361, + 540 + ], + "spans": [ + { + "bbox": [ + 249, + 513, + 361, + 540 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} = \\frac {1}{\\sqrt {r ^ {2} + 4 \\widetilde {\\gamma} ^ {2}}} \\left[ r, 2 \\widetilde {\\gamma} \\widetilde {\\theta} \\right].", + "image_path": "b1a81cf44a32d3ba33f76552c220ca20c23e24733dc521fdbe2ead2be8de220f.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 549, + 320, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 549, + 320, + 562 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 320, + 562 + ], + "type": "text", + "content": "For the 
second part of the lemma we first decompose" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 140, + 566, + 470, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 566, + 470, + 590 + ], + "spans": [ + { + "bbox": [ + 140, + 566, + 470, + 590 + ], + "type": "interline_equation", + "content": "\\mathbb {P} _ {r _ {\\mathrm {t e s t}}} (Y \\widehat {\\theta} ^ {\\top} X > 0) = \\frac {1}{2} \\mathbb {P} _ {r _ {\\mathrm {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X > 0 \\mid Y = 1 \\right] + \\frac {1}{2} \\mathbb {P} _ {r _ {\\mathrm {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X < 0 \\mid Y = - 1 \\right]", + "image_path": "07816f81ac31650ddb1fc9f6e0b836f5072f71f41e24671786ce3a642c256367.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 592, + 190, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 190, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 190, + 604 + ], + "type": "text", + "content": "We can further write" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 161, + 607, + 504, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 607, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 161, + 607, + 504, + 704 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X > 0 \\mid Y = 1 \\right] = \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ \\sum_ {i = 2} ^ {d} \\widehat {\\theta} _ {[ i ]} X _ {[ i ]} > - \\widehat {\\theta} _ {[ 1 ]} X _ {[ 1 ]} \\mid Y = 1 \\right] \\tag {23} \\\\ = \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ 2 \\tilde {\\gamma} \\sum_ {i = 1} ^ {d - 1} \\tilde {\\theta} _ {[ i ]} X _ {[ i ]} > - r \\frac {r _ {\\text {t e s t}}}{2} \\mid Y = 1 \\right] \\\\ = 1 - \\Phi \\left(- \\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\tilde {\\gamma}}\\right) = \\Phi \\left(\\frac {r r _ 
{\\text {t e s t}}}{4 \\sigma \\tilde {\\gamma}}\\right) \\\\ \\end{array}", + "image_path": "8cfa54ee7bce515c81bdd4443f17503b57788ace339fd723638158923aea483a.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 707, + 506, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 707, + 506, + 735 + ], + "spans": [ + { + "bbox": [ + 104, + 707, + 506, + 735 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 707, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 104, + 707, + 506, + 735 + ], + "type": "text", + "content": " is the cumulative distribution function. The second equality follows by multiplying by the normalization constant on both sides and the third equality is due to the fact that " + }, + { + "bbox": [ + 104, + 707, + 506, + 735 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{d-1} \\tilde{\\theta}_{[i]} X_{[i]}" + }, + { + "bbox": [ + 104, + 707, + 506, + 735 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 105 + ], + "type": 
"text", + "content": "a zero-mean Gaussian with variance " + }, + { + "bbox": [ + 104, + 81, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\sigma^2\\|\\tilde{\\theta}\\|_2^2 = \\sigma^2" + }, + { + "bbox": [ + 104, + 81, + 504, + 105 + ], + "type": "text", + "content": " since " + }, + { + "bbox": [ + 104, + 81, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\tilde{\\theta}" + }, + { + "bbox": [ + 104, + 81, + 504, + 105 + ], + "type": "text", + "content": " is normalized. Correspondingly we can write" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 113, + 110, + 505, + 155 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 110, + 505, + 155 + ], + "spans": [ + { + "bbox": [ + 113, + 110, + 505, + 155 + ], + "type": "interline_equation", + "content": "\\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ \\widehat {\\theta} ^ {\\top} X < 0 \\mid Y = - 1 \\right] = \\mathbb {P} _ {r _ {\\text {t e s t}}} \\left[ 2 \\widetilde {\\gamma} \\sum_ {i = 1} ^ {d - 1} \\tilde {\\theta} _ {[ i ]} X _ {[ i ]} < - r \\left(- \\frac {r _ {\\text {t e s t}}}{2}\\right) \\mid Y = - 1 \\right] = \\Phi \\left(\\frac {r r _ {\\text {t e s t}}}{4 \\sigma \\widetilde {\\gamma}}\\right) \\tag {24}", + "image_path": "9ec2fdfae4dc0ae89052c3da46c4c96a987dedde206300f70ef953ad7ac8da67.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 164, + 504, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 164, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 164, + 504, + 194 + ], + "type": "text", + "content": "so that we can combine 23 and 23 and 24 to obtain " + }, + { + "bbox": [ + 104, + 164, + 504, + 194 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_{r_{\\mathrm{test}}}(Y\\widehat{\\theta}^{\\top}X > 0) = \\Phi \\left(\\frac{r r_{\\mathrm{test}}}{4\\sigma\\widetilde{\\gamma}}\\right)" + }, + { + "bbox": [ + 104, + 164, + 504, + 194 + ], + "type": "text", + 
"content": ". This concludes the proof of the lemma." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 207, + 233, + 218 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 207, + 233, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 207, + 233, + 218 + ], + "type": "text", + "content": "A.4 PROOF OF LEMMA A.2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 228, + 506, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 284 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 284 + ], + "type": "text", + "content": "The proof plan is as follows. We start from the definition of the max " + }, + { + "bbox": [ + 104, + 228, + 506, + 284 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 228, + 506, + 284 + ], + "type": "text", + "content": "-margin of a dataset. Then, we rewrite the max " + }, + { + "bbox": [ + 104, + 228, + 506, + 284 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 228, + 506, + 284 + ], + "type": "text", + "content": "-margin as an expression that includes a random matrix with independent standard normal entries. This allows us to prove the upper and lower bounds for the max-" + }, + { + "bbox": [ + 104, + 228, + 506, + 284 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 228, + 506, + 284 + ], + "type": "text", + "content": "-margin in Sections A.4.1 and A.4.2 respectively, using non-asymptotic estimates on the singular values of Gaussian random matrices." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 289, + 375, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 375, + 302 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 375, + 302 + ], + "type": "text", + "content": "Given the dataset " + }, + { + "bbox": [ + 104, + 289, + 375, + 302 + ], + "type": "inline_equation", + "content": "\\widetilde{D} = \\{(\\tilde{x}_i, y_i)\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 289, + 375, + 302 + ], + "type": "text", + "content": ", we define the random matrix" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 274, + 308, + 505, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 274, + 308, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 274, + 308, + 505, + 357 + ], + "type": "interline_equation", + "content": "X = \\left( \\begin{array}{c} \\tilde {x} _ {1} ^ {\\top} \\\\ \\tilde {x} _ {2} ^ {\\top} \\\\ \\dots \\\\ \\tilde {x} _ {n} ^ {\\top} \\end{array} \\right). \\tag {25}", + "image_path": "1a3776ae5cd962b6fb341a58d4e0f3d2b5b55a1e3843dc562da7949d08222da8.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "inline_equation", + "content": "\\tilde{x}_i\\sim \\mathcal{N}(0,\\sigma I_{d - 1})" + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "text", + "content": " be the class of all perfect predictors of " + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "inline_equation", + "content": "\\widetilde{D}" + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "text", + "content": ". For a matrix " + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "text", + "content": " and vector " + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "text", + "content": " we also denote by " + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "inline_equation", + "content": "|Ab|" + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "text", + "content": " the vector whose entries correspond to the absolute values of the entries of " + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "inline_equation", + "content": "Ab" + }, + { + "bbox": [ + 104, + 365, + 506, + 400 + ], + "type": "text", + "content": ". 
Then, by definition" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 188, + 407, + 505, + 427 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 407, + 505, + 427 + ], + "spans": [ + { + "bbox": [ + 188, + 407, + 505, + 427 + ], + "type": "interline_equation", + "content": "\\tilde {\\gamma} = \\max _ {v \\in \\mathcal {V}, \\| v \\| _ {2} = 1} \\min _ {j \\in [ n ]} | X v | _ {[ j ]} = \\max _ {v \\in \\mathcal {V}, \\| v \\| _ {2} = 1} \\min _ {j \\in [ n ]} \\sigma | Q v | _ {[ j ]}, \\tag {26}", + "image_path": "0929a6586f04778de9c1a3dc9048e8122490603fc2db867b8758c7d9bd2d24a3.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 434, + 275, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 275, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 275, + 448 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 434, + 275, + 448 + ], + "type": "inline_equation", + "content": "Q = \\frac{1}{\\sigma} X" + }, + { + "bbox": [ + 104, + 434, + 275, + 448 + ], + "type": "text", + "content": " is the scaled data matrix." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 453, + 386, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 386, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 386, + 466 + ], + "type": "text", + "content": "In the sequel we will use the operator norm of a matrix " + }, + { + "bbox": [ + 104, + 453, + 386, + 466 + ], + "type": "inline_equation", + "content": "A \\in \\mathbb{R}^{n \\times d - 1}" + }, + { + "bbox": [ + 104, + 453, + 386, + 466 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 242, + 472, + 367, + 494 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 472, + 367, + 494 + ], + "spans": [ + { + "bbox": [ + 242, + 472, + 367, + 494 + ], + "type": "interline_equation", + "content": "\\| A \\| _ {2} = \\sup _ {v \\in \\mathbb {R} ^ {d - 1} | \\| v \\| _ {2} = 1} \\| A v \\| _ {2}", + "image_path": "7b628b972fc05ec570faca165dce75a0b6606d84fbb0fa24473f5cf8f50742bd.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "text", + "content": "and denote the maximum singular value of a matrix " + }, + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "inline_equation", + "content": "s_{\\max}(A)" + }, + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "text", + "content": " and the minimum singular value as " + }, + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "inline_equation", + "content": "s_{\\min}(A)" + }, + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 537, + 205, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 537, + 205, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 537, + 205, + 548 + ], + "type": "text", + "content": "A.4.1 UPPERBOUND" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 557, + 504, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 504, + 586 + ], + "type": "text", + "content": "Given the maximality of the operator norm and since the minimum entry of the vector " + }, + { + "bbox": [ + 104, + 557, + 504, + 586 + ], + "type": "inline_equation", + "content": "|Qv|" + }, + { + "bbox": [ + 104, + 557, + 504, + 586 + ], + "type": "text", + "content": " must be smaller than " + }, + { + "bbox": [ + 104, + 557, + 504, + 586 + ], + "type": "inline_equation", + "content": "\\frac{\\|Q\\|_2}{\\sqrt{n}}" + }, + { + "bbox": [ + 104, + 557, + 504, + 586 + ], + "type": "text", + "content": ", we can upper bound " + }, + { + "bbox": [ + 104, + 557, + 504, + 586 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}" + }, + { + "bbox": [ + 104, + 557, + 504, + 586 + ], + "type": "text", + "content": " by" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 269, + 592, + 340, + 618 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 592, + 340, + 618 + ], + "spans": [ + { + "bbox": [ + 269, + 592, + 340, + 618 + ], + "type": "interline_equation", + "content": "\\tilde {\\gamma} \\leq \\sigma \\frac {1}{\\sqrt {n}} \\| Q \\| _ {2}.", + "image_path": "e6784723e40944183599f37dad6f23a9f2674084fc1dcd1bbfa3e8d4b47d269b.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 626, + 506, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 506, + 651 + ], + 
"type": "text", + "content": "Taking the expectation on both sides with respect to the draw of " + }, + { + "bbox": [ + 104, + 626, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\widetilde{D}" + }, + { + "bbox": [ + 104, + 626, + 506, + 651 + ], + "type": "text", + "content": " and noting " + }, + { + "bbox": [ + 104, + 626, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\| Q\\| _2\\leq s_{\\max}(Q)" + }, + { + "bbox": [ + 104, + 626, + 506, + 651 + ], + "type": "text", + "content": ", it follows from Corollary 5.35 of Vershynin (2010) that for all " + }, + { + "bbox": [ + 104, + 626, + 506, + 651 + ], + "type": "inline_equation", + "content": "t\\geq 0" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 204, + 656, + 405, + 678 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 656, + 405, + 678 + ], + "spans": [ + { + "bbox": [ + 204, + 656, + 405, + 678 + ], + "type": "interline_equation", + "content": "\\mathbb {P} \\left[ \\sqrt {d - 1} + \\sqrt {n} + t \\geq s _ {\\max } (Q) \\right] \\geq 1 - 2 e ^ {- \\frac {t ^ {2}}{2}}.", + "image_path": "4a1a68e9d2d53cd321e7216e59e361b2425b6b0aa117dfdf2cecb6d4f23559b1.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 685, + 321, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 685, + 321, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 321, + 700 + ], + "type": "text", + "content": "Therefore, with a probability greater than " + }, + { + "bbox": [ + 104, + 685, + 321, + 700 + ], + "type": "inline_equation", + "content": "1 - 2e^{-\\frac{t^2}{2}}" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 246, + 708, + 363, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 708, + 363, + 736 + ], + "spans": [ + { + "bbox": [ + 246, + 708, + 363, + 736 + ], + "type": "interline_equation", + "content": "\\tilde {\\gamma} \\leq \\sigma \\left(1 + \\frac {t + 
\\sqrt {d - 1}}{\\sqrt {n}}\\right).", + "image_path": "232b04646e884e0ef5953fd26e8219a19af23c32331f662a3d31448e27fe9c4e.jpg" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 209, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 209, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 209, + 94 + ], + "type": "text", + "content": "A.4.2 LOWERBOUND" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "text", + "content": "By the definition in Equation 26, if we find a vector " + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "inline_equation", + "content": "v \\in \\mathcal{V}" + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "inline_equation", + "content": "\\| v \\|_2 = 1" + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "text", + "content": " such that for an " + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "inline_equation", + "content": "a > 0" + }, + { + "bbox": [ + 104, + 
101, + 506, + 127 + ], + "type": "text", + "content": ", it holds that " + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "inline_equation", + "content": "\\min_{j \\in n} \\sigma |Xv|_{[j]} > a" + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma} > a" + }, + { + "bbox": [ + 104, + 101, + 506, + 127 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "text", + "content": "Recall the definition of the max-" + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "text", + "content": "-margin as in Equation 25. As " + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "inline_equation", + "content": "n < d - 1" + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "text", + "content": ", the random matrix " + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "text", + "content": " is a wide matrix, i.e. there are more columns than rows and therefore the minimal singular value is 0. 
Furthermore, " + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "text", + "content": " has rank " + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "text", + "content": " almost surely and hence for all " + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "inline_equation", + "content": "c > 0" + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "text", + "content": ", there exists a " + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "inline_equation", + "content": "v \\in \\mathbb{R}^{d-1}" + }, + { + "bbox": [ + 104, + 131, + 506, + 165 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 264, + 171, + 505, + 184 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 171, + 505, + 184 + ], + "spans": [ + { + "bbox": [ + 264, + 171, + 505, + 184 + ], + "type": "interline_equation", + "content": "\\sigma Q v = 1 _ {\\{n \\}} c > 0, \\tag {27}", + "image_path": "a2e1bad92345a3a9f4aa4dc2b64e62aa38c9d51e697966bfde6902372da9e91e.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "inline_equation", + "content": "1_{\\{n\\}}" + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "text", + "content": " denotes the all ones vector of dimension " + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "text", + "content": ". 
The smallest non-zero singular value of " + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "inline_equation", + "content": "s_{\\min, \\text{nonzero}}(Q)" + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "text", + "content": ", equals the smallest non-zero singular value of its transpose " + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "inline_equation", + "content": "Q^{\\top}" + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "text", + "content": ". Therefore, there also exists a " + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "inline_equation", + "content": "v \\in \\mathcal{V}" + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "inline_equation", + "content": "\\| v \\|_2 = 1" + }, + { + "bbox": [ + 104, + 189, + 506, + 225 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 211, + 230, + 505, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 230, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 211, + 230, + 505, + 255 + ], + "type": "interline_equation", + "content": "\\tilde {\\gamma} \\geq \\min _ {j \\in [ n ]} \\sigma | Q v | _ {[ j ]} \\geq \\sigma s _ {\\min , \\text {n o n z e r o s}} \\left(Q ^ {\\top}\\right) \\frac {1}{\\sqrt {n}}, \\tag {28}", + "image_path": "7883413088d6291c9c4f03a2b7d017e7fb3dc5110960ff4ec12417decf1a34f9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "text", + "content": 
"where we used the fact that any vector " + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "text", + "content": " in the span of non-zero eigenvectors satisfies " + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "inline_equation", + "content": "\\| Qv\\| _2\\geq s_{\\min, \\text{nonzeros}}(Q)" + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "text", + "content": " and the existence of a solution " + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "text", + "content": " for any right-hand side as in Equation 27. Taking the expectation on both sides, Corollary 5.35 of Vershynin (2010) yields that with a probability greater than " + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "inline_equation", + "content": "1 - 2e^{-\\frac{t^2}{2}}" + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "inline_equation", + "content": "t\\geq 0" + }, + { + "bbox": [ + 104, + 261, + 505, + 309 + ], + "type": "text", + "content": " we have" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 246, + 316, + 505, + 343 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 316, + 505, + 343 + ], + "spans": [ + { + "bbox": [ + 246, + 316, + 505, + 343 + ], + "type": "interline_equation", + "content": "\\tilde {\\gamma} \\geq \\sigma \\left(\\frac {\\sqrt {d - 1} - t}{\\sqrt {n}} - 1\\right). 
\\tag {29}", + "image_path": "09dc0b1fabfb1ccde809da67bf1e0bb49cdf819cbc69e7bd3831ae55362f7dd1.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 356, + 339, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 356, + 339, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 339, + 369 + ], + "type": "text", + "content": "B BOUNDS ON THE SUSCEPTIBILITY SCORE" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "text", + "content": "In Theorem 3.1, we give non-asymptotic bounds on the robust and standard error of a linear classifier trained with adversarial logistic regression. Moreover, we use the robust error decomposition in susceptibility and standard error to gain intuition about how adversarial training may hurt robust generalization. In this section, we complete the result of Theorem 3.1 by also deriving non-asymptotic bounds on the susceptibility score of the max " + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "text", + "content": "-margin classifier." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 441, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 504, + 464 + ], + "type": "text", + "content": "Using the results in Appendix A, we can prove following Corollary B.1, which gives non-asymptotic bounds on the susceptibility score." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "text", + "content": "Corollary B.1. Assume " + }, + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "inline_equation", + "content": "d - 1 > n" + }, + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "text", + "content": ". For the " + }, + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\epsilon_{te}" + }, + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "text", + "content": "-susceptibility on test samples from " + }, + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "inline_equation", + "content": "2\\epsilon_{te} < r" + }, + { + "bbox": [ + 104, + 467, + 504, + 491 + ], + "type": "text", + "content": " and perturbation sets in Equation equation 3 and equation 9 the following holds:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "text", + "content": "For " + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "inline_equation", + "content": "\\epsilon_{tr} < \\frac{r}{2} - \\tilde{\\gamma}_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "text", + "content": ", with probability at least " + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "inline_equation", + "content": "1 - 2\\mathbb{E}^{-\\frac{\\alpha^2(d - 1)}{2}}" + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "text", + 
"content": " for any " + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "inline_equation", + "content": "0 < \\alpha < 1" + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "text", + "content": ", over the draw of a dataset " + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "text", + "content": " samples from " + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_r" + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "text", + "content": ", the " + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "inline_equation", + "content": "\\epsilon_{te}" + }, + { + "bbox": [ + 104, + 495, + 506, + 523 + ], + "type": "text", + "content": "-susceptibility is upper and lower bounded by" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 158, + 527, + 504, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 527, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 158, + 527, + 504, + 559 + ], + "type": "interline_equation", + "content": "\\operatorname {S u s c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}\\right) \\leq \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (\\epsilon_ {t e} - \\frac {r}{2})}{2 \\widetilde {\\gamma} _ {\\max } \\sigma}\\right) - \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (- \\epsilon_ {t e} - \\frac {r}{2})}{2 \\widetilde {\\gamma} _ {\\min } \\sigma}\\right) \\tag {30}", + "image_path": "158f3cf01c14ff01f2eb25c93ae0e5f17ed40f8821603416e123721c726ce343.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 160, + 556, + 449, + 583 + ], + "type": "interline_equation", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 160, + 556, + 449, + 583 + ], + "spans": [ + { + "bbox": [ + 160, + 556, + 449, + 583 + ], + "type": "interline_equation", + "content": "S u s c (\\widehat {\\theta} ^ {\\epsilon_ {t r}}; \\epsilon_ {t e}) \\geq \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (\\epsilon_ {t e} - \\frac {r}{2})}{2 \\tilde {\\gamma} _ {\\min} \\sigma}\\right) - \\Phi \\left(\\frac {(r - 2 \\epsilon_ {t r}) (- \\epsilon_ {t e} - \\frac {r}{2})}{2 \\tilde {\\gamma} _ {\\max} \\sigma}\\right)", + "image_path": "109d5ed499db8fd4c21e2ba52400585b6be572d3693a28c63d891c5ec1758004.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "text", + "content": "We give the proof in Subsection B.1. Observe that the bounds on the susceptibility score in Corollary B.1 consist of two terms each, where the second term decreases with " + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "text", + "content": ", but the first term increases. We recognise following two regimes: the max " + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "text", + "content": "-margin classifier is close to the ground truth " + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "inline_equation", + "content": "e_1" + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "text", + "content": " or not. Clearly, the ground truth classifier has zero susceptibility and hence classifiers close to the ground truth also have low susceptibility. 
On the other hand, if the max " + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "inline_equation", + "content": "l_2" + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "text", + "content": "-margin classifier is not close to the ground truth, then putting less weight on the first coordinate increases invariance to the perturbations along the first direction. Recall that by Lemma A.1, increasing " + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "text", + "content": ", decreases the weight on the first coordinate of the max " + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "text", + "content": "-margin classifier. Furthermore, in the low sample size regime, we are likely not close to the ground truth. Therefore, the regime where the susceptibility decreases with increasing " + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 594, + 506, + 704 + ], + "type": "text", + "content": " dominates in the low sample size regime." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": "To confirm the result of Corollary B.1, we plot the mean and standard deviation of the susceptibility score of 5 independent experiments. The results are depicted in Figure 7. 
We see that for low standard" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": "error, when the classifier is reasonably close to the optimal classifier, the susceptibility increases slightly with increasing adversarial budget. However, increasing the adversarial training budget, " + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": ", further, causes the susceptibility score to drop greatly. Hence, we can recognize both regimes and validate that, indeed, the second regime dominates in the low sample size setting." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 140, + 250, + 151 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 250, + 151 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 250, + 151 + ], + "type": "text", + "content": "B.1 PROOF OF COROLLARY B.1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 160, + 504, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 160, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 504, + 194 + ], + "type": "text", + "content": "We proof the statement by bounding the robustness of a linear classifier. Recall that the robustness of a classifier is the probability that a classifier does not change its prediction under an adversarial attack. The susceptibility score is then given by" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 234, + 199, + 504, + 214 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 199, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 234, + 199, + 504, + 214 + ], + "type": "interline_equation", + "content": "\\operatorname {S u s c} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = 1 - \\operatorname {R o b} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right). 
\\tag {31}", + "image_path": "586b03f04a28c8cb6d87d70f94facd7b241aa5beef5c32aa9f042dba2d8543b7.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": "The proof idea is as follows: since the perturbations are along the first basis direction, " + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "inline_equation", + "content": "e_1" + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": ", we compute the distance from the robust " + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "inline_equation", + "content": "l_2" + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": "-max margin " + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": " to a point " + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "inline_equation", + "content": "(X,Y)\\sim \\mathbb{P}" + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": ". 
Then, we note that the robustness of " + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": " is given by the probability that the distance along " + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "inline_equation", + "content": "e_1" + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": ", from " + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": " to the decision plane induced by " + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": " is greater than " + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 224, + 504, + 276 + ], + "type": "text", + "content": ". Lastly, we use the non-asymptotic bounds of Lemma A.2." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 281, + 384, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 281, + 384, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 384, + 293 + ], + "type": "text", + "content": "Recall, by Lemma A.1, the max " + }, + { + "bbox": [ + 105, + 281, + 384, + 293 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 105, + 281, + 384, + 293 + ], + "type": "text", + "content": "-margin classifier is of the form of" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 214, + 297, + 503, + 324 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 297, + 503, + 324 + ], + "spans": [ + { + "bbox": [ + 214, + 297, + 503, + 324 + ], + "type": "interline_equation", + "content": "\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}} = \\frac {1}{\\sqrt {(r - 2 \\epsilon_ {\\mathrm {t r}}) ^ {2} + 4 \\tilde {\\gamma} ^ {2}}} \\left[ r - 2 \\epsilon_ {\\mathrm {t r}}, 2 \\tilde {\\gamma} \\tilde {\\theta} \\right]. \\tag {32}", + "image_path": "3d7d9c0280f34035c4bda6839a13e4da39ba7d1367d70048c8985603bd75b786.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "inline_equation", + "content": "(X,Y)\\sim \\mathbb{P}" + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "text", + "content": ". 
The distance along " + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "inline_equation", + "content": "e_1" + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "text", + "content": " to the decision plane induced by " + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "inline_equation", + "content": "\\mathcal{H}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})" + }, + { + "bbox": [ + 104, + 329, + 504, + 354 + ], + "type": "text", + "content": ", is given by" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 213, + 354, + 397, + 387 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 354, + 397, + 387 + ], + "spans": [ + { + "bbox": [ + 213, + 354, + 397, + 387 + ], + "type": "interline_equation", + "content": "d _ {e _ {1}} (X, \\mathcal {H} (\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}})) = \\left| X _ {[ 1 ]} + \\frac {1}{\\widehat {\\theta} _ {[ 0 ]} ^ {\\epsilon_ {\\mathrm {t r}}}} \\sum_ {i = 2} ^ {d} \\widehat {\\theta} _ {[ i ]} ^ {\\epsilon_ {\\mathrm {t r}}} X _ {[ i ]} \\right|.", + "image_path": "6d646db27a8b912af86ae3cdd4a39f1d58ec096a52610292b2bc0f4df8f938b4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 391, + 330, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 391, + 330, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 391, + 330, + 403 + ], + "type": "text", + "content": "Substituting the expression of " + }, + { + "bbox": [ + 105, + 391, + 330, + 403 + ], + "type": "inline_equation", + "content": 
"\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 105, + 391, + 330, + 403 + ], + "type": "text", + "content": " in Equation 32 yields" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 198, + 409, + 411, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 409, + 411, + 441 + ], + "spans": [ + { + "bbox": [ + 198, + 409, + 411, + 441 + ], + "type": "interline_equation", + "content": "d _ {e _ {1}} (X, \\mathcal {H} (\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}})) = \\left| X _ {[ 1 ]} + 2 \\tilde {\\gamma} \\frac {1}{(r - \\epsilon_ {\\mathrm {t r}})} \\sum_ {i = 2} ^ {d} \\tilde {\\theta} _ {[ i ]} X _ {[ i ]} \\right|.", + "image_path": "99860d9119356071a5ad7ab53214b589c733fdc98d47ee41859593fbcf7a6f46.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 448, + 504, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 504, + 471 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 448, + 504, + 471 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 448, + 504, + 471 + ], + "type": "text", + "content": " be a standard normal distributed random variable. 
By definition " + }, + { + "bbox": [ + 104, + 448, + 504, + 471 + ], + "type": "inline_equation", + "content": "\\| \\tilde{\\theta}\\| _2^2 = 1" + }, + { + "bbox": [ + 104, + 448, + 504, + 471 + ], + "type": "text", + "content": " and using that a sum of Gaussian random variables is again a Gaussian random variable, we can write" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 216, + 475, + 392, + 502 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 475, + 392, + 502 + ], + "spans": [ + { + "bbox": [ + 216, + 475, + 392, + 502 + ], + "type": "interline_equation", + "content": "d _ {e _ {1}} \\left(X, \\mathcal {H} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}\\right)\\right) = \\left| X _ {[ 1 ]} + 2 \\widetilde {\\gamma} \\frac {\\sigma}{\\left(r - \\epsilon_ {\\mathrm {t r}}\\right)} N \\right|.", + "image_path": "2b42ec3d8a84a95fc3ef65a0c5794589018b4dd64f5d2bebc43e142dcf5791c9.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "text", + "content": "The robustness of " + }, + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "inline_equation", + "content": "\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}}" + }, + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "text", + "content": " is given by the probability that " + }, + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "inline_equation", + "content": "d_{e_1}(X,\\mathcal{H}(\\widehat{\\theta}^{\\epsilon_{\\mathrm{tr}}})) > \\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "text", + "content": ". 
Hence, using that " + }, + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "inline_equation", + "content": "X_{1} = \\pm \\frac{r}{2}" + }, + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "text", + "content": " with probability " + }, + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "inline_equation", + "content": "\\frac{1}{2}" + }, + { + "bbox": [ + 104, + 507, + 504, + 533 + ], + "type": "text", + "content": ", we get" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 537, + 504, + 566 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 537, + 504, + 566 + ], + "spans": [ + { + "bbox": [ + 138, + 537, + 504, + 566 + ], + "type": "interline_equation", + "content": "\\operatorname {R o b} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = P \\left[ \\frac {r}{2} + 2 \\widetilde {\\gamma} \\frac {\\sigma}{(r - 2 \\epsilon_ {\\mathrm {t r}})} N > \\epsilon_ {\\mathrm {t e}} \\right] + P \\left[ \\frac {r}{2} + 2 \\widetilde {\\gamma} \\frac {\\sigma}{(r - \\epsilon_ {\\mathrm {t r}})} N < - \\epsilon_ {\\mathrm {t e}} \\right]. 
\\tag {33}", + "image_path": "fa75c957e4162767990c179e1be16503733ba10ed0ee0d1f041bccc800ca64e4.jpg" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 145, + 582, + 304, + 677 + ], + "blocks": [ + { + "bbox": [ + 145, + 582, + 304, + 677 + ], + "lines": [ + { + "bbox": [ + 145, + 582, + 304, + 677 + ], + "spans": [ + { + "bbox": [ + 145, + 582, + 304, + 677 + ], + "type": "image", + "image_path": "9f8cac3f607343a7145a010ec398b9228e234301d7eb198f39a7b1e1cb4989d2.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 682, + 299, + 693 + ], + "lines": [ + { + "bbox": [ + 149, + 682, + 299, + 693 + ], + "spans": [ + { + "bbox": [ + 149, + 682, + 299, + 693 + ], + "type": "text", + "content": "(a) Susceptibility score decreases with " + }, + { + "bbox": [ + 149, + 682, + 299, + 693 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 307, + 582, + 466, + 678 + ], + "blocks": [ + { + "bbox": [ + 307, + 582, + 466, + 678 + ], + "lines": [ + { + "bbox": [ + 307, + 582, + 466, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 582, + 466, + 678 + ], + "type": "image", + "image_path": "02ed5f16994099a05932f6d9ffce85ac74e4f13a1e9351bd0d56039b5cbc740e.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 682, + 443, + 693 + ], + "lines": [ + { + "bbox": [ + 328, + 682, + 443, + 693 + ], + "spans": [ + { + "bbox": [ + 328, + 682, + 443, + 693 + ], + "type": "text", + "content": "(b) Robust error decomposition" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "lines": [ + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "spans": [ + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "text", + "content": "Figure 7: We set " + 
}, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "inline_equation", + "content": "r = 6" + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "inline_equation", + "content": "d = 1000" + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "inline_equation", + "content": "n = 50" + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 2.5" + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "text", + "content": ". (a) The average susceptibility score and the standard deviation over 5 independent experiments. Note how the bounds closely predict the susceptibility score. (b) For comparison, we also plot the robust error decomposition in susceptibility and standard error. Even though the susceptibility decreases, the robust error increases with increasing adversarial budget " + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 702, + 505, + 744 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 267, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 267, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 267, + 95 + ], + "type": "text", + "content": "We can rewrite Equation 33 in the form" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 139, + 102, + 471, + 129 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 102, + 471, + 129 + ], + "spans": [ + { + "bbox": [ + 139, + 102, + 471, + 129 + ], + "type": "interline_equation", + "content": "\\operatorname {R o b} \\left(\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}\\right) = P \\left[ N > \\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) \\left(\\epsilon_ {\\mathrm {t e}} - \\frac {r}{2}\\right)}{2 \\widetilde {\\gamma} \\sigma} \\right] + P \\left[ N < \\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) \\left(- \\epsilon_ {\\mathrm {t e}} - \\frac {r}{2}\\right)}{2 \\widetilde {\\gamma} \\sigma} \\right].", + "image_path": "94aab86200121d869edecb0c676f329c6a261cf03371969033076d9ddffeb983.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 137, + 504, + 159 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 137, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 137, + 504, + 159 + ], + "type": "text", + "content": "Recall, that " + }, + { + "bbox": [ + 104, + 137, + 504, + 159 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 137, + 504, + 159 + ], + "type": "text", + "content": " is a standard normal distributed random variable and denote by " + }, + { + "bbox": [ + 104, + 137, + 504, + 159 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 104, + 137, + 504, + 159 + ], + "type": "text", + "content": " the cumulative standard normal density. By definition of the cumulative density function, we find that" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 149, + 167, + 460, + 195 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 167, + 460, + 195 + ], + "spans": [ + { + "bbox": [ + 149, + 167, + 460, + 195 + ], + "type": "interline_equation", + "content": "\\mathrm {R o b} (\\widehat {\\theta} ^ {\\epsilon_ {\\mathrm {t r}}}; \\epsilon_ {\\mathrm {t e}}) = 1 - \\Phi \\left(\\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) (\\epsilon_ {\\mathrm {t e}} - \\frac {r}{2})}{2 \\widetilde {\\gamma} \\sigma}\\right) + \\Phi \\left(\\frac {(r - 2 \\epsilon_ {\\mathrm {t r}}) (- \\epsilon_ {\\mathrm {t e}} - \\frac {r}{2})}{2 \\widetilde {\\gamma} \\sigma}\\right).", + "image_path": "9d5394f268ee39434b9a54f31ad9db2b250191e808520a41476fb26ebcde9f85.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 201, + 504, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 201, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 504, + 225 + ], + "type": "text", + "content": "Substituting the bounds on " + }, + { + "bbox": [ + 104, + 201, + 504, + 225 + ], + "type": "inline_equation", + "content": "\\tilde{\\gamma}" + }, + { + "bbox": [ + 104, + 201, + 504, + 225 + ], + "type": 
"text", + "content": " of Lemma A.2 gives us the non-asymptotic bounds on the robustness score and by Equation 31 also on the susceptibility score." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 243, + 380, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 243, + 380, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 380, + 255 + ], + "type": "text", + "content": "C EXPERIMENTAL DETAILS ON THE LINEAR MODEL" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 270, + 408, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 408, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 408, + 282 + ], + "type": "text", + "content": "In this section, we provide detailed experimental details to Figures 3 and 4." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "text", + "content": "We implement adversarial logistic regression using stochastic gradient descent with a learning rate of 0.01. Note that logistic regression converges logarithmically to the robust max " + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "text", + "content": "-margin solution. As a consequence of the slow convergence, we train for up to " + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "inline_equation", + "content": "10^{7}" + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "text", + "content": " epochs. 
Both during training and test time we solve " + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\max_{x_i' \\in T(x_i; \\epsilon_{\\mathrm{tr}})} L(f_\\theta(x_i') y_i)" + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "text", + "content": " exactly. Hence, we exactly measure the robust error. Unless specified otherwise, we set " + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\sigma = 1" + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "inline_equation", + "content": "r = 12" + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 4" + }, + { + "bbox": [ + 104, + 286, + 506, + 344 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": "Experimental details on Figure 3 (a) We draw 5 datasets with " + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "inline_equation", + "content": "n = 50" + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": " samples and input dimension " + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "inline_equation", + "content": "d = 1000" + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": " from the distribution " + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "inline_equation", + "content": "\\mathbb{P}" + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": ". 
We then run adversarial logistic regression on all 5 datasets with adversarial training budgets, " + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 1" + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": " to 5. To compute the resulting robust error gap of all the obtained classifiers, we use a test set of size " + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "inline_equation", + "content": "10^{6}" + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": ". Lastly, we compute the lower bound given in part 2. of Theorem 3.1. (b) We draw 5 datasets with different sizes " + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": " between 50 and " + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "inline_equation", + "content": "10^{4}" + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": ". We take an input dimension of " + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "inline_equation", + "content": "d = 10^{4}" + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": " and plot the mean and standard deviation of the robust error after adversarial and standard logistic regression over the 5 samples.(c) We again draw 5 datasets for each " + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "inline_equation", + "content": "d / n" + }, + { + "bbox": [ + 104, + 357, + 506, + 446 + ], + "type": "text", + "content": " constellation and compute the robust error gap for each dataset." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "text", + "content": "Experimental details on Figure 4 For both (a) and (b) we set " + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "inline_equation", + "content": "d = 1000" + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 4" + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "text", + "content": ", and vary the adversarial training budget " + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "inline_equation", + "content": "(\\epsilon_{\\mathrm{tr}})" + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "text", + "content": " from 1 to 5. For every constellation of " + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "text", + "content": ", we draw 10 datasets and show the average and standard deviation of the resulting robust errors. In (b), we set " + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "inline_equation", + "content": "n = 50" + }, + { + "bbox": [ + 104, + 460, + 504, + 495 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 513, + 418, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 513, + 418, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 513, + 418, + 526 + ], + "type": "text", + "content": "D EXPERIMENTAL DETAILS ON THE WATERBIRDS DATASET" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 539, + 505, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 505, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 505, + 586 + ], + "type": "text", + "content": "In this section, we discuss the experimental details and construction of the Waterbirds dataset in more detail. We also provide ablation studies of attack parameters such as the size of the motion blur kernel, plots of the robust error decomposition with increasing " + }, + { + "bbox": [ + 104, + 539, + 505, + 586 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 539, + 505, + 586 + ], + "type": "text", + "content": ", and some experiments using early stopping." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 600, + 251, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 600, + 251, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 600, + 251, + 611 + ], + "type": "text", + "content": "D.1 THE WATERBIRDS DATASET" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 621, + 507, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 507, + 733 + ], + "type": "text", + "content": "To build the Waterbirds dataset, we use the CUB-200 dataset Welinder et al. (2010), which contains images and labels of 200 bird species, and 4 background classes (forest, jungle/bamboo, water ocean, water lake natural) of the Places dataset Zhou et al. (2017). 
The aim is to recognize whether or not the bird, in a given image, is a waterbird (e.g. an albatros) or a landbird (e.g. a woodpecker). To create the dataset, we randomly sample equally many water- as landbirds from the CUB-200 dataset. Thereafter, we sample for each bird image a random background image. Then, we use the segmentation provided in the CUB-200 dataset to segment the birds from their original images and paste them onto the randomly sampled backgrounds. The resulting images have a size of " + }, + { + "bbox": [ + 104, + 621, + 507, + 733 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 621, + 507, + 733 + ], + "type": "text", + "content": ". Moreover, we also resize the segmentations such that we have the correct segmentation profiles of the birds in the new dataset as well. For the concrete implementation, we use the code provided by Sagawa et al. (2020)." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 79, + 187, + 156 + ], + "blocks": [ + { + "bbox": [ + 111, + 79, + 187, + 156 + ], + "lines": [ + { + "bbox": [ + 111, + 79, + 187, + 156 + ], + "spans": [ + { + "bbox": [ + 111, + 79, + 187, + 156 + ], + "type": "image", + "image_path": 
"308fa5b96594d839d7a3f2b5ad9586a1aa7a275e5d7209cfcdef6747d2d50ec9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 160, + 171, + 171 + ], + "lines": [ + { + "bbox": [ + 127, + 160, + 171, + 171 + ], + "spans": [ + { + "bbox": [ + 127, + 160, + 171, + 171 + ], + "type": "text", + "content": "(a) Original" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 188, + 79, + 266, + 156 + ], + "blocks": [ + { + "bbox": [ + 188, + 79, + 266, + 156 + ], + "lines": [ + { + "bbox": [ + 188, + 79, + 266, + 156 + ], + "spans": [ + { + "bbox": [ + 188, + 79, + 266, + 156 + ], + "type": "image", + "image_path": "3214b31a4dbf850b77bead8e8eefad8b9b38d23cc6c5b73e696f707a259ce557.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 206, + 160, + 248, + 171 + ], + "lines": [ + { + "bbox": [ + 206, + 160, + 248, + 171 + ], + "spans": [ + { + "bbox": [ + 206, + 160, + 248, + 171 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 206, + 160, + 248, + 171 + ], + "type": "inline_equation", + "content": "M = 5" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 267, + 79, + 343, + 156 + ], + "blocks": [ + { + "bbox": [ + 267, + 79, + 343, + 156 + ], + "lines": [ + { + "bbox": [ + 267, + 79, + 343, + 156 + ], + "spans": [ + { + "bbox": [ + 267, + 79, + 343, + 156 + ], + "type": "image", + "image_path": "20d37da60f42bee0071ee398b1cf254b7295c8d32f8e0b65adbcfd1aea602368.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 160, + 328, + 171 + ], + "lines": [ + { + "bbox": [ + 282, + 160, + 328, + 171 + ], + "spans": [ + { + "bbox": [ + 282, + 160, + 328, + 171 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 282, + 160, + 328, + 171 + ], + "type": 
"inline_equation", + "content": "M = 10" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 181, + 504, + 222 + ], + "lines": [ + { + "bbox": [ + 104, + 181, + 504, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 504, + 222 + ], + "type": "text", + "content": "Figure 8: An ablation study of the motion blur kernel size, which corresponds to the severity level of the blur. For increasing " + }, + { + "bbox": [ + 104, + 181, + 504, + 222 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 181, + 504, + 222 + ], + "type": "text", + "content": ", the severity of the motion blur increases. In particular, note that for " + }, + { + "bbox": [ + 104, + 181, + 504, + 222 + ], + "type": "inline_equation", + "content": "M = 15" + }, + { + "bbox": [ + 104, + 181, + 504, + 222 + ], + "type": "text", + "content": " and even " + }, + { + "bbox": [ + 104, + 181, + 504, + 222 + ], + "type": "inline_equation", + "content": "M = 20" + }, + { + "bbox": [ + 104, + 181, + 504, + 222 + ], + "type": "text", + "content": ", the bird remains recognizable: we do not semantically change the class, i.e. the perturbations are consistent." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 344, + 80, + 421, + 156 + ], + "blocks": [ + { + "bbox": [ + 344, + 80, + 421, + 156 + ], + "lines": [ + { + "bbox": [ + 344, + 80, + 421, + 156 + ], + "spans": [ + { + "bbox": [ + 344, + 80, + 421, + 156 + ], + "type": "image", + "image_path": "1946f37d32cd5f414b8f93df16b46e47268ff322809cfafd6be88aa9f758013b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 360, + 160, + 405, + 171 + ], + "lines": [ + { + "bbox": [ + 360, + 160, + 405, + 171 + ], + "spans": [ + { + "bbox": [ + 360, + 160, + 405, + 171 + ], + "type": "text", + "content": "(d) " + }, + { + "bbox": [ + 360, + 160, + 405, + 171 + ], + "type": "inline_equation", + "content": "M = 15" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 422, + 80, + 498, + 156 + ], + "blocks": [ + { + "bbox": [ + 422, + 80, + 498, + 156 + ], + "lines": [ + { + "bbox": [ + 422, + 80, + 498, + 156 + ], + "spans": [ + { + "bbox": [ + 422, + 80, + 498, + 156 + ], + "type": "image", + "image_path": "dd41066b90a2b7ef8df0dacd0b2e195f8a7cb2eb31d7ceb7e710e01291e23dd7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 438, + 160, + 484, + 171 + ], + "lines": [ + { + "bbox": [ + 438, + 160, + 484, + 171 + ], + "spans": [ + { + "bbox": [ + 438, + 160, + 484, + 171 + ], + "type": "text", + "content": "(e) " + }, + { + "bbox": [ + 438, + 160, + 484, + 171 + ], + "type": "inline_equation", + "content": "M = 20" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 256, + 279, + 267 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 256, + 279, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 256, + 279, + 267 + ], + "type": "text", + 
"content": "D.2 EXPERIMENTAL TRAINING DETAILS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 282, + 506, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 506, + 403 + ], + "type": "text", + "content": "Following the example of Sagawa et al. (2020), we use a ResNet50 or ResNet18 pretrained on the ImageNet dataset for all experiments in the main text, a weight-decay of " + }, + { + "bbox": [ + 104, + 282, + 506, + 403 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 104, + 282, + 506, + 403 + ], + "type": "text", + "content": ", and train for 300 epochs using the Adam optimizer. Extensive fine-tuning of the learning rate resulted in an optimal learning rate of 0.006 for all experiments using the adversarial illumination attack and a pretrained ResNet50. For the experiments considering the adversarial illumination attack using a pretrained VGG19 or Densenet121 network, we found optimal learning rates of 0.001 and 0.002 respectively. Lastly, we found that for all experiments using the motion blur attack a learning rate of 0.0011 was optimal. Adversarial training is implemented as suggested in Madry et al. (2018): at each iteration we find the worst case perturbation with an exact or approximate method. In all our experiments, the resulting classifier interpolates the training set. We plot the mean over all runs and the standard deviation of the mean." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 432, + 309, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 432, + 309, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 309, + 443 + ], + "type": "text", + "content": "D.3 SPECIFICS TO THE MOTION BLUR ATTACK" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 459, + 504, + 525 + ], + "type": "text", + "content": "Fast moving objects or animals are hard to photograph due to motion blur. Hence, when trying to classify or detect moving objects from images, it is imperative that the classifier is robust against reasonable levels of motion blur. We implement the attack as follows. First, we segment the bird from the original image, then use a blur filter and lastly, we paste the blurred bird back onto the background. We are able to apply more severe blur, by enlarging the kernel of the filter. See Figure 8 for an ablation study of the kernel size." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "text", + "content": "The motion blur filter is implemented as follows. 
We use a kernel of size " + }, + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "inline_equation", + "content": "M \\times M" + }, + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "text", + "content": " and build the filter as follows: we fill the row " + }, + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "inline_equation", + "content": "(M - 1)/2" + }, + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "text", + "content": " of the kernel with the value " + }, + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "inline_equation", + "content": "1 / M" + }, + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "text", + "content": ". Thereafter, we use the 2D convolution implementation of OpenCV (filter2D) Bradski (2000) to convolve the kernel with the image. Note that applying a rotation before the convolution to the kernel, changes the direction of the resulting motion blur. Lastly, we find the most detrimental level of motion blur using a list-search over all levels up to " + }, + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "inline_equation", + "content": "M_{max}" + }, + { + "bbox": [ + 104, + 530, + 504, + 597 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 108, + 628, + 164, + 684 + ], + "blocks": [ + { + "bbox": [ + 108, + 628, + 164, + 684 + ], + "lines": [ + { + "bbox": [ + 108, + 628, + 164, + 684 + ], + "spans": [ + { + "bbox": [ + 108, + 628, + 164, + 684 + ], + "type": "image", + "image_path": "f614e660b88a78c7880f2ce5e90593675e5a477d53b9332bedc7d55c309f8c42.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 687, + 160, + 697 + ], + "lines": [ + { + "bbox": [ + 111, + 687, + 160, + 697 + ], + "spans": [ + { + "bbox": [ + 111, + 687, + 160, + 697 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 111, + 687, + 160, + 697 + ], + "type": "inline_equation", + "content": "\\epsilon = -0.3" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "type": "text", + "content": "Figure 9: An ablation study of the different lighting changes of the adversarial illumination attack. Even though the directed attack perturbs the signal component in the image, the bird remains recognizable in all cases." 
+ } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 164, + 628, + 219, + 684 + ], + "blocks": [ + { + "bbox": [ + 164, + 628, + 219, + 684 + ], + "lines": [ + { + "bbox": [ + 164, + 628, + 219, + 684 + ], + "spans": [ + { + "bbox": [ + 164, + 628, + 219, + 684 + ], + "type": "image", + "image_path": "b58a0da6bfacfca42a7873a0c8f5585149d0f514f1621f23d9e4363f6ece74e7.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 168, + 688, + 217, + 697 + ], + "lines": [ + { + "bbox": [ + 168, + 688, + 217, + 697 + ], + "spans": [ + { + "bbox": [ + 168, + 688, + 217, + 697 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 168, + 688, + 217, + 697 + ], + "type": "inline_equation", + "content": "\\epsilon = -0.2" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 221, + 628, + 276, + 684 + ], + "blocks": [ + { + "bbox": [ + 221, + 628, + 276, + 684 + ], + "lines": [ + { + "bbox": [ + 221, + 628, + 276, + 684 + ], + "spans": [ + { + "bbox": [ + 221, + 628, + 276, + 684 + ], + "type": "image", + "image_path": "6c7f4b7890ebdbafdb0111110a3ef64cba4d583f402b03b817f39789467e475d.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 688, + 273, + 698 + ], + "lines": [ + { + "bbox": [ + 225, + 688, + 273, + 698 + ], + "spans": [ + { + "bbox": [ + 225, + 688, + 273, + 698 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 225, + 688, + 273, + 698 + ], + "type": "inline_equation", + "content": "\\epsilon = -0.1" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 277, + 628, + 332, + 684 + ], + "blocks": [ + { + "bbox": [ + 277, + 628, + 332, + 684 + ], + "lines": [ + { + "bbox": [ + 277, + 628, + 332, + 684 + ], + 
"spans": [ + { + "bbox": [ + 277, + 628, + 332, + 684 + ], + "type": "image", + "image_path": "22d8287926a9871b28773de529a69f7e4249eea73095cb27a7325ea0fc2af986.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 688, + 327, + 698 + ], + "lines": [ + { + "bbox": [ + 282, + 688, + 327, + 698 + ], + "spans": [ + { + "bbox": [ + 282, + 688, + 327, + 698 + ], + "type": "text", + "content": "(d) Original" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 333, + 628, + 389, + 684 + ], + "blocks": [ + { + "bbox": [ + 333, + 628, + 389, + 684 + ], + "lines": [ + { + "bbox": [ + 333, + 628, + 389, + 684 + ], + "spans": [ + { + "bbox": [ + 333, + 628, + 389, + 684 + ], + "type": "image", + "image_path": "193de382e84891a3e7b1626ab841a78eb9eb103df9cc6234bd3c3bc6827efcf9.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 341, + 688, + 381, + 697 + ], + "lines": [ + { + "bbox": [ + 341, + 688, + 381, + 697 + ], + "spans": [ + { + "bbox": [ + 341, + 688, + 381, + 697 + ], + "type": "text", + "content": "(e) " + }, + { + "bbox": [ + 341, + 688, + 381, + 697 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.1" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 390, + 628, + 445, + 684 + ], + "blocks": [ + { + "bbox": [ + 390, + 628, + 445, + 684 + ], + "lines": [ + { + "bbox": [ + 390, + 628, + 445, + 684 + ], + "spans": [ + { + "bbox": [ + 390, + 628, + 445, + 684 + ], + "type": "image", + "image_path": "8e257db8389ee975e1a3b8730315c3d4fc6b9c07b23338ffc622353aa83da60f.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 688, + 438, + 697 + ], + "lines": [ + { + "bbox": [ + 397, + 688, + 438, + 697 + ], + "spans": [ + { + "bbox": [ + 397, + 688, + 438, + 
697 + ], + "type": "text", + "content": "(f) " + }, + { + "bbox": [ + 397, + 688, + 438, + 697 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.2" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 446, + 628, + 501, + 684 + ], + "blocks": [ + { + "bbox": [ + 446, + 628, + 501, + 684 + ], + "lines": [ + { + "bbox": [ + 446, + 628, + 501, + 684 + ], + "spans": [ + { + "bbox": [ + 446, + 628, + 501, + 684 + ], + "type": "image", + "image_path": "7e2e802520cbd90b9078ef4c8d1940a88b0111080a9d57a6b25547991daf347c.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 453, + 688, + 495, + 698 + ], + "lines": [ + { + "bbox": [ + 453, + 688, + 495, + 698 + ], + "spans": [ + { + "bbox": [ + 453, + 688, + 495, + 698 + ], + "type": "text", + "content": "(g) " + }, + { + "bbox": [ + 453, + 688, + 495, + 698 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.3" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 79, + 239, + 157 + ], + "blocks": [ + { + "bbox": [ + 112, + 79, + 239, + 157 + ], + "lines": [ + { + "bbox": [ + 112, + 79, + 239, + 157 + 
], + "spans": [ + { + "bbox": [ + 112, + 79, + 239, + 157 + ], + "type": "image", + "image_path": "072ea61b93e47129f9eec199a253eb05855f7b3f005bae8ff79fa5d5b3a77b9e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 162, + 206, + 172 + ], + "lines": [ + { + "bbox": [ + 146, + 162, + 206, + 172 + ], + "spans": [ + { + "bbox": [ + 146, + 162, + 206, + 172 + ], + "type": "text", + "content": "(a) Robust error" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 86, + 367, + 157 + ], + "blocks": [ + { + "bbox": [ + 242, + 86, + 367, + 157 + ], + "lines": [ + { + "bbox": [ + 242, + 86, + 367, + 157 + ], + "spans": [ + { + "bbox": [ + 242, + 86, + 367, + 157 + ], + "type": "image", + "image_path": "7996db4272d7089d55bdfb310602478efd147e10a8f85b3c1c51e28552951112.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 161, + 339, + 172 + ], + "lines": [ + { + "bbox": [ + 272, + 161, + 339, + 172 + ], + "spans": [ + { + "bbox": [ + 272, + 161, + 339, + 172 + ], + "type": "text", + "content": "(b) Standard error" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 182, + 504, + 233 + ], + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 233 + ], + "type": "text", + "content": "Figure 10: The robust error decomposition of the experiments depicted in Figure 10a. The plots depict the mean and standard deviation of the mean over several independent experiments. We see that, in comparison to standard training, the reduction in susceptibility for adversarial training is minimal in the low sample size regime. Moreover, the increase in standard error of adversarial training is quite severe, leading to an overall increase in robust error in the low sample size regime." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 372, + 80, + 497, + 156 + ], + "blocks": [ + { + "bbox": [ + 372, + 80, + 497, + 156 + ], + "lines": [ + { + "bbox": [ + 372, + 80, + 497, + 156 + ], + "spans": [ + { + "bbox": [ + 372, + 80, + 497, + 156 + ], + "type": "image", + "image_path": "5ceff4dafc9a91a71f450435d67582f561781041746756b7cda66dc823313a9d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 402, + 161, + 466, + 172 + ], + "lines": [ + { + "bbox": [ + 402, + 161, + 466, + 172 + ], + "spans": [ + { + "bbox": [ + 402, + 161, + 466, + 172 + ], + "type": "text", + "content": "(c) Susceptibility" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 254, + 376, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 254, + 376, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 254, + 376, + 264 + ], + "type": "text", + "content": "D.4 SPECIFICS TO THE ADVERSARIAL ILLUMINATION ATTACK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "text", + "content": "An adversary can hide objects using poor lightning conditions, which can for example arise from shadows or bright spots. To model poor lighting conditions on the object only (or targeted to the object), we use the adversarial illumination attack. The attack is constructed as follows: First, we segment the bird from their background. 
Then we apply an additive constant " + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "text", + "content": " to the bird, where the absolute size of the constant satisfies " + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "inline_equation", + "content": "|\\epsilon| < \\epsilon_{\\mathrm{te}} = 0.3" + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "text", + "content": ". Thereafter, we clip the values of the bird images to " + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "inline_equation", + "content": "[0,1]" + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "text", + "content": ", and lastly, we paste the bird back onto the background. See Figure 9 for an ablation of the parameter " + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "text", + "content": " of the attack. It is non-trivial how to (approximately) find the worst perturbation. We find an approximate solution by searching over all perturbations with increments of size " + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} / K_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "text", + "content": ". Denote by seg, the segmentation profile of the image " + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 274, + 504, + 374 + ], + "type": "text", + "content": ". 
We consider all perturbed images in the form of" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 154, + 379, + 453, + 404 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 379, + 453, + 404 + ], + "spans": [ + { + "bbox": [ + 154, + 379, + 453, + 404 + ], + "type": "interline_equation", + "content": "x _ {p e r t} = (1 - \\operatorname {s e g}) x + \\operatorname {s e g} \\left(x + \\epsilon \\frac {K}{K _ {\\max}} 1 _ {2 5 5 \\times 2 5 5}\\right), K \\in [ - K _ {\\max }, K _ {\\max } ].", + "image_path": "a4d912e20ce9960f686c8b779d7163fda7b1305c7e97acbda57676e51eb4ce9f.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 408, + 504, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 408, + 504, + 432 + ], + "spans": [ + { + "bbox": [ + 104, + 408, + 504, + 432 + ], + "type": "text", + "content": "During training time we set " + }, + { + "bbox": [ + 104, + 408, + 504, + 432 + ], + "type": "inline_equation", + "content": "K_{max} = 16" + }, + { + "bbox": [ + 104, + 408, + 504, + 432 + ], + "type": "text", + "content": " and therefore search over 33 possible images. During test time we search over 65 images (" + }, + { + "bbox": [ + 104, + 408, + 504, + 432 + ], + "type": "inline_equation", + "content": "K_{max} = 32" + }, + { + "bbox": [ + 104, + 408, + 504, + 432 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 444, + 208, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 444, + 208, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 208, + 455 + ], + "type": "text", + "content": "D.5 EARLY STOPPING" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 464, + 504, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 575 + ], + "type": "text", + "content": "In all our experiments on the Waterbirds dataset, a parameter search lead to an optimal weight-decay and learning rate of " + }, + { + "bbox": [ + 104, + 464, + 504, + 575 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 104, + 464, + 504, + 575 + ], + "type": "text", + "content": " and 0.006 respectively. Another common regularization technique is early stopping, where one stops training on the epoch where the classifier achieves minimal robust error on a hold-out dataset. To understand if early stopping can mitigate the effect of adversarial training aggregating robust generalization in comparison to standard training, we perform the following experiment. On the Waterbirds dataset of size " + }, + { + "bbox": [ + 104, + 464, + 504, + 575 + ], + "type": "inline_equation", + "content": "n = 20" + }, + { + "bbox": [ + 104, + 464, + 504, + 575 + ], + "type": "text", + "content": " and considering the adversarial illumination attack, we compare standard training with early stopping and adversarial training " + }, + { + "bbox": [ + 104, + 464, + 504, + 575 + ], + "type": "inline_equation", + "content": "(\\epsilon_{\\mathrm{tr}} = \\epsilon_{\\mathrm{te}} = 0.3)" + }, + { + "bbox": [ + 104, + 464, + 504, + 575 + ], + "type": "text", + "content": " with early stopping. 
Considering several independent experiments, early stopped adversarial training has an average robust error of 33.5 a early stopped standard training 29.1. Hence, early stopping does decrease the robust error gap, but does not close it." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 589, + 327, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 589, + 327, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 589, + 327, + 600 + ], + "type": "text", + "content": "D.6 ERROR DECOMPOSITION WITH INCREASING " + }, + { + "bbox": [ + 105, + 589, + 327, + 600 + ], + "type": "inline_equation", + "content": "n" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 609, + 504, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 676 + ], + "type": "text", + "content": "In Figure 10a and 11a, we see that adversarial training hurts robust generalization in the small sample size regime. For completeness, we plot the robust error composition for adversarial and standard training in Figure 10. We see that in the low sample size regime, the drop in susceptibility that adversarial training achieves in comparison to standard training, is much lower than the increase in standard error. Conversely, in the high sample regime, the drop of susceptibility from adversarial training over standard training is much bigger than the increase in standard error." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 689, + 257, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 257, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 257, + 700 + ], + "type": "text", + "content": "D.7 DIFFERENT ARCHITECTURES" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 709, + 505, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 732 + ], + "type": "text", + "content": "For completeness, we also performed similar experiments on the waterbirds dataset using the adversarial illumination attack with different network architectures as with the pretrained ResNet50" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 79, + 239, + 156 + ], + "blocks": [ + { + "bbox": [ + 112, + 79, + 239, + 156 + ], + "lines": [ + { + "bbox": [ + 112, + 79, + 239, + 156 + ], + "spans": [ + { + "bbox": [ + 112, + 79, + 239, + 156 + ], + "type": "image", + "image_path": "1fa6f9a850b1bbc638d3bfaf586a74d1012c61196e4406d743a533a694153e24.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 162, + 206, + 172 + ], + "lines": [ + { + 
"bbox": [ + 146, + 162, + 206, + 172 + ], + "spans": [ + { + "bbox": [ + 146, + 162, + 206, + 172 + ], + "type": "text", + "content": "(a) Robust error" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 79, + 367, + 157 + ], + "blocks": [ + { + "bbox": [ + 242, + 79, + 367, + 157 + ], + "lines": [ + { + "bbox": [ + 242, + 79, + 367, + 157 + ], + "spans": [ + { + "bbox": [ + 242, + 79, + 367, + 157 + ], + "type": "image", + "image_path": "97732f1c42e0117a6f40b172ac296a32fcc2bedddc50450cbf46cff242faf72d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 161, + 339, + 172 + ], + "lines": [ + { + "bbox": [ + 272, + 161, + 339, + 172 + ], + "spans": [ + { + "bbox": [ + 272, + 161, + 339, + 172 + ], + "type": "text", + "content": "(b) Standard error" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 182, + 504, + 232 + ], + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 232 + ], + "type": "text", + "content": "Figure 11: The robust error decomposition of the experiments depicted in Figure 6. The plots depict the mean and standard deviation of the mean over several independent experiments. We see that, in comparison to standard training, the reduction in susceptibility for adversarial training is minimal in the low sample size regime. Moreover, the increase in standard error of adversarial training is quite severe, leading to an overall increase in robust error in the low sample size regime." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 372, + 81, + 497, + 156 + ], + "blocks": [ + { + "bbox": [ + 372, + 81, + 497, + 156 + ], + "lines": [ + { + "bbox": [ + 372, + 81, + 497, + 156 + ], + "spans": [ + { + "bbox": [ + 372, + 81, + 497, + 156 + ], + "type": "image", + "image_path": "4f86b66d0ee15c4dc58a455fbc261eda21a0b72a5331db55da3b0fc8a4a05f8e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 402, + 161, + 466, + 172 + ], + "lines": [ + { + "bbox": [ + 402, + 161, + 466, + 172 + ], + "spans": [ + { + "bbox": [ + 402, + 161, + 466, + 172 + ], + "type": "text", + "content": "(c) Susceptibility" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 255, + 504, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 289 + ], + "type": "text", + "content": "network. In particular, we considered the following pretrained network architectures: VGG19 and Densenet121. See Figure 12 for the results. We observe that accuracies, adversarial training hurts in the low sample size regime, but helps when enough data is available." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 145, + 298, + 304, + 396 + ], + "blocks": [ + { + "bbox": [ + 145, + 298, + 304, + 396 + ], + "lines": [ + { + "bbox": [ + 145, + 298, + 304, + 396 + ], + "spans": [ + { + "bbox": [ + 145, + 298, + 304, + 396 + ], + "type": "image", + "image_path": "c2545b9ee64e06e61529db60614c35b0feef3a5b2579bf760b328868e3263af0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 203, + 399, + 246, + 410 + ], + "lines": [ + { + "bbox": [ + 203, + 399, + 246, + 410 + ], + "spans": [ + { + "bbox": [ + 203, + 399, + 246, + 410 + ], + "type": "text", + "content": "(a) VGG19" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 420, + 504, + 460 + ], + "lines": [ + { + "bbox": [ + 104, + 420, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 420, + 504, + 460 + ], + "type": "text", + "content": "Figure 12: The robust error of adversarial training and standard training with increasing sample size using the adversarial illumination attack with " + }, + { + "bbox": [ + 104, + 420, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 0.3" + }, + { + "bbox": [ + 104, + 420, + 504, + 460 + ], + "type": "text", + "content": ". We depict the mean and the standard deviation of the mean for multiple runs. Observe that across models, adversarial training hurts in the low sample size regime, but helps when enough samples are available." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 298, + 465, + 396 + ], + "blocks": [ + { + "bbox": [ + 307, + 298, + 465, + 396 + ], + "lines": [ + { + "bbox": [ + 307, + 298, + 465, + 396 + ], + "spans": [ + { + "bbox": [ + 307, + 298, + 465, + 396 + ], + "type": "image", + "image_path": "4a1d525f26b278bc0cc15364a502d8f4b8a3789e35eb9f03eeb528ad3d61a44f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 354, + 399, + 415, + 409 + ], + "lines": [ + { + "bbox": [ + 354, + 399, + 415, + 409 + ], + "spans": [ + { + "bbox": [ + 354, + 399, + 415, + 409 + ], + "type": "text", + "content": "(b) Densenet121" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 483, + 366, + 494 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 366, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 366, + 494 + ], + "type": "text", + "content": "D.8 UNDIRECTED ATTACKS ON THE WATERBIRDS DATASET" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 504, + 504, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 504, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 504, + 537 + ], + "type": "text", + "content": "In this section, we analyse adversarial training for " + }, + { + "bbox": [ + 104, + 504, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 504, + 504, + 537 + ], + "type": "text", + "content": "-and " + }, + { + "bbox": [ + 104, + 504, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\ell_{\\infty}" + }, + { + "bbox": [ + 104, + 504, + 504, + 537 + ], + "type": "text", + "content": "-ball perturbations in the small sample size regime. 
We observe that while adversarial training hurts standard generalization, it helps robust generalization." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "text", + "content": "Adversarial training with " + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "text", + "content": "-balls We train and test with small " + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "text", + "content": "-balls, " + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 0.2" + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "text", + "content": ", such that the networks trained with standard training achieve a non-zero robust accuracy and the networks trained with adversarial training achieve non-trivial standard accuracy. We see in Figure 13, that adversarial training with " + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "text", + "content": "-balls hurts standard generalization while increasing robust generalization. Moreover, in Figure 14, we see that also in the very small sample size regime, adversarial training with increasing " + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 551, + 506, + 617 + ], + "type": "text", + "content": " increases the standard error, but reduces the susceptibility." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 630, + 506, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 630, + 506, + 664 + ], + "spans": [ + { + "bbox": [ + 104, + 630, + 506, + 664 + ], + "type": "text", + "content": "Adversarial training with " + }, + { + "bbox": [ + 104, + 630, + 506, + 664 + ], + "type": "inline_equation", + "content": "\\ell_{\\infty}" + }, + { + "bbox": [ + 104, + 630, + 506, + 664 + ], + "type": "text", + "content": "-balls We also consider " + }, + { + "bbox": [ + 104, + 630, + 506, + 664 + ], + "type": "inline_equation", + "content": "\\ell_{\\infty}" + }, + { + "bbox": [ + 104, + 630, + 506, + 664 + ], + "type": "text", + "content": "-ball perturbation. We see in Figure 15 that even the smallest perturbation budget " + }, + { + "bbox": [ + 104, + 630, + 506, + 664 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = \\frac{2}{255}" + }, + { + "bbox": [ + 104, + 630, + 506, + 664 + ], + "type": "text", + "content": ", standard training has robust error of 100 percent. On the other hand, adversarial training achieves low, but non-zero robust error." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "Experimental details We use an ImageNet pretrained ResNet34 and train for 300 epochs. Moreover, for reliable robust error and susceptibility evaluation of the attacks we use AutoAttack Croce & Hein (2020). All networks were trained such that the network interpolates the training dataset and has low robust error with non-trivial standard error. 
For the networks trained using standard training we use a learning rate of 0.006 and for the networks trained with adversarial training we used a learning" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 79, + 239, + 156 + ], + "blocks": [ + { + "bbox": [ + 112, + 79, + 239, + 156 + ], + "lines": [ + { + "bbox": [ + 112, + 79, + 239, + 156 + ], + "spans": [ + { + "bbox": [ + 112, + 79, + 239, + 156 + ], + "type": "image", + "image_path": "4813d8c68584c6fd6f0d091879855b4edf8e67863a56e9c420fecb0e9845185d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 162, + 206, + 172 + ], + "lines": [ + { + "bbox": [ + 146, + 162, + 206, + 172 + ], + "spans": [ + { + "bbox": [ + 146, + 162, + 206, + 172 + ], + "type": "text", + "content": "(a) Robust error" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 79, + 369, + 157 + ], + "blocks": [ + { + "bbox": [ + 242, + 79, + 369, + 157 + ], + "lines": [ + { + "bbox": [ + 242, + 79, + 369, + 157 + ], + "spans": [ + { + "bbox": [ + 242, + 79, + 369, + 157 + ], + "type": "image", + "image_path": "2bf552c9eac55f28ee8b4c7e6b56d38077f2fe4ec3e53879d3f3f376fb952996.jpg" + } + 
] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 161, + 339, + 172 + ], + "lines": [ + { + "bbox": [ + 272, + 161, + 339, + 172 + ], + "spans": [ + { + "bbox": [ + 272, + 161, + 339, + 172 + ], + "type": "text", + "content": "(b) Standard error" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 372, + 79, + 498, + 157 + ], + "blocks": [ + { + "bbox": [ + 372, + 79, + 498, + 157 + ], + "lines": [ + { + "bbox": [ + 372, + 79, + 498, + 157 + ], + "spans": [ + { + "bbox": [ + 372, + 79, + 498, + 157 + ], + "type": "image", + "image_path": "612ea7ac44cfeb101c06298a654da19a8af7b97a6933a613ea07a3d154a71d38.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 402, + 161, + 466, + 172 + ], + "lines": [ + { + "bbox": [ + 402, + 161, + 466, + 172 + ], + "spans": [ + { + "bbox": [ + 402, + 161, + 466, + 172 + ], + "type": "text", + "content": "(c) Susceptibility" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 242, + 238, + 370, + 316 + ], + "blocks": [ + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "type": "text", + "content": "Figure 13: The robust error decomposition of adversarial training with " + }, + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "type": "text", + "content": "-balls of size " + }, + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 0.2" + }, + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "type": "text", + "content": " and test adversaries with " + }, + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + 
"type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "type": "text", + "content": "-balls of size " + }, + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 0.2" + }, + { + "bbox": [ + 104, + 182, + 506, + 222 + ], + "type": "text", + "content": ". The plots depict the mean and standard deviation of the mean over several independent experiments. We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 242, + 238, + 370, + 316 + ], + "lines": [ + { + "bbox": [ + 242, + 238, + 370, + 316 + ], + "spans": [ + { + "bbox": [ + 242, + 238, + 370, + 316 + ], + "type": "image", + "image_path": "0bebaf17241c57e738fd9ae7f28a94ccdfccb03e957d2362bfbbbf5c4f468af2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 112, + 392, + 239, + 470 + ], + "blocks": [ + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "lines": [ + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "text", + "content": "Figure 14: The robust error decomposition of adversarial training in function of " + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "text", + "content": " in the small sample size regime " + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "inline_equation", + "content": "n = 20" + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "text", + "content": ". 
We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers with increasing " + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "text", + "content": ". We take " + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "inline_equation", + "content": "n = 20" + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "text", + "content": " and consider test adversaries with " + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "text", + "content": "-balls of size " + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = 0.2" + }, + { + "bbox": [ + 104, + 325, + 504, + 376 + ], + "type": "text", + "content": ". The plots depict the mean and standard deviation of the mean over several independent experiments." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 112, + 392, + 239, + 470 + ], + "lines": [ + { + "bbox": [ + 112, + 392, + 239, + 470 + ], + "spans": [ + { + "bbox": [ + 112, + 392, + 239, + 470 + ], + "type": "image", + "image_path": "9650e61ba70fd559ddb12196d5584aa791956547d67ee4f0ccf561562bdbebd1.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 475, + 205, + 484 + ], + "lines": [ + { + "bbox": [ + 146, + 475, + 205, + 484 + ], + "spans": [ + { + "bbox": [ + 146, + 475, + 205, + 484 + ], + "type": "text", + "content": "(a) Robust error" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "lines": [ + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "type": "text", + "content": "Figure 15: The robust error decomposition of adversarial training with " + }, + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\ell_{\\infty}" + }, + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "type": "text", + "content": "-balls of size " + }, + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = \\frac{2}{255}" + }, + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "type": "text", + "content": " and test adversaries with " + }, + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\ell_{\\infty}" + }, + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "type": "text", + "content": "-balls of size " + }, + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} = \\frac{2}{255}" + }, + { + "bbox": [ + 104, + 494, + 504, + 537 + ], + "type": "text", + "content": ". 
The plots depict the mean and standard deviation of the mean over several independent experiments. We see that even though adversarial training hurts standard generalization, it increases robust generalization as it decreases the susceptibility of the classifiers." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 242, + 392, + 368, + 469 + ], + "blocks": [ + { + "bbox": [ + 242, + 392, + 368, + 469 + ], + "lines": [ + { + "bbox": [ + 242, + 392, + 368, + 469 + ], + "spans": [ + { + "bbox": [ + 242, + 392, + 368, + 469 + ], + "type": "image", + "image_path": "50d23d820785208d59d85c8b949f3c9a2b3001572c97f9839ac28daad5a20dd6.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 475, + 338, + 484 + ], + "lines": [ + { + "bbox": [ + 272, + 475, + 338, + 484 + ], + "spans": [ + { + "bbox": [ + 272, + 475, + 338, + 484 + ], + "type": "text", + "content": "(b) Standard error" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 372, + 392, + 498, + 469 + ], + "blocks": [ + { + "bbox": [ + 372, + 392, + 498, + 469 + ], + "lines": [ + { + "bbox": [ + 372, + 392, + 498, + 469 + ], + "spans": [ + { + "bbox": [ + 372, + 392, + 498, + 469 + ], + "type": "image", + "image_path": "d94a65abce4a6df74a6e0c2e27cda3fa545e574980cce73fe85a371e9d165da7.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 474, + 466, + 485 + ], + "lines": [ + { + "bbox": [ + 403, + 474, + 466, + 485 + ], + "spans": [ + { + "bbox": [ + 403, + 474, + 466, + 485 + ], + "type": "text", + "content": "(c) Susceptibility" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 
504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "content": "rate of " + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "inline_equation", + "content": "5 \\cdot 10^{-4}" + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "content": ". We also trained with a weight decay of " + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "content": ", a batch size of 8 and a momentum of 0.9 for all networks. We train at least 3 networks for all settings and report the mean and standard deviation of the mean of the standard error, robust error and susceptibility over the three runs." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 618, + 332, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 618, + 332, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 332, + 631 + ], + "type": "text", + "content": "EXPERIMENTAL DETAILS ON CIFAR-10" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 504, + 670 + ], + "type": "text", + "content": "In this section, we give the experimental details on the CIFAR-10-based experiments shown in Figures 1 and 17." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": "Subsampling CIFAR-10 In all our experiments we subsample CIFAR-10 to simulate the low sample size regime. We ensure that for all subsampled versions the number of samples of each class are equal. 
Hence, if we subsample to 500 training images, then each class has exactly 50 images, which are drawn uniformly from the " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "5k" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": " training images of the respective class." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": "Mask perturbation on CIFAR-10 On CIFAR-10, we consider the square black mask attack where the adversary can mask a square in the image of size " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{te}} \\times \\epsilon_{\\mathrm{te}}" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " by setting the pixel values zero. 
To ensure that the mask cannot cover all the information about the true class in the image, we restrict the size of the masks to be at most " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": ", while allowing for all possible locations of the mask in the targeted image. For exact robust error evaluation, we perform a full grid search over all possible locations during test time. We show an example of a black-mask attack on each of the classes in CIFAR-10 in Figure 16." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "text", + "content": "During training, a full grid search is computationally intractable so that we use an approximate attack similar to Wu et al. (2020) during training time: by identifying the " + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "inline_equation", + "content": "K = 16" + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "text", + "content": " most promising mask locations with a heuristic as follows. First, we identify promising mask locations by analyzing the gradient, " + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\nabla_{x}L(f_{\\theta}(x),y)" + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "text", + "content": ", of the cross-entropy loss with respect to the input. Masks that cover part of the image where the gradient is large, are more likely to increase the loss. 
Hence, we compute the " + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "text", + "content": " mask locations " + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\| \\nabla_{x}L(f_{\\theta}(x),y)_{[i:i + 2,j:j + 2]}\\| _1" + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "text", + "content": " is the largest and take using a full list-search the mask that incurs the highest loss. Our intuition from the theory predicts that higher " + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 165, + 506, + 266 + ], + "type": "text", + "content": ", and hence a more exact \"defense\", only increases the robust error of adversarial training, since the mask could then more efficiently cover important information about the class." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 145, + 273, + 465, + 306 + ], + "blocks": [ + { + "bbox": [ + 145, + 273, + 465, + 306 + ], + "lines": [ + { + "bbox": [ + 145, + 273, + 465, + 306 + ], + "spans": [ + { + "bbox": [ + 145, + 273, + 465, + 306 + ], + "type": "image", + "image_path": "8f3c05007626d1ca15fe61a8198f820712a7ea9791907a7525cb5d4a8fec1bb8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 314, + 504, + 336 + ], + "lines": [ + { + "bbox": [ + 104, + 314, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 504, + 336 + ], + "type": "text", + "content": "Figure 16: We show an example of a mask perturbation for all 10 classes of CIFAR-10. 
Even though the attack occludes part of the images, a human can still easily classify all images correctly." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 353, + 338, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 353, + 338, + 497 + ], + "spans": [ + { + "bbox": [ + 104, + 353, + 338, + 497 + ], + "type": "text", + "content": "Experimental training details For all our experiments on CIFAR-10, we adjusted the code provided by Phan (2021). As typically done for CIFAR-10, we augment the data with random cropping and horizontal flipping. For the experiments with results depicted in Figures 1 and 17, we use a ResNet18 network and train for 100 epochs. We tune the parameters learning rate and weight decay for low robust error. For standard standard training, we use a learning rate of 0.01 with equal weight decay. For adversarial training, we use a learning rate of 0.015 and a weight decay of " + }, + { + "bbox": [ + 104, + 353, + 338, + 497 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 104, + 353, + 338, + 497 + ], + "type": "text", + "content": ". We run each experiment three times for every dataset with different initialization seeds, and plot the average and standard deviation over the runs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 501, + 337, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 501, + 337, + 547 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 337, + 547 + ], + "type": "text", + "content": "For the experiments in Figure 1 and 18 we use an attack strength of " + }, + { + "bbox": [ + 104, + 501, + 337, + 547 + ], + "type": "inline_equation", + "content": "K = 4" + }, + { + "bbox": [ + 104, + 501, + 337, + 547 + ], + "type": "text", + "content": ". 
Recall that we perform a full grid search at test time and hence have a good approximation of the robust accuracy and susceptibility score." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 344, + 365, + 503, + 462 + ], + "blocks": [ + { + "bbox": [ + 344, + 365, + 503, + 462 + ], + "lines": [ + { + "bbox": [ + 344, + 365, + 503, + 462 + ], + "spans": [ + { + "bbox": [ + 344, + 365, + 503, + 462 + ], + "type": "image", + "image_path": "962a80bd4b81e3e400f347ea5d098f7e0eb92be644e217c37d36dc6863740e53.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 471, + 506, + 521 + ], + "lines": [ + { + "bbox": [ + 342, + 471, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 342, + 471, + 506, + 521 + ], + "type": "text", + "content": "Figure 17: The robust error decomposition in standard error and susceptibility for varying attack strengths " + }, + { + "bbox": [ + 342, + 471, + 506, + 521 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 342, + 471, + 506, + 521 + ], + "type": "text", + "content": ". We see that the larger " + }, + { + "bbox": [ + 342, + 471, + 506, + 521 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 342, + 471, + 506, + 521 + ], + "type": "text", + "content": ", the lower the susceptibility, but the higher the standard error." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": "Increasing training attack strength We investigate the influence of the attack strength " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": " on the robust error for adversarial training. We take " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 2" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "n = 500" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": " and vary " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": ". The results are depicted in Figure 17. We see that for increasing " + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 557, + 504, + 604 + ], + "type": "text", + "content": ", the susceptibility decreases, but the standard error increases more severely, resulting in an increasing robust error." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 613, + 505, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 505, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 505, + 670 + ], + "type": "text", + "content": "Robust error decomposition In Figure 1, we see that the robust error increases for adversarial training compared to standard training in the low sample size regime, but the opposite holds when enough samples are available. For completeness, we provide a full decomposition of the robust error in standard error and susceptibility for standard and adversarial training. We plot the decomposition in Figure 18." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 685, + 322, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 685, + 322, + 696 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 322, + 696 + ], + "type": "text", + "content": "F STATIC HAND GESTURE RECOGNITION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "The goal of static hand gesture or posture recognition is to recognize hand gestures such as a pointing index finger or the okay-sign based on static data such as images Oudah et al. (2020); Yang et al." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": "(2013). The current use of hand gesture recognition is primarily in the interaction between computers and humans Oudah et al. (2020). More specifically, typical practical applications can be found in the environment of games, assisted living, and virtual reality Mujahid et al. (2021). In the following, we conduct experiments on a hand gesture recognition dataset constructed by Mantecón et al. (2019), which consists of near-infrared stereo images obtained using the Leap Motion device. First, we crop or segment the images after which we use logistic regression for classification. We see that adversarial logistic regression deteriorates robust generalization with increasing " + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 175, + 504, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 504, + 242 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 504, + 242 + ], + "type": "text", + "content": "Static hand-gesture dataset We use the dataset made available by Mantecón et al. (2019). This dataset consists of near-infrared stereo images taken with the Leap Motion device and provides detailed skeleton data. We base our analysis on the images only. The size of the images is " + }, + { + "bbox": [ + 104, + 175, + 504, + 242 + ], + "type": "inline_equation", + "content": "640 \\times 240" + }, + { + "bbox": [ + 104, + 175, + 504, + 242 + ], + "type": "text", + "content": " pixels. The dataset consists of 16 classes of hand poses taken by 25 different people. We note that the variety between the different people is relatively wide; there are men and women with different posture and hand sizes. However, the different samples taken by the same person are alike." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 247, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 312 + ], + "type": "text", + "content": "We consider binary classification between the index-pose and L-pose, and take as a training set 30 images of the users 16 to 25. This results in a training dataset of 300 samples. We show two examples of the training dataset in Figure 19, each corresponding to a different class. Observe that the near-infrared images darken the background and successfully highlight the hand-pose. As a test dataset, we take 10 images of each of the two classes from the users 1 to 10 resulting in a test dataset of size 200." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 329, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 329, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 504, + 373 + ], + "type": "text", + "content": "**Cropping the dataset** To speed up training and ease the classification problem, we crop the images from a size of " + }, + { + "bbox": [ + 104, + 329, + 504, + 373 + ], + "type": "inline_equation", + "content": "640 \\times 240" + }, + { + "bbox": [ + 104, + 329, + 504, + 373 + ], + "type": "text", + "content": " to a size of " + }, + { + "bbox": [ + 104, + 329, + 504, + 373 + ], + "type": "inline_equation", + "content": "200 \\times 200" + }, + { + "bbox": [ + 104, + 329, + 504, + 373 + ], + "type": "text", + "content": ". We crop the images using a basic image segmentation technique to stay as close as possible to real-world applications. The aim is to crop the images such that the hand gesture is centered within the cropped image." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 378, + 504, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 504, + 445 + ], + "type": "text", + "content": "For every user in the training set, we crop an image of the L-pose and the index pose by hand. We call these images the training masks " + }, + { + "bbox": [ + 104, + 378, + 504, + 445 + ], + "type": "inline_equation", + "content": "\\{\\mathrm{masks}_i\\}_{i=1}^{20}" + }, + { + "bbox": [ + 104, + 378, + 504, + 445 + ], + "type": "text", + "content": ". We note that the more a particular window of an image resembles a mask, the more likely that the window captures the hand gesture correctly. Moreover, the near-infrared images are such that the hands of a person are brighter than the surroundings of the person itself. 
Based on these two observations, we define the best segment or window, defined by the upper left coordinates " + }, + { + "bbox": [ + 104, + 378, + 504, + 445 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 104, + 378, + 504, + 445 + ], + "type": "text", + "content": ", for an image " + }, + { + "bbox": [ + 104, + 378, + 504, + 445 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 378, + 504, + 445 + ], + "type": "text", + "content": " as the solution to the following optimization problem:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 164, + 464, + 504, + 496 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 464, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 164, + 464, + 504, + 496 + ], + "type": "interline_equation", + "content": "\\underset {i \\in [ 4 4 0 ], j \\in [ 4 0 ]} {\\arg \\min } \\sum_ {l = 1} ^ {2 0} \\| \\operatorname {m a s k s} _ {l} - x _ {\\{i: i + 2 0 0, j: j + 2 0 0 \\}} \\| _ {2} ^ {2} - \\frac {1}{2} \\| x _ {\\{i + w, j + h \\}} \\| _ {1}. \\tag {34}", + "image_path": "a39e9a114d82f984c425044fed2dc79338cba996bc25565f6e967f9ff0c78e7d.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 503, + 506, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 503, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 503, + 506, + 537 + ], + "type": "text", + "content": "Equation 34 is solved using a full grid search. We use the result to crop both training and test images. Upon manual inspection of the cropped images, close to all images were perfectly cropped. We replace the handful poorly cropped training images with hand-cropped counterparts." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 575 + ], + "type": "text", + "content": "Square-mask perturbations Since we use logistic regression, we perform a full grid search to find the best adversarial perturbation at training and test time. For completeness, the upper left coordinates" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 113, + 601, + 237, + 673 + ], + "blocks": [ + { + "bbox": [ + 113, + 601, + 237, + 673 + ], + "lines": [ + { + "bbox": [ + 113, + 601, + 237, + 673 + ], + "spans": [ + { + "bbox": [ + 113, + 601, + 237, + 673 + ], + "type": "image", + "image_path": "4502c1bf228648f40c15268a303921271876dbb44477d6fcaac2679eca5b0995.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 677, + 205, + 688 + ], + "lines": [ + { + "bbox": [ + 146, + 677, + 205, + 688 + ], + "spans": [ + { + "bbox": [ + 146, + 677, + 205, + 688 + ], + "type": "text", + "content": "(a) Robust error" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 242, + 601, + 367, + 673 + ], + "blocks": [ + { + "bbox": [ + 242, + 601, + 367, + 673 + ], + "lines": [ + { + "bbox": [ + 242, + 601, + 367, + 673 + ], + "spans": [ + { + "bbox": [ + 242, + 601, + 367, + 673 + ], + "type": "image", + "image_path": "38e471ba42dd37e14e3a8db65922e6c39a88c3e2c12371f52024872c3b996834.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 677, + 338, + 688 + ], + "lines": [ + { + "bbox": [ + 272, + 677, + 338, + 688 + ], + "spans": [ + { + "bbox": [ + 272, + 677, + 338, + 688 + ], + "type": "text", + "content": "(b) Standard error" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + 
"bbox": [ + 104, + 698, + 504, + 728 + ], + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 728 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 728 + ], + "type": "text", + "content": "Figure 18: The robust error decomposition in standard error and susceptibility of the subsampled datasets of CIFAR-10 after adversarial and standard training. For small sample size, adversarial training has higher robust error then standard training." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 370, + 596, + 496, + 673 + ], + "blocks": [ + { + "bbox": [ + 370, + 596, + 496, + 673 + ], + "lines": [ + { + "bbox": [ + 370, + 596, + 496, + 673 + ], + "spans": [ + { + "bbox": [ + 370, + 596, + 496, + 673 + ], + "type": "image", + "image_path": "33c9a5b0e9ca0d43691b68e683d8fe948c15802f0bf95bc6a8e39725fdb9dcd8.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 402, + 677, + 466, + 689 + ], + "lines": [ + { + "bbox": [ + 402, + 677, + 466, + 689 + ], + "spans": [ + { + "bbox": [ + 402, + 677, + 466, + 689 + ], + "type": "text", + "content": "(c) Susceptibility" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + 
"para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 79, + 267, + 185 + ], + "blocks": [ + { + "bbox": [ + 109, + 79, + 267, + 185 + ], + "lines": [ + { + "bbox": [ + 109, + 79, + 267, + 185 + ], + "spans": [ + { + "bbox": [ + 109, + 79, + 267, + 185 + ], + "type": "image", + "image_path": "ad02e8b4c078764e1ca3fd16302c513719a6c229139042de1270d9e805555f92.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 189, + 227, + 200 + ], + "lines": [ + { + "bbox": [ + 187, + 189, + 227, + 200 + ], + "spans": [ + { + "bbox": [ + 187, + 189, + 227, + 200 + ], + "type": "text", + "content": "(a) L pose" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 305, + 79, + 463, + 185 + ], + "blocks": [ + { + "bbox": [ + 305, + 79, + 463, + 185 + ], + "lines": [ + { + "bbox": [ + 305, + 79, + 463, + 185 + ], + "spans": [ + { + "bbox": [ + 305, + 79, + 463, + 185 + ], + "type": "image", + "image_path": "1f696bdcbdb4525ecbc4b630198f092d4ac936b3045ca4db67441f2d59f0637d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 376, + 189, + 430, + 200 + ], + "lines": [ + { + "bbox": [ + 376, + 189, + 430, + 200 + ], + "spans": [ + { + "bbox": [ + 376, + 189, + 430, + 200 + ], + "type": "text", + "content": "(b) Index pose" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 129, + 251, + 231, + 353 + ], + "blocks": [ + { + "bbox": [ + 104, + 209, + 504, + 240 + ], + "lines": [ + { + "bbox": [ + 104, + 209, + 504, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 504, + 240 + ], + "type": "text", + "content": "Figure 19: Examples of the original images of the considered hand-gestures. We recognize the \"L\"-sign in Figure 19a and the index sign in Figure 19b. 
Observe that the near-infrared images highlight the hand pose well and blends out much of the non-useful or noisy background." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 129, + 251, + 231, + 353 + ], + "lines": [ + { + "bbox": [ + 129, + 251, + 231, + 353 + ], + "spans": [ + { + "bbox": [ + 129, + 251, + 231, + 353 + ], + "type": "image", + "image_path": "5f5121a185f800d9707d726c94c5020fec1d2515474efd7d4b24c21c092db34f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 144, + 356, + 216, + 367 + ], + "lines": [ + { + "bbox": [ + 144, + 356, + 216, + 367 + ], + "spans": [ + { + "bbox": [ + 144, + 356, + 216, + 367 + ], + "type": "text", + "content": "(a) Cropped L pose" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 376, + 504, + 398 + ], + "lines": [ + { + "bbox": [ + 104, + 376, + 504, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 504, + 398 + ], + "type": "text", + "content": "Figure 20: Examples of the cropped hand-gesture images. We see that the hands are centered and the images have a size of " + }, + { + "bbox": [ + 104, + 376, + 504, + 398 + ], + "type": "inline_equation", + "content": "200 \\times 200" + }, + { + "bbox": [ + 104, + 376, + 504, + 398 + ], + "type": "text", + "content": ". In Figure 20c we show an example of the square black-mask perturbation." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 254, + 251, + 356, + 352 + ], + "blocks": [ + { + "bbox": [ + 254, + 251, + 356, + 352 + ], + "lines": [ + { + "bbox": [ + 254, + 251, + 356, + 352 + ], + "spans": [ + { + "bbox": [ + 254, + 251, + 356, + 352 + ], + "type": "image", + "image_path": "2b48acfec7de0b523a300cf6494ea3830a1e9c2682c157b0c206d41136172bb1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 261, + 356, + 348, + 367 + ], + "lines": [ + { + "bbox": [ + 261, + 356, + 348, + 367 + ], + "spans": [ + { + "bbox": [ + 261, + 356, + 348, + 367 + ], + "type": "text", + "content": "(b) Cropped index pose" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 380, + 251, + 481, + 351 + ], + "blocks": [ + { + "bbox": [ + 380, + 251, + 481, + 351 + ], + "lines": [ + { + "bbox": [ + 380, + 251, + 481, + 351 + ], + "spans": [ + { + "bbox": [ + 380, + 251, + 481, + 351 + ], + "type": "image", + "image_path": "cf12fa372fc46b7868a4a707ff18f18ec6208d99c94c4a6883f7601f03b44ae6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 379, + 355, + 482, + 367 + ], + "lines": [ + { + "bbox": [ + 379, + 355, + 482, + 367 + ], + "spans": [ + { + "bbox": [ + 379, + 355, + 482, + 367 + ], + "type": "text", + "content": "(c) Black-mask perturbation" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 418, + 451, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 451, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 451, + 431 + ], + "type": "text", + "content": "of the optimal black-mask perturbation of size " + }, + { + "bbox": [ + 104, + 418, + 451, + 431 + ], + "type": "inline_equation", + "content": 
"\\epsilon_{\\mathrm{tr}} \\times \\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 418, + 451, + 431 + ], + "type": "text", + "content": " can be found as the solution to" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 210, + 437, + 504, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 437, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 210, + 437, + 504, + 464 + ], + "type": "interline_equation", + "content": "\\arg \\max _ {i \\in [ 2 0 0 - \\epsilon_ {\\mathrm {t r}} ], j \\in [ 2 0 0 - \\epsilon_ {\\mathrm {t r}} ]} \\sum_ {l, m \\in [ \\epsilon_ {\\mathrm {t r}} ]} \\theta_ {[ i: i + l, j: j + m ]}. \\tag {35}", + "image_path": "2177a63c6f3081e5f2710717bcf986363df31851088c7fe1d098277e20003a48.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "content": "The algorithm is rather slow as we iterate over all possible windows. We show a black-mask perturbation on an " + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "content": "-pose image in Figure 20c." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 504, + 504, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 504, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 504, + 538 + ], + "type": "text", + "content": "Results We run adversarial logistic regression with square-mask perturbations on the cropped dataset and vary the adversarial training budget and plot the result in Figure 21. We observe attack that adversarial logistic regression deteriorates robust generalization." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "text", + "content": "Because we use adversarial logistic regression, we are able to visualize the classifier. Given the classifier induced by " + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "text", + "content": ", we can visualize how it classifies the images by plotting " + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\frac{\\theta - \\min_{i\\in[d]}\\theta_{[i]}}{\\max_{i\\in[d]}\\theta_{[i]}}\\in [0,1]^d" + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "text", + "content": ". Recall that the class-prediction of our predictor for a data point " + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "text", + "content": " is given by " + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\mathrm{sign}(\\theta^{\\top}x)\\in \\{\\pm 1\\}" + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "text", + "content": ". The lighter parts of the resulting image correspond to the class with label 1 and the darker patches with the class corresponding to label " + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "inline_equation", + "content": "-1" + }, + { + "bbox": [ + 104, + 543, + 504, + 606 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 611, + 337, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 337, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 337, + 733 + ], + "type": "text", + "content": "We plot the classifiers obtained by standard logistic regression and adversarial logistic regression with training adversarial budgets " + }, + { + "bbox": [ + 104, + 611, + 337, + 733 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 104, + 611, + 337, + 733 + ], + "type": "text", + "content": " of 10 and 25 in Figure 22. The darker parts in the classifier correspond to patches that are typically bright for the " + }, + { + "bbox": [ + 104, + 611, + 337, + 733 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 611, + 337, + 733 + ], + "type": "text", + "content": "-pose. Complementary, the lighter patches in the classifier correspond to patches that are typically bright for the index pose. We see that in the case of adversarial logistic regression, the background noise is much higher than for standard logistic regression. 
In other words, adversarial logistic regression puts more weight on non-signal parts in the images to classify the" + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 345, + 624, + 503, + 720 + ], + "blocks": [ + { + "bbox": [ + 345, + 624, + 503, + 720 + ], + "lines": [ + { + "bbox": [ + 345, + 624, + 503, + 720 + ], + "spans": [ + { + "bbox": [ + 345, + 624, + 503, + 720 + ], + "type": "image", + "image_path": "43ce2a07cf4257b4a504149373536b2d67bd2154d79ebfc09d771c9ee0b09f31.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 728, + 505, + 769 + ], + "lines": [ + { + "bbox": [ + 342, + 728, + 505, + 769 + ], + "spans": [ + { + "bbox": [ + 342, + 728, + 505, + 769 + ], + "type": "text", + "content": "Figure 21: The standard error and robust error for varying adversarial training budget " + }, + { + "bbox": [ + 342, + 728, + 505, + 769 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 342, + 728, + 505, + 769 + ], + "type": "text", + "content": ". We see that the larger " + }, + { + "bbox": [ + 342, + 728, + 505, + 769 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}}" + }, + { + "bbox": [ + 342, + 728, + 505, + 769 + ], + "type": "text", + "content": " the higher the robust error." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 83, + 337, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 83, + 337, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 83, + 337, + 105 + ], + "type": "text", + "content": "training dataset and hence exhibits worse performance on the test dataset." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 129, + 113, + 232, + 213 + ], + "blocks": [ + { + "bbox": [ + 129, + 113, + 232, + 213 + ], + "lines": [ + { + "bbox": [ + 129, + 113, + 232, + 213 + ], + "spans": [ + { + "bbox": [ + 129, + 113, + 232, + 213 + ], + "type": "image", + "image_path": "6e7a5f7e308b622e6cd348122f3d35085268ab1cd1c252c0673785d1da3282bd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 160, + 217, + 200, + 228 + ], + "lines": [ + { + "bbox": [ + 160, + 217, + 200, + 228 + ], + "spans": [ + { + "bbox": [ + 160, + 217, + 200, + 228 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 160, + 217, + 200, + 228 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 0" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 254, + 113, + 356, + 213 + ], + "blocks": [ + { + "bbox": [ + 254, + 113, + 356, + 213 + ], + "lines": [ + { + "bbox": [ + 254, + 113, + 356, + 213 + ], + "spans": [ + { + "bbox": [ + 254, + 113, + 356, + 213 + ], + "type": "image", + "image_path": "41bbadd545f0831fc85c2431e3c32f8936ddf77a80b40627dd559a109c253ed3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 217, + 328, + 228 + ], + "lines": [ + { + "bbox": [ + 282, + 217, + 328, + 228 + ], + "spans": [ + { + "bbox": [ + 282, + 217, + 328, + 228 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 282, + 217, + 328, + 228 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 10" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 237, + 506, + 288 + ], + "lines": [ + { + "bbox": [ + 104, + 237, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 237, + 506, + 288 + ], + "type": "text", + "content": "Figure 22: We visualize the logistic regression solutions. 
In Figure 22a we plot the vector that induces the classifier obtained after standard training. In Figure 22b and Figure 22c we plot the vector obtained after training with square-mask perturbations of size 10 and 25, respectively. We note the non-signal enhanced background correlations at the parts highlighted with the red circles in the image projection of the adversarially trained classifiers." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 381, + 113, + 482, + 213 + ], + "blocks": [ + { + "bbox": [ + 381, + 113, + 482, + 213 + ], + "lines": [ + { + "bbox": [ + 381, + 113, + 482, + 213 + ], + "spans": [ + { + "bbox": [ + 381, + 113, + 482, + 213 + ], + "type": "image", + "image_path": "7b392b5b988645f9de0e728e4d94808ddd76150a02d046121f5f86949513ebda.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 409, + 217, + 453, + 228 + ], + "lines": [ + { + "bbox": [ + 409, + 217, + 453, + 228 + ], + "spans": [ + { + "bbox": [ + 409, + 217, + 453, + 228 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 409, + 217, + 453, + 228 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{tr}} = 25" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 9 + } + ], + "page_size": 
[ + 612, + 792 + ], + "page_idx": 28 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/41f81ce5-4453-4061-b257-336a66f472e8_content_list.json b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/41f81ce5-4453-4061-b257-336a66f472e8_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..93392801b52851283c7b5fdd6a6012618891f4fe --- /dev/null +++ b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/41f81ce5-4453-4061-b257-336a66f472e8_content_list.json @@ -0,0 +1,2415 @@ +[ + { + "type": "text", + "text": "WINERT: TOWARDS NEURAL RAY TRACING FOR WIRELESS CHANNEL MODELLING AND DIFFERENTIABLE SIMULATIONS", + "text_level": 1, + "bbox": [ + 171, + 98, + 823, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tribhuvanesh Orekondy, Kumar Pratik, Shreya Kadambi, Hao Ye, Joseph Soriaga, Arash Behboodi \nQualcomm AI Research*", + "bbox": [ + 179, + 194, + 653, + 237 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 273, + 547, + 289 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we work towards a neural surrogate to model wireless electromagnetic propagation effects in indoor environments. Such neural surrogates provide a fast, differentiable, and continuous representation of the environment and enables end-to-end optimization for downstream tasks (e.g., network planning). Specifically, the goal of the paper is to render the wireless signal (e.g., time-of-flights, power of each path) in an environment as a function of the sensor's spatial configuration (e.g., placement of transmit and receive antennas). 
NeRF-based approaches have shown promising results in the visual setting (RGB image signal, with a camera sensor), where the key idea is to algorithmically evaluate the 'global' signal (e.g., using volumetric rendering) by breaking it down in a sequence of 'local' evaluations (e.g., using co-ordinate neural networks). In a similar spirit, we model the time-angle channel impulse response (the global wireless signal) as a superposition of multiple paths. The wireless characteristics (e.g., power) of each path is a result of multiple evaluations of a neural network that learns implicit ray-surface interaction properties. We evaluate our approach in multiple indoor scenarios and demonstrate that our model achieves strong performance (e.g., $<0.33\\mathrm{ns}$ error in time-of-flight predictions). Furthermore, we demonstrate that our neural surrogate whitens the 'black-box' wireless simulators, and thus enables inverse rendering applications (e.g., user localization).", + "bbox": [ + 228, + 304, + 769, + 569 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 592, + 339, + 607 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Realistic simulations of physical processes are vital to many scientific and engineering disciplines. In this paper, we focus on simulation of wireless electromagnetic (EM) signals within a propagation environment. The physics of such EM wave propagation between a transmit and receive point are analytically given by Maxwell equations: the transmitted wave undergoes different interactions with the environment (e.g., reflection), and the receiver gets the wave through multiple paths with different time-of-flights and powers, and from different directions. 
However, solving the Maxwell equations with boundary conditions requires in-depth knowledge of the propagation environment, hence classically modelling EM propagation is intractable for most engineering applications.", + "bbox": [ + 169, + 623, + 826, + 737 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing techniques make such simulations tractable by trading-off accuracy for speed. At one end of the spectrum, such simulations are represented in a statistical sense where a probabilistic model roughly captures the marginalized distribution over time-of-flights, gains and direction of transmit-receive paths. However, this level of accuracy is insufficient for designing systems that efficiently operate in high frequency bands. This motivates solutions at the other end of the spectrum: wireless ray tracing simulators. Given a detailed CAD representation of the environment along with the material properties, and numerous wireless configuration parameters (e.g., placement of a base station), the simulators generate resulting propagation characteristics.", + "bbox": [ + 169, + 741, + 826, + 854 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although wireless ray tracing simulators are appealing, there are a few drawbacks. First, they are generally slow, which poses a bottleneck for closed-loop design pipelines, as wireless configurations cannot be quickly mapped to propagation characteristics. 
Second, because they are non", + "bbox": [ + 169, + 859, + 828, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Qualcomm AI Research is an initiative of Qualcomm Technologies, Inc", + "bbox": [ + 189, + 909, + 624, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "differentiable, they are not amenable with inverse physical design formulations, for example optimizing base station placement with the simulator in the optimization loop. Third, they usually require additional fine-tuning with real data as they are not data-driven. Calibrating them with real-world measurements is non-trivial and tedious. Fourth, they cannot generally inter-operate with probabilistic frameworks which have the advantage of better dealing with epistemic uncertainties. We believe neural surrogates provide a natural solution to circumvent many of these drawbacks of classical ray tracing simulators.", + "bbox": [ + 174, + 103, + 823, + 200 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we propose a neural wireless simulator ('WiNeRT') by building on recent advances in scenes representation as continuous-function neural networks (Sitzmann et al., 2019; Tancik et al., 2020; Mildenhall et al., 2020). In particular, central to our approach is learning a network to model ray-surface interactions, i.e., the network transforms an incident wireless ray to an attenuated outgoing ray. By shooting out a number of rays and evaluating the network at relevant spatial regions in the environment, we estimate the wireless characteristics as a set of transmit-receive paths, each path encodes attributes such as time-of-flight and gain. 
Our approach also addresses some unique technical challenges posed by the non-visual wireless modality, such as dealing with sparse high-dimensional time-angle measurement signals.", + "bbox": [ + 174, + 207, + 823, + 333 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We demonstrate that our neural wireless simulator reasonably renders the wireless propagation aspects by evaluating on two datasets which captures $50 - 100\\mathrm{m}^2$ indoor propagation scenes. Interestingly, we find that the 3D-structure-aware implicit formulation is a strong inductive bias and helps generalization to significant inference-time distributions shifts. Finally, we demonstrate the potential of our differentiable forward model in solving inverse problem by tackling the user localization problem after posing it as an inverse rendering problem. Our results indicate that simulator physics for specified environments can be 'distilled' into neural surrogates and thereby presenting first steps towards closed-loop design pipelines of wireless communication systems.", + "bbox": [ + 174, + 339, + 823, + 450 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 174, + 474, + 343, + 491 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Physics-based Neural Simulations. There exists a wide body of literature to model physical processes using advances in neural networks (Djeumou et al., 2022; Karniadakis et al., 2021; Raissi et al., 2017). As simulating physical processes can be expensive and can also present nondifferentiable 'black-box' in design pipelines, recent literature addresses how to work towards neural surrogates, such as for particle simulation (Sanchez-Gonzalez et al., 2020), mesh simulations (Pfaff et al., 2020), design of particle accelerators (Shirobokov et al., 2020), and inverse kinematics (Sun et al., 2021). 
In this paper, we are particularly interested in a specific physical process - wireless EM-wave propagation. Although this has received limited recent attention (Xia et al., 2020) in a 3D-oblivious setting, it is unclear whether these extend to complex configurations. Consequently, in this work, we work towards the first 3d-structure-aware surrogates for wireless ray tracing simulation.", + "bbox": [ + 174, + 512, + 823, + 652 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Channel Modelling. Although propagation channel modeling has been a central topic in wireless communication (Jakes & Cox, 1994; Lee, 1982; Rappaport et al., 2022), there has been a recent trend for fully data-driven models. The main paradigm of these activities is to use machine learning to learn complex distributions, model non-linearities and have differentiable simulators. These works can be categorized as statistical channel models where the channel input-output relation is modelled as a conditional probability distribution. Many works leverage recent advances in generative modelling and use models like generative adversarial networks (GANs) (Goodfellow et al., 2014) or variational autoencoders (VAEs) (Kingma & Welling, 2013) to learn the channel model (O'Shea et al., 2019; Ye et al., 2018; Yang et al., 2019; O'Shea et al., 2019; Orekondy et al., 2022; Ye et al., 2020; Dorner et al., 2020). In contrast to these works, our approach inscribes within ray tracing channel modeling paradigm, where wireless propagation is precisely modelled by tracing wireless rays, however, unlike classical ray tracers, our model is able to blend in the elements of statistical modeling and is trainable directly on field data. To the best of our knowledge, this work is the first differentiable neural ray tracer for wireless channel modelling.", + "bbox": [ + 174, + 662, + 823, + 856 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Scene Representations. 
Representing scenes (or more generally signals) has been widely studied in literature, such as encoding the signal in the latent space of a generative model (Kingma & Welling, 2013; Goodfellow et al., 2014). A more recent link of work encodes the signal in the parameters of a co-ordinate MLP (Park et al., 2019; Sitzmann et al., 2020; Tancik et al., 2020; Fathony", + "bbox": [ + 174, + 867, + 823, + 922 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 477, + 46 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 949, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c74b0a95ffdda8f97ceb12b64de439e99ae04a016d2f6461b653f52e5eddf3a6.jpg", + "image_caption": [ + "Figure 1: Approach Overview. We learn a forward simulator $\\text{render}_{\\theta}(\\cdot)$ that maps an environment configuration to a wireless channel $h_i$ . Here, $h_i$ is a set of wireless propagation paths between $x_{\\mathrm{tx}} - x_{\\mathrm{rx}}$ (green rays in right image), each path encoding certain channel attributes e.g., path gain." + ], + "image_footnote": [], + "bbox": [ + 174, + 101, + 823, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "et al., 2020), thereby mapping co-ordinates (e.g., spatial, temporal) to the signal intensity values (e.g., pixel intensity, amplitude). In a specific case where the signal is a 2D RGB image, recent works (Schwarz et al., 2020; Niemeyer & Geiger, 2021; Mildenhall et al., 2020) show promising results by additionally employing image-based differentiable rendering paradigms (Drebin et al., 1988; Liu et al., 2019) to recover 3D properties of the scene. Inspired by this idea, our work neurally represents a wireless scene by tackling a set of orthogonal challenges, such as dealing with sparse high-dimensional signals and particularly modelling reflection and transmission effects. 
Consequently, we work towards the first 3D-aware neural 'wireless' scene representation model.", + "bbox": [ + 169, + 234, + 826, + 347 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 APPROACH", + "text_level": 1, + "bbox": [ + 171, + 366, + 302, + 382 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we begin with some preliminaries to the subsequent formulation of the neural wireless ray tracing problem. We then provide an initial overview of our approach in Sec. 3.1 and then dive deeper into specific technical aspects of wireless ray marching in Sec. 3.2.", + "bbox": [ + 169, + 396, + 823, + 439 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Preliminaries: Wireless Channels Scattering, reflection and diffraction are among the main effects in electromagnetic propagation. A general mathematical description of a wireless channel, seen as linear time varying system, is given by its impulse response Tse & Viswanath (2005); Rappaport (1996). A general model can be written as (Samimi & Rappaport, 2016):", + "bbox": [ + 169, + 449, + 823, + 505 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nh (t, \\boldsymbol {\\Theta}, \\boldsymbol {\\Phi}) = \\sum_ {k} a _ {k} (t) \\delta \\left(t - \\tau_ {k} (t)\\right) \\delta \\left(\\boldsymbol {\\Theta} - \\boldsymbol {\\Theta} _ {k} (t)\\right) \\delta \\left(\\boldsymbol {\\Phi} - \\boldsymbol {\\Phi} _ {k} (t)\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 281, + 508, + 823, + 540 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $a_{k}(t)$ is the complex gain, $\\tau_{k}(t)$ is the delay (time-of-flight) of path $k$ , $\\Theta_{k}(t)$ is azimuth and elevation angle of departure (AoD), and $\\Phi_k(t)$ is azimuth and elevation angle of arrival (AoA). Going forward, we use $\\phi_{k} = (\\Theta_{k},\\Phi_{k})$ as a shorthand to collectively represent all angles. 
Intuitively equation 1, represents each path as a dirac function in time-angle space. The task of channel modeling can, therefore, be reduced to predicting channel attributes $(a_{k}(t),\\tau_{k}(t),\\phi_{k}(t))$ for a given environment map, and a transmit and receive location. See Sec. A.1 for a detailed discussion.", + "bbox": [ + 169, + 542, + 825, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Forward Model: render. The general goal of our forward model is to run a wireless ray simulation given a certain configuration of the propagation environment. More specifically, as shown in Figure 1, the model takes three configuration parameters as input: a 3D representation of the environment $F$ and the spatial co-ordinates of the transmitter $x_{\\mathrm{tx}}$ and receiver $x_{\\mathrm{rx}}$ devices. The model predicts the wireless scene as:", + "bbox": [ + 169, + 637, + 823, + 707 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {h}} = \\left\\{\\boldsymbol {u} \\right\\} _ {k = 1} ^ {K} = \\left\\{\\left(a _ {k}, \\tau_ {k}, \\phi_ {k}\\right) \\right\\} _ {k = 1} ^ {K} = \\operatorname {r e n d e r} _ {\\theta} \\left(\\boldsymbol {x} _ {\\mathrm {t x}}, \\boldsymbol {x} _ {\\mathrm {r x}}, \\boldsymbol {F}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 710, + 823, + 729 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where the output is a variably-sized set of $K$ paths. Each path $\\pmb{u}_k$ encodes three channel attributes: gain $a_k$ , time-of-flight $\\tau_k$ and angles $\\phi_k$ . With these predicted channel attributes, we can obtain a time-angle impulse response (the 'channel') to characterize the wireless propagation effects.", + "bbox": [ + 169, + 731, + 823, + 773 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Key Idea: Implicit Representation Network $f_{\\theta}$ . 
Our approach recursively constructs the channel by using a learnt function $f_{\\theta}: F \\times \\mathbf{u}_{k}^{(r)} \\mapsto \\mathbf{u}_{k}^{(r+1)}$ As shown in Figure 1, given an initial ray $\\mathbf{u}_{k}^{(r=0)}$ , we model the final state as an evaluation of interactions that the ray undergoes with the environment $F$ . Intuitively, $f_{\\theta}$ models the local interaction of any given ray $k$ either in free-space, or in particular when it is incident on an interacting surface. In the latter case of ray-surface interaction, we leverage a co-ordinate MLP to predict the transformation (e.g., attenuation, rotation) to the incident ray.", + "bbox": [ + 169, + 784, + 825, + 872 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Representing Environment $F$ . We primarily focus on indoor propagation environments in this paper, where the environment is a 3D geometric representation. Specifically, we consider the environment represented as a 3D mesh composed of $F$ faces and $V$ vertices, where each face corresponds", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c66cddf7981830725b69b76efc859e0363ee0a2813c296272c06d0f3e7d2b001.jpg", + "image_caption": [ + "Figure 2: Renderer: Ray Marching Steps. At each step $r$ of the simulation, we learn the transformation introduced on a ray $\\boldsymbol{u}_k^{(r)}$ e.g., reflection off a particular surface. The final transformation is a result of learnt (green blocks) and non-learnable (blue blocks) evaluations." + ], + "image_footnote": [], + "bbox": [ + 174, + 99, + 823, + 220 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "to some surface on a wall. 
We consider a mesh structure with two subtleties: (a) we represent walls as a flattened polygon and thereby do not explicitly consider its thickness; and (b) we do not encode materials of the corresponding wall faces, but rather learn the properties implicitly from data.", + "bbox": [ + 169, + 290, + 823, + 333 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 OVERVIEW: NEURAL-renderING", + "text_level": 1, + "bbox": [ + 171, + 352, + 452, + 366 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we present an overview of the three steps in our approach (as shown in Fig. 1).", + "bbox": [ + 169, + 378, + 790, + 395 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Ray Launching. We begin by shooting out a fixed set of $K$ rays from the transmitter location $\\pmb{x}_k^{(r = 0)}\\coloneqq \\pmb{x}_{\\mathrm{tx}}(\\forall k)$ . We launch the rays omni-directionally from the transmitter co-ordinate, agnostic to the environment and location of the receiver location. Direction $\\pmb{d}_k^{(r = 0)}$ of each ray is oriented in the direction of a unique vertex of a ico-sphere centered at $\\pmb{x}_{\\mathrm{tx}}$ . We use the number of sub-divisions of the ico-sphere to trade-off between computational complexity and accuracy.", + "bbox": [ + 169, + 404, + 823, + 482 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Ray Marching. The crux of our approach involves 'marching' the ray and accounting for interactions (e.g., transmission) with various surfaces of the environment. A key aspect here is using a neural network to make local evaluations: mapping an incident ray with some direction and power to an updated outgoing attenuated ray. The neural network is hence tasked to learn a complex nonlinear characterization of the surface materials at a spatial co-ordinate. 
We further elaborate on the ray marching procedure in the next section.", + "bbox": [ + 169, + 492, + 823, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Ray Aggregation and Reception. Of the $K$ rays launched from the ray launching step, we are now interested in the subset of the rays that impinges on the receiver. We model the reception sphere with a specific radius, which can be tuned to achieve a desired level of precision. To mitigate double-counting of received rays, we filter rays by associating them with a unique interaction path.", + "bbox": [ + 169, + 585, + 823, + 643 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 RAY MARCHING", + "text_level": 1, + "bbox": [ + 171, + 662, + 331, + 676 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We now dive deeper into the ray marching step, which tracks the evaluation of each ray as it propagates in the environment and hits various surfaces. We walk through the steps as shown sequentially in Fig. 2. We begin with a set of geometric rays $\\boldsymbol{u}_k^{(r = 0)}$ , originating at the transmitter co-ordinate $\\boldsymbol{x}_{\\mathrm{tx}}$ . In addition to the channel attributes of each ray (see Eq. 2), we also consider in this section an additional set of meta-attributes (e.g., origin $\\boldsymbol{x}_k$ , direction $\\boldsymbol{d}_k$ ) that helps us with the ray marching steps (elaborated in Sec. A.2).", + "bbox": [ + 169, + 689, + 823, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Ray-Environment Intersections. For each ray, we evaluate its first interaction with the environment (e.g., first wall it hits). Representing the ray geometry as $\\pmb{p}(t) = \\pmb{x}_k^{(r)} + t\\pmb{d}_k^{(r)}$ , we are primarily interested in a solution $t > 0$ for which the ray is incident on some surface. 
This location helps us determine the relay (i.e., new origin) $\\pmb{x}_k^{(r+1)}$ for the subsequent step.", + "bbox": [ + 169, + 787, + 823, + 851 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Ray-Surface Interaction. While the previous step solves for where the ray is incident in the environment, a crucial next step is determining attributes of the outgoing ray as a result of this interaction. We specifically focus on determining two attributes in this step: the new direction $\\pmb{d}_k^{(r+1)}$ and gain $a_k^{(r+1)}$ . Popular non-neural simulators, such as Remcom (2022), look-up frequency-", + "bbox": [ + 169, + 859, + 823, + 926 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "dependent material properties (e.g., conductivity, permittivity) at the incidence point from standard databases (ITU-R P.2040-2) to calculate the attributes of the outgoing ray. However, it is unclear how to calculate the attributes with imprecise knowledge of the surfaces (e.g., unknown thickness and material types of each layer in a wall) or when the material properties of a layer have not been previously empirically analyzed. Our solution is to instead predict the attributes using learnt network as a function of the incident location $\\pmb{x}_k^{(r + 1)}$ and direction $\\pmb{d}_k^{(r)}$ (see $f_{\\theta}^{1}$ in Fig. 2). The ray-surface interaction network $f_{\\theta}^{1}$ used in our experiments is a ReLU MLP with 3 layers (with 64-hidden units). Similar to NeRF (Mildenhall et al., 2020), we split the network into learning incident direction-independent and dependent features by concatenating direction $\\pmb{d}_k^{(r)}$ with bottlenecked outputs of the penultimate layer in the network (See Sec. 
A.3 for more details). The network predicts an attenuation factor $s$ and a rotation matrix $\\pmb{A}$ (4-dim Euler-Rodrigues parameterization), which is then used to determine the updated gain $(a_k^{(r + 1)} = sa_k^{(r)})$ and direction $(\\pmb{d}_k^{(r + 1)} = \\pmb{A}\\pmb{d}_k^{(r)})$ .", + "bbox": [ + 169, + 103, + 826, + 287 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reception/Termination check. For some special cases, we halt ray marching for a subset of rays. Namely, when ray $k$ impinges on a reception sphere of a pre-specified radius (30cm in our experiments). This prevents a future version of the already received ray being potentially incorrectly received at a future iteration. In addition, for computation reasons, we also terminate ray marching if the ray exits the region of interest (e.g., ray exiting the environment).", + "bbox": [ + 169, + 292, + 826, + 364 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Free-space interaction. While the previous steps modeled the interaction of material properties of the environment on wireless propagation, we now switch focus to free-space. In this case, we model propagation of a ray using the empirically-adjusted Friis' Equation: $P_r(d) = P_t G\\left(\\frac{d_0}{d}\\right)^{\\lambda}$ ( $d \\geq d_0$ ) which represents the power received at the receive antenna $P_r$ as a function of the power fed into transmitting antenna $P_t$ , and the distance travelled by the ray $d$ . 
We learn the remaining scalar parameters $G$ (antenna gain constant), $\\lambda$ (attenuation factor), and $d_0$ (reference distance).", + "bbox": [ + 169, + 372, + 826, + 464 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 TRAINING", + "text_level": 1, + "bbox": [ + 171, + 493, + 287, + 507 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Over the previous sections, we walked through our approach on predicting a channel $\\hat{h} = \\mathrm{render}_{\\theta}(\\pmb{x}_{\\mathrm{tx}},\\pmb{x}_{\\mathrm{rx}},\\pmb{F})$ . We train the model in a supervised setting, with ground-truth time-angle impulse response measurements. Importantly, we rely only on final measurements (i.e., at $r = R$ ) for training and do not use any intermediate information (e.g., interaction data through a ray tracer).", + "bbox": [ + 169, + 523, + 825, + 583 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Set-based Channel Loss. We compare two sets of multi-path channels: predictions $\\hat{\\pmb{h}} = \\{\\hat{\\pmb{u}}_k\\}_{k=1}^K$ and ground-truth $\\pmb{h} = \\{\\pmb{u}_l\\}_{l=1}^L$ to provide a supervisory signal for training. We evaluate the set-based loss as: $\\mathcal{L}_{\\mathrm{chan}}(\\pmb{h},\\hat{\\pmb{h}}) = \\sum_l d(\\pmb{u}_l,\\hat{\\pmb{u}}_{\\Pi(l)})$ , which has two key ideas: (a) correspondence $\\Pi$ : we associate each ground-truth path $\\pmb{u}_l$ with a predicted path $\\hat{\\pmb{u}}_k = \\Pi(l)$ . To perform such an association, we use direction-of-departure information and thereby pair paths launched in approximately the same direction; and (b) inter-path distance $d(\\pmb{u}_l,\\hat{\\pmb{u}}_k)$ : to compare two paths, we use mean square error for scalar-valued attributes (e.g., time-of-flights) and cosine distances between angular-attributes (e.g., direction of arrival). 
For the latter, we represent angles as unit vectors in Cartesian coordinates.", + "bbox": [ + 169, + 592, + 823, + 712 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training and Implementation Details. We train our approach for 100 epochs using Adam optimizer with a learning rate of $10^{-3}$ . We found it crucial to not aggregate rays (Sec. 3.1) in the training steps, as it led to vanishing gradients due to negligible number of rays that contributed towards gradient updates. We model the reception sphere as a fixed-sized sphere of radius $30\\mathrm{cm}$ . Additional implementation details are provided in Sec. C.4.", + "bbox": [ + 169, + 720, + 823, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 827, + 419, + 842 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we discuss experimental analysis of our neural simulator approach. We begin by discussing the preliminaries: the choice of datasets and the evaluation metrics to compare simulations. The section concludes by discussing overall performances and highlights certain benefits of neural simulations, such as running controllable simulations outside of training conditions.", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 EXPERIMENTAL SETUP: DATASETS, EVALUATION METRICS, AND BASELINES", + "text_level": 1, + "bbox": [ + 169, + 103, + 751, + 118 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We train and evaluate our algorithm using ground-truth data from wireless ray tracing packages. 
We collect two datasets, where each dataset contains channel measurements (i.e., gains, time-of-flight, angles) for different distributions of environments (e.g., floor layout). We keep the wireless configuration fixed to using omni-directional antennas at both the transmitter and receiver operating at a 3.5GHz carrier frequency. Now we further elaborate on the datasets used in our experiments.", + "bbox": [ + 169, + 131, + 823, + 202 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dataset 1: WI3ROOMS. We create a synthetic dataset which gives us greater control over many aspects over the generation process. Using a $10\\mathrm{m} \\times 5\\mathrm{m} \\times 3\\mathrm{m}$ hull, we randomly synthesize interior brick walls such that the eventual configuration consists of three rooms inter-connected with 1m doorways. We import the environment into an open-source wireless propagation toolbox (Amiot et al., 2013) and collect 41.6K channels, of which $\\sim 37\\%$ of measurements are used for training.", + "bbox": [ + 169, + 210, + 826, + 284 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dataset 2: WIINDOOR. We use the indoor floorplans from the RPLAN dataset (Wu et al., 2019), which is popularly used to model indoor scenes (Nauata et al., 2020; 2021; Para et al., 2021). These layouts represent real-world single floor houses, with 4-8 rooms and $65 - 120\\mathrm{m}^2$ areas. Each floorplan is further accompanied with room semantics such as whether a certain area is a living room, bed room, bathroom, etc. We use these semantics to selectively sample transmit/receiver locations (e.g., locations are not outside the boundary) and to determine wall materials (e.g., external facing walls are bricks, whereas internal facing walls are dry plaster walls). We use a commercial ray tracer Remcom 'Wireless Inside' (Remcom, 2022) with ray tracer X3D to collect measurements in the RPLAN environment. 
Similar to the earlier dataset, we collect 42.5K measurements, of which $\\sim 36\\%$ are used to train the model.", + "bbox": [ + 169, + 291, + 826, + 431 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Train and Test Regimes. For the training dataset, we collect measurements by sampling transmitter ('Tx') from $\\sim 10$ locations (XY plane at an elevation of $2.8\\mathrm{m}$ ) and similarly, receiver ('Rx') from $60\\times 30$ locations (but with elevation of $2\\mathrm{m}$ ). We then create three challenging test sets (see Fig. A2 for an illustration) with novel Tx-Rx locations: (a) Checkerboard: where train and test Rx locations form a checkerboard pattern on the same XY plane at $2\\mathrm{m}$ elevation; (b) Generalization- $z$ : where we move the test Rx locations in (a) to a novel elevation ( $z = 1.0\\mathrm{m}$ for ThreeRooms and $z = 2.5\\mathrm{m}$ for RPLAN); and (c) Generalization-diag: where we sample test Rx locations on a diagonal XYZ plane. Such regimes let us validate the generalization performance under distribution shifts.", + "bbox": [ + 169, + 440, + 826, + 554 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Metrics. We consider three evaluation metrics to evaluate our approach: (i) Overall prediction error ('Overall'): We follow a similar formulation as our loss (Sec. 3.3) with one key difference - we find correspondences $\\Pi$ by solving a linear-sum assignment problem. The eventual error aggregates all attributes relevant for the path (e.g., gain, angles). Intuitively, this measures the distance between two sets (sets of multi-dim paths in our case), using a similar metric common in set prediction tasks (Fan et al., 2017; Zhang et al., 2019). (ii) Geometry prediction error ('Geometry'): We follow a formulation similar to (i), but now focus on two specific features that captures the geometrical accuracy of the path - time-of-flight and angles at departure and arrival. 
Intuitively, this metric measures whether the predicted rays take the same GT route between the transmit and receive co-ordinates. (iii) Average Delay Time - MAE ('AvgDelay'): We average the time-of-flights $\\tau_{k}$ per path of the channel, weighted by its linear power $p(a_{k})$ . We report the mean absolute error of average delays between the predicted and ground-truth channel attributes.", + "bbox": [ + 169, + 561, + 826, + 731 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We propose two reference baselines (i) $k$ -NN (with $k = 1$ ): which predicts the channel, given the closest match to the input spatial co-ordinates in terms of Euclidean distance (ii) MLP: A geometry-oblivious MLP regressor with 3-hidden layers, each with 128 units. We train the MLP using the same loss as WiNeRT. Additional details of the baselines are provided in Sec. C.4.", + "bbox": [ + 169, + 739, + 826, + 799 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 OVERALL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 816, + 352, + 830 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we present the overall qualitative and quantitative results of our approach. We complement the overall performances with additional analysis in the next section.", + "bbox": [ + 169, + 842, + 823, + 872 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative Results. We report the quantitative results for the two datasets (column groups) and three test sets (row groups) in Table 1. 
We observe from the table: (a) by focusing on the overall errors, we find WiNeRT generally outperforms all baselines, with a significant average decrease of", + "bbox": [ + 169, + 881, + 826, + 926 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 960 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/70f353f5c6ec26985c2cb2ad9ada986cd5a41158c5e40f92fa5150e8cd879689.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
WI3ROOMSWIINDOOR
OverallGeometryAvgDelayOverallGeometryAvgDelay
checkerboardkNN0.2320.2122.2380.4120.3962.484
MLP0.2870.3302.0510.3730.3991.745
WiNeRT0.2020.0872.0290.2370.2071.546
gen-zkNN0.2530.2262.0330.4240.4282.487
MLP0.2970.3501.7970.3880.4211.969
WiNeRT0.2170.0841.5220.2850.2501.839
gen-diagkNN0.2520.2132.1180.3800.2511.377
MLP0.3120.3221.8890.3900.3151.513
WiNeRT0.2290.0851.7920.3690.1700.828
", + "bbox": [ + 171, + 99, + 839, + 289 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1: Quantitative Results. Comparing errors of our approach (WiNeRT) with baselines, over two datasets (column groups) and three test regimes (row groups). Lower values are better and the lowest errors are in bold.", + "bbox": [ + 171, + 297, + 823, + 325 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/82c9e812e95d9f9b9175609d86e7873e3d6b9ba3e2a43b8d65695e54dc393076.jpg", + "image_caption": [ + "Figure 3: Receive Powers. By fixing the transmit location $(x_{\\mathrm{tx}}$ , red cross), we measure the receive power (color at each point; in dB) predicted at each location in W13ROOMS dataset. kNN and MLP suffer from memorization and falsely predict highest receive powers around phantom transmit locations (purple star)." + ], + "image_footnote": [], + "bbox": [ + 176, + 334, + 823, + 398 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "-0.071 points compared to kNN and -0.085 with MLP; (b) WiNeRT is especially strong in capturing the geometry (e.g., 59-63% drop in errors w.r.t second best on WI3ROOMS) of the environment, which can be likely attributed to a strong inductive bias enforced by decoupling global rendering from local evaluations; (c) Although WiNeRT has reasonable performance in capturing the average delays, the performance gap here (e.g., 1-15% reduction in errors on WI3ROOMS) is not especially large compared to other metrics. We attribute this to contributions from 'false positive' rays with non-negligible power arising from our dense ray-launching technique. (d) The contributions of false positives can be mitigated by using a more sophisticated ray launching technique. 
For instance, by piggybacking on ray launch directions from GT channels, we can significantly improve performances across all metrics e.g., from 1-15% error reduction to 15-20% reduction in average delays on WI3ROOMS; (e) Overall, we attribute the underperformance of the baselines to poor generalization performance. For instance, in Figure 3, we illustrate the receive powers (in dB) predicted by all approaches in WI3ROOMS, for some placement of the transmitter (red cross in top-right room). We observe in this particular case that the high-power areas in the kNN and MLP baselines are predicted for a false phantom location (purple star), which roughly corresponds to a transmitter location in training set. This contrasts predictions by WiNeRT where the high-power areas are correctly concentrated around the transmitter location. As a result, we find that simple baselines find it challenging to generalize to new unseen spatial co-ordinates at inference time.", + "bbox": [ + 169, + 457, + 826, + 708 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative Results. We complement the previous quantitative discussions with observations drawn from qualitative analysis. WiNeRT particularly helps for this analysis, as we can recover intermediate ray-environment interaction information. From qualitative examples shown in Fig. 4(a, b), we draw some observations: (a) WiNeRT surprisingly learns ray-surface interactions implicitly, without any direct supervision. For instance, we observe multiple reflected paths between Tx and Rx; (b) we also find that our predictions (red rays) are generally consistent with the underlying simulation process (green rays) e.g., reflections from adjacent walls, floor and ceiling; and (c) we notice WiNeRT sometimes predicts false positives (e.g., above $x_{\\mathrm{tx}}$ in Fig. 
4b), which we attribute to dense omni-directional ray launching.", + "bbox": [ + 169, + 717, + 823, + 844 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 867, + 287, + 881 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the previous section, we evaluated the overall performance of WiNeRT and found promising results. Now, we take a closer look at our approach and investigate generalization benefits.", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c91a7eac1d49b21c068572f1193df77bdfc95a1dade31e322c41275a600dfd7c.jpg", + "image_caption": [ + "(a) W13ROOMS" + ], + "image_footnote": [], + "bbox": [ + 174, + 99, + 382, + 219 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/731e290a7acccc58811301e225652e98bf744655a7ca5d1725917766c3781cc0.jpg", + "image_caption": [ + "(b) WIINDOOR" + ], + "image_footnote": [], + "bbox": [ + 395, + 99, + 602, + 218 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6b0ea87aa2f04037c201089d97969dcb9a3368cc59650405f96d911f1ce450bd.jpg", + "image_caption": [ + "(c) WI3ROOMS (novel $F$ )" + ], + "image_footnote": [], + "bbox": [ + 614, + 101, + 823, + 218 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1651dd31070aa5e21b8d79dc8134174fb51eab246b7281438d0222fabfd0416e.jpg", + "image_caption": [ + "Figure 4: Qualitative results. (a, b) Evaluation on WiNeRT on the environment seen during training. 
(c) We use the previously trained model and re-render on a re-configured floormap $\\pmb{F}$ .", + "(a) Ray-surface interactions" + ], + "image_footnote": [], + "bbox": [ + 173, + 280, + 364, + 388 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/824fe270b736efbd9bf1c9a551cb2d8cef483220d5e298f254791c2b25fc139a.jpg", + "image_caption": [ + "(b) Attenuation: Reflection" + ], + "image_footnote": [], + "bbox": [ + 395, + 280, + 586, + 388 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9433920fbba857bd026cb3caba22ad5b78a61493f516bd5b498209881b49808f.jpg", + "image_caption": [ + "(c) Attenuation: Transmission", + "Figure 5: Evaluating Ray-surface interaction MLP. We display a cut-out of the 3ROOMS represented as a wireframe, with a specific focus on a particular wall. (a) We find a train-test distribution shift of ray-surface incidence points (b, c) Evaluation of the MLP at various incidence points." + ], + "image_footnote": [], + "bbox": [ + 609, + 281, + 823, + 390 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "What does the ray-surface interaction learn? We begin by investigating the ray-surface network $(f_{\\theta}^{1}$ in Fig. 2) in isolation. The network is tasked to map an incident ray (gain $a_{\\mathrm{in}}$ , direction $d_{\\mathrm{in}}$ ) to an outgoing ray $(a_{\\mathrm{out}}, d_{\\mathrm{out}})$ . To accurately make this prediction, the network needs to learn direction- and material-dependent properties at the incident location $x_{\\mathrm{inc}}$ , which poses two challenges. First, the network does not have explicit supervision to learn these properties. Rather, the network needs to implicitly learn these properties by optimizing over a number of channel measurements. Second, specific to our case, the measurements collected involve sparse ray-surface interactions i.e., in practise we cannot expect for paths in the training measurements to interact densely with all possible surfaces. 
For instance, consider Fig. 5a, which shows the incident points $x_{\\mathrm{inc}}$ for a particular wall (black edges) that we recover from the underlying ray tracing tool. Here, we observe that the implicit training set interactions (red markers; never used during our training) are localized to a $\\sim 50\\mathrm{cm}$ band ( $15\\%$ area of the wall). However, at test-time, the network is tasked to generalize to interactions for a different distribution of incidence points (purple markers). In spite of the challenges, we find the ray-surface network associates meaningful information to surface co-ordinates. For instance, we show the attenuation factor predicted for the reflected (Fig. 5b) and transmitted co-ordinates (Fig. 5c) for rays arriving from a fixed $x_{\\mathrm{tx}}$ co-ordinate (placed at $x = 8\\mathrm{m}$ ). We find that the network learns a smooth material- and direction-dependent function over the surface. Over the next experiments, we exploit these locally learnt properties and evaluate WiNeRT rendering in novel scenarios.", + "bbox": [ + 169, + 470, + 826, + 723 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Controllable synthesis: Predicting in Novel Environment Configurations. The previous experiments focused on evaluating approaches for novel locations of transmit and receive co-ordinates at simulation time. Now, we consider novel test-time environments by simulating approaches on re-configured layouts $\\pmb{F}^{\\prime}$ of the train-time environment $\\pmb{F}$ , such as by randomly editing placement of interior walls. Overall, we find that WiNeRT remarkably extrapolates to the reconfigured environment, with the overall error unchanged with WiNeRT (0.202 on $\\pmb{F}$ vs. 0.203 on $\\pmb{F}^{\\prime}$ ; more results in Table A2). Furthermore, by observing the results qualitatively in Figure 4c, we find the predicted interactions remain consistent with the ground-truth simulated rays in novel environment configurations. 
This is particularly appealing as for simulation use-cases which require modelling dynamic objects (e.g., moving vehicle), as WiNeRT allows re-configuring environment without retraining.", + "bbox": [ + 169, + 732, + 823, + 872 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Controllable synthesis: Simulating Higher-order Interactions. In this experiment, we evaluate the ability of approaches to generalize to different numbers of interactions (denoted by $r$ in Sec. 3) at inference time. With WiNeRT, we have the ability to control the number of interactions at", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "test-time (i.e., by unrolling $f_{\\theta}$ for fewer or more steps). We briefly summarize our observations here (see Table A4 for more details). WiNeRT exhibits promising results: while the baselines struggle with a simpler task of lower-order interactions (e.g., 0.22-0.58 overall errors at $r = 0$ ), WiNeRT's performance improves (from 0.20 to 0.12). A better performance is natural in this particular setting, since the model is required to perform an easier task than original (predicting only line-of-sight component). For higher-order interactions, we observe performances of all approaches degrades, but WiNeRT outperforms the baselines. In particular, even at $r = 3$ , we find the geometric-errors of WiNeRT (0.27) comparable to baselines in their originally trained setting ( $r = 1$ , 0.21-0.33 errors).", + "bbox": [ + 169, + 103, + 826, + 217 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "How fast are the simulations? We investigate the wall-clock simulation times of WiNeRT and baselines and compare them with wireless ray tracers. 
In the specific case of WiNeRT, we have some control over the time-accuracy trade-offs at test-time by varying the density of initial rays launched (see Sec. 3.1). Overall, we find that WiNeRT demonstrates speed-ups of $11 - 22 \\times$ over PyLayers and $6 - 22 \\times$ over Wireless Inside. While the baselines are even faster ( $538 - 687 \\times$ with MLP and $79 - 97 \\times$ with kNN), it is achieved at the price of higher errors and poor generalization capabilities (Sec. 4.2). Overall, we find WiNeRT presents reasonable time-accuracy trade-offs compared to baselines. See Sec. C.2 for additional details.", + "bbox": [ + 169, + 224, + 826, + 338 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Exploiting differentiability: User Localization via inverse (differentiable) rendering. Over the previous sections we focused on forward simulations. Now, we study a proof-of-concept for leveraging our differentiable simulator for inverse problems, such as for user localization: determining user location $\\boldsymbol{x}_{\\mathrm{rx}}$ from an observed channel $h_{\\mathrm{obs}}$ . We solve for $\\boldsymbol{x}_{\\mathrm{rx}}$ , by performing gradient descent on the spatial coordinate $\\boldsymbol{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}$ that minimizes the channel loss $\\mathcal{L}_{\\mathrm{chan}}(h_{\\mathrm{obs}}, \\text{render}_{\\theta}(\\boldsymbol{x}_{\\mathrm{tx}}, \\boldsymbol{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}, \\boldsymbol{F}_i))$ . This is possible with WiNeRT, since we can backpropagate through the neural simulation of the channel. We evaluate over 100 test examples and find encouraging results, with a median error of 0.58m in WI3ROOMS (a $150\\mathrm{m}^3$ volume) and 1.21m in WIINDOOR (a $300\\mathrm{m}^3$ volume). See Sec. 
C.4 for more details.", + "bbox": [ + 169, + 347, + 826, + 460 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSION, LIMITATIONS, AND BROADER IMPACT", + "text_level": 1, + "bbox": [ + 171, + 481, + 648, + 497 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we proposed the first neural forward model for wireless ray tracing-based simulations. Such models are particularly appealing as they help alleviate some drawbacks of classical non-neural simulators (e.g., better handling model-measurement mismatches, non-differentiability). Towards this goal, we proposed WiNeRT which tasks an MLP to learn how surfaces in a 3D environment influence propagation of wireless rays, such as by predicting attenuation factor of a reflective component. Overall, we find promising results indicating neural simulators closely capture propagation effects. As neural simulators are additionally differentiable, we further show that they can be used to optimize inverse problems such as user localization.", + "bbox": [ + 169, + 513, + 826, + 626 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Limitations and Future Work. This paper presents the first step towards realizing a neural surrogate for simulating propagation of wireless rays. While we find promising results – in terms of empirically mimicking the simulator's performance while simultaneously reducing complexity – many important steps remain to realize our over-arching goal of differentiable wireless ray tracing. Our approach is designed to capture linear effects of the channel in line with standards (3GPP TR 38.901; ITU-R P.2040-2) and extending to non-linear effects (e.g., amplifier saturations) remains an open-problem. 
Additionally, while our focus is primarily reflection and transmission properties of ray-surface interactions (capturing majority of receive power) which are increasingly relevant for high-frequency transmissions, other properties (e.g., scattering, diffraction) require investigation to model simulations across a wider radio-frequency spectrum. Finally, our surrogate's performance is currently upper-bounded by the underlying simulator's performance, motivating studies into learnt calibration of the surrogate model with real-world measurement data to bypass simulation accuracy.", + "bbox": [ + 169, + 635, + 826, + 803 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Broader Technical Impact. Although our paper focuses on neural simulation of EM waves in the radio-frequency spectrum (0.5-100 GHz), we believe working towards this goal complements research in non-radio modalities as well. For instance, to model propagation of acoustic signals in spatial environments, estimating material-dependent ray-surface interactive properties remains a challenging problem and the proposed research direction potentially complements existing techniques. More generally, we believe that as radio signals require modelling both ray (e.g., reflection) and physical optic (e.g., interference, diffraction) properties, advances here are intertwined with many modalities across the EM spectrum (e.g., audio, visual).", + "bbox": [ + 169, + 811, + 826, + 926 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REPRODUCIBILITY STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 104, + 393, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To ensure reproducibility, we take a number of steps. 
On the dataset side, we use either publicly available indoor layouts (e.g., RPLAN) or synthetically generate layouts with known random seeds (0 and 10 in our case). We further elaborate on the simulation settings to recreate our dataset in Section 4.1 and Section B. We plan to release the simulated data measurements. On the implementation side, we provide specific training details in Section C.4 and further elaborate on the detailed architecture in Section A.3.", + "bbox": [ + 169, + 128, + 826, + 213 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ETHICS STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 231, + 318, + 244 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The data used in our paper corresponds to simulated data of physical processes (EM wave propagation). Since this does not involve any human subjects or personally identifiable information, we believe there is no conflict in this regard.", + "bbox": [ + 169, + 257, + 823, + 301 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENT", + "text_level": 1, + "bbox": [ + 171, + 316, + 328, + 330 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We thank Hanno Ackermann for discussions and feedback on the paper. We additionally thank numerous colleagues for insightful discussions: Thomas Hehn, Fabio Valerio Massoli, Maziar Raissi, Afshin Abdi, June Namgoong, Taesang Yoo, and Akash Doshi.", + "bbox": [ + 169, + 343, + 823, + 387 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 406, + 285, + 421 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "3GPP TR 38.901. Study on channel model for frequencies from 0.5 to 100 ghz. Standard, 3GPP, Valbonne, FR, March 2022.", + "Nicolas Amiot, Mohamed Laaraiedh, and Bernard Uguen. Pylayers: An open source dynamic simulator for indoor propagation and localization. 
In ICC, 2013.", + "Franck Djeumou, Cyrus Neary, Eric Goubault, Sylvie Putot, and Ufuk Topcu. Neural networks with physics-informed architectures and constraints for dynamical systems modeling. In Learning for Dynamics and Control Conference. PMLR, 2022.", + "Sebastian Dorner, Marcus Henninger, Sebastian Cammerer, and Stephan ten Brink. Wgan-based autoencoder training over-the-air. In IEEE International Workshop on Signal Processing Advances in Wireless Communications, 2020.", + "Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. SIGGRAPH, 1988.", + "Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3d object reconstruction from a single image. In CVPR, 2017.", + "Rizal Fathony, Anit Kumar Sahu, Devin Willmott, and J Zico Kolter. Multiplicative filter networks. In ICLR, 2020.", + "Andrew S. Glassner. An introduction to ray tracing. Morgan Kaufmann, 1989.", + "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, 2014.", + "Fumio Ikegami, Tsutomu Takeuchi, and Susumu Yoshida. Theoretical prediction of mean field strength for urban mobile radio. IEEE Transactions on Antennas and Propagation, 39(3):299-302, 1991.", + "ITU-R P.2040-2. Effects of building materials and structures on radiowave propagation above about $100\\mathrm{mhz}$ . Standard, International Telecommunication Union, Geneva, CH, September 2021.", + "William C. Jakes and Donald C. Cox. Microwave mobile communications. Wiley-IEEE press, 1994.", + "George Em Karniadakis, Ioannis G. Kevrekidis, Lu Lu, Paris Perdikaris, Sifan Wang, and Liu Yang. Physics-informed machine learning. Nature Reviews Physics, 3(6):422-440, June 2021." 
+ ], + "bbox": [ + 173, + 429, + 825, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013.", + "William C. Y. Lee. Mobile communications engineering. McGraw-Hill, 1982. ISBN 978-0-07-037039-5.", + "Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In ICCV, 2019.", + "J.W. McKown and R.L. Hamilton. Ray tracing as a design tool for radio networks. IEEE Network, 5(6):27-30, November 1991.", + "Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020.", + "Nelson Nauata, Kai-Hung Chang, Chin-Yi Cheng, Greg Mori, and Yasutaka Furukawa. Housegan: Relational generative adversarial networks for graph-constrained house layout generation. In ECCV, 2020.", + "Nelson Nauata, Sepidehsadat Hosseini, Kai-Hung Chang, Hang Chu, Chin-Yi Cheng, and Yasutaka Furukawa. House-gan++: Generative adversarial layout refinement network towards intelligent computational agent for professional architects. In CVPR, 2021.", + "Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, 2021.", + "Tribhuvanesh Orekondy, Arash Behboodi, and Joseph B Soriaga. Mimo-gan: Generative mimo channel modeling. In IEEE ICC, 2022.", + "Timothy J O'Shea, Tamoghna Roy, and Nathan West. Approximating the void: Learning stochastic channel models from observation with variational generative adversarial networks. 
In ICNC, 2019.", + "Wamiq Para, Paul Guerrero, Tom Kelly, Leonidas J Guibas, and Peter Wonka. Generative layout modeling using constraint graphs. In CVPR, 2021.", + "Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In CVPR, 2019.", + "Tobias Pfaff, Meire Fortunato, Alvaro Sanchez-Gonzalez, and Peter W Battaglia. Learning mesh-based simulation with graph networks. arXiv preprint arXiv:2010.03409, 2020.", + "Maziar Raissi, Paris Perdikaris, and George Em Karniadakis. Physics informed deep learning (part i): Data-driven solutions of nonlinear partial differential equations. arXiv preprint arXiv:1711.10561, 2017.", + "Theodore S. Rappaport. Wireless communications: principles and practice, volume 2. prentice hall PTR New Jersey, 1996.", + "Theodore S Rappaport, Kate A Remley, Camillo Gentile, Andreas F Molisch, and Alenka Zajic. Radio Propagation Measurements and Channel Modeling. Cambridge University Press, 2022.", + "Remcom. Wireless insite, 2022. URL https://www.remcom.com/ wireless-insite-em-propagation-software.", + "Mathew K. Samimi and Theodore S. Rappaport. 3-D millimeter-wave statistical channel model for 5G wireless system design. IEEE Transactions on Microwave Theory and Techniques, 64(7): 2207-2225, 2016.", + "Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia. Learning to simulate complex physics with graph networks. In ICML, 2020.", + "Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. NeurIPS, 2020." 
+ ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Sergey Shirobokov, Vladislav Belavin, Michael Kagan, Andrei Ustyuzhanin, and Atilim Gunes Baydin. Black-box optimization with local generative surrogates. In NeurIPS, 2020.", + "Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. NeurIPS, 2019.", + "Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. NeurIPS, 2020.", + "Xingyuan Sun, Tianju Xue, Szymon Rusinkiewicz, and Ryan P Adams. Amortized synthesis of constrained configurations using a differentiable surrogate. NeurIPS, 2021.", + "Matthew Tancik, Pratul Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020.", + "David Tse and Pramod Viswanath. Fundamentals of wireless communication. Cambridge university press, 2005.", + "Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T Barron, and Pratul P Srinivasan. Ref-nerf: Structured view-dependent appearance for neural radiance fields. In CVPR, 2022.", + "Joram Walfisch and Henry L. Bertoni. A theoretical model of UHF propagation in urban environments. IEEE Transactions on antennas and propagation, 36(12):1788-1796, 1988.", + "Wenming Wu, Xiao-Ming Fu, Rui Tang, Yuhan Wang, Yu-Hao Qi, and Ligang Liu. Data-driven interior plan generation for residential buildings. 
ACM Transactions on Graphics (TOG), 38(6): 1-12, 2019.", + "William Xia, Sundeep Rangan, Marco Mezzavilla, Angel Lozano, Giovanni Geraci, Vasilii Semkin, and Giuseppe Loianno. Millimeter wave channel modeling via generative neural networks. In 2020 IEEE Globecom Workshops, 2020.", + "Yang Yang, Yang Li, Wuxiong Zhang, Fei Qin, Pengcheng Zhu, and Cheng-Xiang Wang. Generative-adversarial-network-based wireless channel modeling: Challenges and opportunities. IEEE Communications Magazine, 2019.", + "Hao Ye, Geoffrey Ye Li, Biing-Hwang Fred Juang, and Kathiravetpillai Sivanesan. Channel agnostic end-to-end learning based communication systems with conditional gan. In IEEE Globecom Workshops, 2018.", + "Hao Ye, Le Liang, Geoffrey Ye Li, and Biing-Hwang Juang. Deep learning-based end-to-end wireless communication systems with conditional gans as unknown channels. IEEE Transactions on Wireless Communications, 2020.", + "Yan Zhang, Jonathon Hare, and Adam Prugel-Bennett. Deep set prediction networks. NeurIPS, 2019." 
+ ], + "bbox": [ + 171, + 102, + 825, + 719 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 171, + 99, + 295, + 125 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A APPROACH", + "text_level": 1, + "bbox": [ + 171, + 155, + 305, + 170 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.1 BUILDING CHANNEL MODELS", + "text_level": 1, + "bbox": [ + 171, + 186, + 431, + 200 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This section accompanies the text in Section 3.", + "bbox": [ + 171, + 212, + 482, + 227 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Channel models are defined either in a statistical way by defining a distribution over channel attributes or in deterministic way using ray tracing. Statistical channel models are inadequate for applications involving positioning, sensing and challenges of communication at higher frequencies (e.g., mmWave at 30-300 GHz (Rappaport et al., 2022)). Inspired by similar techniques in computer graphics (Glassner, 1989), traditional ray tracing approaches (see for example (McKown & Hamilton, 1991; Ikegami et al., 1991; Walfisch & Bertoni, 1988)) approximate propagation of electromagnetic waves by modeling interactions of each ray with objects in its paths. These interactions include for example reflection, diffraction and penetration. Although this is more efficient than solving Maxwell equations, ray tracing methods need a detailed knowledge of the environment and are generally slow for prototyping. They generally utilize hard coded and mathematically tractable models for example knife-edge model for diffraction (Lee, 1982; Rappaport, 1996). 
These abstractions suffer from mismatches and require occasional tedious fine-tuning and calibration with real data. Improving these models while remaining tractable for rapid simulation rounds is not straightforward. Finally, they are non-differentiable and cannot be integrated into a closed loop design pipeline. We plan to tackle these issues by building a neural surrogate of a physics-based wireless ray tracer in this paper.", + "bbox": [ + 169, + 233, + 826, + 455 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.2 REPRESENTING RAY ATTRIBUTES", + "text_level": 1, + "bbox": [ + 171, + 472, + 455, + 486 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We represent the $k$ -th ray (among $K$ rays) at the $r$ -th iteration of rendering as $\\pmb{u}_k^{(r)}$ . For notation convenience, we drop the sub- and super-script for the rest of the section. We characterize the wireless ray analogous to the concept of an optical ray (such as with geometric direction, intensity). In addition to the wireless attributes (see Equation 2), we further include meta-level attributes that helps us propagate and render the eventual ray received at the receiver co-ordinate $\\pmb{x}_{\\mathrm{rx}}$ . We briefly describe these attributes here and elaborate on how they are obtained or updated over the next sections. 
The ray contains the attributes:", + "bbox": [ + 169, + 498, + 823, + 595 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol{u} = \\left( \\underbrace{a \\quad \\tau \\quad \\phi}_{\\text{(a) Channel Attributes}} \\quad \\underbrace{\\boldsymbol{x} \\quad \\boldsymbol{d} \\quad t_{s} \\quad t_{\\mathrm{rx}} \\quad \\rho_{\\mathrm{rx}}}_{\\text{(b) Ray Geometry}} \\quad \\underbrace{\\sigma_{\\mathrm{upd}} \\quad \\sigma_{\\mathrm{rx}}}_{\\text{(c) State}} \\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 595, + 715, + 637 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "which as shown can be grouped into three categories: (a) Wireless Channel Attributes. Exactly as discussed earlier in the section (see Equation 2), it contains the attributes to construct the wireless channel time-angle impulse response (Equation 1) (b) Ray Geometry. We additionally include geometrical representation of the ray, which helps us determine how to propagate the ray through the environment. Specifically, we represent the geometry of the ray using the line equation: $\\pmb{p}(t) = \\pmb{x} + t\\pmb{d}$ , where $\\pmb{x}$ is the origin and $\\pmb{d}$ is a unit-vector encoding the ray direction. We are interested in two particular solutions of $t$ in this equation: $t_s$ for which the ray intersects with a surface (mesh face in our case) and $t_{\\mathrm{rx}}$ for which the ray is tangential to a sphere around some receiver of radius $\\rho_{\\mathrm{rx}}$ . (c) Ray state. To help with subsequent updates to the ray at future iterations, we track two binary variables. $\\sigma_{\\mathrm{upd}}$ denotes whether the ray has to be updated in the next iteration. 
$\\sigma_{\\mathrm{rx}}$ denotes whether the ray has impinged on a reception sphere of a predefined radius.", + "bbox": [ + 169, + 638, + 826, + 791 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.3 RAY MARCHING: DETAILS", + "text_level": 1, + "bbox": [ + 171, + 806, + 406, + 821 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Ray-Environment Intersections. For each ray, we are interested in their first interaction with the environment (e.g., first wall it hits, impinging on the receiver). For this, we are interested in the solutions to the line equation representing the geometry of the ray: $\\pmb{p}(t) = \\pmb{x}_k^{(r)} + t\\pmb{d}_k^{(r)}$ . In particular, we are interested in two solutions of $t$ : (a) Ray-Face intersection. The smallest value of $t > 0$ for which $\\pmb{p}(t)$ lies on a surface (a triangular mesh face in our case). For this, we perform ray-triangle intersections with each face in the environment and find the corresponding solution $t = t_s$ .", + "bbox": [ + 169, + 835, + 826, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/d2f20eee2b3f9b823f2adc8f4a42645796bf6d8ad301be8f20a54c5baf03ff4e.jpg", + "image_caption": [ + "Figure A1: Ray-surface interaction network $f_{\\theta}^{1}$" + ], + "image_footnote": [], + "bbox": [ + 176, + 103, + 823, + 186 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This helps us estimate the new relay location: $\\pmb{x}_k^{(r + 1)} = \\pmb{x}_k^{(r)} + t_s\\pmb{d}_k^{(r)}$ (a) Ray-Rx intersection. In parallel, we are also interested in positive solutions of $t$ for which the ray hits the receiver if it were modeled as a sphere of radius $\\rho_{\\mathrm{rx}}$ . 
In this case, we obtain the value of $t$ as the projection of $\\pmb{x}_{rx}$ on $\\pmb{p}(t)$ :", + "bbox": [ + 169, + 232, + 826, + 292 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nt _ {r x} = \\max \\left(0, \\left(\\boldsymbol {x} _ {r x} - \\boldsymbol {x} _ {k} ^ {(r)}\\right) \\cdot \\boldsymbol {d} _ {k} ^ {(r)}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 296, + 823, + 316 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\rho_ {r x} = \\left| \\left| \\left(\\boldsymbol {x} _ {r x} - \\boldsymbol {x} _ {k} ^ {(r)}\\right) - t _ {r x} \\boldsymbol {d} _ {k} ^ {(r)} \\right| \\right| \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 318, + 823, + 339 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Consequently, at the end of ray-environment, we analytically estimate the first intersections of the ray with both the environment and (potentially) the receiver.", + "bbox": [ + 169, + 340, + 823, + 369 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Ray-Surface Interaction. If the ray $\\pmb{u}_k^{(r)}$ (originating at $\\pmb{x}_k^{(r)}$ ) and travelling in direction $d_k^{(r)}$ hits a wall at $x_{k}^{(r + 1)}$ (as estimated in the previous step), we are now interested in characterizing the outgoing ray with origin at $x_{k}^{(r + 1)}$ . Specifically, we are interested in estimating the new direction $d_k^{(r + 1)}$ (does the ray penetrate the wall? or reflect?) and the corresponding change in gain that arises (i.e., loss of power, change of phase). This is a complex problem and typically requires in-depth knowledge of the surface (e.g., which material) as well as its specific EM properties (e.g., frequency-dependent effects). Our solution is to instead learn these properties by associating spatial regions in the environment with EM-specific properties. Towards this, we delegate the association to a neural network show in Figure A1. 
The key idea is to associate spatial co-ordinates (or sets of co-ordinates, given by face on which they lie) with EM properties. We achieve this by mapping spatial properties (e.g., face corresponding to $x_{k}^{(r + 1)}$ ) to EM properties (e.g., gain factor).", + "bbox": [ + 169, + 380, + 826, + 554 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Specifically, our neural network is:", + "bbox": [ + 171, + 559, + 403, + 574 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {v} _ {i} = \\text {s p a t i a l - n e t} \\left(\\boldsymbol {f} _ {i}, \\boldsymbol {n} _ {i}, \\boldsymbol {b} _ {i}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 578, + 823, + 594 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {s} _ {i}, \\boldsymbol {\\rho} _ {i} = \\text {d i r e c t i o n a l . n e t} \\left(\\boldsymbol {v} _ {i}, \\boldsymbol {d} _ {i}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 595, + 823, + 612 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "which consists of a spatial_net to encode EM properties specific to a spatial region, but independent of the incidence direction. This network takes as inputs the one-hot encoding of the face $\\pmb{f}_i$ on which the relay point $\\pmb{x}_k^{(r+1)}$ lies and the surface normal vector at that point $\\pmb{n}_i$ . 
In addition, we also provide the network a 3-dim conditioning vector of signed distances", + "bbox": [ + 169, + 614, + 823, + 675 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {b} _ {i} = \\left(\\operatorname {s d f} \\left(\\boldsymbol {x} _ {t x}, \\boldsymbol {f} _ {i}\\right), \\quad \\operatorname {s d f} \\left(\\boldsymbol {x} _ {r x}, \\boldsymbol {f} _ {i}\\right), \\quad \\operatorname {s d f} \\left(\\boldsymbol {x} _ {k} ^ {(r + 1)}, \\boldsymbol {f} _ {i}\\right)\\right) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 680, + 823, + 699 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\operatorname{sdf}(\\pmb{x},\\pmb{f})$ is the signed distance function between co-ordinate $\\pmb{x}$ and face $f$ . We find it crucial to condition the network on these values to help predict EM-properties for relevant outgoing components (e.g., reflective, transmission).", + "bbox": [ + 169, + 700, + 826, + 744 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The output of the network is a gain factor $s_i$ , such that the new gain of the ray $\\boldsymbol{u}_k^{(r+1)}$ is $a_k^{(r+1)} = s_i a_k^{(r)}$ . Since the gain magnitudes can be represented in either linear or logarithmic scale, we predict both additive and multiplicative factors of the gain in practice ( $a_k^{(r+1)} = s_{i,1} a_k^{(r)} + s_{i,2}$ ). In parallel, the network also predicts the rotation a ray incident with direction $\\boldsymbol{d}_k^{(r)}$ on $\\boldsymbol{f}_i$ undergoes. We characterize rotations using a 4-dim rotation $\\rho_i$ using Euler-Rodrigues parameterization. This parameterization encodes the axis of rotation and about which $\\boldsymbol{d}_k^{(r)}$ rotates by angle $\\vartheta$ . 
We represent the rotation by a $3 \\times 3$ SO(3) matrix $A$ and the new outgoing direction of ray $k$ is given by $\\boldsymbol{d}_k^{(r+1)} = A \\boldsymbol{d}_k^{(r)}$", + "bbox": [ + 169, + 752, + 826, + 888 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Reception/Termination check. For some special cases, we halt ray marching for a subset of rays. Namely, when ray $k$ impinges on a reception sphere of radius under $\\varrho$ meters. This prevents", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/0cef139f6ea25ca9f4890a522c10971c0381d79ed5f5f595829cb4ec9cb6adc9.jpg", + "image_caption": [ + "(a) Testset 1: \"Checkerboard\"" + ], + "image_footnote": [], + "bbox": [ + 174, + 112, + 380, + 234 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/ff9a1704ab235f3358c8018acd8dba87c60a37a294e3c398aed3663d99c28239.jpg", + "image_caption": [ + "(b) Testset 2: \"Generalization-z\"", + "Figure A2: Train and test regimes: We consider disjoint subsets of train (blue markers; identical in all figures) and test (orange markers) co-ordinates of transmit and receive locations." + ], + "image_footnote": [], + "bbox": [ + 385, + 102, + 607, + 234 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/44d3c622da02035b4fcd69c7fea3ddd1cb7aefd0ef2363370ac71d0b3f328243.jpg", + "image_caption": [ + "(c) Testset 3: \"Generalization-diag\"" + ], + "image_footnote": [], + "bbox": [ + 614, + 112, + 821, + 234 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "a future version of the ray being potentially being incorrectly received once again. 
In addition, for computation reasons, we also terminate ray marching if the ray exits the region of interest (e.g., ray exiting the environment).", + "bbox": [ + 169, + 311, + 823, + 354 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Free-space interaction. While the previous steps modeled the interaction of material properties of the environment on wireless propagation, we now switch focus to free-space. In this case, we model propagation of a ray using the empirically-adjusted Friis' Equation:", + "bbox": [ + 169, + 363, + 825, + 406 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nP _ {r} (d) = P _ {t} K \\left(\\frac {d _ {0}}{d}\\right) ^ {\\lambda}, \\quad d \\geq d _ {0} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 410, + 821, + 445 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "which represents the power received at the receive antenna $P_r$ as a function of the power fed into transmitting antenna $P_t$ and the distance travelled by the ray $d$ . 
We learn the remaining scalar parameters $K$ (constant representing antenna gains), $\\lambda$ (path-loss exponent), and $d_0$ (reference distance).", + "bbox": [ + 169, + 449, + 823, + 503 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B DATASET: ADDITIONAL DETAILS", + "text_level": 1, + "bbox": [ + 171, + 523, + 485, + 539 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.1 TRAIN AND TEST REGIMES", + "text_level": 1, + "bbox": [ + 171, + 555, + 408, + 568 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure A2 accompanies the text in Section 4.1.", + "bbox": [ + 171, + 580, + 480, + 595 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.2 SIMULATION FOR WIINDOOR DATASET: DETAILS", + "text_level": 1, + "bbox": [ + 171, + 612, + 555, + 626 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We created 3 different floor-plans in Wireless InSite where 2D floor-plans layout and semantic labels of each room are picked from House $\\mathrm{GAN}++$ dataset and mapped into a 3D layout where the scale and dimensions are determined based on practical floor-plan scenarios. All layouts are scaled to $10\\mathrm{m}\\times 10\\mathrm{m}$ with ceiling height at $3\\mathrm{m}$ . All the inner walls and floor materials are layered dielectrics with specific permittivity, conductivity & roughness. These have finite reflection and transmission coefficients. The reflection coefficient is corrected if the surface is not smooth while the transmission coefficients are unaffected by surface roughness.", + "bbox": [ + 169, + 637, + 823, + 736 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Materials. Propagation characteristics are naturally affected by the medium and we create a dataset with fairly diverse set of materials. Layered dielectric with two layers separated by free-space of $89\\mathrm{cm}$ is chosen for all inner walls and the outer-walls were made of thicker materials of concrete. 
Doors were created using free space except the balcony door which was created using glass with a small thickness. The balcony walls were laid out using brick walls. The propagation factor and index of reflection are functions of the permittivity $(\\epsilon)$ and conductivity $(\\sigma)$ of medium. In Table A1, we present the relative permittivity and conductivity.", + "bbox": [ + 169, + 746, + 823, + 844 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Antenna and Transceiver configuration. Omnidirectional beam patterned antenna with polarization perpendicular to the z axis is setup for all receive and transmit antennas. Location, Orientation of the antenna are set relative to global reference such that they are rotated about the z axis by 90deg and placed at a height of $2.8\\mathrm{m}$ . All antennas employ the same configuration with no transmission loss.", + "bbox": [ + 169, + 854, + 823, + 922 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/3fd2f171e59fb24bd706ce2c86b6e573fdd7464e3fa8745a0badd131cea568c1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
thickness(cm)permittivity εconductivity σ (S/m)
Layered drywall(1,3)1.32.80.013
Brick12.54.440.0001
Concrete305.310.015
Glass32.40
", + "bbox": [ + 238, + 99, + 756, + 186 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/7ef4308f608175418b413d4cdc6a996fe102fd2027a25683b2a2c5a98dcb4ef3.jpg", + "table_caption": [ + "Table A1: Material properties" + ], + "table_footnote": [], + "table_body": "
OverallGeometryAvg. Delay
kNN0.2640.2881.479
MLP0.2800.3781.191
WiNeRT0.2030.1141.297
", + "bbox": [ + 207, + 265, + 493, + 334 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/72cfcef1ab10c90b4f112744c062f3d7c1a191477f885f9a69747b740a9af549.jpg", + "image_caption": [ + "Table A3: Qualitative results" + ], + "image_footnote": [], + "bbox": [ + 516, + 224, + 810, + 388 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Simulation. We currently run the simulation using the shoot and bounce model where a geometric path is drawn from every point on the transmitter field pattern to the receive point. This also includes transmission through surfaces allowing it to model transmittance and reflection. Rays are first traced from the source points with the rays reflecting specularly from the building walls. The rays that hit building walls are reflected specularly and continue to be traced up to the maximum number of reflections and transmissions.", + "bbox": [ + 169, + 438, + 826, + 523 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The spatial separation of rays is set to $0.75^{\\circ}$ . The geometric path traced by the ray undergoes up to 6 specular reflection and 3 transmittance with path loss threshold set to -70dBm.", + "bbox": [ + 169, + 529, + 823, + 560 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Total received power of all paths is determined as the sum of time averaged power of group of correlated paths. 
A set of ray paths that interact with similar set of faces and follow nearly same path are defined as group.", + "bbox": [ + 169, + 564, + 826, + 608 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C EVALUATION: ADDITIONAL DETAILS", + "text_level": 1, + "bbox": [ + 171, + 628, + 519, + 643 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1 CONTROLLABLE SYNTHESIS: GENERALIZATION TO RECONFIGURED FLOORMAPS", + "text_level": 1, + "bbox": [ + 169, + 660, + 782, + 674 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table A2 accompanies the discussions in Section 4.3, where we evaluate a WiNeRT model trained in one environment $\\mathbf{F}$ and evaluated in a reconfigured environment $\\mathbf{F}'$ .", + "bbox": [ + 169, + 686, + 823, + 715 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.2 CONTROLLABLE SYNTHESIS: LOWER- AND HIGHER-ORDER INTERACTIONS", + "text_level": 1, + "bbox": [ + 169, + 733, + 743, + 748 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "See Table A4, which accompanies the discussions in Section 4.3.", + "bbox": [ + 169, + 758, + 599, + 773 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/dbe9fd59651ce7db0e6b30bb888189d2e15f55afff59d34435e44027e64c5b6c.jpg", + "table_caption": [ + "Table A2: Quantitative results. For a trained approach evaluated on a reconfigured floormap $\\mathbf{F}^{\\prime }$" + ], + "table_footnote": [], + "table_body": "
#interactions rOverall (DoD)GeometryAvg. Delay
01*2301*2301*23
kNN0.220.330.500.550.310.210.290.331.302.242.963.40
MLP0.580.460.610.670.340.330.370.410.982.052.933.48
WiNeRT0.120.250.440.510.000.090.210.270.032.032.432.8
", + "bbox": [ + 171, + 797, + 846, + 885 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table A4: Low- and Higher-Order Interactions. We vary the number of ray-surface interactions (denoted by $r$ ) for a model trained using single-order interactions $\\left( {r = 1\\text{,denoted by * in the table).}}\\right)$ .", + "bbox": [ + 169, + 893, + 823, + 921 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/54ed2cb9f6fb7385596948980201aec5351dc6670b16dde8c6a637b89d3ab444.jpg", + "image_caption": [ + "Figure A3: Simulation Time. Comparing wall-clock time vs. accuracy performances of our approach (WiN-eRT) against baselines (MLP, kNN) and wireless ray tracing softwares (PyLayers and Insite). The 'Oracle ray launch' variant, which utilizes known ray launch directions at test-time, indicates an approximate performance upper-bound of our approach." + ], + "image_footnote": [], + "bbox": [ + 181, + 106, + 496, + 295 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/87f6ef8759d03631a96dad398a0fa0df83e5dacb9239815374b27f524ea23781.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 498, + 107, + 816, + 296 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.3 SIMULATION TIME", + "text_level": 1, + "bbox": [ + 171, + 410, + 349, + 422 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In Sections 4.2 and 4.3, we found our proposed approach WiNeRT achieves reasonable performance compared with non-differentiable and non-neural simulator packages. Additionally, we demonstrated that WiNeRT is capable of generalization (e.g., to novel elevations, to re-configured floor-plans) and can be used for inverse problems. 
In this section, we additionally discuss run-time performance of WiNeRT and compare against baseline approaches as well as the simulator package.", + "bbox": [ + 169, + 443, + 823, + 513 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Experimental Setup. The end-goal of the experiment is to analyze the simulation time (specifically wall-clock times) of the proposed WiNeRT approach and contrast it against both the simulator softwares (PyLayers, Wireless Inside) and proposed baselines (MLP, kNN). We first remark that the implementations fundamentally vary between the approaches and hence an ideal wall-clock timing comparison is not possible. For instance, some approaches (WiNeRT, MLP, kNN) use a PyTorch implementation which can be run on GPU whereas the wireless ray tracing simulation packages are either proprietary (e.g., Wireless Inside) or developed exclusively for CPU (e.g., PyLayers) and thereby limiting the choice of hardware on which they can be run. Nonetheless, we keep simulation settings consistent when possible: by running the exact simulations used for the overall results (setting 'checkerboard'; see Section 4.1) and furthermore estimating wall-clock times per simulation (batch size of 1) over $N$ individual simulations with a maximum of 1 reflection and transmission (i.e., $r = 1$ ). For all approaches, we report only the mean simulation time over the multiple simulations, as we found the variances low ( $\\sigma^2 \\leq 3.5 \\times 10^{-3}$ ). When possible, we also report corresponding accuracy ('overall prediction error'; see Sec. 4.1). We evaluate PyTorch-based implementations (WiNeRT, MLP, kNN) over $N = \\sim 8K$ simulations using pretrained models (specifically the ones for reporting 1) on a Nvidia A100 GPU. In the case of WiNeRT, we are able to control time-accuracy trade-off to some degree at test-time by varying the number of launched rays $K$ (see 'Ray Launching' in Sec. 
3.1) as a function of the number of subdivisions of the ico-sphere. We choose 1-5 sub-divisions and additionally an 'oracle ray' launch strategy to depict a lower-bound on the time-accuracy values.", + "bbox": [ + 169, + 523, + 826, + 801 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Results. We present the time-accuracy in Figure A3 and observe: (i) WiNeRT (orange markers) is significantly faster than the simulators (blue line), demonstrating speed-ups of $11 - 22 \\times$ over PyLayers (Amiot et al., 2013) and $6 - 22 \\times$ over Wireless Insite (Remcom, 2022). Although the simulators are approximately an upper-bound on the accuracy, we find that WiNeRT can make reasonable trade-offs on accuracy to boost simulation times in certain scenarios; (ii) The baselines we propose in this paper (MLP and kNN) are even faster. MLP (green marker) is the fastest with speed-ups of $538 - 687 \\times$ , which can be largely attributed to a simple architecture (3-layer ReLU MLP with 128 hidden units). kNN (red marker) is the second fastest with $79 - 97 \\times$ speed-ups over the simulators.", + "bbox": [ + 169, + 811, + 826, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/f39a85c43617e0f4efee44c6bcbf283850edecce615634dde21652b47fb763ab.jpg", + "image_caption": [ + "Figure A4: User Localization. We backpropagate through our trained forward model to solve for the position of the receiver." + ], + "image_footnote": [], + "bbox": [ + 375, + 106, + 622, + 295 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "While these baselines offer much faster simulation times, their generalization capabilities remain unclear as they suffer from memorization (see discussion for Fig. 
3).", + "bbox": [ + 169, + 364, + 823, + 395 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.4 USER LOCALIZATION VIA INVERSE RENDERING", + "text_level": 1, + "bbox": [ + 171, + 410, + 552, + 424 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In this section, we provide additional details to complement the discussion on the user localization experiment in Section 4.3. For the user localization task, the problem is to determine user location $\\pmb{x}_{\\mathrm{rx}}$ from an observed channel $h_{\\mathrm{obs}}$ . We solve for $\\pmb{x}_{\\mathrm{rx}}$ , by performing gradient on spatial coordinate $\\pmb{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}$ that minimizes the channel loss $\\text{render}_{\\theta}(\\pmb{x}_{\\mathrm{tx}}, \\pmb{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}, \\pmb{F}_i)$ . This is possible with WiNeRT, since we can backpropagate through the neural simulation of the channel. We optimize for $\\pmb{x}_{\\mathrm{rx}}$ using SGD with momentum (lr=0.01, momentum=0.9, 2000 iterations) with two additional considerations: (a) we constrain $\\pmb{x}_{\\mathrm{rx}}$ to lie in valid ranges (positive, upper-bounded by $\\pmb{x}_{\\max}$ ) by clamping the values at each iteration; and (b) to prevent solutions in local minimas, we take the result which yields the minimum loss over five random initializations of $\\pmb{x}_{\\mathrm{rx}}$ . 
We present the CDF of localization errors over 100 test examples in A4", + "bbox": [ + 169, + 436, + 823, + 575 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D IMPLEMENTATION: ADDITIONAL DETAILS", + "text_level": 1, + "bbox": [ + 171, + 595, + 562, + 611 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In this section, we provide additional implementation details and hyperparameter choices of approaches discussed in the paper.", + "bbox": [ + 169, + 626, + 823, + 656 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D.1 WINERT", + "text_level": 1, + "bbox": [ + 171, + 671, + 284, + 686 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Architecture: Ray-surface Interaction $f_{\\theta}^{1}$ . We follow an MLP architecture (see Figure A1) similar to NeRF approaches (Mildenhall et al., 2020; Verbin et al., 2022). We decompose the parameters into view-independent ('spatial MLP') and view-dependent ('directional MLP') sets. Given a ray incident at a spatial co-ordinate $x_{k}$ in direction $d_{k}$ , the spatial MLP (2 hidden layers, 64 units) takes three inputs: (a) the face $f_{i}$ (1-hot index) on which $x_{k}$ lies; (b) the surface normal $n_{i}$ of face $f_{i}$ ; and (c) a 3d vector of signed-distance values between the face and $x_{\\mathrm{tx}}$ , $x_{\\mathrm{rx}}$ , and $x_{k}$ . We find (c) provides information (e.g., $x_{\\mathrm{tx}}$ and $x_{\\mathrm{rx}}$ on the same side of wall) to condition the network to predict attributes related to either reflection or transmission components. The directional MLP (1 hidden layer, 64 units) takes two inputs: (i) a 32-dim bottleneck vector produced by the spatial MLP; and (ii) a 3-dim unit vector representing the incidence direction $d_{k}$ . The final output are scaling and additive co-efficients $s$ for the gain magnitude (i.e., $a_{k}^{(r + 1)} = s_{1}a_{k}^{(r)} + s_{2}$ ) and 4-dim parameters $\\rho_{i}$ for rotation (based on Euler-Rodrigues formulation). 
The rotation parameters $\\rho_{i}$ are mapped to a $3\\times 3$ rotation matrix $A = \\Gamma (\\rho_{i})$ to transform the incident to outgoing ray $d_{k}\\coloneqq Ad_{k}$ .", + "bbox": [ + 169, + 700, + 826, + 886 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "**Renderer:** Ray Launching. In the first step of the renderer, we launch $K$ rays from co-ordinate $x_{\\mathrm{tx}}$ uniformly in all directions. To achieve this, we center a ico-sphere with 5 sub-divisions and", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "choose as directions the vectors from $\\pmb{x}_{\\mathrm{tx}}$ towards the ico-sphere vertices (10.2K vertices with 5 sub-divisions). Since we know the exact co-ordinates between $\\pmb{x}_{\\mathrm{tx}}$ and $\\pmb{x}_{\\mathrm{rx}}$ , we manually include the line-of-sight direction resulting in a total of $K$ rays.", + "bbox": [ + 169, + 103, + 823, + 148 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "**Renderer:** Ray Marching. The core step of the renderer is ray marching (detailed in Figure 2). We elaborate on technical implementation details step-by-step using as reference Figure 2. We drop sub- and super-scripts for rest of the paragraph for notational convenience. (a) **Ray-Triangle intersection:** For a given ray $\\pmb{p} = \\pmb{o} + t\\pmb{d}$ , we are interested in the minimum finite solution of $t > 0$ for which the ray intersects with each face of the mesh. 
For some face with coordinates $(\\pmb{a}, \\pmb{b}, \\pmb{c})$ , this entails solving for $t$ such that $\\pmb{p} = \\pmb{o} + t\\pmb{d} = \\alpha \\pmb{a} + \\beta \\pmb{b} + \\gamma \\pmb{c}$ (under constraints $\\alpha + \\beta + \\gamma = 1$ and $0 \\leq \\alpha, \\beta, \\gamma \\leq 1$ ). We calculate valid solutions using Cramer's rule for all faces in the mesh and only consider (if one exists) the minimum positive solution corresponding to the first ray-triangle intersecting point. (b) **Ray-Surface interaction:** Given the solution from the previous step (i.e., on which spatial co-ordinate the ray is incident on the surface), we are now interested in estimating the outgoing ray from that co-ordinate. For this, we leverage an MLP that maps incident gain, direction, and certain face properties to outgoing gain and direction. More details of this MLP are discussed above under the 'Architecture: Ray-surface Interaction'. (c) **Reception/Termination:** Per ray, we stop ray marching steps if it is either received (hits a reception sphere of fixed size of $30\\mathrm{cm}$ ) or leaves the region of interest (e.g., penetrates exterior wall is shot into infinity). In other cases, we continue with ray marching steps.", + "bbox": [ + 169, + 156, + 826, + 380 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "**Renderer:** Ray Aggregation. At the end of ray marching steps (over $R$ iterations), we determine the final state of the $K$ rays. We are now interested in a small subset of these $K$ rays that is received at a receiver at fixed co-ordinate $x_{\\mathrm{rx}}$ . Note that we perform these steps only at test-time. 
The ray aggregation as a result involves two steps: (a) Ray Filtering: where we determine the subset of rays that arrives at $x_{\\mathrm{rx}}$ by modelling the receiver as a sphere of fixed radius of $30\\mathrm{cm}$ ; and (b) Preventing double counting: we find duplicate rays arrive at $x_{\\mathrm{rx}}$ due to a combination of a non-infinitesimally sized reception sphere and a high density of launched rays. We cull such duplicates by grouping rays based on a unique interaction sequence (i.e., IDs of faces it intersects with) and choosing the ray of the shortest length in each group.", + "bbox": [ + 169, + 388, + 823, + 515 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "**Optimization.** We perform gradient-descent steps on learnable parameters using Adam with a learning rate of 0.001 with batch size of 1. We observed large gradients (possibly due to single-batch) and hence clip gradient values to 100 during training. The model is trained for 100 epochs and we pick the checkpoint with lowest validation error during training.", + "bbox": [ + 169, + 523, + 823, + 583 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D.2 BASELINES", + "text_level": 1, + "bbox": [ + 171, + 598, + 299, + 612 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "MLP. The MLP baseline extends ideas presented in Tancik et al. (2020); Sitzmann et al. (2020), where a simple MLP is used to map co-ordinates to the signal (e.g., pixel co-ordinate to RGB values). In our paper, the MLP directly maps the spatial co-ordinates $x_{\\mathrm{tx}}$ and $x_{\\mathrm{rx}}$ to channel $h_i$ . The MLP contains 3 hidden layers, each with 128 hidden units and ReLU activation. The core idea here is to implicitly learn the geometry of the environment (floormap $F$ ), which is common to all train and test examples. 
Note that in contrast to previous works, this model does not use positional embeddings nor sinusoidal activations, as our initial experiments indicated they learn high-frequency artifacts that is not typically present in our datasets (the wireless channels).", + "bbox": [ + 169, + 626, + 823, + 739 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "kNN. The kNN baseline (with $k = 1$ ) works as so: for a given test-example $(\\pmb{x}_{\\mathrm{tx}}, \\pmb{x}_{\\mathrm{rx}})$ we find the spatially closest training example arg $\\min_{i} ||\\pmb{x}_{\\mathrm{tx}} - \\pmb{x}_{\\mathrm{tx},i}^{\\mathrm{train}}||_{2} + ||\\pmb{x}_{\\mathrm{rx}} + \\pmb{x}_{\\mathrm{rx},i}^{\\mathrm{train}}||_{2}$ and predict channel $\\pmb{h}_i$ .", + "bbox": [ + 169, + 750, + 823, + 792 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 960 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/41f81ce5-4453-4061-b257-336a66f472e8_model.json b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/41f81ce5-4453-4061-b257-336a66f472e8_model.json new file mode 100644 index 0000000000000000000000000000000000000000..309f226e185017a9c367afa16c9f10a84de7ee1a --- /dev/null +++ b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/41f81ce5-4453-4061-b257-336a66f472e8_model.json @@ -0,0 +1,3021 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.099, + 0.825, + 0.173 + ], + "angle": 0, + "content": "WINERT: TOWARDS 
NEURAL RAY TRACING FOR WIRELESS CHANNEL MODELLING AND DIFFERENTIABLE SIMULATIONS" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.195, + 0.655, + 0.238 + ], + "angle": 0, + "content": "Tribhuvanesh Orekondy, Kumar Pratik, Shreya Kadambi, Hao Ye, Joseph Soriaga, Arash Behboodi \nQualcomm AI Research*" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.275, + 0.548, + 0.29 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.305, + 0.77, + 0.57 + ], + "angle": 0, + "content": "In this paper, we work towards a neural surrogate to model wireless electromagnetic propagation effects in indoor environments. Such neural surrogates provide a fast, differentiable, and continuous representation of the environment and enables end-to-end optimization for downstream tasks (e.g., network planning). Specifically, the goal of the paper is to render the wireless signal (e.g., time-of-flights, power of each path) in an environment as a function of the sensor's spatial configuration (e.g., placement of transmit and receive antennas). NeRF-based approaches have shown promising results in the visual setting (RGB image signal, with a camera sensor), where the key idea is to algorithmically evaluate the 'global' signal (e.g., using volumetric rendering) by breaking it down in a sequence of 'local' evaluations (e.g., using co-ordinate neural networks). In a similar spirit, we model the time-angle channel impulse response (the global wireless signal) as a superposition of multiple paths. The wireless characteristics (e.g., power) of each path is a result of multiple evaluations of a neural network that learns implicit ray-surface interaction properties. We evaluate our approach in multiple indoor scenarios and demonstrate that our model achieves strong performance (e.g., \\(<0.33\\mathrm{ns}\\) error in time-of-flight predictions). 
Furthermore, we demonstrate that our neural surrogate whitens the 'black-box' wireless simulators, and thus enables inverse rendering applications (e.g., user localization)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.593, + 0.341, + 0.608 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.624, + 0.827, + 0.738 + ], + "angle": 0, + "content": "Realistic simulations of physical processes are vital to many scientific and engineering disciplines. In this paper, we focus on simulation of wireless electromagnetic (EM) signals within a propagation environment. The physics of such EM wave propagation between a transmit and receive point are analytically given by Maxwell equations: the transmitted wave undergoes different interactions with the environment (e.g., reflection), and the receiver gets the wave through multiple paths with different time-of-flights and powers, and from different directions. However, solving the Maxwell equations with boundary conditions requires in-depth knowledge of the propagation environment, hence classically modelling EM propagation is intractable for most engineering applications." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.742, + 0.828, + 0.856 + ], + "angle": 0, + "content": "Existing techniques make such simulations tractable by trading-off accuracy for speed. At one end of the spectrum, such simulations are represented in a statistical sense where a probabilistic model roughly captures the marginalized distribution over time-of-flights, gains and direction of transmit-receive paths. However, this level of accuracy is insufficient for designing systems that efficiently operate in high frequency bands. This motivates solutions at the other end of the spectrum: wireless ray tracing simulators. 
Given a detailed CAD representation of the environment along with the material properties, and numerous wireless configuration parameters (e.g., placement of a base station), the simulators generate resulting propagation characteristics." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.861, + 0.829, + 0.904 + ], + "angle": 0, + "content": "Although wireless ray tracing simulators are appealing, there are a few drawbacks. First, they are generally slow, which poses a bottleneck for closed-loop design pipelines, as wireless configurations cannot be quickly mapped to propagation characteristics. Second, because they are non" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.625, + 0.925 + ], + "angle": 0, + "content": "*Qualcomm AI Research is an initiative of Qualcomm Technologies, Inc" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.104, + 0.825, + 0.202 + ], + "angle": 0, + "content": "differentiable, they are not amenable with inverse physical design formulations, for example optimizing base station placement with the simulator in the optimization loop. Third, they usually require additional fine-tuning with real data as they are not data-driven. Calibrating them with real-world measurements is non-trivial and tedious. Fourth, they cannot generally inter-operate with probabilistic frameworks which have the advantage of better dealing with epistemic uncertainties. We believe neural surrogates provide a natural solution to circumvent many of these drawbacks of classical ray tracing simulators." 
+ }, + { + "type": "text", + "bbox": [ + 0.175, + 0.208, + 0.825, + 0.334 + ], + "angle": 0, + "content": "In this work, we propose a neural wireless simulator ('WiNeRT') by building on recent advances in scenes representation as continuous-function neural networks (Sitzmann et al., 2019; Tancik et al., 2020; Mildenhall et al., 2020). In particular, central to our approach is learning a network to model ray-surface interactions, i.e., the network transforms an incident wireless ray to an attenuated outgoing ray. By shooting out a number of rays and evaluating the network at relevant spatial regions in the environment, we estimate the wireless characteristics as a set of transmit-receive paths, each path encodes attributes such as time-of-flight and gain. Our approach also addresses some unique technical challenges posed by the non-visual wireless modality, such as dealing with sparse high-dimensional time-angle measurement signals." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.34, + 0.825, + 0.452 + ], + "angle": 0, + "content": "We demonstrate that our neural wireless simulator reasonably renders the wireless propagation aspects by evaluating on two datasets which captures \\(50 - 100\\mathrm{m}^2\\) indoor propagation scenes. Interestingly, we find that the 3D-structure-aware implicit formulation is a strong inductive bias and helps generalization to significant inference-time distributions shifts. Finally, we demonstrate the potential of our differentiable forward model in solving inverse problem by tackling the user localization problem after posing it as an inverse rendering problem. Our results indicate that simulator physics for specified environments can be 'distilled' into neural surrogates and thereby presenting first steps towards closed-loop design pipelines of wireless communication systems." 
+ }, + { + "type": "title", + "bbox": [ + 0.175, + 0.476, + 0.344, + 0.492 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.513, + 0.825, + 0.653 + ], + "angle": 0, + "content": "Physics-based Neural Simulations. There exists a wide body of literature to model physical processes using advances in neural networks (Djeumou et al., 2022; Karniadakis et al., 2021; Raissi et al., 2017). As simulating physical processes can be expensive and can also present nondifferentiable 'black-box' in design pipelines, recent literature addresses how to work towards neural surrogates, such as for particle simulation (Sanchez-Gonzalez et al., 2020), mesh simulations (Pfaff et al., 2020), design of particle accelerators (Shirobokov et al., 2020), and inverse kinematics (Sun et al., 2021). In this paper, we are particularly interested in a specific physical process - wireless EM-wave propagation. Although this has received limited recent attention (Xia et al., 2020) in a 3D-oblivious setting, it is unclear whether these extend to complex configurations. Consequently, in this work, we work towards the first 3d-structure-aware surrogates for wireless ray tracing simulation." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.663, + 0.825, + 0.857 + ], + "angle": 0, + "content": "Neural Channel Modelling. Although propagation channel modeling has been a central topic in wireless communication (Jakes & Cox, 1994; Lee, 1982; Rappaport et al., 2022), there has been a recent trend for fully data-driven models. The main paradigm of these activities is to use machine learning to learn complex distributions, model non-linearities and have differentiable simulators. These works can be categorized as statistical channel models where the channel input-output relation is modelled as a conditional probability distribution. 
Many works leverage recent advances in generative modelling and use models like generative adversarial networks (GANs) (Goodfellow et al., 2014) or variational autoencoders (VAEs) (Kingma & Welling, 2013) to learn the channel model (O'Shea et al., 2019; Ye et al., 2018; Yang et al., 2019; O'Shea et al., 2019; Orekondy et al., 2022; Ye et al., 2020; Dorner et al., 2020). In contrast to these works, our approach inscribes within ray tracing channel modeling paradigm, where wireless propagation is precisely modelled by tracing wireless rays, however, unlike classical ray tracers, our model is able to blend in the elements of statistical modeling and is trainable directly on field data. To the best of our knowledge, this work is the first differentiable neural ray tracer for wireless channel modelling." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.868, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Neural Scene Representations. Representing scenes (or more generally signals) has been widely studied in literature, such as encoding the signal in the latent space of a generative model (Kingma & Welling, 2013; Goodfellow et al., 2014). A more recent link of work encodes the signal in the parameters of a co-ordinate MLP (Park et al., 2019; Sitzmann et al., 2020; Tancik et al., 2020; Fathony" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.95, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.102, + 0.825, + 0.177 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.186, + 0.825, + 0.226 + ], + "angle": 0, + "content": "Figure 1: Approach Overview. We learn a forward simulator \\( \\text{render}_{\\theta}(\\cdot) \\) that maps an environment configuration to a wireless channel \\( h_i \\). 
Here, \\( h_i \\) is a set of wireless propagation paths between \\( x_{\\mathrm{tx}} - x_{\\mathrm{rx}} \\) (green rays in right image), each path encoding certain channel attributes e.g., path gain." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.235, + 0.827, + 0.348 + ], + "angle": 0, + "content": "et al., 2020), thereby mapping co-ordinates (e.g., spatial, temporal) to the signal intensity values (e.g., pixel intensity, amplitude). In a specific case where the signal is a 2D RGB image, recent works (Schwarz et al., 2020; Niemeyer & Geiger, 2021; Mildenhall et al., 2020) show promising results by additionally employing image-based differentiable rendering paradigms (Drebin et al., 1988; Liu et al., 2019) to recover 3D properties of the scene. Inspired by this idea, our work neurally represents a wireless scene by tackling a set of orthogonal challenges, such as dealing with sparse high-dimensional signals and particularly modelling reflection and transmission effects. Consequently, we work towards the first 3D-aware neural 'wireless' scene representation model." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.367, + 0.303, + 0.383 + ], + "angle": 0, + "content": "3 APPROACH" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.825, + 0.44 + ], + "angle": 0, + "content": "In this section, we begin with some preliminaries to the subsequent formulation of the neural wireless ray tracing problem. We then provide an initial overview of our approach in Sec. 3.1 and then dive deeper into specific technical aspects of wireless ray marching in Sec. 3.2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.45, + 0.825, + 0.506 + ], + "angle": 0, + "content": "Preliminaries: Wireless Channels Scattering, reflection and diffraction are among the main effects in electromagnetic propagation. A general mathematical description of a wireless channel, seen as linear time varying system, is given by its impulse response Tse & Viswanath (2005); Rappaport (1996). 
A general model can be written as (Samimi & Rappaport, 2016):" + }, + { + "type": "equation", + "bbox": [ + 0.282, + 0.51, + 0.825, + 0.541 + ], + "angle": 0, + "content": "\\[\nh (t, \\boldsymbol {\\Theta}, \\boldsymbol {\\Phi}) = \\sum_ {k} a _ {k} (t) \\delta \\left(t - \\tau_ {k} (t)\\right) \\delta \\left(\\boldsymbol {\\Theta} - \\boldsymbol {\\Theta} _ {k} (t)\\right) \\delta \\left(\\boldsymbol {\\Phi} - \\boldsymbol {\\Phi} _ {k} (t)\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.544, + 0.826, + 0.629 + ], + "angle": 0, + "content": "where \\(a_{k}(t)\\) is the complex gain, \\(\\tau_{k}(t)\\) is the delay (time-of-flight) of path \\(k\\), \\(\\Theta_{k}(t)\\) is azimuth and elevation angle of departure (AoD), and \\(\\Phi_k(t)\\) is azimuth and elevation angle of arrival (AoA). Going forward, we use \\(\\phi_{k} = (\\Theta_{k},\\Phi_{k})\\) as a shorthand to collectively represent all angles. Intuitively equation 1, represents each path as a dirac function in time-angle space. The task of channel modeling can, therefore, be reduced to predicting channel attributes \\((a_{k}(t),\\tau_{k}(t),\\phi_{k}(t))\\) for a given environment map, and a transmit and receive location. See Sec. A.1 for a detailed discussion." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.825, + 0.708 + ], + "angle": 0, + "content": "Forward Model: render. The general goal of our forward model is to run a wireless ray simulation given a certain configuration of the propagation environment. More specifically, as shown in Figure 1, the model takes three configuration parameters as input: a 3D representation of the environment \\( F \\) and the spatial co-ordinates of the transmitter \\( x_{\\mathrm{tx}} \\) and receiver \\( x_{\\mathrm{rx}} \\) devices. 
The model predicts the wireless scene as:" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.712, + 0.825, + 0.731 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {h}} = \\left\\{\\boldsymbol {u} \\right\\} _ {k = 1} ^ {K} = \\left\\{\\left(a _ {k}, \\tau_ {k}, \\phi_ {k}\\right) \\right\\} _ {k = 1} ^ {K} = \\operatorname {r e n d e r} _ {\\theta} \\left(\\boldsymbol {x} _ {\\mathrm {t x}}, \\boldsymbol {x} _ {\\mathrm {r x}}, \\boldsymbol {F}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.732, + 0.825, + 0.775 + ], + "angle": 0, + "content": "where the output is a variably-sized set of \\(K\\) paths. Each path \\(\\pmb{u}_k\\) encodes three channel attributes: gain \\(a_k\\), time-of-flight \\(\\tau_k\\) and angles \\(\\phi_k\\). With these predicted channel attributes, we can obtain a time-angle impulse response (the 'channel') to characterize the wireless propagation effects." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.826, + 0.873 + ], + "angle": 0, + "content": "Key Idea: Implicit Representation Network \\( f_{\\theta} \\). Our approach recursively constructs the channel by using a learnt function \\( f_{\\theta}: F \\times \\mathbf{u}_{k}^{(r)} \\mapsto \\mathbf{u}_{k}^{(r+1)} \\) As shown in Figure 1, given an initial ray \\( \\mathbf{u}_{k}^{(r=0)} \\), we model the final state as an evaluation of interactions that the ray undergoes with the environment \\( F \\). Intuitively, \\( f_{\\theta} \\) models the local interaction of any given ray \\( k \\) either in free-space, or in particular when it is incident on an interacting surface. In the latter case of ray-surface interaction, we leverage a co-ordinate MLP to predict the transformation (e.g., attenuation, rotation) to the incident ray." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Representing Environment \\( F \\). 
We primarily focus on indoor propagation environments in this paper, where the environment is a 3D geometric representation. Specifically, we consider the environment represented as a 3D mesh composed of \\( F \\) faces and \\( V \\) vertices, where each face corresponds" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.101, + 0.825, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.232, + 0.825, + 0.275 + ], + "angle": 0, + "content": "Figure 2: Renderer: Ray Marching Steps. At each step \\( r \\) of the simulation, we learn the transformation introduced on a ray \\( \\boldsymbol{u}_k^{(r)} \\) e.g., reflection off a particular surface. The final transformation is a result of learnt (green blocks) and non-learnable (blue blocks) evaluations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.291, + 0.825, + 0.334 + ], + "angle": 0, + "content": "to some surface on a wall. We consider a mesh structure with two subtleties: (a) we represent walls as a flattened polygon and thereby do not explicitly consider its thickness; and (b) we do not encode materials of the corresponding wall faces, but rather learn the properties implicitly from data." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.353, + 0.453, + 0.367 + ], + "angle": 0, + "content": "3.1 OVERVIEW: NEURAL-renderING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.38, + 0.791, + 0.396 + ], + "angle": 0, + "content": "In this section, we present an overview of the three steps in our approach (as shown in Fig. 1)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.405, + 0.825, + 0.483 + ], + "angle": 0, + "content": "Ray Launching. 
We begin by shooting out a fixed set of \\( K \\) rays from the transmitter location \\( \\pmb{x}_k^{(r = 0)}\\coloneqq \\pmb{x}_{\\mathrm{tx}}(\\forall k) \\). We launch the rays omni-directionally from the transmitter co-ordinate, agnostic to the environment and location of the receiver location. Direction \\( \\pmb{d}_k^{(r = 0)} \\) of each ray is oriented in the direction of a unique vertex of a ico-sphere centered at \\( \\pmb{x}_{\\mathrm{tx}} \\). We use the number of sub-divisions of the ico-sphere to trade-off between computational complexity and accuracy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.493, + 0.825, + 0.577 + ], + "angle": 0, + "content": "Ray Marching. The crux of our approach involves 'marching' the ray and accounting for interactions (e.g., transmission) with various surfaces of the environment. A key aspect here is using a neural network to make local evaluations: mapping an incident ray with some direction and power to an updated outgoing attenuated ray. The neural network is hence tasked to learn a complex nonlinear characterization of the surface materials at a spatial co-ordinate. We further elaborate on the ray marching procedure in the next section." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.587, + 0.825, + 0.645 + ], + "angle": 0, + "content": "Ray Aggregation and Reception. Of the \\(K\\) rays launched from the ray launching step, we are now interested in the subset of the rays that impinges on the receiver. We model the reception sphere with a specific radius, which can be tuned to achieve a desired level of precision. To mitigate double-counting of received rays, we filter rays by associating them with a unique interaction path." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.663, + 0.332, + 0.677 + ], + "angle": 0, + "content": "3.2 RAY MARCHING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.69, + 0.825, + 0.779 + ], + "angle": 0, + "content": "We now dive deeper into the ray marching step, which tracks the evaluation of each ray as it propagates in the environment and hits various surfaces. We walk through the steps as shown sequentially in Fig. 2. We begin with a set of geometric rays \\( \\boldsymbol{u}_k^{(r = 0)} \\), originating at the transmitter co-ordinate \\( \\boldsymbol{x}_{\\mathrm{tx}} \\). In addition to the channel attributes of each ray (see Eq. 2), we also consider in this section an additional set of meta-attributes (e.g., origin \\( \\boldsymbol{x}_k \\), direction \\( \\boldsymbol{d}_k \\)) that helps us with the ray marching steps (elaborated in Sec. A.2)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.788, + 0.825, + 0.852 + ], + "angle": 0, + "content": "Ray-Environment Intersections. For each ray, we evaluate its first interaction with the environment (e.g., first wall it hits). Representing the ray geometry as \\(\\pmb{p}(t) = \\pmb{x}_k^{(r)} + t\\pmb{d}_k^{(r)}\\), we are primarily interested in a solution \\(t > 0\\) for which the ray is incident on some surface. This location helps us determine the relay (i.e., new origin) \\(\\pmb{x}_k^{(r+1)}\\) for the subsequent step." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.861, + 0.825, + 0.927 + ], + "angle": 0, + "content": "Ray-Surface Interaction. While the previous step solves for where the ray is incident in the environment, a crucial next step is determining attributes of the outgoing ray as a result of this interaction. We specifically focus on determining two attributes in this step: the new direction \\( \\pmb{d}_k^{(r+1)} \\) and gain \\( a_k^{(r+1)} \\). 
Popular non-neural simulators, such as Remcom (2022), look-up frequency-" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.288 + ], + "angle": 0, + "content": "dependent material properties (e.g., conductivity, permittivity) at the incidence point from standard databases (ITU-R P.2040-2) to calculate the attributes of the outgoing ray. However, it is unclear how to calculate the attributes with imprecise knowledge of the surfaces (e.g., unknown thickness and material types of each layer in a wall) or when the material properties of a layer have not been previously empirically analyzed. Our solution is to instead predict the attributes using learnt network as a function of the incident location \\( \\pmb{x}_k^{(r + 1)} \\) and direction \\( \\pmb{d}_k^{(r)} \\) (see \\( f_{\\theta}^{1} \\) in Fig. 2). The ray-surface interaction network \\( f_{\\theta}^{1} \\) used in our experiments is a ReLU MLP with 3 layers (with 64-hidden units). Similar to NeRF (Mildenhall et al., 2020), we split the network into learning incident direction-independent and dependent features by concatenating direction \\( \\pmb{d}_k^{(r)} \\) with bottlenecked outputs of the penultimate layer in the network (See Sec. A.3 fore more details). The network predicts an attenuation factor \\( s \\) and a rotation matrix \\( \\pmb{A} \\) (4-dim Euler-Rodrigues parameterization), which is then used to determine the updated gain \\( (a_k^{(r + 1)} = sa_k^{(r)}) \\) and direction \\( (\\pmb{d}_k^{(r + 1)} = \\pmb{A}\\pmb{d}_k^{(r)}) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.293, + 0.828, + 0.365 + ], + "angle": 0, + "content": "Reception/Termination check. 
For some special cases, we halt ray marching for a subset of rays. Namely, when ray \\( k \\) impinges on a reception sphere of a pre-specified radius (30cm in our experiments). This prevents a future version of the already received ray being potentially incorrectly received at a future iteration. In addition, for computation reasons, we also terminate ray marching if the ray exits the region of interest (e.g., ray exiting the environment)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.828, + 0.465 + ], + "angle": 0, + "content": "Free-space interaction. While the previous steps modeled the interaction of material properties of the environment on wireless propagation, we now switch focus to free-space. In this case, we model propagation of a ray using the empirically-adjusted Friis' Equation: \\( P_r(d) = P_t G\\left(\\frac{d_0}{d}\\right)^{\\lambda} \\) (\\( d \\geq d_0 \\)) which represents the power at the received at the receive antenna \\( P_r \\) as a function of the power fed into transmitting antenna \\( P_t \\), and the distance travelled by the ray \\( d \\). We learn the remaining scalar parameters \\( G \\) (antenna gain constant), \\( \\lambda \\) (attenuation factor), and \\( d_0 \\) (reference distance)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.494, + 0.289, + 0.508 + ], + "angle": 0, + "content": "3.3 TRAINING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.525, + 0.826, + 0.584 + ], + "angle": 0, + "content": "Over the previous sections, we walked through our approach on predicting a channel \\(\\hat{h} = \\mathrm{render}_{\\theta}(\\pmb{x}_{\\mathrm{tx}},\\pmb{x}_{\\mathrm{rx}},\\pmb{F})\\). We train the model in a supervised setting, with ground-truth time-angle impulse response measurements. Importantly, we rely only on final measurements (i.e., at \\(r = R\\)) for training and do not use any intermediate information (e.g., interaction data through a ray tracer)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.593, + 0.825, + 0.713 + ], + "angle": 0, + "content": "Set-based Channel Loss. We compare two sets of multi-path channels: predictions \\(\\hat{\\pmb{h}} = \\{\\hat{\\pmb{u}}_k\\}_{k=1}^K\\) and ground-truth \\(\\pmb{h} = \\{\\pmb{u}_l\\}_{l=1}^L\\) to provide a supervisory signal for training. We evaluate the set-based loss as: \\(\\mathcal{L}_{\\mathrm{chan}}(\\pmb{h},\\hat{\\pmb{h}}) = \\sum_l d(\\pmb{u}_l,\\hat{\\pmb{u}}_{\\Pi(l)})\\), which has two key ideas: (a) correspondence \\(\\Pi\\): we associate each ground-truth path \\(\\pmb{u}_l\\) with a predicted path \\(\\hat{\\pmb{u}}_k = \\Pi(l)\\). To perform such an association, we use direction-of-departure information and thereby pair paths launched in approximately the same direction; and (b) inter-path distance \\(d(\\pmb{u}_l,\\hat{\\pmb{u}}_k)\\): to compare two paths, we use mean square error for scalar-valued attributes (e.g., time-of-flights) and cosine distances between angular-attributes (e.g., direction of arrival). For the latter, we represent angles as unit vectors in cartesian coordinates." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.722, + 0.825, + 0.793 + ], + "angle": 0, + "content": "Training and Implementation Details. We train our approach for 100 epochs using Adam optimizer with a learning rate of \\(10^{-3}\\). We found it crucial to not aggregate rays (Sec. 3.1) in the training steps, as it led to vanishing gradients due to negligible number of rays that contributed towards gradient updates. We model the reception sphere as a fixed-sized sphere of radius \\(30\\mathrm{cm}\\). Additional implementation details are provided in Sec. C.4." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.828, + 0.421, + 0.843 + ], + "angle": 0, + "content": "4 EXPERIMENTAL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In this section, we discuss experimental analysis of our neural simulator approach. We begin by discussing the preliminaries: the choice of datasets and the evaluation metrics to compare simulations. The section concludes by discussing overall performances and highlights certain benefits of neural simulations, such as running controllable simulations outside of training conditions." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.104, + 0.753, + 0.119 + ], + "angle": 0, + "content": "4.1 EXPERIMENTAL SETUP: DATSETS, EVALUATION METRICS, AND BASELINES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.132, + 0.825, + 0.203 + ], + "angle": 0, + "content": "We train and evaluate our algorithm using ground-truth data from wireless ray tracing packages. We collect two datasets, where each dataset contains channel measurements (i.e., gains, time-of-flight, angles) for different distributions of environments (e.g., floor layout). We keep the wireless configuration fixed to using omni-directional antennas at both the transmitter and receiver operating at a 3.5GHz carrier frequency. Now we further elaborate on the datasets used in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.212, + 0.827, + 0.285 + ], + "angle": 0, + "content": "Dataset 1: WI3ROOMS. We create a synthetic dataset which gives us greater control over many aspects over the generation process. 
Using a \\(10\\mathrm{m} \\times 5\\mathrm{m} \\times 3\\mathrm{m}\\) hull, we randomly synthesize interior brick walls such that the eventual configuration consists of three rooms inter-connected with 1m doorways. We import the environment into an open-source wireless propagation toolbox (Amiot et al., 2013) and collect 41.6K channels, of which \\(\\sim 37\\%\\) of measurements are used for training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.292, + 0.828, + 0.432 + ], + "angle": 0, + "content": "Dataset 2: WIINDOOR. We use the indoor floorplans from the RPLAN dataset (Wu et al., 2019), which is popularly used to model indoor scenes (Nauata et al., 2020; 2021; Para et al., 2021). These layouts represent real-world single floor houses, with 4-8 rooms and \\(65 - 120\\mathrm{m}^2\\) areas. Each floorplan is further accompanied with room semantics such as whether a certain area is a living room, bed room, bathroom, etc. We use these semantics to selectively sample transmit/receiver locations (e.g., locations are not outside the boundary) and to determine wall materials (e.g., external facing walls are bricks, where as internal facing walls are dry plaster walls). We use a commercial ray tracer Remcom 'Wireless Inside' (Remcom, 2022) with ray tracer X3D to collect measurements in the RPLAN environment. Similar to the earlier dataset, we collect 42.5K measurements, of which \\(\\sim 36\\%\\) are used to train the model." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.441, + 0.828, + 0.555 + ], + "angle": 0, + "content": "Train and Test Regimes. For the training dataset, we collect measurements by sampling transmitter ('Tx') from \\(\\sim 10\\) locations (XY plane at an elevation of \\(2.8\\mathrm{m}\\)) and similarly, receiver ('Rx') from \\(60\\times 30\\) locations (but with elevation of \\(2\\mathrm{m}\\)). We then create three challenging test sets (see Fig. 
A2 for an illustration) with novel Tx-Rx locations: (a) Checkerboard: where train and test Rx locations form a checkerboard pattern on the same XY plane at \\(2\\mathrm{m}\\) elevation; (b) Generalization-\\(z\\): where we move the test Rx locations in (a) to a novel elevation (\\(z = 1.0\\mathrm{m}\\) for ThreeRooms and \\(z = 2.5\\mathrm{m}\\) for RPLAN); and (c) Generalization-diag: where we sample test Rx locations on a diagonal XYZ plane. Such regimes let us validate the generalization performance under distribution shifts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.563, + 0.828, + 0.732 + ], + "angle": 0, + "content": "Evaluation Metrics. We consider three evaluation metrics to evaluate our approach: (i) Overall prediction error ('Overall'): We follow a similar formulation as our loss (Sec. 3.3) with one key difference - we find correspondences \\(\\Pi\\) by solving a linear-sum assignment problem. The eventual error aggregates all attributes relevant for the path (e.g., gain, angles). Intuitively, this measures the distance between two sets (sets of multi-dim paths in our case), using a similar metric common in set prediction tasks (Fan et al., 2017; Zhang et al., 2019). (ii) Geometry prediction error ('Geometry'): We follow a formulation similar to (i), but now focus on two specific features that captures the geometrical accuracy of the path - time-of-flight and angles at departure and arrival. Intuitively, this metric measures whether the predicted rays take the same GT route between the transmit and receive co-ordinates. (iii) Average Delay Time - MAE ('AvgDelay'): We average the time-of-flights \\(\\tau_{k}\\) per path of the channel, weighted by its linear power \\(p(a_{k})\\). We report the mean absolute error of average delays between the predicted and ground-truth channel attributes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.74, + 0.828, + 0.8 + ], + "angle": 0, + "content": "Baselines. 
We propose two reference baselines (i) \\( k \\)-NN (with \\( k = 1 \\)): which predicts the channel, given the closest match to the input spatial co-ordinates in terms of Euclidean distance (ii) MLP: A geometry-oblivious MLP regressor with 3-hidden layers, each with 128 units. We train the MLP using the same loss as WiNeRT. Additional details of the baselines are provided in Sec. C.4." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.817, + 0.353, + 0.831 + ], + "angle": 0, + "content": "4.2 OVERALL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.825, + 0.873 + ], + "angle": 0, + "content": "In this section, we present the overall qualitative and quantitative results of our approach. We complement the overall performances with additional analysis in the next section." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Quantitative Results. We report the quantitative results for the two datasets (column groups) and three test sets (row groups) in Table 1. We observe from the table: (a) by focusing on the overall errors, we find WiNeRT generally outperforms all baselines, with a significant average decrease of" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.961 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.173, + 0.101, + 0.84, + 0.29 + ], + "angle": 0, + "content": "
WI3ROOMSWIINDOOR
OverallGeometryAvgDelayOverallGeometryAvgDelay
checkerboardkNN0.2320.2122.2380.4120.3962.484
MLP0.2870.3302.0510.3730.3991.745
WiNeRT0.2020.0872.0290.2370.2071.546
gen-zkNN0.2530.2262.0330.4240.4282.487
MLP0.2970.3501.7970.3880.4211.969
WiNeRT0.2170.0841.5220.2850.2501.839
gen-diagkNN0.2520.2132.1180.3800.2511.377
MLP0.3120.3221.8890.3900.3151.513
WiNeRT0.2290.0851.7920.3690.1700.828
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.298, + 0.825, + 0.327 + ], + "angle": 0, + "content": "Table 1: Quantitative Results. Comparing errors of our approach (WiNeRT) with baselines, over two datasets (column groups) and three test regimes (row groups). Lower values are better and the lowest errors are in bold." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.335, + 0.825, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.412, + 0.825, + 0.452 + ], + "angle": 0, + "content": "Figure 3: Receive Powers. By fixing the transmit location \\((x_{\\mathrm{tx}}\\), red cross), we measure the receive power (color at each point; in dB) predicted at each location in W13ROOMS dataset. kNN and MLP suffer from memorization and falsely predict highest receive powers around phantom transmit locations (purple star)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.458, + 0.827, + 0.709 + ], + "angle": 0, + "content": "-0.071 points compared to kNN and -0.085 with MLP; (b) WiNeRT is especially strong in capturing the geometry (e.g., 59-63% drop in errors w.r.t second best on WI3ROOMS) of the environment, which can be likely attributed to a strong inductive bias enforced by decoupling global rendering from local evaluations; (c) Although WiNeRT has reasonable performance in capturing the average delays, the performance gap here (e.g., 1-15% reduction in errors on WI3ROOMS) is not especially large compared to other metrics. We attribute this to contributions from 'false positive' rays with non-negligible power arising from our dense ray-launching technique. (d) The contributions of false positives can be mitigated by using a more sophisticated ray launching technique. 
For instance, by piggybacking on ray launch directions from GT channels, we can significantly improve performances across all metrics e.g., from 1-15% error reduction to 15-20% reduction in average delays on WI3ROOMS; (e) Overall, we attribute the underperformance of the baselines to poor generalization performance. For instance, in Figure 3, we illustrate the receive powers (in dB) predicted by all approaches in WI3ROOMS, for some placement of the transmitter (red cross in top-right room). We observe in this particular case that the high-power areas in the kNN and MLP baselines are predicted for a false phantom location (purple star), which roughly corresponds to a transmitter location in training set. This contrasts predictions by WiNeRT where the high-power areas are correctly concentrated around the transmitter location. As a result, we find that simple baselines find it challenging to generalize to new unseen spatial co-ordinates at inference time." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.718, + 0.825, + 0.845 + ], + "angle": 0, + "content": "Qualitative Results. We complement the previous quantitative discussions with observations drawn from qualitative analysis. WiNeRT particularly helps for this analysis, as we can recover intermediate ray-environment interaction information. From qualitative examples shown in Fig. 4(a, b), we draw some observations: (a) WiNeRT surprisingly learns ray-surface interactions implicitly, without any direct supervision. For instance, we observe multiple reflected paths between Tx and Rx; (b) we also find that our predictions (red rays) are generally consistent with the underlying simulation process (green rays) e.g., reflections from adjacent walls, floor and ceiling; and (c) we notice WiNeRT sometimes predicts false positives (e.g., above \\( x_{\\mathrm{tx}} \\) in Fig. 4b), which we attribute to dense omni-directional ray launching." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.868, + 0.288, + 0.882 + ], + "angle": 0, + "content": "4.3 ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In the previous section, we evaluated the overall performance of WiNeRT and found promising results. Now, we take a closer look at our approach and investigate generalization benefits." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.101, + 0.383, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.232, + 0.224, + 0.326, + 0.237 + ], + "angle": 0, + "content": "(a) W13ROOMS" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.101, + 0.603, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.454, + 0.224, + 0.545, + 0.237 + ], + "angle": 0, + "content": "(b) WIINDOOR" + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.102, + 0.825, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.642, + 0.224, + 0.798, + 0.237 + ], + "angle": 0, + "content": "(c) WI3ROOMS (novel \\(F\\))" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.249, + 0.825, + 0.276 + ], + "angle": 0, + "content": "Figure 4: Qualitative results. (a, b) Evaluation on WiNeRT on the environment seen during training. (c) We use the previously trained model and re-render on a re-configured floormap \\( \\pmb{F} \\)." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.281, + 0.365, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.393, + 0.332, + 0.404 + ], + "angle": 0, + "content": "(a) Ray-surface interactions" + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.281, + 0.588, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.418, + 0.393, + 0.546, + 0.402 + ], + "angle": 0, + "content": "(b) Attenuation: Reflection" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.282, + 0.824, + 0.391 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.628, + 0.393, + 0.771, + 0.403 + ], + "angle": 0, + "content": "(c) Attenuation: Transmission" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.414, + 0.825, + 0.454 + ], + "angle": 0, + "content": "Figure 5: Evaluating Ray-surface interaction MLP. We display a cut-out of the 3ROOMS represented as a wireframe, with a specific focus on a particular wall. (a) We find a train-test distribution shift of ray-surface incidence points (b, c) Evaluation of the MLP at various incidence points." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.472, + 0.827, + 0.724 + ], + "angle": 0, + "content": "What does the ray-surface interaction learn? We begin by investigating the ray-surface network \\((f_{\\theta}^{1}\\) in Fig. 2) in isolation. The network is tasked to map an incident ray (gain \\(a_{\\mathrm{in}}\\), direction \\(d_{\\mathrm{in}}\\)) to an outgoing ray \\((a_{\\mathrm{out}}, d_{\\mathrm{out}})\\). To accurately make this prediction, the network needs to learn direction- and material-dependent properties at the incident location \\(x_{\\mathrm{inc}}\\), which poses two challenges. First, the network does not have explicit supervision to learn these properties. Rather, the network needs to implicitly learn these properties by optimizing over a number of channel measurements. 
Second, specific to our case, the measurements collected involve sparse ray-surface interactions i.e., in practise we cannot expect for paths in the training measurements to interact densely with all possible surfaces. For instance, consider Fig. 5a, which show the incident points \\(x_{\\mathrm{inc}}\\) for a particular wall (black edges) that we recover from the underlying ray tracing tool. Here, we observe that the implicit training set interactions (red markers; never used during our training) are localized to a \\(\\sim 50\\mathrm{cm}\\) band (\\(15\\%\\) area of the wall). However, at test-time, the network is tasked to generalize to interactions for a different distribution of incidence points (purple markers). In spite of the challenges we find the ray-surface network associates meaningful information to surface co-ordinates. For instance, we show the attenuation factor predicted for the reflected (Fig. 5b) and transmitted co-ordinates (Fig. 5c) for rays arriving from a fixed \\(x_{\\mathrm{tx}}\\) co-ordinate (placed at \\(x = 8\\mathrm{m}\\)). We find that the network learns a smooth material- and direction-dependent function over the surface. Over the next experiments, we exploit these locally learnt properties and evaluate WiNeRT rendering in novel scenarios." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.733, + 0.825, + 0.873 + ], + "angle": 0, + "content": "Controllable synthesis: Predicting in Novel Environment Configurations. The previous experiments focused on evaluating approaches for novel locations of transmit and receive co-ordinates at simulation time. Now, we consider novel test-time environments by simulating approaches on re-configured layouts \\( \\pmb{F}^{\\prime} \\) of the train-time environment \\( \\pmb{F} \\), such as by randomly editing placement of interior walls. Overall, we find that WiNeRT remarkably extrapolates to the reconfigured environment, with the overall error unchanged with WiNeRT (0.202 on \\( \\pmb{F} \\) vs. 
0.203 on \\( \\pmb{F}^{\\prime} \\); more results in Table A2). Furthermore, by observing the results qualitatively in Figure 4c, we find the predicted interactions remain consistent with the ground-truth simulated rays in novel environment configurations. This is particularly appealing as for simulation use-cases which require modelling dynamic objects (e.g., moving vehicle), as WiNeRT allows re-configuring environment without retraining." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Controllable synthesis: Simulating Higher-order Interactions. In this experiment, we evaluate the ability of approaches to generalize to different numbers of interactions (denoted by \\( r \\) in Sec. 3) at inference time. With WiNeRT, we have the ability to control the number of interactions at" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.218 + ], + "angle": 0, + "content": "test-time (i.e., by unrolling \\( f_{\\theta} \\) for fewer or more steps). We briefly summarize our observations here (see Table A4 for more details). WiNeRT exhibits promising results: while the baselines struggle with a simpler task of lower-order interactions (e.g., 0.22-0.58 overall errors at \\( r = 0 \\)), WiNeRT's performance improves (from 0.20 to 0.12). A better performance is natural in this particular setting, since the model is required to perform an easier task than original (predicting only line-of-sight component). For higher-order interactions, we observe performances of all approaches degrades, but WiNeRT outperforms the baselines. 
In particular, even at \\( r = 3 \\), we find the geometric-errors of WiNeRT (0.27) comparable to baselines in their originally trained setting (\\( r = 1 \\), 0.21-0.33 errors)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.226, + 0.828, + 0.339 + ], + "angle": 0, + "content": "How fast are the simulations? We investigate the wall-clock simulation times of WiNeRT and baselines and compare them with wireless ray tracers. In the specific case of WiNeRT, we have some control over the time-accuracy trade-offs at test-time by varying the density of initial rays launched (see Sec. 3.1). Overall, we find that WiNeRT demonstrates speed-ups of \\(11 - 22 \\times\\) over PyLayers and \\(6 - 22 \\times\\) over Wireless Inside. While the baselines are even faster (\\(538 - 687 \\times\\) with MLP and \\(79 - 97 \\times\\) with kNN), it is achieved at the price of higher errors and poor generalization capabilities (Sec. 4.2). Overall, we find WiNeRT presents reasonable time-accuracy trade-offs compared to baselines. See Sec. C.2 for additional details." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.348, + 0.828, + 0.462 + ], + "angle": 0, + "content": "Exploiting differentiability: User Localization via inverse (differentiable) rendering. Over the previous sections we focused on forward simulations. Now, we study a proof-of-concept for leveraging our differentiable simulator for inverse problems, such as for user localization: determining user location \\( \\boldsymbol{x}_{\\mathrm{rx}} \\) from an observed channel \\( h_{\\mathrm{obs}} \\). We solve for \\( \\boldsymbol{x}_{\\mathrm{rx}} \\), by performing gradient on spatial coordinate \\( \\boldsymbol{x}_{\\mathrm{rx}}^{\\mathrm{ukn}} \\) that minimizes the channel loss \\( \\text{render}_{\\theta}(\\boldsymbol{x}_{\\mathrm{tx}}, \\boldsymbol{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}, \\boldsymbol{F}_i) \\). This is possible with WiNeRT, since we can backpropagate through the neural simulation of the channel. 
We evaluate over 100 test examples and find encouraging results, with a median error of 0.58m in WI3ROOMS (a \\( 150\\mathrm{m}^3 \\) volume) and 1.21m in WIINDOOR (a \\( 300\\mathrm{m}^3 \\) volume). See Sec. C.4 for more details." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.482, + 0.649, + 0.498 + ], + "angle": 0, + "content": "5 CONCLUSION, LIMITATIONS, AND BROADER IMPACT" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.514, + 0.827, + 0.627 + ], + "angle": 0, + "content": "In this paper, we proposed the first neural forward model for wireless ray tracing-based simulations. Such models are particularly appealing as they help alleviate some drawbacks of classical non-neural simulators (e.g., better handling model-measurement mismatches, non-differentiability). Towards this goal, we proposed WiNeRT which tasks an MLP to learn how surfaces in a 3D environment influence propagation of wireless rays, such as by predicting attenuation factor of a reflective component. Overall, we find promising results indicating neural simulators closely capture propagation effects. As neural simulators are additionally differentiable, we further show that they can be used to optimize inverse problems such as user localization." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.636, + 0.828, + 0.804 + ], + "angle": 0, + "content": "Limitations and Future Work. This paper presents the first step towards realizing a neural surrogate for simulating propagation of wireless rays. While we find promising results – in terms of empirically mimicking the simulator's performance while simultaneously reducing complexity – many important steps remain to realize our over-arching goal of differentiable wireless ray tracing. Our approach is designed to capture linear effects of the channel in line with standards (3GPP TR 38.901; ITU-R P.2040-2) and extending to non-linear effects (e.g., amplifier saturations) remains an open-problem. 
Additionally, while our focus is primarily reflection and transmission properties of ray-surface interactions (capturing majority of receive power) which are increasingly relevant for high-frequency transmissions, other properties (e.g., scattering, diffraction) require investigation to model simulations across a wider radio-frequency spectrum. Finally, our surrogate's performance is currently upper-bounded by the underlying simulator's performance, motivating studies into learnt calibration of the surrogate model with real-world measurement data to bypass simulation accuracy." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.813, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Broader Technical Impact. Although our paper focuses on neural simulation of EM waves in the radio-frequency spectrum (0.5-100 GHz), we believe working towards this goal complements research in non-radio modalities as well. For instance, to model propagation of acoustic signals in spatial environments, estimating material-dependent ray-surface interactive properties remains a challenging problem and the proposed research direction potentially complements existing techniques. More generally, we believe that as radio signals require modelling both ray (e.g., reflection) and physical optic (e.g., interference, diffraction) properties, advances here are intertwined with many modalities across the EM spectrum (e.g., audio, visual)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.105, + 0.395, + 0.119 + ], + "angle": 0, + "content": "REPRODUCIBILITY STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.827, + 0.214 + ], + "angle": 0, + "content": "To ensure reproducibility, we take a number of steps. On the dataset side, we use either publicly available indoor layouts (e.g., RPLAN) or synthetically generate layouts with known random seeds (0 and 10 in our case). We further elaborate on the simulation settings to recreate our dataset in Section 4.1 and Section B. We plan to release the simulated data measurements. On the implementation side, we provide specific training details in Section C.4 and further elaborate on the detailed architecture in Section A.3." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.232, + 0.319, + 0.246 + ], + "angle": 0, + "content": "ETHICS STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.258, + 0.825, + 0.302 + ], + "angle": 0, + "content": "The data used in our paper corresponds to simulated data of physical processes (EM wave propagation). Since this does not involve any human subjects or personally identifiable information, we believe there is no conflict in this regard." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.318, + 0.329, + 0.332 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.344, + 0.825, + 0.388 + ], + "angle": 0, + "content": "We thank Hanno Ackermann for discussions and feedback on the paper. We additionally thank numerous colleagues for insightful discussions: Thomas Hehn, Fabio Valerio Massoli, Maziar Raissi, Afshin Abdi, June Namgoong, Taesang Yoo, and Akash Doshi." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.407, + 0.287, + 0.422 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.43, + 0.826, + 0.46 + ], + "angle": 0, + "content": "3GPP TR 38.901. Study on channel model for frequencies from 0.5 to 100 ghz. Standard, 3GPP, Valbonne, FR, March 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.47, + 0.826, + 0.499 + ], + "angle": 0, + "content": "Nicolas Amiot, Mohamed Laaraiedh, and Bernard Uguen. Pylayers: An open source dynamic simulator for indoor propagation and localization. In ICC, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.509, + 0.826, + 0.551 + ], + "angle": 0, + "content": "Franck Djeumou, Cyrus Neary, Eric Goubault, Sylvie Putot, and Ufuk Topcu. Neural networks with physics-informed architectures and constraints for dynamical systems modeling. In Learning for Dynamics and Control Conference. PMLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.561, + 0.826, + 0.603 + ], + "angle": 0, + "content": "Sebastian Dorner, Marcus Henninger, Sebastian Cammerer, and Stephan ten Brink. Wgan-based autoencoder training over-the-air. In IEEE International Workshop on Signal Processing Advances in Wireless Communications, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.614, + 0.785, + 0.629 + ], + "angle": 0, + "content": "Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. SIGGRAPH, 1988." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.639, + 0.825, + 0.668 + ], + "angle": 0, + "content": "Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3d object reconstruction from a single image. In CVPR, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.678, + 0.824, + 0.706 + ], + "angle": 0, + "content": "Rizal Fathony, Anit Kumar Sahu, Devin Willmott, and J Zico Kolter. Multiplicative filter networks. In ICLR, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.716, + 0.69, + 0.731 + ], + "angle": 0, + "content": "Andrew S. Glassner. An introduction to ray tracing. Morgan Kaufmann, 1989." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.741, + 0.825, + 0.77 + ], + "angle": 0, + "content": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.78, + 0.825, + 0.822 + ], + "angle": 0, + "content": "Fumio Ikegami, Tsutomu Takeuchi, and Susumu Yoshida. Theoretical prediction of mean field strength for urban mobile radio. IEEE Transactions on Antennas and Propagation, 39(3):299-302, 1991." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.833, + 0.825, + 0.861 + ], + "angle": 0, + "content": "ITU-R P.2040-2. Effects of building materials and structures on radiowave propagation above about \\(100\\mathrm{mhz}\\). Standard, International Telecommunication Union, Geneva, CH, September 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.871, + 0.824, + 0.886 + ], + "angle": 0, + "content": "William C. Jakes and Donald C. Cox. Microwave mobile communications. Wiley-IEEE press, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.896, + 0.824, + 0.925 + ], + "angle": 0, + "content": "George Em Karniadakis, Ioannis G. Kevrekidis, Lu Lu, Paris Perdikaris, Sifan Wang, and Liu Yang. Physics-informed machine learning. Nature Reviews Physics, 3(6):422-440, June 2021." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.43, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.133 + ], + "angle": 0, + "content": "Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.825, + 0.17 + ], + "angle": 0, + "content": "William C. Y. Lee. Mobile communications engineering. McGraw-Hill, 1982. ISBN 978-0-07-037039-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.179, + 0.825, + 0.208 + ], + "angle": 0, + "content": "Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In ICCV, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.216, + 0.825, + 0.245 + ], + "angle": 0, + "content": "J.W. McKown and R.L. Hamilton. Ray tracing as a design tool for radio networks. IEEE Network, 5(6):27-30, November 1991." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.253, + 0.825, + 0.283 + ], + "angle": 0, + "content": "Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.29, + 0.825, + 0.332 + ], + "angle": 0, + "content": "Nelson Nauata, Kai-Hung Chang, Chin-Yi Cheng, Greg Mori, and Yasutaka Furukawa. Housegan: Relational generative adversarial networks for graph-constrained house layout generation. In ECCV, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.341, + 0.825, + 0.385 + ], + "angle": 0, + "content": "Nelson Nauata, Sepidehsadat Hosseini, Kai-Hung Chang, Hang Chu, Chin-Yi Cheng, and Yasutaka Furukawa. House-gan++: Generative adversarial layout refinement network towards intelligent computational agent for professional architects. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.392, + 0.825, + 0.422 + ], + "angle": 0, + "content": "Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.43, + 0.825, + 0.459 + ], + "angle": 0, + "content": "Tribhuvanesh Orekondy, Arash Behboodi, and Joseph B Soriaga. Mimo-gan: Generative mimo channel modeling. In IEEE ICC, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.468, + 0.825, + 0.51 + ], + "angle": 0, + "content": "Timothy J O'Shea, Tamoghna Roy, and Nathan West. Approximating the void: Learning stochastic channel models from observation with variational generative adversarial networks. In ICNC, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.518, + 0.825, + 0.548 + ], + "angle": 0, + "content": "Wamiq Para, Paul Guerrero, Tom Kelly, Leonidas J Guibas, and Peter Wonka. Generative layout modeling using constraint graphs. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.556, + 0.825, + 0.598 + ], + "angle": 0, + "content": "Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In CVPR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.607, + 0.825, + 0.637 + ], + "angle": 0, + "content": "Tobias Pfaff, Meire Fortunato, Alvaro Sanchez-Gonzalez, and Peter W Battaglia. Learning mesh-based simulation with graph networks. arXiv preprint arXiv:2010.03409, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.644, + 0.825, + 0.687 + ], + "angle": 0, + "content": "Maziar Raissi, Paris Perdikaris, and George Em Karniadakis. Physics informed deep learning (part i): Data-driven solutions of nonlinear partial differential equations. arXiv preprint arXiv:1711.10561, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.695, + 0.825, + 0.724 + ], + "angle": 0, + "content": "Theodore S. Rappaport. Wireless communications: principles and practice, volume 2. prentice hall PTR New Jersey, 1996." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.732, + 0.825, + 0.763 + ], + "angle": 0, + "content": "Theodore S Rappaport, Kate A Remley, Camillo Gentile, Andreas F Molisch, and Alenka Zajic. Radio Propagation Measurements and Channel Modeling. Cambridge University Press, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.77, + 0.825, + 0.799 + ], + "angle": 0, + "content": "Remcom. Wireless insite, 2022. URL https://www.remcom.com/ wireless-insite-em-propagation-software." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.807, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Mathew K. Samimi and Theodore S. Rappaport. 3-D millimeter-wave statistical channel model for 5G wireless system design. IEEE Transactions on Microwave Theory and Techniques, 64(7): 2207-2225, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.858, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia. Learning to simulate complex physics with graph networks. In ICML, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.896, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. NeurIPS, 2020." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Sergey Shirobokov, Vladislav Belavin, Michael Kagan, Andrei Ustyuzhanin, and Atilim Gunes Baydin. Black-box optimization with local generative surrogates. In NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.826, + 0.172 + ], + "angle": 0, + "content": "Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. NeurIPS, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.178, + 0.826, + 0.21 + ], + "angle": 0, + "content": "Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.216, + 0.826, + 0.248 + ], + "angle": 0, + "content": "Xingyuan Sun, Tianju Xue, Szymon Rusinkiewicz, and Ryan P Adams. Amortized synthesis of constrained configurations using a differentiable surrogate. NeurIPS, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.254, + 0.826, + 0.298 + ], + "angle": 0, + "content": "Matthew Tancik, Pratul Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.306, + 0.826, + 0.336 + ], + "angle": 0, + "content": "David Tse and Pramod Viswanath. Fundamentals of wireless communication. Cambridge university press, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.344, + 0.826, + 0.387 + ], + "angle": 0, + "content": "Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T Barron, and Pratul P Srinivasan. Ref-nerf: Structured view-dependent appearance for neural radiance fields. In CVPR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.395, + 0.826, + 0.426 + ], + "angle": 0, + "content": "Joram Walfisch and Henry L. Bertoni. A theoretical model of UHF propagation in urban environments. IEEE Transactions on antennas and propagation, 36(12):1788-1796, 1988." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.433, + 0.826, + 0.476 + ], + "angle": 0, + "content": "Wenming Wu, Xiao-Ming Fu, Rui Tang, Yuhan Wang, Yu-Hao Qi, and Ligang Liu. Data-driven interior plan generation for residential buildings. ACM Transactions on Graphics (TOG), 38(6): 1-12, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.484, + 0.826, + 0.528 + ], + "angle": 0, + "content": "William Xia, Sundeep Rangan, Marco Mezzavilla, Angel Lozano, Giovanni Geraci, Vasilii Semkin, and Giuseppe Loianno. Millimeter wave channel modeling via generative neural networks. In 2020 IEEE Globecom Workshops, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.536, + 0.826, + 0.58 + ], + "angle": 0, + "content": "Yang Yang, Yang Li, Wuxiong Zhang, Fei Qin, Pengcheng Zhu, and Cheng-Xiang Wang. Generative-adversarial-network-based wireless channel modeling: Challenges and opportunities. IEEE Communications Magazine, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.588, + 0.826, + 0.631 + ], + "angle": 0, + "content": "Hao Ye, Geoffrey Ye Li, Biing-Hwang Fred Juang, and Kathiravetpillai Sivanesan. 
Channel agnostic end-to-end learning based communication systems with conditional gan. In IEEE Globecom Workshops, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.64, + 0.826, + 0.683 + ], + "angle": 0, + "content": "Hao Ye, Le Liang, Geoffrey Ye Li, and Biing-Hwang Juang. Deep learning-based end-to-end wireless communication systems with conditional gans as unknown channels. IEEE Transactions on Wireless Communications, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.691, + 0.826, + 0.72 + ], + "angle": 0, + "content": "Yan Zhang, Jonathon Hare, and Adam Prugel-Bennett. Deep set prediction networks. NeurIPS, 2019." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.101, + 0.296, + 0.125 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.156, + 0.306, + 0.171 + ], + "angle": 0, + "content": "A APPROACH" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.187, + 0.432, + 0.202 + ], + "angle": 0, + "content": "A.1 BUILDING CHANNEL MODELS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.213, + 0.483, + 0.228 + ], + "angle": 0, + "content": "This section accompanies the text in Section 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.234, + 0.827, + 0.457 + ], + "angle": 0, + "content": "Channel models are defined either in a statistical way by defining a distribution over channel attributes or in deterministic way using ray tracing. 
Statistical channel models are inadequate for applications involving positioning, sensing and challenges of communication at higher frequencies (e.g., mmWave at 30-300 GHz (Rappaport et al., 2022)). Inspired by similar techniques in computer graphics (Glassner, 1989), traditional ray tracing approaches (see for example (McKown & Hamilton, 1991; Ikegami et al., 1991; Walfisch & Bertoni, 1988)) approximate propagation of electromagnetic waves by modeling interactions of each ray with objects in its paths. These interactions include for example reflection, diffraction and penetration. Although this is more efficient than solving Maxwell equations, ray tracing methods need a detailed knowledge of the environment and are generally slow for prototyping. They generally utilize hard coded and mathematically tractable models for example knife-edge model for diffraction (Lee, 1982; Rappaport, 1996). These abstractions suffer from mismatches and require occasional tedious fine-tuning and calibration with real data. Improving these models while remaining tractable for rapid simulation rounds is not straightforward. Finally, they are non-differentiable and cannot be integrated into a closed loop design pipeline. We plan to tackle these issues by building a neural surrogate of a physics-based wireless ray tracer in this paper." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.473, + 0.456, + 0.487 + ], + "angle": 0, + "content": "A.2 REPRESENTING RAY ATTRIBUTES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.499, + 0.825, + 0.596 + ], + "angle": 0, + "content": "We represent the \\(k\\)-th ray (among \\(K\\) rays) at the \\(r\\)-th iteration of rendering as \\(\\pmb{u}_k^{(r)}\\). For notation convenience, we drop the sub- and super-script for the rest of the section. We characterize the wireless ray analogous to the concept of an optical ray (such as with geometric direction, intensity). 
In addition to the wireless attributes (see Equation 2), we further include meta-level attributes that help us propagate and render the eventual ray received at the receiver co-ordinate \\(\\pmb{x}_{\\mathrm{rx}}\\). We briefly describe these attributes here and elaborate on how they are obtained or updated over the next sections. The ray contains the attributes:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.597, + 0.717, + 0.638 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol{u} = \\left( \\underbrace{a \\quad \\tau \\quad \\phi}_{\\text{(a) Channel Attributes}} \\quad \\underbrace{\\boldsymbol{x} \\quad \\boldsymbol{d} \\quad t_{s} \\quad t_{\\mathrm{rx}} \\quad \\rho_{\\mathrm{rx}}}_{\\text{(b) Ray Geometry}} \\quad \\underbrace{\\sigma_{\\mathrm{upd}} \\quad \\sigma_{\\mathrm{rx}}}_{\\text{(c) State}} \\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.639, + 0.827, + 0.792 + ], + "angle": 0, + "content": "which as shown can be grouped into three categories: (a) Wireless Channel Attributes. Exactly as discussed earlier in the section (see Equation 2), it contains the attributes to construct the wireless channel time-angle impulse response (Equation 1) (b) Ray Geometry. We additionally include geometrical representation of the ray, which helps us determine how to propagate the ray through the environment. Specifically, we represent the geometry of the ray using the line equation: \\( \\pmb{p}(t) = \\pmb{x} + t\\pmb{d} \\), where \\( \\pmb{x} \\) is the origin and \\( \\pmb{d} \\) is a unit-vector encoding the ray direction. We are interested in two particular solutions of \\( t \\) in this equation: \\( t_s \\) for which the ray intersects with a surface (mesh face in our case) and \\( t_{\\mathrm{rx}} \\) for which the ray is tangential to a sphere around some receiver of radius \\( \\rho_{\\mathrm{rx}} \\). 
(c) Ray state. To help with subsequent updates to the ray at future iterations, we track two binary variables. \\( \\sigma_{\\mathrm{upd}} \\) denotes whether the ray has to be updated in the next iteration. \\( \\sigma_{\\mathrm{rx}} \\) denotes whether the ray has impinged on a reception sphere of a predefined radius." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.808, + 0.407, + 0.822 + ], + "angle": 0, + "content": "A.3 RAY MARCHING: DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.837, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Ray-Environment Intersections. For each ray, we are interested in their first interaction with the environment (e.g., first wall it hits, impinging on the receiver). For this, we are interested in the solutions to the line equation representing the geometry of the ray: \\( \\pmb{p}(t) = \\pmb{x}_k^{(r)} + t\\pmb{d}_k^{(r)} \\). In particular, we are interested in two solutions of \\( t \\): (a) Ray-Face intersection. The smallest value of \\( t > 0 \\) for which \\( \\pmb{p}(t) \\) lies on a surface (a triangular mesh face in our case). For this, we perform ray-triangle intersections with each face in the environment and find the corresponding solution \\( t = t_s \\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.104, + 0.825, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.356, + 0.197, + 0.642, + 0.212 + ], + "angle": 0, + "content": "Figure A1: Ray-surface interaction network \\( f_{\\theta}^{1} \\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.233, + 0.827, + 0.293 + ], + "angle": 0, + "content": "This helps us estimate the new relay location: \\( \\pmb{x}_k^{(r + 1)} = \\pmb{x}_k^{(r)} + t_s\\pmb{d}_k^{(r)} \\) (b) Ray-Rx intersection. In parallel, we are also interested in positive solutions of \\( t \\) for which the ray hits the receiver if it were modeled as a sphere of radius \\( \\rho_{\\mathrm{rx}} \\). In this case, we obtain the value of \\( t \\) as the projection of \\( \\pmb{x}_{rx} \\) on \\( \\pmb{p}(t) \\):" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.297, + 0.825, + 0.317 + ], + "angle": 0, + "content": "\\[\nt _ {r x} = \\max \\left(0, \\left(\\boldsymbol {x} _ {r x} - \\boldsymbol {x} _ {k} ^ {(r)}\\right) \\cdot \\boldsymbol {d} _ {k} ^ {(r)}\\right) \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.319, + 0.825, + 0.34 + ], + "angle": 0, + "content": "\\[\n\\rho_ {r x} = \\left| \\left| \\left(\\boldsymbol {x} _ {r x} - \\boldsymbol {x} _ {k} ^ {(r)}\\right) - t _ {r x} \\boldsymbol {d} _ {k} ^ {(r)} \\right| \\right| \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.342, + 0.825, + 0.371 + ], + "angle": 0, + "content": "Consequently, at the end of ray-environment intersections, we analytically estimate the first intersections of the ray with both the environment and (potentially) the receiver." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.827, + 0.555 + ], + "angle": 0, + "content": "Ray-Surface Interaction. If the ray \\( \\pmb{u}_k^{(r)} \\) (originating at \\( \\pmb{x}_k^{(r)} \\)) and travelling in direction \\( d_k^{(r)} \\) hits a wall at \\( x_{k}^{(r + 1)} \\) (as estimated in the previous step), we are now interested in characterizing the outgoing ray with origin at \\( x_{k}^{(r + 1)} \\). Specifically, we are interested in estimating the new direction \\( d_k^{(r + 1)} \\) (does the ray penetrate the wall? or reflect?) and the corresponding change in gain that arises (i.e., loss of power, change of phase). This is a complex problem and typically requires in-depth knowledge of the surface (e.g., which material) as well as its specific EM properties (e.g., frequency-dependent effects). Our solution is to instead learn these properties by associating spatial regions in the environment with EM-specific properties. Towards this, we delegate the association to a neural network shown in Figure A1. The key idea is to associate spatial co-ordinates (or sets of co-ordinates, given by face on which they lie) with EM properties. We achieve this by mapping spatial properties (e.g., face corresponding to \\( x_{k}^{(r + 1)} \\)) to EM properties (e.g., gain factor)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.56, + 0.405, + 0.575 + ], + "angle": 0, + "content": "Specifically, our neural network is:" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.579, + 0.825, + 0.595 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {v} _ {i} = \\text{spatial\\_net} \\left(\\boldsymbol {f} _ {i}, \\boldsymbol {n} _ {i}, \\boldsymbol {b} _ {i}\\right) \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.597, + 0.825, + 0.613 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {s} _ {i}, \\boldsymbol {\\rho} _ {i} = \\text{directional\\_net} \\left(\\boldsymbol {v} _ {i}, \\boldsymbol {d} _ {i}\\right) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.616, + 0.825, + 0.676 + ], + "angle": 0, + "content": "which consists of a spatial_net to encode EM properties specific to a spatial region, but independent of the incidence direction. This network takes as inputs the one-hot encoding of the face \\( \\pmb{f}_i \\) on which the relay point \\( \\pmb{x}_k^{(r+1)} \\) lies and the surface normal vector at that point \\( \\pmb{n}_i \\). In addition, we also provide the network a 3-dim conditioning vector of signed distances" + }, + { + "type": "equation", + "bbox": [ + 0.319, + 0.681, + 0.825, + 0.7 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {b} _ {i} = \\left(\\operatorname{sdf} \\left(\\boldsymbol {x} _ {t x}, \\boldsymbol {f} _ {i}\\right), \\quad \\operatorname{sdf} \\left(\\boldsymbol {x} _ {r x}, \\boldsymbol {f} _ {i}\\right), \\quad \\operatorname{sdf} \\left(\\boldsymbol {x} _ {k} ^ {(r + 1)}, \\boldsymbol {f} _ {i}\\right)\\right) \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.702, + 0.827, + 0.746 + ], + "angle": 0, + "content": "where \\(\\operatorname{sdf}(\\pmb{x},\\pmb{f})\\) is the signed distance function between co-ordinate \\(\\pmb{x}\\) and face \\(f\\). We find it crucial to condition the network on these values to help predict EM-properties for relevant outgoing components (e.g., reflective, transmission)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.827, + 0.889 + ], + "angle": 0, + "content": "The output of the network is a gain factor \\( s_i \\), such that the new gain of the ray \\( \\boldsymbol{u}_k^{(r+1)} \\) is \\( a_k^{(r+1)} = s_i a_k^{(r)} \\). Since the gain magnitudes can be represented in either linear or logarithmic scale, we predict both additive and multiplicative factors of the gain in practice (\\( a_k^{(r+1)} = s_{i,1} a_k^{(r)} + s_{i,2} \\)). 
In parallel, the network also predicts the rotation a ray incident with direction \\( \\boldsymbol{d}_k^{(r)} \\) on \\( \\boldsymbol{f}_i \\) undergoes. We characterize rotations using a 4-dim rotation \\( \\rho_i \\) using Euler-Rodrigues parameterization. This parameterization encodes the axis of rotation and about which \\( \\boldsymbol{d}_k^{(r)} \\) rotates by angle \\( \\vartheta \\). We represent the rotation by a \\( 3 \\times 3 \\) SO(3) matrix \\( A \\) and the new outgoing direction of ray \\( k \\) is given by \\( \\boldsymbol{d}_k^{(r+1)} = A \\boldsymbol{d}_k^{(r)} \\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Reception/Termination check. For some special cases, we halt ray marching for a subset of rays. Namely, when ray \\( k \\) impinges on a reception sphere of radius under \\( \\varrho \\) meters. This prevents" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.113, + 0.382, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.238, + 0.356, + 0.249 + ], + "angle": 0, + "content": "(a) Testset 1: \"Checkerboard\"" + }, + { + "type": "image", + "bbox": [ + 0.387, + 0.103, + 0.608, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.415, + 0.238, + 0.576, + 0.249 + ], + "angle": 0, + "content": "(b) Testset 2: \"Generalization-z\"" + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.113, + 0.822, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.626, + 0.238, + 0.802, + 0.249 + ], + "angle": 0, + "content": "(c) Testset 3: \"Generalization-diag\"" + }, + { + "type": 
"image_caption", + "bbox": [ + 0.171, + 0.261, + 0.825, + 0.287 + ], + "angle": 0, + "content": "Figure A2: Train and test regimes: We consider disjoint subsets of train (blue markers; identical in all figures) and test (orange markers) co-ordinates of transmit and receive locations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.825, + 0.355 + ], + "angle": 0, + "content": "a future version of the ray potentially being incorrectly received once again. In addition, for computation reasons, we also terminate ray marching if the ray exits the region of interest (e.g., ray exiting the environment)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.364, + 0.826, + 0.407 + ], + "angle": 0, + "content": "Free-space interaction. While the previous steps modeled the interaction of material properties of the environment on wireless propagation, we now switch focus to free-space. In this case, we model propagation of a ray using the empirically-adjusted Friis' Equation:" + }, + { + "type": "equation", + "bbox": [ + 0.378, + 0.411, + 0.823, + 0.446 + ], + "angle": 0, + "content": "\\[\nP _ {r} (d) = P _ {t} K \\left(\\frac {d _ {0}}{d}\\right) ^ {\\lambda}, \\quad d \\geq d _ {0} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.45, + 0.825, + 0.505 + ], + "angle": 0, + "content": "which represents the power received at the receive antenna \\( P_r \\) as a function of the power fed into transmitting antenna \\( P_t \\) and the distance travelled by the ray \\( d \\). We learn the remaining scalar parameters \\( K \\) (constant representing antenna gains), \\( \\lambda \\) (wavelength of signal), and \\( d_0 \\) (reference distance)." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.525, + 0.486, + 0.54 + ], + "angle": 0, + "content": "B DATASET: ADDITIONAL DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.556, + 0.409, + 0.569 + ], + "angle": 0, + "content": "B.1 TRAIN AND TEST REGIMES" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.582, + 0.482, + 0.596 + ], + "angle": 0, + "content": "Figure A2 accompanies the text in Section 4.1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.613, + 0.557, + 0.627 + ], + "angle": 0, + "content": "B.2 SIMULATION FOR WIINDOOR DATASET: DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.825, + 0.737 + ], + "angle": 0, + "content": "We created 3 different floor-plans in Wireless Inside where 2D floor-plans layout and semantic labels of each room are picked from House \\(\\mathrm{GAN}++\\) dataset and mapped into a 3D layout where the scale and dimensions are determined based on practical floor-plan scenarios. All layouts are scaled to \\(10\\mathrm{m}\\times 10\\mathrm{m}\\) with ceiling height at \\(3\\mathrm{m}\\). All the inner walls and floor materials are layered dielectrics with specific permittivity, conductivity & roughness. These have finite reflection and transmission coefficients. The reflection coefficient is corrected if the surface is not smooth while the transmission coefficients are unaffected by surface roughness." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.747, + 0.825, + 0.845 + ], + "angle": 0, + "content": "Materials. Propagation characteristics are naturally affected by the medium and we create a dataset with fairly diverse set of materials. Layered dielectric with two layers separated by free-space of \\(89\\mathrm{cm}\\) is chosen for all inner walls and the outer-walls were made of thicker materials of concrete. Doors were created using free space except the balcony door which was created using glass with a small thickness. The balcony walls were laid out using brick walls. 
The propagation factor and index of reflection are functions of the permittivity \\((\\epsilon)\\) and conductivity \\((\\sigma)\\) of medium. In Table A1, we present the relative permittivity and conductivity." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Antenna and Transceiver configuration. Omnidirectional beam patterned antenna with polarization perpendicular to the z axis is setup for all receive and transmit antennas. Location, Orientation of the antenna are set relative to global reference such that they are rotated about the z axis by 90deg and placed at a height of \\(2.8\\mathrm{m}\\). All antennas employ the same configuration with no transmission loss." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.24, + 0.101, + 0.758, + 0.188 + ], + "angle": 0, + "content": "
thickness(cm)permittivity εconductivity σ (S/m)
Layered drywall(1,3)1.32.80.013
Brick12.54.440.0001
Concrete305.310.015
Glass32.40
" + }, + { + "type": "table_caption", + "bbox": [ + 0.407, + 0.197, + 0.591, + 0.212 + ], + "angle": 0, + "content": "Table A1: Material properties" + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.266, + 0.495, + 0.335 + ], + "angle": 0, + "content": "
OverallGeometryAvg. Delay
kNN0.2640.2881.479
MLP0.2800.3781.191
WiNeRT0.2030.1141.297
" + }, + { + "type": "table_caption", + "bbox": [ + 0.186, + 0.344, + 0.514, + 0.371 + ], + "angle": 0, + "content": "Table A2: Quantitative results. For a trained approach evaluated on a reconfigured floormap \\( \\mathbf{F}^{\\prime } \\)" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.226, + 0.812, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.575, + 0.398, + 0.752, + 0.412 + ], + "angle": 0, + "content": "Table A3: Qualitative results" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.439, + 0.827, + 0.524 + ], + "angle": 0, + "content": "Simulation. We currently run the simulation using the shoot and bounce model where a geometric path is drawn from every point on the transmitter field pattern to the receive point. This also includes transmission through surfaces allowing it to model transmittance and reflection. Rays are first traced from the source points with the rays reflecting specularly from the building walls. The rays that hit building walls are reflected specularly and continue to be traced up to the maximum number of reflections and transmissions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.53, + 0.825, + 0.561 + ], + "angle": 0, + "content": "The spatial separation of rays is set to \\(0.75^{\\circ}\\). The geometric path traced by the ray undergoes up to 6 specular reflection and 3 transmittance with path loss threshold set to -70dBm." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.565, + 0.827, + 0.609 + ], + "angle": 0, + "content": "Total received power of all paths is determined as the sum of time averaged power of group of correlated paths. A set of ray paths that interact with similar set of faces and follow nearly same path are defined as group." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.629, + 0.52, + 0.645 + ], + "angle": 0, + "content": "C EVALUATION: ADDITIONAL DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.661, + 0.783, + 0.675 + ], + "angle": 0, + "content": "C.1 CONTROLLABLE SYNTHESIS: GENERALIZATION TO RECONFIGURED FLOORMAPS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.687, + 0.825, + 0.717 + ], + "angle": 0, + "content": "Table A2 accompanies the discussions in Section 4.3, where we evaluate a WiNeRT model trained in one environment \\( \\mathbf{F} \\) and evaluated in a reconfigured environment \\( \\mathbf{F}' \\)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.734, + 0.744, + 0.749 + ], + "angle": 0, + "content": "C.2 CONTROLLABLE SYNTHESIS: LOWER- AND HIGHER-ORDER INTERACTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.76, + 0.601, + 0.775 + ], + "angle": 0, + "content": "See Table A4, which accompanies the discussions in Section 4.3." + }, + { + "type": "table", + "bbox": [ + 0.172, + 0.798, + 0.848, + 0.886 + ], + "angle": 0, + "content": "
#interactions rOverall (DoD)GeometryAvg. Delay
01*2301*2301*23
kNN0.220.330.500.550.310.210.290.331.302.242.963.40
MLP0.580.460.610.670.340.330.370.410.982.052.933.48
WiNeRT0.120.250.440.510.000.090.210.270.032.032.432.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.894, + 0.825, + 0.922 + ], + "angle": 0, + "content": "Table A4: Low- and Higher-Order Interactions. We vary the number of ray-surface interactions (denoted by \\( r \\) ) for a model trained using single-order interactions \\( \\left( {r = 1\\text{,denoted by * in the table).}}\\right) \\) ." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.107, + 0.498, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.108, + 0.817, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.314, + 0.825, + 0.366 + ], + "angle": 0, + "content": "Figure A3: Simulation Time. Comparing wall-clock time vs. accuracy performances of our approach (WiN-eRT) against baselines (MLP, kNN) and wireless ray tracing softwares (PyLayers and Insite). The 'Oracle ray launch' variant, which utilizes known ray launch directions at test-time, indicates an approximate performance upper-bound of our approach." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.411, + 0.35, + 0.424 + ], + "angle": 0, + "content": "C.3 SIMULATION TIME" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.444, + 0.825, + 0.515 + ], + "angle": 0, + "content": "In Sections 4.2 and 4.3, we found our proposed approach WiNeRT achieves reasonable performance compared with non-differentiable and non-neural simulator packages. Additionally, we demonstrated that WiNeRT is capable of generalization (e.g., to novel elevations, to re-configured floor-plans) and can be used for inverse problems. 
In this section, we additionally discuss run-time performance of WiNeRT and compare against baseline approaches as well as the simulator package." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.525, + 0.827, + 0.803 + ], + "angle": 0, + "content": "Experimental Setup. The end-goal of the experiment is to analyze the simulation time (specifically wall-clock times) of the proposed WiNeRT approach and contrast it against both the simulator softwares (PyLayers, Wireless Inside) and proposed baselines (MLP, kNN). We first remark that the implementations fundamentally vary between the approaches and hence an ideal wall-clock timing comparison is not possible. For instance, some approaches (WiNeRT, MLP, kNN) use a PyTorch implementation which can be run on GPU whereas the wireless ray tracing simulation packages are either proprietary (e.g., Wireless Inside) or developed exclusively for CPU (e.g., PyLayers) and thereby limiting the choice of hardware on which they can be run. Nonetheless, we keep simulation settings consistent when possible: by running the exact simulations used for the overall results (setting 'checkerboard'; see Section 4.1) and furthermore estimating wall-clock times per simulation (batch size of 1) over \\( N \\) individual simulations with a maximum of 1 reflection and transmission (i.e., \\( r = 1 \\)). For all approaches, we report only the mean simulation time over the multiple simulations, as we found the variances low (\\( \\sigma^2 \\leq 3.5 \\times 10^{-3} \\)). When possible, we also report corresponding accuracy ('overall prediction error'; see Sec. 4.1). We evaluate PyTorch-based implementations (WiNeRT, MLP, kNN) over \\( N = \\sim 8K \\) simulations using pretrained models (specifically the ones for reporting 1) on a Nvidia A100 GPU. In the case of WiNeRT, we are able to control time-accuracy trade-off to some degree at test-time by varying the number of launched rays \\( K \\) (see 'Ray Launching' in Sec. 
3.1) as a function of the number of subdivisions of the ico-sphere. We choose 1-5 sub-divisions and additionally an 'oracle ray' launch strategy to depict a lower-bound on the time-accuracy values." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.813, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Results. We present the time-accuracy in Figure A3 and observe: (i) WiNeRT (orange markers) is significantly faster than the simulators (blue line), demonstrating speed-ups of \\(11 - 22 \\times\\) over PyLayers (Amiot et al., 2013) and \\(6 - 22 \\times\\) over Wireless Insite (Remcom, 2022). Although the simulators are approximately an upper-bound on the accuracy, we find that WiNeRT can make reasonable trade-offs on accuracy to boost simulation times in certain scenarios; (ii) The baselines we propose in this paper (MLP and kNN) are even faster. MLP (green marker) is the fastest with speed-ups of \\(538 - 687 \\times\\), which can be largely attributed to a simple architecture (3-layer ReLU MLP with 128 hidden units). kNN (red marker) is the second fastest with \\(79 - 97 \\times\\) speed-ups over the simulators." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.377, + 0.107, + 0.624, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.314, + 0.825, + 0.34 + ], + "angle": 0, + "content": "Figure A4: User Localization. We backpropagate through our trained forward model to solve for the position of the receiver." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.366, + 0.825, + 0.396 + ], + "angle": 0, + "content": "While these baselines offer much faster simulation times, their generalization capabilities remain unclear as they suffer from memorization (see discussion for Fig. 3)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.411, + 0.553, + 0.425 + ], + "angle": 0, + "content": "C.4 USER LOCALIZATION VIA INVERSE RENDERING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.437, + 0.825, + 0.577 + ], + "angle": 0, + "content": "In this section, we provide additional details to complement the discussion on the user localization experiment in Section 4.3. For the user localization task, the problem is to determine user location \\( \\pmb{x}_{\\mathrm{rx}} \\) from an observed channel \\( h_{\\mathrm{obs}} \\). We solve for \\( \\pmb{x}_{\\mathrm{rx}} \\) by performing gradient descent on the spatial coordinate \\( \\pmb{x}_{\\mathrm{rx}}^{\\mathrm{ukn}} \\) that minimizes the channel loss \\( \\text{render}_{\\theta}(\\pmb{x}_{\\mathrm{tx}}, \\pmb{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}, \\pmb{F}_i) \\). This is possible with WiNeRT, since we can backpropagate through the neural simulation of the channel. We optimize for \\( \\pmb{x}_{\\mathrm{rx}} \\) using SGD with momentum (lr=0.01, momentum=0.9, 2000 iterations) with two additional considerations: (a) we constrain \\( \\pmb{x}_{\\mathrm{rx}} \\) to lie in valid ranges (positive, upper-bounded by \\( \\pmb{x}_{\\max} \\)) by clamping the values at each iteration; and (b) to prevent solutions in local minima, we take the result which yields the minimum loss over five random initializations of \\( \\pmb{x}_{\\mathrm{rx}} \\). 
We present the CDF of localization errors over 100 test examples in Figure A4" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.596, + 0.563, + 0.612 + ], + "angle": 0, + "content": "D IMPLEMENTATION: ADDITIONAL DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.825, + 0.657 + ], + "angle": 0, + "content": "In this section, we provide additional implementation details and hyperparameter choices of approaches discussed in the paper." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.672, + 0.285, + 0.687 + ], + "angle": 0, + "content": "D.1 WINERT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.701, + 0.827, + 0.887 + ], + "angle": 0, + "content": "Architecture: Ray-surface Interaction \\( f_{\\theta}^{1} \\). We follow an MLP architecture (see Figure A1) similar to NeRF approaches (Mildenhall et al., 2020; Verbin et al., 2022). We decompose the parameters into view-independent ('spatial MLP') and view-dependent ('directional MLP') sets. Given a ray incident at a spatial co-ordinate \\( x_{k} \\) in direction \\( d_{k} \\), the spatial MLP (2 hidden layers, 64 units) takes three inputs: (a) the face \\( f_{i} \\) (1-hot index) on which \\( x_{k} \\) lies; (b) the surface normal \\( n_{i} \\) of face \\( f_{i} \\); and (c) a 3d vector of signed-distance values between the face and \\( x_{\\mathrm{tx}} \\), \\( x_{\\mathrm{rx}} \\), and \\( x_{k} \\). We find (c) provides information (e.g., \\( x_{\\mathrm{tx}} \\) and \\( x_{\\mathrm{rx}} \\) on the same side of wall) to condition the network to predict attributes related to either reflection or transmission components. The directional MLP (1 hidden layer, 64 units) takes two inputs: (i) a 32-dim bottleneck vector produced by the spatial MLP; and (ii) a 3-dim unit vector representing the incidence direction \\( d_{k} \\). 
The final output are scaling and additive co-efficients \\( s \\) for the gain magnitude (i.e., \\( a_{k}^{(r + 1)} = s_{1}a_{k}^{(r)} + s_{2} \\)) and 4-dim parameters \\( \\rho_{i} \\) for rotation (based on Euler-Rodrigues formulation). The rotation parameters \\( \\rho_{i} \\) are mapped to a \\( 3\\times 3 \\) rotation matrix \\( A = \\Gamma (\\rho_{i}) \\) to transform the incident to outgoing ray \\( d_{k}\\coloneqq Ad_{k} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "**Renderer:** Ray Launching. In the first step of the renderer, we launch \\( K \\) rays from co-ordinate \\( x_{\\mathrm{tx}} \\) uniformly in all directions. To achieve this, we center a ico-sphere with 5 sub-divisions and" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.149 + ], + "angle": 0, + "content": "choose as directions the vectors from \\( \\pmb{x}_{\\mathrm{tx}} \\) towards the ico-sphere vertices (10.2K vertices with 5 sub-divisions). Since we know the exact co-ordinates between \\( \\pmb{x}_{\\mathrm{tx}} \\) and \\( \\pmb{x}_{\\mathrm{rx}} \\), we manually include the line-of-sight direction resulting in a total of \\( K \\) rays." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.157, + 0.827, + 0.381 + ], + "angle": 0, + "content": "**Renderer:** Ray Marching. The core step of the renderer is ray marching (detailed in Figure 2). We elaborate on technical implementation details step-by-step using as reference Figure 2. We drop sub- and super-scripts for rest of the paragraph for notational convenience. 
(a) **Ray-Triangle intersection:** For a given ray \\( \\pmb{p} = \\pmb{o} + t\\pmb{d} \\), we are interested in the minimum finite solution of \\( t > 0 \\) for which the ray intersects with each face of the mesh. For some face with coordinates \\( (\\pmb{a}, \\pmb{b}, \\pmb{c}) \\), this entails solving for \\( t \\) such that \\( \\pmb{p} = \\pmb{o} + t\\pmb{d} = \\alpha \\pmb{a} + \\beta \\pmb{b} + \\gamma \\pmb{c} \\) (under constraints \\( \\alpha + \\beta + \\gamma = 1 \\) and \\( 0 \\leq \\alpha, \\beta, \\gamma \\leq 1 \\)). We calculate valid solutions using Cramer's rule for all faces in the mesh and only consider (if one exists) the minimum positive solution corresponding to the first ray-triangle intersecting point. (b) **Ray-Surface interaction:** Given the solution from the previous step (i.e., on which spatial co-ordinate the ray is incident on the surface), we are now interested in estimating the outgoing ray from that co-ordinate. For this, we leverage an MLP that maps incident gain, direction, and certain face properties to outgoing gain and direction. More details of this MLP are discussed above under the 'Architecture: Ray-surface Interaction'. (c) **Reception/Termination:** Per ray, we stop ray marching steps if it is either received (hits a reception sphere of fixed size of \\( 30\\mathrm{cm} \\)) or leaves the region of interest (e.g., penetrates exterior wall is shot into infinity). In other cases, we continue with ray marching steps." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.389, + 0.825, + 0.516 + ], + "angle": 0, + "content": "**Renderer:** Ray Aggregation. At the end of ray marching steps (over \\( R \\) iterations), we determine the final state of the \\( K \\) rays. We are now interested in a small subset of these \\( K \\) rays that is received at a receiver at fixed co-ordinate \\( x_{\\mathrm{rx}} \\). Note that we perform these steps only at test-time. 
The ray aggregation as a result involves two steps: (a) Ray Filtering: where we determine the subset of rays that arrives at \\( x_{\\mathrm{rx}} \\) by modelling the receiver as a sphere of fixed radius of \\( 30\\mathrm{cm} \\); and (b) Preventing double counting: we find duplicate rays arrive at \\( x_{\\mathrm{rx}} \\) due to a combination of a non-infinitesimally sized reception sphere and a high density of launched rays. We cull such duplicates by grouping rays based on a unique interaction sequence (i.e., IDs of faces it intersects with) and choosing the ray of the shortest length in each group." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.525, + 0.825, + 0.584 + ], + "angle": 0, + "content": "**Optimization.** We perform gradient-descent steps on learnable parameters using Adam with a learning rate of 0.001 with batch size of 1. We observed large gradients (possibly due to single-batch) and hence clip gradient values to 100 during training. The model is trained for 100 epochs and we pick the checkpoint with lowest validation error during training." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.599, + 0.3, + 0.613 + ], + "angle": 0, + "content": "D.2 BASELINES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.825, + 0.74 + ], + "angle": 0, + "content": "MLP. The MLP baseline extends ideas presented in Tancik et al. (2020); Sitzmann et al. (2020), where a simple MLP is used to map co-ordinates to the signal (e.g., pixel co-ordinate to RGB values). In our paper, the MLP directly maps the spatial co-ordinates \\( x_{\\mathrm{tx}} \\) and \\( x_{\\mathrm{rx}} \\) to channel \\( h_i \\). The MLP contains 3 hidden layers, each with 128 hidden units and ReLU activation. The core idea here is to implicitly learn the geometry of the environment (floormap \\( F \\)), which is common to all train and test examples. 
Note that in contrast to previous works, this model does not use positional embeddings nor sinusoidal activations, as our initial experiments indicated they learn high-frequency artifacts that are not typically present in our datasets (the wireless channels)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.825, + 0.793 + ], + "angle": 0, + "content": "kNN. The kNN baseline (with \\( k = 1 \\)) works as follows: for a given test-example \\( (\\pmb{x}_{\\mathrm{tx}}, \\pmb{x}_{\\mathrm{rx}}) \\) we find the spatially closest training example \\( \\arg \\min_{i} ||\\pmb{x}_{\\mathrm{tx}} - \\pmb{x}_{\\mathrm{tx},i}^{\\mathrm{train}}||_{2} + ||\\pmb{x}_{\\mathrm{rx}} - \\pmb{x}_{\\mathrm{rx},i}^{\\mathrm{train}}||_{2} \\) and predict channel \\( \\pmb{h}_i \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "19" + } + ] +] \ No newline at end of file diff --git a/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/41f81ce5-4453-4061-b257-336a66f472e8_origin.pdf b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/41f81ce5-4453-4061-b257-336a66f472e8_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7b8b242c608bd68200f09afdfb4ca630959f5c71 --- /dev/null +++ b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/41f81ce5-4453-4061-b257-336a66f472e8_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d9bcb29576e1d79f75ce6883c903303857d65511d5dd78227c86746b3fa9127 +size 2831559 diff --git a/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/full.md b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/full.md new file mode 100644 index 
0000000000000000000000000000000000000000..360c2fd509870a5bb7b9f01b728c40f5ab57f584 --- /dev/null +++ b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/full.md @@ -0,0 +1,413 @@ +# WINERT: TOWARDS NEURAL RAY TRACING FOR WIRELESS CHANNEL MODELLING AND DIFFERENTIABLE SIMULATIONS + +Tribhuvanesh Orekondy, Kumar Pratik, Shreya Kadambi, Hao Ye, Joseph Soriaga, Arash Behboodi +Qualcomm AI Research* + +# ABSTRACT + +In this paper, we work towards a neural surrogate to model wireless electromagnetic propagation effects in indoor environments. Such neural surrogates provide a fast, differentiable, and continuous representation of the environment and enables end-to-end optimization for downstream tasks (e.g., network planning). Specifically, the goal of the paper is to render the wireless signal (e.g., time-of-flights, power of each path) in an environment as a function of the sensor's spatial configuration (e.g., placement of transmit and receive antennas). NeRF-based approaches have shown promising results in the visual setting (RGB image signal, with a camera sensor), where the key idea is to algorithmically evaluate the 'global' signal (e.g., using volumetric rendering) by breaking it down in a sequence of 'local' evaluations (e.g., using co-ordinate neural networks). In a similar spirit, we model the time-angle channel impulse response (the global wireless signal) as a superposition of multiple paths. The wireless characteristics (e.g., power) of each path is a result of multiple evaluations of a neural network that learns implicit ray-surface interaction properties. We evaluate our approach in multiple indoor scenarios and demonstrate that our model achieves strong performance (e.g., $<0.33\mathrm{ns}$ error in time-of-flight predictions). Furthermore, we demonstrate that our neural surrogate whitens the 'black-box' wireless simulators, and thus enables inverse rendering applications (e.g., user localization). 
+ +# 1 INTRODUCTION + +Realistic simulations of physical processes are vital to many scientific and engineering disciplines. In this paper, we focus on simulation of wireless electromagnetic (EM) signals within a propagation environment. The physics of such EM wave propagation between a transmit and receive point are analytically given by Maxwell equations: the transmitted wave undergoes different interactions with the environment (e.g., reflection), and the receiver gets the wave through multiple paths with different time-of-flights and powers, and from different directions. However, solving the Maxwell equations with boundary conditions requires in-depth knowledge of the propagation environment, hence classically modelling EM propagation is intractable for most engineering applications. + +Existing techniques make such simulations tractable by trading-off accuracy for speed. At one end of the spectrum, such simulations are represented in a statistical sense where a probabilistic model roughly captures the marginalized distribution over time-of-flights, gains and direction of transmit-receive paths. However, this level of accuracy is insufficient for designing systems that efficiently operate in high frequency bands. This motivates solutions at the other end of the spectrum: wireless ray tracing simulators. Given a detailed CAD representation of the environment along with the material properties, and numerous wireless configuration parameters (e.g., placement of a base station), the simulators generate resulting propagation characteristics. + +Although wireless ray tracing simulators are appealing, there are a few drawbacks. First, they are generally slow, which poses a bottleneck for closed-loop design pipelines, as wireless configurations cannot be quickly mapped to propagation characteristics. 
Second, because they are non + +differentiable, they are not amenable with inverse physical design formulations, for example optimizing base station placement with the simulator in the optimization loop. Third, they usually require additional fine-tuning with real data as they are not data-driven. Calibrating them with real-world measurements is non-trivial and tedious. Fourth, they cannot generally inter-operate with probabilistic frameworks which have the advantage of better dealing with epistemic uncertainties. We believe neural surrogates provide a natural solution to circumvent many of these drawbacks of classical ray tracing simulators. + +In this work, we propose a neural wireless simulator ('WiNeRT') by building on recent advances in scenes representation as continuous-function neural networks (Sitzmann et al., 2019; Tancik et al., 2020; Mildenhall et al., 2020). In particular, central to our approach is learning a network to model ray-surface interactions, i.e., the network transforms an incident wireless ray to an attenuated outgoing ray. By shooting out a number of rays and evaluating the network at relevant spatial regions in the environment, we estimate the wireless characteristics as a set of transmit-receive paths, each path encodes attributes such as time-of-flight and gain. Our approach also addresses some unique technical challenges posed by the non-visual wireless modality, such as dealing with sparse high-dimensional time-angle measurement signals. + +We demonstrate that our neural wireless simulator reasonably renders the wireless propagation aspects by evaluating on two datasets which captures $50 - 100\mathrm{m}^2$ indoor propagation scenes. Interestingly, we find that the 3D-structure-aware implicit formulation is a strong inductive bias and helps generalization to significant inference-time distributions shifts. 
Finally, we demonstrate the potential of our differentiable forward model in solving inverse problem by tackling the user localization problem after posing it as an inverse rendering problem. Our results indicate that simulator physics for specified environments can be 'distilled' into neural surrogates and thereby presenting first steps towards closed-loop design pipelines of wireless communication systems. + +# 2 RELATED WORK + +Physics-based Neural Simulations. There exists a wide body of literature to model physical processes using advances in neural networks (Djeumou et al., 2022; Karniadakis et al., 2021; Raissi et al., 2017). As simulating physical processes can be expensive and can also present nondifferentiable 'black-box' in design pipelines, recent literature addresses how to work towards neural surrogates, such as for particle simulation (Sanchez-Gonzalez et al., 2020), mesh simulations (Pfaff et al., 2020), design of particle accelerators (Shirobokov et al., 2020), and inverse kinematics (Sun et al., 2021). In this paper, we are particularly interested in a specific physical process - wireless EM-wave propagation. Although this has received limited recent attention (Xia et al., 2020) in a 3D-oblivious setting, it is unclear whether these extend to complex configurations. Consequently, in this work, we work towards the first 3d-structure-aware surrogates for wireless ray tracing simulation. + +Neural Channel Modelling. Although propagation channel modeling has been a central topic in wireless communication (Jakes & Cox, 1994; Lee, 1982; Rappaport et al., 2022), there has been a recent trend for fully data-driven models. The main paradigm of these activities is to use machine learning to learn complex distributions, model non-linearities and have differentiable simulators. These works can be categorized as statistical channel models where the channel input-output relation is modelled as a conditional probability distribution. 
Many works leverage recent advances in generative modelling and use models like generative adversarial networks (GANs) (Goodfellow et al., 2014) or variational autoencoders (VAEs) (Kingma & Welling, 2013) to learn the channel model (O'Shea et al., 2019; Ye et al., 2018; Yang et al., 2019; O'Shea et al., 2019; Orekondy et al., 2022; Ye et al., 2020; Dorner et al., 2020). In contrast to these works, our approach inscribes within ray tracing channel modeling paradigm, where wireless propagation is precisely modelled by tracing wireless rays, however, unlike classical ray tracers, our model is able to blend in the elements of statistical modeling and is trainable directly on field data. To the best of our knowledge, this work is the first differentiable neural ray tracer for wireless channel modelling. + +Neural Scene Representations. Representing scenes (or more generally signals) has been widely studied in literature, such as encoding the signal in the latent space of a generative model (Kingma & Welling, 2013; Goodfellow et al., 2014). A more recent link of work encodes the signal in the parameters of a co-ordinate MLP (Park et al., 2019; Sitzmann et al., 2020; Tancik et al., 2020; Fathony + +![](images/c74b0a95ffdda8f97ceb12b64de439e99ae04a016d2f6461b653f52e5eddf3a6.jpg) +Figure 1: Approach Overview. We learn a forward simulator $\text{render}_{\theta}(\cdot)$ that maps an environment configuration to a wireless channel $h_i$ . Here, $h_i$ is a set of wireless propagation paths between $x_{\mathrm{tx}} - x_{\mathrm{rx}}$ (green rays in right image), each path encoding certain channel attributes e.g., path gain. + +et al., 2020), thereby mapping co-ordinates (e.g., spatial, temporal) to the signal intensity values (e.g., pixel intensity, amplitude). 
In a specific case where the signal is a 2D RGB image, recent works (Schwarz et al., 2020; Niemeyer & Geiger, 2021; Mildenhall et al., 2020) show promising results by additionally employing image-based differentiable rendering paradigms (Drebin et al., 1988; Liu et al., 2019) to recover 3D properties of the scene. Inspired by this idea, our work neurally represents a wireless scene by tackling a set of orthogonal challenges, such as dealing with sparse high-dimensional signals and particularly modelling reflection and transmission effects. Consequently, we work towards the first 3D-aware neural 'wireless' scene representation model. + +# 3 APPROACH + +In this section, we begin with some preliminaries to the subsequent formulation of the neural wireless ray tracing problem. We then provide an initial overview of our approach in Sec. 3.1 and then dive deeper into specific technical aspects of wireless ray marching in Sec. 3.2. + +Preliminaries: Wireless Channels Scattering, reflection and diffraction are among the main effects in electromagnetic propagation. A general mathematical description of a wireless channel, seen as linear time varying system, is given by its impulse response Tse & Viswanath (2005); Rappaport (1996). A general model can be written as (Samimi & Rappaport, 2016): + +$$ +h (t, \boldsymbol {\Theta}, \boldsymbol {\Phi}) = \sum_ {k} a _ {k} (t) \delta \left(t - \tau_ {k} (t)\right) \delta \left(\boldsymbol {\Theta} - \boldsymbol {\Theta} _ {k} (t)\right) \delta \left(\boldsymbol {\Phi} - \boldsymbol {\Phi} _ {k} (t)\right) \tag {1} +$$ + +where $a_{k}(t)$ is the complex gain, $\tau_{k}(t)$ is the delay (time-of-flight) of path $k$ , $\Theta_{k}(t)$ is azimuth and elevation angle of departure (AoD), and $\Phi_k(t)$ is azimuth and elevation angle of arrival (AoA). Going forward, we use $\phi_{k} = (\Theta_{k},\Phi_{k})$ as a shorthand to collectively represent all angles. 
Intuitively equation 1, represents each path as a dirac function in time-angle space. The task of channel modeling can, therefore, be reduced to predicting channel attributes $(a_{k}(t),\tau_{k}(t),\phi_{k}(t))$ for a given environment map, and a transmit and receive location. See Sec. A.1 for a detailed discussion. + +Forward Model: render. The general goal of our forward model is to run a wireless ray simulation given a certain configuration of the propagation environment. More specifically, as shown in Figure 1, the model takes three configuration parameters as input: a 3D representation of the environment $F$ and the spatial co-ordinates of the transmitter $x_{\mathrm{tx}}$ and receiver $x_{\mathrm{rx}}$ devices. The model predicts the wireless scene as: + +$$ +\hat {\boldsymbol {h}} = \left\{\boldsymbol {u} \right\} _ {k = 1} ^ {K} = \left\{\left(a _ {k}, \tau_ {k}, \phi_ {k}\right) \right\} _ {k = 1} ^ {K} = \operatorname {r e n d e r} _ {\theta} \left(\boldsymbol {x} _ {\mathrm {t x}}, \boldsymbol {x} _ {\mathrm {r x}}, \boldsymbol {F}\right) \tag {2} +$$ + +where the output is a variably-sized set of $K$ paths. Each path $\pmb{u}_k$ encodes three channel attributes: gain $a_k$ , time-of-flight $\tau_k$ and angles $\phi_k$ . With these predicted channel attributes, we can obtain a time-angle impulse response (the 'channel') to characterize the wireless propagation effects. + +Key Idea: Implicit Representation Network $f_{\theta}$ . Our approach recursively constructs the channel by using a learnt function $f_{\theta}: F \times \mathbf{u}_{k}^{(r)} \mapsto \mathbf{u}_{k}^{(r+1)}$ As shown in Figure 1, given an initial ray $\mathbf{u}_{k}^{(r=0)}$ , we model the final state as an evaluation of interactions that the ray undergoes with the environment $F$ . Intuitively, $f_{\theta}$ models the local interaction of any given ray $k$ either in free-space, or in particular when it is incident on an interacting surface. 
In the latter case of ray-surface interaction, we leverage a co-ordinate MLP to predict the transformation (e.g., attenuation, rotation) to the incident ray. + +Representing Environment $F$ . We primarily focus on indoor propagation environments in this paper, where the environment is a 3D geometric representation. Specifically, we consider the environment represented as a 3D mesh composed of $F$ faces and $V$ vertices, where each face corresponds + +![](images/c66cddf7981830725b69b76efc859e0363ee0a2813c296272c06d0f3e7d2b001.jpg) +Figure 2: Renderer: Ray Marching Steps. At each step $r$ of the simulation, we learn the transformation introduced on a ray $\boldsymbol{u}_k^{(r)}$ e.g., reflection off a particular surface. The final transformation is a result of learnt (green blocks) and non-learnable (blue blocks) evaluations. + +to some surface on a wall. We consider a mesh structure with two subtleties: (a) we represent walls as a flattened polygon and thereby do not explicitly consider its thickness; and (b) we do not encode materials of the corresponding wall faces, but rather learn the properties implicitly from data. + +# 3.1 OVERVIEW: NEURAL-renderING + +In this section, we present an overview of the three steps in our approach (as shown in Fig. 1). + +Ray Launching. We begin by shooting out a fixed set of $K$ rays from the transmitter location $\pmb{x}_k^{(r = 0)}\coloneqq \pmb{x}_{\mathrm{tx}}(\forall k)$ . We launch the rays omni-directionally from the transmitter co-ordinate, agnostic to the environment and location of the receiver location. Direction $\pmb{d}_k^{(r = 0)}$ of each ray is oriented in the direction of a unique vertex of a ico-sphere centered at $\pmb{x}_{\mathrm{tx}}$ . We use the number of sub-divisions of the ico-sphere to trade-off between computational complexity and accuracy. + +Ray Marching. The crux of our approach involves 'marching' the ray and accounting for interactions (e.g., transmission) with various surfaces of the environment. 
A key aspect here is using a neural network to make local evaluations: mapping an incident ray with some direction and power to an updated outgoing attenuated ray. The neural network is hence tasked to learn a complex nonlinear characterization of the surface materials at a spatial co-ordinate. We further elaborate on the ray marching procedure in the next section. + +Ray Aggregation and Reception. Of the $K$ rays launched from the ray launching step, we are now interested in the subset of the rays that impinges on the receiver. We model the reception sphere with a specific radius, which can be tuned to achieve a desired level of precision. To mitigate double-counting of received rays, we filter rays by associating them with a unique interaction path. + +# 3.2 RAY MARCHING + +We now dive deeper into the ray marching step, which tracks the evaluation of each ray as it propagates in the environment and hits various surfaces. We walk through the steps as shown sequentially in Fig. 2. We begin with a set of geometric rays $\boldsymbol{u}_k^{(r = 0)}$ , originating at the transmitter co-ordinate $\boldsymbol{x}_{\mathrm{tx}}$ . In addition to the channel attributes of each ray (see Eq. 2), we also consider in this section an additional set of meta-attributes (e.g., origin $\boldsymbol{x}_k$ , direction $\boldsymbol{d}_k$ ) that helps us with the ray marching steps (elaborated in Sec. A.2). + +Ray-Environment Intersections. For each ray, we evaluate its first interaction with the environment (e.g., first wall it hits). Representing the ray geometry as $\pmb{p}(t) = \pmb{x}_k^{(r)} + t\pmb{d}_k^{(r)}$ , we are primarily interested in a solution $t > 0$ for which the ray is incident on some surface. This location helps us determine the relay (i.e., new origin) $\pmb{x}_k^{(r+1)}$ for the subsequent step. + +Ray-Surface Interaction. 
While the previous step solves for where the ray is incident in the environment, a crucial next step is determining attributes of the outgoing ray as a result of this interaction. We specifically focus on determining two attributes in this step: the new direction $\pmb{d}_k^{(r+1)}$ and gain $a_k^{(r+1)}$ . Popular non-neural simulators, such as Remcom (2022), look-up frequency- + +dependent material properties (e.g., conductivity, permittivity) at the incidence point from standard databases (ITU-R P.2040-2) to calculate the attributes of the outgoing ray. However, it is unclear how to calculate the attributes with imprecise knowledge of the surfaces (e.g., unknown thickness and material types of each layer in a wall) or when the material properties of a layer have not been previously empirically analyzed. Our solution is to instead predict the attributes using learnt network as a function of the incident location $\pmb{x}_k^{(r + 1)}$ and direction $\pmb{d}_k^{(r)}$ (see $f_{\theta}^{1}$ in Fig. 2). The ray-surface interaction network $f_{\theta}^{1}$ used in our experiments is a ReLU MLP with 3 layers (with 64-hidden units). Similar to NeRF (Mildenhall et al., 2020), we split the network into learning incident direction-independent and dependent features by concatenating direction $\pmb{d}_k^{(r)}$ with bottlenecked outputs of the penultimate layer in the network (See Sec. A.3 fore more details). The network predicts an attenuation factor $s$ and a rotation matrix $\pmb{A}$ (4-dim Euler-Rodrigues parameterization), which is then used to determine the updated gain $(a_k^{(r + 1)} = sa_k^{(r)})$ and direction $(\pmb{d}_k^{(r + 1)} = \pmb{A}\pmb{d}_k^{(r)})$ . + +Reception/Termination check. For some special cases, we halt ray marching for a subset of rays. Namely, when ray $k$ impinges on a reception sphere of a pre-specified radius (30cm in our experiments). 
This prevents a future version of the already received ray being potentially incorrectly received at a future iteration. In addition, for computation reasons, we also terminate ray marching if the ray exits the region of interest (e.g., ray exiting the environment). + +Free-space interaction. While the previous steps modeled the interaction of material properties of the environment on wireless propagation, we now switch focus to free-space. In this case, we model propagation of a ray using the empirically-adjusted Friis' Equation: $P_r(d) = P_t G\left(\frac{d_0}{d}\right)^{\lambda}$ ( $d \geq d_0$ ) which represents the power at the received at the receive antenna $P_r$ as a function of the power fed into transmitting antenna $P_t$ , and the distance travelled by the ray $d$ . We learn the remaining scalar parameters $G$ (antenna gain constant), $\lambda$ (attenuation factor), and $d_0$ (reference distance). + +# 3.3 TRAINING + +Over the previous sections, we walked through our approach on predicting a channel $\hat{h} = \mathrm{render}_{\theta}(\pmb{x}_{\mathrm{tx}},\pmb{x}_{\mathrm{rx}},\pmb{F})$ . We train the model in a supervised setting, with ground-truth time-angle impulse response measurements. Importantly, we rely only on final measurements (i.e., at $r = R$ ) for training and do not use any intermediate information (e.g., interaction data through a ray tracer). + +Set-based Channel Loss. We compare two sets of multi-path channels: predictions $\hat{\pmb{h}} = \{\hat{\pmb{u}}_k\}_{k=1}^K$ and ground-truth $\pmb{h} = \{\pmb{u}_l\}_{l=1}^L$ to provide a supervisory signal for training. We evaluate the set-based loss as: $\mathcal{L}_{\mathrm{chan}}(\pmb{h},\hat{\pmb{h}}) = \sum_l d(\pmb{u}_l,\hat{\pmb{u}}_{\Pi(l)})$ , which has two key ideas: (a) correspondence $\Pi$ : we associate each ground-truth path $\pmb{u}_l$ with a predicted path $\hat{\pmb{u}}_k = \Pi(l)$ . 
To perform such an association, we use direction-of-departure information and thereby pair paths launched in approximately the same direction; and (b) inter-path distance $d(\pmb{u}_l,\hat{\pmb{u}}_k)$ : to compare two paths, we use mean square error for scalar-valued attributes (e.g., time-of-flights) and cosine distances between angular-attributes (e.g., direction of arrival). For the latter, we represent angles as unit vectors in cartesian coordinates. + +Training and Implementation Details. We train our approach for 100 epochs using Adam optimizer with a learning rate of $10^{-3}$ . We found it crucial to not aggregate rays (Sec. 3.1) in the training steps, as it led to vanishing gradients due to negligible number of rays that contributed towards gradient updates. We model the reception sphere as a fixed-sized sphere of radius $30\mathrm{cm}$ . Additional implementation details are provided in Sec. C.4. + +# 4 EXPERIMENTAL RESULTS + +In this section, we discuss experimental analysis of our neural simulator approach. We begin by discussing the preliminaries: the choice of datasets and the evaluation metrics to compare simulations. The section concludes by discussing overall performances and highlights certain benefits of neural simulations, such as running controllable simulations outside of training conditions. + +# 4.1 EXPERIMENTAL SETUP: DATSETS, EVALUATION METRICS, AND BASELINES + +We train and evaluate our algorithm using ground-truth data from wireless ray tracing packages. We collect two datasets, where each dataset contains channel measurements (i.e., gains, time-of-flight, angles) for different distributions of environments (e.g., floor layout). We keep the wireless configuration fixed to using omni-directional antennas at both the transmitter and receiver operating at a 3.5GHz carrier frequency. Now we further elaborate on the datasets used in our experiments. + +Dataset 1: WI3ROOMS. 
We create a synthetic dataset which gives us greater control over many aspects over the generation process. Using a $10\mathrm{m} \times 5\mathrm{m} \times 3\mathrm{m}$ hull, we randomly synthesize interior brick walls such that the eventual configuration consists of three rooms inter-connected with 1m doorways. We import the environment into an open-source wireless propagation toolbox (Amiot et al., 2013) and collect 41.6K channels, of which $\sim 37\%$ of measurements are used for training. + +Dataset 2: WIINDOOR. We use the indoor floorplans from the RPLAN dataset (Wu et al., 2019), which is popularly used to model indoor scenes (Nauata et al., 2020; 2021; Para et al., 2021). These layouts represent real-world single floor houses, with 4-8 rooms and $65 - 120\mathrm{m}^2$ areas. Each floorplan is further accompanied with room semantics such as whether a certain area is a living room, bed room, bathroom, etc. We use these semantics to selectively sample transmit/receiver locations (e.g., locations are not outside the boundary) and to determine wall materials (e.g., external facing walls are bricks, where as internal facing walls are dry plaster walls). We use a commercial ray tracer Remcom 'Wireless Inside' (Remcom, 2022) with ray tracer X3D to collect measurements in the RPLAN environment. Similar to the earlier dataset, we collect 42.5K measurements, of which $\sim 36\%$ are used to train the model. + +Train and Test Regimes. For the training dataset, we collect measurements by sampling transmitter ('Tx') from $\sim 10$ locations (XY plane at an elevation of $2.8\mathrm{m}$ ) and similarly, receiver ('Rx') from $60\times 30$ locations (but with elevation of $2\mathrm{m}$ ). We then create three challenging test sets (see Fig. 
A2 for an illustration) with novel Tx-Rx locations: (a) Checkerboard: where train and test Rx locations form a checkerboard pattern on the same XY plane at $2\mathrm{m}$ elevation; (b) Generalization- $z$ : where we move the test Rx locations in (a) to a novel elevation ( $z = 1.0\mathrm{m}$ for ThreeRooms and $z = 2.5\mathrm{m}$ for RPLAN); and (c) Generalization-diag: where we sample test Rx locations on a diagonal XYZ plane. Such regimes let us validate the generalization performance under distribution shifts. + +Evaluation Metrics. We consider three evaluation metrics to evaluate our approach: (i) Overall prediction error ('Overall'): We follow a similar formulation as our loss (Sec. 3.3) with one key difference - we find correspondences $\Pi$ by solving a linear-sum assignment problem. The eventual error aggregates all attributes relevant for the path (e.g., gain, angles). Intuitively, this measures the distance between two sets (sets of multi-dim paths in our case), using a similar metric common in set prediction tasks (Fan et al., 2017; Zhang et al., 2019). (ii) Geometry prediction error ('Geometry'): We follow a formulation similar to (i), but now focus on two specific features that captures the geometrical accuracy of the path - time-of-flight and angles at departure and arrival. Intuitively, this metric measures whether the predicted rays take the same GT route between the transmit and receive co-ordinates. (iii) Average Delay Time - MAE ('AvgDelay'): We average the time-of-flights $\tau_{k}$ per path of the channel, weighted by its linear power $p(a_{k})$ . We report the mean absolute error of average delays between the predicted and ground-truth channel attributes. + +Baselines. We propose two reference baselines (i) $k$ -NN (with $k = 1$ ): which predicts the channel, given the closest match to the input spatial co-ordinates in terms of Euclidean distance (ii) MLP: A geometry-oblivious MLP regressor with 3-hidden layers, each with 128 units. 
We train the MLP using the same loss as WiNeRT. Additional details of the baselines are provided in Sec. C.4. + +# 4.2 OVERALL RESULTS + +In this section, we present the overall qualitative and quantitative results of our approach. We complement the overall performances with additional analysis in the next section. + +Quantitative Results. We report the quantitative results for the two datasets (column groups) and three test sets (row groups) in Table 1. We observe from the table: (a) by focusing on the overall errors, we find WiNeRT generally outperforms all baselines, with a significant average decrease of + +
| Test regime  | Method | WI3ROOMS Overall | WI3ROOMS Geometry | WI3ROOMS AvgDelay | WIINDOOR Overall | WIINDOOR Geometry | WIINDOOR AvgDelay |
|--------------|--------|------------------|-------------------|-------------------|------------------|-------------------|-------------------|
| checkerboard | kNN    | 0.232            | 0.212             | 2.238             | 0.412            | 0.396             | 2.484             |
| checkerboard | MLP    | 0.287            | 0.330             | 2.051             | 0.373            | 0.399             | 1.745             |
| checkerboard | WiNeRT | **0.202**        | **0.087**         | **2.029**         | **0.237**        | **0.207**         | **1.546**         |
| gen-z        | kNN    | 0.253            | 0.226             | 2.033             | 0.424            | 0.428             | 2.487             |
| gen-z        | MLP    | 0.297            | 0.350             | 1.797             | 0.388            | 0.421             | 1.969             |
| gen-z        | WiNeRT | **0.217**        | **0.084**         | **1.522**         | **0.285**        | **0.250**         | **1.839**         |
| gen-diag     | kNN    | 0.252            | 0.213             | 2.118             | 0.380            | 0.251             | 1.377             |
| gen-diag     | MLP    | 0.312            | 0.322             | 1.889             | 0.390            | 0.315             | 1.513             |
| gen-diag     | WiNeRT | **0.229**        | **0.085**         | **1.792**         | **0.369**        | **0.170**         | **0.828**         |
+ +Table 1: Quantitative Results. Comparing errors of our approach (WiNeRT) with baselines, over two datasets (column groups) and three test regimes (row groups). Lower values are better and the lowest errors are in bold. + +![](images/82c9e812e95d9f9b9175609d86e7873e3d6b9ba3e2a43b8d65695e54dc393076.jpg) +Figure 3: Receive Powers. By fixing the transmit location $(x_{\mathrm{tx}}$ , red cross), we measure the receive power (color at each point; in dB) predicted at each location in W13ROOMS dataset. kNN and MLP suffer from memorization and falsely predict highest receive powers around phantom transmit locations (purple star). + +-0.071 points compared to kNN and -0.085 with MLP; (b) WiNeRT is especially strong in capturing the geometry (e.g., 59-63% drop in errors w.r.t second best on WI3ROOMS) of the environment, which can be likely attributed to a strong inductive bias enforced by decoupling global rendering from local evaluations; (c) Although WiNeRT has reasonable performance in capturing the average delays, the performance gap here (e.g., 1-15% reduction in errors on WI3ROOMS) is not especially large compared to other metrics. We attribute this to contributions from 'false positive' rays with non-negligible power arising from our dense ray-launching technique. (d) The contributions of false positives can be mitigated by using a more sophisticated ray launching technique. For instance, by piggybacking on ray launch directions from GT channels, we can significantly improve performances across all metrics e.g., from 1-15% error reduction to 15-20% reduction in average delays on WI3ROOMS; (e) Overall, we attribute the underperformance of the baselines to poor generalization performance. For instance, in Figure 3, we illustrate the receive powers (in dB) predicted by all approaches in WI3ROOMS, for some placement of the transmitter (red cross in top-right room). 
We observe in this particular case that the high-power areas in the kNN and MLP baselines are predicted for a false phantom location (purple star), which roughly corresponds to a transmitter location in training set. This contrasts predictions by WiNeRT where the high-power areas are correctly concentrated around the transmitter location. As a result, we find that simple baselines find it challenging to generalize to new unseen spatial co-ordinates at inference time. + +Qualitative Results. We complement the previous quantitative discussions with observations drawn from qualitative analysis. WiNeRT particularly helps for this analysis, as we can recover intermediate ray-environment interaction information. From qualitative examples shown in Fig. 4(a, b), we draw some observations: (a) WiNeRT surprisingly learns ray-surface interactions implicitly, without any direct supervision. For instance, we observe multiple reflected paths between Tx and Rx; (b) we also find that our predictions (red rays) are generally consistent with the underlying simulation process (green rays) e.g., reflections from adjacent walls, floor and ceiling; and (c) we notice WiNeRT sometimes predicts false positives (e.g., above $x_{\mathrm{tx}}$ in Fig. 4b), which we attribute to dense omni-directional ray launching. + +# 4.3 ANALYSIS + +In the previous section, we evaluated the overall performance of WiNeRT and found promising results. Now, we take a closer look at our approach and investigate generalization benefits. + +![](images/c91a7eac1d49b21c068572f1193df77bdfc95a1dade31e322c41275a600dfd7c.jpg) +(a) W13ROOMS + +![](images/731e290a7acccc58811301e225652e98bf744655a7ca5d1725917766c3781cc0.jpg) +(b) WIINDOOR + +![](images/6b0ea87aa2f04037c201089d97969dcb9a3368cc59650405f96d911f1ce450bd.jpg) +(c) WI3ROOMS (novel $F$ ) + +![](images/1651dd31070aa5e21b8d79dc8134174fb51eab246b7281438d0222fabfd0416e.jpg) +Figure 4: Qualitative results. 
(a, b) Evaluation on WiNeRT on the environment seen during training. (c) We use the previously trained model and re-render on a re-configured floormap $\pmb{F}$ . +(a) Ray-surface interactions + +![](images/824fe270b736efbd9bf1c9a551cb2d8cef483220d5e298f254791c2b25fc139a.jpg) +(b) Attenuation: Reflection + +![](images/9433920fbba857bd026cb3caba22ad5b78a61493f516bd5b498209881b49808f.jpg) +(c) Attenuation: Transmission +Figure 5: Evaluating Ray-surface interaction MLP. We display a cut-out of the 3ROOMS represented as a wireframe, with a specific focus on a particular wall. (a) We find a train-test distribution shift of ray-surface incidence points (b, c) Evaluation of the MLP at various incidence points. + +What does the ray-surface interaction learn? We begin by investigating the ray-surface network $(f_{\theta}^{1}$ in Fig. 2) in isolation. The network is tasked to map an incident ray (gain $a_{\mathrm{in}}$ , direction $d_{\mathrm{in}}$ ) to an outgoing ray $(a_{\mathrm{out}}, d_{\mathrm{out}})$ . To accurately make this prediction, the network needs to learn direction- and material-dependent properties at the incident location $x_{\mathrm{inc}}$ , which poses two challenges. First, the network does not have explicit supervision to learn these properties. Rather, the network needs to implicitly learn these properties by optimizing over a number of channel measurements. Second, specific to our case, the measurements collected involve sparse ray-surface interactions i.e., in practise we cannot expect for paths in the training measurements to interact densely with all possible surfaces. For instance, consider Fig. 5a, which show the incident points $x_{\mathrm{inc}}$ for a particular wall (black edges) that we recover from the underlying ray tracing tool. Here, we observe that the implicit training set interactions (red markers; never used during our training) are localized to a $\sim 50\mathrm{cm}$ band ( $15\%$ area of the wall). 
However, at test-time, the network is tasked to generalize to interactions for a different distribution of incidence points (purple markers). In spite of the challenges we find the ray-surface network associates meaningful information to surface co-ordinates. For instance, we show the attenuation factor predicted for the reflected (Fig. 5b) and transmitted co-ordinates (Fig. 5c) for rays arriving from a fixed $x_{\mathrm{tx}}$ co-ordinate (placed at $x = 8\mathrm{m}$ ). We find that the network learns a smooth material- and direction-dependent function over the surface. Over the next experiments, we exploit these locally learnt properties and evaluate WiNeRT rendering in novel scenarios. + +Controllable synthesis: Predicting in Novel Environment Configurations. The previous experiments focused on evaluating approaches for novel locations of transmit and receive co-ordinates at simulation time. Now, we consider novel test-time environments by simulating approaches on re-configured layouts $\pmb{F}^{\prime}$ of the train-time environment $\pmb{F}$ , such as by randomly editing placement of interior walls. Overall, we find that WiNeRT remarkably extrapolates to the reconfigured environment, with the overall error unchanged with WiNeRT (0.202 on $\pmb{F}$ vs. 0.203 on $\pmb{F}^{\prime}$ ; more results in Table A2). Furthermore, by observing the results qualitatively in Figure 4c, we find the predicted interactions remain consistent with the ground-truth simulated rays in novel environment configurations. This is particularly appealing as for simulation use-cases which require modelling dynamic objects (e.g., moving vehicle), as WiNeRT allows re-configuring environment without retraining. + +Controllable synthesis: Simulating Higher-order Interactions. In this experiment, we evaluate the ability of approaches to generalize to different numbers of interactions (denoted by $r$ in Sec. 3) at inference time. 
With WiNeRT, we have the ability to control the number of interactions at + +test-time (i.e., by unrolling $f_{\theta}$ for fewer or more steps). We briefly summarize our observations here (see Table A4 for more details). WiNeRT exhibits promising results: while the baselines struggle with a simpler task of lower-order interactions (e.g., 0.22-0.58 overall errors at $r = 0$ ), WiNeRT's performance improves (from 0.20 to 0.12). A better performance is natural in this particular setting, since the model is required to perform an easier task than original (predicting only line-of-sight component). For higher-order interactions, we observe performances of all approaches degrades, but WiNeRT outperforms the baselines. In particular, even at $r = 3$ , we find the geometric-errors of WiNeRT (0.27) comparable to baselines in their originally trained setting ( $r = 1$ , 0.21-0.33 errors). + +How fast are the simulations? We investigate the wall-clock simulation times of WiNeRT and baselines and compare them with wireless ray tracers. In the specific case of WiNeRT, we have some control over the time-accuracy trade-offs at test-time by varying the density of initial rays launched (see Sec. 3.1). Overall, we find that WiNeRT demonstrates speed-ups of $11 - 22 \times$ over PyLayers and $6 - 22 \times$ over Wireless Inside. While the baselines are even faster ( $538 - 687 \times$ with MLP and $79 - 97 \times$ with kNN), it is achieved at the price of higher errors and poor generalization capabilities (Sec. 4.2). Overall, we find WiNeRT presents reasonable time-accuracy trade-offs compared to baselines. See Sec. C.2 for additional details. + +Exploiting differentiability: User Localization via inverse (differentiable) rendering. Over the previous sections we focused on forward simulations. 
Now, we study a proof-of-concept for leveraging our differentiable simulator for inverse problems, such as for user localization: determining user location $\boldsymbol{x}_{\mathrm{rx}}$ from an observed channel $h_{\mathrm{obs}}$ . We solve for $\boldsymbol{x}_{\mathrm{rx}}$ , by performing gradient on spatial coordinate $\boldsymbol{x}_{\mathrm{rx}}^{\mathrm{ukn}}$ that minimizes the channel loss $\text{render}_{\theta}(\boldsymbol{x}_{\mathrm{tx}}, \boldsymbol{x}_{\mathrm{rx}}^{\mathrm{ukn}}, \boldsymbol{F}_i)$ . This is possible with WiNeRT, since we can backpropagate through the neural simulation of the channel. We evaluate over 100 test examples and find encouraging results, with a median error of 0.58m in WI3ROOMS (a $150\mathrm{m}^3$ volume) and 1.21m in WIINDOOR (a $300\mathrm{m}^3$ volume). See Sec. C.4 for more details. + +# 5 CONCLUSION, LIMITATIONS, AND BROADER IMPACT + +In this paper, we proposed the first neural forward model for wireless ray tracing-based simulations. Such models are particularly appealing as they help alleviate some drawbacks of classical non-neural simulators (e.g., better handling model-measurement mismatches, non-differentiability). Towards this goal, we proposed WiNeRT which tasks an MLP to learn how surfaces in a 3D environment influence propagation of wireless rays, such as by predicting attenuation factor of a reflective component. Overall, we find promising results indicating neural simulators closely capture propagation effects. As neural simulators are additionally differentiable, we further show that they can be used to optimize inverse problems such as user localization. + +Limitations and Future Work. This paper presents the first step towards realizing a neural surrogate for simulating propagation of wireless rays. 
While we find promising results – in terms of empirically mimicking the simulator's performance while simultaneously reducing complexity – many important steps remain to realize our over-arching goal of differentiable wireless ray tracing. Our approach is designed to capture linear effects of the channel in line with standards (3GPP TR 38.901; ITU-R P.2040-2) and extending to non-linear effects (e.g., amplifier saturations) remains an open-problem. Additionally, while our focus is primarily reflection and transmission properties of ray-surface interactions (capturing majority of receive power) which are increasingly relevant for high-frequency transmissions, other properties (e.g., scattering, diffraction) require investigation to model simulations across a wider radio-frequency spectrum. Finally, our surrogate's performance is currently upper-bounded by the underlying simulator's performance, motivating studies into learnt calibration of the surrogate model with real-world measurement data to bypass simulation accuracy. + +Broader Technical Impact. Although our paper focuses on neural simulation of EM waves in the radio-frequency spectrum (0.5-100 GHz), we believe working towards this goal complements research in non-radio modalities as well. For instance, to model propagation of acoustic signals in spatial environments, estimating material-dependent ray-surface interactive properties remains a challenging problem and the proposed research direction potentially complements existing techniques. More generally, we believe that as radio signals require modelling both ray (e.g., reflection) and physical optic (e.g., interference, diffraction) properties, advances here are intertwined with many modalities across the EM spectrum (e.g., audio, visual). + +# REPRODUCIBILITY STATEMENT + +To ensure reproducibility, we take a number of steps. 
On the dataset side, we use either publicly available indoor layouts (e.g., RPLAN) or synthetically generate layouts with known random seeds (0 and 10 in our case). We further elaborate on the simulation settings to recreate our dataset in Section 4.1 and Section B. We plan to release the simulated data measurements. On the implementation side, we provide specific training details in Section C.4 and further elaborate on the detailed architecture in Section A.3. + +# ETHICS STATEMENT + +The data used in our paper corresponds to simulated data of physical processes (EM wave propagation). Since this does not involve any human subjects or personally identifiable information, we believe there is no conflict in this regard. + +# ACKNOWLEDGEMENT + +We thank Hanno Ackermann for discussions and feedback on the paper. We additionally thank numerous colleagues for insightful discussions: Thomas Hehn, Fabio Valerio Massoli, Maziar Raissi, Afshin Abdi, June Namgoong, Taesang Yoo, and Akash Doshi. + +# REFERENCES + +3GPP TR 38.901. Study on channel model for frequencies from 0.5 to 100 ghz. Standard, 3GPP, Valbonne, FR, March 2022. +Nicolas Amiot, Mohamed Laaraiedh, and Bernard Uguen. Pylayers: An open source dynamic simulator for indoor propagation and localization. In ICC, 2013. +Franck Djeumou, Cyrus Neary, Eric Goubault, Sylvie Putot, and Ufuk Topcu. Neural networks with physics-informed architectures and constraints for dynamical systems modeling. In Learning for Dynamics and Control Conference. PMLR, 2022. +Sebastian Dorner, Marcus Henninger, Sebastian Cammerer, and Stephan ten Brink. Wgan-based autoencoder training over-the-air. In IEEE International Workshop on Signal Processing Advances in Wireless Communications, 2020. +Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. SIGGRAPH, 1988. +Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3d object reconstruction from a single image. In CVPR, 2017. 
+Rizal Fathony, Anit Kumar Sahu, Devin Willmott, and J Zico Kolter. Multiplicative filter networks. In ICLR, 2020. +Andrew S. Glassner. An introduction to ray tracing. Morgan Kaufmann, 1989. +Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, 2014. +Fumio Ikegami, Tsutomu Takeuchi, and Susumu Yoshida. Theoretical prediction of mean field strength for urban mobile radio. IEEE Transactions on Antennas and Propagation, 39(3):299-302, 1991. +ITU-R P.2040-2. Effects of building materials and structures on radiowave propagation above about $100\mathrm{mhz}$ . Standard, International Telecommunication Union, Geneva, CH, September 2021. +William C. Jakes and Donald C. Cox. Microwave mobile communications. Wiley-IEEE press, 1994. +George Em Karniadakis, Ioannis G. Kevrekidis, Lu Lu, Paris Perdikaris, Sifan Wang, and Liu Yang. Physics-informed machine learning. Nature Reviews Physics, 3(6):422-440, June 2021. + +Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. +William C. Y. Lee. Mobile communications engineering. McGraw-Hill, 1982. ISBN 978-0-07-037039-5. +Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In ICCV, 2019. +J.W. McKown and R.L. Hamilton. Ray tracing as a design tool for radio networks. IEEE Network, 5(6):27-30, November 1991. +Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. +Nelson Nauata, Kai-Hung Chang, Chin-Yi Cheng, Greg Mori, and Yasutaka Furukawa. Housegan: Relational generative adversarial networks for graph-constrained house layout generation. In ECCV, 2020. +Nelson Nauata, Sepidehsadat Hosseini, Kai-Hung Chang, Hang Chu, Chin-Yi Cheng, and Yasutaka Furukawa. 
House-gan++: Generative adversarial layout refinement network towards intelligent computational agent for professional architects. In CVPR, 2021. +Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, 2021. +Tribhuvanesh Orekondy, Arash Behboodi, and Joseph B Soriaga. Mimo-gan: Generative mimo channel modeling. In IEEE ICC, 2022. +Timothy J O'Shea, Tamoghna Roy, and Nathan West. Approximating the void: Learning stochastic channel models from observation with variational generative adversarial networks. In ICNC, 2019. +Wamiq Para, Paul Guerrero, Tom Kelly, Leonidas J Guibas, and Peter Wonka. Generative layout modeling using constraint graphs. In CVPR, 2021. +Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In CVPR, 2019. +Tobias Pfaff, Meire Fortunato, Alvaro Sanchez-Gonzalez, and Peter W Battaglia. Learning mesh-based simulation with graph networks. arXiv preprint arXiv:2010.03409, 2020. +Maziar Raissi, Paris Perdikaris, and George Em Karniadakis. Physics informed deep learning (part i): Data-driven solutions of nonlinear partial differential equations. arXiv preprint arXiv:1711.10561, 2017. +Theodore S. Rappaport. Wireless communications: principles and practice, volume 2. prentice hall PTR New Jersey, 1996. +Theodore S Rappaport, Kate A Remley, Camillo Gentile, Andreas F Molisch, and Alenka Zajic. Radio Propagation Measurements and Channel Modeling. Cambridge University Press, 2022. +Remcom. Wireless insite, 2022. URL https://www.remcom.com/ wireless-insite-em-propagation-software. +Mathew K. Samimi and Theodore S. Rappaport. 3-D millimeter-wave statistical channel model for 5G wireless system design. IEEE Transactions on Microwave Theory and Techniques, 64(7): 2207-2225, 2016. 
+Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia. Learning to simulate complex physics with graph networks. In ICML, 2020. +Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. NeurIPS, 2020. + +Sergey Shirobokov, Vladislav Belavin, Michael Kagan, Andrei Ustyuzhanin, and Atilim Gunes Baydin. Black-box optimization with local generative surrogates. In NeurIPS, 2020. +Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. NeurIPS, 2019. +Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. NeurIPS, 2020. +Xingyuan Sun, Tianju Xue, Szymon Rusinkiewicz, and Ryan P Adams. Amortized synthesis of constrained configurations using a differentiable surrogate. NeurIPS, 2021. +Matthew Tancik, Pratul Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020. +David Tse and Pramod Viswanath. Fundamentals of wireless communication. Cambridge university press, 2005. +Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T Barron, and Pratul P Srinivasan. Ref-nerf: Structured view-dependent appearance for neural radiance fields. In CVPR, 2022. +Joram Walfisch and Henry L. Bertoni. A theoretical model of UHF propagation in urban environments. IEEE Transactions on antennas and propagation, 36(12):1788-1796, 1988. +Wenming Wu, Xiao-Ming Fu, Rui Tang, Yuhan Wang, Yu-Hao Qi, and Ligang Liu. Data-driven interior plan generation for residential buildings. ACM Transactions on Graphics (TOG), 38(6): 1-12, 2019. 
+William Xia, Sundeep Rangan, Marco Mezzavilla, Angel Lozano, Giovanni Geraci, Vasilii Semkin, and Giuseppe Loianno. Millimeter wave channel modeling via generative neural networks. In 2020 IEEE Globecom Workshops, 2020. +Yang Yang, Yang Li, Wuxiong Zhang, Fei Qin, Pengcheng Zhu, and Cheng-Xiang Wang. Generative-adversarial-network-based wireless channel modeling: Challenges and opportunities. IEEE Communications Magazine, 2019. +Hao Ye, Geoffrey Ye Li, Biing-Hwang Fred Juang, and Kathiravetpillai Sivanesan. Channel agnostic end-to-end learning based communication systems with conditional gan. In IEEE Globecom Workshops, 2018. +Hao Ye, Le Liang, Geoffrey Ye Li, and Biing-Hwang Juang. Deep learning-based end-to-end wireless communication systems with conditional gans as unknown channels. IEEE Transactions on Wireless Communications, 2020. +Yan Zhang, Jonathon Hare, and Adam Prugel-Bennett. Deep set prediction networks. NeurIPS, 2019. + +# Appendix + +# A APPROACH + +# A.1 BUILDING CHANNEL MODELS + +This section accompanies the text in Section 3. + +Channel models are defined either in a statistical way by defining a distribution over channel attributes or in deterministic way using ray tracing. Statistical channel models are inadequate for applications involving positioning, sensing and challenges of communication at higher frequencies (e.g., mmWave at 30-300 GHz (Rappaport et al., 2022)). Inspired by similar techniques in computer graphics (Glassner, 1989), traditional ray tracing approaches (see for example (McKown & Hamilton, 1991; Ikegami et al., 1991; Walfisch & Bertoni, 1988)) approximate propagation of electromagnetic waves by modeling interactions of each ray with objects in its paths. These interactions include for example reflection, diffraction and penetration. Although this is more efficient than solving Maxwell equations, ray tracing methods need a detailed knowledge of the environment and are generally slow for prototyping. 
They generally utilize hard-coded and mathematically tractable models, for example the knife-edge model for diffraction (Lee, 1982; Rappaport, 1996). These abstractions suffer from mismatches and require occasional tedious fine-tuning and calibration with real data. Improving these models while remaining tractable for rapid simulation rounds is not straightforward. Finally, they are non-differentiable and cannot be integrated into a closed-loop design pipeline. We plan to tackle these issues by building a neural surrogate of a physics-based wireless ray tracer in this paper. + +# A.2 REPRESENTING RAY ATTRIBUTES + +We represent the $k$ -th ray (among $K$ rays) at the $r$ -th iteration of rendering as $\pmb{u}_k^{(r)}$ . For notational convenience, we drop the sub- and super-script for the rest of the section. We characterize the wireless ray analogous to the concept of an optical ray (such as with geometric direction, intensity). In addition to the wireless attributes (see Equation 2), we further include meta-level attributes that help us propagate and render the eventual ray received at the receiver co-ordinate $\pmb{x}_{\mathrm{rx}}$ . We briefly describe these attributes here and elaborate on how they are obtained or updated over the next sections. The ray contains the attributes: + +$$ +\boldsymbol{u} = \Big( \underbrace{a \quad \tau \quad \phi}_{\text{(a) Channel Attributes}} \quad \underbrace{\boldsymbol{x} \quad \boldsymbol{d} \quad t_{s} \quad t_{\mathrm{rx}} \quad \rho_{\mathrm{rx}}}_{\text{(b) Ray Geometry}} \quad \underbrace{\sigma_{\mathrm{upd}} \quad \sigma_{\mathrm{rx}}}_{\text{(c) State}} \Big) +$$ + +which as shown can be grouped into three categories: (a) Wireless Channel Attributes. 
Exactly as discussed earlier in the section (see Equation 2), it contains the attributes to construct the wireless channel time-angle impulse response (Equation 1) (b) Ray Geometry. We additionally include geometrical representation of the ray, which helps us determine how to propagate the ray through the environment. Specifically, we represent the geometry of the ray using the line equation: $\pmb{p}(t) = \pmb{x} + t\pmb{d}$ , where $\pmb{x}$ is the origin and $\pmb{d}$ is a unit-vector encoding the ray direction. We are interested in two particular solutions of $t$ in this equation: $t_s$ for which the ray intersects with a surface (mesh face in our case) and $t_{\mathrm{rx}}$ for which the ray is tangential to a sphere around some receiver of radius $\rho_{\mathrm{rx}}$ . (c) Ray state. To help with subsequent updates to the ray at future iterations, we track two binary variables. $\sigma_{\mathrm{upd}}$ denotes whether the ray has to be updated in the next iteration. $\sigma_{\mathrm{rx}}$ denotes whether the ray has impinged on a reception sphere of a predefined radius. + +# A.3 RAY MARCHING: DETAILS + +Ray-Environment Intersections. For each ray, we are interested in their first interaction with the environment (e.g., first wall it hits, impinging on the receiver). For this, we are interested in the solutions to the line equation representing the geometry of the ray: $\pmb{p}(t) = \pmb{x}_k^{(r)} + t\pmb{d}_k^{(r)}$ . In particular, we are interested in two solutions of $t$ : (a) Ray-Face intersection. The smallest value of $t > 0$ for which $\pmb{p}(t)$ lies on a surface (a triangular mesh face in our case). For this, we perform ray-triangle intersections with each face in the environment and find the corresponding solution $t = t_s$ . 
+ +![](images/d2f20eee2b3f9b823f2adc8f4a42645796bf6d8ad301be8f20a54c5baf03ff4e.jpg) +Figure A1: Ray-surface interaction network $f_{\theta}^{1}$ + +This helps us estimate the new relay location: $\pmb{x}_k^{(r + 1)} = \pmb{x}_k^{(r)} + t_s\pmb{d}_k^{(r)}$ . (b) Ray-Rx intersection. In parallel, we are also interested in positive solutions of $t$ for which the ray hits the receiver if it were modeled as a sphere of radius $\rho_{\mathrm{rx}}$ . In this case, we obtain the value of $t$ as the projection of $\pmb{x}_{rx}$ on $\pmb{p}(t)$ : + +$$ +t_{\mathrm{rx}} = \max \left(0, \left(\boldsymbol{x}_{\mathrm{rx}} - \boldsymbol{x}_{k}^{(r)}\right) \cdot \boldsymbol{d}_{k}^{(r)}\right) \tag{3} +$$ + +$$ +\rho_{\mathrm{rx}} = \left\| \left(\boldsymbol{x}_{\mathrm{rx}} - \boldsymbol{x}_{k}^{(r)}\right) - t_{\mathrm{rx}} \boldsymbol{d}_{k}^{(r)} \right\| \tag{4} +$$ + +Consequently, at the end of the ray-environment intersection step, we analytically estimate the first intersections of the ray with both the environment and (potentially) the receiver. + +Ray-Surface Interaction. If the ray $\pmb{u}_k^{(r)}$ (originating at $\pmb{x}_k^{(r)}$ and travelling in direction $d_k^{(r)}$ ) hits a wall at $x_{k}^{(r + 1)}$ (as estimated in the previous step), we are now interested in characterizing the outgoing ray with origin at $x_{k}^{(r + 1)}$ . Specifically, we are interested in estimating the new direction $d_k^{(r + 1)}$ (does the ray penetrate the wall, or reflect?) and the corresponding change in gain that arises (i.e., loss of power, change of phase). This is a complex problem and typically requires in-depth knowledge of the surface (e.g., which material) as well as its specific EM properties (e.g., frequency-dependent effects). Our solution is to instead learn these properties by associating spatial regions in the environment with EM-specific properties. Towards this, we delegate the association to a neural network shown in Figure A1. 
The key idea is to associate spatial co-ordinates (or sets of co-ordinates, given by the face on which they lie) with EM properties. We achieve this by mapping spatial properties (e.g., face corresponding to $x_{k}^{(r + 1)}$ ) to EM properties (e.g., gain factor). + +Specifically, our neural network is: + +$$ +\boldsymbol{v}_{i} = \text{spatial\_net}\left(\boldsymbol{f}_{i}, \boldsymbol{n}_{i}, \boldsymbol{b}_{i}\right) \tag{5} +$$ + +$$ +\boldsymbol{s}_{i}, \boldsymbol{\rho}_{i} = \text{directional\_net}\left(\boldsymbol{v}_{i}, \boldsymbol{d}_{i}\right) \tag{6} +$$ + +which consists of a spatial_net to encode EM properties specific to a spatial region, but independent of the incidence direction. This network takes as inputs the one-hot encoding of the face $\pmb{f}_i$ on which the relay point $\pmb{x}_k^{(r+1)}$ lies and the surface normal vector at that point $\pmb{n}_i$ . In addition, we also provide the network with a 3-dim conditioning vector of signed distances + +$$ +\boldsymbol{b}_{i} = \left(\operatorname{sdf}\left(\boldsymbol{x}_{\mathrm{tx}}, \boldsymbol{f}_{i}\right), \quad \operatorname{sdf}\left(\boldsymbol{x}_{\mathrm{rx}}, \boldsymbol{f}_{i}\right), \quad \operatorname{sdf}\left(\boldsymbol{x}_{k}^{(r + 1)}, \boldsymbol{f}_{i}\right)\right) \tag{7} +$$ + +where $\operatorname{sdf}(\pmb{x},\pmb{f})$ is the signed distance function between co-ordinate $\pmb{x}$ and face $\pmb{f}$ . We find it crucial to condition the network on these values to help predict EM-properties for the relevant outgoing components (e.g., reflective, transmissive). + +The output of the network is a gain factor $s_i$ , such that the new gain of the ray $\boldsymbol{u}_k^{(r+1)}$ is $a_k^{(r+1)} = s_i a_k^{(r)}$ . Since the gain magnitudes can be represented in either linear or logarithmic scale, we predict both additive and multiplicative factors of the gain in practice ( $a_k^{(r+1)} = s_{i,1} a_k^{(r)} + s_{i,2}$ ). 
In parallel, the network also predicts the rotation that a ray incident with direction $\boldsymbol{d}_k^{(r)}$ on $\boldsymbol{f}_i$ undergoes. We characterize rotations using a 4-dim representation $\rho_i$ based on the Euler-Rodrigues parameterization. This parameterization encodes the axis of rotation about which $\boldsymbol{d}_k^{(r)}$ rotates by an angle $\vartheta$ . We represent the rotation by a $3 \times 3$ SO(3) matrix $A$ and the new outgoing direction of ray $k$ is given by $\boldsymbol{d}_k^{(r+1)} = A \boldsymbol{d}_k^{(r)}$ . + +Reception/Termination check. For some special cases, we halt ray marching for a subset of rays, namely when ray $k$ impinges on a reception sphere of radius under $\varrho$ meters. This prevents + +![](images/0cef139f6ea25ca9f4890a522c10971c0381d79ed5f5f595829cb4ec9cb6adc9.jpg) +(a) Testset 1: "Checkerboard" + +![](images/ff9a1704ab235f3358c8018acd8dba87c60a37a294e3c398aed3663d99c28239.jpg) +(b) Testset 2: "Generalization-z" +Figure A2: Train and test regimes: We consider disjoint subsets of train (blue markers; identical in all figures) and test (orange markers) co-ordinates of transmit and receive locations. + +![](images/44d3c622da02035b4fcd69c7fea3ddd1cb7aefd0ef2363370ac71d0b3f328243.jpg) +(c) Testset 3: "Generalization-diag" + +a future version of the ray potentially being incorrectly received once again. In addition, for computational reasons, we also terminate ray marching if the ray exits the region of interest (e.g., ray exiting the environment). + +Free-space interaction. While the previous steps modeled the effect of the environment's material properties on wireless propagation, we now switch focus to free space. 
In this case, we model propagation of a ray using the empirically-adjusted Friis' Equation: + +$$ +P_{r}(d) = P_{t} K \left(\frac{d_{0}}{d}\right)^{\lambda}, \quad d \geq d_{0} \tag{8} +$$ + +which represents the power received at the receive antenna $P_r$ as a function of the power fed into the transmitting antenna $P_t$ and the distance travelled by the ray $d$ . We learn the remaining scalar parameters $K$ (a constant representing antenna gains), $\lambda$ (path-loss exponent), and $d_0$ (reference distance). + +# B DATASET: ADDITIONAL DETAILS + +# B.1 TRAIN AND TEST REGIMES + +Figure A2 accompanies the text in Section 4.1. + +# B.2 SIMULATION FOR WIINDOOR DATASET: DETAILS + +We created 3 different floor-plans in Wireless Insite where the 2D floor-plan layouts and semantic labels of each room are picked from the House-GAN++ dataset and mapped into a 3D layout where the scale and dimensions are determined based on practical floor-plan scenarios. All layouts are scaled to $10\mathrm{m}\times 10\mathrm{m}$ with ceiling height at $3\mathrm{m}$ . All the inner walls and floor materials are layered dielectrics with specific permittivity, conductivity & roughness. These have finite reflection and transmission coefficients. The reflection coefficient is corrected if the surface is not smooth while the transmission coefficients are unaffected by surface roughness. + +Materials. Propagation characteristics are naturally affected by the medium and we create a dataset with a fairly diverse set of materials. A layered dielectric with two layers separated by free space of $89\mathrm{cm}$ is chosen for all inner walls and the outer walls were made of thicker concrete. Doors were created using free space except the balcony door which was created using glass with a small thickness. The balcony walls were laid out using brick walls. 
The propagation factor and index of reflection are functions of the permittivity $(\epsilon)$ and conductivity $(\sigma)$ of medium. In Table A1, we present the relative permittivity and conductivity. + +Antenna and Transceiver configuration. Omnidirectional beam patterned antenna with polarization perpendicular to the z axis is setup for all receive and transmit antennas. Location, Orientation of the antenna are set relative to global reference such that they are rotated about the z axis by 90deg and placed at a height of $2.8\mathrm{m}$ . All antennas employ the same configuration with no transmission loss. + +
thickness(cm)permittivity εconductivity σ (S/m)
Layered drywall(1,3)1.32.80.013
Brick12.54.440.0001
Concrete305.310.015
Glass32.40
+ +Table A1: Material properties + +
OverallGeometryAvg. Delay
kNN0.2640.2881.479
MLP0.2800.3781.191
WiNeRT0.2030.1141.297
+ +![](images/72cfcef1ab10c90b4f112744c062f3d7c1a191477f885f9a69747b740a9af549.jpg) +Table A3: Qualitative results + +Simulation. We currently run the simulation using the shoot and bounce model where a geometric path is drawn from every point on the transmitter field pattern to the receive point. This also includes transmission through surfaces allowing it to model transmittance and reflection. Rays are first traced from the source points with the rays reflecting specularly from the building walls. The rays that hit building walls are reflected specularly and continue to be traced up to the maximum number of reflections and transmissions. + +The spatial separation of rays is set to $0.75^{\circ}$ . The geometric path traced by the ray undergoes up to 6 specular reflection and 3 transmittance with path loss threshold set to -70dBm. + +Total received power of all paths is determined as the sum of time averaged power of group of correlated paths. A set of ray paths that interact with similar set of faces and follow nearly same path are defined as group. + +# C EVALUATION: ADDITIONAL DETAILS + +# C.1 CONTROLLABLE SYNTHESIS: GENERALIZATION TO RECONFIGURED FLOORMAPS + +Table A2 accompanies the discussions in Section 4.3, where we evaluate a WiNeRT model trained in one environment $\mathbf{F}$ and evaluated in a reconfigured environment $\mathbf{F}'$ . + +# C.2 CONTROLLABLE SYNTHESIS: LOWER- AND HIGHER-ORDER INTERACTIONS + +See Table A4, which accompanies the discussions in Section 4.3. + +Table A2: Quantitative results. For a trained approach evaluated on a reconfigured floormap $\mathbf{F}^{\prime }$ + +
#interactions rOverall (DoD)GeometryAvg. Delay
01*2301*2301*23
kNN0.220.330.500.550.310.210.290.331.302.242.963.40
MLP0.580.460.610.670.340.330.370.410.982.052.933.48
WiNeRT0.120.250.440.510.000.090.210.270.032.032.432.8
+ +Table A4: Low- and Higher-Order Interactions. We vary the number of ray-surface interactions (denoted by $r$ ) for a model trained using single-order interactions $\left( {r = 1\text{,denoted by * in the table).}}\right)$ . + +![](images/54ed2cb9f6fb7385596948980201aec5351dc6670b16dde8c6a637b89d3ab444.jpg) +Figure A3: Simulation Time. Comparing wall-clock time vs. accuracy performances of our approach (WiN-eRT) against baselines (MLP, kNN) and wireless ray tracing softwares (PyLayers and Insite). The 'Oracle ray launch' variant, which utilizes known ray launch directions at test-time, indicates an approximate performance upper-bound of our approach. + +![](images/87f6ef8759d03631a96dad398a0fa0df83e5dacb9239815374b27f524ea23781.jpg) + +# C.3 SIMULATION TIME + +In Sections 4.2 and 4.3, we found our proposed approach WiNeRT achieves reasonable performance compared with non-differentiable and non-neural simulator packages. Additionally, we demonstrated that WiNeRT is capable of generalization (e.g., to novel elevations, to re-configured floor-plans) and can be used for inverse problems. In this section, we additionally discuss run-time performance of WiNeRT and compare against baseline approaches as well as the simulator package. + +Experimental Setup. The end-goal of the experiment is to analyze the simulation time (specifically wall-clock times) of the proposed WiNeRT approach and contrast it against both the simulator softwares (PyLayers, Wireless Inside) and proposed baselines (MLP, kNN). We first remark that the implementations fundamentally vary between the approaches and hence an ideal wall-clock timing comparison is not possible. For instance, some approaches (WiNeRT, MLP, kNN) use a PyTorch implementation which can be run on GPU whereas the wireless ray tracing simulation packages are either proprietary (e.g., Wireless Inside) or developed exclusively for CPU (e.g., PyLayers) and thereby limiting the choice of hardware on which they can be run. 
Nonetheless, we keep simulation settings consistent when possible: by running the exact simulations used for the overall results (setting 'checkerboard'; see Section 4.1) and furthermore estimating wall-clock times per simulation (batch size of 1) over $N$ individual simulations with a maximum of 1 reflection and transmission (i.e., $r = 1$ ). For all approaches, we report only the mean simulation time over the multiple simulations, as we found the variances low ( $\sigma^2 \leq 3.5 \times 10^{-3}$ ). When possible, we also report corresponding accuracy ('overall prediction error'; see Sec. 4.1). We evaluate PyTorch-based implementations (WiNeRT, MLP, kNN) over $N = \sim 8K$ simulations using pretrained models (specifically the ones for reporting 1) on a Nvidia A100 GPU. In the case of WiNeRT, we are able to control time-accuracy trade-off to some degree at test-time by varying the number of launched rays $K$ (see 'Ray Launching' in Sec. 3.1) as a function of the number of subdivisions of the ico-sphere. We choose 1-5 sub-divisions and additionally an 'oracle ray' launch strategy to depict a lower-bound on the time-accuracy values. + +Results. We present the time-accuracy in Figure A3 and observe: (i) WiNeRT (orange markers) is significantly faster than the simulators (blue line), demonstrating speed-ups of $11 - 22 \times$ over PyLayers (Amiot et al., 2013) and $6 - 22 \times$ over Wireless Insite (Remcom, 2022). Although the simulators are approximately an upper-bound on the accuracy, we find that WiNeRT can make reasonable trade-offs on accuracy to boost simulation times in certain scenarios; (ii) The baselines we propose in this paper (MLP and kNN) are even faster. MLP (green marker) is the fastest with speed-ups of $538 - 687 \times$ , which can be largely attributed to a simple architecture (3-layer ReLU MLP with 128 hidden units). kNN (red marker) is the second fastest with $79 - 97 \times$ speed-ups over the simulators. 
+ +![](images/f39a85c43617e0f4efee44c6bcbf283850edecce615634dde21652b47fb763ab.jpg) +Figure A4: User Localization. We backpropagate through our trained forward model to solve for the position of the receiver. + +While these baselines offer much faster simulation times, their generalization capabilities remain unclear as they suffer from memorization (see discussion for Fig. 3). + +# C.4 USER LOCALIZATION VIA INVERSE RENDERING + +In this section, we provide additional details to complement the discussion on the user localization experiment in Section 4.3. For the user localization task, the problem is to determine the user location $\pmb{x}_{\mathrm{rx}}$ from an observed channel $h_{\mathrm{obs}}$ . We solve for $\pmb{x}_{\mathrm{rx}}$ by performing gradient descent on the spatial coordinate $\pmb{x}_{\mathrm{rx}}^{\mathrm{ukn}}$ that minimizes the channel loss $\text{render}_{\theta}(\pmb{x}_{\mathrm{tx}}, \pmb{x}_{\mathrm{rx}}^{\mathrm{ukn}}, \pmb{F}_i)$ . This is possible with WiNeRT, since we can backpropagate through the neural simulation of the channel. We optimize for $\pmb{x}_{\mathrm{rx}}$ using SGD with momentum (lr=0.01, momentum=0.9, 2000 iterations) with two additional considerations: (a) we constrain $\pmb{x}_{\mathrm{rx}}$ to lie in valid ranges (positive, upper-bounded by $\pmb{x}_{\max}$ ) by clamping the values at each iteration; and (b) to prevent solutions in local minima, we take the result which yields the minimum loss over five random initializations of $\pmb{x}_{\mathrm{rx}}$ . We present the CDF of localization errors over 100 test examples in Figure A4. + +# D IMPLEMENTATION: ADDITIONAL DETAILS + +In this section, we provide additional implementation details and hyperparameter choices of approaches discussed in the paper. + +# D.1 WINERT + +Architecture: Ray-surface Interaction $f_{\theta}^{1}$ . We follow an MLP architecture (see Figure A1) similar to NeRF approaches (Mildenhall et al., 2020; Verbin et al., 2022). 
We decompose the parameters into view-independent ('spatial MLP') and view-dependent ('directional MLP') sets. Given a ray incident at a spatial co-ordinate $x_{k}$ in direction $d_{k}$ , the spatial MLP (2 hidden layers, 64 units) takes three inputs: (a) the face $f_{i}$ (1-hot index) on which $x_{k}$ lies; (b) the surface normal $n_{i}$ of face $f_{i}$ ; and (c) a 3d vector of signed-distance values between the face and $x_{\mathrm{tx}}$ , $x_{\mathrm{rx}}$ , and $x_{k}$ . We find (c) provides information (e.g., $x_{\mathrm{tx}}$ and $x_{\mathrm{rx}}$ on the same side of wall) to condition the network to predict attributes related to either reflection or transmission components. The directional MLP (1 hidden layer, 64 units) takes two inputs: (i) a 32-dim bottleneck vector produced by the spatial MLP; and (ii) a 3-dim unit vector representing the incidence direction $d_{k}$ . The final output are scaling and additive co-efficients $s$ for the gain magnitude (i.e., $a_{k}^{(r + 1)} = s_{1}a_{k}^{(r)} + s_{2}$ ) and 4-dim parameters $\rho_{i}$ for rotation (based on Euler-Rodrigues formulation). The rotation parameters $\rho_{i}$ are mapped to a $3\times 3$ rotation matrix $A = \Gamma (\rho_{i})$ to transform the incident to outgoing ray $d_{k}\coloneqq Ad_{k}$ . + +**Renderer:** Ray Launching. In the first step of the renderer, we launch $K$ rays from co-ordinate $x_{\mathrm{tx}}$ uniformly in all directions. To achieve this, we center a ico-sphere with 5 sub-divisions and + +choose as directions the vectors from $\pmb{x}_{\mathrm{tx}}$ towards the ico-sphere vertices (10.2K vertices with 5 sub-divisions). Since we know the exact co-ordinates between $\pmb{x}_{\mathrm{tx}}$ and $\pmb{x}_{\mathrm{rx}}$ , we manually include the line-of-sight direction resulting in a total of $K$ rays. + +**Renderer:** Ray Marching. The core step of the renderer is ray marching (detailed in Figure 2). 
We elaborate on technical implementation details step-by-step using as reference Figure 2. We drop sub- and super-scripts for rest of the paragraph for notational convenience. (a) **Ray-Triangle intersection:** For a given ray $\pmb{p} = \pmb{o} + t\pmb{d}$ , we are interested in the minimum finite solution of $t > 0$ for which the ray intersects with each face of the mesh. For some face with coordinates $(\pmb{a}, \pmb{b}, \pmb{c})$ , this entails solving for $t$ such that $\pmb{p} = \pmb{o} + t\pmb{d} = \alpha \pmb{a} + \beta \pmb{b} + \gamma \pmb{c}$ (under constraints $\alpha + \beta + \gamma = 1$ and $0 \leq \alpha, \beta, \gamma \leq 1$ ). We calculate valid solutions using Cramer's rule for all faces in the mesh and only consider (if one exists) the minimum positive solution corresponding to the first ray-triangle intersecting point. (b) **Ray-Surface interaction:** Given the solution from the previous step (i.e., on which spatial co-ordinate the ray is incident on the surface), we are now interested in estimating the outgoing ray from that co-ordinate. For this, we leverage an MLP that maps incident gain, direction, and certain face properties to outgoing gain and direction. More details of this MLP are discussed above under the 'Architecture: Ray-surface Interaction'. (c) **Reception/Termination:** Per ray, we stop ray marching steps if it is either received (hits a reception sphere of fixed size of $30\mathrm{cm}$ ) or leaves the region of interest (e.g., penetrates exterior wall is shot into infinity). In other cases, we continue with ray marching steps. + +**Renderer:** Ray Aggregation. At the end of ray marching steps (over $R$ iterations), we determine the final state of the $K$ rays. We are now interested in a small subset of these $K$ rays that is received at a receiver at fixed co-ordinate $x_{\mathrm{rx}}$ . Note that we perform these steps only at test-time. 
The ray aggregation as a result involves two steps: (a) Ray Filtering: where we determine the subset of rays that arrives at $x_{\mathrm{rx}}$ by modelling the receiver as a sphere of fixed radius of $30\mathrm{cm}$ ; and (b) Preventing double counting: we find duplicate rays arrive at $x_{\mathrm{rx}}$ due to a combination of a non-infinitesimally sized reception sphere and a high density of launched rays. We cull such duplicates by grouping rays based on a unique interaction sequence (i.e., IDs of faces it intersects with) and choosing the ray of the shortest length in each group. + +**Optimization.** We perform gradient-descent steps on learnable parameters using Adam with a learning rate of 0.001 with batch size of 1. We observed large gradients (possibly due to single-batch) and hence clip gradient values to 100 during training. The model is trained for 100 epochs and we pick the checkpoint with lowest validation error during training. + +# D.2 BASELINES + +MLP. The MLP baseline extends ideas presented in Tancik et al. (2020); Sitzmann et al. (2020), where a simple MLP is used to map co-ordinates to the signal (e.g., pixel co-ordinate to RGB values). In our paper, the MLP directly maps the spatial co-ordinates $x_{\mathrm{tx}}$ and $x_{\mathrm{rx}}$ to channel $h_i$ . The MLP contains 3 hidden layers, each with 128 hidden units and ReLU activation. The core idea here is to implicitly learn the geometry of the environment (floormap $F$ ), which is common to all train and test examples. Note that in contrast to previous works, this model does not use positional embeddings nor sinusoidal activations, as our initial experiments indicated they learn high-frequency artifacts that are not typically present in our datasets (the wireless channels). + +kNN. 
The kNN baseline (with $k = 1$ ) works as follows: for a given test-example $(\pmb{x}_{\mathrm{tx}}, \pmb{x}_{\mathrm{rx}})$ we find the spatially closest training example arg $\min_{i} ||\pmb{x}_{\mathrm{tx}} - \pmb{x}_{\mathrm{tx},i}^{\mathrm{train}}||_{2} + ||\pmb{x}_{\mathrm{rx}} - \pmb{x}_{\mathrm{rx},i}^{\mathrm{train}}||_{2}$ and predict channel $\pmb{h}_i$ . \ No newline at end of file diff --git a/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/images.zip b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..2e5357d6d8255f5b7dad268ed3154ce3b932bd5a --- /dev/null +++ b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2df74b208589d4b6372b851c8f6518b1aff2d399f090ff52de774a465ddc9b5 +size 508924 diff --git a/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/layout.json b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ac68ff9d1bccd6ee208fc288643bd6afff5664c6 --- /dev/null +++ b/2023/WiNeRT_ Towards Neural Ray Tracing for Wireless Channel Modelling and Differentiable Simulations/layout.json @@ -0,0 +1,14676 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 504, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 504, + 137 + ], + "type": "text", + "content": "WINERT: TOWARDS NEURAL RAY TRACING FOR WIRELESS CHANNEL MODELLING AND DIFFERENTIABLE SIMULATIONS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 154, + 400, + 188 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 154, + 400, + 188 + ], + "spans": [ + { + "bbox": [ + 110, + 154, + 400, + 188 + ], + "type": "text", + "content": "Tribhuvanesh Orekondy, Kumar Pratik, Shreya Kadambi, Hao Ye, Joseph Soriaga, Arash Behboodi \nQualcomm AI Research*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 276, + 217, + 335, + 229 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 217, + 335, + 229 + ], + "spans": [ + { + "bbox": [ + 276, + 217, + 335, + 229 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 241, + 471, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 241, + 471, + 451 + ], + "spans": [ + { + "bbox": [ + 140, + 241, + 471, + 451 + ], + "type": "text", + "content": "In this paper, we work towards a neural surrogate to model wireless electromagnetic propagation effects in indoor environments. Such neural surrogates provide a fast, differentiable, and continuous representation of the environment and enables end-to-end optimization for downstream tasks (e.g., network planning). Specifically, the goal of the paper is to render the wireless signal (e.g., time-of-flights, power of each path) in an environment as a function of the sensor's spatial configuration (e.g., placement of transmit and receive antennas). NeRF-based approaches have shown promising results in the visual setting (RGB image signal, with a camera sensor), where the key idea is to algorithmically evaluate the 'global' signal (e.g., using volumetric rendering) by breaking it down in a sequence of 'local' evaluations (e.g., using co-ordinate neural networks). In a similar spirit, we model the time-angle channel impulse response (the global wireless signal) as a superposition of multiple paths. 
The wireless characteristics (e.g., power) of each path is a result of multiple evaluations of a neural network that learns implicit ray-surface interaction properties. We evaluate our approach in multiple indoor scenarios and demonstrate that our model achieves strong performance (e.g., " + }, + { + "bbox": [ + 140, + 241, + 471, + 451 + ], + "type": "inline_equation", + "content": "<0.33\\mathrm{ns}" + }, + { + "bbox": [ + 140, + 241, + 471, + 451 + ], + "type": "text", + "content": " error in time-of-flight predictions). Furthermore, we demonstrate that our neural surrogate whitens the 'black-box' wireless simulators, and thus enables inverse rendering applications (e.g., user localization)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 469, + 208, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 469, + 208, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 469, + 208, + 481 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 494, + 506, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 494, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 104, + 494, + 506, + 584 + ], + "type": "text", + "content": "Realistic simulations of physical processes are vital to many scientific and engineering disciplines. In this paper, we focus on simulation of wireless electromagnetic (EM) signals within a propagation environment. The physics of such EM wave propagation between a transmit and receive point are analytically given by Maxwell equations: the transmitted wave undergoes different interactions with the environment (e.g., reflection), and the receiver gets the wave through multiple paths with different time-of-flights and powers, and from different directions. 
However, solving the Maxwell equations with boundary conditions requires in-depth knowledge of the propagation environment, hence classically modelling EM propagation is intractable for most engineering applications." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 587, + 506, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 587, + 506, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 587, + 506, + 677 + ], + "type": "text", + "content": "Existing techniques make such simulations tractable by trading-off accuracy for speed. At one end of the spectrum, such simulations are represented in a statistical sense where a probabilistic model roughly captures the marginalized distribution over time-of-flights, gains and direction of transmit-receive paths. However, this level of accuracy is insufficient for designing systems that efficiently operate in high frequency bands. This motivates solutions at the other end of the spectrum: wireless ray tracing simulators. Given a detailed CAD representation of the environment along with the material properties, and numerous wireless configuration parameters (e.g., placement of a base station), the simulators generate resulting propagation characteristics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 681, + 507, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 507, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 507, + 715 + ], + "type": "text", + "content": "Although wireless ray tracing simulators are appealing, there are a few drawbacks. First, they are generally slow, which poses a bottleneck for closed-loop design pipelines, as wireless configurations cannot be quickly mapped to propagation characteristics. 
Second, because they are non" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 382, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 382, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 382, + 732 + ], + "type": "text", + "content": "*Qualcomm AI Research is an initiative of Qualcomm Technologies, Inc" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 82, + 504, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 504, + 159 + ], + "type": "text", + "content": "differentiable, they are not amenable with inverse physical design formulations, for example optimizing base station placement with the simulator in the optimization loop. Third, they usually require additional fine-tuning with real data as they are not data-driven. Calibrating them with real-world measurements is non-trivial and tedious. Fourth, they cannot generally inter-operate with probabilistic frameworks which have the advantage of better dealing with epistemic uncertainties. We believe neural surrogates provide a natural solution to circumvent many of these drawbacks of classical ray tracing simulators." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 164, + 504, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 164, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 107, + 164, + 504, + 264 + ], + "type": "text", + "content": "In this work, we propose a neural wireless simulator ('WiNeRT') by building on recent advances in scenes representation as continuous-function neural networks (Sitzmann et al., 2019; Tancik et al., 2020; Mildenhall et al., 2020). In particular, central to our approach is learning a network to model ray-surface interactions, i.e., the network transforms an incident wireless ray to an attenuated outgoing ray. By shooting out a number of rays and evaluating the network at relevant spatial regions in the environment, we estimate the wireless characteristics as a set of transmit-receive paths, each path encodes attributes such as time-of-flight and gain. Our approach also addresses some unique technical challenges posed by the non-visual wireless modality, such as dealing with sparse high-dimensional time-angle measurement signals." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 269, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 269, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 107, + 269, + 504, + 357 + ], + "type": "text", + "content": "We demonstrate that our neural wireless simulator reasonably renders the wireless propagation aspects by evaluating on two datasets which captures " + }, + { + "bbox": [ + 107, + 269, + 504, + 357 + ], + "type": "inline_equation", + "content": "50 - 100\\mathrm{m}^2" + }, + { + "bbox": [ + 107, + 269, + 504, + 357 + ], + "type": "text", + "content": " indoor propagation scenes. Interestingly, we find that the 3D-structure-aware implicit formulation is a strong inductive bias and helps generalization to significant inference-time distributions shifts. 
Finally, we demonstrate the potential of our differentiable forward model in solving inverse problem by tackling the user localization problem after posing it as an inverse rendering problem. Our results indicate that simulator physics for specified environments can be 'distilled' into neural surrogates and thereby presenting first steps towards closed-loop design pipelines of wireless communication systems." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 376, + 210, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 376, + 210, + 389 + ], + "spans": [ + { + "bbox": [ + 107, + 376, + 210, + 389 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 406, + 504, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 406, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 107, + 406, + 504, + 517 + ], + "type": "text", + "content": "Physics-based Neural Simulations. There exists a wide body of literature to model physical processes using advances in neural networks (Djeumou et al., 2022; Karniadakis et al., 2021; Raissi et al., 2017). As simulating physical processes can be expensive and can also present nondifferentiable 'black-box' in design pipelines, recent literature addresses how to work towards neural surrogates, such as for particle simulation (Sanchez-Gonzalez et al., 2020), mesh simulations (Pfaff et al., 2020), design of particle accelerators (Shirobokov et al., 2020), and inverse kinematics (Sun et al., 2021). In this paper, we are particularly interested in a specific physical process - wireless EM-wave propagation. Although this has received limited recent attention (Xia et al., 2020) in a 3D-oblivious setting, it is unclear whether these extend to complex configurations. Consequently, in this work, we work towards the first 3d-structure-aware surrogates for wireless ray tracing simulation." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 525, + 504, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 525, + 504, + 678 + ], + "spans": [ + { + "bbox": [ + 107, + 525, + 504, + 678 + ], + "type": "text", + "content": "Neural Channel Modelling. Although propagation channel modeling has been a central topic in wireless communication (Jakes & Cox, 1994; Lee, 1982; Rappaport et al., 2022), there has been a recent trend for fully data-driven models. The main paradigm of these activities is to use machine learning to learn complex distributions, model non-linearities and have differentiable simulators. These works can be categorized as statistical channel models where the channel input-output relation is modelled as a conditional probability distribution. Many works leverage recent advances in generative modelling and use models like generative adversarial networks (GANs) (Goodfellow et al., 2014) or variational autoencoders (VAEs) (Kingma & Welling, 2013) to learn the channel model (O'Shea et al., 2019; Ye et al., 2018; Yang et al., 2019; O'Shea et al., 2019; Orekondy et al., 2022; Ye et al., 2020; Dorner et al., 2020). In contrast to these works, our approach inscribes within ray tracing channel modeling paradigm, where wireless propagation is precisely modelled by tracing wireless rays, however, unlike classical ray tracers, our model is able to blend in the elements of statistical modeling and is trainable directly on field data. To the best of our knowledge, this work is the first differentiable neural ray tracer for wireless channel modelling." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 687, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 687, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 687, + 504, + 731 + ], + "type": "text", + "content": "Neural Scene Representations. 
Representing scenes (or more generally signals) has been widely studied in literature, such as encoding the signal in the latent space of a generative model (Kingma & Welling, 2013; Goodfellow et al., 2014). A more recent link of work encodes the signal in the parameters of a co-ordinate MLP (Park et al., 2019; Sitzmann et al., 2020; Tancik et al., 2020; Fathony" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 752, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 752, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 752, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 80, + 504, + 140 + ], + "blocks": [ + { + "bbox": [ + 107, + 80, + 504, + 140 + ], + "lines": [ + { + "bbox": [ + 107, + 80, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 107, + 80, + 504, + 140 + ], + "type": "image", + "image_path": "c74b0a95ffdda8f97ceb12b64de439e99ae04a016d2f6461b653f52e5eddf3a6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "lines": [ + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "type": "text", + "content": "Figure 1: Approach Overview. 
We learn a forward simulator " + }, + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "type": "inline_equation", + "content": "\\text{render}_{\\theta}(\\cdot)" + }, + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "type": "text", + "content": " that maps an environment configuration to a wireless channel " + }, + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "type": "inline_equation", + "content": "h_i" + }, + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "type": "inline_equation", + "content": "h_i" + }, + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "type": "text", + "content": " is a set of wireless propagation paths between " + }, + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{tx}} - x_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 147, + 504, + 178 + ], + "type": "text", + "content": " (green rays in right image), each path encoding certain channel attributes e.g., path gain." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 186, + 506, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 506, + 275 + ], + "type": "text", + "content": "et al., 2020), thereby mapping co-ordinates (e.g., spatial, temporal) to the signal intensity values (e.g., pixel intensity, amplitude). In a specific case where the signal is a 2D RGB image, recent works (Schwarz et al., 2020; Niemeyer & Geiger, 2021; Mildenhall et al., 2020) show promising results by additionally employing image-based differentiable rendering paradigms (Drebin et al., 1988; Liu et al., 2019) to recover 3D properties of the scene. 
Inspired by this idea, our work neurally represents a wireless scene by tackling a set of orthogonal challenges, such as dealing with sparse high-dimensional signals and particularly modelling reflection and transmission effects. Consequently, we work towards the first 3D-aware neural 'wireless' scene representation model." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 290, + 185, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 290, + 185, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 185, + 303 + ], + "type": "text", + "content": "3 APPROACH" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 314, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 504, + 348 + ], + "type": "text", + "content": "In this section, we begin with some preliminaries to the subsequent formulation of the neural wireless ray tracing problem. We then provide an initial overview of our approach in Sec. 3.1 and then dive deeper into specific technical aspects of wireless ray marching in Sec. 3.2." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 356, + 504, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 504, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 504, + 400 + ], + "type": "text", + "content": "Preliminaries: Wireless Channels Scattering, reflection and diffraction are among the main effects in electromagnetic propagation. A general mathematical description of a wireless channel, seen as linear time varying system, is given by its impulse response Tse & Viswanath (2005); Rappaport (1996). 
A general model can be written as (Samimi & Rappaport, 2016):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 172, + 403, + 504, + 428 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 403, + 504, + 428 + ], + "spans": [ + { + "bbox": [ + 172, + 403, + 504, + 428 + ], + "type": "interline_equation", + "content": "h (t, \\boldsymbol {\\Theta}, \\boldsymbol {\\Phi}) = \\sum_ {k} a _ {k} (t) \\delta \\left(t - \\tau_ {k} (t)\\right) \\delta \\left(\\boldsymbol {\\Theta} - \\boldsymbol {\\Theta} _ {k} (t)\\right) \\delta \\left(\\boldsymbol {\\Phi} - \\boldsymbol {\\Phi} _ {k} (t)\\right) \\tag {1}", + "image_path": "7b5403601dbf0a2ce4dcbbff224e7399279da2e99f94ac410cea5863019c357e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "inline_equation", + "content": "a_{k}(t)" + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "text", + "content": " is the complex gain, " + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "inline_equation", + "content": "\\tau_{k}(t)" + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "text", + "content": " is the delay (time-of-flight) of path " + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "inline_equation", + "content": "\\Theta_{k}(t)" + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "text", + "content": " is azimuth and elevation angle of departure (AoD), and " + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "inline_equation", + 
"content": "\\Phi_k(t)" + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "text", + "content": " is azimuth and elevation angle of arrival (AoA). Going forward, we use " + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "inline_equation", + "content": "\\phi_{k} = (\\Theta_{k},\\Phi_{k})" + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "text", + "content": " as a shorthand to collectively represent all angles. Intuitively equation 1, represents each path as a dirac function in time-angle space. The task of channel modeling can, therefore, be reduced to predicting channel attributes " + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "inline_equation", + "content": "(a_{k}(t),\\tau_{k}(t),\\phi_{k}(t))" + }, + { + "bbox": [ + 104, + 430, + 505, + 498 + ], + "type": "text", + "content": " for a given environment map, and a transmit and receive location. See Sec. A.1 for a detailed discussion." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "text", + "content": "Forward Model: render. The general goal of our forward model is to run a wireless ray simulation given a certain configuration of the propagation environment. 
More specifically, as shown in Figure 1, the model takes three configuration parameters as input: a 3D representation of the environment " + }, + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "text", + "content": " and the spatial co-ordinates of the transmitter " + }, + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "text", + "content": " and receiver " + }, + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 505, + 504, + 560 + ], + "type": "text", + "content": " devices. The model predicts the wireless scene as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 181, + 563, + 504, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 563, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 181, + 563, + 504, + 578 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {h}} = \\left\\{\\boldsymbol {u} \\right\\} _ {k = 1} ^ {K} = \\left\\{\\left(a _ {k}, \\tau_ {k}, \\phi_ {k}\\right) \\right\\} _ {k = 1} ^ {K} = \\operatorname {r e n d e r} _ {\\theta} \\left(\\boldsymbol {x} _ {\\mathrm {t x}}, \\boldsymbol {x} _ {\\mathrm {r x}}, \\boldsymbol {F}\\right) \\tag {2}", + "image_path": "92fceb9c71e808538dc63a4200ca44c46fe856df24388d2a2c9b1e2e36df1a3f.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "text", + "content": "where the output is a variably-sized set of " + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 
579, + 504, + 613 + ], + "type": "text", + "content": " paths. Each path " + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "inline_equation", + "content": "\\pmb{u}_k" + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "text", + "content": " encodes three channel attributes: gain " + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "inline_equation", + "content": "a_k" + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "text", + "content": ", time-of-flight " + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "inline_equation", + "content": "\\tau_k" + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "text", + "content": " and angles " + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "inline_equation", + "content": "\\phi_k" + }, + { + "bbox": [ + 104, + 579, + 504, + 613 + ], + "type": "text", + "content": ". With these predicted channel attributes, we can obtain a time-angle impulse response (the 'channel') to characterize the wireless propagation effects." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "text", + "content": "Key Idea: Implicit Representation Network " + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "text", + "content": ". 
Our approach recursively constructs the channel by using a learnt function " + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "inline_equation", + "content": "f_{\\theta}: F \\times \\mathbf{u}_{k}^{(r)} \\mapsto \\mathbf{u}_{k}^{(r+1)}" + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "text", + "content": " As shown in Figure 1, given an initial ray " + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{k}^{(r=0)}" + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "text", + "content": ", we model the final state as an evaluation of interactions that the ray undergoes with the environment " + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "text", + "content": ". Intuitively, " + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "text", + "content": " models the local interaction of any given ray " + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 621, + 505, + 691 + ], + "type": "text", + "content": " either in free-space, or in particular when it is incident on an interacting surface. In the latter case of ray-surface interaction, we leverage a co-ordinate MLP to predict the transformation (e.g., attenuation, rotation) to the incident ray." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "Representing Environment " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": ". We primarily focus on indoor propagation environments in this paper, where the environment is a 3D geometric representation. Specifically, we consider the environment represented as a 3D mesh composed of " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " faces and " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " vertices, where each face corresponds" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 79, + 504, + 175 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 504, + 175 + ], + "lines": [ + { + 
"bbox": [ + 107, + 79, + 504, + 175 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 504, + 175 + ], + "type": "image", + "image_path": "c66cddf7981830725b69b76efc859e0363ee0a2813c296272c06d0f3e7d2b001.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 183, + 504, + 217 + ], + "lines": [ + { + "bbox": [ + 104, + 183, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 183, + 504, + 217 + ], + "type": "text", + "content": "Figure 2: Renderer: Ray Marching Steps. At each step " + }, + { + "bbox": [ + 104, + 183, + 504, + 217 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 183, + 504, + 217 + ], + "type": "text", + "content": " of the simulation, we learn the transformation introduced on a ray " + }, + { + "bbox": [ + 104, + 183, + 504, + 217 + ], + "type": "inline_equation", + "content": "\\boldsymbol{u}_k^{(r)}" + }, + { + "bbox": [ + 104, + 183, + 504, + 217 + ], + "type": "text", + "content": " e.g., reflection off a particular surface. The final transformation is a result of learnt (green blocks) and non-learnable (blue blocks) evaluations." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 230, + 504, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 230, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 504, + 264 + ], + "type": "text", + "content": "to some surface on a wall. We consider a mesh structure with two subtleties: (a) we represent walls as a flattened polygon and thereby do not explicitly consider its thickness; and (b) we do not encode materials of the corresponding wall faces, but rather learn the properties implicitly from data." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 279, + 277, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 279, + 277, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 277, + 290 + ], + "type": "text", + "content": "3.1 OVERVIEW: NEURAL-renderING" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 300, + 484, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 484, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 484, + 313 + ], + "type": "text", + "content": "In this section, we present an overview of the three steps in our approach (as shown in Fig. 1)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "spans": [ + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "text", + "content": "Ray Launching. We begin by shooting out a fixed set of " + }, + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "text", + "content": " rays from the transmitter location " + }, + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "inline_equation", + "content": "\\pmb{x}_k^{(r = 0)}\\coloneqq \\pmb{x}_{\\mathrm{tx}}(\\forall k)" + }, + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "text", + "content": ". We launch the rays omni-directionally from the transmitter co-ordinate, agnostic to the environment and location of the receiver location. 
Direction " + }, + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "inline_equation", + "content": "\\pmb{d}_k^{(r = 0)}" + }, + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "text", + "content": " of each ray is oriented in the direction of a unique vertex of a ico-sphere centered at " + }, + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 320, + 504, + 382 + ], + "type": "text", + "content": ". We use the number of sub-divisions of the ico-sphere to trade-off between computational complexity and accuracy." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 390, + 504, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 390, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 390, + 504, + 456 + ], + "type": "text", + "content": "Ray Marching. The crux of our approach involves 'marching' the ray and accounting for interactions (e.g., transmission) with various surfaces of the environment. A key aspect here is using a neural network to make local evaluations: mapping an incident ray with some direction and power to an updated outgoing attenuated ray. The neural network is hence tasked to learn a complex nonlinear characterization of the surface materials at a spatial co-ordinate. We further elaborate on the ray marching procedure in the next section." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 464, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 510 + ], + "type": "text", + "content": "Ray Aggregation and Reception. 
Of the " + }, + { + "bbox": [ + 104, + 464, + 504, + 510 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 464, + 504, + 510 + ], + "type": "text", + "content": " rays launched from the ray launching step, we are now interested in the subset of the rays that impinges on the receiver. We model the reception sphere with a specific radius, which can be tuned to achieve a desired level of precision. To mitigate double-counting of received rays, we filter rays by associating them with a unique interaction path." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 525, + 203, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 203, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 203, + 536 + ], + "type": "text", + "content": "3.2 RAY MARCHING" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "text", + "content": "We now dive deeper into the ray marching step, which tracks the evaluation of each ray as it propagates in the environment and hits various surfaces. We walk through the steps as shown sequentially in Fig. 2. We begin with a set of geometric rays " + }, + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "inline_equation", + "content": "\\boldsymbol{u}_k^{(r = 0)}" + }, + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "text", + "content": ", originating at the transmitter co-ordinate " + }, + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "text", + "content": ". In addition to the channel attributes of each ray (see Eq. 
2), we also consider in this section an additional set of meta-attributes (e.g., origin " + }, + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_k" + }, + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "text", + "content": ", direction " + }, + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "inline_equation", + "content": "\\boldsymbol{d}_k" + }, + { + "bbox": [ + 104, + 546, + 504, + 616 + ], + "type": "text", + "content": ") that helps us with the ray marching steps (elaborated in Sec. A.2)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 624, + 504, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 674 + ], + "type": "text", + "content": "Ray-Environment Intersections. For each ray, we evaluate its first interaction with the environment (e.g., first wall it hits). Representing the ray geometry as " + }, + { + "bbox": [ + 104, + 624, + 504, + 674 + ], + "type": "inline_equation", + "content": "\\pmb{p}(t) = \\pmb{x}_k^{(r)} + t\\pmb{d}_k^{(r)}" + }, + { + "bbox": [ + 104, + 624, + 504, + 674 + ], + "type": "text", + "content": ", we are primarily interested in a solution " + }, + { + "bbox": [ + 104, + 624, + 504, + 674 + ], + "type": "inline_equation", + "content": "t > 0" + }, + { + "bbox": [ + 104, + 624, + 504, + 674 + ], + "type": "text", + "content": " for which the ray is incident on some surface. This location helps us determine the relay (i.e., new origin) " + }, + { + "bbox": [ + 104, + 624, + 504, + 674 + ], + "type": "inline_equation", + "content": "\\pmb{x}_k^{(r+1)}" + }, + { + "bbox": [ + 104, + 624, + 504, + 674 + ], + "type": "text", + "content": " for the subsequent step." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 681, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 504, + 734 + ], + "type": "text", + "content": "Ray-Surface Interaction. While the previous step solves for where the ray is incident in the environment, a crucial next step is determining attributes of the outgoing ray as a result of this interaction. We specifically focus on determining two attributes in this step: the new direction " + }, + { + "bbox": [ + 104, + 681, + 504, + 734 + ], + "type": "inline_equation", + "content": "\\pmb{d}_k^{(r+1)}" + }, + { + "bbox": [ + 104, + 681, + 504, + 734 + ], + "type": "text", + "content": " and gain " + }, + { + "bbox": [ + 104, + 681, + 504, + 734 + ], + "type": "inline_equation", + "content": "a_k^{(r+1)}" + }, + { + "bbox": [ + 104, + 681, + 504, + 734 + ], + "type": "text", + "content": ". Popular non-neural simulators, such as Remcom (2022), look-up frequency-" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", 
+ "content": "dependent material properties (e.g., conductivity, permittivity) at the incidence point from standard databases (ITU-R P.2040-2) to calculate the attributes of the outgoing ray. However, it is unclear how to calculate the attributes with imprecise knowledge of the surfaces (e.g., unknown thickness and material types of each layer in a wall) or when the material properties of a layer have not been previously empirically analyzed. Our solution is to instead predict the attributes using learnt network as a function of the incident location " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{x}_k^{(r + 1)}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " and direction " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{d}_k^{(r)}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " (see " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "f_{\\theta}^{1}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " in Fig. 2). The ray-surface interaction network " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "f_{\\theta}^{1}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " used in our experiments is a ReLU MLP with 3 layers (with 64-hidden units). Similar to NeRF (Mildenhall et al., 2020), we split the network into learning incident direction-independent and dependent features by concatenating direction " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{d}_k^{(r)}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " with bottlenecked outputs of the penultimate layer in the network (See Sec. A.3 fore more details). 
The network predicts an attenuation factor " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " and a rotation matrix " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\pmb{A}" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " (4-dim Euler-Rodrigues parameterization), which is then used to determine the updated gain " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "(a_k^{(r + 1)} = sa_k^{(r)})" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": " and direction " + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "inline_equation", + "content": "(\\pmb{d}_k^{(r + 1)} = \\pmb{A}\\pmb{d}_k^{(r)})" + }, + { + "bbox": [ + 104, + 82, + 506, + 228 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "type": "text", + "content": "Reception/Termination check. For some special cases, we halt ray marching for a subset of rays. Namely, when ray " + }, + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 232, + 506, + 289 + ], + "type": "text", + "content": " impinges on a reception sphere of a pre-specified radius (30cm in our experiments). This prevents a future version of the already received ray being potentially incorrectly received at a future iteration. In addition, for computation reasons, we also terminate ray marching if the ray exits the region of interest (e.g., ray exiting the environment)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "content": "Free-space interaction. While the previous steps modeled the interaction of material properties of the environment on wireless propagation, we now switch focus to free-space. In this case, we model propagation of a ray using the empirically-adjusted Friis' Equation: " + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "inline_equation", + "content": "P_r(d) = P_t G\\left(\\frac{d_0}{d}\\right)^{\\lambda}" + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "inline_equation", + "content": "d \\geq d_0" + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "content": ") which represents the power at the received at the receive antenna " + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "inline_equation", + "content": "P_r" + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "content": " as a function of the power fed into transmitting antenna " + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "inline_equation", + "content": "P_t" + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "content": ", and the distance travelled by the ray " + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "content": ". 
We learn the remaining scalar parameters " + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "content": " (antenna gain constant), " + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "content": " (attenuation factor), and " + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 104, + 295, + 506, + 368 + ], + "type": "text", + "content": " (reference distance)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 391, + 176, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 391, + 176, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 391, + 176, + 402 + ], + "type": "text", + "content": "3.3 TRAINING" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 415, + 505, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 505, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 505, + 462 + ], + "type": "text", + "content": "Over the previous sections, we walked through our approach on predicting a channel " + }, + { + "bbox": [ + 104, + 415, + 505, + 462 + ], + "type": "inline_equation", + "content": "\\hat{h} = \\mathrm{render}_{\\theta}(\\pmb{x}_{\\mathrm{tx}},\\pmb{x}_{\\mathrm{rx}},\\pmb{F})" + }, + { + "bbox": [ + 104, + 415, + 505, + 462 + ], + "type": "text", + "content": ". We train the model in a supervised setting, with ground-truth time-angle impulse response measurements. 
Importantly, we rely only on final measurements (i.e., at " + }, + { + "bbox": [ + 104, + 415, + 505, + 462 + ], + "type": "inline_equation", + "content": "r = R" + }, + { + "bbox": [ + 104, + 415, + 505, + 462 + ], + "type": "text", + "content": ") for training and do not use any intermediate information (e.g., interaction data through a ray tracer)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "text", + "content": "Set-based Channel Loss. We compare two sets of multi-path channels: predictions " + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{h}} = \\{\\hat{\\pmb{u}}_k\\}_{k=1}^K" + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "text", + "content": " and ground-truth " + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "inline_equation", + "content": "\\pmb{h} = \\{\\pmb{u}_l\\}_{l=1}^L" + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "text", + "content": " to provide a supervisory signal for training. 
We evaluate the set-based loss as: " + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{chan}}(\\pmb{h},\\hat{\\pmb{h}}) = \\sum_l d(\\pmb{u}_l,\\hat{\\pmb{u}}_{\\Pi(l)})" + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "text", + "content": ", which has two key ideas: (a) correspondence " + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "inline_equation", + "content": "\\Pi" + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "text", + "content": ": we associate each ground-truth path " + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "inline_equation", + "content": "\\pmb{u}_l" + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "text", + "content": " with a predicted path " + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{u}}_k = \\Pi(l)" + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "text", + "content": ". To perform such an association, we use direction-of-departure information and thereby pair paths launched in approximately the same direction; and (b) inter-path distance " + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "inline_equation", + "content": "d(\\pmb{u}_l,\\hat{\\pmb{u}}_k)" + }, + { + "bbox": [ + 104, + 469, + 504, + 564 + ], + "type": "text", + "content": ": to compare two paths, we use mean square error for scalar-valued attributes (e.g., time-of-flights) and cosine distances between angular-attributes (e.g., direction of arrival). For the latter, we represent angles as unit vectors in cartesian coordinates." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 571, + 504, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 504, + 628 + ], + "type": "text", + "content": "Training and Implementation Details. 
We train our approach for 100 epochs using Adam optimizer with a learning rate of " + }, + { + "bbox": [ + 104, + 571, + 504, + 628 + ], + "type": "inline_equation", + "content": "10^{-3}" + }, + { + "bbox": [ + 104, + 571, + 504, + 628 + ], + "type": "text", + "content": ". We found it crucial to not aggregate rays (Sec. 3.1) in the training steps, as it led to vanishing gradients due to negligible number of rays that contributed towards gradient updates. We model the reception sphere as a fixed-sized sphere of radius " + }, + { + "bbox": [ + 104, + 571, + 504, + 628 + ], + "type": "inline_equation", + "content": "30\\mathrm{cm}" + }, + { + "bbox": [ + 104, + 571, + 504, + 628 + ], + "type": "text", + "content": ". Additional implementation details are provided in Sec. C.4." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 655, + 257, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 257, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 257, + 667 + ], + "type": "text", + "content": "4 EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "In this section, we discuss experimental analysis of our neural simulator approach. We begin by discussing the preliminaries: the choice of datasets and the evaluation metrics to compare simulations. The section concludes by discussing overall performances and highlights certain benefits of neural simulations, such as running controllable simulations outside of training conditions." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 460, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 460, + 94 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 460, + 94 + ], + "type": "text", + "content": "4.1 EXPERIMENTAL SETUP: DATSETS, EVALUATION METRICS, AND BASELINES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 104, + 504, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 104, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 104, + 504, + 160 + ], + "type": "text", + "content": "We train and evaluate our algorithm using ground-truth data from wireless ray tracing packages. We collect two datasets, where each dataset contains channel measurements (i.e., gains, time-of-flight, angles) for different distributions of environments (e.g., floor layout). We keep the wireless configuration fixed to using omni-directional antennas at both the transmitter and receiver operating at a 3.5GHz carrier frequency. Now we further elaborate on the datasets used in our experiments." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "type": "text", + "content": "Dataset 1: WI3ROOMS. We create a synthetic dataset which gives us greater control over many aspects over the generation process. Using a " + }, + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "type": "inline_equation", + "content": "10\\mathrm{m} \\times 5\\mathrm{m} \\times 3\\mathrm{m}" + }, + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "type": "text", + "content": " hull, we randomly synthesize interior brick walls such that the eventual configuration consists of three rooms inter-connected with 1m doorways. We import the environment into an open-source wireless propagation toolbox (Amiot et al., 2013) and collect 41.6K channels, of which " + }, + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "type": "inline_equation", + "content": "\\sim 37\\%" + }, + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "type": "text", + "content": " of measurements are used for training." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 231, + 506, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 231, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 231, + 506, + 342 + ], + "type": "text", + "content": "Dataset 2: WIINDOOR. We use the indoor floorplans from the RPLAN dataset (Wu et al., 2019), which is popularly used to model indoor scenes (Nauata et al., 2020; 2021; Para et al., 2021). These layouts represent real-world single floor houses, with 4-8 rooms and " + }, + { + "bbox": [ + 104, + 231, + 506, + 342 + ], + "type": "inline_equation", + "content": "65 - 120\\mathrm{m}^2" + }, + { + "bbox": [ + 104, + 231, + 506, + 342 + ], + "type": "text", + "content": " areas. 
Each floorplan is further accompanied with room semantics such as whether a certain area is a living room, bed room, bathroom, etc. We use these semantics to selectively sample transmit/receiver locations (e.g., locations are not outside the boundary) and to determine wall materials (e.g., external facing walls are bricks, where as internal facing walls are dry plaster walls). We use a commercial ray tracer Remcom 'Wireless Inside' (Remcom, 2022) with ray tracer X3D to collect measurements in the RPLAN environment. Similar to the earlier dataset, we collect 42.5K measurements, of which " + }, + { + "bbox": [ + 104, + 231, + 506, + 342 + ], + "type": "inline_equation", + "content": "\\sim 36\\%" + }, + { + "bbox": [ + 104, + 231, + 506, + 342 + ], + "type": "text", + "content": " are used to train the model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "content": "Train and Test Regimes. 
For the training dataset, we collect measurements by sampling transmitter ('Tx') from " + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "inline_equation", + "content": "\\sim 10" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "content": " locations (XY plane at an elevation of " + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "inline_equation", + "content": "2.8\\mathrm{m}" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "content": ") and similarly, receiver ('Rx') from " + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "inline_equation", + "content": "60\\times 30" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "content": " locations (but with elevation of " + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "inline_equation", + "content": "2\\mathrm{m}" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "content": "). We then create three challenging test sets (see Fig. 
A2 for an illustration) with novel Tx-Rx locations: (a) Checkerboard: where train and test Rx locations form a checkerboard pattern on the same XY plane at " + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "inline_equation", + "content": "2\\mathrm{m}" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "content": " elevation; (b) Generalization-" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "content": ": where we move the test Rx locations in (a) to a novel elevation (" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "inline_equation", + "content": "z = 1.0\\mathrm{m}" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "content": " for ThreeRooms and " + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "inline_equation", + "content": "z = 2.5\\mathrm{m}" + }, + { + "bbox": [ + 104, + 349, + 506, + 439 + ], + "type": "text", + "content": " for RPLAN); and (c) Generalization-diag: where we sample test Rx locations on a diagonal XYZ plane. Such regimes let us validate the generalization performance under distribution shifts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 445, + 506, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 506, + 579 + ], + "type": "text", + "content": "Evaluation Metrics. We consider three evaluation metrics to evaluate our approach: (i) Overall prediction error ('Overall'): We follow a similar formulation as our loss (Sec. 3.3) with one key difference - we find correspondences " + }, + { + "bbox": [ + 104, + 445, + 506, + 579 + ], + "type": "inline_equation", + "content": "\\Pi" + }, + { + "bbox": [ + 104, + 445, + 506, + 579 + ], + "type": "text", + "content": " by solving a linear-sum assignment problem. 
The eventual error aggregates all attributes relevant for the path (e.g., gain, angles). Intuitively, this measures the distance between two sets (sets of multi-dim paths in our case), using a similar metric common in set prediction tasks (Fan et al., 2017; Zhang et al., 2019). (ii) Geometry prediction error ('Geometry'): We follow a formulation similar to (i), but now focus on two specific features that captures the geometrical accuracy of the path - time-of-flight and angles at departure and arrival. Intuitively, this metric measures whether the predicted rays take the same GT route between the transmit and receive co-ordinates. (iii) Average Delay Time - MAE ('AvgDelay'): We average the time-of-flights " + }, + { + "bbox": [ + 104, + 445, + 506, + 579 + ], + "type": "inline_equation", + "content": "\\tau_{k}" + }, + { + "bbox": [ + 104, + 445, + 506, + 579 + ], + "type": "text", + "content": " per path of the channel, weighted by its linear power " + }, + { + "bbox": [ + 104, + 445, + 506, + 579 + ], + "type": "inline_equation", + "content": "p(a_{k})" + }, + { + "bbox": [ + 104, + 445, + 506, + 579 + ], + "type": "text", + "content": ". We report the mean absolute error of average delays between the predicted and ground-truth channel attributes." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": "Baselines. 
We propose two reference baselines (i) " + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": "-NN (with " + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "inline_equation", + "content": "k = 1" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": "): which predicts the channel, given the closest match to the input spatial co-ordinates in terms of Euclidean distance (ii) MLP: A geometry-oblivious MLP regressor with 3-hidden layers, each with 128 units. We train the MLP using the same loss as WiNeRT. Additional details of the baselines are provided in Sec. C.4." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 647, + 216, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 216, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 216, + 658 + ], + "type": "text", + "content": "4.2 OVERALL RESULTS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 667, + 504, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 504, + 691 + ], + "type": "text", + "content": "In this section, we present the overall qualitative and quantitative results of our approach. We complement the overall performances with additional analysis in the next section." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": "Quantitative Results. We report the quantitative results for the two datasets (column groups) and three test sets (row groups) in Table 1. 
We observe from the table: (a) by focusing on the overall errors, we find WiNeRT generally outperforms all baselines, with a significant average decrease of" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 79, + 514, + 229 + ], + "blocks": [ + { + "bbox": [ + 105, + 79, + 514, + 229 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 514, + 229 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 514, + 229 + ], + "type": "table", + "html": "
WI3ROOMSWIINDOOR
OverallGeometryAvgDelayOverallGeometryAvgDelay
checkerboardkNN0.2320.2122.2380.4120.3962.484
MLP0.2870.3302.0510.3730.3991.745
WiNeRT0.2020.0872.0290.2370.2071.546
gen-zkNN0.2530.2262.0330.4240.4282.487
MLP0.2970.3501.7970.3880.4211.969
WiNeRT0.2170.0841.5220.2850.2501.839
gen-diagkNN0.2520.2132.1180.3800.2511.377
MLP0.3120.3221.8890.3900.3151.513
WiNeRT0.2290.0851.7920.3690.1700.828
", + "image_path": "70f353f5c6ec26985c2cb2ad9ada986cd5a41158c5e40f92fa5150e8cd879689.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 236, + 504, + 258 + ], + "lines": [ + { + "bbox": [ + 105, + 236, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 504, + 258 + ], + "type": "text", + "content": "Table 1: Quantitative Results. Comparing errors of our approach (WiNeRT) with baselines, over two datasets (column groups) and three test regimes (row groups). Lower values are better and the lowest errors are in bold." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 108, + 265, + 504, + 316 + ], + "blocks": [ + { + "bbox": [ + 108, + 265, + 504, + 316 + ], + "lines": [ + { + "bbox": [ + 108, + 265, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 108, + 265, + 504, + 316 + ], + "type": "image", + "image_path": "82c9e812e95d9f9b9175609d86e7873e3d6b9ba3e2a43b8d65695e54dc393076.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 326, + 504, + 357 + ], + "lines": [ + { + "bbox": [ + 104, + 326, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 504, + 357 + ], + "type": "text", + "content": "Figure 3: Receive Powers. By fixing the transmit location " + }, + { + "bbox": [ + 104, + 326, + 504, + 357 + ], + "type": "inline_equation", + "content": "(x_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 326, + 504, + 357 + ], + "type": "text", + "content": ", red cross), we measure the receive power (color at each point; in dB) predicted at each location in W13ROOMS dataset. kNN and MLP suffer from memorization and falsely predict highest receive powers around phantom transmit locations (purple star)." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 362, + 506, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 362, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 362, + 506, + 561 + ], + "type": "text", + "content": "-0.071 points compared to kNN and -0.085 with MLP; (b) WiNeRT is especially strong in capturing the geometry (e.g., 59-63% drop in errors w.r.t second best on WI3ROOMS) of the environment, which can be likely attributed to a strong inductive bias enforced by decoupling global rendering from local evaluations; (c) Although WiNeRT has reasonable performance in capturing the average delays, the performance gap here (e.g., 1-15% reduction in errors on WI3ROOMS) is not especially large compared to other metrics. We attribute this to contributions from 'false positive' rays with non-negligible power arising from our dense ray-launching technique. (d) The contributions of false positives can be mitigated by using a more sophisticated ray launching technique. For instance, by piggybacking on ray launch directions from GT channels, we can significantly improve performances across all metrics e.g., from 1-15% error reduction to 15-20% reduction in average delays on WI3ROOMS; (e) Overall, we attribute the underperformance of the baselines to poor generalization performance. For instance, in Figure 3, we illustrate the receive powers (in dB) predicted by all approaches in WI3ROOMS, for some placement of the transmitter (red cross in top-right room). We observe in this particular case that the high-power areas in the kNN and MLP baselines are predicted for a false phantom location (purple star), which roughly corresponds to a transmitter location in training set. This contrasts predictions by WiNeRT where the high-power areas are correctly concentrated around the transmitter location. 
As a result, we find that simple baselines find it challenging to generalize to new unseen spatial co-ordinates at inference time." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 568, + 504, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 504, + 669 + ], + "type": "text", + "content": "Qualitative Results. We complement the previous quantitative discussions with observations drawn from qualitative analysis. WiNeRT particularly helps for this analysis, as we can recover intermediate ray-environment interaction information. From qualitative examples shown in Fig. 4(a, b), we draw some observations: (a) WiNeRT surprisingly learns ray-surface interactions implicitly, without any direct supervision. For instance, we observe multiple reflected paths between Tx and Rx; (b) we also find that our predictions (red rays) are generally consistent with the underlying simulation process (green rays) e.g., reflections from adjacent walls, floor and ceiling; and (c) we notice WiNeRT sometimes predicts false positives (e.g., above " + }, + { + "bbox": [ + 104, + 568, + 504, + 669 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 568, + 504, + 669 + ], + "type": "text", + "content": " in Fig. 4b), which we attribute to dense omni-directional ray launching." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 687, + 176, + 698 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 176, + 698 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 176, + 698 + ], + "type": "text", + "content": "4.3 ANALYSIS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "In the previous section, we evaluated the overall performance of WiNeRT and found promising results. Now, we take a closer look at our approach and investigate generalization benefits." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 79, + 234, + 174 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 234, + 174 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 234, + 174 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 234, + 174 + ], + "type": "image", + "image_path": "c91a7eac1d49b21c068572f1193df77bdfc95a1dade31e322c41275a600dfd7c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 141, + 177, + 199, + 187 + ], + "lines": [ + { + "bbox": [ + 141, + 177, + 199, 
+ 187 + ], + "spans": [ + { + "bbox": [ + 141, + 177, + 199, + 187 + ], + "type": "text", + "content": "(a) W13ROOMS" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 79, + 369, + 173 + ], + "blocks": [ + { + "bbox": [ + 242, + 79, + 369, + 173 + ], + "lines": [ + { + "bbox": [ + 242, + 79, + 369, + 173 + ], + "spans": [ + { + "bbox": [ + 242, + 79, + 369, + 173 + ], + "type": "image", + "image_path": "731e290a7acccc58811301e225652e98bf744655a7ca5d1725917766c3781cc0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 277, + 177, + 333, + 187 + ], + "lines": [ + { + "bbox": [ + 277, + 177, + 333, + 187 + ], + "spans": [ + { + "bbox": [ + 277, + 177, + 333, + 187 + ], + "type": "text", + "content": "(b) WIINDOOR" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 376, + 80, + 504, + 173 + ], + "blocks": [ + { + "bbox": [ + 376, + 80, + 504, + 173 + ], + "lines": [ + { + "bbox": [ + 376, + 80, + 504, + 173 + ], + "spans": [ + { + "bbox": [ + 376, + 80, + 504, + 173 + ], + "type": "image", + "image_path": "6b0ea87aa2f04037c201089d97969dcb9a3368cc59650405f96d911f1ce450bd.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 392, + 177, + 488, + 187 + ], + "lines": [ + { + "bbox": [ + 392, + 177, + 488, + 187 + ], + "spans": [ + { + "bbox": [ + 392, + 177, + 488, + 187 + ], + "type": "text", + "content": "(c) WI3ROOMS (novel " + }, + { + "bbox": [ + 392, + 177, + 488, + 187 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 392, + 177, + 488, + 187 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 106, + 222, + 223, + 308 + ], + "blocks": [ + { + "bbox": [ + 104, + 
197, + 504, + 218 + ], + "lines": [ + { + "bbox": [ + 104, + 197, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 104, + 197, + 504, + 218 + ], + "type": "text", + "content": "Figure 4: Qualitative results. (a, b) Evaluation on WiNeRT on the environment seen during training. (c) We use the previously trained model and re-render on a re-configured floormap " + }, + { + "bbox": [ + 104, + 197, + 504, + 218 + ], + "type": "inline_equation", + "content": "\\pmb{F}" + }, + { + "bbox": [ + 104, + 197, + 504, + 218 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 222, + 223, + 308 + ], + "lines": [ + { + "bbox": [ + 106, + 222, + 223, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 222, + 223, + 308 + ], + "type": "image", + "image_path": "1651dd31070aa5e21b8d79dc8134174fb51eab246b7281438d0222fabfd0416e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 120, + 311, + 203, + 319 + ], + "lines": [ + { + "bbox": [ + 120, + 311, + 203, + 319 + ], + "spans": [ + { + "bbox": [ + 120, + 311, + 203, + 319 + ], + "type": "text", + "content": "(a) Ray-surface interactions" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 242, + 222, + 359, + 308 + ], + "blocks": [ + { + "bbox": [ + 242, + 222, + 359, + 308 + ], + "lines": [ + { + "bbox": [ + 242, + 222, + 359, + 308 + ], + "spans": [ + { + "bbox": [ + 242, + 222, + 359, + 308 + ], + "type": "image", + "image_path": "824fe270b736efbd9bf1c9a551cb2d8cef483220d5e298f254791c2b25fc139a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 255, + 311, + 334, + 318 + ], + "lines": [ + { + "bbox": [ + 255, + 311, + 334, + 318 + ], + "spans": [ + { + "bbox": [ + 255, + 311, + 334, + 318 + ], + "type": "text", + "content": "(b) Attenuation: Reflection" + } + ] + } + ], + "index": 
11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 373, + 223, + 504, + 309 + ], + "blocks": [ + { + "bbox": [ + 373, + 223, + 504, + 309 + ], + "lines": [ + { + "bbox": [ + 373, + 223, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 373, + 223, + 504, + 309 + ], + "type": "image", + "image_path": "9433920fbba857bd026cb3caba22ad5b78a61493f516bd5b498209881b49808f.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 384, + 311, + 471, + 319 + ], + "lines": [ + { + "bbox": [ + 384, + 311, + 471, + 319 + ], + "spans": [ + { + "bbox": [ + 384, + 311, + 471, + 319 + ], + "type": "text", + "content": "(c) Attenuation: Transmission" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 327, + 504, + 359 + ], + "lines": [ + { + "bbox": [ + 104, + 327, + 504, + 359 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 504, + 359 + ], + "type": "text", + "content": "Figure 5: Evaluating Ray-surface interaction MLP. We display a cut-out of the 3ROOMS represented as a wireframe, with a specific focus on a particular wall. (a) We find a train-test distribution shift of ray-surface incidence points (b, c) Evaluation of the MLP at various incidence points." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": "What does the ray-surface interaction learn? We begin by investigating the ray-surface network " + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "(f_{\\theta}^{1}" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": " in Fig. 2) in isolation. 
The network is tasked to map an incident ray (gain " + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "a_{\\mathrm{in}}" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": ", direction " + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{in}}" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": ") to an outgoing ray " + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "(a_{\\mathrm{out}}, d_{\\mathrm{out}})" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": ". To accurately make this prediction, the network needs to learn direction- and material-dependent properties at the incident location " + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{inc}}" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": ", which poses two challenges. First, the network does not have explicit supervision to learn these properties. Rather, the network needs to implicitly learn these properties by optimizing over a number of channel measurements. Second, specific to our case, the measurements collected involve sparse ray-surface interactions i.e., in practise we cannot expect for paths in the training measurements to interact densely with all possible surfaces. For instance, consider Fig. 5a, which show the incident points " + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{inc}}" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": " for a particular wall (black edges) that we recover from the underlying ray tracing tool. 
Here, we observe that the implicit training set interactions (red markers; never used during our training) are localized to a " + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "\\sim 50\\mathrm{cm}" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": " band (" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": " area of the wall). However, at test-time, the network is tasked to generalize to interactions for a different distribution of incidence points (purple markers). In spite of the challenges we find the ray-surface network associates meaningful information to surface co-ordinates. For instance, we show the attenuation factor predicted for the reflected (Fig. 5b) and transmitted co-ordinates (Fig. 5c) for rays arriving from a fixed " + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": " co-ordinate (placed at " + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "inline_equation", + "content": "x = 8\\mathrm{m}" + }, + { + "bbox": [ + 104, + 373, + 506, + 573 + ], + "type": "text", + "content": "). We find that the network learns a smooth material- and direction-dependent function over the surface. Over the next experiments, we exploit these locally learnt properties and evaluate WiNeRT rendering in novel scenarios." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "text", + "content": "Controllable synthesis: Predicting in Novel Environment Configurations. 
The previous experiments focused on evaluating approaches for novel locations of transmit and receive co-ordinates at simulation time. Now, we consider novel test-time environments by simulating approaches on re-configured layouts " + }, + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "inline_equation", + "content": "\\pmb{F}^{\\prime}" + }, + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "text", + "content": " of the train-time environment " + }, + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "inline_equation", + "content": "\\pmb{F}" + }, + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "text", + "content": ", such as by randomly editing placement of interior walls. Overall, we find that WiNeRT remarkably extrapolates to the reconfigured environment, with the overall error unchanged with WiNeRT (0.202 on " + }, + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "inline_equation", + "content": "\\pmb{F}" + }, + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "text", + "content": " vs. 0.203 on " + }, + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "inline_equation", + "content": "\\pmb{F}^{\\prime}" + }, + { + "bbox": [ + 104, + 580, + 504, + 691 + ], + "type": "text", + "content": "; more results in Table A2). Furthermore, by observing the results qualitatively in Figure 4c, we find the predicted interactions remain consistent with the ground-truth simulated rays in novel environment configurations. This is particularly appealing as for simulation use-cases which require modelling dynamic objects (e.g., moving vehicle), as WiNeRT allows re-configuring environment without retraining." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Controllable synthesis: Simulating Higher-order Interactions. In this experiment, we evaluate the ability of approaches to generalize to different numbers of interactions (denoted by " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": " in Sec. 3) at inference time. With WiNeRT, we have the ability to control the number of interactions at" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": "test-time (i.e., by unrolling " + }, + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": " for fewer or more steps). 
We briefly summarize our observations here (see Table A4 for more details). WiNeRT exhibits promising results: while the baselines struggle with a simpler task of lower-order interactions (e.g., 0.22-0.58 overall errors at " + }, + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "inline_equation", + "content": "r = 0" + }, + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": "), WiNeRT's performance improves (from 0.20 to 0.12). A better performance is natural in this particular setting, since the model is required to perform an easier task than original (predicting only line-of-sight component). For higher-order interactions, we observe performances of all approaches degrades, but WiNeRT outperforms the baselines. In particular, even at " + }, + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "inline_equation", + "content": "r = 3" + }, + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": ", we find the geometric-errors of WiNeRT (0.27) comparable to baselines in their originally trained setting (" + }, + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "inline_equation", + "content": "r = 1" + }, + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": ", 0.21-0.33 errors)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "text", + "content": "How fast are the simulations? We investigate the wall-clock simulation times of WiNeRT and baselines and compare them with wireless ray tracers. In the specific case of WiNeRT, we have some control over the time-accuracy trade-offs at test-time by varying the density of initial rays launched (see Sec. 3.1). 
Overall, we find that WiNeRT demonstrates speed-ups of " + }, + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "inline_equation", + "content": "11 - 22 \\times" + }, + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "text", + "content": " over PyLayers and " + }, + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "inline_equation", + "content": "6 - 22 \\times" + }, + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "text", + "content": " over Wireless Inside. While the baselines are even faster (" + }, + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "inline_equation", + "content": "538 - 687 \\times" + }, + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "text", + "content": " with MLP and " + }, + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "inline_equation", + "content": "79 - 97 \\times" + }, + { + "bbox": [ + 104, + 178, + 506, + 268 + ], + "type": "text", + "content": " with kNN), it is achieved at the price of higher errors and poor generalization capabilities (Sec. 4.2). Overall, we find WiNeRT presents reasonable time-accuracy trade-offs compared to baselines. See Sec. C.2 for additional details." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "text", + "content": "Exploiting differentiability: User Localization via inverse (differentiable) rendering. Over the previous sections we focused on forward simulations. 
Now, we study a proof-of-concept for leveraging our differentiable simulator for inverse problems, such as for user localization: determining user location " + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "text", + "content": " from an observed channel " + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{obs}}" + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "text", + "content": ". We solve for " + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "text", + "content": ", by performing gradient on spatial coordinate " + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}" + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "text", + "content": " that minimizes the channel loss " + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "inline_equation", + "content": "\\text{render}_{\\theta}(\\boldsymbol{x}_{\\mathrm{tx}}, \\boldsymbol{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}, \\boldsymbol{F}_i)" + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "text", + "content": ". This is possible with WiNeRT, since we can backpropagate through the neural simulation of the channel. 
We evaluate over 100 test examples and find encouraging results, with a median error of 0.58m in WI3ROOMS (a " + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "inline_equation", + "content": "150\\mathrm{m}^3" + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "text", + "content": " volume) and 1.21m in WIINDOOR (a " + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "inline_equation", + "content": "300\\mathrm{m}^3" + }, + { + "bbox": [ + 104, + 275, + 506, + 365 + ], + "type": "text", + "content": " volume). See Sec. C.4 for more details." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 381, + 397, + 394 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 381, + 397, + 394 + ], + "spans": [ + { + "bbox": [ + 105, + 381, + 397, + 394 + ], + "type": "text", + "content": "5 CONCLUSION, LIMITATIONS, AND BROADER IMPACT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 407, + 506, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 506, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 506, + 496 + ], + "type": "text", + "content": "In this paper, we proposed the first neural forward model for wireless ray tracing-based simulations. Such models are particularly appealing as they help alleviate some drawbacks of classical non-neural simulators (e.g., better handling model-measurement mismatches, non-differentiability). Towards this goal, we proposed WiNeRT which tasks an MLP to learn how surfaces in a 3D environment influence propagation of wireless rays, such as by predicting attenuation factor of a reflective component. Overall, we find promising results indicating neural simulators closely capture propagation effects. As neural simulators are additionally differentiable, we further show that they can be used to optimize inverse problems such as user localization." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 503, + 506, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 503, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 104, + 503, + 506, + 636 + ], + "type": "text", + "content": "Limitations and Future Work. This paper presents the first step towards realizing a neural surrogate for simulating propagation of wireless rays. While we find promising results – in terms of empirically mimicking the simulator's performance while simultaneously reducing complexity – many important steps remain to realize our over-arching goal of differentiable wireless ray tracing. Our approach is designed to capture linear effects of the channel in line with standards (3GPP TR 38.901; ITU-R P.2040-2) and extending to non-linear effects (e.g., amplifier saturations) remains an open-problem. Additionally, while our focus is primarily reflection and transmission properties of ray-surface interactions (capturing majority of receive power) which are increasingly relevant for high-frequency transmissions, other properties (e.g., scattering, diffraction) require investigation to model simulations across a wider radio-frequency spectrum. Finally, our surrogate's performance is currently upper-bounded by the underlying simulator's performance, motivating studies into learnt calibration of the surrogate model with real-world measurement data to bypass simulation accuracy." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 643, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 734 + ], + "type": "text", + "content": "Broader Technical Impact. Although our paper focuses on neural simulation of EM waves in the radio-frequency spectrum (0.5-100 GHz), we believe working towards this goal complements research in non-radio modalities as well. 
For instance, to model propagation of acoustic signals in spatial environments, estimating material-dependent ray-surface interactive properties remains a challenging problem and the proposed research direction potentially complements existing techniques. More generally, we believe that as radio signals require modelling both ray (e.g., reflection) and physical optic (e.g., interference, diffraction) properties, advances here are intertwined with many modalities across the EM spectrum (e.g., audio, visual)." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 241, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 241, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 241, + 94 + ], + "type": "text", + "content": "REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 169 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 169 + ], + "type": "text", + "content": "To ensure reproducibility, we take a number of steps. On the dataset side, we use either publicly available indoor layouts (e.g., RPLAN) or synthetically generate layouts with known random seeds (0 and 10 in our case). 
We further elaborate on the simulation settings to recreate our dataset in Section 4.1 and Section B. We plan to release the simulated data measurements. On the implementation side, we provide specific training details in Section C.4 and further elaborate on the detailed architecture in Section A.3." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 183, + 195, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 183, + 195, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 195, + 194 + ], + "type": "text", + "content": "ETHICS STATEMENT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 204, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 504, + 239 + ], + "type": "text", + "content": "The data used in our paper corresponds to simulated data of physical processes (EM wave propagation). Since this does not involve any human subjects or personally identifiable information, we believe there is no conflict in this regard." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 251, + 201, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 201, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 201, + 262 + ], + "type": "text", + "content": "ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "type": "text", + "content": "We thank Hanno Ackermann for discussions and feedback on the paper. We additionally thank numerous colleagues for insightful discussions: Thomas Hehn, Fabio Valerio Massoli, Maziar Raissi, Afshin Abdi, June Namgoong, Taesang Yoo, and Akash Doshi." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 322, + 175, + 334 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 322, + 175, + 334 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 175, + 334 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 340, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 106, + 340, + 505, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 340, + 505, + 364 + ], + "spans": [ + { + "bbox": [ + 106, + 340, + 505, + 364 + ], + "type": "text", + "content": "3GPP TR 38.901. Study on channel model for frequencies from 0.5 to 100 ghz. Standard, 3GPP, Valbonne, FR, March 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 372, + 505, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 372, + 505, + 395 + ], + "spans": [ + { + "bbox": [ + 107, + 372, + 505, + 395 + ], + "type": "text", + "content": "Nicolas Amiot, Mohamed Laaraiedh, and Bernard Uguen. Pylayers: An open source dynamic simulator for indoor propagation and localization. In ICC, 2013." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 403, + 505, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 403, + 505, + 436 + ], + "spans": [ + { + "bbox": [ + 106, + 403, + 505, + 436 + ], + "type": "text", + "content": "Franck Djeumou, Cyrus Neary, Eric Goubault, Sylvie Putot, and Ufuk Topcu. Neural networks with physics-informed architectures and constraints for dynamical systems modeling. In Learning for Dynamics and Control Conference. PMLR, 2022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 444, + 505, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 444, + 505, + 477 + ], + "spans": [ + { + "bbox": [ + 107, + 444, + 505, + 477 + ], + "type": "text", + "content": "Sebastian Dorner, Marcus Henninger, Sebastian Cammerer, and Stephan ten Brink. Wgan-based autoencoder training over-the-air. In IEEE International Workshop on Signal Processing Advances in Wireless Communications, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 486, + 480, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 486, + 480, + 498 + ], + "spans": [ + { + "bbox": [ + 107, + 486, + 480, + 498 + ], + "type": "text", + "content": "Robert A Drebin, Loren Carpenter, and Pat Hanrahan. Volume rendering. SIGGRAPH, 1988." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 506, + 504, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 506, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 107, + 506, + 504, + 529 + ], + "type": "text", + "content": "Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3d object reconstruction from a single image. In CVPR, 2017." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 536, + 504, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 536, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 107, + 536, + 504, + 559 + ], + "type": "text", + "content": "Rizal Fathony, Anit Kumar Sahu, Devin Willmott, and J Zico Kolter. Multiplicative filter networks. In ICLR, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 567, + 422, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 567, + 422, + 578 + ], + "spans": [ + { + "bbox": [ + 107, + 567, + 422, + 578 + ], + "type": "text", + "content": "Andrew S. Glassner. An introduction to ray tracing. 
Morgan Kaufmann, 1989." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 586, + 504, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 586, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 107, + 586, + 504, + 609 + ], + "type": "text", + "content": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, 2014." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 617, + 504, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 617, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 107, + 617, + 504, + 651 + ], + "type": "text", + "content": "Fumio Ikegami, Tsutomu Takeuchi, and Susumu Yoshida. Theoretical prediction of mean field strength for urban mobile radio. IEEE Transactions on Antennas and Propagation, 39(3):299-302, 1991." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 659, + 504, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 659, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 107, + 659, + 504, + 681 + ], + "type": "text", + "content": "ITU-R P.2040-2. Effects of building materials and structures on radiowave propagation above about " + }, + { + "bbox": [ + 107, + 659, + 504, + 681 + ], + "type": "inline_equation", + "content": "100\\mathrm{mhz}" + }, + { + "bbox": [ + 107, + 659, + 504, + 681 + ], + "type": "text", + "content": ". Standard, International Telecommunication Union, Geneva, CH, September 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 689, + 504, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 504, + 701 + ], + "type": "text", + "content": "William C. Jakes and Donald C. Cox. Microwave mobile communications. Wiley-IEEE press, 1994." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 709, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 709, + 504, + 732 + ], + "type": "text", + "content": "George Em Karniadakis, Ioannis G. Kevrekidis, Lu Lu, Paris Perdikaris, Sifan Wang, and Liu Yang. Physics-informed machine learning. Nature Reviews Physics, 3(6):422-440, June 2021." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 105 + ], + "type": "text", + "content": "Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 504, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 504, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 504, + 134 + ], + "type": "text", + "content": "William C. Y. Lee. 
Mobile communications engineering. McGraw-Hill, 1982. ISBN 978-0-07-037039-5." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 141, + 504, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 141, + 504, + 164 + ], + "spans": [ + { + "bbox": [ + 106, + 141, + 504, + 164 + ], + "type": "text", + "content": "Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In ICCV, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 171, + 504, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 171, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 504, + 194 + ], + "type": "text", + "content": "J.W. McKown and R.L. Hamilton. Ray tracing as a design tool for radio networks. IEEE Network, 5(6):27-30, November 1991." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 200, + 504, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 200, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 107, + 200, + 504, + 224 + ], + "type": "text", + "content": "Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 229, + 504, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 229, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 106, + 229, + 504, + 262 + ], + "type": "text", + "content": "Nelson Nauata, Kai-Hung Chang, Chin-Yi Cheng, Greg Mori, and Yasutaka Furukawa. Housegan: Relational generative adversarial networks for graph-constrained house layout generation. In ECCV, 2020." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 270, + 504, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 504, + 304 + ], + "type": "text", + "content": "Nelson Nauata, Sepidehsadat Hosseini, Kai-Hung Chang, Hang Chu, Chin-Yi Cheng, and Yasutaka Furukawa. House-gan++: Generative adversarial layout refinement network towards intelligent computational agent for professional architects. In CVPR, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 310, + 504, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 310, + 504, + 334 + ], + "spans": [ + { + "bbox": [ + 105, + 310, + 504, + 334 + ], + "type": "text", + "content": "Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 340, + 504, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 340, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 504, + 363 + ], + "type": "text", + "content": "Tribhuvanesh Orekondy, Arash Behboodi, and Joseph B Soriaga. Mimo-gan: Generative mimo channel modeling. In IEEE ICC, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 370, + 504, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 370, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 370, + 504, + 403 + ], + "type": "text", + "content": "Timothy J O'Shea, Tamoghna Roy, and Nathan West. Approximating the void: Learning stochastic channel models from observation with variational generative adversarial networks. In ICNC, 2019." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 410, + 504, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 504, + 434 + ], + "type": "text", + "content": "Wamiq Para, Paul Guerrero, Tom Kelly, Leonidas J Guibas, and Peter Wonka. Generative layout modeling using constraint graphs. In CVPR, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 440, + 504, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 440, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 504, + 473 + ], + "type": "text", + "content": "Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In CVPR, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 480, + 504, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 480, + 504, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 504, + 504 + ], + "type": "text", + "content": "Tobias Pfaff, Meire Fortunato, Alvaro Sanchez-Gonzalez, and Peter W Battaglia. Learning mesh-based simulation with graph networks. arXiv preprint arXiv:2010.03409, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 510, + 504, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 504, + 544 + ], + "type": "text", + "content": "Maziar Raissi, Paris Perdikaris, and George Em Karniadakis. Physics informed deep learning (part i): Data-driven solutions of nonlinear partial differential equations. arXiv preprint arXiv:1711.10561, 2017." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 550, + 504, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 550, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 504, + 573 + ], + "type": "text", + "content": "Theodore S. Rappaport. Wireless communications: principles and practice, volume 2. prentice hall PTR New Jersey, 1996." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 579, + 504, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 579, + 504, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 504, + 604 + ], + "type": "text", + "content": "Theodore S Rappaport, Kate A Remley, Camillo Gentile, Andreas F Molisch, and Alenka Zajic. Radio Propagation Measurements and Channel Modeling. Cambridge University Press, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 609, + 504, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 504, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 504, + 632 + ], + "type": "text", + "content": "Remcom. Wireless insite, 2022. URL https://www.remcom.com/ wireless-insite-em-propagation-software." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 639, + 504, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 504, + 672 + ], + "type": "text", + "content": "Mathew K. Samimi and Theodore S. Rappaport. 3-D millimeter-wave statistical channel model for 5G wireless system design. IEEE Transactions on Microwave Theory and Techniques, 64(7): 2207-2225, 2016." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "type": "text", + "content": "Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia. Learning to simulate complex physics with graph networks. In ICML, 2020." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "text", + "content": "Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. NeurIPS, 2020." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 570 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + "content": "Sergey 
Shirobokov, Vladislav Belavin, Michael Kagan, Andrei Ustyuzhanin, and Atilim Gunes Baydin. Black-box optimization with local generative surrogates. In NeurIPS, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "type": "text", + "content": "Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. NeurIPS, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 140, + 505, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 505, + 166 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 505, + 166 + ], + "type": "text", + "content": "Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. NeurIPS, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 171, + 505, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 171, + 505, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 505, + 196 + ], + "type": "text", + "content": "Xingyuan Sun, Tianju Xue, Szymon Rusinkiewicz, and Ryan P Adams. Amortized synthesis of constrained configurations using a differentiable surrogate. NeurIPS, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 201, + 505, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 505, + 236 + ], + "type": "text", + "content": "Matthew Tancik, Pratul Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan Barron, and Ren Ng. 
Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 242, + 505, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 505, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 505, + 266 + ], + "type": "text", + "content": "David Tse and Pramod Viswanath. Fundamentals of wireless communication. Cambridge university press, 2005." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 272, + 505, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 272, + 505, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 505, + 306 + ], + "type": "text", + "content": "Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T Barron, and Pratul P Srinivasan. Ref-nerf: Structured view-dependent appearance for neural radiance fields. In CVPR, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 312, + 505, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 312, + 505, + 337 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 505, + 337 + ], + "type": "text", + "content": "Joram Walfisch and Henry L. Bertoni. A theoretical model of UHF propagation in urban environments. IEEE Transactions on antennas and propagation, 36(12):1788-1796, 1988." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 342, + 505, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 505, + 376 + ], + "type": "text", + "content": "Wenming Wu, Xiao-Ming Fu, Rui Tang, Yuhan Wang, Yu-Hao Qi, and Ligang Liu. Data-driven interior plan generation for residential buildings. ACM Transactions on Graphics (TOG), 38(6): 1-12, 2019." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 383, + 505, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 383, + 505, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 383, + 505, + 418 + ], + "type": "text", + "content": "William Xia, Sundeep Rangan, Marco Mezzavilla, Angel Lozano, Giovanni Geraci, Vasilii Semkin, and Giuseppe Loianno. Millimeter wave channel modeling via generative neural networks. In 2020 IEEE Globecom Workshops, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 424, + 505, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 424, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 424, + 505, + 459 + ], + "type": "text", + "content": "Yang Yang, Yang Li, Wuxiong Zhang, Fei Qin, Pengcheng Zhu, and Cheng-Xiang Wang. Generative-adversarial-network-based wireless channel modeling: Challenges and opportunities. IEEE Communications Magazine, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 465, + 505, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 465, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 465, + 505, + 499 + ], + "type": "text", + "content": "Hao Ye, Geoffrey Ye Li, Biing-Hwang Fred Juang, and Kathiravetpillai Sivanesan. Channel agnostic end-to-end learning based communication systems with conditional gan. In IEEE Globecom Workshops, 2018." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 506, + 505, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 505, + 540 + ], + "type": "text", + "content": "Hao Ye, Le Liang, Geoffrey Ye Li, and Biing-Hwang Juang. Deep learning-based end-to-end wireless communication systems with conditional gans as unknown channels. IEEE Transactions on Wireless Communications, 2020." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 547, + 505, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 547, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 547, + 505, + 570 + ], + "type": "text", + "content": "Yan Zhang, Jonathon Hare, and Adam Prugel-Bennett. Deep set prediction networks. NeurIPS, 2019." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 181, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 181, + 99 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 181, + 99 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 123, + 187, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 123, + 187, + 135 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 187, + 135 + ], + "type": "text", + "content": "A APPROACH" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 148, + 264, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 148, + 264, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 264, + 159 + ], + "type": "text", + "content": "A.1 BUILDING CHANNEL MODELS" + } + ] + } + ], + "index": 3 + }, 
+ { + "bbox": [ + 105, + 168, + 295, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 168, + 295, + 180 + ], + "spans": [ + { + "bbox": [ + 105, + 168, + 295, + 180 + ], + "type": "text", + "content": "This section accompanies the text in Section 3." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 185, + 506, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 185, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 185, + 506, + 361 + ], + "type": "text", + "content": "Channel models are defined either in a statistical way by defining a distribution over channel attributes or in deterministic way using ray tracing. Statistical channel models are inadequate for applications involving positioning, sensing and challenges of communication at higher frequencies (e.g., mmWave at 30-300 GHz (Rappaport et al., 2022)). Inspired by similar techniques in computer graphics (Glassner, 1989), traditional ray tracing approaches (see for example (McKown & Hamilton, 1991; Ikegami et al., 1991; Walfisch & Bertoni, 1988)) approximate propagation of electromagnetic waves by modeling interactions of each ray with objects in its paths. These interactions include for example reflection, diffraction and penetration. Although this is more efficient than solving Maxwell equations, ray tracing methods need a detailed knowledge of the environment and are generally slow for prototyping. They generally utilize hard coded and mathematically tractable models for example knife-edge model for diffraction (Lee, 1982; Rappaport, 1996). These abstractions suffer from mismatches and require occasional tedious fine-tuning and calibration with real data. Improving these models while remaining tractable for rapid simulation rounds is not straightforward. Finally, they are non-differentiable and cannot be integrated into a closed loop design pipeline. 
We plan to tackle these issues by building a neural surrogate of a physics-based wireless ray tracer in this paper." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 374, + 279, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 279, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 279, + 385 + ], + "type": "text", + "content": "A.2 REPRESENTING RAY ATTRIBUTES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "text", + "content": "We represent the " + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "text", + "content": "-th ray (among " + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "text", + "content": " rays) at the " + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "text", + "content": "-th iteration of rendering as " + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\pmb{u}_k^{(r)}" + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "text", + "content": ". For notation convenience, we drop the sub- and super-script for the rest of the section. We characterize the wireless ray analogous to the concept of an optical ray (such as with geometric direction, intensity). 
In addition to the wireless attributes (see Equation 2), we further include meta-level attributes that helps us propagate and render the eventual ray received at the receiver co-ordinate " + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 395, + 504, + 472 + ], + "type": "text", + "content": ". We briefly describe these attributes here and elaborate on how they are obtained or updated over the next sections. The ray contains the attributes:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 472, + 438, + 505 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 472, + 438, + 505 + ], + "spans": [ + { + "bbox": [ + 111, + 472, + 438, + 505 + ], + "type": "interline_equation", + "content": "\\boldsymbol {u} = \\underbrace {\\left( \\begin{array}{c c c} a & \\tau & \\phi \\\\ \\text {(a) C h a n n e l A t t r i b u t e s} \\end{array} \\right)} _ {\\text {(b) R a y G e o m e t r y}} \\underbrace {\\boldsymbol {x} \\quad \\boldsymbol {d} \\quad t _ {s} \\quad t _ {r x} \\quad \\rho_ {\\mathrm {r x}}} _ {\\text {(c) S t a t e}} \\underbrace {\\sigma_ {\\mathrm {u p d}} \\quad \\sigma_ {\\mathrm {r x}} \\quad)} _ {\\text {(d) C h a n n e l A t t r i b u t e s}}", + "image_path": "985d427edf422692b7ea88099a4026ea10e93eb2e48ecfb623cd534376613099.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": "which as shown can be grouped into three categories: (a) Wireless Channel Attributes. Exactly as discussed earlier in the section (see Equation 2), it contains the attributes to construct the wireless channel time-angle impulse response (Equation 1) (b) Ray Geometry. 
We additionally include geometrical representation of the ray, which helps us determine how to propagate the ray through the environment. Specifically, we represent the geometry of the ray using the line equation: " + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "inline_equation", + "content": "\\pmb{p}(t) = \\pmb{x} + t\\pmb{d}" + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": " is the origin and " + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "inline_equation", + "content": "\\pmb{d}" + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": " is a unit-vector encoding the ray direction. We are interested in two particular solutions of " + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": " in this equation: " + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "inline_equation", + "content": "t_s" + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": " for which the ray intersects with a surface (mesh face in our case) and " + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": " for which the ray is tangential to a sphere around some receiver of radius " + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "inline_equation", + "content": "\\rho_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": ". (c) Ray state. 
To help with subsequent updates to the ray at future iterations, we track two binary variables. " + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "inline_equation", + "content": "\\sigma_{\\mathrm{upd}}" + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": " denotes whether the ray has to be updated in the next iteration. " + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "inline_equation", + "content": "\\sigma_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 506, + 506, + 627 + ], + "type": "text", + "content": " denotes whether the ray has impinged on a reception sphere of a predefined radius." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 639, + 249, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 249, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 249, + 651 + ], + "type": "text", + "content": "A.3 RAY MARCHING: DETAILS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": "Ray-Environment Intersections. For each ray, we are interested in their first interaction with the environment (e.g., first wall it hits, impinging on the receiver). For this, we are interested in the solutions to the line equation representing the geometry of the ray: " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{p}(t) = \\pmb{x}_k^{(r)} + t\\pmb{d}_k^{(r)}" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": ". 
In particular, we are interested in two solutions of " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": ": (a) Ray-Face intersection. The smallest value of " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "t > 0" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": " for which " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{p}(t)" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": " lies on a surface (a triangular mesh face in our case). For this, we perform ray-triangle intersections with each face in the environment and find the corresponding solution " + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "inline_equation", + "content": "t = t_s" + }, + { + "bbox": [ + 104, + 662, + 506, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 82, + 504, + 148 + ], + "blocks": [ + { + "bbox": [ + 108, + 82, + 504, + 148 + ], + "lines": [ + { + "bbox": [ + 108, + 82, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 108, + 82, + 504, + 148 + ], + "type": "image", + "image_path": "d2f20eee2b3f9b823f2adc8f4a42645796bf6d8ad301be8f20a54c5baf03ff4e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 217, + 156, + 392, + 167 + ], + "lines": [ + { + "bbox": [ + 217, + 156, + 392, + 167 + ], + "spans": [ + { + "bbox": [ + 217, + 156, + 392, + 167 + ], + "type": "text", + "content": "Figure A1: Ray-surface interaction network " + }, + { + "bbox": [ + 217, + 156, + 392, + 167 + ], + "type": "inline_equation", + "content": "f_{\\theta}^{1}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": "This helps us estimate the new relay location: " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + 
"content": "\\pmb{x}_k^{(r + 1)} = \\pmb{x}_k^{(r)} + t_s\\pmb{d}_k^{(r)}" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " (a) Ray-Rx intersection. In parallel, we are also interested in positive solutions of " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " for which the ray hits the receiver if it were modeled as a sphere of radius " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "\\rho_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": ". In this case, we obtain the value of " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " as the projection of " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{rx}" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "\\pmb{p}(t)" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 232, + 235, + 504, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 235, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 232, + 235, + 504, + 251 + ], + "type": "interline_equation", + "content": "t _ {r x} = \\max \\left(0, \\left(\\boldsymbol {x} _ {r x} - \\boldsymbol {x} _ {k} ^ {(r)}\\right) \\cdot \\boldsymbol {d} _ {k} ^ {(r)}\\right) \\tag {3}", + "image_path": "f639571663f62e24e2a8eb240ae07dd91e4905cf4de7f8696d95bacd815e4fba.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 231, + 252, + 504, + 269 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 252, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 231, + 252, + 504, + 269 + ], + "type": "interline_equation", + "content": "\\rho_ {r x} = \\left| \\left| \\left(\\boldsymbol {x} _ {r x} - \\boldsymbol {x} _ {k} ^ {(r)}\\right) - t _ {r x} \\boldsymbol {d} _ {k} ^ {(r)} \\right| \\right| \\tag {4}", + "image_path": "83dbe1933fab413d7ecd88a39d549d46a13d30db4ddb1d3bdf8970579d72cead.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 270, + 504, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 504, + 293 + ], + "type": "text", + "content": "Consequently, at the end of ray-environment, we analytically estimate the first intersections of the ray with both the environment and (potentially) the receiver." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "text", + "content": "Ray-Surface Interaction. 
If the ray " + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "inline_equation", + "content": "\\pmb{u}_k^{(r)}" + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "text", + "content": " (originating at " + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "inline_equation", + "content": "\\pmb{x}_k^{(r)}" + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "text", + "content": ") and travelling in direction " + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "inline_equation", + "content": "d_k^{(r)}" + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "text", + "content": " hits a wall at " + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "inline_equation", + "content": "x_{k}^{(r + 1)}" + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "text", + "content": " (as estimated in the previous step), we are now interested in characterizing the outgoing ray with origin at " + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "inline_equation", + "content": "x_{k}^{(r + 1)}" + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "text", + "content": ". Specifically, we are interested in estimating the new direction " + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "inline_equation", + "content": "d_k^{(r + 1)}" + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "text", + "content": " (does the ray penetrate the wall? or reflect?) and the corresponding change in gain that arises (i.e., loss of power, change of phase). This is a complex problem and typically requires in-depth knowledge of the surface (e.g., which material) as well as its specific EM properties (e.g., frequency-dependent effects). Our solution is to instead learn these properties by associating spatial regions in the environment with EM-specific properties. Towards this, we delegate the association to a neural network show in Figure A1. 
The key idea is to associate spatial co-ordinates (or sets of co-ordinates, given by face on which they lie) with EM properties. We achieve this by mapping spatial properties (e.g., face corresponding to " + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "inline_equation", + "content": "x_{k}^{(r + 1)}" + }, + { + "bbox": [ + 104, + 301, + 506, + 439 + ], + "type": "text", + "content": ") to EM properties (e.g., gain factor)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 443, + 247, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 443, + 247, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 247, + 455 + ], + "type": "text", + "content": "Specifically, our neural network is:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 239, + 458, + 504, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 458, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 239, + 458, + 504, + 471 + ], + "type": "interline_equation", + "content": "\\boldsymbol {v} _ {i} = \\text {s p a t i a l - n e t} \\left(\\boldsymbol {f} _ {i}, \\boldsymbol {n} _ {i}, \\boldsymbol {b} _ {i}\\right) \\tag {5}", + "image_path": "7ed0ca07f4d4382c3695d218f99f5d862d7b435f57574e645e6937fe33e9dcdb.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 227, + 472, + 504, + 485 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 472, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 227, + 472, + 504, + 485 + ], + "type": "interline_equation", + "content": "\\boldsymbol {s} _ {i}, \\boldsymbol {\\rho} _ {i} = \\text {d i r e c t i o n a l . 
n e t} \\left(\\boldsymbol {v} _ {i}, \\boldsymbol {d} _ {i}\\right) \\tag {6}", + "image_path": "2a62170faceab6f871cc7fdd4e30d27f965d910e641b7d20ce57a326884ac1f0.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 487, + 504, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 535 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 535 + ], + "type": "text", + "content": "which consists of a spatial_net to encode EM properties specific to a spatial region, but independent of the incidence direction. This network takes as inputs the one-hot encoding of the face " + }, + { + "bbox": [ + 104, + 487, + 504, + 535 + ], + "type": "inline_equation", + "content": "\\pmb{f}_i" + }, + { + "bbox": [ + 104, + 487, + 504, + 535 + ], + "type": "text", + "content": " on which the relay point " + }, + { + "bbox": [ + 104, + 487, + 504, + 535 + ], + "type": "inline_equation", + "content": "\\pmb{x}_k^{(r+1)}" + }, + { + "bbox": [ + 104, + 487, + 504, + 535 + ], + "type": "text", + "content": " lies and the surface normal vector at that point " + }, + { + "bbox": [ + 104, + 487, + 504, + 535 + ], + "type": "inline_equation", + "content": "\\pmb{n}_i" + }, + { + "bbox": [ + 104, + 487, + 504, + 535 + ], + "type": "text", + "content": ". 
In addition, we also provide the network a 3-dim conditioning vector of signed distances" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 195, + 539, + 504, + 554 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 539, + 504, + 554 + ], + "spans": [ + { + "bbox": [ + 195, + 539, + 504, + 554 + ], + "type": "interline_equation", + "content": "\\boldsymbol {b} _ {i} = \\left(\\operatorname {s d f} \\left(\\boldsymbol {x} _ {t x}, \\boldsymbol {f} _ {i}\\right), \\quad \\operatorname {s d f} \\left(\\boldsymbol {x} _ {r x}, \\boldsymbol {f} _ {i}\\right), \\quad \\operatorname {s d f} \\left(\\boldsymbol {x} _ {k} ^ {(r + 1)}, \\boldsymbol {f} _ {i}\\right)\\right) \\tag {7}", + "image_path": "c999abd878d07079adf76875aae428fca21beafaffb359f7b42711d21359f56a.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 555, + 506, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 506, + 590 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 555, + 506, + 590 + ], + "type": "inline_equation", + "content": "\\operatorname{sdf}(\\pmb{x},\\pmb{f})" + }, + { + "bbox": [ + 104, + 555, + 506, + 590 + ], + "type": "text", + "content": " is the signed distance function between co-ordinate " + }, + { + "bbox": [ + 104, + 555, + 506, + 590 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 555, + 506, + 590 + ], + "type": "text", + "content": " and face " + }, + { + "bbox": [ + 104, + 555, + 506, + 590 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 555, + 506, + 590 + ], + "type": "text", + "content": ". We find it crucial to condition the network on these values to help predict EM-properties for relevant outgoing components (e.g., reflective, transmission)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": "The output of the network is a gain factor " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": ", such that the new gain of the ray " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\boldsymbol{u}_k^{(r+1)}" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "a_k^{(r+1)} = s_i a_k^{(r)}" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": ". Since the gain magnitudes can be represented in either linear or logarithmic scale, we predict both additive and multiplicative factors of the gain in practice (" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "a_k^{(r+1)} = s_{i,1} a_k^{(r)} + s_{i,2}" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": "). In parallel, the network also predicts the rotation a ray incident with direction " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\boldsymbol{d}_k^{(r)}" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\boldsymbol{f}_i" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": " undergoes. 
We characterize rotations using a 4-dim rotation " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\rho_i" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": " using Euler-Rodrigues parameterization. This parameterization encodes the axis of rotation and about which " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\boldsymbol{d}_k^{(r)}" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": " rotates by angle " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\vartheta" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": ". We represent the rotation by a " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": " SO(3) matrix " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": " and the new outgoing direction of ray " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "text", + "content": " is given by " + }, + { + "bbox": [ + 104, + 596, + 506, + 704 + ], + "type": "inline_equation", + "content": "\\boldsymbol{d}_k^{(r+1)} = A \\boldsymbol{d}_k^{(r)}" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "Reception/Termination check. For some special cases, we halt ray marching for a subset of rays. 
Namely, when ray " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " impinges on a reception sphere of radius under " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\varrho" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " meters. This prevents" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 89, + 233, + 186 + ], + "blocks": [ + { + "bbox": [ + 107, + 89, + 233, + 186 + ], + "lines": [ + { + "bbox": [ + 107, + 89, + 233, + 186 + ], + "spans": [ + { + "bbox": [ + 107, + 89, + 233, + 186 + ], + "type": "image", + "image_path": "0cef139f6ea25ca9f4890a522c10971c0381d79ed5f5f595829cb4ec9cb6adc9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 188, + 217, + 197 + ], + "lines": [ + { + "bbox": [ + 126, + 188, + 217, + 197 + ], + "spans": [ + { + "bbox": [ + 126, + 188, + 217, + 197 + ], + "type": "text", + "content": "(a) Testset 1: \"Checkerboard\"" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": 
[ + 236, + 81, + 372, + 186 + ], + "blocks": [ + { + "bbox": [ + 236, + 81, + 372, + 186 + ], + "lines": [ + { + "bbox": [ + 236, + 81, + 372, + 186 + ], + "spans": [ + { + "bbox": [ + 236, + 81, + 372, + 186 + ], + "type": "image", + "image_path": "ff9a1704ab235f3358c8018acd8dba87c60a37a294e3c398aed3663d99c28239.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 253, + 188, + 352, + 197 + ], + "lines": [ + { + "bbox": [ + 253, + 188, + 352, + 197 + ], + "spans": [ + { + "bbox": [ + 253, + 188, + 352, + 197 + ], + "type": "text", + "content": "(b) Testset 2: \"Generalization-z\"" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 206, + 504, + 227 + ], + "lines": [ + { + "bbox": [ + 104, + 206, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 104, + 206, + 504, + 227 + ], + "type": "text", + "content": "Figure A2: Train and test regimes: We consider disjoint subsets of train (blue markers; identical in all figures) and test (orange markers) co-ordinates of transmit and receive locations." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 376, + 89, + 503, + 186 + ], + "blocks": [ + { + "bbox": [ + 376, + 89, + 503, + 186 + ], + "lines": [ + { + "bbox": [ + 376, + 89, + 503, + 186 + ], + "spans": [ + { + "bbox": [ + 376, + 89, + 503, + 186 + ], + "type": "image", + "image_path": "44d3c622da02035b4fcd69c7fea3ddd1cb7aefd0ef2363370ac71d0b3f328243.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 383, + 188, + 490, + 197 + ], + "lines": [ + { + "bbox": [ + 383, + 188, + 490, + 197 + ], + "spans": [ + { + "bbox": [ + 383, + 188, + 490, + 197 + ], + "type": "text", + "content": "(c) Testset 3: \"Generalization-diag\"" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": "a future version of the ray being potentially being incorrectly received once again. In addition, for computation reasons, we also terminate ray marching if the ray exits the region of interest (e.g., ray exiting the environment)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 288, + 505, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 288, + 505, + 322 + ], + "spans": [ + { + "bbox": [ + 104, + 288, + 505, + 322 + ], + "type": "text", + "content": "Free-space interaction. While the previous steps modeled the interaction of material properties of the environment on wireless propagation, we now switch focus to free-space. 
In this case, we model propagation of a ray using the empirically-adjusted Friis' Equation:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 231, + 325, + 503, + 353 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 325, + 503, + 353 + ], + "spans": [ + { + "bbox": [ + 231, + 325, + 503, + 353 + ], + "type": "interline_equation", + "content": "P _ {r} (d) = P _ {t} K \\left(\\frac {d _ {0}}{d}\\right) ^ {\\lambda}, \\quad d \\geq d _ {0} \\tag {8}", + "image_path": "1c549a2ecd67d9dbb6189a46b6df2e4d7fe31ffdff17255fdd1c331024d60f3e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "text", + "content": "which represents the power at the received at the receive antenna " + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "inline_equation", + "content": "P_r" + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "text", + "content": " as a function of the power fed into transmitting antenna " + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "inline_equation", + "content": "P_t" + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "text", + "content": " and the distance travelled by the ray " + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "text", + "content": ". 
We learn the remaining scalar parameters " + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "text", + "content": " (constant representing of antenna gains), " + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "text", + "content": " (wavelength of signal), and " + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 104, + 356, + 504, + 399 + ], + "type": "text", + "content": " (reference distance)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 415, + 297, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 297, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 297, + 427 + ], + "type": "text", + "content": "B DATASET: ADDITIONAL DETAILS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 440, + 250, + 450 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 440, + 250, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 250, + 450 + ], + "type": "text", + "content": "B.1 TRAIN AND TEST REGIMES" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 460, + 294, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 294, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 294, + 472 + ], + "type": "text", + "content": "Figure A2 accompanies the text in Section 4.1." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 485, + 340, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 485, + 340, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 485, + 340, + 496 + ], + "type": "text", + "content": "B.2 SIMULATION FOR WIINDOOR DATASET: DETAILS" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 505, + 504, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 504, + 583 + ], + "type": "text", + "content": "We created 3 different floor-plans in Wireless Inside where 2D floor-plans layout and semantic labels of each room are picked from House " + }, + { + "bbox": [ + 104, + 505, + 504, + 583 + ], + "type": "inline_equation", + "content": "\\mathrm{GAN}++" + }, + { + "bbox": [ + 104, + 505, + 504, + 583 + ], + "type": "text", + "content": " dataset and mapped into a 3D layout where the scale and dimensions are determined based on practical floor-plan scenarios. All layouts are scaled to " + }, + { + "bbox": [ + 104, + 505, + 504, + 583 + ], + "type": "inline_equation", + "content": "10\\mathrm{m}\\times 10\\mathrm{m}" + }, + { + "bbox": [ + 104, + 505, + 504, + 583 + ], + "type": "text", + "content": " with ceiling height at " + }, + { + "bbox": [ + 104, + 505, + 504, + 583 + ], + "type": "inline_equation", + "content": "3\\mathrm{m}" + }, + { + "bbox": [ + 104, + 505, + 504, + 583 + ], + "type": "text", + "content": ". All the inner walls and floor materials are layered dielectrics with specific permittivity, conductivity & roughness. These have finite reflection and transmission coefficients. The reflection coefficient is corrected if the surface is not smooth while the transmission coefficients are unaffected by surface roughness." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 591, + 504, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 504, + 669 + ], + "type": "text", + "content": "Materials. Propagation characteristics are naturally affected by the medium and we create a dataset with fairly diverse set of materials. Layered dielectric with two layers separated by free-space of " + }, + { + "bbox": [ + 104, + 591, + 504, + 669 + ], + "type": "inline_equation", + "content": "89\\mathrm{cm}" + }, + { + "bbox": [ + 104, + 591, + 504, + 669 + ], + "type": "text", + "content": " is chosen for all inner walls and the outer-walls were made of thicker materials of concrete. Doors were created using free space except the balcony door which was created using glass with a small thickness. The balcony walls were laid out using brick walls. The propagation factor and index of reflection are functions of the permittivity " + }, + { + "bbox": [ + 104, + 591, + 504, + 669 + ], + "type": "inline_equation", + "content": "(\\epsilon)" + }, + { + "bbox": [ + 104, + 591, + 504, + 669 + ], + "type": "text", + "content": " and conductivity " + }, + { + "bbox": [ + 104, + 591, + 504, + 669 + ], + "type": "inline_equation", + "content": "(\\sigma)" + }, + { + "bbox": [ + 104, + 591, + 504, + 669 + ], + "type": "text", + "content": " of medium. In Table A1, we present the relative permittivity and conductivity." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "text", + "content": "Antenna and Transceiver configuration. Omnidirectional beam patterned antenna with polarization perpendicular to the z axis is setup for all receive and transmit antennas. 
Location, Orientation of the antenna are set relative to global reference such that they are rotated about the z axis by 90deg and placed at a height of " + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "inline_equation", + "content": "2.8\\mathrm{m}" + }, + { + "bbox": [ + 104, + 677, + 504, + 731 + ], + "type": "text", + "content": ". All antennas employ the same configuration with no transmission loss." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 146, + 79, + 463, + 148 + ], + "blocks": [ + { + "bbox": [ + 146, + 79, + 463, + 148 + ], + "lines": [ + { + "bbox": [ + 146, + 79, + 463, + 148 + ], + "spans": [ + { + "bbox": [ + 146, + 79, + 463, + 148 + ], + "type": "table", + "html": "
thickness(cm)permittivity εconductivity σ (S/m)
Layered drywall(1,3)1.32.80.013
Brick12.54.440.0001
Concrete305.310.015
Glass32.40
", + "image_path": "3fd2f171e59fb24bd706ce2c86b6e573fdd7464e3fa8745a0badd131cea568c1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 127, + 210, + 302, + 265 + ], + "blocks": [ + { + "bbox": [ + 249, + 156, + 361, + 167 + ], + "lines": [ + { + "bbox": [ + 249, + 156, + 361, + 167 + ], + "spans": [ + { + "bbox": [ + 249, + 156, + 361, + 167 + ], + "type": "text", + "content": "Table A1: Material properties" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 210, + 302, + 265 + ], + "lines": [ + { + "bbox": [ + 127, + 210, + 302, + 265 + ], + "spans": [ + { + "bbox": [ + 127, + 210, + 302, + 265 + ], + "type": "table", + "html": "
OverallGeometryAvg. Delay
kNN0.2640.2881.479
MLP0.2800.3781.191
WiNeRT0.2030.1141.297
", + "image_path": "7ef4308f608175418b413d4cdc6a996fe102fd2027a25683b2a2c5a98dcb4ef3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 316, + 178, + 496, + 308 + ], + "blocks": [ + { + "bbox": [ + 316, + 178, + 496, + 308 + ], + "lines": [ + { + "bbox": [ + 316, + 178, + 496, + 308 + ], + "spans": [ + { + "bbox": [ + 316, + 178, + 496, + 308 + ], + "type": "image", + "image_path": "72cfcef1ab10c90b4f112744c062f3d7c1a191477f885f9a69747b740a9af549.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 351, + 315, + 460, + 326 + ], + "lines": [ + { + "bbox": [ + 351, + 315, + 460, + 326 + ], + "spans": [ + { + "bbox": [ + 351, + 315, + 460, + 326 + ], + "type": "text", + "content": "Table A3: Qualitative results" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 347, + 506, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 506, + 415 + ], + "type": "text", + "content": "Simulation. We currently run the simulation using the shoot and bounce model where a geometric path is drawn from every point on the transmitter field pattern to the receive point. This also includes transmission through surfaces allowing it to model transmittance and reflection. Rays are first traced from the source points with the rays reflecting specularly from the building walls. The rays that hit building walls are reflected specularly and continue to be traced up to the maximum number of reflections and transmissions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 419, + 504, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 419, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 104, + 419, + 504, + 444 + ], + "type": "text", + "content": "The spatial separation of rays is set to " + }, + { + "bbox": [ + 104, + 419, + 504, + 444 + ], + "type": "inline_equation", + "content": "0.75^{\\circ}" + }, + { + "bbox": [ + 104, + 419, + 504, + 444 + ], + "type": "text", + "content": ". The geometric path traced by the ray undergoes up to 6 specular reflection and 3 transmittance with path loss threshold set to -70dBm." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 447, + 506, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 447, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 447, + 506, + 482 + ], + "type": "text", + "content": "Total received power of all paths is determined as the sum of time averaged power of group of correlated paths. A set of ray paths that interact with similar set of faces and follow nearly same path are defined as group." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 498, + 318, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 498, + 318, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 318, + 510 + ], + "type": "text", + "content": "C EVALUATION: ADDITIONAL DETAILS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 523, + 479, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 479, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 479, + 534 + ], + "type": "text", + "content": "C.1 CONTROLLABLE SYNTHESIS: GENERALIZATION TO RECONFIGURED FLOORMAPS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 544, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 504, + 567 + ], + "type": "text", + "content": "Table A2 accompanies the discussions in Section 4.3, where we evaluate a WiNeRT model trained in one environment " + }, + { + "bbox": [ + 104, + 544, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 104, + 544, + 504, + 567 + ], + "type": "text", + "content": " and evaluated in a reconfigured environment " + }, + { + "bbox": [ + 104, + 544, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{F}'" + }, + { + "bbox": [ + 104, + 544, + 504, + 567 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 581, + 455, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 581, + 455, + 593 + ], + "spans": [ + { + "bbox": [ + 104, + 581, + 455, + 593 + ], + "type": "text", + "content": "C.2 CONTROLLABLE SYNTHESIS: LOWER- AND HIGHER-ORDER INTERACTIONS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 601, + 367, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 367, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 367, + 613 + ], + "type": "text", + "content": "See Table A4, which accompanies the discussions in Section 4.3." + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 105, + 632, + 518, + 701 + ], + "blocks": [ + { + "bbox": [ + 113, + 272, + 314, + 293 + ], + "lines": [ + { + "bbox": [ + 113, + 272, + 314, + 293 + ], + "spans": [ + { + "bbox": [ + 113, + 272, + 314, + 293 + ], + "type": "text", + "content": "Table A2: Quantitative results. For a trained approach evaluated on a reconfigured floormap " + }, + { + "bbox": [ + 113, + 272, + 314, + 293 + ], + "type": "inline_equation", + "content": "\\mathbf{F}^{\\prime }" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 105, + 632, + 518, + 701 + ], + "lines": [ + { + "bbox": [ + 105, + 632, + 518, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 518, + 701 + ], + "type": "table", + "html": "
#interactions rOverall (DoD)GeometryAvg. Delay
01*2301*2301*23
kNN0.220.330.500.550.310.210.290.331.302.242.963.40
MLP0.580.460.610.670.340.330.370.410.982.052.933.48
WiNeRT0.120.250.440.510.000.090.210.270.032.032.432.8
", + "image_path": "dbe9fd59651ce7db0e6b30bb888189d2e15f55afff59d34435e44027e64c5b6c.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "type": "text", + "content": "Table A4: Low- and Higher-Order Interactions. We vary the number of ray-surface interactions (denoted by " + }, + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "type": "text", + "content": " ) for a model trained using single-order interactions " + }, + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "type": "inline_equation", + "content": "\\left( {r = 1\\text{,denoted by * in the table).}}\\right)" + }, + { + "bbox": [ + 104, + 708, + 504, + 730 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 84, + 304, + 234 + ], + "blocks": [ + { + "bbox": [ + 111, + 84, + 304, + 234 + ], + "lines": [ + { + "bbox": [ + 111, + 84, + 304, + 234 + ], + "spans": [ + { + "bbox": [ + 111, + 84, + 304, + 234 + 
], + "type": "image", + "image_path": "54ed2cb9f6fb7385596948980201aec5351dc6670b16dde8c6a637b89d3ab444.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 248, + 504, + 289 + ], + "lines": [ + { + "bbox": [ + 104, + 248, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 504, + 289 + ], + "type": "text", + "content": "Figure A3: Simulation Time. Comparing wall-clock time vs. accuracy performances of our approach (WiN-eRT) against baselines (MLP, kNN) and wireless ray tracing softwares (PyLayers and Insite). The 'Oracle ray launch' variant, which utilizes known ray launch directions at test-time, indicates an approximate performance upper-bound of our approach." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 305, + 85, + 500, + 235 + ], + "blocks": [ + { + "bbox": [ + 305, + 85, + 500, + 235 + ], + "lines": [ + { + "bbox": [ + 305, + 85, + 500, + 235 + ], + "spans": [ + { + "bbox": [ + 305, + 85, + 500, + 235 + ], + "type": "image", + "image_path": "87f6ef8759d03631a96dad398a0fa0df83e5dacb9239815374b27f524ea23781.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 325, + 214, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 325, + 214, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 325, + 214, + 335 + ], + "type": "text", + "content": "C.3 SIMULATION TIME" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 351, + 504, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 351, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 351, + 504, + 407 + ], + "type": "text", + "content": "In Sections 4.2 and 4.3, we found our proposed approach WiNeRT achieves reasonable performance compared with non-differentiable and non-neural simulator packages. 
Additionally, we demonstrated that WiNeRT is capable of generalization (e.g., to novel elevations, to re-configured floor-plans) and can be used for inverse problems. In this section, we additionally discuss run-time performance of WiNeRT and compare against baseline approaches as well as the simulator package." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "text", + "content": "Experimental Setup. The end-goal of the experiment is to analyze the simulation time (specifically wall-clock times) of the proposed WiNeRT approach and contrast it against both the simulator softwares (PyLayers, Wireless Inside) and proposed baselines (MLP, kNN). We first remark that the implementations fundamentally vary between the approaches and hence an ideal wall-clock timing comparison is not possible. For instance, some approaches (WiNeRT, MLP, kNN) use a PyTorch implementation which can be run on GPU whereas the wireless ray tracing simulation packages are either proprietary (e.g., Wireless Inside) or developed exclusively for CPU (e.g., PyLayers) and thereby limiting the choice of hardware on which they can be run. 
Nonetheless, we keep simulation settings consistent when possible: by running the exact simulations used for the overall results (setting 'checkerboard'; see Section 4.1) and furthermore estimating wall-clock times per simulation (batch size of 1) over " + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "text", + "content": " individual simulations with a maximum of 1 reflection and transmission (i.e., " + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "inline_equation", + "content": "r = 1" + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "text", + "content": "). For all approaches, we report only the mean simulation time over the multiple simulations, as we found the variances low (" + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "inline_equation", + "content": "\\sigma^2 \\leq 3.5 \\times 10^{-3}" + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "text", + "content": "). When possible, we also report corresponding accuracy ('overall prediction error'; see Sec. 4.1). We evaluate PyTorch-based implementations (WiNeRT, MLP, kNN) over " + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "inline_equation", + "content": "N = \\sim 8K" + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "text", + "content": " simulations using pretrained models (specifically the ones for reporting 1) on a Nvidia A100 GPU. In the case of WiNeRT, we are able to control time-accuracy trade-off to some degree at test-time by varying the number of launched rays " + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 415, + 506, + 635 + ], + "type": "text", + "content": " (see 'Ray Launching' in Sec. 3.1) as a function of the number of subdivisions of the ico-sphere. 
We choose 1-5 sub-divisions and additionally an 'oracle ray' launch strategy to depict a lower-bound on the time-accuracy values." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "Results. We present the time-accuracy in Figure A3 and observe: (i) WiNeRT (orange markers) is significantly faster than the simulators (blue line), demonstrating speed-ups of " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "11 - 22 \\times" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": " over PyLayers (Amiot et al., 2013) and " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "6 - 22 \\times" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": " over Wireless Insite (Remcom, 2022). Although the simulators are approximately an upper-bound on the accuracy, we find that WiNeRT can make reasonable trade-offs on accuracy to boost simulation times in certain scenarios; (ii) The baselines we propose in this paper (MLP and kNN) are even faster. MLP (green marker) is the fastest with speed-ups of " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "538 - 687 \\times" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": ", which can be largely attributed to a simple architecture (3-layer ReLU MLP with 128 hidden units). kNN (red marker) is the second fastest with " + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "inline_equation", + "content": "79 - 97 \\times" + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": " speed-ups over the simulators." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 230, + 84, + 381, + 234 + ], + "blocks": [ + { + "bbox": [ + 230, + 84, + 381, + 234 + ], + "lines": [ + { + "bbox": [ + 230, + 84, + 381, + 234 + ], + "spans": [ + { + "bbox": [ + 230, + 84, + 381, + 234 + ], + "type": "image", + "image_path": "f39a85c43617e0f4efee44c6bcbf283850edecce615634dde21652b47fb763ab.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 248, + 504, + 269 + ], + "lines": [ + { + "bbox": [ + 104, + 248, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 504, + 269 + ], + "type": "text", + "content": "Figure A4: User Localization. We backpropagate through our trained forward model to solve for the position of the receiver." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "type": "text", + "content": "While these baselines offer much faster simulation times, their generalization capabilities remain unclear as they suffer from memorization (see discussion for Fig. 3)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 325, + 338, + 336 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 325, + 338, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 325, + 338, + 336 + ], + "type": "text", + "content": "C.4 USER LOCALIZATION VIA INVERSE RENDERING" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": "In this section, we provide additional details to complement the discussion on the user localization experiment in Section 4.3. For the user localization task, the problem is to determine user location " + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": " from an observed channel " + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "inline_equation", + "content": "h_{\\mathrm{obs}}" + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": ". 
We solve for " + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": ", by performing gradient on spatial coordinate " + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}" + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": " that minimizes the channel loss " + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "inline_equation", + "content": "\\text{render}_{\\theta}(\\pmb{x}_{\\mathrm{tx}}, \\pmb{x}_{\\mathrm{rx}}^{\\mathrm{ukn}}, \\pmb{F}_i)" + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": ". This is possible with WiNeRT, since we can backpropagate through the neural simulation of the channel. We optimize for " + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": " using SGD with momentum (lr=0.01, momentum=0.9, 2000 iterations) with two additional considerations: (a) we constrain " + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": " to lie in valid ranges (positive, upper-bounded by " + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\max}" + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": ") by clamping the values at each iteration; and (b) to prevent solutions in local minimas, we take the result which yields the minimum loss over five random initializations of " + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "inline_equation", + "content": 
"\\pmb{x}_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 346, + 504, + 456 + ], + "type": "text", + "content": ". We present the CDF of localization errors over 100 test examples in A4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 472, + 344, + 484 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 344, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 344, + 484 + ], + "type": "text", + "content": "D IMPLEMENTATION: ADDITIONAL DETAILS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 504, + 520 + ], + "type": "text", + "content": "In this section, we provide additional implementation details and hyperparameter choices of approaches discussed in the paper." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 532, + 174, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 532, + 174, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 532, + 174, + 544 + ], + "type": "text", + "content": "D.1 WINERT" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": "Architecture: Ray-surface Interaction " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "f_{\\theta}^{1}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": ". We follow an MLP architecture (see Figure A1) similar to NeRF approaches (Mildenhall et al., 2020; Verbin et al., 2022). We decompose the parameters into view-independent ('spatial MLP') and view-dependent ('directional MLP') sets. 
Given a ray incident at a spatial co-ordinate " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "x_{k}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " in direction " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": ", the spatial MLP (2 hidden layers, 64 units) takes three inputs: (a) the face " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "f_{i}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " (1-hot index) on which " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "x_{k}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " lies; (b) the surface normal " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "n_{i}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " of face " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "f_{i}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": "; and (c) a 3d vector of signed-distance values between the face and " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "x_{k}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + 
"content": ". We find (c) provides information (e.g., " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " on the same side of wall) to condition the network to predict attributes related to either reflection or transmission components. The directional MLP (1 hidden layer, 64 units) takes two inputs: (i) a 32-dim bottleneck vector produced by the spatial MLP; and (ii) a 3-dim unit vector representing the incidence direction " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": ". The final output are scaling and additive co-efficients " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " for the gain magnitude (i.e., " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "a_{k}^{(r + 1)} = s_{1}a_{k}^{(r)} + s_{2}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": ") and 4-dim parameters " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " for rotation (based on Euler-Rodrigues formulation). 
The rotation parameters " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " are mapped to a " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "3\\times 3" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " rotation matrix " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "A = \\Gamma (\\rho_{i})" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": " to transform the incident to outgoing ray " + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "inline_equation", + "content": "d_{k}\\coloneqq Ad_{k}" + }, + { + "bbox": [ + 104, + 555, + 506, + 702 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "**Renderer:** Ray Launching. In the first step of the renderer, we launch " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " rays from co-ordinate " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " uniformly in all directions. 
To achieve this, we center a ico-sphere with 5 sub-divisions and" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "choose as directions the vectors from " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " towards the ico-sphere vertices (10.2K vertices with 5 sub-divisions). 
Since we know the exact co-ordinates between " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": ", we manually include the line-of-sight direction resulting in a total of " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " rays." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "content": "**Renderer:** Ray Marching. The core step of the renderer is ray marching (detailed in Figure 2). We elaborate on technical implementation details step-by-step using as reference Figure 2. We drop sub- and super-scripts for rest of the paragraph for notational convenience. (a) **Ray-Triangle intersection:** For a given ray " + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "inline_equation", + "content": "\\pmb{p} = \\pmb{o} + t\\pmb{d}" + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "content": ", we are interested in the minimum finite solution of " + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "inline_equation", + "content": "t > 0" + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "content": " for which the ray intersects with each face of the mesh. 
For some face with coordinates " + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "inline_equation", + "content": "(\\pmb{a}, \\pmb{b}, \\pmb{c})" + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "content": ", this entails solving for " + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "inline_equation", + "content": "\\pmb{p} = \\pmb{o} + t\\pmb{d} = \\alpha \\pmb{a} + \\beta \\pmb{b} + \\gamma \\pmb{c}" + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "content": " (under constraints " + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "inline_equation", + "content": "\\alpha + \\beta + \\gamma = 1" + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "inline_equation", + "content": "0 \\leq \\alpha, \\beta, \\gamma \\leq 1" + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "content": "). We calculate valid solutions using Cramer's rule for all faces in the mesh and only consider (if one exists) the minimum positive solution corresponding to the first ray-triangle intersecting point. (b) **Ray-Surface interaction:** Given the solution from the previous step (i.e., on which spatial co-ordinate the ray is incident on the surface), we are now interested in estimating the outgoing ray from that co-ordinate. For this, we leverage an MLP that maps incident gain, direction, and certain face properties to outgoing gain and direction. More details of this MLP are discussed above under the 'Architecture: Ray-surface Interaction'. 
(c) **Reception/Termination:** Per ray, we stop ray marching steps if it is either received (hits a reception sphere of fixed size of " + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "inline_equation", + "content": "30\\mathrm{cm}" + }, + { + "bbox": [ + 104, + 124, + 506, + 301 + ], + "type": "text", + "content": ") or leaves the region of interest (e.g., penetrates exterior wall is shot into infinity). In other cases, we continue with ray marching steps." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "text", + "content": "**Renderer:** Ray Aggregation. At the end of ray marching steps (over " + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "text", + "content": " iterations), we determine the final state of the " + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "text", + "content": " rays. We are now interested in a small subset of these " + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "text", + "content": " rays that is received at a receiver at fixed co-ordinate " + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "text", + "content": ". Note that we perform these steps only at test-time. 
The ray aggregation as a result involves two steps: (a) Ray Filtering: where we determine the subset of rays that arrives at " + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "text", + "content": " by modelling the receiver as a sphere of fixed radius of " + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "inline_equation", + "content": "30\\mathrm{cm}" + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "text", + "content": "; and (b) Preventing double counting: we find duplicate rays arrive at " + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 308, + 504, + 408 + ], + "type": "text", + "content": " due to a combination of a non-infinitesimally sized reception sphere and a high density of launched rays. We cull such duplicates by grouping rays based on a unique interaction sequence (i.e., IDs of faces it intersects with) and choosing the ray of the shortest length in each group." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 415, + 504, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 504, + 462 + ], + "type": "text", + "content": "**Optimization.** We perform gradient-descent steps on learnable parameters using Adam with a learning rate of 0.001 with batch size of 1. We observed large gradients (possibly due to single-batch) and hence clip gradient values to 100 during training. The model is trained for 100 epochs and we pick the checkpoint with lowest validation error during training." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 474, + 183, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 474, + 183, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 474, + 183, + 485 + ], + "type": "text", + "content": "D.2 BASELINES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "text", + "content": "MLP. The MLP baseline extends ideas presented in Tancik et al. (2020); Sitzmann et al. (2020), where a simple MLP is used to map co-ordinates to the signal (e.g., pixel co-ordinate to RGB values). In our paper, the MLP directly maps the spatial co-ordinates " + }, + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{tx}}" + }, + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "inline_equation", + "content": "x_{\\mathrm{rx}}" + }, + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "text", + "content": " to channel " + }, + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "inline_equation", + "content": "h_i" + }, + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "text", + "content": ". The MLP contains 3 hidden layers, each with 128 hidden units and ReLU activation. The core idea here is to implicitly learn the geometry of the environment (floormap " + }, + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 496, + 504, + 586 + ], + "type": "text", + "content": "), which is common to all train and test examples. 
Note that in contrast to previous works, this model does not use positional embeddings nor sinusoidal activations, as our initial experiments indicated they learn high-frequency artifacts that is not typically present in our datasets (the wireless channels)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "text", + "content": "kNN. The kNN baseline (with " + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "inline_equation", + "content": "k = 1" + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "text", + "content": ") works as so: for a given test-example " + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_{\\mathrm{tx}}, \\pmb{x}_{\\mathrm{rx}})" + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "text", + "content": " we find the spatially closest training example arg " + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "inline_equation", + "content": "\\min_{i} ||\\pmb{x}_{\\mathrm{tx}} - \\pmb{x}_{\\mathrm{tx},i}^{\\mathrm{train}}||_{2} + ||\\pmb{x}_{\\mathrm{rx}} + \\pmb{x}_{\\mathrm{rx},i}^{\\mathrm{train}}||_{2}" + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "text", + "content": " and predict channel " + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "inline_equation", + "content": "\\pmb{h}_i" + }, + { + "bbox": [ + 104, + 594, + 504, + 628 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_content_list.json b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a146f36cca14138cc4d17d01d5cdc2d116ceed3a --- /dev/null +++ b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_content_list.json @@ -0,0 +1,2125 @@ +[ + { + "type": "text", + "text": "WINNING BOTH THE ACCURACY OF FLOATING POINT ACTIVATION AND THE SIMPLICITY OF INTEGER ARITHMETIC", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yulhwa $\\mathbf{K}\\mathbf{m}^{1}$ , Jaeyong $\\mathbf{J}\\mathbf{a}\\mathbf{g}^{1}$ , Jehun $\\mathbf{L}\\mathbf{e}^{1}$ , Jihoon $\\mathbf{P}\\mathbf{k}^{1}$ , Jeonghoon $\\mathbf{K}\\mathbf{m}^{2}$ , Byeongwook $\\mathbf{K}\\mathbf{m}^{2}$ , Baeseong $\\mathbf{p}\\mathbf{k}^{2}$ , Se Jung Kwon $^{2}$ , Dongsoo Lee 
$^{2}$ , Jae-Joon $\\mathbf{K}\\mathbf{m}^{1}$", + "bbox": [ + 179, + 194, + 766, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Seoul National University, $^{2}$ NAVER Cloud", + "bbox": [ + 183, + 224, + 475, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{yulhwakim, jaeyongjang, jehun.lee, jihoonpark, kimjaejoon}@snu.ac.kr, {jeonghoon.samuel, byeonguk.kim, baeseong.park, sejung.kwon, dongsoo.lee}@navercorp.com", + "bbox": [ + 183, + 239, + 823, + 280 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 316, + 545, + 330 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Even though floating point (FP) numbers have been adopted as a de facto standard data format for deep learning computing, the complexity of FP arithmetic impedes a broader deployment of Deep Neural Networks (DNNs). Recent works such as quantization have attempted to replace the FP matrix multiplication (MatMul) of DNNs with simple integer MatMul by transforming the datatypes of both weights and activations into integers. Unfortunately, unlike weight values that are static, it is challenging to represent dynamic activations with integers. In this paper, to simultaneously achieve the accuracy of FP activation and the simplicity of integer arithmetic, we present a method for replacing FP arithmetic with integer one without changing FP activations in the storage format while weights are quantized. The proposed method pre-aligns the significands of FP activations just ahead of the MatMul on-the-fly so that the aligned significands (integers) can be used for the computation. Inspired by an observation that conventional FP arithmetic does not produce precise results due to rounding, we demonstrate that our proposed integer arithmetic-based scheme can produce the same level of errors as that of the FP arithmetic in case DNNs use FP activations and quantized weights. 
Experimental results show that the hardware based on the proposed scheme shows significant improvement over FP arithmetic-based designs in terms of energy efficiency and throughput-per-area while maintaining a similar level of accuracy.", + "bbox": [ + 228, + 349, + 767, + 613 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 642, + 336, + 656 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Deep Neural Networks (DNNs) usually use Floating-Point (FP) number systems to represent a wide range of weight and activation values. Such a comprehensive representation, however, demands high computational complexity and cost for FP matrix multiplication (MatMul) (Sze et al., 2017). On the other hand, integer (a.k.a fixed-point) arithmetic logic is much simpler while consuming less energy compared to FP counterpart (Jouppi et al., 2021). As such, the computational efficiency of DNNs can be enhanced by replacing FP arithmetic with integer one. Accordingly, quantization has been actively studied as a promising technique to support DNN computations with integer arithmetic, as it maps the input values of a (virtually) continuous domain (FP numbers) to the output values of a discrete set (integers) (Jacob et al., 2018). Note that even though several studies have successfully quantized weights and activations of some target DNNs with low-precision integer values (Li et al., 2021; Wu et al., 2022), quantization is still challenging for numerous DNNs. In particular, activation values are known to be more difficult to be quantized than the weight parameters because activations are dynamically generated during inference while the distribution of weights is static. The uncertainty of the distribution of dynamic activation values limits the ability to estimate proper quantization range (Choi et al., 2018). 
Such issues on activation quantization become even more serious when DNNs involve highly non-linear activation functions (e.g., GeLU) or modules that increase the variance of the activations (e.g., softmax and normalization layers) (Jeon et al., 2020). As a result, while the weight parameters can be successfully quantized even for generative mod", + "bbox": [ + 169, + 674, + 826, + 925 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a88431bd3c3fb7c88e0af2e9244d736bb954c6c3e9fc88a8995005b1c6f84b2a.jpg", + "image_caption": [ + "Figure 1: An example of FP summation with (a) conventional FP computation and (b) proposed method. The precise summation is described in the box on top." + ], + "image_footnote": [], + "bbox": [ + 184, + 104, + 818, + 191 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "els (Xu et al., 2018; Bai et al., 2019; Jeon et al., 2022; Park et al., 2022; Kwon et al., 2022; Frantar et al., 2022) and extra-large models such as GPT-NeoX-20B (Chung et al., 2020; Yao et al., 2022), activation quantization usually relies on intensive quantization-aware training or sophisticated investigation algorithms such as dynamic min/max searching (Tao et al., 2022). Note that activation quantization is mandatory if integer arithmetic logic is involved for MatMul operations. 
Thus, to avoid such significant efforts to quantize complex DNNs (mainly due to activation quantization), recent neural processing units tend to employ FP arithmetic units even for inference process at the cost of increased energy and area (Jouppi et al., 2021).", + "bbox": [ + 169, + 232, + 826, + 345 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the challenges discussed above, we propose a scheme that can achieve both the accuracy of FP activations and the simplicity of integer arithmetic. Our motivation stems from an observation that most multiplications can be removed once weights are quantized to be binary-coded (Jeon et al., 2020). Then, consecutive FP additions are mainly required to perform MatMul, and hence, we find conventional FP units can be much simplified. To be more specific, when processing the MatMul of DNNs, our proposed method first pre-aligns the significands of FP activations to be added. Correspondingly, FP activations can be reformatted into integer values and FP arithmetic units (FPUs) can be replaced with integer units during MatMul operations. A naive pre-alignment for accurate computation requires very high-resolution integer units for the computation, which negates the benefits of using integer units. Inspired by an observation that conventional FP arithmetic does not guarantee the exact results due to rounding errors (Wilkinson, 1994), we show that the same level of computational error can be obtained even when the pre-aligned significands are aggressively truncated. We then implement an integer-based FP arithmetic unit (iFPU) hardware for MatMul computation based on the proposed scheme. 
A comprehensive evaluation of the iFPU on various DNNs shows that the iFPU significantly improves energy efficiency and throughput-per-area over the conventional FPU-based MatMul engine while maintaining the neural network accuracy.", + "bbox": [ + 169, + 352, + 826, + 575 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 BACKGROUND", + "text_level": 1, + "bbox": [ + 171, + 601, + 328, + 616 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 FLOATING-POINT ARITHMETIC AND ROUNDING ERROR", + "text_level": 1, + "bbox": [ + 171, + 637, + 602, + 651 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "FP format represents a number as $(-1)^{s} \\times (m) \\times 2^{(e - bias)}$ which consists of sign $(s)$ , exponent $(e)$ , and significand (or mantissa, $m$ ) (Muller et al., 2018). Float32 assigns 1 bit for $s$ and 8 bits for $e$ . Precision $(p)$ , the effective bit count of the significand, is 24 bits (among which 23 bits are explicitly stored). Bfloat16, which has been gaining popularity in the field of deep learning, intensely cuts down stored significand bits to 7 (compared to 23 in float32) to lower the total number of bits per value, and thereby reduces memory footprint (Wang & Kanwar, 2019). The bias of the exponent term is usually set to half of the exponent maximum.", + "bbox": [ + 169, + 665, + 823, + 765 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "FP format can cover a wide range of numbers by separating the significant digits and the scale of the number. Note that because of the precision limits, there is a gap between two consecutive FP numbers. Such a gap is called a unit of least precision (ulp) whose value is represented by the least significant digit. Hence, it is hard to represent real numbers precisely with FP format even if the numbers are in the dynamic range of the FP format, and rounding is required for converting real numbers into FP numbers. 
FP arithmetic typically normalizes significands for each computation, and the rounding operation is followed by the normalization to convert the computation result into an FP number. Round-to-nearest is the most frequently chosen as a rounding mode where the difference between the real value and the round-off value can be as large as half ofulp, and its relative error is bounded by $\\epsilon = \\frac{1}{2}ulp = 2^{-p}$ , which is referred to as machine epsilon. Bothulp and $\\epsilon$ are widely used to evaluate the accuracy of numeric calculations (Goldberg, 1991).", + "bbox": [ + 169, + 771, + 826, + 924 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As every FP operation includes the rounding stage, rounding error is unavoidable in FP arithmetic. Although the error of a single FP arithmetic operation may be small enough to be ignored, the error can be substantial if a series of multiple FP arithmetic results are accumulated. For example, an inner product of MatMul involves multiple FP additions in a row and the FP summation piles up the rounding error of each FP adder (Figure 1(a)). Accordingly, numerous solutions have been introduced to compensate for the error of the FP summation (Muller et al., 2018). Such error compensations cause an additional computation burden for tracking and fixing the error. Since the effect of the rounding errors on DNN accuracy is negligible, popular deep learning frameworks such as PyTorch and CuDNN (Paszke et al., 2019; Chetlur et al., 2014) allow the rounding errors (without the compensation algorithms) in favor of simple computation. 
Note that as the level of rounding error depends on the precision $p$ (only 8 bits for bffloat16), the error becomes noticeable for bffloat16. Therefore, summation of bffloat16 values uses float32 adders (instead of bffloat16 adders) to preserve the accuracy of accumulated results (Wang & Kanwar, 2019; Intel, 2018; Henry et al., 2019).", + "bbox": [ + 169, + 103, + 826, + 285 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 RELATED WORKS", + "text_level": 1, + "bbox": [ + 171, + 300, + 341, + 315 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Block Floating Point (BFP) has been proposed as a compromise between FP and integer formats. It assigns a single shared exponent to a group of FP values while maintaining individual significands (Wilkinson, 1994). The BFP has drawn attention as a flexible low-bit numeric format for quantization because the shared exponent can represent the dynamic range of values with little overhead. Hence, BFP can achieve a higher compression ratio than integer formats (Zhang et al., 2022a). In addition, since the individual significand values are integer, the BFP formats enable simpler computation than FP formats (Koster et al., 2017). Note that a critical limitation in previous works based on BFP formats is that the same level of accuracy as that of conventional FP computations cannot be guaranteed (even theoretically). Previous works tend to find the optimal BFP formats with the least memory/computation density by evaluating DNN accuracy for various bit resolution and group sizes (Song et al., 2018; Lian et al., 2019; Rouhani et al., 2020). Another drawback in some previous works on BFP is that DNNs with BFP format need to be fine-tuned usually by quantization-aware training to improve the accuracy (Zhang et al., 2022a; Rouhani et al., 2020). 
Since a quantized neural network allows only one fixed block size that is optimized for target hardware during training, a neural network needs to be retrained for different hardware choices if a block size differs.", + "bbox": [ + 169, + 325, + 826, + 536 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Truncated binary multipliers with error compensation schemes have been proposed to reduce the number of outputs in integer multiplications (Petra et al., 2009). While both the truncated multipliers and our proposed work use the truncations to improve computational efficiency, there are critical differences between them. In the truncated binary integer multipliers, the amount of the truncated bits is fixed while it varies in FP additions cases which our work focuses on. In addition, (Petra et al., 2009) presents a truncation error correction function utilizing the fact that some of the truncated partial products share the same inputs with the remaining partial products, so they have correlations with the remaining partial product values. Unfortunately, in FP addition cases, the truncated significands do not have any correlation with the remaining bits so it is hard to devise similar error correction schemes. Hence, there is a strong need to develop alternative ways to control the truncation errors in FP operations.", + "bbox": [ + 169, + 542, + 826, + 696 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 RECONSTRUCTION OF FP-BASED MATMUL WITH INTEGER ARITHMETIC", + "text_level": 1, + "bbox": [ + 171, + 715, + 805, + 729 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 OVERVIEW OF THE PROPOSED MATMUL RECONSTRUCTION AND COMPUTATION", + "text_level": 1, + "bbox": [ + 171, + 744, + 772, + 760 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we propose a methodology to reconstruct FP MatMul with integer arithmetic for efficient DNN computation, focusing on FP activations and quantized weights. 
In most cases, the weight matrix with $m$ -bit quantization can be expressed as a binary-coded matrix: $\\sum_{b=1}^{m} \\alpha_{b} \\cdot B_{b}$ where $\\alpha_{b}$ is a scaling factor and $B_{b}$ is a binary weight matrix of each bitplane. Here, $\\alpha_{b}$ can be a power of 2 for uniform quantization or can be an FP value for non-uniform quantization. MatMul is composed of multiple dot products, and a dot product between activations and weights is defined as $\\sum_{k=1}^{n} (a_{k} \\times w_{k})$ ( $a$ : activation, $w$ : weight, $n$ : fan-in of the layer). If we apply binary-coded weights and properly change the order of the operations, we can rewrite the dot product as follows:", + "bbox": [ + 169, + 771, + 823, + 883 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {b = 1} ^ {m} \\alpha_ {b} \\sum_ {k = 1} ^ {n} \\left(a _ {k} \\times B _ {b, k}\\right), B _ {b, k} \\in [ - 1, + 1 ] \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 888, + 825, + 928 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/811293706d38e8580ee9fae13d3f1ecfad52f15572bc10de6365b6b9099324a6.jpg", + "image_caption": [ + "Figure 2: Overview of the proposed MatMul computing scheme for DNNs with FP activations." + ], + "image_footnote": [], + "bbox": [ + 192, + 99, + 803, + 196 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/55188d2c9943bae17819d9b758b32d345370f0eb1446e130b59171a68ae29ca9.jpg", + "image_caption": [ + "Figure 3: Comparison of a previous approach (e.g., MSFP (Rouhani et al., 2020)) and the proposed approach for applying block floating point (BFP) to DNN computation. 
In the case of MSFP, the original network needs to be retrained for the MatMul engines with different block sizes, but in the proposed scheme, the original network can be fed into the engines with any block sizes." + ], + "image_footnote": [], + "bbox": [ + 184, + 213, + 810, + 311 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For each bitplane, MatMul of weights and activations is reconfigured as the addition/subtraction of activation values except for a few $\\alpha_{b}$ multiplications that are necessary to merge the outputs from each bitplane. Because FP multiply-accumulate operations require more hardware resources than FP additions, even such a reconfiguration of matrix multiplication to remove most multiplications can improve the efficiency of DNN computations significantly (Jeon et al., 2020). Even so, because FP additions are still computationally more expensive than integer additions, replacing FP additions with integer additions can save even more energy and area. Therefore, we propose to reconstruct FP-based MatMul (Eq. 1) using integer additions (Figure 2). One of the key components of the proposed method is the pre-alignment, which reforms the FP activation values into integer values on-the-fly by sharing the exponent value among the activations that are fed to a dot product of the MatMul at a time. The pre-alignment finds the maximum of the exponents among the activations and aligns corresponding significands simultaneously based on the difference of each exponent and the maximum exponent. As a result, unlike conventional FP arithmetic that performs the alignment for each addition, our proposed computing methodology aligns the activation values once per MatMul, and thus, reduces the overall cost of the alignment process significantly. 
Note that as opposed to previous works that share the exponent among a block of inputs in the storage format (e.g., MSFP (Rouhani et al., 2020)), our design performs the exponent sharing during the computation. Since different exponents are allowed in the storage format in our scheme, we keep the representation power of the conventional FP format (Figure 3). Because pre-aligned activations can be represented by the aligned significands which are integer values, an FP addition of the MatMul can be replaced by an integer addition. After the whole summation process, the proposed method reforms the summation results back to FP values by normalizing the results with the maximum exponent found in the pre-alignment stage. Then, the computation results from each weight bitplane are multiplied by $\\alpha_{b}$ and merged to finish the target MatMul operation.", + "bbox": [ + 169, + 373, + 826, + 708 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As the exponent of float32 (or bfloat16) is 8-bit, the maximum amount of the significand shifting is 255 and the resolution of the aligned activation becomes 279 (or 263) bits. Note that such a large bit width might negate the benefits of using integer units. For example, while 32-bit integer addition consumes $10.3\\%$ energy of float32 addition, 279-bit integer requires a level of energy per addition comparable to that of float32 addition (Appendix B.1). To avoid the large design overhead, we propose to use only the top $t(= p + \\delta)$ bits of the aligned activation when $\\delta$ indicates the number of extra significand bits for reducing truncation error. 
Since the conventional FP addition also experiences errors due to truncation of significand, relatively small extra $\\delta$ bits for the proposed method can derive a level of errors similar to that of conventional FP addition (as described in Figure 1).", + "bbox": [ + 169, + 712, + 826, + 852 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 COMPUTATION ERROR AFTER SIGNIFICAND TRUNCATION", + "text_level": 1, + "bbox": [ + 171, + 869, + 614, + 883 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To study the characteristics of errors in the proposed method with truncated significands, we first analyze the computation error with a single addition/subtraction between two FP values $x$ and $y$ .", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We assume $x > y \\geq 0$ , $x = x_0.x_1 \\cdots x_{p-1}$ , and $y = y_0.y_1 \\cdots y_{p-1} \\times 2^{-k} (k \\geq 0)$ without loss of generality, because only the difference between the exponents decides the amount of shifting and truncation. Here, $x_i$ and $y_i$ denote the binary value of $i$ -th significand bit, and the leading bit $x_0$ is 1 for $x$ when $x > 0$ . When either $k$ or $y$ is 0, there is no need for significand shifting and truncation, and hence, integer-based FP arithmetic can guarantee the precise computation without any extra bit (i.e., $\\delta = 0$ ).", + "bbox": [ + 169, + 103, + 823, + 188 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When $k > 0$ , we need to shift and truncate the significand of $y$ for the computation. 
For the alignment, $y$ should be shifted to right by $k$ , so $y$ can be rewritten as $y = 0.0\\cdots 0y_k' y_{k+1}'\\cdots y_{k+p-1}'$ where $y_{k+i}'$ is equal to $y_i$ . As only the top $t(= p + \\delta)$ bits of the significand remain after the truncation, the truncated result becomes $\\bar{y} = 0.0\\cdots 0y_k' \\cdots y_{t-1}'$ . When $\\delta \\geq k$ , the difference between $y$ and $\\bar{y}$ is 0. Otherwise, the difference between $y$ and $\\bar{y}$ is bounded as follows:", + "bbox": [ + 169, + 194, + 823, + 268 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left| y - \\bar {y} \\right| = 0. 0 \\dots 0 y _ {t} ^ {\\prime} \\dots y _ {k + p - 1} ^ {\\prime} \\leq 2 ^ {- (p + \\delta - 1)} \\left(1 - 2 ^ {- (k - \\delta)}\\right). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 286, + 823, + 306 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The relative error of the addition with the truncated significand is defined as follows:", + "bbox": [ + 171, + 315, + 725, + 330 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ne _ {a d d} = \\frac {\\left| (x + y) - (x + \\bar {y}) \\right|}{| x + y |} = \\frac {| y - \\bar {y} |}{| x + y |}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 337, + 823, + 371 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By applying both $|x + y| \\geq |x| \\geq 1$ and Eq. 2 to Eq. 3, we can obtain", + "bbox": [ + 171, + 378, + 632, + 393 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ne _ {a d d} \\leq 2 ^ {- (p + \\delta - 1)} \\left(1 - 2 ^ {- (k - \\delta)}\\right) \\leq 2 ^ {- (p + \\delta - 1)}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 400, + 823, + 419 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Because the machine epsilon is given as $\\epsilon = 2^{-p}$ , $e_{add} \\leq \\epsilon$ when $\\delta$ is 1 and $e_{add} \\leq \\frac{1}{2}\\epsilon$ when $\\delta$ is 2. 
For subtraction, the relative error is defined similarly as follows:", + "bbox": [ + 169, + 425, + 820, + 462 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ne _ {s u b} = \\frac {\\left| (x - y) - (x - \\bar {y}) \\right|}{\\left| x - y \\right|} = \\frac {\\left| y - \\bar {y} \\right|}{\\left| x - y \\right|}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 469, + 823, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When $\\delta \\geq k$ , $|y - \\bar{y}|$ is 0 so that $e_{sub}$ is 0. The minimum of $x$ is 1, and $y$ has the maximum value when all $y_{k + i}'$ s are 1. Correspondingly, $|x - y|$ is bounded as follows:", + "bbox": [ + 169, + 508, + 823, + 539 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left| x - y \\right| \\geq \\left\\{ \\begin{array}{l l} 1 - 0. 1 1 \\dots 1 = 2 ^ {- p}, & \\text {f o r} k = 1 \\\\ 1 - 0. 0 \\dots 0 1 \\dots 1 \\geq 2 ^ {- 1} + 2 ^ {- 2} + \\dots + 2 ^ {- (k - 1)}, & \\text {f o r} k \\geq 2 \\end{array} \\right. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 549, + 823, + 583 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When $k$ is 1 and $\\delta$ is 0, we get $|y - \\bar{y}| \\leq 2^{-p}$ from Eq. 2. For such a case, according to Eq. 6 and Eq. 5, we have $e_{sub} \\leq 1$ . The worst case happens when $x = 1$ and $y = 0.111 \\cdots 1$ . When $k \\geq 2$ , by applying Eq. 2 and Eq. 6 to Eq. 5, we get $e_{sub} \\leq \\epsilon$ for $\\delta = 1$ , and $e_{sub} \\leq 1/2\\epsilon$ for $\\delta = 2$ . 
As a result, regardless of FP formats, the proposed method has the error level as summarized in the following Remark 1.", + "bbox": [ + 169, + 592, + 823, + 662 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Remark 1 The integer-based FP addition/subtraction has the same level of error as that of the conventional FP addition/subtraction with 1 extra bit, and the error becomes half with 2 extra bits.", + "bbox": [ + 169, + 675, + 823, + 705 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that the error of FP summation is the same as the accumulated value of errors from each addition (Muller et al., 2018). The reconstructed MatMul, however, induces an additional stage of", + "bbox": [ + 169, + 717, + 826, + 746 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/34a77698e36c7076b358899961895e08c9fc226399ef18cfa647a076d91edd91.jpg", + "image_caption": [ + "Figure 4: (a) Average and (b) maximum FP summation errors of conventional FP computation and the proposed method with extra bits $(\\delta = 0,1,2)$ against the accurate FP summations with Shewchuk algorithm (Shewchuk, 1997)." + ], + "image_footnote": [], + "bbox": [ + 178, + 752, + 496, + 873 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/35d60a018a96def348034e2d6034b03f1044607d7f5a7eb920c58b535999e31b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 752, + 826, + 873 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "converting integer summation results to FP values, and thus, additional rounding error during the FP formatting (Figure 5(a)). 
For example, to sum 128 FP values, a conventional FP-based MatMul has 127 error sources with bound $\\epsilon$ while the reconstructed MatMul with 1 extra bit has 128 error sources with bound $\\epsilon$ such that the reconstructed MatMul might experience a slightly larger error than conventional FP-based MatMul. Therefore, to guarantee the same error level as that of the conventional FP arithmetic, 2 extra bits are used for pre-alignment. Then, reconstructed MatMul has 127 error sources with bound $0.5\\epsilon$ and an additional error source with bound $\\epsilon$ .", + "bbox": [ + 169, + 103, + 826, + 202 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To verify the computation error of the proposed method, we randomly sample float32 values and compare the computation error of FP summation between conventional FP computation and the proposed method. To explore a wide range of float32 values, we sample $s$ , $e$ , and $m$ values independently assuming a uniform distribution, and then concatenate those values. We vary the fan-in (i.e., the number of values to be accumulated) from 128 to 8192, and sample 50,000 sets of FP numbers for each fan-in selection. The Shewchuk algorithm is employed to obtain accurate FP summation baseline data for error measurement (Shewchuk, 1997).", + "bbox": [ + 169, + 208, + 823, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Figure 4, the proposed method produces a similar level of errors to that of the conventional FP arithmetic for various fan-in values when $\\delta = 2$ . Because larger errors are more likely to be accumulated with larger fan-in, we see that both average and maximum errors tend to grow as the fan-in increases (Figure 4). 
Nonetheless, the average error $(12.3 \\times 10^{-7})$ and the maximum error $(2.4 \\times 10^{-2}$ or $2.4\\%)$ are relatively small even with 8192 fan-in, which justifies the current practice of implementing conventional FP additions without error correction for DNN inference. Correspondingly, the proposed method can support as precise numerical computation as conventional FP arithmetic does.", + "bbox": [ + 169, + 311, + 823, + 424 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 EXPERIMENT", + "text_level": 1, + "bbox": [ + 171, + 444, + 318, + 458 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 IFPU: A MATMUL ENGINE FOR THE PROPOSED METHOD", + "text_level": 1, + "bbox": [ + 169, + 474, + 607, + 488 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall Architecture. To evaluate the proposed method with real hardware implementation, we first design a MatMul engine called iFPU. Figure 5 shows the overview of systolic iFPU architecture which adopts the design principle of Google's TPU (Jouppi et al., 2017). iFPU performs FP MatMul in the form of a set of FP summation (Eq. 1) that is physically implemented as integer summation for high efficiency. After the computation, the iFPU converts integer results into FP values through the int2fp converter at the end of the Processing Element (PE) arrays. Then, scale & accumulator is used to multiply $\\alpha_{b}$ and add summation results of each weight bit", + "bbox": [ + 169, + 500, + 517, + 680 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8a13a0c4739e9b90f68e1394695a72bbe8e7cc524e031be0bed1ed4a70bd57e1.jpg", + "image_caption": [ + "Figure 5: A block diagram of iFPU" + ], + "image_footnote": [], + "bbox": [ + 535, + 505, + 823, + 652 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "plane to finish the MatMul (Eq. 1). 
The size of MatMul that can be processed in the iFPU at a time is bounded by the number of PEs, and as a practical design, we evaluate the iFPU with $32 \\times 32$ , $64 \\times 64$ , or $128 \\times 128$ PEs for the experiment. When fan-in of the DNN layer exceeds the row count of PEs, activations of the layer are tiled to fit the row-count limit, and each tile is fed into the iFPU at a time and processed with integer adders in the PEs. To complete the entire MatMul, the computing results for different tiles should be merged, and for this, float32 adders (accumulator) are used again.", + "bbox": [ + 169, + 680, + 823, + 765 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Precision of Integer Adder. As the PE array of the iFPU accumulates the pre-aligned and truncated significands, the size of the integer adder in each PE depends on $t$ , which is determined by the precision of the given FP format ( $p$ ) and extra bits ( $\\delta$ ) attached to control truncation error. Based on the theoretical analysis given in Section 3.2, the iFPU for float32 activations conducts 26-bit integer addition with $\\delta = 2$ . Though the iFPU introduces additional FP accumulations due to the MatMul tiling, the error level of integer-based FP addition with $\\delta = 2$ is half of the conventional FP addition according to Remark 1. Therefore, the iFPU with $\\delta = 2$ can still preserve the same level of computing error as that of conventional FP MatMul (Figure 6(a)). Furthermore, the iFPU for bfloat16 activations can be designed to be even smaller and more energy efficient by using smaller precision integer adders thanks to the reduced bit precision for significands. 
Interestingly, conventional bfloat16 accumulation still uses float32 adders to preserve the accuracy of accumulated", + "bbox": [ + 169, + 771, + 826, + 924 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/cf46c1f7050183a59f40048d068deffef67250ac95693dfffa8f6ce8d478fe7f.jpg", + "image_caption": [ + "(a) Evaluation with float32 activation" + ], + "image_footnote": [], + "bbox": [ + 187, + 103, + 810, + 193 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/17cb24c416bcac6f908010fec2e877c55e1bc30c4dbb8c06f04d4f194e1ed123.jpg", + "image_caption": [ + "(b) Evaluation with bfloat16 activation" + ], + "image_footnote": [], + "bbox": [ + 187, + 212, + 807, + 303 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cd2264494c7c1149c454cfc42e3280b6d3e9841ae704ae9f98e1217546c28ba8.jpg", + "image_caption": [ + "Figure 6: Numerical computation errors of MatMul for DNNs with FP activation. We measure the computation error of conventional FPU-based engine and the proposed iFPU against the accurate FP computation with Shewchuk algorithm (Shewchuk, 1997). The number of PEs and fan-in are annotated along the horizontal axis." 
+ ], + "image_footnote": [], + "bbox": [ + 189, + 386, + 403, + 463 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/819af033f580bca5686abd485b875008c4e4eab5e6e651e6d643ea05e1f2ffa7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 408, + 386, + 602, + 463 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b17e97228a302c010195825f3567e080b9a809e0c13e42506451f32f825770f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 607, + 387, + 803, + 463 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d4e67b68c95440ea4b5872d8a48a3e0ff842a2a3dcbe5ce46074da4e54b4b959.jpg", + "image_caption": [ + "(a) Evaluation with float32 activation" + ], + "image_footnote": [], + "bbox": [ + 189, + 483, + 403, + 559 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fac697cc56c79e8dfc34799ac13aae38a22cde3ff94de97cf53d615d61082109.jpg", + "image_caption": [ + "(b) Evaluation with bfloat16 activation" + ], + "image_footnote": [], + "bbox": [ + 406, + 484, + 602, + 559 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f64159eff0c7a5f994cd1ca807d5665991632eae40a8901dc01eb8d5e726adc7.jpg", + "image_caption": [ + "Figure 7: Cosine distance between MatMul results of BERT-base (task: MRPC) extracted from inference results using conventional FPU-based engine (NVIDIA RTX3090) and the proposed iFPU. The last feed-forward layer in each encoder block (1-12th layers) and pooler (13th layer) is used for the evaluation. The number of PEs and layer indices are annotated along the horizontal axis." + ], + "image_footnote": [], + "bbox": [ + 607, + 484, + 803, + 560 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "results (Wang & Kanwar, 2019; Intel, 2018; Henry et al., 2019). However, as the accumulated results are converted back to bfloat16, it is possible to maintain the accuracy of bfloat16 accumulation with less accurate adders than float32 adders. 
Figure 6(b) shows that the proposed bfloat16 iFPU with $\\delta = 3$ (which uses 11 bit adders) provides comparable accuracy to that of conventional bfloat16 adders.", + "bbox": [ + 169, + 654, + 823, + 724 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 ANALYSIS OF THE DNN COMPUTATION ACCURACY", + "text_level": 1, + "bbox": [ + 171, + 744, + 575, + 758 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "MatMuls of DNN with iFPU vs FPU. In the previous section, we compared the accuracy of the proposed integer-based FP MatMul with precise results. Since our goal is to replace the FPU with the proposed iFPU, it is also important to compare the computational difference between the conventional error-prone FPU-based engine and the iFPU. For an in-depth understanding of DNN inference with the iFPU, we first compare the inference output of each layer in the BERT-base model (Devlin et al., 2018) computed with an FPU-based engine (NVIDIA RTX3090) and the proposed iFPU. BERT-base uses 4-bit weight values and the target task is MRPC. In iFPU, MatMuls between weights and activations are processed with the proposed integer-based approach, but other operations such as softmax are processed by using conventional FPU. We employ cosine distance as the metric to measure the difference in layer outputs. Note that the cosine distance is 0 for two identical vectors and 2 for entirely opposite vectors. 
In this experiment, the last feed-forward layer in each", + "bbox": [ + 169, + 771, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/937d97d3411101d9d633328531fc89e8ceb02e28ba19a278e159ddd9ea0bdd17.jpg", + "table_caption": [ + "Table 1: Accuracy of DNNs inference with conventional FPU-based engine (NVIDIA RTX3090) and proposed iFPUs(-#rows/columns of PE arrays). The numbers in parentheses represent accuracy difference between FPU & iFPU." + ], + "table_footnote": [], + "table_body": "
float32 activationbfloat16 activation
VGG-9ResNet-18OPT-1.3BVGG-9ResNet-18OPT-1.3B
FPU92.9170.2712.9692.9170.2812.96
iFPU-3292.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.91 (+0.00)70.26 (-0.02)12.96 (+0.00)
iFPU-6492.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.90 (-0.01)70.27 (-0.01)12.97 (+0.01)
iFPU-12892.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.92 (+0.01)70.26 (-0.02)12.98 (+0.02)
ResNet-50RegNetMnasNetResNet-50RegNetMnasNet
FPU76.3278.1875.9976.3378.1775.96
iFPU-3276.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.38 (+0.05)78.18 (+0.01)75.97 (+0.01)
iFPU-6476.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.38 (+0.05)78.18 (+0.01)75.96 (+0.00)
iFPU-12876.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.40 (+0.07)78.18 (+0.01)75.97 (+0.01)
", + "bbox": [ + 173, + 133, + 823, + 321 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/6d57767d4b6ea01b49b5853890646157b793b26b28fb3470ba5e42cc7e7047fb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BERT-Base w/ float32 activationAvg.
CoLAMRPCSST-2STS-BQQPMNLI-m/mmQNLIRTE
FPU56.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28
iFPU-3256.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
iFPU-6456.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
iFPU-12856.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
", + "bbox": [ + 173, + 321, + 826, + 424 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/15f57c953a6bf035f7ef5e609bd79799864a466a6db6baca1e09f429eba6c417.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BERT--BASE w/ bfloat16 activationAvg.
CoLAMRPCSST-2STS-BQQPMNLI-m/mmQNLIRTE
FPU56.0889.0591.5187.5283.7481.97/82.5789.0070.0481.30
iFPU-3256.1089.0591.5187.5283.7281.94/82.5589.0570.0481.28 (-0.02)
iFPU-6456.3689.0591.6387.5283.7281.93/82.5689.0070.0481.31 (+0.01)
iFPU-12856.1088.8391.6387.5283.7281.96/82.5489.0570.0481.27 (-0.03)
", + "bbox": [ + 173, + 426, + 826, + 527 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "encoder block and pooler is chosen for evaluation. Figure 7 shows that the FPU and the iFPU produce almost identical outputs for each layer. The averages of the distance are less than $1.2 \\times 10^{-6}$ and $2.5 \\times 10^{-4}$ for float32 and bfloat16 activations, respectively. Moreover, the distance between layer outputs from the two engines remains close throughout the forward path. As a result, we can expect that the proposed iFPU can support DNN inference with almost the same accuracy as that of conventional FPU.", + "bbox": [ + 169, + 537, + 823, + 619 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "DNN Inference Accuracy. We select 7 types of DNN models to compare DNN model accuracy between the FPU and iFPU: BERT-base, VGG-9, ResNet-18, ResNet-50, RegNet-3.2GF, MnasNet2.0, and OPT-1.3B. The accuracy of BERT-base is evaluated on the General Language Understanding Evaluation (GLUE) benchmark (Wang et al., 2019). VGG-9 (Simonyan & Zisserman, 2014) is evaluated on CIFAR-10 (Krizhevsky et al., 2009). ResNet-18, ResNet-50 (He et al., 2016), RegNet3.2GF (Radosavovic et al., 2020), and MnasNet-2.0 (Tan et al., 2019) measure top-1 accuracy on ImageNet (Russakovsky et al., 2015). OPT-1.3B (Zhang et al., 2022b) is an open-sourced NLP model provided by Meta AI roughly matching the performance and sizes of the GPT-3 class of models and is evaluated by estimating the perplexity on WikiText-2 dataset (Merit et al., 2016). All DNN models use 4-bit weight values that are quantized by a binary-coding quantization scheme. Note that no modifications to DNN structures are needed to deploy the weight-quantized DNNs to various iFPUs because 1) activations are FP values and 2) iFPUs are designed to process any MatMul for DNNs as long as weights are quantized. Table 1 summarizes the DNN inference results. 
Because the iFPU can produce almost identical MatMul results as FPU, the proposed iFPUs preserve the DNN accuracy for both float32 and bfloat16 activations as we expected.", + "bbox": [ + 169, + 627, + 826, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 ANALYSIS OF COMPUTATION EFFICIENCY", + "text_level": 1, + "bbox": [ + 169, + 854, + 503, + 869 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Setup. To evaluate the efficiency of proposed iFPUs, we synthesize the proposed hardware in a $28\\mathrm{nm}$ CMOS technology. For a fair evaluation of the impact of replacing FP MatMul with integer-based MatMul, we also design two 'baseline' engines for the conventional FP-based MatMul (Fig-", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ure 8). As the first baseline (FP-MAC), Figure 8(a) is designed with FP MAC units to process FP MatMul as a naive approach. In addition, as the second baseline (FP-ADD), Figure 8(b) is designed with FP adders to process FP MatMul reconfigured as Eq. 1. Because bitplanes of weight values are decomposed for FP-ADD and iFPU, binary weights are processed in a bit-parallel manner in FP-ADD and iFPU, while FP-MAC processes the whole weight values in each MAC unit. Compared to those two baseline engines, iFPU exhibits the lighter PEs along with additional units such as the pre-alignment unit and int2fp converter. Lastly, an int8 MatMul engine (INT8) is also implemented for the comparison between the proposed iFPU MatMul and integer MatMul.", + "bbox": [ + 169, + 103, + 826, + 217 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Results. 
Simulation results using the synthesized hardware demonstrate that the proposed iFPUs can improve both energy and area compared to the baselines, as the FP units of the baseline engines are replaced with the more area/energy efficient integer units (Figure 9). For float32 activations, the proposed iFPU improves throughput-per-area $(\\mathrm{TOPS} / \\mathrm{mm}^2)$ by up to $7.9\\times$ and energy efficiency (TOPS/W) by up to $6.4\\times$ compared to the FP-MAC baseline. For bfloat16 activations, the proposed iFPU achieves", + "bbox": [ + 169, + 222, + 550, + 347 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/78a9b38799730d982a84162ee1b462f287492c13abbfdf994834e1d8f4953627.jpg", + "image_caption": [ + "Figure 8: Baseline MatMul engines (a) FP-MAC and (b) FP-ADD" + ], + "image_footnote": [], + "bbox": [ + 563, + 224, + 689, + 311 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0c03f4a03e6589511b931f1211689d946f45856dd15f17cf76dc0e7bde169e37.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 226, + 823, + 311 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "even larger improvements because the size of the corresponding integer-based unit is reduced as the bit resolution of the aligned-truncated significands is reduced by 15 bits compared to float32 activation cases. The throughput-per-area of the iFPU is improved by up to $9.9\\times$ and energy efficiency is enhanced by up to $11.9\\times$ compared to the FP-MAC baseline. The improvement over the baseline becomes larger as the number of PEs increases because the overhead of additional logic such as pre-alignment units in the proposed scheme can be amortized (detailed in Appendix C.2). We also compare the iFPUs with the INT8 engine. 
While bfloat16 activations close the gap between the FP-MAC baseline and the INT8 engine significantly in terms of throughput-per-area, iFPU (with bfloat16 activations) achieves even higher energy efficiency than the INT8 engine in some cases (Figure 9).", + "bbox": [ + 169, + 347, + 826, + 487 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b300b0a5576d752e93ad84c011fe82fe2c46903ab5d90fd00710bad3caef2aa3.jpg", + "image_caption": [ + "Figure 9: Normalized energy efficiency (TOPS/W) (left) and throughput-per-area (TOPS/mm²) (right) of MatMul Engines: baselines and iFPUs for FP MatMul; INT8 for int8 MatMul. The number of PEs and target activation types are annotated along the horizontal axis." + ], + "image_footnote": [], + "bbox": [ + 178, + 489, + 500, + 609 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/c8ed2a204e737468738f1e7cd154c1c9fad26803d41a986509e9b7811dac74f0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 489, + 823, + 609 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 670, + 320, + 686 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The need to accomplish computing MatMul by using FP activations and quantized weights is increasing due to the growing usage of complex non-linear activation functions in DNN models such as Transformers. Conventional computing platforms such as CPU, GPU, and NPU, however, are inefficient in performing such computations. In this paper, we propose a new MatMul computing scheme dedicated to DNNs with FP activations and binary-coding weight quantization. The proposed method accelerates the FP MatMul of DNNs using the shared exponent and the integer arithmetic to improve computational efficiency. 
Previous works which also used the block floating point number with shared exponent often claim the validity of their design by presenting comparable DNN accuracy without verifying the robustness of MatMul results in a rigorous manner. We theoretically prove that the proposed scheme can produce the same error level as that of conventional FP arithmetic. To evaluate the computational efficiency of the proposed method, we design and synthesize a MatMul engine, iFPU, following the principle of integer-based operations. Experimental results support our claim that, compared to the conventional FPU-based design, the iFPUs accelerate the weight-only quantized DNNs with $6.4 \\times$ and $7.9 \\times$ higher energy efficiency and throughput-per-area for float32 activations, respectively. In addition, the iFPUs yield $11.9 \\times$ and $9.9 \\times$ higher energy efficiency and throughput-per-area, respectively, when associated with bfloat16 activations.", + "bbox": [ + 169, + 702, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 104, + 328, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This work was supported in part by Institute of Information & communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (No. 
2021-0-01343, Artificial Intelligence Graduate School Program (Seoul National University) $(10\\%)$ , and No.2021-0-02068, Artificial Intelligence Innovation Hub $(10\\%)$ ).", + "bbox": [ + 171, + 127, + 826, + 186 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 204, + 287, + 220 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yu Bai, Yu-Xiang Wang, and Edo Liberty. Proxquant: Quantized neural networks via proximal operators. International Conference on Learning Representations, 2019.", + "Sharan Chetlur, Cliff Woolley, Philippe Vandermersch, Jonathan Cohen, John Tran, Bryan Catanzaro, and Evan Shelhamer. cudnn: Efficient primitives for deep learning. arXiv preprint arXiv:1410.0759, 2014.", + "Jungwook Choi, Zhuo Wang, Swagath Venkataramani, Pierce I-Jen Chuang, Vijayalakshmi Srinivasan, and Kailash Gopalakrishnan. Pact: Parameterized clipping activation for quantized neural networks. arXiv preprint arXiv:1805.06085, 2018.", + "Insoo Chung, Byeongwook Kim, Yoonjung Choi, Se Jung Kwon, Yongkweon Jeon, Baeseong Park, Sangha Kim, and Dongsoo Lee. Extremely low bit transformer quantization for on-device neural machine translation. In Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 4812-4826, 2020.", + "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.", + "Elias Frantar, Saleh Ashkboos, Torsten Hoefler, and Dan Alistarh. Gptq: Accurate post-training quantization for generative pre-trained transformers. arXiv preprint arXiv:2210.17323, 2022.", + "David Goldberg. What every computer scientist should know about floating-point arithmetic. ACM computing surveys (CSUR), 23(1):5-48, 1991.", + "Mark Harris. Mixed-precision programming with CUDA 8, 2016. 
URL https://developer.nvidia.com/blog/mixed-precision-programming-cuda-8/.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.", + "Greg Henry, Ping Tak Peter Tang, and Alexander Heinecke. Leveraging the bfloat16 artificial intelligence datatype for higher-precision computations. In 2019 IEEE 26th Symposium on Computer Arithmetic (ARITH), pp. 69-76. IEEE, 2019.", + "Mark Horowitz. Computing's energy problem (and what we can do about it). In 2014 IEEE International Solid-State Circuits Conference Digest of Technical Papers (ISSCC), pp. 10-14. IEEE, 2014.", + "Intel. Bfloat16 - hardware numerics definition, 2018. URL https://www.intel.com/content/dam/develop/external/us/en/documents/bf16-hardware-numerics-definition-white-paper.pdf. Accessed: 2022-09-07.", + "Benoit Jacob, Skirmantas Kligys, Bo Chen, Menglong Zhu, Matthew Tang, Andrew Howard, Hartwig Adam, and Dmitry Kalenichenko. Quantization and training of neural networks for efficient integer-arithmetic-only inference. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2704-2713, 2018.", + "Yongkweon Jeon, Baeseong Park, Se Jung Kwon, Byeongwook Kim, Jeongin Yun, and Dongsoo Lee. Biqgemm: matrix multiplication with lookup table for binary-coding-based quantized dnns. In SC20: International Conference for High Performance Computing, Networking, Storage and Analysis, pp. 1-14. IEEE, 2020." 
+ ], + "bbox": [ + 171, + 227, + 826, + 925 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yongkweon Jeon, Chungman Lee, Eulrang Cho, and Yeonju Ro. Mr.biq: Post-training non-uniform quantization based on minimizing the reconstruction error. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12329-12338, 2022.", + "Norman P Jouppi, Cliff Young, Nishant Patil, David Patterson, Gaurav Agrawal, Raminder Bajwa, Sarah Bates, Suresh Bhatia, Nan Boden, Al Borchers, et al. In-datacenter performance analysis of a tensor processing unit. In Proceedings of the 44th annual international symposium on computer architecture, pp. 1-12, 2017.", + "Norman P Jouppi, Doe Hyun Yoon, Matthew Ashcraft, Mark Gottscho, Thomas B Jablin, George Kurian, James Laudon, Sheng Li, Peter Ma, Xiaoyu Ma, et al. Ten lessons from three generations shaped google's tpuv4i: Industrial product. In 2021 ACM/IEEE 48th Annual International Symposium on Computer Architecture (ISCA), pp. 1-14. IEEE, 2021.", + "Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W Mahoney, and Kurt Keutzer. I-bert: Integer-only bert quantization. In International Conference on Machine Learning, pp. 5506-5518. PMLR, 2021.", + "Urs Köster, Tristan Webb, Xin Wang, Marcel Nassar, Arjun K Bansal, William Constable, Oguz Elibol, Scott Gray, Stewart Hall, Luke Hornof, et al. Flexpoint: An adaptive numerical format for efficient training of deep neural networks. Advances in neural information processing systems, 30, 2017.", + "Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 
2009.", + "Se Jung Kwon, Jeonghoon Kim, Jeongin Bae, Kang Min Yoo, Jin-Hwa Kim, Baeseong Park, Byeongwook Kim, Jung-Woo Ha, Nako Sung, and Dongsoo Lee. Alphatuning: Quantization-aware parameter-efficient adaptation of large-scale pre-trained language models. arXiv preprint arXiv:2210.03858, 2022.", + "Yuhang Li, Ruihao Gong, Xu Tan, Yang Yang, Peng Hu, Qi Zhang, Fengwei Yu, Wei Wang, and Shi Gu. Brecq: Pushing the limit of post-training quantization by block reconstruction. In International Conference on Learning Representations, 2021.", + "Xiaocong Lian, Zhenyu Liu, Zhourui Song, Jiwu Dai, Wei Zhou, and Xiangyang Ji. High-performance fpga-based cnn accelerator with block-floating-point arithmetic. IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 27(8):1874-1885, 2019.", + "Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843, 2016.", + "Asit Mishra, Eriko Nurvitadhi, Jeffrey J Cook, and Debbie Marr. Wrpn: Wide reduced-precision networks. In International Conference on Learning Representations, 2018.", + "Jean-Michel Muller, Nicolas Brisebarre, Florent De Dinechin, Claude-Pierre Jeannerod, Vincent Lefevre, Guillaume Melquiond, Nathalie Revol, Damien Stehlé, Serge Torres, et al. Handbook of floating-point arithmetic. Springer, 2018.", + "Gunho Park, Baeseong Park, Se Jung Kwon, Byeongwook Kim, Youngjoo Lee, and Dongsoo Lee. nuqmm: Quantized matmul for efficient inference of large-scale generative language models. arXiv preprint arXiv:2206.09557, 2022.", + "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, volume 32, 2019.", + "Nicola Petra, Davide De Caro, Valeria Garofalo, Ettore Napoli, and Antonio GM Srolllo. 
Truncated binary multipliers with variable correction and minimum mean square error. IEEE Transactions on Circuits and Systems I: Regular Papers, 57(6):1312-1325, 2009.", + "Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, and Piotr Dólar. Designing network design spaces. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 10428-10436, 2020." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Bita Darvish Rouhani, Daniel Lo, Ritchie Zhao, Ming Liu, Jeremy Fowers, Kalin Ovtcharov, Anna Vinogradsky, Sarah Massengill, Lita Yang, Ray Bittner, et al. Pushing the limits of narrow precision inferencing at cloud scale with microsoft floating point. Advances in neural information processing systems, 33:10271-10281, 2020.", + "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015.", + "Jonathan Richard Shewchuk. Adaptive precision floating-point arithmetic and fast robust geometric predicates. Discrete & Computational Geometry, 18(3):305-363, 1997.", + "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014.", + "Zhourui Song, Zhenyu Liu, and Dongsheng Wang. Computation error analysis of block floating point arithmetic oriented convolution neural network accelerator design. 
In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018.", + "Vivienne Sze, Yu-Hsin Chen, Tien-Ju Yang, and Joel S Emer. Efficient processing of deep neural networks: A tutorial and survey. Proceedings of the IEEE, 105(12):2295-2329, 2017.", + "Mingxing Tan, Bo Chen, Ruoming Pang, Vijay Vasudevan, Mark Sandler, Andrew Howard, and Quoc V Le. Mnasnet: Platform-aware neural architecture search for mobile. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2820-2828, 2019.", + "Chaofan Tao, Lu Hou, Wei Zhang, Lifeng Shang, Xin Jiang, Qun Liu, Ping Luo, and Ngai Wong. Compression of generative pre-trained language models via quantization. arXiv preprint arXiv:2203.10705, 2022.", + "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. International Conference on Learning Representations, 2019.", + "Shibo Wang and Pankaj Kanwar. Bfloat16: The secret to high performance on cloud tpus. Google Cloud Blog, 4, 2019.", + "James Hardy Wilkinson. Rounding errors in algebraic processes. Courier Corporation, 1994.", + "Xiaoxia Wu, Zhewei Yao, Minjia Zhang, Conglong Li, and Yuxiong He. Extreme compression for pre-trained transformers made simple and efficient. arXiv preprint arXiv:2206.01859, 2022.", + "Chen Xu, Jianqiang Yao, Zhouchen Lin, Wenwu Ou, Yuanbin Cao, Zhirong Wang, and Hongbin Zha. Alternating multi-bit quantization for recurrent neural networks. In International Conference on Learning Representations, 2018.", + "Zhewei Yao, Reza Yazdani Aminabadi, Minjia Zhang, Xiaoxia Wu, Conglong Li, and Yuxiong He. Zeroquant: Efficient and affordable post-training quantization for large-scale transformers. arXiv preprint arXiv:2206.01861, 2022.", + "Sai Qian Zhang, Bradley McDanel, and HT Kung. Fast: Dnn training under variable precision block floating point with stochastic rounding. 
In 2022 IEEE International Symposium on High-Performance Computer Architecture (HPCA), pp. 846-860. IEEE, 2022a.", + "Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuhui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068, 2022b." + ], + "bbox": [ + 171, + 102, + 826, + 837 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A COMPUTATIONAL COST OF FP ARITHMETIC VS. INTEGER ARITHEMETIC", + "text_level": 1, + "bbox": [ + 171, + 102, + 807, + 118 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/795c69d7b191729241fa3831cc884c16a8897a2ae1de0d3cf909c0742b71daf7.jpg", + "table_caption": [ + "Table 2: Energy of computing units synthesized in a ${28}\\mathrm{\\;{nm}}$ tech node (MAC: multiply-accumulate)." + ], + "table_footnote": [], + "table_body": "
MACMultiplyAdd
float32int8float32int32int8float32int32
Energy per Operation1.51 pJ0.08 pJ1.23 pJ0.94 pJ0.06 pJ0.28 pJ0.03 pJ
Normalized Energy18.9×1.0×20.5×15.7×1.0×9.3×1.0×
", + "bbox": [ + 173, + 164, + 823, + 239 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To cover a wide range of numbers, FP format does not fix the location of the radix point (Goldberg, 1991). Hence, FP arithmetic needs to handle input and output values with different scaling factors, and the FP arithmetic units need to align and normalize significands before and after each computation, respectively. The alignment and normalization logics consist of barrel shifters that can shift a data word by a specified amount, and the cost of the barrel shifter far exceeds the cost of other arithmetic logics in terms of both energy and area, increasing the cost of FP computation (Horowitz, 2014). Hence, in general, integer arithmetic logic is much smaller and consumes less energy than FP counterpart.", + "bbox": [ + 169, + 256, + 614, + 411 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "It is well known that 8-bit integer can achieve up to $4 \\times$ throughput improvement compared to IEEE-754 single-precision format (float32) in widely used GPUs (Kim et al., 2021), as the throughput of 8-bit operations is generally $4 \\times$ that of 32-bit operations (Har", + "bbox": [ + 169, + 416, + 614, + 472 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "ris, 2016). The advantage of integer can be magnified when the hardware platform moves to ASIC (Mishra et al., 2018). For in-depth understanding, we synthesize computing units for FP and integer in a $28\\mathrm{nm}$ tech node. As shown in Table 2, multiplication-accumulation (MAC) for float32 consumes $18.9\\times$ more energy than 8-bit integer (int8), a widely used integer format for quantized DNNs. Please note that a float32 MAC consists of a float32 multiplication and a float32 addition while an int8 MAC consists of an int8 multiplication and an int32 addition. 
The bit resolution of the adder for the int8 MAC is higher than that of the multiplier, because int8 multiplication results in 16-bit values and the bit resolution of MAC values increases as the number of accumulated values increases for integer format. In addition, the area cost of the integer unit is also much smaller than FP units as shown in Figure 10. Therefore, many studies have attempted activation quantization despite the various difficulties in the quantization process because both weight parameters and activations should be quantized to replace FP arithmetic with integer arithmetic.", + "bbox": [ + 169, + 472, + 823, + 638 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/2122f6a76adc28979622ea194af6ee36e4c5348de9684ba2be11299ebb3fb30b.jpg", + "image_caption": [ + "Figure 10: Area comparison of computing units (layouts synthesized in a $28\\mathrm{nm}$ node)." + ], + "image_footnote": [], + "bbox": [ + 625, + 258, + 823, + 411 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B SUPPLEMENT FOR PROPOSED SIGNIFICAND TRUNCATION", + "text_level": 1, + "bbox": [ + 171, + 659, + 683, + 674 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B.1 ENERGY IMPROVEMENT WITH SIGNIFICAND TRUNCATION", + "text_level": 1, + "bbox": [ + 171, + 691, + 620, + 705 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/f6d95e1fb8d8523562dfe4ac551a38216da2318ef898a74304b45f87823fe641.jpg", + "image_caption": [ + "Figure 11: Energy of adders synthesized in a $28\\mathrm{nm}$ tech node (tested at $0.9\\mathrm{V}$ )." + ], + "image_footnote": [], + "bbox": [ + 179, + 731, + 439, + 820 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/6771686cafd9759a43126e3e9a12c277438d282b35843033f72ac77f7163cab9.jpg", + "image_caption": [ + "Figure 12: Example of the significand truncation followed by the pre-alignment." 
+ ], + "image_footnote": [], + "bbox": [ + 524, + 723, + 813, + 821 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "With naive pre-alignment of float32 activations, the maximum amount of the significand shifting is 255 and the resolution of the aligned activation becomes 279 bits. As shown in Figure 11, while 32-bit integer consumes $0.029\\mathrm{pJ}$ per addition, both float32 and 279-bit integer consumes $0.281\\mathrm{pJ}$", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "per addition. To avoid the large design overhead, we truncate the pre-aligned significands as shown in Figure 12. The aggressive truncation still did not cause accuracy degradation in FP additions as we described in the Section 3.2.", + "bbox": [ + 169, + 103, + 823, + 146 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.2 TRUNCATED BINARY MULTIPLIERS VS. PROPOSED SIGNIFICAND TRUNCATION", + "text_level": 1, + "bbox": [ + 171, + 162, + 759, + 176 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/2846d2162fa52e8b3bf581042c18aac7af09023cc2feabee7634935dd797f738.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 207, + 195, + 509, + 292 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/ca81923ddefb5c2185c6966507f20db5b5b336a0ea89cc33807884d331413bc9.jpg", + "image_caption": [ + "(b)", + "Figure 13: Comparison of the truncation scheme in the (a) truncated binary multiplier for integer multiplication and (b) proposed method for FP addition/subtraction." 
+ ], + "image_footnote": [], + "bbox": [ + 578, + 195, + 823, + 284 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Truncated binary multipliers (Petra et al., 2009) also discuss the truncation to improve computational efficiency, but there are critical differences between truncated binary multipliers and the proposed work as summarized in Figure 13. First of all, truncated binary multipliers deals with integer multiplications while the proposed work focuses on FP additions/subtractions. Due to the differences in the number format (integer vs. FP) and arithmetic operations (multiplications vs. additions/subtractions), the two works present completely different error analysis models and error reduction schemes.", + "bbox": [ + 169, + 359, + 823, + 455 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The error analysis models between truncated binary multipliers and our case are different, because the amount of truncation is fixed in the truncated binary integer multipliers and the amount of truncation varies in our work as the amount of significand shift varies depending on the input data. Moreover, in truncated binary multipliers, the bit resolution of truncated output is defined by the application requirement. On the other hand, as we proposed to truncate the pre-aligned values to adopt lower-bit integers and improve computational efficiency, the proper bit resolution of truncated values should be found to meet the accuracy requirement in our case.", + "bbox": [ + 169, + 464, + 823, + 561 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In addition, in integer multiplication case, some of the truncated partial products share the same inputs with the remaining partial products, so they have correlations with the remaining partial product values. (Petra et al., 2009) proposed an error minimization scheme which exploits such characteristics. 
On the other hand, in the FP addition/subtraction case, the truncated significands do not have any correlation with the remaining bits so it is hard to devise similar error correction schemes. Instead, we focused on the fact that conventional FP operation is also not precise due to the rounding of output significands so that we only need to match the error level of the proposed scheme to the conventional FP operations. Based on the facts, we showed a theoretical analysis such that the proposed integer-based FP addition/subtraction can have the similar error level as that of the conventional FP addition/subtraction when small number (1-2) of extra bits are attached to the shifted significands. With this finding, we can design an efficient integer-based FP addition logic without having complex error correction function estimated based on the truncated bits.", + "bbox": [ + 169, + 569, + 826, + 736 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C IN-DEPTH HARDWARE ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 756, + 480, + 771 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C.1 DETAILED HARDWARE DESCRIPTION OF THE PROPOSED IFPU", + "text_level": 1, + "bbox": [ + 169, + 786, + 643, + 800 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Figure 14 describes the proposed iFPU in detail. The proposed iFPU is a bit-flexible accelerator which can handle variable bitwidth of weight values. The iFPU processes weights in bit-parallel manner by processing each weight bitplane in different columns of the PE array. For example, 4-bit weights use 4 PE columns for the computation, and 8-bit weights use 8 PE columns for the computation. After the integer-based summations are done in each column of the PE array, the integer results are converted into FP values and multiplied by scaling factors which represent the significance of each bitplane. 
Then, computing results of each bitplane are merged in the accumulator (FP adder) to finish the MatMul. As the output resolution of FP accumulation remains the same regardless of the", + "bbox": [ + 169, + 811, + 826, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/e7fab444163ce272b8005088b3213cc4c59d1ad88dd0b78bd54c6800f21cbb4c.jpg", + "image_caption": [ + "Figure 14: A detailed block diagram of iFPU. The iFPU processes weights in bit-parallel manner by processing each bitplane of the weights in each column of the PE array. $(B_{b,k}$ : binary weights in Eq. 1)" + ], + "image_footnote": [], + "bbox": [ + 174, + 102, + 464, + 330 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/737da39c05e176befb2ecac474fd513ab63db587cee4589de9006f1b969bfd96.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 470, + 107, + 609, + 325 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/17e161b1a48ac765de589d10bcb589ab3055080325953a2431a71f0998358d68.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 629, + 104, + 818, + 325 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "size of the accumulation thanks to the characteristics of the FP format, the size of the accumulator does not need to increase for the increased weight bit width.", + "bbox": [ + 169, + 411, + 823, + 440 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.2 AREA/ENERGY BREAKDOWN OF PROPOSED IFPU", + "text_level": 1, + "bbox": [ + 171, + 457, + 562, + 470 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/479482482e743352d8059cdf7b9142d724358af8380b45b92013c7a6f7f81c59.jpg", + "image_caption": [ + "Figure 15: Area 
$(mm^2)$ (left) and power (W) (right) of MatMul Engines: baselines and iFPUs for FP MatMul with 32x32 PEs." + ], + "image_footnote": [], + "bbox": [ + 178, + 488, + 496, + 604 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/9f699af0fee63273ed539329b5989a908d9f842aeacf5e7b84e0c6a47deb8a1e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 488, + 818, + 604 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/ff4bd437c8192a558c56c8c0441be2ef86392d6948afa6172828065ce47eab4c.jpg", + "image_caption": [ + "Figure 16: Area breakdown (left) and power breakdown (right) of proposed iFPUs with $32\\mathrm{x}32$ , $64\\mathrm{x}64$ , and $128\\mathrm{x}128$ PEs." + ], + "image_footnote": [], + "bbox": [ + 178, + 670, + 491, + 792 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/09e5f4d13d77e18a58272181dd308ec012a30052175177d136a81df4cf53ef07.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 670, + 825, + 792 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this section, the area and power of the MatMul engines designed in Section 4.3 are analyzed in more detail for deeper understanding of the proposed scheme. First, a breakdown of the area/power of various MatMul engines with $32 \\times 32$ PEs is shown in Figure 15. FP-ADD reconstructs FP-MAC with a series of FP additions by separately processing each weight bitplane (Eq. 1), so to match the effective throughput of FP-ADD with that of FP-MAC in case of 4-bit weights, 4 FP-ADD", + "bbox": [ + 169, + 854, + 825, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "operations are used for the evaluation. 
Hence, though the area/energy of a single float32 adder is lower than that of a float32 MAC unit (Table 2), FP-ADD requires slightly larger area and power than FP-MAC. On the other hand, though iFPU also introduces $m$ times more operations than FP-MAC, iFPUs achieve large area and power reduction as the area/energy cost of PE arrays become significantly lower by replacing FP adders with integer adders. The area/power reduction is even larger in bfloat16 cases because smaller integer units can be used. As the area/power cost of PE arrays in iFPUs decreases, the relative portion of area/power of supporting logic (such as scale & accumulator) in the total area/power increases. Hence, the supporting logic accounts for more than half of the total area/power of iFPUs with 32x32 PEs. Meanwhile, the overhead of the supporting logic decreases as the size of PE arrays increases. We report the area/power breakdown of iFPUs with various number of PEs in Figure 16. The experimental results show that, as the size of PE arrays increase, the supporting logic is shared among more PEs and the overhead can be amortized.", + "bbox": [ + 169, + 103, + 826, + 272 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.3 IMPACT OF THE WEIGHT BITWIDTH ON THE PROPOSED IFPU", + "text_level": 1, + "bbox": [ + 171, + 287, + 635, + 304 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/03996390a185fb711ac2f8be70e6ddefff7047e7d4423fe87dad947c3e3121a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 318, + 825, + 465 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/efae3a70ea025388fd7031579896d7b03a54d2e968c2b0fa6f156bfd7fd618b4.jpg", + "image_caption": [ + "(a) Normalized energy efficiency (TOPS/W) of iFPUs", + "(b) Normalized throughput-per-area (TOPS/mm $^2$ ) of iFPUs", + "Figure 17: Computational efficiency of iFPUs normalized with that of the baseline FP MatMul engine (FP-MAC). 
Y-axis is the normalized value against FP-MAC and the iFPUs show higher efficiency than FP-MAC even for high-precision weight bits. The number of PEs and target activation types are annotated along the horizontal axis." + ], + "image_footnote": [], + "bbox": [ + 176, + 486, + 823, + 631 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "This section analyzes impact of weight bitwidth on the efficiency improvement achievable with the proposed iFPU. The experimental setup is the same as Section 4.3 except the weight bits. While only 4-bit weight cases are evaluated in Section 4.3, this section evaluates weights with 1 to 16 bits. Because the proposed scheme processes each bitplane of the weights in the bit-parallel manner, higher-bit weights require more operations with PE, scale, and accumulators. Hence, as shown in Figure 17, the benefits of the iFPUs diminish as the number of weight bits increases. Nevertheless, even for 8-bit weight case, iFPUs achieve better computational efficiency compared to the FP-MAC baseline.", + "bbox": [ + 169, + 739, + 826, + 851 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.4 COMPARISON OF THE PROPOSED IFPU WITH INT4 MATMUL ENGINE", + "text_level": 1, + "bbox": [ + 169, + 869, + 699, + 883 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In Figure 18., an int4 MatMul engine (INT4) is evaluated and compared with the other MatMul engines analyzed in Section 4.3. 
INT4 MatMul shows high energy efficiency and throughput-per", + "bbox": [ + 169, + 895, + 823, + 926 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 509, + 959 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/c5e3c9d6346d6aa33c6caae94ea469a25be309957f6bdd30e862fd44c183d6af.jpg", + "image_caption": [ + "Figure 18: Normalized energy efficiency (TOPS/W) (left) and throughput-per-area $(\\mathrm{TOPS} / \\mathrm{mm}^2)$ (right) of MatMul Engines: baselines and iFPUs for FP MatMul; INT8/INT4 for int8/int4 MatMul. The number of PEs and target activation types are annotated along the horizontal axis." + ], + "image_footnote": [], + "bbox": [ + 176, + 92, + 500, + 213 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/9a356fe726cbb5d3a58cf9461b03d3960de5e0f3ba0b6e06fa89355cb059cfdc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 93, + 823, + 212 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "area. However, to take advantage of INT4 MatMul, both weight and activation should be quantized to 4 bits, which may not provide desired accuracy in many cases.", + "bbox": [ + 169, + 300, + 823, + 330 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.5 HARDWARE EVALUATION WITH MEMORY ACCESS", + "text_level": 1, + "bbox": [ + 171, + 348, + 558, + 361 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/c3cca79da7dc9a65091328ad1629fd5d8011049b9d33dce7c96233f76ca10ec8.jpg", + "image_caption": [ + "Figure 19: Normalized energy consumption of MatMul engines (FP-MAC, FP-ADD, and iFPU) with memory system. The inference energy is measured for BERT-based and OPT-1.3B with 4-bit weights and float32/bfloat16 activations." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 383, + 464, + 539 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/72fff8ed9dae84bf3d5364cabd6f23c656e183c87972a7bc0de116e2df7d1d13.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 383, + 692, + 539 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/30a2a7c507b03a1b556a55f453baf86b63bf50d5c0c902cd9d39f6a51f5cd8cf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 696, + 422, + 821, + 486 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Setup. To understand the effectiveness of the proposed method in the real computing scenario, the baselines (FP-MAC and FP-ADD) and the proposed iFPU with 128x128 PEs are further evaluated including memory access. For off-chip memory, we scaled down the bandwidth of HBM2 in TPU (Jouppi et al., 2021) considering the ratio of the number of PEs that make up Matrix Multiply Unit (MXU), which is 1:4 and adopted energy per bit of HBM2 from Table 2 in (Jouppi et al., 2021); we used the bandwidth of 153.5 GB/s and the energy per bit of $3.9\\mathrm{pJ} / \\mathrm{bit}$ . We also scaled the size of the unified buffer (on-chip SRAM buffer) in (Jouppi et al., 2021) by dividing it by 4. The unified buffer size in our design was 32MB. For SRAMs, we used the $28\\mathrm{nm}$ CMOS memory compiler and the energy per bit of $0.155\\mathrm{pJ} / \\mathrm{bit}$ was used. To overlap memory access with computation, double buffering scheme was adopted in the unified buffer.", + "bbox": [ + 169, + 611, + 823, + 750 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Results. We evaluate a single batch inference of BERT-base and OPT-1.3B. We set the sequence length of BERT-base and OPT-1.3B as 128 and 1024 respectively. 
As double buffering hides the memory access latency, the proposed iFPU with memory model can achieve the same amount of throughput-per-area improvement as that of the baseline for the case in which memory access is not considered. On the other hand, the gain in energy efficiency slightly changes after considering memory access. As shown in Figure 19, the dram access energy accounts for a relatively small portion of total energy consumption in the baselines, because the data is intensively reused in the MatMul computation. As the proposed iFPU reduces the energy cost of computation, memory access energy becomes relatively significant in the proposed system. Thus, when considering the cost of memory access, the amount of improvement in the energy efficiency slightly decreases. Nevertheless, the iFPU with memory access still can improve the energy efficiency by up to 6.6x compared to FP-MAC baseline.", + "bbox": [ + 169, + 757, + 826, + 924 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D FINE-TUNING CONDITION FOR BERT-BASE TRAINING", + "text_level": 1, + "bbox": [ + 171, + 102, + 660, + 118 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/36869695064233af6e6995471766023f7e552aade89303f4c3257dc252a8b1ee.jpg", + "table_caption": [ + "Table 3: Hyper-parameters for fine-tuning BERT-base on GLUE benchmark. The fine-tuning use AdamW optimizer and the number of training epochs is 10. The learning rates decay linearly and the weight decay is set to 0.01." + ], + "table_footnote": [], + "table_body": "
ConfigurationGLUE
CoLAMRPCSST-2STS-BQQPMNLIQNLIRTE
Batch size1632323232161616
Learning rate1e-41e-41e-4 2e-41e-45e-55e-55e-51e-4
", + "bbox": [ + 173, + 191, + 823, + 266 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_model.json b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_model.json new file mode 100644 index 0000000000000000000000000000000000000000..80c7574202c6409dbbd5d2b7aee87a248cc2be0e --- /dev/null +++ b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_model.json @@ -0,0 +1,2766 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.173 + ], + "angle": 0, + "content": "WINNING BOTH THE ACCURACY OF FLOATING POINT ACTIVATION AND THE SIMPLICITY OF INTEGER ARITHMETIC" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.195, + 0.767, + 0.224 + ], + "angle": 0, + "content": "Yulhwa \\(\\mathbf{K}\\mathbf{m}^{1}\\), Jaeyong \\(\\mathbf{J}\\mathbf{a}\\mathbf{g}^{1}\\), Jehun \\(\\mathbf{L}\\mathbf{e}^{1}\\), Jihoon \\(\\mathbf{P}\\mathbf{k}^{1}\\), Jeonghoon \\(\\mathbf{K}\\mathbf{m}^{2}\\), Byeongwook \\(\\mathbf{K}\\mathbf{m}^{2}\\), Baeseong \\(\\mathbf{p}\\mathbf{k}^{2}\\), Se Jung Kwon\\(^{2}\\), Dongsoo Lee\\(^{2}\\), Jae-Joon \\(\\mathbf{K}\\mathbf{m}^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.225, + 0.476, + 0.239 + ], + "angle": 0, + "content": "\\(^{1}\\)Seoul National University, \\(^{2}\\)NAVER Cloud" + 
}, + { + "type": "text", + "bbox": [ + 0.184, + 0.24, + 0.825, + 0.281 + ], + "angle": 0, + "content": "{yulhwakim, jaeyongjang, jehun.lee, jihoonpark, kimjaejoon}@snu.ac.kr, {jeonghoon.samuel, byeonguk.kim, baeseong.park, sejung.kwon, dongsoo.lee}@navercorp.com" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.318, + 0.547, + 0.332 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.35, + 0.768, + 0.614 + ], + "angle": 0, + "content": "Even though floating point (FP) numbers have been adopted as a de facto standard data format for deep learning computing, the complexity of FP arithmetic impedes a broader deployment of Deep Neural Networks (DNNs). Recent works such as quantization have attempted to replace the FP matrix multiplication (MatMul) of DNNs with simple integer MatMul by transforming the datatypes of both weights and activations into integers. Unfortunately, unlike weight values that are static, it is challenging to represent dynamic activations with integers. In this paper, to simultaneously achieve the accuracy of FP activation and the simplicity of integer arithmetic, we present a method for replacing FP arithmetic with integer one without changing FP activations in the storage format while weights are quantized. The proposed method pre-aligns the significands of FP activations just ahead of the MatMul on-the-fly so that the aligned significands (integers) can be used for the computation. Inspired by an observation that conventional FP arithmetic does not produce precise results due to rounding, we demonstrate that our proposed integer arithmetic-based scheme can produce the same level of errors as that of the FP arithmetic in case DNNs use FP activations and quantized weights. 
Experimental results show that the hardware based on the proposed scheme shows significant improvement over FP arithmetic-based designs in terms of energy efficiency and throughput-per-area while maintaining a similar level of accuracy." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.643, + 0.338, + 0.657 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.675, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Deep Neural Networks (DNNs) usually use Floating-Point (FP) number systems to represent a wide range of weight and activation values. Such a comprehensive representation, however, demands high computational complexity and cost for FP matrix multiplication (MatMul) (Sze et al., 2017). On the other hand, integer (a.k.a fixed-point) arithmetic logic is much simpler while consuming less energy compared to FP counterpart (Jouppi et al., 2021). As such, the computational efficiency of DNNs can be enhanced by replacing FP arithmetic with integer one. Accordingly, quantization has been actively studied as a promising technique to support DNN computations with integer arithmetic, as it maps the input values of a (virtually) continuous domain (FP numbers) to the output values of a discrete set (integers) (Jacob et al., 2018). Note that even though several studies have successfully quantized weights and activations of some target DNNs with low-precision integer values (Li et al., 2021; Wu et al., 2022), quantization is still challenging for numerous DNNs. In particular, activation values are known to be more difficult to be quantized than the weight parameters because activations are dynamically generated during inference while the distribution of weights is static. The uncertainty of the distribution of dynamic activation values limits the ability to estimate proper quantization range (Choi et al., 2018). 
Such issues on activation quantization become even more serious when DNNs involve highly non-linear activation functions (e.g., GeLU) or modules that increase the variance of the activations (e.g., softmax and normalization layers) (Jeon et al., 2020). As a result, while the weight parameters can be successfully quantized even for generative mod" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.185, + 0.106, + 0.819, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.194, + 0.825, + 0.222 + ], + "angle": 0, + "content": "Figure 1: An example of FP summation with (a) conventional FP computation and (b) proposed method. The precise summation is described in the box on top." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.233, + 0.827, + 0.346 + ], + "angle": 0, + "content": "els (Xu et al., 2018; Bai et al., 2019; Jeon et al., 2022; Park et al., 2022; Kwon et al., 2022; Frantar et al., 2022) and extra-large models such as GPT-NeoX-20B (Chung et al., 2020; Yao et al., 2022), activation quantization usually relies on intensive quantization-aware training or sophisticated investigation algorithms such as dynamic min/max searching (Tao et al., 2022). Note that activation quantization is mandatory if integer arithmetic logic is involved for MatMul operations. Thus, to avoid such significant efforts to quantize complex DNNs (mainly due to activation quantization), recent neural processing units tend to employ FP arithmetic units even for inference process at the cost of increased energy and area (Jouppi et al., 2021)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.353, + 0.828, + 0.576 + ], + "angle": 0, + "content": "To address the challenges discussed above, we propose a scheme that can achieve both the accuracy of FP activations and the simplicity of integer arithmetic. Our motivation stems from an observation that most multiplications can be removed once weights are quantized to be binary-coded (Jeon et al., 2020). Then, consecutive FP additions are mainly required to perform MatMul, and hence, we find conventional FP units can be much simplified. To be more specific, when processing the MatMul of DNNs, our proposed method first pre-aligns the significands of FP activations to be added. Correspondingly, FP activations can be reformatted into integer values and FP arithmetic units (FPUs) can be replaced with integer units during MatMul operations. A naive pre-alignment for accurate computation requires very high-resolution integer units for the computation, which negates the benefits of using integer units. Inspired by an observation that conventional FP arithmetic does not guarantee the exact results due to rounding errors (Wilkinson, 1994), we show that the same level of computational error can be obtained even when the pre-aligned significands are aggressively truncated. We then implement an integer-based FP arithmetic unit (iFPU) hardware for MatMul computation based on the proposed scheme. A comprehensive evaluation of the iFPU on various DNNs shows that the iFPU significantly improves energy efficiency and throughput-per-area over the conventional FPU-based MatMul engine while maintaining the neural network accuracy." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.602, + 0.329, + 0.617 + ], + "angle": 0, + "content": "2 BACKGROUND" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.638, + 0.603, + 0.652 + ], + "angle": 0, + "content": "2.1 FLOATING-POINT ARITHMETIC AND ROUNDING ERROR" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.666, + 0.825, + 0.766 + ], + "angle": 0, + "content": "FP format represents a number as \\((-1)^{s} \\times (m) \\times 2^{(e - bias)}\\) which consists of sign \\((s)\\), exponent \\((e)\\), and significand (or mantissa, \\(m\\)) (Muller et al., 2018). Float32 assigns 1 bit for \\(s\\) and 8 bits for \\(e\\). Precision \\((p)\\), the effective bit count of the significand, is 24 bits (among which 23 bits are explicitly stored). Bfloat16, which has been gaining popularity in the field of deep learning, intensely cuts down stored significand bits to 7 (compared to 23 in float32) to lower the total number of bits per value, and thereby reduces memory footprint (Wang & Kanwar, 2019). The bias of the exponent term is usually set to half of the exponent maximum." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.827, + 0.925 + ], + "angle": 0, + "content": "FP format can cover a wide range of numbers by separating the significant digits and the scale of the number. Note that because of the precision limits, there is a gap between two consecutive FP numbers. Such a gap is called a unit of least precision (ulp) whose value is represented by the least significant digit. Hence, it is hard to represent real numbers precisely with FP format even if the numbers are in the dynamic range of the FP format, and rounding is required for converting real numbers into FP numbers. FP arithmetic typically normalizes significands for each computation, and the rounding operation is followed by the normalization to convert the computation result into an FP number. 
Round-to-nearest is the most frequently chosen rounding mode, in which the difference between the real value and the round-off value can be as large as half of ulp, and its relative error is bounded by \\(\\epsilon = \\frac{1}{2}ulp = 2^{-p}\\), which is referred to as machine epsilon. Both ulp and \\(\\epsilon\\) are widely used to evaluate the accuracy of numeric calculations (Goldberg, 1991).
Therefore, summation of bfloat16 values uses float32 adders (instead of bfloat16 adders) to preserve the accuracy of accumulated results (Wang & Kanwar, 2019; Intel, 2018; Henry et al., 2019).
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.543, + 0.827, + 0.697 + ], + "angle": 0, + "content": "Truncated binary multipliers with error compensation schemes have been proposed to reduce the number of outputs in integer multiplications (Petra et al., 2009). While both the truncated multipliers and our proposed work use the truncations to improve computational efficiency, there are critical differences between them. In the truncated binary integer multipliers, the amount of the truncated bits is fixed while it varies in FP additions cases which our work focuses on. In addition, (Petra et al., 2009) presents a truncation error correction function utilizing the fact that some of the truncated partial products share the same inputs with the remaining partial products, so they have correlations with the remaining partial product values. Unfortunately, in FP addition cases, the truncated significands do not have any correlation with the remaining bits so it is hard to devise similar error correction schemes. Hence, there is a strong need to develop alternative ways to control the truncation errors in FP operations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.716, + 0.807, + 0.731 + ], + "angle": 0, + "content": "3 RECONSTRUCTION OF FP-BASED MATMUL WITH INTEGER ARITHMETIC" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.746, + 0.774, + 0.761 + ], + "angle": 0, + "content": "3.1 OVERVIEW OF THE PROPOSED MATMUL RECONSTRUCTION AND COMPUTATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.825, + 0.884 + ], + "angle": 0, + "content": "In this section, we propose a methodology to reconstruct FP MatMul with integer arithmetic for efficient DNN computation, focusing on FP activations and quantized weights. 
In most cases, the weight matrix with \\( m \\)-bit quantization can be expressed as a binary-coded matrix: \\( \\sum_{b=1}^{m} \\alpha_{b} \\cdot B_{b} \\) where \\( \\alpha_{b} \\) is a scaling factor and \\( B_{b} \\) is a binary weight matrix of each bitplane. Here, \\( \\alpha_{b} \\) can be a power of 2 for uniform quantization or can be an FP value for non-uniform quantization. MatMul is composed of multiple dot products, and a dot product between activations and weights is defined as \\( \\sum_{k=1}^{n} (a_{k} \\times w_{k}) \\) (\\( a \\): activation, \\( w \\): weight, \\( n \\): fan-in of the layer). If we apply binary-coded weights and properly change the order of the operations, we can rewrite the dot product as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.889, + 0.826, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\sum_ {b = 1} ^ {m} \\alpha_ {b} \\sum_ {k = 1} ^ {n} \\left(a _ {k} \\times B _ {b, k}\\right), B _ {b, k} \\in [ - 1, + 1 ] \\tag {1}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.193, + 0.1, + 0.805, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.185, + 0.197, + 0.812, + 0.212 + ], + "angle": 0, + "content": "Figure 2: Overview of the proposed MatMul computing scheme for DNNs with FP activations." + }, + { + "type": "image", + "bbox": [ + 0.185, + 0.214, + 0.812, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.314, + 0.825, + 0.37 + ], + "angle": 0, + "content": "Figure 3: Comparison of a previous approach (e.g., MSFP (Rouhani et al., 2020)) and the proposed approach for applying block floating point (BFP) to DNN computation. 
In the case of MSFP, the original network needs to be retrained for the MatMul engines with different block sizes, but in the proposed scheme, the original network can be fed into the engines with any block sizes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.374, + 0.827, + 0.709 + ], + "angle": 0, + "content": "For each bitplane, MatMul of weights and activations is reconfigured as the addition/subtraction of activation values except for a few \\(\\alpha_{b}\\) multiplications that are necessary to merge the outputs from each bitplane. Because FP multiply-accumulate operations require more hardware resources than FP additions, even such a reconfiguration of matrix multiplication to remove most multiplications can improve the efficiency of DNN computations significantly (Jeon et al., 2020). Even so, because FP additions are still computationally more expensive than integer additions, replacing FP additions with integer additions can save even more energy and area. Therefore, we propose to reconstruct FP-based MatMul (Eq. 1) using integer additions (Figure 2). One of the key components of the proposed method is the pre-alignment, which reforms the FP activation values into integer values on-the-fly by sharing the exponent value among the activations that are fed to a dot product of the MatMul at a time. The pre-alignment finds the maximum of the exponents among the activations and aligns corresponding significands simultaneously based on the difference of each exponent and the maximum exponent. As a result, unlike conventional FP arithmetic that performs the alignment for each addition, our proposed computing methodology aligns the activation values once per MatMul, and thus, reduces the overall cost of the alignment process significantly. Note that as opposed to previous works that share the exponent among a block of inputs in the storage format (e.g., MSFP (Rouhani et al., 2020)), our design performs the exponent sharing during the computation. 
Since different exponents are allowed in the storage format in our scheme, we keep the representation power of the conventional FP format (Figure 3). Because pre-aligned activations can be represented by the aligned significands which are integer values, an FP addition of the MatMul can be replaced by an integer addition. After the whole summation process, the proposed method reforms the summation results back to FP values by normalizing the results with the maximum exponent found in the pre-alignment stage. Then, the computation results from each weight bitplane are multiplied by \\(\\alpha_{b}\\) and merged to finish the target MatMul operation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.713, + 0.827, + 0.853 + ], + "angle": 0, + "content": "As the exponent of float32 (or bfloat16) is 8-bit, the maximum amount of the significand shifting is 255 and the resolution of the aligned activation becomes 279 (or 263) bits. Note that such a large bit width might negate the benefits of using integer units. For example, while 32-bit integer addition consumes \\(10.3\\%\\) energy of float32 addition, 279-bit integer requires a level of energy per addition comparable to that of float32 addition (Appendix B.1). To avoid the large design overhead, we propose to use only the top \\(t(= p + \\delta)\\) bits of the aligned activation when \\(\\delta\\) indicates the number of extra significand bits for reducing truncation error. Since the conventional FP addition also experiences errors due to truncation of significand, relatively small extra \\(\\delta\\) bits for the proposed method can derive a level of errors similar to that of conventional FP addition (as described in Figure 1)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.616, + 0.884 + ], + "angle": 0, + "content": "3.2 COMPUTATION ERROR AFTER SIGNIFICAND TRUNCATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "To study the characteristics of errors in the proposed method with truncated significands, we first analyze the computation error with a single addition/subtraction between two FP values \\( x \\) and \\( y \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.189 + ], + "angle": 0, + "content": "We assume \\( x > y \\geq 0 \\), \\( x = x_0.x_1 \\cdots x_{p-1} \\), and \\( y = y_0.y_1 \\cdots y_{p-1} \\times 2^{-k} (k \\geq 0) \\) without loss of generality, because only the difference between the exponents decides the amount of shifting and truncation. Here, \\( x_i \\) and \\( y_i \\) denote the binary value of \\( i \\)-th significand bit, and the leading bit \\( x_0 \\) is 1 for \\( x \\) when \\( x > 0 \\). When either \\( k \\) or \\( y \\) is 0, there is no need for significand shifting and truncation, and hence, integer-based FP arithmetic can guarantee the precise computation without any extra bit (i.e., \\( \\delta = 0 \\))." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.825, + 0.269 + ], + "angle": 0, + "content": "When \\( k > 0 \\), we need to shift and truncate the significand of \\( y \\) for the computation. For the alignment, \\( y \\) should be shifted to right by \\( k \\), so \\( y \\) can be rewritten as \\( y = 0.0\\cdots 0y_k' y_{k+1}'\\cdots y_{k+p-1}' \\) where \\( y_{k+i}' \\) is equal to \\( y_i \\). 
As only the top \\( t(= p + \\delta) \\) bits of the significand remain after the truncation, the truncated result becomes \\( \\bar{y} = 0.0\\cdots y_k' \\cdots y_{t-1}' \\). When \\( \\delta \\geq k \\), the difference between \\( y \\) and \\( \\bar{y} \\) is 0. Otherwise, the difference between \\( y \\) and \\( \\bar{y} \\) is bounded as follows:
For subtraction, the relative error is defined similarly as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.364, + 0.47, + 0.825, + 0.504 + ], + "angle": 0, + "content": "\\[\ne _ {s u b} = \\frac {\\left| (x - y) - (x - \\bar {y}) \\right|}{\\left| x - y \\right|} = \\frac {\\left| y - \\bar {y} \\right|}{\\left| x - y \\right|}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.51, + 0.825, + 0.54 + ], + "angle": 0, + "content": "When \\(\\delta \\geq k\\), \\(|y - \\bar{y}|\\) is 0 so that \\(e_{sub}\\) is 0. The minimum of \\(x\\) is 1, and \\(y\\) has the maximum value when all \\(y_{k + i}'\\)s are 1. Correspondingly, \\(|x - y|\\) is bounded as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.55, + 0.825, + 0.584 + ], + "angle": 0, + "content": "\\[\n\\left| x - y \\right| \\geq \\left\\{ \\begin{array}{l l} 1 - 0. 1 1 \\dots 1 = 2 ^ {- p}, & \\text {f o r} k = 1 \\\\ 1 - 0. 0 \\dots 0 1 \\dots 1 \\geq 2 ^ {- 1} + 2 ^ {- 2} + \\dots + 2 ^ {- (k - 1)}, & \\text {f o r} k \\geq 2 \\end{array} \\right. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.593, + 0.825, + 0.663 + ], + "angle": 0, + "content": "When \\( k \\) is 1 and \\( \\delta \\) is 0, we get \\( |y - \\bar{y}| \\leq 2^{-p} \\) from Eq. 2. For such a case, according to Eq. 6 and Eq. 5, we have \\( e_{sub} \\leq 1 \\). The worst case happens when \\( x = 1 \\) and \\( y = 0.111 \\cdots 1 \\). When \\( k \\geq 2 \\), by applying Eq. 2 and Eq. 6 to Eq. 5, we get \\( e_{sub} \\leq \\epsilon \\) for \\( \\delta = 1 \\), and \\( e_{sub} \\leq 1/2\\epsilon \\) for \\( \\delta = 2 \\). As a result, regardless of FP formats, the proposed method has the error level as summarized in the following Remark 1." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.676, + 0.825, + 0.706 + ], + "angle": 0, + "content": "Remark 1 The integer-based FP addition/subtraction has the same level of error as that of the conventional FP addition/subtraction with 1 extra bit, and the error becomes half with 2 extra bits." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.718, + 0.827, + 0.747 + ], + "angle": 0, + "content": "Note that the error of FP summation is the same as the accumulated value of errors from each addition (Muller et al., 2018). The reconstructed MatMul, however, induces an additional stage of" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.753, + 0.498, + 0.874 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.753, + 0.828, + 0.874 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.878, + 0.825, + 0.922 + ], + "angle": 0, + "content": "Figure 4: (a) Average and (b) maximum FP summation errors of conventional FP computation and the proposed method with extra bits \\((\\delta = 0,1,2)\\) against the accurate FP summations with Schewchuk algorithm (Shewchuk, 1997)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.203 + ], + "angle": 0, + "content": "converting integer summation results to FP values, and thus, additional rounding error during the FP formatting (Figure 5(a)). 
For example, to sum 128 FP values, a conventional FP-based MatMul has 127 error sources with bound \\(\\epsilon\\) while the reconstructed MatMul with 1 extra bit has 128 error sources with bound \\(\\epsilon\\) such that the reconstructed MatMul might experience a slightly larger error than conventional FP-based MatMul. Therefore, to guarantee the same error level as that of the conventional FP arithmetic, 2 extra bits are used for pre-alignment. Then, reconstructed MatMul has 127 error sources with bound \\(0.5\\epsilon\\) and an additional error source with bound \\(\\epsilon\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.209, + 0.825, + 0.307 + ], + "angle": 0, + "content": "To verify the computation error of the proposed method, we randomly sample float32 values and compare the computation error of FP summation between conventional FP computation and the proposed method. To explore a wide range of float32 values, we sample \\( s \\), \\( e \\), and \\( m \\) values independently assuming a uniform distribution, and then concatenate those values. We vary the fan-in (i.e., the number of values to be accumulated) from 128 to 8192, and sample 50,000 sets of FP numbers for each fan-in selection. The Schewchuk algorithm is employed to obtain accurate FP summation baseline data for error measurement (Shewchuk, 1997)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.313, + 0.825, + 0.425 + ], + "angle": 0, + "content": "As shown in Figure 4, the proposed method produces a similar level of errors to that of the conventional FP arithmetic for various fan-in values when \\(\\delta = 2\\). Because larger errors are more likely to be accumulated with larger fan-in, we see that both average and maximum errors tend to grow as the fan-in increases (Figure 4). 
Nonetheless, the average error \\((12.3 \\times 10^{-7})\\) and the maximum error \\((2.4 \\times 10^{-2}\\) or \\(2.4\\%)\\) are relatively small even with 8192 fan-in, which justifies the current practice of implementing conventional FP additions without error correction for DNN inference. Correspondingly, the proposed method can support as precise numerical computation as conventional FP arithmetic does." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.445, + 0.319, + 0.459 + ], + "angle": 0, + "content": "4 EXPERIMENT" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.475, + 0.609, + 0.489 + ], + "angle": 0, + "content": "4.1 IFPU: A MATMUL ENGINE FOR THE PROPOSED METHOD" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.501, + 0.519, + 0.681 + ], + "angle": 0, + "content": "Overall Architecture. To evaluate the proposed method with real hardware implementation, we first design a MatMul engine called iFPU. Figure 5 shows the overview of systolic iFPU architecture which adopts the design principle of Google's TPU (Jouppi et al., 2017). iFPU performs FP MatMul in the form of a set of FP summation (Eq. 1) that is physically implemented as integer summation for high efficiency. After the computation, the iFPU converts integer results into FP values through the int2fp converter at the end of the Processing Element (PE) arrays. Then, scale & accumulator is used to multiply \\(\\alpha_{b}\\) and add summation results of each weight bit" + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.506, + 0.825, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.558, + 0.658, + 0.794, + 0.673 + ], + "angle": 0, + "content": "Figure 5: A block diagram of iFPU" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.681, + 0.825, + 0.766 + ], + "angle": 0, + "content": "plane to finish the MatMul (Eq. 1). 
The size of MatMul that can be processed in the iFPU at a time is bounded by the number of PEs, and as a practical design, we evaluate the iFPU with \\(32 \\times 32\\), \\(64 \\times 64\\), or \\(128 \\times 128\\) PEs for the experiment. When fan-in of the DNN layer exceeds the row count of PEs, activations of the layer are tiled to fit the row-count limit, and each tile is fed into the iFPU at a time and processed with integer adders in the PEs. To complete the entire MatMul, the computing results for different tiles should be merged, and for this, float32 adders (accumulator) are used again." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.772, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Precision of Integer Adder. As the PE array of the iFPU accumulates the pre-aligned and truncated significands, the size of the integer adder in each PE depends on \\( t \\), which is determined by the precision of the given FP format (\\( p \\)) and extra bits (\\( \\delta \\)) attached to control truncation error. Based on the theoretical analysis given in Section 3.2, the iFPU for float32 activations conducts 26-bit integer addition with \\( \\delta = 2 \\). Though the iFPU introduces additional FP accumulations due to the MatMul tiling, the error level of integer-based FP addition with \\( \\delta = 2 \\) is half of the conventional FP addition according to Remark 1. Therefore, the iFPU with \\( \\delta = 2 \\) can still preserve the same level of computing error as that of conventional FP MatMul (Figure 6(a)). Furthermore, the iFPU for bfloat16 activations can be designed to be even smaller and more energy efficient by using smaller precision integer adders thanks to the reduced bit precision for significands. 
Interestingly, conventional bfloat16 accumulation still uses float32 adders to preserve the accuracy of accumulated" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.104, + 0.812, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.388, + 0.197, + 0.61, + 0.21 + ], + "angle": 0, + "content": "(a) Evaluation with float32 activation" + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.213, + 0.808, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.385, + 0.305, + 0.614, + 0.318 + ], + "angle": 0, + "content": "(b) Evaluation with bfloat16 activation" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.323, + 0.825, + 0.38 + ], + "angle": 0, + "content": "Figure 6: Numerical computation errors of MatMul for DNNs with FP activation. We measure the computation error of conventional FPU-based engine and the proposed iFPU against the accurate FP computation with Schewchuk algorithm (Shewchuk, 1997). The number of PEs and fan-in are annotated along the horizontal axis." 
+ }, + { + "type": "image", + "bbox": [ + 0.191, + 0.387, + 0.404, + 0.464 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.409, + 0.387, + 0.603, + 0.464 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.388, + 0.804, + 0.464 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.389, + 0.467, + 0.61, + 0.48 + ], + "angle": 0, + "content": "(a) Evaluation with float32 activation" + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.484, + 0.404, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.485, + 0.603, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.485, + 0.804, + 0.561 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.385, + 0.564, + 0.614, + 0.577 + ], + "angle": 0, + "content": "(b) Evaluation with bfloat16 activation" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.582, + 0.825, + 0.639 + ], + "angle": 0, + "content": "Figure 7: Cosine distance between MatMul results of BERT-base (task: MRPC) extracted from inference results using conventional FPU-based engine (NVIDIA RTX3090) and the proposed iFPU. The last feed-forward layer in each encoder block (1-12th layers) and pooler (13th layer) is used for the evaluation. The number of PEs and layer indices are annotated along the horizontal axis." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.655, + 0.825, + 0.725 + ], + "angle": 0, + "content": "results (Wang & Kanwar, 2019; Intel, 2018; Henry et al., 2019). However, as the accumulated results are converted back to bfloat16, it is possible to maintain the accuracy of bfloat16 accumulation with less accurate adders than float32 adders. Figure 6(b) shows that the proposed bfloat16 iFPU with \\(\\delta = 3\\) (which uses 11 bit adders) provides comparable accuracy to that of conventional bfloat16 adders." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.745, + 0.576, + 0.759 + ], + "angle": 0, + "content": "4.2 ANALYSIS OF THE DNN COMPUTATION ACCURACY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.827, + 0.926 + ], + "angle": 0, + "content": "MatMuls of DNN with iFPU vs FPU. In the previous section, we compared the accuracy of the proposed integer-based FP MatMul with precise results. Since our goal is to replace the FPU with the proposed iFPU, it is also important to compare the computational difference between the conventional error-prone FPU-based engine and the iFPU. For an in-depth understanding of DNN inference with the iFPU, we first compare the inference output of each layer in the BERT-base model (Devlin et al., 2018) computed with an FPU-based engine (NVIDIA RTX3090) and the proposed iFPU. BERT-base uses 4-bit weight values and the target task is MRPC. In iFPU, MatMuls between weights and activations are processed with the proposed integer-based approach, but other operations such as softmax are processed by using conventional FPU. We employ cosine distance as the metric to measure the difference in layer outputs. Note that the cosine distance is 0 for two identical vectors and 2 for entirely opposite vectors. In this experiment, the last feed-forward layer in each" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.091, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Table 1: Accuracy of DNNs inference with conventional FPU-based engine (NVIDIA RTX3090) and proposed iFPUs(-#rows/columns of PE arrays). The numbers in parentheses represent accuracy difference between FPU & iFPU." 
+ }, + { + "type": "table", + "bbox": [ + 0.174, + 0.135, + 0.825, + 0.322 + ], + "angle": 0, + "content": "
float32 activationbfloat16 activation
VGG-9ResNet-18OPT-1.3BVGG-9ResNet-18OPT-1.3B
FPU92.9170.2712.9692.9170.2812.96
iFPU-3292.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.91 (+0.00)70.26 (-0.02)12.96 (+0.00)
iFPU-6492.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.90 (-0.01)70.27 (-0.01)12.97 (+0.01)
iFPU-12892.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.92 (+0.01)70.26 (-0.02)12.98 (+0.02)
ResNet-50RegNetMnasNetResNet-50RegNetMnasNet
FPU76.3278.1875.9976.3378.1775.96
iFPU-3276.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.38 (+0.05)78.18 (+0.01)75.97 (+0.01)
iFPU-6476.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.38 (+0.05)78.18 (+0.01)75.96 (+0.00)
iFPU-12876.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.40 (+0.07)78.18 (+0.01)75.97 (+0.01)
" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.323, + 0.827, + 0.425 + ], + "angle": 0, + "content": "
BERT-Base w/ float32 activationAvg.
CoLAMRPCSST-2STS-BQQPMNLI-m/mmQNLIRTE
FPU56.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28
iFPU-3256.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
iFPU-6456.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
iFPU-12856.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.427, + 0.827, + 0.528 + ], + "angle": 0, + "content": "
BERT-Base w/ bfloat16 activation
CoLAMRPCSST-2STS-BQQPMNLI-m/mmQNLIRTE
FPU56.0889.0591.5187.5283.7481.97/82.5789.0070.0481.30
iFPU-3256.1089.0591.5187.5283.7281.94/82.5589.0570.0481.28 (-0.02)
iFPU-6456.3689.0591.6387.5283.7281.93/82.5689.0070.0481.31 (+0.01)
iFPU-12856.1088.8391.6387.5283.7281.96/82.5489.0570.0481.27 (-0.03)
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.538, + 0.825, + 0.621 + ], + "angle": 0, + "content": "encoder block and pooler is chosen for evaluation. Figure 7 shows that the FPU and the iFPU produce almost identical outputs for each layer. The averages of the distance are less than \\(1.2 \\times 10^{-6}\\) and \\(2.5 \\times 10^{-4}\\) for float32 and bfloat16 activations, respectively. Moreover, the distance between layer outputs from the two engines remains close throughout the forward path. As a result, we can expect that the proposed iFPU can support DNN inference with almost the same accuracy as that of conventional FPU." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.628, + 0.827, + 0.837 + ], + "angle": 0, + "content": "DNN Inference Accuracy. We select 7 types of DNN models to compare DNN model accuracy between the FPU and iFPU: BERT-base, VGG-9, ResNet-18, ResNet-50, RegNet-3.2GF, MnasNet2.0, and OPT-1.3B. The accuracy of BERT-base is evaluated on the General Language Understanding Evaluation (GLUE) benchmark (Wang et al., 2019). VGG-9 (Simonyan & Zisserman, 2014) is evaluated on CIFAR-10 (Krizhevsky et al., 2009). ResNet-18, ResNet-50 (He et al., 2016), RegNet3.2GF (Radosavovic et al., 2020), and MnasNet-2.0 (Tan et al., 2019) measure top-1 accuracy on ImageNet (Russakovsky et al., 2015). OPT-1.3B (Zhang et al., 2022b) is an open-sourced NLP model provided by Meta AI roughly matching the performance and sizes of the GPT-3 class of models and is evaluated by estimating the perplexity on WikiText-2 dataset (Merit et al., 2016). All DNN models use 4-bit weight values that are quantized by a binary-coding quantization scheme. Note that no modifications to DNN structures are needed to deploy the weight-quantized DNNs to various iFPUs because 1) activations are FP values and 2) iFPUs are designed to process any MatMul for DNNs as long as weights are quantized. Table 1 summarizes the DNN inference results. 
Because the iFPU can produce almost identical MatMul results as FPU, the proposed iFPUs preserve the DNN accuracy for both float32 and bfloat16 activations as we expected." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.856, + 0.504, + 0.87 + ], + "angle": 0, + "content": "4.3 ANALYSIS OF COMPUTATION EFFICIENCY" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Setup. To evaluate the efficiency of proposed iFPUs, we synthesize the proposed hardware in a \\(28\\mathrm{nm}\\) CMOS technology. For a fair evaluation of the impact of replacing FP MatMul with integer-based MatMul, we also design two 'baseline' engines for the conventional FP-based MatMul (Fig-" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.218 + ], + "angle": 0, + "content": "ure 8). As the first baseline (FP-MAC), Figure 8(a) is designed with FP MAC units to process FP MatMul as a naive approach. In addition, as the second baseline (FP-ADD), Figure 8(b) is designed with FP adders to process FP MatMul reconfigured as Eq. 1. Because bitplanes of weight values are decomposed for FP-ADD and iFPU, binary weights are processed in a bit-parallel manner in FP-ADD and iFPU, while FP-MAC processes the whole weight values in each MAC unit. Compared to those two baseline engines, iFPU exhibits the lighter PEs along with additional units such as the pre-alignment unit and int2fp converter. Lastly, an int8 MatMul engine (INT8) is also implemented for the comparison between the proposed iFPU MatMul and integer MatMul." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.223, + 0.551, + 0.348 + ], + "angle": 0, + "content": "Results. 
Simulation results using the synthesized hardware demonstrate that the proposed iFPUs can improve both energy and area compared to the baselines, as the FP units of the baseline engines are replaced with the more area/energy efficient integer units (Figure 9). For float32 activations, the proposed iFPU improves throughput-per-area \\((\\mathrm{TOPS} / \\mathrm{mm}^2)\\) by up to \\(7.9\\times\\) and energy efficiency (TOPS/W) by up to \\(6.4\\times\\) compared to the FP-MAC baseline. For bfloat16 activations, the proposed iFPU achieves" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.226, + 0.691, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.227, + 0.824, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.561, + 0.317, + 0.825, + 0.345 + ], + "angle": 0, + "content": "Figure 8: Baseline MatMul engines (a) FP-MAC and (b) FP-ADD" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.348, + 0.827, + 0.488 + ], + "angle": 0, + "content": "even larger improvements because the size of the corresponding integer-based unit is reduced as the bit resolution of the aligned-truncated significands is reduced by 15 bits compared to float32 activation cases. The throughput-per-area of the iFPU is improved by up to \\(9.9\\times\\) and energy efficiency is enhanced by up to \\(11.9\\times\\) compared to the FP-MAC baseline. The improvement over the baseline becomes larger as the number of PEs increases because the overhead of additional logic such as pre-alignment units in the proposed scheme can be amortized (detailed in Appendix C.2). We also compare the iFPUs with the INT8 engine. While bffloat16 activations close the gap between the FP-MAC baseline and the INT8 engine significantly in terms of throughput-per-area, iFPU (with bffloat16 activations) achieves even higher energy efficiency than the INT8 engine in some cases (Figure 9)." 
+ }, + { + "type": "image", + "bbox": [ + 0.179, + 0.491, + 0.501, + 0.61 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.491, + 0.825, + 0.61 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.615, + 0.825, + 0.658 + ], + "angle": 0, + "content": "Figure 9: Normalized energy efficiency (TOPS/W) (left) and throughput-per-area (TOPS/mm²) (right) of MatMul Engines: baselines and iFPUs for FP MatMul; INT8 for int8 MatMul. The number of PEs and target activation types are annotated along the horizontal axis." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.671, + 0.321, + 0.687 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.703, + 0.827, + 0.926 + ], + "angle": 0, + "content": "The need to accomplish computing MatMul by using FP activations and quantized weights is increasing due to the growing usage of complex non-linear activation functions in DNN models such as Transformers. Conventional computing platforms such as CPU, GPU, and NPU, however, are inefficient in performing such computations. In this paper, we propose a new MatMul computing scheme dedicated to DNNs with FP activations and binary-coding weight quantization. The proposed method accelerates the FP MatMul of DNNs using the shared exponent and the integer arithmetic to improve computational efficiency. Previous works which also used the block floating point number with shared exponent often claim the validity of their design by presenting comparable DNN accuracy without verifying the robustness of MatMul results in a rigorous manner. We theoretically prove that the proposed scheme can produce the same error level as that of conventional FP arithmetic. To evaluate the computational efficiency of the proposed method, we design and synthesize a MatMul engine, iFPU, following the principle of integer-based operations. 
Experimental results support our claim that, compared to the conventional FPU-based design, the iFPUs accelerate the weight-only quantized DNNs with \\(6.4 \\times\\) and \\(7.9 \\times\\) higher energy efficiency and throughput-per-area for float32 activations, respectively. In addition, the iFPUs yield \\(11.9 \\times\\) and \\(9.9 \\times\\) higher energy efficiency and throughput-per-area, respectively, when associated with bffloat16 activations." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.105, + 0.33, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.128, + 0.827, + 0.187 + ], + "angle": 0, + "content": "This work was supported in part by Institute of Information communications Technology Planning Evaluation (IITP) grant funded by the Korea government (MSIT) (No. 2021-0-01343, Artificial Intelligence Graduate School Program (Seoul National University) \\((10\\%)\\), and No.2021-0-02068, Artificial Intelligence Innovation Hub \\((10\\%)\\))." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.205, + 0.289, + 0.221 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.228, + 0.826, + 0.259 + ], + "angle": 0, + "content": "Yu Bai, Yu-Xiang Wang, and Edo Liberty. Proxquant: Quantized neural networks via proximal operators. International Conference on Learning Representations, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.266, + 0.827, + 0.309 + ], + "angle": 0, + "content": "Sharan Chetlur, Cliff Woolley, Philippe Vandermersch, Jonathan Cohen, John Tran, Bryan Catanzaro, and Evan Shelhamer. codnn: Efficient primitives for deep learning. 
arXiv preprint arXiv:1410.0759, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.317, + 0.825, + 0.361 + ], + "angle": 0, + "content": "Jungwook Choi, Zhuo Wang, Swagath Venkataramani, Pierce I-Jen Chuang, Vijayalakshmi Srinivasan, and Kailash Gopalakrishnan. Pact: Parameterized clipping activation for quantized neural networks. arXiv preprint arXiv:1805.06085, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.369, + 0.825, + 0.425 + ], + "angle": 0, + "content": "Insoo Chung, Byeongwook Kim, Yoonjung Choi, Se Jung Kwon, Yongkweon Jeon, Baeseong Park, Sangha Kim, and Dongsoo Lee. Extremely low bit transformer quantization for on-device neural machine translation. In Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 4812-4826, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.434, + 0.825, + 0.464 + ], + "angle": 0, + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.472, + 0.825, + 0.502 + ], + "angle": 0, + "content": "Elias Frantar, Saleh Ashkboos, Torsten Hoefer, and Dan Alistarh. Gptq: Accurate post-training quantization for generative pre-trained transformers. arXiv preprint arXiv:2210.17323, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.509, + 0.825, + 0.539 + ], + "angle": 0, + "content": "David Goldberg. What every computer scientist should know about floating-point arithmetic. ACM computing surveys (CSUR), 23(1):5-48, 1991." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.546, + 0.822, + 0.576 + ], + "angle": 0, + "content": "Mark Harris. Mixed-precision programming with CUDA 8, 2016. URL https://developer.nvidia.com/blog/mixed-precision-programming-cuda-8/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.584, + 0.825, + 0.626 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.635, + 0.825, + 0.679 + ], + "angle": 0, + "content": "Greg Henry, Ping Tak Peter Tang, and Alexander Heinecke. Leveraging the bfloat16 artificial intelligence datatype for higher-precision computations. In 2019 IEEE 26th Symposium on Computer Arithmetic (ARITH), pp. 69-76. IEEE, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.686, + 0.825, + 0.729 + ], + "angle": 0, + "content": "Mark Horowitz. Computing's energy problem (and what we can do about it). In 2014 IEEE International Solid-State Circuits Conference Digest of Technical Papers (ISSCC), pp. 10-14. IEEE, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.738, + 0.825, + 0.794 + ], + "angle": 0, + "content": "Intel. Bfloat16 - hardware numerics definition, 2018. URL https://www.intel.com/content/dam/develop/external/us/en/documents/bf16-hardware-numerics-definition-white-paper.pdf. Accessed: 2022-09-07." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.803, + 0.825, + 0.86 + ], + "angle": 0, + "content": "Benoit Jacob, Skirmantas Kligys, Bo Chen, Menglong Zhu, Matthew Tang, Andrew Howard, Hartwig Adam, and Dmitry Kalenichenko. Quantization and training of neural networks for efficient integer-arithmetic-only inference. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2704-2713, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Yongkweon Jeon, Baeseong Park, Se Jung Kwon, Byeongwook Kim, Jeongin Yun, and Dongsoo Lee. Biqgemm: matrix multiplication with lookup table for binary-coding-based quantized dnns. 
In SC20: International Conference for High Performance Computing, Networking, Storage and Analysis, pp. 1-14. IEEE, 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.228, + 0.827, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Yongkweon Jeon, Chungman Lee, Eulrang Cho, and Yeonju Ro. Mr.biq: Post-training non-uniform quantization based on minimizing the reconstruction error. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12329-12338, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.211 + ], + "angle": 0, + "content": "Norman P Jouppi, Cliff Young, Nishant Patil, David Patterson, Gaurav Agrawal, Raminder Bajwa, Sarah Bates, Suresh Bhatia, Nan Boden, Al Borchers, et al. In-datacenter performance analysis of a tensor processing unit. In Proceedings of the 44th annual international symposium on computer architecture, pp. 1-12, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.219, + 0.826, + 0.277 + ], + "angle": 0, + "content": "Norman P Jouppi, Doe Hyun Yoon, Matthew Ashcraft, Mark Gottscho, Thomas B Jablin, George Kurian, James Laudon, Sheng Li, Peter Ma, Xiaoyu Ma, et al. Ten lessons from three generations shaped google's tpuv4i: Industrial product. In 2021 ACM/IEEE 48th Annual International Symposium on Computer Architecture (ISCA), pp. 1-14. IEEE, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.284, + 0.826, + 0.327 + ], + "angle": 0, + "content": "Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W Mahoney, and Kurt Keutzer. I-bert: Integer-only bert quantization. 
In International Conference on Machine Learning, pp. 5506-5518. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.335, + 0.826, + 0.392 + ], + "angle": 0, + "content": "Urs Köster, Tristan Webb, Xin Wang, Marcel Nassar, Arjun K Bansal, William Constable, Oguz Elibol, Scott Gray, Stewart Hall, Luke Hornof, et al. Flexpoint: An adaptive numerical format for efficient training of deep neural networks. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.4, + 0.724, + 0.417 + ], + "angle": 0, + "content": "Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.423, + 0.826, + 0.48 + ], + "angle": 0, + "content": "Se Jung Kwon, Jeonghoon Kim, Jeongin Bae, Kang Min Yoo, Jin-Hwa Kim, Baeseong Park, Byeongwook Kim, Jung-Woo Ha, Nako Sung, and Dongsoo Lee. Alphatuning: Quantization-aware parameter-efficient adaptation of large-scale pre-trained language models. arXiv preprint arXiv:2210.03858, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.488, + 0.826, + 0.531 + ], + "angle": 0, + "content": "Yuhang Li, Ruihao Gong, Xu Tan, Yang Yang, Peng Hu, Qi Zhang, Fengwei Yu, Wei Wang, and Shi Gu. Brecq: Pushing the limit of post-training quantization by block reconstruction. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.539, + 0.826, + 0.582 + ], + "angle": 0, + "content": "Xiaocong Lian, Zhenyu Liu, Zhourui Song, Jiwu Dai, Wei Zhou, and Xiangyang Ji. High-performance fpga-based cnn accelerator with block-floating-point arithmetic. IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 27(8):1874-1885, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.59, + 0.826, + 0.62 + ], + "angle": 0, + "content": "Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. 
arXiv preprint arXiv:1609.07843, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.627, + 0.826, + 0.657 + ], + "angle": 0, + "content": "Asit Mishra, Eriko Nurvitadhi, Jeffrey J Cook, and Debbie Marr. Wrpn: Wide reduced-precision networks. In International Conference on Learning Representations, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.664, + 0.826, + 0.708 + ], + "angle": 0, + "content": "Jean-Michel Muller, Nicolas Brisebarre, Florent De Dinechin, Claude-Pierre Jeannerod, Vincent Lefevre, Guillaume Melquiond, Nathalie Revol, Damien Stehlé, Serge Torres, et al. Handbook of floating-point arithmetic. Springer, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.715, + 0.826, + 0.758 + ], + "angle": 0, + "content": "Gunho Park, Baeseong Park, Se Jung Kwon, Byeongwook Kim, Youngjoo Lee, and Dongsoo Lee. nuqmm: Quantized matmul for efficient inference of large-scale generative language models. arXiv preprint arXiv:2206.09557, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.766, + 0.826, + 0.823 + ], + "angle": 0, + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, volume 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.83, + 0.826, + 0.874 + ], + "angle": 0, + "content": "Nicola Petra, Davide De Caro, Valeria Garofalo, Ettore Napoli, and Antonio GM Srolllo. Truncated binary multipliers with variable correction and minimum mean square error. IEEE Transactions on Circuits and Systems I: Regular Papers, 57(6):1312-1325, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, and Piotr Dólar. Designing network design spaces. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 10428-10436, 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.162 + ], + "angle": 0, + "content": "Bita Darvish Rouhani, Daniel Lo, Ritchie Zhao, Ming Liu, Jeremy Fowers, Kalin Ovtcharov, Anna Vinogradsky, Sarah Massengill, Lita Yang, Ray Bittner, et al. Pushing the limits of narrow precision inferencing at cloud scale with microsoft floating point. Advances in neural information processing systems, 33:10271-10281, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.169, + 0.826, + 0.213 + ], + "angle": 0, + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.221, + 0.825, + 0.25 + ], + "angle": 0, + "content": "Jonathan Richard Shewchuk. Adaptive precision floating-point arithmetic and fast robust geometric predicates. Discrete & Computational Geometry, 18(3):305-363, 1997." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.259, + 0.825, + 0.288 + ], + "angle": 0, + "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.296, + 0.825, + 0.34 + ], + "angle": 0, + "content": "Zhourui Song, Zhenyu Liu, and Dongsheng Wang. Computation error analysis of block floating point arithmetic oriented convolution neural network accelerator design. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.347, + 0.825, + 0.377 + ], + "angle": 0, + "content": "Vivienne Sze, Yu-Hsin Chen, Tien-Ju Yang, and Joel S Emer. Efficient processing of deep neural networks: A tutorial and survey. Proceedings of the IEEE, 105(12):2295-2329, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.385, + 0.827, + 0.43 + ], + "angle": 0, + "content": "Mingxing Tan, Bo Chen, Ruoming Pang, Vijay Vasudevan, Mark Sandler, Andrew Howard, and Quoc V Le. Mnasnet: Platform-aware neural architecture search for mobile. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2820-2828, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.437, + 0.825, + 0.48 + ], + "angle": 0, + "content": "Chaofan Tao, Lu Hou, Wei Zhang, Lifeng Shang, Xin Jiang, Qun Liu, Ping Luo, and Ngai Wong. Compression of generative pre-trained language models via quantization. arXiv preprint arXiv:2203.10705, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.488, + 0.825, + 0.533 + ], + "angle": 0, + "content": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. International Conference on Learning Representations, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.54, + 0.825, + 0.569 + ], + "angle": 0, + "content": "Shibo Wang and Pankaj Kanwar. Bfloat16: The secret to high performance on cloud tpus. Google Cloud Blog, 4, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.578, + 0.788, + 0.594 + ], + "angle": 0, + "content": "James Hardy Wilkinson. Rounding errors in algebraic processes. Courier Corporation, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.602, + 0.825, + 0.632 + ], + "angle": 0, + "content": "Xiaoxia Wu, Zhewei Yao, Minjia Zhang, Conglong Li, and Yuxiong He. Extreme compression for pre-trained transformers made simple and efficient. arXiv preprint arXiv:2206.01859, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.64, + 0.825, + 0.683 + ], + "angle": 0, + "content": "Chen Xu, Jianqiang Yao, Zhouchen Lin, Wenwu Ou, Yuanbin Cao, Zhirong Wang, and Hongbin Zha. Alternating multi-bit quantization for recurrent neural networks. In International Conference on Learning Representations, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.691, + 0.825, + 0.735 + ], + "angle": 0, + "content": "Zhewei Yao, Reza Yazdani Aminabadi, Minjia Zhang, Xiaoxia Wu, Conglong Li, and Yuxiong He. Zeroquant: Efficient and affordable post-training quantization for large-scale transformers. arXiv preprint arXiv:2206.01861, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.743, + 0.825, + 0.786 + ], + "angle": 0, + "content": "Sai Qian Zhang, Bradley McDanel, and HT Kung. Fast: Dnn training under variable precision block floating point with stochastic rounding. In 2022 IEEE International Symposium on High-Performance Computer Architecture (HPCA), pp. 846-860. IEEE, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.794, + 0.825, + 0.838 + ], + "angle": 0, + "content": "Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuhui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068, 2022b." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.808, + 0.119 + ], + "angle": 0, + "content": "A COMPUTATIONAL COST OF FP ARITHMETIC VS. INTEGER ARITHEMETIC" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.14, + 0.825, + 0.157 + ], + "angle": 0, + "content": "Table 2: Energy of computing units synthesized in a \\( {28}\\mathrm{\\;{nm}} \\) tech node (MAC: multiply-accumulate)." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.165, + 0.825, + 0.24 + ], + "angle": 0, + "content": "
MACMultiplyAdd
float32int8float32int32int8float32int32
Energy per Operation1.51 pJ0.08 pJ1.23 pJ0.940.06 pJ0.28 pJ0.03 pJ
Normalized Energy18.9×1.0×20.5×15.7×1.0×9.3×1.0×
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.257, + 0.615, + 0.412 + ], + "angle": 0, + "content": "To cover a wide range of numbers, FP format does not fix the location of the radix point (Goldberg, 1991). Hence, FP arithmetic needs to handle input and output values with different scaling factors, and the FP arithmetic units need to align and normalize significands before and after each computation, respectively. The alignment and normalization logics consist of barrel shifters that can shift a data word by a specified amount, and the cost of the barrel shifter far exceeds the cost of other arithmetic logics in terms of both energy and area, increasing the cost of FP computation (Horowitz, 2014). Hence, in general, integer arithmetic logic is much smaller and consumes less energy than FP counterpart." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.417, + 0.615, + 0.473 + ], + "angle": 0, + "content": "It is well known that 8-bit integer can achieve up to \\(4 \\times\\) throughput improvement compared to IEEE-754 single-precision format (float32) in widely used GPUs (Kim et al., 2021), as the throughput of 8-bit operations is generally \\(4 \\times\\) that of 32-bit operations (Har" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.473, + 0.825, + 0.64 + ], + "angle": 0, + "content": "ris, 2016). The advantage of integer can be magnified when the hardware platform moves to ASIC (Mishra et al., 2018). For in-depth understanding, we synthesize computing units for FP and integer in a \\(28\\mathrm{nm}\\) tech node. As shown in Table 2, multiplication-accumulation (MAC) for float32 consumes \\(18.9\\times\\) more energy than 8-bit integer (int8), a widely used integer format for quantized DNNs. Please note that a float32 MAC consists of a float32 multiplication and a float32 addition while an int8 MAC consists of an int8 multiplication and an int32 addition. 
The bit resolution of the adder for the int8 MAC is higher than that of the multiplier, because int8 multiplication results in 16-bit values and the bit resolution of MAC values increases as the number of accumulated values increases for integer format. In addition, the area cost of the integer unit is also much smaller than FP units as shown in Figure 10. Therefore, many studies have attempted activation quantization despite the various difficulties in the quantization process because both weight parameters and activations should be quantized to replace FP arithmetic with integer arithmetic." + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.259, + 0.825, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.417, + 0.825, + 0.461 + ], + "angle": 0, + "content": "Figure 10: Area comparison of computing units (layouts synthesized in a \\(28\\mathrm{nm}\\) node)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.66, + 0.684, + 0.675 + ], + "angle": 0, + "content": "B SUPPLEMENT FOR PROPOSED SIGNIFICAND TRUNCATION" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.693, + 0.622, + 0.707 + ], + "angle": 0, + "content": "B.1 ENERGY IMPROVEMENT WITH SIGNIFICAND TRUNCATION" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.732, + 0.44, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.834, + 0.476, + 0.863 + ], + "angle": 0, + "content": "Figure 11: Energy of adders synthesized in a \\(28\\mathrm{nm}\\) tech node (tested at \\(0.9\\mathrm{V}\\))." + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.724, + 0.815, + 0.822 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.521, + 0.834, + 0.825, + 0.864 + ], + "angle": 0, + "content": "Figure 12: Example of the significand truncation followed by the pre-alignment." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "With naive pre-alignment of float32 activations, the maximum amount of the significand shifting is 255 and the resolution of the aligned activation becomes 279 bits. As shown in Figure 11, while 32-bit integer consumes \\(0.029\\mathrm{pJ}\\) per addition, both float32 and 279-bit integer consumes \\(0.281\\mathrm{pJ}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.147 + ], + "angle": 0, + "content": "per addition. To avoid the large design overhead, we truncate the pre-aligned significands as shown in Figure 12. The aggressive truncation still did not cause accuracy degradation in FP additions as we described in the Section 3.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.163, + 0.761, + 0.177 + ], + "angle": 0, + "content": "B.2 TRUNCATED BINARY MULTIPLIERS VS. PROPOSED SIGNIFICAND TRUNCATION" + }, + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.196, + 0.207, + 0.211 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.209, + 0.196, + 0.511, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.553, + 0.196, + 0.577, + 0.212 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.58, + 0.196, + 0.825, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.315, + 0.825, + 0.345 + ], + "angle": 0, + "content": "Figure 13: Comparison of the truncation scheme in the (a) truncated binary multiplier for integer multiplication and (b) proposed method for FP addition/subtraction." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.361, + 0.825, + 0.457 + ], + "angle": 0, + "content": "Truncated binary multipliers (Petra et al., 2009) also discuss the truncation to improve computational efficiency, but there are critical differences between truncated binary multipliers and the proposed work as summarized in Figure 13. First of all, truncated binary multipliers deals with integer multiplications while the proposed work focuses on FP additions/subtractions. Due to the differences in the number format (integer vs. FP) and arithmetic operations (multiplications vs. additions/subtractions), the two works present completely different error analysis models and error reduction schemes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.465, + 0.825, + 0.563 + ], + "angle": 0, + "content": "The error analysis models between truncated binary multipliers and our case are different, because the amount of truncation is fixed in the truncated binary integer multipliers and the amount of truncation varies in our work as the amount of significand shift varies depending on the input data. Moreover, in truncated binary multipliers, the bit resolution of truncated output is defined by the application requirement. On the other hand, as we proposed to truncate the pre-aligned values to adopt lower-bit integers and improve computational efficiency, the proper bit resolution of truncated values should be found to meet the accuracy requirement in our case." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.57, + 0.827, + 0.737 + ], + "angle": 0, + "content": "In addition, in integer multiplication case, some of the truncated partial products share the same inputs with the remaining partial products, so they have correlations with the remaining partial product values. (Petra et al., 2009) proposed an error minimization scheme which exploits such characteristics. 
On the other hand, in the FP addition/subtraction case, the truncated significands do not have any correlation with the remaining bits so it is hard to devise similar error correction schemes. Instead, we focused on the fact that conventional FP operation is also not precise due to the rounding of output significands so that we only need to match the error level of the proposed scheme to the conventional FP operations. Based on the facts, we showed a theoretical analysis such that the proposed integer-based FP addition/subtraction can have the similar error level as that of the conventional FP addition/subtraction when small number (1-2) of extra bits are attached to the shifted significands. With this finding, we can design an efficient integer-based FP addition logic without having complex error correction function estimated based on the truncated bits." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.757, + 0.482, + 0.772 + ], + "angle": 0, + "content": "C IN-DEPTH HARDWARE ANALYSIS" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.787, + 0.644, + 0.801 + ], + "angle": 0, + "content": "C.1 DETAILED HARDWARE DESCRIPTION OF THE PROPOSED IFPU" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Figure 14 describes the proposed iFPU in detail. The proposed iFPU is a bit-flexible accelerator which can handle variable bitwidth of weight values. The iFPU processes weights in bit-parallel manner by processing each weight bitplane in different columns of the PE array. For example, 4-bit weights use 4 PE columns for the computation, and 8-bit weights use 8 PE columns for the computation. After the integer-based summations are done in each column of the PE array, the integer results are converted into FP values and multiplied by scaling factors which represent the significance of each bitplane. Then, computing results of each bitplane are merged in the accumulator (FP adder) to finish the MatMul. 
As the output resolution of FP accumulation remains the same regardless of the" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.103, + 0.465, + 0.332 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.472, + 0.108, + 0.61, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.106, + 0.82, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.342, + 0.825, + 0.385 + ], + "angle": 0, + "content": "Figure 14: A detailed block diagram of iFPU. The iFPU processes weights in bit-parallel manner by processing each bitplane of the weights in each column of the PE array. \\((B_{b,k}\\) : binary weights in Eq. 1)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.412, + 0.825, + 0.441 + ], + "angle": 0, + "content": "size of the accumulation thanks to the characteristics of the FP format, the size of the accumulator does not need to increase for the increased weight bit width." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.458, + 0.563, + 0.472 + ], + "angle": 0, + "content": "C.2 AREA/ENERGY BREAKDOWN OF PROPOSED IFPU" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.489, + 0.497, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.489, + 0.82, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.619, + 0.825, + 0.647 + ], + "angle": 0, + "content": "Figure 15: Area \\((mm^2)\\) (left) and power (W) (right) of MatMul Engines: baselines and iFPUs for FP MatMul with 32x32 PEs." 
+ }, + { + "type": "image", + "bbox": [ + 0.179, + 0.671, + 0.493, + 0.793 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.671, + 0.826, + 0.793 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.807, + 0.825, + 0.836 + ], + "angle": 0, + "content": "Figure 16: Area breakdown (left) and power breakdown (right) of proposed iFPUs with \\(32\\mathrm{x}32\\), \\(64\\mathrm{x}64\\), and \\(128\\mathrm{x}128\\) PEs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.826, + 0.926 + ], + "angle": 0, + "content": "In this section, the area and power of the MatMul engines designed in Section 4.3 are analyzed in more detail for deeper understanding of the proposed scheme. First, a breakdown of the area/power of various MatMul engines with \\(32 \\times 32\\) PEs is shown in Figure 15. FP-ADD reconstructs FP-MAC with a series of FP additions by separately processing each weight bitplane (Eq. 1), so to match the effective throughput of FP-ADD with that of FP-MAC in case of 4-bit weights, 4 FP-ADD" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.273 + ], + "angle": 0, + "content": "operations are used for the evaluation. Hence, though the area/energy of a single float32 adder is lower than that of a float32 MAC unit (Table 2), FP-ADD requires slightly larger area and power than FP-MAC. On the other hand, though iFPU also introduces \\( m \\) times more operations than FP-MAC, iFPUs achieve large area and power reduction as the area/energy cost of PE arrays become significantly lower by replacing FP adders with integer adders. 
The area/power reduction is even larger in bfloat16 cases because smaller integer units can be used. As the area/power cost of PE arrays in iFPUs decreases, the relative portion of area/power of supporting logic (such as scale & accumulator) in the total area/power increases. Hence, the supporting logic accounts for more than half of the total area/power of iFPUs with 32x32 PEs. Meanwhile, the overhead of the supporting logic decreases as the size of PE arrays increases. We report the area/power breakdown of iFPUs with various number of PEs in Figure 16. The experimental results show that, as the size of PE arrays increase, the supporting logic is shared among more PEs and the overhead can be amortized." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.289, + 0.637, + 0.305 + ], + "angle": 0, + "content": "C.3 IMPACT OF THE WEIGHT BITWIDTH ON THE PROPOSED IFPU" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.319, + 0.826, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.339, + 0.472, + 0.66, + 0.486 + ], + "angle": 0, + "content": "(a) Normalized energy efficiency (TOPS/W) of iFPUs" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.487, + 0.825, + 0.632 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.32, + 0.639, + 0.678, + 0.654 + ], + "angle": 0, + "content": "(b) Normalized throughput-per-area (TOPS/mm\\(^2\\)) of iFPUs" + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.665, + 0.825, + 0.723 + ], + "angle": 0, + "content": "Figure 17: Computational efficiency of iFPUs normalized with that of the baseline FP MatMul engine (FP-MAC). Y-axis is the normalized value against FP-MAC and the iFPUs show higher efficiency than FP-MAC even for high-precision weight bits. The number of PEs and target activation types are annotated along the horizontal axis." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.74, + 0.827, + 0.852 + ], + "angle": 0, + "content": "This section analyzes impact of weight bitwidth on the efficiency improvement achievable with the proposed iFPU. The experimental setup is the same as Section 4.3 except the weight bits. While only 4-bit weight cases are evaluated in Section 4.3, this section evaluates weights with 1 to 16 bits. Because the proposed scheme processes each bitplane of the weights in the bit-parallel manner, higher-bit weights require more operations with PE, scale, and accumulators. Hence, as shown in Figure 17, the benefits of the iFPUs diminish as the number of weight bits increases. Nevertheless, even for 8-bit weight case, iFPUs achieve better computational efficiency compared to the FP-MAC baseline." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.87, + 0.7, + 0.884 + ], + "angle": 0, + "content": "C.4 COMPARISON OF THE PROPOSED IFPU WITH INT4 MATMUL ENGINE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.896, + 0.825, + 0.927 + ], + "angle": 0, + "content": "In Figure 18., an int4 MatMul engine (INT4) is evaluated and compared with the other MatMul engines analyzed in Section 4.3. 
INT4 MatMul shows high energy efficiency and throughput-per" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.51, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.093, + 0.5, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.094, + 0.825, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.229, + 0.825, + 0.273 + ], + "angle": 0, + "content": "Figure 18: Normalized energy efficiency (TOPS/W) (left) and throughput-per-area \\((\\mathrm{TOPS} / \\mathrm{mm}^2)\\) (right) of MatMul Engines: baselines and iFPUs for FP MatMul; INT8/INT4 for int8/int4 MatMul. The number of PEs and target activation types are annotated along the horizontal axis." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.301, + 0.825, + 0.331 + ], + "angle": 0, + "content": "area. However, to take advantage of INT4 MatMul, both weight and activation should be quantized to 4 bits, which may not provide desired accuracy in many cases." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.349, + 0.56, + 0.362 + ], + "angle": 0, + "content": "C.5 HARDWARE EVALUATION WITH MEMORY ACCESS" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.385, + 0.465, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.476, + 0.385, + 0.693, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.424, + 0.822, + 0.487 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.549, + 0.825, + 0.592 + ], + "angle": 0, + "content": "Figure 19: Normalized energy consumption of MatMul engines (FP-MAC, FP-ADD, and iFPU) with memory system. 
The inference energy is measured for BERT-based and OPT-1.3B with 4-bit weights and float32/bfloat16 activations." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.612, + 0.825, + 0.751 + ], + "angle": 0, + "content": "Setup. To understand the effectiveness of the proposed method in the real computing scenario, the baselines (FP-MAC and FP-ADD) and the proposed iFPU with 128x128 PEs are further evaluated including memory access. For off-chip memory, we scaled down the bandwidth of HBM2 in TPU (Jouppi et al., 2021) considering the ratio of the number of PEs that make up Matrix Multiply Unit (MXU), which is 1:4 and adopted energy per bit of HBM2 from Table 2 in (Jouppi et al., 2021); we used the bandwidth of 153.5 GB/s and the energy per bit of \\(3.9\\mathrm{pJ} / \\mathrm{bit}\\). We also scaled the size of the unified buffer (on-chip SRAM buffer) in (Jouppi et al., 2021) by dividing it by 4. The unified buffer size in our design was 32MB. For SRAMs, we used the \\(28\\mathrm{nm}\\) CMOS memory compiler and the energy per bit of \\(0.155\\mathrm{pJ} / \\mathrm{bit}\\) was used. To overlap memory access with computation, double buffering scheme was adopted in the unified buffer." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.758, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Results. We evaluate a single batch inference of BERT-base and OPT-1.3B. We set the sequence length of BERT-base and OPT-1.3B as 128 and 1024 respectively. As double buffering hides the memory access latency, the proposed iFPU with memory model can achieve the same amount of throughput-per-area improvement as that of the baseline for the case in which memory access is not considered. On the other hand, the gain in energy efficiency slightly changes after considering memory access. As shown in Figure 19, the dram access energy accounts for a relatively small portion of total energy consumption in the baselines, because the data is intensively reused in the MatMul computation. 
As the proposed iFPU reduces the energy cost of computation, memory access energy becomes relatively significant in the proposed system. Thus, when considering the cost of memory access, the amount of improvement in the energy efficiency slightly decreases. Nevertheless, the iFPU with memory access still can improve the energy efficiency by up to 6.6x compared to FP-MAC baseline." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.661, + 0.119 + ], + "angle": 0, + "content": "D FINE-TUNING CONDITION FOR BERT-BASE TRAINING" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.138, + 0.828, + 0.182 + ], + "angle": 0, + "content": "Table 3: Hyper-parameters for fine-tuning BERT-base on GLUE benchmark. The fine-tuning use AdamW optimizer and the number of training epochs is 10. The learning rates decay linearly and the weight decay is set to 0.01." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.192, + 0.825, + 0.267 + ], + "angle": 0, + "content": "
ConfigurationGLUE
CoLAMRPCSST-2STS-BQQPMNLIQNLIRTE
Batch size1632323232161616
Learning rate1e-41e-41e-4 2e-41e-45e-55e-55e-51e-4
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "18" + } + ] +] \ No newline at end of file diff --git a/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_origin.pdf b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e3f4d98977001976cdcb2cd87cb15d62fb786d37 --- /dev/null +++ b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/a62258ff-e367-4f69-b7af-d16c0a09ca72_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:341f25ece553738d4fb83973c14a91d0c4a59aecd60e7e4d5b54a2231ccb63b0 +size 17679744 diff --git a/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/full.md b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/full.md new file mode 100644 index 0000000000000000000000000000000000000000..c8167ae94dcb8ea82e10c5cec0aaab08d451502d --- /dev/null +++ b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/full.md @@ -0,0 +1,359 @@ +# WINNING BOTH THE ACCURACY OF FLOATING POINT ACTIVATION AND THE SIMPLICITY OF INTEGER ARITHMETIC + +Yulhwa $\mathbf{K}\mathbf{m}^{1}$ , Jaeyong $\mathbf{J}\mathbf{a}\mathbf{g}^{1}$ , Jehun $\mathbf{L}\mathbf{e}^{1}$ , Jihoon $\mathbf{P}\mathbf{k}^{1}$ , Jeonghoon $\mathbf{K}\mathbf{m}^{2}$ , Byeongwook $\mathbf{K}\mathbf{m}^{2}$ , Baeseong $\mathbf{p}\mathbf{k}^{2}$ , Se Jung Kwon $^{2}$ , Dongsoo Lee $^{2}$ , Jae-Joon $\mathbf{K}\mathbf{m}^{1}$ + +$^{1}$ Seoul National University, $^{2}$ NAVER Cloud + +{yulhwakim, jaeyongjang, jehun.lee, jihoonpark, kimjaejoon}@snu.ac.kr, {jeonghoon.samuel, 
byeonguk.kim, baeseong.park, sejung.kwon, dongsoo.lee}@navercorp.com + +# ABSTRACT + +Even though floating point (FP) numbers have been adopted as a de facto standard data format for deep learning computing, the complexity of FP arithmetic impedes a broader deployment of Deep Neural Networks (DNNs). Recent works such as quantization have attempted to replace the FP matrix multiplication (MatMul) of DNNs with simple integer MatMul by transforming the datatypes of both weights and activations into integers. Unfortunately, unlike weight values that are static, it is challenging to represent dynamic activations with integers. In this paper, to simultaneously achieve the accuracy of FP activation and the simplicity of integer arithmetic, we present a method for replacing FP arithmetic with integer one without changing FP activations in the storage format while weights are quantized. The proposed method pre-aligns the significands of FP activations just ahead of the MatMul on-the-fly so that the aligned significands (integers) can be used for the computation. Inspired by an observation that conventional FP arithmetic does not produce precise results due to rounding, we demonstrate that our proposed integer arithmetic-based scheme can produce the same level of errors as that of the FP arithmetic in case DNNs use FP activations and quantized weights. Experimental results show that the hardware based on the proposed scheme shows significant improvement over FP arithmetic-based designs in terms of energy efficiency and throughput-per-area while maintaining a similar level of accuracy. + +# 1 INTRODUCTION + +Deep Neural Networks (DNNs) usually use Floating-Point (FP) number systems to represent a wide range of weight and activation values. Such a comprehensive representation, however, demands high computational complexity and cost for FP matrix multiplication (MatMul) (Sze et al., 2017). 
On the other hand, integer (a.k.a fixed-point) arithmetic logic is much simpler while consuming less energy compared to FP counterpart (Jouppi et al., 2021). As such, the computational efficiency of DNNs can be enhanced by replacing FP arithmetic with integer one. Accordingly, quantization has been actively studied as a promising technique to support DNN computations with integer arithmetic, as it maps the input values of a (virtually) continuous domain (FP numbers) to the output values of a discrete set (integers) (Jacob et al., 2018). Note that even though several studies have successfully quantized weights and activations of some target DNNs with low-precision integer values (Li et al., 2021; Wu et al., 2022), quantization is still challenging for numerous DNNs. In particular, activation values are known to be more difficult to be quantized than the weight parameters because activations are dynamically generated during inference while the distribution of weights is static. The uncertainty of the distribution of dynamic activation values limits the ability to estimate proper quantization range (Choi et al., 2018). Such issues on activation quantization become even more serious when DNNs involve highly non-linear activation functions (e.g., GeLU) or modules that increase the variance of the activations (e.g., softmax and normalization layers) (Jeon et al., 2020). As a result, while the weight parameters can be successfully quantized even for generative mod + +![](images/a88431bd3c3fb7c88e0af2e9244d736bb954c6c3e9fc88a8995005b1c6f84b2a.jpg) +Figure 1: An example of FP summation with (a) conventional FP computation and (b) proposed method. The precise summation is described in the box on top. 
+ +els (Xu et al., 2018; Bai et al., 2019; Jeon et al., 2022; Park et al., 2022; Kwon et al., 2022; Frantar et al., 2022) and extra-large models such as GPT-NeoX-20B (Chung et al., 2020; Yao et al., 2022), activation quantization usually relies on intensive quantization-aware training or sophisticated investigation algorithms such as dynamic min/max searching (Tao et al., 2022). Note that activation quantization is mandatory if integer arithmetic logic is involved for MatMul operations. Thus, to avoid such significant efforts to quantize complex DNNs (mainly due to activation quantization), recent neural processing units tend to employ FP arithmetic units even for inference process at the cost of increased energy and area (Jouppi et al., 2021). + +To address the challenges discussed above, we propose a scheme that can achieve both the accuracy of FP activations and the simplicity of integer arithmetic. Our motivation stems from an observation that most multiplications can be removed once weights are quantized to be binary-coded (Jeon et al., 2020). Then, consecutive FP additions are mainly required to perform MatMul, and hence, we find conventional FP units can be much simplified. To be more specific, when processing the MatMul of DNNs, our proposed method first pre-aligns the significands of FP activations to be added. Correspondingly, FP activations can be reformatted into integer values and FP arithmetic units (FPUs) can be replaced with integer units during MatMul operations. A naive pre-alignment for accurate computation requires very high-resolution integer units for the computation, which negates the benefits of using integer units. Inspired by an observation that conventional FP arithmetic does not guarantee the exact results due to rounding errors (Wilkinson, 1994), we show that the same level of computational error can be obtained even when the pre-aligned significands are aggressively truncated. 
We then implement an integer-based FP arithmetic unit (iFPU) hardware for MatMul computation based on the proposed scheme. A comprehensive evaluation of the iFPU on various DNNs shows that the iFPU significantly improves energy efficiency and throughput-per-area over the conventional FPU-based MatMul engine while maintaining the neural network accuracy. + +# 2 BACKGROUND + +# 2.1 FLOATING-POINT ARITHMETIC AND ROUNDING ERROR + +FP format represents a number as $(-1)^{s} \times (m) \times 2^{(e - bias)}$ which consists of sign $(s)$ , exponent $(e)$ , and significand (or mantissa, $m$ ) (Muller et al., 2018). Float32 assigns 1 bit for $s$ and 8 bits for $e$ . Precision $(p)$ , the effective bit count of the significand, is 24 bits (among which 23 bits are explicitly stored). Bfloat16, which has been gaining popularity in the field of deep learning, intensely cuts down stored significand bits to 7 (compared to 23 in float32) to lower the total number of bits per value, and thereby reduces memory footprint (Wang & Kanwar, 2019). The bias of the exponent term is usually set to half of the exponent maximum. + +FP format can cover a wide range of numbers by separating the significant digits and the scale of the number. Note that because of the precision limits, there is a gap between two consecutive FP numbers. Such a gap is called a unit of least precision (ulp) whose value is represented by the least significant digit. Hence, it is hard to represent real numbers precisely with FP format even if the numbers are in the dynamic range of the FP format, and rounding is required for converting real numbers into FP numbers. FP arithmetic typically normalizes significands for each computation, and the rounding operation is followed by the normalization to convert the computation result into an FP number. 
Round-to-nearest is the most frequently chosen rounding mode, where the difference between the real value and the round-off value can be as large as half of ulp, and its relative error is bounded by $\epsilon = \frac{1}{2}ulp = 2^{-p}$ , which is referred to as machine epsilon. Both ulp and $\epsilon$ are widely used to evaluate the accuracy of numeric calculations (Goldberg, 1991).
+
+As every FP operation includes the rounding stage, rounding error is unavoidable in FP arithmetic. Although the error of a single FP arithmetic operation may be small enough to be ignored, the error can be substantial if a series of multiple FP arithmetic results are accumulated. For example, an inner product of MatMul involves multiple FP additions in a row and the FP summation piles up the rounding error of each FP adder (Figure 1(a)). Accordingly, numerous solutions have been introduced to compensate for the error of the FP summation (Muller et al., 2018). Such error compensations cause an additional computation burden for tracking and fixing the error. Since the effect of the rounding errors on DNN accuracy is negligible, popular deep learning frameworks such as PyTorch and CuDNN (Paszke et al., 2019; Chetlur et al., 2014) allow the rounding errors (without the compensation algorithms) in favor of simple computation. Note that as the level of rounding error depends on the precision $p$ (only 8 bits for bfloat16), the error becomes noticeable for bfloat16. Therefore, summation of bfloat16 values uses float32 adders (instead of bfloat16 adders) to preserve the accuracy of accumulated results (Wang & Kanwar, 2019; Intel, 2018; Henry et al., 2019).
+
+# 2.2 RELATED WORKS
+
+Block Floating Point (BFP) has been proposed as a compromise between FP and integer formats. It assigns a single shared exponent to a group of FP values while maintaining individual significands (Wilkinson, 1994). 
The BFP has drawn attention as a flexible low-bit numeric format for quantization because the shared exponent can represent the dynamic range of values with little overhead. Hence, BFP can achieve a higher compression ratio than integer formats (Zhang et al., 2022a). In addition, since the individual significand values are integer, the BFP formats enable simpler computation than FP formats (Koster et al., 2017). Note that a critical limitation in previous works based on BFP formats is that the same level of accuracy as that of conventional FP computations cannot be guaranteed (even theoretically). Previous works tend to find the optimal BFP formats with the least memory/computation density by evaluating DNN accuracy for various bit resolution and group sizes (Song et al., 2018; Lian et al., 2019; Rouhani et al., 2020). Another drawback in some previous works on BFP is that DNNs with BFP format need to be fine-tuned usually by quantization-aware training to improve the accuracy (Zhang et al., 2022a; Rouhani et al., 2020). Since a quantized neural network allows only one fixed block size that is optimized for target hardware during training, a neural network needs to be retrained for different hardware choices if a block size differs. + +Truncated binary multipliers with error compensation schemes have been proposed to reduce the number of outputs in integer multiplications (Petra et al., 2009). While both the truncated multipliers and our proposed work use the truncations to improve computational efficiency, there are critical differences between them. In the truncated binary integer multipliers, the amount of the truncated bits is fixed while it varies in FP additions cases which our work focuses on. 
In addition, (Petra et al., 2009) presents a truncation error correction function utilizing the fact that some of the truncated partial products share the same inputs with the remaining partial products, so they have correlations with the remaining partial product values. Unfortunately, in FP addition cases, the truncated significands do not have any correlation with the remaining bits so it is hard to devise similar error correction schemes. Hence, there is a strong need to develop alternative ways to control the truncation errors in FP operations.
+
+# 3 RECONSTRUCTION OF FP-BASED MATMUL WITH INTEGER ARITHMETIC
+
+# 3.1 OVERVIEW OF THE PROPOSED MATMUL RECONSTRUCTION AND COMPUTATION
+
+In this section, we propose a methodology to reconstruct FP MatMul with integer arithmetic for efficient DNN computation, focusing on FP activations and quantized weights. In most cases, the weight matrix with $m$ -bit quantization can be expressed as a binary-coded matrix: $\sum_{b=1}^{m} \alpha_{b} \cdot B_{b}$ where $\alpha_{b}$ is a scaling factor and $B_{b}$ is a binary weight matrix of each bitplane. Here, $\alpha_{b}$ can be a power of 2 for uniform quantization or can be an FP value for non-uniform quantization. MatMul is composed of multiple dot products, and a dot product between activations and weights is defined as $\sum_{k=1}^{n} (a_{k} \times w_{k})$ ( $a$ : activation, $w$ : weight, $n$ : fan-in of the layer). If we apply binary-coded weights and properly change the order of the operations, we can rewrite the dot product as follows:
+
+$$
+\sum_ {b = 1} ^ {m} \alpha_ {b} \sum_ {k = 1} ^ {n} \left(a _ {k} \times B _ {b, k}\right), B _ {b, k} \in \{ - 1, + 1 \} \tag {1}
+$$
+
+![](images/811293706d38e8580ee9fae13d3f1ecfad52f15572bc10de6365b6b9099324a6.jpg)
+Figure 2: Overview of the proposed MatMul computing scheme for DNNs with FP activations. 
+ +![](images/55188d2c9943bae17819d9b758b32d345370f0eb1446e130b59171a68ae29ca9.jpg) +Figure 3: Comparison of a previous approach (e.g., MSFP (Rouhani et al., 2020)) and the proposed approach for applying block floating point (BFP) to DNN computation. In the case of MSFP, the original network needs to be retrained for the MatMul engines with different block sizes, but in the proposed scheme, the original network can be fed into the engines with any block sizes. + +For each bitplane, MatMul of weights and activations is reconfigured as the addition/subtraction of activation values except for a few $\alpha_{b}$ multiplications that are necessary to merge the outputs from each bitplane. Because FP multiply-accumulate operations require more hardware resources than FP additions, even such a reconfiguration of matrix multiplication to remove most multiplications can improve the efficiency of DNN computations significantly (Jeon et al., 2020). Even so, because FP additions are still computationally more expensive than integer additions, replacing FP additions with integer additions can save even more energy and area. Therefore, we propose to reconstruct FP-based MatMul (Eq. 1) using integer additions (Figure 2). One of the key components of the proposed method is the pre-alignment, which reforms the FP activation values into integer values on-the-fly by sharing the exponent value among the activations that are fed to a dot product of the MatMul at a time. The pre-alignment finds the maximum of the exponents among the activations and aligns corresponding significands simultaneously based on the difference of each exponent and the maximum exponent. As a result, unlike conventional FP arithmetic that performs the alignment for each addition, our proposed computing methodology aligns the activation values once per MatMul, and thus, reduces the overall cost of the alignment process significantly. 
Note that as opposed to previous works that share the exponent among a block of inputs in the storage format (e.g., MSFP (Rouhani et al., 2020)), our design performs the exponent sharing during the computation. Since different exponents are allowed in the storage format in our scheme, we keep the representation power of the conventional FP format (Figure 3). Because pre-aligned activations can be represented by the aligned significands which are integer values, an FP addition of the MatMul can be replaced by an integer addition. After the whole summation process, the proposed method reforms the summation results back to FP values by normalizing the results with the maximum exponent found in the pre-alignment stage. Then, the computation results from each weight bitplane are multiplied by $\alpha_{b}$ and merged to finish the target MatMul operation. + +As the exponent of float32 (or bfloat16) is 8-bit, the maximum amount of the significand shifting is 255 and the resolution of the aligned activation becomes 279 (or 263) bits. Note that such a large bit width might negate the benefits of using integer units. For example, while 32-bit integer addition consumes $10.3\%$ energy of float32 addition, 279-bit integer requires a level of energy per addition comparable to that of float32 addition (Appendix B.1). To avoid the large design overhead, we propose to use only the top $t(= p + \delta)$ bits of the aligned activation when $\delta$ indicates the number of extra significand bits for reducing truncation error. Since the conventional FP addition also experiences errors due to truncation of significand, relatively small extra $\delta$ bits for the proposed method can derive a level of errors similar to that of conventional FP addition (as described in Figure 1). 
+
+# 3.2 COMPUTATION ERROR AFTER SIGNIFICAND TRUNCATION
+
+To study the characteristics of errors in the proposed method with truncated significands, we first analyze the computation error with a single addition/subtraction between two FP values $x$ and $y$ .
+
+We assume $x > y \geq 0$ , $x = x_0.x_1 \cdots x_{p-1}$ , and $y = y_0.y_1 \cdots y_{p-1} \times 2^{-k} (k \geq 0)$ without loss of generality, because only the difference between the exponents decides the amount of shifting and truncation. Here, $x_i$ and $y_i$ denote the binary value of $i$ -th significand bit, and the leading bit $x_0$ is 1 for $x$ when $x > 0$ . When either $k$ or $y$ is 0, there is no need for significand shifting and truncation, and hence, integer-based FP arithmetic can guarantee the precise computation without any extra bit (i.e., $\delta = 0$ ).
+
+When $k > 0$ , we need to shift and truncate the significand of $y$ for the computation. For the alignment, $y$ should be shifted to right by $k$ , so $y$ can be rewritten as $y = 0.0\cdots 0y_k' y_{k+1}'\cdots y_{k+p-1}'$ where $y_{k+i}'$ is equal to $y_i$ . As only the top $t(= p + \delta)$ bits of the significand remain after the truncation, the truncated result becomes $\bar{y} = 0.0\cdots 0y_k' \cdots y_{t-1}'$ . When $\delta \geq k$ , the difference between $y$ and $\bar{y}$ is 0. Otherwise, the difference between $y$ and $\bar{y}$ is bounded as follows:
+
+$$
+\left| y - \bar {y} \right| = 0. 0 \dots 0 y _ {t} ^ {\prime} \dots y _ {k + p - 1} ^ {\prime} \leq 2 ^ {- (p + \delta - 1)} \left(1 - 2 ^ {- (k - \delta)}\right). \tag {2}
+$$
+
+The relative error of the addition with the truncated significand is defined as follows:
+
+$$
+e _ {a d d} = \frac {\left| (x + y) - (x + \bar {y}) \right|}{| x + y |} = \frac {| y - \bar {y} |}{| x + y |}. \tag {3}
+$$
+
+By applying both $|x + y| \geq |x| \geq 1$ and Eq. 2 to Eq. 
3, we can obtain + +$$ +e _ {a d d} \leq 2 ^ {- (p + \delta - 1)} \left(1 - 2 ^ {- (k - \delta)}\right) \leq 2 ^ {- (p + \delta - 1)}. \tag {4} +$$ + +Because the machine epsilon is given as $\epsilon = 2^{-p}$ , $e_{add} \leq \epsilon$ when $\delta$ is 1 and $e_{add} \leq \frac{1}{2}\epsilon$ when $\delta$ is 2. For subtraction, the relative error is defined similarly as follows: + +$$ +e _ {s u b} = \frac {\left| (x - y) - (x - \bar {y}) \right|}{\left| x - y \right|} = \frac {\left| y - \bar {y} \right|}{\left| x - y \right|}. \tag {5} +$$ + +When $\delta \geq k$ , $|y - \bar{y}|$ is 0 so that $e_{sub}$ is 0. The minimum of $x$ is 1, and $y$ has the maximum value when all $y_{k + i}'$ s are 1. Correspondingly, $|x - y|$ is bounded as follows: + +$$ +\left| x - y \right| \geq \left\{ \begin{array}{l l} 1 - 0. 1 1 \dots 1 = 2 ^ {- p}, & \text {f o r} k = 1 \\ 1 - 0. 0 \dots 0 1 \dots 1 \geq 2 ^ {- 1} + 2 ^ {- 2} + \dots + 2 ^ {- (k - 1)}, & \text {f o r} k \geq 2 \end{array} \right. \tag {6} +$$ + +When $k$ is 1 and $\delta$ is 0, we get $|y - \bar{y}| \leq 2^{-p}$ from Eq. 2. For such a case, according to Eq. 6 and Eq. 5, we have $e_{sub} \leq 1$ . The worst case happens when $x = 1$ and $y = 0.111 \cdots 1$ . When $k \geq 2$ , by applying Eq. 2 and Eq. 6 to Eq. 5, we get $e_{sub} \leq \epsilon$ for $\delta = 1$ , and $e_{sub} \leq 1/2\epsilon$ for $\delta = 2$ . As a result, regardless of FP formats, the proposed method has the error level as summarized in the following Remark 1. + +Remark 1 The integer-based FP addition/subtraction has the same level of error as that of the conventional FP addition/subtraction with 1 extra bit, and the error becomes half with 2 extra bits. + +Note that the error of FP summation is the same as the accumulated value of errors from each addition (Muller et al., 2018). 
The reconstructed MatMul, however, induces an additional stage of
+
+![](images/34a77698e36c7076b358899961895e08c9fc226399ef18cfa647a076d91edd91.jpg)
+Figure 4: (a) Average and (b) maximum FP summation errors of conventional FP computation and the proposed method with extra bits $(\delta = 0,1,2)$ against the accurate FP summations with Shewchuk algorithm (Shewchuk, 1997).
+
+![](images/35d60a018a96def348034e2d6034b03f1044607d7f5a7eb920c58b535999e31b.jpg)
+
+converting integer summation results to FP values, and thus, additional rounding error during the FP formatting (Figure 5(a)). For example, to sum 128 FP values, a conventional FP-based MatMul has 127 error sources with bound $\epsilon$ while the reconstructed MatMul with 1 extra bit has 128 error sources with bound $\epsilon$ such that the reconstructed MatMul might experience a slightly larger error than conventional FP-based MatMul. Therefore, to guarantee the same error level as that of the conventional FP arithmetic, 2 extra bits are used for pre-alignment. Then, reconstructed MatMul has 127 error sources with bound $0.5\epsilon$ and an additional error source with bound $\epsilon$ .
+
+To verify the computation error of the proposed method, we randomly sample float32 values and compare the computation error of FP summation between conventional FP computation and the proposed method. To explore a wide range of float32 values, we sample $s$ , $e$ , and $m$ values independently assuming a uniform distribution, and then concatenate those values. We vary the fan-in (i.e., the number of values to be accumulated) from 128 to 8192, and sample 50,000 sets of FP numbers for each fan-in selection. The Shewchuk algorithm is employed to obtain accurate FP summation baseline data for error measurement (Shewchuk, 1997).
+
+As shown in Figure 4, the proposed method produces a similar level of errors to that of the conventional FP arithmetic for various fan-in values when $\delta = 2$ . 
Because larger errors are more likely to be accumulated with larger fan-in, we see that both average and maximum errors tend to grow as the fan-in increases (Figure 4). Nonetheless, the average error $(12.3 \times 10^{-7})$ and the maximum error $(2.4 \times 10^{-2}$ or $2.4\%)$ are relatively small even with 8192 fan-in, which justifies the current practice of implementing conventional FP additions without error correction for DNN inference. Correspondingly, the proposed method can support as precise numerical computation as conventional FP arithmetic does. + +# 4 EXPERIMENT + +# 4.1 IFPU: A MATMUL ENGINE FOR THE PROPOSED METHOD + +Overall Architecture. To evaluate the proposed method with real hardware implementation, we first design a MatMul engine called iFPU. Figure 5 shows the overview of systolic iFPU architecture which adopts the design principle of Google's TPU (Jouppi et al., 2017). iFPU performs FP MatMul in the form of a set of FP summation (Eq. 1) that is physically implemented as integer summation for high efficiency. After the computation, the iFPU converts integer results into FP values through the int2fp converter at the end of the Processing Element (PE) arrays. Then, scale & accumulator is used to multiply $\alpha_{b}$ and add summation results of each weight bit + +![](images/8a13a0c4739e9b90f68e1394695a72bbe8e7cc524e031be0bed1ed4a70bd57e1.jpg) +Figure 5: A block diagram of iFPU + +plane to finish the MatMul (Eq. 1). The size of MatMul that can be processed in the iFPU at a time is bounded by the number of PEs, and as a practical design, we evaluate the iFPU with $32 \times 32$ , $64 \times 64$ , or $128 \times 128$ PEs for the experiment. When fan-in of the DNN layer exceeds the row count of PEs, activations of the layer are tiled to fit the row-count limit, and each tile is fed into the iFPU at a time and processed with integer adders in the PEs. 
To complete the entire MatMul, the computing results for different tiles should be merged, and for this, float32 adders (accumulator) are used again.
+
+Precision of Integer Adder. As the PE array of the iFPU accumulates the pre-aligned and truncated significands, the size of the integer adder in each PE depends on $t$ , which is determined by the precision of the given FP format ( $p$ ) and extra bits ( $\delta$ ) attached to control truncation error. Based on the theoretical analysis given in Section 3.2, the iFPU for float32 activations conducts 26-bit integer addition with $\delta = 2$ . Though the iFPU introduces additional FP accumulations due to the MatMul tiling, the error level of integer-based FP addition with $\delta = 2$ is half of the conventional FP addition according to Remark 1. Therefore, the iFPU with $\delta = 2$ can still preserve the same level of computing error as that of conventional FP MatMul (Figure 6(a)). Furthermore, the iFPU for bfloat16 activations can be designed to be even smaller and more energy efficient by using smaller precision integer adders thanks to the reduced bit precision for significands. Interestingly, conventional bfloat16 accumulation still uses float32 adders to preserve the accuracy of accumulated
+
+![](images/cf46c1f7050183a59f40048d068deffef67250ac95693dfffa8f6ce8d478fe7f.jpg)
+(a) Evaluation with float32 activation
+
+![](images/17cb24c416bcac6f908010fec2e877c55e1bc30c4dbb8c06f04d4f194e1ed123.jpg)
+(b) Evaluation with bfloat16 activation
+
+![](images/cd2264494c7c1149c454cfc42e3280b6d3e9841ae704ae9f98e1217546c28ba8.jpg)
+Figure 6: Numerical computation errors of MatMul for DNNs with FP activation. We measure the computation error of conventional FPU-based engine and the proposed iFPU against the accurate FP computation with Shewchuk algorithm (Shewchuk, 1997). The number of PEs and fan-in are annotated along the horizontal axis. 
+ +![](images/819af033f580bca5686abd485b875008c4e4eab5e6e651e6d643ea05e1f2ffa7.jpg) + +![](images/b17e97228a302c010195825f3567e080b9a809e0c13e42506451f32f825770f2.jpg) + +![](images/d4e67b68c95440ea4b5872d8a48a3e0ff842a2a3dcbe5ce46074da4e54b4b959.jpg) +(a) Evaluation with float32 activation + +![](images/fac697cc56c79e8dfc34799ac13aae38a22cde3ff94de97cf53d615d61082109.jpg) +(b) Evaluation with bfloat16 activation + +![](images/f64159eff0c7a5f994cd1ca807d5665991632eae40a8901dc01eb8d5e726adc7.jpg) +Figure 7: Cosine distance between MatMul results of BERT-base (task: MRPC) extracted from inference results using conventional FPU-based engine (NVIDIA RTX3090) and the proposed iFPU. The last feed-forward layer in each encoder block (1-12th layers) and pooler (13th layer) is used for the evaluation. The number of PEs and layer indices are annotated along the horizontal axis. + +results (Wang & Kanwar, 2019; Intel, 2018; Henry et al., 2019). However, as the accumulated results are converted back to bfloat16, it is possible to maintain the accuracy of bfloat16 accumulation with less accurate adders than float32 adders. Figure 6(b) shows that the proposed bfloat16 iFPU with $\delta = 3$ (which uses 11 bit adders) provides comparable accuracy to that of conventional bfloat16 adders. + +# 4.2 ANALYSIS OF THE DNN COMPUTATION ACCURACY + +MatMuls of DNN with iFPU vs FPU. In the previous section, we compared the accuracy of the proposed integer-based FP MatMul with precise results. Since our goal is to replace the FPU with the proposed iFPU, it is also important to compare the computational difference between the conventional error-prone FPU-based engine and the iFPU. For an in-depth understanding of DNN inference with the iFPU, we first compare the inference output of each layer in the BERT-base model (Devlin et al., 2018) computed with an FPU-based engine (NVIDIA RTX3090) and the proposed iFPU. BERT-base uses 4-bit weight values and the target task is MRPC. 
In iFPU, MatMuls between weights and activations are processed with the proposed integer-based approach, but other operations such as softmax are processed by using conventional FPU. We employ cosine distance as the metric to measure the difference in layer outputs. Note that the cosine distance is 0 for two identical vectors and 2 for entirely opposite vectors. In this experiment, the last feed-forward layer in each + +Table 1: Accuracy of DNNs inference with conventional FPU-based engine (NVIDIA RTX3090) and proposed iFPUs(-#rows/columns of PE arrays). The numbers in parentheses represent accuracy difference between FPU & iFPU. + +
float32 activationbfloat16 activation
VGG-9ResNet-18OPT-1.3BVGG-9ResNet-18OPT-1.3B
FPU92.9170.2712.9692.9170.2812.96
iFPU-3292.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.91 (+0.00)70.26 (-0.02)12.96 (+0.00)
iFPU-6492.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.90 (-0.01)70.27 (-0.01)12.97 (+0.01)
iFPU-12892.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.92 (+0.01)70.26 (-0.02)12.98 (+0.02)
ResNet-50RegNetMnasNetResNet-50RegNetMnasNet
FPU76.3278.1875.9976.3378.1775.96
iFPU-3276.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.38 (+0.05)78.18 (+0.01)75.97 (+0.01)
iFPU-6476.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.38 (+0.05)78.18 (+0.01)75.96 (+0.00)
iFPU-12876.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.40 (+0.07)78.18 (+0.01)75.97 (+0.01)
+ +
BERT-Base w/ float32 activationAvg.
CoLAMRPCSST-2STS-BQQPMNLI-m/mmQNLIRTE
FPU56.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28
iFPU-3256.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
iFPU-6456.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
iFPU-12856.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
+ +
BERT--BASE w/ bfloat16 activationAvg.
CoLAMRPCSST-2STS-BQQPMNLI-m/mmQNLIRTE
FPU56.0889.0591.5187.5283.7481.97/82.5789.0070.0481.30
iFPU-3256.1089.0591.5187.5283.7281.94/82.5589.0570.0481.28 (-0.02)
iFPU-6456.3689.0591.6387.5283.7281.93/82.5689.0070.0481.31 (+0.01)
iFPU-12856.1088.8391.6387.5283.7281.96/82.5489.0570.0481.27 (-0.03)
encoder block and pooler is chosen for evaluation. Figure 7 shows that the FPU and the iFPU produce almost identical outputs for each layer. The averages of the distance are less than $1.2 \times 10^{-6}$ and $2.5 \times 10^{-4}$ for float32 and bfloat16 activations, respectively. Moreover, the distance between layer outputs from the two engines remains close throughout the forward path. As a result, we can expect that the proposed iFPU can support DNN inference with almost the same accuracy as that of conventional FPU.
+
+DNN Inference Accuracy. We select 7 types of DNN models to compare DNN model accuracy between the FPU and iFPU: BERT-base, VGG-9, ResNet-18, ResNet-50, RegNet-3.2GF, MnasNet-2.0, and OPT-1.3B. The accuracy of BERT-base is evaluated on the General Language Understanding Evaluation (GLUE) benchmark (Wang et al., 2019). VGG-9 (Simonyan & Zisserman, 2014) is evaluated on CIFAR-10 (Krizhevsky et al., 2009). ResNet-18, ResNet-50 (He et al., 2016), RegNet-3.2GF (Radosavovic et al., 2020), and MnasNet-2.0 (Tan et al., 2019) measure top-1 accuracy on ImageNet (Russakovsky et al., 2015). OPT-1.3B (Zhang et al., 2022b) is an open-sourced NLP model provided by Meta AI roughly matching the performance and sizes of the GPT-3 class of models and is evaluated by estimating the perplexity on WikiText-2 dataset (Merity et al., 2016). All DNN models use 4-bit weight values that are quantized by a binary-coding quantization scheme. Note that no modifications to DNN structures are needed to deploy the weight-quantized DNNs to various iFPUs because 1) activations are FP values and 2) iFPUs are designed to process any MatMul for DNNs as long as weights are quantized. Table 1 summarizes the DNN inference results. Because the iFPU can produce almost identical MatMul results as FPU, the proposed iFPUs preserve the DNN accuracy for both float32 and bfloat16 activations as we expected.
+
+# 4.3 ANALYSIS OF COMPUTATION EFFICIENCY
+
+Setup. 
To evaluate the efficiency of proposed iFPUs, we synthesize the proposed hardware in a $28\mathrm{nm}$ CMOS technology. For a fair evaluation of the impact of replacing FP MatMul with integer-based MatMul, we also design two 'baseline' engines for the conventional FP-based MatMul (Fig- + +ure 8). As the first baseline (FP-MAC), Figure 8(a) is designed with FP MAC units to process FP MatMul as a naive approach. In addition, as the second baseline (FP-ADD), Figure 8(b) is designed with FP adders to process FP MatMul reconfigured as Eq. 1. Because bitplanes of weight values are decomposed for FP-ADD and iFPU, binary weights are processed in a bit-parallel manner in FP-ADD and iFPU, while FP-MAC processes the whole weight values in each MAC unit. Compared to those two baseline engines, iFPU exhibits the lighter PEs along with additional units such as the pre-alignment unit and int2fp converter. Lastly, an int8 MatMul engine (INT8) is also implemented for the comparison between the proposed iFPU MatMul and integer MatMul. + +Results. Simulation results using the synthesized hardware demonstrate that the proposed iFPUs can improve both energy and area compared to the baselines, as the FP units of the baseline engines are replaced with the more area/energy efficient integer units (Figure 9). For float32 activations, the proposed iFPU improves throughput-per-area $(\mathrm{TOPS} / \mathrm{mm}^2)$ by up to $7.9\times$ and energy efficiency (TOPS/W) by up to $6.4\times$ compared to the FP-MAC baseline. 
For bfloat16 activations, the proposed iFPU achieves
+
+![](images/78a9b38799730d982a84162ee1b462f287492c13abbfdf994834e1d8f4953627.jpg)
+Figure 8: Baseline MatMul engines (a) FP-MAC and (b) FP-ADD
+
+![](images/0c03f4a03e6589511b931f1211689d946f45856dd15f17cf76dc0e7bde169e37.jpg)
+
+even larger improvements because the size of the corresponding integer-based unit is reduced as the bit resolution of the aligned-truncated significands is reduced by 15 bits compared to float32 activation cases. The throughput-per-area of the iFPU is improved by up to $9.9\times$ and energy efficiency is enhanced by up to $11.9\times$ compared to the FP-MAC baseline. The improvement over the baseline becomes larger as the number of PEs increases because the overhead of additional logic such as pre-alignment units in the proposed scheme can be amortized (detailed in Appendix C.2). We also compare the iFPUs with the INT8 engine. While bfloat16 activations close the gap between the FP-MAC baseline and the INT8 engine significantly in terms of throughput-per-area, iFPU (with bfloat16 activations) achieves even higher energy efficiency than the INT8 engine in some cases (Figure 9).
+
+![](images/b300b0a5576d752e93ad84c011fe82fe2c46903ab5d90fd00710bad3caef2aa3.jpg)
+Figure 9: Normalized energy efficiency (TOPS/W) (left) and throughput-per-area (TOPS/mm²) (right) of MatMul Engines: baselines and iFPUs for FP MatMul; INT8 for int8 MatMul. The number of PEs and target activation types are annotated along the horizontal axis.
+
+![](images/c8ed2a204e737468738f1e7cd154c1c9fad26803d41a986509e9b7811dac74f0.jpg)
+
+# 5 CONCLUSION
+
+The need to accomplish computing MatMul by using FP activations and quantized weights is increasing due to the growing usage of complex non-linear activation functions in DNN models such as Transformers. Conventional computing platforms such as CPU, GPU, and NPU, however, are inefficient in performing such computations. 
In this paper, we propose a new MatMul computing scheme dedicated to DNNs with FP activations and binary-coding weight quantization. The proposed method accelerates the FP MatMul of DNNs using the shared exponent and the integer arithmetic to improve computational efficiency. Previous works which also used the block floating point number with shared exponent often claim the validity of their design by presenting comparable DNN accuracy without verifying the robustness of MatMul results in a rigorous manner. We theoretically prove that the proposed scheme can produce the same error level as that of conventional FP arithmetic. To evaluate the computational efficiency of the proposed method, we design and synthesize a MatMul engine, iFPU, following the principle of integer-based operations. Experimental results support our claim that, compared to the conventional FPU-based design, the iFPUs accelerate the weight-only quantized DNNs with $6.4 \times$ and $7.9 \times$ higher energy efficiency and throughput-per-area for float32 activations, respectively. In addition, the iFPUs yield $11.9 \times$ and $9.9 \times$ higher energy efficiency and throughput-per-area, respectively, when associated with bfloat16 activations.
+
+# ACKNOWLEDGMENTS
+
+This work was supported in part by Institute of Information & communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (No. 2021-0-01343, Artificial Intelligence Graduate School Program (Seoul National University) $(10\%)$ , and No.2021-0-02068, Artificial Intelligence Innovation Hub $(10\%)$ ).
+
+# REFERENCES
+
+Yu Bai, Yu-Xiang Wang, and Edo Liberty. Proxquant: Quantized neural networks via proximal operators. International Conference on Learning Representations, 2019.
+Sharan Chetlur, Cliff Woolley, Philippe Vandermersch, Jonathan Cohen, John Tran, Bryan Catanzaro, and Evan Shelhamer. cudnn: Efficient primitives for deep learning. arXiv preprint arXiv:1410.0759, 2014. 
+Jungwook Choi, Zhuo Wang, Swagath Venkataramani, Pierce I-Jen Chuang, Vijayalakshmi Srinivasan, and Kailash Gopalakrishnan. Pact: Parameterized clipping activation for quantized neural networks. arXiv preprint arXiv:1805.06085, 2018.
+Insoo Chung, Byeongwook Kim, Yoonjung Choi, Se Jung Kwon, Yongkweon Jeon, Baeseong Park, Sangha Kim, and Dongsoo Lee. Extremely low bit transformer quantization for on-device neural machine translation. In Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 4812-4826, 2020.
+Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
+Elias Frantar, Saleh Ashkboos, Torsten Hoefler, and Dan Alistarh. Gptq: Accurate post-training quantization for generative pre-trained transformers. arXiv preprint arXiv:2210.17323, 2022.
+David Goldberg. What every computer scientist should know about floating-point arithmetic. ACM computing surveys (CSUR), 23(1):5-48, 1991.
+Mark Harris. Mixed-precision programming with CUDA 8, 2016. URL https://developer.nvidia.com/blog/mixed-precision-programming-cuda-8/.
+Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.
+Greg Henry, Ping Tak Peter Tang, and Alexander Heinecke. Leveraging the bfloat16 artificial intelligence datatype for higher-precision computations. In 2019 IEEE 26th Symposium on Computer Arithmetic (ARITH), pp. 69-76. IEEE, 2019.
+Mark Horowitz. Computing's energy problem (and what we can do about it). In 2014 IEEE International Solid-State Circuits Conference Digest of Technical Papers (ISSCC), pp. 10-14. IEEE, 2014.
+Intel. Bfloat16 - hardware numerics definition, 2018. URL https://www.intel.com/content/dam/develop/external/us/en/documents/bf16-hardware-numerics-definition-white-paper.pdf. 
Accessed: 2022-09-07. +Benoit Jacob, Skirmantas Kligys, Bo Chen, Menglong Zhu, Matthew Tang, Andrew Howard, Hartwig Adam, and Dmitry Kalenichenko. Quantization and training of neural networks for efficient integer-arithmetic-only inference. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2704-2713, 2018. +Yongkweon Jeon, Baeseong Park, Se Jung Kwon, Byeongwook Kim, Jeongin Yun, and Dongsoo Lee. Biqgemm: matrix multiplication with lookup table for binary-coding-based quantized dnns. In SC20: International Conference for High Performance Computing, Networking, Storage and Analysis, pp. 1-14. IEEE, 2020. + +Yongkweon Jeon, Chungman Lee, Eulrang Cho, and Yeonju Ro. Mr.biq: Post-training non-uniform quantization based on minimizing the reconstruction error. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12329-12338, 2022. +Norman P Jouppi, Cliff Young, Nishant Patil, David Patterson, Gaurav Agrawal, Raminder Bajwa, Sarah Bates, Suresh Bhatia, Nan Boden, Al Borchers, et al. In-datacenter performance analysis of a tensor processing unit. In Proceedings of the 44th annual international symposium on computer architecture, pp. 1-12, 2017. +Norman P Jouppi, Doe Hyun Yoon, Matthew Ashcraft, Mark Gottscho, Thomas B Jablin, George Kurian, James Laudon, Sheng Li, Peter Ma, Xiaoyu Ma, et al. Ten lessons from three generations shaped google's tpuv4i: Industrial product. In 2021 ACM/IEEE 48th Annual International Symposium on Computer Architecture (ISCA), pp. 1-14. IEEE, 2021. +Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W Mahoney, and Kurt Keutzer. I-bert: Integer-only bert quantization. In International Conference on Machine Learning, pp. 5506-5518. PMLR, 2021. +Urs Köster, Tristan Webb, Xin Wang, Marcel Nassar, Arjun K Bansal, William Constable, Oguz Elibol, Scott Gray, Stewart Hall, Luke Hornof, et al. Flexpoint: An adaptive numerical format for efficient training of deep neural networks. 
Advances in neural information processing systems, 30, 2017. +Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 2009. +Se Jung Kwon, Jeonghoon Kim, Jeongin Bae, Kang Min Yoo, Jin-Hwa Kim, Baeseong Park, Byeongwook Kim, Jung-Woo Ha, Nako Sung, and Dongsoo Lee. Alphatuning: Quantization-aware parameter-efficient adaptation of large-scale pre-trained language models. arXiv preprint arXiv:2210.03858, 2022. +Yuhang Li, Ruihao Gong, Xu Tan, Yang Yang, Peng Hu, Qi Zhang, Fengwei Yu, Wei Wang, and Shi Gu. Brecq: Pushing the limit of post-training quantization by block reconstruction. In International Conference on Learning Representations, 2021. +Xiaocong Lian, Zhenyu Liu, Zhourui Song, Jiwu Dai, Wei Zhou, and Xiangyang Ji. High-performance fpga-based cnn accelerator with block-floating-point arithmetic. IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 27(8):1874-1885, 2019. +Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843, 2016. +Asit Mishra, Eriko Nurvitadhi, Jeffrey J Cook, and Debbie Marr. Wrpn: Wide reduced-precision networks. In International Conference on Learning Representations, 2018. +Jean-Michel Muller, Nicolas Brisebarre, Florent De Dinechin, Claude-Pierre Jeannerod, Vincent Lefevre, Guillaume Melquiond, Nathalie Revol, Damien Stehlé, Serge Torres, et al. Handbook of floating-point arithmetic. Springer, 2018. +Gunho Park, Baeseong Park, Se Jung Kwon, Byeongwook Kim, Youngjoo Lee, and Dongsoo Lee. nuqmm: Quantized matmul for efficient inference of large-scale generative language models. arXiv preprint arXiv:2206.09557, 2022. +Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, volume 32, 2019. 
+Nicola Petra, Davide De Caro, Valeria Garofalo, Ettore Napoli, and Antonio GM Strollo. Truncated binary multipliers with variable correction and minimum mean square error. IEEE Transactions on Circuits and Systems I: Regular Papers, 57(6):1312-1325, 2009. +Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, and Piotr Dollár. Designing network design spaces. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 10428-10436, 2020. + +Bita Darvish Rouhani, Daniel Lo, Ritchie Zhao, Ming Liu, Jeremy Fowers, Kalin Ovtcharov, Anna Vinogradsky, Sarah Massengill, Lita Yang, Ray Bittner, et al. Pushing the limits of narrow precision inferencing at cloud scale with microsoft floating point. Advances in neural information processing systems, 33:10271-10281, 2020. +Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015. +Jonathan Richard Shewchuk. Adaptive precision floating-point arithmetic and fast robust geometric predicates. Discrete & Computational Geometry, 18(3):305-363, 1997. +Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. +Zhourui Song, Zhenyu Liu, and Dongsheng Wang. Computation error analysis of block floating point arithmetic oriented convolution neural network accelerator design. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018. +Vivienne Sze, Yu-Hsin Chen, Tien-Ju Yang, and Joel S Emer. Efficient processing of deep neural networks: A tutorial and survey. Proceedings of the IEEE, 105(12):2295-2329, 2017. +Mingxing Tan, Bo Chen, Ruoming Pang, Vijay Vasudevan, Mark Sandler, Andrew Howard, and Quoc V Le. Mnasnet: Platform-aware neural architecture search for mobile. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2820-2828, 2019. +Chaofan Tao, Lu Hou, Wei Zhang, Lifeng Shang, Xin Jiang, Qun Liu, Ping Luo, and Ngai Wong. Compression of generative pre-trained language models via quantization. arXiv preprint arXiv:2203.10705, 2022. +Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. International Conference on Learning Representations, 2019. +Shibo Wang and Pankaj Kanwar. Bfloat16: The secret to high performance on cloud tpus. Google Cloud Blog, 4, 2019. +James Hardy Wilkinson. Rounding errors in algebraic processes. Courier Corporation, 1994. +Xiaoxia Wu, Zhewei Yao, Minjia Zhang, Conglong Li, and Yuxiong He. Extreme compression for pre-trained transformers made simple and efficient. arXiv preprint arXiv:2206.01859, 2022. +Chen Xu, Jianqiang Yao, Zhouchen Lin, Wenwu Ou, Yuanbin Cao, Zhirong Wang, and Hongbin Zha. Alternating multi-bit quantization for recurrent neural networks. In International Conference on Learning Representations, 2018. +Zhewei Yao, Reza Yazdani Aminabadi, Minjia Zhang, Xiaoxia Wu, Conglong Li, and Yuxiong He. Zeroquant: Efficient and affordable post-training quantization for large-scale transformers. arXiv preprint arXiv:2206.01861, 2022. +Sai Qian Zhang, Bradley McDanel, and HT Kung. Fast: Dnn training under variable precision block floating point with stochastic rounding. In 2022 IEEE International Symposium on High-Performance Computer Architecture (HPCA), pp. 846-860. IEEE, 2022a. +Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuhui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068, 2022b. + +# A COMPUTATIONAL COST OF FP ARITHMETIC VS. 
INTEGER ARITHMETIC + +Table 2: Energy of computing units synthesized in a $28\mathrm{nm}$ tech node (MAC: multiply-accumulate). + +
MACMultiplyAdd
float32int8float32int32int8float32int32
Energy per Operation1.51 pJ0.08 pJ1.23 pJ0.940.06 pJ0.28 pJ0.03 pJ
Normalized Energy18.9×1.0×20.5×15.7×1.0×9.3×1.0×
+ +To cover a wide range of numbers, FP format does not fix the location of the radix point (Goldberg, 1991). Hence, FP arithmetic needs to handle input and output values with different scaling factors, and the FP arithmetic units need to align and normalize significands before and after each computation, respectively. The alignment and normalization logics consist of barrel shifters that can shift a data word by a specified amount, and the cost of the barrel shifter far exceeds the cost of other arithmetic logics in terms of both energy and area, increasing the cost of FP computation (Horowitz, 2014). Hence, in general, integer arithmetic logic is much smaller and consumes less energy than FP counterpart. + +It is well known that 8-bit integer can achieve up to $4 \times$ throughput improvement compared to IEEE-754 single-precision format (float32) in widely used GPUs (Kim et al., 2021), as the throughput of 8-bit operations is generally $4 \times$ that of 32-bit operations (Har + +ris, 2016). The advantage of integer can be magnified when the hardware platform moves to ASIC (Mishra et al., 2018). For in-depth understanding, we synthesize computing units for FP and integer in a $28\mathrm{nm}$ tech node. As shown in Table 2, multiplication-accumulation (MAC) for float32 consumes $18.9\times$ more energy than 8-bit integer (int8), a widely used integer format for quantized DNNs. Please note that a float32 MAC consists of a float32 multiplication and a float32 addition while an int8 MAC consists of an int8 multiplication and an int32 addition. The bit resolution of the adder for the int8 MAC is higher than that of the multiplier, because int8 multiplication results in 16-bit values and the bit resolution of MAC values increases as the number of accumulated values increases for integer format. In addition, the area cost of the integer unit is also much smaller than FP units as shown in Figure 10. 
Therefore, many studies have attempted activation quantization despite the various difficulties in the quantization process because both weight parameters and activations should be quantized to replace FP arithmetic with integer arithmetic. + +![](images/2122f6a76adc28979622ea194af6ee36e4c5348de9684ba2be11299ebb3fb30b.jpg) +Figure 10: Area comparison of computing units (layouts synthesized in a $28\mathrm{nm}$ node). + +# B SUPPLEMENT FOR PROPOSED SIGNIFICAND TRUNCATION + +# B.1 ENERGY IMPROVEMENT WITH SIGNIFICAND TRUNCATION + +![](images/f6d95e1fb8d8523562dfe4ac551a38216da2318ef898a74304b45f87823fe641.jpg) +Figure 11: Energy of adders synthesized in a $28\mathrm{nm}$ tech node (tested at $0.9\mathrm{V}$ ). + +![](images/6771686cafd9759a43126e3e9a12c277438d282b35843033f72ac77f7163cab9.jpg) +Figure 12: Example of the significand truncation followed by the pre-alignment. + +With naive pre-alignment of float32 activations, the maximum amount of the significand shifting is 255 and the resolution of the aligned activation becomes 279 bits. As shown in Figure 11, while 32-bit integer consumes $0.029\mathrm{pJ}$ per addition, both float32 and 279-bit integer consumes $0.281\mathrm{pJ}$ + +per addition. To avoid the large design overhead, we truncate the pre-aligned significands as shown in Figure 12. The aggressive truncation still did not cause accuracy degradation in FP additions as we described in the Section 3.2. + +# B.2 TRUNCATED BINARY MULTIPLIERS VS. PROPOSED SIGNIFICAND TRUNCATION + +![](images/2846d2162fa52e8b3bf581042c18aac7af09023cc2feabee7634935dd797f738.jpg) +(a) + +![](images/ca81923ddefb5c2185c6966507f20db5b5b336a0ea89cc33807884d331413bc9.jpg) +(b) +Figure 13: Comparison of the truncation scheme in the (a) truncated binary multiplier for integer multiplication and (b) proposed method for FP addition/subtraction. 
+ +Truncated binary multipliers (Petra et al., 2009) also discuss the truncation to improve computational efficiency, but there are critical differences between truncated binary multipliers and the proposed work as summarized in Figure 13. First of all, truncated binary multipliers deals with integer multiplications while the proposed work focuses on FP additions/subtractions. Due to the differences in the number format (integer vs. FP) and arithmetic operations (multiplications vs. additions/subtractions), the two works present completely different error analysis models and error reduction schemes. + +The error analysis models between truncated binary multipliers and our case are different, because the amount of truncation is fixed in the truncated binary integer multipliers and the amount of truncation varies in our work as the amount of significand shift varies depending on the input data. Moreover, in truncated binary multipliers, the bit resolution of truncated output is defined by the application requirement. On the other hand, as we proposed to truncate the pre-aligned values to adopt lower-bit integers and improve computational efficiency, the proper bit resolution of truncated values should be found to meet the accuracy requirement in our case. + +In addition, in integer multiplication case, some of the truncated partial products share the same inputs with the remaining partial products, so they have correlations with the remaining partial product values. (Petra et al., 2009) proposed an error minimization scheme which exploits such characteristics. On the other hand, in the FP addition/subtraction case, the truncated significands do not have any correlation with the remaining bits so it is hard to devise similar error correction schemes. 
Instead, we focused on the fact that conventional FP operation is also not precise due to the rounding of output significands so that we only need to match the error level of the proposed scheme to the conventional FP operations. Based on the facts, we showed a theoretical analysis such that the proposed integer-based FP addition/subtraction can have the similar error level as that of the conventional FP addition/subtraction when small number (1-2) of extra bits are attached to the shifted significands. With this finding, we can design an efficient integer-based FP addition logic without having complex error correction function estimated based on the truncated bits. + +# C IN-DEPTH HARDWARE ANALYSIS + +# C.1 DETAILED HARDWARE DESCRIPTION OF THE PROPOSED IFPU + +Figure 14 describes the proposed iFPU in detail. The proposed iFPU is a bit-flexible accelerator which can handle variable bitwidth of weight values. The iFPU processes weights in bit-parallel manner by processing each weight bitplane in different columns of the PE array. For example, 4-bit weights use 4 PE columns for the computation, and 8-bit weights use 8 PE columns for the computation. After the integer-based summations are done in each column of the PE array, the integer results are converted into FP values and multiplied by scaling factors which represent the significance of each bitplane. Then, computing results of each bitplane are merged in the accumulator (FP adder) to finish the MatMul. As the output resolution of FP accumulation remains the same regardless of the + +![](images/e7fab444163ce272b8005088b3213cc4c59d1ad88dd0b78bd54c6800f21cbb4c.jpg) +Figure 14: A detailed block diagram of iFPU. The iFPU processes weights in bit-parallel manner by processing each bitplane of the weights in each column of the PE array. $(B_{b,k}$ : binary weights in Eq. 
1) + +![](images/737da39c05e176befb2ecac474fd513ab63db587cee4589de9006f1b969bfd96.jpg) + +![](images/17e161b1a48ac765de589d10bcb589ab3055080325953a2431a71f0998358d68.jpg) + +size of the accumulation thanks to the characteristics of the FP format, the size of the accumulator does not need to increase for the increased weight bit width. + +# C.2 AREA/ENERGY BREAKDOWN OF PROPOSED IFPU + +![](images/479482482e743352d8059cdf7b9142d724358af8380b45b92013c7a6f7f81c59.jpg) +Figure 15: Area $(mm^2)$ (left) and power (W) (right) of MatMul Engines: baselines and iFPUs for FP MatMul with 32x32 PEs. + +![](images/9f699af0fee63273ed539329b5989a908d9f842aeacf5e7b84e0c6a47deb8a1e.jpg) + +![](images/ff4bd437c8192a558c56c8c0441be2ef86392d6948afa6172828065ce47eab4c.jpg) +Figure 16: Area breakdown (left) and power breakdown (right) of proposed iFPUs with $32\mathrm{x}32$ , $64\mathrm{x}64$ , and $128\mathrm{x}128$ PEs. + +![](images/09e5f4d13d77e18a58272181dd308ec012a30052175177d136a81df4cf53ef07.jpg) + +In this section, the area and power of the MatMul engines designed in Section 4.3 are analyzed in more detail for deeper understanding of the proposed scheme. First, a breakdown of the area/power of various MatMul engines with $32 \times 32$ PEs is shown in Figure 15. FP-ADD reconstructs FP-MAC with a series of FP additions by separately processing each weight bitplane (Eq. 1), so to match the effective throughput of FP-ADD with that of FP-MAC in case of 4-bit weights, 4 FP-ADD + +operations are used for the evaluation. Hence, though the area/energy of a single float32 adder is lower than that of a float32 MAC unit (Table 2), FP-ADD requires slightly larger area and power than FP-MAC. On the other hand, though iFPU also introduces $m$ times more operations than FP-MAC, iFPUs achieve large area and power reduction as the area/energy cost of PE arrays become significantly lower by replacing FP adders with integer adders. 
The area/power reduction is even larger in bfloat16 cases because smaller integer units can be used. As the area/power cost of PE arrays in iFPUs decreases, the relative portion of area/power of supporting logic (such as scale & accumulator) in the total area/power increases. Hence, the supporting logic accounts for more than half of the total area/power of iFPUs with 32x32 PEs. Meanwhile, the overhead of the supporting logic decreases as the size of PE arrays increases. We report the area/power breakdown of iFPUs with various number of PEs in Figure 16. The experimental results show that, as the size of PE arrays increase, the supporting logic is shared among more PEs and the overhead can be amortized. + +# C.3 IMPACT OF THE WEIGHT BITWIDTH ON THE PROPOSED IFPU + +![](images/03996390a185fb711ac2f8be70e6ddefff7047e7d4423fe87dad947c3e3121a0.jpg) + +![](images/efae3a70ea025388fd7031579896d7b03a54d2e968c2b0fa6f156bfd7fd618b4.jpg) +(a) Normalized energy efficiency (TOPS/W) of iFPUs +(b) Normalized throughput-per-area (TOPS/mm $^2$ ) of iFPUs +Figure 17: Computational efficiency of iFPUs normalized with that of the baseline FP MatMul engine (FP-MAC). Y-axis is the normalized value against FP-MAC and the iFPUs show higher efficiency than FP-MAC even for high-precision weight bits. The number of PEs and target activation types are annotated along the horizontal axis. + +This section analyzes impact of weight bitwidth on the efficiency improvement achievable with the proposed iFPU. The experimental setup is the same as Section 4.3 except the weight bits. While only 4-bit weight cases are evaluated in Section 4.3, this section evaluates weights with 1 to 16 bits. Because the proposed scheme processes each bitplane of the weights in the bit-parallel manner, higher-bit weights require more operations with PE, scale, and accumulators. Hence, as shown in Figure 17, the benefits of the iFPUs diminish as the number of weight bits increases. 
Nevertheless, even for 8-bit weight case, iFPUs achieve better computational efficiency compared to the FP-MAC baseline. + +# C.4 COMPARISON OF THE PROPOSED IFPU WITH INT4 MATMUL ENGINE + +In Figure 18., an int4 MatMul engine (INT4) is evaluated and compared with the other MatMul engines analyzed in Section 4.3. INT4 MatMul shows high energy efficiency and throughput-per + +![](images/c5e3c9d6346d6aa33c6caae94ea469a25be309957f6bdd30e862fd44c183d6af.jpg) +Figure 18: Normalized energy efficiency (TOPS/W) (left) and throughput-per-area $(\mathrm{TOPS} / \mathrm{mm}^2)$ (right) of MatMul Engines: baselines and iFPUs for FP MatMul; INT8/INT4 for int8/int4 MatMul. The number of PEs and target activation types are annotated along the horizontal axis. + +![](images/9a356fe726cbb5d3a58cf9461b03d3960de5e0f3ba0b6e06fa89355cb059cfdc.jpg) + +area. However, to take advantage of INT4 MatMul, both weight and activation should be quantized to 4 bits, which may not provide desired accuracy in many cases. + +# C.5 HARDWARE EVALUATION WITH MEMORY ACCESS + +![](images/c3cca79da7dc9a65091328ad1629fd5d8011049b9d33dce7c96233f76ca10ec8.jpg) +Figure 19: Normalized energy consumption of MatMul engines (FP-MAC, FP-ADD, and iFPU) with memory system. The inference energy is measured for BERT-based and OPT-1.3B with 4-bit weights and float32/bfloat16 activations. + +![](images/72fff8ed9dae84bf3d5364cabd6f23c656e183c87972a7bc0de116e2df7d1d13.jpg) + +![](images/30a2a7c507b03a1b556a55f453baf86b63bf50d5c0c902cd9d39f6a51f5cd8cf.jpg) + +Setup. To understand the effectiveness of the proposed method in the real computing scenario, the baselines (FP-MAC and FP-ADD) and the proposed iFPU with 128x128 PEs are further evaluated including memory access. 
For off-chip memory, we scaled down the bandwidth of HBM2 in TPU (Jouppi et al., 2021) considering the ratio of the number of PEs that make up Matrix Multiply Unit (MXU), which is 1:4 and adopted energy per bit of HBM2 from Table 2 in (Jouppi et al., 2021); we used the bandwidth of 153.5 GB/s and the energy per bit of $3.9\mathrm{pJ} / \mathrm{bit}$ . We also scaled the size of the unified buffer (on-chip SRAM buffer) in (Jouppi et al., 2021) by dividing it by 4. The unified buffer size in our design was 32MB. For SRAMs, we used the $28\mathrm{nm}$ CMOS memory compiler and the energy per bit of $0.155\mathrm{pJ} / \mathrm{bit}$ was used. To overlap memory access with computation, double buffering scheme was adopted in the unified buffer. + +Results. We evaluate a single batch inference of BERT-base and OPT-1.3B. We set the sequence length of BERT-base and OPT-1.3B as 128 and 1024 respectively. As double buffering hides the memory access latency, the proposed iFPU with memory model can achieve the same amount of throughput-per-area improvement as that of the baseline for the case in which memory access is not considered. On the other hand, the gain in energy efficiency slightly changes after considering memory access. As shown in Figure 19, the dram access energy accounts for a relatively small portion of total energy consumption in the baselines, because the data is intensively reused in the MatMul computation. As the proposed iFPU reduces the energy cost of computation, memory access energy becomes relatively significant in the proposed system. Thus, when considering the cost of memory access, the amount of improvement in the energy efficiency slightly decreases. Nevertheless, the iFPU with memory access still can improve the energy efficiency by up to 6.6x compared to FP-MAC baseline. + +# D FINE-TUNING CONDITION FOR BERT-BASE TRAINING + +Table 3: Hyper-parameters for fine-tuning BERT-base on GLUE benchmark. 
The fine-tuning uses the AdamW optimizer and the number of training epochs is 10. The learning rates decay linearly and the weight decay is set to 0.01. + +
ConfigurationGLUE
CoLAMRPCSST-2STS-BQQPMNLIQNLIRTE
Batch size1632323232161616
Learning rate1e-41e-41e-4 2e-41e-45e-55e-55e-51e-4
\ No newline at end of file diff --git a/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/images.zip b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..615dcb37759b1a82f21de60eff734611f169b596 --- /dev/null +++ b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f06761f28506b9f4864c804050cd3b82937245bfc268102e67c624e304673d74 +size 1082926 diff --git a/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/layout.json b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ae6b4e09ed09245bc5d3b88214e21e4b035d5fdf --- /dev/null +++ b/2023/Winning Both the Accuracy of Floating Point Activation and the Simplicity of Integer Arithmetic/layout.json @@ -0,0 +1,11796 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 137 + ], + "type": "text", + "content": "WINNING BOTH THE ACCURACY OF FLOATING POINT ACTIVATION AND THE SIMPLICITY OF INTEGER ARITHMETIC" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "spans": [ + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "content": "Yulhwa " + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{K}\\mathbf{m}^{1}" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": 
"text", + "content": ", Jaeyong " + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{J}\\mathbf{a}\\mathbf{g}^{1}" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "content": ", Jehun " + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{L}\\mathbf{e}^{1}" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "content": ", Jihoon " + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{P}\\mathbf{k}^{1}" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "content": ", Jeonghoon " + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{K}\\mathbf{m}^{2}" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "content": ", Byeongwook " + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{K}\\mathbf{m}^{2}" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "content": ", Baeseong " + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{p}\\mathbf{k}^{2}" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "content": ", Se Jung Kwon" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "content": ", Dongsoo Lee" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "text", + "content": ", Jae-Joon " + }, + { + "bbox": [ + 110, + 154, + 469, + 177 + ], + "type": "inline_equation", + "content": "\\mathbf{K}\\mathbf{m}^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 178, + 291, + 189 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 178, + 291, + 189 + ], + "spans": [ + { + "bbox": [ + 112, + 178, + 291, + 189 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 178, + 291, + 189 + ], + "type": "text", + "content": "Seoul National University, " + }, + { + "bbox": [ + 112, + 178, + 291, + 189 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 112, + 178, + 291, + 189 + ], + "type": "text", + "content": "NAVER Cloud" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 190, + 504, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 190, + 504, + 222 + ], + "spans": [ + { + "bbox": [ + 112, + 190, + 504, + 222 + ], + "type": "text", + "content": "{yulhwakim, jaeyongjang, jehun.lee, jihoonpark, kimjaejoon}@snu.ac.kr, {jeonghoon.samuel, byeonguk.kim, baeseong.park, sejung.kwon, dongsoo.lee}@navercorp.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 276, + 251, + 334, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 251, + 334, + 262 + ], + "spans": [ + { + "bbox": [ + 276, + 251, + 334, + 262 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 277, + 470, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 277, + 470, + 486 + ], + "spans": [ + { + "bbox": [ + 140, + 277, + 470, + 486 + ], + "type": "text", + "content": "Even though floating point (FP) numbers have been adopted as a de facto standard data format for deep learning computing, the complexity of FP arithmetic impedes a broader deployment of Deep Neural Networks (DNNs). Recent works such as quantization have attempted to replace the FP matrix multiplication (MatMul) of DNNs with simple integer MatMul by transforming the datatypes of both weights and activations into integers. 
Unfortunately, unlike weight values that are static, it is challenging to represent dynamic activations with integers. In this paper, to simultaneously achieve the accuracy of FP activation and the simplicity of integer arithmetic, we present a method for replacing FP arithmetic with integer one without changing FP activations in the storage format while weights are quantized. The proposed method pre-aligns the significands of FP activations just ahead of the MatMul on-the-fly so that the aligned significands (integers) can be used for the computation. Inspired by an observation that conventional FP arithmetic does not produce precise results due to rounding, we demonstrate that our proposed integer arithmetic-based scheme can produce the same level of errors as that of the FP arithmetic in case DNNs use FP activations and quantized weights. Experimental results show that the hardware based on the proposed scheme shows significant improvement over FP arithmetic-based designs in terms of energy efficiency and throughput-per-area while maintaining a similar level of accuracy." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 509, + 206, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 509, + 206, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 206, + 520 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 534, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 506, + 733 + ], + "type": "text", + "content": "Deep Neural Networks (DNNs) usually use Floating-Point (FP) number systems to represent a wide range of weight and activation values. Such a comprehensive representation, however, demands high computational complexity and cost for FP matrix multiplication (MatMul) (Sze et al., 2017). 
On the other hand, integer (a.k.a fixed-point) arithmetic logic is much simpler while consuming less energy compared to FP counterpart (Jouppi et al., 2021). As such, the computational efficiency of DNNs can be enhanced by replacing FP arithmetic with integer one. Accordingly, quantization has been actively studied as a promising technique to support DNN computations with integer arithmetic, as it maps the input values of a (virtually) continuous domain (FP numbers) to the output values of a discrete set (integers) (Jacob et al., 2018). Note that even though several studies have successfully quantized weights and activations of some target DNNs with low-precision integer values (Li et al., 2021; Wu et al., 2022), quantization is still challenging for numerous DNNs. In particular, activation values are known to be more difficult to be quantized than the weight parameters because activations are dynamically generated during inference while the distribution of weights is static. The uncertainty of the distribution of dynamic activation values limits the ability to estimate proper quantization range (Choi et al., 2018). Such issues on activation quantization become even more serious when DNNs involve highly non-linear activation functions (e.g., GeLU) or modules that increase the variance of the activations (e.g., softmax and normalization layers) (Jeon et al., 2020). 
As a result, while the weight parameters can be successfully quantized even for generative mod" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 113, + 83, + 501, + 152 + ], + "blocks": [ + { + "bbox": [ + 113, + 83, + 501, + 152 + ], + "lines": [ + { + "bbox": [ + 113, + 83, + 501, + 152 + ], + "spans": [ + { + "bbox": [ + 113, + 83, + 501, + 152 + ], + "type": "image", + "image_path": "a88431bd3c3fb7c88e0af2e9244d736bb954c6c3e9fc88a8995005b1c6f84b2a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 153, + 504, + 175 + ], + "lines": [ + { + "bbox": [ + 104, + 153, + 504, + 175 + ], + "spans": [ + { + "bbox": [ + 104, + 153, + 504, + 175 + ], + "type": "text", + "content": "Figure 1: An example of FP summation with (a) conventional FP computation and (b) proposed method. The precise summation is described in the box on top." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 184, + 506, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 184, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 104, + 184, + 506, + 274 + ], + "type": "text", + "content": "els (Xu et al., 2018; Bai et al., 2019; Jeon et al., 2022; Park et al., 2022; Kwon et al., 2022; Frantar et al., 2022) and extra-large models such as GPT-NeoX-20B (Chung et al., 2020; Yao et al., 2022), activation quantization usually relies on intensive quantization-aware training or sophisticated investigation algorithms such as dynamic min/max searching (Tao et al., 2022). Note that activation quantization is mandatory if integer arithmetic logic is involved for MatMul operations. Thus, to avoid such significant efforts to quantize complex DNNs (mainly due to activation quantization), recent neural processing units tend to employ FP arithmetic units even for inference process at the cost of increased energy and area (Jouppi et al., 2021)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 279, + 506, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 279, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 279, + 506, + 456 + ], + "type": "text", + "content": "To address the challenges discussed above, we propose a scheme that can achieve both the accuracy of FP activations and the simplicity of integer arithmetic. Our motivation stems from an observation that most multiplications can be removed once weights are quantized to be binary-coded (Jeon et al., 2020). Then, consecutive FP additions are mainly required to perform MatMul, and hence, we find conventional FP units can be much simplified. To be more specific, when processing the MatMul of DNNs, our proposed method first pre-aligns the significands of FP activations to be added. 
Correspondingly, FP activations can be reformatted into integer values and FP arithmetic units (FPUs) can be replaced with integer units during MatMul operations. A naive pre-alignment for accurate computation requires very high-resolution integer units for the computation, which negates the benefits of using integer units. Inspired by an observation that conventional FP arithmetic does not guarantee the exact results due to rounding errors (Wilkinson, 1994), we show that the same level of computational error can be obtained even when the pre-aligned significands are aggressively truncated. We then implement an integer-based FP arithmetic unit (iFPU) hardware for MatMul computation based on the proposed scheme. A comprehensive evaluation of the iFPU on various DNNs shows that the iFPU significantly improves energy efficiency and throughput-per-area over the conventional FPU-based MatMul engine while maintaining the neural network accuracy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 476, + 201, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 476, + 201, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 476, + 201, + 488 + ], + "type": "text", + "content": "2 BACKGROUND" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 505, + 369, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 505, + 369, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 369, + 516 + ], + "type": "text", + "content": "2.1 FLOATING-POINT ARITHMETIC AND ROUNDING ERROR" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "text", + "content": "FP format represents a number as " + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "inline_equation", + "content": "(-1)^{s} \\times (m) \\times 2^{(e - 
bias)}" + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "text", + "content": " which consists of sign " + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "inline_equation", + "content": "(s)" + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "text", + "content": ", exponent " + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "inline_equation", + "content": "(e)" + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "text", + "content": ", and significand (or mantissa, " + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "text", + "content": ") (Muller et al., 2018). Float32 assigns 1 bit for " + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "text", + "content": " and 8 bits for " + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "text", + "content": ". Precision " + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "inline_equation", + "content": "(p)" + }, + { + "bbox": [ + 104, + 527, + 504, + 606 + ], + "type": "text", + "content": ", the effective bit count of the significand, is 24 bits (among which 23 bits are explicitly stored). Bfloat16, which has been gaining popularity in the field of deep learning, intensely cuts down stored significand bits to 7 (compared to 23 in float32) to lower the total number of bits per value, and thereby reduces memory footprint (Wang & Kanwar, 2019). The bias of the exponent term is usually set to half of the exponent maximum." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": "FP format can cover a wide range of numbers by separating the significant digits and the scale of the number. Note that because of the precision limits, there is a gap between two consecutive FP numbers. Such a gap is called a unit of least precision (ulp) whose value is represented by the least significant digit. Hence, it is hard to represent real numbers precisely with FP format even if the numbers are in the dynamic range of the FP format, and rounding is required for converting real numbers into FP numbers. FP arithmetic typically normalizes significands for each computation, and the rounding operation is followed by the normalization to convert the computation result into an FP number. Round-to-nearest is the most frequently chosen as a rounding mode where the difference between the real value and the round-off value can be as large as half of ulp, and its relative error is bounded by " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\epsilon = \\frac{1}{2}ulp = 2^{-p}" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ", which is referred to as machine epsilon. Both ulp and " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " are widely used to evaluate the accuracy of numeric calculations (Goldberg, 1991)." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 226 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 226 + ], + "type": "text", + "content": "As every FP operation includes the rounding stage, rounding error is unavoidable in FP arithmetic. Although the error of a single FP arithmetic operation may be small enough to be ignored, the error can be substantial if a series of multiple FP arithmetic results are accumulated. For example, an inner product of MatMul involves multiple FP additions in a row and the FP summation piles up the rounding error of each FP adder (Figure 1(a)). Accordingly, numerous solutions have been introduced to compensate for the error of the FP summation (Muller et al., 2018). Such error compensations cause an additional computation burden for tracking and fixing the error. Since the effect of the rounding errors on DNN accuracy is negligible, popular deep learning frameworks such as PyTorch and CuDNN (Paszke et al., 2019; Chetlur et al., 2014) allow the rounding errors (without the compensation algorithms) in favor of simple computation. 
Note that as the level of rounding error depends on the precision " + }, + { + "bbox": [ + 104, + 82, + 506, + 226 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 82, + 506, + 226 + ], + "type": "text", + "content": " (only 8 bits for bfloat16), the error becomes noticeable for bfloat16. Therefore, summation of bfloat16 values uses float32 adders (instead of bfloat16 adders) to preserve the accuracy of accumulated results (Wang & Kanwar, 2019; Intel, 2018; Henry et al., 2019)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 238, + 209, + 250 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 238, + 209, + 250 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 209, + 250 + ], + "type": "text", + "content": "2.2 RELATED WORKS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 258, + 506, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 425 + ], + "type": "text", + "content": "Block Floating Point (BFP) has been proposed as a compromise between FP and integer formats. It assigns a single shared exponent to a group of FP values while maintaining individual significands (Wilkinson, 1994). The BFP has drawn attention as a flexible low-bit numeric format for quantization because the shared exponent can represent the dynamic range of values with little overhead. Hence, BFP can achieve a higher compression ratio than integer formats (Zhang et al., 2022a). In addition, since the individual significand values are integer, the BFP formats enable simpler computation than FP formats (Koster et al., 2017). Note that a critical limitation in previous works based on BFP formats is that the same level of accuracy as that of conventional FP computations cannot be guaranteed (even theoretically). 
Previous works tend to find the optimal BFP formats with the least memory/computation density by evaluating DNN accuracy for various bit resolution and group sizes (Song et al., 2018; Lian et al., 2019; Rouhani et al., 2020). Another drawback in some previous works on BFP is that DNNs with BFP format need to be fine-tuned usually by quantization-aware training to improve the accuracy (Zhang et al., 2022a; Rouhani et al., 2020). Since a quantized neural network allows only one fixed block size that is optimized for target hardware during training, a neural network needs to be retrained for different hardware choices if a block size differs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 430, + 506, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 506, + 552 + ], + "type": "text", + "content": "Truncated binary multipliers with error compensation schemes have been proposed to reduce the number of outputs in integer multiplications (Petra et al., 2009). While both the truncated multipliers and our proposed work use the truncations to improve computational efficiency, there are critical differences between them. In the truncated binary integer multipliers, the amount of the truncated bits is fixed while it varies in FP additions cases which our work focuses on. In addition, (Petra et al., 2009) presents a truncation error correction function utilizing the fact that some of the truncated partial products share the same inputs with the remaining partial products, so they have correlations with the remaining partial product values. Unfortunately, in FP addition cases, the truncated significands do not have any correlation with the remaining bits so it is hard to devise similar error correction schemes. Hence, there is a strong need to develop alternative ways to control the truncation errors in FP operations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 567, + 493, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 493, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 493, + 578 + ], + "type": "text", + "content": "3 RECONSTRUCTION OF FP-BASED MATMUL WITH INTEGER ARITHMETIC" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 590, + 473, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 590, + 473, + 602 + ], + "spans": [ + { + "bbox": [ + 105, + 590, + 473, + 602 + ], + "type": "text", + "content": "3.1 OVERVIEW OF THE PROPOSED MATMUL RECONSTRUCTION AND COMPUTATION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": "In this section, we propose a methodology to reconstruct FP MatMul with integer arithmetic for efficient DNN computation, focusing on FP activations and quantized weights. 
In most cases, the weight matrix with " + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": "-bit quantization can be expressed as a binary-coded matrix: " + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\sum_{b=1}^{m} \\alpha_{b} \\cdot B_{b}" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\alpha_{b}" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": " is a scaling factor and " + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "inline_equation", + "content": "B_{b}" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": " is a binary weight matrix of each bitplane. Here, " + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\alpha_{b}" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": " can be a power of 2 for uniform quantization or can be an FP value for non-uniform quantization. 
MatMul is composed of multiple dot products, and a dot product between activations and weights is defined as " + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\sum_{k=1}^{n} (a_{k} \\times w_{k})" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": ": activation, " + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": ": weight, " + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 611, + 504, + 700 + ], + "type": "text", + "content": ": fan-in of the layer). If we apply binary-coded weights and properly change the order of the operations, we can rewrite the dot product as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 219, + 704, + 505, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 704, + 505, + 735 + ], + "spans": [ + { + "bbox": [ + 219, + 704, + 505, + 735 + ], + "type": "interline_equation", + "content": "\\sum_ {b = 1} ^ {m} \\alpha_ {b} \\sum_ {k = 1} ^ {n} \\left(a _ {k} \\times B _ {b, k}\\right), B _ {b, k} \\in [ - 1, + 1 ] \\tag {1}", + "image_path": "59e06edd0a85c2cd2dfe1b8ab438f80f73310958a0bf45c5fe49727ca1af5362.jpg" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, 
+ 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 118, + 79, + 492, + 156 + ], + "blocks": [ + { + "bbox": [ + 118, + 79, + 492, + 156 + ], + "lines": [ + { + "bbox": [ + 118, + 79, + 492, + 156 + ], + "spans": [ + { + "bbox": [ + 118, + 79, + 492, + 156 + ], + "type": "image", + "image_path": "811293706d38e8580ee9fae13d3f1ecfad52f15572bc10de6365b6b9099324a6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 113, + 156, + 496, + 167 + ], + "lines": [ + { + "bbox": [ + 113, + 156, + 496, + 167 + ], + "spans": [ + { + "bbox": [ + 113, + 156, + 496, + 167 + ], + "type": "text", + "content": "Figure 2: Overview of the proposed MatMul computing scheme for DNNs with FP activations." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 113, + 169, + 496, + 247 + ], + "blocks": [ + { + "bbox": [ + 113, + 169, + 496, + 247 + ], + "lines": [ + { + "bbox": [ + 113, + 169, + 496, + 247 + ], + "spans": [ + { + "bbox": [ + 113, + 169, + 496, + 247 + ], + "type": "image", + "image_path": "55188d2c9943bae17819d9b758b32d345370f0eb1446e130b59171a68ae29ca9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 248, + 504, + 293 + ], + "lines": [ + { + "bbox": [ + 104, + 248, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 504, + 293 + ], + "type": "text", + "content": "Figure 3: Comparison of a previous approach (e.g., MSFP (Rouhani et al., 2020)) and the proposed approach for applying block floating point (BFP) to DNN computation. 
In the case of MSFP, the original network needs to be retrained for the MatMul engines with different block sizes, but in the proposed scheme, the original network can be fed into the engines with any block sizes." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 296, + 506, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 296, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 506, + 561 + ], + "type": "text", + "content": "For each bitplane, MatMul of weights and activations is reconfigured as the addition/subtraction of activation values except for a few " + }, + { + "bbox": [ + 104, + 296, + 506, + 561 + ], + "type": "inline_equation", + "content": "\\alpha_{b}" + }, + { + "bbox": [ + 104, + 296, + 506, + 561 + ], + "type": "text", + "content": " multiplications that are necessary to merge the outputs from each bitplane. Because FP multiply-accumulate operations require more hardware resources than FP additions, even such a reconfiguration of matrix multiplication to remove most multiplications can improve the efficiency of DNN computations significantly (Jeon et al., 2020). Even so, because FP additions are still computationally more expensive than integer additions, replacing FP additions with integer additions can save even more energy and area. Therefore, we propose to reconstruct FP-based MatMul (Eq. 1) using integer additions (Figure 2). One of the key components of the proposed method is the pre-alignment, which reforms the FP activation values into integer values on-the-fly by sharing the exponent value among the activations that are fed to a dot product of the MatMul at a time. The pre-alignment finds the maximum of the exponents among the activations and aligns corresponding significands simultaneously based on the difference of each exponent and the maximum exponent. 
As a result, unlike conventional FP arithmetic that performs the alignment for each addition, our proposed computing methodology aligns the activation values once per MatMul, and thus, reduces the overall cost of the alignment process significantly. Note that as opposed to previous works that share the exponent among a block of inputs in the storage format (e.g., MSFP (Rouhani et al., 2020)), our design performs the exponent sharing during the computation. Since different exponents are allowed in the storage format in our scheme, we keep the representation power of the conventional FP format (Figure 3). Because pre-aligned activations can be represented by the aligned significands which are integer values, an FP addition of the MatMul can be replaced by an integer addition. After the whole summation process, the proposed method reforms the summation results back to FP values by normalizing the results with the maximum exponent found in the pre-alignment stage. Then, the computation results from each weight bitplane are multiplied by " + }, + { + "bbox": [ + 104, + 296, + 506, + 561 + ], + "type": "inline_equation", + "content": "\\alpha_{b}" + }, + { + "bbox": [ + 104, + 296, + 506, + 561 + ], + "type": "text", + "content": " and merged to finish the target MatMul operation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "text", + "content": "As the exponent of float32 (or bfloat16) is 8-bit, the maximum amount of the significand shifting is 255 and the resolution of the aligned activation becomes 279 (or 263) bits. Note that such a large bit width might negate the benefits of using integer units. 
For example, while 32-bit integer addition consumes " + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "inline_equation", + "content": "10.3\\%" + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "text", + "content": " energy of float32 addition, 279-bit integer requires a level of energy per addition comparable to that of float32 addition (Appendix B.1). To avoid the large design overhead, we propose to use only the top " + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "inline_equation", + "content": "t(= p + \\delta)" + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "text", + "content": " bits of the aligned activation when " + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "text", + "content": " indicates the number of extra significand bits for reducing truncation error. Since the conventional FP addition also experiences errors due to truncation of significand, relatively small extra " + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 564, + 506, + 675 + ], + "type": "text", + "content": " bits for the proposed method can derive a level of errors similar to that of conventional FP addition (as described in Figure 1)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 689, + 376, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 376, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 376, + 700 + ], + "type": "text", + "content": "3.2 COMPUTATION ERROR AFTER SIGNIFICAND TRUNCATION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "To study the characteristics of errors in the proposed method with truncated significands, we first analyze the computation error with a single addition/subtraction between two FP values " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": "We assume " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "x > y \\geq 0" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "x = x_0.x_1 \\cdots x_{p-1}" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "y = y_0.y_1 \\cdots y_{p-1} \\times 2^{-k} (k \\geq 0)" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": " without loss of generality, because only the difference between the exponents decides the amount of shifting and truncation. 
Here, " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": " denote the binary value of " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": "-th significand bit, and the leading bit " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": " is 1 for " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "x > 0" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": ". When either " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": " is 0, there is no need for significand shifting and truncation, and hence, integer-based FP arithmetic can guarantee the precise computation without any extra bit (i.e., " + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "inline_equation", + "content": "\\delta = 0" + }, + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "k > 0" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": ", we need to shift and truncate the significand of " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " for the computation. For the alignment, " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " should be shifted to right by " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": ", so " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " can be rewritten as " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "y = 0.0\\cdots 0y_k' y_{k+1}'\\cdots y_{k+p-1}'" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "y_{k+i}'" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " is equal to " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": ". 
As only the top " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "t(= p + \\delta)" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " bits of the significand remain after the truncation, the truncated result becomes " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "\\bar{y} = 0.0\\cdots y_k' \\cdots y_{t-1}" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": ". When " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "\\delta \\geq k" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": ", the difference between " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "\\bar{y}" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " is 0. Otherwise, the difference between " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "inline_equation", + "content": "\\bar{y}" + }, + { + "bbox": [ + 104, + 154, + 504, + 213 + ], + "type": "text", + "content": " is bounded as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 181, + 227, + 504, + 243 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 227, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 181, + 227, + 504, + 243 + ], + "type": "interline_equation", + "content": "\\left| y - \\bar {y} \\right| = 0. 
0 \\dots 0 y _ {t} ^ {\\prime} \\dots y _ {k + p - 1} ^ {\\prime} \\leq 2 ^ {- (p + \\delta - 1)} \\left(1 - 2 ^ {- (k - \\delta)}\\right). \\tag {2}", + "image_path": "15edd51498642609c624f27d2fbb7d7ce676c01305c2289061eff4bb172b3ce7.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 250, + 444, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 250, + 444, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 444, + 262 + ], + "type": "text", + "content": "The relative error of the addition with the truncated significand is defined as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 222, + 267, + 504, + 294 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 267, + 504, + 294 + ], + "spans": [ + { + "bbox": [ + 222, + 267, + 504, + 294 + ], + "type": "interline_equation", + "content": "e _ {a d d} = \\frac {\\left| (x + y) - (x + \\bar {y}) \\right|}{| x + y |} = \\frac {| y - \\bar {y} |}{| x + y |}. \\tag {3}", + "image_path": "0ca59305efda7c237f3185ed9f58e26b23e880a65b9d8dd080fb38840d8afcc4.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 300, + 387, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 387, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 387, + 312 + ], + "type": "text", + "content": "By applying both " + }, + { + "bbox": [ + 105, + 300, + 387, + 312 + ], + "type": "inline_equation", + "content": "|x + y| \\geq |x| \\geq 1" + }, + { + "bbox": [ + 105, + 300, + 387, + 312 + ], + "type": "text", + "content": " and Eq. 2 to Eq. 
3, we can obtain" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 208, + 317, + 504, + 332 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 317, + 504, + 332 + ], + "spans": [ + { + "bbox": [ + 208, + 317, + 504, + 332 + ], + "type": "interline_equation", + "content": "e _ {a d d} \\leq 2 ^ {- (p + \\delta - 1)} \\left(1 - 2 ^ {- (k - \\delta)}\\right) \\leq 2 ^ {- (p + \\delta - 1)}. \\tag {4}", + "image_path": "663e1ea7677ddd3d35a9487cf12e3c8adce895e4dc3b4c8d42cefa66d7f5d9ef.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "spans": [ + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "text", + "content": "Because the machine epsilon is given as " + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "inline_equation", + "content": "\\epsilon = 2^{-p}" + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "inline_equation", + "content": "e_{add} \\leq \\epsilon" + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "text", + "content": " is 1 and " + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "inline_equation", + "content": "e_{add} \\leq \\frac{1}{2}\\epsilon" + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 337, + 502, + 366 + ], + "type": "text", + "content": " is 2. 
For subtraction, the relative error is defined similarly as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 222, + 372, + 504, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 372, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 222, + 372, + 504, + 399 + ], + "type": "interline_equation", + "content": "e _ {s u b} = \\frac {\\left| (x - y) - (x - \\bar {y}) \\right|}{\\left| x - y \\right|} = \\frac {\\left| y - \\bar {y} \\right|}{\\left| x - y \\right|}. \\tag {5}", + "image_path": "718a19fc0d1206f79e5581f3c774a662fc53b4344cfa2c7d1986014cc3c13092.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "inline_equation", + "content": "\\delta \\geq k" + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "inline_equation", + "content": "|y - \\bar{y}|" + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "content": " is 0 so that " + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "inline_equation", + "content": "e_{sub}" + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "content": " is 0. 
The minimum of " + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "content": " is 1, and " + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "content": " has the maximum value when all " + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "inline_equation", + "content": "y_{k + i}'" + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "content": "s are 1. Correspondingly, " + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "inline_equation", + "content": "|x - y|" + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "content": " is bounded as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 151, + 435, + 504, + 462 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 435, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 151, + 435, + 504, + 462 + ], + "type": "interline_equation", + "content": "\\left| x - y \\right| \\geq \\left\\{ \\begin{array}{l l} 1 - 0. 1 1 \\dots 1 = 2 ^ {- p}, & \\text {f o r} k = 1 \\\\ 1 - 0. 0 \\dots 0 1 \\dots 1 \\geq 2 ^ {- 1} + 2 ^ {- 2} + \\dots + 2 ^ {- (k - 1)}, & \\text {f o r} k \\geq 2 \\end{array} \\right. 
\\tag {6}", + "image_path": "b26f77c554a1e37d3743b278a687faf2adbd81fa5a14f5b3d79bbca8a53cfe0b.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": " is 1 and " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": " is 0, we get " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "|y - \\bar{y}| \\leq 2^{-p}" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": " from Eq. 2. For such a case, according to Eq. 6 and Eq. 5, we have " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "e_{sub} \\leq 1" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": ". The worst case happens when " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "x = 1" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "y = 0.111 \\cdots 1" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": ". When " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "k \\geq 2" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": ", by applying Eq. 2 and Eq. 6 to Eq. 
5, we get " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "e_{sub} \\leq \\epsilon" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "\\delta = 1" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "e_{sub} \\leq 1/2\\epsilon" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "inline_equation", + "content": "\\delta = 2" + }, + { + "bbox": [ + 104, + 469, + 504, + 525 + ], + "type": "text", + "content": ". As a result, regardless of FP formats, the proposed method has the error level as summarized in the following Remark 1." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 535, + 504, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 535, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 535, + 504, + 559 + ], + "type": "text", + "content": "Remark 1 The integer-based FP addition/subtraction has the same level of error as that of the conventional FP addition/subtraction with 1 extra bit, and the error becomes half with 2 extra bits." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 568, + 506, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 506, + 591 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 506, + 591 + ], + "type": "text", + "content": "Note that the error of FP summation is the same as the accumulated value of errors from each addition (Muller et al., 2018). 
The reconstructed MatMul, however, induces an additional stage of" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 109, + 596, + 304, + 692 + ], + "blocks": [ + { + "bbox": [ + 109, + 596, + 304, + 692 + ], + "lines": [ + { + "bbox": [ + 109, + 596, + 304, + 692 + ], + "spans": [ + { + "bbox": [ + 109, + 596, + 304, + 692 + ], + "type": "image", + "image_path": "34a77698e36c7076b358899961895e08c9fc226399ef18cfa647a076d91edd91.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 695, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 104, + 695, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 104, + 695, + 504, + 730 + ], + "type": "text", + "content": "Figure 4: (a) Average and (b) maximum FP summation errors of conventional FP computation and the proposed method with extra bits " + }, + { + "bbox": [ + 104, + 695, + 504, + 730 + ], + "type": "inline_equation", + "content": "(\\delta = 0,1,2)" + }, + { + "bbox": [ + 104, + 695, + 504, + 730 + ], + "type": "text", + "content": " against the accurate FP summations with Schewchuk algorithm (Shewchuk, 1997)." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 313, + 596, + 506, + 692 + ], + "blocks": [ + { + "bbox": [ + 313, + 596, + 506, + 692 + ], + "lines": [ + { + "bbox": [ + 313, + 596, + 506, + 692 + ], + "spans": [ + { + "bbox": [ + 313, + 596, + 506, + 692 + ], + "type": "image", + "image_path": "35d60a018a96def348034e2d6034b03f1044607d7f5a7eb920c58b535999e31b.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": "converting integer summation results to FP values, and thus, additional rounding error during the FP formatting (Figure 5(a)). 
For example, to sum 128 FP values, a conventional FP-based MatMul has 127 error sources with bound " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " while the reconstructed MatMul with 1 extra bit has 128 error sources with bound " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " such that the reconstructed MatMul might experience a slightly larger error than conventional FP-based MatMul. Therefore, to guarantee the same error level as that of the conventional FP arithmetic, 2 extra bits are used for pre-alignment. Then, reconstructed MatMul has 127 error sources with bound " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "0.5\\epsilon" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": " and an additional error source with bound " + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 165, + 504, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 504, + 243 + ], + "type": "text", + "content": "To verify the computation error of the proposed method, we randomly sample float32 values and compare the computation error of FP summation between conventional FP computation and the proposed method. 
To explore a wide range of float32 values, we sample " + }, + { + "bbox": [ + 104, + 165, + 504, + 243 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 165, + 504, + 243 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 165, + 504, + 243 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 104, + 165, + 504, + 243 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 165, + 504, + 243 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 165, + 504, + 243 + ], + "type": "text", + "content": " values independently assuming a uniform distribution, and then concatenate those values. We vary the fan-in (i.e., the number of values to be accumulated) from 128 to 8192, and sample 50,000 sets of FP numbers for each fan-in selection. The Schewchuk algorithm is employed to obtain accurate FP summation baseline data for error measurement (Shewchuk, 1997)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "text", + "content": "As shown in Figure 4, the proposed method produces a similar level of errors to that of the conventional FP arithmetic for various fan-in values when " + }, + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "inline_equation", + "content": "\\delta = 2" + }, + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "text", + "content": ". Because larger errors are more likely to be accumulated with larger fan-in, we see that both average and maximum errors tend to grow as the fan-in increases (Figure 4). 
Nonetheless, the average error " + }, + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "inline_equation", + "content": "(12.3 \\times 10^{-7})" + }, + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "text", + "content": " and the maximum error " + }, + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "inline_equation", + "content": "(2.4 \\times 10^{-2}" + }, + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "inline_equation", + "content": "2.4\\%)" + }, + { + "bbox": [ + 104, + 247, + 504, + 336 + ], + "type": "text", + "content": " are relatively small even with 8192 fan-in, which justifies the current practice of implementing conventional FP additions without error correction for DNN inference. Correspondingly, the proposed method can support as precise numerical computation as conventional FP arithmetic does." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 352, + 195, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 352, + 195, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 195, + 363 + ], + "type": "text", + "content": "4 EXPERIMENT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 376, + 372, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 372, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 372, + 387 + ], + "type": "text", + "content": "4.1 IFPU: A MATMUL ENGINE FOR THE PROPOSED METHOD" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 396, + 317, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 317, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 317, + 539 + ], + "type": "text", + "content": "Overall Architecture. To evaluate the proposed method with real hardware implementation, we first design a MatMul engine called iFPU. 
Figure 5 shows the overview of systolic iFPU architecture which adopts the design principle of Google's TPU (Jouppi et al., 2017). iFPU performs FP MatMul in the form of a set of FP summation (Eq. 1) that is physically implemented as integer summation for high efficiency. After the computation, the iFPU converts integer results into FP values through the int2fp converter at the end of the Processing Element (PE) arrays. Then, scale & accumulator is used to multiply " + }, + { + "bbox": [ + 104, + 396, + 317, + 539 + ], + "type": "inline_equation", + "content": "\\alpha_{b}" + }, + { + "bbox": [ + 104, + 396, + 317, + 539 + ], + "type": "text", + "content": " and add summation results of each weight bit" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 328, + 400, + 504, + 517 + ], + "blocks": [ + { + "bbox": [ + 328, + 400, + 504, + 517 + ], + "lines": [ + { + "bbox": [ + 328, + 400, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 328, + 400, + 504, + 517 + ], + "type": "image", + "image_path": "8a13a0c4739e9b90f68e1394695a72bbe8e7cc524e031be0bed1ed4a70bd57e1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 341, + 521, + 485, + 533 + ], + "lines": [ + { + "bbox": [ + 341, + 521, + 485, + 533 + ], + "spans": [ + { + "bbox": [ + 341, + 521, + 485, + 533 + ], + "type": "text", + "content": "Figure 5: A block diagram of iFPU" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 539, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 606 + ], + "type": "text", + "content": "plane to finish the MatMul (Eq. 1). 
The size of MatMul that can be processed in the iFPU at a time is bounded by the number of PEs, and as a practical design, we evaluate the iFPU with " + }, + { + "bbox": [ + 104, + 539, + 504, + 606 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 104, + 539, + 504, + 606 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 539, + 504, + 606 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 104, + 539, + 504, + 606 + ], + "type": "text", + "content": ", or " + }, + { + "bbox": [ + 104, + 539, + 504, + 606 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 104, + 539, + 504, + 606 + ], + "type": "text", + "content": " PEs for the experiment. When fan-in of the DNN layer exceeds the row count of PEs, activations of the layer are tiled to fit the row-count limit, and each tile is fed into the iFPU at a time and processed with integer adders in the PEs. To complete the entire MatMul, the computing results for different tiles should be merged, and for this, float32 adders (accumulator) are used again." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": "Precision of Integer Adder. 
As the PE array of the iFPU accumulates the pre-aligned and truncated significands, the size of the integer adder in each PE depends on " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ", which is determined by the precision of the given FP format (" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ") and extra bits (" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ") attached to control truncation error. Based on the theoretical analysis given in Section 3.2, the iFPU for float32 activations conducts 26-bit integer addition with " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\delta = 2" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": ". Though the iFPU introduces additional FP accumulations due to the MatMul tiling, the error level of integer-based FP addition with " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\delta = 2" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " is half of the conventional FP addition according to Remark 1. Therefore, the iFPU with " + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\delta = 2" + }, + { + "bbox": [ + 104, + 611, + 506, + 732 + ], + "type": "text", + "content": " can still preserve the same level of computing error as that of conventional FP MatMul (Figure 6(a)). 
Furthermore, the iFPU for bfloat16 activations can be designed to be even smaller and more energy efficient by using smaller precision integer adders thanks to the reduced bit precision for significands. Interestingly, conventional bfloat16 accumulation still uses float32 adders to preserve the accuracy of accumulated" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 115, + 82, + 496, + 153 + ], + "blocks": [ + { + "bbox": [ + 115, + 82, + 496, + 153 + ], + "lines": [ + { + "bbox": [ + 115, + 82, + 496, + 153 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 496, + 153 + ], + "type": "image", + "image_path": "cf46c1f7050183a59f40048d068deffef67250ac95693dfffa8f6ce8d478fe7f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 237, + 156, + 373, + 166 + ], + "lines": [ + { + "bbox": [ + 237, + 156, + 373, + 166 + ], + "spans": [ + { + "bbox": [ + 237, + 156, + 373, + 166 + ], + "type": "text", + "content": "(a) Evaluation with float32 activation" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 115, + 168, + 494, + 240 + ], + "blocks": [ + { + "bbox": [ + 115, + 168, + 494, + 240 + ], + "lines": [ + { + "bbox": [ + 115, + 168, + 
494, + 240 + ], + "spans": [ + { + "bbox": [ + 115, + 168, + 494, + 240 + ], + "type": "image", + "image_path": "17cb24c416bcac6f908010fec2e877c55e1bc30c4dbb8c06f04d4f194e1ed123.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 235, + 241, + 375, + 251 + ], + "lines": [ + { + "bbox": [ + 235, + 241, + 375, + 251 + ], + "spans": [ + { + "bbox": [ + 235, + 241, + 375, + 251 + ], + "type": "text", + "content": "(b) Evaluation with bfloat16 activation" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 116, + 306, + 247, + 367 + ], + "blocks": [ + { + "bbox": [ + 104, + 255, + 504, + 300 + ], + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 300 + ], + "type": "text", + "content": "Figure 6: Numerical computation errors of MatMul for DNNs with FP activation. We measure the computation error of conventional FPU-based engine and the proposed iFPU against the accurate FP computation with Schewchuk algorithm (Shewchuk, 1997). The number of PEs and fan-in are annotated along the horizontal axis." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 116, + 306, + 247, + 367 + ], + "lines": [ + { + "bbox": [ + 116, + 306, + 247, + 367 + ], + "spans": [ + { + "bbox": [ + 116, + 306, + 247, + 367 + ], + "type": "image", + "image_path": "cd2264494c7c1149c454cfc42e3280b6d3e9841ae704ae9f98e1217546c28ba8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 250, + 306, + 369, + 367 + ], + "blocks": [ + { + "bbox": [ + 250, + 306, + 369, + 367 + ], + "lines": [ + { + "bbox": [ + 250, + 306, + 369, + 367 + ], + "spans": [ + { + "bbox": [ + 250, + 306, + 369, + 367 + ], + "type": "image", + "image_path": "819af033f580bca5686abd485b875008c4e4eab5e6e651e6d643ea05e1f2ffa7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 372, + 307, + 492, + 367 + ], + "blocks": [ + { + "bbox": [ + 372, + 307, + 492, + 367 + ], + "lines": [ + { + "bbox": [ + 372, + 307, + 492, + 367 + ], + "spans": [ + { + "bbox": [ + 372, + 307, + 492, + 367 + ], + "type": "image", + "image_path": "b17e97228a302c010195825f3567e080b9a809e0c13e42506451f32f825770f2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 116, + 383, + 247, + 443 + ], + "blocks": [ + { + "bbox": [ + 238, + 369, + 373, + 380 + ], + "lines": [ + { + "bbox": [ + 238, + 369, + 373, + 380 + ], + "spans": [ + { + "bbox": [ + 238, + 369, + 373, + 380 + ], + "type": "text", + "content": "(a) Evaluation with float32 activation" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 116, + 383, + 247, + 443 + ], + "lines": [ + { + "bbox": [ + 116, + 383, + 247, + 443 + ], + "spans": [ + { + "bbox": [ + 116, + 383, + 247, + 443 + ], + "type": "image", + "image_path": 
"d4e67b68c95440ea4b5872d8a48a3e0ff842a2a3dcbe5ce46074da4e54b4b959.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 249, + 384, + 369, + 443 + ], + "blocks": [ + { + "bbox": [ + 249, + 384, + 369, + 443 + ], + "lines": [ + { + "bbox": [ + 249, + 384, + 369, + 443 + ], + "spans": [ + { + "bbox": [ + 249, + 384, + 369, + 443 + ], + "type": "image", + "image_path": "fac697cc56c79e8dfc34799ac13aae38a22cde3ff94de97cf53d615d61082109.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 235, + 446, + 375, + 456 + ], + "lines": [ + { + "bbox": [ + 235, + 446, + 375, + 456 + ], + "spans": [ + { + "bbox": [ + 235, + 446, + 375, + 456 + ], + "type": "text", + "content": "(b) Evaluation with bfloat16 activation" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 372, + 384, + 492, + 444 + ], + "blocks": [ + { + "bbox": [ + 372, + 384, + 492, + 444 + ], + "lines": [ + { + "bbox": [ + 372, + 384, + 492, + 444 + ], + "spans": [ + { + "bbox": [ + 372, + 384, + 492, + 444 + ], + "type": "image", + "image_path": "f64159eff0c7a5f994cd1ca807d5665991632eae40a8901dc01eb8d5e726adc7.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 460, + 504, + 506 + ], + "lines": [ + { + "bbox": [ + 104, + 460, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 504, + 506 + ], + "type": "text", + "content": "Figure 7: Cosine distance between MatMul results of BERT-base (task: MRPC) extracted from inference results using conventional FPU-based engine (NVIDIA RTX3090) and the proposed iFPU. The last feed-forward layer in each encoder block (1-12th layers) and pooler (13th layer) is used for the evaluation. The number of PEs and layer indices are annotated along the horizontal axis." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 518, + 504, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 518, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 518, + 504, + 574 + ], + "type": "text", + "content": "results (Wang & Kanwar, 2019; Intel, 2018; Henry et al., 2019). However, as the accumulated results are converted back to bfloat16, it is possible to maintain the accuracy of bfloat16 accumulation with less accurate adders than float32 adders. Figure 6(b) shows that the proposed bfloat16 iFPU with " + }, + { + "bbox": [ + 104, + 518, + 504, + 574 + ], + "type": "inline_equation", + "content": "\\delta = 3" + }, + { + "bbox": [ + 104, + 518, + 504, + 574 + ], + "type": "text", + "content": " (which uses 11 bit adders) provides comparable accuracy to that of conventional bfloat16 adders." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 590, + 352, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 590, + 352, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 590, + 352, + 601 + ], + "type": "text", + "content": "4.2 ANALYSIS OF THE DNN COMPUTATION ACCURACY" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": "MatMuls of DNN with iFPU vs FPU. In the previous section, we compared the accuracy of the proposed integer-based FP MatMul with precise results. Since our goal is to replace the FPU with the proposed iFPU, it is also important to compare the computational difference between the conventional error-prone FPU-based engine and the iFPU. 
For an in-depth understanding of DNN inference with the iFPU, we first compare the inference output of each layer in the BERT-base model (Devlin et al., 2018) computed with an FPU-based engine (NVIDIA RTX3090) and the proposed iFPU. BERT-base uses 4-bit weight values and the target task is MRPC. In iFPU, MatMuls between weights and activations are processed with the proposed integer-based approach, but other operations such as softmax are processed by using conventional FPU. We employ cosine distance as the metric to measure the difference in layer outputs. Note that the cosine distance is 0 for two identical vectors and 2 for entirely opposite vectors. In this experiment, the last feed-forward layer in each" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 106, + 504, + 255 + ], + "blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 105 + ], + "type": "text", + "content": "Table 1: Accuracy of DNNs inference with conventional FPU-based engine (NVIDIA RTX3090) and proposed iFPUs(-#rows/columns of PE arrays). The numbers in parentheses represent accuracy difference between FPU & iFPU." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 106, + 504, + 255 + ], + "lines": [ + { + "bbox": [ + 106, + 106, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 106, + 106, + 504, + 255 + ], + "type": "table", + "html": "
float32 activationbfloat16 activation
VGG-9ResNet-18OPT-1.3BVGG-9ResNet-18OPT-1.3B
FPU92.9170.2712.9692.9170.2812.96
iFPU-3292.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.91 (+0.00)70.26 (-0.02)12.96 (+0.00)
iFPU-6492.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.90 (-0.01)70.27 (-0.01)12.97 (+0.01)
iFPU-12892.91 (+0.00)70.27 (+0.00)12.96 (+0.00)92.92 (+0.01)70.26 (-0.02)12.98 (+0.02)
ResNet-50RegNetMnasNetResNet-50RegNetMnasNet
FPU76.3278.1875.9976.3378.1775.96
iFPU-3276.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.38 (+0.05)78.18 (+0.01)75.97 (+0.01)
iFPU-6476.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.38 (+0.05)78.18 (+0.01)75.96 (+0.00)
iFPU-12876.31 (-0.01)78.18 (+0.00)75.99 (+0.00)76.40 (+0.07)78.18 (+0.01)75.97 (+0.01)
", + "image_path": "937d97d3411101d9d633328531fc89e8ceb02e28ba19a278e159ddd9ea0bdd17.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 255, + 506, + 336 + ], + "blocks": [ + { + "bbox": [ + 106, + 255, + 506, + 336 + ], + "lines": [ + { + "bbox": [ + 106, + 255, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 255, + 506, + 336 + ], + "type": "table", + "html": "
BERT-Base w/ float32 activationAvg.
CoLAMRPCSST-2STS-BQQPMNLI-m/mmQNLIRTE
FPU56.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28
iFPU-3256.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
iFPU-6456.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
iFPU-12856.3689.0591.5187.5283.7381.95/82.5689.0070.0481.28 (+0.00)
", + "image_path": "6d57767d4b6ea01b49b5853890646157b793b26b28fb3470ba5e42cc7e7047fb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 106, + 338, + 506, + 418 + ], + "blocks": [ + { + "bbox": [ + 106, + 338, + 506, + 418 + ], + "lines": [ + { + "bbox": [ + 106, + 338, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 506, + 418 + ], + "type": "table", + "html": "
BERT--BASE w/ bfloat16 activationAvg.
CoLAMRPCSST-2STS-BQQPMNLI-m/mmQNLIRTE
FPU56.0889.0591.5187.5283.7481.97/82.5789.0070.0481.30
iFPU-3256.1089.0591.5187.5283.7281.94/82.5589.0570.0481.28 (-0.02)
iFPU-6456.3689.0591.6387.5283.7281.93/82.5689.0070.0481.31 (+0.01)
iFPU-12856.1088.8391.6387.5283.7281.96/82.5489.0570.0481.27 (-0.03)
", + "image_path": "15f57c953a6bf035f7ef5e609bd79799864a466a6db6baca1e09f429eba6c417.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 426, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 491 + ], + "type": "text", + "content": "encoder block and pooler is chosen for evaluation. Figure 7 shows that the FPU and the iFPU produce almost identical outputs for each layer. The averages of the distance are less than " + }, + { + "bbox": [ + 104, + 426, + 504, + 491 + ], + "type": "inline_equation", + "content": "1.2 \\times 10^{-6}" + }, + { + "bbox": [ + 104, + 426, + 504, + 491 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 426, + 504, + 491 + ], + "type": "inline_equation", + "content": "2.5 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 426, + 504, + 491 + ], + "type": "text", + "content": " for float32 and bfloat16 activations, respectively. Moreover, the distance between layer outputs from the two engines remains close throughout the forward path. As a result, we can expect that the proposed iFPU can support DNN inference with almost the same accuracy as that of conventional FPU." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 497, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 497, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 497, + 506, + 662 + ], + "type": "text", + "content": "DNN Inference Accuracy. We select 7 types of DNN models to compare DNN model accuracy between the FPU and iFPU: BERT-base, VGG-9, ResNet-18, ResNet-50, RegNet-3.2GF, MnasNet2.0, and OPT-1.3B. The accuracy of BERT-base is evaluated on the General Language Understanding Evaluation (GLUE) benchmark (Wang et al., 2019). VGG-9 (Simonyan & Zisserman, 2014) is evaluated on CIFAR-10 (Krizhevsky et al., 2009). 
ResNet-18, ResNet-50 (He et al., 2016), RegNet3.2GF (Radosavovic et al., 2020), and MnasNet-2.0 (Tan et al., 2019) measure top-1 accuracy on ImageNet (Russakovsky et al., 2015). OPT-1.3B (Zhang et al., 2022b) is an open-sourced NLP model provided by Meta AI roughly matching the performance and sizes of the GPT-3 class of models and is evaluated by estimating the perplexity on WikiText-2 dataset (Merit et al., 2016). All DNN models use 4-bit weight values that are quantized by a binary-coding quantization scheme. Note that no modifications to DNN structures are needed to deploy the weight-quantized DNNs to various iFPUs because 1) activations are FP values and 2) iFPUs are designed to process any MatMul for DNNs as long as weights are quantized. Table 1 summarizes the DNN inference results. Because the iFPU can produce almost identical MatMul results as FPU, the proposed iFPUs preserve the DNN accuracy for both float32 and bfloat16 activations as we expected." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 677, + 308, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 308, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 308, + 689 + ], + "type": "text", + "content": "4.3 ANALYSIS OF COMPUTATION EFFICIENCY" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Setup. To evaluate the efficiency of proposed iFPUs, we synthesize the proposed hardware in a " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "28\\mathrm{nm}" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": " CMOS technology. 
For a fair evaluation of the impact of replacing FP MatMul with integer-based MatMul, we also design two 'baseline' engines for the conventional FP-based MatMul (Fig-" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": "ure 8). As the first baseline (FP-MAC), Figure 8(a) is designed with FP MAC units to process FP MatMul as a naive approach. In addition, as the second baseline (FP-ADD), Figure 8(b) is designed with FP adders to process FP MatMul reconfigured as Eq. 1. Because bitplanes of weight values are decomposed for FP-ADD and iFPU, binary weights are processed in a bit-parallel manner in FP-ADD and iFPU, while FP-MAC processes the whole weight values in each MAC unit. Compared to those two baseline engines, iFPU exhibits the lighter PEs along with additional units such as the pre-alignment unit and int2fp converter. Lastly, an int8 MatMul engine (INT8) is also implemented for the comparison between the proposed iFPU MatMul and integer MatMul." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 176, + 337, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 176, + 337, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 176, + 337, + 275 + ], + "type": "text", + "content": "Results. Simulation results using the synthesized hardware demonstrate that the proposed iFPUs can improve both energy and area compared to the baselines, as the FP units of the baseline engines are replaced with the more area/energy efficient integer units (Figure 9). For float32 activations, the proposed iFPU improves throughput-per-area " + }, + { + "bbox": [ + 104, + 176, + 337, + 275 + ], + "type": "inline_equation", + "content": "(\\mathrm{TOPS} / \\mathrm{mm}^2)" + }, + { + "bbox": [ + 104, + 176, + 337, + 275 + ], + "type": "text", + "content": " by up to " + }, + { + "bbox": [ + 104, + 176, + 337, + 275 + ], + "type": "inline_equation", + "content": "7.9\\times" + }, + { + "bbox": [ + 104, + 176, + 337, + 275 + ], + "type": "text", + "content": " and energy efficiency (TOPS/W) by up to " + }, + { + "bbox": [ + 104, + 176, + 337, + 275 + ], + "type": "inline_equation", + "content": "6.4\\times" + }, + { + "bbox": [ + 104, + 176, + 337, + 275 + ], + "type": "text", + "content": " compared to the FP-MAC baseline. 
For bfloat16 activations, the proposed iFPU achieves" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 345, + 178, + 422, + 247 + ], + "blocks": [ + { + "bbox": [ + 345, + 178, + 422, + 247 + ], + "lines": [ + { + "bbox": [ + 345, + 178, + 422, + 247 + ], + "spans": [ + { + "bbox": [ + 345, + 178, + 422, + 247 + ], + "type": "image", + "image_path": "78a9b38799730d982a84162ee1b462f287492c13abbfdf994834e1d8f4953627.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 251, + 504, + 273 + ], + "lines": [ + { + "bbox": [ + 343, + 251, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 343, + 251, + 504, + 273 + ], + "type": "text", + "content": "Figure 8: Baseline MatMul engines (a) FP-MAC and (b) FP-ADD" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 429, + 179, + 504, + 247 + ], + "blocks": [ + { + "bbox": [ + 429, + 179, + 504, + 247 + ], + "lines": [ + { + "bbox": [ + 429, + 179, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 429, + 179, + 504, + 247 + ], + "type": "image", + "image_path": "0c03f4a03e6589511b931f1211689d946f45856dd15f17cf76dc0e7bde169e37.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 275, + 506, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 275, + 506, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 506, + 386 + ], + "type": "text", + "content": "even larger improvements because the size of the corresponding integer-based unit is reduced as the bit resolution of the aligned-truncated significands is reduced by 15 bits compared to float32 activation cases. 
The throughput-per-area of the iFPU is improved by up to " + }, + { + "bbox": [ + 104, + 275, + 506, + 386 + ], + "type": "inline_equation", + "content": "9.9\\times" + }, + { + "bbox": [ + 104, + 275, + 506, + 386 + ], + "type": "text", + "content": " and energy efficiency is enhanced by up to " + }, + { + "bbox": [ + 104, + 275, + 506, + 386 + ], + "type": "inline_equation", + "content": "11.9\\times" + }, + { + "bbox": [ + 104, + 275, + 506, + 386 + ], + "type": "text", + "content": " compared to the FP-MAC baseline. The improvement over the baseline becomes larger as the number of PEs increases because the overhead of additional logic such as pre-alignment units in the proposed scheme can be amortized (detailed in Appendix C.2). We also compare the iFPUs with the INT8 engine. While bffloat16 activations close the gap between the FP-MAC baseline and the INT8 engine significantly in terms of throughput-per-area, iFPU (with bffloat16 activations) achieves even higher energy efficiency than the INT8 engine in some cases (Figure 9)." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 388, + 306, + 483 + ], + "blocks": [ + { + "bbox": [ + 109, + 388, + 306, + 483 + ], + "lines": [ + { + "bbox": [ + 109, + 388, + 306, + 483 + ], + "spans": [ + { + "bbox": [ + 109, + 388, + 306, + 483 + ], + "type": "image", + "image_path": "b300b0a5576d752e93ad84c011fe82fe2c46903ab5d90fd00710bad3caef2aa3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 487, + 504, + 521 + ], + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 521 + ], + "type": "text", + "content": "Figure 9: Normalized energy efficiency (TOPS/W) (left) and throughput-per-area (TOPS/mm²) (right) of MatMul Engines: baselines and iFPUs for FP MatMul; INT8 for int8 MatMul. The number of PEs and target activation types are annotated along the horizontal axis." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 309, + 388, + 504, + 483 + ], + "blocks": [ + { + "bbox": [ + 309, + 388, + 504, + 483 + ], + "lines": [ + { + "bbox": [ + 309, + 388, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 309, + 388, + 504, + 483 + ], + "type": "image", + "image_path": "c8ed2a204e737468738f1e7cd154c1c9fad26803d41a986509e9b7811dac74f0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 531, + 196, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 531, + 196, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 196, + 544 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": "The need to accomplish computing MatMul by using FP activations and quantized weights is increasing due to the growing usage of complex non-linear activation functions in DNN models such as Transformers. Conventional computing platforms such as CPU, GPU, and NPU, however, are inefficient in performing such computations. In this paper, we propose a new MatMul computing scheme dedicated to DNNs with FP activations and binary-coding weight quantization. The proposed method accelerates the FP MatMul of DNNs using the shared exponent and the integer arithmetic to improve computational efficiency. Previous works which also used the block floating point number with shared exponent often claim the validity of their design by presenting comparable DNN accuracy without verifying the robustness of MatMul results in a rigorous manner. 
We theoretically prove that the proposed scheme can produce the same error level as that of conventional FP arithmetic. To evaluate the computational efficiency of the proposed method, we design and synthesize a MatMul engine, iFPU, following the principle of integer-based operations. Experimental results support our claim that, compared to the conventional FPU-based design, the iFPUs accelerate the weight-only quantized DNNs with " + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "inline_equation", + "content": "6.4 \\times" + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "inline_equation", + "content": "7.9 \\times" + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": " higher energy efficiency and throughput-per-area for float32 activations, respectively. In addition, the iFPUs yield " + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "inline_equation", + "content": "11.9 \\times" + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "inline_equation", + "content": "9.9 \\times" + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": " higher energy efficiency and throughput-per-area, respectively, when associated with bffloat16 activations." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 101, + 506, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 101, + 506, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 101, + 506, + 148 + ], + "type": "text", + "content": "This work was supported in part by Institute of Information communications Technology Planning Evaluation (IITP) grant funded by the Korea government (MSIT) (No. 2021-0-01343, Artificial Intelligence Graduate School Program (Seoul National University) " + }, + { + "bbox": [ + 105, + 101, + 506, + 148 + ], + "type": "inline_equation", + "content": "(10\\%)" + }, + { + "bbox": [ + 105, + 101, + 506, + 148 + ], + "type": "text", + "content": ", and No.2021-0-02068, Artificial Intelligence Innovation Hub " + }, + { + "bbox": [ + 105, + 101, + 506, + 148 + ], + "type": "inline_equation", + "content": "(10\\%)" + }, + { + "bbox": [ + 105, + 101, + 506, + 148 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 162, + 176, + 175 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 162, + 176, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 176, + 175 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 180, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 180, + 505, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 180, + 505, + 205 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 505, + 205 + ], + "type": "text", + "content": "Yu Bai, Yu-Xiang Wang, and Edo Liberty. Proxquant: Quantized neural networks via proximal operators. International Conference on Learning Representations, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 210, + 506, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 210, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 107, + 210, + 506, + 244 + ], + "type": "text", + "content": "Sharan Chetlur, Cliff Woolley, Philippe Vandermersch, Jonathan Cohen, John Tran, Bryan Catanzaro, and Evan Shelhamer. codnn: Efficient primitives for deep learning. arXiv preprint arXiv:1410.0759, 2014." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 251, + 504, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 504, + 285 + ], + "type": "text", + "content": "Jungwook Choi, Zhuo Wang, Swagath Venkataramani, Pierce I-Jen Chuang, Vijayalakshmi Srinivasan, and Kailash Gopalakrishnan. Pact: Parameterized clipping activation for quantized neural networks. arXiv preprint arXiv:1805.06085, 2018." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 292, + 504, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 292, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 292, + 504, + 336 + ], + "type": "text", + "content": "Insoo Chung, Byeongwook Kim, Yoonjung Choi, Se Jung Kwon, Yongkweon Jeon, Baeseong Park, Sangha Kim, and Dongsoo Lee. Extremely low bit transformer quantization for on-device neural machine translation. In Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 4812-4826, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 343, + 504, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 343, + 504, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 504, + 367 + ], + "type": "text", + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 373, + 504, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 504, + 397 + ], + "type": "text", + "content": "Elias Frantar, Saleh Ashkboos, Torsten Hoefer, and Dan Alistarh. Gptq: Accurate post-training quantization for generative pre-trained transformers. arXiv preprint arXiv:2210.17323, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 403, + 504, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 403, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 504, + 426 + ], + "type": "text", + "content": "David Goldberg. What every computer scientist should know about floating-point arithmetic. ACM computing surveys (CSUR), 23(1):5-48, 1991." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 432, + 503, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 432, + 503, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 503, + 456 + ], + "type": "text", + "content": "Mark Harris. Mixed-precision programming with CUDA 8, 2016. URL https://developer.nvidia.com/blog/mixed-precision-programming-cuda-8/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 462, + 504, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 462, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 504, + 495 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 502, + 504, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 504, + 537 + ], + "type": "text", + "content": "Greg Henry, Ping Tak Peter Tang, and Alexander Heinecke. Leveraging the bfloat16 artificial intelligence datatype for higher-precision computations. In 2019 IEEE 26th Symposium on Computer Arithmetic (ARITH), pp. 69-76. IEEE, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 543, + 504, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 543, + 504, + 577 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 504, + 577 + ], + "type": "text", + "content": "Mark Horowitz. Computing's energy problem (and what we can do about it). In 2014 IEEE International Solid-State Circuits Conference Digest of Technical Papers (ISSCC), pp. 10-14. IEEE, 2014." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 584, + 504, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 504, + 628 + ], + "type": "text", + "content": "Intel. Bfloat16 - hardware numerics definition, 2018. URL https://www.intel.com/content/dam/develop/external/us/en/documents/bf16-hardware-numerics-definition-white-paper.pdf. Accessed: 2022-09-07." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 635, + 504, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 504, + 681 + ], + "type": "text", + "content": "Benoit Jacob, Skirmantas Kligys, Bo Chen, Menglong Zhu, Matthew Tang, Andrew Howard, Hartwig Adam, and Dmitry Kalenichenko. Quantization and training of neural networks for efficient integer-arithmetic-only inference. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2704-2713, 2018." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 687, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 504, + 733 + ], + "type": "text", + "content": "Yongkweon Jeon, Baeseong Park, Se Jung Kwon, Byeongwook Kim, Jeongin Yun, and Dongsoo Lee. Biqgemm: matrix multiplication with lookup table for binary-coding-based quantized dnns. In SC20: International Conference for High Performance Computing, Networking, Storage and Analysis, pp. 1-14. IEEE, 2020." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Yongkweon Jeon, Chungman Lee, Eulrang Cho, and Yeonju Ro. Mr.biq: Post-training non-uniform quantization based on minimizing the reconstruction error. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12329-12338, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 167 + ], + "type": "text", + "content": "Norman P Jouppi, Cliff Young, Nishant Patil, David Patterson, Gaurav Agrawal, Raminder Bajwa, Sarah Bates, Suresh Bhatia, Nan Boden, Al Borchers, et al. In-datacenter performance analysis of a tensor processing unit. In Proceedings of the 44th annual international symposium on computer architecture, pp. 1-12, 2017." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 173, + 505, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 173, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 505, + 219 + ], + "type": "text", + "content": "Norman P Jouppi, Doe Hyun Yoon, Matthew Ashcraft, Mark Gottscho, Thomas B Jablin, George Kurian, James Laudon, Sheng Li, Peter Ma, Xiaoyu Ma, et al. Ten lessons from three generations shaped google's tpuv4i: Industrial product. In 2021 ACM/IEEE 48th Annual International Symposium on Computer Architecture (ISCA), pp. 1-14. IEEE, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 224, + 505, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 224, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 505, + 258 + ], + "type": "text", + "content": "Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W Mahoney, and Kurt Keutzer. I-bert: Integer-only bert quantization. In International Conference on Machine Learning, pp. 5506-5518. PMLR, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 265, + 505, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 265, + 505, + 310 + ], + "spans": [ + { + "bbox": [ + 105, + 265, + 505, + 310 + ], + "type": "text", + "content": "Urs Köster, Tristan Webb, Xin Wang, Marcel Nassar, Arjun K Bansal, William Constable, Oguz Elibol, Scott Gray, Stewart Hall, Luke Hornof, et al. Flexpoint: An adaptive numerical format for efficient training of deep neural networks. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 316, + 443, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 443, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 443, + 330 + ], + "type": "text", + "content": "Alex Krizhevsky et al. Learning multiple layers of features from tiny images. 
2009." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 335, + 505, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 505, + 380 + ], + "type": "text", + "content": "Se Jung Kwon, Jeonghoon Kim, Jeongin Bae, Kang Min Yoo, Jin-Hwa Kim, Baeseong Park, Byeongwook Kim, Jung-Woo Ha, Nako Sung, and Dongsoo Lee. Alphatuning: Quantization-aware parameter-efficient adaptation of large-scale pre-trained language models. arXiv preprint arXiv:2210.03858, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 386, + 505, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 386, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 505, + 420 + ], + "type": "text", + "content": "Yuhang Li, Ruihao Gong, Xu Tan, Yang Yang, Peng Hu, Qi Zhang, Fengwei Yu, Wei Wang, and Shi Gu. Brecq: Pushing the limit of post-training quantization by block reconstruction. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 426, + 505, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 505, + 460 + ], + "type": "text", + "content": "Xiaocong Lian, Zhenyu Liu, Zhourui Song, Jiwu Dai, Wei Zhou, and Xiangyang Ji. High-performance fpga-based cnn accelerator with block-floating-point arithmetic. IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 27(8):1874-1885, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 467, + 505, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 467, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 467, + 505, + 491 + ], + "type": "text", + "content": "Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. 
arXiv preprint arXiv:1609.07843, 2016." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 496, + 505, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 496, + 505, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 505, + 520 + ], + "type": "text", + "content": "Asit Mishra, Eriko Nurvitadhi, Jeffrey J Cook, and Debbie Marr. Wrpn: Wide reduced-precision networks. In International Conference on Learning Representations, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 525, + 505, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 505, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 505, + 560 + ], + "type": "text", + "content": "Jean-Michel Muller, Nicolas Brisebarre, Florent De Dinechin, Claude-Pierre Jeannerod, Vincent Lefevre, Guillaume Melquiond, Nathalie Revol, Damien Stehlé, Serge Torres, et al. Handbook of floating-point arithmetic. Springer, 2018." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 566, + 505, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 566, + 505, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 505, + 600 + ], + "type": "text", + "content": "Gunho Park, Baeseong Park, Se Jung Kwon, Byeongwook Kim, Youngjoo Lee, and Dongsoo Lee. nuqmm: Quantized matmul for efficient inference of large-scale generative language models. arXiv preprint arXiv:2206.09557, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 606, + 505, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 505, + 651 + ], + "type": "text", + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. 
In Advances in Neural Information Processing Systems, volume 32, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 657, + 505, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 505, + 692 + ], + "type": "text", + "content": "Nicola Petra, Davide De Caro, Valeria Garofalo, Ettore Napoli, and Antonio GM Srolllo. Truncated binary multipliers with variable correction and minimum mean square error. IEEE Transactions on Circuits and Systems I: Regular Papers, 57(6):1312-1325, 2009." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "text", + "content": "Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, and Piotr Dólar. Designing network design spaces. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 10428-10436, 2020." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 663 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 128 + ], + "type": "text", + "content": "Bita Darvish Rouhani, Daniel Lo, Ritchie Zhao, Ming Liu, Jeremy Fowers, Kalin Ovtcharov, Anna Vinogradsky, Sarah Massengill, Lita Yang, Ray Bittner, et al. Pushing the limits of narrow precision inferencing at cloud scale with microsoft floating point. Advances in neural information processing systems, 33:10271-10281, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 505, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 505, + 168 + ], + "type": "text", + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 175, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 175, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 107, + 175, + 504, + 198 + ], + "type": "text", + "content": "Jonathan Richard Shewchuk. Adaptive precision floating-point arithmetic and fast robust geometric predicates. Discrete & Computational Geometry, 18(3):305-363, 1997." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 205, + 504, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 205, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 107, + 205, + 504, + 228 + ], + "type": "text", + "content": "Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 234, + 504, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 234, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 504, + 269 + ], + "type": "text", + "content": "Zhourui Song, Zhenyu Liu, and Dongsheng Wang. Computation error analysis of block floating point arithmetic oriented convolution neural network accelerator design. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 274, + 504, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 274, + 504, + 298 + ], + "spans": [ + { + "bbox": [ + 107, + 274, + 504, + 298 + ], + "type": "text", + "content": "Vivienne Sze, Yu-Hsin Chen, Tien-Ju Yang, and Joel S Emer. Efficient processing of deep neural networks: A tutorial and survey. Proceedings of the IEEE, 105(12):2295-2329, 2017." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 304, + 506, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 304, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 107, + 304, + 506, + 340 + ], + "type": "text", + "content": "Mingxing Tan, Bo Chen, Ruoming Pang, Vijay Vasudevan, Mark Sandler, Andrew Howard, and Quoc V Le. Mnasnet: Platform-aware neural architecture search for mobile. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2820-2828, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 346, + 504, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 346, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 504, + 380 + ], + "type": "text", + "content": "Chaofan Tao, Lu Hou, Wei Zhang, Lifeng Shang, Xin Jiang, Qun Liu, Ping Luo, and Ngai Wong. Compression of generative pre-trained language models via quantization. arXiv preprint arXiv:2203.10705, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 386, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 386, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 107, + 386, + 504, + 422 + ], + "type": "text", + "content": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. International Conference on Learning Representations, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 427, + 504, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 427, + 504, + 450 + ], + "spans": [ + { + "bbox": [ + 107, + 427, + 504, + 450 + ], + "type": "text", + "content": "Shibo Wang and Pankaj Kanwar. Bfloat16: The secret to high performance on cloud tpus. Google Cloud Blog, 4, 2019." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 457, + 482, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 457, + 482, + 470 + ], + "spans": [ + { + "bbox": [ + 107, + 457, + 482, + 470 + ], + "type": "text", + "content": "James Hardy Wilkinson. Rounding errors in algebraic processes. Courier Corporation, 1994." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 476, + 504, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 476, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 107, + 476, + 504, + 500 + ], + "type": "text", + "content": "Xiaoxia Wu, Zhewei Yao, Minjia Zhang, Conglong Li, and Yuxiong He. Extreme compression for pre-trained transformers made simple and efficient. arXiv preprint arXiv:2206.01859, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 506, + 504, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 506, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 107, + 506, + 504, + 540 + ], + "type": "text", + "content": "Chen Xu, Jianqiang Yao, Zhouchen Lin, Wenwu Ou, Yuanbin Cao, Zhirong Wang, and Hongbin Zha. Alternating multi-bit quantization for recurrent neural networks. In International Conference on Learning Representations, 2018." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 547, + 504, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 547, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 107, + 547, + 504, + 582 + ], + "type": "text", + "content": "Zhewei Yao, Reza Yazdani Aminabadi, Minjia Zhang, Xiaoxia Wu, Conglong Li, and Yuxiong He. Zeroquant: Efficient and affordable post-training quantization for large-scale transformers. arXiv preprint arXiv:2206.01861, 2022." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 588, + 504, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 588, + 504, + 622 + ], + "spans": [ + { + "bbox": [ + 107, + 588, + 504, + 622 + ], + "type": "text", + "content": "Sai Qian Zhang, Bradley McDanel, and HT Kung. Fast: Dnn training under variable precision block floating point with stochastic rounding. In 2022 IEEE International Symposium on High-Performance Computer Architecture (HPCA), pp. 846-860. IEEE, 2022a." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 628, + 504, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 628, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 107, + 628, + 504, + 663 + ], + "type": "text", + "content": "Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuhui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068, 2022b." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 494, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 494, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 494, + 94 + ], + "type": "text", + "content": "A COMPUTATIONAL COST OF FP ARITHMETIC VS. INTEGER ARITHEMETIC" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 130, + 504, + 190 + ], + "blocks": [ + { + "bbox": [ + 105, + 110, + 504, + 124 + ], + "lines": [ + { + "bbox": [ + 105, + 110, + 504, + 124 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 504, + 124 + ], + "type": "text", + "content": "Table 2: Energy of computing units synthesized in a " + }, + { + "bbox": [ + 105, + 110, + 504, + 124 + ], + "type": "inline_equation", + "content": "{28}\\mathrm{\\;{nm}}" + }, + { + "bbox": [ + 105, + 110, + 504, + 124 + ], + "type": "text", + "content": " tech node (MAC: multiply-accumulate)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 130, + 504, + 190 + ], + "lines": [ + { + "bbox": [ + 106, + 130, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 106, + 130, + 504, + 190 + ], + "type": "table", + "html": "
MACMultiplyAdd
float32int8float32int32int8float32int32
Energy per Operation1.51 pJ0.08 pJ1.23 pJ0.940.06 pJ0.28 pJ0.03 pJ
Normalized Energy18.9×1.0×20.5×15.7×1.0×9.3×1.0×
", + "image_path": "795c69d7b191729241fa3831cc884c16a8897a2ae1de0d3cf909c0742b71daf7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 203, + 376, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 203, + 376, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 203, + 376, + 326 + ], + "type": "text", + "content": "To cover a wide range of numbers, FP format does not fix the location of the radix point (Goldberg, 1991). Hence, FP arithmetic needs to handle input and output values with different scaling factors, and the FP arithmetic units need to align and normalize significands before and after each computation, respectively. The alignment and normalization logics consist of barrel shifters that can shift a data word by a specified amount, and the cost of the barrel shifter far exceeds the cost of other arithmetic logics in terms of both energy and area, increasing the cost of FP computation (Horowitz, 2014). Hence, in general, integer arithmetic logic is much smaller and consumes less energy than FP counterpart." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 330, + 376, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 330, + 376, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 330, + 376, + 374 + ], + "type": "text", + "content": "It is well known that 8-bit integer can achieve up to " + }, + { + "bbox": [ + 104, + 330, + 376, + 374 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 104, + 330, + 376, + 374 + ], + "type": "text", + "content": " throughput improvement compared to IEEE-754 single-precision format (float32) in widely used GPUs (Kim et al., 2021), as the throughput of 8-bit operations is generally " + }, + { + "bbox": [ + 104, + 330, + 376, + 374 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 104, + 330, + 376, + 374 + ], + "type": "text", + "content": " that of 32-bit operations (Har" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 374, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 504, + 506 + ], + "type": "text", + "content": "ris, 2016). The advantage of integer can be magnified when the hardware platform moves to ASIC (Mishra et al., 2018). For in-depth understanding, we synthesize computing units for FP and integer in a " + }, + { + "bbox": [ + 104, + 374, + 504, + 506 + ], + "type": "inline_equation", + "content": "28\\mathrm{nm}" + }, + { + "bbox": [ + 104, + 374, + 504, + 506 + ], + "type": "text", + "content": " tech node. As shown in Table 2, multiplication-accumulation (MAC) for float32 consumes " + }, + { + "bbox": [ + 104, + 374, + 504, + 506 + ], + "type": "inline_equation", + "content": "18.9\\times" + }, + { + "bbox": [ + 104, + 374, + 504, + 506 + ], + "type": "text", + "content": " more energy than 8-bit integer (int8), a widely used integer format for quantized DNNs. 
Please note that a float32 MAC consists of a float32 multiplication and a float32 addition while an int8 MAC consists of an int8 multiplication and an int32 addition. The bit resolution of the adder for the int8 MAC is higher than that of the multiplier, because int8 multiplication results in 16-bit values and the bit resolution of MAC values increases as the number of accumulated values increases for integer format. In addition, the area cost of the integer unit is also much smaller than FP units as shown in Figure 10. Therefore, many studies have attempted activation quantization despite the various difficulties in the quantization process because both weight parameters and activations should be quantized to replace FP arithmetic with integer arithmetic." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 383, + 205, + 504, + 326 + ], + "blocks": [ + { + "bbox": [ + 383, + 205, + 504, + 326 + ], + "lines": [ + { + "bbox": [ + 383, + 205, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 383, + 205, + 504, + 326 + ], + "type": "image", + "image_path": "2122f6a76adc28979622ea194af6ee36e4c5348de9684ba2be11299ebb3fb30b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 382, + 330, + 504, + 365 + ], + "lines": [ + { + "bbox": [ + 382, + 330, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 382, + 330, + 504, + 365 + ], + "type": "text", + "content": "Figure 10: Area comparison of computing units (layouts synthesized in a " + }, + { + "bbox": [ + 382, + 330, + 504, + 365 + ], + "type": "inline_equation", + "content": "28\\mathrm{nm}" + }, + { + "bbox": [ + 382, + 330, + 504, + 365 + ], + "type": "text", + "content": " node)." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 522, + 418, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 522, + 418, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 418, + 534 + ], + "type": "text", + "content": "B SUPPLEMENT FOR PROPOSED SIGNIFICAND TRUNCATION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 548, + 380, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 380, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 380, + 559 + ], + "type": "text", + "content": "B.1 ENERGY IMPROVEMENT WITH SIGNIFICAND TRUNCATION" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 110, + 579, + 269, + 650 + ], + "blocks": [ + { + "bbox": [ + 110, + 579, + 269, + 650 + ], + "lines": [ + { + "bbox": [ + 110, + 579, + 269, + 650 + ], + "spans": [ + { + "bbox": [ + 110, + 579, + 269, + 650 + ], + "type": "image", + "image_path": "f6d95e1fb8d8523562dfe4ac551a38216da2318ef898a74304b45f87823fe641.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 660, + 291, + 683 + ], + "lines": [ + { + "bbox": [ + 105, + 660, + 291, + 683 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 291, + 683 + ], + "type": "text", + "content": "Figure 11: Energy of adders synthesized in a " + }, + { + "bbox": [ + 105, + 660, + 291, + 683 + ], + "type": "inline_equation", + "content": "28\\mathrm{nm}" + }, + { + "bbox": [ + 105, + 660, + 291, + 683 + ], + "type": "text", + "content": " tech node (tested at " + }, + { + "bbox": [ + 105, + 660, + 291, + 683 + ], + "type": "inline_equation", + "content": "0.9\\mathrm{V}" + }, + { + "bbox": [ + 105, + 660, + 291, + 683 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 321, + 573, + 498, + 651 + ], + "blocks": [ + { + "bbox": [ + 321, + 573, + 498, + 651 + ], + "lines": [ + { + "bbox": [ + 321, + 573, + 498, + 651 + ], + "spans": [ + { + "bbox": [ + 321, + 573, + 498, + 651 + ], + "type": "image", + "image_path": "6771686cafd9759a43126e3e9a12c277438d282b35843033f72ac77f7163cab9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 660, + 504, + 684 + ], + "lines": [ + { + "bbox": [ + 318, + 660, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 318, + 660, + 504, + 684 + ], + "type": "text", + "content": "Figure 12: Example of the significand truncation followed by the pre-alignment." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "With naive pre-alignment of float32 activations, the maximum amount of the significand shifting is 255 and the resolution of the aligned activation becomes 279 bits. 
As shown in Figure 11, while 32-bit integer consumes " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "0.029\\mathrm{pJ}" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": " per addition, both float32 and 279-bit integer consumes " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "0.281\\mathrm{pJ}" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 116 + ], + "type": "text", + "content": "per addition. To avoid the large design overhead, we truncate the pre-aligned significands as shown in Figure 12. The aggressive truncation still did not cause accuracy degradation in FP additions as we described in the Section 3.2." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 129, + 465, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 129, + 465, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 465, + 140 + ], + "type": "text", + "content": "B.2 TRUNCATED BINARY MULTIPLIERS VS. 
PROPOSED SIGNIFICAND TRUNCATION" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 127, + 155, + 312, + 232 + ], + "blocks": [ + { + "bbox": [ + 111, + 155, + 126, + 167 + ], + "lines": [ + { + "bbox": [ + 111, + 155, + 126, + 167 + ], + "spans": [ + { + "bbox": [ + 111, + 155, + 126, + 167 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 127, + 155, + 312, + 232 + ], + "lines": [ + { + "bbox": [ + 127, + 155, + 312, + 232 + ], + "spans": [ + { + "bbox": [ + 127, + 155, + 312, + 232 + ], + "type": "image", + "image_path": "2846d2162fa52e8b3bf581042c18aac7af09023cc2feabee7634935dd797f738.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 354, + 155, + 504, + 225 + ], + "blocks": [ + { + "bbox": [ + 338, + 155, + 353, + 167 + ], + "lines": [ + { + "bbox": [ + 338, + 155, + 353, + 167 + ], + "spans": [ + { + "bbox": [ + 338, + 155, + 353, + 167 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 354, + 155, + 504, + 225 + ], + "lines": [ + { + "bbox": [ + 354, + 155, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 354, + 155, + 504, + 225 + ], + "type": "image", + "image_path": "ca81923ddefb5c2185c6966507f20db5b5b336a0ea89cc33807884d331413bc9.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "lines": [ + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "type": "text", + "content": "Figure 13: Comparison of the truncation scheme in the (a) truncated binary multiplier for integer multiplication and (b) proposed method for FP addition/subtraction." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 285, + 504, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 504, + 361 + ], + "type": "text", + "content": "Truncated binary multipliers (Petra et al., 2009) also discuss the truncation to improve computational efficiency, but there are critical differences between truncated binary multipliers and the proposed work as summarized in Figure 13. First of all, truncated binary multipliers deals with integer multiplications while the proposed work focuses on FP additions/subtractions. Due to the differences in the number format (integer vs. FP) and arithmetic operations (multiplications vs. additions/subtractions), the two works present completely different error analysis models and error reduction schemes." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 368, + 504, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 368, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 504, + 445 + ], + "type": "text", + "content": "The error analysis models between truncated binary multipliers and our case are different, because the amount of truncation is fixed in the truncated binary integer multipliers and the amount of truncation varies in our work as the amount of significand shift varies depending on the input data. Moreover, in truncated binary multipliers, the bit resolution of truncated output is defined by the application requirement. On the other hand, as we proposed to truncate the pre-aligned values to adopt lower-bit integers and improve computational efficiency, the proper bit resolution of truncated values should be found to meet the accuracy requirement in our case." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 451, + 506, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 506, + 583 + ], + "type": "text", + "content": "In addition, in integer multiplication case, some of the truncated partial products share the same inputs with the remaining partial products, so they have correlations with the remaining partial product values. (Petra et al., 2009) proposed an error minimization scheme which exploits such characteristics. On the other hand, in the FP addition/subtraction case, the truncated significands do not have any correlation with the remaining bits so it is hard to devise similar error correction schemes. Instead, we focused on the fact that conventional FP operation is also not precise due to the rounding of output significands so that we only need to match the error level of the proposed scheme to the conventional FP operations. Based on the facts, we showed a theoretical analysis such that the proposed integer-based FP addition/subtraction can have the similar error level as that of the conventional FP addition/subtraction when small number (1-2) of extra bits are attached to the shifted significands. With this finding, we can design an efficient integer-based FP addition logic without having complex error correction function estimated based on the truncated bits." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 599, + 294, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 599, + 294, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 294, + 611 + ], + "type": "text", + "content": "C IN-DEPTH HARDWARE ANALYSIS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 623, + 394, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 623, + 394, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 394, + 634 + ], + "type": "text", + "content": "C.1 DETAILED HARDWARE DESCRIPTION OF THE PROPOSED IFPU" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "Figure 14 describes the proposed iFPU in detail. The proposed iFPU is a bit-flexible accelerator which can handle variable bitwidth of weight values. The iFPU processes weights in bit-parallel manner by processing each weight bitplane in different columns of the PE array. For example, 4-bit weights use 4 PE columns for the computation, and 8-bit weights use 8 PE columns for the computation. After the integer-based summations are done in each column of the PE array, the integer results are converted into FP values and multiplied by scaling factors which represent the significance of each bitplane. Then, computing results of each bitplane are merged in the accumulator (FP adder) to finish the MatMul. 
As the output resolution of FP accumulation remains the same regardless of the" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 284, + 262 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 284, + 262 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 284, + 262 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 284, + 262 + ], + "type": "image", + "image_path": "e7fab444163ce272b8005088b3213cc4c59d1ad88dd0b78bd54c6800f21cbb4c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 270, + 504, + 304 + ], + "lines": [ + { + "bbox": [ + 104, + 270, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 504, + 304 + ], + "type": "text", + "content": "Figure 14: A detailed block diagram of iFPU. The iFPU processes weights in bit-parallel manner by processing each bitplane of the weights in each column of the PE array. " + }, + { + "bbox": [ + 104, + 270, + 504, + 304 + ], + "type": "inline_equation", + "content": "(B_{b,k}" + }, + { + "bbox": [ + 104, + 270, + 504, + 304 + ], + "type": "text", + "content": " : binary weights in Eq. 
1)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 288, + 85, + 373, + 258 + ], + "blocks": [ + { + "bbox": [ + 288, + 85, + 373, + 258 + ], + "lines": [ + { + "bbox": [ + 288, + 85, + 373, + 258 + ], + "spans": [ + { + "bbox": [ + 288, + 85, + 373, + 258 + ], + "type": "image", + "image_path": "737da39c05e176befb2ecac474fd513ab63db587cee4589de9006f1b969bfd96.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 385, + 83, + 501, + 258 + ], + "blocks": [ + { + "bbox": [ + 385, + 83, + 501, + 258 + ], + "lines": [ + { + "bbox": [ + 385, + 83, + 501, + 258 + ], + "spans": [ + { + "bbox": [ + 385, + 83, + 501, + 258 + ], + "type": "image", + "image_path": "17e161b1a48ac765de589d10bcb589ab3055080325953a2431a71f0998358d68.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 326, + 504, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 504, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 504, + 349 + ], + "type": "text", + "content": "size of the accumulation thanks to the characteristics of the FP format, the size of the accumulator does not need to increase for the increased weight bit width." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 362, + 344, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 362, + 344, + 373 + ], + "spans": [ + { + "bbox": [ + 105, + 362, + 344, + 373 + ], + "type": "text", + "content": "C.2 AREA/ENERGY BREAKDOWN OF PROPOSED IFPU" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 387, + 304, + 479 + ], + "blocks": [ + { + "bbox": [ + 109, + 387, + 304, + 479 + ], + "lines": [ + { + "bbox": [ + 109, + 387, + 304, + 479 + ], + "spans": [ + { + "bbox": [ + 109, + 387, + 304, + 479 + ], + "type": "image", + "image_path": "479482482e743352d8059cdf7b9142d724358af8380b45b92013c7a6f7f81c59.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 490, + 504, + 512 + ], + "lines": [ + { + "bbox": [ + 104, + 490, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 504, + 512 + ], + "type": "text", + "content": "Figure 15: Area " + }, + { + "bbox": [ + 104, + 490, + 504, + 512 + ], + "type": "inline_equation", + "content": "(mm^2)" + }, + { + "bbox": [ + 104, + 490, + 504, + 512 + ], + "type": "text", + "content": " (left) and power (W) (right) of MatMul Engines: baselines and iFPUs for FP MatMul with 32x32 PEs." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 387, + 501, + 479 + ], + "blocks": [ + { + "bbox": [ + 307, + 387, + 501, + 479 + ], + "lines": [ + { + "bbox": [ + 307, + 387, + 501, + 479 + ], + "spans": [ + { + "bbox": [ + 307, + 387, + 501, + 479 + ], + "type": "image", + "image_path": "9f699af0fee63273ed539329b5989a908d9f842aeacf5e7b84e0c6a47deb8a1e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 109, + 531, + 301, + 628 + ], + "blocks": [ + { + "bbox": [ + 109, + 531, + 301, + 628 + ], + "lines": [ + { + "bbox": [ + 109, + 531, + 301, + 628 + ], + "spans": [ + { + "bbox": [ + 109, + 531, + 301, + 628 + ], + "type": "image", + "image_path": "ff4bd437c8192a558c56c8c0441be2ef86392d6948afa6172828065ce47eab4c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "lines": [ + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "content": "Figure 16: Area breakdown (left) and power breakdown (right) of proposed iFPUs with " + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "inline_equation", + "content": "32\\mathrm{x}32" + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "inline_equation", + "content": "64\\mathrm{x}64" + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "inline_equation", + "content": "128\\mathrm{x}128" + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "content": " PEs." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 310, + 531, + 505, + 628 + ], + "blocks": [ + { + "bbox": [ + 310, + 531, + 505, + 628 + ], + "lines": [ + { + "bbox": [ + 310, + 531, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 310, + 531, + 505, + 628 + ], + "type": "image", + "image_path": "09e5f4d13d77e18a58272181dd308ec012a30052175177d136a81df4cf53ef07.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": "In this section, the area and power of the MatMul engines designed in Section 4.3 are analyzed in more detail for deeper understanding of the proposed scheme. First, a breakdown of the area/power of various MatMul engines with " + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": " PEs is shown in Figure 15. FP-ADD reconstructs FP-MAC with a series of FP additions by separately processing each weight bitplane (Eq. 
1), so to match the effective throughput of FP-ADD with that of FP-MAC in case of 4-bit weights, 4 FP-ADD" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 216 + ], + "type": "text", + "content": "operations are used for the evaluation. Hence, though the area/energy of a single float32 adder is lower than that of a float32 MAC unit (Table 2), FP-ADD requires slightly larger area and power than FP-MAC. On the other hand, though iFPU also introduces " + }, + { + "bbox": [ + 104, + 82, + 506, + 216 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 82, + 506, + 216 + ], + "type": "text", + "content": " times more operations than FP-MAC, iFPUs achieve large area and power reduction as the area/energy cost of PE arrays become significantly lower by replacing FP adders with integer adders. The area/power reduction is even larger in bfloat16 cases because smaller integer units can be used. As the area/power cost of PE arrays in iFPUs decreases, the relative portion of area/power of supporting logic (such as scale & accumulator) in the total area/power increases. 
Hence, the supporting logic accounts for more than half of the total area/power of iFPUs with 32x32 PEs. Meanwhile, the overhead of the supporting logic decreases as the size of PE arrays increases. We report the area/power breakdown of iFPUs with various number of PEs in Figure 16. The experimental results show that, as the size of PE arrays increase, the supporting logic is shared among more PEs and the overhead can be amortized." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 228, + 389, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 228, + 389, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 389, + 241 + ], + "type": "text", + "content": "C.3 IMPACT OF THE WEIGHT BITWIDTH ON THE PROPOSED IFPU" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 252, + 505, + 369 + ], + "blocks": [ + { + "bbox": [ + 108, + 252, + 505, + 369 + ], + "lines": [ + { + "bbox": [ + 108, + 252, + 505, + 369 + ], + "spans": [ + { + "bbox": [ + 108, + 252, + 505, + 369 + ], + "type": "image", + "image_path": "03996390a185fb711ac2f8be70e6ddefff7047e7d4423fe87dad947c3e3121a0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 108, + 385, + 504, + 500 + ], + "blocks": [ + { + "bbox": [ + 207, + 373, + 403, + 384 + ], + "lines": [ + { + "bbox": [ + 207, + 373, + 403, + 384 + ], + "spans": [ + { + "bbox": [ + 207, + 373, + 403, + 384 + ], + "type": "text", + "content": "(a) Normalized energy efficiency (TOPS/W) of iFPUs" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 385, + 504, + 500 + ], + "lines": [ + { + "bbox": [ + 108, + 385, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 108, + 385, + 504, + 500 + ], + "type": "image", + "image_path": "efae3a70ea025388fd7031579896d7b03a54d2e968c2b0fa6f156bfd7fd618b4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 195, + 506, + 414, + 517 + ], + "lines": [ + { + "bbox": [ + 195, + 506, + 414, + 517 + ], + "spans": [ + { + "bbox": [ + 195, + 506, + 414, + 517 + ], + "type": "text", + "content": "(b) Normalized throughput-per-area (TOPS/mm" + }, + { + "bbox": [ + 195, + 506, + 414, + 517 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 195, + 506, + 414, + 517 + ], + "type": "text", + "content": ") of iFPUs" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 526, + 504, + 572 + ], + "lines": [ + { + "bbox": [ + 104, + 526, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 526, + 504, + 572 + ], + "type": "text", + "content": "Figure 17: Computational efficiency of iFPUs normalized with that of the baseline FP MatMul engine (FP-MAC). Y-axis is the normalized value against FP-MAC and the iFPUs show higher efficiency than FP-MAC even for high-precision weight bits. The number of PEs and target activation types are annotated along the horizontal axis." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 586, + 506, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 674 + ], + "type": "text", + "content": "This section analyzes impact of weight bitwidth on the efficiency improvement achievable with the proposed iFPU. The experimental setup is the same as Section 4.3 except the weight bits. While only 4-bit weight cases are evaluated in Section 4.3, this section evaluates weights with 1 to 16 bits. Because the proposed scheme processes each bitplane of the weights in the bit-parallel manner, higher-bit weights require more operations with PE, scale, and accumulators. Hence, as shown in Figure 17, the benefits of the iFPUs diminish as the number of weight bits increases. 
Nevertheless, even for 8-bit weight case, iFPUs achieve better computational efficiency compared to the FP-MAC baseline." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 689, + 428, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 428, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 428, + 700 + ], + "type": "text", + "content": "C.4 COMPARISON OF THE PROPOSED IFPU WITH INT4 MATMUL ENGINE" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": "In Figure 18., an int4 MatMul engine (INT4) is evaluated and compared with the other MatMul engines analyzed in Section 4.3. INT4 MatMul shows high energy efficiency and throughput-per" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 312, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 73, + 306, + 169 + ], + "blocks": [ + { + "bbox": [ + 108, + 73, + 306, + 169 + ], + "lines": [ + { + "bbox": [ + 108, + 73, + 306, + 169 + ], + "spans": [ + { + "bbox": [ + 108, + 73, + 306, + 169 + ], + "type": "image", + "image_path": 
"c5e3c9d6346d6aa33c6caae94ea469a25be309957f6bdd30e862fd44c183d6af.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 181, + 504, + 216 + ], + "lines": [ + { + "bbox": [ + 104, + 181, + 504, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 504, + 216 + ], + "type": "text", + "content": "Figure 18: Normalized energy efficiency (TOPS/W) (left) and throughput-per-area " + }, + { + "bbox": [ + 104, + 181, + 504, + 216 + ], + "type": "inline_equation", + "content": "(\\mathrm{TOPS} / \\mathrm{mm}^2)" + }, + { + "bbox": [ + 104, + 181, + 504, + 216 + ], + "type": "text", + "content": " (right) of MatMul Engines: baselines and iFPUs for FP MatMul; INT8/INT4 for int8/int4 MatMul. The number of PEs and target activation types are annotated along the horizontal axis." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 309, + 74, + 504, + 168 + ], + "blocks": [ + { + "bbox": [ + 309, + 74, + 504, + 168 + ], + "lines": [ + { + "bbox": [ + 309, + 74, + 504, + 168 + ], + "spans": [ + { + "bbox": [ + 309, + 74, + 504, + 168 + ], + "type": "image", + "image_path": "9a356fe726cbb5d3a58cf9461b03d3960de5e0f3ba0b6e06fa89355cb059cfdc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 238, + 504, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 238, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 238, + 504, + 262 + ], + "type": "text", + "content": "area. However, to take advantage of INT4 MatMul, both weight and activation should be quantized to 4 bits, which may not provide desired accuracy in many cases." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 276, + 342, + 286 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 276, + 342, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 276, + 342, + 286 + ], + "type": "text", + "content": "C.5 HARDWARE EVALUATION WITH MEMORY ACCESS" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 111, + 304, + 284, + 427 + ], + "blocks": [ + { + "bbox": [ + 111, + 304, + 284, + 427 + ], + "lines": [ + { + "bbox": [ + 111, + 304, + 284, + 427 + ], + "spans": [ + { + "bbox": [ + 111, + 304, + 284, + 427 + ], + "type": "image", + "image_path": "c3cca79da7dc9a65091328ad1629fd5d8011049b9d33dce7c96233f76ca10ec8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 434, + 504, + 468 + ], + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 468 + ], + "type": "text", + "content": "Figure 19: Normalized energy consumption of MatMul engines (FP-MAC, FP-ADD, and iFPU) with memory system. The inference energy is measured for BERT-based and OPT-1.3B with 4-bit weights and float32/bfloat16 activations." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 291, + 304, + 424, + 427 + ], + "blocks": [ + { + "bbox": [ + 291, + 304, + 424, + 427 + ], + "lines": [ + { + "bbox": [ + 291, + 304, + 424, + 427 + ], + "spans": [ + { + "bbox": [ + 291, + 304, + 424, + 427 + ], + "type": "image", + "image_path": "72fff8ed9dae84bf3d5364cabd6f23c656e183c87972a7bc0de116e2df7d1d13.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 426, + 335, + 503, + 385 + ], + "blocks": [ + { + "bbox": [ + 426, + 335, + 503, + 385 + ], + "lines": [ + { + "bbox": [ + 426, + 335, + 503, + 385 + ], + "spans": [ + { + "bbox": [ + 426, + 335, + 503, + 385 + ], + "type": "image", + "image_path": "30a2a7c507b03a1b556a55f453baf86b63bf50d5c0c902cd9d39f6a51f5cd8cf.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 484, + 504, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 504, + 594 + ], + "type": "text", + "content": "Setup. To understand the effectiveness of the proposed method in the real computing scenario, the baselines (FP-MAC and FP-ADD) and the proposed iFPU with 128x128 PEs are further evaluated including memory access. For off-chip memory, we scaled down the bandwidth of HBM2 in TPU (Jouppi et al., 2021) considering the ratio of the number of PEs that make up Matrix Multiply Unit (MXU), which is 1:4 and adopted energy per bit of HBM2 from Table 2 in (Jouppi et al., 2021); we used the bandwidth of 153.5 GB/s and the energy per bit of " + }, + { + "bbox": [ + 104, + 484, + 504, + 594 + ], + "type": "inline_equation", + "content": "3.9\\mathrm{pJ} / \\mathrm{bit}" + }, + { + "bbox": [ + 104, + 484, + 504, + 594 + ], + "type": "text", + "content": ". 
We also scaled the size of the unified buffer (on-chip SRAM buffer) in (Jouppi et al., 2021) by dividing it by 4. The unified buffer size in our design was 32MB. For SRAMs, we used the " + }, + { + "bbox": [ + 104, + 484, + 504, + 594 + ], + "type": "inline_equation", + "content": "28\\mathrm{nm}" + }, + { + "bbox": [ + 104, + 484, + 504, + 594 + ], + "type": "text", + "content": " CMOS memory compiler and the energy per bit of " + }, + { + "bbox": [ + 104, + 484, + 504, + 594 + ], + "type": "inline_equation", + "content": "0.155\\mathrm{pJ} / \\mathrm{bit}" + }, + { + "bbox": [ + 104, + 484, + 504, + 594 + ], + "type": "text", + "content": " was used. To overlap memory access with computation, double buffering scheme was adopted in the unified buffer." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 732 + ], + "type": "text", + "content": "Results. We evaluate a single batch inference of BERT-base and OPT-1.3B. We set the sequence length of BERT-base and OPT-1.3B as 128 and 1024 respectively. As double buffering hides the memory access latency, the proposed iFPU with memory model can achieve the same amount of throughput-per-area improvement as that of the baseline for the case in which memory access is not considered. On the other hand, the gain in energy efficiency slightly changes after considering memory access. As shown in Figure 19, the dram access energy accounts for a relatively small portion of total energy consumption in the baselines, because the data is intensively reused in the MatMul computation. As the proposed iFPU reduces the energy cost of computation, memory access energy becomes relatively significant in the proposed system. Thus, when considering the cost of memory access, the amount of improvement in the energy efficiency slightly decreases. 
Nevertheless, the iFPU with memory access still can improve the energy efficiency by up to 6.6x compared to FP-MAC baseline." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 404, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 404, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 404, + 94 + ], + "type": "text", + "content": "D FINE-TUNING CONDITION FOR BERT-BASE TRAINING" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 152, + 504, + 211 + ], + "blocks": [ + { + "bbox": [ + 104, + 109, + 506, + 144 + ], + "lines": [ + { + "bbox": [ + 104, + 109, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 104, + 109, + 506, + 144 + ], + "type": "text", + "content": "Table 3: Hyper-parameters for fine-tuning BERT-base on GLUE benchmark. The fine-tuning use AdamW optimizer and the number of training epochs is 10. The learning rates decay linearly and the weight decay is set to 0.01." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 152, + 504, + 211 + ], + "lines": [ + { + "bbox": [ + 106, + 152, + 504, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 504, + 211 + ], + "type": "table", + "html": "
ConfigurationGLUE
CoLAMRPCSST-2STS-BQQPMNLIQNLIRTE
Batch size1632323232161616
Learning rate1e-41e-41e-4 2e-41e-45e-55e-55e-51e-4
", + "image_path": "36869695064233af6e6995471766023f7e552aade89303f4c3257dc252a8b1ee.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Words are all you need_ Language as an approximation for human similarity judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_content_list.json b/2023/Words are all you need_ Language as an approximation for human similarity judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c042d33ae7fe4e42c10a4206ad69b877a2855a0d --- /dev/null +++ b/2023/Words are all you need_ Language as an approximation for human similarity judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_content_list.json @@ -0,0 +1,3428 @@ +[ + { + "type": "text", + "text": "WORDS ARE ALL YOU NEED? LANGUAGE AS AN APPROXIMATION FOR HUMAN SIMILARITY JUDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Raja Marjieh $^{1,*}$ , Pol van Rijn $^{2,*}$ , Ilia Sucholutsky $^{3,*}$ , Theodore R. 
Sumers $^{3}$ , Harin Lee $^{2,4}$", + "bbox": [ + 179, + 167, + 797, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Thomas L. Griffiths $^{1,3,\\ast \\ast}$ , Nori Jacoby $^{2,\\ast \\ast}$", + "bbox": [ + 181, + 204, + 467, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "\\*\\*\\* Equal contribution.", + "bbox": [ + 483, + 204, + 630, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Department of Psychology, Princeton University", + "bbox": [ + 483, + 220, + 808, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2Max Planck Institute for Empirical Aesthetics", + "bbox": [ + 483, + 234, + 790, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ Department of Computer Science, Princeton University", + "bbox": [ + 483, + 250, + 854, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4Max Planck Institute for Cognitive and Brain Sciences", + "bbox": [ + 483, + 263, + 848, + 279 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 315, + 545, + 330 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human similarity judgments are a powerful supervision signal for machine learning applications based on techniques such as contrastive learning, information retrieval, and model alignment, but classical methods for collecting human similarity judgments are too expensive to be used at scale. Recent methods propose using pre-trained deep neural networks (DNNs) to approximate human similarity, but pre-trained DNNs may not be available for certain domains (e.g., medical images, low-resource languages) and their performance in approximating human similarity has not been extensively tested. We conducted an evaluation of 611 pre-trained models across three domains – images, audio, video – and found that there is a large gap in performance between human similarity judgments and pre-trained DNNs. 
To address this gap, we propose a new class of similarity approximation methods based on language. To collect the language data required by these new methods, we also developed and validated a novel adaptive tag collection pipeline. We find that our proposed language-based methods are significantly cheaper, in the number of human judgments, than classical methods, but still improve performance over the DNN-based methods. Finally, we also develop 'stacked' methods that combine language embeddings with DNN embeddings, and find that these consistently provide the best approximations for human similarity across all three of our modalities. Based on the results of this comprehensive study, we provide a concise guide for researchers interested in collecting or approximating human similarity data. To accompany this guide, we also release all of the similarity and language data, a total of 206,339 human judgments, that we collected in our experiments, along with a detailed breakdown of all modeling results.", + "bbox": [ + 228, + 349, + 769, + 670 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 700, + 336, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Similarity judgments have long been used as a tool for studying human representations, both in cognitive science (Shepard, 1980; 1987; Tversky, 1977; Tenenbaum & Griffiths, 2001), as well as in neuroscience, as exemplified by the rich literature on representational similarity between humans and machines (Schrimpf et al., 2020; Kell et al., 2018; Linsley et al., 2017; Langlois et al., 2021; Yamins et al., 2014) whereby similarity patterns of brain activity are compared to those arising from a model of interest. 
Recent research in machine learning suggests that incorporating human similarity judgments in model training can play an important role in a variety of paradigms such as human alignment (Esling et al., 2018), contrastive learning (Khosla et al., 2020), information retrieval (Parekh et al., 2020), and natural language processing (Gao et al., 2021).", + "bbox": [ + 169, + 734, + 826, + 861 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, building a large dataset based on human similarity judgments is very expensive and often infeasible since the number of judgments required is quadratic in the number of stimuli – for $N$", + "bbox": [ + 169, + 866, + 823, + 897 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Correspondence: {raja.marjieh, is2961}@princeton.edu, pol.van-rijn@ae.mpg.de", + "bbox": [ + 189, + 910, + 823, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/aea578caba5065f0dadc0f7b3cf13e1e7a10d356d8d7a8df27f4bf60b75ca097.jpg", + "image_caption": [ + "Figure 1: Comparing human similarity scores gathered through crowdsourcing with ML pipelines. We used data from three modalities: images, audio, and video. For each modality, we extracted deep model embeddings and gathered human captions and tags. Word- and language-embedding models, as well as simple word-frequency analysis, were used to predict human similarity judgments." + ], + "image_footnote": [], + "bbox": [ + 173, + 106, + 823, + 459 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "stimuli, $O(N^2)$ judgments are required1. 
For example, to fully quantify the similarity of all possible dyadic pairs of 50,000 images, one needs to collect on the order of 1.25 billion ( $\\sim \\frac{50000^2}{2}$ ) human similarity judgments. Thus, human judgments are the main bottleneck for machine-learning methods based on similarity. For this reason, the majority of available human similarity datasets are small by machine learning standards (up to a few thousand objects).", + "bbox": [ + 169, + 561, + 823, + 638 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Advancements in deep learning have brought an alternative approach that does not require extensive collection of human judgments. Specifically, the idea is to use the similarity between hidden representations in pre-trained deep neural networks (DNNs) to approximate human similarity (Peterson et al., 2018; Jha et al., 2020; Marjieh et al., 2022; Hebart et al., 2020; Roads & Love, 2021). Some of these methods also suggest fine-tuning representations on a small training set of human similarity judgments (Peterson et al., 2018). This, in turn, results in a significant reduction in the number of required human judgments down to $O(1)$ (given the pre-trained model). While such methods are promising, they still require access to strong pre-trained models which may not necessarily be available in all domains (e.g., medical datasets, niche modalities, low-resource languages, etc.). In addition, representations obtained from neural networks may not always overlap with human similarity representations, given that the models can be trained for different objectives (i.e., their embeddings may be poor approximations for human similarity).", + "bbox": [ + 169, + 643, + 826, + 811 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A comprehensive comparison to assess which models perform well in predicting human similarity across different modalities is currently lacking in the literature. 
To this end, one of our main contributions in this paper is providing a first-of-its-kind large-scale evaluation of over 600 publicly-available pre-trained models as approximations for human similarity judgments on three modalities", + "bbox": [ + 169, + 816, + 826, + 875 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1Depending on various assumptions, the full range of classical methods can require between $O(N \log N)$ (Jamieson & Nowak, 2011) and $O(N^3)$ (Hebart et al., 2020) human judgments. In this work, we used $O(N^2)$ human judgments (collecting all unique dyadic pairs) as the baseline for comparison.", + "bbox": [ + 169, + 883, + 825, + 925 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "(images, audio, video). Our experiments reveal that there is a large gap in performance between the $O(1)$ DNN methods and the classical $O(N^2)$ similarity method we used as the baseline.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address this gap, we propose a new class of $O(N)$ methods to efficiently and accurately approximate human similarity based on language. This is motivated by a long line of research in cognitive science suggesting that language is an extremely efficient way for humans to communicate information about their sensory environment (Murphy, 2004; Zaslavsky et al., 2018; Piantadosi et al., 2011; Jaeger & Levy, 2006). This in turn suggests that we can use textual descriptors to approximate similarity judgments across different modalities. 
Moreover, such textual descriptors can be collected at the cost of $O(N)$ human judgments (as people describe individual stimuli rather than pairs), which renders this method scalable.", + "bbox": [ + 169, + 138, + 826, + 251 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We consider two approaches for approximating similarity from text data. One approach is to use pre-trained Large Language Models (LLM) to produce vector embeddings of the textual descriptions, and then use a measure of distance between these embeddings to approximate human similarity. This method is more domain-agnostic than the $O(1)$ deep learning methods as it only requires access to a pre-trained LLM regardless of the modality of the original dataset. However, there are some cases where the domain may be out-of-distribution for all available LLMs (e.g., niche technical fields), or where no LLMs are available at all (e.g., low-resource languages). In such cases, the other approach is to use Word-Frequency Analysis (WFA) methods from classical text processing literature (Barrios et al., 2016; Rouge, 2004; Beel et al., 2016),", + "bbox": [ + 169, + 257, + 826, + 383 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As for the textual descriptions themselves, we consider two types, namely, free-text captions and concise word tags. Collecting captions for machine learning datasets is a well-established practice and can easily be done through crowdsourcing platforms. On the other hand, there is no consensus on best practices for collecting tags without a pre-existing taxonomy (i.e., open-set labels). To address this, we propose a novel adaptive tag mining pipeline called Sequential Transmission Evaluation Pipeline (STEP-Tag) which we describe in Section 2.2.4. 
As we will show, STEP-Tag allows to collect meaningful, diverse, and high-quality word tags for target stimuli in an online crowdsourcing environment.", + "bbox": [ + 169, + 388, + 826, + 501 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Finally, we propose one additional set of hybrid approximation methods that combine sensory information with textual descriptions while still requiring $O(N)$ human judgments. For this approach, we propose to stack the embeddings derived from both domain-specific models (e.g., output from the last layer of an image classifier) with the LLM embedding of the respective textual description. When multi-modal models are available, we can similarly leverage the joint embedding of both the stimulus and its textual description.", + "bbox": [ + 169, + 507, + 826, + 592 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We evaluate all of these novel and existing methods across multiple modalities. We test the relative contributions of linguistic and sensory information in approximating human similarity and show that our proposed language-based methods provide both accurate and efficient approximations across modalities, even though they do not require a trained modality-specific deep learning model. 
Crucially, with this large-scale evaluation, we are able for the first time to provide researchers with a comprehensive guide of the tools to use for approximating human similarity at scale.", + "bbox": [ + 169, + 598, + 826, + 683 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To summarize, our contributions are as follows:", + "bbox": [ + 171, + 688, + 486, + 703 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We conduct a comprehensive comparison of human similarity approximation methods.", + "bbox": [ + 215, + 720, + 797, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We propose a novel modality-agnostic method for approximating similarity based on text and show that it is both efficient and competitive in terms of performance.", + "bbox": [ + 215, + 750, + 823, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We propose STEP-Tag, a novel adaptive tagging pipeline, and show that it is effective for crowdsourcing high-quality and diverse sets of word tags.", + "bbox": [ + 215, + 794, + 823, + 821 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We synthesize our findings into a detailed guide for researchers interested in approximating human similarity judgments at scale.", + "bbox": [ + 215, + 837, + 823, + 866 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We collect and release ground-truth and approximated versions of a large behavioral dataset $(N = 1,492)$ across three different domains (images, audio, video), including two text-approximated similarity matrices for 1,000 audio clips and 1,000 video clips.", + "bbox": [ + 215, + 881, + 823, + 924 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 DATASETS", + 
"text_level": 1, + "bbox": [ + 171, + 102, + 294, + 118 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 STIMULI", + "text_level": 1, + "bbox": [ + 171, + 136, + 277, + 150 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Throughout this work, we considered five stimulus datasets across three different modalities - images, audio, and video - consisting of a total of 31,320 dyadic pairs labeled with similarity.", + "bbox": [ + 169, + 164, + 826, + 193 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Images For images, we considered three datasets of common objects introduced in Peterson et al. (2018) – namely, animals, furniture, and vegetables – each consisting of 7,140 dyadic pairs (all unique pairs over 120 images).", + "bbox": [ + 169, + 212, + 826, + 256 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Audio For audio, we used the RAVDESS corpus (Livingstone & Russo (2018), released under a CC Attribution license), which consists of semantically neutral sentences spoken by 24 US American actors to convey a specific target emotion. To construct a 1,000-recording subset, we selected 3 emotions per speaker per sentence. We randomly omitted 104 emotional stimuli and included all 96 neutral recordings (the dataset only contains 2 neutral recordings per speaker per sentence). To construct the subset composed of 4,950 dyadic pairs (all unique pairs over 100 recordings), we randomly selected $\\sim 13$ recordings per emotion from the 1,000.", + "bbox": [ + 169, + 273, + 826, + 375 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Video Finally, for the video dataset, we considered the Mini-Kinetics-200 dataset (Xie et al., 2018) (released under a CC BY 4.0 International License), which contains a large set of short video clips of human activities from 200 activity classes. Specifically, we focused on the validation split, which contains 5,000 videos in total. 
To construct our 1,000-video dataset, we sampled 5 random videos from each of the 200 activity categories. The 100-video subset (4,950 dyadic pairs) used in the similarity judgment collection experiment was then generated by sampling 100 random stimuli from the 1,000 list.", + "bbox": [ + 169, + 392, + 826, + 491 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 HUMAN JUDGMENT COLLECTION", + "text_level": 1, + "bbox": [ + 171, + 512, + 447, + 526 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2.1 PARTICIPANTS", + "text_level": 1, + "bbox": [ + 171, + 540, + 331, + 554 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We collected data from $N = 1,492$ US participants for the new behavioral experiments reported in this paper. Participants were recruited anonymously from Amazon Mechanical Turk and provided informed consent under an approved protocol by either the Institutional Review Board (IRB) at Princeton University (application 10859) or the Max Planck Ethics Council (application 2021_42) before taking part. Participants earned 9-12 USD per hour, and each session lasted less than 30 minutes. To help recruit reliable participants, we required that participants are at least 18 years of age, reside in the United States and have participated in more than 5,000 previous tasks with a $99\\%$ approval rate (see Supplementary Section B for additional details about the behavioral experiments). All experiments were implemented with the Dallinger and PsyNet frameworks designed for automation of large-scale behavioral research (Harrison et al., 2020). In Supplementary Section A.1, we include the data that was collected, instructions used, and code for replication of the behavioral experiments. 
We also provide the code for computational experiments and analysis.", + "bbox": [ + 169, + 566, + 828, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2.2 SIMILARITY JUDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 753, + 406, + 767 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We collected two batches of pairwise similarity judgements, one for each of the audio and video subsets, and were provided access to the similarity matrices for the three image datasets by the authors of Peterson et al. (2018). For each pair we collected $\\sim 5$ similarity judgments to average out inter-rater noise.", + "bbox": [ + 169, + 779, + 826, + 835 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2.3 CAPTIONS", + "text_level": 1, + "bbox": [ + 171, + 854, + 303, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We collected free-text captions for the video and audio datasets. Captions for the image datasets were already collected by Marjieh et al. (2022) and used here with permission. For each stimulus, we collected $\\sim 10$ captions.", + "bbox": [ + 169, + 881, + 826, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/8a98b1de6f85e5fff095e57db5e2dca85fafcce1dced6e5cc2ee52e5bc880c26.jpg", + "image_caption": [ + "Figure 2: STEP-Tag, our novel tag-mining paradigm. We ran an adaptive process in which results of one iteration are used as inputs for subsequent iterations. In every iteration, participants can add a new tag, rate the relevance of existing tags or flag tags that are inappropriate." 
+ ], + "image_footnote": [], + "bbox": [ + 174, + 102, + 823, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2.4 TAGS", + "text_level": 1, + "bbox": [ + 171, + 383, + 267, + 397 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We propose a novel adaptive tag pipeline for simultaneous data collection and evaluation called Sequential Transmission Evaluation Pipeline (STEP) and apply it in the context of semantic tag mining (STEP-Tag). Our paradigm, STEP-Tag, allows researchers to efficiently collect high-quality word tags for a given stimulus (Figure 2) and extends existing crowdsourcing text-mining techniques (Von Ahn & Dabbish, 2008; 2004; Krishna et al., 2017; Law et al., 2007) by integrating ideas from transmission chain experiments (Kirby et al., 2008; Griffiths & Kalish, 2005). In STEP-Tag, participants adaptively create tags for a set of target stimuli and simultaneously evaluate the annotations made by previous participants. In each trial, participants are first given a stimulus (e.g., an image or audio fragment) and rate the relevance of tags that were created by other participants (on a 5-interval Likert scale) or flag a tag if they find it inappropriate (with tags removed if more than two people flag the tag). Next, participants are also given the opportunity to add new tags if they feel a relevant tag that describes the stimulus is missing. The results of the annotation procedure of one participant then propagate to the next participant (additional details about the paradigm, and screenshots are provided in Supplementary Section B.6). 
Ultimately, as the process unfolds over many iterations, meaningful tags are extracted and validated by multiple participants, enabling efficient open-label collection of a desired dataset.", + "bbox": [ + 169, + 407, + 826, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To validate STEP-Tag, we compared it against several baselines: (i) randomly selecting only a single high-rated tag from the last iteration of STEP-Tag per stimulus, (ii) using tags only from the first iteration of STEP-Tag (equivalent to non-adaptive tag collection), and (iii) using class labels instead of tags. We found that tags produced after multiple iterations of STEP-Tag outperformed all three baselines in terms of quality (i.e., downstream performance for similarity reconstruction) and diversity (see Supplementary Section B.6.1).", + "bbox": [ + 169, + 636, + 825, + 720 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 MODELS", + "text_level": 1, + "bbox": [ + 171, + 741, + 282, + 756 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 DNN-BASED METHODS", + "text_level": 1, + "bbox": [ + 171, + 772, + 380, + 786 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We tested a wide range of pre-trained ML models that do not rely on text (overall we tested 611 models) and compared their internal representations to human similarity judgments and text-based predictions (Figure 1A). We compiled our model pool by leveraging pre-trained model repositories (or zoos) available online. In particular, for images we use 569 pre-trained models from the pytorch-image-models package timm (Wightman, 2019), for audio we use 36 pre-trained models available in the torchaudio package (Yang et al., 2021) (see also Supplementary Figure 10 for an analysis of layer depth), and for video we use 6 pre-trained models available from the PyTorchVideo package (Fan et al., 2021). 
Because of the recent success of multimodal training, we additionally included 9 multimodal models based on CLIP from OpenAI's public implementation.", + "bbox": [ + 169, + 797, + 826, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(https://github.com/openai/CLIP) for the image datasets, and compared them to \"stacked\" representations (i.e., concatenating embeddings from separate image and text models).", + "bbox": [ + 168, + 103, + 823, + 133 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 LLM-BASED METHODS", + "text_level": 1, + "bbox": [ + 171, + 148, + 380, + 162 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Tags To embed tags we used ConceptNet Numberbatch (CNNB) which is a word-embedding model trained on the ConceptNet knowledge graph that leverages other popular word embedding models such as word2vec and GloVe (Speer et al., 2017). We experimented with several algorithms for computing similarity between sets (or multi-sets) of tags and share the details in Supplementary Section C.1.2. As a control, for images we also tried converting tags into a caption of the form \"This is an image of tag1, tag2, ...\" and embedding them using a language model (see Supplementary Section C.1.2).", + "bbox": [ + 169, + 176, + 826, + 273 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Captions To embed captions, we used four pre-trained LLMs from HuggingFace (Wolf et al., 2020): 'bert-base-uncased', 'deberta-xlarge-mnli', 'sup-simcse-bert-base-uncased', and 'sup-simcse-roberta-large'. SimCSE is a pre-training procedure that uses semantic entailment in a contrastive learning objective (Gao et al., 2021). 
According to BERTScore (Zhang et al., 2020), the latter three models are ranked in the top 40 models in terms of correlation with human evaluations on certain tasks, with 'deberta-xlarge-mnli' ranked first. However, in our experiments, we found that embedding similarity computed from 'sup-simcse-roberta-large' has the highest correlation with human similarity judgments out of the four models. For SimCSE-based models, we used representations from the (final) embedding layer (where the SimCSE contrastive objective is actually applied). For the other two models, we computed embeddings from every layer, but restricted the main analysis to embeddings from the penultimate layers. This was done in order to be consistent with our procedure for DNNs.", + "bbox": [ + 169, + 289, + 826, + 443 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Other methods For the image datasets, we also considered several other methods that made use of LLMs but do not fit into the categories described above. One approach was using prompts with GPT3 (Brown et al., 2020) in a text-completion setup to directly predict similarity without extracting embeddings (see Supplementary Section C.1.3 for details). We also tried using pre-trained image captioning models to generate captions automatically (i.e. this would reduce $O(N)$ language-based methods to $O(1)$ ) but this resulted in poor performance (see Supplementary Section C.1.3 for details).", + "bbox": [ + 169, + 458, + 826, + 542 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 STACKING METHODS", + "text_level": 1, + "bbox": [ + 171, + 559, + 361, + 571 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We produce stacked representations for each modality by concatenating the single best-performing (see Figure 3) LLM's embeddings with the embeddings from the five best-performing DNNs into a single set of long embeddings. 
Since the two sets of embeddings come from different spaces, we add a single tunable hyperparameter for rescaling the LLM embeddings. This hyperparameter can be set manually, but we use a small number of ground-truth similarity judgments (we use dyadic pairs for just 20 stimuli) to optimize it automatically.", + "bbox": [ + 169, + 585, + 825, + 672 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 WORD FREQUENCY ANALYSIS (WFA) METHODS", + "text_level": 1, + "bbox": [ + 171, + 686, + 555, + 700 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The aim of the WFA methods is to enable similarity approximation from language using traditional embedding-free techniques. Such techniques are particularly useful for low-resource languages or cross-cultural comparisons (Cowen & Keltner, 2017; Barrett, 2020), for which pre-trained models are lacking, as they work solely on the basis of the text itself. The WFA methods we considered included measuring co-occurrence, Rouge score, bm25s, and tfidf. We provide details on each of these procedures in Supplementary Section C.2.", + "bbox": [ + 169, + 712, + 826, + 797 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.5 PERFORMANCE METRIC", + "text_level": 1, + "bbox": [ + 171, + 814, + 382, + 828 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We quantified performance by computing the Pearson correlation $r$ between approximated similarity scores and the ground-truth human similarity scores for all the unique dyadic pairs in a dataset. We compared the performance of the different prediction methods to the inter-rater reliability (IRR) of participants, which serves as an approximate upper-bound on performance. Following Peterson et al. 
(2018), we computed IRR for each human similarity matrix using the split-half correlation method with a Spearman-Brown correction (Brown, 1910).", + "bbox": [ + 169, + 840, + 826, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 RESULTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 282, + 118 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d77376fc14926badb2e03efdcc2ea3c112f33ec27be4ae665e9fa9d8c9574858.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 143, + 821, + 287 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a8a8c9b51c3fe39d89cde181140c094e59c9683ace33d3ababdbfe2f4b585119.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 306, + 818, + 450 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ae6b182d09f93dee1b908aa568dee1ab2661348ddda7a0163109fd4f0966c432.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 469, + 818, + 612 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8ca213ebefb0e9cbe901e5366eca268396e36df51d8e0eeab47203e127837709.jpg", + "image_caption": [ + "Figure 3: Correlation to human similarity. A: Top 50 models averaged over the 3 image datasets. B: Audio dataset. C: Video dataset. Each DNN baseline bar averages over multiple variants of the same architecture; the dots indicate average correlation of individual variants of the architecture. D: Average for each method type for each modality. The error bars are standard deviations." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 633, + 818, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 3 summarizes the performance of the various techniques across the three modalities. Note that the image modality results in Figure 3A are averaged across the three image datasets and only show the top 50 methods for this modality due to space constraints. Figure 3D shows the mean performance of the methods of each type for each modality. When viewing these results, a clear hierarchy emerges. While no approximation methods can perfectly match the ground-truth pairwise similarity, (see the gap between the methods and IRR), stacked ones get close and are consistently more aligned with", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "human similarity than other methods across all three modalities. Text-based methods come next in this hierarchy, followed by DNN-based ones. We also considered supervised methods that reweight DNN-based embeddings based on a small set of human similarity judgments, but we found that the performance was unstable (see Supplementary Section C.3 for details).", + "bbox": [ + 169, + 103, + 823, + 160 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The pre-eminence of stacked results suggests that LLMs and DNNs capture at least some different sources of variance in human similarity judgments. This is reinforced by our surprising finding that stacked representations from CLIP, a state-of-the-art jointly pre-trained multi-modal model, do not outperform stacked representations from independently trained models. 
We hypothesize that this happens because information is lost from both modalities when optimizing for a joint embedding. However, we note that the modest size of the performance gap between stacked and LLMs/DNNs, suggests that there is also significant overlap between aspects of human similarity captured by language and perception.", + "bbox": [ + 169, + 166, + 826, + 280 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To investigate the effect of architecture and downstream task (e.g., classification) performance on alignment of DNNs with human similarity, for the image modality we compared similarity approximation performance against the number of model parameters on a log scale (Figure 4A) and ImageNet classification performance (Deng et al., 2009) (Figure 4B). Overall, we found a positive correlation between similarity approximation performance and the number of model parameters $(r = 0.39, p < 0.001)$ and a smaller but still significant positive correlation with performance on ImageNet $(r = 0.26, p < 0.001)$ . There were some notable exceptions with particularly high ImageNet performance but low similarity performance, such as the image transformer BEiT (Bao et al., 2021).", + "bbox": [ + 169, + 285, + 823, + 411 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Finally, we leverage both DNN-based methods and our proposed language-based methods to approximate similarity matrices that would otherwise require an unaffordable number of human similarity judgments to collect all dyadic pairs. Specifically, we approximate the two similarity matrices corresponding to all 1,000 audio clips and 1,000 video clips in our datasets using every method listed for each of those modalities in Figure 3. We provide visualizations of the resulting matrices at https://words-are-all-you-need.s3.amazonaws.com/index.html. 
We note that to exhaustively collect all dyadic pairs with five judgments per pair would normally require roughly 2.5 million human judgments for each of these matrices.", + "bbox": [ + 169, + 417, + 826, + 529 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2b2ca0e9510a55b27fde0673f2b4db373aa6bc6472c1b777de22294faaabac0f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 272, + 565, + 718, + 712 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d9c0483bcc12a6f34ad4ef34477a05e518d4f065a015bfd4d283743cf710c670.jpg", + "image_caption": [ + "Figure 4: Correlation to human similarity judgments as a function of A: number of model parameters; and B: ImageNet accuracy." + ], + "image_footnote": [], + "bbox": [ + 271, + 715, + 723, + 864 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0c182b240e2e982ca4bdfc7e32e3332c53a4dc813f606ab89cce465dc3cccef2.jpg", + "image_caption": [ + "Figure 5: Guide to collecting and estimating human similarity judgments at scale." + ], + "image_footnote": [], + "bbox": [ + 181, + 106, + 823, + 402 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 DISCUSSION AND CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 457, + 472, + 472 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we compared novel and existing methods for approximating human similarity judgments. 
The main contributions can be summarized as follows: 1) we provide a simple and accessible approach for approximating $O(N^2)$ human similarity judgments using $O(N)$ annotations, 2) we propose a new adaptive pipeline STEP-tag for tag mining, 3) we evaluate our approach against $600+$ domain-specific state-of-the-art DNNs, and 4) we publicly release all data comprising 206,339 human judgments.", + "bbox": [ + 169, + 489, + 826, + 560 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Based on these, we are now able to provide researchers with a best-practices guide to collecting similarity datasets. Our guide is based on two bottlenecks that researchers may face: one is the limit on the number of judgments that can be collected (e.g., due to cost) and the second is the availability of pre-trained models (i.e., either DNNs or LLMs). Our results make it clear that deep learning can provide good approximations for human similarity. In fact, when both pre-trained LLMs and DNNs are available, stacking their representations is consistently the best approach. However, even when neither type of pre-trained models are available, we suggest that classical word-frequency analysis methods still provide researchers with an efficient and competitive method for approximating human similarity. Our guide, comprehensively covering these and other cases, is laid out in Figure 5.", + "bbox": [ + 169, + 566, + 823, + 691 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "One limitation of this work is that while similarity proxies generated from our pipeline can support ML datasets, they are also at risk of baking in high-level human biases that can lead to adverse societal implications, such as amplifying race and gender gaps. Researchers should devote utmost care to what they choose to incorporate in their training objective. Another limitation of our work is the fact that we were restricted to English text data and US participants. 
However, we believe that our approach and proposed methods (especially STEP-tag and the word-frequency methods) pave the way for the study of cross-cultural variation of human semantic representations by providing efficient tools for crowdsourcing high-quality semantic descriptors across languages. This is particularly relevant for low-resource languages, where our tag-mining techniques can work even with the absence of pre-trained ML models (Thompson et al., 2020; Barrett, 2020). We are currently expanding our work to include more languages and diverse cultures. Taken together, our results showcase how we can leverage language to make machine representations more human-like. Moreover, it highlights the importance of combining machine learning and cognitive science approaches for mutually advancing both fields. In particular, we believe that the methodologies adopted in this work have the potential to greatly advance basic research on naturalistic representations in cognitive science.", + "bbox": [ + 169, + 698, + 826, + 907 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 102, + 356, + 118 + ], + "page_idx": 9 + }, + { + "type": "ref_text", + "text": "This work was supported by a grant from the John Templeton Foundation to TLG, an NDSEG fellowship to TRS, and an NSERC fellowship (567554-2022) to IS.", + "bbox": [ + 171, + 133, + 823, + 162 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 183, + 287, + 199 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. 
wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33:12449-12460, 2020.", + "Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, and Michael Auli. data2vec: A general framework for self-supervised learning in speech, vision and language, 2022.", + "Hangbo Bao, Li Dong, and Furu Wei. BEiT: BERT pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021.", + "H Clark Barrett. Towards a cognitive science of the human: cross-cultural approaches and their urgency. Trends in Cognitive Sciences, 24(8):620-638, 2020.", + "Federico Barrios, Federico López, Luis Argerich, and Rosa Wachenchauzer. Variations of the similarity function of textrank for automated summarization. arXiv preprint arXiv:1602.03606, 2016.", + "Joeran Beel, Bela Gipp, Stefan Langer, and Corinna Breitinger. Paper recommender systems: a literature survey. International Journal on Digital Libraries, 17(4):305-338, 2016.", + "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.", + "William Brown. Some experimental results in the correlation of mental abilities 1. British Journal of Psychology, 1904-1920, 3(3):296-322, 1910.", + "Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, et al. WavLM: Large-scale self-supervised pre-training for full stack speech processing. arXiv preprint arXiv:2110.13900, 2021.", + "Alan S Cowen and Dacher Keltner. Self-report captures 27 distinct categories of emotion bridged by continuous gradients. Proceedings of the National Academy of Sciences, 114(38):E7900-E7909, 2017.", + "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. 
ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255. IEEE, 2009.", + "Philippe Esling, Adrien Bitton, et al. Generative timbre spaces: regularizing variational auto-encoders with perceptual metrics. arXiv preprint arXiv:1805.08501, 2018.", + "Haoqi Fan, Tullie Murrell, Heng Wang, Kalyan Vasudev Alwala, Yanghao Li, Yilei Li, Bo Xiong, Nikhila Ravi, Meng Li, Haichuan Yang, Jitendra Malik, Ross Girshick, Matt Feiszli, Aaron Adcock, Wan-Yen Lo, and Christoph Feichtenhofer. PyTorchVideo: A deep learning library for video understanding. In Proceedings of the 29th ACM International Conference on Multimedia, 2021. https://pytorchvideo.org/.", + "Christoph Feichtenhofer. X3d: Expanding architectures for efficient video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 203-213, 2020.", + "Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. SlowFast networks for video recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6202-6211, 2019." + ], + "bbox": [ + 171, + 207, + 826, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tianyu Gao, Xingcheng Yao, and Danqi Chen. SimCSE: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821, 2021.", + "Thomas L Griffiths and Michael L Kalish. A bayesian view of language evolution by iterated learning. 
In Proceedings of the Annual Meeting of the Cognitive Science Society, volume 27, 2005.", + "Peter Harrison, Raja Marjieh, Federico Adolfi, Pol van Rijn, Manuel Anglada-Tort, Ofer Tchernichovski, Pauline Larrouy-Maestri, and Nori Jacoby. Gibbs sampling with people. Advances in Neural Information Processing Systems, 33:10659-10671, 2020.", + "Martin N Hebart, Charles Y Zheng, Francisco Pereira, and Chris I Baker. Revealing the multidimensional mental representations of natural objects underlying human similarity judgements. Nature Human Behaviour, 4(11):1173-1185, 2020.", + "Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. HuBERT: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021.", + "T Jaeger and Roger Levy. Speakers optimize information density through syntactic reduction. Advances in Neural Information Processing Systems, 19, 2006.", + "Kevin G Jamieson and Robert D Nowak. Low-dimensional embedding using adaptively selected ordinal data. In 2011 49th Annual Allerton Conference on Communication, Control, and Computing (Allerton), pp. 1077-1084. IEEE, 2011.", + "Aditi Jha, Joshua Peterson, and Thomas L Griffiths. Extracting low-dimensional psychological representations from convolutional neural networks. arXiv preprint arXiv:2005.14363, 2020.", + "Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natev, et al. The Kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017.", + "Alexander JE Kell, Daniel LK Yamins, Erica N Shook, Sam V Norman-Haignere, and Josh H McDermott. A task-optimized neural network replicates human auditory behavior, predicts brain responses, and reveals a cortical processing hierarchy. 
Neuron, 98(3):630-644, 2018.", + "Prannay Khosla, Piotr Teterwak, Chen Wang, Aaron Sarna, Yonglong Tian, Phillip Isola, Aaron Maschinot, Ce Liu, and Dilip Krishnan. Supervised contrastive learning. Advances in Neural Information Processing Systems, 33:18661-18673, 2020.", + "Simon Kirby, Hannah Cornish, and Kenny Smith. Cumulative cultural evolution in the laboratory: An experimental approach to the origins of structure in human language. Proceedings of the National Academy of Sciences, 105(31):10681-10686, 2008.", + "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International Journal of Computer Vision, 123(1):32-73, 2017.", + "Thomas Langlois, Haicheng Zhao, Erin Grant, Ishita Dasgupta, Tom Griffiths, and Nori Jacoby. Passive attention in artificial neural networks predicts human visual selectivity. Advances in Neural Information Processing Systems, 34, 2021.", + "Edith LM Law, Luis Von Ahn, Roger B Dannenberg, and Mike Crawford. TagATune: A game for music and sound annotation. In ISMIR, volume 3, pp. 2, 2007.", + "Kristin Lemhöfer and Mirjam Broersma. Introducing LexTALE: A quick and valid lexical test for advanced learners of English. Behavior research methods, 44(2):325-343, 2012.", + "Drew Linsley, Sven Eberhardt, Tarun Sharma, Pankaj Gupta, and Thomas Serre. What are the visual features underlying human versus machine vision? In Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 2706-2714, 2017." 
+ ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10012-10022, 2021.", + "Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. arXiv preprint arXiv:2201.03545, 2022.", + "Steven R Livingstone and Frank A Russo. The Ryerson audio-visual database of emotional speech and song (RAVDESS): A dynamic, multimodal set of facial and vocal expressions in north american english. PloS one, 13(5):e0196391, 2018.", + "Raja Marjieh, Ilia Sucholutsky, Theodore R Sumers, Nori Jacoby, and Thomas L Griffiths. Predicting human similarity judgments using large language models. arXiv preprint arXiv:2202.04728, 2022.", + "Alice E Milne, Roberta Bianco, Katarina C Poole, Sijia Zhao, Andrew J Oxenham, Alexander J Billig, and Maria Chait. An online headphone screening test based on dichotic pitch. Behavior Research Methods, 53(4):1551-1562, 2021.", + "Gregory Murphy. The big book of concepts. MIT press, 2004.", + "Zarana Parekh, Jason Baldridge, Daniel Cer, Austin Waters, and Yinfei Yang. Crisscrossed captions: Extended intramodal and intermodal semantic similarity judgments for MS-COCO. arXiv preprint arXiv:2004.15020, 2020.", + "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. 
Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825–2830, 2011.", + "Joshua C Peterson, Joshua T Abbott, and Thomas L Griffiths. Evaluating (and improving) the correspondence between deep neural networks and human representations. Cognitive Science, 42 (8):2648-2669, 2018.", + "Steven T Piantadosi, Harry Tily, and Edward Gibson. Word lengths are optimized for efficient communication. Proceedings of the National Academy of Sciences, 108(9):3526-3529, 2011.", + "Mirco Ravanelli, Titouan Parcollet, Peter Plantinga, Aku Rouhe, Samuele Cornell, Loren Lugosch, Cem Subakan, Nauman Dawalatabad, Abdelwahab Heba, Jianyuan Zhong, Ju-Chieh Chou, Sung-Lin Yeh, Szu-Wei Fu, Chien-Feng Liao, Elena Rastorgueva, François Grondin, William Aris, Hwidong Na, Yan Gao, Renato De Mori, and Yoshua Bengio. SpeechBrain: A general-purpose speech toolkit, 2021. arXiv:2106.04624.", + "Brett D Roads and Bradley C Love. Enriching ImageNet with human similarity judgments and psychological embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3547-3557, 2021.", + "Chin-Yew Lin. ROUGE: A package for automatic evaluation of summaries. In Proceedings of Workshop on Text Summarization of ACL, Spain, 2004.", + "Martin Schrimpf, Jonas Kubilius, Ha Hong, Najib J Majaj, Rishi Rajalingham, Elias B Issa, Kohitij Kar, Pouya Bashivan, Jonathan Prescott-Roy, Franziska Geiger, et al. Brain-Score: Which artificial neural network for object recognition is most brain-like? BioRxiv, pp. 407007, 2020.", + "Roger N Shepard. Multidimensional scaling, tree-fitting, and clustering. Science, 210(4468):390-398, 1980.", + "Roger N Shepard. Toward a universal law of generalization for psychological science. Science, 237 (4820):1317-1323, 1987.", + "Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. 
In Thirty-first AAAI Conference on Artificial Intelligence, 2017.", + "Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International conference on machine learning, pp. 6105-6114. PMLR, 2019." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Joshua B Tenenbaum and Thomas L Griffiths. Generalization, similarity, and bayesian inference. Behavioral and brain sciences, 24(4):629-640, 2001.", + "Bill Thompson, Seán G Roberts, and Gary Lupyan. Cultural influences on word meanings revealed through large-scale semantic alignment. Nature Human Behaviour, 4(10):1029-1038, 2020.", + "Amos Tversky. Features of similarity. Psychological review, 84(4):327, 1977.", + "Luis Von Ahn and Laura Dabbish. Labeling images with a computer game. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems, pp. 319-326, 2004.", + "Luis Von Ahn and Laura Dabbish. Designing games with a purpose. Communications of the ACM, 51(8):58-67, 2008.", + "Johannes Wagner, Andreas Triantafyllopoulos, Hagen Wierstorf, Maximilian Schmitt, Felix Burkhardt, Florian Eyben, and Björn W. Schuller. Dawn of the transformer era in speech emotion recognition: closing the valence gap, 2022.", + "Shu wen Yang, Po-Han Chi, Yung-Sung Chuang, Cheng-I Jeff Lai, Kushal Lakhotia, Yist Y. Lin, Andy T. Liu, Jiatong Shi, Xuankai Chang, Guan-Ting Lin, Tzu-Hsien Huang, Wei-Cheng Tseng, Kotik Lee, Da-Rong Liu, Zili Huang, Shuyan Dong, Shang-Wen Li, Shinji Watanabe, Abdelrahman Mohamed, and Hung yi Lee. SUPERB: Speech Processing Universal PERformance Benchmark. In Proc. Interspeech 2021, pp. 1194-1198, 2021. 
doi: 10.21437/Interspeech.2021-1775.", + "Ross Wightman. PyTorch image models. https://github.com/rwightman/pytorch-image-models, 2019.", + "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 38-45. Association for Computational Linguistics, 2020.", + "Kevin JP Woods, Max H Siegel, James Traer, and Josh H McDermott. Headphone screening to facilitate web-based auditory experiments. Attention, Perception, & Psychophysics, 79(7): 2064-2072, 2017.", + "Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, and Kevin Murphy. Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 305–321, 2018.", + "Daniel Yamins. An optimization-based approach to understanding sensory systems. The Cognitive Neurosciences, 4(V1):381, 2020.", + "Daniel LK Yamins, Ha Hong, Charles F Cadieu, Ethan A Solomon, Darren Seibert, and James J DiCarlo. Performance-optimized hierarchical models predict neural responses in higher visual cortex. Proceedings of the National Academy of Sciences, 111(23):8619-8624, 2014.", + "Yao-Yuan Yang, Moto Hira, Zhaoheng Ni, Anjali Chourdia, Artyom Astafurov, Caroline Chen, Ching-Feng Yeh, Christian Puhrsch, David Pollack, Dmitriy Genzel, Donny Greenberg, Edward Z. Yang, Jason Lian, Jay Mahadeokar, Jeff Hwang, Ji Chen, Peter Goldsborough, Prabhat Roy, Sean Narethiran, Shinji Watanabe, Soumith Chintala, Vincent Quenneville-Bélair, and Yangyang Shi. Torchaudio: Building blocks for audio and speech processing. 
arXiv preprint arXiv:2110.15018, 2021.", + "Noga Zaslavsky, Charles Kemp, Terry Regier, and Naftali Tishby. Efficient compression in color naming and its evolution. Proceedings of the National Academy of Sciences, 115(31):7937-7942, 2018.", + "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. Bertscore: Evaluating text generation with bert. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=SkeHuCVFDr." + ], + "bbox": [ + 171, + 102, + 826, + 917 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "SUPPLEMENTARY MATERIALS", + "text_level": 1, + "bbox": [ + 171, + 102, + 423, + 118 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A STIMULI AND DATA", + "text_level": 1, + "bbox": [ + 171, + 136, + 374, + 152 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 CODE AND DATA AVAILABILITY", + "text_level": 1, + "bbox": [ + 171, + 167, + 437, + 181 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A link is provided to the public, containing all the data collected for this project during the review stage. It includes the new human behavioral data, the computational experiments with machine learning models, and all the necessary analyses scripts for producing the results. Additionally, the repository includes the Dallinger/PsyNet source codes for reproducing the behavioral experiments. 
Finally, we present an interactive visualization for exploring the similarity between stimuli as experienced by humans and different methods reported in the paper.", + "bbox": [ + 169, + 194, + 826, + 279 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B BEHAVIORAL PARADIGMS", + "text_level": 1, + "bbox": [ + 171, + 299, + 428, + 314 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.1 PARTICIPANTS", + "text_level": 1, + "bbox": [ + 171, + 330, + 318, + 344 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The exact number of participants for each of the 9 new behavioral experiments is reported in Table 1.", + "bbox": [ + 171, + 356, + 826, + 372 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/856abb7e128ebd8a80b1869a0cf130fe9c4367a9765a1a91fb182a86cd9da58d.jpg", + "table_caption": [ + "Table 1: Behavioral experiment summary table." + ], + "table_footnote": [ + "Note. 'N' denotes the number of participants included in the analysis; 'LX' denotes the LexTALE English proficiency pre-screening task; 'HT' denotes the headphone test." + ], + "table_body": "
ModalityParadigmRespectTotal stimuliTrials per participantSectionNPre-screening
ImagesTagsAnimals120602.2.456LX
ImagesTagsFurniture120602.2.458LX
ImagesTagsVegetables120602.2.457LX
AudioSimilarityEmotions100852.2.2252HT
AudioCaptionsEmotions1,000502.2.3151HT, LX
AudioTagsEmotions1,000502.2.4217HT, LX
VideoSimilarityActivities100852.2.2284HT
VideoCaptionsActivities1,000502.2.3196HT, LX
VideoTagsActivities1,000502.2.4221HT, LX
", + "bbox": [ + 173, + 411, + 841, + 579 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.2 IMPLEMENTATION", + "text_level": 1, + "bbox": [ + 171, + 635, + 346, + 648 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "All behavioral experiments were implemented using the Dallinger4 and PsyNet (Harrison et al., 2020) frameworks. Dallinger is a modern tool for experiment hosting and deployment which automates the process of participant recruitment and compensation by integrating cloud-based services such as Heroku5 with online crowd-sourcing platforms such as AMT. PsyNet is a novel experiment design framework that builds on Dallinger and allows for flexible specification of experiment timelines as well as providing support for a wide array of tasks across different modalities (visual, auditory and audio-visual). Participants interact with the experiment through their web-browser, which in turn communicates with a backend Python server responsible for the experiment logic.", + "bbox": [ + 169, + 660, + 826, + 773 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.3 PRE-SCREENING", + "text_level": 1, + "bbox": [ + 171, + 790, + 333, + 804 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A common technique for filtering out participants that are likely to deliver low-quality responses, as well as automated scripts (bots), is to implement pre-screening tasks prior to the main part of", + "bbox": [ + 169, + 816, + 826, + 845 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "$^{2}$ Code and data: https://osf.io/kzbr5/?view_only=3dea58e008ce41c290ef0f374bddbf444", + "3Interactive plots: https://words-are-all-you-need.s3.amazon.com/index.html" + ], + "bbox": [ + 171, + 854, + 862, + 893 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "4https://dallinger.readthedocs.io/", + "5https://www.heroku.com/" + ], + "bbox": [ + 192, + 896, + 491, + 
922 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "each experiment. Failing the pre-screening tasks results in early termination of the experiment. Nevertheless, participants are still compensated for their time regardless of whether they fail or succeed on a pre-screener to ensure fair compensation. The role of pre-screeners in our studies was to realize two main criteria for data quality, namely, a) to be able to collect high-quality text descriptors, and b) to ensure that participants are able to inspect the target stimuli properly (in particular the audio component in prosody and videos). To do this, we implemented two pre-screening tasks, an English proficiency test and a standardized headphone test (used only for audio and video experiments). Table 1 provides details on which pre-screeners were used in each of the behavioral experiments.", + "bbox": [ + 169, + 103, + 826, + 218 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "alberation", + "text_level": 1, + "bbox": [ + 375, + 250, + 609, + 287 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Does this word exist?", + "bbox": [ + 444, + 299, + 545, + 309 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "yes", + "bbox": [ + 434, + 345, + 452, + 354 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "no", + "bbox": [ + 537, + 345, + 552, + 354 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 6: Example trial from the LexTALE pre-screening task (Lemhöfer & Broersma, 2012).", + "bbox": [ + 187, + 398, + 805, + 416 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "English proficiency test. 
To test participants' English proficiency, we used LexTALE, a lexical decision task developed in Lemhöfer & Broersma (2012). In each trial, participants were briefly presented (1 second) with either a real English word or a made up word that does not exist. Participants were instructed to guess whether the word was real or not. A total of 12 trials (half of them being real words) were presented, and 8 of them needed to be correct for the participant to pass. The presented words were: hasty, fray, stoutly, moonlit, scornful, unkempt, sensible, kilp, plaintively, crumper, plaudate, alberation. An example trial is shown in Figure 6.", + "bbox": [ + 169, + 441, + 826, + 541 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Which sound was softest (quietest) -- 1, 2, or 3?", + "bbox": [ + 354, + 580, + 643, + 594 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1", + "2", + "3" + ], + "bbox": [ + 498, + 642, + 509, + 723 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 7: Example trial from the headphone pre-screening test (Woods et al., 2017).", + "bbox": [ + 220, + 756, + 774, + 773 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Headphone test. We used the headphone test developed by Wood et al. (Woods et al., 2017), which is used as a standard pre-screener for high-quality auditory psychophysics data-collection procedures (Milne et al., 2021). The test is designed to ensure that the participants are wearing headphones and are able to perceive subtle differences in volume. The task consists of a forced choice task, in which three consecutive tones are played, and the participant has to identify which of them is the quietest. Crucially, these tones are constructed to exhibit a phase cancellation effect when not using headphones, and therefore making it difficult for non-headphone users to identify the quietest tone. 
Participants had to answer 4 out of 6 trials correctly to pass this test. An example trial is shown in Figure 7.", + "bbox": [ + 169, + 797, + 826, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/57d6ed78b93d1f35c49af269482f05fbc8e7333a5e8542f8fb9d134c75450ae2.jpg", + "image_caption": [ + "How similar are the activities in following two videos? (2 / 85)", + "If it is difficult to choose between the options, don't worry, and just give what you intuitively think is the right answer.", + "Figure 8: Screenshot from the similarity judgment task over video pairs." + ], + "image_footnote": [], + "bbox": [ + 187, + 157, + 803, + 398 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.4 SIMILARITY JUDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 468, + 392, + 482 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In the present work, we collected similarity judgments across audio and video datasets. Each dataset comprised of 4,950 unique pairs corresponding to the number of unordered subsets that contain two distinct objects (i.e., excluding self-similarity), within a set of 100 stimuli. We did not collect similarity judgments over the three datasets of images, as these were provided in Peterson et al. (2018) (and used here with permission). The experiments proceeded as follows: upon completion of the consent form and the pre-screening tasks, participants received instructions regarding the main experiment:", + "bbox": [ + 169, + 494, + 826, + 592 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Audio. In this experiment we are studying how people perceive emotions. 
In each round you will be presented with two different recordings and your task will be to simply judge how similar are the emotions of the speakers.", + "bbox": [ + 228, + 604, + 767, + 648 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Video. In this experiment we are studying how people perceive activities. In each round you will be presented with two different videos and your task will be to simply judge how similar are the activities in them.", + "bbox": [ + 228, + 660, + 766, + 704 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The instructions then continued as follows:", + "bbox": [ + 171, + 715, + 455, + 729 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "You will have seven response options, ranging from 0 ('Completely Dissimilar') to 6 ('Completely Similar'). Choose the one you think is most appropriate. Note: no prior expertise is required to complete this task, just choose what you intuitively think is the right answer.", + "bbox": [ + 228, + 743, + 766, + 800 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The quality of your responses will be automatically monitored, and you will receive a bonus at the end of the experiment in proportion to your quality score. The best way to achieve a high score is to concentrate and give each round your best attempt.", + "bbox": [ + 228, + 811, + 766, + 869 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The experiment will begin now. You will take up to 85 rounds where you have to answer this question. 
Remember to pay careful attention in order to get the best bonus!", + "bbox": [ + 228, + 881, + 766, + 922 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As described in the instructions, in each trial, participants rated the similarity between a pair of sounds (how similar are the emotions of the two speakers?) or videos (how similar are the activities in the following two videos?) on a scale ranging from 0 (completely dissimilar) to 6 (completely similar) (Figure 8). Overall, participants completed 85 trials on a random subset of the possible pairs. To further motivate participants to provide good responses, we gave them an additional performance bonus for providing consistent data. Among the 85 trials, 5 trials were repeated for consistency checking. The responses were converted into a performance score by computing the Spearman correlation between the original and repeat ratings. Perfect scores resulted in a 10 cent bonus.", + "bbox": [ + 169, + 103, + 826, + 217 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.5 CAPTIONS", + "text_level": 1, + "bbox": [ + 171, + 231, + 292, + 244 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We collected free-text captions for the video and audio datasets. Captions for the image datasets were previously collected in Marjieh et al. (2022) and used here with permission. After completing the consent form and pre-screening tests, participants received the following instructions:", + "bbox": [ + 169, + 257, + 823, + 301 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Audio. In this experiment we are studying how people describe emotions. 
You will be presented with different recordings of speakers and your task will be to describe their emotions. In doing so, please keep in mind the following instructions", + "bbox": [ + 228, + 310, + 767, + 354 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Describe all the important aspects of the recording.", + "bbox": [ + 248, + 356, + 599, + 371 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Video. In this experiment we are studying how people describe activities in videos. You will be presented with different videos of activities and your task will be to describe their content. In doing so, please keep in mind the following instructions", + "bbox": [ + 228, + 381, + 767, + 424 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Describe all the important activities in the video.", + "bbox": [ + 248, + 426, + 584, + 441 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "As well as the following guidelines adapted from Marjieh et al. (2022):", + "bbox": [ + 169, + 452, + 640, + 468 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Do not start the sentences with \"There is\" or \"There are\".", + "- Do not describe unimportant details.", + "- You are not allowed to copy and paste descriptions.", + "- Descriptions should contain at least 5 words.", + "- Descriptions should contain at least 4 unique words." 
+ ], + "bbox": [ + 248, + 478, + 638, + 554 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Note: No prior expertise is required to complete this task, just describe what you intuitively think is important as accurately as possible.", + "bbox": [ + 228, + 556, + 766, + 585 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The quality of your captions will be monitored automatically and providing low quality and repetitive responses could result in early termination of the experiment and hence a lower bonus.", + "bbox": [ + 228, + 597, + 766, + 640 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You will describe up to 50 recordings.", + "bbox": [ + 230, + 652, + 482, + 667 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "These guidelines were enforced to ensure that participants deliver sufficiently informative captions that are not repetitive. In each trial of the main experiment, participants described a single audio (please describe the emotions of the speaker) or video stimulus (please describe the activity in the video). Overall, participants described up to 50 randomly presented stimuli. To filter out bad participants that tend to deliver repeated responses, in each trial (excluding the first 4 trials) we computed the mean edit distance between their current response and all previous responses that they previously provided using the partial_ratio function in the thefuzz6 Python package for fuzzy string matching. This function returns for a pair of input strings a matching score between 0 and 100 (100 being identical strings). Early termination was enforced if the mean response matching score was above 80. 
The idea here was to prevent participants from copying and pasting the same response over and over again (or varying it only slightly).", + "bbox": [ + 169, + 678, + 826, + 832 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B.6 TAGS", + "text_level": 1, + "bbox": [ + 171, + 847, + 256, + 861 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For the image, audio, and video datasets, we collected tag data, i.e., concise labels that describe the salient features of a stimulus. To do so, we developed a novel tag mining paradigm called STEP-Tag in", + "bbox": [ + 169, + 873, + 823, + 902 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_footnote", + "text": "$^{6}$ https://github.com/seatgeek/thefuzz", + "bbox": [ + 189, + 909, + 509, + 922 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Mark the existing tags", + "text_level": 1, + "bbox": [ + 186, + 125, + 361, + 140 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "picking", + "text_level": 1, + "bbox": [ + 191, + 148, + 292, + 172 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/442e688eade67c51016a7c0f2e60198b4abd7f0a403184413a6ae1f5a5feeaaf.jpg", + "image_caption": [ + "Figure 9: Screenshot of an example tag mining task for videos. The tag \"picking\" received 5 stars (very relevant), whereas the tag \"apple\" is flagged (marked as irrelevant)." 
+ ], + "image_footnote": [], + "bbox": [ + 191, + 176, + 294, + 189 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "apple", + "text_level": 1, + "bbox": [ + 308, + 150, + 387, + 172 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/305054b44560047950dbbc8dd653c442dfab18e29527dfb6f15bf0605a4ca4b5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 305, + 176, + 410, + 189 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/63c64730495dff6b765e2e2b2cd0fd66867d64fdc3ded9a61bc60dc6d25aa0be.jpg", + "image_caption": [ + "Play again" + ], + "image_footnote": [], + "bbox": [ + 594, + 109, + 818, + 239 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Are any tags missing?", + "text_level": 1, + "bbox": [ + 186, + 220, + 339, + 234 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Type in words describing the activity in the video, that are missing above. You can either select tags from a dropdown list or create entirely new ones. Submit your response for a new tag by pressing the enter key. You can add more than one tag.", + "bbox": [ + 196, + 243, + 531, + 287 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "peach", + "bbox": [ + 200, + 306, + 230, + 318 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Type more tags", + "bbox": [ + 248, + 306, + 313, + 316 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Next", + "bbox": [ + 509, + 335, + 537, + 345 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/bab50fb7d54ac2fc9327b528782562d43cbef44ca56bffdadb1d9670980212a9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset (# of stimuli)meanstdtotal
Vegetables (120)3.21.1385
Furniture (120)5.21.7627
Animals (120)8.22.7988
Audio-emotions (1000)9.13.59092
Video-activities (1000)8.52.98482
", + "bbox": [ + 334, + 415, + 663, + 503 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 2: Mean, standard deviation, and total number of tags collected for each dataset.", + "bbox": [ + 212, + 512, + 781, + 527 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "which each stimulus was treated as a separate \"chain\" (see Figure 2 in the paper). When the stimulus was presented for the first time, the participant was asked to provide at least one tag. For the following iterations, we sequenced participants so that each of them had to rate the tags provided by participants from the previous iterations within the same chain. The rating was either choosing between one (not very relevant) to five stars (very relevant), or marking the tag as completely irrelevant by using the flag icon (see Figure 9). Participants could optionally introduce new tags that will subsequently be presented to other participants assigned to the same chain. Participants could only provide tags that were not already present, and they had to be in lower-case letters. To discourage frequent use of long word combinations, a pop-up window appeared if participants used two or more white spaces (i.e., three or more words) to warn that long combinations should only be used when completely necessary. This process continued for at least 10 iterations, after which we checked at each consequent iteration whether the chain was \"full\". We considered a chain to be full if its latest iteration had at least 2 tags that were rated at least 3 times and had a mean rating of 3 stars. If a chain was not full after 20 iterations, we stopped collecting further iterations. Since each experimental batch lasted for a fixed duration of less than one day, in some cases we did not complete all chains, and a few chains had fewer iterations (3 for vegetables, 6 for animals and 2 for furniture, out of 120 chains each). 
Our experiment incentivized participants to provide new tags by paying them a performance bonus of 0.01 USD for every up-vote (i.e., not flagged) given by other participants. On the contrary, if two or more tags of the same participant were flagged by others, the participant was excluded (the participant received a warning after the first flag). We provide summary statistics on the number of collected tags in Table 2.", + "bbox": [ + 169, + 554, + 826, + 847 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "After accepting the consent form and passing the pre-screening tasks, participants received introductory instructions regarding the main experiment:", + "bbox": [ + 169, + 852, + 828, + 882 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Images. Rate & Tag animals/furniture/vegetables! Thanks for participating in this game! In this game you will:", + "bbox": [ + 228, + 895, + 767, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Watch images of animals/furniture/vegetables.", + "- Rate tags that other players have given.", + "- Add new tags that you think are missing." + ], + "bbox": [ + 250, + 103, + 568, + 152 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Audio. Rate & Tag emotions! Thanks for participating in this game! In this game you will:", + "bbox": [ + 228, + 167, + 766, + 196 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Listen to a speech fragment and focus on the emotional content of the recording.", + "- Rate tags that other players have given.", + "- Add new tags that you think are missing." 
+ ], + "bbox": [ + 248, + 202, + 766, + 265 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Video. Rate & Tag activities! In this game you will:", + "bbox": [ + 230, + 279, + 575, + 294 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Watch a video and focus on the activities happening.", + "- Rate tags that other players have given.", + "- Add new tags that you think are missing." + ], + "bbox": [ + 250, + 300, + 609, + 349 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Participants then received further instructions regarding the rules of the game", + "bbox": [ + 171, + 364, + 679, + 378 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Images. After watching the animal/furniture/vegetable you will see tags given by other players that describe the animal/furniture/vegetable. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person seeing this animal/furniture/vegetable, you may see no previous tags. You can also add your own tag that is relevant to describe the animal/furniture/vegetable. Your tag will then be rated by other players who are playing the game simultaneously.", + "bbox": [ + 228, + 393, + 766, + 505 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Audio. After listening to the recording, you will see tags given by other players that describe the emotions in the speech fragment. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person listening to this speech sample, you may see no previous tags. 
You can also add your own tag that is relevant to describe the emotions in the speech fragment. Your tag will then be rated by other players who are playing the game simultaneously.", + "bbox": [ + 228, + 520, + 766, + 631 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Video. After watching the video, you will see tags given by other players that describe the activities in the video. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person watching this video, you may see no previous tags. You can also add your own tag that is relevant to describe the activities in the video. Your tag will then be rated by other players who are playing the game simultaneously.", + "bbox": [ + 228, + 645, + 766, + 757 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Finally, participants received the following guidelines regarding the tag input and the bonus scheme:", + "bbox": [ + 171, + 771, + 823, + 786 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Keep tags short. 
A word like \"green grass\" should rather be submitted as \"green\" and \"grass\", whereas a compound word such as \"red wine\" cannot be separated, since \"red wine\" means something different than just \"red\" and \"wine\".", + "bbox": [ + 228, + 800, + 766, + 843 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Bonus rules.", + "text_level": 1, + "bbox": [ + 230, + 857, + 320, + 871 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- If the tag you provide gets rated as a relevant tag (i.e., not flagged) by other players", + "- If your tag is unique and have not been introduced by others" + ], + "bbox": [ + 248, + 878, + 764, + 922 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/c834513de26c89851221182a5b0ca2ac5873298ec9e854c14575b280143dd2a5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModalitySTEPCaptions
Audio230187
Video264291
", + "bbox": [ + 383, + 101, + 614, + 160 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 3: Median of overall participants' time spent per stimulus (in seconds).", + "bbox": [ + 243, + 169, + 751, + 185 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Note: Simply writing many and irrelevant tags is not a good idea because other players might flag your tag. Your experiment will terminate early if there are too many red flags!", + "bbox": [ + 228, + 210, + 767, + 253 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Please try to use a variety of words to describe the animal / furniture / vegetable / emotion in the speech fragment / activities in the video, and use the entire star rating scale for your responses.", + "bbox": [ + 228, + 268, + 767, + 311 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "B.6.1 VALIDATING STEP-TAG", + "text_level": 1, + "bbox": [ + 171, + 325, + 401, + 340 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We conducted a small, exploratory ablation study to validate STEP-Tag as a procedure for collecting diverse, accurate, and informative tags. First, we compared using multiple tags from the last iteration of STEP-Tag to using just a single randomly-selected highly-rated tag from the last iteration. We found that using a single tag greatly decreased correlation with human similarity (i.e., for the video dataset, the best-performing method on multiple tags had a correlation of $r = 0.74$ while the best-performing method on single labels had a correlation of $r = 0.35$ ). Second, we compared tags from the first iteration of STEP-Tag (equivalent to collecting tags without an adaptive procedure) to tags from the last iteration. 
We found that using first iteration tags greatly decreased correlation with human similarity (i.e., for the video dataset, the 'Tags CNNB mean (no split)' method, the correlation from the last iteration was $r = 0.74$ and from the first iteration it was $r = 0.44$ ; for 'Tags overlap' it was $r = 0.56$ from the last iteration and $r = 0.38$ from the first iteration). Finally, we extracted the Kinetics-200 labels for each video to compare the tags from STEP-Tag against the kinds of labels typically collected for machine learning datasets. We found that using labels decreased the correlation with human similarity (i.e., the best-performing method on pipeline tags had a correlation of $r = 0.74$ while the best-performing method on dataset labels had a correlation of $r = 0.64$ ).", + "bbox": [ + 169, + 351, + 826, + 561 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "B.7 DURATION OF STEP-TAG AND CAPTIONS", + "text_level": 1, + "bbox": [ + 171, + 577, + 504, + 590 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To compare STEP-tag and captions, we computed the median of overall participants' time spent per stimulus (see Table 3). The times were only collected for the audio and video modality (captions for the image datasets were already collected by Marjieh et al. (2022)). We see that both methods consume roughly similar amounts of time, which is desirable as our analysis suggests that in some domains (e.g., video) tags yield the best results whereas in others (e.g., audio) captions do.", + "bbox": [ + 169, + 603, + 826, + 676 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C PREDICTION METHODS", + "text_level": 1, + "bbox": [ + 171, + 694, + 401, + 709 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We used two main types of methods to predict human similarity judgments. The first class (\"DNN-based methods\", described in section C.1) make use of pre-trained embedding models. 
In the second class of models (\"Word Frequency Analysis methods\", described in the section C.2) simple feature extraction techniques are used instead of pre-trained deep learning models. Figure 1 depicts schematic overview of all prediction methods that we used.", + "bbox": [ + 169, + 726, + 826, + 797 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C.1 DNN-BASED METHODS", + "text_level": 1, + "bbox": [ + 171, + 814, + 382, + 828 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The DNN-based methods use various embeddings and deep learning representations to predict human similarity judgments. These methods could be further split into three groups based on the kinds of input data they process, namely if they use a single sensory modality that is either image, audio or video (\"unimodal models\"; see subsection C.1.1), or use text that is either tag or captions (\"text embeddings\"; see subsection C.1.2), or use both (\"multimodal models\"). In addition, we also tested the performance of \"stacked\" representations, where the sensory and textual embedding of a select", + "bbox": [ + 169, + 840, + 826, + 926 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "number of models were concatenated into a single long embedding. 
Overall, the computation time of embedding methods took about two weeks on an x1.16xlarge Amazon Web Services instance with 64 vCPUs and 976 GiB of memory.", + "bbox": [ + 169, + 103, + 826, + 148 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.1.1 UNIMODAL DNN-BASED METHODS", + "text_level": 1, + "bbox": [ + 171, + 166, + 478, + 181 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/6f907053e36ebb00a0b4f8c4bb80870c51050c3a9fdb06abcd35bc2d3478a015.jpg", + "table_caption": [ + "Table 4: All 30 image baseline models occurring in the top 50 best models reported in Figure 3A." + ], + "table_footnote": [ + "Note. Performance accuracy on ImageNet was based on Wightman (2019) and was not available for all models." + ], + "table_body": "
Model nameAverage scoreSD scoreTop 1 accuracyNumber of parameters (M)
1Swin0.660.0681.5223.37
2ConvNeXT0.640.07N/A348.15
3NF-ResNet0.620.0480.6523.51
4NFNet l00.610.0882.7532.77
5ResNetV20.600.11N/A928.34
6NF-RegNet0.590.0579.299.26
7VGG160.580.1173.35134.27
8VGG190.580.1174.21139.58
9ViT0.580.1275.956.16
10ResMLP0.570.0783.59128.37
11Twins-SVT0.570.0681.6823.55
12Twins-PCPVT0.570.0481.0923.59
13VGG130.570.1171.59128.96
14CaiT0.570.0482.1917.18
15VGG110.570.1070.36128.77
16gMLP0.560.0679.6419.17
17PIT0.560.0378.1910.23
18DeiT0.560.0372.175.52
19ConViT0.560.0373.115.52
20TNT0.560.0381.5223.37
21CoaT0.550.0478.435.35
22gMixer0.550.0578.0424.34
23XCiT0.550.0482.5711.92
24IG ResNeXt0.530.1385.44826.36
25Visformer0.520.0282.1139.45
26RepVGG0.520.1180.2181.26
27CLIP image0.500.11N/A102.01
28JXNesT0.500.0781.4216.67
29ECAResNet0.470.1480.4528.11
30DenseNet0.470.1274.746.95
", + "bbox": [ + 225, + 227, + 769, + 686 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Image models We used 560 pre-trained models from the Pytorch Image Models (timm) repository (Wightman, 2019). We chose this repository as it contains an extensive and highly diverse set of pre-trained models in terms of architecture backbones, model sizes, and training sets. The repository includes models published from 2014 to 2022 that use various training sets (such as ImageNet1k, ImageNet21k, Instagram, etc.), training procedures objectives (e.g., pre-training, fine-tuning, self-supervision, weak supervision, etc.) and architectures (e.g., VGG, ResNet, Inception, Transformer, etc.). The repository also reports various evaluation metrics for each model (e.g., their ImageNet performance).", + "bbox": [ + 169, + 750, + 826, + 861 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For each model, we computed the embedding from the last layer (typically before the final softmax layer; see below and Figure 10 for a preliminary analysis for the effect of layer depth in audio models). We then computed the cosine similarity between pairs of embedding vectors to produce a similarity matrix. The entire list of the performance of all models is detailed in the OSF repository", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/945b41b903c72933eacab4066b51f19c2d63f10c7678cac69daf4dd5aebd1480.jpg", + "image_caption": [ + "Figure 10: Scores for individual layers of audio models scaled to the total number of layers. Models are colored by their meta architecture." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 99, + 808, + 349 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "associated with this paper7. Table 4 presents additional details for the top 42 image baseline models in Figure 3A including their average score (correlation to human judgments) across the three image datasets, the standard deviation (SD) of this score (across datasets, repeated runs and available model parameters in Wightman (2019)), their ImageNet accuracy, and their number of trainable parameters.", + "bbox": [ + 169, + 422, + 826, + 481 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Figure 4A shows the correlation to human similarity as a function of the number of parameters for all 569 models. In general, we found that models that have more parameters perform better (Figure 4A). Plotting all the embedding technique correlations against the number of training parameters of their respective models showed statistically significant positive correlation $(r = 0.39, p < 0.001)$ . However, one possible explanation for this could be the improved performance of newer models, which typically have more parameters, on various computer vision tasks. To test this, we computed the performance (i.e., correlation with human similarity) of the various models as a function of their accuracy on ImageNet (Deng et al., 2009) - which was provided in Wightman (2019) for all models except for CLIP (whose implementation came from a different repository) as summarized in Figure 4B. We found a positive correlation between the two metrics $(r = 0.26, p < 0.001)$ , though with some clear exceptions. For example, the vision transformer BEiT (Bao et al., 2021) and the convolutional architecture EfficientNet (Tan & Le, 2019) achieved high accuracy on ImageNet but performed poorly on human data. 
On the other hand, the vision transformer Swin (Liu et al., 2021) and the convolutional architecture ConvNext (Liu et al., 2022) both performed well on ImageNet and human similarity. This suggests that architecture and number of parameters are better predictors of similarity judgments than performance on ImageNet. Further analysis is required to determine what kind of architectural components actually contribute to more human-like performance (Langlois et al., 2021).", + "bbox": [ + 169, + 484, + 826, + 736 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Audio models We used all pre-trained wav2vec 2.0 (Baevski et al., 2020) and HuBERT (Hsu et al., 2021) models available in torchaudio (Yang et al., 2021). We also extracted embeddings from WavLM (Chen et al., 2021) and data2vec audio models (Baevski et al., 2022). Furthermore, we used additional wav2vec 2.0 and HuBERT models that were either specialized on emotion recognition or speaker identification (wen Yang et al., 2021; Wagner et al., 2022; Ravanelli et al., 2021). The performance of HuBERT, wav2vec 2.0, and WavLM models is shown in Figure 3B. Additional details about the models are displayed in Table 5.", + "bbox": [ + 169, + 751, + 826, + 851 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In addition, we explored the correlation between the audio models and human similarity data as a function of the layer in the model. 
Earlier literature has suggested that similarity to human representations may depend on the layer of the model (Kell et al., 2018; Yamins et al., 2014; Yamins,", + "bbox": [ + 169, + 854, + 826, + 900 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_footnote", + "text": "$^{7}$ https://osf.io/kzbr5/?view_only=3dea58e008ce41c290ef0f374bddbf444", + "bbox": [ + 189, + 909, + 764, + 922 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/0e62a0858d91fc0202ea64c360aefb7dae372e76a7354810f44d0a7af623935e.jpg", + "table_caption": [ + "Table 5: All audio baseline models used in the analysis." + ], + "table_footnote": [], + "table_body": "
Model nameEmotion correlationNumber of parameters (M)
1wav2vec 2.0 lv60k (100h)0.49317
2wav2vec 2.0 lv60k (960h)0.49317
3wav2vec 2.0 lv60k0.51317
4wav2vec 2.0 lv60k (10m)0.51317
5HuBERT xlarge ASR0.451000
6HuBERT xlarge0.461000
7HuBERT large ASR0.46300
8wav2vec 2.0 large XLSR530.47317
9HuBERT large0.46300
10wav2vec 2.0 (Audeering, emotion)0.49317
11HuBERT base0.4190
12WavLM large0.46316.62
13HuBERT base (superb, emotion)0.4290
14HuBERT base (superb, speaker)0.4290
15WavLM base+0.4194.70
16wav2vec 2.0 base (960h)0.3895
17WavLM base0.3994.70
18wav2vec 2.0 base0.3495
19wav2vec 2.0 base (10m)0.3495
20wav2vec 2.0 base (superb, emotion)0.3495
21wav2vec 2.0 base (superb, speaker)0.3495
22wav2vec 2.0 base (100h)0.3295
23HuBERT large (superb, emotion)0.29300
24HuBERT large (superb, speaker)0.29300
25wav2vec 2.0 large (100h)0.32317
26wav2vec 2.0 large (superb, emotion)0.31317
27wav2vec 2.0 large (superb, speaker)0.31317
28wav2vec 2.0 large (960h)0.31317
29wav2vec 2.0 large (10m)0.31317
30data2vec audio large (960h)0.31313.28
31data2vec audio base (100h)0.23313.28
32data2vec audio large (100h)0.23313.28
33data2vec audio large (10m)0.21313.28
34wav2vec 2.0 (SpeechBrain, emotion)0.1195
35data2vec audio base (960h)0.1693.16
36data2vec audio base (10m)0.1593.16
", + "bbox": [ + 243, + 126, + 754, + 670 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "2020). We expected that the layers closer to the input of the model (where the representation is more low-level) to be less predictive. In general, we found that this was the case (Figure 10). In some variants of wav2vec, however, intermediate representations performed better, possibly due to the misalignment of the training task of wav2vec with the emotion task. This analysis confirms the choice we made in the paper to mostly use the last two layers of the models. Preliminary analysis of the image and video models also explored different layers, but the results were similar to those we presented in audio, and are therefore not reported here.", + "bbox": [ + 169, + 696, + 823, + 792 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Video models We extracted embeddings from the 'Slow' (a 3D ResNet; see Feichtenhofer et al. (2019)), Slowfast (a 2-path model with one path capturing semantics and the other capturing fine details; see Feichtenhofer et al. (2019)), and X3d (a model that initially starts as a simple 2D image classifier but is expanded in several axes; see Feichtenhofer (2020)) architectures implemented in pytorchvideo (Fan et al., 2021). All video models were pre-trained on the Kinetics-400 dataset (Kay et al., 2017). The performance of the models is displayed in Figure 3C. Numeric correlation values are detailed in Table 6 along with model accuracy (Top1 and Top5) on Kinetics-400, and the number of parameters in each model. 
The accuracies and parameter counts are listed as reported in", + "bbox": [ + 169, + 811, + 823, + 924 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Fan et al. (2021). As with previous modalities, the number of parameters appears to be positively correlated with correlation to human similarity.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/e0af87b13737d1fdb5751ee23688ea68c677bf3f0accc0bf1ca6d8f515f5d2bb.jpg", + "table_caption": [ + "Table 6: All video baseline models used in the analysis." + ], + "table_footnote": [], + "table_body": "
Model nameCorrelationKinetics-400 Top1 AccKinetics-400 Top5 AccNumber of parameters (M)
1Slowfast r500.6576.9492.6934.57
2Slowfast r1010.6477.9093.2762.83
3Slow r500.6174.5891.6332.45
4X3d M0.5375.9492.723.79
5X3d S0.4973.3391.273.79
6X3d XS0.4869.1288.633.79
", + "bbox": [ + 217, + 169, + 781, + 296 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C.1.2 TEXT EMBEDDING METHODS", + "text_level": 1, + "bbox": [ + 171, + 316, + 433, + 330 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Caption text embedding. Since there are multiple captions per stimulus, an aggregation procedure had to be applied to produce a single embedding vector for each stimulus. In our main analysis, for each stimulus, we extracted the embedding for each associated caption and averaged these embeddings together before computing cosine similarity between the mean embeddings. We also tried an alternative approach of concatenating the captions together into a single paragraph, which we then passed through the LLMs to compute a single embedding per stimulus. We found that this did not consistently improve performance and in many cases even decreased it, though we note that we did not experiment with different permutations of the concatenated captions, nor did we extensively study other ways to combine them together. Future work could explore other techniques for pre-processing captions and aggregating representations from multiple captions in ways that would improve correlation with human similarity judgments.", + "bbox": [ + 169, + 340, + 826, + 493 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Tag text embedding. We experimented with several algorithms for computing similarity between sets (or multi-sets) of tags. The algorithms described in this section all involve using ConceptNet NumberBatch (CNNB) (Speer et al., 2017) as the embedding backbone for turning discrete tags into continuous vector representations. For each stimulus, we took the tags remaining in the final iteration, and tested whether they were found in the dictionary for our embedding model. If a tag was not found and if it contained no spaces, we tried to correct the spelling before trying to look it up in the dictionary again. 
If a tag contained spaces, we split it into individual words, correct their spelling, and averaged together the embedded representations of those words that were found in the dictionary. Tags that were not found even after spelling correction and splitting were excluded from the set and did not contribute to the final representation. For the methods marked ‘(no split)’ we did not split multi-word tags, instead we just excluded multi-word tags that were not found in the embedding model dictionary. In the following, we describe the different techniques used to generate predictions based on tag embeddings.", + "bbox": [ + 169, + 500, + 828, + 681 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Tags CNNB overlap. For each pair of stimuli, we counted the number of 'almost identical' tag embeddings, defined as every respective element of the two embeddings being less than a certain threshold apart (in our case, this threshold was 0.1). We then set similarity for that pair of stimuli to be this count, i.e., the number of 'almost identical' tags, normalized by the total number of tags across the respective two sets.", + "bbox": [ + 169, + 686, + 825, + 758 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Tags CNNB quantized. This method involves quantizing tags using cosine similarity to find the number of unique tags. For each pair of stimuli, we counted the number of tags assigned to the first stimulus that had cosine similarity greater than a certain threshold (in our case, this threshold was 0.7) to at least one tag of the second stimulus (call this value $N_A$ ) and vice-versa ( $N_B$ ). The minimum of these two values is the number of unique, shared tags between the two sets ( $\\min(N_A, N_B)$ ). The total number of unique tags across the two sets is then the total number of tags in each set ( $T_A + T_B$ ) minus the maximum number of shared tags ( $\\max(N_A, N_B)$ ). 
We compute similarity as the ratio of the number of unique, shared tags to the total number of unique tags, $S_{AB} = \\frac{\\min(N_A, N_B)}{T_A + T_B - \\max(N_A, N_B)}$ . For example, suppose the two sets of tags are $A: \\{a, b, c, g\\}$ and $B: \\{a, b, d, e\\}$ , so $T_A = T_B = 4$ , and that $a, c$ have cosine similarity of 0.8. The number of tags from set A found in set B is $N_A = 3$ , and those from B found in A is $N_B = 2$ . The number of unique, shared tags is $\\min(N_A, N_B) = 2$ (since", + "bbox": [ + 169, + 763, + 828, + 925 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "$\\{a, b, c\\}$ can be represented by $\\{a, b\\}$ , and the total number of unique tags is $4 + 4 - 3 = 5$ (since $\\{a, b, c, g, a, b, d, e\\}$ can be represented by $\\{a, b, d, e, g\\}$ ). The assigned similarity is then $S_{AB} = \\frac{2}{5}$ .", + "bbox": [ + 171, + 103, + 825, + 133 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Tags CNNB mean. The set of tag embeddings for each stimulus were averaged together to form a single embedding assigned to the respective stimulus. We then computed cosine similarity on the embeddings of each pair of stimuli.", + "bbox": [ + 169, + 138, + 823, + 181 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Tags CNNB mean (no split). 
Same as above, but without splitting multi-word tags (i.e., ones that contain spaces) during the embedding process.", + "bbox": [ + 169, + 186, + 823, + 217 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "All spelling corrections in the algorithms listed above were performed using the Python package `pyspellchecker^8`, taking the top corrected recommendation returned by the spell checker in each case.", + "bbox": [ + 169, + 223, + 823, + 265 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Tags to caption Roberta (SimCSE). Additionally, for the images datasets, we experimented with converting sets of tags into captions and then using those captions with our best-performing LLM ('sup-simcse-roberta-large') the same way we do with user-generated captions. To convert a set of tags into a caption, we joined the set of tags with commas and pretended them with the phrase \"This is an image of\".", + "bbox": [ + 169, + 272, + 825, + 342 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "C.1.3 OTHER DNN-BASED METHODS", + "text_level": 1, + "bbox": [ + 171, + 358, + 452, + 372 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "For the image datasets, we also considered several other methods that made use of DNNs but do not fit into the categories described above.", + "bbox": [ + 169, + 382, + 823, + 411 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "GPT3 prompting We experimented with prompting GPT3 (Brown et al., 2020), a large pre-trained language model, to directly output similarity judgments as a text-completion problem rather than having to access model embeddings as we did above. We used a few-shot prompting approach where in each prompt we included three context examples of pairs of tag sets and their associated similarity rating. 
We then provided the pair of tag sets for the two images that we wanted to get a similarity rating for but left the rating empty for the model to fill in.", + "bbox": [ + 169, + 426, + 823, + 510 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Here is an example prompt with the GPT3 response bolded and in square brackets:", + "bbox": [ + 171, + 517, + 715, + 532 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/7f2f031fb1676aaa68caedf8d18d388be0ba557c6db67d2c736e26ffbca707c5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
People described pairs of images using words.
How similar are the two images in each pair on a scale of 0-1 where 0 is completely dissimilar and 1 is completely similar?
Here are the descriptions of image one: tortoise, slow, protected, shell, turtle, scaly, old, cold-blooded
Here are the descriptions of image two: monkey, ape, mammal, black and white, hairy, agile, primate, smart, tree-dwelling
Rating: 0.05
Here are the descriptions of image one: rhinoceros, horn, gray, standing, heavy body, endangered, wild, africa, african
Here are the descriptions of image two: tiger, open mouth, stripes, feline, predator
Rating: 0.27
Here are the descriptions of image one: goat, eye, leg
Here are the descriptions of image two: mammal, wide-nosed, mandrill, primate, baboon, smart
Rating: 0.19
Here are the descriptions of image one: black, primate, mammal, hairy, chimpanzee, africa, african, great ape, smart, omnivore
Here are the descriptions of image two: zebra, striped, two-toned, wild, staring, mammal, equine, herd animal, africa
Rating: [0.14]
", + "bbox": [ + 173, + 540, + 834, + 864 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "We repeated this four times for each pair of images in each image dataset with a different set of context examples during each repetition and averaged together the GPT responses to get a final", + "bbox": [ + 169, + 869, + 823, + 898 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_footnote", + "text": "8https://pyspellchecker.readthedocs.io/en/latest/", + "bbox": [ + 189, + 909, + 624, + 922 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "similarity prediction for each pair. In total, creating the context examples required having access to human similarity judgments over only 12 pairs of images. We found that this approach yielded surprisingly good predictions, with an average correlation of $r = 0.62$ across the image datasets. We believe this approach merits future investigation to determine whether prompt engineering can further increase the performance.", + "bbox": [ + 169, + 103, + 823, + 174 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Image captioning models We experimented with using pre-trained image captioning models to generate captions for our images and then using those captions with our best-performing LLM ('sup-simcse-roberta-large') the same way we do with user-generated captions. We used three pre-trained image captioning models from HuggingFace ('flamingo-mini', 'vilt-b32-finetuned-vqa', and 'vit-gpt2-image-captioning') to generate text descriptions for our images. However, the performance was quite poor with an average of $r = 0.29$ across the three models. 
As a result, $O(N)$ language-based methods cannot easily be reduced to $O(1)$ even when domain-relevant pre-trained caption models are available.", + "bbox": [ + 169, + 191, + 826, + 303 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "C.2 WORD FREQUENCY ANALYSIS METHODS", + "text_level": 1, + "bbox": [ + 171, + 321, + 503, + 335 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "In this work, we also conducted an additional evaluation of prediction models beyond embedding-based techniques (described in the previous section). Specifically, we compared the predictions of embedding-based models, which utilize deep learning representations, with those of traditional methods of text mining.", + "bbox": [ + 169, + 348, + 823, + 405 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Before the word frequency analysis, we performed the following initial pre-processing steps", + "bbox": [ + 171, + 411, + 777, + 426 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For caption data, we concatenated all the captions describing the same stimulus into a single long \"document.\"", + "- For tag data, we wanted to prioritize tags that appeared earlier in the tag-mining chains and were rated higher. To that end, we gathered all tags from all iterations and duplicated tags from a given iteration based on the ratings they received. For example, if the tag \"tomato\" received three stars, then we would add the repeated tokens \"tomato, tomato, tomato\" to the aggregated list (\"document\"). In a given iteration, flagged tags are removed, but if they are rated later, then they are included. The total number of repetitions per token is equal to the sum of all the stars they received in all iterations. As a result, each token is repeated multiple times, which we take into consideration in consequent analysis." 
+ ], + "bbox": [ + 215, + 438, + 826, + 585 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "For the next steps, we used the Matlab text analytics toolbox $^{9}$ . Unless otherwise specified, we used default parameters for all functions. To generate similarity matrices, we applied the following methods:", + "bbox": [ + 169, + 595, + 823, + 638 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Co-occurrence method. In this approach, we simply counted the number of repeated pairs of words in documents $i$ and $j$ and normalized by the total number of pairs. Formally, we use $w_{i}$ to denote the word list of a document $i$ . Let $w_{i,k}$ be the $k$ -th word in the $w_{i}$ list of words, and let $|w_{i}|$ denote the length of the list. We denote by $\\delta(c,d)$ the indicator function that returns 1 if and only if the word $c$ is identical to the word $d$ , and 0 otherwise. We computed the co-occurrence score $S(w_{i},w_{j})$ according to the following formula:", + "bbox": [ + 169, + 646, + 823, + 729 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\nS (w _ {i}, w _ {j}) = \\frac {\\sum_ {k} \\sum_ {l} \\delta (w _ {i , k} , w _ {j , l})}{| w _ {i} | | w _ {j} |}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 729, + 612, + 765 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We suggest using this method only with tags and not with captions.", + "bbox": [ + 171, + 768, + 612, + 785 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Co-occurrence-rep. This method was applied only to tags. We used an identical procedure to the Co-occurrence method, except that we did not separate the words within a tag as separate tokens and instead treated the entire tag (that may include multiple words) as a single token.", + "bbox": [ + 169, + 790, + 823, + 834 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Rouge score. 
In this approach, similarity was estimated by computing the rouge score of the word lists associated with each pair of documents. The Rouge score was computed using rougeEvaluationScore (Rouge, 2004). We suggest using this method only with tags and not with captions.", + "bbox": [ + 169, + 839, + 825, + 897 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_footnote", + "text": "$^{9}$ https://mathworks.com/products/text-analytics.html", + "bbox": [ + 189, + 909, + 643, + 924 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The following methods make use of tokenized data and a pre-processing procedure that we found effective. Pre-processing was applied to both tag and caption data and tokenization was performed as follows:", + "bbox": [ + 169, + 103, + 823, + 145 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We separate all text into single words by applying the tokenizedDocument function.", + "- We added part of speech information using the addPartOfSpeechDetails function.", + "- We performed Lemmatization using the normalizeWords function.", + "- We erased punctuation from the token using the erasePunctuation function.", + "- We removed stopwords using the removeStopWords function.", + "- We removed words with less than two characters or more than 15 characters.", + "- We created a bag of words representation of each tokenized document using the bagOfWords function.", + "- We also removed words that were not present in more than two documents using the InfrequentWords function." 
+ ], + "bbox": [ + 215, + 159, + 823, + 333 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "With the results of these pre-processing steps, we then computed similarity matrices based on the following methods:", + "bbox": [ + 169, + 345, + 823, + 375 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "bm25S. We used bm25+ to compute similarity between documents (Barrios et al., 2016) using Matlab's bm25Similarity function. This function represents TF-IDF-like retrieval functions used in document retrieval. We used a variant that has a normalization function that properly handles documents with a long list of words.", + "bbox": [ + 169, + 381, + 823, + 438 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "tfidf-cosine. We computed pairwise cosine similarities between document pairs using the TF-IDF matrix derived from their word counts and Matlab's cosineSimilarity function.", + "bbox": [ + 169, + 444, + 823, + 472 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "C.3 SUPERVISED METHODS", + "text_level": 1, + "bbox": [ + 171, + 489, + 382, + 503 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Several previous studies investigated improving correlations by applying and fine-tuning simple linear transformations to embedding vectors $z^T \\mathbf{W}z$ where $\\mathbf{W} = \\mathrm{diag}(w_1, \\ldots, w_d)$ via a cross-validated ridge regression procedure that could be fit to ground-truth similarity judgments. The parameters of the diagonal reweighting matrix $\\mathbf{W}$ are fitted to a training subset of stimuli and used to predict similarity of pairs in a held-out validation set Peterson et al. (2018); Marjieh et al. (2022). To be consistent and make results comparable, here we report the results of performing this 6-fold cross-validated linear transformation (LT-CCV) on the model embeddings and datasets considered in this work. 
The analysis was carried out using the RidgeCV package from the scikit-learn Python library Pedregosa et al. (2011). Results with both normalized ('LT CCV (norm') and unnormalized ('LT CCV') regressors are shown in Figure 11; see RidgeCV documentation for details on normalization $^{10}$ . We see that the linear transformation does not consistently improve performance (and can even decrease it) when applied to many of the modality-based or stacked embeddings, but it does frequently improve performance when applied to caption embeddings. Due to their instability and risk of overfitting, we do not use these methods in our main analysis.", + "bbox": [ + 169, + 516, + 826, + 710 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_footnote", + "text": "$^{10}$ https://scikit-learn.org/stable/modules/generated/sklearn.linear_model. RidgeCV.html", + "bbox": [ + 171, + 896, + 823, + 922 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/713b9d34308b1033df18e83391dfaaf08fe926c77cfe63f85c9fdbc65fc19aa4.jpg", + "image_caption": [ + "Figure 11: Effect of fine-tuning model embeddings using a small subset of similarity judgments." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 250, + 823, + 734 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 27 + } +] \ No newline at end of file diff --git a/2023/Words are all you need_ Language as an approximation for human similarity judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_model.json b/2023/Words are all you need_ Language as an approximation for human similarity judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_model.json new file mode 100644 index 0000000000000000000000000000000000000000..b177aaf84f6515dad1630afee0a2519a44d0c1f6 --- /dev/null +++ b/2023/Words are all you need_ Language as an approximation for human similarity judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_model.json @@ -0,0 +1,4513 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.147 + ], + "angle": 0, + "content": "WORDS ARE ALL YOU NEED? LANGUAGE AS AN APPROXIMATION FOR HUMAN SIMILARITY JUDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.169, + 0.798, + 0.187 + ], + "angle": 0, + "content": "Raja Marjieh\\(^{1,*}\\), Pol van Rijn\\(^{2,*}\\), Ilia Sucholutsky\\(^{3,*}\\), Theodore R. Sumers\\(^{3}\\), Harin Lee\\(^{2,4}\\)" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.205, + 0.468, + 0.221 + ], + "angle": 0, + "content": "Thomas L. Griffiths\\(^{1,3,\\ast \\ast}\\), Nori Jacoby\\(^{2,\\ast \\ast}\\)" + }, + { + "type": "text", + "bbox": [ + 0.484, + 0.205, + 0.631, + 0.22 + ], + "angle": 0, + "content": "\\*\\*\\* Equal contribution." 
+ }, + { + "type": "text", + "bbox": [ + 0.484, + 0.221, + 0.81, + 0.236 + ], + "angle": 0, + "content": "\\(^{1}\\)Department of Psychology, Princeton University" + }, + { + "type": "text", + "bbox": [ + 0.484, + 0.236, + 0.792, + 0.25 + ], + "angle": 0, + "content": "2Max Planck Institute for Empirical Aesthetics" + }, + { + "type": "text", + "bbox": [ + 0.484, + 0.25, + 0.855, + 0.265 + ], + "angle": 0, + "content": "\\(^{3}\\)Department of Computer Science, Princeton University" + }, + { + "type": "text", + "bbox": [ + 0.484, + 0.265, + 0.849, + 0.28 + ], + "angle": 0, + "content": "4Max Planck Institute for Cognitive and Brain Sciences" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.316, + 0.547, + 0.331 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.35, + 0.77, + 0.671 + ], + "angle": 0, + "content": "Human similarity judgments are a powerful supervision signal for machine learning applications based on techniques such as contrastive learning, information retrieval, and model alignment, but classical methods for collecting human similarity judgments are too expensive to be used at scale. Recent methods propose using pre-trained deep neural networks (DNNs) to approximate human similarity, but pre-trained DNNs may not be available for certain domains (e.g., medical images, low-resource languages) and their performance in approximating human similarity has not been extensively tested. We conducted an evaluation of 611 pre-trained models across three domains – images, audio, video – and found that there is a large gap in performance between human similarity judgments and pre-trained DNNs. To address this gap, we propose a new class of similarity approximation methods based on language. To collect the language data required by these new methods, we also developed and validated a novel adaptive tag collection pipeline. 
We find that our proposed language-based methods are significantly cheaper, in the number of human judgments, than classical methods, but still improve performance over the DNN-based methods. Finally, we also develop 'stacked' methods that combine language embeddings with DNN embeddings, and find that these consistently provide the best approximations for human similarity across all three of our modalities. Based on the results of this comprehensive study, we provide a concise guide for researchers interested in collecting or approximating human similarity data. To accompany this guide, we also release all of the similarity and language data, a total of 206,339 human judgments, that we collected in our experiments, along with a detailed breakdown of all modeling results." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.702, + 0.338, + 0.717 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.735, + 0.827, + 0.862 + ], + "angle": 0, + "content": "Similarity judgments have long been used as a tool for studying human representations, both in cognitive science (Shepard, 1980; 1987; Tversky, 1977; Tenenbaum & Griffiths, 2001), as well as in neuroscience, as exemplified by the rich literature on representational similarity between humans and machines (Schrimpf et al., 2020; Kell et al., 2018; Linsley et al., 2017; Langlois et al., 2021; Yamins et al., 2014) whereby similarity patterns of brain activity are compared to those arising from a model of interest. Recent research in machine learning suggests that incorporating human similarity judgments in model training can play an important role in a variety of paradigms such as human alignment (Esling et al., 2018), contrastive learning (Khosla et al., 2020), information retrieval (Parekh et al., 2020), and natural language processing (Gao et al., 2021)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.867, + 0.825, + 0.898 + ], + "angle": 0, + "content": "However, building a large dataset based on human similarity judgments is very expensive and often infeasible since the number of judgments required is quadratic in the number of stimuli – for \\( N \\)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.911, + 0.825, + 0.925 + ], + "angle": 0, + "content": "*Correspondence: {raja.marjieh, is2961}@princeton.edu, pol.van-rijn@ae.mpg.de" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.107, + 0.825, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.481, + 0.828, + 0.54 + ], + "angle": 0, + "content": "Figure 1: Comparing human similarity scores gathered through crowdsourcing with ML pipelines. We used data from three modalities: images, audio, and video. For each modality, we extracted deep model embeddings and gathered human captions and tags. Word- and language-embedding models, as well as simple word-frequency analysis, were used to predict human similarity judgments." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.563, + 0.825, + 0.639 + ], + "angle": 0, + "content": "stimuli, \\( O(N^2) \\) judgments are required1. For example, to fully quantify the similarity of all possible dyadic pairs of 50,000 images, one needs to collect on the order of 1.25 billion (\\( \\sim \\frac{50000^2}{2} \\)) human similarity judgments. Thus, human judgments are the main bottleneck for machine-learning methods based on similarity. For this reason, the majority of available human similarity datasets are small by machine learning standards (up to a few thousand objects)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.644, + 0.827, + 0.812 + ], + "angle": 0, + "content": "Advancements in deep learning have brought an alternative approach that does not require extensive collection of human judgments. Specifically, the idea is to use the similarity between hidden representations in pre-trained deep neural networks (DNNs) to approximate human similarity (Peterson et al., 2018; Jha et al., 2020; Marjieh et al., 2022; Hebart et al., 2020; Roads & Love, 2021). Some of these methods also suggest fine-tuning representations on a small training set of human similarity judgments (Peterson et al., 2018). This, in turn, results in a significant reduction in the number of required human judgments down to \\( O(1) \\) (given the pre-trained model). While such methods are promising, they still require access to strong pre-trained models which may not necessarily be available in all domains (e.g., medical datasets, niche modalities, low-resource languages, etc.). In addition, representations obtained from neural networks may not always overlap with human similarity representations, given that the models can be trained for different objectives (i.e., their embeddings may be poor approximations for human similarity)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.817, + 0.828, + 0.875 + ], + "angle": 0, + "content": "A comprehensive comparison to assess which models perform well in predicting human similarity across different modalities is currently lacking in the literature. 
To this end, one of our main contributions in this paper is providing a first-of-its-kind large-scale evaluation of over 600 publicly-available pre-trained models as approximations for human similarity judgments on three modalities" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.826, + 0.926 + ], + "angle": 0, + "content": "1Depending on various assumptions, the full range of classical methods can require between \\( O(N \\log N) \\) (Jamieson & Nowak, 2011) and \\( O(N^3) \\) (Hebart et al., 2020) human judgments. In this work, we used \\( O(N^2) \\) human judgments (collecting all unique dyadic pairs) as the baseline for comparison" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "/images, audio, video). Our experiments reveal that there is a large gap in performance between the \\( O(1) \\) DNN methods and the classical \\( O(N^2) \\) similarity method we used as the baseline." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.252 + ], + "angle": 0, + "content": "To address this gap, we propose a new class of \\( O(N) \\) methods to efficiently and accurately approximate human similarity based on language. This is motivated by a long line of research in cognitive science suggesting that language is an extremely efficient way for humans to communicate information about their sensory environment (Murphy, 2004; Zaslavsky et al., 2018; Piantadosi et al., 2011; Jaeger & Levy, 2006). This in turn suggests that we can use textual descriptors to approximate similarity judgments across different modalities. 
Moreover, such textual descriptors can be collected at the cost of \\( O(N) \\) human judgments (as people describe individual stimuli rather than pairs), which renders this method scalable." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.258, + 0.828, + 0.385 + ], + "angle": 0, + "content": "We consider two approaches for approximating similarity from text data. One approach is to use pre-trained Large Language Models (LLM) to produce vector embeddings of the textual descriptions, and then use a measure of distance between these embeddings to approximate human similarity. This method is more domain-agnostic than the \\(O(1)\\) deep learning methods as it only requires access to a pre-trained LLM regardless of the modality of the original dataset. However, there are some cases where the domain may be out-of-distribution for all available LLMs (e.g., niche technical fields), or where no LLMs are available at all (e.g., low-resource languages). In such cases, the other approach is to use Word-Frequency Analysis (WFA) methods from classical text processing literature (Barrios et al., 2016; Rouge, 2004; Beel et al., 2016)," + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.827, + 0.502 + ], + "angle": 0, + "content": "As for the textual descriptions themselves, we consider two types, namely, free-text captions and concise word tags. Collecting captions for machine learning datasets is a well-established practice and can easily be done through crowdsourcing platforms. On the other hand, there is no consensus on best practices for collecting tags without a pre-existing taxonomy (i.e., open-set labels). To address this, we propose a novel adaptive tag mining pipeline called Sequential Transmission Evaluation Pipeline (STEP-Tag) which we describe in Section 2.2.4. As we will show, STEP-Tag allows to collect meaningful, diverse, and high-quality word tags for target stimuli in an online crowdsourcing environment." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.508, + 0.828, + 0.593 + ], + "angle": 0, + "content": "Finally, we propose one additional set of hybrid approximation methods that combine sensory information with textual descriptions while still requiring \\( O(N) \\) human judgments. For this approach, we propose to stack the embeddings derived from both domain-specific models (e.g., output from the last layer of an image classifier) with the LLM embedding of the respective textual description. When multi-modal models are available, we can similarly leverage the joint embedding of both the stimulus and its textual description." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.599, + 0.828, + 0.684 + ], + "angle": 0, + "content": "We evaluate all of these novel and existing methods across multiple modalities. We test the relative contributions of linguistic and sensory information in approximating human similarity and show that our proposed language-based methods provide both accurate and efficient approximations across modalities, even though they do not require a trained modality-specific deep learning model. Crucially, with this large-scale evaluation, we are able for the first time to provide researchers with a comprehensive guide of the tools to use for approximating human similarity at scale." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.689, + 0.488, + 0.704 + ], + "angle": 0, + "content": "To summarize, our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.721, + 0.799, + 0.736 + ], + "angle": 0, + "content": "- We conduct a comprehensive comparison of human similarity approximation methods." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.751, + 0.825, + 0.779 + ], + "angle": 0, + "content": "- We propose a novel modality-agnostic method for approximating similarity based on text and show that it is both efficient and competitive in terms of performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.795, + 0.825, + 0.823 + ], + "angle": 0, + "content": "- We propose STEP-Tag, a novel adaptive tagging pipeline, and show that it is effective for crowdsourcing high-quality and diverse sets of word tags." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.838, + 0.825, + 0.867 + ], + "angle": 0, + "content": "- We synthesize our findings into a detailed guide for researchers interested in approximating human similarity judgments at scale." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "- We collect and release ground-truth and approximated versions of a large behavioral dataset \\((N = 1,492)\\) across three different domains (images, audio, video), including two text-approximated similarity matrices for 1,000 audio clips and 1,000 video clips." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.295, + 0.119 + ], + "angle": 0, + "content": "2 DATASETS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.137, + 0.279, + 0.151 + ], + "angle": 0, + "content": "2.1 STIMULI" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.165, + 0.828, + 0.194 + ], + "angle": 0, + "content": "Throughout this work, we considered five stimulus datasets across three different modalities - images, audio, and video - consisting of a total of 31,320 dyadic pairs labeled with similarity." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.213, + 0.828, + 0.257 + ], + "angle": 0, + "content": "Images For images, we considered three datasets of common objects introduced in Peterson et al. 
(2018) – namely, animals, furniture, and vegetables – each consisting of 7,140 dyadic pairs (all unique pairs over 120 images)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.275, + 0.828, + 0.375 + ], + "angle": 0, + "content": "Audio For audio, we used the RAVDESS corpus (Livingstone & Russo (2018), released under a CC Attribution license), which consists of semantically neutral sentences spoken by 24 US American actors to convey a specific target emotion. To construct a 1,000-recording subset, we selected 3 emotions per speaker per sentence. We randomly omitted 104 emotional stimuli and included all 96 neutral recordings (the dataset only contains 2 neutral recordings per speaker per sentence). To construct the subset composed of 4,950 dyadic pairs (all unique pairs over 100 recordings), we randomly selected \\(\\sim 13\\) recordings per emotion from the 1,000." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.393, + 0.828, + 0.492 + ], + "angle": 0, + "content": "Video Finally, for the video dataset, we considered the Mini-Kinetics-200 dataset (Xie et al., 2018) (released under a CC BY 4.0 International License), which contains a large set of short video clips of human activities from 200 activity classes. Specifically, we focused on the validation split, which contains 5,000 videos in total. To construct our 1,000-video dataset, we sampled 5 random videos from each of the 200 activity categories. The 100-video subset (4,950 dyadic pairs) used in the similarity judgment collection experiment was then generated by sampling 100 random stimuli from the 1,000 list." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.513, + 0.449, + 0.527 + ], + "angle": 0, + "content": "2.2 HUMAN JUDGMENT COLLECTION" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.541, + 0.333, + 0.555 + ], + "angle": 0, + "content": "2.2.1 PARTICIPANTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.567, + 0.829, + 0.735 + ], + "angle": 0, + "content": "We collected data from \\(N = 1,492\\) US participants for the new behavioral experiments reported in this paper. Participants were recruited anonymously from Amazon Mechanical Turk and provided informed consent under an approved protocol by either the Institutional Review Board (IRB) at Princeton University (application 10859) or the Max Planck Ethics Council (application 2021_42) before taking part. Participants earned 9-12 USD per hour, and each session lasted less than 30 minutes. To help recruit reliable participants, we required that participants are at least 18 years of age, reside in the United States and have participated in more than 5,000 previous tasks with a \\(99\\%\\) approval rate (see Supplementary Section B for additional details about the behavioral experiments). All experiments were implemented with the Dallinger and PsyNet frameworks designed for automation of large-scale behavioral research (Harrison et al., 2020). In Supplementary Section A.1, we include the data that was collected, instructions used, and code for replication of the behavioral experiments. We also provide the code for computational experiments and analysis." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.754, + 0.407, + 0.768 + ], + "angle": 0, + "content": "2.2.2 SIMILARITY JUDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.78, + 0.828, + 0.837 + ], + "angle": 0, + "content": "We collected two batches of pairwise similarity judgements, one for each of the audio and video subsets, and were provided access to the similarity matrices for the three image datasets by the authors of Peterson et al. (2018). For each pair we collected \\(\\sim 5\\) similarity judgments to average out inter-rater noise." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.856, + 0.304, + 0.87 + ], + "angle": 0, + "content": "2.2.3 CAPTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.828, + 0.926 + ], + "angle": 0, + "content": "We collected free-text captions for the video and audio datasets. Captions for the image datasets were already collected by Marjieh et al. (2022) and used here with permission. For each stimulus, we collected \\(\\sim 10\\) captions." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.103, + 0.825, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.316, + 0.828, + 0.361 + ], + "angle": 0, + "content": "Figure 2: STEP-Tag, our novel tag-mining paradigm. We ran an adaptive process in which results of one iteration are used as inputs for subsequent iterations. In every iteration, participants can add a new tag, rate the relevance of existing tags or flag tags that are inappropriate." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.384, + 0.269, + 0.398 + ], + "angle": 0, + "content": "2.2.4 TAGS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.408, + 0.828, + 0.63 + ], + "angle": 0, + "content": "We propose a novel adaptive tag pipeline for simultaneous data collection and evaluation called Sequential Transmission Evaluation Pipeline (STEP) and apply it in the context of semantic tag mining (STEP-Tag). Our paradigm, STEP-Tag, allows researchers to efficiently collect high-quality word tags for a given stimulus (Figure 2) and extends existing crowdsourcing text-mining techniques (Von Ahn & Dabbish, 2008; 2004; Krishna et al., 2017; Law et al., 2007) by integrating ideas from transmission chain experiments (Kirby et al., 2008; Griffiths & Kalish, 2005). In STEP-Tag, participants adaptively create tags for a set of target stimuli and simultaneously evaluate the annotations made by previous participants. In each trial, participants are first given a stimulus (e.g., an image or audio fragment) and rate the relevance of tags that were created by other participants (on a 5-interval Likert scale) or flag a tag if they find it inappropriate (with tags removed if more than two people flag the tag). Next, participants are also given the opportunity to add new tags if they feel a relevant tag that describes the stimulus is missing. The results of the annotation procedure of one participant then propagate to the next participant (additional details about the paradigm, and screenshots are provided in Supplementary Section B.6). Ultimately, as the process unfolds over many iterations, meaningful tags are extracted and validated by multiple participants, enabling efficient open-label collection of a desired dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.637, + 0.826, + 0.722 + ], + "angle": 0, + "content": "To validate STEP-Tag, we compared it against several baselines: (i) randomly selecting only a single high-rated tag from the last iteration of STEP-Tag per stimulus, (ii) using tags only from the first iteration of STEP-Tag (equivalent to non-adaptive tag collection), and (iii) using class labels instead of tags. We found that tags produced after multiple iterations of STEP-Tag outperformed all three baselines in terms of quality (i.e., downstream performance for similarity reconstruction) and diversity (see Supplementary Section B.6.1)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.742, + 0.283, + 0.757 + ], + "angle": 0, + "content": "3 MODELS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.773, + 0.381, + 0.787 + ], + "angle": 0, + "content": "3.1 DNN-BASED METHODS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.828, + 0.926 + ], + "angle": 0, + "content": "We tested a wide range of pre-trained ML models that do not rely on text (overall we tested 611 models) and compared their internal representations to human similarity judgments and text-based predictions (Figure 1A). We compiled our model pool by leveraging pre-trained model repositories (or zoos) available online. In particular, for images we use 569 pre-trained models from the pytorch-image-models package timm (Wightman, 2019), for audio we use 36 pre-trained models available in the torchaudio package (Yang et al., 2021) (see also Supplementary Figure 10 for an analysis of layer depth), and for video we use 6 pre-trained models available from the PyTorchVideo package (Fan et al., 2021). Because of the recent success of multimodal training, we additionally included 9 multimodal models based on CLIP from OpenAI's public implementation." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "tion (https://github.com/openai/CLIP) for the image datasets, and compared them to \"stacked\" representations (i.e., concatenating embeddings from separate image and text models)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.15, + 0.381, + 0.163 + ], + "angle": 0, + "content": "3.2 LLM-BASED METHODS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.177, + 0.827, + 0.274 + ], + "angle": 0, + "content": "Tags To embed tags we used ConceptNet Numberbatch (CNNB) which is a word-embedding model trained on the ConceptNet knowledge graph that leverages other popular word embedding models such as word2vec and GloVe (Speer et al., 2017). We experimented with several algorithms for computing similarity between sets (or multi-sets) of tags and share the details in Supplementary Section C.1.2. As a control, for images we also tried converting tags into a caption of the form \"This is an image of tag1, tag2, ...\" and embedding them using a language model (see Supplementary Section C.1.2)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.29, + 0.828, + 0.444 + ], + "angle": 0, + "content": "Captions To embed captions, we used four pre-trained LLMs from HuggingFace (Wolf et al., 2020): 'bert-base-uncased', 'deberta-xlarge-mnli', 'sup-simcse-bert-base-uncased', and 'sup-simcse-roberta-large'. SimCSE is a pre-training procedure that uses semantic entailment in a contrastive learning objective (Gao et al., 2021). 
According to BERTScore (Zhang et al., 2020), the latter three models are ranked in the top 40 models in terms of correlation with human evaluations on certain tasks, with 'deberta-xlarge-mnli' ranked first. However, in our experiments, we found that embedding similarity computed from 'sup-simcse-roberta-large' has the highest correlation with human similarity judgments out of the four models. For SimCSE-based models, we used representations from the (final) embedding layer (where the SimCSE contrastive objective is actually applied). For the other two models, we computed embeddings from every layer, but restricted the main analysis to embeddings from the penultimate layers. This was done in order to be consistent with our procedure for DNNs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.459, + 0.828, + 0.543 + ], + "angle": 0, + "content": "Other methods For the image datasets, we also considered several other methods that made use of LLMs but do not fit into the categories described above. One approach was using prompts with GPT3 (Brown et al., 2020) in a text-completion setup to directly predict similarity without extracting embeddings (see Supplementary Section C.1.3 for details). We also tried using pre-trained image captioning models to generate captions automatically (i.e. this would reduce \\( O(N) \\) language-based methods to \\( O(1) \\)) but this resulted in poor performance (see Supplementary Section C.1.3 for details)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.56, + 0.362, + 0.573 + ], + "angle": 0, + "content": "3.3 STACKING METHODS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.587, + 0.826, + 0.673 + ], + "angle": 0, + "content": "We produce stacked representations for each modality by concatenating the single best-performing (see Figure 3) LLM's embeddings with the embeddings from the five best-performing DNNs into a single set of long embeddings. 
Since the two sets of embeddings come from different spaces, we add a single tunable hyperparameter for rescaling the LLM embeddings. This hyperparameter can be set manually, but we use a small number of ground-truth similarity judgments (we use dyadic pairs for just 20 stimuli) to optimize it automatically." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.687, + 0.557, + 0.702 + ], + "angle": 0, + "content": "3.4 WORD FREQUENCY ANALYSIS (WFA) METHODS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.713, + 0.827, + 0.798 + ], + "angle": 0, + "content": "The aim of the WFA methods is to enable similarity approximation from language using traditional embedding-free techniques. Such techniques are particularly useful for low-resource languages or cross-cultural comparisons (Cowen & Keltner, 2017; Barrett, 2020), for which pre-trained models are lacking, as they work solely on the basis of the text itself. The WFA methods we considered included measuring co-occurrence, Rouge score, bm25s, and tfidf. We provide details on each of these procedures in Supplementary Section C.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.815, + 0.383, + 0.829 + ], + "angle": 0, + "content": "3.5 PERFORMANCE METRIC" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.841, + 0.828, + 0.926 + ], + "angle": 0, + "content": "We quantified performance by computing the Pearson correlation \\( r \\) between approximated similarity scores and the ground-truth human similarity scores for all the unique dyadic pairs in a dataset. We compared the performance of the different prediction methods to the inter-rater reliability (IRR) of participants, which serves as an approximate upper-bound on performance. Following Peterson et al. (2018), we computed IRR for each human similarity matrix using the split-half correlation method with a Spearman-Brown correction (Brown, 1910)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.284, + 0.119 + ], + "angle": 0, + "content": "4 RESULTS" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.144, + 0.823, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.307, + 0.82, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.47, + 0.82, + 0.613 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.634, + 0.819, + 0.745 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.766, + 0.828, + 0.823 + ], + "angle": 0, + "content": "Figure 3: Correlation to human similarity. A: Top 50 models averaged over the 3 image datasets. B: Audio dataset. C: Video dataset. Each DNN baseline bar averages over multiple variants of the same architecture; the dots indicate average correlation of individual variants of the architecture. D: Average for each method type for each modality. The error bars are standard deviations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Figure 3 summarizes the performance of the various techniques across the three modalities. Note that the image modality results in Figure 3A are averaged across the three image datasets and only show the top 50 methods for this modality due to space constraints. Figure 3D shows the mean performance of the methods of each type for each modality. When viewing these results, a clear hierarchy emerges. 
While no approximation methods can perfectly match the ground-truth pairwise similarity, (see the gap between the methods and IRR), stacked ones get close and are consistently more aligned with" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "human similarity than other methods across all three modalities. Text-based methods come next in this hierarchy, followed by DNN-based ones. We also considered supervised methods that reweight DNN-based embeddings based on a small set of human similarity judgments, but we found that the performance was unstable (see Supplementary Section C.3 for details)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.827, + 0.281 + ], + "angle": 0, + "content": "The pre-eminence of stacked results suggests that LLMs and DNNs capture at least some different sources of variance in human similarity judgments. This is reinforced by our surprising finding that stacked representations from CLIP, a state-of-the-art jointly pre-trained multi-modal model, do not outperform stacked representations from independently trained models. We hypothesize that this happens because information is lost from both modalities when optimizing for a joint embedding. However, we note that the modest size of the performance gap between stacked and LLMs/DNNs, suggests that there is also significant overlap between aspects of human similarity captured by language and perception." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.286, + 0.825, + 0.412 + ], + "angle": 0, + "content": "To investigate the effect of architecture and downstream task (e.g., classification) performance on alignment of DNNs with human similarity, for the image modality we compared similarity approximation performance against the number of model parameters on a log scale (Figure 4A) and ImageNet classification performance (Deng et al., 2009) (Figure 4B). Overall, we found a positive correlation between similarity approximation performance and the number of model parameters \\((r = 0.39, p < 0.001)\\) and a smaller but still significant positive correlation with performance on ImageNet \\((r = 0.26, p < 0.001)\\). There were some notable exceptions with particularly high ImageNet performance but low similarity performance, such as the image transformer BEiT (Bao et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.418, + 0.827, + 0.53 + ], + "angle": 0, + "content": "Finally, we leverage both DNN-based methods and our proposed language-based methods to approximate similarity matrices that would otherwise require an unaffordable number of human similarity judgments to collect all dyadic pairs. Specifically, we approximate the two similarity matrices corresponding to all 1,000 audio clips and 1,000 video clips in our datasets using every method listed for each of those modalities in Figure 3. We provide visualizations of the resulting matrices at https://words-are-all-you-need.s3.amazonaws.com/index.html. We note that to exhaustively collect all dyadic pairs with five judgments per pair would normally require roughly 2.5 million human judgments for each of these matrices." 
+ }, + { + "type": "image", + "bbox": [ + 0.273, + 0.566, + 0.72, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.716, + 0.724, + 0.865 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.881, + 0.825, + 0.911 + ], + "angle": 0, + "content": "Figure 4: Correlation to human similarity judgments as a function of A: number of model parameters; and B: ImageNet accuracy." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.107, + 0.825, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.419, + 0.768, + 0.436 + ], + "angle": 0, + "content": "Figure 5: Guide to collecting and estimating human similarity judgments at scale." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.458, + 0.473, + 0.473 + ], + "angle": 0, + "content": "5 DISCUSSION AND CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.49, + 0.827, + 0.561 + ], + "angle": 0, + "content": "In this work, we compared novel and existing methods for approximating human similarity judgments. The main contributions can be summarized as follows: 1) we provide a simple and accessible approach for approximating \\( O(N^2) \\) human similarity judgments using \\( O(N) \\) annotations, 2) we propose a new adaptive pipeline STEP-tag for tag mining, 3) we evaluate our approach against \\( 600+ \\) domain-specific state-of-the-art DNNs, and 4) we publicly release all data comprising 206,339 human judgments." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.567, + 0.825, + 0.693 + ], + "angle": 0, + "content": "Based on these, we are now able to provide researchers with a best-practices guide to collecting similarity datasets. Our guide is based on two bottlenecks that researchers may face: one is the limit on the number of judgments that can be collected (e.g., due to cost) and the second is the availability of pre-trained models (i.e., either DNNs or LLMs). Our results make it clear that deep learning can provide good approximations for human similarity. In fact, when both pre-trained LLMs and DNNs are available, stacking their representations is consistently the best approach. However, even when neither type of pre-trained models are available, we suggest that classical word-frequency analysis methods still provide researchers with an efficient and competitive method for approximating human similarity. Our guide, comprehensively covering these and other cases, is laid out in Figure 5." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.699, + 0.827, + 0.909 + ], + "angle": 0, + "content": "One limitation of this work is that while similarity proxies generated from our pipeline can support ML datasets, they are also at risk of baking in high-level human biases that can lead to adverse societal implications, such as amplifying race and gender gaps. Researchers should devote utmost care to what they choose to incorporate in their training objective. Another limitation of our work is the fact that we were restricted to English text data and US participants. However, we believe that our approach and proposed methods (especially STEP-tag and the word-frequency methods) pave the way for the study of cross-cultural variation of human semantic representations by providing efficient tools for crowdsourcing high-quality semantic descriptors across languages. 
This is particularly relevant for low-resource languages, where our tag-mining techniques can work even with the absence of pre-trained ML models (Thompson et al., 2020; Barrett, 2020). We are currently expanding our work to include more languages and diverse cultures. Taken together, our results showcase how we can leverage language to make machine representations more human-like. Moreover, it highlights the importance of combining machine learning and cognitive science approaches for mutually advancing both fields. In particular, we believe that the methodologies adopted in this work have the potential to greatly advance basic research on naturalistic representations in cognitive science." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.357, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.134, + 0.825, + 0.164 + ], + "angle": 0, + "content": "This work was supported by a grant from the John Templeton Foundation to TLG, an NDSEG fellowship to TRS, and an NSERC fellowship (567554-2022) to IS." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.184, + 0.289, + 0.2 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.208, + 0.826, + 0.252 + ], + "angle": 0, + "content": "Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33:12449-12460, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.261, + 0.825, + 0.293 + ], + "angle": 0, + "content": "Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, and Michael Auli. data2vec: A general framework for self-supervised learning in speech, vision and language, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.3, + 0.825, + 0.33 + ], + "angle": 0, + "content": "Hangbo Bao, Li Dong, and Furu Wei. BEiT: BERT pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.339, + 0.825, + 0.369 + ], + "angle": 0, + "content": "H Clark Barrett. Towards a cognitive science of the human: cross-cultural approaches and their urgency. Trends in Cognitive Sciences, 24(8):620-638, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.379, + 0.826, + 0.421 + ], + "angle": 0, + "content": "Federico Barrios, Federico López, Luis Argerich, and Rosa Wachenchauzer. Variations of the similarity function of textrank for automated summarization. arXiv preprint arXiv:1602.03606, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.432, + 0.825, + 0.462 + ], + "angle": 0, + "content": "Joeran Beel, Bela Gipp, Stefan Langer, and Corinna Breitinger. Paper recommender systems: a literature survey. International Journal on Digital Libraries, 17(4):305-338, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.471, + 0.826, + 0.515 + ], + "angle": 0, + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.524, + 0.827, + 0.554 + ], + "angle": 0, + "content": "William Brown. Some experimental results in the correlation of mental abilities 1. 
British Journal of Psychology, 1904-1920, 3(3):296-322, 1910." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.563, + 0.826, + 0.607 + ], + "angle": 0, + "content": "Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, et al. WavLM: Large-scale self-supervised pre-training for full stack speech processing. arXiv preprint arXiv:2110.13900, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.616, + 0.827, + 0.658 + ], + "angle": 0, + "content": "Alan S Cowen and Dacher Keltner. Self-report captures 27 distinct categories of emotion bridged by continuous gradients. Proceedings of the National Academy of Sciences, 114(38):E7900-E7909, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.669, + 0.827, + 0.713 + ], + "angle": 0, + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255. IEEE, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.722, + 0.825, + 0.753 + ], + "angle": 0, + "content": "Philippe Esling, Adrien Bitton, et al. Generative timbre spaces: regularizing variational auto-encoders with perceptual metrics. arXiv preprint arXiv:1805.08501, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.762, + 0.827, + 0.833 + ], + "angle": 0, + "content": "Haoqi Fan, Tullie Murrell, Heng Wang, Kalyan Vasudev Alwala, Yanghao Li, Yilei Li, Bo Xiong, Nikhila Ravi, Meng Li, Haichuan Yang, Jitendra Malik, Ross Girshick, Matt Feiszli, Aaron Adcock, Wan-Yen Lo, and Christoph Feichtenhofer. PyTorchVideo: A deep learning library for video understanding. In Proceedings of the 29th ACM International Conference on Multimedia, 2021. https://pytorchvideo.org/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.843, + 0.825, + 0.873 + ], + "angle": 0, + "content": "Christoph Feichtenhofer. 
X3d: Expanding architectures for efficient video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 203-213, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. SlowFast networks for video recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6202-6211, 2019." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.208, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Tianyu Gao, Xingcheng Yao, and Danqi Chen. SimCSE: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.827, + 0.172 + ], + "angle": 0, + "content": "Thomas L Griffiths and Michael L Kalish. A bayesian view of language evolution by iterated learning. In Proceedings of the Annual Meeting of the Cognitive Science Society, volume 27, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.18, + 0.827, + 0.223 + ], + "angle": 0, + "content": "Peter Harrison, Raja Marjieh, Federico Adolfi, Pol van Rijn, Manuel Anglada-Tort, Ofer Tchernichovski, Pauline Larrouy-Maestri, and Nori Jacoby. Gibbs sampling with people. Advances in Neural Information Processing Systems, 33:10659-10671, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.233, + 0.827, + 0.275 + ], + "angle": 0, + "content": "Martin N Hebart, Charles Y Zheng, Francisco Pereira, and Chris I Baker. 
Revealing the multidimensional mental representations of natural objects underlying human similarity judgements. Nature Human Behaviour, 4(11):1173-1185, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.285, + 0.827, + 0.34 + ], + "angle": 0, + "content": "Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. HuBERT: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.351, + 0.827, + 0.38 + ], + "angle": 0, + "content": "T Jaeger and Roger Levy. Speakers optimize information density through syntactic reduction. Advances in Neural Information Processing Systems, 19, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.389, + 0.826, + 0.432 + ], + "angle": 0, + "content": "Kevin G Jamieson and Robert D Nowak. Low-dimensional embedding using adaptively selected ordinal data. In 2011 49th Annual Allerton Conference on Communication, Control, and Computing (Allerton), pp. 1077-1084. IEEE, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.441, + 0.826, + 0.47 + ], + "angle": 0, + "content": "Aditi Jha, Joshua Peterson, and Thomas L Griffiths. Extracting low-dimensional psychological representations from convolutional neural networks. arXiv preprint arXiv:2005.14363, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.479, + 0.827, + 0.522 + ], + "angle": 0, + "content": "Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natev, et al. The Kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.531, + 0.826, + 0.574 + ], + "angle": 0, + "content": "Alexander JE Kell, Daniel LK Yamins, Erica N Shook, Sam V Norman-Haignere, and Josh H McDermott. A task-optimized neural network replicates human auditory behavior, predicts brain responses, and reveals a cortical processing hierarchy. Neuron, 98(3):630-644, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.583, + 0.826, + 0.626 + ], + "angle": 0, + "content": "Prannay Khosla, Piotr Teterwak, Chen Wang, Aaron Sarna, Yonglong Tian, Phillip Isola, Aaron Maschinot, Ce Liu, and Dilip Krishnan. Supervised contrastive learning. Advances in Neural Information Processing Systems, 33:18661-18673, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.635, + 0.826, + 0.678 + ], + "angle": 0, + "content": "Simon Kirby, Hannah Cornish, and Kenny Smith. Cumulative cultural evolution in the laboratory: An experimental approach to the origins of structure in human language. Proceedings of the National Academy of Sciences, 105(31):10681-10686, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.687, + 0.827, + 0.743 + ], + "angle": 0, + "content": "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International Journal of Computer Vision, 123(1):32-73, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.753, + 0.827, + 0.797 + ], + "angle": 0, + "content": "Thomas Langlois, Haicheng Zhao, Erin Grant, Ishita Dasgupta, Tom Griffiths, and Nori Jacoby. Passive attention in artificial neural networks predicts human visual selectivity. Advances in Neural Information Processing Systems, 34, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.805, + 0.826, + 0.835 + ], + "angle": 0, + "content": "Edith LM Law, Luis Von Ahn, Roger B Dannenberg, and Mike Crawford. TagATune: A game for music and sound annotation. In ISMIR, volume 3, pp. 2, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.844, + 0.826, + 0.873 + ], + "angle": 0, + "content": "Kristin Lemhöfer and Mirjam Broersma. Introducing lexdale: A quick and valid lexical test for advanced learners of english. Behavior research methods, 44(2):325-343, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Drew Linsley, Sven Eberhardt, Tarun Sharma, Pankaj Gupta, and Thomas Serre. What are the visual features underlying human versus machine vision? In Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 2706-2714, 2017." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.148 + ], + "angle": 0, + "content": "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10012-10022, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.827, + 0.185 + ], + "angle": 0, + "content": "Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. arXiv preprint arXiv:2201.03545, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.193, + 0.825, + 0.235 + ], + "angle": 0, + "content": "Steven R Livingstone and Frank A Russo. The Ryerson audio-visual database of emotional speech and song (RAVDESS): A dynamic, multimodal set of facial and vocal expressions in north american english. PloS one, 13(5):e0196391, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.244, + 0.827, + 0.274 + ], + "angle": 0, + "content": "Raja Marjieh, Ilia Sucholutsky, Theodore R Sumers, Nori Jacoby, and Thomas L Griffiths. Predicting human similarity judgments using large language models. arXiv preprint arXiv:2202.04728, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.282, + 0.825, + 0.325 + ], + "angle": 0, + "content": "Alice E Milne, Roberta Bianco, Katarina C Poole, Sijia Zhao, Andrew J Oxenham, Alexander J Billig, and Maria Chait. An online headphone screening test based on dichotic pitch. Behavior Research Methods, 53(4):1551-1562, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.333, + 0.581, + 0.35 + ], + "angle": 0, + "content": "Gregory Murphy. The big book of concepts. MIT press, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.357, + 0.827, + 0.399 + ], + "angle": 0, + "content": "Zarana Parekh, Jason Baldridge, Daniel Cer, Austin Waters, and Yinfei Yang. Crisscrossed captions: Extended intramodal and intermodal semantic similarity judgments for MS-COCO. arXiv preprint arXiv:2004.15020, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.408, + 0.827, + 0.464 + ], + "angle": 0, + "content": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825–2830, 2011." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.473, + 0.825, + 0.516 + ], + "angle": 0, + "content": "Joshua C Peterson, Joshua T Abbott, and Thomas L Griffiths. Evaluating (and improving) the correspondence between deep neural networks and human representations. Cognitive Science, 42 (8):2648-2669, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.525, + 0.825, + 0.555 + ], + "angle": 0, + "content": "Steven T Piantadosi, Harry Tily, and Edward Gibson. Word lengths are optimized for efficient communication. Proceedings of the National Academy of Sciences, 108(9):3526-3529, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.563, + 0.827, + 0.634 + ], + "angle": 0, + "content": "Mirco Ravanelli, Titouan Parcollet, Peter Plantinga, Aku Rouhe, Samuele Cornell, Loren Lugosch, Cem Subakan, Nauman Dawalatabad, Abdelwahab Heba, Jianyuan Zhong, Ju-Chieh Chou, Sung-Lin Yeh, Szu-Wei Fu, Chien-Feng Liao, Elena Rastorgueva, François Grondin, William Aris, Hwidong Na, Yan Gao, Renato De Mori, and Yoshua Bengio. SpeechBrain: A general-purpose speech toolkit, 2021. arXiv:2106.04624." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.642, + 0.825, + 0.685 + ], + "angle": 0, + "content": "Brett D Roads and Bradley C Love. Enriching ImageNet with human similarity judgments and psychological embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3547-3557, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.693, + 0.825, + 0.723 + ], + "angle": 0, + "content": "Lin CY Rouge. A package for automatic evaluation of summaries. In Proceedings of Workshop on Text Summarization of ACL, Spain, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.731, + 0.825, + 0.774 + ], + "angle": 0, + "content": "Martin Schrimpf, Jonas Kubilius, Ha Hong, Najib J Majaj, Rishi Rajalingham, Elias B Issa, Kohitij Kar, Pouya Bashivan, Jonathan Prescott-Roy, Franziska Geiger, et al. 
Brain-Score: Which artificial neural network for object recognition is most brain-like? BioRxiv, pp. 407007, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.783, + 0.825, + 0.811 + ], + "angle": 0, + "content": "Roger N Shepard. Multidimensional scaling, tree-fitting, and clustering. Science, 210(4468):390-398, 1980." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.82, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Roger N Shepard. Toward a universal law of generalization for psychological science. Science, 237 (4820):1317-1323, 1987." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.858, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI Conference on Artificial Intelligence, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.896, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International conference on machine learning, pp. 6105-6114. PMLR, 2019." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.133 + ], + "angle": 0, + "content": "Joshua B Tenenbaum and Thomas L Griffiths. Generalization, similarity, and bayesian inference. Behavioral and brain sciences, 24(4):629-640, 2001." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.826, + 0.171 + ], + "angle": 0, + "content": "Bill Thompson, Seán G Roberts, and Gary Lupyan. 
Cultural influences on word meanings revealed through large-scale semantic alignment. Nature Human Behaviour, 4(10):1029-1038, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.179, + 0.686, + 0.195 + ], + "angle": 0, + "content": "Amos Tversky. Features of similarity. Psychological review, 84(4):327, 1977." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.202, + 0.825, + 0.232 + ], + "angle": 0, + "content": "Luis Von Ahn and Laura Dabbish. Labeling images with a computer game. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems, pp. 319-326, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.24, + 0.827, + 0.268 + ], + "angle": 0, + "content": "Luis Von Ahn and Laura Dabbish. Designing games with a purpose. Communications of the ACM, 51(8):58-67, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.277, + 0.825, + 0.321 + ], + "angle": 0, + "content": "Johannes Wagner, Andreas Triantafyllopoulos, Hagen Wierstorf, Maximilian Schmitt, Felix Burkhardt, Florian Eyben, and Björn W. Schuller. Dawn of the transformer era in speech emotion recognition: closing the valence gap, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.329, + 0.827, + 0.4 + ], + "angle": 0, + "content": "Shu wen Yang, Po-Han Chi, Yung-Sung Chuang, Cheng-I Jeff Lai, Kushal Lakhotia, Yist Y. Lin, Andy T. Liu, Jiatong Shi, Xuankai Chang, Guan-Ting Lin, Tzu-Hsien Huang, Wei-Cheng Tseng, Kotik Lee, Da-Rong Liu, Zili Huang, Shuyan Dong, Shang-Wen Li, Shinji Watanabe, Abdelrahman Mohamed, and Hung yi Lee. SUPERB: Speech Processing Universal PERformance Benchmark. In Proc. Interspeech 2021, pp. 1194-1198, 2021. doi: 10.21437/Interspeech.2021-1775." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.408, + 0.826, + 0.438 + ], + "angle": 0, + "content": "Ross Wightman. PyTorch image models. https://github.com/rwrightman/pytorch-image-models, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.445, + 0.828, + 0.53 + ], + "angle": 0, + "content": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 38-45. Association for Computational Linguistics, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.538, + 0.827, + 0.581 + ], + "angle": 0, + "content": "Kevin JP Woods, Max H Siegel, James Traer, and Josh H McDermott. Headphone screening to facilitate web-based auditory experiments. Attention, Perception, & Psychophysics, 79(7): 2064-2072, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.59, + 0.825, + 0.634 + ], + "angle": 0, + "content": "Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, and Kevin Murphy. Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 305–321, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.641, + 0.825, + 0.67 + ], + "angle": 0, + "content": "Daniel Yamins. An optimization-based approach to understanding sensory systems. The Cognitive Neurosciences, 4(V1):381, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.678, + 0.825, + 0.722 + ], + "angle": 0, + "content": "Daniel LK Yamins, Ha Hong, Charles F Cadieu, Ethan A Solomon, Darren Seibert, and James J DiCarlo. Performance-optimized hierarchical models predict neural responses in higher visual cortex. Proceedings of the National Academy of Sciences, 111(23):8619-8624, 2014." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.73, + 0.828, + 0.813 + ], + "angle": 0, + "content": "Yao-Yuan Yang, Moto Hira, Zhaoheng Ni, Anjali Chourdia, Artyom Astafurov, Caroline Chen, Ching-Feng Yeh, Christian Puhrsch, David Pollack, Dmitriy Genzel, Donny Greenberg, Edward Z. Yang, Jason Lian, Jay Mahadeokar, Jeff Hwang, Ji Chen, Peter Goldsborough, Prabhat Roy, Sean Narethiran, Shinji Watanabe, Soumith Chintala, Vincent Quenneville-Bélair, and Yangyang Shi. Torchaudio: Building blocks for audio and speech processing. arXiv preprint arXiv:2110.15018, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.822, + 0.828, + 0.865 + ], + "angle": 0, + "content": "Noga Zaslavsky, Charles Kemp, Terry Regier, and Naftali Tishby. Efficient compression in color naming and its evolution. Proceedings of the National Academy of Sciences, 115(31):7937-7942, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.874, + 0.825, + 0.918 + ], + "angle": 0, + "content": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. Bertscore: Evaluating text generation with bert. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=SkeHuCVFDr." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.918 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.424, + 0.119 + ], + "angle": 0, + "content": "SUPPLEMENTARY MATERIALS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.137, + 0.375, + 0.153 + ], + "angle": 0, + "content": "A STIMULI AND DATA" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.169, + 0.439, + 0.183 + ], + "angle": 0, + "content": "A.1 CODE AND DATA AVAILABILITY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.828, + 0.28 + ], + "angle": 0, + "content": "A link is provided to the public, containing all the data collected for this project during the review stage. It includes the new human behavioral data, the computational experiments with machine learning models, and all the necessary analyses scripts for producing the results. Additionally, the repository includes the Dallinger/PsyNet source codes for reproducing the behavioral experiments. Finally, we present an interactive visualization for exploring the similarity between stimuli as experienced by humans and different methods reported in the paper." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.3, + 0.429, + 0.315 + ], + "angle": 0, + "content": "B BEHAVIORAL PARADIGMS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.332, + 0.32, + 0.345 + ], + "angle": 0, + "content": "B.1 PARTICIPANTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.357, + 0.827, + 0.373 + ], + "angle": 0, + "content": "The exact number of participants for each of the 9 new behavioral experiments is reported in Table 1." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.341, + 0.397, + 0.656, + 0.412 + ], + "angle": 0, + "content": "Table 1: Behavioral experiment summary table." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.412, + 0.842, + 0.58 + ], + "angle": 0, + "content": "
ModalityParadigmRespectTotal stimuliTrials per participantSectionNPre-screening
ImagesTagsAnimals120602.2.456LX
ImagesTagsFurniture120602.2.458LX
ImagesTagsVegetables120602.2.457LX
AudioSimilarityEmotions100852.2.2252HT
AudioCaptionsEmotions1,000502.2.3151HT, LX
AudioTagsEmotions1,000502.2.4217HT, LX
VideoSimilarityActivities100852.2.2284HT
VideoCaptionsActivities1,000502.2.3196HT, LX
VideoTagsActivities1,000502.2.4221HT, LX
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.195, + 0.582, + 0.825, + 0.61 + ], + "angle": 0, + "content": "Note. 'N' denotes the number of participants included in the analysis; 'LX' denotes the LexTALE English proficiency pre-screening task; 'HT' denotes the headphone test." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.636, + 0.347, + 0.649 + ], + "angle": 0, + "content": "B.2 IMPLEMENTATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.661, + 0.827, + 0.775 + ], + "angle": 0, + "content": "All behavioral experiments were implemented using the Dallinger4 and PsyNet (Harrison et al., 2020) frameworks. Dallinger is a modern tool for experiment hosting and deployment which automates the process of participant recruitment and compensation by integrating cloud-based services such as Heroku5 with online crowd-sourcing platforms such as AMT. PsyNet is a novel experiment design framework that builds on Dallinger and allows for flexible specification of experiment timelines as well as providing support for a wide array of tasks across different modalities (visual, auditory and audio-visual). Participants interact with the experiment through their web-browser, which in turn communicates with a backend Python server responsible for the experiment logic." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.791, + 0.334, + 0.805 + ], + "angle": 0, + "content": "B.3 PRE-SCREENING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.817, + 0.827, + 0.847 + ], + "angle": 0, + "content": "A common technique for filtering out participants that are likely to deliver low-quality responses, as well as automated scripts (bots), is to implement pre-screening tasks prior to the main part of" + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.856, + 0.863, + 0.87 + ], + "angle": 0, + "content": "\\(^{2}\\)Code and data: https://osf.io/kzbr5/?view_only=3dea58e008ce41c290ef0f374bddbf444" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.871, + 0.829, + 0.895 + ], + "angle": 0, + "content": "3Interactive plots: https://words-are-all-you-need.s3.amazon.com/index.html" + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.856, + 0.863, + 0.895 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.194, + 0.897, + 0.493, + 0.91 + ], + "angle": 0, + "content": "4https://dallinger.readthedocs.io/" + }, + { + "type": "ref_text", + "bbox": [ + 0.194, + 0.91, + 0.405, + 0.924 + ], + "angle": 0, + "content": "5https://www.heroku.com/" + }, + { + "type": "list", + "bbox": [ + 0.194, + 0.897, + 0.493, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.828, + 0.219 + ], + "angle": 0, + "content": "each experiment. Failing the pre-screening tasks results in early termination of the experiment. Nevertheless, participants are still compensated for their time regardless of whether they fail or succeed on a pre-screener to ensure fair compensation. 
The role of pre-screeners in our studies was to realize two main criteria for data quality, namely, a) to be able to collect high-quality text descriptors, and b) to ensure that participants are able to inspect the target stimuli properly (in particular the audio component in prosody and videos). To do this, we implemented two pre-screening tasks, an English proficiency test and a standardized headphone test (used only for audio and video experiments). Table 1 provides details on which pre-screeners were used in each of the behavioral experiments." + }, + { + "type": "title", + "bbox": [ + 0.377, + 0.251, + 0.61, + 0.289 + ], + "angle": 0, + "content": "alberation" + }, + { + "type": "text", + "bbox": [ + 0.446, + 0.3, + 0.546, + 0.31 + ], + "angle": 0, + "content": "Does this word exist?" + }, + { + "type": "text", + "bbox": [ + 0.435, + 0.346, + 0.453, + 0.356 + ], + "angle": 0, + "content": "yes" + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.347, + 0.553, + 0.355 + ], + "angle": 0, + "content": "no" + }, + { + "type": "image_caption", + "bbox": [ + 0.189, + 0.4, + 0.807, + 0.417 + ], + "angle": 0, + "content": "Figure 6: Example trial from the LexTALE pre-screening task (Lemhöfer & Broersma, 2012)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.442, + 0.828, + 0.542 + ], + "angle": 0, + "content": "English proficiency test. To test participants' English proficiency, we used LexTALE, a lexical decision task developed in Lemhöfer & Broersma (2012). In each trial, participants were briefly presented (1 second) with either a real English word or a made up word that does not exist. Participants were instructed to guess whether the word was real or not. A total of 12 trials (half of them being real words) were presented, and 8 of them needed to be correct for the participant to pass. The presented words were: hasty, fray, stoutly, moonlit, scornful, unkempt, sensible, kilp, plaintively, crumper, plaudate, alberation. An example trial is shown in Figure 6." 
+ }, + { + "type": "text", + "bbox": [ + 0.356, + 0.581, + 0.645, + 0.595 + ], + "angle": 0, + "content": "Which sound was softest (quietest) -- 1, 2, or 3?" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.643, + 0.511, + 0.653 + ], + "angle": 0, + "content": "1" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.678, + 0.511, + 0.688 + ], + "angle": 0, + "content": "2" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.714, + 0.511, + 0.724 + ], + "angle": 0, + "content": "3" + }, + { + "type": "list", + "bbox": [ + 0.499, + 0.643, + 0.511, + 0.724 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.757, + 0.776, + 0.774 + ], + "angle": 0, + "content": "Figure 7: Example trial from the headphone pre-screening test (Woods et al., 2017)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Headphone test. We used the headphone test developed by Wood et al. (Woods et al., 2017), which is used as a standard pre-screener for high-quality auditory psychophysics data-collection procedures (Milne et al., 2021). The test is designed to ensure that the participants are wearing headphones and are able to perceive subtle differences in volume. The task consists of a forced choice task, in which three consecutive tones are played, and the participant has to identify which of them is the quietest. Crucially, these tones are constructed to exhibit a phase cancellation effect when not using headphones, and therefore making it difficult for non-headphone users to identify the quietest tone. Participants had to answer 4 out of 6 trials correctly to pass this test. An example trial is shown in Figure 7." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image_caption", + "bbox": [ + 0.378, + 0.123, + 0.654, + 0.135 + ], + "angle": 0, + "content": "How similar are the activities in following two videos? (2 / 85)" + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.142, + 0.773, + 0.154 + ], + "angle": 0, + "content": "If it is difficult to choose between the options, don't worry, and just give what you intuitively think is the right answer." + }, + { + "type": "image", + "bbox": [ + 0.188, + 0.159, + 0.805, + 0.399 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.26, + 0.429, + 0.737, + 0.446 + ], + "angle": 0, + "content": "Figure 8: Screenshot from the similarity judgment task over video pairs." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.469, + 0.393, + 0.483 + ], + "angle": 0, + "content": "B.4 SIMILARITY JUDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.828, + 0.593 + ], + "angle": 0, + "content": "In the present work, we collected similarity judgments across audio and video datasets. Each dataset comprised of 4,950 unique pairs corresponding to the number of unordered subsets that contain two distinct objects (i.e., excluding self-similarity), within a set of 100 stimuli. We did not collect similarity judgments over the three datasets of images, as these were provided in Peterson et al. (2018) (and used here with permission). The experiments proceeded as follows: upon completion of the consent form and the pre-screening tasks, participants received instructions regarding the main experiment:" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.606, + 0.768, + 0.65 + ], + "angle": 0, + "content": "Audio. 
In this experiment we are studying how people perceive emotions. In each round you will be presented with two different recordings and your task will be to simply judge how similar are the emotions of the speakers." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.661, + 0.767, + 0.705 + ], + "angle": 0, + "content": "Video. In this experiment we are studying how people perceive activities. In each round you will be presented with two different videos and your task will be to simply judge how similar are the activities in them." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.716, + 0.457, + 0.731 + ], + "angle": 0, + "content": "The instructions then continued as follows:" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.744, + 0.767, + 0.801 + ], + "angle": 0, + "content": "You will have seven response options, ranging from 0 ('Completely Dissimilar') to 6 ('Completely Similar'). Choose the one you think is most appropriate. Note: no prior expertise is required to complete this task, just choose what you intuitively think is the right answer." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.813, + 0.767, + 0.87 + ], + "angle": 0, + "content": "The quality of your responses will be automatically monitored, and you will receive a bonus at the end of the experiment in proportion to your quality score. The best way to achieve a high score is to concentrate and give each round your best attempt." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.882, + 0.767, + 0.924 + ], + "angle": 0, + "content": "The experiment will begin now. You will take up to 85 rounds where you have to answer this question. Remember to pay careful attention in order to get the best bonus!" 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.218 + ], + "angle": 0, + "content": "As described in the instructions, in each trial, participants rated the similarity between a pair of sounds (how similar are the emotions of the two speakers?) or videos (how similar are the activities in the following two videos?) on a scale ranging from 0 (completely dissimilar) to 6 (completely similar) (Figure 8). Overall, participants completed 85 trials on a random subset of the possible pairs. To further motivate participants to provide good responses, we gave them an additional performance bonus for providing consistent data. Among the 85 trials, 5 trials were repeated for consistency checking. The responses were converted into a performance score by computing the Spearman correlation between the original and repeat ratings. Perfect scores resulted in a 10 cent bonus." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.232, + 0.293, + 0.246 + ], + "angle": 0, + "content": "B.5 CAPTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.258, + 0.825, + 0.302 + ], + "angle": 0, + "content": "We collected free-text captions for the video and audio datasets. Captions for the image datasets were previously collected in Marjieh et al. (2022) and used here with permission. After completing the consent form and pre-screening tests, participants received the following instructions:" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.311, + 0.768, + 0.355 + ], + "angle": 0, + "content": "Audio. In this experiment we are studying how people describe emotions. You will be presented with different recordings of speakers and your task will be to describe their emotions. 
In doing so, please keep in mind the following instructions" + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.357, + 0.601, + 0.372 + ], + "angle": 0, + "content": "- Describe all the important aspects of the recording." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.382, + 0.768, + 0.425 + ], + "angle": 0, + "content": "Video. In this experiment we are studying how people describe activities in videos. You will be presented with different videos of activities and your task will be to describe their content. In doing so, please keep in mind the following instructions" + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.428, + 0.585, + 0.443 + ], + "angle": 0, + "content": "- Describe all the important activities in the video." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.453, + 0.641, + 0.469 + ], + "angle": 0, + "content": "As well as the following guidelines adapted from Marjieh et al. (2022):" + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.479, + 0.64, + 0.493 + ], + "angle": 0, + "content": "- Do not start the sentences with \"There is\" or \"There are\"." + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.495, + 0.506, + 0.508 + ], + "angle": 0, + "content": "- Do not describe unimportant details." + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.51, + 0.602, + 0.524 + ], + "angle": 0, + "content": "- You are not allowed to copy and paste descriptions." + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.526, + 0.56, + 0.539 + ], + "angle": 0, + "content": "- Descriptions should contain at least 5 words." + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.541, + 0.607, + 0.555 + ], + "angle": 0, + "content": "- Descriptions should contain at least 4 unique words." 
+ }, + { + "type": "list", + "bbox": [ + 0.249, + 0.479, + 0.64, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.557, + 0.767, + 0.587 + ], + "angle": 0, + "content": "Note: No prior expertise is required to complete this task, just describe what you intuitively think is important as accurately as possible." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.598, + 0.767, + 0.641 + ], + "angle": 0, + "content": "The quality of your captions will be monitored automatically and providing low quality and repetitive responses could result in early termination of the experiment and hence a lower bonus." + }, + { + "type": "text", + "bbox": [ + 0.231, + 0.654, + 0.483, + 0.669 + ], + "angle": 0, + "content": "You will describe up to 50 recordings." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.679, + 0.827, + 0.833 + ], + "angle": 0, + "content": "These guidelines were enforced to ensure that participants deliver sufficiently informative captions that are not repetitive. In each trial of the main experiment, participants described a single audio (please describe the emotions of the speaker) or video stimulus (please describe the activity in the video). Overall, participants described up to 50 randomly presented stimuli. To filter out bad participants that tend to deliver repeated responses, in each trial (excluding the first 4 trials) we computed the mean edit distance between their current response and all previous responses that they previously provided using the partial_ratio function in the fuzzz6 Python package for fuzzy string matching. This function returns for a pair of input strings a matching score between 0 and 100 (100 being identical strings). Early termination was enforced if the mean response matching score was above 80. The idea here was to prevent participants from copying and pasting the same response over and over again (or varying it only slightly)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.848, + 0.258, + 0.862 + ], + "angle": 0, + "content": "B.6 TAGS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.874, + 0.825, + 0.903 + ], + "angle": 0, + "content": "For the image, audio, and video datasets, we collected tag data, i.e., concise labels that describe the salient features of a stimulus. To do so, we developed a novel tag mining paradigm called STEP-Tag in" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.511, + 0.924 + ], + "angle": 0, + "content": "\\(^{6}\\)https://github.com/seatgeek/thefuzz" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.125, + 0.362, + 0.141 + ], + "angle": 0, + "content": "Mark the existing tags" + }, + { + "type": "title", + "bbox": [ + 0.192, + 0.149, + 0.294, + 0.174 + ], + "angle": 0, + "content": "picking" + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.178, + 0.295, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.31, + 0.151, + 0.388, + 0.174 + ], + "angle": 0, + "content": "apple" + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.178, + 0.411, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.596, + 0.11, + 0.819, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.603, + 0.261, + 0.646, + 0.273 + ], + "angle": 0, + "content": "Play again" + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.221, + 0.34, + 0.236 + ], + "angle": 0, + "content": "Are any tags missing?" 
+ }, + { + "type": "text", + "bbox": [ + 0.197, + 0.244, + 0.532, + 0.289 + ], + "angle": 0, + "content": "Type in words describing the activity in the video, that are missing above. You can either select tags from a dropdown list or create entirely new ones. Submit your response for a new tag by pressing the enter key. You can add more than one tag." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.308, + 0.232, + 0.319 + ], + "angle": 0, + "content": "peach" + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.308, + 0.314, + 0.318 + ], + "angle": 0, + "content": "Type more tags" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.337, + 0.538, + 0.347 + ], + "angle": 0, + "content": "Next" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.374, + 0.825, + 0.403 + ], + "angle": 0, + "content": "Figure 9: Screenshot of an example tag mining task for videos. The tag \"picking\" received 5 stars (very relevant), whereas the tag \"apple\" is flagged (marked as irrelevant)." + }, + { + "type": "table", + "bbox": [ + 0.336, + 0.416, + 0.664, + 0.504 + ], + "angle": 0, + "content": "
Dataset (# of stimuli)meanstdtotal
Vegetables (120)3.21.1385
Furniture (120)5.21.7627
Animals (120)8.22.7988
Audio-emotions (1000)9.13.59092
Video-activities (1000)8.52.98482
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.513, + 0.782, + 0.529 + ], + "angle": 0, + "content": "Table 2: Mean, standard deviation, and total number of tags collected for each dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.555, + 0.828, + 0.848 + ], + "angle": 0, + "content": "which each stimulus was treated as a separate \"chain\" (see Figure 2 in the paper). When the stimulus was presented for the first time, the participant was asked to provide at least one tag. For the following iterations, we sequenced participants so that each of them had to rate the tags provided by participants from the previous iterations within the same chain. The rating was either choosing between one (not very relevant) to five stars (very relevant), or marking the tag as completely irrelevant by using the flag icon (see Figure 9). Participants could optionally introduce new tags that will subsequently be presented to other participants assigned to the same chain. Participants could only provide tags that were not already present, and they had to be in lower-case letters. To discourage frequent use of long word combinations, a pop-up window appeared if participants used two or more white spaces (i.e., three or more words) to warn that long combinations should only be used when completely necessary. This process continued for at least 10 iterations, after which we checked at each consequent iteration whether the chain was \"full\". We considered a chain to be full if its latest iteration had at least 2 tags that were rated at least 3 times and had a mean rating of 3 stars. If a chain was not full after 20 iterations, we stopped collecting further iterations. Since each experimental batch lasted for a fixed duration of less than one day, in some cases we did not complete all chains, and a few chains had fewer iterations (3 for vegetables, 6 for animals and 2 for furniture, out of 120 chains each). 
Our experiment incentivized participants to provide new tags by paying them a performance bonus of 0.01 USD for every up-vote (i.e., not flagged) given by other participants. On the contrary, if two or more tags of the same participant were flagged by others, the participant was excluded (the participant received a warning after the first flag). We provide summary statistics on the number of collected tags in Table 2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.853, + 0.829, + 0.883 + ], + "angle": 0, + "content": "After accepting the consent form and passing the pre-screening tasks, participants received introductory instructions regarding the main experiment:" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.896, + 0.768, + 0.926 + ], + "angle": 0, + "content": "Images. Rate & Tag animals/furniture/vegetables! Thanks for participating in this game! In this game you will:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.104, + 0.57, + 0.118 + ], + "angle": 0, + "content": "- Watch images of animals/furniture/vegetables." + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.122, + 0.523, + 0.136 + ], + "angle": 0, + "content": "- Rate tags that other players have given." + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.14, + 0.534, + 0.154 + ], + "angle": 0, + "content": "- Add new tags that you think are missing." + }, + { + "type": "list", + "bbox": [ + 0.25, + 0.104, + 0.57, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.168, + 0.767, + 0.197 + ], + "angle": 0, + "content": "Audio. Rate & Tag emotions! Thanks for participating in this game! 
In this game you will:" + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.203, + 0.767, + 0.231 + ], + "angle": 0, + "content": "- Listen to a speech fragment and focus on the emotional content of the recording." + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.235, + 0.523, + 0.248 + ], + "angle": 0, + "content": "- Rate tags that other players have given." + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.252, + 0.534, + 0.266 + ], + "angle": 0, + "content": "- Add new tags that you think are missing." + }, + { + "type": "list", + "bbox": [ + 0.249, + 0.203, + 0.767, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.231, + 0.28, + 0.576, + 0.295 + ], + "angle": 0, + "content": "Video. Rate & Tag activities! In this game you will:" + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.301, + 0.61, + 0.316 + ], + "angle": 0, + "content": "- Watch a video and focus on the activities happening." + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.319, + 0.523, + 0.333 + ], + "angle": 0, + "content": "- Rate tags that other players have given." + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.336, + 0.534, + 0.35 + ], + "angle": 0, + "content": "- Add new tags that you think are missing." + }, + { + "type": "list", + "bbox": [ + 0.25, + 0.301, + 0.61, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.365, + 0.68, + 0.38 + ], + "angle": 0, + "content": "Participants then received further instructions regarding the rules of the game" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.394, + 0.767, + 0.506 + ], + "angle": 0, + "content": "Images. After watching the animal/furniture/vegetable you will see tags given by other players that describe the animal/furniture/vegetable. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). 
If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person seeing this animal/furniture/vegetable, you may see no previous tags. You can also add your own tag that is relevant to describe the animal/furniture/vegetable. Your tag will then be rated by other players who are playing the game simultaneously." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.521, + 0.767, + 0.632 + ], + "angle": 0, + "content": "Audio. After listening to the recording, you will see tags given by other players that describe the emotions in the speech fragment. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person listening to this speech sample, you may see no previous tags. You can also add your own tag that is relevant to describe the emotions in the speech fragment. Your tag will then be rated by other players who are playing the game simultaneously." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.646, + 0.767, + 0.758 + ], + "angle": 0, + "content": "Video. After watching the video, you will see tags given by other players that describe the activities in the video. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person watching this video, you may see no previous tags. You can also add your own tag that is relevant to describe the activities in the video. Your tag will then be rated by other players who are playing the game simultaneously." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.772, + 0.825, + 0.787 + ], + "angle": 0, + "content": "Finally, participants received the following guidelines regarding the tag input and the bonus scheme:" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.801, + 0.767, + 0.844 + ], + "angle": 0, + "content": "Keep tags short. A word like \"green grass\" should rather be submitted as \"green\" and \"grass\", whereas a compound word such as \"red wine\" cannot be separated, since \"red wine\" means something different than just \"red\" and \"wine\"." + }, + { + "type": "title", + "bbox": [ + 0.231, + 0.858, + 0.321, + 0.872 + ], + "angle": 0, + "content": "Bonus rules." + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.879, + 0.765, + 0.906 + ], + "angle": 0, + "content": "- If the tag you provide gets rated as a relevant tag (i.e., not flagged) by other players" + }, + { + "type": "text", + "bbox": [ + 0.249, + 0.91, + 0.659, + 0.923 + ], + "angle": 0, + "content": "- If your tag is unique and have not been introduced by others" + }, + { + "type": "list", + "bbox": [ + 0.249, + 0.879, + 0.765, + 0.923 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.384, + 0.102, + 0.615, + 0.161 + ], + "angle": 0, + "content": "
ModalitySTEPCaptions
Audio230187
Video264291
" + }, + { + "type": "table_caption", + "bbox": [ + 0.245, + 0.17, + 0.752, + 0.186 + ], + "angle": 0, + "content": "Table 3: Median of overall participants' time spent per stimulus (in seconds)." + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.211, + 0.768, + 0.255 + ], + "angle": 0, + "content": "Note: Simply writing many and irrelevant tags is not a good idea because other players might flag your tag. Your experiment will terminate early if there are too many red flags!" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.269, + 0.768, + 0.313 + ], + "angle": 0, + "content": "Please try to use a variety of words to describe the animal / furniture / vegetable / emotion in the speech fragment / activities in the video, and use the entire star rating scale for your responses." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.327, + 0.403, + 0.342 + ], + "angle": 0, + "content": "B.6.1 VALIDATING STEP-TAG" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.352, + 0.828, + 0.562 + ], + "angle": 0, + "content": "We conducted a small, exploratory ablation study to validate STEP-Tag as a procedure for collecting diverse, accurate, and informative tags. First, we compared using multiple tags from the last iteration of STEP-Tag to using just a single randomly-selected highly-rated tag from the last iteration. We found that using a single tag greatly decreased correlation with human similarity (i.e., for the video dataset, the best-performing method on multiple tags had a correlation of \\( r = 0.74 \\) while the best-performing method on single labels had a correlation of \\( r = 0.35 \\)). Second, we compared tags from the first iteration of STEP-Tag (equivalent to collecting tags without an adaptive procedure) to tags from the last iteration. 
We found that using first iteration tags greatly decreased correlation with human similarity (i.e., for the video dataset, the 'Tags CNNB mean (no split)' method, the correlation from the last iteration was \\( r = 0.74 \\) and from the first iteration it was \\( r = 0.44 \\); for 'Tags overlap' it was \\( r = 0.56 \\) from the last iteration and \\( r = 0.38 \\) from the first iteration). Finally, we extracted the Kinetics-200 labels for each video to compare the tags from STEP-Tag against the kinds of labels typically collected for machine learning datasets. We found that using labels decreased the correlation with human similarity (i.e., the best-performing method on pipeline tags had a correlation of \\( r = 0.74 \\) while the best-performing method on dataset labels had a correlation of \\( r = 0.64 \\))." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.578, + 0.506, + 0.592 + ], + "angle": 0, + "content": "B.7 DURATION OF STEP-TAG AND CAPTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.604, + 0.827, + 0.677 + ], + "angle": 0, + "content": "To compare STEP-tag and captions, we computed the median of overall participants' time spent per stimulus (see Table 3). The times were only collected for the audio and video modality (captions for the image datasets were already collected by Marjieh et al. (2022)). We see that both methods consume roughly similar amounts of time, which is desirable as our analysis suggests that in some domains (e.g., video) tags yield the best results whereas in others (e.g., audio) captions do." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.695, + 0.403, + 0.71 + ], + "angle": 0, + "content": "C PREDICTION METHODS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.828, + 0.799 + ], + "angle": 0, + "content": "We used two main types of methods to predict human similarity judgments. The first class (\"DNN-based methods\", described in section C.1) make use of pre-trained embedding models. 
In the second class of models (\"Word Frequency Analysis methods\", described in the section C.2) simple feature extraction techniques are used instead of pre-trained deep learning models. Figure 1 depicts schematic overview of all prediction methods that we used." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.815, + 0.383, + 0.829 + ], + "angle": 0, + "content": "C.1 DNN-BASED METHODS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.841, + 0.827, + 0.927 + ], + "angle": 0, + "content": "The DNN-based methods use various embeddings and deep learning representations to predict human similarity judgments. These methods could be further split into three groups based on the kinds of input data they process, namely if they use a single sensory modality that is either image, audio or video (\"unimodal models\"; see subsection C.1.1), or use text that is either tag or captions (\"text embeddings\"; see subsection C.1.2), or use both (\"multimodal models\"). In addition, we also tested the performance of \"stacked\" representations, where the sensory and textual embedding of a select" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.149 + ], + "angle": 0, + "content": "number of models were concatenated into a single long embedding. Overall, the computation time of embedding methods took about two weeks on an x1.16xlarge Amazon Web Services instance with 64 vCPUs and 976 GiB of memory." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.167, + 0.48, + 0.182 + ], + "angle": 0, + "content": "C.1.1 UNIMODAL DNN-BASED METHODS" + }, + { + "type": "table_caption", + "bbox": [ + 0.18, + 0.213, + 0.819, + 0.228 + ], + "angle": 0, + "content": "Table 4: All 30 image baseline models occurring in the top 50 best models reported in Figure 3A." + }, + { + "type": "table", + "bbox": [ + 0.227, + 0.228, + 0.77, + 0.687 + ], + "angle": 0, + "content": "
Model nameAverage scoreSD scoreTop 1 accuracyNumber of parameters (M)
1Swin0.660.0681.5223.37
2ConvNeXT0.640.07N/A348.15
3NF-ResNet0.620.0480.6523.51
4NFNet l00.610.0882.7532.77
5ResNetV20.600.11N/A928.34
6NF-RegNet0.590.0579.299.26
7VGG160.580.1173.35134.27
8VGG190.580.1174.21139.58
9ViT0.580.1275.956.16
10ResMLP0.570.0783.59128.37
11Twins-SVT0.570.0681.6823.55
12Twins-PCPVT0.570.0481.0923.59
13VGG130.570.1171.59128.96
14CaiT0.570.0482.1917.18
15VGG110.570.1070.36128.77
16gMLP0.560.0679.6419.17
17PIT0.560.0378.1910.23
18DeiT0.560.0372.175.52
19ConViT0.560.0373.115.52
20TNT0.560.0381.5223.37
21CoaT0.550.0478.435.35
22gMixer0.550.0578.0424.34
23XCiT0.550.0482.5711.92
24IG ResNeXt0.530.1385.44826.36
25Visformer0.520.0282.1139.45
26RepVGG0.520.1180.2181.26
27CLIP image0.500.11N/A102.01
28JXNesT0.500.0781.4216.67
29ECAResNet0.470.1480.4528.11
30DenseNet0.470.1274.746.95
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.196, + 0.689, + 0.825, + 0.716 + ], + "angle": 0, + "content": "Note. Performance accuracy on ImageNet was based on Wightman (2019) and was not available for all models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.75, + 0.828, + 0.862 + ], + "angle": 0, + "content": "Image models We used 560 pre-trained models from the Pytorch Image Models (timm) repository (Wightman, 2019). We chose this repository as it contains an extensive and highly diverse set of pre-trained models in terms of architecture backbones, model sizes, and training sets. The repository includes models published from 2014 to 2022 that use various training sets (such as ImageNet1k, ImageNet21k, Instagram, etc.), training procedures objectives (e.g., pre-training, fine-tuning, self-supervision, weak supervision, etc.) and architectures (e.g., VGG, ResNet, Inception, Transformer, etc.). The repository also reports various evaluation metrics for each model (e.g., their ImageNet performance)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "For each model, we computed the embedding from the last layer (typically before the final softmax layer; see below and Figure 10 for a preliminary analysis for the effect of layer depth in audio models). We then computed the cosine similarity between pairs of embedding vectors to produce a similarity matrix. 
The entire list of the performance of all models is detailed in the OSF repository" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.101, + 0.81, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.368, + 0.825, + 0.398 + ], + "angle": 0, + "content": "Figure 10: Scores for individual layers of audio models scaled to the total number of layers. Models are colored by their meta architecture." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.423, + 0.827, + 0.482 + ], + "angle": 0, + "content": "associated with this paper7. Table 4 presents additional details for the top 42 image baseline models in Figure 3A including their average score (correlation to human judgments) across the three image datasets, the standard deviation (SD) of this score (across datasets, repeated runs and available model parameters in Wightman (2019)), their ImageNet accuracy, and their number of trainable parameters." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.486, + 0.828, + 0.737 + ], + "angle": 0, + "content": "Figure 4A shows the correlation to human similarity as a function of the number of parameters for all 569 models. In general, we found that models that have more parameters perform better (Figure 4A). Plotting all the embedding technique correlations against the number of training parameters of their respective models showed statistically significant positive correlation \\((r = 0.39, p < 0.001)\\). However, one possible explanation for this could be the improved performance of newer models, which typically have more parameters, on various computer vision tasks. 
To test this, we computed the performance (i.e., correlation with human similarity) of the various models as a function of their accuracy on ImageNet (Deng et al., 2009) - which was provided in Wightman (2019) for all models except for CLIP (whose implementation came from a different repository) as summarized in Figure 4B. We found a positive correlation between the two metrics \\((r = 0.26, p < 0.001)\\), though with some clear exceptions. For example, the vision transformer BEiT (Bao et al., 2021) and the convolutional architecture EfficientNet (Tan & Le, 2019) achieved high accuracy on ImageNet but performed poorly on human data. On the other hand, the vision transformer Swin (Liu et al., 2021) and the convolutional architecture ConvNext (Liu et al., 2022) both performed well on ImageNet and human similarity. This suggests that architecture and number of parameters are better predictors of similarity judgments than performance on ImageNet. Further analysis is required to determine what kind of architectural components actually contribute to more human-like performance (Langlois et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.752, + 0.828, + 0.852 + ], + "angle": 0, + "content": "Audio models We used all pre-trained wav2vec 2.0 (Baevski et al., 2020) and HuBERT (Hsu et al., 2021) models available in torchaudio (Yang et al., 2021). We also extracted embeddings from WavLM (Chen et al., 2021) and data2vec audio models (Baevski et al., 2022). Furthermore, we used additional wav2vec 2.0 and HuBERT models that were either specialized on emotion recognition or speaker identification (wen Yang et al., 2021; Wagner et al., 2022; Ravanelli et al., 2021). The performance of HuBERT, wav2vec 2.0, and WavLM models is shown in Figure 3B. Additional details about the models are displayed in Table 5." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.828, + 0.901 + ], + "angle": 0, + "content": "In addition, we explored the correlation between the audio models and human similarity data as a function of the layer in the model. Earlier literature has suggested that similarity to human representations may depend on the layer of the model (Kell et al., 2018; Yamins et al., 2014; Yamins," + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.766, + 0.924 + ], + "angle": 0, + "content": "\\(^{7}\\)https://osf.io/kzbr5/?view_only=3dea58e008ce41c290ef0f374bddbf444" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.315, + 0.113, + 0.68, + 0.127 + ], + "angle": 0, + "content": "Table 5: All audio baseline models used in the analysis." + }, + { + "type": "table", + "bbox": [ + 0.244, + 0.127, + 0.755, + 0.671 + ], + "angle": 0, + "content": "
Model nameEmotion correlationNumber of parameters (M)
1wav2vec 2.0 lv60k (100h)0.49317
2wav2vec 2.0 lv60k (960h)0.49317
3wav2vec 2.0 lv60k0.51317
4wav2vec 2.0 lv60k (10m)0.51317
5HuBERT xlarge ASR0.451000
6HuBERT xlarge0.461000
7HuBERT large ASR0.46300
8wav2vec 2.0 large XLSR530.47317
9HuBERT large0.46300
10wav2vec 2.0 (Audeering, emotion)0.49317
11HuBERT base0.4190
12WavLM large0.46316.62
13HuBERT base (superb, emotion)0.4290
14HuBERT base (superb, speaker)0.4290
15WavLM base+0.4194.70
16wav2vec 2.0 base (960h)0.3895
17WavLM base0.3994.70
18wav2vec 2.0 base0.3495
19wav2vec 2.0 base (10m)0.3495
20wav2vec 2.0 base (superb, emotion)0.3495
21wav2vec 2.0 base (superb, speaker)0.3495
22wav2vec 2.0 base (100h)0.3295
23HuBERT large (superb, emotion)0.29300
24HuBERT large (superb, speaker)0.29300
25wav2vec 2.0 large (100h)0.32317
26wav2vec 2.0 large (superb, emotion)0.31317
27wav2vec 2.0 large (superb, speaker)0.31317
28wav2vec 2.0 large (960h)0.31317
29wav2vec 2.0 large (10m)0.31317
30data2vec audio large (960h)0.31313.28
31data2vec audio base (100h)0.23313.28
32data2vec audio large (100h)0.23313.28
33data2vec audio large (10m)0.21313.28
34wav2vec 2.0 (SpeechBrain, emotion)0.1195
35data2vec audio base (960h)0.1693.16
36data2vec audio base (10m)0.1593.16
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.697, + 0.825, + 0.794 + ], + "angle": 0, + "content": "2020). We expected that the layers closer to the input of the model (where the representation is more low-level) to be less predictive. In general, we found that this was the case (Figure 10). In some variants of wav2vec, however, intermediate representations performed better, possibly due to the misalignment of the training task of wav2vec with the emotion task. This analysis confirms the choice we made in the paper to mostly use the last two layers of the models. Preliminary analysis of the image and video models also explored different layers, but the results were similar to those we presented in audio, and are therefore not reported here." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.813, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Video models We extracted embeddings from the 'Slow' (a 3D ResNet; see Feichtenhofer et al. (2019)), Slowfast (a 2-path model with one path capturing semantics and the other capturing fine details; see Feichtenhofer et al. (2019)), and X3d (a model that initially starts as a simple 2D image classifier but is expanded in several axes; see Feichtenhofer (2020)) architectures implemented in pytorchvideo (Fan et al., 2021). All video models were pre-trained on the Kinetics-400 dataset (Kay et al., 2017). The performance of the models is displayed in Figure 3C. Numeric correlation values are detailed in Table 6 along with model accuracy (Top1 and Top5) on Kinetics-400, and the number of parameters in each model. 
The accuracies and parameter counts are listed as reported in" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Fan et al. (2021). As with previous modalities, the number of parameters appears to be positively correlated with correlation to human similarity." + }, + { + "type": "table_caption", + "bbox": [ + 0.315, + 0.155, + 0.683, + 0.17 + ], + "angle": 0, + "content": "Table 6: All video baseline models used in the analysis." + }, + { + "type": "table", + "bbox": [ + 0.218, + 0.17, + 0.782, + 0.297 + ], + "angle": 0, + "content": "
Model nameCorrelationKinetics-400 Top1 AccKinetics-400 Top5 AccNumber of parameters (M)
1Slowfast r500.6576.9492.6934.57
2Slowfast r1010.6477.9093.2762.83
3Slow r500.6174.5891.6332.45
4X3d M0.5375.9492.723.79
5X3d S0.4973.3391.273.79
6X3d XS0.4869.1288.633.79
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.317, + 0.434, + 0.331 + ], + "angle": 0, + "content": "C.1.2 TEXT EMBEDDING METHODS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.341, + 0.828, + 0.494 + ], + "angle": 0, + "content": "Caption text embedding. Since there are multiple captions per stimulus, an aggregation procedure had to be applied to produce a single embedding vector for each stimulus. In our main analysis, for each stimulus, we extracted the embedding for each associated caption and averaged these embeddings together before computing cosine similarity between the mean embeddings. We also tried an alternative approach of concatenating the captions together into a single paragraph, which we then passed through the LLMs to compute a single embedding per stimulus. We found that this did not consistently improve performance and in many cases even decreased it, though we note that we did not experiment with different permutations of the concatenated captions, nor did we extensively study other ways to combine them together. Future work could explore other techniques for pre-processing captions and aggregating representations from multiple captions in ways that would improve correlation with human similarity judgments." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.501, + 0.829, + 0.682 + ], + "angle": 0, + "content": "Tag text embedding. We experimented with several algorithms for computing similarity between sets (or multi-sets) of tags. The algorithms described in this section all involve using ConceptNet NumberBatch (CNNB) (Speer et al., 2017) as the embedding backbone for turning discrete tags into continuous vector representations. For each stimulus, we took the tags remaining in the final iteration, and tested whether they were found in the dictionary for our embedding model. If a tag was not found and if it contained no spaces, we tried to correct the spelling before trying to look it up in the dictionary again. 
If a tag contained spaces, we split it into individual words, corrected their spelling, and averaged together the embedded representations of those words that were found in the dictionary. Tags that were not found even after spelling correction and splitting were excluded from the set and did not contribute to the final representation. For the methods marked ‘(no split)’ we did not split multi-word tags, instead we just excluded multi-word tags that were not found in the embedding model dictionary. In the following, we describe the different techniques used to generate predictions based on tag embeddings." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.688, + 0.826, + 0.759 + ], + "angle": 0, + "content": "Tags CNNB overlap. For each pair of stimuli, we counted the number of 'almost identical' tag embeddings, defined as every respective element of the two embeddings being less than a certain threshold apart (in our case, this threshold was 0.1). We then set similarity for that pair of stimuli to be this count, i.e., the number of 'almost identical' tags, normalized by the total number of tags across the respective two sets." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.765, + 0.829, + 0.926 + ], + "angle": 0, + "content": "Tags CNNB quantized. This method involves quantizing tags using cosine similarity to find the number of unique tags. For each pair of stimuli, we counted the number of tags assigned to the first stimulus that had cosine similarity greater than a certain threshold (in our case, this threshold was 0.7) to at least one tag of the second stimulus (call this value \( N_A \)) and vice-versa (\( N_B \)). The minimum of these two values is the number of unique, shared tags between the two sets (\( \min(N_A, N_B) \)). The total number of unique tags across the two sets is then the total number of tags in each set (\( T_A + T_B \)) minus the maximum number of shared tags (\( \max(N_A, N_B) \)). 
We compute similarity as the ratio of the number of unique, shared tags to the total number of unique tags, \\( S_{AB} = \\frac{\\min(N_A, N_B)}{T_A + T_B - \\max(N_A, N_B)} \\). For example, suppose the two sets of tags are \\( A: \\{a, b, c, g\\} \\) and \\( B: \\{a, b, d, e\\} \\), so \\( T_A = T_B = 4 \\), and that \\( a, c \\) have cosine similarity of 0.8. The number of tags from set A found in set B is \\( N_A = 3 \\), and those from B found in A is \\( N_B = 2 \\). The number of unique, shared tags is \\( \\min(N_A, N_B) = 2 \\) (since" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.826, + 0.135 + ], + "angle": 0, + "content": "\\(\\{a, b, c\\}\\) can be represented by \\(\\{a, b\\}\\), and the total number of unique tags is \\(4 + 4 - 3 = 5\\) (since \\(\\{a, b, c, g, a, b, d, e\\}\\) can be represented by \\(\\{a, b, d, e, g\\}\\)). The assigned similarity is then \\(S_{AB} = \\frac{2}{5}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.825, + 0.182 + ], + "angle": 0, + "content": "Tags CNNB mean. The set of tag embeddings for each stimulus were averaged together to form a single embedding assigned to the respective stimulus. We then computed cosine similarity on the embeddings of each pair of stimuli." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.188, + 0.825, + 0.218 + ], + "angle": 0, + "content": "Tags CNNB mean (no split). Same as above, but without splitting multi-word tags (i.e., ones that contain spaces) during the embedding process." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.224, + 0.825, + 0.266 + ], + "angle": 0, + "content": "All spelling corrections in the algorithms listed above were performed using the Python package `pyspellchecker^8`, taking the top corrected recommendation returned by the spell checker in each case." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.826, + 0.343 + ], + "angle": 0, + "content": "Tags to caption Roberta (SimCSE). Additionally, for the image datasets, we experimented with converting sets of tags into captions and then using those captions with our best-performing LLM ('sup-simcse-roberta-large') the same way we do with user-generated captions. To convert a set of tags into a caption, we joined the set of tags with commas and prepended them with the phrase \"This is an image of\"." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.359, + 0.454, + 0.373 + ], + "angle": 0, + "content": "C.1.3 OTHER DNN-BASED METHODS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.383, + 0.825, + 0.412 + ], + "angle": 0, + "content": "For the image datasets, we also considered several other methods that made use of DNNs but do not fit into the categories described above." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.428, + 0.825, + 0.511 + ], + "angle": 0, + "content": "GPT3 prompting We experimented with prompting GPT3 (Brown et al., 2020), a large pre-trained language model, to directly output similarity judgments as a text-completion problem rather than having to access model embeddings as we did above. We used a few-shot prompting approach where in each prompt we included three context examples of pairs of tag sets and their associated similarity rating. We then provided the pair of tag sets for the two images that we wanted to get a similarity rating for but left the rating empty for the model to fill in."
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.518, + 0.716, + 0.534 + ], + "angle": 0, + "content": "Here is an example prompt with the GPT3 response bolded and in square brackets:" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.541, + 0.835, + 0.865 + ], + "angle": 0, + "content": "
People described pairs of images using words.
How similar are the two images in each pair on a scale of 0-1 where 0 is completely dissimilar and 1 is completely similar?
Here are the descriptions of image one: tortoise, slow, protected, shell, turtle, scaly, old, cold-blooded
Here are the descriptions of image two: monkey, ape, mammal, black and white, hairy, agile, primate, smart, tree-dwelling
Rating: 0.05
Here are the descriptions of image one: rhinoceros, horn, gray, standing, heavy body, endangered, wild, africa, african
Here are the descriptions of image two: tiger, open mouth, stripes, feline, predator
Rating: 0.27
Here are the descriptions of image one: goat, eye, leg
Here are the descriptions of image two: mammal, wide-nosed, mandrill, primate, baboon, smart
Rating: 0.19
Here are the descriptions of image one: black, primate, mammal, hairy, chimpanzee, africa, african, great ape, smart, omnivore
Here are the descriptions of image two: zebra, striped, two-toned, wild, staring, mammal, equine, herd animal, africa
Rating: [0.14]
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.871, + 0.825, + 0.9 + ], + "angle": 0, + "content": "We repeated this four times for each pair of images in each image dataset with a different set of context examples during each repetition and averaged together the GPT responses to get a final" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.625, + 0.924 + ], + "angle": 0, + "content": "8https://pyspellchecker.readthedocs.io/en/latest/" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.175 + ], + "angle": 0, + "content": "similarity prediction for each pair. In total, creating the context examples required having access to human similarity judgments over only 12 pairs of images. We found that this approach yielded surprisingly good predictions, with an average correlation of \\( r = 0.62 \\) across the image datasets. We believe this approach merits future investigation to determine whether prompt engineering can further increase the performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.192, + 0.827, + 0.304 + ], + "angle": 0, + "content": "Image captioning models We experimented with using pre-trained image captioning models to generate captions for our images and then using those captions with our best-performing LLM ('sup-simcse-roberta-large') the same way we do with user-generated captions. We used three pre-trained image captioning models from HuggingFace ('flamingo-mini', 'vilt-b32-finetuned-vqa', and 'vit-gpt2-image-captioning') to generate text descriptions for our images. However, the performance was quite poor with an average of \\( r = 0.29 \\) across the three models. 
As a result, \\( O(N) \\) language-based methods cannot easily be reduced to \\( O(1) \\) even when domain-relevant pre-trained caption models are available." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.322, + 0.504, + 0.337 + ], + "angle": 0, + "content": "C.2 WORD FREQUENCY ANALYSIS METHODS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.349, + 0.825, + 0.406 + ], + "angle": 0, + "content": "In this work, we also conducted an additional evaluation of prediction models beyond embedding-based techniques (described in the previous section). Specifically, we compared the predictions of embedding-based models, which utilize deep learning representations, with those of traditional methods of text mining." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.412, + 0.779, + 0.428 + ], + "angle": 0, + "content": "Before the word frequency analysis, we performed the following initial pre-processing steps" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.439, + 0.825, + 0.468 + ], + "angle": 0, + "content": "- For caption data, we concatenated all the captions describing the same stimulus into a single long \"document.\"" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.473, + 0.827, + 0.587 + ], + "angle": 0, + "content": "- For tag data, we wanted to prioritize tags that appeared earlier in the tag-mining chains and were rated higher. To that end, we gathered all tags from all iterations and duplicated tags from a given iteration based on the ratings they received. For example, if the tag \"tomato\" received three stars, then we would add the repeated tokens \"tomato, tomato, tomato\" to the aggregated list (\"document\"). In a given iteration, flagged tags are removed, but if they are rated later, then they are included. The total number of repetitions per token is equal to the sum of all the stars they received in all iterations. As a result, each token is repeated multiple times, which we take into consideration in consequent analysis." 
+ }, + { + "type": "list", + "bbox": [ + 0.217, + 0.439, + 0.827, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.597, + 0.825, + 0.64 + ], + "angle": 0, + "content": "For the next steps, we used the Matlab text analytics toolbox \\(^{9}\\). Unless otherwise specified, we used default parameters for all functions. To generate similarity matrices, we applied the following methods:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.647, + 0.825, + 0.731 + ], + "angle": 0, + "content": "Co-occurrence method. In this approach, we simply counted the number of repeated pairs of words in documents \\(i\\) and \\(j\\) and normalized by the total number of pairs. Formally, we use \\(w_{i}\\) to denote the word list of a document \\(i\\). Let \\(w_{i,k}\\) be the \\(k\\)-th word in the \\(w_{i}\\) list of words, and let \\(|w_{i}|\\) denote the length of the list. We denote by \\(\\delta(c,d)\\) the indicator function that returns 1 if and only if the word \\(c\\) is identical to the word \\(d\\), and 0 otherwise. We computed the co-occurrence score \\(S(w_{i},w_{j})\\) according to the following formula:" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.731, + 0.613, + 0.766 + ], + "angle": 0, + "content": "\\[\nS (w _ {i}, w _ {j}) = \\frac {\\sum_ {k} \\sum_ {l} \\delta (w _ {i , k} , w _ {j , l})}{| w _ {i} | | w _ {j} |}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.77, + 0.614, + 0.786 + ], + "angle": 0, + "content": "We suggest using this method only with tags and not with captions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.791, + 0.825, + 0.835 + ], + "angle": 0, + "content": "Co-occurrence-rep. This method was applied only to tags. We used an identical procedure to the Co-occurrence method, except that we did not separate the words within a tag as separate tokens and instead treated the entire tag (that may include multiple words) as a single token." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.826, + 0.898 + ], + "angle": 0, + "content": "Rouge score. In this approach, similarity was estimated by computing the rouge score of the word lists associated with each pair of documents. The Rouge score was computed using rougeEvaluationScore (Rouge, 2004). We suggest using this method only with tags and not with captions." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.644, + 0.925 + ], + "angle": 0, + "content": "\\(^{9}\\)https://mathworks.com/products/text-analytics.html" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.146 + ], + "angle": 0, + "content": "The following methods make use of tokenized data and a pre-processing procedure that we found effective. Pre-processing was applied to both tag and caption data and tokenization was performed as follows:" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.16, + 0.816, + 0.175 + ], + "angle": 0, + "content": "- We separate all text into single words by applying the tokenizedDocument function." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.179, + 0.82, + 0.193 + ], + "angle": 0, + "content": "- We added part of speech information using the addPartOfSpeechDetails function." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.198, + 0.698, + 0.212 + ], + "angle": 0, + "content": "- We performed Lemmatization using the normalizeWords function." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.216, + 0.769, + 0.231 + ], + "angle": 0, + "content": "- We erased punctuation from the token using the erasePunctuation function." 
+ }, + { + "type": "text", + "bbox": [ + 0.216, + 0.235, + 0.665, + 0.249 + ], + "angle": 0, + "content": "- We removed stopwords using the removeStopWords function." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.254, + 0.734, + 0.268 + ], + "angle": 0, + "content": "- We removed words with less than two characters or more than 15 characters." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.273, + 0.825, + 0.3 + ], + "angle": 0, + "content": "- We created a bag of words representation of each tokenized document using the bagOfWords function." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.306, + 0.825, + 0.334 + ], + "angle": 0, + "content": "- We also removed words that were not present in more than two documents using the InfrequentWords function." + }, + { + "type": "list", + "bbox": [ + 0.216, + 0.16, + 0.825, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.347, + 0.825, + 0.376 + ], + "angle": 0, + "content": "With the results of these pre-processing steps, we then computed similarity matrices based on the following methods:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.382, + 0.825, + 0.439 + ], + "angle": 0, + "content": "bm25S. We used bm25+ to compute similarity between documents (Barrios et al., 2016) using Matlab's bm25Similarity function. This function represents TF-IDF-like retrieval functions used in document retrieval. We used a variant that has a normalization function that properly handles documents with a long list of words." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.445, + 0.825, + 0.473 + ], + "angle": 0, + "content": "tfidf-cosine. We computed pairwise cosine similarities between document pairs using the TF-IDF matrix derived from their word counts and Matlab's cosineSimilarity function." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.491, + 0.383, + 0.505 + ], + "angle": 0, + "content": "C.3 SUPERVISED METHODS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.517, + 0.827, + 0.712 + ], + "angle": 0, + "content": "Several previous studies investigated improving correlations by applying and fine-tuning simple linear transformations to embedding vectors \( z^T \mathbf{W}z \) where \( \mathbf{W} = \mathrm{diag}(w_1, \ldots, w_d) \) via a cross-validated ridge regression procedure that could be fit to ground-truth similarity judgments. The parameters of the diagonal reweighting matrix \( \mathbf{W} \) are fitted to a training subset of stimuli and used to predict similarity of pairs in a held-out validation set Peterson et al. (2018); Marjieh et al. (2022). To be consistent and make results comparable, here we report the results of performing this 6-fold cross-validated linear transformation (LT-CCV) on the model embeddings and datasets considered in this work. The analysis was carried out using the RidgeCV package from the scikit-learn Python library Pedregosa et al. (2011). Results with both normalized ('LT CCV (norm)') and unnormalized ('LT CCV') regressors are shown in Figure 11; see RidgeCV documentation for details on normalization \( ^{10} \). We see that the linear transformation does not consistently improve performance (and can even decrease it) when applied to many of the modality-based or stacked embeddings, but it does frequently improve performance when applied to caption embeddings. Due to their instability and risk of overfitting, we do not use these methods in our main analysis." + }, + { + "type": "page_footnote", + "bbox": [ + 0.172, + 0.897, + 0.825, + 0.924 + ], + "angle": 0, + "content": "\(^{10}\)https://scikit-learn.org/stable/modules/generated/sklearn.linear_model. 
RidgeCV.html" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.25, + 0.825, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.763, + 0.816, + 0.779 + ], + "angle": 0, + "content": "Figure 11: Effect of fine-tuning model embeddings using a small subset of similarity judgments." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ] +] \ No newline at end of file diff --git a/2023/Words are all you need_ Language as an approximation for human similarity judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_origin.pdf b/2023/Words are all you need_ Language as an approximation for human similarity judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8dcb2d92175a4ef3b5a9923a12dbce77b7d19ae1 --- /dev/null +++ b/2023/Words are all you need_ Language as an approximation for human similarity judgments/bd5757f5-b64d-41a1-849d-8e09ed031d8e_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03c3ab6cda1d5a1b82e108b6de6c988751a2650ac2abc2bfe4f4c57d048e43d4 +size 5472901 diff --git a/2023/Words are all you need_ Language as an approximation for human similarity judgments/full.md b/2023/Words are all you need_ Language as an approximation for human similarity judgments/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e012853baeb4b170c382ee972f0fdee30d25a1d6 --- /dev/null +++ b/2023/Words are all you need_ Language as an approximation for human similarity judgments/full.md @@ -0,0 +1,560 @@ +# WORDS ARE ALL YOU NEED? 
LANGUAGE AS AN APPROXIMATION FOR HUMAN SIMILARITY JUDGMENTS + +Raja Marjieh $^{1,*}$ , Pol van Rijn $^{2,*}$ , Ilia Sucholutsky $^{3,*}$ , Theodore R. Sumers $^{3}$ , Harin Lee $^{2,4}$ + +Thomas L. Griffiths $^{1,3,\ast \ast}$ , Nori Jacoby $^{2,\ast \ast}$ + +\*\*\* Equal contribution. + +$^{1}$ Department of Psychology, Princeton University + +2Max Planck Institute for Empirical Aesthetics + +$^{3}$ Department of Computer Science, Princeton University + +4Max Planck Institute for Cognitive and Brain Sciences + +# ABSTRACT + +Human similarity judgments are a powerful supervision signal for machine learning applications based on techniques such as contrastive learning, information retrieval, and model alignment, but classical methods for collecting human similarity judgments are too expensive to be used at scale. Recent methods propose using pre-trained deep neural networks (DNNs) to approximate human similarity, but pre-trained DNNs may not be available for certain domains (e.g., medical images, low-resource languages) and their performance in approximating human similarity has not been extensively tested. We conducted an evaluation of 611 pre-trained models across three domains – images, audio, video – and found that there is a large gap in performance between human similarity judgments and pre-trained DNNs. To address this gap, we propose a new class of similarity approximation methods based on language. To collect the language data required by these new methods, we also developed and validated a novel adaptive tag collection pipeline. We find that our proposed language-based methods are significantly cheaper, in the number of human judgments, than classical methods, but still improve performance over the DNN-based methods. Finally, we also develop 'stacked' methods that combine language embeddings with DNN embeddings, and find that these consistently provide the best approximations for human similarity across all three of our modalities. 
Based on the results of this comprehensive study, we provide a concise guide for researchers interested in collecting or approximating human similarity data. To accompany this guide, we also release all of the similarity and language data, a total of 206,339 human judgments, that we collected in our experiments, along with a detailed breakdown of all modeling results. + +# 1 INTRODUCTION + +Similarity judgments have long been used as a tool for studying human representations, both in cognitive science (Shepard, 1980; 1987; Tversky, 1977; Tenenbaum & Griffiths, 2001), as well as in neuroscience, as exemplified by the rich literature on representational similarity between humans and machines (Schrimpf et al., 2020; Kell et al., 2018; Linsley et al., 2017; Langlois et al., 2021; Yamins et al., 2014) whereby similarity patterns of brain activity are compared to those arising from a model of interest. Recent research in machine learning suggests that incorporating human similarity judgments in model training can play an important role in a variety of paradigms such as human alignment (Esling et al., 2018), contrastive learning (Khosla et al., 2020), information retrieval (Parekh et al., 2020), and natural language processing (Gao et al., 2021). + +However, building a large dataset based on human similarity judgments is very expensive and often infeasible since the number of judgments required is quadratic in the number of stimuli – for $N$ + +![](images/aea578caba5065f0dadc0f7b3cf13e1e7a10d356d8d7a8df27f4bf60b75ca097.jpg) +Figure 1: Comparing human similarity scores gathered through crowdsourcing with ML pipelines. We used data from three modalities: images, audio, and video. For each modality, we extracted deep model embeddings and gathered human captions and tags. Word- and language-embedding models, as well as simple word-frequency analysis, were used to predict human similarity judgments. + +stimuli, $O(N^2)$ judgments are required1. 
For example, to fully quantify the similarity of all possible dyadic pairs of 50,000 images, one needs to collect on the order of 1.25 billion ( $\sim \frac{50000^2}{2}$ ) human similarity judgments. Thus, human judgments are the main bottleneck for machine-learning methods based on similarity. For this reason, the majority of available human similarity datasets are small by machine learning standards (up to a few thousand objects). + +Advancements in deep learning have brought an alternative approach that does not require extensive collection of human judgments. Specifically, the idea is to use the similarity between hidden representations in pre-trained deep neural networks (DNNs) to approximate human similarity (Peterson et al., 2018; Jha et al., 2020; Marjieh et al., 2022; Hebart et al., 2020; Roads & Love, 2021). Some of these methods also suggest fine-tuning representations on a small training set of human similarity judgments (Peterson et al., 2018). This, in turn, results in a significant reduction in the number of required human judgments down to $O(1)$ (given the pre-trained model). While such methods are promising, they still require access to strong pre-trained models which may not necessarily be available in all domains (e.g., medical datasets, niche modalities, low-resource languages, etc.). In addition, representations obtained from neural networks may not always overlap with human similarity representations, given that the models can be trained for different objectives (i.e., their embeddings may be poor approximations for human similarity). + +A comprehensive comparison to assess which models perform well in predicting human similarity across different modalities is currently lacking in the literature. 
To this end, one of our main contributions in this paper is providing a first-of-its-kind large-scale evaluation of over 600 publicly-available pre-trained models as approximations for human similarity judgments on three modalities

(images, audio, video). Our experiments reveal that there is a large gap in performance between the $O(1)$ DNN methods and the classical $O(N^2)$ similarity method we used as the baseline.

To address this gap, we propose a new class of $O(N)$ methods to efficiently and accurately approximate human similarity based on language. This is motivated by a long line of research in cognitive science suggesting that language is an extremely efficient way for humans to communicate information about their sensory environment (Murphy, 2004; Zaslavsky et al., 2018; Piantadosi et al., 2011; Jaeger & Levy, 2006). This in turn suggests that we can use textual descriptors to approximate similarity judgments across different modalities. Moreover, such textual descriptors can be collected at the cost of $O(N)$ human judgments (as people describe individual stimuli rather than pairs), which renders this method scalable.

We consider two approaches for approximating similarity from text data. One approach is to use pre-trained Large Language Models (LLM) to produce vector embeddings of the textual descriptions, and then use a measure of distance between these embeddings to approximate human similarity. This method is more domain-agnostic than the $O(1)$ deep learning methods as it only requires access to a pre-trained LLM regardless of the modality of the original dataset. However, there are some cases where the domain may be out-of-distribution for all available LLMs (e.g., niche technical fields), or where no LLMs are available at all (e.g., low-resource languages). 
In such cases, the other approach is to use Word-Frequency Analysis (WFA) methods from classical text processing literature (Barrios et al., 2016; Rouge, 2004; Beel et al., 2016).

As for the textual descriptions themselves, we consider two types, namely, free-text captions and concise word tags. Collecting captions for machine learning datasets is a well-established practice and can easily be done through crowdsourcing platforms. On the other hand, there is no consensus on best practices for collecting tags without a pre-existing taxonomy (i.e., open-set labels). To address this, we propose a novel adaptive tag mining pipeline called Sequential Transmission Evaluation Pipeline (STEP-Tag) which we describe in Section 2.2.4. As we will show, STEP-Tag allows researchers to collect meaningful, diverse, and high-quality word tags for target stimuli in an online crowdsourcing environment.

Finally, we propose one additional set of hybrid approximation methods that combine sensory information with textual descriptions while still requiring $O(N)$ human judgments. For this approach, we propose to stack the embeddings derived from both domain-specific models (e.g., output from the last layer of an image classifier) with the LLM embedding of the respective textual description. When multi-modal models are available, we can similarly leverage the joint embedding of both the stimulus and its textual description.

We evaluate all of these novel and existing methods across multiple modalities. We test the relative contributions of linguistic and sensory information in approximating human similarity and show that our proposed language-based methods provide both accurate and efficient approximations across modalities, even though they do not require a trained modality-specific deep learning model. Crucially, with this large-scale evaluation, we are able for the first time to provide researchers with a comprehensive guide of the tools to use for approximating human similarity at scale. 
+ +To summarize, our contributions are as follows: + +- We conduct a comprehensive comparison of human similarity approximation methods. + +- We propose a novel modality-agnostic method for approximating similarity based on text and show that it is both efficient and competitive in terms of performance. + +- We propose STEP-Tag, a novel adaptive tagging pipeline, and show that it is effective for crowdsourcing high-quality and diverse sets of word tags. + +- We synthesize our findings into a detailed guide for researchers interested in approximating human similarity judgments at scale. + +- We collect and release ground-truth and approximated versions of a large behavioral dataset $(N = 1,492)$ across three different domains (images, audio, video), including two text-approximated similarity matrices for 1,000 audio clips and 1,000 video clips. + +# 2 DATASETS + +# 2.1 STIMULI + +Throughout this work, we considered five stimulus datasets across three different modalities - images, audio, and video - consisting of a total of 31,320 dyadic pairs labeled with similarity. + +Images For images, we considered three datasets of common objects introduced in Peterson et al. (2018) – namely, animals, furniture, and vegetables – each consisting of 7,140 dyadic pairs (all unique pairs over 120 images). + +Audio For audio, we used the RAVDESS corpus (Livingstone & Russo (2018), released under a CC Attribution license), which consists of semantically neutral sentences spoken by 24 US American actors to convey a specific target emotion. To construct a 1,000-recording subset, we selected 3 emotions per speaker per sentence. We randomly omitted 104 emotional stimuli and included all 96 neutral recordings (the dataset only contains 2 neutral recordings per speaker per sentence). To construct the subset composed of 4,950 dyadic pairs (all unique pairs over 100 recordings), we randomly selected $\sim 13$ recordings per emotion from the 1,000. 
+ +Video Finally, for the video dataset, we considered the Mini-Kinetics-200 dataset (Xie et al., 2018) (released under a CC BY 4.0 International License), which contains a large set of short video clips of human activities from 200 activity classes. Specifically, we focused on the validation split, which contains 5,000 videos in total. To construct our 1,000-video dataset, we sampled 5 random videos from each of the 200 activity categories. The 100-video subset (4,950 dyadic pairs) used in the similarity judgment collection experiment was then generated by sampling 100 random stimuli from the 1,000 list. + +# 2.2 HUMAN JUDGMENT COLLECTION + +# 2.2.1 PARTICIPANTS + +We collected data from $N = 1,492$ US participants for the new behavioral experiments reported in this paper. Participants were recruited anonymously from Amazon Mechanical Turk and provided informed consent under an approved protocol by either the Institutional Review Board (IRB) at Princeton University (application 10859) or the Max Planck Ethics Council (application 2021_42) before taking part. Participants earned 9-12 USD per hour, and each session lasted less than 30 minutes. To help recruit reliable participants, we required that participants are at least 18 years of age, reside in the United States and have participated in more than 5,000 previous tasks with a $99\%$ approval rate (see Supplementary Section B for additional details about the behavioral experiments). All experiments were implemented with the Dallinger and PsyNet frameworks designed for automation of large-scale behavioral research (Harrison et al., 2020). In Supplementary Section A.1, we include the data that was collected, instructions used, and code for replication of the behavioral experiments. We also provide the code for computational experiments and analysis. 
+ +# 2.2.2 SIMILARITY JUDGMENTS + +We collected two batches of pairwise similarity judgements, one for each of the audio and video subsets, and were provided access to the similarity matrices for the three image datasets by the authors of Peterson et al. (2018). For each pair we collected $\sim 5$ similarity judgments to average out inter-rater noise. + +# 2.2.3 CAPTIONS + +We collected free-text captions for the video and audio datasets. Captions for the image datasets were already collected by Marjieh et al. (2022) and used here with permission. For each stimulus, we collected $\sim 10$ captions. + +![](images/8a98b1de6f85e5fff095e57db5e2dca85fafcce1dced6e5cc2ee52e5bc880c26.jpg) +Figure 2: STEP-Tag, our novel tag-mining paradigm. We ran an adaptive process in which results of one iteration are used as inputs for subsequent iterations. In every iteration, participants can add a new tag, rate the relevance of existing tags or flag tags that are inappropriate. + +# 2.2.4 TAGS + +We propose a novel adaptive tag pipeline for simultaneous data collection and evaluation called Sequential Transmission Evaluation Pipeline (STEP) and apply it in the context of semantic tag mining (STEP-Tag). Our paradigm, STEP-Tag, allows researchers to efficiently collect high-quality word tags for a given stimulus (Figure 2) and extends existing crowdsourcing text-mining techniques (Von Ahn & Dabbish, 2008; 2004; Krishna et al., 2017; Law et al., 2007) by integrating ideas from transmission chain experiments (Kirby et al., 2008; Griffiths & Kalish, 2005). In STEP-Tag, participants adaptively create tags for a set of target stimuli and simultaneously evaluate the annotations made by previous participants. 
In each trial, participants are first given a stimulus (e.g., an image or audio fragment) and rate the relevance of tags that were created by other participants (on a 5-interval Likert scale) or flag a tag if they find it inappropriate (with tags removed if more than two people flag the tag). Next, participants are also given the opportunity to add new tags if they feel a relevant tag that describes the stimulus is missing. The results of the annotation procedure of one participant then propagate to the next participant (additional details about the paradigm, and screenshots are provided in Supplementary Section B.6). Ultimately, as the process unfolds over many iterations, meaningful tags are extracted and validated by multiple participants, enabling efficient open-label collection of a desired dataset. + +To validate STEP-Tag, we compared it against several baselines: (i) randomly selecting only a single high-rated tag from the last iteration of STEP-Tag per stimulus, (ii) using tags only from the first iteration of STEP-Tag (equivalent to non-adaptive tag collection), and (iii) using class labels instead of tags. We found that tags produced after multiple iterations of STEP-Tag outperformed all three baselines in terms of quality (i.e., downstream performance for similarity reconstruction) and diversity (see Supplementary Section B.6.1). + +# 3 MODELS + +# 3.1 DNN-BASED METHODS + +We tested a wide range of pre-trained ML models that do not rely on text (overall we tested 611 models) and compared their internal representations to human similarity judgments and text-based predictions (Figure 1A). We compiled our model pool by leveraging pre-trained model repositories (or zoos) available online. 
In particular, for images we use 569 pre-trained models from the pytorch-image-models package timm (Wightman, 2019), for audio we use 36 pre-trained models available in the torchaudio package (Yang et al., 2021) (see also Supplementary Figure 10 for an analysis of layer depth), and for video we use 6 pre-trained models available from the PyTorchVideo package (Fan et al., 2021). Because of the recent success of multimodal training, we additionally included 9 multimodal models based on CLIP from OpenAI's public implementation (https://github.com/openai/CLIP) for the image datasets, and compared them to "stacked" representations (i.e., concatenating embeddings from separate image and text models). + +# 3.2 LLM-BASED METHODS + +Tags To embed tags we used ConceptNet Numberbatch (CNNB) which is a word-embedding model trained on the ConceptNet knowledge graph that leverages other popular word embedding models such as word2vec and GloVe (Speer et al., 2017). We experimented with several algorithms for computing similarity between sets (or multi-sets) of tags and share the details in Supplementary Section C.1.2. As a control, for images we also tried converting tags into a caption of the form "This is an image of tag1, tag2, ..." and embedding them using a language model (see Supplementary Section C.1.2). + +Captions To embed captions, we used four pre-trained LLMs from HuggingFace (Wolf et al., 2020): 'bert-base-uncased', 'deberta-xlarge-mnli', 'sup-simcse-bert-base-uncased', and 'sup-simcse-roberta-large'. SimCSE is a pre-training procedure that uses semantic entailment in a contrastive learning objective (Gao et al., 2021). According to BERTScore (Zhang et al., 2020), the latter three models are ranked in the top 40 models in terms of correlation with human evaluations on certain tasks, with 'deberta-xlarge-mnli' ranked first. 
However, in our experiments, we found that embedding similarity computed from 'sup-simcse-roberta-large' has the highest correlation with human similarity judgments out of the four models. For SimCSE-based models, we used representations from the (final) embedding layer (where the SimCSE contrastive objective is actually applied). For the other two models, we computed embeddings from every layer, but restricted the main analysis to embeddings from the penultimate layers. This was done in order to be consistent with our procedure for DNNs. + +Other methods For the image datasets, we also considered several other methods that made use of LLMs but do not fit into the categories described above. One approach was using prompts with GPT3 (Brown et al., 2020) in a text-completion setup to directly predict similarity without extracting embeddings (see Supplementary Section C.1.3 for details). We also tried using pre-trained image captioning models to generate captions automatically (i.e. this would reduce $O(N)$ language-based methods to $O(1)$ ) but this resulted in poor performance (see Supplementary Section C.1.3 for details). + +# 3.3 STACKING METHODS + +We produce stacked representations for each modality by concatenating the single best-performing (see Figure 3) LLM's embeddings with the embeddings from the five best-performing DNNs into a single set of long embeddings. Since the two sets of embeddings come from different spaces, we add a single tunable hyperparameter for rescaling the LLM embeddings. This hyperparameter can be set manually, but we use a small number of ground-truth similarity judgments (we use dyadic pairs for just 20 stimuli) to optimize it automatically. + +# 3.4 WORD FREQUENCY ANALYSIS (WFA) METHODS + +The aim of the WFA methods is to enable similarity approximation from language using traditional embedding-free techniques. 
Such techniques are particularly useful for low-resource languages or cross-cultural comparisons (Cowen & Keltner, 2017; Barrett, 2020), for which pre-trained models are lacking, as they work solely on the basis of the text itself. The WFA methods we considered included measuring co-occurrence, Rouge score, bm25s, and tfidf. We provide details on each of these procedures in Supplementary Section C.2. + +# 3.5 PERFORMANCE METRIC + +We quantified performance by computing the Pearson correlation $r$ between approximated similarity scores and the ground-truth human similarity scores for all the unique dyadic pairs in a dataset. We compared the performance of the different prediction methods to the inter-rater reliability (IRR) of participants, which serves as an approximate upper-bound on performance. Following Peterson et al. (2018), we computed IRR for each human similarity matrix using the split-half correlation method with a Spearman-Brown correction (Brown, 1910). + +# 4 RESULTS + +![](images/d77376fc14926badb2e03efdcc2ea3c112f33ec27be4ae665e9fa9d8c9574858.jpg) + +![](images/a8a8c9b51c3fe39d89cde181140c094e59c9683ace33d3ababdbfe2f4b585119.jpg) + +![](images/ae6b182d09f93dee1b908aa568dee1ab2661348ddda7a0163109fd4f0966c432.jpg) + +![](images/8ca213ebefb0e9cbe901e5366eca268396e36df51d8e0eeab47203e127837709.jpg) +Figure 3: Correlation to human similarity. A: Top 50 models averaged over the 3 image datasets. B: Audio dataset. C: Video dataset. Each DNN baseline bar averages over multiple variants of the same architecture; the dots indicate average correlation of individual variants of the architecture. D: Average for each method type for each modality. The error bars are standard deviations. + +Figure 3 summarizes the performance of the various techniques across the three modalities. Note that the image modality results in Figure 3A are averaged across the three image datasets and only show the top 50 methods for this modality due to space constraints. 
Figure 3D shows the mean performance of the methods of each type for each modality. When viewing these results, a clear hierarchy emerges. While no approximation methods can perfectly match the ground-truth pairwise similarity (see the gap between the methods and IRR), stacked ones get close and are consistently more aligned with human similarity than other methods across all three modalities. Text-based methods come next in this hierarchy, followed by DNN-based ones. We also considered supervised methods that reweight DNN-based embeddings based on a small set of human similarity judgments, but we found that the performance was unstable (see Supplementary Section C.3 for details). + +The pre-eminence of stacked results suggests that LLMs and DNNs capture at least some different sources of variance in human similarity judgments. This is reinforced by our surprising finding that stacked representations from CLIP, a state-of-the-art jointly pre-trained multi-modal model, do not outperform stacked representations from independently trained models. We hypothesize that this happens because information is lost from both modalities when optimizing for a joint embedding. However, we note that the modest size of the performance gap between stacked and LLMs/DNNs suggests that there is also significant overlap between aspects of human similarity captured by language and perception. + +To investigate the effect of architecture and downstream task (e.g., classification) performance on alignment of DNNs with human similarity, for the image modality we compared similarity approximation performance against the number of model parameters on a log scale (Figure 4A) and ImageNet classification performance (Deng et al., 2009) (Figure 4B). 
Overall, we found a positive correlation between similarity approximation performance and the number of model parameters $(r = 0.39, p < 0.001)$ and a smaller but still significant positive correlation with performance on ImageNet $(r = 0.26, p < 0.001)$ . There were some notable exceptions with particularly high ImageNet performance but low similarity performance, such as the image transformer BEiT (Bao et al., 2021). + +Finally, we leverage both DNN-based methods and our proposed language-based methods to approximate similarity matrices that would otherwise require an unaffordable number of human similarity judgments to collect all dyadic pairs. Specifically, we approximate the two similarity matrices corresponding to all 1,000 audio clips and 1,000 video clips in our datasets using every method listed for each of those modalities in Figure 3. We provide visualizations of the resulting matrices at https://words-are-all-you-need.s3.amazon.com/index.html. We note that to exhaustively collect all dyadic pairs with five judgments per pair would normally require roughly 2.5 million human judgments for each of these matrices. + +![](images/2b2ca0e9510a55b27fde0673f2b4db373aa6bc6472c1b777de22294faaabac0f.jpg) + +![](images/d9c0483bcc12a6f34ad4ef34477a05e518d4f065a015bfd4d283743cf710c670.jpg) +Figure 4: Correlation to human similarity judgments as a function of A: number of model parameters; and B: ImageNet accuracy. + +![](images/0c182b240e2e982ca4bdfc7e32e3332c53a4dc813f606ab89cce465dc3cccef2.jpg) +Figure 5: Guide to collecting and estimating human similarity judgments at scale. + +# 5 DISCUSSION AND CONCLUSION + +In this work, we compared novel and existing methods for approximating human similarity judgments. 
The main contributions can be summarized as follows: 1) we provide a simple and accessible approach for approximating $O(N^2)$ human similarity judgments using $O(N)$ annotations, 2) we propose a new adaptive pipeline STEP-tag for tag mining, 3) we evaluate our approach against $600+$ domain-specific state-of-the-art DNNs, and 4) we publicly release all data comprising 206,339 human judgments. + +Based on these, we are now able to provide researchers with a best-practices guide to collecting similarity datasets. Our guide is based on two bottlenecks that researchers may face: one is the limit on the number of judgments that can be collected (e.g., due to cost) and the second is the availability of pre-trained models (i.e., either DNNs or LLMs). Our results make it clear that deep learning can provide good approximations for human similarity. In fact, when both pre-trained LLMs and DNNs are available, stacking their representations is consistently the best approach. However, even when neither type of pre-trained models are available, we suggest that classical word-frequency analysis methods still provide researchers with an efficient and competitive method for approximating human similarity. Our guide, comprehensively covering these and other cases, is laid out in Figure 5. + +One limitation of this work is that while similarity proxies generated from our pipeline can support ML datasets, they are also at risk of baking in high-level human biases that can lead to adverse societal implications, such as amplifying race and gender gaps. Researchers should devote utmost care to what they choose to incorporate in their training objective. Another limitation of our work is the fact that we were restricted to English text data and US participants. 
However, we believe that our approach and proposed methods (especially STEP-tag and the word-frequency methods) pave the way for the study of cross-cultural variation of human semantic representations by providing efficient tools for crowdsourcing high-quality semantic descriptors across languages. This is particularly relevant for low-resource languages, where our tag-mining techniques can work even with the absence of pre-trained ML models (Thompson et al., 2020; Barrett, 2020). We are currently expanding our work to include more languages and diverse cultures. Taken together, our results showcase how we can leverage language to make machine representations more human-like. Moreover, it highlights the importance of combining machine learning and cognitive science approaches for mutually advancing both fields. In particular, we believe that the methodologies adopted in this work have the potential to greatly advance basic research on naturalistic representations in cognitive science. + +# ACKNOWLEDGMENTS + +This work was supported by a grant from the John Templeton Foundation to TLG, an NDSEG fellowship to TRS, and an NSERC fellowship (567554-2022) to IS. + +# REFERENCES + +Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33:12449-12460, 2020. +Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, and Michael Auli. data2vec: A general framework for self-supervised learning in speech, vision and language, 2022. +Hangbo Bao, Li Dong, and Furu Wei. BEiT: BERT pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021. +H Clark Barrett. Towards a cognitive science of the human: cross-cultural approaches and their urgency. Trends in Cognitive Sciences, 24(8):620-638, 2020. +Federico Barrios, Federico López, Luis Argerich, and Rosa Wachenchauzer. 
Variations of the similarity function of textrank for automated summarization. arXiv preprint arXiv:1602.03606, 2016. +Joeran Beel, Bela Gipp, Stefan Langer, and Corinna Breitinger. Paper recommender systems: a literature survey. International Journal on Digital Libraries, 17(4):305-338, 2016. +Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. +William Brown. Some experimental results in the correlation of mental abilities 1. British Journal of Psychology, 1904-1920, 3(3):296-322, 1910. +Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, et al. WavLM: Large-scale self-supervised pre-training for full stack speech processing. arXiv preprint arXiv:2110.13900, 2021. +Alan S Cowen and Dacher Keltner. Self-report captures 27 distinct categories of emotion bridged by continuous gradients. Proceedings of the National Academy of Sciences, 114(38):E7900-E7909, 2017. +Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255. IEEE, 2009. +Philippe Esling, Adrien Bitton, et al. Generative timbre spaces: regularizing variational auto-encoders with perceptual metrics. arXiv preprint arXiv:1805.08501, 2018. +Haoqi Fan, Tullie Murrell, Heng Wang, Kalyan Vasudev Alwala, Yanghao Li, Yilei Li, Bo Xiong, Nikhila Ravi, Meng Li, Haichuan Yang, Jitendra Malik, Ross Girshick, Matt Feiszli, Aaron Adcock, Wan-Yen Lo, and Christoph Feichtenhofer. PyTorchVideo: A deep learning library for video understanding. In Proceedings of the 29th ACM International Conference on Multimedia, 2021. https://pytorchvideo.org/. +Christoph Feichtenhofer. 
X3d: Expanding architectures for efficient video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 203-213, 2020. +Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. SlowFast networks for video recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6202-6211, 2019. + +Tianyu Gao, Xingcheng Yao, and Danqi Chen. SimCSE: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821, 2021. +Thomas L Griffiths and Michael L Kalish. A bayesian view of language evolution by iterated learning. In Proceedings of the Annual Meeting of the Cognitive Science Society, volume 27, 2005. +Peter Harrison, Raja Marjieh, Federico Adolfi, Pol van Rijn, Manuel Anglada-Tort, Ofer Tchernichovski, Pauline Larrouy-Maestri, and Nori Jacoby. Gibbs sampling with people. Advances in Neural Information Processing Systems, 33:10659-10671, 2020. +Martin N Hebart, Charles Y Zheng, Francisco Pereira, and Chris I Baker. Revealing the multidimensional mental representations of natural objects underlying human similarity judgements. Nature Human Behaviour, 4(11):1173-1185, 2020. +Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. HuBERT: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021. +T Jaeger and Roger Levy. Speakers optimize information density through syntactic reduction. Advances in Neural Information Processing Systems, 19, 2006. +Kevin G Jamieson and Robert D Nowak. Low-dimensional embedding using adaptively selected ordinal data. In 2011 49th Annual Allerton Conference on Communication, Control, and Computing (Allerton), pp. 1077-1084. IEEE, 2011. +Aditi Jha, Joshua Peterson, and Thomas L Griffiths. 
Extracting low-dimensional psychological representations from convolutional neural networks. arXiv preprint arXiv:2005.14363, 2020. +Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natev, et al. The Kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. +Alexander JE Kell, Daniel LK Yamins, Erica N Shook, Sam V Norman-Haignere, and Josh H McDermott. A task-optimized neural network replicates human auditory behavior, predicts brain responses, and reveals a cortical processing hierarchy. Neuron, 98(3):630-644, 2018. +Prannay Khosla, Piotr Teterwak, Chen Wang, Aaron Sarna, Yonglong Tian, Phillip Isola, Aaron Maschinot, Ce Liu, and Dilip Krishnan. Supervised contrastive learning. Advances in Neural Information Processing Systems, 33:18661-18673, 2020. +Simon Kirby, Hannah Cornish, and Kenny Smith. Cumulative cultural evolution in the laboratory: An experimental approach to the origins of structure in human language. Proceedings of the National Academy of Sciences, 105(31):10681-10686, 2008. +Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International Journal of Computer Vision, 123(1):32-73, 2017. +Thomas Langlois, Haicheng Zhao, Erin Grant, Ishita Dasgupta, Tom Griffiths, and Nori Jacoby. Passive attention in artificial neural networks predicts human visual selectivity. Advances in Neural Information Processing Systems, 34, 2021. +Edith LM Law, Luis Von Ahn, Roger B Dannenberg, and Mike Crawford. TagATune: A game for music and sound annotation. In ISMIR, volume 3, pp. 2, 2007. +Kristin Lemhöfer and Mirjam Broersma. Introducing lexdale: A quick and valid lexical test for advanced learners of english. Behavior research methods, 44(2):325-343, 2012. 
+Drew Linsley, Sven Eberhardt, Tarun Sharma, Pankaj Gupta, and Thomas Serre. What are the visual features underlying human versus machine vision? In Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 2706-2714, 2017. + +Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10012-10022, 2021. +Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. arXiv preprint arXiv:2201.03545, 2022. +Steven R Livingstone and Frank A Russo. The Ryerson audio-visual database of emotional speech and song (RAVDESS): A dynamic, multimodal set of facial and vocal expressions in north american english. PloS one, 13(5):e0196391, 2018. +Raja Marjieh, Ilia Sucholutsky, Theodore R Sumers, Nori Jacoby, and Thomas L Griffiths. Predicting human similarity judgments using large language models. arXiv preprint arXiv:2202.04728, 2022. +Alice E Milne, Roberta Bianco, Katarina C Poole, Sijia Zhao, Andrew J Oxenham, Alexander J Billig, and Maria Chait. An online headphone screening test based on dichotic pitch. Behavior Research Methods, 53(4):1551-1562, 2021. +Gregory Murphy. The big book of concepts. MIT press, 2004. +Zarana Parekh, Jason Baldridge, Daniel Cer, Austin Waters, and Yinfei Yang. Crisscrossed captions: Extended intramodal and intermodal semantic similarity judgments for MS-COCO. arXiv preprint arXiv:2004.15020, 2020. +F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825–2830, 2011. +Joshua C Peterson, Joshua T Abbott, and Thomas L Griffiths. 
Evaluating (and improving) the correspondence between deep neural networks and human representations. Cognitive Science, 42 (8):2648-2669, 2018. +Steven T Piantadosi, Harry Tily, and Edward Gibson. Word lengths are optimized for efficient communication. Proceedings of the National Academy of Sciences, 108(9):3526-3529, 2011. +Mirco Ravanelli, Titouan Parcollet, Peter Plantinga, Aku Rouhe, Samuele Cornell, Loren Lugosch, Cem Subakan, Nauman Dawalatabad, Abdelwahab Heba, Jianyuan Zhong, Ju-Chieh Chou, Sung-Lin Yeh, Szu-Wei Fu, Chien-Feng Liao, Elena Rastorgueva, François Grondin, William Aris, Hwidong Na, Yan Gao, Renato De Mori, and Yoshua Bengio. SpeechBrain: A general-purpose speech toolkit, 2021. arXiv:2106.04624. +Brett D Roads and Bradley C Love. Enriching ImageNet with human similarity judgments and psychological embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3547-3557, 2021. +Lin CY Rouge. A package for automatic evaluation of summaries. In Proceedings of Workshop on Text Summarization of ACL, Spain, 2004. +Martin Schrimpf, Jonas Kubilius, Ha Hong, Najib J Majaj, Rishi Rajalingham, Elias B Issa, Kohitij Kar, Pouya Bashivan, Jonathan Prescott-Roy, Franziska Geiger, et al. Brain-Score: Which artificial neural network for object recognition is most brain-like? BioRxiv, pp. 407007, 2020. +Roger N Shepard. Multidimensional scaling, tree-fitting, and clustering. Science, 210(4468):390-398, 1980. +Roger N Shepard. Toward a universal law of generalization for psychological science. Science, 237 (4820):1317-1323, 1987. +Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI Conference on Artificial Intelligence, 2017. +Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International conference on machine learning, pp. 6105-6114. PMLR, 2019. + +Joshua B Tenenbaum and Thomas L Griffiths. 
Generalization, similarity, and bayesian inference. Behavioral and brain sciences, 24(4):629-640, 2001. +Bill Thompson, Seán G Roberts, and Gary Lupyan. Cultural influences on word meanings revealed through large-scale semantic alignment. Nature Human Behaviour, 4(10):1029-1038, 2020. +Amos Tversky. Features of similarity. Psychological review, 84(4):327, 1977. +Luis Von Ahn and Laura Dabbish. Labeling images with a computer game. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems, pp. 319-326, 2004. +Luis Von Ahn and Laura Dabbish. Designing games with a purpose. Communications of the ACM, 51(8):58-67, 2008. +Johannes Wagner, Andreas Triantafyllopoulos, Hagen Wierstorf, Maximilian Schmitt, Felix Burkhardt, Florian Eyben, and Björn W. Schuller. Dawn of the transformer era in speech emotion recognition: closing the valence gap, 2022. +Shu wen Yang, Po-Han Chi, Yung-Sung Chuang, Cheng-I Jeff Lai, Kushal Lakhotia, Yist Y. Lin, Andy T. Liu, Jiatong Shi, Xuankai Chang, Guan-Ting Lin, Tzu-Hsien Huang, Wei-Cheng Tseng, Kotik Lee, Da-Rong Liu, Zili Huang, Shuyan Dong, Shang-Wen Li, Shinji Watanabe, Abdelrahman Mohamed, and Hung yi Lee. SUPERB: Speech Processing Universal PERformance Benchmark. In Proc. Interspeech 2021, pp. 1194-1198, 2021. doi: 10.21437/Interspeech.2021-1775. +Ross Wightman. PyTorch image models. https://github.com/rwrightman/pytorch-image-models, 2019. +Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 38-45. Association for Computational Linguistics, 2020. 
+Kevin JP Woods, Max H Siegel, James Traer, and Josh H McDermott. Headphone screening to facilitate web-based auditory experiments. Attention, Perception, & Psychophysics, 79(7): 2064-2072, 2017. +Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, and Kevin Murphy. Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 305–321, 2018. +Daniel Yamins. An optimization-based approach to understanding sensory systems. The Cognitive Neurosciences, 4(V1):381, 2020. +Daniel LK Yamins, Ha Hong, Charles F Cadieu, Ethan A Solomon, Darren Seibert, and James J DiCarlo. Performance-optimized hierarchical models predict neural responses in higher visual cortex. Proceedings of the National Academy of Sciences, 111(23):8619-8624, 2014. +Yao-Yuan Yang, Moto Hira, Zhaoheng Ni, Anjali Chourdia, Artyom Astafurov, Caroline Chen, Ching-Feng Yeh, Christian Puhrsch, David Pollack, Dmitriy Genzel, Donny Greenberg, Edward Z. Yang, Jason Lian, Jay Mahadeokar, Jeff Hwang, Ji Chen, Peter Goldsborough, Prabhat Roy, Sean Narethiran, Shinji Watanabe, Soumith Chintala, Vincent Quenneville-Bélair, and Yangyang Shi. Torchaudio: Building blocks for audio and speech processing. arXiv preprint arXiv:2110.15018, 2021. +Noga Zaslavsky, Charles Kemp, Terry Regier, and Naftali Tishby. Efficient compression in color naming and its evolution. Proceedings of the National Academy of Sciences, 115(31):7937-7942, 2018. +Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. Bertscore: Evaluating text generation with bert. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=SkeHuCVFDr. + +# SUPPLEMENTARY MATERIALS + +# A STIMULI AND DATA + +# A.1 CODE AND DATA AVAILABILITY + +A link is provided to the public, containing all the data collected for this project during the review stage. 
It includes the new human behavioral data, the computational experiments with machine learning models, and all the necessary analyses scripts for producing the results. Additionally, the repository includes the Dallinger/PsyNet source codes for reproducing the behavioral experiments. Finally, we present an interactive visualization for exploring the similarity between stimuli as experienced by humans and different methods reported in the paper. + +# B BEHAVIORAL PARADIGMS + +# B.1 PARTICIPANTS + +The exact number of participants for each of the 9 new behavioral experiments is reported in Table 1. + +Table 1: Behavioral experiment summary table. + +
| Modality | Paradigm | Respect | Total stimuli | Trials per participant | Section | N | Pre-screening |
|----------|------------|------------|---------------|------------------------|---------|-----|---------------|
| Images   | Tags       | Animals    | 120           | 60                     | 2.2.4   | 56  | LX            |
| Images   | Tags       | Furniture  | 120           | 60                     | 2.2.4   | 58  | LX            |
| Images   | Tags       | Vegetables | 120           | 60                     | 2.2.4   | 57  | LX            |
| Audio    | Similarity | Emotions   | 100           | 85                     | 2.2.2   | 252 | HT            |
| Audio    | Captions   | Emotions   | 1,000         | 50                     | 2.2.3   | 151 | HT, LX        |
| Audio    | Tags       | Emotions   | 1,000         | 50                     | 2.2.4   | 217 | HT, LX        |
| Video    | Similarity | Activities | 100           | 85                     | 2.2.2   | 284 | HT            |
| Video    | Captions   | Activities | 1,000         | 50                     | 2.2.3   | 196 | HT, LX        |
| Video    | Tags       | Activities | 1,000         | 50                     | 2.2.4   | 221 | HT, LX        |
+ +Note. 'N' denotes the number of participants included in the analysis; 'LX' denotes the LexTALE English proficiency pre-screening task; 'HT' denotes the headphone test. + +# B.2 IMPLEMENTATION + +All behavioral experiments were implemented using the Dallinger4 and PsyNet (Harrison et al., 2020) frameworks. Dallinger is a modern tool for experiment hosting and deployment which automates the process of participant recruitment and compensation by integrating cloud-based services such as Heroku5 with online crowd-sourcing platforms such as AMT. PsyNet is a novel experiment design framework that builds on Dallinger and allows for flexible specification of experiment timelines as well as providing support for a wide array of tasks across different modalities (visual, auditory and audio-visual). Participants interact with the experiment through their web-browser, which in turn communicates with a backend Python server responsible for the experiment logic. + +# B.3 PRE-SCREENING + +A common technique for filtering out participants that are likely to deliver low-quality responses, as well as automated scripts (bots), is to implement pre-screening tasks prior to the main part of + +$^{2}$ Code and data: https://osf.io/kzbr5/?view_only=3dea58e008ce41c290ef0f374bddbf444 +3Interactive plots: https://words-are-all-you-need.s3.amazon.com/index.html + +4https://dallinger.readthedocs.io/ +5https://www.heroku.com/ + +each experiment. Failing the pre-screening tasks results in early termination of the experiment. Nevertheless, participants are still compensated for their time regardless of whether they fail or succeed on a pre-screener to ensure fair compensation. The role of pre-screeners in our studies was to realize two main criteria for data quality, namely, a) to be able to collect high-quality text descriptors, and b) to ensure that participants are able to inspect the target stimuli properly (in particular the audio component in prosody and videos). 
To do this, we implemented two pre-screening tasks, an English proficiency test and a standardized headphone test (used only for audio and video experiments). Table 1 provides details on which pre-screeners were used in each of the behavioral experiments. + +# alberation + +Does this word exist? + +yes + +no + +Figure 6: Example trial from the LexTALE pre-screening task (Lemhöfer & Broersma, 2012). + +English proficiency test. To test participants' English proficiency, we used LexTALE, a lexical decision task developed in Lemhöfer & Broersma (2012). In each trial, participants were briefly presented (1 second) with either a real English word or a made up word that does not exist. Participants were instructed to guess whether the word was real or not. A total of 12 trials (half of them being real words) were presented, and 8 of them needed to be correct for the participant to pass. The presented words were: hasty, fray, stoutly, moonlit, scornful, unkempt, sensible, kilp, plaintively, crumper, plaudate, alberation. An example trial is shown in Figure 6. + +Which sound was softest (quietest) -- 1, 2, or 3? + +1 +2 +3 + +Figure 7: Example trial from the headphone pre-screening test (Woods et al., 2017). + +Headphone test. We used the headphone test developed by Wood et al. (Woods et al., 2017), which is used as a standard pre-screener for high-quality auditory psychophysics data-collection procedures (Milne et al., 2021). The test is designed to ensure that the participants are wearing headphones and are able to perceive subtle differences in volume. The task consists of a forced choice task, in which three consecutive tones are played, and the participant has to identify which of them is the quietest. Crucially, these tones are constructed to exhibit a phase cancellation effect when not using headphones, and therefore making it difficult for non-headphone users to identify the quietest tone. Participants had to answer 4 out of 6 trials correctly to pass this test. 
An example trial is shown in Figure 7. + +![](images/57d6ed78b93d1f35c49af269482f05fbc8e7333a5e8542f8fb9d134c75450ae2.jpg) +How similar are the activities in following two videos? (2 / 85) +If it is difficult to choose between the options, don't worry, and just give what you intuitively think is the right answer. +Figure 8: Screenshot from the similarity judgment task over video pairs. + +# B.4 SIMILARITY JUDGMENTS + +In the present work, we collected similarity judgments across audio and video datasets. Each dataset comprised 4,950 unique pairs corresponding to the number of unordered subsets that contain two distinct objects (i.e., excluding self-similarity), within a set of 100 stimuli. We did not collect similarity judgments over the three datasets of images, as these were provided in Peterson et al. (2018) (and used here with permission). The experiments proceeded as follows: upon completion of the consent form and the pre-screening tasks, participants received instructions regarding the main experiment: + +Audio. In this experiment we are studying how people perceive emotions. In each round you will be presented with two different recordings and your task will be to simply judge how similar are the emotions of the speakers. + +Video. In this experiment we are studying how people perceive activities. In each round you will be presented with two different videos and your task will be to simply judge how similar are the activities in them. + +The instructions then continued as follows: + +You will have seven response options, ranging from 0 ('Completely Dissimilar') to 6 ('Completely Similar'). Choose the one you think is most appropriate. Note: no prior expertise is required to complete this task, just choose what you intuitively think is the right answer. + +The quality of your responses will be automatically monitored, and you will receive a bonus at the end of the experiment in proportion to your quality score. 
The best way to achieve a high score is to concentrate and give each round your best attempt. + +The experiment will begin now. You will take up to 85 rounds where you have to answer this question. Remember to pay careful attention in order to get the best bonus! + +As described in the instructions, in each trial, participants rated the similarity between a pair of sounds (how similar are the emotions of the two speakers?) or videos (how similar are the activities in the following two videos?) on a scale ranging from 0 (completely dissimilar) to 6 (completely similar) (Figure 8). Overall, participants completed 85 trials on a random subset of the possible pairs. To further motivate participants to provide good responses, we gave them an additional performance bonus for providing consistent data. Among the 85 trials, 5 trials were repeated for consistency checking. The responses were converted into a performance score by computing the Spearman correlation between the original and repeat ratings. Perfect scores resulted in a 10 cent bonus. + +# B.5 CAPTIONS + +We collected free-text captions for the video and audio datasets. Captions for the image datasets were previously collected in Marjieh et al. (2022) and used here with permission. After completing the consent form and pre-screening tests, participants received the following instructions: + +Audio. In this experiment we are studying how people describe emotions. You will be presented with different recordings of speakers and your task will be to describe their emotions. In doing so, please keep in mind the following instructions + +- Describe all the important aspects of the recording. + +Video. In this experiment we are studying how people describe activities in videos. You will be presented with different videos of activities and your task will be to describe their content. In doing so, please keep in mind the following instructions + +- Describe all the important activities in the video. 
+ +As well as the following guidelines adapted from Marjieh et al. (2022): + +- Do not start the sentences with "There is" or "There are". +- Do not describe unimportant details. +- You are not allowed to copy and paste descriptions. +- Descriptions should contain at least 5 words. +- Descriptions should contain at least 4 unique words. + +Note: No prior expertise is required to complete this task, just describe what you intuitively think is important as accurately as possible. + +The quality of your captions will be monitored automatically and providing low quality and repetitive responses could result in early termination of the experiment and hence a lower bonus. + +You will describe up to 50 recordings. + +These guidelines were enforced to ensure that participants deliver sufficiently informative captions that are not repetitive. In each trial of the main experiment, participants described a single audio (please describe the emotions of the speaker) or video stimulus (please describe the activity in the video). Overall, participants described up to 50 randomly presented stimuli. To filter out bad participants that tend to deliver repeated responses, in each trial (excluding the first 4 trials) we computed the mean edit distance between their current response and all responses that they previously provided using the partial_ratio function in the fuzzywuzzy6 Python package for fuzzy string matching. This function returns for a pair of input strings a matching score between 0 and 100 (100 being identical strings). Early termination was enforced if the mean response matching score was above 80. The idea here was to prevent participants from copying and pasting the same response over and over again (or varying it only slightly). + +# B.6 TAGS + +For the image, audio, and video datasets, we collected tag data, i.e., concise labels that describe the salient features of a stimulus. 
To do so, we developed a novel tag mining paradigm called STEP-Tag in + +# Mark the existing tags + +# picking + +![](images/442e688eade67c51016a7c0f2e60198b4abd7f0a403184413a6ae1f5a5feeaaf.jpg) +Figure 9: Screenshot of an example tag mining task for videos. The tag "picking" received 5 stars (very relevant), whereas the tag "apple" is flagged (marked as irrelevant). + +# apple + +![](images/305054b44560047950dbbc8dd653c442dfab18e29527dfb6f15bf0605a4ca4b5.jpg) + +![](images/63c64730495dff6b765e2e2b2cd0fd66867d64fdc3ded9a61bc60dc6d25aa0be.jpg) +Play again + +# Are any tags missing? + +Type in words describing the activity in the video, that are missing above. You can either select tags from a dropdown list or create entirely new ones. Submit your response for a new tag by pressing the enter key. You can add more than one tag. + +peach + +Type more tags + +Next + +
Dataset (# of stimuli)meanstdtotal
Vegetables (120)3.21.1385
Furniture (120)5.21.7627
Animals (120)8.22.7988
Audio-emotions (1000)9.13.59092
Video-activities (1000)8.52.98482
+ +Table 2: Mean, standard deviation, and total number of tags collected for each dataset. + +which each stimulus was treated as a separate "chain" (see Figure 2 in the paper). When the stimulus was presented for the first time, the participant was asked to provide at least one tag. For the following iterations, we sequenced participants so that each of them had to rate the tags provided by participants from the previous iterations within the same chain. The rating was either choosing between one (not very relevant) to five stars (very relevant), or marking the tag as completely irrelevant by using the flag icon (see Figure 9). Participants could optionally introduce new tags that will subsequently be presented to other participants assigned to the same chain. Participants could only provide tags that were not already present, and they had to be in lower-case letters. To discourage frequent use of long word combinations, a pop-up window appeared if participants used two or more white spaces (i.e., three or more words) to warn that long combinations should only be used when completely necessary. This process continued for at least 10 iterations, after which we checked at each consequent iteration whether the chain was "full". We considered a chain to be full if its latest iteration had at least 2 tags that were rated at least 3 times and had a mean rating of 3 stars. If a chain was not full after 20 iterations, we stopped collecting further iterations. Since each experimental batch lasted for a fixed duration of less than one day, in some cases we did not complete all chains, and a few chains had fewer iterations (3 for vegetables, 6 for animals and 2 for furniture, out of 120 chains each). Our experiment incentivized participants to provide new tags by paying them a performance bonus of 0.01 USD for every up-vote (i.e., not flagged) given by other participants. 
On the contrary, if two or more tags of the same participant were flagged by others, the participant was excluded (the participant received a warning after the first flag). We provide summary statistics on the number of collected tags in Table 2. + +After accepting the consent form and passing the pre-screening tasks, participants received introductory instructions regarding the main experiment: + +Images. Rate & Tag animals/furniture/vegetables! Thanks for participating in this game! In this game you will: + +- Watch images of animals/furniture/vegetables. +- Rate tags that other players have given. +- Add new tags that you think are missing. + +Audio. Rate & Tag emotions! Thanks for participating in this game! In this game you will: + +- Listen to a speech fragment and focus on the emotional content of the recording. +- Rate tags that other players have given. +- Add new tags that you think are missing. + +Video. Rate & Tag activities! In this game you will: + +- Watch a video and focus on the activities happening. +- Rate tags that other players have given. +- Add new tags that you think are missing. + +Participants then received further instructions regarding the rules of the game + +Images. After watching the animal/furniture/vegetable you will see tags given by other players that describe the animal/furniture/vegetable. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person seeing this animal/furniture/vegetable, you may see no previous tags. You can also add your own tag that is relevant to describe the animal/furniture/vegetable. Your tag will then be rated by other players who are playing the game simultaneously. + +Audio. 
After listening to the recording, you will see tags given by other players that describe the emotions in the speech fragment. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person listening to this speech sample, you may see no previous tags. You can also add your own tag that is relevant to describe the emotions in the speech fragment. Your tag will then be rated by other players who are playing the game simultaneously. + +Video. After watching the video, you will see tags given by other players that describe the activities in the video. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person watching this video, you may see no previous tags. You can also add your own tag that is relevant to describe the activities in the video. Your tag will then be rated by other players who are playing the game simultaneously. + +Finally, participants received the following guidelines regarding the tag input and the bonus scheme: + +Keep tags short. A word like "green grass" should rather be submitted as "green" and "grass", whereas a compound word such as "red wine" cannot be separated, since "red wine" means something different than just "red" and "wine". + +# Bonus rules. + +- If the tag you provide gets rated as a relevant tag (i.e., not flagged) by other players +- If your tag is unique and have not been introduced by others + +
ModalitySTEPCaptions
Audio230187
Video264291
+ +Table 3: Median of overall participants' time spent per stimulus (in seconds). + +Note: Simply writing many and irrelevant tags is not a good idea because other players might flag your tag. Your experiment will terminate early if there are too many red flags! + +Please try to use a variety of words to describe the animal / furniture / vegetable / emotion in the speech fragment / activities in the video, and use the entire star rating scale for your responses. + +# B.6.1 VALIDATING STEP-TAG + +We conducted a small, exploratory ablation study to validate STEP-Tag as a procedure for collecting diverse, accurate, and informative tags. First, we compared using multiple tags from the last iteration of STEP-Tag to using just a single randomly-selected highly-rated tag from the last iteration. We found that using a single tag greatly decreased correlation with human similarity (i.e., for the video dataset, the best-performing method on multiple tags had a correlation of $r = 0.74$ while the best-performing method on single labels had a correlation of $r = 0.35$ ). Second, we compared tags from the first iteration of STEP-Tag (equivalent to collecting tags without an adaptive procedure) to tags from the last iteration. We found that using first iteration tags greatly decreased correlation with human similarity (i.e., for the video dataset, the 'Tags CNNB mean (no split)' method, the correlation from the last iteration was $r = 0.74$ and from the first iteration it was $r = 0.44$ ; for 'Tags overlap' it was $r = 0.56$ from the last iteration and $r = 0.38$ from the first iteration). Finally, we extracted the Kinetics-200 labels for each video to compare the tags from STEP-Tag against the kinds of labels typically collected for machine learning datasets. 
We found that using labels decreased the correlation with human similarity (i.e., the best-performing method on pipeline tags had a correlation of $r = 0.74$ while the best-performing method on dataset labels had a correlation of $r = 0.64$ ). + +# B.7 DURATION OF STEP-TAG AND CAPTIONS + +To compare STEP-tag and captions, we computed the median of overall participants' time spent per stimulus (see Table 3). The times were only collected for the audio and video modality (captions for the image datasets were already collected by Marjieh et al. (2022)). We see that both methods consume roughly similar amounts of time, which is desirable as our analysis suggests that in some domains (e.g., video) tags yield the best results whereas in others (e.g., audio) captions do. + +# C PREDICTION METHODS + +We used two main types of methods to predict human similarity judgments. The first class ("DNN-based methods", described in section C.1) make use of pre-trained embedding models. In the second class of models ("Word Frequency Analysis methods", described in the section C.2) simple feature extraction techniques are used instead of pre-trained deep learning models. Figure 1 depicts schematic overview of all prediction methods that we used. + +# C.1 DNN-BASED METHODS + +The DNN-based methods use various embeddings and deep learning representations to predict human similarity judgments. These methods could be further split into three groups based on the kinds of input data they process, namely if they use a single sensory modality that is either image, audio or video ("unimodal models"; see subsection C.1.1), or use text that is either tag or captions ("text embeddings"; see subsection C.1.2), or use both ("multimodal models"). In addition, we also tested the performance of "stacked" representations, where the sensory and textual embedding of a select + +number of models were concatenated into a single long embedding. 
Overall, the computation time of embedding methods took about two weeks on an x1.16xlarge Amazon Web Services instance with 64 vCPUs and 976 GiB of memory. + +# C.1.1 UNIMODAL DNN-BASED METHODS + +Table 4: All 30 image baseline models occurring in the top 50 best models reported in Figure 3A. + +
Model nameAverage scoreSD scoreTop 1 accuracyNumber of parameters (M)
1Swin0.660.0681.5223.37
2ConvNeXT0.640.07N/A348.15
3NF-ResNet0.620.0480.6523.51
4NFNet l00.610.0882.7532.77
5ResNetV20.600.11N/A928.34
6NF-RegNet0.590.0579.299.26
7VGG160.580.1173.35134.27
8VGG190.580.1174.21139.58
9ViT0.580.1275.956.16
10ResMLP0.570.0783.59128.37
11Twins-SVT0.570.0681.6823.55
12Twins-PCPVT0.570.0481.0923.59
13VGG130.570.1171.59128.96
14CaiT0.570.0482.1917.18
15VGG110.570.1070.36128.77
16gMLP0.560.0679.6419.17
17PIT0.560.0378.1910.23
18DeiT0.560.0372.175.52
19ConViT0.560.0373.115.52
20TNT0.560.0381.5223.37
21CoaT0.550.0478.435.35
22gMixer0.550.0578.0424.34
23XCiT0.550.0482.5711.92
24IG ResNeXt0.530.1385.44826.36
25Visformer0.520.0282.1139.45
26RepVGG0.520.1180.2181.26
27CLIP image0.500.11N/A102.01
28JXNesT0.500.0781.4216.67
29ECAResNet0.470.1480.4528.11
30DenseNet0.470.1274.746.95
+ +Note. Performance accuracy on ImageNet was based on Wightman (2019) and was not available for all models. + +Image models We used 560 pre-trained models from the Pytorch Image Models (timm) repository (Wightman, 2019). We chose this repository as it contains an extensive and highly diverse set of pre-trained models in terms of architecture backbones, model sizes, and training sets. The repository includes models published from 2014 to 2022 that use various training sets (such as ImageNet1k, ImageNet21k, Instagram, etc.), training procedures and objectives (e.g., pre-training, fine-tuning, self-supervision, weak supervision, etc.) and architectures (e.g., VGG, ResNet, Inception, Transformer, etc.). The repository also reports various evaluation metrics for each model (e.g., their ImageNet performance). + +For each model, we computed the embedding from the last layer (typically before the final softmax layer; see below and Figure 10 for a preliminary analysis for the effect of layer depth in audio models). We then computed the cosine similarity between pairs of embedding vectors to produce a similarity matrix. The entire list of the performance of all models is detailed in the OSF repository + +![](images/945b41b903c72933eacab4066b51f19c2d63f10c7678cac69daf4dd5aebd1480.jpg) +Figure 10: Scores for individual layers of audio models scaled to the total number of layers. Models are colored by their meta architecture. + +associated with this paper7. Table 4 presents additional details for the 30 image baseline models occurring in the top 50 best models in Figure 3A including their average score (correlation to human judgments) across the three image datasets, the standard deviation (SD) of this score (across datasets, repeated runs and available model parameters in Wightman (2019)), their ImageNet accuracy, and their number of trainable parameters. + +Figure 4A shows the correlation to human similarity as a function of the number of parameters for all 569 models. 
In general, we found that models that have more parameters perform better (Figure 4A). Plotting all the embedding technique correlations against the number of training parameters of their respective models showed statistically significant positive correlation $(r = 0.39, p < 0.001)$ . However, one possible explanation for this could be the improved performance of newer models, which typically have more parameters, on various computer vision tasks. To test this, we computed the performance (i.e., correlation with human similarity) of the various models as a function of their accuracy on ImageNet (Deng et al., 2009) - which was provided in Wightman (2019) for all models except for CLIP (whose implementation came from a different repository) as summarized in Figure 4B. We found a positive correlation between the two metrics $(r = 0.26, p < 0.001)$ , though with some clear exceptions. For example, the vision transformer BEiT (Bao et al., 2021) and the convolutional architecture EfficientNet (Tan & Le, 2019) achieved high accuracy on ImageNet but performed poorly on human data. On the other hand, the vision transformer Swin (Liu et al., 2021) and the convolutional architecture ConvNext (Liu et al., 2022) both performed well on ImageNet and human similarity. This suggests that architecture and number of parameters are better predictors of similarity judgments than performance on ImageNet. Further analysis is required to determine what kind of architectural components actually contribute to more human-like performance (Langlois et al., 2021). + +Audio models We used all pre-trained wav2vec 2.0 (Baevski et al., 2020) and HuBERT (Hsu et al., 2021) models available in torchaudio (Yang et al., 2021). We also extracted embeddings from WavLM (Chen et al., 2021) and data2vec audio models (Baevski et al., 2022). 
Furthermore, we used additional wav2vec 2.0 and HuBERT models that were either specialized on emotion recognition or speaker identification (wen Yang et al., 2021; Wagner et al., 2022; Ravanelli et al., 2021). The performance of HuBERT, wav2vec 2.0, and WavLM models is shown in Figure 3B. Additional details about the models are displayed in Table 5. + +In addition, we explored the correlation between the audio models and human similarity data as a function of the layer in the model. Earlier literature has suggested that similarity to human representations may depend on the layer of the model (Kell et al., 2018; Yamins et al., 2014; Yamins, + +Table 5: All audio baseline models used in the analysis. + +
Model nameEmotion correlationNumber of parameters (M)
1wav2vec 2.0 lv60k (100h)0.49317
2wav2vec 2.0 lv60k (960h)0.49317
3wav2vec 2.0 lv60k0.51317
4wav2vec 2.0 lv60k (10m)0.51317
5HuBERT xlarge ASR0.451000
6HuBERT xlarge0.461000
7HuBERT large ASR0.46300
8wav2vec 2.0 large XLSR530.47317
9HuBERT large0.46300
10wav2vec 2.0 (Audeering, emotion)0.49317
11HuBERT base0.4190
12WavLM large0.46316.62
13HuBERT base (superb, emotion)0.4290
14HuBERT base (superb, speaker)0.4290
15WavLM base+0.4194.70
16wav2vec 2.0 base (960h)0.3895
17WavLM base0.3994.70
18wav2vec 2.0 base0.3495
19wav2vec 2.0 base (10m)0.3495
20wav2vec 2.0 base (superb, emotion)0.3495
21wav2vec 2.0 base (superb, speaker)0.3495
22wav2vec 2.0 base (100h)0.3295
23HuBERT large (superb, emotion)0.29300
24HuBERT large (superb, speaker)0.29300
25wav2vec 2.0 large (100h)0.32317
26wav2vec 2.0 large (superb, emotion)0.31317
27wav2vec 2.0 large (superb, speaker)0.31317
28wav2vec 2.0 large (960h)0.31317
29wav2vec 2.0 large (10m)0.31317
30data2vec audio large (960h)0.31313.28
31data2vec audio base (100h)0.23313.28
32data2vec audio large (100h)0.23313.28
33data2vec audio large (10m)0.21313.28
34wav2vec 2.0 (SpeechBrain, emotion)0.1195
35data2vec audio base (960h)0.1693.16
36data2vec audio base (10m)0.1593.16
+ +2020). We expected that the layers closer to the input of the model (where the representation is more low-level) to be less predictive. In general, we found that this was the case (Figure 10). In some variants of wav2vec, however, intermediate representations performed better, possibly due to the misalignment of the training task of wav2vec with the emotion task. This analysis confirms the choice we made in the paper to mostly use the last two layers of the models. Preliminary analysis of the image and video models also explored different layers, but the results were similar to those we presented in audio, and are therefore not reported here. + +Video models We extracted embeddings from the 'Slow' (a 3D ResNet; see Feichtenhofer et al. (2019)), Slowfast (a 2-path model with one path capturing semantics and the other capturing fine details; see Feichtenhofer et al. (2019)), and X3d (a model that initially starts as a simple 2D image classifier but is expanded in several axes; see Feichtenhofer (2020)) architectures implemented in pytorchvideo (Fan et al., 2021). All video models were pre-trained on the Kinetics-400 dataset (Kay et al., 2017). The performance of the models is displayed in Figure 3C. Numeric correlation values are detailed in Table 6 along with model accuracy (Top1 and Top5) on Kinetics-400, and the number of parameters in each model. The accuracies and parameter counts are listed as reported in + +Fan et al. (2021). As with previous modalities, the number of parameters appears to be positively correlated with correlation to human similarity. + +Table 6: All video baseline models used in the analysis. + +
Model nameCorrelationKinetics-400 Top1 AccKinetics-400 Top5 AccNumber of parameters (M)
1Slowfast r500.6576.9492.6934.57
2Slowfast r1010.6477.9093.2762.83
3Slow r500.6174.5891.6332.45
4X3d M0.5375.9492.723.79
5X3d S0.4973.3391.273.79
6X3d XS0.4869.1288.633.79
+ +# C.1.2 TEXT EMBEDDING METHODS + +Caption text embedding. Since there are multiple captions per stimulus, an aggregation procedure had to be applied to produce a single embedding vector for each stimulus. In our main analysis, for each stimulus, we extracted the embedding for each associated caption and averaged these embeddings together before computing cosine similarity between the mean embeddings. We also tried an alternative approach of concatenating the captions together into a single paragraph, which we then passed through the LLMs to compute a single embedding per stimulus. We found that this did not consistently improve performance and in many cases even decreased it, though we note that we did not experiment with different permutations of the concatenated captions, nor did we extensively study other ways to combine them together. Future work could explore other techniques for pre-processing captions and aggregating representations from multiple captions in ways that would improve correlation with human similarity judgments. + +Tag text embedding. We experimented with several algorithms for computing similarity between sets (or multi-sets) of tags. The algorithms described in this section all involve using ConceptNet NumberBatch (CNNB) (Speer et al., 2017) as the embedding backbone for turning discrete tags into continuous vector representations. For each stimulus, we took the tags remaining in the final iteration, and tested whether they were found in the dictionary for our embedding model. If a tag was not found and if it contained no spaces, we tried to correct the spelling before trying to look it up in the dictionary again. If a tag contained spaces, we split it into individual words, correct their spelling, and averaged together the embedded representations of those words that were found in the dictionary. 
Tags that were not found even after spelling correction and splitting were excluded from the set and did not contribute to the final representation. For the methods marked ‘(no split)’ we did not split multi-word tags, instead we just excluded multi-word tags that were not found in the embedding model dictionary. In the following, we describe the different techniques used to generate predictions based on tag embeddings. + +Tags CNNB overlap. For each pair of stimuli, we counted the number of 'almost identical' tag embeddings, defined as every respective element of the two embeddings being less than a certain threshold apart (in our case, this threshold was 0.1). We then set similarity for that pair of stimuli to be this count, i.e., the number of 'almost identical' tags, normalized by the total number of tags across the respective two sets. + +Tags CNNB quantized. This method involves quantizing tags using cosine similarity to find the number of unique tags. For each pair of stimuli, we counted the number of tags assigned to the first stimulus that had cosine similarity greater than a certain threshold (in our case, this threshold was 0.7) to at least one tag of the second stimulus (call this value $N_A$ ) and vice-versa ( $N_B$ ). The minimum of these two values is the number of unique, shared tags between the two sets ( $\min(N_A, N_B)$ ). The total number of unique tags across the two sets is then the total number of tags in each set ( $T_A + T_B$ ) minus the maximum number of shared tags ( $\max(N_A, N_B)$ ). We compute similarity as the ratio of the number of unique, shared tags to the total number of unique tags, $S_{AB} = \frac{\min(N_A, N_B)}{T_A + T_B - \max(N_A, N_B)}$ . For example, suppose the two sets of tags are $A: \{a, b, c, g\}$ and $B: \{a, b, d, e\}$ , so $T_A = T_B = 4$ , and that $a, c$ have cosine similarity of 0.8. The number of tags from set A found in set B is $N_A = 3$ , and those from B found in A is $N_B = 2$ . 
The number of unique, shared tags is $\min(N_A, N_B) = 2$ (since + +$\{a, b, c\}$ can be represented by $\{a, b\}$ ), and the total number of unique tags is $4 + 4 - 3 = 5$ (since $\{a, b, c, g, a, b, d, e\}$ can be represented by $\{a, b, d, e, g\}$ ). The assigned similarity is then $S_{AB} = \frac{2}{5}$ . + +Tags CNNB mean. The set of tag embeddings for each stimulus was averaged together to form a single embedding assigned to the respective stimulus. We then computed cosine similarity on the embeddings of each pair of stimuli. + +Tags CNNB mean (no split). Same as above, but without splitting multi-word tags (i.e., ones that contain spaces) during the embedding process. + +All spelling corrections in the algorithms listed above were performed using the Python package `pyspellchecker^8`, taking the top corrected recommendation returned by the spell checker in each case. + +Tags to caption Roberta (SimCSE). Additionally, for the image datasets, we experimented with converting sets of tags into captions and then using those captions with our best-performing LLM ('sup-simcse-roberta-large') the same way we do with user-generated captions. To convert a set of tags into a caption, we joined the set of tags with commas and prepended them with the phrase "This is an image of". + +# C.1.3 OTHER DNN-BASED METHODS + +For the image datasets, we also considered several other methods that made use of DNNs but do not fit into the categories described above. + +GPT3 prompting We experimented with prompting GPT3 (Brown et al., 2020), a large pre-trained language model, to directly output similarity judgments as a text-completion problem rather than having to access model embeddings as we did above. We used a few-shot prompting approach where in each prompt we included three context examples of pairs of tag sets and their associated similarity rating. 
We then provided the pair of tag sets for the two images that we wanted to get a similarity rating for but left the rating empty for the model to fill in. + +Here is an example prompt with the GPT3 response bolded and in square brackets: + +
People described pairs of images using words.
How similar are the two images in each pair on a scale of 0-1 where 0 is completely dissimilar and 1 is completely similar?
Here are the descriptions of image one: tortoise, slow, protected, shell, turtle, scaly, old, cold-blooded
Here are the descriptions of image two: monkey, ape, mammal, black and white, hairy, agile, primate, smart, tree-dwelling
Rating: 0.05
Here are the descriptions of image one: rhinoceros, horn, gray, standing, heavy body, endangered, wild, africa, african
Here are the descriptions of image two: tiger, open mouth, stripes, feline, predator
Rating: 0.27
Here are the descriptions of image one: goat, eye, leg
Here are the descriptions of image two: mammal, wide-nosed, mandrill, primate, baboon, smart
Rating: 0.19
Here are the descriptions of image one: black, primate, mammal, hairy, chimpanzee, africa, african, great ape, smart, omnivore
Here are the descriptions of image two: zebra, striped, two-toned, wild, staring, mammal, equine, herd animal, africa
Rating: [0.14]
+ +We repeated this four times for each pair of images in each image dataset with a different set of context examples during each repetition and averaged together the GPT responses to get a final + +similarity prediction for each pair. In total, creating the context examples required having access to human similarity judgments over only 12 pairs of images. We found that this approach yielded surprisingly good predictions, with an average correlation of $r = 0.62$ across the image datasets. We believe this approach merits future investigation to determine whether prompt engineering can further increase the performance. + +Image captioning models We experimented with using pre-trained image captioning models to generate captions for our images and then using those captions with our best-performing LLM ('sup-simcse-roberta-large') the same way we do with user-generated captions. We used three pre-trained image captioning models from HuggingFace ('flamingo-mini', 'vilt-b32-finetuned-vqa', and 'vit-gpt2-image-captioning') to generate text descriptions for our images. However, the performance was quite poor with an average of $r = 0.29$ across the three models. As a result, $O(N)$ language-based methods cannot easily be reduced to $O(1)$ even when domain-relevant pre-trained caption models are available. + +# C.2 WORD FREQUENCY ANALYSIS METHODS + +In this work, we also conducted an additional evaluation of prediction models beyond embedding-based techniques (described in the previous section). Specifically, we compared the predictions of embedding-based models, which utilize deep learning representations, with those of traditional methods of text mining. + +Before the word frequency analysis, we performed the following initial pre-processing steps + +- For caption data, we concatenated all the captions describing the same stimulus into a single long "document." 
+- For tag data, we wanted to prioritize tags that appeared earlier in the tag-mining chains and were rated higher. To that end, we gathered all tags from all iterations and duplicated tags from a given iteration based on the ratings they received. For example, if the tag "tomato" received three stars, then we would add the repeated tokens "tomato, tomato, tomato" to the aggregated list ("document"). In a given iteration, flagged tags are removed, but if they are rated later, then they are included. The total number of repetitions per token is equal to the sum of all the stars they received in all iterations. As a result, each token is repeated multiple times, which we take into consideration in subsequent analysis. + +For the next steps, we used the Matlab text analytics toolbox $^{9}$ . Unless otherwise specified, we used default parameters for all functions. To generate similarity matrices, we applied the following methods: + +Co-occurrence method. In this approach, we simply counted the number of repeated pairs of words in documents $i$ and $j$ and normalized by the total number of pairs. Formally, we use $w_{i}$ to denote the word list of a document $i$ . Let $w_{i,k}$ be the $k$ -th word in the $w_{i}$ list of words, and let $|w_{i}|$ denote the length of the list. We denote by $\delta(c,d)$ the indicator function that returns 1 if and only if the word $c$ is identical to the word $d$ , and 0 otherwise. We computed the co-occurrence score $S(w_{i},w_{j})$ according to the following formula: + +$$ +S(w_{i}, w_{j}) = \frac{\sum_{k} \sum_{l} \delta(w_{i,k}, w_{j,l})}{|w_{i}|\,|w_{j}|} +$$ + +We suggest using this method only with tags and not with captions. + +Co-occurrence-rep. This method was applied only to tags. We used an identical procedure to the Co-occurrence method, except that we did not separate the words within a tag as separate tokens and instead treated the entire tag (that may include multiple words) as a single token. 
+ +Rouge score. In this approach, similarity was estimated by computing the rouge score of the word lists associated with each pair of documents. The Rouge score was computed using rougeEvaluationScore (Rouge, 2004). We suggest using this method only with tags and not with captions. + +The following methods make use of tokenized data and a pre-processing procedure that we found effective. Pre-processing was applied to both tag and caption data and tokenization was performed as follows: + +- We separated all text into single words by applying the tokenizedDocument function. +- We added part of speech information using the addPartOfSpeechDetails function. +- We performed lemmatization using the normalizeWords function. +- We erased punctuation from the tokens using the erasePunctuation function. +- We removed stopwords using the removeStopWords function. +- We removed words with fewer than two characters or more than 15 characters. +- We created a bag of words representation of each tokenized document using the bagOfWords function. +- We also removed words that were not present in more than two documents using the removeInfrequentWords function. + +With the results of these pre-processing steps, we then computed similarity matrices based on the following methods: + +bm25S. We used bm25+ to compute similarity between documents (Barrios et al., 2016) using Matlab's bm25Similarity function. This function represents TF-IDF-like retrieval functions used in document retrieval. We used a variant that has a normalization function that properly handles documents with a long list of words. + +tfidf-cosine. We computed pairwise cosine similarities between document pairs using the TF-IDF matrix derived from their word counts and Matlab's cosineSimilarity function. 
+ +# C.3 SUPERVISED METHODS + +Several previous studies investigated improving correlations by applying and fine-tuning simple linear transformations to embedding vectors $z^T \mathbf{W}z$ where $\mathbf{W} = \mathrm{diag}(w_1, \ldots, w_d)$ via a cross-validated ridge regression procedure that could be fit to ground-truth similarity judgments. The parameters of the diagonal reweighting matrix $\mathbf{W}$ are fitted to a training subset of stimuli and used to predict similarity of pairs in a held-out validation set Peterson et al. (2018); Marjieh et al. (2022). To be consistent and make results comparable, here we report the results of performing this 6-fold cross-validated linear transformation (LT-CCV) on the model embeddings and datasets considered in this work. The analysis was carried out using the RidgeCV package from the scikit-learn Python library Pedregosa et al. (2011). Results with both normalized ('LT CCV (norm)') and unnormalized ('LT CCV') regressors are shown in Figure 11; see RidgeCV documentation for details on normalization $^{10}$ . We see that the linear transformation does not consistently improve performance (and can even decrease it) when applied to many of the modality-based or stacked embeddings, but it does frequently improve performance when applied to caption embeddings. Due to their instability and risk of overfitting, we do not use these methods in our main analysis. + +![](images/713b9d34308b1033df18e83391dfaaf08fe926c77cfe63f85c9fdbc65fc19aa4.jpg) +Figure 11: Effect of fine-tuning model embeddings using a small subset of similarity judgments. 
\ No newline at end of file diff --git a/2023/Words are all you need_ Language as an approximation for human similarity judgments/images.zip b/2023/Words are all you need_ Language as an approximation for human similarity judgments/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..2ba7c7f0827e8569a9528cd45ee981e4fece5365 --- /dev/null +++ b/2023/Words are all you need_ Language as an approximation for human similarity judgments/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4db4119d0f89878f406173b14f52d3acf9e634f3fc8f71fe5eeb9beb0d0fa6a7 +size 1122568 diff --git a/2023/Words are all you need_ Language as an approximation for human similarity judgments/layout.json b/2023/Words are all you need_ Language as an approximation for human similarity judgments/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f3ec2dbc27f734668665e65c1b2eb1b34d4312fc --- /dev/null +++ b/2023/Words are all you need_ Language as an approximation for human similarity judgments/layout.json @@ -0,0 +1,15395 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "text", + "content": "WORDS ARE ALL YOU NEED? 
LANGUAGE AS AN APPROXIMATION FOR HUMAN SIMILARITY JUDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "spans": [ + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "text", + "content": "Raja Marjieh" + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "inline_equation", + "content": "^{1,*}" + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "text", + "content": ", Pol van Rijn" + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "inline_equation", + "content": "^{2,*}" + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "text", + "content": ", Ilia Sucholutsky" + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "inline_equation", + "content": "^{3,*}" + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "text", + "content": ", Theodore R. Sumers" + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "text", + "content": ", Harin Lee" + }, + { + "bbox": [ + 110, + 133, + 488, + 148 + ], + "type": "inline_equation", + "content": "^{2,4}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 162, + 286, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 162, + 286, + 175 + ], + "spans": [ + { + "bbox": [ + 111, + 162, + 286, + 175 + ], + "type": "text", + "content": "Thomas L. 
Griffiths" + }, + { + "bbox": [ + 111, + 162, + 286, + 175 + ], + "type": "inline_equation", + "content": "^{1,3,\\ast \\ast}" + }, + { + "bbox": [ + 111, + 162, + 286, + 175 + ], + "type": "text", + "content": ", Nori Jacoby" + }, + { + "bbox": [ + 111, + 162, + 286, + 175 + ], + "type": "inline_equation", + "content": "^{2,\\ast \\ast}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 296, + 162, + 386, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 162, + 386, + 174 + ], + "spans": [ + { + "bbox": [ + 296, + 162, + 386, + 174 + ], + "type": "text", + "content": "\\*\\*\\* Equal contribution." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 296, + 175, + 495, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 175, + 495, + 186 + ], + "spans": [ + { + "bbox": [ + 296, + 175, + 495, + 186 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 296, + 175, + 495, + 186 + ], + "type": "text", + "content": "Department of Psychology, Princeton University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 296, + 186, + 484, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 186, + 484, + 198 + ], + "spans": [ + { + "bbox": [ + 296, + 186, + 484, + 198 + ], + "type": "text", + "content": "2Max Planck Institute for Empirical Aesthetics" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 296, + 198, + 523, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 198, + 523, + 209 + ], + "spans": [ + { + "bbox": [ + 296, + 198, + 523, + 209 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 296, + 198, + 523, + 209 + ], + "type": "text", + "content": "Department of Computer Science, Princeton University" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 296, + 209, + 519, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 209, + 519, + 221 + ], + "spans": [ + { + 
"bbox": [ + 296, + 209, + 519, + 221 + ], + "type": "text", + "content": "4Max Planck Institute for Cognitive and Brain Sciences" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 276, + 250, + 334, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 250, + 334, + 262 + ], + "spans": [ + { + "bbox": [ + 276, + 250, + 334, + 262 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 140, + 277, + 471, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 277, + 471, + 531 + ], + "spans": [ + { + "bbox": [ + 140, + 277, + 471, + 531 + ], + "type": "text", + "content": "Human similarity judgments are a powerful supervision signal for machine learning applications based on techniques such as contrastive learning, information retrieval, and model alignment, but classical methods for collecting human similarity judgments are too expensive to be used at scale. Recent methods propose using pre-trained deep neural networks (DNNs) to approximate human similarity, but pre-trained DNNs may not be available for certain domains (e.g., medical images, low-resource languages) and their performance in approximating human similarity has not been extensively tested. We conducted an evaluation of 611 pre-trained models across three domains – images, audio, video – and found that there is a large gap in performance between human similarity judgments and pre-trained DNNs. To address this gap, we propose a new class of similarity approximation methods based on language. To collect the language data required by these new methods, we also developed and validated a novel adaptive tag collection pipeline. We find that our proposed language-based methods are significantly cheaper, in the number of human judgments, than classical methods, but still improve performance over the DNN-based methods. 
Finally, we also develop 'stacked' methods that combine language embeddings with DNN embeddings, and find that these consistently provide the best approximations for human similarity across all three of our modalities. Based on the results of this comprehensive study, we provide a concise guide for researchers interested in collecting or approximating human similarity data. To accompany this guide, we also release all of the similarity and language data, a total of 206,339 human judgments, that we collected in our experiments, along with a detailed breakdown of all modeling results." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 555, + 206, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 555, + 206, + 567 + ], + "spans": [ + { + "bbox": [ + 106, + 555, + 206, + 567 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 582, + 506, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 682 + ], + "type": "text", + "content": "Similarity judgments have long been used as a tool for studying human representations, both in cognitive science (Shepard, 1980; 1987; Tversky, 1977; Tenenbaum & Griffiths, 2001), as well as in neuroscience, as exemplified by the rich literature on representational similarity between humans and machines (Schrimpf et al., 2020; Kell et al., 2018; Linsley et al., 2017; Langlois et al., 2021; Yamins et al., 2014) whereby similarity patterns of brain activity are compared to those arising from a model of interest. 
Recent research in machine learning suggests that incorporating human similarity judgments in model training can play an important role in a variety of paradigms such as human alignment (Esling et al., 2018), contrastive learning (Khosla et al., 2020), information retrieval (Parekh et al., 2020), and natural language processing (Gao et al., 2021)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 686, + 504, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 686, + 504, + 711 + ], + "spans": [ + { + "bbox": [ + 104, + 686, + 504, + 711 + ], + "type": "text", + "content": "However, building a large dataset based on human similarity judgments is very expensive and often infeasible since the number of judgments required is quadratic in the number of stimuli – for " + }, + { + "bbox": [ + 104, + 686, + 504, + 711 + ], + "type": "inline_equation", + "content": "N" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 721, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 721, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 721, + 504, + 732 + ], + "type": "text", + "content": "*Correspondence: {raja.marjieh, is2961}@princeton.edu, pol.van-rijn@ae.mpg.de" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + 
"para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 84, + 504, + 364 + ], + "blocks": [ + { + "bbox": [ + 106, + 84, + 504, + 364 + ], + "lines": [ + { + "bbox": [ + 106, + 84, + 504, + 364 + ], + "spans": [ + { + "bbox": [ + 106, + 84, + 504, + 364 + ], + "type": "image", + "image_path": "aea578caba5065f0dadc0f7b3cf13e1e7a10d356d8d7a8df27f4bf60b75ca097.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 380, + 506, + 427 + ], + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 427 + ], + "type": "text", + "content": "Figure 1: Comparing human similarity scores gathered through crowdsourcing with ML pipelines. We used data from three modalities: images, audio, and video. For each modality, we extracted deep model embeddings and gathered human captions and tags. Word- and language-embedding models, as well as simple word-frequency analysis, were used to predict human similarity judgments." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 445, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 504, + 506 + ], + "type": "text", + "content": "stimuli, " + }, + { + "bbox": [ + 104, + 445, + 504, + 506 + ], + "type": "inline_equation", + "content": "O(N^2)" + }, + { + "bbox": [ + 104, + 445, + 504, + 506 + ], + "type": "text", + "content": " judgments are required1. For example, to fully quantify the similarity of all possible dyadic pairs of 50,000 images, one needs to collect on the order of 1.25 billion (" + }, + { + "bbox": [ + 104, + 445, + 504, + 506 + ], + "type": "inline_equation", + "content": "\\sim \\frac{50000^2}{2}" + }, + { + "bbox": [ + 104, + 445, + 504, + 506 + ], + "type": "text", + "content": ") human similarity judgments. 
Thus, human judgments are the main bottleneck for machine-learning methods based on similarity. For this reason, the majority of available human similarity datasets are small by machine learning standards (up to a few thousand objects)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 510, + 506, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 506, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 506, + 643 + ], + "type": "text", + "content": "Advancements in deep learning have brought an alternative approach that does not require extensive collection of human judgments. Specifically, the idea is to use the similarity between hidden representations in pre-trained deep neural networks (DNNs) to approximate human similarity (Peterson et al., 2018; Jha et al., 2020; Marjieh et al., 2022; Hebart et al., 2020; Roads & Love, 2021). Some of these methods also suggest fine-tuning representations on a small training set of human similarity judgments (Peterson et al., 2018). This, in turn, results in a significant reduction in the number of required human judgments down to " + }, + { + "bbox": [ + 104, + 510, + 506, + 643 + ], + "type": "inline_equation", + "content": "O(1)" + }, + { + "bbox": [ + 104, + 510, + 506, + 643 + ], + "type": "text", + "content": " (given the pre-trained model). While such methods are promising, they still require access to strong pre-trained models which may not necessarily be available in all domains (e.g., medical datasets, niche modalities, low-resource languages, etc.). In addition, representations obtained from neural networks may not always overlap with human similarity representations, given that the models can be trained for different objectives (i.e., their embeddings may be poor approximations for human similarity)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 647, + 506, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 506, + 693 + ], + "type": "text", + "content": "A comprehensive comparison to assess which models perform well in predicting human similarity across different modalities is currently lacking in the literature. To this end, one of our main contributions in this paper is providing a first-of-its-kind large-scale evaluation of over 600 publicly-available pre-trained models as approximations for human similarity judgments on three modalities" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "text", + "content": "1Depending on various assumptions, the full range of classical methods can require between " + }, + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "inline_equation", + "content": "O(N \\log N)" + }, + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "text", + "content": " (Jamieson & Nowak, 2011) and " + }, + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "inline_equation", + "content": "O(N^3)" + }, + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "text", + "content": " (Hebart et al., 2020) human judgments. 
In this work, we used " + }, + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "inline_equation", + "content": "O(N^2)" + }, + { + "bbox": [ + 104, + 700, + 505, + 733 + ], + "type": "text", + "content": " human judgments (collecting all unique dyadic pairs) as the baseline for comparison" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "/images, audio, video). Our experiments reveal that there is a large gap in performance between the " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "O(1)" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " DNN methods and the classical " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "O(N^2)" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " similarity method we used as the baseline." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "text", + "content": "To address this gap, we propose a new class of " + }, + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "inline_equation", + "content": "O(N)" + }, + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "text", + "content": " methods to efficiently and accurately approximate human similarity based on language. This is motivated by a long line of research in cognitive science suggesting that language is an extremely efficient way for humans to communicate information about their sensory environment (Murphy, 2004; Zaslavsky et al., 2018; Piantadosi et al., 2011; Jaeger & Levy, 2006). This in turn suggests that we can use textual descriptors to approximate similarity judgments across different modalities. Moreover, such textual descriptors can be collected at the cost of " + }, + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "inline_equation", + "content": "O(N)" + }, + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "text", + "content": " human judgments (as people describe individual stimuli rather than pairs), which renders this method scalable." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 204, + 506, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 506, + 304 + ], + "type": "text", + "content": "We consider two approaches for approximating similarity from text data. One approach is to use pre-trained Large Language Models (LLM) to produce vector embeddings of the textual descriptions, and then use a measure of distance between these embeddings to approximate human similarity. 
This method is more domain-agnostic than the " + }, + { + "bbox": [ + 104, + 204, + 506, + 304 + ], + "type": "inline_equation", + "content": "O(1)" + }, + { + "bbox": [ + 104, + 204, + 506, + 304 + ], + "type": "text", + "content": " deep learning methods as it only requires access to a pre-trained LLM regardless of the modality of the original dataset. However, there are some cases where the domain may be out-of-distribution for all available LLMs (e.g., niche technical fields), or where no LLMs are available at all (e.g., low-resource languages). In such cases, the other approach is to use Word-Frequency Analysis (WFA) methods from classical text processing literature (Barrios et al., 2016; Rouge, 2004; Beel et al., 2016)," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 308, + 506, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 506, + 397 + ], + "type": "text", + "content": "As for the textual descriptions themselves, we consider two types, namely, free-text captions and concise word tags. Collecting captions for machine learning datasets is a well-established practice and can easily be done through crowdsourcing platforms. On the other hand, there is no consensus on best practices for collecting tags without a pre-existing taxonomy (i.e., open-set labels). To address this, we propose a novel adaptive tag mining pipeline called Sequential Transmission Evaluation Pipeline (STEP-Tag) which we describe in Section 2.2.4. As we will show, STEP-Tag allows to collect meaningful, diverse, and high-quality word tags for target stimuli in an online crowdsourcing environment." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "text", + "content": "Finally, we propose one additional set of hybrid approximation methods that combine sensory information with textual descriptions while still requiring " + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "inline_equation", + "content": "O(N)" + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "text", + "content": " human judgments. For this approach, we propose to stack the embeddings derived from both domain-specific models (e.g., output from the last layer of an image classifier) with the LLM embedding of the respective textual description. When multi-modal models are available, we can similarly leverage the joint embedding of both the stimulus and its textual description." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 474, + 506, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 474, + 506, + 541 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 506, + 541 + ], + "type": "text", + "content": "We evaluate all of these novel and existing methods across multiple modalities. We test the relative contributions of linguistic and sensory information in approximating human similarity and show that our proposed language-based methods provide both accurate and efficient approximations across modalities, even though they do not require a trained modality-specific deep learning model. Crucially, with this large-scale evaluation, we are able for the first time to provide researchers with a comprehensive guide of the tools to use for approximating human similarity at scale." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 545, + 298, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 545, + 298, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 298, + 557 + ], + "type": "text", + "content": "To summarize, our contributions are as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 571, + 488, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 571, + 488, + 582 + ], + "spans": [ + { + "bbox": [ + 132, + 571, + 488, + 582 + ], + "type": "text", + "content": "- We conduct a comprehensive comparison of human similarity approximation methods." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 594, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 594, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 132, + 594, + 504, + 616 + ], + "type": "text", + "content": "- We propose a novel modality-agnostic method for approximating similarity based on text and show that it is both efficient and competitive in terms of performance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 629, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 629, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 132, + 629, + 504, + 651 + ], + "type": "text", + "content": "- We propose STEP-Tag, a novel adaptive tagging pipeline, and show that it is effective for crowdsourcing high-quality and diverse sets of word tags." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 663, + 504, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 663, + 504, + 686 + ], + "spans": [ + { + "bbox": [ + 132, + 663, + 504, + 686 + ], + "type": "text", + "content": "- We synthesize our findings into a detailed guide for researchers interested in approximating human similarity judgments at scale." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 698, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 132, + 698, + 504, + 732 + ], + "type": "text", + "content": "- We collect and release ground-truth and approximated versions of a large behavioral dataset " + }, + { + "bbox": [ + 132, + 698, + 504, + 732 + ], + "type": "inline_equation", + "content": "(N = 1,492)" + }, + { + "bbox": [ + 132, + 698, + 504, + 732 + ], + "type": "text", + "content": " across three different domains (images, audio, video), including two text-approximated similarity matrices for 1,000 audio clips and 1,000 video clips." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 180, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 180, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 180, + 94 + ], + "type": "text", + "content": "2 DATASETS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 108, + 170, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 108, + 170, + 119 + ], + "spans": [ + { + "bbox": [ + 105, + 108, + 170, + 119 + ], + "type": "text", + "content": "2.1 STIMULI" + } + ] + } + ], + 
"index": 2 + }, + { + "bbox": [ + 104, + 130, + 506, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 130, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 104, + 130, + 506, + 153 + ], + "type": "text", + "content": "Throughout this work, we considered five stimulus datasets across three different modalities - images, audio, and video - consisting of a total of 31,320 dyadic pairs labeled with similarity." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 168, + 506, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 168, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 506, + 203 + ], + "type": "text", + "content": "Images For images, we considered three datasets of common objects introduced in Peterson et al. (2018) – namely, animals, furniture, and vegetables – each consisting of 7,140 dyadic pairs (all unique pairs over 120 images)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 217, + 506, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 217, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 506, + 297 + ], + "type": "text", + "content": "Audio For audio, we used the RAVDESS corpus (Livingstone & Russo (2018), released under a CC Attribution license), which consists of semantically neutral sentences spoken by 24 US American actors to convey a specific target emotion. To construct a 1,000-recording subset, we selected 3 emotions per speaker per sentence. We randomly omitted 104 emotional stimuli and included all 96 neutral recordings (the dataset only contains 2 neutral recordings per speaker per sentence). 
To construct the subset composed of 4,950 dyadic pairs (all unique pairs over 100 recordings), we randomly selected " + }, + { + "bbox": [ + 104, + 217, + 506, + 297 + ], + "type": "inline_equation", + "content": "\\sim 13" + }, + { + "bbox": [ + 104, + 217, + 506, + 297 + ], + "type": "text", + "content": " recordings per emotion from the 1,000." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 311, + 506, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 311, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 104, + 311, + 506, + 389 + ], + "type": "text", + "content": "Video Finally, for the video dataset, we considered the Mini-Kinetics-200 dataset (Xie et al., 2018) (released under a CC BY 4.0 International License), which contains a large set of short video clips of human activities from 200 activity classes. Specifically, we focused on the validation split, which contains 5,000 videos in total. To construct our 1,000-video dataset, we sampled 5 random videos from each of the 200 activity categories. The 100-video subset (4,950 dyadic pairs) used in the similarity judgment collection experiment was then generated by sampling 100 random stimuli from the 1,000 list." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 406, + 274, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 274, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 274, + 417 + ], + "type": "text", + "content": "2.2 HUMAN JUDGMENT COLLECTION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 428, + 203, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 428, + 203, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 203, + 439 + ], + "type": "text", + "content": "2.2.1 PARTICIPANTS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 449, + 507, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 507, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 507, + 582 + ], + "type": "text", + "content": "We collected data from " + }, + { + "bbox": [ + 104, + 449, + 507, + 582 + ], + "type": "inline_equation", + "content": "N = 1,492" + }, + { + "bbox": [ + 104, + 449, + 507, + 582 + ], + "type": "text", + "content": " US participants for the new behavioral experiments reported in this paper. Participants were recruited anonymously from Amazon Mechanical Turk and provided informed consent under an approved protocol by either the Institutional Review Board (IRB) at Princeton University (application 10859) or the Max Planck Ethics Council (application 2021_42) before taking part. Participants earned 9-12 USD per hour, and each session lasted less than 30 minutes. 
To help recruit reliable participants, we required that participants are at least 18 years of age, reside in the United States and have participated in more than 5,000 previous tasks with a " + }, + { + "bbox": [ + 104, + 449, + 507, + 582 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 104, + 449, + 507, + 582 + ], + "type": "text", + "content": " approval rate (see Supplementary Section B for additional details about the behavioral experiments). All experiments were implemented with the Dallinger and PsyNet frameworks designed for automation of large-scale behavioral research (Harrison et al., 2020). In Supplementary Section A.1, we include the data that was collected, instructions used, and code for replication of the behavioral experiments. We also provide the code for computational experiments and analysis." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 597, + 249, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 249, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 249, + 608 + ], + "type": "text", + "content": "2.2.2 SIMILARITY JUDGMENTS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 617, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 506, + 662 + ], + "type": "text", + "content": "We collected two batches of pairwise similarity judgements, one for each of the audio and video subsets, and were provided access to the similarity matrices for the three image datasets by the authors of Peterson et al. (2018). For each pair we collected " + }, + { + "bbox": [ + 104, + 617, + 506, + 662 + ], + "type": "inline_equation", + "content": "\\sim 5" + }, + { + "bbox": [ + 104, + 617, + 506, + 662 + ], + "type": "text", + "content": " similarity judgments to average out inter-rater noise." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 677, + 186, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 186, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 186, + 689 + ], + "type": "text", + "content": "2.2.3 CAPTIONS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": "We collected free-text captions for the video and audio datasets. Captions for the image datasets were already collected by Marjieh et al. (2022) and used here with permission. For each stimulus, we collected " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\sim 10" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": " captions." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 504, + 236 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 504, + 236 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 504, + 236 + ], + "type": "image", + "image_path": 
"8a98b1de6f85e5fff095e57db5e2dca85fafcce1dced6e5cc2ee52e5bc880c26.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 250, + 506, + 285 + ], + "lines": [ + { + "bbox": [ + 104, + 250, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 104, + 250, + 506, + 285 + ], + "type": "text", + "content": "Figure 2: STEP-Tag, our novel tag-mining paradigm. We ran an adaptive process in which results of one iteration are used as inputs for subsequent iterations. In every iteration, participants can add a new tag, rate the relevance of existing tags or flag tags that are inappropriate." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 304, + 164, + 315 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 164, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 164, + 315 + ], + "type": "text", + "content": "2.2.4 TAGS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 323, + 506, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 323, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 323, + 506, + 498 + ], + "type": "text", + "content": "We propose a novel adaptive tag pipeline for simultaneous data collection and evaluation called Sequential Transmission Evaluation Pipeline (STEP) and apply it in the context of semantic tag mining (STEP-Tag). Our paradigm, STEP-Tag, allows researchers to efficiently collect high-quality word tags for a given stimulus (Figure 2) and extends existing crowdsourcing text-mining techniques (Von Ahn & Dabbish, 2008; 2004; Krishna et al., 2017; Law et al., 2007) by integrating ideas from transmission chain experiments (Kirby et al., 2008; Griffiths & Kalish, 2005). In STEP-Tag, participants adaptively create tags for a set of target stimuli and simultaneously evaluate the annotations made by previous participants. 
In each trial, participants are first given a stimulus (e.g., an image or audio fragment) and rate the relevance of tags that were created by other participants (on a 5-interval Likert scale) or flag a tag if they find it inappropriate (with tags removed if more than two people flag the tag). Next, participants are also given the opportunity to add new tags if they feel a relevant tag that describes the stimulus is missing. The results of the annotation procedure of one participant then propagate to the next participant (additional details about the paradigm, and screenshots are provided in Supplementary Section B.6). Ultimately, as the process unfolds over many iterations, meaningful tags are extracted and validated by multiple participants, enabling efficient open-label collection of a desired dataset." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 504, + 505, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 504, + 505, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 505, + 571 + ], + "type": "text", + "content": "To validate STEP-Tag, we compared it against several baselines: (i) randomly selecting only a single high-rated tag from the last iteration of STEP-Tag per stimulus, (ii) using tags only from the first iteration of STEP-Tag (equivalent to non-adaptive tag collection), and (iii) using class labels instead of tags. We found that tags produced after multiple iterations of STEP-Tag outperformed all three baselines in terms of quality (i.e., downstream performance for similarity reconstruction) and diversity (see Supplementary Section B.6.1)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 587, + 173, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 587, + 173, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 587, + 173, + 599 + ], + "type": "text", + "content": "3 MODELS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 612, + 233, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 612, + 233, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 612, + 233, + 623 + ], + "type": "text", + "content": "3.1 DNN-BASED METHODS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "We tested a wide range of pre-trained ML models that do not rely on text (overall we tested 611 models) and compared their internal representations to human similarity judgments and text-based predictions (Figure 1A). We compiled our model pool by leveraging pre-trained model repositories (or zoos) available online. In particular, for images we use 569 pre-trained models from the pytorch-image-models package timm (Wightman, 2019), for audio we use 36 pre-trained models available in the torchaudio package (Yang et al., 2021) (see also Supplementary Figure 10 for an analysis of layer depth), and for video we use 6 pre-trained models available from the PyTorchVideo package (Fan et al., 2021). Because of the recent success of multimodal training, we additionally included 9 multimodal models based on CLIP from OpenAI's public implementation." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 103, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 103, + 82, + 504, + 106 + ], + "type": "text", + "content": "tion (https://github.com/openai/CLIP) for the image datasets, and compared them to \"stacked\" representations (i.e., concatenating embeddings from separate image and text models)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 118, + 233, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 118, + 233, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 233, + 129 + ], + "type": "text", + "content": "3.2 LLM-BASED METHODS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 140, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 140, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 140, + 506, + 217 + ], + "type": "text", + "content": "Tags To embed tags we used ConceptNet Numberbatch (CNNB) which is a word-embedding model trained on the ConceptNet knowledge graph that leverages other popular word embedding models such as word2vec and GloVe (Speer et al., 2017). 
We experimented with several algorithms for computing similarity between sets (or multi-sets) of tags and share the details in Supplementary Section C.1.2. As a control, for images we also tried converting tags into a caption of the form \"This is an image of tag1, tag2, ...\" and embedding them using a language model (see Supplementary Section C.1.2)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 229, + 506, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 229, + 506, + 351 + ], + "spans": [ + { + "bbox": [ + 104, + 229, + 506, + 351 + ], + "type": "text", + "content": "Captions To embed captions, we used four pre-trained LLMs from HuggingFace (Wolf et al., 2020): 'bert-base-uncased', 'deberta-xlarge-mnli', 'sup-simcse-bert-base-uncased', and 'sup-simcse-roberta-large'. SimCSE is a pre-training procedure that uses semantic entailment in a contrastive learning objective (Gao et al., 2021). According to BERTScore (Zhang et al., 2020), the latter three models are ranked in the top 40 models in terms of correlation with human evaluations on certain tasks, with 'deberta-xlarge-mnli' ranked first. However, in our experiments, we found that embedding similarity computed from 'sup-simcse-roberta-large' has the highest correlation with human similarity judgments out of the four models. For SimCSE-based models, we used representations from the (final) embedding layer (where the SimCSE contrastive objective is actually applied). For the other two models, we computed embeddings from every layer, but restricted the main analysis to embeddings from the penultimate layers. This was done in order to be consistent with our procedure for DNNs." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 363, + 506, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 506, + 430 + ], + "type": "text", + "content": "Other methods For the image datasets, we also considered several other methods that made use of LLMs but do not fit into the categories described above. One approach was using prompts with GPT3 (Brown et al., 2020) in a text-completion setup to directly predict similarity without extracting embeddings (see Supplementary Section C.1.3 for details). We also tried using pre-trained image captioning models to generate captions automatically (i.e. this would reduce " + }, + { + "bbox": [ + 104, + 363, + 506, + 430 + ], + "type": "inline_equation", + "content": "O(N)" + }, + { + "bbox": [ + 104, + 363, + 506, + 430 + ], + "type": "text", + "content": " language-based methods to " + }, + { + "bbox": [ + 104, + 363, + 506, + 430 + ], + "type": "inline_equation", + "content": "O(1)" + }, + { + "bbox": [ + 104, + 363, + 506, + 430 + ], + "type": "text", + "content": ") but this resulted in poor performance (see Supplementary Section C.1.3 for details)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 443, + 221, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 443, + 221, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 221, + 453 + ], + "type": "text", + "content": "3.3 STACKING METHODS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 464, + 505, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 505, + 533 + ], + "type": "text", + "content": "We produce stacked representations for each modality by concatenating the single best-performing (see Figure 3) LLM's embeddings with the embeddings from the five best-performing DNNs into a single set of long embeddings. Since the two sets of embeddings come from different spaces, we add a single tunable hyperparameter for rescaling the LLM embeddings. This hyperparameter can be set manually, but we use a small number of ground-truth similarity judgments (we use dyadic pairs for just 20 stimuli) to optimize it automatically." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 544, + 340, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 340, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 340, + 555 + ], + "type": "text", + "content": "3.4 WORD FREQUENCY ANALYSIS (WFA) METHODS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 564, + 506, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 564, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 506, + 632 + ], + "type": "text", + "content": "The aim of the WFA methods is to enable similarity approximation from language using traditional embedding-free techniques. 
Such techniques are particularly useful for low-resource languages or cross-cultural comparisons (Cowen & Keltner, 2017; Barrett, 2020), for which pre-trained models are lacking, as they work solely on the basis of the text itself. The WFA methods we considered included measuring co-occurrence, Rouge score, bm25s, and tfidf. We provide details on each of these procedures in Supplementary Section C.2." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 645, + 234, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 645, + 234, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 234, + 656 + ], + "type": "text", + "content": "3.5 PERFORMANCE METRIC" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "text", + "content": "We quantified performance by computing the Pearson correlation " + }, + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 666, + 506, + 733 + ], + "type": "text", + "content": " between approximated similarity scores and the ground-truth human similarity scores for all the unique dyadic pairs in a dataset. We compared the performance of the different prediction methods to the inter-rater reliability (IRR) of participants, which serves as an approximate upper-bound on performance. Following Peterson et al. (2018), we computed IRR for each human similarity matrix using the split-half correlation method with a Spearman-Brown correction (Brown, 1910)." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 173, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 173, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 173, + 94 + ], + "type": "text", + "content": "4 RESULTS" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 108, + 114, + 503, + 228 + ], + "blocks": [ + { + "bbox": [ + 108, + 114, + 503, + 228 + ], + "lines": [ + { + "bbox": [ + 108, + 114, + 503, + 228 + ], + "spans": [ + { + "bbox": [ + 108, + 114, + 503, + 228 + ], + "type": "image", + "image_path": "d77376fc14926badb2e03efdcc2ea3c112f33ec27be4ae665e9fa9d8c9574858.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 243, + 501, + 357 + ], + "blocks": [ + { + "bbox": [ + 109, + 243, + 501, + 357 + ], + "lines": [ + { + "bbox": [ + 109, + 243, + 501, + 357 + ], + "spans": [ + { + "bbox": [ + 109, + 243, + 501, + 357 + ], + "type": "image", + "image_path": "a8a8c9b51c3fe39d89cde181140c094e59c9683ace33d3ababdbfe2f4b585119.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 109, + 372, + 501, + 485 + ], + "blocks": [ + 
{ + "bbox": [ + 109, + 372, + 501, + 485 + ], + "lines": [ + { + "bbox": [ + 109, + 372, + 501, + 485 + ], + "spans": [ + { + "bbox": [ + 109, + 372, + 501, + 485 + ], + "type": "image", + "image_path": "ae6b182d09f93dee1b908aa568dee1ab2661348ddda7a0163109fd4f0966c432.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 109, + 502, + 501, + 590 + ], + "blocks": [ + { + "bbox": [ + 109, + 502, + 501, + 590 + ], + "lines": [ + { + "bbox": [ + 109, + 502, + 501, + 590 + ], + "spans": [ + { + "bbox": [ + 109, + 502, + 501, + 590 + ], + "type": "image", + "image_path": "8ca213ebefb0e9cbe901e5366eca268396e36df51d8e0eeab47203e127837709.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 606, + 506, + 651 + ], + "lines": [ + { + "bbox": [ + 104, + 606, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 606, + 506, + 651 + ], + "type": "text", + "content": "Figure 3: Correlation to human similarity. A: Top 50 models averaged over the 3 image datasets. B: Audio dataset. C: Video dataset. Each DNN baseline bar averages over multiple variants of the same architecture; the dots indicate average correlation of individual variants of the architecture. D: Average for each method type for each modality. The error bars are standard deviations." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "Figure 3 summarizes the performance of the various techniques across the three modalities. Note that the image modality results in Figure 3A are averaged across the three image datasets and only show the top 50 methods for this modality due to space constraints. 
Figure 3D shows the mean performance of the methods of each type for each modality. When viewing these results, a clear hierarchy emerges. While no approximation methods can perfectly match the ground-truth pairwise similarity, (see the gap between the methods and IRR), stacked ones get close and are consistently more aligned with" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "human similarity than other methods across all three modalities. Text-based methods come next in this hierarchy, followed by DNN-based ones. We also considered supervised methods that reweight DNN-based embeddings based on a small set of human similarity judgments, but we found that the performance was unstable (see Supplementary Section C.3 for details)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 506, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 506, + 222 + ], + "type": "text", + "content": "The pre-eminence of stacked results suggests that LLMs and DNNs capture at least some different sources of variance in human similarity judgments. This is reinforced by our surprising finding that stacked representations from CLIP, a state-of-the-art jointly pre-trained multi-modal model, do not outperform stacked representations from independently trained models. We hypothesize that this happens because information is lost from both modalities when optimizing for a joint embedding. However, we note that the modest size of the performance gap between stacked and LLMs/DNNs, suggests that there is also significant overlap between aspects of human similarity captured by language and perception." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 226, + 504, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 226, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 226, + 504, + 326 + ], + "type": "text", + "content": "To investigate the effect of architecture and downstream task (e.g., classification) performance on alignment of DNNs with human similarity, for the image modality we compared similarity approximation performance against the number of model parameters on a log scale (Figure 4A) and ImageNet classification performance (Deng et al., 2009) (Figure 4B). 
Overall, we found a positive correlation between similarity approximation performance and the number of model parameters " + }, + { + "bbox": [ + 104, + 226, + 504, + 326 + ], + "type": "inline_equation", + "content": "(r = 0.39, p < 0.001)" + }, + { + "bbox": [ + 104, + 226, + 504, + 326 + ], + "type": "text", + "content": " and a smaller but still significant positive correlation with performance on ImageNet " + }, + { + "bbox": [ + 104, + 226, + 504, + 326 + ], + "type": "inline_equation", + "content": "(r = 0.26, p < 0.001)" + }, + { + "bbox": [ + 104, + 226, + 504, + 326 + ], + "type": "text", + "content": ". There were some notable exceptions with particularly high ImageNet performance but low similarity performance, such as the image transformer BEiT (Bao et al., 2021)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 331, + 506, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 506, + 419 + ], + "type": "text", + "content": "Finally, we leverage both DNN-based methods and our proposed language-based methods to approximate similarity matrices that would otherwise require an unaffordable number of human similarity judgments to collect all dyadic pairs. Specifically, we approximate the two similarity matrices corresponding to all 1,000 audio clips and 1,000 video clips in our datasets using every method listed for each of those modalities in Figure 3. We provide visualizations of the resulting matrices at https://words-are-all-you-need.s3.amazon.com/index.html. We note that to exhaustively collect all dyadic pairs with five judgments per pair would normally require roughly 2.5 million human judgments for each of these matrices." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 167, + 448, + 440, + 564 + ], + "blocks": [ + { + "bbox": [ + 167, + 448, + 440, + 564 + ], + "lines": [ + { + "bbox": [ + 167, + 448, + 440, + 564 + ], + "spans": [ + { + "bbox": [ + 167, + 448, + 440, + 564 + ], + "type": "image", + "image_path": "2b2ca0e9510a55b27fde0673f2b4db373aa6bc6472c1b777de22294faaabac0f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 166, + 567, + 443, + 685 + ], + "blocks": [ + { + "bbox": [ + 166, + 567, + 443, + 685 + ], + "lines": [ + { + "bbox": [ + 166, + 567, + 443, + 685 + ], + "spans": [ + { + "bbox": [ + 166, + 567, + 443, + 685 + ], + "type": "image", + "image_path": "d9c0483bcc12a6f34ad4ef34477a05e518d4f065a015bfd4d283743cf710c670.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 697, + 504, + 721 + ], + "lines": [ + { + "bbox": [ + 104, + 697, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 697, + 504, + 721 + ], + "type": "text", + "content": "Figure 4: Correlation to human similarity judgments as a function of A: number of model parameters; and B: ImageNet accuracy." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 84, + 504, + 319 + ], + "blocks": [ + { + "bbox": [ + 111, + 84, + 504, + 319 + ], + "lines": [ + { + "bbox": [ + 111, + 84, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 111, + 84, + 504, + 319 + ], + "type": "image", + "image_path": "0c182b240e2e982ca4bdfc7e32e3332c53a4dc813f606ab89cce465dc3cccef2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 331, + 470, + 345 + ], + "lines": [ + { + "bbox": [ + 140, + 331, + 470, + 345 + ], + "spans": [ + { + "bbox": [ + 140, + 331, + 470, + 345 + ], + "type": "text", + "content": "Figure 5: Guide to collecting and estimating human similarity judgments at scale." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 362, + 289, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 362, + 289, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 362, + 289, + 374 + ], + "type": "text", + "content": "5 DISCUSSION AND CONCLUSION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 388, + 506, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 506, + 444 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 506, + 444 + ], + "type": "text", + "content": "In this work, we compared novel and existing methods for approximating human similarity judgments. The main contributions can be summarized as follows: 1) we provide a simple and accessible approach for approximating " + }, + { + "bbox": [ + 104, + 388, + 506, + 444 + ], + "type": "inline_equation", + "content": "O(N^2)" + }, + { + "bbox": [ + 104, + 388, + 506, + 444 + ], + "type": "text", + "content": " human similarity judgments using " + }, + { + "bbox": [ + 104, + 388, + 506, + 444 + ], + "type": "inline_equation", + "content": "O(N)" + }, + { + "bbox": [ + 104, + 388, + 506, + 444 + ], + "type": "text", + "content": " annotations, 2) we propose a new adaptive pipeline STEP-tag for tag mining, 3) we evaluate our approach against " + }, + { + "bbox": [ + 104, + 388, + 506, + 444 + ], + "type": "inline_equation", + "content": "600+" + }, + { + "bbox": [ + 104, + 388, + 506, + 444 + ], + "type": "text", + "content": " domain-specific state-of-the-art DNNs, and 4) we publicly release all data comprising 206,339 human judgments." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 449, + 504, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 504, + 548 + ], + "type": "text", + "content": "Based on these, we are now able to provide researchers with a best-practices guide to collecting similarity datasets. Our guide is based on two bottlenecks that researchers may face: one is the limit on the number of judgments that can be collected (e.g., due to cost) and the second is the availability of pre-trained models (i.e., either DNNs or LLMs). Our results make it clear that deep learning can provide good approximations for human similarity. In fact, when both pre-trained LLMs and DNNs are available, stacking their representations is consistently the best approach. However, even when neither type of pre-trained models are available, we suggest that classical word-frequency analysis methods still provide researchers with an efficient and competitive method for approximating human similarity. Our guide, comprehensively covering these and other cases, is laid out in Figure 5." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 553, + 506, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 553, + 506, + 719 + ], + "spans": [ + { + "bbox": [ + 104, + 553, + 506, + 719 + ], + "type": "text", + "content": "One limitation of this work is that while similarity proxies generated from our pipeline can support ML datasets, they are also at risk of baking in high-level human biases that can lead to adverse societal implications, such as amplifying race and gender gaps. Researchers should devote utmost care to what they choose to incorporate in their training objective. Another limitation of our work is the fact that we were restricted to English text data and US participants. 
However, we believe that our approach and proposed methods (especially STEP-tag and the word-frequency methods) pave the way for the study of cross-cultural variation of human semantic representations by providing efficient tools for crowdsourcing high-quality semantic descriptors across languages. This is particularly relevant for low-resource languages, where our tag-mining techniques can work even with the absence of pre-trained ML models (Thompson et al., 2020; Barrett, 2020). We are currently expanding our work to include more languages and diverse cultures. Taken together, our results showcase how we can leverage language to make machine representations more human-like. Moreover, it highlights the importance of combining machine learning and cognitive science approaches for mutually advancing both fields. In particular, we believe that the methodologies adopted in this work have the potential to greatly advance basic research on naturalistic representations in cognitive science." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 218, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 218, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 218, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], 
+ "index": 1 + }, + { + "bbox": [ + 105, + 106, + 504, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 504, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 504, + 129 + ], + "type": "text", + "content": "This work was supported by a grant from the John Templeton Foundation to TLG, an NDSEG fellowship to TRS, and an NSERC fellowship (567554-2022) to IS." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 145, + 176, + 158 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 145, + 176, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 145, + 176, + 158 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 164, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 164, + 505, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 164, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 505, + 199 + ], + "type": "text", + "content": "Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33:12449-12460, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 206, + 504, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 206, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 206, + 504, + 232 + ], + "type": "text", + "content": "Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, and Michael Auli. data2vec: A general framework for self-supervised learning in speech, vision and language, 2022." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 237, + 504, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 237, + 504, + 261 + ], + "spans": [ + { + "bbox": [ + 106, + 237, + 504, + 261 + ], + "type": "text", + "content": "Hangbo Bao, Li Dong, and Furu Wei. BEiT: BERT pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 268, + 504, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 268, + 504, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 504, + 292 + ], + "type": "text", + "content": "H Clark Barrett. Towards a cognitive science of the human: cross-cultural approaches and their urgency. Trends in Cognitive Sciences, 24(8):620-638, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 300, + 505, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 505, + 333 + ], + "type": "text", + "content": "Federico Barrios, Federico López, Luis Argerich, and Rosa Wachenchauzer. Variations of the similarity function of textrank for automated summarization. arXiv preprint arXiv:1602.03606, 2016." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 342, + 504, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 504, + 365 + ], + "type": "text", + "content": "Joeran Beel, Bela Gipp, Stefan Langer, and Corinna Breitinger. Paper recommender systems: a literature survey. International Journal on Digital Libraries, 17(4):305-338, 2016." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 373, + 505, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 505, + 407 + ], + "type": "text", + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 415, + 506, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 506, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 506, + 438 + ], + "type": "text", + "content": "William Brown. Some experimental results in the correlation of mental abilities 1. British Journal of Psychology, 1904-1920, 3(3):296-322, 1910." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 445, + 505, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 505, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 505, + 480 + ], + "type": "text", + "content": "Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, et al. WavLM: Large-scale self-supervised pre-training for full stack speech processing. arXiv preprint arXiv:2110.13900, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 487, + 506, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 487, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 506, + 521 + ], + "type": "text", + "content": "Alan S Cowen and Dacher Keltner. Self-report captures 27 distinct categories of emotion bridged by continuous gradients. Proceedings of the National Academy of Sciences, 114(38):E7900-E7909, 2017." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 529, + 506, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 506, + 564 + ], + "type": "text", + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255. IEEE, 2009." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 571, + 504, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 504, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 504, + 596 + ], + "type": "text", + "content": "Philippe Esling, Adrien Bitton, et al. Generative timbre spaces: regularizing variational auto-encoders with perceptual metrics. arXiv preprint arXiv:1805.08501, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 603, + 506, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 603, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 603, + 506, + 659 + ], + "type": "text", + "content": "Haoqi Fan, Tullie Murrell, Heng Wang, Kalyan Vasudev Alwala, Yanghao Li, Yilei Li, Bo Xiong, Nikhila Ravi, Meng Li, Haichuan Yang, Jitendra Malik, Ross Girshick, Matt Feiszli, Aaron Adcock, Wan-Yen Lo, and Christoph Feichtenhofer. PyTorchVideo: A deep learning library for video understanding. In Proceedings of the 29th ACM International Conference on Multimedia, 2021. https://pytorchvideo.org/." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 667, + 504, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 504, + 691 + ], + "type": "text", + "content": "Christoph Feichtenhofer. X3d: Expanding architectures for efficient video recognition. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 203-213, 2020." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "type": "text", + "content": "Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. SlowFast networks for video recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6202-6211, 2019." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "type": "text", + "content": "Tianyu Gao, Xingcheng Yao, and Danqi Chen. SimCSE: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821, 2021." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 506, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 506, + 136 + ], + "type": "text", + "content": "Thomas L Griffiths and Michael L Kalish. A bayesian view of language evolution by iterated learning. In Proceedings of the Annual Meeting of the Cognitive Science Society, volume 27, 2005." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 142, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 142, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 106, + 142, + 506, + 176 + ], + "type": "text", + "content": "Peter Harrison, Raja Marjieh, Federico Adolfi, Pol van Rijn, Manuel Anglada-Tort, Ofer Tchernichovski, Pauline Larrouy-Maestri, and Nori Jacoby. Gibbs sampling with people. Advances in Neural Information Processing Systems, 33:10659-10671, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 184, + 506, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 184, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 184, + 506, + 217 + ], + "type": "text", + "content": "Martin N Hebart, Charles Y Zheng, Francisco Pereira, and Chris I Baker. Revealing the multidimensional mental representations of natural objects underlying human similarity judgements. Nature Human Behaviour, 4(11):1173-1185, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 225, + 506, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 225, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 107, + 225, + 506, + 269 + ], + "type": "text", + "content": "Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. HuBERT: Self-supervised speech representation learning by masked prediction of hidden units. 
IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 277, + 506, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 277, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 107, + 277, + 506, + 300 + ], + "type": "text", + "content": "T Jaeger and Roger Levy. Speakers optimize information density through syntactic reduction. Advances in Neural Information Processing Systems, 19, 2006." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 308, + 505, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 308, + 505, + 342 + ], + "spans": [ + { + "bbox": [ + 107, + 308, + 505, + 342 + ], + "type": "text", + "content": "Kevin G Jamieson and Robert D Nowak. Low-dimensional embedding using adaptively selected ordinal data. In 2011 49th Annual Allerton Conference on Communication, Control, and Computing (Allerton), pp. 1077-1084. IEEE, 2011." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 349, + 505, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 349, + 505, + 372 + ], + "spans": [ + { + "bbox": [ + 107, + 349, + 505, + 372 + ], + "type": "text", + "content": "Aditi Jha, Joshua Peterson, and Thomas L Griffiths. Extracting low-dimensional psychological representations from convolutional neural networks. arXiv preprint arXiv:2005.14363, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 379, + 506, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 379, + 506, + 413 + ], + "spans": [ + { + "bbox": [ + 107, + 379, + 506, + 413 + ], + "type": "text", + "content": "Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natev, et al. The Kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 420, + 505, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 420, + 505, + 454 + ], + "spans": [ + { + "bbox": [ + 107, + 420, + 505, + 454 + ], + "type": "text", + "content": "Alexander JE Kell, Daniel LK Yamins, Erica N Shook, Sam V Norman-Haignere, and Josh H McDermott. A task-optimized neural network replicates human auditory behavior, predicts brain responses, and reveals a cortical processing hierarchy. Neuron, 98(3):630-644, 2018." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 461, + 505, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 461, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 107, + 461, + 505, + 495 + ], + "type": "text", + "content": "Prannay Khosla, Piotr Teterwak, Chen Wang, Aaron Sarna, Yonglong Tian, Phillip Isola, Aaron Maschinot, Ce Liu, and Dilip Krishnan. Supervised contrastive learning. Advances in Neural Information Processing Systems, 33:18661-18673, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 502, + 505, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 502, + 505, + 536 + ], + "spans": [ + { + "bbox": [ + 107, + 502, + 505, + 536 + ], + "type": "text", + "content": "Simon Kirby, Hannah Cornish, and Kenny Smith. Cumulative cultural evolution in the laboratory: An experimental approach to the origins of structure in human language. Proceedings of the National Academy of Sciences, 105(31):10681-10686, 2008." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 544, + 506, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 544, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 107, + 544, + 506, + 588 + ], + "type": "text", + "content": "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. 
Visual genome: Connecting language and vision using crowdsourced dense image annotations. International Journal of Computer Vision, 123(1):32-73, 2017." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 596, + 506, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 596, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 107, + 596, + 506, + 631 + ], + "type": "text", + "content": "Thomas Langlois, Haicheng Zhao, Erin Grant, Ishita Dasgupta, Tom Griffiths, and Nori Jacoby. Passive attention in artificial neural networks predicts human visual selectivity. Advances in Neural Information Processing Systems, 34, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 637, + 505, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 637, + 505, + 661 + ], + "spans": [ + { + "bbox": [ + 107, + 637, + 505, + 661 + ], + "type": "text", + "content": "Edith LM Law, Luis Von Ahn, Roger B Dannenberg, and Mike Crawford. TagATune: A game for music and sound annotation. In ISMIR, volume 3, pp. 2, 2007." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 668, + 505, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 668, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 107, + 668, + 505, + 691 + ], + "type": "text", + "content": "Kristin Lemhöfer and Mirjam Broersma. Introducing lexdale: A quick and valid lexical test for advanced learners of english. Behavior research methods, 44(2):325-343, 2012." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 698, + 505, + 732 + ], + "type": "text", + "content": "Drew Linsley, Sven Eberhardt, Tarun Sharma, Pankaj Gupta, and Thomas Serre. What are the visual features underlying human versus machine vision? 
In Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 2706-2714, 2017." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 117 + ], + "type": "text", + "content": "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10012-10022, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 146 + ], + "type": "text", + "content": "Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. arXiv preprint arXiv:2201.03545, 2022." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 504, + 186 + ], + "type": "text", + "content": "Steven R Livingstone and Frank A Russo. The Ryerson audio-visual database of emotional speech and song (RAVDESS): A dynamic, multimodal set of facial and vocal expressions in north american english. PloS one, 13(5):e0196391, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 193, + 506, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 193, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 506, + 217 + ], + "type": "text", + "content": "Raja Marjieh, Ilia Sucholutsky, Theodore R Sumers, Nori Jacoby, and Thomas L Griffiths. Predicting human similarity judgments using large language models. arXiv preprint arXiv:2202.04728, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 504, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 504, + 257 + ], + "type": "text", + "content": "Alice E Milne, Roberta Bianco, Katarina C Poole, Sijia Zhao, Andrew J Oxenham, Alexander J Billig, and Maria Chait. An online headphone screening test based on dichotic pitch. Behavior Research Methods, 53(4):1551-1562, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 263, + 355, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 263, + 355, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 355, + 277 + ], + "type": "text", + "content": "Gregory Murphy. The big book of concepts. MIT press, 2004." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 282, + 506, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 282, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 506, + 316 + ], + "type": "text", + "content": "Zarana Parekh, Jason Baldridge, Daniel Cer, Austin Waters, and Yinfei Yang. Crisscrossed captions: Extended intramodal and intermodal semantic similarity judgments for MS-COCO. arXiv preprint arXiv:2004.15020, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 323, + 506, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 506, + 367 + ], + "type": "text", + "content": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825–2830, 2011." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 374, + 504, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 504, + 408 + ], + "type": "text", + "content": "Joshua C Peterson, Joshua T Abbott, and Thomas L Griffiths. Evaluating (and improving) the correspondence between deep neural networks and human representations. Cognitive Science, 42 (8):2648-2669, 2018." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 415, + 504, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 504, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 504, + 439 + ], + "type": "text", + "content": "Steven T Piantadosi, Harry Tily, and Edward Gibson. Word lengths are optimized for efficient communication. 
Proceedings of the National Academy of Sciences, 108(9):3526-3529, 2011." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 445, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 506, + 502 + ], + "type": "text", + "content": "Mirco Ravanelli, Titouan Parcollet, Peter Plantinga, Aku Rouhe, Samuele Cornell, Loren Lugosch, Cem Subakan, Nauman Dawalatabad, Abdelwahab Heba, Jianyuan Zhong, Ju-Chieh Chou, Sung-Lin Yeh, Szu-Wei Fu, Chien-Feng Liao, Elena Rastorgueva, François Grondin, William Aris, Hwidong Na, Yan Gao, Renato De Mori, and Yoshua Bengio. SpeechBrain: A general-purpose speech toolkit, 2021. arXiv:2106.04624." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 508, + 504, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 508, + 504, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 504, + 542 + ], + "type": "text", + "content": "Brett D Roads and Bradley C Love. Enriching ImageNet with human similarity judgments and psychological embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3547-3557, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 548, + 504, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 504, + 572 + ], + "type": "text", + "content": "Lin CY Rouge. A package for automatic evaluation of summaries. In Proceedings of Workshop on Text Summarization of ACL, Spain, 2004." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 578, + 504, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 504, + 613 + ], + "type": "text", + "content": "Martin Schrimpf, Jonas Kubilius, Ha Hong, Najib J Majaj, Rishi Rajalingham, Elias B Issa, Kohitij Kar, Pouya Bashivan, Jonathan Prescott-Roy, Franziska Geiger, et al. Brain-Score: Which artificial neural network for object recognition is most brain-like? BioRxiv, pp. 407007, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 620, + 504, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 620, + 504, + 642 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 504, + 642 + ], + "type": "text", + "content": "Roger N Shepard. Multidimensional scaling, tree-fitting, and clustering. Science, 210(4468):390-398, 1980." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 649, + 504, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 649, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 504, + 672 + ], + "type": "text", + "content": "Roger N Shepard. Toward a universal law of generalization for psychological science. Science, 237 (4820):1317-1323, 1987." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "type": "text", + "content": "Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI Conference on Artificial Intelligence, 2017." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "text", + "content": "Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International conference on machine learning, pp. 6105-6114. PMLR, 2019." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 727 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 105 + ], + "type": "text", + "content": "Joshua B Tenenbaum and Thomas L Griffiths. Generalization, similarity, and bayesian inference. Behavioral and brain sciences, 24(4):629-640, 2001." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 505, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 505, + 135 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 505, + 135 + ], + "type": "text", + "content": "Bill Thompson, Seán G Roberts, and Gary Lupyan. Cultural influences on word meanings revealed through large-scale semantic alignment. Nature Human Behaviour, 4(10):1029-1038, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 141, + 419, + 154 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 141, + 419, + 154 + ], + "spans": [ + { + "bbox": [ + 106, + 141, + 419, + 154 + ], + "type": "text", + "content": "Amos Tversky. Features of similarity. Psychological review, 84(4):327, 1977." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 159, + 504, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 159, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 106, + 159, + 504, + 183 + ], + "type": "text", + "content": "Luis Von Ahn and Laura Dabbish. Labeling images with a computer game. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems, pp. 319-326, 2004." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 190, + 506, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 190, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 506, + 212 + ], + "type": "text", + "content": "Luis Von Ahn and Laura Dabbish. Designing games with a purpose. Communications of the ACM, 51(8):58-67, 2008." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 219, + 504, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 219, + 504, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 504, + 254 + ], + "type": "text", + "content": "Johannes Wagner, Andreas Triantafyllopoulos, Hagen Wierstorf, Maximilian Schmitt, Felix Burkhardt, Florian Eyben, and Björn W. Schuller. Dawn of the transformer era in speech emotion recognition: closing the valence gap, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 260, + 506, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 260, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 506, + 316 + ], + "type": "text", + "content": "Shu wen Yang, Po-Han Chi, Yung-Sung Chuang, Cheng-I Jeff Lai, Kushal Lakhotia, Yist Y. Lin, Andy T. Liu, Jiatong Shi, Xuankai Chang, Guan-Ting Lin, Tzu-Hsien Huang, Wei-Cheng Tseng, Kotik Lee, Da-Rong Liu, Zili Huang, Shuyan Dong, Shang-Wen Li, Shinji Watanabe, Abdelrahman Mohamed, and Hung yi Lee. SUPERB: Speech Processing Universal PERformance Benchmark. In Proc. Interspeech 2021, pp. 1194-1198, 2021. doi: 10.21437/Interspeech.2021-1775." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 323, + 505, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 505, + 346 + ], + "type": "text", + "content": "Ross Wightman. PyTorch image models. https://github.com/rwrightman/pytorch-image-models, 2019." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 352, + 506, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 352, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 506, + 419 + ], + "type": "text", + "content": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 38-45. Association for Computational Linguistics, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 426, + 506, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 506, + 460 + ], + "type": "text", + "content": "Kevin JP Woods, Max H Siegel, James Traer, and Josh H McDermott. Headphone screening to facilitate web-based auditory experiments. Attention, Perception, & Psychophysics, 79(7): 2064-2072, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 467, + 504, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 467, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 467, + 504, + 502 + ], + "type": "text", + "content": "Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, and Kevin Murphy. Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 305–321, 2018." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 507, + 504, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 507, + 504, + 530 + ], + "spans": [ + { + "bbox": [ + 105, + 507, + 504, + 530 + ], + "type": "text", + "content": "Daniel Yamins. An optimization-based approach to understanding sensory systems. The Cognitive Neurosciences, 4(V1):381, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 536, + 504, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 536, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 504, + 571 + ], + "type": "text", + "content": "Daniel LK Yamins, Ha Hong, Charles F Cadieu, Ethan A Solomon, Darren Seibert, and James J DiCarlo. Performance-optimized hierarchical models predict neural responses in higher visual cortex. Proceedings of the National Academy of Sciences, 111(23):8619-8624, 2014." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 578, + 506, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 506, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 506, + 643 + ], + "type": "text", + "content": "Yao-Yuan Yang, Moto Hira, Zhaoheng Ni, Anjali Chourdia, Artyom Astafurov, Caroline Chen, Ching-Feng Yeh, Christian Puhrsch, David Pollack, Dmitriy Genzel, Donny Greenberg, Edward Z. Yang, Jason Lian, Jay Mahadeokar, Jeff Hwang, Ji Chen, Peter Goldsborough, Prabhat Roy, Sean Narethiran, Shinji Watanabe, Soumith Chintala, Vincent Quenneville-Bélair, and Yangyang Shi. Torchaudio: Building blocks for audio and speech processing. arXiv preprint arXiv:2110.15018, 2021." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 651, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 651, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 506, + 685 + ], + "type": "text", + "content": "Noga Zaslavsky, Charles Kemp, Terry Regier, and Naftali Tishby. Efficient compression in color naming and its evolution. Proceedings of the National Academy of Sciences, 115(31):7937-7942, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 692, + 504, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 692, + 504, + 727 + ], + "spans": [ + { + "bbox": [ + 105, + 692, + 504, + 727 + ], + "type": "text", + "content": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. Bertscore: Evaluating text generation with bert. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=SkeHuCVFDr." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 259, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 259, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 259, + 94 + ], + "type": "text", + "content": "SUPPLEMENTARY 
MATERIALS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 108, + 229, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 108, + 229, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 108, + 229, + 121 + ], + "type": "text", + "content": "A STIMULI AND DATA" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 133, + 268, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 268, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 268, + 144 + ], + "type": "text", + "content": "A.1 CODE AND DATA AVAILABILITY" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 154, + 506, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 506, + 221 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 506, + 221 + ], + "type": "text", + "content": "A link is provided to the public, containing all the data collected for this project during the review stage. It includes the new human behavioral data, the computational experiments with machine learning models, and all the necessary analyses scripts for producing the results. Additionally, the repository includes the Dallinger/PsyNet source codes for reproducing the behavioral experiments. Finally, we present an interactive visualization for exploring the similarity between stimuli as experienced by humans and different methods reported in the paper." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 237, + 262, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 237, + 262, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 237, + 262, + 249 + ], + "type": "text", + "content": "B BEHAVIORAL PARADIGMS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 262, + 195, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 262, + 195, + 273 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 195, + 273 + ], + "type": "text", + "content": "B.1 PARTICIPANTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "type": "text", + "content": "The exact number of participants for each of the 9 new behavioral experiments is reported in Table 1." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 106, + 326, + 515, + 459 + ], + "blocks": [ + { + "bbox": [ + 208, + 314, + 401, + 326 + ], + "lines": [ + { + "bbox": [ + 208, + 314, + 401, + 326 + ], + "spans": [ + { + "bbox": [ + 208, + 314, + 401, + 326 + ], + "type": "text", + "content": "Table 1: Behavioral experiment summary table." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 326, + 515, + 459 + ], + "lines": [ + { + "bbox": [ + 106, + 326, + 515, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 326, + 515, + 459 + ], + "type": "table", + "html": "
ModalityParadigmRespectTotal stimuliTrials per participantSectionNPre-screening
ImagesTagsAnimals120602.2.456LX
ImagesTagsFurniture120602.2.458LX
ImagesTagsVegetables120602.2.457LX
AudioSimilarityEmotions100852.2.2252HT
AudioCaptionsEmotions1,000502.2.3151HT, LX
AudioTagsEmotions1,000502.2.4217HT, LX
VideoSimilarityActivities100852.2.2284HT
VideoCaptionsActivities1,000502.2.3196HT, LX
VideoTagsActivities1,000502.2.4221HT, LX
", + "image_path": "856abb7e128ebd8a80b1869a0cf130fe9c4367a9765a1a91fb182a86cd9da58d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 119, + 460, + 504, + 483 + ], + "lines": [ + { + "bbox": [ + 119, + 460, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 119, + 460, + 504, + 483 + ], + "type": "text", + "content": "Note. 'N' denotes the number of participants included in the analysis; 'LX' denotes the LexTALE English proficiency pre-screening task; 'HT' denotes the headphone test." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 503, + 212, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 503, + 212, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 212, + 514 + ], + "type": "text", + "content": "B.2 IMPLEMENTATION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 523, + 506, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 506, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 506, + 613 + ], + "type": "text", + "content": "All behavioral experiments were implemented using the Dallinger4 and PsyNet (Harrison et al., 2020) frameworks. Dallinger is a modern tool for experiment hosting and deployment which automates the process of participant recruitment and compensation by integrating cloud-based services such as Heroku5 with online crowd-sourcing platforms such as AMT. PsyNet is a novel experiment design framework that builds on Dallinger and allows for flexible specification of experiment timelines as well as providing support for a wide array of tasks across different modalities (visual, auditory and audio-visual). Participants interact with the experiment through their web-browser, which in turn communicates with a backend Python server responsible for the experiment logic." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 626, + 204, + 637 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 626, + 204, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 204, + 637 + ], + "type": "text", + "content": "B.3 PRE-SCREENING" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 647, + 506, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 506, + 670 + ], + "type": "text", + "content": "A common technique for filtering out participants that are likely to deliver low-quality responses, as well as automated scripts (bots), is to implement pre-screening tasks prior to the main part of" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 677, + 528, + 708 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 677, + 528, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 528, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 528, + 689 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 677, + 528, + 689 + ], + "type": "text", + "content": "Code and data: https://osf.io/kzbr5/?view_only=3dea58e008ce41c290ef0f374bddbf444" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 689, + 507, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 507, + 708 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 507, + 708 + ], + "type": "text", + "content": "3Interactive plots: https://words-are-all-you-need.s3.amazon.com/index.html" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 118, + 710, + 301, + 731 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 118, + 710, + 301, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 710, + 301, + 720 + 
], + "spans": [ + { + "bbox": [ + 118, + 710, + 301, + 720 + ], + "type": "text", + "content": "4https://dallinger.readthedocs.io/" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 118, + 720, + 247, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 720, + 247, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 720, + 247, + 731 + ], + "type": "text", + "content": "5https://www.heroku.com/" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 173 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 173 + ], + "type": "text", + "content": "each experiment. Failing the pre-screening tasks results in early termination of the experiment. Nevertheless, participants are still compensated for their time regardless of whether they fail or succeed on a pre-screener to ensure fair compensation. The role of pre-screeners in our studies was to realize two main criteria for data quality, namely, a) to be able to collect high-quality text descriptors, and b) to ensure that participants are able to inspect the target stimuli properly (in particular the audio component in prosody and videos). 
To do this, we implemented two pre-screening tasks, an English proficiency test and a standardized headphone test (used only for audio and video experiments). Table 1 provides details on which pre-screeners were used in each of the behavioral experiments." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 230, + 198, + 373, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 198, + 373, + 228 + ], + "spans": [ + { + "bbox": [ + 230, + 198, + 373, + 228 + ], + "type": "text", + "content": "alberation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 272, + 237, + 334, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 237, + 334, + 245 + ], + "spans": [ + { + "bbox": [ + 272, + 237, + 334, + 245 + ], + "type": "text", + "content": "Does this word exist?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 266, + 274, + 277, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 274, + 277, + 281 + ], + "spans": [ + { + "bbox": [ + 266, + 274, + 277, + 281 + ], + "type": "text", + "content": "yes" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 329, + 274, + 338, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 274, + 338, + 281 + ], + "spans": [ + { + "bbox": [ + 329, + 274, + 338, + 281 + ], + "type": "text", + "content": "no" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 316, + 493, + 330 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 316, + 493, + 330 + ], + "spans": [ + { + "bbox": [ + 115, + 316, + 493, + 330 + ], + "type": "text", + "content": "Figure 6: Example trial from the LexTALE pre-screening task (Lemhöfer & Broersma, 2012)." 
+ } + ] + } + ], + "index": 6, + "type": "text" + }, + { + "bbox": [ + 104, + 350, + 506, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 429 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 429 + ], + "type": "text", + "content": "English proficiency test. To test participants' English proficiency, we used LexTALE, a lexical decision task developed in Lemhöfer & Broersma (2012). In each trial, participants were briefly presented (1 second) with either a real English word or a made up word that does not exist. Participants were instructed to guess whether the word was real or not. A total of 12 trials (half of them being real words) were presented, and 8 of them needed to be correct for the participant to pass. The presented words were: hasty, fray, stoutly, moonlit, scornful, unkempt, sensible, kilp, plaintively, crumper, plaudate, alberation. An example trial is shown in Figure 6." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 217, + 460, + 394, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 460, + 394, + 471 + ], + "spans": [ + { + "bbox": [ + 217, + 460, + 394, + 471 + ], + "type": "text", + "content": "Which sound was softest (quietest) -- 1, 2, or 3?" 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 509, + 312, + 573 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 305, + 509, + 312, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 509, + 312, + 517 + ], + "spans": [ + { + "bbox": [ + 305, + 509, + 312, + 517 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 536, + 312, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 536, + 312, + 544 + ], + "spans": [ + { + "bbox": [ + 305, + 536, + 312, + 544 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 565, + 312, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 565, + 312, + 573 + ], + "spans": [ + { + "bbox": [ + 305, + 565, + 312, + 573 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 135, + 599, + 474, + 613 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 599, + 474, + 613 + ], + "spans": [ + { + "bbox": [ + 135, + 599, + 474, + 613 + ], + "type": "text", + "content": "Figure 7: Example trial from the headphone pre-screening test (Woods et al., 2017)." + } + ] + } + ], + "index": 13, + "type": "text" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "Headphone test. We used the headphone test developed by Wood et al. (Woods et al., 2017), which is used as a standard pre-screener for high-quality auditory psychophysics data-collection procedures (Milne et al., 2021). The test is designed to ensure that the participants are wearing headphones and are able to perceive subtle differences in volume. 
The task consists of a forced choice task, in which three consecutive tones are played, and the participant has to identify which of them is the quietest. Crucially, these tones are constructed to exhibit a phase cancellation effect when not using headphones, and therefore making it difficult for non-headphone users to identify the quietest tone. Participants had to answer 4 out of 6 trials correctly to pass this test. An example trial is shown in Figure 7." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 115, + 125, + 492, + 316 + ], + "blocks": [ + { + "bbox": [ + 231, + 97, + 400, + 106 + ], + "lines": [ + { + "bbox": [ + 231, + 97, + 400, + 106 + ], + "spans": [ + { + "bbox": [ + 231, + 97, + 400, + 106 + ], + "type": "text", + "content": "How similar are the activities in following two videos? (2 / 85)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 157, + 112, + 473, + 121 + ], + "lines": [ + { + "bbox": [ + 157, + 112, + 473, + 121 + ], + "spans": [ + { + "bbox": [ + 157, + 112, + 473, + 121 + ], + "type": "text", + "content": "If it is difficult to choose between the options, don't worry, and just give what you intuitively think is the right answer." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 115, + 125, + 492, + 316 + ], + "lines": [ + { + "bbox": [ + 115, + 125, + 492, + 316 + ], + "spans": [ + { + "bbox": [ + 115, + 125, + 492, + 316 + ], + "type": "image", + "image_path": "57d6ed78b93d1f35c49af269482f05fbc8e7333a5e8542f8fb9d134c75450ae2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 339, + 451, + 353 + ], + "lines": [ + { + "bbox": [ + 159, + 339, + 451, + 353 + ], + "spans": [ + { + "bbox": [ + 159, + 339, + 451, + 353 + ], + "type": "text", + "content": "Figure 8: Screenshot from the similarity judgment task over video pairs." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 371, + 240, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 371, + 240, + 382 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 240, + 382 + ], + "type": "text", + "content": "B.4 SIMILARITY JUDGMENTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 392, + 506, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 506, + 469 + ], + "type": "text", + "content": "In the present work, we collected similarity judgments across audio and video datasets. Each dataset comprised of 4,950 unique pairs corresponding to the number of unordered subsets that contain two distinct objects (i.e., excluding self-similarity), within a set of 100 stimuli. We did not collect similarity judgments over the three datasets of images, as these were provided in Peterson et al. (2018) (and used here with permission). 
The experiments proceeded as follows: upon completion of the consent form and the pre-screening tasks, participants received instructions regarding the main experiment:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 479, + 470, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 479, + 470, + 514 + ], + "spans": [ + { + "bbox": [ + 140, + 479, + 470, + 514 + ], + "type": "text", + "content": "Audio. In this experiment we are studying how people perceive emotions. In each round you will be presented with two different recordings and your task will be to simply judge how similar are the emotions of the speakers." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 140, + 523, + 469, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 523, + 469, + 558 + ], + "spans": [ + { + "bbox": [ + 140, + 523, + 469, + 558 + ], + "type": "text", + "content": "Video. In this experiment we are studying how people perceive activities. In each round you will be presented with two different videos and your task will be to simply judge how similar are the activities in them." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 567, + 279, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 279, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 279, + 578 + ], + "type": "text", + "content": "The instructions then continued as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 140, + 589, + 469, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 589, + 469, + 634 + ], + "spans": [ + { + "bbox": [ + 140, + 589, + 469, + 634 + ], + "type": "text", + "content": "You will have seven response options, ranging from 0 ('Completely Dissimilar') to 6 ('Completely Similar'). Choose the one you think is most appropriate. 
Note: no prior expertise is required to complete this task, just choose what you intuitively think is the right answer." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 643, + 469, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 643, + 469, + 689 + ], + "spans": [ + { + "bbox": [ + 140, + 643, + 469, + 689 + ], + "type": "text", + "content": "The quality of your responses will be automatically monitored, and you will receive a bonus at the end of the experiment in proportion to your quality score. The best way to achieve a high score is to concentrate and give each round your best attempt." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 140, + 698, + 469, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 698, + 469, + 731 + ], + "spans": [ + { + "bbox": [ + 140, + 698, + 469, + 731 + ], + "type": "text", + "content": "The experiment will begin now. You will take up to 85 rounds where you have to answer this question. Remember to pay careful attention in order to get the best bonus!" 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": "As described in the instructions, in each trial, participants rated the similarity between a pair of sounds (how similar are the emotions of the two speakers?) or videos (how similar are the activities in the following two videos?) on a scale ranging from 0 (completely dissimilar) to 6 (completely similar) (Figure 8). Overall, participants completed 85 trials on a random subset of the possible pairs. To further motivate participants to provide good responses, we gave them an additional performance bonus for providing consistent data. Among the 85 trials, 5 trials were repeated for consistency checking. The responses were converted into a performance score by computing the Spearman correlation between the original and repeat ratings. Perfect scores resulted in a 10 cent bonus." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 183, + 179, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 183, + 179, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 179, + 194 + ], + "type": "text", + "content": "B.5 CAPTIONS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 204, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 504, + 239 + ], + "type": "text", + "content": "We collected free-text captions for the video and audio datasets. Captions for the image datasets were previously collected in Marjieh et al. (2022) and used here with permission. After completing the consent form and pre-screening tests, participants received the following instructions:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 246, + 470, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 246, + 470, + 281 + ], + "spans": [ + { + "bbox": [ + 140, + 246, + 470, + 281 + ], + "type": "text", + "content": "Audio. In this experiment we are studying how people describe emotions. You will be presented with different recordings of speakers and your task will be to describe their emotions. In doing so, please keep in mind the following instructions" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 152, + 282, + 367, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 282, + 367, + 294 + ], + "spans": [ + { + "bbox": [ + 152, + 282, + 367, + 294 + ], + "type": "text", + "content": "- Describe all the important aspects of the recording." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 302, + 470, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 302, + 470, + 336 + ], + "spans": [ + { + "bbox": [ + 140, + 302, + 470, + 336 + ], + "type": "text", + "content": "Video. 
In this experiment we are studying how people describe activities in videos. You will be presented with different videos of activities and your task will be to describe their content. In doing so, please keep in mind the following instructions" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 152, + 338, + 358, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 338, + 358, + 350 + ], + "spans": [ + { + "bbox": [ + 152, + 338, + 358, + 350 + ], + "type": "text", + "content": "- Describe all the important activities in the video." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 358, + 392, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 358, + 392, + 371 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 392, + 371 + ], + "type": "text", + "content": "As well as the following guidelines adapted from Marjieh et al. (2022):" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 152, + 379, + 391, + 439 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 152, + 379, + 391, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 379, + 391, + 390 + ], + "spans": [ + { + "bbox": [ + 152, + 379, + 391, + 390 + ], + "type": "text", + "content": "- Do not start the sentences with \"There is\" or \"There are\"." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 152, + 392, + 309, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 392, + 309, + 402 + ], + "spans": [ + { + "bbox": [ + 152, + 392, + 309, + 402 + ], + "type": "text", + "content": "- Do not describe unimportant details." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 152, + 403, + 368, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 403, + 368, + 415 + ], + "spans": [ + { + "bbox": [ + 152, + 403, + 368, + 415 + ], + "type": "text", + "content": "- You are not allowed to copy and paste descriptions." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 152, + 416, + 342, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 416, + 342, + 426 + ], + "spans": [ + { + "bbox": [ + 152, + 416, + 342, + 426 + ], + "type": "text", + "content": "- Descriptions should contain at least 5 words." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 152, + 428, + 371, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 428, + 371, + 439 + ], + "spans": [ + { + "bbox": [ + 152, + 428, + 371, + 439 + ], + "type": "text", + "content": "- Descriptions should contain at least 4 unique words." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 140, + 441, + 469, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 441, + 469, + 464 + ], + "spans": [ + { + "bbox": [ + 140, + 441, + 469, + 464 + ], + "type": "text", + "content": "Note: No prior expertise is required to complete this task, just describe what you intuitively think is important as accurately as possible." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 140, + 473, + 469, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 473, + 469, + 507 + ], + "spans": [ + { + "bbox": [ + 140, + 473, + 469, + 507 + ], + "type": "text", + "content": "The quality of your captions will be monitored automatically and providing low quality and repetitive responses could result in early termination of the experiment and hence a lower bonus." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 517, + 295, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 517, + 295, + 529 + ], + "spans": [ + { + "bbox": [ + 141, + 517, + 295, + 529 + ], + "type": "text", + "content": "You will describe up to 50 recordings." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 537, + 506, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 506, + 659 + ], + "type": "text", + "content": "These guidelines were enforced to ensure that participants deliver sufficiently informative captions that are not repetitive. In each trial of the main experiment, participants described a single audio (please describe the emotions of the speaker) or video stimulus (please describe the activity in the video). Overall, participants described up to 50 randomly presented stimuli. To filter out bad participants that tend to deliver repeated responses, in each trial (excluding the first 4 trials) we computed the mean edit distance between their current response and all previous responses that they previously provided using the partial_ratio function in the fuzzz6 Python package for fuzzy string matching. This function returns for a pair of input strings a matching score between 0 and 100 (100 being identical strings). Early termination was enforced if the mean response matching score was above 80. The idea here was to prevent participants from copying and pasting the same response over and over again (or varying it only slightly)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 671, + 157, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 671, + 157, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 671, + 157, + 682 + ], + "type": "text", + "content": "B.6 TAGS" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 692, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 692, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 692, + 504, + 715 + ], + "type": "text", + "content": "For the image, audio, and video datasets, we collected tag data, i.e., concise labels that describe the salient features of a stimulus. 
To do so, we developed a novel tag mining paradigm called STEP-Tag in" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 312, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 312, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 312, + 731 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 116, + 720, + 312, + 731 + ], + "type": "text", + "content": "https://github.com/seatgeek/thefuzz" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 114, + 99, + 221, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 99, + 221, + 111 + ], + "spans": [ + { + "bbox": [ + 114, + 99, + 221, + 111 + ], + "type": "text", + "content": "Mark the existing tags" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 117, + 118, + 179, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 118, + 179, + 137 + ], + "spans": [ + { + "bbox": [ + 117, + 118, + 179, + 137 + ], + "type": "text", + "content": "picking" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 117, + 140, + 180, + 150 + ], + "blocks": [ + { + "bbox": [ + 117, + 140, + 180, + 150 + ], + "lines": [ + { + "bbox": [ + 117, + 140, + 180, + 150 + ], + "spans": 
[ + { + "bbox": [ + 117, + 140, + 180, + 150 + ], + "type": "image", + "image_path": "442e688eade67c51016a7c0f2e60198b4abd7f0a403184413a6ae1f5a5feeaaf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 296, + 504, + 319 + ], + "lines": [ + { + "bbox": [ + 104, + 296, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 504, + 319 + ], + "type": "text", + "content": "Figure 9: Screenshot of an example tag mining task for videos. The tag \"picking\" received 5 stars (very relevant), whereas the tag \"apple\" is flagged (marked as irrelevant)." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 189, + 119, + 237, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 119, + 237, + 137 + ], + "spans": [ + { + "bbox": [ + 189, + 119, + 237, + 137 + ], + "type": "text", + "content": "apple" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 187, + 140, + 251, + 150 + ], + "blocks": [ + { + "bbox": [ + 187, + 140, + 251, + 150 + ], + "lines": [ + { + "bbox": [ + 187, + 140, + 251, + 150 + ], + "spans": [ + { + "bbox": [ + 187, + 140, + 251, + 150 + ], + "type": "image", + "image_path": "305054b44560047950dbbc8dd653c442dfab18e29527dfb6f15bf0605a4ca4b5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 364, + 87, + 501, + 190 + ], + "blocks": [ + { + "bbox": [ + 364, + 87, + 501, + 190 + ], + "lines": [ + { + "bbox": [ + 364, + 87, + 501, + 190 + ], + "spans": [ + { + "bbox": [ + 364, + 87, + 501, + 190 + ], + "type": "image", + "image_path": "63c64730495dff6b765e2e2b2cd0fd66867d64fdc3ded9a61bc60dc6d25aa0be.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 369, + 206, + 395, + 216 + ], + "lines": [ + { + "bbox": [ + 369, + 206, + 395, + 216 + ], + "spans": [ + { + "bbox": 
[ + 369, + 206, + 395, + 216 + ], + "type": "text", + "content": "Play again" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 114, + 175, + 208, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 175, + 208, + 186 + ], + "spans": [ + { + "bbox": [ + 114, + 175, + 208, + 186 + ], + "type": "text", + "content": "Are any tags missing?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 120, + 193, + 325, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 193, + 325, + 228 + ], + "spans": [ + { + "bbox": [ + 120, + 193, + 325, + 228 + ], + "type": "text", + "content": "Type in words describing the activity in the video, that are missing above. You can either select tags from a dropdown list or create entirely new ones. Submit your response for a new tag by pressing the enter key. You can add more than one tag." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 243, + 141, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 243, + 141, + 252 + ], + "spans": [ + { + "bbox": [ + 123, + 243, + 141, + 252 + ], + "type": "text", + "content": "peach" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 152, + 243, + 192, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 243, + 192, + 251 + ], + "spans": [ + { + "bbox": [ + 152, + 243, + 192, + 251 + ], + "type": "text", + "content": "Type more tags" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 312, + 266, + 329, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 266, + 329, + 274 + ], + "spans": [ + { + "bbox": [ + 312, + 266, + 329, + 274 + ], + "type": "text", + "content": "Next" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 205, + 329, + 406, + 399 + ], + "blocks": [ + { + "bbox": [ + 205, + 329, + 406, + 399 + ], + "lines": [ + { + "bbox": [ + 205, + 329, + 
406, + 399 + ], + "spans": [ + { + "bbox": [ + 205, + 329, + 406, + 399 + ], + "type": "table", + "html": "
Dataset (# of stimuli)meanstdtotal
Vegetables (120)3.21.1385
Furniture (120)5.21.7627
Animals (120)8.22.7988
Audio-emotions (1000)9.13.59092
Video-activities (1000)8.52.98482
", + "image_path": "bab50fb7d54ac2fc9327b528782562d43cbef44ca56bffdadb1d9670980212a9.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 406, + 478, + 418 + ], + "lines": [ + { + "bbox": [ + 130, + 406, + 478, + 418 + ], + "spans": [ + { + "bbox": [ + 130, + 406, + 478, + 418 + ], + "type": "text", + "content": "Table 2: Mean, standard deviation, and total number of tags collected for each dataset." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 439, + 506, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 506, + 671 + ], + "type": "text", + "content": "which each stimulus was treated as a separate \"chain\" (see Figure 2 in the paper). When the stimulus was presented for the first time, the participant was asked to provide at least one tag. For the following iterations, we sequenced participants so that each of them had to rate the tags provided by participants from the previous iterations within the same chain. The rating was either choosing between one (not very relevant) to five stars (very relevant), or marking the tag as completely irrelevant by using the flag icon (see Figure 9). Participants could optionally introduce new tags that will subsequently be presented to other participants assigned to the same chain. Participants could only provide tags that were not already present, and they had to be in lower-case letters. To discourage frequent use of long word combinations, a pop-up window appeared if participants used two or more white spaces (i.e., three or more words) to warn that long combinations should only be used when completely necessary. This process continued for at least 10 iterations, after which we checked at each consequent iteration whether the chain was \"full\". 
We considered a chain to be full if its latest iteration had at least 2 tags that were rated at least 3 times and had a mean rating of 3 stars. If a chain was not full after 20 iterations, we stopped collecting further iterations. Since each experimental batch lasted for a fixed duration of less than one day, in some cases we did not complete all chains, and a few chains had fewer iterations (3 for vegetables, 6 for animals and 2 for furniture, out of 120 chains each). Our experiment incentivized participants to provide new tags by paying them a performance bonus of 0.01 USD for every up-vote (i.e., not flagged) given by other participants. On the contrary, if two or more tags of the same participant were flagged by others, the participant was excluded (the participant received a warning after the first flag). We provide summary statistics on the number of collected tags in Table 2." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 675, + 507, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 675, + 507, + 699 + ], + "spans": [ + { + "bbox": [ + 104, + 675, + 507, + 699 + ], + "type": "text", + "content": "After accepting the consent form and passing the pre-screening tasks, participants received introductory instructions regarding the main experiment:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 140, + 709, + 470, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 709, + 470, + 733 + ], + "spans": [ + { + "bbox": [ + 140, + 709, + 470, + 733 + ], + "type": "text", + "content": "Images. Rate & Tag animals/furniture/vegetables! Thanks for participating in this game! 
In this game you will:" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 153, + 82, + 348, + 121 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 153, + 82, + 348, + 93 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 82, + 348, + 93 + ], + "spans": [ + { + "bbox": [ + 153, + 82, + 348, + 93 + ], + "type": "text", + "content": "- Watch images of animals/furniture/vegetables." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 153, + 96, + 320, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 96, + 320, + 107 + ], + "spans": [ + { + "bbox": [ + 153, + 96, + 320, + 107 + ], + "type": "text", + "content": "- Rate tags that other players have given." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 153, + 110, + 326, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 110, + 326, + 121 + ], + "spans": [ + { + "bbox": [ + 153, + 110, + 326, + 121 + ], + "type": "text", + "content": "- Add new tags that you think are missing." 
+ } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 140, + 133, + 469, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 133, + 469, + 156 + ], + "spans": [ + { + "bbox": [ + 140, + 133, + 469, + 156 + ], + "type": "text", + "content": "Audio. Rate & Tag emotions! Thanks for participating in this game! In this game you will:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 152, + 160, + 469, + 210 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 152, + 160, + 469, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 160, + 469, + 182 + ], + "spans": [ + { + "bbox": [ + 152, + 160, + 469, + 182 + ], + "type": "text", + "content": "- Listen to a speech fragment and focus on the emotional content of the recording." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 153, + 186, + 320, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 186, + 320, + 196 + ], + "spans": [ + { + "bbox": [ + 153, + 186, + 320, + 196 + ], + "type": "text", + "content": "- Rate tags that other players have given." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 153, + 199, + 326, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 199, + 326, + 210 + ], + "spans": [ + { + "bbox": [ + 153, + 199, + 326, + 210 + ], + "type": "text", + "content": "- Add new tags that you think are missing." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 141, + 221, + 352, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 221, + 352, + 233 + ], + "spans": [ + { + "bbox": [ + 141, + 221, + 352, + 233 + ], + "type": "text", + "content": "Video. Rate & Tag activities! 
In this game you will:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 153, + 238, + 373, + 277 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 153, + 238, + 373, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 238, + 373, + 250 + ], + "spans": [ + { + "bbox": [ + 153, + 238, + 373, + 250 + ], + "type": "text", + "content": "- Watch a video and focus on the activities happening." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 153, + 252, + 320, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 252, + 320, + 263 + ], + "spans": [ + { + "bbox": [ + 153, + 252, + 320, + 263 + ], + "type": "text", + "content": "- Rate tags that other players have given." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 153, + 266, + 326, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 266, + 326, + 277 + ], + "spans": [ + { + "bbox": [ + 153, + 266, + 326, + 277 + ], + "type": "text", + "content": "- Add new tags that you think are missing." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 289, + 416, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 289, + 416, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 416, + 300 + ], + "type": "text", + "content": "Participants then received further instructions regarding the rules of the game" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 140, + 312, + 469, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 312, + 469, + 400 + ], + "spans": [ + { + "bbox": [ + 140, + 312, + 469, + 400 + ], + "type": "text", + "content": "Images. After watching the animal/furniture/vegetable you will see tags given by other players that describe the animal/furniture/vegetable. 
You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person seeing this animal/furniture/vegetable, you may see no previous tags. You can also add your own tag that is relevant to describe the animal/furniture/vegetable. Your tag will then be rated by other players who are playing the game simultaneously." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 140, + 412, + 469, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 412, + 469, + 500 + ], + "spans": [ + { + "bbox": [ + 140, + 412, + 469, + 500 + ], + "type": "text", + "content": "Audio. After listening to the recording, you will see tags given by other players that describe the emotions in the speech fragment. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person listening to this speech sample, you may see no previous tags. You can also add your own tag that is relevant to describe the emotions in the speech fragment. Your tag will then be rated by other players who are playing the game simultaneously." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 140, + 511, + 469, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 511, + 469, + 600 + ], + "spans": [ + { + "bbox": [ + 140, + 511, + 469, + 600 + ], + "type": "text", + "content": "Video. After watching the video, you will see tags given by other players that describe the activities in the video. You should rate the relevance of each tag by clicking the appropriate amount of stars (1 star not very relevant, 5 stars very relevant). 
If you think that the tag is a mistake or completely irrelevant, you should flag it by clicking the flag icon. If you are the first person watching this video, you may see no previous tags. You can also add your own tag that is relevant to describe the activities in the video. Your tag will then be rated by other players who are playing the game simultaneously." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 611, + 504, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 504, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 504, + 623 + ], + "type": "text", + "content": "Finally, participants received the following guidelines regarding the tag input and the bonus scheme:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 140, + 634, + 469, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 634, + 469, + 668 + ], + "spans": [ + { + "bbox": [ + 140, + 634, + 469, + 668 + ], + "type": "text", + "content": "Keep tags short. A word like \"green grass\" should rather be submitted as \"green\" and \"grass\", whereas a compound word such as \"red wine\" cannot be separated, since \"red wine\" means something different than just \"red\" and \"wine\"." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 679, + 196, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 679, + 196, + 690 + ], + "spans": [ + { + "bbox": [ + 141, + 679, + 196, + 690 + ], + "type": "text", + "content": "Bonus rules." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 152, + 696, + 468, + 731 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 152, + 696, + 468, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 696, + 468, + 717 + ], + "spans": [ + { + "bbox": [ + 152, + 696, + 468, + 717 + ], + "type": "text", + "content": "- If the tag you provide gets rated as a relevant tag (i.e., not flagged) by other players" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 152, + 720, + 403, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 720, + 403, + 731 + ], + "spans": [ + { + "bbox": [ + 152, + 720, + 403, + 731 + ], + "type": "text", + "content": "- If your tag is unique and have not been introduced by others" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 235, + 80, + 376, + 127 + ], + "blocks": [ + { + "bbox": [ + 235, + 80, + 376, + 127 + ], + "lines": [ + { + "bbox": [ + 235, + 80, + 376, + 127 + ], + "spans": [ + { + "bbox": [ + 235, + 80, + 376, + 127 + ], + "type": "table", + "html": "
ModalitySTEPCaptions
Audio230187
Video264291
", + "image_path": "c834513de26c89851221182a5b0ca2ac5873298ec9e854c14575b280143dd2a5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 149, + 134, + 460, + 147 + ], + "lines": [ + { + "bbox": [ + 149, + 134, + 460, + 147 + ], + "spans": [ + { + "bbox": [ + 149, + 134, + 460, + 147 + ], + "type": "text", + "content": "Table 3: Median of overall participants' time spent per stimulus (in seconds)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 140, + 167, + 470, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 167, + 470, + 201 + ], + "spans": [ + { + "bbox": [ + 140, + 167, + 470, + 201 + ], + "type": "text", + "content": "Note: Simply writing many and irrelevant tags is not a good idea because other players might flag your tag. Your experiment will terminate early if there are too many red flags!" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 213, + 470, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 213, + 470, + 247 + ], + "spans": [ + { + "bbox": [ + 140, + 213, + 470, + 247 + ], + "type": "text", + "content": "Please try to use a variety of words to describe the animal / furniture / vegetable / emotion in the speech fragment / activities in the video, and use the entire star rating scale for your responses." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 258, + 246, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 258, + 246, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 246, + 270 + ], + "type": "text", + "content": "B.6.1 VALIDATING STEP-TAG" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "content": "We conducted a small, exploratory ablation study to validate STEP-Tag as a procedure for collecting diverse, accurate, and informative tags. First, we compared using multiple tags from the last iteration of STEP-Tag to using just a single randomly-selected highly-rated tag from the last iteration. We found that using a single tag greatly decreased correlation with human similarity (i.e., for the video dataset, the best-performing method on multiple tags had a correlation of " + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "inline_equation", + "content": "r = 0.74" + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "content": " while the best-performing method on single labels had a correlation of " + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "inline_equation", + "content": "r = 0.35" + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "content": "). Second, we compared tags from the first iteration of STEP-Tag (equivalent to collecting tags without an adaptive procedure) to tags from the last iteration. 
We found that using first iteration tags greatly decreased correlation with human similarity (i.e., for the video dataset, the 'Tags CNNB mean (no split)' method, the correlation from the last iteration was " + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "inline_equation", + "content": "r = 0.74" + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "content": " and from the first iteration it was " + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "inline_equation", + "content": "r = 0.44" + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "content": "; for 'Tags overlap' it was " + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "inline_equation", + "content": "r = 0.56" + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "content": " from the last iteration and " + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "inline_equation", + "content": "r = 0.38" + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "content": " from the first iteration). Finally, we extracted the Kinetics-200 labels for each video to compare the tags from STEP-Tag against the kinds of labels typically collected for machine learning datasets. We found that using labels decreased the correlation with human similarity (i.e., the best-performing method on pipeline tags had a correlation of " + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "inline_equation", + "content": "r = 0.74" + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "content": " while the best-performing method on dataset labels had a correlation of " + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "inline_equation", + "content": "r = 0.64" + }, + { + "bbox": [ + 104, + 278, + 506, + 445 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 457, + 309, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 309, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 309, + 468 + ], + "type": "text", + "content": "B.7 DURATION OF STEP-TAG AND CAPTIONS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 506, + 536 + ], + "type": "text", + "content": "To compare STEP-tag and captions, we computed the median of overall participants' time spent per stimulus (see Table 3). The times were only collected for the audio and video modality (captions for the image datasets were already collected by Marjieh et al. (2022)). We see that both methods consume roughly similar amounts of time, which is desirable as our analysis suggests that in some domains (e.g., video) tags yield the best results whereas in others (e.g., audio) captions do." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 550, + 246, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 550, + 246, + 562 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 246, + 562 + ], + "type": "text", + "content": "C PREDICTION METHODS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 575, + 506, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 506, + 632 + ], + "type": "text", + "content": "We used two main types of methods to predict human similarity judgments. The first class (\"DNN-based methods\", described in section C.1) make use of pre-trained embedding models. In the second class of models (\"Word Frequency Analysis methods\", described in the section C.2) simple feature extraction techniques are used instead of pre-trained deep learning models. 
Figure 1 depicts schematic overview of all prediction methods that we used." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 645, + 234, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 645, + 234, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 234, + 656 + ], + "type": "text", + "content": "C.1 DNN-BASED METHODS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 666, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 506, + 734 + ], + "type": "text", + "content": "The DNN-based methods use various embeddings and deep learning representations to predict human similarity judgments. These methods could be further split into three groups based on the kinds of input data they process, namely if they use a single sensory modality that is either image, audio or video (\"unimodal models\"; see subsection C.1.1), or use text that is either tag or captions (\"text embeddings\"; see subsection C.1.2), or use both (\"multimodal models\"). 
In addition, we also tested the performance of \"stacked\" representations, where the sensory and textual embedding of a select" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": "number of models were concatenated into a single long embedding. Overall, the computation time of embedding methods took about two weeks on an x1.16xlarge Amazon Web Services instance with 64 vCPUs and 976 GiB of memory." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 132, + 293, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 132, + 293, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 132, + 293, + 144 + ], + "type": "text", + "content": "C.1.1 UNIMODAL DNN-BASED METHODS" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 138, + 180, + 471, + 544 + ], + "blocks": [ + { + "bbox": [ + 110, + 168, + 501, + 180 + ], + "lines": [ + { + "bbox": [ + 110, + 168, + 501, + 180 + ], + "spans": [ + { + "bbox": [ + 110, + 168, + 501, + 180 + ], + "type": "text", + "content": "Table 4: All 30 image baseline models occurring in the top 50 best models reported in Figure 3A." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 180, + 471, + 544 + ], + "lines": [ + { + "bbox": [ + 138, + 180, + 471, + 544 + ], + "spans": [ + { + "bbox": [ + 138, + 180, + 471, + 544 + ], + "type": "table", + "html": "
Model nameAverage scoreSD scoreTop 1 accuracyNumber of parameters (M)
1Swin0.660.0681.5223.37
2ConvNeXT0.640.07N/A348.15
3NF-ResNet0.620.0480.6523.51
4NFNet l00.610.0882.7532.77
5ResNetV20.600.11N/A928.34
6NF-RegNet0.590.0579.299.26
7VGG160.580.1173.35134.27
8VGG190.580.1174.21139.58
9ViT0.580.1275.956.16
10ResMLP0.570.0783.59128.37
11Twins-SVT0.570.0681.6823.55
12Twins-PCPVT0.570.0481.0923.59
13VGG130.570.1171.59128.96
14CaiT0.570.0482.1917.18
15VGG110.570.1070.36128.77
16gMLP0.560.0679.6419.17
17PIT0.560.0378.1910.23
18DeiT0.560.0372.175.52
19ConViT0.560.0373.115.52
20TNT0.560.0381.5223.37
21CoaT0.550.0478.435.35
22gMixer0.550.0578.0424.34
23XCiT0.550.0482.5711.92
24IG ResNeXt0.530.1385.44826.36
25Visformer0.520.0282.1139.45
26RepVGG0.520.1180.2181.26
27CLIP image0.500.11N/A102.01
28JXNesT0.500.0781.4216.67
29ECAResNet0.470.1480.4528.11
30DenseNet0.470.1274.746.95
", + "image_path": "6f907053e36ebb00a0b4f8c4bb80870c51050c3a9fdb06abcd35bc2d3478a015.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 119, + 545, + 504, + 567 + ], + "lines": [ + { + "bbox": [ + 119, + 545, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 119, + 545, + 504, + 567 + ], + "type": "text", + "content": "Note. Performance accuracy on ImageNet was based on Wightman (2019) and was not available for all models." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 594, + 506, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 506, + 682 + ], + "type": "text", + "content": "Image models We used 560 pre-trained models from the Pytorch Image Models (timm) repository (Wightman, 2019). We chose this repository as it contains an extensive and highly diverse set of pre-trained models in terms of architecture backbones, model sizes, and training sets. The repository includes models published from 2014 to 2022 that use various training sets (such as ImageNet1k, ImageNet21k, Instagram, etc.), training procedures objectives (e.g., pre-training, fine-tuning, self-supervision, weak supervision, etc.) and architectures (e.g., VGG, ResNet, Inception, Transformer, etc.). The repository also reports various evaluation metrics for each model (e.g., their ImageNet performance)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "For each model, we computed the embedding from the last layer (typically before the final softmax layer; see below and Figure 10 for a preliminary analysis for the effect of layer depth in audio models). 
We then computed the cosine similarity between pairs of embedding vectors to produce a similarity matrix. The entire list of the performance of all models is detailed in the OSF repository" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 79, + 495, + 277 + ], + "blocks": [ + { + "bbox": [ + 111, + 79, + 495, + 277 + ], + "lines": [ + { + "bbox": [ + 111, + 79, + 495, + 277 + ], + "spans": [ + { + "bbox": [ + 111, + 79, + 495, + 277 + ], + "type": "image", + "image_path": "945b41b903c72933eacab4066b51f19c2d63f10c7678cac69daf4dd5aebd1480.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 291, + 504, + 315 + ], + "lines": [ + { + "bbox": [ + 104, + 291, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 504, + 315 + ], + "type": "text", + "content": "Figure 10: Scores for individual layers of audio models scaled to the total number of layers. Models are colored by their meta architecture." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 335, + 506, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 335, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 506, + 381 + ], + "type": "text", + "content": "associated with this paper7. Table 4 presents additional details for the top 42 image baseline models in Figure 3A including their average score (correlation to human judgments) across the three image datasets, the standard deviation (SD) of this score (across datasets, repeated runs and available model parameters in Wightman (2019)), their ImageNet accuracy, and their number of trainable parameters." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 384, + 506, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 506, + 583 + ], + "type": "text", + "content": "Figure 4A shows the correlation to human similarity as a function of the number of parameters for all 569 models. In general, we found that models that have more parameters perform better (Figure 4A). Plotting all the embedding technique correlations against the number of training parameters of their respective models showed statistically significant positive correlation " + }, + { + "bbox": [ + 104, + 384, + 506, + 583 + ], + "type": "inline_equation", + "content": "(r = 0.39, p < 0.001)" + }, + { + "bbox": [ + 104, + 384, + 506, + 583 + ], + "type": "text", + "content": ". However, one possible explanation for this could be the improved performance of newer models, which typically have more parameters, on various computer vision tasks. 
To test this, we computed the performance (i.e., correlation with human similarity) of the various models as a function of their accuracy on ImageNet (Deng et al., 2009) - which was provided in Wightman (2019) for all models except for CLIP (whose implementation came from a different repository) as summarized in Figure 4B. We found a positive correlation between the two metrics " + }, + { + "bbox": [ + 104, + 384, + 506, + 583 + ], + "type": "inline_equation", + "content": "(r = 0.26, p < 0.001)" + }, + { + "bbox": [ + 104, + 384, + 506, + 583 + ], + "type": "text", + "content": ", though with some clear exceptions. For example, the vision transformer BEiT (Bao et al., 2021) and the convolutional architecture EfficientNet (Tan & Le, 2019) achieved high accuracy on ImageNet but performed poorly on human data. On the other hand, the vision transformer Swin (Liu et al., 2021) and the convolutional architecture ConvNext (Liu et al., 2022) both performed well on ImageNet and human similarity. This suggests that architecture and number of parameters are better predictors of similarity judgments than performance on ImageNet. Further analysis is required to determine what kind of architectural components actually contribute to more human-like performance (Langlois et al., 2021)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 595, + 506, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 595, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 595, + 506, + 674 + ], + "type": "text", + "content": "Audio models We used all pre-trained wav2vec 2.0 (Baevski et al., 2020) and HuBERT (Hsu et al., 2021) models available in torchaudio (Yang et al., 2021). We also extracted embeddings from WavLM (Chen et al., 2021) and data2vec audio models (Baevski et al., 2022). 
Furthermore, we used additional wav2vec 2.0 and HuBERT models that were either specialized on emotion recognition or speaker identification (wen Yang et al., 2021; Wagner et al., 2022; Ravanelli et al., 2021). The performance of HuBERT, wav2vec 2.0, and WavLM models is shown in Figure 3B. Additional details about the models are displayed in Table 5." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 677, + 506, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 713 + ], + "type": "text", + "content": "In addition, we explored the correlation between the audio models and human similarity data as a function of the layer in the model. Earlier literature has suggested that similarity to human representations may depend on the layer of the model (Kell et al., 2018; Yamins et al., 2014; Yamins," + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 468, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 468, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 468, + 731 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 116, + 720, + 468, + 731 + ], + "type": "text", + "content": "https://osf.io/kzbr5/?view_only=3dea58e008ce41c290ef0f374bddbf444" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], 
+ "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 149, + 100, + 462, + 531 + ], + "blocks": [ + { + "bbox": [ + 192, + 89, + 416, + 100 + ], + "lines": [ + { + "bbox": [ + 192, + 89, + 416, + 100 + ], + "spans": [ + { + "bbox": [ + 192, + 89, + 416, + 100 + ], + "type": "text", + "content": "Table 5: All audio baseline models used in the analysis." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 149, + 100, + 462, + 531 + ], + "lines": [ + { + "bbox": [ + 149, + 100, + 462, + 531 + ], + "spans": [ + { + "bbox": [ + 149, + 100, + 462, + 531 + ], + "type": "table", + "html": "
Model nameEmotion correlationNumber of parameters (M)
1wav2vec 2.0 lv60k (100h)0.49317
2wav2vec 2.0 lv60k (960h)0.49317
3wav2vec 2.0 lv60k0.51317
4wav2vec 2.0 lv60k (10m)0.51317
5HuBERT xlarge ASR0.451000
6HuBERT xlarge0.461000
7HuBERT large ASR0.46300
8wav2vec 2.0 large XLSR530.47317
9HuBERT large0.46300
10wav2vec 2.0 (Audeering, emotion)0.49317
11HuBERT base0.4190
12WavLM large0.46316.62
13HuBERT base (superb, emotion)0.4290
14HuBERT base (superb, speaker)0.4290
15WavLM base+0.4194.70
16wav2vec 2.0 base (960h)0.3895
17WavLM base0.3994.70
18wav2vec 2.0 base0.3495
19wav2vec 2.0 base (10m)0.3495
20wav2vec 2.0 base (superb, emotion)0.3495
21wav2vec 2.0 base (superb, speaker)0.3495
22wav2vec 2.0 base (100h)0.3295
23HuBERT large (superb, emotion)0.29300
24HuBERT large (superb, speaker)0.29300
25wav2vec 2.0 large (100h)0.32317
26wav2vec 2.0 large (superb, emotion)0.31317
27wav2vec 2.0 large (superb, speaker)0.31317
28wav2vec 2.0 large (960h)0.31317
29wav2vec 2.0 large (10m)0.31317
30data2vec audio large (960h)0.31313.28
31data2vec audio base (100h)0.23313.28
32data2vec audio large (100h)0.23313.28
33data2vec audio large (10m)0.21313.28
34wav2vec 2.0 (SpeechBrain, emotion)0.1195
35data2vec audio base (960h)0.1693.16
36data2vec audio base (10m)0.1593.16
", + "image_path": "0e62a0858d91fc0202ea64c360aefb7dae372e76a7354810f44d0a7af623935e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 552, + 504, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 628 + ], + "type": "text", + "content": "2020). We expected that the layers closer to the input of the model (where the representation is more low-level) to be less predictive. In general, we found that this was the case (Figure 10). In some variants of wav2vec, however, intermediate representations performed better, possibly due to the misalignment of the training task of wav2vec with the emotion task. This analysis confirms the choice we made in the paper to mostly use the last two layers of the models. Preliminary analysis of the image and video models also explored different layers, but the results were similar to those we presented in audio, and are therefore not reported here." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": "Video models We extracted embeddings from the 'Slow' (a 3D ResNet; see Feichtenhofer et al. (2019)), Slowfast (a 2-path model with one path capturing semantics and the other capturing fine details; see Feichtenhofer et al. (2019)), and X3d (a model that initially starts as a simple 2D image classifier but is expanded in several axes; see Feichtenhofer (2020)) architectures implemented in pytorchvideo (Fan et al., 2021). All video models were pre-trained on the Kinetics-400 dataset (Kay et al., 2017). The performance of the models is displayed in Figure 3C. 
Numeric correlation values are detailed in Table 6 along with model accuracy (Top1 and Top5) on Kinetics-400, and the number of parameters in each model. The accuracies and parameter counts are listed as reported in" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "Fan et al. (2021). As with previous modalities, the number of parameters appears to be positively correlated with correlation to human similarity." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 133, + 134, + 478, + 235 + ], + "blocks": [ + { + "bbox": [ + 192, + 122, + 417, + 134 + ], + "lines": [ + { + "bbox": [ + 192, + 122, + 417, + 134 + ], + "spans": [ + { + "bbox": [ + 192, + 122, + 417, + 134 + ], + "type": "text", + "content": "Table 6: All video baseline models used in the analysis." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 133, + 134, + 478, + 235 + ], + "lines": [ + { + "bbox": [ + 133, + 134, + 478, + 235 + ], + "spans": [ + { + "bbox": [ + 133, + 134, + 478, + 235 + ], + "type": "table", + "html": "
Model nameCorrelationKinetics-400 Top1 AccKinetics-400 Top5 AccNumber of parameters (M)
1Slowfast r500.6576.9492.6934.57
2Slowfast r1010.6477.9093.2762.83
3Slow r500.6174.5891.6332.45
4X3d M0.5375.9492.723.79
5X3d S0.4973.3391.273.79
6X3d XS0.4869.1288.633.79
", + "image_path": "e0af87b13737d1fdb5751ee23688ea68c677bf3f0accc0bf1ca6d8f515f5d2bb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 251, + 265, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 265, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 265, + 262 + ], + "type": "text", + "content": "C.1.2 TEXT EMBEDDING METHODS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 270, + 506, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 391 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 391 + ], + "type": "text", + "content": "Caption text embedding. Since there are multiple captions per stimulus, an aggregation procedure had to be applied to produce a single embedding vector for each stimulus. In our main analysis, for each stimulus, we extracted the embedding for each associated caption and averaged these embeddings together before computing cosine similarity between the mean embeddings. We also tried an alternative approach of concatenating the captions together into a single paragraph, which we then passed through the LLMs to compute a single embedding per stimulus. We found that this did not consistently improve performance and in many cases even decreased it, though we note that we did not experiment with different permutations of the concatenated captions, nor did we extensively study other ways to combine them together. Future work could explore other techniques for pre-processing captions and aggregating representations from multiple captions in ways that would improve correlation with human similarity judgments." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 396, + 507, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 507, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 507, + 540 + ], + "type": "text", + "content": "Tag text embedding. We experimented with several algorithms for computing similarity between sets (or multi-sets) of tags. The algorithms described in this section all involve using ConceptNet NumberBatch (CNNB) (Speer et al., 2017) as the embedding backbone for turning discrete tags into continuous vector representations. For each stimulus, we took the tags remaining in the final iteration, and tested whether they were found in the dictionary for our embedding model. If a tag was not found and if it contained no spaces, we tried to correct the spelling before trying to look it up in the dictionary again. If a tag contained spaces, we split it into individual words, correct their spelling, and averaged together the embedded representations of those words that were found in the dictionary. Tags that were not found even after spelling correction and splitting were excluded from the set and did not contribute to the final representation. For the methods marked ‘(no split)’ we did not split multi-word tags, instead we just excluded multi-word tags that were not found in the embedding model dictionary. In the following, we describe the different techniques used to generate predictions based on tag embeddings." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 544, + 505, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 505, + 601 + ], + "type": "text", + "content": "Tags CNNB overlap. 
For each pair of stimuli, we counted the number of 'almost identical' tag embeddings, defined as every respective element of the two embeddings being less than a certain threshold apart (in our case, this threshold was 0.1). We then set similarity for that pair of stimuli to be this count, i.e., the number of 'almost identical' tags, normalized by the total number of tags across the respective two sets." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": "Tags CNNB quantized. This method involves quantizing tags using cosine similarity to find the number of unique tags. For each pair of stimuli, we counted the number of tags assigned to the first stimulus that had cosine similarity greater than a certain threshold (in our case, this threshold was 0.7) to at least one tag of the second stimulus (call this value " + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "N_A" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": ") and vice-versa (" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "N_B" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": "). The minimum of these two values is the number of unique, shared tags between the two sets (" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\min(N_A, N_B)" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": "). 
The total number of unique tags across the two sets is then the total number of tags in each set (" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "T_A + T_B" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": ") minus the maximum number of shared tags (" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\max(N_A, N_B)" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": "). We compute similarity as the ratio of the number of unique, shared tags to the total number of unique tags, " + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "S_{AB} = \\frac{\\min(N_A, N_B)}{T_A + T_B - \\max(N_A, N_B)}" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": ". For example, suppose the two sets of tags are " + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "A: \\{a, b, c, g\\}" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "B: \\{a, b, d, e\\}" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": ", so " + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "T_A = T_B = 4" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": ", and that " + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "a, c" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": " have cosine similarity of 0.8. 
The number of tags from set A found in set B is " + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "N_A = 3" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": ", and those from B found in A is " + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "N_B = 2" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": ". The number of unique, shared tags is " + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "inline_equation", + "content": "\\min(N_A, N_B) = 2" + }, + { + "bbox": [ + 104, + 605, + 507, + 733 + ], + "type": "text", + "content": " (since" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "inline_equation", + "content": "\\{a, b, c\\}" + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "text", + "content": " can be represented by " + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "inline_equation", + "content": "\\{a, b\\}" + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "text", + 
"content": ", and the total number of unique tags is " + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "inline_equation", + "content": "4 + 4 - 3 = 5" + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "text", + "content": " (since " + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "inline_equation", + "content": "\\{a, b, c, g, a, b, d, e\\}" + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "text", + "content": " can be represented by " + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "inline_equation", + "content": "\\{a, b, d, e, g\\}" + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "text", + "content": "). The assigned similarity is then " + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "inline_equation", + "content": "S_{AB} = \\frac{2}{5}" + }, + { + "bbox": [ + 105, + 82, + 505, + 106 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "content": "Tags CNNB mean. The set of tag embeddings for each stimulus were averaged together to form a single embedding assigned to the respective stimulus. We then computed cosine similarity on the embeddings of each pair of stimuli." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 148, + 504, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 148, + 504, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 148, + 504, + 172 + ], + "type": "text", + "content": "Tags CNNB mean (no split). Same as above, but without splitting multi-word tags (i.e., ones that contain spaces) during the embedding process." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 177, + 504, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 177, + 504, + 210 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 504, + 210 + ], + "type": "text", + "content": "All spelling corrections in the algorithms listed above were performed using the Python package `pyspellchecker^8`, taking the top corrected recommendation returned by the spell checker in each case." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 216, + 505, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 505, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 505, + 271 + ], + "type": "text", + "content": "Tags to caption Roberta (SimCSE). Additionally, for the images datasets, we experimented with converting sets of tags into captions and then using those captions with our best-performing LLM ('sup-simcse-roberta-large') the same way we do with user-generated captions. To convert a set of tags into a caption, we joined the set of tags with commas and pretended them with the phrase \"This is an image of\"." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 284, + 277, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 284, + 277, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 277, + 295 + ], + "type": "text", + "content": "C.1.3 OTHER DNN-BASED METHODS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 303, + 504, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 303, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 303, + 504, + 326 + ], + "type": "text", + "content": "For the image datasets, we also considered several other methods that made use of DNNs but do not fit into the categories described above." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 338, + 504, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 404 + ], + "type": "text", + "content": "GPT3 prompting We experimented with prompting GPT3 (Brown et al., 2020), a large pre-trained language model, to directly output similarity judgments as a text-completion problem rather than having to access model embeddings as we did above. We used a few-shot prompting approach where in each prompt we included three context examples of pairs of tag sets and their associated similarity rating. We then provided the pair of tag sets for the two images that we wanted to get a similarity rating for but left the rating empty for the model to fill in." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 410, + 438, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 438, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 438, + 422 + ], + "type": "text", + "content": "Here is an example prompt with the GPT3 response bolded and in square brackets:" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 106, + 428, + 511, + 685 + ], + "blocks": [ + { + "bbox": [ + 106, + 428, + 511, + 685 + ], + "lines": [ + { + "bbox": [ + 106, + 428, + 511, + 685 + ], + "spans": [ + { + "bbox": [ + 106, + 428, + 511, + 685 + ], + "type": "table", + "html": "
People described pairs of images using words.
How similar are the two images in each pair on a scale of 0-1 where 0 is completely dissimilar and 1 is completely similar?
Here are the descriptions of image one: tortoise, slow, protected, shell, turtle, scaly, old, cold-blooded
Here are the descriptions of image two: monkey, ape, mammal, black and white, hairy, agile, primate, smart, tree-dwelling
Rating: 0.05
Here are the descriptions of image one: rhinoceros, horn, gray, standing, heavy body, endangered, wild, africa, african
Here are the descriptions of image two: tiger, open mouth, stripes, feline, predator
Rating: 0.27
Here are the descriptions of image one: goat, eye, leg
Here are the descriptions of image two: mammal, wide-nosed, mandrill, primate, baboon, smart
Rating: 0.19
Here are the descriptions of image one: black, primate, mammal, hairy, chimpanzee, africa, african, great ape, smart, omnivore
Here are the descriptions of image two: zebra, striped, two-toned, wild, staring, mammal, equine, herd animal, africa
Rating: [0.14]
", + "image_path": "7f2f031fb1676aaa68caedf8d18d388be0ba557c6db67d2c736e26ffbca707c5.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 689, + 504, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 712 + ], + "type": "text", + "content": "We repeated this four times for each pair of images in each image dataset with a different set of context examples during each repetition and averaged together the GPT responses to get a final" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 382, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 382, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 382, + 731 + ], + "type": "text", + "content": "8https://pyspellchecker.readthedocs.io/en/latest/" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": "similarity prediction for each pair. 
In total, creating the context examples required having access to human similarity judgments over only 12 pairs of images. We found that this approach yielded surprisingly good predictions, with an average correlation of " + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "inline_equation", + "content": "r = 0.62" + }, + { + "bbox": [ + 104, + 82, + 504, + 138 + ], + "type": "text", + "content": " across the image datasets. We believe this approach merits future investigation to determine whether prompt engineering can further increase the performance." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": "Image captioning models We experimented with using pre-trained image captioning models to generate captions for our images and then using those captions with our best-performing LLM ('sup-simcse-roberta-large') the same way we do with user-generated captions. We used three pre-trained image captioning models from HuggingFace ('flamingo-mini', 'vilt-b32-finetuned-vqa', and 'vit-gpt2-image-captioning') to generate text descriptions for our images. However, the performance was quite poor with an average of " + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "inline_equation", + "content": "r = 0.29" + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": " across the three models. 
As a result, " + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "inline_equation", + "content": "O(N)" + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": " language-based methods cannot easily be reduced to " + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "inline_equation", + "content": "O(1)" + }, + { + "bbox": [ + 104, + 152, + 506, + 240 + ], + "type": "text", + "content": " even when domain-relevant pre-trained caption models are available." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 255, + 308, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 308, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 308, + 266 + ], + "type": "text", + "content": "C.2 WORD FREQUENCY ANALYSIS METHODS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 276, + 504, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 276, + 504, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 276, + 504, + 321 + ], + "type": "text", + "content": "In this work, we also conducted an additional evaluation of prediction models beyond embedding-based techniques (described in the previous section). Specifically, we compared the predictions of embedding-based models, which utilize deep learning representations, with those of traditional methods of text mining." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 326, + 476, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 326, + 476, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 476, + 338 + ], + "type": "text", + "content": "Before the word frequency analysis, we performed the following initial pre-processing steps" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 347, + 506, + 464 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 132, + 347, + 504, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 347, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 132, + 347, + 504, + 370 + ], + "type": "text", + "content": "- For caption data, we concatenated all the captions describing the same stimulus into a single long \"document.\"" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 374, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 374, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 132, + 374, + 506, + 464 + ], + "type": "text", + "content": "- For tag data, we wanted to prioritize tags that appeared earlier in the tag-mining chains and were rated higher. To that end, we gathered all tags from all iterations and duplicated tags from a given iteration based on the ratings they received. For example, if the tag \"tomato\" received three stars, then we would add the repeated tokens \"tomato, tomato, tomato\" to the aggregated list (\"document\"). In a given iteration, flagged tags are removed, but if they are rated later, then they are included. The total number of repetitions per token is equal to the sum of all the stars they received in all iterations. As a result, each token is repeated multiple times, which we take into consideration in consequent analysis." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "content": "For the next steps, we used the Matlab text analytics toolbox " + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "inline_equation", + "content": "^{9}" + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "content": ". Unless otherwise specified, we used default parameters for all functions. To generate similarity matrices, we applied the following methods:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": "Co-occurrence method. In this approach, we simply counted the number of repeated pairs of words in documents " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": " and normalized by the total number of pairs. Formally, we use " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "w_{i}" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": " to denote the word list of a document " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "w_{i,k}" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": " be the " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": "-th word in the " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "w_{i}" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": " list of words, and let " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "|w_{i}|" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": " denote the length of the list. We denote by " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "\\delta(c,d)" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": " the indicator function that returns 1 if and only if the word " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": " is identical to the word " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": ", and 0 otherwise. 
We computed the co-occurrence score " + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "inline_equation", + "content": "S(w_{i},w_{j})" + }, + { + "bbox": [ + 104, + 512, + 504, + 578 + ], + "type": "text", + "content": " according to the following formula:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 233, + 578, + 375, + 606 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 578, + 375, + 606 + ], + "spans": [ + { + "bbox": [ + 233, + 578, + 375, + 606 + ], + "type": "interline_equation", + "content": "S (w _ {i}, w _ {j}) = \\frac {\\sum_ {k} \\sum_ {l} \\delta (w _ {i , k} , w _ {j , l})}{| w _ {i} | | w _ {j} |}", + "image_path": "1ab297089b18b5f322355374c3b7fb7c54ef949bb516f809848a2cade86fe5b0.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 609, + 375, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 375, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 375, + 622 + ], + "type": "text", + "content": "We suggest using this method only with tags and not with captions." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 626, + 504, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 661 + ], + "type": "text", + "content": "Co-occurrence-rep. This method was applied only to tags. We used an identical procedure to the Co-occurrence method, except that we did not separate the words within a tag as separate tokens and instead treated the entire tag (that may include multiple words) as a single token." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 665, + 505, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 505, + 711 + ], + "type": "text", + "content": "Rouge score. 
In this approach, similarity was estimated by computing the rouge score of the word lists associated with each pair of documents. The Rouge score was computed using rougeEvaluationScore (Rouge, 2004). We suggest using this method only with tags and not with captions." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "type": "inline_equation", + "content": "^{9}" + }, + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "type": "text", + "content": "https://mathworks.com/products/text-analytics.html" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 115 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 115 + ], + "type": "text", + "content": "The following methods make use of tokenized data and a pre-processing procedure that we found effective. 
Pre-processing was applied to both tag and caption data and tokenization was performed as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 126, + 504, + 264 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 132, + 126, + 499, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 126, + 499, + 138 + ], + "spans": [ + { + "bbox": [ + 132, + 126, + 499, + 138 + ], + "type": "text", + "content": "- We separate all text into single words by applying the tokenizedDocument function." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 141, + 501, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 141, + 501, + 152 + ], + "spans": [ + { + "bbox": [ + 132, + 141, + 501, + 152 + ], + "type": "text", + "content": "- We added part of speech information using the addPartOfSpeechDetails function." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 156, + 427, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 156, + 427, + 167 + ], + "spans": [ + { + "bbox": [ + 132, + 156, + 427, + 167 + ], + "type": "text", + "content": "- We performed Lemmatization using the normalizeWords function." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 171, + 470, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 171, + 470, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 171, + 470, + 182 + ], + "type": "text", + "content": "- We erased punctuation from the token using the erasePunctuation function." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 186, + 406, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 186, + 406, + 197 + ], + "spans": [ + { + "bbox": [ + 132, + 186, + 406, + 197 + ], + "type": "text", + "content": "- We removed stopwords using the removeStopWords function." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 201, + 449, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 201, + 449, + 212 + ], + "spans": [ + { + "bbox": [ + 132, + 201, + 449, + 212 + ], + "type": "text", + "content": "- We removed words with less than two characters or more than 15 characters." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 216, + 504, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 504, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 504, + 237 + ], + "type": "text", + "content": "- We created a bag of words representation of each tokenized document using the bagOfWords function." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 242, + 504, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 242, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 132, + 242, + 504, + 264 + ], + "type": "text", + "content": "- We also removed words that were not present in more than two documents using the InfrequentWords function." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 274, + 504, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 297 + ], + "type": "text", + "content": "With the results of these pre-processing steps, we then computed similarity matrices based on the following methods:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 302, + 504, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 302, + 504, + 347 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 504, + 347 + ], + "type": "text", + "content": "bm25S. We used bm25+ to compute similarity between documents (Barrios et al., 2016) using Matlab's bm25Similarity function. This function represents TF-IDF-like retrieval functions used in document retrieval. 
We used a variant that has a normalization function that properly handles documents with a long list of words." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "text", + "content": "tfidf-cosine. We computed pairwise cosine similarities between document pairs using the TF-IDF matrix derived from their word counts and Matlab's cosineSimilarity function." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 388, + 234, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 388, + 234, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 234, + 399 + ], + "type": "text", + "content": "C.3 SUPERVISED METHODS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "text", + "content": "Several previous studies investigated improving correlations by applying and fine-tuning simple linear transformations to embedding vectors " + }, + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "inline_equation", + "content": "z^T \\mathbf{W}z" + }, + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "inline_equation", + "content": "\\mathbf{W} = \\mathrm{diag}(w_1, \\ldots, w_d)" + }, + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "text", + "content": " via a cross-validated ridge regression procedure that could be fit to ground-truth similarity judgments. 
The parameters of the diagonal reweighting matrix " + }, + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "text", + "content": " are fitted to a training subset of stimuli and used to predict similarity of pairs in a held-out validation set Peterson et al. (2018); Marjieh et al. (2022). To be consistent and make results comparable, here we report the results of performing this 6-fold cross-validated linear transformation (LT-CCV) on the model embeddings and datasets considered in this work. The analysis was carried out using the RidgeCV package from the scikit-learn Python library Pedregosa et al. (2011). Results with both normalized ('LT CCV (norm') and unnormalized ('LT CCV') regressors are shown in Figure 11; see RidgeCV documentation for details on normalization " + }, + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "inline_equation", + "content": "^{10}" + }, + { + "bbox": [ + 104, + 409, + 506, + 563 + ], + "type": "text", + "content": ". We see that the linear transformation does not consistently improve performance (and can even decrease it) when applied to many of the modality-based or stacked embeddings, but it does frequently improve performance when applied to caption embeddings. Due to their instability and risk of overfitting, we do not use these methods in our main analysis." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 710, + 504, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 710, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 504, + 731 + ], + "type": "inline_equation", + "content": "^{10}" + }, + { + "bbox": [ + 105, + 710, + 504, + 731 + ], + "type": "text", + "content": "https://scikit-learn.org/stable/modules/generated/sklearn.linear_model. RidgeCV.html" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 198, + 504, + 582 + ], + "blocks": [ + { + "bbox": [ + 111, + 198, + 504, + 582 + ], + "lines": [ + { + "bbox": [ + 111, + 198, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 111, + 198, + 504, + 582 + ], + "type": "image", + "image_path": "713b9d34308b1033df18e83391dfaaf08fe926c77cfe63f85c9fdbc65fc19aa4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 604, + 499, + 616 + ], + "lines": [ + { + "bbox": [ + 111, + 604, + 499, + 616 + ], + "spans": [ + { + "bbox": [ + 111, + 604, + 499, + 616 + ], + "type": "text", + "content": "Figure 11: Effect of fine-tuning model embeddings using a small subset of similarity judgments." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_content_list.json b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2a91611fa6370a546bacb109fcc54c217667513b --- /dev/null +++ b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_content_list.json @@ -0,0 +1,2769 @@ +[ + { + "type": "text", + "text": "WRITE AND PAINT: GENERATIVE VISION-LANGUAGE MODELS ARE UNIFIED MODAL LEARNERS", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shizhe Diao*", + "bbox": [ + 181, + 170, + 274, + 183 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The Hong Kong University of Science and Technology sdiaoaa@connect.ust.hk", + "bbox": [ + 181, + 184, + 542, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wangchunshu Zhou", + "bbox": [ + 560, + 
170, + 705, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ByteDance AI Lab", + "bbox": [ + 560, + 185, + 687, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "wangchunshu.zhou@inf.ethz.ch", + "bbox": [ + 560, + 199, + 836, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xinsong Zhang†", + "bbox": [ + 181, + 232, + 297, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ByteDance AI Lab", + "bbox": [ + 181, + 247, + 308, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zhangxinsong.0320@bytedance.com", + "bbox": [ + 181, + 262, + 488, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiawei Wang", + "bbox": [ + 557, + 233, + 651, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shanghai Jiao Tong University", + "bbox": [ + 557, + 247, + 761, + 262 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "wjw_sjt@sjtu.edu.cn", + "bbox": [ + 557, + 262, + 746, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 311, + 545, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in vision-language pre-training have pushed the state-of-the-art on various vision-language tasks, making machines more capable of multi-modal writing (image-to-text generation) and painting (text-to-image generation). However, few studies investigate if these two essential capabilities can be learned together and boost each other, making a versatile and powerful multi-modal foundation model. In this work, we disclose the potential of symmetric generative vision-language pre-training in learning to write and paint concurrently, and propose a new unified modal model, named DAVINCI, trained with prefix language modeling and prefix image modeling, a simple generative self-supervised objective on image-text pairs. 
Thanks to the proposed prefix multi-modal modeling framework, DAVINCI is simple to train, scalable to huge data, adaptable to both writing and painting tasks, and also strong on other vision, text, and multi-modal understanding tasks. DAVINCI achieves competitive performance on a wide range of 27 generation/understanding tasks and demonstrates the superiority of combining vision/language generative pre-training. Furthermore, we carefully benchmark the performance of different vision-language pre-training objectives on different scales of pre-training datasets on a heterogeneous and broad distribution coverage. Our results demonstrate the potential of exploiting self-supervision in both language and vision inputs, and establish new, stronger baselines for future comparisons at different data scales. $^{1}$", + "bbox": [ + 228, + 342, + 767, + 607 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 628, + 336, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Self-supervised language model pre-training (Peters et al., 2018; Radford et al., 2018; Devlin et al., 2019; Liu et al., 2019; Lewis et al., 2020; Raffel et al., 2020; Brown et al., 2020; Fu et al., 2022; Zhou et al., 2021b; Diao et al., 2020; 2021; Zhou et al., 2021a; Xu et al., 2020; Zhou et al., 2020; 2022a; Pan et al., 2022; Diao et al., 2023) has reshaped the landscape of modern natural language processing (NLP) research, pushing the state-of-the-art of a wide range of NLP tasks. Recently, this success has been transferred to the multi-modal context and resulted in a number of vision-language pretrained models (VLMs) (Lu et al., 2019; Tan & Bansal, 2019a), achieving state-of-the-art results on various vision-language tasks. 
Most existing VLMs are BERT-like Transformer (Vaswani et al., 2017) encoders pre-trained with a combination of different vision-language pre-training (VLP) objectives: masked multi-modal modeling (Lu et al., 2019; Tan & Bansal, 2019b; Chen et al., 2020; Li et al., 2020), multi-modal alignment prediction (Lu et al., 2019; Tan & Bansal, 2019b; Chen et al., 2020; Li et al., 2020), region of interest feature regression (Tan & Bansal, 2019b), image-text matching (Li et al., 2021; Zeng et al., 2021), to name a few. However, the roadmap towards large language models reveals a transition pattern from encoder-only models like BERT (Devlin et al., 2019) / RoBERTa (Liu et al., 2019) to sequence-to-sequence models like T5 (Raffel et al., 2020) / BART (Lewis et al., 2020) and autoregressive models like GPT-3 (Brown et al., 2020) / PaLM (Chowdhery et al., 2022) to tackle", + "bbox": [ + 169, + 652, + 826, + 876 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Work done during the internship at ByteDance AI Lab.", + "bbox": [ + 189, + 882, + 524, + 896 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author", + "bbox": [ + 192, + 896, + 331, + 909 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1The code and pre-trained models are available at https://github.com/shizhediao/DaVinci.", + "bbox": [ + 192, + 909, + 818, + 922 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "more tasks in a unified way, and from complicated objectives like masked language modeling / next sentence prediction / replace token detection to a simple language modeling objective to improve the scalability of pre-training. 
This suggests that the generative pre-training paradigm with simple targets shows great potential for pre-training more scalable and general VLMs.", + "bbox": [ + 169, + 103, + 823, + 160 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, several recent studies (Cho et al., 2021; Zhang et al., 2021a; Wang et al., 2021b; 2022) investigated sequence-to-sequence (seq2seq) vision-language pre-training and achieved state-of-the-art results on a range of vision-language understanding and generation tasks. For example, VL-T5 (Cho et al., 2021), OFA (Wang et al., 2022) and PaLI (Chen et al., 2022) formulate various vision-and-language problems into seq2seq tasks and pre-train a seq2seq VLM by multi-tasking on these tasks. In addition, ERNIE-ViLG (Zhang et al., 2021a) and SimVLM (Wang et al., 2021b) pre-train seq2seq VLMs with a simple language modeling or prefix language modeling objective on a large number of image-caption pairs. While achieving promising results, these objectives are not versatile enough, resulting in VLMs that are only capable of a subset of tasks in image-text modalities. On the other hand, the recent success of generative language pre-training (Brown et al., 2020) and generative vision pre-training (He et al., 2022; Bao et al., 2021) motivates us to explore generative vision-language pre-training to learn more versatile and scalable vision-language models.", + "bbox": [ + 169, + 162, + 826, + 329 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we introduce prefix multi-modal modeling, a unified generative pre-training framework that extends prefix language modeling to the multi-modal context and learns a multi-modal foundation model by learning to write and paint simultaneously. As illustrated in Figure 1, given an image-caption pair, we split the image and caption into two parts denoted as prefix and suffix. 
To make prefix image modeling compatible with the seq2seq formulation of conventional prefix language modeling, we follow DALLE (Ramesh et al., 2021) and convert images into discrete sequences of image tokens (van den Oord et al., 2017). We then train the model to generate the suffix in one modality based on the prefix in the same modality and the complete input in the other modality. In this way, prefix multi-modal modeling can fully exploit self-supervision from large-scale image-caption pairs by learning to write and paint simultaneously. We pre-train DAVinci2, a vision-language foundation model, with the proposed prefix multi-modal modeling framework on large-scale image-text pairs. DAVinci is the first self-supervised vision-language foundation model that is versatile for all kinds of tasks in vision-and-language modalities, including image-to-text generation, text-to-image generation, vision-language understanding, and single-modal language / vision tasks. DAVinci consistently outperforms FLAVA (Singh et al., 2021), an existing vision-language foundation model, on both language, vision, and multi-modal tasks, and performs competitively with state-of-the-art models across a wide range of tasks and modalities. Moreover, DAVinci also shows strong few-shot and zero-shot image/text generation capability.", + "bbox": [ + 169, + 330, + 826, + 582 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In addition, most existing VLMs are pre-trained with mixed pre-training objectives and different data sources varying in size, making it difficult to disentangle the impact of pre-training objectives and data sources on the downstream tasks. 
To this end, we conduct a systematic analysis of the performance of generative vision-language pre-training by carefully ablating different pre-training objectives, such as prefix language / image modeling, and the amount of pre-training data with different qualities, revealing the impact of different objectives and data sources to facilitating future research.", + "bbox": [ + 169, + 584, + 826, + 667 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To summarize, our contribution is three-fold: (1) We introduce prefix multi-modal modeling, a simple unified generative vision-language pre-training framework that is scalable for large-scale pre-training and versatile for image-to-text generation, text-to-image generation and various multi-modal / single-modal understanding tasks. (2) We pre-train DAVINCI, a vision-language foundation model, with the proposed approach, demonstrating competitive performance on a wide range of 27 downstream tasks and the superiority of combining vision/language generative pre-training. (3) We conduct an analysis about the impact of different pre-training data sources and pre-training objectives on the performance of seq2seq VLMs.", + "bbox": [ + 169, + 669, + 826, + 782 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 792, + 346, + 809 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Inspired by the success of language model pre-training, several studies investigated vision-language pre-training on large-scale image-caption pairs. 
ViLBERT (Lu et al., 2019) and LXMERT (Tan & Bansal, 2019b) first propose to extract visual object features with an external object detection model like Fast-RCNN (Girshick, 2015), feed the image features together with texts into Transformer", + "bbox": [ + 169, + 816, + 823, + 875 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "$^{2}$ Named after the Italian polymath Leonardo da Vinci, who displayed infinite grace in everything. We noticed that this name is used in GPT-3 versioning. However, we think there is no conflict because it is only a suffix for a specific checkpoint of the GPT-3 family.", + "bbox": [ + 169, + 883, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/47d8b05a6d5bf1ad408398bf32a3a02a079e04087e5c92ddae4c6e5c46418f6c.jpg", + "image_caption": [ + "Figure 1: Illustration of the overall architecture and pre-training procedures of DAVinci, a Transformer-based sequence-to-sequence model. Given an image-text pair, DAVinci first splits either the word sequence or image token sequence into prefix and suffix. It then concatenates the prefix with the complete sequence in the other modality as input. DAVinci is trained to recover the suffix with maximum likelihood estimation." + ], + "image_footnote": [], + "bbox": [ + 181, + 103, + 816, + 328 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "models, and train the model to align vision and language representations with masked multi-modal modeling and multi-modal alignment prediction objectives. 
Many following works (Li et al., 2020; Zhang et al., 2021b; Chen et al., 2020; Li et al., 2022a; 2021; Zeng et al., 2021; Wang et al., 2021a) propose several new objectives to improve object detection based VLP and explored using vision Transformer (Dosovitskiy et al., 2021; Touvron et al., 2021) as visual feature extractor.", + "bbox": [ + 169, + 388, + 823, + 459 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "More recently, FLAVA (Singh et al., 2021), a new vision-language foundation model, is pre-trained with a masked multi-modal modeling objective. Performing competitively on language, vision, and vision-language understanding tasks, FLAVA is designed for understanding tasks without text and image generation abilities.", + "bbox": [ + 169, + 460, + 823, + 517 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "While achieving promising results on multi-modal understanding tasks, most VLMs are based on encoder-only architectures with bidirectional attention, making them non-trivial to adapt to multi-modal generation tasks such as image captioning and text-to-image generation. Inspired by the success of seq2seq pre-trained language models such as T5 (Raffel et al., 2020) and BART (Lewis et al., 2020), VL-T5 (Cho et al., 2021) and OFA (Wang et al., 2022) propose to formulate both vision-language pre-training objectives and various downstream vision-language tasks as seq2seq tasks and pre-train a seq2seq VLM by multi-tasking on these tasks. However, the scalability and the zero-shot transfer capability of this approach are limited by the availability of large-scale and diverse vision-language tasks. To this end, SimVLM (Wang et al., 2021b), the most related work to our approach, instead pre-trains a seq2seq VLM with a simple prefix language modeling objective on text generation. It easily scales to very large and potentially noisy pre-training data and achieves competitive results. 
However, SimVLM only exploits language self-supervision, and thus it does not perform well on image understanding tasks and is unable to tackle image generation tasks. Another recent study is CM3 (Aghajanyan et al., 2022), which proposes a causal masked multi-modal model learned from large web data and differs from our work in pre-training objectives and target tasks.", + "bbox": [ + 169, + 520, + 826, + 729 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As for the text-to-image generation task, Ramesh et al. (2021); Ding et al. (2021); Yu et al. (2022) achieved promising performance by learning an auto-regressive target with Transformer and VQ-VAE / VQ-GAN tokenizer. Most recently, Ramesh et al. (2022); Saharia et al. (2022) advanced the image generation capability by using diffusion models and high-quality text embeddings (e.g., CLIP, T5). Therefore, it is natural to explore boosting image generation via stronger multi-modal understanding.", + "bbox": [ + 169, + 731, + 826, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Previous studies are good at either image-to-text or text-to-image generation, but few studies investigate whether these two important capabilities can be learned together and boost each other. 
In this paper, we explore making a versatile and powerful multi-modal foundation model that is good at text-to-image generation, image-to-text generation, and multi-modal understanding tasks.", + "bbox": [ + 169, + 803, + 823, + 859 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 DAVINCI", + "text_level": 1, + "bbox": [ + 171, + 872, + 282, + 886 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the superior performance of auto-regressive language models (LM) (Brown et al., 2020; Chowdhery et al., 2022; Rae et al., 2021) on zero-shot and few-shot transfer abilities, we decided to", + "bbox": [ + 169, + 895, + 823, + 924 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "adopt a decoder optimized by language modeling loss to retain the generalization capabilities, and an encoder to represent the prefix input. Unlike using a causal mask in the decoder, the encoder employs fully-visible attention for the prefix input. This architecture resembles prefix language modeling, which shows effectiveness in a wide range of language tasks (Dong et al., 2019; Raffel et al., 2020) and enables zero-shot generalization abilities. Contrary to the previous multi-stage approaches (Wang et al., 2021a; Singh et al., 2021), our model is trained from scratch in an end-to-end manner thanks to the model's simplicity. In this section, we introduce the proposed prefix multi-modal modeling framework and the DAVinci model. The overall architecture of DAVinci is depicted in Figure 1. 
We first explain our model architecture in detail in §3.1 and then introduce pre-training objectives and procedures in §3.2.", + "bbox": [ + 169, + 103, + 826, + 243 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 MODEL ARCHITECTURE", + "text_level": 1, + "bbox": [ + 171, + 255, + 385, + 268 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Textual Feature Embedding Given an input sentence $S$ , we first use WordPiece (Wu et al., 2016) tockenize it to a sequence of tokens $W = \\{w_{1},w_{2},\\dots,w_{n}\\}$ . To obtain text features $T$ , for each token $w_{i}$ , a token embedding $e_i$ and position embedding $p_i$ are computed by two separate embedding matrices. Finally, the textual feature embedding $T = \\{t_1,t_2,\\dots,t_i,\\dots,t_n\\}$ is calculated by $t_i = LayerNorm(e_i + p_i)$ , where $i$ indicates the $i$ -th position, and LayerNorm (Ba et al., 2016) is a layer normalization function.", + "bbox": [ + 169, + 276, + 826, + 359 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Visual Feature Embedding Given an input image $I$ , we first use a CNN backbone to extract and learn the image features. Following (Dai et al., 2021; Wang et al., 2021b), we use the first three blocks of ResNet (He et al., 2016) to obtain the feature maps. The feature maps are then flattened to $F = \\{f_1, f_2, \\dots, f_m\\}$ along the spatial dimension, where $m$ denotes the number of features. To keep the position information of visual features, we inject absolute learned positional embeddings $p$ and the final visual embeddings $V = \\{v_1, v_2, \\dots, v_i, \\dots, v_m\\}$ are calculated by $v_i = f_i + p_i$ , where $i$ indicates the $i$ -th position.", + "bbox": [ + 169, + 362, + 826, + 460 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Cross-Modal Transformer To fuse the textual and visual feature embeddings into a common space, we adopt a simple canonical Transformer architecture as the fusion module. 
The input is the combination of visual embedding $V$ and textual embedding $T$ , namely $X = \\{x_{1}, x_{2}, \\dots, x_{l}\\} = [V, T] = \\{v_{1}, v_{2}, \\dots, v_{m}, t_{1}, t_{2}, \\dots, t_{n}\\}$ . The input embedding vectors $X$ are then fed into a cross-modal Transformer encoder to obtain hidden state vectors $H = \\{h_{1}, h_{2}, \\dots, h_{l}\\}$ . Finally, a Transformer decoder is applied to generate visual or textual tokens with $H$ and decoder input as illustrated in Figure 1.", + "bbox": [ + 169, + 462, + 828, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image Tokenizer and Decoder Because Transformer is modeling on discrete tokens, to unify the text tokens and image tokens, we discretize an image into tokens by an image tokenizer and reconstruct the raw image by an image decoder. The image tokenizer and decoder are implemented with a discrete variational autoencoder (dVAE) (Ramesh et al., 2021). After training of the image tokenizer, it could serialize an image $I$ into a sequence of discrete visual tokens $Z = \\{z_{1}, z_{2}, \\dots, z_{m}\\}$ according to a learned vocabulary. Visual tokens $Z$ serve as the ground-truth labels for the prefix image modeling objective. 
In our work, we directly use an off-the-shelf image tokenizer and decoder from VQGAN (Esser et al., 2021), with a vocabulary size of 1024 and a compression rate of 16, which means a $256 \\times 256$ image will be tokenized into $16 \\times 16$ grid of tokens and then flattened to a sequence of 256 tokens.", + "bbox": [ + 169, + 547, + 826, + 689 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 PRE-TRAINING OBJECTIVES", + "text_level": 1, + "bbox": [ + 171, + 700, + 411, + 714 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our major motivation is to conduct language modeling with image information and image modeling with text information simultaneously, which only requires image and text pairs that are easy to collect, making our approach easy to scale. The interaction would force the vision-language model to have a deeper understanding of both text and image. Learning from this interaction connects the visual representation with textual representation, enabling zero-shot transfer.", + "bbox": [ + 169, + 720, + 826, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Prefix Language Modeling (PLM) The core idea of prefix language modeling is \"given a full image $X_{image}$ and a prefix caption $\\tilde{X}_{text}$ , recover the masked textual tokens (i.e., suffix caption $Y_{text}$ )\". 
Given an input caption, we first randomly mask some continuous words at the end (we call it suffix caption hereafter) and recover the masked textual tokens with full image by optimizing the cross-entropy loss,", + "bbox": [ + 169, + 792, + 825, + 866 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {P L M}} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {t e x t}} \\mid \\mathbf {X} _ {\\text {i m a g e}}, \\tilde {\\mathbf {X}} _ {\\text {t e x t}}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 869, + 825, + 904 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where I and S are images and captions from the pre-training corpus $D$ .", + "bbox": [ + 174, + 909, + 640, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Because of the lack of textual information, recovering the suffix caption requires the model to understand both the image and prefix caption. The full image is rich in semantic information that would help language modeling. 
The prefix length is randomly decided during training, and especially when prefix caption is none, this task will degenerate into \"image captioning\" task, which forces the model to generate a caption with the input image.", + "bbox": [ + 169, + 103, + 826, + 175 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {P L M}} ^ {\\prime} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {t e x t}} \\mid \\mathbf {X} _ {\\text {i m a g e}}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 181, + 826, + 214 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Prefix Image Modeling (PIM) The core idea of prefix image modeling is \"given a full caption and a corrupted image (we call it prefix image hereafter), recover the masked visual tokens\". Given an input image, we first randomly mask some continuous image patches at the end (we call it suffix image hereafter). The prefix image and full caption will be fed into the model and try to recover the original visual tokens obtained from the image tokenizer by optimizing the cross-entropy loss.", + "bbox": [ + 169, + 222, + 823, + 294 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {P I M}} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {i m a g e}} \\mid \\mathbf {X} _ {\\text {t e x t}}, \\tilde {\\mathbf {X}} _ {\\text {i m a g e}}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 300, + 825, + 333 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similar to PLM, when prefix image is none, this task will degenerate into \"text-to-image generation\" task, forcing the model to generate an image with the input caption:", + "bbox": [ + 169, + 340, + 826, + 371 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {P I M}} ^ {\\prime} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {i m a 
g e}} \\mid \\mathbf {X} _ {\\text {t e x t}}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 377, + 825, + 411 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Unified Learning Objective Our model is learned by optimizing the combination of PLM and PIM.", + "bbox": [ + 169, + 419, + 826, + 434 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\mathrm {P L M}} + \\mathcal {L} _ {\\mathrm {P I M}} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 429, + 441, + 825, + 455 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 479, + 328, + 494 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 PRE-TRAINING DATASETS", + "text_level": 1, + "bbox": [ + 171, + 506, + 397, + 518 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Since existing studies pre-trained their models on different corpora, making the fair comparison difficult. Considering results only on state-of-the-art performance would underestimate the potential of this line of research. Therefore, we propose several practical settings including small-scale and large-scale, and then conduct detailed comparisons on them in Section 5.1. More details about the datasets are shown in Appendix A.3.", + "bbox": [ + 169, + 527, + 823, + 598 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/edfd3b3f37e6c2e0e9a0fc70ed0bd1ac7970a62ff45934e732d8c8154d6bf0bc.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Data TypeDatasetImage Domain#Total
In-Domain Data (ID)COCO, Visual GenomeCOCO1.3M
Small-scale Web Data (SWD)SBU, CC-3M, CC-12MWeb14.9M
Object-Region Data (ORD)VG regions, VG objects, COCO objects, Refcoco, Open Image, Obj365COCO, Flickr17.0M
Vision Data (VD)ImageNet-21KImageNet13.2M
Large-scale Web Data (LWD)LAION-400M, DAVinci-200MWeb601.3M
Text Data (TD)C4Web800GB
", + "bbox": [ + 173, + 609, + 821, + 698 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1: Statistics of the pre-training datasets. #Total denotes the total number of image-text pairs.", + "bbox": [ + 196, + 702, + 797, + 717 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 DOWNSSTREAM TASKS", + "text_level": 1, + "bbox": [ + 171, + 729, + 370, + 742 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We test our models' ability and versatility on five dimensions: language understanding on 8 GLUE tasks (Wang et al., 2019), vision understanding on ImageNet fine-tuning and 12 popular vision datasets for linear evaluation, multi-modal understanding on VQAv2 (Goyal et al., 2017b), SNLI-VE (Xie et al., 2019) and NLVR2 (Suhr et al., 2019), text-to-image generation on COCO (Chen et al., 2015), and image-to-text generation on COCO, NoCaps (Agrawal et al., 2019), and VLUE (Zhou et al., 2022b). Details of downstream tasks and fine-tuning process are described in Appendix A.2.", + "bbox": [ + 169, + 750, + 826, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3 IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 845, + 408, + 859 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our model is a base-size Transformer implemented with a 6-layer encoder and a 6-layer decoder, 768 dimensions for hidden states, 512 for maximum input length, and 3072 for intermediate size. We train our model from scratch without initializing the Transformer encoder and decoder. 
However, the image encoder is initialized from ResNet-101 (He et al., 2016) with ImageNet weights since we find", + "bbox": [ + 169, + 867, + 825, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/c198d5977e9dcb0a2ffb1e3dced54ed631e5ed8367fe9a9754c7678523f03f5e.jpg", + "table_caption": [], + "table_footnote": [ + "Table 2: Experimental results on vision, language and multi-modal downstream tasks. @B4, @C denote BLEU@4, CIDEr, respectively. I2T and T2I denote image-to-text and text-to-image tasks. Multi-modal Avg. is the average score of all multi-modal tasks. FT: fine-tuning, LE: linear evaluation, FS: few-shot, ZS: zero-shot. Under few-shot setting, we fine-tune a pre-trained model for 3 epochs on $1\\%$ training data. Results for BERT are obtained from Iki & Aizawa (2021). Results for RoBERTa are from its corresponding paper (Liu et al., 2019) and they use the mid-training (Phang et al., 2018) on MNLI for RTE, MRPC and STS-B while other models (e.g., BERT, SimVLM, DAVinci) do not apply this trick. Results for ViT are from ViT-Base/16 model (Radford et al., 2021). We list the reported performance of text-only and image-only models in grey for reference." + ], + "table_body": "
BERTRoBERTaViTMLM 1MIM 2FLAVA 3CLIP 4SimVLM 5DAVINCI 6SimVLM 7DAVINCI 8
TaskEval.16GB160GB13.2M70M70M70M70M46.4M46.4M647.7M647.7M
MNLIFT84.2087.60-73.23-80.3332.8582.1382.2583.2783.13
CoLAFT54.6063.60-39.55-50.6511.0252.4752.1054.2254.75
MRPCFT84.7590.20-73.24-84.1668.7482.7083.1484.2684.54
QQPFT89.0091.90-86.68-88.7459.1788.3988.1589.0588.92
SST-2FT92.5094.80-87.96-90.9483.4990.6590.4891.1291.37
QNLIFT91.0092.80-82.32-87.3149.4687.5587.2188.2887.90
RTEFT62.5078.70-50.54-57.7653.0759.8060.7263.3464.22
STS-BFT88.2091.20-78.89-85.6713.7086.6286.2787.2487.05
NLP Avg.80.8486.35-71.55-78.1946.4478.7978.7980.1080.23
ImageNetLE--80.90-41.7975.5472.9574.3175.8776.0477.65
Food101LE--86.70-53.3088.5185.4983.4189.3385.5290.12
CIFAR10LE--96.90-76.2092.8791.2591.5693.0192.4193.96
CIFAR100LE--86.40-55.5777.6874.4072.5178.9875.2380.11
CarsLE--54.70-14.7170.8762.8461.4472.6968.8374.57
AircraftLE--46.00-13.8347.3140.0241.2847.4247.7549.55
DTDLE--74.30-55.5377.2973.4072.5577.1276.5978.33
PetsLE--92.70-34.4884.8279.6178.7785.5286.1388.21
Flowers102LE--99.20-67.2396.3794.9493.2496.1295.4196.88
MNISTLE--97.40-96.4098.4297.3896.6698.6798.4599.01
STL10LE--99.50-80.1298.8997.2997.5199.0398.0299.21
Country211LE--17.50-8.8728.9225.1226.4528.9927.8129.94
Vision Avg.--77.68-49.8478.1274.5674.1478.5677.3479.80
VQAv2FT-----72.4959.8172.1273.8975.0376.44
SNLI-VEFT-----78.8973.5378.7479.1179.6380.01
NLVR2FT-------77.4577.9179.7280.25
I2T@B4FT-------38.0038.5038.1039.20
I2T@CFT-------126.96128.66128.91130.44
T2I@IS ↑FT--------17.55-22.41
T2I@FID ↓FT--------23.58-19.82
VQAv2FS-------54.6954.8551.8854.90
SNLI-VEFS-------67.4567.5767.9668.04
NLVR2FS-------51.4651.1951.4951.52
I2T@B4FS-------35.9036.4032.7037.00
I2T@CFS-------117.75120.43112.20122.56
I2T@B4ZS-------11.4010.8013.8018.70
I2T@CZS-------45.3045.5556.6968.44
VUE@B4ZS-------9.209.4010.4010.60
VUE@CZS-------33.9234.8039.7540.83
NoCaps@CZS-------48.0545.5148.6458.58
T2I@IS ↑ZS--------14.91-17.44
T2I@FID ↓ZS--------29.83-24.21
Multi-modal Avg.-------57.8958.3059.1362.50
", + "bbox": [ + 173, + 99, + 823, + 551 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "a warm start provides a reliable visual representation and helps the convergence. All pre-training experiments are conducted on 32GB NVIDIA V100 GPUs. The model trained on the largest data takes around 10 days on 1024 V100 GPUs. We adopt dynamic masking in our experiments, where the masking ratio is randomly sampled from a uniform distribution $\\mathrm{U}(0,1)$ . More details of the fine-tuning, network architectures, and hyper-parameters setups are given in Appendix A.1.", + "bbox": [ + 169, + 667, + 823, + 739 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4 EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 751, + 392, + 765 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We extensively compare the performance of DAVINCI with state-of-the-art unified foundation models and vision-language models across vision, language, and multi-modal tasks, accessing five different abilities: (1) text understanding, (2) image understanding, (3) text-to-image generation, (4) image-to-text generation, (5) multi-modal understanding.", + "bbox": [ + 169, + 773, + 823, + 829 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall Performance We report the overall performance on 8 language tasks from GLUE, 12 vision tasks, 3 multi-modal tasks, 3 image-to-text tasks and 1 text-to-image task. We compare our model with FLAVA and SimVLM $^3$ , two of the most recent and best performing vision-language", + "bbox": [ + 169, + 832, + 823, + 875 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "3Since SimVLM is not open-sourced and uses 1.8B in-house data without telling the exact size of its base model, we replicate it on our data with the same size as DAVINCI. 
Experiments on SimVLMsmall ensure our successful reproduction (see Appendix A.4).", + "bbox": [ + 169, + 883, + 823, + 924 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/fe417e02e22be789cc5807b1d0c13c00eacaf7970e23c09aae3fc09c30e23cfa.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model#Params.Text MNLI AccVision ImageNet LE / FTImage2Text COCO B@4 / CText2Image COCO IS↑ / FID↓Multi-modal
VQA test-dev / test-standardNLVR2 dev / test-P
Encoder-only Multi-modal Models
VinVL (Zhang et al., 2021b)157M--38.2 / 129.3-75.95 / 76.1282.05 / 83.08
ViLT (Kim et al., 2021)88M----70.85 / -74.91 / 75.57
ALBEF (Li et al., 2021)210M----75.84 / 76.0482.55 / 83.14
X-VLM (Zeng et al., 2021)240M--39.6 / 132.6-78.22 / 78.3784.41 / 84.76
VLMO (Wang et al., 2021a)-----76.64 / 76.8982.77 / 83.34
Encoder-Decoder Multi-modal Models
UNICORN (Yang et al., 2021)---35.8 / 119.1-69.20 / 69.40-/-
Uni-ENDN (Li et al., 2022b)110M----72.20 / 72.50-/-
Pixel-BERT (Huang et al., 2020)144M----74.45 / 74.5576.50 / 77.20
E2E-VLP (Xu et al., 2021a)94M--36.2 / 117.3-73.25 / 73.6777.25 / 77.96
VL-T5 (Cho et al., 2021)220M--34.5 / 116.5-- / 70.3074.60 / 73.60
VL-BART (Cho et al., 2021)220M--35.1 / 116.6-- / 71.3071.70 / 70.30
Text2Image Models
DM-GAN (Zhu et al., 2019)----32.20 / 26.50-/--/-
DALLE (Ramesh et al., 2021) (250M)12B---17.90 / 27.50-/--/-
DALLE (Ramesh et al., 2021) (640M)†82M---15.79 / 29.22-/--/-
CogView (Ding et al., 2021)4B---18.20 / 27.10-/--/-
Unified Models
Unifying (Huang et al., 2021)228M--37.3 / 122.6- / 29.90-/--/-
FLAVA (Singh et al., 2021)240M80.3375.54 / ---72.80 / 72.49-/-
SimVLM (Wang et al., 2021b) (640M)†153M83.2776.04 / -38.5 / 128.7-75.04 / 75.0378.82 / 79.72
SimVLM (Wang et al., 2021b) (1.8B)-83.4080.60 / -39.0 / 134.8-77.87 / 78.1481.72 / 81.77
OFA (Wang et al., 2022)182M84.30- / 82.2041.0 / 138.221.50* / 20.80*78.00 / 78.10-/-
Florence (Yuan et al., 2021)637M-- / 90.05-/--/-80.16 / 80.36-/-
DAVINCI154M83.1378.81 / 83.9239.2 / 130.417.44 (22.41*) / 24.21 (19.82*)76.32 / 76.4480.03 / 80.25
", + "bbox": [ + 171, + 88, + 826, + 369 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3: Comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks. All results are from base-size models. LE and FT denote linear evaluation and fine-tuning performance, respectively. Image2Text results are reported without CIDEr optimization. $\\dagger$ are our reproduced models. \\* are the results after fine-tuning. SimVLM (1.8B) and OFA are pre-trained with much larger corpus or human-labeled data of many downstream tasks, and thus they are not comparable and are labeled in gray. Florence (Yuan et al., 2021) is pre-trained with much larger model size (Florence-CoSwin-H, 637M) and more pre-training data (900M), so the numbers are in grey. bold denotes the best across unified models.", + "bbox": [ + 169, + 373, + 823, + 463 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "foundation models. We also include comparisons with some baseline models (e.g., MIM, MLM, CLIP). There are several observations. First, DAVINCI (column 8) outperforms FLAVA (column 3) and SimVLM (column 7) across almost all tasks, providing a new and stronger unified foundation model. Compared with FLAVA, DAVINCI improves an average of $2.04\\%$ , $1.68\\%$ on language and vision tasks, respectively. Compared with SimVLM, DAVINCI achieves comparable results on language tasks $(+0.13\\%)$ while performing much better on vision tasks $(+2.46\\%)$ . To make a fair comparison in terms of similar data size, we compare FLAVA (70M data, column 3) with DAVINCI (46.4M data, column 6). It is observed that DAVINCI still outperforms FLAVA even with much less data. Considering the multi-modal tasks, DAVINCI consistently outperforms FLAVA and SimVLM on VQA and VE. 
Note that FLAVA is incapable of generation and SimVLM cannot generate images; only DAVINCI is competent to all tasks and demonstrates a stronger capability of unifying vision and language tasks.", + "bbox": [ + 169, + 474, + 823, + 642 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Zero-shot and Few-shot Transfer One of the critical benefits of generative pre-trained vision-language models is the good generalization ability on zero-shot and few-shot tasks. For zero-shot transfer, two out-of-domain distribution datasets are considered (NoCaps and VLINE), with results shown in Table 2. First, DAVinci outperforms SimVLM on both zero-shot and few-shot settings, demonstrating its better transfer capabilities. It also shows the effectiveness and robustness of the synergy of our proposed language supervision and image supervision. Second, it is observed that the performance improvement is bigger on 647.7M data (column 7 v.s. column 8) than 46.4M data (column 5 v.s. column 6). This shows DAVinci generalizes well with the increase of large-scale data. We even observe some performance drops on small data (46.4M) but excellent performance improvements on large data (647.7M). It is consistent with the recent observation that zero-shot ability could only be triggered with large pre-training data (Wei et al., 2022) and scaling to large data and keeping simple training objectives benefit generalization performance (Wang et al., 2021b).", + "bbox": [ + 169, + 650, + 826, + 818 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparison with state-of-the-art vision-language models In addition to unified vision-language foundation models, we compare DAVinci with state-of-the-art vision-language models as well. The results are shown in Table 2. DAVinci demonstrates its superiority in vision understanding and text-to-image generation. 
Compared with current popular auto-regressive image generation models like DALLE and CogView, our model achieves comparable IS and better FID scores with significantly fewer model parameters than DALLE and CogView. Note that the original DALLE is implemented based on VQVAE, so here, we compare our model with reproduced VQGAN-based DALLE with", + "bbox": [ + 169, + 825, + 823, + 924 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3183648d65c44268f04e6d93ab4e8aecc3f2c5100585f348674324754b29b556.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
SettingsPre-training Data#Image#CaptionModelsCOCO CaptionsVQASNLI-VENLVR2
IDSWDORDVDLWDB@4 / CAccAccAcc
10.2M1.3MSimVLM35.2 / 115.0668.8976.1071.21
DAVINCI35.8 / 117.3069.2576.2272.55
215.1M16.2MSimVLM37.0 / 122.6371.5478.3675.50
DAVINCI37.4 / 123.1171.8878.6277.46
32.7M18.3MSimVLM38.2 / 123.8569.5776.6570.50
DAVINCI38.0 / 124.2070.0276.9272.01
413.4M14.5MSimVLM36.2 / 119.7370.5376.9073.25
DAVINCI36.6 / 121.2771.2377.4074.62
530.5M46.4MSimVLM38.5 / 128.1271.8478.8176.75
DAVINCI38.6 / 128.7373.5379.2477.55
6601.3M601.3MSimVLM37.3 / 123.8173.7378.7977.69
DAVINCI37.6 / 124.4273.9579.2978.54
7601.5M602.6MSimVLM37.9 / 125.5074.6479.0577.68
DAVINCI38.1 / 125.9174.9179.2278.12
8631.8M647.7MSimVLM38.5 / 128.2575.0479.3278.82
DAVINCI39.1 / 130.2176.3280.0480.03
", + "bbox": [ + 174, + 99, + 823, + 297 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4: Evaluation on downstream tasks using COCO Captions, VQA, SNLI-VE, and NLVR2. #Image and #Caption denote the numbers of images and image-text pairs that are used in the pre-training.", + "bbox": [ + 171, + 303, + 823, + 330 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "similar model sizes, and find DAVinci still achieves a significant improvement over it. Generated images are presented in Appendix A.11 for further qualitative comparison.", + "bbox": [ + 169, + 342, + 823, + 371 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "On multi-modal tasks such as VQA, DAVINCI not only outperforms unified models (e.g., SimVLM (640M)) and other encoder-decoder multi-modal models (e.g., E2E-VLP, VL-T5), but also achieves competitive performance with many conventional encoder-only multi-model models (e.g., VinVL, ALBEF, VLMO). Note that SimVLM (1.8B) and OFA are not directly comparable because SimVLM uses 1.8B in-house image-text pairs, and OFA uses human-labeled data of many downstream tasks during pre-training. 
Even though, we still report their results for reference and observe a better performance on ImageNet fine-tuning and text-to-image generation than OFA.", + "bbox": [ + 169, + 372, + 826, + 470 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The advantages of image generation over DALLE / CogView, the superiority of image-to-text over SimVLM, and the competitive performance with conventional multi-modal models demonstrate the synergistic effect of our proposed PLM (language supervision) and PIM (image supervision).", + "bbox": [ + 169, + 473, + 823, + 516 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 527, + 292, + 542 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1 IMPACT OF PRE-TRAINING DATASETS", + "text_level": 1, + "bbox": [ + 171, + 551, + 475, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we disclose the impact of various multi-modal data sources for VLMs. We choose SimVLM and DAVinci as our baseline models for their competitive performance, the capability of training from scratch, and the scalability of extending to the noisy large-scale corpus. We use the same text corpus, $C4$ , for all the variations. The results are shown in Table 4. In general, the performance is increased along with the data size, and DAVinci consistently outperforms SimVLM on almost all the data settings and all the downstream tasks. Both object-region data and vision data are clearly helpful in vision language pre-training (refer to settings 3 and 4). We surprisingly observe that models pre-trained on object-region data with much fewer images performs even better than models pre-trained with small-scale web data on the COCO Caption task (refer to settings 2 and 3). Although large-scale web data is usually noisier than small datasets (e.g., ID, ORD, VD, and SWD), it is powerful for multi-modal pre-training (refer to settings 5 and 8). 
We believe our analysis has broader impacts on the research of VLMs in the community. First, this enables fair comparisons for pre-trained models in the same data settings. Second, one can focus on the model designs at part or all of the data settings according to available computation resources. Third, we reveal that object-region and vision data, normally overlooked in VLM pre-training, also play a significant role.", + "bbox": [ + 169, + 574, + 826, + 782 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 ABLATION STUDY", + "text_level": 1, + "bbox": [ + 171, + 787, + 341, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To verify the contributions of different modules in our framework, we ablate them and evaluate DAVINCI on five kinds of downstream tasks: language understanding (MNLI, SST-2), vision understanding (ImageNet, Food101, CIFAR10), multi-modal understanding (VQAv2, SNLI-VE, NLVR2), image-to-text generation (COCO Captions), and text-to-image generation. Experiments are conducted with the same model architecture on in-domain data (ID). The results are shown in Table 5.", + "bbox": [ + 169, + 809, + 826, + 878 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effects of Objectives First, all three objectives (PLM, PIM, and Text2Text) bring improvement and the combination confirms a synergistic effect. 
Second, it is observed that without PLM, the performance decreases significantly on multi-modal understanding and image-to-text generation,", + "bbox": [ + 169, + 881, + 826, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/51842cc7375622d01367ef18e920eb25c399142c5998b6e8a0587e567b5de68b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodCOCOB@4 / CVQAAccSNLI-VEAccNLVR2AccImageNetAccFood101AccCIFAR10AccMNLIAccSST-2AccT2IIS / FID
No Pre-training32.1 / 96.7152.7354.2351.08-*-*-*66.3279.84-*
DAVINCI35.8 / 117.3069.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
-PLM33.6 / 111.1765.1573.9153.2848.0574.1772.9881.4289.9710.26 / 59.64
-PIM34.3 / 116.5868.8975.7969.7845.5471.1870.1181.9490.53-*
-Text2Text34.1 / 115.2168.1475.3870.3448.6774.2673.2376.4888.1412.07 / 54.77
PL=035.4 / 117.0066.9075.5271.0548.4568.1873.7378.6989.0011.76 / 55.38
PL=15%35.7 / 116.5369.1675.0970.4441.5852.1568.5579.0289.46-*
PL=50%35.1 / 115.5368.5574.5456.9237.6949.1670.1578.5989.69-*
MIM34.7 / 113.468.1875.3469.6648.4656.9572.7981.7289.849.50 / 74.13
In-painting34.5 / 112.567.4675.4168.6647.5054.3871.2081.5589.849.97 / 68.15
Token Projection17.7 / 49.252.1371.1152.0115.1125.6261.0182.0190.2511.89 / 60.96
Patch Projection25.7 / 79.557.6971.9257.4536.2344.3169.4081.7390.0511.41 / 61.87
", + "bbox": [ + 173, + 102, + 828, + 266 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 5: Ablation study on COCO Captions, VQA, SNLI-VE, NLVR2, ImageNet, Food101, CIFAR10, MNLI, SST-2, and text-to-image (T2I) generation. “-” denotes removing the corresponding objective. PL denotes the prefix length under fixed masking ratio settings. Because the linear probe requires a pre-trained model to be frozen, “No Pre-training” results on ImageNet, Food101, and CIFAR10 are not reported and labeled by * . For T2I, we report the zero-shot results. Note that the following four variants cannot perform zero-shot text-to-image generation (labeled by *): (1) No Pre-training, (2) DAVinci - PIM, (3) PL=15%, and (4) PL=50%.", + "bbox": [ + 173, + 270, + 823, + 347 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "indicating the importance of language supervision. Third, PIM brings more gains than PLM and text2text on vision understanding, which is expected because it enhances the vision encoding ability with image supervision. In addition, the text2text objective is important to text understanding. Last, on the text-to-image generation task, it is observed that PLM is also helpful, confirming the synergistic effect of PIM and PLM again. Intuitively, PIM and PLM can help each other learn the alignments of visual and textual features, which will benefit both image generation and other multi-modal tasks.", + "bbox": [ + 173, + 363, + 823, + 446 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Effects of Masking Ratios Our model adopts dynamic masking ratios as described in Section 3.2. We also conduct experiments with static masking ratios with the prefix length fixed to 0, $15\\%$ , and $50\\%$ . The comparison between dynamic masking ratios and static masking ratios $(\\mathrm{PL} = 0, 15\\%,$ and $50\\%)$ reveals that dynamic masking is better. We attribute this improvement to the smoothing effects of dynamic masking ratios. 
We also find that the standard language model $(\\mathrm{PL} = 0)$ performs worse on VQA, Food101, and text-to-image generation, which is consistent with the observation in SimVLM. In our experiments, the masking ratio is sampled from a uniform distribution $\\mathrm{U}(0,1)$ .", + "bbox": [ + 173, + 453, + 823, + 551 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Effects of Masking Strategies Here we also compared three different masking strategies: 1) masked image modeling (randomly masking some patches), 2) in-painting (randomly masking some continuous spans in the middle of the image), and 3) suffix-painting (ours). The results are shown in Table 5. Both masked image modeling and in-painting are effective and competitive. It is observed that suffix-painting is better than masked image modeling and in-painting across all tasks, demonstrating that suffix-painting works well.", + "bbox": [ + 173, + 556, + 823, + 641 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Effects of Image Feature Extraction There are several different ways to extract image features. We compare three different image representation methods: 1) token projection (projecting the prefix tokens to the hidden dimension of the backbone network on the token-level), 2) patch projection (similar to ViT embedding, we split an image into fixed-size patches, embed each of them by a trainable linear projection on the pixel-level), and 3) ResNet feature extraction (ours). From the results in Table 5, we observed that ResNet feature extraction outperforms token projection and patch projection by a large margin. 
Therefore, we decided to adopt ResNet to extract image features.", + "bbox": [ + 173, + 647, + 823, + 744 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We provide more details and discussions about the effects of compute (A.5), masking strategies (A.6), image feature extraction methods (A.7), and scaling effects of data size (A.8) in the Appendix.", + "bbox": [ + 173, + 747, + 823, + 776 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 CONCLUSION AND DISCUSSION", + "text_level": 1, + "bbox": [ + 173, + 787, + 468, + 803 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we first benchmark several settings on sequence-to-sequence vision-language pretraining in terms of pre-training dataset size, aligning SimVLM and our model on them. We propose a simple and unified generative pre-training model, DAVinci, to simultaneously leverage the language supervision and image supervision through two objectives under a unified framework: prefix language modeling and prefix image modeling. DAVinci is simple yet effective, demonstrating strong capabilities in both multi-modal writing and painting tasks. Experimental results explicitly imply that combining suffix caption generation and suffix image generation offers large gains on all benchmark settings. We also discussed limitations and future work in Appendix A.10.", + "bbox": [ + 173, + 811, + 823, + 922 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 477, + 46 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 949, + 503, + 958 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 103, + 356, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We thank the anonymous reviewers for their valuable suggestions. 
We would like to acknowledge Yan Zeng, Wenguan Huang, and Zhi Zhang at ByteDance, and Zhiling Zhang at Shanghai Jiao Tong University for their generous assistance in data collection and helpful discussions. We also wish to thank Hang Li at ByteDance, and Tong Zhang at HKUST for inspiring feedback, valuable comments, and great support to this work.", + "bbox": [ + 171, + 128, + 826, + 200 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 217, + 285, + 233 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Armen Aghajanyan, Bernie Huang, Candace Ross, Vladimir Karpukhin, Hu Xu, Naman Goyal, Dmytro Okhonko, Mandar Joshi, Gargi Ghosh, Mike Lewis, et al. Cm3: A causal masked multimodal model of the internet. arXiv preprint arXiv:2201.07520, 2022.", + "Eneko Agirre, Lluis Márquez, and Richard Vicentowski (eds.). Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007), Prague, Czech Republic, 2007. Association for Computational Linguistics. URL https://aclanthology.org/S07-1000.", + "Harsh Agrawal, Peter Anderson, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, and Stefan Lee. nocaps: novel object captioning at scale. In 2019 IEEE/CVF International Conference on Computer Vision, ICCV 2019, Seoul, Korea (South), October 27 - November 2, 2019, pp. 8947-8956. IEEE, 2019. doi: 10.1109/ICCV.2019.00904. URL https://doi.org/10.1109/ICCV.2019.00904.", + "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. ArXiv preprint, abs/1607.06450, 2016. URL https://arxiv.org/abs/1607.06450.", + "Hangbo Bao, Li Dong, and Furu Wei. BEiT: Bert pre-training of image transformers. arXiv preprint, 2021.", + "Luisa Bentivogli, Peter Clark, Ido Dagan, and Danilo Giampiccolo. The fifth pascal recognizing textual entailment challenge. 
In TAC, 2009.", + "Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, pp. 446-461. Springer, 2014.", + "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/bit/1457c0d6bcbd4967418bf8ac142f64a-AAbstract.html.", + "Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9650-9660, 2021.", + "Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. arXiv preprint arXiv:2209.06794, 2022.", + "Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO Captions: Data collection and evaluation server. arXiv preprint, 2015." 
+ ], + "bbox": [ + 171, + 244, + 826, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. UNITER: Universal image-text representation learning. In European Conference on Computer Vision (ECCV), 2020.", + "Jaemin Cho, Jie Lei, Hao Tan, and Mohit Bansal. Unifying vision-and-language tasks via text generation. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 1931-1942. PMLR, 2021. URL http://proceedings.mlr.press/v139/cho21a.html.", + "Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. ArXiv preprint, abs/2204.02311, 2022. URL https://arxiv.org/abs/2204.02311.", + "Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2014, Columbus, OH, USA, June 23-28, 2014, pp. 3606-3613. IEEE Computer Society, 2014. doi: 10.1109/CVPR.2014.461. URL https://doi.org/10.1109/CVPR.2014.461.", + "Adam Coates, Andrew Ng, and Honglak Lee. An analysis of single-layer networks in unsupervised feature learning. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 215-223. JMLR Workshop and Conference Proceedings, 2011.", + "Ido Dagan, Oren Glickman, and Bernardo Magnini. 
The pascal recognising textual entailment challenge. In Machine Learning Challenges Workshop, pp. 177-190. Springer, 2005.", + "Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. Coatnet: Marrying convolution and attention for all data sizes. Advances in Neural Information Processing Systems, 34:3965-3977, 2021.", + "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://aclanthology.org/N19-1423.", + "Shizhe Diao, Jiaxin Bai, Yan Song, Tong Zhang, and Yonggang Wang. Zen: Pre-training chinese text encoder enhanced by n-gram representations. In Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 4729-4740, 2020.", + "Shizhe Diao, Ruijia Xu, Hongjin Su, Yilei Jiang, Yan Song, and Tong Zhang. Taming pre-trained language models with n-gram representations for low-resource domain adaptation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 3336-3349, 2021.", + "Shizhe Diao, Zhichao Huang, Ruijia Xu, Xuechun Li, Yong Lin, and Tong Zhang. Black-box prompt learning for pre-trained language models. Transactions on Machine Learning Research, 2023. URL https://openreview.net/forum?id=IvsGP7xRvm.", + "Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, et al. Cogview: Mastering text-to-image generation via transformers. Advances in Neural Information Processing Systems, 34, 2021.", + "William B. Dolan and Chris Brockett. 
Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005), 2005. URL https://aclanthology.org/I05-5002.", + "Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. Unified language model pre-training for natural language understanding and generation. In NeurIPS, pp. 13042-13054, 2019." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=YicbFdNTTy.", + "Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873-12883, 2021.", + "Zhiyi Fu, Wangchunshu Zhou, Jingjing Xu, Hao Zhou, and Lei Li. Contextual representation learning beyond masked language modeling. In ACL (1), pp. 2701-2714. Association for Computational Linguistics, 2022.", + "Danilo Giampiccolo, Bernardo Magnini, Ido Dagan, and Bill Dolan. The third PASCAL recognizing textual entailment challenge. In Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 1-9, Prague, 2007. Association for Computational Linguistics. 
URL https://aclanthology.org/W07-1401.", + "Ross B. Girshick. Fast R-CNN. In 2015 IEEE International Conference on Computer Vision, ICCV 2015, Santiago, Chile, December 7-13, 2015, pp. 1440-1448. IEEE Computer Society, 2015. doi: 10.1109/ICCV.2015.169. URL https://doi.org/10.1109/ICCV.2015.169.", + "Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch sgd: Training imagenet in 1 hour. ArXiv preprint, abs/1706.02677, 2017a. URL https://arxiv.org/abs/1706.02677.", + "Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the V in VQA matter: Elevating the role of image understanding in visual question answering. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pp. 6325-6334. IEEE Computer Society, 2017b. doi: 10.1109/CVPR.2017.670. URL https://doi.org/10.1109/CVPR.2017.670.", + "R Bar Haim, Ido Dagan, Bill Dolan, Lisa Ferro, Danilo Giampiccolo, Bernardo Magnini, and Idan Szpektor. The second pascal recognising textual entailment challenge. In Proceedings of the Second PASCAL Challenges Workshop on Recognising Textual Entailment, volume 7, 2006.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 770-778. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.90. URL https://doi.org/10.1109/CVPR.2016.90.", + "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross B. Girshick. Momentum contrast for unsupervised visual representation learning. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 9726-9735. IEEE, 2020. doi: 10.1109/CVPR42600.2020.00975. 
https://proceedings.neurips.cc/paper/2017/hash/8a1d694707eb0fefe65871369074926d-Abstract.html
In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 2189-2196, 2021.", + "Shankar Iyer, Nikhil Dandekar, Kornél Csernai, et al. First quora dataset release: Question pairs. data.quora.com, 2017.", + "Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 4904-4916. PMLR, 2021. URL http://proceedings.mlr.press/v139/jia21b.html.", + "Wonjae Kim, Bokyung Son, and Ildoo Kim. Vilt: Vision-and-language transformer without convolution or region supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 5583-5594. PMLR, 2021. URL http://proceedings.mlr.press/v139/kim21k.html.", + "Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pp. 554-561, 2013.", + "Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.", + "Yann LeCun and Corinna Cortes. MNIST handwritten digit database. 2010.", + "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7871-7880, Online, 2020. Association for Computational Linguistics. 
Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. VisualBERT: A simple and performant baseline for vision and language. arXiv preprint, 2019.", + "Liunian Harold Li
Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 13-23, 2019. URL https://proceedings.neurips.cc/paper/2019/hash/c74d97b01eae257e44aa9d5bade97baf-Abstract.html.
IEEE Computer Society, 2012. doi: 10.1109/CVPR.2012.6248092. URL https://doi.org/10.1109/CVPR.2012.6248092.", + "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 2227-2237, New Orleans, Louisiana, 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1202. URL https://aclanthology.org/N18-1202.", + "Jason Phang, Thibault Févry, and Samuel R. Bowman. Sentence encoders on stilts: Supplementary training on intermediate labeled-data tasks. ArXiv, abs/1811.01088, 2018.", + "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. 2018.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8748-8763. PMLR, 2021. URL http://proceedings.mlr.press/v139/radford21a.html.", + "Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446, 2021." 
+ ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research (JMLR), 2020.", + "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pp. 2383-2392, Austin, Texas, 2016. Association for Computational Linguistics. doi: 10.18653/v1/D16-1264. URL https://aclanthology.org/D16-1264.", + "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8821-8831. PMLR, 2021. URL http://proceedings.mlr.press/v139/ramesh21a.html.", + "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022.", + "Steven J. Rennie, Etienne Marcheret, Youssef Mroueh, Jerret Ross, and Vaibhava Goel. Self-critical sequence training for image captioning. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pp. 1179-1195. IEEE Computer Society, 2017. doi: 10.1109/CVPR.2017.131. 
https://proceedings.neurips.cc/paper/2016/hash/8a3363abe792db2d8761d6403605aab7-Abstract.html
In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pp. 1631-1642, Seattle, Washington, USA, 2013. Association for Computational Linguistics. URL https://aclanthology.org/D13-1170.", + "Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. VL-BERT: pretraining of generic visual-linguistic representations. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020. URL https://openreview.net/forum?id=SygXPaEYvH." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6418-6428, Florence, Italy, 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1644. URL https://aclanthology.org/P19-1644.", + "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 2818-2826. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.308. URL https://doi.org/10.1109/CVPR.2016.308.", + "Hao Tan and Mohit Bansal. LXMERT: Learning cross-modality encoder representations from transformers. 
https://proceedings.neurips.cc/paper/2017/hash/7a98af17e63a0ac09ce2e96d03992fbc-Abstract.html
https://proceedings.neurips.cc/paper/2017/hash/3f5ee243547dee91fbd053c1c4a845aa-Abstract.html
URL https://aclanthology.org/Q19-1040.", + "Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. Emergent abilities of large language models. Transactions on Machine Learning Research, 2022. URL https://openreview.net/forum?id=yzkSU5zdwD. Survey Certification.", + "Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 1112–1122, New Orleans, Louisiana, 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1101. URL https://aclanthology.org/N18-1101.", + "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. Google's neural machine translation system: Bridging the gap between human and machine translation. ArXiv preprint, abs/1609.08144, 2016. URL https://arxiv.org/abs/1609.08144.", + "Ning Xie, Farley Lai, Derek Doran, and Asim Kadav. Visual entailment: A novel task for fine-grained image understanding. arXiv preprint, 2019.", + "Canwen Xu, Wangchunshu Zhou, Tao Ge, Furu Wei, and Ming Zhou. BERT-of-theseus: Compressing BERT by progressive module replacing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 7859-7869, Online, 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.633. URL https://aclanthology.org/2020.emnlp-main.633.", + "Haiyang Xu, Ming Yan, Chenliang Li, Bin Bi, Songfang Huang, Wenming Xiao, and Fei Huang. E2E-VLP: End-to-end vision-language pre-training enhanced by visual learning. 
In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 503-513, Online, 2021a. Association for Computational Linguistics. doi: 10.18653/v1/2021.acl-long.42. URL https://aclanthology.org/2021.acl-long.42.", + "Jingjing Xu, Wangchunshu Zhou, Zhiyi Fu, Hao Zhou, and Lei Li. A survey on green deep learning. ArXiv preprint, abs/2111.05193, 2021b. URL https://arxiv.org/abs/2111.05193.", + "Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. Attngan: Fine-grained text to image generation with attentional generative adversarial networks. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pp. 1316-1324. IEEE Computer Society, 2018. doi: 10.1109/CVPR.2018.00143. URL http://openaccess.thecvf.com/content_cvpr_2018/html/Xu_AttnGAN_Fine-Grained_Text_CVPR_2018_paper.html.", + "Zhengyuan Yang, Zhe Gan, Jianfeng Wang, Xiaowei Hu, Faisal Ahmed, Zicheng Liu, Yumao Lu, and Lijuan Wang. Crossing the format boundary of text and boxes: Towards unified vision-language modeling. ArXiv, abs/2111.12085, 2021.", + "Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. ArXiv preprint, abs/1708.03888, 2017. URL https://arxiv.org/abs/1708.03888.", + "Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022.", + "Ning Yu, Vladislav Skripniuk, Sahar Abdelnabi, and Mario Fritz. Artificial fingerprinting for generative models: Rooting deepfake attribution in training data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 14448-14457, 2021." 
+ ], + "bbox": [ + 171, + 103, + 828, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, Ce Liu, Mengchen Liu, Zicheng Liu, Yumao Lu, Yu Shi, Lijuan Wang, Jianfeng Wang, Bin Xiao, Zhen Xiao, Jianwei Yang, Michael Zeng, Luowei Zhou, and Pengchuan Zhang. Florence: A new foundation model for computer vision. arXiv preprint, 2021.", + "Yan Zeng, Xinsong Zhang, and Hang Li. Multi-grained vision language pre-training: Aligning texts with visual concepts. ArXiv preprint, abs/2111.08276, 2021. URL https://arxiv.org/abs/2111.08276.", + "Han Zhang, Weichong Yin, Yewei Fang, Lanxin Li, Boqiang Duan, Zhihua Wu, Yu Sun, Hao Tian, Hua Wu, and Haifeng Wang. Ernie-vilg: Unified generative pre-training for bidirectional vision-language generation. ArXiv preprint, abs/2112.15283, 2021a. URL https://arxiv.org/abs/2112.15283.", + "Pengchuan Zhang, Xiujun Li, Xiaowei Hu, Jianwei Yang, Lei Zhang, Lijuan Wang, Yejin Choi, and Jianfeng Gao. VinVL: Revisiting visual representations in vision-language models. In Conference on Computer Vision and Pattern Recognition (CVPR), 2021b.", + "Wangchunshu Zhou, Canwen Xu, Tao Ge, Julian J. McAuley, Ke Xu, and Furu Wei. BERT loses patience: Fast and robust inference with early exit. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. 
https://proceedings.neurips.cc/paper/2020/hash/d4dd111a4fd973394238aca5c05bebe3-Abstract.html
In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 5802-5810. Computer Vision Foundation / IEEE, 2019. doi: 10.1109/CVPR.2019.00595. URL http://openaccess.thecvf.com/content_CVPR_2019/html/Zhu_DM-GAN_Dynamic_Memory_Generative_Adversarial_Networks_for_Text-To-Image_Synthesis_CVPR_2019_paper.html." + ], + "bbox": [ + 171, + 102, + 828, + 890 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A APPENDIX", + "text_level": 1, + "bbox": [ + 171, + 102, + 299, + 118 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.1 DETAILS OF HYPER-PARAMETERS", + "text_level": 1, + "bbox": [ + 171, + 132, + 455, + 146 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Pre-training Our model is a base-size Transformer implemented with a 6-layer encoder and a 6-layer decoder, 768 dimensions for hidden states, 512 for maximum input length, and 3072 for intermediate size. We train our model from scratch without initializing the Transformer encoder and decoder. The image encoder is initialized from ResNet-101 (He et al., 2016) with ImageNet weights since we find a warm start provides a reliable visual representation and helps the convergence. For models pre-training on large-scale data, we optimize 10 epochs while for other small-scale datasets, we optimize 40 epochs with the AdamW optimizer. The weight decay is set to 0.01 with $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.999$ . The learning rate is 2e-4 with a warm-up period for the first $2\\%$ steps and linearly decayed to 0 after $2\\%$ of the total training steps. 
In each batch, there are 8,192 image-text pairs for text-to-image generation and image-to-text generation with 8,192 text-only documents for text-to-text generation. We use center-crop to resize each image to the size of $256\\times 256$ , which is the only data augmentation used during training. All pre-training experiments are conducted on 32GB NVIDIA V100 GPUs. We adopt mixed-precision (Micikevicius et al., 2018) to accelerate training and save memory. The model trained on the largest data takes around 10 days on 1024 V100 GPUs. The default settings are shown in Table 6. We adopt dynamic masking in our experiments, where the masking ratio is randomly sampled from a uniform distribution $\\mathrm{U}(0,1)$ .", + "bbox": [ + 169, + 152, + 826, + 377 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Fine-tuning The learning rate is $\\in [1\\mathrm{e} - 5,5\\mathrm{e} - 5]$ and our model is optimized by AdamW. Because the image resolution differs between pre-training and fine-tuning, the position parameters are adapted using linear interpolation. For all downstream tasks, we apply random resize crops and horizontal flips augmentation during training. All fine-tuning experiments are conducted on 32GB NVIDIA V100 GPUs. The default settings for text classification, image classification, multi-modal understanding and image-to-text generation are shown in Tables 7, 8, and 9, respectively.", + "bbox": [ + 169, + 385, + 823, + 470 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/afb702bf614965138be44944fad40c441ce6705eeda21c064d089aec2757b8bc.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
configvalue
optimizerAdamW (Loshchilov & Hutter, 2019)
learning rate2e-4
weight decay0.01
optimizer momentumβ1, β2=0.9, 0.999
batch size8192
learning rate schedulelinear decay
warmup ratio (Goyal et al., 2017a)0.02
training epochs{10, 40}
augmentationRandomResizedCrop
", + "bbox": [ + 241, + 482, + 756, + 633 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/3934839d7cf75277eebcff04a94a0b776c94eab7aa3440dfdc4dda2b2cdcbb4f.jpg", + "table_caption": [ + "Table 6: Pre-training setting." + ], + "table_footnote": [], + "table_body": "
configvalue
optimizerAdamW
learning rate{1e-5, 2e-5, 5e-5}
weight decay0.01
optimizer momentumβ1, β2=0.9, 0.999
batch size{16, 32, 64}
learning rate schedulelinear decay
warmup ratio0.1
training epochs{5, 10}
", + "bbox": [ + 357, + 683, + 638, + 809 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 7: Text classification: GLUE setting.", + "bbox": [ + 364, + 819, + 624, + 833 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.2 DETAILS OF DOWNSTREAM TASKS", + "text_level": 1, + "bbox": [ + 171, + 859, + 460, + 875 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Language Understanding We conduct experiments on GLUE benchmark including MNLI (Williams et al., 2018), CoLA (Warstadt et al., 2019), MRPC (Dolan & Brockett, 2005), QQP (Iyer et al., 2017), SST-2 (Socher et al., 2013), QNLI (Rajpurkar et al., 2016),", + "bbox": [ + 169, + 881, + 826, + 926 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/f58515bae1b5e5a8cba2a67ca0eb3e690c445bb4d6b66e88b50945e2c3949d87.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
configvalue
optimizerLARS (You et al., 2017)
base learning rate0.1
weight decay0
optimizer momentum0.9
batch size16384
learning rate schedulecosine decay
warmup epochs10
training epochs90
augmentationRandomResizedCrop
", + "bbox": [ + 341, + 101, + 656, + 239 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/475c0044a24bad8a00c82e5f62b10e40a7f42a74d6e3f58ce588b2562538299d.jpg", + "table_caption": [ + "Table 8: Image classification: Linear probing setting." + ], + "table_footnote": [], + "table_body": "
configvalue
optimizerAdamW
learning rate[1e-5, 5e-5]
weight decay0.02
optimizer momentumβ1, β2=0.9, 0.999
batch size1024
learning rate schedulelinear decay
warmup epochs[2, 5]
training epochs[5, 15]
label smoothing (Szegedy et al., 2016)0.1
augmentationRandomResizedCrop, HorizontalFlips
", + "bbox": [ + 251, + 277, + 746, + 429 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 9: Multi-modal understanding and image-to-text generation: fine-tuning setting.", + "bbox": [ + 241, + 439, + 754, + 455 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "RTE (Dagan et al., 2005; Haim et al., 2006; Giampiccolo et al., 2007; Bentivogli et al., 2009), and STS-B (Agirre et al., 2007). We follow the practice of BART (Lewis et al., 2020) and feed the same input to the encoder and decoder, and the hidden state of the final decoder token is fed into a new multi-class linear classifier or regression head. MNLI results are an average of MNLI-m and MNLI-mm. MRPC and QQP results are average of accuracy and F1. Matthews correlation coefficient (MCC) is reported for CoLA and Pearson correlation coefficient (PCC) is reported for STS-B.", + "bbox": [ + 169, + 479, + 826, + 566 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Vision Understanding We conduct vision experiments in both fine-tuning and linear evaluation (linear eval). The linear evaluation follows a common practice (Caron et al., 2021; He et al., 2020; Singh et al., 2021) in self-supervised learning to evaluate the representation quality, where the pre-trained backbone model is frozen and a new linear classifier is appended on top of it. 
We choose 12 popular datasets: ImageNet (Russakovsky et al., 2015), Food101 (Bossard et al., 2014), CIFAR10 (Krizhevsky et al., 2009), CIFAR100 (Krizhevsky et al., 2009), Cars (Krause et al., 2013), Aircraft (Maji et al., 2013), DTD (Cimpoi et al., 2014), Pets (Parkhi et al., 2012), Flowers102 (Nilsback & Zisserman, 2008), MNIST (LeCun & Cortes, 2010), STL10 (Coates et al., 2011), and Country211 (Radford et al., 2021).", + "bbox": [ + 169, + 571, + 826, + 700 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Multi-modal Understanding We consider three popular multi-modal tasks: VQAv2 (Goyal et al., 2017b), SNLI-VE (Xie et al., 2019) and NLVR2 (Suhr et al., 2019) to evaluate our model's multi-modal understanding ability. For VQAv2, following ALBEF (Li et al., 2021), the image and question are fed to the encoder and the decoder generates answers based on the multi-modal embeddings. For SNLI-VE, we follow SimVLM (Wang et al., 2021b) to feed the image to the encoder and the text to the decoder. A classifier is appended on top of our pre-trained model, and it is trained to predict the result based on the last hidden states of the decoder. For NLVR2, two input pairs are constructed, each of them including one image and the textual description. The prediction is made based on the concatenation of these two embeddings following SimVLM (Wang et al., 2021b). The resolutions for VQAv2, SNLI-VE, NLVR2 are 480, 384, 384, respectively.", + "bbox": [ + 169, + 705, + 826, + 848 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Text-to-Image Generation The text-to-image task requires the model to understand the textual instruction first and then draw the image according to the input's intention. The input text is fed to our encoder, and our decoder will generate visual tokens one by one. After obtaining visual tokens, they are decoded into a raw image by an image decoder. We directly use an off-the-shelf image decoder from VQGAN (Esser et al., 2021). 
Following (Ramesh et al., 2021) we directly evaluate our", + "bbox": [ + 169, + 854, + 826, + 926 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/7b1921afa2bfe10baaffc21441a2eb24b6be1a163f1bc78d651197c68c35be17.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Data TypeDatasetImage Domain#Images#Captions#Total
In-Domain Data (ID)COCOCOCO110.3K551.7K1.3M
Visual GenomeCOCO108.2K759.0K
Small-scale Web Data (SWD)SBUWeb859.7K859.7K14.9M
CC-3MWeb2.9M2.9M
CC-12MWeb11.1M11.1M
Object-Region Data (ORD)VG regionsCOCO108.2K3.6M17.0M
VG objectsCOCO108.2K925.6K
COCO objectsCOCO110.3K736.6K
RefcocoCOCO27.9K589.9K
Open ImageFlickr1.7M7.5M
Obj365Flickr577.6K3.6M
Vision Data (VD)ImageNet-21KImageNet13.2M13.2M13.2M
Large-scale Web Data (LWD)DAVINCI-200MWeb205.6M205.6M601.3M
LAION-400MWeb395.7M395.7M
Text Data (TD)C4Web--800GB
", + "bbox": [ + 178, + 101, + 816, + 321 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 10: Statistics of the pre-training datasets. #Images, #Captions, and #Total denote the number of images, the number of image-text pairs, and the total number of image-text pairs, respectively.", + "bbox": [ + 169, + 335, + 823, + 363 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "pre-trained model on 30,000 images randomly sampled from COCO (Chen et al., 2015) validation split. Both Fréchet Inception Distance (FID) (Heusel et al., 2017) and Inception Score (IS) (Salimans et al., 2016) are reported. The image resolution is 256.", + "bbox": [ + 169, + 391, + 823, + 434 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Image-to-Text Generation For image-to-text generation (also called image captioning), the image is given to encoder and the decoder will generate the corresponding caption. Our experiments are conducted on COCO dataset (Chen et al., 2015) with cross-entropy optimization. Other task-specific techniques such as CIDEr optimization (Rennie et al., 2017) are not introduced. The image resolution is 480. We also conduct zero-shot captioning experiments on NoCaps (Agrawal et al., 2019) and VLINE (Zhou et al., 2022b).", + "bbox": [ + 169, + 445, + 823, + 527 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "A.3 PRE-TRAINING DATASETS", + "text_level": 1, + "bbox": [ + 171, + 542, + 398, + 556 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Since existing studies pre-trained their models on different corpora, some of which are publicly available (e.g., CC-3M, CC-12M) while some are in-house datasets (e.g., ALIGN (Jia et al., 2021)), making the fair comparison difficult. Considering results only on the state-of-the-art performance would underestimate the potential of this line of research. 
Therefore, we propose several practical settings, including small-scale and large-scale, and then conduct detailed comparisons on them in section 5.1.", + "bbox": [ + 169, + 565, + 823, + 648 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We collect a large set of datasets with diverse distributions for pre-training. According to its source, we divide them into in-domain, small-scale web data, object-region data, vision data, and large-scale web data. The statistics and details are shown in Table 10. Most of them are naturally image-text pairs, while to enrich our corpus, we leverage object descriptions, region descriptions, and vision data (i.e., ImageNet). For objects and regions, we crop them from the original image according to their bounding box. The text part is composed according to a human-written template and objects. For example, the prompt template is \"This image contains [OBJ_A] and [OBJ_B]\", where [OBJ_A] and [OBJ_B] are two object names from the data. For vision data, because they are usually labeled with a single word or short phrase, we compose a description with prompt templates such as \"A picture of [LABEL]\" or \"The image contains [LABEL]\". For example, \"A picture of cat\" or \"The image contains cat\". We curated a dataset containing about 205.6M image-text pairs, which are available publicly on the internet. The data distribution is similar to LAION-400M. Because both are from web images, we merge them into large-scale web data (LWD).", + "bbox": [ + 169, + 651, + 826, + 832 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "A.4 REPRODUCTION OF SIMVLM", + "text_level": 1, + "bbox": [ + 171, + 845, + 423, + 859 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Since SimVLM is not open-sourced, we need to reproduce it by ourselves. There are two main difficulties in the reproduction: 1. it uses 1.8 billion in-house data 2. 
the configurations (e.g., parameter size, number of layers) of its base model are not clearly stated. However, there are still some clues in Section 4.4 of the SimVLM paper, where they propose a SimVLMsmall model with 8", + "bbox": [ + 169, + 868, + 823, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "layers, 512 embedding dimensions, and trained on about 200M web data. To demonstrate the success of our replication, we train a $\\mathrm{SimVLM}_{small}$ model with the exact same configurations on about 200M web data. We obtain a VQA score of 68.50, surpassing the reported score of 67.43 in the original paper. We argue this result verifies our successful replication.", + "bbox": [ + 169, + 103, + 826, + 161 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.5 EFFECTS OF COMPUTE", + "text_level": 1, + "bbox": [ + 171, + 179, + 380, + 191 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Our model is trained with large compute. To reveal the effects of compute, we visualize the performance improvement trends of SimVLM and DAVINCI as a function of the compute spent. There are two goals: 1) to compare better with prior work, as well as to 2) to show if that level of pre-training compute was necessary. We conduct experiments on the image-to-text generation task under both zero-shot and fine-tuning settings. The results are shown in Figure 2. It is observed that with the increase in compute, both models are improved significantly and converged at $40\\%$ of compute (zero-shot), and $80\\%$ of compute (fine-tuning), respectively. Large compute is especially helpful for fine-tuning settings. 
After convergence, our model outperforms SimVLM consistently in these two settings.", + "bbox": [ + 169, + 202, + 826, + 329 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/dc770b867dbcb78299001f30d829f53084c2026e823f49908e9faeae97544615.jpg", + "image_caption": [ + "(a) COCO Captioning (Zero-shot)" + ], + "image_footnote": [], + "bbox": [ + 183, + 366, + 470, + 503 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/51ca548619b1916413ba2a86c8edd6a847002c4995aac6c96645992ad6236625.jpg", + "image_caption": [ + "(b) COCO Captioning (Fine-tuning)", + "Figure 2: The effects of compute. X-axis is the percentage of compute and Y-axis is the CIDer score on COCO captioning task." + ], + "image_footnote": [], + "bbox": [ + 522, + 364, + 813, + 503 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.6 EFFECTS OF MASKING STRATEGIES", + "text_level": 1, + "bbox": [ + 171, + 559, + 465, + 571 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In our experiments, we adopt dynamic masking, where the masking ratio is sampled from a uniform distribution $\\mathrm{U}(0,1)$ . The prefix ratio could be 0, where the prefix image is none, and the model is forced to predict the whole image with the input caption. There are other designs to mask images. Here we compared three different masking strategies: 1) masked image modeling (randomly masking some patches), 2) in-painting (randomly masking some continuous spans in the middle of the image), and 3) suffix-painting (ours). The results are shown in Table 11. Both masked image modeling and in-painting are effective and competitive. 
It is observed that suffix-painting is better than masked image modeling and in-painting across all tasks, demonstrating that suffix-painting works well.", + "bbox": [ + 169, + 582, + 826, + 695 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/1d7bec0da99224e3d8c177bd9c59034f67589b0cf6803f2bc2419e13ba2d3119.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodCOCOB@4 / CVQA AccSNLI-VE AccNLVR2 AccImageNet AccFood101 AccCIFAR10 AccMNLI AccSST-2 AccText2Image IS / FID
No Pre-training32.1 / 96.7152.7354.2351.08-*-*-*66.3279.84-*
MIM34.7 / 113.468.1875.3469.6648.4656.9572.7981.7289.849.50 / 74.13
In-painting34.5 / 112.567.4675.4168.6647.5054.3871.2081.5589.849.97 / 68.15
Suffix-painting (ours)35.8 / 117.369.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
Token Projection17.7 / 49.252.1371.1152.0115.1125.6261.0182.0190.2511.89 / 60.96
Patch Projection25.7 / 79.557.6971.9257.4536.2344.3169.4081.7390.0511.41 / 61.87
ResNet Feature (ours)35.8 / 117.369.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
", + "bbox": [ + 171, + 713, + 834, + 821 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 11: The effects of masking strategies and image feature extraction on COCO Captions, VQA, SNLI-VE, NLVR2, ImageNet, Food101, CIFAR10, MNLI, SST-2, and text-to-image generation. MIM denotes masked image modeling, where some patches are randomly sampled and masked. Because linear probe and zero-shot text-to-image generation require a pre-trained model to be frozen, the \"No Pre-training\" results on ImageNet, Food101, CIFAR10, and Text2Image are not reported and labeled by * .", + "bbox": [ + 169, + 838, + 826, + 902 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "A.7 EFFECTS OF IMAGE FEATURE EXTRACTION", + "text_level": 1, + "bbox": [ + 171, + 104, + 519, + 118 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "There are several different ways to extract image features. We compare three different image representation methods: 1) token projection (projecting the prefix tokens to the hidden dimension of the backbone network on the token-level), 2) patch projection (similar to ViT embedding, we split an image into fixed-size patches, embed each of them by a trainable linear projection on the pixel-level), and 3) ResNet feature extraction (ours). The comparison is shown in Table 11. From the results, we observed that ResNet feature extraction outperforms token projection and patch projection by a large margin. 
Therefore, we decided to adopt ResNet to extract image features.", + "bbox": [ + 169, + 125, + 826, + 223 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "A.8 SCALING EFFECTS OF DATA SIZE", + "text_level": 1, + "bbox": [ + 171, + 234, + 452, + 250 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In this section, we explore the scaling effects of our model. We plot the trends with the increase in data size on four tasks: COCO captioning, VQA, SNLI-VE, and NLVR2. The performance improvement shown in Figure 3 demonstrates that both SimVLM and DAVinci are scaling well with pre-training data size. In addition, DAVinci consistently outperforms SimVLM on different data sizes across these tasks.", + "bbox": [ + 169, + 256, + 823, + 325 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/52037f741d36ecb6d936b7cace17e5d3bfda934c12d8d6bbcc289cc7e6d0baff.jpg", + "image_caption": [ + "Figure 3: The scaling effects of data size." + ], + "image_footnote": [], + "bbox": [ + 179, + 339, + 331, + 465 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/3c3a2f391ef45f0474996b6719113037e2819f577a5d0a2c99dd72e172b1387b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 339, + 483, + 465 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/945d7d081aa0c3c4fd9057adad00d6148ccc678d76f13c6ae40b1520b0157675.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 339, + 635, + 464 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/499cc8d70d0c99af2d35676d358599f2d8f0bb4f50ed884695c5262339433f48.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 637, + 339, + 816, + 464 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "A.9 FULL COMPARISON WITH EXISTING METHODS", + "text_level": 1, + "bbox": [ + 171, + 526, + 545, + 540 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In Table 12, we display a comprehensive comparison 
with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks.", + "bbox": [ + 169, + 547, + 823, + 577 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "A.10 LIMITATION AND SOCIETAL IMPACTS", + "text_level": 1, + "bbox": [ + 171, + 589, + 486, + 602 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Limitation. Like most of the previous pre-training studies, the entire project consumed 40 V100 GPU years on an in-house computing cluster with large electricity costs. We tried to keep our model size small enough, but there is still potential for efficiency improvements such as sparse training (Zhou et al., 2021d;c), dataset distillation (Zhou et al., 2022c), and progressive training (Rusu et al., 2016). We will explore those techniques to improve the training efficiency and reduce the carbon footprint so that it can adhere to proposals on \"green\" deep learning (Schwartz et al., 2020; Xu et al., 2021b). Furthermore, although we have tried our best to include as many tasks as we can to demonstrate the versatility of DAVinci, we believe our method can be expanded to more tasks (e.g., machine translation, summarization, object detection, etc.), modalities (e.g., video and speech). We leave these investigations to future work.", + "bbox": [ + 169, + 609, + 826, + 750 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Potential Societal Impacts. Our model has image generation ability with risk of abuse, like fake portraits on social media (Hill & White, 2020), which is a common potential risk in image generation research. 
Viable solutions are watermarking (Yu et al., 2021) and introducing a strict user license.", + "bbox": [ + 169, + 758, + 823, + 801 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "A.11 VISUALIZATION OF IMAGE GENERATION", + "text_level": 1, + "bbox": [ + 171, + 813, + 511, + 825 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In this section, we conduct a qualitative analysis by visualizing the generation samples. Figure 4 shows the comparison with DALLE and OFA with the same query. More generated samples are shown in Figures 5.", + "bbox": [ + 169, + 834, + 825, + 878 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/7ae3bbefb24a10288d4d7d449fda5ff1b3461275ec5aef77bd829531e5512ca1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model#Params.Text MNLI AccVision ImageNet LE / FTImage2Text COCO B@4 / CText2Image COCO IS† / FID↓Multi-modal VQA test-dev / test-standardNLVR2 dev / test-P
Encoder-only Multi-modal Models
VisualBERT (Li et al., 2019)170M81.60---70.80 / 71.0067.40 / 67.00
ViLBERT (Lu et al., 2019)274M79.90---70.55 / 70.92-
VL-BERT (Su et al., 2020)170M81.20---71.16 / --
LXMERT (Tan & Bansal, 2019a)240M80.40---72.42 / 72.5474.90 / 74.50
OSCAR (Li et al., 2020)155M--36.5 / 123.7-73.16 / 73.4478.07 / 78.36
VinVL (Zhang et al., 2021b)157M--38.2 / 129.3-75.95 / 76.1282.05 / 83.08
ViLT (Kim et al., 2021)88M----70.85 / -74.91 / 75.57
ALBEF (Li et al., 2021)210M----75.84 / 76.0482.55 / 83.14
X-VLM (Zeng et al., 2021)240M--39.6 / 132.6-78.22 / 78.3784.41 / 84.76
VLMO (Wang et al., 2021a)----76.64 / 76.8982.77 / 83.34
Encoder-Decoder Multi-modal Models
UNICORN (Yang et al., 2021)--35.8 / 119.1-69.20 / 69.40-/-
Uni-ENDN (Li et al., 2022b)110M----72.20 / 72.50-/-
Pixel-BERT (Huang et al., 2020)144M----74.45 / 74.5576.50 / 77.20
E2E-VLP (Xu et al., 2021a)94M--36.2 / 117.3-73.25 / 73.6777.25 / 77.96
VL-T5 (Cho et al., 2021)220M--34.5 / 116.5-- / 70.3074.60 / 73.60
VL-BART (Cho et al., 2021)220M--35.1 / 116.6-- / 71.3071.70 / 70.30
Text2Image Models
AttnGAN (Xu et al., 2018)---23.30 / 35.20-/--/-
DM-GAN (Zhu et al., 2019)---32.20 / 26.50-/--/-
DALLE (Ramesh et al., 2021) (250M)12B---17.90 / 27.50-/--/-
DALLE (Ramesh et al., 2021) (640M)†82M---15.79 / 29.22-/--/-
CogView (Ding et al., 2021)4B---18.20 / 27.10-/--/-
Unified Models
Unifying (Huang et al., 2021)228M--37.3 / 122.6- / 29.90-/--/-
FLAVA (Singh et al., 2021)240M80.3375.54 / ---72.80 / 72.49-/-
SimVLM (Wang et al., 2021b) (640M)†153M83.2776.04 / -38.5 / 128.7-75.04 / 75.0378.82 / 79.72
SimVLM (Wang et al., 2021b) (1.8B)83.4080.60 / -39.0 / 134.8-77.87 / 78.1481.72 / 81.77
OFA (Wang et al., 2022)182M84.30- / 82.2041.0 / 138.221.50* / 20.80*78.00 / 78.10-/-
Florence (Yuan et al., 2021)637M-- / 90.05-/--/-80.16 / 80.36-/-
DAVINCI154M83.1378.81 / 83.9239.2 / 130.417.44 (22.41*) / 24.21 (19.82*)76.32 / 76.4480.03 / 80.25
", + "bbox": [ + 171, + 114, + 823, + 444 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Table 12: Comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks. All results are from base-size models. LE and FT denote linear evaluation and fine-tuning performance, respectively. Image2Text results are reported without CIDEr optimization. $\\dagger$ are our reproduced models. \\* are the results after fine-tuning. SimVLM (1.8B) and OFA are pre-trained with much larger corpus or human-labeled data of many downstream tasks, and thus they are not comparable and are labeled in gray. Florence (Yuan et al., 2021) is pre-trained with a much larger model size (Florence-CoSwin-H, 637M) and more pre-training data (900M), so the numbers are in grey. bold denotes the best across unified models.", + "bbox": [ + 169, + 455, + 826, + 546 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/722f92ac76feb7850a7f7cba9696402edf4c6157bdce7d961f63b21c15fe066b.jpg", + "image_caption": [ + "Figure 4: Comparison with DALLE and OFA on text-to-image generation." 
+ ], + "image_footnote": [], + "bbox": [ + 189, + 585, + 813, + 864 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/297fefe037e02ba8e64d9f257f03f3f9c71cac5255960a9c1c2a9c3cc25b9063.jpg", + "image_caption": [ + "a decorative flower vase full of purple and yellow flowers" + ], + "image_footnote": [], + "bbox": [ + 181, + 233, + 313, + 335 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/6b482fa8a2b64ecd1c991ca74f4335f170d99447e4e635433fa2927be9d85238.jpg", + "image_caption": [ + "a vase full of flowers on table" + ], + "image_footnote": [], + "bbox": [ + 346, + 233, + 480, + 335 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/fe233a6f67b7a479910d1f5586a6a4079b19b40db5df8b63a59bdc13b5cd5202.jpg", + "image_caption": [ + "a park with flowers on a sunny day" + ], + "image_footnote": [], + "bbox": [ + 513, + 233, + 645, + 335 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/2f5d3c55afb3ca03a07dbd6276c169f9f965004b7744b329c7552ddbb6854b64.jpg", + "image_caption": [ + "a fire hydrant sitting in a front yard next to a sign" + ], + "image_footnote": [], + "bbox": [ + 678, + 233, + 810, + 335 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/e3a65ee953198489cd255b1f3e1e2a38f12dc1148d8dd1ae61a3511c904178b6.jpg", + "image_caption": [ + "a beach on a sunny day" + ], + "image_footnote": [], + "bbox": [ + 181, + 369, + 313, + 472 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/f1b488802f01325f2123d87ee44e8a04fb9be03ceb0ba1552a196b59e290029d.jpg", + "image_caption": [ + "a one cart train coming down the railroad tracks" + ], + "image_footnote": [], + "bbox": [ + 346, + 369, + 478, + 470 + ], + 
"page_idx": 24 + }, + { + "type": "image", + "img_path": "images/f5514e687429ef98d8753069502dabe96b605320ffb28e016e6d69082f9ebe98.jpg", + "image_caption": [ + "a red and white boat docked on shore" + ], + "image_footnote": [], + "bbox": [ + 511, + 369, + 645, + 470 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/0dc9869347acdff3619a3d1e2aacb15ac1e86cf988aca1bc788bc769b85b6029.jpg", + "image_caption": [ + "a picture of a snowy mountain" + ], + "image_footnote": [], + "bbox": [ + 678, + 369, + 810, + 472 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/91f1ce8149bb0761730469a97caae51b4e71adcacec38dcab405aa552d71ccac.jpg", + "image_caption": [ + "a red stop sign on the side of the road" + ], + "image_footnote": [], + "bbox": [ + 181, + 498, + 313, + 599 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/f007039c1076ee7599382161422d5509e2fd74404cb6355c368d627a2288aa04.jpg", + "image_caption": [ + "a building in front of a roundabout with a tree in the center." 
+ ], + "image_footnote": [], + "bbox": [ + 346, + 498, + 480, + 599 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/88379748a026c2125e6eff7d0bd0b38b68b57639f9a4cd85ec6b14c9758b6abb.jpg", + "image_caption": [ + "bathroom with marble walls and counter surrounds a large mirror" + ], + "image_footnote": [], + "bbox": [ + 513, + 500, + 645, + 599 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/6fc5ce42118cbaedf06eea0fc1239326338a65fe4d7f2a1d3d22da077667bbd9.jpg", + "image_caption": [ + "trees by the river in the mountains" + ], + "image_footnote": [], + "bbox": [ + 678, + 498, + 810, + 601 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/63ba22f4a8d7f21f93f0e844fd242ca8207716ecef1d9d5cd5cff058c0d9efa2.jpg", + "image_caption": [ + "many fruits on the plate on the table" + ], + "image_footnote": [], + "bbox": [ + 181, + 626, + 313, + 728 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/7f297c673584b1a5e2efb6833d3c040379360af2a0bb422a1ae9b5f93ecdf21a.jpg", + "image_caption": [ + "a bunch of fruit in a fruit shop" + ], + "image_footnote": [], + "bbox": [ + 346, + 626, + 480, + 728 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/a09f965728db9504abf035083d10e0bf5275815384c0c6dd271f2991ae5b4e8c.jpg", + "image_caption": [ + "a table set with a sandwich and a drink" + ], + "image_footnote": [], + "bbox": [ + 513, + 626, + 645, + 728 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/3a86a45d8e479c156cb76df3ab4306ff16e06d7a50b10407f6dff5ef6481103b.jpg", + "image_caption": [ + "noodles and broccoli on a plate", + "Figure 5: Generation samples by DAVINCI." 
+ ], + "image_footnote": [], + "bbox": [ + 678, + 626, + 810, + 728 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 24 + } +] \ No newline at end of file diff --git a/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_model.json b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_model.json new file mode 100644 index 0000000000000000000000000000000000000000..db41f34353ab299c323d11fa987d753fb638c2eb --- /dev/null +++ b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_model.json @@ -0,0 +1,4089 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.147 + ], + "angle": 0, + "content": "WRITE AND PAINT: GENERATIVE VISION-LANGUAGE MODELS ARE UNIFIED MODAL LEARNERS" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.171, + 0.276, + 0.184 + ], + "angle": 0, + "content": "Shizhe Diao*" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.185, + 0.544, + 0.213 + ], + "angle": 0, + "content": "The Hong Kong University of Science and Technology sdiaoaa@connect.ust.hk" + }, + { + "type": "text", + "bbox": [ + 0.561, + 0.171, + 0.706, + 0.185 + ], + "angle": 0, + "content": "Wangchunshu Zhou" + }, + { + "type": "text", + "bbox": [ + 0.561, + 0.186, + 0.688, + 0.199 + ], + "angle": 0, + "content": "ByteDance AI Lab" + }, + { + "type": "text", + "bbox": [ + 0.561, + 0.2, + 0.837, + 0.213 + ], + "angle": 0, + "content": "wangchunshu.zhou@inf.ethz.ch" + }, + { + "type": "text", + 
"bbox": [ + 0.182, + 0.233, + 0.298, + 0.248 + ], + "angle": 0, + "content": "Xinsong Zhang†" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.248, + 0.31, + 0.262 + ], + "angle": 0, + "content": "ByteDance AI Lab" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.263, + 0.49, + 0.277 + ], + "angle": 0, + "content": "zhangxinsong.0320@bytedance.com" + }, + { + "type": "text", + "bbox": [ + 0.558, + 0.234, + 0.652, + 0.248 + ], + "angle": 0, + "content": "Jiawei Wang" + }, + { + "type": "text", + "bbox": [ + 0.558, + 0.248, + 0.762, + 0.263 + ], + "angle": 0, + "content": "Shanghai Jiao Tong University" + }, + { + "type": "text", + "bbox": [ + 0.558, + 0.263, + 0.747, + 0.277 + ], + "angle": 0, + "content": "wjw_sjt@sjtu.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.313, + 0.547, + 0.327 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.343, + 0.769, + 0.608 + ], + "angle": 0, + "content": "Recent advances in vision-language pre-training have pushed the state-of-the-art on various vision-language tasks, making machines more capable of multi-modal writing (image-to-text generation) and painting (text-to-image generation). However, few studies investigate if these two essential capabilities can be learned together and boost each other, making a versatile and powerful multi-modal foundation model. In this work, we disclose the potential of symmetric generative vision-language pre-training in learning to write and paint concurrently, and propose a new unified modal model, named DAVINCI, trained with prefix language modeling and prefix image modeling, a simple generative self-supervised objective on image-text pairs. Thanks to the proposed prefix multi-modal modeling framework, DAVINCI is simple to train, scalable to huge data, adaptable to both writing and painting tasks, and also strong on other vision, text, and multi-modal understanding tasks. 
DAVINCI achieves competitive performance on a wide range of 27 generation/understanding tasks and demonstrates the superiority of combining vision/language generative pre-training. Furthermore, we carefully benchmark the performance of different vision-language pre-training objectives on different scales of pre-training datasets on a heterogeneous and broad distribution coverage. Our results demonstrate the potential of exploiting self-supervision in both language and vision inputs, and establish new, stronger baselines for future comparisons at different data scales.\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.63, + 0.338, + 0.644 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.653, + 0.828, + 0.877 + ], + "angle": 0, + "content": "Self-supervised language model pre-training (Peters et al., 2018; Radford et al., 2018; Devlin et al., 2019; Liu et al., 2019; Lewis et al., 2020; Raffel et al., 2020; Brown et al., 2020; Fu et al., 2022; Zhou et al., 2021b; Diao et al., 2020; 2021; Zhou et al., 2021a; Xu et al., 2020; Zhou et al., 2020; 2022a; Pan et al., 2022; Diao et al., 2023) has reshaped the landscape of modern natural language processing (NLP) research, pushing the state-of-the-art of a wide range of NLP tasks. Recently, this success has been transferred to the multi-modal context and resulted in a number of vision-language pretrained models (VLMs) (Lu et al., 2019; Tan & Bansal, 2019a), achieving state-of-the-art results on various vision-language tasks. 
Most existing VLMs are BERT-like Transformer (Vaswani et al., 2017) encoders pre-trained with a combination of different vision-language pre-training (VLP) objectives: masked multi-modal modeling (Lu et al., 2019; Tan & Bansal, 2019b; Chen et al., 2020; Li et al., 2020), multi-modal alignment prediction (Lu et al., 2019; Tan & Bansal, 2019b; Chen et al., 2020; Li et al., 2020), region of interest feature regression (Tan & Bansal, 2019b), image-text matching (Li et al., 2021; Zeng et al., 2021), to name a few. However, the roadmap towards large language models reveals a transition pattern from encoder-only models like BERT (Devlin et al., 2019) / RoBERTa (Liu et al., 2019) to sequence-to-sequence models like T5 (Raffel et al., 2020) / BART (Lewis et al., 2020) and autoregressive models like GPT-3 (Brown et al., 2020) / PaLM (Chowdhery et al., 2022) to tackle" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.883, + 0.525, + 0.897 + ], + "angle": 0, + "content": "*Work done during the internship at ByteDance AI Lab." + }, + { + "type": "page_footnote", + "bbox": [ + 0.194, + 0.897, + 0.333, + 0.91 + ], + "angle": 0, + "content": "† Corresponding author" + }, + { + "type": "page_footnote", + "bbox": [ + 0.194, + 0.91, + 0.819, + 0.924 + ], + "angle": 0, + "content": "1The code and pre-trained models are available at https://github.com/shizhediao/DaVinci." 
+ }, + { + "type": "list", + "bbox": [ + 0.191, + 0.883, + 0.819, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "more tasks in a unified way, and from complicated objectives like masked language modeling / next sentence prediction / replace token detection to a simple language modeling objective to improve the scalability of pre-training. This suggests that the generative pre-training paradigm with simple targets shows great potential for pre-training more scalable and general VLMs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.163, + 0.827, + 0.33 + ], + "angle": 0, + "content": "To this end, several recent studies (Cho et al., 2021; Zhang et al., 2021a; Wang et al., 2021b; 2022) investigated sequence-to-sequence (seq2seq) vision-language pre-training and achieved state-of-the-art results on a range of vision-language understanding and generation tasks. For example, VL-T5 (Cho et al., 2021), OFA (Wang et al., 2022) and PaLI (Chen et al., 2022) formulate various vision-and-language problems into seq2seq tasks and pre-train a seq2seq VLM by multi-tasking on these tasks. In addition, ERNIE-ViLG (Zhang et al., 2021a) and SimVLM (Wang et al., 2021b) pre-train seq2seq VLMs with a simple language modeling or prefix language modeling objective on a large number of image-caption pairs. While achieving promising results, these objectives are not versatile enough, resulting in VLMs that are only capable of a subset of tasks in image-text modalities. 
On the other hand, the recent success of generative language pre-training (Brown et al., 2020) and generative vision pre-training (He et al., 2022; Bao et al., 2021) motivates us to explore generative vision-language pre-training to learn more versatile and scalable vision-language models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.332, + 0.828, + 0.583 + ], + "angle": 0, + "content": "In this work, we introduce prefix multi-modal modeling, a unified generative pre-training framework that extends prefix language modeling to the multi-modal context and learns a multi-modal foundation model by learning to write and paint simultaneously. As illustrated in Figure 1, given an image-caption pair, we split the image and caption into two parts denoted as prefix and suffix. To make prefix image modeling compatible with the seq2seq formulation of conventional prefix language modeling, we follow DALLE (Ramesh et al., 2021) and convert images into discrete sequences of image tokens (van den Oord et al., 2017). We then train the model to generate the suffix in one modality based on the prefix in the same modality and the complete input in the other modality. In this way, prefix multi-modal modeling can fully exploit self-supervision from large-scale image-caption pairs by learning to write and paint simultaneously. We pre-train DAVinci2, a vision-language foundation model, with the proposed prefix multi-modal modeling framework on large-scale image-text pairs. DAVinci is the first self-supervised vision-language foundation model that is versatile for all kinds of tasks in vision-and-language modalities, including image-to-text generation, text-to-image generation, vision-language understanding, and single-modal language / vision tasks. 
DAVinci consistently outperforms FLAVA (Singh et al., 2021), an existing vision-language foundation model, on both language, vision, and multi-modal tasks, and performs competitively with state-of-the-art models across a wide range of tasks and modalities. Moreover, DAVinci also shows strong few-shot and zero-shot image/text generation capability." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.585, + 0.827, + 0.669 + ], + "angle": 0, + "content": "In addition, most existing VLMs are pre-trained with mixed pre-training objectives and different data sources varying in size, making it difficult to disentangle the impact of pre-training objectives and data sources on the downstream tasks. To this end, we conduct a systematic analysis of the performance of generative vision-language pre-training by carefully ablating different pre-training objectives, such as prefix language / image modeling, and the amount of pre-training data with different qualities, revealing the impact of different objectives and data sources to facilitating future research." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.67, + 0.827, + 0.783 + ], + "angle": 0, + "content": "To summarize, our contribution is three-fold: (1) We introduce prefix multi-modal modeling, a simple unified generative vision-language pre-training framework that is scalable for large-scale pre-training and versatile for image-to-text generation, text-to-image generation and various multi-modal / single-modal understanding tasks. (2) We pre-train DAVINCI, a vision-language foundation model, with the proposed approach, demonstrating competitive performance on a wide range of 27 downstream tasks and the superiority of combining vision/language generative pre-training. (3) We conduct an analysis about the impact of different pre-training data sources and pre-training objectives on the performance of seq2seq VLMs." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.794, + 0.347, + 0.81 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.818, + 0.825, + 0.875 + ], + "angle": 0, + "content": "Inspired by the success of language model pre-training, several studies investigated vision-language pre-training on large-scale image-caption pairs. ViLBERT (Lu et al., 2019) and LXMERT (Tan & Bansal, 2019b) first propose to extract visual object features with an external object detection model like Fast-RCNN (Girshick, 2015), feed the image features together with texts into Transformer" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.825, + 0.926 + ], + "angle": 0, + "content": "\\(^{2}\\)Named after the Italian polymath Leonardo da Vinci, who displayed infinite grace in everything. We noticed that this name is used in GPT-3 versioning. However, we think there is no conflict because it is only a suffix for a specific checkpoint of the GPT-3 family." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.104, + 0.817, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.337, + 0.825, + 0.388 + ], + "angle": 0, + "content": "Figure 1: Illustration of the overall architecture and pre-training procedures of DAVinci, a Transformer-based sequence-to-sequence model. Given an image-text pair, DAVinci first splits either the word sequence or image token sequence into prefix and suffix. It then concatenates the prefix with the complete sequence in the other modality as input. DAVinci is trained to recover the suffix with maximum likelihood estimation." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.825, + 0.46 + ], + "angle": 0, + "content": "models, and train the model to align vision and language representations with masked multi-modal modeling and multi-modal alignment prediction objectives. Many following works (Li et al., 2020; Zhang et al., 2021b; Chen et al., 2020; Li et al., 2022a; 2021; Zeng et al., 2021; Wang et al., 2021a) propose several new objectives to improve object detection based VLP and explored using vision Transformer (Dosovitskiy et al., 2021; Touvron et al., 2021) as visual feature extractor." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.462, + 0.825, + 0.518 + ], + "angle": 0, + "content": "More recently, FLAVA (Singh et al., 2021), a new vision-language foundation model, is pre-trained with a masked multi-modal modeling objective. Performing competitively on language, vision, and vision-language understanding tasks, FLAVA is designed for understanding tasks without text and image generation abilities." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.521, + 0.827, + 0.73 + ], + "angle": 0, + "content": "While achieving promising results on multi-modal understanding tasks, most VLMs are based on encoder-only architectures with bidirectional attention, making them non-trivial to adapt to multi-modal generation tasks such as image captioning and text-to-image generation. Inspired by the success of seq2seq pre-trained language models such as T5 (Raffel et al., 2020) and BART (Lewis et al., 2020), VL-T5 (Cho et al., 2021) and OFA (Wang et al., 2022) propose to formulate both vision-language pre-training objectives and various downstream vision-language tasks as seq2seq tasks and pre-train a seq2seq VLM by multi-tasking on these tasks. However, the scalability and the zero-shot transfer capability of this approach are limited by the availability of large-scale and diverse vision-language tasks. 
To this end, SimVLM (Wang et al., 2021b), the most related work to our approach, instead pre-trains a seq2seq VLM with a simple prefix language modeling objective on text generation. It easily scales to very large and potentially noisy pre-training data and achieves competitive results. However, SimVLM only exploits language self-supervision, and thus it does not perform well on image understanding tasks and is unable to tackle image generation tasks. Another recent study is CM3 (Aghajanyan et al., 2022), which proposes a causal masked multi-modal model learned from large web data and differs from our work in pre-training objectives and target tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.732, + 0.827, + 0.802 + ], + "angle": 0, + "content": "As for the text-to-image generation task, Ramesh et al. (2021); Ding et al. (2021); Yu et al. (2022) achieved promising performance by learning an auto-regressive target with Transformer and VQ-VAE / VQ-GAN tokenizer. Most recently, Ramesh et al. (2022); Saharia et al. (2022) advanced the image generation capability by using diffusion models and high-quality text embeddings (e.g., CLIP, T5). Therefore, it is natural to explore boosting image generation via stronger multi-modal understanding." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.804, + 0.825, + 0.86 + ], + "angle": 0, + "content": "Previous studies are good at either image-to-text or text-to-image generation, but few studies investigate whether these two important capabilities can be learned together and boost each other. In this paper, we explore making a versatile and powerful multi-modal foundation model that is good at text-to-image generation, image-to-text generation, and multi-modal understanding tasks." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.873, + 0.283, + 0.887 + ], + "angle": 0, + "content": "3 DAVINCI" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Given the superior performance of auto-regressive language models (LM) (Brown et al., 2020; Chowdhery et al., 2022; Rae et al., 2021) on zero-shot and few-shot transfer abilities, we decided to" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.244 + ], + "angle": 0, + "content": "adopt a decoder optimized by language modeling loss to retain the generalization capabilities, and an encoder to represent the prefix input. Unlike using a causal mask in the decoder, the encoder employs fully-visible attention for the prefix input. This architecture resembles prefix language modeling, which shows effectiveness in a wide range of language tasks (Dong et al., 2019; Raffel et al., 2020) and enables zero-shot generalization abilities. Contrary to the previous multi-stage approaches (Wang et al., 2021a; Singh et al., 2021), our model is trained from scratch in an end-to-end manner thanks to the model's simplicity. In this section, we introduce the proposed prefix multi-modal modeling framework and the DAVinci model. The overall architecture of DAVinci is depicted in Figure 1. We first explain our model architecture in detail in §3.1 and then introduce pre-training objectives and procedures in §3.2." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.256, + 0.386, + 0.27 + ], + "angle": 0, + "content": "3.1 MODEL ARCHITECTURE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.277, + 0.827, + 0.361 + ], + "angle": 0, + "content": "Textual Feature Embedding Given an input sentence \\( S \\), we first use WordPiece (Wu et al., 2016) tockenize it to a sequence of tokens \\( W = \\{w_{1},w_{2},\\dots,w_{n}\\} \\). To obtain text features \\( T \\), for each token \\( w_{i} \\), a token embedding \\( e_i \\) and position embedding \\( p_i \\) are computed by two separate embedding matrices. Finally, the textual feature embedding \\( T = \\{t_1,t_2,\\dots,t_i,\\dots,t_n\\} \\) is calculated by \\( t_i = LayerNorm(e_i + p_i) \\), where \\( i \\) indicates the \\( i \\)-th position, and LayerNorm (Ba et al., 2016) is a layer normalization function." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.363, + 0.827, + 0.462 + ], + "angle": 0, + "content": "Visual Feature Embedding Given an input image \\(I\\), we first use a CNN backbone to extract and learn the image features. Following (Dai et al., 2021; Wang et al., 2021b), we use the first three blocks of ResNet (He et al., 2016) to obtain the feature maps. The feature maps are then flattened to \\(F = \\{f_1, f_2, \\dots, f_m\\}\\) along the spatial dimension, where \\(m\\) denotes the number of features. To keep the position information of visual features, we inject absolute learned positional embeddings \\(p\\) and the final visual embeddings \\(V = \\{v_1, v_2, \\dots, v_i, \\dots, v_m\\}\\) are calculated by \\(v_i = f_i + p_i\\), where \\(i\\) indicates the \\(i\\)-th position." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.463, + 0.829, + 0.548 + ], + "angle": 0, + "content": "Cross-Modal Transformer To fuse the textual and visual feature embeddings into a common space, we adopt a simple canonical Transformer architecture as the fusion module. 
The input is the combination of visual embedding \\( V \\) and textual embedding \\( T \\), namely \\( X = \\{x_{1}, x_{2}, \\dots, x_{l}\\} = [V, T] = \\{v_{1}, v_{2}, \\dots, v_{m}, t_{1}, t_{2}, \\dots, t_{n}\\} \\). The input embedding vectors \\( X \\) are then fed into a cross-modal Transformer encoder to obtain hidden state vectors \\( H = \\{h_{1}, h_{2}, \\dots, h_{l}\\} \\). Finally, a Transformer decoder is applied to generate visual or textual tokens with \\( H \\) and decoder input as illustrated in Figure 1." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.549, + 0.828, + 0.69 + ], + "angle": 0, + "content": "Image Tokenizer and Decoder Because Transformer is modeling on discrete tokens, to unify the text tokens and image tokens, we discretize an image into tokens by an image tokenizer and reconstruct the raw image by an image decoder. The image tokenizer and decoder are implemented with a discrete variational autoencoder (dVAE) (Ramesh et al., 2021). After training of the image tokenizer, it could serialize an image \\(I\\) into a sequence of discrete visual tokens \\(Z = \\{z_{1}, z_{2}, \\dots, z_{m}\\}\\) according to a learned vocabulary. Visual tokens \\(Z\\) serve as the ground-truth labels for the prefix image modeling objective. In our work, we directly use an off-the-shelf image tokenizer and decoder from VQGAN (Esser et al., 2021), with a vocabulary size of 1024 and a compression rate of 16, which means a \\(256 \\times 256\\) image will be tokenized into \\(16 \\times 16\\) grid of tokens and then flattened to a sequence of 256 tokens." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.701, + 0.412, + 0.715 + ], + "angle": 0, + "content": "3.2 PRE-TRAINING OBJECTIVES" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.722, + 0.827, + 0.793 + ], + "angle": 0, + "content": "Our major motivation is to conduct language modeling with image information and image modeling with text information simultaneously, which only requires image and text pairs that are easy to collect, making our approach easy to scale. The interaction would force the vision-language model to have a deeper understanding of both text and image. Learning from this interaction connects the visual representation with textual representation, enabling zero-shot transfer." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.826, + 0.867 + ], + "angle": 0, + "content": "Prefix Language Modeling (PLM) The core idea of prefix language modeling is \"given a full image \\( X_{image} \\) and a prefix caption \\( \\tilde{X}_{text} \\), recover the masked textual tokens (i.e., suffix caption \\( Y_{text} \\))\". Given an input caption, we first randomly mask some continuous words at the end (we call it suffix caption hereafter) and recover the masked textual tokens with full image by optimizing the cross-entropy loss," + }, + { + "type": "equation", + "bbox": [ + 0.336, + 0.871, + 0.826, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {P L M}} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {t e x t}} \\mid \\mathbf {X} _ {\\text {i m a g e}}, \\tilde {\\mathbf {X}} _ {\\text {t e x t}}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.91, + 0.642, + 0.926 + ], + "angle": 0, + "content": "where I and S are images and captions from the pre-training corpus \\(D\\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.176 + ], + "angle": 0, + "content": "Because of the lack of textual information, recovering the suffix caption requires the model to understand both the image and prefix caption. The full image is rich in semantic information that would help language modeling. The prefix length is randomly decided during training, and especially when prefix caption is none, this task will degenerate into \"image captioning\" task, which forces the model to generate a caption with the input image." + }, + { + "type": "equation", + "bbox": [ + 0.361, + 0.182, + 0.828, + 0.215 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {P L M}} ^ {\\prime} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {t e x t}} \\mid \\mathbf {X} _ {\\text {i m a g e}}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.223, + 0.825, + 0.295 + ], + "angle": 0, + "content": "Prefix Image Modeling (PIM) The core idea of prefix image modeling is \"given a full caption and a corrupted image (we call it prefix image hereafter), recover the masked visual tokens\". Given an input image, we first randomly mask some continuous image patches at the end (we call it suffix image hereafter). The prefix image and full caption will be fed into the model and try to recover the original visual tokens obtained from the image tokenizer by optimizing the cross-entropy loss." 
+ }, + { + "type": "equation", + "bbox": [ + 0.335, + 0.301, + 0.826, + 0.334 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {P I M}} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {i m a g e}} \\mid \\mathbf {X} _ {\\text {t e x t}}, \\tilde {\\mathbf {X}} _ {\\text {i m a g e}}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.342, + 0.827, + 0.372 + ], + "angle": 0, + "content": "Similar to PLM, when prefix image is none, this task will degenerate into \"text-to-image generation\" task, forcing the model to generate an image with the input caption:" + }, + { + "type": "equation", + "bbox": [ + 0.363, + 0.378, + 0.826, + 0.412 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {P I M}} ^ {\\prime} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {i m a g e}} \\mid \\mathbf {X} _ {\\text {t e x t}}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.42, + 0.827, + 0.435 + ], + "angle": 0, + "content": "Unified Learning Objective Our model is learned by optimizing the combination of PLM and PIM." + }, + { + "type": "equation", + "bbox": [ + 0.43, + 0.442, + 0.826, + 0.457 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\mathrm {P L M}} + \\mathcal {L} _ {\\mathrm {P I M}} \\tag {5}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.481, + 0.329, + 0.496 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.507, + 0.398, + 0.52 + ], + "angle": 0, + "content": "4.1 PRE-TRAINING DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.528, + 0.825, + 0.599 + ], + "angle": 0, + "content": "Since existing studies pre-trained their models on different corpora, making the fair comparison difficult. Considering results only on state-of-the-art performance would underestimate the potential of this line of research. 
Therefore, we propose several practical settings including small-scale and large-scale, and then conduct detailed comparisons on them in Section 5.1. More details about the datasets are shown in Appendix A.3." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.61, + 0.822, + 0.699 + ], + "angle": 0, + "content": "
Data TypeDatasetImage Domain#Total
In-Domain Data (ID)COCO, Visual GenomeCOCO1.3M
Small-scale Web Data (SWD)SBU, CC-3M, CC-12MWeb14.9M
Object-Region Data (ORD)VG regions, VG objects, COCO objects, Refcoco, Open Image, Obj365COCO, Flickr17.0M
Vision Data (VD)ImageNet-21KImageNet13.2M
Large-scale Web Data (LWD)LAION-400M, DAVinci-200MWeb601.3M
Text Data (TD)C4Web800GB
" + }, + { + "type": "table_caption", + "bbox": [ + 0.197, + 0.703, + 0.799, + 0.718 + ], + "angle": 0, + "content": "Table 1: Statistics of the pre-training datasets. #Total denotes the total number of image-text pairs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.73, + 0.371, + 0.743 + ], + "angle": 0, + "content": "4.2 DOWNSSTREAM TASKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.827, + 0.837 + ], + "angle": 0, + "content": "We test our models' ability and versatility on five dimensions: language understanding on 8 GLUE tasks (Wang et al., 2019), vision understanding on ImageNet fine-tuning and 12 popular vision datasets for linear evaluation, multi-modal understanding on VQAv2 (Goyal et al., 2017b), SNLI-VE (Xie et al., 2019) and NLVR2 (Suhr et al., 2019), text-to-image generation on COCO (Chen et al., 2015), and image-to-text generation on COCO, NoCaps (Agrawal et al., 2019), and VLUE (Zhou et al., 2022b). Details of downstream tasks and fine-tuning process are described in Appendix A.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.847, + 0.409, + 0.861 + ], + "angle": 0, + "content": "4.3 IMPLEMENTATION DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Our model is a base-size Transformer implemented with a 6-layer encoder and a 6-layer decoder, 768 dimensions for hidden states, 512 for maximum input length, and 3072 for intermediate size. We train our model from scratch without initializing the Transformer encoder and decoder. 
However, the image encoder is initialized from ResNet-101 (He et al., 2016) with ImageNet weights since we find" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.101, + 0.825, + 0.553 + ], + "angle": 0, + "content": "
BERTRoBERTaViTMLM 1MIM 2FLAVA 3CLIP 4SimVLM 5DAVINCI 6SimVLM 7DAVINCI 8
TaskEval.16GB160GB13.2M70M70M70M70M46.4M46.4M647.7M647.7M
MNLIFT84.2087.60-73.23-80.3332.8582.1382.2583.2783.13
CoLAFT54.6063.60-39.55-50.6511.0252.4752.1054.2254.75
MRPCFT84.7590.20-73.24-84.1668.7482.7083.1484.2684.54
QQPFT89.0091.90-86.68-88.7459.1788.3988.1589.0588.92
SST-2FT92.5094.80-87.96-90.9483.4990.6590.4891.1291.37
QNLIFT91.0092.80-82.32-87.3149.4687.5587.2188.2887.90
RTEFT62.5078.70-50.54-57.7653.0759.8060.7263.3464.22
STS-BFT88.2091.20-78.89-85.6713.7086.6286.2787.2487.05
NLP Avg.80.8486.35-71.55-78.1946.4478.7978.7980.1080.23
ImageNetLE--80.90-41.7975.5472.9574.3175.8776.0477.65
Food101LE--86.70-53.3088.5185.4983.4189.3385.5290.12
CIFAR10LE--96.90-76.2092.8791.2591.5693.0192.4193.96
CIFAR100LE--86.40-55.5777.6874.4072.5178.9875.2380.11
CarsLE--54.70-14.7170.8762.8461.4472.6968.8374.57
AircraftLE--46.00-13.8347.3140.0241.2847.4247.7549.55
DTDLE--74.30-55.5377.2973.4072.5577.1276.5978.33
PetsLE--92.70-34.4884.8279.6178.7785.5286.1388.21
Flowers102LE--99.20-67.2396.3794.9493.2496.1295.4196.88
MNISTLE--97.40-96.4098.4297.3896.6698.6798.4599.01
STL10LE--99.50-80.1298.8997.2997.5199.0398.0299.21
Country211LE--17.50-8.8728.9225.1226.4528.9927.8129.94
Vision Avg.--77.68-49.8478.1274.5674.1478.5677.3479.80
VQAv2FT-----72.4959.8172.1273.8975.0376.44
SNLI-VEFT-----78.8973.5378.7479.1179.6380.01
NLVR2FT-------77.4577.9179.7280.25
I2T@B4FT-------38.0038.5038.1039.20
I2T@CFT-------126.96128.66128.91130.44
T2I@IS ↑FT--------17.55-22.41
T2I@FID ↓FT--------23.58-19.82
VQAv2FS-------54.6954.8551.8854.90
SNLI-VEFS-------67.4567.5767.9668.04
NLVR2FS-------51.4651.1951.4951.52
I2T@B4FS-------35.9036.4032.7037.00
I2T@CFS-------117.75120.43112.20122.56
I2T@B4ZS-------11.4010.8013.8018.70
I2T@CZS-------45.3045.5556.6968.44
VUE@B4ZS-------9.209.4010.4010.60
VUE@CZS-------33.9234.8039.7540.83
NoCaps@CZS-------48.0545.5148.6458.58
T2I@IS ↑ZS--------14.91-17.44
T2I@FID ↓ZS--------29.83-24.21
Multi-modal Avg.-------57.8958.3059.1362.50
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.17, + 0.563, + 0.825, + 0.665 + ], + "angle": 0, + "content": "Table 2: Experimental results on vision, language and multi-modal downstream tasks. @B4, @C denote BLEU@4, CIDEr, respectively. I2T and T2I denote image-to-text and text-to-image tasks. Multi-modal Avg. is the average score of all multi-modal tasks. FT: fine-tuning, LE: linear evaluation, FS: few-shot, ZS: zero-shot. Under few-shot setting, we fine-tune a pre-trained model for 3 epochs on \\(1\\%\\) training data. Results for BERT are obtained from Iki & Aizawa (2021). Results for RoBERTa are from its corresponding paper (Liu et al., 2019) and they use the mid-training (Phang et al., 2018) on MNLI for RTE, MRPC and STS-B while other models (e.g., BERT, SimVLM, DAVinci) do not apply this trick. Results for ViT are from ViT-Base/16 model (Radford et al., 2021). We list the reported performance of text-only and image-only models in grey for reference." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.669, + 0.825, + 0.74 + ], + "angle": 0, + "content": "a warm start provides a reliable visual representation and helps the convergence. All pre-training experiments are conducted on 32GB NVIDIA V100 GPUs. The model trained on the largest data takes around 10 days on 1024 V100 GPUs. We adopt dynamic masking in our experiments, where the masking ratio is randomly sampled from a uniform distribution \\( \\mathrm{U}(0,1) \\). More details of the fine-tuning, network architectures, and hyper-parameters setups are given in Appendix A.1." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.752, + 0.393, + 0.766 + ], + "angle": 0, + "content": "4.4 EXPERIMENTAL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.774, + 0.825, + 0.83 + ], + "angle": 0, + "content": "We extensively compare the performance of DAVINCI with state-of-the-art unified foundation models and vision-language models across vision, language, and multi-modal tasks, accessing five different abilities: (1) text understanding, (2) image understanding, (3) text-to-image generation, (4) image-to-text generation, (5) multi-modal understanding." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.876 + ], + "angle": 0, + "content": "Overall Performance We report the overall performance on 8 language tasks from GLUE, 12 vision tasks, 3 multi-modal tasks, 3 image-to-text tasks and 1 text-to-image task. We compare our model with FLAVA and SimVLM \\(^3\\), two of the most recent and best performing vision-language" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.825, + 0.925 + ], + "angle": 0, + "content": "3Since SimVLM is not open-sourced and uses 1.8B in-house data without telling the exact size of its base model, we replicate it on our data with the same size as DAVINCI. Experiments on SimVLMsmall ensure our successful reproduction (see Appendix A.4)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.479, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.172, + 0.089, + 0.827, + 0.37 + ], + "angle": 0, + "content": "
Model#Params.Text MNLI AccVision ImageNet LE / FTImage2Text COCO B@4 / CText2Image COCO IS↑ / FID↓Multi-modal
VQA test-dev / test-standardNLVR2 dev / test-P
Encoder-only Multi-modal Models
VinVL (Zhang et al., 2021b)157M--38.2 / 129.3-75.95 / 76.1282.05 / 83.08
ViLT (Kim et al., 2021)88M----70.85 / -74.91 / 75.57
ALBEF (Li et al., 2021)210M----75.84 / 76.0482.55 / 83.14
X-VLM (Zeng et al., 2021)240M--39.6 / 132.6-78.22 / 78.3784.41 / 84.76
VLMO (Wang et al., 2021a)----76.64 / 76.8982.77 / 83.34
Encoder-Decoder Multi-modal Models
UNICORN (Yang et al., 2021)--35.8 / 119.1-69.20 / 69.40-/-
Uni-ENDN (Li et al., 2022b)110M----72.20 / 72.50-/-
Pixel-BERT (Huang et al., 2020)144M----74.45 / 74.5576.50 / 77.20
E2E-VLP (Xu et al., 2021a)94M--36.2 / 117.3-73.25 / 73.6777.25 / 77.96
VL-T5 (Cho et al., 2021)220M--34.5 / 116.5-- / 70.3074.60 / 73.60
VL-BART (Cho et al., 2021)220M--35.1 / 116.6-- / 71.3071.70 / 70.30
Text2Image Models
DM-GAN (Zhu et al., 2019)---32.20 / 26.50-/--/-
DALLE (Ramesh et al., 2021) (250M)12B---17.90 / 27.50-/--/-
DALLE (Ramesh et al., 2021) (640M)†82M---15.79 / 29.22-/--/-
CogView (Ding et al., 2021)4B---18.20 / 27.10-/--/-
Unified Models
Unifying (Huang et al., 2021)228M--37.3 / 122.6- / 29.90-/--/-
FLAVA (Singh et al., 2021)240M80.3375.54 / ---72.80 / 72.49-/-
SimVLM (Wang et al., 2021b) (640M)†153M83.2776.04 / -38.5 / 128.7-75.04 / 75.0378.82 / 79.72
SimVLM (Wang et al., 2021b) (1.8B)83.4080.60 / -39.0 / 134.8-77.87 / 78.1481.72 / 81.77
OFA (Wang et al., 2022)182M84.30- / 82.2041.0 / 138.221.50* / 20.80*78.00 / 78.10-/-
Florence (Yuan et al., 2021)637M-- / 90.05-/--/-80.16 / 80.36-/-
DAVINCI154M83.1378.81 / 83.9239.2 / 130.417.44 (22.41*) / 24.21 (19.82*)76.32 / 76.4480.03 / 80.25
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.374, + 0.825, + 0.464 + ], + "angle": 0, + "content": "Table 3: Comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks. All results are from base-size models. LE and FT denote linear evaluation and fine-tuning performance, respectively. Image2Text results are reported without CIDEr optimization. \\(\\dagger\\) are our reproduced models. \\* are the results after fine-tuning. SimVLM (1.8B) and OFA are pre-trained with much larger corpus or human-labeled data of many downstream tasks, and thus they are not comparable and are labeled in gray. Florence (Yuan et al., 2021) is pre-trained with much larger model size (Florence-CoSwin-H, 637M) and more pre-training data (900M), so the numbers are in grey. bold denotes the best across unified models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.475, + 0.825, + 0.643 + ], + "angle": 0, + "content": "foundation models. We also include comparisons with some baseline models (e.g., MIM, MLM, CLIP). There are several observations. First, DAVINCI (column 8) outperforms FLAVA (column 3) and SimVLM (column 7) across almost all tasks, providing a new and stronger unified foundation model. Compared with FLAVA, DAVINCI improves an average of \\(2.04\\%\\), \\(1.68\\%\\) on language and vision tasks, respectively. Compared with SimVLM, DAVINCI achieves comparable results on language tasks \\((+0.13\\%)\\) while performing much better on vision tasks \\((+2.46\\%)\\). To make a fair comparison in terms of similar data size, we compare FLAVA (70M data, column 3) with DAVINCI (46.4M data, column 6). It is observed that DAVINCI still outperforms FLAVA even with much less data. Considering the multi-modal tasks, DAVINCI consistently outperforms FLAVA and SimVLM on VQA and VE. 
Note that FLAVA is incapable of generation and SimVLM cannot generate images; only DAVINCI is competent to all tasks and demonstrates a stronger capability of unifying vision and language tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.651, + 0.827, + 0.819 + ], + "angle": 0, + "content": "Zero-shot and Few-shot Transfer One of the critical benefits of generative pre-trained vision-language models is the good generalization ability on zero-shot and few-shot tasks. For zero-shot transfer, two out-of-domain distribution datasets are considered (NoCaps and VLINE), with results shown in Table 2. First, DAVinci outperforms SimVLM on both zero-shot and few-shot settings, demonstrating its better transfer capabilities. It also shows the effectiveness and robustness of the synergy of our proposed language supervision and image supervision. Second, it is observed that the performance improvement is bigger on 647.7M data (column 7 v.s. column 8) than 46.4M data (column 5 v.s. column 6). This shows DAVinci generalizes well with the increase of large-scale data. We even observe some performance drops on small data (46.4M) but excellent performance improvements on large data (647.7M). It is consistent with the recent observation that zero-shot ability could only be triggered with large pre-training data (Wei et al., 2022) and scaling to large data and keeping simple training objectives benefit generalization performance (Wang et al., 2021b)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Comparison with state-of-the-art vision-language models In addition to unified vision-language foundation models, we compare DAVinci with state-of-the-art vision-language models as well. The results are shown in Table 2. DAVinci demonstrates its superiority in vision understanding and text-to-image generation. 
Compared with current popular auto-regressive image generation models like DALLE and CogView, our model achieves comparable IS and better FID scores with significantly fewer model parameters than DALLE and CogView. Note that the original DALLE is implemented based on VQVAE, so here, we compare our model with reproduced VQGAN-based DALLE with" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.176, + 0.1, + 0.825, + 0.299 + ], + "angle": 0, + "content": "
SettingsPre-training Data#Image#CaptionModelsCOCO CaptionsVQASNLI-VENLVR2
IDSWDORDVDLWDB@4 / CAccAccAcc
10.2M1.3MSimVLM35.2 / 115.0668.8976.1071.21
DAVINCI35.8 / 117.3069.2576.2272.55
215.1M16.2MSimVLM37.0 / 122.6371.5478.3675.50
DAVINCI37.4 / 123.1171.8878.6277.46
32.7M18.3MSimVLM38.2 / 123.8569.5776.6570.50
DAVINCI38.0 / 124.2070.0276.9272.01
413.4M14.5MSimVLM36.2 / 119.7370.5376.9073.25
DAVINCI36.6 / 121.2771.2377.4074.62
530.5M46.4MSimVLM38.5 / 128.1271.8478.8176.75
DAVINCI38.6 / 128.7373.5379.2477.55
6601.3M601.3MSimVLM37.3 / 123.8173.7378.7977.69
DAVINCI37.6 / 124.4273.9579.2978.54
7601.5M602.6MSimVLM37.9 / 125.5074.6479.0577.68
DAVINCI38.1 / 125.9174.9179.2278.12
8631.8M647.7MSimVLM38.5 / 128.2575.0479.3278.82
DAVINCI39.1 / 130.2176.3280.0480.03
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.304, + 0.825, + 0.331 + ], + "angle": 0, + "content": "Table 4: Evaluation on downstream tasks using COCO Captions, VQA, SNLI-VE, and NLVR2. #Image and #Caption denote the numbers of images and image-text pairs that are used in the pre-training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.343, + 0.825, + 0.372 + ], + "angle": 0, + "content": "similar model sizes, and find DAVinci still achieves a significant improvement over it. Generated images are presented in Appendix A.11 for further qualitative comparison." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.827, + 0.472 + ], + "angle": 0, + "content": "On multi-modal tasks such as VQA, DAVINCI not only outperforms unified models (e.g., SimVLM (640M)) and other encoder-decoder multi-modal models (e.g., E2E-VLP, VL-T5), but also achieves competitive performance with many conventional encoder-only multi-model models (e.g., VinVL, ALBEF, VLMO). Note that SimVLM (1.8B) and OFA are not directly comparable because SimVLM uses 1.8B in-house image-text pairs, and OFA uses human-labeled data of many downstream tasks during pre-training. Even though, we still report their results for reference and observe a better performance on ImageNet fine-tuning and text-to-image generation than OFA." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.474, + 0.825, + 0.517 + ], + "angle": 0, + "content": "The advantages of image generation over DALLE / CogView, the superiority of image-to-text over SimVLM, and the competitive performance with conventional multi-modal models demonstrate the synergistic effect of our proposed PLM (language supervision) and PIM (image supervision)." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.528, + 0.293, + 0.543 + ], + "angle": 0, + "content": "5 ANALYSIS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.553, + 0.476, + 0.567 + ], + "angle": 0, + "content": "5.1 IMPACT OF PRE-TRAINING DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.575, + 0.827, + 0.784 + ], + "angle": 0, + "content": "In this section, we disclose the impact of various multi-modal data sources for VLMs. We choose SimVLM and DAVinci as our baseline models for their competitive performance, the capability of training from scratch, and the scalability of extending to the noisy large-scale corpus. We use the same text corpus, \\( C4 \\), for all the variations. The results are shown in Table 4. In general, the performance is increased along with the data size, and DAVinci consistently outperforms SimVLM on almost all the data settings and all the downstream tasks. Both object-region data and vision data are clearly helpful in vision language pre-training (refer to settings 3 and 4). We surprisingly observe that models pre-trained on object-region data with much fewer images performs even better than models pre-trained with small-scale web data on the COCO Caption task (refer to settings 2 and 3). Although large-scale web data is usually noisier than small datasets (e.g., ID, ORD, VD, and SWD), it is powerful for multi-modal pre-training (refer to settings 5 and 8). We believe our analysis has broader impacts on the research of VLMs in the community. First, this enables fair comparisons for pre-trained models in the same data settings. Second, one can focus on the model designs at part or all of the data settings according to available computation resources. Third, we reveal that object-region and vision data, normally overlooked in VLM pre-training, also play a significant role." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.789, + 0.343, + 0.803 + ], + "angle": 0, + "content": "5.2 ABLATION STUDY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.81, + 0.827, + 0.88 + ], + "angle": 0, + "content": "To verify the contributions of different modules in our framework, we ablate them and evaluate DAVINCI on five kinds of downstream tasks: language understanding (MNLI, SST-2), vision understanding (ImageNet, Food101, CIFAR10), multi-modal understanding (VQAv2, SNLI-VE, NLVR2), image-to-text generation (COCO Captions), and text-to-image generation. Experiments are conducted with the same model architecture on in-domain data (ID). The results are shown in Table 5." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Effects of Objectives First, all three objectives (PLM, PIM, and Text2Text) bring improvement and the combination confirms a synergistic effect. Second, it is observed that without PLM, the performance decreases significantly on multi-modal understanding and image-to-text generation," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.478, + 0.047 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.103, + 0.83, + 0.267 + ], + "angle": 0, + "content": "
MethodCOCOB@4 / CVQAAccSNLI-VEAccNLVR2AccImageNetAccFood101AccCIFAR10AccMNLIAccSST-2AccT2IIS / FID
No Pre-training32.1 / 96.7152.7354.2351.08-*-*-*66.3279.84-*
DAVINCI35.8 / 117.3069.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
-PLM33.6 / 111.1765.1573.9153.2848.0574.1772.9881.4289.9710.26 / 59.64
-PIM34.3 / 116.5868.8975.7969.7845.5471.1870.1181.9490.53-*
-Text2Text34.1 / 115.2168.1475.3870.3448.6774.2673.2376.4888.1412.07 / 54.77
PL=035.4 / 117.0066.9075.5271.0548.4568.1873.7378.6989.0011.76 / 55.38
PL=15%35.7 / 116.5369.1675.0970.4441.5852.1568.5579.0289.46-*
PL=50%35.1 / 115.5368.5574.5456.9237.6949.1670.1578.5989.69-*
MIM34.7 / 113.468.1875.3469.6648.4656.9572.7981.7289.849.50 / 74.13
In-painting34.5 / 112.567.4675.4168.6647.5054.3871.2081.5589.849.97 / 68.15
Token Projection17.7 / 49.252.1371.1152.0115.1125.6261.0182.0190.2511.89 / 60.96
Patch Projection25.7 / 79.557.6971.9257.4536.2344.3169.4081.7390.0511.41 / 61.87
" + }, + { + "type": "table_caption", + "bbox": [ + 0.174, + 0.271, + 0.825, + 0.348 + ], + "angle": 0, + "content": "Table 5: Ablation study on COCO Captions, VQA, SNLI-VE, NLVR2, ImageNet, Food101, CIFAR10, MNLI, SST-2, and text-to-image (T2I) generation. “-” denotes removing the corresponding objective. PL denotes the prefix length under fixed masking ratio settings. Because the linear probe requires a pre-trained model to be frozen, “No Pre-training” results on ImageNet, Food101, and CIFAR10 are not reported and labeled by * . For T2I, we report the zero-shot results. Note that the following four variants cannot perform zero-shot text-to-image generation (labeled by *): (1) No Pre-training, (2) DAVinci - PIM, (3) PL=15%, and (4) PL=50%." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.364, + 0.825, + 0.448 + ], + "angle": 0, + "content": "indicating the importance of language supervision. Third, PIM brings more gains than PLM and text2text on vision understanding, which is expected because it enhances the vision encoding ability with image supervision. In addition, the text2text objective is important to text understanding. Last, on the text-to-image generation task, it is observed that PLM is also helpful, confirming the synergistic effect of PIM and PLM again. Intuitively, PIM and PLM can help each other learn the alignments of visual and textual features, which will benefit both image generation and other multi-modal tasks." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.454, + 0.825, + 0.552 + ], + "angle": 0, + "content": "Effects of Masking Ratios Our model adopts dynamic masking ratios as described in Section 3.2. We also conduct experiments with static masking ratios with the prefix length fixed to 0, \\(15\\%\\), and \\(50\\%\\). The comparison between dynamic masking ratios and static masking ratios \\((\\mathrm{PL} = 0, 15\\%,\\) and \\(50\\%)\\) reveals that dynamic masking is better. 
We attribute this improvement to the smoothing effects of dynamic masking ratios. We also find that the standard language model \\((\\mathrm{PL} = 0)\\) performs worse on VQA, Food101, and text-to-image generation, which is consistent with the observation in SimVLM. In our experiments, the masking ratio is sampled from a uniform distribution \\(\\mathrm{U}(0,1)\\)." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.558, + 0.825, + 0.642 + ], + "angle": 0, + "content": "Effects of Masking Strategies Here we also compared three different masking strategies: 1) masked image modeling (randomly masking some patches), 2) in-painting (randomly masking some continuous spans in the middle of the image), and 3) suffix-painting (ours). The results are shown in Table 5. Both masked image modeling and in-painting are effective and competitive. It is observed that suffix-painting is better than masked image modeling and in-painting across all tasks, demonstrating that suffix-painting works well." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.648, + 0.825, + 0.746 + ], + "angle": 0, + "content": "Effects of Image Feature Extraction There are several different ways to extract image features. We compare three different image representation methods: 1) token projection (projecting the prefix tokens to the hidden dimension of the backbone network on the token-level), 2) patch projection (similar to ViT embedding, we split an image into fixed-size patches, embed each of them by a trainable linear projection on the pixel-level), and 3) ResNet feature extraction (ours). From the results in Table 5, we observed that ResNet feature extraction outperforms token projection and patch projection by a large margin. Therefore, we decided to adopt ResNet to extract image features." 
+ }, + { + "type": "text", + "bbox": [ + 0.174, + 0.748, + 0.825, + 0.777 + ], + "angle": 0, + "content": "We provide more details and discussions about the effects of compute (A.5), masking strategies (A.6), image feature extraction methods (A.7), and scaling effects of data size (A.8) in the Appendix." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.789, + 0.47, + 0.804 + ], + "angle": 0, + "content": "6 CONCLUSION AND DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.813, + 0.825, + 0.924 + ], + "angle": 0, + "content": "In this work, we first benchmark several settings on sequence-to-sequence vision-language pretraining in terms of pre-training dataset size, aligning SimVLM and our model on them. We propose a simple and unified generative pre-training model, DAVinci, to simultaneously leverage the language supervision and image supervision through two objectives under a unified framework: prefix language modeling and prefix image modeling. DAVinci is simple yet effective, demonstrating strong capabilities in both multi-modal writing and painting tasks. Experimental results explicitly imply that combining suffix caption generation and suffix image generation offers large gains on all benchmark settings. We also discussed limitations and future work in Appendix A.10." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.95, + 0.504, + 0.959 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.357, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.13, + 0.827, + 0.201 + ], + "angle": 0, + "content": "We thank the anonymous reviewers for their valuable suggestions. 
We would like to acknowledge Yan Zeng, Wenguan Huang, and Zhi Zhang at ByteDance, and Zhiling Zhang at Shanghai Jiao Tong University for their generous assistance in data collection and helpful discussions. We also wish to thank Hang Li at ByteDance, and Tong Zhang at HKUST for inspiring feedback, valuable comments, and great support to this work." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.218, + 0.287, + 0.234 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.245, + 0.826, + 0.289 + ], + "angle": 0, + "content": "Armen Aghajanyan, Bernie Huang, Candace Ross, Vladimir Karpukhin, Hu Xu, Naman Goyal, Dmytro Okhonko, Mandar Joshi, Gargi Ghosh, Mike Lewis, et al. Cm3: A causal masked multimodal model of the internet. arXiv preprint arXiv:2201.07520, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.299, + 0.827, + 0.342 + ], + "angle": 0, + "content": "Eneko Agirre, Lluís Màrquez, and Richard Wicentowski (eds.). Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007), Prague, Czech Republic, 2007. Association for Computational Linguistics. URL https://aclanthology.org/S07-1000." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.353, + 0.827, + 0.424 + ], + "angle": 0, + "content": "Harsh Agrawal, Peter Anderson, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, and Stefan Lee. nocaps: novel object captioning at scale. In 2019 IEEE/CVF International Conference on Computer Vision, ICCV 2019, Seoul, Korea (South), October 27 - November 2, 2019, pp. 8947-8956. IEEE, 2019. doi: 10.1109/ICCV.2019.00904. URL https://doi.org/10.1109/ICCV.2019.00904." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.435, + 0.827, + 0.464 + ], + "angle": 0, + "content": "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. ArXiv preprint, abs/1607.06450, 2016. URL https://arxiv.org/abs/1607.06450." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.475, + 0.827, + 0.503 + ], + "angle": 0, + "content": "Hangbo Bao, Li Dong, and Furu Wei. BEiT: Bert pre-training of image transformers. arXiv preprint, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.515, + 0.825, + 0.544 + ], + "angle": 0, + "content": "Luisa Bentivogli, Peter Clark, Ido Dagan, and Danilo Giampiccolo. The fifth pascal recognizing textual entailment challenge. In TAC, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.555, + 0.827, + 0.597 + ], + "angle": 0, + "content": "Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, pp. 446-461. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.609, + 0.827, + 0.763 + ], + "angle": 0, + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/bit/1457c0d6bcbd4967418bf8ac142f64a-AAbstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.774, + 0.825, + 0.817 + ], + "angle": 0, + "content": "Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. 
Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9650-9660, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.828, + 0.825, + 0.871 + ], + "angle": 0, + "content": "Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. arXiv preprint arXiv:2209.06794, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO Captions: Data collection and evaluation server. arXiv preprint, 2015." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.245, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. UNITER: Universal image-text representation learning. In European Conference on Computer Vision (ECCV), 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.827, + 0.227 + ], + "angle": 0, + "content": "Jaemin Cho, Jie Lei, Hao Tan, and Mohit Bansal. Unifying vision-and-language tasks via text generation. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 1931-1942. PMLR, 2021. 
URL http://proceedings.mlr.press/v139/cho21a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.234, + 0.827, + 0.291 + ], + "angle": 0, + "content": "Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. ArXiv preprint, abs/2204.02311, 2022. URL https://arxiv.org/abs/2204.02311." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.299, + 0.827, + 0.357 + ], + "angle": 0, + "content": "Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2014, Columbus, OH, USA, June 23-28, 2014, pp. 3606-3613. IEEE Computer Society, 2014. doi: 10.1109/CVPR.2014.461. URL https://doi.org/10.1109/CVPR.2014.461." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.365, + 0.825, + 0.408 + ], + "angle": 0, + "content": "Adam Coates, Andrew Ng, and Honglak Lee. An analysis of single-layer networks in unsupervised feature learning. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 215-223. JMLR Workshop and Conference Proceedings, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.416, + 0.825, + 0.446 + ], + "angle": 0, + "content": "Ido Dagan, Oren Glickman, and Bernardo Magnini. The pascal recognising textual entailment challenge. In Machine Learning Challenges Workshop, pp. 177-190. Springer, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.453, + 0.827, + 0.496 + ], + "angle": 0, + "content": "Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. Coatnet: Marrying convolution and attention for all data sizes. Advances in Neural Information Processing Systems, 34:3965-3977, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.504, + 0.827, + 0.59 + ], + "angle": 0, + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://aclanthology.org/N19-1423." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.598, + 0.825, + 0.641 + ], + "angle": 0, + "content": "Shizhe Diao, Jiaxin Bai, Yan Song, Tong Zhang, and Yonggang Wang. Zen: Pre-training chinese text encoder enhanced by n-gram representations. In Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 4729-4740, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.649, + 0.827, + 0.719 + ], + "angle": 0, + "content": "Shizhe Diao, Ruijia Xu, Hongjin Su, Yilei Jiang, Yan Song, and Tong Zhang. Taming pre-trained language models with n-gram representations for low-resource domain adaptation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 3336-3349, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.728, + 0.827, + 0.771 + ], + "angle": 0, + "content": "Shizhe Diao, Zhichao Huang, Ruijia Xu, Xuechun Li, Yong Lin, and Tong Zhang. Black-box prompt learning for pre-trained language models. Transactions on Machine Learning Research, 2023. URL https://openreview.net/forum?id=IvsGP7xRvm." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.779, + 0.827, + 0.823 + ], + "angle": 0, + "content": "Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, et al. Cogview: Mastering text-to-image generation via transformers. Advances in Neural Information Processing Systems, 34, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.831, + 0.827, + 0.874 + ], + "angle": 0, + "content": "William B. Dolan and Chris Brockett. Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005), 2005. URL https://aclanthology.org/I05-5002." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. Unified language model pre-training for natural language understanding and generation. In NeurIPS, pp. 13042-13054, 2019." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.177 + ], + "angle": 0, + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. 
URL https://openreview.net/forum?id=YicbFdNTTy." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.182, + 0.827, + 0.226 + ], + "angle": 0, + "content": "Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873-12883, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.233, + 0.825, + 0.276 + ], + "angle": 0, + "content": "Zhiyi Fu, Wangchunshu Zhou, Jingjing Xu, Hao Zhou, and Lei Li. Contextual representation learning beyond masked language modeling. In ACL (1), pp. 2701-2714. Association for Computational Linguistics, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.284, + 0.827, + 0.341 + ], + "angle": 0, + "content": "Danilo Giampiccolo, Bernardo Magnini, Ido Dagan, and Bill Dolan. The third PASCAL recognizing textual entailment challenge. In Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 1-9, Prague, 2007. Association for Computational Linguistics. URL https://aclanthology.org/W07-1401." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.349, + 0.827, + 0.393 + ], + "angle": 0, + "content": "Ross B. Girshick. Fast R-CNN. In 2015 IEEE International Conference on Computer Vision, ICCV 2015, Santiago, Chile, December 7-13, 2015, pp. 1440-1448. IEEE Computer Society, 2015. doi: 10.1109/ICCV.2015.169. URL https://doi.org/10.1109/ICCV.2015.169." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.4, + 0.828, + 0.456 + ], + "angle": 0, + "content": "Priya Goyal, Piotr Dollár, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch sgd: Training imagenet in 1 hour. ArXiv preprint, abs/1706.02677, 2017a. URL https://arxiv.org/abs/1706.02677." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.465, + 0.828, + 0.536 + ], + "angle": 0, + "content": "Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the V in VQA matter: Elevating the role of image understanding in visual question answering. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pp. 6325-6334. IEEE Computer Society, 2017b. doi: 10.1109/CVPR.2017.670. URL https://doi.org/10.1109/CVPR.2017.670." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.544, + 0.825, + 0.587 + ], + "angle": 0, + "content": "R Bar Haim, Ido Dagan, Bill Dolan, Lisa Ferro, Danilo Giampiccolo, Bernardo Magnini, and Idan Szpektor. The second pascal recognising textual entailment challenge. In Proceedings of the Second PASCAL Challenges Workshop on Recognising Textual Entailment, volume 7, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.595, + 0.827, + 0.651 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 770-778. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.90. URL https://doi.org/10.1109/CVPR.2016.90." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.66, + 0.828, + 0.73 + ], + "angle": 0, + "content": "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross B. Girshick. Momentum contrast for unsupervised visual representation learning. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 9726-9735. IEEE, 2020. doi: 10.1109/CVPR42600.2020.00975. URL https://doi.org/10.1109/CVPR42600.2020.00975." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.738, + 0.825, + 0.782 + ], + "angle": 0, + "content": "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16000-16009, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.789, + 0.828, + 0.887 + ], + "angle": 0, + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6626-6637, 2017. URL https://proceedings.neurips.cc/paper/2017/ hash/8ald694707eb0fefe65871369074926d-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.896, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Kashmir Hill and Jeremy White. Designed to deceive: Do these people look real to you. The New York Times, 11, 2020." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Yupan Huang, Hongwei Xue, Bei Liu, and Yutong Lu. Unifying multimodal transformer for bidirectional image and text generation. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 1138-1147, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.826, + 0.187 + ], + "angle": 0, + "content": "Zhicheng Huang, Zhaoyang Zeng, Bei Liu, Dongmei Fu, and Jianlong Fu. Pixel-BERT: Aligning image pixels with text by deep multi-modal transformers. arXiv preprint, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.197, + 0.826, + 0.241 + ], + "angle": 0, + "content": "Taichi Iki and Akiko Aizawa. Effect of visual extensions on natural language understanding in vision-and-language models. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 2189-2196, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.25, + 0.826, + 0.281 + ], + "angle": 0, + "content": "Shankar Iyer, Nikhil Dandekar, Kornél Csernai, et al. First quora dataset release: Question pairs. data.quora.com, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.29, + 0.826, + 0.375 + ], + "angle": 0, + "content": "Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 4904-4916. PMLR, 2021. URL http://proceedings.mlr.press/v139/jia21b.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.385, + 0.826, + 0.457 + ], + "angle": 0, + "content": "Wonjae Kim, Bokyung Son, and Ildoo Kim. Vilt: Vision-and-language transformer without convolution or region supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 5583-5594. PMLR, 2021. URL http://proceedings.mlr.press/v139/kim21k.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.467, + 0.826, + 0.511 + ], + "angle": 0, + "content": "Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pp. 554-561, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.52, + 0.826, + 0.537 + ], + "angle": 0, + "content": "Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.546, + 0.673, + 0.562 + ], + "angle": 0, + "content": "Yann LeCun and Corinna Cortes. MNIST handwritten digit database. 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.572, + 0.826, + 0.657 + ], + "angle": 0, + "content": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7871-7880, Online, 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.703. URL https://aclanthology.org/2020.acl-main.703." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.667, + 0.826, + 0.711 + ], + "angle": 0, + "content": "Junnan Li, Ramprasaath R Selvaraju, Akhilesh Deepak Gotmare, Shafiq Joty, Caiming Xiong, and Steven Hoi. Align before fuse: Vision and language representation learning with momentum distillation. In Conference on Neural Information Processing Systems (NeurIPS), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.721, + 0.826, + 0.751 + ], + "angle": 0, + "content": "Lianian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. VisualBERT: A simple and performant baseline for vision and language. arXiv preprint, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.76, + 0.826, + 0.817 + ], + "angle": 0, + "content": "Lianian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10965-10975, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.828, + 0.826, + 0.871 + ], + "angle": 0, + "content": "Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. Oscar: Object-semantics aligned pre-training for vision-language tasks. In European Conference on Computer Vision (ECCV), 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Yehao Li, Jiahao Fan, Yingwei Pan, Ting Yao, Weiyao Lin, and Tao Mei. Uni-eden: Universal encoder-decoder network by multi-granular vision-language pre-training. ArXiv preprint, abs/2201.04026, 2022b. URL https://arxiv.org/abs/2201.04026." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. RoBERTa: A robustly optimized bert pretraining approach. arXiv preprint, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.827, + 0.201 + ], + "angle": 0, + "content": "Ilya Loshchilov and Frank Hutter. 
Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019. URL https://openreview.net/forum?id=Bkg6RiCqY7." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.21, + 0.827, + 0.309 + ], + "angle": 0, + "content": "Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visi-olinguistic representations for vision-and-language tasks. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 13-23, 2019. URL https://proceedings.neurips.cc/paper/2019/bit/74d97b01eae257e44aa9d5bade97baf-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.319, + 0.825, + 0.349 + ], + "angle": 0, + "content": "Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.358, + 0.827, + 0.43 + ], + "angle": 0, + "content": "Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory F. Diamos, Erich Elsen, David García, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, and Hao Wu. Mixed precision training. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=r1gs9JgRZ." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.44, + 0.827, + 0.482 + ], + "angle": 0, + "content": "Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. 
In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722-729. IEEE, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.493, + 0.825, + 0.523 + ], + "angle": 0, + "content": "Rui Pan, Shizhe Diao, Jianlin Chen, and Tong Zhang. Extremebert: A toolkit for accelerating pretraining of customized bert. arXiv preprint arXiv:2211.17201, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.532, + 0.827, + 0.589 + ], + "angle": 0, + "content": "Omkar M. Parkhi, Andrea Vedaldi, Andrew Zisserman, and C. V. Jawahar. Cats and dogs. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, Providence, RI, USA, June 16-21, 2012, pp. 3498-3505. IEEE Computer Society, 2012. doi: 10.1109/CVPR.2012.6248092. URL https://doi.org/10.1109/CVPR.2012.6248092." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.599, + 0.827, + 0.685 + ], + "angle": 0, + "content": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 2227-2237, New Orleans, Louisiana, 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1202. URL https://aclanthology.org/N18-1202." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.694, + 0.825, + 0.723 + ], + "angle": 0, + "content": "Jason Phang, Thibault Févry, and Samuel R. Bowman. Sentence encoders on stilts: Supplementary training on intermediate labeled-data tasks. ArXiv, abs/1811.01088, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.733, + 0.827, + 0.763 + ], + "angle": 0, + "content": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. 2018." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.772, + 0.827, + 0.87 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8748-8763. PMLR, 2021. URL http://proceedings.mlr.press/v139/radford21a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446, 2021." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research (JMLR), 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.826, + 0.215 + ], + "angle": 0, + "content": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 
SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pp. 2383-2392, Austin, Texas, 2016. Association for Computational Linguistics. doi: 10.18653/v1/D16-1264. URL https://aclanthology.org/D16-1264." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.224, + 0.827, + 0.295 + ], + "angle": 0, + "content": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8821-8831. PMLR, 2021. URL http://proceedings.mlr.press/v139/ramesh21a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.305, + 0.827, + 0.336 + ], + "angle": 0, + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.344, + 0.827, + 0.415 + ], + "angle": 0, + "content": "Steven J. Rennie, Etienne Marcheret, Youssef Mroueh, Jerret Ross, and Vaibhava Goel. Self-critical sequence training for image captioning. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pp. 1179-1195. IEEE Computer Society, 2017. doi: 10.1109/CVPR.2017.131. URL https://doi.org/10.1109/CVPR.2017.131." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.425, + 0.827, + 0.469 + ], + "angle": 0, + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. 
International journal of computer vision, 115(3):211-252, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.479, + 0.826, + 0.522 + ], + "angle": 0, + "content": "Andrei A Rusu, Neil C Rabinowitz, Guillaume Desjardins, Hubert Soyer, James Kirkpatrick, Koray Kavukcuoglu, Razvan Pascanu, and Raia Hadsell. Progressive neural networks. arXiv preprint arXiv:1606.04671, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.532, + 0.827, + 0.588 + ], + "angle": 0, + "content": "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.599, + 0.827, + 0.684 + ], + "angle": 0, + "content": "Tim Salimans, Ian J. Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. In Daniel D. Lee, Masashi Sugiyama, Ulrike von Luxburg, Isabelle Guyon, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, pp. 2226-2234, 2016. URL https://proceedings.neurips.cc/paper/2016/black/8a3363abe792db2d8761d6403605aab7-Abtract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.693, + 0.825, + 0.723 + ], + "angle": 0, + "content": "Roy Schwartz, Jesse Dodge, Noah A Smith, and Oren Etzioni. Green ai. Communications of the ACM, 63(12):54-63, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.734, + 0.825, + 0.777 + ], + "angle": 0, + "content": "Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. ArXiv preprint, abs/2112.04482, 2021. 
URL https://arxiv.org/abs/2112.04482." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.787, + 0.827, + 0.858 + ], + "angle": 0, + "content": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pp. 1631-1642, Seattle, Washington, USA, 2013. Association for Computational Linguistics. URL https://aclanthology.org/D13-1170." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.868, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. VL-BERT: pretraining of generic visual-linguistic representations. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020. URL https://openreview.net/forum?id=SygXPaEYvH." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.175 + ], + "angle": 0, + "content": "Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6418-6428, Florence, Italy, 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1644. URL https://aclanthology.org/P19-1644." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.183, + 0.827, + 0.253 + ], + "angle": 0, + "content": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 2818-2826. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.308. URL https://doi.org/10.1109/CVPR.2016.308." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.262, + 0.827, + 0.333 + ], + "angle": 0, + "content": "Hao Tan and Mohit Bansal. LXMERT: Learning cross-modality encoder representations from transformers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 5100-5111, Hong Kong, China, 2019a. Association for Computational Linguistics. doi: 10.18653/v1/D19-1514. URL https://aclanthology.org/D19-1514." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.342, + 0.827, + 0.412 + ], + "angle": 0, + "content": "Hao Tan and Mohit Bansal. LXMERT: Learning cross-modality encoder representations from transformers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 5100-5111, Hong Kong, China, 2019b. Association for Computational Linguistics. doi: 10.18653/v1/D19-1514. URL https://aclanthology.org/D19-1514." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.421, + 0.827, + 0.504 + ], + "angle": 0, + "content": "Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. 
In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10347-10357. PMLR, 2021. URL http://proceedings.mlr.press/v139/touvron21a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.513, + 0.827, + 0.598 + ], + "angle": 0, + "content": "Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6306-6315, 2017. URL https://proceedings.neurips.cc/paper/2017/bitical/7a98af17e63a0ac09ce2e96d03992fbc-AAbstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.607, + 0.827, + 0.704 + ], + "angle": 0, + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 5998-6008, 2017. URL https://proceedings.neurips.cc/paper/2017/bit/3f5ee243547dee91fbd053c1c4a845aa-Abstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.714, + 0.827, + 0.77 + ], + "angle": 0, + "content": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. 
OpenReview.net, 2019. URL https://openreview.net/forum?id=rJ4km2R5t7." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.779, + 0.827, + 0.835 + ], + "angle": 0, + "content": "Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. ArXiv preprint, abs/2202.03052, 2022. URL https://arxiv.org/abs/2202.03052." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.844, + 0.827, + 0.887 + ], + "angle": 0, + "content": "Wenhui Wang, Hangbo Bao, Li Dong, and Furu Wei. Vlmo: Unified vision-language pre-training with mixture-of-modality-experts. ArXiv preprint, abs/2111.02358, 2021a. URL https://arxiv.org/abs/2111.02358." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.896, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. Simvlm: Simple visual language model pretraining with weak supervision. arXiv preprint, 2021b." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.104, + 0.829, + 0.147 + ], + "angle": 0, + "content": "Alex Warstadt, Amanpreet Singh, and Samuel R. Bowman. Neural network acceptability judgments. Transactions of the Association for Computational Linguistics, 7:625-641, 2019. doi: 10.1162/tacl_a_00290. URL https://aclanthology.org/Q19-1040." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.83, + 0.228 + ], + "angle": 0, + "content": "Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. Emergent abilities of large language models. Transactions on Machine Learning Research, 2022. URL https://openreview.net/forum?id=yzkSU5zdwD. Survey Certification." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.235, + 0.828, + 0.321 + ], + "angle": 0, + "content": "Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 1112–1122, New Orleans, Louisiana, 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1101. URL https://aclanthology.org/N18-1101." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.329, + 0.829, + 0.387 + ], + "angle": 0, + "content": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. Google's neural machine translation system: Bridging the gap between human and machine translation. ArXiv preprint, abs/1609.08144, 2016. URL https://arxiv.org/abs/1609.08144." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.395, + 0.826, + 0.426 + ], + "angle": 0, + "content": "Ning Xie, Farley Lai, Derek Doran, and Asim Kadav. Visual entailment: A novel task for fine-grained image understanding. arXiv preprint, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.433, + 0.829, + 0.505 + ], + "angle": 0, + "content": "Canwen Xu, Wangchunshu Zhou, Tao Ge, Furu Wei, and Ming Zhou. 
BERT-of-theseus: Compressing BERT by progressive module replacing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 7859-7869, Online, 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.633. URL https://aclanthology.org/2020.emnlp-main.633." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.513, + 0.829, + 0.599 + ], + "angle": 0, + "content": "Haiyang Xu, Ming Yan, Chenliang Li, Bin Bi, Songfang Huang, Wenming Xiao, and Fei Huang. E2E-VLP: End-to-end vision-language pre-training enhanced by visual learning. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 503-513, Online, 2021a. Association for Computational Linguistics. doi: 10.18653/v1/2021.acl-long.42. URL https://aclanthology.org/2021.acl-long.42." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.607, + 0.828, + 0.638 + ], + "angle": 0, + "content": "Jingjing Xu, Wangchunshu Zhou, Zhiyi Fu, Hao Zhou, and Lei Li. A survey on green deep learning. ArXiv preprint, abs/2111.05193, 2021b. URL https://arxiv.org/abs/2111.05193." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.645, + 0.829, + 0.731 + ], + "angle": 0, + "content": "Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. Attngan: Fine-grained text to image generation with attentional generative adversarial networks. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pp. 1316-1324. IEEE Computer Society, 2018. doi: 10.1109/CVPR.2018.00143. URL http://openaccess.thecvf.com/content_cvpr_2018/html/Xu_AttnGAN_Fine-Grained_Text_CVPR_2018_paper.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.739, + 0.826, + 0.783 + ], + "angle": 0, + "content": "Zhengyuan Yang, Zhe Gan, Jianfeng Wang, Xiaowei Hu, Faisal Ahmed, Zicheng Liu, Yumao Lu, and Lijuan Wang. Crossing the format boundary of text and boxes: Towards unified vision-language modeling. ArXiv, abs/2111.12085, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.791, + 0.825, + 0.822 + ], + "angle": 0, + "content": "Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. ArXiv preprint, abs/1708.03888, 2017. URL https://arxiv.org/abs/1708.03888." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.829, + 0.829, + 0.874 + ], + "angle": 0, + "content": "Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.881, + 0.829, + 0.926 + ], + "angle": 0, + "content": "Ning Yu, Vladislav Skripniuk, Sahar Abdelnabi, and Mario Fritz. Artificial fingerprinting for generative models: Rooting deepfake attribution in training data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 14448-14457, 2021." 
+ }, + { + "type": "list", + "bbox": [ + 0.172, + 0.104, + 0.83, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.175 + ], + "angle": 0, + "content": "Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, Ce Liu, Mengchen Liu, Zicheng Liu, Yumao Lu, Yu Shi, Lijuan Wang, Jianfeng Wang, Bin Xiao, Zhen Xiao, Jianwei Yang, Michael Zeng, Luowei Zhou, and Pengchuan Zhang. Florence: A new foundation model for computer vision. arXiv preprint, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.183, + 0.826, + 0.226 + ], + "angle": 0, + "content": "Yan Zeng, Xinsong Zhang, and Hang Li. Multi-grained vision language pre-training: Aligning texts with visual concepts. ArXiv preprint, abs/2111.08276, 2021. URL https://arxiv.org/abs/2111.08276." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.234, + 0.829, + 0.293 + ], + "angle": 0, + "content": "Han Zhang, Weichong Yin, Yewei Fang, Lanxin Li, Boqiang Duan, Zhihua Wu, Yu Sun, Hao Tian, Hua Wu, and Haifeng Wang. Ernie-vilg: Unified generative pre-training for bidirectional vision-language generation. ArXiv preprint, abs/2112.15283, 2021a. URL https://arxiv.org/abs/2112.15283." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.3, + 0.826, + 0.344 + ], + "angle": 0, + "content": "Pengchuan Zhang, Xiujun Li, Xiaowei Hu, Jianwei Yang, Lei Zhang, Lijuan Wang, Yejin Choi, and Jianfeng Gao. VinVL: Revisiting visual representations in vision-language models. In Conference on Computer Vision and Pattern Recognition (CVPR), 2021b." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.351, + 0.829, + 0.438 + ], + "angle": 0, + "content": "Wangchunshu Zhou, Canwen Xu, Tao Ge, Julian J. McAuley, Ke Xu, and Furu Wei. BERT loses patience: Fast and robust inference with early exit. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/black/d4dd111a4fd973394238aca5c05bebe3-AAbstract.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.445, + 0.829, + 0.516 + ], + "angle": 0, + "content": "Wangchunshu Zhou, Tao Ge, Canwen Xu, Ke Xu, and Furu Wei. Improving sequence-to-sequence pretraining via sequence span rewriting. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 571-582, Online and Punta Cana, Dominican Republic, 2021a. Association for Computational Linguistics. doi: 10.18653/v1/2021.emnlp-main.45. URL https://aclanthology.org/2021.emnlp-main.45." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.524, + 0.829, + 0.581 + ], + "angle": 0, + "content": "Wangchunshu Zhou, Dong-Ho Lee, Ravi Kiran Selvam, Seyeon Lee, and Xiang Ren. Pre-training text-to-text transformers for concept-centric common sense. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021b. URL https://openreview.net/forum?id=3k20LAiHYL2." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.589, + 0.829, + 0.621 + ], + "angle": 0, + "content": "Wangchunshu Zhou, Canwen Xu, and Julian McAuley. BERT learns to teach: Knowledge distillation with meta learning. In ACL (1), pp. 7037-7049. Association for Computational Linguistics, 2022a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.627, + 0.826, + 0.657 + ], + "angle": 0, + "content": "Wangchunshu Zhou, Yan Zeng, Shizhe Diao, and Xinsong Zhang. Vlue: A multi-task benchmark for evaluating vision-language models. CoRR, abs/2205.15237, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.665, + 0.826, + 0.709 + ], + "angle": 0, + "content": "Xiao Zhou, Weizhong Zhang, Zonghao Chen, Shizhe Dao, and Tong Zhang. Efficient neural network training via forward and backward propagation sparsification. Advances in Neural Information Processing Systems, 34, 2021c." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.716, + 0.826, + 0.761 + ], + "angle": 0, + "content": "Xiao Zhou, Weizhong Zhang, Hang Xu, and Tong Zhang. Effective sparsification of neural networks with global sparsity constraint. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3599-3608, 2021d." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.768, + 0.826, + 0.798 + ], + "angle": 0, + "content": "Xiao Zhou, Renjie Pi, Weizhong Zhang, Yong Lin, and Tong Zhang. Probabilistic bilevel coreset selection. In International Conference on Machine Learning. PMLR, 2022c." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.805, + 0.829, + 0.891 + ], + "angle": 0, + "content": "Minfeng Zhu, Pingbo Pan, Wei Chen, and Yi Yang. DM-GAN: dynamic memory generative adversarial networks for text-to-image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 5802-5810. Computer Vision Foundation / IEEE, 2019. doi: 10.1109/CVPR.2019.00595. URL http://openaccess.thecvf.com/content_CVPR_2019/html/Zhu_DM-GAN_Dynamic_Memory_Generative_Adversarial_Networks_for_Text-To-Image_Synthesis_CVPR_2019_paper.html." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.891 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.3, + 0.119 + ], + "angle": 0, + "content": "A APPENDIX" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.133, + 0.457, + 0.147 + ], + "angle": 0, + "content": "A.1 DETAILS OF HYPER-PARAMETERS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.154, + 0.828, + 0.378 + ], + "angle": 0, + "content": "Pre-training Our model is a base-size Transformer implemented with a 6-layer encoder and a 6-layer decoder, 768 dimensions for hidden states, 512 for maximum input length, and 3072 for intermediate size. We train our model from scratch without initializing the Transformer encoder and decoder. The image encoder is initialized from ResNet-101 (He et al., 2016) with ImageNet weights since we find a warm start provides a reliable visual representation and helps the convergence. For models pre-training on large-scale data, we optimize 10 epochs while for other small-scale datasets, we optimize 40 epochs with the AdamW optimizer. The weight decay is set to 0.01 with \\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.999\\). The learning rate is 2e-4 with a warm-up period for the first \\(2\\%\\) steps and linearly decayed to 0 after \\(2\\%\\) of the total training steps. In each batch, there are 8,192 image-text pairs for text-to-image generation and image-to-text generation with 8,192 text-only documents for text-to-text generation. We use center-crop to resize each image to the size of \\(256\\times 256\\), which is the only data augmentation used during training. All pre-training experiments are conducted on 32GB NVIDIA V100 GPUs. 
We adopt mixed-precision (Micikevicius et al., 2018) to accelerate training and save memory. The model trained on the largest data takes around 10 days on 1024 V100 GPUs. The default settings are shown in Table 6. We adopt dynamic masking in our experiments, where the masking ratio is randomly sampled from a uniform distribution \\(\\mathrm{U}(0,1)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.386, + 0.825, + 0.472 + ], + "angle": 0, + "content": "Fine-tuning The learning rate is \\(\\in [1\\mathrm{e} - 5,5\\mathrm{e} - 5]\\) and our model is optimized by AdamW. Because the image resolution differs between pre-training and fine-tuning, the position parameters are adapted using linear interpolation. For all downstream tasks, we apply random resize crops and horizontal flips augmentation during training. All fine-tuning experiments are conducted on 32GB NVIDIA V100 GPUs. The default settings for text classification, image classification, multi-modal understanding and image-to-text generation are shown in Tables 7, 8, and 9, respectively." + }, + { + "type": "table", + "bbox": [ + 0.243, + 0.483, + 0.758, + 0.634 + ], + "angle": 0, + "content": "
configvalue
optimizerAdamW (Loshchilov & Hutter, 2019)
learning rate2e-4
weight decay0.01
optimizer momentumβ1, β2=0.9, 0.999
batch size8192
learning rate schedulelinear decay
warmup ratio (Goyal et al., 2017a)0.02
training epochs{10, 40}
augmentationRandomResizedCrop
" + }, + { + "type": "table_caption", + "bbox": [ + 0.411, + 0.643, + 0.585, + 0.657 + ], + "angle": 0, + "content": "Table 6: Pre-training setting." + }, + { + "type": "table", + "bbox": [ + 0.359, + 0.684, + 0.64, + 0.81 + ], + "angle": 0, + "content": "
configvalue
optimizerAdamW
learning rate{1e-5, 2e-5, 5e-5}
weight decay0.01
optimizer momentumβ1, β2=0.9, 0.999
batch size{16, 32, 64}
learning rate schedulelinear decay
warmup ratio0.1
training epochs{5, 10}
" + }, + { + "type": "table_caption", + "bbox": [ + 0.366, + 0.82, + 0.625, + 0.834 + ], + "angle": 0, + "content": "Table 7: Text classification: GLUE setting." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.861, + 0.461, + 0.875 + ], + "angle": 0, + "content": "A.2 DETAILS OF DOWNSTREAM TASKS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Language Understanding We conduct experiments on GLUE benchmark including MNLI (Williams et al., 2018), CoLA (Warstadt et al., 2019), MRPC (Dolan & Brockett, 2005), QQP (Iyer et al., 2017), SST-2 (Socher et al., 2013), QNLI (Rajpurkar et al., 2016)," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.342, + 0.102, + 0.658, + 0.24 + ], + "angle": 0, + "content": "
configvalue
optimizerLARS (You et al., 2017)
base learning rate0.1
weight decay0
optimizer momentum0.9
batch size16384
learning rate schedulecosine decay
warmup epochs10
training epochs90
augmentationRandomResizedCrop
" + }, + { + "type": "table_caption", + "bbox": [ + 0.337, + 0.251, + 0.656, + 0.265 + ], + "angle": 0, + "content": "Table 8: Image classification: Linear probing setting." + }, + { + "type": "table", + "bbox": [ + 0.253, + 0.278, + 0.747, + 0.43 + ], + "angle": 0, + "content": "
configvalue
optimizerAdamW
learning rate[1e-5, 5e-5]
weight decay0.02
optimizer momentumβ1, β2=0.9, 0.999
batch size1024
learning rate schedulelinear decay
warmup epochs[2, 5]
training epochs[5, 15]
label smoothing (Szegedy et al., 2016)0.1
augmentationRandomResizedCrop, HorizontalFlips
" + }, + { + "type": "table_caption", + "bbox": [ + 0.242, + 0.44, + 0.755, + 0.456 + ], + "angle": 0, + "content": "Table 9: Multi-modal understanding and image-to-text generation: fine-tuning setting." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.48, + 0.827, + 0.567 + ], + "angle": 0, + "content": "RTE (Dagan et al., 2005; Haim et al., 2006; Giampiccolo et al., 2007; Bentivogli et al., 2009), and STS-B (Agirre et al., 2007). We follow the practice of BART (Lewis et al., 2020) and feed the same input to the encoder and decoder, and the hidden state of the final decoder token is fed into a new multi-class linear classifier or regression head. MNLI results are an average of MNLI-m and MNLI-mm. MRPC and QQP results are average of accuracy and F1. Matthews correlation coefficient (MCC) is reported for CoLA and Pearson correlation coefficient (PCC) is reported for STS-B." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.573, + 0.828, + 0.701 + ], + "angle": 0, + "content": "Vision Understanding We conduct vision experiments in both fine-tuning and linear evaluation (linear eval). The linear evaluation follows a common practice (Caron et al., 2021; He et al., 2020; Singh et al., 2021) in self-supervised learning to evaluate the representation quality, where the pre-trained backbone model is frozen and a new linear classifier is appended on top of it. We choose 12 popular datasets: ImageNet (Russakovsky et al., 2015), Food101 (Bossard et al., 2014), CIFAR10 (Krizhevsky et al., 2009), CIFAR100 (Krizhevsky et al., 2009), Cars (Krause et al., 2013), Aircraft (Maji et al., 2013), DTD (Cimpoi et al., 2014), Pets (Parkhi et al., 2012), Flowers102 (Nilsback & Zisserman, 2008), MNIST (LeCun & Cortes, 2010), STL10 (Coates et al., 2011), and Country211 (Radford et al., 2021)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.707, + 0.828, + 0.849 + ], + "angle": 0, + "content": "Multi-modal Understanding We consider three popular multi-modal tasks: VQAv2 (Goyal et al., 2017b), SNLI-VE (Xie et al., 2019) and NLVR2 (Suhr et al., 2019) to evaluate our model's multi-modal understanding ability. For VQAv2, following ALBEF (Li et al., 2021), the image and question are fed to the encoder and the decoder generates answers based on the multi-modal embeddings. For SNLI-VE, we follow SimVLM (Wang et al., 2021b) to feed the image to the encoder and the text to the decoder. A classifier is appended on top of our pre-trained model, and it is trained to predict the result based on the last hidden states of the decoder. For NLVR2, two input pairs are constructed, each of them including one image and the textual description. The prediction is made based on the concatenation of these two embeddings following SimVLM (Wang et al., 2021b). The resolutions for VQAv2, SNLI-VE, NLVR2 are 480, 384, 384, respectively." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Text-to-Image Generation The text-to-image task requires the model to understand the textual instruction first and then draw the image according to the input's intention. The input text is fed to our encoder, and our decoder will generate visual tokens one by one. After obtaining visual tokens, they are decoded into a raw image by an image decoder. We directly use an off-the-shelf image decoder from VQGAN (Esser et al., 2021). 
Following (Ramesh et al., 2021) we directly evaluate our" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.102, + 0.817, + 0.322 + ], + "angle": 0, + "content": "
Data TypeDatasetImage Domain#Images#Captions#Total
In-Domain Data (ID)COCOCOCO110.3K551.7K1.3M
Visual GenomeCOCO108.2K759.0K
Small-scale Web Data (SWD)SBUWeb859.7K859.7K14.9M
CC-3MWeb2.9M2.9M
CC-12MWeb11.1M11.1M
Object-Region Data (ORD)VG regionsCOCO108.2K3.6M17.0M
VG objectsCOCO108.2K925.6K
COCO objectsCOCO110.3K736.6K
RefcocoCOCO27.9K589.9K
Open ImageFlickr1.7M7.5M
Obj365Flickr577.6K3.6M
Vision Data (VD)ImageNet-21KImageNet13.2M13.2M13.2M
Large-scale Web Data (LWD)DAVINCI-200MWeb205.6M205.6M601.3M
LAION-400MWeb395.7M395.7M
Text Data (TD)C4Web--800GB
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.337, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Table 10: Statistics of the pre-training datasets. #Images, #Captions, and #Total denote the number of images, the number of image-text pairs, and the total number of image-text pairs, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.392, + 0.825, + 0.435 + ], + "angle": 0, + "content": "pre-trained model on 30,000 images randomly sampled from COCO (Chen et al., 2015) validation split. Both Fréchet Inception Distance (FID) (Heusel et al., 2017) and Inception Score (IS) (Salimans et al., 2016) are reported. The image resolution is 256." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.446, + 0.825, + 0.529 + ], + "angle": 0, + "content": "Image-to-Text Generation For image-to-text generation (also called image captioning), the image is given to encoder and the decoder will generate the corresponding caption. Our experiments are conducted on COCO dataset (Chen et al., 2015) with cross-entropy optimization. Other task-specific techniques such as CIDEr optimization (Rennie et al., 2017) are not introduced. The image resolution is 480. We also conduct zero-shot captioning experiments on NoCaps (Agrawal et al., 2019) and VLINE (Zhou et al., 2022b)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.544, + 0.4, + 0.558 + ], + "angle": 0, + "content": "A.3 PRE-TRAINING DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.566, + 0.825, + 0.649 + ], + "angle": 0, + "content": "Since existing studies pre-trained their models on different corpora, some of which are publicly available (e.g., CC-3M, CC-12M) while some are in-house datasets (e.g., ALIGN (Jia et al., 2021)), making the fair comparison difficult. Considering results only on the state-of-the-art performance would underestimate the potential of this line of research. 
Therefore, we propose several practical settings, including small-scale and large-scale, and then conduct detailed comparisons on them in section 5.1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.652, + 0.827, + 0.833 + ], + "angle": 0, + "content": "We collect a large set of datasets with diverse distributions for pre-training. According to its source, we divide them into in-domain, small-scale web data, object-region data, vision data, and large-scale web data. The statistics and details are shown in Table 10. Most of them are naturally image-text pairs, while to enrich our corpus, we leverage object descriptions, region descriptions, and vision data (i.e., ImageNet). For objects and regions, we crop them from the original image according to their bounding box. The text part is composed according to a human-written template and objects. For example, the prompt template is \"This image contains [OBJ_A] and [OBJ_B]\", where [OBJ_A] and [OBJ_B] are two object names from the data. For vision data, because they are usually labeled with a single word or short phrase, we compose a description with prompt templates such as \"A picture of [LABEL]\" or \"The image contains [LABEL]\". For example, \"A picture of cat\" or \"The image contains cat\". We curated a dataset containing about 205.6M image-text pairs, which are available publicly on the internet. The data distribution is similar to LAION-400M. Because both are from web images, we merge them into large-scale web data (LWD)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.847, + 0.424, + 0.86 + ], + "angle": 0, + "content": "A.4 REPRODUCTION OF SIMVLM" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.869, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Since SimVLM is not open-sourced, we need to reproduce it by ourselves. There are two main difficulties in the reproduction: 1. it uses 1.8 billion in-house data 2. 
the configurations (e.g., parameter size, number of layers) of its base model are not clearly stated. However, there are still some clues in Section 4.4 of the SimVLM paper, where they propose a SimVLMsmall model with 8" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.162 + ], + "angle": 0, + "content": "layers, 512 embedding dimensions, and trained on about 200M web data. To demonstrate the success of our replication, we train a \\(\\mathrm{SimVLM}_{small}\\) model with the exact same configurations on about 200M web data. We obtain a VQA score of 68.50, surpassing the reported score of 67.43 in the original paper. We argue this result verifies our successful replication." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.18, + 0.381, + 0.193 + ], + "angle": 0, + "content": "A.5 EFFECTS OF COMPUTE" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.203, + 0.828, + 0.33 + ], + "angle": 0, + "content": "Our model is trained with large compute. To reveal the effects of compute, we visualize the performance improvement trends of SimVLM and DAVINCI as a function of the compute spent. There are two goals: 1) to compare better with prior work, as well as to 2) to show if that level of pre-training compute was necessary. We conduct experiments on the image-to-text generation task under both zero-shot and fine-tuning settings. The results are shown in Figure 2. It is observed that with the increase in compute, both models are improved significantly and converged at \\(40\\%\\) of compute (zero-shot), and \\(80\\%\\) of compute (fine-tuning), respectively. Large compute is especially helpful for fine-tuning settings. 
After convergence, our model outperforms SimVLM consistently in these two settings." + }, + { + "type": "image_caption", + "bbox": [ + 0.235, + 0.351, + 0.427, + 0.364 + ], + "angle": 0, + "content": "(a) COCO Captioning (Zero-shot)" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.367, + 0.471, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.575, + 0.351, + 0.776, + 0.364 + ], + "angle": 0, + "content": "(b) COCO Captioning (Fine-tuning)" + }, + { + "type": "image", + "bbox": [ + 0.524, + 0.366, + 0.815, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.514, + 0.825, + 0.54 + ], + "angle": 0, + "content": "Figure 2: The effects of compute. X-axis is the percentage of compute and Y-axis is the CIDer score on COCO captioning task." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.56, + 0.467, + 0.572 + ], + "angle": 0, + "content": "A.6 EFFECTS OF MASKING STRATEGIES" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.583, + 0.828, + 0.696 + ], + "angle": 0, + "content": "In our experiments, we adopt dynamic masking, where the masking ratio is sampled from a uniform distribution \\( \\mathrm{U}(0,1) \\). The prefix ratio could be 0, where the prefix image is none, and the model is forced to predict the whole image with the input caption. There are other designs to mask images. Here we compared three different masking strategies: 1) masked image modeling (randomly masking some patches), 2) in-painting (randomly masking some continuous spans in the middle of the image), and 3) suffix-painting (ours). The results are shown in Table 11. Both masked image modeling and in-painting are effective and competitive. It is observed that suffix-painting is better than masked image modeling and in-painting across all tasks, demonstrating that suffix-painting works well." 
+ }, + { + "type": "table", + "bbox": [ + 0.172, + 0.714, + 0.835, + 0.823 + ], + "angle": 0, + "content": "
MethodCOCOB@4 / CVQA AccSNLI-VE AccNLVR2 AccImageNet AccFood101 AccCIFAR10 AccMNLI AccSST-2 AccText2Image IS / FID
No Pre-training32.1 / 96.7152.7354.2351.08-*-*-*66.3279.84-*
MIM34.7 / 113.468.1875.3469.6648.4656.9572.7981.7289.849.50 / 74.13
In-painting34.5 / 112.567.4675.4168.6647.5054.3871.2081.5589.849.97 / 68.15
Suffix-painting (ours)35.8 / 117.369.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
Token Projection17.7 / 49.252.1371.1152.0115.1125.6261.0182.0190.2511.89 / 60.96
Patch Projection25.7 / 79.557.6971.9257.4536.2344.3169.4081.7390.0511.41 / 61.87
ResNet Feature (ours)35.8 / 117.369.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.839, + 0.828, + 0.904 + ], + "angle": 0, + "content": "Table 11: The effects of masking strategies and image feature extraction on COCO Captions, VQA, SNLI-VE, NLVR2, ImageNet, Food101, CIFAR10, MNLI, SST-2, and text-to-image generation. MIM denotes masked image modeling, where some patches are randomly sampled and masked. Because linear probe and zero-shot text-to-image generation require a pre-trained model to be frozen, the \"No Pre-training\" results on ImageNet, Food101, CIFAR10, and Text2Image are not reported and labeled by * ." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.521, + 0.119 + ], + "angle": 0, + "content": "A.7 EFFECTS OF IMAGE FEATURE EXTRACTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.126, + 0.827, + 0.224 + ], + "angle": 0, + "content": "There are several different ways to extract image features. We compare three different image representation methods: 1) token projection (projecting the prefix tokens to the hidden dimension of the backbone network on the token-level), 2) patch projection (similar to ViT embedding, we split an image into fixed-size patches, embed each of them by a trainable linear projection on the pixel-level), and 3) ResNet feature extraction (ours). The comparison is shown in Table 11. From the results, we observed that ResNet feature extraction outperforms token projection and patch projection by a large margin. Therefore, we decided to adopt ResNet to extract image features." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.236, + 0.453, + 0.25 + ], + "angle": 0, + "content": "A.8 SCALING EFFECTS OF DATA SIZE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.257, + 0.825, + 0.327 + ], + "angle": 0, + "content": "In this section, we explore the scaling effects of our model. We plot the trends with the increase in data size on four tasks: COCO captioning, VQA, SNLI-VE, and NLVR2. The performance improvement shown in Figure 3 demonstrates that both SimVLM and DAVinci are scaling well with pre-training data size. In addition, DAVinci consistently outperforms SimVLM on different data sizes across these tasks." + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.34, + 0.332, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.34, + 0.484, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.34, + 0.637, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.638, + 0.34, + 0.818, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.369, + 0.479, + 0.629, + 0.493 + ], + "angle": 0, + "content": "Figure 3: The scaling effects of data size." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.527, + 0.547, + 0.541 + ], + "angle": 0, + "content": "A.9 FULL COMPARISON WITH EXISTING METHODS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.825, + 0.578 + ], + "angle": 0, + "content": "In Table 12, we display a comprehensive comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.59, + 0.488, + 0.603 + ], + "angle": 0, + "content": "A.10 LIMITATION AND SOCIETAL IMPACTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.611, + 0.827, + 0.75 + ], + "angle": 0, + "content": "Limitation. 
Like most of the previous pre-training studies, the entire project consumed 40 V100 GPU years on an in-house computing cluster with large electricity costs. We tried to keep our model size small enough, but there is still potential for efficiency improvements such as sparse training (Zhou et al., 2021d;c), dataset distillation (Zhou et al., 2022c), and progressive training (Rusu et al., 2016). We will explore those techniques to improve the training efficiency and reduce the carbon footprint so that it can adhere to proposals on \"green\" deep learning (Schwartz et al., 2020; Xu et al., 2021b). Furthermore, although we have tried our best to include as many tasks as we can to demonstrate the versatility of DAVinci, we believe our method can be expanded to more tasks (e.g., machine translation, summarization, object detection, etc.), modalities (e.g., video and speech). We leave these investigations to future work." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.759, + 0.825, + 0.802 + ], + "angle": 0, + "content": "Potential Societal Impacts. Our model has image generation ability with risk of abuse, like fake portraits on social media (Hill & White, 2020), which is a common potential risk in image generation research. Viable solutions are watermarking (Yu et al., 2021) and introducing a strict user license." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.814, + 0.512, + 0.827 + ], + "angle": 0, + "content": "A.11 VISUALIZATION OF IMAGE GENERATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.835, + 0.826, + 0.879 + ], + "angle": 0, + "content": "In this section, we conduct a qualitative analysis by visualizing the generation samples. Figure 4 shows the comparison with DALLE and OFA with the same query. More generated samples are shown in Figures 5." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table", + "bbox": [ + 0.173, + 0.115, + 0.825, + 0.445 + ], + "angle": 0, + "content": "
Model#Params.Text MNLI AccVision ImageNet LE / FTImage2Text COCO B@4 / CText2Image COCO IS† / FID↓Multi-modal VQA test-dev / test-standardNLVR2 dev / test-P
Encoder-only Multi-modal Models
VisualBERT (Li et al., 2019)170M81.60---70.80 / 71.0067.40 / 67.00
ViLBERT (Lu et al., 2019)274M79.90---70.55 / 70.92-
VL-BERT (Su et al., 2020)170M81.20---71.16 / --
LXMERT (Tan & Bansal, 2019a)240M80.40---72.42 / 72.5474.90 / 74.50
OSCAR (Li et al., 2020)155M--36.5 / 123.7-73.16 / 73.4478.07 / 78.36
VinVL (Zhang et al., 2021b)157M--38.2 / 129.3-75.95 / 76.1282.05 / 83.08
ViLT (Kim et al., 2021)88M----70.85 / -74.91 / 75.57
ALBEF (Li et al., 2021)210M----75.84 / 76.0482.55 / 83.14
X-VLM (Zeng et al., 2021)240M--39.6 / 132.6-78.22 / 78.3784.41 / 84.76
VLMO (Wang et al., 2021a)----76.64 / 76.8982.77 / 83.34
Encoder-Decoder Multi-modal Models
UNICORN (Yang et al., 2021)--35.8 / 119.1-69.20 / 69.40-/-
Uni-ENDN (Li et al., 2022b)110M----72.20 / 72.50-/-
Pixel-BERT (Huang et al., 2020)144M----74.45 / 74.5576.50 / 77.20
E2E-VLP (Xu et al., 2021a)94M--36.2 / 117.3-73.25 / 73.6777.25 / 77.96
VL-T5 (Cho et al., 2021)220M--34.5 / 116.5-- / 70.3074.60 / 73.60
VL-BART (Cho et al., 2021)220M--35.1 / 116.6-- / 71.3071.70 / 70.30
Text2Image Models
AttnGAN (Xu et al., 2018)---23.30 / 35.20-/--/-
DM-GAN (Zhu et al., 2019)---32.20 / 26.50-/--/-
DALLE (Ramesh et al., 2021) (250M)12B---17.90 / 27.50-/--/-
DALLE (Ramesh et al., 2021) (640M)†82M---15.79 / 29.22-/--/-
CogView (Ding et al., 2021)4B---18.20 / 27.10-/--/-
Unified Models
Unifying (Huang et al., 2021)228M--37.3 / 122.6- / 29.90-/--/-
FLAVA (Singh et al., 2021)240M80.3375.54 / ---72.80 / 72.49-/-
SimVLM (Wang et al., 2021b) (640M)†153M83.2776.04 / -38.5 / 128.7-75.04 / 75.0378.82 / 79.72
SimVLM (Wang et al., 2021b) (1.8B)83.4080.60 / -39.0 / 134.8-77.87 / 78.1481.72 / 81.77
OFA (Wang et al., 2022)182M84.30- / 82.2041.0 / 138.221.50* / 20.80*78.00 / 78.10-/-
Florence (Yuan et al., 2021)637M-- / 90.05-/--/-80.16 / 80.36-/-
DAVINCI154M83.1378.81 / 83.9239.2 / 130.417.44 (22.41*) / 24.21 (19.82*)76.32 / 76.4480.03 / 80.25
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.456, + 0.828, + 0.547 + ], + "angle": 0, + "content": "Table 12: Comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks. All results are from base-size models. LE and FT denote linear evaluation and fine-tuning performance, respectively. Image2Text results are reported without CIDEr optimization. \\(\\dagger\\) are our reproduced models. \\* are the results after fine-tuning. SimVLM (1.8B) and OFA are pre-trained with much larger corpus or human-labeled data of many downstream tasks, and thus they are not comparable and are labeled in gray. Florence (Yuan et al., 2021) is pre-trained with a much larger model size (Florence-CoSwin-H, 637M) and more pre-training data (900M), so the numbers are in grey. bold denotes the best across unified models." + }, + { + "type": "image", + "bbox": [ + 0.19, + 0.587, + 0.814, + 0.865 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.275, + 0.876, + 0.722, + 0.892 + ], + "angle": 0, + "content": "Figure 4: Comparison with DALLE and OFA on text-to-image generation." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.183, + 0.234, + 0.315, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.187, + 0.339, + 0.314, + 0.356 + ], + "angle": 0, + "content": "a decorative flower vase full of purple and yellow flowers" + }, + { + "type": "image", + "bbox": [ + 0.348, + 0.234, + 0.481, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.355, + 0.339, + 0.475, + 0.347 + ], + "angle": 0, + "content": "a vase full of flowers on table" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.234, + 0.646, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.51, + 0.339, + 0.653, + 0.348 + ], + "angle": 0, + "content": "a park with flowers on a sunny day" + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.234, + 0.811, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.685, + 0.339, + 0.805, + 0.356 + ], + "angle": 0, + "content": "a fire hydrant sitting in a front yard next to a sign" + }, + { + "type": "image", + "bbox": [ + 0.183, + 0.371, + 0.315, + 0.473 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.199, + 0.474, + 0.299, + 0.483 + ], + "angle": 0, + "content": "a beach on a sunny day" + }, + { + "type": "image", + "bbox": [ + 0.348, + 0.37, + 0.48, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.474, + 0.471, + 0.491 + ], + "angle": 0, + "content": "a one cart train coming down the railroad tracks" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.37, + 0.646, + 0.472 + ], + "angle": 0, + 
"content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.473, + 0.658, + 0.481 + ], + "angle": 0, + "content": "a red and white boat docked on shore" + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.37, + 0.811, + 0.473 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.683, + 0.474, + 0.806, + 0.483 + ], + "angle": 0, + "content": "a picture of a snowy mountain" + }, + { + "type": "image", + "bbox": [ + 0.183, + 0.499, + 0.314, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.602, + 0.304, + 0.618 + ], + "angle": 0, + "content": "a red stop sign on the side of the road" + }, + { + "type": "image", + "bbox": [ + 0.348, + 0.499, + 0.481, + 0.601 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.349, + 0.604, + 0.488, + 0.621 + ], + "angle": 0, + "content": "a building in front of a roundabout with a tree in the center." + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.5, + 0.646, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.515, + 0.603, + 0.648, + 0.621 + ], + "angle": 0, + "content": "bathroom with marble walls and counter surrounds a large mirror" + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.499, + 0.811, + 0.602 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.673, + 0.604, + 0.813, + 0.613 + ], + "angle": 0, + "content": "trees by the river in the mountains" + }, + { + "type": "image", + "bbox": [ + 0.183, + 0.627, + 0.314, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.73, + 0.328, + 0.739 + ], + "angle": 0, + "content": "many fruits on the plate on the table" + }, + { + "type": "image", + "bbox": [ + 0.348, + 0.627, + 0.481, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.73, + 0.475, + 
0.74 + ], + "angle": 0, + "content": "a bunch of fruit in a fruit shop" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.627, + 0.646, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.731, + 0.66, + 0.739 + ], + "angle": 0, + "content": "a table set with a sandwich and a drink" + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.627, + 0.811, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.683, + 0.731, + 0.811, + 0.74 + ], + "angle": 0, + "content": "noodles and broccoli on a plate" + }, + { + "type": "image_caption", + "bbox": [ + 0.366, + 0.769, + 0.63, + 0.784 + ], + "angle": 0, + "content": "Figure 5: Generation samples by DAVINCI." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "25" + } + ] +] \ No newline at end of file diff --git a/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_origin.pdf b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..42d9953f4c5bfdf9db02908092f67fb239b56e42 --- /dev/null +++ b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/fd6fd19a-99f2-4a1c-9940-84627f28fb05_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33be029490e877fa39dc5a6fa517ac174f60f05a5785a1c50381a5356a23e401 +size 1730597 diff --git a/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/full.md b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e825329a480205984f55302ffcdf3a25b39487d0 --- /dev/null +++ b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal 
Learners/full.md @@ -0,0 +1,483 @@ +# WRITE AND PAINT: GENERATIVE VISION-LANGUAGE MODELS ARE UNIFIED MODAL LEARNERS + +Shizhe Diao* + +The Hong Kong University of Science and Technology sdiaoaa@connect.ust.hk + +Wangchunshu Zhou + +ByteDance AI Lab + +wangchunshu.zhou@inf.ethz.ch + +Xinsong Zhang† + +ByteDance AI Lab + +zhangxinsong.0320@bytedance.com + +Jiawei Wang + +Shanghai Jiao Tong University + +wjw_sjt@sjtu.edu.cn + +# ABSTRACT + +Recent advances in vision-language pre-training have pushed the state-of-the-art on various vision-language tasks, making machines more capable of multi-modal writing (image-to-text generation) and painting (text-to-image generation). However, few studies investigate if these two essential capabilities can be learned together and boost each other, making a versatile and powerful multi-modal foundation model. In this work, we disclose the potential of symmetric generative vision-language pre-training in learning to write and paint concurrently, and propose a new unified modal model, named DAVINCI, trained with prefix language modeling and prefix image modeling, a simple generative self-supervised objective on image-text pairs. Thanks to the proposed prefix multi-modal modeling framework, DAVINCI is simple to train, scalable to huge data, adaptable to both writing and painting tasks, and also strong on other vision, text, and multi-modal understanding tasks. DAVINCI achieves competitive performance on a wide range of 27 generation/understanding tasks and demonstrates the superiority of combining vision/language generative pre-training. Furthermore, we carefully benchmark the performance of different vision-language pre-training objectives on different scales of pre-training datasets on a heterogeneous and broad distribution coverage. Our results demonstrate the potential of exploiting self-supervision in both language and vision inputs, and establish new, stronger baselines for future comparisons at different data scales. 
$^{1}$ + +# 1 INTRODUCTION + +Self-supervised language model pre-training (Peters et al., 2018; Radford et al., 2018; Devlin et al., 2019; Liu et al., 2019; Lewis et al., 2020; Raffel et al., 2020; Brown et al., 2020; Fu et al., 2022; Zhou et al., 2021b; Diao et al., 2020; 2021; Zhou et al., 2021a; Xu et al., 2020; Zhou et al., 2020; 2022a; Pan et al., 2022; Diao et al., 2023) has reshaped the landscape of modern natural language processing (NLP) research, pushing the state-of-the-art of a wide range of NLP tasks. Recently, this success has been transferred to the multi-modal context and resulted in a number of vision-language pretrained models (VLMs) (Lu et al., 2019; Tan & Bansal, 2019a), achieving state-of-the-art results on various vision-language tasks. Most existing VLMs are BERT-like Transformer (Vaswani et al., 2017) encoders pre-trained with a combination of different vision-language pre-training (VLP) objectives: masked multi-modal modeling (Lu et al., 2019; Tan & Bansal, 2019b; Chen et al., 2020; Li et al., 2020), multi-modal alignment prediction (Lu et al., 2019; Tan & Bansal, 2019b; Chen et al., 2020; Li et al., 2020), region of interest feature regression (Tan & Bansal, 2019b), image-text matching (Li et al., 2021; Zeng et al., 2021), to name a few. However, the roadmap towards large language models reveals a transition pattern from encoder-only models like BERT (Devlin et al., 2019) / RoBERTa (Liu et al., 2019) to sequence-to-sequence models like T5 (Raffel et al., 2020) / BART (Lewis et al., 2020) and autoregressive models like GPT-3 (Brown et al., 2020) / PaLM (Chowdhery et al., 2022) to tackle + +more tasks in a unified way, and from complicated objectives like masked language modeling / next sentence prediction / replace token detection to a simple language modeling objective to improve the scalability of pre-training. 
This suggests that the generative pre-training paradigm with simple targets shows great potential for pre-training more scalable and general VLMs. + +To this end, several recent studies (Cho et al., 2021; Zhang et al., 2021a; Wang et al., 2021b; 2022) investigated sequence-to-sequence (seq2seq) vision-language pre-training and achieved state-of-the-art results on a range of vision-language understanding and generation tasks. For example, VL-T5 (Cho et al., 2021), OFA (Wang et al., 2022) and PaLI (Chen et al., 2022) formulate various vision-and-language problems into seq2seq tasks and pre-train a seq2seq VLM by multi-tasking on these tasks. In addition, ERNIE-ViLG (Zhang et al., 2021a) and SimVLM (Wang et al., 2021b) pre-train seq2seq VLMs with a simple language modeling or prefix language modeling objective on a large number of image-caption pairs. While achieving promising results, these objectives are not versatile enough, resulting in VLMs that are only capable of a subset of tasks in image-text modalities. On the other hand, the recent success of generative language pre-training (Brown et al., 2020) and generative vision pre-training (He et al., 2022; Bao et al., 2021) motivates us to explore generative vision-language pre-training to learn more versatile and scalable vision-language models. + +In this work, we introduce prefix multi-modal modeling, a unified generative pre-training framework that extends prefix language modeling to the multi-modal context and learns a multi-modal foundation model by learning to write and paint simultaneously. As illustrated in Figure 1, given an image-caption pair, we split the image and caption into two parts denoted as prefix and suffix. To make prefix image modeling compatible with the seq2seq formulation of conventional prefix language modeling, we follow DALLE (Ramesh et al., 2021) and convert images into discrete sequences of image tokens (van den Oord et al., 2017). 
We then train the model to generate the suffix in one modality based on the prefix in the same modality and the complete input in the other modality. In this way, prefix multi-modal modeling can fully exploit self-supervision from large-scale image-caption pairs by learning to write and paint simultaneously. We pre-train DAVinci2, a vision-language foundation model, with the proposed prefix multi-modal modeling framework on large-scale image-text pairs. DAVinci is the first self-supervised vision-language foundation model that is versatile for all kinds of tasks in vision-and-language modalities, including image-to-text generation, text-to-image generation, vision-language understanding, and single-modal language / vision tasks. DAVinci consistently outperforms FLAVA (Singh et al., 2021), an existing vision-language foundation model, on both language, vision, and multi-modal tasks, and performs competitively with state-of-the-art models across a wide range of tasks and modalities. Moreover, DAVinci also shows strong few-shot and zero-shot image/text generation capability. + +In addition, most existing VLMs are pre-trained with mixed pre-training objectives and different data sources varying in size, making it difficult to disentangle the impact of pre-training objectives and data sources on the downstream tasks. To this end, we conduct a systematic analysis of the performance of generative vision-language pre-training by carefully ablating different pre-training objectives, such as prefix language / image modeling, and the amount of pre-training data with different qualities, revealing the impact of different objectives and data sources to facilitating future research. 
+ +To summarize, our contribution is three-fold: (1) We introduce prefix multi-modal modeling, a simple unified generative vision-language pre-training framework that is scalable for large-scale pre-training and versatile for image-to-text generation, text-to-image generation and various multi-modal / single-modal understanding tasks. (2) We pre-train DAVINCI, a vision-language foundation model, with the proposed approach, demonstrating competitive performance on a wide range of 27 downstream tasks and the superiority of combining vision/language generative pre-training. (3) We conduct an analysis about the impact of different pre-training data sources and pre-training objectives on the performance of seq2seq VLMs. + +# 2 RELATED WORK + +Inspired by the success of language model pre-training, several studies investigated vision-language pre-training on large-scale image-caption pairs. ViLBERT (Lu et al., 2019) and LXMERT (Tan & Bansal, 2019b) first propose to extract visual object features with an external object detection model like Fast-RCNN (Girshick, 2015), feed the image features together with texts into Transformer + +![](images/47d8b05a6d5bf1ad408398bf32a3a02a079e04087e5c92ddae4c6e5c46418f6c.jpg) +Figure 1: Illustration of the overall architecture and pre-training procedures of DAVinci, a Transformer-based sequence-to-sequence model. Given an image-text pair, DAVinci first splits either the word sequence or image token sequence into prefix and suffix. It then concatenates the prefix with the complete sequence in the other modality as input. DAVinci is trained to recover the suffix with maximum likelihood estimation. + +models, and train the model to align vision and language representations with masked multi-modal modeling and multi-modal alignment prediction objectives. 
Many following works (Li et al., 2020; Zhang et al., 2021b; Chen et al., 2020; Li et al., 2022a; 2021; Zeng et al., 2021; Wang et al., 2021a) propose several new objectives to improve object detection based VLP and explored using vision Transformer (Dosovitskiy et al., 2021; Touvron et al., 2021) as visual feature extractor. + +More recently, FLAVA (Singh et al., 2021), a new vision-language foundation model, is pre-trained with a masked multi-modal modeling objective. Performing competitively on language, vision, and vision-language understanding tasks, FLAVA is designed for understanding tasks without text and image generation abilities. + +While achieving promising results on multi-modal understanding tasks, most VLMs are based on encoder-only architectures with bidirectional attention, making them non-trivial to adapt to multi-modal generation tasks such as image captioning and text-to-image generation. Inspired by the success of seq2seq pre-trained language models such as T5 (Raffel et al., 2020) and BART (Lewis et al., 2020), VL-T5 (Cho et al., 2021) and OFA (Wang et al., 2022) propose to formulate both vision-language pre-training objectives and various downstream vision-language tasks as seq2seq tasks and pre-train a seq2seq VLM by multi-tasking on these tasks. However, the scalability and the zero-shot transfer capability of this approach are limited by the availability of large-scale and diverse vision-language tasks. To this end, SimVLM (Wang et al., 2021b), the most related work to our approach, instead pre-trains a seq2seq VLM with a simple prefix language modeling objective on text generation. It easily scales to very large and potentially noisy pre-training data and achieves competitive results. However, SimVLM only exploits language self-supervision, and thus it does not perform well on image understanding tasks and is unable to tackle image generation tasks. 
Another recent study is CM3 (Aghajanyan et al., 2022), which proposes a causal masked multi-modal model learned from large web data and differs from our work in pre-training objectives and target tasks. + +As for the text-to-image generation task, Ramesh et al. (2021); Ding et al. (2021); Yu et al. (2022) achieved promising performance by learning an auto-regressive target with Transformer and VQ-VAE / VQ-GAN tokenizer. Most recently, Ramesh et al. (2022); Saharia et al. (2022) advanced the image generation capability by using diffusion models and high-quality text embeddings (e.g., CLIP, T5). Therefore, it is natural to explore boosting image generation via stronger multi-modal understanding. + +Previous studies are good at either image-to-text or text-to-image generation, but few studies investigate whether these two important capabilities can be learned together and boost each other. In this paper, we explore making a versatile and powerful multi-modal foundation model that is good at text-to-image generation, image-to-text generation, and multi-modal understanding tasks. + +# 3 DAVINCI + +Given the superior performance of auto-regressive language models (LM) (Brown et al., 2020; Chowdhery et al., 2022; Rae et al., 2021) on zero-shot and few-shot transfer abilities, we decided to + +adopt a decoder optimized by language modeling loss to retain the generalization capabilities, and an encoder to represent the prefix input. Unlike using a causal mask in the decoder, the encoder employs fully-visible attention for the prefix input. This architecture resembles prefix language modeling, which shows effectiveness in a wide range of language tasks (Dong et al., 2019; Raffel et al., 2020) and enables zero-shot generalization abilities. Contrary to the previous multi-stage approaches (Wang et al., 2021a; Singh et al., 2021), our model is trained from scratch in an end-to-end manner thanks to the model's simplicity. 
In this section, we introduce the proposed prefix multi-modal modeling framework and the DAVinci model. The overall architecture of DAVinci is depicted in Figure 1. We first explain our model architecture in detail in §3.1 and then introduce pre-training objectives and procedures in §3.2. + +# 3.1 MODEL ARCHITECTURE + +Textual Feature Embedding Given an input sentence $S$ , we first use WordPiece (Wu et al., 2016) to tokenize it into a sequence of tokens $W = \{w_{1},w_{2},\dots,w_{n}\}$ . To obtain text features $T$ , for each token $w_{i}$ , a token embedding $e_i$ and position embedding $p_i$ are computed by two separate embedding matrices. Finally, the textual feature embedding $T = \{t_1,t_2,\dots,t_i,\dots,t_n\}$ is calculated by $t_i = LayerNorm(e_i + p_i)$ , where $i$ indicates the $i$ -th position, and LayerNorm (Ba et al., 2016) is a layer normalization function. + +Visual Feature Embedding Given an input image $I$ , we first use a CNN backbone to extract and learn the image features. Following (Dai et al., 2021; Wang et al., 2021b), we use the first three blocks of ResNet (He et al., 2016) to obtain the feature maps. The feature maps are then flattened to $F = \{f_1, f_2, \dots, f_m\}$ along the spatial dimension, where $m$ denotes the number of features. To keep the position information of visual features, we inject absolute learned positional embeddings $p$ and the final visual embeddings $V = \{v_1, v_2, \dots, v_i, \dots, v_m\}$ are calculated by $v_i = f_i + p_i$ , where $i$ indicates the $i$ -th position. + +Cross-Modal Transformer To fuse the textual and visual feature embeddings into a common space, we adopt a simple canonical Transformer architecture as the fusion module. The input is the combination of visual embedding $V$ and textual embedding $T$ , namely $X = \{x_{1}, x_{2}, \dots, x_{l}\} = [V, T] = \{v_{1}, v_{2}, \dots, v_{m}, t_{1}, t_{2}, \dots, t_{n}\}$ . 
The input embedding vectors $X$ are then fed into a cross-modal Transformer encoder to obtain hidden state vectors $H = \{h_{1}, h_{2}, \dots, h_{l}\}$ . Finally, a Transformer decoder is applied to generate visual or textual tokens with $H$ and decoder input as illustrated in Figure 1. + +Image Tokenizer and Decoder Because Transformer is modeling on discrete tokens, to unify the text tokens and image tokens, we discretize an image into tokens by an image tokenizer and reconstruct the raw image by an image decoder. The image tokenizer and decoder are implemented with a discrete variational autoencoder (dVAE) (Ramesh et al., 2021). After training of the image tokenizer, it could serialize an image $I$ into a sequence of discrete visual tokens $Z = \{z_{1}, z_{2}, \dots, z_{m}\}$ according to a learned vocabulary. Visual tokens $Z$ serve as the ground-truth labels for the prefix image modeling objective. In our work, we directly use an off-the-shelf image tokenizer and decoder from VQGAN (Esser et al., 2021), with a vocabulary size of 1024 and a compression rate of 16, which means a $256 \times 256$ image will be tokenized into $16 \times 16$ grid of tokens and then flattened to a sequence of 256 tokens. + +# 3.2 PRE-TRAINING OBJECTIVES + +Our major motivation is to conduct language modeling with image information and image modeling with text information simultaneously, which only requires image and text pairs that are easy to collect, making our approach easy to scale. The interaction would force the vision-language model to have a deeper understanding of both text and image. Learning from this interaction connects the visual representation with textual representation, enabling zero-shot transfer. + +Prefix Language Modeling (PLM) The core idea of prefix language modeling is "given a full image $X_{image}$ and a prefix caption $\tilde{X}_{text}$ , recover the masked textual tokens (i.e., suffix caption $Y_{text}$ )". 
Given an input caption, we first randomly mask some continuous words at the end (we call it suffix caption hereafter) and recover the masked textual tokens with full image by optimizing the cross-entropy loss, + +$$ +\mathcal {L} _ {\mathrm {P L M}} = - \sum_ {(I, S) \in D} \log p \left(\mathbf {Y} _ {\text {t e x t}} \mid \mathbf {X} _ {\text {i m a g e}}, \tilde {\mathbf {X}} _ {\text {t e x t}}\right), \tag {1} +$$ + +where I and S are images and captions from the pre-training corpus $D$ . + +Because of the lack of textual information, recovering the suffix caption requires the model to understand both the image and prefix caption. The full image is rich in semantic information that would help language modeling. The prefix length is randomly decided during training, and especially when prefix caption is none, this task will degenerate into "image captioning" task, which forces the model to generate a caption with the input image. + +$$ +\mathcal {L} _ {\mathrm {P L M}} ^ {\prime} = - \sum_ {(I, S) \in D} \log p \left(\mathbf {Y} _ {\text {t e x t}} \mid \mathbf {X} _ {\text {i m a g e}}\right) \tag {2} +$$ + +Prefix Image Modeling (PIM) The core idea of prefix image modeling is "given a full caption and a corrupted image (we call it prefix image hereafter), recover the masked visual tokens". Given an input image, we first randomly mask some continuous image patches at the end (we call it suffix image hereafter). The prefix image and full caption will be fed into the model and try to recover the original visual tokens obtained from the image tokenizer by optimizing the cross-entropy loss. 
+ +$$ +\mathcal {L} _ {\mathrm {P I M}} = - \sum_ {(I, S) \in D} \log p \left(\mathbf {Y} _ {\text {i m a g e}} \mid \mathbf {X} _ {\text {t e x t}}, \tilde {\mathbf {X}} _ {\text {i m a g e}}\right) \tag {3} +$$ + +Similar to PLM, when prefix image is none, this task will degenerate into "text-to-image generation" task, forcing the model to generate an image with the input caption: + +$$ +\mathcal {L} _ {\mathrm {P I M}} ^ {\prime} = - \sum_ {(I, S) \in D} \log p \left(\mathbf {Y} _ {\text {i m a g e}} \mid \mathbf {X} _ {\text {t e x t}}\right) \tag {4} +$$ + +Unified Learning Objective Our model is learned by optimizing the combination of PLM and PIM. + +$$ +\mathcal {L} = \mathcal {L} _ {\mathrm {P L M}} + \mathcal {L} _ {\mathrm {P I M}} \tag {5} +$$ + +# 4 EXPERIMENTS + +# 4.1 PRE-TRAINING DATASETS + +Since existing studies pre-trained their models on different corpora, making the fair comparison difficult. Considering results only on state-of-the-art performance would underestimate the potential of this line of research. Therefore, we propose several practical settings including small-scale and large-scale, and then conduct detailed comparisons on them in Section 5.1. More details about the datasets are shown in Appendix A.3. + +
Data TypeDatasetImage Domain#Total
In-Domain Data (ID)COCO, Visual GenomeCOCO1.3M
Small-scale Web Data (SWD)SBU, CC-3M, CC-12MWeb14.9M
Object-Region Data (ORD)VG regions, VG objects, COCO objects, Refcoco, Open Image, Obj365COCO, Flickr17.0M
Vision Data (VD)ImageNet-21KImageNet13.2M
Large-scale Web Data (LWD)LAION-400M, DAVinci-200MWeb601.3M
Text Data (TD)C4Web800GB
+ +Table 1: Statistics of the pre-training datasets. #Total denotes the total number of image-text pairs. + +# 4.2 DOWNSTREAM TASKS + +We test our models' ability and versatility on five dimensions: language understanding on 8 GLUE tasks (Wang et al., 2019), vision understanding on ImageNet fine-tuning and 12 popular vision datasets for linear evaluation, multi-modal understanding on VQAv2 (Goyal et al., 2017b), SNLI-VE (Xie et al., 2019) and NLVR2 (Suhr et al., 2019), text-to-image generation on COCO (Chen et al., 2015), and image-to-text generation on COCO, NoCaps (Agrawal et al., 2019), and VLUE (Zhou et al., 2022b). Details of downstream tasks and fine-tuning process are described in Appendix A.2. + +# 4.3 IMPLEMENTATION DETAILS + +Our model is a base-size Transformer implemented with a 6-layer encoder and a 6-layer decoder, 768 dimensions for hidden states, 512 for maximum input length, and 3072 for intermediate size. We train our model from scratch without initializing the Transformer encoder and decoder. However, the image encoder is initialized from ResNet-101 (He et al., 2016) with ImageNet weights since we find
BERTRoBERTaViTMLM 1MIM 2FLAVA 3CLIP 4SimVLM 5DAVINCI 6SimVLM 7DAVINCI 8
TaskEval.16GB160GB13.2M70M70M70M70M46.4M46.4M647.7M647.7M
MNLIFT84.2087.60-73.23-80.3332.8582.1382.2583.2783.13
CoLAFT54.6063.60-39.55-50.6511.0252.4752.1054.2254.75
MRPCFT84.7590.20-73.24-84.1668.7482.7083.1484.2684.54
QQPFT89.0091.90-86.68-88.7459.1788.3988.1589.0588.92
SST-2FT92.5094.80-87.96-90.9483.4990.6590.4891.1291.37
QNLIFT91.0092.80-82.32-87.3149.4687.5587.2188.2887.90
RTEFT62.5078.70-50.54-57.7653.0759.8060.7263.3464.22
STS-BFT88.2091.20-78.89-85.6713.7086.6286.2787.2487.05
NLP Avg.80.8486.35-71.55-78.1946.4478.7978.7980.1080.23
ImageNetLE--80.90-41.7975.5472.9574.3175.8776.0477.65
Food101LE--86.70-53.3088.5185.4983.4189.3385.5290.12
CIFAR10LE--96.90-76.2092.8791.2591.5693.0192.4193.96
CIFAR100LE--86.40-55.5777.6874.4072.5178.9875.2380.11
CarsLE--54.70-14.7170.8762.8461.4472.6968.8374.57
AircraftLE--46.00-13.8347.3140.0241.2847.4247.7549.55
DTDLE--74.30-55.5377.2973.4072.5577.1276.5978.33
PetsLE--92.70-34.4884.8279.6178.7785.5286.1388.21
Flowers102LE--99.20-67.2396.3794.9493.2496.1295.4196.88
MNISTLE--97.40-96.4098.4297.3896.6698.6798.4599.01
STL10LE--99.50-80.1298.8997.2997.5199.0398.0299.21
Country211LE--17.50-8.8728.9225.1226.4528.9927.8129.94
Vision Avg.--77.68-49.8478.1274.5674.1478.5677.3479.80
VQAv2FT-----72.4959.8172.1273.8975.0376.44
SNLI-VEFT-----78.8973.5378.7479.1179.6380.01
NLVR2FT-------77.4577.9179.7280.25
I2T@B4FT-------38.0038.5038.1039.20
I2T@CFT-------126.96128.66128.91130.44
T2I@IS ↑FT--------17.55-22.41
T2I@FID ↓FT--------23.58-19.82
VQAv2FS-------54.6954.8551.8854.90
SNLI-VEFS-------67.4567.5767.9668.04
NLVR2FS-------51.4651.1951.4951.52
I2T@B4FS-------35.9036.4032.7037.00
I2T@CFS-------117.75120.43112.20122.56
I2T@B4ZS-------11.4010.8013.8018.70
I2T@CZS-------45.3045.5556.6968.44
VUE@B4ZS-------9.209.4010.4010.60
VUE@CZS-------33.9234.8039.7540.83
NoCaps@CZS-------48.0545.5148.6458.58
T2I@IS ↑ZS--------14.91-17.44
T2I@FID ↓ZS--------29.83-24.21
Multi-modal Avg.-------57.8958.3059.1362.50
+ +Table 2: Experimental results on vision, language and multi-modal downstream tasks. @B4, @C denote BLEU@4, CIDEr, respectively. I2T and T2I denote image-to-text and text-to-image tasks. Multi-modal Avg. is the average score of all multi-modal tasks. FT: fine-tuning, LE: linear evaluation, FS: few-shot, ZS: zero-shot. Under few-shot setting, we fine-tune a pre-trained model for 3 epochs on $1\%$ training data. Results for BERT are obtained from Iki & Aizawa (2021). Results for RoBERTa are from its corresponding paper (Liu et al., 2019) and they use the mid-training (Phang et al., 2018) on MNLI for RTE, MRPC and STS-B while other models (e.g., BERT, SimVLM, DAVinci) do not apply this trick. Results for ViT are from ViT-Base/16 model (Radford et al., 2021). We list the reported performance of text-only and image-only models in grey for reference. + +a warm start provides a reliable visual representation and helps the convergence. All pre-training experiments are conducted on 32GB NVIDIA V100 GPUs. The model trained on the largest data takes around 10 days on 1024 V100 GPUs. We adopt dynamic masking in our experiments, where the masking ratio is randomly sampled from a uniform distribution $\mathrm{U}(0,1)$ . More details of the fine-tuning, network architectures, and hyper-parameters setups are given in Appendix A.1. + +# 4.4 EXPERIMENTAL RESULTS + +We extensively compare the performance of DAVINCI with state-of-the-art unified foundation models and vision-language models across vision, language, and multi-modal tasks, assessing five different abilities: (1) text understanding, (2) image understanding, (3) text-to-image generation, (4) image-to-text generation, (5) multi-modal understanding. + +Overall Performance We report the overall performance on 8 language tasks from GLUE, 12 vision tasks, 3 multi-modal tasks, 3 image-to-text tasks and 1 text-to-image task. 
We compare our model with FLAVA and SimVLM $^3$ , two of the most recent and best performing vision-language + +
Model#Params.Text MNLI AccVision ImageNet LE / FTImage2Text COCO B@4 / CText2Image COCO IS↑ / FID↓Multi-modal
VQA test-dev / test-standardNLVR2 dev / test-P
Encoder-only Multi-modal Models
VinVL (Zhang et al., 2021b)157M--38.2 / 129.3-75.95 / 76.1282.05 / 83.08
ViLT (Kim et al., 2021)88M----70.85 / -74.91 / 75.57
ALBEF (Li et al., 2021)210M----75.84 / 76.0482.55 / 83.14
X-VLM (Zeng et al., 2021)240M--39.6 / 132.6-78.22 / 78.3784.41 / 84.76
VLMO (Wang et al., 2021a)----76.64 / 76.8982.77 / 83.34
Encoder-Decoder Multi-modal Models
UNICORN (Yang et al., 2021)--35.8 / 119.1-69.20 / 69.40-/-
Uni-ENDN (Li et al., 2022b)110M----72.20 / 72.50-/-
Pixel-BERT (Huang et al., 2020)144M----74.45 / 74.5576.50 / 77.20
E2E-VLP (Xu et al., 2021a)94M--36.2 / 117.3-73.25 / 73.6777.25 / 77.96
VL-T5 (Cho et al., 2021)220M--34.5 / 116.5-- / 70.3074.60 / 73.60
VL-BART (Cho et al., 2021)220M--35.1 / 116.6-- / 71.3071.70 / 70.30
Text2Image Models
DM-GAN (Zhu et al., 2019)---32.20 / 26.50-/--/-
DALLE (Ramesh et al., 2021) (250M)12B---17.90 / 27.50-/--/-
DALLE (Ramesh et al., 2021) (640M)†82M---15.79 / 29.22-/--/-
CogView (Ding et al., 2021)4B---18.20 / 27.10-/--/-
Unified Models
Unifying (Huang et al., 2021)228M--37.3 / 122.6- / 29.90-/--/-
FLAVA (Singh et al., 2021)240M80.3375.54 / ---72.80 / 72.49-/-
SimVLM (Wang et al., 2021b) (640M)†153M83.2776.04 / -38.5 / 128.7-75.04 / 75.0378.82 / 79.72
SimVLM (Wang et al., 2021b) (1.8B)83.4080.60 / -39.0 / 134.8-77.87 / 78.1481.72 / 81.77
OFA (Wang et al., 2022)182M84.30- / 82.2041.0 / 138.221.50* / 20.80*78.00 / 78.10-/-
Florence (Yuan et al., 2021)637M-- / 90.05-/--/-80.16 / 80.36-/-
DAVINCI154M83.1378.81 / 83.9239.2 / 130.417.44 (22.41*) / 24.21 (19.82*)76.32 / 76.4480.03 / 80.25
+ +Table 3: Comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks. All results are from base-size models. LE and FT denote linear evaluation and fine-tuning performance, respectively. Image2Text results are reported without CIDEr optimization. $\dagger$ are our reproduced models. \* are the results after fine-tuning. SimVLM (1.8B) and OFA are pre-trained with much larger corpus or human-labeled data of many downstream tasks, and thus they are not comparable and are labeled in gray. Florence (Yuan et al., 2021) is pre-trained with much larger model size (Florence-CoSwin-H, 637M) and more pre-training data (900M), so the numbers are in grey. bold denotes the best across unified models. + +foundation models. We also include comparisons with some baseline models (e.g., MIM, MLM, CLIP). There are several observations. First, DAVINCI (column 8) outperforms FLAVA (column 3) and SimVLM (column 7) across almost all tasks, providing a new and stronger unified foundation model. Compared with FLAVA, DAVINCI improves an average of $2.04\%$ , $1.68\%$ on language and vision tasks, respectively. Compared with SimVLM, DAVINCI achieves comparable results on language tasks $(+0.13\%)$ while performing much better on vision tasks $(+2.46\%)$ . To make a fair comparison in terms of similar data size, we compare FLAVA (70M data, column 3) with DAVINCI (46.4M data, column 6). It is observed that DAVINCI still outperforms FLAVA even with much less data. Considering the multi-modal tasks, DAVINCI consistently outperforms FLAVA and SimVLM on VQA and VE. Note that FLAVA is incapable of generation and SimVLM cannot generate images; only DAVINCI is competent to all tasks and demonstrates a stronger capability of unifying vision and language tasks. + +Zero-shot and Few-shot Transfer One of the critical benefits of generative pre-trained vision-language models is the good generalization ability on zero-shot and few-shot tasks. 
For zero-shot transfer, two out-of-domain datasets are considered (NoCaps and VLINE), with results shown in Table 2. First, DAVinci outperforms SimVLM on both zero-shot and few-shot settings, demonstrating its better transfer capabilities. It also shows the effectiveness and robustness of the synergy of our proposed language supervision and image supervision. Second, it is observed that the performance improvement is larger on 647.7M data (column 7 vs. column 8) than 46.4M data (column 5 vs. column 6). This shows DAVinci generalizes well with the increase of large-scale data. We even observe some performance drops on small data (46.4M) but excellent performance improvements on large data (647.7M). This is consistent with the recent observation that zero-shot ability could only be triggered with large pre-training data (Wei et al., 2022) and that scaling to large data and keeping simple training objectives benefit generalization performance (Wang et al., 2021b). + +Comparison with state-of-the-art vision-language models In addition to unified vision-language foundation models, we compare DAVinci with state-of-the-art vision-language models as well. The results are shown in Table 2. DAVinci demonstrates its superiority in vision understanding and text-to-image generation. Compared with current popular auto-regressive image generation models like DALLE and CogView, our model achieves comparable IS and better FID scores with significantly fewer model parameters than DALLE and CogView. Note that the original DALLE is implemented based on VQVAE, so here, we compare our model with reproduced VQGAN-based DALLE with + +
SettingsPre-training Data#Image#CaptionModelsCOCO CaptionsVQASNLI-VENLVR2
IDSWDORDVDLWDB@4 / CAccAccAcc
10.2M1.3MSimVLM35.2 / 115.0668.8976.1071.21
DAVINCI35.8 / 117.3069.2576.2272.55
215.1M16.2MSimVLM37.0 / 122.6371.5478.3675.50
DAVINCI37.4 / 123.1171.8878.6277.46
32.7M18.3MSimVLM38.2 / 123.8569.5776.6570.50
DAVINCI38.0 / 124.2070.0276.9272.01
413.4M14.5MSimVLM36.2 / 119.7370.5376.9073.25
DAVINCI36.6 / 121.2771.2377.4074.62
530.5M46.4MSimVLM38.5 / 128.1271.8478.8176.75
DAVINCI38.6 / 128.7373.5379.2477.55
6601.3M601.3MSimVLM37.3 / 123.8173.7378.7977.69
DAVINCI37.6 / 124.4273.9579.2978.54
7601.5M602.6MSimVLM37.9 / 125.5074.6479.0577.68
DAVINCI38.1 / 125.9174.9179.2278.12
8631.8M647.7MSimVLM38.5 / 128.2575.0479.3278.82
DAVINCI39.1 / 130.2176.3280.0480.03
+ +Table 4: Evaluation on downstream tasks using COCO Captions, VQA, SNLI-VE, and NLVR2. #Image and #Caption denote the numbers of images and image-text pairs that are used in the pre-training. + +similar model sizes, and find DAVinci still achieves a significant improvement over it. Generated images are presented in Appendix A.11 for further qualitative comparison. + +On multi-modal tasks such as VQA, DAVINCI not only outperforms unified models (e.g., SimVLM (640M)) and other encoder-decoder multi-modal models (e.g., E2E-VLP, VL-T5), but also achieves competitive performance with many conventional encoder-only multi-modal models (e.g., VinVL, ALBEF, VLMO). Note that SimVLM (1.8B) and OFA are not directly comparable because SimVLM uses 1.8B in-house image-text pairs, and OFA uses human-labeled data of many downstream tasks during pre-training. Even so, we still report their results for reference and observe a better performance on ImageNet fine-tuning and text-to-image generation than OFA. + +The advantages of image generation over DALLE / CogView, the superiority of image-to-text over SimVLM, and the competitive performance with conventional multi-modal models demonstrate the synergistic effect of our proposed PLM (language supervision) and PIM (image supervision). + +# 5 ANALYSIS + +# 5.1 IMPACT OF PRE-TRAINING DATASETS + +In this section, we disclose the impact of various multi-modal data sources for VLMs. We choose SimVLM and DAVinci as our baseline models for their competitive performance, the capability of training from scratch, and the scalability of extending to the noisy large-scale corpus. We use the same text corpus, $C4$ , for all the variations. The results are shown in Table 4. In general, the performance is increased along with the data size, and DAVinci consistently outperforms SimVLM on almost all the data settings and all the downstream tasks.
Both object-region data and vision data are clearly helpful in vision-language pre-training (refer to settings 3 and 4). We surprisingly observe that models pre-trained on object-region data with far fewer images perform even better than models pre-trained with small-scale web data on the COCO Caption task (refer to settings 2 and 3). Although large-scale web data is usually noisier than small datasets (e.g., ID, ORD, VD, and SWD), it is powerful for multi-modal pre-training (refer to settings 5 and 8). We believe our analysis has broader impacts on the research of VLMs in the community. First, this enables fair comparisons for pre-trained models in the same data settings. Second, one can focus on the model designs at part or all of the data settings according to available computation resources. Third, we reveal that object-region and vision data, normally overlooked in VLM pre-training, also play a significant role. + +# 5.2 ABLATION STUDY + +To verify the contributions of different modules in our framework, we ablate them and evaluate DAVINCI on five kinds of downstream tasks: language understanding (MNLI, SST-2), vision understanding (ImageNet, Food101, CIFAR10), multi-modal understanding (VQAv2, SNLI-VE, NLVR2), image-to-text generation (COCO Captions), and text-to-image generation. Experiments are conducted with the same model architecture on in-domain data (ID). The results are shown in Table 5. + +Effects of Objectives First, all three objectives (PLM, PIM, and Text2Text) bring improvement and the combination confirms a synergistic effect. Second, it is observed that without PLM, the performance decreases significantly on multi-modal understanding and image-to-text generation, + +
MethodCOCOB@4 / CVQAAccSNLI-VEAccNLVR2AccImageNetAccFood101AccCIFAR10AccMNLIAccSST-2AccT2IIS / FID
No Pre-training32.1 / 96.7152.7354.2351.08-*-*-*66.3279.84-*
DAVINCI35.8 / 117.3069.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
-PLM33.6 / 111.1765.1573.9153.2848.0574.1772.9881.4289.9710.26 / 59.64
-PIM34.3 / 116.5868.8975.7969.7845.5471.1870.1181.9490.53-*
-Text2Text34.1 / 115.2168.1475.3870.3448.6774.2673.2376.4888.1412.07 / 54.77
PL=035.4 / 117.0066.9075.5271.0548.4568.1873.7378.6989.0011.76 / 55.38
PL=15%35.7 / 116.5369.1675.0970.4441.5852.1568.5579.0289.46-*
PL=50%35.1 / 115.5368.5574.5456.9237.6949.1670.1578.5989.69-*
MIM34.7 / 113.468.1875.3469.6648.4656.9572.7981.7289.849.50 / 74.13
In-painting34.5 / 112.567.4675.4168.6647.5054.3871.2081.5589.849.97 / 68.15
Token Projection17.7 / 49.252.1371.1152.0115.1125.6261.0182.0190.2511.89 / 60.96
Patch Projection25.7 / 79.557.6971.9257.4536.2344.3169.4081.7390.0511.41 / 61.87
+ +Table 5: Ablation study on COCO Captions, VQA, SNLI-VE, NLVR2, ImageNet, Food101, CIFAR10, MNLI, SST-2, and text-to-image (T2I) generation. “-” denotes removing the corresponding objective. PL denotes the prefix length under fixed masking ratio settings. Because the linear probe requires a pre-trained model to be frozen, “No Pre-training” results on ImageNet, Food101, and CIFAR10 are not reported and labeled by * . For T2I, we report the zero-shot results. Note that the following four variants cannot perform zero-shot text-to-image generation (labeled by *): (1) No Pre-training, (2) DAVinci - PIM, (3) PL=15%, and (4) PL=50%. + +indicating the importance of language supervision. Third, PIM brings more gains than PLM and text2text on vision understanding, which is expected because it enhances the vision encoding ability with image supervision. In addition, the text2text objective is important to text understanding. Last, on the text-to-image generation task, it is observed that PLM is also helpful, confirming the synergistic effect of PIM and PLM again. Intuitively, PIM and PLM can help each other learn the alignments of visual and textual features, which will benefit both image generation and other multi-modal tasks. + +Effects of Masking Ratios Our model adopts dynamic masking ratios as described in Section 3.2. We also conduct experiments with static masking ratios with the prefix length fixed to 0, $15\%$ , and $50\%$ . The comparison between dynamic masking ratios and static masking ratios $(\mathrm{PL} = 0, 15\%,$ and $50\%)$ reveals that dynamic masking is better. We attribute this improvement to the smoothing effects of dynamic masking ratios. We also find that the standard language model $(\mathrm{PL} = 0)$ performs worse on VQA, Food101, and text-to-image generation, which is consistent with the observation in SimVLM. In our experiments, the masking ratio is sampled from a uniform distribution $\mathrm{U}(0,1)$ . 
+ +Effects of Masking Strategies Here we also compared three different masking strategies: 1) masked image modeling (randomly masking some patches), 2) in-painting (randomly masking some continuous spans in the middle of the image), and 3) suffix-painting (ours). The results are shown in Table 5. Both masked image modeling and in-painting are effective and competitive. It is observed that suffix-painting is better than masked image modeling and in-painting across all tasks, demonstrating that suffix-painting works well. + +Effects of Image Feature Extraction There are several different ways to extract image features. We compare three different image representation methods: 1) token projection (projecting the prefix tokens to the hidden dimension of the backbone network on the token-level), 2) patch projection (similar to ViT embedding, we split an image into fixed-size patches, embed each of them by a trainable linear projection on the pixel-level), and 3) ResNet feature extraction (ours). From the results in Table 5, we observed that ResNet feature extraction outperforms token projection and patch projection by a large margin. Therefore, we decided to adopt ResNet to extract image features. + +We provide more details and discussions about the effects of compute (A.5), masking strategies (A.6), image feature extraction methods (A.7), and scaling effects of data size (A.8) in the Appendix. + +# 6 CONCLUSION AND DISCUSSION + +In this work, we first benchmark several settings on sequence-to-sequence vision-language pretraining in terms of pre-training dataset size, aligning SimVLM and our model on them. We propose a simple and unified generative pre-training model, DAVinci, to simultaneously leverage the language supervision and image supervision through two objectives under a unified framework: prefix language modeling and prefix image modeling. DAVinci is simple yet effective, demonstrating strong capabilities in both multi-modal writing and painting tasks. 
Experimental results explicitly imply that combining suffix caption generation and suffix image generation offers large gains on all benchmark settings. We also discussed limitations and future work in Appendix A.10. + +# ACKNOWLEDGMENTS + +We thank the anonymous reviewers for their valuable suggestions. We would like to acknowledge Yan Zeng, Wenguan Huang, and Zhi Zhang at ByteDance, and Zhiling Zhang at Shanghai Jiao Tong University for their generous assistance in data collection and helpful discussions. We also wish to thank Hang Li at ByteDance, and Tong Zhang at HKUST for inspiring feedback, valuable comments, and great support to this work. + +# REFERENCES + +Armen Aghajanyan, Bernie Huang, Candace Ross, Vladimir Karpukhin, Hu Xu, Naman Goyal, Dmytro Okhonko, Mandar Joshi, Gargi Ghosh, Mike Lewis, et al. Cm3: A causal masked multimodal model of the internet. arXiv preprint arXiv:2201.07520, 2022. +Eneko Agirre, Lluis Márquez, and Richard Vicentowski (eds.). Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007), Prague, Czech Republic, 2007. Association for Computational Linguistics. URL https://aclanthology.org/S07-1000. +Harsh Agrawal, Peter Anderson, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, and Stefan Lee. nocaps: novel object captioning at scale. In 2019 IEEE/CVF International Conference on Computer Vision, ICCV 2019, Seoul, Korea (South), October 27 - November 2, 2019, pp. 8947-8956. IEEE, 2019. doi: 10.1109/ICCV.2019.00904. URL https://doi.org/10.1109/ICCV.2019.00904. +Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. ArXiv preprint, abs/1607.06450, 2016. URL https://arxiv.org/abs/1607.06450. +Hangbo Bao, Li Dong, and Furu Wei. BEiT: Bert pre-training of image transformers. arXiv preprint, 2021. +Luisa Bentivogli, Peter Clark, Ido Dagan, and Danilo Giampiccolo. The fifth pascal recognizing textual entailment challenge. In TAC, 2009. 
+Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, pp. 446-461. Springer, 2014. +Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/bit/1457c0d6bcbd4967418bf8ac142f64a-AAbstract.html. +Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9650-9660, 2021. +Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. arXiv preprint arXiv:2209.06794, 2022. +Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO Captions: Data collection and evaluation server. arXiv preprint, 2015. + +Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. UNITER: Universal image-text representation learning. 
In European Conference on Computer Vision (ECCV), 2020. +Jaemin Cho, Jie Lei, Hao Tan, and Mohit Bansal. Unifying vision-and-language tasks via text generation. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 1931-1942. PMLR, 2021. URL http://proceedings.mlr.press/v139/cho21a.html. +Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. ArXiv preprint, abs/2204.02311, 2022. URL https://arxiv.org/abs/2204.02311. +Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2014, Columbus, OH, USA, June 23-28, 2014, pp. 3606-3613. IEEE Computer Society, 2014. doi: 10.1109/CVPR.2014.461. URL https://doi.org/10.1109/CVPR.2014.461. +Adam Coates, Andrew Ng, and Honglak Lee. An analysis of single-layer networks in unsupervised feature learning. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 215-223. JMLR Workshop and Conference Proceedings, 2011. +Ido Dagan, Oren Glickman, and Bernardo Magnini. The pascal recognising textual entailment challenge. In Machine Learning Challenges Workshop, pp. 177-190. Springer, 2005. +Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. Coatnet: Marrying convolution and attention for all data sizes. Advances in Neural Information Processing Systems, 34:3965-3977, 2021. +Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. 
In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://aclanthology.org/N19-1423. +Shizhe Diao, Jiaxin Bai, Yan Song, Tong Zhang, and Yonggang Wang. Zen: Pre-training chinese text encoder enhanced by n-gram representations. In Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 4729-4740, 2020. +Shizhe Diao, Ruijia Xu, Hongjin Su, Yilei Jiang, Yan Song, and Tong Zhang. Taming pre-trained language models with n-gram representations for low-resource domain adaptation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 3336-3349, 2021. +Shizhe Diao, Zhichao Huang, Ruijia Xu, Xuechun Li, Yong Lin, and Tong Zhang. Black-box prompt learning for pre-trained language models. Transactions on Machine Learning Research, 2023. URL https://openreview.net/forum?id=IvsGP7xRvm. +Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, et al. Cogview: Mastering text-to-image generation via transformers. Advances in Neural Information Processing Systems, 34, 2021. +William B. Dolan and Chris Brockett. Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005), 2005. URL https://aclanthology.org/I05-5002. +Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. Unified language model pre-training for natural language understanding and generation. In NeurIPS, pp. 13042-13054, 2019. 
+ +Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=YicbFdNTTy. +Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873-12883, 2021. +Zhiyi Fu, Wangchunshu Zhou, Jingjing Xu, Hao Zhou, and Lei Li. Contextual representation learning beyond masked language modeling. In ACL (1), pp. 2701-2714. Association for Computational Linguistics, 2022. +Danilo Giampiccolo, Bernardo Magnini, Ido Dagan, and Bill Dolan. The third PASCAL recognizing textual entailment challenge. In Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 1-9, Prague, 2007. Association for Computational Linguistics. URL https://aclanthology.org/W07-1401. +Ross B. Girshick. Fast R-CNN. In 2015 IEEE International Conference on Computer Vision, ICCV 2015, Santiago, Chile, December 7-13, 2015, pp. 1440-1448. IEEE Computer Society, 2015. doi: 10.1109/ICCV.2015.169. URL https://doi.org/10.1109/ICCV.2015.169. +Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch sgd: Training imagenet in 1 hour. ArXiv preprint, abs/1706.02677, 2017a. URL https://arxiv.org/abs/1706.02677. +Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the V in VQA matter: Elevating the role of image understanding in visual question answering. 
In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pp. 6325-6334. IEEE Computer Society, 2017b. doi: 10.1109/CVPR.2017.670. URL https://doi.org/10.1109/CVPR.2017.670. +R Bar Haim, Ido Dagan, Bill Dolan, Lisa Ferro, Danilo Giampiccolo, Bernardo Magnini, and Idan Szpektor. The second pascal recognising textual entailment challenge. In Proceedings of the Second PASCAL Challenges Workshop on Recognising Textual Entailment, volume 7, 2006. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 770-778. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.90. URL https://doi.org/10.1109/CVPR.2016.90. +Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross B. Girshick. Momentum contrast for unsupervised visual representation learning. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 9726-9735. IEEE, 2020. doi: 10.1109/CVPR42600.2020.00975. URL https://doi.org/10.1109/CVPR42600.2020.00975. +Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16000-16009, 2022. +Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6626-6637, 2017. 
URL https://proceedings.neurips.cc/paper/2017/hash/8a1d694707eb0fefe65871369074926d-Abstract.html. +Kashmir Hill and Jeremy White. Designed to deceive: Do these people look real to you? The New York Times, 11, 2020. + +Yupan Huang, Hongwei Xue, Bei Liu, and Yutong Lu. Unifying multimodal transformer for bidirectional image and text generation. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 1138-1147, 2021. +Zhicheng Huang, Zhaoyang Zeng, Bei Liu, Dongmei Fu, and Jianlong Fu. Pixel-BERT: Aligning image pixels with text by deep multi-modal transformers. arXiv preprint, 2020. +Taichi Iki and Akiko Aizawa. Effect of visual extensions on natural language understanding in vision-and-language models. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 2189-2196, 2021. +Shankar Iyer, Nikhil Dandekar, Kornél Csernai, et al. First quora dataset release: Question pairs. data.quora.com, 2017. +Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 4904-4916. PMLR, 2021. URL http://proceedings.mlr.press/v139/jia21b.html. +Wonjae Kim, Bokyung Son, and Ildoo Kim. Vilt: Vision-and-language transformer without convolution or region supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 5583-5594. PMLR, 2021. URL http://proceedings.mlr.press/v139/kim21k.html. +Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. 
In Proceedings of the IEEE international conference on computer vision workshops, pp. 554-561, 2013. +Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. +Yann LeCun and Corinna Cortes. MNIST handwritten digit database. 2010. +Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7871-7880, Online, 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.703. URL https://aclanthology.org/2020.acl-main.703. +Junnan Li, Ramprasaath R Selvaraju, Akhilesh Deepak Gotmare, Shafiq Joty, Caiming Xiong, and Steven Hoi. Align before fuse: Vision and language representation learning with momentum distillation. In Conference on Neural Information Processing Systems (NeurIPS), 2021. +Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. VisualBERT: A simple and performant baseline for vision and language. arXiv preprint, 2019. +Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10965-10975, 2022a. +Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. Oscar: Object-semantics aligned pre-training for vision-language tasks. In European Conference on Computer Vision (ECCV), 2020. +Yehao Li, Jiahao Fan, Yingwei Pan, Ting Yao, Weiyao Lin, and Tao Mei. Uni-eden: Universal encoder-decoder network by multi-granular vision-language pre-training. ArXiv preprint, abs/2201.04026, 2022b. URL https://arxiv.org/abs/2201.04026. 
+ +Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. RoBERTa: A robustly optimized bert pretraining approach. arXiv preprint, 2019. +Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019. URL https://openreview.net/forum?id=Bkg6RiCqY7. +Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 13-23, 2019. URL https://proceedings.neurips.cc/paper/2019/hash/c74d97b01eae257e44aa9d5bade97baf-Abstract.html. +Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013. +Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory F. Diamos, Erich Elsen, David García, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, and Hao Wu. Mixed precision training. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=r1gs9JgRZ. +Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722-729. IEEE, 2008. +Rui Pan, Shizhe Diao, Jianlin Chen, and Tong Zhang. Extremebert: A toolkit for accelerating pretraining of customized bert. arXiv preprint arXiv:2211.17201, 2022. 
+Omkar M. Parkhi, Andrea Vedaldi, Andrew Zisserman, and C. V. Jawahar. Cats and dogs. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, Providence, RI, USA, June 16-21, 2012, pp. 3498-3505. IEEE Computer Society, 2012. doi: 10.1109/CVPR.2012.6248092. URL https://doi.org/10.1109/CVPR.2012.6248092. +Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 2227-2237, New Orleans, Louisiana, 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1202. URL https://aclanthology.org/N18-1202. +Jason Phang, Thibault Févry, and Samuel R. Bowman. Sentence encoders on stilts: Supplementary training on intermediate labeled-data tasks. ArXiv, abs/1811.01088, 2018. +Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. 2018. +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8748-8763. PMLR, 2021. URL http://proceedings.mlr.press/v139/radford21a.html. +Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446, 2021. 
+ +Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research (JMLR), 2020. +Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pp. 2383-2392, Austin, Texas, 2016. Association for Computational Linguistics. doi: 10.18653/v1/D16-1264. URL https://aclanthology.org/D16-1264. +Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8821-8831. PMLR, 2021. URL http://proceedings.mlr.press/v139/ramesh21a.html. +Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. +Steven J. Rennie, Etienne Marcheret, Youssef Mroueh, Jerret Ross, and Vaibhava Goel. Self-critical sequence training for image captioning. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pp. 1179-1195. IEEE Computer Society, 2017. doi: 10.1109/CVPR.2017.131. URL https://doi.org/10.1109/CVPR.2017.131. +Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015. 
+Andrei A Rusu, Neil C Rabinowitz, Guillaume Desjardins, Hubert Soyer, James Kirkpatrick, Koray Kavukcuoglu, Razvan Pascanu, and Raia Hadsell. Progressive neural networks. arXiv preprint arXiv:1606.04671, 2016.
+Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022.
+Tim Salimans, Ian J. Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. In Daniel D. Lee, Masashi Sugiyama, Ulrike von Luxburg, Isabelle Guyon, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, pp. 2226-2234, 2016. URL https://proceedings.neurips.cc/paper/2016/hash/8a3363abe792db2d8761d6403605aab7-Abstract.html.
+Roy Schwartz, Jesse Dodge, Noah A Smith, and Oren Etzioni. Green ai. Communications of the ACM, 63(12):54-63, 2020.
+Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. ArXiv preprint, abs/2112.04482, 2021. URL https://arxiv.org/abs/2112.04482.
+Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pp. 1631-1642, Seattle, Washington, USA, 2013. Association for Computational Linguistics. URL https://aclanthology.org/D13-1170.
+Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. VL-BERT: pretraining of generic visual-linguistic representations. 
In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020. URL https://openreview.net/forum?id=SygXPaEYvH. + +Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6418-6428, Florence, Italy, 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1644. URL https://aclanthology.org/P19-1644. +Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 2818-2826. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.308. URL https://doi.org/10.1109/CVPR.2016.308. +Hao Tan and Mohit Bansal. LXMERT: Learning cross-modality encoder representations from transformers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 5100-5111, Hong Kong, China, 2019a. Association for Computational Linguistics. doi: 10.18653/v1/D19-1514. URL https://aclanthology.org/D19-1514. +Hao Tan and Mohit Bansal. LXMERT: Learning cross-modality encoder representations from transformers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 5100-5111, Hong Kong, China, 2019b. Association for Computational Linguistics. doi: 10.18653/v1/D19-1514. URL https://aclanthology.org/D19-1514. +Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. 
Training data-efficient image transformers & distillation through attention. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10347-10357. PMLR, 2021. URL http://proceedings.mlr.press/v139/touvron21a.html.
+Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6306-6315, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/7a98af17e63a0ac09ce2e96d03992fbc-Abstract.html.
+Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 5998-6008, 2017. URL https://proceedings.neurips.cc/paper/2017/hash/3f5ee243547dee91fbd053c1c4a845aa-Abstract.html.
+Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019. URL https://openreview.net/forum?id=rJ4km2R5t7.
+Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. 
Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. ArXiv preprint, abs/2202.03052, 2022. URL https://arxiv.org/abs/2202.03052. +Wenhui Wang, Hangbo Bao, Li Dong, and Furu Wei. Vlmo: Unified vision-language pre-training with mixture-of-modality-experts. ArXiv preprint, abs/2111.02358, 2021a. URL https://arxiv.org/abs/2111.02358. +Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. Simvlm: Simple visual language model pretraining with weak supervision. arXiv preprint, 2021b. + +Alex Warstadt, Amanpreet Singh, and Samuel R. Bowman. Neural network acceptability judgments. Transactions of the Association for Computational Linguistics, 7:625-641, 2019. doi: 10.1162/tacl_a_00290. URL https://aclanthology.org/Q19-1040. +Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. Emergent abilities of large language models. Transactions on Machine Learning Research, 2022. URL https://openreview.net/forum?id=yzkSU5zdwD. Survey Certification. +Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 1112–1122, New Orleans, Louisiana, 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1101. URL https://aclanthology.org/N18-1101. +Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. Google's neural machine translation system: Bridging the gap between human and machine translation. ArXiv preprint, abs/1609.08144, 2016. URL https://arxiv.org/abs/1609.08144. 
+Ning Xie, Farley Lai, Derek Doran, and Asim Kadav. Visual entailment: A novel task for fine-grained image understanding. arXiv preprint, 2019. +Canwen Xu, Wangchunshu Zhou, Tao Ge, Furu Wei, and Ming Zhou. BERT-of-theseus: Compressing BERT by progressive module replacing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 7859-7869, Online, 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.633. URL https://aclanthology.org/2020.emnlp-main.633. +Haiyang Xu, Ming Yan, Chenliang Li, Bin Bi, Songfang Huang, Wenming Xiao, and Fei Huang. E2E-VLP: End-to-end vision-language pre-training enhanced by visual learning. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 503-513, Online, 2021a. Association for Computational Linguistics. doi: 10.18653/v1/2021.acl-long.42. URL https://aclanthology.org/2021.acl-long.42. +Jingjing Xu, Wangchunshu Zhou, Zhiyi Fu, Hao Zhou, and Lei Li. A survey on green deep learning. ArXiv preprint, abs/2111.05193, 2021b. URL https://arxiv.org/abs/2111.05193. +Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. Attngan: Fine-grained text to image generation with attentional generative adversarial networks. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pp. 1316-1324. IEEE Computer Society, 2018. doi: 10.1109/CVPR.2018.00143. URL http://openaccess.thecvf.com/content_cvpr_2018/html/Xu_AttnGAN_Fine-Grained_Text_CVPR_2018_paper.html. +Zhengyuan Yang, Zhe Gan, Jianfeng Wang, Xiaowei Hu, Faisal Ahmed, Zicheng Liu, Yumao Lu, and Lijuan Wang. Crossing the format boundary of text and boxes: Towards unified vision-language modeling. ArXiv, abs/2111.12085, 2021. +Yang You, Igor Gitman, and Boris Ginsburg. 
Large batch training of convolutional networks. ArXiv preprint, abs/1708.03888, 2017. URL https://arxiv.org/abs/1708.03888. +Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022. +Ning Yu, Vladislav Skripniuk, Sahar Abdelnabi, and Mario Fritz. Artificial fingerprinting for generative models: Rooting deepfake attribution in training data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 14448-14457, 2021. + +Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, Ce Liu, Mengchen Liu, Zicheng Liu, Yumao Lu, Yu Shi, Lijuan Wang, Jianfeng Wang, Bin Xiao, Zhen Xiao, Jianwei Yang, Michael Zeng, Luowei Zhou, and Pengchuan Zhang. Florence: A new foundation model for computer vision. arXiv preprint, 2021. +Yan Zeng, Xinsong Zhang, and Hang Li. Multi-grained vision language pre-training: Aligning texts with visual concepts. ArXiv preprint, abs/2111.08276, 2021. URL https://arxiv.org/abs/2111.08276. +Han Zhang, Weichong Yin, Yewei Fang, Lanxin Li, Boqiang Duan, Zhihua Wu, Yu Sun, Hao Tian, Hua Wu, and Haifeng Wang. Ernie-vilg: Unified generative pre-training for bidirectional vision-language generation. ArXiv preprint, abs/2112.15283, 2021a. URL https://arxiv.org/abs/2112.15283. +Pengchuan Zhang, Xiujun Li, Xiaowei Hu, Jianwei Yang, Lei Zhang, Lijuan Wang, Yejin Choi, and Jianfeng Gao. VinVL: Revisiting visual representations in vision-language models. In Conference on Computer Vision and Pattern Recognition (CVPR), 2021b. +Wangchunshu Zhou, Canwen Xu, Tao Ge, Julian J. McAuley, Ke Xu, and Furu Wei. BERT loses patience: Fast and robust inference with early exit. 
In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/hash/d4dd111a4fd973394238aca5c05bebe3-Abstract.html.
+Wangchunshu Zhou, Tao Ge, Canwen Xu, Ke Xu, and Furu Wei. Improving sequence-to-sequence pretraining via sequence span rewriting. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 571-582, Online and Punta Cana, Dominican Republic, 2021a. Association for Computational Linguistics. doi: 10.18653/v1/2021.emnlp-main.45. URL https://aclanthology.org/2021.emnlp-main.45.
+Wangchunshu Zhou, Dong-Ho Lee, Ravi Kiran Selvam, Seyeon Lee, and Xiang Ren. Pre-training text-to-text transformers for concept-centric common sense. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021b. URL https://openreview.net/forum?id=3k20LAiHYL2.
+Wangchunshu Zhou, Canwen Xu, and Julian McAuley. BERT learns to teach: Knowledge distillation with meta learning. In ACL (1), pp. 7037-7049. Association for Computational Linguistics, 2022a.
+Wangchunshu Zhou, Yan Zeng, Shizhe Diao, and Xinsong Zhang. Vlue: A multi-task benchmark for evaluating vision-language models. CoRR, abs/2205.15237, 2022b.
+Xiao Zhou, Weizhong Zhang, Zonghao Chen, Shizhe Diao, and Tong Zhang. Efficient neural network training via forward and backward propagation sparsification. Advances in Neural Information Processing Systems, 34, 2021c.
+Xiao Zhou, Weizhong Zhang, Hang Xu, and Tong Zhang. Effective sparsification of neural networks with global sparsity constraint. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3599-3608, 2021d.
+Xiao Zhou, Renjie Pi, Weizhong Zhang, Yong Lin, and Tong Zhang. 
Probabilistic bilevel coreset selection. In International Conference on Machine Learning. PMLR, 2022c. +Minfeng Zhu, Pingbo Pan, Wei Chen, and Yi Yang. DM-GAN: dynamic memory generative adversarial networks for text-to-image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 5802-5810. Computer Vision Foundation / IEEE, 2019. doi: 10.1109/CVPR.2019.00595. URL http://openaccess.thecvf.com/content_CVPR_2019/html/Zhu_DM-GAN_Dynamic_Memory_Generative_Adversarial_Networks_for_Text-To-Image_Synthesis_CVPR_2019_paper.html. + +# A APPENDIX + +# A.1 DETAILS OF HYPER-PARAMETERS + +Pre-training Our model is a base-size Transformer implemented with a 6-layer encoder and a 6-layer decoder, 768 dimensions for hidden states, 512 for maximum input length, and 3072 for intermediate size. We train our model from scratch without initializing the Transformer encoder and decoder. The image encoder is initialized from ResNet-101 (He et al., 2016) with ImageNet weights since we find a warm start provides a reliable visual representation and helps the convergence. For models pre-training on large-scale data, we optimize 10 epochs while for other small-scale datasets, we optimize 40 epochs with the AdamW optimizer. The weight decay is set to 0.01 with $\beta_{1} = 0.9$ , $\beta_{2} = 0.999$ . The learning rate is 2e-4 with a warm-up period for the first $2\%$ steps and linearly decayed to 0 after $2\%$ of the total training steps. In each batch, there are 8,192 image-text pairs for text-to-image generation and image-to-text generation with 8,192 text-only documents for text-to-text generation. We use center-crop to resize each image to the size of $256\times 256$ , which is the only data augmentation used during training. All pre-training experiments are conducted on 32GB NVIDIA V100 GPUs. We adopt mixed-precision (Micikevicius et al., 2018) to accelerate training and save memory. 
The model trained on the largest data takes around 10 days on 1024 V100 GPUs. The default settings are shown in Table 6. We adopt dynamic masking in our experiments, where the masking ratio is randomly sampled from a uniform distribution $\mathrm{U}(0,1)$ . + +Fine-tuning The learning rate is $\in [1\mathrm{e} - 5,5\mathrm{e} - 5]$ and our model is optimized by AdamW. Because the image resolution differs between pre-training and fine-tuning, the position parameters are adapted using linear interpolation. For all downstream tasks, we apply random resize crops and horizontal flips augmentation during training. All fine-tuning experiments are conducted on 32GB NVIDIA V100 GPUs. The default settings for text classification, image classification, multi-modal understanding and image-to-text generation are shown in Tables 7, 8, and 9, respectively. + +
configvalue
optimizerAdamW (Loshchilov & Hutter, 2019)
learning rate2e-4
weight decay0.01
optimizer momentumβ1, β2=0.9, 0.999
batch size8192
learning rate schedulelinear decay
warmup ratio (Goyal et al., 2017a)0.02
training epochs{10, 40}
augmentationRandomResizedCrop
+ +Table 6: Pre-training setting. + +
configvalue
optimizerAdamW
learning rate{1e-5, 2e-5, 5e-5}
weight decay0.01
optimizer momentumβ1, β2=0.9, 0.999
batch size{16, 32, 64}
learning rate schedulelinear decay
warmup ratio0.1
training epochs{5, 10}
+ +Table 7: Text classification: GLUE setting. + +# A.2 DETAILS OF DOWNSTREAM TASKS + +Language Understanding We conduct experiments on GLUE benchmark including MNLI (Williams et al., 2018), CoLA (Warstadt et al., 2019), MRPC (Dolan & Brockett, 2005), QQP (Iyer et al., 2017), SST-2 (Socher et al., 2013), QNLI (Rajpurkar et al., 2016), + +
configvalue
optimizerLARS (You et al., 2017)
base learning rate0.1
weight decay0
optimizer momentum0.9
batch size16384
learning rate schedulecosine decay
warmup epochs10
training epochs90
augmentationRandomResizedCrop
+ +Table 8: Image classification: Linear probing setting. + +
configvalue
optimizerAdamW
learning rate[1e-5, 5e-5]
weight decay0.02
optimizer momentumβ1, β2=0.9, 0.999
batch size1024
learning rate schedulelinear decay
warmup epochs[2, 5]
training epochs[5, 15]
label smoothing (Szegedy et al., 2016)0.1
augmentationRandomResizedCrop, HorizontalFlips
+ +Table 9: Multi-modal understanding and image-to-text generation: fine-tuning setting. + +RTE (Dagan et al., 2005; Haim et al., 2006; Giampiccolo et al., 2007; Bentivogli et al., 2009), and STS-B (Agirre et al., 2007). We follow the practice of BART (Lewis et al., 2020) and feed the same input to the encoder and decoder, and the hidden state of the final decoder token is fed into a new multi-class linear classifier or regression head. MNLI results are an average of MNLI-m and MNLI-mm. MRPC and QQP results are average of accuracy and F1. Matthews correlation coefficient (MCC) is reported for CoLA and Pearson correlation coefficient (PCC) is reported for STS-B. + +Vision Understanding We conduct vision experiments in both fine-tuning and linear evaluation (linear eval). The linear evaluation follows a common practice (Caron et al., 2021; He et al., 2020; Singh et al., 2021) in self-supervised learning to evaluate the representation quality, where the pre-trained backbone model is frozen and a new linear classifier is appended on top of it. We choose 12 popular datasets: ImageNet (Russakovsky et al., 2015), Food101 (Bossard et al., 2014), CIFAR10 (Krizhevsky et al., 2009), CIFAR100 (Krizhevsky et al., 2009), Cars (Krause et al., 2013), Aircraft (Maji et al., 2013), DTD (Cimpoi et al., 2014), Pets (Parkhi et al., 2012), Flowers102 (Nilsback & Zisserman, 2008), MNIST (LeCun & Cortes, 2010), STL10 (Coates et al., 2011), and Country211 (Radford et al., 2021). + +Multi-modal Understanding We consider three popular multi-modal tasks: VQAv2 (Goyal et al., 2017b), SNLI-VE (Xie et al., 2019) and NLVR2 (Suhr et al., 2019) to evaluate our model's multi-modal understanding ability. For VQAv2, following ALBEF (Li et al., 2021), the image and question are fed to the encoder and the decoder generates answers based on the multi-modal embeddings. For SNLI-VE, we follow SimVLM (Wang et al., 2021b) to feed the image to the encoder and the text to the decoder. 
A classifier is appended on top of our pre-trained model, and it is trained to predict the result based on the last hidden states of the decoder. For NLVR2, two input pairs are constructed, each of them including one image and the textual description. The prediction is made based on the concatenation of these two embeddings following SimVLM (Wang et al., 2021b). The resolutions for VQAv2, SNLI-VE, NLVR2 are 480, 384, 384, respectively. + +Text-to-Image Generation The text-to-image task requires the model to understand the textual instruction first and then draw the image according to the input's intention. The input text is fed to our encoder, and our decoder will generate visual tokens one by one. After obtaining visual tokens, they are decoded into a raw image by an image decoder. We directly use an off-the-shelf image decoder from VQGAN (Esser et al., 2021). Following (Ramesh et al., 2021) we directly evaluate our + +
Data TypeDatasetImage Domain#Images#Captions#Total
In-Domain Data (ID)COCOCOCO110.3K551.7K1.3M
Visual GenomeCOCO108.2K759.0K
Small-scale Web Data (SWD)SBUWeb859.7K859.7K14.9M
CC-3MWeb2.9M2.9M
CC-12MWeb11.1M11.1M
Object-Region Data (ORD)VG regionsCOCO108.2K3.6M17.0M
VG objectsCOCO108.2K925.6K
COCO objectsCOCO110.3K736.6K
RefcocoCOCO27.9K589.9K
Open ImageFlickr1.7M7.5M
Obj365Flickr577.6K3.6M
Vision Data (VD)ImageNet-21KImageNet13.2M13.2M13.2M
Large-scale Web Data (LWD)DAVINCI-200MWeb205.6M205.6M601.3M
LAION-400MWeb395.7M395.7M
Text Data (TD)C4Web--800GB
+
+Table 10: Statistics of the pre-training datasets. #Images, #Captions, and #Total denote the number of images, the number of image-text pairs, and the total number of image-text pairs, respectively.
+
+pre-trained model on 30,000 images randomly sampled from COCO (Chen et al., 2015) validation split. Both Fréchet Inception Distance (FID) (Heusel et al., 2017) and Inception Score (IS) (Salimans et al., 2016) are reported. The image resolution is 256.
+
+Image-to-Text Generation For image-to-text generation (also called image captioning), the image is given to encoder and the decoder will generate the corresponding caption. Our experiments are conducted on COCO dataset (Chen et al., 2015) with cross-entropy optimization. Other task-specific techniques such as CIDEr optimization (Rennie et al., 2017) are not introduced. The image resolution is 480. We also conduct zero-shot captioning experiments on NoCaps (Agrawal et al., 2019) and VLUE (Zhou et al., 2022b).
+
+# A.3 PRE-TRAINING DATASETS
+
+Since existing studies pre-trained their models on different corpora, some of which are publicly available (e.g., CC-3M, CC-12M) while some are in-house datasets (e.g., ALIGN (Jia et al., 2021)), fair comparison is difficult. Comparing methods only by their state-of-the-art performance would underestimate the potential of this line of research. Therefore, we propose several practical settings, including small-scale and large-scale, and then conduct detailed comparisons on them in section 5.1.
+
+We collect a large set of datasets with diverse distributions for pre-training. According to its source, we divide them into in-domain, small-scale web data, object-region data, vision data, and large-scale web data. The statistics and details are shown in Table 10. Most of them are naturally image-text pairs, while to enrich our corpus, we leverage object descriptions, region descriptions, and vision data (i.e., ImageNet). 
For objects and regions, we crop them from the original image according to their bounding box. The text part is composed according to a human-written template and objects. For example, the prompt template is "This image contains [OBJ_A] and [OBJ_B]", where [OBJ_A] and [OBJ_B] are two object names from the data. For vision data, because they are usually labeled with a single word or short phrase, we compose a description with prompt templates such as "A picture of [LABEL]" or "The image contains [LABEL]". For example, "A picture of cat" or "The image contains cat". We curated a dataset containing about 205.6M image-text pairs, which are available publicly on the internet. The data distribution is similar to LAION-400M. Because both are from web images, we merge them into large-scale web data (LWD).
+
+# A.4 REPRODUCTION OF SIMVLM
+
+Since SimVLM is not open-sourced, we need to reproduce it by ourselves. There are two main difficulties in the reproduction: 1. it uses 1.8 billion in-house data; 2. the configurations (e.g., parameter size, number of layers) of its base model are not clearly stated. However, there are still some clues in Section 4.4 of the SimVLM paper, where they propose a $\mathrm{SimVLM}_{small}$ model with 8
+
+layers, 512 embedding dimensions, and trained on about 200M web data. To demonstrate the success of our replication, we train a $\mathrm{SimVLM}_{small}$ model with the exact same configurations on about 200M web data. We obtain a VQA score of 68.50, surpassing the reported score of 67.43 in the original paper. We argue this result verifies our successful replication.
+
+# A.5 EFFECTS OF COMPUTE
+
+Our model is trained with large compute. To reveal the effects of compute, we visualize the performance improvement trends of SimVLM and DAVINCI as a function of the compute spent. There are two goals: 1) to compare better with prior work, and 2) to show whether that level of pre-training compute was necessary. 
We conduct experiments on the image-to-text generation task under both zero-shot and fine-tuning settings. The results are shown in Figure 2. It is observed that with the increase in compute, both models are improved significantly and converged at $40\%$ of compute (zero-shot) and $80\%$ of compute (fine-tuning), respectively. Large compute is especially helpful for fine-tuning settings. After convergence, our model outperforms SimVLM consistently in these two settings.
+
+![](images/dc770b867dbcb78299001f30d829f53084c2026e823f49908e9faeae97544615.jpg)
+(a) COCO Captioning (Zero-shot)
+
+![](images/51ca548619b1916413ba2a86c8edd6a847002c4995aac6c96645992ad6236625.jpg)
+(b) COCO Captioning (Fine-tuning)
+Figure 2: The effects of compute. X-axis is the percentage of compute and Y-axis is the CIDEr score on the COCO captioning task.
+
+# A.6 EFFECTS OF MASKING STRATEGIES
+
+In our experiments, we adopt dynamic masking, where the masking ratio is sampled from a uniform distribution $\mathrm{U}(0,1)$ . The prefix ratio could be 0, where the prefix image is none, and the model is forced to predict the whole image with the input caption. There are other designs to mask images. Here we compared three different masking strategies: 1) masked image modeling (randomly masking some patches), 2) in-painting (randomly masking some continuous spans in the middle of the image), and 3) suffix-painting (ours). The results are shown in Table 11. Both masked image modeling and in-painting are effective and competitive. It is observed that suffix-painting is better than masked image modeling and in-painting across all tasks, demonstrating that suffix-painting works well. 
MethodCOCOB@4 / CVQA AccSNLI-VE AccNLVR2 AccImageNet AccFood101 AccCIFAR10 AccMNLI AccSST-2 AccText2Image IS / FID
No Pre-training32.1 / 96.7152.7354.2351.08-*-*-*66.3279.84-*
MIM34.7 / 113.468.1875.3469.6648.4656.9572.7981.7289.849.50 / 74.13
In-painting34.5 / 112.567.4675.4168.6647.5054.3871.2081.5589.849.97 / 68.15
Suffix-painting (ours)35.8 / 117.369.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
Token Projection17.7 / 49.252.1371.1152.0115.1125.6261.0182.0190.2511.89 / 60.96
Patch Projection25.7 / 79.557.6971.9257.4536.2344.3169.4081.7390.0511.41 / 61.87
ResNet Feature (ours)35.8 / 117.369.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
+ +Table 11: The effects of masking strategies and image feature extraction on COCO Captions, VQA, SNLI-VE, NLVR2, ImageNet, Food101, CIFAR10, MNLI, SST-2, and text-to-image generation. MIM denotes masked image modeling, where some patches are randomly sampled and masked. Because linear probe and zero-shot text-to-image generation require a pre-trained model to be frozen, the "No Pre-training" results on ImageNet, Food101, CIFAR10, and Text2Image are not reported and labeled by * . + +# A.7 EFFECTS OF IMAGE FEATURE EXTRACTION + +There are several different ways to extract image features. We compare three different image representation methods: 1) token projection (projecting the prefix tokens to the hidden dimension of the backbone network on the token-level), 2) patch projection (similar to ViT embedding, we split an image into fixed-size patches, embed each of them by a trainable linear projection on the pixel-level), and 3) ResNet feature extraction (ours). The comparison is shown in Table 11. From the results, we observed that ResNet feature extraction outperforms token projection and patch projection by a large margin. Therefore, we decided to adopt ResNet to extract image features. + +# A.8 SCALING EFFECTS OF DATA SIZE + +In this section, we explore the scaling effects of our model. We plot the trends with the increase in data size on four tasks: COCO captioning, VQA, SNLI-VE, and NLVR2. The performance improvement shown in Figure 3 demonstrates that both SimVLM and DAVinci are scaling well with pre-training data size. In addition, DAVinci consistently outperforms SimVLM on different data sizes across these tasks. + +![](images/52037f741d36ecb6d936b7cace17e5d3bfda934c12d8d6bbcc289cc7e6d0baff.jpg) +Figure 3: The scaling effects of data size. 
+ +![](images/3c3a2f391ef45f0474996b6719113037e2819f577a5d0a2c99dd72e172b1387b.jpg) + +![](images/945d7d081aa0c3c4fd9057adad00d6148ccc678d76f13c6ae40b1520b0157675.jpg) + +![](images/499cc8d70d0c99af2d35676d358599f2d8f0bb4f50ed884695c5262339433f48.jpg) + +# A.9 FULL COMPARISON WITH EXISTING METHODS + +In Table 12, we display a comprehensive comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks. + +# A.10 LIMITATION AND SOCIETAL IMPACTS + +Limitation. Like most of the previous pre-training studies, the entire project consumed 40 V100 GPU years on an in-house computing cluster with large electricity costs. We tried to keep our model size small enough, but there is still potential for efficiency improvements such as sparse training (Zhou et al., 2021d;c), dataset distillation (Zhou et al., 2022c), and progressive training (Rusu et al., 2016). We will explore those techniques to improve the training efficiency and reduce the carbon footprint so that it can adhere to proposals on "green" deep learning (Schwartz et al., 2020; Xu et al., 2021b). Furthermore, although we have tried our best to include as many tasks as we can to demonstrate the versatility of DAVinci, we believe our method can be expanded to more tasks (e.g., machine translation, summarization, object detection, etc.), modalities (e.g., video and speech). We leave these investigations to future work. + +Potential Societal Impacts. Our model has image generation ability with risk of abuse, like fake portraits on social media (Hill & White, 2020), which is a common potential risk in image generation research. Viable solutions are watermarking (Yu et al., 2021) and introducing a strict user license. + +# A.11 VISUALIZATION OF IMAGE GENERATION + +In this section, we conduct a qualitative analysis by visualizing the generation samples. Figure 4 shows the comparison with DALLE and OFA with the same query. More generated samples are shown in Figures 5. + +
Model#Params.Text MNLI AccVision ImageNet LE / FTImage2Text COCO B@4 / CText2Image COCO IS† / FID↓Multi-modal VQA test-dev / test-standardNLVR2 dev / test-P
Encoder-only Multi-modal Models
VisualBERT (Li et al., 2019)170M81.60---70.80 / 71.0067.40 / 67.00
ViLBERT (Lu et al., 2019)274M79.90---70.55 / 70.92-
VL-BERT (Su et al., 2020)170M81.20---71.16 / --
LXMERT (Tan & Bansal, 2019a)240M80.40---72.42 / 72.5474.90 / 74.50
OSCAR (Li et al., 2020)155M--36.5 / 123.7-73.16 / 73.4478.07 / 78.36
VinVL (Zhang et al., 2021b)157M--38.2 / 129.3-75.95 / 76.1282.05 / 83.08
ViLT (Kim et al., 2021)88M----70.85 / -74.91 / 75.57
ALBEF (Li et al., 2021)210M----75.84 / 76.0482.55 / 83.14
X-VLM (Zeng et al., 2021)240M--39.6 / 132.6-78.22 / 78.3784.41 / 84.76
VLMO (Wang et al., 2021a)----76.64 / 76.8982.77 / 83.34
Encoder-Decoder Multi-modal Models
UNICORN (Yang et al., 2021)--35.8 / 119.1-69.20 / 69.40-/-
Uni-ENDN (Li et al., 2022b)110M----72.20 / 72.50-/-
Pixel-BERT (Huang et al., 2020)144M----74.45 / 74.5576.50 / 77.20
E2E-VLP (Xu et al., 2021a)94M--36.2 / 117.3-73.25 / 73.6777.25 / 77.96
VL-T5 (Cho et al., 2021)220M--34.5 / 116.5-- / 70.3074.60 / 73.60
VL-BART (Cho et al., 2021)220M--35.1 / 116.6-- / 71.3071.70 / 70.30
Text2Image Models
AttnGAN (Xu et al., 2018)---23.30 / 35.20-/--/-
DM-GAN (Zhu et al., 2019)---32.20 / 26.50-/--/-
DALLE (Ramesh et al., 2021) (250M)12B---17.90 / 27.50-/--/-
DALLE (Ramesh et al., 2021) (640M)†82M---15.79 / 29.22-/--/-
CogView (Ding et al., 2021)4B---18.20 / 27.10-/--/-
Unified Models
Unifying (Huang et al., 2021)228M--37.3 / 122.6- / 29.90-/--/-
FLAVA (Singh et al., 2021)240M80.3375.54 / ---72.80 / 72.49-/-
SimVLM (Wang et al., 2021b) (640M)†153M83.2776.04 / -38.5 / 128.7-75.04 / 75.0378.82 / 79.72
SimVLM (Wang et al., 2021b) (1.8B)83.4080.60 / -39.0 / 134.8-77.87 / 78.1481.72 / 81.77
OFA (Wang et al., 2022)182M84.30- / 82.2041.0 / 138.221.50* / 20.80*78.00 / 78.10-/-
Florence (Yuan et al., 2021)637M-- / 90.05-/--/-80.16 / 80.36-/-
DAVINCI154M83.1378.81 / 83.9239.2 / 130.417.44 (22.41*) / 24.21 (19.82*)76.32 / 76.4480.03 / 80.25
+ +Table 12: Comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks. All results are from base-size models. LE and FT denote linear evaluation and fine-tuning performance, respectively. Image2Text results are reported without CIDEr optimization. $\dagger$ are our reproduced models. \* are the results after fine-tuning. SimVLM (1.8B) and OFA are pre-trained with much larger corpus or human-labeled data of many downstream tasks, and thus they are not comparable and are labeled in gray. Florence (Yuan et al., 2021) is pre-trained with a much larger model size (Florence-CoSwin-H, 637M) and more pre-training data (900M), so the numbers are in grey. bold denotes the best across unified models. + +![](images/722f92ac76feb7850a7f7cba9696402edf4c6157bdce7d961f63b21c15fe066b.jpg) +Figure 4: Comparison with DALLE and OFA on text-to-image generation. + +![](images/297fefe037e02ba8e64d9f257f03f3f9c71cac5255960a9c1c2a9c3cc25b9063.jpg) +a decorative flower vase full of purple and yellow flowers + +![](images/6b482fa8a2b64ecd1c991ca74f4335f170d99447e4e635433fa2927be9d85238.jpg) +a vase full of flowers on table + +![](images/fe233a6f67b7a479910d1f5586a6a4079b19b40db5df8b63a59bdc13b5cd5202.jpg) +a park with flowers on a sunny day + +![](images/2f5d3c55afb3ca03a07dbd6276c169f9f965004b7744b329c7552ddbb6854b64.jpg) +a fire hydrant sitting in a front yard next to a sign + +![](images/e3a65ee953198489cd255b1f3e1e2a38f12dc1148d8dd1ae61a3511c904178b6.jpg) +a beach on a sunny day + +![](images/f1b488802f01325f2123d87ee44e8a04fb9be03ceb0ba1552a196b59e290029d.jpg) +a one cart train coming down the railroad tracks + +![](images/f5514e687429ef98d8753069502dabe96b605320ffb28e016e6d69082f9ebe98.jpg) +a red and white boat docked on shore + +![](images/0dc9869347acdff3619a3d1e2aacb15ac1e86cf988aca1bc788bc769b85b6029.jpg) +a picture of a snowy mountain + +![](images/91f1ce8149bb0761730469a97caae51b4e71adcacec38dcab405aa552d71ccac.jpg) +a red 
stop sign on the side of the road + +![](images/f007039c1076ee7599382161422d5509e2fd74404cb6355c368d627a2288aa04.jpg) +a building in front of a roundabout with a tree in the center. + +![](images/88379748a026c2125e6eff7d0bd0b38b68b57639f9a4cd85ec6b14c9758b6abb.jpg) +bathroom with marble walls and counter surrounds a large mirror + +![](images/6fc5ce42118cbaedf06eea0fc1239326338a65fe4d7f2a1d3d22da077667bbd9.jpg) +trees by the river in the mountains + +![](images/63ba22f4a8d7f21f93f0e844fd242ca8207716ecef1d9d5cd5cff058c0d9efa2.jpg) +many fruits on the plate on the table + +![](images/7f297c673584b1a5e2efb6833d3c040379360af2a0bb422a1ae9b5f93ecdf21a.jpg) +a bunch of fruit in a fruit shop + +![](images/a09f965728db9504abf035083d10e0bf5275815384c0c6dd271f2991ae5b4e8c.jpg) +a table set with a sandwich and a drink + +![](images/3a86a45d8e479c156cb76df3ab4306ff16e06d7a50b10407f6dff5ef6481103b.jpg) +noodles and broccoli on a plate +Figure 5: Generation samples by DAVINCI. \ No newline at end of file diff --git a/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/images.zip b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..e9005c671bb91e1681574400035863350d039bf9 --- /dev/null +++ b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04300872f62387fdad39eb4a5f75670f9fd4218ce9ac9d5a8e9e732ded9e42e9 +size 1211688 diff --git a/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/layout.json b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal Learners/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..2a6226fe1d3d4173fe0569ed5de5d9b79b02bb19 --- /dev/null +++ b/2023/Write and Paint_ Generative Vision-Language Models are Unified Modal 
Learners/layout.json @@ -0,0 +1,13857 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "text", + "content": "WRITE AND PAINT: GENERATIVE VISION-LANGUAGE MODELS ARE UNIFIED MODAL LEARNERS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 135, + 168, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 135, + 168, + 145 + ], + "spans": [ + { + "bbox": [ + 111, + 135, + 168, + 145 + ], + "type": "text", + "content": "Shizhe Diao*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 146, + 332, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 146, + 332, + 168 + ], + "spans": [ + { + "bbox": [ + 111, + 146, + 332, + 168 + ], + "type": "text", + "content": "The Hong Kong University of Science and Technology sdiaoaa@connect.ust.hk" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 343, + 135, + 432, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 135, + 432, + 146 + ], + "spans": [ + { + "bbox": [ + 343, + 135, + 432, + 146 + ], + "type": "text", + "content": "Wangchunshu Zhou" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 343, + 147, + 421, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 147, + 421, + 157 + ], + "spans": [ + { + "bbox": [ + 343, + 147, + 421, + 157 + ], + "type": "text", + "content": "ByteDance AI Lab" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 343, + 158, + 512, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 158, + 512, + 168 + ], + "spans": [ + { + "bbox": [ + 343, + 158, + 512, + 168 + ], + "type": "text", + "content": "wangchunshu.zhou@inf.ethz.ch" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 184, + 182, + 196 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 111, + 184, + 182, + 196 + ], + "spans": [ + { + "bbox": [ + 111, + 184, + 182, + 196 + ], + "type": "text", + "content": "Xinsong Zhang†" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 196, + 189, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 196, + 189, + 207 + ], + "spans": [ + { + "bbox": [ + 111, + 196, + 189, + 207 + ], + "type": "text", + "content": "ByteDance AI Lab" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 208, + 299, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 208, + 299, + 219 + ], + "spans": [ + { + "bbox": [ + 111, + 208, + 299, + 219 + ], + "type": "text", + "content": "zhangxinsong.0320@bytedance.com" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 341, + 185, + 399, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 185, + 399, + 196 + ], + "spans": [ + { + "bbox": [ + 341, + 185, + 399, + 196 + ], + "type": "text", + "content": "Jiawei Wang" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 341, + 196, + 466, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 196, + 466, + 208 + ], + "spans": [ + { + "bbox": [ + 341, + 196, + 466, + 208 + ], + "type": "text", + "content": "Shanghai Jiao Tong University" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 341, + 208, + 457, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 208, + 457, + 219 + ], + "spans": [ + { + "bbox": [ + 341, + 208, + 457, + 219 + ], + "type": "text", + "content": "wjw_sjt@sjtu.edu.cn" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 276, + 247, + 334, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 247, + 334, + 258 + ], + "spans": [ + { + "bbox": [ + 276, + 247, + 334, + 258 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 140, + 271, + 470, 
+ 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 271, + 470, + 481 + ], + "spans": [ + { + "bbox": [ + 140, + 271, + 470, + 481 + ], + "type": "text", + "content": "Recent advances in vision-language pre-training have pushed the state-of-the-art on various vision-language tasks, making machines more capable of multi-modal writing (image-to-text generation) and painting (text-to-image generation). However, few studies investigate if these two essential capabilities can be learned together and boost each other, making a versatile and powerful multi-modal foundation model. In this work, we disclose the potential of symmetric generative vision-language pre-training in learning to write and paint concurrently, and propose a new unified modal model, named DAVINCI, trained with prefix language modeling and prefix image modeling, a simple generative self-supervised objective on image-text pairs. Thanks to the proposed prefix multi-modal modeling framework, DAVINCI is simple to train, scalable to huge data, adaptable to both writing and painting tasks, and also strong on other vision, text, and multi-modal understanding tasks. DAVINCI achieves competitive performance on a wide range of 27 generation/understanding tasks and demonstrates the superiority of combining vision/language generative pre-training. Furthermore, we carefully benchmark the performance of different vision-language pre-training objectives on different scales of pre-training datasets on a heterogeneous and broad distribution coverage. Our results demonstrate the potential of exploiting self-supervision in both language and vision inputs, and establish new, stronger baselines for future comparisons at different data scales." 
+ }, + { + "bbox": [ + 140, + 271, + 470, + 481 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 498, + 206, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 498, + 206, + 510 + ], + "spans": [ + { + "bbox": [ + 106, + 498, + 206, + 510 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 517, + 506, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 506, + 694 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 506, + 694 + ], + "type": "text", + "content": "Self-supervised language model pre-training (Peters et al., 2018; Radford et al., 2018; Devlin et al., 2019; Liu et al., 2019; Lewis et al., 2020; Raffel et al., 2020; Brown et al., 2020; Fu et al., 2022; Zhou et al., 2021b; Diao et al., 2020; 2021; Zhou et al., 2021a; Xu et al., 2020; Zhou et al., 2020; 2022a; Pan et al., 2022; Diao et al., 2023) has reshaped the landscape of modern natural language processing (NLP) research, pushing the state-of-the-art of a wide range of NLP tasks. Recently, this success has been transferred to the multi-modal context and resulted in a number of vision-language pretrained models (VLMs) (Lu et al., 2019; Tan & Bansal, 2019a), achieving state-of-the-art results on various vision-language tasks. Most existing VLMs are BERT-like Transformer (Vaswani et al., 2017) encoders pre-trained with a combination of different vision-language pre-training (VLP) objectives: masked multi-modal modeling (Lu et al., 2019; Tan & Bansal, 2019b; Chen et al., 2020; Li et al., 2020), multi-modal alignment prediction (Lu et al., 2019; Tan & Bansal, 2019b; Chen et al., 2020; Li et al., 2020), region of interest feature regression (Tan & Bansal, 2019b), image-text matching (Li et al., 2021; Zeng et al., 2021), to name a few. 
However, the roadmap towards large language models reveals a transition pattern from encoder-only models like BERT (Devlin et al., 2019) / RoBERTa (Liu et al., 2019) to sequence-to-sequence models like T5 (Raffel et al., 2020) / BART (Lewis et al., 2020) and autoregressive models like GPT-3 (Brown et al., 2020) / PaLM (Chowdhery et al., 2022) to tackle" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 699, + 321, + 710 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 699, + 321, + 710 + ], + "spans": [ + { + "bbox": [ + 116, + 699, + 321, + 710 + ], + "type": "text", + "content": "*Work done during the internship at ByteDance AI Lab." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 118, + 710, + 203, + 720 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 710, + 203, + 720 + ], + "spans": [ + { + "bbox": [ + 118, + 710, + 203, + 720 + ], + "type": "text", + "content": "† Corresponding author" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 118, + 720, + 501, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 720, + 501, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 720, + 501, + 731 + ], + "type": "text", + "content": "1The code and pre-trained models are available at https://github.com/shizhediao/DaVinci." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "more tasks in a unified way, and from complicated objectives like masked language modeling / next sentence prediction / replace token detection to a simple language modeling objective to improve the scalability of pre-training. This suggests that the generative pre-training paradigm with simple targets shows great potential for pre-training more scalable and general VLMs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 129, + 506, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 129, + 506, + 261 + ], + "spans": [ + { + "bbox": [ + 104, + 129, + 506, + 261 + ], + "type": "text", + "content": "To this end, several recent studies (Cho et al., 2021; Zhang et al., 2021a; Wang et al., 2021b; 2022) investigated sequence-to-sequence (seq2seq) vision-language pre-training and achieved state-of-the-art results on a range of vision-language understanding and generation tasks. For example, VL-T5 (Cho et al., 2021), OFA (Wang et al., 2022) and PaLI (Chen et al., 2022) formulate various vision-and-language problems into seq2seq tasks and pre-train a seq2seq VLM by multi-tasking on these tasks. In addition, ERNIE-ViLG (Zhang et al., 2021a) and SimVLM (Wang et al., 2021b) pre-train seq2seq VLMs with a simple language modeling or prefix language modeling objective on a large number of image-caption pairs. 
While achieving promising results, these objectives are not versatile enough, resulting in VLMs that are only capable of a subset of tasks in image-text modalities. On the other hand, the recent success of generative language pre-training (Brown et al., 2020) and generative vision pre-training (He et al., 2022; Bao et al., 2021) motivates us to explore generative vision-language pre-training to learn more versatile and scalable vision-language models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 262, + 506, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 506, + 461 + ], + "type": "text", + "content": "In this work, we introduce prefix multi-modal modeling, a unified generative pre-training framework that extends prefix language modeling to the multi-modal context and learns a multi-modal foundation model by learning to write and paint simultaneously. As illustrated in Figure 1, given an image-caption pair, we split the image and caption into two parts denoted as prefix and suffix. To make prefix image modeling compatible with the seq2seq formulation of conventional prefix language modeling, we follow DALLE (Ramesh et al., 2021) and convert images into discrete sequences of image tokens (van den Oord et al., 2017). We then train the model to generate the suffix in one modality based on the prefix in the same modality and the complete input in the other modality. In this way, prefix multi-modal modeling can fully exploit self-supervision from large-scale image-caption pairs by learning to write and paint simultaneously. We pre-train DAVinci2, a vision-language foundation model, with the proposed prefix multi-modal modeling framework on large-scale image-text pairs. 
DAVinci is the first self-supervised vision-language foundation model that is versatile for all kinds of tasks in vision-and-language modalities, including image-to-text generation, text-to-image generation, vision-language understanding, and single-modal language / vision tasks. DAVinci consistently outperforms FLAVA (Singh et al., 2021), an existing vision-language foundation model, on both language, vision, and multi-modal tasks, and performs competitively with state-of-the-art models across a wide range of tasks and modalities. Moreover, DAVinci also shows strong few-shot and zero-shot image/text generation capability." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 463, + 506, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 463, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 463, + 506, + 529 + ], + "type": "text", + "content": "In addition, most existing VLMs are pre-trained with mixed pre-training objectives and different data sources varying in size, making it difficult to disentangle the impact of pre-training objectives and data sources on the downstream tasks. To this end, we conduct a systematic analysis of the performance of generative vision-language pre-training by carefully ablating different pre-training objectives, such as prefix language / image modeling, and the amount of pre-training data with different qualities, revealing the impact of different objectives and data sources to facilitating future research." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 530, + 506, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 506, + 620 + ], + "type": "text", + "content": "To summarize, our contribution is three-fold: (1) We introduce prefix multi-modal modeling, a simple unified generative vision-language pre-training framework that is scalable for large-scale pre-training and versatile for image-to-text generation, text-to-image generation and various multi-modal / single-modal understanding tasks. (2) We pre-train DAVINCI, a vision-language foundation model, with the proposed approach, demonstrating competitive performance on a wide range of 27 downstream tasks and the superiority of combining vision/language generative pre-training. (3) We conduct an analysis about the impact of different pre-training data sources and pre-training objectives on the performance of seq2seq VLMs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 628, + 212, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 212, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 212, + 641 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 647, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 504, + 693 + ], + "type": "text", + "content": "Inspired by the success of language model pre-training, several studies investigated vision-language pre-training on large-scale image-caption pairs. 
ViLBERT (Lu et al., 2019) and LXMERT (Tan & Bansal, 2019b) first propose to extract visual object features with an external object detection model like Fast-RCNN (Girshick, 2015), feed the image features together with texts into Transformer" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 104, + 700, + 504, + 733 + ], + "type": "text", + "content": "Named after the Italian polymath Leonardo da Vinci, who displayed infinite grace in everything. We noticed that this name is used in GPT-3 versioning. However, we think there is no conflict because it is only a suffix for a specific checkpoint of the GPT-3 family." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 82, + 500, + 260 + ], + "blocks": [ + { + "bbox": [ + 111, + 82, + 500, + 260 + ], + "lines": [ + { + "bbox": [ + 111, + 82, + 500, + 260 + ], + "spans": [ + { + "bbox": [ + 111, + 82, + 500, + 260 + ], + "type": "image", + "image_path": "47d8b05a6d5bf1ad408398bf32a3a02a079e04087e5c92ddae4c6e5c46418f6c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 266, + 504, + 307 + ], + "lines": [ + { + "bbox": [ + 104, + 266, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 504, + 307 + ], + "type": "text", + "content": "Figure 1: Illustration of the overall architecture and pre-training procedures of DAVinci, a Transformer-based sequence-to-sequence model. Given an image-text pair, DAVinci first splits either the word sequence or image token sequence into prefix and suffix. It then concatenates the prefix with the complete sequence in the other modality as input. DAVinci is trained to recover the suffix with maximum likelihood estimation." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 308, + 504, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 364 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 364 + ], + "type": "text", + "content": "models, and train the model to align vision and language representations with masked multi-modal modeling and multi-modal alignment prediction objectives. 
Many following works (Li et al., 2020; Zhang et al., 2021b; Chen et al., 2020; Li et al., 2022a; 2021; Zeng et al., 2021; Wang et al., 2021a) propose several new objectives to improve object detection based VLP and explored using vision Transformer (Dosovitskiy et al., 2021; Touvron et al., 2021) as visual feature extractor." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 365, + 504, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 504, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 504, + 410 + ], + "type": "text", + "content": "More recently, FLAVA (Singh et al., 2021), a new vision-language foundation model, is pre-trained with a masked multi-modal modeling objective. Performing competitively on language, vision, and vision-language understanding tasks, FLAVA is designed for understanding tasks without text and image generation abilities." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 412, + 506, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 412, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 412, + 506, + 578 + ], + "type": "text", + "content": "While achieving promising results on multi-modal understanding tasks, most VLMs are based on encoder-only architectures with bidirectional attention, making them non-trivial to adapt to multi-modal generation tasks such as image captioning and text-to-image generation. Inspired by the success of seq2seq pre-trained language models such as T5 (Raffel et al., 2020) and BART (Lewis et al., 2020), VL-T5 (Cho et al., 2021) and OFA (Wang et al., 2022) propose to formulate both vision-language pre-training objectives and various downstream vision-language tasks as seq2seq tasks and pre-train a seq2seq VLM by multi-tasking on these tasks. However, the scalability and the zero-shot transfer capability of this approach are limited by the availability of large-scale and diverse vision-language tasks. 
To this end, SimVLM (Wang et al., 2021b), the most related work to our approach, instead pre-trains a seq2seq VLM with a simple prefix language modeling objective on text generation. It easily scales to very large and potentially noisy pre-training data and achieves competitive results. However, SimVLM only exploits language self-supervision, and thus it does not perform well on image understanding tasks and is unable to tackle image generation tasks. Another recent study is CM3 (Aghajanyan et al., 2022), which proposes a causal masked multi-modal model learned from large web data and differs from our work in pre-training objectives and target tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 579, + 506, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 635 + ], + "type": "text", + "content": "As for the text-to-image generation task, Ramesh et al. (2021); Ding et al. (2021); Yu et al. (2022) achieved promising performance by learning an auto-regressive target with Transformer and VQ-VAE / VQ-GAN tokenizer. Most recently, Ramesh et al. (2022); Saharia et al. (2022) advanced the image generation capability by using diffusion models and high-quality text embeddings (e.g., CLIP, T5). Therefore, it is natural to explore boosting image generation via stronger multi-modal understanding." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 636, + 504, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 636, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 636, + 504, + 681 + ], + "type": "text", + "content": "Previous studies are good at either image-to-text or text-to-image generation, but few studies investigate whether these two important capabilities can be learned together and boost each other. 
In this paper, we explore making a versatile and powerful multi-modal foundation model that is good at text-to-image generation, image-to-text generation, and multi-modal understanding tasks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 691, + 173, + 702 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 691, + 173, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 691, + 173, + 702 + ], + "type": "text", + "content": "3 DAVINCI" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "type": "text", + "content": "Given the superior performance of auto-regressive language models (LM) (Brown et al., 2020; Chowdhery et al., 2022; Rae et al., 2021) on zero-shot and few-shot transfer abilities, we decided to" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": "adopt a decoder optimized by language modeling loss to retain the generalization 
capabilities, and an encoder to represent the prefix input. Unlike using a causal mask in the decoder, the encoder employs fully-visible attention for the prefix input. This architecture resembles prefix language modeling, which shows effectiveness in a wide range of language tasks (Dong et al., 2019; Raffel et al., 2020) and enables zero-shot generalization abilities. Contrary to the previous multi-stage approaches (Wang et al., 2021a; Singh et al., 2021), our model is trained from scratch in an end-to-end manner thanks to the model's simplicity. In this section, we introduce the proposed prefix multi-modal modeling framework and the DAVinci model. The overall architecture of DAVinci is depicted in Figure 1. We first explain our model architecture in detail in §3.1 and then introduce pre-training objectives and procedures in §3.2." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 202, + 236, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 202, + 236, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 236, + 213 + ], + "type": "text", + "content": "3.1 MODEL ARCHITECTURE" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": "Textual Feature Embedding Given an input sentence " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": ", we first use WordPiece (Wu et al., 2016) tockenize it to a sequence of tokens " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "W = \\{w_{1},w_{2},\\dots,w_{n}\\}" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": ". 
To obtain text features " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": ", for each token " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "w_{i}" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": ", a token embedding " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "e_i" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": " and position embedding " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": " are computed by two separate embedding matrices. Finally, the textual feature embedding " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "T = \\{t_1,t_2,\\dots,t_i,\\dots,t_n\\}" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": " is calculated by " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "t_i = LayerNorm(e_i + p_i)" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": " indicates the " + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 219, + 506, + 285 + ], + "type": "text", + "content": "-th position, and LayerNorm (Ba et al., 2016) is a layer normalization function." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "content": "Visual Feature Embedding Given an input image " + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "content": ", we first use a CNN backbone to extract and learn the image features. Following (Dai et al., 2021; Wang et al., 2021b), we use the first three blocks of ResNet (He et al., 2016) to obtain the feature maps. The feature maps are then flattened to " + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "inline_equation", + "content": "F = \\{f_1, f_2, \\dots, f_m\\}" + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "content": " along the spatial dimension, where " + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "content": " denotes the number of features. 
To keep the position information of visual features, we inject absolute learned positional embeddings " + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "content": " and the final visual embeddings " + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "inline_equation", + "content": "V = \\{v_1, v_2, \\dots, v_i, \\dots, v_m\\}" + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "content": " are calculated by " + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "inline_equation", + "content": "v_i = f_i + p_i" + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "content": " indicates the " + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 287, + 506, + 365 + ], + "type": "text", + "content": "-th position." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "text", + "content": "Cross-Modal Transformer To fuse the textual and visual feature embeddings into a common space, we adopt a simple canonical Transformer architecture as the fusion module. 
The input is the combination of visual embedding " + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "text", + "content": " and textual embedding " + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "text", + "content": ", namely " + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "inline_equation", + "content": "X = \\{x_{1}, x_{2}, \\dots, x_{l}\\} = [V, T] = \\{v_{1}, v_{2}, \\dots, v_{m}, t_{1}, t_{2}, \\dots, t_{n}\\}" + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "text", + "content": ". The input embedding vectors " + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "text", + "content": " are then fed into a cross-modal Transformer encoder to obtain hidden state vectors " + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "inline_equation", + "content": "H = \\{h_{1}, h_{2}, \\dots, h_{l}\\}" + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "text", + "content": ". Finally, a Transformer decoder is applied to generate visual or textual tokens with " + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 366, + 507, + 434 + ], + "type": "text", + "content": " and decoder input as illustrated in Figure 1." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "text", + "content": "Image Tokenizer and Decoder Because Transformer is modeling on discrete tokens, to unify the text tokens and image tokens, we discretize an image into tokens by an image tokenizer and reconstruct the raw image by an image decoder. The image tokenizer and decoder are implemented with a discrete variational autoencoder (dVAE) (Ramesh et al., 2021). After training of the image tokenizer, it could serialize an image " + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "text", + "content": " into a sequence of discrete visual tokens " + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "inline_equation", + "content": "Z = \\{z_{1}, z_{2}, \\dots, z_{m}\\}" + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "text", + "content": " according to a learned vocabulary. Visual tokens " + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "text", + "content": " serve as the ground-truth labels for the prefix image modeling objective. 
In our work, we directly use an off-the-shelf image tokenizer and decoder from VQGAN (Esser et al., 2021), with a vocabulary size of 1024 and a compression rate of 16, which means a " + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "text", + "content": " image will be tokenized into " + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 104, + 434, + 506, + 546 + ], + "type": "text", + "content": " grid of tokens and then flattened to a sequence of 256 tokens." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 555, + 252, + 566 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 252, + 566 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 252, + 566 + ], + "type": "text", + "content": "3.2 PRE-TRAINING OBJECTIVES" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 571, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 506, + 628 + ], + "type": "text", + "content": "Our major motivation is to conduct language modeling with image information and image modeling with text information simultaneously, which only requires image and text pairs that are easy to collect, making our approach easy to scale. The interaction would force the vision-language model to have a deeper understanding of both text and image. Learning from this interaction connects the visual representation with textual representation, enabling zero-shot transfer." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 628, + 505, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 505, + 686 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 505, + 686 + ], + "type": "text", + "content": "Prefix Language Modeling (PLM) The core idea of prefix language modeling is \"given a full image " + }, + { + "bbox": [ + 104, + 628, + 505, + 686 + ], + "type": "inline_equation", + "content": "X_{image}" + }, + { + "bbox": [ + 104, + 628, + 505, + 686 + ], + "type": "text", + "content": " and a prefix caption " + }, + { + "bbox": [ + 104, + 628, + 505, + 686 + ], + "type": "inline_equation", + "content": "\\tilde{X}_{text}" + }, + { + "bbox": [ + 104, + 628, + 505, + 686 + ], + "type": "text", + "content": ", recover the masked textual tokens (i.e., suffix caption " + }, + { + "bbox": [ + 104, + 628, + 505, + 686 + ], + "type": "inline_equation", + "content": "Y_{text}" + }, + { + "bbox": [ + 104, + 628, + 505, + 686 + ], + "type": "text", + "content": ")\". 
Given an input caption, we first randomly mask some continuous words at the end (we call it suffix caption hereafter) and recover the masked textual tokens with full image by optimizing the cross-entropy loss," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 205, + 689, + 505, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 689, + 505, + 716 + ], + "spans": [ + { + "bbox": [ + 205, + 689, + 505, + 716 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {P L M}} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {t e x t}} \\mid \\mathbf {X} _ {\\text {i m a g e}}, \\tilde {\\mathbf {X}} _ {\\text {t e x t}}\\right), \\tag {1}", + "image_path": "45d0f7d88b2d009c6a082b087c7d0ab79c23668606b33c85648f18e8ebff9092.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 720, + 392, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 720, + 392, + 733 + ], + "spans": [ + { + "bbox": [ + 107, + 720, + 392, + 733 + ], + "type": "text", + "content": "where I and S are images and captions from the pre-training corpus " + }, + { + "bbox": [ + 107, + 720, + 392, + 733 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 107, + 720, + 392, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 139 + ], + "type": "text", + "content": "Because of the lack of textual information, recovering the suffix caption requires the model to understand both the image and prefix caption. The full image is rich in semantic information that would help language modeling. The prefix length is randomly decided during training, and especially when prefix caption is none, this task will degenerate into \"image captioning\" task, which forces the model to generate a caption with the input image." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 220, + 144, + 506, + 170 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 144, + 506, + 170 + ], + "spans": [ + { + "bbox": [ + 220, + 144, + 506, + 170 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {P L M}} ^ {\\prime} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {t e x t}} \\mid \\mathbf {X} _ {\\text {i m a g e}}\\right) \\tag {2}", + "image_path": "952dad88a0ff9a816ca2c2cf1483af23602643d4a7af632cf5ae07547f489d71.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 176, + 504, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 176, + 504, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 176, + 504, + 233 + ], + "type": "text", + "content": "Prefix Image Modeling (PIM) The core idea of prefix image modeling is \"given a full caption and a corrupted image (we call it prefix image hereafter), recover the masked visual tokens\". Given an input image, we first randomly mask some continuous image patches at the end (we call it suffix image hereafter). The prefix image and full caption will be fed into the model and try to recover the original visual tokens obtained from the image tokenizer by optimizing the cross-entropy loss." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 205, + 238, + 505, + 264 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 238, + 505, + 264 + ], + "spans": [ + { + "bbox": [ + 205, + 238, + 505, + 264 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {P I M}} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {i m a g e}} \\mid \\mathbf {X} _ {\\text {t e x t}}, \\tilde {\\mathbf {X}} _ {\\text {i m a g e}}\\right) \\tag {3}", + "image_path": "d127301982ef80b35fef5740b1d67ef4e2c5bab8e91edbfb0fa9846049ae2ecd.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 270, + 506, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 294 + ], + "type": "text", + "content": "Similar to PLM, when prefix image is none, this task will degenerate into \"text-to-image generation\" task, forcing the model to generate an image with the input caption:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 222, + 299, + 505, + 326 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 299, + 505, + 326 + ], + "spans": [ + { + "bbox": [ + 222, + 299, + 505, + 326 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {P I M}} ^ {\\prime} = - \\sum_ {(I, S) \\in D} \\log p \\left(\\mathbf {Y} _ {\\text {i m a g e}} \\mid \\mathbf {X} _ {\\text {t e x t}}\\right) \\tag {4}", + "image_path": "3045f8148a9432bedfbf4fd60a2051b32fbf44dd3c1d94590750182d05746770.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 332, + 506, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 332, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 332, + 506, + 344 + ], + "type": "text", + "content": "Unified Learning Objective Our model is learned by optimizing the combination of PLM and PIM." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 263, + 350, + 505, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 350, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 263, + 350, + 505, + 361 + ], + "type": "interline_equation", + "content": "\mathcal {L} = \mathcal {L} _ {\mathrm {P L M}} + \mathcal {L} _ {\mathrm {P I M}} \tag {5}", + "image_path": "e19748e11ca8a60cc0f9c968ecc9316662ff63e308110b2273417cad69b14652.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 380, + 201, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 201, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 201, + 392 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 401, + 243, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 401, + 243, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 243, + 411 + ], + "type": "text", + "content": "4.1 PRE-TRAINING DATASETS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 418, + 504, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 474 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 474 + ], + "type": "text", + "content": "Since existing studies pre-trained their models on different corpora, fair comparison is difficult. Considering results only on state-of-the-art performance would underestimate the potential of this line of research. Therefore, we propose several practical settings including small-scale and large-scale, and then conduct detailed comparisons on them in Section 5.1. More details about the datasets are shown in Appendix A.3." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 106, + 483, + 503, + 553 + ], + "blocks": [ + { + "bbox": [ + 106, + 483, + 503, + 553 + ], + "lines": [ + { + "bbox": [ + 106, + 483, + 503, + 553 + ], + "spans": [ + { + "bbox": [ + 106, + 483, + 503, + 553 + ], + "type": "table", + "html": "
Data TypeDatasetImage Domain#Total
In-Domain Data (ID)COCO, Visual GenomeCOCO1.3M
Small-scale Web Data (SWD)SBU, CC-3M, CC-12MWeb14.9M
Object-Region Data (ORD)VG regions, VG objects, COCO objects, Refcoco, Open Image, Obj365COCO, Flickr17.0M
Vision Data (VD)ImageNet-21KImageNet13.2M
Large-scale Web Data (LWD)LAION-400M, DAVinci-200MWeb601.3M
Text Data (TD)C4Web800GB
", + "image_path": "edfd3b3f37e6c2e0e9a0fc70ed0bd1ac7970a62ff45934e732d8c8154d6bf0bc.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 120, + 556, + 488, + 568 + ], + "lines": [ + { + "bbox": [ + 120, + 556, + 488, + 568 + ], + "spans": [ + { + "bbox": [ + 120, + 556, + 488, + 568 + ], + "type": "text", + "content": "Table 1: Statistics of the pre-training datasets. #Total denotes the total number of image-text pairs." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 578, + 227, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 227, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 227, + 588 + ], + "type": "text", + "content": "4.2 DOWNSSTREAM TASKS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 594, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 506, + 662 + ], + "type": "text", + "content": "We test our models' ability and versatility on five dimensions: language understanding on 8 GLUE tasks (Wang et al., 2019), vision understanding on ImageNet fine-tuning and 12 popular vision datasets for linear evaluation, multi-modal understanding on VQAv2 (Goyal et al., 2017b), SNLI-VE (Xie et al., 2019) and NLVR2 (Suhr et al., 2019), text-to-image generation on COCO (Chen et al., 2015), and image-to-text generation on COCO, NoCaps (Agrawal et al., 2019), and VLUE (Zhou et al., 2022b). Details of downstream tasks and fine-tuning process are described in Appendix A.2." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 670, + 250, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 670, + 250, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 670, + 250, + 681 + ], + "type": "text", + "content": "4.3 IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": "Our model is a base-size Transformer implemented with a 6-layer encoder and a 6-layer decoder, 768 dimensions for hidden states, 512 for maximum input length, and 3072 for intermediate size. We train our model from scratch without initializing the Transformer encoder and decoder. However, the image encoder is initialized from ResNet-101 (He et al., 2016) with ImageNet weights since we find" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 79, + 504, + 437 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 504, + 437 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 504, + 437 + ], + "type": "table", + "html": "
BERTRoBERTaViTMLM 1MIM 2FLAVA 3CLIP 4SimVLM 5DAVINCI 6SimVLM 7DAVINCI 8
TaskEval.16GB160GB13.2M70M70M70M70M46.4M46.4M647.7M647.7M
MNLIFT84.2087.60-73.23-80.3332.8582.1382.2583.2783.13
CoLAFT54.6063.60-39.55-50.6511.0252.4752.1054.2254.75
MRPCFT84.7590.20-73.24-84.1668.7482.7083.1484.2684.54
QQPFT89.0091.90-86.68-88.7459.1788.3988.1589.0588.92
SST-2FT92.5094.80-87.96-90.9483.4990.6590.4891.1291.37
QNLIFT91.0092.80-82.32-87.3149.4687.5587.2188.2887.90
RTEFT62.5078.70-50.54-57.7653.0759.8060.7263.3464.22
STS-BFT88.2091.20-78.89-85.6713.7086.6286.2787.2487.05
NLP Avg.80.8486.35-71.55-78.1946.4478.7978.7980.1080.23
ImageNetLE--80.90-41.7975.5472.9574.3175.8776.0477.65
Food101LE--86.70-53.3088.5185.4983.4189.3385.5290.12
CIFAR10LE--96.90-76.2092.8791.2591.5693.0192.4193.96
CIFAR100LE--86.40-55.5777.6874.4072.5178.9875.2380.11
CarsLE--54.70-14.7170.8762.8461.4472.6968.8374.57
AircraftLE--46.00-13.8347.3140.0241.2847.4247.7549.55
DTDLE--74.30-55.5377.2973.4072.5577.1276.5978.33
PetsLE--92.70-34.4884.8279.6178.7785.5286.1388.21
Flowers102LE--99.20-67.2396.3794.9493.2496.1295.4196.88
MNISTLE--97.40-96.4098.4297.3896.6698.6798.4599.01
STL10LE--99.50-80.1298.8997.2997.5199.0398.0299.21
Country211LE--17.50-8.8728.9225.1226.4528.9927.8129.94
Vision Avg.--77.68-49.8478.1274.5674.1478.5677.3479.80
VQAv2FT-----72.4959.8172.1273.8975.0376.44
SNLI-VEFT-----78.8973.5378.7479.1179.6380.01
NLVR2FT-------77.4577.9179.7280.25
I2T@B4FT-------38.0038.5038.1039.20
I2T@CFT-------126.96128.66128.91130.44
T2I@IS ↑FT--------17.55-22.41
T2I@FID ↓FT--------23.58-19.82
VQAv2FS-------54.6954.8551.8854.90
SNLI-VEFS-------67.4567.5767.9668.04
NLVR2FS-------51.4651.1951.4951.52
I2T@B4FS-------35.9036.4032.7037.00
I2T@CFS-------117.75120.43112.20122.56
I2T@B4ZS-------11.4010.8013.8018.70
I2T@CZS-------45.3045.5556.6968.44
VUE@B4ZS-------9.209.4010.4010.60
VUE@CZS-------33.9234.8039.7540.83
NoCaps@CZS-------48.0545.5148.6458.58
T2I@IS ↑ZS--------14.91-17.44
T2I@FID ↓ZS--------29.83-24.21
Multi-modal Avg.-------57.8958.3059.1362.50
", + "image_path": "c198d5977e9dcb0a2ffb1e3dced54ed631e5ed8367fe9a9754c7678523f03f5e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 104, + 445, + 504, + 526 + ], + "lines": [ + { + "bbox": [ + 104, + 445, + 504, + 526 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 504, + 526 + ], + "type": "text", + "content": "Table 2: Experimental results on vision, language and multi-modal downstream tasks. @B4, @C denote BLEU@4, CIDEr, respectively. I2T and T2I denote image-to-text and text-to-image tasks. Multi-modal Avg. is the average score of all multi-modal tasks. FT: fine-tuning, LE: linear evaluation, FS: few-shot, ZS: zero-shot. Under few-shot setting, we fine-tune a pre-trained model for 3 epochs on " + }, + { + "bbox": [ + 104, + 445, + 504, + 526 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 104, + 445, + 504, + 526 + ], + "type": "text", + "content": " training data. Results for BERT are obtained from Iki & Aizawa (2021). Results for RoBERTa are from its corresponding paper (Liu et al., 2019) and they use the mid-training (Phang et al., 2018) on MNLI for RTE, MRPC and STS-B while other models (e.g., BERT, SimVLM, DAVinci) do not apply this trick. Results for ViT are from ViT-Base/16 model (Radford et al., 2021). We list the reported performance of text-only and image-only models in grey for reference." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 529, + 504, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 504, + 586 + ], + "type": "text", + "content": "a warm start provides a reliable visual representation and helps the convergence. All pre-training experiments are conducted on 32GB NVIDIA V100 GPUs. The model trained on the largest data takes around 10 days on 1024 V100 GPUs. 
We adopt dynamic masking in our experiments, where the masking ratio is randomly sampled from a uniform distribution " + }, + { + "bbox": [ + 104, + 529, + 504, + 586 + ], + "type": "inline_equation", + "content": "\mathrm{U}(0,1)" + }, + { + "bbox": [ + 104, + 529, + 504, + 586 + ], + "type": "text", + "content": ". More details of the fine-tuning, network architectures, and hyper-parameters setups are given in Appendix A.1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 595, + 240, + 606 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 595, + 240, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 240, + 606 + ], + "type": "text", + "content": "4.4 EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 613, + 504, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 504, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 504, + 657 + ], + "type": "text", + "content": "We extensively compare the performance of DAVINCI with state-of-the-art unified foundation models and vision-language models across vision, language, and multi-modal tasks, assessing five different abilities: (1) text understanding, (2) image understanding, (3) text-to-image generation, (4) image-to-text generation, (5) multi-modal understanding." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "content": "Overall Performance We report the overall performance on 8 language tasks from GLUE, 12 vision tasks, 3 multi-modal tasks, 3 image-to-text tasks and 1 text-to-image task. 
We compare our model with FLAVA and SimVLM " + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "inline_equation", + "content": "^3" + }, + { + "bbox": [ + 104, + 659, + 504, + 693 + ], + "type": "text", + "content": ", two of the most recent and best performing vision-language" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 700, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 732 + ], + "type": "text", + "content": "3Since SimVLM is not open-sourced and uses 1.8B in-house data without telling the exact size of its base model, we replicate it on our data with the same size as DAVINCI. Experiments on SimVLMsmall ensure our successful reproduction (see Appendix A.4)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 70, + 506, + 293 + ], + "blocks": [ + { + "bbox": [ + 105, + 70, + 506, + 293 + ], + "lines": [ + { + "bbox": [ + 105, + 70, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 70, + 506, + 293 + ], + "type": "table", + "html": "
Model#Params.Text MNLI AccVision ImageNet LE / FTImage2Text COCO B@4 / CText2Image COCO IS↑ / FID↓Multi-modal
VQA test-dev / test-standardNLVR2 dev / test-P
Encoder-only Multi-modal Models
VinVL (Zhang et al., 2021b)157M--38.2 / 129.3-75.95 / 76.1282.05 / 83.08
ViLT (Kim et al., 2021)88M----70.85 / -74.91 / 75.57
ALBEF (Li et al., 2021)210M----75.84 / 76.0482.55 / 83.14
X-VLM (Zeng et al., 2021)240M--39.6 / 132.6-78.22 / 78.3784.41 / 84.76
VLMO (Wang et al., 2021a)----76.64 / 76.8982.77 / 83.34
Encoder-Decoder Multi-modal Models
UNICORN (Yang et al., 2021)--35.8 / 119.1-69.20 / 69.40-/-
Uni-ENDN (Li et al., 2022b)110M----72.20 / 72.50-/-
Pixel-BERT (Huang et al., 2020)144M----74.45 / 74.5576.50 / 77.20
E2E-VLP (Xu et al., 2021a)94M--36.2 / 117.3-73.25 / 73.6777.25 / 77.96
VL-T5 (Cho et al., 2021)220M--34.5 / 116.5-- / 70.3074.60 / 73.60
VL-BART (Cho et al., 2021)220M--35.1 / 116.6-- / 71.3071.70 / 70.30
Text2Image Models
DM-GAN (Zhu et al., 2019)---32.20 / 26.50-/--/-
DALLE (Ramesh et al., 2021) (250M)12B---17.90 / 27.50-/--/-
DALLE (Ramesh et al., 2021) (640M)†82M---15.79 / 29.22-/--/-
CogView (Ding et al., 2021)4B---18.20 / 27.10-/--/-
Unified Models
Unifying (Huang et al., 2021)228M--37.3 / 122.6- / 29.90-/--/-
FLAVA (Singh et al., 2021)240M80.3375.54 / ---72.80 / 72.49-/-
SimVLM (Wang et al., 2021b) (640M)†153M83.2776.04 / -38.5 / 128.7-75.04 / 75.0378.82 / 79.72
SimVLM (Wang et al., 2021b) (1.8B)83.4080.60 / -39.0 / 134.8-77.87 / 78.1481.72 / 81.77
OFA (Wang et al., 2022)182M84.30- / 82.2041.0 / 138.221.50* / 20.80*78.00 / 78.10-/-
Florence (Yuan et al., 2021)637M-- / 90.05-/--/-80.16 / 80.36-/-
DAVINCI154M83.1378.81 / 83.9239.2 / 130.417.44 (22.41*) / 24.21 (19.82*)76.32 / 76.4480.03 / 80.25
", + "image_path": "fe417e02e22be789cc5807b1d0c13c00eacaf7970e23c09aae3fc09c30e23cfa.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 296, + 504, + 367 + ], + "lines": [ + { + "bbox": [ + 104, + 296, + 504, + 367 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 504, + 367 + ], + "type": "text", + "content": "Table 3: Comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks. All results are from base-size models. LE and FT denote linear evaluation and fine-tuning performance, respectively. Image2Text results are reported without CIDEr optimization. " + }, + { + "bbox": [ + 104, + 296, + 504, + 367 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 104, + 296, + 504, + 367 + ], + "type": "text", + "content": " are our reproduced models. \\* are the results after fine-tuning. SimVLM (1.8B) and OFA are pre-trained with much larger corpus or human-labeled data of many downstream tasks, and thus they are not comparable and are labeled in gray. Florence (Yuan et al., 2021) is pre-trained with much larger model size (Florence-CoSwin-H, 637M) and more pre-training data (900M), so the numbers are in grey. bold denotes the best across unified models." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "text", + "content": "foundation models. We also include comparisons with some baseline models (e.g., MIM, MLM, CLIP). There are several observations. First, DAVINCI (column 8) outperforms FLAVA (column 3) and SimVLM (column 7) across almost all tasks, providing a new and stronger unified foundation model. 
Compared with FLAVA, DAVINCI improves an average of " + }, + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "inline_equation", + "content": "2.04\\%" + }, + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "inline_equation", + "content": "1.68\\%" + }, + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "text", + "content": " on language and vision tasks, respectively. Compared with SimVLM, DAVINCI achieves comparable results on language tasks " + }, + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "inline_equation", + "content": "(+0.13\\%)" + }, + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "text", + "content": " while performing much better on vision tasks " + }, + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "inline_equation", + "content": "(+2.46\\%)" + }, + { + "bbox": [ + 104, + 376, + 504, + 509 + ], + "type": "text", + "content": ". To make a fair comparison in terms of similar data size, we compare FLAVA (70M data, column 3) with DAVINCI (46.4M data, column 6). It is observed that DAVINCI still outperforms FLAVA even with much less data. Considering the multi-modal tasks, DAVINCI consistently outperforms FLAVA and SimVLM on VQA and VE. Note that FLAVA is incapable of generation and SimVLM cannot generate images; only DAVINCI is competent to all tasks and demonstrates a stronger capability of unifying vision and language tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 515, + 506, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 515, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 104, + 515, + 506, + 648 + ], + "type": "text", + "content": "Zero-shot and Few-shot Transfer One of the critical benefits of generative pre-trained vision-language models is the good generalization ability on zero-shot and few-shot tasks. 
For zero-shot transfer, two out-of-domain distribution datasets are considered (NoCaps and VLINE), with results shown in Table 2. First, DAVinci outperforms SimVLM on both zero-shot and few-shot settings, demonstrating its better transfer capabilities. It also shows the effectiveness and robustness of the synergy of our proposed language supervision and image supervision. Second, it is observed that the performance improvement is bigger on 647.7M data (column 7 v.s. column 8) than 46.4M data (column 5 v.s. column 6). This shows DAVinci generalizes well with the increase of large-scale data. We even observe some performance drops on small data (46.4M) but excellent performance improvements on large data (647.7M). It is consistent with the recent observation that zero-shot ability could only be triggered with large pre-training data (Wei et al., 2022) and scaling to large data and keeping simple training objectives benefit generalization performance (Wang et al., 2021b)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 732 + ], + "type": "text", + "content": "Comparison with state-of-the-art vision-language models In addition to unified vision-language foundation models, we compare DAVinci with state-of-the-art vision-language models as well. The results are shown in Table 2. DAVinci demonstrates its superiority in vision understanding and text-to-image generation. Compared with current popular auto-regressive image generation models like DALLE and CogView, our model achieves comparable IS and better FID scores with significantly fewer model parameters than DALLE and CogView. 
Note that the original DALLE is implemented based on VQVAE, so here, we compare our model with reproduced VQGAN-based DALLE with" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 79, + 504, + 236 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 504, + 236 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 504, + 236 + ], + "type": "table", + "html": "
SettingsPre-training Data#Image#CaptionModelsCOCO CaptionsVQASNLI-VENLVR2
IDSWDORDVDLWDB@4 / CAccAccAcc
10.2M1.3MSimVLM35.2 / 115.0668.8976.1071.21
DAVINCI35.8 / 117.3069.2576.2272.55
215.1M16.2MSimVLM37.0 / 122.6371.5478.3675.50
DAVINCI37.4 / 123.1171.8878.6277.46
32.7M18.3MSimVLM38.2 / 123.8569.5776.6570.50
DAVINCI38.0 / 124.2070.0276.9272.01
413.4M14.5MSimVLM36.2 / 119.7370.5376.9073.25
DAVINCI36.6 / 121.2771.2377.4074.62
530.5M46.4MSimVLM38.5 / 128.1271.8478.8176.75
DAVINCI38.6 / 128.7373.5379.2477.55
6601.3M601.3MSimVLM37.3 / 123.8173.7378.7977.69
DAVINCI37.6 / 124.4273.9579.2978.54
7601.5M602.6MSimVLM37.9 / 125.5074.6479.0577.68
DAVINCI38.1 / 125.9174.9179.2278.12
8631.8M647.7MSimVLM38.5 / 128.2575.0479.3278.82
DAVINCI39.1 / 130.2176.3280.0480.03
", + "image_path": "3183648d65c44268f04e6d93ab4e8aecc3f2c5100585f348674324754b29b556.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 240, + 504, + 262 + ], + "lines": [ + { + "bbox": [ + 105, + 240, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 504, + 262 + ], + "type": "text", + "content": "Table 4: Evaluation on downstream tasks using COCO Captions, VQA, SNLI-VE, and NLVR2. #Image and #Caption denote the numbers of images and image-text pairs that are used in the pre-training." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 271, + 504, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 271, + 504, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 271, + 504, + 294 + ], + "type": "text", + "content": "similar model sizes, and find DAVinci still achieves a significant improvement over it. Generated images are presented in Appendix A.11 for further qualitative comparison." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 295, + 506, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 506, + 373 + ], + "type": "text", + "content": "On multi-modal tasks such as VQA, DAVINCI not only outperforms unified models (e.g., SimVLM (640M)) and other encoder-decoder multi-modal models (e.g., E2E-VLP, VL-T5), but also achieves competitive performance with many conventional encoder-only multi-model models (e.g., VinVL, ALBEF, VLMO). Note that SimVLM (1.8B) and OFA are not directly comparable because SimVLM uses 1.8B in-house image-text pairs, and OFA uses human-labeled data of many downstream tasks during pre-training. Even though, we still report their results for reference and observe a better performance on ImageNet fine-tuning and text-to-image generation than OFA." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 375, + 504, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 375, + 504, + 409 + ], + "spans": [ + { + "bbox": [ + 104, + 375, + 504, + 409 + ], + "type": "text", + "content": "The advantages of image generation over DALLE / CogView, the superiority of image-to-text over SimVLM, and the competitive performance with conventional multi-modal models demonstrate the synergistic effect of our proposed PLM (language supervision) and PIM (image supervision)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 418, + 179, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 418, + 179, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 418, + 179, + 430 + ], + "type": "text", + "content": "5 ANALYSIS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 437, + 291, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 291, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 291, + 449 + ], + "type": "text", + "content": "5.1 IMPACT OF PRE-TRAINING DATASETS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 455, + 506, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 455, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 506, + 620 + ], + "type": "text", + "content": "In this section, we disclose the impact of various multi-modal data sources for VLMs. We choose SimVLM and DAVinci as our baseline models for their competitive performance, the capability of training from scratch, and the scalability of extending to the noisy large-scale corpus. We use the same text corpus, " + }, + { + "bbox": [ + 104, + 455, + 506, + 620 + ], + "type": "inline_equation", + "content": "C4" + }, + { + "bbox": [ + 104, + 455, + 506, + 620 + ], + "type": "text", + "content": ", for all the variations. The results are shown in Table 4. 
In general, the performance is increased along with the data size, and DAVinci consistently outperforms SimVLM on almost all the data settings and all the downstream tasks. Both object-region data and vision data are clearly helpful in vision language pre-training (refer to settings 3 and 4). We surprisingly observe that models pre-trained on object-region data with much fewer images performs even better than models pre-trained with small-scale web data on the COCO Caption task (refer to settings 2 and 3). Although large-scale web data is usually noisier than small datasets (e.g., ID, ORD, VD, and SWD), it is powerful for multi-modal pre-training (refer to settings 5 and 8). We believe our analysis has broader impacts on the research of VLMs in the community. First, this enables fair comparisons for pre-trained models in the same data settings. Second, one can focus on the model designs at part or all of the data settings according to available computation resources. Third, we reveal that object-region and vision data, normally overlooked in VLM pre-training, also play a significant role." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 624, + 209, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 209, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 209, + 635 + ], + "type": "text", + "content": "5.2 ABLATION STUDY" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 641, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 641, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 506, + 696 + ], + "type": "text", + "content": "To verify the contributions of different modules in our framework, we ablate them and evaluate DAVINCI on five kinds of downstream tasks: language understanding (MNLI, SST-2), vision understanding (ImageNet, Food101, CIFAR10), multi-modal understanding (VQAv2, SNLI-VE, NLVR2), image-to-text generation (COCO Captions), and text-to-image generation. Experiments are conducted with the same model architecture on in-domain data (ID). The results are shown in Table 5." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": "Effects of Objectives First, all three objectives (PLM, PIM, and Text2Text) bring improvement and the combination confirms a synergistic effect. 
Second, it is observed that without PLM, the performance decreases significantly on multi-modal understanding and image-to-text generation," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 81, + 507, + 211 + ], + "blocks": [ + { + "bbox": [ + 106, + 81, + 507, + 211 + ], + "lines": [ + { + "bbox": [ + 106, + 81, + 507, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 507, + 211 + ], + "type": "table", + "html": "
MethodCOCOB@4 / CVQAAccSNLI-VEAccNLVR2AccImageNetAccFood101AccCIFAR10AccMNLIAccSST-2AccT2IIS / FID
No Pre-training32.1 / 96.7152.7354.2351.08-*-*-*66.3279.84-*
DAVINCI35.8 / 117.3069.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
-PLM33.6 / 111.1765.1573.9153.2848.0574.1772.9881.4289.9710.26 / 59.64
-PIM34.3 / 116.5868.8975.7969.7845.5471.1870.1181.9490.53-*
-Text2Text34.1 / 115.2168.1475.3870.3448.6774.2673.2376.4888.1412.07 / 54.77
PL=035.4 / 117.0066.9075.5271.0548.4568.1873.7378.6989.0011.76 / 55.38
PL=15%35.7 / 116.5369.1675.0970.4441.5852.1568.5579.0289.46-*
PL=50%35.1 / 115.5368.5574.5456.9237.6949.1670.1578.5989.69-*
MIM34.7 / 113.468.1875.3469.6648.4656.9572.7981.7289.849.50 / 74.13
In-painting34.5 / 112.567.4675.4168.6647.5054.3871.2081.5589.849.97 / 68.15
Token Projection17.7 / 49.252.1371.1152.0115.1125.6261.0182.0190.2511.89 / 60.96
Patch Projection25.7 / 79.557.6971.9257.4536.2344.3169.4081.7390.0511.41 / 61.87
", + "image_path": "51842cc7375622d01367ef18e920eb25c399142c5998b6e8a0587e567b5de68b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 214, + 504, + 275 + ], + "lines": [ + { + "bbox": [ + 106, + 214, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 106, + 214, + 504, + 275 + ], + "type": "text", + "content": "Table 5: Ablation study on COCO Captions, VQA, SNLI-VE, NLVR2, ImageNet, Food101, CIFAR10, MNLI, SST-2, and text-to-image (T2I) generation. “-” denotes removing the corresponding objective. PL denotes the prefix length under fixed masking ratio settings. Because the linear probe requires a pre-trained model to be frozen, “No Pre-training” results on ImageNet, Food101, and CIFAR10 are not reported and labeled by * . For T2I, we report the zero-shot results. Note that the following four variants cannot perform zero-shot text-to-image generation (labeled by *): (1) No Pre-training, (2) DAVinci - PIM, (3) PL=15%, and (4) PL=50%." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 106, + 288, + 504, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 288, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 106, + 288, + 504, + 354 + ], + "type": "text", + "content": "indicating the importance of language supervision. Third, PIM brings more gains than PLM and text2text on vision understanding, which is expected because it enhances the vision encoding ability with image supervision. In addition, the text2text objective is important to text understanding. Last, on the text-to-image generation task, it is observed that PLM is also helpful, confirming the synergistic effect of PIM and PLM again. Intuitively, PIM and PLM can help each other learn the alignments of visual and textual features, which will benefit both image generation and other multi-modal tasks." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "text", + "content": "Effects of Masking Ratios Our model adopts dynamic masking ratios as described in Section 3.2. We also conduct experiments with static masking ratios with the prefix length fixed to 0, " + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "text", + "content": ". The comparison between dynamic masking ratios and static masking ratios " + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "inline_equation", + "content": "(\\mathrm{PL} = 0, 15\\%," + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "inline_equation", + "content": "50\\%)" + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "text", + "content": " reveals that dynamic masking is better. We attribute this improvement to the smoothing effects of dynamic masking ratios. We also find that the standard language model " + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "inline_equation", + "content": "(\\mathrm{PL} = 0)" + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "text", + "content": " performs worse on VQA, Food101, and text-to-image generation, which is consistent with the observation in SimVLM. 
In our experiments, the masking ratio is sampled from a uniform distribution " + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\mathrm{U}(0,1)" + }, + { + "bbox": [ + 106, + 359, + 504, + 437 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 441, + 504, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 441, + 504, + 508 + ], + "spans": [ + { + "bbox": [ + 106, + 441, + 504, + 508 + ], + "type": "text", + "content": "Effects of Masking Strategies Here we also compared three different masking strategies: 1) masked image modeling (randomly masking some patches), 2) in-painting (randomly masking some continuous spans in the middle of the image), and 3) suffix-painting (ours). The results are shown in Table 5. Both masked image modeling and in-painting are effective and competitive. It is observed that suffix-painting is better than masked image modeling and in-painting across all tasks, demonstrating that suffix-painting works well." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 513, + 504, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 513, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 106, + 513, + 504, + 590 + ], + "type": "text", + "content": "Effects of Image Feature Extraction There are several different ways to extract image features. We compare three different image representation methods: 1) token projection (projecting the prefix tokens to the hidden dimension of the backbone network on the token-level), 2) patch projection (similar to ViT embedding, we split an image into fixed-size patches, embed each of them by a trainable linear projection on the pixel-level), and 3) ResNet feature extraction (ours). From the results in Table 5, we observed that ResNet feature extraction outperforms token projection and patch projection by a large margin. 
Therefore, we decided to adopt ResNet to extract image features." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 592, + 504, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 592, + 504, + 615 + ], + "spans": [ + { + "bbox": [ + 106, + 592, + 504, + 615 + ], + "type": "text", + "content": "We provide more details and discussions about the effects of compute (A.5), masking strategies (A.6), image feature extraction methods (A.7), and scaling effects of data size (A.8) in the Appendix." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 624, + 287, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 624, + 287, + 636 + ], + "spans": [ + { + "bbox": [ + 106, + 624, + 287, + 636 + ], + "type": "text", + "content": "6 CONCLUSION AND DISCUSSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 643, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 643, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 106, + 643, + 504, + 731 + ], + "type": "text", + "content": "In this work, we first benchmark several settings on sequence-to-sequence vision-language pretraining in terms of pre-training dataset size, aligning SimVLM and our model on them. We propose a simple and unified generative pre-training model, DAVinci, to simultaneously leverage the language supervision and image supervision through two objectives under a unified framework: prefix language modeling and prefix image modeling. DAVinci is simple yet effective, demonstrating strong capabilities in both multi-modal writing and painting tasks. Experimental results explicitly imply that combining suffix caption generation and suffix image generation offers large gains on all benchmark settings. We also discussed limitations and future work in Appendix A.10." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 752, + 308, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 752, + 308, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 752, + 308, + 759 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 218, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 218, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 218, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 102, + 506, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 102, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 506, + 159 + ], + "type": "text", + "content": "We thank the anonymous reviewers for their valuable suggestions. We would like to acknowledge Yan Zeng, Wenguan Huang, and Zhi Zhang at ByteDance, and Zhiling Zhang at Shanghai Jiao Tong University for their generous assistance in data collection and helpful discussions. We also wish to thank Hang Li at ByteDance, and Tong Zhang at HKUST for inspiring feedback, valuable comments, and great support to this work." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 172, + 175, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 172, + 175, + 185 + ], + "spans": [ + { + "bbox": [ + 106, + 172, + 175, + 185 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 194, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 105, + 194, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 505, + 228 + ], + "type": "text", + "content": "Armen Aghajanyan, Bernie Huang, Candace Ross, Vladimir Karpukhin, Hu Xu, Naman Goyal, Dmytro Okhonko, Mandar Joshi, Gargi Ghosh, Mike Lewis, et al. Cm3: A causal masked multimodal model of the internet. arXiv preprint arXiv:2201.07520, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 236, + 506, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 506, + 270 + ], + "type": "text", + "content": "Eneko Agirre, Lluis Márquez, and Richard Vicentowski (eds.). Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007), Prague, Czech Republic, 2007. Association for Computational Linguistics. URL https://aclanthology.org/S07-1000." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 279, + 506, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 279, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 506, + 335 + ], + "type": "text", + "content": "Harsh Agrawal, Peter Anderson, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, and Stefan Lee. nocaps: novel object captioning at scale. 
In 2019 IEEE/CVF International Conference on Computer Vision, ICCV 2019, Seoul, Korea (South), October 27 - November 2, 2019, pp. 8947-8956. IEEE, 2019. doi: 10.1109/ICCV.2019.00904. URL https://doi.org/10.1109/ICCV.2019.00904." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 344, + 506, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 344, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 506, + 367 + ], + "type": "text", + "content": "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. ArXiv preprint, abs/1607.06450, 2016. URL https://arxiv.org/abs/1607.06450." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 376, + 506, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 376, + 506, + 398 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 506, + 398 + ], + "type": "text", + "content": "Hangbo Bao, Li Dong, and Furu Wei. BEiT: Bert pre-training of image transformers. arXiv preprint, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 407, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 407, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 407, + 504, + 430 + ], + "type": "text", + "content": "Luisa Bentivogli, Peter Clark, Ido Dagan, and Danilo Giampiccolo. The fifth pascal recognizing textual entailment challenge. In TAC, 2009." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 439, + 506, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 439, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 506, + 472 + ], + "type": "text", + "content": "Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, pp. 446-461. Springer, 2014." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 482, + 506, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 482, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 506, + 604 + ], + "type": "text", + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/bit/1457c0d6bcbd4967418bf8ac142f64a-AAbstract.html." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 613, + 504, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 504, + 647 + ], + "type": "text", + "content": "Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9650-9660, 2021." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 655, + 504, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 504, + 689 + ], + "type": "text", + "content": "Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. arXiv preprint arXiv:2209.06794, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "text", + "content": "Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO Captions: Data collection and evaluation server. arXiv preprint, 2015." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, 
+ 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. UNITER: Universal image-text representation learning. In European Conference on Computer Vision (ECCV), 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 506, + 179 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 179 + ], + "type": "text", + "content": "Jaemin Cho, Jie Lei, Hao Tan, and Mohit Bansal. Unifying vision-and-language tasks via text generation. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 1931-1942. PMLR, 2021. URL http://proceedings.mlr.press/v139/cho21a.html." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 185, + 506, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 506, + 230 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 506, + 230 + ], + "type": "text", + "content": "Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. ArXiv preprint, abs/2204.02311, 2022. URL https://arxiv.org/abs/2204.02311." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 236, + 506, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 506, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 506, + 282 + ], + "type": "text", + "content": "Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. 
In 2014 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2014, Columbus, OH, USA, June 23-28, 2014, pp. 3606-3613. IEEE Computer Society, 2014. doi: 10.1109/CVPR.2014.461. URL https://doi.org/10.1109/CVPR.2014.461." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 289, + 504, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 289, + 504, + 323 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 504, + 323 + ], + "type": "text", + "content": "Adam Coates, Andrew Ng, and Honglak Lee. An analysis of single-layer networks in unsupervised feature learning. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 215-223. JMLR Workshop and Conference Proceedings, 2011." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 329, + 504, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 329, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 329, + 504, + 353 + ], + "type": "text", + "content": "Ido Dagan, Oren Glickman, and Bernardo Magnini. The pascal recognising textual entailment challenge. In Machine Learning Challenges Workshop, pp. 177-190. Springer, 2005." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 358, + 506, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 358, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 506, + 392 + ], + "type": "text", + "content": "Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. Coatnet: Marrying convolution and attention for all data sizes. Advances in Neural Information Processing Systems, 34:3965-3977, 2021." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 399, + 506, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 506, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 506, + 467 + ], + "type": "text", + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://aclanthology.org/N19-1423." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 473, + 504, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 473, + 504, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 504, + 507 + ], + "type": "text", + "content": "Shizhe Diao, Jiaxin Bai, Yan Song, Tong Zhang, and Yonggang Wang. Zen: Pre-training chinese text encoder enhanced by n-gram representations. In Findings of the Association for Computational Linguistics: EMNLP 2020, pp. 4729-4740, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 514, + 506, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 506, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 506, + 569 + ], + "type": "text", + "content": "Shizhe Diao, Ruijia Xu, Hongjin Su, Yilei Jiang, Yan Song, and Tong Zhang. Taming pre-trained language models with n-gram representations for low-resource domain adaptation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 3336-3349, 2021." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 576, + 506, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 506, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 506, + 610 + ], + "type": "text", + "content": "Shizhe Diao, Zhichao Huang, Ruijia Xu, Xuechun Li, Yong Lin, and Tong Zhang. Black-box prompt learning for pre-trained language models. Transactions on Machine Learning Research, 2023. URL https://openreview.net/forum?id=IvsGP7xRvm." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 616, + 506, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 506, + 651 + ], + "type": "text", + "content": "Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, et al. Cogview: Mastering text-to-image generation via transformers. Advances in Neural Information Processing Systems, 34, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 658, + 506, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 506, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 506, + 692 + ], + "type": "text", + "content": "William B. Dolan and Chris Brockett. Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005), 2005. URL https://aclanthology.org/I05-5002." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "type": "text", + "content": "Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. Unified language model pre-training for natural language understanding and generation. 
In NeurIPS, pp. 13042-13054, 2019." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 140 + ], + "type": "text", + "content": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021. URL https://openreview.net/forum?id=YicbFdNTTy." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 144, + 506, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 506, + 178 + ], + "type": "text", + "content": "Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873-12883, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 184, + 504, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 184, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 184, + 504, + 218 + ], + "type": "text", + "content": "Zhiyi Fu, Wangchunshu Zhou, Jingjing Xu, Hao Zhou, and Lei Li. Contextual representation learning beyond masked language modeling. In ACL (1), pp. 2701-2714. Association for Computational Linguistics, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 224, + 506, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 224, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 506, + 270 + ], + "type": "text", + "content": "Danilo Giampiccolo, Bernardo Magnini, Ido Dagan, and Bill Dolan. The third PASCAL recognizing textual entailment challenge. In Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 1-9, Prague, 2007. Association for Computational Linguistics. URL https://aclanthology.org/W07-1401." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 276, + 506, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 276, + 506, + 311 + ], + "spans": [ + { + "bbox": [ + 105, + 276, + 506, + 311 + ], + "type": "text", + "content": "Ross B. Girshick. Fast R-CNN. In 2015 IEEE International Conference on Computer Vision, ICCV 2015, Santiago, Chile, December 7-13, 2015, pp. 1440-1448. IEEE Computer Society, 2015. doi: 10.1109/ICCV.2015.169. URL https://doi.org/10.1109/ICCV.2015.169." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 316, + 506, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 506, + 361 + ], + "type": "text", + "content": "Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch sgd: Training imagenet in 1 hour. ArXiv preprint, abs/1706.02677, 2017a. URL https://arxiv.org/abs/1706.02677." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 368, + 506, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 368, + 506, + 424 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 506, + 424 + ], + "type": "text", + "content": "Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the V in VQA matter: Elevating the role of image understanding in visual question answering. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pp. 6325-6334. IEEE Computer Society, 2017b. doi: 10.1109/CVPR.2017.670. URL https://doi.org/10.1109/CVPR.2017.670." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "type": "text", + "content": "R Bar Haim, Ido Dagan, Bill Dolan, Lisa Ferro, Danilo Giampiccolo, Bernardo Magnini, and Idan Szpektor. The second pascal recognising textual entailment challenge. In Proceedings of the Second PASCAL Challenges Workshop on Recognising Textual Entailment, volume 7, 2006." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 471, + 506, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 471, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 506, + 515 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 770-778. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.90. URL https://doi.org/10.1109/CVPR.2016.90." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 522, + 506, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 522, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 506, + 578 + ], + "type": "text", + "content": "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross B. Girshick. Momentum contrast for unsupervised visual representation learning. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 9726-9735. IEEE, 2020. doi: 10.1109/CVPR42600.2020.00975. URL https://doi.org/10.1109/CVPR42600.2020.00975." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 584, + 504, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 504, + 619 + ], + "type": "text", + "content": "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16000-16009, 2022." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 624, + 506, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 506, + 702 + ], + "type": "text", + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6626-6637, 2017. URL https://proceedings.neurips.cc/paper/2017/ hash/8ald694707eb0fefe65871369074926d-Abstract.html." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 709, + 504, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 504, + 731 + ], + "type": "text", + "content": "Kashmir Hill and Jeremy White. Designed to deceive: Do these people look real to you. The New York Times, 11, 2020." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "text", + "content": "Yupan Huang, Hongwei Xue, Bei Liu, and Yutong Lu. Unifying multimodal transformer for bidirectional image and text generation. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 1138-1147, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 505, + 148 + ], + "type": "text", + "content": "Zhicheng Huang, Zhaoyang Zeng, Bei Liu, Dongmei Fu, and Jianlong Fu. Pixel-BERT: Aligning image pixels with text by deep multi-modal transformers. arXiv preprint, 2020." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 156, + 505, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 505, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 505, + 190 + ], + "type": "text", + "content": "Taichi Iki and Akiko Aizawa. Effect of visual extensions on natural language understanding in vision-and-language models. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 2189-2196, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 198, + 505, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 198, + 505, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 505, + 222 + ], + "type": "text", + "content": "Shankar Iyer, Nikhil Dandekar, Kornél Csernai, et al. First quora dataset release: Question pairs. data.quora.com, 2017." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 229, + 505, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 229, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 229, + 505, + 297 + ], + "type": "text", + "content": "Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 4904-4916. PMLR, 2021. URL http://proceedings.mlr.press/v139/jia21b.html." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 304, + 505, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 505, + 361 + ], + "type": "text", + "content": "Wonjae Kim, Bokyung Son, and Ildoo Kim. 
Vilt: Vision-and-language transformer without convolution or region supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 5583-5594. PMLR, 2021. URL http://proceedings.mlr.press/v139/kim21k.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 369, + 505, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 369, + 505, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 505, + 404 + ], + "type": "text", + "content": "Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pp. 554-561, 2013." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 411, + 505, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 505, + 425 + ], + "type": "text", + "content": "Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 432, + 411, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 432, + 411, + 445 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 411, + 445 + ], + "type": "text", + "content": "Yann LeCun and Corinna Cortes. MNIST handwritten digit database. 2010." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 453, + 505, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 505, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 505, + 520 + ], + "type": "text", + "content": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 
BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7871-7880, Online, 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.703. URL https://aclanthology.org/2020.acl-main.703." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 528, + 505, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 528, + 505, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 505, + 563 + ], + "type": "text", + "content": "Junnan Li, Ramprasaath R Selvaraju, Akhilesh Deepak Gotmare, Shafiq Joty, Caiming Xiong, and Steven Hoi. Align before fuse: Vision and language representation learning with momentum distillation. In Conference on Neural Information Processing Systems (NeurIPS), 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 571, + 505, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 505, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 505, + 594 + ], + "type": "text", + "content": "Lianian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. VisualBERT: A simple and performant baseline for vision and language. arXiv preprint, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 601, + 505, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 505, + 647 + ], + "type": "text", + "content": "Lianian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10965-10975, 2022a." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 655, + 505, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 505, + 689 + ], + "type": "text", + "content": "Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. Oscar: Object-semantics aligned pre-training for vision-language tasks. In European Conference on Computer Vision (ECCV), 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "text", + "content": "Yehao Li, Jiahao Fan, Yingwei Pan, Ting Yao, Weiyao Lin, and Tao Mei. Uni-eden: Universal encoder-decoder network by multi-granular vision-language pre-training. ArXiv preprint, abs/2201.04026, 2022b. URL https://arxiv.org/abs/2201.04026." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. RoBERTa: A robustly optimized bert pretraining approach. arXiv preprint, 2019." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 506, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 506, + 159 + ], + "type": "text", + "content": "Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019. URL https://openreview.net/forum?id=Bkg6RiCqY7." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 166, + 506, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 166, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 166, + 506, + 244 + ], + "type": "text", + "content": "Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visi-olinguistic representations for vision-and-language tasks. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 13-23, 2019. URL https://proceedings.neurips.cc/paper/2019/bit/74d97b01eae257e44aa9d5bade97baf-Abstract.html." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 252, + 504, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 252, + 504, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 504, + 276 + ], + "type": "text", + "content": "Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 283, + 506, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 283, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 506, + 340 + ], + "type": "text", + "content": "Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory F. Diamos, Erich Elsen, David García, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, and Hao Wu. Mixed precision training. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018. URL https://openreview.net/forum?id=r1gs9JgRZ." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 348, + 506, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 348, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 506, + 381 + ], + "type": "text", + "content": "Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722-729. IEEE, 2008." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 390, + 504, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 504, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 504, + 414 + ], + "type": "text", + "content": "Rui Pan, Shizhe Diao, Jianlin Chen, and Tong Zhang. Extremebert: A toolkit for accelerating pretraining of customized bert. arXiv preprint arXiv:2211.17201, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 421, + 506, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 421, + 506, + 466 + ], + "spans": [ + { + "bbox": [ + 105, + 421, + 506, + 466 + ], + "type": "text", + "content": "Omkar M. Parkhi, Andrea Vedaldi, Andrew Zisserman, and C. V. Jawahar. Cats and dogs. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, Providence, RI, USA, June 16-21, 2012, pp. 3498-3505. IEEE Computer Society, 2012. doi: 10.1109/CVPR.2012.6248092. URL https://doi.org/10.1109/CVPR.2012.6248092." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 474, + 506, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 474, + 506, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 474, + 506, + 542 + ], + "type": "text", + "content": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. 
In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 2227-2237, New Orleans, Louisiana, 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1202. URL https://aclanthology.org/N18-1202." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 549, + 504, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 549, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 504, + 572 + ], + "type": "text", + "content": "Jason Phang, Thibault Févry, and Samuel R. Bowman. Sentence encoders on stilts: Supplementary training on intermediate labeled-data tasks. ArXiv, abs/1811.01088, 2018." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 580, + 506, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 580, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 580, + 506, + 604 + ], + "type": "text", + "content": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 611, + 506, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 506, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 506, + 689 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8748-8763. PMLR, 2021. URL http://proceedings.mlr.press/v139/radford21a.html." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 732 + ], + "type": "text", + "content": "Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446, 2021." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research (JMLR), 2020." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 505, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 505, + 170 + ], + "type": "text", + "content": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pp. 2383-2392, Austin, Texas, 2016. Association for Computational Linguistics. doi: 10.18653/v1/D16-1264. URL https://aclanthology.org/D16-1264." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 177, + 506, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 177, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 177, + 506, + 233 + ], + "type": "text", + "content": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 8821-8831. PMLR, 2021. URL http://proceedings.mlr.press/v139/ramesh21a.html." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 241, + 506, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 241, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 107, + 241, + 506, + 266 + ], + "type": "text", + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 272, + 506, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 272, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 506, + 328 + ], + "type": "text", + "content": "Steven J. Rennie, Etienne Marcheret, Youssef Mroueh, Jerret Ross, and Vaibhava Goel. Self-critical sequence training for image captioning. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pp. 1179-1195. IEEE Computer Society, 2017. doi: 10.1109/CVPR.2017.131. URL https://doi.org/10.1109/CVPR.2017.131." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 336, + 506, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 336, + 506, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 336, + 506, + 371 + ], + "type": "text", + "content": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 379, + 505, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 379, + 505, + 413 + ], + "spans": [ + { + "bbox": [ + 107, + 379, + 505, + 413 + ], + "type": "text", + "content": "Andrei A Rusu, Neil C Rabinowitz, Guillaume Desjardins, Hubert Soyer, James Kirkpatrick, Koray Kavukcuoglu, Razvan Pascanu, and Raia Hadsell. Progressive neural networks. arXiv preprint arXiv:1606.04671, 2016." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 421, + 506, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 421, + 506, + 465 + ], + "spans": [ + { + "bbox": [ + 105, + 421, + 506, + 465 + ], + "type": "text", + "content": "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 474, + 506, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 474, + 506, + 541 + ], + "spans": [ + { + "bbox": [ + 105, + 474, + 506, + 541 + ], + "type": "text", + "content": "Tim Salimans, Ian J. Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. In Daniel D. Lee, Masashi Sugiyama, Ulrike von Luxburg, Isabelle Guyon, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, pp. 2226-2234, 2016. URL https://proceedings.neurips.cc/paper/2016/black/8a3363abe792db2d8761d6403605aab7-Abtract.html." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 548, + 504, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 504, + 572 + ], + "type": "text", + "content": "Roy Schwartz, Jesse Dodge, Noah A Smith, and Oren Etzioni. Green ai. Communications of the ACM, 63(12):54-63, 2020." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 581, + 504, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 581, + 504, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 581, + 504, + 615 + ], + "type": "text", + "content": "Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. ArXiv preprint, abs/2112.04482, 2021. URL https://arxiv.org/abs/2112.04482." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 623, + 506, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 623, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 506, + 679 + ], + "type": "text", + "content": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pp. 1631-1642, Seattle, Washington, USA, 2013. Association for Computational Linguistics. URL https://aclanthology.org/D13-1170." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 687, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 732 + ], + "type": "text", + "content": "Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. VL-BERT: pretraining of generic visual-linguistic representations. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020. URL https://openreview.net/forum?id=SygXPaEYvH." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 138 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 138 + ], + "type": "text", + "content": "Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6418-6428, Florence, Italy, 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1644. URL https://aclanthology.org/P19-1644." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 144, + 506, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 506, + 200 + ], + "type": "text", + "content": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. 
In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pp. 2818-2826. IEEE Computer Society, 2016. doi: 10.1109/CVPR.2016.308. URL https://doi.org/10.1109/CVPR.2016.308." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 207, + 506, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 207, + 506, + 263 + ], + "spans": [ + { + "bbox": [ + 105, + 207, + 506, + 263 + ], + "type": "text", + "content": "Hao Tan and Mohit Bansal. LXMERT: Learning cross-modality encoder representations from transformers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 5100-5111, Hong Kong, China, 2019a. Association for Computational Linguistics. doi: 10.18653/v1/D19-1514. URL https://aclanthology.org/D19-1514." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 270, + 506, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 506, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 506, + 326 + ], + "type": "text", + "content": "Hao Tan and Mohit Bansal. LXMERT: Learning cross-modality encoder representations from transformers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 5100-5111, Hong Kong, China, 2019b. Association for Computational Linguistics. doi: 10.18653/v1/D19-1514. URL https://aclanthology.org/D19-1514." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 333, + 506, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 333, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 506, + 399 + ], + "type": "text", + "content": "Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pp. 10347-10357. PMLR, 2021. URL http://proceedings.mlr.press/v139/touvron21a.html." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 406, + 506, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 506, + 473 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 506, + 473 + ], + "type": "text", + "content": "Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 6306-6315, 2017. URL https://proceedings.neurips.cc/paper/2017/bitical/7a98af17e63a0ac09ce2e96d03992fbc-AAbstract.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 480, + 506, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 480, + 506, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 506, + 557 + ], + "type": "text", + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. 
In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pp. 5998-6008, 2017. URL https://proceedings.neurips.cc/paper/2017/bit/3f5ee243547dee91fbd053c1c4a845aa-Abstract.html." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 565, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 565, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 506, + 609 + ], + "type": "text", + "content": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019. URL https://openreview.net/forum?id=rJ4km2R5t7." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 616, + 506, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 506, + 661 + ], + "type": "text", + "content": "Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. ArXiv preprint, abs/2202.03052, 2022. URL https://arxiv.org/abs/2202.03052." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 668, + 506, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 506, + 702 + ], + "type": "text", + "content": "Wenhui Wang, Hangbo Bao, Li Dong, and Furu Wei. 
Vlmo: Unified vision-language pre-training with mixture-of-modality-experts. ArXiv preprint, abs/2111.02358, 2021a. URL https://arxiv.org/abs/2111.02358." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "text", + "content": "Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. Simvlm: Simple visual language model pretraining with weak supervision. arXiv preprint, 2021b." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 507, + 733 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 107, + 82, + 507, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 507, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 507, + 116 + ], + "type": "text", + "content": "Alex Warstadt, Amanpreet Singh, and Samuel R. Bowman. Neural network acceptability judgments. Transactions of the Association for Computational Linguistics, 7:625-641, 2019. doi: 10.1162/tacl_a_00290. URL https://aclanthology.org/Q19-1040." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 507, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 507, + 180 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 507, + 180 + ], + "type": "text", + "content": "Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. Emergent abilities of large language models. Transactions on Machine Learning Research, 2022. URL https://openreview.net/forum?id=yzkSU5zdwD. Survey Certification." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 186, + 506, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 506, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 506, + 254 + ], + "type": "text", + "content": "Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 1112–1122, New Orleans, Louisiana, 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1101. URL https://aclanthology.org/N18-1101." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 260, + 507, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 260, + 507, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 507, + 306 + ], + "type": "text", + "content": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. Google's neural machine translation system: Bridging the gap between human and machine translation. ArXiv preprint, abs/1609.08144, 2016. URL https://arxiv.org/abs/1609.08144." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 312, + 505, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 312, + 505, + 337 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 505, + 337 + ], + "type": "text", + "content": "Ning Xie, Farley Lai, Derek Doran, and Asim Kadav. Visual entailment: A novel task for fine-grained image understanding. arXiv preprint, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 342, + 507, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 507, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 507, + 399 + ], + "type": "text", + "content": "Canwen Xu, Wangchunshu Zhou, Tao Ge, Furu Wei, and Ming Zhou. BERT-of-theseus: Compressing BERT by progressive module replacing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 7859-7869, Online, 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.633. URL https://aclanthology.org/2020.emnlp-main.633." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 406, + 507, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 507, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 507, + 474 + ], + "type": "text", + "content": "Haiyang Xu, Ming Yan, Chenliang Li, Bin Bi, Songfang Huang, Wenming Xiao, and Fei Huang. E2E-VLP: End-to-end vision-language pre-training enhanced by visual learning. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 503-513, Online, 2021a. Association for Computational Linguistics. doi: 10.18653/v1/2021.acl-long.42. URL https://aclanthology.org/2021.acl-long.42." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 480, + 506, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 480, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 506, + 505 + ], + "type": "text", + "content": "Jingjing Xu, Wangchunshu Zhou, Zhiyi Fu, Hao Zhou, and Lei Li. A survey on green deep learning. ArXiv preprint, abs/2111.05193, 2021b. URL https://arxiv.org/abs/2111.05193." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 510, + 507, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 507, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 507, + 578 + ], + "type": "text", + "content": "Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. Attngan: Fine-grained text to image generation with attentional generative adversarial networks. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pp. 1316-1324. IEEE Computer Society, 2018. doi: 10.1109/CVPR.2018.00143. URL http://openaccess.thecvf.com/content_cvpr_2018/html/Xu_AttnGAN_Fine-Grained_Text_CVPR_2018_paper.html." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 585, + 505, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 505, + 620 + ], + "type": "text", + "content": "Zhengyuan Yang, Zhe Gan, Jianfeng Wang, Xiaowei Hu, Faisal Ahmed, Zicheng Liu, Yumao Lu, and Lijuan Wang. Crossing the format boundary of text and boxes: Towards unified vision-language modeling. ArXiv, abs/2111.12085, 2021." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 626, + 504, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 626, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 504, + 651 + ], + "type": "text", + "content": "Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. ArXiv preprint, abs/1708.03888, 2017. URL https://arxiv.org/abs/1708.03888." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 656, + 507, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 507, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 507, + 692 + ], + "type": "text", + "content": "Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 697, + 507, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 507, + 733 + ], + "type": "text", + "content": "Ning Yu, Vladislav Skripniuk, Sahar Abdelnabi, and Mario Fritz. Artificial fingerprinting for generative models: Rooting deepfake attribution in training data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 14448-14457, 2021." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 705 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 138 + ], + "type": "text", + "content": "Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, Ce Liu, Mengchen Liu, Zicheng Liu, Yumao Lu, Yu Shi, Lijuan Wang, Jianfeng Wang, Bin Xiao, Zhen Xiao, Jianwei Yang, Michael Zeng, Luowei Zhou, and Pengchuan Zhang. Florence: A new foundation model for computer vision. arXiv preprint, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 144, + 505, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 505, + 178 + ], + "type": "text", + "content": "Yan Zeng, Xinsong Zhang, and Hang Li. Multi-grained vision language pre-training: Aligning texts with visual concepts. ArXiv preprint, abs/2111.08276, 2021. URL https://arxiv.org/abs/2111.08276." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 185, + 507, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 507, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 507, + 232 + ], + "type": "text", + "content": "Han Zhang, Weichong Yin, Yewei Fang, Lanxin Li, Boqiang Duan, Zhihua Wu, Yu Sun, Hao Tian, Hua Wu, and Haifeng Wang. Ernie-vilg: Unified generative pre-training for bidirectional vision-language generation. ArXiv preprint, abs/2112.15283, 2021a. URL https://arxiv.org/abs/2112.15283." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 237, + 505, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 237, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 237, + 505, + 272 + ], + "type": "text", + "content": "Pengchuan Zhang, Xiujun Li, Xiaowei Hu, Jianwei Yang, Lei Zhang, Lijuan Wang, Yejin Choi, and Jianfeng Gao. VinVL: Revisiting visual representations in vision-language models. In Conference on Computer Vision and Pattern Recognition (CVPR), 2021b." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 277, + 507, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 507, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 507, + 346 + ], + "type": "text", + "content": "Wangchunshu Zhou, Canwen Xu, Tao Ge, Julian J. McAuley, Ke Xu, and Furu Wei. BERT loses patience: Fast and robust inference with early exit. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/black/d4dd111a4fd973394238aca5c05bebe3-AAbstract.html." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 352, + 507, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 352, + 507, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 507, + 408 + ], + "type": "text", + "content": "Wangchunshu Zhou, Tao Ge, Canwen Xu, Ke Xu, and Furu Wei. Improving sequence-to-sequence pretraining via sequence span rewriting. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 571-582, Online and Punta Cana, Dominican Republic, 2021a. Association for Computational Linguistics. doi: 10.18653/v1/2021.emnlp-main.45. URL https://aclanthology.org/2021.emnlp-main.45." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 415, + 507, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 507, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 507, + 460 + ], + "type": "text", + "content": "Wangchunshu Zhou, Dong-Ho Lee, Ravi Kiran Selvam, Seyeon Lee, and Xiang Ren. Pre-training text-to-text transformers for concept-centric common sense. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021b. URL https://openreview.net/forum?id=3k20LAiHYL2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 466, + 507, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 466, + 507, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 507, + 491 + ], + "type": "text", + "content": "Wangchunshu Zhou, Canwen Xu, and Julian McAuley. BERT learns to teach: Knowledge distillation with meta learning. In ACL (1), pp. 7037-7049. Association for Computational Linguistics, 2022a." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 496, + 505, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 496, + 505, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 505, + 520 + ], + "type": "text", + "content": "Wangchunshu Zhou, Yan Zeng, Shizhe Diao, and Xinsong Zhang. Vlue: A multi-task benchmark for evaluating vision-language models. CoRR, abs/2205.15237, 2022b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 526, + 505, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 505, + 561 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 505, + 561 + ], + "type": "text", + "content": "Xiao Zhou, Weizhong Zhang, Zonghao Chen, Shizhe Dao, and Tong Zhang. Efficient neural network training via forward and backward propagation sparsification. Advances in Neural Information Processing Systems, 34, 2021c." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 567, + 505, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 505, + 602 + ], + "type": "text", + "content": "Xiao Zhou, Weizhong Zhang, Hang Xu, and Tong Zhang. Effective sparsification of neural networks with global sparsity constraint. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3599-3608, 2021d." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 608, + 505, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 608, + 505, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 505, + 632 + ], + "type": "text", + "content": "Xiao Zhou, Renjie Pi, Weizhong Zhang, Yong Lin, and Tong Zhang. Probabilistic bilevel coreset selection. In International Conference on Machine Learning. PMLR, 2022c." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 637, + 507, + 705 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 637, + 507, + 705 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 507, + 705 + ], + "type": "text", + "content": "Minfeng Zhu, Pingbo Pan, Wei Chen, and Yi Yang. DM-GAN: dynamic memory generative adversarial networks for text-to-image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 5802-5810. Computer Vision Foundation / IEEE, 2019. doi: 10.1109/CVPR.2019.00595. URL http://openaccess.thecvf.com/content_CVPR_2019/html/Zhu_DM-GAN_Dynamic_Memory_Generative_Adversarial_Networks_for_Text-To-Image_Synthesis_CVPR_2019_paper.html." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 183, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 183, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 183, + 94 + ], + "type": "text", + "content": "A APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 279, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 279, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 
105, + 279, + 116 + ], + "type": "text", + "content": "A.1 DETAILS OF HYPER-PARAMETERS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "text", + "content": "Pre-training Our model is a base-size Transformer implemented with a 6-layer encoder and a 6-layer decoder, 768 dimensions for hidden states, 512 for maximum input length, and 3072 for intermediate size. We train our model from scratch without initializing the Transformer encoder and decoder. The image encoder is initialized from ResNet-101 (He et al., 2016) with ImageNet weights since we find a warm start provides a reliable visual representation and helps the convergence. For models pre-training on large-scale data, we optimize 10 epochs while for other small-scale datasets, we optimize 40 epochs with the AdamW optimizer. The weight decay is set to 0.01 with " + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.999" + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "text", + "content": ". The learning rate is 2e-4 with a warm-up period for the first " + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "text", + "content": " steps and linearly decayed to 0 after " + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "text", + "content": " of the total training steps. 
In each batch, there are 8,192 image-text pairs for text-to-image generation and image-to-text generation with 8,192 text-only documents for text-to-text generation. We use center-crop to resize each image to the size of " + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "inline_equation", + "content": "256\\times 256" + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "text", + "content": ", which is the only data augmentation used during training. All pre-training experiments are conducted on 32GB NVIDIA V100 GPUs. We adopt mixed-precision (Micikevicius et al., 2018) to accelerate training and save memory. The model trained on the largest data takes around 10 days on 1024 V100 GPUs. The default settings are shown in Table 6. We adopt dynamic masking in our experiments, where the masking ratio is randomly sampled from a uniform distribution " + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "inline_equation", + "content": "\\mathrm{U}(0,1)" + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 305, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 305, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 305, + 504, + 373 + ], + "type": "text", + "content": "Fine-tuning The learning rate is " + }, + { + "bbox": [ + 104, + 305, + 504, + 373 + ], + "type": "inline_equation", + "content": "\\in [1\\mathrm{e} - 5,5\\mathrm{e} - 5]" + }, + { + "bbox": [ + 104, + 305, + 504, + 373 + ], + "type": "text", + "content": " and our model is optimized by AdamW. Because the image resolution differs between pre-training and fine-tuning, the position parameters are adapted using linear interpolation. For all downstream tasks, we apply random resize crops and horizontal flips augmentation during training. All fine-tuning experiments are conducted on 32GB NVIDIA V100 GPUs. 
The default settings for text classification, image classification, multi-modal understanding and image-to-text generation are shown in Tables 7, 8, and 9, respectively." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 148, + 382, + 463, + 502 + ], + "blocks": [ + { + "bbox": [ + 148, + 382, + 463, + 502 + ], + "lines": [ + { + "bbox": [ + 148, + 382, + 463, + 502 + ], + "spans": [ + { + "bbox": [ + 148, + 382, + 463, + 502 + ], + "type": "table", + "html": "
configvalue
optimizerAdamW (Loshchilov & Hutter, 2019)
learning rate2e-4
weight decay0.01
optimizer momentumβ1, β2=0.9, 0.999
batch size8192
learning rate schedulelinear decay
warmup ratio (Goyal et al., 2017a)0.02
training epochs{10, 40}
augmentationRandomResizedCrop
", + "image_path": "afb702bf614965138be44944fad40c441ce6705eeda21c064d089aec2757b8bc.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 219, + 541, + 391, + 641 + ], + "blocks": [ + { + "bbox": [ + 251, + 509, + 358, + 520 + ], + "lines": [ + { + "bbox": [ + 251, + 509, + 358, + 520 + ], + "spans": [ + { + "bbox": [ + 251, + 509, + 358, + 520 + ], + "type": "text", + "content": "Table 6: Pre-training setting." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 219, + 541, + 391, + 641 + ], + "lines": [ + { + "bbox": [ + 219, + 541, + 391, + 641 + ], + "spans": [ + { + "bbox": [ + 219, + 541, + 391, + 641 + ], + "type": "table", + "html": "
configvalue
optimizerAdamW
learning rate{1e-5, 2e-5, 5e-5}
weight decay0.01
optimizer momentumβ1, β2=0.9, 0.999
batch size{16, 32, 64}
learning rate schedulelinear decay
warmup ratio0.1
training epochs{5, 10}
", + "image_path": "3934839d7cf75277eebcff04a94a0b776c94eab7aa3440dfdc4dda2b2cdcbb4f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 223, + 649, + 382, + 660 + ], + "lines": [ + { + "bbox": [ + 223, + 649, + 382, + 660 + ], + "spans": [ + { + "bbox": [ + 223, + 649, + 382, + 660 + ], + "type": "text", + "content": "Table 7: Text classification: GLUE setting." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 681, + 282, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 282, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 282, + 693 + ], + "type": "text", + "content": "A.2 DETAILS OF DOWNSTREAM TASKS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": "Language Understanding We conduct experiments on GLUE benchmark including MNLI (Williams et al., 2018), CoLA (Warstadt et al., 2019), MRPC (Dolan & Brockett, 2005), QQP (Iyer et al., 2017), SST-2 (Socher et al., 2013), QNLI (Rajpurkar et al., 2016)," + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 
+ }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 209, + 80, + 402, + 190 + ], + "blocks": [ + { + "bbox": [ + 209, + 80, + 402, + 190 + ], + "lines": [ + { + "bbox": [ + 209, + 80, + 402, + 190 + ], + "spans": [ + { + "bbox": [ + 209, + 80, + 402, + 190 + ], + "type": "table", + "html": "
configvalue
optimizerLARS (You et al., 2017)
base learning rate0.1
weight decay0
optimizer momentum0.9
batch size16384
learning rate schedulecosine decay
warmup epochs10
training epochs90
augmentationRandomResizedCrop
", + "image_path": "f58515bae1b5e5a8cba2a67ca0eb3e690c445bb4d6b66e88b50945e2c3949d87.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 154, + 220, + 457, + 340 + ], + "blocks": [ + { + "bbox": [ + 206, + 198, + 401, + 209 + ], + "lines": [ + { + "bbox": [ + 206, + 198, + 401, + 209 + ], + "spans": [ + { + "bbox": [ + 206, + 198, + 401, + 209 + ], + "type": "text", + "content": "Table 8: Image classification: Linear probing setting." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 154, + 220, + 457, + 340 + ], + "lines": [ + { + "bbox": [ + 154, + 220, + 457, + 340 + ], + "spans": [ + { + "bbox": [ + 154, + 220, + 457, + 340 + ], + "type": "table", + "html": "
configvalue
optimizerAdamW
learning rate[1e-5, 5e-5]
weight decay0.02
optimizer momentumβ1, β2=0.9, 0.999
batch size1024
learning rate schedulelinear decay
warmup epochs[2, 5]
training epochs[5, 15]
label smoothing (Szegedy et al., 2016)0.1
augmentationRandomResizedCrop, HorizontalFlips
", + "image_path": "475c0044a24bad8a00c82e5f62b10e40a7f42a74d6e3f58ce588b2562538299d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 148, + 348, + 462, + 361 + ], + "lines": [ + { + "bbox": [ + 148, + 348, + 462, + 361 + ], + "spans": [ + { + "bbox": [ + 148, + 348, + 462, + 361 + ], + "type": "text", + "content": "Table 9: Multi-modal understanding and image-to-text generation: fine-tuning setting." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 380, + 506, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 449 + ], + "type": "text", + "content": "RTE (Dagan et al., 2005; Haim et al., 2006; Giampiccolo et al., 2007; Bentivogli et al., 2009), and STS-B (Agirre et al., 2007). We follow the practice of BART (Lewis et al., 2020) and feed the same input to the encoder and decoder, and the hidden state of the final decoder token is fed into a new multi-class linear classifier or regression head. MNLI results are an average of MNLI-m and MNLI-mm. MRPC and QQP results are average of accuracy and F1. Matthews correlation coefficient (MCC) is reported for CoLA and Pearson correlation coefficient (PCC) is reported for STS-B." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 453, + 506, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 506, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 506, + 555 + ], + "type": "text", + "content": "Vision Understanding We conduct vision experiments in both fine-tuning and linear evaluation (linear eval). 
The linear evaluation follows a common practice (Caron et al., 2021; He et al., 2020; Singh et al., 2021) in self-supervised learning to evaluate the representation quality, where the pre-trained backbone model is frozen and a new linear classifier is appended on top of it. We choose 12 popular datasets: ImageNet (Russakovsky et al., 2015), Food101 (Bossard et al., 2014), CIFAR10 (Krizhevsky et al., 2009), CIFAR100 (Krizhevsky et al., 2009), Cars (Krause et al., 2013), Aircraft (Maji et al., 2013), DTD (Cimpoi et al., 2014), Pets (Parkhi et al., 2012), Flowers102 (Nilsback & Zisserman, 2008), MNIST (LeCun & Cortes, 2010), STL10 (Coates et al., 2011), and Country211 (Radford et al., 2021)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 559, + 506, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 506, + 672 + ], + "type": "text", + "content": "Multi-modal Understanding We consider three popular multi-modal tasks: VQAv2 (Goyal et al., 2017b), SNLI-VE (Xie et al., 2019) and NLVR2 (Suhr et al., 2019) to evaluate our model's multi-modal understanding ability. For VQAv2, following ALBEF (Li et al., 2021), the image and question are fed to the encoder and the decoder generates answers based on the multi-modal embeddings. For SNLI-VE, we follow SimVLM (Wang et al., 2021b) to feed the image to the encoder and the text to the decoder. A classifier is appended on top of our pre-trained model, and it is trained to predict the result based on the last hidden states of the decoder. For NLVR2, two input pairs are constructed, each of them including one image and the textual description. The prediction is made based on the concatenation of these two embeddings following SimVLM (Wang et al., 2021b). The resolutions for VQAv2, SNLI-VE, NLVR2 are 480, 384, 384, respectively." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 734 + ], + "type": "text", + "content": "Text-to-Image Generation The text-to-image task requires the model to understand the textual instruction first and then draw the image according to the input's intention. The input text is fed to our encoder, and our decoder will generate visual tokens one by one. After obtaining visual tokens, they are decoded into a raw image by an image decoder. We directly use an off-the-shelf image decoder from VQGAN (Esser et al., 2021). Following (Ramesh et al., 2021) we directly evaluate our" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 109, + 80, + 500, + 255 + ], + "blocks": [ + { + "bbox": [ + 109, + 80, + 500, + 255 + ], + "lines": [ + { + "bbox": [ + 109, + 80, + 500, + 255 + ], + "spans": [ + { + "bbox": [ + 109, + 80, + 500, + 255 + ], + "type": "table", + "html": "
Data TypeDatasetImage Domain#Images#Captions#Total
In-Domain Data (ID)COCOCOCO110.3K551.7K1.3M
Visual GenomeCOCO108.2K759.0K
Small-scale Web Data (SWD)SBUWeb859.7K859.7K14.9M
CC-3MWeb2.9M2.9M
CC-12MWeb11.1M11.1M
Object-Region Data (ORD)VG regionsCOCO108.2K3.6M17.0M
VG objectsCOCO108.2K925.6K
COCO objectsCOCO110.3K736.6K
RefcocoCOCO27.9K589.9K
Open ImageFlickr1.7M7.5M
Obj365Flickr577.6K3.6M
Vision Data (VD)ImageNet-21KImageNet13.2M13.2M13.2M
Large-scale Web Data (LWD)DAVINCI-200MWeb205.6M205.6M601.3M
LAION-400MWeb395.7M395.7M
Text Data (TD)C4Web--800GB
", + "image_path": "7b1921afa2bfe10baaffc21441a2eb24b6be1a163f1bc78d651197c68c35be17.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 266, + 504, + 288 + ], + "lines": [ + { + "bbox": [ + 104, + 266, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 504, + 288 + ], + "type": "text", + "content": "Table 10: Statistics of the pre-training datasets. #Images, #Captions, and #Total denote the number of images, the number of image-text pairs, and the total number of image-text pairs, respectively." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "text", + "content": "pre-trained model on 30,000 images randomly sampled from COCO (Chen et al., 2015) validation split. Both Fréchet Inception Distance (FID) (Heusel et al., 2017) and Inception Score (IS) (Salimans et al., 2016) are reported. The image resolution is 256." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 353, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 353, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 353, + 504, + 418 + ], + "type": "text", + "content": "Image-to-Text Generation For image-to-text generation (also called image captioning), the image is given to encoder and the decoder will generate the corresponding caption. Our experiments are conducted on COCO dataset (Chen et al., 2015) with cross-entropy optimization. Other task-specific techniques such as CIDEr optimization (Rennie et al., 2017) are not introduced. The image resolution is 480. We also conduct zero-shot captioning experiments on NoCaps (Agrawal et al., 2019) and VLINE (Zhou et al., 2022b)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 430, + 244, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 244, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 244, + 441 + ], + "type": "text", + "content": "A.3 PRE-TRAINING DATASETS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 448, + 504, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 504, + 514 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 504, + 514 + ], + "type": "text", + "content": "Since existing studies pre-trained their models on different corpora, some of which are publicly available (e.g., CC-3M, CC-12M) while some are in-house datasets (e.g., ALIGN (Jia et al., 2021)), making the fair comparison difficult. Considering results only on the state-of-the-art performance would underestimate the potential of this line of research. Therefore, we propose several practical settings, including small-scale and large-scale, and then conduct detailed comparisons on them in section 5.1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 516, + 506, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 506, + 659 + ], + "type": "text", + "content": "We collect a large set of datasets with diverse distributions for pre-training. According to its source, we divide them into in-domain, small-scale web data, object-region data, vision data, and large-scale web data. The statistics and details are shown in Table 10. Most of them are naturally image-text pairs, while to enrich our corpus, we leverage object descriptions, region descriptions, and vision data (i.e., ImageNet). For objects and regions, we crop them from the original image according to their bounding box. The text part is composed according to a human-written template and objects. 
For example, the prompt template is \"This image contains [OBJ_A] and [OBJ_B]\", where [OBJ_A] and [OBJ_B] are two object names from the data. For vision data, because they are usually labeled with a single word or short phrase, we compose a description with prompt templates such as \"A picture of [LABEL]\" or \"The image contains [LABEL]\". For example, \"A picture of cat\" or \"The image contains cat\". We curated a dataset containing about 205.6M image-text pairs, which are available publicly on the internet. The data distribution is similar to LAION-400M. Because both are from web images, we merge them into large-scale web data (LWD)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 670, + 259, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 670, + 259, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 670, + 259, + 681 + ], + "type": "text", + "content": "A.4 REPRODUCTION OF SIMVLM" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "content": "Since SimVLM is not open-sourced, we need to reproduce it by ourselves. There are two main difficulties in the reproduction: 1. it uses 1.8 billion in-house data 2. the configurations (e.g., parameter size, number of layers) of its base model are not clearly stated. 
However, there are still some clues in Section 4.4 of the SimVLM paper, where they propose a SimVLMsmall model with 8" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": "layers, 512 embedding dimensions, and trained on about 200M web data. To demonstrate the success of our replication, we train a " + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "inline_equation", + "content": "\\mathrm{SimVLM}_{small}" + }, + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": " model with the exact same configurations on about 200M web data. We obtain a VQA score of 68.50, surpassing the reported score of 67.43 in the original paper. We argue this result verifies our successful replication." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 142, + 233, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 142, + 233, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 233, + 152 + ], + "type": "text", + "content": "A.5 EFFECTS OF COMPUTE" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 160, + 506, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 160, + 506, + 261 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 506, + 261 + ], + "type": "text", + "content": "Our model is trained with large compute. To reveal the effects of compute, we visualize the performance improvement trends of SimVLM and DAVINCI as a function of the compute spent. There are two goals: 1) to compare better with prior work, as well as to 2) to show if that level of pre-training compute was necessary. We conduct experiments on the image-to-text generation task under both zero-shot and fine-tuning settings. The results are shown in Figure 2. It is observed that with the increase in compute, both models are improved significantly and converged at " + }, + { + "bbox": [ + 104, + 160, + 506, + 261 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 104, + 160, + 506, + 261 + ], + "type": "text", + "content": " of compute (zero-shot), and " + }, + { + "bbox": [ + 104, + 160, + 506, + 261 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 160, + 506, + 261 + ], + "type": "text", + "content": " of compute (fine-tuning), respectively. Large compute is especially helpful for fine-tuning settings. After convergence, our model outperforms SimVLM consistently in these two settings." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 112, + 290, + 288, + 399 + ], + "blocks": [ + { + "bbox": [ + 143, + 277, + 261, + 288 + ], + "lines": [ + { + "bbox": [ + 143, + 277, + 261, + 288 + ], + "spans": [ + { + "bbox": [ + 143, + 277, + 261, + 288 + ], + "type": "text", + "content": "(a) COCO Captioning (Zero-shot)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 112, + 290, + 288, + 399 + ], + "lines": [ + { + "bbox": [ + 112, + 290, + 288, + 399 + ], + "spans": [ + { + "bbox": [ + 112, + 290, + 288, + 399 + ], + "type": "image", + "image_path": "dc770b867dbcb78299001f30d829f53084c2026e823f49908e9faeae97544615.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 320, + 289, + 498, + 399 + ], + "blocks": [ + { + "bbox": [ + 351, + 277, + 474, + 288 + ], + "lines": [ + { + "bbox": [ + 351, + 277, + 474, + 288 + ], + "spans": [ + { + "bbox": [ + 351, + 277, + 474, + 288 + ], + "type": "text", + "content": "(b) COCO Captioning (Fine-tuning)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 320, + 289, + 498, + 399 + ], + "lines": [ + { + "bbox": [ + 320, + 289, + 498, + 399 + ], + "spans": [ + { + "bbox": [ + 320, + 289, + 498, + 399 + ], + "type": "image", + "image_path": "51ca548619b1916413ba2a86c8edd6a847002c4995aac6c96645992ad6236625.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 407, + 504, + 427 + ], + "lines": [ + { + "bbox": [ + 104, + 407, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 504, + 427 + ], + "type": "text", + "content": "Figure 2: The effects of compute. X-axis is the percentage of compute and Y-axis is the CIDer score on COCO captioning task." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 443, + 285, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 443, + 285, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 285, + 453 + ], + "type": "text", + "content": "A.6 EFFECTS OF MASKING STRATEGIES" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 461, + 506, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 461, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 506, + 551 + ], + "type": "text", + "content": "In our experiments, we adopt dynamic masking, where the masking ratio is sampled from a uniform distribution " + }, + { + "bbox": [ + 104, + 461, + 506, + 551 + ], + "type": "inline_equation", + "content": "\\mathrm{U}(0,1)" + }, + { + "bbox": [ + 104, + 461, + 506, + 551 + ], + "type": "text", + "content": ". The prefix ratio could be 0, where the prefix image is none, and the model is forced to predict the whole image with the input caption. There are other designs to mask images. Here we compared three different masking strategies: 1) masked image modeling (randomly masking some patches), 2) in-painting (randomly masking some continuous spans in the middle of the image), and 3) suffix-painting (ours). The results are shown in Table 11. Both masked image modeling and in-painting are effective and competitive. It is observed that suffix-painting is better than masked image modeling and in-painting across all tasks, demonstrating that suffix-painting works well." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 105, + 565, + 511, + 651 + ], + "blocks": [ + { + "bbox": [ + 105, + 565, + 511, + 651 + ], + "lines": [ + { + "bbox": [ + 105, + 565, + 511, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 511, + 651 + ], + "type": "table", + "html": "
MethodCOCOB@4 / CVQA AccSNLI-VE AccNLVR2 AccImageNet AccFood101 AccCIFAR10 AccMNLI AccSST-2 AccText2Image IS / FID
No Pre-training32.1 / 96.7152.7354.2351.08-*-*-*66.3279.84-*
MIM34.7 / 113.468.1875.3469.6648.4656.9572.7981.7289.849.50 / 74.13
In-painting34.5 / 112.567.4675.4168.6647.5054.3871.2081.5589.849.97 / 68.15
Suffix-painting (ours)35.8 / 117.369.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
Token Projection17.7 / 49.252.1371.1152.0115.1125.6261.0182.0190.2511.89 / 60.96
Patch Projection25.7 / 79.557.6971.9257.4536.2344.3169.4081.7390.0511.41 / 61.87
ResNet Feature (ours)35.8 / 117.369.2576.2272.5548.8875.3273.8281.7690.2512.35 / 53.14
", + "image_path": "1d7bec0da99224e3d8c177bd9c59034f67589b0cf6803f2bc2419e13ba2d3119.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 664, + 506, + 715 + ], + "lines": [ + { + "bbox": [ + 104, + 664, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 664, + 506, + 715 + ], + "type": "text", + "content": "Table 11: The effects of masking strategies and image feature extraction on COCO Captions, VQA, SNLI-VE, NLVR2, ImageNet, Food101, CIFAR10, MNLI, SST-2, and text-to-image generation. MIM denotes masked image modeling, where some patches are randomly sampled and masked. Because linear probe and zero-shot text-to-image generation require a pre-trained model to be frozen, the \"No Pre-training\" results on ImageNet, Food101, CIFAR10, and Text2Image are not reported and labeled by * ." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 318, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 318, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 318, + 94 + ], + "type": "text", + "content": "A.7 EFFECTS OF IMAGE FEATURE EXTRACTION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 99, + 506, + 177 + ], 
+ "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 99, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 99, + 506, + 177 + ], + "type": "text", + "content": "There are several different ways to extract image features. We compare three different image representation methods: 1) token projection (projecting the prefix tokens to the hidden dimension of the backbone network on the token-level), 2) patch projection (similar to ViT embedding, we split an image into fixed-size patches, embed each of them by a trainable linear projection on the pixel-level), and 3) ResNet feature extraction (ours). The comparison is shown in Table 11. From the results, we observed that ResNet feature extraction outperforms token projection and patch projection by a large margin. Therefore, we decided to adopt ResNet to extract image features." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 186, + 277, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 277, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 277, + 198 + ], + "type": "text", + "content": "A.8 SCALING EFFECTS OF DATA SIZE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 203, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 203, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 203, + 504, + 258 + ], + "type": "text", + "content": "In this section, we explore the scaling effects of our model. We plot the trends with the increase in data size on four tasks: COCO captioning, VQA, SNLI-VE, and NLVR2. The performance improvement shown in Figure 3 demonstrates that both SimVLM and DAVinci are scaling well with pre-training data size. In addition, DAVinci consistently outperforms SimVLM on different data sizes across these tasks." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 110, + 269, + 203, + 369 + ], + "blocks": [ + { + "bbox": [ + 110, + 269, + 203, + 369 + ], + "lines": [ + { + "bbox": [ + 110, + 269, + 203, + 369 + ], + "spans": [ + { + "bbox": [ + 110, + 269, + 203, + 369 + ], + "type": "image", + "image_path": "52037f741d36ecb6d936b7cace17e5d3bfda934c12d8d6bbcc289cc7e6d0baff.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 379, + 384, + 390 + ], + "lines": [ + { + "bbox": [ + 225, + 379, + 384, + 390 + ], + "spans": [ + { + "bbox": [ + 225, + 379, + 384, + 390 + ], + "type": "text", + "content": "Figure 3: The scaling effects of data size." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 205, + 269, + 296, + 369 + ], + "blocks": [ + { + "bbox": [ + 205, + 269, + 296, + 369 + ], + "lines": [ + { + "bbox": [ + 205, + 269, + 296, + 369 + ], + "spans": [ + { + "bbox": [ + 205, + 269, + 296, + 369 + ], + "type": "image", + "image_path": "3c3a2f391ef45f0474996b6719113037e2819f577a5d0a2c99dd72e172b1387b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 298, + 269, + 389, + 368 + ], + "blocks": [ + { + "bbox": [ + 298, + 269, + 389, + 368 + ], + "lines": [ + { + "bbox": [ + 298, + 269, + 389, + 368 + ], + "spans": [ + { + "bbox": [ + 298, + 269, + 389, + 368 + ], + "type": "image", + "image_path": "945d7d081aa0c3c4fd9057adad00d6148ccc678d76f13c6ae40b1520b0157675.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 390, + 269, + 500, + 368 + ], + "blocks": [ + { + "bbox": [ + 390, + 269, + 500, + 368 + ], + "lines": [ + { + "bbox": [ + 390, + 269, + 500, + 368 + ], + "spans": [ + { + "bbox": [ + 390, + 269, + 500, + 368 + ], + "type": "image", + "image_path": 
"499cc8d70d0c99af2d35676d358599f2d8f0bb4f50ed884695c5262339433f48.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 417, + 334, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 417, + 334, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 334, + 428 + ], + "type": "text", + "content": "A.9 FULL COMPARISON WITH EXISTING METHODS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 434, + 504, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 457 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 457 + ], + "type": "text", + "content": "In Table 12, we display a comprehensive comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 467, + 298, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 467, + 298, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 467, + 298, + 477 + ], + "type": "text", + "content": "A.10 LIMITATION AND SOCIETAL IMPACTS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 483, + 506, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 506, + 594 + ], + "type": "text", + "content": "Limitation. Like most of the previous pre-training studies, the entire project consumed 40 V100 GPU years on an in-house computing cluster with large electricity costs. We tried to keep our model size small enough, but there is still potential for efficiency improvements such as sparse training (Zhou et al., 2021d;c), dataset distillation (Zhou et al., 2022c), and progressive training (Rusu et al., 2016). 
We will explore those techniques to improve the training efficiency and reduce the carbon footprint so that it can adhere to proposals on \"green\" deep learning (Schwartz et al., 2020; Xu et al., 2021b). Furthermore, although we have tried our best to include as many tasks as we can to demonstrate the versatility of DAVinci, we believe our method can be expanded to more tasks (e.g., machine translation, summarization, object detection, etc.), modalities (e.g., video and speech). We leave these investigations to future work." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": "Potential Societal Impacts. Our model has image generation ability with risk of abuse, like fake portraits on social media (Hill & White, 2020), which is a common potential risk in image generation research. Viable solutions are watermarking (Yu et al., 2021) and introducing a strict user license." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 644, + 313, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 644, + 313, + 654 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 313, + 654 + ], + "type": "text", + "content": "A.11 VISUALIZATION OF IMAGE GENERATION" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 661, + 505, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 661, + 505, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 661, + 505, + 696 + ], + "type": "text", + "content": "In this section, we conduct a qualitative analysis by visualizing the generation samples. Figure 4 shows the comparison with DALLE and OFA with the same query. More generated samples are shown in Figures 5." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 91, + 504, + 352 + ], + "blocks": [ + { + "bbox": [ + 105, + 91, + 504, + 352 + ], + "lines": [ + { + "bbox": [ + 105, + 91, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 91, + 504, + 352 + ], + "type": "table", + "html": "
Model#Params.Text MNLI AccVision ImageNet LE / FTImage2Text COCO B@4 / CText2Image COCO IS† / FID↓Multi-modal VQA test-dev / test-standardNLVR2 dev / test-P
Encoder-only Multi-modal Models
VisualBERT (Li et al., 2019)170M81.60---70.80 / 71.0067.40 / 67.00
ViLBERT (Lu et al., 2019)274M79.90---70.55 / 70.92-
VL-BERT (Su et al., 2020)170M81.20---71.16 / --
LXMERT (Tan & Bansal, 2019a)240M80.40---72.42 / 72.5474.90 / 74.50
OSCAR (Li et al., 2020)155M--36.5 / 123.7-73.16 / 73.4478.07 / 78.36
VinVL (Zhang et al., 2021b)157M--38.2 / 129.3-75.95 / 76.1282.05 / 83.08
ViLT (Kim et al., 2021)88M----70.85 / -74.91 / 75.57
ALBEF (Li et al., 2021)210M----75.84 / 76.0482.55 / 83.14
X-VLM (Zeng et al., 2021)240M--39.6 / 132.6-78.22 / 78.3784.41 / 84.76
VLMO (Wang et al., 2021a)----76.64 / 76.8982.77 / 83.34
Encoder-Decoder Multi-modal Models
UNICORN (Yang et al., 2021)--35.8 / 119.1-69.20 / 69.40-/-
Uni-ENDN (Li et al., 2022b)110M----72.20 / 72.50-/-
Pixel-BERT (Huang et al., 2020)144M----74.45 / 74.5576.50 / 77.20
E2E-VLP (Xu et al., 2021a)94M--36.2 / 117.3-73.25 / 73.6777.25 / 77.96
VL-T5 (Cho et al., 2021)220M--34.5 / 116.5-- / 70.3074.60 / 73.60
VL-BART (Cho et al., 2021)220M--35.1 / 116.6-- / 71.3071.70 / 70.30
Text2Image Models
AttnGAN (Xu et al., 2018)---23.30 / 35.20-/--/-
DM-GAN (Zhu et al., 2019)---32.20 / 26.50-/--/-
DALLE (Ramesh et al., 2021) (250M)12B---17.90 / 27.50-/--/-
DALLE (Ramesh et al., 2021) (640M)†82M---15.79 / 29.22-/--/-
CogView (Ding et al., 2021)4B---18.20 / 27.10-/--/-
Unified Models
Unifying (Huang et al., 2021)228M--37.3 / 122.6- / 29.90-/--/-
FLAVA (Singh et al., 2021)240M80.3375.54 / ---72.80 / 72.49-/-
SimVLM (Wang et al., 2021b) (640M)†153M83.2776.04 / -38.5 / 128.7-75.04 / 75.0378.82 / 79.72
SimVLM (Wang et al., 2021b) (1.8B)83.4080.60 / -39.0 / 134.8-77.87 / 78.1481.72 / 81.77
OFA (Wang et al., 2022)182M84.30- / 82.2041.0 / 138.221.50* / 20.80*78.00 / 78.10-/-
Florence (Yuan et al., 2021)637M-- / 90.05-/--/-80.16 / 80.36-/-
DAVINCI154M83.1378.81 / 83.9239.2 / 130.417.44 (22.41*) / 24.21 (19.82*)76.32 / 76.4480.03 / 80.25
", + "image_path": "7ae3bbefb24a10288d4d7d449fda5ff1b3461275ec5aef77bd829531e5512ca1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 361, + 506, + 433 + ], + "lines": [ + { + "bbox": [ + 104, + 361, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 506, + 433 + ], + "type": "text", + "content": "Table 12: Comparison with state-of-the-art vision-language models on vision, language, and multi-modal downstream tasks. All results are from base-size models. LE and FT denote linear evaluation and fine-tuning performance, respectively. Image2Text results are reported without CIDEr optimization. " + }, + { + "bbox": [ + 104, + 361, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 104, + 361, + 506, + 433 + ], + "type": "text", + "content": " are our reproduced models. \\* are the results after fine-tuning. SimVLM (1.8B) and OFA are pre-trained with much larger corpus or human-labeled data of many downstream tasks, and thus they are not comparable and are labeled in gray. Florence (Yuan et al., 2021) is pre-trained with a much larger model size (Florence-CoSwin-H, 637M) and more pre-training data (900M), so the numbers are in grey. bold denotes the best across unified models." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 116, + 464, + 498, + 685 + ], + "blocks": [ + { + "bbox": [ + 116, + 464, + 498, + 685 + ], + "lines": [ + { + "bbox": [ + 116, + 464, + 498, + 685 + ], + "spans": [ + { + "bbox": [ + 116, + 464, + 498, + 685 + ], + "type": "image", + "image_path": "722f92ac76feb7850a7f7cba9696402edf4c6157bdce7d961f63b21c15fe066b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 168, + 693, + 441, + 706 + ], + "lines": [ + { + "bbox": [ + 168, + 693, + 441, + 706 + ], + "spans": [ + { + "bbox": [ + 168, + 693, + 441, + 706 + ], + "type": "text", + "content": "Figure 4: Comparison with DALLE and OFA on text-to-image generation." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 185, + 192, + 266 + ], + "blocks": [ + { + "bbox": [ + 111, + 185, + 192, + 266 + ], + "lines": [ + { + "bbox": [ + 111, + 185, + 192, + 266 + ], + "spans": [ + { + "bbox": [ + 111, + 185, + 192, + 266 + ], + "type": "image", + "image_path": "297fefe037e02ba8e64d9f257f03f3f9c71cac5255960a9c1c2a9c3cc25b9063.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, 
+ { + "bbox": [ + 114, + 268, + 192, + 281 + ], + "lines": [ + { + "bbox": [ + 114, + 268, + 192, + 281 + ], + "spans": [ + { + "bbox": [ + 114, + 268, + 192, + 281 + ], + "type": "text", + "content": "a decorative flower vase full of purple and yellow flowers" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 212, + 185, + 294, + 266 + ], + "blocks": [ + { + "bbox": [ + 212, + 185, + 294, + 266 + ], + "lines": [ + { + "bbox": [ + 212, + 185, + 294, + 266 + ], + "spans": [ + { + "bbox": [ + 212, + 185, + 294, + 266 + ], + "type": "image", + "image_path": "6b482fa8a2b64ecd1c991ca74f4335f170d99447e4e635433fa2927be9d85238.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 217, + 268, + 290, + 274 + ], + "lines": [ + { + "bbox": [ + 217, + 268, + 290, + 274 + ], + "spans": [ + { + "bbox": [ + 217, + 268, + 290, + 274 + ], + "type": "text", + "content": "a vase full of flowers on table" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 314, + 185, + 395, + 266 + ], + "blocks": [ + { + "bbox": [ + 314, + 185, + 395, + 266 + ], + "lines": [ + { + "bbox": [ + 314, + 185, + 395, + 266 + ], + "spans": [ + { + "bbox": [ + 314, + 185, + 395, + 266 + ], + "type": "image", + "image_path": "fe233a6f67b7a479910d1f5586a6a4079b19b40db5df8b63a59bdc13b5cd5202.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 312, + 268, + 399, + 275 + ], + "lines": [ + { + "bbox": [ + 312, + 268, + 399, + 275 + ], + "spans": [ + { + "bbox": [ + 312, + 268, + 399, + 275 + ], + "type": "text", + "content": "a park with flowers on a sunny day" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 415, + 185, + 496, + 266 + ], + "blocks": [ + { + "bbox": [ + 415, + 185, 
+ 496, + 266 + ], + "lines": [ + { + "bbox": [ + 415, + 185, + 496, + 266 + ], + "spans": [ + { + "bbox": [ + 415, + 185, + 496, + 266 + ], + "type": "image", + "image_path": "2f5d3c55afb3ca03a07dbd6276c169f9f965004b7744b329c7552ddbb6854b64.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 419, + 268, + 492, + 281 + ], + "lines": [ + { + "bbox": [ + 419, + 268, + 492, + 281 + ], + "spans": [ + { + "bbox": [ + 419, + 268, + 492, + 281 + ], + "type": "text", + "content": "a fire hydrant sitting in a front yard next to a sign" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 111, + 293, + 192, + 374 + ], + "blocks": [ + { + "bbox": [ + 111, + 293, + 192, + 374 + ], + "lines": [ + { + "bbox": [ + 111, + 293, + 192, + 374 + ], + "spans": [ + { + "bbox": [ + 111, + 293, + 192, + 374 + ], + "type": "image", + "image_path": "e3a65ee953198489cd255b1f3e1e2a38f12dc1148d8dd1ae61a3511c904178b6.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 121, + 375, + 182, + 382 + ], + "lines": [ + { + "bbox": [ + 121, + 375, + 182, + 382 + ], + "spans": [ + { + "bbox": [ + 121, + 375, + 182, + 382 + ], + "type": "text", + "content": "a beach on a sunny day" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 212, + 293, + 293, + 373 + ], + "blocks": [ + { + "bbox": [ + 212, + 293, + 293, + 373 + ], + "lines": [ + { + "bbox": [ + 212, + 293, + 293, + 373 + ], + "spans": [ + { + "bbox": [ + 212, + 293, + 293, + 373 + ], + "type": "image", + "image_path": "f1b488802f01325f2123d87ee44e8a04fb9be03ceb0ba1552a196b59e290029d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 375, + 288, + 388 + ], + "lines": [ + { + "bbox": [ + 214, + 375, + 288, + 388 + ], + "spans": [ + { + "bbox": [ + 
214, + 375, + 288, + 388 + ], + "type": "text", + "content": "a one cart train coming down the railroad tracks" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 313, + 293, + 395, + 373 + ], + "blocks": [ + { + "bbox": [ + 313, + 293, + 395, + 373 + ], + "lines": [ + { + "bbox": [ + 313, + 293, + 395, + 373 + ], + "spans": [ + { + "bbox": [ + 313, + 293, + 395, + 373 + ], + "type": "image", + "image_path": "f5514e687429ef98d8753069502dabe96b605320ffb28e016e6d69082f9ebe98.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 374, + 402, + 380 + ], + "lines": [ + { + "bbox": [ + 307, + 374, + 402, + 380 + ], + "spans": [ + { + "bbox": [ + 307, + 374, + 402, + 380 + ], + "type": "text", + "content": "a red and white boat docked on shore" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 415, + 293, + 496, + 374 + ], + "blocks": [ + { + "bbox": [ + 415, + 293, + 496, + 374 + ], + "lines": [ + { + "bbox": [ + 415, + 293, + 496, + 374 + ], + "spans": [ + { + "bbox": [ + 415, + 293, + 496, + 374 + ], + "type": "image", + "image_path": "0dc9869347acdff3619a3d1e2aacb15ac1e86cf988aca1bc788bc769b85b6029.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 417, + 375, + 493, + 382 + ], + "lines": [ + { + "bbox": [ + 417, + 375, + 493, + 382 + ], + "spans": [ + { + "bbox": [ + 417, + 375, + 493, + 382 + ], + "type": "text", + "content": "a picture of a snowy mountain" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 111, + 395, + 192, + 475 + ], + "blocks": [ + { + "bbox": [ + 111, + 395, + 192, + 475 + ], + "lines": [ + { + "bbox": [ + 111, + 395, + 192, + 475 + ], + "spans": [ + { + "bbox": [ + 111, + 395, + 192, + 475 + ], + 
"type": "image", + "image_path": "91f1ce8149bb0761730469a97caae51b4e71adcacec38dcab405aa552d71ccac.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 476, + 186, + 489 + ], + "lines": [ + { + "bbox": [ + 119, + 476, + 186, + 489 + ], + "spans": [ + { + "bbox": [ + 119, + 476, + 186, + 489 + ], + "type": "text", + "content": "a red stop sign on the side of the road" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 212, + 395, + 294, + 475 + ], + "blocks": [ + { + "bbox": [ + 212, + 395, + 294, + 475 + ], + "lines": [ + { + "bbox": [ + 212, + 395, + 294, + 475 + ], + "spans": [ + { + "bbox": [ + 212, + 395, + 294, + 475 + ], + "type": "image", + "image_path": "f007039c1076ee7599382161422d5509e2fd74404cb6355c368d627a2288aa04.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 213, + 478, + 298, + 491 + ], + "lines": [ + { + "bbox": [ + 213, + 478, + 298, + 491 + ], + "spans": [ + { + "bbox": [ + 213, + 478, + 298, + 491 + ], + "type": "text", + "content": "a building in front of a roundabout with a tree in the center." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 314, + 396, + 395, + 475 + ], + "blocks": [ + { + "bbox": [ + 314, + 396, + 395, + 475 + ], + "lines": [ + { + "bbox": [ + 314, + 396, + 395, + 475 + ], + "spans": [ + { + "bbox": [ + 314, + 396, + 395, + 475 + ], + "type": "image", + "image_path": "88379748a026c2125e6eff7d0bd0b38b68b57639f9a4cd85ec6b14c9758b6abb.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 477, + 396, + 491 + ], + "lines": [ + { + "bbox": [ + 315, + 477, + 396, + 491 + ], + "spans": [ + { + "bbox": [ + 315, + 477, + 396, + 491 + ], + "type": "text", + "content": "bathroom with marble walls and counter surrounds a large mirror" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 415, + 395, + 496, + 476 + ], + "blocks": [ + { + "bbox": [ + 415, + 395, + 496, + 476 + ], + "lines": [ + { + "bbox": [ + 415, + 395, + 496, + 476 + ], + "spans": [ + { + "bbox": [ + 415, + 395, + 496, + 476 + ], + "type": "image", + "image_path": "6fc5ce42118cbaedf06eea0fc1239326338a65fe4d7f2a1d3d22da077667bbd9.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 411, + 478, + 497, + 485 + ], + "lines": [ + { + "bbox": [ + 411, + 478, + 497, + 485 + ], + "spans": [ + { + "bbox": [ + 411, + 478, + 497, + 485 + ], + "type": "text", + "content": "trees by the river in the mountains" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 111, + 496, + 192, + 577 + ], + "blocks": [ + { + "bbox": [ + 111, + 496, + 192, + 577 + ], + "lines": [ + { + "bbox": [ + 111, + 496, + 192, + 577 + ], + "spans": [ + { + "bbox": [ + 111, + 496, + 192, + 577 + ], + "type": "image", + "image_path": 
"63ba22f4a8d7f21f93f0e844fd242ca8207716ecef1d9d5cd5cff058c0d9efa2.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 578, + 200, + 585 + ], + "lines": [ + { + "bbox": [ + 111, + 578, + 200, + 585 + ], + "spans": [ + { + "bbox": [ + 111, + 578, + 200, + 585 + ], + "type": "text", + "content": "many fruits on the plate on the table" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 212, + 496, + 294, + 577 + ], + "blocks": [ + { + "bbox": [ + 212, + 496, + 294, + 577 + ], + "lines": [ + { + "bbox": [ + 212, + 496, + 294, + 577 + ], + "spans": [ + { + "bbox": [ + 212, + 496, + 294, + 577 + ], + "type": "image", + "image_path": "7f297c673584b1a5e2efb6833d3c040379360af2a0bb422a1ae9b5f93ecdf21a.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 578, + 290, + 586 + ], + "lines": [ + { + "bbox": [ + 216, + 578, + 290, + 586 + ], + "spans": [ + { + "bbox": [ + 216, + 578, + 290, + 586 + ], + "type": "text", + "content": "a bunch of fruit in a fruit shop" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 314, + 496, + 395, + 577 + ], + "blocks": [ + { + "bbox": [ + 314, + 496, + 395, + 577 + ], + "lines": [ + { + "bbox": [ + 314, + 496, + 395, + 577 + ], + "spans": [ + { + "bbox": [ + 314, + 496, + 395, + 577 + ], + "type": "image", + "image_path": "a09f965728db9504abf035083d10e0bf5275815384c0c6dd271f2991ae5b4e8c.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 578, + 403, + 585 + ], + "lines": [ + { + "bbox": [ + 306, + 578, + 403, + 585 + ], + "spans": [ + { + "bbox": [ + 306, + 578, + 403, + 585 + ], + "type": "text", + "content": "a table set with a sandwich and a drink" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" 
+ } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 415, + 496, + 496, + 577 + ], + "blocks": [ + { + "bbox": [ + 415, + 496, + 496, + 577 + ], + "lines": [ + { + "bbox": [ + 415, + 496, + 496, + 577 + ], + "spans": [ + { + "bbox": [ + 415, + 496, + 496, + 577 + ], + "type": "image", + "image_path": "3a86a45d8e479c156cb76df3ab4306ff16e06d7a50b10407f6dff5ef6481103b.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 417, + 578, + 496, + 586 + ], + "lines": [ + { + "bbox": [ + 417, + 578, + 496, + 586 + ], + "spans": [ + { + "bbox": [ + 417, + 578, + 496, + 586 + ], + "type": "text", + "content": "noodles and broccoli on a plate" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 223, + 609, + 385, + 620 + ], + "lines": [ + { + "bbox": [ + 223, + 609, + 385, + 620 + ], + "spans": [ + { + "bbox": [ + 223, + 609, + 385, + 620 + ], + "type": "text", + "content": "Figure 5: Generation samples by DAVINCI." 
+ } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_content_list.json b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..54bc4093f22f2108ea924d7e91df0ff8872d135e --- /dev/null +++ b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_content_list.json @@ -0,0 +1,3301 @@ +[ + { + "type": "text", + "text": "YOUR CONTRASTIVE LEARNING IS SECRETLY DOING STOCHASTIC NEIGHBOR EMBEDDING", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tianyang Hu1, Zhili Liu1,2, Fengwei Zhou1, Wenjia Wang2,3, Weiran Huang1,4*", + "bbox": [ + 181, + 167, + 730, + 184 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Huawei Noah's Ark Lab, 2 Hong Kong University of Science and Technology", + "$^{3}$ Hong Kong University of Science and Technology 
(Guangzhou)", + "$^{4}$ Qing Yuan Research Institute, Shanghai Jiao Tong University" + ], + "bbox": [ + 181, + 184, + 692, + 229 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 265, + 547, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Contrastive learning, especially self-supervised contrastive learning (SSCL), has achieved great success in extracting powerful features from unlabeled data. In this work, we contribute to the theoretical understanding of SSCL and uncover its connection to the classic data visualization method, stochastic neighbor embedding (SNE) (Hinton & Roweis, 2002), whose goal is to preserve pairwise distances. From the perspective of preserving neighboring information, SSCL can be viewed as a special case of SNE with the input space pairwise similarities specified by data augmentation. The established correspondence facilitates deeper theoretical understanding of learned features of SSCL, as well as methodological guidelines for practical improvement. Specifically, through the lens of SNE, we provide novel analysis on domain-agnostic augmentations, implicit bias and robustness of learned features. 
To illustrate the practical advantage, we demonstrate that the modifications from SNE to $t$ -SNE (Van der Maaten & Hinton, 2008) can also be adopted in the SSCL setting, achieving significant improvement in both in-distribution and out-of-distribution generalization.", + "bbox": [ + 228, + 292, + 769, + 491 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 511, + 339, + 526 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, contrastive learning, especially self-supervised contrastive learning (SSCL) has drawn massive attention, with many state-of-the-art models following this paradigm in both computer vision (He et al., 2020a; Chen et al., 2020a;b; Grill et al., 2020; Chen & He, 2021; Zbontar et al., 2021) and natural language processing (Fang et al., 2020; Wu et al., 2020; Giorgi et al., 2020; Gao et al., 2021; Yan et al., 2021). In contrast to supervised learning, SSCL learns the representation through a large number of unlabeled data and artificially defined self-supervision signals, i.e., regarding the augmented views of a data sample as positive pairs and randomly sampled data as negative pairs. By enforcing the features of positive pairs to align and those of negative pairs to be distant, SSCL produces discriminative features with state-of-the-art performance for various downstream tasks.", + "bbox": [ + 169, + 541, + 826, + 667 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite the empirical success, the theoretical understanding is under-explored as to how the learned features depend on the data and augmentation, how different components in SSCL work and what are the implicit biases when there exist multiple empirical loss minimizers. For instance, SSCL methods are widely adopted for pretraining, whose feature mappings are to be utilized for various downstream tasks which are usually out-of-distribution (OOD). 
The distribution shift poses great challenges for the feature learning process with extra requirement for robustness and OOD generalization (Arjovsky et al., 2019; Krueger et al., 2021; Bai et al., 2021; He et al., 2020b; Zhao et al., 2023; Dong et al., 2022), which demands deeper understanding of the SSCL methods.", + "bbox": [ + 169, + 674, + 828, + 787 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The goal of SSCL is to learn the feature representations from data. For this problem, one classic method is SNE (Hinton et al., 2006) and its various extensions. Specially, $t$ -SNE (Van der Maaten & Hinton, 2008) has become the go-to choice for low-dimensional data visualization. Comparing to SSCL, SNE is far better explored in terms of theoretical understanding (Arora et al., 2018; Linderman & Steinerberger, 2019; Cai & Ma, 2021). However, its empirical performance is not satisfactory, especially in modern era where data are overly complicated. Both trying to learn feature representations, are there any deep connections between SSCL and SNE? Can SSCL take the advantage of the theoretical soundness of SNE? Can SNE be revived in the modern era by incorporating SSCL?", + "bbox": [ + 169, + 792, + 826, + 905 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 475, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Correspondence to Weiran Huang (weiran.huang@sjtu.edu.cn).", + "bbox": [ + 189, + 909, + 573, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 960 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we give affirmative answers to the above questions and demonstrate how the connections to SNE can benefit the theoretical understandings of SSCL, as well as provide methodological guidelines for practical improvement. 
The main contributions are summarized below.", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a novel perspective that interprets SSCL methods as a type of SNE methods with the aim of preserving pairwise similarities specified by the data augmentation.", + "- The discovered connection enables deeper understanding of SSCL methods. We provide novel theoretical insights for domain-agnostic data augmentation, implicit bias and OOD generalization. Specifically, we show isotropic random noise augmentation induces $l_{2}$ similarity while mixup noise can potentially adapt to low-dimensional structures of data; we investigate the implicit bias from the angle of order preserving and identified the connection between minimizing the expected Lipschitz constant of the SSCL feature map and SNE with uniformity constraint; we identify that the popular cosine similarity can be harmful for OOD generalization.", + "- Motivated by the SNE perspective, we propose several modifications to existing SSCL methods and demonstrate practical improvements. Besides a re-weighting scheme, we advocate to lose the spherical constraint for improved OOD performance and a $t$ -SNE style matching for improved separation. Through comprehensive numerical experiments, we show that the modified $t$ -SimCLR outperforms the baseline with $90\\%$ less feature dimensions on CIFAR-10 and $t$ -MoCo-v2 pretrained on ImageNet significantly outperforms in various domain transfer and OOD tasks." + ], + "bbox": [ + 169, + 152, + 826, + 377 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 PRELIMINARY AND RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 397, + 501, + 412 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Notations. 
For a function $f: \\Omega \\to \\mathbb{R}$ , let $\\|f\\|_{\\infty} = \\sup_{\\boldsymbol{x} \\in \\Omega} |f(\\boldsymbol{x})|$ and $\\|f\\|_p = (\\int_{\\Omega} |f(\\boldsymbol{x})|^p d\\boldsymbol{x})^{1/p}$ . For a vector $\\boldsymbol{x}$ , $\\| \\boldsymbol{x}\\|_p$ denotes its $p$ -norm, for $1 \\leq p \\leq \\infty$ . $\\mathbb{P}(A)$ is the probability of event $A$ . For a random variable $z$ , we use $P_z$ and $p_z$ to denote its probability distribution and density respectively. Denote Gaussian distribution by $N(\\mu, \\Sigma)$ and let $I_d$ be the $d \\times d$ identity matrix. Let the dataset be $\\mathcal{D}_n = \\{\\boldsymbol{x}_1, \\dots, \\boldsymbol{x}_n\\} \\subset \\mathbb{R}^d$ where each $\\boldsymbol{x}_i$ independently follows distribution $P_x$ . The goal of unsupervised representation learning is to find informative low-dimensional features $z_1, \\dots, z_n \\in \\mathbb{R}^{d_z}$ of $\\mathcal{D}_n$ where $d_z$ is usually much smaller than $d$ . We use $f(\\boldsymbol{x})$ as the default notation for the feature mapping from $\\mathbb{R}^d \\to \\mathbb{R}^{d_z}$ , i.e., $z_i = f(\\boldsymbol{x}_i)$ .", + "bbox": [ + 169, + 426, + 826, + 529 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Stochastic neighbor embedding. SNE (Hinton & Roweis, 2002) is a powerful representation learning framework designed for visualizing high-dimensional data in low dimensions by preserving neighboring information. The training process can be conceptually decomposed into the following two steps: (1) calculate the pairwise similarity matrix $\\pmb{P} \\in \\mathbb{R}^{n \\times n}$ for $\\mathcal{D}_n$ ; (2) optimize features $z_1, \\dots, z_n$ such that their pairwise similarity matrix $\\pmb{Q} \\in \\mathbb{R}^{n \\times n}$ matches $\\pmb{P}$ . Under the general guidelines lie plentiful details. 
In Hinton & Roweis (2002), the pairwise similarity is modeled as conditional probabilities of $x_j$ being the neighbor of $x_i$ , which is specified by a Gaussian distribution centered at $x_i$ , i.e., when $i \\neq j$ ,", + "bbox": [ + 169, + 532, + 823, + 632 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nP _ {j \\mid i} = \\frac {\\exp \\left(- \\| \\boldsymbol {x} _ {i} - \\boldsymbol {x} _ {j} \\| _ {2} ^ {2} / 2 \\sigma_ {i} ^ {2}\\right)}{\\sum_ {k \\neq i} \\exp \\left(- \\| \\boldsymbol {x} _ {i} - \\boldsymbol {x} _ {k} \\| _ {2} ^ {2} / 2 \\sigma_ {i} ^ {2}\\right)}, \\tag {2.1}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 637, + 823, + 674 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\sigma_{i}$ is the variance of the Gaussian centered at $x_{i}$ . Similar conditional probabilities $Q_{j|i}$ 's can be defined on the feature space. When matching $Q$ to $P$ , the measurement chosen is the KL-divergence between two conditional probabilities. The overall training objective for SNE is", + "bbox": [ + 169, + 680, + 823, + 723 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\inf _ {\\boldsymbol {z} _ {1}, \\dots , \\boldsymbol {z} _ {n}} \\sum_ {i = 1} ^ {n} \\sum_ {j = 1} ^ {n} P _ {j | i} \\log \\frac {P _ {j | i}}{Q _ {j | i}}. \\tag {2.2}\n$$\n", + "text_format": "latex", + "bbox": [ + 405, + 729, + 823, + 770 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Significant improvements have been made to the classic SNE. Im et al. (2018) generalized the KL-divergence to $f$ -divergence and found that different divergences favors different types of structure. Lu et al. (2019) proposed to make $P$ doubly stochastic so that features are less crowded. Most notably, $t$ -SNE (Van der Maaten & Hinton, 2008) modified the pairwise similarity by considering joint distribution rather than conditional, and utilizes t-distribution instead of Gaussian in the feature space modeling. 
It is worth noting that SNE belongs to a large class of methods called manifold learning (Li et al., 2022). In this work, we specifically consider SNE. If no confusion arises, we use SNE to denote the specific work of Hinton & Roweis (2002) and this type of methods in general interchangeably.", + "bbox": [ + 169, + 777, + 826, + 888 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Self-supervised contrastive learning. The key part of SSCL is the construction of positive pairs, or usually referred to as different views of the same sample. For each $x_{i}$ in the training data, denote", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "its two augmented views to be $\\pmb{x}_i'$ and $\\pmb{x}_i''$ . 
Let $\\mathcal{D}_n' = \\{\\pmb{x}_1', \\dots, \\pmb{x}_n'\\}$ , $\\mathcal{D}_n'' = \\{\\pmb{x}_1'', \\dots, \\pmb{x}_n''\\}$ and define", + "bbox": [ + 169, + 102, + 782, + 119 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nl (\\pmb {x} _ {i} ^ {\\prime}, \\pmb {x} _ {i} ^ {\\prime \\prime}) = - \\log \\frac {\\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i} ^ {\\prime}) , f (\\pmb {x} _ {i} ^ {\\prime \\prime})) / \\tau)}{\\sum_ {\\pmb {x} \\in \\mathcal {D} _ {n} ^ {\\prime} \\cup \\mathcal {D} _ {n} ^ {\\prime \\prime} \\setminus \\{\\pmb {x} _ {i} ^ {\\prime} \\}} \\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i} ^ {\\prime}) , f (\\pmb {x})) / \\tau)},\n$$\n", + "text_format": "latex", + "bbox": [ + 294, + 126, + 702, + 164 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathrm{sim}(z_1,z_2) = \\langle \\frac{z_1}{\\|\\pmb{z}_1\\|_2},\\frac{z_2}{\\|\\pmb{z}_2\\|_2}\\rangle$ denotes the cosine similarity and $\\tau$ is a temperature parameter. The training objective of the popular SimCLR (Chen et al., 2020a) can be written as $L_{\\mathrm{InfoNCE}}\\coloneqq \\frac{1}{2n}\\sum_{i = 1}^{n}(l(\\pmb{x}_i^{\\prime \\prime},\\pmb{x}_i^{\\prime}) + l(\\pmb{x}_i^{\\prime},\\pmb{x}_i^{\\prime \\prime}))$", + "bbox": [ + 169, + 171, + 826, + 219 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, various algorithms are proposed to improve the above contrastive learning. To address the need for the large batch size, MoCo (He et al., 2020a; Chen et al., 2020b) utilizes a moving-averaged encoder and a dynamic memory bank to store negative representations, making it more device-friendly. Grill et al. (2020); Chen & He (2021); Zbontar et al. (2021); Chen et al. (2021) radically discard negative samples in SSCL but still achieve satisfactory transfer performance. 
Another line of works (Caron et al., 2020; Li et al., 2021; Liu et al., 2022) mines the hierarchy information in data to derive more semantically compact representations. Radford et al. (2021); Yao et al. (2021) even extend the contrastive methods to the multi-modality data structure to achieve impressive zero-shot classification results.", + "bbox": [ + 169, + 223, + 826, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Theoretical understanding of SSCL. In contrast to the empirical success, theoretical understanding of SSCL is still limited. While most of the theoretical works (Arora et al., 2019; Tosh et al., 2020; HaoChen et al., 2021; 2022; Wang et al., 2022; Wen & Li, 2021; Wei et al., 2020; Huang et al., 2021; Ji et al., 2021; Ma et al., 2023) focus on its generalization ability on downstream tasks, there are some works studying specifically the InfoNCE loss. One line of works (Oord et al., 2018; Bachman et al., 2019; Hjelm et al., 2018; Tian et al., 2019; 2020) understands the InfoNCE loss from mutual information perspective, showing that the negative InfoNCE is a lower bound of mutual information between positive samples. Other works (Wang & Isola, 2020; Huang et al., 2021; Jing et al., 2021) are from the perspective of geometry of embedding space, showing that InfoNCE can be divided into two parts: one controls alignment and the other prevents representation collapse. In this paper, we study SSCL from the SNE perspective, which, to the best of the authors' knowledge, has no discussion in existing literature. The closest work to ours is Balestriero & LeCun (2022), which proposed a unifying framework under the helm of spectral manifold learning. 
In comparison, our work focuses specifically on the connection between SSCL and SNE.", + "bbox": [ + 169, + 340, + 826, + 521 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 SNE PERSPECTIVE OF SSCL", + "text_level": 1, + "bbox": [ + 171, + 542, + 444, + 558 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A closer look at the training objectives of SNE and SimCLR reveals great resemblance — SimCLR can be seen as a special SNE model. To see this, denote $\\widetilde{\\mathcal{D}}_{2n} = \\mathcal{D}_n^{\\prime \\prime}\\cup \\mathcal{D}_n^{\\prime}$ as the augmented dataset with index $\\widetilde{\\pmb{x}}_{2i - 1} = \\pmb{x}_i^{\\prime \\prime}$ and $\\widetilde{\\pmb{x}}_{2i} = \\pmb{x}_i^\\prime$ . If we change the $l_{2}$ distance to the negative cosine similarity and let $\\sigma_i^2\\equiv \\tau$ . Admitting similar conditional probability formulation as in (2.1) yields that for $i\\neq j$", + "bbox": [ + 169, + 574, + 823, + 633 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {Q} _ {j \\mid i} = \\frac {\\exp \\left(\\operatorname {s i m} \\left(f \\left(\\widetilde {\\boldsymbol {x}} _ {i}\\right) , f \\left(\\widetilde {\\boldsymbol {x}} _ {j}\\right)\\right) / \\tau\\right)}{\\sum_ {k \\neq i} \\exp \\left(\\operatorname {s i m} \\left(f \\left(\\widetilde {\\boldsymbol {x}} _ {i}\\right) , f \\left(\\widetilde {\\boldsymbol {x}} _ {k}\\right)\\right) / \\tau\\right)}. \\tag {3.1}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 641, + 823, + 676 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "By taking", + "bbox": [ + 171, + 684, + 240, + 700 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {P} _ {j \\mid i} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f} \\widetilde {\\boldsymbol {x}} _ {i} \\text {a n d} \\widetilde {\\boldsymbol {x}} _ {j} \\text {a r e p o s i t i v e p a i r s} \\\\ 0, & \\text {o t h e r w i s e ,} \\end{array} \\right. 
\\tag {3.2}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 700, + 823, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the SNE objective (2.2) can be written as", + "bbox": [ + 171, + 739, + 439, + 753 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 1} ^ {2 n} \\sum_ {j = 1} ^ {2 n} \\widetilde {P} _ {j | i} \\log \\frac {\\widetilde {P} _ {j | i}}{\\widetilde {Q} _ {j | i}} = \\sum_ {k = 1} ^ {n} \\Big (- \\log (\\widetilde {Q} _ {2 k - 1 | 2 k}) - \\log (\\widetilde {Q} _ {2 k | 2 k - 1}) \\Big),\n$$\n", + "text_format": "latex", + "bbox": [ + 289, + 763, + 704, + 805 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "which reduces to the SimCLR objective $L_{\\mathrm{InfoNCE}}$ , up to a constant scaling term only depending on $n$ . Now that we have established the correspondence between SNE and SimCLR, it's clear that the feature learning process of SSCL also follows the two steps of SNE.", + "bbox": [ + 169, + 813, + 826, + 863 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(S1) The positive pair construction specifies the similarity matrix $P$ .", + "(S2) The training process then matches $Q$ to $P$ by minimizing some divergence between the two specified by the training objective, e.g., KL divergence in SimCLR." 
+ ], + "bbox": [ + 192, + 875, + 825, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/3ecaa78f8dce75bb41d06285568ba9c89e5fd0f9e5e58682cb50c271df5f04f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 240, + 68, + 406, + 172 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a7a1122e517d6b6db862794be6a44519aeeb56f41996f2d85124745ee03e1a6f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 426, + 68, + 555, + 172 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/313ce6e1d4b7b5dc8b7ed2aac0bd2dff24fba0882f672f1add9f8a8b47be159b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 571, + 68, + 743, + 174 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a24431869e4cdcaeda8101b3a435fc04249f7e6efbe2530ac24d45b30827bb42.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 277, + 183, + 401, + 266 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/32f6266ce0ea2e5ecc4d0294265ed70e66d675a8e0ab408d06970130655d53dd.jpg", + "image_caption": [ + "Figure 1: Gaussian mixture setting with 5 components. (a) illustration of data with 250 samples. (b) learned features by standard SimCLR with normalization (cosine similarity) to 1-sphere. (c) learned features by modified SimCLR without normalization ( $l_{2}$ similarity). (d, e) feature mapping of the two methods in case of OOD mean shift. The linear classification accuracy is $48.4\\%$ in (d) and $100\\%$ in (e)." 
+ ], + "image_footnote": [], + "bbox": [ + 424, + 181, + 555, + 287 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2f1a4d6332027e5c9e546360d59bf528012fe1dfaf61c353418cb03dc8aea145.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 571, + 183, + 743, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The main difference between SNE and SSCL is the first part, where the $P$ in SNE is usually densely filled by $l_{p}$ distance, ignoring the semantic information within rich data like images and texts. In contrast, SSCL omits all traditional distances in $\\mathbb{R}^d$ and only specifies semantic similarity through data augmentations, and the resulting $P$ is sparsely filled only by positive pairs as in (3.2). For structurally rich data such as image or text, the semantic information is invariant to a wide range of transformations. Human's prior knowledge of such invariance guides the construction of positive pairs in SSCL, which is then learned by the feature mapping.", + "bbox": [ + 169, + 367, + 826, + 468 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Remark 3.1 (SNE vs SSCL). We would like to clarify on the main difference between SNE and SSCL that we focus in this work. Although standard SNE (Hinton et al., 2006) is non-parametric without explicit feature maps, and is optimized for the whole dataset, these are not the defining properties of SNE. SNE can also utilize explicit feature maps and mini-batch training (Van Der Maaten, 2009). On the other hand, SSCL can also benefit from larger/full batches (Chen et al., 2020a) and can also be modified to directly optimize the features $\\boldsymbol{z}_i$ 's. 
In this work, we omit these subtleties1 and focus on the (S1) perspective, which we view as the most significant difference between SNE and SSCL.", + "bbox": [ + 169, + 470, + 826, + 571 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 585, + 287, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, to showcase the utility of the SNE perspective, we demonstrate how the feature learning process of SSCL methods, e.g., SimCLR, can become more intuitive and transparent. Specifically, we re-derive the alignment and uniformity principle (Wang & Isola, 2020) as well as provide novel analysis on domain-agnostic augmentations, the implicit bias and robustness of learned features. To aid the illustration, we devise toy examples with simulated Gaussian mixture data.", + "bbox": [ + 169, + 612, + 826, + 681 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Gaussian mixture setting. Let the data follow $d$ -dimensional Gaussian mixture distribution with $m$ components where $P_{\\pmb{x}} \\sim \\frac{1}{m} \\sum_{i=1}^{m} N(\\pmb{\\mu}_i, \\sigma^2 \\pmb{I}_d)$ . The special case with $d = 2$ , $m = 5$ , $\\sigma = 0.1$ is illustrated in Figure 1(a) with 250 independent samples. To apply contrastive methods, consider constructing positive pairs by direct sampling, i.e., if $\\pmb{x}$ is from the first component, then we sample another $\\pmb{x}' \\sim N(\\pmb{\\mu}_1, \\sigma^2 \\pmb{I}_d)$ independently as its alternative view for contrast. 
The negative samples are the same as in standard SimCLR training.", + "bbox": [ + 169, + 686, + 826, + 772 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.1 DOMAIN-AGNOSTIC DATA AUGMENTATION", + "text_level": 1, + "bbox": [ + 171, + 786, + 522, + 800 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Now that we have established in (S1) that the input space pairwise distance is specified by the data augmentation, a natural question to ask is what are the corresponding induced distances. In this section, we investigate this problem for domain-agnostic data augmentations.", + "bbox": [ + 169, + 810, + 826, + 854 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The quality of data augmentation has great impact on the performance of SSCL methods, which reflects people's prior knowledge on the data. However, when facing new data without any domain knowledge,", + "bbox": [ + 169, + 859, + 826, + 888 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1All the contrastive losses are written in full batches for simplicity in this work as we focus on analyzing the optimal solutions of SSCL methods rather than the optimization process.", + "bbox": [ + 169, + 897, + 823, + 925 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "we have to rely on domain-agnostic data augmentations, e.g., adding random noises (Verma et al., 2021), for contrast. We first consider using general random noise augmentation, i.e., for any $\\pmb{x} \\in \\mathbb{R}^d$ , let $\\pmb{x}' = \\pmb{x} + \\delta$ where $\\delta$ follows some distribution with density $\\phi(\\pmb{x})$ . 
Then, for any $\\pmb{x}_i$ , the probability density of having $\\pmb{t} \\in \\mathbb{R}^d$ as its augmented point can be characterized as $P_{\\pmb{t}|\\pmb{x}_i} = \\mathbb{P}(\\pmb{x}_i \\text{ and } \\pmb{x}_i' = \\pmb{t} \\text{ form a positive pair} \\mid \\pmb{x}_i) = \\phi(\\pmb{t} - \\pmb{x}_i)$ . We have the following proposition on Gaussian-induced distance.", + "bbox": [ + 169, + 104, + 826, + 176 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Proposition 3.2 (Gaussian noise injection). If the noise distribution is isotropic Gaussian with mean zero, the induced distance is equivalent to the $l_{2}$ distance in $\\mathbb{R}^d$ , up to a monotone transformation.", + "bbox": [ + 169, + 180, + 823, + 209 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Another popular noise injection method is the mixup (Zhang et al., 2017), where the augmented data are comprised of convex combinations of the training data. For each $\\boldsymbol{x}_i$ , a positive pair can be constructed from another $\\boldsymbol{x}_j$ such that $\\boldsymbol{x}_i' = \\boldsymbol{x}_i + \\lambda (\\boldsymbol{x}_j - \\boldsymbol{x}_i)$ and $\\lambda \\in (0,1)$ is the hyperparameter usually modeled with Beta distribution. For independent $\\boldsymbol{x}_1, \\boldsymbol{x}_2 \\sim P_x$ , denote the convoluted density of $\\lambda (\\boldsymbol{x}_1 - \\boldsymbol{x}_2)$ as $p_{\\lambda}(\\boldsymbol{x})$ , which is symmetric around 0. Then, if employing mixup for positive pairs in SSCL, the induced distance can be written as $P_{\\boldsymbol{x}_1, \\boldsymbol{x}_2} = P_{\\boldsymbol{x}_2, \\boldsymbol{x}_1} = p_{\\lambda}(\\boldsymbol{x}_1 - \\boldsymbol{x}_2)$ .", + "bbox": [ + 169, + 220, + 823, + 306 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Gaussian vs. mixup. Verma et al. 
(2021) proposed to use mixup when domain-specific information is unattainable and provided supportive analysis on its advantage over isotropic Gaussian noise from the classification generalization error point of view. Through (S1) perspective, we can intuitively explain why data-dependent mixup noises can be potentially better from the perspective of the \"curse of dimensionality\". Consider the $d$ -dimensional Gaussian mixture setting with $m < d$ separated components. Notice that $\\pmb{\\mu}_1,\\dots ,\\pmb{\\mu}_m$ can take up at most $(m - 1)$ -dimensional linear sub-space of $\\mathbb{R}^d$ . Denoted the space spanned by $\\pmb{\\mu}_i$ 's as $S_{\\mu}$ . For the light-tailed Gaussian distribution, and the majority of samples will be close to $S_{\\mu}$ . Hence, majority of the convoluted density $p_{\\lambda}(\\pmb{x})$ will also be supported on $S_{\\mu}$ , so does the corresponding $P_{\\pmb{x}_2,\\pmb{x}_1}$ . Thus, the induced distance from mixup will omit irrelevant variations in the complement of $S_{\\mu}$ and focus on the low-dimensional sub-space $S_{\\mu}$ where $\\pmb{\\mu}_i$ 's actually differ. This effectively reduces the dimension dependence from $d$ to $m - 1$ . In comparison, isotropic Gaussian noise induces $l_2$ distance for positive pairs with support of $\\mathbb{R}^d$ , which will be much more inefficient, especially when $m \\ll d$ . Since it is well-known that the performance of regression or classification models is strongly influenced by the intrinsic dimension of the input space (Hamm & Steinwart, 2021), keeping the data in a low-dimensional space is preferable.", + "bbox": [ + 169, + 311, + 826, + 521 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1.2 ALIGNMENT AND UNIFORMITY", + "text_level": 1, + "bbox": [ + 171, + 537, + 442, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Characterizing the learned features of SSCL is of critical importance. 
Wang & Isola (2020) proposed alignment and uniformity as principles for SimCLR type contrastive learning methods. Such results can be intuitively understood through the perspective of (S1) and (S2). Consider the common case where the feature space is $(d_z - 1)$ -sphere. First, (3.2) indicates that only similarities (distances) between positive pairs are non-zero (finite) and all other pairwise similarities (distances) are zero (infinity). Preserving (3.2) requires the features of positive pairs to align (cosine similarity tends to 1) and those of negative pairs to be as distant as possible. If in the extreme case where positive pairs match exactly, i.e., $f(\\pmb{x}_i) = f(\\pmb{x}_i')$ for any $i = 1,\\dots ,n$ , we call it perfect alignment.", + "bbox": [ + 169, + 563, + 823, + 676 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "If perfect alignment is achieved and the features are constrained on the unit sphere, matching (3.2) implies pushing $n$ points on the feature space as distant as possible. Maximally separated $n$ points on a $d$ -sphere has been studied in geometry, known as the Tammes problem (Tammes, 1930; Erber & Hockney, 1991; Melisseny, 1998). We say perfect uniformity is achieved if all the pairs are maximally separated on the sphere. There are some simple cases of the Tammes problem. If $d = 2$ , perfect uniformity can be achieved if the mapped points form a regular polygon. If $d \\geq n - 1$ , the solution can be given by the vertices of an $(n - 1)$ -simplex, inscribed in an $(n - 1)$ -sphere embedded in $\\mathbb{R}^d$ . The cosine similarity between any two vertices is $-1 / (n - 1)$ and in this case, $L_{\\mathrm{InfoNCE}}$ can attain its lower bound2. As $n \\to \\infty$ , the point distribution converges weakly to uniform distribution. 
As can be seen in Figure 1(a, b), perfect alignment and perfect uniformity are almost achieved by standard SimCLR in the Gaussian mixture setting.", + "bbox": [ + 169, + 681, + 826, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As we will demonstrate in Section 3.1.4 that the spherical feature space can be bad for OOD generalization, adopting of the Euclidean space will change the statement of the uniformity property and can also be analyzed from the SNE perspective. Details can be found in Appendix A.5.", + "bbox": [ + 169, + 840, + 823, + 883 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "2Notice that in this case, the optimal feature mapping will contain little information of the data, mapping anchor samples to interchangeable points with identical pairwise distances", + "bbox": [ + 169, + 896, + 823, + 925 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1.3 IMPLICITBIAS", + "text_level": 1, + "bbox": [ + 171, + 103, + 330, + 116 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Existing theoretical results on SSCL provide justification of its empirical success in classification. However, there is more to it than just separating different classes and many phenomena are left unexplained. Take the popular SimCLR (Chen et al., 2020a) on CIFAR-10 as an example, we can consistently observe that the feature similarities within animals (bird, cat, deer, dog, frog, horse) and within objects (airplane, automobile, ship, truck), are significantly higher than those between animals and objects3. 
This can be viewed as an implicit bias towards preserving semantic information, which might be surprising as we have no supervision on the label information during the training process. However, existing literature on implicit bias is scarce. As advocated in Saunshi et al. (2022), ignoring inductive biases cannot adequately explain the success of contrastive learning. In this section, we provide a simple explanation from the perspective of SNE.", + "bbox": [ + 169, + 127, + 826, + 267 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For a more concrete illustration, consider training SimCLR in the Gaussian mixture setting with $d = 1$ , $d_z = 2$ , $m = 4$ , $\\mu_i = i$ , and $\\sigma = 0.1$ . Denote the 4 components in ascending order by A,B,C,D. Perfect alignment and uniformity imply that their feature maps (a, b, c, d) on the unit-circle should be vertices of an inscribed square. What is left unsaid is their relative order. Clockwise or counter-clockwise from a, regardless of the initialization, we can observe SimCLR to consistently produce the order $a \\to b \\to c \\to d$ .", + "bbox": [ + 169, + 273, + 826, + 345 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Remark 3.3 (Relative ordering and neighbor-preserving). The order-preserving property showcased with $d = 1$ is mainly for illustration, as in one-dimension, the neighboring info is simplified as the order, which is much easier to understand. The results remain the same in high dimensions as long as the clusters are well separated with an obvious order of clusters. For instance, some relative orders in Figure 1(a,b) are also stable, e.g., the neighbor of blue will consistently be purple and yellow.", + "bbox": [ + 169, + 347, + 823, + 419 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "With great resemblance to SNE, SSCL methods also exhibit neighbor-preserving property and we identify it as an implicit bias. 
Such implicit bias can be universal in SSCL and the phenomenon in Figure A.3 is also a manifestation. In deep learning, the implicit bias is usually characterized by either closeness to the initialization (Moroshko et al., 2020; Azulay et al., 2021), or minimizing certain complexity (Razin & Cohen, 2020; Zhang et al., 2021). In the case of SimCLR, we hypothesize the implicit bias as the expected Lipschitz constant, which has deep connections to SNE with uniformity constraint. For a feature map $f$ onto the unit-sphere, define", + "bbox": [ + 169, + 428, + 823, + 526 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nC (f) = \\mathbb {E} _ {\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}} \\frac {\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2}}{\\| \\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime} \\| _ {2}}, \\tag {3.3}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 531, + 823, + 566 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where the $x_{1}, x_{2}$ are independent samples from the data distribution.", + "bbox": [ + 169, + 569, + 614, + 584 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Definition 3.4 (SNE with uniformity constraint). Assume data $\\boldsymbol{x}_1, \\dots, \\boldsymbol{x}_n \\in \\mathbb{R}^d$ . If the corresponding SNE features $z_1, \\dots, z_n \\in \\mathbb{R}^{d_z}$ are constrained to be the maximally separated $n$ points on the $(d_z - 1)$ -sphere, we call this problem SNE with uniformity constraint.", + "bbox": [ + 169, + 587, + 823, + 631 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The key of SNE is matching the pairwise similarity matrices $Q$ to $P$ . When solving SNE with uniformity constraint, the only thing to be optimized is the pairwise correspondence, or ordering of the mapping. 
We have the following theorem that links the neighbor-preserving property to $C(f)$ .", + "bbox": [ + 169, + 640, + 823, + 684 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3.5. Let $\\pmb{x}_1, \\dots, \\pmb{x}_n \\in \\mathbb{R}^d$ such that $\\| \\pmb{x}_i - \\pmb{x}_j \\|_2 > 0$ for any $i, j$ and let $z_1, \\dots, z_n \\in \\mathbb{R}^{d_z}$ be maximally separated $n$ points on the $(d_z - 1)$ -sphere. Denote $P = (p_{ij})_{n \\times n}$ and $Q = (q_{ij})_{n \\times n}$ as the corresponding pairwise similarity matrices of $\\pmb{x}_i$ 's and $\\pmb{z}_i$ 's respectively. Let $\\pi$ denote a permutation on $\\{1, \\dots, n\\}$ and denote all such permutations as $T$ . Let $Q^\\pi$ as the $\\pi$ -permuted matrix $Q$ and define", + "bbox": [ + 169, + 685, + 823, + 744 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nC _ {1} (P, Q ^ {\\pi}) = \\sum_ {i \\neq j} \\frac {q _ {\\pi (i) \\pi (j)}}{p _ {i j}} \\quad \\text {a n d} \\quad \\pi^ {*} = \\operatorname * {a r g m i n} _ {\\pi \\in T} C _ {1} (P, Q ^ {\\pi}).\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 748, + 694, + 784 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Then, $\\pi^{*}$ also minimizes $\\| \\bar{P} - Q^{\\pi}\\|_{F}$ where $\\| \\cdot \\|_{F}$ is the Frobenius norm and $\\bar{P} = (\\bar{p}_{ij})_{n\\times n}$ is a (monotonically) transformed similarity matrix with $\\bar{p}_{ij} = -1 / p_{ij}$ .", + "bbox": [ + 169, + 789, + 823, + 821 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3.5 showcases the relationship between minimizing $C(f)$ and the structure preserving property by considering a special SNE problem, where the pairwise similarity is not modeled by Gaussian as standard. Although $q_{ij} = -\\| f(\\pmb{x}_i) - f(\\pmb{x}_j)\\|_2$ is unorthodox, it is reasonable since the larger the distance, the smaller the similarity. 
We have the following corollary to explain the neighbor-preserving property of SSCL and the implicit bias associated with minimizing the complexity $C(f)$ .", + "bbox": [ + 169, + 830, + 826, + 902 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "3Figure A.3 illustrates the phenomenon. Details can be found in Appendix A.1", + "bbox": [ + 189, + 909, + 650, + 924 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Corollary 3.6 (Implicit bias of SSCL). When SSCL model achieves perfect alignment and perfect uniformity, if the complexity $C(f)$ is minimized, the resulting feature map preserves pairwise distance in the input space, resembling SNE with uniformity constraint.", + "bbox": [ + 169, + 103, + 823, + 148 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Corollary 3.6 links the implicit bias of SSCL to the SNE optimization with uniformity constraint. In the case of perfect alignment and perfect uniformity, SSCL can be seen as a special SNE problem where the feature $z_{1}, \\dots, z_{n}$ must be maximally separated on the unit-sphere. Recall the 1-dimension Gaussian case. There are in total $3! = 6$ different orderings for the 4 cluster means, among which, a $\\rightarrow \\mathrm{b} \\rightarrow \\mathrm{c} \\rightarrow \\mathrm{d}$ will give the lowest SNE loss. As can be seen in Figure A.4, both $C(f)$ and the SNE loss are monotonically decreasing during training for the Gaussian mixture setting.", + "bbox": [ + 169, + 157, + 826, + 243 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "When the alignment or uniformity is not perfect, the resulting feature mapping can still be characterized via SNE, with the uniformity constraint relaxed as a form of regularization. 
In our numerical experiments on the CIFAR-10 data, we observe $C(f)$ to be monotonically decreasing during the training process, supporting our hypothesis. More details can be found in Appendix A.3. Corollary 3.6 sheds light on the implicit semantic information preserving phenomenon shown in Figure A.3, as in the input space, images of dogs should be closer to images of cats, than airplanes.", + "bbox": [ + 169, + 248, + 826, + 335 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1.4 TARGETING OOD: EUCLIDEAN VS SPHERICAL", + "text_level": 1, + "bbox": [ + 171, + 348, + 549, + 362 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Almost all SSCL methods require normalization to the unit-sphere and the similarity on the feature space is often the cosine similarity. In comparison, standard SNE methods operate freely on the Euclidean space. In this section, we show that the normalization can hinder the structure-preserving and there is a fundamental trade off between in-distribution and out-of-domain generalization.", + "bbox": [ + 169, + 372, + 823, + 429 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Consider the 2-dimensional Gaussian mixture setting as illustrated in Figure 1(a). Notice that as long as the mixing components are well separated, the learned feature mapping on the sphere will always be the pentagon shape, regardless of the relative locations of the clusters. This is a result of the uniformity property derived under spherical constraint. Distant clusters in the input space will be pulled closer while close clusters will be pushed to be more distant, which results in the trade off between in-distribution and out-of-domain generalization. On one hand, close clusters being more separated in the feature space is potentially beneficial for in-distribution classification. 
On the other hand, the spherical constraint adds to the complexity of the feature mapping, potentially hurting robustness.", + "bbox": [ + 169, + 435, + 826, + 547 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the Euclidean space, pushing away negative samples (as distant as possible) will be much easier, since the feature vectors could diverge towards infinity $^{4}$ and potentially preserve more structural information. To verify our intuition, we relax the spherical constraint in the Gaussian mixture setting and change the cosine similarity in SimCLR to the negative $l_{2}$ distance in $\mathbb{R}$ . The learned features are shown in Figure 1(c). Compared to Figure 1(b), we can get the extra information that the purple cluster is far away from the others. If we introduce a small mean shift to the data, moving the distribution along each dimension by 1, the resulting feature maps differ significantly in robustness. As illustrated in Figure 1(d) vs. (e), the standard SimCLR is much less robust to OOD shifts and the resulting classification accuracy degrades to only $48.4\%$ , while that for the modified SimCLR remains $100\%$ . The same OOD advantage can also be verified in the CIFAR-10 to CIFAR-100 OOD generalization case (details in Appendix C.3 Figure C.8) and large-scale real-world scenarios with MoCo (Chen et al., 2020b) as baseline (details in Section 5).", + "bbox": [ + 169, + 553, + 828, + 708 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 IMPROVING SSCL BY SNE", + "text_level": 1, + "bbox": [ + 171, + 726, + 429, + 742 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The proposed SNE perspective (S1,S2) can inspire various modifications to existing SSCL methods. In this section, we choose SimCLR as our baseline and investigate three straightforward modifications. For empirical evaluation, we report the test classification accuracy of nearest neighbor classifiers on both simulated data and real datasets. 
Experiment details can be found in Appendix C.", + "bbox": [ + 169, + 758, + 826, + 815 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 WEIGHTED POSITIVE PAIRS", + "text_level": 1, + "bbox": [ + 171, + 830, + 405, + 845 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In practice, positive pairs are constructed from anchors (training data), by i.i.d. data augmentations, e.g., random resized crop, random horizontal flip, color jitter, etc. Take random crop as an example, pair 1 and 2 may be from $30\\%$ , $80\\%$ random crops, respectively. Their similarities should not be treated", + "bbox": [ + 169, + 857, + 826, + 902 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "In practice, various regularization, e.g., weight decay, are employed and the resulting features will be bounded.", + "bbox": [ + 189, + 909, + 823, + 924 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/abeffeeae615bb59c1a7505b0ba33952441e268232c272f589bd8ba4b5f1c065.jpg", + "image_caption": [ + "(a) Weighted SimCLR." + ], + "image_footnote": [], + "bbox": [ + 197, + 82, + 464, + 202 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/10129a71b1f7ed9a9454c7fe19cd191e125990057078c7774a222fa6d8368813.jpg", + "image_caption": [ + "(b) SimCLR vs. $t$ -SimCLR.", + "Figure 2: Nearest neighbor classification test accuracy on CIFAR-10 with ResNet-18 after 200 epochs pre-training. (a) $N / A$ stands for the baseline SimCLR. The $x$ -axis is the temperature for IoU weighting scheme. (b) Comparison between SimCLR and $t$ -SimCLR with different feature dimensions." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 80, + 771, + 200 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "as equal, as in typical SSCL methods. Incorporating the disparity in the data augmentation process is straightforward in the perspective of SNE, where the InfoNCE loss can be naturally modified as", + "bbox": [ + 169, + 279, + 823, + 309 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{2 n} \\sum_ {i = 1} ^ {n} p _ {i i ^ {\\prime}} \\cdot \\left(l \\left(\\boldsymbol {x} _ {i}, \\boldsymbol {x} _ {i} ^ {\\prime}\\right) + l \\left(\\boldsymbol {x} _ {i} ^ {\\prime}, \\boldsymbol {x} _ {i}\\right)\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 311, + 609, + 351 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The weight $p_{ii'}$ in $P$ can be specified manually to reflect human's prior knowledge. To test out the effect of such modification, we conduct numerical experiments on CIFAR-10 using the standard SimCLR. The weighting scheme is based on the Intersection over Union (IoU) of random resized crops. For each positive pair, let $p_{ii'} \\propto \\exp(\\mathrm{IoU}(\\boldsymbol{x}_i, \\boldsymbol{x}_i') / \\tau')$ , where $\\tau' > 0$ is a hyperparameter (temperature) controlling the strength of the weighting scheme, i.e., the bigger the $\\tau'$ , the closer to the unweighted state. The CIFAR-10 test performance vs. $\\tau'$ is shown in Figure 2(a). The baseline is $80.7\\%$ and can be significantly improved to $82.1\\%$ if choosing $\\tau' = 1$ .", + "bbox": [ + 169, + 354, + 826, + 455 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 T-SIMCLR: $t$ -SNE STYLE MATCHING", + "text_level": 1, + "bbox": [ + 171, + 469, + 472, + 483 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Most SSCL algorithms differ mainly in (S2), i.e., defining $Q$ and matching it to $P$ , where fruitful results in SNE literature can be mirrored and applied. 
Now that we have identified the advantage of modeling features in Euclidean spaces in Section 3.1.4, the most promising modification that follows is to introduce $t$ -SNE to SimCLR. Since we are learning low-dimensional features from high-dimensional data, preserving all pairwise similarities is impossible and the features tend to collapse. This is referred to as the \"crowding problem\" in Van der Maaten & Hinton (2008) (see Section 3.2 therein). $t$ -SNE utilizes the heavy-tail $t$ -distribution instead of the light-tail Gaussian, to model $Q$ and encourage separation in feature space. Correspondingly, the training objective $L_{\\mathrm{InfoNCE}}$ can be modified as", + "bbox": [ + 169, + 494, + 823, + 607 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{n} \\sum_ {i = 1} ^ {n} - \\log \\frac {\\left(1 + \\| f \\left(\\boldsymbol {x} _ {i}\\right) - f \\left(\\boldsymbol {x} _ {i} ^ {\\prime}\\right) \\| _ {2} ^ {2} / \\left(\\tau t _ {d f}\\right)\\right) ^ {- \\left(t _ {d f} + 1\\right) / 2}}{\\sum_ {1 \\leq j \\neq k \\leq 2 n} \\left(1 + \\| f (\\widetilde {\\boldsymbol {x}} _ {j}) - f (\\widetilde {\\boldsymbol {x}} _ {k}) \\| _ {2} ^ {2} / \\left(\\tau t _ {d f}\\right)\\right) ^ {- \\left(t _ {d f} + 1\\right) / 2}}, \\tag {4.1}\n$$\n", + "text_format": "latex", + "bbox": [ + 276, + 611, + 823, + 656 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $t_{df}$ is the degree of freedom for the $t$ -distribution. Besides substituting the cosine similarity to the $l_2$ distance, the key modification is the modeling of feature space similarity $Q$ , from Gaussian to $t$ -distribution as suggested by Van der Maaten & Hinton (2008) to avoid the crowding problem and accommodate the dimension-deficiency in the feature space. 
We call the modified method $t$ -SimCLR and we expect it to work better, especially when the feature dimension is low, or in the OOD case.", + "bbox": [ + 169, + 657, + 823, + 729 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 2(b) shows the comparison between SimCLR and $t$ -SimCLR on CIFAR-10 with different feature dimensions, where $t$ -SimCLR has significant advantages in all cases and the smaller the $d_{z}$ , the larger the gap. Without decreasing the standard $d_{z} = 128$ , $t$ -SimCLR improves the baseline from $80.8\\%$ to $83.9\\%$ and even beats it using only $d_{z} = 8$ with accuracy $81.7\\%$ .", + "bbox": [ + 169, + 734, + 826, + 792 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Remark 4.1 (Degree of freedom). Standard $t$ -SNE utilizes $t$ -distribution with $t_{df} = 1$ , to better accommodate the extreme $d_z = 2$ case. In practice, $t_{df}$ can vary and as $d_z$ increases, larger $t_{df}$ might be preferred. We recommend using $t_{df} = 5$ as the default choice. The performance of $t_{df}$ vs $d_z$ can be found in Appendix C, as well as discussion on the fundamental difference between $t_{df}$ and $\\tau$ .", + "bbox": [ + 169, + 794, + 823, + 852 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Remark 4.2 (Training epochs). For the CIFAR-10 experiments, we reported the results of ResNet-18 after 200 training epochs, similar to the setting of Yeh et al. (2021). We also conducted 1000-epoch experiments and found that our modifications provide consistent improvements throughout the training process, not in terms of speeding up the convergence, but converging to better solutions. 
Details can be found in Appendix C.1 and Figure C.6.", + "bbox": [ + 169, + 854, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 475, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/ecbea0b8e44524a2cd37f0df3d293f7b78ee280837c0cdb3883cf10ac6680ccb.jpg", + "table_caption": [ + "Table 1: Domain transfer results of vanilla MoCo-v2 and $t$ -MoCo-v2." + ], + "table_footnote": [], + "table_body": "
MethodAircraftBirdsnapCaltech101CarsCIFAR10CIFAR100DTDPetsSUN397Avg.
MoCo-v282.7544.5383.3185.2495.8172.7571.2286.7056.0575.37
t-MoCo-v282.7853.4686.8186.1796.0478.3269.2087.9559.3077.78
", + "bbox": [ + 174, + 88, + 823, + 135 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/f8a254931db8880d14f4e63b78144b31fe25020e07b57f8914abb53bfadad722.jpg", + "table_caption": [ + "Table 2: OOD accuracies of vanilla MoCo-v2 and $t$ -MoCo-v2 on domain generalization benchmarks." + ], + "table_footnote": [], + "table_body": "
MethodPACSVLCSOffice-HomeAvg.
MoCo-v258.570.436.655.2
t-MoCo-v261.375.142.159.5
", + "bbox": [ + 323, + 164, + 671, + 218 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 LARGE SCALE EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 231, + 446, + 244 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this section, we apply the same modifications proposed in Section 4.2 to MoCo-v2 (Chen et al., 2020b), as it is more device-friendly to conduct large scale experiments. We name our model $t$ -MoCo-v2. Both models are pre-trained for 200 epochs on ImageNet following the setting of Chen et al. (2020b). The linear probing accuracy of $t$ -MoCo-v2 on ImageNet is $67.0\\%$ , which is comparable to the MoCo result $67.5\\%$ . With the same level of in-distribution classification accuracy, we conduct extensive experiments to compare their OOD performance. The results in Table 1 and 2 suggest that our modification significantly improves the domain transfer and the OOD generalization ability without sacrificing in-distribution accuracy.", + "bbox": [ + 169, + 263, + 826, + 376 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Domain Transfer. We first conduct experiments on the traditional self-supervision domain transfer benchmark. We compare MoCo-v2 and $t$ -MoCo-v2 on Aircraft, Birdsnap, Caltech101, Cars, CIFAR10, CIFAR100, DTD, Pets, and SUN397. We follow transfer settings in Ericsson et al. (2021) to finetune the pre-trained models. The results are reported in Table 1. Our model $t$ -MoCo-v2 surpasses MoCo-v2 in 8 out of 9 datasets, showing a significantly stronger transfer ability. Notice that our model is pre-trained with 200 epochs, surprisingly, compared with the original MoCo-v2 model pre-trained with 800 epochs, the fine-tuning results of $t$ -MoCo-v2 are still better on Birdsnap, Caltech101, CIFAR100, and SUN397.", + "bbox": [ + 169, + 388, + 823, + 488 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Out-of-domain generalization. 
As illustrated in Section 3.1.4, standard SSCL methods, e.g., SimCLR, MoCo, etc., could suffer from OOD shift. To demonstrate the advantage of our modification, we investigate the effectiveness of our method on OOD generalization benchmarks: PACS Li et al. (2017), VLCS Fang et al. (2013), Office-Home Venkateswara et al. (2017). We follow the standard way to conduct the experiment, i.e., choosing one domain as the test domain and using the remaining domains as training domains, which is named the leave-one-domain-out protocol. As can be seen in Table 2, our $t$ -MoCo-v2 indicates significant improvement over MoCo-v2. Both experiments indicate our modification exhibits substantial enhancement for domain transfer and OOD generalization ability. Similar to domain transfer scenario, compared with the original MoCo-v2 model pre-trained with 800 epochs, $t$ -MoCo-v2 is better on all of the three datasets. More experiment details, including detailed comparisons, are in Appendix C.", + "bbox": [ + 169, + 496, + 826, + 650 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 DISCUSSION", + "text_level": 1, + "bbox": [ + 171, + 670, + 310, + 686 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This work proposes a novel perspective that interprets SSCL methods as a type of SNE methods, which facilitates both deeper theoretical understandings and methodological guidelines for practical improvement. More interpretations of SSCL from preserving the distance between distributions can be found in Appendix B. Our analysis has limitations and the insights from SNE are not universally applicable for all SSCL methods, e.g., Zbontar et al. (2021); Yang et al. (2021) don't fit in our framework. However, this work is an interesting addition to existing theoretical works of SSCL and more investigations can be made along this path. 
While there are various extensions of the classic SNE, in this work, as a proof of concept, we mainly showcased practical improvements from $t$ -SNE. We expect more modifications can be developed by borrowing advances in the SNE literature, e.g., changing to $f$ -divergences (Im et al., 2018) or consider optimal transport Bunne et al. (2019); Salmona et al. (2021); Mialon et al. (2020). On the other hand, standard SNE methods can also borrow existing techniques in SSCL to improve their performance on more complicated data, e.g., incorporating data augmentations instead of or on top of pre-defined distances. In this sense, by choosing feature dimension to be 2, various SSCL methods can also be used as data visualization tools (Böhm et al., 2022; Damrich et al., 2022). Specifically on CIFAR-10, standard $t$ -SNE can barely reveal any clusters while our $t$ -SimCLR with $d_z = 2$ produces much more separation among different labels. More details can be found in Appendix C.7.", + "bbox": [ + 169, + 702, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 470, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 174, + 102, + 287, + 118 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019.", + "Sanjeev Arora, Wei Hu, and Pravesh K Kothari. An analysis of the t-sne algorithm for data visualization. In Conference On Learning Theory, pp. 1455-1462. PMLR, 2018.", + "Sanjeev Arora, Hrishikesh Khandeparkar, Mikhail Khodak, Orestis Plevrakis, and Nikunj Saunshi. A theoretical analysis of contrastive unsupervised representation learning. 
arXiv preprint arXiv:1902.09229, 2019.", + "Shahar Azulay, Edward Moroshko, Mor Shpigel Nacson, Blake E Woodworth, Nathan Srebro, Amir Globerson, and Daniel Soudry. On the implicit bias of initialization shape: Beyond infinitesimal mirror descent. In International Conference on Machine Learning, pp. 468-477. PMLR, 2021.", + "Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. In Advances in Neural Information Processing Systems, pp. 15535-15545, 2019.", + "Haoyue Bai, Rui Sun, Lanqing Hong, Fengwei Zhou, Nanyang Ye, Han-Jia Ye, S-H Gary Chan, and Zhenguo Li. Decaug: Out-of-distribution generalization via decomposed feature representation and semantic augmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 6705-6713, 2021.", + "Randall Balestriero and Yann LeCun. Contrastive and non-contrastive self-supervised learning recover global and local spectral embedding methods. arXiv preprint arXiv:2205.11508, 2022.", + "Jan Niklas Böhm, Philipp Berens, and Dmitry Kobak. Unsupervised visualization of image datasets using contrastive learning. arXiv preprint arXiv:2210.09879, 2022.", + "Charlotte Bunne, David Alvarez-Melis, Andreas Krause, and Stefanie Jegelka. Learning generative models across incomparable spaces. In International conference on machine learning, pp. 851-861. PMLR, 2019.", + "T Tony Cai and Rong Ma. Theoretical foundations of t-sne for visualizing high-dimensional clustered data. arXiv preprint arXiv:2105.07536, 2021.", + "Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. In Advances in Neural Information Processing Systems, 2020.", + "Kai Chen, Lanqing Hong, Hang Xu, Zhenguo Li, and Dit-Yan Yeung. Multisiam: Self-supervised multi-instance siamese representation learning for autonomous driving. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 7546-7554, 2021.", + "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. arXiv preprint arXiv:2002.05709, 2020a.", + "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15750-15758, 2021.", + "Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b.", + "Sebastian Damrich, Niklas Böhm, Fred A Hamprecht, and Dmitry Kobak. From $t$ -sne to umap with contrastive learning. In International Conference on Learning Representations, 2022.", + "Qishi Dong, Awais Muhammad, Fengwei Zhou, Chuanlong Xie, Tianyang Hu, Yongxin Yang, Sung-Ho Bae, and Zhenguo Li. Zood: Exploiting model zoo for out-of-distribution generalization. arXiv preprint arXiv:2210.09236, 2022.", + "T Erber and GM Hockney. Equilibrium configurations of n equal charges on a sphere. Journal of Physics A: Mathematical and General, 24(23):L1369, 1991." + ], + "bbox": [ + 171, + 125, + 828, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 475, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Linus Ericsson, Henry Gouk, and Timothy M Hospedales. How well do self-supervised models transfer? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5414-5423, 2021.", + "Chen Fang, Ye Xu, and Daniel N. Rockmore. Unbiased metric learning: On the utilization of multiple datasets and web images for softening bias. 2013 IEEE International Conference on Computer Vision, pp. 
1657-1664, 2013.", + "Hongchao Fang, Sicheng Wang, Meng Zhou, Jiayuan Ding, and Pengtao Xie. Cert: Contrastive self-supervised learning for language understanding. arXiv preprint arXiv:2005.12766, 2020.", + "Tianyu Gao, Xingcheng Yao, and Danqi Chen. Simcse: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821, 2021.", + "John M Giorgi, Osvald Nitski, Gary D Bader, and Bo Wang. Declutr: Deep contrastive learning for unsupervised textual representations. arXiv preprint arXiv:2006.03659, 2020.", + "Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733, 2020.", + "Ishaan Gulrajani and David Lopez-Paz. In search of lost domain generalization. In International Conference on Learning Representations, 2021.", + "Thomas Hamm and Ingo Steinwart. Adaptive learning rates for support vector machines working on data with low intrinsic dimension. The Annals of Statistics, 49(6):3153-3180, 2021.", + "Jeff Z HaoChen, Colin Wei, Adrien Gaidon, and Tengyu Ma. Provable guarantees for self-supervised deep learning with spectral contrastive loss. Advances in Neural Information Processing Systems, 34, 2021.", + "Jeff Z HaoChen, Colin Wei, Ananya Kumar, and Tengyu Ma. Beyond separability: Analyzing the linear transferability of contrastive representations to related subpopulations. arXiv preprint arXiv:2204.02683, 2022.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.", + "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9729-9738, 2020a.", + "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377, 2021.", + "Yue He, Zheyan Shen, and Peng Cui. Towards non-iid image classification: A dataset and baselines. Pattern Recognition, pp. 107383, 2020b.", + "Geoffrey Hinton and Sam T Roweis. Stochastic neighbor embedding. In NIPS, volume 15, pp. 833-840. CiteSeer, 2002.", + "Geoffrey E. Hinton, Simon Osindero, and Yee Whye Teh. A fast learning algorithm for deep belief nets. Neural Computation, 18:1527-1554, 2006.", + "R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670, 2018.", + "Weiran Huang, Mingyang Yi, and Xuyang Zhao. Towards the generalization of contrastive self-supervised learning. arXiv preprint arXiv:2111.00743, 2021.", + "Daniel Jiwoong Im, Nakul Verma, and Kristin Branson. Stochastic neighbor embedding under f-divergences. arXiv preprint arXiv:1811.01247, 2018." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 475, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wenlong Ji, Zhun Deng, Ryumei Nakada, James Zou, and Linjun Zhang. The power of contrast for feature learning: A theoretical analysis. arXiv preprint arXiv:2110.02473, 2021.", + "Li Jing, Pascal Vincent, Yann LeCun, and Yuandong Tian. Understanding dimensional collapse in contrastive self-supervised learning. 
arXiv preprint arXiv:2110.09348, 2021.", + "Alex Krizhevsky. Learning multiple layers of features from tiny images. University of Toronto, 2009.", + "David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In International Conference on Machine Learning, pp. 5815-5826. PMLR, 2021.", + "Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy M. Hospedales. Deeper, broader and artier domain generalization. 2017 IEEE International Conference on Computer Vision (ICCV), pp. 5543-5551, 2017.", + "Yunfan Li, Peng Hu, Zitao Liu, Dezhong Peng, Joey Tianyi Zhou, and Xi Peng. Contrastive clustering. In 2021 AAAI Conference on Artificial Intelligence (AAAI), 2021.", + "Zengyi Li, Yubei Chen, Yann LeCun, and Friedrich T. Sommer. Neural manifold clustering and embedding. ArXiv, abs/2201.10000, 2022.", + "George C Linderman and Stefan Steinerberger. Clustering with t-sne, provably. SIAM Journal on Mathematics of Data Science, 1(2):313-332, 2019.", + "Zhili Liu, Jianhua Han, Kai Chen, Lanqing Hong, Hang Xu, Chunjing Xu, and Zhenguo Li. Task-customized self-supervised pre-training with scalable dynamic routing. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 1854-1862, 2022.", + "Yao Lu, Jukka Corander, and Zhirong Yang. Doubly stochastic neighbor embedding on spheres. Pattern Recognition Letters, 128:100-106, 2019.", + "Jiajun Ma, Tianyang Hu, and Wenjia Wang. Deciphering the projection head: Representation evaluation self-supervised learning. arXiv preprint arXiv:2301.12189, 2023.", + "JBM Melisseneny. How different can colours be? maximum separation of points on a spherical octant. Proceedings of the Royal Society of London. Series A: Mathematical, Physical and Engineering Sciences, 454(1973):1499-1508, 1998.", + "Facundo Memoli. Gromov-wasserstein distances and the metric approach to object matching. 
Foundations of computational mathematics, 11(4):417-487, 2011.", + "Grégoire Mialon, Dexiong Chen, Alexandre d'Aspremont, and Julien Mairal. A trainable optimal transport embedding for feature aggregation. In International Conference on Learning Representations (ICLR), 2020.", + "Edward Moroshko, Blake E Woodworth, Suriya Gunasekar, Jason D Lee, Nati Srebro, and Daniel Soudry. Implicit bias in deep linear classification: Initialization scale vs training accuracy. Advances in neural information processing systems, 33:22182-22193, 2020.", + "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pp. 8748-8763. PMLR, 2021.", + "Noam Razin and Nadav Cohen. Implicit regularization in deep learning may not be explainable by norms. Advances in neural information processing systems, 33:21174-21187, 2020.", + "Antoine Salmona, Julie Delon, and Agnès Desolneux. Gromov-wasserstein distances between gaussian distributions. arXiv preprint arXiv:2104.07970, 2021." + ], + "bbox": [ + 171, + 102, + 826, + 926 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 475, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Nikunj Saunshi, Jordan Ash, Surbhi Goel, Dipendra Misra, Cyril Zhang, Sanjeev Arora, Sham Kakade, and Akshay Krishnamurthy. Understanding contrastive learning requires incorporating inductive biases. 
arXiv preprint arXiv:2202.14037, 2022.", + "Pieter Merkus Lambertus Tammes. On the origin of number and arrangement of the places of exit on the surface of pollen-grains. Recueil des travaux botaniques nederlandais, 27(1):1-84, 1930.", + "Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. arXiv preprint arXiv:1906.05849, 2019.", + "Yonglong Tian, Chen Sun, Ben Poole, Dilip Krishnan, Cordelia Schmid, and Phillip Isola. What makes for good views for contrastive learning? arXiv preprint arXiv:2005.10243, 2020.", + "Christopher Tosh, Akshay Krishnamurthy, and Daniel Hsu. Contrastive learning, multi-view redundancy, and linear models. arXiv preprint arXiv:2008.10150, 2020.", + "Laurens Van Der Maaten. Learning a parametric embedding by preserving local structure. In Artificial intelligence and statistics, pp. 384-391. PMLR, 2009.", + "Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008.", + "Hemanth Venkateswara, Jose Eusebio, Shayok Chakraborty, and Sethuraman Panchanathan. Deep hashing network for unsupervised domain adaptation. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5385-5394, 2017.", + "Vikas Verma, Thang Luong, Kenji Kawaguchi, Hieu Pham, and Quoc Le. Towards domain-agnostic contrastive learning. In International Conference on Machine Learning, pp. 10530–10541. PMLR, 2021.", + "Haonan Wang, Jieyu Zhang, Qi Zhu, and Wei Huang. Augmentation-free graph contrastive learning. arXiv preprint arXiv:2204.04874, 2022.", + "Tongzhou Wang and Phillip Isola. Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In International Conference on Machine Learning, pp. 9929-9939. PMLR, 2020.", + "Colin Wei, Kendrick Shen, Yining Chen, and Tengyu Ma. Theoretical analysis of self-training with deep networks on unlabeled data. arXiv preprint arXiv:2010.03622, 2020.", + "Zixin Wen and Yuanzhi Li. 
Toward understanding the feature learning process of self-supervised contrastive learning. arXiv preprint arXiv:2105.15134, 2021.", + "Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3733-3742, 2018.", + "Zhuofeng Wu, Sinong Wang, Jiatao Gu, Madian Khabsa, Fei Sun, and Hao Ma. Clear: Contrastive learning for sentence representation. arXiv preprint arXiv:2012.15466, 2020.", + "Yuanmeng Yan, Rumei Li, Sirui Wang, Fuzheng Zhang, Wei Wu, and Weiran Xu. Consert: A contrastive framework for self-supervised sentence representation transfer. arXiv preprint arXiv:2105.11741, 2021.", + "Ceyuan Yang, Zhirong Wu, Bolei Zhou, and Stephen Lin. Instance localization for self-supervised detection pretraining. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3987-3996, 2021.", + "Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. Filip: Fine-grained interactive language-image pre-training. arXiv preprint arXiv:2111.07783, 2021.", + "Chun-Hsiao Yeh, Cheng-Yao Hong, Yen-Chi Hsu, Tyng-Luh Liu, Yubei Chen, and Yann LeCun. Decoupled contrastive learning. arXiv preprint arXiv:2110.06848, 2021." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 475, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stephane Deny. Barlow twins: Self-supervised learning via redundancy reduction. 
arXiv preprint arXiv:2103.03230, 2021.", + "Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning (still) requires rethinking generalization. Communications of the ACM, 64(3):107-115, 2021.", + "Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. arXiv preprint arXiv:1710.09412, 2017.", + "Xuyang Zhao, Tianqi Du, Yisen Wang, Jun Yao, and Weiran Huang. Arcl: Enhancing contrastive learning with augmentation-robust representations. In International Conference on Learning Representations (ICLR), 2023.", + "Roland S Zimmermann, Yash Sharma, Steffen Schneider, Matthias Bethge, and Wieland Brendel. Contrastive learning inverts the data generating process. arXiv preprint arXiv:2102.08850, 2021." + ], + "bbox": [ + 171, + 102, + 828, + 311 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 475, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/c82662cc8e0904e93a0be15d4fd0d1b398b68fec4cc2d96c32fe8c78a7dabacf.jpg", + "image_caption": [ + "Figure A.3: Cosine similarity heat map of learned features from SimCLR on CIFAR-10 dataset. The darker the color, the larger the similarity." + ], + "image_footnote": [], + "bbox": [ + 336, + 99, + 591, + 277 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A TECHNICAL DETAILS", + "text_level": 1, + "bbox": [ + 171, + 345, + 390, + 361 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.1 IMPLICITBIASOFSIMCLRONCIFAR-10.", + "text_level": 1, + "bbox": [ + 171, + 378, + 511, + 392 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure A.3 plots the cosine similarity heat map of learned features from SimCLR on CIFAR-10 dataset. 
To calculate the similarity of class A (figures denoted by $a_i$ ) to class B (figures denoted by $b_i$ ), we first calculate the mean of $b_i$ as $\\bar{b}$ . Then, we sum up $\\sum_{i} \\sin(a_i, \\bar{b})$ and plot is with colors. Hence, the similarity matrix shown in Figure A.3 is not symmetric.", + "bbox": [ + 169, + 404, + 826, + 463 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.2 PROOF OF PROPOSITION 3.2", + "text_level": 1, + "bbox": [ + 171, + 479, + 413, + 492 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Recall the domain-agnostic data augmentation process. For any $\\boldsymbol{x}_i$ , the probability density of having $t \\in \\mathbb{R}^d$ as its augmented point can be characterized as", + "bbox": [ + 169, + 505, + 823, + 532 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\nP _ {\\boldsymbol {t} | \\boldsymbol {x} _ {i}} = \\mathbb {P} (\\boldsymbol {x} _ {i} \\text {a n d} \\boldsymbol {x} _ {i} ^ {\\prime} = \\boldsymbol {t} \\text {f o r a p o s i t i v e p a i r} | \\boldsymbol {x} _ {i}) = \\phi (\\boldsymbol {t} - \\boldsymbol {x} _ {i}).\n$$\n", + "text_format": "latex", + "bbox": [ + 299, + 542, + 696, + 560 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For isotropic Gaussian densities with mean 0 and covariance matrix $\\sigma^2\\mathbf{I}$ , $\\phi (\\pmb {t} - \\pmb {x}_i)\\propto \\exp (-\\| \\pmb {t} - \\pmb {x}_i\\| _2^2 /2\\sigma^2)$ , which is monotonic with the $l_{2}$ distance between $\\pmb{t}$ and $\\pmb{x}_i$", + "bbox": [ + 169, + 568, + 823, + 599 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.3 INVESTIGATIONS ON $C(f)$", + "text_level": 1, + "bbox": [ + 171, + 614, + 405, + 631 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figures A.4 and A.5 illustrate the evolution of different complexity measurements during the training process under the Gaussian mixture setting and the CIFAR-10 respectively.", + "bbox": [ + 169, + 642, + 823, + 671 
+ ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In the Gaussian mixture setting, the feature extractor is a fully connected ReLU network. Besides $C(f)$ , we also evaluate the popular sum of squared weights. The observations on SimCLR are listed as below:", + "bbox": [ + 169, + 676, + 826, + 705 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The expected Lipschitz constant $C(f)$ is small in initialization. It first increases (till around 100 iterations) and then consistently decreases. This empirically supports the implicit bias towards minimizing $C(f)$ .", + "- $C(f)$ and the sum of squared weights share very similar patterns.", + "- The SNE loss is non-increasing, as if we are doing stochastic neighbor embedding using $l_{2}$ -distance." + ], + "bbox": [ + 215, + 717, + 823, + 813 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In the CIFAR-10 case, the feature extractor is ResNet-18 plus a fully-connected projection layer. The output from ResNet-18 is usually called representation (512 dimensional) and is utilized for downstream tasks while the projection (128 dimension) is used for training. Such a representation-projection set up is common in SSCL. Ma et al. (2023) aimed to decipher the projection head and revealed that the projection feature tends to be more uniformly distributed while the representation feature exhibits stronger alignment. Besides $C(f)$ , we also evaluate the $l_{2}$ -norm of the representation. 
The observations for SimCLR and $t$ -SimCLR on CIFAR-10 are summarized as below:", + "bbox": [ + 169, + 825, + 826, + 924 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/ee2247776106f773818e3694de0b475733f5a68626668795dd121ba375cda8e7.jpg", + "image_caption": [ + "Figure A.4: Empirical evaluation on the complexity of the learned feature mapping during training under the Gaussian mixture setting. Two complexity measurements are considered, i.e., $C(f)$ as in (3.3) and the SNE loss as in (2.2). The SNE loss here only serves as in indicator for how well the pairwise distances are preserved. The training objective is the standard InfoNCE loss. The SNE loss decreases quickly until in the first 100 iterations and then stays flat." + ], + "image_footnote": [], + "bbox": [ + 323, + 125, + 684, + 311 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $C(f)$ for the projection layer shares similar patterns as in the Gaussian mixture case, first increase and then decreases. However, $C(f)$ for the representation layer monotonically decreases.", + "- $C(f)$ for the projection layer and the $l_{2}$ -norm in the representation layer share almost identical patterns.", + "- Comparing SimCLR, both the calculated $C(f)$ and $l_{2}$ -norm are much smaller for $t$ -SimCLR." + ], + "bbox": [ + 215, + 424, + 823, + 530 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In conclusion, on one hand, our empirical results demonstrate that the complexity of the feature extractor $C(f)$ does decrease during training and seem to be implicitly minimized. 
On the other hand, its trend is shared with other more popularly used complexity measurements.", + "bbox": [ + 169, + 541, + 826, + 584 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.4 PROOF OF COROLLARY 3.6", + "text_level": 1, + "bbox": [ + 171, + 599, + 406, + 614 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In this section, we illustrate with rigor how the hypothesized implicit bias can give rise to structure-preserving property of SSCL. Corollary 3.6 states that minimizing the (Lipschitz) complexity of the feature mapping will also result in the best match between $P$ and $Q$ (under permutation). To provide more theoretical insight, we present the following lemma in the simpler vector-matching case.", + "bbox": [ + 169, + 625, + 826, + 680 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma A.1. Let $0 < x_{1} < \\dots < x_{m}$ and $0 < y_{1} < \\dots < y_{m}$ be two real-valued sequences, normalized such that $\\sum_{i=1}^{m} x_{i}^{2} = \\sum_{i=1}^{m} y_{i}^{2} = 1$ . Consider a permutation $\\pi$ of $\\{1, \\dots, m\\}$ and denote all such permutations as $T$ . Then", + "bbox": [ + 169, + 684, + 823, + 726 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\pi \\in T} {\\operatorname {a r g m i n}} \\sum_ {i = 1} ^ {m} \\frac {y _ {\\pi (i)}}{x _ {i}} = \\underset {\\pi \\in T} {\\operatorname {a r g m i n}} \\sum_ {i = 1} ^ {m} \\left(x _ {i} - y _ {\\pi (i)}\\right) ^ {2} := \\pi^ {*},\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 729, + 665, + 770 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $\\pi^{*}(i) = i$ for all $i = 1,\\dots ,m$", + "bbox": [ + 171, + 773, + 393, + 787 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Proof. 
By the rearrangement inequality, we have", + "bbox": [ + 171, + 803, + 493, + 818 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 1} ^ {m} \\frac {y _ {\\pi (i)}}{x _ {i}} \\geq \\sum_ {i = 1} ^ {m} \\frac {y _ {i}}{x _ {i}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 433, + 821, + 563, + 859 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Similarly,", + "bbox": [ + 171, + 864, + 240, + 878 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {i = 1} ^ {m} \\left(x _ {i} - y _ {\\pi (i)}\\right) ^ {2} = \\sum_ {i = 1} ^ {m} x _ {i} ^ {2} + \\sum_ {i = 1} ^ {m} y _ {i} ^ {2} - 2 \\sum_ {i = 1} ^ {m} x _ {i} \\cdot y _ {\\pi (i)} \\geq 2 - 2 \\sum_ {i = 1} ^ {m} x _ {i} \\cdot y _ {i}.\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 882, + 712, + 921 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/0ea42d2746acd11d67fc60fd568b58c37e8b9f9e8f6c2abc3e15cb4d9e33226e.jpg", + "image_caption": [ + "(a) SimCLR on CIFAR-10." + ], + "image_footnote": [], + "bbox": [ + 282, + 122, + 736, + 279 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ddf39eecf32ac6ecfeefc56f7138a32319b57f38df1543347a566b8405e5afbf.jpg", + "image_caption": [ + "(b) $t$ -SimCLR on CIFAR-10." + ], + "image_footnote": [], + "bbox": [ + 277, + 338, + 733, + 496 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/a415ec116f011833886b5c2061399667a41ae2137ad8d707a995d43e79618e66.jpg", + "image_caption": [ + "Figure A.5: Empirical evaluation on the complexity of the learned feature mapping during training on CIFAR-10. Two complexity measurements are considered, i.e., $C(f)$ as in (3.3) and $l_{2}$ -norm. 
Specifically, we calculate the expected Lipschitz constant on both the representation layer (512-dimensional) and the projection layer (128-dimensional). Figure (a) and (b) show the trends (along the 200 training epochs) for SimCLR and $t$ -SimCLR respectively." + ], + "image_footnote": [], + "bbox": [ + 807, + 638, + 823, + 650 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Lemma A.1 gives a vector-version illustration of our Corollary 3.6, stating that minimizing the expected derivative (to zero) of the mapping function $f$ , i.e., $\\sum_{i}f(x_{i}) / x_{1}$ leads to preserving the norm difference of the input vector and output vector.", + "bbox": [ + 169, + 671, + 823, + 715 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Next, we provide the proof of Theorem 3.5.", + "bbox": [ + 171, + 720, + 455, + 736 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof of Theorem 3.5. Straightforwardly, we can write", + "bbox": [ + 171, + 753, + 529, + 770 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\bar {P} - Q ^ {\\pi} \\right\\| _ {F} = \\sum_ {i \\neq j} \\left(\\frac {1}{p _ {i j}} + q _ {\\pi (i) \\pi (j)}\\right) ^ {2} \\\\ = \\sum_ {i \\neq j} \\frac {1}{p _ {i j} ^ {2}} + \\sum_ {i \\neq j} q _ {\\pi (i) \\pi (j)} ^ {2} + 2 \\sum_ {i \\neq j} \\frac {q _ {\\pi (i) \\pi (j)}}{p _ {i j}} \\\\ = 2 C _ {1} (P, Q ^ {\\pi}) + \\sum_ {i \\neq j} \\frac {1}{p _ {i j} ^ {2}} + \\sum_ {i \\neq j} q _ {i j} ^ {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 779, + 671, + 898 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Thus, minimizing $C_1(P, Q^\\pi)$ also minimizes $\\| \\bar{P} - Q^\\pi \\|_F$ .", + "bbox": [ + 171, + 907, + 547, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 473, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + 
"text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Theorem 3.5 is a straightforward generalization of Lemma A.1. Next, we provide proof for Corollary 3.6, restated below.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Proof of Corollary 3.6. Recall the SimCLR loss $L_{\\mathrm{InfoNCE}} = \\frac{1}{2n}\\sum_{i = 1}^{n}(l(\\pmb{x}_i,\\pmb{x}_i')) + l(\\pmb{x}_i',\\pmb{x}_i))$ , where", + "bbox": [ + 169, + 157, + 810, + 176 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nl (\\pmb {x} _ {i}, \\pmb {x} _ {i} ^ {\\prime}) = - \\log \\frac {\\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i}) , f (\\pmb {x} _ {i} ^ {\\prime})) / \\tau)}{\\sum_ {x \\in \\mathcal {D} _ {n} \\cup \\mathcal {D} _ {n} ^ {\\prime} \\setminus \\{\\pmb {x} _ {i} \\}} \\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i}) , f (\\pmb {x})) / \\tau)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 185, + 699, + 223 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Without loss of generality, let $\\tau = 1$ . Notice that $l(\\pmb{x}_i, \\pmb{x}_i')$ is monotonically decreasing as $\\mathrm{sim}(f(\\pmb{x}_i), f(\\pmb{x}_i'))$ increases, due to the monotonicity of function $\\frac{x}{x + c}$ with respect to $x > 0$ for any $c > 0$ . 
Hence, in order for $L_{\\mathrm{InfoNCE}}$ to be minimized, perfect alignment is required, i.e., $f(\\pmb{x}_i) = f(\\pmb{x}_i')$ for any $i = 1, \\dots, n$ .", + "bbox": [ + 169, + 232, + 826, + 292 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "With perfect alignment achieved, $L_{\\mathrm{InfoNCE}}$ only concerns the pairwise similarity between negative samples $f(\\pmb{x}_i)$ 's, which can be simplified as $L_{\\mathrm{InfoNCE}} \\geq L_{\\mathrm{uniform}}$ where", + "bbox": [ + 169, + 297, + 823, + 328 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L _ {\\text {u n i f o r m}} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} - \\log \\frac {e}{e + \\sum_ {j \\neq i} \\exp (\\sin (f (\\boldsymbol {x} _ {i}) , f (\\boldsymbol {x} _ {j})))} \\\\ \\geq \\log \\left(\\frac {1}{n} \\sum_ {i = 1} ^ {n} \\left(1 + \\frac {1}{e} \\sum_ {j \\neq i} \\exp (\\sin (f (\\boldsymbol {x} _ {i}), f (\\boldsymbol {x} _ {j})))\\right)\\right) \\\\ \\geq \\log \\left(1 + \\frac {1}{n \\cdot e} \\sum_ {1 \\leq i \\neq j \\leq n} \\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i}), f (\\pmb {x} _ {j})))\\right). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 337, + 699, + 479 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "$L_{\\mathrm{uniform}}$ can be minimized by mapping $\\pmb{x}_i$ 's as distant as possible, hence the connection to Tammas problem and the uniformity principle.", + "bbox": [ + 169, + 494, + 823, + 523 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "With sufficient capacity of the feature mapping $f$ , the SimCLR loss can be minimized to its (empirical) global minima. However, such $f$ is not unique since $L_{\\mathrm{InfoNCE}}$ is invariant to permutations of mapping relationships from $x_i$ to $f(x_i)$ . 
If $f_n^*$ further minimizes $C(f)$ on the sample level, i.e.,", + "bbox": [ + 169, + 530, + 825, + 574 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nf_{n}^{*}:= \\operatorname *{argmin}_{f}C_{n}(f) = \\operatorname *{argmin}_{f}\\sum_{1\\leq i\\neq j\\leq n}\\frac{\\|f(\\boldsymbol{x}_{i}) - f(\\boldsymbol{x}_{j})\\|_{2}}{\\|\\boldsymbol{x}_{i} - \\boldsymbol{x}_{j}\\|_{2}},\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 583, + 689, + 622 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Then, $f_{n}^{*}$ also solves a type of SNE problem with uniformity constraint (3.4) as stated in Theorem 3.5. To see this, if we define $q_{ij} = -\\|f(\\pmb{x}_i) - f(\\pmb{x}_j)\\|_2$ and $p_{ij} = -\\|x_i - x_j\\|_2$ , which is reasonable since the larger the distance, the smaller the similarity, we can directly apply the results in Theorem 3.5.", + "bbox": [ + 169, + 632, + 826, + 676 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/9e37b82f05edc2eee2998cb2961fe037c95f842d678bb12ca52eb553ca025bb9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 681, + 823, + 694 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Remark A.2. As can be seen from Theorem 3.5 and the proof of Corollary 3.6, we showcase the relationship between minimizing $C(f)$ and structure preserving property by considering a special SNE problem, where the pairwise similarity is not modeled by Gaussian as standard, hence the word \"resembling\" in Corollary 3.6. Although $q_{ij} = -\\| f(\\pmb{x}_i) - f(\\pmb{x}_j)\\|_2$ is unorthodox, it is reasonable since the larger the distance, the smaller the similarity. If we consider the SNE method as in Hinton et al. (2006), our proof does not go through directly and demands more complicated analysis. 
However, our results are still valid in connecting the complexity of the feature map to the pairwise similarity matching.", + "bbox": [ + 169, + 714, + 826, + 814 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Our statement in Corollary 3.6 requires perfect alignment or perfect uniformity. When the assumptions are not perfectly met, we can still obtain insights for the resulting feature mapping. Alignment and uniformity (Wang & Isola, 2020) is not the whole story of contrastive learning, and our identified structure-preserving property implicitly induced by complexity minimization provides an other angle of the learning process. From this perspective, contrastive learning can be thought of as a combination of alignment and SNE with uniformity constraint. In Figure A.3, while obtaining approximate alignment and uniformity, the feature mapping also preserves the relative relationships of the clusters (labels).", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.5 ALIGNMENT AND UNIFORMITY OF T-SIMCLR", + "text_level": 1, + "bbox": [ + 171, + 103, + 532, + 118 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Due to the change of training objective, we may want to reevaluate the properties of the learned feature from $t$ -SimCLR. We will show that alignment still hold while uniformity is changed (to infinity).", + "bbox": [ + 169, + 128, + 823, + 159 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Let us consider a compact region $\\Omega \\subset \\mathbb{R}^d$ and $\\pmb{x}_i \\in \\Omega$ . Let $t$ be the transformation such that the augmented data point $\\pmb{x}_i' = t(\\pmb{x}_i)$ is still in $\\Omega$ . 
Wang & Isola (2020) showed that the contrastive loss can be decomposed into the alignment loss and the uniformity loss. Zimmermann et al. (2021) further showed that the contrastive loss converges to the cross-entropy between latent distributions, where the underlying latent space is assumed to be uniform, and the positive pairs are specified to be an exponential distribution. In this section, we show a parallel result, which states that in the population level, the $t$ -SNE loss is the cross-entropy between two distributions of generating positive pairs.", + "bbox": [ + 169, + 165, + 826, + 263 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Theorem A.3. Let $H(\\cdot, \\cdot)$ be the cross entropy between distributions. Let $p(x)$ be the density of $x$ , $p(\\cdot | x)$ be the conditional density of generating a positive pair, and define", + "bbox": [ + 169, + 265, + 826, + 294 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nq _ {f} \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right) = C _ {f} (\\boldsymbol {x}) ^ {- 1} \\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}}, \\text {w i t h} C _ {f} (\\boldsymbol {x}) = \\int_ {\\Omega} \\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} d \\boldsymbol {x} ^ {\\prime}.\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 296, + 779, + 330 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Then, we have", + "bbox": [ + 171, + 330, + 269, + 344 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} (H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) = L _ {a} (f) + L _ {u} (f), \\tag {A.1}\n$$\n", + "text_format": "latex", + "bbox": [ + 343, + 348, + 
823, + 364 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "which corresponds to the population-level $t$ -SimCLR loss where", + "bbox": [ + 169, + 367, + 593, + 382 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nL _ {a} = \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\mathbb {E} _ {\\boldsymbol {x} \\sim p \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right)} \\log \\left(1 + \\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2} ^ {2}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 383, + 666, + 401 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nL _ {u} = \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\log \\mathbb {E} _ {\\widetilde {\\boldsymbol {x}} \\sim p (\\widetilde {\\boldsymbol {x}})} \\big (1 + \\| f (\\boldsymbol {x}) - f (\\widetilde {\\boldsymbol {x}}) \\| _ {2} ^ {2} \\big) ^ {- 1}.\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 402, + 661, + 422 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Proof. 
Note that", + "bbox": [ + 171, + 435, + 285, + 449 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) \\\\ = - \\int_ {\\Omega} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\log \\left(\\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2} ^ {2}}\\right) d \\boldsymbol {x} ^ {\\prime} + \\log C _ {f} (\\boldsymbol {x}) \\\\ = \\int_ {\\Omega} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\log (1 + \\| f (\\boldsymbol {x}) - f (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} - \\int_ {\\Omega} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\log (p (\\boldsymbol {x} ^ {\\prime})) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} + \\log \\int_ {\\Omega} \\frac {p (\\boldsymbol {x} ^ {\\prime})}{1 + \\| f (\\boldsymbol {x}) - f (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}} \\mathrm {d} \\boldsymbol {x} ^ {\\prime} \\\\ = \\int_ {\\Omega} p (\\pmb {x} ^ {\\prime} | \\pmb {x}) \\log (1 + \\| f (\\pmb {x}) - f (\\pmb {x} ^ {\\prime}) \\| _ {2} ^ {2}) \\mathrm {d} \\pmb {x} ^ {\\prime} - \\int_ {\\Omega} p (\\pmb {x} ^ {\\prime} | \\pmb {x}) \\log (p (\\pmb {x} ^ {\\prime})) \\mathrm {d} \\pmb {x} ^ {\\prime} + \\log \\mathbb {E} _ {\\pmb {x} ^ {\\prime} \\sim p (\\pmb {x} ^ {\\prime})} (1 + \\| f (\\pmb {x}) - f (\\pmb {x} ^ {\\prime}) \\| _ {2} ^ {2}) ^ {- 1}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 452, + 893, + 574 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Taking expectation with respect to $x$ leads to", + "bbox": [ + 171, + 575, + 464, + 589 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) \\\\ = \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\mathbb {E} _ {\\boldsymbol {x} ^ {\\prime} \\sim p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x})} \\log (1 + \\| f (\\boldsymbol {x}) - f (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}) + \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\log \\mathbb {E} _ {\\widetilde {\\boldsymbol {x}} \\sim p (\\widetilde {\\boldsymbol {x}})} (1 + \\| f (\\boldsymbol {x}) - f (\\widetilde {\\boldsymbol {x}}) \\| _ {2} ^ {2}) ^ {- 1} \\\\ - \\int_ {\\Omega} \\int_ {\\Omega} p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right) \\log \\left(p \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) d \\boldsymbol {x} ^ {\\prime} d \\boldsymbol {x} \\\\ = L _ {a} (f) + L _ {u} (f) - C _ {p}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 592, + 805, + 683 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "where", + "bbox": [ + 171, + 685, + 215, + 696 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nC _ {p} = \\int_ {\\Omega} \\int_ {\\Omega} p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right) \\log \\left(p \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} \\mathrm {d} \\boldsymbol {x} = \\int_ {\\Omega} \\int_ {\\Omega} p \\left(\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}\\right) \\log \\left(p \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\mathrm {d} \\boldsymbol {x} 
^ {\\prime} \\mathrm {d} \\boldsymbol {x}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 698, + 746, + 731 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "does not depend on $f$ .", + "bbox": [ + 171, + 732, + 316, + 747 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) \\\\ = \\int_ {\\Omega} p (\\boldsymbol {x}) \\frac {1}{p (\\boldsymbol {x})} \\int_ {\\Omega} p (\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}) \\log \\left(\\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2} ^ {2}}\\right) d \\boldsymbol {x} ^ {\\prime} d \\boldsymbol {x} \\\\ - \\int_ {\\Omega} \\int_ {\\Omega} \\frac {p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} d \\boldsymbol {x} d \\boldsymbol {x} ^ {\\prime} \\\\ = \\int_ {\\Omega} \\int_ {\\Omega} p (\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}) \\log \\left(\\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}}\\right) d \\boldsymbol {x} ^ {\\prime} d \\boldsymbol {x} \\\\ - \\int_ {\\Omega} \\int_ {\\Omega} \\frac {p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} d \\boldsymbol {x} d \\boldsymbol {x} ^ {\\prime}. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 750, + 696, + 907 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/309c05778265ac413422425956d2a2957455019a721cba777fcfa71a94d93a31.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 910, + 825, + 922 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In Theorem A.3, $L_{a}$ is the alignment loss and $L_{u}$ is the uniformity loss. The decomposition is much more natural for $t$ -SimCLR as opposed to that in $L_{\\mathrm{InfoNCE}}$ , mainly due to the change from conditional to joint distribution when modeling the pairwise similarity. Furthermore, if the $t$ -SimCLR loss is minimized, we must have $p(\\cdot | \\boldsymbol{x}) = q_{f}(\\cdot | \\boldsymbol{x})$ , provided $f$ has sufficient capacity. 
Note that if $p(\\cdot | \\boldsymbol{x}) = q_{f}(\\cdot | \\boldsymbol{x})$ , then $P_{j|i}$ and $Q_{j|i}$ are perfectly matched, which indicates that we obtain a perfect neighbor embedding.", + "bbox": [ + 169, + 103, + 826, + 176 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Theorem A.3 implies that the optimal feature mapping $f^{*}$ satisfies", + "bbox": [ + 169, + 180, + 602, + 196 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\np (\\cdot | \\boldsymbol {x}) = q _ {f ^ {*}} (\\cdot | \\boldsymbol {x}),\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 199, + 558, + 215 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "which further implies that for any $\\pmb{x} \\in \\Omega$", + "bbox": [ + 169, + 218, + 437, + 233 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} C _ {f ^ {*}} (\\boldsymbol {x}) ^ {- 1} \\frac {p (\\boldsymbol {x} ^ {\\prime})}{1 + \\| f ^ {*} (\\boldsymbol {x}) - f ^ {*} (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}} \\propto C (\\boldsymbol {x}) ^ {- 1} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\\\ \\Leftrightarrow C _ {f ^ {*}} \\left(\\boldsymbol {x}\\right) ^ {- 1} \\frac {1}{1 + \\left\\| f ^ {*} (\\boldsymbol {x}) - f ^ {*} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} \\propto C (\\boldsymbol {x}) ^ {- 1} \\frac {p \\left(\\boldsymbol {x} , \\boldsymbol {x} ^ {\\prime}\\right)}{p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}, \\tag {A.2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 237, + 823, + 305 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $C(\\pmb{x}) = \\int p(\\pmb{x}'|\\pmb{x})\\mathrm{d}\\pmb{x}'$ . Unlike the usual normalized SimCLR, $t$ -SNE does not assume any special structure on $f$ (e.g., $\\| f\\| _2 = 1$ ), thus $f$ can go to infinity. 
Compared to the finite sample $t$-SimCLR loss, the population version is trickier to analyze. This is because for a given point $\\pmb{x}'$ , it can be an augmented sample of some $\\pmb{x}$ (with probability $p(\\pmb{x}'|\\pmb{x})$ ), or a negative sample of $\\pmb{x}$ (when we treat $\\pmb{x}'$ as another sample point). This reflects the essential difficulty between population and finite samples in contrastive learning, not only for $t$-SimCLR.", + "bbox": [ + 169, + 306, + 823, + 393 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "For clustered data, (A.2) provides two important messages, provided that the augmentation is not too extreme and the augmented sample $\\pmb{x}^{\\prime}$ stays in the same cluster as the original $\\pmb{x}$ . On one hand, when $\\pmb{x}_1$ and $\\pmb{x}_2$ belong to different clusters, the joint density $p(\\pmb{x} = \\pmb{x}_1, \\pmb{x}^{\\prime} = \\pmb{x}_2)$ will be very small, close to zero, which indicates that $\\| f^{*}(\\pmb{x}_{1}) - f^{*}(\\pmb{x}_{2}) \\|_{2}$ is very large, tending to infinity. On the other hand, for $\\pmb{x}_1$ and $\\pmb{x}_2$ belonging to the same cluster, $p(\\pmb{x} = \\pmb{x}_1, \\pmb{x}^{\\prime} = \\pmb{x}_2)$ will be relatively large. Hence, the features of the same cluster will stay close. Overall, we will observe similar clustered structure in the feature space. 
This is confirmed in the Gaussian mixture setting in Figure 1(c), in which case, the problem can be oversimplified as mapping 5 points in $\\mathbb{R}^2$ to the unit-circle.", + "bbox": [ + 169, + 398, + 826, + 512 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "B CONNECTION TO DISTANCE BETWEEN DISTRIBUTIONS", + "text_level": 1, + "bbox": [ + 171, + 530, + 656, + 546 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Through the lens of stochastic neighbor embedding, the feature learning process of SSCL methods can be seen as minimizing certain \"distances\" between distributions in different dimensions. Ideally, the feature should preserve the distributional information about the data. Since the data and the feature do not lie in the same metric space, quantitatively measuring their distributional distance is difficult. Fortunately, there are existing tools we can utilize, specifically, Gromov-Wasserstein distance (Mémoli, 2011; Salmona et al., 2021).", + "bbox": [ + 169, + 561, + 826, + 645 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Let $\\mathcal{X}$ , $\\mathcal{Z}$ be two Polish spaces, each endowed respectively with probability measures $p_x$ and $p_z$ . 
Given two measurable cost functions $c_x: \\mathcal{X} \\times \\mathcal{X} \\to \\mathbb{R}$ , $c_z: \\mathcal{Z} \\times \\mathcal{Z} \\to \\mathbb{R}$ , and $D: \\mathbb{R} \\times \\mathbb{R} \\to \\mathbb{R}$ , the Gromov-Wasserstein distance can be defined as", + "bbox": [ + 169, + 651, + 826, + 693 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nG W _ {p} (p _ {x}, p _ {z} | c _ {x}, c _ {z}) := \\left(\\inf _ {\\pi \\in \\prod (p _ {x}, p _ {z})} \\int_ {\\mathcal {X} ^ {2} \\times \\mathcal {Z} ^ {2}} D (c _ {x} (x, x ^ {\\prime}), c _ {z} (z, z ^ {\\prime})) ^ {p} d \\pi (x, z) d \\pi (x ^ {\\prime}, z ^ {\\prime})\\right) ^ {1 / p},\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 696, + 792, + 733 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $\\prod(p_x,p_z)$ denotes all the joint distributions in $\\mathcal{X}\\times \\mathcal{Z}$ such that the marginals are $p_x$ and $p_z$ . Typically, $D(c_{x},c_{z})$ is chosen to be $|c_{x} - c_{z}|$ and $c_{x}(x,x^{\\prime})$ is usually chosen to be $\\| x - x^{\\prime}\\| _p$ . The key idea of the Gromov-Wasserstein distance to circumvent the dimension mismatch is to change from comparing marginal distribution to pairwise distributions, which is very similar to the SNE objective. Consider Monge's formulation of the optimal transportation problem and let $z = f(x)$ . 
By choosing $c_{z}(z_{i},z_{j}) = \\log (\\widetilde{Q}_{j|i})$ with $\\widetilde{Q}$ specified as in (3.1), $c_{x}(x_{i},x_{j}) = P_{j|i}$ with $\\widetilde{P}$ specified as in (3.2) and letting $D(c_{x},c_{z}) = c_{x}(\\log (c_{x}) - \\log (c_{z}))$ , we have", + "bbox": [ + 169, + 734, + 826, + 838 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nG W _ {1} \\left(p _ {x}, p _ {f (x)}\\right) \\leq \\mathbb {E} _ {x, x ^ {\\prime}} \\left(D \\left(c _ {x} \\left(x, x ^ {\\prime}\\right), c _ {z} \\left(f (x), f \\left(x ^ {\\prime}\\right)\\right)\\right)\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 315, + 840, + 679, + 858 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where the right hand side recovers the expected InfoNCE loss. Hence, the SNE perspective can also be viewed as minimizing the Gromov-Wasserstein distance between $p_z$ and $p_x$ .", + "bbox": [ + 169, + 859, + 823, + 890 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "It is worth noting that such an interpretation only relates to contrastive learning, not including generative-based self-supervised learning methods such as Masked AutoEncoder (MAE) (He et al., 2021).", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/004616d71e2f6e1680a34a339787f5e53d64b43daa1aa4d64ce960759a7b7662.jpg", + "image_caption": [ + "Figure C.6: Nearest neighbor test accuracy vs. training epochs. SimCLR and $t$ -SimCLR share similar trends and convergence speed." 
+ ], + "image_footnote": [], + "bbox": [ + 341, + 119, + 643, + 276 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/e879bb5c0ecaa514b123a11a38890e1aa2a909864a3cbe9384ea45dbf61f68b4.jpg", + "image_caption": [ + "Figure C.7: The histogram of IoUs for 1000 constructed positive pairs in CIFAR-10. The empirical distribution is almost symmetric around 0.5." + ], + "image_footnote": [], + "bbox": [ + 354, + 363, + 630, + 522 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C EXPERIMENT DETAILS", + "text_level": 1, + "bbox": [ + 171, + 599, + 395, + 614 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.1 CIFAR-10 SETTINGS", + "text_level": 1, + "bbox": [ + 171, + 630, + 366, + 643 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "CIFAR-10 (Krizhevsky, 2009) is a colorful image dataset with 50000 training samples and 10000 test samples from 10 categories. We use ResNet-18 (He et al., 2016) as the feature extractor, and the other settings such as projection head all follow the original settings of SimCLR (Chen et al., 2020a). To evaluate the quality of the features, we follow the KNN evaluation protocol (Wu et al., 2018), which computes the cosine similarities in the embedding space between the test image and its nearest neighbors, and makes the prediction via weighted voting. We train each model with batch size of 256 and 200 epochs for quicker evaluation. For $t$-SimCLR, without specifying otherwise, we grid search the $t_{df}$ and $\\tau$ with range $\\{1, 2, 5, 10\\}$ and $\\{1, 2, 5, 10\\}$ respectively.", + "bbox": [ + 169, + 655, + 826, + 768 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Ablation of training epochs We also run the SimCLR and $t$-SimCLR experiments in the more standard 1000 epochs setting. For SimCLR, we use batch size of 512, learning rate of 0.3, temperature of 0.7, and weight decay of 0.0001. 
For $t$ -SimCLR, we use batch size of 512, learning rate of 0.8, temperature of 10, weight dacay of 0.0002, and $t_{df} = 5$ . The nearest neighbor accuracy for SimCLR is $87.2\\%$ vs. that for $t$ -SimCLR is $88.8\\%$ .", + "bbox": [ + 169, + 782, + 826, + 852 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.2 IMAGE AUGMENTATION", + "text_level": 1, + "bbox": [ + 171, + 869, + 382, + 883 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "When processing images, several popular augmentations are usually adopted (following the setting in SimCLR Chen et al. (2020a)), e.g., random resized crop (crops a random portion of image and resize it", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/23a127fd23e914b5ea03435dbae0b73c8d6edafe506a41ac6bede17bad9bc060.jpg", + "image_caption": [ + "Figure C.8: Extension on Figure 2(b). Nearest neighbor classification accuracy for SimCLR vs. $t$ -SimCLR on both CIFAR-10 (in-distribution) and CIFAR-100 (out-of-distribution) using different feature dimensions." + ], + "image_footnote": [], + "bbox": [ + 318, + 122, + 689, + 313 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "to the original size), horizontal flip, color jitter (randomly change the brightness, contrast, saturation and hue of an image). To illustrate the natural weighting scheme in Section 4.1, we considered random resized crop and specifies the weights by the IoU (intersection over union) of the positive pair. In particular, two augmented images are created from an anchor image. 
Each augmentation crops a rectangular region of the image, denoted by $r_1, r_2$ respectively, and their IoU is defined by the area of intersection $r_1 \\cap r_2$ divided by the area of the union $r_1 \\cup r_2$ . The IoU is always between 0 and 1. In our experiment, we chose the default settings and Figure C.7 illustrates the IoU histogram of 1000 constructed positive pairs.", + "bbox": [ + 169, + 397, + 826, + 497 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C.3 DEGREE OF FREEDOM IN $t$-SIMCLR", + "text_level": 1, + "bbox": [ + 171, + 512, + 465, + 526 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Feature dimension efficiency in OOD case. To further investigate the generalization ability of SSCL methods, we devise a challenging setting where the model is trained on CIFAR-10 and tested on CIFAR-100 classification. In this case, we evaluate the effect of increasing feature dimensions in the projection layer, as an extension on the CIFAR-10 in-distribution case. The results are shown in Figure C.8, where there are two things to note:", + "bbox": [ + 169, + 568, + 823, + 638 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The gain of extra dimensions in the OOD case does vanish later than that in the in-distribution case.", + "- The advantage of $t$-SimCLR vs. SimCLR is very significant with around $10\\%$ improvement when $d = 128$ using nearest neighbor classification, indicating that $t$-SimCLR produces better separated clusters." + ], + "bbox": [ + 215, + 650, + 823, + 724 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Relationship between $t_{df}$ and $d_z$ . The larger the degree of freedom $t_{df}$ , the less heavy-tailed the t-distribution. As $d_z$ decreases, the crowding problem becomes more severe and as recommended by (Van der Maaten & Hinton, 2008), a smaller $t_{df}$ tends to work better. 
We evaluate the sensitivity of $t_{df}$ (1, 5, 10) under different choices of $d_z$ (1, 2, 4, 8, 16, 32, 64, 128) in CIFAR-10 and the results are reported in Figure C.9. As can be seen, when $d_z$ is small (1, 2, 4, 8), $t_{df} = 1$ outperforms. Comparing $t_{df} = 5$ and $t_{df} = 10$ , the two perform similarly when $d_z$ is large (16, 32, 64, 128) but the smaller $t_{df} = 5$ yields better accuracy when $d_z = 1, 2, 4$ .", + "bbox": [ + 169, + 744, + 825, + 844 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Tuning temperature vs. tuning $t_{df}$ . As illustrated in Section 4.2, when the feature space dimension is low, the heavy-tailed t-distribution is a better choice than Gaussian to alleviate the crowding problem.", + "bbox": [ + 169, + 858, + 826, + 888 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 473, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_footnote", + "text": "5When evaluating by training linear classifiers for 100 epochs, the accuracy for SimCLR is $46.4\\%$ and that for $t$ -SimCLR is $48.14\\%$ (averaged over 3 replications).", + "bbox": [ + 169, + 896, + 823, + 925 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/ff8980fcd9c7131f7be01201d512cac63ded6038971372be41ff5b4f6462a672.jpg", + "image_caption": [ + "Figure C.9: Nearest neighbor classification accuracy on CIFAR-10 for $t$ -SimCLR using different feature dimensions and different degrees of freedom (t_df)." 
+ ], + "image_footnote": [], + "bbox": [ + 313, + 103, + 689, + 272 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Even though tuning the temperature of $L_{\\mathrm{InfoNCE}}$ , i.e., making $\\tau$ larger, can also have the effect of making the distribution less concentrated ( $\\tau$ can be seen as the standard deviation), tuning temperature and tuning $t_{df}$ are fundamentally different. The former is controlling how fast does the similarity $Q_{i,j}$ decays as the distance between $z_i$ and $z_j$ increases, while the latter serves as a scaling factor, offering constant level modification of the scheme. In our experiments with SimCLR vs $t$ -SimCLR on CIFAR-10, temperature is tuned as a hyperparameter. The difference in $\\tau$ can never make up to the difference between the baseline SimCLR and $t$ -SimCLR. We found $\\tau = 0.5$ to work better for the base SimCLR while larger $\\tau$ works better with our $t$ -SimCLR. We recommend $\\tau = 5$ as the default choice.", + "bbox": [ + 169, + 340, + 826, + 454 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "C.4 IMAGENET PRE-TRAINING", + "text_level": 1, + "bbox": [ + 171, + 468, + 401, + 484 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To show the ability for large scale domain transfer and OOD generalization, we conduct experiments on ImageNet pre-training based on MoCo-v2 with its official implementation6. We follow most of their settings, e.g., data augmentation, 200 epochs pre-training, and optimization strategy, etc. The loss is modified according to Section 4.2 and batch normalization is applied along every dimension. We grid search the $t_{df}$ and $\\tau$ with range $\\{2,5,10,15\\}$ and $\\{0.2,2,5,10\\}$ respectively. Finally we choose $t_{df} = 10$ and $\\tau = 5$ to be the optimal hyperparameters. 
We use this pre-train model as initialization for domain transfer and OOD experiments.", + "bbox": [ + 169, + 494, + 823, + 594 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "C.5 DOMAIN TRANSFER", + "text_level": 1, + "bbox": [ + 171, + 608, + 356, + 623 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We compare MoCo-v2 pre-trained with 800 / 200 epochs and $t$ -MoCo-v2 on Aircraft, Birdsnap, Caltech101, Cars, CIFAR10, CIFAR100, DTD, Pets, and SUN397 in Table C.3. We follow the transfer settings in Ericsson et al. (2021) to finetune the pre-trained models. For datasets Birdsnap, Cars, CIFAR10, CIFAR100, DTD, and SUN397, we report the top-1 accuracy metric, while for Aircraft, Caltech101, and Pets, we report the mean per-class accuracy metric. We also follow Ericsson et al. (2021) to split each dataset into training, validation, and test sets. On each dataset, we perform a hyperparameter search as follows. (1) We choose the initial learning rate according to a grid of 4 logarithmically spaced values between $1 \\times 10^{-4}$ and $1 \\times 10^{-1}$ ; (2) We choose the weight decay parameter according to a grid of 4 logarithmically spaced values between $1 \\times 10^{-6}$ and $1 \\times 10^{-3}$ , plus no weight decay; (3) The weight decay values are divided by the learning rate; (4) For each pair of learning rate and weight decay, we finetune the pre-trained model for 5000 steps by SGD with Nesterov momentum 0.9, batch size of 64, and cosine annealing learning rate schedule without restarts. 
As can be seen in Table C.3, our $t$ -MoCo-v2 with 200 epochs even outperform the baseline with 800 epochs on average.", + "bbox": [ + 169, + 633, + 826, + 816 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "C.6 OOD GENERALIZATION", + "text_level": 1, + "bbox": [ + 171, + 832, + 382, + 847 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To demonstrate the advantage of our modification, we also compare MoCo-v2 pre-trained with 800 / 200 epochs and $t$ -MoCo-v2 on OOD generalization benchmarks: PACS Li et al. (2017), VLCS Fang et al. (2013), Office-Home Venkateswara et al. (2017). We follow the standard way to conduct the", + "bbox": [ + 169, + 858, + 823, + 902 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 473, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_footnote", + "text": "$^{6}$ https://github.com/facebookresearch/moco", + "bbox": [ + 189, + 909, + 452, + 924 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/35ce9406cf55094cfb9c608b03959e530bc1dae7ffac647aefa7d7e1993bed86.jpg", + "table_caption": [ + "Table C.3: Domain transfer results of vanilla MoCo-v2 and $t$ -MoCo-v2." + ], + "table_footnote": [], + "table_body": "
MethodAircraftBirdsnapCaltech101CarsCIFAR10CIFAR100DTDPetsSUN397Avg.
MoCo-v2 (800 epochs)83.8045.5183.0186.1896.4271.6971.7089.1155.6175.89
MoCo-v2 (200 epochs)82.7544.5383.3185.2495.8172.7571.2286.7056.0575.37
t-MoCo-v2 (200 epochs)82.7853.4686.8186.1796.0478.3269.2087.9559.3077.78
", + "bbox": [ + 174, + 125, + 823, + 176 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/695459ef30b187674671fce8f19c5c93014a1b73e32a0233ccf10e82ce5e8e09.jpg", + "table_caption": [ + "Table C.4: OOD accuracies of vanilla MoCo-v2 and $t$ -MoCo-v2 on domain generalization benchmarks." + ], + "table_footnote": [], + "table_body": "
MethodPACSVLCSOffice-HomeAvg.
MoCo-v2 (800 epochs)58.969.841.656.8
MoCo-v2 (200 epochs)58.570.436.655.2
t-MoCo-v2 (200 epochs)61.375.142.159.5
", + "bbox": [ + 267, + 215, + 728, + 286 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "experiments, i.e., choosing one domain as the test domain and using the remaining domains as training domains, which is named the leave-one-domain-out protocol. The top linear classifier is trained on the training domains and tested on the test domain. Each domain rotates as the test domain and the average accuracy is reported for each dataset in Table C.4. On each dataset, we perform a hyperparameter search following DomainBed Gulrajani & Lopez-Paz (2021). We adopt the leave-one-domain-out cross-validation setup in DomainBed with 10 experiments for hyperparameter selection and run 3 trials. As can be seen in Table C.4, our $t$ -MoCo-v2 with 200 epochs even significantly outperform the baseline with 800 epochs for all of the three datasets.", + "bbox": [ + 169, + 311, + 823, + 422 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C.7 SSCL INSPIRED DATA VISUALIZATION", + "text_level": 1, + "bbox": [ + 171, + 460, + 485, + 474 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "$t$ -SNE (Van der Maaten & Hinton, 2008) and its variants are designed for data visualization. However, for more complicated data, such as colored images, the results are not satisfactory. Using standard $t$ -SNE, the 2D visualization of the 50K training images of CIFAR-10 (labels denoted as 0, 1,...,9) can be seen in Figure C.10, where different labels are hardly separated. The poor performance of $t$ -SNE on CIFAR-10 can be traced back to the poor distance choice on images, i.e., $l_{2}$ -norm. 
Inspired by the success of SSCL for natural images, $t$ -SNE can potentially be improved by incorporating data augmentations.", + "bbox": [ + 169, + 486, + 823, + 585 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In light of our perspective (S1), $t$ -SNE can take advantage of the distance specified with (3.2) and the resulting model is essentially our $t$ -SimCLR with feature dimension 2. The visualization from $t$ -SimCLR is shown in Figure C.11, which is much more separated (the nearest neighbor classification accuracy on CIFAR-10 test data is $56.6\\%$ ). By choosing the feature dimension to be 2, various SSCL methods can also be made into data visualizing tools. In Figure C.12, we visualize the outcome from SimCLR (the nearest neighbor classification accuracy on CIFAR-10 test data is $24.8\\%$ ).", + "bbox": [ + 169, + 590, + 823, + 676 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Similar investigations have been carried in Böhm et al. (2022); Damrich et al. (2022) where they focused specifically on data visualization and stochastic neighbor embedding.", + "bbox": [ + 169, + 681, + 823, + 710 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 470, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/76b0fe03ca6ed5762c61cb7bbfe91d9d76e59cf9de5ea811f23a14e8780312cb.jpg", + "image_caption": [ + "Figure C.10: 50K CIFAR-10 training images visualization in 2D with $t$ -SNE." + ], + "image_footnote": [], + "bbox": [ + 308, + 112, + 689, + 330 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/a48f050056dc14a63d46ee1df2ed31108696785f0c9a933ec7c530bde5735f84.jpg", + "image_caption": [ + "Figure C.11: 50K CIFAR-10 training images visualization in 2D with the default $t$ -SimCLR." 
+ ], + "image_footnote": [], + "bbox": [ + 308, + 390, + 689, + 606 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/6eaae204ef0543ee464baa5c129e4e1211fd5697dae541817624a41e3770b9f8.jpg", + "image_caption": [ + "Figure C.12: 50K CIFAR-10 training images visualization in 2D with the SimCLR." + ], + "image_footnote": [], + "bbox": [ + 308, + 666, + 689, + 878 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 473, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 24 + } +] \ No newline at end of file diff --git a/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_model.json b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_model.json new file mode 100644 index 0000000000000000000000000000000000000000..9b70ec19d105d53e0944a6f89cb660439cfba551 --- /dev/null +++ b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_model.json @@ -0,0 +1,4353 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.476, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.147 + ], + "angle": 0, + "content": "YOUR CONTRASTIVE LEARNING IS SECRETLY DOING STOCHASTIC NEIGHBOR EMBEDDING" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.169, + 0.731, + 0.185 + ], + "angle": 0, + "content": "Tianyang Hu1, Zhili Liu1,2, Fengwei Zhou1, Wenjia Wang2,3, Weiran Huang1,4*" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.185, + 0.694, + 0.2 + ], + "angle": 0, + "content": "1 Huawei Noah's Ark Lab, 2 Hong Kong University of Science and Technology" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.2, 
+ 0.61, + 0.215 + ], + "angle": 0, + "content": "\\(^{3}\\) Hong Kong University of Science and Technology (Guangzhou)" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.215, + 0.59, + 0.23 + ], + "angle": 0, + "content": "\\(^{4}\\) Qing Yuan Research Institute, Shanghai Jiao Tong University" + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.185, + 0.694, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.266, + 0.548, + 0.282 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.294, + 0.771, + 0.492 + ], + "angle": 0, + "content": "Contrastive learning, especially self-supervised contrastive learning (SSCL), has achieved great success in extracting powerful features from unlabeled data. In this work, we contribute to the theoretical understanding of SSCL and uncover its connection to the classic data visualization method, stochastic neighbor embedding (SNE) (Hinton & Roweis, 2002), whose goal is to preserve pairwise distances. From the perspective of preserving neighboring information, SSCL can be viewed as a special case of SNE with the input space pairwise similarities specified by data augmentation. The established correspondence facilitates deeper theoretical understanding of learned features of SSCL, as well as methodological guidelines for practical improvement. Specifically, through the lens of SNE, we provide novel analysis on domain-agnostic augmentations, implicit bias and robustness of learned features. To illustrate the practical advantage, we demonstrate that the modifications from SNE to \\( t \\)-SNE (Van der Maaten & Hinton, 2008) can also be adopted in the SSCL setting, achieving significant improvement in both in-distribution and out-of-distribution generalization." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.512, + 0.341, + 0.527 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.542, + 0.828, + 0.669 + ], + "angle": 0, + "content": "Recently, contrastive learning, especially self-supervised contrastive learning (SSCL) has drawn massive attention, with many state-of-the-art models following this paradigm in both computer vision (He et al., 2020a; Chen et al., 2020a;b; Grill et al., 2020; Chen & He, 2021; Zbontar et al., 2021) and natural language processing (Fang et al., 2020; Wu et al., 2020; Giorgi et al., 2020; Gao et al., 2021; Yan et al., 2021). In contrast to supervised learning, SSCL learns the representation through a large number of unlabeled data and artificially defined self-supervision signals, i.e., regarding the augmented views of a data sample as positive pairs and randomly sampled data as negative pairs. By enforcing the features of positive pairs to align and those of negative pairs to be distant, SSCL produces discriminative features with state-of-the-art performance for various downstream tasks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.675, + 0.829, + 0.788 + ], + "angle": 0, + "content": "Despite the empirical success, the theoretical understanding is under-explored as to how the learned features depend on the data and augmentation, how different components in SSCL work and what are the implicit biases when there exist multiple empirical loss minimizers. For instance, SSCL methods are widely adopted for pretraining, whose feature mappings are to be utilized for various downstream tasks which are usually out-of-distribution (OOD). 
The distribution shift poses great challenges for the feature learning process with extra requirement for robustness and OOD generalization (Arjovsky et al., 2019; Krueger et al., 2021; Bai et al., 2021; He et al., 2020b; Zhao et al., 2023; Dong et al., 2022), which demands deeper understanding of the SSCL methods." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.793, + 0.828, + 0.906 + ], + "angle": 0, + "content": "The goal of SSCL is to learn the feature representations from data. For this problem, one classic method is SNE (Hinton et al., 2006) and its various extensions. Specially, \\( t \\)-SNE (Van der Maaten & Hinton, 2008) has become the go-to choice for low-dimensional data visualization. Comparing to SSCL, SNE is far better explored in terms of theoretical understanding (Arora et al., 2018; Linderman & Steinerberger, 2019; Cai & Ma, 2021). However, its empirical performance is not satisfactory, especially in modern era where data are overly complicated. Both trying to learn feature representations, are there any deep connections between SSCL and SNE? Can SSCL take the advantage of the theoretical soundness of SNE? Can SNE be revived in the modern era by incorporating SSCL?" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.574, + 0.925 + ], + "angle": 0, + "content": "*Correspondence to Weiran Huang (weiran.huang@sjtu.edu.cn)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.961 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "In this work, we give affirmative answers to the above questions and demonstrate how the connections to SNE can benefit the theoretical understandings of SSCL, as well as provide methodological guidelines for practical improvement. The main contributions are summarized below." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.153, + 0.825, + 0.182 + ], + "angle": 0, + "content": "- We propose a novel perspective that interprets SSCL methods as a type of SNE methods with the aim of preserving pairwise similarities specified by the data augmentation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.188, + 0.827, + 0.287 + ], + "angle": 0, + "content": "- The discovered connection enables deeper understanding of SSCL methods. We provide novel theoretical insights for domain-agnostic data augmentation, implicit bias and OOD generalization. Specifically, we show isotropic random noise augmentation induces \\( l_{2} \\) similarity while mixup noise can potentially adapt to low-dimensional structures of data; we investigate the implicit bias from the angle of order preserving and identified the connection between minimizing the expected Lipschitz constant of the SSCL feature map and SNE with uniformity constraint; we identify that the popular cosine similarity can be harmful for OOD generalization." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.293, + 0.827, + 0.378 + ], + "angle": 0, + "content": "- Motivated by the SNE perspective, we propose several modifications to existing SSCL methods and demonstrate practical improvements. 
Besides a re-weighting scheme, we advocate to lose the spherical constraint for improved OOD performance and a \\( t \\)-SNE style matching for improved separation. Through comprehensive numerical experiments, we show that the modified \\( t \\)-SimCLR outperforms the baseline with \\( 90\\% \\) less feature dimensions on CIFAR-10 and \\( t \\)-MoCo-v2 pretrained on ImageNet significantly outperforms in various domain transfer and OOD tasks." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.153, + 0.827, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.398, + 0.502, + 0.413 + ], + "angle": 0, + "content": "2 PRELIMINARY AND RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.428, + 0.827, + 0.53 + ], + "angle": 0, + "content": "Notations. For a function \\( f: \\Omega \\to \\mathbb{R} \\), let \\( \\|f\\|_{\\infty} = \\sup_{\\boldsymbol{x} \\in \\Omega} |f(\\boldsymbol{x})| \\) and \\( \\|f\\|_p = (\\int_{\\Omega} |f(\\boldsymbol{x})|^p d\\boldsymbol{x})^{1/p} \\). For a vector \\( \\boldsymbol{x} \\), \\( \\| \\boldsymbol{x}\\|_p \\) denotes its \\( p \\)-norm, for \\( 1 \\leq p \\leq \\infty \\). \\( \\mathbb{P}(A) \\) is the probability of event \\( A \\). For a random variable \\( z \\), we use \\( P_z \\) and \\( p_z \\) to denote its probability distribution and density respectively. Denote Gaussian distribution by \\( N(\\mu, \\Sigma) \\) and let \\( I_d \\) be the \\( d \\times d \\) identity matrix. Let the dataset be \\( \\mathcal{D}_n = \\{\\boldsymbol{x}_1, \\dots, \\boldsymbol{x}_n\\} \\subset \\mathbb{R}^d \\) where each \\( \\boldsymbol{x}_i \\) independently follows distribution \\( P_x \\). The goal of unsupervised representation learning is to find informative low-dimensional features \\( z_1, \\dots, z_n \\in \\mathbb{R}^{d_z} \\) of \\( \\mathcal{D}_n \\) where \\( d_z \\) is usually much smaller than \\( d \\). 
We use \\( f(\\boldsymbol{x}) \\) as the default notation for the feature mapping
The overall training objective for SNE is" + }, + { + "type": "equation", + "bbox": [ + 0.406, + 0.731, + 0.825, + 0.771 + ], + "angle": 0, + "content": "\\[\n\\inf _ {\\boldsymbol {z} _ {1}, \\dots , \\boldsymbol {z} _ {n}} \\sum_ {i = 1} ^ {n} \\sum_ {j = 1} ^ {n} P _ {j | i} \\log \\frac {P _ {j | i}}{Q _ {j | i}}. \\tag {2.2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.778, + 0.827, + 0.89 + ], + "angle": 0, + "content": "Significant improvements have been made to the classic SNE. Im et al. (2018) generalized the KL-divergence to \\( f \\)-divergence and found that different divergences favors different types of structure. Lu et al. (2019) proposed to make \\( P \\) doubly stochastic so that features are less crowded. Most notably, \\( t \\)-SNE (Van der Maaten & Hinton, 2008) modified the pairwise similarity by considering joint distribution rather than conditional, and utilizes t-distribution instead of Gaussian in the feature space modeling. It is worth noting that SNE belongs to a large class of methods called manifold learning (Li et al., 2022). In this work, we specifically consider SNE. If no confusion arises, we use SNE to denote the specific work of Hinton & Roweis (2002) and this type of methods in general interchangeably." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Self-supervised contrastive learning. The key part of SSCL is the construction of positive pairs, or usually referred to as different views of the same sample. 
For each \\( x_{i} \\) in the training data, denote" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.784, + 0.121 + ], + "angle": 0, + "content": "its two augmented views to be \\( \\pmb{x}_i' \\) and \\( \\pmb{x}_i'' \\). Let \\( \\mathcal{D}_n' = \\{\\pmb{x}_1', \\dots, \\pmb{x}_n'\\} \\), \\( \\mathcal{D}_n'' = \\{\\pmb{x}_1'', \\dots, \\pmb{x}_n''\\} \\) and define" + }, + { + "type": "equation", + "bbox": [ + 0.295, + 0.127, + 0.704, + 0.165 + ], + "angle": 0, + "content": "\\[\nl (\\pmb {x} _ {i} ^ {\\prime}, \\pmb {x} _ {i} ^ {\\prime \\prime}) = - \\log \\frac {\\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i} ^ {\\prime}) , f (\\pmb {x} _ {i} ^ {\\prime \\prime})) / \\tau)}{\\sum_ {\\pmb {x} \\in \\mathcal {D} _ {n} ^ {\\prime} \\cup \\mathcal {D} _ {n} ^ {\\prime \\prime} \\setminus \\{\\pmb {x} _ {i} ^ {\\prime} \\}} \\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i} ^ {\\prime}) , f (\\pmb {x})) / \\tau)},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.172, + 0.828, + 0.22 + ], + "angle": 0, + "content": "where \\(\\mathrm{sim}(z_1,z_2) = \\langle \\frac{z_1}{\\|\\pmb{z}_1\\|_2},\\frac{z_2}{\\|\\pmb{z}_2\\|_2}\\rangle\\) denotes the cosine similarity and \\(\\tau\\) is a temperature parameter. The training objective of the popular SimCLR (Chen et al., 2020a) can be written as \\(L_{\\mathrm{InfoNCE}}\\coloneqq \\frac{1}{2n}\\sum_{i = 1}^{n}(l(\\pmb{x}_i^{\\prime \\prime},\\pmb{x}_i^{\\prime}) + l(\\pmb{x}_i^{\\prime},\\pmb{x}_i^{\\prime \\prime}))\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.224, + 0.828, + 0.336 + ], + "angle": 0, + "content": "Recently, various algorithms are proposed to improve the above contrastive learning. 
To address the need for the large batch size, MoCo (He et al., 2020a; Chen et al., 2020b) utilizes a moving-averaged encoder and a dynamic memory bank to store negative representations, making it more device-friendly. Grill et al. (2020); Chen & He (2021); Zbontar et al. (2021); Chen et al. (2021) radically discard negative samples in SSCL but still achieve satisfactory transfer performance. Another line of works (Caron et al., 2020; Li et al., 2021; Liu et al., 2022) mines the hierarchy information in data to derive more semantically compact representations. Radford et al. (2021); Yao et al. (2021) even extend the contrastive methods to the multi-modality data structure to achieve impressive zero-shot classification results." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.341, + 0.828, + 0.522 + ], + "angle": 0, + "content": "Theoretical understanding of SSCL. In contrast of the empirical success, theoretical understanding of SSCL is still limited. While most of theoretical works (Arora et al., 2019; Tosh et al., 2020; HaoChen et al., 2021; 2022; Wang et al., 2022; Wen & Li, 2021; Wei et al., 2020; Huang et al., 2021; Ji et al., 2021; Ma et al., 2023) focus on its generalization ability on downstream tasks, there are some works studying specifically the InfoNCE loss. One line of works (Oord et al., 2018; Bachman et al., 2019; Hjelm et al., 2018; Tian et al., 2019; 2020) understand the InfoNCE loss from mutual information perspective, showing that the negative InfoNCE is a lower bound of mutual information between positive samples. Other works (Wang & Isola, 2020; Huang et al., 2021; Jing et al., 2021) are from the perspective of geometry of embedding space, showing that InfoNCE can be divided into two parts: one controls alignment and the other prevents representation collapse. In this paper, we study SSCL from the SNE perspective, which, to the best of the authors' knowledge, has no discussion in existing literature. 
The closest work to ours is Balestriero & LeCun (2022), which proposed a unifying framework under the helm of spectral manifold learning. In comparison, our work focuses specifically on the connection between SSCL and SNE." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.543, + 0.445, + 0.559 + ], + "angle": 0, + "content": "3 SNE PERSPECTIVE OF SSCL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.575, + 0.825, + 0.635 + ], + "angle": 0, + "content": "A closer look at the training objectives of SNE and SimCLR reveals great resemblance — SimCLR can be seen as a special SNE model. To see this, denote \\(\\widetilde{\\mathcal{D}}_{2n} = \\mathcal{D}_n^{\\prime \\prime}\\cup \\mathcal{D}_n^{\\prime}\\) as the augmented dataset with index \\(\\widetilde{\\pmb{x}}_{2i - 1} = \\pmb{x}_i^{\\prime \\prime}\\) and \\(\\widetilde{\\pmb{x}}_{2i} = \\pmb{x}_i^\\prime\\). If we change the \\(l_{2}\\) distance to the negative cosine similarity and let \\(\\sigma_i^2\\equiv \\tau\\). Admitting similar conditional probability formulation as in (2.1) yields that for \\(i\\neq j\\)" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.642, + 0.825, + 0.677 + ], + "angle": 0, + "content": "\\[\n\\widetilde {Q} _ {j \\mid i} = \\frac {\\exp \\left(\\operatorname {s i m} \\left(f \\left(\\widetilde {\\boldsymbol {x}} _ {i}\\right) , f \\left(\\widetilde {\\boldsymbol {x}} _ {j}\\right)\\right) / \\tau\\right)}{\\sum_ {k \\neq i} \\exp \\left(\\operatorname {s i m} \\left(f \\left(\\widetilde {\\boldsymbol {x}} _ {i}\\right) , f \\left(\\widetilde {\\boldsymbol {x}} _ {k}\\right)\\right) / \\tau\\right)}. 
\\tag {3.1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.685, + 0.241, + 0.701 + ], + "angle": 0, + "content": "By taking" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.701, + 0.825, + 0.734 + ], + "angle": 0, + "content": "\\[\n\\widetilde {P} _ {j \\mid i} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f} \\widetilde {\\boldsymbol {x}} _ {i} \\text {a n d} \\widetilde {\\boldsymbol {x}} _ {j} \\text {a r e p o s i t i v e p a i r s} \\\\ 0, & \\text {o t h e r w i s e ,} \\end{array} \\right. \\tag {3.2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.74, + 0.441, + 0.755 + ], + "angle": 0, + "content": "the SNE objective (2.2) can be written as" + }, + { + "type": "equation", + "bbox": [ + 0.29, + 0.764, + 0.705, + 0.806 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 1} ^ {2 n} \\sum_ {j = 1} ^ {2 n} \\widetilde {P} _ {j | i} \\log \\frac {\\widetilde {P} _ {j | i}}{\\widetilde {Q} _ {j | i}} = \\sum_ {k = 1} ^ {n} \\Big (- \\log (\\widetilde {Q} _ {2 k - 1 | 2 k}) - \\log (\\widetilde {Q} _ {2 k | 2 k - 1}) \\Big),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.814, + 0.827, + 0.864 + ], + "angle": 0, + "content": "which reduces to the SimCLR objective \\( L_{\\mathrm{InfoNCE}} \\), up to a constant scaling term only depending on \\( n \\). Now that we have established the correspondence between SNE and SimCLR, it's clear that the feature learning process of SSCL also follows the two steps of SNE." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.876, + 0.644, + 0.892 + ], + "angle": 0, + "content": "(S1) The positive pair construction specifies the similarity matrix \\( P \\)." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "(S2) The training process then matches \\( Q \\) to \\( P \\) by minimizing some divergence between the two specified by the training objective, e.g., KL divergence in SimCLR." 
+ }, + { + "type": "list", + "bbox": [ + 0.194, + 0.876, + 0.826, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.069, + 0.408, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.427, + 0.069, + 0.557, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.069, + 0.744, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.184, + 0.402, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.426, + 0.183, + 0.556, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.184, + 0.744, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.298, + 0.828, + 0.356 + ], + "angle": 0, + "content": "Figure 1: Gaussian mixture setting with 5 components. (a) illustration of data with 250 samples. (b) learned features by standard SimCLR with normalization (cosine similarity) to 1-sphere. (c) learned features by modified SimCLR without normalization (\\(l_{2}\\) similarity). (d, e) feature mapping of the two methods in case of OOD mean shift. The linear classification accuracy is \\(48.4\\%\\) in (d) and \\(100\\%\\) in (e)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.368, + 0.828, + 0.469 + ], + "angle": 0, + "content": "The main difference between SNE and SSCL is the first part, where the \\(P\\) in SNE is usually densely filled by \\(l_{p}\\) distance, ignoring the semantic information within rich data like images and texts. 
In contrast, SSCL omits all traditional distances in \\(\\mathbb{R}^d\\) and only specifies semantic similarity through data augmentations, and the resulting \\(P\\) is sparsely filled only by positive pairs as in (3.2). For structurally rich data such as image or text, the semantic information is invariant to a wide range of transformations. Human's prior knowledge of such invariance guides the construction of positive pairs in SSCL, which is then learned by the feature mapping." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.472, + 0.828, + 0.572 + ], + "angle": 0, + "content": "Remark 3.1 (SNE vs SSCL). We would like to clarify on the main difference between SNE and SSCL that we focus in this work. Although standard SNE (Hinton et al., 2006) is non-parametric without explicit feature maps, and is optimized for the whole dataset, these are not the defining properties of SNE. SNE can also utilize explicit feature maps and mini-batch training (Van Der Maaten, 2009). On the other hand, SSCL can also benefit from larger/full batches (Chen et al., 2020a) and can also be modified to directly optimize the features \\( \\boldsymbol{z}_i \\)'s. In this work, we omit these subtleties1 and focus on the (S1) perspective, which we view as the most significant difference between SNE and SSCL." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.587, + 0.289, + 0.601 + ], + "angle": 0, + "content": "3.1 ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.613, + 0.827, + 0.683 + ], + "angle": 0, + "content": "In this section, to showcase the utility of the SNE perspective, we demonstrate how the feature learning process of SSCL methods, e.g., SimCLR, can become more intuitive and transparent. Specifically, we re-derive the alignment and uniformity principle (Wang & Isola, 2020) as well as provide novel analysis on domain-agnostic augmentations, the implicit bias and robustness of learned features. 
To aid the illustration, we devise toy examples with simulated Gaussian mixture data.
However, when facing new data without any domain knowledge," + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.898, + 0.825, + 0.926 + ], + "angle": 0, + "content": "1All the contrastive losses are written in full batches for simplicity in this work as we focus on analyzing the optimal solutions of SSCL methods rather than the optimization process." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.105, + 0.827, + 0.177 + ], + "angle": 0, + "content": "we have to rely on domain-agnostic data augmentations, e.g., adding random noises (Verma et al., 2021), for contrast. We first consider using general random noise augmentation, i.e., for any \\( \\pmb{x} \\in \\mathbb{R}^d \\), let \\( \\pmb{x}' = \\pmb{x} + \\delta \\) where \\( \\delta \\) follows some distribution with density \\( \\phi(\\pmb{x}) \\). Then, for any \\( \\pmb{x}_i \\), the probability density of having \\( \\pmb{t} \\in \\mathbb{R}^d \\) as its augmented point can be characterized as \\( P_{\\pmb{t}|\\pmb{x}_i} = \\mathbb{P}(\\pmb{x}_i \\mid \\pmb{x}_i' = \\pmb{t} \\text{ form a positive pair} | \\pmb{x}_i) = \\phi(\\pmb{t} - \\pmb{x}_i) \\). We have the following proposition on Gaussian-induced distance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.181, + 0.825, + 0.21 + ], + "angle": 0, + "content": "Proposition 3.2 (Gaussian noise injection). If the noise distribution is isotropic Gaussian with mean zero, the induced distance is equivalent to the \\(l_{2}\\) distance in \\(\\mathbb{R}^d\\), up to a monotone transformation." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.222, + 0.825, + 0.307 + ], + "angle": 0, + "content": "Another popular noise injection method is the mixup (Zhang et al., 2017), where the augmented data are comprised of convex combinations of the training data. For each \\( \\boldsymbol{x}_i \\), a positive pair can be constructed from another \\( \\boldsymbol{x}_j \\) such that \\( \\boldsymbol{x}_i' = \\boldsymbol{x}_i + \\lambda (\\boldsymbol{x}_j - \\boldsymbol{x}_i) \\) and \\( \\lambda \\in (0,1) \\) is the hyperparameter usually modeled with Beta distribution. For independent \\( \\boldsymbol{x}_1, \\boldsymbol{x}_2 \\sim P_x \\), denote the convoluted density of \\( \\lambda (\\boldsymbol{x}_1 - \\boldsymbol{x}_2) \\) as \\( p_{\\lambda}(\\boldsymbol{x}) \\), which is symmetric around 0. Then, if employing mixup for positive pairs in SSCL, the induced distance can be written as \\( P_{\\boldsymbol{x}_1, \\boldsymbol{x}_2} = P_{\\boldsymbol{x}_2, \\boldsymbol{x}_1} = p_{\\lambda}(\\boldsymbol{x}_1 - \\boldsymbol{x}_2) \\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.313, + 0.827, + 0.522 + ], + "angle": 0, + "content": "Gaussian vs. mixup. Verma et al. (2021) proposed to use mixup when domain-specific information is unattainable and provided supportive analysis on its advantage over isotropic Gaussian noise from the classification generalization error point of view. Through (S1) perspective, we can intuitively explain why data-dependent mixup noises can be potentially better from the perspective of the \"curse of dimensionality\". Consider the \\(d\\)-dimensional Gaussian mixture setting with \\(m < d\\) separated components. Notice that \\(\\pmb{\\mu}_1,\\dots ,\\pmb{\\mu}_m\\) can take up at most \\((m - 1)\\)-dimensional linear sub-space of \\(\\mathbb{R}^d\\). Denoted the space spanned by \\(\\pmb{\\mu}_i\\)'s as \\(S_{\\mu}\\). For the light-tailed Gaussian distribution, and the majority of samples will be close to \\(S_{\\mu}\\). 
Hence, the majority of the convoluted density \\(p_{\\lambda}(\\pmb{x})\\) will also be supported on \\(S_{\\mu}\\), as will the corresponding \\(P_{\\pmb{x}_2,\\pmb{x}_1}\\).
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.682, + 0.827, + 0.836 + ], + "angle": 0, + "content": "If perfect alignment is achieved and the features are constrained on the unit sphere, matching (3.2) implies pushing \\( n \\) points on the feature space as distant as possible. Maximally separated \\( n \\) points on a \\( d \\)-sphere has been studied in geometry, known as the Tammes problem (Tammes, 1930; Erber & Hockney, 1991; Melisseny, 1998). We say perfect uniformity is achieved if all the pairs are maximally separated on the sphere. There are some simple cases of the Tammes problem. If \\( d = 2 \\), perfect uniformity can be achieved if the mapped points form a regular polygon. If \\( d \\geq n - 1 \\), the solution can be given by the vertices of an \\( (n - 1) \\)-simplex, inscribed in an \\( (n - 1) \\)-sphere embedded in \\( \\mathbb{R}^d \\). The cosine similarity between any two vertices is \\( -1 / (n - 1) \\) and in this case, \\( L_{\\mathrm{InfoNCE}} \\) can attain its lower bound2. As \\( n \\to \\infty \\), the point distribution converges weakly to uniform distribution. As can be seen in Figure 1(a, b), perfect alignment and perfect uniformity are almost achieved by standard SimCLR in the Gaussian mixture setting." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.841, + 0.825, + 0.885 + ], + "angle": 0, + "content": "As we will demonstrate in Section 3.1.4 that the spherical feature space can be bad for OOD generalization, adopting of the Euclidean space will change the statement of the uniformity property and can also be analyzed from the SNE perspective. Details can be found in Appendix A.5." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.897, + 0.825, + 0.926 + ], + "angle": 0, + "content": "2Notice that in this case, the optimal feature mapping will contain little information of the data, mapping anchor samples to interchangeable points with identical pairwise distances" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.331, + 0.117 + ], + "angle": 0, + "content": "3.1.3 IMPLICITBIAS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.128, + 0.827, + 0.268 + ], + "angle": 0, + "content": "Existing theoretical results on SSCL provide justification of its empirical success in classification. However, there is more to it than just separating different classes and many phenomena are left unexplained. Take the popular SimCLR (Chen et al., 2020a) on CIFAR-10 as an example, we can consistently observe that the feature similarities within animals (bird, cat, deer, dog, frog, horse) and within objects (airplane, automobile, ship, truck), are significantly higher than those between animals and objects3. This can be viewed as an implicit bias towards preserving semantic information, which might be surprising as we have no supervision on the label information during the training process. However, existing literature on implicit bias is scarce. As advocated in Saunshi et al. (2022), ignoring inductive biases cannot adequately explain the success of contrastive learning. In this section, we provide a simple explanation from the perspective of SNE." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.274, + 0.828, + 0.346 + ], + "angle": 0, + "content": "For a more concrete illustration, consider training SimCLR in the Gaussian mixture setting with \\( d = 1 \\), \\( d_z = 2 \\), \\( m = 4 \\), \\( \\mu_i = i \\), and \\( \\sigma = 0.1 \\). Denote the 4 components in ascending order by A,B,C,D. Perfect alignment and uniformity imply that their feature maps (a, b, c, d) on the unit-circle should be vertices of an inscribed square. What left unsaid is their relative order. Clockwise or counter-Clockwise from a, regardless of the initialization, we can observe SimCLR to consistently produce the order \\( a \\to b \\to c \\to d \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.348, + 0.825, + 0.42 + ], + "angle": 0, + "content": "Remark 3.3 (Relative ordering and neighbor-preserving). The order-preserving property showcased with \\( d = 1 \\) is mainly for illustration, as in one-dimension, the neighboring info is simplified as the order, which is much easier to understand. The results remain the same in high dimensions as long as the clusters are well separated with an obvious order of clusters. For instance, some relative orders in Figure 1(a,b) are also stable, e.g., the neighbor of blue will consistently be purple and yellow." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.429, + 0.825, + 0.527 + ], + "angle": 0, + "content": "With great resemblance to SNE, SSCL methods also exhibit neighbor-preserving property and we identify it as an implicit bias. Such implicit bias can be universal in SSCL and the phenomenon in Figure A.3 is also a manifestation. In deep learning, the implicit bias is usually characterized by either closeness to the initialization (Moroshko et al., 2020; Azulay et al., 2021), or minimizing certain complexity (Razin & Cohen, 2020; Zhang et al., 2021). 
In the case of SimCLR, we hypothesize the implicit bias as the expected Lipschitz constant, which has deep connections to SNE with uniformity constraint. For a feature map \\( f \\) onto the unit-sphere, define" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.532, + 0.825, + 0.567 + ], + "angle": 0, + "content": "\\[\nC (f) = \\mathbb {E} _ {\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}} \\frac {\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2}}{\\| \\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime} \\| _ {2}}, \\tag {3.3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.57, + 0.616, + 0.585 + ], + "angle": 0, + "content": "where the \\(x_{1}, x_{2}\\) are independent samples from the data distribution." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.588, + 0.825, + 0.632 + ], + "angle": 0, + "content": "Definition 3.4 (SNE with uniformity constraint). Assume data \\( \\boldsymbol{x}_1, \\dots, \\boldsymbol{x}_n \\in \\mathbb{R}^d \\). If the corresponding SNE features \\( z_1, \\dots, z_n \\in \\mathbb{R}^{d_z} \\) are constrained to be the maximally separated \\( n \\) points on the \\( (d_z - 1) \\)-sphere, we call this problem SNE with uniformity constraint." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.641, + 0.825, + 0.685 + ], + "angle": 0, + "content": "The key of SNE is matching the pairwise similarity matrices \\( Q \\) to \\( P \\). When solving SNE with uniformity constraint, the only thing to be optimized is the pairwise correspondence, or ordering of the mapping. We have the following theorem that links the neighbor-preserving property to \\( C(f) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.686, + 0.825, + 0.745 + ], + "angle": 0, + "content": "Theorem 3.5. 
Let \\( \\pmb{x}_1, \\dots, \\pmb{x}_n \\in \\mathbb{R}^d \\) such that \\( \\| \\pmb{x}_i - \\pmb{x}_j \\|_2 > 0 \\) for any \\( i, j \\) and let \\( z_1, \\dots, z_n \\in \\mathbb{R}^{d_z} \\) be maximally separated \\( n \\) points on the \\( (d_z - 1) \\)-sphere. Denote \\( P = (p_{ij})_{n \\times n} \\) and \\( Q = (q_{ij})_{n \\times n} \\) as the corresponding pairwise similarity matrices of \\( \\pmb{x}_i \\)'s and \\( \\pmb{z}_i \\)'s respectively. Let \\( \\pi \\) denote a permutation on \\( \\{1, \\dots, n\\} \\) and denote all such permutations as \\( T \\). Let \\( Q^\\pi \\) as the \\( \\pi \\)-permuted matrix \\( Q \\) and define" + }, + { + "type": "equation", + "bbox": [ + 0.299, + 0.749, + 0.696, + 0.785 + ], + "angle": 0, + "content": "\\[\nC _ {1} (P, Q ^ {\\pi}) = \\sum_ {i \\neq j} \\frac {q _ {\\pi (i) \\pi (j)}}{p _ {i j}} \\quad \\text {a n d} \\quad \\pi^ {*} = \\operatorname * {a r g m i n} _ {\\pi \\in T} C _ {1} (P, Q ^ {\\pi}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.79, + 0.825, + 0.822 + ], + "angle": 0, + "content": "Then, \\(\\pi^{*}\\) also minimizes \\(\\| \\bar{P} - Q^{\\pi}\\|_{F}\\) where \\(\\| \\cdot \\|_{F}\\) is the Frobenius norm and \\(\\bar{P} = (\\bar{p}_{ij})_{n\\times n}\\) is a (monotonically) transformed similarity matrix with \\(\\bar{p}_{ij} = -1 / p_{ij}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.831, + 0.827, + 0.903 + ], + "angle": 0, + "content": "Theorem 3.5 showcases the relationship between minimizing \\( C(f) \\) and the structure preserving property by considering a special SNE problem, where the pairwise similarity is not modeled by Gaussian as standard. Although \\( q_{ij} = -\\| f(\\pmb{x}_i) - f(\\pmb{x}_j)\\|_2 \\) is unorthodox, it is reasonable since the larger the distance, the smaller the similarity. We have the following corollary to explain the neighbor-preserving property of SSCL and the implicit bias associated with minimizing the complexity \\( C(f) \\)." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.651, + 0.925 + ], + "angle": 0, + "content": "3Figure A.3 illustrates the phenomenon. Details can be found in Appendix A.1" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.149 + ], + "angle": 0, + "content": "Corollary 3.6 (Implicit bias of SSCL). When SSCL model achieves perfect alignment and perfect uniformity, if the complexity \\( C(f) \\) is minimized, the resulting feature map preserves pairwise distance in the input space, resembling SNE with uniformity constraint." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.158, + 0.827, + 0.244 + ], + "angle": 0, + "content": "Corollary 3.6 links the implicit bias of SSCL to the SNE optimization with uniformity constraint. In the case of perfect alignment and perfect uniformity, SSCL can be seen as a special SNE problem where the feature \\( z_{1}, \\dots, z_{n} \\) must be maximally separated on the unit-sphere. Recall the 1-dimension Gaussian case. There are in total \\( 3! = 6 \\) different orderings for the 4 cluster means, among which, a \\( \\rightarrow \\mathrm{b} \\rightarrow \\mathrm{c} \\rightarrow \\mathrm{d} \\) will give the lowest SNE loss. As can be seen in Figure A.4, both \\( C(f) \\) and the SNE loss are monotonically decreasing during training for the Gaussian mixture setting." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.249, + 0.828, + 0.336 + ], + "angle": 0, + "content": "When the alignment or uniformity is not perfect, the resulting feature mapping can still be characterized via SNE, with the uniformity constraint relaxed as a form of regularization. 
In our numerical experiments on the CIFAR-10 data, we observe \\( C(f) \\) to be monotonically decreasing during the training process, supporting our hypothesis. More details can be found in Appendix A.3. Corollary 3.6 sheds light on the implicit semantic information preserving phenomenon shown in Figure A.3, as in the input space, images of dogs should be closer to images of cats, than airplanes." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.349, + 0.55, + 0.363 + ], + "angle": 0, + "content": "3.1.4 TARGETING OOD: EUCLIDEAN VS SPHERICAL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.825, + 0.43 + ], + "angle": 0, + "content": "Almost all SSCL methods require normalization to the unit-sphere and the similarity on the feature space is often the cosine similarity. In comparison, standard SNE methods operate freely on the Euclidean space. In this section, we show that the normalization can hinder the structure-preserving and there is a fundamental trade off between in-distribution and out-of-domain generalization." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.436, + 0.827, + 0.548 + ], + "angle": 0, + "content": "Consider the 2-dimensional Gaussian mixture setting as illustrated in Figure 1(a). Notice that as long as the mixing components are well separated, the learned feature mapping on the sphere will always be the pentagon shape, regardless of the relative locations of the clusters. This is a result of the uniformity property derived under spherical constraint. Distant clusters in the input space will be pulled closer while close clusters will be pushed to be more distant, which results in the trade off between in-distribution and out-of-domain generalization. On one hand, close clusters being more separated in the feature space is potentially beneficial for in-distribution classification. On the other hand, the spherical constraint adds to the complexity of the feature mapping, potentially hurting robustness." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.554, + 0.829, + 0.709 + ], + "angle": 0, + "content": "In the Euclidean space, pushing away negative samples (as distant as possible) will be much easier, since the feature vectors could diverge towards infinity\\(^{4}\\) and potentially preserve more structural information. To verify our intuition, we relax the spherical constraint in the Gaussian mixture setting and change the cosine similarity in SimCLR to the negative \\(l_{2}\\) distance in \\(\\mathbb{R}\\). The learned features are shown in Figure 1(c). Comparing to Figure 1(b), we can get the extra information that the purple cluster is far away to the others. If we introduce a small mean shift to the data, moving the distribution along each dimension by 1, the resulting feature maps differ significantly in robustness. As illustrated in Figure 1(d) vs. (e), the standard SimCLR are much less robust to OOD shifts and the resulting classification accuracy degrades to only \\(48.4\\%\\), while that for the modified SimCLR remains \\(100\\%\\). The same OOD advantage can also be verified in the CIFAR-10 to CIFAR-100 OOD generalization case (details in Appendix C.3 Figure C.8) and large-scale real-world scenarios with MoCo (Chen et al., 2020b) as baseline (details in Section 5)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.727, + 0.431, + 0.743 + ], + "angle": 0, + "content": "4 IMPROVING SSCL BY SNE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.759, + 0.828, + 0.816 + ], + "angle": 0, + "content": "The proposed SNE perspective (S1,S2) can inspire various modifications to existing SSCL methods. In this section, we choose SimCLR as our baseline and investigate three straightforward modifications. For empirical evaluation, we report the test classification accuracy of nearest neighbor classifiers on both simulated data and real datasets. Experiment details can be found in Appendix C." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.832, + 0.406, + 0.846 + ], + "angle": 0, + "content": "4.1 WEIGHTED POSITIVE PAIRS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.858, + 0.828, + 0.903 + ], + "angle": 0, + "content": "In practice, positive pairs are constructed from anchors (training data), by i.i.d. data augmentations, e.g., random resized crop, random horizontal flip, color jitter, etc. Take random crop as an example, pair 1 and 2 may be from \\(30\\%\\), \\(80\\%\\) random crops, respectively. Their similarities should not be treated" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.824, + 0.925 + ], + "angle": 0, + "content": "In practice, various regularization, e.g., weight decay, are employed and the resulting features will be bounded." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.476, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.083, + 0.465, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.206, + 0.408, + 0.22 + ], + "angle": 0, + "content": "(a) Weighted SimCLR." + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.082, + 0.772, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.565, + 0.206, + 0.732, + 0.219 + ], + "angle": 0, + "content": "(b) SimCLR vs. \\(t\\)-SimCLR." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.225, + 0.825, + 0.268 + ], + "angle": 0, + "content": "Figure 2: Nearest neighbor classification test accuracy on CIFAR-10 with ResNet-18 after 200 epochs pre-training. (a) \\( N / A \\) stands for the baseline SimCLR. The \\( x \\)-axis is the temperature for IoU weighting scheme. 
(b) Comparison between SimCLR and \\( t \\)-SimCLR with different feature dimensions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.28, + 0.825, + 0.31 + ], + "angle": 0, + "content": "as equal, as in typical SSCL methods. Incorporating the disparity in the data augmentation process is straightforward in the perspective of SNE, where the InfoNCE loss can be naturally modified as" + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.313, + 0.61, + 0.352 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{2 n} \\sum_ {i = 1} ^ {n} p _ {i i ^ {\\prime}} \\cdot \\left(l \\left(\\boldsymbol {x} _ {i}, \\boldsymbol {x} _ {i} ^ {\\prime}\\right) + l \\left(\\boldsymbol {x} _ {i} ^ {\\prime}, \\boldsymbol {x} _ {i}\\right)\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.356, + 0.827, + 0.456 + ], + "angle": 0, + "content": "The weight \\( p_{ii'} \\) in \\( P \\) can be specified manually to reflect human's prior knowledge. To test out the effect of such modification, we conduct numerical experiments on CIFAR-10 using the standard SimCLR. The weighting scheme is based on the Intersection over Union (IoU) of random resized crops. For each positive pair, let \\( p_{ii'} \\propto \\exp(\\mathrm{IoU}(\\boldsymbol{x}_i, \\boldsymbol{x}_i') / \\tau') \\), where \\( \\tau' > 0 \\) is a hyperparameter (temperature) controlling the strength of the weighting scheme, i.e., the bigger the \\( \\tau' \\), the closer to the unweighted state. The CIFAR-10 test performance vs. \\( \\tau' \\) is shown in Figure 2(a). The baseline is \\( 80.7\\% \\) and can be significantly improved to \\( 82.1\\% \\) if choosing \\( \\tau' = 1 \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.47, + 0.473, + 0.484 + ], + "angle": 0, + "content": "4.2 T-SIMCLR: \\(t\\)-SNE STYLE MATCHING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.825, + 0.608 + ], + "angle": 0, + "content": "Most SSCL algorithms differ mainly in (S2), i.e., defining \\( Q \\) and matching it to \\( P \\), where fruitful results in SNE literature can be mirrored and applied. Now that we have identified the advantage of modeling features in Euclidean spaces in Section 3.1.4, the most promising modification that follows is to introduce \\( t \\)-SNE to SimCLR. Since we are learning low-dimensional features from high-dimensional data, preserving all pairwise similarities is impossible and the features tend to collapse. This is referred to as the \"crowding problem\" in Van der Maaten & Hinton (2008) (see Section 3.2 therein). \\( t \\)-SNE utilizes the heavy-tail \\( t \\)-distribution instead of the light-tail Gaussian, to model \\( Q \\) and encourage separation in feature space. Correspondingly, the training objective \\( L_{\\mathrm{InfoNCE}} \\) can be modified as" + }, + { + "type": "equation", + "bbox": [ + 0.277, + 0.612, + 0.825, + 0.657 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{n} \\sum_ {i = 1} ^ {n} - \\log \\frac {\\left(1 + \\| f \\left(\\boldsymbol {x} _ {i}\\right) - f \\left(\\boldsymbol {x} _ {i} ^ {\\prime}\\right) \\| _ {2} ^ {2} / \\left(\\tau t _ {d f}\\right)\\right) ^ {- \\left(t _ {d f} + 1\\right) / 2}}{\\sum_ {1 \\leq j \\neq k \\leq 2 n} \\left(1 + \\| f (\\widetilde {\\boldsymbol {x}} _ {j}) - f (\\widetilde {\\boldsymbol {x}} _ {k}) \\| _ {2} ^ {2} / \\left(\\tau t _ {d f}\\right)\\right) ^ {- \\left(t _ {d f} + 1\\right) / 2}}, \\tag {4.1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.659, + 0.825, + 0.73 + ], + "angle": 0, + "content": "where \\( t_{df} \\) is the degree of freedom for the \\( t \\)-distribution. 
Besides replacing the cosine similarity with the \\( l_2 \\) distance
We also conducted 1000-epoch experiments and found that our modifications provide consistent improvements throughout the training process, not in terms of speeding up the convergence, but converging to better solutions. Details can be found in Appendix C.1 and Figure C.6." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.472, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.272, + 0.077, + 0.724, + 0.09 + ], + "angle": 0, + "content": "Table 1: Domain transfer results of vanilla MoCo-v2 and \\( t \\) -MoCo-v2." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.089, + 0.825, + 0.136 + ], + "angle": 0, + "content": "
MethodAircraftBirdsnapCaltech101CarsCIFAR10CIFAR100DTDPetsSUN397Avg.
MoCo-v282.7544.5383.3185.2495.8172.7571.2286.7056.0575.37
t-MoCo-v282.7853.4686.8186.1796.0478.3269.2087.9559.3077.78
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.15, + 0.826, + 0.164 + ], + "angle": 0, + "content": "Table 2: OOD accuracies of vanilla MoCo-v2 and \\( t \\) -MoCo-v2 on domain generalization benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.324, + 0.165, + 0.672, + 0.219 + ], + "angle": 0, + "content": "
MethodPACSVLCSOffice-HomeAvg.
MoCo-v258.570.436.655.2
t-MoCo-v261.375.142.159.5
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.232, + 0.447, + 0.246 + ], + "angle": 0, + "content": "5 LARGE SCALE EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.264, + 0.827, + 0.377 + ], + "angle": 0, + "content": "In this section, we apply the same modifications proposed in Section 4.2 to MoCo-v2 (Chen et al., 2020b), as it is more device-friendly to conduct large scale experiments. We name our model \\( t \\)-MoCo-v2. Both models are pre-trained for 200 epochs on ImageNet following the setting of Chen et al. (2020b). The linear probing accuracy of \\( t \\)-MoCo-v2 on ImageNet is \\( 67.0\\% \\), which is comparable to the MoCo result \\( 67.5\\% \\). With the same level of in-distribution classification accuracy, we conduct extensive experiments to compare their OOD performance. The results in Table 1 and 2 suggest that our modification significantly improves the domain transfer and the OOD generalization ability without sacrificing in-distribution accuracy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.825, + 0.489 + ], + "angle": 0, + "content": "Domain Transfer. We first conduct experiments on the traditional self-supervision domain transfer benchmark. We compare MoCo-v2 and \\( t \\)-MoCo-v2 on Aircraft, Birdsnap, Caltech101, Cars, CIFAR10, CIFAR100, DTD, Pets, and SUN397. We follow transfer settings in Ericsson et al. (2021) to finetune the pre-trained models. The results are reported in Table 1. Our model \\( t \\)-MoCo-v2 surpasses MoCo-v2 in 8 out of 9 datasets, showing a significantly stronger transfer ability. Notice that our model is pre-trained with 200 epochs, surprisingly, compared with the original MoCo-v2 model pre-trained with 800 epochs, the fine-tuning results of \\( t \\)-MoCo-v2 are still better on Birdsnap, Caltech101, CIFAR100, and SUN397." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.497, + 0.827, + 0.651 + ], + "angle": 0, + "content": "Out-of-domain generalization. 
As illustrated in Section 3.1.4, standard SSCL methods, e.g., SimCLR, MoCo, etc., could suffer from OOD shift. To demonstrate the advantage of our modification, we investigate the effectiveness of our method on OOD generalization benchmarks: PACS Li et al. (2017), VLCS Fang et al. (2013), Office-Home Venkateswara et al. (2017). We follow the standard way to conduct the experiment, i.e., choosing one domain as the test domain and using the remaining domains as training domains, which is named the leave-one-domain-out protocol. As can be seen in Table 2, our \\( t \\)-MoCo-v2 indicates significant improvement over MoCo-v2. Both experiments indicate our modification exhibits substantial enhancement for domain transfer and OOD generalization ability. Similar to domain transfer scenario, compared with the original MoCo-v2 model pre-trained with 800 epochs, \\( t \\)-MoCo-v2 is better on all of the three datasets. More experiment details, including detailed comparisons, are in Appendix C." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.671, + 0.312, + 0.687 + ], + "angle": 0, + "content": "6 DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.703, + 0.827, + 0.926 + ], + "angle": 0, + "content": "This work proposes a novel perspective that interprets SSCL methods as a type of SNE methods, which facilitates both deeper theoretical understandings and methodological guidelines for practical improvement. More interpretations of SSCL from preserving the distance between distributions can be found in Appendix B. Our analysis has limitations and the insights from SNE are not universally applicable for all SSCL methods, e.g., Zbontar et al. (2021); Yang et al. (2021) don't fit in our framework. However, this work is an interesting addition to existing theoretical works of SSCL and more investigations can be made along this path. 
While there are various extensions of the classic SNE, in this work, as a proof of concept, we mainly showcased practical improvements from \\( t \\)-SNE. We expect more modifications can be developed by borrowing advances in the SNE literature, e.g., changing to \\( f \\)-divergences (Im et al., 2018) or consider optimal transport Bunne et al. (2019); Salmona et al. (2021); Mialon et al. (2020). On the other hand, standard SNE methods can also borrow existing techniques in SSCL to improve their performance on more complicated data, e.g., incorporating data augmentations instead of or on top of pre-defined distances. In this sense, by choosing feature dimension to be 2, various SSCL methods can also be used as data visualization tools (Böhm et al., 2022; Damrich et al., 2022). Specifically on CIFAR-10, standard \\( t \\)-SNE can barely reveal any clusters while our \\( t \\)-SimCLR with \\( d_z = 2 \\) produces much more separation among different labels. More details can be found in Appendix C.7." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.476, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.103, + 0.289, + 0.119 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.126, + 0.829, + 0.157 + ], + "angle": 0, + "content": "Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.164, + 0.828, + 0.194 + ], + "angle": 0, + "content": "Sanjeev Arora, Wei Hu, and Pravesh K Kothari. An analysis of the t-sne algorithm for data visualization. In Conference On Learning Theory, pp. 1455-1462. PMLR, 2018." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.201, + 0.827, + 0.245 + ], + "angle": 0, + "content": "Sanjeev Arora, Hrishikesh Khandeparkar, Mikhail Khodak, Orestis Plevrakis, and Nikunj Saunshi. A theoretical analysis of contrastive unsupervised representation learning. arXiv preprint arXiv:1902.09229, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.254, + 0.826, + 0.297 + ], + "angle": 0, + "content": "Shahar Azulay, Edward Moroshko, Mor Shpigel Nacson, Blake E Woodworth, Nathan Srebro, Amir Globerson, and Daniel Soudry. On the implicit bias of initialization shape: Beyond infinitesimal mirror descent. In International Conference on Machine Learning, pp. 468-477. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.305, + 0.827, + 0.348 + ], + "angle": 0, + "content": "Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. In Advances in Neural Information Processing Systems, pp. 15535-15545, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.357, + 0.827, + 0.414 + ], + "angle": 0, + "content": "Haoyue Bai, Rui Sun, Lanqing Hong, Fengwei Zhou, Nanyang Ye, Han-Jia Ye, S-H Gary Chan, and Zhenguo Li. Decaug: Out-of-distribution generalization via decomposed feature representation and semantic augmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 6705-6713, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.422, + 0.826, + 0.453 + ], + "angle": 0, + "content": "Randall Balestriero and Yann LeCun. Contrastive and non-contrastive self-supervised learning recover global and local spectral embedding methods. arXiv preprint arXiv:2205.11508, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.461, + 0.825, + 0.491 + ], + "angle": 0, + "content": "Jan Niklas Böhm, Philipp Berens, and Dmitry Kobak. Unsupervised visualization of image datasets using contrastive learning. 
arXiv preprint arXiv:2210.09879, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.499, + 0.827, + 0.541 + ], + "angle": 0, + "content": "Charlotte Bunne, David Alvarez-Melis, Andreas Krause, and Stefanie Jegelka. Learning generative models across incomparable spaces. In International conference on machine learning, pp. 851-861. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.55, + 0.825, + 0.58 + ], + "angle": 0, + "content": "T Tony Cai and Rong Ma. Theoretical foundations of t-sne for visualizing high-dimensional clustered data. arXiv preprint arXiv:2105.07536, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.588, + 0.827, + 0.632 + ], + "angle": 0, + "content": "Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. In Advances in Neural Information Processing Systems, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.64, + 0.825, + 0.684 + ], + "angle": 0, + "content": "Kai Chen, Lanqing Hong, Hang Xu, Zhenguo Li, and Dit-Yan Yeung. Multisiam: Self-supervised multi-instance siamese representation learning for autonomous driving. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 7546-7554, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.692, + 0.825, + 0.723 + ], + "angle": 0, + "content": "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. arXiv preprint arXiv:2002.05709, 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.73, + 0.827, + 0.76 + ], + "angle": 0, + "content": "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15750-15758, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.768, + 0.825, + 0.798 + ], + "angle": 0, + "content": "Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.806, + 0.825, + 0.836 + ], + "angle": 0, + "content": "Sebastian Damrich, Niklas Böhm, Fred A Hamprecht, and Dmitry Kobak. From \\( t \\)-sne to umap with contrastive learning. In International Conference on Learning Representations, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.844, + 0.827, + 0.887 + ], + "angle": 0, + "content": "Qishi Dong, Awais Muhammad, Fengwei Zhou, Chuanlong Xie, Tianyang Hu, Yongxin Yang, Sung-Ho Bae, and Zhenguo Li. Zood: Exploiting model zoo for out-of-distribution generalization. arXiv preprint arXiv:2210.09236, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.896, + 0.829, + 0.925 + ], + "angle": 0, + "content": "T Erber and GM Hockney. Equilibrium configurations of n equal charges on a sphere. Journal of Physics A: Mathematical and General, 24(23):L1369, 1991." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.126, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.476, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Linus Ericsson, Henry Gouk, and Timothy M Hospedales. How well do self-supervised models transfer? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5414-5423, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.154, + 0.826, + 0.198 + ], + "angle": 0, + "content": "Chen Fang, Ye Xu, and Daniel N. Rockmore. Unbiased metric learning: On the utilization of multiple datasets and web images for softening bias. 2013 IEEE International Conference on Computer Vision, pp. 1657-1664, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.205, + 0.825, + 0.235 + ], + "angle": 0, + "content": "Hongchao Fang, Sicheng Wang, Meng Zhou, Jiayuan Ding, and Pengtao Xie. Cert: Contrastive self-supervised learning for language understanding. arXiv preprint arXiv:2005.12766, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.242, + 0.825, + 0.272 + ], + "angle": 0, + "content": "Tianyu Gao, Xingcheng Yao, and Danqi Chen. Simcse: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.279, + 0.825, + 0.31 + ], + "angle": 0, + "content": "John M Giorgi, Osvald Nitski, Gary D Bader, and Bo Wang. Declutr: Deep contrastive learning for unsupervised textual representations. arXiv preprint arXiv:2006.03659, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.316, + 0.825, + 0.373 + ], + "angle": 0, + "content": "Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.381, + 0.825, + 0.411 + ], + "angle": 0, + "content": "Ishaan Gulrajani and David Lopez-Paz. In search of lost domain generalization. In International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.419, + 0.825, + 0.448 + ], + "angle": 0, + "content": "Thomas Hamm and Ingo Steinwart. 
Adaptive learning rates for support vector machines working on data with low intrinsic dimension. The Annals of Statistics, 49(6):3153-3180, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.455, + 0.826, + 0.498 + ], + "angle": 0, + "content": "Jeff Z HaoChen, Colin Wei, Adrien Gaidon, and Tengyu Ma. Provable guarantees for self-supervised deep learning with spectral contrastive loss. Advances in Neural Information Processing Systems, 34, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.506, + 0.825, + 0.549 + ], + "angle": 0, + "content": "Jeff Z HaoChen, Colin Wei, Ananya Kumar, and Tengyu Ma. Beyond separability: Analyzing the linear transferability of contrastive representations to related subpopulations. arXiv preprint arXiv:2204.02683, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.557, + 0.826, + 0.601 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.608, + 0.826, + 0.652 + ], + "angle": 0, + "content": "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9729-9738, 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.659, + 0.825, + 0.689 + ], + "angle": 0, + "content": "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.696, + 0.826, + 0.726 + ], + "angle": 0, + "content": "Yue He, Zheyan Shen, and Peng Cui. Towards non-iid image classification: A dataset and baselines. Pattern Recognition, pp. 107383, 2020b." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.733, + 0.826, + 0.763 + ], + "angle": 0, + "content": "Geoffrey Hinton and Sam T Roweis. Stochastic neighbor embedding. In NIPS, volume 15, pp. 833-840. CiteSeer, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.77, + 0.826, + 0.8 + ], + "angle": 0, + "content": "Geoffrey E. Hinton, Simon Osindero, and Yee Whye Teh. A fast learning algorithm for deep belief nets. Neural Computation, 18:1527-1554, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.807, + 0.826, + 0.851 + ], + "angle": 0, + "content": "R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.858, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Weiran Huang, Mingyang Yi, and Xuyang Zhao. Towards the generalization of contrastive self-supervised learning. arXiv preprint arXiv:2111.00743, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Daniel Jiwoong Im, Nakul Verma, and Kristin Branson. Stochastic neighbor embedding under f-divergences. arXiv preprint arXiv:1811.01247, 2018." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.476, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Wenlong Ji, Zhun Deng, Ryumei Nakada, James Zou, and Linjun Zhang. The power of contrast for feature learning: A theoretical analysis. 
arXiv preprint arXiv:2110.02473, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.826, + 0.174 + ], + "angle": 0, + "content": "Li Jing, Pascal Vincent, Yann LeCun, and Yuandong Tian. Understanding dimensional collapse in contrastive self-supervised learning. arXiv preprint arXiv:2110.09348, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.182, + 0.827, + 0.199 + ], + "angle": 0, + "content": "Alex Krizhevsky. Learning multiple layers of features from tiny images. University of Toronto, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.207, + 0.825, + 0.252 + ], + "angle": 0, + "content": "David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In International Conference on Machine Learning, pp. 5815-5826. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.261, + 0.827, + 0.293 + ], + "angle": 0, + "content": "Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy M. Hospedales. Deeper, broader and artier domain generalization. 2017 IEEE International Conference on Computer Vision (ICCV), pp. 5543-5551, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.3, + 0.827, + 0.332 + ], + "angle": 0, + "content": "Yunfan Li, Peng Hu, Zitao Liu, Dezhong Peng, Joey Tianyi Zhou, and Xi Peng. Contrastive clustering. In 2021 AAAI Conference on Artificial Intelligence (AAAI), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.339, + 0.825, + 0.371 + ], + "angle": 0, + "content": "Zengyi Li, Yubei Chen, Yann LeCun, and Friedrich T. Sommer. Neural manifold clustering and embedding. ArXiv, abs/2201.10000, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.379, + 0.825, + 0.41 + ], + "angle": 0, + "content": "George C Linderman and Stefan Steinerberger. Clustering with t-sne, provably. 
SIAM Journal on Mathematics of Data Science, 1(2):313-332, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.418, + 0.827, + 0.463 + ], + "angle": 0, + "content": "Zhili Liu, Jianhua Han, Kai Chen, Lanqing Hong, Hang Xu, Chunjing Xu, and Zhenguo Li. Task-customized self-supervised pre-training with scalable dynamic routing. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 1854-1862, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.471, + 0.827, + 0.503 + ], + "angle": 0, + "content": "Yao Lu, Jukka Corander, and Zhirong Yang. Doubly stochastic neighbor embedding on spheres. Pattern Recognition Letters, 128:100-106, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.511, + 0.825, + 0.542 + ], + "angle": 0, + "content": "Jiajun Ma, Tianyang Hu, and Wenjia Wang. Deciphering the projection head: Representation evaluation self-supervised learning. arXiv preprint arXiv:2301.12189, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.55, + 0.827, + 0.594 + ], + "angle": 0, + "content": "JBM Melisseneny. How different can colours be? maximum separation of points on a spherical octant. Proceedings of the Royal Society of London. Series A: Mathematical, Physical and Engineering Sciences, 454(1973):1499-1508, 1998." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.603, + 0.827, + 0.635 + ], + "angle": 0, + "content": "Facundo Memoli. Gromov-wasserstein distances and the metric approach to object matching. Foundations of computational mathematics, 11(4):417-487, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.643, + 0.827, + 0.687 + ], + "angle": 0, + "content": "Grégoire Mialon, Dexiong Chen, Alexandre d'Aspremont, and Julien Mairal. A trainable optimal transport embedding for feature aggregation. In International Conference on Learning Representations (ICLR), 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.696, + 0.825, + 0.74 + ], + "angle": 0, + "content": "Edward Moroshko, Blake E Woodworth, Suriya Gunasekar, Jason D Lee, Nati Srebro, and Daniel Soudry. Implicit bias in deep linear classification: Initialization scale vs training accuracy. Advances in neural information processing systems, 33:22182-22193, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.748, + 0.825, + 0.78 + ], + "angle": 0, + "content": "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.788, + 0.827, + 0.845 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pp. 8748-8763. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.855, + 0.825, + 0.887 + ], + "angle": 0, + "content": "Noam Razin and Nadav Cohen. Implicit regularization in deep learning may not be explainable by norms. Advances in neural information processing systems, 33:21174-21187, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.895, + 0.825, + 0.927 + ], + "angle": 0, + "content": "Antoine Salmona, Julie Delon, and Agnès Desolneux. Gromov-wasserstein distances between gaussian distributions. arXiv preprint arXiv:2104.07970, 2021." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.476, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Nikunj Saunshi, Jordan Ash, Surbhi Goel, Dipendra Misra, Cyril Zhang, Sanjeev Arora, Sham Kakade, and Akshay Krishnamurthy. Understanding contrastive learning requires incorporating inductive biases. arXiv preprint arXiv:2202.14037, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.187 + ], + "angle": 0, + "content": "Pieter Merkus Lambertus Tammes. On the origin of number and arrangement of the places of exit on the surface of pollen-grains. Recueil des travaux botaniques nederlandais, 27(1):1-84, 1930." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.194, + 0.825, + 0.224 + ], + "angle": 0, + "content": "Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. arXiv preprint arXiv:1906.05849, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.233, + 0.824, + 0.264 + ], + "angle": 0, + "content": "Yonglong Tian, Chen Sun, Ben Poole, Dilip Krishnan, Cordelia Schmid, and Phillip Isola. What makes for good views for contrastive learning? arXiv preprint arXiv:2005.10243, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.272, + 0.825, + 0.303 + ], + "angle": 0, + "content": "Christopher Tosh, Akshay Krishnamurthy, and Daniel Hsu. Contrastive learning, multi-view redundancy, and linear models. arXiv preprint arXiv:2008.10150, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.31, + 0.825, + 0.341 + ], + "angle": 0, + "content": "Laurens Van Der Maaten. 
Learning a parametric embedding by preserving local structure. In Artificial intelligence and statistics, pp. 384-391. PMLR, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.348, + 0.825, + 0.379 + ], + "angle": 0, + "content": "Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.387, + 0.825, + 0.431 + ], + "angle": 0, + "content": "Hemanth Venkateswara, Jose Eusebio, Shayok Chakraborty, and Sethuraman Panchanathan. Deep hashing network for unsupervised domain adaptation. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5385-5394, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.44, + 0.827, + 0.471 + ], + "angle": 0, + "content": "Vikas Verma, Thang Luong, Kenji Kawaguchi, Hieu Pham, and Quoc Le. Towards domain-agnostic contrastive learning. In International Conference on Machine Learning, pp. 10530–10541. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.478, + 0.827, + 0.509 + ], + "angle": 0, + "content": "Haonan Wang, Jieyu Zhang, Qi Zhu, and Wei Huang. Augmentation-free graph contrastive learning. arXiv preprint arXiv:2204.04874, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.517, + 0.827, + 0.561 + ], + "angle": 0, + "content": "Tongzhou Wang and Phillip Isola. Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In International Conference on Machine Learning, pp. 9929-9939. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.57, + 0.825, + 0.6 + ], + "angle": 0, + "content": "Colin Wei, Kendrick Shen, Yining Chen, and Tengyu Ma. Theoretical analysis of self-training with deep networks on unlabeled data. arXiv preprint arXiv:2010.03622, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.608, + 0.825, + 0.639 + ], + "angle": 0, + "content": "Zixin Wen and Yuanzhi Li. Toward understanding the feature learning process of self-supervised contrastive learning. arXiv preprint arXiv:2105.15134, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.647, + 0.825, + 0.69 + ], + "angle": 0, + "content": "Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3733-3742, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.699, + 0.825, + 0.73 + ], + "angle": 0, + "content": "Zhuofeng Wu, Sinong Wang, Jiatao Gu, Madian Khabsa, Fei Sun, and Hao Ma. Clear: Contrastive learning for sentence representation. arXiv preprint arXiv:2012.15466, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.738, + 0.825, + 0.781 + ], + "angle": 0, + "content": "Yuanmeng Yan, Rumei Li, Sirui Wang, Fuzheng Zhang, Wei Wu, and Weiran Xu. Consert: A contrastive framework for self-supervised sentence representation transfer. arXiv preprint arXiv:2105.11741, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.79, + 0.825, + 0.834 + ], + "angle": 0, + "content": "Ceyuan Yang, Zhirong Wu, Bolei Zhou, and Stephen Lin. Instance localization for self-supervised detection pretraining. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3987-3996, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.843, + 0.825, + 0.886 + ], + "angle": 0, + "content": "Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. Filip: Fine-grained interactive language-image pre-training. arXiv preprint arXiv:2111.07783, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Chun-Hsiao Yeh, Cheng-Yao Hong, Yen-Chi Hsu, Tyng-Luh Liu, Yubei Chen, and Yann LeCun. Decoupled contrastive learning. arXiv preprint arXiv:2110.06848, 2021." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.476, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stephane Deny. Barlow twins: Self-supervised learning via redundancy reduction. arXiv preprint arXiv:2103.03230, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.829, + 0.185 + ], + "angle": 0, + "content": "Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning (still) requires rethinking generalization. Communications of the ACM, 64(3):107-115, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.193, + 0.825, + 0.224 + ], + "angle": 0, + "content": "Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. arXiv preprint arXiv:1710.09412, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.231, + 0.825, + 0.274 + ], + "angle": 0, + "content": "Xuyang Zhao, Tianqi Du, Yisen Wang, Jun Yao, and Weiran Huang. Arcl: Enhancing contrastive learning with augmentation-robust representations. In International Conference on Learning Representations (ICLR), 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.282, + 0.829, + 0.313 + ], + "angle": 0, + "content": "Roland S Zimmermann, Yash Sharma, Steffen Schneider, Matthias Bethge, and Wieland Brendel. Contrastive learning inverts the data generating process. arXiv preprint arXiv:2102.08850, 2021." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.337, + 0.1, + 0.592, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.293, + 0.825, + 0.322 + ], + "angle": 0, + "content": "Figure A.3: Cosine similarity heat map of learned features from SimCLR on CIFAR-10 dataset. The darker the color, the larger the similarity." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.347, + 0.391, + 0.362 + ], + "angle": 0, + "content": "A TECHNICAL DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.379, + 0.513, + 0.393 + ], + "angle": 0, + "content": "A.1 IMPLICITBIASOFSIMCLRONCIFAR-10." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.405, + 0.827, + 0.464 + ], + "angle": 0, + "content": "Figure A.3 plots the cosine similarity heat map of learned features from SimCLR on CIFAR-10 dataset. To calculate the similarity of class A (figures denoted by \\( a_i \\)) to class B (figures denoted by \\( b_i \\)), we first calculate the mean of \\( b_i \\) as \\( \\bar{b} \\). Then, we sum up \\( \\sum_{i} \\sin(a_i, \\bar{b}) \\) and plot is with colors. Hence, the similarity matrix shown in Figure A.3 is not symmetric." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.48, + 0.414, + 0.493 + ], + "angle": 0, + "content": "A.2 PROOF OF PROPOSITION 3.2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.506, + 0.825, + 0.534 + ], + "angle": 0, + "content": "Recall the domain-agnostic data augmentation process. For any \\( \\boldsymbol{x}_i \\), the probability density of having \\( t \\in \\mathbb{R}^d \\) as its augmented point can be characterized as" + }, + { + "type": "equation", + "bbox": [ + 0.3, + 0.543, + 0.697, + 0.561 + ], + "angle": 0, + "content": "\\[\nP _ {\\boldsymbol {t} | \\boldsymbol {x} _ {i}} = \\mathbb {P} (\\boldsymbol {x} _ {i} \\text {a n d} \\boldsymbol {x} _ {i} ^ {\\prime} = \\boldsymbol {t} \\text {f o r a p o s i t i v e p a i r} | \\boldsymbol {x} _ {i}) = \\phi (\\boldsymbol {t} - \\boldsymbol {x} _ {i}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.569, + 0.825, + 0.6 + ], + "angle": 0, + "content": "For isotropic Gaussian densities with mean 0 and covariance matrix \\(\\sigma^2\\mathbf{I}\\), \\(\\phi (\\pmb {t} - \\pmb {x}_i)\\propto \\exp (-\\| \\pmb {t} - \\pmb {x}_i\\| _2^2 /2\\sigma^2)\\), which is monotonic with the \\(l_{2}\\) distance between \\(\\pmb{t}\\) and \\(\\pmb{x}_i\\)" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.616, + 0.406, + 0.632 + ], + "angle": 0, + "content": "A.3 INVESTIGATIONS ON \\(C(f)\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.643, + 0.825, + 0.672 + ], + "angle": 0, + "content": "Figures A.4 and A.5 illustrate the evolution of different complexity measurements during the training process under the Gaussian mixture setting and the CIFAR-10 respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.678, + 0.827, + 0.707 + ], + "angle": 0, + "content": "In the Gaussian mixture setting, the feature extractor is a fully connected ReLU network. Besides \\( C(f) \\), we also evaluate the popular sum of squared weights. 
The observations on SimCLR are listed as below:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.718, + 0.825, + 0.761 + ], + "angle": 0, + "content": "- The expected Lipschitz constant \\( C(f) \\) is small in initialization. It first increases (till around 100 iterations) and then consistently decreases. This empirically supports the implicit bias towards minimizing \\( C(f) \\)." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.766, + 0.652, + 0.781 + ], + "angle": 0, + "content": "- \\( C(f) \\) and the sum of squared weights share very similar patterns." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.786, + 0.825, + 0.814 + ], + "angle": 0, + "content": "- The SNE loss is non-increasing, as if we are doing stochastic neighbor embedding using \\( l_{2} \\)-distance." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.718, + 0.825, + 0.814 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.925 + ], + "angle": 0, + "content": "In the CIFAR-10 case, the feature extractor is ResNet-18 plus a fully-connected projection layer. The output from ResNet-18 is usually called representation (512 dimensional) and is utilized for downstream tasks while the projection (128 dimension) is used for training. Such a representation-projection set up is common in SSCL. Ma et al. (2023) aimed to decipher the projection head and revealed that the projection feature tends to be more uniformly distributed while the representation feature exhibits stronger alignment. Besides \\( C(f) \\), we also evaluate the \\( l_{2} \\)-norm of the representation. 
The observations for SimCLR and \\( t \\)-SimCLR on CIFAR-10 are summarized as below:" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.125, + 0.685, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.332, + 0.828, + 0.404 + ], + "angle": 0, + "content": "Figure A.4: Empirical evaluation on the complexity of the learned feature mapping during training under the Gaussian mixture setting. Two complexity measurements are considered, i.e., \\( C(f) \\) as in (3.3) and the SNE loss as in (2.2). The SNE loss here only serves as in indicator for how well the pairwise distances are preserved. The training objective is the standard InfoNCE loss. The SNE loss decreases quickly until in the first 100 iterations and then stays flat." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.425, + 0.825, + 0.467 + ], + "angle": 0, + "content": "- \\( C(f) \\) for the projection layer shares similar patterns as in the Gaussian mixture case, first increase and then decreases. However, \\( C(f) \\) for the representation layer monotonically decreases." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.471, + 0.825, + 0.499 + ], + "angle": 0, + "content": "- \\( C(f) \\) for the projection layer and the \\( l_{2} \\)-norm in the representation layer share almost identical patterns." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.503, + 0.825, + 0.531 + ], + "angle": 0, + "content": "- Comparing SimCLR, both the calculated \\( C(f) \\) and \\( l_{2} \\)-norm are much smaller for \\( t \\)-SimCLR." 
+ }, + { + "type": "list", + "bbox": [ + 0.217, + 0.425, + 0.825, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.542, + 0.828, + 0.585 + ], + "angle": 0, + "content": "In conclusion, on one hand, our empirical results demonstrate that the complexity of the feature extractor \\( C(f) \\) does decrease during training and seem to be implicitly minimized. On the other hand, its trend is shared with other more popularly used complexity measurements." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.6, + 0.407, + 0.615 + ], + "angle": 0, + "content": "A.4 PROOF OF COROLLARY 3.6" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.626, + 0.827, + 0.681 + ], + "angle": 0, + "content": "In this section, we illustrate with rigor how the hypothesized implicit bias can give rise to structure-preserving property of SSCL. Corollary 3.6 states that minimizing the (Lipschitz) complexity of the feature mapping will also result in the best match between \\( P \\) and \\( Q \\) (under permutation). To provide more theoretical insight, we present the following lemma in the simpler vector-matching case." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.685, + 0.825, + 0.727 + ], + "angle": 0, + "content": "Lemma A.1. Let \\( 0 < x_{1} < \\dots < x_{m} \\) and \\( 0 < y_{1} < \\dots < y_{m} \\) be two real-valued sequences, normalized such that \\( \\sum_{i=1}^{m} x_{i}^{2} = \\sum_{i=1}^{m} y_{i}^{2} = 1 \\). Consider a permutation \\( \\pi \\) of \\( \\{1, \\dots, m\\} \\) and denote all such permutations as \\( T \\). 
Then" + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.731, + 0.666, + 0.771 + ], + "angle": 0, + "content": "\\[\n\\underset {\\pi \\in T} {\\operatorname {a r g m i n}} \\sum_ {i = 1} ^ {m} \\frac {y _ {\\pi (i)}}{x _ {i}} = \\underset {\\pi \\in T} {\\operatorname {a r g m i n}} \\sum_ {i = 1} ^ {m} \\left(x _ {i} - y _ {\\pi (i)}\\right) ^ {2} := \\pi^ {*},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.774, + 0.394, + 0.789 + ], + "angle": 0, + "content": "where \\(\\pi^{*}(i) = i\\) for all \\(i = 1,\\dots ,m\\)" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.804, + 0.494, + 0.819 + ], + "angle": 0, + "content": "Proof. By the rearrangement inequality, we have" + }, + { + "type": "equation", + "bbox": [ + 0.434, + 0.822, + 0.564, + 0.861 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 1} ^ {m} \\frac {y _ {\\pi (i)}}{x _ {i}} \\geq \\sum_ {i = 1} ^ {m} \\frac {y _ {i}}{x _ {i}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.865, + 0.241, + 0.879 + ], + "angle": 0, + "content": "Similarly," + }, + { + "type": "equation", + "bbox": [ + 0.283, + 0.883, + 0.713, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\sum_ {i = 1} ^ {m} \\left(x _ {i} - y _ {\\pi (i)}\\right) ^ {2} = \\sum_ {i = 1} ^ {m} x _ {i} ^ {2} + \\sum_ {i = 1} ^ {m} y _ {i} ^ {2} - 2 \\sum_ {i = 1} ^ {m} x _ {i} \\cdot y _ {\\pi (i)} \\geq 2 - 2 \\sum_ {i = 1} ^ {m} x _ {i} \\cdot y _ {i}.\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.123, + 0.737, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.418, + 0.294, + 0.581, + 0.308 + ], + "angle": 0, + "content": "(a) SimCLR on CIFAR-10." 
+ }, + { + "type": "image", + "bbox": [ + 0.279, + 0.339, + 0.734, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.411, + 0.511, + 0.584, + 0.525 + ], + "angle": 0, + "content": "(b) \\(t\\)-SimCLR on CIFAR-10." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.541, + 0.828, + 0.613 + ], + "angle": 0, + "content": "Figure A.5: Empirical evaluation on the complexity of the learned feature mapping during training on CIFAR-10. Two complexity measurements are considered, i.e., \\( C(f) \\) as in (3.3) and \\( l_{2} \\)-norm. Specifically, we calculate the expected Lipschitz constant on both the representation layer (512-dimensional) and the projection layer (128-dimensional). Figure (a) and (b) show the trends (along the 200 training epochs) for SimCLR and \\( t \\)-SimCLR respectively." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.639, + 0.825, + 0.651 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.672, + 0.825, + 0.716 + ], + "angle": 0, + "content": "Lemma A.1 gives a vector-version illustration of our Corollary 3.6, stating that minimizing the expected derivative (to zero) of the mapping function \\( f \\), i.e., \\( \\sum_{i}f(x_{i}) / x_{1} \\) leads to preserving the norm difference of the input vector and output vector." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.721, + 0.457, + 0.737 + ], + "angle": 0, + "content": "Next, we provide the proof of Theorem 3.5." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.755, + 0.531, + 0.771 + ], + "angle": 0, + "content": "Proof of Theorem 3.5. 
Straightforwardly, we can write" + }, + { + "type": "equation", + "bbox": [ + 0.327, + 0.78, + 0.672, + 0.9 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\bar {P} - Q ^ {\\pi} \\right\\| _ {F} = \\sum_ {i \\neq j} \\left(\\frac {1}{p _ {i j}} + q _ {\\pi (i) \\pi (j)}\\right) ^ {2} \\\\ = \\sum_ {i \\neq j} \\frac {1}{p _ {i j} ^ {2}} + \\sum_ {i \\neq j} q _ {\\pi (i) \\pi (j)} ^ {2} + 2 \\sum_ {i \\neq j} \\frac {q _ {\\pi (i) \\pi (j)}}{p _ {i j}} \\\\ = 2 C _ {1} (P, Q ^ {\\pi}) + \\sum_ {i \\neq j} \\frac {1}{p _ {i j} ^ {2}} + \\sum_ {i \\neq j} q _ {i j} ^ {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.909, + 0.548, + 0.926 + ], + "angle": 0, + "content": "Thus, minimizing \\( C_1(P, Q^\\pi) \\) also minimizes \\( \\| \\bar{P} - Q^\\pi \\|_F \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Theorem 3.5 is a straightforward generalization of Lemma A.1. Next, we provide proof for Corollary 3.6, restated below." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.158, + 0.812, + 0.177 + ], + "angle": 0, + "content": "Proof of Corollary 3.6. 
Recall the SimCLR loss \\( L_{\\mathrm{InfoNCE}} = \\frac{1}{2n}\\sum_{i = 1}^{n}(l(\\pmb{x}_i,\\pmb{x}_i')) + l(\\pmb{x}_i',\\pmb{x}_i)) \\), where" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.186, + 0.7, + 0.224 + ], + "angle": 0, + "content": "\\[\nl (\\pmb {x} _ {i}, \\pmb {x} _ {i} ^ {\\prime}) = - \\log \\frac {\\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i}) , f (\\pmb {x} _ {i} ^ {\\prime})) / \\tau)}{\\sum_ {x \\in \\mathcal {D} _ {n} \\cup \\mathcal {D} _ {n} ^ {\\prime} \\setminus \\{\\pmb {x} _ {i} \\}} \\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i}) , f (\\pmb {x})) / \\tau)}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.233, + 0.827, + 0.293 + ], + "angle": 0, + "content": "Without loss of generality, let \\(\\tau = 1\\). Notice that \\(l(\\pmb{x}_i, \\pmb{x}_i')\\) is monotonically decreasing as \\(\\mathrm{sim}(f(\\pmb{x}_i), f(\\pmb{x}_i'))\\) increases, due to the monotonicity of function \\(\\frac{x}{x + c}\\) with respect to \\(x > 0\\) for any \\(c > 0\\). Hence, in order for \\(L_{\\mathrm{InfoNCE}}\\) to be minimized, perfect alignment is required, i.e., \\(f(\\pmb{x}_i) = f(\\pmb{x}_i')\\) for any \\(i = 1, \\dots, n\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.298, + 0.825, + 0.329 + ], + "angle": 0, + "content": "With perfect alignment achieved, \\( L_{\\mathrm{InfoNCE}} \\) only concerns the pairwise similarity between negative samples \\( f(\\pmb{x}_i) \\)'s, which can be simplified as \\( L_{\\mathrm{InfoNCE}} \\geq L_{\\mathrm{uniform}} \\) where" + }, + { + "type": "equation", + "bbox": [ + 0.293, + 0.338, + 0.7, + 0.48 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L _ {\\text {u n i f o r m}} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} - \\log \\frac {e}{e + \\sum_ {j \\neq i} \\exp (\\sin (f (\\boldsymbol {x} _ {i}) , f (\\boldsymbol {x} _ {j})))} \\\\ \\geq \\log \\left(\\frac {1}{n} \\sum_ {i = 1} ^ {n} \\left(1 + \\frac {1}{e} \\sum_ {j \\neq i} \\exp (\\sin (f (\\boldsymbol {x} _ {i}), f (\\boldsymbol {x} _ {j})))\\right)\\right) \\\\ \\geq \\log \\left(1 + \\frac {1}{n \\cdot e} \\sum_ {1 \\leq i \\neq j \\leq n} \\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i}), f (\\pmb {x} _ {j})))\\right). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.496, + 0.825, + 0.525 + ], + "angle": 0, + "content": "\\(L_{\\mathrm{uniform}}\\) can be minimized by mapping \\(\\pmb{x}_i\\)'s as distant as possible, hence the connection to Tammas problem and the uniformity principle." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.531, + 0.826, + 0.575 + ], + "angle": 0, + "content": "With sufficient capacity of the feature mapping \\( f \\), the SimCLR loss can be minimized to its (empirical) global minima. However, such \\( f \\) is not unique since \\( L_{\\mathrm{InfoNCE}} \\) is invariant to permutations of mapping relationships from \\( x_i \\) to \\( f(x_i) \\). 
If \\( f_n^* \\) further minimizes \\( C(f) \\) on the sample level, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.584, + 0.69, + 0.623 + ], + "angle": 0, + "content": "\\[\nf_{n}^{*}:= \\operatorname *{argmin}_{f}C_{n}(f) = \\operatorname *{argmin}_{f}\\sum_{1\\leq i\\neq j\\leq n}\\frac{\\|f(\\boldsymbol{x}_{i}) - f(\\boldsymbol{x}_{j})\\|_{2}}{\\|\\boldsymbol{x}_{i} - \\boldsymbol{x}_{j}\\|_{2}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.633, + 0.827, + 0.678 + ], + "angle": 0, + "content": "Then, \\( f_{n}^{*} \\) also solves a type of SNE problem with uniformity constraint (3.4) as stated in Theorem 3.5. To see this, if we define \\( q_{ij} = -\\|f(\\pmb{x}_i) - f(\\pmb{x}_j)\\|_2 \\) and \\( p_{ij} = -\\|x_i - x_j\\|_2 \\), which is reasonable since the larger the distance, the smaller the similarity, we can directly apply the results in Theorem 3.5." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.683, + 0.825, + 0.695 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.715, + 0.827, + 0.815 + ], + "angle": 0, + "content": "Remark A.2. As can be seen from Theorem 3.5 and the proof of Corollary 3.6, we showcase the relationship between minimizing \\( C(f) \\) and structure preserving property by considering a special SNE problem, where the pairwise similarity is not modeled by Gaussian as standard, hence the word \"resembling\" in Corollary 3.6. Although \\( q_{ij} = -\\| f(\\pmb{x}_i) - f(\\pmb{x}_j)\\|_2 \\) is unorthodox, it is reasonable since the larger the distance, the smaller the similarity. If we consider the SNE method as in Hinton et al. (2006), our proof does not go through directly and demands more complicated analysis. However, our results are still valid in connecting the complexity of the feature map to the pairwise similarity matching." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Our statement in Corollary 3.6 requires perfect alignment or perfect uniformity. When the assumptions are not perfectly met, we can still obtain insights for the resulting feature mapping. Alignment and uniformity (Wang & Isola, 2020) is not the whole story of contrastive learning, and our identified structure-preserving property implicitly induced by complexity minimization provides an other angle of the learning process. From this perspective, contrastive learning can be thought of as a combination of alignment and SNE with uniformity constraint. In Figure A.3, while obtaining approximate alignment and uniformity, the feature mapping also preserves the relative relationships of the clusters (labels)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.533, + 0.119 + ], + "angle": 0, + "content": "A.5 ALIGNMENT AND UNIFORMITY OF T-SIMCLR" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.825, + 0.16 + ], + "angle": 0, + "content": "Due to the change of training objective, we may want to reevaluate the properties of the learned feature from \\( t \\)-SimCLR. We will show that alignment still hold while uniformity is changed (to infinity)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.166, + 0.827, + 0.264 + ], + "angle": 0, + "content": "Let us consider a compact region \\(\\Omega \\subset \\mathbb{R}^d\\) and \\(\\pmb{x}_i \\in \\Omega\\). Let \\(t\\) be the transformation such that the augmented data point \\(\\pmb{x}_i' = t(\\pmb{x}_i)\\) is still in \\(\\Omega\\). 
Wang & Isola (2020) showed that the contrastive loss can be decomposed into the alignment loss and the uniformity loss. Zimmermann et al. (2021) further showed that the contrastive loss converges to the cross-entropy between latent distributions, where the underlying latent space is assumed to be uniform, and the positive pairs are specified to be an exponential distribution. In this section, we show a parallel result, which states that in the population level, the \\(t\\)-SNE loss is the cross-entropy between two distributions of generating positive pairs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.266, + 0.827, + 0.295 + ], + "angle": 0, + "content": "Theorem A.3. Let \\( H(\\cdot, \\cdot) \\) be the cross entropy between distributions. Let \\( p(x) \\) be the density of \\( x \\), \\( p(\\cdot | x) \\) be the conditional density of generating a positive pair, and define" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.297, + 0.78, + 0.331 + ], + "angle": 0, + "content": "\\[\nq _ {f} \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right) = C _ {f} (\\boldsymbol {x}) ^ {- 1} \\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}}, \\text {w i t h} C _ {f} (\\boldsymbol {x}) = \\int_ {\\Omega} \\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} d \\boldsymbol {x} ^ {\\prime}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.332, + 0.271, + 0.345 + ], + "angle": 0, + "content": "Then, we have" + }, + { + "type": "equation", + "bbox": [ + 0.344, + 0.349, + 0.825, + 0.366 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} (H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) = L _ {a} (f) + L _ {u} (f), \\tag {A.1}\n\\]" + }, + { + "type": "text", + 
"bbox": [ + 0.171, + 0.368, + 0.594, + 0.383 + ], + "angle": 0, + "content": "which corresponds to the population-level \\( t \\)-SimCLR loss where" + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.385, + 0.667, + 0.402 + ], + "angle": 0, + "content": "\\[\nL _ {a} = \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\mathbb {E} _ {\\boldsymbol {x} \\sim p \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right)} \\log \\left(1 + \\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2} ^ {2}\\right),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.332, + 0.404, + 0.662, + 0.423 + ], + "angle": 0, + "content": "\\[\nL _ {u} = \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\log \\mathbb {E} _ {\\widetilde {\\boldsymbol {x}} \\sim p (\\widetilde {\\boldsymbol {x}})} \\big (1 + \\| f (\\boldsymbol {x}) - f (\\widetilde {\\boldsymbol {x}}) \\| _ {2} ^ {2} \\big) ^ {- 1}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.436, + 0.287, + 0.45 + ], + "angle": 0, + "content": "Proof. 
Note that" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.453, + 0.895, + 0.575 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) \\\\ = - \\int_ {\\Omega} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\log \\left(\\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2} ^ {2}}\\right) d \\boldsymbol {x} ^ {\\prime} + \\log C _ {f} (\\boldsymbol {x}) \\\\ = \\int_ {\\Omega} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\log (1 + \\| f (\\boldsymbol {x}) - f (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} - \\int_ {\\Omega} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\log (p (\\boldsymbol {x} ^ {\\prime})) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} + \\log \\int_ {\\Omega} \\frac {p (\\boldsymbol {x} ^ {\\prime})}{1 + \\| f (\\boldsymbol {x}) - f (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}} \\mathrm {d} \\boldsymbol {x} ^ {\\prime} \\\\ = \\int_ {\\Omega} p (\\pmb {x} ^ {\\prime} | \\pmb {x}) \\log (1 + \\| f (\\pmb {x}) - f (\\pmb {x} ^ {\\prime}) \\| _ {2} ^ {2}) \\mathrm {d} \\pmb {x} ^ {\\prime} - \\int_ {\\Omega} p (\\pmb {x} ^ {\\prime} | \\pmb {x}) \\log (p (\\pmb {x} ^ {\\prime})) \\mathrm {d} \\pmb {x} ^ {\\prime} + \\log \\mathbb {E} _ {\\pmb {x} ^ {\\prime} \\sim p (\\pmb {x} ^ {\\prime})} (1 + \\| f (\\pmb {x}) - f (\\pmb {x} ^ {\\prime}) \\| _ {2} ^ {2}) ^ {- 1}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.576, + 0.465, + 0.59 + ], + "angle": 0, + "content": "Taking expectation with respect to \\( x \\) leads to" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.593, + 0.806, + 0.684 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) \\\\ = \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\mathbb {E} _ {\\boldsymbol {x} ^ {\\prime} \\sim p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x})} \\log (1 + \\| f (\\boldsymbol {x}) - f (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}) + \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\log \\mathbb {E} _ {\\widetilde {\\boldsymbol {x}} \\sim p (\\widetilde {\\boldsymbol {x}})} (1 + \\| f (\\boldsymbol {x}) - f (\\widetilde {\\boldsymbol {x}}) \\| _ {2} ^ {2}) ^ {- 1} \\\\ - \\int_ {\\Omega} \\int_ {\\Omega} p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right) \\log \\left(p \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) d \\boldsymbol {x} ^ {\\prime} d \\boldsymbol {x} \\\\ = L _ {a} (f) + L _ {u} (f) - C _ {p}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.686, + 0.217, + 0.698 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.25, + 0.699, + 0.748, + 0.732 + ], + "angle": 0, + "content": "\\[\nC _ {p} = \\int_ {\\Omega} \\int_ {\\Omega} p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right) \\log \\left(p \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} \\mathrm {d} \\boldsymbol {x} = \\int_ {\\Omega} \\int_ {\\Omega} p \\left(\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}\\right) \\log \\left(p \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} \\mathrm 
{d} \\boldsymbol {x}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.733, + 0.317, + 0.748 + ], + "angle": 0, + "content": "does not depend on \\( f \\)." + }, + { + "type": "equation", + "bbox": [ + 0.302, + 0.751, + 0.697, + 0.909 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) \\\\ = \\int_ {\\Omega} p (\\boldsymbol {x}) \\frac {1}{p (\\boldsymbol {x})} \\int_ {\\Omega} p (\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}) \\log \\left(\\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2} ^ {2}}\\right) d \\boldsymbol {x} ^ {\\prime} d \\boldsymbol {x} \\\\ - \\int_ {\\Omega} \\int_ {\\Omega} \\frac {p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} d \\boldsymbol {x} d \\boldsymbol {x} ^ {\\prime} \\\\ = \\int_ {\\Omega} \\int_ {\\Omega} p (\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}) \\log \\left(\\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}}\\right) d \\boldsymbol {x} ^ {\\prime} d \\boldsymbol {x} \\\\ - \\int_ {\\Omega} \\int_ {\\Omega} \\frac {p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} d \\boldsymbol {x} d \\boldsymbol {x} ^ {\\prime}. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.911, + 0.826, + 0.923 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.827, + 0.178 + ], + "angle": 0, + "content": "In Theorem A.3, \\( L_{a} \\) is the alignment loss and \\( L_{u} \\) is the uniformity loss. The decomposition is much more natural for \\( t \\)-SimCLR as opposed to that in \\( L_{\\mathrm{InfoNCE}} \\), mainly due to the change from conditional to joint distribution when modeling the pairwise similarity. Furthermore, if the \\( t \\)-SimCLR loss is minimized, we must have \\( p(\\cdot | \\boldsymbol{x}) = q_{f}(\\cdot | \\boldsymbol{x}) \\), provided \\( f \\) has sufficient capacity. Note that if \\( p(\\cdot | \\boldsymbol{x}) = q_{f}(\\cdot | \\boldsymbol{x}) \\), then \\( P_{j|i} \\) and \\( Q_{j|i} \\) are perfectly matched, which indicates that we obtain a perfect neighbor embedding." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.181, + 0.604, + 0.197 + ], + "angle": 0, + "content": "Theorem A.3 implies that the optimal feature mapping \\( f^{*} \\) satisfies" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.2, + 0.56, + 0.216 + ], + "angle": 0, + "content": "\\[\np (\\cdot | \\boldsymbol {x}) = q _ {f ^ {*}} (\\cdot | \\boldsymbol {x}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.219, + 0.438, + 0.234 + ], + "angle": 0, + "content": "which further implies that for any \\( \\pmb{x} \\in \\Omega \\)" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.238, + 0.825, + 0.306 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} C _ {f ^ {*}} (\\boldsymbol {x}) ^ {- 1} \\frac {p (\\boldsymbol {x} ^ {\\prime})}{1 + \\| f ^ {*} (\\boldsymbol {x}) - f ^ {*} (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}} \\propto C (\\boldsymbol {x}) ^ {- 1} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\\\ \\Leftrightarrow C _ {f ^ {*}} \\left(\\boldsymbol {x}\\right) ^ {- 1} \\frac {1}{1 + \\left\\| f ^ {*} (\\boldsymbol {x}) - f ^ {*} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} \\propto C (\\boldsymbol {x}) ^ {- 1} \\frac {p \\left(\\boldsymbol {x} , \\boldsymbol {x} ^ {\\prime}\\right)}{p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}, \\tag {A.2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.825, + 0.395 + ], + "angle": 0, + "content": "where \\( C(\\pmb{x}) = \\int p(\\pmb{x}'|\\pmb{x})\\mathrm{d}\\pmb{x}' \\). Unlike the usual normalized SimCLR, \\( t \\)-SNE does not assume any special structure on \\( f \\) (e.g., \\( \\| f\\| _2 = 1 \\)), thus \\( f \\) can go to infinity. Comparing to the finite sample \\( t \\)-SimCLR loss, the population version is trickier to analyze. 
This is because for a given point \\( \\pmb{x}' \\), it can be an augmented sample of some \\( \\pmb{x} \\) (with probability \\( p(\\pmb{x}'|\\pmb{x}) \\)), or a negative sample of \\( \\pmb{x} \\) (when we treat \\( \\pmb{x}' \\) as another sample point). This reflects the essential difficulty between population and finite samples in contrastive learning, not only for \\( t \\)-SimCLR." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.4, + 0.827, + 0.513 + ], + "angle": 0, + "content": "For clustered data, (A.2) provides two important messages, provided that the augmentation is not too extreme and the augmented sample \\( \\pmb{x}^{\\prime} \\) stays in the same cluster as the original \\( \\pmb{x} \\). On one hand, when \\( \\pmb{x}_1 \\) and \\( \\pmb{x}_2 \\) belong to different clusters, the joint density \\( p(\\pmb{x} = \\pmb{x}_1, \\pmb{x}^{\\prime} = \\pmb{x}_2) \\) will be very small, close to zero, which indicates that \\( \\| f^{*}(\\pmb{x}_{1}) - f^{*}(\\pmb{x}_{2}) \\|_{2} \\) is very large, tending to infinity. On the other hand, for \\( \\pmb{x}_1 \\) and \\( \\pmb{x}_2 \\) belonging to the same cluster, \\( p(\\pmb{x} = \\pmb{x}_1, \\pmb{x}^{\\prime} = \\pmb{x}_2) \\) will be relatively large. Hence, the features of the same cluster will stay close. Overall, we will observe similar clustered structure in the feature space. This is confirmed in the Gaussian mixture setting in Figure 1(c), in which case, the problem can be oversimplified as mapping 5 points in \\( \\mathbb{R}^2 \\) to the unit-circle." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.531, + 0.658, + 0.547 + ], + "angle": 0, + "content": "B CONNECTION TO DISTANCE BETWEEN DISTRIBUTIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.562, + 0.827, + 0.646 + ], + "angle": 0, + "content": "Through the lens of stochastic neighbor embedding, the feature learning process of SSCL methods can be seen as minimizing certain \"distances\" between distributions in different dimensions. Ideally, the feature should preserve the distributional information about the data. Since the data and the feature do not lie in the same metric space, quantitatively measuring their distributional distance is difficult. Fortunately, there are existing tools we can utilize, specifically, Gromov-Wasserstein distance (Mémoli, 2011; Salmona et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.652, + 0.827, + 0.694 + ], + "angle": 0, + "content": "Let \\(\\mathcal{X}\\), \\(\\mathcal{Z}\\) be two Polish spaces, each endowed respectively with probability measures \\(p_x\\) and \\(p_z\\). 
Given two measurable cost functions \\(c_x: \\mathcal{X} \\times \\mathcal{X} \\to \\mathbb{R}\\), \\(c_z: \\mathcal{Z} \\times \\mathcal{Z} \\to \\mathbb{R}\\), and \\(D: \\mathbb{R} \\times \\mathbb{R} \\to \\mathbb{R}\\), the Gromov-Wasserstein distance can be defined as" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.697, + 0.794, + 0.734 + ], + "angle": 0, + "content": "\\[\nG W _ {p} (p _ {x}, p _ {z} | c _ {x}, c _ {z}) := \\left(\\inf _ {\\pi \\in \\prod (p _ {x}, p _ {z})} \\int_ {\\mathcal {X} ^ {2} \\times \\mathcal {Z} ^ {2}} D (c _ {x} (x, x ^ {\\prime}), c _ {z} (z, z ^ {\\prime})) ^ {p} d \\pi (x, z) d \\pi (x ^ {\\prime}, z ^ {\\prime})\\right) ^ {1 / p},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.827, + 0.839 + ], + "angle": 0, + "content": "where \\(\\prod(p_x,p_z)\\) denotes all the joint distributions in \\(\\mathcal{X}\\times \\mathcal{Z}\\) such that the marginals are \\(p_x\\) and \\(p_z\\). Typically, \\(D(c_{x},c_{z})\\) is chosen to be \\(|c_{x} - c_{z}|\\) and \\(c_{x}(x,x^{\\prime})\\) is usually chosen to be \\(\\| x - x^{\\prime}\\| _p\\). The key idea of the Gromov-Wasserstein distance to circumvent the dimension mismatch is to change from comparing marginal distribution to pairwise distributions, which is very similar to the SNE objective. Consider Monge's formulation of the optimal transportation problem and let \\(z = f(x)\\). 
By choosing \\(c_{z}(z_{i},z_{j}) = \\log (\\widetilde{Q}_{j|i})\\) with \\(\\widetilde{Q}\\) specified as in (3.1), \\(c_{x}(x_{i},x_{j}) = \\widetilde{P}_{j|i}\\) with \\(\\widetilde{P}\\) specified as in (3.2) and letting \\(D(c_{x},c_{z}) = c_{x}(\\log (c_{x}) - \\log (c_{z}))\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.316, + 0.841, + 0.68, + 0.859 + ], + "angle": 0, + "content": "\\[\nG W _ {1} \\left(p _ {x}, p _ {f (x)}\\right) \\leq \\mathbb {E} _ {x, x ^ {\\prime}} \\left(D \\left(c _ {x} \\left(x, x ^ {\\prime}\\right), c _ {z} \\left(f (x), f \\left(x ^ {\\prime}\\right)\\right)\\right)\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.861, + 0.825, + 0.891 + ], + "angle": 0, + "content": "where the right hand side recovers the expected InfoNCE loss. Hence, the SNE perspective can also be viewed as minimizing the Gromov-Wasserstein distance between \\( p_z \\) and \\( p_x \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.926 + ], + "angle": 0, + "content": "It is worth noting that such an interpretation only relates to contrastive learning, not including generative-based self-supervised learning methods such as Masked AutoEncoder (MAE) (He et al., 2021)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.121, + 0.645, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.301, + 0.825, + 0.331 + ], + "angle": 0, + "content": "Figure C.6: Nearest neighbor test accuracy vs. training epochs. SimCLR and \\( t \\)-SimCLR share similar trends and convergence speed." 
+ }, + { + "type": "image", + "bbox": [ + 0.356, + 0.364, + 0.632, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.547, + 0.825, + 0.577 + ], + "angle": 0, + "content": "Figure C.7: The histogram of IoUs for 1000 constructed positive pairs in CIFAR-10. The empirical distribution is almost symmetric around 0.5." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.6, + 0.396, + 0.615 + ], + "angle": 0, + "content": "C EXPERIMENT DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.631, + 0.367, + 0.644 + ], + "angle": 0, + "content": "C.1 CIFAR-10 SETTINGS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.656, + 0.827, + 0.77 + ], + "angle": 0, + "content": "CIFAR-10 (Krizhevsky, 2009) is a colorful image dataset with 50000 training samples and 10000 test samples from 10 categories. We use ResNet-18 (He et al., 2016) as the feature extractor, and the other settings such as projection head all follow the original settings of SimCLR (Chen et al., 2020a). To evaluate the quality of the features, we follow the KNN evaluation protocol (Wu et al., 2018), which computes the cosine similarities in the embedding space between the test image and its nearest neighbors, and makes the prediction via weighted voting. We train each model with batch size of 256 and 200 epochs for quicker evaluation. For \\( t \\)-SimCLR, without specifying otherwise, we grid search the \\( t_{df} \\) and \\( \\tau \\) with range \\( \\{1, 2, 5, 10\\} \\) and \\( \\{1, 2, 5, 10\\} \\) respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.783, + 0.827, + 0.853 + ], + "angle": 0, + "content": "Ablation of training epochs We also run the SimCLR and \\( t \\)-SimCLR experiments in the more standard 1000 epochs setting. For SimCLR, we use batch size of 512, learning rate of 0.3, temperature of 0.7, and weight decay of 0.0001. 
For \\( t \\)-SimCLR, we use batch size of 512, learning rate of 0.8, temperature of 10, weight decay of 0.0002, and \\( t_{df} = 5 \\). The nearest neighbor accuracy for SimCLR is \\( 87.2\\% \\) vs. that for \\( t \\)-SimCLR is \\( 88.8\\% \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.383, + 0.884 + ], + "angle": 0, + "content": "C.2 IMAGE AUGMENTATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "When processing images, several popular augmentations are usually adopted (following the setting in SimCLR Chen et al. (2020a)), e.g., random resized crop (crops a random portion of image and resize it" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.319, + 0.123, + 0.691, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.332, + 0.828, + 0.377 + ], + "angle": 0, + "content": "Figure C.8: Extension on Figure 2(b). Nearest neighbor classification accuracy for SimCLR vs. \\( t \\)-SimCLR on both CIFAR-10 (in-distribution) and CIFAR-100 (out-of-distribution) using different feature dimensions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.398, + 0.828, + 0.498 + ], + "angle": 0, + "content": "to the original size), horizontal flip, color jitter (randomly change the brightness, contrast, saturation and hue of an image). To illustrate the natural weighting scheme in Section 4.1, we considered random resized crop and specified the weights by the IoU (intersection over union) of the positive pair. In particular, two augmented images are created from an anchor image. 
Each augmentation crops a rectangular region of the image, denoted by \\( r_1, r_2 \\) respectively, and their IoU is defined by the area of intersection \\( r_1 \\cap r_2 \\) divided by the area of the union \\( r_1 \\cup r_2 \\). The IoU is always between 0 and 1. In our experiment, we chose the default settings and Figure C.7 illustrates the IoU histogram of 1000 constructed positive pairs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.513, + 0.466, + 0.527 + ], + "angle": 0, + "content": "C.3 DEGREE OF FREEDOM IN \\(t\\)-SIMCLR" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.569, + 0.825, + 0.64 + ], + "angle": 0, + "content": "Feature dimension efficiency in OOD case. To further investigate the generalization ability of SSCL methods, we devise a challenging setting where the model is trained on CIFAR-10 and tested on CIFAR-100 classification. In this case, we evaluate the effect of increasing feature dimensions in the projection layer, as an extension on the CIFAR-10 in-distribution case. The results are shown in Figure C.8, where there are two things to note:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.651, + 0.825, + 0.677 + ], + "angle": 0, + "content": "- The gain of extra dimensions in the OOD case does vanish later than that in the in-distribution case." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.683, + 0.825, + 0.726 + ], + "angle": 0, + "content": "- The advantage of SimCLR vs. \\( t \\)-SimCLR is very significant with around \\( 10\\% \\) improvement when \\( d = 128 \\) using nearest neighbor classification, indicating that \\( t \\)-SimCLR produces better separated clusters." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.651, + 0.825, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.746, + 0.826, + 0.845 + ], + "angle": 0, + "content": "Relationship between \\( t_{df} \\) and \\( d_z \\). 
The larger the degree of freedom \\( t_{df} \\), the less heavy-tail the t-distribution. As \\( d_z \\) decreases, the crowding problem becomes more severe and as recommended by (Van der Maaten & Hinton, 2008), a smaller \\( t_{df} \\) tends to work better. We evaluate the sensitivity of \\( t_{df} \\) (1, 5, 10) under different choices of \\( d_z \\) (1, 2, 4, 8, 16, 32, 64, 128) in CIFAR-10 and the results are reported in Figure C.9. As can be seen, when \\( d_z \\) is small (1, 2, 4, 8), \\( t_{df} = 1 \\) outperforms. Comparing \\( t_{df} = 5 \\) and \\( t_{df} = 10 \\), the two perform similarly when \\( d_z \\) is large (16, 32, 64, 128) but the smaller \\( t_{df} = 5 \\) yields better accuracy when \\( d_z = 1, 2, 4 \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.859, + 0.827, + 0.889 + ], + "angle": 0, + "content": "Tuning temperature vs. tuning \\( t_{df} \\). As illustrated in Section 4.2, when the feature space dimension is low, the heavy-tailed t-distribution is a better choice than Gaussian to alleviate the crowding problem." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.897, + 0.825, + 0.926 + ], + "angle": 0, + "content": "5When evaluating by training linear classifiers for 100 epochs, the accuracy for SimCLR is \\(46.4\\%\\) and that for \\(t\\)-SimCLR is \\(48.14\\%\\) (averaged over 3 replications)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.104, + 0.691, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.287, + 0.825, + 0.318 + ], + "angle": 0, + "content": "Figure C.9: Nearest neighbor classification accuracy on CIFAR-10 for \\( t \\)-SimCLR using different feature dimensions and different degrees of freedom (t_df)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.341, + 0.828, + 0.455 + ], + "angle": 0, + "content": "Even though tuning the temperature of \\( L_{\\mathrm{InfoNCE}} \\), i.e., making \\( \\tau \\) larger, can also have the effect of making the distribution less concentrated (\\( \\tau \\) can be seen as the standard deviation), tuning temperature and tuning \\( t_{df} \\) are fundamentally different. The former is controlling how fast does the similarity \\( Q_{i,j} \\) decays as the distance between \\( z_i \\) and \\( z_j \\) increases, while the latter serves as a scaling factor, offering constant level modification of the scheme. In our experiments with SimCLR vs \\( t \\)-SimCLR on CIFAR-10, temperature is tuned as a hyperparameter. The difference in \\( \\tau \\) can never make up to the difference between the baseline SimCLR and \\( t \\)-SimCLR. We found \\( \\tau = 0.5 \\) to work better for the base SimCLR while larger \\( \\tau \\) works better with our \\( t \\)-SimCLR. We recommend \\( \\tau = 5 \\) as the default choice." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.469, + 0.403, + 0.485 + ], + "angle": 0, + "content": "C.4 IMAGENET PRE-TRAINING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.495, + 0.825, + 0.595 + ], + "angle": 0, + "content": "To show the ability for large scale domain transfer and OOD generalization, we conduct experiments on ImageNet pre-training based on MoCo-v2 with its official implementation6. We follow most of their settings, e.g., data augmentation, 200 epochs pre-training, and optimization strategy, etc. The loss is modified according to Section 4.2 and batch normalization is applied along every dimension. We grid search the \\( t_{df} \\) and \\( \\tau \\) with range \\( \\{2,5,10,15\\} \\) and \\( \\{0.2,2,5,10\\} \\) respectively. Finally we choose \\( t_{df} = 10 \\) and \\( \\tau = 5 \\) to be the optimal hyperparameters. We use this pre-train model as initialization for domain transfer and OOD experiments." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.609, + 0.357, + 0.624 + ], + "angle": 0, + "content": "C.5 DOMAIN TRANSFER" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.635, + 0.827, + 0.818 + ], + "angle": 0, + "content": "We compare MoCo-v2 pre-trained with 800 / 200 epochs and \\( t \\)-MoCo-v2 on Aircraft, Birdsnap, Caltech101, Cars, CIFAR10, CIFAR100, DTD, Pets, and SUN397 in Table C.3. We follow the transfer settings in Ericsson et al. (2021) to finetune the pre-trained models. For datasets Birdsnap, Cars, CIFAR10, CIFAR100, DTD, and SUN397, we report the top-1 accuracy metric, while for Aircraft, Caltech101, and Pets, we report the mean per-class accuracy metric. We also follow Ericsson et al. (2021) to split each dataset into training, validation, and test sets. On each dataset, we perform a hyperparameter search as follows. 
(1) We choose the initial learning rate according to a grid of 4 logarithmically spaced values between \\( 1 \\times 10^{-4} \\) and \\( 1 \\times 10^{-1} \\); (2) We choose the weight decay parameter according to a grid of 4 logarithmically spaced values between \\( 1 \\times 10^{-6} \\) and \\( 1 \\times 10^{-3} \\), plus no weight decay; (3) The weight decay values are divided by the learning rate; (4) For each pair of learning rate and weight decay, we finetune the pre-trained model for 5000 steps by SGD with Nesterov momentum 0.9, batch size of 64, and cosine annealing learning rate schedule without restarts. As can be seen in Table C.3, our \\( t \\)-MoCo-v2 with 200 epochs even outperform the baseline with 800 epochs on average." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.833, + 0.383, + 0.848 + ], + "angle": 0, + "content": "C.6 OOD GENERALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.859, + 0.825, + 0.903 + ], + "angle": 0, + "content": "To demonstrate the advantage of our modification, we also compare MoCo-v2 pre-trained with 800 / 200 epochs and \\( t \\)-MoCo-v2 on OOD generalization benchmarks: PACS Li et al. (2017), VLCS Fang et al. (2013), Office-Home Venkateswara et al. (2017). We follow the standard way to conduct the" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.454, + 0.925 + ], + "angle": 0, + "content": "\\(^{6}\\)https://github.com/facebookresearch/moco" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.472, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.265, + 0.113, + 0.731, + 0.126 + ], + "angle": 0, + "content": "Table C.3: Domain transfer results of vanilla MoCo-v2 and \\( t \\) -MoCo-v2." 
+ }, + { + "type": "table", + "bbox": [ + 0.175, + 0.126, + 0.825, + 0.177 + ], + "angle": 0, + "content": "
MethodAircraftBirdsnapCaltech101CarsCIFAR10CIFAR100DTDPetsSUN397Avg.
MoCo-v2 (800 epochs)83.8045.5183.0186.1896.4271.6971.7089.1155.6175.89
MoCo-v2 (200 epochs)82.7544.5383.3185.2495.8172.7571.2286.7056.0575.37
t-MoCo-v2 (200 epochs)82.7853.4686.8186.1796.0478.3269.2087.9559.3077.78
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.2, + 0.826, + 0.215 + ], + "angle": 0, + "content": "Table C.4: OOD accuracies of vanilla MoCo-v2 and \\( t \\) -MoCo-v2 on domain generalization benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.269, + 0.216, + 0.73, + 0.287 + ], + "angle": 0, + "content": "
MethodPACSVLCSOffice-HomeAvg.
MoCo-v2 (800 epochs)58.969.841.656.8
MoCo-v2 (200 epochs)58.570.436.655.2
t-MoCo-v2 (200 epochs)61.375.142.159.5
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.825, + 0.424 + ], + "angle": 0, + "content": "experiments, i.e., choosing one domain as the test domain and using the remaining domains as training domains, which is named the leave-one-domain-out protocol. The top linear classifier is trained on the training domains and tested on the test domain. Each domain rotates as the test domain and the average accuracy is reported for each dataset in Table C.4. On each dataset, we perform a hyperparameter search following DomainBed Gulrajani & Lopez-Paz (2021). We adopt the leave-one-domain-out cross-validation setup in DomainBed with 10 experiments for hyperparameter selection and run 3 trials. As can be seen in Table C.4, our \\( t \\)-MoCo-v2 with 200 epochs even significantly outperform the baseline with 800 epochs for all of the three datasets." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.462, + 0.486, + 0.475 + ], + "angle": 0, + "content": "C.7 SSCL INSPIRED DATA VISUALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.487, + 0.825, + 0.586 + ], + "angle": 0, + "content": "\\(t\\)-SNE (Van der Maaten & Hinton, 2008) and its variants are designed for data visualization. However, for more complicated data, such as colored images, the results are not satisfactory. Using standard \\(t\\)-SNE, the 2D visualization of the 50K training images of CIFAR-10 (labels denoted as 0, 1,...,9) can be seen in Figure C.10, where different labels are hardly separated. The poor performance of \\(t\\)-SNE on CIFAR-10 can be traced back to the poor distance choice on images, i.e., \\(l_{2}\\)-norm. Inspired by the success of SSCL for natural images, \\(t\\)-SNE can potentially be improved by incorporating data augmentations." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.592, + 0.825, + 0.677 + ], + "angle": 0, + "content": "In light of our perspective (S1), \\( t \\)-SNE can take advantage of the distance specified with (3.2) and the resulting model is essentially our \\( t \\)-SimCLR with feature dimension 2. The visualization from \\( t \\)-SimCLR is shown in Figure C.11, which is much more separated (the nearest neighbor classification accuracy on CIFAR-10 test data is \\( 56.6\\% \\)). By choosing the feature dimension to be 2, various SSCL methods can also be made into data visualizing tools. In Figure C.12, we visualize the outcome from SimCLR (the nearest neighbor classification accuracy on CIFAR-10 test data is \\( 24.8\\% \\))." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.683, + 0.825, + 0.712 + ], + "angle": 0, + "content": "Similar investigations have been carried out in Böhm et al. (2022); Damrich et al. (2022) where they focused specifically on data visualization and stochastic neighbor embedding." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.475, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.113, + 0.691, + 0.332 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.244, + 0.349, + 0.748, + 0.365 + ], + "angle": 0, + "content": "Figure C.10: 50K CIFAR-10 training images visualization in 2D with \\( t \\)-SNE." + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.391, + 0.691, + 0.607 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.193, + 0.625, + 0.799, + 0.641 + ], + "angle": 0, + "content": "Figure C.11: 50K CIFAR-10 training images visualization in 2D with the default \\( t \\)-SimCLR." 
+ }, + { + "type": "image", + "bbox": [ + 0.31, + 0.667, + 0.691, + 0.88 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.224, + 0.899, + 0.771, + 0.915 + ], + "angle": 0, + "content": "Figure C.12: 50K CIFAR-10 training images visualization in 2D with the SimCLR." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.507, + 0.96 + ], + "angle": 0, + "content": "11" + } + ] +] \ No newline at end of file diff --git a/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_origin.pdf b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..06ac4cbf9cd407c9bdf3294edd9f6bb41fa9a73e --- /dev/null +++ b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/97d2e52c-457b-46f6-8c21-13d5d765eb07_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd26413efa80a999c3edbb8f7d33437c8f910435750ffd5ba67dfc56a50e3331 +size 1239737 diff --git a/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/full.md b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/full.md new file mode 100644 index 0000000000000000000000000000000000000000..89c96dd072aeed79acca685449ac612859472eeb --- /dev/null +++ b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/full.md @@ -0,0 +1,604 @@ +# YOUR CONTRASTIVE LEARNING IS SECRETLY DOING STOCHASTIC NEIGHBOR EMBEDDING + +Tianyang Hu1, Zhili Liu1,2, Fengwei Zhou1, Wenjia Wang2,3, Weiran Huang1,4* + +1 Huawei Noah's Ark Lab, 2 Hong Kong University of Science and Technology +$^{3}$ Hong Kong University of Science and Technology (Guangzhou) +$^{4}$ Qing Yuan Research Institute, Shanghai Jiao Tong University + +# ABSTRACT + +Contrastive learning, especially self-supervised 
contrastive learning (SSCL), has achieved great success in extracting powerful features from unlabeled data. In this work, we contribute to the theoretical understanding of SSCL and uncover its connection to the classic data visualization method, stochastic neighbor embedding (SNE) (Hinton & Roweis, 2002), whose goal is to preserve pairwise distances. From the perspective of preserving neighboring information, SSCL can be viewed as a special case of SNE with the input space pairwise similarities specified by data augmentation. The established correspondence facilitates deeper theoretical understanding of learned features of SSCL, as well as methodological guidelines for practical improvement. Specifically, through the lens of SNE, we provide novel analysis on domain-agnostic augmentations, implicit bias and robustness of learned features. To illustrate the practical advantage, we demonstrate that the modifications from SNE to $t$ -SNE (Van der Maaten & Hinton, 2008) can also be adopted in the SSCL setting, achieving significant improvement in both in-distribution and out-of-distribution generalization. + +# 1 INTRODUCTION + +Recently, contrastive learning, especially self-supervised contrastive learning (SSCL) has drawn massive attention, with many state-of-the-art models following this paradigm in both computer vision (He et al., 2020a; Chen et al., 2020a;b; Grill et al., 2020; Chen & He, 2021; Zbontar et al., 2021) and natural language processing (Fang et al., 2020; Wu et al., 2020; Giorgi et al., 2020; Gao et al., 2021; Yan et al., 2021). In contrast to supervised learning, SSCL learns the representation through a large number of unlabeled data and artificially defined self-supervision signals, i.e., regarding the augmented views of a data sample as positive pairs and randomly sampled data as negative pairs. 
By enforcing the features of positive pairs to align and those of negative pairs to be distant, SSCL produces discriminative features with state-of-the-art performance for various downstream tasks. + +Despite the empirical success, the theoretical understanding is under-explored as to how the learned features depend on the data and augmentation, how different components in SSCL work and what are the implicit biases when there exist multiple empirical loss minimizers. For instance, SSCL methods are widely adopted for pretraining, whose feature mappings are to be utilized for various downstream tasks which are usually out-of-distribution (OOD). The distribution shift poses great challenges for the feature learning process with extra requirement for robustness and OOD generalization (Arjovsky et al., 2019; Krueger et al., 2021; Bai et al., 2021; He et al., 2020b; Zhao et al., 2023; Dong et al., 2022), which demands deeper understanding of the SSCL methods. + +The goal of SSCL is to learn the feature representations from data. For this problem, one classic method is SNE (Hinton et al., 2006) and its various extensions. Specially, $t$ -SNE (Van der Maaten & Hinton, 2008) has become the go-to choice for low-dimensional data visualization. Comparing to SSCL, SNE is far better explored in terms of theoretical understanding (Arora et al., 2018; Linderman & Steinerberger, 2019; Cai & Ma, 2021). However, its empirical performance is not satisfactory, especially in modern era where data are overly complicated. Both trying to learn feature representations, are there any deep connections between SSCL and SNE? Can SSCL take the advantage of the theoretical soundness of SNE? Can SNE be revived in the modern era by incorporating SSCL? + +In this work, we give affirmative answers to the above questions and demonstrate how the connections to SNE can benefit the theoretical understandings of SSCL, as well as provide methodological guidelines for practical improvement. 
The main contributions are summarized below. + +- We propose a novel perspective that interprets SSCL methods as a type of SNE methods with the aim of preserving pairwise similarities specified by the data augmentation. +- The discovered connection enables deeper understanding of SSCL methods. We provide novel theoretical insights for domain-agnostic data augmentation, implicit bias and OOD generalization. Specifically, we show isotropic random noise augmentation induces $l_{2}$ similarity while mixup noise can potentially adapt to low-dimensional structures of data; we investigate the implicit bias from the angle of order preserving and identified the connection between minimizing the expected Lipschitz constant of the SSCL feature map and SNE with uniformity constraint; we identify that the popular cosine similarity can be harmful for OOD generalization. +- Motivated by the SNE perspective, we propose several modifications to existing SSCL methods and demonstrate practical improvements. Besides a re-weighting scheme, we advocate to lose the spherical constraint for improved OOD performance and a $t$ -SNE style matching for improved separation. Through comprehensive numerical experiments, we show that the modified $t$ -SimCLR outperforms the baseline with $90\%$ less feature dimensions on CIFAR-10 and $t$ -MoCo-v2 pretrained on ImageNet significantly outperforms in various domain transfer and OOD tasks. + +# 2 PRELIMINARY AND RELATED WORK + +Notations. For a function $f: \Omega \to \mathbb{R}$ , let $\|f\|_{\infty} = \sup_{\boldsymbol{x} \in \Omega} |f(\boldsymbol{x})|$ and $\|f\|_p = (\int_{\Omega} |f(\boldsymbol{x})|^p d\boldsymbol{x})^{1/p}$ . For a vector $\boldsymbol{x}$ , $\| \boldsymbol{x}\|_p$ denotes its $p$ -norm, for $1 \leq p \leq \infty$ . $\mathbb{P}(A)$ is the probability of event $A$ . For a random variable $z$ , we use $P_z$ and $p_z$ to denote its probability distribution and density respectively. 
Denote Gaussian distribution by $N(\mu, \Sigma)$ and let $I_d$ be the $d \times d$ identity matrix. Let the dataset be $\mathcal{D}_n = \{\boldsymbol{x}_1, \dots, \boldsymbol{x}_n\} \subset \mathbb{R}^d$ where each $\boldsymbol{x}_i$ independently follows distribution $P_x$ . The goal of unsupervised representation learning is to find informative low-dimensional features $z_1, \dots, z_n \in \mathbb{R}^{d_z}$ of $\mathcal{D}_n$ where $d_z$ is usually much smaller than $d$ . We use $f(\boldsymbol{x})$ to as the default notation for the feature mapping from $\mathbb{R}^d \to \mathbb{R}^{d_z}$ , i.e., $z_i = f(\boldsymbol{x}_i)$ . + +Stochastic neighbor embedding. SNE (Hinton & Roweis, 2002) is a powerful representation learning framework designed for visualizing high-dimensional data in low dimensions by preserving neighboring information. The training process can be conceptually decomposed into the following two steps: (1) calculate the pairwise similarity matrix $\pmb{P} \in \mathbb{R}^{n \times n}$ for $\mathcal{D}_n$ ; (2) optimize features $z_1, \dots, z_n$ such that their pairwise similarity matrix $\pmb{Q} \in \mathbb{R}^{n \times n}$ matches $\pmb{P}$ . Under the general guidelines lie plentiful details. In Hinton & Roweis (2002), the pairwise similarity is modeled as conditional probabilities of $x_j$ being the neighbor of $x_i$ , which is specified by a Gaussian distribution centered at $x_i$ , i.e., when $i \neq j$ , + +$$ +P _ {j \mid i} = \frac {\exp \left(- \| \boldsymbol {x} _ {i} - \boldsymbol {x} _ {j} \| _ {2} ^ {2} / 2 \sigma_ {i} ^ {2}\right)}{\sum_ {k \neq i} \exp \left(- \| \boldsymbol {x} _ {i} - \boldsymbol {x} _ {k} \| _ {2} ^ {2} / 2 \sigma_ {i} ^ {2}\right)}, \tag {2.1} +$$ + +where $\sigma_{i}$ is the variance of the Gaussian centered at $x_{i}$ . Similar conditional probabilities $Q_{j|i}$ 's can be defined on the feature space. When matching $Q$ to $P$ , the measurement chosen is the KL-divergence between two conditional probabilities. 
The overall training objective for SNE is + +$$ +\inf _ {\boldsymbol {z} _ {1}, \dots , \boldsymbol {z} _ {n}} \sum_ {i = 1} ^ {n} \sum_ {j = 1} ^ {n} P _ {j | i} \log \frac {P _ {j | i}}{Q _ {j | i}}. \tag {2.2} +$$ + +Significant improvements have been made to the classic SNE. Im et al. (2018) generalized the KL-divergence to $f$ -divergence and found that different divergences favors different types of structure. Lu et al. (2019) proposed to make $P$ doubly stochastic so that features are less crowded. Most notably, $t$ -SNE (Van der Maaten & Hinton, 2008) modified the pairwise similarity by considering joint distribution rather than conditional, and utilizes t-distribution instead of Gaussian in the feature space modeling. It is worth noting that SNE belongs to a large class of methods called manifold learning (Li et al., 2022). In this work, we specifically consider SNE. If no confusion arises, we use SNE to denote the specific work of Hinton & Roweis (2002) and this type of methods in general interchangeably. + +Self-supervised contrastive learning. The key part of SSCL is the construction of positive pairs, or usually referred to as different views of the same sample. For each $x_{i}$ in the training data, denote + +its two augmented views to be $\pmb{x}_i'$ and $\pmb{x}_i''$ . 
Let $\mathcal{D}_n' = \{\pmb{x}_1', \dots, \pmb{x}_n'\}$ , $\mathcal{D}_n'' = \{\pmb{x}_1'', \dots, \pmb{x}_n''\}$ and define + +$$ +l (\pmb {x} _ {i} ^ {\prime}, \pmb {x} _ {i} ^ {\prime \prime}) = - \log \frac {\exp (\mathrm {s i m} (f (\pmb {x} _ {i} ^ {\prime}) , f (\pmb {x} _ {i} ^ {\prime \prime})) / \tau)}{\sum_ {\pmb {x} \in \mathcal {D} _ {n} ^ {\prime} \cup \mathcal {D} _ {n} ^ {\prime \prime} \setminus \{\pmb {x} _ {i} ^ {\prime} \}} \exp (\mathrm {s i m} (f (\pmb {x} _ {i} ^ {\prime}) , f (\pmb {x})) / \tau)}, +$$ + +where $\mathrm{sim}(z_1,z_2) = \langle \frac{z_1}{\|\pmb{z}_1\|_2},\frac{z_2}{\|\pmb{z}_2\|_2}\rangle$ denotes the cosine similarity and $\tau$ is a temperature parameter. The training objective of the popular SimCLR (Chen et al., 2020a) can be written as $L_{\mathrm{InfoNCE}}\coloneqq \frac{1}{2n}\sum_{i = 1}^{n}(l(\pmb{x}_i^{\prime \prime},\pmb{x}_i^{\prime}) + l(\pmb{x}_i^{\prime},\pmb{x}_i^{\prime \prime}))$ + +Recently, various algorithms are proposed to improve the above contrastive learning. To address the need for the large batch size, MoCo (He et al., 2020a; Chen et al., 2020b) utilizes a moving-averaged encoder and a dynamic memory bank to store negative representations, making it more device-friendly. Grill et al. (2020); Chen & He (2021); Zbontar et al. (2021); Chen et al. (2021) radically discard negative samples in SSCL but still achieve satisfactory transfer performance. Another line of works (Caron et al., 2020; Li et al., 2021; Liu et al., 2022) mines the hierarchy information in data to derive more semantically compact representations. Radford et al. (2021); Yao et al. (2021) even extend the contrastive methods to the multi-modality data structure to achieve impressive zero-shot classification results. + +Theoretical understanding of SSCL. In contrast of the empirical success, theoretical understanding of SSCL is still limited. 
While most of the theoretical works
Admitting similar conditional probability formulation as in (2.1) yields that for $i\neq j$ + +$$ +\widetilde {Q} _ {j \mid i} = \frac {\exp \left(\operatorname {s i m} \left(f \left(\widetilde {\boldsymbol {x}} _ {i}\right) , f \left(\widetilde {\boldsymbol {x}} _ {j}\right)\right) / \tau\right)}{\sum_ {k \neq i} \exp \left(\operatorname {s i m} \left(f \left(\widetilde {\boldsymbol {x}} _ {i}\right) , f \left(\widetilde {\boldsymbol {x}} _ {k}\right)\right) / \tau\right)}. \tag {3.1} +$$ + +By taking + +$$ +\widetilde {P} _ {j \mid i} = \left\{ \begin{array}{l l} 1, & \text {i f} \widetilde {\boldsymbol {x}} _ {i} \text {a n d} \widetilde {\boldsymbol {x}} _ {j} \text {a r e p o s i t i v e p a i r s} \\ 0, & \text {o t h e r w i s e ,} \end{array} \right. \tag {3.2} +$$ + +the SNE objective (2.2) can be written as + +$$ +\sum_ {i = 1} ^ {2 n} \sum_ {j = 1} ^ {2 n} \widetilde {P} _ {j | i} \log \frac {\widetilde {P} _ {j | i}}{\widetilde {Q} _ {j | i}} = \sum_ {k = 1} ^ {n} \Big (- \log (\widetilde {Q} _ {2 k - 1 | 2 k}) - \log (\widetilde {Q} _ {2 k | 2 k - 1}) \Big), +$$ + +which reduces to the SimCLR objective $L_{\mathrm{InfoNCE}}$ , up to a constant scaling term only depending on $n$ . Now that we have established the correspondence between SNE and SimCLR, it's clear that the feature learning process of SSCL also follows the two steps of SNE. + +(S1) The positive pair construction specifies the similarity matrix $P$ . +(S2) The training process then matches $Q$ to $P$ by minimizing some divergence between the two specified by the training objective, e.g., KL divergence in SimCLR. 
+ +![](images/3ecaa78f8dce75bb41d06285568ba9c89e5fd0f9e5e58682cb50c271df5f04f4.jpg) + +![](images/a7a1122e517d6b6db862794be6a44519aeeb56f41996f2d85124745ee03e1a6f.jpg) + +![](images/313ce6e1d4b7b5dc8b7ed2aac0bd2dff24fba0882f672f1add9f8a8b47be159b.jpg) + +![](images/a24431869e4cdcaeda8101b3a435fc04249f7e6efbe2530ac24d45b30827bb42.jpg) + +![](images/32f6266ce0ea2e5ecc4d0294265ed70e66d675a8e0ab408d06970130655d53dd.jpg) +Figure 1: Gaussian mixture setting with 5 components. (a) illustration of data with 250 samples. (b) learned features by standard SimCLR with normalization (cosine similarity) to 1-sphere. (c) learned features by modified SimCLR without normalization ( $l_{2}$ similarity). (d, e) feature mapping of the two methods in case of OOD mean shift. The linear classification accuracy is $48.4\%$ in (d) and $100\%$ in (e). + +![](images/2f1a4d6332027e5c9e546360d59bf528012fe1dfaf61c353418cb03dc8aea145.jpg) + +The main difference between SNE and SSCL is the first part, where the $P$ in SNE is usually densely filled by $l_{p}$ distance, ignoring the semantic information within rich data like images and texts. In contrast, SSCL omits all traditional distances in $\mathbb{R}^d$ and only specifies semantic similarity through data augmentations, and the resulting $P$ is sparsely filled only by positive pairs as in (3.2). For structurally rich data such as image or text, the semantic information is invariant to a wide range of transformations. Human's prior knowledge of such invariance guides the construction of positive pairs in SSCL, which is then learned by the feature mapping. + +Remark 3.1 (SNE vs SSCL). We would like to clarify on the main difference between SNE and SSCL that we focus in this work. Although standard SNE (Hinton et al., 2006) is non-parametric without explicit feature maps, and is optimized for the whole dataset, these are not the defining properties of SNE. SNE can also utilize explicit feature maps and mini-batch training (Van Der Maaten, 2009). 
On the other hand, SSCL can also benefit from larger/full batches (Chen et al., 2020a) and can also be modified to directly optimize the features $\boldsymbol{z}_i$ 's. In this work, we omit these subtleties1 and focus on the (S1) perspective, which we view as the most significant difference between SNE and SSCL. + +# 3.1 ANALYSIS + +In this section, to showcase the utility of the SNE perspective, we demonstrate how the feature learning process of SSCL methods, e.g., SimCLR, can become more intuitive and transparent. Specifically, we re-derive the alignment and uniformity principle (Wang & Isola, 2020) as well as provide novel analysis on domain-agnostic augmentations, the implicit bias and robustness of learned features. To aid the illustration, we device toy examples with simulated Gaussian mixture data. + +Gaussian mixture setting. Let the data follow $d$ -dimensional Gaussian mixture distribution with $m$ components where $P_{\pmb{x}} \sim \frac{1}{m} \sum_{i=1}^{m} N(\pmb{\mu}_i, \sigma^2 \pmb{I}_d)$ . The special case with $d = 2$ , $m = 5$ , $\sigma = 0.1$ is illustrated in Figure 1(a) with 250 independent samples. To apply contrastive methods, consider constructing positive pairs by direct sampling, i.e., if $\pmb{x}$ is from the first component, then we sample another $\pmb{x}' \sim N(\pmb{\mu}_1, \sigma^2 \pmb{I}_d)$ independently as its alternative view for contrast. The negative samples are the same as in standard SimCLR training. + +# 3.1.1 DOMAIN-AGNOSTIC DATA AUGMENTATION + +Now that we have established in (S1) that the input space pairwise distance is specified by the data augmentation, a natural question to ask is what are the corresponding induced distances. In this section, we investigate this problem for domain-agnostic data augmentations. + +The quality of data augmentation has great impact on the performance of SSCL methods, which reflects people's prior knowledge on the data. 
However, when facing new data without any domain knowledge, + +we have to rely on domain-agnostic data augmentations, e.g., adding random noises (Verma et al., 2021), for contrast. We first consider using general random noise augmentation, i.e., for any $\pmb{x} \in \mathbb{R}^d$ , let $\pmb{x}' = \pmb{x} + \delta$ where $\delta$ follows some distribution with density $\phi(\pmb{x})$ . Then, for any $\pmb{x}_i$ , the probability density of having $\pmb{t} \in \mathbb{R}^d$ as its augmented point can be characterized as $P_{\pmb{t}|\pmb{x}_i} = \mathbb{P}(\pmb{x}_i \mid \pmb{x}_i' = \pmb{t} \text{ form a positive pair} | \pmb{x}_i) = \phi(\pmb{t} - \pmb{x}_i)$ . We have the following proposition on Gaussian-induced distance. + +Proposition 3.2 (Gaussian noise injection). If the noise distribution is isotropic Gaussian with mean zero, the induced distance is equivalent to the $l_{2}$ distance in $\mathbb{R}^d$ , up to a monotone transformation. + +Another popular noise injection method is the mixup (Zhang et al., 2017), where the augmented data are comprised of convex combinations of the training data. For each $\boldsymbol{x}_i$ , a positive pair can be constructed from another $\boldsymbol{x}_j$ such that $\boldsymbol{x}_i' = \boldsymbol{x}_i + \lambda (\boldsymbol{x}_j - \boldsymbol{x}_i)$ and $\lambda \in (0,1)$ is the hyperparameter usually modeled with Beta distribution. For independent $\boldsymbol{x}_1, \boldsymbol{x}_2 \sim P_x$ , denote the convoluted density of $\lambda (\boldsymbol{x}_1 - \boldsymbol{x}_2)$ as $p_{\lambda}(\boldsymbol{x})$ , which is symmetric around 0. Then, if employing mixup for positive pairs in SSCL, the induced distance can be written as $P_{\boldsymbol{x}_1, \boldsymbol{x}_2} = P_{\boldsymbol{x}_2, \boldsymbol{x}_1} = p_{\lambda}(\boldsymbol{x}_1 - \boldsymbol{x}_2)$ . + +Gaussian vs. mixup. Verma et al. 
(2021) proposed to use mixup when domain-specific information is unattainable and provided supportive analysis on its advantage over isotropic Gaussian noise from the classification generalization error point of view. Through (S1) perspective, we can intuitively explain why data-dependent mixup noises can be potentially better from the perspective of the "curse of dimensionality". Consider the $d$ -dimensional Gaussian mixture setting with $m < d$ separated components. Notice that $\pmb{\mu}_1,\dots ,\pmb{\mu}_m$ can take up at most $(m - 1)$ -dimensional linear sub-space of $\mathbb{R}^d$ . Denoted the space spanned by $\pmb{\mu}_i$ 's as $S_{\mu}$ . For the light-tailed Gaussian distribution, and the majority of samples will be close to $S_{\mu}$ . Hence, majority of the convoluted density $p_{\lambda}(\pmb{x})$ will also be supported on $S_{\mu}$ , so does the corresponding $P_{\pmb{x}_2,\pmb{x}_1}$ . Thus, the induced distance from mixup will omit irrelevant variations in the complement of $S_{\mu}$ and focus on the low-dimensional sub-space $S_{\mu}$ where $\pmb{\mu}_i$ 's actually differ. This effectively reduces the dimension dependence from $d$ to $m - 1$ . In comparison, isotropic Gaussian noise induces $l_2$ distance for positive pairs with support of $\mathbb{R}^d$ , which will be much more inefficient, especially when $m \ll d$ . Since it is well-known that the performance of regression or classification models is strongly influenced by the intrinsic dimension of the input space (Hamm & Steinwart, 2021), keeping the data in a low-dimensional space is preferable. + +# 3.1.2 ALIGNMENT AND UNIFORMITY + +Characterizing the learned features of SSCL is of critical importance. Wang & Isola (2020) proposed alignment and uniformity as principles for SimCLR type contrastive learning methods. Such results can be intuitively understood through the perspective of (S1) and (S2). Consider the common case where the feature space is $(d_z - 1)$ -sphere. 
First, (3.2) indicates that only similarities (distances) between positive pairs are non-zero (finite) and all other pairwise similarities (distances) are zero (infinity). Preserving (3.2) requires the features of positive pairs to align (cosine similarity tends to 1) and those of negative pairs to be as distant as possible. If in the extreme case where positive pairs match exactly, i.e., $f(\pmb{x}_i) = f(\pmb{x}_i')$ for any $i = 1,\dots ,n$ , we call it perfect alignment. + +If perfect alignment is achieved and the features are constrained on the unit sphere, matching (3.2) implies pushing $n$ points on the feature space as distant as possible. Maximally separated $n$ points on a $d$ -sphere has been studied in geometry, known as the Tammes problem (Tammes, 1930; Erber & Hockney, 1991; Melisseny, 1998). We say perfect uniformity is achieved if all the pairs are maximally separated on the sphere. There are some simple cases of the Tammes problem. If $d = 2$ , perfect uniformity can be achieved if the mapped points form a regular polygon. If $d \geq n - 1$ , the solution can be given by the vertices of an $(n - 1)$ -simplex, inscribed in an $(n - 1)$ -sphere embedded in $\mathbb{R}^d$ . The cosine similarity between any two vertices is $-1 / (n - 1)$ and in this case, $L_{\mathrm{InfoNCE}}$ can attain its lower bound2. As $n \to \infty$ , the point distribution converges weakly to uniform distribution. As can be seen in Figure 1(a, b), perfect alignment and perfect uniformity are almost achieved by standard SimCLR in the Gaussian mixture setting. + +As we will demonstrate in Section 3.1.4 that the spherical feature space can be bad for OOD generalization, adopting of the Euclidean space will change the statement of the uniformity property and can also be analyzed from the SNE perspective. Details can be found in Appendix A.5. + +# 3.1.3 IMPLICITBIAS + +Existing theoretical results on SSCL provide justification of its empirical success in classification. 
However, there is more to it than just separating different classes and many phenomena are left unexplained. Take the popular SimCLR (Chen et al., 2020a) on CIFAR-10 as an example, we can consistently observe that the feature similarities within animals (bird, cat, deer, dog, frog, horse) and within objects (airplane, automobile, ship, truck), are significantly higher than those between animals and objects3. This can be viewed as an implicit bias towards preserving semantic information, which might be surprising as we have no supervision on the label information during the training process. However, existing literature on implicit bias is scarce. As advocated in Saunshi et al. (2022), ignoring inductive biases cannot adequately explain the success of contrastive learning. In this section, we provide a simple explanation from the perspective of SNE. + +For a more concrete illustration, consider training SimCLR in the Gaussian mixture setting with $d = 1$ , $d_z = 2$ , $m = 4$ , $\mu_i = i$ , and $\sigma = 0.1$ . Denote the 4 components in ascending order by A,B,C,D. Perfect alignment and uniformity imply that their feature maps (a, b, c, d) on the unit-circle should be vertices of an inscribed square. What left unsaid is their relative order. Clockwise or counter-Clockwise from a, regardless of the initialization, we can observe SimCLR to consistently produce the order $a \to b \to c \to d$ . + +Remark 3.3 (Relative ordering and neighbor-preserving). The order-preserving property showcased with $d = 1$ is mainly for illustration, as in one-dimension, the neighboring info is simplified as the order, which is much easier to understand. The results remain the same in high dimensions as long as the clusters are well separated with an obvious order of clusters. For instance, some relative orders in Figure 1(a,b) are also stable, e.g., the neighbor of blue will consistently be purple and yellow. 
+ +With great resemblance to SNE, SSCL methods also exhibit neighbor-preserving property and we identify it as an implicit bias. Such implicit bias can be universal in SSCL and the phenomenon in Figure A.3 is also a manifestation. In deep learning, the implicit bias is usually characterized by either closeness to the initialization (Moroshko et al., 2020; Azulay et al., 2021), or minimizing certain complexity (Razin & Cohen, 2020; Zhang et al., 2021). In the case of SimCLR, we hypothesize the implicit bias as the expected Lipschitz constant, which has deep connections to SNE with uniformity constraint. For a feature map $f$ onto the unit-sphere, define + +$$ +C (f) = \mathbb {E} _ {\boldsymbol {x}, \boldsymbol {x} ^ {\prime}} \frac {\| f (\boldsymbol {x}) - f \left(\boldsymbol {x} ^ {\prime}\right) \| _ {2}}{\| \boldsymbol {x} - \boldsymbol {x} ^ {\prime} \| _ {2}}, \tag {3.3} +$$ + +where the $x_{1}, x_{2}$ are independent samples from the data distribution. + +Definition 3.4 (SNE with uniformity constraint). Assume data $\boldsymbol{x}_1, \dots, \boldsymbol{x}_n \in \mathbb{R}^d$ . If the corresponding SNE features $z_1, \dots, z_n \in \mathbb{R}^{d_z}$ are constrained to be the maximally separated $n$ points on the $(d_z - 1)$ -sphere, we call this problem SNE with uniformity constraint. + +The key of SNE is matching the pairwise similarity matrices $Q$ to $P$ . When solving SNE with uniformity constraint, the only thing to be optimized is the pairwise correspondence, or ordering of the mapping. We have the following theorem that links the neighbor-preserving property to $C(f)$ . + +Theorem 3.5. Let $\pmb{x}_1, \dots, \pmb{x}_n \in \mathbb{R}^d$ such that $\| \pmb{x}_i - \pmb{x}_j \|_2 > 0$ for any $i, j$ and let $z_1, \dots, z_n \in \mathbb{R}^{d_z}$ be maximally separated $n$ points on the $(d_z - 1)$ -sphere. 
Denote $P = (p_{ij})_{n \times n}$ and $Q = (q_{ij})_{n \times n}$ as the corresponding pairwise similarity matrices of $\pmb{x}_i$ 's and $\pmb{z}_i$ 's respectively. Let $\pi$ denote a permutation on $\{1, \dots, n\}$ and denote all such permutations as $T$ . Let $Q^\pi$ as the $\pi$ -permuted matrix $Q$ and define + +$$ +C _ {1} (P, Q ^ {\pi}) = \sum_ {i \neq j} \frac {q _ {\pi (i) \pi (j)}}{p _ {i j}} \quad \text {a n d} \quad \pi^ {*} = \operatorname * {a r g m i n} _ {\pi \in T} C _ {1} (P, Q ^ {\pi}). +$$ + +Then, $\pi^{*}$ also minimizes $\| \bar{P} - Q^{\pi}\|_{F}$ where $\| \cdot \|_{F}$ is the Frobenius norm and $\bar{P} = (\bar{p}_{ij})_{n\times n}$ is a (monotonically) transformed similarity matrix with $\bar{p}_{ij} = -1 / p_{ij}$ . + +Theorem 3.5 showcases the relationship between minimizing $C(f)$ and the structure preserving property by considering a special SNE problem, where the pairwise similarity is not modeled by Gaussian as standard. Although $q_{ij} = -\| f(\pmb{x}_i) - f(\pmb{x}_j)\|_2$ is unorthodox, it is reasonable since the larger the distance, the smaller the similarity. We have the following corollary to explain the neighbor-preserving property of SSCL and the implicit bias associated with minimizing the complexity $C(f)$ . + +Corollary 3.6 (Implicit bias of SSCL). When SSCL model achieves perfect alignment and perfect uniformity, if the complexity $C(f)$ is minimized, the resulting feature map preserves pairwise distance in the input space, resembling SNE with uniformity constraint. + +Corollary 3.6 links the implicit bias of SSCL to the SNE optimization with uniformity constraint. In the case of perfect alignment and perfect uniformity, SSCL can be seen as a special SNE problem where the feature $z_{1}, \dots, z_{n}$ must be maximally separated on the unit-sphere. Recall the 1-dimension Gaussian case. There are in total $3! 
= 6$ different orderings for the 4 cluster means, among which, a $\rightarrow \mathrm{b} \rightarrow \mathrm{c} \rightarrow \mathrm{d}$ will give the lowest SNE loss. As can be seen in Figure A.4, both $C(f)$ and the SNE loss are monotonically decreasing during training for the Gaussian mixture setting. + +When the alignment or uniformity is not perfect, the resulting feature mapping can still be characterized via SNE, with the uniformity constraint relaxed as a form of regularization. In our numerical experiments on the CIFAR-10 data, we observe $C(f)$ to be monotonically decreasing during the training process, supporting our hypothesis. More details can be found in Appendix A.3. Corollary 3.6 sheds light on the implicit semantic information preserving phenomenon shown in Figure A.3, as in the input space, images of dogs should be closer to images of cats, than airplanes. + +# 3.1.4 TARGETING OOD: EUCLIDEAN VS SPHERICAL + +Almost all SSCL methods require normalization to the unit-sphere and the similarity on the feature space is often the cosine similarity. In comparison, standard SNE methods operate freely on the Euclidean space. In this section, we show that the normalization can hinder the structure-preserving and there is a fundamental trade off between in-distribution and out-of-domain generalization. + +Consider the 2-dimensional Gaussian mixture setting as illustrated in Figure 1(a). Notice that as long as the mixing components are well separated, the learned feature mapping on the sphere will always be the pentagon shape, regardless of the relative locations of the clusters. This is a result of the uniformity property derived under spherical constraint. Distant clusters in the input space will be pulled closer while close clusters will be pushed to be more distant, which results in the trade off between in-distribution and out-of-domain generalization. 
On one hand, close clusters being more separated in the feature space is potentially beneficial for in-distribution classification. On the other hand, the spherical constraint adds to the complexity of the feature mapping, potentially hurting robustness.
+
+In the Euclidean space, pushing away negative samples (as distant as possible) will be much easier, since the feature vectors could diverge towards infinity $^{4}$ and potentially preserve more structural information. To verify our intuition, we relax the spherical constraint in the Gaussian mixture setting and change the cosine similarity in SimCLR to the negative $l_{2}$ distance in $\mathbb{R}$ . The learned features are shown in Figure 1(c). Compared to Figure 1(b), we can get the extra information that the purple cluster is far away from the others. If we introduce a small mean shift to the data, moving the distribution along each dimension by 1, the resulting feature maps differ significantly in robustness. As illustrated in Figure 1(d) vs. (e), the standard SimCLR is much less robust to OOD shifts and the resulting classification accuracy degrades to only $48.4\%$ , while that for the modified SimCLR remains $100\%$ . The same OOD advantage can also be verified in the CIFAR-10 to CIFAR-100 OOD generalization case (details in Appendix C.3 Figure C.8) and large-scale real-world scenarios with MoCo (Chen et al., 2020b) as baseline (details in Section 5).
+
+# 4 IMPROVING SSCL BY SNE
+
+The proposed SNE perspective (S1,S2) can inspire various modifications to existing SSCL methods. In this section, we choose SimCLR as our baseline and investigate three straightforward modifications. For empirical evaluation, we report the test classification accuracy of nearest neighbor classifiers on both simulated data and real datasets. Experiment details can be found in Appendix C.
+
+# 4.1 WEIGHTED POSITIVE PAIRS
+
+In practice, positive pairs are constructed from anchors (training data), by i.i.d. 
data augmentations, e.g., random resized crop, random horizontal flip, color jitter, etc. Take random crop as an example, pair 1 and 2 may be from $30\%$ , $80\%$ random crops, respectively. Their similarities should not be treated + +![](images/abeffeeae615bb59c1a7505b0ba33952441e268232c272f589bd8ba4b5f1c065.jpg) +(a) Weighted SimCLR. + +![](images/10129a71b1f7ed9a9454c7fe19cd191e125990057078c7774a222fa6d8368813.jpg) +(b) SimCLR vs. $t$ -SimCLR. +Figure 2: Nearest neighbor classification test accuracy on CIFAR-10 with ResNet-18 after 200 epochs pre-training. (a) $N / A$ stands for the baseline SimCLR. The $x$ -axis is the temperature for IoU weighting scheme. (b) Comparison between SimCLR and $t$ -SimCLR with different feature dimensions. + +as equal, as in typical SSCL methods. Incorporating the disparity in the data augmentation process is straightforward in the perspective of SNE, where the InfoNCE loss can be naturally modified as + +$$ +\frac {1}{2 n} \sum_ {i = 1} ^ {n} p _ {i i ^ {\prime}} \cdot \left(l \left(\boldsymbol {x} _ {i}, \boldsymbol {x} _ {i} ^ {\prime}\right) + l \left(\boldsymbol {x} _ {i} ^ {\prime}, \boldsymbol {x} _ {i}\right)\right). +$$ + +The weight $p_{ii'}$ in $P$ can be specified manually to reflect human's prior knowledge. To test out the effect of such modification, we conduct numerical experiments on CIFAR-10 using the standard SimCLR. The weighting scheme is based on the Intersection over Union (IoU) of random resized crops. For each positive pair, let $p_{ii'} \propto \exp(\mathrm{IoU}(\boldsymbol{x}_i, \boldsymbol{x}_i') / \tau')$ , where $\tau' > 0$ is a hyperparameter (temperature) controlling the strength of the weighting scheme, i.e., the bigger the $\tau'$ , the closer to the unweighted state. The CIFAR-10 test performance vs. $\tau'$ is shown in Figure 2(a). The baseline is $80.7\%$ and can be significantly improved to $82.1\%$ if choosing $\tau' = 1$ . 
+ +# 4.2 T-SIMCLR: $t$ -SNE STYLE MATCHING + +Most SSCL algorithms differ mainly in (S2), i.e., defining $Q$ and matching it to $P$ , where fruitful results in SNE literature can be mirrored and applied. Now that we have identified the advantage of modeling features in Euclidean spaces in Section 3.1.4, the most promising modification that follows is to introduce $t$ -SNE to SimCLR. Since we are learning low-dimensional features from high-dimensional data, preserving all pairwise similarities is impossible and the features tend to collapse. This is referred to as the "crowding problem" in Van der Maaten & Hinton (2008) (see Section 3.2 therein). $t$ -SNE utilizes the heavy-tail $t$ -distribution instead of the light-tail Gaussian, to model $Q$ and encourage separation in feature space. Correspondingly, the training objective $L_{\mathrm{InfoNCE}}$ can be modified as + +$$ +\frac {1}{n} \sum_ {i = 1} ^ {n} - \log \frac {\left(1 + \| f \left(\boldsymbol {x} _ {i}\right) - f \left(\boldsymbol {x} _ {i} ^ {\prime}\right) \| _ {2} ^ {2} / \left(\tau t _ {d f}\right)\right) ^ {- \left(t _ {d f} + 1\right) / 2}}{\sum_ {1 \leq j \neq k \leq 2 n} \left(1 + \| f (\widetilde {\boldsymbol {x}} _ {j}) - f (\widetilde {\boldsymbol {x}} _ {k}) \| _ {2} ^ {2} / \left(\tau t _ {d f}\right)\right) ^ {- \left(t _ {d f} + 1\right) / 2}}, \tag {4.1} +$$ + +where $t_{df}$ is the degree of freedom for the $t$ -distribution. Besides substituting the cosine similarity to the $l_2$ distance, the key modification is the modeling of feature space similarity $Q$ , from Gaussian to $t$ -distribution as suggested by Van der Maaten & Hinton (2008) to avoid the crowding problem and accommodate the dimension-deficiency in the feature space. We call the modified method $t$ -SimCLR and we expect it to work better, especially when the feature dimension is low, or in the OOD case. 
+ +Figure 2(b) shows the comparison between SimCLR and $t$ -SimCLR on CIFAR-10 with different feature dimensions, where $t$ -SimCLR has significant advantages in all cases and the smaller the $d_{z}$ , the larger the gap. Without decreasing the standard $d_{z} = 128$ , $t$ -SimCLR improves the baseline from $80.8\%$ to $83.9\%$ and even beats it using only $d_{z} = 8$ with accuracy $81.7\%$ . + +Remark 4.1 (Degree of freedom). Standard $t$ -SNE utilizes $t$ -distribution with $t_{df} = 1$ , to better accommodate the extreme $d_z = 2$ case. In practice, $t_{df}$ can vary and as $d_z$ increases, larger $t_{df}$ might be preferred. We recommend using $t_{df} = 5$ as the default choice. The performance of $t_{df}$ vs $d_z$ can be found in Appendix C, as well as discussion on the fundamental difference between $t_{df}$ and $\tau$ . + +Remark 4.2 (Training epochs). For the CIFAR-10 experiments, we reported the results of ResNet-18 after 200 training epochs, similar to the setting of Yeh et al. (2021). We also conducted 1000-epoch experiments and found that our modifications provide consistent improvements throughout the training process, not in terms of speeding up the convergence, but converging to better solutions. Details can be found in Appendix C.1 and Figure C.6. + +Table 1: Domain transfer results of vanilla MoCo-v2 and $t$ -MoCo-v2. + +
MethodAircraftBirdsnapCaltech101CarsCIFAR10CIFAR100DTDPetsSUN397Avg.
MoCo-v282.7544.5383.3185.2495.8172.7571.2286.7056.0575.37
t-MoCo-v282.7853.4686.8186.1796.0478.3269.2087.9559.3077.78
+ +Table 2: OOD accuracies of vanilla MoCo-v2 and $t$ -MoCo-v2 on domain generalization benchmarks. + +
MethodPACSVLCSOffice-HomeAvg.
MoCo-v258.570.436.655.2
t-MoCo-v261.375.142.159.5
+ +# 5 LARGE SCALE EXPERIMENTS + +In this section, we apply the same modifications proposed in Section 4.2 to MoCo-v2 (Chen et al., 2020b), as it is more device-friendly to conduct large scale experiments. We name our model $t$ -MoCo-v2. Both models are pre-trained for 200 epochs on ImageNet following the setting of Chen et al. (2020b). The linear probing accuracy of $t$ -MoCo-v2 on ImageNet is $67.0\%$ , which is comparable to the MoCo result $67.5\%$ . With the same level of in-distribution classification accuracy, we conduct extensive experiments to compare their OOD performance. The results in Table 1 and 2 suggest that our modification significantly improves the domain transfer and the OOD generalization ability without sacrificing in-distribution accuracy. + +Domain Transfer. We first conduct experiments on the traditional self-supervision domain transfer benchmark. We compare MoCo-v2 and $t$ -MoCo-v2 on Aircraft, Birdsnap, Caltech101, Cars, CIFAR10, CIFAR100, DTD, Pets, and SUN397. We follow transfer settings in Ericsson et al. (2021) to finetune the pre-trained models. The results are reported in Table 1. Our model $t$ -MoCo-v2 surpasses MoCo-v2 in 8 out of 9 datasets, showing a significantly stronger transfer ability. Notice that our model is pre-trained with 200 epochs, surprisingly, compared with the original MoCo-v2 model pre-trained with 800 epochs, the fine-tuning results of $t$ -MoCo-v2 are still better on Birdsnap, Caltech101, CIFAR100, and SUN397. + +Out-of-domain generalization. As illustrated in Section 3.1.4, standard SSCL methods, e.g., SimCLR, MoCo, etc., could suffer from OOD shift. To demonstrate the advantage of our modification, we investigate the effectiveness of our method on OOD generalization benchmarks: PACS Li et al. (2017), VLCS Fang et al. (2013), Office-Home Venkateswara et al. (2017). 
We follow the standard way to conduct the experiment, i.e., choosing one domain as the test domain and using the remaining domains as training domains, which is named the leave-one-domain-out protocol. As can be seen in Table 2, our $t$ -MoCo-v2 indicates significant improvement over MoCo-v2. Both experiments indicate our modification exhibits substantial enhancement for domain transfer and OOD generalization ability. Similar to domain transfer scenario, compared with the original MoCo-v2 model pre-trained with 800 epochs, $t$ -MoCo-v2 is better on all of the three datasets. More experiment details, including detailed comparisons, are in Appendix C. + +# 6 DISCUSSION + +This work proposes a novel perspective that interprets SSCL methods as a type of SNE methods, which facilitates both deeper theoretical understandings and methodological guidelines for practical improvement. More interpretations of SSCL from preserving the distance between distributions can be found in Appendix B. Our analysis has limitations and the insights from SNE are not universally applicable for all SSCL methods, e.g., Zbontar et al. (2021); Yang et al. (2021) don't fit in our framework. However, this work is an interesting addition to existing theoretical works of SSCL and more investigations can be made along this path. While there are various extensions of the classic SNE, in this work, as a proof of concept, we mainly showcased practical improvements from $t$ -SNE. We expect more modifications can be developed by borrowing advances in the SNE literature, e.g., changing to $f$ -divergences (Im et al., 2018) or consider optimal transport Bunne et al. (2019); Salmona et al. (2021); Mialon et al. (2020). On the other hand, standard SNE methods can also borrow existing techniques in SSCL to improve their performance on more complicated data, e.g., incorporating data augmentations instead of or on top of pre-defined distances. 
In this sense, by choosing feature dimension to be 2, various SSCL methods can also be used as data visualization tools (Böhm et al., 2022; Damrich et al., 2022). Specifically on CIFAR-10, standard $t$ -SNE can barely reveal any clusters while our $t$ -SimCLR with $d_z = 2$ produces much more separation among different labels. More details can be found in Appendix C.7. + +# REFERENCES + +Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019. +Sanjeev Arora, Wei Hu, and Pravesh K Kothari. An analysis of the t-sne algorithm for data visualization. In Conference On Learning Theory, pp. 1455-1462. PMLR, 2018. +Sanjeev Arora, Hrishikesh Khandeparkar, Mikhail Khodak, Orestis Plevrakis, and Nikunj Saunshi. A theoretical analysis of contrastive unsupervised representation learning. arXiv preprint arXiv:1902.09229, 2019. +Shahar Azulay, Edward Moroshko, Mor Shpigel Nacson, Blake E Woodworth, Nathan Srebro, Amir Globerson, and Daniel Soudry. On the implicit bias of initialization shape: Beyond infinitesimal mirror descent. In International Conference on Machine Learning, pp. 468-477. PMLR, 2021. +Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. In Advances in Neural Information Processing Systems, pp. 15535-15545, 2019. +Haoyue Bai, Rui Sun, Lanqing Hong, Fengwei Zhou, Nanyang Ye, Han-Jia Ye, S-H Gary Chan, and Zhenguo Li. Decaug: Out-of-distribution generalization via decomposed feature representation and semantic augmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 6705-6713, 2021. +Randall Balestriero and Yann LeCun. Contrastive and non-contrastive self-supervised learning recover global and local spectral embedding methods. arXiv preprint arXiv:2205.11508, 2022. +Jan Niklas Böhm, Philipp Berens, and Dmitry Kobak. 
Unsupervised visualization of image datasets using contrastive learning. arXiv preprint arXiv:2210.09879, 2022. +Charlotte Bunne, David Alvarez-Melis, Andreas Krause, and Stefanie Jegelka. Learning generative models across incomparable spaces. In International conference on machine learning, pp. 851-861. PMLR, 2019. +T Tony Cai and Rong Ma. Theoretical foundations of t-sne for visualizing high-dimensional clustered data. arXiv preprint arXiv:2105.07536, 2021. +Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. In Advances in Neural Information Processing Systems, 2020. +Kai Chen, Lanqing Hong, Hang Xu, Zhenguo Li, and Dit-Yan Yeung. Multisiam: Self-supervised multi-instance siamese representation learning for autonomous driving. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 7546-7554, 2021. +Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. arXiv preprint arXiv:2002.05709, 2020a. +Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15750-15758, 2021. +Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b. +Sebastian Damrich, Niklas Böhm, Fred A Hamprecht, and Dmitry Kobak. From $t$ -sne to umap with contrastive learning. In International Conference on Learning Representations, 2022. +Qishi Dong, Awais Muhammad, Fengwei Zhou, Chuanlong Xie, Tianyang Hu, Yongxin Yang, Sung-Ho Bae, and Zhenguo Li. Zood: Exploiting model zoo for out-of-distribution generalization. arXiv preprint arXiv:2210.09236, 2022. +T Erber and GM Hockney. Equilibrium configurations of n equal charges on a sphere. 
Journal of Physics A: Mathematical and General, 24(23):L1369, 1991. + +Linus Ericsson, Henry Gouk, and Timothy M Hospedales. How well do self-supervised models transfer? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5414-5423, 2021. +Chen Fang, Ye Xu, and Daniel N. Rockmore. Unbiased metric learning: On the utilization of multiple datasets and web images for softening bias. 2013 IEEE International Conference on Computer Vision, pp. 1657-1664, 2013. +Hongchao Fang, Sicheng Wang, Meng Zhou, Jiayuan Ding, and Pengtao Xie. Cert: Contrastive self-supervised learning for language understanding. arXiv preprint arXiv:2005.12766, 2020. +Tianyu Gao, Xingcheng Yao, and Danqi Chen. Simcse: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821, 2021. +John M Giorgi, Osvald Nitski, Gary D Bader, and Bo Wang. Declutr: Deep contrastive learning for unsupervised textual representations. arXiv preprint arXiv:2006.03659, 2020. +Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733, 2020. +Ishaan Gulrajani and David Lopez-Paz. In search of lost domain generalization. In International Conference on Learning Representations, 2021. +Thomas Hamm and Ingo Steinwart. Adaptive learning rates for support vector machines working on data with low intrinsic dimension. The Annals of Statistics, 49(6):3153-3180, 2021. +Jeff Z HaoChen, Colin Wei, Adrien Gaidon, and Tengyu Ma. Provable guarantees for self-supervised deep learning with spectral contrastive loss. Advances in Neural Information Processing Systems, 34, 2021. +Jeff Z HaoChen, Colin Wei, Ananya Kumar, and Tengyu Ma. 
Beyond separability: Analyzing the linear transferability of contrastive representations to related subpopulations. arXiv preprint arXiv:2204.02683, 2022. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016. +Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9729-9738, 2020a. +Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377, 2021. +Yue He, Zheyan Shen, and Peng Cui. Towards non-iid image classification: A dataset and baselines. Pattern Recognition, pp. 107383, 2020b. +Geoffrey Hinton and Sam T Roweis. Stochastic neighbor embedding. In NIPS, volume 15, pp. 833-840. CiteSeer, 2002. +Geoffrey E. Hinton, Simon Osindero, and Yee Whye Teh. A fast learning algorithm for deep belief nets. Neural Computation, 18:1527-1554, 2006. +R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670, 2018. +Weiran Huang, Mingyang Yi, and Xuyang Zhao. Towards the generalization of contrastive self-supervised learning. arXiv preprint arXiv:2111.00743, 2021. +Daniel Jiwoong Im, Nakul Verma, and Kristin Branson. Stochastic neighbor embedding under f-divergences. arXiv preprint arXiv:1811.01247, 2018. + +Wenlong Ji, Zhun Deng, Ryumei Nakada, James Zou, and Linjun Zhang. The power of contrast for feature learning: A theoretical analysis. arXiv preprint arXiv:2110.02473, 2021. +Li Jing, Pascal Vincent, Yann LeCun, and Yuandong Tian. 
Understanding dimensional collapse in contrastive self-supervised learning. arXiv preprint arXiv:2110.09348, 2021. +Alex Krizhevsky. Learning multiple layers of features from tiny images. University of Toronto, 2009. +David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In International Conference on Machine Learning, pp. 5815-5826. PMLR, 2021. +Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy M. Hospedales. Deeper, broader and artier domain generalization. 2017 IEEE International Conference on Computer Vision (ICCV), pp. 5543-5551, 2017. +Yunfan Li, Peng Hu, Zitao Liu, Dezhong Peng, Joey Tianyi Zhou, and Xi Peng. Contrastive clustering. In 2021 AAAI Conference on Artificial Intelligence (AAAI), 2021. +Zengyi Li, Yubei Chen, Yann LeCun, and Friedrich T. Sommer. Neural manifold clustering and embedding. ArXiv, abs/2201.10000, 2022. +George C Linderman and Stefan Steinerberger. Clustering with t-sne, provably. SIAM Journal on Mathematics of Data Science, 1(2):313-332, 2019. +Zhili Liu, Jianhua Han, Kai Chen, Lanqing Hong, Hang Xu, Chunjing Xu, and Zhenguo Li. Task-customized self-supervised pre-training with scalable dynamic routing. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 1854-1862, 2022. +Yao Lu, Jukka Corander, and Zhirong Yang. Doubly stochastic neighbor embedding on spheres. Pattern Recognition Letters, 128:100-106, 2019. +Jiajun Ma, Tianyang Hu, and Wenjia Wang. Deciphering the projection head: Representation evaluation self-supervised learning. arXiv preprint arXiv:2301.12189, 2023. +JBM Melisseneny. How different can colours be? maximum separation of points on a spherical octant. Proceedings of the Royal Society of London. Series A: Mathematical, Physical and Engineering Sciences, 454(1973):1499-1508, 1998. +Facundo Memoli. 
Gromov-wasserstein distances and the metric approach to object matching. Foundations of computational mathematics, 11(4):417-487, 2011. +Grégoire Mialon, Dexiong Chen, Alexandre d'Aspremont, and Julien Mairal. A trainable optimal transport embedding for feature aggregation. In International Conference on Learning Representations (ICLR), 2020. +Edward Moroshko, Blake E Woodworth, Suriya Gunasekar, Jason D Lee, Nati Srebro, and Daniel Soudry. Implicit bias in deep linear classification: Initialization scale vs training accuracy. Advances in neural information processing systems, 33:22182-22193, 2020. +Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pp. 8748-8763. PMLR, 2021. +Noam Razin and Nadav Cohen. Implicit regularization in deep learning may not be explainable by norms. Advances in neural information processing systems, 33:21174-21187, 2020. +Antoine Salmona, Julie Delon, and Agnès Desolneux. Gromov-wasserstein distances between gaussian distributions. arXiv preprint arXiv:2104.07970, 2021. + +Nikunj Saunshi, Jordan Ash, Surbhi Goel, Dipendra Misra, Cyril Zhang, Sanjeev Arora, Sham Kakade, and Akshay Krishnamurthy. Understanding contrastive learning requires incorporating inductive biases. arXiv preprint arXiv:2202.14037, 2022. +Pieter Merkus Lambertus Tammes. On the origin of number and arrangement of the places of exit on the surface of pollen-grains. Recueil des travaux botaniques nederlandais, 27(1):1-84, 1930. +Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. arXiv preprint arXiv:1906.05849, 2019. 
+Yonglong Tian, Chen Sun, Ben Poole, Dilip Krishnan, Cordelia Schmid, and Phillip Isola. What makes for good views for contrastive learning? arXiv preprint arXiv:2005.10243, 2020. +Christopher Tosh, Akshay Krishnamurthy, and Daniel Hsu. Contrastive learning, multi-view redundancy, and linear models. arXiv preprint arXiv:2008.10150, 2020. +Laurens Van Der Maaten. Learning a parametric embedding by preserving local structure. In Artificial intelligence and statistics, pp. 384-391. PMLR, 2009. +Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008. +Hemanth Venkateswara, Jose Eusebio, Shayok Chakraborty, and Sethuraman Panchanathan. Deep hashing network for unsupervised domain adaptation. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5385-5394, 2017. +Vikas Verma, Thang Luong, Kenji Kawaguchi, Hieu Pham, and Quoc Le. Towards domain-agnostic contrastive learning. In International Conference on Machine Learning, pp. 10530–10541. PMLR, 2021. +Haonan Wang, Jieyu Zhang, Qi Zhu, and Wei Huang. Augmentation-free graph contrastive learning. arXiv preprint arXiv:2204.04874, 2022. +Tongzhou Wang and Phillip Isola. Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In International Conference on Machine Learning, pp. 9929-9939. PMLR, 2020. +Colin Wei, Kendrick Shen, Yining Chen, and Tengyu Ma. Theoretical analysis of self-training with deep networks on unlabeled data. arXiv preprint arXiv:2010.03622, 2020. +Zixin Wen and Yuanzhi Li. Toward understanding the feature learning process of self-supervised contrastive learning. arXiv preprint arXiv:2105.15134, 2021. +Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3733-3742, 2018. 
+Zhuofeng Wu, Sinong Wang, Jiatao Gu, Madian Khabsa, Fei Sun, and Hao Ma. Clear: Contrastive learning for sentence representation. arXiv preprint arXiv:2012.15466, 2020. +Yuanmeng Yan, Rumei Li, Sirui Wang, Fuzheng Zhang, Wei Wu, and Weiran Xu. Consert: A contrastive framework for self-supervised sentence representation transfer. arXiv preprint arXiv:2105.11741, 2021. +Ceyuan Yang, Zhirong Wu, Bolei Zhou, and Stephen Lin. Instance localization for self-supervised detection pretraining. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3987-3996, 2021. +Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. Filip: Fine-grained interactive language-image pre-training. arXiv preprint arXiv:2111.07783, 2021. +Chun-Hsiao Yeh, Cheng-Yao Hong, Yen-Chi Hsu, Tyng-Luh Liu, Yubei Chen, and Yann LeCun. Decoupled contrastive learning. arXiv preprint arXiv:2110.06848, 2021. + +Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stephane Deny. Barlow twins: Self-supervised learning via redundancy reduction. arXiv preprint arXiv:2103.03230, 2021. +Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning (still) requires rethinking generalization. Communications of the ACM, 64(3):107-115, 2021. +Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. arXiv preprint arXiv:1710.09412, 2017. +Xuyang Zhao, Tianqi Du, Yisen Wang, Jun Yao, and Weiran Huang. Arcl: Enhancing contrastive learning with augmentation-robust representations. In International Conference on Learning Representations (ICLR), 2023. +Roland S Zimmermann, Yash Sharma, Steffen Schneider, Matthias Bethge, and Wieland Brendel. Contrastive learning inverts the data generating process. arXiv preprint arXiv:2102.08850, 2021. 
+ 
+![](images/c82662cc8e0904e93a0be15d4fd0d1b398b68fec4cc2d96c32fe8c78a7dabacf.jpg)
+Figure A.3: Cosine similarity heat map of learned features from SimCLR on CIFAR-10 dataset. The darker the color, the larger the similarity.
+
+# A TECHNICAL DETAILS
+
+# A.1 IMPLICIT BIAS OF SIMCLR ON CIFAR-10.
+
+Figure A.3 plots the cosine similarity heat map of learned features from SimCLR on CIFAR-10 dataset. To calculate the similarity of class A (figures denoted by $a_i$ ) to class B (figures denoted by $b_i$ ), we first calculate the mean of $b_i$ as $\bar{b}$ . Then, we sum up $\sum_{i} \mathrm{sim}(a_i, \bar{b})$ and plot it with colors. Hence, the similarity matrix shown in Figure A.3 is not symmetric.
+
+# A.2 PROOF OF PROPOSITION 3.2
+
+Recall the domain-agnostic data augmentation process. For any $\boldsymbol{x}_i$ , the probability density of having $t \in \mathbb{R}^d$ as its augmented point can be characterized as
+
+$$
+P _ {\boldsymbol {t} | \boldsymbol {x} _ {i}} = \mathbb {P} (\boldsymbol {x} _ {i} \text { and } \boldsymbol {x} _ {i} ^ {\prime} = \boldsymbol {t} \text { for a positive pair} | \boldsymbol {x} _ {i}) = \phi (\boldsymbol {t} - \boldsymbol {x} _ {i}).
+$$
+
+For isotropic Gaussian densities with mean 0 and covariance matrix $\sigma^2\mathbf{I}$ , $\phi (\pmb {t} - \pmb {x}_i)\propto \exp (-\| \pmb {t} - \pmb {x}_i\| _2^2 /2\sigma^2)$ , which is monotonic with the $l_{2}$ distance between $\pmb{t}$ and $\pmb{x}_i$ .
+
+# A.3 INVESTIGATIONS ON $C(f)$
+
+Figures A.4 and A.5 illustrate the evolution of different complexity measurements during the training process under the Gaussian mixture setting and the CIFAR-10 respectively.
+
+In the Gaussian mixture setting, the feature extractor is a fully connected ReLU network. Besides $C(f)$ , we also evaluate the popular sum of squared weights. The observations on SimCLR are listed as below:
+
+- The expected Lipschitz constant $C(f)$ is small at initialization. 
It first increases (till around 100 iterations) and then consistently decreases. This empirically supports the implicit bias towards minimizing $C(f)$ .
+- $C(f)$ and the sum of squared weights share very similar patterns.
+- The SNE loss is non-increasing, as if we are doing stochastic neighbor embedding using $l_{2}$ -distance.
+
+In the CIFAR-10 case, the feature extractor is ResNet-18 plus a fully-connected projection layer. The output from ResNet-18 is usually called representation (512-dimensional) and is utilized for downstream tasks while the projection (128-dimensional) is used for training. Such a representation-projection set up is common in SSCL. Ma et al. (2023) aimed to decipher the projection head and revealed that the projection feature tends to be more uniformly distributed while the representation feature exhibits stronger alignment. Besides $C(f)$ , we also evaluate the $l_{2}$ -norm of the representation. The observations for SimCLR and $t$ -SimCLR on CIFAR-10 are summarized as below:
+
+![](images/ee2247776106f773818e3694de0b475733f5a68626668795dd121ba375cda8e7.jpg)
+Figure A.4: Empirical evaluation on the complexity of the learned feature mapping during training under the Gaussian mixture setting. Two complexity measurements are considered, i.e., $C(f)$ as in (3.3) and the SNE loss as in (2.2). The SNE loss here only serves as an indicator for how well the pairwise distances are preserved. The training objective is the standard InfoNCE loss. The SNE loss decreases quickly in the first 100 iterations and then stays flat.
+
+- $C(f)$ for the projection layer shares similar patterns as in the Gaussian mixture case, first increases and then decreases. However, $C(f)$ for the representation layer monotonically decreases.
+- $C(f)$ for the projection layer and the $l_{2}$ -norm in the representation layer share almost identical patterns.
+- Compared with SimCLR, both the calculated $C(f)$ and $l_{2}$ -norm are much smaller for $t$ -SimCLR. 
+ +In conclusion, on one hand, our empirical results demonstrate that the complexity of the feature extractor $C(f)$ does decrease during training and seem to be implicitly minimized. On the other hand, its trend is shared with other more popularly used complexity measurements. + +# A.4 PROOF OF COROLLARY 3.6 + +In this section, we illustrate with rigor how the hypothesized implicit bias can give rise to structure-preserving property of SSCL. Corollary 3.6 states that minimizing the (Lipschitz) complexity of the feature mapping will also result in the best match between $P$ and $Q$ (under permutation). To provide more theoretical insight, we present the following lemma in the simpler vector-matching case. + +Lemma A.1. Let $0 < x_{1} < \dots < x_{m}$ and $0 < y_{1} < \dots < y_{m}$ be two real-valued sequences, normalized such that $\sum_{i=1}^{m} x_{i}^{2} = \sum_{i=1}^{m} y_{i}^{2} = 1$ . Consider a permutation $\pi$ of $\{1, \dots, m\}$ and denote all such permutations as $T$ . Then + +$$ +\underset {\pi \in T} {\operatorname {a r g m i n}} \sum_ {i = 1} ^ {m} \frac {y _ {\pi (i)}}{x _ {i}} = \underset {\pi \in T} {\operatorname {a r g m i n}} \sum_ {i = 1} ^ {m} \left(x _ {i} - y _ {\pi (i)}\right) ^ {2} := \pi^ {*}, +$$ + +where $\pi^{*}(i) = i$ for all $i = 1,\dots ,m$ + +Proof. By the rearrangement inequality, we have + +$$ +\sum_ {i = 1} ^ {m} \frac {y _ {\pi (i)}}{x _ {i}} \geq \sum_ {i = 1} ^ {m} \frac {y _ {i}}{x _ {i}}. +$$ + +Similarly, + +$$ +\sum_ {i = 1} ^ {m} \left(x _ {i} - y _ {\pi (i)}\right) ^ {2} = \sum_ {i = 1} ^ {m} x _ {i} ^ {2} + \sum_ {i = 1} ^ {m} y _ {i} ^ {2} - 2 \sum_ {i = 1} ^ {m} x _ {i} \cdot y _ {\pi (i)} \geq 2 - 2 \sum_ {i = 1} ^ {m} x _ {i} \cdot y _ {i}. +$$ + +![](images/0ea42d2746acd11d67fc60fd568b58c37e8b9f9e8f6c2abc3e15cb4d9e33226e.jpg) +(a) SimCLR on CIFAR-10. + +![](images/ddf39eecf32ac6ecfeefc56f7138a32319b57f38df1543347a566b8405e5afbf.jpg) +(b) $t$ -SimCLR on CIFAR-10. 
+ +![](images/a415ec116f011833886b5c2061399667a41ae2137ad8d707a995d43e79618e66.jpg) +Figure A.5: Empirical evaluation on the complexity of the learned feature mapping during training on CIFAR-10. Two complexity measurements are considered, i.e., $C(f)$ as in (3.3) and $l_{2}$ -norm. Specifically, we calculate the expected Lipschitz constant on both the representation layer (512-dimensional) and the projection layer (128-dimensional). Figure (a) and (b) show the trends (along the 200 training epochs) for SimCLR and $t$ -SimCLR respectively. + +Lemma A.1 gives a vector-version illustration of our Corollary 3.6, stating that minimizing the expected derivative (to zero) of the mapping function $f$ , i.e., $\sum_{i}f(x_{i}) / x_{i}$ leads to preserving the norm difference of the input vector and output vector. + +Next, we provide the proof of Theorem 3.5. + +Proof of Theorem 3.5. Straightforwardly, we can write + +$$
\begin{array}{l} \left\| \bar {P} - Q ^ {\pi} \right\| _ {F} ^ {2} = \sum_ {i \neq j} \left(\frac {1}{p _ {i j}} + q _ {\pi (i) \pi (j)}\right) ^ {2} \\ = \sum_ {i \neq j} \frac {1}{p _ {i j} ^ {2}} + \sum_ {i \neq j} q _ {\pi (i) \pi (j)} ^ {2} + 2 \sum_ {i \neq j} \frac {q _ {\pi (i) \pi (j)}}{p _ {i j}} \\ = 2 C _ {1} (P, Q ^ {\pi}) + \sum_ {i \neq j} \frac {1}{p _ {i j} ^ {2}} + \sum_ {i \neq j} q _ {i j} ^ {2} \\ \end{array} +$$ + +Thus, minimizing $C_1(P, Q^\pi)$ also minimizes $\| \bar{P} - Q^\pi \|_F^2$ . + +Theorem 3.5 is a straightforward generalization of Lemma A.1. Next, we provide proof for Corollary 3.6, restated below. + +Proof of Corollary 3.6. 
Recall the SimCLR loss $L_{\mathrm{InfoNCE}} = \frac{1}{2n}\sum_{i = 1}^{n}(l(\pmb{x}_i,\pmb{x}_i')) + l(\pmb{x}_i',\pmb{x}_i))$ , where + +$$
l (\pmb {x} _ {i}, \pmb {x} _ {i} ^ {\prime}) = - \log \frac {\exp (\mathrm {s i m} (f (\pmb {x} _ {i}) , f (\pmb {x} _ {i} ^ {\prime})) / \tau)}{\sum_ {x \in \mathcal {D} _ {n} \cup \mathcal {D} _ {n} ^ {\prime} \setminus \{\pmb {x} _ {i} \}} \exp (\mathrm {s i m} (f (\pmb {x} _ {i}) , f (\pmb {x})) / \tau)}. +$$ + +Without loss of generality, let $\tau = 1$ . Notice that $l(\pmb{x}_i, \pmb{x}_i')$ is monotonically decreasing as $\mathrm{sim}(f(\pmb{x}_i), f(\pmb{x}_i'))$ increases, due to the monotonicity of function $\frac{x}{x + c}$ with respect to $x > 0$ for any $c > 0$ . Hence, in order for $L_{\mathrm{InfoNCE}}$ to be minimized, perfect alignment is required, i.e., $f(\pmb{x}_i) = f(\pmb{x}_i')$ for any $i = 1, \dots, n$ . + +With perfect alignment achieved, $L_{\mathrm{InfoNCE}}$ only concerns the pairwise similarity between negative samples $f(\pmb{x}_i)$ 's, which can be simplified as $L_{\mathrm{InfoNCE}} \geq L_{\mathrm{uniform}}$ where + +$$
\begin{array}{l} L _ {\text {u n i f o r m}} = \frac {1}{n} \sum_ {i = 1} ^ {n} - \log \frac {e}{e + \sum_ {j \neq i} \exp (\mathrm {s i m} (f (\boldsymbol {x} _ {i}) , f (\boldsymbol {x} _ {j})))} \\ \geq \log \left(\frac {1}{n} \sum_ {i = 1} ^ {n} \left(1 + \frac {1}{e} \sum_ {j \neq i} \exp (\mathrm {s i m} (f (\boldsymbol {x} _ {i}), f (\boldsymbol {x} _ {j})))\right)\right) \\ \geq \log \left(1 + \frac {1}{n \cdot e} \sum_ {1 \leq i \neq j \leq n} \exp (\mathrm {s i m} (f (\pmb {x} _ {i}), f (\pmb {x} _ {j})))\right). \\ \end{array} +$$ + +$L_{\mathrm{uniform}}$ can be minimized by mapping $\pmb{x}_i$ 's as distant as possible, hence the connection to the Tammes problem and the uniformity principle. + +With sufficient capacity of the feature mapping $f$ , the SimCLR loss can be minimized to its (empirical) global minima. 
However, such $f$ is not unique since $L_{\mathrm{InfoNCE}}$ is invariant to permutations of mapping relationships from $x_i$ to $f(x_i)$ . If $f_n^*$ further minimizes $C(f)$ on the sample level, i.e., + +$$ +f_{n}^{*}:= \operatorname *{argmin}_{f}C_{n}(f) = \operatorname *{argmin}_{f}\sum_{1\leq i\neq j\leq n}\frac{\|f(\boldsymbol{x}_{i}) - f(\boldsymbol{x}_{j})\|_{2}}{\|\boldsymbol{x}_{i} - \boldsymbol{x}_{j}\|_{2}}, +$$ + +Then, $f_{n}^{*}$ also solves a type of SNE problem with uniformity constraint (3.4) as stated in Theorem 3.5. To see this, if we define $q_{ij} = -\|f(\pmb{x}_i) - f(\pmb{x}_j)\|_2$ and $p_{ij} = -\|x_i - x_j\|_2$ , which is reasonable since the larger the distance, the smaller the similarity, we can directly apply the results in Theorem 3.5. + +![](images/9e37b82f05edc2eee2998cb2961fe037c95f842d678bb12ca52eb553ca025bb9.jpg) + +Remark A.2. As can be seen from Theorem 3.5 and the proof of Corollary 3.6, we showcase the relationship between minimizing $C(f)$ and structure preserving property by considering a special SNE problem, where the pairwise similarity is not modeled by Gaussian as standard, hence the word "resembling" in Corollary 3.6. Although $q_{ij} = -\| f(\pmb{x}_i) - f(\pmb{x}_j)\|_2$ is unorthodox, it is reasonable since the larger the distance, the smaller the similarity. If we consider the SNE method as in Hinton et al. (2006), our proof does not go through directly and demands more complicated analysis. However, our results are still valid in connecting the complexity of the feature map to the pairwise similarity matching. + +Our statement in Corollary 3.6 requires perfect alignment or perfect uniformity. When the assumptions are not perfectly met, we can still obtain insights for the resulting feature mapping. 
Alignment and uniformity (Wang & Isola, 2020) is not the whole story of contrastive learning, and our identified structure-preserving property implicitly induced by complexity minimization provides another angle of the learning process. From this perspective, contrastive learning can be thought of as a combination of alignment and SNE with uniformity constraint. In Figure A.3, while obtaining approximate alignment and uniformity, the feature mapping also preserves the relative relationships of the clusters (labels). + +# A.5 ALIGNMENT AND UNIFORMITY OF T-SIMCLR + +Due to the change of training objective, we may want to reevaluate the properties of the learned feature from $t$ -SimCLR. We will show that alignment still holds while uniformity is changed (to infinity). + +Let us consider a compact region $\Omega \subset \mathbb{R}^d$ and $\pmb{x}_i \in \Omega$ . Let $t$ be the transformation such that the augmented data point $\pmb{x}_i' = t(\pmb{x}_i)$ is still in $\Omega$ . Wang & Isola (2020) showed that the contrastive loss can be decomposed into the alignment loss and the uniformity loss. Zimmermann et al. (2021) further showed that the contrastive loss converges to the cross-entropy between latent distributions, where the underlying latent space is assumed to be uniform, and the positive pairs are specified to be an exponential distribution. In this section, we show a parallel result, which states that in the population level, the $t$ -SNE loss is the cross-entropy between two distributions of generating positive pairs. + +Theorem A.3. Let $H(\cdot, \cdot)$ be the cross entropy between distributions. 
Let $p(x)$ be the density of $x$ , $p(\cdot | x)$ be the conditional density of generating a positive pair, and define + +$$
q _ {f} \left(\boldsymbol {x} ^ {\prime} \mid \boldsymbol {x}\right) = C _ {f} (\boldsymbol {x}) ^ {- 1} \frac {p \left(\boldsymbol {x} ^ {\prime}\right)}{1 + \left\| f (\boldsymbol {x}) - f \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2} ^ {2}}, \text {with} \quad C _ {f} (\boldsymbol {x}) = \int_ {\Omega} \frac {p \left(\boldsymbol {x} ^ {\prime}\right)}{1 + \left\| f (\boldsymbol {x}) - f \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2} ^ {2}} d \boldsymbol {x} ^ {\prime}. +$$ + +Then, we have + +$$
\mathbb {E} _ {\boldsymbol {x} \sim p (\boldsymbol {x})} H (p (\cdot | \boldsymbol {x}), q _ {f} (\cdot | \boldsymbol {x})) = L _ {a} (f) + L _ {u} (f), \tag {A.1} +$$ + +which corresponds to the population-level $t$ -SimCLR loss where + +$$
L _ {a} = \mathbb {E} _ {\boldsymbol {x} \sim p (\boldsymbol {x})} \mathbb {E} _ {\boldsymbol {x} ^ {\prime} \sim p \left(\boldsymbol {x} ^ {\prime} \mid \boldsymbol {x}\right)} \log \left(1 + \| f (\boldsymbol {x}) - f \left(\boldsymbol {x} ^ {\prime}\right) \| _ {2} ^ {2}\right), +$$ + +$$
L _ {u} = \mathbb {E} _ {\boldsymbol {x} \sim p (\boldsymbol {x})} \log \mathbb {E} _ {\widetilde {\boldsymbol {x}} \sim p (\widetilde {\boldsymbol {x}})} \big (1 + \| f (\boldsymbol {x}) - f (\widetilde {\boldsymbol {x}}) \| _ {2} ^ {2} \big) ^ {- 1}. +$$ + +Proof. 
Note that + +$$ +\begin{array}{l} H (p (\cdot | \boldsymbol {x}), q _ {f} (\cdot | \boldsymbol {x})) \\ = - \int_ {\Omega} p (\boldsymbol {x} ^ {\prime} | \boldsymbol {x}) \log \left(\frac {p \left(\boldsymbol {x} ^ {\prime}\right)}{1 + \| f (\boldsymbol {x}) - f \left(\boldsymbol {x} ^ {\prime}\right) \| _ {2} ^ {2}}\right) d \boldsymbol {x} ^ {\prime} + \log C _ {f} (\boldsymbol {x}) \\ = \int_ {\Omega} p (\boldsymbol {x} ^ {\prime} | \boldsymbol {x}) \log (1 + \| f (\boldsymbol {x}) - f (\boldsymbol {x} ^ {\prime}) \| _ {2} ^ {2}) \mathrm {d} \boldsymbol {x} ^ {\prime} - \int_ {\Omega} p (\boldsymbol {x} ^ {\prime} | \boldsymbol {x}) \log (p (\boldsymbol {x} ^ {\prime})) \mathrm {d} \boldsymbol {x} ^ {\prime} + \log \int_ {\Omega} \frac {p (\boldsymbol {x} ^ {\prime})}{1 + \| f (\boldsymbol {x}) - f (\boldsymbol {x} ^ {\prime}) \| _ {2} ^ {2}} \mathrm {d} \boldsymbol {x} ^ {\prime} \\ = \int_ {\Omega} p (\pmb {x} ^ {\prime} | \pmb {x}) \log (1 + \| f (\pmb {x}) - f (\pmb {x} ^ {\prime}) \| _ {2} ^ {2}) \mathrm {d} \pmb {x} ^ {\prime} - \int_ {\Omega} p (\pmb {x} ^ {\prime} | \pmb {x}) \log (p (\pmb {x} ^ {\prime})) \mathrm {d} \pmb {x} ^ {\prime} + \log \mathbb {E} _ {\pmb {x} ^ {\prime} \sim p (\pmb {x} ^ {\prime})} (1 + \| f (\pmb {x}) - f (\pmb {x} ^ {\prime}) \| _ {2} ^ {2}) ^ {- 1}. 
\\ \end{array} +$$ + +Taking expectation with respect to $x$ leads to + +$$ +\begin{array}{l} \mathbb {E} _ {\boldsymbol {x} \sim p (\boldsymbol {x})} H (p (\cdot | \boldsymbol {x}), q _ {f} (\cdot | \boldsymbol {x})) \\ = \mathbb {E} _ {\boldsymbol {x} \sim p (\boldsymbol {x})} \mathbb {E} _ {\boldsymbol {x} ^ {\prime} \sim p (\boldsymbol {x} ^ {\prime} | \boldsymbol {x})} \log (1 + \| f (\boldsymbol {x}) - f (\boldsymbol {x} ^ {\prime}) \| _ {2} ^ {2}) + \mathbb {E} _ {\boldsymbol {x} \sim p (\boldsymbol {x})} \log \mathbb {E} _ {\widetilde {\boldsymbol {x}} \sim p (\widetilde {\boldsymbol {x}})} (1 + \| f (\boldsymbol {x}) - f (\widetilde {\boldsymbol {x}}) \| _ {2} ^ {2}) ^ {- 1} \\ - \int_ {\Omega} \int_ {\Omega} p (\boldsymbol {x}) p \left(\boldsymbol {x} ^ {\prime} \mid \boldsymbol {x}\right) \log \left(p \left(\boldsymbol {x} ^ {\prime}\right)\right) d \boldsymbol {x} ^ {\prime} d \boldsymbol {x} \\ = L _ {a} (f) + L _ {u} (f) - C _ {p}, \\ \end{array} +$$ + +where + +$$ +C _ {p} = \int_ {\Omega} \int_ {\Omega} p (\boldsymbol {x}) p \left(\boldsymbol {x} ^ {\prime} \mid \boldsymbol {x}\right) \log \left(p \left(\boldsymbol {x} ^ {\prime}\right)\right) \mathrm {d} \boldsymbol {x} ^ {\prime} \mathrm {d} \boldsymbol {x} = \int_ {\Omega} \int_ {\Omega} p \left(\boldsymbol {x}, \boldsymbol {x} ^ {\prime}\right) \log \left(p \left(\boldsymbol {x} ^ {\prime}\right)\right) \mathrm {d} \boldsymbol {x} ^ {\prime} \mathrm {d} \boldsymbol {x} +$$ + +does not depend on $f$ . 
+ +$$ +\begin{array}{l} \mathbb {E} _ {\boldsymbol {x} \sim p (\boldsymbol {x})} H (p (\cdot | \boldsymbol {x}), q _ {f} (\cdot | \boldsymbol {x})) \\ = \int_ {\Omega} p (\boldsymbol {x}) \frac {1}{p (\boldsymbol {x})} \int_ {\Omega} p (\boldsymbol {x}, \boldsymbol {x} ^ {\prime}) \log \left(\frac {p \left(\boldsymbol {x} ^ {\prime}\right)}{1 + \| f (\boldsymbol {x}) - f \left(\boldsymbol {x} ^ {\prime}\right) \| _ {2} ^ {2}}\right) d \boldsymbol {x} ^ {\prime} d \boldsymbol {x} \\ - \int_ {\Omega} \int_ {\Omega} \frac {p (\boldsymbol {x}) p \left(\boldsymbol {x} ^ {\prime}\right)}{1 + \left\| f (\boldsymbol {x}) - f \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2} ^ {2}} d \boldsymbol {x} d \boldsymbol {x} ^ {\prime} \\ = \int_ {\Omega} \int_ {\Omega} p (\boldsymbol {x}, \boldsymbol {x} ^ {\prime}) \log \left(\frac {p \left(\boldsymbol {x} ^ {\prime}\right)}{1 + \left\| f (\boldsymbol {x}) - f \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2} ^ {2}}\right) d \boldsymbol {x} ^ {\prime} d \boldsymbol {x} \\ - \int_ {\Omega} \int_ {\Omega} \frac {p (\boldsymbol {x}) p \left(\boldsymbol {x} ^ {\prime}\right)}{1 + \left\| f (\boldsymbol {x}) - f \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2} ^ {2}} d \boldsymbol {x} d \boldsymbol {x} ^ {\prime}. \\ \end{array} +$$ + +![](images/309c05778265ac413422425956d2a2957455019a721cba777fcfa71a94d93a31.jpg) + +In Theorem A.3, $L_{a}$ is the alignment loss and $L_{u}$ is the uniformity loss. The decomposition is much more natural for $t$ -SimCLR as opposed to that in $L_{\mathrm{InfoNCE}}$ , mainly due to the change from conditional to joint distribution when modeling the pairwise similarity. Furthermore, if the $t$ -SimCLR loss is minimized, we must have $p(\cdot | \boldsymbol{x}) = q_{f}(\cdot | \boldsymbol{x})$ , provided $f$ has sufficient capacity. 
Note that if $p(\cdot | \boldsymbol{x}) = q_{f}(\cdot | \boldsymbol{x})$ , then $P_{j|i}$ and $Q_{j|i}$ are perfectly matched, which indicates that we obtain a perfect neighbor embedding. + +Theorem A.3 implies that the optimal feature mapping $f^{*}$ satisfies + +$$ +p (\cdot | \boldsymbol {x}) = q _ {f ^ {*}} (\cdot | \boldsymbol {x}), +$$ + +which further implies that for any $\pmb{x} \in \Omega$ + +$$ +\begin{array}{l} C _ {f ^ {*}} (\boldsymbol {x}) ^ {- 1} \frac {p (\boldsymbol {x} ^ {\prime})}{1 + \| f ^ {*} (\boldsymbol {x}) - f ^ {*} (\boldsymbol {x} ^ {\prime}) \| _ {2} ^ {2}} \propto C (\boldsymbol {x}) ^ {- 1} p (\boldsymbol {x} ^ {\prime} | \boldsymbol {x}) \\ \Leftrightarrow C _ {f ^ {*}} \left(\boldsymbol {x}\right) ^ {- 1} \frac {1}{1 + \left\| f ^ {*} (\boldsymbol {x}) - f ^ {*} \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2} ^ {2}} \propto C (\boldsymbol {x}) ^ {- 1} \frac {p \left(\boldsymbol {x} , \boldsymbol {x} ^ {\prime}\right)}{p (\boldsymbol {x}) p \left(\boldsymbol {x} ^ {\prime}\right)}, \tag {A.2} \\ \end{array} +$$ + +where $C(\pmb{x}) = \int p(\pmb{x}'|\pmb{x})\mathrm{d}\pmb{x}'$ . Unlike the usual normalized SimCLR, $t$ -SNE does not assume any special structure on $f$ (e.g., $\| f\| _2 = 1$ ), thus $f$ can go to infinity. Comparing to the finite sample $t$ -SimCLR loss, the population version is trickier to analyze. This is because for a given point $\pmb{x}'$ , it can be an augmented sample of some $\pmb{x}$ (with probability $p(\pmb{x}'|\pmb{x})$ ), or a negative sample of $\pmb{x}$ (when we treat $\pmb{x}'$ as another sample point). This reflects the essential difficulty between population and finite samples in contrastive learning, not only for $t$ -SimCLR. + +For clustered data, (A.2) provides two important messages, provided that the augmentation is not too extreme and the augmented sample $\pmb{x}^{\prime}$ stays in the same cluster as the original $\pmb{x}$ . 
On one hand, when $\pmb{x}_1$ and $\pmb{x}_2$ belong to different clusters, the joint density $p(\pmb{x} = \pmb{x}_1, \pmb{x}^{\prime} = \pmb{x}_2)$ will be very small, close to zero, which indicates that $\| f^{*}(\pmb{x}_{1}) - f^{*}(\pmb{x}_{2}) \|_{2}$ is very large, tending to infinity. On the other hand, for $\pmb{x}_1$ and $\pmb{x}_2$ belonging to the same cluster, $p(\pmb{x} = \pmb{x}_1, \pmb{x}^{\prime} = \pmb{x}_2)$ will be relatively large. Hence, the features of the same cluster will stay close. Overall, we will observe similar clustered structure in the feature space. This is confirmed in the Gaussian mixture setting in Figure 1(c), in which case, the problem can be oversimplified as mapping 5 points in $\mathbb{R}^2$ to the unit-circle. + +# B CONNECTION TO DISTANCE BETWEEN DISTRIBUTIONS + +Through the lens of stochastic neighbor embedding, the feature learning process of SSCL methods can be seen as minimizing certain "distances" between distributions in different dimensions. Ideally, the feature should preserve the distributional information about the data. Since the data and the feature do not lie in the same metric space, quantitatively measuring their distributional distance is difficult. Fortunately, there are existing tools we can utilize, specifically, Gromov-Wasserstein distance (Mémoli, 2011; Salmona et al., 2021). + +Let $\mathcal{X}$ , $\mathcal{Z}$ be two Polish spaces, each endowed respectively with probability measures $p_x$ and $p_z$ . 
Given two measurable cost functions $c_x: \mathcal{X} \times \mathcal{X} \to \mathbb{R}$ , $c_z: \mathcal{Z} \times \mathcal{Z} \to \mathbb{R}$ , and $D: \mathbb{R} \times \mathbb{R} \to \mathbb{R}$ , the Gromov-Wasserstein distance can be defined as + +$$ +G W _ {p} (p _ {x}, p _ {z} | c _ {x}, c _ {z}) := \left(\inf _ {\pi \in \prod (p _ {x}, p _ {z})} \int_ {\mathcal {X} ^ {2} \times \mathcal {Z} ^ {2}} D (c _ {x} (x, x ^ {\prime}), c _ {z} (z, z ^ {\prime})) ^ {p} d \pi (x, z) d \pi (x ^ {\prime}, z ^ {\prime})\right) ^ {1 / p}, +$$ + +where $\prod(p_x,p_z)$ denotes all the joint distributions in $\mathcal{X}\times \mathcal{Z}$ such that the marginals are $p_x$ and $p_z$ . Typically, $D(c_{x},c_{z})$ is chosen to be $|c_{x} - c_{z}|$ and $c_{x}(x,x^{\prime})$ is usually chosen to be $\| x - x^{\prime}\| _p$ . The key idea of the Gromov-Wasserstein distance to circumvent the dimension mismatch is to change from comparing marginal distribution to pairwise distributions, which is very similar to the SNE objective. Consider Monge's formulation of the optimal transportation problem and let $z = f(x)$ . By choosing $c_{z}(z_{i},z_{j}) = \log (\widetilde{Q}_{j|i})$ with $\widetilde{Q}$ specified as in (3.1), $c_{x}(x_{i},x_{j}) = P_{j|i}$ with $\widetilde{P}$ specified as in (3.2) and letting $D(c_{x},c_{z}) = c_{x}(\log (c_{x}) - \log (c_{z}))$ , we have + +$$ +G W _ {1} \left(p _ {x}, p _ {f (x)}\right) \leq \mathbb {E} _ {x, x ^ {\prime}} \left(D \left(c _ {x} \left(x, x ^ {\prime}\right), c _ {z} \left(f (x), f \left(x ^ {\prime}\right)\right)\right)\right), +$$ + +where the right hand side recovers the expected InfoNCE loss. Hence, the SNE perspective can also be viewed as minimizing the Gromov-Wasserstein distance between $p_z$ and $p_x$ . + +It is worth noting that such an interpretation only relates to contrastive learning, not including generative-based self-supervised learning methods such as Masked AutoEncoder (MAE) (He et al., 2021). 
+ +![](images/004616d71e1680a34a339787f5e53d64b43daa1aa4d64ce960759a7b7662.jpg) +Figure C.6: Nearest neighbor test accuracy vs. training epochs. SimCLR and $t$ -SimCLR share similar trends and convergence speed. + +![](images/e879bb5c0ecaa514b123a11a38890e1aa2a909864a3cbe9384ea45dbf61f68b4.jpg) +Figure C.7: The histogram of IoUs for 1000 constructed positive pairs in CIFAR-10. The empirical distribution is almost symmetric around 0.5. + +# C EXPERIMENT DETAILS + +# C.1 CIFAR-10 SETTINGS + +CIFAR-10 (Krizhevsky, 2009) is a colorful image dataset with 50000 training samples and 10000 test samples from 10 categories. We use ResNet-18 (He et al., 2016) as the feature extractor, and the other settings such as projection head all follow the original settings of SimCLR (Chen et al., 2020a). To evaluate the quality of the features, we follow the KNN evaluation protocol (Wu et al., 2018), which computes the cosine similarities in the embedding space between the test image and its nearest neighbors, and make the prediction via weighted voting. We train each model with batch size of 256 and 200 epochs for quicker evaluation. For $t$ -SimCLR, without specifying otherwise, we grid search the $t_{df}$ and $\tau$ with range $\{1, 2, 5, 10\}$ and $\{1, 2, 5, 10\}$ respectively. + +Ablation of training epochs We also run the SimCLR and $t$ -SimCLR experiments in the more standard 1000 epochs setting. For SimCLR, we use batch size of 512, learning rate of 0.3, temperature of 0.7, and weight decay of 0.0001. For $t$ -SimCLR, we use batch size of 512, learning rate of 0.8, temperature of 10, weight decay of 0.0002, and $t_{df} = 5$ . The nearest neighbor accuracy for SimCLR is $87.2\%$ vs. that for $t$ -SimCLR is $88.8\%$ . + +# C.2 IMAGE AUGMENTATION + +When processing images, several popular augmentations are usually adopted (following the setting in SimCLR Chen et al. 
(2020a)), e.g., random resized crop (crops a random portion of image and resize it + +![](images/23a127fd23e914b5ea03435dbae0b73c8d6edafe506a41ac6bede17bad9bc060.jpg) +Figure C.8: Extension on Figure 2(b). Nearest neighbor classification accuracy for SimCLR vs. $t$ -SimCLR on both CIFAR-10 (in-distribution) and CIFAR-100 (out-of-distribution) using different feature dimensions. + +to the original size), horizontal flip, color jitter (randomly change the brightness, contrast, saturation and hue of an image). To illustrate the natural weighting scheme in Section 4.1, we considered random resized crop and specified the weights by the IoU (intersection over union) of the positive pair. In particular, two augmented images are created from an anchor image. Each augmentation crops a rectangular region of the image, denoted by $r_1, r_2$ respectively, and their IoU is defined by the area of intersection $r_1 \cap r_2$ divided by the area of the union $r_1 \cup r_2$ . The IoU is always between 0 and 1. In our experiment, we chose the default settings and Figure C.7 illustrates the IoU histogram of 1000 constructed positive pairs. + +# C.3 DEGREE OF FREEDOM IN $t$ -SIMCLR + +Feature dimension efficiency in OOD case. To further investigate the generalization ability of SSCL methods, we devise a challenging setting where the model is trained on CIFAR-10 and tested on CIFAR-100 classification. In this case, we evaluate the effect of increasing feature dimensions in the projection layer, as an extension on the CIFAR-10 in-distribution case. The results are shown in Figure C.8, where there are two things to note: + +- The gain of extra dimensions in the OOD case does vanish later than that in the in-distribution case. +- The advantage of $t$ -SimCLR over SimCLR is very significant with around $10\%$ improvement when $d = 128$ using nearest neighbor classification, indicating that $t$ -SimCLR produces better separated clusters. + +Relationship between $t_{df}$ and $d_z$ . 
The larger the degree of freedom $t_{df}$ , the less heavy-tailed the t-distribution. As $d_z$ decreases, the crowding problem becomes more severe and as recommended by (Van der Maaten & Hinton, 2008), a smaller $t_{df}$ tends to work better. We evaluate the sensitivity of $t_{df}$ (1, 5, 10) under different choices of $d_z$ (1, 2, 4, 8, 16, 32, 64, 128) in CIFAR-10 and the results are reported in Figure C.9. As can be seen, when $d_z$ is small (1, 2, 4, 8), $t_{df} = 1$ outperforms. Comparing $t_{df} = 5$ and $t_{df} = 10$ , the two perform similarly when $d_z$ is large (16, 32, 64, 128) but the smaller $t_{df} = 5$ yields better accuracy when $d_z = 1, 2, 4$ . + +Tuning temperature vs. tuning $t_{df}$ . As illustrated in Section 4.2, when the feature space dimension is low, the heavy-tailed t-distribution is a better choice than Gaussian to alleviate the crowding problem. + +![](images/ff8980fcd9c7131f7be01201d512cac63ded6038971372be41ff5b4f6462a672.jpg) +Figure C.9: Nearest neighbor classification accuracy on CIFAR-10 for $t$ -SimCLR using different feature dimensions and different degrees of freedom (t_df). + +Even though tuning the temperature of $L_{\mathrm{InfoNCE}}$ , i.e., making $\tau$ larger, can also have the effect of making the distribution less concentrated ( $\tau$ can be seen as the standard deviation), tuning temperature and tuning $t_{df}$ are fundamentally different. The former is controlling how fast the similarity $Q_{i,j}$ decays as the distance between $z_i$ and $z_j$ increases, while the latter serves as a scaling factor, offering constant level modification of the scheme. In our experiments with SimCLR vs $t$ -SimCLR on CIFAR-10, temperature is tuned as a hyperparameter. The difference in $\tau$ can never make up for the difference between the baseline SimCLR and $t$ -SimCLR. We found $\tau = 0.5$ to work better for the base SimCLR while larger $\tau$ works better with our $t$ -SimCLR. We recommend $\tau = 5$ as the default choice. 
+ +# C.4 IMAGENET PRE-TRAINING + +To show the ability for large scale domain transfer and OOD generalization, we conduct experiments on ImageNet pre-training based on MoCo-v2 with its official implementation6. We follow most of their settings, e.g., data augmentation, 200 epochs pre-training, and optimization strategy, etc. The loss is modified according to Section 4.2 and batch normalization is applied along every dimension. We grid search the $t_{df}$ and $\tau$ with range $\{2,5,10,15\}$ and $\{0.2,2,5,10\}$ respectively. Finally we choose $t_{df} = 10$ and $\tau = 5$ to be the optimal hyperparameters. We use this pre-train model as initialization for domain transfer and OOD experiments. + +# C.5 DOMAIN TRANSFER + +We compare MoCo-v2 pre-trained with 800 / 200 epochs and $t$ -MoCo-v2 on Aircraft, Birdsnap, Caltech101, Cars, CIFAR10, CIFAR100, DTD, Pets, and SUN397 in Table C.3. We follow the transfer settings in Ericsson et al. (2021) to finetune the pre-trained models. For datasets Birdsnap, Cars, CIFAR10, CIFAR100, DTD, and SUN397, we report the top-1 accuracy metric, while for Aircraft, Caltech101, and Pets, we report the mean per-class accuracy metric. We also follow Ericsson et al. (2021) to split each dataset into training, validation, and test sets. On each dataset, we perform a hyperparameter search as follows. (1) We choose the initial learning rate according to a grid of 4 logarithmically spaced values between $1 \times 10^{-4}$ and $1 \times 10^{-1}$ ; (2) We choose the weight decay parameter according to a grid of 4 logarithmically spaced values between $1 \times 10^{-6}$ and $1 \times 10^{-3}$ , plus no weight decay; (3) The weight decay values are divided by the learning rate; (4) For each pair of learning rate and weight decay, we finetune the pre-trained model for 5000 steps by SGD with Nesterov momentum 0.9, batch size of 64, and cosine annealing learning rate schedule without restarts. 
As can be seen in Table C.3, our $t$ -MoCo-v2 with 200 epochs even outperform the baseline with 800 epochs on average. + +# C.6 OOD GENERALIZATION + +To demonstrate the advantage of our modification, we also compare MoCo-v2 pre-trained with 800 / 200 epochs and $t$ -MoCo-v2 on OOD generalization benchmarks: PACS Li et al. (2017), VLCS Fang et al. (2013), Office-Home Venkateswara et al. (2017). We follow the standard way to conduct the + +Table C.3: Domain transfer results of vanilla MoCo-v2 and $t$ -MoCo-v2. + +
MethodAircraftBirdsnapCaltech101CarsCIFAR10CIFAR100DTDPetsSUN397Avg.
MoCo-v2 (800 epochs)83.8045.5183.0186.1896.4271.6971.7089.1155.6175.89
MoCo-v2 (200 epochs)82.7544.5383.3185.2495.8172.7571.2286.7056.0575.37
t-MoCo-v2 (200 epochs)82.7853.4686.8186.1796.0478.3269.2087.9559.3077.78
+ +Table C.4: OOD accuracies of vanilla MoCo-v2 and $t$ -MoCo-v2 on domain generalization benchmarks. + +
MethodPACSVLCSOffice-HomeAvg.
MoCo-v2 (800 epochs)58.969.841.656.8
MoCo-v2 (200 epochs)58.570.436.655.2
t-MoCo-v2 (200 epochs)61.375.142.159.5
+ +experiments, i.e., choosing one domain as the test domain and using the remaining domains as training domains, which is named the leave-one-domain-out protocol. The top linear classifier is trained on the training domains and tested on the test domain. Each domain rotates as the test domain and the average accuracy is reported for each dataset in Table C.4. On each dataset, we perform a hyperparameter search following DomainBed Gulrajani & Lopez-Paz (2021). We adopt the leave-one-domain-out cross-validation setup in DomainBed with 10 experiments for hyperparameter selection and run 3 trials. As can be seen in Table C.4, our $t$ -MoCo-v2 with 200 epochs even significantly outperform the baseline with 800 epochs for all of the three datasets. + +# C.7 SSCL INSPIRED DATA VISUALIZATION + +$t$ -SNE (Van der Maaten & Hinton, 2008) and its variants are designed for data visualization. However, for more complicated data, such as colored images, the results are not satisfactory. Using standard $t$ -SNE, the 2D visualization of the 50K training images of CIFAR-10 (labels denoted as 0, 1,...,9) can be seen in Figure C.10, where different labels are hardly separated. The poor performance of $t$ -SNE on CIFAR-10 can be traced back to the poor distance choice on images, i.e., $l_{2}$ -norm. Inspired by the success of SSCL for natural images, $t$ -SNE can potentially be improved by incorporating data augmentations. + +In light of our perspective (S1), $t$ -SNE can take advantage of the distance specified with (3.2) and the resulting model is essentially our $t$ -SimCLR with feature dimension 2. The visualization from $t$ -SimCLR is shown in Figure C.11, which is much more separated (the nearest neighbor classification accuracy on CIFAR-10 test data is $56.6\%$ ). By choosing the feature dimension to be 2, various SSCL methods can also be made into data visualizing tools. 
In Figure C.12, we visualize the outcome from SimCLR (the nearest neighbor classification accuracy on CIFAR-10 test data is $24.8\%$ ). + +Similar investigations have been carried in Böhm et al. (2022); Damrich et al. (2022) where they focused specifically on data visualization and stochastic neighbor embedding. + +![](images/76b0fe03ca6ed5762c61cb7bbfe91d9d76e59cf9de5ea811f23a14e8780312cb.jpg) +Figure C.10: 50K CIFAR-10 training images visualization in 2D with $t$ -SNE. + +![](images/a48f050056dc14a63d46ee1df2ed31108696785f0c9a933ec7c530bde5735f84.jpg) +Figure C.11: 50K CIFAR-10 training images visualization in 2D with the default $t$ -SimCLR. + +![](images/6eaae204ef0543ee464baa5c129e4e1211fd5697dae541817624a41e3770b9f8.jpg) +Figure C.12: 50K CIFAR-10 training images visualization in 2D with the SimCLR. \ No newline at end of file diff --git a/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/images.zip b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..b57427f6ef829275e8f8a86f4ef43fe31ca3395c --- /dev/null +++ b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72ce66eb08bc5297886b31fc28e1ad4e370080b7ef81f5b7ae4d826522f7b9d5 +size 789024 diff --git a/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/layout.json b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c7daa234b79ed287c25a04e3ea0c24d0c7b5fdef --- /dev/null +++ b/2023/Your Contrastive Learning Is Secretly Doing Stochastic Neighbor Embedding/layout.json @@ -0,0 +1,22871 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ 
+ 105, + 79, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "text", + "content": "YOUR CONTRASTIVE LEARNING IS SECRETLY DOING STOCHASTIC NEIGHBOR EMBEDDING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 133, + 447, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 133, + 447, + 146 + ], + "spans": [ + { + "bbox": [ + 111, + 133, + 447, + 146 + ], + "type": "text", + "content": "Tianyang Hu1, Zhili Liu1,2, Fengwei Zhou1, Wenjia Wang2,3, Weiran Huang1,4*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 146, + 424, + 182 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 111, + 146, + 424, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 146, + 424, + 158 + ], + "spans": [ + { + "bbox": [ + 111, + 146, + 424, + 158 + ], + "type": "text", + "content": "1 Huawei Noah's Ark Lab, 2 Hong Kong University of Science and Technology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 158, + 373, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 158, + 373, + 170 + ], + "spans": [ + { + "bbox": [ + 111, + 158, + 373, + 170 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 111, + 158, + 373, + 170 + ], + "type": "text", + "content": " Hong Kong University of Science and Technology (Guangzhou)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 170, + 361, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 170, + 361, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 170, + 361, + 182 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 111, + 170, + 361, + 182 + ], + "type": "text", + "content": " Qing Yuan Research Institute, Shanghai Jiao Tong University" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 276, + 210, + 335, + 223 + ], + "type": "title", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 276, + 210, + 335, + 223 + ], + "spans": [ + { + "bbox": [ + 276, + 210, + 335, + 223 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 140, + 232, + 471, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 232, + 471, + 389 + ], + "spans": [ + { + "bbox": [ + 140, + 232, + 471, + 389 + ], + "type": "text", + "content": "Contrastive learning, especially self-supervised contrastive learning (SSCL), has achieved great success in extracting powerful features from unlabeled data. In this work, we contribute to the theoretical understanding of SSCL and uncover its connection to the classic data visualization method, stochastic neighbor embedding (SNE) (Hinton & Roweis, 2002), whose goal is to preserve pairwise distances. From the perspective of preserving neighboring information, SSCL can be viewed as a special case of SNE with the input space pairwise similarities specified by data augmentation. The established correspondence facilitates deeper theoretical understanding of learned features of SSCL, as well as methodological guidelines for practical improvement. Specifically, through the lens of SNE, we provide novel analysis on domain-agnostic augmentations, implicit bias and robustness of learned features. To illustrate the practical advantage, we demonstrate that the modifications from SNE to " + }, + { + "bbox": [ + 140, + 232, + 471, + 389 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 140, + 232, + 471, + 389 + ], + "type": "text", + "content": "-SNE (Van der Maaten & Hinton, 2008) can also be adopted in the SSCL setting, achieving significant improvement in both in-distribution and out-of-distribution generalization." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 405, + 208, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 405, + 208, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 208, + 417 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 429, + 506, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 506, + 529 + ], + "type": "text", + "content": "Recently, contrastive learning, especially self-supervised contrastive learning (SSCL) has drawn massive attention, with many state-of-the-art models following this paradigm in both computer vision (He et al., 2020a; Chen et al., 2020a;b; Grill et al., 2020; Chen & He, 2021; Zbontar et al., 2021) and natural language processing (Fang et al., 2020; Wu et al., 2020; Giorgi et al., 2020; Gao et al., 2021; Yan et al., 2021). In contrast to supervised learning, SSCL learns the representation through a large number of unlabeled data and artificially defined self-supervision signals, i.e., regarding the augmented views of a data sample as positive pairs and randomly sampled data as negative pairs. By enforcing the features of positive pairs to align and those of negative pairs to be distant, SSCL produces discriminative features with state-of-the-art performance for various downstream tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 534, + 507, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 507, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 507, + 624 + ], + "type": "text", + "content": "Despite the empirical success, the theoretical understanding is under-explored as to how the learned features depend on the data and augmentation, how different components in SSCL work and what are the implicit biases when there exist multiple empirical loss minimizers. 
For instance, SSCL methods are widely adopted for pretraining, whose feature mappings are to be utilized for various downstream tasks which are usually out-of-distribution (OOD). The distribution shift poses great challenges for the feature learning process with extra requirement for robustness and OOD generalization (Arjovsky et al., 2019; Krueger et al., 2021; Bai et al., 2021; He et al., 2020b; Zhao et al., 2023; Dong et al., 2022), which demands deeper understanding of the SSCL methods." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 628, + 506, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 506, + 717 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 506, + 717 + ], + "type": "text", + "content": "The goal of SSCL is to learn the feature representations from data. For this problem, one classic method is SNE (Hinton et al., 2006) and its various extensions. Specially, " + }, + { + "bbox": [ + 104, + 628, + 506, + 717 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 628, + 506, + 717 + ], + "type": "text", + "content": "-SNE (Van der Maaten & Hinton, 2008) has become the go-to choice for low-dimensional data visualization. Comparing to SSCL, SNE is far better explored in terms of theoretical understanding (Arora et al., 2018; Linderman & Steinerberger, 2019; Cai & Ma, 2021). However, its empirical performance is not satisfactory, especially in modern era where data are overly complicated. Both trying to learn feature representations, are there any deep connections between SSCL and SNE? Can SSCL take the advantage of the theoretical soundness of SNE? Can SNE be revived in the modern era by incorporating SSCL?" 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 351, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 351, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 351, + 732 + ], + "type": "text", + "content": "*Correspondence to Weiran Huang (weiran.huang@sjtu.edu.cn)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "In this work, we give affirmative answers to the above questions and demonstrate how the connections to SNE can benefit the theoretical understandings of SSCL, as well as provide methodological guidelines for practical improvement. The main contributions are summarized below." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 506, + 299 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 104, + 121, + 504, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 504, + 144 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 504, + 144 + ], + "type": "text", + "content": "- We propose a novel perspective that interprets SSCL methods as a type of SNE methods with the aim of preserving pairwise similarities specified by the data augmentation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 148, + 506, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 148, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 104, + 148, + 506, + 227 + ], + "type": "text", + "content": "- The discovered connection enables deeper understanding of SSCL methods. We provide novel theoretical insights for domain-agnostic data augmentation, implicit bias and OOD generalization. Specifically, we show isotropic random noise augmentation induces " + }, + { + "bbox": [ + 104, + 148, + 506, + 227 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 148, + 506, + 227 + ], + "type": "text", + "content": " similarity while mixup noise can potentially adapt to low-dimensional structures of data; we investigate the implicit bias from the angle of order preserving and identified the connection between minimizing the expected Lipschitz constant of the SSCL feature map and SNE with uniformity constraint; we identify that the popular cosine similarity can be harmful for OOD generalization." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "text", + "content": "- Motivated by the SNE perspective, we propose several modifications to existing SSCL methods and demonstrate practical improvements. Besides a re-weighting scheme, we advocate to lose the spherical constraint for improved OOD performance and a " + }, + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "text", + "content": "-SNE style matching for improved separation. Through comprehensive numerical experiments, we show that the modified " + }, + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "text", + "content": "-SimCLR outperforms the baseline with " + }, + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "text", + "content": " less feature dimensions on CIFAR-10 and " + }, + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 232, + 506, + 299 + ], + "type": "text", + "content": "-MoCo-v2 pretrained on ImageNet significantly outperforms in various domain transfer and OOD tasks." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 315, + 307, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 315, + 307, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 307, + 327 + ], + "type": "text", + "content": "2 PRELIMINARY AND RELATED WORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": "Notations. For a function " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "f: \\Omega \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\|f\\|_{\\infty} = \\sup_{\\boldsymbol{x} \\in \\Omega} |f(\\boldsymbol{x})|" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\|f\\|_p = (\\int_{\\Omega} |f(\\boldsymbol{x})|^p d\\boldsymbol{x})^{1/p}" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": ". 
For a vector " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\| \\boldsymbol{x}\\|_p" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " denotes its " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": "-norm, for " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "1 \\leq p \\leq \\infty" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\mathbb{P}(A)" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " is the probability of event " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": ". For a random variable " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": ", we use " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "P_z" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "p_z" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " to denote its probability distribution and density respectively. 
Denote Gaussian distribution by " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "N(\\mu, \\Sigma)" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " and let " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "I_d" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " be the " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "d \\times d" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " identity matrix. Let the dataset be " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_n = \\{\\boldsymbol{x}_1, \\dots, \\boldsymbol{x}_n\\} \\subset \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " where each " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_i" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " independently follows distribution " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "P_x" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": ". 
The goal of unsupervised representation learning is to find informative low-dimensional features " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "z_1, \\dots, z_n \\in \\mathbb{R}^{d_z}" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_n" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "d_z" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " is usually much smaller than " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": ". We use " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "f(\\boldsymbol{x})" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": " to as the default notation for the feature mapping from " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d \\to \\mathbb{R}^{d_z}" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "inline_equation", + "content": "z_i = f(\\boldsymbol{x}_i)" + }, + { + "bbox": [ + 104, + 338, + 506, + 419 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": "Stochastic neighbor embedding. 
SNE (Hinton & Roweis, 2002) is a powerful representation learning framework designed for visualizing high-dimensional data in low dimensions by preserving neighboring information. The training process can be conceptually decomposed into the following two steps: (1) calculate the pairwise similarity matrix " + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "inline_equation", + "content": "\\pmb{P} \\in \\mathbb{R}^{n \\times n}" + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_n" + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": "; (2) optimize features " + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "inline_equation", + "content": "z_1, \\dots, z_n" + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": " such that their pairwise similarity matrix " + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "inline_equation", + "content": "\\pmb{Q} \\in \\mathbb{R}^{n \\times n}" + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": " matches " + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "inline_equation", + "content": "\\pmb{P}" + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": ". Under the general guidelines lie plentiful details. 
In Hinton & Roweis (2002), the pairwise similarity is modeled as conditional probabilities of " + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "inline_equation", + "content": "x_j" + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": " being the neighbor of " + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": ", which is specified by a Gaussian distribution centered at " + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": ", i.e., when " + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "inline_equation", + "content": "i \\neq j" + }, + { + "bbox": [ + 104, + 422, + 504, + 501 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 227, + 505, + 504, + 534 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 505, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 227, + 505, + 504, + 534 + ], + "type": "interline_equation", + "content": "P _ {j \\mid i} = \\frac {\\exp \\left(- \\| \\boldsymbol {x} _ {i} - \\boldsymbol {x} _ {j} \\| _ {2} ^ {2} / 2 \\sigma_ {i} ^ {2}\\right)}{\\sum_ {k \\neq i} \\exp \\left(- \\| \\boldsymbol {x} _ {i} - \\boldsymbol {x} _ {k} \\| _ {2} ^ {2} / 2 \\sigma_ {i} ^ {2}\\right)}, \\tag {2.1}", + "image_path": "53ce944b8c950d280f1e9a5f483ee95442d0762afb896c18b17221707ae58338.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "inline_equation", + "content": 
"\\sigma_{i}" + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "text", + "content": " is the variance of the Gaussian centered at " + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "text", + "content": ". Similar conditional probabilities " + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "inline_equation", + "content": "Q_{j|i}" + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "text", + "content": "'s can be defined on the feature space. When matching " + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 539, + 504, + 573 + ], + "type": "text", + "content": ", the measurement chosen is the KL-divergence between two conditional probabilities. The overall training objective for SNE is" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 248, + 578, + 504, + 610 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 578, + 504, + 610 + ], + "spans": [ + { + "bbox": [ + 248, + 578, + 504, + 610 + ], + "type": "interline_equation", + "content": "\\inf _ {\\boldsymbol {z} _ {1}, \\dots , \\boldsymbol {z} _ {n}} \\sum_ {i = 1} ^ {n} \\sum_ {j = 1} ^ {n} P _ {j | i} \\log \\frac {P _ {j | i}}{Q _ {j | i}}. 
\\tag {2.2}", + "image_path": "ec6e4bc2bd613b75f3365622018b657f76412c77a5ab116c2dbcf1d2ca4e9f14.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 616, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 506, + 704 + ], + "type": "text", + "content": "Significant improvements have been made to the classic SNE. Im et al. (2018) generalized the KL-divergence to " + }, + { + "bbox": [ + 104, + 616, + 506, + 704 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 616, + 506, + 704 + ], + "type": "text", + "content": "-divergence and found that different divergences favors different types of structure. Lu et al. (2019) proposed to make " + }, + { + "bbox": [ + 104, + 616, + 506, + 704 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 616, + 506, + 704 + ], + "type": "text", + "content": " doubly stochastic so that features are less crowded. Most notably, " + }, + { + "bbox": [ + 104, + 616, + 506, + 704 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 616, + 506, + 704 + ], + "type": "text", + "content": "-SNE (Van der Maaten & Hinton, 2008) modified the pairwise similarity by considering joint distribution rather than conditional, and utilizes t-distribution instead of Gaussian in the feature space modeling. It is worth noting that SNE belongs to a large class of methods called manifold learning (Li et al., 2022). In this work, we specifically consider SNE. If no confusion arises, we use SNE to denote the specific work of Hinton & Roweis (2002) and this type of methods in general interchangeably." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "Self-supervised contrastive learning. The key part of SSCL is the construction of positive pairs, or usually referred to as different views of the same sample. For each " + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " in the training data, denote" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "type": "text", + "content": "its two augmented views to be " + }, + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i'" + }, + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i''" + }, + { + "bbox": [ + 104, + 81, + 
479, + 95 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_n' = \\{\\pmb{x}_1', \\dots, \\pmb{x}_n'\\}" + }, + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_n'' = \\{\\pmb{x}_1'', \\dots, \\pmb{x}_n''\\}" + }, + { + "bbox": [ + 104, + 81, + 479, + 95 + ], + "type": "text", + "content": " and define" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 180, + 100, + 430, + 130 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 100, + 430, + 130 + ], + "spans": [ + { + "bbox": [ + 180, + 100, + 430, + 130 + ], + "type": "interline_equation", + "content": "l (\\pmb {x} _ {i} ^ {\\prime}, \\pmb {x} _ {i} ^ {\\prime \\prime}) = - \\log \\frac {\\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i} ^ {\\prime}) , f (\\pmb {x} _ {i} ^ {\\prime \\prime})) / \\tau)}{\\sum_ {\\pmb {x} \\in \\mathcal {D} _ {n} ^ {\\prime} \\cup \\mathcal {D} _ {n} ^ {\\prime \\prime} \\setminus \\{\\pmb {x} _ {i} ^ {\\prime} \\}} \\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i} ^ {\\prime}) , f (\\pmb {x})) / \\tau)},", + "image_path": "1d92768caabb2b3d072677dfee9910dcaac1aac2094cb10b65401b188f16504c.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 136, + 506, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 136, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 136, + 506, + 174 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 136, + 506, + 174 + ], + "type": "inline_equation", + "content": "\\mathrm{sim}(z_1,z_2) = \\langle \\frac{z_1}{\\|\\pmb{z}_1\\|_2},\\frac{z_2}{\\|\\pmb{z}_2\\|_2}\\rangle" + }, + { + "bbox": [ + 104, + 136, + 506, + 174 + ], + "type": "text", + "content": " denotes the cosine similarity and " + }, + { + "bbox": [ + 104, + 136, + 
506, + 174 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 136, + 506, + 174 + ], + "type": "text", + "content": " is a temperature parameter. The training objective of the popular SimCLR (Chen et al., 2020a) can be written as " + }, + { + "bbox": [ + 104, + 136, + 506, + 174 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}}\\coloneqq \\frac{1}{2n}\\sum_{i = 1}^{n}(l(\\pmb{x}_i^{\\prime \\prime},\\pmb{x}_i^{\\prime}) + l(\\pmb{x}_i^{\\prime},\\pmb{x}_i^{\\prime \\prime}))" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 177, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 177, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 506, + 266 + ], + "type": "text", + "content": "Recently, various algorithms are proposed to improve the above contrastive learning. To address the need for the large batch size, MoCo (He et al., 2020a; Chen et al., 2020b) utilizes a moving-averaged encoder and a dynamic memory bank to store negative representations, making it more device-friendly. Grill et al. (2020); Chen & He (2021); Zbontar et al. (2021); Chen et al. (2021) radically discard negative samples in SSCL but still achieve satisfactory transfer performance. Another line of works (Caron et al., 2020; Li et al., 2021; Liu et al., 2022) mines the hierarchy information in data to derive more semantically compact representations. Radford et al. (2021); Yao et al. (2021) even extend the contrastive methods to the multi-modality data structure to achieve impressive zero-shot classification results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 270, + 506, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 413 + ], + "type": "text", + "content": "Theoretical understanding of SSCL. 
In contrast of the empirical success, theoretical understanding of SSCL is still limited. While most of theoretical works (Arora et al., 2019; Tosh et al., 2020; HaoChen et al., 2021; 2022; Wang et al., 2022; Wen & Li, 2021; Wei et al., 2020; Huang et al., 2021; Ji et al., 2021; Ma et al., 2023) focus on its generalization ability on downstream tasks, there are some works studying specifically the InfoNCE loss. One line of works (Oord et al., 2018; Bachman et al., 2019; Hjelm et al., 2018; Tian et al., 2019; 2020) understand the InfoNCE loss from mutual information perspective, showing that the negative InfoNCE is a lower bound of mutual information between positive samples. Other works (Wang & Isola, 2020; Huang et al., 2021; Jing et al., 2021) are from the perspective of geometry of embedding space, showing that InfoNCE can be divided into two parts: one controls alignment and the other prevents representation collapse. In this paper, we study SSCL from the SNE perspective, which, to the best of the authors' knowledge, has no discussion in existing literature. The closest work to ours is Balestriero & LeCun (2022), which proposed a unifying framework under the helm of spectral manifold learning. In comparison, our work focuses specifically on the connection between SSCL and SNE." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 430, + 272, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 272, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 272, + 442 + ], + "type": "text", + "content": "3 SNE PERSPECTIVE OF SSCL" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "text", + "content": "A closer look at the training objectives of SNE and SimCLR reveals great resemblance — SimCLR can be seen as a special SNE model. 
To see this, denote " + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathcal{D}}_{2n} = \\mathcal{D}_n^{\\prime \\prime}\\cup \\mathcal{D}_n^{\\prime}" + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "text", + "content": " as the augmented dataset with index " + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\widetilde{\\pmb{x}}_{2i - 1} = \\pmb{x}_i^{\\prime \\prime}" + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\widetilde{\\pmb{x}}_{2i} = \\pmb{x}_i^\\prime" + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "text", + "content": ". If we change the " + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "text", + "content": " distance to the negative cosine similarity and let " + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "inline_equation", + "content": "\\sigma_i^2\\equiv \\tau" + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "text", + "content": ". 
Admitting similar conditional probability formulation as in (2.1) yields that for " + }, + { + "bbox": [ + 104, + 455, + 504, + 502 + ], + "type": "inline_equation", + "content": "i\neq j" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 220, + 508, + 504, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 508, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 220, + 508, + 504, + 536 + ], + "type": "interline_equation", + "content": "\widetilde{Q}_{j \mid i} = \frac{\exp\left(\operatorname{sim}\left(f(\widetilde{\boldsymbol{x}}_{i}), f(\widetilde{\boldsymbol{x}}_{j})\right) / \tau\right)}{\sum_{k \neq i} \exp\left(\operatorname{sim}\left(f(\widetilde{\boldsymbol{x}}_{i}), f(\widetilde{\boldsymbol{x}}_{k})\right) / \tau\right)}. \tag{3.1}", + "image_path": "dd86c5c1e5092d0eaebd909dad9797295a5a1c0debed8852b936f99e37180234.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 542, + 147, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 542, + 147, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 147, + 555 + ], + "type": "text", + "content": "By taking" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 219, + 555, + 504, + 581 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 555, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 219, + 555, + 504, + 581 + ], + "type": "interline_equation", + "content": "\widetilde{P}_{j \mid i} = \left\{ \begin{array}{ll} 1, & \text{if } \widetilde{\boldsymbol{x}}_{i} \text{ and } \widetilde{\boldsymbol{x}}_{j} \text{ are positive pairs} \\ 0, & \text{otherwise,} \end{array} \right. 
\\tag {3.2}", + "image_path": "fe8145d2805fa01f41406c2dec145c1bbbd69c56f45eb7a546e8e76a38394a97.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 586, + 269, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 269, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 269, + 597 + ], + "type": "text", + "content": "the SNE objective (2.2) can be written as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 177, + 605, + 431, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 605, + 431, + 638 + ], + "spans": [ + { + "bbox": [ + 177, + 605, + 431, + 638 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 1} ^ {2 n} \\sum_ {j = 1} ^ {2 n} \\widetilde {P} _ {j | i} \\log \\frac {\\widetilde {P} _ {j | i}}{\\widetilde {Q} _ {j | i}} = \\sum_ {k = 1} ^ {n} \\Big (- \\log (\\widetilde {Q} _ {2 k - 1 | 2 k}) - \\log (\\widetilde {Q} _ {2 k | 2 k - 1}) \\Big),", + "image_path": "191c5c23090c640462ccf11e9fd1c24fc11c39c5135da10cce0928d179aa01a1.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 644, + 506, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 644, + 506, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 644, + 506, + 684 + ], + "type": "text", + "content": "which reduces to the SimCLR objective " + }, + { + "bbox": [ + 104, + 644, + 506, + 684 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}}" + }, + { + "bbox": [ + 104, + 644, + 506, + 684 + ], + "type": "text", + "content": ", up to a constant scaling term only depending on " + }, + { + "bbox": [ + 104, + 644, + 506, + 684 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 644, + 506, + 684 + ], + "type": "text", + "content": ". Now that we have established the correspondence between SNE and SimCLR, it's clear that the feature learning process of SSCL also follows the two steps of SNE." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 693, + 505, + 733 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 118, + 693, + 394, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 693, + 394, + 706 + ], + "spans": [ + { + "bbox": [ + 118, + 693, + 394, + 706 + ], + "type": "text", + "content": "(S1) The positive pair construction specifies the similarity matrix " + }, + { + "bbox": [ + 118, + 693, + 394, + 706 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 118, + 693, + 394, + 706 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 118, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 118, + 709, + 505, + 733 + ], + "type": "text", + "content": "(S2) The training process then matches " + }, + { + "bbox": [ + 118, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 118, + 709, + 505, + 733 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 118, + 709, + 505, + 733 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 118, + 709, + 505, + 733 + ], + "type": "text", + "content": " by minimizing some divergence between the two specified by the training objective, e.g., KL divergence in SimCLR." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 147, + 54, + 249, + 137 + ], + "blocks": [ + { + "bbox": [ + 147, + 54, + 249, + 137 + ], + "lines": [ + { + "bbox": [ + 147, + 54, + 249, + 137 + ], + "spans": [ + { + "bbox": [ + 147, + 54, + 249, + 137 + ], + "type": "image", + "image_path": "3ecaa78f8dce75bb41d06285568ba9c89e5fd0f9e5e58682cb50c271df5f04f4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 261, + 54, + 340, + 137 + ], + "blocks": [ + { + "bbox": [ + 261, + 54, + 340, + 137 + ], + "lines": [ + { + "bbox": [ + 261, + 54, + 340, + 137 + ], + "spans": [ + { + "bbox": [ + 261, + 54, + 340, + 137 + ], + "type": "image", + "image_path": "a7a1122e517d6b6db862794be6a44519aeeb56f41996f2d85124745ee03e1a6f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 350, + 54, + 455, + 138 + ], + "blocks": [ + { + "bbox": [ + 350, + 54, + 455, + 138 + ], + "lines": [ + { + "bbox": [ + 350, + 54, + 455, + 138 + ], + "spans": [ + { + "bbox": [ + 350, + 54, + 455, + 138 + ], + "type": "image", + "image_path": 
"313ce6e1d4b7b5dc8b7ed2aac0bd2dff24fba0882f672f1add9f8a8b47be159b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 170, + 145, + 246, + 211 + ], + "blocks": [ + { + "bbox": [ + 170, + 145, + 246, + 211 + ], + "lines": [ + { + "bbox": [ + 170, + 145, + 246, + 211 + ], + "spans": [ + { + "bbox": [ + 170, + 145, + 246, + 211 + ], + "type": "image", + "image_path": "a24431869e4cdcaeda8101b3a435fc04249f7e6efbe2530ac24d45b30827bb42.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 260, + 144, + 340, + 228 + ], + "blocks": [ + { + "bbox": [ + 260, + 144, + 340, + 228 + ], + "lines": [ + { + "bbox": [ + 260, + 144, + 340, + 228 + ], + "spans": [ + { + "bbox": [ + 260, + 144, + 340, + 228 + ], + "type": "image", + "image_path": "32f6266ce0ea2e5ecc4d0294265ed70e66d675a8e0ab408d06970130655d53dd.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 236, + 506, + 281 + ], + "lines": [ + { + "bbox": [ + 104, + 236, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 506, + 281 + ], + "type": "text", + "content": "Figure 1: Gaussian mixture setting with 5 components. (a) illustration of data with 250 samples. (b) learned features by standard SimCLR with normalization (cosine similarity) to 1-sphere. (c) learned features by modified SimCLR without normalization (" + }, + { + "bbox": [ + 104, + 236, + 506, + 281 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 236, + 506, + 281 + ], + "type": "text", + "content": " similarity). (d, e) feature mapping of the two methods in case of OOD mean shift. 
The linear classification accuracy is " + }, + { + "bbox": [ + 104, + 236, + 506, + 281 + ], + "type": "inline_equation", + "content": "48.4\\%" + }, + { + "bbox": [ + 104, + 236, + 506, + 281 + ], + "type": "text", + "content": " in (d) and " + }, + { + "bbox": [ + 104, + 236, + 506, + 281 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 236, + 506, + 281 + ], + "type": "text", + "content": " in (e)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 350, + 145, + 455, + 228 + ], + "blocks": [ + { + "bbox": [ + 350, + 145, + 455, + 228 + ], + "lines": [ + { + "bbox": [ + 350, + 145, + 455, + 228 + ], + "spans": [ + { + "bbox": [ + 350, + 145, + 455, + 228 + ], + "type": "image", + "image_path": "2f1a4d6332027e5c9e546360d59bf528012fe1dfaf61c353418cb03dc8aea145.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "text", + "content": "The main difference between SNE and SSCL is the first part, where the " + }, + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "text", + "content": " in SNE is usually densely filled by " + }, + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "inline_equation", + "content": "l_{p}" + }, + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "text", + "content": " distance, ignoring the semantic information within rich data like images and texts. 
In contrast, SSCL omits all traditional distances in " + }, + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "text", + "content": " and only specifies semantic similarity through data augmentations, and the resulting " + }, + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 291, + 506, + 371 + ], + "type": "text", + "content": " is sparsely filled only by positive pairs as in (3.2). For structurally rich data such as image or text, the semantic information is invariant to a wide range of transformations. Human's prior knowledge of such invariance guides the construction of positive pairs in SSCL, which is then learned by the feature mapping." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 373, + 506, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 506, + 453 + ], + "type": "text", + "content": "Remark 3.1 (SNE vs SSCL). We would like to clarify on the main difference between SNE and SSCL that we focus in this work. Although standard SNE (Hinton et al., 2006) is non-parametric without explicit feature maps, and is optimized for the whole dataset, these are not the defining properties of SNE. SNE can also utilize explicit feature maps and mini-batch training (Van Der Maaten, 2009). On the other hand, SSCL can also benefit from larger/full batches (Chen et al., 2020a) and can also be modified to directly optimize the features " + }, + { + "bbox": [ + 104, + 373, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_i" + }, + { + "bbox": [ + 104, + 373, + 506, + 453 + ], + "type": "text", + "content": "'s. 
In this work, we omit these subtleties1 and focus on the (S1) perspective, which we view as the most significant difference between SNE and SSCL." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 464, + 176, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 176, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 176, + 475 + ], + "type": "text", + "content": "3.1 ANALYSIS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 485, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 485, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 485, + 506, + 540 + ], + "type": "text", + "content": "In this section, to showcase the utility of the SNE perspective, we demonstrate how the feature learning process of SSCL methods, e.g., SimCLR, can become more intuitive and transparent. Specifically, we re-derive the alignment and uniformity principle (Wang & Isola, 2020) as well as provide novel analysis on domain-agnostic augmentations, the implicit bias and robustness of learned features. To aid the illustration, we devise toy examples with simulated Gaussian mixture data." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": "Gaussian mixture setting. 
Let the data follow " + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": "-dimensional Gaussian mixture distribution with " + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": " components where " + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "inline_equation", + "content": "P_{\\pmb{x}} \\sim \\frac{1}{m} \\sum_{i=1}^{m} N(\\pmb{\\mu}_i, \\sigma^2 \\pmb{I}_d)" + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": ". The special case with " + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "inline_equation", + "content": "d = 2" + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "inline_equation", + "content": "m = 5" + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "inline_equation", + "content": "\\sigma = 0.1" + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": " is illustrated in Figure 1(a) with 250 independent samples. 
To apply contrastive methods, consider constructing positive pairs by direct sampling, i.e., if " + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": " is from the first component, then we sample another " + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "inline_equation", + "content": "\\pmb{x}' \\sim N(\\pmb{\\mu}_1, \\sigma^2 \\pmb{I}_d)" + }, + { + "bbox": [ + 104, + 544, + 506, + 612 + ], + "type": "text", + "content": " independently as its alternative view for contrast. The negative samples are the same as in standard SimCLR training." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 623, + 320, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 623, + 320, + 634 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 320, + 634 + ], + "type": "text", + "content": "3.1.1 DOMAIN-AGNOSTIC DATA AUGMENTATION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 642, + 506, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 642, + 506, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 642, + 506, + 677 + ], + "type": "text", + "content": "Now that we have established in (S1) that the input space pairwise distance is specified by the data augmentation, a natural question to ask is what are the corresponding induced distances. In this section, we investigate this problem for domain-agnostic data augmentations." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 681, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 506, + 704 + ], + "type": "text", + "content": "The quality of data augmentation has great impact on the performance of SSCL methods, which reflects people's prior knowledge on the data. 
However, when facing new data without any domain knowledge," + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "type": "text", + "content": "1All the contrastive losses are written in full batches for simplicity in this work as we focus on analyzing the optimal solutions of SSCL methods rather than the optimization process." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "text", + "content": "we have to rely on domain-agnostic data augmentations, e.g., adding random noises (Verma et al., 2021), for contrast. 
We first consider using general random noise augmentation, i.e., for any " + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\pmb{x}' = \\pmb{x} + \\delta" + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "text", + "content": " follows some distribution with density " + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\phi(\\pmb{x})" + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "text", + "content": ". Then, for any " + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "text", + "content": ", the probability density of having " + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "inline_equation", + "content": "\\pmb{t} \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "text", + "content": " as its augmented point can be characterized as " + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "inline_equation", + "content": "P_{\\pmb{t}|\\pmb{x}_i} = \\mathbb{P}(\\pmb{x}_i \\mid \\pmb{x}_i' = \\pmb{t} \\text{ form a positive pair} | \\pmb{x}_i) = \\phi(\\pmb{t} - \\pmb{x}_i)" + }, + { + "bbox": [ + 104, + 83, + 506, + 140 + ], + "type": "text", + "content": ". We have the following proposition on Gaussian-induced distance." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 504, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 504, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 504, + 166 + ], + "type": "text", + "content": "Proposition 3.2 (Gaussian noise injection). If the noise distribution is isotropic Gaussian with mean zero, the induced distance is equivalent to the " + }, + { + "bbox": [ + 104, + 143, + 504, + 166 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 143, + 504, + 166 + ], + "type": "text", + "content": " distance in " + }, + { + "bbox": [ + 104, + 143, + 504, + 166 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 143, + 504, + 166 + ], + "type": "text", + "content": ", up to a monotone transformation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "content": "Another popular noise injection method is the mixup (Zhang et al., 2017), where the augmented data are comprised of convex combinations of the training data. 
For each " + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_i" + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "content": ", a positive pair can be constructed from another " + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_j" + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_i' = \\boldsymbol{x}_i + \\lambda (\\boldsymbol{x}_j - \\boldsymbol{x}_i)" + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\lambda \\in (0,1)" + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "content": " is the hyperparameter usually modeled with Beta distribution. For independent " + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_1, \\boldsymbol{x}_2 \\sim P_x" + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "content": ", denote the convoluted density of " + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "inline_equation", + "content": "\\lambda (\\boldsymbol{x}_1 - \\boldsymbol{x}_2)" + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "inline_equation", + "content": "p_{\\lambda}(\\boldsymbol{x})" + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "content": ", which is symmetric around 0. 
Then, if employing mixup for positive pairs in SSCL, the induced distance can be written as " + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "inline_equation", + "content": "P_{\\boldsymbol{x}_1, \\boldsymbol{x}_2} = P_{\\boldsymbol{x}_2, \\boldsymbol{x}_1} = p_{\\lambda}(\\boldsymbol{x}_1 - \\boldsymbol{x}_2)" + }, + { + "bbox": [ + 104, + 175, + 504, + 243 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": "Gaussian vs. mixup. Verma et al. (2021) proposed to use mixup when domain-specific information is unattainable and provided supportive analysis on its advantage over isotropic Gaussian noise from the classification generalization error point of view. Through (S1) perspective, we can intuitively explain why data-dependent mixup noises can be potentially better from the perspective of the \"curse of dimensionality\". Consider the " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": "-dimensional Gaussian mixture setting with " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "m < d" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": " separated components. 
Notice that " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "\pmb{\mu}_1,\dots ,\pmb{\mu}_m" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": " can take up at most " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "(m - 1)" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": "-dimensional linear sub-space of " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": ". Denote the space spanned by " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "\pmb{\mu}_i" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": "'s as " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "S_{\mu}" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": ". For the light-tailed Gaussian distribution, the majority of samples will be close to " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "S_{\mu}" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": ". 
Hence, majority of the convoluted density " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "p_{\\lambda}(\\pmb{x})" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": " will also be supported on " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "S_{\\mu}" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": ", so does the corresponding " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "P_{\\pmb{x}_2,\\pmb{x}_1}" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": ". Thus, the induced distance from mixup will omit irrelevant variations in the complement of " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "S_{\\mu}" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": " and focus on the low-dimensional sub-space " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "S_{\\mu}" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "\\pmb{\\mu}_i" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": "'s actually differ. This effectively reduces the dimension dependence from " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "m - 1" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": ". 
In comparison, isotropic Gaussian noise induces " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "l_2" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": " distance for positive pairs with support of " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": ", which will be much more inefficient, especially when " + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "inline_equation", + "content": "m \\ll d" + }, + { + "bbox": [ + 104, + 247, + 506, + 413 + ], + "type": "text", + "content": ". Since it is well-known that the performance of regression or classification models is strongly influenced by the intrinsic dimension of the input space (Hamm & Steinwart, 2021), keeping the data in a low-dimensional space is preferable." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 426, + 271, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 271, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 271, + 437 + ], + "type": "text", + "content": "3.1.2 ALIGNMENT AND UNIFORMITY" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 446, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 446, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 446, + 504, + 536 + ], + "type": "text", + "content": "Characterizing the learned features of SSCL is of critical importance. Wang & Isola (2020) proposed alignment and uniformity as principles for SimCLR type contrastive learning methods. Such results can be intuitively understood through the perspective of (S1) and (S2). 
Consider the common case where the feature space is " + }, + { + "bbox": [ + 104, + 446, + 504, + 536 + ], + "type": "inline_equation", + "content": "(d_z - 1)" + }, + { + "bbox": [ + 104, + 446, + 504, + 536 + ], + "type": "text", + "content": "-sphere. First, (3.2) indicates that only similarities (distances) between positive pairs are non-zero (finite) and all other pairwise similarities (distances) are zero (infinity). Preserving (3.2) requires the features of positive pairs to align (cosine similarity tends to 1) and those of negative pairs to be as distant as possible. If in the extreme case where positive pairs match exactly, i.e., " + }, + { + "bbox": [ + 104, + 446, + 504, + 536 + ], + "type": "inline_equation", + "content": "f(\\pmb{x}_i) = f(\\pmb{x}_i')" + }, + { + "bbox": [ + 104, + 446, + 504, + 536 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 446, + 504, + 536 + ], + "type": "inline_equation", + "content": "i = 1,\\dots ,n" + }, + { + "bbox": [ + 104, + 446, + 504, + 536 + ], + "type": "text", + "content": ", we call it perfect alignment." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": "If perfect alignment is achieved and the features are constrained on the unit sphere, matching (3.2) implies pushing " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": " points on the feature space as distant as possible. 
Maximally separated " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": " points on a " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": "-sphere has been studied in geometry, known as the Tammes problem (Tammes, 1930; Erber & Hockney, 1991; Melisseny, 1998). We say perfect uniformity is achieved if all the pairs are maximally separated on the sphere. There are some simple cases of the Tammes problem. If " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "d = 2" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": ", perfect uniformity can be achieved if the mapped points form a regular polygon. If " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "d \\geq n - 1" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": ", the solution can be given by the vertices of an " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "(n - 1)" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": "-simplex, inscribed in an " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "(n - 1)" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": "-sphere embedded in " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": ". 
The cosine similarity between any two vertices is " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "-1 / (n - 1)" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": " and in this case, " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}}" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": " can attain its lower bound2. As " + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "inline_equation", + "content": "n \\to \\infty" + }, + { + "bbox": [ + 104, + 540, + 506, + 662 + ], + "type": "text", + "content": ", the point distribution converges weakly to uniform distribution. As can be seen in Figure 1(a, b), perfect alignment and perfect uniformity are almost achieved by standard SimCLR in the Gaussian mixture setting." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 666, + 504, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 700 + ], + "type": "text", + "content": "As we will demonstrate in Section 3.1.4 that the spherical feature space can be bad for OOD generalization, adopting of the Euclidean space will change the statement of the uniformity property and can also be analyzed from the SNE perspective. Details can be found in Appendix A.5." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "content": "2Notice that in this case, the optimal feature mapping will contain little information of the data, mapping anchor samples to interchangeable points with identical pairwise distances" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 202, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 202, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 202, + 92 + ], + "type": "text", + "content": "3.1.3 IMPLICITBIAS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 101, + 506, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 101, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 101, + 506, + 212 + ], + "type": "text", + "content": "Existing theoretical results on SSCL provide justification of its empirical success in classification. However, there is more to it than just separating different classes and many phenomena are left unexplained. 
Take the popular SimCLR (Chen et al., 2020a) on CIFAR-10 as an example, we can consistently observe that the feature similarities within animals (bird, cat, deer, dog, frog, horse) and within objects (airplane, automobile, ship, truck), are significantly higher than those between animals and objects3. This can be viewed as an implicit bias towards preserving semantic information, which might be surprising as we have no supervision on the label information during the training process. However, existing literature on implicit bias is scarce. As advocated in Saunshi et al. (2022), ignoring inductive biases cannot adequately explain the success of contrastive learning. In this section, we provide a simple explanation from the perspective of SNE." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "text", + "content": "For a more concrete illustration, consider training SimCLR in the Gaussian mixture setting with " + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "inline_equation", + "content": "d = 1" + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "inline_equation", + "content": "d_z = 2" + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "inline_equation", + "content": "m = 4" + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "inline_equation", + "content": "\\mu_i = i" + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "inline_equation", + "content": 
"\\sigma = 0.1" + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "text", + "content": ". Denote the 4 components in ascending order by A,B,C,D. Perfect alignment and uniformity imply that their feature maps (a, b, c, d) on the unit-circle should be vertices of an inscribed square. What left unsaid is their relative order. Clockwise or counter-Clockwise from a, regardless of the initialization, we can observe SimCLR to consistently produce the order " + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "inline_equation", + "content": "a \\to b \\to c \\to d" + }, + { + "bbox": [ + 104, + 217, + 506, + 274 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 275, + 504, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 275, + 504, + 332 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 504, + 332 + ], + "type": "text", + "content": "Remark 3.3 (Relative ordering and neighbor-preserving). The order-preserving property showcased with " + }, + { + "bbox": [ + 104, + 275, + 504, + 332 + ], + "type": "inline_equation", + "content": "d = 1" + }, + { + "bbox": [ + 104, + 275, + 504, + 332 + ], + "type": "text", + "content": " is mainly for illustration, as in one-dimension, the neighboring info is simplified as the order, which is much easier to understand. The results remain the same in high dimensions as long as the clusters are well separated with an obvious order of clusters. For instance, some relative orders in Figure 1(a,b) are also stable, e.g., the neighbor of blue will consistently be purple and yellow." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 339, + 504, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 339, + 504, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 504, + 417 + ], + "type": "text", + "content": "With great resemblance to SNE, SSCL methods also exhibit neighbor-preserving property and we identify it as an implicit bias. Such implicit bias can be universal in SSCL and the phenomenon in Figure A.3 is also a manifestation. In deep learning, the implicit bias is usually characterized by either closeness to the initialization (Moroshko et al., 2020; Azulay et al., 2021), or minimizing certain complexity (Razin & Cohen, 2020; Zhang et al., 2021). In the case of SimCLR, we hypothesize the implicit bias as the expected Lipschitz constant, which has deep connections to SNE with uniformity constraint. For a feature map " + }, + { + "bbox": [ + 104, + 339, + 504, + 417 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 339, + 504, + 417 + ], + "type": "text", + "content": " onto the unit-sphere, define" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 239, + 421, + 504, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 421, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 239, + 421, + 504, + 449 + ], + "type": "interline_equation", + "content": "C (f) = \\mathbb {E} _ {\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}} \\frac {\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2}}{\\| \\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime} \\| _ {2}}, \\tag {3.3}", + "image_path": "10ffae136959af2f4193e64df05e5ab1834b59a09fddeb991c7c17924900c7be.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 451, + 376, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 376, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 376, + 463 + ], + "type": "text", + 
"content": "where the " + }, + { + "bbox": [ + 104, + 451, + 376, + 463 + ], + "type": "inline_equation", + "content": "x_{1}, x_{2}" + }, + { + "bbox": [ + 104, + 451, + 376, + 463 + ], + "type": "text", + "content": " are independent samples from the data distribution." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "text", + "content": "Definition 3.4 (SNE with uniformity constraint). Assume data " + }, + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_1, \\dots, \\boldsymbol{x}_n \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "text", + "content": ". If the corresponding SNE features " + }, + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "inline_equation", + "content": "z_1, \\dots, z_n \\in \\mathbb{R}^{d_z}" + }, + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "text", + "content": " are constrained to be the maximally separated " + }, + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "text", + "content": " points on the " + }, + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "inline_equation", + "content": "(d_z - 1)" + }, + { + "bbox": [ + 104, + 465, + 504, + 500 + ], + "type": "text", + "content": "-sphere, we call this problem SNE with uniformity constraint." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 507, + 504, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 507, + 504, + 542 + ], + "spans": [ + { + "bbox": [ + 104, + 507, + 504, + 542 + ], + "type": "text", + "content": "The key of SNE is matching the pairwise similarity matrices " + }, + { + "bbox": [ + 104, + 507, + 504, + 542 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 507, + 504, + 542 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 507, + 504, + 542 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 507, + 504, + 542 + ], + "type": "text", + "content": ". When solving SNE with uniformity constraint, the only thing to be optimized is the pairwise correspondence, or ordering of the mapping. We have the following theorem that links the neighbor-preserving property to " + }, + { + "bbox": [ + 104, + 507, + 504, + 542 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 507, + 504, + 542 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": "Theorem 3.5. 
Let " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\pmb{x}_1, \\dots, \\pmb{x}_n \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\| \\pmb{x}_i - \\pmb{x}_j \\|_2 > 0" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "i, j" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " and let " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "z_1, \\dots, z_n \\in \\mathbb{R}^{d_z}" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " be maximally separated " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " points on the " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "(d_z - 1)" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": "-sphere. 
Denote " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "P = (p_{ij})_{n \\times n}" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "Q = (q_{ij})_{n \\times n}" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " as the corresponding pairwise similarity matrices of " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": "'s and " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\pmb{z}_i" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": "'s respectively. Let " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " denote a permutation on " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\{1, \\dots, n\\}" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " and denote all such permutations as " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "Q^\\pi" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " as the " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": "-permuted matrix " + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 543, + 504, + 590 + ], + "type": "text", + "content": " and define" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 182, + 593, + 425, + 621 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 593, + 425, + 621 + ], + "spans": [ + { + "bbox": [ + 182, + 593, + 425, + 621 + ], + "type": "interline_equation", + "content": "C _ {1} (P, Q ^ {\\pi}) = \\sum_ {i \\neq j} \\frac {q _ {\\pi (i) \\pi (j)}}{p _ {i j}} \\quad \\text {a n d} \\quad \\pi^ {*} = \\operatorname * {a r g m i n} _ {\\pi \\in T} C _ {1} (P, Q ^ {\\pi}).", + "image_path": "35776b88d8b6874dbf60634290858c5d95c5c4ccc8a9d7ee9f5096efa6b1f838.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": "Then, " + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": " also minimizes " + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\| \\bar{P} - Q^{\\pi}\\|_{F}" + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "inline_equation", + 
"content": "\\| \\cdot \\|_{F}" + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": " is the Frobenius norm and " + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\bar{P} = (\\bar{p}_{ij})_{n\\times n}" + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": " is a (monotonically) transformed similarity matrix with " + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "inline_equation", + "content": "\\bar{p}_{ij} = -1 / p_{ij}" + }, + { + "bbox": [ + 104, + 625, + 504, + 651 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "text", + "content": "Theorem 3.5 showcases the relationship between minimizing " + }, + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "text", + "content": " and the structure preserving property by considering a special SNE problem, where the pairwise similarity is not modeled by Gaussian as standard. Although " + }, + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "inline_equation", + "content": "q_{ij} = -\\| f(\\pmb{x}_i) - f(\\pmb{x}_j)\\|_2" + }, + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "text", + "content": " is unorthodox, it is reasonable since the larger the distance, the smaller the similarity. We have the following corollary to explain the neighbor-preserving property of SSCL and the implicit bias associated with minimizing the complexity " + }, + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 398, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 398, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 398, + 732 + ], + "type": "text", + "content": "3Figure A.3 illustrates the phenomenon. Details can be found in Appendix A.1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "Corollary 3.6 (Implicit bias of SSCL). When SSCL model achieves perfect alignment and perfect uniformity, if the complexity " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " is minimized, the resulting feature map preserves pairwise distance in the input space, resembling SNE with uniformity constraint." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": "Corollary 3.6 links the implicit bias of SSCL to the SNE optimization with uniformity constraint. In the case of perfect alignment and perfect uniformity, SSCL can be seen as a special SNE problem where the feature " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "z_{1}, \\dots, z_{n}" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " must be maximally separated on the unit-sphere. Recall the 1-dimension Gaussian case. There are in total " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "3! = 6" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " different orderings for the 4 cluster means, among which, a " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\rightarrow \\mathrm{b} \\rightarrow \\mathrm{c} \\rightarrow \\mathrm{d}" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " will give the lowest SNE loss. As can be seen in Figure A.4, both " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " and the SNE loss are monotonically decreasing during training for the Gaussian mixture setting." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 197, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 197, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 197, + 506, + 266 + ], + "type": "text", + "content": "When the alignment or uniformity is not perfect, the resulting feature mapping can still be characterized via SNE, with the uniformity constraint relaxed as a form of regularization. In our numerical experiments on the CIFAR-10 data, we observe " + }, + { + "bbox": [ + 104, + 197, + 506, + 266 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 197, + 506, + 266 + ], + "type": "text", + "content": " to be monotonically decreasing during the training process, supporting our hypothesis. More details can be found in Appendix A.3. Corollary 3.6 sheds light on the implicit semantic information preserving phenomenon shown in Figure A.3, as in the input space, images of dogs should be closer to images of cats, than airplanes." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 276, + 336, + 287 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 276, + 336, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 276, + 336, + 287 + ], + "type": "text", + "content": "3.1.4 TARGETING OOD: EUCLIDEAN VS SPHERICAL" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 295, + 504, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 340 + ], + "type": "text", + "content": "Almost all SSCL methods require normalization to the unit-sphere and the similarity on the feature space is often the cosine similarity. In comparison, standard SNE methods operate freely on the Euclidean space. 
In this section, we show that the normalization can hinder the structure-preserving and there is a fundamental trade off between in-distribution and out-of-domain generalization." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 345, + 506, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 345, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 506, + 434 + ], + "type": "text", + "content": "Consider the 2-dimensional Gaussian mixture setting as illustrated in Figure 1(a). Notice that as long as the mixing components are well separated, the learned feature mapping on the sphere will always be the pentagon shape, regardless of the relative locations of the clusters. This is a result of the uniformity property derived under spherical constraint. Distant clusters in the input space will be pulled closer while close clusters will be pushed to be more distant, which results in the trade off between in-distribution and out-of-domain generalization. On one hand, close clusters being more separated in the feature space is potentially beneficial for in-distribution classification. On the other hand, the spherical constraint adds to the complexity of the feature mapping, potentially hurting robustness." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "text", + "content": "In the Euclidean space, pushing away negative samples (as distant as possible) will be much easier, since the feature vectors could diverge towards infinity" + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "text", + "content": " and potentially preserve more structural information. 
To verify our intuition, we relax the spherical constraint in the Gaussian mixture setting and change the cosine similarity in SimCLR to the negative " + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "text", + "content": " distance in " + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "inline_equation", + "content": "\\mathbb{R}" + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "text", + "content": ". The learned features are shown in Figure 1(c). Comparing to Figure 1(b), we can get the extra information that the purple cluster is far away to the others. If we introduce a small mean shift to the data, moving the distribution along each dimension by 1, the resulting feature maps differ significantly in robustness. As illustrated in Figure 1(d) vs. (e), the standard SimCLR are much less robust to OOD shifts and the resulting classification accuracy degrades to only " + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "inline_equation", + "content": "48.4\\%" + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "text", + "content": ", while that for the modified SimCLR remains " + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 438, + 507, + 561 + ], + "type": "text", + "content": ". The same OOD advantage can also be verified in the CIFAR-10 to CIFAR-100 OOD generalization case (details in Appendix C.3 Figure C.8) and large-scale real-world scenarios with MoCo (Chen et al., 2020b) as baseline (details in Section 5)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 575, + 263, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 575, + 263, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 263, + 588 + ], + "type": "text", + "content": "4 IMPROVING SSCL BY SNE" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 601, + 506, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 506, + 646 + ], + "type": "text", + "content": "The proposed SNE perspective (S1,S2) can inspire various modifications to existing SSCL methods. In this section, we choose SimCLR as our baseline and investigate three straightforward modifications. For empirical evaluation, we report the test classification accuracy of nearest neighbor classifiers on both simulated data and real datasets. Experiment details can be found in Appendix C." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 658, + 248, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 248, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 248, + 670 + ], + "type": "text", + "content": "4.1 WEIGHTED POSITIVE PAIRS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 679, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 679, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 506, + 715 + ], + "type": "text", + "content": "In practice, positive pairs are constructed from anchors (training data), by i.i.d. data augmentations, e.g., random resized crop, random horizontal flip, color jitter, etc. 
Take random crop as an example, pair 1 and 2 may be from " + }, + { + "bbox": [ + 104, + 679, + 506, + 715 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 679, + 506, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 679, + 506, + 715 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 679, + 506, + 715 + ], + "type": "text", + "content": " random crops, respectively. Their similarities should not be treated" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 504, + 732 + ], + "type": "text", + "content": "In practice, various regularization, e.g., weight decay, are employed and the resulting features will be bounded." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 121, + 65, + 284, + 160 + ], + "blocks": [ + { + "bbox": [ + 121, + 65, + 284, + 160 + ], + "lines": [ + { + "bbox": [ + 121, + 65, + 284, + 160 + ], + "spans": [ + { + "bbox": [ + 121, + 65, + 284, + 160 + ], + "type": "image", + "image_path": "abeffeeae615bb59c1a7505b0ba33952441e268232c272f589bd8ba4b5f1c065.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 163, + 249, + 174 + ], + "lines": [ + { + "bbox": [ + 164, + 163, + 249, + 174 + ], + "spans": [ + { + "bbox": [ + 164, + 163, + 249, + 174 + ], + "type": "text", + "content": "(a) Weighted SimCLR." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 64, + 472, + 159 + ], + "blocks": [ + { + "bbox": [ + 307, + 64, + 472, + 159 + ], + "lines": [ + { + "bbox": [ + 307, + 64, + 472, + 159 + ], + "spans": [ + { + "bbox": [ + 307, + 64, + 472, + 159 + ], + "type": "image", + "image_path": "10129a71b1f7ed9a9454c7fe19cd191e125990057078c7774a222fa6d8368813.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 163, + 447, + 173 + ], + "lines": [ + { + "bbox": [ + 345, + 163, + 447, + 173 + ], + "spans": [ + { + "bbox": [ + 345, + 163, + 447, + 173 + ], + "type": "text", + "content": "(b) SimCLR vs. " + }, + { + "bbox": [ + 345, + 163, + 447, + 173 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 345, + 163, + 447, + 173 + ], + "type": "text", + "content": "-SimCLR." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "lines": [ + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "type": "text", + "content": "Figure 2: Nearest neighbor classification test accuracy on CIFAR-10 with ResNet-18 after 200 epochs pre-training. (a) " + }, + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "type": "inline_equation", + "content": "N / A" + }, + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "type": "text", + "content": " stands for the baseline SimCLR. The " + }, + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "type": "text", + "content": "-axis is the temperature for IoU weighting scheme. (b) Comparison between SimCLR and " + }, + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 178, + 504, + 212 + ], + "type": "text", + "content": "-SimCLR with different feature dimensions." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 221, + 504, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 221, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 504, + 245 + ], + "type": "text", + "content": "as equal, as in typical SSCL methods. 
Incorporating the disparity in the data augmentation process is straightforward in the perspective of SNE, where the InfoNCE loss can be naturally modified as" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 236, + 247, + 373, + 278 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 247, + 373, + 278 + ], + "spans": [ + { + "bbox": [ + 236, + 247, + 373, + 278 + ], + "type": "interline_equation", + "content": "\\frac {1}{2 n} \\sum_ {i = 1} ^ {n} p _ {i i ^ {\\prime}} \\cdot \\left(l \\left(\\boldsymbol {x} _ {i}, \\boldsymbol {x} _ {i} ^ {\\prime}\\right) + l \\left(\\boldsymbol {x} _ {i} ^ {\\prime}, \\boldsymbol {x} _ {i}\\right)\\right).", + "image_path": "082a03d3f2912a576bc25f19e35d37a9c2ccccdec8ffdc6a62f40fb70ea6ebc7.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": "The weight " + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "inline_equation", + "content": "p_{ii'}" + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": " can be specified manually to reflect human's prior knowledge. To test out the effect of such modification, we conduct numerical experiments on CIFAR-10 using the standard SimCLR. The weighting scheme is based on the Intersection over Union (IoU) of random resized crops. 
For each positive pair, let " + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "inline_equation", + "content": "p_{ii'} \\propto \\exp(\\mathrm{IoU}(\\boldsymbol{x}_i, \\boldsymbol{x}_i') / \\tau')" + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "inline_equation", + "content": "\\tau' > 0" + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": " is a hyperparameter (temperature) controlling the strength of the weighting scheme, i.e., the bigger the " + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "inline_equation", + "content": "\\tau'" + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": ", the closer to the unweighted state. The CIFAR-10 test performance vs. " + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "inline_equation", + "content": "\\tau'" + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": " is shown in Figure 2(a). The baseline is " + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "inline_equation", + "content": "80.7\\%" + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": " and can be significantly improved to " + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "inline_equation", + "content": "82.1\\%" + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": " if choosing " + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "inline_equation", + "content": "\\tau' = 1" + }, + { + "bbox": [ + 104, + 281, + 506, + 361 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 372, + 289, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 372, + 289, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 372, + 289, + 383 + ], + "type": "text", + "content": "4.2 T-SIMCLR: " + }, + { + "bbox": [ + 105, + 372, + 289, + 383 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 105, + 372, + 289, + 383 + ], + "type": "text", + "content": "-SNE STYLE MATCHING" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "text", + "content": "Most SSCL algorithms differ mainly in (S2), i.e., defining " + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "text", + "content": " and matching it to " + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "text", + "content": ", where fruitful results in SNE literature can be mirrored and applied. Now that we have identified the advantage of modeling features in Euclidean spaces in Section 3.1.4, the most promising modification that follows is to introduce " + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "text", + "content": "-SNE to SimCLR. Since we are learning low-dimensional features from high-dimensional data, preserving all pairwise similarities is impossible and the features tend to collapse. This is referred to as the \"crowding problem\" in Van der Maaten & Hinton (2008) (see Section 3.2 therein). 
" + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "text", + "content": "-SNE utilizes the heavy-tail " + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "text", + "content": "-distribution instead of the light-tail Gaussian, to model " + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "text", + "content": " and encourage separation in feature space. Correspondingly, the training objective " + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}}" + }, + { + "bbox": [ + 104, + 392, + 504, + 481 + ], + "type": "text", + "content": " can be modified as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 169, + 484, + 504, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 484, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 169, + 484, + 504, + 520 + ], + "type": "interline_equation", + "content": "\\frac {1}{n} \\sum_ {i = 1} ^ {n} - \\log \\frac {\\left(1 + \\| f \\left(\\boldsymbol {x} _ {i}\\right) - f \\left(\\boldsymbol {x} _ {i} ^ {\\prime}\\right) \\| _ {2} ^ {2} / \\left(\\tau t _ {d f}\\right)\\right) ^ {- \\left(t _ {d f} + 1\\right) / 2}}{\\sum_ {1 \\leq j \\neq k \\leq 2 n} \\left(1 + \\| f (\\widetilde {\\boldsymbol {x}} _ {j}) - f (\\widetilde {\\boldsymbol {x}} _ {k}) \\| _ {2} ^ {2} / \\left(\\tau t _ {d f}\\right)\\right) ^ {- \\left(t _ {d f} + 1\\right) / 2}}, \\tag {4.1}", + "image_path": "56be6e8be2931f50717dd3f84e09540e9fdb8e2d6bc63697c89a6fad016de425.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, 
+ 521, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "text", + "content": " is the degree of freedom for the " + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "text", + "content": "-distribution. Besides substituting the cosine similarity to the " + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "inline_equation", + "content": "l_2" + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "text", + "content": " distance, the key modification is the modeling of feature space similarity " + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "text", + "content": ", from Gaussian to " + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "text", + "content": "-distribution as suggested by Van der Maaten & Hinton (2008) to avoid the crowding problem and accommodate the dimension-deficiency in the feature space. We call the modified method " + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 521, + 504, + 578 + ], + "type": "text", + "content": "-SimCLR and we expect it to work better, especially when the feature dimension is low, or in the OOD case." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": "Figure 2(b) shows the comparison between SimCLR and " + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": "-SimCLR on CIFAR-10 with different feature dimensions, where " + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": "-SimCLR has significant advantages in all cases and the smaller the " + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "inline_equation", + "content": "d_{z}" + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": ", the larger the gap. 
Without decreasing the standard " + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "inline_equation", + "content": "d_{z} = 128" + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": "-SimCLR improves the baseline from " + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "inline_equation", + "content": "80.8\\%" + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "inline_equation", + "content": "83.9\\%" + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": " and even beats it using only " + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "inline_equation", + "content": "d_{z} = 8" + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": " with accuracy " + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "inline_equation", + "content": "81.7\\%" + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": "Remark 4.1 (Degree of freedom). 
Standard " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": "-SNE utilizes " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": "-distribution with " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "t_{df} = 1" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": ", to better accommodate the extreme " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "d_z = 2" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": " case. In practice, " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": " can vary and as " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "d_z" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": " increases, larger " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": " might be preferred. We recommend using " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "t_{df} = 5" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": " as the default choice. 
The performance of " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": " vs " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "d_z" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": " can be found in Appendix C, as well as discussion on the fundamental difference between " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 629, + 504, + 675 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": "Remark 4.2 (Training epochs). For the CIFAR-10 experiments, we reported the results of ResNet-18 after 200 training epochs, similar to the setting of Yeh et al. (2021). We also conducted 1000-epoch experiments and found that our modifications provide consistent improvements throughout the training process, not in terms of speeding up the convergence, but converging to better solutions. Details can be found in Appendix C.1 and Figure C.6." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 70, + 504, + 107 + ], + "blocks": [ + { + "bbox": [ + 166, + 60, + 443, + 71 + ], + "lines": [ + { + "bbox": [ + 166, + 60, + 443, + 71 + ], + "spans": [ + { + "bbox": [ + 166, + 60, + 443, + 71 + ], + "type": "text", + "content": "Table 1: Domain transfer results of vanilla MoCo-v2 and " + }, + { + "bbox": [ + 166, + 60, + 443, + 71 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 166, + 60, + 443, + 71 + ], + "type": "text", + "content": " -MoCo-v2." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 70, + 504, + 107 + ], + "lines": [ + { + "bbox": [ + 107, + 70, + 504, + 107 + ], + "spans": [ + { + "bbox": [ + 107, + 70, + 504, + 107 + ], + "type": "table", + "html": "
MethodAircraftBirdsnapCaltech101CarsCIFAR10CIFAR100DTDPetsSUN397Avg.
MoCo-v282.7544.5383.3185.2495.8172.7571.2286.7056.0575.37
t-MoCo-v282.7853.4686.8186.1796.0478.3269.2087.9559.3077.78
", + "image_path": "ecbea0b8e44524a2cd37f0df3d293f7b78ee280837c0cdb3883cf10ac6680ccb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 198, + 130, + 411, + 173 + ], + "blocks": [ + { + "bbox": [ + 105, + 118, + 505, + 129 + ], + "lines": [ + { + "bbox": [ + 105, + 118, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 505, + 129 + ], + "type": "text", + "content": "Table 2: OOD accuracies of vanilla MoCo-v2 and " + }, + { + "bbox": [ + 105, + 118, + 505, + 129 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 105, + 118, + 505, + 129 + ], + "type": "text", + "content": " -MoCo-v2 on domain generalization benchmarks." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 198, + 130, + 411, + 173 + ], + "lines": [ + { + "bbox": [ + 198, + 130, + 411, + 173 + ], + "spans": [ + { + "bbox": [ + 198, + 130, + 411, + 173 + ], + "type": "table", + "html": "
MethodPACSVLCSOffice-HomeAvg.
MoCo-v258.570.436.655.2
t-MoCo-v261.375.142.159.5
", + "image_path": "f8a254931db8880d14f4e63b78144b31fe25020e07b57f8914abb53bfadad722.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 183, + 273, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 183, + 273, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 273, + 194 + ], + "type": "text", + "content": "5 LARGE SCALE EXPERIMENTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "content": "In this section, we apply the same modifications proposed in Section 4.2 to MoCo-v2 (Chen et al., 2020b), as it is more device-friendly to conduct large scale experiments. We name our model " + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "content": "-MoCo-v2. Both models are pre-trained for 200 epochs on ImageNet following the setting of Chen et al. (2020b). The linear probing accuracy of " + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "content": "-MoCo-v2 on ImageNet is " + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "inline_equation", + "content": "67.0\\%" + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "content": ", which is comparable to the MoCo result " + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "inline_equation", + "content": "67.5\\%" + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "content": ". 
With the same level of in-distribution classification accuracy, we conduct extensive experiments to compare their OOD performance. The results in Table 1 and 2 suggest that our modification significantly improves the domain transfer and the OOD generalization ability without sacrificing in-distribution accuracy." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "content": "Domain Transfer. We first conduct experiments on the traditional self-supervision domain transfer benchmark. We compare MoCo-v2 and " + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "content": "-MoCo-v2 on Aircraft, Birdsnap, Caltech101, Cars, CIFAR10, CIFAR100, DTD, Pets, and SUN397. We follow transfer settings in Ericsson et al. (2021) to finetune the pre-trained models. The results are reported in Table 1. Our model " + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "content": "-MoCo-v2 surpasses MoCo-v2 in 8 out of 9 datasets, showing a significantly stronger transfer ability. Notice that our model is pre-trained with 200 epochs, surprisingly, compared with the original MoCo-v2 model pre-trained with 800 epochs, the fine-tuning results of " + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 308, + 504, + 387 + ], + "type": "text", + "content": "-MoCo-v2 are still better on Birdsnap, Caltech101, CIFAR100, and SUN397." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 393, + 506, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 393, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 393, + 506, + 515 + ], + "type": "text", + "content": "Out-of-domain generalization. As illustrated in Section 3.1.4, standard SSCL methods, e.g., SimCLR, MoCo, etc., could suffer from OOD shift. To demonstrate the advantage of our modification, we investigate the effectiveness of our method on OOD generalization benchmarks: PACS Li et al. (2017), VLCS Fang et al. (2013), Office-Home Venkateswara et al. (2017). We follow the standard way to conduct the experiment, i.e., choosing one domain as the test domain and using the remaining domains as training domains, which is named the leave-one-domain-out protocol. As can be seen in Table 2, our " + }, + { + "bbox": [ + 104, + 393, + 506, + 515 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 393, + 506, + 515 + ], + "type": "text", + "content": "-MoCo-v2 indicates significant improvement over MoCo-v2. Both experiments indicate our modification exhibits substantial enhancement for domain transfer and OOD generalization ability. Similar to domain transfer scenario, compared with the original MoCo-v2 model pre-trained with 800 epochs, " + }, + { + "bbox": [ + 104, + 393, + 506, + 515 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 393, + 506, + 515 + ], + "type": "text", + "content": "-MoCo-v2 is better on all of the three datasets. More experiment details, including detailed comparisons, are in Appendix C." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 531, + 190, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 531, + 190, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 190, + 544 + ], + "type": "text", + "content": "6 DISCUSSION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": "This work proposes a novel perspective that interprets SSCL methods as a type of SNE methods, which facilitates both deeper theoretical understandings and methodological guidelines for practical improvement. More interpretations of SSCL from preserving the distance between distributions can be found in Appendix B. Our analysis has limitations and the insights from SNE are not universally applicable for all SSCL methods, e.g., Zbontar et al. (2021); Yang et al. (2021) don't fit in our framework. However, this work is an interesting addition to existing theoretical works of SSCL and more investigations can be made along this path. While there are various extensions of the classic SNE, in this work, as a proof of concept, we mainly showcased practical improvements from " + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": "-SNE. We expect more modifications can be developed by borrowing advances in the SNE literature, e.g., changing to " + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": "-divergences (Im et al., 2018) or consider optimal transport Bunne et al. (2019); Salmona et al. (2021); Mialon et al. (2020). 
On the other hand, standard SNE methods can also borrow existing techniques in SSCL to improve their performance on more complicated data, e.g., incorporating data augmentations instead of or on top of pre-defined distances. In this sense, by choosing feature dimension to be 2, various SSCL methods can also be used as data visualization tools (Böhm et al., 2022; Damrich et al., 2022). Specifically on CIFAR-10, standard " + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": "-SNE can barely reveal any clusters while our " + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": "-SimCLR with " + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "inline_equation", + "content": "d_z = 2" + }, + { + "bbox": [ + 104, + 556, + 506, + 733 + ], + "type": "text", + "content": " produces much more separation among different labels. More details can be found in Appendix C.7." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 288, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 288, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 288, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 176, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 176, + 94 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 176, + 94 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 99, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 99, + 507, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 99, + 507, + 124 + ], + "spans": [ + { + "bbox": [ + 105, + 99, + 507, + 124 + ], + "type": "text", + "content": "Martin Arjovsky, Léon Bottou, Ishaan Gulrajani, and David Lopez-Paz. Invariant risk minimization. arXiv preprint arXiv:1907.02893, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 129, + 506, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 129, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 107, + 129, + 506, + 153 + ], + "type": "text", + "content": "Sanjeev Arora, Wei Hu, and Pravesh K Kothari. An analysis of the t-sne algorithm for data visualization. In Conference On Learning Theory, pp. 1455-1462. PMLR, 2018." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 159, + 506, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 159, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 106, + 159, + 506, + 194 + ], + "type": "text", + "content": "Sanjeev Arora, Hrishikesh Khandeparkar, Mikhail Khodak, Orestis Plevrakis, and Nikunj Saunshi. A theoretical analysis of contrastive unsupervised representation learning. arXiv preprint arXiv:1902.09229, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 201, + 505, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 201, + 505, + 235 + ], + "spans": [ + { + "bbox": [ + 106, + 201, + 505, + 235 + ], + "type": "text", + "content": "Shahar Azulay, Edward Moroshko, Mor Shpigel Nacson, Blake E Woodworth, Nathan Srebro, Amir Globerson, and Daniel Soudry. On the implicit bias of initialization shape: Beyond infinitesimal mirror descent. In International Conference on Machine Learning, pp. 468-477. PMLR, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 241, + 506, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 241, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 506, + 275 + ], + "type": "text", + "content": "Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. In Advances in Neural Information Processing Systems, pp. 15535-15545, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 282, + 506, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 282, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 506, + 327 + ], + "type": "text", + "content": "Haoyue Bai, Rui Sun, Lanqing Hong, Fengwei Zhou, Nanyang Ye, Han-Jia Ye, S-H Gary Chan, and Zhenguo Li. Decaug: Out-of-distribution generalization via decomposed feature representation and semantic augmentation. 
In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 6705-6713, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 334, + 505, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 334, + 505, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 505, + 358 + ], + "type": "text", + "content": "Randall Balestriero and Yann LeCun. Contrastive and non-contrastive self-supervised learning recover global and local spectral embedding methods. arXiv preprint arXiv:2205.11508, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 365, + 504, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 504, + 388 + ], + "type": "text", + "content": "Jan Niklas Böhm, Philipp Berens, and Dmitry Kobak. Unsupervised visualization of image datasets using contrastive learning. arXiv preprint arXiv:2210.09879, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 395, + 506, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 395, + 506, + 428 + ], + "spans": [ + { + "bbox": [ + 106, + 395, + 506, + 428 + ], + "type": "text", + "content": "Charlotte Bunne, David Alvarez-Melis, Andreas Krause, and Stefanie Jegelka. Learning generative models across incomparable spaces. In International conference on machine learning, pp. 851-861. PMLR, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 435, + 504, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 435, + 504, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 504, + 459 + ], + "type": "text", + "content": "T Tony Cai and Rong Ma. Theoretical foundations of t-sne for visualizing high-dimensional clustered data. arXiv preprint arXiv:2105.07536, 2021." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 465, + 506, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 465, + 506, + 500 + ], + "spans": [ + { + "bbox": [ + 106, + 465, + 506, + 500 + ], + "type": "text", + "content": "Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. In Advances in Neural Information Processing Systems, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 506, + 504, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 506, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 504, + 541 + ], + "type": "text", + "content": "Kai Chen, Lanqing Hong, Hang Xu, Zhenguo Li, and Dit-Yan Yeung. Multisiam: Self-supervised multi-instance siamese representation learning for autonomous driving. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 7546-7554, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 548, + 504, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 504, + 572 + ], + "type": "text", + "content": "Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. arXiv preprint arXiv:2002.05709, 2020a." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 578, + 506, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 506, + 601 + ], + "type": "text", + "content": "Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 15750-15758, 2021." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 608, + 504, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 608, + 504, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 504, + 632 + ], + "type": "text", + "content": "Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 638, + 504, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 638, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 504, + 662 + ], + "type": "text", + "content": "Sebastian Damrich, Niklas Böhm, Fred A Hamprecht, and Dmitry Kobak. From " + }, + { + "bbox": [ + 105, + 638, + 504, + 662 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 105, + 638, + 504, + 662 + ], + "type": "text", + "content": "-sne to umap with contrastive learning. In International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 668, + 506, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 506, + 702 + ], + "type": "text", + "content": "Qishi Dong, Awais Muhammad, Fengwei Zhou, Chuanlong Xie, Tianyang Hu, Yongxin Yang, Sung-Ho Bae, and Zhenguo Li. Zood: Exploiting model zoo for out-of-distribution generalization. arXiv preprint arXiv:2210.09236, 2022." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 709, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 507, + 732 + ], + "type": "text", + "content": "T Erber and GM Hockney. Equilibrium configurations of n equal charges on a sphere. Journal of Physics A: Mathematical and General, 24(23):L1369, 1991." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 116 + ], + "type": "text", + "content": "Linus Ericsson, Henry Gouk, and Timothy M Hospedales. How well do self-supervised models transfer? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5414-5423, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 505, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 505, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 505, + 156 + ], + "type": "text", + "content": "Chen Fang, Ye Xu, and Daniel N. Rockmore. Unbiased metric learning: On the utilization of multiple datasets and web images for softening bias. 2013 IEEE International Conference on Computer Vision, pp. 1657-1664, 2013." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 162, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 162, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 504, + 186 + ], + "type": "text", + "content": "Hongchao Fang, Sicheng Wang, Meng Zhou, Jiayuan Ding, and Pengtao Xie. Cert: Contrastive self-supervised learning for language understanding. arXiv preprint arXiv:2005.12766, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 191, + 504, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 191, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 105, + 191, + 504, + 215 + ], + "type": "text", + "content": "Tianyu Gao, Xingcheng Yao, and Danqi Chen. Simcse: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 220, + 504, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 504, + 245 + ], + "type": "text", + "content": "John M Giorgi, Osvald Nitski, Gary D Bader, and Bo Wang. Declutr: Deep contrastive learning for unsupervised textual representations. arXiv preprint arXiv:2006.03659, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 250, + 504, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 250, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 504, + 295 + ], + "type": "text", + "content": "Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733, 2020." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 301, + 504, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 301, + 504, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 504, + 325 + ], + "type": "text", + "content": "Ishaan Gulrajani and David Lopez-Paz. In search of lost domain generalization. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 331, + 504, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 504, + 354 + ], + "type": "text", + "content": "Thomas Hamm and Ingo Steinwart. Adaptive learning rates for support vector machines working on data with low intrinsic dimension. The Annals of Statistics, 49(6):3153-3180, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 360, + 505, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 360, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 105, + 360, + 505, + 394 + ], + "type": "text", + "content": "Jeff Z HaoChen, Colin Wei, Adrien Gaidon, and Tengyu Ma. Provable guarantees for self-supervised deep learning with spectral contrastive loss. Advances in Neural Information Processing Systems, 34, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 400, + 504, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 400, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 400, + 504, + 434 + ], + "type": "text", + "content": "Jeff Z HaoChen, Colin Wei, Ananya Kumar, and Tengyu Ma. Beyond separability: Analyzing the linear transferability of contrastive representations to related subpopulations. arXiv preprint arXiv:2204.02683, 2022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 441, + 505, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 505, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 505, + 475 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 481, + 505, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 481, + 505, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 505, + 516 + ], + "type": "text", + "content": "Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9729-9738, 2020a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 521, + 504, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 504, + 545 + ], + "type": "text", + "content": "Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 551, + 505, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 551, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 505, + 574 + ], + "type": "text", + "content": "Yue He, Zheyan Shen, and Peng Cui. Towards non-iid image classification: A dataset and baselines. Pattern Recognition, pp. 107383, 2020b." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 580, + 505, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 580, + 505, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 580, + 505, + 604 + ], + "type": "text", + "content": "Geoffrey Hinton and Sam T Roweis. Stochastic neighbor embedding. In NIPS, volume 15, pp. 833-840. CiteSeer, 2002." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 609, + 505, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 505, + 633 + ], + "type": "text", + "content": "Geoffrey E. Hinton, Simon Osindero, and Yee Whye Teh. A fast learning algorithm for deep belief nets. Neural Computation, 18:1527-1554, 2006." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 639, + 505, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 505, + 673 + ], + "type": "text", + "content": "R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670, 2018." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 504, + 703 + ], + "type": "text", + "content": "Weiran Huang, Mingyang Yi, and Xuyang Zhao. Towards the generalization of contrastive self-supervised learning. arXiv preprint arXiv:2111.00743, 2021." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 732 + ], + "type": "text", + "content": "Daniel Jiwoong Im, Nakul Verma, and Kristin Branson. Stochastic neighbor embedding under f-divergences. arXiv preprint arXiv:1811.01247, 2018." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 734 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 106 + ], + "type": "text", + "content": "Wenlong Ji, Zhun Deng, Ryumei Nakada, James Zou, and Linjun Zhang. The power of contrast for feature learning: A theoretical analysis. arXiv preprint arXiv:2110.02473, 2021." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 505, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 505, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 505, + 137 + ], + "type": "text", + "content": "Li Jing, Pascal Vincent, Yann LeCun, and Yuandong Tian. Understanding dimensional collapse in contrastive self-supervised learning. arXiv preprint arXiv:2110.09348, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 144, + 506, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 144, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 107, + 144, + 506, + 157 + ], + "type": "text", + "content": "Alex Krizhevsky. Learning multiple layers of features from tiny images. University of Toronto, 2009." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 163, + 504, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 163, + 504, + 199 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 504, + 199 + ], + "type": "text", + "content": "David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, and Aaron Courville. Out-of-distribution generalization via risk extrapolation (rex). In International Conference on Machine Learning, pp. 5815-5826. PMLR, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 206, + 506, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 206, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 107, + 206, + 506, + 232 + ], + "type": "text", + "content": "Da Li, Yongxin Yang, Yi-Zhe Song, and Timothy M. Hospedales. Deeper, broader and artier domain generalization. 2017 IEEE International Conference on Computer Vision (ICCV), pp. 5543-5551, 2017." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 237, + 506, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 237, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 107, + 237, + 506, + 262 + ], + "type": "text", + "content": "Yunfan Li, Peng Hu, Zitao Liu, Dezhong Peng, Joey Tianyi Zhou, and Xi Peng. Contrastive clustering. In 2021 AAAI Conference on Artificial Intelligence (AAAI), 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 268, + 504, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 268, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 107, + 268, + 504, + 293 + ], + "type": "text", + "content": "Zengyi Li, Yubei Chen, Yann LeCun, and Friedrich T. Sommer. Neural manifold clustering and embedding. ArXiv, abs/2201.10000, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 300, + 504, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 300, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 107, + 300, + 504, + 324 + ], + "type": "text", + "content": "George C Linderman and Stefan Steinerberger. Clustering with t-sne, provably. SIAM Journal on Mathematics of Data Science, 1(2):313-332, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 331, + 506, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 331, + 506, + 366 + ], + "spans": [ + { + "bbox": [ + 107, + 331, + 506, + 366 + ], + "type": "text", + "content": "Zhili Liu, Jianhua Han, Kai Chen, Lanqing Hong, Hang Xu, Chunjing Xu, and Zhenguo Li. Task-customized self-supervised pre-training with scalable dynamic routing. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 1854-1862, 2022." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 373, + 506, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 373, + 506, + 398 + ], + "spans": [ + { + "bbox": [ + 107, + 373, + 506, + 398 + ], + "type": "text", + "content": "Yao Lu, Jukka Corander, and Zhirong Yang. Doubly stochastic neighbor embedding on spheres. Pattern Recognition Letters, 128:100-106, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 404, + 504, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 404, + 504, + 429 + ], + "spans": [ + { + "bbox": [ + 107, + 404, + 504, + 429 + ], + "type": "text", + "content": "Jiajun Ma, Tianyang Hu, and Wenjia Wang. Deciphering the projection head: Representation evaluation self-supervised learning. arXiv preprint arXiv:2301.12189, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 435, + 506, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 435, + 506, + 470 + ], + "spans": [ + { + "bbox": [ + 107, + 435, + 506, + 470 + ], + "type": "text", + "content": "JBM Melisseneny. How different can colours be? maximum separation of points on a spherical octant. Proceedings of the Royal Society of London. Series A: Mathematical, Physical and Engineering Sciences, 454(1973):1499-1508, 1998." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 477, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 477, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 107, + 477, + 506, + 502 + ], + "type": "text", + "content": "Facundo Memoli. Gromov-wasserstein distances and the metric approach to object matching. Foundations of computational mathematics, 11(4):417-487, 2011." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 509, + 506, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 509, + 506, + 544 + ], + "spans": [ + { + "bbox": [ + 107, + 509, + 506, + 544 + ], + "type": "text", + "content": "Grégoire Mialon, Dexiong Chen, Alexandre d'Aspremont, and Julien Mairal. A trainable optimal transport embedding for feature aggregation. In International Conference on Learning Representations (ICLR), 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 551, + 504, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 551, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 107, + 551, + 504, + 586 + ], + "type": "text", + "content": "Edward Moroshko, Blake E Woodworth, Suriya Gunasekar, Jason D Lee, Nati Srebro, and Daniel Soudry. Implicit bias in deep linear classification: Initialization scale vs training accuracy. Advances in neural information processing systems, 33:22182-22193, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 592, + 504, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 592, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 107, + 592, + 504, + 617 + ], + "type": "text", + "content": "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 624, + 506, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 624, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 107, + 624, + 506, + 669 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pp. 
8748-8763. PMLR, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 677, + 504, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 677, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 107, + 677, + 504, + 702 + ], + "type": "text", + "content": "Noam Razin and Nadav Cohen. Implicit regularization in deep learning may not be explainable by norms. Advances in neural information processing systems, 33:21174-21187, 2020." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 708, + 504, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 708, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 107, + 708, + 504, + 734 + ], + "type": "text", + "content": "Antoine Salmona, Julie Delon, and Agnès Desolneux. Gromov-wasserstein distances between gaussian distributions. arXiv preprint arXiv:2104.07970, 2021." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 116 + ], + "type": 
"text", + "content": "Nikunj Saunshi, Jordan Ash, Surbhi Goel, Dipendra Misra, Cyril Zhang, Sanjeev Arora, Sham Kakade, and Akshay Krishnamurthy. Understanding contrastive learning requires incorporating inductive biases. arXiv preprint arXiv:2202.14037, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 148 + ], + "type": "text", + "content": "Pieter Merkus Lambertus Tammes. On the origin of number and arrangement of the places of exit on the surface of pollen-grains. Recueil des travaux botaniques nederlandais, 27(1):1-84, 1930." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 153, + 504, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 153, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 153, + 504, + 177 + ], + "type": "text", + "content": "Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. arXiv preprint arXiv:1906.05849, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 184, + 504, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 184, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 105, + 184, + 504, + 209 + ], + "type": "text", + "content": "Yonglong Tian, Chen Sun, Ben Poole, Dilip Krishnan, Cordelia Schmid, and Phillip Isola. What makes for good views for contrastive learning? arXiv preprint arXiv:2005.10243, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 215, + 504, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 215, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 215, + 504, + 239 + ], + "type": "text", + "content": "Christopher Tosh, Akshay Krishnamurthy, and Daniel Hsu. Contrastive learning, multi-view redundancy, and linear models. 
arXiv preprint arXiv:2008.10150, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 245, + 504, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 245, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 504, + 270 + ], + "type": "text", + "content": "Laurens Van Der Maaten. Learning a parametric embedding by preserving local structure. In Artificial intelligence and statistics, pp. 384-391. PMLR, 2009." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 275, + 504, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 275, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 504, + 300 + ], + "type": "text", + "content": "Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 306, + 504, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 306, + 504, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 504, + 341 + ], + "type": "text", + "content": "Hemanth Venkateswara, Jose Eusebio, Shayok Chakraborty, and Sethuraman Panchanathan. Deep hashing network for unsupervised domain adaptation. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5385-5394, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 348, + 506, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 348, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 506, + 373 + ], + "type": "text", + "content": "Vikas Verma, Thang Luong, Kenji Kawaguchi, Hieu Pham, and Quoc Le. Towards domain-agnostic contrastive learning. In International Conference on Machine Learning, pp. 10530–10541. PMLR, 2021." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 378, + 506, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 378, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 506, + 403 + ], + "type": "text", + "content": "Haonan Wang, Jieyu Zhang, Qi Zhu, and Wei Huang. Augmentation-free graph contrastive learning. arXiv preprint arXiv:2204.04874, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 409, + 506, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 409, + 506, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 409, + 506, + 444 + ], + "type": "text", + "content": "Tongzhou Wang and Phillip Isola. Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In International Conference on Machine Learning, pp. 9929-9939. PMLR, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 451, + 504, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 451, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 504, + 475 + ], + "type": "text", + "content": "Colin Wei, Kendrick Shen, Yining Chen, and Tengyu Ma. Theoretical analysis of self-training with deep networks on unlabeled data. arXiv preprint arXiv:2010.03622, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 481, + 504, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 481, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 504, + 506 + ], + "type": "text", + "content": "Zixin Wen and Yuanzhi Li. Toward understanding the feature learning process of self-supervised contrastive learning. arXiv preprint arXiv:2105.15134, 2021." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 512, + 504, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 504, + 546 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 504, + 546 + ], + "type": "text", + "content": "Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 3733-3742, 2018." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 553, + 504, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 553, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 553, + 504, + 578 + ], + "type": "text", + "content": "Zhuofeng Wu, Sinong Wang, Jiatao Gu, Madian Khabsa, Fei Sun, and Hao Ma. Clear: Contrastive learning for sentence representation. arXiv preprint arXiv:2012.15466, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 584, + 504, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 504, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 504, + 618 + ], + "type": "text", + "content": "Yuanmeng Yan, Rumei Li, Sirui Wang, Fuzheng Zhang, Wei Wu, and Weiran Xu. Consert: A contrastive framework for self-supervised sentence representation transfer. arXiv preprint arXiv:2105.11741, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 625, + 504, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 504, + 660 + ], + "type": "text", + "content": "Ceyuan Yang, Zhirong Wu, Bolei Zhou, and Stephen Lin. Instance localization for self-supervised detection pretraining. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3987-3996, 2021." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 667, + 504, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 504, + 701 + ], + "type": "text", + "content": "Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. Filip: Fine-grained interactive language-image pre-training. arXiv preprint arXiv:2111.07783, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 708, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 506, + 732 + ], + "type": "text", + "content": "Chun-Hsiao Yeh, Cheng-Yao Hong, Yen-Chi Hsu, Tyng-Luh Liu, Yubei Chen, and Yann LeCun. Decoupled contrastive learning. arXiv preprint arXiv:2110.06848, 2021." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 247 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 
505, + 106 + ], + "type": "text", + "content": "Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stephane Deny. Barlow twins: Self-supervised learning via redundancy reduction. arXiv preprint arXiv:2103.03230, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 507, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 507, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 507, + 146 + ], + "type": "text", + "content": "Chiyuan Zhang, Samy Bengio, Moritz Hardt, Benjamin Recht, and Oriol Vinyals. Understanding deep learning (still) requires rethinking generalization. Communications of the ACM, 64(3):107-115, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 504, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 504, + 177 + ], + "type": "text", + "content": "Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. arXiv preprint arXiv:1710.09412, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 182, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 504, + 217 + ], + "type": "text", + "content": "Xuyang Zhao, Tianqi Du, Yisen Wang, Jun Yao, and Weiran Huang. Arcl: Enhancing contrastive learning with augmentation-robust representations. In International Conference on Learning Representations (ICLR), 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 507, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 507, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 507, + 247 + ], + "type": "text", + "content": "Roland S Zimmermann, Yash Sharma, Steffen Schneider, Matthias Bethge, and Wieland Brendel. 
Contrastive learning inverts the data generating process. arXiv preprint arXiv:2102.08850, 2021." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 291, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 206, + 79, + 362, + 220 + ], + "blocks": [ + { + "bbox": [ + 206, + 79, + 362, + 220 + ], + "lines": [ + { + "bbox": [ + 206, + 79, + 362, + 220 + ], + "spans": [ + { + "bbox": [ + 206, + 79, + 362, + 220 + ], + "type": "image", + "image_path": "c82662cc8e0904e93a0be15d4fd0d1b398b68fec4cc2d96c32fe8c78a7dabacf.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 232, + 504, + 255 + ], + "lines": [ + { + "bbox": [ + 104, + 232, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 504, + 255 + ], + "type": "text", + "content": "Figure A.3: Cosine similarity heat map of learned features from SimCLR on CIFAR-10 dataset. The darker the color, the larger the similarity." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 274, + 239, + 286 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 274, + 239, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 239, + 286 + ], + "type": "text", + "content": "A TECHNICAL DETAILS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 300, + 313, + 311 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 313, + 311 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 313, + 311 + ], + "type": "text", + "content": "A.1 IMPLICITBIASOFSIMCLRONCIFAR-10." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "text", + "content": "Figure A.3 plots the cosine similarity heat map of learned features from SimCLR on CIFAR-10 dataset. To calculate the similarity of class A (figures denoted by " + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "text", + "content": ") to class B (figures denoted by " + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "inline_equation", + "content": "b_i" + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "text", + "content": "), we first calculate the mean of " + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "inline_equation", + "content": "b_i" + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "inline_equation", + "content": "\\bar{b}" + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "text", + "content": ". 
Then, we sum up " + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "inline_equation", + "content": "\\sum_{i} \\sin(a_i, \\bar{b})" + }, + { + "bbox": [ + 104, + 320, + 506, + 367 + ], + "type": "text", + "content": " and plot is with colors. Hence, the similarity matrix shown in Figure A.3 is not symmetric." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 380, + 253, + 390 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 253, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 253, + 390 + ], + "type": "text", + "content": "A.2 PROOF OF PROPOSITION 3.2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 400, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 400, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 400, + 504, + 422 + ], + "type": "text", + "content": "Recall the domain-agnostic data augmentation process. For any " + }, + { + "bbox": [ + 104, + 400, + 504, + 422 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_i" + }, + { + "bbox": [ + 104, + 400, + 504, + 422 + ], + "type": "text", + "content": ", the probability density of having " + }, + { + "bbox": [ + 104, + 400, + 504, + 422 + ], + "type": "inline_equation", + "content": "t \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 400, + 504, + 422 + ], + "type": "text", + "content": " as its augmented point can be characterized as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 183, + 430, + 426, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 430, + 426, + 444 + ], + "spans": [ + { + "bbox": [ + 183, + 430, + 426, + 444 + ], + "type": "interline_equation", + "content": "P _ {\\boldsymbol {t} | \\boldsymbol {x} _ {i}} = \\mathbb {P} (\\boldsymbol {x} _ {i} \\text {a n d} \\boldsymbol {x} _ {i} ^ {\\prime} = \\boldsymbol {t} \\text {f o r a p o s i t i v e p a i r} | \\boldsymbol {x} _ {i}) = \\phi (\\boldsymbol {t} - 
\\boldsymbol {x} _ {i}).", + "image_path": "1531db091211c2704d54aba12955a4dfd22054eedf7e2bc6924e725a6d7d9794.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "text", + "content": "For isotropic Gaussian densities with mean 0 and covariance matrix " + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\sigma^2\\mathbf{I}" + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\phi (\\pmb {t} - \\pmb {x}_i)\\propto \\exp (-\\| \\pmb {t} - \\pmb {x}_i\\| _2^2 /2\\sigma^2)" + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "text", + "content": ", which is monotonic with the " + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "text", + "content": " distance between " + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\pmb{t}" + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 450, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 487, + 248, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 487, + 248, + 500 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 248, + 500 + ], + "type": "text", + "content": "A.3 INVESTIGATIONS ON " + }, + { + "bbox": [ + 105, + 487, + 248, + 500 + ], + "type": "inline_equation", + "content": "C(f)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 509, + 504, + 532 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 104, + 509, + 504, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 504, + 532 + ], + "type": "text", + "content": "Figures A.4 and A.5 illustrate the evolution of different complexity measurements during the training process under the Gaussian mixture setting and the CIFAR-10 respectively." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 536, + 506, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 559 + ], + "type": "text", + "content": "In the Gaussian mixture setting, the feature extractor is a fully connected ReLU network. Besides " + }, + { + "bbox": [ + 104, + 536, + 506, + 559 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 536, + 506, + 559 + ], + "type": "text", + "content": ", we also evaluate the popular sum of squared weights. The observations on SimCLR are listed as below:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 568, + 504, + 644 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 132, + 568, + 504, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 568, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 132, + 568, + 504, + 602 + ], + "type": "text", + "content": "- The expected Lipschitz constant " + }, + { + "bbox": [ + 132, + 568, + 504, + 602 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 132, + 568, + 504, + 602 + ], + "type": "text", + "content": " is small in initialization. It first increases (till around 100 iterations) and then consistently decreases. This empirically supports the implicit bias towards minimizing " + }, + { + "bbox": [ + 132, + 568, + 504, + 602 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 132, + 568, + 504, + 602 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 606, + 399, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 606, + 399, + 618 + ], + "spans": [ + { + "bbox": [ + 132, + 606, + 399, + 618 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 606, + 399, + 618 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 132, + 606, + 399, + 618 + ], + "type": "text", + "content": " and the sum of squared weights share very similar patterns." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 622, + 504, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 504, + 644 + ], + "type": "text", + "content": "- The SNE loss is non-increasing, as if we are doing stochastic neighbor embedding using " + }, + { + "bbox": [ + 132, + 622, + 504, + 644 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 132, + 622, + 504, + 644 + ], + "type": "text", + "content": "-distance." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "content": "In the CIFAR-10 case, the feature extractor is ResNet-18 plus a fully-connected projection layer. The output from ResNet-18 is usually called representation (512 dimensional) and is utilized for downstream tasks while the projection (128 dimension) is used for training. Such a representation-projection set up is common in SSCL. Ma et al. (2023) aimed to decipher the projection head and revealed that the projection feature tends to be more uniformly distributed while the representation feature exhibits stronger alignment. 
Besides " + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "content": ", we also evaluate the " + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "content": "-norm of the representation. The observations for SimCLR and " + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 654, + 506, + 732 + ], + "type": "text", + "content": "-SimCLR on CIFAR-10 are summarized as below:" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 198, + 99, + 419, + 247 + ], + "blocks": [ + { + "bbox": [ + 198, + 99, + 419, + 247 + ], + "lines": [ + { + "bbox": [ + 198, + 99, + 419, + 247 + ], + "spans": [ + { + "bbox": [ + 198, + 99, + 419, + 247 + ], + "type": "image", + "image_path": "ee2247776106f773818e3694de0b475733f5a68626668795dd121ba375cda8e7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 262, + 506, + 319 + ], + "lines": [ + { + "bbox": [ + 104, + 262, + 506, + 319 + ], + "spans": [ + { + 
"bbox": [ + 104, + 262, + 506, + 319 + ], + "type": "text", + "content": "Figure A.4: Empirical evaluation on the complexity of the learned feature mapping during training under the Gaussian mixture setting. Two complexity measurements are considered, i.e., " + }, + { + "bbox": [ + 104, + 262, + 506, + 319 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 262, + 506, + 319 + ], + "type": "text", + "content": " as in (3.3) and the SNE loss as in (2.2). The SNE loss here only serves as in indicator for how well the pairwise distances are preserved. The training objective is the standard InfoNCE loss. The SNE loss decreases quickly until in the first 100 iterations and then stays flat." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 336, + 504, + 420 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 132, + 336, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 504, + 369 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 336, + 504, + 369 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 132, + 336, + 504, + 369 + ], + "type": "text", + "content": " for the projection layer shares similar patterns as in the Gaussian mixture case, first increase and then decreases. However, " + }, + { + "bbox": [ + 132, + 336, + 504, + 369 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 132, + 336, + 504, + 369 + ], + "type": "text", + "content": " for the representation layer monotonically decreases." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 373, + 504, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 373, + 504, + 395 + ], + "spans": [ + { + "bbox": [ + 132, + 373, + 504, + 395 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 373, + 504, + 395 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 132, + 373, + 504, + 395 + ], + "type": "text", + "content": " for the projection layer and the " + }, + { + "bbox": [ + 132, + 373, + 504, + 395 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 132, + 373, + 504, + 395 + ], + "type": "text", + "content": "-norm in the representation layer share almost identical patterns." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 398, + 504, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 398, + 504, + 420 + ], + "spans": [ + { + "bbox": [ + 132, + 398, + 504, + 420 + ], + "type": "text", + "content": "- Comparing SimCLR, both the calculated " + }, + { + "bbox": [ + 132, + 398, + 504, + 420 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 132, + 398, + 504, + 420 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 132, + 398, + 504, + 420 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 132, + 398, + 504, + 420 + ], + "type": "text", + "content": "-norm are much smaller for " + }, + { + "bbox": [ + 132, + 398, + 504, + 420 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 132, + 398, + 504, + 420 + ], + "type": "text", + "content": "-SimCLR." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 429, + 506, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 506, + 463 + ], + "type": "text", + "content": "In conclusion, on one hand, our empirical results demonstrate that the complexity of the feature extractor " + }, + { + "bbox": [ + 104, + 429, + 506, + 463 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 429, + 506, + 463 + ], + "type": "text", + "content": " does decrease during training and seem to be implicitly minimized. On the other hand, its trend is shared with other more popularly used complexity measurements." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 475, + 249, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 249, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 249, + 487 + ], + "type": "text", + "content": "A.4 PROOF OF COROLLARY 3.6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 495, + 506, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 539 + ], + "type": "text", + "content": "In this section, we illustrate with rigor how the hypothesized implicit bias can give rise to structure-preserving property of SSCL. Corollary 3.6 states that minimizing the (Lipschitz) complexity of the feature mapping will also result in the best match between " + }, + { + "bbox": [ + 104, + 495, + 506, + 539 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 495, + 506, + 539 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 495, + 506, + 539 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 495, + 506, + 539 + ], + "type": "text", + "content": " (under permutation). 
To provide more theoretical insight, we present the following lemma in the simpler vector-matching case." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "text", + "content": "Lemma A.1. Let " + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "inline_equation", + "content": "0 < x_{1} < \\dots < x_{m}" + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "inline_equation", + "content": "0 < y_{1} < \\dots < y_{m}" + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "text", + "content": " be two real-valued sequences, normalized such that " + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{m} x_{i}^{2} = \\sum_{i=1}^{m} y_{i}^{2} = 1" + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "text", + "content": ". Consider a permutation " + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "inline_equation", + "content": "\\{1, \\dots, m\\}" + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "text", + "content": " and denote all such permutations as " + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 542, + 504, + 575 + ], + "type": "text", + "content": ". 
Then" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 201, + 578, + 407, + 610 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 578, + 407, + 610 + ], + "spans": [ + { + "bbox": [ + 201, + 578, + 407, + 610 + ], + "type": "interline_equation", + "content": "\\underset {\\pi \\in T} {\\operatorname {a r g m i n}} \\sum_ {i = 1} ^ {m} \\frac {y _ {\\pi (i)}}{x _ {i}} = \\underset {\\pi \\in T} {\\operatorname {a r g m i n}} \\sum_ {i = 1} ^ {m} \\left(x _ {i} - y _ {\\pi (i)}\\right) ^ {2} := \\pi^ {*},", + "image_path": "a09d8fa8aefae3197bfd8ce6ea8579a3acece951410905b0642e9d9b4191ec4e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 613, + 241, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 241, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 241, + 624 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 613, + 241, + 624 + ], + "type": "inline_equation", + "content": "\\pi^{*}(i) = i" + }, + { + "bbox": [ + 105, + 613, + 241, + 624 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 105, + 613, + 241, + 624 + ], + "type": "inline_equation", + "content": "i = 1,\\dots ,m" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 636, + 302, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 636, + 302, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 302, + 648 + ], + "type": "text", + "content": "Proof. 
By the rearrangement inequality, we have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 265, + 651, + 345, + 681 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 651, + 345, + 681 + ], + "spans": [ + { + "bbox": [ + 265, + 651, + 345, + 681 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 1} ^ {m} \\frac {y _ {\\pi (i)}}{x _ {i}} \\geq \\sum_ {i = 1} ^ {m} \\frac {y _ {i}}{x _ {i}}.", + "image_path": "24aa70e34b1e0f6cf9242b2ff37e5b1c2cfee6919d1f5bc7d8684481e90b5fec.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 685, + 147, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 685, + 147, + 696 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 147, + 696 + ], + "type": "text", + "content": "Similarly," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 173, + 699, + 436, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 699, + 436, + 730 + ], + "spans": [ + { + "bbox": [ + 173, + 699, + 436, + 730 + ], + "type": "interline_equation", + "content": "\\sum_ {i = 1} ^ {m} \\left(x _ {i} - y _ {\\pi (i)}\\right) ^ {2} = \\sum_ {i = 1} ^ {m} x _ {i} ^ {2} + \\sum_ {i = 1} ^ {m} y _ {i} ^ {2} - 2 \\sum_ {i = 1} ^ {m} x _ {i} \\cdot y _ {\\pi (i)} \\geq 2 - 2 \\sum_ {i = 1} ^ {m} x _ {i} \\cdot y _ {i}.", + "image_path": "bc42120ce9128b7f464e4c55bf09115bd7253f220950fcf8f75f2fa9eebf2257.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { 
+ "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 173, + 97, + 451, + 221 + ], + "blocks": [ + { + "bbox": [ + 173, + 97, + 451, + 221 + ], + "lines": [ + { + "bbox": [ + 173, + 97, + 451, + 221 + ], + "spans": [ + { + "bbox": [ + 173, + 97, + 451, + 221 + ], + "type": "image", + "image_path": "0ea42d2746acd11d67fc60fd568b58c37e8b9f9e8f6c2abc3e15cb4d9e33226e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 255, + 232, + 355, + 243 + ], + "lines": [ + { + "bbox": [ + 255, + 232, + 355, + 243 + ], + "spans": [ + { + "bbox": [ + 255, + 232, + 355, + 243 + ], + "type": "text", + "content": "(a) SimCLR on CIFAR-10." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 170, + 268, + 449, + 393 + ], + "blocks": [ + { + "bbox": [ + 170, + 268, + 449, + 393 + ], + "lines": [ + { + "bbox": [ + 170, + 268, + 449, + 393 + ], + "spans": [ + { + "bbox": [ + 170, + 268, + 449, + 393 + ], + "type": "image", + "image_path": "ddf39eecf32ac6ecfeefc56f7138a32319b57f38df1543347a566b8405e5afbf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 251, + 404, + 357, + 415 + ], + "lines": [ + { + "bbox": [ + 251, + 404, + 357, + 415 + ], + "spans": [ + { + "bbox": [ + 251, + 404, + 357, + 415 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 251, + 404, + 357, + 415 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 251, + 404, + 357, + 415 + ], + "type": "text", + "content": "-SimCLR on CIFAR-10." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 494, + 506, + 504, + 515 + ], + "blocks": [ + { + "bbox": [ + 104, + 428, + 506, + 485 + ], + "lines": [ + { + "bbox": [ + 104, + 428, + 506, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 428, + 506, + 485 + ], + "type": "text", + "content": "Figure A.5: Empirical evaluation on the complexity of the learned feature mapping during training on CIFAR-10. Two complexity measurements are considered, i.e., " + }, + { + "bbox": [ + 104, + 428, + 506, + 485 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 428, + 506, + 485 + ], + "type": "text", + "content": " as in (3.3) and " + }, + { + "bbox": [ + 104, + 428, + 506, + 485 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 428, + 506, + 485 + ], + "type": "text", + "content": "-norm. Specifically, we calculate the expected Lipschitz constant on both the representation layer (512-dimensional) and the projection layer (128-dimensional). Figure (a) and (b) show the trends (along the 200 training epochs) for SimCLR and " + }, + { + "bbox": [ + 104, + 428, + 506, + 485 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 428, + 506, + 485 + ], + "type": "text", + "content": "-SimCLR respectively." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 494, + 506, + 504, + 515 + ], + "lines": [ + { + "bbox": [ + 494, + 506, + 504, + 515 + ], + "spans": [ + { + "bbox": [ + 494, + 506, + 504, + 515 + ], + "type": "image", + "image_path": "a415ec116f011833886b5c2061399667a41ae2137ad8d707a995d43e79618e66.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 532, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 532, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 532, + 504, + 567 + ], + "type": "text", + "content": "Lemma A.1 gives a vector-version illustration of our Corollary 3.6, stating that minimizing the expected derivative (to zero) of the mapping function " + }, + { + "bbox": [ + 104, + 532, + 504, + 567 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 532, + 504, + 567 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 532, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\sum_{i}f(x_{i}) / x_{1}" + }, + { + "bbox": [ + 104, + 532, + 504, + 567 + ], + "type": "text", + "content": " leads to preserving the norm difference of the input vector and output vector." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 571, + 279, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 279, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 279, + 583 + ], + "type": "text", + "content": "Next, we provide the proof of Theorem 3.5." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 597, + 324, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 324, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 324, + 610 + ], + "type": "text", + "content": "Proof of Theorem 3.5. 
Straightforwardly, we can write" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 200, + 617, + 411, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 617, + 411, + 712 + ], + "spans": [ + { + "bbox": [ + 200, + 617, + 411, + 712 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\bar {P} - Q ^ {\\pi} \\right\\| _ {F} = \\sum_ {i \\neq j} \\left(\\frac {1}{p _ {i j}} + q _ {\\pi (i) \\pi (j)}\\right) ^ {2} \\\\ = \\sum_ {i \\neq j} \\frac {1}{p _ {i j} ^ {2}} + \\sum_ {i \\neq j} q _ {\\pi (i) \\pi (j)} ^ {2} + 2 \\sum_ {i \\neq j} \\frac {q _ {\\pi (i) \\pi (j)}}{p _ {i j}} \\\\ = 2 C _ {1} (P, Q ^ {\\pi}) + \\sum_ {i \\neq j} \\frac {1}{p _ {i j} ^ {2}} + \\sum_ {i \\neq j} q _ {i j} ^ {2} \\\\ \\end{array}", + "image_path": "385e33c9378078baf9c90ef1c6adae4ab60b22b0bc6da6496465a2fca104a338.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 719, + 335, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 719, + 335, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 335, + 733 + ], + "type": "text", + "content": "Thus, minimizing " + }, + { + "bbox": [ + 105, + 719, + 335, + 733 + ], + "type": "inline_equation", + "content": "C_1(P, Q^\\pi)" + }, + { + "bbox": [ + 105, + 719, + 335, + 733 + ], + "type": "text", + "content": " also minimizes " + }, + { + "bbox": [ + 105, + 719, + 335, + 733 + ], + "type": "inline_equation", + "content": "\\| \\bar{P} - Q^\\pi \\|_F" + }, + { + "bbox": [ + 105, + 719, + 335, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "Theorem 3.5 is a straightforward generalization of Lemma A.1. Next, we provide proof for Corollary 3.6, restated below." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 125, + 496, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 125, + 496, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 496, + 140 + ], + "type": "text", + "content": "Proof of Corollary 3.6. 
Recall the SimCLR loss " + }, + { + "bbox": [ + 104, + 125, + 496, + 140 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}} = \\frac{1}{2n}\\sum_{i = 1}^{n}(l(\\pmb{x}_i,\\pmb{x}_i')) + l(\\pmb{x}_i',\\pmb{x}_i))" + }, + { + "bbox": [ + 104, + 125, + 496, + 140 + ], + "type": "text", + "content": ", where" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 181, + 147, + 428, + 177 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 147, + 428, + 177 + ], + "spans": [ + { + "bbox": [ + 181, + 147, + 428, + 177 + ], + "type": "interline_equation", + "content": "l (\\pmb {x} _ {i}, \\pmb {x} _ {i} ^ {\\prime}) = - \\log \\frac {\\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i}) , f (\\pmb {x} _ {i} ^ {\\prime})) / \\tau)}{\\sum_ {x \\in \\mathcal {D} _ {n} \\cup \\mathcal {D} _ {n} ^ {\\prime} \\setminus \\{\\pmb {x} _ {i} \\}} \\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i}) , f (\\pmb {x})) / \\tau)}.", + "image_path": "8ba54ffa7dc2f298df71bd6f130695d314b139ffe13f20084f333dbfa5119246.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": "Without loss of generality, let " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "\\tau = 1" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": ". 
Notice that " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "l(\\pmb{x}_i, \\pmb{x}_i')" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " is monotonically decreasing as " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "\\mathrm{sim}(f(\\pmb{x}_i), f(\\pmb{x}_i'))" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " increases, due to the monotonicity of function " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "\\frac{x}{x + c}" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " with respect to " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "x > 0" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "c > 0" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": ". Hence, in order for " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}}" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " to be minimized, perfect alignment is required, i.e., " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "f(\\pmb{x}_i) = f(\\pmb{x}_i')" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "inline_equation", + "content": "i = 1, \\dots, n" + }, + { + "bbox": [ + 104, + 184, + 506, + 232 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 236, + 504, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 504, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 504, + 260 + ], + "type": "text", + "content": "With perfect alignment achieved, " + }, + { + "bbox": [ + 104, + 236, + 504, + 260 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}}" + }, + { + "bbox": [ + 104, + 236, + 504, + 260 + ], + "type": "text", + "content": " only concerns the pairwise similarity between negative samples " + }, + { + "bbox": [ + 104, + 236, + 504, + 260 + ], + "type": "inline_equation", + "content": "f(\\pmb{x}_i)" + }, + { + "bbox": [ + 104, + 236, + 504, + 260 + ], + "type": "text", + "content": "'s, which can be simplified as " + }, + { + "bbox": [ + 104, + 236, + 504, + 260 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}} \\geq L_{\\mathrm{uniform}}" + }, + { + "bbox": [ + 104, + 236, + 504, + 260 + ], + "type": "text", + "content": " where" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 179, + 267, + 428, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 267, + 428, + 380 + ], + "spans": [ + { + "bbox": [ + 179, + 267, + 428, + 380 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L _ {\\text {u n i f o r m}} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} - \\log \\frac {e}{e + \\sum_ {j \\neq i} \\exp (\\sin (f (\\boldsymbol {x} _ {i}) , f (\\boldsymbol {x} _ {j})))} \\\\ \\geq \\log \\left(\\frac {1}{n} \\sum_ {i = 1} ^ {n} \\left(1 + \\frac {1}{e} \\sum_ {j \\neq i} \\exp (\\sin (f (\\boldsymbol {x} _ {i}), f (\\boldsymbol {x} _ {j})))\\right)\\right) \\\\ \\geq \\log \\left(1 + \\frac {1}{n \\cdot e} \\sum_ {1 \\leq i \\neq j \\leq n} \\exp (\\mathrm {s i m} (f (\\pmb {x} _ {i}), f (\\pmb {x} _ {j})))\\right). 
\\\\ \\end{array}", + "image_path": "5046223690ff5324f39fce649f74cc5a9ed625caa0d100d871608c9cc5b5f7b2.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 392, + 504, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 415 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{uniform}}" + }, + { + "bbox": [ + 104, + 392, + 504, + 415 + ], + "type": "text", + "content": " can be minimized by mapping " + }, + { + "bbox": [ + 104, + 392, + 504, + 415 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + }, + { + "bbox": [ + 104, + 392, + 504, + 415 + ], + "type": "text", + "content": "'s as distant as possible, hence the connection to Tammas problem and the uniformity principle." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "text", + "content": "With sufficient capacity of the feature mapping " + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "text", + "content": ", the SimCLR loss can be minimized to its (empirical) global minima. 
However, such " + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "text", + "content": " is not unique since " + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}}" + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "text", + "content": " is invariant to permutations of mapping relationships from " + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "inline_equation", + "content": "f(x_i)" + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "text", + "content": ". If " + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "inline_equation", + "content": "f_n^*" + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "text", + "content": " further minimizes " + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 420, + 505, + 455 + ], + "type": "text", + "content": " on the sample level, i.e.," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 187, + 462, + 422, + 493 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 462, + 422, + 493 + ], + "spans": [ + { + "bbox": [ + 187, + 462, + 422, + 493 + ], + "type": "interline_equation", + "content": "f_{n}^{*}:= \\operatorname *{argmin}_{f}C_{n}(f) = \\operatorname *{argmin}_{f}\\sum_{1\\leq i\\neq j\\leq n}\\frac{\\|f(\\boldsymbol{x}_{i}) - f(\\boldsymbol{x}_{j})\\|_{2}}{\\|\\boldsymbol{x}_{i} - \\boldsymbol{x}_{j}\\|_{2}},", + "image_path": "364371d005795dd573d8e5bf0314767685e50cf23ae0a4f3ba087ebe964a62c5.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 501, + 506, + 536 + ], 
+ "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 501, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 506, + 536 + ], + "type": "text", + "content": "Then, " + }, + { + "bbox": [ + 104, + 501, + 506, + 536 + ], + "type": "inline_equation", + "content": "f_{n}^{*}" + }, + { + "bbox": [ + 104, + 501, + 506, + 536 + ], + "type": "text", + "content": " also solves a type of SNE problem with uniformity constraint (3.4) as stated in Theorem 3.5. To see this, if we define " + }, + { + "bbox": [ + 104, + 501, + 506, + 536 + ], + "type": "inline_equation", + "content": "q_{ij} = -\\|f(\\pmb{x}_i) - f(\\pmb{x}_j)\\|_2" + }, + { + "bbox": [ + 104, + 501, + 506, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 501, + 506, + 536 + ], + "type": "inline_equation", + "content": "p_{ij} = -\\|x_i - x_j\\|_2" + }, + { + "bbox": [ + 104, + 501, + 506, + 536 + ], + "type": "text", + "content": ", which is reasonable since the larger the distance, the smaller the similarity, we can directly apply the results in Theorem 3.5." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 494, + 540, + 504, + 550 + ], + "blocks": [ + { + "bbox": [ + 494, + 540, + 504, + 550 + ], + "lines": [ + { + "bbox": [ + 494, + 540, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 494, + 540, + 504, + 550 + ], + "type": "image", + "image_path": "9e37b82f05edc2eee2998cb2961fe037c95f842d678bb12ca52eb553ca025bb9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 566, + 506, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 566, + 506, + 645 + ], + "spans": [ + { + "bbox": [ + 104, + 566, + 506, + 645 + ], + "type": "text", + "content": "Remark A.2. 
As can be seen from Theorem 3.5 and the proof of Corollary 3.6, we showcase the relationship between minimizing " + }, + { + "bbox": [ + 104, + 566, + 506, + 645 + ], + "type": "inline_equation", + "content": "C(f)" + }, + { + "bbox": [ + 104, + 566, + 506, + 645 + ], + "type": "text", + "content": " and structure preserving property by considering a special SNE problem, where the pairwise similarity is not modeled by Gaussian as standard, hence the word \"resembling\" in Corollary 3.6. Although " + }, + { + "bbox": [ + 104, + 566, + 506, + 645 + ], + "type": "inline_equation", + "content": "q_{ij} = -\\| f(\\pmb{x}_i) - f(\\pmb{x}_j)\\|_2" + }, + { + "bbox": [ + 104, + 566, + 506, + 645 + ], + "type": "text", + "content": " is unorthodox, it is reasonable since the larger the distance, the smaller the similarity. If we consider the SNE method as in Hinton et al. (2006), our proof does not go through directly and demands more complicated analysis. However, our results are still valid in connecting the complexity of the feature map to the pairwise similarity matching." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "Our statement in Corollary 3.6 requires perfect alignment or perfect uniformity. When the assumptions are not perfectly met, we can still obtain insights for the resulting feature mapping. Alignment and uniformity (Wang & Isola, 2020) is not the whole story of contrastive learning, and our identified structure-preserving property implicitly induced by complexity minimization provides an other angle of the learning process. From this perspective, contrastive learning can be thought of as a combination of alignment and SNE with uniformity constraint. 
In Figure A.3, while obtaining approximate alignment and uniformity, the feature mapping also preserves the relative relationships of the clusters (labels)." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 326, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 326, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 326, + 94 + ], + "type": "text", + "content": "A.5 ALIGNMENT AND UNIFORMITY OF T-SIMCLR" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 504, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 504, + 126 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 504, + 126 + ], + "type": "text", + "content": "Due to the change of training objective, we may want to reevaluate the properties of the learned feature from " + }, + { + "bbox": [ + 104, + 102, + 504, + 126 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 102, + 504, + 126 + ], + "type": "text", + "content": "-SimCLR. We will show that alignment still hold while uniformity is changed (to infinity)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "text", + "content": "Let us consider a compact region " + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\Omega \\subset \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i \\in \\Omega" + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "text", + "content": " be the transformation such that the augmented data point " + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i' = t(\\pmb{x}_i)" + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "text", + "content": " is still in " + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "text", + "content": ". Wang & Isola (2020) showed that the contrastive loss can be decomposed into the alignment loss and the uniformity loss. Zimmermann et al. (2021) further showed that the contrastive loss converges to the cross-entropy between latent distributions, where the underlying latent space is assumed to be uniform, and the positive pairs are specified to be an exponential distribution. 
In this section, we show a parallel result, which states that in the population level, the " + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 131, + 506, + 209 + ], + "type": "text", + "content": "-SNE loss is the cross-entropy between two distributions of generating positive pairs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "text", + "content": "Theorem A.3. Let " + }, + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "inline_equation", + "content": "H(\\cdot, \\cdot)" + }, + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "text", + "content": " be the cross entropy between distributions. Let " + }, + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "inline_equation", + "content": "p(x)" + }, + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "text", + "content": " be the density of " + }, + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "inline_equation", + "content": "p(\\cdot | x)" + }, + { + "bbox": [ + 104, + 210, + 506, + 233 + ], + "type": "text", + "content": " be the conditional density of generating a positive pair, and define" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 235, + 477, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 235, + 477, + 262 + ], + "spans": [ + { + "bbox": [ + 132, + 235, + 477, + 262 + ], + "type": "interline_equation", + "content": "q _ {f} \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right) = C _ {f} (\\boldsymbol {x}) ^ {- 1} \\frac {p 
\\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}}, \\text {w i t h} C _ {f} (\\boldsymbol {x}) = \\int_ {\\Omega} \\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} d \\boldsymbol {x} ^ {\\prime}.", + "image_path": "a957fe5209eb86639afabdcbb34d7014b9d4ab484bb923d9249fbcf93cb9b913.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 262, + 165, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 262, + 165, + 273 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 165, + 273 + ], + "type": "text", + "content": "Then, we have" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 210, + 276, + 504, + 289 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 276, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 210, + 276, + 504, + 289 + ], + "type": "interline_equation", + "content": "\\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} (H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) = L _ {a} (f) + L _ {u} (f), \\tag {A.1}", + "image_path": "856c611fdd7c7eac4acd42c9b4000466ac429d04377d80ac355e14f84d45fe31.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 291, + 363, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 291, + 363, + 303 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 363, + 303 + ], + "type": "text", + "content": "which corresponds to the population-level " + }, + { + "bbox": [ + 104, + 291, + 363, + 303 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 291, + 363, + 303 + ], + "type": "text", + "content": "-SimCLR loss where" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 201, + 304, + 408, + 318 + ], + "type": "interline_equation", + "angle": 0, + "lines": 
[ + { + "bbox": [ + 201, + 304, + 408, + 318 + ], + "spans": [ + { + "bbox": [ + 201, + 304, + 408, + 318 + ], + "type": "interline_equation", + "content": "L _ {a} = \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\mathbb {E} _ {\\boldsymbol {x} \\sim p \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right)} \\log \\left(1 + \\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2} ^ {2}\\right),", + "image_path": "1d66fddeee23fdf56abd2d28be3fb5fc7faa33db421275dc7911d76267fc1809.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 203, + 319, + 405, + 335 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 319, + 405, + 335 + ], + "spans": [ + { + "bbox": [ + 203, + 319, + 405, + 335 + ], + "type": "interline_equation", + "content": "L _ {u} = \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\log \\mathbb {E} _ {\\widetilde {\\boldsymbol {x}} \\sim p (\\widetilde {\\boldsymbol {x}})} \\big (1 + \\| f (\\boldsymbol {x}) - f (\\widetilde {\\boldsymbol {x}}) \\| _ {2} ^ {2} \\big) ^ {- 1}.", + "image_path": "6374d87ca95cbdf4b03bc3b8bea4c2ed6a67247afec4261eea9fb869757a5f78.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 345, + 175, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 345, + 175, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 175, + 356 + ], + "type": "text", + "content": "Proof. 
Note that" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 358, + 547, + 455 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 358, + 547, + 455 + ], + "spans": [ + { + "bbox": [ + 106, + 358, + 547, + 455 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) \\\\ = - \\int_ {\\Omega} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\log \\left(\\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2} ^ {2}}\\right) d \\boldsymbol {x} ^ {\\prime} + \\log C _ {f} (\\boldsymbol {x}) \\\\ = \\int_ {\\Omega} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\log (1 + \\| f (\\boldsymbol {x}) - f (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} - \\int_ {\\Omega} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\log (p (\\boldsymbol {x} ^ {\\prime})) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} + \\log \\int_ {\\Omega} \\frac {p (\\boldsymbol {x} ^ {\\prime})}{1 + \\| f (\\boldsymbol {x}) - f (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}} \\mathrm {d} \\boldsymbol {x} ^ {\\prime} \\\\ = \\int_ {\\Omega} p (\\pmb {x} ^ {\\prime} | \\pmb {x}) \\log (1 + \\| f (\\pmb {x}) - f (\\pmb {x} ^ {\\prime}) \\| _ {2} ^ {2}) \\mathrm {d} \\pmb {x} ^ {\\prime} - \\int_ {\\Omega} p (\\pmb {x} ^ {\\prime} | \\pmb {x}) \\log (p (\\pmb {x} ^ {\\prime})) \\mathrm {d} \\pmb {x} ^ {\\prime} + \\log \\mathbb {E} _ {\\pmb {x} ^ {\\prime} \\sim p (\\pmb {x} ^ {\\prime})} (1 + \\| f (\\pmb {x}) - f (\\pmb {x} ^ {\\prime}) \\| _ {2} ^ {2}) ^ {- 1}. 
\\\\ \\end{array}", + "image_path": "7fc77d4cc1180cab17079cf1e89edb6b71b375c0f1eae6a0dbd3c3828518a661.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 456, + 284, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 284, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 284, + 467 + ], + "type": "text", + "content": "Taking expectation with respect to " + }, + { + "bbox": [ + 105, + 456, + 284, + 467 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 105, + 456, + 284, + 467 + ], + "type": "text", + "content": " leads to" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 117, + 469, + 493, + 541 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 469, + 493, + 541 + ], + "spans": [ + { + "bbox": [ + 117, + 469, + 493, + 541 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) \\\\ = \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\mathbb {E} _ {\\boldsymbol {x} ^ {\\prime} \\sim p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x})} \\log (1 + \\| f (\\boldsymbol {x}) - f (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}) + \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} \\log \\mathbb {E} _ {\\widetilde {\\boldsymbol {x}} \\sim p (\\widetilde {\\boldsymbol {x}})} (1 + \\| f (\\boldsymbol {x}) - f (\\widetilde {\\boldsymbol {x}}) \\| _ {2} ^ {2}) ^ {- 1} \\\\ - \\int_ {\\Omega} \\int_ {\\Omega} p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right) \\log \\left(p \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) d \\boldsymbol {x} ^ {\\prime} d \\boldsymbol {x} \\\\ = L _ {a} (f) + L _ {u} (f) - C _ {p}, \\\\ \\end{array}", + "image_path": "6eef6126282b11862ae4289399031c3aadacf82a2298cd5a851a43a0bb7872b0.jpg" + } + ] + } + ], + "index": 14 + 
}, + { + "bbox": [ + 105, + 543, + 132, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 543, + 132, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 132, + 552 + ], + "type": "text", + "content": "where" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 153, + 553, + 457, + 579 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 553, + 457, + 579 + ], + "spans": [ + { + "bbox": [ + 153, + 553, + 457, + 579 + ], + "type": "interline_equation", + "content": "C _ {p} = \\int_ {\\Omega} \\int_ {\\Omega} p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime} \\mid \\boldsymbol {x}\\right) \\log \\left(p \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} \\mathrm {d} \\boldsymbol {x} = \\int_ {\\Omega} \\int_ {\\Omega} p \\left(\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}\\right) \\log \\left(p \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\mathrm {d} \\boldsymbol {x} ^ {\\prime} \\mathrm {d} \\boldsymbol {x}", + "image_path": "401d4789a21b8d47614480e4a4fbec5d1b23ed58753bb1b3722db0f9bbf71b87.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 580, + 194, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 580, + 194, + 592 + ], + "spans": [ + { + "bbox": [ + 105, + 580, + 194, + 592 + ], + "type": "text", + "content": "does not depend on " + }, + { + "bbox": [ + 105, + 580, + 194, + 592 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 105, + 580, + 194, + 592 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 184, + 594, + 426, + 719 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 594, + 426, + 719 + ], + "spans": [ + { + "bbox": [ + 184, + 594, + 426, + 719 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} _ {\\boldsymbol {x} \\sim p (\\boldsymbol {x})} H (p (\\cdot | \\boldsymbol {x}), q _ {f} (\\cdot | \\boldsymbol {x})) \\\\ = \\int_ {\\Omega} p (\\boldsymbol {x}) \\frac {1}{p (\\boldsymbol {x})} \\int_ {\\Omega} p (\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}) \\log \\left(\\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\| _ {2} ^ {2}}\\right) d \\boldsymbol {x} ^ {\\prime} d \\boldsymbol {x} \\\\ - \\int_ {\\Omega} \\int_ {\\Omega} \\frac {p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} d \\boldsymbol {x} d \\boldsymbol {x} ^ {\\prime} \\\\ = \\int_ {\\Omega} \\int_ {\\Omega} p (\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}) \\log \\left(\\frac {p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}}\\right) d \\boldsymbol {x} ^ {\\prime} d \\boldsymbol {x} \\\\ - \\int_ {\\Omega} \\int_ {\\Omega} \\frac {p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}{1 + \\left\\| f (\\boldsymbol {x}) - f \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} d \\boldsymbol {x} d \\boldsymbol {x} ^ {\\prime}. 
\\\\ \\end{array}", + "image_path": "f984bf49e7e7cb776cf483274dfa464ea161a2e80b591fc6f32c9cca85ff1479.jpg" + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 494, + 721, + 505, + 731 + ], + "blocks": [ + { + "bbox": [ + 494, + 721, + 505, + 731 + ], + "lines": [ + { + "bbox": [ + 494, + 721, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 494, + 721, + 505, + 731 + ], + "type": "image", + "image_path": "309c05778265ac413422425956d2a2957455019a721cba777fcfa71a94d93a31.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": "In Theorem A.3, " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "L_{a}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " is the alignment loss and " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "L_{u}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " is the uniformity loss. 
The decomposition is much more natural for " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": "-SimCLR as opposed to that in " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": ", mainly due to the change from conditional to joint distribution when modeling the pairwise similarity. Furthermore, if the " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": "-SimCLR loss is minimized, we must have " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "p(\\cdot | \\boldsymbol{x}) = q_{f}(\\cdot | \\boldsymbol{x})" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": ", provided " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " has sufficient capacity. 
Note that if " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "p(\\cdot | \\boldsymbol{x}) = q_{f}(\\cdot | \\boldsymbol{x})" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "P_{j|i}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "inline_equation", + "content": "Q_{j|i}" + }, + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": " are perfectly matched, which indicates that we obtain a perfect neighbor embedding." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 369, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 369, + 156 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 369, + 156 + ], + "type": "text", + "content": "Theorem A.3 implies that the optimal feature mapping " + }, + { + "bbox": [ + 104, + 143, + 369, + 156 + ], + "type": "inline_equation", + "content": "f^{*}" + }, + { + "bbox": [ + 104, + 143, + 369, + 156 + ], + "type": "text", + "content": " satisfies" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 267, + 158, + 342, + 171 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 158, + 342, + 171 + ], + "spans": [ + { + "bbox": [ + 267, + 158, + 342, + 171 + ], + "type": "interline_equation", + "content": "p (\\cdot | \\boldsymbol {x}) = q _ {f ^ {*}} (\\cdot | \\boldsymbol {x}),", + "image_path": "7343c9c74539ff72c60ca0fcfc023ff71cc7e795c15aacf9ae5fddf09b725b01.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 173, + 268, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 173, + 268, + 185 + ], + "spans": [ + { + "bbox": [ + 104, + 173, + 268, + 185 + ], + "type": "text", + "content": "which 
further implies that for any " + }, + { + "bbox": [ + 104, + 173, + 268, + 185 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\Omega" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 188, + 188, + 504, + 242 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 188, + 504, + 242 + ], + "spans": [ + { + "bbox": [ + 188, + 188, + 504, + 242 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} C _ {f ^ {*}} (\\boldsymbol {x}) ^ {- 1} \\frac {p (\\boldsymbol {x} ^ {\\prime})}{1 + \\| f ^ {*} (\\boldsymbol {x}) - f ^ {*} (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} ^ {2}} \\propto C (\\boldsymbol {x}) ^ {- 1} p (\\boldsymbol {x} ^ {\\prime} | \\boldsymbol {x}) \\\\ \\Leftrightarrow C _ {f ^ {*}} \\left(\\boldsymbol {x}\\right) ^ {- 1} \\frac {1}{1 + \\left\\| f ^ {*} (\\boldsymbol {x}) - f ^ {*} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}} \\propto C (\\boldsymbol {x}) ^ {- 1} \\frac {p \\left(\\boldsymbol {x} , \\boldsymbol {x} ^ {\\prime}\\right)}{p (\\boldsymbol {x}) p \\left(\\boldsymbol {x} ^ {\\prime}\\right)}, \\tag {A.2} \\\\ \\end{array}", + "image_path": "69789399a1aa190a9e38d867136106c4c4a691749bb94bedbb0cf2787997668d.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "C(\\pmb{x}) = \\int p(\\pmb{x}'|\\pmb{x})\\mathrm{d}\\pmb{x}'" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": ". 
Unlike the usual normalized SimCLR, " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": "-SNE does not assume any special structure on " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": " (e.g., " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\| f\\| _2 = 1" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": "), thus " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": " can go to infinity. Comparing to the finite sample " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": "-SimCLR loss, the population version is trickier to analyze. 
This is because for a given point " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\pmb{x}'" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": ", it can be an augmented sample of some " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": " (with probability " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "p(\\pmb{x}'|\\pmb{x})" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": "), or a negative sample of " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": " (when we treat " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\pmb{x}'" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": " as another sample point). This reflects the essential difficulty between population and finite samples in contrastive learning, not only for " + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 243, + 504, + 312 + ], + "type": "text", + "content": "-SimCLR." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": "For clustered data, (A.2) provides two important messages, provided that the augmentation is not too extreme and the augmented sample " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\pmb{x}^{\\prime}" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": " stays in the same cluster as the original " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": ". On one hand, when " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\pmb{x}_1" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\pmb{x}_2" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": " belongs to different clusters, the joint density " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "p(\\pmb{x} = \\pmb{x}_1, \\pmb{x}^{\\prime} = \\pmb{x}_2)" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": " will be very small, close to zero, which indicates that " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\| f^{*}(\\pmb{x}_{1}) - f^{*}(\\pmb{x}_{2}) \\|_{2}" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": " is very large, tending to infinity. 
On the other hand, for " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\pmb{x}_1" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\pmb{x}_2" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": " belonging to the same cluster, " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "p(\\pmb{x} = \\pmb{x}_1, \\pmb{x}^{\\prime} = \\pmb{x}_2)" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": " will be relatively large. Hence, the features of the same cluster will stay close. Overall, we will observe similar clustered structure in the feature space. This is confirmed in the Gaussian mixture setting in Figure 1(c), in which case, the problem can be oversimplified as mapping 5 points in " + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^2" + }, + { + "bbox": [ + 104, + 316, + 506, + 406 + ], + "type": "text", + "content": " to the unit-circle." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 420, + 402, + 433 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 420, + 402, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 402, + 433 + ], + "type": "text", + "content": "B CONNECTION TO DISTANCE BETWEEN DISTRIBUTIONS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 445, + 506, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 506, + 511 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 506, + 511 + ], + "type": "text", + "content": "Through the lens of stochastic neighbor embedding, the feature learning process of SSCL methods can be seen as minimizing certain \"distances\" between distributions in different dimensions. 
Ideally, the feature should preserve the distributional information about the data. Since the data and the feature do not lie in the same metric space, quantitatively measuring their distributional distance is difficult. Fortunately, there are existing tools we can utilize, specifically, Gromov-Wasserstein distance (Mémoli, 2011; Salmona et al., 2021)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "text", + "content": " be two Polish spaces, each endowed respectively with probability measures " + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "inline_equation", + "content": "p_x" + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "inline_equation", + "content": "p_z" + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "text", + "content": ". 
Given two measurable cost functions " + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "inline_equation", + "content": "c_x: \\mathcal{X} \\times \\mathcal{X} \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "inline_equation", + "content": "c_z: \\mathcal{Z} \\times \\mathcal{Z} \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "inline_equation", + "content": "D: \\mathbb{R} \\times \\mathbb{R} \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 516, + 506, + 549 + ], + "type": "text", + "content": ", the Gromov-Wasserstein distance can be defined as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 125, + 552, + 485, + 581 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 552, + 485, + 581 + ], + "spans": [ + { + "bbox": [ + 125, + 552, + 485, + 581 + ], + "type": "interline_equation", + "content": "G W _ {p} (p _ {x}, p _ {z} | c _ {x}, c _ {z}) := \\left(\\inf _ {\\pi \\in \\prod (p _ {x}, p _ {z})} \\int_ {\\mathcal {X} ^ {2} \\times \\mathcal {Z} ^ {2}} D (c _ {x} (x, x ^ {\\prime}), c _ {z} (z, z ^ {\\prime})) ^ {p} d \\pi (x, z) d \\pi (x ^ {\\prime}, z ^ {\\prime})\\right) ^ {1 / p},", + "image_path": "54670751bbf4cdcb587d3c9c45f9ef5a01d9ad27d4a8a7fb50f11b3aaca9f9a7.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "\\prod(p_x,p_z)" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " denotes all the joint 
distributions in " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "\\mathcal{X}\\times \\mathcal{Z}" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " such that the marginals are " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "p_x" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "p_z" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": ". Typically, " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "D(c_{x},c_{z})" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " is chosen to be " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "|c_{x} - c_{z}|" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "c_{x}(x,x^{\\prime})" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " is usually chosen to be " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "\\| x - x^{\\prime}\\| _p" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": ". The key idea of the Gromov-Wasserstein distance to circumvent the dimension mismatch is to change from comparing marginal distribution to pairwise distributions, which is very similar to the SNE objective. Consider Monge's formulation of the optimal transportation problem and let " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "z = f(x)" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": ". 
By choosing " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "c_{z}(z_{i},z_{j}) = \\log (\\widetilde{Q}_{j|i})" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "\\widetilde{Q}" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " specified as in (3.1), " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "c_{x}(x_{i},x_{j}) = P_{j|i}" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "\\widetilde{P}" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": " specified as in (3.2) and letting " + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "inline_equation", + "content": "D(c_{x},c_{z}) = c_{x}(\\log (c_{x}) - \\log (c_{z}))" + }, + { + "bbox": [ + 104, + 582, + 506, + 664 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 193, + 666, + 416, + 680 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 666, + 416, + 680 + ], + "spans": [ + { + "bbox": [ + 193, + 666, + 416, + 680 + ], + "type": "interline_equation", + "content": "G W _ {1} \\left(p _ {x}, p _ {f (x)}\\right) \\leq \\mathbb {E} _ {x, x ^ {\\prime}} \\left(D \\left(c _ {x} \\left(x, x ^ {\\prime}\\right), c _ {z} \\left(f (x), f \\left(x ^ {\\prime}\\right)\\right)\\right)\\right),", + "image_path": "5bb7f62dd86eee19769825642f3039754fcc9566b32fccb1da5f37e0db902701.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "spans": [ + { + "bbox": [ + 104, + 
681, + 504, + 705 + ], + "type": "text", + "content": "where the right hand side recovers the expected InfoNCE loss. Hence, the SNE perspective can also be viewed as minimizing the Gromov-Wasserstein distance between " + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "inline_equation", + "content": "p_z" + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "inline_equation", + "content": "p_x" + }, + { + "bbox": [ + 104, + 681, + 504, + 705 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "It is worth noting that such an interpretation only relates to contrastive learning, not including generative-based self-supervised learning methods such as Masked AutoEncoder (MAE) (He et al., 2021)." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 209, + 95, + 394, + 219 + ], + "blocks": [ + { + "bbox": [ + 209, + 95, + 394, + 219 + ], + "lines": [ + { + "bbox": [ + 209, + 95, + 394, + 219 + ], + "spans": [ + { + "bbox": [ + 209, + 95, + 394, + 219 + ], + "type": "image", + "image_path": "004616d71e2f6e1680a34a339787f5e53d64b43daa1aa4d64ce960759a7b7662.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 238, + 504, + 262 + ], + "lines": [ + { + "bbox": [ + 104, + 238, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 238, + 504, + 262 + ], + "type": "text", + "content": "Figure C.6: Nearest neighbor test accuracy vs. training epochs. SimCLR and " + }, + { + "bbox": [ + 104, + 238, + 504, + 262 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 238, + 504, + 262 + ], + "type": "text", + "content": "-SimCLR share similar trends and convergence speed." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 217, + 288, + 386, + 414 + ], + "blocks": [ + { + "bbox": [ + 217, + 288, + 386, + 414 + ], + "lines": [ + { + "bbox": [ + 217, + 288, + 386, + 414 + ], + "spans": [ + { + "bbox": [ + 217, + 288, + 386, + 414 + ], + "type": "image", + "image_path": "e879bb5c0ecaa514b123a11a38890e1aa2a909864a3cbe9384ea45dbf61f68b4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 433, + 504, + 456 + ], + "lines": [ + { + "bbox": [ + 104, + 433, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 433, + 504, + 456 + ], + "type": "text", + "content": "Figure C.7: The histogram of IoUs for 1000 constructed positive pairs in CIFAR-10. The empirical distribution is almost symmetric around 0.5." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 475, + 242, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 242, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 242, + 487 + ], + "type": "text", + "content": "C EXPERIMENT DETAILS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 499, + 224, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 499, + 224, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 224, + 510 + ], + "type": "text", + "content": "C.1 CIFAR-10 SETTINGS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "text", + "content": "CIFAR-10 (Krizhevsky, 2009) is a colorful image dataset with 50000 training samples and 10000 test samples from 10 categories. 
We use ResNet-18 (He et al., 2016) as the feature extractor, and the other settings such as projection head all follow the original settings of SimCLR (Chen et al., 2020a). To evaluate the quality of the features, we follow the KNN evaluation protocol (Wu et al., 2018). which computes the cosine similarities in the embedding space between the test image and its nearest neighbors, and make the prediction via weighted voting. We train each model with batch size of 256 and 200 epochs for quicker evaluation. For " + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "text", + "content": "-SimCLR, without specifying otherwise, we grid search the " + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "text", + "content": " with range " + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "inline_equation", + "content": "\\{1, 2, 5, 10\\}" + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "inline_equation", + "content": "\\{1, 2, 5, 10\\}" + }, + { + "bbox": [ + 104, + 519, + 506, + 609 + ], + "type": "text", + "content": " respectively." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "text", + "content": "Ablation of training epochs We also run the SimCLR and " + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "text", + "content": "-SimCLR experiments in the more standard 1000 epochs setting. For SimCLR, we use batch size of 512, learning rate of 0.3, temperature of 0.7, and weight dacay of 0.0001. For " + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "text", + "content": "-SimCLR, we use batch size of 512, learning rate of 0.8, temperature of 10, weight dacay of 0.0002, and " + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "inline_equation", + "content": "t_{df} = 5" + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "text", + "content": ". The nearest neighbor accuracy for SimCLR is " + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "inline_equation", + "content": "87.2\\%" + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "text", + "content": " vs. that for " + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "text", + "content": "-SimCLR is " + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "inline_equation", + "content": "88.8\\%" + }, + { + "bbox": [ + 104, + 620, + 506, + 675 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 689, + 234, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 234, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 234, + 700 + ], + "type": "text", + "content": "C.2 IMAGE AUGMENTATION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "When processing images, several popular augmentations are usually adopted (following the setting in SimCLR Chen et al. (2020a)), e.g., random resized crop (crops a random portion of image and resize it" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 195, + 97, + 422, + 248 + ], + "blocks": [ + { + "bbox": [ + 195, + 97, + 422, + 248 + ], + "lines": [ + { + "bbox": [ + 195, + 97, + 422, + 248 + ], + "spans": [ + { + "bbox": [ + 195, + 97, + 422, + 248 + ], + "type": "image", + "image_path": "23a127fd23e914b5ea03435dbae0b73c8d6edafe506a41ac6bede17bad9bc060.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 262, + 506, + 298 + ], + "lines": [ + { + 
"bbox": [ + 104, + 262, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 506, + 298 + ], + "type": "text", + "content": "Figure C.8: Extension on Figure 2(b). Nearest neighbor classification accuracy for SimCLR vs. " + }, + { + "bbox": [ + 104, + 262, + 506, + 298 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 262, + 506, + 298 + ], + "type": "text", + "content": "-SimCLR on both CIFAR-10 (in-distribution) and CIFAR-100 (out-of-distribution) using different feature dimensions." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 315, + 506, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 506, + 394 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 506, + 394 + ], + "type": "text", + "content": "to the original size), horizontal flip, color jitter (randomly change the brightness, contrast, saturation and hue of an image). To illustrate the natural weighting scheme in Section 4.1, we considered random resized crop and specifies the weights by the IoU (intersection over union) of the positive pair. In particular, two augmented images are created from an anchor image. 
Each augmentation crops a rectangular region of the image, denoted by " + }, + { + "bbox": [ + 104, + 315, + 506, + 394 + ], + "type": "inline_equation", + "content": "r_1, r_2" + }, + { + "bbox": [ + 104, + 315, + 506, + 394 + ], + "type": "text", + "content": " respectively, and their IoU is defined by the area of intersection " + }, + { + "bbox": [ + 104, + 315, + 506, + 394 + ], + "type": "inline_equation", + "content": "r_1 \\cap r_2" + }, + { + "bbox": [ + 104, + 315, + 506, + 394 + ], + "type": "text", + "content": " divided by the area of the union " + }, + { + "bbox": [ + 104, + 315, + 506, + 394 + ], + "type": "inline_equation", + "content": "r_1 \\cup r_2" + }, + { + "bbox": [ + 104, + 315, + 506, + 394 + ], + "type": "text", + "content": ". The IoU is always between 0 and 1. In our experiment, we chose the default settings and Figure C.7 illustrates the IoU histogram of 1000 constructed positive pairs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 406, + 285, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 285, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 285, + 417 + ], + "type": "text", + "content": "C.3 DEGREE OF FREEDOM IN " + }, + { + "bbox": [ + 105, + 406, + 285, + 417 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 105, + 406, + 285, + 417 + ], + "type": "text", + "content": "-SIMCLR" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 450, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 450, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 504, + 506 + ], + "type": "text", + "content": "Feature dimension efficiency in OOD case. To further investigate the generalization ability of SSCL methods, we devise a challenging setting where the model is trained on CIFAR-10 and tested on CIFAR-100 classification. 
In this case, we evaluate the effect of increasing feature dimensions in the projection layer, as an extension on the CIFAR-10 in-distribution case. The results are shown in Figure C.8, where there are two things to note:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 515, + 504, + 574 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 132, + 515, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 515, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 132, + 515, + 504, + 536 + ], + "type": "text", + "content": "- The gain of extra dimensions in the OOD case does vanish later than that in the in-distribution case." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "text", + "content": "- The advantage of SimCLR vs. " + }, + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "text", + "content": "-SimCLR is very significant with around " + }, + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "text", + "content": " improvement when " + }, + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "inline_equation", + "content": "d = 128" + }, + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "text", + "content": " using nearest neighbor classification, indicating that " + }, + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 132, + 540, + 504, + 574 + ], + "type": "text", + "content": "-SimCLR produces better separated clusters." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": "Relationship between " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "d_z" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": ". The larger the degree of freedom " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": ", the less heavy-tail the t-distribution. As " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "d_z" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " decreases, the crowding problem becomes more severe and as recommended by (Van der Maaten & Hinton, 2008), a smaller " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " tends to work better. 
We evaluate the sensitivity of " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " (1, 5, 10) under different choices of " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "d_z" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " (1, 2, 4, 8, 16, 32, 64, 128) in CIFAR-10 and the results are reported in Figure C.9. As can be seen, when " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "d_z" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " is small (1, 2, 4, 8), " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "t_{df} = 1" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " outperforms. Comparing " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "t_{df} = 5" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "t_{df} = 10" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": ", the two perform similarly when " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "d_z" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " is large (16, 32, 64, 128) but the smaller " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "t_{df} = 5" + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": " yields better accuracy when " + }, + { + "bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "inline_equation", + "content": "d_z = 1, 2, 4" + }, + { + 
"bbox": [ + 104, + 590, + 505, + 669 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 680, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 680, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 680, + 506, + 704 + ], + "type": "text", + "content": "Tuning temperature vs. tuning " + }, + { + "bbox": [ + 104, + 680, + 506, + 704 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 680, + 506, + 704 + ], + "type": "text", + "content": ". As illustrated in Section 4.2, when the feature space dimension is low, the heavy-tailed t-distribution is a better choice than Gaussian to alleviate the crowding problem." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "content": "5When evaluating by training linear classifiers for 100 epochs, the accuracy for SimCLR is " + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "inline_equation", + "content": "46.4\\%" + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "content": " and that for " + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "content": "-SimCLR is " + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "inline_equation", + "content": "48.14\\%" + }, + { + "bbox": [ + 104, + 710, + 
504, + 733 + ], + "type": "text", + "content": " (averaged over 3 replications)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 192, + 82, + 422, + 216 + ], + "blocks": [ + { + "bbox": [ + 192, + 82, + 422, + 216 + ], + "lines": [ + { + "bbox": [ + 192, + 82, + 422, + 216 + ], + "spans": [ + { + "bbox": [ + 192, + 82, + 422, + 216 + ], + "type": "image", + "image_path": "ff8980fcd9c7131f7be01201d512cac63ded6038971372be41ff5b4f6462a672.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 227, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 227, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 227, + 504, + 251 + ], + "type": "text", + "content": "Figure C.9: Nearest neighbor classification accuracy on CIFAR-10 for " + }, + { + "bbox": [ + 104, + 227, + 504, + 251 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 227, + 504, + 251 + ], + "type": "text", + "content": "-SimCLR using different feature dimensions and different degrees of freedom (t_df)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": "Even though tuning the temperature of " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{InfoNCE}}" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": ", i.e., making " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " larger, can also have the effect of making the distribution less concentrated (" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " can be seen as the standard deviation), tuning temperature and tuning " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " are fundamentally different. 
The former is controlling how fast does the similarity " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "Q_{i,j}" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " decays as the distance between " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "z_i" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "z_j" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " increases, while the latter serves as a scaling factor, offering constant level modification of the scheme. In our experiments with SimCLR vs " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": "-SimCLR on CIFAR-10, temperature is tuned as a hyperparameter. The difference in " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " can never make up to the difference between the baseline SimCLR and " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": "-SimCLR. 
We found " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "\\tau = 0.5" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " to work better for the base SimCLR while larger " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " works better with our " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": "-SimCLR. We recommend " + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "inline_equation", + "content": "\\tau = 5" + }, + { + "bbox": [ + 104, + 270, + 506, + 360 + ], + "type": "text", + "content": " as the default choice." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 371, + 246, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 371, + 246, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 246, + 384 + ], + "type": "text", + "content": "C.4 IMAGENET PRE-TRAINING" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "text", + "content": "To show the ability for large scale domain transfer and OOD generalization, we conduct experiments on ImageNet pre-training based on MoCo-v2 with its official implementation6. We follow most of their settings, e.g., data augmentation, 200 epochs pre-training, and optimization strategy, etc. The loss is modified according to Section 4.2 and batch normalization is applied along every dimension. 
We grid search the " + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "inline_equation", + "content": "t_{df}" + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "text", + "content": " with range " + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "inline_equation", + "content": "\\{2,5,10,15\\}" + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "inline_equation", + "content": "\\{0.2,2,5,10\\}" + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "text", + "content": " respectively. Finally we choose " + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "inline_equation", + "content": "t_{df} = 10" + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "inline_equation", + "content": "\\tau = 5" + }, + { + "bbox": [ + 104, + 392, + 504, + 471 + ], + "type": "text", + "content": " to be the optimal hyperparameters. We use this pre-train model as initialization for domain transfer and OOD experiments." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 482, + 218, + 494 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 482, + 218, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 218, + 494 + ], + "type": "text", + "content": "C.5 DOMAIN TRANSFER" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "text", + "content": "We compare MoCo-v2 pre-trained with 800 / 200 epochs and " + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "text", + "content": "-MoCo-v2 on Aircraft, Birdsnap, Caltech101, Cars, CIFAR10, CIFAR100, DTD, Pets, and SUN397 in Table C.3. We follow the transfer settings in Ericsson et al. (2021) to finetune the pre-trained models. For datasets Birdsnap, Cars, CIFAR10, CIFAR100, DTD, and SUN397, we report the top-1 accuracy metric, while for Aircraft, Caltech101, and Pets, we report the mean per-class accuracy metric. We also follow Ericsson et al. (2021) to split each dataset into training, validation, and test sets. On each dataset, we perform a hyperparameter search as follows. 
(1) We choose the initial learning rate according to a grid of 4 logarithmically spaced values between " + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-1}" + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "text", + "content": "; (2) We choose the weight decay parameter according to a grid of 4 logarithmically spaced values between " + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-3}" + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "text", + "content": ", plus no weight decay; (3) The weight decay values are divided by the learning rate; (4) For each pair of learning rate and weight decay, we finetune the pre-trained model for 5000 steps by SGD with Nesterov momentum 0.9, batch size of 64, and cosine annealing learning rate schedule without restarts. As can be seen in Table C.3, our " + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 502, + 506, + 647 + ], + "type": "text", + "content": "-MoCo-v2 with 200 epochs even outperform the baseline with 800 epochs on average." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 659, + 234, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 659, + 234, + 671 + ], + "spans": [ + { + "bbox": [ + 105, + 659, + 234, + 671 + ], + "type": "text", + "content": "C.6 OOD GENERALIZATION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 680, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 680, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 680, + 504, + 715 + ], + "type": "text", + "content": "To demonstrate the advantage of our modification, we also compare MoCo-v2 pre-trained with 800 / 200 epochs and " + }, + { + "bbox": [ + 104, + 680, + 504, + 715 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 680, + 504, + 715 + ], + "type": "text", + "content": "-MoCo-v2 on OOD generalization benchmarks: PACS Li et al. (2017), VLCS Fang et al. (2013), Office-Home Venkateswara et al. (2017). We follow the standard way to conduct the" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 277, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 277, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 277, + 732 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 116, + 720, + 277, + 732 + ], + "type": "text", + "content": "https://github.com/facebookresearch/moco" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + 
"bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 99, + 504, + 140 + ], + "blocks": [ + { + "bbox": [ + 162, + 89, + 447, + 99 + ], + "lines": [ + { + "bbox": [ + 162, + 89, + 447, + 99 + ], + "spans": [ + { + "bbox": [ + 162, + 89, + 447, + 99 + ], + "type": "text", + "content": "Table C.3: Domain transfer results of vanilla MoCo-v2 and " + }, + { + "bbox": [ + 162, + 89, + 447, + 99 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 162, + 89, + 447, + 99 + ], + "type": "text", + "content": " -MoCo-v2." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 99, + 504, + 140 + ], + "lines": [ + { + "bbox": [ + 107, + 99, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 107, + 99, + 504, + 140 + ], + "type": "table", + "html": "
MethodAircraftBirdsnapCaltech101CarsCIFAR10CIFAR100DTDPetsSUN397Avg.
MoCo-v2 (800 epochs)83.8045.5183.0186.1896.4271.6971.7089.1155.6175.89
MoCo-v2 (200 epochs)82.7544.5383.3185.2495.8172.7571.2286.7056.0575.37
t-MoCo-v2 (200 epochs)82.7853.4686.8186.1796.0478.3269.2087.9559.3077.78
", + "image_path": "35ce9406cf55094cfb9c608b03959e530bc1dae7ffac647aefa7d7e1993bed86.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 164, + 171, + 446, + 227 + ], + "blocks": [ + { + "bbox": [ + 105, + 158, + 505, + 170 + ], + "lines": [ + { + "bbox": [ + 105, + 158, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 505, + 170 + ], + "type": "text", + "content": "Table C.4: OOD accuracies of vanilla MoCo-v2 and " + }, + { + "bbox": [ + 105, + 158, + 505, + 170 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 105, + 158, + 505, + 170 + ], + "type": "text", + "content": " -MoCo-v2 on domain generalization benchmarks." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 164, + 171, + 446, + 227 + ], + "lines": [ + { + "bbox": [ + 164, + 171, + 446, + 227 + ], + "spans": [ + { + "bbox": [ + 164, + 171, + 446, + 227 + ], + "type": "table", + "html": "
MethodPACSVLCSOffice-HomeAvg.
MoCo-v2 (800 epochs)58.969.841.656.8
MoCo-v2 (200 epochs)58.570.436.655.2
t-MoCo-v2 (200 epochs)61.375.142.159.5
", + "image_path": "695459ef30b187674671fce8f19c5c93014a1b73e32a0233ccf10e82ce5e8e09.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "type": "text", + "content": "experiments, i.e., choosing one domain as the test domain and using the remaining domains as training domains, which is named the leave-one-domain-out protocol. The top linear classifier is trained on the training domains and tested on the test domain. Each domain rotates as the test domain and the average accuracy is reported for each dataset in Table C.4. On each dataset, we perform a hyperparameter search following DomainBed Gulrajani & Lopez-Paz (2021). We adopt the leave-one-domain-out cross-validation setup in DomainBed with 10 experiments for hyperparameter selection and run 3 trials. As can be seen in Table C.4, our " + }, + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "type": "text", + "content": "-MoCo-v2 with 200 epochs even significantly outperform the baseline with 800 epochs for all of the three datasets." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 365, + 297, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 297, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 297, + 376 + ], + "type": "text", + "content": "C.7 SSCL INSPIRED DATA VISUALIZATION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": "-SNE (Van der Maaten & Hinton, 2008) and its variants are designed for data visualization. However, for more complicated data, such as colored images, the results are not satisfactory. Using standard " + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": "-SNE, the 2D visualization of the 50K training images of CIFAR-10 (labels denoted as 0, 1,...,9) can be seen in Figure C.10, where different labels are hardly separated. The poor performance of " + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": "-SNE on CIFAR-10 can be traced back to the poor distance choice on images, i.e., " + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": "-norm. 
Inspired by the success of SSCL for natural images, " + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 385, + 504, + 464 + ], + "type": "text", + "content": "-SNE can potentially be improved by incorporating data augmentations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "text", + "content": "In light of our perspective (S1), " + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "text", + "content": "-SNE can take advantage of the distance specified with (3.2) and the resulting model is essentially our " + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "text", + "content": "-SimCLR with feature dimension 2. The visualization from " + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "text", + "content": "-SimCLR is shown in Figure C.11, which is much more separated (the nearest neighbor classification accuracy on CIFAR-10 test data is " + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "inline_equation", + "content": "56.6\\%" + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "text", + "content": "). By choosing the feature dimension to be 2, various SSCL methods can also be made into data visualizing tools. 
In Figure C.12, we visualize the outcome from SimCLR (the nearest neighbor classification accuracy on CIFAR-10 test data is " + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "inline_equation", + "content": "24.8\\%" + }, + { + "bbox": [ + 104, + 468, + 504, + 536 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "type": "text", + "content": "Similar investigations have been carried in Böhm et al. (2022); Damrich et al. (2022) where they focused specifically on data visualization and stochastic neighbor embedding." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 288, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 288, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 288, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 189, + 89, + 422, + 262 + ], + "blocks": [ + { + "bbox": [ + 189, + 89, + 422, + 262 + ], + "lines": [ + { + "bbox": [ + 189, + 89, + 422, + 262 + ], + "spans": [ + { + "bbox": [ + 189, + 89, + 422, + 262 + ], + "type": "image", + "image_path": "76b0fe03ca6ed5762c61cb7bbfe91d9d76e59cf9de5ea811f23a14e8780312cb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 276, + 457, + 289 + ], + "lines": [ + { + 
"bbox": [ + 149, + 276, + 457, + 289 + ], + "spans": [ + { + "bbox": [ + 149, + 276, + 457, + 289 + ], + "type": "text", + "content": "Figure C.10: 50K CIFAR-10 training images visualization in 2D with " + }, + { + "bbox": [ + 149, + 276, + 457, + 289 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 149, + 276, + 457, + 289 + ], + "type": "text", + "content": "-SNE." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 189, + 309, + 422, + 480 + ], + "blocks": [ + { + "bbox": [ + 189, + 309, + 422, + 480 + ], + "lines": [ + { + "bbox": [ + 189, + 309, + 422, + 480 + ], + "spans": [ + { + "bbox": [ + 189, + 309, + 422, + 480 + ], + "type": "image", + "image_path": "a48f050056dc14a63d46ee1df2ed31108696785f0c9a933ec7c530bde5735f84.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 118, + 495, + 488, + 507 + ], + "lines": [ + { + "bbox": [ + 118, + 495, + 488, + 507 + ], + "spans": [ + { + "bbox": [ + 118, + 495, + 488, + 507 + ], + "type": "text", + "content": "Figure C.11: 50K CIFAR-10 training images visualization in 2D with the default " + }, + { + "bbox": [ + 118, + 495, + 488, + 507 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 118, + 495, + 488, + 507 + ], + "type": "text", + "content": "-SimCLR." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 189, + 528, + 422, + 696 + ], + "blocks": [ + { + "bbox": [ + 189, + 528, + 422, + 696 + ], + "lines": [ + { + "bbox": [ + 189, + 528, + 422, + 696 + ], + "spans": [ + { + "bbox": [ + 189, + 528, + 422, + 696 + ], + "type": "image", + "image_path": "6eaae204ef0543ee464baa5c129e4e1211fd5697dae541817624a41e3770b9f8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 712, + 471, + 724 + ], + "lines": [ + { + "bbox": [ + 137, + 712, + 471, + 724 + ], + "spans": [ + { + "bbox": [ + 137, + 712, + 471, + 724 + ], + "type": "text", + "content": "Figure C.12: 50K CIFAR-10 training images visualization in 2D with the SimCLR." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 290, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 290, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 290, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_content_list.json b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_content_list.json new file mode 100644 index 
0000000000000000000000000000000000000000..4b2ab83ad9a49ccada629d03bc8652da25d6e77a --- /dev/null +++ b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_content_list.json @@ -0,0 +1,4471 @@ +[ + { + "type": "text", + "text": "ZEROTH-ORDER OPTIMIZATION WITH TRAJECTORYINFORMED DERIVATIVE ESTIMATION", + "text_level": 1, + "bbox": [ + 171, + 99, + 828, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yao Shu*, Zhongxiang Dai*, Weicong Sng, Arun Verma,", + "bbox": [ + 179, + 170, + 580, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dept. of Computer Science, National University of Singapore, Republic of Singapore {shuyao, daizhongxiang, sngweicong, arun}@comp.nus.edu.sg", + "bbox": [ + 179, + 184, + 746, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Patrick Jaillet† & Bryan Kian Hsiang Low§", + "bbox": [ + 179, + 232, + 488, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dept. of Electrical Engineering and Computer Science, MIT, USA†", + "bbox": [ + 179, + 247, + 627, + 262 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dept. of Computer Science, National University of Singapore, Republic of Singapore", + "bbox": [ + 179, + 262, + 750, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "jaillet@mit.edu, lowkh@comp.nus.edu.sg", + "bbox": [ + 184, + 277, + 549, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 328, + 547, + 343 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zeroth-order (ZO) optimization, in which the derivative is unavailable, has recently succeeded in many important machine learning applications. Existing algorithms rely on finite difference (FD) methods for derivative estimation and gradient descent (GD)-based approaches for optimization. 
However, these algorithms suffer from query inefficiency because many additional function queries are required for derivative estimation in their every GD update, which typically hinders their deployment in real-world applications where every function query is expensive. To this end, we propose a trajectory-informed derivative estimation method which only employs the optimization trajectory (i.e., the history of function queries during optimization) and hence can eliminate the need for additional function queries to estimate a derivative. Moreover, based on our derivative estimation, we propose the technique of dynamic virtual updates, which allows us to reliably perform multiple steps of GD updates without reapplying derivative estimation. Based on these two contributions, we introduce the zeroth-order optimization with trajectory-informed derivative estimation (ZoRD) algorithm for query-efficient ZO optimization. We theoretically demonstrate that our trajectory-informed derivative estimation and our ZoRD algorithm improve over existing approaches, which is then supported by our real-world experiments such as black-box adversarial attack, non-differentiable metric optimization, and derivative-free reinforcement learning.", + "bbox": [ + 228, + 362, + 769, + 628 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 662, + 336, + 678 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zeroth-order (ZO) optimization, in which the objective function to be optimized is only accessible by querying, has received great attention in recent years due to its success in many applications, e.g., black-box adversarial attack (Ru et al., 2020), non-differentiable metric optimization (Hiranandani et al., 2021), and derivative-free reinforcement learning (Salimans et al., 2017). 
In these problems, the derivative of objective function is either prohibitively costly to obtain or even non-existent, making it infeasible to directly apply standard derivative-based algorithms such as gradient descent (GD). In this regard, existing works have proposed to estimate the derivative using the finite difference (FD) methods and then apply GD-based algorithms using the estimated derivative for ZO optimization (Nesterov and Spokoiny, 2017; Cheng et al., 2021). These algorithms, which we refer to as $GD$ with estimated derivatives, have been the most widely applied approach to ZO optimization especially for problems with high-dimensional input spaces, because of their theoretically guaranteed convergence and competitive practical performance. Unfortunately, these algorithms suffer from query inefficiency, which hinders their real-world deployment especially in applications with expensive-to-query objective functions, e.g., black-box adversarial attack.", + "bbox": [ + 169, + 696, + 826, + 893 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contribution.", + "bbox": [ + 199, + 910, + 328, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 960 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Specifically, one of the reasons for the query inefficiency of existing algorithms on GD with estimated derivatives is that in addition to the necessary queries (i.e., the query of every updated input)1, the FD methods applied in these algorithms require a large number of additional queries to accurately estimate the derivative at an input (Berahas et al., 2022). This naturally begs the question: Can we estimate a derivative without any additional query? 
A natural approach to achieve this is to leverage the optimization trajectory, which is inherently available as a result of the necessary queries and their observations, to predict the derivatives. However, this requires a non-trivial method to simultaneously $(a)$ predict a derivative using only the optimization trajectory (i.e., the history of updated inputs and their observations), and $(b)$ quantify the uncertainty of this prediction to avoid using inaccurate predicted derivatives. Interestingly, the Gaussian process (GP) model satisfies both requirements and is hence a natural choice for such a derivative estimation. Specifically, under the commonly used assumption that the objective function is sampled from a GP (Srinivas et al., 2010), the derivative at any input in the domain follows a Gaussian distribution which, surprisingly, can be calculated using only the optimization trajectory. This allows us to $(a)$ employ the mean of this Gaussian distribution as the estimated derivative, and $(b)$ use the covariance matrix of this Gaussian distribution to obtain a principled measure of the predictive uncertainty and the accuracy of this derivative estimation, which together constitute our trajectory-informed derivative estimation (Sec. 3.1).", + "bbox": [ + 169, + 103, + 826, + 340 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Another reason for the query inefficiency of the existing algorithms on GD with estimated derivatives is that every update in these algorithms requires reapplying derivative estimation and hence necessitates additional queries. This can preclude their adoption of a large number of GD updates since every update requires potentially expensive additional queries. Therefore, another question arises: Can we perform multiple GD updates without reapplying derivative estimation and hence without any additional query? To address this question, we propose a technique named dynamic virtual updates (Sec. 3.2). 
Specifically, thanks to the ability of our method to estimate the derivative at any input in the domain while only using existing optimization trajectory, we can apply multi-step GD updates without the need to reapply derivative estimation and hence without requiring any new query. Moreover, we can dynamically determine the number of steps for these updates by inspecting the aforementioned predictive uncertainty at every step, such that we only perform an update if the uncertainty is small enough (which also indicates that the estimation error is small, see Sec. 4.1).", + "bbox": [ + 169, + 345, + 826, + 513 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By incorporating our aforementioned trajectory-informed derivative estimation and dynamic virtual updates into GD-based algorithms, we then introduce the zeroth-order optimization with trajectory-informed derivative estimation (ZoRD) algorithm for query-efficient ZO optimization. We theoretically bound the estimation error of our trajectory-informed derivative estimation and show that this estimation error is non-increasing in the entire domain as the number of queries is increased and can even be exponentially decreasing in some scenarios (Sec. 4.1). Based on this, we prove the convergence of our ZoRD algorithm, which improves over the existing ZO optimization algorithms that rely on the FD methods for derivative estimation (Sec. 4.2). Lastly, we use extensive experiments, such as black-box adversarial attack, non-differentiable metric optimization, and derivative-free reinforcement learning, to demonstrate that $(a)$ our trajectory-informed derivative estimation improves over the existing FD methods and that $(b)$ our ZoRD algorithm consistently achieves improved query efficiency compared with previous ZO optimization algorithms (Sec. 
5).", + "bbox": [ + 169, + 520, + 828, + 688 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 PRELIMINARIES", + "text_level": 1, + "bbox": [ + 171, + 708, + 341, + 724 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 PROBLEM SETUP", + "text_level": 1, + "bbox": [ + 171, + 739, + 333, + 753 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Throughout this paper, we use $\\nabla$ and $\\partial_{\\pmb{x}}$ to denote, respectively, the total derivative (i.e., gradient) and partial derivative w.r.t the variable $\\pmb{x}$ . We consider the minimization of a black-box objective function $f:\\mathcal{X}\\to \\mathbb{R}$ , in which $\\mathcal{X}\\subset \\mathbb{R}^d$ is a convex subset of the $d$ -dimensional domain:", + "bbox": [ + 169, + 766, + 826, + 809 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\boldsymbol {x} \\in \\mathcal {X}} f (\\boldsymbol {x}). \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 459, + 816, + 823, + 840 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Since we consider ZO optimization, the derivative information is not accessible and instead, we are only allowed to query the inputs in $\\mathcal{X}$ . For every queried input $\\pmb{x} \\in \\mathcal{X}$ , we observe a corresponding noisy output of $y(\\pmb{x}) = f(\\pmb{x}) + \\zeta$ , in which $\\zeta$ is a zero-mean Gaussian noise with a variance of $\\sigma^2$ :", + "bbox": [ + 169, + 843, + 826, + 887 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "In practice, it is usually necessary to query every updated input to measure the optimization performance and select the best-performing input. 
We refer to these queries as necessary queries.", + "bbox": [ + 169, + 897, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Standard (Projected) GD with Estimated Derivatives" + ], + "code_body": "1: Input: Objective function $f: \\mathcal{X} \\to \\mathbb{R}$ , initialization $\\boldsymbol{x}_0$ , iteration number $T$ , learning rates $\\{\\eta_t\\}_{t=1}^T$ , projection function $\\mathcal{P}_{\\mathcal{X}}(\\boldsymbol{x})$", + "bbox": [ + 173, + 130, + 459, + 186 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "2: for iteration $t = 1,\\dots ,T$ do", + "bbox": [ + 181, + 186, + 393, + 198 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "3: $g(\\pmb{x}_{t - 1})\\approx \\nabla f(\\pmb{x}_{t - 1})$ with (2)", + "bbox": [ + 181, + 199, + 421, + 214 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "4: $\\pmb{x}_t\\gets \\mathcal{P}_{\\mathcal{X}}\\left(\\pmb{x}_{t - 1} - \\eta_{t - 1}g(\\pmb{x}_{t - 1})\\right)$", + "bbox": [ + 181, + 214, + 437, + 228 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "5: Query $\\pmb{x}_t$ to yield $y(\\pmb{x}_t)$", + "bbox": [ + 181, + 228, + 375, + 242 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "6: end for", + "bbox": [ + 181, + 242, + 254, + 253 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "7: Return arg $\\min_{\\pmb{x}_{1:T}} y(\\pmb{x})$", + "bbox": [ + 181, + 255, + 370, + 271 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + 
"code_caption": [ + "Algorithm 2: ZORD (Ours)" + ], + "code_body": "1: Input: In addition to the parameters in Algo. 1, set the steps of virtual updates $\\{V_t\\}_{t=1}^T$", + "bbox": [ + 467, + 117, + 826, + 146 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "2: for iteration $t = 1,\\dots ,T$ do", + "bbox": [ + 468, + 146, + 679, + 157 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "3: $\\pmb{x}_{t,0} \\gets \\pmb{x}_{t-1}$", + "bbox": [ + 468, + 160, + 591, + 172 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "4: for iteration $\\tau = 1,\\dots ,V_{t}$ do", + "bbox": [ + 468, + 172, + 702, + 186 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "5: $\\pmb{x}_{t,\\tau} \\gets \\mathcal{P}_{\\mathcal{X}}(\\pmb{x}_{t,\\tau -1} - \\eta_{t,\\tau -1}\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau -1}))$", + "bbox": [ + 468, + 186, + 823, + 200 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "6: end for", + "bbox": [ + 468, + 202, + 558, + 213 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "7: Query $\\pmb{x}_t = \\pmb{x}_{t,\\tau}$ to yield $y(\\pmb{x}_t)$", + "bbox": [ + 468, + 214, + 712, + 229 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "8: Update (4) using optimization trajectory", + "bbox": [ + 468, + 229, + 769, + 243 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "9: end for", + "bbox": [ + 468, + 243, + 542, + 255 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [], + "code_body": "10: Return arg min 
$\\mathbf{\\mu}_{\\mathbf{x}_{1:T}}y(\\mathbf{x})$", + "bbox": [ + 464, + 256, + 658, + 272 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$\\zeta \\sim \\mathcal{N}(0,\\sigma^2)$ . Besides, we adopt a common assumption on $f$ which has already been widely used in the literature of Bayesian optimization (BO) (Srinivas et al., 2010; Kandasamy et al., 2018): we assume that $f$ is sampled from a Gaussian process (GP). A GP $\\mathcal{GP}(\\mu (\\cdot),k(\\cdot ,\\cdot))$ , which is characterized by a mean function $\\mu (\\cdot)$ and a covariance function $k(\\cdot ,\\cdot)$ , is a stochastic process in which any finite subset of random variables follows a multi-variate Gaussian distribution (Rasmussen and Williams, 2006). In addition, following the common practice of GP and BO, we assume w.l.o.g. that $\\mu (\\pmb {x}) = 0$ and $k(\\pmb {x},\\pmb{x}^{\\prime})\\leq 1$ $(\\forall \\pmb {x},\\pmb{x}^{\\prime}\\in \\mathcal{X})$ . We also assume that the kernel function $k$ is differentiable, and that $\\| \\partial_z\\partial_{z'}k(z,z')|_{z = z' = x}\\| _2\\leq \\kappa^2$ , $\\forall \\pmb {x}\\in \\mathcal{X}$ for some $\\kappa >0$ . This is satisfied by most commonly used kernels such as the squared exponential (SE) kernel (Rasmussen and Williams, 2006).", + "bbox": [ + 169, + 292, + 826, + 417 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 ZO OPTIMIZATION WITH ESTIMATED DERIVATIVES", + "text_level": 1, + "bbox": [ + 171, + 434, + 571, + 448 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To solve (1), GD with estimated derivatives (e.g., Algo. 1) has been developed (Flaxman et al., 2005; Ghadimi and Lan, 2013; Nesterov and Spokoiny, 2017; Liu et al., 2018a;b). Particularly, these algorithms first estimate the derivative of $f$ (line 3 of Algo. 1) and then plug the estimated derivative into GD-based methods to obtain the next input for querying (lines 4-5 of Algo. 1). 
In these algorithms, the derivative is typically estimated by averaging the finite difference approximation of the directional derivatives for $f$ along certain directions, which we refer to as the finite difference (FD) method in this paper. For example, given a parameter $\\lambda$ and directions $\\{\\pmb{u}_i\\}_{i=1}^n$ , the derivative $\\nabla f$ at any $\\pmb{x} \\in \\mathcal{X}$ can be estimated by the following FD method (Berahas et al., 2022):", + "bbox": [ + 169, + 460, + 826, + 571 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla f (\\boldsymbol {x}) \\approx g (\\boldsymbol {x}) \\triangleq \\sum_ {i = 1} ^ {n} \\frac {y \\left(\\boldsymbol {x} + \\lambda \\boldsymbol {u} _ {i}\\right) - y (\\boldsymbol {x})}{\\lambda} \\boldsymbol {u} _ {i}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 577, + 823, + 616 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The directions $\\{\\pmb{u}_i\\}_{i=1}^n$ are usually sampled from the standard Gaussian distribution (Nesterov and Spokoiny, 2017) or uniformly from the unit sphere (Flaxman et al., 2005), or set as the standard basis vectors with 1 at one of its coordinates and 0 otherwise (Lian et al., 2016). As mentioned before, existing FD methods typically require many additional queries (i.e., $\\{\\pmb{x} + \\lambda \\pmb{u}_i\\}_{i=1}^n$ ) to achieve an accurate derivative estimation in every iteration of Algo. 1 (Berahas et al., 2022), making existing ZO optimization algorithms (Flaxman et al., 2005; Nesterov and Spokoiny, 2017) query-inefficient.", + "bbox": [ + 169, + 616, + 826, + 702 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 ZO OPTIMIZATION VIA TRAJECTORY-INFORMED DERIVATIVE ESTIMATION", + "text_level": 1, + "bbox": [ + 171, + 720, + 823, + 736 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To improve existing GD with estimated derivatives (e.g., Algo. 1), we propose the ZoRD algorithm (Algo. 
2), which achieves more query-efficient ZO optimization thanks to our two major contributions. Firstly, we propose a derived GP-based derivative estimation method which only uses the optimization trajectory and consequently does not require any additional query for derivative estimation (Sec. 3.1). Secondly, thanks to the ability of our method to estimate the derivative at any input in the domain without any additional query and to measure the estimation error in a principled way, we develop the technique of dynamic virtual updates to further improve the query efficiency of our ZoRD (Sec. 3.2).", + "bbox": [ + 169, + 751, + 826, + 849 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 TRAJECTORY-INFORMED DERIVATIVE ESTIMATION", + "text_level": 1, + "bbox": [ + 171, + 859, + 571, + 875 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To begin with, if a function $f$ follows a GP, then its derivative $\\nabla f$ also follows a GP (Rasmussen and Williams, 2006). This is formalized by our Lemma 1 below (proof in Appx. B.1), which then provides us a principled way to estimate the derivative at any input in the domain.", + "bbox": [ + 169, + 885, + 823, + 929 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Lemma 1 (Derived GP for Derivatives). 
If a function $f$ follows a $GP$ : $f \\sim \\mathcal{GP}\\left(\\mu (\\cdot),\\sigma^2 (\\cdot ,\\cdot)\\right)$ , then", + "bbox": [ + 169, + 102, + 823, + 119 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla f \\sim \\mathcal {G P} (\\nabla \\mu (\\cdot), \\partial \\sigma^ {2} (\\cdot , \\cdot))\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 126, + 596, + 143 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\partial \\sigma^2 (\\cdot ,\\cdot)$ denotes the cross partial derivative w.r.t the first and second arguments of $\\sigma^2 (\\cdot ,\\cdot)$ .", + "bbox": [ + 169, + 147, + 813, + 165 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$f$ Follows the Posterior GP. As discussed in Sec. 2.1, we assume that $f \\sim \\mathcal{GP}(\\mu(\\cdot), k(\\cdot, \\cdot))$ . So, in every iteration $t$ of our Algo. 2, conditioned on the current optimization trajectory $\\mathcal{D}_{t-1} \\triangleq \\{(x_{\\tau}, y_{\\tau})\\}_{\\tau=1}^{t-1}$ , $f$ follows the posterior GP: $f \\sim \\mathcal{GP}(\\mu_{t-1}(\\cdot), \\sigma_{t-1}^2(\\cdot, \\cdot))$ with the mean function $\\mu_{t-1}(\\cdot)$ and the covariance function $\\sigma_{t-1}^2(\\cdot, \\cdot)$ defined as below (Rasmussen and Williams, 2006):", + "bbox": [ + 169, + 175, + 826, + 239 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_ {t - 1} (\\boldsymbol {x}) \\triangleq \\boldsymbol {k} _ {t - 1} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {y} _ {t - 1}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 247, + 602, + 267 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {t - 1} ^ {2} \\left(\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}\\right) \\triangleq k \\left(\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}\\right) - \\boldsymbol {k} _ {t - 1} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} 
\\mathbf {I}\\right) ^ {- 1} \\boldsymbol {k} _ {t - 1} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 262, + 823, + 289 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\pmb{y}_{t-1}^{\\top} \\triangleq [y_{\\tau}]_{\\tau=1}^{t-1}$ and $\\pmb{k}_{t-1}(\\pmb{x})^{\\top} \\triangleq [k(\\pmb{x}, \\pmb{x}_{\\tau})]_{\\tau=1}^{t-1}$ are $(t-1)$ -dimensional row vectors, and $\\mathbf{K}_{t-1} \\triangleq [k(\\pmb{x}_{\\tau}, \\pmb{x}_{\\tau'})]_{\\tau, \\tau'=1}^{t-1}$ is a $(t-1) \\times (t-1)$ -dimensional matrix. Define $\\sigma_{t-1}^{2}(\\pmb{x}) \\triangleq \\sigma_{t-1}^{2}(\\pmb{x}, \\pmb{x})$ , the posterior distribution at $\\pmb{x}$ is Gaussian with mean $\\mu_{t-1}(\\pmb{x})$ and variance $\\sigma_{t-1}^{2}(\\pmb{x})$ .", + "bbox": [ + 169, + 296, + 826, + 349 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\nabla f$ Follows the Derived GP for Derivatives. Substituting (3) into Lemma 1, we have that", + "bbox": [ + 169, + 362, + 782, + 378 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla f \\sim \\mathcal {G P} \\left(\\nabla \\mu_ {t - 1} (\\cdot), \\partial \\sigma_ {t - 1} ^ {2} (\\cdot , \\cdot)\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 383, + 823, + 401 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "in which the mean $\\nabla \\mu_{t - 1}(\\pmb {x})$ at $\\pmb{x}$ and the covariance $\\partial \\sigma_{t - 1}^2 (\\pmb {x},\\pmb{x}^\\prime)$ at $\\pmb {x},\\pmb{x}^{\\prime}$ are", + "bbox": [ + 169, + 407, + 696, + 424 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla \\mu_ {t - 1} (\\boldsymbol {x}) \\triangleq \\partial_ {\\boldsymbol {z}} \\boldsymbol {k} _ {t - 1} (\\boldsymbol {z}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {y} _ {t - 1} | _ {\\boldsymbol {z} = \\boldsymbol {x}},\n$$\n", + "text_format": 
"latex", + "bbox": [ + 215, + 431, + 581, + 452 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left. \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}) \\triangleq \\partial_ {\\boldsymbol {z}} \\partial_ {\\boldsymbol {z} ^ {\\prime}} k (\\boldsymbol {z}, \\boldsymbol {z} ^ {\\prime}) - \\partial_ {\\boldsymbol {z}} \\boldsymbol {k} _ {t - 1} (\\boldsymbol {z}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\partial_ {\\boldsymbol {z} ^ {\\prime}} \\boldsymbol {k} _ {t - 1} (\\boldsymbol {z} ^ {\\prime}) \\right| _ {\\boldsymbol {z} = \\boldsymbol {x}, \\boldsymbol {z} ^ {\\prime} = \\boldsymbol {x} ^ {\\prime}}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 446, + 823, + 477 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "in which $\\partial_{\\pmb{z}}\\pmb{k}_{t - 1}(\\pmb {z})\\triangleq [\\partial_{\\pmb{z}}k(\\pmb {z},\\pmb{x}_{\\tau})]_{\\tau = 1}^{t - 1}$ is a $(t - 1)\\times d$ -dimensional matrix and $\\partial_{\\pmb{z}}\\partial_{\\pmb{z}^{\\prime}}k(\\pmb {z},\\pmb{z}^{\\prime})$ is a $d\\times d$ -dimensional matrix. Therefore, $\\nabla \\mu_{t - 1}(\\pmb {x})$ is a $d$ -dimensional vector and $\\partial \\sigma_{t - 1}^2 (\\pmb {x},\\pmb{x}^\\prime)$ is a $d\\times d$ -dimensional matrix. We refer to this GP (4) followed by $\\nabla f$ as the derived GP for derivatives.", + "bbox": [ + 169, + 484, + 826, + 529 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "So, define $\\partial \\sigma_{t - 1}^2 (\\pmb {x})\\triangleq \\partial \\sigma_{t - 1}^2 (\\pmb {x},\\pmb {x})$ , we have that for any input $\\pmb {x}\\in \\mathcal{X}$ , the derivative $\\nabla f(\\pmb {x})$ at $\\pmb{x}$ follows a $d$ -dimensional Gaussian distribution: $\\nabla f(\\pmb {x})\\sim \\mathcal{N}(\\nabla \\mu_{t - 1}(\\pmb {x}),\\partial \\sigma_{t - 1}^2 (\\pmb {x}))$ . 
This allows us to (a) estimate the derivative $\\nabla f(\\pmb {x})$ at any input $\\pmb {x}\\in \\mathcal{X}$ using the posterior mean $\\nabla \\mu_{t - 1}(\\pmb {x})$ of the derived GP for derivatives (4):", + "bbox": [ + 169, + 536, + 825, + 593 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla f (\\boldsymbol {x}) \\approx \\nabla \\mu_ {t - 1} (\\boldsymbol {x}), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 594, + 823, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and $(b)$ employ the posterior covariance matrix $\\partial \\sigma_{t - 1}^2 (\\pmb {x})$ to obtain a principled measure of the uncertainty for this derivative estimation, which together constitute our novel derivative estimation. Remarkably, our derivative estimation only makes use of the naturally available optimization trajectory $\\mathcal{D}_{t - 1}$ and does not need any additional query, which is in stark contrast to the existing FD methods (e.g., (2)) that require many additional queries for their derivative estimation. Moreover, our principled measure of uncertainty allows us to perform dynamic virtual updates (Sec. 3.2) and theoretically guarantee the quality of our derivative estimation (Sec. 4.1).", + "bbox": [ + 169, + 612, + 826, + 710 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 DYNAMIC VIRTUAL UPDATES", + "text_level": 1, + "bbox": [ + 171, + 727, + 416, + 739 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that our derived GP-based derivative estimation (6) can estimate the derivative at any input $\\pmb{x}$ within the domain. As a result, in every iteration $t$ of our ZoRD algorithm, for a step $\\tau \\geq 1$ , after performing a GD update using the estimated derivative at $\\pmb{x}_{t,\\tau -1}$ (i.e., $\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau -1})$ ) to reach the input $\\pmb{x}_{t,\\tau}$ (line 5 of Algo. 
2), we can again estimate the derivative at $\\pmb{x}_{t,\\tau}$ (i.e., $\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau})$ ) and then perform another GD update to reach $\\pmb{x}_{t,\\tau +1}$ without requiring any additional query. This process can be repeated for multiple steps, and can further improve the query efficiency of our ZoRD. Formally, given the projection function $\\mathcal{P}_{\\chi}(\\pmb {x})\\triangleq \\arg \\min_{\\pmb {z}\\in \\chi}\\| \\pmb {x} - \\pmb {z}\\| _2^2 /2$ and learning rates $\\{\\eta_{t,\\tau}\\}_{\\tau = 0}^{V_t - 1}$ , we perform the following virtual updates for $V_{t}$ steps (lines 4-6 of Algo. 2):", + "bbox": [ + 169, + 752, + 826, + 867 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {t, \\tau} = \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau - 1} - \\eta_ {t, \\tau - 1} \\nabla \\mu_ {t - 1} \\left(\\boldsymbol {x} _ {t, \\tau - 1}\\right)\\right) \\quad \\forall \\tau = 1, \\dots , V _ {t} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 873, + 823, + 890 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and then choose the last $\\pmb{x}_{t,V_t}$ to query (i.e., line 7 of Algo. 2). Importantly, these multi-step virtual GD updates are only feasible in our ZoRD (Algo. 
2) because our derivative estimator (6) does not", + "bbox": [ + 169, + 895, + 825, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "require any new query in all these steps, whereas the existing FD methods require additional queries to estimate the derivative in every step.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The number of steps for our virtual updates (i.e., $V_{t}$ ) induces an intriguing trade-off: An overly small $V_{t}$ may not be able to fully exploit the benefit of our derivative estimation (6) which is free from the requirement for additional queries, yet an excessively large $V_{t}$ may lead to the usage of inaccurate derivative estimations which can hurt the performance (validated in Appx. D.2). Remarkably, (4) allows us to dynamically choose $V_{t}$ by inspecting our principled measure of the predictive uncertainty (i.e., $\\partial \\sigma_{t-1}^2(\\boldsymbol{x})$ ) for every derivative estimation. 
Specifically, after reaching the input $\\boldsymbol{x}_{t,\\tau}$ , we continue the virtual updates (to reach $\\boldsymbol{x}_{t,\\tau+1}$ ) if our predictive uncertainty is small, i.e., if $\\left\\| \\partial \\sigma_{t-1}^2(\\boldsymbol{x}_{t,\\tau}) \\right\\|_2 \\leq c$ where $c$ is a confidence threshold; otherwise, we terminate the virtual updates and let $V_{t} = \\tau$ since the derivative estimation at $\\boldsymbol{x}_{t,\\tau}$ is likely unreliable.", + "bbox": [ + 169, + 138, + 826, + 268 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 THEORETICAL ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 282, + 418, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 DERIVATIVE ESTIMATION ERROR", + "text_level": 1, + "bbox": [ + 171, + 306, + 447, + 321 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To begin with, we derive a theoretical guarantee on the error of our derivative estimation at any $\\pmb{x}$ .", + "bbox": [ + 169, + 333, + 813, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Theorem 1 (Derivative Estimation Error). Let $\\delta \\in (0,1)$ and $\\beta \\triangleq \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(1 / \\delta)}$ . For any $\\pmb{x} \\in \\mathcal{X}$ and any $t \\geq 1$ , the following holds with probability of at least $1 - \\delta$ ,", + "bbox": [ + 169, + 354, + 823, + 391 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\nabla f (\\boldsymbol {x}) - \\nabla \\mu_ {t} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\beta \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 398, + 635, + 424 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Thm. 1 (proof in Appx. 
B.2) has presented an upper bound on the error of our derivative estimation (6) at any $\\pmb{x} \\in \\mathcal{X}$ in terms of $\\sqrt{\\|\\partial\\sigma_t^2(\\pmb{x})\\|_2}$ , which is a measure of the uncertainty about our derivative estimation at $\\pmb{x}$ (Sec. 3.1). This hence implies that the threshold $c$ applied to our predictive uncertainty $\\left\\| \\partial \\sigma_t^2 (\\pmb {x})\\right\\| _2$ (Sec. 3.2) also ensures that the derivative estimation error is small during our dynamic virtual updates. Next, we show in the following theorem (proof in Appx. B.3) that our upper bound on the estimation error from Thm. 1 is non-increasing as the number of function queries is increased.", + "bbox": [ + 169, + 425, + 823, + 511 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Theorem 2 (Non-Increasing Error). For any $\\pmb{x} \\in \\mathcal{X}$ and any $t \\geq 1$ , we have that", + "bbox": [ + 171, + 515, + 705, + 530 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 535, + 599, + 555 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Let $\\delta \\in (0,1)$ . Define $r \\triangleq \\max_{\\boldsymbol{x} \\in \\mathcal{X}, t \\geq 1} \\sqrt{\\|\\partial \\sigma_t^2(\\boldsymbol{x})\\|_2 / \\left\\|\\partial \\sigma_{t-1}^2(\\boldsymbol{x})\\right\\|_2}$ , given the $\\beta$ in Thm. 
1, we then have that $r \\in [1/\\sqrt{1 + 1/\\sigma^2}, 1]$ , and that with a probability of at least $1 - \\delta$ ,", + "bbox": [ + 169, + 563, + 823, + 604 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\nabla f (\\boldsymbol {x}) - \\nabla \\mu_ {t} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\beta \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\leq \\kappa \\beta r ^ {t}.\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 613, + 663, + 638 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Thm. 2 shows that our upper bound on the derivative estimation error (i.e., $\\beta \\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}$ from Thm. 1) is guaranteed to be non-increasing in the entire domain as the number of function queries is increased. Moreover, in some situations (i.e., when $r < 1$ ), our upper bound on the estimation error is even exponentially decreasing. Of note, $r$ characterizes how fast the uncertainty about our derivative estimation (measured by $\\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}$ ) is reduced across the domain. Since GD-based algorithms usually perform a local search in a neighborhood (especially for the problems with high-dimensional input spaces), all the inputs within the local region are expected to be close to each other (measured by the kernel function $k$ ). Moreover, as the objective function is usually smooth in the local region (i.e., its derivatives are continuous), reducing the uncertainty of the derivative at an input $\\boldsymbol{x}_t$ (i.e., by querying $\\boldsymbol{x}_t$ ) is also expected to decrease the uncertainty of the derivatives at the other inputs in the same local region (i.e., decrease $\\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}$ ). So, $r < 1$ is expected to be a reasonable condition that can be satisfied in practice. This will also be corroborated by our empirical results (e.g., Figs. 
1 and 2), which demonstrates that the error of our derivative estimation (6) is indeed reduced very fast.", + "bbox": [ + 169, + 643, + 826, + 829 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our GP-based Method (6) vs. Existing FD Methods. Our derivative estimation method based on the derived GP (6) is superior to the traditional FD methods (e.g., (2)) in a number of major aspects. (a) Our derivative estimation error can be exponentially decreasing in some situations (i.e., when $r < 1$ in Thm. 2), which is unachievable for the existing FD methods since they can only", + "bbox": [ + 169, + 843, + 826, + 902 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "2The first step of GD update to reach $x_{t,1}$ is always performed, i.e., $V_{t}\\geq 1$", + "bbox": [ + 189, + 909, + 643, + 924 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "attain a polynomial rate of reduction (Berahas et al., 2022). $(b)$ Our method (6) does not need any additional query to estimate the derivative (but only requires the optimization trajectory), whereas the existing FD methods require additional queries for every derivative estimation. $(c)$ Our method (6) is equipped with a principled measure of the predictive uncertainty and hence the estimation error for derivative estimation (i.e., via $\\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}$ , Thm. 1), which is typically unavailable for the existing FD methods. $(d)$ Our method (6), unlike the existing FD methods, makes it possible to apply the technique of dynamic virtual updates (Sec. 
3.2) thanks to its capability of estimating the derivative at any input in the domain without requiring any additional query and measuring the estimation error in a principled way (Thm. 1).", + "bbox": [ + 169, + 103, + 826, + 232 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 CONVERGENCE ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 248, + 398, + 262 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To analyze the convergence of our ZoRD, besides our main assumption that $f$ is sampled from a GP (Sec. 2.1), we assume that $f$ is $L_{c}$ -Lipschitz continuous for $L_{c} > 0$ . This is a mild assumption since it has been shown that a function $f$ sampled from a GP is Lipschitz continuous with high probability for commonly used kernels, e.g., the SE kernel and Matérn kernel with $\nu > 2$ (Srinivas et al., 2010). We also assume that $f$ is $L_{s}$ -Lipschitz smooth, which is commonly adopted in the analysis of GD-based algorithms (J Reddi et al., 2016). We aim to prove the convergence of our ZoRD for nonconvex $f$ by analyzing how fast it converges to a stationary point (Ghadimi and Lan, 2013; Liu et al., 2018a). Specifically, we follow the common practice of previous works (J Reddi et al., 2016; Liu et al., 2018b) to analyze the following derivative mapping:", + "bbox": [ + 169, + 273, + 826, + 398 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nG _ {t, \tau} \triangleq \left(\boldsymbol {x} _ {t, \tau} - \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} _ {t, \tau} - \eta_ {t, \tau} \nabla f \left(\boldsymbol {x} _ {t, \tau}\right)\right)\right) / \eta_ {t, \tau}. \tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 405, + 823, + 424 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The convergence of our ZoRD is formally guaranteed by Thm. 3 below (proof in Appx. 
B.4).", + "bbox": [ + 169, + 428, + 787, + 444 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3 (Convergence of ZORD). Let $\\delta \\in (0,1)$ . Suppose our ZORD (Algo. 2) is run with $V_{t} = V$ and $\\eta_{t,\\tau} = \\eta \\leq 1 / L_{s}$ for any $t$ and $\\tau$ . Then with probability of at least $1 - \\delta$ , when $r < 1$ ,", + "bbox": [ + 169, + 446, + 823, + 476 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {t \\leq T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\underbrace {\\frac {2 [ f (\\boldsymbol {x} _ {0}) - f (\\boldsymbol {x} ^ {*}) ] / \\eta}{T V}} _ {①} + \\underbrace {\\frac {2 \\alpha^ {2} r ^ {2}}{T (1 - r ^ {2})} + \\frac {(2 L _ {c} + 1 / \\eta) \\alpha r}{T (1 - r)}} _ {②}\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 483, + 756, + 542 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\alpha \\triangleq \\kappa \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(VT / \\delta)}$ . When $r = 1$ , we instead have (2) = 2α² + (2Lc + 1/η)α. In the upper bound of Thm. 3, the term (1) represents the convergence rate of (projected) GD when the true derivative is used and it asymptotically goes to 0 as $T$ increases; the term (2) corresponds to the impact of the error of our derivative estimation (6) on the convergence. In situations where $r < 1$ which is a reasonably achievable condition as we have discussed in Sec. 4.1, the term (2) will also asymptotically approach 0. This, remarkably, suggests that the impact of the derivative estimation error on the convergence vanishes asymptotically and our ZoRD algorithm is guaranteed to converge to a stationary point (i.e., $\\min_{t \\leq T} \\frac{1}{V} \\sum_{\\tau=0}^{V-1} \\|G_{t,\\tau}\\|_2^2$ approaches 0) at the rate of $\\mathcal{O}(1/T)$ when $r < 1$ . 
This is unattainable by existing ZO optimization algorithms using FD-based derivative estimation (Nesterov and Spokoiny, 2017; Liu et al., 2018b), because these methods typically converge to a stationary point at the rate of $\\mathcal{O}(1/T + \\text{const.})$ with a constant learning rate. Even when $r = 1$ where the term (2) becomes a constant independent of $T$ , our Thm. 3 is still superior to the convergence of these existing works because our result (Thm. 3) is based on the worst-case analysis whereas these works are typically based on the average-case analysis, i.e., their results only hold in expectation over the randomly sampled directions for derivative estimation. This means that their convergence may become even worse when inappropriate directions are used, e.g., directions that are nearly orthogonal to the true derivative which commonly happens in high-dimensional input spaces. In addition, given a fixed $T$ , our ZoRD enjoys a query complexity (i.e., the number of queries in $T$ iterations) of $\\mathcal{O}(T)$ , which significantly improves over the $\\mathcal{O}(nT)$ of the existing works based on FD ( $n$ in Sec. 2.2).", + "bbox": [ + 169, + 544, + 826, + 821 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The impacts of the number of steps of our virtual updates (i.e., $V$ ) are partially reflected in Thm. 3. Specifically, a larger $V$ improves the reduction rate of the term ① because a larger number of virtual GD updates (without requiring additional queries) will be applied in our ZoRD algorithm. This is also unachievable by existing ZO optimization algorithms using FD-based derivative estimation since they require additional queries for the derivative estimation in their every GD update. Meanwhile, a larger $V$ may also negatively impact the performance of our ZoRD since it may lead to the use of those estimated derivatives with large estimation errors (Sec. 3.2). 
However, this negative impact has", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2fb0671b56b5797630d6a747ef32a38713d82e3ec978e690ea6388a01f7f8fe4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 194, + 92, + 356, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/30e7e6b88ff343420916ce16025174b87f742f824a39b86d0c53d0bc912081d7.jpg", + "image_caption": [ + "- Function Queries -- $\\nabla f$ -- $\\nabla \\mu$" + ], + "image_footnote": [], + "bbox": [ + 356, + 92, + 504, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7435b5574d5e73ba4302e5159f8e4d4c40cd50ab8410143f82709bf339b93475.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 92, + 651, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b9e9b583f5ab93b49af9a61a3c090efdf457efc11a3341914f06dcf3b58a0351.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 93, + 800, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6c2df39df5bec678e47ecb1a994d5789a085a21360b455c265d7926be1bf4bb1.jpg", + "image_caption": [ + "Figure 1: Our derived GP for derivative estimation (4) with different number $n$ of queries. 
Green curve and its confidence interval denote the mean $\\nabla \\mu(\\boldsymbol{x})$ and standard deviation of the derived GP.", + "(a)" + ], + "image_footnote": [], + "bbox": [ + 173, + 244, + 325, + 340 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/52ac6711c387df65723539614fe94e1ec462fd42ab99dda8830767c808e6ada4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 325, + 246, + 488, + 340 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/69252a7f35ad0dda395bd119dead41878d095393545b25e807d1f6014526d1a0.jpg", + "image_caption": [ + "Figure 2: Comparison of the derivative estimation errors of our derived GP-based estimator (6) (GP) and the FD estimator, measured by cosine similarity (larger is better) and Euclidean distance (smaller is better). Each curve is the mean $\\pm$ standard error from five independent runs." + ], + "image_footnote": [], + "bbox": [ + 500, + 246, + 647, + 340 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c7e19456afed38e28affab77c25dfb1abd43b71de42fbbaa5db79bff7ffb5055.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 647, + 246, + 816, + 340 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "only been implicitly accounted for by the term ② because this term comes from our Thm. 2, which is based on a worst-case analysis and gives a uniform upper bound on the derivative estimation error for all inputs in the domain $\\mathcal{X}$ .", + "bbox": [ + 169, + 397, + 823, + 440 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 460, + 326, + 474 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we firstly empirically verify the efficacy of our derived GP-based derivative estimator (6) in Sec. 5.1, and then demonstrate that our ZoRD outperforms existing baseline methods for ZO optimization using synthetic experiments (Sec. 
5.2) and real-world experiments (Secs. 5.3, 5.4).", + "bbox": [ + 169, + 492, + 823, + 536 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1 DERIVATIVE ESTIMATION", + "text_level": 1, + "bbox": [ + 171, + 551, + 390, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Here we investigate the efficacy of our derivative estimator (6) based on the derived GP for derivatives (4). Specifically, we sample a function $f$ (defined on a one-dimensional domain) from a GP using the SE kernel, and then use a set of randomly selected inputs as well as their noisy observations (as optimization trajectory) to calculate our derived GP for derivatives. The results (Fig. 1) illustrate a number of interesting insights. Firstly, in regions where (even only a few) function queries are performed (e.g., in the region of $[-3,0]$ ), our estimated derivative (i.e., $\\nabla \\mu_{t-1}(\\pmb{x})$ ) generally aligns with the groundtruth derivative (i.e., $\\nabla f(\\pmb{x})$ ) and our estimation uncertainty (i.e., characterized by $\\sqrt{\\|\\partial \\sigma_{t-1}^2(\\pmb{x})\\|_2}$ ) shrinks compared with other un-queried regions. These results hence demonstrate that our (4) is able to accurately estimate derivatives and reliably quantify the uncertainty of these estimations within the regions where function queries are performed. Secondly, as more input queries are collected (i.e., from left to right in Fig. 1), the uncertainty $\\sqrt{\\|\\partial \\sigma_{t-1}^2(\\pmb{x})\\|_2}$ in the entire domain is decreased in general. This provides an empirical justification for our Thm. 2 which guarantees non-increasing uncertainty and hence non-increasing estimation error. 
Lastly, note that with only 12 queries (rightmost figure), our derivative estimator is already able to accurately estimate the derivative in the entire domain, which represents a remarkable reduction rate of our derivative estimation error.", + "bbox": [ + 169, + 577, + 826, + 806 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next, we compare our derivative estimator (6) with the FD estimator (Sec. 2.2). Specifically, using the Ackley function with $d = 10$ (see Appx. C.2), we firstly select an input $x_0$ and then follow the FD method (2) to randomly sample $n$ directions $\\{\\pmb{u}_i\\}_{i=1}^n$ from the standard Gaussian distribution, to construct input queries $\\{\\pmb{x}_0 + \\lambda \\pmb{u}_i\\}_{i=1}^n$ (see Sec. 2.2). Next, these queries and their observations are $(a)$ used as the optimization trajectory to apply our derivative estimator (6), and $(b)$ used by the FD method to estimate the derivative following (2). The results are shown in Fig. 2a (for two different values of $\\lambda$ ), in which for both our derived GP-based estimator (6) and the FD estimator, we measure the cosine similarity (larger is better) and Euclidean distance (smaller is better) between the estimated", + "bbox": [ + 169, + 811, + 823, + 924 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fd31e857fdfda29a70526f3750dfdd07613981fb14662922e4a124caef4bb53c.jpg", + "image_caption": [ + "(a) Ackley $(d = 20)$" + ], + "image_footnote": [], + "bbox": [ + 171, + 93, + 326, + 198 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e73667fdf505e857111dadc0fd07c848d1490dc290fa522ca60cff5936a7b642.jpg", + "image_caption": [ + "(b) Ackley $(d = 40)$" + ], + "image_footnote": [], + 
"bbox": [ + 333, + 94, + 488, + 198 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0100551861b401ee2614ff43292c7d06a39a06f8256052db091e7693ba9297fc.jpg", + "image_caption": [ + "(c) Levy $(d = 40)$", + "Figure 3: Optimization of Ackley and Levy functions with different dimensions. The $x$ -axis and $y$ -axis denote the number of queries and log-scaled optimality gap (i.e., $\\log(f(\\boldsymbol{x}_T) - f(\\boldsymbol{x}^*))$ ) achieved after this number of queries. Each curve is the mean $\\pm$ standard error from ten independent runs." + ], + "image_footnote": [], + "bbox": [ + 493, + 93, + 656, + 198 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bc6269a99ec9934f4c72c89db8b20f284c5afd4e9a5baa679820e4ee9bd17b61.jpg", + "image_caption": [ + "(d) Levy $(d = 100)$" + ], + "image_footnote": [], + "bbox": [ + 661, + 93, + 823, + 198 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/fc483bd8b7a527213cfa336ac611ae4d2bb700bc68d2ce47e872ecb19d059d21.jpg", + "table_caption": [ + "Table 1: Comparison of the number of required queries to achieve a successful black-box adversarial attack. Every entry represents mean ± standard deviation from five independent runs." + ], + "table_footnote": [], + "table_body": "
DatasetMetricGLDRGFPRGFTuRBO-1TuRBO-10ZoRD
MNIST# Queries1780±2221192±2601236±145654±70747±60248±50
Speedup7.2×4.8×5.0×2.6×3.0×1.0×
CIFAR-10# Queries964±1753622±11554133±1525638±108708±105384±59
Speedup2.5×9.4×10.8×1.7×1.8×1.0×
", + "bbox": [ + 176, + 309, + 823, + 392 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "derivative and the true derivative at $x_0$ . The figures show that our derivative estimation error enjoys a faster rate of reduction compared with the FD method, which corroborates our theoretical insights from Thm. 2 (Sec. 4.1) positing that our estimation error can be rapidly decreasing. Subsequently, to further highlight our advantage of being able to exploit the optimization trajectory and hence to eliminate the need for additional function queries (Sec. 4.1), we perform another comparison where our derived GP-based estimator (6) only utilizes 20 queries from the optimization trajectory (sampled using the same method above) for derivative estimation. The results (Fig. 2b) show that even with only these 20 queries (without any additional function query), our derivative estimator (6) achieves comparable or better estimation errors than FD using as many as 80 additional queries. Overall, the results in Fig. 2 have provided empirical supports for the superiority of our derived GP-based derivative estimation (6), which substantiates our theoretical justifications in Sec. 4.1.", + "bbox": [ + 169, + 405, + 826, + 556 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 SYNTHETIC EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 575, + 401, + 589 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Here we adopt the widely use Ackley and Levy functions with various dimensions (Eriksson et al., 2019) to show the superiority of our ZoRD. We compare ZoRD with a number of representative baselines for ZO optimization, e.g., RGF (Nesterov and Spokoiny, 2017) which uses FD for derivative estimation, PRGF (Cheng et al., 2021) which is a recent extension of RGF, GLD (Golovin et al., 2020) which is a recent ZO optimization algorithm based on direct search, and TuRBO (Eriksson et al., 2019) which is a highly performant Bayesian optimization (BO) algorithm. 
We also evaluate the performance of a first-order optimization algorithm, i.e., GD with true derivatives. More details are in Appx. C.2. The results are shown in Fig. 3, where ZoRD outperforms all other ZO optimization algorithms. Particularly, ZoRD considerably outperforms both RGF and PRGF, which can be attributed to our two major contributions. Firstly, our derivative estimator (6) used by ZoRD is more accurate and more query-efficient than the FD method adopted by RGF and PRGF, as theoretically justified in Sec. 4.1 and empirically demonstrated in Sec. 5.1. Secondly, our dynamic virtual updates (Sec. 3.2) can perform multi-step GD updates without requiring any additional query, which further improves the performance of ZoRD (validated in Appx. D.2). Moreover, ZoRD is the only ZO optimization algorithm that is able to converge to a comparable final performance to that of the GD with true derivatives in every figure of Fig. 3.", + "bbox": [ + 169, + 601, + 826, + 825 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3 BLACK-BOX ADVERSARIAL ATTACK", + "text_level": 1, + "bbox": [ + 171, + 840, + 470, + 854 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We further compare our ZoRD with other ZO optimization algorithms in the problem of black-box adversarial attack on images, which is one of the most important applications of ZO optimization in recent years. 
In black-box adversarial attack (Ru et al., 2020), given a fully trained ML model and an image $z$ , we intend to find (through only function queries) a small perturbation $x$ to be added to $z$", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5e2cc4a270655bf0cc561718d01579082a6b4c660c8b305cab3b961c4172a6f2.jpg", + "image_caption": [ + "(a) Precision" + ], + "image_footnote": [], + "bbox": [ + 171, + 97, + 330, + 204 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/cec0e66aa1591dcb2dfa5ffa83037b4fda9859d4f41334f3cdbba10d9b0748cb.jpg", + "image_caption": [ + "(b) Recall" + ], + "image_footnote": [], + "bbox": [ + 336, + 98, + 493, + 203 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/7585b17454414486877252370863dfb40ad78f6c8e978f2e901729a5fdfe2ad1.jpg", + "image_caption": [ + "(c) F1 Score", + "Figure 4: Optimization of different non-differentiable metrics on the Covertype dataset. The $x$ -axis and $y$ -axis denote, respectively, the number of queries and the improvement on the non-differentiable metric. Each curve is the mean $\\pm$ standard error from five independent experiments." + ], + "image_footnote": [], + "bbox": [ + 501, + 97, + 653, + 203 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b96d17acceccde711d5262da4219c0fa5ed142b795b19050eb3003c174935d16.jpg", + "image_caption": [ + "(d) Jaccard index" + ], + "image_footnote": [], + "bbox": [ + 661, + 98, + 821, + 203 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "such that the perturbed image $z + x$ will be incorrectly classified by the ML model. 
Following the practice from (Cheng et al., 2021), we randomly select an image from MNIST (Lecun et al., 1998) ( $d = 28 \times 28$ ) or CIFAR-10 (Krizhevsky et al., 2009) ( $d = 32 \times 32$ ), and aim to add a perturbation with an $L_{\infty}$ constraint to make a trained deep neural network misclassify the image (more details in Appx. C.3). Tab. 1 summarizes the number of required queries to achieve a successful attack by different algorithms (see results on multiple images in Appx. D.3). The results show that in such high-dimensional ZO optimization problems, our ZoRD again significantly outperforms the other algorithms since it requires a considerably smaller number of queries to achieve a successful attack. Particularly, our ZoRD is substantially more query-efficient than RGF and PRGF which rely on the FD methods for derivative estimation, e.g., for CIFAR-10, the number of queries required by RGF and PRGF are $9.4\times$ and $10.8\times$ of that required by ZoRD. This further verifies the advantages of our trajectory-informed derivative estimation (as justified theoretically in Sec. 4.1 and empirically in Sec. 5.1) and dynamic virtual updates (as demonstrated in Appx. D.2). Remarkably, our ZoRD also outperforms BO (i.e., TuRBO-1/10 which correspond to two versions of the TuRBO algorithm (Eriksson et al., 2019)) which has been widely shown to be query-efficient in black-box adversarial attack (Ru et al., 2020). 
Overall, these results showcase the ability of our ZoRD to advance the other ZO optimization algorithms in challenging real-world ZO optimization problems.", + "bbox": [ + 169, + 287, + 826, + 525 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.4 NON-DIFFERENTIABLE METRIC OPTIMIZATION", + "text_level": 1, + "bbox": [ + 171, + 540, + 542, + 554 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Non-differentiable metric optimization (Hiranandani et al., 2021; Huang et al., 2021), which has received a surging interest recently, can also be cast as a ZO optimization problem. We therefore use it to further demonstrate the superiority of our ZoRD to other ZO optimization algorithms. Specifically, we firstly train a multilayer perceptron (MLP) $(d = 2189)$ on the Covertype (Dua and Graff, 2017) dataset with the cross-entropy loss function. Then, we use the same dataset to fine-tune this MLP model by exploiting ZO optimization algorithms to optimize a non-differentiable metric, such as precision, recall, F1 score and Jaccard index (see more details in Appx. C.4). Here we additionally compare with the evolutionary strategy (ES) which has been previously applied for non-differentiable metric optimization (Huang et al., 2021). Fig. 4 illustrates the percentage improvements achieved by different algorithms during the fine-tuning process (i.e., $(f(\\pmb{x}_0) - f(\\pmb{x}_T)) \\times 100\\% / f(\\pmb{x}_0)$ ). The results show that our ZoRD again consistently outperforms the other ZO optimization algorithms in terms of both the query efficiency and the final converged performance. 
These results therefore further substantiate the superiority of ZoRD in optimizing high-dimensional non-differentiable functions.", + "bbox": [ + 169, + 569, + 823, + 750 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 777, + 318, + 791 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We have introduced the ZoRD algorithm, which achieves query-efficient ZO optimization through two major contributions. Firstly, we have proposed a novel derived GP-based method (6) which only uses the optimization trajectory and hence eliminates the requirement for additional queries (Sec. 3.1) to estimate derivatives. Secondly, we have introduced a novel technique, i.e., dynamic virtual updates, which is made possible by our GP-based derivative estimation, to further improve the performance of our ZoRD (Sec. 3.2). Through theoretical justifications (Sec. 4) and empirical demonstrations (Sec. 5), we show that our derived GP-based derivative estimation improves over existing FD methods and that our ZoRD outperforms various ZO optimization baselines.", + "bbox": [ + 169, + 811, + 823, + 924 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 REPRODUCIBILITY STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 102, + 468, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "For our theoretical results, we have discussed all our assumptions in Sec. 2.1 & Sec. 4.2, and provided our complete proofs in Appx. B. For our empirical results, we have provided our detailed experimental settings in Appx. 
C and included our codes in the supplementary materials (i.e., the zip file).", + "bbox": [ + 171, + 133, + 826, + 176 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 191, + 328, + 205 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This research is part of the programme DesCartes and is supported by the National Research Foundation, Prime Minister's Office, Singapore under its Campus for Research Excellence and Technological Enterprise (CREATE) programme.", + "bbox": [ + 171, + 215, + 828, + 258 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 171, + 277, + 287, + 292 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Binxin Ru, Adam D. Cobb, Arno Blaas, and Yarin Gal. Bayesopt adversarial attack. In Proc. ICLR, 2020.", + "Gaurush Hiranandani, Jatin Mathur, Harikrishna Narasimhan, Mahdi Milani Fard, and Sanmi Koyejo. Optimizing black-box metrics with iterative example weighting. In Proc. ICML, 2021.", + "Tim Salimans, Jonathan Ho, Xi Chen, and Ilya Sutskever. Evolution strategies as a scalable alternative to reinforcement learning. arXiv:1703.03864, 2017.", + "Yurii E. Nesterov and Vladimir G. Spokoiny. Random gradient-free minimization of convex functions. Found. Comput. Math., 17(2):527-566, 2017.", + "Shuyu Cheng, Guoqiang Wu, and Jun Zhu. On the convergence of prior-guided zeroth-order optimization algorithms. In Proc. NeurIPS, 2021.", + "Albert S. Berahas, Liyuan Cao, Krzysztof Choromanski, and Katya Scheinberg. A theoretical and empirical comparison of gradient approximations in derivative-free optimization. Found. Comput. Math., 22(2):507-560, 2022.", + "Niranjan Srinivas, Andreas Krause, Sham M. Kakade, and Matthias W. Seeger. Gaussian process optimization in the bandit setting: No regret and experimental design. In Proc. 
ICML, 2010.", + "Kirthevasan Kandasamy, Akshay Krishnamurthy, Jeff Schneider, and Barnabás Póczos. Parallelised Bayesian optimisation via Thompson sampling. In Proc. AISTATS, 2018.", + "Carl Edward Rasmussen and Christopher K. I. Williams. Gaussian processes for machine learning. Adaptive computation and machine learning. MIT Press, 2006.", + "Abraham Flaxman, Adam Tauman Kalai, and H. Brendan McMahan. Online convex optimization in the bandit setting: Gradient descent without a gradient. In Proc. SODA, 2005.", + "Saeed Ghadimi and Guanghui Lan. Stochastic first- and zeroth-order methods for nonconvex stochastic programming. SIAM Journal on Optimization, 23(4):2341-2368, 2013.", + "Sijia Liu, Bhavya Kailkhura, Pin-Yu Chen, Pai-Shun Ting, Shiyu Chang, and Lisa Amini. Zeroth-order stochastic variance reduction for nonconvex optimization. In Proc. NeurIPS, 2018a.", + "Sijia Liu, Xingguo Li, Pin-Yu Chen, Jarvis D. Haupt, and Lisa Amini. Zeroth-order stochastic projected gradient descent for nonconvex optimization. In Proc. GlobalSIP, 2018b.", + "Xiangru Lian, Huan Zhang, Cho-Jui Hsieh, Yijun Huang, and Ji Liu. A comprehensive linear speedup analysis for asynchronous stochastic parallel optimization from zeroth-order to first-order. In Proc. NIPS, 2016.", + "Sashank J Reddi, Suvrit Sra, Barnabas Poczos, and Alexander J Smola. Proximal stochastic methods for nonsmooth nonconvex finite-sum optimization. In Proc. NIPS, 2016.", + "David Eriksson, Michael Pearce, Jacob R. Gardner, Ryan Turner, and Matthias Poloczek. Scalable global optimization via local Bayesian optimization. In Proc. NeurIPS, 2019." 
+ ], + "bbox": [ + 171, + 301, + 828, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Daniel Golovin, John Karro, Greg Kochanski, Chansoo Lee, Xingyou Song, and Qiuyi (Richard) Zhang. Gradientless descent: High-dimensional zeroth-order optimization. In Proc. ICLR, 2020.", + "Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner. Gradient-based learning applied to document recognition. Proceedings of the IEEE, pages 2278-2324, 1998.", + "Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009.", + "Chen Huang, Shuangfei Zhai, Pengsheng Guo, and Josh M. Susskind. Metricopt: Learning to optimize black-box evaluation metrics. In Proc. CVPR, 2021.", + "Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive.ics.uci.edu/ml.", + "Sebastian U Stich, Christian L Muller, and Bernd Gartner. Optimization of convex functions with random pursuit. SIAM Journal on Optimization, 23(2):1284-1309, 2013.", + "Sayak Ray Chowdhury and Aditya Gopalan. On kernelized multi-armed bandits. In Proc. ICML, 2017.", + "Zhongxiang Dai, Haibin Yu, Bryan Kian Hsiang Low, and Patrick Jaillet. Bayesian optimization meets Bayesian optimal stopping. In Proc. ICML, 2019.", + "Zhongxiang Dai, Bryan Kian Hsiang Low, and Patrick Jaillet. Federated bayesian optimization via thompson sampling. In Proc. NeurIPS, 2020.", + "Benjamin Letham, Roberto Calandra, Akshara Rai, and Eytan Bakshy. Re-examining linear embeddings for high-dimensional Bayesian optimization. In Proc. NeurIPS, 2020.", + "Andrew Ilyas, Logan Engstrom, and Aleksander Madry. Prior convictions: Black-box adversarial attacks with bandits and priors. 
In Proc. ICLR, 2019.", + "Florian Meier, Asier Mujika, Marcelo Matheus Gauy, and Angelika Steger. Improving gradient estimation in evolutionary strategies with past descent directions. arXiv:1910.05268, 2019.", + "Niru Maheswaranathan, Luke Metz, George Tucker, Dami Choi, and Jascha Sohl-Dickstein. Guided evolutionary strategies: Augmenting random search with surrogate gradients. In Proc. ICML, 2019.", + "Shuyu Cheng, Yinpeng Dong, Tianyu Pang, Hang Su, and Jun Zhu. Improving black-box adversarial attacks with a transfer-based prior. In NeurIPS, 2019.", + "Beatrice Laurent and Pascal Massart. Adaptive estimation of a quadratic functional by model selection. Annals of Statistics, pages 1302-1338, 2000.", + "Sayak Ray Chowdhury and Aditya Gopalan. No-regret algorithms for multi-task Bayesian optimization. In Proc. AISTATS, 2021.", + "Stephen P. Boyd and Lieven Vandenberghe. Convex Optimization. Cambridge University Press, 2014.", + "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proc. ICLR, 2015.", + "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proc. CVPR, 2016.", + "Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. OpenAI Gym. arXiv:1606.01540, 2016.", + "M. D. McKay, R. J. Beckman, and W. J. Conover. A comparison of three methods for selecting values of input variables in the analysis of output from a computer code. Technometrics, 21(2): 239-245, 1979.", + "Jian Tan, Niv Nayman, and Mengchang Wang. CobBO: Coordinate backoff Bayesian optimization with two-stage kernels. arXiv:2101.05147, 2021.", + "Hong Qian and Yang Yu. Derivative-free reinforcement learning: A review. Frontiers Comput. Sci., 15(6):156336, 2021." 
+ ], + "bbox": [ + 171, + 102, + 828, + 922 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "APPENDIX A RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 102, + 442, + 118 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Various types of algorithms have been proposed in the literature to solve ZO optimization problems, e.g., direct search, Bayesian optimization (BO) and GD-based algorithms with estimated derivatives. Particularly, direct search, e.g., (Stich et al., 2013; Golovin et al., 2020), relies on the comparison of function values at different inputs for the updates, which can be query-inefficient in practice owing to its indirect utilization of function values. In contrast, Bayesian optimization (BO) directly utilizes the function values to model the objective function using a Gaussian process (GP) and iteratively selects the inputs to query by trading off sampling potentially optimal inputs (i.e., exploitation) and inputs that can improve the GP belief of the objective function over the entire input domain (i.e., exploration) (Chowdhury and Gopalan, 2017; Srinivas et al., 2010; Dai et al., 2019; 2020). However, in ZO optimization problems with high-dimensional input spaces, BO algorithms typically suffer from query inefficiency and large computational complexity (Rasmussen and Williams, 2006; Letham et al., 2020; Eriksson et al., 2019), which significantly hinders their real-world applications. Therefore, GD-based algorithms with estimated derivatives, which inherit the advantage of GD-based algorithms in optimizing functions with high-dimensional input spaces, have been more widely applied in practice. 
For these algorithms, the derivatives are commonly estimated using the finite difference (FD) approximation (which requires additional function queries) of the directional derivatives along selected directions, in which the directions can be randomly sampled unit vectors Flaxman et al. (2005), Gaussian vectors (Nesterov and Spokoiny, 2017), or standard bases (Lian et al., 2016) (Sec. 2.2). More recently, some works have incorporated a time-dependent prior (i.e., the estimated derivative in the previous iteration) into existing FD methods to improve the quality of its derivative estimation (Ilyas et al., 2019; Meier et al., 2019; Cheng et al., 2021). Nevertheless, such a prior is also estimated by the FD method (i.e., in the previous iteration) and can hence be biased owing to the its estimation error, which may even lead to larger derivative estimation errors in practice due to compounding errors. Another line of work has taken the surrogate derivatives from other sources to help reduce the derivative estimation error of existing FD methods (Maheswaranathan et al., 2019; Cheng et al., 2019). However, these surrogate derivatives may generally be unavailable in practice. 
Importantly, these existing FD methods require additional function queries for every derivative estimation during optimization, which will significantly increase the query complexity of ZO optimization algorithms that employ these FD methods for derivative estimation.
\\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 410, + 714, + 823, + 729 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where the first equality derives from the interchangeability of the expectation and derivative operation based on the Leibniz integral rule. The second equality comes from the fact that $\\mathbb{E}[f] = \\mu$ .", + "bbox": [ + 169, + 734, + 823, + 763 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For the covariance $\\mathrm{Cov}(\\nabla f,\\nabla f)$ , we have", + "bbox": [ + 171, + 768, + 457, + 784 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\operatorname {C o v} \\left(\\nabla f (\\boldsymbol {z}), \\nabla f \\left(\\boldsymbol {z} ^ {\\prime}\\right)\\right) \\stackrel {(a)} {=} \\mathbb {E} \\left[ \\left(\\nabla f (\\boldsymbol {z}) - \\mathbb {E} \\left[ \\nabla f (\\boldsymbol {z}) \\right]\\right) ^ {\\top} \\left(\\nabla f \\left(\\boldsymbol {z} ^ {\\prime}\\right) - \\mathbb {E} \\left[ \\nabla f \\left(\\boldsymbol {z} ^ {\\prime}\\right) \\right]\\right) \\right] \\\\ \\stackrel {(b)} {=} \\mathbb {E} \\left[ \\nabla \\left(f (\\boldsymbol {z}) - \\mathbb {E} [ f (\\boldsymbol {z}) ]\\right) ^ {\\top} \\nabla \\left(f \\left(\\boldsymbol {z} ^ {\\prime}\\right) - \\mathbb {E} [ f \\left(\\boldsymbol {z} ^ {\\prime}\\right) ]\\right) \\right] \\\\ \\stackrel {(c)} {=} \\mathbb {E} \\left[ \\partial_ {\\boldsymbol {z}} \\partial_ {\\boldsymbol {z} ^ {\\prime}} \\left(f (\\boldsymbol {z}) - \\mathbb {E} [ f (\\boldsymbol {z}) ]\\right) ^ {\\top} \\left(f \\left(\\boldsymbol {z} ^ {\\prime}\\right) - \\mathbb {E} [ f \\left(\\boldsymbol {z} ^ {\\prime}\\right) ]\\right) \\right] \\tag {11} \\\\ \\stackrel {(d)} {=} \\partial_ {\\pmb {z}} \\partial_ {\\pmb {z} ^ {\\prime}} \\mathbb {E} \\left[ \\left(f (\\pmb {z}) - \\mathbb {E} \\left[ f (\\pmb {z}) \\right]\\right) ^ {\\top} \\left(f (\\pmb {z} ^ {\\prime}) - \\mathbb {E} \\left[ f (\\pmb {z} ^ 
{\\prime}) \\right]\\right) \\right] \\\\ \\stackrel {(e)} {=} \\partial_ {\\boldsymbol {z}} \\partial_ {\\boldsymbol {z} ^ {\\prime}} \\sigma_ {t} ^ {2} (\\boldsymbol {z}, \\boldsymbol {z} ^ {\\prime}) . \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 789, + 823, + 928 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Notably, $(b)$ and $(d)$ also derive from the interchangeability of the expectation and derivative operation based on the Leibniz integral rule. Besides, $(e)$ is obtained based on $\\operatorname{Cov}(f, f) = \\sigma^2(\\cdot, \\cdot)$ . This finally completes our proof.", + "bbox": [ + 169, + 103, + 823, + 148 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B.2 PROOF OF THEOREM 1", + "text_level": 1, + "bbox": [ + 171, + 165, + 375, + 180 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To begin with, we introduce the following concentration inequality for standard multi-variate Gaussian distribution:", + "bbox": [ + 169, + 191, + 823, + 220 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Lemma B.1 (Laurent and Massart (2000)). Let $\\zeta \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I}_m)$ and $\\delta \\in (0,1)$ then", + "bbox": [ + 169, + 226, + 718, + 242 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {P} \\left(\\| \\boldsymbol {\\zeta} \\| _ {2} \\leq \\sqrt {m + 2 (\\sqrt {m} + 1) \\ln (1 / \\delta)}\\right) \\geq 1 - \\delta . 
\\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 252, + 823, + 286 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Define $\\zeta \\triangleq \\left(\\partial \\sigma_t^2 (\\pmb {x})\\right)^{-1 / 2}\\left(\\nabla f(\\pmb {x}) - \\nabla \\mu_t(\\pmb {x})\\right)$ , according to Lemma 1, we then have that $\\zeta$ follows a standard multi-variate Gaussian distribution, i.e.,", + "bbox": [ + 169, + 304, + 823, + 335 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\zeta \\sim \\mathcal {N} (\\mathbf {0}, \\mathbf {I} _ {d}). \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 444, + 345, + 823, + 362 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Let $\\delta \\in (0,1)$ . By substituting the result above into Lemma B.1, the following holds with probability of at least $1 - \\delta$ :", + "bbox": [ + 169, + 369, + 823, + 398 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\nabla f (\\boldsymbol {x}) - \\nabla \\mu_ {t} (\\boldsymbol {x}) \\right\\| _ {2} = \\left\\| \\left(\\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x})\\right) ^ {- 1 / 2} \\boldsymbol {\\zeta} \\right\\| _ {2} \\\\ \\leq \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\| \\zeta \\| _ {2} \\tag {14} \\\\ \\leq \\sqrt {d + 2 (\\sqrt {d} + 1) \\ln (1 / \\delta)} \\sqrt {\\| \\partial \\sigma_ {t} ^ {2} (\\pmb {x}) \\| _ {2}} \\\\ = \\beta \\sqrt {\\| \\partial \\sigma_ {t} ^ {2} (\\pmb {x}) \\| _ {2}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 407, + 823, + 536 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "with $\\beta \\triangleq \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(1 / \\delta)}$ and the first inequality is from the Cauchy-Schwarz inequality, which completes our proof.", + "bbox": [ + 169, + 547, + 826, + 585 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B.3 PROOF OF THEOREM 2", + 
"text_level": 1, + "bbox": [ + 171, + 602, + 375, + 616 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We first introduce the following lemmas.", + "bbox": [ + 171, + 630, + 441, + 645 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Lemma B.2 (Chowdhury and Gopalan (2021)). For any $\\sigma \\in \\mathbb{R}$ and any matrix $\\mathbf{A}$ , the following hold", + "bbox": [ + 169, + 648, + 826, + 666 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} - \\mathbf {A} ^ {\\top} \\left(\\mathbf {A} \\mathbf {A} ^ {\\top} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\mathbf {A} = \\sigma^ {2} \\left(\\mathbf {A} ^ {\\top} \\mathbf {A} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1}. \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 674, + 823, + 696 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Lemma B.3 (Sherman-Morrison formula). For any invertible square matrix $\\mathbf{A}$ and column vectors $\\mathbf{u},\\mathbf{v}$ , suppose $\\mathbf{A} + \\mathbf{u}\\mathbf{v}^{\\top}$ is invertible, then the following holds", + "bbox": [ + 169, + 705, + 823, + 733 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\mathbf {A} + \\boldsymbol {u} \\boldsymbol {v} ^ {\\top}\\right) ^ {- 1} = \\mathbf {A} ^ {- 1} - \\frac {\\mathbf {A} ^ {- 1} \\boldsymbol {u} \\boldsymbol {v} ^ {\\top} \\mathbf {A} ^ {- 1}}{1 + \\boldsymbol {v} ^ {\\top} \\mathbf {A} ^ {- 1} \\boldsymbol {u}}. \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 742, + 823, + 776 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "**Preparation.** We then introduce some additional notations and representations for our proof of Theorem 2. 
Following the common practice in (Chowdhury and Gopalan, 2021), we let the kernel $k$ be defined by $\\psi(\\pmb{x})$ , i.e., $k(\\pmb{x},\\pmb{x}^{\\prime}) = \\psi(\\pmb{x})^{\\top}\\psi(\\pmb{x}^{\\prime})$ , and $\\phi(\\pmb{x}) \\triangleq \\nabla \\psi(\\pmb{x})$ . We then further define the $(t\\times d)$ -dimensional Jacobian matrix $\\phi_t(\\pmb{x}) \\triangleq [\\phi(\\pmb{x})^\\top \\psi(\\pmb{x}_\\tau)]_{\\tau=1}^t$ and $\\Psi_t \\triangleq [\\psi(\\pmb{x}_\\tau)]_{\\tau=1}^t$ . The matrix $\\mathbf{K}_t$ and the covariance matrix $\\partial \\sigma_t^2(\\pmb{x})$ defined on the optimization trajectory $\\mathcal{D}_t$ in our Sec. 3.1 can be reformulated as", + "bbox": [ + 169, + 791, + 825, + 878 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {K} _ {t} = \\boldsymbol {\\Psi} _ {t} ^ {\\top} \\boldsymbol {\\Psi} _ {t}, \\tag {17} \\\\ \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) = \\phi (\\boldsymbol {x}) ^ {\\top} \\phi (\\boldsymbol {x}) - \\phi_ {t} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\phi_ {t} (\\boldsymbol {x}). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 886, + 823, + 928 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Based on the reformulation above, define $\\mathbf{V}_t \\triangleq \\boldsymbol{\\Psi}_t \\boldsymbol{\\Psi}_t^\\top + \\sigma^2 \\mathbf{I}$ , we can further reformulate $\\partial \\sigma_t^2(\\boldsymbol{x})$ as below", + "bbox": [ + 169, + 102, + 823, + 130 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\stackrel {(a)} {=} \\phi (\\boldsymbol {x}) ^ {\\top} \\phi (\\boldsymbol {x}) - \\phi_ {t} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\phi_ {t} (\\boldsymbol {x}) \\\\ \\stackrel {(b)} {=} \\phi (\\boldsymbol {x}) ^ {\\top} \\phi (\\boldsymbol {x}) - \\phi (\\boldsymbol {x}) ^ {\\top} \\Psi_ {t} \\left(\\Psi_ {t} ^ {\\top} \\Psi_ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\Psi_ {t} ^ {\\top} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(c)} {=} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {I} - \\boldsymbol {\\Psi} _ {t} \\left(\\boldsymbol {\\Psi} _ {t} ^ {\\top} \\boldsymbol {\\Psi} _ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {\\Psi} _ {t} ^ {\\top}\\right) \\phi (\\boldsymbol {x}) \\tag {18} \\\\ \\stackrel {(d)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\Psi_ {t} \\Psi_ {t} ^ {\\top} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(e)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1} \\phi (\\boldsymbol {x}). 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 133, + 823, + 260 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Note that $(b)$ is obtained by exploiting the fact that $\\mathbf{K}_t = \\boldsymbol{\\Psi}_t^\\top \\boldsymbol{\\Psi}_t$ and $\\phi_t(\\boldsymbol{x}) = \\phi(\\boldsymbol{x})^\\top \\boldsymbol{\\Psi}_t$ . In addition, $(d)$ comes from Lemma B.2 by replacing the matrix $\\mathbf{A}$ in Lemma B.2 with the matrix $\\boldsymbol{\\Psi}_t^\\top$ .", + "bbox": [ + 169, + 267, + 826, + 299 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "First Part. We then prove the first half part of our Theorem 2, i.e., the following Lemma B.4.", + "bbox": [ + 169, + 314, + 797, + 330 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Lemma B.4 (Non-Increasing Variance Norm). For any $\\pmb{x} \\in \\mathcal{X}$ and any $t \\geq 1$ , we have that", + "bbox": [ + 169, + 334, + 777, + 349 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}. \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 356, + 823, + 377 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Proof. 
Based on our additional notations and representations, we have", + "bbox": [ + 169, + 396, + 637, + 411 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\stackrel {(a)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(b)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\boldsymbol {\\Psi} _ {t - 1} \\boldsymbol {\\Psi} _ {t - 1} ^ {\\top} + \\sigma^ {2} \\mathbf {I} + \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(c)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {V} _ {t - 1} + \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(d)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) - \\sigma^ {2} \\left(1 + \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t})\\right) ^ {- 1} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(e)} {=} \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) - \\sigma^ {2} \\left(1 + \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t})\\right) ^ {- 1} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(f)} {\\preccurlyeq} \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}). 
\\tag {20} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 421, + 856, + 585 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Note that (a) follows from the aforementioned definition of $\\mathbf{V}_t$ and (b) comes from the fact that $\\Psi_t\\Psi_t^\\top = \\Psi_{t - 1}\\Psi_{t - 1}^\\top +\\psi (\\pmb {x}_t)\\psi (\\pmb {x}_t)^\\top$ . Similarly, (c) uses the definition of $\\mathbf{V}_{t - 1}$ . In addition, equality (d) derives from Lemma B.3 by letting $\\mathbf{A} = \\mathbf{V}_{t - 1}$ and $\\pmb {u} = \\pmb {v} = \\psi (\\pmb {x}_t)$ and (e) follows from the reformulation of $\\partial \\sigma_{t - 1}^2 (\\pmb {x})$ in (18). Finally, (f) derives from the positive semi-definite property of $\\phi (\\pmb {x})^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t)\\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\phi (\\pmb {x})$ as well as the fact that $1 + \\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t) > 0$ . That is, for any column vector $\\textbf{z}$ we have that", + "bbox": [ + 169, + 585, + 826, + 672 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\boldsymbol {z} ^ {\\top} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z} = \\left(\\phi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z}\\right) ^ {\\top} \\left(\\phi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z}\\right) \\\\ = \\left\\| \\phi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z} \\right\\| _ {2} ^ {2} \\tag {21} \\\\ \\geq 0. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 681, + 823, + 750 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "So, $\\phi (\\pmb {x})^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t)\\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\phi (\\pmb {x})$ is positive semi-definite. Following a similar way, we are also able to verify that $1 + \\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t) > 0$ by showing that $\\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t)\\geq 0$ using the decomposition of $\\mathbf{V}_{t - 1}^{-1}$ from the Principle Component Analysis (PCA). Since $\\partial \\sigma_t^2 (\\pmb {x})\\preccurlyeq \\sigma_{t - 1}^2 (\\pmb {x})$ is equivalent to $\\left\\| \\partial \\sigma_t^2 (\\pmb {x})\\right\\| _2\\leq \\left\\| \\partial \\sigma_{t - 1}^2 (\\pmb {x})\\right\\| _2$ , we then complete the proof of first half part of our Theorem 2.", + "bbox": [ + 169, + 762, + 826, + 842 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Second Part. To prove the rest of our Theorem 2, we firstly introduce the following lemmas.", + "bbox": [ + 169, + 864, + 792, + 878 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Lemma B.5. For any $\\pmb{x} \\in \\mathcal{X}$ and any $t \\geq 1$ , the following holds", + "bbox": [ + 169, + 883, + 599, + 898 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {V} _ {t} ^ {- 1} \\preccurlyeq \\mathbf {V} _ {t - 1} ^ {- 1}. \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 444, + 907, + 823, + 926 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Proof. 
For any column vector $z$ , we have", + "bbox": [ + 171, + 104, + 449, + 118 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\boldsymbol {z} ^ {\\top} \\left(\\mathbf {V} _ {t} - \\mathbf {V} _ {t - 1}\\right) \\boldsymbol {z} = \\boldsymbol {z} ^ {\\top} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\boldsymbol {z} \\\\ = \\left(\\psi \\left(\\boldsymbol {x} _ {t}\\right) ^ {\\top} \\boldsymbol {z}\\right) ^ {\\top} \\left(\\psi \\left(\\boldsymbol {x} _ {t}\\right) ^ {\\top} \\boldsymbol {z}\\right) \\tag {23} \\\\ = \\left\\| \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\boldsymbol {z} \\right\\| _ {2} ^ {2} \\\\ \\geq 0. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 126, + 823, + 207 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The first equality comes from the intermediate result in (20). So, $\\mathbf{V}_t - \\mathbf{V}_{t-1}$ is positive semi-definite, i.e., $\\mathbf{V}_{t-1} \\preccurlyeq \\mathbf{V}_t$ . This can also indicate that $\\mathbf{V}_t^{-1} \\preccurlyeq \\mathbf{V}_{t-1}^{-1}$ , which thus completes our proof.", + "bbox": [ + 169, + 215, + 826, + 247 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Lemma B.6 (Lower Bound of Variance Norm). For any $\\pmb{x} \\in \\mathcal{X}$ and any $t \\geq 1$ , the following holds", + "bbox": [ + 169, + 253, + 823, + 268 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n1 / \\left(1 + 1 / \\sigma^ {2}\\right) \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}. \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 273, + 823, + 294 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Proof. 
We firstly show that", + "bbox": [ + 171, + 306, + 356, + 321 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\right\\| _ {2} \\stackrel {(a)} {\\leq} \\left\\| \\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\right\\| _ {2} \\left\\| \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\right\\| _ {2} \\\\ \\stackrel {(b)} {=} \\left\\| \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\right\\| _ {2} ^ {2} \\\\ \\stackrel {(c)} {=} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\\\ \\stackrel {(d)} {=} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1} \\psi (\\boldsymbol {x}) \\tag {25} \\\\ \\stackrel {(e)} {\\leq} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x}) \\\\ \\stackrel {(f)} {\\leq} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {0} ^ {- 1} \\psi (\\boldsymbol {x}) \\\\ \\stackrel {(g)} {=} \\psi (\\boldsymbol {x}) ^ {\\top} \\psi (\\boldsymbol {x}) / \\sigma^ {2} \\\\ \\stackrel {(h)} {=} 1 / \\sigma^ {2}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 276, + 330, + 821, + 541 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Note that $(a)$ derives from the Cauchy-Schwarz inequality. As for $(b)$ and $(c)$ , they have exploited the fact that $\\left(\\mathbf{V}_t^{-1/2}\\psi(\\boldsymbol{x})\\right)^\\top = \\psi(\\boldsymbol{x})^\\top\\mathbf{V}_t^{-1/2}$ and $\\psi(\\boldsymbol{x})^\\top\\mathbf{V}_t^{-1/2}$ is a row vector. In addition, $(e)$ follows from Lemma B.5. Finally, $(g)$ results from $\\mathbf{V}_0^{-1} = \\mathbf{I}/\\sigma^2$ and $(h)$ derives from the assumption that $k(\\boldsymbol{x},\\boldsymbol{x}) \\leq 1$ ( $\\forall \\boldsymbol{x} \\in \\mathcal{X}$ ) in Sec. 
2.1. Alternatively, we can restate the result above as", + "bbox": [ + 169, + 550, + 826, + 622 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\preccurlyeq \\sigma^ {- 2} \\mathbf {I}. \\tag {26}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 628, + 823, + 648 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We then complete our proof on the first inequality in Lemma B.6 using the following inequality:", + "bbox": [ + 169, + 662, + 803, + 679 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\stackrel {(a)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {V} _ {t - 1} + \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(b)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left[ \\mathbf {V} _ {t - 1} ^ {1 / 2} \\left(\\mathbf {I} + \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1 / 2}\\right) \\mathbf {V} _ {t - 1} ^ {1 / 2} \\right] ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(c)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\left(\\mathbf {I} + \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\psi \\left(\\boldsymbol {x} _ {t}\\right) \\psi \\left(\\boldsymbol {x} _ {t}\\right) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1 / 2}\\right) ^ {- 1} \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\phi (\\boldsymbol {x}) \\tag {27} \\\\ \\stackrel {(d)} {\\succcurlyeq} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) / (1 + 1 / \\sigma^ {2}) \\\\ \\stackrel {(e)} {=} \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) / (1 + 1 / \\sigma^ {2}) \\\\ 
\\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 686, + 823, + 816 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $(a)$ derives from (20) and $(c)$ comes from the inversion of matrix product. Finally $(d)$ follows from the result in (26) and $(e)$ exploits the reformulation of $\\partial \\sigma_{t - 1}^2 (\\pmb {x})$", + "bbox": [ + 169, + 824, + 825, + 854 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "According to Lemma B.4 and Lemma B.6, the following holds for any $\\pmb{x} \\in \\mathcal{X}$ and any $t \\geq 1$ ,", + "bbox": [ + 169, + 868, + 782, + 883 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{1 + 1 / \\sigma^ {2}} \\leq \\frac {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}}{\\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\leq 1. \\tag {28}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 890, + 823, + 928 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Based on the definition of $r$ in our Theorem 2, we therefore also have", + "bbox": [ + 171, + 104, + 632, + 118 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\nr \\triangleq \\max _ {\\boldsymbol {x} \\in \\mathcal {X}, t \\geq 1} \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} / \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\in \\left[ 1 / \\sqrt {1 + 1 / \\sigma^ {2}}, 1 \\right]. \\tag {29}\n$$\n", + "text_format": "latex", + "bbox": [ + 276, + 122, + 823, + 150 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As a result, for every iteration $t$ of our Algo. 
2, we have", + "bbox": [ + 171, + 162, + 540, + 178 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\sqrt {\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\| _ {2}} \\leq r \\sqrt {\\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\\\ \\leq r ^ {t} \\sqrt {\\left\\| \\partial \\sigma_ {0} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\tag {30} \\\\ = r ^ {t} \\sqrt {\\| \\partial_ {z} \\partial_ {z ^ {\\prime}} k (z , z ^ {\\prime}) | _ {z = z ^ {\\prime} = x} \\| _ {2}} \\\\ \\leq r ^ {t} \\kappa \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 181, + 823, + 282 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where the last inequality derives from our assumption of $\\| \\partial_z\\partial_{z'}k(z,z')|_{z = z' = x}\\| _2\\leq \\kappa^2$ ( $\\forall \\pmb {x}\\in \\mathcal{X}$ ) in our Sec. 2.1. By substituting the result above into our Theorem 1, we complete our proof of Theorem 2.", + "bbox": [ + 169, + 287, + 826, + 333 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.4 PROOF OF THEOREM 3", + "text_level": 1, + "bbox": [ + 171, + 348, + 375, + 362 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "**Preparation.** Following the definition of the derivative mapping on the true derivative $\\nabla f(\\boldsymbol{x}_{t,\\tau})$ in (8), we defined the following derivative mapping on our estimated derivative $\\nabla \\mu_{t-1}(\\boldsymbol{x}_{t,\\tau})$ :", + "bbox": [ + 169, + 375, + 823, + 405 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {G} _ {t, \\tau} \\triangleq \\frac {\\boldsymbol {x} _ {t , \\tau} - \\boldsymbol {x} _ {t , \\tau + 1}}{\\eta_ {t , \\tau}} = \\frac {\\boldsymbol {x} _ {t , \\tau} - \\mathcal {P} _ {\\chi} (\\boldsymbol {x} _ {t , \\tau} - \\eta_ {t , \\tau} \\nabla \\mu_ {t} (\\boldsymbol {x} _ {t , \\tau}))}{\\eta_ {t , \\tau}}. 
\\tag {31}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 407, + 823, + 441 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "By re-arranging it, we have the following update rule that has reformulated (7):", + "bbox": [ + 171, + 444, + 692, + 459 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {x} _ {t, \\tau + 1} = \\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\widehat {G} _ {t, \\tau}. \\tag {32}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 464, + 823, + 483 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Based on our definition of the derivative mappings in (31) and (8), we introduce the following lemmas:", + "bbox": [ + 171, + 492, + 826, + 508 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma B.7 (General Projection Inequalities). Given $\\mathcal{P}_{\\mathcal{X}}(\\boldsymbol{x}) = \\arg \\min_{\\boldsymbol{z} \\in \\mathcal{X}} \\| \\boldsymbol{x} - \\boldsymbol{z} \\|_2^2 / 2$ and domain $\\mathcal{X}$ , for any $\\boldsymbol{x}, \\boldsymbol{x}'$ , we have", + "bbox": [ + 169, + 523, + 825, + 553 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\boldsymbol {x} - \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\boldsymbol {x} - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2}, \\tag {33}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 556, + 823, + 574 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} \\leq \\left\\| \\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime} \\right\\| _ {2}. \\tag {34}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 575, + 823, + 594 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Proof. 
For (33), as $\\mathcal{P}_{\\mathcal{X}}(\\pmb{x}^{\\prime}) \\in \\mathcal{X} (\\forall \\pmb{x}^{\\prime})$ and $\\mathcal{P}_{\\mathcal{X}}(\\pmb{x}) = \\arg \\min_{\\pmb{z} \\in \\mathcal{X}} \\| \\pmb{x} - \\pmb{z} \\|_2^2 / 2$ , we then naturally have (33).", + "bbox": [ + 169, + 609, + 823, + 638 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For (34), since $\\mathcal{P}_{\\mathcal{X}}(\\pmb{x})$ is the optimum of $h(\\pmb{z}) = \\| \\pmb{x} - \\pmb{z}\\|_2^2 / 2$ , according to the optimality condition of the convex projection function $h(\\pmb{z})$ within the domain $\\pmb{z} \\in \\mathcal{X}$ (Boyd and Vandenberghe, 2014), we then have the following inequality for any $\\mathcal{P}_{\\mathcal{X}}(\\pmb{x}') \\in \\mathcal{X}$ :", + "bbox": [ + 169, + 647, + 826, + 691 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla h (\\boldsymbol {z}) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) - \\boldsymbol {z}\\right) \\geq 0. \\tag {35}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 694, + 823, + 712 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "By taking $\\nabla h(z) = z - x$ with $z = \\mathcal{P}_{\\mathcal{X}}(x)$ into the inequality above, we have", + "bbox": [ + 171, + 717, + 694, + 733 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\boldsymbol {x}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) - \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x})\\right) \\geq 0. 
\\tag {36}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 737, + 823, + 756 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "By exchanging $\\pmb{x}$ and $\\pmb{x}'$ in the result above, we achieve the following similar result:", + "bbox": [ + 171, + 760, + 725, + 776 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right) - \\boldsymbol {x} ^ {\\prime}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\geq 0. \\tag {37}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 780, + 823, + 799 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "By summing (36) and (37),", + "bbox": [ + 171, + 804, + 354, + 818 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\geq \\left\\| \\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}. 
\\tag {38}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 823, + 823, + 843 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Based on the Cauchy-Schwarz inequality, we finally achieve (34) using", + "bbox": [ + 171, + 845, + 642, + 861 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2} \\leq \\left(\\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\tag {39} \\\\ \\leq \\| \\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime} \\| _ {2} \\| \\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 864, + 823, + 905 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where both sides need to be divided by $\\| \\mathcal{P}_{\\mathcal{X}}(\\pmb {x}) - \\mathcal{P}_{\\mathcal{X}}(\\pmb{x}^{\\prime})\\|_{2}$ to complete our proof.", + "bbox": [ + 169, + 907, + 725, + 926 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma B.8 (Inequalities for Derivative Mappings). 
Given (31) and (8), for every $t$ and $\\tau$ , we have", + "bbox": [ + 169, + 103, + 823, + 119 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\leq \\nabla \\mu_ {t - 1} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} \\widehat {G} _ {t, \\tau}, \\tag {40}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 131, + 823, + 159 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| G _ {t, \\tau} \\right\\| _ {2} \\leq \\left\\| \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) \\right\\| _ {2}, \\tag {41}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 162, + 823, + 178 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\widehat {G} _ {t, \\tau} - G _ {t, \\tau} \\right\\| _ {2} \\leq \\| \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau}) \\| _ {2}. \\tag {42}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 181, + 823, + 208 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. 
For (40), let $\\widehat{\\pmb{x}}_{t,\\tau} = \\pmb{x}_{t,\\tau} - \\eta_{t,\\tau}\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau})$ , we then have", + "bbox": [ + 169, + 238, + 609, + 253 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) \\right\\| _ {2} ^ {2} - \\left(\\boldsymbol {x} _ {t, \\tau} - \\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right)\\right) \\\\ \\stackrel {(a)} {=} \\left\\| \\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1} \\right\\| _ {2} ^ {2} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} \\left(\\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1}\\right) \\tag {43} \\\\ \\stackrel {(b)} {=} \\eta_ {t, \\tau} ^ {2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} - \\eta_ {t, \\tau} ^ {2} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} \\\\ \\begin{array}{c} \\stackrel {(c)} {\\leq} 0 \\end{array} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 268, + 823, + 362 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $(a)$ results from the fact that $\\pmb{x}_{t,\\tau +1} = \\mathcal{P}_{\\mathcal{X}}\\left(\\pmb{x}_{t,\\tau} - \\eta_{t,\\tau}\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau})\\right)$ based on our (7) and $(b)$ derives from the definition of $\\widehat{G}_{t,\\tau}$ in (31). 
In addition, $(c)$ is based on the following result by substituting $\\pmb {x} = \\pmb{x}_{t,\\tau}$ and $\\pmb{x}^{\\prime} = \\widehat{\\pmb{x}}_{t,\\tau}$ into (38):", + "bbox": [ + 169, + 376, + 823, + 422 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) \\right\\| _ {2} ^ {2} - \\left(\\boldsymbol {x} _ {t, \\tau} - \\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right)\\right) \\leq 0. \\tag {44}\n$$\n", + "text_format": "latex", + "bbox": [ + 251, + 436, + 823, + 455 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Finally, by dividing $\\eta_{t,\\tau}^2$ on the both sides of the last inequality in (43), we finish the proof for (40).", + "bbox": [ + 169, + 468, + 820, + 484 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For (41), following the same proof above, we can also obtain the following inequality for the projected derivative $G_{t,\\tau}$ :", + "bbox": [ + 169, + 489, + 823, + 518 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| G _ {t, \\tau} \\right\\| _ {2} ^ {2} \\leq \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} G _ {t, \\tau} \\leq \\left\\| \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) \\right\\| _ {2} \\left\\| G _ {t, \\tau} \\right\\| _ {2}. 
\\tag {45}\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 532, + 823, + 551 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We complete the proof for (41) by dividing $\\| G_{t,\\tau}\\| _2$ on the both sides of the inequality above.", + "bbox": [ + 169, + 564, + 785, + 580 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For (42), define $\\boldsymbol{x}_{t,\\tau +1}^{\\prime}\\triangleq \\boldsymbol{x}_{t,\\tau} - \\eta_{t,\\tau}G_{t,\\tau}$ , we have", + "bbox": [ + 169, + 588, + 514, + 604 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\widehat {G} _ {t, \\tau} - G _ {t, \\tau} \\right\\| _ {2} \\stackrel {(a)} {=} \\frac {1}{\\eta_ {t , \\tau}} \\left\\| \\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1} - \\left(\\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1} ^ {\\prime}\\right) \\right\\| _ {2} \\\\ \\stackrel {(b)} {=} \\frac {1}{\\eta_ {t , \\tau}} \\left\\| \\boldsymbol {x} _ {t, \\tau + 1} - \\boldsymbol {x} _ {t, \\tau + 1} ^ {\\prime} \\right\\| _ {2} \\\\ \\stackrel {(c)} {=} \\frac {1}{\\eta_ {t , \\tau}} \\| \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau})\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla f (\\boldsymbol {x} _ {t, \\tau})\\right) \\| _ {2} \\tag {46} \\\\ \\stackrel {(d)} {\\leq} \\frac {1}{\\eta_ {t , \\tau}} \\left\\| \\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - (\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla f (\\boldsymbol {x} _ {t, \\tau})) \\right\\| _ {2} \\\\ \\stackrel {(e)} {=} \\left\\| \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau}) \\right\\| _ {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 619, + 823, + 780 
+ ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $(a)$ comes from the definition of $\\widehat{G}_{t,\\tau}$ and $G_{t,\\tau}$ in (31) and (8), respectively. In addition, $(c)$ derives from (7) and (8). Finally, $(d)$ results from (34).", + "bbox": [ + 169, + 794, + 823, + 824 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. Since the objective function $f$ is assumed to be $L_{s}$ -Lipschitz smooth (Sec. 4.2), we have the following inequality for any $x_{t,\\tau} \\in \\mathcal{X}$ in our ZoRD algorithm:", + "bbox": [ + 169, + 856, + 823, + 886 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\nf \\left(\\boldsymbol {x} _ {t, \\tau + 1}\\right) - f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) \\leq \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} \\left(\\boldsymbol {x} _ {t, \\tau + 1} - \\boldsymbol {x} _ {t, \\tau}\\right) + \\frac {L _ {s}}{2} \\| \\boldsymbol {x} _ {t, \\tau + 1} - \\boldsymbol {x} _ {t, \\tau} \\| _ {2} ^ {2}. \\tag {47}\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 898, + 823, + 929 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Let $\\delta' \\in (0,1)$ . 
Define $\\beta \\triangleq \\sqrt{d + 2(\\sqrt{d} + 1) \\ln(1 / \\delta')}$ , by substituting (32) into the inequality above, the following inequality holds with probability of at least $1 - \\delta'$ :", + "bbox": [ + 169, + 102, + 826, + 140 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nf (\\boldsymbol {x} _ {t, \\tau + 1}) - f (\\boldsymbol {x} _ {t, \\tau})\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 145, + 339, + 162 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\stackrel {(a)} {\\leq} - \\eta_ {t, \\tau} \\nabla f (\\pmb {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 165, + 478, + 196 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\stackrel {(b)} {=} \\eta_ {t, \\tau} \\left(\\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau})\\right) ^ {\\top} \\widehat {G} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 199, + 756, + 229 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\stackrel {(c)} {=} \\eta_ {t, \\tau} \\left[ (\\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau})) ^ {\\top} (\\widehat {G} _ {t, \\tau} - G _ {t, \\tau}) + (\\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau})) ^ {\\top} G _ {t, \\tau} \\right] \\\\ - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, 
\\tau} \\right\\| _ {2} ^ {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 232, + 787, + 294 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\stackrel {(d)} {\\leq} \\eta_ {t, \\tau} \\left[ \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\left\\| \\widehat {G} _ {t, \\tau} - G _ {t, \\tau} \\right\\| _ {2} + \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\| G _ {t, \\tau} \\| _ {2} \\right] \\\\ - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 296, + 816, + 359 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\stackrel {(e)} {\\leq} \\eta_ {t, \\tau} \\left[ \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} ^ {2} + \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\| \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\right] \\\\ - \\frac {2 \\eta_ {t , \\tau} - L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 363, + 745, + 426 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\stackrel {(f)} {\\leq} \\eta_ {t, \\tau} \\kappa^ {2} \\beta^ {2} r ^ {2 t} + \\eta_ {t, \\tau} L _ {c} \\kappa \\beta r ^ {t} - \\frac {\\eta_ {t , \\tau}}{2} \\| \\widehat {G} _ {t, \\tau} \\| _ {2} ^ {2} \\tag {48}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 430, + 823, + 470 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $(d)$ derives from the Cauchy-Schwarz inequality and $(e)$ 
follows from Lemma B.7. Finally, $(f)$ results from the bounded derivative estimation error in Theorem 2 and the fact that $f$ is $L_{c}$ -Lipschitz continuous (i.e., $\\| \\nabla f(\\pmb{x})\\|_2 \\leq L_c$ for any $\\pmb{x} \\in \\mathcal{X}$ ) and $\\eta_{t,\\tau} \\leq 1 / L_s (\\forall \\tau)$ .", + "bbox": [ + 169, + 470, + 825, + 515 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For every iteration $t$ of our ZoRD algorithm, we in fact will apply the virtual updates (7) for $V_{t}$ times (see Algo. 2). Therefore, for probability $\\geq 1 - V_t\\delta'$ , we have", + "bbox": [ + 169, + 518, + 823, + 550 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {1}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\leq \\frac {2}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\left[ f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - f \\left(\\boldsymbol {x} _ {t, \\tau + 1}\\right) + \\eta_ {t, \\tau} \\left(\\kappa^ {2} \\beta^ {2} r ^ {2 t} + L _ {c} \\kappa \\beta r ^ {t}\\right) \\right] \\tag {49} \\\\ = \\frac {2}{V _ {t}} \\left[ f \\left(\\boldsymbol {x} _ {t - 1}\\right) - f \\left(\\boldsymbol {x} _ {t}\\right) \\right] + \\left(\\frac {2}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau}\\right) \\left(\\kappa^ {2} \\beta^ {2} r ^ {2 t} + L _ {c} \\kappa \\beta r ^ {t}\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 556, + 823, + 641 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where the first inequality results from (48) by re-arranging it and then summing it up over $\\tau$ .", + "bbox": [ + 169, + 645, + 743, + 660 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "However, in order to prove the convergence of our ZoRD algorithm to a stationary point, we need to consider the derivative mapping of $G_{t,\\tau}$ instead (refer to our Sec. 4.2). 
So, for any $\\tau$, we propose the following inequality:", + "bbox": [ + 169, + 666, + 823, + 709 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| G _ {t, \\tau} \\right\\| _ {2} = \\left\\| G _ {t, \\tau} - \\widehat {G} _ {t, \\tau} + \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\\\ \\leq \\left\\| G _ {t, \\tau} - \\widehat {G} _ {t, \\tau} \\right\\| _ {2} + \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\tag {50} \\\\ \\leq \\left\\| \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau}) \\right\\| _ {2} + \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\\\ \\leq \\kappa \\beta r ^ {t} + \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 715, + 823, + 825 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where the first inequality is from the triangle inequality and the second inequality comes from (42). Finally, by taking the result above into (49), we have", + "bbox": [ + 169, + 830, + 823, + 859 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\frac {2}{V _ {t}} \\left[ f \\left(\\boldsymbol {x} _ {t - 1}\\right) - f \\left(\\boldsymbol {x} _ {t}\\right) \\right] + \\left(\\frac {2}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau}\\right) \\left(\\kappa^ {2} \\beta^ {2} r ^ {2 t} + L _ {c} \\kappa \\beta r ^ {t}\\right) + \\kappa \\beta r ^ {t}. 
\\tag {51}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 864, + 823, + 920 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Then, substituting $V_{t} = V$ and $\\eta_{t,\\tau} = \\eta$ for any $t,\\tau$ into the result above, the following inequality holds with probability of at least $1 - VT\\delta^{\\prime}$ when $r < 1$ :", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\eta \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\stackrel {(a)} {\\leq} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\left(\\frac {2 (f (\\boldsymbol {x} _ {t - 1}) - f (\\boldsymbol {x} _ {t}))}{V} + 2 \\eta \\kappa^ {2} \\beta^ {2} r ^ {2 t} + (2 \\eta L _ {c} + 1) \\kappa \\beta r ^ {t}\\right) \\\\ \\stackrel {(b)} {\\leq} \\frac {2}{T V} \\left[ f (\\boldsymbol {x} _ {0}) - f (\\boldsymbol {x} _ {T}) \\right] + \\frac {2 \\eta (1 - r ^ {2 T})}{T (1 - r ^ {2})} \\kappa^ {2} \\beta^ {2} r ^ {2} \\\\ + \\frac {(2 \\eta L _ {c} + 1) (1 - r ^ {T})}{T (1 - r)} \\kappa \\beta r \\\\ \\stackrel {(c)} {\\leq} \\frac {2}{T V} \\left[ f \\left(\\boldsymbol {x} _ {0}\\right) - f \\left(\\boldsymbol {x} ^ {*}\\right) \\right] + \\frac {2 \\eta \\kappa^ {2} \\beta^ {2} r ^ {2}}{T \\left(1 - r ^ {2}\\right)} + \\frac {\\left(2 \\eta L _ {c} + 1\\right) \\kappa \\beta r}{T (1 - r)}. 
\\tag {52} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 142, + 823, + 301 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Note that $(b)$ derives from the summation of the geometric sequence about $r$ and $(c)$ comes from $\\pmb{x}^{*}\\triangleq \\arg \\min_{\\pmb{x}\\in \\mathcal{X}}f(\\pmb {x})$ . When $r = 1$ , the following holds with probability of at least $1 - VT\\delta^{\\prime}$ accordingly:", + "bbox": [ + 169, + 301, + 823, + 345 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\eta \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\left(\\frac {2 (f (\\boldsymbol {x} _ {t - 1}) - f (\\boldsymbol {x} _ {t}))}{V} + 2 \\eta \\kappa^ {2} \\beta^ {2} r ^ {2 t} + (2 \\eta L _ {c} + 1) \\kappa \\beta r ^ {t}\\right) \\tag {53} \\\\ = \\frac {2}{T V} \\left[ f \\left(\\boldsymbol {x} _ {0}\\right) - f \\left(\\boldsymbol {x} _ {T}\\right) \\right] + 2 \\eta \\kappa^ {2} \\beta^ {2} + (2 \\eta L _ {c} + 1) \\kappa \\beta . 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 354, + 821, + 425 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Finally, let $\\delta = VT\\delta' \\in (0,1)$ , the following holds with probability of at least $1 - \\delta$", + "bbox": [ + 169, + 439, + 728, + 455 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\min _ {t \\leq T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\tag {54} \\\\ \\leq ① + ② \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 464, + 823, + 522 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "where ① and ② can be defined as below with $\\alpha \\triangleq \\kappa \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(VT / \\delta)}$", + "bbox": [ + 169, + 532, + 710, + 558 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} ① = \\frac {2 / \\eta}{T V} [ f (\\boldsymbol {x} _ {0}) - f (\\boldsymbol {x} _ {T}) ] \\\\ ② = \\left\\{ \\begin{array}{l l} 2 \\alpha^ {2} r ^ {2} / [ T (1 - r ^ {2}) ] + (2 L _ {c} + 1 / \\eta) \\alpha r / [ T (1 - r) ] & (r < 1), \\\\ 2 \\alpha^ {2} + (2 L _ {c} + 1 / \\eta) \\alpha & (r = 1). \\end{array} \\right. 
\\tag {55} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 565, + 823, + 631 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "APPENDIX C EXPERIMENTAL SETTINGS", + "text_level": 1, + "bbox": [ + 171, + 102, + 524, + 118 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C.1 GENERAL SETTINGS", + "text_level": 1, + "bbox": [ + 171, + 133, + 362, + 148 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Derived GP. Among all our experiments in Sec. 5, to apply the derivative estimation in Sec. 3.1 for every iteration $t$ and every step $\\tau$ of our ZoRD algorithm, we use the derived GP (4) based on the Matérn kernel with $\\nu = 2.5$ and fit this derived GP using 150 queries that achieves the smallest Euclidean distance with input $\\boldsymbol{x}_{t,\\tau}$ from the optimization trajectory. This is because we only need to model the objective function $f$ in the vicinity of input $\\boldsymbol{x}_{t,\\tau}$ precisely rather than the entire domain, so as to achieve an accurate derivative estimation at input $\\boldsymbol{x}_{t,\\tau}$ .", + "bbox": [ + 169, + 157, + 826, + 243 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Confidence Threshold. Among all our experiments in Sec. 5, the confidence threshold $c$ of our dynamic virtual updates (Sec. 3.2) is set to be 0.35 in order to realize a good trade-off between query efficiency and accurate derivative estimation in practice, which can already allow our ZoRD to achieve compelling empirical results consistently (see our Sec. 5). In light of this, $c = 0.35$ would be a reasonably good choice in practice, especially when there is no prior knowledge about the objective functions. 
When we have prior knowledge about the smoothness of the objective functions, we can likely make a better choice for $c$ : Intuitively, smooth objective functions usually can be modeled by the Gaussian process effectively (Rasmussen and Williams, 2006), so an accurate derivative estimation from our derived GP is also likely to be achieved. In this scenario, a large confidence threshold can be applied to fully exploit the benefit of our derivative estimation that is free from the requirement for additional queries and consequently results in an improved query efficiency in practice.", + "bbox": [ + 169, + 257, + 826, + 426 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Baselines. In addition, among all our experiments in Sec. 5, we consistently use $n = 10$ , $\\lambda = 0.01$ and directions $\\{u_i\\}_{i=1}^n$ that are randomly sampled from a unit sphere for the derivative estimation of the FD method (2) applied in the RGF and PRGF algorithm. Moreover, following the common practice of (Berahas et al., 2022; Cheng et al., 2021), we conduct orthogonalization on these randomly selected directions via the Gram-Schmidt procedure. As for the ES algorithm (e.g., the one applied in Salimans et al., 2017), we apply the same $n$ , $\\lambda$ and $\\{u_i\\}_{i=1}^n$ in RGF and PRGF for their update in every iteration.", + "bbox": [ + 169, + 439, + 823, + 537 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Domain Transformation. 
Following the practice that has been used in (Eriksson et al., 2019), for all our experiments, we firstly re-scale the input domains into $[0,10]^d$ to ease the optimization and then re-scale the updated inputs back to the original domains for querying.", + "bbox": [ + 169, + 551, + 823, + 597 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C.2 SYNTHETIC EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 611, + 405, + 626 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Let input $\\pmb{x} = [x_i]_{i=1}^d$ , the Ackley and Levy function applied in our synthetic experiments are given below,", + "bbox": [ + 169, + 637, + 823, + 665 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} f (\\pmb {x}) = - 2 0 \\exp \\left(- 0. 2 \\sqrt {\\frac {1}{d} \\sum_ {i = 1} ^ {d} x _ {i} ^ {2}}\\right) - \\exp (\\frac {1}{d} \\sum_ {i = 1} ^ {d} \\cos (2 \\pi x _ {i})) + 2 0 + \\exp (1), (\\mathrm {A c k l e y}) \\\\ f (\\boldsymbol {x}) = \\sin^ {2} \\left(\\pi w _ {1}\\right) + \\sum_ {i = 1} ^ {d - 1} \\left(w _ {i} - 1\\right) ^ {2} \\left[ 1 + 1 0 \\sin^ {2} \\left(\\pi w _ {i} + 1\\right) \\right] + \\left(w _ {d} - 1\\right) ^ {2} \\left[ 1 + \\sin^ {2} \\left(2 \\pi w _ {d}\\right) \\right] (\\text {L e v y}) \\tag {56} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 672, + 833, + 777 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $w_{i} = 1 + (x_{i} - 1) / 4$ for any $i = 1, \\dots, d$ , Ackley function achieves its minimum (i.e., $\\min f(\\pmb{x}) = 0$ ) at $\\pmb{x}^{*} = \\mathbf{0}$ , and Levy function achieves its minimum (i.e., $\\min f(\\pmb{x}) = 0$ ) at $\\pmb{x}^{*} = \\mathbf{1}$ . Note that the Ackley and Levy function for the synthetic experiments in our Sec. 5.2 are defined within the domain $[-20, 20]^d$ and $[-7.5, 7.5]^d$ , respectively. 
To give a better understanding of these two synthetic functions, we provide a 3D illustration of these two synthetic functions with $d = 2$ in our Fig. 5. As shown in Fig. 5, these two synthetic functions are highly nonconvex and therefore have local minimums within their domains.", + "bbox": [ + 169, + 777, + 826, + 875 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To compare our ZoRD algorithm with other ZO/FO optimization baselines in Sec. 5.2, we firstly employ TuRBO of 300 queries to find a good initialization for all other ZO/FO optimization algorithms in Fig. 3 because of the nonconvexity of these two synthetic functions as shown in Fig. 5. We then", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/b0e0d1d63258389a9f2e3355c68706e372600cd18875473bfd73884f8529d368.jpg", + "image_caption": [ + "(a) Ackley function $(d = 2)$" + ], + "image_footnote": [], + "bbox": [ + 202, + 127, + 478, + 289 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/c9ee5ee5c05e61235de5bd32e3a6c3a0fd4c99fefd40dfd5cadec77b30de8e1a.jpg", + "image_caption": [ + "(b) Levy function $(d = 2)$", + "Figure 5: The 3D illustration of Ackley and Levy synthetic function with $d = 2$ ." + ], + "image_footnote": [], + "bbox": [ + 496, + 125, + 782, + 290 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "apply these ZO/FO optimization algorithms with a query budget of 200 for $d = 20,40$ , and a query budget of 400 for $d = 100$ to compare their query efficiency. 
We use the same Adam optimizer (Kingma and Ba, 2015) with a learning rate of 0.1 and exponential decay rates of 0.9, 0.999 for RGF, PRGF, GD, and our ZoRD algorithm, for faster convergence compared with standard GD.", + "bbox": [ + 169, + 364, + 826, + 421 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.3 BLACK-BOX ADVERSARIAL ATTACK", + "text_level": 1, + "bbox": [ + 171, + 436, + 473, + 450 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For the black-box adversarial attack experiment on the MNIST dataset, we use the same fully trained deep neural networks from (Cheng et al., 2021) and adopt a $L_{\infty}$ constraint of $\| x\|_{\infty} \leq 0.3$ on the input perturbation $x$ . For the black-box adversarial attack experiment on the CIFAR-10 dataset, we fully train a ResNet-18 (He et al., 2016) on CIFAR-10 using stochastic gradient descent (SGD) with a cosine annealed learning rate from 0.1 to 0, a momentum of 0.9 and a weight decay of $5 \times 10^{-4}$ for 200 epochs, and adopt a $L_{\infty}$ constraint of $\| x\|_{\infty} \leq 0.2$ on the input perturbation $x$ . Note that we use the same loss function as (Cheng et al., 2021) for these two experiments. Meanwhile, to apply RGF, PRGF and our ZoRD, we adopt Adam optimizer with the same learning rate of 0.5 and the same exponential decay rates of 0.9, 0.999.", + "bbox": [ + 169, + 462, + 826, + 589 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.4 NON-DIFFERENTIABLE METRIC OPTIMIZATION", + "text_level": 1, + "bbox": [ + 171, + 604, + 547, + 618 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The Covertype dataset used in Sec. 5.4 is a classification dataset consisting of 581,012 samples from 7 different categories. Each sample from this dataset is a 54-dimensional vector of integers. In this experiment, we randomly split the dataset into training and test sets with each containing 290,506 samples. The MLP classifier applied in Sec. 
5.4 consists of 2 layers with 30 and 14 hidden neurons respectively, leading to 2189 parameters in total (i.e., $d = 2189$ ). We first train this MLP classifier on the training dataset of Covertype using the L-BFGS algorithm with the cross-entropy loss function for 300 epochs, and then apply ZO optimization algorithms to fine-tune our trained MLP directly on the non-differentiable metrics (i.e., using these metrics as the new loss functions), including precision, recall, F1 score and Jaccard index. To obtain the results of ES, RGF, PRGF and our ZoRD algorithm in Sec. 5.4, we apply the same Adam optimizer with a learning rate of 0.2 (for precision and recall) or 0.01 (for F1 score and Jaccard index) and exponential decay rates of 0.9, 0.999. Note that standard BO algorithms (including TuRBO) fail to achieve any percentage improvements (i.e., achieving $0\%$ in the $y$ -axis of Fig. 4) in this experiment according to our five independent runs, which is likely due to their aggressive exploration in the input domain of such a high dimension. In light of this, we do not include them in our comparison since all other methods are able to achieve certain improvements.", + "bbox": [ + 169, + 630, + 826, + 840 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.5 DERIVATIVE-FREE REINFORCEMENT LEARNING", + "text_level": 1, + "bbox": [ + 171, + 856, + 552, + 869 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Our derivative-free RL experiments aim to learn controllers (which output policies) that maximize the rewards/return for several environments in the OpenAI Gym (Brockman et al., 2016) without using true derivatives. 
Specifically, we need to optimize the parameters (i.e., $\\pmb{x}$ ) of our neural network", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/862b4420dc1d70ae57163d5472677955d97a1976dc25b374264b20bdb31e12b3.jpg", + "table_caption": [ + "Table 2: OpenAI Gym environment properties and their respective network dimensions." + ], + "table_footnote": [], + "table_body": "
AcrobotSwimmerLunarBipedalWalkerWalker2DHalfCheetah
|S|688241717
|A|324466
d213222244404356356
", + "bbox": [ + 207, + 126, + 787, + 200 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/5a7d1bbf305590bb2af7e8bba9a225b7fd0ad19266e15b18fe05afaa121eb7d6.jpg", + "image_caption": [ + "(a) Results under various input dimension $d$ and fixed Matérn $(\\nu = 2.5)$" + ], + "image_footnote": [], + "bbox": [ + 178, + 222, + 336, + 324 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/e4db9ac701ea0d5db4b1bfa995d0e6745c35bf8b97f84fd737365695a804f4fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 222, + 496, + 324 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/a4b6a796b2aa7640a090a6f445bfc96f611ca27f230047a4e08bfcc586707cd4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 222, + 653, + 324 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/144518b2e441f6af2314bb12e4117add41f33fa6303d6a9ae7ed7f4fabe6bca1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 653, + 222, + 818, + 324 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/7a7ee12346d2472c051f97e3d677997abe714ac9b5f0ebbd49eb7b00d81a613c.jpg", + "image_caption": [ + "(b) Results under various kernels and fixed input dimension $d = 80$" + ], + "image_footnote": [], + "bbox": [ + 178, + 349, + 336, + 450 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/812e9f1fba703004483cd973f28051ce4a725aa19248f8a12cf2fdfb4837a789.jpg", + "image_caption": [ + "Figure 6: Comparison of the derivative estimation errors of our derived GP-based estimator (GP) and the FD estimator under various input dimensions and kernels. Similarly, each result is reported with the mean $\\pm$ standard error from five independent runs." 
+ ], + "image_footnote": [], + "bbox": [ + 336, + 349, + 496, + 450 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/a52706ab4ce98f071ef857a5fc8f1a78ec831d2c37c97d0c67c3318e2152720b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 349, + 653, + 450 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/14c28dc5df8f111e0503498123550176d4b3695b56b3122a7b23680df813e4bd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 653, + 349, + 818, + 450 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "(MLP) controller with 2 hidden layers, where each hidden layer has 10 hidden neurons and one bias term. We adopt a $L_{\\infty}$ constraint of $\\| x \\|_{\\infty} \\leq 1$ on the parameters $x$ . We use a softmax output layer for the policies that deal with discrete action spaces, and a tanh output layer for the policies that deal with continuous action spaces. The dimension of neural network parameters (represented as a column vector) $d$ is determined by the dimensions of both the observation $|S|$ and the action space $|A|$ of an environment, as detailed in Tab. 2.", + "bbox": [ + 169, + 555, + 823, + 638 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In order to search for policies that are robust to different random state initializations, we use the vectorized API of OpenAI Gym, and our observed function value $y(\\pmb{x})$ given the network parameters $\\pmb{x}$ is an averaged return of 32 parallel environments. We also fix the seed of OpenAI Gym for all queries, which ensures that we are evaluating on a fixed set of 32 state initializations and that our results can be reproduced. We first initialize a sample of 500 points from a Latin Hypercube (McKay et al., 1979) to find a good initial input, and then proceed to apply ZO optimization algorithms (i.e., ES, RGF, PRGF, and our ZoRD) with the same query budget of 1000 on this initial input. 
For all these ZO optimization algorithms, we employ the same Adam optimizer with a learning rate of 1.0 and exponential decay rates of 0.9, 0.999. Considering the prohibitive noise in RL experiments, we use 300 queries from the optimization trajectory that has the smallest Euclidean distance with an input needing to be updated. Of note, we conduct 10 trials in total where each trial differs from each other by both the OpenAI Gym seed and the Latin Hypercube initializations.", + "bbox": [ + 169, + 646, + 826, + 813 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "APPENDIX D MORE RESULTS", + "text_level": 1, + "bbox": [ + 171, + 835, + 437, + 851 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "D.1 MORE RESULTS ON DERIVATIVE ESTIMATION", + "text_level": 1, + "bbox": [ + 171, + 869, + 537, + 883 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Besides the comparison in Fig. 2, we provide additional comparison between our derived GP-based estimator (6) and the FD estimator (2) under various input dimensions in Fig. 
6(a) and various kernels", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/506bd4061210fea2f99ae60d4e95c5529075217a130b04aba70996c846bf56fe.jpg", + "image_caption": [ + "(a) Ackley $(d = 40)$" + ], + "image_footnote": [], + "bbox": [ + 212, + 104, + 464, + 258 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/3085ded5b3b99d83f3c343bb1f239baf5b01ccacfaac1f7ed929118e49db7035.jpg", + "image_caption": [ + "(b) Levy $(d = 40)$", + "Figure 7: Comparison of our ZoRD algorithm using different confidence thresholds $c$ for its dynamic virtual updates, where the $x$ -axis and the $y$ -axis denote the number of function queries and the log-scaled optimality gap (i.e., $\\log (f(\\boldsymbol{x}_T) - f(\\boldsymbol{x}^*))$ ) achieved with this number of queries, respectively." + ], + "image_footnote": [], + "bbox": [ + 511, + 104, + 774, + 258 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "in Fig. 6(b) using the Ackley function. We adopt the same setting in Sec. 5.2. Interestingly, Fig. 6(a)(b) show that under various input dimensions and GP kernels, our derived GP-based estimator (6) is still able to achieve faster reduction rates compared with the FD estimator. Of note, all the function queries applied in our derived GP-based estimator is from the optimization trajectory whereas the FD estimator requires additional function queries for its derivative estimation. So, Fig. 
6(a)(b) also show that our derived GP method is still able to achieve improved query efficiency for accurate derivative estimation than FD method under various input dimensions and GP kernels because our method avoids the requirement of additional queries for derivative estimation. Interestingly, the objective function (i.e., the Ackley function) is not truly sampled from the GPs based on these kernels. This therefore means that though we have assumed that we need the prior knowledge about the GP in which the objective function is sampled from (Sec. 2.1), such an assumption does not really need to be satisfied for our derived GP-based method to achieve accurate derivative estimation in practice. More interestingly, we notice that Matérn( $\\nu = 0.5$ ) and SE kernel will achieve slightly worse derivative estimation, indicating that the choice of GP kernels may impact the quality of our derived GP-based derivative estimation. However, in practice, our derived GP method based on Matérn( $\\nu = 2.5$ ) kernel, which has been widely adopted in our experiments, is already able to provide us with good derivative estimation for ZO optimization as confirmed by the results in our other experiments.", + "bbox": [ + 169, + 371, + 826, + 609 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "D.2 MORE RESULTS ON SYNTHETIC EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 636, + 545, + 648 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In this section, we compare ZoRD with more baselines in Fig. 8. Notably, we mainly compare our ZoRD with CobBO (based on the code implementation provided by (Tan et al., 2021)) since CobBO generally performs better than other baselines, e.g., TPE, ATPE, and BADS according to (Tan et al., 2021). As shown in the results in Fig. 
8, our ZoRD algorithm is still able to outperform the other benchmark BO algorithm (i.e., CobBO).", + "bbox": [ + 169, + 666, + 826, + 736 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We then investigate the impacts of the dynamic virtual updates (Sec. 3.2) on our ZoRD algorithm. In particular, we apply the same setting in Appx. C.2 to optimize the Ackley and Levy function with $d = 40$ under various confidence thresholds $c$ for our dynamic virtual updates. Fig. 7 illustrates the results. As shown in both Fig. 7(a) and (b), our ZoRD algorithm using the technique of dynamic virtual updates (i.e., $c > 0$ ) can consistently achieve improved query efficiency compared with the one not using the technique of dynamic virtual updates (i.e., $c = 0$ ). This indicates the essence of dynamic virtual updates in helping improve the query efficiency of our ZoRD algorithm. Such a result actually corroborates our theoretical insights about virtual updates (Sec. 4.2). Remarkably, our ZoRD algorithm without the technique of dynamic virtual updates (i.e., $c = 0$ ) is still able to achieve both improved query efficiency and better converged performance compared with RGF and PRGF, which further verifies the superiority of our derived GP-based derivative estimation. More interestingly, both Fig. 7(a) and Fig. 7(b) have verified that there indeed exists a trade-off for the confidence threshold $c$ as we have discussed in Sec. 
3.2: The confidence threshold $c$ can not be overly", + "bbox": [ + 169, + 743, + 826, + 924 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/3a5ae77dfc64928502d64988d9d7ada9bb748fe47df977f62e176bb212458df3.jpg", + "image_caption": [ + "(a) Ackley $(d = 20)$" + ], + "image_footnote": [], + "bbox": [ + 171, + 97, + 326, + 200 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/9dcc1dcc291916189e66fc43dd79d16af5ecf25b28883a1a34657e40834df2cf.jpg", + "image_caption": [ + "(b) Ackley $(d = 40)$" + ], + "image_footnote": [], + "bbox": [ + 333, + 98, + 488, + 200 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/44df5be0540cd5955577d9bb1ffcfc8ba342b6e05aa5f38561601fd685f8651b.jpg", + "image_caption": [ + "(c) Levy $(d = 40)$" + ], + "image_footnote": [], + "bbox": [ + 493, + 97, + 656, + 200 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/d00e1b941466c717ceb8cb1548c626398cb1884a8f1d10dd7737dd01e6fa80cd.jpg", + "image_caption": [ + "(d) Levy $(d = 100)$" + ], + "image_footnote": [], + "bbox": [ + 661, + 97, + 823, + 200 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7c2290e275f975cb6222affe436898d17018e27c59f33dc602abe96897dd05b6.jpg", + "image_caption": [ + "Figure 8: Additional comparison between our ZoRD and other baselines. The $x$ -axis and $y$ -axis denote the number of queries and log-scaled optimality gap (i.e., $\\log(f(x_T) - f(x^*))$ ) achieved after this number of queries. 
Each curve is the mean $\pm$ standard error from ten independent runs.", + "(a) Success rate on MNIST" + ], + "image_footnote": [], + "bbox": [ + 214, + 295, + 464, + 452 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/3c4d39e576279a3ca3ed2be316b8d745d19a68c19d0f7abc0e89f0a7723b1731.jpg", + "image_caption": [ + "(b) Success rate on CIFAR-10", + "Figure 9: Comparison of the success rate achieved by various ZO optimization algorithms on the 15 images selected from MNIST and CIFAR-10 dataset. Note that the $x$ -axis and the $y$ -axis denote the number of queries and the success rate (within the range of [0, 1]) achieved after this number of queries, respectively." + ], + "image_footnote": [], + "bbox": [ + 514, + 296, + 764, + 453 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "small or excessively large in order to achieve the best query efficiency of our ZoRD algorithm, e.g., $c = 0.3$ for Ackley ( $d = 40$ ) and $c = 0.4$ for Levy ( $d = 40$ ).", + "bbox": [ + 169, + 571, + 823, + 599 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "D.3 MORE RESULTS ON BLACK-BOX ADVERSARIAL ATTACK", + "text_level": 1, + "bbox": [ + 171, + 618, + 612, + 632 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Besides the comparison in our Sec. 5.3, we also compare the success rate achieved by different ZO optimization algorithms on the 15 images selected from MNIST or CIFAR-10 in Fig. 9. Note that we adopt the same settings in Appx. C.3 for this comparison. Considering the large computational complexity of TuRBO-1/10 algorithm for hard-to-attack images$^3$ which is usually undesirable in practice, we drop the comparison with them in this experiment. Fig. 9 shows that under the same query budget, our ZoRD algorithm is able to achieve considerably improved success rate over other ZO optimization algorithms. 
These results therefore further support the superior query efficiency of our ZoRD algorithm in real-world challenging problems.", + "bbox": [ + 169, + 645, + 826, + 758 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "D.4 MORE RESULTS FOR DERIVATIVE-FREE REINFORCEMENT LEARNING", + "text_level": 1, + "bbox": [ + 171, + 775, + 699, + 789 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Recent years have also witnessed a surging interest in derivative-free reinforcement learning (Salimans et al., 2017; Qian and Yu, 2021), where ZO optimization algorithms are widely applied. In light of this, we also demonstrate the superiority of our ZoRD algorithm in the problem of derivative-free reinforcement learning. Specifically, we adopt the setting in Sec. C.5 to experiment in different RL environments. Tab. 3 summarizes the comparison among different ZO optimization algorithms under", + "bbox": [ + 169, + 801, + 823, + 872 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_footnote", + "text": "$^3$ Bayesian optimization algorithms, including TuRBO-1/10, are widely known to suffer from the prohibitive computational complexity when they need a large number of function queries for optimization, e.g., $T > 1000$ (Rasmussen and Williams, 2006).", + "bbox": [ + 169, + 883, + 823, + 922 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/750a762a54b247101c04fc2cb442cab47fc6f81ba92e9ab9fb4ad6ba03cef77a.jpg", + "table_caption": [ + "Table 3: Comparison of the rewards (larger is better) achieved by various ZO optimization algorithms in different RL environments. Each result is reported with the mean ± standard deviation from ten independent runs." + ], + "table_footnote": [], + "table_body": "
AlgorithmAcrobotSwimmerLunarBipedalWalkerWalker2DHalfCheetah
ES-86.2±11.0176.0±56.8-94.7±24.4-34.7±27.3340.4±143.01042.4±753.9
RGF-83.0±5.6213.2±65.1-93.8±19.1-30.3±40.3368.4±223.11129.3±748.5
PRGF-86.3±9.9218.6±66.2-100.1±16.0-29.9±35.2344.6±152.31083.3±722.2
ZoRD-73.3±2.4280.5±77.6-45.1±38.312.9±37.8729.1±304.21950.5±576.1
", + "bbox": [ + 174, + 154, + 823, + 234 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "the same query budget of 1000. As BO algorithms usually suffer from the prohibitive computational complexity for a large $T$ (Rasmussen and Williams, 2006) and GLD has never been applied in RL, we mainly compare our ZoRD algorithm with ES, RGF and PRGF, which also belongs to the same type of ZO optimization algorithm: GD with estimated derivative. Remarkably, Tab. 3 shows that under the same query budget, our ZoRD algorithm can consistently enjoy improved performance (i.e., highest rewards) than the other ZO optimization algorithms in different RL environments. This further supports the superiority of our ZoRD algorithm to other FD-based ZO optimization algorithms.", + "bbox": [ + 169, + 260, + 826, + 363 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "APPENDIX E DISCUSSIONS", + "text_level": 1, + "bbox": [ + 171, + 380, + 416, + 396 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "E.1 ZORD VS. FD-BASED ZO OPTIMIZATION", + "text_level": 1, + "bbox": [ + 171, + 412, + 508, + 428 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Of note, the novelty of our work in fact lies in its way of exploiting the GP assumption to help design an improved derivative estimation and hence an improved ZO optimization algorithm, which to the best of our knowledge has not been explored theoretically yet in the field of ZO optimization via GD with estimated derivative. That is, at this moment, it is still not known in the literature how existing FD methods can utilize such an assumption to achieve better derivative estimation (i.e., their derivative estimation quality will remain the same), even when they make the same assumption as us. In light of this, the comparison between our derived GP method and the FD method in Sec. 4 is not only necessary but also meaningful to show the advantage of exploiting such an assumption in ZO derivative estimation. 
Importantly, our empirical results further show that such an assumption is in fact not restrictive for our ZoRD to achieve compelling performance in practice. For example, our Fig. 2 and Fig. 6 have shown that our derived GP-based method is able to achieve smaller derivative estimation error than the FD method when the objective functions are not designed to be sampled from a GP with the kernel that we had applied for our derivative estimation. Moreover, the results in our Sec. 5.2, 5.3, 5.4 have shown that our ZoRD is capable of achieving competitive optimization performance for real-world optimization problems where the objective functions are also not designed to be sampled from a GP with the kernel that we had used for our ZoRD.", + "bbox": [ + 169, + 439, + 826, + 662 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Meanwhile, the theoretical challenges of our work lie in the theoretical guarantee on the derivative estimation error of our unique derived GP-based method for any input in the domain as well as the convergence analysis based on such a unique derivative estimation, which to the best of our knowledge have not been studied in the literature. This means that our Thm. 1 and Thm. 2 have provided new developments in the analysis of gradient estimation error and our Thm. 3 will be the first convergence result for GD using our unique derivative estimation method. Interestingly, the bound in our Thm. 3 also improves over the standard ones from (Nesterov and Spokoiny, 2017; Liu et al., 2018b) in several aspects, as discussed in our Sec. 4.2.", + "bbox": [ + 169, + 669, + 826, + 782 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "E.2 ZORD vs. BO", + "text_level": 1, + "bbox": [ + 171, + 799, + 320, + 813 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Our ZoRD algorithm and standard BO algorithms (e.g., GP-UCB) have in fact applied the same GP assumption for their algorithm design. 
That is, however, where the similarity ends. Of note, our ZoRD exploits such an assumption to derive a specific GP (i.e., (4)) for derivative estimation, which is then employed for local exploitation via (projected) GD update. In contrast, BO algorithms utilize such an assumption to construct their acquisition functions for a global optimization that can trade off between exploitation and exploration. In practice, the exploration of BO algorithms is usually query-inefficient, especially for problems with high-dimensional input spaces, and therefore GD with", + "bbox": [ + 169, + 825, + 826, + 926 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/c2355c25734b157f01bf391f9fb3aac55ade306cbdf035b34d06cb49f0588384.jpg", + "image_caption": [ + "Figure 10: Comparison of local derivative estimation (in the input domain of [0, 3]) in our ZoRD and global function approximation (in the input domain of $[-6, 6]$ ) in BO under various number of random function queries." + ], + "image_footnote": [], + "bbox": [ + 176, + 104, + 823, + 333 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "estimated derivatives (especially our ZoRD) is preferred to realize better optimization performances in these problems (see our Sec. 5.2). So, our ZoRD and BO algorithms belong to two different types of ZO optimization algorithms (i.e., GD-type vs. BO-type), where their theoretical analyses are in fact not comparable. In particular, GD-type and BO-type ZO optimization algorithms apply different metrics for their theoretical analyses, e.g., the derivative estimation error as well as the convergence to a stationary point (in the nonconvex case) for GD-type ZO optimization algorithms vs. 
the global asymptotic convergence in terms of the regret for BO-type ZO optimization algorithms. So, it is more reasonable to compare the theory (including the theoretical challenge, the new developments, and the novelty of the convergence result) of our ZoRD with other GD-type ZO optimization algorithms, e.g., the ones using FD methods for their derivative estimation (Nesterov and Spokoiny, 2017; Liu et al., 2018b), as what we have discussed in Sec. E.1.", + "bbox": [ + 169, + 431, + 826, + 585 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "In addition, in contrast to using the GP to model the objective function within the entire domain for global exploration in BO, our derived GP in ZoRD will be applied to estimate the derivative of the objective function for local exploitation by GD as shown in Sec. 3.1. As GD typically optimizes in a local region, our derived GP only needs to estimate the derivative locally, which is known to be much simpler than modeling the objective function within the entire domain in BO especially for objective functions in high-dimensional input spaces. In light of this, the derived GP for derivative estimation (4) in our ZoRD algorithm advances the standard GP in BO in the following aspects:", + "bbox": [ + 169, + 592, + 826, + 691 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "1. Improved Query Efficiency for Estimation. The derived GP in our ZoRD algorithm requires fewer function queries to provide accurate derivative estimation. We provide a visual example in Fig. 10, in which we sample a one-dimensional function $f$ from a GP prior $\\mathcal{GP}(0,k(x,x))$ using the standard SE kernel and then randomly select the same number of queries from the input domain of $[-6,6]$ and $[0,3]$ for standard GP and our derived GP, respectively. As illustrated in Fig. 10, function in a local region (i.e., $x \\in [0,3]$ ) is usually smoother than its counterpart in the entire domain (i.e., $x \\in [-6,6]$ ). 
As a result, with only 4 function queries, our derived GP can already provide accurate estimation to the derivative of this objective function whereas standard GP requires more than 8 function queries to model this objective function accurately in the entire domain.", + "bbox": [ + 210, + 708, + 826, + 849 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "2. Reduced Computational Complexity. Comparing (3) and (5), both the derived GP for derivative estimation in our ZoRD algorithm and the standard GP in BO enjoy a computational complexity of $\\mathcal{O}(n^3)$ with $n$ function queries. However, as a consequence of the improved query efficiency of our derived GP, it is able to require fewer function queries (i.e.,", + "bbox": [ + 210, + 867, + 828, + 926 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "smaller $n$ ) for accurate derivative estimation and hence can enjoy a reduced computational complexity in practice especially when a large number of queries (e.g., $n > 1000$ ) are applied to the standard GP in BO.", + "bbox": [ + 228, + 103, + 826, + 147 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_footnote", + "text": "As introduced in our Appx. C, 150 function queries for our derived GP can already help our ZoRD algorithm to achieve remarkable results in practice (refer to the experiments in our Sec. 
5).", + "bbox": [ + 171, + 896, + 823, + 924 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + } +] \ No newline at end of file diff --git a/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_model.json b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6ef2aeef13fb7cf0e88942c3c76a50dd567ee396 --- /dev/null +++ b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_model.json @@ -0,0 +1,5050 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.83, + 0.147 + ], + "angle": 0, + "content": "ZEROTH-ORDER OPTIMIZATION WITH TRAJECTORYINFORMED DERIVATIVE ESTIMATION" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.171, + 0.581, + 0.185 + ], + "angle": 0, + "content": "Yao Shu*, Zhongxiang Dai*, Weicong Sng, Arun Verma," + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.185, + 0.747, + 0.214 + ], + "angle": 0, + "content": "Dept. of Computer Science, National University of Singapore, Republic of Singapore {shuyao, daizhongxiang, sngweicong, arun}@comp.nus.edu.sg" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.233, + 0.49, + 0.248 + ], + "angle": 0, + "content": "Patrick Jaillet† & Bryan Kian Hsiang Low§" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.248, + 0.629, + 0.263 + ], + "angle": 0, + "content": "Dept. of Electrical Engineering and Computer Science, MIT, USA†" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.263, + 0.75, + 0.279 + ], + "angle": 0, + "content": "Dept. 
of Computer Science, National University of Singapore, Republic of Singapore" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.279, + 0.55, + 0.293 + ], + "angle": 0, + "content": "jaillet@mit.edu, lowkh@comp.nus.edu.sg" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.329, + 0.548, + 0.344 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.363, + 0.77, + 0.629 + ], + "angle": 0, + "content": "Zeroth-order (ZO) optimization, in which the derivative is unavailable, has recently succeeded in many important machine learning applications. Existing algorithms rely on finite difference (FD) methods for derivative estimation and gradient descent (GD)-based approaches for optimization. However, these algorithms suffer from query inefficiency because many additional function queries are required for derivative estimation in their every GD update, which typically hinders their deployment in real-world applications where every function query is expensive. To this end, we propose a trajectory-informed derivative estimation method which only employs the optimization trajectory (i.e., the history of function queries during optimization) and hence can eliminate the need for additional function queries to estimate a derivative. Moreover, based on our derivative estimation, we propose the technique of dynamic virtual updates, which allows us to reliably perform multiple steps of GD updates without reapplying derivative estimation. Based on these two contributions, we introduce the zeroth-order optimization with trajectory-informed derivative estimation (ZoRD) algorithm for query-efficient ZO optimization. We theoretically demonstrate that our trajectory-informed derivative estimation and our ZoRD algorithm improve over existing approaches, which is then supported by our real-world experiments such as black-box adversarial attack, non-differentiable metric optimization, and derivative-free reinforcement learning." 
+ }, + { + "type": "title", + "bbox": [ + 0.174, + 0.663, + 0.338, + 0.679 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.698, + 0.828, + 0.895 + ], + "angle": 0, + "content": "Zeroth-order (ZO) optimization, in which the objective function to be optimized is only accessible by querying, has received great attention in recent years due to its success in many applications, e.g., black-box adversarial attack (Ru et al., 2020), non-differentiable metric optimization (Hiranandani et al., 2021), and derivative-free reinforcement learning (Salimans et al., 2017). In these problems, the derivative of objective function is either prohibitively costly to obtain or even non-existent, making it infeasible to directly apply standard derivative-based algorithms such as gradient descent (GD). In this regard, existing works have proposed to estimate the derivative using the finite difference (FD) methods and then apply GD-based algorithms using the estimated derivative for ZO optimization (Nesterov and Spokoiny, 2017; Cheng et al., 2021). These algorithms, which we refer to as \\( GD \\) with estimated derivatives, have been the most widely applied approach to ZO optimization especially for problems with high-dimensional input spaces, because of their theoretically guaranteed convergence and competitive practical performance. Unfortunately, these algorithms suffer from query inefficiency, which hinders their real-world deployment especially in applications with expensive-to-query objective functions, e.g., black-box adversarial attack." + }, + { + "type": "page_footnote", + "bbox": [ + 0.2, + 0.911, + 0.329, + 0.925 + ], + "angle": 0, + "content": "* Equal contribution." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.961 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.342 + ], + "angle": 0, + "content": "Specifically, one of the reasons for the query inefficiency of existing algorithms on GD with estimated derivatives is that in addition to the necessary queries (i.e., the query of every updated input)1, the FD methods applied in these algorithms require a large number of additional queries to accurately estimate the derivative at an input (Berahas et al., 2022). This naturally begs the question: Can we estimate a derivative without any additional query? A natural approach to achieve this is to leverage the optimization trajectory, which is inherently available as a result of the necessary queries and their observations, to predict the derivatives. However, this requires a non-trivial method to simultaneously \\((a)\\) predict a derivative using only the optimization trajectory (i.e., the history of updated inputs and their observations), and \\((b)\\) quantify the uncertainty of this prediction to avoid using inaccurate predicted derivatives. Interestingly, the Gaussian process (GP) model satisfies both requirements and is hence a natural choice for such a derivative estimation. Specifically, under the commonly used assumption that the objective function is sampled from a GP (Srinivas et al., 2010), the derivative at any input in the domain follows a Gaussian distribution which, surprisingly, can be calculated using only the optimization trajectory. 
This allows us to \\((a)\\) employ the mean of this Gaussian distribution as the estimated derivative, and \\((b)\\) use the covariance matrix of this Gaussian distribution to obtain a principled measure of the predictive uncertainty and the accuracy of this derivative estimation, which together constitute our trajectory-informed derivative estimation (Sec. 3.1)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.347, + 0.828, + 0.515 + ], + "angle": 0, + "content": "Another reason for the query inefficiency of the existing algorithms on GD with estimated derivatives is that every update in these algorithms requires reapplying derivative estimation and hence necessitates additional queries. This can preclude their adoption of a large number of GD updates since every update requires potentially expensive additional queries. Therefore, another question arises: Can we perform multiple GD updates without reapplying derivative estimation and hence without any additional query? To address this question, we propose a technique named dynamic virtual updates (Sec. 3.2). Specifically, thanks to the ability of our method to estimate the derivative at any input in the domain while only using existing optimization trajectory, we can apply multi-step GD updates without the need to reapply derivative estimation and hence without requiring any new query. Moreover, we can dynamically determine the number of steps for these updates by inspecting the aforementioned predictive uncertainty at every step, such that we only perform an update if the uncertainty is small enough (which also indicates that the estimation error is small, see Sec. 4.1)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.521, + 0.829, + 0.689 + ], + "angle": 0, + "content": "By incorporating our aforementioned trajectory-informed derivative estimation and dynamic virtual updates into GD-based algorithms, we then introduce the zeroth-order optimization with trajectory-informed derivative estimation (ZoRD) algorithm for query-efficient ZO optimization. We theoretically bound the estimation error of our trajectory-informed derivative estimation and show that this estimation error is non-increasing in the entire domain as the number of queries is increased and can even be exponentially decreasing in some scenarios (Sec. 4.1). Based on this, we prove the convergence of our ZoRD algorithm, which improves over the existing ZO optimization algorithms that rely on the FD methods for derivative estimation (Sec. 4.2). Lastly, we use extensive experiments, such as black-box adversarial attack, non-differentiable metric optimization, and derivative-free reinforcement learning, to demonstrate that \\((a)\\) our trajectory-informed derivative estimation improves over the existing FD methods and that \\((b)\\) our ZoRD algorithm consistently achieves improved query efficiency compared with previous ZO optimization algorithms (Sec. 5)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.709, + 0.342, + 0.725 + ], + "angle": 0, + "content": "2 PRELIMINARIES" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.74, + 0.334, + 0.755 + ], + "angle": 0, + "content": "2.1 PROBLEM SETUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.767, + 0.827, + 0.81 + ], + "angle": 0, + "content": "Throughout this paper, we use \\(\\nabla\\) and \\(\\partial_{\\pmb{x}}\\) to denote, respectively, the total derivative (i.e., gradient) and partial derivative w.r.t the variable \\(\\pmb{x}\\). 
We consider the minimization of a black-box objective function \\(f:\\mathcal{X}\\to \\mathbb{R}\\), in which \\(\\mathcal{X}\\subset \\mathbb{R}^d\\) is a convex subset of the \\(d\\)-dimensional domain:" + }, + { + "type": "equation", + "bbox": [ + 0.46, + 0.818, + 0.825, + 0.841 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\boldsymbol {x} \\in \\mathcal {X}} f (\\boldsymbol {x}). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.844, + 0.828, + 0.888 + ], + "angle": 0, + "content": "Since we consider ZO optimization, the derivative information is not accessible and instead, we are only allowed to query the inputs in \\(\\mathcal{X}\\). For every queried input \\(\\pmb{x} \\in \\mathcal{X}\\), we observe a corresponding noisy output of \\(y(\\pmb{x}) = f(\\pmb{x}) + \\zeta\\), in which \\(\\zeta\\) is a zero-mean Gaussian noise with a variance of \\(\\sigma^2\\):" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.898, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In practice, it is usually necessary to query every updated input to measure the optimization performance and select the best-performing input. We refer to these queries as necessary queries." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code_caption", + "bbox": [ + 0.172, + 0.099, + 0.44, + 0.129 + ], + "angle": 0, + "content": "Algorithm 1: Standard (Projected) GD with Estimated Derivatives" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.131, + 0.46, + 0.187 + ], + "angle": 0, + "content": "1: Input: Objective function \\( f: \\mathcal{X} \\to \\mathbb{R} \\), initialization \\( \\boldsymbol{x}_0 \\), iteration number \\( T \\), learning rates \\( \\{\\eta_t\\}_{t=1}^T \\), projection function \\( \\mathcal{P}_{\\mathcal{X}}(\\boldsymbol{x}) \\)" + }, + { + "type": "algorithm", + "bbox": [ + 0.182, + 0.187, + 0.394, + 0.199 + ], + "angle": 0, + "content": "2: for iteration \\(t = 1,\\dots ,T\\) do" + }, + { + "type": "algorithm", + "bbox": [ + 0.182, + 0.2, + 0.422, + 0.215 + ], + "angle": 0, + "content": "3: \\(g(\\pmb{x}_{t - 1})\\approx \\nabla f(\\pmb{x}_{t - 1})\\) with (2)" + }, + { + "type": "algorithm", + "bbox": [ + 0.182, + 0.215, + 0.438, + 0.229 + ], + "angle": 0, + "content": "4: \\(\\pmb{x}_t\\gets \\mathcal{P}_{\\mathcal{X}}\\left(\\pmb{x}_{t - 1} - \\eta_{t - 1}g(\\pmb{x}_{t - 1})\\right)\\)" + }, + { + "type": "algorithm", + "bbox": [ + 0.182, + 0.229, + 0.377, + 0.243 + ], + "angle": 0, + "content": "5: Query \\( \\pmb{x}_t \\) to yield \\( y(\\pmb{x}_t) \\)" + }, + { + "type": "algorithm", + "bbox": [ + 0.182, + 0.243, + 0.255, + 0.254 + ], + "angle": 0, + "content": "6: end for" + }, + { + "type": "algorithm", + "bbox": [ + 0.182, + 0.256, + 0.372, + 0.272 + ], + "angle": 0, + "content": "7: Return arg \\(\\min_{\\pmb{x}_{1:T}} y(\\pmb{x})\\)" + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.187, + 0.438, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": 
"code_caption", + "bbox": [ + 0.462, + 0.1, + 0.65, + 0.116 + ], + "angle": 0, + "content": "Algorithm 2: ZORD (Ours)" + }, + { + "type": "algorithm", + "bbox": [ + 0.468, + 0.118, + 0.827, + 0.147 + ], + "angle": 0, + "content": "1: Input: In addition to the parameters in Algo. 1, set the steps of virtual updates \\(\\{V_t\\}_{t=1}^T\\)" + }, + { + "type": "algorithm", + "bbox": [ + 0.469, + 0.147, + 0.681, + 0.159 + ], + "angle": 0, + "content": "2: for iteration \\(t = 1,\\dots ,T\\) do" + }, + { + "type": "algorithm", + "bbox": [ + 0.47, + 0.161, + 0.593, + 0.173 + ], + "angle": 0, + "content": "3: \\( \\pmb{x}_{t,0} \\gets \\pmb{x}_{t-1} \\)" + }, + { + "type": "algorithm", + "bbox": [ + 0.47, + 0.174, + 0.704, + 0.187 + ], + "angle": 0, + "content": "4: for iteration \\(\\tau = 1,\\dots ,V_{t}\\) do" + }, + { + "type": "algorithm", + "bbox": [ + 0.47, + 0.187, + 0.824, + 0.202 + ], + "angle": 0, + "content": "5: \\( \\pmb{x}_{t,\\tau} \\gets \\mathcal{P}_{\\mathcal{X}}(\\pmb{x}_{t,\\tau -1} - \\eta_{t,\\tau -1}\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau -1})) \\)" + }, + { + "type": "algorithm", + "bbox": [ + 0.47, + 0.203, + 0.559, + 0.214 + ], + "angle": 0, + "content": "6: end for" + }, + { + "type": "algorithm", + "bbox": [ + 0.47, + 0.215, + 0.713, + 0.23 + ], + "angle": 0, + "content": "7: Query \\( \\pmb{x}_t = \\pmb{x}_{t,\\tau} \\) to yield \\( y(\\pmb{x}_t) \\)" + }, + { + "type": "algorithm", + "bbox": [ + 0.47, + 0.23, + 0.771, + 0.244 + ], + "angle": 0, + "content": "8: Update (4) using optimization trajectory" + }, + { + "type": "algorithm", + "bbox": [ + 0.47, + 0.244, + 0.543, + 0.256 + ], + "angle": 0, + "content": "9: end for" + }, + { + "type": "algorithm", + "bbox": [ + 0.465, + 0.257, + 0.66, + 0.273 + ], + "angle": 0, + "content": "10: Return arg min \\(\\mathbf{\\mu}_{\\mathbf{x}_{1:T}}y(\\mathbf{x})\\)" + }, + { + "type": "list", + "bbox": [ + 0.465, + 0.118, + 0.827, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + 
"bbox": [ + 0.171, + 0.293, + 0.827, + 0.419 + ], + "angle": 0, + "content": "\\(\\zeta \\sim \\mathcal{N}(0,\\sigma^2)\\). Besides, we adopt a common assumption on \\(f\\) which has already been widely used in the literature of Bayesian optimization (BO) (Srinivas et al., 2010; Kandasamy et al., 2018): we assume that \\(f\\) is sampled from a Gaussian process (GP). A GP \\(\\mathcal{GP}(\\mu (\\cdot),k(\\cdot ,\\cdot))\\), which is characterized by a mean function \\(\\mu (\\cdot)\\) and a covariance function \\(k(\\cdot ,\\cdot)\\), is a stochastic process in which any finite subset of random variables follows a multi-variate Gaussian distribution (Rasmussen and Williams, 2006). In addition, following the common practice of GP and BO, we assume w.l.o.g. that \\(\\mu (\\pmb {x}) = 0\\) and \\(k(\\pmb {x},\\pmb{x}^{\\prime})\\leq 1\\) \\((\\forall \\pmb {x},\\pmb{x}^{\\prime}\\in \\mathcal{X})\\). We also assume that the kernel function \\(k\\) is differentiable, and that \\(\\| \\partial_z\\partial_{z'}k(z,z')|_{z = z' = x}\\| _2\\leq \\kappa^2\\), \\(\\forall \\pmb {x}\\in \\mathcal{X}\\) for some \\(\\kappa >0\\). This is satisfied by most commonly used kernels such as the squared exponential (SE) kernel (Rasmussen and Williams, 2006)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.435, + 0.573, + 0.449 + ], + "angle": 0, + "content": "2.2 ZO OPTIMIZATION WITH ESTIMATED DERIVATIVES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.461, + 0.827, + 0.573 + ], + "angle": 0, + "content": "To solve (1), GD with estimated derivatives (e.g., Algo. 1) has been developed (Flaxman et al., 2005; Ghadimi and Lan, 2013; Nesterov and Spokoiny, 2017; Liu et al., 2018a;b). Particularly, these algorithms first estimate the derivative of \\( f \\) (line 3 of Algo. 1) and then plug the estimated derivative into GD-based methods to obtain the next input for querying (lines 4-5 of Algo. 1). 
In these algorithms, the derivative is typically estimated by averaging the finite difference approximation of the directional derivatives for \\( f \\) along certain directions, which we refer to as the finite difference (FD) method in this paper. For example, given a parameter \\( \\lambda \\) and directions \\( \\{\\pmb{u}_i\\}_{i=1}^n \\), the derivative \\( \\nabla f \\) at any \\( \\pmb{x} \\in \\mathcal{X} \\) can be estimated by the following FD method (Berahas et al., 2022):" + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.578, + 0.825, + 0.617 + ], + "angle": 0, + "content": "\\[\n\\nabla f (\\boldsymbol {x}) \\approx g (\\boldsymbol {x}) \\triangleq \\sum_ {i = 1} ^ {n} \\frac {y \\left(\\boldsymbol {x} + \\lambda \\boldsymbol {u} _ {i}\\right) - y (\\boldsymbol {x})}{\\lambda} \\boldsymbol {u} _ {i}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.617, + 0.827, + 0.703 + ], + "angle": 0, + "content": "The directions \\(\\{\\pmb{u}_i\\}_{i=1}^n\\) are usually sampled from the standard Gaussian distribution (Nesterov and Spokoiny, 2017) or uniformly from the unit sphere (Flaxman et al., 2005), or set as the standard basis vectors with 1 at one of its coordinates and 0 otherwise (Lian et al., 2016). As mentioned before, existing FD methods typically require many additional queries (i.e., \\(\\{\\pmb{x} + \\lambda \\pmb{u}_i\\}_{i=1}^n\\)) to achieve an accurate derivative estimation in every iteration of Algo. 1 (Berahas et al., 2022), making existing ZO optimization algorithms (Flaxman et al., 2005; Nesterov and Spokoiny, 2017) query-inefficient." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.721, + 0.825, + 0.737 + ], + "angle": 0, + "content": "3 ZO OPTIMIZATION VIA TRAJECTORY-INFORMED DERIVATIVE ESTIMATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.752, + 0.827, + 0.851 + ], + "angle": 0, + "content": "To improve existing GD with estimated derivatives (e.g., Algo. 1), we propose the ZoRD algorithm (Algo. 
2), which achieves more query-efficient ZO optimization thanks to our two major contributions. Firstly, we propose a derived GP-based derivative estimation method which only uses the optimization trajectory and consequently does not require any additional query for derivative estimation (Sec. 3.1). Secondly, thanks to the ability of our method to estimate the derivative at any input in the domain without any additional query and to measure the estimation error in a principled way, we develop the technique of dynamic virtual updates to further improve the query efficiency of our ZoRD (Sec. 3.2)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.861, + 0.572, + 0.875 + ], + "angle": 0, + "content": "3.1 TRAJECTORY-INFORMED DERIVATIVE ESTIMATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.886, + 0.825, + 0.93 + ], + "angle": 0, + "content": "To begin with, if a function \\( f \\) follows a GP, then its derivative \\( \\nabla f \\) also follows a GP (Rasmussen and Williams, 2006). This is formalized by our Lemma 1 below (proof in Appx. B.1), which then provides us a principled way to estimate the derivative at any input in the domain." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.121 + ], + "angle": 0, + "content": "Lemma 1 (Derived GP for Derivatives). 
If a function \\( f \\) follows a \\( GP \\): \\( f \\sim \\mathcal{GP}\\left(\\mu (\\cdot),\\sigma^2 (\\cdot ,\\cdot)\\right) \\), then" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.127, + 0.597, + 0.145 + ], + "angle": 0, + "content": "\\[\n\\nabla f \\sim \\mathcal {G P} (\\nabla \\mu (\\cdot), \\partial \\sigma^ {2} (\\cdot , \\cdot))\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.148, + 0.815, + 0.166 + ], + "angle": 0, + "content": "where \\(\\partial \\sigma^2 (\\cdot ,\\cdot)\\) denotes the cross partial derivative w.r.t the first and second arguments of \\(\\sigma^2 (\\cdot ,\\cdot)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.176, + 0.827, + 0.24 + ], + "angle": 0, + "content": "\\(f\\) Follows the Posterior GP. As discussed in Sec. 2.1, we assume that \\(f \\sim \\mathcal{GP}(\\mu(\\cdot), k(\\cdot, \\cdot))\\). So, in every iteration \\(t\\) of our Algo. 2, conditioned on the current optimization trajectory \\(\\mathcal{D}_{t-1} \\triangleq \\{(x_{\\tau}, y_{\\tau})\\}_{\\tau=1}^{t-1}\\), \\(f\\) follows the posterior GP: \\(f \\sim \\mathcal{GP}(\\mu_{t-1}(\\cdot), \\sigma_{t-1}^2(\\cdot, \\cdot))\\) with the mean function \\(\\mu_{t-1}(\\cdot)\\) and the covariance function \\(\\sigma_{t-1}^2(\\cdot, \\cdot)\\) defined as below (Rasmussen and Williams, 2006):" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.248, + 0.603, + 0.268 + ], + "angle": 0, + "content": "\\[\n\\mu_ {t - 1} (\\boldsymbol {x}) \\triangleq \\boldsymbol {k} _ {t - 1} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {y} _ {t - 1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.286, + 0.263, + 0.824, + 0.29 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {t - 1} ^ {2} \\left(\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}\\right) \\triangleq k \\left(\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}\\right) - \\boldsymbol {k} _ {t - 1} (\\boldsymbol {x}) ^ {\\top} 
\\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {k} _ {t - 1} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.297, + 0.827, + 0.35 + ], + "angle": 0, + "content": "where \\(\\pmb{y}_{t-1}^{\\top} \\triangleq [y_{\\tau}]_{\\tau=1}^{t-1}\\) and \\(\\pmb{k}_{t-1}(\\pmb{x})^{\\top} \\triangleq [k(\\pmb{x}, \\pmb{x}_{\\tau})]_{\\tau=1}^{t-1}\\) are \\((t-1)\\)-dimensional row vectors, and \\(\\mathbf{K}_{t-1} \\triangleq [k(\\pmb{x}_{\\tau}, \\pmb{x}_{\\tau'})]_{\\tau, \\tau'=1}^{t-1}\\) is a \\((t-1) \\times (t-1)\\)-dimensional matrix. Define \\(\\sigma_{t-1}^{2}(\\pmb{x}) \\triangleq \\sigma_{t-1}^{2}(\\pmb{x}, \\pmb{x})\\), the posterior distribution at \\(\\pmb{x}\\) is Gaussian with mean \\(\\mu_{t-1}(\\pmb{x})\\) and variance \\(\\sigma_{t-1}^{2}(\\pmb{x})\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.363, + 0.784, + 0.379 + ], + "angle": 0, + "content": "\\(\\nabla f\\) Follows the Derived GP for Derivatives. 
Substituting (3) into Lemma 1, we have that" + }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.384, + 0.825, + 0.402 + ], + "angle": 0, + "content": "\\[\n\\nabla f \\sim \\mathcal {G P} \\left(\\nabla \\mu_ {t - 1} (\\cdot), \\partial \\sigma_ {t - 1} ^ {2} (\\cdot , \\cdot)\\right), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.408, + 0.697, + 0.425 + ], + "angle": 0, + "content": "in which the mean \\(\\nabla \\mu_{t - 1}(\\pmb {x})\\) at \\(\\pmb{x}\\) and the covariance \\(\\partial \\sigma_{t - 1}^2 (\\pmb {x},\\pmb{x}^\\prime)\\) at \\(\\pmb {x},\\pmb{x}^{\\prime}\\) are" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.433, + 0.583, + 0.453 + ], + "angle": 0, + "content": "\\[\n\\nabla \\mu_ {t - 1} (\\boldsymbol {x}) \\triangleq \\partial_ {\\boldsymbol {z}} \\boldsymbol {k} _ {t - 1} (\\boldsymbol {z}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {y} _ {t - 1} | _ {\\boldsymbol {z} = \\boldsymbol {x}},\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.448, + 0.825, + 0.478 + ], + "angle": 0, + "content": "\\[\n\\left. 
\\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}) \\triangleq \\partial_ {\\boldsymbol {z}} \\partial_ {\\boldsymbol {z} ^ {\\prime}} k (\\boldsymbol {z}, \\boldsymbol {z} ^ {\\prime}) - \\partial_ {\\boldsymbol {z}} \\boldsymbol {k} _ {t - 1} (\\boldsymbol {z}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\partial_ {\\boldsymbol {z} ^ {\\prime}} \\boldsymbol {k} _ {t - 1} (\\boldsymbol {z} ^ {\\prime}) \\right| _ {\\boldsymbol {z} = \\boldsymbol {x}, \\boldsymbol {z} ^ {\\prime} = \\boldsymbol {x} ^ {\\prime}}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.485, + 0.827, + 0.53 + ], + "angle": 0, + "content": "in which \\(\\partial_{\\pmb{z}}\\pmb{k}_{t - 1}(\\pmb {z})\\triangleq [\\partial_{\\pmb{z}}k(\\pmb {z},\\pmb{x}_{\\tau})]_{\\tau = 1}^{t - 1}\\) is a \\((t - 1)\\times d\\) -dimensional matrix and \\(\\partial_{\\pmb{z}}\\partial_{\\pmb{z}^{\\prime}}k(\\pmb {z},\\pmb{z}^{\\prime})\\) is a \\(d\\times d\\) -dimensional matrix. Therefore, \\(\\nabla \\mu_{t - 1}(\\pmb {x})\\) is a \\(d\\) -dimensional vector and \\(\\partial \\sigma_{t - 1}^2 (\\pmb {x},\\pmb{x}^\\prime)\\) is a \\(d\\times d\\) -dimensional matrix. We refer to this GP (4) followed by \\(\\nabla f\\) as the derived GP for derivatives." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.537, + 0.826, + 0.594 + ], + "angle": 0, + "content": "So, define \\(\\partial \\sigma_{t - 1}^2 (\\pmb {x})\\triangleq \\partial \\sigma_{t - 1}^2 (\\pmb {x},\\pmb {x})\\) , we have that for any input \\(\\pmb {x}\\in \\mathcal{X}\\) , the derivative \\(\\nabla f(\\pmb {x})\\) at \\(\\pmb{x}\\) follows a \\(d\\) -dimensional Gaussian distribution: \\(\\nabla f(\\pmb {x})\\sim \\mathcal{N}(\\nabla \\mu_{t - 1}(\\pmb {x}),\\partial \\sigma_{t - 1}^2 (\\pmb {x}))\\) . 
This allows us to (a) estimate the derivative \\(\\nabla f(\\pmb {x})\\) at any input \\(\\pmb {x}\\in \\mathcal{X}\\) using the posterior mean \\(\\nabla \\mu_{t - 1}(\\pmb {x})\\) of the derived GP for derivatives (4):" + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.595, + 0.825, + 0.611 + ], + "angle": 0, + "content": "\\[\n\\nabla f (\\boldsymbol {x}) \\approx \\nabla \\mu_ {t - 1} (\\boldsymbol {x}), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.613, + 0.827, + 0.712 + ], + "angle": 0, + "content": "and \\((b)\\) employ the posterior covariance matrix \\(\\partial \\sigma_{t - 1}^2 (\\pmb {x})\\) to obtain a principled measure of the uncertainty for this derivative estimation, which together constitute our novel derivative estimation. Remarkably, our derivative estimation only makes use of the naturally available optimization trajectory \\(\\mathcal{D}_{t - 1}\\) and does not need any additional query, which is in stark contrast to the existing FD methods (e.g., (2)) that require many additional queries for their derivative estimation. Moreover, our principled measure of uncertainty allows us to perform dynamic virtual updates (Sec. 3.2) and theoretically guarantee the quality of our derivative estimation (Sec. 4.1)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.728, + 0.418, + 0.74 + ], + "angle": 0, + "content": "3.2 DYNAMIC VIRTUAL UPDATES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.827, + 0.868 + ], + "angle": 0, + "content": "Note that our derived GP-based derivative estimation (6) can estimate the derivative at any input \\( \\pmb{x} \\) within the domain. As a result, in every iteration \\( t \\) of our ZoRD algorithm, for a step \\( \\tau \\geq 1 \\), after performing a GD update using the estimated derivative at \\( \\pmb{x}_{t,\\tau -1} \\) (i.e., \\( \\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau -1}) \\)) to reach the input \\( \\pmb{x}_{t,\\tau} \\) (line 5 of Algo. 
2), we can again estimate the derivative at \\( \\pmb{x}_{t,\\tau} \\) (i.e., \\( \\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau}) \\)) and then perform another GD update to reach \\( \\pmb{x}_{t,\\tau +1} \\) without requiring any additional query. This process can be repeated for multiple steps, and can further improve the query efficiency of our ZoRD. Formally, given the projection function \\( \\mathcal{P}_{\\chi}(\\pmb {x})\\triangleq \\arg \\min_{\\pmb {z}\\in \\chi}\\| \\pmb {x} - \\pmb {z}\\| _2^2 /2 \\) and learning rates \\( \\{\\eta_{t,\\tau}\\}_{\\tau = 0}^{V_t - 1} \\), we perform the following virtual updates for \\( V_{t} \\) steps (lines 4-6 of Algo. 2):" + }, + { + "type": "equation", + "bbox": [ + 0.284, + 0.874, + 0.825, + 0.891 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} _ {t, \\tau} = \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau - 1} - \\eta_ {t, \\tau - 1} \\nabla \\mu_ {t - 1} \\left(\\boldsymbol {x} _ {t, \\tau - 1}\\right)\\right) \\quad \\forall \\tau = 1, \\dots , V _ {t} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "and then choose the last \\( \\pmb{x}_{t,V_t} \\) to query (i.e., line 7 of Algo. 2). Importantly, these multi-step virtual GD updates are only feasible in our ZoRD (Algo. 2) because our derivative estimator (6) does not" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "require any new query in all these steps, whereas the existing FD methods require additional queries to estimate the derivative in every step." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.269 + ], + "angle": 0, + "content": "The number of steps for our virtual updates (i.e., \\( V_{t} \\)) induces an intriguing trade-off: An overly small \\( V_{t} \\) may not be able to fully exploit the benefit of our derivative estimation (6) which is free from the requirement for additional queries, yet an excessively large \\( V_{t} \\) may lead to the usage of inaccurate derivative estimations which can hurt the performance (validated in Appx. D.2). Remarkably, (4) allows us to dynamically choose \\( V_{t} \\) by inspecting our principled measure of the predictive uncertainty (i.e., \\( \\partial \\sigma_{t-1}^2(\\boldsymbol{x}) \\)) for every derivative estimation. Specifically, after reaching the input \\( \\boldsymbol{x}_{t,\\tau} \\), we continue the virtual updates (to reach \\( \\boldsymbol{x}_{t,\\tau+1} \\)) if our predictive uncertainty is small, i.e., if \\( \\left\\| \\partial \\sigma_{t-1}^2(\\boldsymbol{x}_{t,\\tau}) \\right\\|_2 \\leq c \\) where \\( c \\) is a confidence threshold; otherwise, we terminate the virtual updates and let \\( V_{t} = \\tau \\) since the derivative estimation at \\( \\boldsymbol{x}_{t,\\tau} \\) is likely unreliable." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.284, + 0.419, + 0.299 + ], + "angle": 0, + "content": "4 THEORETICAL ANALYSIS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.308, + 0.448, + 0.322 + ], + "angle": 0, + "content": "4.1 DERIVATIVE ESTIMATION ERROR" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.334, + 0.814, + 0.35 + ], + "angle": 0, + "content": "To begin with, we derive a theoretical guarantee on the error of our derivative estimation at any \\( \\pmb{x} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.355, + 0.825, + 0.392 + ], + "angle": 0, + "content": "Theorem 1 (Derivative Estimation Error). Let \\(\\delta \\in (0,1)\\) and \\(\\beta \\triangleq \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(1 / \\delta)}\\). 
For any \\(\\pmb{x} \\in \\mathcal{X}\\) and any \\(t \\geq 1\\), the following holds with probability of at least \\(1 - \\delta\\)," + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.399, + 0.637, + 0.425 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\nabla f (\\boldsymbol {x}) - \\nabla \\mu_ {t} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\beta \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.426, + 0.825, + 0.512 + ], + "angle": 0, + "content": "Thm. 1 (proof in Appx. B.2) has presented an upper bound on the error of our derivative estimation (6) at any \\( \\pmb{x} \\in \\mathcal{X} \\) in terms of \\( \\sqrt{\\|\\partial\\sigma_t^2(\\pmb{x})\\|_2} \\), which is a measure of the uncertainty about our derivative estimation at \\( \\pmb{x} \\) (Sec. 3.1). This hence implies that the threshold \\( c \\) applied to our predictive uncertainty \\( \\left\\| \\partial \\sigma_t^2 (\\pmb {x})\\right\\| _2 \\) (Sec. 3.2) also ensures that the derivative estimation error is small during our dynamic virtual updates. Next, we show in the following theorem (proof in Appx. B.3) that our upper bound on the estimation error from Thm. 1 is non-increasing as the number of function queries is increased." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.516, + 0.706, + 0.531 + ], + "angle": 0, + "content": "Theorem 2 (Non-Increasing Error). For any \\( \\pmb{x} \\in \\mathcal{X} \\) and any \\( t \\geq 1 \\), we have that" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.536, + 0.6, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.564, + 0.825, + 0.605 + ], + "angle": 0, + "content": "Let \\(\\delta \\in (0,1)\\). 
Define \\(r \\triangleq \\max_{\\boldsymbol{x} \\in \\mathcal{X}, t \\geq 1} \\sqrt{\\|\\partial \\sigma_t^2(\\boldsymbol{x})\\|_2 / \\left\\|\\partial \\sigma_{t-1}^2(\\boldsymbol{x})\\right\\|_2}\\), given the \\(\\beta\\) in Thm. 1, we then have that \\(r \\in [1/\\sqrt{1 + 1/\\sigma^2}, 1]\\), and that with a probability of at least \\(1 - \\delta\\)," + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.614, + 0.664, + 0.639 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\nabla f (\\boldsymbol {x}) - \\nabla \\mu_ {t} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\beta \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\leq \\kappa \\beta r ^ {t}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.644, + 0.827, + 0.83 + ], + "angle": 0, + "content": "Thm. 2 shows that our upper bound on the derivative estimation error (i.e., \\(\\beta \\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}\\) from Thm. 1) is guaranteed to be non-increasing in the entire domain as the number of function queries is increased. Moreover, in some situations (i.e., when \\(r < 1\\)), our upper bound on the estimation error is even exponentially decreasing. Of note, \\(r\\) characterizes how fast the uncertainty about our derivative estimation (measured by \\(\\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}\\)) is reduced across the domain. Since GD-based algorithms usually perform a local search in a neighborhood (especially for the problems with high-dimensional input spaces), all the inputs within the local region are expected to be close to each other (measured by the kernel function \\(k\\)). 
Moreover, as the objective function is usually smooth in the local region (i.e., its derivatives are continuous), reducing the uncertainty of the derivative at an input \\(\\boldsymbol{x}_t\\) (i.e., by querying \\(\\boldsymbol{x}_t\\)) is also expected to decrease the uncertainty of the derivatives at the other inputs in the same local region (i.e., decrease \\(\\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}\\)). So, \\(r < 1\\) is expected to be a reasonable condition that can be satisfied in practice. This will also be corroborated by our empirical results (e.g., Figs. 1 and 2), which demonstrates that the error of our derivative estimation (6) is indeed reduced very fast." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.844, + 0.827, + 0.903 + ], + "angle": 0, + "content": "Our GP-based Method (6) vs. Existing FD Methods. Our derivative estimation method based on the derived GP (6) is superior to the traditional FD methods (e.g., (2)) in a number of major aspects. (a) Our derivative estimation error can be exponentially decreasing in some situations (i.e., when \\( r < 1 \\) in Thm. 2), which is unachievable for the existing FD methods since they can only" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.644, + 0.925 + ], + "angle": 0, + "content": "2The first step of GD update to reach \\(x_{t,1}\\) is always performed, i.e., \\(V_{t}\\geq 1\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.233 + ], + "angle": 0, + "content": "attain a polynomial rate of reduction (Berahas et al., 2022). 
\\((b)\\) Our method (6) does not need any additional query to estimate the derivative (but only requires the optimization trajectory), whereas the existing FD methods require additional queries for every derivative estimation. \\((c)\\) Our method (6) is equipped with a principled measure of the predictive uncertainty and hence the estimation error for derivative estimation (i.e., via \\(\\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}\\), Thm. 1), which is typically unavailable for the existing FD methods. \\((d)\\) Our method (6), unlike the existing FD methods, makes it possible to apply the technique of dynamic virtual updates (Sec. 3.2) thanks to its capability of estimating the derivative at any input in the domain without requiring any additional query and measuring the estimation error in a principled way (Thm. 1)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.249, + 0.4, + 0.263 + ], + "angle": 0, + "content": "4.2 CONVERGENCE ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.274, + 0.828, + 0.4 + ], + "angle": 0, + "content": "To analyze the convergence of our ZoRD, besides our main assumption that \\( f \\) is sampled from a GP (Sec. 2.1), we assume that \\( f \\) is \\( L_{c} \\)-Lipschitz continuous for \\( L_{c} > 0 \\). This is a mild assumption since it has been shown that a function \\( f \\) sampled from a GP is Lipschitz continuous with high probability for commonly used kernels, e.g., the SE kernel and Matérn kernel with \\( \\nu > 2 \\) (Srinivas et al., 2010). We also assume that \\( f \\) is \\( L_{s} \\)-Lipschitz smooth, which is commonly adopted in the analysis of GD-based algorithms (J Reddi et al., 2016). We aim to prove the convergence of our ZoRD for nonconvex \\( f \\) by analyzing how fast it converges to a stationary point (Ghadimi and Lan, 2013; Liu et al., 2018a). 
Specifically, we follow the common practice of previous works (J Reddi et al., 2016; Liu et al., 2018b) to analyze the following derivative mapping:" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.406, + 0.825, + 0.425 + ], + "angle": 0, + "content": "\\[\nG _ {t, \\tau} \\triangleq \\left(\\boldsymbol {x} _ {t, \\tau} - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right)\\right)\\right) / \\eta_ {t, \\tau}. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.429, + 0.788, + 0.445 + ], + "angle": 0, + "content": "The convergence of our ZoRD is formally guaranteed by Thm. 3 below (proof in Appx. B.4)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.447, + 0.825, + 0.477 + ], + "angle": 0, + "content": "Theorem 3 (Convergence of ZORD). Let \\(\\delta \\in (0,1)\\). Suppose our ZORD (Algo. 2) is run with \\(V_{t} = V\\) and \\(\\eta_{t,\\tau} = \\eta \\leq 1 / L_{s}\\) for any \\(t\\) and \\(\\tau\\). Then with probability of at least \\(1 - \\delta\\), when \\(r < 1\\)," + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.484, + 0.757, + 0.544 + ], + "angle": 0, + "content": "\\[\n\\min _ {t \\leq T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\underbrace {\\frac {2 [ f (\\boldsymbol {x} _ {0}) - f (\\boldsymbol {x} ^ {*}) ] / \\eta}{T V}} _ {①} + \\underbrace {\\frac {2 \\alpha^ {2} r ^ {2}}{T (1 - r ^ {2})} + \\frac {(2 L _ {c} + 1 / \\eta) \\alpha r}{T (1 - r)}} _ {②}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.545, + 0.827, + 0.822 + ], + "angle": 0, + "content": "where \\(\\alpha \\triangleq \\kappa \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(VT / \\delta)}\\). When \\(r = 1\\), we instead have \\(② = 2\\alpha^{2} + (2L_{c} + 1/\\eta)\\alpha\\). In the upper bound of Thm. 
3, the term (1) represents the convergence rate of (projected) GD when the true derivative is used and it asymptotically goes to 0 as \\(T\\) increases; the term (2) corresponds to the impact of the error of our derivative estimation (6) on the convergence. In situations where \\(r < 1\\) which is a reasonably achievable condition as we have discussed in Sec. 4.1, the term (2) will also asymptotically approach 0. This, remarkably, suggests that the impact of the derivative estimation error on the convergence vanishes asymptotically and our ZoRD algorithm is guaranteed to converge to a stationary point (i.e., \\(\\min_{t \\leq T} \\frac{1}{V} \\sum_{\\tau=0}^{V-1} \\|G_{t,\\tau}\\|_2^2\\) approaches 0) at the rate of \\(\\mathcal{O}(1/T)\\) when \\(r < 1\\). This is unattainable by existing ZO optimization algorithms using FD-based derivative estimation (Nesterov and Spokoiny, 2017; Liu et al., 2018b), because these methods typically converge to a stationary point at the rate of \\(\\mathcal{O}(1/T + \\text{const.})\\) with a constant learning rate. Even when \\(r = 1\\) where the term (2) becomes a constant independent of \\(T\\), our Thm. 3 is still superior to the convergence of these existing works because our result (Thm. 3) is based on the worst-case analysis whereas these works are typically based on the average-case analysis, i.e., their results only hold in expectation over the randomly sampled directions for derivative estimation. This means that their convergence may become even worse when inappropriate directions are used, e.g., directions that are nearly orthogonal to the true derivative which commonly happens in high-dimensional input spaces. In addition, given a fixed \\(T\\), our ZoRD enjoys a query complexity (i.e., the number of queries in \\(T\\) iterations) of \\(\\mathcal{O}(T)\\), which significantly improves over the \\(\\mathcal{O}(nT)\\) of the existing works based on FD (\\(n\\) in Sec. 2.2)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.828, + 0.926 + ], + "angle": 0, + "content": "The impacts of the number of steps of our virtual updates (i.e., \\( V \\)) are partially reflected in Thm. 3. Specifically, a larger \\( V \\) improves the reduction rate of the term ① because a larger number of virtual GD updates (without requiring additional queries) will be applied in our ZoRD algorithm. This is also unachievable by existing ZO optimization algorithms using FD-based derivative estimation since they require additional queries for the derivative estimation in their every GD update. Meanwhile, a larger \\( V \\) may also negatively impact the performance of our ZoRD since it may lead to the use of those estimated derivatives with large estimation errors (Sec. 3.2). However, this negative impact has" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.195, + 0.093, + 0.357, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.357, + 0.093, + 0.505, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.093, + 0.652, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.652, + 0.094, + 0.801, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.422, + 0.2, + 0.588, + 0.21 + ], + "angle": 0, + "content": "- Function Queries --\\(\\nabla f\\) --\\(\\nabla \\mu\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.214, + 0.825, + 0.243 + ], + "angle": 0, + "content": "Figure 1: Our derived GP for derivative estimation (4) with different number \\( n \\) of queries. 
Green curve and its confidence interval denote the mean \\( \\nabla \\mu(\\boldsymbol{x}) \\) and standard deviation of the derived GP." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.246, + 0.326, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.326, + 0.247, + 0.49, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.323, + 0.333, + 0.346, + 0.345 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.247, + 0.648, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.247, + 0.817, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.347, + 0.826, + 0.389 + ], + "angle": 0, + "content": "Figure 2: Comparison of the derivative estimation errors of our derived GP-based estimator (6) (GP) and the FD estimator, measured by cosine similarity (larger is better) and Euclidean distance (smaller is better). Each curve is the mean \\(\\pm\\) standard error from five independent runs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.398, + 0.825, + 0.441 + ], + "angle": 0, + "content": "only been implicitly accounted for by the term ② because this term comes from our Thm. 2, which is based on a worst-case analysis and gives a uniform upper bound on the derivative estimation error for all inputs in the domain \\(\\mathcal{X}\\)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.461, + 0.328, + 0.476 + ], + "angle": 0, + "content": "5 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.493, + 0.825, + 0.537 + ], + "angle": 0, + "content": "In this section, we firstly empirically verify the efficacy of our derived GP-based derivative estimator (6) in Sec. 5.1, and then demonstrate that our ZoRD outperforms existing baseline methods for ZO optimization using synthetic experiments (Sec. 5.2) and real-world experiments (Secs. 5.3, 5.4)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.553, + 0.391, + 0.566 + ], + "angle": 0, + "content": "5.1 DERIVATIVE ESTIMATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.578, + 0.827, + 0.807 + ], + "angle": 0, + "content": "Here we investigate the efficacy of our derivative estimator (6) based on the derived GP for derivatives (4). Specifically, we sample a function \\( f \\) (defined on a one-dimensional domain) from a GP using the SE kernel, and then use a set of randomly selected inputs as well as their noisy observations (as optimization trajectory) to calculate our derived GP for derivatives. The results (Fig. 1) illustrate a number of interesting insights. Firstly, in regions where (even only a few) function queries are performed (e.g., in the region of \\([-3,0]\\)), our estimated derivative (i.e., \\( \\nabla \\mu_{t-1}(\\pmb{x}) \\)) generally aligns with the groundtruth derivative (i.e., \\( \\nabla f(\\pmb{x}) \\)) and our estimation uncertainty (i.e., characterized by \\( \\sqrt{\\|\\partial \\sigma_{t-1}^2(\\pmb{x})\\|_2} \\)) shrinks compared with other un-queried regions. These results hence demonstrate that our (4) is able to accurately estimate derivatives and reliably quantify the uncertainty of these estimations within the regions where function queries are performed. Secondly, as more input queries are collected (i.e., from left to right in Fig. 1), the uncertainty \\( \\sqrt{\\|\\partial \\sigma_{t-1}^2(\\pmb{x})\\|_2} \\) in the entire domain is decreased in general. This provides an empirical justification for our Thm. 2 which guarantees non-increasing uncertainty and hence non-increasing estimation error. Lastly, note that with only 12 queries (rightmost figure), our derivative estimator is already able to accurately estimate the derivative in the entire domain, which represents a remarkable reduction rate of our derivative estimation error." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Next, we compare our derivative estimator (6) with the FD estimator (Sec. 2.2). Specifically, using the Ackley function with \\( d = 10 \\) (see Appx. C.2), we firstly select an input \\( x_0 \\) and then follow the FD method (2) to randomly sample \\( n \\) directions \\( \\{\\pmb{u}_i\\}_{i=1}^n \\) from the standard Gaussian distribution, to construct input queries \\( \\{\\pmb{x}_0 + \\lambda \\pmb{u}_i\\}_{i=1}^n \\) (see Sec. 2.2). Next, these queries and their observations are \\( (a) \\) used as the optimization trajectory to apply our derivative estimator (6), and \\( (b) \\) used by the FD method to estimate the derivative following (2). The results are shown in Fig. 2a (for two different values of \\( \\lambda \\)), in which for both our derived GP-based estimator (6) and the FD estimator, we measure the cosine similarity (larger is better) and Euclidean distance (smaller is better) between the estimated" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.094, + 0.328, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.19, + 0.204, + 0.324, + 0.219 + ], + "angle": 0, + "content": "(a) Ackley \\((d = 20)\\)" + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.095, + 0.49, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.352, + 0.204, + 0.487, + 0.219 + ], + "angle": 0, + "content": "(b) Ackley \\((d = 40)\\)" + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.094, + 0.657, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 
0.204, + 0.644, + 0.219 + ], + "angle": 0, + "content": "(c) Levy \\((d = 40)\\)" + }, + { + "type": "image", + "bbox": [ + 0.663, + 0.094, + 0.824, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.687, + 0.204, + 0.816, + 0.219 + ], + "angle": 0, + "content": "(d) Levy \\((d = 100)\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.224, + 0.827, + 0.268 + ], + "angle": 0, + "content": "Figure 3: Optimization of Ackley and Levy functions with different dimensions. The \\(x\\)-axis and \\(y\\)-axis denote the number of queries and log-scaled optimality gap (i.e., \\(\\log(f(\\boldsymbol{x}_T) - f(\\boldsymbol{x}^*))\\)) achieved after this number of queries. Each curve is the mean \\(\\pm\\) standard error from ten independent runs." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.27, + 0.825, + 0.3 + ], + "angle": 0, + "content": "Table 1: Comparison of the number of required queries to achieve a successful black-box adversarial attack. Every entry represents mean ± standard deviation from five independent runs." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.31, + 0.825, + 0.393 + ], + "angle": 0, + "content": "
DatasetMetricGLDRGFPRGFTuRBO-1TuRBO-10ZoRD
MNIST# Queries1780±2221192±2601236±145654±70747±60248±50
Speedup7.2×4.8×5.0×2.6×3.0×1.0×
CIFAR-10# Queries964±1753622±11554133±1525638±108708±105384±59
Speedup2.5×9.4×10.8×1.7×1.8×1.0×
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.406, + 0.827, + 0.558 + ], + "angle": 0, + "content": "derivative and the true derivative at \\( x_0 \\). The figures show that our derivative estimation error enjoys a faster rate of reduction compared with the FD method, which corroborates our theoretical insights from Thm. 2 (Sec. 4.1) positing that our estimation error can be rapidly decreasing. Subsequently, to further highlight our advantage of being able to exploit the optimization trajectory and hence to eliminate the need for additional function queries (Sec. 4.1), we perform another comparison where our derived GP-based estimator (6) only utilizes 20 queries from the optimization trajectory (sampled using the same method above) for derivative estimation. The results (Fig. 2b) show that even with only these 20 queries (without any additional function query), our derivative estimator (6) achieves comparable or better estimation errors than FD using as many as 80 additional queries. Overall, the results in Fig. 2 have provided empirical support for the superiority of our derived GP-based derivative estimation (6), which substantiates our theoretical justifications in Sec. 4.1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.576, + 0.402, + 0.59 + ], + "angle": 0, + "content": "5.2 SYNTHETIC EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.602, + 0.827, + 0.826 + ], + "angle": 0, + "content": "Here we adopt the widely used Ackley and Levy functions with various dimensions (Eriksson et al., 2019) to show the superiority of our ZoRD. 
We compare ZoRD with a number of representative baselines for ZO optimization, e.g., RGF (Nesterov and Spokoiny, 2017) which uses FD for derivative estimation, PRGF (Cheng et al., 2021) which is a recent extension of RGF, GLD (Golovin et al., 2020) which is a recent ZO optimization algorithm based on direct search, and TuRBO (Eriksson et al., 2019) which is a highly performant Bayesian optimization (BO) algorithm. We also evaluate the performance of a first-order optimization algorithm, i.e., GD with true derivatives. More details are in Appx. C.2. The results are shown in Fig. 3, where ZoRD outperforms all other ZO optimization algorithms. Particularly, ZoRD considerably outperforms both RGF and PRGF, which can be attributed to our two major contributions. Firstly, our derivative estimator (6) used by ZoRD is more accurate and more query-efficient than the FD method adopted by RGF and PRGF, as theoretically justified in Sec. 4.1 and empirically demonstrated in Sec. 5.1. Secondly, our dynamic virtual updates (Sec. 3.2) can perform multi-step GD updates without requiring any additional query, which further improves the performance of ZoRD (validated in Appx. D.2). Moreover, ZoRD is the only ZO optimization algorithm that is able to converge to a comparable final performance to that of the GD with true derivatives in every figure of Fig. 3." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.842, + 0.471, + 0.856 + ], + "angle": 0, + "content": "5.3 BLACK-BOX ADVERSARIAL ATTACK" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We further compare our ZoRD with other ZO optimization algorithms in the problem of black-box adversarial attack on images, which is one of the most important applications of ZO optimization in recent years. 
In black-box adversarial attack (Ru et al., 2020), given a fully trained ML model and an image \\( z \\), we intend to find (through only function queries) a small perturbation \\( x \\) to be added to \\( z \\)" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.098, + 0.331, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.212, + 0.211, + 0.297, + 0.224 + ], + "angle": 0, + "content": "(a) Precision" + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.099, + 0.495, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.389, + 0.211, + 0.456, + 0.224 + ], + "angle": 0, + "content": "(b) Recall" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.098, + 0.655, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.546, + 0.211, + 0.628, + 0.224 + ], + "angle": 0, + "content": "(c) F1 Score" + }, + { + "type": "image", + "bbox": [ + 0.663, + 0.099, + 0.822, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.694, + 0.211, + 0.807, + 0.224 + ], + "angle": 0, + "content": "(d) Jaccard index" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.231, + 0.825, + 0.273 + ], + "angle": 0, + "content": "Figure 4: Optimization of different non-differentiable metrics on the Covertype dataset. The \\( x \\)-axis and \\( y \\)-axis denote, respectively, the number of queries and the improvement on the non-differentiable metric. Each curve is the mean \\( \\pm \\) standard error from five independent experiments." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.288, + 0.827, + 0.526 + ], + "angle": 0, + "content": "such that the perturbed image \\( z + x \\) will be incorrectly classified by the ML model. Following the practice from (Cheng et al., 2021), we randomly select an image from MNIST (Lecun et al., 1998) (\\( d = 28 \\times 28 \\)) or CIFAR-10 (Krizhevsky et al., 2009) (\\( d = 32 \\times 32 \\)), and aim to add a perturbation with an \\( L_{\\infty} \\) constraint to make a trained deep neural network misclassify the image (more details in Appx. C.3). Tab. 1 summarizes the number of required queries to achieve a successful attack by different algorithms (see results on multiple images in Appx. D.3). The results show that in such high-dimensional ZO optimization problems, our ZoRD again significantly outperforms the other algorithms since it requires a considerably smaller number of queries to achieve a successful attack. Particularly, our ZoRD is substantially more query-efficient than RGF and PRGF which rely on the FD methods for derivative estimation, e.g., for CIFAR-10, the numbers of queries required by RGF and PRGF are \\( 9.4 \\times \\) and \\( 10.8 \\times \\) of that required by ZoRD, respectively. This further verifies the advantages of our trajectory-informed derivative estimation (as justified theoretically in Sec. 4.1 and empirically in Sec. 5.1) and dynamic virtual updates (as demonstrated in Appx. D.2). Remarkably, our ZoRD also outperforms BO (i.e., TuRBO-1/10 which correspond to two versions of the TuRBO algorithm (Eriksson et al., 2019)) which has been widely shown to be query-efficient in black-box adversarial attack (Ru et al., 2020). Overall, these results showcase the ability of our ZoRD to advance the other ZO optimization algorithms in challenging real-world ZO optimization problems." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.541, + 0.544, + 0.555 + ], + "angle": 0, + "content": "5.4 NON-DIFFERENTIABLE METRIC OPTIMIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.57, + 0.825, + 0.751 + ], + "angle": 0, + "content": "Non-differentiable metric optimization (Hiranandani et al., 2021; Huang et al., 2021), which has received a surging interest recently, can also be cast as a ZO optimization problem. We therefore use it to further demonstrate the superiority of our ZoRD to other ZO optimization algorithms. Specifically, we firstly train a multilayer perceptron (MLP) \\((d = 2189)\\) on the Covertype (Dua and Graff, 2017) dataset with the cross-entropy loss function. Then, we use the same dataset to fine-tune this MLP model by exploiting ZO optimization algorithms to optimize a non-differentiable metric, such as precision, recall, F1 score and Jaccard index (see more details in Appx. C.4). Here we additionally compare with the evolutionary strategy (ES) which has been previously applied for non-differentiable metric optimization (Huang et al., 2021). Fig. 4 illustrates the percentage improvements achieved by different algorithms during the fine-tuning process (i.e., \\((f(\\pmb{x}_0) - f(\\pmb{x}_T)) \\times 100\\% / f(\\pmb{x}_0)\\)). The results show that our ZoRD again consistently outperforms the other ZO optimization algorithms in terms of both the query efficiency and the final converged performance. These results therefore further substantiate the superiority of ZoRD in optimizing high-dimensional non-differentiable functions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.778, + 0.32, + 0.792 + ], + "angle": 0, + "content": "6 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.825, + 0.925 + ], + "angle": 0, + "content": "We have introduced the ZoRD algorithm, which achieves query-efficient ZO optimization through two major contributions. 
Firstly, we have proposed a novel derived GP-based method (6) which only uses the optimization trajectory and hence eliminates the requirement for additional queries (Sec. 3.1) to estimate derivatives. Secondly, we have introduced a novel technique, i.e., dynamic virtual updates, which is made possible by our GP-based derivative estimation, to further improve the performance of our ZoRD (Sec. 3.2). Through theoretical justifications (Sec. 4) and empirical demonstrations (Sec. 5), we show that our derived GP-based derivative estimation improve over existing FD methods and that our ZoRD outperforms various ZO optimization baselines." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.47, + 0.119 + ], + "angle": 0, + "content": "7 REPRODUCIBILITY STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.134, + 0.827, + 0.178 + ], + "angle": 0, + "content": "For our theoretical results, we have discussed all our assumptions in Sec. 2.1 & Sec. 4.2, and provided our complete proofs in Appx. B. For our empirical results, we have provided our detailed experimental settings in Appx. C and included our codes in the supplementary materials (i.e., the zip file)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.192, + 0.329, + 0.206 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.216, + 0.829, + 0.26 + ], + "angle": 0, + "content": "This research is part of the programme DesCartes and is supported by the National Research Foundation, Prime Minister's Office, Singapore under its Campus for Research Excellence and Technological Enterprise (CREATE) programme." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.279, + 0.289, + 0.294 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.302, + 0.829, + 0.331 + ], + "angle": 0, + "content": "Binxin Ru, Adam D. Cobb, Arno Blaas, and Yarin Gal. Bayesopt adversarial attack. In Proc. ICLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.339, + 0.829, + 0.37 + ], + "angle": 0, + "content": "Gaurush Hiranandani, Jatin Mathur, Harikrishna Narasimhan, Mahdi Milani Fard, and Sanmi Koyejo. Optimizing black-box metrics with iterative example weighting. In Proc. ICML, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.377, + 0.826, + 0.407 + ], + "angle": 0, + "content": "Tim Salimans, Jonathan Ho, Xi Chen, and Ilya Sutskever. Evolution strategies as a scalable alternative to reinforcement learning. arXiv:1703.03864, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.415, + 0.827, + 0.445 + ], + "angle": 0, + "content": "Yurii E. Nesterov and Vladimir G. Spokoiny. Random gradient-free minimization of convex functions. Found. Comput. Math., 17(2):527-566, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.453, + 0.826, + 0.482 + ], + "angle": 0, + "content": "Shuyu Cheng, Guoqiang Wu, and Jun Zhu. On the convergence of prior-guided zeroth-order optimization algorithms. In Proc. NeurIPS, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.491, + 0.827, + 0.533 + ], + "angle": 0, + "content": "Albert S. Berahas, Liyuan Cao, Krzysztof Choromanski, and Katya Scheinberg. A theoretical and empirical comparison of gradient approximations in derivative-free optimization. Found. Comput. Math., 22(2):507-560, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.542, + 0.825, + 0.573 + ], + "angle": 0, + "content": "Niranjan Srinivas, Andreas Krause, Sham M. Kakade, and Matthias W. Seeger. Gaussian process optimization in the bandit setting: No regret and experimental design. 
In Proc. ICML, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.58, + 0.825, + 0.61 + ], + "angle": 0, + "content": "Kirthevasan Kandasamy, Akshay Krishnamurthy, Jeff Schneider, and Barnabás Póczos. Parallelised Bayesian optimisation via Thompson sampling. In Proc. AISTATS, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.618, + 0.827, + 0.647 + ], + "angle": 0, + "content": "Carl Edward Rasmussen and Christopher K. I. Williams. Gaussian processes for machine learning. Adaptive computation and machine learning. MIT Press, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.655, + 0.825, + 0.685 + ], + "angle": 0, + "content": "Abraham Flaxman, Adam Tauman Kalai, and H. Brendan McMahan. Online convex optimization in the bandit setting: Gradient descent without a gradient. In Proc. SODA, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.693, + 0.825, + 0.723 + ], + "angle": 0, + "content": "Saeed Ghadimi and Guanghui Lan. Stochastic first- and zeroth-order methods for nonconvex stochastic programming. SIAM Journal on Optimization, 23(4):2341-2368, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.731, + 0.827, + 0.76 + ], + "angle": 0, + "content": "Sijia Liu, Bhavya Kailkhura, Pin-Yu Chen, Pai-Shun Ting, Shiyu Chang, and Lisa Amini. Zeroth-order stochastic variance reduction for nonconvex optimization. In Proc. NeurIPS, 2018a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.768, + 0.825, + 0.798 + ], + "angle": 0, + "content": "Sijia Liu, Xingguo Li, Pin-Yu Chen, Jarvis D. Haupt, and Lisa Amini. Zeroth-order stochastic projected gradient descent for nonconvex optimization. In Proc. GlobalSIP, 2018b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.806, + 0.827, + 0.848 + ], + "angle": 0, + "content": "Xiangru Lian, Huan Zhang, Cho-Jui Hsieh, Yijun Huang, and Ji Liu. 
A comprehensive linear speedup analysis for asynchronous stochastic parallel optimization from zeroth-order to first-order. In Proc. NIPS, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.858, + 0.825, + 0.887 + ], + "angle": 0, + "content": "Sashank J Reddi, Suvrit Sra, Barnabas Poczos, and Alexander J Smola. Proximal stochastic methods for nonsmooth nonconvex finite-sum optimization. In Proc. NIPS, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.896, + 0.825, + 0.925 + ], + "angle": 0, + "content": "David Eriksson, Michael Pearce, Jacob R. Gardner, Ryan Turner, and Matthias Poloczek. Scalable global optimization via local Bayesian optimization. In Proc. NeurIPS, 2019." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.302, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Daniel Golovin, John Karro, Greg Kochanski, Chansoo Lee, Xingyou Song, and Qiuyi (Richard) Zhang. Gradientless descent: High-dimensional zeroth-order optimization. In Proc. ICLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.139, + 0.826, + 0.169 + ], + "angle": 0, + "content": "Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner. Gradient-based learning applied to document recognition. Proceedings of the IEEE, pages 2278-2324, 1998." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.175, + 0.827, + 0.204 + ], + "angle": 0, + "content": "Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.21, + 0.825, + 0.239 + ], + "angle": 0, + "content": "Chen Huang, Shuangfei Zhai, Pengsheng Guo, and Josh M. Susskind. Metricopt: Learning to optimize black-box evaluation metrics. In Proc. CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.829, + 0.274 + ], + "angle": 0, + "content": "Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive.ics.uci.edu/ml." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.281, + 0.825, + 0.31 + ], + "angle": 0, + "content": "Sebastian U Stich, Christian L Muller, and Bernd Gartner. Optimization of convex functions with random pursuit. SIAM Journal on Optimization, 23(2):1284-1309, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.316, + 0.826, + 0.344 + ], + "angle": 0, + "content": "Sayak Ray Chowdhury and Aditya Gopalan. On kernelized multi-armed bandits. In Proc. ICML, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.352, + 0.824, + 0.381 + ], + "angle": 0, + "content": "Zhongxiang Dai, Haibin Yu, Bryan Kian Hsiang Low, and Patrick Jaillet. Bayesian optimization meets Bayesian optimal stopping. In Proc. ICML, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.387, + 0.825, + 0.416 + ], + "angle": 0, + "content": "Zhongxiang Dai, Bryan Kian Hsiang Low, and Patrick Jaillet. Federated bayesian optimization via thompson sampling. In Proc. NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.422, + 0.827, + 0.451 + ], + "angle": 0, + "content": "Benjamin Letham, Roberto Calandra, Akshara Rai, and Eytan Bakshy. Re-examining linear embeddings for high-dimensional Bayesian optimization. In Proc. NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.458, + 0.825, + 0.486 + ], + "angle": 0, + "content": "Andrew Ilyas, Logan Engstrom, and Aleksander Madry. Prior convictions: Black-box adversarial attacks with bandits and priors. In Proc. 
ICLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.493, + 0.825, + 0.522 + ], + "angle": 0, + "content": "Florian Meier, Asier Mujika, Marcelo Matheus Gauy, and Angelika Steger. Improving gradient estimation in evolutionary strategies with past descent directions. arXiv:1910.05268, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.528, + 0.827, + 0.569 + ], + "angle": 0, + "content": "Niru Maheswaranathan, Luke Metz, George Tucker, Dami Choi, and Jascha Sohl-Dickstein. Guided evolutionary strategies: Augmenting random search with surrogate gradients. In Proc. ICML, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.578, + 0.825, + 0.606 + ], + "angle": 0, + "content": "Shuyu Cheng, Yinpeng Dong, Tianyu Pang, Hang Su, and Jun Zhu. Improving black-box adversarial attacks with a transfer-based prior. In NeurIPS, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.613, + 0.827, + 0.642 + ], + "angle": 0, + "content": "Beatrice Laurent and Pascal Massart. Adaptive estimation of a quadratic functional by model selection. Annals of Statistics, pages 1302-1338, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.648, + 0.827, + 0.677 + ], + "angle": 0, + "content": "Sayak Ray Chowdhury and Aditya Gopalan. No-regret algorithms for multi-task Bayesian optimization. In Proc. AISTATS, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.683, + 0.827, + 0.699 + ], + "angle": 0, + "content": "Stephen P. Boyd and Lieven Vandenberghe. Convex Optimization. Cambridge University Press, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.706, + 0.827, + 0.733 + ], + "angle": 0, + "content": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proc. ICLR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.74, + 0.825, + 0.769 + ], + "angle": 0, + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. 
In Proc. CVPR, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.776, + 0.825, + 0.805 + ], + "angle": 0, + "content": "Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. OpenAI Gym. arXiv:1606.01540, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.812, + 0.825, + 0.853 + ], + "angle": 0, + "content": "M. D. McKay, R. J. Beckman, and W. J. Conover. A comparison of three methods for selecting values of input variables in the analysis of output from a computer code. Technometrics, 21(2): 239-245, 1979." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.86, + 0.825, + 0.889 + ], + "angle": 0, + "content": "Jian Tan, Niv Nayman, and Mengchang Wang. CobBO: Coordinate backoff Bayesian optimization with two-stage kernels. arXiv:2101.05147, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.896, + 0.827, + 0.924 + ], + "angle": 0, + "content": "Hong Qian and Yang Yu. Derivative-free reinforcement learning: A review. Frontiers Comput. Sci., 15(6):156336, 2021." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.444, + 0.119 + ], + "angle": 0, + "content": "APPENDIX A RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.133, + 0.828, + 0.537 + ], + "angle": 0, + "content": "Various types of algorithms have been proposed in the literature to solve ZO optimization problems, e.g., direct search, Bayesian optimization (BO) and GD-based algorithms with estimated derivatives. 
Particularly, direct search, e.g., (Stich et al., 2013; Golovin et al., 2020), relies on the comparison of function values at different inputs for the updates, which can be query-inefficient in practice owing to its indirect utilization of function values. In contrast, Bayesian optimization (BO) directly utilizes the function values to model the objective function using a Gaussian process (GP) and iteratively selects the inputs to query by trading off sampling potentially optimal inputs (i.e., exploitation) and inputs that can improve the GP belief of the objective function over the entire input domain (i.e., exploration) (Chowdhury and Gopalan, 2017; Srinivas et al., 2010; Dai et al., 2019; 2020). However, in ZO optimization problems with high-dimensional input spaces, BO algorithms typically suffer from query inefficiency and large computational complexity (Rasmussen and Williams, 2006; Letham et al., 2020; Eriksson et al., 2019), which significantly hinders their real-world applications. Therefore, GD-based algorithms with estimated derivatives, which inherit the advantage of GD-based algorithms in optimizing functions with high-dimensional input spaces, have been more widely applied in practice. For these algorithms, the derivatives are commonly estimated using the finite difference (FD) approximation (which requires additional function queries) of the directional derivatives along selected directions, in which the directions can be randomly sampled unit vectors Flaxman et al. (2005), Gaussian vectors (Nesterov and Spokoiny, 2017), or standard bases (Lian et al., 2016) (Sec. 2.2). More recently, some works have incorporated a time-dependent prior (i.e., the estimated derivative in the previous iteration) into existing FD methods to improve the quality of its derivative estimation (Ilyas et al., 2019; Meier et al., 2019; Cheng et al., 2021). 
Nevertheless, such a prior is also estimated by the FD method (i.e., in the previous iteration) and can hence be biased owing to its estimation error, which may even lead to larger derivative estimation errors in practice due to compounding errors. Another line of work has taken the surrogate derivatives from other sources to help reduce the derivative estimation error of existing FD methods (Maheswaranathan et al., 2019; Cheng et al., 2019). However, these surrogate derivatives may generally be unavailable in practice. Importantly, these existing FD methods require additional function queries for every derivative estimation during optimization, which will significantly increase the query complexity of ZO optimization algorithms which employ these FD methods for derivative estimation." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.555, + 0.371, + 0.57 + ], + "angle": 0, + "content": "APPENDIX B PROOFS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.586, + 0.36, + 0.6 + ], + "angle": 0, + "content": "B.1 PROOF OF LEMMA 1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.612, + 0.827, + 0.641 + ], + "angle": 0, + "content": "According to Rasmussen and Williams (2006), if a function \\( f \\) follows from a Gaussian process, its derivative also follows a Gaussian process determined by its mean \\( \\mathbb{E}[\\cdot] \\) and covariance \\( \\mathrm{Cov}(\\cdot, \\cdot) \\), i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.643, + 0.825, + 0.66 + ], + "angle": 0, + "content": "\\[\n\\nabla f \\sim \\mathcal {G P} (\\mathbb {E} [ \\nabla f ], \\operatorname {C o v} (\\nabla f, \\nabla f)). 
\\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.67, + 0.827, + 0.712 + ], + "angle": 0, + "content": "So, to prove Lemma 1, we only need to derive the mean and the covariance of the Gaussian process above for a function \\( f \\) that is sampled from another Gaussian process, i.e., \\( f \\sim \\mathcal{GP}(\\mu(\\cdot), \\sigma^2(\\cdot, \\cdot)) \\). Specifically, for the mean \\( \\mathbb{E}[\\nabla f] \\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.411, + 0.715, + 0.825, + 0.731 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ \\nabla f ] = \\nabla \\mathbb {E} [ f ] = \\nabla \\mu . \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.825, + 0.764 + ], + "angle": 0, + "content": "where the first equality derives from the interchangeability of the expectation and derivative operation based on the Leibniz integral rule. The second equality comes from the fact that \\(\\mathbb{E}[f] = \\mu\\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.77, + 0.459, + 0.785 + ], + "angle": 0, + "content": "For the covariance \\(\\mathrm{Cov}(\\nabla f,\\nabla f)\\) , we have" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.79, + 0.824, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\operatorname {C o v} \\left(\\nabla f (\\boldsymbol {z}), \\nabla f \\left(\\boldsymbol {z} ^ {\\prime}\\right)\\right) \\stackrel {(a)} {=} \\mathbb {E} \\left[ \\left(\\nabla f (\\boldsymbol {z}) - \\mathbb {E} \\left[ \\nabla f (\\boldsymbol {z}) \\right]\\right) ^ {\\top} \\left(\\nabla f \\left(\\boldsymbol {z} ^ {\\prime}\\right) - \\mathbb {E} \\left[ \\nabla f \\left(\\boldsymbol {z} ^ {\\prime}\\right) \\right]\\right) \\right] \\\\ \\stackrel {(b)} {=} \\mathbb {E} \\left[ \\nabla \\left(f (\\boldsymbol {z}) - \\mathbb {E} [ f (\\boldsymbol {z}) ]\\right) ^ {\\top} \\nabla \\left(f \\left(\\boldsymbol {z} ^ {\\prime}\\right) - \\mathbb {E} [ f \\left(\\boldsymbol {z} ^ {\\prime}\\right) ]\\right) 
\\right] \\\\ \\stackrel {(c)} {=} \\mathbb {E} \\left[ \\partial_ {\\boldsymbol {z}} \\partial_ {\\boldsymbol {z} ^ {\\prime}} \\left(f (\\boldsymbol {z}) - \\mathbb {E} [ f (\\boldsymbol {z}) ]\\right) ^ {\\top} \\left(f \\left(\\boldsymbol {z} ^ {\\prime}\\right) - \\mathbb {E} [ f \\left(\\boldsymbol {z} ^ {\\prime}\\right) ]\\right) \\right] \\tag {11} \\\\ \\stackrel {(d)} {=} \\partial_ {\\pmb {z}} \\partial_ {\\pmb {z} ^ {\\prime}} \\mathbb {E} \\left[ \\left(f (\\pmb {z}) - \\mathbb {E} \\left[ f (\\pmb {z}) \\right]\\right) ^ {\\top} \\left(f (\\pmb {z} ^ {\\prime}) - \\mathbb {E} \\left[ f (\\pmb {z} ^ {\\prime}) \\right]\\right) \\right] \\\\ \\stackrel {(e)} {=} \\partial_ {\\boldsymbol {z}} \\partial_ {\\boldsymbol {z} ^ {\\prime}} \\sigma_ {t} ^ {2} (\\boldsymbol {z}, \\boldsymbol {z} ^ {\\prime}) . \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.149 + ], + "angle": 0, + "content": "Notably, \\((b)\\) and \\((d)\\) also derive from the interchangeability of the expectation and derivative operation based on the Leibniz integral rule. Besides, \\((e)\\) is obtained based on \\(\\operatorname{Cov}(f, f) = \\sigma^2(\\cdot, \\cdot)\\). This finally completes our proof." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.166, + 0.377, + 0.181 + ], + "angle": 0, + "content": "B.2 PROOF OF THEOREM 1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.193, + 0.825, + 0.222 + ], + "angle": 0, + "content": "To begin with, we introduce the following concentration inequality for standard multi-variate Gaussian distribution:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.227, + 0.719, + 0.243 + ], + "angle": 0, + "content": "Lemma B.1 (Laurent and Massart (2000)). Let \\(\\zeta \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I}_m)\\) and \\(\\delta \\in (0,1)\\) then" + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.253, + 0.825, + 0.287 + ], + "angle": 0, + "content": "\\[\n\\mathbb {P} \\left(\\| \\boldsymbol {\\zeta} \\| _ {2} \\leq \\sqrt {m + 2 (\\sqrt {m} + 1) \\ln (1 / \\delta)}\\right) \\geq 1 - \\delta . \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.305, + 0.825, + 0.337 + ], + "angle": 0, + "content": "Define \\(\\zeta \\triangleq \\left(\\partial \\sigma_t^2 (\\pmb {x})\\right)^{-1 / 2}\\left(\\nabla f(\\pmb {x}) - \\nabla \\mu_t(\\pmb {x})\\right)\\), according to Lemma 1, we then have that \\(\\zeta\\) follows a standard multi-variate Gaussian distribution, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.446, + 0.346, + 0.825, + 0.363 + ], + "angle": 0, + "content": "\\[\n\\zeta \\sim \\mathcal {N} (\\mathbf {0}, \\mathbf {I} _ {d}). \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.371, + 0.825, + 0.4 + ], + "angle": 0, + "content": "Let \\(\\delta \\in (0,1)\\). 
By substituting the result above into Lemma B.1, the following holds with probability of at least \\(1 - \\delta\\):" + }, + { + "type": "equation", + "bbox": [ + 0.284, + 0.408, + 0.824, + 0.537 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\nabla f (\\boldsymbol {x}) - \\nabla \\mu_ {t} (\\boldsymbol {x}) \\right\\| _ {2} = \\left\\| \\left(\\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x})\\right) ^ {- 1 / 2} \\boldsymbol {\\zeta} \\right\\| _ {2} \\\\ \\leq \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\| \\zeta \\| _ {2} \\tag {14} \\\\ \\leq \\sqrt {d + 2 (\\sqrt {d} + 1) \\ln (1 / \\delta)} \\sqrt {\\| \\partial \\sigma_ {t} ^ {2} (\\pmb {x}) \\| _ {2}} \\\\ = \\beta \\sqrt {\\| \\partial \\sigma_ {t} ^ {2} (\\pmb {x}) \\| _ {2}} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.827, + 0.586 + ], + "angle": 0, + "content": "with \\(\\beta \\triangleq \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(1 / \\delta)}\\) and the first inequality is from the Cauchy-Schwarz inequality, which completes our proof." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.603, + 0.377, + 0.617 + ], + "angle": 0, + "content": "B.3 PROOF OF THEOREM 2" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.631, + 0.442, + 0.646 + ], + "angle": 0, + "content": "We first introduce the following lemmas." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.65, + 0.827, + 0.667 + ], + "angle": 0, + "content": "Lemma B.2 (Chowdhury and Gopalan (2021)). For any \\(\\sigma \\in \\mathbb{R}\\) and any matrix \\(\\mathbf{A}\\), the following hold" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.675, + 0.825, + 0.698 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} - \\mathbf {A} ^ {\\top} \\left(\\mathbf {A} \\mathbf {A} ^ {\\top} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\mathbf {A} = \\sigma^ {2} \\left(\\mathbf {A} ^ {\\top} \\mathbf {A} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1}. 
\\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.706, + 0.825, + 0.734 + ], + "angle": 0, + "content": "Lemma B.3 (Sherman-Morrison formula). For any invertible square matrix \\(\\mathbf{A}\\) and column vectors \\(\\mathbf{u},\\mathbf{v}\\), suppose \\(\\mathbf{A} + \\mathbf{u}\\mathbf{v}^{\\top}\\) is invertible, then the following holds" + }, + { + "type": "equation", + "bbox": [ + 0.356, + 0.743, + 0.825, + 0.777 + ], + "angle": 0, + "content": "\\[\n\\left(\\mathbf {A} + \\boldsymbol {u} \\boldsymbol {v} ^ {\\top}\\right) ^ {- 1} = \\mathbf {A} ^ {- 1} - \\frac {\\mathbf {A} ^ {- 1} \\boldsymbol {u} \\boldsymbol {v} ^ {\\top} \\mathbf {A} ^ {- 1}}{1 + \\boldsymbol {v} ^ {\\top} \\mathbf {A} ^ {- 1} \\boldsymbol {u}}. \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.792, + 0.826, + 0.879 + ], + "angle": 0, + "content": "**Preparation.** We then introduce some additional notations and representations for our proof of Theorem 2. Following the common practice in (Chowdhury and Gopalan, 2021), we let the kernel \\(k\\) be defined by \\(\\psi(\\pmb{x})\\), i.e., \\(k(\\pmb{x},\\pmb{x}^{\\prime}) = \\psi(\\pmb{x})^{\\top}\\psi(\\pmb{x}^{\\prime})\\), and \\(\\phi(\\pmb{x}) \\triangleq \\nabla \\psi(\\pmb{x})\\). We then further define the \\((t\\times d)\\)-dimensional Jacobian matrix \\(\\phi_t(\\pmb{x}) \\triangleq [\\phi(\\pmb{x})^\\top \\psi(\\pmb{x}_\\tau)]_{\\tau=1}^t\\) and \\(\\Psi_t \\triangleq [\\psi(\\pmb{x}_\\tau)]_{\\tau=1}^t\\). The matrix \\(\\mathbf{K}_t\\) and the covariance matrix \\(\\partial \\sigma_t^2(\\pmb{x})\\) defined on the optimization trajectory \\(\\mathcal{D}_t\\) in our Sec. 
3.1 can be reformulated as" + }, + { + "type": "equation", + "bbox": [ + 0.312, + 0.887, + 0.824, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbf {K} _ {t} = \\boldsymbol {\\Psi} _ {t} ^ {\\top} \\boldsymbol {\\Psi} _ {t}, \\tag {17} \\\\ \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) = \\phi (\\boldsymbol {x}) ^ {\\top} \\phi (\\boldsymbol {x}) - \\phi_ {t} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\phi_ {t} (\\boldsymbol {x}). \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.131 + ], + "angle": 0, + "content": "Based on the reformulation above, define \\(\\mathbf{V}_t \\triangleq \\boldsymbol{\\Psi}_t \\boldsymbol{\\Psi}_t^\\top + \\sigma^2 \\mathbf{I}\\), we can further reformulate \\(\\partial \\sigma_t^2(\\boldsymbol{x})\\) as below" + }, + { + "type": "equation", + "bbox": [ + 0.284, + 0.134, + 0.824, + 0.261 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\stackrel {(a)} {=} \\phi (\\boldsymbol {x}) ^ {\\top} \\phi (\\boldsymbol {x}) - \\phi_ {t} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\phi_ {t} (\\boldsymbol {x}) \\\\ \\stackrel {(b)} {=} \\phi (\\boldsymbol {x}) ^ {\\top} \\phi (\\boldsymbol {x}) - \\phi (\\boldsymbol {x}) ^ {\\top} \\Psi_ {t} \\left(\\Psi_ {t} ^ {\\top} \\Psi_ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\Psi_ {t} ^ {\\top} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(c)} {=} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {I} - \\boldsymbol {\\Psi} _ {t} \\left(\\boldsymbol {\\Psi} _ {t} ^ {\\top} \\boldsymbol {\\Psi} 
_ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {\\Psi} _ {t} ^ {\\top}\\right) \\phi (\\boldsymbol {x}) \\tag {18} \\\\ \\stackrel {(d)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\Psi_ {t} \\Psi_ {t} ^ {\\top} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(e)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1} \\phi (\\boldsymbol {x}). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.268, + 0.827, + 0.3 + ], + "angle": 0, + "content": "Note that \\((b)\\) is obtained by exploiting the fact that \\(\\mathbf{K}_t = \\boldsymbol{\\Psi}_t^\\top \\boldsymbol{\\Psi}_t\\) and \\(\\phi_t(\\boldsymbol{x}) = \\phi(\\boldsymbol{x})^\\top \\boldsymbol{\\Psi}_t\\). In addition, \\((d)\\) comes from Lemma B.2 by replacing the matrix \\(\\mathbf{A}\\) in Lemma B.2 with the matrix \\(\\boldsymbol{\\Psi}_t^\\top\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.315, + 0.799, + 0.331 + ], + "angle": 0, + "content": "First Part. We then prove the first half part of our Theorem 2, i.e., the following Lemma B.4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.335, + 0.778, + 0.35 + ], + "angle": 0, + "content": "Lemma B.4 (Non-Increasing Variance Norm). For any \\( \\pmb{x} \\in \\mathcal{X} \\) and any \\( t \\geq 1 \\), we have that" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.357, + 0.825, + 0.378 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}. \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.638, + 0.412 + ], + "angle": 0, + "content": "Proof. 
Based on our additional notations and representations, we have" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.422, + 0.857, + 0.586 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\stackrel {(a)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(b)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\boldsymbol {\\Psi} _ {t - 1} \\boldsymbol {\\Psi} _ {t - 1} ^ {\\top} + \\sigma^ {2} \\mathbf {I} + \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(c)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {V} _ {t - 1} + \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(d)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) - \\sigma^ {2} \\left(1 + \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t})\\right) ^ {- 1} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(e)} {=} \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) - \\sigma^ {2} \\left(1 + \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t})\\right) ^ {- 1} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(f)} {\\preccurlyeq} \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}). 
\\tag {20} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.587, + 0.827, + 0.674 + ], + "angle": 0, + "content": "Note that (a) follows from the aforementioned definition of \\(\\mathbf{V}_t\\) and (b) comes from the fact that \\(\\Psi_t\\Psi_t^\\top = \\Psi_{t - 1}\\Psi_{t - 1}^\\top +\\psi (\\pmb {x}_t)\\psi (\\pmb {x}_t)^\\top\\). Similarly, (c) uses the definition of \\(\\mathbf{V}_{t - 1}\\). In addition, equality (d) derives from Lemma B.3 by letting \\(\\mathbf{A} = \\mathbf{V}_{t - 1}\\) and \\(\\pmb {u} = \\pmb {v} = \\psi (\\pmb {x}_t)\\) and (e) follows from the reformulation of \\(\\partial \\sigma_{t - 1}^2 (\\pmb {x})\\) in (18). Finally, (f) derives from the positive semi-definite property of \\(\\phi (\\pmb {x})^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t)\\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\phi (\\pmb {x})\\) as well as the fact that \\(1 + \\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t) > 0\\). That is, for any column vector \\(\\textbf{z}\\) we have that" + }, + { + "type": "equation", + "bbox": [ + 0.183, + 0.683, + 0.825, + 0.751 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\boldsymbol {z} ^ {\\top} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z} = \\left(\\phi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z}\\right) ^ {\\top} \\left(\\phi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z}\\right) \\\\ = \\left\\| \\phi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z} \\right\\| _ {2} ^ {2} \\tag {21} \\\\ \\geq 0. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.763, + 0.827, + 0.843 + ], + "angle": 0, + "content": "So, \\(\\phi (\\pmb {x})^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t)\\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\phi (\\pmb {x})\\) is positive semi-definite. Following a similar way, we are also able to verify that \\(1 + \\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t) > 0\\) by showing that \\(\\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t)\\geq 0\\) using the decomposition of \\(\\mathbf{V}_{t - 1}^{-1}\\) from the Principal Component Analysis (PCA). Since \\(\\partial \\sigma_t^2 (\\pmb {x})\\preccurlyeq \\partial \\sigma_{t - 1}^2 (\\pmb {x})\\) implies \\(\\left\\| \\partial \\sigma_t^2 (\\pmb {x})\\right\\| _2\\leq \\left\\| \\partial \\sigma_{t - 1}^2 (\\pmb {x})\\right\\| _2\\), we then complete the proof of the first half of our Theorem 2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.865, + 0.794, + 0.88 + ], + "angle": 0, + "content": "Second Part. To prove the rest of our Theorem 2, we firstly introduce the following lemmas." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.885, + 0.6, + 0.9 + ], + "angle": 0, + "content": "Lemma B.5. For any \\( \\pmb{x} \\in \\mathcal{X} \\) and any \\( t \\geq 1 \\), the following holds" + }, + { + "type": "equation", + "bbox": [ + 0.446, + 0.908, + 0.825, + 0.927 + ], + "angle": 0, + "content": "\\[\n\\mathbf {V} _ {t} ^ {- 1} \\preccurlyeq \\mathbf {V} _ {t - 1} ^ {- 1}. \\tag {22}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.45, + 0.119 + ], + "angle": 0, + "content": "Proof. 
For any column vector \\(z\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.345, + 0.127, + 0.824, + 0.208 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\boldsymbol {z} ^ {\\top} \\left(\\mathbf {V} _ {t} - \\mathbf {V} _ {t - 1}\\right) \\boldsymbol {z} = \\boldsymbol {z} ^ {\\top} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\boldsymbol {z} \\\\ = \\left(\\psi \\left(\\boldsymbol {x} _ {t}\\right) ^ {\\top} \\boldsymbol {z}\\right) ^ {\\top} \\left(\\psi \\left(\\boldsymbol {x} _ {t}\\right) ^ {\\top} \\boldsymbol {z}\\right) \\tag {23} \\\\ = \\left\\| \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\boldsymbol {z} \\right\\| _ {2} ^ {2} \\\\ \\geq 0. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.217, + 0.827, + 0.248 + ], + "angle": 0, + "content": "The first equality comes from the intermediate result in (20). So, \\(\\mathbf{V}_t - \\mathbf{V}_{t-1}\\) is positive semi-definite, i.e., \\(\\mathbf{V}_{t-1} \\preccurlyeq \\mathbf{V}_t\\). This can also indicate that \\(\\mathbf{V}_t^{-1} \\preccurlyeq \\mathbf{V}_{t-1}^{-1}\\), which thus completes our proof." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.255, + 0.825, + 0.27 + ], + "angle": 0, + "content": "Lemma B.6 (Lower Bound of Variance Norm). For any \\( \\pmb{x} \\in \\mathcal{X} \\) and any \\( t \\geq 1 \\), the following holds" + }, + { + "type": "equation", + "bbox": [ + 0.353, + 0.275, + 0.825, + 0.295 + ], + "angle": 0, + "content": "\\[\n1 / \\left(1 + 1 / \\sigma^ {2}\\right) \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}. \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.307, + 0.357, + 0.322 + ], + "angle": 0, + "content": "Proof. 
We firstly show that" + }, + { + "type": "equation", + "bbox": [ + 0.277, + 0.332, + 0.823, + 0.542 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\right\\| _ {2} \\stackrel {(a)} {\\leq} \\left\\| \\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\right\\| _ {2} \\left\\| \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\right\\| _ {2} \\\\ \\stackrel {(b)} {=} \\left\\| \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\right\\| _ {2} ^ {2} \\\\ \\stackrel {(c)} {=} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\\\ \\stackrel {(d)} {=} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1} \\psi (\\boldsymbol {x}) \\tag {25} \\\\ \\stackrel {(e)} {\\leq} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x}) \\\\ \\stackrel {(f)} {\\leq} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {0} ^ {- 1} \\psi (\\boldsymbol {x}) \\\\ \\stackrel {(g)} {=} \\psi (\\boldsymbol {x}) ^ {\\top} \\psi (\\boldsymbol {x}) / \\sigma^ {2} \\\\ \\stackrel {(h)} {=} 1 / \\sigma^ {2}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.551, + 0.827, + 0.623 + ], + "angle": 0, + "content": "Note that \\((a)\\) derives from the Cauchy-Schwarz inequality. As for \\((b)\\) and \\((c)\\), they have exploited the fact that \\(\\left(\\mathbf{V}_t^{-1/2}\\psi(\\boldsymbol{x})\\right)^\\top = \\psi(\\boldsymbol{x})^\\top\\mathbf{V}_t^{-1/2}\\) and \\(\\psi(\\boldsymbol{x})^\\top\\mathbf{V}_t^{-1/2}\\) is a row vector. In addition, \\((e)\\) follows from Lemma B.5. 
Finally, \\((g)\\) results from \\(\\mathbf{V}_0^{-1} = \\mathbf{I}/\\sigma^2\\) and \\((h)\\) derives from the assumption that \\(k(\\boldsymbol{x},\\boldsymbol{x}) \\leq 1\\) (\\(\\forall \\boldsymbol{x} \\in \\mathcal{X}\\)) in Sec. 2.1. Alternatively, we can restate the result above as" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.63, + 0.825, + 0.65 + ], + "angle": 0, + "content": "\\[\n\\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\preccurlyeq \\sigma^ {- 2} \\mathbf {I}. \\tag {26}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.664, + 0.804, + 0.68 + ], + "angle": 0, + "content": "We then complete our proof on the first inequality in Lemma B.6 using the following inequality:" + }, + { + "type": "equation", + "bbox": [ + 0.242, + 0.688, + 0.824, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\stackrel {(a)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {V} _ {t - 1} + \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(b)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left[ \\mathbf {V} _ {t - 1} ^ {1 / 2} \\left(\\mathbf {I} + \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1 / 2}\\right) \\mathbf {V} _ {t - 1} ^ {1 / 2} \\right] ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(c)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\left(\\mathbf {I} + \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\psi \\left(\\boldsymbol {x} _ {t}\\right) \\psi \\left(\\boldsymbol {x} _ {t}\\right) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1 / 2}\\right) ^ {- 1} \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\phi (\\boldsymbol {x}) \\tag {27} \\\\ \\stackrel {(d)} {\\succcurlyeq} \\sigma^ {2} \\phi 
(\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) / (1 + 1 / \\sigma^ {2}) \\\\ \\stackrel {(e)} {=} \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) / (1 + 1 / \\sigma^ {2}) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.825, + 0.826, + 0.856 + ], + "angle": 0, + "content": "where \\((a)\\) derives from (20) and \\((c)\\) comes from the inversion of matrix product. Finally \\((d)\\) follows from the result in (26) and \\((e)\\) exploits the reformulation of \\(\\partial \\sigma_{t - 1}^2 (\\pmb {x})\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.869, + 0.784, + 0.884 + ], + "angle": 0, + "content": "According to Lemma B.4 and Lemma B.6, the following holds for any \\( \\pmb{x} \\in \\mathcal{X} \\) and any \\( t \\geq 1 \\)," + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.891, + 0.824, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{1 + 1 / \\sigma^ {2}} \\leq \\frac {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}}{\\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\leq 1. 
\\tag {28}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.105, + 0.633, + 0.119 + ], + "angle": 0, + "content": "Based on the definition of \\( r \\) in our Theorem 2, we therefore also have" + }, + { + "type": "equation", + "bbox": [ + 0.277, + 0.123, + 0.825, + 0.151 + ], + "angle": 0, + "content": "\\[\nr \\triangleq \\max _ {\\boldsymbol {x} \\in \\mathcal {X}, t \\geq 1} \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} / \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\in \\left[ 1 / \\sqrt {1 + 1 / \\sigma^ {2}}, 1 \\right]. \\tag {29}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.164, + 0.542, + 0.179 + ], + "angle": 0, + "content": "As a result, for every iteration \\(t\\) of our Algo. 2, we have" + }, + { + "type": "equation", + "bbox": [ + 0.341, + 0.183, + 0.824, + 0.283 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\sqrt {\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\| _ {2}} \\leq r \\sqrt {\\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\\\ \\leq r ^ {t} \\sqrt {\\left\\| \\partial \\sigma_ {0} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\tag {30} \\\\ = r ^ {t} \\sqrt {\\| \\partial_ {z} \\partial_ {z ^ {\\prime}} k (z , z ^ {\\prime}) | _ {z = z ^ {\\prime} = x} \\| _ {2}} \\\\ \\leq r ^ {t} \\kappa \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.289, + 0.827, + 0.334 + ], + "angle": 0, + "content": "where the last inequality derives from our assumption of \\(\\| \\partial_z\\partial_{z'}k(z,z')|_{z = z' = x}\\| _2\\leq \\kappa^2\\) (\\(\\forall \\pmb {x}\\in \\mathcal{X}\\)) in our Sec. 2.1. 
By substituting the result above into our Theorem 1, we complete our proof of Theorem 2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.349, + 0.377, + 0.363 + ], + "angle": 0, + "content": "B.4 PROOF OF THEOREM 3" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.375, + 0.825, + 0.406 + ], + "angle": 0, + "content": "**Preparation.** Following the definition of the derivative mapping on the true derivative \\(\\nabla f(\\boldsymbol{x}_{t,\\tau})\\) in (8), we defined the following derivative mapping on our estimated derivative \\(\\nabla \\mu_{t-1}(\\boldsymbol{x}_{t,\\tau})\\):" + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.408, + 0.825, + 0.442 + ], + "angle": 0, + "content": "\\[\n\\widehat {G} _ {t, \\tau} \\triangleq \\frac {\\boldsymbol {x} _ {t , \\tau} - \\boldsymbol {x} _ {t , \\tau + 1}}{\\eta_ {t , \\tau}} = \\frac {\\boldsymbol {x} _ {t , \\tau} - \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x} _ {t , \\tau} - \\eta_ {t , \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t , \\tau}))}{\\eta_ {t , \\tau}}. \\tag {31}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.445, + 0.694, + 0.46 + ], + "angle": 0, + "content": "By re-arranging it, we have the following update rule that has reformulated (7):" + }, + { + "type": "equation", + "bbox": [ + 0.399, + 0.465, + 0.825, + 0.484 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} _ {t, \\tau + 1} = \\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\widehat {G} _ {t, \\tau}. \\tag {32}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.493, + 0.827, + 0.509 + ], + "angle": 0, + "content": "Based on our definition of the derivative mappings in (31) and (8), we introduce the following lemmas:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.524, + 0.826, + 0.554 + ], + "angle": 0, + "content": "Lemma B.7 (General Projection Inequalities). 
Given \\(\\mathcal{P}_{\\mathcal{X}}(\\boldsymbol{x}) = \\arg \\min_{\\boldsymbol{z} \\in \\mathcal{X}} \\| \\boldsymbol{x} - \\boldsymbol{z} \\|_2^2 / 2\\) and domain \\(\\mathcal{X}\\), for any \\(\\boldsymbol{x}, \\boldsymbol{x}'\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.557, + 0.825, + 0.575 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\boldsymbol {x} - \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\boldsymbol {x} - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2}, \\tag {33}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.577, + 0.825, + 0.595 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} \\leq \\left\\| \\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime} \\right\\| _ {2}. \\tag {34}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.61, + 0.825, + 0.64 + ], + "angle": 0, + "content": "Proof. For (33), as \\(\\mathcal{P}_{\\mathcal{X}}(\\pmb{x}^{\\prime}) \\in \\mathcal{X} (\\forall \\pmb{x}^{\\prime})\\) and \\(\\mathcal{P}_{\\mathcal{X}}(\\pmb{x}) = \\arg \\min_{\\pmb{z} \\in \\mathcal{X}} \\| \\pmb{x} - \\pmb{z} \\|_2^2 / 2\\), we then naturally have (33)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.648, + 0.827, + 0.692 + ], + "angle": 0, + "content": "For (34), since \\(\\mathcal{P}_{\\mathcal{X}}(\\pmb{x})\\) is the optimum of \\(h(\\pmb{z}) = \\| \\pmb{x} - \\pmb{z}\\|_2^2 / 2\\), according to the optimality condition of the convex projection function \\(h(\\pmb{z})\\) within the domain \\(\\pmb{z} \\in \\mathcal{X}\\) (Boyd and Vandenberghe, 2014), we then have the following inequality for any \\(\\mathcal{P}_{\\mathcal{X}}(\\pmb{x}') \\in \\mathcal{X}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.695, + 0.825, + 0.713 + ], + "angle": 0, + "content": "\\[\n\\nabla h (\\boldsymbol {z}) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) - \\boldsymbol {z}\\right) \\geq 0. \\tag {35}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.718, + 0.696, + 0.734 + ], + "angle": 0, + "content": "By taking \\(\\nabla h(z) = z - x\\) with \\(z = \\mathcal{P}_{\\mathcal{X}}(x)\\) into the inequality above, we have" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.738, + 0.825, + 0.757 + ], + "angle": 0, + "content": "\\[\n\\left(\\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\boldsymbol {x}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) - \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x})\\right) \\geq 0. 
\\tag {36}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.761, + 0.726, + 0.777 + ], + "angle": 0, + "content": "By exchanging \\( \\pmb{x} \\) and \\( \\pmb{x}' \\) in the result above, we achieve the following similar result:" + }, + { + "type": "equation", + "bbox": [ + 0.356, + 0.781, + 0.825, + 0.8 + ], + "angle": 0, + "content": "\\[\n\\left(\\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right) - \\boldsymbol {x} ^ {\\prime}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\geq 0. \\tag {37}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.805, + 0.356, + 0.819 + ], + "angle": 0, + "content": "By summing (36) and (37)," + }, + { + "type": "equation", + "bbox": [ + 0.307, + 0.824, + 0.825, + 0.844 + ], + "angle": 0, + "content": "\\[\n\\left(\\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\geq \\left\\| \\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}. 
\\tag {38}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.847, + 0.643, + 0.862 + ], + "angle": 0, + "content": "Based on the Cauchy-Schwarz inequality, we finally achieve (34) using" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.866, + 0.825, + 0.906 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2} \\leq \\left(\\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\tag {39} \\\\ \\leq \\| \\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime} \\| _ {2} \\| \\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.909, + 0.726, + 0.927 + ], + "angle": 0, + "content": "where both sides need to be divided by \\(\\| \\mathcal{P}_{\\mathcal{X}}(\\pmb {x}) - \\mathcal{P}_{\\mathcal{X}}(\\pmb{x}^{\\prime})\\|_{2}\\) to complete our proof." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.12 + ], + "angle": 0, + "content": "Lemma B.8 (Inequalities for Derivative Mappings). 
Given (31) and (8), for every \\(t\\) and \\(\\tau\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.384, + 0.132, + 0.825, + 0.16 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\leq \\nabla \\mu_ {t - 1} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} \\widehat {G} _ {t, \\tau}, \\tag {40}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.163, + 0.825, + 0.179 + ], + "angle": 0, + "content": "\\[\n\\left\\| G _ {t, \\tau} \\right\\| _ {2} \\leq \\left\\| \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) \\right\\| _ {2}, \\tag {41}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.182, + 0.825, + 0.209 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\widehat {G} _ {t, \\tau} - G _ {t, \\tau} \\right\\| _ {2} \\leq \\| \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau}) \\| _ {2}. \\tag {42}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.239, + 0.61, + 0.255 + ], + "angle": 0, + "content": "Proof. 
For (40), let \\(\\widehat{\\pmb{x}}_{t,\\tau} = \\pmb{x}_{t,\\tau} - \\eta_{t,\\tau}\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau})\\), we then have" + }, + { + "type": "equation", + "bbox": [ + 0.262, + 0.269, + 0.824, + 0.363 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) \\right\\| _ {2} ^ {2} - \\left(\\boldsymbol {x} _ {t, \\tau} - \\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right)\\right) \\\\ \\stackrel {(a)} {=} \\left\\| \\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1} \\right\\| _ {2} ^ {2} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} \\left(\\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1}\\right) \\tag {43} \\\\ \\stackrel {(b)} {=} \\eta_ {t, \\tau} ^ {2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} - \\eta_ {t, \\tau} ^ {2} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} \\\\ \\begin{array}{c} \\stackrel {(c)} {\\leq} 0 \\end{array} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.377, + 0.825, + 0.424 + ], + "angle": 0, + "content": "where \\((a)\\) results from the fact that \\(\\pmb{x}_{t,\\tau +1} = \\mathcal{P}_{\\mathcal{X}}\\left(\\pmb{x}_{t,\\tau} - \\eta_{t,\\tau}\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau})\\right)\\) based on our (7) and \\((b)\\) derives from the definition of \\(\\widehat{G}_{t,\\tau}\\) in (31). 
In addition, \\((c)\\) is based on the following result by substituting \\(\\pmb {x} = \\pmb{x}_{t,\\tau}\\) and \\(\\pmb{x}^{\\prime} = \\widehat{\\pmb{x}}_{t,\\tau}\\) into (38):" + }, + { + "type": "equation", + "bbox": [ + 0.252, + 0.437, + 0.825, + 0.457 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) \\right\\| _ {2} ^ {2} - \\left(\\boldsymbol {x} _ {t, \\tau} - \\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right)\\right) \\leq 0. \\tag {44}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.469, + 0.821, + 0.485 + ], + "angle": 0, + "content": "Finally, by dividing \\(\\eta_{t,\\tau}^2\\) on the both sides of the last inequality in (43), we finish the proof for (40)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.491, + 0.825, + 0.52 + ], + "angle": 0, + "content": "For (41), following the same proof above, we can also obtain the following inequality for the projected derivative \\( G_{t,\\tau} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.533, + 0.825, + 0.553 + ], + "angle": 0, + "content": "\\[\n\\left\\| G _ {t, \\tau} \\right\\| _ {2} ^ {2} \\leq \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} G _ {t, \\tau} \\leq \\left\\| \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) \\right\\| _ {2} \\left\\| G _ {t, \\tau} \\right\\| _ {2}. \\tag {45}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.565, + 0.787, + 0.582 + ], + "angle": 0, + "content": "We complete the proof for (41) by dividing \\(\\| G_{t,\\tau}\\| _2\\) on the both sides of the inequality above." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.589, + 0.515, + 0.606 + ], + "angle": 0, + "content": "For (42), define \\( \\boldsymbol{x}_{t,\\tau +1}^{\\prime}\\triangleq \\boldsymbol{x}_{t,\\tau} - \\eta_{t,\\tau}G_{t,\\tau} \\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.62, + 0.824, + 0.781 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\widehat {G} _ {t, \\tau} - G _ {t, \\tau} \\right\\| _ {2} \\stackrel {(a)} {=} \\frac {1}{\\eta_ {t , \\tau}} \\left\\| \\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1} - \\left(\\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1} ^ {\\prime}\\right) \\right\\| _ {2} \\\\ \\stackrel {(b)} {=} \\frac {1}{\\eta_ {t , \\tau}} \\left\\| \\boldsymbol {x} _ {t, \\tau + 1} - \\boldsymbol {x} _ {t, \\tau + 1} ^ {\\prime} \\right\\| _ {2} \\\\ \\stackrel {(c)} {=} \\frac {1}{\\eta_ {t , \\tau}} \\| \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau})\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla f (\\boldsymbol {x} _ {t, \\tau})\\right) \\| _ {2} \\tag {46} \\\\ \\stackrel {(d)} {\\leq} \\frac {1}{\\eta_ {t , \\tau}} \\left\\| \\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - (\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla f (\\boldsymbol {x} _ {t, \\tau})) \\right\\| _ {2} \\\\ \\stackrel {(e)} {=} \\left\\| \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau}) \\right\\| _ {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.795, + 0.825, + 0.825 + ], + "angle": 0, + "content": "where \\((a)\\) comes from the definition of \\(\\widehat{G}_{t,\\tau}\\) and \\(G_{t,\\tau}\\) in (31) and (8), respectively. In addition, \\((c)\\) derives from (7) and (8). 
Finally, \\((d)\\) results from (34)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.857, + 0.825, + 0.887 + ], + "angle": 0, + "content": "Proof. Since the objective function \\( f \\) is assumed to be \\( L_{s} \\)-Lipschitz smooth (Sec. 4.2), we have the following inequality for any \\( x_{t,\\tau} \\in \\mathcal{X} \\) in our ZoRD algorithm:" + }, + { + "type": "equation", + "bbox": [ + 0.232, + 0.899, + 0.825, + 0.93 + ], + "angle": 0, + "content": "\\[\nf \\left(\\boldsymbol {x} _ {t, \\tau + 1}\\right) - f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) \\leq \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} \\left(\\boldsymbol {x} _ {t, \\tau + 1} - \\boldsymbol {x} _ {t, \\tau}\\right) + \\frac {L _ {s}}{2} \\| \\boldsymbol {x} _ {t, \\tau + 1} - \\boldsymbol {x} _ {t, \\tau} \\| _ {2} ^ {2}. \\tag {47}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.827, + 0.141 + ], + "angle": 0, + "content": "Let \\(\\delta' \\in (0,1)\\). 
Define \\(\\beta \\triangleq \\sqrt{d + 2(\\sqrt{d} + 1) \\ln(1 / \\delta')}\\), by substituting (32) into the inequality above, the following inequality holds with probability of at least \\(1 - \\delta'\\):" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.146, + 0.341, + 0.163 + ], + "angle": 0, + "content": "\\[\nf (\\boldsymbol {x} _ {t, \\tau + 1}) - f (\\boldsymbol {x} _ {t, \\tau})\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.166, + 0.479, + 0.198 + ], + "angle": 0, + "content": "\\[\n\\stackrel {(a)} {\\leq} - \\eta_ {t, \\tau} \\nabla f (\\pmb {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.2, + 0.757, + 0.231 + ], + "angle": 0, + "content": "\\[\n\\stackrel {(b)} {=} \\eta_ {t, \\tau} \\left(\\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau})\\right) ^ {\\top} \\widehat {G} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.233, + 0.789, + 0.295 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\stackrel {(c)} {=} \\eta_ {t, \\tau} \\left[ (\\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau})) ^ {\\top} (\\widehat {G} _ {t, \\tau} - G _ {t, \\tau}) + (\\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau})) ^ {\\top} G _ {t, \\tau} \\right] \\\\ - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\\\ \\end{array}\n\\]" + 
}, + { + "type": "equation", + "bbox": [ + 0.181, + 0.297, + 0.818, + 0.361 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\stackrel {(d)} {\\leq} \\eta_ {t, \\tau} \\left[ \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\left\\| \\widehat {G} _ {t, \\tau} - G _ {t, \\tau} \\right\\| _ {2} + \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\| G _ {t, \\tau} \\| _ {2} \\right] \\\\ - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.364, + 0.746, + 0.427 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\stackrel {(e)} {\\leq} \\eta_ {t, \\tau} \\left[ \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} ^ {2} + \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\| \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\right] \\\\ - \\frac {2 \\eta_ {t , \\tau} - L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.431, + 0.825, + 0.472 + ], + "angle": 0, + "content": "\\[\n\\stackrel {(f)} {\\leq} \\eta_ {t, \\tau} \\kappa^ {2} \\beta^ {2} r ^ {2 t} + \\eta_ {t, \\tau} L _ {c} \\kappa \\beta r ^ {t} - \\frac {\\eta_ {t , \\tau}}{2} \\| \\widehat {G} _ {t, \\tau} \\| _ {2} ^ {2} \\tag {48}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.472, + 0.826, + 0.516 + ], + "angle": 0, + "content": "where \\((d)\\) derives from the Cauchy-Schwarz inequality and \\((e)\\) follows from the Lemma B.7. 
Finally, \\((f)\\) results from the bounded derivative estimation error in Theorem 2 and the fact that \\(f\\) is \\(L_{c}\\)-Lipschitz continuous (i.e., \\(\\| \\nabla f(\\pmb{x})\\|_2 \\leq L_c\\) for any \\(\\pmb{x} \\in \\mathcal{X}\\)) and \\(\\eta_{t,\\tau} \\leq 1 / L_s (\\forall \\tau)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.52, + 0.825, + 0.551 + ], + "angle": 0, + "content": "For every iteration \\(t\\) of our ZoRD algorithm, we in fact will apply the virtual updates (7) for \\(V_{t}\\) times (see Algo. 2). Therefore, for probability \\(\\geq 1 - V_t\\delta'\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.557, + 0.825, + 0.642 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {1}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\leq \\frac {2}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\left[ f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - f \\left(\\boldsymbol {x} _ {t, \\tau + 1}\\right) + \\eta_ {t, \\tau} \\left(\\kappa^ {2} \\beta^ {2} r ^ {2 t} + L _ {c} \\kappa \\beta r ^ {t}\\right) \\right] \\tag {49} \\\\ = \\frac {2}{V _ {t}} \\left[ f \\left(\\boldsymbol {x} _ {t - 1}\\right) - f \\left(\\boldsymbol {x} _ {t}\\right) \\right] + \\left(\\frac {2}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau}\\right) \\left(\\kappa^ {2} \\beta^ {2} r ^ {2 t} + L _ {c} \\kappa \\beta r ^ {t}\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.744, + 0.661 + ], + "angle": 0, + "content": "where the first inequality results from (48) by re-arranging it and then summing it up over \\(\\tau\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.825, + 0.71 + ], + "angle": 0, + "content": "However, in order to prove the convergence of our ZoRD algorithm to a stationary point, we need to consider the derivative mapping of \\( G_{t,\\tau} \\) instead (refer to our Sec. 4.2). 
So, for any \\( \\tau \\), we propose the following inequality:" + }, + { + "type": "equation", + "bbox": [ + 0.328, + 0.716, + 0.824, + 0.826 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| G _ {t, \\tau} \\right\\| _ {2} = \\left\\| G _ {t, \\tau} - \\widehat {G} _ {t, \\tau} + \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\\\ \\leq \\left\\| G _ {t, \\tau} - \\widehat {G} _ {t, \\tau} \\right\\| _ {2} + \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\tag {50} \\\\ \\leq \\left\\| \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau}) \\right\\| _ {2} + \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\\\ \\leq \\kappa \\beta r ^ {t} + \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.831, + 0.825, + 0.86 + ], + "angle": 0, + "content": "where the first inequality is from the triangle inequality and the second inequality comes from (42). Finally, by taking the result above into (49), we have" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.866, + 0.825, + 0.921 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\frac {2}{V _ {t}} \\left[ f \\left(\\boldsymbol {x} _ {t - 1}\\right) - f \\left(\\boldsymbol {x} _ {t}\\right) \\right] + \\left(\\frac {2}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau}\\right) \\left(\\kappa^ {2} \\beta^ {2} r ^ {2 t} + L _ {c} \\kappa \\beta r ^ {t}\\right) + \\kappa \\beta r ^ {t}. 
\\tag {51}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "Then, substituting \\( V_{t} = V \\) and \\( \\eta_{t,\\tau} = \\eta \\) for any \\( t,\\tau \\) into the result above, the following inequality holds with probability of at least \\( 1 - VT\\delta^{\\prime} \\) when \\( r < 1 \\):" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.143, + 0.824, + 0.302 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\eta \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\stackrel {(a)} {\\leq} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\left(\\frac {2 (f (\\boldsymbol {x} _ {t - 1} - f (\\boldsymbol {x} _ {t}))}{V} + 2 \\eta \\kappa^ {2} \\beta^ {2} r ^ {2 t} + (2 \\eta L _ {c} + 1) \\kappa \\beta r ^ {t}\\right) \\\\ \\stackrel {(b)} {\\leq} \\frac {2}{T V} \\left[ f (\\boldsymbol {x} _ {0}) - f (\\boldsymbol {x} _ {T}) \\right] + \\frac {2 \\eta (1 - r ^ {2 T})}{T (1 - r ^ {2})} \\kappa^ {2} \\beta^ {2} r ^ {2} \\\\ + \\frac {(2 \\eta L _ {c} + 1) (1 - r ^ {T})}{T (1 - r)} \\kappa \\beta r \\\\ \\stackrel {(c)} {\\leq} \\frac {2}{T V} \\left[ f \\left(\\boldsymbol {x} _ {0}\\right) - f \\left(\\boldsymbol {x} ^ {*}\\right) \\right] + \\frac {2 \\eta \\kappa^ {2} \\beta^ {2} r ^ {2}}{T \\left(1 - r ^ {2}\\right)} + \\frac {\\left(2 \\eta L _ {c} + 1\\right) \\kappa \\beta r}{T (1 - r)}. 
\\tag {52} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.302, + 0.825, + 0.347 + ], + "angle": 0, + "content": "Note that \\((b)\\) derives from the summation of the geometric sequence about \\(r\\) and \\((c)\\) comes from \\(\\pmb{x}^{*}\\triangleq \\arg \\min_{\\pmb{x}\\in \\mathcal{X}}f(\\pmb {x})\\). When \\(r = 1\\), the following holds with probability of at least \\(\\geq 1 - VT\\delta^{\\prime}\\) accordingly:" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.355, + 0.823, + 0.426 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\eta \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\left(\\frac {2 (f (\\boldsymbol {x} _ {t - 1} - f (\\boldsymbol {x} _ {t}))}{V} + 2 \\eta \\kappa^ {2} \\beta^ {2} r ^ {2 t} + (2 \\eta L _ {c} + 1) \\kappa \\beta r ^ {t}\\right) \\tag {53} \\\\ = \\frac {2}{T V} \\left[ f \\left(\\boldsymbol {x} _ {0}\\right) - f \\left(\\boldsymbol {x} _ {T}\\right) \\right] + 2 \\eta \\kappa^ {2} \\beta^ {2} + (2 \\eta L _ {c} + 1) \\kappa \\beta . 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.44, + 0.729, + 0.457 + ], + "angle": 0, + "content": "Finally, let \\(\\delta = VT\\delta' \\in (0,1)\\), the following holds with probability of at least \\(1 - \\delta\\)" + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.465, + 0.824, + 0.523 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\min _ {t \\leq T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\tag {54} \\\\ \\leq ① + ② \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.534, + 0.711, + 0.559 + ], + "angle": 0, + "content": "where 1 and 2 can be defined as below with \\(\\alpha \\triangleq \\kappa \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(VT / \\delta)}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.268, + 0.566, + 0.824, + 0.632 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} ① = \\frac {2 / \\eta}{T V} [ f (\\boldsymbol {x} _ {0}) - f (\\boldsymbol {x} _ {T}) ] \\\\ ② = \\left\\{ \\begin{array}{l l} 2 \\alpha^ {2} r ^ {2} / [ T (1 - r ^ {2}) ] + (2 L _ {c} + 1 / \\eta) \\alpha r / [ T (1 - r) ] & (r < 1), \\\\ 2 \\alpha^ {2} + (2 L _ {c} + 1 / \\eta) \\alpha & (r = 1). \\end{array} \\right. 
\\tag {55} \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.525, + 0.119 + ], + "angle": 0, + "content": "APPENDIX C EXPERIMENTAL SETTINGS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.134, + 0.363, + 0.149 + ], + "angle": 0, + "content": "C.1 GENERAL SETTINGS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.159, + 0.827, + 0.244 + ], + "angle": 0, + "content": "Derived GP. Among all our experiments in Sec. 5, to apply the derivative estimation in Sec. 3.1 for every iteration \\(t\\) and every step \\(\\tau\\) of our ZoRD algorithm, we use the derived GP (4) based on the Matérn kernel with \\(\\nu = 2.5\\) and fit this derived GP using 150 queries that achieves the smallest Euclidean distance with input \\(\\boldsymbol{x}_{t,\\tau}\\) from the optimization trajectory. This is because we only need to model the objective function \\(f\\) in the vicinity of input \\(\\boldsymbol{x}_{t,\\tau}\\) precisely rather than the entire domain, so as to achieve an accurate derivative estimation at input \\(\\boldsymbol{x}_{t,\\tau}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.258, + 0.828, + 0.428 + ], + "angle": 0, + "content": "Confidence Threshold. Among all our experiments in Sec. 5, the confidence threshold \\( c \\) of our dynamic virtual updates (Sec. 3.2) is set to be 0.35 in order to realize a good trade-off between query efficiency and accurate derivative estimation in practice, which can already allow our ZoRD to achieve compelling empirical results consistently (see our Sec. 5). In light of this, \\( c = 0.35 \\) would be a reasonably good choice in practice, especially when there is no prior knowledge about the objective functions. 
When we have prior knowledge about the smoothness of the objective functions, we can likely make a better choice for \\( c \\): Intuitively, smooth objective functions usually can be modeled by the Gaussian process effectively (Rasmussen and Williams, 2006), so an accurate derivative estimation from our derived GP is also likely to be achieved. In this scenario, a large confidence threshold can be applied to fully exploit the benefit of our derivative estimation that is free from the requirement for additional queries and consequently results in an improved query efficiency in practice." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.44, + 0.825, + 0.538 + ], + "angle": 0, + "content": "Baselines. In addition, among all our experiments in Sec. 5, we consistently use \\( n = 10 \\), \\( \\lambda = 0.01 \\) and directions \\( \\{u_i\\}_{i=1}^n \\) that are randomly sampled from a unit sphere for the derivative estimation of the FD method (2) applied in the RGF and PRGF algorithm. Moreover, following the common practice of (Berahas et al., 2022; Cheng et al., 2021), we conduct orthogonalization on these randomly selected directions via the Gram-Schmidt procedure. As for the ES algorithm (e.g., the one applied in Salimans et al., 2017), we apply the same \\( n \\), \\( \\lambda \\) and \\( \\{u_i\\}_{i=1}^n \\) in RGF and PRGF for their update in every iteration." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.553, + 0.825, + 0.598 + ], + "angle": 0, + "content": "Domain Transformation. Following the practice that has been used in (Eriksson et al., 2019), for all our experiments, we firstly re-scale the input domains into \\([0,10]^d\\) to ease the optimization and then re-scale the updated inputs back to the original domains for querying." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.612, + 0.406, + 0.627 + ], + "angle": 0, + "content": "C.2 SYNTHETIC EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.825, + 0.666 + ], + "angle": 0, + "content": "Let input \\(\\pmb{x} = [x_i]_{i=1}^d\\), the Ackley and Levy function applied in our synthetic experiments are given below," + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.673, + 0.834, + 0.778 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} f (\\pmb {x}) = - 2 0 \\exp \\left(- 0. 2 \\sqrt {\\frac {1}{d} \\sum_ {i = 1} ^ {d} x _ {i} ^ {2}}\\right) - \\exp (\\frac {1}{d} \\sum_ {i = 1} ^ {d} \\cos (2 \\pi x _ {i})) + 2 0 + \\exp (1), (\\mathrm {A c k l e y}) \\\\ f (\\boldsymbol {x}) = \\sin^ {2} \\left(\\pi w _ {1}\\right) + \\sum_ {i = 1} ^ {d - 1} \\left(w _ {i} - 1\\right) ^ {2} \\left[ 1 + 1 0 \\sin^ {2} \\left(\\pi w _ {i} + 1\\right) \\right] + \\left(w _ {d} - 1\\right) ^ {2} \\left[ 1 + \\sin^ {2} \\left(2 \\pi w _ {d}\\right) \\right] (\\text {L e v y}) \\tag {56} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.779, + 0.827, + 0.875 + ], + "angle": 0, + "content": "where \\( w_{i} = 1 + (x_{i} - 1) / 4 \\) for any \\( i = 1, \\dots, d \\), Ackley function achieves its minimum (i.e., \\( \\min f(\\pmb{x}) = 0 \\)) at \\( \\pmb{x}^{*} = \\mathbf{0} \\), and Levy function achieves its minimum (i.e., \\( \\min f(\\pmb{x}) = 0 \\)) at \\( \\pmb{x}^{*} = \\mathbf{1} \\). Note that the Ackley and Levy function for the synthetic experiments in our Sec. 5.2 are defined within the domain \\( [-20, 20]^d \\) and \\( [-7.5, 7.5]^d \\), respectively. To give a better understanding of these two synthetic functions, we provide a 3D illustration of these two synthetic functions with \\( d = 2 \\) in our Fig. 5. As shown in Fig. 5, these two synthetic functions are highly nonconvex and therefore have local minimums within their domains." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "To compare our ZoRD algorithm with other ZO/FO optimization baselines in Sec. 5.2, we firstly employ TuRBO of 300 queries to find a good initialization for all other ZO/FO optimization algorithms in Fig. 3 because of the nonconvexity of these two synthetic functions as shown in Fig. 5. We then" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.203, + 0.128, + 0.48, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.26, + 0.298, + 0.444, + 0.312 + ], + "angle": 0, + "content": "(a) Ackley function \\((d = 2)\\)" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.125, + 0.783, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.562, + 0.298, + 0.733, + 0.312 + ], + "angle": 0, + "content": "(b) Levy function \\((d = 2)\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.324, + 0.761, + 0.339 + ], + "angle": 0, + "content": "Figure 5: The 3D illustration of Ackley and Levy synthetic function with \\( d = 2 \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.365, + 0.828, + 0.422 + ], + "angle": 0, + "content": "apply these ZO/FO optimization algorithms with a query budget of 200 for \\( d = 20,40 \\), and a query budget of 400 for \\( d = 100 \\) to compare their query efficiency. We use the same Adam optimizer (Kingma and Ba, 2015) with a learning rate of 0.1 and exponential decay rates of 0.9, 0.999 for RGF, PRGF, GD, and our ZoRD algorithm, for faster convergence compared with standard GD." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.438, + 0.475, + 0.451 + ], + "angle": 0, + "content": "C.3 BLACK-BOX ADVERSARIAL ATTACK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.463, + 0.828, + 0.59 + ], + "angle": 0, + "content": "For the black-box adversarial attack experiment on the MNIST dataset, we use the same fully trained deep neural networks from (Cheng et al., 2021) and adopt a \\(L_{\\infty}\\) constraint of \\(\\| x\\|_{\\infty} \\leq 0.3\\) on the input perturbation \\(x\\). For the black-box adversarial attack experiment on the CIFAR-10 dataset, we fully train a ResNet-18 (He et al., 2016) on CIFAR-10 using stochastic gradient descent (SGD) with a cosine annealed learning rate from 0.1 to 0, a momentum of 0.9 and a weight decay of \\(5 \\times 10^{-4}\\) for 200 epochs, and adopt a \\(L_{\\infty}\\) constraint of \\(\\| x\\|_{\\infty} \\leq 0.2\\) on the input perturbation \\(x\\). Note that we use the same loss function as (Cheng et al., 2021) for these two experiments. Meanwhile, to apply RGF, PRGF and our ZoRD, we adopt Adam optimizer with the same learning rate of 0.5 and the same exponential decay rates of 0.9, 0.999." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.606, + 0.548, + 0.619 + ], + "angle": 0, + "content": "C.4 NON-DIFFERENTIABLE METRIC OPTIMIZATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.631, + 0.827, + 0.841 + ], + "angle": 0, + "content": "The Covertype dataset used in Sec. 5.4 is a classification dataset consisting of 581,012 samples from 7 different categories. Each sample from this dataset is a 54-dimensional vector of integers. In this experiment, we randomly split the dataset into training and test sets with each containing 290,506 samples. The MLP classifier applied in Sec. 5.4 consists of 2 layers with 30 and 14 hidden neurons respectively, leading to 2189 parameters in total (i.e., \\( d = 2189 \\)). 
We first train this MLP classifier on the training dataset of Covertype using the L-BFGS algorithm with the cross-entropy loss function for 300 epochs, and then apply ZO optimization algorithms to fine-tune our trained MLP directly on the non-differentiable metrics (i.e., using these metrics as the new loss functions), including precision, recall, F1 score and Jaccard index. To obtain the results of ES, RGF, PRGF and our ZoRD algorithm in Sec. 5.4, we apply the same Adam optimizer with a learning rate of 0.2 (for precision and recall) or 0.01 (for F1 score and Jaccard index) and exponential decay rates of 0.9, 0.999. Note that standard BO algorithms (including TuRBO) fail to achieve any percentage improvements (i.e., achieving \\( 0\\% \\) in the \\( y \\)-axis of Fig. 4) in this experiment according to our five independent runs, which is likely due to their aggressive exploration in the input domain of such a high dimension. In light of this, we do not include them in our comparison since all other methods are able to achieve certain improvements." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.857, + 0.553, + 0.87 + ], + "angle": 0, + "content": "C.5 DERIVATIVE-FREE REINFORCEMENT LEARNING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Our derivative-free RL experiments aim to learn controllers (which outputs policies) that maximize the rewards/return for several environments in the OpenAI Gym (Brockman et al., 2016) without using true derivatives. 
Specifically, we need to optimize the parameters (i.e., \\( \\pmb{x} \\)) of our neural network" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.209, + 0.101, + 0.788, + 0.117 + ], + "angle": 0, + "content": "Table 2: OpenAI Gym environment properties and their respective network dimensions." + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.127, + 0.789, + 0.202 + ], + "angle": 0, + "content": "
AcrobotSwimmerLunarBipedalWalkerWalker2DHalfCheetah
|S|688241717
|A|324466
d213222244404356356
" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.223, + 0.338, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.223, + 0.498, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.223, + 0.655, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.655, + 0.223, + 0.82, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.27, + 0.332, + 0.738, + 0.346 + ], + "angle": 0, + "content": "(a) Results under various input dimension \\(d\\) and fixed Matérn \\((\\nu = 2.5)\\)" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.35, + 0.338, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.35, + 0.498, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.35, + 0.655, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.655, + 0.35, + 0.82, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.281, + 0.458, + 0.726, + 0.473 + ], + "angle": 0, + "content": "(b) Results under various kernels and fixed input dimension \\(d = 80\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.484, + 0.825, + 0.527 + ], + "angle": 0, + "content": "Figure 6: Comparison of the derivative estimation errors of our derived GP-based estimator (GP) and the FD estimator under various input dimensions and kernels. Similarly, each result is reported with the mean \\(\\pm\\) standard error from five independent runs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.556, + 0.825, + 0.64 + ], + "angle": 0, + "content": "(MLP) controller with 2 hidden layers, where each hidden layer has 10 hidden neurons and one bias term. We adopt a \\( L_{\\infty} \\) constraint of \\( \\| x \\|_{\\infty} \\leq 1 \\) on the parameters \\( x \\). 
We use a softmax output layer for the policies that deal with discrete action spaces, and a tanh output layer for the policies that deal with continuous action spaces. The dimension of neural network parameters (represented as a column vector) \\( d \\) is determined by the dimensions of both the observation \\( |S| \\) and the action space \\( |A| \\) of an environment, as detailed in Tab. 2." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.647, + 0.827, + 0.814 + ], + "angle": 0, + "content": "In order to search for policies that are robust to different random state initializations, we use the vectorized API of OpenAI Gym, and our observed function value \\( y(\\pmb{x}) \\) given the network parameters \\( \\pmb{x} \\) is an averaged return of 32 parallel environments. We also fix the seed of OpenAI Gym for all queries, which ensures that we are evaluating on a fixed set of 32 state initializations and that our results can be reproduced. We first initialize a sample of 500 points from a Latin Hypercube (McKay et al., 1979) to find a good initial input, and then proceed to apply ZO optimization algorithms (i.e., ES, RGF, PRGF, and our ZoRD) with the same query budget of 1000 on this initial input. For all these ZO optimization algorithms, we employ the same Adam optimizer with a learning rate of 1.0 and exponential decay rates of 0.9, 0.999. Considering the prohibitive noise in RL experiments, we use 300 queries from the optimization trajectory that has the smallest Euclidean distance with an input needing to be updated. Of note, we conduct 10 trials in total where each trial differs from each other by both the OpenAI Gym seed and the Latin Hypercube initializations." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.836, + 0.439, + 0.852 + ], + "angle": 0, + "content": "APPENDIX D MORE RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.538, + 0.884 + ], + "angle": 0, + "content": "D.1 MORE RESULTS ON DERIVATIVE ESTIMATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Besides the comparison in Fig. 2, we provide additional comparison between our derived GP-based estimator (6) and the FD estimator (2) under various input dimensions in Fig. 6(a) and various kernels" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.105, + 0.465, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.278, + 0.267, + 0.411, + 0.282 + ], + "angle": 0, + "content": "(a) Ackley \\((d = 40)\\)" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.106, + 0.775, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.595, + 0.267, + 0.716, + 0.282 + ], + "angle": 0, + "content": "(b) Levy \\((d = 40)\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.293, + 0.828, + 0.339 + ], + "angle": 0, + "content": "Figure 7: Comparison of our ZoRD algorithm using different confidence thresholds \\( c \\) for its dynamic virtual updates, where the \\( x \\)-axis and the \\( y \\)-axis denote the number of function queries and the log-scaled optimality gap (i.e., \\( \\log (f(\\boldsymbol{x}_T) - f(\\boldsymbol{x}^*)) \\)) achieved with this number of queries, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.372, + 0.828, + 0.61 + ], + "angle": 0, + "content": "in Fig. 
6(b) using the Ackley function. We adopt the same setting in Sec. 5.2. Interestingly, Fig. 6(a)(b) show that under various input dimensions and GP kernels, our derived GP-based estimator (6) is still able to achieve faster reduction rates compared with the FD estimator. Of note, all the function queries applied in our derived GP-based estimator is from the optimization trajectory whereas the FD estimator requires additional function queries for its derivative estimation. So, Fig. 6(a)(b) also show that our derived GP method is still able to achieve improved query efficiency for accurate derivative estimation than FD method under various input dimensions and GP kernels because our method avoids the requirement of additional queries for derivative estimation. Interestingly, the objective function (i.e., the Ackley function) is not truly sampled from the GPs based on these kernels. This therefore means that though we have assumed that we need the prior knowledge about the GP in which the objective function is sampled from (Sec. 2.1), such an assumption does not really need to be satisfied for our derived GP-based method to achieve accurate derivative estimation in practice. More interestingly, we notice that Matérn( \\(\\nu = 0.5\\) ) and SE kernel will achieve slightly worse derivative estimation, indicating that the choice of GP kernels may impact the quality of our derived GP-based derivative estimation. However, in practice, our derived GP method based on Matérn( \\(\\nu = 2.5\\) ) kernel, which has been widely adopted in our experiments, is already able to provide us with good derivative estimation for ZO optimization as confirmed by the results in our other experiments." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.637, + 0.546, + 0.65 + ], + "angle": 0, + "content": "D.2 MORE RESULTS ON SYNTHETIC EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.827, + 0.737 + ], + "angle": 0, + "content": "In this section, we compare ZoRD with more baselines in Fig. 8. Notably, we mainly compare our ZoRD with CobBO (based on the code implementation provided by (Tan et al., 2021)) since CobBO generally performs better than other baselines, e.g., TPE, ATPE, and BADS according to (Tan et al., 2021). As shown in the results in Fig. 8, our ZoRD algorithm is still able to outperform the other benchmark BO algorithm (i.e., CobBO)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.744, + 0.827, + 0.925 + ], + "angle": 0, + "content": "We then investigate the impacts of the dynamic virtual updates (Sec. 3.2) on our ZoRD algorithm. In particular, we apply the same setting in Appx. C.2 to optimize the Ackley and Levy function with \\( d = 40 \\) under various confidence thresholds \\( c \\) for our dynamic virtual updates. Fig. 7 illustrates the results. As shown in both Fig. 7(a) and (b), our ZoRD algorithm using the technique of dynamic virtual updates (i.e., \\( c > 0 \\)) can consistently achieve improved query efficiency compared with the one not using the technique of dynamic virtual updates (i.e., \\( c = 0 \\)). This indicates the essence of dynamic virtual updates in helping improve the query efficiency of our ZoRD algorithm. Such a result actually corroborates our theoretical insights about virtual updates (Sec. 4.2). Remarkably, our ZoRD algorithm without the technique of dynamic virtual updates (i.e., \\( c = 0 \\)) is still able to achieve both improved query efficiency and better converged performance compared with RGF and PRGF, which further verifies the superiority of our derived GP-based derivative estimation. More interestingly, both Fig. 7(a) and Fig. 
7(b) have verified that there indeed exists a trade-off for the confidence threshold \\( c \\) as we have discussed in Sec. 3.2: The confidence threshold \\( c \\) can not be overly" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.098, + 0.328, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.19, + 0.208, + 0.324, + 0.223 + ], + "angle": 0, + "content": "(a) Ackley \\((d = 20)\\)" + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.099, + 0.49, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.208, + 0.487, + 0.223 + ], + "angle": 0, + "content": "(b) Ackley \\((d = 40)\\)" + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.098, + 0.657, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.208, + 0.645, + 0.223 + ], + "angle": 0, + "content": "(c) Levy \\((d = 40)\\)" + }, + { + "type": "image", + "bbox": [ + 0.663, + 0.098, + 0.824, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.688, + 0.208, + 0.816, + 0.223 + ], + "angle": 0, + "content": "(d) Levy \\((d = 100)\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.234, + 0.825, + 0.278 + ], + "angle": 0, + "content": "Figure 8: Additional comparison between our ZoRD and other baselines. The \\(x\\)-axis and \\(y\\)-axis denote the number of queries and log-scaled optimality gap (i.e., \\(\\log(f(x_T) - f(x^*))\\)) achieved after this number of queries. Each curve is the mean \\(\\pm\\) standard error from ten independent runs." 
+ }, + { + "type": "image", + "bbox": [ + 0.215, + 0.296, + 0.465, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.259, + 0.461, + 0.441, + 0.475 + ], + "angle": 0, + "content": "(a) Success rate on MNIST" + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.297, + 0.765, + 0.454 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.554, + 0.461, + 0.754, + 0.476 + ], + "angle": 0, + "content": "(b) Success rate on CIFAR-10" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.487, + 0.825, + 0.545 + ], + "angle": 0, + "content": "Figure 9: Comparison of the success rate achieved by various ZO optimization algorithms on the 15 images selected from MNIST and CIFAR-10 dataset. Note that the \\( x \\)-axis and the \\( y \\)-axis denote the number of queries and the success rate (within the range of [0, 1]) achieved after this number of queries, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.572, + 0.825, + 0.601 + ], + "angle": 0, + "content": "small or excessively large in order to achieve the best query efficiency of our ZoRD algorithm, e.g., \\( c = 0.3 \\) for Ackley (\\( d = 40 \\)) and \\( c = 0.4 \\) for Levy (\\( d = 40 \\))." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.619, + 0.614, + 0.633 + ], + "angle": 0, + "content": "D.3 MORE RESULTS ON BLACK-BOX ADVERSARIAL ATTACK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.827, + 0.759 + ], + "angle": 0, + "content": "Besides the comparison in our Sec. 5.3, we also compare the success rate achieved by different ZO optimization algorithms on the 15 images selected from MNIST or CIFAR-10 in Fig. 9. Note that we adopt the same settings in Appx. C.3 for this comparison. Considering the large computational complexity of TuRBO-1/10 algorithm for hard-to-attack images\\(^3\\) which is usually undesirable in practice, we drop the comparison with them in this experiment. Fig. 
9 shows that under the same query budget, our ZoRD algorithm is able to achieve considerably improved success rate over other ZO optimization algorithms. These results therefore further support the superior query efficiency of our ZoRD algorithm in real-world challenging problems." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.776, + 0.7, + 0.79 + ], + "angle": 0, + "content": "D.4 MORE RESULTS FOR DERIVATIVE-FREE REINFORCEMENT LEARNING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.803, + 0.825, + 0.873 + ], + "angle": 0, + "content": "Recent years have also witnessed a surging interest in derivative-free reinforcement learning (Salimans et al., 2017; Qian and Yu, 2021), where ZO optimization algorithms are widely applied. In light of this, we also demonstrate the superiority of our ZoRD algorithm in the problem of derivative-free reinforcement learning. Specifically, we adopt the setting in Sec. C.5 to experiment in different RL environments. Tab. 3 summarizes the comparison among different ZO optimization algorithms under" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.825, + 0.924 + ], + "angle": 0, + "content": "\\(^3\\)Bayesian optimization algorithms, including TuRBO-1/10, are widely known to suffer from the prohibitive computational complexity when they need a large number of function queries for optimization, e.g., \\(T > 1000\\) (Rasmussen and Williams, 2006)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.825, + 0.144 + ], + "angle": 0, + "content": "Table 3: Comparison of the rewards (larger is better) achieved by various ZO optimization algorithms in different RL environments. 
Each result is reported with the mean ± standard deviation from ten independent runs." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.155, + 0.825, + 0.235 + ], + "angle": 0, + "content": "
AlgorithmAcrobotSwimmerLunarBipedalWalkerWalker2DHalfCheetah
ES-86.2±11.0176.0±56.8-94.7±24.4-34.7±27.3340.4±143.01042.4±753.9
RGF-83.0±5.6213.2±65.1-93.8±19.1-30.3±40.3368.4±223.11129.3±748.5
PRGF-86.3±9.9218.6±66.2-100.1±16.0-29.9±35.2344.6±152.31083.3±722.2
ZoRD-73.3±2.4280.5±77.6-45.1±38.312.9±37.8729.1±304.21950.5±576.1
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.261, + 0.828, + 0.364 + ], + "angle": 0, + "content": "the same query budget of 1000. As BO algorithms usually suffer from the prohibitive computational complexity for a large \\( T \\) (Rasmussen and Williams, 2006) and GLD has never been applied in RL, we mainly compare our ZoRD algorithm with ES, RGF and PRGF, which also belongs to the same type of ZO optimization algorithm: GD with estimated derivative. Remarkably, Tab. 3 shows that under the same query budget, our ZoRD algorithm can consistently enjoy improved performance (i.e., highest rewards) than the other ZO optimization algorithms in different RL environments. This further supports the superiority of our ZoRD algorithm to other FD-based ZO optimization algorithms." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.381, + 0.418, + 0.397 + ], + "angle": 0, + "content": "APPENDIX E DISCUSSIONS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.414, + 0.509, + 0.429 + ], + "angle": 0, + "content": "E.1 ZORD VS. FD-BASED ZO OPTIMIZATION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.44, + 0.828, + 0.664 + ], + "angle": 0, + "content": "Of note, the novelty of our work in fact lies in its way of exploiting the GP assumption to help design an improved derivative estimation and hence an improved ZO optimization algorithm, which to the best of our knowledge has not been explored theoretically yet in the field of ZO optimization via GD with estimated derivative. That is, at this moment, it is still not known in the literature how existing FD methods can utilize such an assumption to achieve better derivative estimation (i.e., their derivative estimation quality will remain the same), even when they make the same assumption as us. In light of this, the comparison between our derived GP method and the FD method in Sec. 4 is not only necessary but also meaningful to show the advantage of exploiting such an assumption in ZO derivative estimation. 
Importantly, our empirical results further show that such an assumption is in fact not restrictive for our ZoRD to achieve compelling performance in practice. For example, our Fig. 2 and Fig. 6 have shown that our derived GP-based method is able to achieve smaller derivative estimation error than the FD method when the objective functions are not designed to be sampled from a GP with the kernel that we had applied for our derivative estimation. Moreover, the results in our Sec. 5.2, 5.3, 5.4 have shown that our ZoRD is capable of achieving competitive optimization performance for real-world optimization problems where the objective functions are also not designed to be sampled from a GP with the kernel that we had used for our ZoRD." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.67, + 0.827, + 0.783 + ], + "angle": 0, + "content": "Meanwhile, the theoretical challenges of our work lie in the theoretical guarantee on the derivative estimation error of our unique derived GP-based method for any input in the domain as well as the convergence analysis based on such a unique derivative estimation, which to the best of our knowledge have not been studied in the literature. This means that our Thm. 1 and Thm. 2 have provided new developments in the analysis of gradient estimation error and our Thm. 3 will be the first convergence result for GD using our unique derivative estimation method. Interestingly, the bound in our Thm. 3 also improves over the standard ones from (Nesterov and Spokoiny, 2017; Liu et al., 2018b) in several aspects, as discussed in our Sec. 4.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.8, + 0.321, + 0.814 + ], + "angle": 0, + "content": "E.2 ZORD vs. BO" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Our ZoRD algorithm and standard BO algorithms (e.g., GP-UCB) have in fact applied the same GP assumption for their algorithm design. That is, however, where the similarity ends. 
Of note, our ZoRD exploits such an assumption to derive a specific GP (i.e., (4)) for derivative estimation, which is then employed for local exploitation via (projected) GD update. In contrast, BO algorithms utilize such an assumption to construct their acquisition functions for a global optimization that can trade off between exploitation and exploration. In practice, the exploration of BO algorithms is usually query-inefficient, especially for problems with high-dimensional input spaces, and therefore GD with" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.105, + 0.825, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.348, + 0.828, + 0.392 + ], + "angle": 0, + "content": "Figure 10: Comparison of local derivative estimation (in the input domain of [0, 3]) in our ZoRD and global function approximation (in the input domain of \\([-6, 6]\\)) in BO under various number of random function queries." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.433, + 0.828, + 0.587 + ], + "angle": 0, + "content": "estimated derivatives (especially our ZoRD) is preferred to realize better optimization performances in these problems (see our Sec. 5.2). So, our ZoRD and BO algorithms belong to two different types of ZO optimization algorithms (i.e., GD-type vs. BO-type), where their theoretical analyses are in fact not comparable. In particular, GD-type and BO-type ZO optimization algorithms apply different metrics for their theoretical analyses, e.g., the derivative estimation error as well as the convergence to a stationary point (in the nonconvex case) for GD-type ZO optimization algorithms vs. 
the global asymptotic convergence in terms of the regret for BO-type ZO optimization algorithms. So, it is more reasonable to compare the theory (including the theoretical challenge, the new developments, and the novelty of the convergence result) of our ZoRD with other GD-type ZO optimization algorithms, e.g., the ones using FD methods for their derivative estimation (Nesterov and Spokoiny, 2017; Liu et al., 2018b), as what we have discussed in Sec. E.1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.593, + 0.827, + 0.693 + ], + "angle": 0, + "content": "In addition, in contrast to using the GP to model the objective function within the entire domain for global exploration in BO, our derived GP in ZoRD will be applied to estimate the derivative of the objective function for local exploitation by GD as shown in Sec. 3.1. As GD typically optimizes in a local region, our derived GP only needs to estimate the derivative locally, which is known to be much simpler than modeling the objective function within the entire domain in BO especially for objective functions in high-dimensional input spaces. In light of this, the derived GP for derivative estimation (4) in our ZoRD algorithm advances the standard GP in BO in the following aspects:" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.709, + 0.827, + 0.851 + ], + "angle": 0, + "content": "1. Improved Query Efficiency for Estimation. The derived GP in our ZoRD algorithm requires fewer function queries to provide accurate derivative estimation. We provide a visual example in Fig. 10, in which we sample a one-dimensional function \\( f \\) from a GP prior \\( \\mathcal{GP}(0,k(x,x)) \\) using the standard SE kernel and then randomly select the same number of queries from the input domain of \\([-6,6]\\) and \\([0,3]\\) for standard GP and our derived GP, respectively. As illustrated in Fig. 
10, the function in a local region (i.e., \( x \in [0,3] \)) is usually smoother than its counterpart in the entire domain (i.e., \( x \in [-6,6] \)). As a result, with only 4 function queries, our derived GP can already provide an accurate estimation of the derivative of this objective function whereas standard GP requires more than 8 function queries to model this objective function accurately in the entire domain." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.868, + 0.829, + 0.927 + ], + "angle": 0, + "content": "2. Reduced Computational Complexity. Comparing (3) and (5), both the derived GP for derivative estimation in our ZoRD algorithm and the standard GP in BO enjoy a computational complexity of \(\mathcal{O}(n^3)\) with \(n\) function queries. However, as a consequence of the improved query efficiency of our derived GP, it is able to require fewer function queries (i.e.,
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ] +] \ No newline at end of file diff --git a/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_origin.pdf b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9a7ebe022cb1c8180cee0efb43dbb0e5af13b50b --- /dev/null +++ b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/c092ea5b-92fc-44ba-b455-d7307b3016a2_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:731578d2f03b758f8c418d20ad64727ff47bcd8f04515e464144df1ea2cd4e18 +size 5426973 diff --git a/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/full.md b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..440e9174fe6b2a626acb53585cbdf5b04ac32c1d --- /dev/null +++ b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/full.md @@ -0,0 +1,848 @@ +# ZEROTH-ORDER OPTIMIZATION WITH TRAJECTORY-INFORMED DERIVATIVE ESTIMATION + +Yao Shu*, Zhongxiang Dai*, Weicong Sng, Arun Verma, + +Dept. of Computer Science, National University of Singapore, Republic of Singapore {shuyao, daizhongxiang, sngweicong, arun}@comp.nus.edu.sg + +Patrick Jaillet† & Bryan Kian Hsiang Low§ + +Dept. of Electrical Engineering and Computer Science, MIT, USA† + +Dept. of Computer Science, National University of Singapore, Republic of Singapore + +jaillet@mit.edu, lowkh@comp.nus.edu.sg + +# ABSTRACT + +Zeroth-order (ZO) optimization, in which the derivative is unavailable, has recently succeeded in many important machine learning applications. 
Existing algorithms rely on finite difference (FD) methods for derivative estimation and gradient descent (GD)-based approaches for optimization. However, these algorithms suffer from query inefficiency because many additional function queries are required for derivative estimation in their every GD update, which typically hinders their deployment in real-world applications where every function query is expensive. To this end, we propose a trajectory-informed derivative estimation method which only employs the optimization trajectory (i.e., the history of function queries during optimization) and hence can eliminate the need for additional function queries to estimate a derivative. Moreover, based on our derivative estimation, we propose the technique of dynamic virtual updates, which allows us to reliably perform multiple steps of GD updates without reapplying derivative estimation. Based on these two contributions, we introduce the zeroth-order optimization with trajectory-informed derivative estimation (ZoRD) algorithm for query-efficient ZO optimization. We theoretically demonstrate that our trajectory-informed derivative estimation and our ZoRD algorithm improve over existing approaches, which is then supported by our real-world experiments such as black-box adversarial attack, non-differentiable metric optimization, and derivative-free reinforcement learning. + +# 1 INTRODUCTION + +Zeroth-order (ZO) optimization, in which the objective function to be optimized is only accessible by querying, has received great attention in recent years due to its success in many applications, e.g., black-box adversarial attack (Ru et al., 2020), non-differentiable metric optimization (Hiranandani et al., 2021), and derivative-free reinforcement learning (Salimans et al., 2017). 
In these problems, the derivative of the objective function is either prohibitively costly to obtain or even non-existent, making it infeasible to directly apply standard derivative-based algorithms such as gradient descent (GD). In this regard, existing works have proposed to estimate the derivative using the finite difference (FD) methods and then apply GD-based algorithms using the estimated derivative for ZO optimization (Nesterov and Spokoiny, 2017; Cheng et al., 2021). These algorithms, which we refer to as $GD$ with estimated derivatives, have been the most widely applied approach to ZO optimization especially for problems with high-dimensional input spaces, because of their theoretically guaranteed convergence and competitive practical performance. Unfortunately, these algorithms suffer from query inefficiency, which hinders their real-world deployment especially in applications with expensive-to-query objective functions, e.g., black-box adversarial attack. + +Specifically, one of the reasons for the query inefficiency of existing algorithms on GD with estimated derivatives is that in addition to the necessary queries (i.e., the query of every updated input)$^{1}$, the FD methods applied in these algorithms require a large number of additional queries to accurately estimate the derivative at an input (Berahas et al., 2022). This naturally begs the question: Can we estimate a derivative without any additional query? A natural approach to achieve this is to leverage the optimization trajectory, which is inherently available as a result of the necessary queries and their observations, to predict the derivatives. However, this requires a non-trivial method to simultaneously $(a)$ predict a derivative using only the optimization trajectory (i.e., the history of updated inputs and their observations), and $(b)$ quantify the uncertainty of this prediction to avoid using inaccurate predicted derivatives. 
Interestingly, the Gaussian process (GP) model satisfies both requirements and is hence a natural choice for such a derivative estimation. Specifically, under the commonly used assumption that the objective function is sampled from a GP (Srinivas et al., 2010), the derivative at any input in the domain follows a Gaussian distribution which, surprisingly, can be calculated using only the optimization trajectory. This allows us to $(a)$ employ the mean of this Gaussian distribution as the estimated derivative, and $(b)$ use the covariance matrix of this Gaussian distribution to obtain a principled measure of the predictive uncertainty and the accuracy of this derivative estimation, which together constitute our trajectory-informed derivative estimation (Sec. 3.1). + +Another reason for the query inefficiency of the existing algorithms on GD with estimated derivatives is that every update in these algorithms requires reapplying derivative estimation and hence necessitates additional queries. This can preclude their adoption of a large number of GD updates since every update requires potentially expensive additional queries. Therefore, another question arises: Can we perform multiple GD updates without reapplying derivative estimation and hence without any additional query? To address this question, we propose a technique named dynamic virtual updates (Sec. 3.2). Specifically, thanks to the ability of our method to estimate the derivative at any input in the domain while only using existing optimization trajectory, we can apply multi-step GD updates without the need to reapply derivative estimation and hence without requiring any new query. Moreover, we can dynamically determine the number of steps for these updates by inspecting the aforementioned predictive uncertainty at every step, such that we only perform an update if the uncertainty is small enough (which also indicates that the estimation error is small, see Sec. 4.1). 
+ +By incorporating our aforementioned trajectory-informed derivative estimation and dynamic virtual updates into GD-based algorithms, we then introduce the zeroth-order optimization with trajectory-informed derivative estimation (ZoRD) algorithm for query-efficient ZO optimization. We theoretically bound the estimation error of our trajectory-informed derivative estimation and show that this estimation error is non-increasing in the entire domain as the number of queries is increased and can even be exponentially decreasing in some scenarios (Sec. 4.1). Based on this, we prove the convergence of our ZoRD algorithm, which improves over the existing ZO optimization algorithms that rely on the FD methods for derivative estimation (Sec. 4.2). Lastly, we use extensive experiments, such as black-box adversarial attack, non-differentiable metric optimization, and derivative-free reinforcement learning, to demonstrate that $(a)$ our trajectory-informed derivative estimation improves over the existing FD methods and that $(b)$ our ZoRD algorithm consistently achieves improved query efficiency compared with previous ZO optimization algorithms (Sec. 5). + +# 2 PRELIMINARIES + +# 2.1 PROBLEM SETUP + +Throughout this paper, we use $\nabla$ and $\partial_{\pmb{x}}$ to denote, respectively, the total derivative (i.e., gradient) and partial derivative w.r.t the variable $\pmb{x}$ . We consider the minimization of a black-box objective function $f:\mathcal{X}\to \mathbb{R}$ , in which $\mathcal{X}\subset \mathbb{R}^d$ is a convex subset of the $d$ -dimensional domain: + +$$ +\min _ {\boldsymbol {x} \in \mathcal {X}} f (\boldsymbol {x}). \tag {1} +$$ + +Since we consider ZO optimization, the derivative information is not accessible and instead, we are only allowed to query the inputs in $\mathcal{X}$ . 
For every queried input $\pmb{x} \in \mathcal{X}$ , we observe a corresponding noisy output of $y(\pmb{x}) = f(\pmb{x}) + \zeta$ , in which $\zeta$ is a zero-mean Gaussian noise with a variance of $\sigma^2$ : + +Algorithm 1: Standard (Projected) GD with Estimated Derivatives +1: Input: Objective function $f: \mathcal{X} \to \mathbb{R}$ , initialization $\boldsymbol{x}_0$ , iteration number $T$ , learning rates $\{\eta_t\}_{t=1}^T$ , projection function $\mathcal{P}_{\mathcal{X}}(\boldsymbol{x})$ + +2: for iteration $t = 1,\dots ,T$ do + +3: $g(\pmb{x}_{t - 1})\approx \nabla f(\pmb{x}_{t - 1})$ with (2) + +4: $\pmb{x}_t\gets \mathcal{P}_{\mathcal{X}}\left(\pmb{x}_{t - 1} - \eta_{t - 1}g(\pmb{x}_{t - 1})\right)$ + +5: Query $\pmb{x}_t$ to yield $y(\pmb{x}_t)$ + +6: end for + +7: Return arg $\min_{\pmb{x}_{1:T}} y(\pmb{x})$ + +Algorithm 2: ZORD (Ours) +1: Input: In addition to the parameters in Algo. 1, set the steps of virtual updates $\{V_t\}_{t=1}^T$ + +2: for iteration $t = 1,\dots ,T$ do + +3: $\pmb{x}_{t,0} \gets \pmb{x}_{t-1}$ + +4: for iteration $\tau = 1,\dots ,V_{t}$ do + +5: $\pmb{x}_{t,\tau} \gets \mathcal{P}_{\mathcal{X}}(\pmb{x}_{t,\tau -1} - \eta_{t,\tau -1}\nabla \mu_{t - 1}(\pmb{x}_{t,\tau -1}))$ + +6: end for + +7: Query $\pmb{x}_t = \pmb{x}_{t,\tau}$ to yield $y(\pmb{x}_t)$ + +8: Update (4) using optimization trajectory + +9: end for + +10: Return arg min $\mathbf{\mu}_{\mathbf{x}_{1:T}}y(\mathbf{x})$ + +$\zeta \sim \mathcal{N}(0,\sigma^2)$ . Besides, we adopt a common assumption on $f$ which has already been widely used in the literature of Bayesian optimization (BO) (Srinivas et al., 2010; Kandasamy et al., 2018): we assume that $f$ is sampled from a Gaussian process (GP). 
A GP $\mathcal{GP}(\mu (\cdot),k(\cdot ,\cdot))$ , which is characterized by a mean function $\mu (\cdot)$ and a covariance function $k(\cdot ,\cdot)$ , is a stochastic process in which any finite subset of random variables follows a multi-variate Gaussian distribution (Rasmussen and Williams, 2006). In addition, following the common practice of GP and BO, we assume w.l.o.g. that $\mu (\pmb {x}) = 0$ and $k(\pmb {x},\pmb{x}^{\prime})\leq 1$ $(\forall \pmb {x},\pmb{x}^{\prime}\in \mathcal{X})$ . We also assume that the kernel function $k$ is differentiable, and that $\| \partial_z\partial_{z'}k(z,z')|_{z = z' = x}\| _2\leq \kappa^2$ , $\forall \pmb {x}\in \mathcal{X}$ for some $\kappa >0$ . This is satisfied by most commonly used kernels such as the squared exponential (SE) kernel (Rasmussen and Williams, 2006). + +# 2.2 ZO OPTIMIZATION WITH ESTIMATED DERIVATIVES + +To solve (1), GD with estimated derivatives (e.g., Algo. 1) has been developed (Flaxman et al., 2005; Ghadimi and Lan, 2013; Nesterov and Spokoiny, 2017; Liu et al., 2018a;b). Particularly, these algorithms first estimate the derivative of $f$ (line 3 of Algo. 1) and then plug the estimated derivative into GD-based methods to obtain the next input for querying (lines 4-5 of Algo. 1). In these algorithms, the derivative is typically estimated by averaging the finite difference approximation of the directional derivatives for $f$ along certain directions, which we refer to as the finite difference (FD) method in this paper. For example, given a parameter $\lambda$ and directions $\{\pmb{u}_i\}_{i=1}^n$ , the derivative $\nabla f$ at any $\pmb{x} \in \mathcal{X}$ can be estimated by the following FD method (Berahas et al., 2022): + +$$ +\nabla f (\boldsymbol {x}) \approx g (\boldsymbol {x}) \triangleq \sum_ {i = 1} ^ {n} \frac {y \left(\boldsymbol {x} + \lambda \boldsymbol {u} _ {i}\right) - y (\boldsymbol {x})}{\lambda} \boldsymbol {u} _ {i}. 
\tag {2} +$$ + +The directions $\{\pmb{u}_i\}_{i=1}^n$ are usually sampled from the standard Gaussian distribution (Nesterov and Spokoiny, 2017) or uniformly from the unit sphere (Flaxman et al., 2005), or set as the standard basis vectors with 1 at one of its coordinates and 0 otherwise (Lian et al., 2016). As mentioned before, existing FD methods typically require many additional queries (i.e., $\{\pmb{x} + \lambda \pmb{u}_i\}_{i=1}^n$ ) to achieve an accurate derivative estimation in every iteration of Algo. 1 (Berahas et al., 2022), making existing ZO optimization algorithms (Flaxman et al., 2005; Nesterov and Spokoiny, 2017) query-inefficient. + +# 3 ZO OPTIMIZATION VIA TRAJECTORY-INFORMED DERIVATIVE ESTIMATION + +To improve existing GD with estimated derivatives (e.g., Algo. 1), we propose the ZoRD algorithm (Algo. 2), which achieves more query-efficient ZO optimization thanks to our two major contributions. Firstly, we propose a derived GP-based derivative estimation method which only uses the optimization trajectory and consequently does not require any additional query for derivative estimation (Sec. 3.1). Secondly, thanks to the ability of our method to estimate the derivative at any input in the domain without any additional query and to measure the estimation error in a principled way, we develop the technique of dynamic virtual updates to further improve the query efficiency of our ZoRD (Sec. 3.2). + +# 3.1 TRAJECTORY-INFORMED DERIVATIVE ESTIMATION + +To begin with, if a function $f$ follows a GP, then its derivative $\nabla f$ also follows a GP (Rasmussen and Williams, 2006). This is formalized by our Lemma 1 below (proof in Appx. B.1), which then provides us a principled way to estimate the derivative at any input in the domain. + +Lemma 1 (Derived GP for Derivatives). 
If a function $f$ follows a $GP$ : $f \sim \mathcal{GP}\left(\mu (\cdot),\sigma^2 (\cdot ,\cdot)\right)$ , then + +$$ +\nabla f \sim \mathcal {G P} (\nabla \mu (\cdot), \partial \sigma^ {2} (\cdot , \cdot)) +$$ + +where $\partial \sigma^2 (\cdot ,\cdot)$ denotes the cross partial derivative w.r.t the first and second arguments of $\sigma^2 (\cdot ,\cdot)$ . + +$f$ Follows the Posterior GP. As discussed in Sec. 2.1, we assume that $f \sim \mathcal{GP}(\mu(\cdot), k(\cdot, \cdot))$ . So, in every iteration $t$ of our Algo. 2, conditioned on the current optimization trajectory $\mathcal{D}_{t-1} \triangleq \{(x_{\tau}, y_{\tau})\}_{\tau=1}^{t-1}$ , $f$ follows the posterior GP: $f \sim \mathcal{GP}(\mu_{t-1}(\cdot), \sigma_{t-1}^2(\cdot, \cdot))$ with the mean function $\mu_{t-1}(\cdot)$ and the covariance function $\sigma_{t-1}^2(\cdot, \cdot)$ defined as below (Rasmussen and Williams, 2006): + +$$ +\mu_ {t - 1} (\boldsymbol {x}) \triangleq \boldsymbol {k} _ {t - 1} (\boldsymbol {x}) ^ {\top} \left(\mathbf {K} _ {t - 1} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \boldsymbol {y} _ {t - 1} +$$ + +$$ +\sigma_ {t - 1} ^ {2} \left(\boldsymbol {x}, \boldsymbol {x} ^ {\prime}\right) \triangleq k \left(\boldsymbol {x}, \boldsymbol {x} ^ {\prime}\right) - \boldsymbol {k} _ {t - 1} (\boldsymbol {x}) ^ {\top} \left(\mathbf {K} _ {t - 1} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \boldsymbol {k} _ {t - 1} \left(\boldsymbol {x} ^ {\prime}\right) \tag {3} +$$ + +where $\pmb{y}_{t-1}^{\top} \triangleq [y_{\tau}]_{\tau=1}^{t-1}$ and $\pmb{k}_{t-1}(\pmb{x})^{\top} \triangleq [k(\pmb{x}, \pmb{x}_{\tau})]_{\tau=1}^{t-1}$ are $(t-1)$ -dimensional row vectors, and $\mathbf{K}_{t-1} \triangleq [k(\pmb{x}_{\tau}, \pmb{x}_{\tau'})]_{\tau, \tau'=1}^{t-1}$ is a $(t-1) \times (t-1)$ -dimensional matrix. 
Define $\sigma_{t-1}^{2}(\pmb{x}) \triangleq \sigma_{t-1}^{2}(\pmb{x}, \pmb{x})$ , the posterior distribution at $\pmb{x}$ is Gaussian with mean $\mu_{t-1}(\pmb{x})$ and variance $\sigma_{t-1}^{2}(\pmb{x})$ . + +$\nabla f$ Follows the Derived GP for Derivatives. Substituting (3) into Lemma 1, we have that + +$$ +\nabla f \sim \mathcal {G P} \left(\nabla \mu_ {t - 1} (\cdot), \partial \sigma_ {t - 1} ^ {2} (\cdot , \cdot)\right), \tag {4} +$$ + +in which the mean $\nabla \mu_{t - 1}(\pmb {x})$ at $\pmb{x}$ and the covariance $\partial \sigma_{t - 1}^2 (\pmb {x},\pmb{x}^\prime)$ at $\pmb {x},\pmb{x}^{\prime}$ are + +$$ +\nabla \mu_ {t - 1} (\boldsymbol {x}) \triangleq \partial_ {\boldsymbol {z}} \boldsymbol {k} _ {t - 1} (\boldsymbol {z}) ^ {\top} \left(\mathbf {K} _ {t - 1} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \boldsymbol {y} _ {t - 1} | _ {\boldsymbol {z} = \boldsymbol {x}}, +$$ + +$$ +\left. \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}, \boldsymbol {x} ^ {\prime}) \triangleq \partial_ {\boldsymbol {z}} \partial_ {\boldsymbol {z} ^ {\prime}} k (\boldsymbol {z}, \boldsymbol {z} ^ {\prime}) - \partial_ {\boldsymbol {z}} \boldsymbol {k} _ {t - 1} (\boldsymbol {z}) ^ {\top} \left(\mathbf {K} _ {t - 1} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \partial_ {\boldsymbol {z} ^ {\prime}} \boldsymbol {k} _ {t - 1} (\boldsymbol {z} ^ {\prime}) \right| _ {\boldsymbol {z} = \boldsymbol {x}, \boldsymbol {z} ^ {\prime} = \boldsymbol {x} ^ {\prime}}, \tag {5} +$$ + +in which $\partial_{\pmb{z}}\pmb{k}_{t - 1}(\pmb {z})\triangleq [\partial_{\pmb{z}}k(\pmb {z},\pmb{x}_{\tau})]_{\tau = 1}^{t - 1}$ is a $(t - 1)\times d$ -dimensional matrix and $\partial_{\pmb{z}}\partial_{\pmb{z}^{\prime}}k(\pmb {z},\pmb{z}^{\prime})$ is a $d\times d$ -dimensional matrix. Therefore, $\nabla \mu_{t - 1}(\pmb {x})$ is a $d$ -dimensional vector and $\partial \sigma_{t - 1}^2 (\pmb {x},\pmb{x}^\prime)$ is a $d\times d$ -dimensional matrix. 
We refer to this GP (4) followed by $\nabla f$ as the derived GP for derivatives. + +So, define $\partial \sigma_{t - 1}^2 (\pmb {x})\triangleq \partial \sigma_{t - 1}^2 (\pmb {x},\pmb {x})$ , we have that for any input $\pmb {x}\in \mathcal{X}$ , the derivative $\nabla f(\pmb {x})$ at $\pmb{x}$ follows a $d$ -dimensional Gaussian distribution: $\nabla f(\pmb {x})\sim \mathcal{N}(\nabla \mu_{t - 1}(\pmb {x}),\partial \sigma_{t - 1}^2 (\pmb {x}))$ . This allows us to (a) estimate the derivative $\nabla f(\pmb {x})$ at any input $\pmb {x}\in \mathcal{X}$ using the posterior mean $\nabla \mu_{t - 1}(\pmb {x})$ of the derived GP for derivatives (4): + +$$ +\nabla f (\boldsymbol {x}) \approx \nabla \mu_ {t - 1} (\boldsymbol {x}), \tag {6} +$$ + +and $(b)$ employ the posterior covariance matrix $\partial \sigma_{t - 1}^2 (\pmb {x})$ to obtain a principled measure of the uncertainty for this derivative estimation, which together constitute our novel derivative estimation. Remarkably, our derivative estimation only makes use of the naturally available optimization trajectory $\mathcal{D}_{t - 1}$ and does not need any additional query, which is in stark contrast to the existing FD methods (e.g., (2)) that require many additional queries for their derivative estimation. Moreover, our principled measure of uncertainty allows us to perform dynamic virtual updates (Sec. 3.2) and theoretically guarantee the quality of our derivative estimation (Sec. 4.1). + +# 3.2 DYNAMIC VIRTUAL UPDATES + +Note that our derived GP-based derivative estimation (6) can estimate the derivative at any input $\pmb{x}$ within the domain. As a result, in every iteration $t$ of our ZoRD algorithm, for a step $\tau \geq 1$ , after performing a GD update using the estimated derivative at $\pmb{x}_{t,\tau -1}$ (i.e., $\nabla \mu_{t - 1}(\pmb{x}_{t,\tau -1})$ ) to reach the input $\pmb{x}_{t,\tau}$ (line 5 of Algo. 
2), we can again estimate the derivative at $\pmb{x}_{t,\tau}$ (i.e., $\nabla \mu_{t - 1}(\pmb{x}_{t,\tau})$ ) and then perform another GD update to reach $\pmb{x}_{t,\tau +1}$ without requiring any additional query. This process can be repeated for multiple steps, and can further improve the query efficiency of our ZoRD. Formally, given the projection function $\mathcal{P}_{\chi}(\pmb {x})\triangleq \arg \min_{\pmb {z}\in \chi}\| \pmb {x} - \pmb {z}\| _2^2 /2$ and learning rates $\{\eta_{t,\tau}\}_{\tau = 0}^{V_t - 1}$ , we perform the following virtual updates for $V_{t}$ steps (lines 4-6 of Algo. 2): + +$$ +\boldsymbol {x} _ {t, \tau} = \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} _ {t, \tau - 1} - \eta_ {t, \tau - 1} \nabla \mu_ {t - 1} \left(\boldsymbol {x} _ {t, \tau - 1}\right)\right) \quad \forall \tau = 1, \dots , V _ {t} \tag {7} +$$ + +and then choose the last $\pmb{x}_{t,V_t}$ to query (i.e., line 7 of Algo. 2). Importantly, these multi-step virtual GD updates are only feasible in our ZoRD (Algo. 2) because our derivative estimator (6) does not + +require any new query in all these steps, whereas the existing FD methods require additional queries to estimate the derivative in every step. + +The number of steps for our virtual updates (i.e., $V_{t}$ ) induces an intriguing trade-off: An overly small $V_{t}$ may not be able to fully exploit the benefit of our derivative estimation (6) which is free from the requirement for additional queries, yet an excessively large $V_{t}$ may lead to the usage of inaccurate derivative estimations which can hurt the performance (validated in Appx. D.2). Remarkably, (4) allows us to dynamically choose $V_{t}$ by inspecting our principled measure of the predictive uncertainty (i.e., $\partial \sigma_{t-1}^2(\boldsymbol{x})$ ) for every derivative estimation. 
Specifically, after reaching the input $\boldsymbol{x}_{t,\tau}$ , we continue the virtual updates (to reach $\boldsymbol{x}_{t,\tau+1}$ ) if our predictive uncertainty is small, i.e., if $\left\| \partial \sigma_{t-1}^2(\boldsymbol{x}_{t,\tau}) \right\|_2 \leq c$ where $c$ is a confidence threshold; otherwise, we terminate the virtual updates and let $V_{t} = \tau$ since the derivative estimation at $\boldsymbol{x}_{t,\tau}$ is likely unreliable. + +# 4 THEORETICAL ANALYSIS + +# 4.1 DERIVATIVE ESTIMATION ERROR + +To begin with, we derive a theoretical guarantee on the error of our derivative estimation at any $\pmb{x}$ . + +Theorem 1 (Derivative Estimation Error). Let $\delta \in (0,1)$ and $\beta \triangleq \sqrt{d + 2(\sqrt{d} + 1)\ln(1 / \delta)}$ . For any $\pmb{x} \in \mathcal{X}$ and any $t \geq 1$ , the following holds with probability of at least $1 - \delta$ , + +$$ +\left\| \nabla f (\boldsymbol {x}) - \nabla \mu_ {t} (\boldsymbol {x}) \right\| _ {2} \leq \beta \sqrt {\left\| \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \right\| _ {2}}. +$$ + +Thm. 1 (proof in Appx. B.2) has presented an upper bound on the error of our derivative estimation (6) at any $\pmb{x} \in \mathcal{X}$ in terms of $\sqrt{\|\partial\sigma_t^2(\pmb{x})\|_2}$ , which is a measure of the uncertainty about our derivative estimation at $\pmb{x}$ (Sec. 3.1). This hence implies that the threshold $c$ applied to our predictive uncertainty $\left\| \partial \sigma_t^2 (\pmb {x})\right\| _2$ (Sec. 3.2) also ensures that the derivative estimation error is small during our dynamic virtual updates. Next, we show in the following theorem (proof in Appx. B.3) that our upper bound on the estimation error from Thm. 1 is non-increasing as the number of function queries is increased. + +Theorem 2 (Non-Increasing Error). 
For any $\pmb{x} \in \mathcal{X}$ and any $t \geq 1$ , we have that + +$$ +\left\| \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \right\| _ {2} \leq \left\| \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}) \right\| _ {2}. +$$ + +Let $\delta \in (0,1)$ . Define $r \triangleq \max_{\boldsymbol{x} \in \mathcal{X}, t \geq 1} \sqrt{\|\partial \sigma_t^2(\boldsymbol{x})\|_2 / \left\|\partial \sigma_{t-1}^2(\boldsymbol{x})\right\|_2}$ , given the $\beta$ in Thm. 1, we then have that $r \in [1/\sqrt{1 + 1/\sigma^2}, 1]$ , and that with a probability of at least $1 - \delta$ , + +$$ +\left\| \nabla f (\boldsymbol {x}) - \nabla \mu_ {t} (\boldsymbol {x}) \right\| _ {2} \leq \beta \sqrt {\left\| \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \right\| _ {2}} \leq \kappa \beta r ^ {t}. +$$ + +Thm. 2 shows that our upper bound on the derivative estimation error (i.e., $\beta \sqrt{\|\partial\sigma_t^2(\boldsymbol{x})\|_2}$ from Thm. 1) is guaranteed to be non-increasing in the entire domain as the number of function queries is increased. Moreover, in some situations (i.e., when $r < 1$ ), our upper bound on the estimation error is even exponentially decreasing. Of note, $r$ characterizes how fast the uncertainty about our derivative estimation (measured by $\sqrt{\|\partial\sigma_t^2(\boldsymbol{x})\|_2}$ ) is reduced across the domain. Since GD-based algorithms usually perform a local search in a neighborhood (especially for the problems with high-dimensional input spaces), all the inputs within the local region are expected to be close to each other (measured by the kernel function $k$ ). 
Moreover, as the objective function is usually smooth in the local region (i.e., its derivatives are continuous), reducing the uncertainty of the derivative at an input $\boldsymbol{x}_t$ (i.e., by querying $\boldsymbol{x}_t$ ) is also expected to decrease the uncertainty of the derivatives at the other inputs in the same local region (i.e., decrease $\sqrt{\|\partial\sigma_t^2(\boldsymbol{x})\|_2}$ ). So, $r < 1$ is expected to be a reasonable condition that can be satisfied in practice. This will also be corroborated by our empirical results (e.g., Figs. 1 and 2), which demonstrates that the error of our derivative estimation (6) is indeed reduced very fast. + +Our GP-based Method (6) vs. Existing FD Methods. Our derivative estimation method based on the derived GP (6) is superior to the traditional FD methods (e.g., (2)) in a number of major aspects. (a) Our derivative estimation error can be exponentially decreasing in some situations (i.e., when $r < 1$ in Thm. 2), which is unachievable for the existing FD methods since they can only + +attain a polynomial rate of reduction (Berahas et al., 2022). $(b)$ Our method (6) does not need any additional query to estimate the derivative (but only requires the optimization trajectory), whereas the existing FD methods require additional queries for every derivative estimation. $(c)$ Our method (6) is equipped with a principled measure of the predictive uncertainty and hence the estimation error for derivative estimation (i.e., via $\sqrt{\|\partial\sigma_t^2(\boldsymbol{x})\|_2}$ , Thm. 1), which is typically unavailable for the existing FD methods. $(d)$ Our method (6), unlike the existing FD methods, makes it possible to apply the technique of dynamic virtual updates (Sec. 3.2) thanks to its capability of estimating the derivative at any input in the domain without requiring any additional query and measuring the estimation error in a principled way (Thm. 1). 
+ +# 4.2 CONVERGENCE ANALYSIS + +To analyze the convergence of our ZoRD, besides our main assumption that $f$ is sampled from a GP (Sec. 2.1), we assume that $f$ is $L_{c}$-Lipschitz continuous for $L_{c} > 0$. This is a mild assumption since it has been shown that a function $f$ sampled from a GP is Lipschitz continuous with high probability for commonly used kernels, e.g., the SE kernel and Matérn kernel with $\nu > 2$ (Srinivas et al., 2010). We also assume that $f$ is $L_{s}$-Lipschitz smooth, which is commonly adopted in the analysis of GD-based algorithms (J Reddi et al., 2016). We aim to prove the convergence of our ZoRD for nonconvex $f$ by analyzing how fast it converges to a stationary point (Ghadimi and Lan, 2013; Liu et al., 2018a). Specifically, we follow the common practice of previous works (J Reddi et al., 2016; Liu et al., 2018b) to analyze the following derivative mapping: + +$$ +G _ {t, \tau} \triangleq \left(\boldsymbol {x} _ {t, \tau} - \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} _ {t, \tau} - \eta_ {t, \tau} \nabla f \left(\boldsymbol {x} _ {t, \tau}\right)\right)\right) / \eta_ {t, \tau}. \tag {8} +$$ + +The convergence of our ZoRD is formally guaranteed by Thm. 3 below (proof in Appx. B.4). + +Theorem 3 (Convergence of ZORD). Let $\delta \in (0,1)$. Suppose our ZORD (Algo. 2) is run with $V_{t} = V$ and $\eta_{t,\tau} = \eta \leq 1 / L_{s}$ for any $t$ and $\tau$. Then with probability of at least $1 - \delta$, when $r < 1$, + +$$ +\min _ {t \leq T} \frac {1}{V} \sum_ {\tau = 0} ^ {V - 1} \| G _ {t, \tau} \| _ {2} ^ {2} \leq \underbrace {\frac {2 [ f (\boldsymbol {x} _ {0}) - f (\boldsymbol {x} ^ {*}) ] / \eta}{T V}} _ {①} + \underbrace {\frac {2 \alpha^ {2} r ^ {2}}{T (1 - r ^ {2})} + \frac {(2 L _ {c} + 1 / \eta) \alpha r}{T (1 - r)}} _ {②} +$$ + +where $\alpha \triangleq \kappa \sqrt{d + 2(\sqrt{d} + 1)\ln(VT / \delta)}$. When $r = 1$, we instead have (2) $= 2\alpha^{2} + (2L_{c} + 1/\eta)\alpha$. In the upper bound of Thm. 
3, the term (1) represents the convergence rate of (projected) GD when the true derivative is used and it asymptotically goes to 0 as $T$ increases; the term (2) corresponds to the impact of the error of our derivative estimation (6) on the convergence. In situations where $r < 1$ which is a reasonably achievable condition as we have discussed in Sec. 4.1, the term (2) will also asymptotically approach 0. This, remarkably, suggests that the impact of the derivative estimation error on the convergence vanishes asymptotically and our ZoRD algorithm is guaranteed to converge to a stationary point (i.e., $\min_{t \leq T} \frac{1}{V} \sum_{\tau=0}^{V-1} \|G_{t,\tau}\|_2^2$ approaches 0) at the rate of $\mathcal{O}(1/T)$ when $r < 1$ . This is unattainable by existing ZO optimization algorithms using FD-based derivative estimation (Nesterov and Spokoiny, 2017; Liu et al., 2018b), because these methods typically converge to a stationary point at the rate of $\mathcal{O}(1/T + \text{const.})$ with a constant learning rate. Even when $r = 1$ where the term (2) becomes a constant independent of $T$ , our Thm. 3 is still superior to the convergence of these existing works because our result (Thm. 3) is based on the worst-case analysis whereas these works are typically based on the average-case analysis, i.e., their results only hold in expectation over the randomly sampled directions for derivative estimation. This means that their convergence may become even worse when inappropriate directions are used, e.g., directions that are nearly orthogonal to the true derivative which commonly happens in high-dimensional input spaces. In addition, given a fixed $T$ , our ZoRD enjoys a query complexity (i.e., the number of queries in $T$ iterations) of $\mathcal{O}(T)$ , which significantly improves over the $\mathcal{O}(nT)$ of the existing works based on FD ( $n$ in Sec. 2.2). + +The impacts of the number of steps of our virtual updates (i.e., $V$ ) are partially reflected in Thm. 3. 
Specifically, a larger $V$ improves the reduction rate of the term ① because a larger number of virtual GD updates (without requiring additional queries) will be applied in our ZoRD algorithm. This is also unachievable by existing ZO optimization algorithms using FD-based derivative estimation since they require additional queries for the derivative estimation in their every GD update. Meanwhile, a larger $V$ may also negatively impact the performance of our ZoRD since it may lead to the use of those estimated derivatives with large estimation errors (Sec. 3.2). However, this negative impact has + +![](images/2fb0671b56b5797630d6a747ef32a38713d82e3ec978e690ea6388a01f7f8fe4.jpg) + +![](images/30e7e6b88ff343420916ce16025174b87f742f824a39b86d0c53d0bc912081d7.jpg) +- Function Queries -- $\nabla f$ -- $\nabla \mu$ + +![](images/7435b5574d5e73ba4302e5159f8e4d4c40cd50ab8410143f82709bf339b93475.jpg) + +![](images/b9e9b583f5ab93b49af9a61a3c090efdf457efc11a3341914f06dcf3b58a0351.jpg) + +![](images/6c2df39df5bec678e47ecb1a994d5789a085a21360b455c265d7926be1bf4bb1.jpg) +Figure 1: Our derived GP for derivative estimation (4) with different number $n$ of queries. Green curve and its confidence interval denote the mean $\nabla \mu(\boldsymbol{x})$ and standard deviation of the derived GP. +(a) + +![](images/52ac6711c387df65723539614fe94e1ec462fd42ab99dda8830767c808e6ada4.jpg) + +![](images/69252a7f35ad0dda395bd119dead41878d095393545b25e807d1f6014526d1a0.jpg) +Figure 2: Comparison of the derivative estimation errors of our derived GP-based estimator (6) (GP) and the FD estimator, measured by cosine similarity (larger is better) and Euclidean distance (smaller is better). Each curve is the mean $\pm$ standard error from five independent runs. + +![](images/c7e19456afed38e28affab77c25dfb1abd43b71de42fbbaa5db79bff7ffb5055.jpg) + +only been implicitly accounted for by the term ② because this term comes from our Thm. 
2, which is based on a worst-case analysis and gives a uniform upper bound on the derivative estimation error for all inputs in the domain $\mathcal{X}$ . + +# 5 EXPERIMENTS + +In this section, we firstly empirically verify the efficacy of our derived GP-based derivative estimator (6) in Sec. 5.1, and then demonstrate that our ZoRD outperforms existing baseline methods for ZO optimization using synthetic experiments (Sec. 5.2) and real-world experiments (Secs. 5.3, 5.4). + +# 5.1 DERIVATIVE ESTIMATION + +Here we investigate the efficacy of our derivative estimator (6) based on the derived GP for derivatives (4). Specifically, we sample a function $f$ (defined on a one-dimensional domain) from a GP using the SE kernel, and then use a set of randomly selected inputs as well as their noisy observations (as optimization trajectory) to calculate our derived GP for derivatives. The results (Fig. 1) illustrate a number of interesting insights. Firstly, in regions where (even only a few) function queries are performed (e.g., in the region of $[-3,0]$ ), our estimated derivative (i.e., $\nabla \mu_{t-1}(\pmb{x})$ ) generally aligns with the groundtruth derivative (i.e., $\nabla f(\pmb{x})$ ) and our estimation uncertainty (i.e., characterized by $\sqrt{\|\partial \sigma_{t-1}^2(\pmb{x})\|_2}$ ) shrinks compared with other un-queried regions. These results hence demonstrate that our (4) is able to accurately estimate derivatives and reliably quantify the uncertainty of these estimations within the regions where function queries are performed. Secondly, as more input queries are collected (i.e., from left to right in Fig. 1), the uncertainty $\sqrt{\|\partial \sigma_{t-1}^2(\pmb{x})\|_2}$ in the entire domain is decreased in general. This provides an empirical justification for our Thm. 2 which guarantees non-increasing uncertainty and hence non-increasing estimation error. 
Lastly, note that with only 12 queries (rightmost figure), our derivative estimator is already able to accurately estimate the derivative in the entire domain, which represents a remarkable reduction rate of our derivative estimation error. + +Next, we compare our derivative estimator (6) with the FD estimator (Sec. 2.2). Specifically, using the Ackley function with $d = 10$ (see Appx. C.2), we firstly select an input $x_0$ and then follow the FD method (2) to randomly sample $n$ directions $\{\pmb{u}_i\}_{i=1}^n$ from the standard Gaussian distribution, to construct input queries $\{\pmb{x}_0 + \lambda \pmb{u}_i\}_{i=1}^n$ (see Sec. 2.2). Next, these queries and their observations are $(a)$ used as the optimization trajectory to apply our derivative estimator (6), and $(b)$ used by the FD method to estimate the derivative following (2). The results are shown in Fig. 2a (for two different values of $\lambda$ ), in which for both our derived GP-based estimator (6) and the FD estimator, we measure the cosine similarity (larger is better) and Euclidean distance (smaller is better) between the estimated + +![](images/fd31e857fdfda29a70526f3750dfdd07613981fb14662922e4a124caef4bb53c.jpg) +(a) Ackley $(d = 20)$ + +![](images/e73667fdf505e857111dadc0fd07c848d1490dc290fa522ca60cff5936a7b642.jpg) +(b) Ackley $(d = 40)$ + +![](images/0100551861b401ee2614ff43292c7d06a39a06f8256052db091e7693ba9297fc.jpg) +(c) Levy $(d = 40)$ +Figure 3: Optimization of Ackley and Levy functions with different dimensions. The $x$ -axis and $y$ -axis denote the number of queries and log-scaled optimality gap (i.e., $\log(f(\boldsymbol{x}_T) - f(\boldsymbol{x}^*))$ ) achieved after this number of queries. Each curve is the mean $\pm$ standard error from ten independent runs. + +![](images/bc6269a99ec9934f4c72c89db8b20f284c5afd4e9a5baa679820e4ee9bd17b61.jpg) +(d) Levy $(d = 100)$ + +Table 1: Comparison of the number of required queries to achieve a successful black-box adversarial attack. 
Every entry represents mean ± standard deviation from five independent runs. + +
| Dataset | Metric | GLD | RGF | PRGF | TuRBO-1 | TuRBO-10 | ZoRD |
| --- | --- | --- | --- | --- | --- | --- | --- |
| MNIST | # Queries | 1780±222 | 1192±260 | 1236±145 | 654±70 | 747±60 | 248±50 |
| MNIST | Speedup | 7.2× | 4.8× | 5.0× | 2.6× | 3.0× | 1.0× |
| CIFAR-10 | # Queries | 964±175 | 3622±1155 | 4133±1525 | 638±108 | 708±105 | 384±59 |
| CIFAR-10 | Speedup | 2.5× | 9.4× | 10.8× | 1.7× | 1.8× | 1.0× |
+ +derivative and the true derivative at $x_0$ . The figures show that our derivative estimation error enjoys a faster rate of reduction compared with the FD method, which corroborates our theoretical insights from Thm. 2 (Sec. 4.1) positing that our estimation error can be rapidly decreasing. Subsequently, to further highlight our advantage of being able to exploit the optimization trajectory and hence to eliminate the need for additional function queries (Sec. 4.1), we perform another comparison where our derived GP-based estimator (6) only utilizes 20 queries from the optimization trajectory (sampled using the same method above) for derivative estimation. The results (Fig. 2b) show that even with only these 20 queries (without any additional function query), our derivative estimator (6) achieves comparable or better estimation errors than FD using as many as 80 additional queries. Overall, the results in Fig. 2 have provided empirical supports for the superiority of our derived GP-based derivative estimation (6), which substantiates our theoretical justifications in Sec. 4.1. + +# 5.2 SYNTHETIC EXPERIMENTS + +Here we adopt the widely use Ackley and Levy functions with various dimensions (Eriksson et al., 2019) to show the superiority of our ZoRD. We compare ZoRD with a number of representative baselines for ZO optimization, e.g., RGF (Nesterov and Spokoiny, 2017) which uses FD for derivative estimation, PRGF (Cheng et al., 2021) which is a recent extension of RGF, GLD (Golovin et al., 2020) which is a recent ZO optimization algorithm based on direct search, and TuRBO (Eriksson et al., 2019) which is a highly performant Bayesian optimization (BO) algorithm. We also evaluate the performance of a first-order optimization algorithm, i.e., GD with true derivatives. More details are in Appx. C.2. The results are shown in Fig. 3, where ZoRD outperforms all other ZO optimization algorithms. 
Particularly, ZoRD considerably outperforms both RGF and PRGF, which can be attributed to our two major contributions. Firstly, our derivative estimator (6) used by ZoRD is more accurate and more query-efficient than the FD method adopted by RGF and PRGF, as theoretically justified in Sec. 4.1 and empirically demonstrated in Sec. 5.1. Secondly, our dynamic virtual updates (Sec. 3.2) can perform multi-step GD updates without requiring any additional query, which further improves the performance of ZoRD (validated in Appx. D.2). Moreover, ZoRD is the only ZO optimization algorithm that is able to converge to a comparable final performance to that of the GD with true derivatives in every figure of Fig. 3. + +# 5.3 BLACK-BOX ADVERSARIAL ATTACK + +We further compare our ZoRD with other ZO optimization algorithms in the problem of black-box adversarial attack on images, which is one of the most important applications of ZO optimization in recent years. In black-box adversarial attack (Ru et al., 2020), given a fully trained ML model and an image $z$ , we intend to find (through only function queries) a small perturbation $x$ to be added to $z$ + +![](images/5e2cc4a270655bf0cc561718d01579082a6b4c660c8b305cab3b961c4172a6f2.jpg) +(a) Precision + +![](images/cec0e66aa1591dcb2dfa5ffa83037b4fda9859d4f41334f3cdbba10d9b0748cb.jpg) +(b) Recall + +![](images/7585b17454414486877252370863dfb40ad78f6c8e978f2e901729a5fdfe2ad1.jpg) +(c) F1 Score +Figure 4: Optimization of different non-differentiable metrics on the Covertype dataset. The $x$ -axis and $y$ -axis denote, respectively, the number of queries and the improvement on the non-differentiable metric. Each curve is the mean $\pm$ standard error from five independent experiments. + +![](images/b96d17acceccde711d5262da4219c0fa5ed142b795b19050eb3003c174935d16.jpg) +(d) Jaccard index + +such that the perturbed image $z + x$ will be incorrectly classified by the ML model. 
Following the practice from (Cheng et al., 2021), we randomly select an image from MNIST (Lecun et al., 1998) ( $d = 28 \times 28$ ) or CIFAR-10 (Krizhevsky et al., 2009) ( $d = 32 \times 32$ ), and aim to add a perturbation with an $L_{\infty}$ constraint to make a trained deep neural network misclassify the image (more details in Appx. C.3). Tab. 1 summarizes the number of required queries to achieve a successful attack by different algorithms (see results on multiple images in Appx. D.3). The results show that in such high-dimensional ZO optimization problems, our ZoRD again significantly outperforms the other algorithms since it requires a considerably smaller number of queries to achieve a successful attack. Particularly, our ZoRD is substantially more query-efficient than RGF and PRGF which rely on the FD methods for derivative estimation, e.g., for CIFAR-10, the number of queries required by RGF and PRGF are $9.4\times$ and $10.8\times$ of that required by ZoRD. This further verifies the advantages of our trajectory-informed derivative estimation (as justified theoretically in Sec. 4.1 and empirically in Sec. 5.1) and dynamic virtual updates (as demonstrated in Appx. D.2). Remarkably, our ZoRD also outperforms BO (i.e., TuRBO-1/10 which correspond to two versions of the TuRBO algorithm (Eriksson et al., 2019)) which has been widely shown to be query-efficient in black-box adversarial attack (Ru et al., 2020). Overall, these results showcase the ability of our ZoRD to advance the other ZO optimization algorithms in challenging real-world ZO optimization problems. + +# 5.4 NON-DIFFERENTIABLE METRIC OPTIMIZATION + +Non-differentiable metric optimization (Hiranandani et al., 2021; Huang et al., 2021), which has received a surging interest recently, can also be cast as a ZO optimization problem. We therefore use it to further demonstrate the superiority of our ZoRD to other ZO optimization algorithms. 
Specifically, we firstly train a multilayer perceptron (MLP) $(d = 2189)$ on the Covertype (Dua and Graff, 2017) dataset with the cross-entropy loss function. Then, we use the same dataset to fine-tune this MLP model by exploiting ZO optimization algorithms to optimize a non-differentiable metric, such as precision, recall, F1 score and Jaccard index (see more details in Appx. C.4). Here we additionally compare with the evolutionary strategy (ES) which has been previously applied for non-differentiable metric optimization (Huang et al., 2021). Fig. 4 illustrates the percentage improvements achieved by different algorithms during the fine-tuning process (i.e., $(f(\pmb{x}_0) - f(\pmb{x}_T)) \times 100\% / f(\pmb{x}_0)$ ). The results show that our ZoRD again consistently outperforms the other ZO optimization algorithms in terms of both the query efficiency and the final converged performance. These results therefore further substantiate the superiority of ZoRD in optimizing high-dimensional non-differentiable functions. + +# 6 CONCLUSION + +We have introduced the ZoRD algorithm, which achieves query-efficient ZO optimization through two major contributions. Firstly, we have proposed a novel derived GP-based method (6) which only uses the optimization trajectory and hence eliminates the requirement for additional queries (Sec. 3.1) to estimate derivatives. Secondly, we have introduced a novel technique, i.e., dynamic virtual updates, which is made possible by our GP-based derivative estimation, to further improve the performance of our ZoRD (Sec. 3.2). Through theoretical justifications (Sec. 4) and empirical demonstrations (Sec. 5), we show that our derived GP-based derivative estimation improve over existing FD methods and that our ZoRD outperforms various ZO optimization baselines. + +# 7 REPRODUCIBILITY STATEMENT + +For our theoretical results, we have discussed all our assumptions in Sec. 2.1 & Sec. 4.2, and provided our complete proofs in Appx. B. 
For our empirical results, we have provided our detailed experimental settings in Appx. C and included our codes in the supplementary materials (i.e., the zip file). + +# ACKNOWLEDGMENTS + +This research is part of the programme DesCartes and is supported by the National Research Foundation, Prime Minister's Office, Singapore under its Campus for Research Excellence and Technological Enterprise (CREATE) programme. + +# REFERENCES + +Binxin Ru, Adam D. Cobb, Arno Blaas, and Yarin Gal. Bayesopt adversarial attack. In Proc. ICLR, 2020. +Gaurush Hiranandani, Jatin Mathur, Harikrishna Narasimhan, Mahdi Milani Fard, and Sanmi Koyejo. Optimizing black-box metrics with iterative example weighting. In Proc. ICML, 2021. +Tim Salimans, Jonathan Ho, Xi Chen, and Ilya Sutskever. Evolution strategies as a scalable alternative to reinforcement learning. arXiv:1703.03864, 2017. +Yurii E. Nesterov and Vladimir G. Spokoiny. Random gradient-free minimization of convex functions. Found. Comput. Math., 17(2):527-566, 2017. +Shuyu Cheng, Guoqiang Wu, and Jun Zhu. On the convergence of prior-guided zeroth-order optimization algorithms. In Proc. NeurIPS, 2021. +Albert S. Berahas, Liyuan Cao, Krzysztof Choromanski, and Katya Scheinberg. A theoretical and empirical comparison of gradient approximations in derivative-free optimization. Found. Comput. Math., 22(2):507-560, 2022. +Niranjan Srinivas, Andreas Krause, Sham M. Kakade, and Matthias W. Seeger. Gaussian process optimization in the bandit setting: No regret and experimental design. In Proc. ICML, 2010. +Kirthevasan Kandasamy, Akshay Krishnamurthy, Jeff Schneider, and Barnabás Póczos. Parallelised Bayesian optimisation via Thompson sampling. In Proc. AISTATS, 2018. +Carl Edward Rasmussen and Christopher K. I. Williams. Gaussian processes for machine learning. Adaptive computation and machine learning. MIT Press, 2006. +Abraham Flaxman, Adam Tauman Kalai, and H. Brendan McMahan. 
Online convex optimization in the bandit setting: Gradient descent without a gradient. In Proc. SODA, 2005. +Saeed Ghadimi and Guanghui Lan. Stochastic first- and zeroth-order methods for nonconvex stochastic programming. SIAM Journal on Optimization, 23(4):2341-2368, 2013. +Sijia Liu, Bhavya Kailkhura, Pin-Yu Chen, Pai-Shun Ting, Shiyu Chang, and Lisa Amini. Zeroth-order stochastic variance reduction for nonconvex optimization. In Proc. NeurIPS, 2018a. +Sijia Liu, Xingguo Li, Pin-Yu Chen, Jarvis D. Haupt, and Lisa Amini. Zeroth-order stochastic projected gradient descent for nonconvex optimization. In Proc. GlobalSIP, 2018b. +Xiangru Lian, Huan Zhang, Cho-Jui Hsieh, Yijun Huang, and Ji Liu. A comprehensive linear speedup analysis for asynchronous stochastic parallel optimization from zeroth-order to first-order. In Proc. NIPS, 2016. +Sashank J Reddi, Suvrit Sra, Barnabas Poczos, and Alexander J Smola. Proximal stochastic methods for nonsmooth nonconvex finite-sum optimization. In Proc. NIPS, 2016. +David Eriksson, Michael Pearce, Jacob R. Gardner, Ryan Turner, and Matthias Poloczek. Scalable global optimization via local Bayesian optimization. In Proc. NeurIPS, 2019. + +Daniel Golovin, John Karro, Greg Kochanski, Chansoo Lee, Xingyou Song, and Qiuyi (Richard) Zhang. Gradientless descent: High-dimensional zeroth-order optimization. In Proc. ICLR, 2020. +Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner. Gradient-based learning applied to document recognition. Proceedings of the IEEE, pages 2278-2324, 1998. +Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009. +Chen Huang, Shuangfei Zhai, Pengsheng Guo, and Josh M. Susskind. Metricopt: Learning to optimize black-box evaluation metrics. In Proc. CVPR, 2021. +Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive.ics.uci.edu/ml. +Sebastian U Stich, Christian L Muller, and Bernd Gartner. 
Optimization of convex functions with random pursuit. SIAM Journal on Optimization, 23(2):1284-1309, 2013. +Sayak Ray Chowdhury and Aditya Gopalan. On kernelized multi-armed bandits. In Proc. ICML, 2017. +Zhongxiang Dai, Haibin Yu, Bryan Kian Hsiang Low, and Patrick Jaillet. Bayesian optimization meets Bayesian optimal stopping. In Proc. ICML, 2019. +Zhongxiang Dai, Bryan Kian Hsiang Low, and Patrick Jaillet. Federated bayesian optimization via thompson sampling. In Proc. NeurIPS, 2020. +Benjamin Letham, Roberto Calandra, Akshara Rai, and Eytan Bakshy. Re-examining linear embeddings for high-dimensional Bayesian optimization. In Proc. NeurIPS, 2020. +Andrew Ilyas, Logan Engstrom, and Aleksander Madry. Prior convictions: Black-box adversarial attacks with bandits and priors. In Proc. ICLR, 2019. +Florian Meier, Asier Mujika, Marcelo Matheus Gauy, and Angelika Steger. Improving gradient estimation in evolutionary strategies with past descent directions. arXiv:1910.05268, 2019. +Niru Maheswaranathan, Luke Metz, George Tucker, Dami Choi, and Jascha Sohl-Dickstein. Guided evolutionary strategies: Augmenting random search with surrogate gradients. In Proc. ICML, 2019. +Shuyu Cheng, Yinpeng Dong, Tianyu Pang, Hang Su, and Jun Zhu. Improving black-box adversarial attacks with a transfer-based prior. In NeurIPS, 2019. +Beatrice Laurent and Pascal Massart. Adaptive estimation of a quadratic functional by model selection. Annals of Statistics, pages 1302-1338, 2000. +Sayak Ray Chowdhury and Aditya Gopalan. No-regret algorithms for multi-task Bayesian optimization. In Proc. AISTATS, 2021. +Stephen P. Boyd and Lieven Vandenberghe. Convex Optimization. Cambridge University Press, 2014. +Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proc. ICLR, 2015. +Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proc. CVPR, 2016. 
+Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. OpenAI Gym. arXiv:1606.01540, 2016. +M. D. McKay, R. J. Beckman, and W. J. Conover. A comparison of three methods for selecting values of input variables in the analysis of output from a computer code. Technometrics, 21(2): 239-245, 1979. +Jian Tan, Niv Nayman, and Mengchang Wang. CobBO: Coordinate backoff Bayesian optimization with two-stage kernels. arXiv:2101.05147, 2021. +Hong Qian and Yang Yu. Derivative-free reinforcement learning: A review. Frontiers Comput. Sci., 15(6):156336, 2021. + +# APPENDIX A RELATED WORK + +Various types of algorithms have been proposed in the literature to solve ZO optimization problems, e.g., direct search, Bayesian optimization (BO) and GD-based algorithms with estimated derivatives. Particularly, direct search, e.g., (Stich et al., 2013; Golovin et al., 2020), relies on the comparison of function values at different inputs for the updates, which can be query-inefficient in practice owing to its indirect utilization of function values. In contrast, Bayesian optimization (BO) directly utilizes the function values to model the objective function using a Gaussian process (GP) and iteratively selects the inputs to query by trading off sampling potentially optimal inputs (i.e., exploitation) and inputs that can improve the GP belief of the objective function over the entire input domain (i.e., exploration) (Chowdhury and Gopalan, 2017; Srinivas et al., 2010; Dai et al., 2019; 2020). However, in ZO optimization problems with high-dimensional input spaces, BO algorithms typically suffer from query inefficiency and large computational complexity (Rasmussen and Williams, 2006; Letham et al., 2020; Eriksson et al., 2019), which significantly hinders their real-world applications. 
Therefore, GD-based algorithms with estimated derivatives, which inherit the advantage of GD-based algorithms in optimizing functions with high-dimensional input spaces, have been more widely applied in practice. For these algorithms, the derivatives are commonly estimated using the finite difference (FD) approximation (which requires additional function queries) of the directional derivatives along selected directions, in which the directions can be randomly sampled unit vectors (Flaxman et al., 2005), Gaussian vectors (Nesterov and Spokoiny, 2017), or standard bases (Lian et al., 2016) (Sec. 2.2). More recently, some works have incorporated a time-dependent prior (i.e., the estimated derivative in the previous iteration) into existing FD methods to improve the quality of their derivative estimation (Ilyas et al., 2019; Meier et al., 2019; Cheng et al., 2021). Nevertheless, such a prior is also estimated by the FD method (i.e., in the previous iteration) and can hence be biased owing to its estimation error, which may even lead to larger derivative estimation errors in practice due to compounding errors. Another line of work has taken the surrogate derivatives from other sources to help reduce the derivative estimation error of existing FD methods (Maheswaranathan et al., 2019; Cheng et al., 2019). However, these surrogate derivatives may generally be unavailable in practice. Importantly, these existing FD methods require additional function queries for every derivative estimation during optimization, which will significantly increase the query complexity of ZO optimization algorithms which employ these FD methods for derivative estimation. 
+ +# APPENDIX B PROOFS + +# B.1 PROOF OF LEMMA 1 + +According to Rasmussen and Williams (2006), if a function $f$ follows from a Gaussian process, its derivative also follows a Gaussian process determined by its mean $\mathbb{E}[\cdot]$ and covariance $\mathrm{Cov}(\cdot, \cdot)$ , i.e., + +$$ +\nabla f \sim \mathcal {G P} (\mathbb {E} [ \nabla f ], \operatorname {C o v} (\nabla f, \nabla f)). \tag {9} +$$ + +So, to prove Lemma 1, we only need to derive the mean and the covariance of the Gaussian process above for a function $f$ that is sampled from another Gaussian process, i.e., $f \sim \mathcal{GP}(\mu(\cdot), \sigma^2(\cdot, \cdot))$ . Specifically, for the mean $\mathbb{E}[\nabla f]$ , we have + +$$ +\mathbb {E} [ \nabla f ] = \nabla \mathbb {E} [ f ] = \nabla \mu . \tag {10} +$$ + +where the first equality derives from the interchangeability of the expectation and derivative operation based on the Leibniz integral rule. The second equality comes from the fact that $\mathbb{E}[f] = \mu$ . 
+ +For the covariance $\mathrm{Cov}(\nabla f,\nabla f)$ , we have + +$$ +\begin{array}{l} \operatorname {C o v} \left(\nabla f (\boldsymbol {z}), \nabla f \left(\boldsymbol {z} ^ {\prime}\right)\right) \stackrel {(a)} {=} \mathbb {E} \left[ \left(\nabla f (\boldsymbol {z}) - \mathbb {E} \left[ \nabla f (\boldsymbol {z}) \right]\right) ^ {\top} \left(\nabla f \left(\boldsymbol {z} ^ {\prime}\right) - \mathbb {E} \left[ \nabla f \left(\boldsymbol {z} ^ {\prime}\right) \right]\right) \right] \\ \stackrel {(b)} {=} \mathbb {E} \left[ \nabla \left(f (\boldsymbol {z}) - \mathbb {E} [ f (\boldsymbol {z}) ]\right) ^ {\top} \nabla \left(f \left(\boldsymbol {z} ^ {\prime}\right) - \mathbb {E} [ f \left(\boldsymbol {z} ^ {\prime}\right) ]\right) \right] \\ \stackrel {(c)} {=} \mathbb {E} \left[ \partial_ {\boldsymbol {z}} \partial_ {\boldsymbol {z} ^ {\prime}} \left(f (\boldsymbol {z}) - \mathbb {E} [ f (\boldsymbol {z}) ]\right) ^ {\top} \left(f \left(\boldsymbol {z} ^ {\prime}\right) - \mathbb {E} [ f \left(\boldsymbol {z} ^ {\prime}\right) ]\right) \right] \tag {11} \\ \stackrel {(d)} {=} \partial_ {\pmb {z}} \partial_ {\pmb {z} ^ {\prime}} \mathbb {E} \left[ \left(f (\pmb {z}) - \mathbb {E} \left[ f (\pmb {z}) \right]\right) ^ {\top} \left(f (\pmb {z} ^ {\prime}) - \mathbb {E} \left[ f (\pmb {z} ^ {\prime}) \right]\right) \right] \\ \stackrel {(e)} {=} \partial_ {\boldsymbol {z}} \partial_ {\boldsymbol {z} ^ {\prime}} \sigma_ {t} ^ {2} (\boldsymbol {z}, \boldsymbol {z} ^ {\prime}) . \\ \end{array} +$$ + +Notably, $(b)$ and $(d)$ also derive from the interchangeability of the expectation and derivative operation based on the Leibniz integral rule. Besides, $(e)$ is obtained based on $\operatorname{Cov}(f, f) = \sigma^2(\cdot, \cdot)$ . This finally completes our proof. + +# B.2 PROOF OF THEOREM 1 + +To begin with, we introduce the following concentration inequality for standard multi-variate Gaussian distribution: + +Lemma B.1 (Laurent and Massart (2000)). 
Let $\zeta \sim \mathcal{N}(\mathbf{0},\mathbf{I}_m)$ and $\delta \in (0,1)$ then + +$$ +\mathbb {P} \left(\| \boldsymbol {\zeta} \| _ {2} \leq \sqrt {m + 2 (\sqrt {m} + 1) \ln (1 / \delta)}\right) \geq 1 - \delta . \tag {12} +$$ + +Define $\zeta \triangleq \left(\partial \sigma_t^2 (\pmb {x})\right)^{-1 / 2}\left(\nabla f(\pmb {x}) - \nabla \mu_t(\pmb {x})\right)$ , according to Lemma 1, we then have that $\zeta$ follows a standard multi-variate Gaussian distribution, i.e., + +$$ +\zeta \sim \mathcal {N} (\mathbf {0}, \mathbf {I} _ {d}). \tag {13} +$$ + +Let $\delta \in (0,1)$ . By substituting the result above into Lemma B.1, the following holds with probability of at least $1 - \delta$ : + +$$ +\begin{array}{l} \left\| \nabla f (\boldsymbol {x}) - \nabla \mu_ {t} (\boldsymbol {x}) \right\| _ {2} = \left\| \left(\partial \sigma_ {t} ^ {2} (\boldsymbol {x})\right) ^ {- 1 / 2} \boldsymbol {\zeta} \right\| _ {2} \\ \leq \sqrt {\left\| \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \right\| _ {2}} \| \zeta \| _ {2} \tag {14} \\ \leq \sqrt {d + 2 (\sqrt {d} + 1) \ln (1 / \delta)} \sqrt {\| \partial \sigma_ {t} ^ {2} (\pmb {x}) \| _ {2}} \\ = \beta \sqrt {\| \partial \sigma_ {t} ^ {2} (\pmb {x}) \| _ {2}} \\ \end{array} +$$ + +with $\beta \triangleq \sqrt{d + 2(\sqrt{d} + 1)\ln(1 / \delta)}$ and the first inequality is from the Cauchy-Schwarz inequality, which completes our proof. + +# B.3 PROOF OF THEOREM 2 + +We first introduce the following lemmas. + +Lemma B.2 (Chowdhury and Gopalan (2021)). For any $\sigma \in \mathbb{R}$ and any matrix $\mathbf{A}$ , the following hold + +$$ +\mathbf {I} - \mathbf {A} ^ {\top} \left(\mathbf {A} \mathbf {A} ^ {\top} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \mathbf {A} = \sigma^ {2} \left(\mathbf {A} ^ {\top} \mathbf {A} + \sigma^ {2} \mathbf {I}\right) ^ {- 1}. \tag {15} +$$ + +Lemma B.3 (Sherman-Morrison formula). 
For any invertible square matrix $\mathbf{A}$ and column vectors $\mathbf{u},\mathbf{v}$ , suppose $\mathbf{A} + \mathbf{u}\mathbf{v}^{\top}$ is invertible, then the following holds + +$$ +\left(\mathbf {A} + \boldsymbol {u} \boldsymbol {v} ^ {\top}\right) ^ {- 1} = \mathbf {A} ^ {- 1} - \frac {\mathbf {A} ^ {- 1} \boldsymbol {u} \boldsymbol {v} ^ {\top} \mathbf {A} ^ {- 1}}{1 + \boldsymbol {v} ^ {\top} \mathbf {A} ^ {- 1} \boldsymbol {u}}. \tag {16} +$$ + +**Preparation.** We then introduce some additional notations and representations for our proof of Theorem 2. Following the common practice in (Chowdhury and Gopalan, 2021), we let the kernel $k$ be defined by $\psi(\pmb{x})$ , i.e., $k(\pmb{x},\pmb{x}^{\prime}) = \psi(\pmb{x})^{\top}\psi(\pmb{x}^{\prime})$ , and $\phi(\pmb{x}) \triangleq \nabla \psi(\pmb{x})$ . We then further define the $(t\times d)$ -dimensional Jacobian matrix $\phi_t(\pmb{x}) \triangleq [\phi(\pmb{x})^\top \psi(\pmb{x}_\tau)]_{\tau=1}^t$ and $\Psi_t \triangleq [\psi(\pmb{x}_\tau)]_{\tau=1}^t$ . The matrix $\mathbf{K}_t$ and the covariance matrix $\partial \sigma_t^2(\pmb{x})$ defined on the optimization trajectory $\mathcal{D}_t$ in our Sec. 3.1 can be reformulated as + +$$ +\begin{array}{l} \mathbf {K} _ {t} = \boldsymbol {\Psi} _ {t} ^ {\top} \boldsymbol {\Psi} _ {t}, \tag {17} \\ \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) = \phi (\boldsymbol {x}) ^ {\top} \phi (\boldsymbol {x}) - \phi_ {t} (\boldsymbol {x}) ^ {\top} \left(\mathbf {K} _ {t} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \phi_ {t} (\boldsymbol {x}). 
\\ \end{array} +$$ + +Based on the reformulation above, define $\mathbf{V}_t \triangleq \boldsymbol{\Psi}_t \boldsymbol{\Psi}_t^\top + \sigma^2 \mathbf{I}$ , we can further reformulate $\partial \sigma_t^2(\boldsymbol{x})$ as below + +$$ +\begin{array}{l} \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \stackrel {(a)} {=} \phi (\boldsymbol {x}) ^ {\top} \phi (\boldsymbol {x}) - \phi_ {t} (\boldsymbol {x}) ^ {\top} \left(\mathbf {K} _ {t} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \phi_ {t} (\boldsymbol {x}) \\ \stackrel {(b)} {=} \phi (\boldsymbol {x}) ^ {\top} \phi (\boldsymbol {x}) - \phi (\boldsymbol {x}) ^ {\top} \Psi_ {t} \left(\Psi_ {t} ^ {\top} \Psi_ {t} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \Psi_ {t} ^ {\top} \phi (\boldsymbol {x}) \\ \stackrel {(c)} {=} \phi (\boldsymbol {x}) ^ {\top} \left(\mathbf {I} - \boldsymbol {\Psi} _ {t} \left(\boldsymbol {\Psi} _ {t} ^ {\top} \boldsymbol {\Psi} _ {t} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \boldsymbol {\Psi} _ {t} ^ {\top}\right) \phi (\boldsymbol {x}) \tag {18} \\ \stackrel {(d)} {=} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \left(\Psi_ {t} \Psi_ {t} ^ {\top} + \sigma^ {2} \mathbf {I}\right) ^ {- 1} \phi (\boldsymbol {x}) \\ \stackrel {(e)} {=} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t} ^ {- 1} \phi (\boldsymbol {x}). \\ \end{array} +$$ + +Note that $(b)$ is obtained by exploiting the fact that $\mathbf{K}_t = \boldsymbol{\Psi}_t^\top \boldsymbol{\Psi}_t$ and $\phi_t(\boldsymbol{x}) = \phi(\boldsymbol{x})^\top \boldsymbol{\Psi}_t$ . In addition, $(d)$ comes from Lemma B.2 by replacing the matrix $\mathbf{A}$ in Lemma B.2 with the matrix $\boldsymbol{\Psi}_t^\top$ . + +First Part. We then prove the first half part of our Theorem 2, i.e., the following Lemma B.4. + +Lemma B.4 (Non-Increasing Variance Norm). 
For any $\pmb{x} \in \mathcal{X}$ and any $t \geq 1$ , we have that + +$$ +\left\| \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \right\| _ {2} \leq \left\| \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}) \right\| _ {2}. \tag {19} +$$ + +Proof. Based on our additional notations and representations, we have + +$$ +\begin{array}{l} \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \stackrel {(a)} {=} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t} ^ {- 1} \phi (\boldsymbol {x}) \\ \stackrel {(b)} {=} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \left(\boldsymbol {\Psi} _ {t - 1} \boldsymbol {\Psi} _ {t - 1} ^ {\top} + \sigma^ {2} \mathbf {I} + \psi (\boldsymbol {x} _ {t}) \psi (\boldsymbol {x} _ {t}) ^ {\top}\right) ^ {- 1} \phi (\boldsymbol {x}) \\ \stackrel {(c)} {=} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \left(\mathbf {V} _ {t - 1} + \psi (\boldsymbol {x} _ {t}) \psi (\boldsymbol {x} _ {t}) ^ {\top}\right) ^ {- 1} \phi (\boldsymbol {x}) \\ \stackrel {(d)} {=} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \phi (\boldsymbol {x}) - \sigma^ {2} \left(1 + \psi (\boldsymbol {x} _ {t}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \psi (\boldsymbol {x} _ {t})\right) ^ {- 1} \phi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \psi (\boldsymbol {x} _ {t}) \psi (\boldsymbol {x} _ {t}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \phi (\boldsymbol {x}) \\ \stackrel {(e)} {=} \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}) - \sigma^ {2} \left(1 + \psi (\boldsymbol {x} _ {t}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \psi (\boldsymbol {x} _ {t})\right) ^ {- 1} \phi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \psi (\boldsymbol {x} _ {t}) \psi (\boldsymbol {x} _ {t}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \phi (\boldsymbol {x}) \\ \stackrel {(f)} {\preccurlyeq} \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}). 
\tag {20} \\ \end{array} +$$ + +Note that (a) follows from the aforementioned definition of $\mathbf{V}_t$ and (b) comes from the fact that $\Psi_t\Psi_t^\top = \Psi_{t - 1}\Psi_{t - 1}^\top +\psi (\pmb {x}_t)\psi (\pmb {x}_t)^\top$ . Similarly, (c) uses the definition of $\mathbf{V}_{t - 1}$ . In addition, equality (d) derives from Lemma B.3 by letting $\mathbf{A} = \mathbf{V}_{t - 1}$ and $\pmb {u} = \pmb {v} = \psi (\pmb {x}_t)$ and (e) follows from the reformulation of $\partial \sigma_{t - 1}^2 (\pmb {x})$ in (18). Finally, (f) derives from the positive semi-definite property of $\phi (\pmb {x})^{\top}\mathbf{V}_{t - 1}^{-1}\psi (\pmb {x}_t)\psi (\pmb {x}_t)^{\top}\mathbf{V}_{t - 1}^{-1}\phi (\pmb {x})$ as well as the fact that $1 + \psi (\pmb {x}_t)^{\top}\mathbf{V}_{t - 1}^{-1}\psi (\pmb {x}_t) > 0$ . That is, for any column vector $\textbf{z}$ we have that + +$$ +\begin{array}{l} \boldsymbol {z} ^ {\top} \phi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \psi (\boldsymbol {x} _ {t}) \psi (\boldsymbol {x} _ {t}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \phi (\boldsymbol {x}) \boldsymbol {z} = \left(\psi (\boldsymbol {x} _ {t}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \phi (\boldsymbol {x}) \boldsymbol {z}\right) ^ {\top} \left(\psi (\boldsymbol {x} _ {t}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \phi (\boldsymbol {x}) \boldsymbol {z}\right) \\ = \left\| \psi (\boldsymbol {x} _ {t}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \phi (\boldsymbol {x}) \boldsymbol {z} \right\| _ {2} ^ {2} \tag {21} \\ \geq 0. \\ \end{array} +$$ + +So, $\phi (\pmb {x})^{\top}\mathbf{V}_{t - 1}^{-1}\psi (\pmb {x}_t)\psi (\pmb {x}_t)^{\top}\mathbf{V}_{t - 1}^{-1}\phi (\pmb {x})$ is positive semi-definite. 
Following a similar way, we are also able to verify that $1 + \psi (\pmb {x}_t)^{\top}\mathbf{V}_{t - 1}^{-1}\psi (\pmb {x}_t) > 0$ by showing that $\psi (\pmb {x}_t)^{\top}\mathbf{V}_{t - 1}^{-1}\psi (\pmb {x}_t)\geq 0$ using the decomposition of $\mathbf{V}_{t - 1}^{-1}$ from the Principal Component Analysis (PCA). Since $\partial \sigma_t^2 (\pmb {x})\preccurlyeq \partial \sigma_{t - 1}^2 (\pmb {x})$ is equivalent to $\left\| \partial \sigma_t^2 (\pmb {x})\right\| _2\leq \left\| \partial \sigma_{t - 1}^2 (\pmb {x})\right\| _2$ , we then complete the proof of the first half part of our Theorem 2. + +Second Part. To prove the rest of our Theorem 2, we firstly introduce the following lemmas. + +Lemma B.5. For any $\pmb{x} \in \mathcal{X}$ and any $t \geq 1$ , the following holds + +$$ +\mathbf {V} _ {t} ^ {- 1} \preccurlyeq \mathbf {V} _ {t - 1} ^ {- 1}. \tag {22} +$$ + +Proof. For any column vector $z$ , we have + +$$ +\begin{array}{l} \boldsymbol {z} ^ {\top} \left(\mathbf {V} _ {t} - \mathbf {V} _ {t - 1}\right) \boldsymbol {z} = \boldsymbol {z} ^ {\top} \psi (\boldsymbol {x} _ {t}) \psi (\boldsymbol {x} _ {t}) ^ {\top} \boldsymbol {z} \\ = \left(\psi \left(\boldsymbol {x} _ {t}\right) ^ {\top} \boldsymbol {z}\right) ^ {\top} \left(\psi \left(\boldsymbol {x} _ {t}\right) ^ {\top} \boldsymbol {z}\right) \tag {23} \\ = \left\| \psi (\boldsymbol {x} _ {t}) ^ {\top} \boldsymbol {z} \right\| _ {2} ^ {2} \\ \geq 0. \\ \end{array} +$$ + +The first equality comes from the intermediate result in (20). So, $\mathbf{V}_t - \mathbf{V}_{t-1}$ is positive semi-definite, i.e., $\mathbf{V}_{t-1} \preccurlyeq \mathbf{V}_t$ . This can also indicate that $\mathbf{V}_t^{-1} \preccurlyeq \mathbf{V}_{t-1}^{-1}$ , which thus completes our proof. + +Lemma B.6 (Lower Bound of Variance Norm). 
For any $\pmb{x} \in \mathcal{X}$ and any $t \geq 1$ , the following holds + +$$ +1 / \left(1 + 1 / \sigma^ {2}\right) \left\| \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}) \right\| _ {2} \leq \left\| \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \right\| _ {2}. \tag {24} +$$ + +Proof. We firstly show that + +$$ +\begin{array}{l} \left\| \mathbf {V} _ {t} ^ {- 1 / 2} \psi (\boldsymbol {x}) \psi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t} ^ {- 1 / 2} \right\| _ {2} \stackrel {(a)} {\leq} \left\| \mathbf {V} _ {t} ^ {- 1 / 2} \psi (\boldsymbol {x}) \right\| _ {2} \left\| \psi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t} ^ {- 1 / 2} \right\| _ {2} \\ \stackrel {(b)} {=} \left\| \psi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t} ^ {- 1 / 2} \right\| _ {2} ^ {2} \\ \stackrel {(c)} {=} \psi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t} ^ {- 1 / 2} \mathbf {V} _ {t} ^ {- 1 / 2} \psi (\boldsymbol {x}) \\ \stackrel {(d)} {=} \psi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t} ^ {- 1} \psi (\boldsymbol {x}) \tag {25} \\ \stackrel {(e)} {\leq} \psi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \psi (\boldsymbol {x}) \\ \stackrel {(f)} {\leq} \psi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {0} ^ {- 1} \psi (\boldsymbol {x}) \\ \stackrel {(g)} {=} \psi (\boldsymbol {x}) ^ {\top} \psi (\boldsymbol {x}) / \sigma^ {2} \\ \stackrel {(h)} {=} 1 / \sigma^ {2}. \\ \end{array} +$$ + +Note that $(a)$ derives from the Cauchy-Schwarz inequality. As for $(b)$ and $(c)$ , they have exploited the fact that $\left(\mathbf{V}_t^{-1/2}\psi(\boldsymbol{x})\right)^\top = \psi(\boldsymbol{x})^\top\mathbf{V}_t^{-1/2}$ and $\psi(\boldsymbol{x})^\top\mathbf{V}_t^{-1/2}$ is a row vector. In addition, $(e)$ follows from Lemma B.5. Finally, $(g)$ results from $\mathbf{V}_0^{-1} = \mathbf{I}/\sigma^2$ and $(h)$ derives from the assumption that $k(\boldsymbol{x},\boldsymbol{x}) \leq 1$ ( $\forall \boldsymbol{x} \in \mathcal{X}$ ) in Sec. 2.1. 
Alternatively, we can restate the result above as + +$$ +\mathbf {V} _ {t} ^ {- 1 / 2} \psi (\boldsymbol {x}) \psi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t} ^ {- 1 / 2} \preccurlyeq \sigma^ {- 2} \mathbf {I}. \tag {26} +$$ + +We then complete our proof on the first inequality in Lemma B.6 using the following inequality: + +$$ +\begin{array}{l} \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \stackrel {(a)} {=} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \left(\mathbf {V} _ {t - 1} + \psi (\boldsymbol {x} _ {t}) \psi (\boldsymbol {x} _ {t}) ^ {\top}\right) ^ {- 1} \phi (\boldsymbol {x}) \\ \stackrel {(b)} {=} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \left[ \mathbf {V} _ {t - 1} ^ {1 / 2} \left(\mathbf {I} + \mathbf {V} _ {t - 1} ^ {- 1 / 2} \psi (\boldsymbol {x} _ {t}) \psi (\boldsymbol {x} _ {t}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1 / 2}\right) \mathbf {V} _ {t - 1} ^ {1 / 2} \right] ^ {- 1} \phi (\boldsymbol {x}) \\ \stackrel {(c)} {=} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1 / 2} \left(\mathbf {I} + \mathbf {V} _ {t - 1} ^ {- 1 / 2} \psi \left(\boldsymbol {x} _ {t}\right) \psi \left(\boldsymbol {x} _ {t}\right) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1 / 2}\right) ^ {- 1} \mathbf {V} _ {t - 1} ^ {- 1 / 2} \phi (\boldsymbol {x}) \tag {27} \\ \stackrel {(d)} {\succcurlyeq} \sigma^ {2} \phi (\boldsymbol {x}) ^ {\top} \mathbf {V} _ {t - 1} ^ {- 1} \phi (\boldsymbol {x}) / (1 + 1 / \sigma^ {2}) \\ \stackrel {(e)} {=} \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}) / (1 + 1 / \sigma^ {2}) \\ \end{array} +$$ + +where $(a)$ derives from (20) and $(c)$ comes from the inversion of matrix product. 
Finally $(d)$ follows from the result in (26) and $(e)$ exploits the reformulation of $\partial \sigma_{t - 1}^2 (\pmb {x})$ + +According to Lemma B.4 and Lemma B.6, the following holds for any $\pmb{x} \in \mathcal{X}$ and any $t \geq 1$ , + +$$ +\frac {1}{1 + 1 / \sigma^ {2}} \leq \frac {\left\| \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \right\| _ {2}}{\left\| \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}) \right\| _ {2}} \leq 1. \tag {28} +$$ + +Based on the definition of $r$ in our Theorem 2, we therefore also have + +$$ +r \triangleq \max _ {\boldsymbol {x} \in \mathcal {X}, t \geq 1} \sqrt {\left\| \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \right\| _ {2} / \left\| \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}) \right\| _ {2}} \in \left[ 1 / \sqrt {1 + 1 / \sigma^ {2}}, 1 \right]. \tag {29} +$$ + +As a result, for every iteration $t$ of our Algo. 2, we have + +$$ +\begin{array}{l} \sqrt {\| \partial \sigma_ {t} ^ {2} (\boldsymbol {x}) \| _ {2}} \leq r \sqrt {\left\| \partial \sigma_ {t - 1} ^ {2} (\boldsymbol {x}) \right\| _ {2}} \\ \leq r ^ {t} \sqrt {\left\| \partial \sigma_ {0} ^ {2} (\boldsymbol {x}) \right\| _ {2}} \tag {30} \\ = r ^ {t} \sqrt {\| \partial_ {z} \partial_ {z ^ {\prime}} k (z , z ^ {\prime}) | _ {z = z ^ {\prime} = x} \| _ {2}} \\ \leq r ^ {t} \kappa \\ \end{array} +$$ + +where the last inequality derives from our assumption of $\| \partial_z\partial_{z'}k(z,z')|_{z = z' = x}\| _2\leq \kappa^2$ ( $\forall \pmb {x}\in \mathcal{X}$ ) in our Sec. 2.1. By substituting the result above into our Theorem 1, we complete our proof of Theorem 2. 
+ +# B.4 PROOF OF THEOREM 3 + +**Preparation.** Following the definition of the derivative mapping on the true derivative $\nabla f(\boldsymbol{x}_{t,\tau})$ in (8), we define the following derivative mapping on our estimated derivative $\nabla \mu_{t-1}(\boldsymbol{x}_{t,\tau})$ : + +$$ +\widehat {G} _ {t, \tau} \triangleq \frac {\boldsymbol {x} _ {t , \tau} - \boldsymbol {x} _ {t , \tau + 1}}{\eta_ {t , \tau}} = \frac {\boldsymbol {x} _ {t , \tau} - \mathcal {P} _ {\chi} (\boldsymbol {x} _ {t , \tau} - \eta_ {t , \tau} \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t , \tau}))}{\eta_ {t , \tau}}. \tag {31} +$$ + +By re-arranging it, we have the following update rule that has reformulated (7): + +$$ +\boldsymbol {x} _ {t, \tau + 1} = \boldsymbol {x} _ {t, \tau} - \eta_ {t, \tau} \widehat {G} _ {t, \tau}. \tag {32} +$$ + +Based on our definition of the derivative mappings in (31) and (8), we introduce the following lemmas: + +Lemma B.7 (General Projection Inequalities). Given $\mathcal{P}_{\mathcal{X}}(\boldsymbol{x}) = \arg \min_{\boldsymbol{z} \in \mathcal{X}} \| \boldsymbol{x} - \boldsymbol{z} \|_2^2 / 2$ and domain $\mathcal{X}$ , for any $\boldsymbol{x}, \boldsymbol{x}'$ , we have + +$$ +\left\| \boldsymbol {x} - \mathcal {P} _ {\mathcal {X}} (\boldsymbol {x}) \right\| _ {2} \leq \left\| \boldsymbol {x} - \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2}, \tag {33} +$$ + +$$ +\left\| \mathcal {P} _ {\mathcal {X}} (\boldsymbol {x}) - \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2} \leq \left\| \boldsymbol {x} - \boldsymbol {x} ^ {\prime} \right\| _ {2}. \tag {34} +$$ + +Proof. For (33), as $\mathcal{P}_{\mathcal{X}}(\pmb{x}^{\prime}) \in \mathcal{X} (\forall \pmb{x}^{\prime})$ and $\mathcal{P}_{\mathcal{X}}(\pmb{x}) = \arg \min_{\pmb{z} \in \mathcal{X}} \| \pmb{x} - \pmb{z} \|_2^2 / 2$ , we then naturally have (33). 
+ +For (34), since $\mathcal{P}_{\mathcal{X}}(\pmb{x})$ is the optimum of $h(\pmb{z}) = \| \pmb{x} - \pmb{z}\|_2^2 / 2$ , according to the optimality condition of the convex projection function $h(\pmb{z})$ within the domain $\pmb{z} \in \mathcal{X}$ (Boyd and Vandenberghe, 2014), we then have the following inequality for any $\mathcal{P}_{\mathcal{X}}(\pmb{x}') \in \mathcal{X}$ : + +$$ +\nabla h (\boldsymbol {z}) ^ {\top} \left(\mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} ^ {\prime}\right) - \boldsymbol {z}\right) \geq 0. \tag {35} +$$ + +By taking $\nabla h(z) = z - x$ with $z = \mathcal{P}_{\mathcal{X}}(x)$ into the inequality above, we have + +$$ +\left(\mathcal {P} _ {\mathcal {X}} (\boldsymbol {x}) - \boldsymbol {x}\right) ^ {\top} \left(\mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} ^ {\prime}\right) - \mathcal {P} _ {\mathcal {X}} (\boldsymbol {x})\right) \geq 0. \tag {36} +$$ + +By exchanging $\pmb{x}$ and $\pmb{x}'$ in the result above, we achieve the following similar result: + +$$ +\left(\mathcal {P} _ {\chi} \left(\boldsymbol {x} ^ {\prime}\right) - \boldsymbol {x} ^ {\prime}\right) ^ {\top} \left(\mathcal {P} _ {\chi} (\boldsymbol {x}) - \mathcal {P} _ {\chi} \left(\boldsymbol {x} ^ {\prime}\right)\right) \geq 0. \tag {37} +$$ + +By summing (36) and (37), + +$$ +\left(\boldsymbol {x} - \boldsymbol {x} ^ {\prime}\right) ^ {\top} \left(\mathcal {P} _ {\chi} (\boldsymbol {x}) - \mathcal {P} _ {\chi} \left(\boldsymbol {x} ^ {\prime}\right)\right) \geq \left\| \mathcal {P} _ {\chi} (\boldsymbol {x}) - \mathcal {P} _ {\chi} \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2} ^ {2}. 
\tag {38} +$$ + +Based on the Cauchy-Schwarz inequality, we finally achieve (34) using + +$$ +\begin{array}{l} \left\| \mathcal {P} _ {\mathcal {X}} (\boldsymbol {x}) - \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} ^ {\prime}\right) \right\| _ {2} ^ {2} \leq \left(\boldsymbol {x} - \boldsymbol {x} ^ {\prime}\right) ^ {\top} \left(\mathcal {P} _ {\mathcal {X}} (\boldsymbol {x}) - \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} ^ {\prime}\right)\right) \tag {39} \\ \leq \| \boldsymbol {x} - \boldsymbol {x} ^ {\prime} \| _ {2} \| \mathcal {P} _ {\chi} (\boldsymbol {x}) - \mathcal {P} _ {\chi} (\boldsymbol {x} ^ {\prime}) \| _ {2} \\ \end{array} +$$ + +where both sides need to be divided by $\| \mathcal{P}_{\mathcal{X}}(\pmb {x}) - \mathcal{P}_{\mathcal{X}}(\pmb{x}^{\prime})\|_{2}$ to complete our proof. + +Lemma B.8 (Inequalities for Derivative Mappings). Given (31) and (8), for every $t$ and $\tau$ , we have + +$$ +\left\| \widehat {G} _ {t, \tau} \right\| _ {2} ^ {2} \leq \nabla \mu_ {t - 1} \left(\boldsymbol {x} _ {t, \tau}\right) ^ {\top} \widehat {G} _ {t, \tau}, \tag {40} +$$ + +$$ +\left\| G _ {t, \tau} \right\| _ {2} \leq \left\| \nabla f \left(\boldsymbol {x} _ {t, \tau}\right) \right\| _ {2}, \tag {41} +$$ + +$$ +\left\| \widehat {G} _ {t, \tau} - G _ {t, \tau} \right\| _ {2} \leq \| \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) - \nabla f (\boldsymbol {x} _ {t, \tau}) \| _ {2}. \tag {42} +$$ + +Proof. 
For (40), let $\widehat{\pmb{x}}_{t,\tau} = \pmb{x}_{t,\tau} - \eta_{t,\tau}\nabla \mu_{t - 1}(\pmb{x}_{t,\tau})$ , we then have + +$$ +\begin{array}{l} \left\| \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} _ {t, \tau}\right) - \mathcal {P} _ {\mathcal {X}} \left(\widehat {\boldsymbol {x}} _ {t, \tau}\right) \right\| _ {2} ^ {2} - \left(\boldsymbol {x} _ {t, \tau} - \widehat {\boldsymbol {x}} _ {t, \tau}\right) ^ {\top} \left(\mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} _ {t, \tau}\right) - \mathcal {P} _ {\mathcal {X}} \left(\widehat {\boldsymbol {x}} _ {t, \tau}\right)\right) \\ \stackrel {(a)} {=} \left\| \boldsymbol {x} _ {t, \tau} - \boldsymbol {x} _ {t, \tau + 1} \right\| _ {2} ^ {2} - \eta_ {t, \tau} \nabla \mu_ {t - 1} \left(\boldsymbol {x} _ {t, \tau}\right) ^ {\top} \left(\boldsymbol {x} _ {t, \tau} - \boldsymbol {x} _ {t, \tau + 1}\right) \tag {43} \\ \stackrel {(b)} {=} \eta_ {t, \tau} ^ {2} \left\| \widehat {G} _ {t, \tau} \right\| _ {2} ^ {2} - \eta_ {t, \tau} ^ {2} \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) ^ {\top} \widehat {G} _ {t, \tau} \\ \begin{array}{c} \stackrel {(c)} {\leq} 0 \end{array} \\ \end{array} +$$ + +where $(a)$ results from the fact that $\pmb{x}_{t,\tau +1} = \mathcal{P}_{\mathcal{X}}\left(\pmb{x}_{t,\tau} - \eta_{t,\tau}\nabla \mu_{t - 1}(\pmb{x}_{t,\tau})\right)$ based on our (7) and $(b)$ derives from the definition of $\widehat{G}_{t,\tau}$ in (31). 
In addition, $(c)$ is based on the following result by substituting $\pmb {x} = \pmb{x}_{t,\tau}$ and $\pmb{x}^{\prime} = \widehat{\pmb{x}}_{t,\tau}$ into (38): + +$$ +\left\| \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} _ {t, \tau}\right) - \mathcal {P} _ {\mathcal {X}} \left(\widehat {\boldsymbol {x}} _ {t, \tau}\right) \right\| _ {2} ^ {2} - \left(\boldsymbol {x} _ {t, \tau} - \widehat {\boldsymbol {x}} _ {t, \tau}\right) ^ {\top} \left(\mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} _ {t, \tau}\right) - \mathcal {P} _ {\mathcal {X}} \left(\widehat {\boldsymbol {x}} _ {t, \tau}\right)\right) \leq 0. \tag {44} +$$ + +Finally, by dividing $\eta_{t,\tau}^2$ on both sides of the last inequality in (43), we finish the proof for (40). + +For (41), following the same proof above, we can also obtain the following inequality for the projected derivative $G_{t,\tau}$ : + +$$ +\left\| G _ {t, \tau} \right\| _ {2} ^ {2} \leq \nabla f \left(\boldsymbol {x} _ {t, \tau}\right) ^ {\top} G _ {t, \tau} \leq \left\| \nabla f \left(\boldsymbol {x} _ {t, \tau}\right) \right\| _ {2} \left\| G _ {t, \tau} \right\| _ {2}. \tag {45} +$$ + +We complete the proof for (41) by dividing $\| G_{t,\tau}\| _2$ on both sides of the inequality above. 
+ +For (42), define $\boldsymbol{x}_{t,\tau +1}^{\prime}\triangleq \boldsymbol{x}_{t,\tau} - \eta_{t,\tau}G_{t,\tau}$ , we have + +$$ +\begin{array}{l} \left\| \widehat {G} _ {t, \tau} - G _ {t, \tau} \right\| _ {2} \stackrel {(a)} {=} \frac {1}{\eta_ {t , \tau}} \left\| \boldsymbol {x} _ {t, \tau} - \boldsymbol {x} _ {t, \tau + 1} - \left(\boldsymbol {x} _ {t, \tau} - \boldsymbol {x} _ {t, \tau + 1} ^ {\prime}\right) \right\| _ {2} \\ \stackrel {(b)} {=} \frac {1}{\eta_ {t , \tau}} \left\| \boldsymbol {x} _ {t, \tau + 1} - \boldsymbol {x} _ {t, \tau + 1} ^ {\prime} \right\| _ {2} \\ \stackrel {(c)} {=} \frac {1}{\eta_ {t , \tau}} \| \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} _ {t, \tau} - \eta_ {t, \tau} \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau})\right) - \mathcal {P} _ {\mathcal {X}} \left(\boldsymbol {x} _ {t, \tau} - \eta_ {t, \tau} \nabla f (\boldsymbol {x} _ {t, \tau})\right) \| _ {2} \tag {46} \\ \stackrel {(d)} {\leq} \frac {1}{\eta_ {t , \tau}} \left\| \boldsymbol {x} _ {t, \tau} - \eta_ {t, \tau} \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) - (\boldsymbol {x} _ {t, \tau} - \eta_ {t, \tau} \nabla f (\boldsymbol {x} _ {t, \tau})) \right\| _ {2} \\ \stackrel {(e)} {=} \left\| \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) - \nabla f (\boldsymbol {x} _ {t, \tau}) \right\| _ {2} \\ \end{array} +$$ + +where $(a)$ comes from the definition of $\widehat{G}_{t,\tau}$ and $G_{t,\tau}$ in (31) and (8), respectively. In addition, $(c)$ derives from (7) and (8). Finally, $(d)$ results from (34). + +Proof. Since the objective function $f$ is assumed to be $L_{s}$ -Lipschitz smooth (Sec. 
4.2), we have the following inequality for any $x_{t,\tau} \in \mathcal{X}$ in our ZoRD algorithm: + +$$ +f \left(\boldsymbol {x} _ {t, \tau + 1}\right) - f \left(\boldsymbol {x} _ {t, \tau}\right) \leq \nabla f \left(\boldsymbol {x} _ {t, \tau}\right) ^ {\top} \left(\boldsymbol {x} _ {t, \tau + 1} - \boldsymbol {x} _ {t, \tau}\right) + \frac {L _ {s}}{2} \| \boldsymbol {x} _ {t, \tau + 1} - \boldsymbol {x} _ {t, \tau} \| _ {2} ^ {2}. \tag {47} +$$ + +Let $\delta' \in (0,1)$ . Define $\beta \triangleq \sqrt{d + 2(\sqrt{d} + 1) \ln(1 / \delta')}$ , by substituting (32) into the inequality above, the following inequality holds with probability of at least $1 - \delta'$ : + +$$ +f (\boldsymbol {x} _ {t, \tau + 1}) - f (\boldsymbol {x} _ {t, \tau}) +$$ + +$$ +\stackrel {(a)} {\leq} - \eta_ {t, \tau} \nabla f (\pmb {x} _ {t, \tau}) ^ {\top} \widehat {G} _ {t, \tau} + \frac {L _ {s} \eta_ {t , \tau} ^ {2}}{2} \left\| \widehat {G} _ {t, \tau} \right\| _ {2} ^ {2} +$$ + +$$ +\stackrel {(b)} {=} \eta_ {t, \tau} \left(\nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) - \nabla f (\boldsymbol {x} _ {t, \tau})\right) ^ {\top} \widehat {G} _ {t, \tau} - \eta_ {t, \tau} \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) ^ {\top} \widehat {G} _ {t, \tau} + \frac {L _ {s} \eta_ {t , \tau} ^ {2}}{2} \left\| \widehat {G} _ {t, \tau} \right\| _ {2} ^ {2} +$$ + +$$ +\begin{array}{l} \stackrel {(c)} {=} \eta_ {t, \tau} \left[ (\nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) - \nabla f (\boldsymbol {x} _ {t, \tau})) ^ {\top} (\widehat {G} _ {t, \tau} - G _ {t, \tau}) + (\nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) - \nabla f (\boldsymbol {x} _ {t, \tau})) ^ {\top} G _ {t, \tau} \right] \\ - \eta_ {t, \tau} \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) ^ {\top} \widehat {G} _ {t, \tau} + \frac {L _ {s} \eta_ {t , \tau} ^ {2}}{2} \left\| \widehat {G} _ {t, \tau} \right\| _ {2} ^ {2} \\ \end{array} +$$ + +$$ +\begin{array}{l} \stackrel {(d)} {\leq} \eta_ {t, \tau} \left[ \| \nabla 
\mu_ {t - 1} (\pmb {x} _ {t, \tau}) - \nabla f (\pmb {x} _ {t, \tau}) \| _ {2} \left\| \widehat {G} _ {t, \tau} - G _ {t, \tau} \right\| _ {2} + \| \nabla \mu_ {t - 1} (\pmb {x} _ {t, \tau}) - \nabla f (\pmb {x} _ {t, \tau}) \| _ {2} \| G _ {t, \tau} \| _ {2} \right] \\ - \eta_ {t, \tau} \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) ^ {\top} \widehat {G} _ {t, \tau} + \frac {L _ {s} \eta_ {t , \tau} ^ {2}}{2} \left\| \widehat {G} _ {t, \tau} \right\| _ {2} ^ {2} \\ \end{array} +$$ + +$$ +\begin{array}{l} \stackrel {(e)} {\leq} \eta_ {t, \tau} \left[ \| \nabla \mu_ {t - 1} (\pmb {x} _ {t, \tau}) - \nabla f (\pmb {x} _ {t, \tau}) \| _ {2} ^ {2} + \| \nabla \mu_ {t - 1} (\pmb {x} _ {t, \tau}) - \nabla f (\pmb {x} _ {t, \tau}) \| _ {2} \| \nabla f (\pmb {x} _ {t, \tau}) \| _ {2} \right] \\ - \frac {2 \eta_ {t , \tau} - L _ {s} \eta_ {t , \tau} ^ {2}}{2} \left\| \widehat {G} _ {t, \tau} \right\| _ {2} ^ {2} \\ \end{array} +$$ + +$$ +\stackrel {(f)} {\leq} \eta_ {t, \tau} \kappa^ {2} \beta^ {2} r ^ {2 t} + \eta_ {t, \tau} L _ {c} \kappa \beta r ^ {t} - \frac {\eta_ {t , \tau}}{2} \| \widehat {G} _ {t, \tau} \| _ {2} ^ {2} \tag {48} +$$ + +where $(d)$ derives from the Cauchy-Schwarz inequality and $(e)$ follows from Lemma B.8. Finally, $(f)$ results from the bounded derivative estimation error in Theorem 2 and the fact that $f$ is $L_{c}$ -Lipschitz continuous (i.e., $\| \nabla f(\pmb{x})\|_2 \leq L_c$ for any $\pmb{x} \in \mathcal{X}$ ) and $\eta_{t,\tau} \leq 1 / L_s (\forall \tau)$ . + +For every iteration $t$ of our ZoRD algorithm, we in fact will apply the virtual updates (7) for $V_{t}$ times (see Algo. 2). 
Therefore, for probability $\geq 1 - V_t\delta'$ , we have + +$$ +\begin{array}{l} \frac {1}{V _ {t}} \sum_ {\tau = 0} ^ {V _ {t} - 1} \eta_ {t, \tau} \left\| \widehat {G} _ {t, \tau} \right\| _ {2} ^ {2} \leq \frac {2}{V _ {t}} \sum_ {\tau = 0} ^ {V _ {t} - 1} \left[ f \left(\boldsymbol {x} _ {t, \tau}\right) - f \left(\boldsymbol {x} _ {t, \tau + 1}\right) + \eta_ {t, \tau} \left(\kappa^ {2} \beta^ {2} r ^ {2 t} + L _ {c} \kappa \beta r ^ {t}\right) \right] \tag {49} \\ = \frac {2}{V _ {t}} \left[ f \left(\boldsymbol {x} _ {t - 1}\right) - f \left(\boldsymbol {x} _ {t}\right) \right] + \left(\frac {2}{V _ {t}} \sum_ {\tau = 0} ^ {V _ {t} - 1} \eta_ {t, \tau}\right) \left(\kappa^ {2} \beta^ {2} r ^ {2 t} + L _ {c} \kappa \beta r ^ {t}\right) \\ \end{array} +$$ + +where the first inequality results from (48) by re-arranging it and then summing it up over $\tau$ . + +However, in order to prove the convergence of our ZoRD algorithm to a stationary point, we need to consider the derivative mapping of $G_{t,\tau}$ instead (refer to our Sec. 4.2). So, for any $\tau$ , we propose the following inequality: + +$$ +\begin{array}{l} \left\| G _ {t, \tau} \right\| _ {2} = \left\| G _ {t, \tau} - \widehat {G} _ {t, \tau} + \widehat {G} _ {t, \tau} \right\| _ {2} \\ \leq \left\| G _ {t, \tau} - \widehat {G} _ {t, \tau} \right\| _ {2} + \left\| \widehat {G} _ {t, \tau} \right\| _ {2} \tag {50} \\ \leq \left\| \nabla \mu_ {t - 1} (\boldsymbol {x} _ {t, \tau}) - \nabla f (\boldsymbol {x} _ {t, \tau}) \right\| _ {2} + \left\| \widehat {G} _ {t, \tau} \right\| _ {2} \\ \leq \kappa \beta r ^ {t} + \left\| \widehat {G} _ {t, \tau} \right\| _ {2} \\ \end{array} +$$ + +where the first inequality is from the triangle inequality and the second inequality comes from (42). 
Finally, by taking the result above into (49), we have + +$$ +\frac {1}{V _ {t}} \sum_ {\tau = 0} ^ {V _ {t} - 1} \eta_ {t, \tau} \| G _ {t, \tau} \| _ {2} ^ {2} \leq \frac {2}{V _ {t}} \left[ f \left(\boldsymbol {x} _ {t - 1}\right) - f \left(\boldsymbol {x} _ {t}\right) \right] + \left(\frac {2}{V _ {t}} \sum_ {\tau = 0} ^ {V _ {t} - 1} \eta_ {t, \tau}\right) \left(\kappa^ {2} \beta^ {2} r ^ {2 t} + L _ {c} \kappa \beta r ^ {t}\right) + \kappa \beta r ^ {t}. \tag {51} +$$ + +Then, substituting $V_{t} = V$ and $\eta_{t,\tau} = \eta$ for any $t,\tau$ into the result above, the following inequality holds with probability of at least $1 - VT\delta^{\prime}$ when $r < 1$ : + +$$ +\begin{array}{l} \frac {1}{T} \sum_ {t = 1} ^ {T} \frac {1}{V} \sum_ {\tau = 0} ^ {V - 1} \eta \| G _ {t, \tau} \| _ {2} ^ {2} \stackrel {(a)} {\leq} \frac {1}{T} \sum_ {t = 1} ^ {T} \left(\frac {2 (f (\boldsymbol {x} _ {t - 1}) - f (\boldsymbol {x} _ {t}))}{V} + 2 \eta \kappa^ {2} \beta^ {2} r ^ {2 t} + (2 \eta L _ {c} + 1) \kappa \beta r ^ {t}\right) \\ \stackrel {(b)} {\leq} \frac {2}{T V} \left[ f (\boldsymbol {x} _ {0}) - f (\boldsymbol {x} _ {T}) \right] + \frac {2 \eta (1 - r ^ {2 T})}{T (1 - r ^ {2})} \kappa^ {2} \beta^ {2} r ^ {2} \\ + \frac {(2 \eta L _ {c} + 1) (1 - r ^ {T})}{T (1 - r)} \kappa \beta r \\ \stackrel {(c)} {\leq} \frac {2}{T V} \left[ f \left(\boldsymbol {x} _ {0}\right) - f \left(\boldsymbol {x} ^ {*}\right) \right] + \frac {2 \eta \kappa^ {2} \beta^ {2} r ^ {2}}{T \left(1 - r ^ {2}\right)} + \frac {\left(2 \eta L _ {c} + 1\right) \kappa \beta r}{T (1 - r)}. \tag {52} \\ \end{array} +$$ + +Note that $(b)$ derives from the summation of the geometric sequence about $r$ and $(c)$ comes from $\pmb{x}^{*}\triangleq \arg \min_{\pmb{x}\in \mathcal{X}}f(\pmb {x})$ . 
When $r = 1$ , the following holds with probability of at least $1 - VT\delta^{\prime}$ accordingly: + +$$ +\begin{array}{l} \frac {1}{T} \sum_ {t = 1} ^ {T} \frac {1}{V} \sum_ {\tau = 0} ^ {V - 1} \eta \| G _ {t, \tau} \| _ {2} ^ {2} \leq \frac {1}{T} \sum_ {t = 1} ^ {T} \left(\frac {2 (f (\boldsymbol {x} _ {t - 1}) - f (\boldsymbol {x} _ {t}))}{V} + 2 \eta \kappa^ {2} \beta^ {2} r ^ {2 t} + (2 \eta L _ {c} + 1) \kappa \beta r ^ {t}\right) \tag {53} \\ = \frac {2}{T V} \left[ f \left(\boldsymbol {x} _ {0}\right) - f \left(\boldsymbol {x} _ {T}\right) \right] + 2 \eta \kappa^ {2} \beta^ {2} + (2 \eta L _ {c} + 1) \kappa \beta . \\ \end{array} +$$ + +Finally, let $\delta = VT\delta' \in (0,1)$ , the following holds with probability of at least $1 - \delta$ + +$$ +\begin{array}{l} \min _ {t \leq T} \frac {1}{V} \sum_ {\tau = 0} ^ {V - 1} \| G _ {t, \tau} \| _ {2} ^ {2} \leq \frac {1}{T} \sum_ {t = 1} ^ {T} \frac {1}{V} \sum_ {\tau = 0} ^ {V - 1} \| G _ {t, \tau} \| _ {2} ^ {2} \tag {54} \\ \leq ① + ② \\ \end{array} +$$ + +where ① and ② are defined as below with $\alpha \triangleq \kappa \sqrt{d + 2(\sqrt{d} + 1)\ln(VT / \delta)}$ + +$$ +\begin{array}{l} ① = \frac {2 / \eta}{T V} [ f (\boldsymbol {x} _ {0}) - f (\boldsymbol {x} _ {T}) ] \\ ② = \left\{ \begin{array}{l l} 2 \alpha^ {2} r ^ {2} / [ T (1 - r ^ {2}) ] + (2 L _ {c} + 1 / \eta) \alpha r / [ T (1 - r) ] & (r < 1), \\ 2 \alpha^ {2} + (2 L _ {c} + 1 / \eta) \alpha & (r = 1). \end{array} \right. \tag {55} \\ \end{array} +$$ + +# APPENDIX C EXPERIMENTAL SETTINGS + +# C.1 GENERAL SETTINGS + +Derived GP. Among all our experiments in Sec. 5, to apply the derivative estimation in Sec. 3.1 for every iteration $t$ and every step $\tau$ of our ZoRD algorithm, we use the derived GP (4) based on the Matérn kernel with $\nu = 2.5$ and fit this derived GP using 150 queries that achieve the smallest Euclidean distance to input $\boldsymbol{x}_{t,\tau}$ from the optimization trajectory. 
This is because we only need to model the objective function $f$ in the vicinity of input $\boldsymbol{x}_{t,\tau}$ precisely rather than the entire domain, so as to achieve an accurate derivative estimation at input $\boldsymbol{x}_{t,\tau}$ . + +Confidence Threshold. Among all our experiments in Sec. 5, the confidence threshold $c$ of our dynamic virtual updates (Sec. 3.2) is set to be 0.35 in order to realize a good trade-off between query efficiency and accurate derivative estimation in practice, which can already allow our ZoRD to achieve compelling empirical results consistently (see our Sec. 5). In light of this, $c = 0.35$ would be a reasonably good choice in practice, especially when there is no prior knowledge about the objective functions. When we have prior knowledge about the smoothness of the objective functions, we can likely make a better choice for $c$ : Intuitively, smooth objective functions usually can be modeled by the Gaussian process effectively (Rasmussen and Williams, 2006), so an accurate derivative estimation from our derived GP is also likely to be achieved. In this scenario, a large confidence threshold can be applied to fully exploit the benefit of our derivative estimation that is free from the requirement for additional queries and consequently results in an improved query efficiency in practice. + +Baselines. In addition, among all our experiments in Sec. 5, we consistently use $n = 10$ , $\lambda = 0.01$ and directions $\{u_i\}_{i=1}^n$ that are randomly sampled from a unit sphere for the derivative estimation of the FD method (2) applied in the RGF and PRGF algorithm. Moreover, following the common practice of (Berahas et al., 2022; Cheng et al., 2021), we conduct orthogonalization on these randomly selected directions via the Gram-Schmidt procedure. As for the ES algorithm (e.g., the one applied in Salimans et al., 2017), we apply the same $n$ , $\lambda$ and $\{u_i\}_{i=1}^n$ in RGF and PRGF for their update in every iteration. 
+ +Domain Transformation. Following the practice that has been used in (Eriksson et al., 2019), for all our experiments, we firstly re-scale the input domains into $[0,10]^d$ to ease the optimization and then re-scale the updated inputs back to the original domains for querying. + +# C.2 SYNTHETIC EXPERIMENTS + +Let input $\pmb{x} = [x_i]_{i=1}^d$ , the Ackley and Levy function applied in our synthetic experiments are given below, + +$$ +\begin{array}{l} f (\pmb {x}) = - 2 0 \exp \left(- 0. 2 \sqrt {\frac {1}{d} \sum_ {i = 1} ^ {d} x _ {i} ^ {2}}\right) - \exp (\frac {1}{d} \sum_ {i = 1} ^ {d} \cos (2 \pi x _ {i})) + 2 0 + \exp (1), (\mathrm {A c k l e y}) \\ f (\boldsymbol {x}) = \sin^ {2} \left(\pi w _ {1}\right) + \sum_ {i = 1} ^ {d - 1} \left(w _ {i} - 1\right) ^ {2} \left[ 1 + 1 0 \sin^ {2} \left(\pi w _ {i} + 1\right) \right] + \left(w _ {d} - 1\right) ^ {2} \left[ 1 + \sin^ {2} \left(2 \pi w _ {d}\right) \right] (\text {L e v y}) \tag {56} \\ \end{array} +$$ + +where $w_{i} = 1 + (x_{i} - 1) / 4$ for any $i = 1, \dots, d$ , Ackley function achieves its minimum (i.e., $\min f(\pmb{x}) = 0$ ) at $\pmb{x}^{*} = \mathbf{0}$ , and Levy function achieves its minimum (i.e., $\min f(\pmb{x}) = 0$ ) at $\pmb{x}^{*} = \mathbf{1}$ . Note that the Ackley and Levy function for the synthetic experiments in our Sec. 5.2 are defined within the domain $[-20, 20]^d$ and $[-7.5, 7.5]^d$ , respectively. To give a better understanding of these two synthetic functions, we provide a 3D illustration of these two synthetic functions with $d = 2$ in our Fig. 5. As shown in Fig. 5, these two synthetic functions are highly nonconvex and therefore have local minimums within their domains. + +To compare our ZoRD algorithm with other ZO/FO optimization baselines in Sec. 5.2, we firstly employ TuRBO of 300 queries to find a good initialization for all other ZO/FO optimization algorithms in Fig. 3 because of the nonconvexity of these two synthetic functions as shown in Fig. 5. 
We then + +![](images/b0e0d1d63258389a9f2e3355c68706e372600cd18875473bfd73884f8529d368.jpg) +(a) Ackley function $(d = 2)$ + +![](images/c9ee5ee5c05e61235de5bd32e3a6c3a0fd4c99fefd40dfd5cadec77b30de8e1a.jpg) +(b) Levy function $(d = 2)$ +Figure 5: The 3D illustration of the Ackley and Levy synthetic functions with $d = 2$ . + +apply these ZO/FO optimization algorithms with a query budget of 200 for $d = 20,40$ , and a query budget of 400 for $d = 100$ to compare their query efficiency. We use the same Adam optimizer (Kingma and Ba, 2015) with a learning rate of 0.1 and exponential decay rates of 0.9, 0.999 for RGF, PRGF, GD, and our ZoRD algorithm, for faster convergence compared with standard GD. + +# C.3 BLACK-BOX ADVERSARIAL ATTACK + +For the black-box adversarial attack experiment on the MNIST dataset, we use the same fully trained deep neural networks from (Cheng et al., 2021) and adopt an $L_{\infty}$ constraint of $\| x\|_{\infty} \leq 0.3$ on the input perturbation $x$ . For the black-box adversarial attack experiment on the CIFAR-10 dataset, we fully train a ResNet-18 (He et al., 2016) on CIFAR-10 using stochastic gradient descent (SGD) with a cosine annealed learning rate from 0.1 to 0, a momentum of 0.9 and a weight decay of $5 \times 10^{-4}$ for 200 epochs, and adopt an $L_{\infty}$ constraint of $\| x\|_{\infty} \leq 0.2$ on the input perturbation $x$ . Note that we use the same loss function as (Cheng et al., 2021) for these two experiments. Meanwhile, to apply RGF, PRGF and our ZoRD, we adopt the Adam optimizer with the same learning rate of 0.5 and the same exponential decay rates of 0.9, 0.999.
The MLP classifier applied in Sec. 5.4 consists of 2 layers with 30 and 14 hidden neurons respectively, leading to 2189 parameters in total (i.e., $d = 2189$ ). We first train this MLP classifier on the training dataset of Covertype using the L-BFGS algorithm with the cross-entropy loss function for 300 epochs, and then apply ZO optimization algorithms to fine-tune our trained MLP directly on the non-differentiable metrics (i.e., using these metrics as the new loss functions), including precision, recall, F1 score and Jaccard index. To obtain the results of ES, RGF, PRGF and our ZoRD algorithm in Sec. 5.4, we apply the same Adam optimizer with a learning rate of 0.2 (for precision and recall) or 0.01 (for F1 score and Jaccard index) and exponential decay rates of 0.9, 0.999. Note that standard BO algorithms (including TuRBO) fail to achieve any percentage improvements (i.e., achieving $0\%$ in the $y$ -axis of Fig. 4) in this experiment according to our five independent runs, which is likely due to their aggressive exploration in the input domain of such a high dimension. In light of this, we do not include them in our comparison since all other methods are able to achieve certain improvements. + +# C.5 DERIVATIVE-FREE REINFORCEMENT LEARNING + +Our derivative-free RL experiments aim to learn controllers (which outputs policies) that maximize the rewards/return for several environments in the OpenAI Gym (Brockman et al., 2016) without using true derivatives. Specifically, we need to optimize the parameters (i.e., $\pmb{x}$ ) of our neural network + +Table 2: OpenAI Gym environment properties and their respective network dimensions. + +
AcrobotSwimmerLunarBipedalWalkerWalker2DHalfCheetah
|S|688241717
|A|324466
d213222244404356356
+ +![](images/5a7d1bbf305590bb2af7e8bba9a225b7fd0ad19266e15b18fe05afaa121eb7d6.jpg) +(a) Results under various input dimension $d$ and fixed Matérn $(\nu = 2.5)$ + +![](images/e4db9ac701ea0d5db4b1bfa995d0e6745c35bf8b97f84fd737365695a804f4fb.jpg) + +![](images/a4b6a796b2aa7640a090a6f445bfc96f611ca27f230047a4e08bfcc586707cd4.jpg) + +![](images/144518b2e441f6af2314bb12e4117add41f33fa6303d6a9ae7ed7f4fabe6bca1.jpg) + +![](images/7a7ee12346d2472c051f97e3d677997abe714ac9b5f0ebbd49eb7b00d81a613c.jpg) +(b) Results under various kernels and fixed input dimension $d = 80$ + +![](images/812e9f1fba703004483cd973f28051ce4a725aa19248f8a12cf2fdfb4837a789.jpg) +Figure 6: Comparison of the derivative estimation errors of our derived GP-based estimator (GP) and the FD estimator under various input dimensions and kernels. Similarly, each result is reported with the mean $\pm$ standard error from five independent runs. + +![](images/a52706ab4ce98f071ef857a5fc8f1a78ec831d2c37c97d0c67c3318e2152720b.jpg) + +![](images/14c28dc5df8f111e0503498123550176d4b3695b56b3122a7b23680df813e4bd.jpg) + +(MLP) controller with 2 hidden layers, where each hidden layer has 10 hidden neurons and one bias term. We adopt a $L_{\infty}$ constraint of $\| x \|_{\infty} \leq 1$ on the parameters $x$ . We use a softmax output layer for the policies that deal with discrete action spaces, and a tanh output layer for the policies that deal with continuous action spaces. The dimension of neural network parameters (represented as a column vector) $d$ is determined by the dimensions of both the observation $|S|$ and the action space $|A|$ of an environment, as detailed in Tab. 2. + +In order to search for policies that are robust to different random state initializations, we use the vectorized API of OpenAI Gym, and our observed function value $y(\pmb{x})$ given the network parameters $\pmb{x}$ is an averaged return of 32 parallel environments. 
We also fix the seed of OpenAI Gym for all queries, which ensures that we are evaluating on a fixed set of 32 state initializations and that our results can be reproduced. We first initialize a sample of 500 points from a Latin Hypercube (McKay et al., 1979) to find a good initial input, and then proceed to apply ZO optimization algorithms (i.e., ES, RGF, PRGF, and our ZoRD) with the same query budget of 1000 on this initial input. For all these ZO optimization algorithms, we employ the same Adam optimizer with a learning rate of 1.0 and exponential decay rates of 0.9, 0.999. Considering the prohibitive noise in RL experiments, we use 300 queries from the optimization trajectory that has the smallest Euclidean distance with an input needing to be updated. Of note, we conduct 10 trials in total where each trial differs from each other by both the OpenAI Gym seed and the Latin Hypercube initializations. + +# APPENDIX D MORE RESULTS + +# D.1 MORE RESULTS ON DERIVATIVE ESTIMATION + +Besides the comparison in Fig. 2, we provide additional comparison between our derived GP-based estimator (6) and the FD estimator (2) under various input dimensions in Fig. 6(a) and various kernels + +![](images/506bd4061210fea2f99ae60d4e95c5529075217a130b04aba70996c846bf56fe.jpg) +(a) Ackley $(d = 40)$ + +![](images/3085ded5b3b99d83f3c343bb1f239baf5b01ccacfaac1f7ed929118e49db7035.jpg) +(b) Levy $(d = 40)$ +Figure 7: Comparison of our ZoRD algorithm using different confidence thresholds $c$ for its dynamic virtual updates, where the $x$ -axis and the $y$ -axis denote the number of function queries and the log-scaled optimality gap (i.e., $\log (f(\boldsymbol{x}_T) - f(\boldsymbol{x}^*))$ ) achieved with this number of queries, respectively. + +in Fig. 6(b) using the Ackley function. We adopt the same setting in Sec. 5.2. Interestingly, Fig. 
6(a)(b) show that under various input dimensions and GP kernels, our derived GP-based estimator (6) is still able to achieve faster reduction rates compared with the FD estimator. Of note, all the function queries applied in our derived GP-based estimator are from the optimization trajectory whereas the FD estimator requires additional function queries for its derivative estimation. So, Fig. 6(a)(b) also show that our derived GP method is still able to achieve improved query efficiency for accurate derivative estimation compared with the FD method under various input dimensions and GP kernels because our method avoids the requirement of additional queries for derivative estimation. Interestingly, the objective function (i.e., the Ackley function) is not truly sampled from the GPs based on these kernels. This therefore means that though we have assumed that we need the prior knowledge about the GP from which the objective function is sampled (Sec. 2.1), such an assumption does not really need to be satisfied for our derived GP-based method to achieve accurate derivative estimation in practice. More interestingly, we notice that the Matérn( $\nu = 0.5$ ) and SE kernels achieve slightly worse derivative estimation, indicating that the choice of GP kernels may impact the quality of our derived GP-based derivative estimation. However, in practice, our derived GP method based on the Matérn( $\nu = 2.5$ ) kernel, which has been widely adopted in our experiments, is already able to provide us with good derivative estimation for ZO optimization as confirmed by the results in our other experiments.
8, our ZoRD algorithm is still able to outperform the other benchmark BO algorithm (i.e., CobBO). + +We then investigate the impacts of the dynamic virtual updates (Sec. 3.2) on our ZoRD algorithm. In particular, we apply the same setting in Appx. C.2 to optimize the Ackley and Levy function with $d = 40$ under various confidence thresholds $c$ for our dynamic virtual updates. Fig. 7 illustrates the results. As shown in both Fig. 7(a) and (b), our ZoRD algorithm using the technique of dynamic virtual updates (i.e., $c > 0$ ) can consistently achieve improved query efficiency compared with the one not using the technique of dynamic virtual updates (i.e., $c = 0$ ). This indicates the essence of dynamic virtual updates in helping improve the query efficiency of our ZoRD algorithm. Such a result actually corroborates our theoretical insights about virtual updates (Sec. 4.2). Remarkably, our ZoRD algorithm without the technique of dynamic virtual updates (i.e., $c = 0$ ) is still able to achieve both improved query efficiency and better converged performance compared with RGF and PRGF, which further verifies the superiority of our derived GP-based derivative estimation. More interestingly, both Fig. 7(a) and Fig. 7(b) have verified that there indeed exists a trade-off for the confidence threshold $c$ as we have discussed in Sec. 3.2: The confidence threshold $c$ can not be overly + +![](images/3a5ae77dfc64928502d64988d9d7ada9bb748fe47df977f62e176bb212458df3.jpg) +(a) Ackley $(d = 20)$ + +![](images/9dcc1dcc291916189e66fc43dd79d16af5ecf25b28883a1a34657e40834df2cf.jpg) +(b) Ackley $(d = 40)$ + +![](images/44df5be0540cd5955577d9bb1ffcfc8ba342b6e05aa5f38561601fd685f8651b.jpg) +(c) Levy $(d = 40)$ + +![](images/d00e1b941466c717ceb8cb1548c626398cb1884a8f1d10dd7737dd01e6fa80cd.jpg) +(d) Levy $(d = 100)$ + +![](images/7c2290e275f975cb6222affe436898d17018e27c59f33dc602abe96897dd05b6.jpg) +Figure 8: Additional comparison between our ZoRD and other baselines. 
The $x$ -axis and $y$ -axis denote the number of queries and log-scaled optimality gap (i.e., $\log(f(x_T) - f(x^*))$ ) achieved after this number of queries. Each curve is the mean $\pm$ standard error from ten independent runs. +(a) Success rate on MNIST + +![](images/3c4d39e576279a3ca3ed2be316b8d745d19a68c19d0f7abc0e89f0a7723b1731.jpg) +(b) Success rate on CIFAR-10 +Figure 9: Comparison of the success rate achieved by various ZO optimization algorithms on the 15 images selected from the MNIST and CIFAR-10 datasets. Note that the $x$ -axis and the $y$ -axis denote the number of queries and the success rate (within the range of [0, 1]) achieved after this number of queries, respectively. + +small or excessively large in order to achieve the best query efficiency of our ZoRD algorithm, e.g., $c = 0.3$ for Ackley ( $d = 40$ ) and $c = 0.4$ for Levy ( $d = 40$ ). + +# D.3 MORE RESULTS ON BLACK-BOX ADVERSARIAL ATTACK + +Besides the comparison in our Sec. 5.3, we also compare the success rate achieved by different ZO optimization algorithms on the 15 images selected from MNIST or CIFAR-10 in Fig. 9. Note that we adopt the same settings in Appx. C.3 for this comparison. Considering the large computational complexity of the TuRBO-1/10 algorithms for hard-to-attack images$^{3}$, which is usually undesirable in practice, we drop the comparison with them in this experiment. Fig. 9 shows that under the same query budget, our ZoRD algorithm is able to achieve a considerably improved success rate over other ZO optimization algorithms. These results therefore further support the superior query efficiency of our ZoRD algorithm in real-world challenging problems.
In light of this, we also demonstrate the superiority of our ZoRD algorithm in the problem of derivative-free reinforcement learning. Specifically, we adopt the setting in Sec. C.5 to experiment in different RL environments. Tab. 3 summarizes the comparison among different ZO optimization algorithms under + +Table 3: Comparison of the rewards (larger is better) achieved by various ZO optimization algorithms in different RL environments. Each result is reported with the mean ± standard deviation from ten independent runs. + +
AlgorithmAcrobotSwimmerLunarBipedalWalkerWalker2DHalfCheetah
ES-86.2±11.0176.0±56.8-94.7±24.4-34.7±27.3340.4±143.01042.4±753.9
RGF-83.0±5.6213.2±65.1-93.8±19.1-30.3±40.3368.4±223.11129.3±748.5
PRGF-86.3±9.9218.6±66.2-100.1±16.0-29.9±35.2344.6±152.31083.3±722.2
ZoRD-73.3±2.4280.5±77.6-45.1±38.312.9±37.8729.1±304.21950.5±576.1
+ +the same query budget of 1000. As BO algorithms usually suffer from the prohibitive computational complexity for a large $T$ (Rasmussen and Williams, 2006) and GLD has never been applied in RL, we mainly compare our ZoRD algorithm with ES, RGF and PRGF, which all belong to the same type of ZO optimization algorithm: GD with estimated derivative. Remarkably, Tab. 3 shows that under the same query budget, our ZoRD algorithm consistently achieves better performance (i.e., the highest rewards) than the other ZO optimization algorithms in different RL environments. This further supports the superiority of our ZoRD algorithm over other FD-based ZO optimization algorithms. + +# APPENDIX E DISCUSSIONS + +# E.1 ZORD VS. FD-BASED ZO OPTIMIZATION + +Of note, the novelty of our work in fact lies in its way of exploiting the GP assumption to help design an improved derivative estimation and hence an improved ZO optimization algorithm, which to the best of our knowledge has not been explored theoretically yet in the field of ZO optimization via GD with estimated derivative. That is, at this moment, it is still not known in the literature how existing FD methods can utilize such an assumption to achieve better derivative estimation (i.e., their derivative estimation quality will remain the same), even when they make the same assumption as we do. In light of this, the comparison between our derived GP method and the FD method in Sec. 4 is not only necessary but also meaningful to show the advantage of exploiting such an assumption in ZO derivative estimation. Importantly, our empirical results further show that such an assumption is in fact not restrictive for our ZoRD to achieve compelling performance in practice. For example, our Fig. 2 and Fig.
6 have shown that our derived GP-based method is able to achieve smaller derivative estimation error than the FD method when the objective functions are not designed to be sampled from a GP with the kernel that we had applied for our derivative estimation. Moreover, the results in our Sec. 5.2, 5.3, 5.4 have shown that our ZoRD is capable of achieving competitive optimization performance for real-world optimization problems where the objective functions are also not designed to be sampled from a GP with the kernel that we had used for our ZoRD. + +Meanwhile, the theoretical challenges of our work lie in the theoretical guarantee on the derivative estimation error of our unique derived GP-based method for any input in the domain as well as the convergence analysis based on such a unique derivative estimation, which to the best of our knowledge have not been studied in the literature. This means that our Thm. 1 and Thm. 2 have provided new developments in the analysis of gradient estimation error and our Thm. 3 will be the first convergence result for GD using our unique derivative estimation method. Interestingly, the bound in our Thm. 3 also improves over the standard ones from (Nesterov and Spokoiny, 2017; Liu et al., 2018b) in several aspects, as discussed in our Sec. 4.2. + +# E.2 ZORD vs. BO + +Our ZoRD algorithm and standard BO algorithms (e.g., GP-UCB) have in fact applied the same GP assumption for their algorithm design. That is, however, where the similarity ends. Of note, our ZoRD exploits such an assumption to derive a specific GP (i.e., (4)) for derivative estimation, which is then employed for local exploitation via (projected) GD update. In contrast, BO algorithms utilize such an assumption to construct their acquisition functions for a global optimization that can trade off between exploitation and exploration. 
In practice, the exploration of BO algorithms is usually query-inefficient, especially for problems with high-dimensional input spaces, and therefore GD with + +![](images/c2355c25734b157f01bf391f9fb3aac55ade306cbdf035b34d06cb49f0588384.jpg) +Figure 10: Comparison of local derivative estimation (in the input domain of [0, 3]) in our ZoRD and global function approximation (in the input domain of $[-6, 6]$ ) in BO under various number of random function queries. + +estimated derivatives (especially our ZoRD) is preferred to realize better optimization performances in these problems (see our Sec. 5.2). So, our ZoRD and BO algorithms belong to two different types of ZO optimization algorithms (i.e., GD-type vs. BO-type), where their theoretical analyses are in fact not comparable. In particular, GD-type and BO-type ZO optimization algorithms apply different metrics for their theoretical analyses, e.g., the derivative estimation error as well as the convergence to a stationary point (in the nonconvex case) for GD-type ZO optimization algorithms vs. the global asymptotic convergence in terms of the regret for BO-type ZO optimization algorithms. So, it is more reasonable to compare the theory (including the theoretical challenge, the new developments, and the novelty of the convergence result) of our ZoRD with other GD-type ZO optimization algorithms, e.g., the ones using FD methods for their derivative estimation (Nesterov and Spokoiny, 2017; Liu et al., 2018b), as what we have discussed in Sec. E.1. + +In addition, in contrast to using the GP to model the objective function within the entire domain for global exploration in BO, our derived GP in ZoRD will be applied to estimate the derivative of the objective function for local exploitation by GD as shown in Sec. 3.1. 
As GD typically optimizes in a local region, our derived GP only needs to estimate the derivative locally, which is known to be much simpler than modeling the objective function within the entire domain in BO especially for objective functions in high-dimensional input spaces. In light of this, the derived GP for derivative estimation (4) in our ZoRD algorithm advances the standard GP in BO in the following aspects: + +1. Improved Query Efficiency for Estimation. The derived GP in our ZoRD algorithm requires fewer function queries to provide accurate derivative estimation. We provide a visual example in Fig. 10, in which we sample a one-dimensional function $f$ from a GP prior $\mathcal{GP}(0,k(x,x))$ using the standard SE kernel and then randomly select the same number of queries from the input domain of $[-6,6]$ and $[0,3]$ for standard GP and our derived GP, respectively. As illustrated in Fig. 10, function in a local region (i.e., $x \in [0,3]$ ) is usually smoother than its counterpart in the entire domain (i.e., $x \in [-6,6]$ ). As a result, with only 4 function queries, our derived GP can already provide accurate estimation to the derivative of this objective function whereas standard GP requires more than 8 function queries to model this objective function accurately in the entire domain. + +2. Reduced Computational Complexity. Comparing (3) and (5), both the derived GP for derivative estimation in our ZoRD algorithm and the standard GP in BO enjoy a computational complexity of $\mathcal{O}(n^3)$ with $n$ function queries. However, as a consequence of the improved query efficiency of our derived GP, it is able to require fewer function queries (i.e., + +smaller $n$ ) for accurate derivative estimation and hence can enjoy a reduced computational complexity in practice especially when a large number of queries (e.g., $n > 1000$ ) are applied to the standard GP in BO. 
\ No newline at end of file diff --git a/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/images.zip b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..59ce2ecc2dfbba53f53cd663cd110f776dfbba08 --- /dev/null +++ b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52be819dc33e60b9c3dd8c77afd4cb9fbf7c3fca115cb057ab7f4b3bd3284aa1 +size 1359938 diff --git a/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/layout.json b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..7b98d4a246eb8bbf0af9f9c9a543c7a71f3c0971 --- /dev/null +++ b/2023/Zeroth-Order Optimization with Trajectory-Informed Derivative Estimation/layout.json @@ -0,0 +1,25570 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 507, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 507, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 507, + 116 + ], + "type": "text", + "content": "ZEROTH-ORDER OPTIMIZATION WITH TRAJECTORYINFORMED DERIVATIVE ESTIMATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 135, + 355, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 135, + 355, + 146 + ], + "spans": [ + { + "bbox": [ + 110, + 135, + 355, + 146 + ], + "type": "text", + "content": "Yao Shu*, Zhongxiang Dai*, Weicong Sng, Arun Verma," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 146, + 457, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 146, + 457, + 169 + ], + "spans": [ + { + "bbox": [ + 110, + 146, + 457, + 169 + ], + "type": "text", + "content": "Dept. 
of Computer Science, National University of Singapore, Republic of Singapore {shuyao, daizhongxiang, sngweicong, arun}@comp.nus.edu.sg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 184, + 299, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 184, + 299, + 196 + ], + "spans": [ + { + "bbox": [ + 110, + 184, + 299, + 196 + ], + "type": "text", + "content": "Patrick Jaillet† & Bryan Kian Hsiang Low§" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 196, + 384, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 196, + 384, + 208 + ], + "spans": [ + { + "bbox": [ + 110, + 196, + 384, + 208 + ], + "type": "text", + "content": "Dept. of Electrical Engineering and Computer Science, MIT, USA†" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 110, + 208, + 459, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 208, + 459, + 220 + ], + "spans": [ + { + "bbox": [ + 110, + 208, + 459, + 220 + ], + "type": "text", + "content": "Dept. 
of Computer Science, National University of Singapore, Republic of Singapore" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 220, + 336, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 220, + 336, + 232 + ], + "spans": [ + { + "bbox": [ + 113, + 220, + 336, + 232 + ], + "type": "text", + "content": "jaillet@mit.edu, lowkh@comp.nus.edu.sg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 276, + 260, + 335, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 260, + 335, + 272 + ], + "spans": [ + { + "bbox": [ + 276, + 260, + 335, + 272 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 287, + 471, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 287, + 471, + 498 + ], + "spans": [ + { + "bbox": [ + 140, + 287, + 471, + 498 + ], + "type": "text", + "content": "Zeroth-order (ZO) optimization, in which the derivative is unavailable, has recently succeeded in many important machine learning applications. Existing algorithms rely on finite difference (FD) methods for derivative estimation and gradient descent (GD)-based approaches for optimization. However, these algorithms suffer from query inefficiency because many additional function queries are required for derivative estimation in their every GD update, which typically hinders their deployment in real-world applications where every function query is expensive. To this end, we propose a trajectory-informed derivative estimation method which only employs the optimization trajectory (i.e., the history of function queries during optimization) and hence can eliminate the need for additional function queries to estimate a derivative. Moreover, based on our derivative estimation, we propose the technique of dynamic virtual updates, which allows us to reliably perform multiple steps of GD updates without reapplying derivative estimation. 
Based on these two contributions, we introduce the zeroth-order optimization with trajectory-informed derivative estimation (ZoRD) algorithm for query-efficient ZO optimization. We theoretically demonstrate that our trajectory-informed derivative estimation and our ZoRD algorithm improve over existing approaches, which is then supported by our real-world experiments such as black-box adversarial attack, non-differentiable metric optimization, and derivative-free reinforcement learning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 525, + 206, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 525, + 206, + 537 + ], + "spans": [ + { + "bbox": [ + 106, + 525, + 206, + 537 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 552, + 506, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 506, + 708 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 506, + 708 + ], + "type": "text", + "content": "Zeroth-order (ZO) optimization, in which the objective function to be optimized is only accessible by querying, has received great attention in recent years due to its success in many applications, e.g., black-box adversarial attack (Ru et al., 2020), non-differentiable metric optimization (Hiranandani et al., 2021), and derivative-free reinforcement learning (Salimans et al., 2017). In these problems, the derivative of objective function is either prohibitively costly to obtain or even non-existent, making it infeasible to directly apply standard derivative-based algorithms such as gradient descent (GD). In this regard, existing works have proposed to estimate the derivative using the finite difference (FD) methods and then apply GD-based algorithms using the estimated derivative for ZO optimization (Nesterov and Spokoiny, 2017; Cheng et al., 2021). 
These algorithms, which we refer to as " + }, + { + "bbox": [ + 104, + 552, + 506, + 708 + ], + "type": "inline_equation", + "content": "GD" + }, + { + "bbox": [ + 104, + 552, + 506, + 708 + ], + "type": "text", + "content": " with estimated derivatives, have been the most widely applied approach to ZO optimization especially for problems with high-dimensional input spaces, because of their theoretically guaranteed convergence and competitive practical performance. Unfortunately, these algorithms suffer from query inefficiency, which hinders their real-world deployment especially in applications with expensive-to-query objective functions, e.g., black-box adversarial attack." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 122, + 721, + 201, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 721, + 201, + 732 + ], + "spans": [ + { + "bbox": [ + 122, + 721, + 201, + 732 + ], + "type": "text", + "content": "* Equal contribution." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "text", + "content": "Specifically, one of the reasons for the query inefficiency of existing algorithms on GD with estimated derivatives is that in addition to the necessary queries (i.e., the query of every updated input)1, the FD methods applied in these algorithms require a large number of additional queries to accurately estimate the derivative at an input (Berahas et al., 2022). This naturally begs the question: Can we estimate a derivative without any additional query? A natural approach to achieve this is to leverage the optimization trajectory, which is inherently available as a result of the necessary queries and their observations, to predict the derivatives. However, this requires a non-trivial method to simultaneously " + }, + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "text", + "content": " predict a derivative using only the optimization trajectory (i.e., the history of updated inputs and their observations), and " + }, + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "text", + "content": " quantify the uncertainty of this prediction to avoid using inaccurate predicted derivatives. 
Interestingly, the Gaussian process (GP) model satisfies both requirements and is hence a natural choice for such a derivative estimation. Specifically, under the commonly used assumption that the objective function is sampled from a GP (Srinivas et al., 2010), the derivative at any input in the domain follows a Gaussian distribution which, surprisingly, can be calculated using only the optimization trajectory. This allows us to " + }, + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "text", + "content": " employ the mean of this Gaussian distribution as the estimated derivative, and " + }, + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 82, + 506, + 270 + ], + "type": "text", + "content": " use the covariance matrix of this Gaussian distribution to obtain a principled measure of the predictive uncertainty and the accuracy of this derivative estimation, which together constitute our trajectory-informed derivative estimation (Sec. 3.1)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 274, + 506, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 506, + 407 + ], + "type": "text", + "content": "Another reason for the query inefficiency of the existing algorithms on GD with estimated derivatives is that every update in these algorithms requires reapplying derivative estimation and hence necessitates additional queries. This can preclude their adoption of a large number of GD updates since every update requires potentially expensive additional queries. Therefore, another question arises: Can we perform multiple GD updates without reapplying derivative estimation and hence without any additional query? 
To address this question, we propose a technique named dynamic virtual updates (Sec. 3.2). Specifically, thanks to the ability of our method to estimate the derivative at any input in the domain while only using existing optimization trajectory, we can apply multi-step GD updates without the need to reapply derivative estimation and hence without requiring any new query. Moreover, we can dynamically determine the number of steps for these updates by inspecting the aforementioned predictive uncertainty at every step, such that we only perform an update if the uncertainty is small enough (which also indicates that the estimation error is small, see Sec. 4.1)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 412, + 507, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 412, + 507, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 412, + 507, + 545 + ], + "type": "text", + "content": "By incorporating our aforementioned trajectory-informed derivative estimation and dynamic virtual updates into GD-based algorithms, we then introduce the zeroth-order optimization with trajectory-informed derivative estimation (ZoRD) algorithm for query-efficient ZO optimization. We theoretically bound the estimation error of our trajectory-informed derivative estimation and show that this estimation error is non-increasing in the entire domain as the number of queries is increased and can even be exponentially decreasing in some scenarios (Sec. 4.1). Based on this, we prove the convergence of our ZoRD algorithm, which improves over the existing ZO optimization algorithms that rely on the FD methods for derivative estimation (Sec. 4.2). 
Lastly, we use extensive experiments, such as black-box adversarial attack, non-differentiable metric optimization, and derivative-free reinforcement learning, to demonstrate that " + }, + { + "bbox": [ + 104, + 412, + 507, + 545 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 104, + 412, + 507, + 545 + ], + "type": "text", + "content": " our trajectory-informed derivative estimation improves over the existing FD methods and that " + }, + { + "bbox": [ + 104, + 412, + 507, + 545 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 412, + 507, + 545 + ], + "type": "text", + "content": " our ZoRD algorithm consistently achieves improved query efficiency compared with previous ZO optimization algorithms (Sec. 5)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 561, + 209, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 561, + 209, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 209, + 574 + ], + "type": "text", + "content": "2 PRELIMINARIES" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 586, + 204, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 204, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 204, + 597 + ], + "type": "text", + "content": "2.1 PROBLEM SETUP" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "text", + "content": "Throughout this paper, we use " + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "inline_equation", + "content": "\\nabla" + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "inline_equation", + "content": "\\partial_{\\pmb{x}}" + }, + { + "bbox": [ + 104, 
+ 607, + 506, + 641 + ], + "type": "text", + "content": " to denote, respectively, the total derivative (i.e., gradient) and partial derivative w.r.t the variable " + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "text", + "content": ". We consider the minimization of a black-box objective function " + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "inline_equation", + "content": "f:\\mathcal{X}\\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "text", + "content": ", in which " + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "inline_equation", + "content": "\\mathcal{X}\\subset \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "text", + "content": " is a convex subset of the " + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 607, + 506, + 641 + ], + "type": "text", + "content": "-dimensional domain:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 281, + 647, + 504, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 647, + 504, + 666 + ], + "spans": [ + { + "bbox": [ + 281, + 647, + 504, + 666 + ], + "type": "interline_equation", + "content": "\\min _ {\\boldsymbol {x} \\in \\mathcal {X}} f (\\boldsymbol {x}). 
\\tag {1}", + "image_path": "6930325eaf766486dad5a393e3c4ed47f2bfb4bf9ada47daa19185a159f5d9cb.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "text", + "content": "Since we consider ZO optimization, the derivative information is not accessible and instead, we are only allowed to query the inputs in " + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "text", + "content": ". For every queried input " + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "text", + "content": ", we observe a corresponding noisy output of " + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "inline_equation", + "content": "y(\\pmb{x}) = f(\\pmb{x}) + \\zeta" + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "text", + "content": ", in which " + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "text", + "content": " is a zero-mean Gaussian noise with a variance of " + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "inline_equation", + "content": "\\sigma^2" + }, + { + "bbox": [ + 104, + 668, + 506, + 703 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + 
} + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 504, + 733 + ], + "type": "text", + "content": "In practice, it is usually necessary to query every updated input to measure the optimization performance and select the best-performing input. We refer to these queries as necessary queries." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 103, + 281, + 148 + ], + "blocks": [ + { + "bbox": [ + 105, + 78, + 269, + 102 + ], + "lines": [ + { + "bbox": [ + 105, + 78, + 269, + 102 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 269, + 102 + ], + "type": "text", + "content": "Algorithm 1: Standard (Projected) GD with Estimated Derivatives" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "lines": [ + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "type": "text", + "content": "1: Input: Objective function " + }, + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "type": "inline_equation", + "content": "f: \\mathcal{X} \\to \\mathbb{R}" + }, + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "type": "text", + "content": ", initialization " + }, + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_0" + }, + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "type": "text", + "content": ", iteration number " + }, + { + "bbox": [ + 106, + 
103, + 281, + 148 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "type": "text", + "content": ", learning rates " + }, + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "type": "inline_equation", + "content": "\\{\\eta_t\\}_{t=1}^T" + }, + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "type": "text", + "content": ", projection function " + }, + { + "bbox": [ + 106, + 103, + 281, + 148 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathcal{X}}(\\boldsymbol{x})" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 111, + 148, + 241, + 157 + ], + "blocks": [ + { + "bbox": [ + 111, + 148, + 241, + 157 + ], + "lines": [ + { + "bbox": [ + 111, + 148, + 241, + 157 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 241, + 157 + ], + "type": "text", + "content": "2: for iteration " + }, + { + "bbox": [ + 111, + 148, + 241, + 157 + ], + "type": "inline_equation", + "content": "t = 1,\\dots ,T" + }, + { + "bbox": [ + 111, + 148, + 241, + 157 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 111, + 158, + 258, + 170 + ], + "blocks": [ + { + "bbox": [ + 111, + 158, + 258, + 170 + ], + "lines": [ + { + "bbox": [ + 111, + 158, + 258, + 170 + ], + "spans": [ + { + "bbox": [ + 111, + 158, + 258, + 170 + ], + "type": "text", + "content": "3: " + }, + { + "bbox": [ + 111, + 158, + 258, + 170 + ], + "type": "inline_equation", + "content": "g(\\pmb{x}_{t - 1})\\approx \\nabla f(\\pmb{x}_{t - 1})" + }, + { + "bbox": [ + 111, + 158, + 258, + 170 + ], + "type": "text", + "content": " with (2)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 111, + 170, + 
268, + 181 + ], + "blocks": [ + { + "bbox": [ + 111, + 170, + 268, + 181 + ], + "lines": [ + { + "bbox": [ + 111, + 170, + 268, + 181 + ], + "spans": [ + { + "bbox": [ + 111, + 170, + 268, + 181 + ], + "type": "text", + "content": "4: " + }, + { + "bbox": [ + 111, + 170, + 268, + 181 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t\\gets \\mathcal{P}_{\\mathcal{X}}\\left(\\pmb{x}_{t - 1} - \\eta_{t - 1}g(\\pmb{x}_{t - 1})\\right)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 111, + 181, + 230, + 192 + ], + "blocks": [ + { + "bbox": [ + 111, + 181, + 230, + 192 + ], + "lines": [ + { + "bbox": [ + 111, + 181, + 230, + 192 + ], + "spans": [ + { + "bbox": [ + 111, + 181, + 230, + 192 + ], + "type": "text", + "content": "5: Query " + }, + { + "bbox": [ + 111, + 181, + 230, + 192 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t" + }, + { + "bbox": [ + 111, + 181, + 230, + 192 + ], + "type": "text", + "content": " to yield " + }, + { + "bbox": [ + 111, + 181, + 230, + 192 + ], + "type": "inline_equation", + "content": "y(\\pmb{x}_t)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 111, + 192, + 156, + 201 + ], + "blocks": [ + { + "bbox": [ + 111, + 192, + 156, + 201 + ], + "lines": [ + { + "bbox": [ + 111, + 192, + 156, + 201 + ], + "spans": [ + { + "bbox": [ + 111, + 192, + 156, + 201 + ], + "type": "text", + "content": "6: end for" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 111, + 202, + 227, + 215 + ], + "blocks": [ + { + "bbox": [ + 111, + 202, + 227, + 215 + ], + "lines": [ + { + "bbox": [ + 111, + 202, + 227, + 215 + ], + "spans": [ + { + "bbox": [ + 111, + 202, + 227, + 215 + ], + "type": "text", + "content": "7: 
Return arg " + }, + { + "bbox": [ + 111, + 202, + 227, + 215 + ], + "type": "inline_equation", + "content": "\\min_{\\pmb{x}_{1:T}} y(\\pmb{x})" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 286, + 93, + 506, + 116 + ], + "blocks": [ + { + "bbox": [ + 282, + 79, + 397, + 91 + ], + "lines": [ + { + "bbox": [ + 282, + 79, + 397, + 91 + ], + "spans": [ + { + "bbox": [ + 282, + 79, + 397, + 91 + ], + "type": "text", + "content": "Algorithm 2: ZORD (Ours)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 286, + 93, + 506, + 116 + ], + "lines": [ + { + "bbox": [ + 286, + 93, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 286, + 93, + 506, + 116 + ], + "type": "text", + "content": "1: Input: In addition to the parameters in Algo. 1, set the steps of virtual updates " + }, + { + "bbox": [ + 286, + 93, + 506, + 116 + ], + "type": "inline_equation", + "content": "\\{V_t\\}_{t=1}^T" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 287, + 116, + 416, + 125 + ], + "blocks": [ + { + "bbox": [ + 287, + 116, + 416, + 125 + ], + "lines": [ + { + "bbox": [ + 287, + 116, + 416, + 125 + ], + "spans": [ + { + "bbox": [ + 287, + 116, + 416, + 125 + ], + "type": "text", + "content": "2: for iteration " + }, + { + "bbox": [ + 287, + 116, + 416, + 125 + ], + "type": "inline_equation", + "content": "t = 1,\\dots ,T" + }, + { + "bbox": [ + 287, + 116, + 416, + 125 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 287, + 127, + 362, + 137 + ], + "blocks": [ + { + "bbox": [ + 287, + 127, + 362, + 137 + ], + "lines": [ + { + "bbox": [ + 287, + 127, + 362, + 137 + ], + "spans": [ + { 
+ "bbox": [ + 287, + 127, + 362, + 137 + ], + "type": "text", + "content": "3: " + }, + { + "bbox": [ + 287, + 127, + 362, + 137 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{t,0} \\gets \\pmb{x}_{t-1}" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "code_body" + } + ], + "index": 13, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 287, + 137, + 430, + 148 + ], + "blocks": [ + { + "bbox": [ + 287, + 137, + 430, + 148 + ], + "lines": [ + { + "bbox": [ + 287, + 137, + 430, + 148 + ], + "spans": [ + { + "bbox": [ + 287, + 137, + 430, + 148 + ], + "type": "text", + "content": "4: for iteration " + }, + { + "bbox": [ + 287, + 137, + 430, + 148 + ], + "type": "inline_equation", + "content": "\\tau = 1,\\dots ,V_{t}" + }, + { + "bbox": [ + 287, + 137, + 430, + 148 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "code_body" + } + ], + "index": 14, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 287, + 148, + 504, + 159 + ], + "blocks": [ + { + "bbox": [ + 287, + 148, + 504, + 159 + ], + "lines": [ + { + "bbox": [ + 287, + 148, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 287, + 148, + 504, + 159 + ], + "type": "text", + "content": "5: " + }, + { + "bbox": [ + 287, + 148, + 504, + 159 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{t,\\tau} \\gets \\mathcal{P}_{\\mathcal{X}}(\\pmb{x}_{t,\\tau -1} - \\eta_{t,\\tau -1}\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau -1}))" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "code_body" + } + ], + "index": 15, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 287, + 160, + 342, + 169 + ], + "blocks": [ + { + "bbox": [ + 287, + 160, + 342, + 169 + ], + "lines": [ + { + "bbox": [ + 287, + 160, + 342, + 169 + ], + "spans": [ + { + "bbox": [ + 287, + 160, + 342, + 169 + ], + "type": "text", + "content": "6: end for" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "code_body" + } + ], + 
"index": 16, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 287, + 170, + 436, + 182 + ], + "blocks": [ + { + "bbox": [ + 287, + 170, + 436, + 182 + ], + "lines": [ + { + "bbox": [ + 287, + 170, + 436, + 182 + ], + "spans": [ + { + "bbox": [ + 287, + 170, + 436, + 182 + ], + "type": "text", + "content": "7: Query " + }, + { + "bbox": [ + 287, + 170, + 436, + 182 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t = \\pmb{x}_{t,\\tau}" + }, + { + "bbox": [ + 287, + 170, + 436, + 182 + ], + "type": "text", + "content": " to yield " + }, + { + "bbox": [ + 287, + 170, + 436, + 182 + ], + "type": "inline_equation", + "content": "y(\\pmb{x}_t)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "code_body" + } + ], + "index": 17, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 287, + 182, + 471, + 193 + ], + "blocks": [ + { + "bbox": [ + 287, + 182, + 471, + 193 + ], + "lines": [ + { + "bbox": [ + 287, + 182, + 471, + 193 + ], + "spans": [ + { + "bbox": [ + 287, + 182, + 471, + 193 + ], + "type": "text", + "content": "8: Update (4) using optimization trajectory" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "code_body" + } + ], + "index": 18, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 287, + 193, + 332, + 202 + ], + "blocks": [ + { + "bbox": [ + 287, + 193, + 332, + 202 + ], + "lines": [ + { + "bbox": [ + 287, + 193, + 332, + 202 + ], + "spans": [ + { + "bbox": [ + 287, + 193, + 332, + 202 + ], + "type": "text", + "content": "9: end for" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "code_body" + } + ], + "index": 19, + "sub_type": "algorithm" + }, + { + "type": "code", + "bbox": [ + 284, + 203, + 403, + 216 + ], + "blocks": [ + { + "bbox": [ + 284, + 203, + 403, + 216 + ], + "lines": [ + { + "bbox": [ + 284, + 203, + 403, + 216 + ], + "spans": [ + { + "bbox": [ + 284, + 203, + 403, + 216 + ], + "type": "text", + "content": "10: Return arg min " + }, + { + "bbox": [ + 284, + 
203, + 403, + 216 + ], + "type": "inline_equation", + "content": "\\mathbf{\\mu}_{\\mathbf{x}_{1:T}}y(\\mathbf{x})" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "code_body" + } + ], + "index": 20, + "sub_type": "algorithm" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\zeta \\sim \\mathcal{N}(0,\\sigma^2)" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": ". Besides, we adopt a common assumption on " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": " which has already been widely used in the literature of Bayesian optimization (BO) (Srinivas et al., 2010; Kandasamy et al., 2018): we assume that " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": " is sampled from a Gaussian process (GP). 
A GP " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\mathcal{GP}(\\mu (\\cdot),k(\\cdot ,\\cdot))" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": ", which is characterized by a mean function " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\mu (\\cdot)" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": " and a covariance function " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "k(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": ", is a stochastic process in which any finite subset of random variables follows a multi-variate Gaussian distribution (Rasmussen and Williams, 2006). In addition, following the common practice of GP and BO, we assume w.l.o.g. that " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\mu (\\pmb {x}) = 0" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "k(\\pmb {x},\\pmb{x}^{\\prime})\\leq 1" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "(\\forall \\pmb {x},\\pmb{x}^{\\prime}\\in \\mathcal{X})" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": ". 
We also assume that the kernel function " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": " is differentiable, and that " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\| \\partial_z\\partial_{z'}k(z,z')|_{z = z' = x}\\| _2\\leq \\kappa^2" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\forall \\pmb {x}\\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\kappa >0" + }, + { + "bbox": [ + 104, + 232, + 506, + 331 + ], + "type": "text", + "content": ". This is satisfied by most commonly used kernels such as the squared exponential (SE) kernel (Rasmussen and Williams, 2006)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 344, + 350, + 355 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 344, + 350, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 350, + 355 + ], + "type": "text", + "content": "2.2 ZO OPTIMIZATION WITH ESTIMATED DERIVATIVES" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "text", + "content": "To solve (1), GD with estimated derivatives (e.g., Algo. 1) has been developed (Flaxman et al., 2005; Ghadimi and Lan, 2013; Nesterov and Spokoiny, 2017; Liu et al., 2018a;b). 
Particularly, these algorithms first estimate the derivative of " + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "text", + "content": " (line 3 of Algo. 1) and then plug the estimated derivative into GD-based methods to obtain the next input for querying (lines 4-5 of Algo. 1). In these algorithms, the derivative is typically estimated by averaging the finite difference approximation of the directional derivatives for " + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "text", + "content": " along certain directions, which we refer to as the finite difference (FD) method in this paper. For example, given a parameter " + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "text", + "content": " and directions " + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\{\\pmb{u}_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "text", + "content": ", the derivative " + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\nabla f" + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "text", + "content": " at any " + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 365, + 506, + 453 + ], + "type": "text", + "content": " can be estimated by the following FD method (Berahas et al., 2022):" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 209, + 457, + 504, + 488 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 457, + 504, + 488 + ], + "spans": [ + { + "bbox": [ + 209, + 
457, + 504, + 488 + ], + "type": "interline_equation", + "content": "\\nabla f (\\boldsymbol {x}) \\approx g (\\boldsymbol {x}) \\triangleq \\sum_ {i = 1} ^ {n} \\frac {y \\left(\\boldsymbol {x} + \\lambda \\boldsymbol {u} _ {i}\\right) - y (\\boldsymbol {x})}{\\lambda} \\boldsymbol {u} _ {i}. \\tag {2}", + "image_path": "da5fad98faa5afedd358aca51cfcfb8127b8ac1bcafe9f5b542c5c6e88405edc.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 488, + 506, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 488, + 506, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 488, + 506, + 556 + ], + "type": "text", + "content": "The directions " + }, + { + "bbox": [ + 104, + 488, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\{\\pmb{u}_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 488, + 506, + 556 + ], + "type": "text", + "content": " are usually sampled from the standard Gaussian distribution (Nesterov and Spokoiny, 2017) or uniformly from the unit sphere (Flaxman et al., 2005), or set as the standard basis vectors with 1 at one of its coordinates and 0 otherwise (Lian et al., 2016). As mentioned before, existing FD methods typically require many additional queries (i.e., " + }, + { + "bbox": [ + 104, + 488, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\{\\pmb{x} + \\lambda \\pmb{u}_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 488, + 506, + 556 + ], + "type": "text", + "content": ") to achieve an accurate derivative estimation in every iteration of Algo. 1 (Berahas et al., 2022), making existing ZO optimization algorithms (Flaxman et al., 2005; Nesterov and Spokoiny, 2017) query-inefficient." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 571, + 504, + 583 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 504, + 583 + ], + "type": "text", + "content": "3 ZO OPTIMIZATION VIA TRAJECTORY-INFORMED DERIVATIVE ESTIMATION" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 104, + 595, + 506, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 595, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 595, + 506, + 673 + ], + "type": "text", + "content": "To improve existing GD with estimated derivatives (e.g., Algo. 1), we propose the ZoRD algorithm (Algo. 2), which achieves more query-efficient ZO optimization thanks to our two major contributions. Firstly, we propose a derived GP-based derivative estimation method which only uses the optimization trajectory and consequently does not require any additional query for derivative estimation (Sec. 3.1). Secondly, thanks to the ability of our method to estimate the derivative at any input in the domain without any additional query and to measure the estimation error in a principled way, we develop the technique of dynamic virtual updates to further improve the query efficiency of our ZoRD (Sec. 3.2)." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 681, + 350, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 350, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 350, + 693 + ], + "type": "text", + "content": "3.1 TRAJECTORY-INFORMED DERIVATIVE ESTIMATION" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 701, + 504, + 736 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 701, + 504, + 736 + ], + "spans": [ + { + "bbox": [ + 104, + 701, + 504, + 736 + ], + "type": "text", + "content": "To begin with, if a function " + }, + { + "bbox": [ + 104, + 701, + 504, + 736 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 701, + 504, + 736 + ], + "type": "text", + "content": " follows a GP, then its derivative " + }, + { + "bbox": [ + 104, + 701, + 504, + 736 + ], + "type": "inline_equation", + "content": "\\nabla f" + }, + { + "bbox": [ + 104, + 701, + 504, + 736 + ], + "type": "text", + "content": " also follows a GP (Rasmussen and Williams, 2006). This is formalized by our Lemma 1 below (proof in Appx. B.1), which then provides us a principled way to estimate the derivative at any input in the domain." 
+ } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 95 + ], + "type": "text", + "content": "Lemma 1 (Derived GP for Derivatives). 
If a function " + }, + { + "bbox": [ + 104, + 81, + 504, + 95 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 81, + 504, + 95 + ], + "type": "text", + "content": " follows a " + }, + { + "bbox": [ + 104, + 81, + 504, + 95 + ], + "type": "inline_equation", + "content": "GP" + }, + { + "bbox": [ + 104, + 81, + 504, + 95 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 81, + 504, + 95 + ], + "type": "inline_equation", + "content": "f \\sim \\mathcal{GP}\\left(\\mu (\\cdot),\\sigma^2 (\\cdot ,\\cdot)\\right)" + }, + { + "bbox": [ + 104, + 81, + 504, + 95 + ], + "type": "text", + "content": ", then" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 245, + 100, + 365, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 100, + 365, + 114 + ], + "spans": [ + { + "bbox": [ + 245, + 100, + 365, + 114 + ], + "type": "interline_equation", + "content": "\\nabla f \\sim \\mathcal {G P} (\\nabla \\mu (\\cdot), \\partial \\sigma^ {2} (\\cdot , \\cdot))", + "image_path": "219614b729b1ac17fa7cc496d7ca72f9a60eec849297a1c94f8dd4094d1caf20.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 117, + 498, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 117, + 498, + 131 + ], + "spans": [ + { + "bbox": [ + 104, + 117, + 498, + 131 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 117, + 498, + 131 + ], + "type": "inline_equation", + "content": "\\partial \\sigma^2 (\\cdot ,\\cdot)" + }, + { + "bbox": [ + 104, + 117, + 498, + 131 + ], + "type": "text", + "content": " denotes the cross partial derivative w.r.t the first and second arguments of " + }, + { + "bbox": [ + 104, + 117, + 498, + 131 + ], + "type": "inline_equation", + "content": "\\sigma^2 (\\cdot ,\\cdot)" + }, + { + "bbox": [ + 104, + 117, + 498, + 131 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "spans": [ + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "text", + "content": " Follows the Posterior GP. As discussed in Sec. 2.1, we assume that " + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "inline_equation", + "content": "f \\sim \\mathcal{GP}(\\mu(\\cdot), k(\\cdot, \\cdot))" + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "text", + "content": ". So, in every iteration " + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "text", + "content": " of our Algo. 2, conditioned on the current optimization trajectory " + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{t-1} \\triangleq \\{(x_{\\tau}, y_{\\tau})\\}_{\\tau=1}^{t-1}" + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "text", + "content": " follows the posterior GP: " + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "inline_equation", + "content": "f \\sim \\mathcal{GP}(\\mu_{t-1}(\\cdot), \\sigma_{t-1}^2(\\cdot, \\cdot))" + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "text", + "content": " with the mean function " + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "inline_equation", + "content": "\\mu_{t-1}(\\cdot)" + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "text", + "content": " and the covariance function " + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], 
+ "type": "inline_equation", + "content": "\\sigma_{t-1}^2(\\cdot, \\cdot)" + }, + { + "bbox": [ + 104, + 139, + 506, + 190 + ], + "type": "text", + "content": " defined as below (Rasmussen and Williams, 2006):" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 188, + 196, + 369, + 212 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 196, + 369, + 212 + ], + "spans": [ + { + "bbox": [ + 188, + 196, + 369, + 212 + ], + "type": "interline_equation", + "content": "\\mu_ {t - 1} (\\boldsymbol {x}) \\triangleq \\boldsymbol {k} _ {t - 1} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {y} _ {t - 1}", + "image_path": "df8182f87a3c81863356031927c078ee927084f01180c048db6d9e17d821acf3.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 175, + 208, + 504, + 229 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 208, + 504, + 229 + ], + "spans": [ + { + "bbox": [ + 175, + 208, + 504, + 229 + ], + "type": "interline_equation", + "content": "\\sigma_ {t - 1} ^ {2} \\left(\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}\\right) \\triangleq k \\left(\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}\\right) - \\boldsymbol {k} _ {t - 1} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {k} _ {t - 1} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\tag {3}", + "image_path": "c05949fa1051c544e7256c23547f28776426d0604db5d1dd7ff88f8a4b63e737.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\pmb{y}_{t-1}^{\\top} \\triangleq [y_{\\tau}]_{\\tau=1}^{t-1}" + 
}, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\pmb{k}_{t-1}(\\pmb{x})^{\\top} \\triangleq [k(\\pmb{x}, \\pmb{x}_{\\tau})]_{\\tau=1}^{t-1}" + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "inline_equation", + "content": "(t-1)" + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": "-dimensional row vectors, and " + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{t-1} \\triangleq [k(\\pmb{x}_{\\tau}, \\pmb{x}_{\\tau'})]_{\\tau, \\tau'=1}^{t-1}" + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "inline_equation", + "content": "(t-1) \\times (t-1)" + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": "-dimensional matrix. 
Define " + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\sigma_{t-1}^{2}(\\pmb{x}) \\triangleq \\sigma_{t-1}^{2}(\\pmb{x}, \\pmb{x})" + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": ", the posterior distribution at " + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": " is Gaussian with mean " + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\mu_{t-1}(\\pmb{x})" + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": " and variance " + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "inline_equation", + "content": "\\sigma_{t-1}^{2}(\\pmb{x})" + }, + { + "bbox": [ + 104, + 235, + 506, + 277 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 287, + 479, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 287, + 479, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 287, + 479, + 300 + ], + "type": "inline_equation", + "content": "\\nabla f" + }, + { + "bbox": [ + 104, + 287, + 479, + 300 + ], + "type": "text", + "content": " Follows the Derived GP for Derivatives. 
Substituting (3) into Lemma 1, we have that" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 231, + 304, + 504, + 318 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 304, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 231, + 304, + 504, + 318 + ], + "type": "interline_equation", + "content": "\\nabla f \\sim \\mathcal {G P} \\left(\\nabla \\mu_ {t - 1} (\\cdot), \\partial \\sigma_ {t - 1} ^ {2} (\\cdot , \\cdot)\\right), \\tag {4}", + "image_path": "83cdbc06e1ca042ececc2755e2346c890d0fa9dc244241a210949473b53e68fe.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "text", + "content": "in which the mean " + }, + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "inline_equation", + "content": "\\nabla \\mu_{t - 1}(\\pmb {x})" + }, + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "text", + "content": " and the covariance " + }, + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_{t - 1}^2 (\\pmb {x},\\pmb{x}^\\prime)" + }, + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "inline_equation", + "content": "\\pmb {x},\\pmb{x}^{\\prime}" + }, + { + "bbox": [ + 104, + 323, + 426, + 336 + ], + "type": "text", + "content": " are" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 342, + 356, + 358 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 342, + 356, + 358 + ], + "spans": [ + { + "bbox": [ + 132, + 342, + 356, + 
358 + ], + "type": "interline_equation", + "content": "\\nabla \\mu_ {t - 1} (\\boldsymbol {x}) \\triangleq \\partial_ {\\boldsymbol {z}} \\boldsymbol {k} _ {t - 1} (\\boldsymbol {z}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {y} _ {t - 1} | _ {\\boldsymbol {z} = \\boldsymbol {x}},", + "image_path": "53b120f7fb78085933be3361c3e53315720eb55a10fe2e676a3e9d803b71d159.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 354, + 504, + 378 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 354, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 121, + 354, + 504, + 378 + ], + "type": "interline_equation", + "content": "\\left. \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}, \\boldsymbol {x} ^ {\\prime}) \\triangleq \\partial_ {\\boldsymbol {z}} \\partial_ {\\boldsymbol {z} ^ {\\prime}} k (\\boldsymbol {z}, \\boldsymbol {z} ^ {\\prime}) - \\partial_ {\\boldsymbol {z}} \\boldsymbol {k} _ {t - 1} (\\boldsymbol {z}) ^ {\\top} \\left(\\mathbf {K} _ {t - 1} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\partial_ {\\boldsymbol {z} ^ {\\prime}} \\boldsymbol {k} _ {t - 1} (\\boldsymbol {z} ^ {\\prime}) \\right| _ {\\boldsymbol {z} = \\boldsymbol {x}, \\boldsymbol {z} ^ {\\prime} = \\boldsymbol {x} ^ {\\prime}}, \\tag {5}", + "image_path": "4de98438e15ae27bc4428f69345cb0919a96360a817849fff1c2385e59d35657.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "content": "in which " + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\partial_{\\pmb{z}}\\pmb{k}_{t - 1}(\\pmb {z})\\triangleq [\\partial_{\\pmb{z}}k(\\pmb {z},\\pmb{x}_{\\tau})]_{\\tau = 1}^{t - 1}" + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": 
"text", + "content": " is a " + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "inline_equation", + "content": "(t - 1)\\times d" + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "content": " -dimensional matrix and " + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\partial_{\\pmb{z}}\\partial_{\\pmb{z}^{\\prime}}k(\\pmb {z},\\pmb{z}^{\\prime})" + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "inline_equation", + "content": "d\\times d" + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "content": " -dimensional matrix. Therefore, " + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\nabla \\mu_{t - 1}(\\pmb {x})" + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "content": " -dimensional vector and " + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_{t - 1}^2 (\\pmb {x},\\pmb{x}^\\prime)" + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "inline_equation", + "content": "d\\times d" + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "content": " -dimensional matrix. We refer to this GP (4) followed by " + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "inline_equation", + "content": "\\nabla f" + }, + { + "bbox": [ + 104, + 384, + 506, + 419 + ], + "type": "text", + "content": " as the derived GP for derivatives." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "spans": [ + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": "So, define " + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_{t - 1}^2 (\\pmb {x})\\triangleq \\partial \\sigma_{t - 1}^2 (\\pmb {x},\\pmb {x})" + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": " , we have that for any input " + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "inline_equation", + "content": "\\pmb {x}\\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": " , the derivative " + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "inline_equation", + "content": "\\nabla f(\\pmb {x})" + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": " follows a " + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": " -dimensional Gaussian distribution: " + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "inline_equation", + "content": "\\nabla f(\\pmb {x})\\sim \\mathcal{N}(\\nabla \\mu_{t - 1}(\\pmb {x}),\\partial \\sigma_{t - 1}^2 (\\pmb {x}))" + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": " . 
This allows us to (a) estimate the derivative " + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "inline_equation", + "content": "\\nabla f(\\pmb {x})" + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": " at any input " + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "inline_equation", + "content": "\\pmb {x}\\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": " using the posterior mean " + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "inline_equation", + "content": "\\nabla \\mu_{t - 1}(\\pmb {x})" + }, + { + "bbox": [ + 104, + 425, + 505, + 470 + ], + "type": "text", + "content": " of the derived GP for derivatives (4):" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 258, + 471, + 504, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 471, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 258, + 471, + 504, + 483 + ], + "type": "interline_equation", + "content": "\\nabla f (\\boldsymbol {x}) \\approx \\nabla \\mu_ {t - 1} (\\boldsymbol {x}), \\tag {6}", + "image_path": "4d9ac906847583abf09c58b7fd7046c16dd96fbf5fdedaeaec5717d4e3ad8f9f.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 485, + 506, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 485, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 485, + 506, + 563 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 104, + 485, + 506, + 563 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 485, + 506, + 563 + ], + "type": "text", + "content": " employ the posterior covariance matrix " + }, + { + "bbox": [ + 104, + 485, + 506, + 563 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_{t - 1}^2 (\\pmb {x})" + }, + { + "bbox": [ + 104, + 485, + 506, + 563 + ], + "type": "text", + "content": " to obtain a principled measure of 
the uncertainty for this derivative estimation, which together constitute our novel derivative estimation. Remarkably, our derivative estimation only makes use of the naturally available optimization trajectory " + }, + { + "bbox": [ + 104, + 485, + 506, + 563 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{t - 1}" + }, + { + "bbox": [ + 104, + 485, + 506, + 563 + ], + "type": "text", + "content": " and does not need any additional query, which is in stark contrast to the existing FD methods (e.g., (2)) that require many additional queries for their derivative estimation. Moreover, our principled measure of uncertainty allows us to perform dynamic virtual updates (Sec. 3.2) and theoretically guarantee the quality of our derivative estimation (Sec. 4.1)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 576, + 255, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 255, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 255, + 586 + ], + "type": "text", + "content": "3.2 DYNAMIC VIRTUAL UPDATES" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": "Note that our derived GP-based derivative estimation (6) can estimate the derivative at any input " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": " within the domain. 
As a result, in every iteration " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": " of our ZoRD algorithm, for a step " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\tau \\geq 1" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": ", after performing a GD update using the estimated derivative at " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{t,\\tau -1}" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau -1})" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": ") to reach the input " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": " (line 5 of Algo. 2), we can again estimate the derivative at " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau})" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": ") and then perform another GD update to reach " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{t,\\tau +1}" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": " without requiring any additional query. 
This process can be repeated for multiple steps, and can further improve the query efficiency of our ZoRD. Formally, given the projection function " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\chi}(\\pmb {x})\\triangleq \\arg \\min_{\\pmb {z}\\in \\chi}\\| \\pmb {x} - \\pmb {z}\\| _2^2 /2" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": " and learning rates " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "\\{\\eta_{t,\\tau}\\}_{\\tau = 0}^{V_t - 1}" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": ", we perform the following virtual updates for " + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 104, + 596, + 506, + 687 + ], + "type": "text", + "content": " steps (lines 4-6 of Algo. 2):" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 173, + 692, + 504, + 705 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 692, + 504, + 705 + ], + "spans": [ + { + "bbox": [ + 173, + 692, + 504, + 705 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {t, \\tau} = \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau - 1} - \\eta_ {t, \\tau - 1} \\nabla \\mu_ {t - 1} \\left(\\boldsymbol {x} _ {t, \\tau - 1}\\right)\\right) \\quad \\forall \\tau = 1, \\dots , V _ {t} \\tag {7}", + "image_path": "eedb8276777b53b85d927a5093b62bf821d2a72528b236c54f5531872b237c50.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": "and then choose the last " + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "inline_equation", + 
"content": "\\pmb{x}_{t,V_t}" + }, + { + "bbox": [ + 104, + 709, + 505, + 733 + ], + "type": "text", + "content": " to query (i.e., line 7 of Algo. 2). Importantly, these multi-step virtual GD updates are only feasible in our ZoRD (Algo. 2) because our derivative estimator (6) does not" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "require any new query in all these steps, whereas the existing FD methods require additional queries to estimate the derivative in every step." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": "The number of steps for our virtual updates (i.e., " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": ") induces an intriguing trade-off: An overly small " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": " may not be able to fully exploit the benefit of our derivative estimation (6) which is free from the requirement for additional queries, yet an excessively large " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": " may lead to the usage of inaccurate derivative estimations which can hurt the performance (validated in Appx. D.2). Remarkably, (4) allows us to dynamically choose " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": " by inspecting our principled measure of the predictive uncertainty (i.e., " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_{t-1}^2(\\boldsymbol{x})" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": ") for every derivative estimation. 
Specifically, after reaching the input " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": ", we continue the virtual updates (to reach " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{t,\\tau+1}" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": ") if our predictive uncertainty is small, i.e., if " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\left\\| \\partial \\sigma_{t-1}^2(\\boldsymbol{x}_{t,\\tau}) \\right\\|_2 \\leq c" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": " is a confidence threshold; otherwise, we terminate the virtual updates and let " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "V_{t} = \\tau" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": " since the derivative estimation at " + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 110, + 506, + 213 + ], + "type": "text", + "content": " is likely unreliable." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 224, + 256, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 224, + 256, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 256, + 236 + ], + "type": "text", + "content": "4 THEORETICAL ANALYSIS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 243, + 274, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 243, + 274, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 274, + 255 + ], + "type": "text", + "content": "4.1 DERIVATIVE ESTIMATION ERROR" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 264, + 498, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 264, + 498, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 498, + 277 + ], + "type": "text", + "content": "To begin with, we derive a theoretical guarantee on the error of our derivative estimation at any " + }, + { + "bbox": [ + 104, + 264, + 498, + 277 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 264, + 498, + 277 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "text", + "content": "Theorem 1 (Derivative Estimation Error). Let " + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "inline_equation", + "content": "\\delta \\in (0,1)" + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "inline_equation", + "content": "\\beta \\triangleq \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(1 / \\delta)}" + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "text", + "content": ". 
For any " + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "text", + "content": " and any " + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "inline_equation", + "content": "t \\geq 1" + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "text", + "content": ", the following holds with probability of at least " + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 281, + 504, + 310 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 219, + 316, + 389, + 336 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 316, + 389, + 336 + ], + "spans": [ + { + "bbox": [ + 219, + 316, + 389, + 336 + ], + "type": "interline_equation", + "content": "\\left\\| \\nabla f (\\boldsymbol {x}) - \\nabla \\mu_ {t} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\beta \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}}.", + "image_path": "48446bbc0e8633bfba457b711d1132d96468671451a36ad723132896be9ec449.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "spans": [ + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "text", + "content": "Thm. 1 (proof in Appx. 
B.2) has presented an upper bound on the error of our derivative estimation (6) at any " + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "text", + "content": " in terms of " + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "inline_equation", + "content": "\\sqrt{\\|\\partial\\sigma_t^2(\\pmb{x})\\|_2}" + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "text", + "content": ", which is a measure of the uncertainty about our derivative estimation at " + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "text", + "content": " (Sec. 3.1). This hence implies that the threshold " + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "text", + "content": " applied to our predictive uncertainty " + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "inline_equation", + "content": "\\left\\| \\partial \\sigma_t^2 (\\pmb {x})\\right\\| _2" + }, + { + "bbox": [ + 104, + 337, + 504, + 405 + ], + "type": "text", + "content": " (Sec. 3.2) also ensures that the derivative estimation error is small during our dynamic virtual updates. Next, we show in the following theorem (proof in Appx. B.3) that our upper bound on the estimation error from Thm. 1 is non-increasing as the number of function queries is increased." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 408, + 432, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 408, + 432, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 432, + 420 + ], + "type": "text", + "content": "Theorem 2 (Non-Increasing Error). 
For any " + }, + { + "bbox": [ + 105, + 408, + 432, + 420 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 105, + 408, + 432, + 420 + ], + "type": "text", + "content": " and any " + }, + { + "bbox": [ + 105, + 408, + 432, + 420 + ], + "type": "inline_equation", + "content": "t \\geq 1" + }, + { + "bbox": [ + 105, + 408, + 432, + 420 + ], + "type": "text", + "content": ", we have that" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 242, + 424, + 367, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 424, + 367, + 440 + ], + "spans": [ + { + "bbox": [ + 242, + 424, + 367, + 440 + ], + "type": "interline_equation", + "content": "\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}.", + "image_path": "7b9ebf46b865f9ff332499f7f58709b82c89d68cb366e0ed8b1e93389d64328a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "inline_equation", + "content": "\\delta \\in (0,1)" + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "text", + "content": ". 
Define " + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "inline_equation", + "content": "r \\triangleq \\max_{\\boldsymbol{x} \\in \\mathcal{X}, t \\geq 1} \\sqrt{\\|\\partial \\sigma_t^2(\\boldsymbol{x})\\|_2 / \\left\\|\\partial \\sigma_{t-1}^2(\\boldsymbol{x})\\right\\|_2}" + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "text", + "content": ", given the " + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "text", + "content": " in Thm. 1, we then have that " + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "inline_equation", + "content": "r \\in [1/\\sqrt{1 + 1/\\sigma^2}, 1]" + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "text", + "content": ", and that with a probability of at least " + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 446, + 504, + 479 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 202, + 486, + 406, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 486, + 406, + 506 + ], + "spans": [ + { + "bbox": [ + 202, + 486, + 406, + 506 + ], + "type": "interline_equation", + "content": "\\left\\| \\nabla f (\\boldsymbol {x}) - \\nabla \\mu_ {t} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\beta \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\leq \\kappa \\beta r ^ {t}.", + "image_path": "af59d97d62e3b059bfda0c55af82562ab7443cc1b306a624206f9ac1dc681697.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": "Thm. 
2 shows that our upper bound on the derivative estimation error (i.e., " + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "inline_equation", + "content": "\\beta \\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}" + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": " from Thm. 1) is guaranteed to be non-increasing in the entire domain as the number of function queries is increased. Moreover, in some situations (i.e., when " + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "inline_equation", + "content": "r < 1" + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": "), our upper bound on the estimation error is even exponentially decreasing. Of note, " + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": " characterizes how fast the uncertainty about our derivative estimation (measured by " + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "inline_equation", + "content": "\\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}" + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": ") is reduced across the domain. Since GD-based algorithms usually perform a local search in a neighborhood (especially for the problems with high-dimensional input spaces), all the inputs within the local region are expected to be close to each other (measured by the kernel function " + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": "). 
Moreover, as the objective function is usually smooth in the local region (i.e., its derivatives are continuous), reducing the uncertainty of the derivative at an input " + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": " (i.e., by querying " + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": ") is also expected to decrease the uncertainty of the derivatives at the other inputs in the same local region (i.e., decrease " + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "inline_equation", + "content": "\\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}" + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": "). So, " + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "inline_equation", + "content": "r < 1" + }, + { + "bbox": [ + 104, + 510, + 506, + 657 + ], + "type": "text", + "content": " is expected to be a reasonable condition that can be satisfied in practice. This will also be corroborated by our empirical results (e.g., Figs. 1 and 2), which demonstrates that the error of our derivative estimation (6) is indeed reduced very fast." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 668, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 668, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 668, + 506, + 715 + ], + "type": "text", + "content": "Our GP-based Method (6) vs. Existing FD Methods. Our derivative estimation method based on the derived GP (6) is superior to the traditional FD methods (e.g., (2)) in a number of major aspects. 
(a) Our derivative estimation error can be exponentially decreasing in some situations (i.e., when " + }, + { + "bbox": [ + 104, + 668, + 506, + 715 + ], + "type": "inline_equation", + "content": "r < 1" + }, + { + "bbox": [ + 104, + 668, + 506, + 715 + ], + "type": "text", + "content": " in Thm. 2), which is unachievable for the existing FD methods since they can only" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "type": "text", + "content": "2The first step of GD update to reach " + }, + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "type": "inline_equation", + "content": "x_{t,1}" + }, + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "type": "text", + "content": " is always performed, i.e., " + }, + { + "bbox": [ + 116, + 720, + 394, + 732 + ], + "type": "inline_equation", + "content": "V_{t}\\geq 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "text", + "content": "attain a 
polynomial rate of reduction (Berahas et al., 2022). " + }, + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "text", + "content": " Our method (6) does not need any additional query to estimate the derivative (but only requires the optimization trajectory), whereas the existing FD methods require additional queries for every derivative estimation. " + }, + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "text", + "content": " Our method (6) is equipped with a principled measure of the predictive uncertainty and hence the estimation error for derivative estimation (i.e., via " + }, + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\sqrt{\\|\\partial\\sigma_t^2(\\boldsymbol{x})\\|_2}" + }, + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "text", + "content": ", Thm. 1), which is typically unavailable for the existing FD methods. " + }, + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "inline_equation", + "content": "(d)" + }, + { + "bbox": [ + 104, + 82, + 506, + 184 + ], + "type": "text", + "content": " Our method (6), unlike the existing FD methods, makes it possible to apply the technique of dynamic virtual updates (Sec. 3.2) thanks to its capability of estimating the derivative at any input in the domain without requiring any additional query and measuring the estimation error in a principled way (Thm. 1)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 197, + 244, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 197, + 244, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 197, + 244, + 208 + ], + "type": "text", + "content": "4.2 CONVERGENCE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": "To analyze the convergence of our ZoRD, besides our main assumption that " + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": " is sampled from a GP (Sec. 2.1), we assume that " + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "inline_equation", + "content": "L_{c}" + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": "-Lipchitz continuous for " + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "inline_equation", + "content": "L_{c} > 0" + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": ". 
This is a mild assumption since it has been shown that a function " + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": " sampled from a GP is Lipchitz continuous with high probability for commonly used kernels, e.g., the SE kernel and Matérn kernel with " + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "inline_equation", + "content": "\\nu > 2" + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": " (Srinivas et al., 2010). We also assume that " + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "inline_equation", + "content": "L_{s}" + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": "-Lipchitz smooth, which is commonly adopted in the analysis GD-based algorithms (J Reddi et al., 2016). We aim to prove the convergence of our ZoRD for nonconvex " + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 217, + 506, + 316 + ], + "type": "text", + "content": " by analyzing how fast it converges to a stationary point (Ghadimi and Lan, 2013; Liu et al., 2018a). 
Specifically, we follow the common practice of previous works (J Reddi et al., 2016; Liu et al., 2018b) to analyze the following derivative mapping:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 202, + 321, + 504, + 336 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 321, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 202, + 321, + 504, + 336 + ], + "type": "interline_equation", + "content": "G _ {t, \\tau} \\triangleq \\left(\\boldsymbol {x} _ {t, \\tau} - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right)\\right)\\right) / \\eta_ {t, \\tau}. \\tag {8}", + "image_path": "dfbbf96d40892862eaf175726b8db8c0de5df21051025f9c6082aad772d00b4a.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 339, + 482, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 339, + 482, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 482, + 352 + ], + "type": "text", + "content": "The convergence of our ZoRD is formally guaranteed by Thm. 3 below (proof in Appx. B.4)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "text", + "content": "Theorem 3 (Convergence of ZORD). Let " + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "inline_equation", + "content": "\\delta \\in (0,1)" + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "text", + "content": ". Suppose our ZORD (Algo. 
2) is run with " + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "inline_equation", + "content": "V_{t} = V" + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "inline_equation", + "content": "\\eta_{t,\\tau} = \\eta \\leq 1 / L_{s}" + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "text", + "content": ". Then with probability of at least " + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "text", + "content": ", when " + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "inline_equation", + "content": "r < 1" + }, + { + "bbox": [ + 104, + 354, + 504, + 377 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 146, + 383, + 463, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 383, + 463, + 430 + ], + "spans": [ + { + "bbox": [ + 146, + 383, + 463, + 430 + ], + "type": "interline_equation", + "content": "\\min _ {t \\leq T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\underbrace {\\frac {2 [ f (\\boldsymbol {x} _ {0}) - f (\\boldsymbol {x} ^ {*}) ] / \\eta}{T V}} _ {①} + \\underbrace {\\frac {2 \\alpha^ {2} r ^ {2}}{T (1 - r ^ {2})} + \\frac {(2 L _ {c} + 1 / \\eta) \\alpha r}{T (1 - r)}} _ {②}", + "image_path": "b2358685489038811427407993334e84fbc042e1a8792ab6af3b5a711bfb8aee.jpg" + } + ] + } + ], 
+ "index": 7 + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\alpha \\triangleq \\kappa \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(VT / \\delta)}" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": ". When " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "r = 1" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": ", we instead have (2) = 2α² + (2Lc + 1/η)α. In the upper bound of Thm. 3, the term (1) represents the convergence rate of (projected) GD when the true derivative is used and it asymptotically goes to 0 as " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": " increases; the term (2) corresponds to the impact of the error of our derivative estimation (6) on the convergence. In situations where " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "r < 1" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": " which is a reasonably achievable condition as we have discussed in Sec. 4.1, the term (2) will also asymptotically approach 0. 
This, remarkably, suggests that the impact of the derivative estimation error on the convergence vanishes asymptotically and our ZoRD algorithm is guaranteed to converge to a stationary point (i.e., " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\min_{t \\leq T} \\frac{1}{V} \\sum_{\\tau=0}^{V-1} \\|G_{t,\\tau}\\|_2^2" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": " approaches 0) at the rate of " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(1/T)" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "r < 1" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": ". This is unattainable by existing ZO optimization algorithms using FD-based derivative estimation (Nesterov and Spokoiny, 2017; Liu et al., 2018b), because these methods typically converge to a stationary point at the rate of " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(1/T + \\text{const.})" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": " with a constant learning rate. Even when " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "r = 1" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": " where the term (2) becomes a constant independent of " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": ", our Thm. 3 is still superior to the convergence of these existing works because our result (Thm. 
3) is based on the worst-case analysis whereas these works are typically based on the average-case analysis, i.e., their results only hold in expectation over the randomly sampled directions for derivative estimation. This means that their convergence may become even worse when inappropriate directions are used, e.g., directions that are nearly orthogonal to the true derivative which commonly happens in high-dimensional input spaces. In addition, given a fixed " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": ", our ZoRD enjoys a query complexity (i.e., the number of queries in " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": " iterations) of " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(T)" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": ", which significantly improves over the " + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(nT)" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": " of the existing works based on FD (" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 431, + 506, + 651 + ], + "type": "text", + "content": " in Sec. 2.2)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "The impacts of the number of steps of our virtual updates (i.e., " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": ") are partially reflected in Thm. 3. Specifically, a larger " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " improves the reduction rate of the term ① because a larger number of virtual GD updates (without requiring additional queries) will be applied in our ZoRD algorithm. This is also unachievable by existing ZO optimization algorithms using FD-based derivative estimation since they require additional queries for the derivative estimation in their every GD update. Meanwhile, a larger " + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": " may also negatively impact the performance of our ZoRD since it may lead to the use of those estimated derivatives with large estimation errors (Sec. 3.2). 
However, this negative impact has" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 119, + 73, + 218, + 156 + ], + "blocks": [ + { + "bbox": [ + 119, + 73, + 218, + 156 + ], + "lines": [ + { + "bbox": [ + 119, + 73, + 218, + 156 + ], + "spans": [ + { + "bbox": [ + 119, + 73, + 218, + 156 + ], + "type": "image", + "image_path": "2fb0671b56b5797630d6a747ef32a38713d82e3ec978e690ea6388a01f7f8fe4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 218, + 73, + 309, + 156 + ], + "blocks": [ + { + "bbox": [ + 218, + 73, + 309, + 156 + ], + "lines": [ + { + "bbox": [ + 218, + 73, + 309, + 156 + ], + "spans": [ + { + "bbox": [ + 218, + 73, + 309, + 156 + ], + "type": "image", + "image_path": "30e7e6b88ff343420916ce16025174b87f742f824a39b86d0c53d0bc912081d7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 258, + 158, + 359, + 166 + ], + "lines": [ + { + "bbox": [ + 258, + 158, + 359, + 166 + ], + "spans": [ + { + "bbox": [ + 258, + 158, + 359, + 166 + ], + "type": "text", + "content": "- Function Queries --" + }, + { + "bbox": [ + 258, + 158, + 359, + 166 + ], + "type": "inline_equation", + "content": "\\nabla f" + }, + { + "bbox": [ 
+ 258, + 158, + 359, + 166 + ], + "type": "text", + "content": " --" + }, + { + "bbox": [ + 258, + 158, + 359, + 166 + ], + "type": "inline_equation", + "content": "\\nabla \\mu" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 309, + 73, + 399, + 156 + ], + "blocks": [ + { + "bbox": [ + 309, + 73, + 399, + 156 + ], + "lines": [ + { + "bbox": [ + 309, + 73, + 399, + 156 + ], + "spans": [ + { + "bbox": [ + 309, + 73, + 399, + 156 + ], + "type": "image", + "image_path": "7435b5574d5e73ba4302e5159f8e4d4c40cd50ab8410143f82709bf339b93475.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 399, + 74, + 490, + 156 + ], + "blocks": [ + { + "bbox": [ + 399, + 74, + 490, + 156 + ], + "lines": [ + { + "bbox": [ + 399, + 74, + 490, + 156 + ], + "spans": [ + { + "bbox": [ + 399, + 74, + 490, + 156 + ], + "type": "image", + "image_path": "b9e9b583f5ab93b49af9a61a3c090efdf457efc11a3341914f06dcf3b58a0351.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 106, + 194, + 199, + 270 + ], + "blocks": [ + { + "bbox": [ + 104, + 169, + 504, + 192 + ], + "lines": [ + { + "bbox": [ + 104, + 169, + 504, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 169, + 504, + 192 + ], + "type": "text", + "content": "Figure 1: Our derived GP for derivative estimation (4) with different number " + }, + { + "bbox": [ + 104, + 169, + 504, + 192 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 169, + 504, + 192 + ], + "type": "text", + "content": " of queries. 
Green curve and its confidence interval denote the mean " + }, + { + "bbox": [ + 104, + 169, + 504, + 192 + ], + "type": "inline_equation", + "content": "\\nabla \\mu(\\boldsymbol{x})" + }, + { + "bbox": [ + 104, + 169, + 504, + 192 + ], + "type": "text", + "content": " and standard deviation of the derived GP." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 194, + 199, + 270 + ], + "lines": [ + { + "bbox": [ + 106, + 194, + 199, + 270 + ], + "spans": [ + { + "bbox": [ + 106, + 194, + 199, + 270 + ], + "type": "image", + "image_path": "6c2df39df5bec678e47ecb1a994d5789a085a21360b455c265d7926be1bf4bb1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 197, + 263, + 211, + 273 + ], + "lines": [ + { + "bbox": [ + 197, + 263, + 211, + 273 + ], + "spans": [ + { + "bbox": [ + 197, + 263, + 211, + 273 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 199, + 195, + 299, + 270 + ], + "blocks": [ + { + "bbox": [ + 199, + 195, + 299, + 270 + ], + "lines": [ + { + "bbox": [ + 199, + 195, + 299, + 270 + ], + "spans": [ + { + "bbox": [ + 199, + 195, + 299, + 270 + ], + "type": "image", + "image_path": "52ac6711c387df65723539614fe94e1ec462fd42ab99dda8830767c808e6ada4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 306, + 195, + 396, + 270 + ], + "blocks": [ + { + "bbox": [ + 306, + 195, + 396, + 270 + ], + "lines": [ + { + "bbox": [ + 306, + 195, + 396, + 270 + ], + "spans": [ + { + "bbox": [ + 306, + 195, + 396, + 270 + ], + "type": "image", + "image_path": "69252a7f35ad0dda395bd119dead41878d095393545b25e807d1f6014526d1a0.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 274, + 505, + 308 + ], + "lines": [ + { + 
"bbox": [ + 104, + 274, + 505, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 505, + 308 + ], + "type": "text", + "content": "Figure 2: Comparison of the derivative estimation errors of our derived GP-based estimator (6) (GP) and the FD estimator, measured by cosine similarity (larger is better) and Euclidean distance (smaller is better). Each curve is the mean " + }, + { + "bbox": [ + 104, + 274, + 505, + 308 + ], + "type": "inline_equation", + "content": "\\pm" + }, + { + "bbox": [ + 104, + 274, + 505, + 308 + ], + "type": "text", + "content": " standard error from five independent runs." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 396, + 195, + 500, + 270 + ], + "blocks": [ + { + "bbox": [ + 396, + 195, + 500, + 270 + ], + "lines": [ + { + "bbox": [ + 396, + 195, + 500, + 270 + ], + "spans": [ + { + "bbox": [ + 396, + 195, + 500, + 270 + ], + "type": "image", + "image_path": "c7e19456afed38e28affab77c25dfb1abd43b71de42fbbaa5db79bff7ffb5055.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 315, + 504, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 504, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 504, + 349 + ], + "type": "text", + "content": "only been implicitly accounted for by the term ② because this term comes from our Thm. 2, which is based on a worst-case analysis and gives a uniform upper bound on the derivative estimation error for all inputs in the domain " + }, + { + "bbox": [ + 104, + 315, + 504, + 349 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 315, + 504, + 349 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 365, + 200, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 200, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 200, + 376 + ], + "type": "text", + "content": "5 EXPERIMENTS" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 390, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 390, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 390, + 504, + 425 + ], + "type": "text", + "content": "In this section, we firstly empirically verify the efficacy of our derived GP-based derivative estimator (6) in Sec. 5.1, and then demonstrate that our ZoRD outperforms existing baseline methods for ZO optimization using synthetic experiments (Sec. 5.2) and real-world experiments (Secs. 5.3, 5.4)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 437, + 239, + 448 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 239, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 239, + 448 + ], + "type": "text", + "content": "5.1 DERIVATIVE ESTIMATION" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "text", + "content": "Here we investigate the efficacy of our derivative estimator (6) based on the derived GP for derivatives (4). Specifically, we sample a function " + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "text", + "content": " (defined on a one-dimensional domain) from a GP using the SE kernel, and then use a set of randomly selected inputs as well as their noisy observations (as optimization trajectory) to calculate our derived GP for derivatives. The results (Fig. 
1) illustrate a number of interesting insights. Firstly, in regions where (even only a few) function queries are performed (e.g., in the region of " + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "inline_equation", + "content": "[-3,0]" + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "text", + "content": "), our estimated derivative (i.e., " + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\nabla \\mu_{t-1}(\\pmb{x})" + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "text", + "content": ") generally aligns with the groundtruth derivative (i.e., " + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\nabla f(\\pmb{x})" + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "text", + "content": ") and our estimation uncertainty (i.e., characterized by " + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\sqrt{\\|\\partial \\sigma_{t-1}^2(\\pmb{x})\\|_2}" + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "text", + "content": ") shrinks compared with other un-queried regions. These results hence demonstrate that our (4) is able to accurately estimate derivatives and reliably quantify the uncertainty of these estimations within the regions where function queries are performed. Secondly, as more input queries are collected (i.e., from left to right in Fig. 1), the uncertainty " + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\sqrt{\\|\\partial \\sigma_{t-1}^2(\\pmb{x})\\|_2}" + }, + { + "bbox": [ + 104, + 457, + 506, + 639 + ], + "type": "text", + "content": " in the entire domain is decreased in general. This provides an empirical justification for our Thm. 2 which guarantees non-increasing uncertainty and hence non-increasing estimation error. 
Lastly, note that with only 12 queries (rightmost figure), our derivative estimator is already able to accurately estimate the derivative in the entire domain, which represents a remarkable reduction rate of our derivative estimation error." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": "Next, we compare our derivative estimator (6) with the FD estimator (Sec. 2.2). Specifically, using the Ackley function with " + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "inline_equation", + "content": "d = 10" + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": " (see Appx. C.2), we firstly select an input " + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": " and then follow the FD method (2) to randomly sample " + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": " directions " + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\{\\pmb{u}_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": " from the standard Gaussian distribution, to construct input queries " + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\{\\pmb{x}_0 + \\lambda \\pmb{u}_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": " (see Sec. 2.2). 
Next, these queries and their observations are " + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": " used as the optimization trajectory to apply our derivative estimator (6), and " + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": " used by the FD method to estimate the derivative following (2). The results are shown in Fig. 2a (for two different values of " + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": "), in which for both our derived GP-based estimator (6) and the FD estimator, we measure the cosine similarity (larger is better) and Euclidean distance (smaller is better) between the estimated" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 105, + 74, + 200, + 157 + ], + "blocks": [ + { + "bbox": [ + 105, + 74, + 200, + 157 + ], + "lines": [ + { + "bbox": [ + 105, + 74, + 200, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 74, + 200, + 157 
+ ], + "type": "image", + "image_path": "fd31e857fdfda29a70526f3750dfdd07613981fb14662922e4a124caef4bb53c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 116, + 161, + 198, + 173 + ], + "lines": [ + { + "bbox": [ + 116, + 161, + 198, + 173 + ], + "spans": [ + { + "bbox": [ + 116, + 161, + 198, + 173 + ], + "type": "text", + "content": "(a) Ackley " + }, + { + "bbox": [ + 116, + 161, + 198, + 173 + ], + "type": "inline_equation", + "content": "(d = 20)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 204, + 75, + 299, + 157 + ], + "blocks": [ + { + "bbox": [ + 204, + 75, + 299, + 157 + ], + "lines": [ + { + "bbox": [ + 204, + 75, + 299, + 157 + ], + "spans": [ + { + "bbox": [ + 204, + 75, + 299, + 157 + ], + "type": "image", + "image_path": "e73667fdf505e857111dadc0fd07c848d1490dc290fa522ca60cff5936a7b642.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 215, + 161, + 298, + 173 + ], + "lines": [ + { + "bbox": [ + 215, + 161, + 298, + 173 + ], + "spans": [ + { + "bbox": [ + 215, + 161, + 298, + 173 + ], + "type": "text", + "content": "(b) Ackley " + }, + { + "bbox": [ + 215, + 161, + 298, + 173 + ], + "type": "inline_equation", + "content": "(d = 40)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 302, + 74, + 402, + 157 + ], + "blocks": [ + { + "bbox": [ + 302, + 74, + 402, + 157 + ], + "lines": [ + { + "bbox": [ + 302, + 74, + 402, + 157 + ], + "spans": [ + { + "bbox": [ + 302, + 74, + 402, + 157 + ], + "type": "image", + "image_path": "0100551861b401ee2614ff43292c7d06a39a06f8256052db091e7693ba9297fc.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 161, + 394, + 173 + ], + "lines": [ + { + "bbox": [ + 321, + 161, + 394, + 173 + ], + "spans": [ + { 
+ "bbox": [ + 321, + 161, + 394, + 173 + ], + "type": "text", + "content": "(c) Levy " + }, + { + "bbox": [ + 321, + 161, + 394, + 173 + ], + "type": "inline_equation", + "content": "(d = 40)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "lines": [ + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "type": "text", + "content": "Figure 3: Optimization of Ackley and Levy functions with different dimensions. The " + }, + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "type": "text", + "content": "-axis and " + }, + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "type": "text", + "content": "-axis denote the number of queries and log-scaled optimality gap (i.e., " + }, + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "type": "inline_equation", + "content": "\\log(f(\\boldsymbol{x}_T) - f(\\boldsymbol{x}^*))" + }, + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "type": "text", + "content": ") achieved after this number of queries. Each curve is the mean " + }, + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "type": "inline_equation", + "content": "\\pm" + }, + { + "bbox": [ + 104, + 177, + 506, + 212 + ], + "type": "text", + "content": " standard error from ten independent runs." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 405, + 74, + 504, + 157 + ], + "blocks": [ + { + "bbox": [ + 405, + 74, + 504, + 157 + ], + "lines": [ + { + "bbox": [ + 405, + 74, + 504, + 157 + ], + "spans": [ + { + "bbox": [ + 405, + 74, + 504, + 157 + ], + "type": "image", + "image_path": "bc6269a99ec9934f4c72c89db8b20f284c5afd4e9a5baa679820e4ee9bd17b61.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 420, + 161, + 499, + 173 + ], + "lines": [ + { + "bbox": [ + 420, + 161, + 499, + 173 + ], + "spans": [ + { + "bbox": [ + 420, + 161, + 499, + 173 + ], + "type": "text", + "content": "(d) Levy " + }, + { + "bbox": [ + 420, + 161, + 499, + 173 + ], + "type": "inline_equation", + "content": "(d = 100)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 108, + 245, + 504, + 311 + ], + "blocks": [ + { + "bbox": [ + 104, + 213, + 504, + 237 + ], + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 237 + ], + "type": "text", + "content": "Table 1: Comparison of the number of required queries to achieve a successful black-box adversarial attack. Every entry represents mean ± standard deviation from five independent runs." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 245, + 504, + 311 + ], + "lines": [ + { + "bbox": [ + 108, + 245, + 504, + 311 + ], + "spans": [ + { + "bbox": [ + 108, + 245, + 504, + 311 + ], + "type": "table", + "html": "
DatasetMetricGLDRGFPRGFTuRBO-1TuRBO-10ZoRD
MNIST# Queries1780±2221192±2601236±145654±70747±60248±50
Speedup7.2×4.8×5.0×2.6×3.0×1.0×
CIFAR-10# Queries964±1753622±11554133±1525638±108708±105384±59
Speedup2.5×9.4×10.8×1.7×1.8×1.0×
", + "image_path": "fc483bd8b7a527213cfa336ac611ae4d2bb700bc68d2ce47e872ecb19d059d21.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 321, + 506, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 321, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 321, + 506, + 441 + ], + "type": "text", + "content": "derivative and the true derivative at " + }, + { + "bbox": [ + 104, + 321, + 506, + 441 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 321, + 506, + 441 + ], + "type": "text", + "content": ". The figures show that our derivative estimation error enjoys a faster rate of reduction compared with the FD method, which corroborates our theoretical insights from Thm. 2 (Sec. 4.1) positing that our estimation error can be rapidly decreasing. Subsequently, to further highlight our advantage of being able to exploit the optimization trajectory and hence to eliminate the need for additional function queries (Sec. 4.1), we perform another comparison where our derived GP-based estimator (6) only utilizes 20 queries from the optimization trajectory (sampled using the same method above) for derivative estimation. The results (Fig. 2b) show that even with only these 20 queries (without any additional function query), our derivative estimator (6) achieves comparable or better estimation errors than FD using as many as 80 additional queries. Overall, the results in Fig. 2 have provided empirical supports for the superiority of our derived GP-based derivative estimation (6), which substantiates our theoretical justifications in Sec. 4.1." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 456, + 246, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 246, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 246, + 467 + ], + "type": "text", + "content": "5.2 SYNTHETIC EXPERIMENTS" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 476, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 476, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 506, + 654 + ], + "type": "text", + "content": "Here we adopt the widely used Ackley and Levy functions with various dimensions (Eriksson et al., 2019) to show the superiority of our ZoRD. We compare ZoRD with a number of representative baselines for ZO optimization, e.g., RGF (Nesterov and Spokoiny, 2017) which uses FD for derivative estimation, PRGF (Cheng et al., 2021) which is a recent extension of RGF, GLD (Golovin et al., 2020) which is a recent ZO optimization algorithm based on direct search, and TuRBO (Eriksson et al., 2019) which is a highly performant Bayesian optimization (BO) algorithm. We also evaluate the performance of a first-order optimization algorithm, i.e., GD with true derivatives. More details are in Appx. C.2. The results are shown in Fig. 3, where ZoRD outperforms all other ZO optimization algorithms. Particularly, ZoRD considerably outperforms both RGF and PRGF, which can be attributed to our two major contributions. Firstly, our derivative estimator (6) used by ZoRD is more accurate and more query-efficient than the FD method adopted by RGF and PRGF, as theoretically justified in Sec. 4.1 and empirically demonstrated in Sec. 5.1. Secondly, our dynamic virtual updates (Sec. 3.2) can perform multi-step GD updates without requiring any additional query, which further improves the performance of ZoRD (validated in Appx. D.2). 
Moreover, ZoRD is the only ZO optimization algorithm that is able to converge to a comparable final performance to that of the GD with true derivatives in every figure of Fig. 3." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 666, + 288, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 288, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 288, + 677 + ], + "type": "text", + "content": "5.3 BLACK-BOX ADVERSARIAL ATTACK" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "We further compare our ZoRD with other ZO optimization algorithms in the problem of black-box adversarial attack on images, which is one of the most important applications of ZO optimization in recent years. In black-box adversarial attack (Ru et al., 2020), given a fully trained ML model and an image " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": ", we intend to find (through only function queries) a small perturbation " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": " to be added to " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "z" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { 
+ "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 105, + 77, + 202, + 162 + ], + "blocks": [ + { + "bbox": [ + 105, + 77, + 202, + 162 + ], + "lines": [ + { + "bbox": [ + 105, + 77, + 202, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 77, + 202, + 162 + ], + "type": "image", + "image_path": "5e2cc4a270655bf0cc561718d01579082a6b4c660c8b305cab3b961c4172a6f2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 129, + 167, + 181, + 177 + ], + "lines": [ + { + "bbox": [ + 129, + 167, + 181, + 177 + ], + "spans": [ + { + "bbox": [ + 129, + 167, + 181, + 177 + ], + "type": "text", + "content": "(a) Precision" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 206, + 78, + 302, + 161 + ], + "blocks": [ + { + "bbox": [ + 206, + 78, + 302, + 161 + ], + "lines": [ + { + "bbox": [ + 206, + 78, + 302, + 161 + ], + "spans": [ + { + "bbox": [ + 206, + 78, + 302, + 161 + ], + "type": "image", + "image_path": "cec0e66aa1591dcb2dfa5ffa83037b4fda9859d4f41334f3cdbba10d9b0748cb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 238, + 167, + 279, + 177 + ], + "lines": [ + { + "bbox": [ + 238, + 167, + 279, + 177 + ], + "spans": [ + { + "bbox": [ + 238, + 167, + 279, + 177 + ], + "type": "text", + "content": "(b) Recall" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 77, + 400, + 161 + ], + "blocks": [ + { + "bbox": [ + 307, + 77, + 400, + 161 + ], + "lines": [ + { + "bbox": [ + 
307, + 77, + 400, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 77, + 400, + 161 + ], + "type": "image", + "image_path": "7585b17454414486877252370863dfb40ad78f6c8e978f2e901729a5fdfe2ad1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 334, + 167, + 384, + 177 + ], + "lines": [ + { + "bbox": [ + 334, + 167, + 384, + 177 + ], + "spans": [ + { + "bbox": [ + 334, + 167, + 384, + 177 + ], + "type": "text", + "content": "(c) F1 Score" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 182, + 504, + 216 + ], + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 216 + ], + "type": "text", + "content": "Figure 4: Optimization of different non-differentiable metrics on the Covertype dataset. The " + }, + { + "bbox": [ + 104, + 182, + 504, + 216 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 182, + 504, + 216 + ], + "type": "text", + "content": "-axis and " + }, + { + "bbox": [ + 104, + 182, + 504, + 216 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 182, + 504, + 216 + ], + "type": "text", + "content": "-axis denote, respectively, the number of queries and the improvement on the non-differentiable metric. Each curve is the mean " + }, + { + "bbox": [ + 104, + 182, + 504, + 216 + ], + "type": "inline_equation", + "content": "\\pm" + }, + { + "bbox": [ + 104, + 182, + 504, + 216 + ], + "type": "text", + "content": " standard error from five independent experiments." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 405, + 78, + 503, + 161 + ], + "blocks": [ + { + "bbox": [ + 405, + 78, + 503, + 161 + ], + "lines": [ + { + "bbox": [ + 405, + 78, + 503, + 161 + ], + "spans": [ + { + "bbox": [ + 405, + 78, + 503, + 161 + ], + "type": "image", + "image_path": "b96d17acceccde711d5262da4219c0fa5ed142b795b19050eb3003c174935d16.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 424, + 167, + 493, + 177 + ], + "lines": [ + { + "bbox": [ + 424, + 167, + 493, + 177 + ], + "spans": [ + { + "bbox": [ + 424, + 167, + 493, + 177 + ], + "type": "text", + "content": "(d) Jaccard index" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "text", + "content": "such that the perturbed image " + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "inline_equation", + "content": "z + x" + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "text", + "content": " will be incorrectly classified by the ML model. 
Following the practice from (Cheng et al., 2021), we randomly select an image from MNIST (Lecun et al., 1998) (" + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "inline_equation", + "content": "d = 28 \\times 28" + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "text", + "content": ") or CIFAR-10 (Krizhevsky et al., 2009) (" + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "inline_equation", + "content": "d = 32 \\times 32" + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "text", + "content": "), and aim to add a perturbation with an " + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "inline_equation", + "content": "L_{\\infty}" + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "text", + "content": " constraint to make a trained deep neural network misclassify the image (more details in Appx. C.3). Tab. 1 summarizes the number of required queries to achieve a successful attack by different algorithms (see results on multiple images in Appx. D.3). The results show that in such high-dimensional ZO optimization problems, our ZoRD again significantly outperforms the other algorithms since it requires a considerably smaller number of queries to achieve a successful attack. Particularly, our ZoRD is substantially more query-efficient than RGF and PRGF which rely on the FD methods for derivative estimation, e.g., for CIFAR-10, the number of queries required by RGF and PRGF are " + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "inline_equation", + "content": "9.4\\times \\text{ and } 10.8\\times" + }, + { + "bbox": [ + 104, + 228, + 506, + 416 + ], + "type": "text", + "content": " of that required by ZoRD. This further verifies the advantages of our trajectory-informed derivative estimation (as justified theoretically in Sec. 4.1 and empirically in Sec. 5.1) and dynamic virtual updates (as demonstrated in Appx. D.2). 
Remarkably, our ZoRD also outperforms BO (i.e., TuRBO-1/10 which correspond to two versions of the TuRBO algorithm (Eriksson et al., 2019)) which has been widely shown to be query-efficient in black-box adversarial attack (Ru et al., 2020). Overall, these results showcase the ability of our ZoRD to advance the other ZO optimization algorithms in challenging real-world ZO optimization problems." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 428, + 332, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 428, + 332, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 332, + 439 + ], + "type": "text", + "content": "5.4 NON-DIFFERENTIABLE METRIC OPTIMIZATION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 451, + 504, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 504, + 594 + ], + "type": "text", + "content": "Non-differentiable metric optimization (Hiranandani et al., 2021; Huang et al., 2021), which has received a surging interest recently, can also be cast as a ZO optimization problem. We therefore use it to further demonstrate the superiority of our ZoRD to other ZO optimization algorithms. Specifically, we firstly train a multilayer perceptron (MLP) " + }, + { + "bbox": [ + 104, + 451, + 504, + 594 + ], + "type": "inline_equation", + "content": "(d = 2189)" + }, + { + "bbox": [ + 104, + 451, + 504, + 594 + ], + "type": "text", + "content": " on the Covertype (Dua and Graff, 2017) dataset with the cross-entropy loss function. Then, we use the same dataset to fine-tune this MLP model by exploiting ZO optimization algorithms to optimize a non-differentiable metric, such as precision, recall, F1 score and Jaccard index (see more details in Appx. C.4). 
Here we additionally compare with the evolutionary strategy (ES) which has been previously applied for non-differentiable metric optimization (Huang et al., 2021). Fig. 4 illustrates the percentage improvements achieved by different algorithms during the fine-tuning process (i.e., " + }, + { + "bbox": [ + 104, + 451, + 504, + 594 + ], + "type": "inline_equation", + "content": "(f(\\pmb{x}_0) - f(\\pmb{x}_T)) \\times 100\\% / f(\\pmb{x}_0)" + }, + { + "bbox": [ + 104, + 451, + 504, + 594 + ], + "type": "text", + "content": "). The results show that our ZoRD again consistently outperforms the other ZO optimization algorithms in terms of both the query efficiency and the final converged performance. These results therefore further substantiate the superiority of ZoRD in optimizing high-dimensional non-differentiable functions." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 616, + 195, + 627 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 195, + 627 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 195, + 627 + ], + "type": "text", + "content": "6 CONCLUSION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 504, + 732 + ], + "type": "text", + "content": "We have introduced the ZoRD algorithm, which achieves query-efficient ZO optimization through two major contributions. Firstly, we have proposed a novel derived GP-based method (6) which only uses the optimization trajectory and hence eliminates the requirement for additional queries (Sec. 3.1) to estimate derivatives. Secondly, we have introduced a novel technique, i.e., dynamic virtual updates, which is made possible by our GP-based derivative estimation, to further improve the performance of our ZoRD (Sec. 3.2). Through theoretical justifications (Sec. 4) and empirical demonstrations (Sec. 
5), we show that our derived GP-based derivative estimation improve over existing FD methods and that our ZoRD outperforms various ZO optimization baselines." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 287, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 287, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 287, + 94 + ], + "type": "text", + "content": "7 REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 506, + 140 + ], + "type": "text", + "content": "For our theoretical results, we have discussed all our assumptions in Sec. 2.1 & Sec. 4.2, and provided our complete proofs in Appx. B. For our empirical results, we have provided our detailed experimental settings in Appx. C and included our codes in the supplementary materials (i.e., the zip file)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 201, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 201, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 201, + 163 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 171, + 507, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 171, + 507, + 205 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 507, + 205 + ], + "type": "text", + "content": "This research is part of the programme DesCartes and is supported by the National Research Foundation, Prime Minister's Office, Singapore under its Campus for Research Excellence and Technological Enterprise (CREATE) programme." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 220, + 176, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 176, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 176, + 232 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 239, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 105, + 239, + 507, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 507, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 507, + 262 + ], + "type": "text", + "content": "Binxin Ru, Adam D. Cobb, Arno Blaas, and Yarin Gal. Bayesopt adversarial attack. In Proc. ICLR, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 268, + 507, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 268, + 507, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 507, + 293 + ], + "type": "text", + "content": "Gaurush Hiranandani, Jatin Mathur, Harikrishna Narasimhan, Mahdi Milani Fard, and Sanmi Koyejo. Optimizing black-box metrics with iterative example weighting. In Proc. 
ICML, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 298, + 505, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 298, + 505, + 322 + ], + "spans": [ + { + "bbox": [ + 106, + 298, + 505, + 322 + ], + "type": "text", + "content": "Tim Salimans, Jonathan Ho, Xi Chen, and Ilya Sutskever. Evolution strategies as a scalable alternative to reinforcement learning. arXiv:1703.03864, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 328, + 506, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 328, + 506, + 352 + ], + "spans": [ + { + "bbox": [ + 106, + 328, + 506, + 352 + ], + "type": "text", + "content": "Yurii E. Nesterov and Vladimir G. Spokoiny. Random gradient-free minimization of convex functions. Found. Comput. Math., 17(2):527-566, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 358, + 505, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 358, + 505, + 381 + ], + "spans": [ + { + "bbox": [ + 107, + 358, + 505, + 381 + ], + "type": "text", + "content": "Shuyu Cheng, Guoqiang Wu, and Jun Zhu. On the convergence of prior-guided zeroth-order optimization algorithms. In Proc. NeurIPS, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 388, + 506, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 388, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 107, + 388, + 506, + 422 + ], + "type": "text", + "content": "Albert S. Berahas, Liyuan Cao, Krzysztof Choromanski, and Katya Scheinberg. A theoretical and empirical comparison of gradient approximations in derivative-free optimization. Found. Comput. Math., 22(2):507-560, 2022." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 429, + 504, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 429, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 107, + 429, + 504, + 453 + ], + "type": "text", + "content": "Niranjan Srinivas, Andreas Krause, Sham M. Kakade, and Matthias W. Seeger. Gaussian process optimization in the bandit setting: No regret and experimental design. In Proc. ICML, 2010." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 459, + 504, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 459, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 107, + 459, + 504, + 483 + ], + "type": "text", + "content": "Kirthevasan Kandasamy, Akshay Krishnamurthy, Jeff Schneider, and Barnabás Póczos. Parallelised Bayesian optimisation via Thompson sampling. In Proc. AISTATS, 2018." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 489, + 506, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 489, + 506, + 512 + ], + "spans": [ + { + "bbox": [ + 107, + 489, + 506, + 512 + ], + "type": "text", + "content": "Carl Edward Rasmussen and Christopher K. I. Williams. Gaussian processes for machine learning. Adaptive computation and machine learning. MIT Press, 2006." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 518, + 504, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 518, + 504, + 542 + ], + "spans": [ + { + "bbox": [ + 107, + 518, + 504, + 542 + ], + "type": "text", + "content": "Abraham Flaxman, Adam Tauman Kalai, and H. Brendan McMahan. Online convex optimization in the bandit setting: Gradient descent without a gradient. In Proc. SODA, 2005." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 548, + 504, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 548, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 107, + 548, + 504, + 572 + ], + "type": "text", + "content": "Saeed Ghadimi and Guanghui Lan. Stochastic first- and zeroth-order methods for nonconvex stochastic programming. SIAM Journal on Optimization, 23(4):2341-2368, 2013." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 578, + 506, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 578, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 107, + 578, + 506, + 601 + ], + "type": "text", + "content": "Sijia Liu, Bhavya Kailkhura, Pin-Yu Chen, Pai-Shun Ting, Shiyu Chang, and Lisa Amini. Zeroth-order stochastic variance reduction for nonconvex optimization. In Proc. NeurIPS, 2018a." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 608, + 504, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 608, + 504, + 632 + ], + "spans": [ + { + "bbox": [ + 107, + 608, + 504, + 632 + ], + "type": "text", + "content": "Sijia Liu, Xingguo Li, Pin-Yu Chen, Jarvis D. Haupt, and Lisa Amini. Zeroth-order stochastic projected gradient descent for nonconvex optimization. In Proc. GlobalSIP, 2018b." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 638, + 506, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 638, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 107, + 638, + 506, + 671 + ], + "type": "text", + "content": "Xiangru Lian, Huan Zhang, Cho-Jui Hsieh, Yijun Huang, and Ji Liu. A comprehensive linear speedup analysis for asynchronous stochastic parallel optimization from zeroth-order to first-order. In Proc. NIPS, 2016." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 679, + 504, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 679, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 107, + 679, + 504, + 702 + ], + "type": "text", + "content": "Sashank J Reddi, Suvrit Sra, Barnabas Poczos, and Alexander J Smola. Proximal stochastic methods for nonsmooth nonconvex finite-sum optimization. In Proc. NIPS, 2016." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 107, + 709, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 709, + 504, + 732 + ], + "type": "text", + "content": "David Eriksson, Michael Pearce, Jacob R. Gardner, Ryan Turner, and Matthias Poloczek. Scalable global optimization via local Bayesian optimization. In Proc. NeurIPS, 2019." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 731 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + 
"content": "Daniel Golovin, John Karro, Greg Kochanski, Chansoo Lee, Xingyou Song, and Qiuyi (Richard) Zhang. Gradientless descent: High-dimensional zeroth-order optimization. In Proc. ICLR, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 110, + 505, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 110, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 107, + 110, + 505, + 133 + ], + "type": "text", + "content": "Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner. Gradient-based learning applied to document recognition. Proceedings of the IEEE, pages 2278-2324, 1998." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 138, + 506, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 138, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 506, + 161 + ], + "type": "text", + "content": "Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 166, + 504, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 166, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 106, + 166, + 504, + 189 + ], + "type": "text", + "content": "Chen Huang, Shuangfei Zhai, Pengsheng Guo, and Josh M. Susskind. Metricopt: Learning to optimize black-box evaluation metrics. In Proc. CVPR, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 194, + 507, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 507, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 507, + 217 + ], + "type": "text", + "content": "Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive.ics.uci.edu/ml." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 222, + 504, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 222, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 107, + 222, + 504, + 245 + ], + "type": "text", + "content": "Sebastian U Stich, Christian L Muller, and Bernd Gartner. Optimization of convex functions with random pursuit. SIAM Journal on Optimization, 23(2):1284-1309, 2013." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 250, + 505, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 250, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 107, + 250, + 505, + 272 + ], + "type": "text", + "content": "Sayak Ray Chowdhury and Aditya Gopalan. On kernelized multi-armed bandits. In Proc. ICML, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 278, + 504, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 278, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 107, + 278, + 504, + 301 + ], + "type": "text", + "content": "Zhongxiang Dai, Haibin Yu, Bryan Kian Hsiang Low, and Patrick Jaillet. Bayesian optimization meets Bayesian optimal stopping. In Proc. ICML, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 306, + 504, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 306, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 107, + 306, + 504, + 329 + ], + "type": "text", + "content": "Zhongxiang Dai, Bryan Kian Hsiang Low, and Patrick Jaillet. Federated bayesian optimization via thompson sampling. In Proc. NeurIPS, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 334, + 506, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 334, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 107, + 334, + 506, + 357 + ], + "type": "text", + "content": "Benjamin Letham, Roberto Calandra, Akshara Rai, and Eytan Bakshy. 
Re-examining linear embeddings for high-dimensional Bayesian optimization. In Proc. NeurIPS, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 362, + 504, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 362, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 107, + 362, + 504, + 384 + ], + "type": "text", + "content": "Andrew Ilyas, Logan Engstrom, and Aleksander Madry. Prior convictions: Black-box adversarial attacks with bandits and priors. In Proc. ICLR, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 390, + 504, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 390, + 504, + 413 + ], + "spans": [ + { + "bbox": [ + 107, + 390, + 504, + 413 + ], + "type": "text", + "content": "Florian Meier, Asier Mujika, Marcelo Matheus Gauy, and Angelika Steger. Improving gradient estimation in evolutionary strategies with past descent directions. arXiv:1910.05268, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 418, + 506, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 418, + 506, + 450 + ], + "spans": [ + { + "bbox": [ + 107, + 418, + 506, + 450 + ], + "type": "text", + "content": "Niru Maheswaranathan, Luke Metz, George Tucker, Dami Choi, and Jascha Sohl-Dickstein. Guided evolutionary strategies: Augmenting random search with surrogate gradients. In Proc. ICML, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 457, + 504, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 457, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 107, + 457, + 504, + 479 + ], + "type": "text", + "content": "Shuyu Cheng, Yinpeng Dong, Tianyu Pang, Hang Su, and Jun Zhu. Improving black-box adversarial attacks with a transfer-based prior. In NeurIPS, 2019." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 485, + 506, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 485, + 506, + 508 + ], + "spans": [ + { + "bbox": [ + 107, + 485, + 506, + 508 + ], + "type": "text", + "content": "Beatrice Laurent and Pascal Massart. Adaptive estimation of a quadratic functional by model selection. Annals of Statistics, pages 1302-1338, 2000." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 513, + 506, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 513, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 107, + 513, + 506, + 536 + ], + "type": "text", + "content": "Sayak Ray Chowdhury and Aditya Gopalan. No-regret algorithms for multi-task Bayesian optimization. In Proc. AISTATS, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 540, + 506, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 540, + 506, + 553 + ], + "spans": [ + { + "bbox": [ + 107, + 540, + 506, + 553 + ], + "type": "text", + "content": "Stephen P. Boyd and Lieven Vandenberghe. Convex Optimization. Cambridge University Press, 2014." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 559, + 506, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 559, + 506, + 580 + ], + "spans": [ + { + "bbox": [ + 107, + 559, + 506, + 580 + ], + "type": "text", + "content": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proc. ICLR, 2015." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 586, + 504, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 586, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 107, + 586, + 504, + 609 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proc. CVPR, 2016." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 614, + 504, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 614, + 504, + 637 + ], + "spans": [ + { + "bbox": [ + 107, + 614, + 504, + 637 + ], + "type": "text", + "content": "Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. OpenAI Gym. arXiv:1606.01540, 2016." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 107, + 643, + 504, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 643, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 107, + 643, + 504, + 675 + ], + "type": "text", + "content": "M. D. McKay, R. J. Beckman, and W. J. Conover. A comparison of three methods for selecting values of input variables in the analysis of output from a computer code. Technometrics, 21(2): 239-245, 1979." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 107, + 681, + 504, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 681, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 107, + 681, + 504, + 704 + ], + "type": "text", + "content": "Jian Tan, Niv Nayman, and Mengchang Wang. CobBO: Coordinate backoff Bayesian optimization with two-stage kernels. arXiv:2101.05147, 2021." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 709, + 506, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 709, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 709, + 506, + 731 + ], + "type": "text", + "content": "Hong Qian and Yang Yu. Derivative-free reinforcement learning: A review. Frontiers Comput. Sci., 15(6):156336, 2021." 
+ } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 271, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 271, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 271, + 94 + ], + "type": "text", + "content": "APPENDIX A RELATED WORK" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 105, + 506, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 105, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 105, + 506, + 425 + ], + "type": "text", + "content": "Various types of algorithms have been proposed in the literature to solve ZO optimization problems, e.g., direct search, Bayesian optimization (BO) and GD-based algorithms with estimated derivatives. Particularly, direct search, e.g., (Stich et al., 2013; Golovin et al., 2020), relies on the comparison of function values at different inputs for the updates, which can be query-inefficient in practice owing to its indirect utilization of function values. 
In contrast, Bayesian optimization (BO) directly utilizes the function values to model the objective function using a Gaussian process (GP) and iteratively selects the inputs to query by trading off sampling potentially optimal inputs (i.e., exploitation) and inputs that can improve the GP belief of the objective function over the entire input domain (i.e., exploration) (Chowdhury and Gopalan, 2017; Srinivas et al., 2010; Dai et al., 2019; 2020). However, in ZO optimization problems with high-dimensional input spaces, BO algorithms typically suffer from query inefficiency and large computational complexity (Rasmussen and Williams, 2006; Letham et al., 2020; Eriksson et al., 2019), which significantly hinders their real-world applications. Therefore, GD-based algorithms with estimated derivatives, which inherit the advantage of GD-based algorithms in optimizing functions with high-dimensional input spaces, have been more widely applied in practice. For these algorithms, the derivatives are commonly estimated using the finite difference (FD) approximation (which requires additional function queries) of the directional derivatives along selected directions, in which the directions can be randomly sampled unit vectors Flaxman et al. (2005), Gaussian vectors (Nesterov and Spokoiny, 2017), or standard bases (Lian et al., 2016) (Sec. 2.2). More recently, some works have incorporated a time-dependent prior (i.e., the estimated derivative in the previous iteration) into existing FD methods to improve the quality of its derivative estimation (Ilyas et al., 2019; Meier et al., 2019; Cheng et al., 2021). Nevertheless, such a prior is also estimated by the FD method (i.e., in the previous iteration) and can hence be biased owing to the its estimation error, which may even lead to larger derivative estimation errors in practice due to compounding errors. 
Another line of work has taken the surrogate derivatives from other sources to help reduce the derivative estimation error of existing FD methods (Maheswaranathan et al., 2019; Cheng et al., 2019). However, these surrogate derivatives may generally be unavailable in practice. Importantly, these existing FD methods require additional function queries for every derivation estimation during optimization, which will significantly increase the query complexity of ZO optimization algorithms which employ these FD methods for derivative estimation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 439, + 227, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 439, + 227, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 227, + 451 + ], + "type": "text", + "content": "APPENDIX B PROOFS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 464, + 220, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 220, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 220, + 475 + ], + "type": "text", + "content": "B.1 PROOF OF LEMMA 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 484, + 506, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 506, + 507 + ], + "type": "text", + "content": "According to Rasmussen and Williams (2006), if a function " + }, + { + "bbox": [ + 104, + 484, + 506, + 507 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 484, + 506, + 507 + ], + "type": "text", + "content": " follows from a Gaussian process, its derivative also follows a Gaussian process determined by its mean " + }, + { + "bbox": [ + 104, + 484, + 506, + 507 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\cdot]" + }, + { + "bbox": [ + 104, + 484, + 506, + 507 + ], + "type": "text", + "content": " and covariance " + }, + { + "bbox": [ + 104, + 
484, + 506, + 507 + ], + "type": "inline_equation", + "content": "\\mathrm{Cov}(\\cdot, \\cdot)" + }, + { + "bbox": [ + 104, + 484, + 506, + 507 + ], + "type": "text", + "content": ", i.e.," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 228, + 509, + 504, + 522 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 509, + 504, + 522 + ], + "spans": [ + { + "bbox": [ + 228, + 509, + 504, + 522 + ], + "type": "interline_equation", + "content": "\\nabla f \\sim \\mathcal {G P} (\\mathbb {E} [ \\nabla f ], \\operatorname {C o v} (\\nabla f, \\nabla f)). \\tag {9}", + "image_path": "491c5257074bd61b56d56c5dbc84dd11f5bc86d0964b1896dd4a6519f7497f55.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 530, + 506, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 506, + 563 + ], + "type": "text", + "content": "So, to prove Lemma 1, we only need to derive the mean and the covariance of the Gaussian process above for a function " + }, + { + "bbox": [ + 104, + 530, + 506, + 563 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 530, + 506, + 563 + ], + "type": "text", + "content": " that is sampled from another Gaussian process, i.e., " + }, + { + "bbox": [ + 104, + 530, + 506, + 563 + ], + "type": "inline_equation", + "content": "f \\sim \\mathcal{GP}(\\mu(\\cdot), \\sigma^2(\\cdot, \\cdot))" + }, + { + "bbox": [ + 104, + 530, + 506, + 563 + ], + "type": "text", + "content": ". 
Specifically, for the mean " + }, + { + "bbox": [ + 104, + 530, + 506, + 563 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\nabla f]" + }, + { + "bbox": [ + 104, + 530, + 506, + 563 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 251, + 566, + 504, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 566, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 251, + 566, + 504, + 578 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ \\nabla f ] = \\nabla \\mathbb {E} [ f ] = \\nabla \\mu . \\tag {10}", + "image_path": "7906cc94adb8ae462042a2f6dbea37ff977ca42ef5543b88b27dc4a29ab51afa.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 582, + 504, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 605 + ], + "type": "text", + "content": "where the first equality derives from the interchangeability of the expectation and derivative operation based on the Leibniz integral rule. The second equality comes from the fact that " + }, + { + "bbox": [ + 104, + 582, + 504, + 605 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[f] = \\mu" + }, + { + "bbox": [ + 104, + 582, + 504, + 605 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 609, + 280, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 280, + 621 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 280, + 621 + ], + "type": "text", + "content": "For the covariance " + }, + { + "bbox": [ + 105, + 609, + 280, + 621 + ], + "type": "inline_equation", + "content": "\\mathrm{Cov}(\\nabla f,\\nabla f)" + }, + { + "bbox": [ + 105, + 609, + 280, + 621 + ], + "type": "text", + "content": " , we have" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 146, + 625, + 504, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 625, + 504, + 735 + ], + "spans": [ + { + "bbox": [ + 146, + 625, + 504, + 735 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\operatorname {C o v} \\left(\\nabla f (\\boldsymbol {z}), \\nabla f \\left(\\boldsymbol {z} ^ {\\prime}\\right)\\right) \\stackrel {(a)} {=} \\mathbb {E} \\left[ \\left(\\nabla f (\\boldsymbol {z}) - \\mathbb {E} \\left[ \\nabla f (\\boldsymbol {z}) \\right]\\right) ^ {\\top} \\left(\\nabla f \\left(\\boldsymbol {z} ^ {\\prime}\\right) - \\mathbb {E} \\left[ \\nabla f \\left(\\boldsymbol {z} ^ {\\prime}\\right) \\right]\\right) \\right] \\\\ \\stackrel {(b)} {=} \\mathbb {E} \\left[ \\nabla \\left(f (\\boldsymbol {z}) - \\mathbb {E} [ f (\\boldsymbol {z}) ]\\right) ^ {\\top} \\nabla \\left(f \\left(\\boldsymbol {z} ^ {\\prime}\\right) - \\mathbb {E} [ f \\left(\\boldsymbol {z} ^ {\\prime}\\right) ]\\right) \\right] \\\\ \\stackrel {(c)} {=} \\mathbb {E} \\left[ \\partial_ {\\boldsymbol {z}} \\partial_ {\\boldsymbol {z} ^ {\\prime}} \\left(f (\\boldsymbol {z}) - \\mathbb {E} [ f (\\boldsymbol {z}) ]\\right) ^ {\\top} \\left(f \\left(\\boldsymbol {z} ^ {\\prime}\\right) - \\mathbb {E} [ f \\left(\\boldsymbol {z} ^ {\\prime}\\right) ]\\right) \\right] \\tag {11} \\\\ \\stackrel {(d)} {=} \\partial_ {\\pmb {z}} \\partial_ {\\pmb {z} ^ {\\prime}} 
\\mathbb {E} \\left[ \\left(f (\\pmb {z}) - \\mathbb {E} \\left[ f (\\pmb {z}) \\right]\\right) ^ {\\top} \\left(f (\\pmb {z} ^ {\\prime}) - \\mathbb {E} \\left[ f (\\pmb {z} ^ {\\prime}) \\right]\\right) \\right] \\\\ \\stackrel {(e)} {=} \\partial_ {\\boldsymbol {z}} \\partial_ {\\boldsymbol {z} ^ {\\prime}} \\sigma_ {t} ^ {2} (\\boldsymbol {z}, \\boldsymbol {z} ^ {\\prime}) . \\\\ \\end{array}", + "image_path": "b208a5d9e7ada8a6d340bd32e77ed83b5c83ebe5291583edbf51d9f02004719f.jpg" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "Notably, " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "(d)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " also derive from the interchangeability of the expectation and derivative operation based on the Leibniz integral rule. 
Besides, " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "(e)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " is obtained based on " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\operatorname{Cov}(f, f) = \\sigma^2(\\cdot, \\cdot)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": ". This finally completes our proof." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 131, + 230, + 143 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 131, + 230, + 143 + ], + "spans": [ + { + "bbox": [ + 105, + 131, + 230, + 143 + ], + "type": "text", + "content": "B.2 PROOF OF THEOREM 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 152, + 504, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 504, + 175 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 504, + 175 + ], + "type": "text", + "content": "To begin with, we introduce the following concentration inequality for standard multi-variate Gaussian distribution:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 179, + 440, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 179, + 440, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 179, + 440, + 192 + ], + "type": "text", + "content": "Lemma B.1 (Laurent and Massart (2000)). 
Let " + }, + { + "bbox": [ + 104, + 179, + 440, + 192 + ], + "type": "inline_equation", + "content": "\\zeta \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I}_m)" + }, + { + "bbox": [ + 104, + 179, + 440, + 192 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 179, + 440, + 192 + ], + "type": "inline_equation", + "content": "\\delta \\in (0,1)" + }, + { + "bbox": [ + 104, + 179, + 440, + 192 + ], + "type": "text", + "content": " then" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 199, + 200, + 504, + 227 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 200, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 199, + 200, + 504, + 227 + ], + "type": "interline_equation", + "content": "\\mathbb {P} \\left(\\| \\boldsymbol {\\zeta} \\| _ {2} \\leq \\sqrt {m + 2 (\\sqrt {m} + 1) \\ln (1 / \\delta)}\\right) \\geq 1 - \\delta . \\tag {12}", + "image_path": "7b785a0d7fe6dcdb69f90c26abd4b294a223be4b748a4379494047edd2024c7c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 241, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 241, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 241, + 504, + 266 + ], + "type": "text", + "content": "Define " + }, + { + "bbox": [ + 104, + 241, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\zeta \\triangleq \\left(\\partial \\sigma_t^2 (\\pmb {x})\\right)^{-1 / 2}\\left(\\nabla f(\\pmb {x}) - \\nabla \\mu_t(\\pmb {x})\\right)" + }, + { + "bbox": [ + 104, + 241, + 504, + 266 + ], + "type": "text", + "content": ", according to Lemma 1, we then have that " + }, + { + "bbox": [ + 104, + 241, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 241, + 504, + 266 + ], + "type": "text", + "content": " follows a standard multi-variate Gaussian distribution, i.e.," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 272, + 274, + 504, + 287 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 274, + 504, + 287 + ], + "spans": [ + { + "bbox": [ + 272, + 274, + 504, + 287 + ], + "type": "interline_equation", + "content": "\\zeta \\sim \\mathcal {N} (\\mathbf {0}, \\mathbf {I} _ {d}). \\tag {13}", + "image_path": "10cbb1289c874ead159802e35280d4c530a89cefc023995572ede547e424eaeb.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "type": "inline_equation", + "content": "\\delta \\in (0,1)" + }, + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "type": "text", + "content": ". By substituting the result above into Lemma B.1, the following holds with probability of at least " + }, + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "type": "inline_equation", + "content": "1 - \\delta" + }, + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 173, + 323, + 504, + 425 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 323, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 173, + 323, + 504, + 425 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\nabla f (\\boldsymbol {x}) - \\nabla \\mu_ {t} (\\boldsymbol {x}) \\right\\| _ {2} = \\left\\| \\left(\\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x})\\right) ^ {- 1 / 2} \\boldsymbol {\\zeta} \\right\\| _ {2} \\\\ \\leq \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\| \\zeta \\| _ {2} \\tag {14} \\\\ \\leq \\sqrt {d + 2 (\\sqrt {d} + 1) \\ln (1 / \\delta)} \\sqrt {\\| \\partial \\sigma_ {t} ^ {2} (\\pmb {x}) \\| _ {2}} \\\\ = \\beta \\sqrt {\\| \\partial \\sigma_ {t} ^ {2} (\\pmb {x}) 
\\| _ {2}} \\\\ \\end{array}", + "image_path": "ac0fa7603df86535f3d53b0e301a0a8544f7442c61f84e3db87f3e7d1170389a.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 434, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 506, + 464 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 104, + 434, + 506, + 464 + ], + "type": "inline_equation", + "content": "\\beta \\triangleq \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(1 / \\delta)}" + }, + { + "bbox": [ + 104, + 434, + 506, + 464 + ], + "type": "text", + "content": " and the first inequality is from the Cauchy-Schwarz inequality, which completes our proof." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 477, + 230, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 477, + 230, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 477, + 230, + 488 + ], + "type": "text", + "content": "B.3 PROOF OF THEOREM 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 499, + 270, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 499, + 270, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 270, + 511 + ], + "type": "text", + "content": "We first introduce the following lemmas." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 514, + 506, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 528 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 528 + ], + "type": "text", + "content": "Lemma B.2 (Chowdhury and Gopalan (2021)). 
For any " + }, + { + "bbox": [ + 104, + 514, + 506, + 528 + ], + "type": "inline_equation", + "content": "\\sigma \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 514, + 506, + 528 + ], + "type": "text", + "content": " and any matrix " + }, + { + "bbox": [ + 104, + 514, + 506, + 528 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 104, + 514, + 506, + 528 + ], + "type": "text", + "content": ", the following hold" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 188, + 534, + 504, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 534, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 188, + 534, + 504, + 552 + ], + "type": "interline_equation", + "content": "\\mathbf {I} - \\mathbf {A} ^ {\\top} \\left(\\mathbf {A} \\mathbf {A} ^ {\\top} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\mathbf {A} = \\sigma^ {2} \\left(\\mathbf {A} ^ {\\top} \\mathbf {A} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1}. \\tag {15}", + "image_path": "abe31da82cc067663f6c5744abe6448aace466f5f96e64ed6b8c7d0305ada54a.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 559, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 504, + 581 + ], + "type": "text", + "content": "Lemma B.3 (Sherman-Morrison formula). 
For any invertible square matrix " + }, + { + "bbox": [ + 104, + 559, + 504, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 104, + 559, + 504, + 581 + ], + "type": "text", + "content": " and column vectors " + }, + { + "bbox": [ + 104, + 559, + 504, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{u},\\mathbf{v}" + }, + { + "bbox": [ + 104, + 559, + 504, + 581 + ], + "type": "text", + "content": ", suppose " + }, + { + "bbox": [ + 104, + 559, + 504, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{A} + \\mathbf{u}\\mathbf{v}^{\\top}" + }, + { + "bbox": [ + 104, + 559, + 504, + 581 + ], + "type": "text", + "content": " is invertible, then the following holds" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 217, + 588, + 504, + 615 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 588, + 504, + 615 + ], + "spans": [ + { + "bbox": [ + 217, + 588, + 504, + 615 + ], + "type": "interline_equation", + "content": "\\left(\\mathbf {A} + \\boldsymbol {u} \\boldsymbol {v} ^ {\\top}\\right) ^ {- 1} = \\mathbf {A} ^ {- 1} - \\frac {\\mathbf {A} ^ {- 1} \\boldsymbol {u} \\boldsymbol {v} ^ {\\top} \\mathbf {A} ^ {- 1}}{1 + \\boldsymbol {v} ^ {\\top} \\mathbf {A} ^ {- 1} \\boldsymbol {u}}. \\tag {16}", + "image_path": "54dffcfd24cceae485d50cdd97d0e0ac7b9c5f7877d7124d00962dff5240bb34.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": "**Preparation.** We then introduce some additional notations and representations for our proof of Theorem 2. 
Following the common practice in (Chowdhury and Gopalan, 2021), we let the kernel " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": " be defined by " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "\\psi(\\pmb{x})" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "k(\\pmb{x},\\pmb{x}^{\\prime}) = \\psi(\\pmb{x})^{\\top}\\psi(\\pmb{x}^{\\prime})" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "\\phi(\\pmb{x}) \\triangleq \\nabla \\psi(\\pmb{x})" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": ". We then further define the " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "(t\\times d)" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": "-dimensional Jacobian matrix " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "\\phi_t(\\pmb{x}) \\triangleq [\\phi(\\pmb{x})^\\top \\psi(\\pmb{x}_\\tau)]_{\\tau=1}^t" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "\\Psi_t \\triangleq [\\psi(\\pmb{x}_\\tau)]_{\\tau=1}^t" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": ". 
The matrix " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_t" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": " and the covariance matrix " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_t^2(\\pmb{x})" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": " defined on the optimization trajectory " + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t" + }, + { + "bbox": [ + 104, + 627, + 505, + 696 + ], + "type": "text", + "content": " in our Sec. 3.1 can be reformulated as" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 190, + 702, + 504, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 702, + 504, + 735 + ], + "spans": [ + { + "bbox": [ + 190, + 702, + 504, + 735 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {K} _ {t} = \\boldsymbol {\\Psi} _ {t} ^ {\\top} \\boldsymbol {\\Psi} _ {t}, \\tag {17} \\\\ \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) = \\phi (\\boldsymbol {x}) ^ {\\top} \\phi (\\boldsymbol {x}) - \\phi_ {t} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\phi_ {t} (\\boldsymbol {x}). 
\\\\ \\end{array}", + "image_path": "20b8737d24ba84522031ae97774f16fbac5009170d05a235935aaad564723609.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 103 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 103 + ], + "type": "text", + "content": "Based on the reformulation above, define " + }, + { + "bbox": [ + 104, + 81, + 504, + 103 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_t \\triangleq \\boldsymbol{\\Psi}_t \\boldsymbol{\\Psi}_t^\\top + \\sigma^2 \\mathbf{I}" + }, + { + "bbox": [ + 104, + 81, + 504, + 103 + ], + "type": "text", + "content": ", we can further reformulate " + }, + { + "bbox": [ + 104, + 81, + 504, + 103 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_t^2(\\boldsymbol{x})" + }, + { + "bbox": [ + 104, + 81, + 504, + 103 + ], + "type": "text", + "content": " as below" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 173, + 106, + 504, + 206 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 106, + 504, + 206 + ], + "spans": [ + { + "bbox": [ + 173, + 106, + 504, + 206 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial \\sigma_ {t} 
^ {2} (\\boldsymbol {x}) \\stackrel {(a)} {=} \\phi (\\boldsymbol {x}) ^ {\\top} \\phi (\\boldsymbol {x}) - \\phi_ {t} (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {K} _ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\phi_ {t} (\\boldsymbol {x}) \\\\ \\stackrel {(b)} {=} \\phi (\\boldsymbol {x}) ^ {\\top} \\phi (\\boldsymbol {x}) - \\phi (\\boldsymbol {x}) ^ {\\top} \\Psi_ {t} \\left(\\Psi_ {t} ^ {\\top} \\Psi_ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\Psi_ {t} ^ {\\top} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(c)} {=} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {I} - \\boldsymbol {\\Psi} _ {t} \\left(\\boldsymbol {\\Psi} _ {t} ^ {\\top} \\boldsymbol {\\Psi} _ {t} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\boldsymbol {\\Psi} _ {t} ^ {\\top}\\right) \\phi (\\boldsymbol {x}) \\tag {18} \\\\ \\stackrel {(d)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\Psi_ {t} \\Psi_ {t} ^ {\\top} + \\sigma^ {2} \\mathbf {I}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(e)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1} \\phi (\\boldsymbol {x}). 
\\\\ \\end{array}", + "image_path": "4dd2cff4df6bba165338b743486c5c6a3a7f894c9cd29d9ffeaf4fe435cc6eed.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "text", + "content": " is obtained by exploiting the fact that " + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_t = \\boldsymbol{\\Psi}_t^\\top \\boldsymbol{\\Psi}_t" + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "inline_equation", + "content": "\\phi_t(\\boldsymbol{x}) = \\phi(\\boldsymbol{x})^\\top \\boldsymbol{\\Psi}_t" + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "text", + "content": ". In addition, " + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "inline_equation", + "content": "(d)" + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "text", + "content": " comes from Lemma B.2 by replacing the matrix " + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "text", + "content": " in Lemma B.2 with the matrix " + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Psi}_t^\\top" + }, + { + "bbox": [ + 104, + 212, + 506, + 237 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 249, + 488, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 249, + 488, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 488, + 262 + ], + "type": "text", + "content": "First Part. We then prove the first half part of our Theorem 2, i.e., the following Lemma B.4." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 265, + 476, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 476, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 476, + 277 + ], + "type": "text", + "content": "Lemma B.4 (Non-Increasing Variance Norm). For any " + }, + { + "bbox": [ + 104, + 265, + 476, + 277 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 265, + 476, + 277 + ], + "type": "text", + "content": " and any " + }, + { + "bbox": [ + 104, + 265, + 476, + 277 + ], + "type": "inline_equation", + "content": "t \\geq 1" + }, + { + "bbox": [ + 104, + 265, + 476, + 277 + ], + "type": "text", + "content": ", we have that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 242, + 282, + 504, + 299 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 282, + 504, + 299 + ], + "spans": [ + { + "bbox": [ + 242, + 282, + 504, + 299 + ], + "type": "interline_equation", + "content": "\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}. \\tag {19}", + "image_path": "ea4505c0bbe5a5187a0b229c3a0183fd1e4c8aea0e7b61b62176410e889ec44d.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 314, + 390, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 390, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 390, + 326 + ], + "type": "text", + "content": "Proof. 
Based on our additional notations and representations, we have" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 334, + 524, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 334, + 524, + 464 + ], + "spans": [ + { + "bbox": [ + 106, + 334, + 524, + 464 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\stackrel {(a)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(b)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\boldsymbol {\\Psi} _ {t - 1} \\boldsymbol {\\Psi} _ {t - 1} ^ {\\top} + \\sigma^ {2} \\mathbf {I} + \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(c)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {V} _ {t - 1} + \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(d)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) - \\sigma^ {2} \\left(1 + \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t})\\right) ^ {- 1} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(e)} {=} \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) - \\sigma^ {2} \\left(1 + \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t})\\right) ^ {- 1} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(f)} {\\preccurlyeq} \\partial 
\\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}). \\tag {20} \\\\ \\end{array}", + "image_path": "320dd776dcfcd3912eb03ad0952e9f1fdf5ba788d65326ba92fe58f7698fa369.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": "Note that (a) follows from the aforementioned definition of " + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_t" + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": " and (b) comes from the fact that " + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "inline_equation", + "content": "\\Psi_t\\Psi_t^\\top = \\Psi_{t - 1}\\Psi_{t - 1}^\\top +\\psi (\\pmb {x}_t)\\psi (\\pmb {x}_t)^\\top" + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": ". Similarly, (c) uses the definition of " + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{t - 1}" + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": ". 
In addition, equality (d) derives from Lemma B.3 by letting " + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "inline_equation", + "content": "\\mathbf{A} = \\mathbf{V}_{t - 1}" + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "inline_equation", + "content": "\\pmb {u} = \\pmb {v} = \\psi (\\pmb {x}_t)" + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": " and (e) follows from the reformulation of " + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_{t - 1}^2 (\\pmb {x})" + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": " in (18). Finally, (f) derives from the positive semi-definite property of " + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "inline_equation", + "content": "\\phi (\\pmb {x})^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t)\\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\phi (\\pmb {x})" + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": " as well as the fact that " + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "inline_equation", + "content": "1 + \\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t) > 0" + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": ". 
That is, for any column vector " + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "inline_equation", + "content": "\\textbf{z}" + }, + { + "bbox": [ + 104, + 464, + 506, + 533 + ], + "type": "text", + "content": " we have that" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 540, + 504, + 594 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 540, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 111, + 540, + 504, + 594 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\boldsymbol {z} ^ {\\top} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z} = \\left(\\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z}\\right) ^ {\\top} \\left(\\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z}\\right) \\\\ = \\left\\| \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) \\boldsymbol {z} \\right\\| _ {2} ^ {2} \\tag {21} \\\\ \\geq 0. \\\\ \\end{array}", + "image_path": "824c8dcc9c1883c690621fc74cb94e796dcdab69c6f101232bbf3e49c74913e3.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "text", + "content": "So, " + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "inline_equation", + "content": "\\phi (\\pmb {x})^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t)\\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\phi (\\pmb {x})" + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "text", + "content": " is positive semi-definite. 
Following a similar way, we are also able to verify that " + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "inline_equation", + "content": "1 + \\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t) > 0" + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "text", + "content": " by showing that " + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "inline_equation", + "content": "\\psi (\\pmb {x}_t)^{\\top}\\mathbf{V}_{t - 1}^{-1}\\psi (\\pmb {x}_t)\\geq 0" + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "text", + "content": " using the decomposition of " + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{t - 1}^{-1}" + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "text", + "content": " from the Principal Component Analysis (PCA). Since " + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_t^2 (\\pmb {x})\\preccurlyeq \\partial \\sigma_{t - 1}^2 (\\pmb {x})" + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "text", + "content": " is equivalent to " + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "inline_equation", + "content": "\\left\\| \\partial \\sigma_t^2 (\\pmb {x})\\right\\| _2\\leq \\left\\| \\partial \\sigma_{t - 1}^2 (\\pmb {x})\\right\\| _2" + }, + { + "bbox": [ + 104, + 604, + 506, + 667 + ], + "type": "text", + "content": ", we then complete the proof of first half part of our Theorem 2." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 685, + 485, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 685, + 485, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 485, + 696 + ], + "type": "text", + "content": "Second Part. To prove the rest of our Theorem 2, we firstly introduce the following lemmas." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 367, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 367, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 367, + 712 + ], + "type": "text", + "content": "Lemma B.5. For any " + }, + { + "bbox": [ + 104, + 700, + 367, + 712 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 700, + 367, + 712 + ], + "type": "text", + "content": " and any " + }, + { + "bbox": [ + 104, + 700, + 367, + 712 + ], + "type": "inline_equation", + "content": "t \\geq 1" + }, + { + "bbox": [ + 104, + 700, + 367, + 712 + ], + "type": "text", + "content": ", the following holds" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 272, + 719, + 504, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 719, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 272, + 719, + 504, + 734 + ], + "type": "interline_equation", + "content": "\\mathbf {V} _ {t} ^ {- 1} \\preccurlyeq \\mathbf {V} _ {t - 1} ^ {- 1}. 
\\tag {22}", + "image_path": "10ac5047a522d2340db1d4910aea5b2be0cd3466e3875c0258e3e7b85ee74a89.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 275, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 275, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 275, + 94 + ], + "type": "text", + "content": "Proof. 
For any column vector " + }, + { + "bbox": [ + 105, + 83, + 275, + 94 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 105, + 83, + 275, + 94 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 211, + 100, + 504, + 164 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 100, + 504, + 164 + ], + "spans": [ + { + "bbox": [ + 211, + 100, + 504, + 164 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\boldsymbol {z} ^ {\\top} \\left(\\mathbf {V} _ {t} - \\mathbf {V} _ {t - 1}\\right) \\boldsymbol {z} = \\boldsymbol {z} ^ {\\top} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\boldsymbol {z} \\\\ = \\left(\\psi \\left(\\boldsymbol {x} _ {t}\\right) ^ {\\top} \\boldsymbol {z}\\right) ^ {\\top} \\left(\\psi \\left(\\boldsymbol {x} _ {t}\\right) ^ {\\top} \\boldsymbol {z}\\right) \\tag {23} \\\\ = \\left\\| \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\boldsymbol {z} \\right\\| _ {2} ^ {2} \\\\ \\geq 0. \\\\ \\end{array}", + "image_path": "f068210516107b2bee0c861372a7ab991b1247b88940b676ae7d9f2ff5d07310.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 506, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 196 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 196 + ], + "type": "text", + "content": "The first equality comes from the intermediate result in (20). So, " + }, + { + "bbox": [ + 104, + 171, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_t - \\mathbf{V}_{t-1}" + }, + { + "bbox": [ + 104, + 171, + 506, + 196 + ], + "type": "text", + "content": " is positive semi-definite, i.e., " + }, + { + "bbox": [ + 104, + 171, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{t-1} \\preccurlyeq \\mathbf{V}_t" + }, + { + "bbox": [ + 104, + 171, + 506, + 196 + ], + "type": "text", + "content": ". 
This can also indicate that " + }, + { + "bbox": [ + 104, + 171, + 506, + 196 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_t^{-1} \\preccurlyeq \\mathbf{V}_{t-1}^{-1}" + }, + { + "bbox": [ + 104, + 171, + 506, + 196 + ], + "type": "text", + "content": ", which thus completes our proof." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 201, + 504, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 201, + 504, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 504, + 213 + ], + "type": "text", + "content": "Lemma B.6 (Lower Bound of Variance Norm). For any " + }, + { + "bbox": [ + 104, + 201, + 504, + 213 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 201, + 504, + 213 + ], + "type": "text", + "content": " and any " + }, + { + "bbox": [ + 104, + 201, + 504, + 213 + ], + "type": "inline_equation", + "content": "t \\geq 1" + }, + { + "bbox": [ + 104, + 201, + 504, + 213 + ], + "type": "text", + "content": ", the following holds" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 216, + 217, + 504, + 233 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 217, + 504, + 233 + ], + "spans": [ + { + "bbox": [ + 216, + 217, + 504, + 233 + ], + "type": "interline_equation", + "content": "1 / \\left(1 + 1 / \\sigma^ {2}\\right) \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}. \\tag {24}", + "image_path": "7a0cf811707993284dafc074a6d31637167da2e078252185b63c2b77d83c567e.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 243, + 218, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 243, + 218, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 218, + 255 + ], + "type": "text", + "content": "Proof. 
We firstly show that" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 169, + 262, + 503, + 429 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 262, + 503, + 429 + ], + "spans": [ + { + "bbox": [ + 169, + 262, + 503, + 429 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\right\\| _ {2} \\stackrel {(a)} {\\leq} \\left\\| \\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\right\\| _ {2} \\left\\| \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\right\\| _ {2} \\\\ \\stackrel {(b)} {=} \\left\\| \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\right\\| _ {2} ^ {2} \\\\ \\stackrel {(c)} {=} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\\\ \\stackrel {(d)} {=} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1} \\psi (\\boldsymbol {x}) \\tag {25} \\\\ \\stackrel {(e)} {\\leq} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\psi (\\boldsymbol {x}) \\\\ \\stackrel {(f)} {\\leq} \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {0} ^ {- 1} \\psi (\\boldsymbol {x}) \\\\ \\stackrel {(g)} {=} \\psi (\\boldsymbol {x}) ^ {\\top} \\psi (\\boldsymbol {x}) / \\sigma^ {2} \\\\ \\stackrel {(h)} {\\leq} 1 / \\sigma^ {2}. 
\\\\ \\end{array}", + "image_path": "4adb8d5eab8a9b2a9725344e760341abb850e5c6382e2bd4b112c2bcc58b3780.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": " derives from the Cauchy-Schwarz inequality. As for " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": ", they have exploited the fact that " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "\\left(\\mathbf{V}_t^{-1/2}\\psi(\\boldsymbol{x})\\right)^\\top = \\psi(\\boldsymbol{x})^\\top\\mathbf{V}_t^{-1/2}" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "\\psi(\\boldsymbol{x})^\\top\\mathbf{V}_t^{-1/2}" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": " is a row vector. In addition, " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "(e)" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": " follows from Lemma B.5. 
Finally, " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "(g)" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": " results from " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_0^{-1} = \\mathbf{I}/\\sigma^2" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "(h)" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": " derives from the assumption that " + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "k(\\boldsymbol{x},\\boldsymbol{x}) \\leq 1" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "inline_equation", + "content": "\\forall \\boldsymbol{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 436, + 506, + 493 + ], + "type": "text", + "content": ") in Sec. 2.1. Alternatively, we can restate the result above as" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 233, + 498, + 504, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 498, + 504, + 514 + ], + "spans": [ + { + "bbox": [ + 233, + 498, + 504, + 514 + ], + "type": "interline_equation", + "content": "\\mathbf {V} _ {t} ^ {- 1 / 2} \\psi (\\boldsymbol {x}) \\psi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t} ^ {- 1 / 2} \\preccurlyeq \\sigma^ {- 2} \\mathbf {I}. 
\\tag {26}", + "image_path": "76c37f412cec0f87a8f1450cd84b4116ac713ac61fae8bded56a77c1f63a4e07.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 525, + 492, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 492, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 492, + 538 + ], + "type": "text", + "content": "We then complete our proof on the first inequality in Lemma B.6 using the following inequality:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 148, + 544, + 504, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 544, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 148, + 544, + 504, + 647 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\stackrel {(a)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left(\\mathbf {V} _ {t - 1} + \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top}\\right) ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(b)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\left[ \\mathbf {V} _ {t - 1} ^ {1 / 2} \\left(\\mathbf {I} + \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\psi (\\boldsymbol {x} _ {t}) \\psi (\\boldsymbol {x} _ {t}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1 / 2}\\right) \\mathbf {V} _ {t - 1} ^ {1 / 2} \\right] ^ {- 1} \\phi (\\boldsymbol {x}) \\\\ \\stackrel {(c)} {=} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\left(\\mathbf {I} + \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\psi \\left(\\boldsymbol {x} _ {t}\\right) \\psi \\left(\\boldsymbol {x} _ {t}\\right) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1 / 2}\\right) ^ {- 1} \\mathbf {V} _ {t - 1} ^ {- 1 / 2} \\phi (\\boldsymbol {x}) \\tag {27} \\\\ \\stackrel {(d)} {\\succcurlyeq} \\sigma^ {2} \\phi (\\boldsymbol {x}) ^ {\\top} \\mathbf {V} _ {t - 1} ^ {- 1} \\phi (\\boldsymbol {x}) / (1 + 1 / \\sigma^ {2}) \\\\ \\stackrel {(e)} {=} \\partial 
\\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) / (1 + 1 / \\sigma^ {2}) \\\\ \\end{array}", + "image_path": "4b42bcb6a8c74c44c0710da79a960868f84f907d26b0779b88e26af7abd34a63.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "text", + "content": " derives from (20) and " + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "text", + "content": " comes from the inversion of matrix product. Finally " + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "inline_equation", + "content": "(d)" + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "text", + "content": " follows from the result in (26) and " + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "inline_equation", + "content": "(e)" + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "text", + "content": " exploits the reformulation of " + }, + { + "bbox": [ + 104, + 653, + 505, + 677 + ], + "type": "inline_equation", + "content": "\\partial \\sigma_{t - 1}^2 (\\pmb {x})" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 688, + 479, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 479, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 479, + 700 + ], + "type": "text", + "content": "According to Lemma B.4 and Lemma B.6, the following holds for any " + }, + { + "bbox": [ + 104, + 688, + 479, + 700 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 688, + 479, + 700 + ], 
+ "type": "text", + "content": " and any " + }, + { + "bbox": [ + 104, + 688, + 479, + 700 + ], + "type": "inline_equation", + "content": "t \\geq 1" + }, + { + "bbox": [ + 104, + 688, + 479, + 700 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 239, + 705, + 504, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 705, + 504, + 735 + ], + "spans": [ + { + "bbox": [ + 239, + 705, + 504, + 735 + ], + "type": "interline_equation", + "content": "\\frac {1}{1 + 1 / \\sigma^ {2}} \\leq \\frac {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}}{\\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\leq 1. \\tag {28}", + "image_path": "74efaafd9b95d05c9a88cd277eacf4e7056dae2c013857c2c45df64d1f571c46.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 387, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 387, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 387, + 94 + ], + "type": "text", + "content": "Based on the definition of " + }, + { + "bbox": [ + 105, + 83, + 387, + 94 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 105, + 83, + 387, + 94 + ], + 
"type": "text", + "content": " in our Theorem 2, we therefore also have" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 169, + 97, + 504, + 119 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 97, + 504, + 119 + ], + "spans": [ + { + "bbox": [ + 169, + 97, + 504, + 119 + ], + "type": "interline_equation", + "content": "r \\triangleq \\max _ {\\boldsymbol {x} \\in \\mathcal {X}, t \\geq 1} \\sqrt {\\left\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2} / \\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\in \\left[ 1 / \\sqrt {1 + 1 / \\sigma^ {2}}, 1 \\right]. \\tag {29}", + "image_path": "fa4b9720b7d355f270aa5a4dd026705849e2bd1859cd9915818e217030432973.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 129, + 331, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 129, + 331, + 141 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 331, + 141 + ], + "type": "text", + "content": "As a result, for every iteration " + }, + { + "bbox": [ + 105, + 129, + 331, + 141 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 105, + 129, + 331, + 141 + ], + "type": "text", + "content": " of our Algo. 
2, we have" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 208, + 144, + 504, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 144, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 208, + 144, + 504, + 224 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\sqrt {\\| \\partial \\sigma_ {t} ^ {2} (\\boldsymbol {x}) \\| _ {2}} \\leq r \\sqrt {\\left\\| \\partial \\sigma_ {t - 1} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\\\ \\leq r ^ {t} \\sqrt {\\left\\| \\partial \\sigma_ {0} ^ {2} (\\boldsymbol {x}) \\right\\| _ {2}} \\tag {30} \\\\ = r ^ {t} \\sqrt {\\| \\partial_ {z} \\partial_ {z ^ {\\prime}} k (z , z ^ {\\prime}) | _ {z = z ^ {\\prime} = x} \\| _ {2}} \\\\ \\leq r ^ {t} \\kappa \\\\ \\end{array}", + "image_path": "872d83efbe0f927bf49c4734dd2ad431cc40bfc54f62b17fba82c909fe6f2b7f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "type": "text", + "content": "where the last inequality derives from our assumption of " + }, + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "type": "inline_equation", + "content": "\\| \\partial_z\\partial_{z'}k(z,z')|_{z = z' = x}\\| _2\\leq \\kappa^2" + }, + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "type": "inline_equation", + "content": "\\forall \\pmb {x}\\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "type": "text", + "content": ") in our Sec. 2.1. By substituting the result above into our Theorem 1, we complete our proof of Theorem 2." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 276, + 230, + 287 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 276, + 230, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 276, + 230, + 287 + ], + "type": "text", + "content": "B.4 PROOF OF THEOREM 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "text", + "content": "**Preparation.** Following the definition of the derivative mapping on the true derivative " + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "inline_equation", + "content": "\\nabla f(\\boldsymbol{x}_{t,\\tau})" + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "text", + "content": " in (8), we defined the following derivative mapping on our estimated derivative " + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "inline_equation", + "content": "\\nabla \\mu_{t-1}(\\boldsymbol{x}_{t,\\tau})" + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 178, + 323, + 504, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 323, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 178, + 323, + 504, + 350 + ], + "type": "interline_equation", + "content": "\\widehat {G} _ {t, \\tau} \\triangleq \\frac {\\boldsymbol {x} _ {t , \\tau} - \\boldsymbol {x} _ {t , \\tau + 1}}{\\eta_ {t , \\tau}} = \\frac {\\boldsymbol {x} _ {t , \\tau} - \\mathcal {P} _ {\\chi} (\\boldsymbol {x} _ {t , \\tau} - \\eta_ {t , \\tau} \\nabla \\mu_ {t} (\\boldsymbol {x} _ {t , \\tau}))}{\\eta_ {t , \\tau}}. 
\\tag {31}", + "image_path": "5995a0cf8b13c673a746679c2c867a0e94234c040ac52807b5a5b70e0197890f.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 352, + 424, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 352, + 424, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 424, + 364 + ], + "type": "text", + "content": "By re-arranging it, we have the following update rule that has reformulated (7):" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 244, + 368, + 504, + 383 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 368, + 504, + 383 + ], + "spans": [ + { + "bbox": [ + 244, + 368, + 504, + 383 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} _ {t, \\tau + 1} = \\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\widehat {G} _ {t, \\tau}. \\tag {32}", + "image_path": "ca35815604fce12c29e2abf52376da21bd3180acc753a3fd21189f60be44915f.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 390, + 506, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 506, + 403 + ], + "type": "text", + "content": "Based on our definition of the derivative mappings in (31) and (8), we introduce the following lemmas:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 415, + 505, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 505, + 438 + ], + "type": "text", + "content": "Lemma B.7 (General Projection Inequalities). 
Given " + }, + { + "bbox": [ + 104, + 415, + 505, + 438 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathcal{X}}(\\boldsymbol{x}) = \\arg \\min_{\\boldsymbol{z} \\in \\mathcal{X}} \\| \\boldsymbol{x} - \\boldsymbol{z} \\|_2^2 / 2" + }, + { + "bbox": [ + 104, + 415, + 505, + 438 + ], + "type": "text", + "content": " and domain " + }, + { + "bbox": [ + 104, + 415, + 505, + 438 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 415, + 505, + 438 + ], + "type": "text", + "content": ", for any " + }, + { + "bbox": [ + 104, + 415, + 505, + 438 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}, \\boldsymbol{x}'" + }, + { + "bbox": [ + 104, + 415, + 505, + 438 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 242, + 441, + 504, + 455 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 441, + 504, + 455 + ], + "spans": [ + { + "bbox": [ + 242, + 441, + 504, + 455 + ], + "type": "interline_equation", + "content": "\\left\\| \\boldsymbol {x} - \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) \\right\\| _ {2} \\leq \\left\\| \\boldsymbol {x} - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2}, \\tag {33}", + "image_path": "807d825ab665796462785d28d1f9f34bca4054872f03d46635f67ae5cc817d08.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 219, + 456, + 504, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 456, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 219, + 456, + 504, + 471 + ], + "type": "interline_equation", + "content": "\\left\\| \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} \\leq \\left\\| \\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime} \\right\\| _ {2}. 
\\tag {34}", + "image_path": "c18f5fe86c4ee16524e5f4d11d4a49a7e5199961661f1112179cae3ff35ac782.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 483, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 504, + 506 + ], + "type": "text", + "content": "Proof. For (33), as " + }, + { + "bbox": [ + 104, + 483, + 504, + 506 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathcal{X}}(\\pmb{x}^{\\prime}) \\in \\mathcal{X} (\\forall \\pmb{x}^{\\prime})" + }, + { + "bbox": [ + 104, + 483, + 504, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 483, + 504, + 506 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathcal{X}}(\\pmb{x}) = \\arg \\min_{\\pmb{z} \\in \\mathcal{X}} \\| \\pmb{x} - \\pmb{z} \\|_2^2 / 2" + }, + { + "bbox": [ + 104, + 483, + 504, + 506 + ], + "type": "text", + "content": ", we then naturally have (33)." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "text", + "content": "For (34), since " + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathcal{X}}(\\pmb{x})" + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "text", + "content": " is the optimum of " + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "inline_equation", + "content": "h(\\pmb{z}) = \\| \\pmb{x} - \\pmb{z}\\|_2^2 / 2" + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "text", + "content": ", according to the optimality condition of the convex projection function " + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "inline_equation", + "content": "h(\\pmb{z})" + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "text", + "content": " within the domain " + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "inline_equation", + "content": "\\pmb{z} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "text", + "content": " (Boyd and Vandenberghe, 2014), we then have the following inequality for any " + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_{\\mathcal{X}}(\\pmb{x}') \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 513, + 506, + 548 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 239, + 550, + 504, + 564 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 550, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 239, + 550, + 504, + 564 + ], + "type": "interline_equation", + "content": "\\nabla h (\\boldsymbol {z}) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ 
{\\prime}\\right) - \\boldsymbol {z}\\right) \\geq 0. \\tag {35}", + "image_path": "e143b01914ea4bdbc61e0f399f31a3576d253f8bb9fdcda98171f741c65c715a.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 568, + 425, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 568, + 425, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 568, + 425, + 581 + ], + "type": "text", + "content": "By taking " + }, + { + "bbox": [ + 105, + 568, + 425, + 581 + ], + "type": "inline_equation", + "content": "\\nabla h(z) = z - x" + }, + { + "bbox": [ + 105, + 568, + 425, + 581 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 105, + 568, + 425, + 581 + ], + "type": "inline_equation", + "content": "z = \\mathcal{P}_{\\mathcal{X}}(x)" + }, + { + "bbox": [ + 105, + 568, + 425, + 581 + ], + "type": "text", + "content": " into the inequality above, we have" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 220, + 584, + 504, + 599 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 584, + 504, + 599 + ], + "spans": [ + { + "bbox": [ + 220, + 584, + 504, + 599 + ], + "type": "interline_equation", + "content": "\\left(\\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\boldsymbol {x}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) - \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x})\\right) \\geq 0. 
\\tag {36}", + "image_path": "adfeaf0870dc36a2e98adfe589a1808ca50458b6c2860ecb015cbc6211bb5da8.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 602, + 444, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 602, + 444, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 602, + 444, + 615 + ], + "type": "text", + "content": "By exchanging " + }, + { + "bbox": [ + 105, + 602, + 444, + 615 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 105, + 602, + 444, + 615 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 602, + 444, + 615 + ], + "type": "inline_equation", + "content": "\\pmb{x}'" + }, + { + "bbox": [ + 105, + 602, + 444, + 615 + ], + "type": "text", + "content": " in the result above, we achieve the following similar result:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 217, + 618, + 504, + 633 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 618, + 504, + 633 + ], + "spans": [ + { + "bbox": [ + 217, + 618, + 504, + 633 + ], + "type": "interline_equation", + "content": "\\left(\\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right) - \\boldsymbol {x} ^ {\\prime}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\geq 0. 
\\tag {37}", + "image_path": "d70232293e8459b8d55ac398f76e5f503671ce43a3eabb6a364013445a92d2bc.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 637, + 217, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 637, + 217, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 217, + 648 + ], + "type": "text", + "content": "By summing (36) and (37)," + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 187, + 652, + 504, + 668 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 652, + 504, + 668 + ], + "spans": [ + { + "bbox": [ + 187, + 652, + 504, + 668 + ], + "type": "interline_equation", + "content": "\\left(\\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\geq \\left\\| \\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2}. 
\\tag {38}", + "image_path": "b2276a887d39c8289bed1497c4a7a9bcd6e339c786acbdfb97138f67a5f288ef.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 670, + 393, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 670, + 393, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 670, + 393, + 682 + ], + "type": "text", + "content": "Based on the Cauchy-Schwarz inequality, we finally achieve (34) using" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 188, + 685, + 504, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 685, + 504, + 717 + ], + "spans": [ + { + "bbox": [ + 188, + 685, + 504, + 717 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right) \\right\\| _ {2} ^ {2} \\leq \\left(\\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} (\\boldsymbol {x}) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} ^ {\\prime}\\right)\\right) \\tag {39} \\\\ \\leq \\| \\boldsymbol {x} - \\boldsymbol {x} ^ {\\prime} \\| _ {2} \\| \\mathcal {P} _ {\\chi} (\\boldsymbol {x}) - \\mathcal {P} _ {\\chi} (\\boldsymbol {x} ^ {\\prime}) \\| _ {2} \\\\ \\end{array}", + "image_path": "7b40f9d4e05f8353b64d3bd2096ae228d972c963930a2b7388be7bc5f19f2947.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 719, + 444, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 719, + 444, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 719, + 444, + 734 + ], + "type": "text", + "content": "where both sides need to be divided by " + }, + { + "bbox": [ + 104, + 719, + 444, + 734 + ], + "type": "inline_equation", + "content": "\\| \\mathcal{P}_{\\mathcal{X}}(\\pmb {x}) - \\mathcal{P}_{\\mathcal{X}}(\\pmb{x}^{\\prime})\\|_{2}" + }, + { + "bbox": [ + 104, + 719, + 444, + 
734 + ], + "type": "text", + "content": " to complete our proof." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "text", + "content": "Lemma B.8 (Inequalities for Derivative Mappings). 
Given (31) and (8), for every " + }, + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 82, + 504, + 95 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 235, + 104, + 504, + 126 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 104, + 504, + 126 + ], + "spans": [ + { + "bbox": [ + 235, + 104, + 504, + 126 + ], + "type": "interline_equation", + "content": "\\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\leq \\nabla \\mu_ {t - 1} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} \\widehat {G} _ {t, \\tau}, \\tag {40}", + "image_path": "7136cce63683bff0b92e0ee5750c0f887cd38783e5afa8754a13aa8c39ad15dc.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 235, + 129, + 504, + 141 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 129, + 504, + 141 + ], + "spans": [ + { + "bbox": [ + 235, + 129, + 504, + 141 + ], + "type": "interline_equation", + "content": "\\left\\| G _ {t, \\tau} \\right\\| _ {2} \\leq \\left\\| \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) \\right\\| _ {2}, \\tag {41}", + "image_path": "34a6ead6a83e6852d460f16eb465f2df4f5e2679f00d334161e18ec104dc57d2.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 203, + 144, + 504, + 165 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 144, + 504, + 165 + ], + "spans": [ + { + "bbox": [ + 203, + 144, + 504, + 165 + ], + "type": "interline_equation", + "content": "\\left\\| \\widehat {G} _ {t, \\tau} - G _ {t, \\tau} \\right\\| _ {2} \\leq \\| \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau}) \\| _ {2}. 
\\tag {42}", + "image_path": "43790cc474aafc1415ddf7d84feca38c2eaa18229c878522953f28155454767f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 189, + 373, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 189, + 373, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 189, + 373, + 201 + ], + "type": "text", + "content": "Proof. For (40), let " + }, + { + "bbox": [ + 104, + 189, + 373, + 201 + ], + "type": "inline_equation", + "content": "\\widehat{\\pmb{x}}_{t,\\tau} = \\pmb{x}_{t,\\tau} - \\eta_{t,\\tau}\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau})" + }, + { + "bbox": [ + 104, + 189, + 373, + 201 + ], + "type": "text", + "content": ", we then have" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 160, + 213, + 504, + 287 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 213, + 504, + 287 + ], + "spans": [ + { + "bbox": [ + 160, + 213, + 504, + 287 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) \\right\\| _ {2} ^ {2} - \\left(\\boldsymbol {x} _ {t, \\tau} - \\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right)\\right) \\\\ \\stackrel {(a)} {=} \\left\\| \\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1} \\right\\| _ {2} ^ {2} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} \\left(\\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1}\\right) \\tag {43} \\\\ \\stackrel {(b)} {=} \\eta_ {t, \\tau} ^ {2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} - \\eta_ {t, \\tau} ^ {2} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, 
\\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} \\\\ \\begin{array}{c} \\stackrel {(c)} {\\leq} 0 \\end{array} \\\\ \\end{array}", + "image_path": "11a4a19a8d0e394a8ba7449f1783b02ad64cef379998fc4bcd0914053067a616.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "text", + "content": " results from the fact that " + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{t,\\tau +1} = \\mathcal{P}_{\\mathcal{X}}\\left(\\pmb{x}_{t,\\tau} - \\eta_{t,\\tau}\\nabla \\mu_{t - 1}(\\pmb{x}_{t,\\tau})\\right)" + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "text", + "content": " based on our (7) and " + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "text", + "content": " derives from the definition of " + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\widehat{G}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "text", + "content": " in (31). 
In addition, " + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "text", + "content": " is based on the following result by substituting " + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pmb {x} = \\pmb{x}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pmb{x}^{\\prime} = \\widehat{\\pmb{x}}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 298, + 504, + 335 + ], + "type": "text", + "content": " into (38):" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 154, + 346, + 504, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 346, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 154, + 346, + 504, + 361 + ], + "type": "interline_equation", + "content": "\\left\\| \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) \\right\\| _ {2} ^ {2} - \\left(\\boldsymbol {x} _ {t, \\tau} - \\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right) ^ {\\top} \\left(\\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\widehat {\\boldsymbol {x}} _ {t, \\tau}\\right)\\right) \\leq 0. 
\\tag {44}", + "image_path": "fdec74409494194ac1126d4a9ddff8d34dc93fb626aa6ac7b01ee7f2ef3676fd.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 371, + 502, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 502, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 502, + 384 + ], + "type": "text", + "content": "Finally, by dividing " + }, + { + "bbox": [ + 104, + 371, + 502, + 384 + ], + "type": "inline_equation", + "content": "\\eta_{t,\\tau}^2" + }, + { + "bbox": [ + 104, + 371, + 502, + 384 + ], + "type": "text", + "content": " on the both sides of the last inequality in (43), we finish the proof for (40)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 388, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 411 + ], + "type": "text", + "content": "For (41), following the same proof above, we can also obtain the following inequality for the projected derivative " + }, + { + "bbox": [ + 104, + 388, + 504, + 411 + ], + "type": "inline_equation", + "content": "G_{t,\\tau}" + }, + { + "bbox": [ + 104, + 388, + 504, + 411 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 189, + 422, + 504, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 422, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 189, + 422, + 504, + 437 + ], + "type": "interline_equation", + "content": "\\left\\| G _ {t, \\tau} \\right\\| _ {2} ^ {2} \\leq \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} G _ {t, \\tau} \\leq \\left\\| \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) \\right\\| _ {2} \\left\\| G _ {t, \\tau} \\right\\| _ {2}. 
\\tag {45}", + "image_path": "c9c718ec6b15c108ca1d772f72b16d543cc2a06522e070433eb426b269db2df1.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 447, + 481, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 447, + 481, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 447, + 481, + 460 + ], + "type": "text", + "content": "We complete the proof for (41) by dividing " + }, + { + "bbox": [ + 104, + 447, + 481, + 460 + ], + "type": "inline_equation", + "content": "\\| G_{t,\\tau}\\| _2" + }, + { + "bbox": [ + 104, + 447, + 481, + 460 + ], + "type": "text", + "content": " on the both sides of the inequality above." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 466, + 315, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 466, + 315, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 315, + 479 + ], + "type": "text", + "content": "For (42), define " + }, + { + "bbox": [ + 104, + 466, + 315, + 479 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{t,\\tau +1}^{\\prime}\\triangleq \\boldsymbol{x}_{t,\\tau} - \\eta_{t,\\tau}G_{t,\\tau}" + }, + { + "bbox": [ + 104, + 466, + 315, + 479 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 123, + 491, + 504, + 618 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 491, + 504, + 618 + ], + "spans": [ + { + "bbox": [ + 123, + 491, + 504, + 618 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\widehat {G} _ {t, \\tau} - G _ {t, \\tau} \\right\\| _ {2} \\stackrel {(a)} {=} \\frac {1}{\\eta_ {t , \\tau}} \\left\\| \\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1} - \\left(\\boldsymbol {x} _ {t, \\tau} - \\boldsymbol {x} _ {t, \\tau + 1} ^ {\\prime}\\right) \\right\\| _ {2} \\\\ \\stackrel {(b)} {=} \\frac {1}{\\eta_ {t , \\tau}} \\left\\| \\boldsymbol {x} _ {t, \\tau + 1} - \\boldsymbol {x} _ 
{t, \\tau + 1} ^ {\\prime} \\right\\| _ {2} \\\\ \\stackrel {(c)} {=} \\frac {1}{\\eta_ {t , \\tau}} \\| \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau})\\right) - \\mathcal {P} _ {\\mathcal {X}} \\left(\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla f (\\boldsymbol {x} _ {t, \\tau})\\right) \\| _ {2} \\tag {46} \\\\ \\stackrel {(d)} {\\leq} \\frac {1}{\\eta_ {t , \\tau}} \\left\\| \\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - (\\boldsymbol {x} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla f (\\boldsymbol {x} _ {t, \\tau})) \\right\\| _ {2} \\\\ \\stackrel {(e)} {=} \\left\\| \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau}) \\right\\| _ {2} \\\\ \\end{array}", + "image_path": "d062e79ff86846e4ad049d60efb4b5e3ab3d72613ec5af0bd080e15355cd1971.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "text", + "content": " comes from the definition of " + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "inline_equation", + "content": "\\widehat{G}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "inline_equation", + "content": "G_{t,\\tau}" + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "text", + "content": " in (31) and (8), respectively. 
In addition, " + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "text", + "content": " derives from (7) and (8). Finally, " + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "inline_equation", + "content": "(d)" + }, + { + "bbox": [ + 104, + 629, + 504, + 653 + ], + "type": "text", + "content": " results from (34)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 678, + 504, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 678, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 678, + 504, + 702 + ], + "type": "text", + "content": "Proof. Since the objective function " + }, + { + "bbox": [ + 104, + 678, + 504, + 702 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 678, + 504, + 702 + ], + "type": "text", + "content": " is assumed to be " + }, + { + "bbox": [ + 104, + 678, + 504, + 702 + ], + "type": "inline_equation", + "content": "L_{s}" + }, + { + "bbox": [ + 104, + 678, + 504, + 702 + ], + "type": "text", + "content": "-Lipschitz smooth (Sec. 
4.2), we have the following inequality for any " + }, + { + "bbox": [ + 104, + 678, + 504, + 702 + ], + "type": "inline_equation", + "content": "x_{t,\\tau} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 678, + 504, + 702 + ], + "type": "text", + "content": " in our ZoRD algorithm:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 712, + 504, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 712, + 504, + 736 + ], + "spans": [ + { + "bbox": [ + 141, + 712, + 504, + 736 + ], + "type": "interline_equation", + "content": "f \\left(\\boldsymbol {x} _ {t, \\tau + 1}\\right) - f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) \\leq \\nabla f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) ^ {\\top} \\left(\\boldsymbol {x} _ {t, \\tau + 1} - \\boldsymbol {x} _ {t, \\tau}\\right) + \\frac {L _ {s}}{2} \\| \\boldsymbol {x} _ {t, \\tau + 1} - \\boldsymbol {x} _ {t, \\tau} \\| _ {2} ^ {2}. \\tag {47}", + "image_path": "f87973b18c4f28e1e4eaf1b1ea159c2874462843cd642bc4853cbfc709453169.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 111 + ], + "type": 
"text", + "content": "Let " + }, + { + "bbox": [ + 104, + 81, + 506, + 111 + ], + "type": "inline_equation", + "content": "\\delta' \\in (0,1)" + }, + { + "bbox": [ + 104, + 81, + 506, + 111 + ], + "type": "text", + "content": ". Define " + }, + { + "bbox": [ + 104, + 81, + 506, + 111 + ], + "type": "inline_equation", + "content": "\\beta \\triangleq \\sqrt{d + 2(\\sqrt{d} + 1) \\ln(1 / \\delta')}" + }, + { + "bbox": [ + 104, + 81, + 506, + 111 + ], + "type": "text", + "content": ", by substituting (32) into the inequality above, the following inequality holds with probability of at least " + }, + { + "bbox": [ + 104, + 81, + 506, + 111 + ], + "type": "inline_equation", + "content": "1 - \\delta'" + }, + { + "bbox": [ + 104, + 81, + 506, + 111 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 123, + 115, + 208, + 129 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 115, + 208, + 129 + ], + "spans": [ + { + "bbox": [ + 123, + 115, + 208, + 129 + ], + "type": "interline_equation", + "content": "f (\\boldsymbol {x} _ {t, \\tau + 1}) - f (\\boldsymbol {x} _ {t, \\tau})", + "image_path": "a0bf85be9913fe5a291b662e48bd2dd422c2d6b26d1a4774374baf7aed38375a.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 131, + 293, + 156 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 131, + 293, + 156 + ], + "spans": [ + { + "bbox": [ + 110, + 131, + 293, + 156 + ], + "type": "interline_equation", + "content": "\\stackrel {(a)} {\\leq} - \\eta_ {t, \\tau} \\nabla f (\\pmb {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2}", + "image_path": "ab8ac610562720dd2a50b957c6dd35d2f5e9657c3d6385f59c8601c664f3e32b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 158, + 463, + 182 + ], + "type": "interline_equation", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 111, + 158, + 463, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 158, + 463, + 182 + ], + "type": "interline_equation", + "content": "\\stackrel {(b)} {=} \\eta_ {t, \\tau} \\left(\\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau})\\right) ^ {\\top} \\widehat {G} _ {t, \\tau} - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2}", + "image_path": "f2491917d3017efeda3eabf2a3f27c935dd27d0e7ae72b86750913f383c6b58b.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 184, + 482, + 233 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 184, + 482, + 233 + ], + "spans": [ + { + "bbox": [ + 111, + 184, + 482, + 233 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\stackrel {(c)} {=} \\eta_ {t, \\tau} \\left[ (\\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau})) ^ {\\top} (\\widehat {G} _ {t, \\tau} - G _ {t, \\tau}) + (\\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau})) ^ {\\top} G _ {t, \\tau} \\right] \\\\ - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\\\ \\end{array}", + "image_path": "9b78924d6f2d15c608f6a6e3264d677523d80e98df73ca125eb8ada57cb5bab6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 110, + 235, + 500, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 235, + 500, + 285 + ], + "spans": [ + { + "bbox": [ + 110, + 235, + 500, + 285 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\stackrel {(d)} {\\leq} \\eta_ {t, 
\\tau} \\left[ \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\left\\| \\widehat {G} _ {t, \\tau} - G _ {t, \\tau} \\right\\| _ {2} + \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\| G _ {t, \\tau} \\| _ {2} \\right] \\\\ - \\eta_ {t, \\tau} \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) ^ {\\top} \\widehat {G} _ {t, \\tau} + \\frac {L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\\\ \\end{array}", + "image_path": "a3c15bfdcf25c37f74da4ab4d2abd75f2f13716adcf008d69c9703f0f7cf09a9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 288, + 456, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 288, + 456, + 338 + ], + "spans": [ + { + "bbox": [ + 111, + 288, + 456, + 338 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\stackrel {(e)} {\\leq} \\eta_ {t, \\tau} \\left[ \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} ^ {2} + \\| \\nabla \\mu_ {t - 1} (\\pmb {x} _ {t, \\tau}) - \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\| \\nabla f (\\pmb {x} _ {t, \\tau}) \\| _ {2} \\right] \\\\ - \\frac {2 \\eta_ {t , \\tau} - L _ {s} \\eta_ {t , \\tau} ^ {2}}{2} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\\\ \\end{array}", + "image_path": "ff816af72f721226b0103348c9379ea8233bda8d0019ebfd0de6f9b8029d048a.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 110, + 341, + 504, + 373 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 341, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 110, + 341, + 504, + 373 + ], + "type": "interline_equation", + "content": "\\stackrel {(f)} {\\leq} \\eta_ {t, \\tau} \\kappa^ {2} \\beta^ {2} r ^ {2 t} + \\eta_ {t, \\tau} L _ {c} \\kappa \\beta r ^ {t} - \\frac {\\eta_ {t , \\tau}}{2} \\| \\widehat {G} 
_ {t, \\tau} \\| _ {2} ^ {2} \\tag {48}", + "image_path": "f7b3dbed39fc4e0259c7a98e167d5000215467f30cf9ddaed998d252c13ceb2d.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "inline_equation", + "content": "(d)" + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "content": " derives from the Cauchy-Schwarz inequality and " + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "inline_equation", + "content": "(e)" + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "content": " follows from the Lemma B.7. Finally, " + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "inline_equation", + "content": "(f)" + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "content": " result from the bounded derivative estimation error in Theorem 2 and the fact that " + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "inline_equation", + "content": "L_{c}" + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "content": "-Lipschitz continuous (i.e., " + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "inline_equation", + "content": "\\| \\nabla f(\\pmb{x})\\|_2 \\leq L_c" + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{X}" + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "content": ") and " + 
}, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "inline_equation", + "content": "\\eta_{t,\\tau} \\leq 1 / L_s (\\forall \\tau)" + }, + { + "bbox": [ + 104, + 373, + 505, + 408 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "content": "For every iteration " + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "content": " our ZoRD algorithm, we in fact will apply the virtual updates (7) for " + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "content": " times (see Algo. 2). Therefore, for probability " + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "inline_equation", + "content": "\\geq 1 - V_t\\delta'" + }, + { + "bbox": [ + 104, + 411, + 504, + 436 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 123, + 441, + 504, + 508 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 441, + 504, + 508 + ], + "spans": [ + { + "bbox": [ + 123, + 441, + 504, + 508 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {1}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau} \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} ^ {2} \\leq \\frac {2}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\left[ f \\left(\\boldsymbol {x} _ {t, \\tau}\\right) - f \\left(\\boldsymbol {x} _ {t, \\tau + 1}\\right) + \\eta_ {t, \\tau} \\left(\\kappa^ {2} \\beta^ {2} r ^ {2 t} + L _ {c} \\kappa \\beta r ^ {t}\\right) \\right] \\tag {49} \\\\ = \\frac {2}{V _ {t}} 
\\left[ f \\left(\\boldsymbol {x} _ {t - 1} - f (\\boldsymbol {x} _ {t})\\right) \\right] + \\left(\\frac {2}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau}\\right) \\left(\\kappa^ {2} \\beta^ {2} r ^ {2 t} + L _ {c} \\kappa \\beta r ^ {t}\\right) \\\\ \\end{array}", + "image_path": "f16be92bcd7814bae2c6e4708c715a95505073c04fbc76ea9d1804d573c9922c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 511, + 455, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 455, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 455, + 523 + ], + "type": "text", + "content": "where the first inequality results from (48) by re-arranging it and then sum it up over " + }, + { + "bbox": [ + 104, + 511, + 455, + 523 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 511, + 455, + 523 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "type": "text", + "content": "However, in order to prove the convergence of our ZoRD algorithm to a stationary point, we need to consider the derivative mapping of " + }, + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "type": "inline_equation", + "content": "G_{t,\\tau}" + }, + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "type": "text", + "content": " instead (refer to our Sec. 4.2). 
So, for any " + }, + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 528, + 504, + 562 + ], + "type": "text", + "content": ", we propose the following inequality:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 200, + 567, + 504, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 567, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 200, + 567, + 504, + 654 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| G _ {t, \\tau} \\right\\| _ {2} = \\left\\| G _ {t, \\tau} - \\widehat {G} _ {t, \\tau} + \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\\\ \\leq \\left\\| G _ {t, \\tau} - \\widehat {G} _ {t, \\tau} \\right\\| _ {2} + \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\tag {50} \\\\ \\leq \\left\\| \\nabla \\mu_ {t - 1} (\\boldsymbol {x} _ {t, \\tau}) - \\nabla f (\\boldsymbol {x} _ {t, \\tau}) \\right\\| _ {2} + \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\\\ \\leq \\kappa \\beta r ^ {t} + \\left\\| \\widehat {G} _ {t, \\tau} \\right\\| _ {2} \\\\ \\end{array}", + "image_path": "7f392c6628a319816155cf3640fa785c827b78990a4fa2883c1320d6f92c93d8.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 658, + 504, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 504, + 681 + ], + "type": "text", + "content": "where the first inequality is from the Cauchy-Schwarz inequality and the second inequality comes from (42). 
Finally, by taking the result above into (49), we have" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 685, + 504, + 729 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 685, + 504, + 729 + ], + "spans": [ + { + "bbox": [ + 111, + 685, + 504, + 729 + ], + "type": "interline_equation", + "content": "\\frac {1}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\frac {2}{V _ {t}} \\left[ f \\left(\\boldsymbol {x} _ {t - 1} - f (\\boldsymbol {x} _ {t})\\right) \\right] + \\left(\\frac {2}{V _ {t}} \\sum_ {\\tau = 0} ^ {V _ {t} - 1} \\eta_ {t, \\tau}\\right) \\left(\\kappa^ {2} \\beta^ {2} r ^ {2 t} + L _ {c} \\kappa \\beta r ^ {t}\\right) + \\kappa \\beta r ^ {t}. \\tag {51}", + "image_path": "b237496ca3fb7d0b3637ce46afaeeff221454b855a01a18c7d027945019e6552.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "Then, substituting " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "V_{t} = V" + }, + { + "bbox": [ + 
104, + 82, + 504, + 106 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\eta_{t,\\tau} = \\eta" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "t,\\tau" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " into the result above, the following inequality holds with probability of at least " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "1 - VT\\delta^{\\prime}" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "r < 1" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 119, + 113, + 504, + 239 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 113, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 119, + 113, + 504, + 239 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\eta \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\stackrel {(a)} {\\leq} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\left(\\frac {2 (f (\\boldsymbol {x} _ {t - 1} - f (\\boldsymbol {x} _ {t}))}{V} + 2 \\eta \\kappa^ {2} \\beta^ {2} r ^ {2 t} + (2 \\eta L _ {c} + 1) \\kappa \\beta r ^ {t}\\right) \\\\ \\stackrel {(b)} {\\leq} \\frac {2}{T V} \\left[ f (\\boldsymbol {x} _ {0}) - f (\\boldsymbol {x} _ {T}) \\right] + \\frac {2 \\eta (1 - r ^ {2 T})}{T (1 - r ^ {2})} \\kappa^ {2} \\beta^ {2} r ^ {2} \\\\ + \\frac {(2 \\eta L _ {c} + 1) (1 - r ^ {T})}{T (1 - r)} \\kappa \\beta r \\\\ \\stackrel {(c)} {\\leq} \\frac {2}{T V} \\left[ f \\left(\\boldsymbol {x} _ 
{0}\\right) - f \\left(\\boldsymbol {x} ^ {*}\\right) \\right] + \\frac {2 \\eta \\kappa^ {2} \\beta^ {2} r ^ {2}}{T \\left(1 - r ^ {2}\\right)} + \\frac {\\left(2 \\eta L _ {c} + 1\\right) \\kappa \\beta r}{T (1 - r)}. \\tag {52} \\\\ \\end{array}", + "image_path": "97ecb34e2b0809760a5ce0569ea98151ca4b438337918d4747af5d938c2f9573.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "content": " derives from the summation of the geometric sequence about " + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "content": " comes from " + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "inline_equation", + "content": "\\pmb{x}^{*}\\triangleq \\arg \\min_{\\pmb{x}\\in \\mathcal{X}}f(\\pmb {x})" + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "content": ". 
When " + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "inline_equation", + "content": "r = 1" + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "content": ", the following holds with probability of at least " + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "inline_equation", + "content": "\\geq 1 - VT\\delta^{\\prime}" + }, + { + "bbox": [ + 104, + 239, + 504, + 274 + ], + "type": "text", + "content": " accordingly:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 281, + 503, + 337 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 281, + 503, + 337 + ], + "spans": [ + { + "bbox": [ + 113, + 281, + 503, + 337 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\eta \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\left(\\frac {2 (f (\\boldsymbol {x} _ {t - 1} - f (\\boldsymbol {x} _ {t}))}{V} + 2 \\eta \\kappa^ {2} \\beta^ {2} r ^ {2 t} + (2 \\eta L _ {c} + 1) \\kappa \\beta r ^ {t}\\right) \\tag {53} \\\\ = \\frac {2}{T V} \\left[ f \\left(\\boldsymbol {x} _ {0}\\right) - f \\left(\\boldsymbol {x} _ {T}\\right) \\right] + 2 \\eta \\kappa^ {2} \\beta^ {2} + (2 \\eta L _ {c} + 1) \\kappa \\beta . 
\\\\ \\end{array}", + "image_path": "88a611118d9b8e5987a147a79de0a3aa5d868a1d5a9d0194d327b11daa9b0b0b.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 348, + 446, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 446, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 446, + 361 + ], + "type": "text", + "content": "Finally, let " + }, + { + "bbox": [ + 104, + 348, + 446, + 361 + ], + "type": "inline_equation", + "content": "\\delta = VT\\delta' \\in (0,1)" + }, + { + "bbox": [ + 104, + 348, + 446, + 361 + ], + "type": "text", + "content": ", the following holds with probability of at least " + }, + { + "bbox": [ + 104, + 348, + 446, + 361 + ], + "type": "inline_equation", + "content": "1 - \\delta" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 212, + 368, + 504, + 414 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 368, + 504, + 414 + ], + "spans": [ + { + "bbox": [ + 212, + 368, + 504, + 414 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\min _ {t \\leq T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\leq \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\frac {1}{V} \\sum_ {\\tau = 0} ^ {V - 1} \\| G _ {t, \\tau} \\| _ {2} ^ {2} \\tag {54} \\\\ \\leq ① + ② \\\\ \\end{array}", + "image_path": "8c06a919572fe312609f2c95f3c5a30025bc5d46b4255ee7ef7e6f0e680abf23.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 422, + 435, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 435, + 442 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 435, + 442 + ], + "type": "text", + "content": "where 1 and 2 can be defined as below with " + }, + { + "bbox": [ + 104, + 422, + 435, + 442 + ], + "type": "inline_equation", + "content": "\\alpha \\triangleq \\kappa \\sqrt{d + 2(\\sqrt{d} + 1)\\ln(VT / \\delta)}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 164, + 448, + 504, + 500 + 
], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 448, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 164, + 448, + 504, + 500 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} ① = \\frac {2 / \\eta}{T V} [ f (\\boldsymbol {x} _ {0}) - f (\\boldsymbol {x} _ {T}) ] \\\\ ② = \\left\\{ \\begin{array}{l l} 2 \\alpha^ {2} r ^ {2} / [ T (1 - r ^ {2}) ] + (2 L _ {c} + 1 / \\eta) \\alpha r / [ T (1 - r) ] & (r < 1), \\\\ 2 \\alpha^ {2} + (2 L _ {c} + 1 / \\eta) \\alpha & (r = 1). \\end{array} \\right. \\tag {55} \\\\ \\end{array}", + "image_path": "93dc68262eb61cb7cfd27d0ec676a43016aa1d1fce3b658e72df0291f234f443.jpg" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 321, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 321, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 321, + 94 + ], + "type": "text", + "content": "APPENDIX C EXPERIMENTAL SETTINGS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 222, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 222, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 222, + 118 + ], + "type": "text", + "content": "C.1 GENERAL SETTINGS" + } + ] + } + ], + "index": 2 + 
}, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": "Derived GP. Among all our experiments in Sec. 5, to apply the derivative estimation in Sec. 3.1 for every iteration " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " and every step " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " of our ZoRD algorithm, we use the derived GP (4) based on the Matérn kernel with " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\nu = 2.5" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " and fit this derived GP using 150 queries that achieves the smallest Euclidean distance with input " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " from the optimization trajectory. 
This is because we only need to model the objective function " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " in the vicinity of input " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": " precisely rather than the entire domain, so as to achieve an accurate derivative estimation at input " + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{t,\\tau}" + }, + { + "bbox": [ + 104, + 125, + 506, + 193 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 204, + 506, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 506, + 338 + ], + "type": "text", + "content": "Confidence Threshold. Among all our experiments in Sec. 5, the confidence threshold " + }, + { + "bbox": [ + 104, + 204, + 506, + 338 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 204, + 506, + 338 + ], + "type": "text", + "content": " of our dynamic virtual updates (Sec. 3.2) is set to be 0.35 in order to realize a good trade-off between query efficiency and accurate derivative estimation in practice, which can already allow our ZoRD to achieve compelling empirical results consistently (see our Sec. 5). In light of this, " + }, + { + "bbox": [ + 104, + 204, + 506, + 338 + ], + "type": "inline_equation", + "content": "c = 0.35" + }, + { + "bbox": [ + 104, + 204, + 506, + 338 + ], + "type": "text", + "content": " would be a reasonably good choice in practice, especially when there is no prior knowledge about the objective functions. 
When we have prior knowledge about the smoothness of the objective functions, we can likely make a better choice for " + }, + { + "bbox": [ + 104, + 204, + 506, + 338 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 204, + 506, + 338 + ], + "type": "text", + "content": ": Intuitively, smooth objective functions usually can be modeled by the Gaussian process effectively (Rasmussen and Williams, 2006), so an accurate derivative estimation from our derived GP is also likely to be achieved. In this scenario, a large confidence threshold can be applied to fully exploit the benefit of our derivative estimation that is free from the requirement for additional queries and consequently results in an improved query efficiency in practice." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "text", + "content": "Baselines. In addition, among all our experiments in Sec. 5, we consistently use " + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "inline_equation", + "content": "n = 10" + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\lambda = 0.01" + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "text", + "content": " and directions " + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\{u_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "text", + "content": " that are randomly sampled from a unit sphere for the derivative estimation of the FD method (2) applied in the RGF and PRGF algorithm. 
Moreover, following the common practice of (Berahas et al., 2022; Cheng et al., 2021), we conduct orthogonalization on these randomly selected directions via the Gram-Schmidt procedure. As for the ES algorithm (e.g., the one applied in Salimans et al., 2017), we apply the same " + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "inline_equation", + "content": "\\{u_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 348, + 504, + 426 + ], + "type": "text", + "content": " in RGF and PRGF for their update in every iteration." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 437, + 504, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 504, + 473 + ], + "type": "text", + "content": "Domain Transformation. Following the practice that has been used in (Eriksson et al., 2019), for all our experiments, we firstly re-scale the input domains into " + }, + { + "bbox": [ + 104, + 437, + 504, + 473 + ], + "type": "inline_equation", + "content": "[0,10]^d" + }, + { + "bbox": [ + 104, + 437, + 504, + 473 + ], + "type": "text", + "content": " to ease the optimization and then re-scale the updated inputs back to the original domains for querying." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 484, + 248, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 484, + 248, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 248, + 496 + ], + "type": "text", + "content": "C.2 SYNTHETIC EXPERIMENTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 505, + 504, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 504, + 527 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 504, + 527 + ], + "type": "text", + "content": "Let input " + }, + { + "bbox": [ + 104, + 505, + 504, + 527 + ], + "type": "inline_equation", + "content": "\\pmb{x} = [x_i]_{i=1}^d" + }, + { + "bbox": [ + 104, + 505, + 504, + 527 + ], + "type": "text", + "content": ", the Ackley and Levy function applied in our synthetic experiments are given below," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 533, + 510, + 616 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 533, + 510, + 616 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 510, + 616 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} f (\\pmb {x}) = - 2 0 \\exp \\left(- 0. 
2 \\sqrt {\\frac {1}{d} \\sum_ {i = 1} ^ {d} x _ {i} ^ {2}}\\right) - \\exp (\\frac {1}{d} \\sum_ {i = 1} ^ {d} \\cos (2 \\pi x _ {i})) + 2 0 + \\exp (1), (\\mathrm {A c k l e y}) \\\\ f (\\boldsymbol {x}) = \\sin^ {2} \\left(\\pi w _ {1}\\right) + \\sum_ {i = 1} ^ {d - 1} \\left(w _ {i} - 1\\right) ^ {2} \\left[ 1 + 1 0 \\sin^ {2} \\left(\\pi w _ {i} + 1\\right) \\right] + \\left(w _ {d} - 1\\right) ^ {2} \\left[ 1 + \\sin^ {2} \\left(2 \\pi w _ {d}\\right) \\right] (\\text {L e v y}) \\tag {56} \\\\ \\end{array}", + "image_path": "a0be37a182a87a9e6fba40304d225247733be5cd5759dbe439ec87c002704e04.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "inline_equation", + "content": "w_{i} = 1 + (x_{i} - 1) / 4" + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "content": " for any " + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "inline_equation", + "content": "i = 1, \\dots, d" + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "content": ", Ackley function achieves its minimum (i.e., " + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "inline_equation", + "content": "\\min f(\\pmb{x}) = 0" + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "content": ") at " + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "inline_equation", + "content": "\\pmb{x}^{*} = \\mathbf{0}" + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "content": ", and Levy function achieves its minimum (i.e., " + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "inline_equation", + "content": "\\min f(\\pmb{x}) = 0" + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + 
"type": "text", + "content": ") at " + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "inline_equation", + "content": "\\pmb{x}^{*} = \\mathbf{1}" + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "content": ". Note that the Ackley and Levy function for the synthetic experiments in our Sec. 5.2 are defined within the domain " + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "inline_equation", + "content": "[-20, 20]^d" + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "inline_equation", + "content": "[-7.5, 7.5]^d" + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "content": ", respectively. To give a better understanding of these two synthetic functions, we provide a 3D illustration of these two synthetic functions with " + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "inline_equation", + "content": "d = 2" + }, + { + "bbox": [ + 104, + 616, + 506, + 693 + ], + "type": "text", + "content": " in our Fig. 5. As shown in Fig. 5, these two synthetic functions are highly nonconvex and therefore have local minimums within their domains." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "To compare our ZoRD algorithm with other ZO/FO optimization baselines in Sec. 5.2, we firstly employ TuRBO of 300 queries to find a good initialization for all other ZO/FO optimization algorithms in Fig. 3 because of the nonconvexity of these two synthetic functions as shown in Fig. 5. 
We then" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 124, + 101, + 293, + 229 + ], + "blocks": [ + { + "bbox": [ + 124, + 101, + 293, + 229 + ], + "lines": [ + { + "bbox": [ + 124, + 101, + 293, + 229 + ], + "spans": [ + { + "bbox": [ + 124, + 101, + 293, + 229 + ], + "type": "image", + "image_path": "b0e0d1d63258389a9f2e3355c68706e372600cd18875473bfd73884f8529d368.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 236, + 271, + 247 + ], + "lines": [ + { + "bbox": [ + 159, + 236, + 271, + 247 + ], + "spans": [ + { + "bbox": [ + 159, + 236, + 271, + 247 + ], + "type": "text", + "content": "(a) Ackley function " + }, + { + "bbox": [ + 159, + 236, + 271, + 247 + ], + "type": "inline_equation", + "content": "(d = 2)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 304, + 99, + 479, + 230 + ], + "blocks": [ + { + "bbox": [ + 304, + 99, + 479, + 230 + ], + "lines": [ + { + "bbox": [ + 304, + 99, + 479, + 230 + ], + "spans": [ + { + "bbox": [ + 304, + 99, + 479, + 230 + ], + "type": "image", + "image_path": "c9ee5ee5c05e61235de5bd32e3a6c3a0fd4c99fefd40dfd5cadec77b30de8e1a.jpg" + } + ] + } + ], + "index": 3, + 
"angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 236, + 448, + 247 + ], + "lines": [ + { + "bbox": [ + 343, + 236, + 448, + 247 + ], + "spans": [ + { + "bbox": [ + 343, + 236, + 448, + 247 + ], + "type": "text", + "content": "(b) Levy function " + }, + { + "bbox": [ + 343, + 236, + 448, + 247 + ], + "type": "inline_equation", + "content": "(d = 2)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 143, + 256, + 465, + 268 + ], + "lines": [ + { + "bbox": [ + 143, + 256, + 465, + 268 + ], + "spans": [ + { + "bbox": [ + 143, + 256, + 465, + 268 + ], + "type": "text", + "content": "Figure 5: The 3D illustration of Ackley and Levy synthetic function with " + }, + { + "bbox": [ + 143, + 256, + 465, + 268 + ], + "type": "inline_equation", + "content": "d = 2" + }, + { + "bbox": [ + 143, + 256, + 465, + 268 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 289, + 506, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 506, + 334 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 506, + 334 + ], + "type": "text", + "content": "apply these ZO/FO optimization algorithms with a query budget of 200 for " + }, + { + "bbox": [ + 104, + 289, + 506, + 334 + ], + "type": "inline_equation", + "content": "d = 20,40" + }, + { + "bbox": [ + 104, + 289, + 506, + 334 + ], + "type": "text", + "content": ", and a query budget of 400 for " + }, + { + "bbox": [ + 104, + 289, + 506, + 334 + ], + "type": "inline_equation", + "content": "d = 100" + }, + { + "bbox": [ + 104, + 289, + 506, + 334 + ], + "type": "text", + "content": " to compare their query efficiency. We use the same Adam optimizer (Kingma and Ba, 2015) with a learning rate of 0.1 and exponential decay rates of 0.9, 0.999 for RGF, PRGF, GD, and our ZoRD algorithm, for faster convergence compared with standard GD." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 346, + 290, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 290, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 290, + 357 + ], + "type": "text", + "content": "C.3 BLACK-BOX ADVERSARIAL ATTACK" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "content": "For the black-box adversarial attack experiment on the MNIST dataset, we use the same fully trained deep neural networks from (Cheng et al., 2021) and adopt a " + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "inline_equation", + "content": "L_{\\infty}" + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "content": " constraint of " + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "inline_equation", + "content": "\\| x\\|_{\\infty} \\leq 0.3" + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "content": " on the input perturbation " + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "content": ". 
For the black-box adversarial attack experiment on the CIFAR-10 dataset, we fully train a ResNet-18 (He et al., 2016) on CIFAR-10 using stochastic gradient descend (SGD) with a cosine annealed learning rate from 0.1 to 0, a momentum of 0.9 and a weight decay of " + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "content": " for 200 epochs, and adopt a " + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "inline_equation", + "content": "L_{\\infty}" + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "content": " constraint of " + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "inline_equation", + "content": "\\| x\\|_{\\infty} \\leq 0.2" + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "content": " on the input perturbation " + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "content": ". Note that we use the same loss function as (Cheng et al., 2021) for these two experiments. Meanwhile, to apply RGF, PRGF and our ZoRD, we adopt Adam optimizer with the same learning rate of 0.5 and the same exponential decay rates of 0.9, 0.999." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 479, + 335, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 479, + 335, + 490 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 335, + 490 + ], + "type": "text", + "content": "C.4 NON-DIFFERENTIABLE METRIC OPTIMIZATION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 499, + 506, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 499, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 104, + 499, + 506, + 666 + ], + "type": "text", + "content": "The Covertype dataset used in Sec. 
5.4 is a classification dataset consisting of 581,012 samples from 7 different categories. Each sample from this dataset is a 54-dimensional vector of integers. In this experiment, we randomly split the dataset into training and test sets with each containing 290,506 samples. The MLP classifier applied in Sec. 5.4 consists of 2 layers with 30 and 14 hidden neurons respectively, leading to 2189 parameters in total (i.e., " + }, + { + "bbox": [ + 104, + 499, + 506, + 666 + ], + "type": "inline_equation", + "content": "d = 2189" + }, + { + "bbox": [ + 104, + 499, + 506, + 666 + ], + "type": "text", + "content": "). We first train this MLP classifier on the training dataset of Covertype using the L-BFGS algorithm with the cross-entropy loss function for 300 epochs, and then apply ZO optimization algorithms to fine-tune our trained MLP directly on the non-differentiable metrics (i.e., using these metrics as the new loss functions), including precision, recall, F1 score and Jaccard index. To obtain the results of ES, RGF, PRGF and our ZoRD algorithm in Sec. 5.4, we apply the same Adam optimizer with a learning rate of 0.2 (for precision and recall) or 0.01 (for F1 score and Jaccard index) and exponential decay rates of 0.9, 0.999. Note that standard BO algorithms (including TuRBO) fail to achieve any percentage improvements (i.e., achieving " + }, + { + "bbox": [ + 104, + 499, + 506, + 666 + ], + "type": "inline_equation", + "content": "0\\%" + }, + { + "bbox": [ + 104, + 499, + 506, + 666 + ], + "type": "text", + "content": " in the " + }, + { + "bbox": [ + 104, + 499, + 506, + 666 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 499, + 506, + 666 + ], + "type": "text", + "content": "-axis of Fig. 4) in this experiment according to our five independent runs, which is likely due to their aggressive exploration in the input domain of such a high dimension. 
In light of this, we do not include them in our comparison since all other methods are able to achieve certain improvements." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 678, + 338, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 338, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 338, + 689 + ], + "type": "text", + "content": "C.5 DERIVATIVE-FREE REINFORCEMENT LEARNING" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "Our derivative-free RL experiments aim to learn controllers (which outputs policies) that maximize the rewards/return for several environments in the OpenAI Gym (Brockman et al., 2016) without using true derivatives. Specifically, we need to optimize the parameters (i.e., " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": ") of our neural network" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 127, + 100, + 482, + 
159 + ], + "blocks": [ + { + "bbox": [ + 127, + 79, + 482, + 92 + ], + "lines": [ + { + "bbox": [ + 127, + 79, + 482, + 92 + ], + "spans": [ + { + "bbox": [ + 127, + 79, + 482, + 92 + ], + "type": "text", + "content": "Table 2: OpenAI Gym environment properties and their respective network dimensions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 100, + 482, + 159 + ], + "lines": [ + { + "bbox": [ + 127, + 100, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 127, + 100, + 482, + 159 + ], + "type": "table", + "html": "
AcrobotSwimmerLunarBipedalWalkerWalker2DHalfCheetah
|S|688241717
|A|324466
d213222244404356356
", + "image_path": "862b4420dc1d70ae57163d5472677955d97a1976dc25b374264b20bdb31e12b3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 176, + 206, + 257 + ], + "blocks": [ + { + "bbox": [ + 109, + 176, + 206, + 257 + ], + "lines": [ + { + "bbox": [ + 109, + 176, + 206, + 257 + ], + "spans": [ + { + "bbox": [ + 109, + 176, + 206, + 257 + ], + "type": "image", + "image_path": "5a7d1bbf305590bb2af7e8bba9a225b7fd0ad19266e15b18fe05afaa121eb7d6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 165, + 262, + 451, + 274 + ], + "lines": [ + { + "bbox": [ + 165, + 262, + 451, + 274 + ], + "spans": [ + { + "bbox": [ + 165, + 262, + 451, + 274 + ], + "type": "text", + "content": "(a) Results under various input dimension " + }, + { + "bbox": [ + 165, + 262, + 451, + 274 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 165, + 262, + 451, + 274 + ], + "type": "text", + "content": " and fixed Matérn " + }, + { + "bbox": [ + 165, + 262, + 451, + 274 + ], + "type": "inline_equation", + "content": "(\\nu = 2.5)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 206, + 176, + 304, + 257 + ], + "blocks": [ + { + "bbox": [ + 206, + 176, + 304, + 257 + ], + "lines": [ + { + "bbox": [ + 206, + 176, + 304, + 257 + ], + "spans": [ + { + "bbox": [ + 206, + 176, + 304, + 257 + ], + "type": "image", + "image_path": "e4db9ac701ea0d5db4b1bfa995d0e6745c35bf8b97f84fd737365695a804f4fb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 304, + 176, + 400, + 257 + ], + "blocks": [ + { + "bbox": [ + 304, + 176, + 400, + 257 + ], + "lines": [ + { + "bbox": [ + 304, + 176, + 400, + 257 + ], + "spans": [ + { + "bbox": [ + 304, + 176, + 400, + 257 + ], + "type": "image", + 
"image_path": "a4b6a796b2aa7640a090a6f445bfc96f611ca27f230047a4e08bfcc586707cd4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 400, + 176, + 501, + 257 + ], + "blocks": [ + { + "bbox": [ + 400, + 176, + 501, + 257 + ], + "lines": [ + { + "bbox": [ + 400, + 176, + 501, + 257 + ], + "spans": [ + { + "bbox": [ + 400, + 176, + 501, + 257 + ], + "type": "image", + "image_path": "144518b2e441f6af2314bb12e4117add41f33fa6303d6a9ae7ed7f4fabe6bca1.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 277, + 206, + 357 + ], + "blocks": [ + { + "bbox": [ + 109, + 277, + 206, + 357 + ], + "lines": [ + { + "bbox": [ + 109, + 277, + 206, + 357 + ], + "spans": [ + { + "bbox": [ + 109, + 277, + 206, + 357 + ], + "type": "image", + "image_path": "7a7ee12346d2472c051f97e3d677997abe714ac9b5f0ebbd49eb7b00d81a613c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 171, + 362, + 444, + 374 + ], + "lines": [ + { + "bbox": [ + 171, + 362, + 444, + 374 + ], + "spans": [ + { + "bbox": [ + 171, + 362, + 444, + 374 + ], + "type": "text", + "content": "(b) Results under various kernels and fixed input dimension " + }, + { + "bbox": [ + 171, + 362, + 444, + 374 + ], + "type": "inline_equation", + "content": "d = 80" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 206, + 277, + 304, + 357 + ], + "blocks": [ + { + "bbox": [ + 206, + 277, + 304, + 357 + ], + "lines": [ + { + "bbox": [ + 206, + 277, + 304, + 357 + ], + "spans": [ + { + "bbox": [ + 206, + 277, + 304, + 357 + ], + "type": "image", + "image_path": "812e9f1fba703004483cd973f28051ce4a725aa19248f8a12cf2fdfb4837a789.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 383, + 504, + 
417 + ], + "lines": [ + { + "bbox": [ + 104, + 383, + 504, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 383, + 504, + 417 + ], + "type": "text", + "content": "Figure 6: Comparison of the derivative estimation errors of our derived GP-based estimator (GP) and the FD estimator under various input dimensions and kernels. Similarly, each result is reported with the mean " + }, + { + "bbox": [ + 104, + 383, + 504, + 417 + ], + "type": "inline_equation", + "content": "\\pm" + }, + { + "bbox": [ + 104, + 383, + 504, + 417 + ], + "type": "text", + "content": " standard error from five independent runs." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 304, + 277, + 400, + 357 + ], + "blocks": [ + { + "bbox": [ + 304, + 277, + 400, + 357 + ], + "lines": [ + { + "bbox": [ + 304, + 277, + 400, + 357 + ], + "spans": [ + { + "bbox": [ + 304, + 277, + 400, + 357 + ], + "type": "image", + "image_path": "a52706ab4ce98f071ef857a5fc8f1a78ec831d2c37c97d0c67c3318e2152720b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 400, + 277, + 501, + 357 + ], + "blocks": [ + { + "bbox": [ + 400, + 277, + 501, + 357 + ], + "lines": [ + { + "bbox": [ + 400, + 277, + 501, + 357 + ], + "spans": [ + { + "bbox": [ + 400, + 277, + 501, + 357 + ], + "type": "image", + "image_path": "14c28dc5df8f111e0503498123550176d4b3695b56b3122a7b23680df813e4bd.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "text", + "content": "(MLP) controller with 2 hidden layers, where each hidden layer has 10 hidden neurons and one bias term. 
We adopt a " + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "inline_equation", + "content": "L_{\\infty}" + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "text", + "content": " constraint of " + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "inline_equation", + "content": "\\| x \\|_{\\infty} \\leq 1" + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "text", + "content": " on the parameters " + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "text", + "content": ". We use a softmax output layer for the policies that deal with discrete action spaces, and a tanh output layer for the policies that deal with continuous action spaces. The dimension of neural network parameters (represented as a column vector) " + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "text", + "content": " is determined by the dimensions of both the observation " + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "inline_equation", + "content": "|S|" + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "text", + "content": " and the action space " + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "inline_equation", + "content": "|A|" + }, + { + "bbox": [ + 104, + 440, + 504, + 506 + ], + "type": "text", + "content": " of an environment, as detailed in Tab. 2." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 512, + 506, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 512, + 506, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 506, + 644 + ], + "type": "text", + "content": "In order to search for policies that are robust to different random state initializations, we use the vectorized API of OpenAI Gym, and our observed function value " + }, + { + "bbox": [ + 104, + 512, + 506, + 644 + ], + "type": "inline_equation", + "content": "y(\\pmb{x})" + }, + { + "bbox": [ + 104, + 512, + 506, + 644 + ], + "type": "text", + "content": " given the network parameters " + }, + { + "bbox": [ + 104, + 512, + 506, + 644 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 512, + 506, + 644 + ], + "type": "text", + "content": " is an averaged return of 32 parallel environments. We also fix the seed of OpenAI Gym for all queries, which ensures that we are evaluating on a fixed set of 32 state initializations and that our results can be reproduced. We first initialize a sample of 500 points from a Latin Hypercube (McKay et al., 1979) to find a good initial input, and then proceed to apply ZO optimization algorithms (i.e., ES, RGF, PRGF, and our ZoRD) with the same query budget of 1000 on this initial input. For all these ZO optimization algorithms, we employ the same Adam optimizer with a learning rate of 1.0 and exponential decay rates of 0.9, 0.999. Considering the prohibitive noise in RL experiments, we use 300 queries from the optimization trajectory that has the smallest Euclidean distance with an input needing to be updated. Of note, we conduct 10 trials in total where each trial differs from each other by both the OpenAI Gym seed and the Latin Hypercube initializations." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 662, + 268, + 674 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 662, + 268, + 674 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 268, + 674 + ], + "type": "text", + "content": "APPENDIX D MORE RESULTS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 689, + 329, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 329, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 329, + 700 + ], + "type": "text", + "content": "D.1 MORE RESULTS ON DERIVATIVE ESTIMATION" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Besides the comparison in Fig. 2, we provide additional comparison between our derived GP-based estimator (6) and the FD estimator (2) under various input dimensions in Fig. 
6(a) and various kernels" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 130, + 83, + 284, + 205 + ], + "blocks": [ + { + "bbox": [ + 130, + 83, + 284, + 205 + ], + "lines": [ + { + "bbox": [ + 130, + 83, + 284, + 205 + ], + "spans": [ + { + "bbox": [ + 130, + 83, + 284, + 205 + ], + "type": "image", + "image_path": "506bd4061210fea2f99ae60d4e95c5529075217a130b04aba70996c846bf56fe.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 170, + 211, + 251, + 223 + ], + "lines": [ + { + "bbox": [ + 170, + 211, + 251, + 223 + ], + "spans": [ + { + "bbox": [ + 170, + 211, + 251, + 223 + ], + "type": "text", + "content": "(a) Ackley " + }, + { + "bbox": [ + 170, + 211, + 251, + 223 + ], + "type": "inline_equation", + "content": "(d = 40)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 313, + 83, + 474, + 205 + ], + "blocks": [ + { + "bbox": [ + 313, + 83, + 474, + 205 + ], + "lines": [ + { + "bbox": [ + 313, + 83, + 474, + 205 + ], + "spans": [ + { + "bbox": [ + 313, + 83, + 474, + 205 + ], + "type": "image", + "image_path": "3085ded5b3b99d83f3c343bb1f239baf5b01ccacfaac1f7ed929118e49db7035.jpg" + } + ] + } + ], + "index": 3, + 
"angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 211, + 438, + 223 + ], + "lines": [ + { + "bbox": [ + 364, + 211, + 438, + 223 + ], + "spans": [ + { + "bbox": [ + 364, + 211, + 438, + 223 + ], + "type": "text", + "content": "(b) Levy " + }, + { + "bbox": [ + 364, + 211, + 438, + 223 + ], + "type": "inline_equation", + "content": "(d = 40)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "lines": [ + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "type": "text", + "content": "Figure 7: Comparison of our ZoRD algorithm using different confidence thresholds " + }, + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "type": "text", + "content": " for its dynamic virtual updates, where the " + }, + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "type": "text", + "content": "-axis and the " + }, + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "type": "text", + "content": "-axis denote the number of function queries and the log-scaled optimality gap (i.e., " + }, + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "type": "inline_equation", + "content": "\\log (f(\\boldsymbol{x}_T) - f(\\boldsymbol{x}^*))" + }, + { + "bbox": [ + 104, + 232, + 506, + 268 + ], + "type": "text", + "content": ") achieved with this number of queries, respectively." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 294, + 506, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 294, + 506, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 294, + 506, + 483 + ], + "type": "text", + "content": "in Fig. 6(b) using the Ackley function. We adopt the same setting in Sec. 5.2. Interestingly, Fig. 6(a)(b) show that under various input dimensions and GP kernels, our derived GP-based estimator (6) is still able to achieve faster reduction rates compared with the FD estimator. Of note, all the function queries applied in our derived GP-based estimator is from the optimization trajectory whereas the FD estimator requires additional function queries for its derivative estimation. So, Fig. 6(a)(b) also show that our derived GP method is still able to achieve improved query efficiency for accurate derivative estimation than FD method under various input dimensions and GP kernels because our method avoids the requirement of additional queries for derivative estimation. Interestingly, the objective function (i.e., the Ackley function) is not truly sampled from the GPs based on these kernels. This therefore means that though we have assumed that we need the prior knowledge about the GP in which the objective function is sampled from (Sec. 2.1), such an assumption does not really need to be satisfied for our derived GP-based method to achieve accurate derivative estimation in practice. More interestingly, we notice that Matérn( " + }, + { + "bbox": [ + 104, + 294, + 506, + 483 + ], + "type": "inline_equation", + "content": "\\nu = 0.5" + }, + { + "bbox": [ + 104, + 294, + 506, + 483 + ], + "type": "text", + "content": " ) and SE kernel will achieve slightly worse derivative estimation, indicating that the choice of GP kernels may impact the quality of our derived GP-based derivative estimation. 
However, in practice, our derived GP method based on Matérn( " + }, + { + "bbox": [ + 104, + 294, + 506, + 483 + ], + "type": "inline_equation", + "content": "\\nu = 2.5" + }, + { + "bbox": [ + 104, + 294, + 506, + 483 + ], + "type": "text", + "content": " ) kernel, which has been widely adopted in our experiments, is already able to provide us with good derivative estimation for ZO optimization as confirmed by the results in our other experiments." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 504, + 334, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 504, + 334, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 334, + 514 + ], + "type": "text", + "content": "D.2 MORE RESULTS ON SYNTHETIC EXPERIMENTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 528, + 506, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 583 + ], + "type": "text", + "content": "In this section, we compare ZoRD with more baselines in Fig. 8. Notably, we mainly compare our ZoRD with CobBO (based on the code implementation provided by (Tan et al., 2021)) since CobBO generally performs better than other baselines, e.g., TPE, ATPE, and BADS according to (Tan et al., 2021). As shown in the results in Fig. 8, our ZoRD algorithm is still able to outperform the other benchmark BO algorithm (i.e., CobBO)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": "We then investigate the impacts of the dynamic virtual updates (Sec. 3.2) on our ZoRD algorithm. In particular, we apply the same setting in Appx. 
C.2 to optimize the Ackley and Levy function with " + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "inline_equation", + "content": "d = 40" + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": " under various confidence thresholds " + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": " for our dynamic virtual updates. Fig. 7 illustrates the results. As shown in both Fig. 7(a) and (b), our ZoRD algorithm using the technique of dynamic virtual updates (i.e., " + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "inline_equation", + "content": "c > 0" + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": ") can consistently achieve improved query efficiency compared with the one not using the technique of dynamic virtual updates (i.e., " + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "inline_equation", + "content": "c = 0" + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": "). This indicates the essence of dynamic virtual updates in helping improve the query efficiency of our ZoRD algorithm. Such a result actually corroborates our theoretical insights about virtual updates (Sec. 4.2). Remarkably, our ZoRD algorithm without the technique of dynamic virtual updates (i.e., " + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "inline_equation", + "content": "c = 0" + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": ") is still able to achieve both improved query efficiency and better converged performance compared with RGF and PRGF, which further verifies the superiority of our derived GP-based derivative estimation. More interestingly, both Fig. 7(a) and Fig. 
7(b) have verified that there indeed exists a trade-off for the confidence threshold " + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": " as we have discussed in Sec. 3.2: The confidence threshold " + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 589, + 506, + 732 + ], + "type": "text", + "content": " can not be overly" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 105, + 77, + 200, + 159 + ], + "blocks": [ + { + "bbox": [ + 105, + 77, + 200, + 159 + ], + "lines": [ + { + "bbox": [ + 105, + 77, + 200, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 77, + 200, + 159 + ], + "type": "image", + "image_path": "3a5ae77dfc64928502d64988d9d7ada9bb748fe47df977f62e176bb212458df3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 116, + 164, + 198, + 176 + ], + "lines": [ + { + "bbox": [ + 116, + 164, + 198, + 176 + ], + "spans": [ + { + "bbox": [ + 116, + 164, + 198, + 176 + ], + "type": "text", + "content": "(a) Ackley " + }, + { + "bbox": [ + 116, + 164, + 198, + 176 + ], + "type": "inline_equation", 
+ "content": "(d = 20)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 204, + 78, + 299, + 159 + ], + "blocks": [ + { + "bbox": [ + 204, + 78, + 299, + 159 + ], + "lines": [ + { + "bbox": [ + 204, + 78, + 299, + 159 + ], + "spans": [ + { + "bbox": [ + 204, + 78, + 299, + 159 + ], + "type": "image", + "image_path": "9dcc1dcc291916189e66fc43dd79d16af5ecf25b28883a1a34657e40834df2cf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 164, + 298, + 176 + ], + "lines": [ + { + "bbox": [ + 216, + 164, + 298, + 176 + ], + "spans": [ + { + "bbox": [ + 216, + 164, + 298, + 176 + ], + "type": "text", + "content": "(b) Ackley " + }, + { + "bbox": [ + 216, + 164, + 298, + 176 + ], + "type": "inline_equation", + "content": "(d = 40)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 302, + 77, + 402, + 159 + ], + "blocks": [ + { + "bbox": [ + 302, + 77, + 402, + 159 + ], + "lines": [ + { + "bbox": [ + 302, + 77, + 402, + 159 + ], + "spans": [ + { + "bbox": [ + 302, + 77, + 402, + 159 + ], + "type": "image", + "image_path": "44df5be0540cd5955577d9bb1ffcfc8ba342b6e05aa5f38561601fd685f8651b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 164, + 394, + 176 + ], + "lines": [ + { + "bbox": [ + 321, + 164, + 394, + 176 + ], + "spans": [ + { + "bbox": [ + 321, + 164, + 394, + 176 + ], + "type": "text", + "content": "(c) Levy " + }, + { + "bbox": [ + 321, + 164, + 394, + 176 + ], + "type": "inline_equation", + "content": "(d = 40)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 405, + 77, + 504, + 159 + ], + "blocks": [ + { + "bbox": [ + 405, + 77, + 504, + 159 + ], + "lines": [ + { + "bbox": [ + 405, + 77, + 504, + 159 + ], + 
"spans": [ + { + "bbox": [ + 405, + 77, + 504, + 159 + ], + "type": "image", + "image_path": "d00e1b941466c717ceb8cb1548c626398cb1884a8f1d10dd7737dd01e6fa80cd.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 421, + 164, + 499, + 176 + ], + "lines": [ + { + "bbox": [ + 421, + 164, + 499, + 176 + ], + "spans": [ + { + "bbox": [ + 421, + 164, + 499, + 176 + ], + "type": "text", + "content": "(d) Levy " + }, + { + "bbox": [ + 421, + 164, + 499, + 176 + ], + "type": "inline_equation", + "content": "(d = 100)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 131, + 234, + 284, + 358 + ], + "blocks": [ + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "lines": [ + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "text", + "content": "Figure 8: Additional comparison between our ZoRD and other baselines. The " + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "text", + "content": "-axis and " + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "text", + "content": "-axis denote the number of queries and log-scaled optimality gap (i.e., " + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "inline_equation", + "content": "\\log(f(x_T) - f(x^*))" + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "text", + "content": ") achieved after this number of queries. Each curve is the mean " + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "inline_equation", + "content": "\\pm" + }, + { + "bbox": [ + 104, + 185, + 504, + 220 + ], + "type": "text", + "content": " standard error from ten independent runs." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 234, + 284, + 358 + ], + "lines": [ + { + "bbox": [ + 131, + 234, + 284, + 358 + ], + "spans": [ + { + "bbox": [ + 131, + 234, + 284, + 358 + ], + "type": "image", + "image_path": "7c2290e275f975cb6222affe436898d17018e27c59f33dc602abe96897dd05b6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 158, + 365, + 269, + 376 + ], + "lines": [ + { + "bbox": [ + 158, + 365, + 269, + 376 + ], + "spans": [ + { + "bbox": [ + 158, + 365, + 269, + 376 + ], + "type": "text", + "content": "(a) Success rate on MNIST" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 315, + 235, + 468, + 359 + ], + "blocks": [ + { + "bbox": [ + 315, + 235, + 468, + 359 + ], + "lines": [ + { + "bbox": [ + 315, + 235, + 468, + 359 + ], + "spans": [ + { + "bbox": [ + 315, + 235, + 468, + 359 + ], + "type": "image", + "image_path": "3c4d39e576279a3ca3ed2be316b8d745d19a68c19d0f7abc0e89f0a7723b1731.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 339, + 365, + 461, + 376 + ], + "lines": [ + { + "bbox": [ + 339, + 365, + 461, + 376 + ], + "spans": [ + { + "bbox": [ + 339, + 365, + 461, + 376 + ], + "type": "text", + "content": "(b) Success rate on CIFAR-10" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "lines": [ + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "text", + "content": "Figure 9: Comparison of the success rate achieved by various ZO optimization algorithms on the 15 images selected from MNIST and CIFAFR-10 dataset. 
Note that the " + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "text", + "content": "-axis and the " + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "text", + "content": "-axis denote the number of queries and the success rate (within the range of [0, 1]) achieved after this number of queries, respectively." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "text", + "content": "small or excessively large in order to achieve the best query efficiency of our ZoRD algorithm, e.g., " + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "inline_equation", + "content": "c = 0.3" + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "text", + "content": " for Ackley (" + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "inline_equation", + "content": "d = 40" + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "inline_equation", + "content": "c = 0.4" + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "text", + "content": " for Levy (" + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "inline_equation", + "content": "d = 40" + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 490, + 375, + 501 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 490, + 375, + 501 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 375, + 501 + ], + "type": "text", + "content": "D.3 MORE RESULTS ON BLACK-BOX ADVERSARIAL ATTACK" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 511, + 506, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 506, + 601 + ], + "type": "text", + "content": "Besides the comparison in our Sec. 5.3, we also compare the success rate achieved by different ZO optimization algorithms on the 15 images selected from MNIST or CIFAR-10 in Fig. 9. Note that we adopt the same settings in Appx. C.3 for this comparison. Considering the large computational complexity of TuRBO-1/10 algorithm for hard-to-attack images3 which is usually undesirable in practice, we drop the comparison with them in this experiment. Fig. 9 shows that under the same query budget, our ZoRD algorithm is able to achieve considerably improved success rate over other ZO optimization algorithms. These results therefore further support the superior query efficiency of our ZoRD algorithm in real-world challenging problems." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 614, + 428, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 614, + 428, + 625 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 428, + 625 + ], + "type": "text", + "content": "D.4 MORE RESULTS FOR DERIVATIVE-FREE REINFORCEMENT LEARNING" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 635, + 504, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 504, + 691 + ], + "type": "text", + "content": "Recent years have also witnessed a surging interest in derivative-free reinforcement learning (Salimans et al., 2017; Qian and Yu, 2021), where ZO optimization algorithms are widely applied. In light of this, we also demonstrate the superiority of our ZoRD algorithm in the problem of derivative-free reinforcement learning. Specifically, we adopt the setting in Sec. C.5 to experiment in different RL environments. Tab. 
3 summarizes the comparison among different ZO optimization algorithms under" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 700, + 504, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 731 + ], + "type": "inline_equation", + "content": "^3" + }, + { + "bbox": [ + 104, + 700, + 504, + 731 + ], + "type": "text", + "content": "Bayesian optimization algorithms, including TuRBO-1/10, are widely known to suffer from the prohibitive computational complexity when they need a large number of function queries for optimization, e.g., " + }, + { + "bbox": [ + 104, + 700, + 504, + 731 + ], + "type": "inline_equation", + "content": "T > 1000" + }, + { + "bbox": [ + 104, + 700, + 504, + 731 + ], + "type": "text", + "content": " (Rasmussen and Williams, 2006)." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 122, + 504, + 186 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 504, + 114 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 504, + 114 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 504, + 114 + ], + "type": "text", + "content": "Table 3: Comparison of the rewards (larger is better) achieved by various ZO optimization algorithms in different RL environments. Each result is reported with the mean ± standard deviation from ten independent runs." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 122, + 504, + 186 + ], + "lines": [ + { + "bbox": [ + 107, + 122, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 107, + 122, + 504, + 186 + ], + "type": "table", + "html": "
AlgorithmAcrobotSwimmerLunarBipedalWalkerWalker2DHalfCheetah
ES-86.2±11.0176.0±56.8-94.7±24.4-34.7±27.3340.4±143.01042.4±753.9
RGF-83.0±5.6213.2±65.1-93.8±19.1-30.3±40.3368.4±223.11129.3±748.5
PRGF-86.3±9.9218.6±66.2-100.1±16.0-29.9±35.2344.6±152.31083.3±722.2
ZoRD-73.3±2.4280.5±77.6-45.1±38.312.9±37.8729.1±304.21950.5±576.1
", + "image_path": "750a762a54b247101c04fc2cb442cab47fc6f81ba92e9ab9fb4ad6ba03cef77a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 206, + 506, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 206, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 206, + 506, + 288 + ], + "type": "text", + "content": "the same query budget of 1000. As BO algorithms usually suffer from the prohibitive computational complexity for a large " + }, + { + "bbox": [ + 104, + 206, + 506, + 288 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 206, + 506, + 288 + ], + "type": "text", + "content": " (Rasmussen and Williams, 2006) and GLD has never been applied in RL, we mainly compare our ZoRD algorithm with ES, RGF and PRGF, which also belongs to the same type of ZO optimization algorithm: GD with estimated derivative. Remarkably, Tab. 3 shows that under the same query budget, our ZoRD algorithm can consistently enjoy improved performance (i.e., highest rewards) than the other ZO optimization algorithms in different RL environments. This further supports the superiority of our ZoRD algorithm to other FD-based ZO optimization algorithms." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 301, + 255, + 314 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 301, + 255, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 255, + 314 + ], + "type": "text", + "content": "APPENDIX E DISCUSSIONS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 327, + 311, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 311, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 311, + 339 + ], + "type": "text", + "content": "E.1 ZORD VS. 
FD-BASED ZO OPTIMIZATION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 348, + 506, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 506, + 525 + ], + "type": "text", + "content": "Of note, the novelty of our work in fact lies in its way of exploiting the GP assumption to help design an improved derivative estimation and hence an improved ZO optimization algorithm, which to the best of our knowledge has not been explored theoretically yet in the field of ZO optimization via GD with estimated derivative. That is, at this moment, it is still not known in the literature how existing FD methods can utilize such an assumption to achieve better derivative estimation (i.e., their derivative estimation quality will remain the same), even when they make the same assumption as us. In light of this, the comparison between our derived GP method and the FD method in Sec. 4 is not only necessary but also meaningful to show the advantage of exploiting such an assumption in ZO derivative estimation. Importantly, our empirical results further show that such an assumption is in fact not restrictive for our ZoRD to achieve compelling performance in practice. For example, our Fig. 2 and Fig. 6 have shown that our derived GP-based method is able to achieve smaller derivative estimation error than the FD method when the objective functions are not designed to be sampled from a GP with the kernel that we had applied for our derivative estimation. Moreover, the results in our Sec. 5.2, 5.3, 5.4 have shown that our ZoRD is capable of achieving competitive optimization performance for real-world optimization problems where the objective functions are also not designed to be sampled from a GP with the kernel that we had used for our ZoRD." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 530, + 506, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 530, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 530, + 506, + 620 + ], + "type": "text", + "content": "Meanwhile, the theoretical challenges of our work lie in the theoretical guarantee on the derivative estimation error of our unique derived GP-based method for any input in the domain as well as the convergence analysis based on such a unique derivative estimation, which to the best of our knowledge have not been studied in the literature. This means that our Thm. 1 and Thm. 2 have provided new developments in the analysis of gradient estimation error and our Thm. 3 will be the first convergence result for GD using our unique derivative estimation method. Interestingly, the bound in our Thm. 3 also improves over the standard ones from (Nesterov and Spokoiny, 2017; Liu et al., 2018b) in several aspects, as discussed in our Sec. 4.2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 633, + 196, + 644 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 633, + 196, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 196, + 644 + ], + "type": "text", + "content": "E.2 ZORD vs. BO" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "content": "Our ZoRD algorithm and standard BO algorithms (e.g., GP-UCB) have in fact applied the same GP assumption for their algorithm design. That is, however, where the similarity ends. Of note, our ZoRD exploits such an assumption to derive a specific GP (i.e., (4)) for derivative estimation, which is then employed for local exploitation via (projected) GD update. 
In contrast, BO algorithms utilize such an assumption to construct their acquisition functions for a global optimization that can trade off between exploitation and exploration. In practice, the exploration of BO algorithms is usually query-inefficient, especially for problems with high-dimensional input spaces, and therefore GD with" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 83, + 504, + 264 + ], + "blocks": [ + { + "bbox": [ + 108, + 83, + 504, + 264 + ], + "lines": [ + { + "bbox": [ + 108, + 83, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 108, + 83, + 504, + 264 + ], + "type": "image", + "image_path": "c2355c25734b157f01bf391f9fb3aac55ade306cbdf035b34d06cb49f0588384.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 275, + 506, + 310 + ], + "lines": [ + { + "bbox": [ + 104, + 275, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 506, + 310 + ], + "type": "text", + "content": "Figure 10: Comparison of local derivative estimation (in the input domain of [0, 3]) in our ZoRD and global function approximation (in the input domain of " + }, + { + "bbox": [ + 104, + 275, + 506, + 310 + ], + "type": "inline_equation", + "content": "[-6, 6]" + }, + { + "bbox": [ + 104, + 
275, + 506, + 310 + ], + "type": "text", + "content": ") in BO under various number of random function queries." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 342, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 506, + 464 + ], + "type": "text", + "content": "estimated derivatives (especially our ZoRD) is preferred to realize better optimization performances in these problems (see our Sec. 5.2). So, our ZoRD and BO algorithms belong to two different types of ZO optimization algorithms (i.e., GD-type vs. BO-type), where their theoretical analyses are in fact not comparable. In particular, GD-type and BO-type ZO optimization algorithms apply different metrics for their theoretical analyses, e.g., the derivative estimation error as well as the convergence to a stationary point (in the nonconvex case) for GD-type ZO optimization algorithms vs. the global asymptotic convergence in terms of the regret for BO-type ZO optimization algorithms. So, it is more reasonable to compare the theory (including the theoretical challenge, the new developments, and the novelty of the convergence result) of our ZoRD with other GD-type ZO optimization algorithms, e.g., the ones using FD methods for their derivative estimation (Nesterov and Spokoiny, 2017; Liu et al., 2018b), as what we have discussed in Sec. E.1." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 469, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 506, + 548 + ], + "type": "text", + "content": "In addition, in contrast to using the GP to model the objective function within the entire domain for global exploration in BO, our derived GP in ZoRD will be applied to estimate the derivative of the objective function for local exploitation by GD as shown in Sec. 3.1. As GD typically optimizes in a local region, our derived GP only needs to estimate the derivative locally, which is known to be much simpler than modeling the objective function within the entire domain in BO especially for objective functions in high-dimensional input spaces. In light of this, the derived GP for derivative estimation (4) in our ZoRD algorithm advances the standard GP in BO in the following aspects:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "text", + "content": "1. Improved Query Efficiency for Estimation. The derived GP in our ZoRD algorithm requires fewer function queries to provide accurate derivative estimation. We provide a visual example in Fig. 
10, in which we sample a one-dimensional function " + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "text", + "content": " from a GP prior " + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "inline_equation", + "content": "\\mathcal{GP}(0,k(x,x))" + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "text", + "content": " using the standard SE kernel and then randomly select the same number of queries from the input domain of " + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "inline_equation", + "content": "[-6,6]" + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "inline_equation", + "content": "[0,3]" + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "text", + "content": " for standard GP and our derived GP, respectively. As illustrated in Fig. 10, function in a local region (i.e., " + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "inline_equation", + "content": "x \\in [0,3]" + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "text", + "content": ") is usually smoother than its counterpart in the entire domain (i.e., " + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "inline_equation", + "content": "x \\in [-6,6]" + }, + { + "bbox": [ + 129, + 561, + 506, + 673 + ], + "type": "text", + "content": "). As a result, with only 4 function queries, our derived GP can already provide accurate estimation to the derivative of this objective function whereas standard GP requires more than 8 function queries to model this objective function accurately in the entire domain." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 129, + 687, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 687, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 129, + 687, + 507, + 734 + ], + "type": "text", + "content": "2. Reduced Computational Complexity. Comparing (3) and (5), both the derived GP for derivative estimation in our ZoRD algorithm and the standard GP in BO enjoy a computational complexity of " + }, + { + "bbox": [ + 129, + 687, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(n^3)" + }, + { + "bbox": [ + 129, + 687, + 507, + 734 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 129, + 687, + 507, + 734 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 129, + 687, + 507, + 734 + ], + "type": "text", + "content": " function queries. However, as a consequence of the improved query efficiency of our derived GP, it is able to require fewer function queries (i.e.," + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 140, + 82, + 506, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 82, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 140, + 82, + 506, + 117 + ], + "type": "text", + "content": "smaller " + }, + { + 
"bbox": [ + 140, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 140, + 82, + 506, + 117 + ], + "type": "text", + "content": ") for accurate derivative estimation and hence can enjoy a reduced computational complexity in practice especially when a large number of queries (e.g., " + }, + { + "bbox": [ + 140, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "n > 1000" + }, + { + "bbox": [ + 140, + 82, + 506, + 117 + ], + "type": "text", + "content": ") are applied to the standard GP in BO." + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 710, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 710, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 504, + 732 + ], + "type": "text", + "content": "As introduced in our Appx. C, 150 function queries for our derived GP can already help our ZoRD algorithm to achieve remarkable results in practice (refer to the experiments in our Sec. 5)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_content_list.json b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..062f9a58c2c9425bae5e2ee3e951af5bf99a044a --- /dev/null +++ b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_content_list.json @@ -0,0 +1,3242 @@ +[ + { + "type": "text", + "text": "$f$ -DM: A MULTI-STAGE DIFFUSION MODEL VIA PROGRESSIVE SIGNAL TRANSFORMATION", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Miguel Angel Bautista, Josh Susskind Apple", + "bbox": [ + 179, + 169, + 743, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{jgu32, szhai, yizhe_zhang, mbautistamartin, jsusskind}@apple.com", + "bbox": [ + 183, + 199, + 777, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 250, + 547, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Diffusion models (DMs) have recently emerged as SoTA tools for generative modeling in various domains. 
Standard DMs can be viewed as an instantiation of hierarchical variational autoencoders (VAEs) where the latent variables are inferred from input-centered Gaussian distributions with fixed scales and variances. Unlike VAEs, this formulation constrains DMs from changing the latent spaces and learning abstract representations. In this work, we propose $f$ -DM, a generalized family of DMs, which allows progressive signal transformation. More precisely, we extend DMs to incorporate a set of (hand-designed or learned) transformations, where the transformed input is the mean of each diffusion step. We propose a generalized formulation of DMs and derive the corresponding de-noising objective together with a modified sampling algorithm. As a demonstration, we apply $f$ -DM in image generation tasks with a range of functions, including down-sampling, blurring, and learned transformations based on the encoder of pretrained VAEs. In addition, we identify the importance of adjusting the noise levels whenever the signal is sub-sampled and propose a simple rescaling recipe. $f$ -DM can produce high-quality samples on standard image generation benchmarks like FFHQ, AFHQ, LSUN and ImageNet with better efficiency and semantic interpretation. Please check our videos at http://jiataogu.me/fdm/.", + "bbox": [ + 228, + 279, + 769, + 529 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d52258dad9bef03c15d21315a1afeaa9608ab3ca09dcc34a5e80b1a92a1f6bc2.jpg", + "image_caption": [ + "Figure 1: Visualization of reverse diffusion from $f$ -DMs with various signal transformations. $x_{t}$ is the denoised output, and $z_{s}$ is the input to the next diffusion step. We plot the first three channels of VQVAE latent variables. Low-resolution images are resized to $256^{2}$ for ease of visualization." 
+ ], + "image_footnote": [], + "bbox": [ + 200, + 537, + 790, + 796 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 864, + 338, + 880 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Diffusion probabilistic models (DMs, Sohl-Dickstein et al., 2015; Ho et al., 2020; Nichol & Dhariwal, 2021) and score-based (Song et al., 2021b) generative models have become increasingly popular", + "bbox": [ + 169, + 895, + 826, + 926 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "as the tools for high-quality image (Dhariwal & Nichol, 2021), video (Ho et al., 2022b), text-to-speech (Popov et al., 2021) and text-to-image (Rombach et al., 2021; Ramesh et al., 2022; Sahara et al., 2022a) synthesis. Despite the empirical success, conventional DMs are restricted to operate in the ambient space throughout the Gaussian noising process. On the other hand, common generative models like VAEs (Kingma & Welling, 2013) and GANs (Goodfellow et al., 2014; Karras et al., 2021) employ a coarse-to-fine process that hierarchically generates high-resolution outputs.", + "bbox": [ + 169, + 103, + 823, + 188 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We are interested in combining the best of the two worlds: the expressivity of DMs and the benefit of hierarchical features. To this end, we propose $f$ -DM, a generalized multi-stage framework of DMs to incorporate progressive transformations to the inputs. As an important property of our formulation, $f$ -DM does not make any assumptions about the type of transformations. This makes it compatible with many possible designs, ranging from domain-specific ones to generic neural networks. 
In this work, we consider representative types of transformations, including down-sampling, blurring, and neural-based transformations. What these functions share in common is that they allow one to derive increasingly more global, coarse, and/or compact representations, which we believe can lead to better sampling quality as well as reduced computation.", + "bbox": [ + 169, + 193, + 826, + 320 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Incorporating arbitrary transformations into DMs also brings immediate modeling challenges. For instance, certain transformations destroy the information drastically, and some might also change the dimensionality. For the former, we derive an interpolation-based formulation to smoothly bridge consecutive transformations. For the latter, we verify the importance of rescaling the noise level, and propose a resolution-agnostic signal-to-noise ratio (SNR) as a practical guideline for noise rescaling.", + "bbox": [ + 169, + 325, + 823, + 398 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Extensive experiments are performed on image generation benchmarks, including FFHQ, AFHQ, LSUN Bed/Church and ImageNet. $f$ -DMs consistently match or outperform the baseline performance, while requiring relatively less computing thanks to the progressive transformations. Furthermore, given a pre-trained $f$ -DM, we can readily manipulate the learned latent space, and perform conditional generation tasks (e.g., super-resolution) without additional training.", + "bbox": [ + 169, + 402, + 823, + 474 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 BACKGROUND", + "text_level": 1, + "bbox": [ + 171, + 493, + 326, + 508 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Diffusion Models (DMs, Sohl-Dickstein et al., 2015; Song & Ermon, 2019; Ho et al., 2020) are deep generative models which can be viewed as a special case of hierarchical VAEs (Kingma et al., 2021). 
In this paper, we consider diffusion in continuous time similar to Song et al. (2021b); Kingma et al. (2021).", + "bbox": [ + 169, + 523, + 485, + 622 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Given a datapoint $\\pmb{x} \\in \\mathbb{R}^N$ , a DM models time-dependent latent variables $\\pmb{z} = \\{\\pmb{z}_t | t \\in [0,1], \\pmb{z}_0 = \\pmb{x}\\}$ based on a fixed signal-noise schedule $\\{\\alpha_t, \\sigma_t\\}$ :", + "bbox": [ + 169, + 628, + 485, + 686 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nq (\\boldsymbol {z} _ {t} | \\boldsymbol {z} _ {s}) = \\mathcal {N} (\\boldsymbol {z} _ {t}; \\alpha_ {t | s} \\boldsymbol {z} _ {s}, \\sigma_ {t | s} ^ {2} I),\n$$\n", + "text_format": "latex", + "bbox": [ + 217, + 691, + 437, + 710 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\alpha_{t|s} = \\alpha_t / \\alpha_s$ , $\\sigma_{t|s}^2 = \\sigma_t^2 - \\alpha_{t|s}^2\\sigma_s^2$ , $s < t$ . It also defines the marginal distribution $q(\\pmb{z}_t|\\pmb{x})$ as:", + "bbox": [ + 169, + 717, + 483, + 761 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(\\boldsymbol {z} _ {t} | \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {t}; \\alpha_ {t} \\boldsymbol {x}, \\sigma_ {t} ^ {2} I\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 230, + 762, + 423, + 779 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By default, we assume the variance preserving", + "bbox": [ + 169, + 781, + 483, + 796 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "form (Ho et al., 2020). That is, $\\alpha_{t}^{2} + \\sigma_{t}^{2} = 1$ , $\\alpha_{0} = \\sigma_{1} = 1$ , and the signal-to-noise-ratio (SNR, $\\alpha_{t}^{2} / \\sigma_{t}^{2}$ ) decreases monotonically with $t$ . 
For generation, a parametric function $\\theta$ is optimized to reverse the diffusion process by denoising $z_{t} = \\alpha_{t}x + \\sigma_{t}\\epsilon$ to the clean input $x$ , with a weighted reconstruction loss $\\mathcal{L}_{\\theta}$ . For example, the \"simple loss\" proposed in Ho et al. (2020) is equivalent to weighting residuals by $\\omega_{t} = \\alpha_{t}^{2} / \\sigma_{t}^{2}$ :", + "bbox": [ + 169, + 795, + 823, + 866 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\theta} = \\mathbb {E} _ {\\boldsymbol {z} _ {t} \\sim q (\\boldsymbol {z} _ {t} | \\boldsymbol {x}), t \\sim [ 0, 1 ]} \\left[ \\omega_ {t} \\cdot \\| \\boldsymbol {x} _ {\\theta} (\\boldsymbol {z} _ {t}, t) - \\boldsymbol {x} \\| _ {2} ^ {2} \\right]. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 872, + 823, + 892 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In practice, $\\theta$ is parameterized as a U-Net (Ronneberger et al., 2015). As suggested in Ho et al. (2020), predicting the noise $\\epsilon_{\\theta}$ empirically achieves better performance than predicting $x_{\\theta}$ , where", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b144a42831e912bd3dbf6dc74671e2cca145cc9656e9e988b37a463741a9a3ad.jpg", + "image_caption": [ + "(a) DMS", + "Figure 2: (a) the standard DMs; (b) a bottom-up hierarchical VAEs; (c) our proposed $f$ -DM." 
+ ], + "image_footnote": [], + "bbox": [ + 500, + 530, + 563, + 739 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1af00fd075b312ca6768d248e8c3288e144842b186ef5a857c11a740be3f3b14.jpg", + "image_caption": [ + "(b) VAEs" + ], + "image_footnote": [], + "bbox": [ + 575, + 531, + 647, + 736 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c1e99745d8f0b4c81b2fc69809af8abb2e775847c79feff9b741fcf73378ce65.jpg", + "image_caption": [ + "(c) f-DM (Ours)" + ], + "image_footnote": [], + "bbox": [ + 663, + 531, + 816, + 737 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "$\\pmb{x}_{\\theta}(\\pmb{z}_t,t) = (\\pmb{z}_t - \\sigma_t\\pmb{\\epsilon}_{\\theta}(\\pmb{z}_t,t)) / \\alpha_t$ . Sampling from such a learned model can be performed from ancestral sampling (DDPM, Ho et al., 2020), or a deterministic DDIM sampler (Song et al., 2021a). 
Starting from $\\pmb{z}_1\\sim \\mathcal{N}(\\mathbf{0},I)$ , a sequence of timesteps $1 = t_0 > \\ldots >t_N = 0$ are sampled for iterative generation, and we can readily summarize both methods for each step as follows:", + "bbox": [ + 169, + 103, + 823, + 160 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {z} _ {s} = \\alpha_ {s} \\cdot \\boldsymbol {x} _ {\\theta} (\\boldsymbol {z} _ {t}) + \\sqrt {\\sigma_ {s} ^ {2} - \\eta^ {2} \\bar {\\sigma} ^ {2}} \\cdot \\boldsymbol {\\epsilon} _ {\\theta} (\\boldsymbol {z} _ {t}) + \\eta \\bar {\\sigma} \\cdot \\boldsymbol {\\epsilon}, \\quad \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (\\boldsymbol {0}, I), \\quad s < t, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 167, + 823, + 185 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\bar{\\sigma} = \\sigma_s\\sigma_{t|s} / \\sigma_t$ , and $\\eta$ controls the proportion of additional noise. (i.e., DDIM $\\eta = 0$", + "bbox": [ + 169, + 191, + 777, + 208 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As the score function $\\epsilon_{\\theta}$ is defined in the ambient space, it is clear that all the latent variables $z$ are forced to be the same shape as the input data $\\pmb{x}$ $(\\mathbb{R}^N)$ . This not only leads to inefficient training, especially for steps with high noise level (Jing et al., 2022), but also makes DMs hard to learn abstract and semantically meaningful latent space as pointed out by Preechakul et al. (2022).", + "bbox": [ + 169, + 212, + 823, + 270 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 METHOD", + "text_level": 1, + "bbox": [ + 171, + 289, + 282, + 304 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we introduce $f$ -DM, an extended family of DMs to enable diffusion on transformed signals, in a way similar to a standard hierarchical VAE. 
We start by introducing the definition of the proposed multi-stage formulation with general signal transformations, followed by modified training and generation algorithms (Section 3.1). Then, we specifically apply $f$ -DM with three categories of transformations (Section 3.2).", + "bbox": [ + 169, + 320, + 823, + 390 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 MULTI-STAGE DIFFUSION", + "text_level": 1, + "bbox": [ + 171, + 407, + 393, + 420 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Signal Transformations We consider a sequence of deterministic functions $\\pmb{f} = \\{f_0, \\dots, f_K\\}$ , where $f_0 \\dots f_k$ progressively transforms the input signal $\\pmb{x} \\in \\mathbb{R}^N$ into $\\pmb{x}^k = f_{0:k}(\\pmb{x}) \\in \\mathbb{R}^{M_k}$ . We assume $\\pmb{x}^0 = f_0(\\pmb{x}) = \\pmb{x}$ . In principle, $\\pmb{f}$ can be any function. In this work, we focus on transformations that gradually destroy the information contained in $\\pmb{x}$ (e.g., down-sampling), leading towards more compact representations. Without loss of generality, we assume $M_0 \\geq M_1 \\geq \\dots \\geq M_K$ . A sequence of inverse mappings $\\pmb{g} = \\{g_0, \\dots, g_{K-1}\\}$ is used to connect a corresponding sequence of pairs of consecutive spaces. Specifically, we define $\\hat{\\pmb{x}}_k$ as:", + "bbox": [ + 169, + 431, + 823, + 532 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\boldsymbol {x}} ^ {k} := \\left\\{ \\begin{array}{l l} g _ {k} \\left(f _ {k + 1} \\left(\\boldsymbol {x} ^ {k}\\right)\\right) \\approx \\boldsymbol {x} ^ {k}, & \\text {i f} k < K, \\\\ \\boldsymbol {x} ^ {k}, & \\text {i f} k = K. \\end{array} \\right. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 537, + 823, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The approximation of Equation 3 ( $k < K$ ) is not necessarily (and sometimes impossibly) accurate. 
For instance, $f_{k}$ downsamples an input image $\\pmb{x}$ from $128^{2}$ into $64^{2}$ with average pooling, and $g_{k}$ can be a bilinear interpolation that upsamples back to $128^{2}$ , which is a lossy reconstruction.", + "bbox": [ + 169, + 579, + 823, + 622 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The definition of $f$ and $g$ can be seen as a direct analogy of the encoder $(\\phi)$ and decoder $(\\theta)$ in hierarchical VAEs (see Figure 2 (b)). However, there are still major differences: (1) the VAE encoder/decoder is stochastic, and the encoder's outputs are regularized by the prior. In contrast, $f$ and $g$ are deterministic, and the encoder output $x^{K}$ does not necessarily follow a simple prior; (2) VAEs directly use the decoder for generation, while $f, g$ are fused in the diffusion steps of $f$ -DM.", + "bbox": [ + 169, + 628, + 823, + 700 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Forward Diffusion We extend the continuous-time DMs for signal transformations. We split the diffusion time $0 \\to 1$ into $K + 1$ stages, where for each stage, a partial diffusion process is performed. More specifically, we define a set of time boundaries $0 = \\tau_0 < \\tau_1 < \\ldots < \\tau_K < \\tau_{K + 1} = 1$ , and for $t \\in [0,1]$ , the latent $\\mathbf{z}_t$ has the following marginal probability:", + "bbox": [ + 169, + 707, + 823, + 765 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(\\boldsymbol {z} _ {t} \\mid \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {t}; \\alpha_ {t} \\boldsymbol {x} _ {t}, \\sigma_ {t} ^ {2} I\\right), \\quad \\text {w h e r e} \\boldsymbol {x} _ {t} = \\frac {\\left(t - \\tau_ {k}\\right) \\hat {\\boldsymbol {x}} ^ {k} + \\left(\\tau_ {k + 1} - t\\right) \\boldsymbol {x} ^ {k}}{\\tau_ {k + 1} - \\tau_ {k}}, \\quad \\tau_ {k} \\leq t < \\tau_ {k + 1}. 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 771, + 823, + 805 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As listed above, $\\pmb{x}_t$ is the interpolation of $\\pmb{x}^k$ and its approximation $\\hat{\\pmb{x}}^k$ when $t$ falls in stage $k$ . A simple illustration for the relationship of $\\pmb{x}_t, \\hat{\\pmb{x}}^k, \\pmb{x}^k$ and $z_t$ is shown in Figure 10. We argue that interpolation is crucial as it creates a continuous transformation that slowly corrupts information inside each stage. In this way, such change can be easily reversed by our model. Also, it is nontrivial to find the optimal stage schedule $\\tau_k$ for each model as it highly depends on how much the information is destroyed in each stage $f_k$ . In this work, we tested two heuristics: (1) linear schedule $\\tau_k = k / (K + 1)$ ; (2) cosine schedule $\\tau_k = \\cos(1 - k / (K + 1))$ . Note that the standard DMs can be seen as a special case of our $f$ -DM when there is only one stage ( $K = 0$ ).", + "bbox": [ + 169, + 811, + 825, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f9c66ed08214f298dcca5a8aa43a655c3a55afef525bded7f4d153b4d4823ca6.jpg", + "image_caption": [ + "Figure 3: Left: an illustration of the proposed SNR computation for different sampling rates; Right: the comparison of rescaling the noise level for progressive down-sampling. Without noise rescaling, the diffused images in low-resolution quickly become too noisy to distinguish the underline signal." + ], + "image_footnote": [], + "bbox": [ + 212, + 106, + 781, + 275 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Equation 4 does not guarantee a Markovian transition. 
Nevertheless, our formulation only needs $q(\\pmb{z}_t | \\pmb{z}_s, \\pmb{x})$ , which has the following simple form focusing on diffusion steps within a stage:", + "bbox": [ + 169, + 335, + 823, + 367 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(\\boldsymbol {z} _ {t} \\mid \\boldsymbol {z} _ {s}, \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {t}; \\alpha_ {t | s} \\boldsymbol {z} _ {s} + \\alpha_ {t} \\cdot \\left(\\boldsymbol {x} _ {t} - \\boldsymbol {x} _ {s}\\right), \\sigma_ {t | s} ^ {2} I\\right), \\quad \\tau_ {k} \\leq s < t < \\tau_ {k + 1}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 372, + 823, + 392 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "From Equation 5, we further re-write $\\boldsymbol{x}_t - \\boldsymbol{x}_s = -\\delta_t \\cdot (t - s) / (t - \\tau_k)$ , where $\\delta_t = \\boldsymbol{x}^k - \\boldsymbol{x}_t$ is the signal degradation. Equation 5 also indicates that the reverse diffusion distribution $q(\\boldsymbol{z}_s | \\boldsymbol{z}_t, \\boldsymbol{x}) \\propto q(\\boldsymbol{z}_t | \\boldsymbol{z}_s, \\boldsymbol{x}) q(\\boldsymbol{z}_s | \\boldsymbol{x})$ can be written as the function of $\\boldsymbol{x}_t$ and $\\delta_t$ which will be our learning objectives.", + "bbox": [ + 169, + 398, + 823, + 444 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Boundary Condition To enable diffusion across stages, we need the transition at stage boundaries $\\tau_{k}$ . More specifically, when the step approaches the boundary $\\tau^{-}$ (the left limit of $\\tau$ ), the transition $q(z_{\\tau} | z_{\\tau^{-}}$ , $\\pmb{x}$ ) should be as deterministic (ideally invertible) & smooth as possible to minimize information loss. 
First, we can easily expand $z_{\\tau}$ and $z_{\\tau^{-}}$ as the signal and noise combination:", + "bbox": [ + 169, + 450, + 823, + 507 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\text {B e f o r e :} \\quad \\boldsymbol {z} _ {\\tau^ {-}} = \\alpha_ {\\tau^ {-}} \\cdot \\boldsymbol {x} _ {\\tau^ {-}} + \\sigma_ {\\tau^ {-}} \\cdot \\boldsymbol {\\epsilon}, p (\\boldsymbol {\\epsilon}) = \\mathcal {N} (\\boldsymbol {0}, I),\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 513, + 666, + 529 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nA f t e r: \\quad z _ {\\tau} = \\alpha_ {\\tau} \\cdot x _ {\\tau} + \\sigma_ {\\tau} \\cdot \\zeta (\\epsilon), p (\\zeta (\\epsilon)) = \\mathcal {N} (0, I). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 525, + 823, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Based on definition, $\\pmb{x}_{\\tau^{-}} = \\hat{\\pmb{x}}^{k - 1} = g(\\pmb{x}^k) = g(\\pmb{x}_{\\tau})$ , which means the signal part is invertible. Therefore we only need to find $\\zeta$ . Under the initial assumption of $M_{k} \\leq M_{k - 1}$ , this can be achieved easily by dropping elements from $\\epsilon$ . Take down-sampling $(M_{k - 1} = 4M_k)$ as an example. We can directly drop 3 out of every $2 \\times 2$ values from $\\epsilon$ . More details are included in Appendix A.4.", + "bbox": [ + 169, + 554, + 823, + 612 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The second requirement of a smooth transition is not as straightforward as it looks, which asks the \"noisiness\" of latents $z$ to remain unchanged across the boundary. We argue that the conventional measure – the signal-to-noise-ratio (SNR) – in DM literature is not compatible with resolution change as it averages the signal/noise power element-wise. 
In this work, we propose a generalized resolution-agnostic SNR by viewing data as points sampled from a continuous field:", + "bbox": [ + 169, + 618, + 826, + 688 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {S N R} (\\boldsymbol {z}) = \\frac {\\mathbb {E} _ {\\Omega \\sim I} \\| \\mathbb {E} _ {i \\sim \\Omega} \\operatorname {S I G N A L} (\\boldsymbol {z} _ {i}) \\| ^ {2}}{\\mathbb {E} _ {\\Omega \\sim I} \\| \\mathbb {E} _ {i \\sim \\Omega} \\operatorname {N O I S E} (\\boldsymbol {z} _ {i}) \\| ^ {2}}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 694, + 823, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $I$ is the data range, SIGNAL represents the real data value (such as image pixels), and NOISE is the unstructured Gaussian noise added to the data. $\\Omega$ is a patch relative to $I$ , which can be any size as long as it is invariant to different sampling rates (resolutions). As shown in Figure 3 (left), we can obtain a reliable measure of noisiness by averaging the signal/noise inside patches. We derive $\\alpha_{\\tau}, \\sigma_{\\tau}$ from $\\alpha_{\\tau^{-}}, \\sigma_{\\tau^{-}}$ for any transformations by forcing $\\mathrm{SNR}(z_{\\tau}) = \\mathrm{SNR}(z_{\\tau^{-}})$ under this new definition. 
Specifically, if dimensionality change is solely caused by the change of sampling rate (e.g., down-sampling, average RGB channels, deconvolution), we can get the following relation:", + "bbox": [ + 169, + 733, + 826, + 832 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {\\tau} ^ {2} / \\sigma_ {\\tau} ^ {2} = d _ {k} \\cdot \\gamma_ {k} \\cdot \\alpha_ {\\tau -} ^ {2} / \\sigma_ {\\tau -} ^ {2}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 835, + 823, + 854 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $d_{k} = M_{k - 1} / M_{k}$ is the total dimension change, and $\\gamma_{k} = \\mathbb{E}||\\hat{\\pmb{x}}^{k - 1}||^{2} / \\mathbb{E}||\\pmb{x}^{k}||^{2}$ is the change of signal power. For example, we have $d_{k} = 4,\\gamma_{k}\\approx 1$ for down-sampling. Following Equation 8, the straightforward rule is to rescale the magnitude of the noise, and keep the signal part unchanged:", + "bbox": [ + 169, + 857, + 823, + 901 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "For simplicity, we omit the subscript $k$ for $\\tau_{k}$ in the following paragraphs.", + "bbox": [ + 191, + 909, + 637, + 924 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Reverse diffusion for image generation using $f$ -DM" + ], + "code_body": "Input: model $\\theta ,f,g$ stage schedule $\\{\\tau_0,\\dots ,\\tau_K\\}$ , rescaled noise schedule functions $\\alpha (.)$ $\\sigma (.)$ step-size $\\Delta t$ $\\epsilon_{\\mathrm{full}}\\sim \\mathcal{N}(0,I)$ ,DDPM ratio $\\eta$ \n1 Initialize $z$ from $\\epsilon_{\\mathrm{full}}$ \n2 for $(k = K;k\\geq 0;k = k - 1)$ do \n3 for $(t = \\tau_{k + 1};t > 
\\tau_k;t = t - \\Delta t,s = t - \\Delta t)$ do \n4 $\\begin{array}{r}\\pmb {\\epsilon}_{\\theta},\\pmb {\\delta}_{\\theta} = \\theta (\\pmb {z},t);\\quad \\pmb {x}_{\\theta} = (\\pmb {z} - \\sigma (t)\\cdot \\pmb {\\epsilon}_{\\theta}) / \\alpha (t);\\\\ \\text{if} s > \\tau_{k}\\text{then}\\\\ \\big{\\lfloor}\\pmb {z} = \\alpha (s)\\cdot (\\pmb {x}_{\\theta} + \\pmb {\\delta}_{\\theta}\\cdot (t - s) / (t - \\tau_{k})) + \\sqrt{\\sigma^{2}(s) - \\eta^{2}\\bar{\\sigma}^{2}}\\cdot \\pmb {\\epsilon}_{\\theta} + \\eta \\bar{\\sigma}\\cdot \\pmb {\\epsilon},\\pmb {\\epsilon}\\sim \\mathcal{N}(\\pmb {0},I)\\\\ \\end{array}$ \n5 \n6 if $k > 0$ then \n7 Re-sample noise $\\epsilon_{\\mathrm{rs}}$ from $\\epsilon_{\\theta}$ and $\\epsilon_{\\mathrm{full}}$ . $z = \\alpha (\\tau_k)\\cdot g_k(x_\\theta) + \\sigma (\\tau_k)\\cdot \\epsilon_{\\mathrm{rs}}$ \n9 return $x_{\\theta}$", + "bbox": [ + 158, + 125, + 772, + 290 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$\\alpha \\leftarrow \\alpha, \\sigma \\leftarrow \\sigma / \\sqrt{d_k}$ , which we refer as signal preserved (SP) rescaling. Note that, to ensure the noise schedule is continuous over time and close to the original schedule, such rescaling is applied to the noises of the entire stage, and will be accumulated when multiple transformations are used. As the comparison shown in Figure 3, the resulting images are visually closer to the standard DM. However, the variance of $z_{t}$ becomes very small, especially when $t \\to 1$ , which might be hard for the neural networks to distinguish. Therefore, we propose the variance preserved (VP) alternative to further normalize the rescaled $\\alpha, \\sigma$ so that $\\alpha^2 + \\sigma^2 = 1$ . We show the visualization in Figure 3 (b).", + "bbox": [ + 169, + 300, + 826, + 401 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training We train a neural network $\\theta$ to denoise. We also show the training pipeline in Figure 10. 
In $f$ -DM, noise is caused by two factors: (1) the perturbation $\\epsilon$ from noise injection; (2) the degradation $\\delta$ due to signal transformation. Thus, we propose to predict $\\boldsymbol{x}_{\\theta}$ and $\\delta_{\\theta}$ jointly, which simultaneously remove both noises from $\\boldsymbol{z}_t$ with a \"double reconstruction\" loss:", + "bbox": [ + 169, + 406, + 823, + 463 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\theta} = \\mathbb {E} _ {\\boldsymbol {z} _ {t} \\sim q (\\boldsymbol {z} _ {t} | \\boldsymbol {x}), t \\sim [ 0, 1 ]} \\left[ \\omega_ {t} \\cdot \\left(\\| \\boldsymbol {x} _ {\\theta} (\\boldsymbol {z} _ {t}, t) - \\boldsymbol {x} _ {t} \\| _ {2} ^ {2} + \\| \\boldsymbol {\\delta} _ {\\theta} (\\boldsymbol {z} _ {t}, t) - \\boldsymbol {\\delta} _ {t} \\| _ {2} ^ {2}\\right) \\right], \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 464, + 823, + 483 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the denoised output is $\\pmb{x}_{\\theta}(\\pmb{z}_t, t) + \\delta_{\\theta}(\\pmb{z}_t, t)$ . Unlike standard DMs, the denoising goals are the transformed signals of each stage rather than the final real images, which are generally simpler targets to recover. The same as standard DMs, we also choose to predict $\\epsilon_{\\theta}$ , and compute $\\pmb{x}_{\\theta} = (z_t - \\sigma_t \\pmb{\\epsilon}_{\\theta}) / \\alpha_t$ . We adopt the same U-Net architecture for all stages, where input $\\pmb{z}_t$ will be directed to the corresponding inner layer based on spatial resolutions (see Appendix Figure 11 for details).", + "bbox": [ + 169, + 484, + 823, + 555 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Unconditional Generation We present the generation steps in Algorithm 1, where $\\boldsymbol{x}_t$ and $\\delta_t$ are replaced by model's predictions $\\boldsymbol{x}_{\\theta}$ , $\\delta_{\\theta}$ . 
Thanks to the interpolation formulation (Equation 4), generation is independent of the transformations $f$ . Only the inverse mappings $g$ – which might be simple and easy to compute – is needed to map the signals at boundaries. This brings flexibility and efficiency to learning complex or even test-time inaccessible transformations. In addition, Algorithm 1 includes a \"noise-resampling step\" for each stage boundary, which is the reverse process for $\\zeta(\\epsilon)$ in Equation 6. While $\\zeta$ is deterministic, the reverse process needs additional randomness. For instance, if $\\zeta$ drops elements in the forward process, then the reverse step should inject standard Gaussian noise back to the dropped locations. Because we assume $M_0 \\geq \\ldots \\geq M_K$ , we propose to sample a full-size noise $\\epsilon_{\\mathrm{full}}$ before generation, and gradually adding subsets of $\\epsilon_{\\mathrm{full}}$ to each stage. Thus, $\\epsilon_{\\mathrm{full}}$ encodes multi-scale information similar to RealNVP (Dinh et al., 2016).", + "bbox": [ + 169, + 561, + 825, + 715 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Conditional Generation Given an unconditional $f$ -DM, we can do conditional generation by replacing the denoised output $\\pmb{x}_{\\theta}$ with any condition $\\pmb{x}_c$ at a suitable time $(T)$ , and starting diffusion from $T$ . For example, suppose $\\pmb{f}$ is downsample, and $\\pmb{x}_c$ is a low-resolution image, $f$ -DM enables super-resolution (SR) without additional training. To achieve that, it is critical to initialize $\\pmb{z}_T$ , which implicitly asks $z_{T} \\approx \\alpha_{T}\\pmb{x}_{c} + \\sigma_{T}\\pmb{\\epsilon}_{\\theta}(\\pmb{z}_{T})$ . In practice, we choose $T$ to be the corresponding stage boundary, and initialize $\\pmb{z}$ by adding random noise $\\sigma_T\\pmb{\\epsilon}$ to $\\alpha_{T}\\pmb{x}_{c}$ . 
A gradient-based method is used to iteratively update $z_{T} \\gets z_{T} - \\lambda \\nabla_{z_{T}}\\| \\pmb{x}_{\\theta}(z_{T}) - \\pmb{x}_{c}\\|_{2}^{2}$ for a few steps before the diffusion starts.", + "bbox": [ + 169, + 723, + 823, + 823 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 APPLICATIONS ON VARIOUS TRANSFORMATIONS", + "text_level": 1, + "bbox": [ + 171, + 835, + 555, + 849 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With the definition in Section 3.1, next we show $f$ -DM applied with different transformations. In this paper, we consider the following three categories of transformations.", + "bbox": [ + 169, + 862, + 823, + 892 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Downsampling. As the motivating example in Section 3.1, we let $\\pmb{f}$ a sequence of downsample operations that transforms a given image (e.g., $256^2$ ) progressively down to $16^2$ , where each $f_k(\\cdot)$", + "bbox": [ + 169, + 895, + 823, + 926 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "reduces the length by 2, and correspondingly $g_{k}(.)$ upsamples by 2. Thus, the generation starts from a low-resolution noise and progressively performs super-resolution. We denote the model as $f$ -DM-DS, where $d_{k} = 4$ , $\\gamma_{k} = 1$ in Equation 8 and $K = 4$ for $256^{2}$ images.", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Blurring. $f$ -DM also supports general blur transformations. 
Unlike recent works (Rissanen et al., 2022; Hoogeboom & Salimans, 2022) that focuses on continuous-time blur (heat dissipation), Equation 4 can be seen as an instantiation of progressive blur function if we treat $\\hat{\\pmb{x}}^k$ as a blurred version of $\\pmb{x}^k$ . This design brings more flexibility in choosing any kind of blurring functions, and using the blurred versions as stages. In this paper, we experiment with two types of blurring functions. (1) $f$ -DM-Blur-U: utilizing the same downsample operators as $f$ -DM-DS, while always up-sampling the images back to the original sizes; (2) $f$ -DM-Blur-G: applying standard Gaussian blurring kernels following Rissanen et al. (2022). In both cases, we use $g_{k}(\\pmb{x}) = \\pmb{x}$ . As the dimension is not changed, no rescaling and noise resampling is required.", + "bbox": [ + 169, + 148, + 826, + 276 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Image $\\rightarrow$ Latent Trans. We further consider diffusion with learned non-linear transformations such as VAEs (see Figure 2 (b), $f$ : VAE encoder, $g$ : VAE decoder). By inverting such an encoding process, we are able to generate data from low-dimensional latent space similar to Rombach et al. (LDM, 2021). As a major difference, LDM operates only on the latent variables, while $f$ -DM learns diffusion in the latent and image spaces jointly. Because of this, our performance will not be bounded by the quality of the VAE decoder. In this paper, we consider VQVAE (Van Den Oord et al., 2017) together with its GAN variant (VQGAN, Esser et al., 2021). For both cases, we transform $256^2 \\times 3$ images into $32^2 \\times 4$ (i.e., $d_k = 48$ ) latent space. The VQVAE encoder/decoder is trained on ImageNet (Deng et al., 2009), and is frozen for the rest of the experiments. For $f$ -DM-VQGAN, we directly take the checkpoint provided by Rombach et al. (2021). 
Besides, we need to tune $\\gamma_k$ separately for each encoder due to the change in signal magnitude.", + "bbox": [ + 169, + 280, + 826, + 434 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 449, + 328, + 465 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 EXPERIMENTAL SETTINGS", + "text_level": 1, + "bbox": [ + 171, + 479, + 401, + 494 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. We evaluate $f$ -DMs on five commonly used benchmarks testing generation on a range of domains: FFHQ (Karras et al., 2019), AFHQ (Choi et al., 2020), LSUN Church & Bed (Yu et al., 2015), and ImageNet (Deng et al., 2009). All images are center-cropped and resized to $256 \\times 256$ .", + "bbox": [ + 169, + 506, + 826, + 549 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training Details. We implement the three types of transformations with the same architecture and hyper-parameters except for the stage-specific adapters. We adopt a lighter version of ADM (Dhariwal & Nichol, 2021) as the main U-Net architecture. For all experiments, we adopt the same training scheme using AdamW (Kingma & Ba, 2014) optimizer with a learning rate of $2\\mathrm{e} - 5$ and an EMA decay factor of 0.9999. We set the weight $\\omega_{t} = \\mathrm{sigmoid}(-\\log (\\alpha_{t}^{2} / \\sigma_{t}^{2}))$ following P2-weighting (Choi et al., 2022). The cosine noise schedule $\\alpha_{t} = \\cos (0.5\\pi t)$ is adopted for diffusion working in the $256^2\\times 3$ image space. As proposed in Equation 8, noise rescaling (VP by default) is applied for $f$ -DMs when the resolutions change. All our models are trained with batch-size 32 images for 500K (FFHQ, AFHQ, LSUN Church), 1.2M (LSUN Bed) and 2.5M (ImageNet) iterations, respectively.", + "bbox": [ + 169, + 554, + 826, + 681 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines & Evaluation. 
We compare $f$ -DMs against a standard DM (DDPM, Ho et al., 2020) on all five datasets. To ensure a fair comparison, we train DDPM following the same settings and continuous-time formulation as our approaches. We also include transformation-specific baselines: (1) we re-implement the cascaded DM (Cascaded, Ho et al., 2022a) to adapt $f$ -DM-DS setup from $16^{2}$ progressively to $256^{2}$ , where for each stage a separate DM is trained conditioned on the consecutive downsampled image; (2) we re-train a latent-diffusion model (LDM, Rombach et al., 2021) on the extracted latents from our pretrained VQVAE; (3) to compare with $f$ -DM-Blur-G, we include the scores and synthesised examples of IHDM (Rissanen et al., 2022). We set 250 timesteps $(\\Delta t = 0.004)$ for $f$ -DMs and the baselines with $\\eta = 1$ (Algorithm 1). We use Frechet Inception Distance (FID, Heusel et al., 2017) and Precision/Recall (PR, Kynkänniemi et al., 2019) as the measures of visual quality, based on 50K samples and the entire training set.", + "bbox": [ + 169, + 686, + 826, + 840 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 RESULTS", + "text_level": 1, + "bbox": [ + 171, + 856, + 279, + 871 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative Comparison To demonstrate the capability of handling various complex datasets, Figure 4 (↑) presents an uncurated set of images generated by $f$ -DM-DS. We show more samples from all types of $f$ -DMs in the Appendix E.4. 
We also show a comparison between $f$ -DMs and the", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5e41dbe2f3531f29dff630459a8e64a18dadcdc494c54143f08abb89f926491f.jpg", + "image_caption": [ + "Figure 4: $\\uparrow$ Random samples from $f$ -DM-DS trained on various datasets; $\\downarrow$ Comparison of $f$ -DMs and the corresponding baselines under various transformations. Best viewed when zoomed in. All faces presented are synthesized by the models, and are not real identities." + ], + "image_footnote": [], + "bbox": [ + 173, + 102, + 823, + 452 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/15942fd9c2448822bd48ce6082824ac8f7e631e97daa8271f054badea12175fc.jpg", + "table_caption": [ + "Table 1: Quantitative comparisons on various datasets. The speed compared to DDPM is calculated with $\\mathrm{bsz} = 1$ on CPU. Best performing DMs are shown in bold." + ], + "table_footnote": [], + "table_body": "
ModelsFID↓P↑R↑FID↓P↑R↑SpeedModelsFID↓
FFHQ256 × 256AFHQ256 × 256LSUN-Church 256 × 256
DDPM10.80.760.539.30.740.51×1.0DDPM9.7
DDPM (1/2)16.80.740.4515.20.640.44×2.0f-DM-DS8.2
Cascaded49.00.400.0924.20.370.13-f-DM-VQVAE8.0
f-DM-DS10.80.740.506.40.810.48×2.1LSUN-Bed 256 × 256
IHDM64.9--43.4---DDPM8.0
f-DM-Blur-G11.70.730.516.90.760.49×1.0f-DM-DS6.9
f-DM-Blur-U10.40.740.527.00.770.53×1.0f-DM-VQVAE7.1
LDM48.00.310.0729.70.070.11×9.8ImageNet 256 × 256
LDM (GAN)*8.60.720.606.50.630.61×9.2DDPM10.9
f-DM-VQVAE12.70.770.478.90.760.40×1.7f-DM-DS8.2
f-DM-VQGAN11.70.740.515.60.760.53×1.7f-DM-VQVAE6.8
", + "bbox": [ + 173, + 565, + 821, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "basielines with various transformations on FFHQ (Figure 4 $\\downarrow$ ). Our methods consistently produce better visual results with more coherence and without noticeable artifacts.", + "bbox": [ + 169, + 776, + 823, + 803 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quantitative Comparison. We measure the generation quality (FID and precision/recall) and relative inference speed of $f$ -DMs and the baselines in Table 1. Across all five datasets, $f$ -DMs consistently achieves similar or even better results for the DDPM baselines, while gaining near $\\times 2$ inference speed for $f$ -DM-\\{DS, VQVAE, VQGAN\\} due to the nature of transformations. As a comparison, having fewer timesteps (DDPM 1/2) greatly hurts the generation quality of DDPM. We also show comparisons with transformation-specific baselines on FFHQ & AFHQ.", + "bbox": [ + 169, + 805, + 823, + 888 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "v.s. Cascaded DMs. 
Although cascaded DMs have been shown effective in literature (Nichol & Dhariwal, 2021; Ho et al., 2022a), it is underexplored to apply cascades in a sequence of consecu", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c0268113526218eb4d1ab7d4048c62013f56033e94fba21231eab49243813ec0.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 200, + 99, + 540, + 349 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/adcaceb13055291b80aba2488030391bc33778f4a0c1e884be2f8ac059829c4b.jpg", + "image_caption": [ + "Figure 5: Random DDIM samples $(\\eta = 0)$ from (a) $f$ -DMs on AFHQ and LSUN-Church by given {downsampled, blurred, latent} images as conditions; (b) $f$ -DM-VQVAE by interpolating the initial noise of the latent stage; (c) $f$ -DM-DS starting from the same initial noise of the $16 \\times 16$ stage. For (c), we also show the \"mean image\" of 300 random samples using the same initial noise." + ], + "image_footnote": [], + "bbox": [ + 545, + 99, + 818, + 340 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tive resolutions $(16\\to 32\\to 64\\to \\ldots)$ like ours. In such cases, the prediction errors get easily accumulated during the generation, yielding serious artifacts in the final resolution. To ease this, Cascaded DM (Ho et al., 2022a) proposed to apply \"noise conditioning augmentation\" which reduced the domain gap between stages by adding random noise to the input condition. However, it is not straightforward to tune the noise level for both training and inference time. By contrast, $f$ -DM is by-design non-cascaded, and there are no domain gaps between stages. 
That is, we can train our model end-to-end without worrying the additional tuning parameters and achieve stable results.", + "bbox": [ + 169, + 436, + 823, + 536 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "v.s. LDMs. We show comparisons with LDMs (Rombach et al., 2021) in Table 1. LDMs generate more efficiently as the diffusion only happens in the latent space. However, the generation is heavily biased by the behavior of the fixed decoder. For instance, it is challenging for VQVAE decoders to synthesize sharp images, which causes low scores in Table 1. However, LDM with VQGAN decoders is able to generate sharp details, which are typically favored by InceptionV3 (Szegedy et al., 2016) used in FID and PR. Therefore, despite having artifacts (see Figure 4, below, rightmost) in the output, LDMs (GAN) still obtain good scores. In contrast, $f$ -DM, as a pure DM, naturally bridges the latent and image spaces, where the generation is not restricted by the decoder.", + "bbox": [ + 169, + 542, + 823, + 654 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "v.s. Blurring DMs. Table 1 compares with a recently proposed blurring-based method (IHDM, Rissanen et al., 2022). Different from our approach, IHDM formulates a fully deterministic forward process. We conjecture the lack of randomness is the cause of their poor generation quality. Instead, $f$ -DM proposes a natural way of incorporating blurring with stochastic noise, yielding better quantitative and qualitative results.", + "bbox": [ + 169, + 660, + 823, + 731 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Conditional Generation. In Figure 5(a), we demonstrate the example of using pre-trained $f$ -DMs to perform conditional generation based on learned transformations. We downsample and blur the sampled real images, and start the reverse diffusion following Section 3.1 with $f$ -DM-DS and -Blur-U, respectively. 
Despite the difference in fine details, both our models faithfully generate high-fidelity outputs close to the real images. The same algorithm is applied to the extracted latent representations. Compared with the original VQVAE output, $f$ -DM-VQVAE is able to obtain better reconstruction. We provide additional conditional generation samples with the ablation of the \"gradient-based\" initialization method in Appendix E.3.", + "bbox": [ + 169, + 737, + 823, + 849 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Latent Space Manipulation To demonstrate $f$ -DMs have learned certain abstract representations by modeling with signal transformation, we show results of latent manipulation in Figure 5. Here we assume DDIM sampling ( $\\eta = 0$ ), and the only stochasticity comes from the initially sampled noise $\\epsilon_{\\mathrm{full}}$ . In (b), we obtain a semantically smooth transition between two cat faces when linearly", + "bbox": [ + 169, + 868, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "interpolating the low-resolution noises; on the other hand, we show samples of the same identity with different fine details (e.g., expression, poses) in (c), which is achieved easily by sampling $f$ -DM-DS with the low-resolution ( $16^2$ ) noise fixed. 
This implies that $f$ -DM is able to allocate high-level and fine-grained information in different stages via learning with downsampling.", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 ABLATION STUDIES", + "text_level": 1, + "bbox": [ + 171, + 176, + 354, + 191 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/7b1ccab97c00916b6e7ab5b81330cec3bcaada51ba9c8cd293670d2866009c5e.jpg", + "table_caption": [ + "Table 2: Ablation of design choices for $f$ -DMs trained on FFHQ. All faces are not real identities." + ], + "table_footnote": [], + "table_body": "
ModelEq. 4RescaleStagesFID↓P↑R↑
f-DM-DSNoVPcosine26.50.700.25
YesNocosine14.50.730.43
YesSPcosine12.10.750.47
YesVPlinear13.50.730.46
YesVPcosine10.80.740.50
f-DM-VQVAEYesNolinear24.00.790.29
YesVPcosine13.80.780.45
YesVPlinear12.70.770.47
", + "bbox": [ + 174, + 229, + 586, + 366 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/108e920d64674ebe358ca51519c5e49d60d13c0091102d80f6359f3ce32c520b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 598, + 237, + 807, + 292 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0f44605785ca91807fbb0f0ea30d3ee8a61444f6b037f50e5d91a66484fe0c16.jpg", + "image_caption": [ + "(a) without interpolation (Eq.4)", + "(b) with interpolation (Eq.4)" + ], + "image_footnote": [], + "bbox": [ + 598, + 301, + 807, + 357 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 2 presents the ablation of the key design choices. As expected, the interpolation formulation (Equation 4) effectively bridges the information gap between stages, without which the prediction errors get accumulated, resulting in blurry outputs and bad scores. Table 2 also demonstrates the importance of applying correct scaling. For both models, rescaling improves the FID and recall by large margins, where SP works slightly worse than VP. In addition, we also empirically explore the difference of stage schedules. Compared to VAE-based models, we usually have more stages in DS/Blur-based models to generate high-resolution images. The cosine schedule helps diffusion move faster in regions with low information density (e.g., low-resolution, heavily blurred).", + "bbox": [ + 169, + 386, + 823, + 500 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 518, + 346, + 534 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Progressive Generation with DMs. Conventional DMs generate images in the same resolutions. Therefore, existing work generally adopt cascaded approaches (Nichol & Dhariwal, 2021; Ho et al., 2022a; Sahara et al., 2022a) that chains a series of conditional DMs to generate coarse-to-fine, and have been used in super-resolution (SR3, Sahara et al., 2022b). 
However, cascaded models tend to suffer error propagation problems. More recently, Ryu & Ye (2022) dropped the need of conditioning, and proposed to generate images in a pyramidal fashion with additional reconstruction guidance; Jing et al. (2022) explored learning subspace DMs and connecting the full space with Langevin dynamics. By contrast, the proposed $f$ -DM is distinct from all the above types, which only requires one diffusion process, and the images get naturally up-sampled through reverse diffusion.", + "bbox": [ + 169, + 549, + 823, + 676 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Blurring DMs. Several concurrent research efforts (Rissanen et al., 2022; Daras et al., 2022; Lee et al., 2022) have recently looked into DM alternatives to combine blurring into the diffusion process, some of which also showed the possibility of deterministic generation (Bansal et al., 2022). Although sharing similarities, our work starts from a different view based on signal transformation. Furthermore, our empirical results also show that stochasticity plays a critical role in high-quality generation.", + "bbox": [ + 169, + 681, + 823, + 753 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Latent Space DMs. Existing work also investigated combining DMs with standard latent variable models. To our best knowledge, most of these works adopt DMs for learning the prior of latent space, where sampling is followed by a pre-trained (Rombach et al., 2021) or jointly optimized (Vahdat et al., 2021) decoder. Conversely, $f$ -DM does not rely on the quality of the decoder.", + "bbox": [ + 169, + 758, + 823, + 816 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 835, + 318, + 851 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We proposed $f$ -DM, a generalized family of diffusion models that enables generation with signal transformations. 
As a demonstration, we apply $f$ -DM to image generation tasks with a range of transformations, including downsampling, blurring and VAEs, where $f$ -DMs outperform the baselines in terms of synthesis quality and semantic interpretation.", + "bbox": [ + 169, + 867, + 823, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ETHICS STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 102, + 346, + 118 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Our work focuses on technical development, i.e., synthesizing high-quality images with a range of signal transformations (e.g., downsampling, blurring). Our approach has various applications, such as movie post-production, gaming, helping artists reduce workload, and generating synthetic data as training data for other computer vision tasks. Our approach can be used to synthesize human-related images (e.g., faces), and it is not biased towards any specific gender, race, region, or social class.", + "bbox": [ + 169, + 133, + 826, + 204 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "However, the ability of generative models, including our approach, to generate high-quality images that are indistinguishable from real images, raises concerns about the misuse of these methods, e.g., generating fake images. To resolve these concerns, we need to mark all the generated results as \"synthetic\". In addition, we believe it is crucial to have authenticity assessment, such as fake image detection and identity verification, which will alleviate the potential for misuse. We hope our approach can be used to foster the development of technologies for authenticity assessment. 
Finally, we believe that creating a set of appropriate regulations and laws would significantly reduce the risks of misuse while bolstering positive effects on technology development.", + "bbox": [ + 169, + 210, + 826, + 323 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REPRODUCIBILITY STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 342, + 439, + 357 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We assure that all the results shown in the paper and supplemental materials can be reproduced. We believe we have provided enough implementation details in the paper and supplemental materials for the readers to reproduce the results.", + "bbox": [ + 169, + 373, + 825, + 417 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 436, + 287, + 450 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Arpit Bansal, Eitan Borgnia, Hong-Min Chu, Jie S Li, Hamid Kazemi, Furong Huang, Micah Goldblum, Jonas Geiping, and Tom Goldstein. Cold diffusion: Inverting arbitrary image transforms without noise. arXiv preprint arXiv:2208.09392, 2022.", + "Christopher M Bishop and Nasser M Nasrabadi. Pattern recognition and machine learning, volume 4. Springer, 2006.", + "Jooyoung Choi, Jungbeom Lee, Chaehun Shin, Sungwon Kim, Hyunwoo Kim, and Sungroh Yoon. Perception prioritized training of diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11472-11481, 2022.", + "Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8188-8197, 2020.", + "Giannis Daras, Maurizio Delbracio, Hossein Talebi, Alexandros G. Dimakis, and Peyman Milanfar. Soft diffusion: Score matching for general corruptions, 2022. 
URL https://arxiv.org/abs/2209.05442.", + "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255, 2009. doi: 10.1109/CVPR.2009.5206848.", + "Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021.", + "Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation using real nvp. arXiv preprint arXiv:1605.08803, 2016.", + "Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 12873-12883, 2021.", + "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Q. Weinberger (eds.), Advances" + ], + "bbox": [ + 173, + 458, + 826, + 925 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "in Neural Information Processing Systems, volume 27, pp. 2672-2680. Curran Associates, Inc., 2014. URL https://proceedings.neurips.cc/paper/2014/file/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf.", + "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017.", + "Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. 
arXiv preprint arXiv:2207.12598, 2022.", + "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems, 33:6840-6851, 2020.", + "Jonathan Ho, Chitwan Saharia, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. *J. Mach. Learn. Res.*, 23: 47-1, 2022a.", + "Jonathan Ho, Tim Salimans, Alexey A Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. In ICLR Workshop on Deep Generative Models for Highly Structured Data, 2022b.", + "Emiel Hoogeboom and Tim Salimans. Blurring diffusion models, 2022. URL https://arxiv.org/abs/2209.05557.", + "Bowen Jing, Gabriele Corso, Renato Berlinghieri, and Tommi Jaakkola. Subspace diffusion generative models. arXiv preprint arXiv:2205.01490, 2022.", + "Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401-4410, 2019.", + "Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. arXiv preprint arXiv:2106.12423, 2021.", + "Diederik Kingma, Tim Salimans, Ben Poole, and Jonathan Ho. Variational diffusion models. Advances in neural information processing systems, 34:21696-21707, 2021.", + "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.", + "Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013.", + "Tuomas Kynkäänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in Neural Information Processing Systems, 32, 2019.", + "Sangyun Lee, Hyungjin Chung, Jaehyeon Kim, and Jong Chul Ye. 
Progressive deblurring of diffusion models for coarse-to-fine image synthesis. arXiv preprint arXiv:2207.11192, 2022.", + "Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, pp. 8162-8171. PMLR, 2021.", + "Vadim Popov, Ivan Vovk, Vladimir Gogoryan, Tasnama Sadekova, and Mikhail Kudinov. Grads- tts: A diffusion probabilistic model for text-to-speech. In International Conference on Machine Learning, pp. 8599-8608. PMLR, 2021.", + "Konpat Preechakul, Nattanat Chathee, Suttisak Wizadwongsa, and Supasorn Suwajanakorn. Diffusion autoencoders: Toward a meaningful and decodable representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10619-10629, 2022.", + "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022." + ], + "bbox": [ + 171, + 103, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ali Razavi, Aaron Van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-vae-2. Advances in neural information processing systems, 32, 2019.", + "Severi Rissanen, Markus Heinonen, and Arno Solin. Generative modelling with inverse heat dissipation. arXiv preprint arXiv:2206.13397, 2022.", + "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models, 2021.", + "Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. 
In International Conference on Medical image computing and computer-assisted intervention, pp. 234-241. Springer, 2015.", + "Dohoon Ryu and Jong Chul Ye. Pyramidal denoising diffusion probabilistic models. arXiv preprint arXiv:2208.01864, 2022.", + "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022a.", + "Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022b.", + "Tim Salimans and Jonathan Ho. Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512, 2022.", + "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015.", + "Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021a.", + "Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in Neural Information Processing Systems, 32, 2019.", + "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021b.", + "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2818-2826, 2016.", + "Arash Vahdat and Jan Kautz. 
Nvae: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020.", + "Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In Neural Information Processing Systems (NeurIPS), 2021.", + "Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017.", + "Fisher Yu, Ari Seff, Yinda Zhang, Shuran Song, Thomas Funkhouser, and Jianxiong Xiao. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365, 2015." + ], + "bbox": [ + 171, + 102, + 825, + 847 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "APPENDIX", + "text_level": 1, + "bbox": [ + 171, + 101, + 282, + 118 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A DETAILED DERIVATION OF $f$ -DMS", + "text_level": 1, + "bbox": [ + 171, + 137, + 501, + 155 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.1 $q(\\mathbf{z}_t|\\mathbf{z}_s,\\mathbf{x})$", + "text_level": 1, + "bbox": [ + 171, + 170, + 290, + 186 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We derive the definition in Equation 5 with the change-of-variable trick given the fact that $\\pmb{x}_t, \\pmb{x}_s$ and $\\pmb{x}^k$ are all deterministic functions of $\\pmb{x}$ .", + "bbox": [ + 169, + 196, + 823, + 224 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "More precisely, suppose $\\boldsymbol{z}_t \\sim \\mathcal{N}(\\alpha_t \\boldsymbol{x}_t, \\sigma_t^2 I), \\boldsymbol{z}_s \\sim \\mathcal{N}(\\alpha_s \\boldsymbol{x}_s, \\sigma_s^2 I)$ , where $\\tau_k \\leq s < t < \\tau_{k+1}$ . 
Thus, it is equivalent to have $\\boldsymbol{u}_t \\sim \\mathcal{N}(\\alpha_t \\boldsymbol{x}^k, \\sigma_t^2 I), \\boldsymbol{u}_s \\sim \\mathcal{N}(\\alpha_s \\boldsymbol{x}^k, \\sigma_s^2 I), \\boldsymbol{u}_t = \\boldsymbol{z}_t - \\alpha_t (\\boldsymbol{x}_t - \\boldsymbol{x}^k), \\boldsymbol{u}_s = \\boldsymbol{z}_s - \\alpha_s (\\boldsymbol{x}_s - \\boldsymbol{x}^k)$ . From the above definition, it is reasonable to assume $\\boldsymbol{u}_t, \\boldsymbol{u}_s$ follow the standard DM transition, which means that:", + "bbox": [ + 169, + 232, + 825, + 289 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\boldsymbol {u} _ {t} = \\alpha_ {t | s} \\boldsymbol {u} _ {s} + \\sigma_ {t | s} \\epsilon , \\epsilon \\sim \\mathcal {N} (\\boldsymbol {0}, I) \\\\ \\Rightarrow \\boldsymbol {z} _ {t} - \\alpha_ {t} \\left(\\boldsymbol {x} _ {t} - \\boldsymbol {x} ^ {k}\\right) = \\alpha_ {t | s} \\left(\\boldsymbol {z} _ {s} - \\alpha_ {s} \\left(\\boldsymbol {x} _ {s} - \\boldsymbol {x} ^ {k}\\right)\\right) + \\sigma_ {t | s} \\boldsymbol {\\epsilon}, \\quad \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (\\mathbf {0}, I) \\\\ \\Rightarrow \\quad \\boldsymbol {z} _ {t} = \\alpha_ {t | s} \\boldsymbol {z} _ {s} + \\alpha_ {t} \\left(\\boldsymbol {x} _ {t} - \\boldsymbol {x} _ {s}\\right) + \\sigma_ {t | s} \\boldsymbol {\\epsilon}, \\quad \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (\\boldsymbol {0}, I) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 258, + 297, + 738, + 353 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "As typically $\\pmb{x}_t \\neq \\pmb{x}_s$ and both $\\pmb{x}_t, \\pmb{x}_s$ are the functions of $\\pmb{x}^k$ . 
Then $z_t$ is dependent on both $\\pmb{z}_s$ and $\\pmb{x}^k = f_{0:k}(\\pmb{x})$ , resulting in a non-Markovian transition:", + "bbox": [ + 169, + 364, + 823, + 393 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nq (\\pmb {z} _ {t} | \\pmb {z} _ {s}, \\pmb {x}) = \\mathcal {N} (\\pmb {z} _ {t}; \\alpha_ {t | s} \\pmb {z} _ {s} + \\alpha_ {t} \\cdot (\\pmb {x} _ {t} - \\pmb {x} _ {s}), \\sigma_ {t | s} ^ {2} I),\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 402, + 676, + 422 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Note that this equation holds only when $\\boldsymbol{x}_t, \\boldsymbol{x}_s$ and $\\boldsymbol{x}^k$ are in the same space, and we did not make specific assumptions about the form of $\\boldsymbol{x}_t$ .", + "bbox": [ + 169, + 431, + 823, + 460 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.2 $q(\\mathbf{z}_s|\\mathbf{z}_t,\\mathbf{x})$", + "text_level": 1, + "bbox": [ + 171, + 479, + 290, + 496 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The reverse diffusion distribution follows Bayes' Theorem: $q(\\pmb{z}_s|\\pmb{z}_t, \\pmb{x}) \\propto q(\\pmb{z}_s|\\pmb{x})q(\\pmb{z}_t|\\pmb{z}_s, \\pmb{x})$ where both $q(\\pmb{z}_s|\\pmb{x})$ and $q(\\pmb{z}_t|\\pmb{z}_s, \\pmb{x})$ are Gaussian distributions with general forms of $\\mathcal{N}(\\pmb{z}_s|\\pmb{\\mu}, \\sigma^2 I)$ and $\\mathcal{N}(\\pmb{z}_t|A\\pmb{z}_s + \\pmb{b}, \\sigma'^2 I)$ , respectively. 
Based on Bishop & Nasrabadi (2006) (2.116), we can derive:", + "bbox": [ + 169, + 507, + 823, + 551 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(\\boldsymbol {z} _ {s} \\mid \\boldsymbol {z} _ {t}, \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {s} \\mid \\bar {\\sigma} ^ {- 2} \\left(\\sigma^ {\\prime - 2} A ^ {\\top} \\left(\\boldsymbol {z} _ {t} - \\boldsymbol {b}\\right) + \\sigma^ {- 2} \\boldsymbol {\\mu}\\right), \\bar {\\sigma} ^ {2} I\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 294, + 560, + 699, + 579 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $\\bar{\\sigma}^2 = (\\sigma^{-2} + \\sigma'^{-2}\\| A\\| ^2)^{-1}$ . Therefore, we can get the exact form by plugging our variables $\\pmb {\\mu} = \\alpha_{s}\\hat{\\pmb{x}}_{k}^{s}$ , $\\sigma = \\sigma_s$ , $A = \\alpha_{t|s}I$ , $\\pmb {b} = \\alpha_{t}\\cdot (\\pmb {x}_{t} - \\pmb {x}_{s})$ , $\\sigma^{\\prime} = \\sigma_{t|s}$ into above equation, we get:", + "bbox": [ + 169, + 588, + 823, + 619 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nq (\\pmb {z} _ {s} | \\pmb {z} _ {t}, \\pmb {x}) = \\mathcal {N} (\\pmb {z} _ {s} | \\alpha_ {s} \\pmb {x} _ {s} + \\sqrt {\\sigma_ {s} ^ {2} - \\bar {\\sigma} ^ {2}} \\pmb {\\epsilon} _ {t}, \\bar {\\sigma} ^ {2} I),\n$$\n", + "text_format": "latex", + "bbox": [ + 333, + 630, + 661, + 648 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $\\epsilon_{t} = (z_{t} - \\alpha_{t}\\pmb{x}_{t}) / \\sigma_{t}$ and $\\bar{\\sigma} = \\sigma_s\\sigma_{t|s} / \\sigma_t$", + "bbox": [ + 171, + 657, + 491, + 674 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Alternatively, if we assume $\\boldsymbol{x}_t$ take the interpolation formulation in Equation 4, we can also re-write $\\boldsymbol{x}_s$ with $\\boldsymbol{x}_t + \\frac{t - s}{t - \\tau_k} \\delta_t$ , where we define a new variable $\\delta_t = \\boldsymbol{x}^k - \\boldsymbol{x}_t$ . 
As stated in the main context (Section 3.1), such change makes $q(\\boldsymbol{z}_t | \\boldsymbol{z}_s, \\boldsymbol{x})$ avoid computing $\\boldsymbol{x}_s$ which may be potentially costly. In this way, we re-write the above equation as follows:", + "bbox": [ + 169, + 679, + 825, + 738 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(\\boldsymbol {z} _ {s} \\mid \\boldsymbol {z} _ {t}, \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {s} \\mid , \\alpha_ {s} \\left(\\boldsymbol {x} _ {t} + \\boldsymbol {\\delta} _ {t} \\cdot (t - s) / (t - \\tau_ {k})\\right) + \\sqrt {\\sigma_ {s} ^ {2} - \\bar {\\sigma} ^ {2}} \\boldsymbol {\\epsilon} _ {t}, \\bar {\\sigma} ^ {2} I\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 750, + 823, + 767 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.3 DIFFUSION INSIDE STAGES", + "text_level": 1, + "bbox": [ + 171, + 785, + 403, + 799 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In the inference time, we generate data by iteratively sampling from the conditional distribution $p(\\pmb{z}_s|\\pmb{z}_t) = \\mathbb{E}_{\\pmb{x}}[q(\\pmb{z}_s|\\pmb{z}_t,\\pmb{x})]$ based on Equation 10. In practice, the expectation over $\\pmb{x}$ is approximated by our model's prediction. As shown in Equation 9, in this work, we propose a \"double-prediction\" network $\\theta$ that reads $\\pmb{z}_t$ , and simultaneously predicts $\\pmb{x}_t$ and $\\delta_t$ with $\\pmb{x}_{\\theta}$ and $\\delta_{\\theta}$ , respectively. The predicted Gaussian noise is denoted as $\\epsilon_{\\theta} = (z_{t} - \\alpha_{t}\\pmb{x}_{\\theta}) / \\sigma_{t}$ . Note that the prediction $x_{\\theta}$ and $\\epsilon_{\\theta}$ are interchangeable, which means that we can readily derive one from the other's prediction. 
Therefore, by replacing $x_{t}, \\delta_{t}, \\epsilon_{t}$ , with $x_{\\theta}, \\delta_{\\theta}, \\epsilon_{\\theta}$ in Equation 10, we obtain the sampling algorithm shown in Algorithm 1: Line 6.", + "bbox": [ + 169, + 811, + 825, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/278a4753546ab0c582a6c3bc7e663c2edfe759f474771a8dd263a6022b31bae4.jpg", + "image_caption": [ + "Figure 7: Illustration of noise schedule $(\\alpha_{t}$ and $\\sigma_{t})$ for $f$ -DM-DS models with 5 stages $(16^{2} \\rightarrow 256^{2})$ . We use the standard cosine noise schedule $\\alpha_{t} = \\cos(0.5\\pi t)$ . We also show the difference between the linear/cosine stage schedule, as well as the proposed SP/VP re-scaling methods." + ], + "image_footnote": [], + "bbox": [ + 176, + 102, + 491, + 253 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/9fcdfe29e54fa2f7efc299a2869440a676ddb2bb7c489a706a023ea92d7221d5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 101, + 821, + 252 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.4 NOISE AT BOUNDARIES", + "text_level": 1, + "bbox": [ + 171, + 319, + 380, + 330 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this paper, the overall principle is to handle the transition across stage boundary is to ensure the forward diffusion to be deterministic and smooth, therefore almost no information is lost during the stage change. Such requirement is important as it directly correlated to the denoising performance. 
Failing to recover the lost information will directly affect the diversity of the samples the model generates.", + "bbox": [ + 169, + 344, + 823, + 402 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Forward diffusion As described in Section 3.1, since we have the control of the signal and the noise separately, we can directly apply the deterministic transformation on the signal, and drop the noise elements.", + "bbox": [ + 169, + 417, + 485, + 487 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/1bf6ca480a5a681325b6c015f5959592f25a3a931f42882d723a3402bf14c78e.jpg", + "image_caption": [ + "Figure 6: Two naive ways for down-sampling." + ], + "image_footnote": [], + "bbox": [ + 500, + 422, + 818, + 479 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Alternatively, we also implemented a different $\\zeta (\\epsilon)$ based on averaging. As shown in Figure 6,", + "bbox": [ + 171, + 493, + 485, + 522 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "if the transformation is down-sampling, we can use the fact that the mean of Gaussian noises is still Gaussian with lower variance: $(\\epsilon_0 + \\epsilon_1 + \\epsilon_2 + \\epsilon_3) / 4 \\sim \\mathcal{N}(0, \\frac{1}{4} I)$ . Therefore, $\\times 2$ rescaling is needed on the resulting noise.", + "bbox": [ + 169, + 522, + 823, + 563 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Reverse diffusion Similarly, we can also define the reverse process if $\\zeta$ is chosen to be averaging. Different from \"dropping\" where the reverse process is simply adding independent Gaussian noises, the reverse of \"averaging\" requires sampling $\\sum_{i=0}^{3} \\epsilon_i = 2\\epsilon$ given the input noise $\\epsilon$ , while having $p(\\epsilon_i) = \\mathcal{N}(0, I)$ , $i = 0,1,2,3$ . 
Such problem has a closed solution and can be implemented in an autoregressive fashion:", + "bbox": [ + 169, + 580, + 823, + 652 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} a = 2 \\epsilon ; \\\\ \\boldsymbol {\\epsilon} _ {0} = \\boldsymbol {a} / 4 + \\sqrt {3 / 4} \\cdot \\hat {\\epsilon} _ {1}, \\boldsymbol {a} = \\boldsymbol {a} - \\boldsymbol {\\epsilon} _ {0}, \\hat {\\epsilon} _ {1} \\sim \\mathcal {N} (\\boldsymbol {0}, I); \\\\ \\boldsymbol {\\epsilon} _ {1} = \\boldsymbol {a} / 3 + \\sqrt {2 / 3} \\cdot \\hat {\\epsilon} _ {2}, \\boldsymbol {a} = \\boldsymbol {a} - \\boldsymbol {\\epsilon} _ {1}, \\hat {\\epsilon} _ {2} \\sim \\mathcal {N} (\\boldsymbol {0}, I); \\\\ \\boldsymbol {\\epsilon} _ {2} = \\boldsymbol {a} / 2 + \\sqrt {1 / 2} \\cdot \\hat {\\epsilon} _ {3}, \\boldsymbol {a} = \\boldsymbol {a} - \\boldsymbol {\\epsilon} _ {2}, \\hat {\\epsilon} _ {3} \\sim \\mathcal {N} (\\boldsymbol {0}, I); \\\\ \\epsilon_ {3} = a \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 662, + 671, + 757 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Similar to the case of \"dropping\", we also need 3 additional samples $\\hat{\\epsilon}_{1:3}$ to contribute to four noises, therefore it can be implemented in the same way as described in Section 3.1. Empirically, reversing the \"averaging\" steps tends to produce samples with better FID scores. However, since it introduces correlations into the added noise, which may cause undesired biases especially in DDIM sampling.", + "bbox": [ + 169, + 767, + 823, + 824 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Intuition behind Re-scaling Here we present a simple justification of applying noise rescaling. Suppose the signal dimensionality changes from $M_{k-1}$ to $M_k$ when crossing the stage boundary, and such change is caused by different sampling rates. 
Based on the proposed resolution-agnostic SNR (Equation 7), the number of sampled points inside $\\Omega$ is proportional to its dimensionality. Generally, it is safe to assume signals are mostly low-frequency. Therefore, averaging signals will not change their variance. By contrast, as shown above, averaging Gaussian noises results in lower variance, where", + "bbox": [ + 169, + 839, + 825, + 924 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/a8f0bd125ff68052e5cdfb1ab440453e4f28e9f5687c63a07a38de767a8c7aed.jpg", + "image_caption": [ + "Figure 8: We show the comparison of the DDIM sampling." + ], + "image_footnote": [], + "bbox": [ + 181, + 102, + 823, + 277 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "in our case, the variance is proportional to $M^{-1}$ . Therefore, suppose the signal magnitude does not change, we can get the re-scaling law by forcing $\\mathrm{SNR}(z_{\\tau}) = \\mathrm{SNR}(z_{\\tau^{-}})$ at the stage boundary:", + "bbox": [ + 169, + 311, + 826, + 342 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\sigma_ {\\tau^ {-}} ^ {2} \\cdot M _ {k - 1} ^ {- 1} = \\sigma_ {\\tau} ^ {2} \\cdot M _ {k} ^ {- 1},\n$$\n", + "text_format": "latex", + "bbox": [ + 408, + 349, + 586, + 368 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "which derives the signal preserving (SP) rescaling in Equation 8. 
In Figure 7, we show an example of the change of $\\alpha$ and $\\sigma$ with and without applying the re-scaling technique for $f$ -DM-DS models.", + "bbox": [ + 169, + 375, + 823, + 405 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.5 DDIM SAMPLING", + "text_level": 1, + "bbox": [ + 171, + 422, + 344, + 435 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The above derivations only describe the standard ancestral sampling $(\\eta = 1)$ where $q(\\pmb{z}_s|\\pmb{z}_t,\\pmb{x})$ is determined by Bayes' Theorem. Optionally, one can arbitrarily define any proper reverse diffusion distribution as long as the marginal distributions match the definition. For example, $f$ -DM can also perform deterministic DDIM (Song et al., 2021a) by setting $\\eta = 0$ in Algorithm 1. Similar to Song et al. (2021a), we can also obtain the proof based on the induction argument.", + "bbox": [ + 169, + 449, + 823, + 521 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 8 shows the comparison of DDIM sampling between the standard DMs and the proposed $f$ -DM. In DDIM sampling $(\\eta = 0)$ , the only randomness comes from the initial noise at $t = 1$ . Due to the proposed noise resampling technique, $f$ -DM enables a multi-scale noising process where the sampled noises are split and sent to different steps of the diffusion process. 
In this case, compared to standard DMs, we gain the ability of controlling image generation at different levels, resulting in smooth semantic interpolation.", + "bbox": [ + 169, + 526, + 826, + 611 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B DETAILED INFORMATION OF TRANSFORMATIONS", + "text_level": 1, + "bbox": [ + 171, + 632, + 622, + 647 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We show the difference of all the transformations used in this paper in Figure 9.", + "bbox": [ + 171, + 664, + 696, + 680 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.1 DOWNSAMPLING", + "text_level": 1, + "bbox": [ + 171, + 698, + 336, + 710 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In early development of this work, we explored various combinations of performing down-sampling: $\\pmb{f} = \\{\\text{bilinear, nearest, Gaussian blur + subsample}\\}$ , $\\pmb{g} = \\{\\text{bilinear, bicubic, nearest, neural-based}\\}$ . While all these combinations produced similar results, we empirically found on FFHQ that choosing bilinear interpolation for both $\\pmb{f}, \\pmb{g}$ achieves the most stable results. Therefore, all the main experiments of $f$ -DM-DS are conducted on bilinear interpolation. As discussed in Section 3.2, we choose $K = 4$ , which progressively downsamples a $256^2$ into $16^2$ .", + "bbox": [ + 169, + 724, + 823, + 809 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.2 BLURRING", + "text_level": 1, + "bbox": [ + 171, + 827, + 294, + 840 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We experimented with two types of blurring functions. For upsampling-based blurring, we use the same number of stages as the downsampling case; for Gaussian-based blurring, we adopt $K = 7$ with corresponding kernel sizes $\\sigma_{B} = 15\\sin^{2}\\left(\\frac{\\pi}{2}\\tau_{k}\\right)$ , where $\\tau_{k}$ follows the cosine stage schedule. 
In practice, we implement blurring function in frequency domain following Rissanen et al. (2022) based on discrete cosine transform (DCT).", + "bbox": [ + 169, + 854, + 823, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/5b3e560c3c5c56b6d3ab340c98388c262222bacc08d97b15c2fbefad85bc6dcd.jpg", + "image_caption": [ + "Downsample" + ], + "image_footnote": [], + "bbox": [ + 184, + 108, + 580, + 170 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/5e2992e5e311e4096de6ec3eec040c148bcf6967de1834cdae740335a091ea4d.jpg", + "image_caption": [ + "VQ-VAE" + ], + "image_footnote": [], + "bbox": [ + 651, + 108, + 810, + 170 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/6a72be007beec1c6ed8108dcaa9f9015e80c6cdb2a3f6af037ea87db15c2d9ef.jpg", + "image_caption": [ + "Updown Blur" + ], + "image_footnote": [], + "bbox": [ + 184, + 183, + 578, + 244 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/7896088996ebbadbd3bfaec61292b7fa531e9bc1ecf2647c43207881183dd663.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 183, + 810, + 244 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/26a5447a6af5b30545b351421b7271a5a73e4a3e35dccf47b3bf810bde83a77f.jpg", + "image_caption": [ + "VQ-GAN", + "Gaussian Blur", + "Figure 9: We show examples of the five transformations (downsample, blur, VAEs) used in this paper. For downsampling, we resize the image with nearest upsampler; for VQ-VAE/VQ-GAN, we visualize the first 3 channels of the latent feature maps." 
+ ], + "image_footnote": [], + "bbox": [ + 184, + 258, + 810, + 320 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.3 VAES", + "text_level": 1, + "bbox": [ + 171, + 397, + 261, + 411 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In this paper, we only consider vector quantized (VQ) models with single layer latent space, while our methods can be readily applied to hierarchical (Razavi et al., 2019) and KL-regularized VAE models (Vahdat & Kautz, 2020). Following Rombach et al. (2021), we take the feature vectors before the quantization layers as the latent space, and keep the quantization step in the decoder $(g)$ when training diffusion models.", + "bbox": [ + 169, + 424, + 823, + 494 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We follow an open-sourced implementation $^2$ to train our VQVAE model on ImageNet. The model consists of two strided convolution blocks which by default downsamples the input image by a factor of 8. We use the default hyper-parameters and train the model for 50 epochs with a batch-size of 128. For a fair comparison to match the latent size of VQVAE, we use the pre-trained autoencoding model (Rombach et al., 2021) with the setting of $\\{f = 8, \\mathrm{VQ}(\\mathrm{Z} = 256, \\mathrm{d} = 4)\\}$ . We directly use the checkpoint $^3$ provided by the authors. Note that the above setting is not the best performing model (LDM-4) in the original paper. 
Therefore, it generates more artifacts when reconstructing images from the latents.", + "bbox": [ + 169, + 500, + 823, + 611 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Before training, we compute the signal magnitude ratio $\\gamma_{k}$ (Equation 8) over the entire training set of FFHQ, where we empirically set $\\gamma_{k} = 2.77$ for VQ-GAN and $\\gamma_{k} = 2.0$ for VQ-VAE, respectively.", + "bbox": [ + 169, + 619, + 826, + 648 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C DATASET DETAILS", + "text_level": 1, + "bbox": [ + 171, + 667, + 366, + 684 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "FFHQ (https://github.com/NVlabs/ffhq-dataset) contains 70k images of real human faces in resolution of $1024^2$ . For most of our experiments, we resize the images to $256^2$ .", + "bbox": [ + 169, + 699, + 823, + 729 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "AFHQ (https://github.com/clovaai/stargan-v2# animal-faces-hq-dataset-afhq) contains 15k images of animal faces including cat, dog and wild three categories in resolution of $512^{2}$ . We train conditional diffusion models by merging all training images with the label information. All images are resized to $256^{2}$ .", + "bbox": [ + 169, + 744, + 823, + 800 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "LSUN (https://www.yf.io/p/1sun) is a collection of large-scale image dataset containing 10 scenes and 20 object categories. Following previous works Rombach et al. (2021), we choose the two categories – Church (126k images) and Bed (3M images), and train separate unconditional models on them. As LSUN-Bed is relatively larger, we set the iterations longer than other datasets. 
All images are resized to $256^2$ with center-crop.", + "bbox": [ + 169, + 815, + 823, + 886 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://github.com/rosinality/vq-vae-2-pytorch", + "bbox": [ + 189, + 895, + 609, + 910 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "3https://ommer-lab.com/files/latent-diffusion/vq-f8-n256.zip", + "bbox": [ + 192, + 910, + 725, + 924 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/8ac2a2769616de0707aebf77761e09cbe085836873539f1e2371fec00392f9e2.jpg", + "image_caption": [ + "Figure 10: An illustration of the training pipeline." + ], + "image_footnote": [], + "bbox": [ + 183, + 104, + 816, + 260 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "ImageNet (https://image-net.org/download.php) we use the standard ImageNet-1K dataset which contains 1.28M images across 1000 classes. We directly merge all the training images with class-labels. All images are resized to $256^2$ with center-crop. For both $f$ -DM and the baseline models, we adopt the classifier-free guidance (Ho & Salimans, 2022) with the unconditional probability 0.2. 
In the inference time, we use the guidance scale $(s = 2)$ for computing FIDs, and $s = 3$ to synthesize examples for comparison.", + "bbox": [ + 169, + 287, + 823, + 373 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D IMPLEMENTATION DETAILS", + "text_level": 1, + "bbox": [ + 171, + 404, + 439, + 420 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.1 ARCHITECTURE CONFIGURATIONS", + "text_level": 1, + "bbox": [ + 171, + 443, + 460, + 455 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We implement $f$ -DM strictly following standard U-Net architecture in Nichol & Dhariwal (2021). As shown in Figure 11, input $z_{t}$ will be directed to the corresponding inner layer based on spatial resolutions, and a stage-specific adapter is adopted to transform the channel dimension. Such architecture also allows memory-efficient batching across stages where we can create a batch with various resolutions, and split the computation based on the resolutions.", + "bbox": [ + 169, + 473, + 823, + 544 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.2 HYPER-PARAMETERS", + "text_level": 1, + "bbox": [ + 171, + 573, + 367, + 585 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In our experiments, we adopt the following two sets of parameters based on the complexity of the dataset: base (FFHQ, AFHQ, LSUN-Church/Bed) and big (ImageNet). For base, we use 1 residual block per resolution, with the basic dimension 128. For big, we use 2 residual blocks with the basic dimension 192. Given one dataset, all the models with various transformations including the baseline DMs share the same hyper-parameters except for the adapters. We list the hyperparameter details in Table 3.", + "bbox": [ + 169, + 603, + 823, + 686 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/35ce6b0f08c1c20838d3ae4decf7bb653da16fe579be8c8ea675ac7b06d88f62.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Hyper-param.FFHQAFHQLSUN-ChurchLSUN-BedImageNet
image res.25622562256225622562
# of classesNone3NoneNone1000
c.f. guidance-No--Yes
#channels128128128128192
#res-blocks11112
channel multi.[1,1,2,2,4,4]
attention res.16,8
batch size3232323264
lr2e-5
iterations500K500K500K1200K2500K
", + "bbox": [ + 250, + 708, + 746, + 877 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 3: Hyperparameters and settings for $f$ -DM on different datasets.", + "bbox": [ + 264, + 886, + 730, + 902 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/280bce376cb58a90c764d5be92a1d3087016a9c77530f61850439042834418fb.jpg", + "image_caption": [ + "Figure 11: An illustration of the modified U-Net architecture. Time conditioning is omitted. The parameters are partially shared across stages based on the resolutions. Stage-specific adapters are adopted to transform the input dimensions." + ], + "image_footnote": [], + "bbox": [ + 176, + 106, + 823, + 233 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/9bcf1a75278e8d2281965673b9c05dbad410dcb5b0b7d4f5f76e1fd3d5c19fd4.jpg", + "image_caption": [ + "Figure 12: Additional comparisons with Cascaded DM on AFHQ. $\\uparrow$ Comparison of the reverse diffusion process from $16^{2}$ to $256^{2}$ . We visualize the denoised outputs $(\\boldsymbol{x}_t)$ and the corresponding next noised input $(z_{s})$ near the start & end of each resolution diffusion. $\\downarrow$ Comparison of random samples generated by Cascaded DM and $f$ -DM-DS." 
+ ], + "image_footnote": [], + "bbox": [ + 176, + 306, + 821, + 729 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "E ADDITIONAL RESULTS", + "text_level": 1, + "bbox": [ + 171, + 804, + 398, + 819 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "E.1 QUANTITATIVE COMPARISON WITH DDIM", + "text_level": 1, + "bbox": [ + 171, + 839, + 514, + 854 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We also include comparison of $f$ -DM with the standard DM using DDIM sampling ( $\\eta = 0$ ) in Table 4. Similar to the conclusion drawn from Table 1, the proposed $f$ -DM can achieve comparable or even better performance than baseline DM even with $\\eta = 0$ (generation only controlled by the initial noise, see Figure 8), while having better scores for DDIM with half generation steps.", + "bbox": [ + 169, + 867, + 825, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/7dcde01bb04d5d291ef70d6b3ae7e95bf484cb4f99ae3ed3804760fe2611a9e3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 104, + 493, + 358 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/4f5b223dfc3d6aff5adb4963b5f5094d8e3d2d008d7386ef0b2ac033d7d1942d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 103, + 818, + 358 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/cd734a886b73f3d4822b4bd31ee8e54ca4661a7fac6a569dfe5cc1d8681fbbd1.jpg", + "image_caption": [ + "Figure 13: Additional comparisons with LDMs on AFHQ." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 361, + 491, + 616 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/6e100b42a0472267a4df7aa950b52b8d7c0dda9c722783999fb6aa35ba59a2e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 362, + 818, + 616 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/b6536d8e3fb036380a7f534155dde454f36eabde66e579996f12b0d52d8269df.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelsFID↓P↑R↑FID↓P↑R↑Speed
FFHQ256 × 256AFHQ256 × 256
DDIM11.40.710.5312.10.580.65×1.0
DDIM (1/2)13.00.700.5116.80.480.64×2.0
f-DM-DS (η = 0)12.60.760.555.80.760.55×2.1
", + "bbox": [ + 243, + 681, + 754, + 773 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 4: Comparison on FFHQ and AFHQ for DDIM sampling $\\left( {\\eta = 0}\\right)$", + "bbox": [ + 259, + 782, + 733, + 799 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E.2 V.S. TRANSFORMATION-SPECIFIC BASELINES", + "text_level": 1, + "bbox": [ + 171, + 806, + 532, + 819 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We include more comparisons in Figure 12 and 13. From Figure 12, we compare the generation process of $f$ -DM and the cascaded DM. It is clear that $f$ -DM conducts coarse-to-fine generation in a more natural way, and the results will not suffer from error propagation. As shown in Figure 13, LDM outputs are easily affected by the chosen decoder. VQVAE decoder tends output blurry images; the output from VQGAN decoder has much finer details while remaining noticeable artifacts (e.g., eyes, furs). By contrast, $f$ -DM performs stably for both latent spaces.", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E.3 CONDITIONAL GENERATION", + "text_level": 1, + "bbox": [ + 171, + 104, + 415, + 118 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We include additional results of conditional generation, i.e., super-resolution (Figure 14) and deblurring (Figure 15). 
We also show the comparison with or without the proposed gradient-based initialization, which greatly improves the faithfulness of conditional generation when the input noise is high (e.g., $16 \\times 16$ input).", + "bbox": [ + 169, + 128, + 826, + 186 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "E.4 ADDITIONAL QUALITATIVE RESULTS", + "text_level": 1, + "bbox": [ + 171, + 203, + 475, + 217 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Finally, we provide additional qualitative results for our unconditional models for FFHQ (Figure 16), AFHQ (Figure 17), LSUN (Figure 18) and our class-conditional ImageNet model (Figure 19,20).", + "bbox": [ + 169, + 228, + 823, + 258 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F LIMITATIONS AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 171, + 277, + 491, + 292 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Although $f$ -DM enables diffusion with signal transformations, which greatly extends the scope of DMs to work in transformed space, there still exist limitations and opportunities for future work. First, it is an empirical question to find the optimal stage schedule for all transformations. Our ablation studies also show that different heuristics have differences for DS-based and VAE-based models. A metric that can automatically determine the best stage schedule based on the property of each transformation is needed and will be explored in the future. In addition, although the current method achieves faster inference when generating with transformations like down-sampling, the speed-up is not very significant as we still take the standard DDPM steps. How to further accelerate the inference process of DMs is a challenging and orthogonal direction. For example, it has great potential to combine $f$ -DM with speed-up techniques such as knowledge distillation (Salimans & Ho, 2022). 
Moreover, no matter hand-designed or learned, all the transformations used in $f$ -DM are still fixed when training DM. It is, however, different from typical VAEs, where both the encoder and decoder are jointly optimized during training. Therefore, starting from a random/imperfect transformation and training $f$ -DM jointly with the transformations towards certain target objectives will be studied as future work.", + "bbox": [ + 169, + 308, + 826, + 517 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/527fa641e31bbc8d987dcf040cb6e87fb22700061f4ece617935f253b53f179b.jpg", + "image_caption": [ + "Figure 14: Additional examples of super-resolution (SR) with the unconditional $f$ -DM-DS trained on AFHQ. $\\uparrow$ The same input image with various resolution $16^2$ , $32^2$ , $64^2$ , $128^2$ . We sample 3 random seeds for each resolution input. We also show the difference with and without applying gradient-based initialization (Grad-Init) on $z$ . $\\downarrow$ SR results of various $16^2$ inputs." + ], + "image_footnote": [], + "bbox": [ + 176, + 165, + 823, + 792 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/b2a52f5d1274f1b3bbb4148ad6c1d86040a7f2c1de8bd4408a8d91ce3678ac38.jpg", + "image_caption": [ + "Figure 15: Additional examples of de-blurring with the unconditional $f$ -DM-Blur-G trained on AFHQ. $\\uparrow$ The same input image with various Gaussian kernel sizes $\\sigma = 15,9,4,1.4$ . 
We sample 3 random seeds for each resolution input. We also show the difference with and without applying gradient-based initialization (Grad-Init) on $z$ . $\\downarrow$ Deblurred results of various blur images." + ], + "image_footnote": [], + "bbox": [ + 176, + 165, + 816, + 789 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/7b376d50f3fbb17417c81b86e41119a6d81d9b2c1cdd37842a01ea4d30fd1394.jpg", + "image_caption": [ + "f-DM-DS" + ], + "image_footnote": [], + "bbox": [ + 178, + 162, + 820, + 287 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/d9603c0ceb1995fe685cbe619608de12afadf938bf77468eef8f3119d0568786.jpg", + "image_caption": [ + "f-DM-Blur-U", + "f-DM-Blur-G" + ], + "image_footnote": [], + "bbox": [ + 178, + 297, + 820, + 422 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/bd601770522a8f005064bd2ef2f7d076ead1b3f13a0c881165fba34b074d3daa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 433, + 820, + 558 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/baa4b41cfb27f40c4d0c44f604de40b5a0520d3d019d75be50efa7603692df7e.jpg", + "image_caption": [ + "f-DM-VQVAE" + ], + "image_footnote": [], + "bbox": [ + 178, + 568, + 820, + 691 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/98743f2baf134e47d814d66c0a0e89cc2e641289c6623c2a92ffd4a96da966ff.jpg", + "image_caption": [ + "f-DM-VQGAN", + "Figure 16: Random samples generated by five $f$ -DMs trained on FFHQ $256 \\times 256$ . All faces presented are synthesized by the models, and are not real identities." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 702, + 820, + 828 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/9abce0b45bb07fb3f7a2dd430cfb737e45e15ec896e5fb4d56569fd92c866168.jpg", + "image_caption": [ + "f-DM-DS" + ], + "image_footnote": [], + "bbox": [ + 176, + 169, + 820, + 294 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/785682623407e89f8acb2175e5b4445c45e38121f30a5a9cc43f1c22f883f724.jpg", + "image_caption": [ + "f-DM-Blur-U" + ], + "image_footnote": [], + "bbox": [ + 178, + 304, + 820, + 429 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/8962e1725427272f00a91c85dc75d236f2af6440cb98a0f06f73bf4e8bec8d23.jpg", + "image_caption": [ + "f-DM-Blur-G", + "f-DM-VQVAE" + ], + "image_footnote": [], + "bbox": [ + 178, + 439, + 820, + 564 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/f14f00221059dde1432e1c6989addc27715d5a8ac19529cd8f21205e8393de6a.jpg", + "image_caption": [ + "f-DM-VOGAN" + ], + "image_footnote": [], + "bbox": [ + 178, + 575, + 820, + 700 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/82e10cb497232c26a1d260ad0e62184f671cdc0ad1cf3a8b0c3df48b5804fe9a.jpg", + "image_caption": [ + "Figure 17: Random samples generated by five $f$ -DMs trained on AFHQ $256 \\times 256$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 709, + 820, + 834 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/0f66f8c4f7a1bcbff94332b019b83b17d12a0ab7652d4adcfdd73b9c8f809010.jpg", + "image_caption": [ + "f-DM-DS", + "f-DM-Blur-U" + ], + "image_footnote": [], + "bbox": [ + 178, + 148, + 820, + 276 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/3bcf687510122b709b0f355e99d0ad59f0ac4091374d535c3b0d891550cfdf67.jpg", + "image_caption": [ + "f-DM-VQVAE" + ], + "image_footnote": [], + "bbox": [ + 178, + 285, + 820, + 411 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/a5579ed3acd07cf63f3b5b8d83417b7a41f077a1d1755be37e2899fd00cdfc20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 419, + 820, + 545 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/5bbabc7d878e38dd4f8b109d66fe8d0157d3d2def6febec595e6e1c7f572b1af.jpg", + "image_caption": [ + "f-DM-DS", + "f-DM-VQVAE" + ], + "image_footnote": [], + "bbox": [ + 178, + 594, + 820, + 720 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/e303651ff57e3530709c7a08616817856763cb9feb7894dc571e383118b2d38f.jpg", + "image_caption": [ + "Figure 18: Random samples generated by $f$ -DMs trained on LSUN-Church & -Bed $256 \\times 256$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 729, + 820, + 854 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/c89eb1799e80f0ae0c0eede98f8ca235e903e7a75bcb980a6e9721b9fee271c5.jpg", + "image_caption": [ + "Figure 19: Random samples generated by $f$ -DM-DS/VQVAE trained on ImageNet $256 \\times 256$ with classifier-free guidance ( $s = 3$ ). Classes from top to bottom: red panda, robin, daisy, valley, trifle, comic book." + ], + "image_footnote": [], + "bbox": [ + 181, + 112, + 495, + 856 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/96f5115d8f8bd7de6c1b5560a089fdfca6b53803c3d564c05f45d89e39dbacd9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 112, + 813, + 854 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/eee539c601ed5a3ee6c6e70115606539bd6f6f568c1f4ae543f1c097c0831fc3.jpg", + "image_caption": [ + "Figure 20: Random samples generated by $f$ -DM-DS/VQVAE trained on ImageNet $256 \\times 256$ with classifier-free guidance ( $s = 3$ ). Classes from top to bottom: school bus, pizza, seashore, photocopier, golden retriever, axolotl." 
+ ], + "image_footnote": [], + "bbox": [ + 179, + 108, + 816, + 858 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + } +] \ No newline at end of file diff --git a/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_model.json b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_model.json new file mode 100644 index 0000000000000000000000000000000000000000..58e1a8c03f3b59953e7850cdecb715b0bfd82eb9 --- /dev/null +++ b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_model.json @@ -0,0 +1,4049 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.149 + ], + "angle": 0, + "content": "\\(f\\)-DM: A MULTI-STAGE DIFFUSION MODEL VIA PROGRESSIVE SIGNAL TRANSFORMATION" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.17, + 0.744, + 0.2 + ], + "angle": 0, + "content": "Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Miguel Angel Bautista, Josh Susskind Apple" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.2, + 0.778, + 0.215 + ], + "angle": 0, + "content": "{jgu32, szhai, yizhe_zhang, mbautistamartin, jsusskind}@apple.com" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.25, + 0.548, + 0.265 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.28, + 0.77, + 0.53 + ], + "angle": 0, + "content": "Diffusion models (DMs) have recently emerged as SoTA tools for generative modeling in various domains. 
Standard DMs can be viewed as an instantiation of hierarchical variational autoencoders (VAEs) where the latent variables are inferred from input-centered Gaussian distributions with fixed scales and variances. Unlike VAEs, this formulation constrains DMs from changing the latent spaces and learning abstract representations. In this work, we propose \\( f \\)-DM, a generalized family of DMs, which allows progressive signal transformation. More precisely, we extend DMs to incorporate a set of (hand-designed or learned) transformations, where the transformed input is the mean of each diffusion step. We propose a generalized formulation of DMs and derive the corresponding de-noising objective together with a modified sampling algorithm. As a demonstration, we apply \\( f \\)-DM in image generation tasks with a range of functions, including down-sampling, blurring, and learned transformations based on the encoder of pretrained VAEs. In addition, we identify the importance of adjusting the noise levels whenever the signal is sub-sampled and propose a simple rescaling recipe. \\( f \\)-DM can produce high-quality samples on standard image generation benchmarks like FFHQ, AFHQ, LSUN and ImageNet with better efficiency and semantic interpretation. Please check our videos at http://jiataogu.me/fdm/." + }, + { + "type": "image", + "bbox": [ + 0.202, + 0.538, + 0.792, + 0.797 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.802, + 0.828, + 0.847 + ], + "angle": 0, + "content": "Figure 1: Visualization of reverse diffusion from \\( f \\)-DMs with various signal transformations. \\( x_{t} \\) is the denoised output, and \\( z_{s} \\) is the input to the next diffusion step. We plot the first three channels of VQVAE latent variables. Low-resolution images are resized to \\( 256^{2} \\) for ease of visualization." 
+ }, + { + "type": "title", + "bbox": [ + 0.174, + 0.866, + 0.339, + 0.881 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Diffusion probabilistic models (DMs, Sohl-Dickstein et al., 2015; Ho et al., 2020; Nichol & Dhariwal, 2021) and score-based (Song et al., 2021b) generative models have become increasingly popular" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.189 + ], + "angle": 0, + "content": "as the tools for high-quality image (Dhariwal & Nichol, 2021), video (Ho et al., 2022b), text-to-speech (Popov et al., 2021) and text-to-image (Rombach et al., 2021; Ramesh et al., 2022; Sahara et al., 2022a) synthesis. Despite the empirical success, conventional DMs are restricted to operate in the ambient space throughout the Gaussian noising process. On the other hand, common generative models like VAEs (Kingma & Welling, 2013) and GANs (Goodfellow et al., 2014; Karras et al., 2021) employ a coarse-to-fine process that hierarchically generates high-resolution outputs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.194, + 0.827, + 0.321 + ], + "angle": 0, + "content": "We are interested in combining the best of the two worlds: the expressivity of DMs and the benefit of hierarchical features. To this end, we propose \\( f \\)-DM, a generalized multi-stage framework of DMs to incorporate progressive transformations to the inputs. As an important property of our formulation, \\( f \\)-DM does not make any assumptions about the type of transformations. This makes it compatible with many possible designs, ranging from domain-specific ones to generic neural networks. 
In this work, we consider representative types of transformations, including down-sampling, blurring, and neural-based transformations. What these functions share in common is that they allow one to derive increasingly more global, coarse, and/or compact representations, which we believe can lead to better sampling quality as well as reduced computation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.327, + 0.825, + 0.399 + ], + "angle": 0, + "content": "Incorporating arbitrary transformations into DMs also brings immediate modeling challenges. For instance, certain transformations destroy the information drastically, and some might also change the dimensionality. For the former, we derive an interpolation-based formulation to smoothly bridge consecutive transformations. For the latter, we verify the importance of rescaling the noise level, and propose a resolution-agnostic signal-to-noise ratio (SNR) as a practical guideline for noise rescaling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.404, + 0.825, + 0.475 + ], + "angle": 0, + "content": "Extensive experiments are performed on image generation benchmarks, including FFHQ, AFHQ, LSUN Bed/Church and ImageNet. \\( f \\)-DMs consistently match or outperform the baseline performance, while requiring relatively less computing thanks to the progressive transformations. Furthermore, given a pre-trained \\( f \\)-DM, we can readily manipulate the learned latent space, and perform conditional generation tasks (e.g., super-resolution) without additional training." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.494, + 0.328, + 0.509 + ], + "angle": 0, + "content": "2 BACKGROUND" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.525, + 0.486, + 0.623 + ], + "angle": 0, + "content": "Diffusion Models (DMs, Sohl-Dickstein et al., 2015; Song & Ermon, 2019; Ho et al., 2020) are deep generative models which can be viewed as a special case of hierarchical VAEs (Kingma et al., 2021). 
In this paper, we consider diffusion in continuous time similar to Song et al. (2021b); Kingma et al. (2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.629, + 0.486, + 0.687 + ], + "angle": 0, + "content": "Given a datapoint \\( \\pmb{x} \\in \\mathbb{R}^N \\), a DM models time-dependent latent variables \\( \\pmb{z} = \\{\\pmb{z}_t | t \\in [0,1], \\pmb{z}_0 = \\pmb{x}\\} \\) based on a fixed signal-noise schedule \\( \\{\\alpha_t, \\sigma_t\\} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.218, + 0.693, + 0.438, + 0.712 + ], + "angle": 0, + "content": "\\[\nq (\\boldsymbol {z} _ {t} | \\boldsymbol {z} _ {s}) = \\mathcal {N} (\\boldsymbol {z} _ {t}; \\alpha_ {t | s} \\boldsymbol {z} _ {s}, \\sigma_ {t | s} ^ {2} I),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.718, + 0.485, + 0.762 + ], + "angle": 0, + "content": "where \\(\\alpha_{t|s} = \\alpha_t / \\alpha_s\\), \\(\\sigma_{t|s}^2 = \\sigma_t^2 - \\alpha_{t|s}^2\\sigma_s^2\\), \\(s < t\\). It also defines the marginal distribution \\(q(\\pmb{z}_t|\\pmb{x})\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.232, + 0.763, + 0.424, + 0.78 + ], + "angle": 0, + "content": "\\[\nq \\left(\\boldsymbol {z} _ {t} | \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {t}; \\alpha_ {t} \\boldsymbol {x}, \\sigma_ {t} ^ {2} I\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.782, + 0.485, + 0.797 + ], + "angle": 0, + "content": "By default, we assume the variance preserving" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.796, + 0.825, + 0.867 + ], + "angle": 0, + "content": "form (Ho et al., 2020). That is, \\(\\alpha_{t}^{2} + \\sigma_{t}^{2} = 1\\), \\(\\alpha_{0} = \\sigma_{1} = 1\\), and the signal-to-noise-ratio (SNR, \\(\\alpha_{t}^{2} / \\sigma_{t}^{2}\\)) decreases monotonically with \\(t\\). 
For generation, a parametric function \\(\\theta\\) is optimized to reverse the diffusion process by denoising \\(z_{t} = \\alpha_{t}x + \\sigma_{t}\\epsilon\\) to the clean input \\(x\\), with a weighted reconstruction loss \\(\\mathcal{L}_{\\theta}\\). For example, the \"simple loss\" proposed in Ho et al. (2020) is equivalent to weighting residuals by \\(\\omega_{t} = \\alpha_{t}^{2} / \\sigma_{t}^{2}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.873, + 0.825, + 0.893 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\theta} = \\mathbb {E} _ {\\boldsymbol {z} _ {t} \\sim q (\\boldsymbol {z} _ {t} | \\boldsymbol {x}), t \\sim [ 0, 1 ]} \\left[ \\omega_ {t} \\cdot \\| \\boldsymbol {x} _ {\\theta} (\\boldsymbol {z} _ {t}, t) - \\boldsymbol {x} \\| _ {2} ^ {2} \\right]. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "In practice, \\(\\theta\\) is parameterized as a U-Net (Ronneberger et al., 2015). As suggested in Ho et al. 
(2020), predicting the noise \\(\\epsilon_{\\theta}\\) empirically achieves better performance than predicting \\(x_{\\theta}\\), where" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.531, + 0.564, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.511, + 0.738, + 0.554, + 0.748 + ], + "angle": 0, + "content": "(a) DMS" + }, + { + "type": "image", + "bbox": [ + 0.576, + 0.532, + 0.648, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.581, + 0.738, + 0.631, + 0.748 + ], + "angle": 0, + "content": "(b) VAEs" + }, + { + "type": "image", + "bbox": [ + 0.664, + 0.532, + 0.818, + 0.738 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.696, + 0.738, + 0.786, + 0.748 + ], + "angle": 0, + "content": "(c) f-DM (Ours)" + }, + { + "type": "image_caption", + "bbox": [ + 0.495, + 0.761, + 0.825, + 0.79 + ], + "angle": 0, + "content": "Figure 2: (a) the standard DMs; (b) a bottom-up hierarchical VAEs; (c) our proposed \\( f \\)-DM." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "\\(\\pmb{x}_{\\theta}(\\pmb{z}_t,t) = (\\pmb{z}_t - \\sigma_t\\pmb{\\epsilon}_{\\theta}(\\pmb{z}_t,t)) / \\alpha_t\\). Sampling from such a learned model can be performed from ancestral sampling (DDPM, Ho et al., 2020), or a deterministic DDIM sampler (Song et al., 2021a). 
Starting from \\(\\pmb{z}_1\\sim \\mathcal{N}(\\mathbf{0},I)\\), a sequence of timesteps \\(1 = t_0 > \\ldots >t_N = 0\\) are sampled for iterative generation, and we can readily summarize both methods for each step as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.168, + 0.825, + 0.186 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {z} _ {s} = \\alpha_ {s} \\cdot \\boldsymbol {x} _ {\\theta} (\\boldsymbol {z} _ {t}) + \\sqrt {\\sigma_ {s} ^ {2} - \\eta^ {2} \\bar {\\sigma} ^ {2}} \\cdot \\boldsymbol {\\epsilon} _ {\\theta} (\\boldsymbol {z} _ {t}) + \\eta \\bar {\\sigma} \\cdot \\boldsymbol {\\epsilon}, \\quad \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (\\boldsymbol {0}, I), \\quad s < t, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.192, + 0.779, + 0.209 + ], + "angle": 0, + "content": "where \\(\\bar{\\sigma} = \\sigma_s\\sigma_{t|s} / \\sigma_t\\) , and \\(\\eta\\) controls the proportion of additional noise. (i.e., DDIM \\(\\eta = 0\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.213, + 0.825, + 0.271 + ], + "angle": 0, + "content": "As the score function \\(\\epsilon_{\\theta}\\) is defined in the ambient space, it is clear that all the latent variables \\(z\\) are forced to be the same shape as the input data \\(\\pmb{x}\\) \\((\\mathbb{R}^N)\\). This not only leads to inefficient training, especially for steps with high noise level (Jing et al., 2022), but also makes DMs hard to learn abstract and semantically meaningful latent space as pointed out by Preechakul et al. (2022)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.29, + 0.284, + 0.305 + ], + "angle": 0, + "content": "3 METHOD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.321, + 0.825, + 0.391 + ], + "angle": 0, + "content": "In this section, we introduce \\( f \\)-DM, an extended family of DMs to enable diffusion on transformed signals, in a way similar to a standard hierarchical VAE. 
We start by introducing the definition of the proposed multi-stage formulation with general signal transformations, followed by modified training and generation algorithms (Section 3.1). Then, we specifically apply \\( f \\)-DM with three categories of transformations (Section 3.2)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.408, + 0.395, + 0.421 + ], + "angle": 0, + "content": "3.1 MULTI-STAGE DIFFUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.825, + 0.533 + ], + "angle": 0, + "content": "Signal Transformations We consider a sequence of deterministic functions \\( \\pmb{f} = \\{f_0, \\dots, f_K\\} \\), where \\( f_0 \\dots f_k \\) progressively transforms the input signal \\( \\pmb{x} \\in \\mathbb{R}^N \\) into \\( \\pmb{x}^k = f_{0:k}(\\pmb{x}) \\in \\mathbb{R}^{M_k} \\). We assume \\( \\pmb{x}^0 = f_0(\\pmb{x}) = \\pmb{x} \\). In principle, \\( \\pmb{f} \\) can be any function. In this work, we focus on transformations that gradually destroy the information contained in \\( \\pmb{x} \\) (e.g., down-sampling), leading towards more compact representations. Without loss of generality, we assume \\( M_0 \\geq M_1 \\geq \\dots \\geq M_K \\). A sequence of inverse mappings \\( \\pmb{g} = \\{g_0, \\dots, g_{K-1}\\} \\) is used to connect a corresponding sequence of pairs of consecutive spaces. Specifically, we define \\( \\hat{\\pmb{x}}_k \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.351, + 0.539, + 0.825, + 0.573 + ], + "angle": 0, + "content": "\\[\n\\hat {\\boldsymbol {x}} ^ {k} := \\left\\{ \\begin{array}{l l} g _ {k} \\left(f _ {k + 1} \\left(\\boldsymbol {x} ^ {k}\\right)\\right) \\approx \\boldsymbol {x} ^ {k}, & \\text {i f} k < K, \\\\ \\boldsymbol {x} ^ {k}, & \\text {i f} k = K. \\end{array} \\right. 
\\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.58, + 0.825, + 0.623 + ], + "angle": 0, + "content": "The approximation of Equation 3 (\\(k < K\\)) is not necessarily (and sometimes impossibly) accurate. For instance, \\(f_{k}\\) downsamples an input image \\(\\pmb{x}\\) from \\(128^{2}\\) into \\(64^{2}\\) with average pooling, and \\(g_{k}\\) can be a bilinear interpolation that upsamples back to \\(128^{2}\\), which is a lossy reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.629, + 0.825, + 0.701 + ], + "angle": 0, + "content": "The definition of \\( f \\) and \\( g \\) can be seen as a direct analogy of the encoder \\( (\\phi) \\) and decoder \\( (\\theta) \\) in hierarchical VAEs (see Figure 2 (b)). However, there are still major differences: (1) the VAE encoder/decoder is stochastic, and the encoder's outputs are regularized by the prior. In contrast, \\( f \\) and \\( g \\) are deterministic, and the encoder output \\( x^{K} \\) does not necessarily follow a simple prior; (2) VAEs directly use the decoder for generation, while \\( f, g \\) are fused in the diffusion steps of \\( f \\)-DM." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.708, + 0.825, + 0.766 + ], + "angle": 0, + "content": "Forward Diffusion We extend the continuous-time DMs for signal transformations. We split the diffusion time \\(0 \\to 1\\) into \\(K + 1\\) stages, where for each stage, a partial diffusion process is performed. 
More specifically, we define a set of time boundaries \\(0 = \\tau_0 < \\tau_1 < \\ldots < \\tau_K < \\tau_{K + 1} = 1\\), and for \\(t \\in [0,1]\\), the latent \\(\\mathbf{z}_t\\) has the following marginal probability:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.772, + 0.825, + 0.806 + ], + "angle": 0, + "content": "\\[\nq \\left(\\boldsymbol {z} _ {t} \\mid \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {t}; \\alpha_ {t} \\boldsymbol {x} _ {t}, \\sigma_ {t} ^ {2} I\\right), \\quad \\text {where } \\boldsymbol {x} _ {t} = \\frac {\\left(t - \\tau_ {k}\\right) \\hat {\\boldsymbol {x}} ^ {k} + \\left(\\tau_ {k + 1} - t\\right) \\boldsymbol {x} ^ {k}}{\\tau_ {k + 1} - \\tau_ {k}}, \\quad \\tau_ {k} \\leq t < \\tau_ {k + 1}. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.812, + 0.826, + 0.926 + ], + "angle": 0, + "content": "As listed above, \\( \\pmb{x}_t \\) is the interpolation of \\( \\pmb{x}^k \\) and its approximation \\( \\hat{\\pmb{x}}^k \\) when \\( t \\) falls in stage \\( k \\). A simple illustration for the relationship of \\( \\pmb{x}_t, \\hat{\\pmb{x}}^k, \\pmb{x}^k \\) and \\( z_t \\) is shown in Figure 10. We argue that interpolation is crucial as it creates a continuous transformation that slowly corrupts information inside each stage. In this way, such change can be easily reversed by our model. Also, it is nontrivial to find the optimal stage schedule \\( \\tau_k \\) for each model as it highly depends on how much the information is destroyed in each stage \\( f_k \\). In this work, we tested two heuristics: (1) linear schedule \\( \\tau_k = k / (K + 1) \\); (2) cosine schedule \\( \\tau_k = \\cos(1 - k / (K + 1)) \\). Note that the standard DMs can be seen as a special case of our \\( f \\)-DM when there is only one stage (\\( K = 0 \\))." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.107, + 0.782, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.281, + 0.825, + 0.325 + ], + "angle": 0, + "content": "Figure 3: Left: an illustration of the proposed SNR computation for different sampling rates; Right: the comparison of rescaling the noise level for progressive down-sampling. Without noise rescaling, the diffused images in low-resolution quickly become too noisy to distinguish the underlying signal." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.337, + 0.825, + 0.368 + ], + "angle": 0, + "content": "Equation 4 does not guarantee a Markovian transition. Nevertheless, our formulation only needs \\( q(\\pmb{z}_t | \\pmb{z}_s, \\pmb{x}) \\), which has the following simple form focusing on diffusion steps within a stage:" + }, + { + "type": "equation", + "bbox": [ + 0.247, + 0.373, + 0.825, + 0.393 + ], + "angle": 0, + "content": "\\[\nq \\left(\\boldsymbol {z} _ {t} \\mid \\boldsymbol {z} _ {s}, \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {t}; \\alpha_ {t | s} \\boldsymbol {z} _ {s} + \\alpha_ {t} \\cdot \\left(\\boldsymbol {x} _ {t} - \\boldsymbol {x} _ {s}\\right), \\sigma_ {t | s} ^ {2} I\\right), \\quad \\tau_ {k} \\leq s < t < \\tau_ {k + 1}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.399, + 0.825, + 0.445 + ], + "angle": 0, + "content": "From Equation 5, we further re-write \\( \\boldsymbol{x}_t - \\boldsymbol{x}_s = -\\delta_t \\cdot (t - s) / (t - \\tau_k) \\), where \\( \\delta_t = \\boldsymbol{x}^k - \\boldsymbol{x}_t \\) is the signal degradation. 
Equation 5 also indicates that the reverse diffusion distribution \( q(\boldsymbol{z}_s | \boldsymbol{z}_t, \boldsymbol{x}) \propto q(\boldsymbol{z}_t | \boldsymbol{z}_s, \boldsymbol{x}) q(\boldsymbol{z}_s | \boldsymbol{x}) \) can be written as the function of \( \boldsymbol{x}_t \) and \( \delta_t \) which will be our learning objectives." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.451, + 0.825, + 0.508 + ], + "angle": 0, + "content": "Boundary Condition To enable diffusion across stages, we need the transition at stage boundaries \(\tau_{k}\). More specifically, when the step approaches the boundary \(\tau^{-}\) (the left limit of \(\tau\)), the transition \(q(z_{\tau} | z_{\tau^{-}}, \pmb{x})\) should be as deterministic (ideally invertible) & smooth as possible to minimize information loss. First, we can easily expand \(z_{\tau}\) and \(z_{\tau^{-}}\) as the signal and noise combination:" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.514, + 0.667, + 0.53 + ], + "angle": 0, + "content": "\[\n\text {Before:} \quad \boldsymbol {z} _ {\tau^ {-}} = \alpha_ {\tau^ {-}} \cdot \boldsymbol {x} _ {\tau^ {-}} + \sigma_ {\tau^ {-}} \cdot \boldsymbol {\epsilon}, p (\boldsymbol {\epsilon}) = \mathcal {N} (\boldsymbol {0}, I),\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.526, + 0.825, + 0.548 + ], + "angle": 0, + "content": "\[\n\text {After:} \quad z _ {\tau} = \alpha_ {\tau} \cdot x _ {\tau} + \sigma_ {\tau} \cdot \zeta (\epsilon), p (\zeta (\epsilon)) = \mathcal {N} (0, I). \tag {6}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.555, + 0.825, + 0.613 + ], + "angle": 0, + "content": "Based on definition, \( \pmb{x}_{\tau^{-}} = \hat{\pmb{x}}^{k - 1} = g(\pmb{x}^k) = g(\pmb{x}_{\tau}) \), which means the signal part is invertible. Therefore we only need to find \( \zeta \). 
Under the initial assumption of \\( M_{k} \\leq M_{k - 1} \\), this can be achieved easily by dropping elements from \\( \\epsilon \\). Take down-sampling \\( (M_{k - 1} = 4M_k) \\) as an example. We can directly drop 3 out of every \\( 2 \\times 2 \\) values from \\( \\epsilon \\). More details are included in Appendix A.4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.619, + 0.827, + 0.689 + ], + "angle": 0, + "content": "The second requirement of a smooth transition is not as straightforward as it looks, which asks the \"noisiness\" of latents \\( z \\) to remain unchanged across the boundary. We argue that the conventional measure – the signal-to-noise-ratio (SNR) – in DM literature is not compatible with resolution change as it averages the signal/noise power element-wise. In this work, we propose a generalized resolution-agnostic SNR by viewing data as points sampled from a continuous field:" + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.695, + 0.825, + 0.731 + ], + "angle": 0, + "content": "\\[\n\\operatorname {S N R} (\\boldsymbol {z}) = \\frac {\\mathbb {E} _ {\\Omega \\sim I} \\| \\mathbb {E} _ {i \\sim \\Omega} \\operatorname {S I G N A L} (\\boldsymbol {z} _ {i}) \\| ^ {2}}{\\mathbb {E} _ {\\Omega \\sim I} \\| \\mathbb {E} _ {i \\sim \\Omega} \\operatorname {N O I S E} (\\boldsymbol {z} _ {i}) \\| ^ {2}}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.734, + 0.827, + 0.833 + ], + "angle": 0, + "content": "where \\(I\\) is the data range, SIGNAL represents the real data value (such as image pixels), and NOISE is the unstructured Gaussian noise added to the data. \\(\\Omega\\) is a patch relative to \\(I\\), which can be any size as long as it is invariant to different sampling rates (resolutions). As shown in Figure 3 (left), we can obtain a reliable measure of noisiness by averaging the signal/noise inside patches. 
We derive \\(\\alpha_{\\tau}, \\sigma_{\\tau}\\) from \\(\\alpha_{\\tau^{-}}, \\sigma_{\\tau^{-}}\\) for any transformations by forcing \\(\\mathrm{SNR}(z_{\\tau}) = \\mathrm{SNR}(z_{\\tau^{-}})\\) under this new definition. Specifically, if dimensionality change is solely caused by the change of sampling rate (e.g., down-sampling, average RGB channels, deconvolution), we can get the following relation:" + }, + { + "type": "equation", + "bbox": [ + 0.395, + 0.837, + 0.825, + 0.856 + ], + "angle": 0, + "content": "\\[\n\\alpha_ {\\tau} ^ {2} / \\sigma_ {\\tau} ^ {2} = d _ {k} \\cdot \\gamma_ {k} \\cdot \\alpha_ {\\tau -} ^ {2} / \\sigma_ {\\tau -} ^ {2}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.858, + 0.825, + 0.902 + ], + "angle": 0, + "content": "where \\(d_{k} = M_{k - 1} / M_{k}\\) is the total dimension change, and \\(\\gamma_{k} = \\mathbb{E}||\\hat{\\pmb{x}}^{k - 1}||^{2} / \\mathbb{E}||\\pmb{x}^{k}||^{2}\\) is the change of signal power. For example, we have \\(d_{k} = 4,\\gamma_{k}\\approx 1\\) for down-sampling. Following Equation 8, the straightforward rule is to rescale the magnitude of the noise, and keep the signal part unchanged:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.91, + 0.638, + 0.925 + ], + "angle": 0, + "content": "For simplicity, we omit the subscript \\(k\\) for \\(\\tau_{k}\\) in the following paragraphs." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code_caption", + "bbox": [ + 0.173, + 0.107, + 0.61, + 0.123 + ], + "angle": 0, + "content": "Algorithm 1: Reverse diffusion for image generation using \\( f \\)-DM" + }, + { + "type": "algorithm", + "bbox": [ + 0.16, + 0.125, + 0.774, + 0.291 + ], + "angle": 0, + "content": "Input: model \\(\\theta ,f,g\\) stage schedule \\(\\{\\tau_0,\\dots ,\\tau_K\\}\\) , rescaled noise schedule functions \\(\\alpha (.)\\) \\(\\sigma (.)\\) step-size \\(\\Delta t\\) \\(\\epsilon_{\\mathrm{full}}\\sim \\mathcal{N}(0,I)\\) ,DDPM ratio \\(\\eta\\) \n1 Initialize \\(z\\) from \\(\\epsilon_{\\mathrm{full}}\\) \n2 for \\((k = K;k\\geq 0;k = k - 1)\\) do \n3 for \\((t = \\tau_{k + 1};t > \\tau_k;t = t - \\Delta t,s = t - \\Delta t)\\) do \n4 \\(\\begin{array}{r}\\pmb {\\epsilon}_{\\theta},\\pmb {\\delta}_{\\theta} = \\theta (\\pmb {z},t);\\quad \\pmb {x}_{\\theta} = (\\pmb {z} - \\sigma (t)\\cdot \\pmb {\\epsilon}_{\\theta}) / \\alpha (t);\\\\ \\text{if} s > \\tau_{k}\\text{then}\\\\ \\big{\\lfloor}\\pmb {z} = \\alpha (s)\\cdot (\\pmb {x}_{\\theta} + \\pmb {\\delta}_{\\theta}\\cdot (t - s) / (t - \\tau_{k})) + \\sqrt{\\sigma^{2}(s) - \\eta^{2}\\bar{\\sigma}^{2}}\\cdot \\pmb {\\epsilon}_{\\theta} + \\eta \\bar{\\sigma}\\cdot \\pmb {\\epsilon},\\pmb {\\epsilon}\\sim \\mathcal{N}(\\pmb {0},I)\\\\ \\end{array}\\) \n5 \n6 if \\(k > 0\\) then \n7 Re-sample noise \\(\\epsilon_{\\mathrm{rs}}\\) from \\(\\epsilon_{\\theta}\\) and \\(\\epsilon_{\\mathrm{full}}\\) . 
\\(z = \\alpha (\\tau_k)\\cdot g_k(x_\\theta) + \\sigma (\\tau_k)\\cdot \\epsilon_{\\mathrm{rs}}\\) \n9 return \\(x_{\\theta}\\)" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.301, + 0.827, + 0.402 + ], + "angle": 0, + "content": "\\(\\alpha \\leftarrow \\alpha, \\sigma \\leftarrow \\sigma / \\sqrt{d_k}\\), which we refer as signal preserved (SP) rescaling. Note that, to ensure the noise schedule is continuous over time and close to the original schedule, such rescaling is applied to the noises of the entire stage, and will be accumulated when multiple transformations are used. As the comparison shown in Figure 3, the resulting images are visually closer to the standard DM. However, the variance of \\(z_{t}\\) becomes very small, especially when \\(t \\to 1\\), which might be hard for the neural networks to distinguish. Therefore, we propose the variance preserved (VP) alternative to further normalize the rescaled \\(\\alpha, \\sigma\\) so that \\(\\alpha^2 + \\sigma^2 = 1\\). We show the visualization in Figure 3 (b)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.407, + 0.825, + 0.464 + ], + "angle": 0, + "content": "Training We train a neural network \\(\\theta\\) to denoise. We also show the training pipeline in Figure 10. In \\(f\\)-DM, noise is caused by two factors: (1) the perturbation \\(\\epsilon\\) from noise injection; (2) the degradation \\(\\delta\\) due to signal transformation. 
Thus, we propose to predict \\(\\boldsymbol{x}_{\\theta}\\) and \\(\\delta_{\\theta}\\) jointly, which simultaneously remove both noises from \\(\\boldsymbol{z}_t\\) with a \"double reconstruction\" loss:" + }, + { + "type": "equation", + "bbox": [ + 0.256, + 0.465, + 0.825, + 0.484 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\theta} = \\mathbb {E} _ {\\boldsymbol {z} _ {t} \\sim q (\\boldsymbol {z} _ {t} | \\boldsymbol {x}), t \\sim [ 0, 1 ]} \\left[ \\omega_ {t} \\cdot \\left(\\| \\boldsymbol {x} _ {\\theta} (\\boldsymbol {z} _ {t}, t) - \\boldsymbol {x} _ {t} \\| _ {2} ^ {2} + \\| \\boldsymbol {\\delta} _ {\\theta} (\\boldsymbol {z} _ {t}, t) - \\boldsymbol {\\delta} _ {t} \\| _ {2} ^ {2}\\right) \\right], \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.485, + 0.825, + 0.556 + ], + "angle": 0, + "content": "where the denoised output is \\( \\pmb{x}_{\\theta}(\\pmb{z}_t, t) + \\delta_{\\theta}(\\pmb{z}_t, t) \\). Unlike standard DMs, the denoising goals are the transformed signals of each stage rather than the final real images, which are generally simpler targets to recover. The same as standard DMs, we also choose to predict \\( \\epsilon_{\\theta} \\), and compute \\( \\pmb{x}_{\\theta} = (z_t - \\sigma_t \\pmb{\\epsilon}_{\\theta}) / \\alpha_t \\). We adopt the same U-Net architecture for all stages, where input \\( \\pmb{z}_t \\) will be directed to the corresponding inner layer based on spatial resolutions (see Appendix Figure 11 for details)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.563, + 0.826, + 0.716 + ], + "angle": 0, + "content": "Unconditional Generation We present the generation steps in Algorithm 1, where \\( \\boldsymbol{x}_t \\) and \\( \\delta_t \\) are replaced by model's predictions \\( \\boldsymbol{x}_{\\theta} \\), \\( \\delta_{\\theta} \\). Thanks to the interpolation formulation (Equation 4), generation is independent of the transformations \\( f \\). 
Only the inverse mappings \\( g \\) – which might be simple and easy to compute – is needed to map the signals at boundaries. This brings flexibility and efficiency to learning complex or even test-time inaccessible transformations. In addition, Algorithm 1 includes a \"noise-resampling step\" for each stage boundary, which is the reverse process for \\( \\zeta(\\epsilon) \\) in Equation 6. While \\( \\zeta \\) is deterministic, the reverse process needs additional randomness. For instance, if \\( \\zeta \\) drops elements in the forward process, then the reverse step should inject standard Gaussian noise back to the dropped locations. Because we assume \\( M_0 \\geq \\ldots \\geq M_K \\), we propose to sample a full-size noise \\( \\epsilon_{\\mathrm{full}} \\) before generation, and gradually adding subsets of \\( \\epsilon_{\\mathrm{full}} \\) to each stage. Thus, \\( \\epsilon_{\\mathrm{full}} \\) encodes multi-scale information similar to RealNVP (Dinh et al., 2016)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.724, + 0.825, + 0.824 + ], + "angle": 0, + "content": "Conditional Generation Given an unconditional \\( f \\)-DM, we can do conditional generation by replacing the denoised output \\( \\pmb{x}_{\\theta} \\) with any condition \\( \\pmb{x}_c \\) at a suitable time \\( (T) \\), and starting diffusion from \\( T \\). For example, suppose \\( \\pmb{f} \\) is downsample, and \\( \\pmb{x}_c \\) is a low-resolution image, \\( f \\)-DM enables super-resolution (SR) without additional training. To achieve that, it is critical to initialize \\( \\pmb{z}_T \\), which implicitly asks \\( z_{T} \\approx \\alpha_{T}\\pmb{x}_{c} + \\sigma_{T}\\pmb{\\epsilon}_{\\theta}(\\pmb{z}_{T}) \\). In practice, we choose \\( T \\) to be the corresponding stage boundary, and initialize \\( \\pmb{z} \\) by adding random noise \\( \\sigma_T\\pmb{\\epsilon} \\) to \\( \\alpha_{T}\\pmb{x}_{c} \\). 
A gradient-based method is used to iteratively update \\( z_{T} \\gets z_{T} - \\lambda \\nabla_{z_{T}}\\| \\pmb{x}_{\\theta}(z_{T}) - \\pmb{x}_{c}\\|_{2}^{2} \\) for a few steps before the diffusion starts." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.837, + 0.557, + 0.851 + ], + "angle": 0, + "content": "3.2 APPLICATIONS ON VARIOUS TRANSFORMATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.863, + 0.825, + 0.893 + ], + "angle": 0, + "content": "With the definition in Section 3.1, next we show \\( f \\)-DM applied with different transformations. In this paper, we consider the following three categories of transformations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.927 + ], + "angle": 0, + "content": "Downsampling. As the motivating example in Section 3.1, we let \\( \\pmb{f} \\) a sequence of downsample operations that transforms a given image (e.g., \\( 256^2 \\)) progressively down to \\( 16^2 \\), where each \\( f_k(\\cdot) \\)" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "reduces the length by 2, and correspondingly \\( g_{k}(.) \\) upsamples by 2. Thus, the generation starts from a low-resolution noise and progressively performs super-resolution. We denote the model as \\( f \\)-DM-DS, where \\( d_{k} = 4 \\), \\( \\gamma_{k} = 1 \\) in Equation 8 and \\( K = 4 \\) for \\( 256^{2} \\) images." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.15, + 0.827, + 0.277 + ], + "angle": 0, + "content": "Blurring. \\(f\\)-DM also supports general blur transformations. 
Unlike recent works (Rissanen et al., 2022; Hoogeboom & Salimans, 2022) that focuses on continuous-time blur (heat dissipation), Equation 4 can be seen as an instantiation of progressive blur function if we treat \\(\\hat{\\pmb{x}}^k\\) as a blurred version of \\(\\pmb{x}^k\\). This design brings more flexibility in choosing any kind of blurring functions, and using the blurred versions as stages. In this paper, we experiment with two types of blurring functions. (1) \\(f\\)-DM-Blur-U: utilizing the same downsample operators as \\(f\\)-DM-DS, while always up-sampling the images back to the original sizes; (2) \\(f\\)-DM-Blur-G: applying standard Gaussian blurring kernels following Rissanen et al. (2022). In both cases, we use \\(g_{k}(\\pmb{x}) = \\pmb{x}\\). As the dimension is not changed, no rescaling and noise resampling is required." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.281, + 0.828, + 0.435 + ], + "angle": 0, + "content": "Image \\(\\rightarrow\\) Latent Trans. We further consider diffusion with learned non-linear transformations such as VAEs (see Figure 2 (b), \\(f\\): VAE encoder, \\(g\\): VAE decoder). By inverting such an encoding process, we are able to generate data from low-dimensional latent space similar to Rombach et al. (LDM, 2021). As a major difference, LDM operates only on the latent variables, while \\(f\\)-DM learns diffusion in the latent and image spaces jointly. Because of this, our performance will not be bounded by the quality of the VAE decoder. In this paper, we consider VQVAE (Van Den Oord et al., 2017) together with its GAN variant (VQGAN, Esser et al., 2021). For both cases, we transform \\(256^2 \\times 3\\) images into \\(32^2 \\times 4\\) (i.e., \\(d_k = 48\\)) latent space. The VQVAE encoder/decoder is trained on ImageNet (Deng et al., 2009), and is frozen for the rest of the experiments. For \\(f\\)-DM-VQGAN, we directly take the checkpoint provided by Rombach et al. (2021). 
Besides, we need to tune \\(\\gamma_k\\) separately for each encoder due to the change in signal magnitude." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.45, + 0.33, + 0.466 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.481, + 0.402, + 0.495 + ], + "angle": 0, + "content": "4.1 EXPERIMENTAL SETTINGS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.507, + 0.827, + 0.55 + ], + "angle": 0, + "content": "Datasets. We evaluate \\( f \\)-DMs on five commonly used benchmarks testing generation on a range of domains: FFHQ (Karras et al., 2019), AFHQ (Choi et al., 2020), LSUN Church & Bed (Yu et al., 2015), and ImageNet (Deng et al., 2009). All images are center-cropped and resized to \\( 256 \\times 256 \\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.555, + 0.827, + 0.682 + ], + "angle": 0, + "content": "Training Details. We implement the three types of transformations with the same architecture and hyper-parameters except for the stage-specific adapters. We adopt a lighter version of ADM (Dhariwal & Nichol, 2021) as the main U-Net architecture. For all experiments, we adopt the same training scheme using AdamW (Kingma & Ba, 2014) optimizer with a learning rate of \\(2\\mathrm{e} - 5\\) and an EMA decay factor of 0.9999. We set the weight \\(\\omega_{t} = \\mathrm{sigmoid}(-\\log (\\alpha_{t}^{2} / \\sigma_{t}^{2}))\\) following P2-weighting (Choi et al., 2022). The cosine noise schedule \\(\\alpha_{t} = \\cos (0.5\\pi t)\\) is adopted for diffusion working in the \\(256^2\\times 3\\) image space. As proposed in Equation 8, noise rescaling (VP by default) is applied for \\(f\\)-DMs when the resolutions change. All our models are trained with batch-size 32 images for 500K (FFHQ, AFHQ, LSUN Church), 1.2M (LSUN Bed) and 2.5M (ImageNet) iterations, respectively." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.687, + 0.827, + 0.841 + ], + "angle": 0, + "content": "Baselines & Evaluation. 
We compare \( f \)-DMs against a standard DM (DDPM, Ho et al., 2020) on all five datasets. To ensure a fair comparison, we train DDPM following the same settings and continuous-time formulation as our approaches. We also include transformation-specific baselines: (1) we re-implement the cascaded DM (Cascaded, Ho et al., 2022a) to adapt \( f \)-DM-DS setup from \( 16^{2} \) progressively to \( 256^{2} \), where for each stage a separate DM is trained conditioned on the consecutive downsampled image; (2) we re-train a latent-diffusion model (LDM, Rombach et al., 2021) on the extracted latents from our pretrained VQVAE; (3) to compare with \( f \)-DM-Blur-G, we include the scores and synthesised examples of IHDM (Rissanen et al., 2022). We set 250 timesteps \( (\Delta t = 0.004) \) for \( f \)-DMs and the baselines with \( \eta = 1 \) (Algorithm 1). We use Fréchet Inception Distance (FID, Heusel et al., 2017) and Precision/Recall (PR, Kynkäänniemi et al., 2019) as the measures of visual quality, based on 50K samples and the entire training set." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.857, + 0.281, + 0.872 + ], + "angle": 0, + "content": "4.2 RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Qualitative Comparison To demonstrate the capability of handling various complex datasets, Figure 4 (↑) presents an uncurated set of images generated by \( f \)-DM-DS. We show more samples from all types of \( f \)-DMs in the Appendix E.4. 
We also show a comparison between \\( f \\)-DMs and the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.103, + 0.824, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.464, + 0.825, + 0.507 + ], + "angle": 0, + "content": "Figure 4: \\(\\uparrow\\) Random samples from \\(f\\)-DM-DS trained on various datasets; \\(\\downarrow\\) Comparison of \\(f\\)-DMs and the corresponding baselines under various transformations. Best viewed when zoomed in. All faces presented are synthesized by the models, and are not real identities." + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.526, + 0.825, + 0.556 + ], + "angle": 0, + "content": "Table 1: Quantitative comparisons on various datasets. The speed compared to DDPM is calculated with \\( \\mathrm{bsz} = 1 \\) on CPU. Best performing DMs are shown in bold." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.566, + 0.822, + 0.763 + ], + "angle": 0, + "content": "
ModelsFID↓P↑R↑FID↓P↑R↑SpeedModelsFID↓
FFHQ256 × 256AFHQ256 × 256LSUN-Church 256 × 256
DDPM10.80.760.539.30.740.51×1.0DDPM9.7
DDPM (1/2)16.80.740.4515.20.640.44×2.0f-DM-DS8.2
Cascaded49.00.400.0924.20.370.13-f-DM-VQVAE8.0
f-DM-DS10.80.740.506.40.810.48×2.1LSUN-Bed 256 × 256
IHDM64.9--43.4---DDPM8.0
f-DM-Blur-G11.70.730.516.90.760.49×1.0f-DM-DS6.9
f-DM-Blur-U10.40.740.527.00.770.53×1.0f-DM-VQVAE7.1
LDM48.00.310.0729.70.070.11×9.8ImageNet 256 × 256
LDM (GAN)*8.60.720.606.50.630.61×9.2DDPM10.9
f-DM-VQVAE12.70.770.478.90.760.40×1.7f-DM-DS8.2
f-DM-VQGAN11.70.740.515.60.760.53×1.7f-DM-VQVAE6.8
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.777, + 0.825, + 0.804 + ], + "angle": 0, + "content": "baselines with various transformations on FFHQ (Figure 4 \(\downarrow\)). Our methods consistently produce better visual results with more coherence and without noticeable artifacts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.825, + 0.89 + ], + "angle": 0, + "content": "Quantitative Comparison. We measure the generation quality (FID and precision/recall) and relative inference speed of \( f \)-DMs and the baselines in Table 1. Across all five datasets, \( f \)-DMs consistently achieve similar or even better results than the DDPM baselines, while gaining near \( \times 2 \) inference speed for \( f \)-DM-\{DS, VQVAE, VQGAN\} due to the nature of transformations. As a comparison, having fewer timesteps (DDPM 1/2) greatly hurts the generation quality of DDPM. We also show comparisons with transformation-specific baselines on FFHQ & AFHQ." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "v.s. Cascaded DMs. 
Although cascaded DMs have been shown effective in literature (Nichol & Dhariwal, 2021; Ho et al., 2022a), it is underexplored to apply cascades in a sequence of consecu" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.151, + 0.197, + 0.16 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.202, + 0.1, + 0.541, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.1, + 0.819, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.374, + 0.825, + 0.432 + ], + "angle": 0, + "content": "Figure 5: Random DDIM samples \\((\\eta = 0)\\) from (a) \\(f\\)-DMs on AFHQ and LSUN-Church by given {downsampled, blurred, latent} images as conditions; (b) \\(f\\)-DM-VQVAE by interpolating the initial noise of the latent stage; (c) \\(f\\)-DM-DS starting from the same initial noise of the \\(16 \\times 16\\) stage. For (c), we also show the \"mean image\" of 300 random samples using the same initial noise." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.438, + 0.825, + 0.537 + ], + "angle": 0, + "content": "tive resolutions \\((16\\to 32\\to 64\\to \\ldots)\\) like ours. In such cases, the prediction errors get easily accumulated during the generation, yielding serious artifacts in the final resolution. To ease this, Cascaded DM (Ho et al., 2022a) proposed to apply \"noise conditioning augmentation\" which reduced the domain gap between stages by adding random noise to the input condition. However, it is not straightforward to tune the noise level for both training and inference time. 
By contrast, \\(f\\) -DM is by-design non-cascaded, and there are no domain gaps between stages. That is, we can train our model end-to-end without worrying the additional tuning parameters and achieve stable results." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.543, + 0.825, + 0.655 + ], + "angle": 0, + "content": "v.s. LDMs. We show comparisons with LDMs (Rombach et al., 2021) in Table 1. LDMs generate more efficiently as the diffusion only happens in the latent space. However, the generation is heavily biased by the behavior of the fixed decoder. For instance, it is challenging for VQVAE decoders to synthesize sharp images, which causes low scores in Table 1. However, LDM with VQGAN decoders is able to generate sharp details, which are typically favored by InceptionV3 (Szegedy et al., 2016) used in FID and PR. Therefore, despite having artifacts (see Figure 4, below, rightmost) in the output, LDMs (GAN) still obtain good scores. In contrast, \\( f \\)-DM, as a pure DM, naturally bridges the latent and image spaces, where the generation is not restricted by the decoder." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.661, + 0.825, + 0.732 + ], + "angle": 0, + "content": "v.s. Blurring DMs. Table 1 compares with a recently proposed blurring-based method (IHDM, Rissanen et al., 2022). Different from our approach, IHDM formulates a fully deterministic forward process. We conjecture the lack of randomness is the cause of their poor generation quality. Instead, \\( f \\)-DM proposes a natural way of incorporating blurring with stochastic noise, yielding better quantitative and qualitative results." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.738, + 0.825, + 0.85 + ], + "angle": 0, + "content": "Conditional Generation. In Figure 5(a), we demonstrate the example of using pre-trained \\( f \\)-DMs to perform conditional generation based on learned transformations. 
We downsample and blur the sampled real images, and start the reverse diffusion following Section 3.1 with \\( f \\)-DM-DS and -Blur-U, respectively. Despite the difference in fine details, both our models faithfully generate high-fidelity outputs close to the real images. The same algorithm is applied to the extracted latent representations. Compared with the original VQVAE output, \\( f \\)-DM-VQVAE is able to obtain better reconstruction. We provide additional conditional generation samples with the ablation of the \"gradient-based\" initialization method in Appendix E.3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.869, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Latent Space Manipulation To demonstrate \\( f \\)-DMs have learned certain abstract representations by modeling with signal transformation, we show results of latent manipulation in Figure 5. Here we assume DDIM sampling (\\( \\eta = 0 \\)), and the only stochasticity comes from the initially sampled noise \\( \\epsilon_{\\mathrm{full}} \\). In (b), we obtain a semantically smooth transition between two cat faces when linearly" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "interpolating the low-resolution noises; on the other hand, we show samples of the same identity with different fine details (e.g., expression, poses) in (c), which is achieved easily by sampling \\( f \\)-DM-DS with the low-resolution (\\( 16^2 \\)) noise fixed. This implies that \\( f \\)-DM is able to allocate high-level and fine-grained information in different stages via learning with downsampling." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.178, + 0.356, + 0.192 + ], + "angle": 0, + "content": "4.3 ABLATION STUDIES" + }, + { + "type": "table_caption", + "bbox": [ + 0.179, + 0.208, + 0.816, + 0.224 + ], + "angle": 0, + "content": "Table 2: Ablation of design choices for \\( f \\) -DMs trained on FFHQ. All faces are not real identities." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.23, + 0.588, + 0.367 + ], + "angle": 0, + "content": "
ModelEq. 4RescaleStagesFID↓P↑R↑
f-DM-DSNoVPcosine26.50.700.25
YesNocosine14.50.730.43
YesSPcosine12.10.750.47
YesVPlinear13.50.730.46
YesVPcosine10.80.740.50
f-DM-VQVAEYesNolinear24.00.790.29
YesVPcosine13.80.780.45
YesVPlinear12.70.770.47
" + }, + { + "type": "image", + "bbox": [ + 0.599, + 0.238, + 0.808, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.632, + 0.295, + 0.768, + 0.303 + ], + "angle": 0, + "content": "(a) without interpolation (Eq.4)" + }, + { + "type": "image", + "bbox": [ + 0.599, + 0.303, + 0.808, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.637, + 0.358, + 0.762, + 0.366 + ], + "angle": 0, + "content": "(b) with interpolation (Eq.4)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.5 + ], + "angle": 0, + "content": "Table 2 presents the ablation of the key design choices. As expected, the interpolation formulation (Equation 4) effectively bridges the information gap between stages, without which the prediction errors get accumulated, resulting in blurry outputs and bad scores. Table 2 also demonstrates the importance of applying correct scaling. For both models, rescaling improves the FID and recall by large margins, where SP works slightly worse than VP. In addition, we also empirically explore the difference of stage schedules. Compared to VAE-based models, we usually have more stages in DS/Blur-based models to generate high-resolution images. The cosine schedule helps diffusion move faster in regions with low information density (e.g., low-resolution, heavily blurred)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.519, + 0.347, + 0.535 + ], + "angle": 0, + "content": "5 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.55, + 0.825, + 0.678 + ], + "angle": 0, + "content": "Progressive Generation with DMs. Conventional DMs generate images in the same resolutions. Therefore, existing work generally adopt cascaded approaches (Nichol & Dhariwal, 2021; Ho et al., 2022a; Sahara et al., 2022a) that chains a series of conditional DMs to generate coarse-to-fine, and have been used in super-resolution (SR3, Sahara et al., 2022b). 
However, cascaded models tend to suffer error propagation problems. More recently, Ryu & Ye (2022) dropped the need of conditioning, and proposed to generate images in a pyramidal fashion with additional reconstruction guidance; Jing et al. (2022) explored learning subspace DMs and connecting the full space with Langevin dynamics. By contrast, the proposed \\( f \\)-DM is distinct from all the above types, which only requires one diffusion process, and the images get naturally up-sampled through reverse diffusion." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.683, + 0.825, + 0.755 + ], + "angle": 0, + "content": "Blurring DMs. Several concurrent research (Rissanen et al., 2022; Daras et al., 2022; Lee et al., 2022) have recently looked into DM alternatives to combine blurring into diffusion process, some of which also showed the possibility of deterministic generation (Bansal et al., 2022). Although sharing similarities, our work starts from a different view based on signal transformation. Furthermore, our empirical results also show that stochasticity plays a critical role in high-quality generation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.759, + 0.825, + 0.817 + ], + "angle": 0, + "content": "Latent Space DMs. Existing work also investigated combining DMs with standard latent variable models. To our best knowledge, most of these works adopt DMs for learning the prior of latent space, where sampling is followed by a pre-trained (Rombach et al., 2021) or jointly optimized (Vahdat et al., 2021) decoder. Conversely, \\( f \\)-DM does not rely on the quality decoder." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.837, + 0.32, + 0.852 + ], + "angle": 0, + "content": "6 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We proposed \\( f \\)-DM, a generalized family of diffusion models that enables generation with signal transformations. 
As a demonstration, we apply \\( f \\)-DM to image generation tasks with a range of transformations, including downsampling, blurring and VAEs, where \\( f \\)-DMs outperform the baselines in terms of synthesis quality and semantic interpretation." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.347, + 0.119 + ], + "angle": 0, + "content": "ETHICS STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.205 + ], + "angle": 0, + "content": "Our work focuses on technical development, i.e., synthesizing high-quality images with a range of signal transformations (e.g., downsampling, blurring). Our approach has various applications, such as movie post-production, gaming, helping artists reduce workload, and generating synthetic data as training data for other computer vision tasks. Our approach can be used to synthesize human-related images (e.g., faces), and it is not biased towards any specific gender, race, region, or social class." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.211, + 0.828, + 0.324 + ], + "angle": 0, + "content": "However, the ability of generative models, including our approach, to generate high-quality images that are indistinguishable from real images, raises concerns about the misuse of these methods, e.g., generating fake images. To resolve these concerns, we need to mark all the generated results as \"synthetic\". In addition, we believe it is crucial to have authenticity assessment, such as fake image detection and identity verification, which will alleviate the potential for misuse. We hope our approach can be used to foster the development of technologies for authenticity assessment. 
Finally, we believe that creating a set of appropriate regulations and laws would significantly reduce the risks of misuse while bolstering positive effects on technology development." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.343, + 0.441, + 0.358 + ], + "angle": 0, + "content": "REPRODUCIBILITY STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.374, + 0.826, + 0.418 + ], + "angle": 0, + "content": "We assure that all the results shown in the paper and supplemental materials can be reproduced. We believe we have provided enough implementation details in the paper and supplemental materials for the readers to reproduce the results." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.437, + 0.289, + 0.451 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.459, + 0.826, + 0.504 + ], + "angle": 0, + "content": "Arpit Bansal, Eitan Borgnia, Hong-Min Chu, Jie S Li, Hamid Kazemi, Furong Huang, Micah Goldblum, Jonas Geiping, and Tom Goldstein. Cold diffusion: Inverting arbitrary image transforms without noise. arXiv preprint arXiv:2208.09392, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.51, + 0.825, + 0.542 + ], + "angle": 0, + "content": "Christopher M Bishop and Nasser M Nasrabadi. Pattern recognition and machine learning, volume 4. Springer, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.548, + 0.827, + 0.593 + ], + "angle": 0, + "content": "Jooyoung Choi, Jungbeom Lee, Chaehun Shin, Sungwon Kim, Hyunwoo Kim, and Sungroh Yoon. Perception prioritized training of diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11472-11481, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.599, + 0.827, + 0.644 + ], + "angle": 0, + "content": "Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8188-8197, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.651, + 0.826, + 0.695 + ], + "angle": 0, + "content": "Giannis Daras, Maurizio Delbracio, Hossein Talebi, Alexandros G. Dimakis, and Peyman Milanfar. Soft diffusion: Score matching for general corruptions, 2022. URL https://arxiv.org/abs/2209.05442." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.702, + 0.825, + 0.747 + ], + "angle": 0, + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255, 2009. doi: 10.1109/CVPR.2009.5206848." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.754, + 0.825, + 0.786 + ], + "angle": 0, + "content": "Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.792, + 0.825, + 0.823 + ], + "angle": 0, + "content": "Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation using real nvp. arXiv preprint arXiv:1605.08803, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.83, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 12873-12883, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.881, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Q. 
Weinberger (eds.), Advances" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.459, + 0.827, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.104, + 0.826, + 0.148 + ], + "angle": 0, + "content": "in Neural Information Processing Systems, volume 27, pp. 2672-2680. Curran Associates, Inc., 2014. URL https://proceedings.neurips.cc/paper/2014/file/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.176, + 0.155, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.207, + 0.826, + 0.237 + ], + "angle": 0, + "content": "Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.245, + 0.825, + 0.274 + ], + "angle": 0, + "content": "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems, 33:6840-6851, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.283, + 0.825, + 0.325 + ], + "angle": 0, + "content": "Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. *J. Mach. Learn. Res.*, 23: 47-1, 2022a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.335, + 0.825, + 0.377 + ], + "angle": 0, + "content": "Jonathan Ho, Tim Salimans, Alexey A Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. In ICLR Workshop on Deep Generative Models for Highly Structured Data, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.386, + 0.824, + 0.416 + ], + "angle": 0, + "content": "Emiel Hoogeboom and Tim Salimans. Blurring diffusion models, 2022. URL https://arxiv.org/abs/2209.05557." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.424, + 0.825, + 0.453 + ], + "angle": 0, + "content": "Bowen Jing, Gabriele Corso, Renato Berlinghieri, and Tommi Jaakkola. Subspace diffusion generative models. arXiv preprint arXiv:2205.01490, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.462, + 0.826, + 0.506 + ], + "angle": 0, + "content": "Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401-4410, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.513, + 0.825, + 0.543 + ], + "angle": 0, + "content": "Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. arXiv preprint arXiv:2106.12423, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.552, + 0.825, + 0.581 + ], + "angle": 0, + "content": "Diederik Kingma, Tim Salimans, Ben Poole, and Jonathan Ho. Variational diffusion models. Advances in neural information processing systems, 34:21696-21707, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.589, + 0.825, + 0.618 + ], + "angle": 0, + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.627, + 0.825, + 0.656 + ], + "angle": 0, + "content": "Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.665, + 0.825, + 0.708 + ], + "angle": 0, + "content": "Tuomas Kynkäänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in Neural Information Processing Systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.716, + 0.825, + 0.747 + ], + "angle": 0, + "content": "Sangyun Lee, Hyungjin Chung, Jaehyeon Kim, and Jong Chul Ye. Progressive deblurring of diffusion models for coarse-to-fine image synthesis. arXiv preprint arXiv:2207.11192, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.754, + 0.825, + 0.784 + ], + "angle": 0, + "content": "Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, pp. 8162-8171. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.792, + 0.825, + 0.835 + ], + "angle": 0, + "content": "Vadim Popov, Ivan Vovk, Vladimir Gogoryan, Tasnima Sadekova, and Mikhail Kudinov. Grad-tts: A diffusion probabilistic model for text-to-speech. In International Conference on Machine Learning, pp. 8599-8608. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.844, + 0.825, + 0.887 + ], + "angle": 0, + "content": "Konpat Preechakul, Nattanat Chatthee, Suttisak Wizadwongsa, and Supasorn Suwajanakorn. Diffusion autoencoders: Toward a meaningful and decodable representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10619-10629, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.895, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Ali Razavi, Aaron Van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-vae-2. Advances in neural information processing systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.825, + 0.172 + ], + "angle": 0, + "content": "Severi Rissanen, Markus Heinonen, and Arno Solin. Generative modelling with inverse heat dissipation. arXiv preprint arXiv:2206.13397, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.178, + 0.825, + 0.209 + ], + "angle": 0, + "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.216, + 0.825, + 0.26 + ], + "angle": 0, + "content": "Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pp. 234-241. Springer, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.268, + 0.825, + 0.297 + ], + "angle": 0, + "content": "Dohoon Ryu and Jong Chul Ye. 
Pyramidal denoising diffusion probabilistic models. arXiv preprint arXiv:2208.01864, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.306, + 0.825, + 0.362 + ], + "angle": 0, + "content": "Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.371, + 0.825, + 0.414 + ], + "angle": 0, + "content": "Chitwan Saharia, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.422, + 0.825, + 0.453 + ], + "angle": 0, + "content": "Tim Salimans and Jonathan Ho. Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.46, + 0.825, + 0.504 + ], + "angle": 0, + "content": "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.512, + 0.825, + 0.542 + ], + "angle": 0, + "content": "Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.55, + 0.825, + 0.581 + ], + "angle": 0, + "content": "Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in Neural Information Processing Systems, 32, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.588, + 0.825, + 0.631 + ], + "angle": 0, + "content": "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.64, + 0.825, + 0.683 + ], + "angle": 0, + "content": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2818-2826, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.691, + 0.825, + 0.721 + ], + "angle": 0, + "content": "Arash Vahdat and Jan Kautz. Nvae: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.728, + 0.825, + 0.758 + ], + "angle": 0, + "content": "Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In Neural Information Processing Systems (NeurIPS), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.766, + 0.825, + 0.797 + ], + "angle": 0, + "content": "Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.804, + 0.825, + 0.848 + ], + "angle": 0, + "content": "Fisher Yu, Ari Seff, Yinda Zhang, Shuran Song, Thomas Funkhouser, and Jianxiong Xiao. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365, 2015." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.848 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.102, + 0.283, + 0.119 + ], + "angle": 0, + "content": "APPENDIX" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.138, + 0.502, + 0.156 + ], + "angle": 0, + "content": "A DETAILED DERIVATION OF \\(f\\)-DMS" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.171, + 0.292, + 0.187 + ], + "angle": 0, + "content": "A.1 \\(q(\\mathbf{z}_t|\\mathbf{z}_s,\\mathbf{x})\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.198, + 0.825, + 0.226 + ], + "angle": 0, + "content": "We derive the definition in Equation 5 with the change-of-variable trick given the fact that \\( \\pmb{x}_t, \\pmb{x}_s \\) and \\( \\pmb{x}^k \\) are all deterministic functions of \\( \\pmb{x} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.233, + 0.826, + 0.29 + ], + "angle": 0, + "content": "More precisely, suppose \\( \\boldsymbol{z}_t \\sim \\mathcal{N}(\\alpha_t \\boldsymbol{x}_t, \\sigma_t^2 I), \\boldsymbol{z}_s \\sim \\mathcal{N}(\\alpha_s \\boldsymbol{x}_s, \\sigma_s^2 I) \\), where \\( \\tau_k \\leq s < t < \\tau_{k+1} \\). Thus, it is equivalent to have \\( \\boldsymbol{u}_t \\sim \\mathcal{N}(\\alpha_t \\boldsymbol{x}^k, \\sigma_t^2 I), \\boldsymbol{u}_s \\sim \\mathcal{N}(\\alpha_s \\boldsymbol{x}^k, \\sigma_s^2 I), \\boldsymbol{u}_t = \\boldsymbol{z}_t - \\alpha_t (\\boldsymbol{x}_t - \\boldsymbol{x}^k), \\boldsymbol{u}_s = \\boldsymbol{z}_s - \\alpha_s (\\boldsymbol{x}_s - \\boldsymbol{x}^k) \\). 
From the above definition, it is reasonable to assume \\( \\boldsymbol{u}_t, \\boldsymbol{u}_s \\) follow the standard DM transition, which means that:" + }, + { + "type": "equation", + "bbox": [ + 0.259, + 0.298, + 0.74, + 0.354 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\boldsymbol {u} _ {t} = \\alpha_ {t | s} \\boldsymbol {u} _ {s} + \\sigma_ {t | s} \\epsilon , \\epsilon \\sim \\mathcal {N} (\\boldsymbol {0}, I) \\\\ \\Rightarrow \\boldsymbol {z} _ {t} - \\alpha_ {t} \\left(\\boldsymbol {x} _ {t} - \\boldsymbol {x} ^ {k}\\right) = \\alpha_ {t | s} \\left(\\boldsymbol {z} _ {s} - \\alpha_ {s} \\left(\\boldsymbol {x} _ {s} - \\boldsymbol {x} ^ {k}\\right)\\right) + \\sigma_ {t | s} \\boldsymbol {\\epsilon}, \\quad \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (\\mathbf {0}, I) \\\\ \\Rightarrow \\quad \\boldsymbol {z} _ {t} = \\alpha_ {t | s} \\boldsymbol {z} _ {s} + \\alpha_ {t} \\left(\\boldsymbol {x} _ {t} - \\boldsymbol {x} _ {s}\\right) + \\sigma_ {t | s} \\boldsymbol {\\epsilon}, \\quad \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (\\boldsymbol {0}, I) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.365, + 0.825, + 0.395 + ], + "angle": 0, + "content": "As typically \\( \\pmb{x}_t \\neq \\pmb{x}_s \\) and both \\( \\pmb{x}_t, \\pmb{x}_s \\) are the functions of \\( \\pmb{x}^k \\). 
Then \\( z_t \\) is dependent on both \\( \\pmb{z}_s \\) and \\( \\pmb{x}^k = f_{0:k}(\\pmb{x}) \\), resulting in a non-Markovian transition:" + }, + { + "type": "equation", + "bbox": [ + 0.319, + 0.404, + 0.677, + 0.423 + ], + "angle": 0, + "content": "\\[\nq (\\pmb {z} _ {t} | \\pmb {z} _ {s}, \\pmb {x}) = \\mathcal {N} (\\pmb {z} _ {t}; \\alpha_ {t | s} \\pmb {z} _ {s} + \\alpha_ {t} \\cdot (\\pmb {x} _ {t} - \\pmb {x} _ {s}), \\sigma_ {t | s} ^ {2} I),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.825, + 0.462 + ], + "angle": 0, + "content": "Note that, this equation stands only when \\( \\boldsymbol{x}_t, \\boldsymbol{x}_s \\) and \\( \\boldsymbol{x}^k \\) are in the same space, and we did not make specific assumptions to the form of \\( \\boldsymbol{x}_t \\)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.481, + 0.291, + 0.497 + ], + "angle": 0, + "content": "A.2 \\(q(\\mathbf{z}_s|\\mathbf{z}_t,\\mathbf{x})\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.508, + 0.825, + 0.552 + ], + "angle": 0, + "content": "The reverse diffusion distribution follows the Bayes' Theorem: \\( q(\\pmb{z}_s|\\pmb{z}_t, \\pmb{x}) \\propto q(\\pmb{z}_s|\\pmb{x})q(\\pmb{z}_t|\\pmb{z}_s, \\pmb{x}) \\) where both \\( q(\\pmb{z}_s|\\pmb{x}) \\) and \\( q(\\pmb{z}_t|\\pmb{z}_s, \\pmb{x}) \\) are Gaussian distributions with general forms of \\( \\mathcal{N}(\\pmb{z}_s|\\pmb{\\mu}, \\sigma^2 I) \\) and \\( \\mathcal{N}(\\pmb{z}_t|A\\pmb{z}_s + \\pmb{b}, \\sigma'^2 I) \\), respectively. 
Based on Bishop & Nasrabadi (2006) (2.116), we can derive:" + }, + { + "type": "equation", + "bbox": [ + 0.295, + 0.561, + 0.7, + 0.58 + ], + "angle": 0, + "content": "\\[\nq \\left(\\boldsymbol {z} _ {s} \\mid \\boldsymbol {z} _ {t}, \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {s} \\mid \\bar {\\sigma} ^ {- 2} \\left(\\sigma^ {\\prime - 2} A ^ {\\top} \\left(\\boldsymbol {z} _ {t} - \\boldsymbol {b}\\right) + \\sigma^ {- 2} \\boldsymbol {\\mu}\\right), \\bar {\\sigma} ^ {2} I\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.589, + 0.825, + 0.621 + ], + "angle": 0, + "content": "where \\(\\bar{\\sigma}^2 = (\\sigma^{-2} + \\sigma'^{-2}\\| A\\| ^2)^{-1}\\). Therefore, we can get the exact form by plugging our variables \\(\\pmb {\\mu} = \\alpha_{s}\\hat{\\pmb{x}}_{k}^{s}\\), \\(\\sigma = \\sigma_s\\), \\(A = \\alpha_{t|s}I\\), \\(\\pmb {b} = \\alpha_{t}\\cdot (\\pmb {x}_{t} - \\pmb {x}_{s})\\), \\(\\sigma^{\\prime} = \\sigma_{t|s}\\) into above equation, we get:" + }, + { + "type": "equation", + "bbox": [ + 0.334, + 0.631, + 0.662, + 0.65 + ], + "angle": 0, + "content": "\\[\nq (\\pmb {z} _ {s} | \\pmb {z} _ {t}, \\pmb {x}) = \\mathcal {N} (\\pmb {z} _ {s} | \\alpha_ {s} \\pmb {x} _ {s} + \\sqrt {\\sigma_ {s} ^ {2} - \\bar {\\sigma} ^ {2}} \\pmb {\\epsilon} _ {t}, \\bar {\\sigma} ^ {2} I),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.658, + 0.492, + 0.675 + ], + "angle": 0, + "content": "where \\(\\epsilon_{t} = (z_{t} - \\alpha_{t}\\pmb{x}_{t}) / \\sigma_{t}\\) and \\(\\bar{\\sigma} = \\sigma_s\\sigma_{t|s} / \\sigma_t\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.68, + 0.826, + 0.739 + ], + "angle": 0, + "content": "Alternatively, if we assume \\( \\boldsymbol{x}_t \\) take the interpolation formulation in Equation 4, we can also re-write \\( \\boldsymbol{x}_s \\) with \\( \\boldsymbol{x}_t + \\frac{t - s}{t - \\tau_k} \\delta_t \\), where we define a new variable \\( \\delta_t = \\boldsymbol{x}^k - 
\\boldsymbol{x}_t \\). As stated in the main context (Section 3.1), such change makes \\( q(\\boldsymbol{z}_t | \\boldsymbol{z}_s, \\boldsymbol{x}) \\) avoid computing \\( \\boldsymbol{x}_s \\) which may be potentially costly. In this way, we re-write the above equation as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.75, + 0.825, + 0.768 + ], + "angle": 0, + "content": "\\[\nq \\left(\\boldsymbol {z} _ {s} \\mid \\boldsymbol {z} _ {t}, \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {s} \\mid \\alpha_ {s} \\left(\\boldsymbol {x} _ {t} + \\boldsymbol {\\delta} _ {t} \\cdot (t - s) / (t - \\tau_ {k})\\right) + \\sqrt {\\sigma_ {s} ^ {2} - \\bar {\\sigma} ^ {2}} \\boldsymbol {\\epsilon} _ {t}, \\bar {\\sigma} ^ {2} I\\right), \\tag {10}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.786, + 0.405, + 0.8 + ], + "angle": 0, + "content": "A.3 DIFFUSION INSIDE STAGES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.826, + 0.926 + ], + "angle": 0, + "content": "In the inference time, we generate data by iteratively sampling from the conditional distribution \\( p(\\pmb{z}_s|\\pmb{z}_t) = \\mathbb{E}_{\\pmb{x}}[q(\\pmb{z}_s|\\pmb{z}_t,\\pmb{x})] \\) based on Equation 10. In practice, the expectation over \\( \\pmb{x} \\) is approximated by our model's prediction. As shown in Equation 9, in this work, we propose a \"double-prediction\" network \\( \\theta \\) that reads \\( \\pmb{z}_t \\), and simultaneously predicts \\( \\pmb{x}_t \\) and \\( \\delta_t \\) with \\( \\pmb{x}_{\\theta} \\) and \\( \\delta_{\\theta} \\), respectively. The predicted Gaussian noise is denoted as \\( \\epsilon_{\\theta} = (z_{t} - \\alpha_{t}\\pmb{x}_{\\theta}) / \\sigma_{t} \\). Note that the prediction \\( x_{\\theta} \\) and \\( \\epsilon_{\\theta} \\) are interchangeable, which means that we can readily derive one from the other's prediction. 
Therefore, by replacing \\( x_{t}, \\delta_{t}, \\epsilon_{t} \\), with \\( x_{\\theta}, \\delta_{\\theta}, \\epsilon_{\\theta} \\) in Equation 10, we obtain the sampling algorithm shown in Algorithm 1: Line 6." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.103, + 0.493, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.102, + 0.822, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.268, + 0.825, + 0.311 + ], + "angle": 0, + "content": "Figure 7: Illustration of noise schedule \\((\\alpha_{t}\\) and \\(\\sigma_{t})\\) for \\(f\\)-DM-DS models with 5 stages \\((16^{2} \\rightarrow 256^{2})\\). We use the standard cosine noise schedule \\(\\alpha_{t} = \\cos(0.5\\pi t)\\). We also show the difference between the linear/cosine stage schedule, as well as the proposed SP/VP re-scaling methods." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.32, + 0.382, + 0.332 + ], + "angle": 0, + "content": "A.4 NOISE AT BOUNDARIES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.345, + 0.825, + 0.403 + ], + "angle": 0, + "content": "In this paper, the overall principle for handling the transition across stage boundaries is to ensure the forward diffusion is deterministic and smooth, so that almost no information is lost during the stage change. Such a requirement is important as it is directly correlated with the denoising performance. Failing to recover the lost information will directly affect the diversity of the samples the model generates." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.418, + 0.486, + 0.488 + ], + "angle": 0, + "content": "Forward diffusion As described in Section 3.1, since we have the control of the signal and the noise separately, we can directly apply the deterministic transformation on the signal, and dropping noise elements." + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.424, + 0.82, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.493, + 0.813, + 0.509 + ], + "angle": 0, + "content": "Figure 6: Two naive ways for down-sampling." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.494, + 0.486, + 0.523 + ], + "angle": 0, + "content": "Alternatively, we also implemented a different \\(\\zeta (\\epsilon)\\) based on averaging. As shown in Figure 6," + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.523, + 0.825, + 0.564 + ], + "angle": 0, + "content": "if the transformation is down-sampling, we can use the fact that the mean of Gaussian noises is still Gaussian with lower variance: \\((\\epsilon_0 + \\epsilon_1 + \\epsilon_2 + \\epsilon_3) / 4 \\sim \\mathcal{N}(0, \\frac{1}{4} I)\\). Therefore, \\(\\times 2\\) rescaling is needed on the resulted noise." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.581, + 0.825, + 0.653 + ], + "angle": 0, + "content": "Reverse diffusion Similarly, we can also define the reverse process if \\(\\zeta\\) is chosen to be averaging. Different from \"dropping\" where the reverse process is simply adding independent Gaussian noises, the reverse of \"averaging\" requests to sample \\(\\sum_{i=0}^{3} \\epsilon_i = 2\\epsilon\\) given the input noise \\(\\epsilon\\), while having \\(p(\\epsilon_i) = \\mathcal{N}(0, I)\\), \\(i = 0,1,2,3\\). 
Such problem has a closed solution and can be implemented in an autoregressive fashion:" + }, + { + "type": "equation", + "bbox": [ + 0.327, + 0.664, + 0.673, + 0.758 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} a = 2 \\epsilon ; \\\\ \\boldsymbol {\\epsilon} _ {0} = \\boldsymbol {a} / 4 + \\sqrt {3 / 4} \\cdot \\hat {\\epsilon} _ {1}, \\boldsymbol {a} = \\boldsymbol {a} - \\boldsymbol {\\epsilon} _ {0}, \\hat {\\epsilon} _ {1} \\sim \\mathcal {N} (\\boldsymbol {0}, I); \\\\ \\boldsymbol {\\epsilon} _ {1} = \\boldsymbol {a} / 3 + \\sqrt {2 / 3} \\cdot \\hat {\\epsilon} _ {2}, \\boldsymbol {a} = \\boldsymbol {a} - \\boldsymbol {\\epsilon} _ {1}, \\hat {\\epsilon} _ {2} \\sim \\mathcal {N} (\\boldsymbol {0}, I); \\\\ \\boldsymbol {\\epsilon} _ {2} = \\boldsymbol {a} / 2 + \\sqrt {1 / 2} \\cdot \\hat {\\epsilon} _ {3}, \\boldsymbol {a} = \\boldsymbol {a} - \\boldsymbol {\\epsilon} _ {2}, \\hat {\\epsilon} _ {3} \\sim \\mathcal {N} (\\boldsymbol {0}, I); \\\\ \\epsilon_ {3} = a \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.768, + 0.825, + 0.825 + ], + "angle": 0, + "content": "Similar to the case of \"dropping\", we also need 3 additional samples \\(\\hat{\\epsilon}_{1:3}\\) to contribute to four noises, therefore it can be implemented in the same way as described in Section 3.1. Empirically, reversing the \"averaging\" steps tends to produce samples with better FID scores. However, since it introduces correlations into the added noise, which may cause undesired biases especially in DDIM sampling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Intuition behind Re-scaling Here we present a simple justification of applying noise rescaling. Suppose the signal dimensionality changes from \\( M_{k-1} \\) to \\( M_k \\) when crossing the stage boundary, and such change is caused by different sampling rates. 
Based the proposed resolution-agnostic SNR (Equation 7), the number of sampled points inside \\( \\Omega \\) is proportional to its dimensionality. Generally, it is safe to assume signals are mostly low-frequency. Therefore, averaging signals will not change its variance. By contrast, as shown above, averaging Gaussian noises results in lower variance, where" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.103, + 0.825, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.302, + 0.289, + 0.696, + 0.306 + ], + "angle": 0, + "content": "Figure 8: We show the comparison of the DDIM sampling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.827, + 0.343 + ], + "angle": 0, + "content": "in our case, the variance is proportional to \\( M^{-1} \\). Therefore, suppose the signal magnitude does not change, we can get the re-scaling low by forcing \\( \\mathrm{SNR}(z_{\\tau}) = \\mathrm{SNR}(z_{\\tau^{-}}) \\) at the stage boundary:" + }, + { + "type": "equation", + "bbox": [ + 0.41, + 0.35, + 0.587, + 0.369 + ], + "angle": 0, + "content": "\\[\n\\sigma_ {\\tau^ {-}} ^ {2} \\cdot M _ {k - 1} ^ {- 1} = \\sigma_ {\\tau} ^ {2} \\cdot M _ {k} ^ {- 1},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.376, + 0.825, + 0.406 + ], + "angle": 0, + "content": "which derives the signal preserving (SP) rescaling in Equation 8. In Figure 7, we show an example of the change of \\(\\alpha\\) and \\(\\sigma\\) with and without applying the re-scaling technique for \\(f\\)-DM-DS models." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.423, + 0.345, + 0.436 + ], + "angle": 0, + "content": "A.5 DDIM SAMPLING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.45, + 0.825, + 0.522 + ], + "angle": 0, + "content": "The above derivations only describe the standard ancestral sampling \\((\\eta = 1)\\) where \\(q(\\pmb{z}_s|\\pmb{z}_t,\\pmb{x})\\) is determined by Bayes' Theorem. Optionally, one can arbitrarily define any proper reverse diffusion distribution as long as the marginal distributions match the definition. For example, \\(f\\)-DM can also perform deterministic DDIM (Song et al., 2021a) by setting \\(\\eta = 0\\) in Algorithm 1. Similar to Song et al. (2021a), we can also obtain the proof based on the induction argument." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.527, + 0.827, + 0.612 + ], + "angle": 0, + "content": "Figure 8 shows the comparison of DDIM sampling between the standard DMs and the proposed \\( f \\)-DM. In DDIM sampling \\( (\\eta = 0) \\), the only randomness comes from the initial noise at \\( t = 1 \\). Due to the proposed noise resampling technique, \\( f \\)-DM enables a multi-scale noisng process where the sampled noises are split and sent to different steps of the diffusion process. In this case, compared to standard DMs, we gain the ability of controlling image generation at different levels, resulting in smooth semantic interpretation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.633, + 0.624, + 0.648 + ], + "angle": 0, + "content": "B DETAILED INFORMATION OF TRANSFORMATIONS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.665, + 0.697, + 0.681 + ], + "angle": 0, + "content": "We show the difference of all the transformations used in this paper in Figure 9." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.699, + 0.338, + 0.712 + ], + "angle": 0, + "content": "B.1 DOWNSAMPLING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.725, + 0.825, + 0.81 + ], + "angle": 0, + "content": "In early development of this work, we explored various combinations of performing down-sampling: \\( \\pmb{f} = \\{\\text{bilinear, nearest, Gaussian blur + subsample}\\} \\), \\( \\pmb{g} = \\{\\text{bilinear, bicubic, nearest, neural-based}\\} \\). While all these combinations produced similar results, we empirically on FFHQ found that both choosing bilinear interpolation for both \\( \\pmb{f}, \\pmb{g} \\) achieves most stable results. Therefore, all the main experiments of \\( f \\)-DM-DS are conducted on bilinear interpolation. As discussed in Section 3.2, we choose \\( K = 4 \\), which progressively downsample a \\( 256^2 \\) into \\( 16^2 \\)." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.828, + 0.295, + 0.842 + ], + "angle": 0, + "content": "B.2 BLURRING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We experimented two types of blurring functions. For upsampling-based blurring, we use the same number of stages as the downsampling case; for Gaussian-based blurring, we adopt \\( K = 7 \\) with corresponding kernel sizes \\( \\sigma_{B} = 15\\sin^{2}\\left(\\frac{\\pi}{2}\\tau_{k}\\right) \\), where \\( \\tau_{k} \\) follows the cosine stage schedule. In practice, we implement blurring function in frequency domain following Rissanen et al. (2022) based on discrete cosine transform (DCT)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.185, + 0.109, + 0.581, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.345, + 0.172, + 0.421, + 0.183 + ], + "angle": 0, + "content": "Downsample" + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.109, + 0.812, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.71, + 0.172, + 0.756, + 0.182 + ], + "angle": 0, + "content": "VQ-VAE" + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.184, + 0.58, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.341, + 0.247, + 0.424, + 0.258 + ], + "angle": 0, + "content": "Updown Blur" + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.184, + 0.812, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.71, + 0.248, + 0.756, + 0.258 + ], + "angle": 0, + "content": "VQ-GAN" + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.26, + 0.812, + 0.321 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.451, + 0.322, + 0.548, + 0.332 + ], + "angle": 0, + "content": "Gaussian Blur" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.348, + 0.825, + 0.391 + ], + "angle": 0, + "content": "Figure 9: We show examples of the five transformations (downsample, blur, VAEs) used in this paper. For downsampling, we resize the image with nearest upsampler; for VQ-VAE/VQ-GAN, we visualize the first 3 channels of the latent feature maps." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.398, + 0.262, + 0.412 + ], + "angle": 0, + "content": "B.3 VAES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.425, + 0.825, + 0.495 + ], + "angle": 0, + "content": "In this paper, we only consider vector quantized (VQ) models with single layer latent space, while our methods can be readily applied to hierarchical (Razavi et al., 2019) and KL-regularized VAE models (Vahdat & Kautz, 2020). Following Rombach et al. (2021), we take the feature vectors before the quantization layers as the latent space, and keep the quantization step in the decoder \\((g)\\) when training diffusion models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.501, + 0.825, + 0.612 + ], + "angle": 0, + "content": "We follow an open-sourced implementation \\( ^2 \\) to train our VQVAE model on ImageNet. The model consists of two strided convolution blocks which by default downsamples the input image by a factor of 8. We use the default hyper-parameters and train the model for 50 epochs with a batch-size of 128. For a fair comparison to match the latent size of VQVAE, we use the pre-trained autoencoding model (Rombach et al., 2021) with the setting of \\( \\{f = 8, \\mathrm{VQ}(\\mathrm{Z} = 256, \\mathrm{d} = 4)\\} \\). We directly use the checkpoint \\( ^3 \\) provided by the authors. Note that the above setting is not the best performing model (LDM-4) in the original paper. Therefore, it generates more artifacts when reconstructing images from the latents." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.62, + 0.827, + 0.65 + ], + "angle": 0, + "content": "Before training, we compute the signal magnitude ratio \\(\\gamma_{k}\\) (Equation 8) over the entire training set of FFHQ, where we empirically set \\(\\gamma_{k} = 2.77\\) for VQ-GAN and \\(\\gamma_{k} = 2.0\\) for VQ-VAE, respectively." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.669, + 0.367, + 0.685 + ], + "angle": 0, + "content": "C DATASET DETAILS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.7, + 0.825, + 0.73 + ], + "angle": 0, + "content": "FFHQ (https://github.com/NVlabs/ffhq-dataset) contains 70k images of real human faces in resolution of \\(1024^2\\). For most of our experiments, we resize the images to \\(256^2\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.745, + 0.825, + 0.801 + ], + "angle": 0, + "content": "AFHQ (https://github.com/clovaai/stargan-v2# animal-faces-hq-dataset-afhq) contains 15k images of animal faces including cat, dog and wild three categories in resolution of \\(512^{2}\\). We train conditional diffusion models by merging all training images with the label information. All images are resized to \\(256^{2}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.816, + 0.825, + 0.887 + ], + "angle": 0, + "content": "LSUN (https://www.yf.io/p/1sun) is a collection of large-scale image dataset containing 10 scenes and 20 object categories. Following previous works Rombach et al. (2021), we choose the two categories – Church (126k images) and Bed (3M images), and train separate unconditional models on them. As LSUN-Bed is relatively larger, we set the iterations longer than other datasets. All images are resized to \\(256^2\\) with center-crop." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.896, + 0.61, + 0.911 + ], + "angle": 0, + "content": "\\(^{2}\\)https://github.com/rosinality/vq-vae-2-pytorch" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.911, + 0.726, + 0.925 + ], + "angle": 0, + "content": "3https://ommer-lab.com/files/latent-diffusion/vq-f8-n256.zip" + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.896, + 0.726, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.105, + 0.818, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.333, + 0.272, + 0.663, + 0.288 + ], + "angle": 0, + "content": "Figure 10: An illustration of the training pipeline." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.289, + 0.825, + 0.374 + ], + "angle": 0, + "content": "ImageNet (https://image-net.org/download.php) we use the standard ImageNet-1K dataset which contains 1.28M images across 1000 classes. We directly merge all the training images with class-labels. All images are resized to \\(256^2\\) with center-crop. For both \\(f\\)-DM and the baseline models, we adopt the classifier-free guidance (Ho & Salimans, 2022) with the unconditional probability 0.2. In the inference time, we use the guidance scale \\((s = 2)\\) for computing FIDs, and \\(s = 3\\) to synthesize examples for comparison." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.405, + 0.441, + 0.421 + ], + "angle": 0, + "content": "D IMPLEMENTATION DETAILS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.444, + 0.461, + 0.457 + ], + "angle": 0, + "content": "D.1 ARCHITECTURE CONFIGURATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.474, + 0.825, + 0.545 + ], + "angle": 0, + "content": "We implement \\( f \\)-DM strictly following standard U-Net architecture in Nichol & Dhariwal (2021). As shown in Figure 11, input \\( z_{t} \\) will be directed to the corresponding inner layer based on spatial resolutions, and a stage-specific adapter is adopted to transform the channel dimension. Such architecture also allows memory-efficient batching across stages where we can create a batch with various resolutions, and split the computation based on the resolutions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.574, + 0.368, + 0.587 + ], + "angle": 0, + "content": "D.2 HYPER-PARAMETERS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.604, + 0.825, + 0.687 + ], + "angle": 0, + "content": "In our experiments, we adopt the following two sets of parameters based on the complexity of the dataset: base (FFHQ, AFHQ, LSUN-Church/Bed) and big (ImageNet). For base, we use 1 residual block per resolution, with the basic dimension 128. For big, we use 2 residual blocks with the basic dimension 192. Given one dataset, all the models with various transformations including the baseline DMs share the same hyper-parameters except for the adapters. We list the hyperparameter details in Table 3." + }, + { + "type": "table", + "bbox": [ + 0.251, + 0.709, + 0.747, + 0.878 + ], + "angle": 0, + "content": "
Hyper-param.FFHQAFHQLSUN-ChurchLSUN-BedImageNet
image res.25622562256225622562
# of classesNone3NoneNone1000
c.f. guidance-No--Yes
#channels128128128128192
#res-blocks11112
channel multi.[1,1,2,2,4,4]
attention res.16,8
batch size3232323264
lr2e-5
iterations500K500K500K1200K2500K
" + }, + { + "type": "table_caption", + "bbox": [ + 0.266, + 0.887, + 0.731, + 0.903 + ], + "angle": 0, + "content": "Table 3: Hyperparameters and settings for \\( f \\) -DM on different datasets." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.107, + 0.825, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.243, + 0.828, + 0.288 + ], + "angle": 0, + "content": "Figure 11: An illustration of the modified U-Net architecture. Time conditioning is omitted. The parameters are partially shared across stages based on the resolutions. Stage-specific adapters are adopted to transform the input dimensions." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.308, + 0.822, + 0.73 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.741, + 0.828, + 0.8 + ], + "angle": 0, + "content": "Figure 12: Additional comparisons with Cascaded DM on AFHQ. \\(\\uparrow\\) Comparison of the reverse diffusion process from \\(16^{2}\\) to \\(256^{2}\\). We visualize the denoised outputs \\((\\boldsymbol{x}_t)\\) and the corresponding next noised input \\((z_{s})\\) near the start & end of each resolution diffusion. \\(\\downarrow\\) Comparison of random samples generated by Cascaded DM and \\(f\\)-DM-DS." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.805, + 0.4, + 0.82 + ], + "angle": 0, + "content": "E ADDITIONAL RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.84, + 0.515, + 0.855 + ], + "angle": 0, + "content": "E.1 QUANTITATIVE COMPARISON WITH DDIM" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.826, + 0.926 + ], + "angle": 0, + "content": "We also include comparison of \\( f \\)-DM with the standard DM using DDIM sampling (\\( \\eta = 0 \\)) in Table 4. Similar to the conclusion drawn from Table 1, the proposed \\( f \\)-DM can achieve comparable or even better performance than baseline DM even with \\( \\eta = 0 \\) (generation only controlled by the initial noise, see Figure 8), while having better scores for DDIM with half generation steps." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.106, + 0.495, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.104, + 0.82, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.362, + 0.493, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.363, + 0.819, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.305, + 0.636, + 0.692, + 0.651 + ], + "angle": 0, + "content": "Figure 13: Additional comparisons with LDMs on AFHQ." + }, + { + "type": "table", + "bbox": [ + 0.244, + 0.683, + 0.756, + 0.774 + ], + "angle": 0, + "content": "
ModelsFID↓P↑R↑FID↓P↑R↑Speed
FFHQ256 × 256AFHQ256 × 256
DDIM11.40.710.5312.10.580.65×1.0
DDIM (1/2)13.00.700.5116.80.480.64×2.0
f-DM-DS (η = 0)12.60.760.555.80.760.55×2.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.261, + 0.784, + 0.735, + 0.8 + ], + "angle": 0, + "content": "Table 4: Comparison on FFHQ and AFHQ for DDIM sampling \\( \\left( {\\eta = 0}\\right) \\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.807, + 0.533, + 0.82 + ], + "angle": 0, + "content": "E.2 V.S. TRANSFORMATION-SPECIFIC BASELINES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.828, + 0.926 + ], + "angle": 0, + "content": "We include more comparisons in Figure 12 and 13. From Figure 12, we compare the generation process of \\( f \\)-DM and the cascaded DM. It is clear that \\( f \\)-DM conducts coarse-to-fine generation in a more natural way, and the results will not suffer from error propagation. As shown in Figure 13, LDM outputs are easily affected by the chosen decoder. VQVAE decoder tends output blurry images; the output from VQGAN decoder has much finer details while remaining noticeable artifacts (e.g., eyes, furs). By contrast, \\( f \\)-DM performs stably for both latent spaces." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.416, + 0.119 + ], + "angle": 0, + "content": "E.3 CONDITIONAL GENERATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.827, + 0.187 + ], + "angle": 0, + "content": "We include additional results of conditional generation, i.e., super-resolution (Figure 14) and deblurring (Figure 15). We also show the comparison with or without the proposed gradient-based initialization, which greatly improves the faithfulness of conditional generation when the input noise is high (e.g., \\(16 \\times 16\\) input)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.204, + 0.476, + 0.218 + ], + "angle": 0, + "content": "E.4 ADDITIONAL QUALITATIVE RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.229, + 0.825, + 0.259 + ], + "angle": 0, + "content": "Finally, we provide additional qualitative results for our unconditional models for FFHQ (Figure 16), AFHQ (Figure 17), LSUN (Figure 18) and our class-conditional ImageNet model (Figure 19,20)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.279, + 0.492, + 0.294 + ], + "angle": 0, + "content": "F LIMITATIONS AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.309, + 0.828, + 0.518 + ], + "angle": 0, + "content": "Although \\( f \\)-DM enables diffusion with signal transformations, which greatly extends the scope of DMs to work in transformed space, there still exist limitations and opportunities for future work. First, it is an empirical question to find the optimal stage schedule for all transformations. Our ablation studies also show that different heuristics have differences for DS-based and VAE-based models. A metric that can automatically determine the best stage schedule based on the property of each transformation is needed and will be explored in the future. In addition, although the current method achieves faster inference when generating with transformations like down-sampling, the speed-up is not very significant as we still take the standard DDPM steps. How to further accelerate the inference process of DMs is a challenging and orthogonal direction. For example, it has great potential to combine \\( f \\)-DM with speed-up techniques such as knowledge distillation (Salimans & Ho, 2022). Moreover, no matter hand-designed or learned, all the transformations used in \\( f \\)-DM are still fixed when training DM. It is, however, different from typical VAEs, where both the encoder and decoder are jointly optimized during training. 
Therefore, starting from a random/imperfect transformation and training \\( f \\)-DM jointly with the transformations towards certain target objectives will be studied as future work." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.166, + 0.824, + 0.794 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.806, + 0.825, + 0.864 + ], + "angle": 0, + "content": "Figure 14: Additional examples of super-resolution (SR) with the unconditional \\( f \\)-DM-DS trained on AFHQ. \\( \\uparrow \\) The same input image with various resolution \\( 16^2 \\), \\( 32^2 \\), \\( 64^2 \\), \\( 128^2 \\). We sample 3 random seeds for each resolution input. We also show the difference with and without applying gradient-based initialization (Grad-Init) on \\( z \\). \\( \\downarrow \\) SR results of various \\( 16^2 \\) inputs." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.166, + 0.818, + 0.79 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.804, + 0.825, + 0.862 + ], + "angle": 0, + "content": "Figure 15: Additional examples of de-blurring with the unconditional \\( f \\)-DM-Blur-G trained on AFHQ. \\( \\uparrow \\) The same input image with various Gaussian kernel sizes \\( \\sigma = 15,9,4,1.4 \\). We sample 3 random seeds for each resolution input. 
We also show the difference with and without applying gradient-based initialization (Grad-Init) on \\( z \\). \\( \\downarrow \\) Deblurred results of various blur images." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image_caption", + "bbox": [ + 0.472, + 0.155, + 0.518, + 0.164 + ], + "angle": 0, + "content": "f-DM-DS" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.164, + 0.821, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.461, + 0.29, + 0.529, + 0.299 + ], + "angle": 0, + "content": "f-DM-Blur-U" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.299, + 0.821, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.46, + 0.425, + 0.528, + 0.433 + ], + "angle": 0, + "content": "f-DM-Blur-G" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.434, + 0.821, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.465, + 0.56, + 0.525, + 0.569 + ], + "angle": 0, + "content": "f-DM-VQVAE" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.569, + 0.821, + 0.693 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.465, + 0.695, + 0.525, + 0.703 + ], + "angle": 0, + "content": "f-DM-VQGAN" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.703, + 0.821, + 0.829 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.842, + 0.825, + 0.871 + ], + "angle": 0, + "content": "Figure 16: Random samples generated by five \\( f \\)-DMs trained on FFHQ \\( 256 \\times 256 \\). All faces presented are synthesized by the models, and are not real identities." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image_caption", + "bbox": [ + 0.472, + 0.16, + 0.518, + 0.169 + ], + "angle": 0, + "content": "f-DM-DS" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.17, + 0.821, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.461, + 0.296, + 0.529, + 0.304 + ], + "angle": 0, + "content": "f-DM-Blur-U" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.305, + 0.821, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.46, + 0.432, + 0.528, + 0.44 + ], + "angle": 0, + "content": "f-DM-Blur-G" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.44, + 0.821, + 0.565 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.465, + 0.567, + 0.525, + 0.574 + ], + "angle": 0, + "content": "f-DM-VQVAE" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.576, + 0.821, + 0.701 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.465, + 0.702, + 0.525, + 0.709 + ], + "angle": 0, + "content": "f-DM-VOGAN" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.71, + 0.821, + 0.835 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.85, + 0.776, + 0.867 + ], + "angle": 0, + "content": "Figure 17: Random samples generated by five \\( f \\)-DMs trained on AFHQ \\( 256 \\times 256 \\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image_caption", + "bbox": [ + 0.472, + 0.141, + 0.518, + 0.15 + ], + "angle": 0, + "content": "f-DM-DS" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.15, + 0.821, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.461, + 0.277, + 0.528, + 0.285 + ], + "angle": 0, + "content": "f-DM-Blur-U" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.286, + 0.821, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.412, + 0.524, + 0.42 + ], + "angle": 0, + "content": "f-DM-VQVAE" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.42, + 0.821, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.472, + 0.586, + 0.517, + 0.595 + ], + "angle": 0, + "content": "f-DM-DS" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.595, + 0.821, + 0.721 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.464, + 0.721, + 0.526, + 0.73 + ], + "angle": 0, + "content": "f-DM-VQVAE" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.73, + 0.821, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.184, + 0.869, + 0.813, + 0.885 + ], + "angle": 0, + "content": "Figure 18: Random samples generated by \\( f \\)-DMs trained on LSUN-Church & -Bed \\( 256 \\times 256 \\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.113, + 0.496, + 0.857 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.113, + 0.815, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.871, + 0.825, + 0.914 + ], + "angle": 0, + "content": "Figure 19: Random samples generated by \\( f \\)-DM-DS/VQVAE trained on ImageNet \\( 256 \\times 256 \\) with classifier-free guidance (\\( s = 3 \\)). Classes from top to bottom: red panda, robin, daisy, valley, trifle, comic book." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.109, + 0.818, + 0.859 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.873, + 0.825, + 0.917 + ], + "angle": 0, + "content": "Figure 20: Random samples generated by \\( f \\)-DM-DS/VQVAE trained on ImageNet \\( 256 \\times 256 \\) with classifier-free guidance (\\( s = 3 \\)). Classes from top to bottom: school bus, pizza, seashore, photocopier, golden retriever, axolotl." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ] +] \ No newline at end of file diff --git a/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_origin.pdf b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..613ca705b096673ce89a036483dea9bc1c109bdb --- /dev/null +++ b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/04076be8-bdc7-4349-91f7-210b46dd8933_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85ad190d521ae1497d7c0fc473b120e1648ef880c114c886f69fcdd7da4eef76 +size 49442568 diff --git a/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/full.md b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..9f4e732ba85d801c5cab7c2cc50609584976fd06 --- /dev/null +++ b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/full.md @@ -0,0 +1,559 @@ +# $f$ -DM: A MULTI-STAGE DIFFUSION MODEL VIA PROGRESSIVE SIGNAL TRANSFORMATION + +Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Miguel Angel Bautista, Josh Susskind Apple + +{jgu32, szhai, yizhe_zhang, mbautistamartin, jsusskind}@apple.com + +# ABSTRACT + +Diffusion models (DMs) have recently emerged as SoTA tools for generative modeling in various domains. Standard DMs can be viewed as an instantiation of hierarchical variational autoencoders (VAEs) where the latent variables are inferred from input-centered Gaussian distributions with fixed scales and variances. Unlike VAEs, this formulation constrains DMs from changing the latent spaces and learning abstract representations. 
In this work, we propose $f$ -DM, a generalized family of DMs, which allows progressive signal transformation. More precisely, we extend DMs to incorporate a set of (hand-designed or learned) transformations, where the transformed input is the mean of each diffusion step. We propose a generalized formulation of DMs and derive the corresponding de-noising objective together with a modified sampling algorithm. As a demonstration, we apply $f$ -DM in image generation tasks with a range of functions, including down-sampling, blurring, and learned transformations based on the encoder of pretrained VAEs. In addition, we identify the importance of adjusting the noise levels whenever the signal is sub-sampled and propose a simple rescaling recipe. $f$ -DM can produce high-quality samples on standard image generation benchmarks like FFHQ, AFHQ, LSUN and ImageNet with better efficiency and semantic interpretation. Please check our videos at http://jiataogu.me/fdm/. + +![](images/d52258dad9bef03c15d21315a1afeaa9608ab3ca09dcc34a5e80b1a92a1f6bc2.jpg) +Figure 1: Visualization of reverse diffusion from $f$ -DMs with various signal transformations. $x_{t}$ is the denoised output, and $z_{s}$ is the input to the next diffusion step. We plot the first three channels of VQVAE latent variables. Low-resolution images are resized to $256^{2}$ for ease of visualization. + +# 1 INTRODUCTION + +Diffusion probabilistic models (DMs, Sohl-Dickstein et al., 2015; Ho et al., 2020; Nichol & Dhariwal, 2021) and score-based (Song et al., 2021b) generative models have become increasingly popular + +as the tools for high-quality image (Dhariwal & Nichol, 2021), video (Ho et al., 2022b), text-to-speech (Popov et al., 2021) and text-to-image (Rombach et al., 2021; Ramesh et al., 2022; Sahara et al., 2022a) synthesis. Despite the empirical success, conventional DMs are restricted to operate in the ambient space throughout the Gaussian noising process. 
On the other hand, common generative models like VAEs (Kingma & Welling, 2013) and GANs (Goodfellow et al., 2014; Karras et al., 2021) employ a coarse-to-fine process that hierarchically generates high-resolution outputs. + +We are interested in combining the best of the two worlds: the expressivity of DMs and the benefit of hierarchical features. To this end, we propose $f$ -DM, a generalized multi-stage framework of DMs to incorporate progressive transformations to the inputs. As an important property of our formulation, $f$ -DM does not make any assumptions about the type of transformations. This makes it compatible with many possible designs, ranging from domain-specific ones to generic neural networks. In this work, we consider representative types of transformations, including down-sampling, blurring, and neural-based transformations. What these functions share in common is that they allow one to derive increasingly more global, coarse, and/or compact representations, which we believe can lead to better sampling quality as well as reduced computation. + +Incorporating arbitrary transformations into DMs also brings immediate modeling challenges. For instance, certain transformations destroy the information drastically, and some might also change the dimensionality. For the former, we derive an interpolation-based formulation to smoothly bridge consecutive transformations. For the latter, we verify the importance of rescaling the noise level, and propose a resolution-agnostic signal-to-noise ratio (SNR) as a practical guideline for noise rescaling. + +Extensive experiments are performed on image generation benchmarks, including FFHQ, AFHQ, LSUN Bed/Church and ImageNet. $f$ -DMs consistently match or outperform the baseline performance, while requiring relatively less computing thanks to the progressive transformations. 
Furthermore, given a pre-trained $f$ -DM, we can readily manipulate the learned latent space, and perform conditional generation tasks (e.g., super-resolution) without additional training. + +# 2 BACKGROUND + +Diffusion Models (DMs, Sohl-Dickstein et al., 2015; Song & Ermon, 2019; Ho et al., 2020) are deep generative models which can be viewed as a special case of hierarchical VAEs (Kingma et al., 2021). In this paper, we consider diffusion in continuous time similar to Song et al. (2021b); Kingma et al. (2021). + +Given a datapoint $\pmb{x} \in \mathbb{R}^N$ , a DM models time-dependent latent variables $\pmb{z} = \{\pmb{z}_t | t \in [0,1], \pmb{z}_0 = \pmb{x}\}$ based on a fixed signal-noise schedule $\{\alpha_t, \sigma_t\}$ : + +$$ +q (\boldsymbol {z} _ {t} | \boldsymbol {z} _ {s}) = \mathcal {N} (\boldsymbol {z} _ {t}; \alpha_ {t | s} \boldsymbol {z} _ {s}, \sigma_ {t | s} ^ {2} I), +$$ + +where $\alpha_{t|s} = \alpha_t / \alpha_s$ , $\sigma_{t|s}^2 = \sigma_t^2 - \alpha_{t|s}^2\sigma_s^2$ , $s < t$ . It also defines the marginal distribution $q(\pmb{z}_t|\pmb{x})$ as: + +$$ +q \left(\boldsymbol {z} _ {t} | \boldsymbol {x}\right) = \mathcal {N} \left(\boldsymbol {z} _ {t}; \alpha_ {t} \boldsymbol {x}, \sigma_ {t} ^ {2} I\right), +$$ + +By default, we assume the variance preserving + +form (Ho et al., 2020). That is, $\alpha_{t}^{2} + \sigma_{t}^{2} = 1$ , $\alpha_{0} = \sigma_{1} = 1$ , and the signal-to-noise-ratio (SNR, $\alpha_{t}^{2} / \sigma_{t}^{2}$ ) decreases monotonically with $t$ . For generation, a parametric function $\theta$ is optimized to reverse the diffusion process by denoising $z_{t} = \alpha_{t}x + \sigma_{t}\epsilon$ to the clean input $x$ , with a weighted reconstruction loss $\mathcal{L}_{\theta}$ . For example, the "simple loss" proposed in Ho et al. 
(2020) is equivalent to weighting residuals by $\omega_{t} = \alpha_{t}^{2} / \sigma_{t}^{2}$ : + +$$ +\mathcal {L} _ {\theta} = \mathbb {E} _ {\boldsymbol {z} _ {t} \sim q (\boldsymbol {z} _ {t} | \boldsymbol {x}), t \sim [ 0, 1 ]} \left[ \omega_ {t} \cdot \| \boldsymbol {x} _ {\theta} (\boldsymbol {z} _ {t}, t) - \boldsymbol {x} \| _ {2} ^ {2} \right]. \tag {1} +$$ + +In practice, $\theta$ is parameterized as a U-Net (Ronneberger et al., 2015). As suggested in Ho et al. (2020), predicting the noise $\epsilon_{\theta}$ empirically achieves better performance than predicting $x_{\theta}$ , where + +![](images/b144a42831e912bd3dbf6dc74671e2cca145cc9656e9e988b37a463741a9a3ad.jpg) +(a) DMS +Figure 2: (a) the standard DMs; (b) a bottom-up hierarchical VAEs; (c) our proposed $f$ -DM. + +![](images/1af00fd075b312ca6768d248e8c3288e144842b186ef5a857c11a740be3f3b14.jpg) +(b) VAEs + +![](images/c1e99745d8f0b4c81b2fc69809af8abb2e775847c79feff9b741fcf73378ce65.jpg) +(c) f-DM (Ours) + +$\pmb{x}_{\theta}(\pmb{z}_t,t) = (\pmb{z}_t - \sigma_t\pmb{\epsilon}_{\theta}(\pmb{z}_t,t)) / \alpha_t$ . Sampling from such a learned model can be performed from ancestral sampling (DDPM, Ho et al., 2020), or a deterministic DDIM sampler (Song et al., 2021a). Starting from $\pmb{z}_1\sim \mathcal{N}(\mathbf{0},I)$ , a sequence of timesteps $1 = t_0 > \ldots >t_N = 0$ are sampled for iterative generation, and we can readily summarize both methods for each step as follows: + +$$ +\boldsymbol {z} _ {s} = \alpha_ {s} \cdot \boldsymbol {x} _ {\theta} (\boldsymbol {z} _ {t}) + \sqrt {\sigma_ {s} ^ {2} - \eta^ {2} \bar {\sigma} ^ {2}} \cdot \boldsymbol {\epsilon} _ {\theta} (\boldsymbol {z} _ {t}) + \eta \bar {\sigma} \cdot \boldsymbol {\epsilon}, \quad \boldsymbol {\epsilon} \sim \mathcal {N} (\boldsymbol {0}, I), \quad s < t, \tag {2} +$$ + +where $\bar{\sigma} = \sigma_s\sigma_{t|s} / \sigma_t$ , and $\eta$ controls the proportion of additional noise. 
(i.e., setting $\eta = 0$ recovers the deterministic DDIM sampler).
\tag {3} +$$ + +The approximation of Equation 3 ( $k < K$ ) is not necessarily (and sometimes impossibly) accurate. For instance, $f_{k}$ downsamples an input image $\pmb{x}$ from $128^{2}$ into $64^{2}$ with average pooling, and $g_{k}$ can be a bilinear interpolation that upsamples back to $128^{2}$ , which is a lossy reconstruction. + +The definition of $f$ and $g$ can be seen as a direct analogy of the encoder $(\phi)$ and decoder $(\theta)$ in hierarchical VAEs (see Figure 2 (b)). However, there are still major differences: (1) the VAE encoder/decoder is stochastic, and the encoder's outputs are regularized by the prior. In contrast, $f$ and $g$ are deterministic, and the encoder output $x^{K}$ does not necessarily follow a simple prior; (2) VAEs directly use the decoder for generation, while $f, g$ are fused in the diffusion steps of $f$ -DM. + +Forward Diffusion We extend the continuous-time DMs for signal transformations. We split the diffusion time $0 \to 1$ into $K + 1$ stages, where for each stage, a partial diffusion process is performed. More specifically, we define a set of time boundaries $0 = \tau_0 < \tau_1 < \ldots < \tau_K < \tau_{K + 1} = 1$ , and for $t \in [0,1]$ , the latent $\mathbf{z}_t$ has the following marginal probability: + +$$ +q \left(\boldsymbol {z} _ {t} \mid \boldsymbol {x}\right) = \mathcal {N} \left(\boldsymbol {z} _ {t}; \alpha_ {t} \boldsymbol {x} _ {t}, \sigma_ {t} ^ {2} I\right), \quad \text {w h e r e} \boldsymbol {x} _ {t} = \frac {\left(t - \tau_ {k}\right) \hat {\boldsymbol {x}} ^ {k} + \left(\tau_ {k + 1} - t\right) \boldsymbol {x} ^ {k}}{\tau_ {k + 1} - \tau_ {k}}, \quad \tau_ {k} \leq t < \tau_ {k + 1}. \tag {4} +$$ + +As listed above, $\pmb{x}_t$ is the interpolation of $\pmb{x}^k$ and its approximation $\hat{\pmb{x}}^k$ when $t$ falls in stage $k$ . A simple illustration for the relationship of $\pmb{x}_t, \hat{\pmb{x}}^k, \pmb{x}^k$ and $z_t$ is shown in Figure 10. 
We argue that interpolation is crucial as it creates a continuous transformation that slowly corrupts information inside each stage. In this way, such change can be easily reversed by our model. Also, it is nontrivial to find the optimal stage schedule $\tau_k$ for each model as it highly depends on how much the information is destroyed in each stage $f_k$ . In this work, we tested two heuristics: (1) linear schedule $\tau_k = k / (K + 1)$ ; (2) cosine schedule $\tau_k = \cos(1 - k / (K + 1))$ . Note that the standard DMs can be seen as a special case of our $f$ -DM when there is only one stage ( $K = 0$ ). + +![](images/f9c66ed08214f298dcca5a8aa43a655c3a55afef525bded7f4d153b4d4823ca6.jpg) +Figure 3: Left: an illustration of the proposed SNR computation for different sampling rates; Right: the comparison of rescaling the noise level for progressive down-sampling. Without noise rescaling, the diffused images in low-resolution quickly become too noisy to distinguish the underline signal. + +Equation 4 does not guarantee a Markovian transition. Nevertheless, our formulation only needs $q(\pmb{z}_t | \pmb{z}_s, \pmb{x})$ , which has the following simple form focusing on diffusion steps within a stage: + +$$ +q \left(\boldsymbol {z} _ {t} \mid \boldsymbol {z} _ {s}, \boldsymbol {x}\right) = \mathcal {N} \left(\boldsymbol {z} _ {t}; \alpha_ {t | s} \boldsymbol {z} _ {s} + \alpha_ {t} \cdot \left(\boldsymbol {x} _ {t} - \boldsymbol {x} _ {s}\right), \sigma_ {t | s} ^ {2} I\right), \quad \tau_ {k} \leq s < t < \tau_ {k + 1}. \tag {5} +$$ + +From Equation 5, we further re-write $\boldsymbol{x}_t - \boldsymbol{x}_s = -\delta_t \cdot (t - s) / (t - \tau_k)$ , where $\delta_t = \boldsymbol{x}^k - \boldsymbol{x}_t$ is the signal degradation. 
Equation 5 also indicates that the reverse diffusion distribution $q(\boldsymbol{z}_s | \boldsymbol{z}_t, \boldsymbol{x}) \propto q(\boldsymbol{z}_t | \boldsymbol{z}_s, \boldsymbol{x}) q(\boldsymbol{z}_s | \boldsymbol{x})$ can be written as the function of $\boldsymbol{x}_t$ and $\delta_t$ which will be our learning objectives. + +Boundary Condition To enable diffusion across stages, we need the transition at stage boundaries $\tau_{k}$ . More specifically, when the step approaches the boundary $\tau^{-}$ (the left limit of $\tau$ ), the transition $q(z_{\tau} | z_{\tau^{-}}$ , $\pmb{x}$ ) should be as deterministic (ideally invertible) & smooth as possible to minimize information loss. First, we can easily expand $z_{\tau}$ and $z_{\tau^{-}}$ as the signal and noise combination: + +$$ +\text {B e f o r e :} \quad \boldsymbol {z} _ {\tau^ {-}} = \alpha_ {\tau^ {-}} \cdot \boldsymbol {x} _ {\tau^ {-}} + \sigma_ {\tau^ {-}} \cdot \boldsymbol {\epsilon}, p (\boldsymbol {\epsilon}) = \mathcal {N} (\boldsymbol {0}, I), +$$ + +$$ +A f t e r: \quad z _ {\tau} = \alpha_ {\tau} \cdot x _ {\tau} + \sigma_ {\tau} \cdot \zeta (\epsilon), p (\zeta (\epsilon)) = \mathcal {N} (0, I). \tag {6} +$$ + +Based on definition, $\pmb{x}_{\tau^{-}} = \hat{\pmb{x}}^{k - 1} = g(\pmb{x}^k) = g(\pmb{x}_{\tau})$ , which means the signal part is invertible. Therefore we only need to find $\zeta$ . Under the initial assumption of $M_{k} \leq M_{k - 1}$ , this can be achieved easily by dropping elements from $\epsilon$ . Take down-sampling $(M_{k - 1} = 4M_k)$ as an example. We can directly drop 3 out of every $2 \times 2$ values from $\epsilon$ . More details are included in Appendix A.4. + +The second requirement of a smooth transition is not as straightforward as it looks, which asks the "noisiness" of latents $z$ to remain unchanged across the boundary. 
We argue that the conventional measure – the signal-to-noise-ratio (SNR) – in DM literature is not compatible with resolution change as it averages the signal/noise power element-wise. In this work, we propose a generalized resolution-agnostic SNR by viewing data as points sampled from a continuous field: + +$$ +\operatorname {S N R} (\boldsymbol {z}) = \frac {\mathbb {E} _ {\Omega \sim I} \| \mathbb {E} _ {i \sim \Omega} \operatorname {S I G N A L} (\boldsymbol {z} _ {i}) \| ^ {2}}{\mathbb {E} _ {\Omega \sim I} \| \mathbb {E} _ {i \sim \Omega} \operatorname {N O I S E} (\boldsymbol {z} _ {i}) \| ^ {2}}, \tag {7} +$$ + +where $I$ is the data range, SIGNAL represents the real data value (such as image pixels), and NOISE is the unstructured Gaussian noise added to the data. $\Omega$ is a patch relative to $I$ , which can be any size as long as it is invariant to different sampling rates (resolutions). As shown in Figure 3 (left), we can obtain a reliable measure of noisiness by averaging the signal/noise inside patches. We derive $\alpha_{\tau}, \sigma_{\tau}$ from $\alpha_{\tau^{-}}, \sigma_{\tau^{-}}$ for any transformations by forcing $\mathrm{SNR}(z_{\tau}) = \mathrm{SNR}(z_{\tau^{-}})$ under this new definition. Specifically, if dimensionality change is solely caused by the change of sampling rate (e.g., down-sampling, average RGB channels, deconvolution), we can get the following relation: + +$$ +\alpha_ {\tau} ^ {2} / \sigma_ {\tau} ^ {2} = d _ {k} \cdot \gamma_ {k} \cdot \alpha_ {\tau -} ^ {2} / \sigma_ {\tau -} ^ {2}, \tag {8} +$$ + +where $d_{k} = M_{k - 1} / M_{k}$ is the total dimension change, and $\gamma_{k} = \mathbb{E}||\hat{\pmb{x}}^{k - 1}||^{2} / \mathbb{E}||\pmb{x}^{k}||^{2}$ is the change of signal power. For example, we have $d_{k} = 4,\gamma_{k}\approx 1$ for down-sampling. 
Following Equation 8, the straightforward rule is to rescale the magnitude of the noise, and keep the signal part unchanged: + +Algorithm 1: Reverse diffusion for image generation using $f$ -DM +Input: model $\theta ,f,g$ stage schedule $\{\tau_0,\dots ,\tau_K\}$ , rescaled noise schedule functions $\alpha (.)$ $\sigma (.)$ step-size $\Delta t$ $\epsilon_{\mathrm{full}}\sim \mathcal{N}(0,I)$ ,DDPM ratio $\eta$ +1 Initialize $z$ from $\epsilon_{\mathrm{full}}$ +2 for $(k = K;k\geq 0;k = k - 1)$ do +3 for $(t = \tau_{k + 1};t > \tau_k;t = t - \Delta t,s = t - \Delta t)$ do +4 $\begin{array}{r}\pmb {\epsilon}_{\theta},\pmb {\delta}_{\theta} = \theta (\pmb {z},t);\quad \pmb {x}_{\theta} = (\pmb {z} - \sigma (t)\cdot \pmb {\epsilon}_{\theta}) / \alpha (t);\\ \text{if} s > \tau_{k}\text{then}\\ \big{\lfloor}\pmb {z} = \alpha (s)\cdot (\pmb {x}_{\theta} + \pmb {\delta}_{\theta}\cdot (t - s) / (t - \tau_{k})) + \sqrt{\sigma^{2}(s) - \eta^{2}\bar{\sigma}^{2}}\cdot \pmb {\epsilon}_{\theta} + \eta \bar{\sigma}\cdot \pmb {\epsilon},\pmb {\epsilon}\sim \mathcal{N}(\pmb {0},I)\\ \end{array}$ +5 +6 if $k > 0$ then +7 Re-sample noise $\epsilon_{\mathrm{rs}}$ from $\epsilon_{\theta}$ and $\epsilon_{\mathrm{full}}$ . $z = \alpha (\tau_k)\cdot g_k(x_\theta) + \sigma (\tau_k)\cdot \epsilon_{\mathrm{rs}}$ +9 return $x_{\theta}$ + +$\alpha \leftarrow \alpha, \sigma \leftarrow \sigma / \sqrt{d_k}$ , which we refer as signal preserved (SP) rescaling. Note that, to ensure the noise schedule is continuous over time and close to the original schedule, such rescaling is applied to the noises of the entire stage, and will be accumulated when multiple transformations are used. As the comparison shown in Figure 3, the resulting images are visually closer to the standard DM. However, the variance of $z_{t}$ becomes very small, especially when $t \to 1$ , which might be hard for the neural networks to distinguish. 
Therefore, we propose the variance preserved (VP) alternative to further normalize the rescaled $\alpha, \sigma$ so that $\alpha^2 + \sigma^2 = 1$ . We show the visualization in Figure 3 (b). + +Training We train a neural network $\theta$ to denoise. We also show the training pipeline in Figure 10. In $f$ -DM, noise is caused by two factors: (1) the perturbation $\epsilon$ from noise injection; (2) the degradation $\delta$ due to signal transformation. Thus, we propose to predict $\boldsymbol{x}_{\theta}$ and $\delta_{\theta}$ jointly, which simultaneously remove both noises from $\boldsymbol{z}_t$ with a "double reconstruction" loss: + +$$ +\mathcal {L} _ {\theta} = \mathbb {E} _ {\boldsymbol {z} _ {t} \sim q (\boldsymbol {z} _ {t} | \boldsymbol {x}), t \sim [ 0, 1 ]} \left[ \omega_ {t} \cdot \left(\| \boldsymbol {x} _ {\theta} (\boldsymbol {z} _ {t}, t) - \boldsymbol {x} _ {t} \| _ {2} ^ {2} + \| \boldsymbol {\delta} _ {\theta} (\boldsymbol {z} _ {t}, t) - \boldsymbol {\delta} _ {t} \| _ {2} ^ {2}\right) \right], \tag {9} +$$ + +where the denoised output is $\pmb{x}_{\theta}(\pmb{z}_t, t) + \delta_{\theta}(\pmb{z}_t, t)$ . Unlike standard DMs, the denoising goals are the transformed signals of each stage rather than the final real images, which are generally simpler targets to recover. The same as standard DMs, we also choose to predict $\epsilon_{\theta}$ , and compute $\pmb{x}_{\theta} = (z_t - \sigma_t \pmb{\epsilon}_{\theta}) / \alpha_t$ . We adopt the same U-Net architecture for all stages, where input $\pmb{z}_t$ will be directed to the corresponding inner layer based on spatial resolutions (see Appendix Figure 11 for details). + +Unconditional Generation We present the generation steps in Algorithm 1, where $\boldsymbol{x}_t$ and $\delta_t$ are replaced by model's predictions $\boldsymbol{x}_{\theta}$ , $\delta_{\theta}$ . Thanks to the interpolation formulation (Equation 4), generation is independent of the transformations $f$ . 
Only the inverse mappings $g$ – which might be simple and easy to compute – is needed to map the signals at boundaries. This brings flexibility and efficiency to learning complex or even test-time inaccessible transformations. In addition, Algorithm 1 includes a "noise-resampling step" for each stage boundary, which is the reverse process for $\zeta(\epsilon)$ in Equation 6. While $\zeta$ is deterministic, the reverse process needs additional randomness. For instance, if $\zeta$ drops elements in the forward process, then the reverse step should inject standard Gaussian noise back to the dropped locations. Because we assume $M_0 \geq \ldots \geq M_K$ , we propose to sample a full-size noise $\epsilon_{\mathrm{full}}$ before generation, and gradually adding subsets of $\epsilon_{\mathrm{full}}$ to each stage. Thus, $\epsilon_{\mathrm{full}}$ encodes multi-scale information similar to RealNVP (Dinh et al., 2016). + +Conditional Generation Given an unconditional $f$ -DM, we can do conditional generation by replacing the denoised output $\pmb{x}_{\theta}$ with any condition $\pmb{x}_c$ at a suitable time $(T)$ , and starting diffusion from $T$ . For example, suppose $\pmb{f}$ is downsample, and $\pmb{x}_c$ is a low-resolution image, $f$ -DM enables super-resolution (SR) without additional training. To achieve that, it is critical to initialize $\pmb{z}_T$ , which implicitly asks $z_{T} \approx \alpha_{T}\pmb{x}_{c} + \sigma_{T}\pmb{\epsilon}_{\theta}(\pmb{z}_{T})$ . In practice, we choose $T$ to be the corresponding stage boundary, and initialize $\pmb{z}$ by adding random noise $\sigma_T\pmb{\epsilon}$ to $\alpha_{T}\pmb{x}_{c}$ . A gradient-based method is used to iteratively update $z_{T} \gets z_{T} - \lambda \nabla_{z_{T}}\| \pmb{x}_{\theta}(z_{T}) - \pmb{x}_{c}\|_{2}^{2}$ for a few steps before the diffusion starts. + +# 3.2 APPLICATIONS ON VARIOUS TRANSFORMATIONS + +With the definition in Section 3.1, next we show $f$ -DM applied with different transformations. 
In this paper, we consider the following three categories of transformations. + +Downsampling. As the motivating example in Section 3.1, we let $\pmb{f}$ a sequence of downsample operations that transforms a given image (e.g., $256^2$ ) progressively down to $16^2$ , where each $f_k(\cdot)$ + +reduces the length by 2, and correspondingly $g_{k}(.)$ upsamples by 2. Thus, the generation starts from a low-resolution noise and progressively performs super-resolution. We denote the model as $f$ -DM-DS, where $d_{k} = 4$ , $\gamma_{k} = 1$ in Equation 8 and $K = 4$ for $256^{2}$ images. + +Blurring. $f$ -DM also supports general blur transformations. Unlike recent works (Rissanen et al., 2022; Hoogeboom & Salimans, 2022) that focuses on continuous-time blur (heat dissipation), Equation 4 can be seen as an instantiation of progressive blur function if we treat $\hat{\pmb{x}}^k$ as a blurred version of $\pmb{x}^k$ . This design brings more flexibility in choosing any kind of blurring functions, and using the blurred versions as stages. In this paper, we experiment with two types of blurring functions. (1) $f$ -DM-Blur-U: utilizing the same downsample operators as $f$ -DM-DS, while always up-sampling the images back to the original sizes; (2) $f$ -DM-Blur-G: applying standard Gaussian blurring kernels following Rissanen et al. (2022). In both cases, we use $g_{k}(\pmb{x}) = \pmb{x}$ . As the dimension is not changed, no rescaling and noise resampling is required. + +Image $\rightarrow$ Latent Trans. We further consider diffusion with learned non-linear transformations such as VAEs (see Figure 2 (b), $f$ : VAE encoder, $g$ : VAE decoder). By inverting such an encoding process, we are able to generate data from low-dimensional latent space similar to Rombach et al. (LDM, 2021). As a major difference, LDM operates only on the latent variables, while $f$ -DM learns diffusion in the latent and image spaces jointly. 
Because of this, our performance will not be bounded by the quality of the VAE decoder. In this paper, we consider VQVAE (Van Den Oord et al., 2017) together with its GAN variant (VQGAN, Esser et al., 2021). For both cases, we transform $256^2 \times 3$ images into $32^2 \times 4$ (i.e., $d_k = 48$ ) latent space. The VQVAE encoder/decoder is trained on ImageNet (Deng et al., 2009), and is frozen for the rest of the experiments. For $f$ -DM-VQGAN, we directly take the checkpoint provided by Rombach et al. (2021). Besides, we need to tune $\gamma_k$ separately for each encoder due to the change in signal magnitude. + +# 4 EXPERIMENTS + +# 4.1 EXPERIMENTAL SETTINGS + +Datasets. We evaluate $f$ -DMs on five commonly used benchmarks testing generation on a range of domains: FFHQ (Karras et al., 2019), AFHQ (Choi et al., 2020), LSUN Church & Bed (Yu et al., 2015), and ImageNet (Deng et al., 2009). All images are center-cropped and resized to $256 \times 256$ . + +Training Details. We implement the three types of transformations with the same architecture and hyper-parameters except for the stage-specific adapters. We adopt a lighter version of ADM (Dhariwal & Nichol, 2021) as the main U-Net architecture. For all experiments, we adopt the same training scheme using AdamW (Kingma & Ba, 2014) optimizer with a learning rate of $2\mathrm{e} - 5$ and an EMA decay factor of 0.9999. We set the weight $\omega_{t} = \mathrm{sigmoid}(-\log (\alpha_{t}^{2} / \sigma_{t}^{2}))$ following P2-weighting (Choi et al., 2022). The cosine noise schedule $\alpha_{t} = \cos (0.5\pi t)$ is adopted for diffusion working in the $256^2\times 3$ image space. As proposed in Equation 8, noise rescaling (VP by default) is applied for $f$ -DMs when the resolutions change. All our models are trained with batch-size 32 images for 500K (FFHQ, AFHQ, LSUN Church), 1.2M (LSUN Bed) and 2.5M (ImageNet) iterations, respectively. + +Baselines & Evaluation. 
We compare $f$ -DMs against a standard DM (DDPM, Ho et al., 2020) on all five datasets. To ensure a fair comparison, we train DDPM following the same settings and continuous-time formulation as our approaches. We also include transformation-specific baselines: (1) we re-implement the cascaded DM (Cascaded, Ho et al., 2022a) to adapt $f$ -DM-DS setup from $16^{2}$ progressively to $256^{2}$ , where for each stage a separate DM is trained conditioned on the consecutive downsampled image; (2) we re-train a latent-diffusion model (LDM, Rombach et al., 2021) on the extracted latents from our pretrained VQVAE; (3) to compare with $f$ -DM-Blur-G, we include the scores and synthesised examples of IHDM (Rissanen et al., 2022). We set 250 timesteps $(\Delta t = 0.004)$ for $f$ -DMs and the baselines with $\eta = 1$ (Algorithm 1). We use Frechet Inception Distance (FID, Heusel et al., 2017) and Precision/Recall (PR, Kynkänniemi et al., 2019) as the measures of visual quality, based on 50K samples and the entire training set. + +# 4.2 RESULTS + +Qualitative Comparison To demonstrate the capability of handling various complex datasets, Figure 4 (↑) presents an uncurated set of images generated by $f$ -DM-DS. We show more samples from all types of $f$ -DMs in the Appendix E.4. We also show a comparison between $f$ -DMs and the + +![](images/5e41dbe2f3531f29dff630459a8e64a18dadcdc494c54143f08abb89f926491f.jpg) +Figure 4: $\uparrow$ Random samples from $f$ -DM-DS trained on various datasets; $\downarrow$ Comparison of $f$ -DMs and the corresponding baselines under various transformations. Best viewed when zoomed in. All faces presented are synthesized by the models, and are not real identities. + +Table 1: Quantitative comparisons on various datasets. The speed compared to DDPM is calculated with $\mathrm{bsz} = 1$ on CPU. Best performing DMs are shown in bold. + +
ModelsFID↓P↑R↑FID↓P↑R↑SpeedModelsFID↓
FFHQ256 × 256AFHQ256 × 256LSUN-Church 256 × 256
DDPM10.80.760.539.30.740.51×1.0DDPM9.7
DDPM (1/2)16.80.740.4515.20.640.44×2.0f-DM-DS8.2
Cascaded49.00.400.0924.20.370.13-f-DM-VQVAE8.0
f-DM-DS10.80.740.506.40.810.48×2.1LSUN-Bed 256 × 256
IHDM64.9--43.4---DDPM8.0
f-DM-Blur-G11.70.730.516.90.760.49×1.0f-DM-DS6.9
f-DM-Blur-U10.40.740.527.00.770.53×1.0f-DM-VQVAE7.1
LDM48.00.310.0729.70.070.11×9.8ImageNet 256 × 256
LDM (GAN)*8.60.720.606.50.630.61×9.2DDPM10.9
f-DM-VQVAE12.70.770.478.90.760.40×1.7f-DM-DS8.2
f-DM-VQGAN11.70.740.515.60.760.53×1.7f-DM-VQVAE6.8
+ +baselines with various transformations on FFHQ (Figure 4 $\downarrow$ ). Our methods consistently produce better visual results with more coherence and without noticeable artifacts. + +Quantitative Comparison. We measure the generation quality (FID and precision/recall) and relative inference speed of $f$ -DMs and the baselines in Table 1. Across all five datasets, $f$ -DMs consistently achieve similar or even better results than the DDPM baselines, while gaining near $\times 2$ inference speed for $f$ -DM-\{DS, VQVAE, VQGAN\} due to the nature of transformations. As a comparison, having fewer timesteps (DDPM 1/2) greatly hurts the generation quality of DDPM. We also show comparisons with transformation-specific baselines on FFHQ & AFHQ. + +v.s. Cascaded DMs. Although cascaded DMs have been shown effective in the literature (Nichol & Dhariwal, 2021; Ho et al., 2022a), it is underexplored to apply cascades in a sequence of consecu + +![](images/c0268113526218eb4d1ab7d4048c62013f56033e94fba21231eab49243813ec0.jpg) +(a) + +![](images/adcaceb13055291b80aba2488030391bc33778f4a0c1e884be2f8ac059829c4b.jpg) +Figure 5: Random DDIM samples $(\eta = 0)$ from (a) $f$ -DMs on AFHQ and LSUN-Church by given {downsampled, blurred, latent} images as conditions; (b) $f$ -DM-VQVAE by interpolating the initial noise of the latent stage; (c) $f$ -DM-DS starting from the same initial noise of the $16 \times 16$ stage. For (c), we also show the "mean image" of 300 random samples using the same initial noise. + +tive resolutions $(16\to 32\to 64\to \ldots)$ like ours. In such cases, the prediction errors get easily accumulated during the generation, yielding serious artifacts in the final resolution. To ease this, Cascaded DM (Ho et al., 2022a) proposed to apply "noise conditioning augmentation" which reduced the domain gap between stages by adding random noise to the input condition. However, it is not straightforward to tune the noise level for both training and inference time. 
By contrast, $f$ -DM is by-design non-cascaded, and there are no domain gaps between stages. That is, we can train our model end-to-end without worrying the additional tuning parameters and achieve stable results. + +v.s. LDMs. We show comparisons with LDMs (Rombach et al., 2021) in Table 1. LDMs generate more efficiently as the diffusion only happens in the latent space. However, the generation is heavily biased by the behavior of the fixed decoder. For instance, it is challenging for VQVAE decoders to synthesize sharp images, which causes low scores in Table 1. However, LDM with VQGAN decoders is able to generate sharp details, which are typically favored by InceptionV3 (Szegedy et al., 2016) used in FID and PR. Therefore, despite having artifacts (see Figure 4, below, rightmost) in the output, LDMs (GAN) still obtain good scores. In contrast, $f$ -DM, as a pure DM, naturally bridges the latent and image spaces, where the generation is not restricted by the decoder. + +v.s. Blurring DMs. Table 1 compares with a recently proposed blurring-based method (IHDM, Rissanen et al., 2022). Different from our approach, IHDM formulates a fully deterministic forward process. We conjecture the lack of randomness is the cause of their poor generation quality. Instead, $f$ -DM proposes a natural way of incorporating blurring with stochastic noise, yielding better quantitative and qualitative results. + +Conditional Generation. In Figure 5(a), we demonstrate the example of using pre-trained $f$ -DMs to perform conditional generation based on learned transformations. We downsample and blur the sampled real images, and start the reverse diffusion following Section 3.1 with $f$ -DM-DS and -Blur-U, respectively. Despite the difference in fine details, both our models faithfully generate high-fidelity outputs close to the real images. The same algorithm is applied to the extracted latent representations. 
Compared with the original VQVAE output, $f$ -DM-VQVAE is able to obtain better reconstruction. We provide additional conditional generation samples with the ablation of the "gradient-based" initialization method in Appendix E.3. + +Latent Space Manipulation To demonstrate $f$ -DMs have learned certain abstract representations by modeling with signal transformation, we show results of latent manipulation in Figure 5. Here we assume DDIM sampling ( $\eta = 0$ ), and the only stochasticity comes from the initially sampled noise $\epsilon_{\mathrm{full}}$ . In (b), we obtain a semantically smooth transition between two cat faces when linearly + +interpolating the low-resolution noises; on the other hand, we show samples of the same identity with different fine details (e.g., expression, poses) in (c), which is achieved easily by sampling $f$ -DM-DS with the low-resolution ( $16^2$ ) noise fixed. This implies that $f$ -DM is able to allocate high-level and fine-grained information in different stages via learning with downsampling. + +# 4.3 ABLATION STUDIES + +Table 2: Ablation of design choices for $f$ -DMs trained on FFHQ. All faces are not real identities. + +
ModelEq. 4RescaleStagesFID↓P↑R↑
f-DM-DSNoVPcosine26.50.700.25
YesNocosine14.50.730.43
YesSPcosine12.10.750.47
YesVPlinear13.50.730.46
YesVPcosine10.80.740.50
f-DM-VQVAEYesNolinear24.00.790.29
YesVPcosine13.80.780.45
YesVPlinear12.70.770.47
+ +![](images/108e920d64674ebe358ca51519c5e49d60d13c0091102d80f6359f3ce32c520b.jpg) + +![](images/0f44605785ca91807fbb0f0ea30d3ee8a61444f6b037f50e5d91a66484fe0c16.jpg) +(a) without interpolation (Eq.4) +(b) with interpolation (Eq.4) + +Table 2 presents the ablation of the key design choices. As expected, the interpolation formulation (Equation 4) effectively bridges the information gap between stages, without which the prediction errors get accumulated, resulting in blurry outputs and bad scores. Table 2 also demonstrates the importance of applying correct scaling. For both models, rescaling improves the FID and recall by large margins, where SP works slightly worse than VP. In addition, we also empirically explore the difference of stage schedules. Compared to VAE-based models, we usually have more stages in DS/Blur-based models to generate high-resolution images. The cosine schedule helps diffusion move faster in regions with low information density (e.g., low-resolution, heavily blurred). + +# 5 RELATED WORK + +Progressive Generation with DMs. Conventional DMs generate images in the same resolutions. Therefore, existing work generally adopt cascaded approaches (Nichol & Dhariwal, 2021; Ho et al., 2022a; Sahara et al., 2022a) that chains a series of conditional DMs to generate coarse-to-fine, and have been used in super-resolution (SR3, Sahara et al., 2022b). However, cascaded models tend to suffer error propagation problems. More recently, Ryu & Ye (2022) dropped the need of conditioning, and proposed to generate images in a pyramidal fashion with additional reconstruction guidance; Jing et al. (2022) explored learning subspace DMs and connecting the full space with Langevin dynamics. By contrast, the proposed $f$ -DM is distinct from all the above types, which only requires one diffusion process, and the images get naturally up-sampled through reverse diffusion. + +Blurring DMs. 
Several concurrent works (Rissanen et al., 2022; Daras et al., 2022; Lee et al., 2022) have recently looked into DM alternatives to combine blurring into the diffusion process, some of which also showed the possibility of deterministic generation (Bansal et al., 2022). Although sharing similarities, our work starts from a different view based on signal transformation. Furthermore, our empirical results also show that stochasticity plays a critical role in high-quality generation. + +Latent Space DMs. Existing work also investigated combining DMs with standard latent variable models. To the best of our knowledge, most of these works adopt DMs for learning the prior of the latent space, where sampling is followed by a pre-trained (Rombach et al., 2021) or jointly optimized (Vahdat et al., 2021) decoder. Conversely, $f$ -DM does not rely on the quality of the decoder. + +# 6 CONCLUSION + +We proposed $f$ -DM, a generalized family of diffusion models that enables generation with signal transformations. As a demonstration, we apply $f$ -DM to image generation tasks with a range of transformations, including downsampling, blurring and VAEs, where $f$ -DMs outperform the baselines in terms of synthesis quality and semantic interpretation. + +# ETHICS STATEMENT + +Our work focuses on technical development, i.e., synthesizing high-quality images with a range of signal transformations (e.g., downsampling, blurring). Our approach has various applications, such as movie post-production, gaming, helping artists reduce workload, and generating synthetic data as training data for other computer vision tasks. Our approach can be used to synthesize human-related images (e.g., faces), and it is not biased towards any specific gender, race, region, or social class. + +However, the ability of generative models, including our approach, to generate high-quality images that are indistinguishable from real images, raises concerns about the misuse of these methods, e.g., generating fake images. 
To resolve these concerns, we need to mark all the generated results as "synthetic". In addition, we believe it is crucial to have authenticity assessment, such as fake image detection and identity verification, which will alleviate the potential for misuse. We hope our approach can be used to foster the development of technologies for authenticity assessment. Finally, we believe that creating a set of appropriate regulations and laws would significantly reduce the risks of misuse while bolstering positive effects on technology development. + +# REPRODUCIBILITY STATEMENT + +We assure that all the results shown in the paper and supplemental materials can be reproduced. We believe we have provided enough implementation details in the paper and supplemental materials for the readers to reproduce the results. + +# REFERENCES + +Arpit Bansal, Eitan Borgnia, Hong-Min Chu, Jie S Li, Hamid Kazemi, Furong Huang, Micah Goldblum, Jonas Geiping, and Tom Goldstein. Cold diffusion: Inverting arbitrary image transforms without noise. arXiv preprint arXiv:2208.09392, 2022. +Christopher M Bishop and Nasser M Nasrabadi. Pattern recognition and machine learning, volume 4. Springer, 2006. +Jooyoung Choi, Jungbeom Lee, Chaehun Shin, Sungwon Kim, Hyunwoo Kim, and Sungroh Yoon. Perception prioritized training of diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11472-11481, 2022. +Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8188-8197, 2020. +Giannis Daras, Maurizio Delbracio, Hossein Talebi, Alexandros G. Dimakis, and Peyman Milanfar. Soft diffusion: Score matching for general corruptions, 2022. URL https://arxiv.org/abs/2209.05442. +Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. 
In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255, 2009. doi: 10.1109/CVPR.2009.5206848. +Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021. +Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation using real nvp. arXiv preprint arXiv:1605.08803, 2016. +Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 12873-12883, 2021. +Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Q. Weinberger (eds.), Advances + +in Neural Information Processing Systems, volume 27, pp. 2672-2680. Curran Associates, Inc., 2014. URL https://proceedings.neurips.cc/paper/2014/file/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf. +Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. +Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. +Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems, 33:6840-6851, 2020. +Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. *J. Mach. Learn. Res.*, 23: 47-1, 2022a. +Jonathan Ho, Tim Salimans, Alexey A Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. In ICLR Workshop on Deep Generative Models for Highly Structured Data, 2022b. 
+Emiel Hoogeboom and Tim Salimans. Blurring diffusion models, 2022. URL https://arxiv.org/abs/2209.05557. +Bowen Jing, Gabriele Corso, Renato Berlinghieri, and Tommi Jaakkola. Subspace diffusion generative models. arXiv preprint arXiv:2205.01490, 2022. +Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401-4410, 2019. +Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. arXiv preprint arXiv:2106.12423, 2021. +Diederik Kingma, Tim Salimans, Ben Poole, and Jonathan Ho. Variational diffusion models. Advances in neural information processing systems, 34:21696-21707, 2021. +Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. +Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. +Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in Neural Information Processing Systems, 32, 2019. +Sangyun Lee, Hyungjin Chung, Jaehyeon Kim, and Jong Chul Ye. Progressive deblurring of diffusion models for coarse-to-fine image synthesis. arXiv preprint arXiv:2207.11192, 2022. +Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, pp. 8162-8171. PMLR, 2021. +Vadim Popov, Ivan Vovk, Vladimir Gogoryan, Tasnama Sadekova, and Mikhail Kudinov. Grads- tts: A diffusion probabilistic model for text-to-speech. In International Conference on Machine Learning, pp. 8599-8608. PMLR, 2021. +Konpat Preechakul, Nattanat Chathee, Suttisak Wizadwongsa, and Supasorn Suwajanakorn. Diffusion autoencoders: Toward a meaningful and decodable representation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10619-10629, 2022. +Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. + +Ali Razavi, Aaron Van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-vae-2. Advances in neural information processing systems, 32, 2019. +Severi Rissanen, Markus Heinonen, and Arno Solin. Generative modelling with inverse heat dissipation. arXiv preprint arXiv:2206.13397, 2022. +Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models, 2021. +Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pp. 234-241. Springer, 2015. +Dohoon Ryu and Jong Chul Ye. Pyramidal denoising diffusion probabilistic models. arXiv preprint arXiv:2208.01864, 2022. +Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022a. +Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022b. +Tim Salimans and Jonathan Ho. Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512, 2022. +Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015. 
+Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021a. +Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in Neural Information Processing Systems, 32, 2019. +Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021b. +Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2818-2826, 2016. +Arash Vahdat and Jan Kautz. Nvae: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020. +Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In Neural Information Processing Systems (NeurIPS), 2021. +Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. +Fisher Yu, Ari Seff, Yinda Zhang, Shuran Song, Thomas Funkhouser, and Jianxiong Xiao. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365, 2015. + +# APPENDIX + +# A DETAILED DERIVATION OF $f$ -DMS + +# A.1 $q(\mathbf{z}_t|\mathbf{z}_s,\mathbf{x})$ + +We derive the definition in Equation 5 with the change-of-variable trick given the fact that $\pmb{x}_t, \pmb{x}_s$ and $\pmb{x}^k$ are all deterministic functions of $\pmb{x}$ . + +More precisely, suppose $\boldsymbol{z}_t \sim \mathcal{N}(\alpha_t \boldsymbol{x}_t, \sigma_t^2 I), \boldsymbol{z}_s \sim \mathcal{N}(\alpha_s \boldsymbol{x}_s, \sigma_s^2 I)$ , where $\tau_k \leq s < t < \tau_{k+1}$ . 
Thus, it is equivalent to have $\boldsymbol{u}_t \sim \mathcal{N}(\alpha_t \boldsymbol{x}^k, \sigma_t^2 I), \boldsymbol{u}_s \sim \mathcal{N}(\alpha_s \boldsymbol{x}^k, \sigma_s^2 I), \boldsymbol{u}_t = \boldsymbol{z}_t - \alpha_t (\boldsymbol{x}_t - \boldsymbol{x}^k), \boldsymbol{u}_s = \boldsymbol{z}_s - \alpha_s (\boldsymbol{x}_s - \boldsymbol{x}^k)$ . From the above definition, it is reasonable to assume $\boldsymbol{u}_t, \boldsymbol{u}_s$ follow the standard DM transition, which means that: + +$$
\begin{array}{l} \boldsymbol {u} _ {t} = \alpha_ {t | s} \boldsymbol {u} _ {s} + \sigma_ {t | s} \epsilon , \epsilon \sim \mathcal {N} (\boldsymbol {0}, I) \\ \Rightarrow \boldsymbol {z} _ {t} - \alpha_ {t} \left(\boldsymbol {x} _ {t} - \boldsymbol {x} ^ {k}\right) = \alpha_ {t | s} \left(\boldsymbol {z} _ {s} - \alpha_ {s} \left(\boldsymbol {x} _ {s} - \boldsymbol {x} ^ {k}\right)\right) + \sigma_ {t | s} \boldsymbol {\epsilon}, \quad \boldsymbol {\epsilon} \sim \mathcal {N} (\mathbf {0}, I) \\ \Rightarrow \quad \boldsymbol {z} _ {t} = \alpha_ {t | s} \boldsymbol {z} _ {s} + \alpha_ {t} \left(\boldsymbol {x} _ {t} - \boldsymbol {x} _ {s}\right) + \sigma_ {t | s} \boldsymbol {\epsilon}, \quad \boldsymbol {\epsilon} \sim \mathcal {N} (\boldsymbol {0}, I) \\ \end{array}
$$ + +Typically $\pmb{x}_t \neq \pmb{x}_s$ , and both $\pmb{x}_t, \pmb{x}_s$ are functions of $\pmb{x}^k$ . Then $z_t$ is dependent on both $\pmb{z}_s$ and $\pmb{x}^k = f_{0:k}(\pmb{x})$ , resulting in a non-Markovian transition: + +$$
q (\pmb {z} _ {t} | \pmb {z} _ {s}, \pmb {x}) = \mathcal {N} (\pmb {z} _ {t}; \alpha_ {t | s} \pmb {z} _ {s} + \alpha_ {t} \cdot (\pmb {x} _ {t} - \pmb {x} _ {s}), \sigma_ {t | s} ^ {2} I),
$$ + +Note that this equation stands only when $\boldsymbol{x}_t, \boldsymbol{x}_s$ and $\boldsymbol{x}^k$ are in the same space, and we did not make specific assumptions on the form of $\boldsymbol{x}_t$ . 
+ +# A.2 $q(\mathbf{z}_s|\mathbf{z}_t,\mathbf{x})$ + +The reverse diffusion distribution follows from Bayes' theorem: $q(\pmb{z}_s|\pmb{z}_t, \pmb{x}) \propto q(\pmb{z}_s|\pmb{x})q(\pmb{z}_t|\pmb{z}_s, \pmb{x})$ where both $q(\pmb{z}_s|\pmb{x})$ and $q(\pmb{z}_t|\pmb{z}_s, \pmb{x})$ are Gaussian distributions with general forms of $\mathcal{N}(\pmb{z}_s|\pmb{\mu}, \sigma^2 I)$ and $\mathcal{N}(\pmb{z}_t|A\pmb{z}_s + \pmb{b}, \sigma'^2 I)$ , respectively. Based on Bishop & Nasrabadi (2006) (2.116), we can derive: + +$$
q \left(\boldsymbol {z} _ {s} \mid \boldsymbol {z} _ {t}, \boldsymbol {x}\right) = \mathcal {N} \left(\boldsymbol {z} _ {s} \mid \bar {\sigma} ^ {- 2} \left(\sigma^ {\prime - 2} A ^ {\top} \left(\boldsymbol {z} _ {t} - \boldsymbol {b}\right) + \sigma^ {- 2} \boldsymbol {\mu}\right), \bar {\sigma} ^ {2} I\right),
$$ + +where $\bar{\sigma}^2 = (\sigma^{-2} + \sigma'^{-2}\| A\| ^2)^{-1}$ . Therefore, plugging our variables $\pmb {\mu} = \alpha_{s}\hat{\pmb{x}}_{k}^{s}$ , $\sigma = \sigma_s$ , $A = \alpha_{t|s}I$ , $\pmb {b} = \alpha_{t}\cdot (\pmb {x}_{t} - \pmb {x}_{s})$ , $\sigma^{\prime} = \sigma_{t|s}$ into the above equation, we get: + +$$
q (\pmb {z} _ {s} | \pmb {z} _ {t}, \pmb {x}) = \mathcal {N} (\pmb {z} _ {s} | \alpha_ {s} \pmb {x} _ {s} + \sqrt {\sigma_ {s} ^ {2} - \bar {\sigma} ^ {2}} \pmb {\epsilon} _ {t}, \bar {\sigma} ^ {2} I),
$$ + +where $\epsilon_{t} = (z_{t} - \alpha_{t}\pmb{x}_{t}) / \sigma_{t}$ and $\bar{\sigma} = \sigma_s\sigma_{t|s} / \sigma_t$ . + +Alternatively, if we assume $\boldsymbol{x}_t$ takes the interpolation formulation in Equation 4, we can also re-write $\boldsymbol{x}_s$ as $\boldsymbol{x}_t + \frac{t - s}{t - \tau_k} \delta_t$ , where we define a new variable $\delta_t = \boldsymbol{x}^k - \boldsymbol{x}_t$ . 
As stated in the main text (Section 3.1), such a change makes $q(\boldsymbol{z}_t | \boldsymbol{z}_s, \boldsymbol{x})$ avoid computing $\boldsymbol{x}_s$ , which may be potentially costly. In this way, we re-write the above equation as follows: + +$$
q \left(\boldsymbol {z} _ {s} \mid \boldsymbol {z} _ {t}, \boldsymbol {x}\right) = \mathcal {N} \left(\boldsymbol {z} _ {s} \mid \alpha_ {s} \left(\boldsymbol {x} _ {t} + \boldsymbol {\delta} _ {t} \cdot (t - s) / (t - \tau_ {k})\right) + \sqrt {\sigma_ {s} ^ {2} - \bar {\sigma} ^ {2}} \boldsymbol {\epsilon} _ {t}, \bar {\sigma} ^ {2} I\right), \tag {10}
$$ + +# A.3 DIFFUSION INSIDE STAGES + +At inference time, we generate data by iteratively sampling from the conditional distribution $p(\pmb{z}_s|\pmb{z}_t) = \mathbb{E}_{\pmb{x}}[q(\pmb{z}_s|\pmb{z}_t,\pmb{x})]$ based on Equation 10. In practice, the expectation over $\pmb{x}$ is approximated by our model's prediction. As shown in Equation 9, in this work, we propose a "double-prediction" network $\theta$ that reads $\pmb{z}_t$ , and simultaneously predicts $\pmb{x}_t$ and $\delta_t$ with $\pmb{x}_{\theta}$ and $\delta_{\theta}$ , respectively. The predicted Gaussian noise is denoted as $\epsilon_{\theta} = (z_{t} - \alpha_{t}\pmb{x}_{\theta}) / \sigma_{t}$ . Note that the predictions $x_{\theta}$ and $\epsilon_{\theta}$ are interchangeable, which means that we can readily derive one from the other's prediction. Therefore, by replacing $x_{t}, \delta_{t}, \epsilon_{t}$ with $x_{\theta}, \delta_{\theta}, \epsilon_{\theta}$ in Equation 10, we obtain the sampling algorithm shown in Algorithm 1: Line 6. + +![](images/278a4753546ab0c582a6c3bc7e663c2edfe759f474771a8dd263a6022b31bae4.jpg) +Figure 7: Illustration of noise schedule $(\alpha_{t}$ and $\sigma_{t})$ for $f$ -DM-DS models with 5 stages $(16^{2} \rightarrow 256^{2})$ . We use the standard cosine noise schedule $\alpha_{t} = \cos(0.5\pi t)$. 
We also show the difference between the linear/cosine stage schedule, as well as the proposed SP/VP re-scaling methods. + +![](images/9fcdfe29e54fa2f7efc299a2869440a676ddb2bb7c489a706a023ea92d7221d5.jpg) + +# A.4 NOISE AT BOUNDARIES + +In this paper, the overall principle in handling the transition across stage boundaries is to ensure that the forward diffusion is deterministic and smooth, so that almost no information is lost during the stage change. Such a requirement is important as it is directly correlated with the denoising performance. Failing to recover the lost information will directly affect the diversity of the samples the model generates. + +Forward diffusion As described in Section 3.1, since we have control of the signal and the noise separately, we can directly apply the deterministic transformation on the signal, and drop the noise elements. + +![](images/1bf6ca480a5a681325b6c015f5959592f25a3a931f42882d723a3402bf14c78e.jpg) +Figure 6: Two naive ways for down-sampling. + +Alternatively, we also implemented a different $\zeta (\epsilon)$ based on averaging. As shown in Figure 6, + +if the transformation is down-sampling, we can use the fact that the mean of Gaussian noises is still Gaussian with lower variance: $(\epsilon_0 + \epsilon_1 + \epsilon_2 + \epsilon_3) / 4 \sim \mathcal{N}(0, \frac{1}{4} I)$ . Therefore, $\times 2$ rescaling is needed on the resulting noise. + +Reverse diffusion Similarly, we can also define the reverse process if $\zeta$ is chosen to be averaging. Different from "dropping" where the reverse process is simply adding independent Gaussian noises, the reverse of "averaging" requires sampling $\sum_{i=0}^{3} \epsilon_i = 2\epsilon$ given the input noise $\epsilon$ , while having $p(\epsilon_i) = \mathcal{N}(0, I)$ , $i = 0,1,2,3$ . 
Such a problem has a closed-form solution and can be implemented in an autoregressive fashion: + +$$
\begin{array}{l} a = 2 \epsilon ; \\ \boldsymbol {\epsilon} _ {0} = \boldsymbol {a} / 4 + \sqrt {3 / 4} \cdot \hat {\epsilon} _ {1}, \boldsymbol {a} = \boldsymbol {a} - \boldsymbol {\epsilon} _ {0}, \hat {\epsilon} _ {1} \sim \mathcal {N} (\boldsymbol {0}, I); \\ \boldsymbol {\epsilon} _ {1} = \boldsymbol {a} / 3 + \sqrt {2 / 3} \cdot \hat {\epsilon} _ {2}, \boldsymbol {a} = \boldsymbol {a} - \boldsymbol {\epsilon} _ {1}, \hat {\epsilon} _ {2} \sim \mathcal {N} (\boldsymbol {0}, I); \\ \boldsymbol {\epsilon} _ {2} = \boldsymbol {a} / 2 + \sqrt {1 / 2} \cdot \hat {\epsilon} _ {3}, \boldsymbol {a} = \boldsymbol {a} - \boldsymbol {\epsilon} _ {2}, \hat {\epsilon} _ {3} \sim \mathcal {N} (\boldsymbol {0}, I); \\ \epsilon_ {3} = a \\ \end{array}
$$ + +Similar to the case of "dropping", we also need 3 additional samples $\hat{\epsilon}_{1:3}$ to contribute to four noises, therefore it can be implemented in the same way as described in Section 3.1. Empirically, reversing the "averaging" steps tends to produce samples with better FID scores. However, it introduces correlations into the added noise, which may cause undesired biases, especially in DDIM sampling. + +Intuition behind Re-scaling Here we present a simple justification of applying noise rescaling. Suppose the signal dimensionality changes from $M_{k-1}$ to $M_k$ when crossing the stage boundary, and such change is caused by different sampling rates. Based on the proposed resolution-agnostic SNR (Equation 7), the number of sampled points inside $\Omega$ is proportional to its dimensionality. Generally, it is safe to assume signals are mostly low-frequency. Therefore, averaging signals will not change their variance. 
By contrast, as shown above, averaging Gaussian noises results in lower variance, where + +![](images/a8f0bd125ff68052e5cdfb1ab440453e4f28e9f5687c63a07a38de767a8c7aed.jpg) +Figure 8: We show the comparison of the DDIM sampling. + +in our case, the variance is proportional to $M^{-1}$ . Therefore, assuming the signal magnitude does not change, we can get the re-scaling law by forcing $\mathrm{SNR}(z_{\tau}) = \mathrm{SNR}(z_{\tau^{-}})$ at the stage boundary: + +$$
\sigma_ {\tau^ {-}} ^ {2} \cdot M _ {k - 1} ^ {- 1} = \sigma_ {\tau} ^ {2} \cdot M _ {k} ^ {- 1},
$$ + +which derives the signal preserving (SP) rescaling in Equation 8. In Figure 7, we show an example of the change of $\alpha$ and $\sigma$ with and without applying the re-scaling technique for $f$ -DM-DS models. + +# A.5 DDIM SAMPLING + +The above derivations only describe the standard ancestral sampling $(\eta = 1)$ where $q(\pmb{z}_s|\pmb{z}_t,\pmb{x})$ is determined by Bayes' Theorem. Optionally, one can arbitrarily define any proper reverse diffusion distribution as long as the marginal distributions match the definition. For example, $f$ -DM can also perform deterministic DDIM (Song et al., 2021a) by setting $\eta = 0$ in Algorithm 1. Similar to Song et al. (2021a), we can also obtain the proof based on the induction argument. + +Figure 8 shows the comparison of DDIM sampling between the standard DMs and the proposed $f$ -DM. In DDIM sampling $(\eta = 0)$ , the only randomness comes from the initial noise at $t = 1$ . Due to the proposed noise resampling technique, $f$ -DM enables a multi-scale noising process where the sampled noises are split and sent to different steps of the diffusion process. In this case, compared to standard DMs, we gain the ability of controlling image generation at different levels, resulting in smooth semantic interpretation. + +# B DETAILED INFORMATION OF TRANSFORMATIONS + +We show the difference of all the transformations used in this paper in Figure 9. 
+ +# B.1 DOWNSAMPLING + +In early development of this work, we explored various combinations of performing down-sampling: $\pmb{f} = \{\text{bilinear, nearest, Gaussian blur + subsample}\}$ , $\pmb{g} = \{\text{bilinear, bicubic, nearest, neural-based}\}$ . While all these combinations produced similar results, we empirically found on FFHQ that choosing bilinear interpolation for both $\pmb{f}, \pmb{g}$ achieves the most stable results. Therefore, all the main experiments of $f$ -DM-DS are conducted on bilinear interpolation. As discussed in Section 3.2, we choose $K = 4$ , which progressively downsamples a $256^2$ image into $16^2$ . + +# B.2 BLURRING + +We experimented with two types of blurring functions. For upsampling-based blurring, we use the same number of stages as the downsampling case; for Gaussian-based blurring, we adopt $K = 7$ with corresponding kernel sizes $\sigma_{B} = 15\sin^{2}\left(\frac{\pi}{2}\tau_{k}\right)$ , where $\tau_{k}$ follows the cosine stage schedule. In practice, we implement the blurring function in the frequency domain following Rissanen et al. (2022) based on discrete cosine transform (DCT). + +![](images/5b3e560c3c5c56b6d3ab340c98388c262222bacc08d97b15c2fbefad85bc6dcd.jpg) +Downsample + +![](images/5e2992e5e311e4096de6ec3eec040c148bcf6967de1834cdae740335a091ea4d.jpg) +VQ-VAE + +![](images/6a72be007beec1c6ed8108dcaa9f9015e80c6cdb2a3f6af037ea87db15c2d9ef.jpg) +Updown Blur + +![](images/7896088996ebbadbd3bfaec61292b7fa531e9bc1ecf2647c43207881183dd663.jpg) + +![](images/26a5447a6af5b30545b351421b7271a5a73e4a3e35dccf47b3bf810bde83a77f.jpg) +VQ-GAN +Gaussian Blur +Figure 9: We show examples of the five transformations (downsample, blur, VAEs) used in this paper. For downsampling, we resize the image with a nearest upsampler; for VQ-VAE/VQ-GAN, we visualize the first 3 channels of the latent feature maps. 
+ +# B.3 VAES + +In this paper, we only consider vector quantized (VQ) models with a single-layer latent space, while our methods can be readily applied to hierarchical (Razavi et al., 2019) and KL-regularized VAE models (Vahdat & Kautz, 2020). Following Rombach et al. (2021), we take the feature vectors before the quantization layers as the latent space, and keep the quantization step in the decoder $(g)$ when training diffusion models. + +We follow an open-sourced implementation $^2$ to train our VQVAE model on ImageNet. The model consists of two strided convolution blocks which by default downsample the input image by a factor of 8. We use the default hyper-parameters and train the model for 50 epochs with a batch-size of 128. For a fair comparison to match the latent size of VQVAE, we use the pre-trained autoencoding model (Rombach et al., 2021) with the setting of $\{f = 8, \mathrm{VQ}(\mathrm{Z} = 256, \mathrm{d} = 4)\}$ . We directly use the checkpoint $^3$ provided by the authors. Note that the above setting is not the best performing model (LDM-4) in the original paper. Therefore, it generates more artifacts when reconstructing images from the latents. + +Before training, we compute the signal magnitude ratio $\gamma_{k}$ (Equation 8) over the entire training set of FFHQ, where we empirically set $\gamma_{k} = 2.77$ for VQ-GAN and $\gamma_{k} = 2.0$ for VQ-VAE, respectively. + +# C DATASET DETAILS + +FFHQ (https://github.com/NVlabs/ffhq-dataset) contains 70k images of real human faces in resolution of $1024^2$ . For most of our experiments, we resize the images to $256^2$ . + +AFHQ (https://github.com/clovaai/stargan-v2#animal-faces-hq-dataset-afhq) contains 15k images of animal faces spanning three categories (cat, dog, and wild) in resolution of $512^{2}$ . We train conditional diffusion models by merging all training images with the label information. All images are resized to $256^{2}$ . 
+ +LSUN (https://www.yf.io/p/lsun) is a collection of large-scale image datasets containing 10 scenes and 20 object categories. Following previous work (Rombach et al., 2021), we choose the two categories – Church (126k images) and Bed (3M images), and train separate unconditional models on them. As LSUN-Bed is relatively larger, we set the iterations longer than for other datasets. All images are resized to $256^2$ with center-crop. + +![](images/8ac2a2769616de0707aebf77761e09cbe085836873539f1e2371fec00392f9e2.jpg) +Figure 10: An illustration of the training pipeline. + +ImageNet (https://image-net.org/download.php): we use the standard ImageNet-1K dataset which contains 1.28M images across 1000 classes. We directly merge all the training images with class-labels. All images are resized to $256^2$ with center-crop. For both $f$ -DM and the baseline models, we adopt the classifier-free guidance (Ho & Salimans, 2022) with the unconditional probability 0.2. At inference time, we use the guidance scale $(s = 2)$ for computing FIDs, and $s = 3$ to synthesize examples for comparison. + +# D IMPLEMENTATION DETAILS + +# D.1 ARCHITECTURE CONFIGURATIONS + +We implement $f$ -DM strictly following the standard U-Net architecture in Nichol & Dhariwal (2021). As shown in Figure 11, input $z_{t}$ will be directed to the corresponding inner layer based on spatial resolutions, and a stage-specific adapter is adopted to transform the channel dimension. Such architecture also allows memory-efficient batching across stages where we can create a batch with various resolutions, and split the computation based on the resolutions. + +# D.2 HYPER-PARAMETERS + +In our experiments, we adopt the following two sets of parameters based on the complexity of the dataset: base (FFHQ, AFHQ, LSUN-Church/Bed) and big (ImageNet). For base, we use 1 residual block per resolution, with the basic dimension 128. For big, we use 2 residual blocks with the basic dimension 192. 
Given one dataset, all the models with various transformations including the baseline DMs share the same hyper-parameters except for the adapters. We list the hyperparameter details in Table 3. + +
Hyper-param.FFHQAFHQLSUN-ChurchLSUN-BedImageNet
image res.25622562256225622562
# of classesNone3NoneNone1000
c.f. guidance-No--Yes
#channels128128128128192
#res-blocks11112
channel multi.[1,1,2,2,4,4]
attention res.16,8
batch size3232323264
lr2e-5
iterations500K500K500K1200K2500K
+ +Table 3: Hyperparameters and settings for $f$ -DM on different datasets. + +![](images/280bce376cb58a90c764d5be92a1d3087016a9c77530f61850439042834418fb.jpg) +Figure 11: An illustration of the modified U-Net architecture. Time conditioning is omitted. The parameters are partially shared across stages based on the resolutions. Stage-specific adapters are adopted to transform the input dimensions. + +![](images/9bcf1a75278e8d2281965673b9c05dbad410dcb5b0b7d4f5f76e1fd3d5c19fd4.jpg) +Figure 12: Additional comparisons with Cascaded DM on AFHQ. $\uparrow$ Comparison of the reverse diffusion process from $16^{2}$ to $256^{2}$ . We visualize the denoised outputs $(\boldsymbol{x}_t)$ and the corresponding next noised input $(z_{s})$ near the start & end of each resolution diffusion. $\downarrow$ Comparison of random samples generated by Cascaded DM and $f$ -DM-DS. + +# E ADDITIONAL RESULTS + +# E.1 QUANTITATIVE COMPARISON WITH DDIM + +We also include comparison of $f$ -DM with the standard DM using DDIM sampling ( $\eta = 0$ ) in Table 4. Similar to the conclusion drawn from Table 1, the proposed $f$ -DM can achieve comparable or even better performance than baseline DM even with $\eta = 0$ (generation only controlled by the initial noise, see Figure 8), while having better scores for DDIM with half generation steps. + +![](images/7dcde01bb04d5d291ef70d6b3ae7e95bf484cb4f99ae3ed3804760fe2611a9e3.jpg) + +![](images/4f5b223dfc3d6aff5adb4963b5f5094d8e3d2d008d7386ef0b2ac033d7d1942d.jpg) + +![](images/cd734a886b73f3d4822b4bd31ee8e54ca4661a7fac6a569dfe5cc1d8681fbbd1.jpg) +Figure 13: Additional comparisons with LDMs on AFHQ. + +![](images/6e100b42a0472267a4df7aa950b52b8d7c0dda9c722783999fb6aa35ba59a2e4.jpg) + +
ModelsFID↓P↑R↑FID↓P↑R↑Speed
FFHQ256 × 256AFHQ256 × 256
DDIM11.40.710.5312.10.580.65×1.0
DDIM (1/2)13.00.700.5116.80.480.64×2.0
f-DM-DS (η = 0)12.60.760.555.80.760.55×2.1
+ +Table 4: Comparison on FFHQ and AFHQ for DDIM sampling $\left( {\eta = 0}\right)$ + +# E.2 V.S. TRANSFORMATION-SPECIFIC BASELINES + +We include more comparisons in Figures 12 and 13. From Figure 12, we compare the generation process of $f$ -DM and the cascaded DM. It is clear that $f$ -DM conducts coarse-to-fine generation in a more natural way, and the results will not suffer from error propagation. As shown in Figure 13, LDM outputs are easily affected by the chosen decoder. The VQVAE decoder tends to output blurry images; the output from the VQGAN decoder has much finer details while retaining noticeable artifacts (e.g., eyes, fur). By contrast, $f$ -DM performs stably for both latent spaces. + +# E.3 CONDITIONAL GENERATION + +We include additional results of conditional generation, i.e., super-resolution (Figure 14) and deblurring (Figure 15). We also show the comparison with and without the proposed gradient-based initialization, which greatly improves the faithfulness of conditional generation when the input noise is high (e.g., $16 \times 16$ input). + +# E.4 ADDITIONAL QUALITATIVE RESULTS + +Finally, we provide additional qualitative results for our unconditional models for FFHQ (Figure 16), AFHQ (Figure 17), LSUN (Figure 18) and our class-conditional ImageNet model (Figures 19, 20). + +# F LIMITATIONS AND FUTURE WORK + +Although $f$ -DM enables diffusion with signal transformations, which greatly extends the scope of DMs to work in transformed space, there still exist limitations and opportunities for future work. First, it is an empirical question to find the optimal stage schedule for all transformations. Our ablation studies also show that different heuristics behave differently for DS-based and VAE-based models. A metric that can automatically determine the best stage schedule based on the property of each transformation is needed and will be explored in the future. 
In addition, although the current method achieves faster inference when generating with transformations like down-sampling, the speed-up is not very significant as we still take the standard DDPM steps. How to further accelerate the inference process of DMs is a challenging and orthogonal direction. For example, it has great potential to combine $f$ -DM with speed-up techniques such as knowledge distillation (Salimans & Ho, 2022). Moreover, whether hand-designed or learned, all the transformations used in $f$ -DM are still fixed when training the DM. This is, however, different from typical VAEs, where both the encoder and decoder are jointly optimized during training. Therefore, starting from a random/imperfect transformation and training $f$ -DM jointly with the transformations towards certain target objectives will be studied as future work. + +![](images/527fa641e31bbc8d987dcf040cb6e87fb22700061f4ece617935f253b53f179b.jpg) +Figure 14: Additional examples of super-resolution (SR) with the unconditional $f$ -DM-DS trained on AFHQ. $\uparrow$ The same input image at various resolutions $16^2$ , $32^2$ , $64^2$ , $128^2$ . We sample 3 random seeds for each resolution input. We also show the difference with and without applying gradient-based initialization (Grad-Init) on $z$ . $\downarrow$ SR results of various $16^2$ inputs. + +![](images/b2a52f5d1274f1b3bbb4148ad6c1d86040a7f2c1de8bd4408a8d91ce3678ac38.jpg) +Figure 15: Additional examples of de-blurring with the unconditional $f$ -DM-Blur-G trained on AFHQ. $\uparrow$ The same input image with various Gaussian kernel sizes $\sigma = 15,9,4,1.4$ . We sample 3 random seeds for each kernel size. We also show the difference with and without applying gradient-based initialization (Grad-Init) on $z$ . $\downarrow$ Deblurred results of various blurred images. 
+ +![](images/7b376d50f3fbb17417c81b86e41119a6d81d9b2c1cdd37842a01ea4d30fd1394.jpg) +f-DM-DS + +![](images/d9603c0ceb1995fe685cbe619608de12afadf938bf77468eef8f3119d0568786.jpg) +f-DM-Blur-U +f-DM-Blur-G + +![](images/bd601770522a8f005064bd2ef2f7d076ead1b3f13a0c881165fba34b074d3daa.jpg) + +![](images/baa4b41cfb27f40c4d0c44f604de40b5a0520d3d019d75be50efa7603692df7e.jpg) +f-DM-VQVAE + +![](images/98743f2baf134e47d814d66c0a0e89cc2e641289c6623c2a92ffd4a96da966ff.jpg) +f-DM-VQGAN +Figure 16: Random samples generated by five $f$ -DMs trained on FFHQ $256 \times 256$ . All faces presented are synthesized by the models, and are not real identities. + +![](images/9abce0b45bb07fb3f7a2dd430cfb737e45e15ec896e5fb4d56569fd92c866168.jpg) +f-DM-DS + +![](images/785682623407e89f8acb2175e5b4445c45e38121f30a5a9cc43f1c22f883f724.jpg) +f-DM-Blur-U + +![](images/8962e1725427272f00a91c85dc75d236f2af6440cb98a0f06f73bf4e8bec8d23.jpg) +f-DM-Blur-G +f-DM-VQVAE + +![](images/f14f00221059dde1432e1c6989addc27715d5a8ac19529cd8f21205e8393de6a.jpg) +f-DM-VOGAN + +![](images/82e10cb497232c26a1d260ad0e62184f671cdc0ad1cf3a8b0c3df48b5804fe9a.jpg) +Figure 17: Random samples generated by five $f$ -DMs trained on AFHQ $256 \times 256$ . + +![](images/0f66f8c4f7a1bcbff94332b019b83b17d12a0ab7652d4adcfdd73b9c8f809010.jpg) +f-DM-DS +f-DM-Blur-U + +![](images/3bcf687510122b709b0f355e99d0ad59f0ac4091374d535c3b0d891550cfdf67.jpg) +f-DM-VQVAE + +![](images/a5579ed3acd07cf63f3b5b8d83417b7a41f077a1d1755be37e2899fd00cdfc20.jpg) + +![](images/5bbabc7d878e38dd4f8b109d66fe8d0157d3d2def6febec595e6e1c7f572b1af.jpg) +f-DM-DS +f-DM-VQVAE + +![](images/e303651ff57e3530709c7a08616817856763cb9feb7894dc571e383118b2d38f.jpg) +Figure 18: Random samples generated by $f$ -DMs trained on LSUN-Church & -Bed $256 \times 256$ . 
+ +![](images/c89eb1799e80f0ae0c0eede98f8ca235e903e7a75bcb980a6e9721b9fee271c5.jpg) +Figure 19: Random samples generated by $f$ -DM-DS/VQVAE trained on ImageNet $256 \times 256$ with classifier-free guidance ( $s = 3$ ). Classes from top to bottom: red panda, robin, daisy, valley, trifle, comic book. + +![](images/96f5115d8f8bd7de6c1b5560a089fdfca6b53803c3d564c05f45d89e39dbacd9.jpg) + +![](images/eee539c601ed5a3ee6c6e70115606539bd6f6f568c1f4ae543f1c097c0831fc3.jpg) +Figure 20: Random samples generated by $f$ -DM-DS/VQVAE trained on ImageNet $256 \times 256$ with classifier-free guidance ( $s = 3$ ). Classes from top to bottom: school bus, pizza, seashore, photocopier, golden retriever, axolotl. \ No newline at end of file diff --git a/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/images.zip b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..8003fdcc93e447ae11ab56cce27d616117d26bc7 --- /dev/null +++ b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36c5c57207c1f70c431a87c0f405d84e710fea3d4c4d17a1a4a90f29fdbbcaa9 +size 3856125 diff --git a/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/layout.json b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e98cc579d4645ec91bc5331666961c6f2faf0f7d --- /dev/null +++ b/2023/f-DM_ A Multi-stage Diffusion Model via Progressive Signal Transformation/layout.json @@ -0,0 +1,21068 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 118 + ], + "type": 
"inline_equation", + "content": "f" + }, + { + "bbox": [ + 105, + 79, + 504, + 118 + ], + "type": "text", + "content": "-DM: A MULTI-STAGE DIFFUSION MODEL VIA PROGRESSIVE SIGNAL TRANSFORMATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 134, + 455, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 134, + 455, + 158 + ], + "spans": [ + { + "bbox": [ + 110, + 134, + 455, + 158 + ], + "type": "text", + "content": "Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Miguel Angel Bautista, Josh Susskind Apple" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 158, + 476, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 158, + 476, + 170 + ], + "spans": [ + { + "bbox": [ + 112, + 158, + 476, + 170 + ], + "type": "text", + "content": "{jgu32, szhai, yizhe_zhang, mbautistamartin, jsusskind}@apple.com" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 276, + 198, + 335, + 209 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 198, + 335, + 209 + ], + "spans": [ + { + "bbox": [ + 276, + 198, + 335, + 209 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 221, + 471, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 221, + 471, + 419 + ], + "spans": [ + { + "bbox": [ + 140, + 221, + 471, + 419 + ], + "type": "text", + "content": "Diffusion models (DMs) have recently emerged as SoTA tools for generative modeling in various domains. Standard DMs can be viewed as an instantiation of hierarchical variational autoencoders (VAEs) where the latent variables are inferred from input-centered Gaussian distributions with fixed scales and variances. Unlike VAEs, this formulation constrains DMs from changing the latent spaces and learning abstract representations. 
In this work, we propose " + }, + { + "bbox": [ + 140, + 221, + 471, + 419 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 140, + 221, + 471, + 419 + ], + "type": "text", + "content": "-DM, a generalized family of DMs, which allows progressive signal transformation. More precisely, we extend DMs to incorporate a set of (hand-designed or learned) transformations, where the transformed input is the mean of each diffusion step. We propose a generalized formulation of DMs and derive the corresponding de-noising objective together with a modified sampling algorithm. As a demonstration, we apply " + }, + { + "bbox": [ + 140, + 221, + 471, + 419 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 140, + 221, + 471, + 419 + ], + "type": "text", + "content": "-DM in image generation tasks with a range of functions, including down-sampling, blurring, and learned transformations based on the encoder of pretrained VAEs. In addition, we identify the importance of adjusting the noise levels whenever the signal is sub-sampled and propose a simple rescaling recipe. " + }, + { + "bbox": [ + 140, + 221, + 471, + 419 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 140, + 221, + 471, + 419 + ], + "type": "text", + "content": "-DM can produce high-quality samples on standard image generation benchmarks like FFHQ, AFHQ, LSUN and ImageNet with better efficiency and semantic interpretation. Please check our videos at http://jiataogu.me/fdm/." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 123, + 426, + 484, + 631 + ], + "blocks": [ + { + "bbox": [ + 123, + 426, + 484, + 631 + ], + "lines": [ + { + "bbox": [ + 123, + 426, + 484, + 631 + ], + "spans": [ + { + "bbox": [ + 123, + 426, + 484, + 631 + ], + "type": "image", + "image_path": "d52258dad9bef03c15d21315a1afeaa9608ab3ca09dcc34a5e80b1a92a1f6bc2.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "lines": [ + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "type": "text", + "content": "Figure 1: Visualization of reverse diffusion from " + }, + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "type": "text", + "content": "-DMs with various signal transformations. " + }, + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "type": "text", + "content": " is the denoised output, and " + }, + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "type": "inline_equation", + "content": "z_{s}" + }, + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "type": "text", + "content": " is the input to the next diffusion step. We plot the first three channels of VQVAE latent variables. Low-resolution images are resized to " + }, + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "type": "inline_equation", + "content": "256^{2}" + }, + { + "bbox": [ + 104, + 635, + 506, + 670 + ], + "type": "text", + "content": " for ease of visualization." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 685, + 207, + 697 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 685, + 207, + 697 + ], + "spans": [ + { + "bbox": [ + 106, + 685, + 207, + 697 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 709, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 734 + ], + "type": "text", + "content": "Diffusion probabilistic models (DMs, Sohl-Dickstein et al., 2015; Ho et al., 2020; Nichol & Dhariwal, 2021) and score-based (Song et al., 2021b) generative models have become increasingly popular" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 149 + ], + "type": "text", + "content": "as the tools for high-quality image (Dhariwal & Nichol, 2021), video (Ho et al., 2022b), text-to-speech (Popov et al., 2021) and text-to-image (Rombach et al., 2021; Ramesh et al., 2022; Sahara et al., 2022a) 
synthesis. Despite the empirical success, conventional DMs are restricted to operate in the ambient space throughout the Gaussian noising process. On the other hand, common generative models like VAEs (Kingma & Welling, 2013) and GANs (Goodfellow et al., 2014; Karras et al., 2021) employ a coarse-to-fine process that hierarchically generates high-resolution outputs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 153, + 506, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 153, + 506, + 254 + ], + "spans": [ + { + "bbox": [ + 104, + 153, + 506, + 254 + ], + "type": "text", + "content": "We are interested in combining the best of the two worlds: the expressivity of DMs and the benefit of hierarchical features. To this end, we propose " + }, + { + "bbox": [ + 104, + 153, + 506, + 254 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 153, + 506, + 254 + ], + "type": "text", + "content": "-DM, a generalized multi-stage framework of DMs to incorporate progressive transformations to the inputs. As an important property of our formulation, " + }, + { + "bbox": [ + 104, + 153, + 506, + 254 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 153, + 506, + 254 + ], + "type": "text", + "content": "-DM does not make any assumptions about the type of transformations. This makes it compatible with many possible designs, ranging from domain-specific ones to generic neural networks. In this work, we consider representative types of transformations, including down-sampling, blurring, and neural-based transformations. What these functions share in common is that they allow one to derive increasingly more global, coarse, and/or compact representations, which we believe can lead to better sampling quality as well as reduced computation." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 258, + 504, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 504, + 316 + ], + "type": "text", + "content": "Incorporating arbitrary transformations into DMs also brings immediate modeling challenges. For instance, certain transformations destroy the information drastically, and some might also change the dimensionality. For the former, we derive an interpolation-based formulation to smoothly bridge consecutive transformations. For the latter, we verify the importance of rescaling the noise level, and propose a resolution-agnostic signal-to-noise ratio (SNR) as a practical guideline for noise rescaling." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "type": "text", + "content": "Extensive experiments are performed on image generation benchmarks, including FFHQ, AFHQ, LSUN Bed/Church and ImageNet. " + }, + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "type": "text", + "content": "-DMs consistently match or outperform the baseline performance, while requiring relatively less computing thanks to the progressive transformations. Furthermore, given a pre-trained " + }, + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "type": "text", + "content": "-DM, we can readily manipulate the learned latent space, and perform conditional generation tasks (e.g., super-resolution) without additional training." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 391, + 200, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 391, + 200, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 391, + 200, + 403 + ], + "type": "text", + "content": "2 BACKGROUND" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 415, + 297, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 297, + 493 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 297, + 493 + ], + "type": "text", + "content": "Diffusion Models (DMs, Sohl-Dickstein et al., 2015; Song & Ermon, 2019; Ho et al., 2020) are deep generative models which can be viewed as a special case of hierarchical VAEs (Kingma et al., 2021). In this paper, we consider diffusion in continuous time similar to Song et al. (2021b); Kingma et al. (2021)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 498, + 297, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 498, + 297, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 498, + 297, + 544 + ], + "type": "text", + "content": "Given a datapoint " + }, + { + "bbox": [ + 104, + 498, + 297, + 544 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathbb{R}^N" + }, + { + "bbox": [ + 104, + 498, + 297, + 544 + ], + "type": "text", + "content": ", a DM models time-dependent latent variables " + }, + { + "bbox": [ + 104, + 498, + 297, + 544 + ], + "type": "inline_equation", + "content": "\\pmb{z} = \\{\\pmb{z}_t | t \\in [0,1], \\pmb{z}_0 = \\pmb{x}\\}" + }, + { + "bbox": [ + 104, + 498, + 297, + 544 + ], + "type": "text", + "content": " based on a fixed signal-noise schedule " + }, + { + "bbox": [ + 104, + 498, + 297, + 544 + ], + "type": "inline_equation", + "content": "\\{\\alpha_t, \\sigma_t\\}" + }, + { + "bbox": [ + 104, + 498, + 297, + 544 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 133, + 548, + 268, + 563 + ], + 
"type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 548, + 268, + 563 + ], + "spans": [ + { + "bbox": [ + 133, + 548, + 268, + 563 + ], + "type": "interline_equation", + "content": "q (\\boldsymbol {z} _ {t} | \\boldsymbol {z} _ {s}) = \\mathcal {N} (\\boldsymbol {z} _ {t}; \\alpha_ {t | s} \\boldsymbol {z} _ {s}, \\sigma_ {t | s} ^ {2} I),", + "image_path": "94234ad23d0110cc8b6a45601ac277da85f43a58af4ce9a9165e2200e8d01d96.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "inline_equation", + "content": "\\alpha_{t|s} = \\alpha_t / \\alpha_s" + }, + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "inline_equation", + "content": "\\sigma_{t|s}^2 = \\sigma_t^2 - \\alpha_{t|s}^2\\sigma_s^2" + }, + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "inline_equation", + "content": "s < t" + }, + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "text", + "content": ". 
It also defines the marginal distribution " + }, + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "inline_equation", + "content": "q(\\pmb{z}_t|\\pmb{x})" + }, + { + "bbox": [ + 104, + 568, + 296, + 603 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 604, + 259, + 617 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 604, + 259, + 617 + ], + "spans": [ + { + "bbox": [ + 141, + 604, + 259, + 617 + ], + "type": "interline_equation", + "content": "q \\left(\\boldsymbol {z} _ {t} | \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {t}; \\alpha_ {t} \\boldsymbol {x}, \\sigma_ {t} ^ {2} I\\right),", + "image_path": "12d99239ea0ee12a6428f451f0948afc409ac29d879f03259080e94b4850722a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 619, + 296, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 619, + 296, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 296, + 631 + ], + "type": "text", + "content": "By default, we assume the variance preserving" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "spans": [ + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": "form (Ho et al., 2020). 
That is, " + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "inline_equation", + "content": "\\alpha_{t}^{2} + \\sigma_{t}^{2} = 1" + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "inline_equation", + "content": "\\alpha_{0} = \\sigma_{1} = 1" + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": ", and the signal-to-noise-ratio (SNR, " + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "inline_equation", + "content": "\\alpha_{t}^{2} / \\sigma_{t}^{2}" + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": ") decreases monotonically with " + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": ". For generation, a parametric function " + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": " is optimized to reverse the diffusion process by denoising " + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "inline_equation", + "content": "z_{t} = \\alpha_{t}x + \\sigma_{t}\\epsilon" + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": " to the clean input " + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": ", with a weighted reconstruction loss " + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\theta}" + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": ". For example, the \"simple loss\" proposed in Ho et al. 
(2020) is equivalent to weighting residuals by " + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "inline_equation", + "content": "\\omega_{t} = \\alpha_{t}^{2} / \\sigma_{t}^{2}" + }, + { + "bbox": [ + 104, + 630, + 504, + 686 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 203, + 691, + 504, + 707 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 691, + 504, + 707 + ], + "spans": [ + { + "bbox": [ + 203, + 691, + 504, + 707 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\theta} = \\mathbb {E} _ {\\boldsymbol {z} _ {t} \\sim q (\\boldsymbol {z} _ {t} | \\boldsymbol {x}), t \\sim [ 0, 1 ]} \\left[ \\omega_ {t} \\cdot \\| \\boldsymbol {x} _ {\\theta} (\\boldsymbol {z} _ {t}, t) - \\boldsymbol {x} \\| _ {2} ^ {2} \\right]. \\tag {1}", + "image_path": "18c82b0fc934e60e96fb66da9e6e38d6ed3f5a0ee8e7d99654db1d61fd9219b8.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "In practice, " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " is parameterized as a U-Net (Ronneberger et al., 2015). As suggested in Ho et al. 
(2020), predicting the noise " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " empirically achieves better performance than predicting " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "x_{\\theta}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": ", where" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 306, + 420, + 345, + 586 + ], + "blocks": [ + { + "bbox": [ + 306, + 420, + 345, + 586 + ], + "lines": [ + { + "bbox": [ + 306, + 420, + 345, + 586 + ], + "spans": [ + { + "bbox": [ + 306, + 420, + 345, + 586 + ], + "type": "image", + "image_path": "b144a42831e912bd3dbf6dc74671e2cca145cc9656e9e988b37a463741a9a3ad.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 312, + 584, + 339, + 592 + ], + "lines": [ + { + "bbox": [ + 312, + 584, + 339, + 592 + ], + "spans": [ + { + "bbox": [ + 312, + 584, + 339, + 592 + ], + "type": "text", + "content": "(a) DMS" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 602, + 504, + 625 + ], + "lines": [ + { + "bbox": [ + 302, + 602, + 504, + 625 + ], + "spans": [ + { + "bbox": [ + 302, + 602, + 504, + 625 + ], + "type": "text", + "content": "Figure 2: (a) the standard DMs; (b) a bottom-up hierarchical VAEs; (c) our proposed " + }, + { + "bbox": [ + 302, + 602, + 504, + 625 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 302, + 602, + 504, + 625 + ], + "type": "text", + "content": "-DM." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 352, + 421, + 396, + 583 + ], + "blocks": [ + { + "bbox": [ + 352, + 421, + 396, + 583 + ], + "lines": [ + { + "bbox": [ + 352, + 421, + 396, + 583 + ], + "spans": [ + { + "bbox": [ + 352, + 421, + 396, + 583 + ], + "type": "image", + "image_path": "1af00fd075b312ca6768d248e8c3288e144842b186ef5a857c11a740be3f3b14.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 355, + 584, + 386, + 592 + ], + "lines": [ + { + "bbox": [ + 355, + 584, + 386, + 592 + ], + "spans": [ + { + "bbox": [ + 355, + 584, + 386, + 592 + ], + "type": "text", + "content": "(b) VAEs" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 406, + 421, + 500, + 584 + ], + "blocks": [ + { + "bbox": [ + 406, + 421, + 500, + 584 + ], + "lines": [ + { + "bbox": [ + 406, + 421, + 500, + 584 + ], + "spans": [ + { + "bbox": [ + 406, + 421, + 500, + 584 + ], + "type": "image", + "image_path": "c1e99745d8f0b4c81b2fc69809af8abb2e775847c79feff9b741fcf73378ce65.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 425, + 584, + 481, + 592 + ], + "lines": [ + { + "bbox": [ + 425, + 584, + 481, + 592 + ], + "spans": [ + { + "bbox": [ + 425, + 584, + 481, + 592 + ], + "type": "text", + "content": "(c) f-DM (Ours)" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": 
"page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\theta}(\\pmb{z}_t,t) = (\\pmb{z}_t - \\sigma_t\\pmb{\\epsilon}_{\\theta}(\\pmb{z}_t,t)) / \\alpha_t" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": ". Sampling from such a learned model can be performed from ancestral sampling (DDPM, Ho et al., 2020), or a deterministic DDIM sampler (Song et al., 2021a). Starting from " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "\\pmb{z}_1\\sim \\mathcal{N}(\\mathbf{0},I)" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": ", a sequence of timesteps " + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "inline_equation", + "content": "1 = t_0 > \\ldots >t_N = 0" + }, + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": " are sampled for iterative generation, and we can readily summarize both methods for each step as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 151, + 133, + 504, + 147 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 133, + 504, + 147 + ], + "spans": [ + { + "bbox": [ + 151, + 133, + 504, + 147 + ], + "type": "interline_equation", + "content": "\\boldsymbol {z} _ {s} = \\alpha_ {s} \\cdot \\boldsymbol {x} _ {\\theta} (\\boldsymbol {z} _ {t}) + \\sqrt {\\sigma_ {s} ^ {2} - \\eta^ {2} \\bar {\\sigma} ^ {2}} \\cdot \\boldsymbol {\\epsilon} _ {\\theta} 
(\\boldsymbol {z} _ {t}) + \\eta \\bar {\\sigma} \\cdot \\boldsymbol {\\epsilon}, \\quad \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (\\boldsymbol {0}, I), \\quad s < t, \\tag {2}", + "image_path": "1423a5661bd3fdddcdd4bb22dda89fbbbf931191fb7748b4bf962f9430f934bb.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 152, + 476, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 476, + 165 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 476, + 165 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 152, + 476, + 165 + ], + "type": "inline_equation", + "content": "\\bar{\\sigma} = \\sigma_s\\sigma_{t|s} / \\sigma_t" + }, + { + "bbox": [ + 104, + 152, + 476, + 165 + ], + "type": "text", + "content": " , and " + }, + { + "bbox": [ + 104, + 152, + 476, + 165 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 104, + 152, + 476, + 165 + ], + "type": "text", + "content": " controls the proportion of additional noise. 
(i.e., DDIM " + }, + { + "bbox": [ + 104, + 152, + 476, + 165 + ], + "type": "inline_equation", + "content": "\\eta = 0" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "type": "text", + "content": "As the score function " + }, + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "type": "text", + "content": " is defined in the ambient space, it is clear that all the latent variables " + }, + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "type": "text", + "content": " are forced to be the same shape as the input data " + }, + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "type": "inline_equation", + "content": "(\\mathbb{R}^N)" + }, + { + "bbox": [ + 104, + 168, + 504, + 214 + ], + "type": "text", + "content": ". This not only leads to inefficient training, especially for steps with high noise level (Jing et al., 2022), but also makes DMs hard to learn abstract and semantically meaningful latent space as pointed out by Preechakul et al. (2022)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 229, + 173, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 229, + 173, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 229, + 173, + 241 + ], + "type": "text", + "content": "3 METHOD" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 254, + 504, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 254, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 254, + 504, + 309 + ], + "type": "text", + "content": "In this section, we introduce " + }, + { + "bbox": [ + 104, + 254, + 504, + 309 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 254, + 504, + 309 + ], + "type": "text", + "content": "-DM, an extended family of DMs to enable diffusion on transformed signals, in a way similar to a standard hierarchical VAE. We start by introducing the definition of the proposed multi-stage formulation with general signal transformations, followed by modified training and generation algorithms (Section 3.1). Then, we specifically apply " + }, + { + "bbox": [ + 104, + 254, + 504, + 309 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 254, + 504, + 309 + ], + "type": "text", + "content": "-DM with three categories of transformations (Section 3.2)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 323, + 241, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 241, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 241, + 333 + ], + "type": "text", + "content": "3.1 MULTI-STAGE DIFFUSION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": "Signal Transformations We consider a sequence of deterministic functions " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "\\pmb{f} = \\{f_0, \\dots, f_K\\}" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "f_0 \\dots f_k" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": " progressively transforms the input signal " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathbb{R}^N" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "\\pmb{x}^k = f_{0:k}(\\pmb{x}) \\in \\mathbb{R}^{M_k}" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": ". We assume " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "\\pmb{x}^0 = f_0(\\pmb{x}) = \\pmb{x}" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": ". 
In principle, " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "\\pmb{f}" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": " can be any function. In this work, we focus on transformations that gradually destroy the information contained in " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": " (e.g., down-sampling), leading towards more compact representations. Without loss of generality, we assume " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "M_0 \\geq M_1 \\geq \\dots \\geq M_K" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": ". A sequence of inverse mappings " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "\\pmb{g} = \\{g_0, \\dots, g_{K-1}\\}" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": " is used to connect a corresponding sequence of pairs of consecutive spaces. Specifically, we define " + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}_k" + }, + { + "bbox": [ + 104, + 342, + 504, + 422 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 214, + 426, + 504, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 426, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 214, + 426, + 504, + 453 + ], + "type": "interline_equation", + "content": "\\hat {\\boldsymbol {x}} ^ {k} := \\left\\{ \\begin{array}{l l} g _ {k} \\left(f _ {k + 1} \\left(\\boldsymbol {x} ^ {k}\\right)\\right) \\approx \\boldsymbol {x} ^ {k}, & \\text {i f} k < K, \\\\ \\boldsymbol {x} ^ {k}, & \\text {i f} k = K. \\end{array} \\right. 
\\tag {3}", + "image_path": "4d59a64616d8cde14333a053b23c78c207b6bba6631f6b58ba487c012ea10ebf.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "spans": [ + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "text", + "content": "The approximation of Equation 3 (" + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "inline_equation", + "content": "k < K" + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "text", + "content": ") is not necessarily (and sometimes impossibly) accurate. For instance, " + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "inline_equation", + "content": "f_{k}" + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "text", + "content": " downsamples an input image " + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "inline_equation", + "content": "128^{2}" + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "inline_equation", + "content": "64^{2}" + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "text", + "content": " with average pooling, and " + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "inline_equation", + "content": "g_{k}" + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "text", + "content": " can be a bilinear interpolation that upsamples back to " + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "inline_equation", + "content": "128^{2}" + }, + { + "bbox": [ + 104, + 459, + 504, + 493 + ], + "type": "text", + "content": ", which is a lossy reconstruction." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": "The definition of " + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": " can be seen as a direct analogy of the encoder " + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "inline_equation", + "content": "(\\phi)" + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": " and decoder " + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "inline_equation", + "content": "(\\theta)" + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": " in hierarchical VAEs (see Figure 2 (b)). However, there are still major differences: (1) the VAE encoder/decoder is stochastic, and the encoder's outputs are regularized by the prior. 
In contrast, " + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": " are deterministic, and the encoder output " + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "inline_equation", + "content": "x^{K}" + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": " does not necessarily follow a simple prior; (2) VAEs directly use the decoder for generation, while " + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "inline_equation", + "content": "f, g" + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": " are fused in the diffusion steps of " + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 498, + 504, + 555 + ], + "type": "text", + "content": "-DM." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "text", + "content": "Forward Diffusion We extend the continuous-time DMs for signal transformations. We split the diffusion time " + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "inline_equation", + "content": "0 \\to 1" + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "inline_equation", + "content": "K + 1" + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "text", + "content": " stages, where for each stage, a partial diffusion process is performed. 
More specifically, we define a set of time boundaries " + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "inline_equation", + "content": "0 = \\tau_0 < \\tau_1 < \\ldots < \\tau_K < \\tau_{K + 1} = 1" + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "text", + "content": ", and for " + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "inline_equation", + "content": "t \\in [0,1]" + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "text", + "content": ", the latent " + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t" + }, + { + "bbox": [ + 104, + 560, + 504, + 606 + ], + "type": "text", + "content": " has the following marginal probability:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 611, + 504, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 611, + 504, + 638 + ], + "spans": [ + { + "bbox": [ + 111, + 611, + 504, + 638 + ], + "type": "interline_equation", + "content": "q \\left(\\boldsymbol {z} _ {t} \\mid \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {t}; \\alpha_ {t} \\boldsymbol {x} _ {t}, \\sigma_ {t} ^ {2} I\\right), \\quad \\text {w h e r e} \\boldsymbol {x} _ {t} = \\frac {\\left(t - \\tau_ {k}\\right) \\hat {\\boldsymbol {x}} ^ {k} + \\left(\\tau_ {k + 1} - t\\right) \\boldsymbol {x} ^ {k}}{\\tau_ {k + 1} - \\tau_ {k}}, \\quad \\tau_ {k} \\leq t < \\tau_ {k + 1}. 
\\tag {4}", + "image_path": "cb8502c859fde53de303999d06cf3ff60203089515916edc34b8a00cfb71662a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": "As listed above, " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " is the interpolation of " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{x}^k" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " and its approximation " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}^k" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " falls in stage " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": ". A simple illustration for the relationship of " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t, \\hat{\\pmb{x}}^k, \\pmb{x}^k" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "z_t" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " is shown in Figure 10. 
We argue that interpolation is crucial as it creates a continuous transformation that slowly corrupts information inside each stage. In this way, such change can be easily reversed by our model. Also, it is nontrivial to find the optimal stage schedule " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\tau_k" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " for each model as it highly depends on how much the information is destroyed in each stage " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "f_k" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": ". In this work, we tested two heuristics: (1) linear schedule " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\tau_k = k / (K + 1)" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": "; (2) cosine schedule " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\tau_k = \\cos(1 - k / (K + 1))" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": ". Note that the standard DMs can be seen as a special case of our " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": "-DM when there is only one stage (" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "K = 0" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": ")." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 130, + 84, + 478, + 218 + ], + "blocks": [ + { + "bbox": [ + 130, + 84, + 478, + 218 + ], + "lines": [ + { + "bbox": [ + 130, + 84, + 478, + 218 + ], + "spans": [ + { + "bbox": [ + 130, + 84, + 478, + 218 + ], + "type": "image", + "image_path": "f9c66ed08214f298dcca5a8aa43a655c3a55afef525bded7f4d153b4d4823ca6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "lines": [ + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "content": "Figure 3: Left: an illustration of the proposed SNR computation for different sampling rates; Right: the comparison of rescaling the noise level for progressive down-sampling. Without noise rescaling, the diffused images in low-resolution quickly become too noisy to distinguish the underline signal." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 266, + 504, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 266, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 504, + 291 + ], + "type": "text", + "content": "Equation 4 does not guarantee a Markovian transition. Nevertheless, our formulation only needs " + }, + { + "bbox": [ + 104, + 266, + 504, + 291 + ], + "type": "inline_equation", + "content": "q(\\pmb{z}_t | \\pmb{z}_s, \\pmb{x})" + }, + { + "bbox": [ + 104, + 266, + 504, + 291 + ], + "type": "text", + "content": ", which has the following simple form focusing on diffusion steps within a stage:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 151, + 295, + 504, + 311 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 295, + 504, + 311 + ], + "spans": [ + { + "bbox": [ + 151, + 295, + 504, + 311 + ], + "type": "interline_equation", + "content": "q \\left(\\boldsymbol {z} _ {t} \\mid \\boldsymbol {z} _ {s}, \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {t}; \\alpha_ {t | s} \\boldsymbol {z} _ {s} + \\alpha_ {t} \\cdot \\left(\\boldsymbol {x} _ {t} - \\boldsymbol {x} _ {s}\\right), \\sigma_ {t | s} ^ {2} I\\right), \\quad \\tau_ {k} \\leq s < t < \\tau_ {k + 1}. 
\\tag {5}", + "image_path": "ad84992e9b8a3056c53fea417b7e764f9caf42fe9c8bcfedb96b04dce59af482.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "text", + "content": "From Equation 5, we further re-write " + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t - \\boldsymbol{x}_s = -\\delta_t \\cdot (t - s) / (t - \\tau_k)" + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "inline_equation", + "content": "\\delta_t = \\boldsymbol{x}^k - \\boldsymbol{x}_t" + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "text", + "content": " is the signal degradation. Equation 5 also indicates that the reverse diffusion distribution " + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "inline_equation", + "content": "q(\\boldsymbol{z}_s | \\boldsymbol{z}_t, \\boldsymbol{x}) \\propto q(\\boldsymbol{z}_t | \\boldsymbol{z}_s, \\boldsymbol{x}) q(\\boldsymbol{z}_s | \\boldsymbol{x})" + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "text", + "content": " can be written as the function of " + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "inline_equation", + "content": "\\delta_t" + }, + { + "bbox": [ + 104, + 316, + 504, + 352 + ], + "type": "text", + "content": " which will be our learning objectives." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "text", + "content": "Boundary Condition To enable diffusion across stages, we need the transition at stage boundaries " + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "inline_equation", + "content": "\\tau_{k}" + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "text", + "content": ". More specifically, when the step approaches the boundary " + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "inline_equation", + "content": "\\tau^{-}" + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "text", + "content": " (the left limit of " + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "text", + "content": "), the transition " + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "inline_equation", + "content": "q(z_{\\tau} | z_{\\tau^{-}}" + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "text", + "content": ") should be as deterministic (ideally invertible) & smooth as possible to minimize information loss. 
First, we can easily expand " + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "inline_equation", + "content": "z_{\\tau}" + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "inline_equation", + "content": "z_{\\tau^{-}}" + }, + { + "bbox": [ + 104, + 357, + 504, + 402 + ], + "type": "text", + "content": " as the signal and noise combination:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 181, + 407, + 408, + 419 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 407, + 408, + 419 + ], + "spans": [ + { + "bbox": [ + 181, + 407, + 408, + 419 + ], + "type": "interline_equation", + "content": "\\text {B e f o r e :} \\quad \\boldsymbol {z} _ {\\tau^ {-}} = \\alpha_ {\\tau^ {-}} \\cdot \\boldsymbol {x} _ {\\tau^ {-}} + \\sigma_ {\\tau^ {-}} \\cdot \\boldsymbol {\\epsilon}, p (\\boldsymbol {\\epsilon}) = \\mathcal {N} (\\boldsymbol {0}, I),", + "image_path": "00307cbdf1a8a83b2d1e7f8e31b75971da359055baabd48ad22c3a6ef357f945.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 188, + 416, + 504, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 416, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 188, + 416, + 504, + 434 + ], + "type": "interline_equation", + "content": "A f t e r: \\quad z _ {\\tau} = \\alpha_ {\\tau} \\cdot x _ {\\tau} + \\sigma_ {\\tau} \\cdot \\zeta (\\epsilon), p (\\zeta (\\epsilon)) = \\mathcal {N} (0, I). 
\\tag {6}", + "image_path": "3edb07e7cdd5a345cb145dc5d036784cc0e7a7d3ed8bf3465407f445150e6db4.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "text", + "content": "Based on definition, " + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\tau^{-}} = \\hat{\\pmb{x}}^{k - 1} = g(\\pmb{x}^k) = g(\\pmb{x}_{\\tau})" + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "text", + "content": ", which means the signal part is invertible. Therefore we only need to find " + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "text", + "content": ". Under the initial assumption of " + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "inline_equation", + "content": "M_{k} \\leq M_{k - 1}" + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "text", + "content": ", this can be achieved easily by dropping elements from " + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "text", + "content": ". Take down-sampling " + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "inline_equation", + "content": "(M_{k - 1} = 4M_k)" + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "text", + "content": " as an example. 
We can directly drop 3 out of every " + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "text", + "content": " values from " + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 439, + 504, + 485 + ], + "type": "text", + "content": ". More details are included in Appendix A.4." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 490, + 506, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 506, + 545 + ], + "type": "text", + "content": "The second requirement of a smooth transition is not as straightforward as it looks, which asks the \"noisiness\" of latents " + }, + { + "bbox": [ + 104, + 490, + 506, + 545 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 490, + 506, + 545 + ], + "type": "text", + "content": " to remain unchanged across the boundary. We argue that the conventional measure – the signal-to-noise-ratio (SNR) – in DM literature is not compatible with resolution change as it averages the signal/noise power element-wise. 
In this work, we propose a generalized resolution-agnostic SNR by viewing data as points sampled from a continuous field:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 221, + 550, + 504, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 550, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 221, + 550, + 504, + 578 + ], + "type": "interline_equation", + "content": "\\operatorname {S N R} (\\boldsymbol {z}) = \\frac {\\mathbb {E} _ {\\Omega \\sim I} \\| \\mathbb {E} _ {i \\sim \\Omega} \\operatorname {S I G N A L} (\\boldsymbol {z} _ {i}) \\| ^ {2}}{\\mathbb {E} _ {\\Omega \\sim I} \\| \\mathbb {E} _ {i \\sim \\Omega} \\operatorname {N O I S E} (\\boldsymbol {z} _ {i}) \\| ^ {2}}, \\tag {7}", + "image_path": "d2b89428709c15fd0d46662ac5c89228556a4115028f076b29c137e48d44f085.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "text", + "content": " is the data range, SIGNAL represents the real data value (such as image pixels), and NOISE is the unstructured Gaussian noise added to the data. " + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "text", + "content": " is a patch relative to " + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "text", + "content": ", which can be any size as long as it is invariant to different sampling rates (resolutions). 
As shown in Figure 3 (left), we can obtain a reliable measure of noisiness by averaging the signal/noise inside patches. We derive " + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "inline_equation", + "content": "\\alpha_{\\tau}, \\sigma_{\\tau}" + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "inline_equation", + "content": "\\alpha_{\\tau^{-}}, \\sigma_{\\tau^{-}}" + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "text", + "content": " for any transformations by forcing " + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "inline_equation", + "content": "\\mathrm{SNR}(z_{\\tau}) = \\mathrm{SNR}(z_{\\tau^{-}})" + }, + { + "bbox": [ + 104, + 581, + 506, + 659 + ], + "type": "text", + "content": " under this new definition. Specifically, if dimensionality change is solely caused by the change of sampling rate (e.g., down-sampling, average RGB channels, deconvolution), we can get the following relation:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 241, + 662, + 504, + 677 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 662, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 241, + 662, + 504, + 677 + ], + "type": "interline_equation", + "content": "\\alpha_ {\\tau} ^ {2} / \\sigma_ {\\tau} ^ {2} = d _ {k} \\cdot \\gamma_ {k} \\cdot \\alpha_ {\\tau -} ^ {2} / \\sigma_ {\\tau -} ^ {2}, \\tag {8}", + "image_path": "11076b23db3af4e1582f41148292d2dfd3d35fbc2bdfb77501795fb25d36b777.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 679, + 504, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 679, + 504, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 504, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 679, + 504, + 714 + ], + "type": "inline_equation", + "content": "d_{k} = M_{k - 1} / 
M_{k}" + }, + { + "bbox": [ + 104, + 679, + 504, + 714 + ], + "type": "text", + "content": " is the total dimension change, and " + }, + { + "bbox": [ + 104, + 679, + 504, + 714 + ], + "type": "inline_equation", + "content": "\\gamma_{k} = \\mathbb{E}||\\hat{\\pmb{x}}^{k - 1}||^{2} / \\mathbb{E}||\\pmb{x}^{k}||^{2}" + }, + { + "bbox": [ + 104, + 679, + 504, + 714 + ], + "type": "text", + "content": " is the change of signal power. For example, we have " + }, + { + "bbox": [ + 104, + 679, + 504, + 714 + ], + "type": "inline_equation", + "content": "d_{k} = 4,\\gamma_{k}\\approx 1" + }, + { + "bbox": [ + 104, + 679, + 504, + 714 + ], + "type": "text", + "content": " for down-sampling. Following Equation 8, the straightforward rule is to rescale the magnitude of the noise, and keep the signal part unchanged:" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 390, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 390, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 390, + 732 + ], + "type": "text", + "content": "For simplicity, we omit the subscript " + }, + { + "bbox": [ + 117, + 720, + 390, + 732 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 117, + 720, + 390, + 732 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 117, + 720, + 390, + 732 + ], + "type": "inline_equation", + "content": "\\tau_{k}" + }, + { + "bbox": [ + 117, + 720, + 390, + 732 + ], + "type": "text", + "content": " in the following paragraphs." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 97, + 99, + 473, + 230 + ], + "blocks": [ + { + "bbox": [ + 105, + 84, + 373, + 97 + ], + "lines": [ + { + "bbox": [ + 105, + 84, + 373, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 84, + 373, + 97 + ], + "type": "text", + "content": "Algorithm 1: Reverse diffusion for image generation using " + }, + { + "bbox": [ + 105, + 84, + 373, + 97 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 105, + 84, + 373, + 97 + ], + "type": "text", + "content": "-DM" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "lines": [ + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "spans": [ + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": "Input: model " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\theta ,f,g" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " stage schedule " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\{\\tau_0,\\dots ,\\tau_K\\}" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " , rescaled noise schedule functions " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\alpha (.)" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\sigma (.)" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " step-size " + }, + { + "bbox": [ + 97, + 
99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\Delta t" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{full}}\\sim \\mathcal{N}(0,I)" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " ,DDPM ratio " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " \n1 Initialize " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{full}}" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " \n2 for " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "(k = K;k\\geq 0;k = k - 1)" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " do \n3 for " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "(t = \\tau_{k + 1};t > \\tau_k;t = t - \\Delta t,s = t - \\Delta t)" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " do \n4 " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\begin{array}{r}\\pmb {\\epsilon}_{\\theta},\\pmb {\\delta}_{\\theta} = \\theta (\\pmb {z},t);\\quad \\pmb {x}_{\\theta} = (\\pmb {z} - \\sigma (t)\\cdot \\pmb {\\epsilon}_{\\theta}) / \\alpha (t);\\\\ \\text{if} s > \\tau_{k}\\text{then}\\\\ \\big{\\lfloor}\\pmb {z} = \\alpha (s)\\cdot (\\pmb {x}_{\\theta} + \\pmb {\\delta}_{\\theta}\\cdot (t - s) / (t - \\tau_{k})) + \\sqrt{\\sigma^{2}(s) - \\eta^{2}\\bar{\\sigma}^{2}}\\cdot \\pmb {\\epsilon}_{\\theta} + \\eta \\bar{\\sigma}\\cdot \\pmb 
{\\epsilon},\\pmb {\\epsilon}\\sim \\mathcal{N}(\\pmb {0},I)\\\\ \\end{array}" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " \n5 \n6 if " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "k > 0" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " then \n7 Re-sample noise " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{rs}}" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{full}}" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " . " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "z = \\alpha (\\tau_k)\\cdot g_k(x_\\theta) + \\sigma (\\tau_k)\\cdot \\epsilon_{\\mathrm{rs}}" + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "text", + "content": " \n9 return " + }, + { + "bbox": [ + 97, + 99, + 473, + 230 + ], + "type": "inline_equation", + "content": "x_{\\theta}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "algorithm" + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\alpha \\leftarrow \\alpha, \\sigma \\leftarrow \\sigma / \\sqrt{d_k}" + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "text", + "content": ", which we refer as signal preserved (SP) rescaling. 
Note that, to ensure the noise schedule is continuous over time and close to the original schedule, such rescaling is applied to the noises of the entire stage, and will be accumulated when multiple transformations are used. As the comparison shown in Figure 3, the resulting images are visually closer to the standard DM. However, the variance of " + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "text", + "content": " becomes very small, especially when " + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "inline_equation", + "content": "t \\to 1" + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "text", + "content": ", which might be hard for the neural networks to distinguish. Therefore, we propose the variance preserved (VP) alternative to further normalize the rescaled " + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\alpha, \\sigma" + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "text", + "content": " so that " + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "inline_equation", + "content": "\\alpha^2 + \\sigma^2 = 1" + }, + { + "bbox": [ + 104, + 238, + 506, + 318 + ], + "type": "text", + "content": ". We show the visualization in Figure 3 (b)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "text", + "content": "Training We train a neural network " + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "text", + "content": " to denoise. We also show the training pipeline in Figure 10. 
In " + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "text", + "content": "-DM, noise is caused by two factors: (1) the perturbation " + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "text", + "content": " from noise injection; (2) the degradation " + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "text", + "content": " due to signal transformation. Thus, we propose to predict " + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{\\theta}" + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "inline_equation", + "content": "\\delta_{\\theta}" + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "text", + "content": " jointly, which simultaneously remove both noises from " + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_t" + }, + { + "bbox": [ + 104, + 322, + 504, + 367 + ], + "type": "text", + "content": " with a \"double reconstruction\" loss:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 156, + 368, + 504, + 383 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 368, + 504, + 383 + ], + "spans": [ + { + "bbox": [ + 156, + 368, + 504, + 383 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\theta} = \\mathbb {E} _ {\\boldsymbol {z} _ {t} \\sim q (\\boldsymbol {z} _ {t} | \\boldsymbol {x}), t \\sim [ 0, 1 ]} \\left[ \\omega_ {t} \\cdot \\left(\\| \\boldsymbol {x} _ {\\theta} (\\boldsymbol {z} _ {t}, t) - \\boldsymbol {x} 
_ {t} \\| _ {2} ^ {2} + \\| \\boldsymbol {\\delta} _ {\\theta} (\\boldsymbol {z} _ {t}, t) - \\boldsymbol {\\delta} _ {t} \\| _ {2} ^ {2}\\right) \\right], \\tag {9}", + "image_path": "424335dd7f79efcfd95612e978b2cb53fb98ce30f044d507a285760cf0be72b1.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "text", + "content": "where the denoised output is " + }, + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\theta}(\\pmb{z}_t, t) + \\delta_{\\theta}(\\pmb{z}_t, t)" + }, + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "text", + "content": ". Unlike standard DMs, the denoising goals are the transformed signals of each stage rather than the final real images, which are generally simpler targets to recover. The same as standard DMs, we also choose to predict " + }, + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "text", + "content": ", and compute " + }, + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\theta} = (z_t - \\sigma_t \\pmb{\\epsilon}_{\\theta}) / \\alpha_t" + }, + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "text", + "content": ". We adopt the same U-Net architecture for all stages, where input " + }, + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "inline_equation", + "content": "\\pmb{z}_t" + }, + { + "bbox": [ + 104, + 384, + 504, + 440 + ], + "type": "text", + "content": " will be directed to the corresponding inner layer based on spatial resolutions (see Appendix Figure 11 for details)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": "Unconditional Generation We present the generation steps in Algorithm 1, where " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\delta_t" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": " are replaced by model's predictions " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_{\\theta}" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\delta_{\\theta}" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": ". Thanks to the interpolation formulation (Equation 4), generation is independent of the transformations " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": ". Only the inverse mappings " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": " – which might be simple and easy to compute – is needed to map the signals at boundaries. This brings flexibility and efficiency to learning complex or even test-time inaccessible transformations. 
In addition, Algorithm 1 includes a \"noise-resampling step\" for each stage boundary, which is the reverse process for " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\zeta(\\epsilon)" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": " in Equation 6. While " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": " is deterministic, the reverse process needs additional randomness. For instance, if " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": " drops elements in the forward process, then the reverse step should inject standard Gaussian noise back to the dropped locations. Because we assume " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "M_0 \\geq \\ldots \\geq M_K" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": ", we propose to sample a full-size noise " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{full}}" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": " before generation, and gradually adding subsets of " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{full}}" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": " to each stage. Thus, " + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{full}}" + }, + { + "bbox": [ + 104, + 445, + 505, + 567 + ], + "type": "text", + "content": " encodes multi-scale information similar to RealNVP (Dinh et al., 2016)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "spans": [ + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": "Conditional Generation Given an unconditional " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": "-DM, we can do conditional generation by replacing the denoised output " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\theta}" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": " with any condition " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "\\pmb{x}_c" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": " at a suitable time " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "(T)" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": ", and starting diffusion from " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": ". 
For example, suppose " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "\\pmb{f}" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": " is downsample, and " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "\\pmb{x}_c" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": " is a low-resolution image, " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": "-DM enables super-resolution (SR) without additional training. To achieve that, it is critical to initialize " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "\\pmb{z}_T" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": ", which implicitly asks " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "z_{T} \\approx \\alpha_{T}\\pmb{x}_{c} + \\sigma_{T}\\pmb{\\epsilon}_{\\theta}(\\pmb{z}_{T})" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": ". 
In practice, we choose " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": " to be the corresponding stage boundary, and initialize " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "\\pmb{z}" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": " by adding random noise " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "\\sigma_T\\pmb{\\epsilon}" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "\\alpha_{T}\\pmb{x}_{c}" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": ". A gradient-based method is used to iteratively update " + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "inline_equation", + "content": "z_{T} \\gets z_{T} - \\lambda \\nabla_{z_{T}}\\| \\pmb{x}_{\\theta}(z_{T}) - \\pmb{x}_{c}\\|_{2}^{2}" + }, + { + "bbox": [ + 104, + 573, + 504, + 652 + ], + "type": "text", + "content": " for a few steps before the diffusion starts." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 662, + 340, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 662, + 340, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 340, + 673 + ], + "type": "text", + "content": "3.2 APPLICATIONS ON VARIOUS TRANSFORMATIONS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 683, + 504, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 683, + 504, + 707 + ], + "spans": [ + { + "bbox": [ + 104, + 683, + 504, + 707 + ], + "type": "text", + "content": "With the definition in Section 3.1, next we show " + }, + { + "bbox": [ + 104, + 683, + 504, + 707 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 683, + 504, + 707 + ], + "type": "text", + "content": "-DM applied with different transformations. In this paper, we consider the following three categories of transformations." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": "Downsampling. 
As the motivating example in Section 3.1, we let " + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "inline_equation", + "content": "\\pmb{f}" + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": " a sequence of downsample operations that transforms a given image (e.g., " + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "inline_equation", + "content": "256^2" + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": ") progressively down to " + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "inline_equation", + "content": "16^2" + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": ", where each " + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "inline_equation", + "content": "f_k(\\cdot)" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "reduces the length by 2, and correspondingly " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "g_{k}(.)" + }, + { + "bbox": [ + 104, + 82, + 
504, + 117 + ], + "type": "text", + "content": " upsamples by 2. Thus, the generation starts from a low-resolution noise and progressively performs super-resolution. We denote the model as " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "-DM-DS, where " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "d_{k} = 4" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "\\gamma_{k} = 1" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": " in Equation 8 and " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "K = 4" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "256^{2}" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": " images." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "text", + "content": "Blurring. " + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "text", + "content": "-DM also supports general blur transformations. 
Unlike recent works (Rissanen et al., 2022; Hoogeboom & Salimans, 2022) that focuses on continuous-time blur (heat dissipation), Equation 4 can be seen as an instantiation of progressive blur function if we treat " + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}^k" + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "text", + "content": " as a blurred version of " + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "inline_equation", + "content": "\\pmb{x}^k" + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "text", + "content": ". This design brings more flexibility in choosing any kind of blurring functions, and using the blurred versions as stages. In this paper, we experiment with two types of blurring functions. (1) " + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "text", + "content": "-DM-Blur-U: utilizing the same downsample operators as " + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "text", + "content": "-DM-DS, while always up-sampling the images back to the original sizes; (2) " + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "text", + "content": "-DM-Blur-G: applying standard Gaussian blurring kernels following Rissanen et al. (2022). In both cases, we use " + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "inline_equation", + "content": "g_{k}(\\pmb{x}) = \\pmb{x}" + }, + { + "bbox": [ + 104, + 118, + 506, + 219 + ], + "type": "text", + "content": ". As the dimension is not changed, no rescaling and noise resampling is required." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": "Image " + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": " Latent Trans. We further consider diffusion with learned non-linear transformations such as VAEs (see Figure 2 (b), " + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": ": VAE encoder, " + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": ": VAE decoder). By inverting such an encoding process, we are able to generate data from low-dimensional latent space similar to Rombach et al. (LDM, 2021). As a major difference, LDM operates only on the latent variables, while " + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": "-DM learns diffusion in the latent and image spaces jointly. Because of this, our performance will not be bounded by the quality of the VAE decoder. In this paper, we consider VQVAE (Van Den Oord et al., 2017) together with its GAN variant (VQGAN, Esser et al., 2021). 
For both cases, we transform " + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "inline_equation", + "content": "256^2 \\times 3" + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": " images into " + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "inline_equation", + "content": "32^2 \\times 4" + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "inline_equation", + "content": "d_k = 48" + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": ") latent space. The VQVAE encoder/decoder is trained on ImageNet (Deng et al., 2009), and is frozen for the rest of the experiments. For " + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": "-DM-VQGAN, we directly take the checkpoint provided by Rombach et al. (2021). Besides, we need to tune " + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "inline_equation", + "content": "\\gamma_k" + }, + { + "bbox": [ + 104, + 222, + 506, + 344 + ], + "type": "text", + "content": " separately for each encoder due to the change in signal magnitude." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 356, + 201, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 356, + 201, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 201, + 369 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 380, + 246, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 246, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 246, + 392 + ], + "type": "text", + "content": "4.1 EXPERIMENTAL SETTINGS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 401, + 506, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 401, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 506, + 435 + ], + "type": "text", + "content": "Datasets. We evaluate " + }, + { + "bbox": [ + 104, + 401, + 506, + 435 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 401, + 506, + 435 + ], + "type": "text", + "content": "-DMs on five commonly used benchmarks testing generation on a range of domains: FFHQ (Karras et al., 2019), AFHQ (Choi et al., 2020), LSUN Church & Bed (Yu et al., 2015), and ImageNet (Deng et al., 2009). All images are center-cropped and resized to " + }, + { + "bbox": [ + 104, + 401, + 506, + 435 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 401, + 506, + 435 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "text", + "content": "Training Details. We implement the three types of transformations with the same architecture and hyper-parameters except for the stage-specific adapters. 
We adopt a lighter version of ADM (Dhariwal & Nichol, 2021) as the main U-Net architecture. For all experiments, we adopt the same training scheme using AdamW (Kingma & Ba, 2014) optimizer with a learning rate of " + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "inline_equation", + "content": "2\\mathrm{e} - 5" + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "text", + "content": " and an EMA decay factor of 0.9999. We set the weight " + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "inline_equation", + "content": "\\omega_{t} = \\mathrm{sigmoid}(-\\log (\\alpha_{t}^{2} / \\sigma_{t}^{2}))" + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "text", + "content": " following P2-weighting (Choi et al., 2022). The cosine noise schedule " + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "inline_equation", + "content": "\\alpha_{t} = \\cos (0.5\\pi t)" + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "text", + "content": " is adopted for diffusion working in the " + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "inline_equation", + "content": "256^2\\times 3" + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "text", + "content": " image space. As proposed in Equation 8, noise rescaling (VP by default) is applied for " + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 439, + 506, + 540 + ], + "type": "text", + "content": "-DMs when the resolutions change. All our models are trained with batch-size 32 images for 500K (FFHQ, AFHQ, LSUN Church), 1.2M (LSUN Bed) and 2.5M (ImageNet) iterations, respectively." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "content": "Baselines & Evaluation. We compare " + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "content": "-DMs against a standard DM (DDPM, Ho et al., 2020) on all five datasets. To ensure a fair comparison, we train DDPM following the same settings and continuous-time formulation as our approaches. We also include transformation-specific baselines: (1) we re-implement the cascaded DM (Cascaded, Ho et al., 2022a) to adapt " + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "content": "-DM-DS setup from " + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "inline_equation", + "content": "16^{2}" + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "content": " progressively to " + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "inline_equation", + "content": "256^{2}" + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "content": ", where for each stage a separate DM is trained conditioned on the consecutive downsampled image; (2) we re-train a latent-diffusion model (LDM, Rombach et al., 2021) on the extracted latents from our pretrained VQVAE; (3) to compare with " + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "content": "-DM-Blur-G, we include the scores and synthesised examples of IHDM (Rissanen et al., 2022). 
We set 250 timesteps " + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "inline_equation", + "content": "(\\Delta t = 0.004)" + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "content": "-DMs and the baselines with " + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "inline_equation", + "content": "\\eta = 1" + }, + { + "bbox": [ + 104, + 544, + 506, + 666 + ], + "type": "text", + "content": " (Algorithm 1). We use Frechet Inception Distance (FID, Heusel et al., 2017) and Precision/Recall (PR, Kynkänniemi et al., 2019) as the measures of visual quality, based on 50K samples and the entire training set." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 678, + 171, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 171, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 171, + 690 + ], + "type": "text", + "content": "4.2 RESULTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "Qualitative Comparison To demonstrate the capability of handling various complex datasets, Figure 4 (↑) presents an uncurated set of images generated by " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "-DM-DS. 
We show more samples from all types of " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "-DMs in the Appendix E.4. We also show a comparison between " + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "-DMs and the" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 81, + 504, + 358 + ], + "blocks": [ + { + "bbox": [ + 106, + 81, + 504, + 358 + ], + "lines": [ + { + "bbox": [ + 106, + 81, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 504, + 358 + ], + "type": "image", + "image_path": "5e41dbe2f3531f29dff630459a8e64a18dadcdc494c54143f08abb89f926491f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "lines": [ + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "text", + "content": "Figure 4: " + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 104, + 
367, + 504, + 401 + ], + "type": "text", + "content": " Random samples from " + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "text", + "content": "-DM-DS trained on various datasets; " + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "text", + "content": " Comparison of " + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "text", + "content": "-DMs and the corresponding baselines under various transformations. Best viewed when zoomed in. All faces presented are synthesized by the models, and are not real identities." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 448, + 503, + 604 + ], + "blocks": [ + { + "bbox": [ + 105, + 416, + 504, + 440 + ], + "lines": [ + { + "bbox": [ + 105, + 416, + 504, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 504, + 440 + ], + "type": "text", + "content": "Table 1: Quantitative comparisons on various datasets. The speed compared to DDPM is calculated with " + }, + { + "bbox": [ + 105, + 416, + 504, + 440 + ], + "type": "inline_equation", + "content": "\\mathrm{bsz} = 1" + }, + { + "bbox": [ + 105, + 416, + 504, + 440 + ], + "type": "text", + "content": " on CPU. Best performing DMs are shown in bold." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 448, + 503, + 604 + ], + "lines": [ + { + "bbox": [ + 106, + 448, + 503, + 604 + ], + "spans": [ + { + "bbox": [ + 106, + 448, + 503, + 604 + ], + "type": "table", + "html": "
ModelsFID↓P↑R↑FID↓P↑R↑SpeedModelsFID↓
FFHQ256 × 256AFHQ256 × 256LSUN-Church 256 × 256
DDPM10.80.760.539.30.740.51×1.0DDPM9.7
DDPM (1/2)16.80.740.4515.20.640.44×2.0f-DM-DS8.2
Cascaded49.00.400.0924.20.370.13-f-DM-VQVAE8.0
f-DM-DS10.80.740.506.40.810.48×2.1LSUN-Bed 256 × 256
IHDM64.9--43.4---DDPM8.0
f-DM-Blur-G11.70.730.516.90.760.49×1.0f-DM-DS6.9
f-DM-Blur-U10.40.740.527.00.770.53×1.0f-DM-VQVAE7.1
LDM48.00.310.0729.70.070.11×9.8ImageNet 256 × 256
LDM (GAN)*8.60.720.606.50.630.61×9.2DDPM10.9
f-DM-VQVAE12.70.770.478.90.760.40×1.7f-DM-DS8.2
f-DM-VQGAN11.70.740.515.60.760.53×1.7f-DM-VQVAE6.8
", + "image_path": "15942fd9c2448822bd48ce6082824ac8f7e631e97daa8271f054badea12175fc.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 615, + 504, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 615, + 504, + 636 + ], + "spans": [ + { + "bbox": [ + 104, + 615, + 504, + 636 + ], + "type": "text", + "content": "basielines with various transformations on FFHQ (Figure 4 " + }, + { + "bbox": [ + 104, + 615, + 504, + 636 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 104, + 615, + 504, + 636 + ], + "type": "text", + "content": "). Our methods consistently produce better visual results with more coherence and without noticeable artifacts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "content": "Quantitative Comparison. We measure the generation quality (FID and precision/recall) and relative inference speed of " + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "content": "-DMs and the baselines in Table 1. 
Across all five datasets, " + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "content": "-DMs consistently achieve similar or even better results for the DDPM baselines, while gaining near " + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "inline_equation", + "content": "\times 2" + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "content": " inference speed for " + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 638, + 504, + 704 + ], + "type": "text", + "content": "-DM-\{DS, VQVAE, VQGAN\} due to the nature of transformations. As a comparison, having fewer timesteps (DDPM 1/2) greatly hurts the generation quality of DDPM. We also show comparisons with transformation-specific baselines on FFHQ & AFHQ." 
Although cascaded DMs have been shown effective in literature (Nichol & Dhariwal, 2021; Ho et al., 2022a), it is underexplored to apply cascades in a sequence of consecu" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 123, + 79, + 331, + 277 + ], + "blocks": [ + { + "bbox": [ + 109, + 119, + 120, + 126 + ], + "lines": [ + { + "bbox": [ + 109, + 119, + 120, + 126 + ], + "spans": [ + { + "bbox": [ + 109, + 119, + 120, + 126 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 123, + 79, + 331, + 277 + ], + "lines": [ + { + "bbox": [ + 123, + 79, + 331, + 277 + ], + "spans": [ + { + "bbox": [ + 123, + 79, + 331, + 277 + ], + "type": "image", + "image_path": "c0268113526218eb4d1ab7d4048c62013f56033e94fba21231eab49243813ec0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 334, + 79, + 501, + 270 + ], + "blocks": [ + { + "bbox": [ + 334, + 79, + 501, + 270 + ], + "lines": [ + { + "bbox": [ + 334, + 79, + 501, + 270 + ], + "spans": [ + { + "bbox": [ + 334, + 79, + 501, + 270 + ], + "type": "image", + "image_path": "adcaceb13055291b80aba2488030391bc33778f4a0c1e884be2f8ac059829c4b.jpg" + } + ] 
+ } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "lines": [ + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "text", + "content": "Figure 5: Random DDIM samples " + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "inline_equation", + "content": "(\\eta = 0)" + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "text", + "content": " from (a) " + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "text", + "content": "-DMs on AFHQ and LSUN-Church by given {downsampled, blurred, latent} images as conditions; (b) " + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "text", + "content": "-DM-VQVAE by interpolating the initial noise of the latent stage; (c) " + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "text", + "content": "-DM-DS starting from the same initial noise of the " + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 104, + 296, + 504, + 342 + ], + "type": "text", + "content": " stage. For (c), we also show the \"mean image\" of 300 random samples using the same initial noise." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 346, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 504, + 425 + ], + "type": "text", + "content": "tive resolutions " + }, + { + "bbox": [ + 104, + 346, + 504, + 425 + ], + "type": "inline_equation", + "content": "(16\\to 32\\to 64\\to \\ldots)" + }, + { + "bbox": [ + 104, + 346, + 504, + 425 + ], + "type": "text", + "content": " like ours. In such cases, the prediction errors get easily accumulated during the generation, yielding serious artifacts in the final resolution. To ease this, Cascaded DM (Ho et al., 2022a) proposed to apply \"noise conditioning augmentation\" which reduced the domain gap between stages by adding random noise to the input condition. However, it is not straightforward to tune the noise level for both training and inference time. By contrast, " + }, + { + "bbox": [ + 104, + 346, + 504, + 425 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 346, + 504, + 425 + ], + "type": "text", + "content": " -DM is by-design non-cascaded, and there are no domain gaps between stages. That is, we can train our model end-to-end without worrying the additional tuning parameters and achieve stable results." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 430, + 504, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 504, + 518 + ], + "type": "text", + "content": "v.s. LDMs. We show comparisons with LDMs (Rombach et al., 2021) in Table 1. LDMs generate more efficiently as the diffusion only happens in the latent space. However, the generation is heavily biased by the behavior of the fixed decoder. 
For instance, it is challenging for VQVAE decoders to synthesize sharp images, which causes low scores in Table 1. However, LDM with VQGAN decoders is able to generate sharp details, which are typically favored by InceptionV3 (Szegedy et al., 2016) used in FID and PR. Therefore, despite having artifacts (see Figure 4, below, rightmost) in the output, LDMs (GAN) still obtain good scores. In contrast, " + }, + { + "bbox": [ + 104, + 430, + 504, + 518 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 430, + 504, + 518 + ], + "type": "text", + "content": "-DM, as a pure DM, naturally bridges the latent and image spaces, where the generation is not restricted by the decoder." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 523, + 504, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 504, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 504, + 579 + ], + "type": "text", + "content": "v.s. Blurring DMs. Table 1 compares with a recently proposed blurring-based method (IHDM, Rissanen et al., 2022). Different from our approach, IHDM formulates a fully deterministic forward process. We conjecture the lack of randomness is the cause of their poor generation quality. Instead, " + }, + { + "bbox": [ + 104, + 523, + 504, + 579 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 523, + 504, + 579 + ], + "type": "text", + "content": "-DM proposes a natural way of incorporating blurring with stochastic noise, yielding better quantitative and qualitative results." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 584, + 504, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 584, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 584, + 504, + 673 + ], + "type": "text", + "content": "Conditional Generation. 
In Figure 5(a), we demonstrate the example of using pre-trained " + }, + { + "bbox": [ + 104, + 584, + 504, + 673 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 584, + 504, + 673 + ], + "type": "text", + "content": "-DMs to perform conditional generation based on learned transformations. We downsample and blur the sampled real images, and start the reverse diffusion following Section 3.1 with " + }, + { + "bbox": [ + 104, + 584, + 504, + 673 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 584, + 504, + 673 + ], + "type": "text", + "content": "-DM-DS and -Blur-U, respectively. Despite the difference in fine details, both our models faithfully generate high-fidelity outputs close to the real images. The same algorithm is applied to the extracted latent representations. Compared with the original VQVAE output, " + }, + { + "bbox": [ + 104, + 584, + 504, + 673 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 584, + 504, + 673 + ], + "type": "text", + "content": "-DM-VQVAE is able to obtain better reconstruction. We provide additional conditional generation samples with the ablation of the \"gradient-based\" initialization method in Appendix E.3." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "content": "Latent Space Manipulation To demonstrate " + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "content": "-DMs have learned certain abstract representations by modeling with signal transformation, we show results of latent manipulation in Figure 5. 
Here we assume DDIM sampling (" + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\eta = 0" + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "content": "), and the only stochasticity comes from the initially sampled noise " + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\mathrm{full}}" + }, + { + "bbox": [ + 104, + 688, + 504, + 733 + ], + "type": "text", + "content": ". In (b), we obtain a semantically smooth transition between two cat faces when linearly" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "interpolating the low-resolution noises; on the other hand, we show samples of the same identity with different fine details (e.g., expression, poses) in (c), which is achieved easily by sampling " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "-DM-DS with the low-resolution (" + }, + { + "bbox": [ + 
104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "16^2" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": ") noise fixed. This implies that " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "-DM is able to allocate high-level and fine-grained information in different stages via learning with downsampling." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 140, + 217, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 217, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 217, + 152 + ], + "type": "text", + "content": "4.3 ABLATION STUDIES" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 182, + 359, + 290 + ], + "blocks": [ + { + "bbox": [ + 109, + 164, + 499, + 177 + ], + "lines": [ + { + "bbox": [ + 109, + 164, + 499, + 177 + ], + "spans": [ + { + "bbox": [ + 109, + 164, + 499, + 177 + ], + "type": "text", + "content": "Table 2: Ablation of design choices for " + }, + { + "bbox": [ + 109, + 164, + 499, + 177 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 109, + 164, + 499, + 177 + ], + "type": "text", + "content": " -DMs trained on FFHQ. All faces are not real identities." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 182, + 359, + 290 + ], + "lines": [ + { + "bbox": [ + 107, + 182, + 359, + 290 + ], + "spans": [ + { + "bbox": [ + 107, + 182, + 359, + 290 + ], + "type": "table", + "html": "
ModelEq. 4RescaleStagesFID↓P↑R↑
f-DM-DSNoVPcosine26.50.700.25
YesNocosine14.50.730.43
YesSPcosine12.10.750.47
YesVPlinear13.50.730.46
YesVPcosine10.80.740.50
f-DM-VQVAEYesNolinear24.00.790.29
YesVPcosine13.80.780.45
YesVPlinear12.70.770.47
", + "image_path": "7b1ccab97c00916b6e7ab5b81330cec3bcaada51ba9c8cd293670d2866009c5e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 366, + 188, + 494, + 232 + ], + "blocks": [ + { + "bbox": [ + 366, + 188, + 494, + 232 + ], + "lines": [ + { + "bbox": [ + 366, + 188, + 494, + 232 + ], + "spans": [ + { + "bbox": [ + 366, + 188, + 494, + 232 + ], + "type": "image", + "image_path": "108e920d64674ebe358ca51519c5e49d60d13c0091102d80f6359f3ce32c520b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 366, + 239, + 494, + 283 + ], + "blocks": [ + { + "bbox": [ + 386, + 233, + 470, + 239 + ], + "lines": [ + { + "bbox": [ + 386, + 233, + 470, + 239 + ], + "spans": [ + { + "bbox": [ + 386, + 233, + 470, + 239 + ], + "type": "text", + "content": "(a) without interpolation (Eq.4)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 366, + 239, + 494, + 283 + ], + "lines": [ + { + "bbox": [ + 366, + 239, + 494, + 283 + ], + "spans": [ + { + "bbox": [ + 366, + 239, + 494, + 283 + ], + "type": "image", + "image_path": "0f44605785ca91807fbb0f0ea30d3ee8a61444f6b037f50e5d91a66484fe0c16.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 389, + 283, + 466, + 289 + ], + "lines": [ + { + "bbox": [ + 389, + 283, + 466, + 289 + ], + "spans": [ + { + "bbox": [ + 389, + 283, + 466, + 289 + ], + "type": "text", + "content": "(b) with interpolation (Eq.4)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 306, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 396 + ], + "type": "text", + "content": "Table 2 presents the ablation of the key design 
choices. As expected, the interpolation formulation (Equation 4) effectively bridges the information gap between stages, without which the prediction errors get accumulated, resulting in blurry outputs and bad scores. Table 2 also demonstrates the importance of applying correct scaling. For both models, rescaling improves the FID and recall by large margins, where SP works slightly worse than VP. In addition, we also empirically explore the difference of stage schedules. Compared to VAE-based models, we usually have more stages in DS/Blur-based models to generate high-resolution images. The cosine schedule helps diffusion move faster in regions with low information density (e.g., low-resolution, heavily blurred)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 411, + 212, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 212, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 212, + 423 + ], + "type": "text", + "content": "5 RELATED WORK" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 435, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 504, + 536 + ], + "type": "text", + "content": "Progressive Generation with DMs. Conventional DMs generate images in the same resolutions. Therefore, existing work generally adopt cascaded approaches (Nichol & Dhariwal, 2021; Ho et al., 2022a; Sahara et al., 2022a) that chains a series of conditional DMs to generate coarse-to-fine, and have been used in super-resolution (SR3, Sahara et al., 2022b). However, cascaded models tend to suffer error propagation problems. More recently, Ryu & Ye (2022) dropped the need of conditioning, and proposed to generate images in a pyramidal fashion with additional reconstruction guidance; Jing et al. (2022) explored learning subspace DMs and connecting the full space with Langevin dynamics. 
By contrast, the proposed " + }, + { + "bbox": [ + 104, + 435, + 504, + 536 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 435, + 504, + 536 + ], + "type": "text", + "content": "-DM is distinct from all the above types, which only requires one diffusion process, and the images get naturally up-sampled through reverse diffusion." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 540, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 597 + ], + "type": "text", + "content": "Blurring DMs. Several concurrent research (Rissanen et al., 2022; Daras et al., 2022; Lee et al., 2022) have recently looked into DM alternatives to combine blurring into diffusion process, some of which also showed the possibility of deterministic generation (Bansal et al., 2022). Although sharing similarities, our work starts from a different view based on signal transformation. Furthermore, our empirical results also show that stochasticity plays a critical role in high-quality generation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 601, + 504, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 504, + 647 + ], + "type": "text", + "content": "Latent Space DMs. Existing work also investigated combining DMs with standard latent variable models. To our best knowledge, most of these works adopt DMs for learning the prior of latent space, where sampling is followed by a pre-trained (Rombach et al., 2021) or jointly optimized (Vahdat et al., 2021) decoder. Conversely, " + }, + { + "bbox": [ + 104, + 601, + 504, + 647 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 601, + 504, + 647 + ], + "type": "text", + "content": "-DM does not rely on the quality decoder." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 662, + 195, + 674 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 662, + 195, + 674 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 195, + 674 + ], + "type": "text", + "content": "6 CONCLUSION" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "We proposed " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "-DM, a generalized family of diffusion models that enables generation with signal transformations. As a demonstration, we apply " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "-DM to image generation tasks with a range of transformations, including downsampling, blurring and VAEs, where " + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 687, + 504, + 733 + ], + "type": "text", + "content": "-DMs outperform the baselines in terms of synthesis quality and semantic interpretation." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 212, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 212, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 212, + 94 + ], + "type": "text", + "content": "ETHICS STATEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 162 + ], + "type": "text", + "content": "Our work focuses on technical development, i.e., synthesizing high-quality images with a range of signal transformations (e.g., downsampling, blurring). Our approach has various applications, such as movie post-production, gaming, helping artists reduce workload, and generating synthetic data as training data for other computer vision tasks. Our approach can be used to synthesize human-related images (e.g., faces), and it is not biased towards any specific gender, race, region, or social class." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 167, + 506, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 506, + 256 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 506, + 256 + ], + "type": "text", + "content": "However, the ability of generative models, including our approach, to generate high-quality images that are indistinguishable from real images, raises concerns about the misuse of these methods, e.g., generating fake images. To resolve these concerns, we need to mark all the generated results as \"synthetic\". In addition, we believe it is crucial to have authenticity assessment, such as fake image detection and identity verification, which will alleviate the potential for misuse. We hope our approach can be used to foster the development of technologies for authenticity assessment. Finally, we believe that creating a set of appropriate regulations and laws would significantly reduce the risks of misuse while bolstering positive effects on technology development." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 271, + 269, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 271, + 269, + 283 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 269, + 283 + ], + "type": "text", + "content": "REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 296, + 505, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 296, + 505, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 505, + 331 + ], + "type": "text", + "content": "We assure that all the results shown in the paper and supplemental materials can be reproduced. We believe we have provided enough implementation details in the paper and supplemental materials for the readers to reproduce the results." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 346, + 176, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 346, + 176, + 357 + ], + "spans": [ + { + "bbox": [ + 106, + 346, + 176, + 357 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 363, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 363, + 505, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 363, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 106, + 363, + 505, + 399 + ], + "type": "text", + "content": "Arpit Bansal, Eitan Borgnia, Hong-Min Chu, Jie S Li, Hamid Kazemi, Furong Huang, Micah Goldblum, Jonas Geiping, and Tom Goldstein. Cold diffusion: Inverting arbitrary image transforms without noise. arXiv preprint arXiv:2208.09392, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 403, + 504, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 403, + 504, + 429 + ], + "spans": [ + { + "bbox": [ + 107, + 403, + 504, + 429 + ], + "type": "text", + "content": "Christopher M Bishop and Nasser M Nasrabadi. Pattern recognition and machine learning, volume 4. Springer, 2006." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 434, + 506, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 434, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 106, + 434, + 506, + 469 + ], + "type": "text", + "content": "Jooyoung Choi, Jungbeom Lee, Chaehun Shin, Sungwon Kim, Hyunwoo Kim, and Sungroh Yoon. Perception prioritized training of diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11472-11481, 2022." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 474, + 506, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 474, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 107, + 474, + 506, + 510 + ], + "type": "text", + "content": "Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8188-8197, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 515, + 505, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 515, + 505, + 550 + ], + "spans": [ + { + "bbox": [ + 106, + 515, + 505, + 550 + ], + "type": "text", + "content": "Giannis Daras, Maurizio Delbracio, Hossein Talebi, Alexandros G. Dimakis, and Peyman Milanfar. Soft diffusion: Score matching for general corruptions, 2022. URL https://arxiv.org/abs/2209.05442." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 555, + 504, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 555, + 504, + 591 + ], + "spans": [ + { + "bbox": [ + 106, + 555, + 504, + 591 + ], + "type": "text", + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255, 2009. doi: 10.1109/CVPR.2009.5206848." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 597, + 504, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 597, + 504, + 622 + ], + "spans": [ + { + "bbox": [ + 107, + 597, + 504, + 622 + ], + "type": "text", + "content": "Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 627, + 504, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 627, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 106, + 627, + 504, + 651 + ], + "type": "text", + "content": "Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation using real nvp. arXiv preprint arXiv:1605.08803, 2016." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 657, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 657, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 106, + 657, + 504, + 692 + ], + "type": "text", + "content": "Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 12873-12883, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 697, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 697, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 697, + 504, + 733 + ], + "type": "text", + "content": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Q. 
Weinberger (eds.), Advances" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 115, + 82, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 82, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 505, + 117 + ], + "type": "text", + "content": "in Neural Information Processing Systems, volume 27, pp. 2672-2680. Curran Associates, Inc., 2014. URL https://proceedings.neurips.cc/paper/2014/file/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 122, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 122, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 107, + 122, + 505, + 158 + ], + "type": "text", + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 163, + 505, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 163, + 505, + 187 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 505, + 187 + ], + "type": "text", + "content": "Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 194, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 194, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 106, + 194, + 504, + 217 + ], + "type": "text", + "content": "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems, 33:6840-6851, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 224, + 504, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 224, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 106, + 224, + 504, + 257 + ], + "type": "text", + "content": "Jonathan Ho, Chitwan Sahara, William Chan, David J Fleet, Mohammad Norouzi, and Tim Salimans. Cascaded diffusion models for high fidelity image generation. *J. Mach. Learn. Res.*, 23: 47-1, 2022a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 265, + 504, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 265, + 504, + 298 + ], + "spans": [ + { + "bbox": [ + 106, + 265, + 504, + 298 + ], + "type": "text", + "content": "Jonathan Ho, Tim Salimans, Alexey A Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. In ICLR Workshop on Deep Generative Models for Highly Structured Data, 2022b." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 305, + 504, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 305, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 504, + 329 + ], + "type": "text", + "content": "Emiel Hoogeboom and Tim Salimans. Blurring diffusion models, 2022. URL https://arxiv.org/abs/2209.05557." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 335, + 504, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 335, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 106, + 335, + 504, + 358 + ], + "type": "text", + "content": "Bowen Jing, Gabriele Corso, Renato Berlinghieri, and Tommi Jaakkola. Subspace diffusion generative models. arXiv preprint arXiv:2205.01490, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 365, + 505, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 505, + 400 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 505, + 400 + ], + "type": "text", + "content": "Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401-4410, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 406, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 504, + 430 + ], + "type": "text", + "content": "Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. arXiv preprint arXiv:2106.12423, 2021." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 437, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 437, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 437, + 504, + 460 + ], + "type": "text", + "content": "Diederik Kingma, Tim Salimans, Ben Poole, and Jonathan Ho. Variational diffusion models. Advances in neural information processing systems, 34:21696-21707, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 466, + 504, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 466, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 106, + 466, + 504, + 489 + ], + "type": "text", + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 496, + 504, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 496, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 106, + 496, + 504, + 519 + ], + "type": "text", + "content": "Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 526, + 504, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 504, + 560 + ], + "type": "text", + "content": "Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in Neural Information Processing Systems, 32, 2019." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 567, + 504, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 567, + 504, + 591 + ], + "spans": [ + { + "bbox": [ + 106, + 567, + 504, + 591 + ], + "type": "text", + "content": "Sangyun Lee, Hyungjin Chung, Jaehyeon Kim, and Jong Chul Ye. Progressive deblurring of diffusion models for coarse-to-fine image synthesis. arXiv preprint arXiv:2207.11192, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 597, + 504, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 597, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 106, + 597, + 504, + 620 + ], + "type": "text", + "content": "Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, pp. 8162-8171. PMLR, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 627, + 504, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 627, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 106, + 627, + 504, + 661 + ], + "type": "text", + "content": "Vadim Popov, Ivan Vovk, Vladimir Gogoryan, Tasnama Sadekova, and Mikhail Kudinov. Grads- tts: A diffusion probabilistic model for text-to-speech. In International Conference on Machine Learning, pp. 8599-8608. PMLR, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 668, + 504, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 668, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 106, + 668, + 504, + 702 + ], + "type": "text", + "content": "Konpat Preechakul, Nattanat Chathee, Suttisak Wizadwongsa, and Supasorn Suwajanakorn. Diffusion autoencoders: Toward a meaningful and decodable representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10619-10629, 2022." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 708, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 708, + 504, + 732 + ], + "type": "text", + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 671 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + "content": "Ali Razavi, Aaron Van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-vae-2. Advances in neural information processing systems, 32, 2019." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 504, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 504, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 504, + 136 + ], + "type": "text", + "content": "Severi Rissanen, Markus Heinonen, and Arno Solin. Generative modelling with inverse heat dissipation. arXiv preprint arXiv:2206.13397, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 140, + 504, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 504, + 165 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 504, + 165 + ], + "type": "text", + "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 171, + 504, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 171, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 504, + 205 + ], + "type": "text", + "content": "Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pp. 234-241. Springer, 2015." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 212, + 504, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 212, + 504, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 212, + 504, + 235 + ], + "type": "text", + "content": "Dohoon Ryu and Jong Chul Ye. Pyramidal denoising diffusion probabilistic models. arXiv preprint arXiv:2208.01864, 2022." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 242, + 504, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 504, + 286 + ], + "type": "text", + "content": "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022a." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 293, + 504, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 504, + 327 + ], + "type": "text", + "content": "Chitwan Sahara, Jonathan Ho, William Chan, Tim Salimans, David J Fleet, and Mohammad Norouzi. Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 334, + 504, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 334, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 504, + 358 + ], + "type": "text", + "content": "Tim Salimans and Jonathan Ho. Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 364, + 504, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 364, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 504, + 399 + ], + "type": "text", + "content": "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 405, + 504, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 405, + 504, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 504, + 429 + ], + "type": "text", + "content": "Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021a." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 435, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 435, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 504, + 460 + ], + "type": "text", + "content": "Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in Neural Information Processing Systems, 32, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 465, + 504, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 465, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 465, + 504, + 499 + ], + "type": "text", + "content": "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021b." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 506, + 504, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 504, + 540 + ], + "type": "text", + "content": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2818-2826, 2016." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 547, + 504, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 547, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 547, + 504, + 571 + ], + "type": "text", + "content": "Arash Vahdat and Jan Kautz. Nvae: A deep hierarchical variational autoencoder. Advances in Neural Information Processing Systems, 33:19667-19679, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 576, + 504, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 504, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 504, + 600 + ], + "type": "text", + "content": "Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In Neural Information Processing Systems (NeurIPS), 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 606, + 504, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 504, + 631 + ], + "type": "text", + "content": "Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 636, + 504, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 636, + 504, + 671 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 504, + 671 + ], + "type": "text", + "content": "Fisher Yu, Ari Seff, Yinda Zhang, Shuran Song, Thomas Funkhouser, and Jianxiong Xiao. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365, 2015." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 173, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 173, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 173, + 94 + ], + "type": "text", + "content": "APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 109, + 307, + 123 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 109, + 307, + 123 + ], + "spans": [ + { + "bbox": [ + 105, + 109, + 307, + 123 + ], + "type": "text", + "content": "A DETAILED DERIVATION OF " + }, + { + "bbox": [ + 105, + 109, + 307, + 123 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 105, + 109, + 307, + 123 + ], + "type": "text", + "content": "-DMS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 135, + 178, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 135, + 178, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 135, + 178, + 148 + ], + "type": "text", + "content": "A.1 " + }, + { + "bbox": [ + 105, + 135, + 178, + 148 + ], + "type": "inline_equation", + "content": "q(\\mathbf{z}_t|\\mathbf{z}_s,\\mathbf{x})" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 156, + 504, + 178 + ], 
+ "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 156, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 504, + 178 + ], + "type": "text", + "content": "We derive the definition in Equation 5 with the change-of-variable trick given the fact that " + }, + { + "bbox": [ + 104, + 156, + 504, + 178 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t, \\pmb{x}_s" + }, + { + "bbox": [ + 104, + 156, + 504, + 178 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 156, + 504, + 178 + ], + "type": "inline_equation", + "content": "\\pmb{x}^k" + }, + { + "bbox": [ + 104, + 156, + 504, + 178 + ], + "type": "text", + "content": " are all deterministic functions of " + }, + { + "bbox": [ + 104, + 156, + 504, + 178 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 156, + 504, + 178 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "text", + "content": "More precisely, suppose " + }, + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}_t \\sim \\mathcal{N}(\\alpha_t \\boldsymbol{x}_t, \\sigma_t^2 I), \\boldsymbol{z}_s \\sim \\mathcal{N}(\\alpha_s \\boldsymbol{x}_s, \\sigma_s^2 I)" + }, + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "inline_equation", + "content": "\\tau_k \\leq s < t < \\tau_{k+1}" + }, + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "text", + "content": ". 
Thus, it is equivalent to have " + }, + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "inline_equation", + "content": "\\boldsymbol{u}_t \\sim \\mathcal{N}(\\alpha_t \\boldsymbol{x}^k, \\sigma_t^2 I), \\boldsymbol{u}_s \\sim \\mathcal{N}(\\alpha_s \\boldsymbol{x}^k, \\sigma_s^2 I), \\boldsymbol{u}_t = \\boldsymbol{z}_t - \\alpha_t (\\boldsymbol{x}_t - \\boldsymbol{x}^k), \\boldsymbol{u}_s = \\boldsymbol{z}_s - \\alpha_s (\\boldsymbol{x}_s - \\boldsymbol{x}^k)" + }, + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "text", + "content": ". From the above definition, it is reasonable to assume " + }, + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "inline_equation", + "content": "\\boldsymbol{u}_t, \\boldsymbol{u}_s" + }, + { + "bbox": [ + 104, + 184, + 505, + 229 + ], + "type": "text", + "content": " follow the standard DM transition, which means that:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 158, + 236, + 452, + 280 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 236, + 452, + 280 + ], + "spans": [ + { + "bbox": [ + 158, + 236, + 452, + 280 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\boldsymbol {u} _ {t} = \\alpha_ {t | s} \\boldsymbol {u} _ {s} + \\sigma_ {t | s} \\epsilon , \\epsilon \\sim \\mathcal {N} (\\boldsymbol {0}, I) \\\\ \\Rightarrow \\boldsymbol {z} _ {t} - \\alpha_ {t} \\left(\\boldsymbol {x} _ {t} - \\boldsymbol {x} ^ {k}\\right) = \\alpha_ {t | s} \\left(\\boldsymbol {z} _ {s} - \\alpha_ {s} \\left(\\boldsymbol {x} _ {s} - \\boldsymbol {x} ^ {k}\\right)\\right) + \\sigma_ {t | s} \\boldsymbol {\\epsilon}, \\quad \\boldsymbol {\\epsilon} \\sim \\mathcal {N} (\\mathbf {0}, I) \\\\ \\Rightarrow \\quad \\boldsymbol {z} _ {t} = \\alpha_ {t | s} \\boldsymbol {z} _ {s} + \\alpha_ {t} \\left(\\boldsymbol {x} _ {t} - \\boldsymbol {x} _ {s}\\right) + \\sigma_ {t | s} \\boldsymbol {\\epsilon}, \\quad \\boldsymbol {\\epsilon} \\sim \\mathcal {N} 
(\\boldsymbol {0}, I) \\\\ \\end{array}", + "image_path": "7bb9932a290e70ed3b2a89dc601f81b386f8d76910e9708c93c711f0afb95184.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "text", + "content": "As typically " + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t \\neq \\pmb{x}_s" + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "text", + "content": " and both " + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t, \\pmb{x}_s" + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "text", + "content": " are the functions of " + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\pmb{x}^k" + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "text", + "content": ". 
Then " + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "inline_equation", + "content": "z_t" + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "text", + "content": " is dependent on both " + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\pmb{z}_s" + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "inline_equation", + "content": "\\pmb{x}^k = f_{0:k}(\\pmb{x})" + }, + { + "bbox": [ + 104, + 289, + 504, + 312 + ], + "type": "text", + "content": ", resulting in a non-Markovian transition:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 195, + 319, + 414, + 335 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 319, + 414, + 335 + ], + "spans": [ + { + "bbox": [ + 195, + 319, + 414, + 335 + ], + "type": "interline_equation", + "content": "q (\\pmb {z} _ {t} | \\pmb {z} _ {s}, \\pmb {x}) = \\mathcal {N} (\\pmb {z} _ {t}; \\alpha_ {t | s} \\pmb {z} _ {s} + \\alpha_ {t} \\cdot (\\pmb {x} _ {t} - \\pmb {x} _ {s}), \\sigma_ {t | s} ^ {2} I),", + "image_path": "eb1cbbd638a209d826a2cfc39bccc528899ec2dec562e37f10de4f1f85afb8ce.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": "Note that, this equation stands only when " + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t, \\boldsymbol{x}_s" + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_k" + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": 
" are in the same space, and we did not make specific assumptions on the form of " + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 380, + 178, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 178, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 178, + 393 + ], + "type": "text", + "content": "A.2 " + }, + { + "bbox": [ + 105, + 380, + 178, + 393 + ], + "type": "inline_equation", + "content": "q(\\mathbf{z}_s|\\mathbf{z}_t,\\mathbf{x})" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "text", + "content": "The reverse diffusion distribution follows the Bayes' Theorem: " + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "inline_equation", + "content": "q(\\pmb{z}_s|\\pmb{z}_t, \\pmb{x}) \\propto q(\\pmb{z}_s|\\pmb{x})q(\\pmb{z}_t|\\pmb{z}_s, \\pmb{x})" + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "text", + "content": " where both " + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "inline_equation", + "content": "q(\\pmb{z}_s|\\pmb{x})" + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "inline_equation", + "content": "q(\\pmb{z}_t|\\pmb{z}_s, \\pmb{x})" + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "text", + "content": " are Gaussian distributions with general forms of " + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\pmb{z}_s|\\pmb{\\mu}, \\sigma^2 I)" + }, + { + "bbox": [ + 104, + 402, + 504, 
+ 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\pmb{z}_t|A\\pmb{z}_s + \\pmb{b}, \\sigma'^2 I)" + }, + { + "bbox": [ + 104, + 402, + 504, + 437 + ], + "type": "text", + "content": ", respectively. Based on Bishop & Nasrabadi (2006) (2.116), we can derive:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 180, + 444, + 428, + 459 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 444, + 428, + 459 + ], + "spans": [ + { + "bbox": [ + 180, + 444, + 428, + 459 + ], + "type": "interline_equation", + "content": "q \\left(\\boldsymbol {z} _ {s} \\mid \\boldsymbol {z} _ {t}, \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {s} \\mid \\bar {\\sigma} ^ {- 2} \\left(\\sigma^ {\\prime - 2} A ^ {\\top} \\left(\\boldsymbol {z} _ {t} - \\boldsymbol {b}\\right) + \\sigma^ {- 2} \\boldsymbol {\\mu}\\right), \\bar {\\sigma} ^ {2} I\\right),", + "image_path": "76caf9fc1caa3c92964250059def243a451e1cbe41e724c109a557d935b77c87.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\bar{\\sigma}^2 = (\\sigma^{-2} + \\sigma'^{-2}\\| A\\| ^2)^{-1}" + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "text", + "content": ". 
Therefore, we can get the exact form by plugging our variables " + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\pmb {\\mu} = \\alpha_{s}\\hat{\\pmb{x}}_{k}^{s}" + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\sigma = \\sigma_s" + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "inline_equation", + "content": "A = \\alpha_{t|s}I" + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\pmb {b} = \\alpha_{t}\\cdot (\\pmb {x}_{t} - \\pmb {x}_{s})" + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\sigma^{\\prime} = \\sigma_{t|s}" + }, + { + "bbox": [ + 104, + 466, + 504, + 491 + ], + "type": "text", + "content": " into above equation, we get:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 204, + 499, + 405, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 499, + 405, + 514 + ], + "spans": [ + { + "bbox": [ + 204, + 499, + 405, + 514 + ], + "type": "interline_equation", + "content": "q (\\pmb {z} _ {s} | \\pmb {z} _ {t}, \\pmb {x}) = \\mathcal {N} (\\pmb {z} _ {s} | \\alpha_ {s} \\pmb {x} _ {s} + \\sqrt {\\sigma_ {s} ^ {2} - \\bar {\\sigma} ^ {2}} \\pmb {\\epsilon} _ {t}, \\bar {\\sigma} ^ {2} I),", + "image_path": "44054e01bf38f662aeccb7503f5ef3e747161dbba8e7946c5be6a7296aaf7fac.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 521, + 301, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 301, + 534 + ], + "spans": 
[ + { + "bbox": [ + 105, + 521, + 301, + 534 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 521, + 301, + 534 + ], + "type": "inline_equation", + "content": "\\epsilon_{t} = (z_{t} - \\alpha_{t}\\pmb{x}_{t}) / \\sigma_{t}" + }, + { + "bbox": [ + 105, + 521, + 301, + 534 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 521, + 301, + 534 + ], + "type": "inline_equation", + "content": "\\bar{\\sigma} = \\sigma_s\\sigma_{t|s} / \\sigma_t" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "spans": [ + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "text", + "content": "Alternatively, if we assume " + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t" + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "text", + "content": " take the interpolation formulation in Equation 4, we can also re-write " + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_s" + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_t + \\frac{t - s}{t - \\tau_k} \\delta_t" + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "text", + "content": ", where we define a new variable " + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "inline_equation", + "content": "\\delta_t = \\boldsymbol{x}^k - \\boldsymbol{x}_t" + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "text", + "content": ". 
As stated in the main context (Section 3.1), such change makes " + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "inline_equation", + "content": "q(\\boldsymbol{z}_t | \\boldsymbol{z}_s, \\boldsymbol{x})" + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "text", + "content": " avoid computing " + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_s" + }, + { + "bbox": [ + 104, + 538, + 505, + 585 + ], + "type": "text", + "content": " which may be potentially costly. In this way, we re-write the above equation as follows:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 151, + 594, + 504, + 608 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 594, + 504, + 608 + ], + "spans": [ + { + "bbox": [ + 151, + 594, + 504, + 608 + ], + "type": "interline_equation", + "content": "q \\left(\\boldsymbol {z} _ {s} \\mid \\boldsymbol {z} _ {t}, \\boldsymbol {x}\\right) = \\mathcal {N} \\left(\\boldsymbol {z} _ {s} \\mid \\alpha_ {s} \\left(\\boldsymbol {x} _ {t} + \\boldsymbol {\\delta} _ {t} \\cdot (t - s) / (t - \\tau_ {k})\\right) + \\sqrt {\\sigma_ {s} ^ {2} - \\bar {\\sigma} ^ {2}} \\boldsymbol {\\epsilon} _ {t}, \\bar {\\sigma} ^ {2} I\\right), \\tag {10}", + "image_path": "d001d6d725f7b779a555c3cfecc86bd724850d145e55d447bc9417753c2c408e.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 622, + 247, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 622, + 247, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 622, + 247, + 633 + ], + "type": "text", + "content": "A.3 DIFFUSION INSIDE STAGES" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": "In the inference time, we generate data 
by iteratively sampling from the conditional distribution " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "p(\\pmb{z}_s|\\pmb{z}_t) = \\mathbb{E}_{\\pmb{x}}[q(\\pmb{z}_s|\\pmb{z}_t,\\pmb{x})]" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " based on Equation 10. In practice, the expectation over " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " is approximated by our model's prediction. As shown in Equation 9, in this work, we propose a \"double-prediction\" network " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " that reads " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{z}_t" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": ", and simultaneously predicts " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{x}_t" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\delta_t" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{\\theta}" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\delta_{\\theta}" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": ", respectively. 
The predicted Gaussian noise is denoted as " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta} = (z_{t} - \\alpha_{t}\\pmb{x}_{\\theta}) / \\sigma_{t}" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": ". Note that the prediction " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "x_{\\theta}" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " are interchangeable, which means that we can readily derive one from the other's prediction. Therefore, by replacing " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "x_{t}, \\delta_{t}, \\epsilon_{t}" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "inline_equation", + "content": "x_{\\theta}, \\delta_{\\theta}, \\epsilon_{\\theta}" + }, + { + "bbox": [ + 104, + 643, + 505, + 733 + ], + "type": "text", + "content": " in Equation 10, we obtain the sampling algorithm shown in Algorithm 1: Line 6." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 81, + 301, + 201 + ], + "blocks": [ + { + "bbox": [ + 108, + 81, + 301, + 201 + ], + "lines": [ + { + "bbox": [ + 108, + 81, + 301, + 201 + ], + "spans": [ + { + "bbox": [ + 108, + 81, + 301, + 201 + ], + "type": "image", + "image_path": "278a4753546ab0c582a6c3bc7e663c2edfe759f474771a8dd263a6022b31bae4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "lines": [ + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "spans": [ + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "text", + "content": "Figure 7: Illustration of noise schedule " + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "inline_equation", + "content": "(\\alpha_{t}" + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "inline_equation", + "content": "\\sigma_{t})" + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "text", + "content": 
"-DM-DS models with 5 stages " + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "inline_equation", + "content": "(16^{2} \\rightarrow 256^{2})" + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "text", + "content": ". We use the standard cosine noise schedule " + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "inline_equation", + "content": "\\alpha_{t} = \\cos(0.5\\pi t)" + }, + { + "bbox": [ + 104, + 212, + 504, + 246 + ], + "type": "text", + "content": ". We also show the difference between the linear/cosine stage schedule, as well as the proposed SP/VP re-scaling methods." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 309, + 80, + 503, + 200 + ], + "blocks": [ + { + "bbox": [ + 309, + 80, + 503, + 200 + ], + "lines": [ + { + "bbox": [ + 309, + 80, + 503, + 200 + ], + "spans": [ + { + "bbox": [ + 309, + 80, + 503, + 200 + ], + "type": "image", + "image_path": "9fcdfe29e54fa2f7efc299a2869440a676ddb2bb7c489a706a023ea92d7221d5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 253, + 233, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 253, + 233, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 233, + 262 + ], + "type": "text", + "content": "A.4 NOISE AT BOUNDARIES" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 273, + 504, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 273, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 504, + 319 + ], + "type": "text", + "content": "In this paper, the overall principle to handle the transition across the stage boundary is to ensure the forward diffusion to be deterministic and smooth, therefore almost no information is lost during the stage change. 
Such requirement is important as it directly correlated to the denoising performance. Failing to recover the lost information will directly affect the diversity of the model generates." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 331, + 297, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 297, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 297, + 386 + ], + "type": "text", + "content": "Forward diffusion As described in Section 3.1, since we have the control of the signal and the noise separately, we can directly apply the deterministic transformation on the signal, and dropping noise elements." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 335, + 501, + 380 + ], + "blocks": [ + { + "bbox": [ + 306, + 335, + 501, + 380 + ], + "lines": [ + { + "bbox": [ + 306, + 335, + 501, + 380 + ], + "spans": [ + { + "bbox": [ + 306, + 335, + 501, + 380 + ], + "type": "image", + "image_path": "1bf6ca480a5a681325b6c015f5959592f25a3a931f42882d723a3402bf14c78e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 390, + 497, + 403 + ], + "lines": [ + { + "bbox": [ + 309, + 390, + 497, + 403 + ], + "spans": [ + { + "bbox": [ + 309, + 390, + 497, + 403 + ], + "type": "text", + "content": "Figure 6: Two naive ways for down-sampling." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 391, + 297, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 391, + 297, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 391, + 297, + 414 + ], + "type": "text", + "content": "Alternatively, we also implemented a different " + }, + { + "bbox": [ + 105, + 391, + 297, + 414 + ], + "type": "inline_equation", + "content": "\\zeta (\\epsilon)" + }, + { + "bbox": [ + 105, + 391, + 297, + 414 + ], + "type": "text", + "content": " based on averaging. 
As shown in Figure 6," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 414, + 504, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 414, + 504, + 446 + ], + "spans": [ + { + "bbox": [ + 104, + 414, + 504, + 446 + ], + "type": "text", + "content": "if the transformation is down-sampling, we can use the fact that the mean of Gaussian noises is still Gaussian with lower variance: " + }, + { + "bbox": [ + 104, + 414, + 504, + 446 + ], + "type": "inline_equation", + "content": "(\\epsilon_0 + \\epsilon_1 + \\epsilon_2 + \\epsilon_3) / 4 \\sim \\mathcal{N}(0, \\frac{1}{4} I)" + }, + { + "bbox": [ + 104, + 414, + 504, + 446 + ], + "type": "text", + "content": ". Therefore, " + }, + { + "bbox": [ + 104, + 414, + 504, + 446 + ], + "type": "inline_equation", + "content": "\\times 2" + }, + { + "bbox": [ + 104, + 414, + 504, + 446 + ], + "type": "text", + "content": " rescaling is needed on the resulted noise." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "text", + "content": "Reverse diffusion Similarly, we can also define the reverse process if " + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\zeta" + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "text", + "content": " is chosen to be averaging. 
Different from \"dropping\" where the reverse process is simply adding independent Gaussian noises, the reverse of \"averaging\" requests to sample " + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\sum_{i=0}^{3} \\epsilon_i = 2\\epsilon" + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "text", + "content": " given the input noise " + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "text", + "content": ", while having " + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "inline_equation", + "content": "p(\\epsilon_i) = \\mathcal{N}(0, I)" + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "inline_equation", + "content": "i = 0,1,2,3" + }, + { + "bbox": [ + 104, + 460, + 504, + 517 + ], + "type": "text", + "content": ". 
Such problem has a closed solution and can be implemented in an autoregressive fashion:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 200, + 525, + 411, + 600 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 525, + 411, + 600 + ], + "spans": [ + { + "bbox": [ + 200, + 525, + 411, + 600 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} a = 2 \\epsilon ; \\\\ \\boldsymbol {\\epsilon} _ {0} = \\boldsymbol {a} / 4 + \\sqrt {3 / 4} \\cdot \\hat {\\epsilon} _ {1}, \\boldsymbol {a} = \\boldsymbol {a} - \\boldsymbol {\\epsilon} _ {0}, \\hat {\\epsilon} _ {1} \\sim \\mathcal {N} (\\boldsymbol {0}, I); \\\\ \\boldsymbol {\\epsilon} _ {1} = \\boldsymbol {a} / 3 + \\sqrt {2 / 3} \\cdot \\hat {\\epsilon} _ {2}, \\boldsymbol {a} = \\boldsymbol {a} - \\boldsymbol {\\epsilon} _ {1}, \\hat {\\epsilon} _ {2} \\sim \\mathcal {N} (\\boldsymbol {0}, I); \\\\ \\boldsymbol {\\epsilon} _ {2} = \\boldsymbol {a} / 2 + \\sqrt {1 / 2} \\cdot \\hat {\\epsilon} _ {3}, \\boldsymbol {a} = \\boldsymbol {a} - \\boldsymbol {\\epsilon} _ {2}, \\hat {\\epsilon} _ {3} \\sim \\mathcal {N} (\\boldsymbol {0}, I); \\\\ \\epsilon_ {3} = a \\\\ \\end{array}", + "image_path": "39259bf6e8c190974f687c7e3d15f4cd7cceffc0d552dfb2422d34b82c892bcd.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "type": "text", + "content": "Similar to the case of \"dropping\", we also need 3 additional samples " + }, + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "type": "inline_equation", + "content": "\\hat{\\epsilon}_{1:3}" + }, + { + "bbox": [ + 104, + 608, + 504, + 653 + ], + "type": "text", + "content": " to contribute to four noises, therefore it can be implemented in the same way as described in Section 3.1. 
Empirically, reversing the \"averaging\" steps tends to produce samples with better FID scores. However, it introduces correlations into the added noise, which may cause undesired biases especially in DDIM sampling." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 665, + 505, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 505, + 732 + ], + "type": "text", + "content": "Intuition behind Re-scaling Here we present a simple justification of applying noise rescaling. Suppose the signal dimensionality changes from " + }, + { + "bbox": [ + 104, + 665, + 505, + 732 + ], + "type": "inline_equation", + "content": "M_{k-1}" + }, + { + "bbox": [ + 104, + 665, + 505, + 732 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 665, + 505, + 732 + ], + "type": "inline_equation", + "content": "M_k" + }, + { + "bbox": [ + 104, + 665, + 505, + 732 + ], + "type": "text", + "content": " when crossing the stage boundary, and such change is caused by different sampling rates. Based on the proposed resolution-agnostic SNR (Equation 7), the number of sampled points inside " + }, + { + "bbox": [ + 104, + 665, + 505, + 732 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 104, + 665, + 505, + 732 + ], + "type": "text", + "content": " is proportional to its dimensionality. Generally, it is safe to assume signals are mostly low-frequency. Therefore, averaging signals will not change their variance. 
By contrast, as shown above, averaging Gaussian noises results in lower variance, where" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 81, + 504, + 220 + ], + "blocks": [ + { + "bbox": [ + 111, + 81, + 504, + 220 + ], + "lines": [ + { + "bbox": [ + 111, + 81, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 111, + 81, + 504, + 220 + ], + "type": "image", + "image_path": "a8f0bd125ff68052e5cdfb1ab440453e4f28e9f5687c63a07a38de767a8c7aed.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 184, + 228, + 425, + 242 + ], + "lines": [ + { + "bbox": [ + 184, + 228, + 425, + 242 + ], + "spans": [ + { + "bbox": [ + 184, + 228, + 425, + 242 + ], + "type": "text", + "content": "Figure 8: We show the comparison of the DDIM sampling." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 247, + 506, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 506, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 506, + 271 + ], + "type": "text", + "content": "in our case, the variance is proportional to " + }, + { + "bbox": [ + 104, + 247, + 506, + 271 + ], + "type": "inline_equation", + "content": "M^{-1}" + }, + { + "bbox": [ + 104, + 247, + 506, + 271 + ], + "type": "text", + "content": ". Therefore, suppose the signal magnitude does not change, we can get the re-scaling low by forcing " + }, + { + "bbox": [ + 104, + 247, + 506, + 271 + ], + "type": "inline_equation", + "content": "\\mathrm{SNR}(z_{\\tau}) = \\mathrm{SNR}(z_{\\tau^{-}})" + }, + { + "bbox": [ + 104, + 247, + 506, + 271 + ], + "type": "text", + "content": " at the stage boundary:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 250, + 277, + 359, + 292 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 277, + 359, + 292 + ], + "spans": [ + { + "bbox": [ + 250, + 277, + 359, + 292 + ], + "type": "interline_equation", + "content": "\\sigma_ {\\tau^ {-}} ^ {2} \\cdot M _ {k - 1} ^ {- 1} = \\sigma_ {\\tau} ^ {2} \\cdot M _ {k} ^ {- 1},", + "image_path": "defd593962776c6c126a5e1836b6593c87ac779ff79224de0c02f7f8f35c42e2.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "text", + "content": "which derives the signal preserving (SP) rescaling in Equation 8. 
In Figure 7, we show an example of the change of " + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "text", + "content": " with and without applying the re-scaling technique for " + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 297, + 504, + 321 + ], + "type": "text", + "content": "-DM-DS models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 335, + 211, + 345 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 211, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 211, + 345 + ], + "type": "text", + "content": "A.5 DDIM SAMPLING" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "text", + "content": "The above derivations only describe the standard ancestral sampling " + }, + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "inline_equation", + "content": "(\\eta = 1)" + }, + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "inline_equation", + "content": "q(\\pmb{z}_s|\\pmb{z}_t,\\pmb{x})" + }, + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "text", + "content": " is determined by Bayes' Theorem. Optionally, one can arbitrarily define any proper reverse diffusion distribution as long as the marginal distributions match the definition. 
For example, " + }, + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "text", + "content": "-DM can also perform deterministic DDIM (Song et al., 2021a) by setting " + }, + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "inline_equation", + "content": "\\eta = 0" + }, + { + "bbox": [ + 104, + 356, + 504, + 413 + ], + "type": "text", + "content": " in Algorithm 1. Similar to Song et al. (2021a), we can also obtain the proof based on the induction argument." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "text", + "content": "Figure 8 shows the comparison of DDIM sampling between the standard DMs and the proposed " + }, + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "text", + "content": "-DM. In DDIM sampling " + }, + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "inline_equation", + "content": "(\\eta = 0)" + }, + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "text", + "content": ", the only randomness comes from the initial noise at " + }, + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "inline_equation", + "content": "t = 1" + }, + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "text", + "content": ". Due to the proposed noise resampling technique, " + }, + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 417, + 506, + 484 + ], + "type": "text", + "content": "-DM enables a multi-scale noisng process where the sampled noises are split and sent to different steps of the diffusion process. 
In this case, compared to standard DMs, we gain the ability of controlling image generation at different levels, resulting in smooth semantic interpretation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 501, + 381, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 501, + 381, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 501, + 381, + 513 + ], + "type": "text", + "content": "B DETAILED INFORMATION OF TRANSFORMATIONS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 526, + 426, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 426, + 539 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 426, + 539 + ], + "type": "text", + "content": "We show the difference of all the transformations used in this paper in Figure 9." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 553, + 206, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 553, + 206, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 553, + 206, + 563 + ], + "type": "text", + "content": "B.1 DOWNSAMPLING" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "text", + "content": "In early development of this work, we explored various combinations of performing down-sampling: " + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "inline_equation", + "content": "\\pmb{f} = \\{\\text{bilinear, nearest, Gaussian blur + subsample}\\}" + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "inline_equation", + "content": "\\pmb{g} = \\{\\text{bilinear, bicubic, nearest, neural-based}\\}" + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "text", + "content": ". 
While all these combinations produced similar results, we empirically on FFHQ found that both choosing bilinear interpolation for both " + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "inline_equation", + "content": "\\pmb{f}, \\pmb{g}" + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "text", + "content": " achieves most stable results. Therefore, all the main experiments of " + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "text", + "content": "-DM-DS are conducted on bilinear interpolation. As discussed in Section 3.2, we choose " + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "inline_equation", + "content": "K = 4" + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "text", + "content": ", which progressively downsample a " + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "inline_equation", + "content": "256^2" + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "inline_equation", + "content": "16^2" + }, + { + "bbox": [ + 104, + 574, + 504, + 641 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 655, + 180, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 180, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 180, + 666 + ], + "type": "text", + "content": "B.2 BLURRING" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": "We experimented two types of blurring functions. 
For upsampling-based blurring, we use the same number of stages as the downsampling case; for Gaussian-based blurring, we adopt " + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "inline_equation", + "content": "K = 7" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": " with corresponding kernel sizes " + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\sigma_{B} = 15\\sin^{2}\\left(\\frac{\\pi}{2}\\tau_{k}\\right)" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\tau_{k}" + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": " follows the cosine stage schedule. In practice, we implement blurring function in frequency domain following Rissanen et al. (2022) based on discrete cosine transform (DCT)." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 113, + 86, + 355, + 135 + ], + "blocks": [ + { + "bbox": [ + 113, + 86, + 355, + 135 + ], + "lines": [ + { + "bbox": [ + 113, + 86, + 355, + 135 + ], + "spans": [ + { + "bbox": [ + 113, + 86, + 355, + 135 + ], + "type": "image", + "image_path": 
"5b3e560c3c5c56b6d3ab340c98388c262222bacc08d97b15c2fbefad85bc6dcd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 136, + 257, + 144 + ], + "lines": [ + { + "bbox": [ + 211, + 136, + 257, + 144 + ], + "spans": [ + { + "bbox": [ + 211, + 136, + 257, + 144 + ], + "type": "text", + "content": "Downsample" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 399, + 86, + 496, + 135 + ], + "blocks": [ + { + "bbox": [ + 399, + 86, + 496, + 135 + ], + "lines": [ + { + "bbox": [ + 399, + 86, + 496, + 135 + ], + "spans": [ + { + "bbox": [ + 399, + 86, + 496, + 135 + ], + "type": "image", + "image_path": "5e2992e5e311e4096de6ec3eec040c148bcf6967de1834cdae740335a091ea4d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 434, + 136, + 462, + 144 + ], + "lines": [ + { + "bbox": [ + 434, + 136, + 462, + 144 + ], + "spans": [ + { + "bbox": [ + 434, + 136, + 462, + 144 + ], + "type": "text", + "content": "VQ-VAE" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 113, + 145, + 354, + 194 + ], + "blocks": [ + { + "bbox": [ + 113, + 145, + 354, + 194 + ], + "lines": [ + { + "bbox": [ + 113, + 145, + 354, + 194 + ], + "spans": [ + { + "bbox": [ + 113, + 145, + 354, + 194 + ], + "type": "image", + "image_path": "6a72be007beec1c6ed8108dcaa9f9015e80c6cdb2a3f6af037ea87db15c2d9ef.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 208, + 195, + 259, + 204 + ], + "lines": [ + { + "bbox": [ + 208, + 195, + 259, + 204 + ], + "spans": [ + { + "bbox": [ + 208, + 195, + 259, + 204 + ], + "type": "text", + "content": "Updown Blur" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 399, + 145, + 496, + 194 + ], 
+ "blocks": [ + { + "bbox": [ + 399, + 145, + 496, + 194 + ], + "lines": [ + { + "bbox": [ + 399, + 145, + 496, + 194 + ], + "spans": [ + { + "bbox": [ + 399, + 145, + 496, + 194 + ], + "type": "image", + "image_path": "7896088996ebbadbd3bfaec61292b7fa531e9bc1ecf2647c43207881183dd663.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 113, + 205, + 496, + 254 + ], + "blocks": [ + { + "bbox": [ + 434, + 196, + 462, + 204 + ], + "lines": [ + { + "bbox": [ + 434, + 196, + 462, + 204 + ], + "spans": [ + { + "bbox": [ + 434, + 196, + 462, + 204 + ], + "type": "text", + "content": "VQ-GAN" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 113, + 205, + 496, + 254 + ], + "lines": [ + { + "bbox": [ + 113, + 205, + 496, + 254 + ], + "spans": [ + { + "bbox": [ + 113, + 205, + 496, + 254 + ], + "type": "image", + "image_path": "26a5447a6af5b30545b351421b7271a5a73e4a3e35dccf47b3bf810bde83a77f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 276, + 255, + 335, + 262 + ], + "lines": [ + { + "bbox": [ + 276, + 255, + 335, + 262 + ], + "spans": [ + { + "bbox": [ + 276, + 255, + 335, + 262 + ], + "type": "text", + "content": "Gaussian Blur" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 275, + 504, + 309 + ], + "lines": [ + { + "bbox": [ + 104, + 275, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 504, + 309 + ], + "type": "text", + "content": "Figure 9: We show examples of the five transformations (downsample, blur, VAEs) used in this paper. For downsampling, we resize the image with nearest upsampler; for VQ-VAE/VQ-GAN, we visualize the first 3 channels of the latent feature maps." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 315, + 160, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 315, + 160, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 160, + 326 + ], + "type": "text", + "content": "B.3 VAES" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "content": "In this paper, we only consider vector quantized (VQ) models with single layer latent space, while our methods can be readily applied to hierarchical (Razavi et al., 2019) and KL-regularized VAE models (Vahdat & Kautz, 2020). Following Rombach et al. (2021), we take the feature vectors before the quantization layers as the latent space, and keep the quantization step in the decoder " + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "inline_equation", + "content": "(g)" + }, + { + "bbox": [ + 104, + 336, + 504, + 392 + ], + "type": "text", + "content": " when training diffusion models." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 396, + 504, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 504, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 504, + 484 + ], + "type": "text", + "content": "We follow an open-sourced implementation " + }, + { + "bbox": [ + 104, + 396, + 504, + 484 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 104, + 396, + 504, + 484 + ], + "type": "text", + "content": " to train our VQVAE model on ImageNet. The model consists of two strided convolution blocks which by default downsamples the input image by a factor of 8. We use the default hyper-parameters and train the model for 50 epochs with a batch-size of 128. 
For a fair comparison to match the latent size of VQVAE, we use the pre-trained autoencoding model (Rombach et al., 2021) with the setting of " + }, + { + "bbox": [ + 104, + 396, + 504, + 484 + ], + "type": "inline_equation", + "content": "\\{f = 8, \\mathrm{VQ}(\\mathrm{Z} = 256, \\mathrm{d} = 4)\\}" + }, + { + "bbox": [ + 104, + 396, + 504, + 484 + ], + "type": "text", + "content": ". We directly use the checkpoint " + }, + { + "bbox": [ + 104, + 396, + 504, + 484 + ], + "type": "inline_equation", + "content": "^3" + }, + { + "bbox": [ + 104, + 396, + 504, + 484 + ], + "type": "text", + "content": " provided by the authors. Note that the above setting is not the best performing model (LDM-4) in the original paper. Therefore, it generates more artifacts when reconstructing images from the latents." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 491, + 506, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 506, + 514 + ], + "type": "text", + "content": "Before training, we compute the signal magnitude ratio " + }, + { + "bbox": [ + 104, + 491, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\gamma_{k}" + }, + { + "bbox": [ + 104, + 491, + 506, + 514 + ], + "type": "text", + "content": " (Equation 8) over the entire training set of FFHQ, where we empirically set " + }, + { + "bbox": [ + 104, + 491, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\gamma_{k} = 2.77" + }, + { + "bbox": [ + 104, + 491, + 506, + 514 + ], + "type": "text", + "content": " for VQ-GAN and " + }, + { + "bbox": [ + 104, + 491, + 506, + 514 + ], + "type": "inline_equation", + "content": "\\gamma_{k} = 2.0" + }, + { + "bbox": [ + 104, + 491, + 506, + 514 + ], + "type": "text", + "content": " for VQ-VAE, respectively." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 529, + 224, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 224, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 224, + 542 + ], + "type": "text", + "content": "C DATASET DETAILS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "type": "text", + "content": "FFHQ (https://github.com/NVlabs/ffhq-dataset) contains 70k images of real human faces in resolution of " + }, + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "type": "inline_equation", + "content": "1024^2" + }, + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "type": "text", + "content": ". For most of our experiments, we resize the images to " + }, + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "type": "inline_equation", + "content": "256^2" + }, + { + "bbox": [ + 104, + 554, + 504, + 578 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 590, + 504, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 504, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 504, + 634 + ], + "type": "text", + "content": "AFHQ (https://github.com/clovaai/stargan-v2# animal-faces-hq-dataset-afhq) contains 15k images of animal faces including cat, dog and wild three categories in resolution of " + }, + { + "bbox": [ + 104, + 590, + 504, + 634 + ], + "type": "inline_equation", + "content": "512^{2}" + }, + { + "bbox": [ + 104, + 590, + 504, + 634 + ], + "type": "text", + "content": ". We train conditional diffusion models by merging all training images with the label information. 
All images are resized to " + }, + { + "bbox": [ + 104, + 590, + 504, + 634 + ], + "type": "inline_equation", + "content": "256^{2}" + }, + { + "bbox": [ + 104, + 590, + 504, + 634 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 646, + 504, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 646, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 646, + 504, + 702 + ], + "type": "text", + "content": "LSUN (https://www.yf.io/p/1sun) is a collection of large-scale image dataset containing 10 scenes and 20 object categories. Following previous works Rombach et al. (2021), we choose the two categories – Church (126k images) and Bed (3M images), and train separate unconditional models on them. As LSUN-Bed is relatively larger, we set the iterations longer than other datasets. All images are resized to " + }, + { + "bbox": [ + 104, + 646, + 504, + 702 + ], + "type": "inline_equation", + "content": "256^2" + }, + { + "bbox": [ + 104, + 646, + 504, + 702 + ], + "type": "text", + "content": " with center-crop." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 709, + 373, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 709, + 373, + 721 + ], + "spans": [ + { + "bbox": [ + 116, + 709, + 373, + 721 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 116, + 709, + 373, + 721 + ], + "type": "text", + "content": "https://github.com/rosinality/vq-vae-2-pytorch" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 118, + 721, + 444, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 721, + 444, + 732 + ], + "spans": [ + { + "bbox": [ + 118, + 721, + 444, + 732 + ], + "type": "text", + "content": "3https://ommer-lab.com/files/latent-diffusion/vq-f8-n256.zip" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 83, + 500, + 206 + ], + "blocks": [ + { + "bbox": [ + 112, + 83, + 500, + 206 + ], + "lines": [ + { + "bbox": [ + 112, + 83, + 500, + 206 + ], + "spans": [ + { + "bbox": [ + 112, + 83, + 500, + 206 + ], + "type": "image", + "image_path": "8ac2a2769616de0707aebf77761e09cbe085836873539f1e2371fec00392f9e2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 203, + 215, + 405, + 228 + ], + "lines": [ + 
{ + "bbox": [ + 203, + 215, + 405, + 228 + ], + "spans": [ + { + "bbox": [ + 203, + 215, + 405, + 228 + ], + "type": "text", + "content": "Figure 10: An illustration of the training pipeline." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "text", + "content": "ImageNet (https://image-net.org/download.php) we use the standard ImageNet-1K dataset which contains 1.28M images across 1000 classes. We directly merge all the training images with class-labels. All images are resized to " + }, + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "inline_equation", + "content": "256^2" + }, + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "text", + "content": " with center-crop. For both " + }, + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "text", + "content": "-DM and the baseline models, we adopt the classifier-free guidance (Ho & Salimans, 2022) with the unconditional probability 0.2. In the inference time, we use the guidance scale " + }, + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "inline_equation", + "content": "(s = 2)" + }, + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "text", + "content": " for computing FIDs, and " + }, + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "inline_equation", + "content": "s = 3" + }, + { + "bbox": [ + 104, + 228, + 504, + 296 + ], + "type": "text", + "content": " to synthesize examples for comparison." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 320, + 269, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 320, + 269, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 320, + 269, + 333 + ], + "type": "text", + "content": "D IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 351, + 282, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 351, + 282, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 282, + 361 + ], + "type": "text", + "content": "D.1 ARCHITECTURE CONFIGURATIONS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 375, + 504, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 375, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 375, + 504, + 431 + ], + "type": "text", + "content": "We implement " + }, + { + "bbox": [ + 104, + 375, + 504, + 431 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 375, + 504, + 431 + ], + "type": "text", + "content": "-DM strictly following standard U-Net architecture in Nichol & Dhariwal (2021). As shown in Figure 11, input " + }, + { + "bbox": [ + 104, + 375, + 504, + 431 + ], + "type": "inline_equation", + "content": "z_{t}" + }, + { + "bbox": [ + 104, + 375, + 504, + 431 + ], + "type": "text", + "content": " will be directed to the corresponding inner layer based on spatial resolutions, and a stage-specific adapter is adopted to transform the channel dimension. Such architecture also allows memory-efficient batching across stages where we can create a batch with various resolutions, and split the computation based on the resolutions." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 454, + 225, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 454, + 225, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 454, + 225, + 464 + ], + "type": "text", + "content": "D.2 HYPER-PARAMETERS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 478, + 504, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 504, + 544 + ], + "type": "text", + "content": "In our experiments, we adopt the following two sets of parameters based on the complexity of the dataset: base (FFHQ, AFHQ, LSUN-Church/Bed) and big (ImageNet). For base, we use 1 residual block per resolution, with the basic dimension 128. For big, we use 2 residual blocks with the basic dimension 192. Given one dataset, all the models with various transformations including the baseline DMs share the same hyper-parameters except for the adapters. We list the hyperparameter details in Table 3." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 153, + 561, + 457, + 695 + ], + "blocks": [ + { + "bbox": [ + 153, + 561, + 457, + 695 + ], + "lines": [ + { + "bbox": [ + 153, + 561, + 457, + 695 + ], + "spans": [ + { + "bbox": [ + 153, + 561, + 457, + 695 + ], + "type": "table", + "html": "
Hyper-param.FFHQAFHQLSUN-ChurchLSUN-BedImageNet
image res.25622562256225622562
# of classesNone3NoneNone1000
c.f. guidance-No--Yes
#channels128128128128192
#res-blocks11112
channel multi.[1,1,2,2,4,4]
attention res.16,8
batch size3232323264
lr2e-5
iterations500K500K500K1200K2500K
", + "image_path": "35ce6b0f08c1c20838d3ae4decf7bb653da16fe579be8c8ea675ac7b06d88f62.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 162, + 702, + 447, + 715 + ], + "lines": [ + { + "bbox": [ + 162, + 702, + 447, + 715 + ], + "spans": [ + { + "bbox": [ + 162, + 702, + 447, + 715 + ], + "type": "text", + "content": "Table 3: Hyperparameters and settings for " + }, + { + "bbox": [ + 162, + 702, + 447, + 715 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 162, + 702, + 447, + 715 + ], + "type": "text", + "content": " -DM on different datasets." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 84, + 504, + 185 + ], + "blocks": [ + { + "bbox": [ + 108, + 84, + 504, + 185 + ], + "lines": [ + { + "bbox": [ + 108, + 84, + 504, + 185 + ], + "spans": [ + { + "bbox": [ + 108, + 84, + 504, + 185 + ], + "type": "image", + "image_path": "280bce376cb58a90c764d5be92a1d3087016a9c77530f61850439042834418fb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "lines": [ + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 
506, + 228 + ], + "type": "text", + "content": "Figure 11: An illustration of the modified U-Net architecture. Time conditioning is omitted. The parameters are partially shared across stages based on the resolutions. Stage-specific adapters are adopted to transform the input dimensions." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 108, + 243, + 503, + 578 + ], + "blocks": [ + { + "bbox": [ + 108, + 243, + 503, + 578 + ], + "lines": [ + { + "bbox": [ + 108, + 243, + 503, + 578 + ], + "spans": [ + { + "bbox": [ + 108, + 243, + 503, + 578 + ], + "type": "image", + "image_path": "9bcf1a75278e8d2281965673b9c05dbad410dcb5b0b7d4f5f76e1fd3d5c19fd4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": "Figure 12: Additional comparisons with Cascaded DM on AFHQ. " + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": " Comparison of the reverse diffusion process from " + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "inline_equation", + "content": "16^{2}" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "inline_equation", + "content": "256^{2}" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": ". 
We visualize the denoised outputs " + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "inline_equation", + "content": "(\\boldsymbol{x}_t)" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": " and the corresponding next noised input " + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "inline_equation", + "content": "(z_{s})" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": " near the start & end of each resolution diffusion. " + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": " Comparison of random samples generated by Cascaded DM and " + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 586, + 506, + 633 + ], + "type": "text", + "content": "-DM-DS." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 637, + 244, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 637, + 244, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 244, + 649 + ], + "type": "text", + "content": "E ADDITIONAL RESULTS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 665, + 315, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 665, + 315, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 315, + 677 + ], + "type": "text", + "content": "E.1 QUANTITATIVE COMPARISON WITH DDIM" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": "We also include comparison of " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + 
"type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": "-DM with the standard DM using DDIM sampling (" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\eta = 0" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": ") in Table 4. Similar to the conclusion drawn from Table 1, the proposed " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": "-DM can achieve comparable or even better performance than baseline DM even with " + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\eta = 0" + }, + { + "bbox": [ + 104, + 687, + 505, + 733 + ], + "type": "text", + "content": " (generation only controlled by the initial noise, see Figure 8), while having better scores for DDIM with half generation steps." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 83, + 302, + 284 + ], + "blocks": [ + { + "bbox": [ + 111, + 83, + 302, + 284 + ], + "lines": [ + { + "bbox": [ + 111, + 83, + 302, + 284 + ], + "spans": [ + { + "bbox": [ + 111, + 83, + 302, + 284 + ], + "type": "image", + "image_path": "7dcde01bb04d5d291ef70d6b3ae7e95bf484cb4f99ae3ed3804760fe2611a9e3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 309, + 82, + 501, + 284 + ], + "blocks": [ + { + "bbox": [ + 309, + 82, + 501, + 284 + ], + "lines": [ + { + "bbox": [ + 309, + 82, + 501, + 284 + ], + "spans": [ + { + "bbox": [ + 309, + 82, + 501, + 284 + ], + "type": "image", + "image_path": "4f5b223dfc3d6aff5adb4963b5f5094d8e3d2d008d7386ef0b2ac033d7d1942d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 111, + 286, + 301, + 488 + ], + "blocks": [ + { + "bbox": [ + 111, + 286, + 301, + 488 + ], + "lines": [ + { + "bbox": [ + 111, + 286, + 301, + 488 + ], + "spans": [ + { + "bbox": [ + 111, + 286, + 301, + 488 + ], + "type": "image", + "image_path": "cd734a886b73f3d4822b4bd31ee8e54ca4661a7fac6a569dfe5cc1d8681fbbd1.jpg" + } + ] 
+ } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 186, + 503, + 423, + 515 + ], + "lines": [ + { + "bbox": [ + 186, + 503, + 423, + 515 + ], + "spans": [ + { + "bbox": [ + 186, + 503, + 423, + 515 + ], + "type": "text", + "content": "Figure 13: Additional comparisons with LDMs on AFHQ." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 287, + 501, + 488 + ], + "blocks": [ + { + "bbox": [ + 309, + 287, + 501, + 488 + ], + "lines": [ + { + "bbox": [ + 309, + 287, + 501, + 488 + ], + "spans": [ + { + "bbox": [ + 309, + 287, + 501, + 488 + ], + "type": "image", + "image_path": "6e100b42a0472267a4df7aa950b52b8d7c0dda9c722783999fb6aa35ba59a2e4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 149, + 540, + 462, + 613 + ], + "blocks": [ + { + "bbox": [ + 149, + 540, + 462, + 613 + ], + "lines": [ + { + "bbox": [ + 149, + 540, + 462, + 613 + ], + "spans": [ + { + "bbox": [ + 149, + 540, + 462, + 613 + ], + "type": "table", + "html": "
ModelsFID↓P↑R↑FID↓P↑R↑Speed
FFHQ256 × 256AFHQ256 × 256
DDIM11.40.710.5312.10.580.65×1.0
DDIM (1/2)13.00.700.5116.80.480.64×2.0
f-DM-DS (η = 0)12.60.760.555.80.760.55×2.1
", + "image_path": "b6536d8e3fb036380a7f534155dde454f36eabde66e579996f12b0d52d8269df.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 159, + 620, + 449, + 633 + ], + "lines": [ + { + "bbox": [ + 159, + 620, + 449, + 633 + ], + "spans": [ + { + "bbox": [ + 159, + 620, + 449, + 633 + ], + "type": "text", + "content": "Table 4: Comparison on FFHQ and AFHQ for DDIM sampling " + }, + { + "bbox": [ + 159, + 620, + 449, + 633 + ], + "type": "inline_equation", + "content": "\\left( {\\eta = 0}\\right)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 639, + 326, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 326, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 326, + 649 + ], + "type": "text", + "content": "E.2 V.S. TRANSFORMATION-SPECIFIC BASELINES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "We include more comparisons in Figure 12 and 13. From Figure 12, we compare the generation process of " + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "-DM and the cascaded DM. It is clear that " + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "-DM conducts coarse-to-fine generation in a more natural way, and the results will not suffer from error propagation. As shown in Figure 13, LDM outputs are easily affected by the chosen decoder. 
VQVAE decoder tends output blurry images; the output from VQGAN decoder has much finer details while remaining noticeable artifacts (e.g., eyes, furs). By contrast, " + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "-DM performs stably for both latent spaces." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 254, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 254, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 254, + 94 + ], + "type": "text", + "content": "E.3 CONDITIONAL GENERATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 148 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 148 + ], + "type": "text", + "content": "We include additional results of conditional generation, i.e., super-resolution (Figure 14) and deblurring (Figure 15). 
We also show the comparison with or without the proposed gradient-based initialization, which greatly improves the faithfulness of conditional generation when the input noise is high (e.g., " + }, + { + "bbox": [ + 104, + 102, + 506, + 148 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 104, + 102, + 506, + 148 + ], + "type": "text", + "content": " input)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 161, + 291, + 172 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 161, + 291, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 161, + 291, + 172 + ], + "type": "text", + "content": "E.4 ADDITIONAL QUALITATIVE RESULTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 181, + 504, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 181, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 504, + 205 + ], + "type": "text", + "content": "Finally, we provide additional qualitative results for our unconditional models for FFHQ (Figure 16), AFHQ (Figure 17), LSUN (Figure 18) and our class-conditional ImageNet model (Figure 19,20)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 220, + 301, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 301, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 301, + 232 + ], + "type": "text", + "content": "F LIMITATIONS AND FUTURE WORK" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "text", + "content": "Although " + }, + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "text", + "content": "-DM enables diffusion with signal transformations, which greatly extends the scope of DMs to work in transformed space, there still exist limitations and opportunities for future work. First, it is an empirical question to find the optimal stage schedule for all transformations. Our ablation studies also show that different heuristics have differences for DS-based and VAE-based models. A metric that can automatically determine the best stage schedule based on the property of each transformation is needed and will be explored in the future. In addition, although the current method achieves faster inference when generating with transformations like down-sampling, the speed-up is not very significant as we still take the standard DDPM steps. How to further accelerate the inference process of DMs is a challenging and orthogonal direction. For example, it has great potential to combine " + }, + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "text", + "content": "-DM with speed-up techniques such as knowledge distillation (Salimans & Ho, 2022). 
Moreover, no matter hand-designed or learned, all the transformations used in " + }, + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "text", + "content": "-DM are still fixed when training DM. It is, however, different from typical VAEs, where both the encoder and decoder are jointly optimized during training. Therefore, starting from a random/imperfect transformation and training " + }, + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 244, + 506, + 410 + ], + "type": "text", + "content": "-DM jointly with the transformations towards certain target objectives will be studied as future work." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 131, + 504, + 628 + ], + "blocks": [ + { + "bbox": [ + 108, + 131, + 504, + 628 + ], + "lines": [ + { + "bbox": [ + 108, + 131, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 108, + 131, + 504, + 628 + ], + "type": "image", + "image_path": "527fa641e31bbc8d987dcf040cb6e87fb22700061f4ece617935f253b53f179b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], 
+ "lines": [ + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": "Figure 14: Additional examples of super-resolution (SR) with the unconditional " + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": "-DM-DS trained on AFHQ. " + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": " The same input image with various resolution " + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "inline_equation", + "content": "16^2" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "inline_equation", + "content": "32^2" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "inline_equation", + "content": "64^2" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "inline_equation", + "content": "128^2" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": ". We sample 3 random seeds for each resolution input. We also show the difference with and without applying gradient-based initialization (Grad-Init) on " + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": " SR results of various " + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "inline_equation", + "content": "16^2" + }, + { + "bbox": [ + 104, + 638, + 504, + 684 + ], + "type": "text", + "content": " inputs." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 131, + 500, + 625 + ], + "blocks": [ + { + "bbox": [ + 108, + 131, + 500, + 625 + ], + "lines": [ + { + "bbox": [ + 108, + 131, + 500, + 625 + ], + "spans": [ + { + "bbox": [ + 108, + 131, + 500, + 625 + ], + "type": "image", + "image_path": "b2a52f5d1274f1b3bbb4148ad6c1d86040a7f2c1de8bd4408a8d91ce3678ac38.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "lines": [ + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "text", + "content": "Figure 15: Additional examples of de-blurring with the unconditional " + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": 
"inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "text", + "content": "-DM-Blur-G trained on AFHQ. " + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "text", + "content": " The same input image with various Gaussian kernel sizes " + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "inline_equation", + "content": "\\sigma = 15,9,4,1.4" + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "text", + "content": ". We sample 3 random seeds for each resolution input. We also show the difference with and without applying gradient-based initialization (Grad-Init) on " + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 104, + 636, + 504, + 682 + ], + "type": "text", + "content": " Deblurred results of various blur images." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 129, + 502, + 228 + ], + "blocks": [ + { + "bbox": [ + 288, + 122, + 317, + 129 + ], + "lines": [ + { + "bbox": [ + 288, + 122, + 317, + 129 + ], + "spans": [ + { + "bbox": [ + 288, + 122, + 317, + 129 + ], + "type": "text", + "content": "f-DM-DS" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 129, + 502, + 228 + ], + "lines": [ + { + "bbox": [ + 109, + 129, + 502, + 228 + ], + "spans": [ + { + "bbox": [ + 109, + 129, + 502, + 228 + ], + "type": "image", + "image_path": "7b376d50f3fbb17417c81b86e41119a6d81d9b2c1cdd37842a01ea4d30fd1394.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 236, + 502, + 335 + ], + "blocks": [ + { + "bbox": [ + 282, + 229, + 323, + 236 + ], + "lines": [ + { + "bbox": [ + 282, + 229, + 323, + 236 + ], + "spans": [ + { + "bbox": [ + 282, + 229, + 323, + 236 + ], + "type": "text", + "content": "f-DM-Blur-U" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 236, + 502, + 335 + ], + "lines": [ + { + "bbox": [ + 109, + 
236, + 502, + 335 + ], + "spans": [ + { + "bbox": [ + 109, + 236, + 502, + 335 + ], + "type": "image", + "image_path": "d9603c0ceb1995fe685cbe619608de12afadf938bf77468eef8f3119d0568786.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 281, + 336, + 323, + 342 + ], + "lines": [ + { + "bbox": [ + 281, + 336, + 323, + 342 + ], + "spans": [ + { + "bbox": [ + 281, + 336, + 323, + 342 + ], + "type": "text", + "content": "f-DM-Blur-G" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 109, + 343, + 502, + 442 + ], + "blocks": [ + { + "bbox": [ + 109, + 343, + 502, + 442 + ], + "lines": [ + { + "bbox": [ + 109, + 343, + 502, + 442 + ], + "spans": [ + { + "bbox": [ + 109, + 343, + 502, + 442 + ], + "type": "image", + "image_path": "bd601770522a8f005064bd2ef2f7d076ead1b3f13a0c881165fba34b074d3daa.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 450, + 502, + 548 + ], + "blocks": [ + { + "bbox": [ + 284, + 443, + 321, + 450 + ], + "lines": [ + { + "bbox": [ + 284, + 443, + 321, + 450 + ], + "spans": [ + { + "bbox": [ + 284, + 443, + 321, + 450 + ], + "type": "text", + "content": "f-DM-VQVAE" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 450, + 502, + 548 + ], + "lines": [ + { + "bbox": [ + 109, + 450, + 502, + 548 + ], + "spans": [ + { + "bbox": [ + 109, + 450, + 502, + 548 + ], + "type": "image", + "image_path": "baa4b41cfb27f40c4d0c44f604de40b5a0520d3d019d75be50efa7603692df7e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 109, + 556, + 502, + 656 + ], + "blocks": [ + { + "bbox": [ + 284, + 550, + 321, + 556 + ], + "lines": [ + { + "bbox": [ + 284, + 550, + 321, + 556 + ], + "spans": [ + { + "bbox": [ + 284, + 
550, + 321, + 556 + ], + "type": "text", + "content": "f-DM-VQGAN" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 556, + 502, + 656 + ], + "lines": [ + { + "bbox": [ + 109, + 556, + 502, + 656 + ], + "spans": [ + { + "bbox": [ + 109, + 556, + 502, + 656 + ], + "type": "image", + "image_path": "98743f2baf134e47d814d66c0a0e89cc2e641289c6623c2a92ffd4a96da966ff.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "type": "text", + "content": "Figure 16: Random samples generated by five " + }, + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "type": "text", + "content": "-DMs trained on FFHQ " + }, + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "type": "text", + "content": ". All faces presented are synthesized by the models, and are not real identities." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 134, + 502, + 233 + ], + "blocks": [ + { + "bbox": [ + 288, + 126, + 317, + 133 + ], + "lines": [ + { + "bbox": [ + 288, + 126, + 317, + 133 + ], + "spans": [ + { + "bbox": [ + 288, + 126, + 317, + 133 + ], + "type": "text", + "content": "f-DM-DS" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 134, + 502, + 233 + ], + "lines": [ + { + "bbox": [ + 108, + 134, + 502, + 233 + ], + "spans": [ + { + "bbox": [ + 108, + 134, + 502, + 233 + ], + "type": "image", + "image_path": "9abce0b45bb07fb3f7a2dd430cfb737e45e15ec896e5fb4d56569fd92c866168.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 241, + 502, + 340 + ], + "blocks": [ + { + "bbox": [ + 282, + 234, + 323, + 240 + ], + "lines": [ + { + "bbox": [ + 282, + 234, + 323, + 240 + ], + "spans": [ + { + "bbox": [ + 282, + 234, + 323, + 240 + ], + "type": "text", + "content": "f-DM-Blur-U" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 241, + 502, + 340 + ], + "lines": [ + { + "bbox": [ + 109, 
+ 241, + 502, + 340 + ], + "spans": [ + { + "bbox": [ + 109, + 241, + 502, + 340 + ], + "type": "image", + "image_path": "785682623407e89f8acb2175e5b4445c45e38121f30a5a9cc43f1c22f883f724.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 109, + 348, + 502, + 447 + ], + "blocks": [ + { + "bbox": [ + 281, + 342, + 323, + 348 + ], + "lines": [ + { + "bbox": [ + 281, + 342, + 323, + 348 + ], + "spans": [ + { + "bbox": [ + 281, + 342, + 323, + 348 + ], + "type": "text", + "content": "f-DM-Blur-G" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 348, + 502, + 447 + ], + "lines": [ + { + "bbox": [ + 109, + 348, + 502, + 447 + ], + "spans": [ + { + "bbox": [ + 109, + 348, + 502, + 447 + ], + "type": "image", + "image_path": "8962e1725427272f00a91c85dc75d236f2af6440cb98a0f06f73bf4e8bec8d23.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 284, + 449, + 321, + 454 + ], + "lines": [ + { + "bbox": [ + 284, + 449, + 321, + 454 + ], + "spans": [ + { + "bbox": [ + 284, + 449, + 321, + 454 + ], + "type": "text", + "content": "f-DM-VQVAE" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 456, + 502, + 555 + ], + "blocks": [ + { + "bbox": [ + 109, + 456, + 502, + 555 + ], + "lines": [ + { + "bbox": [ + 109, + 456, + 502, + 555 + ], + "spans": [ + { + "bbox": [ + 109, + 456, + 502, + 555 + ], + "type": "image", + "image_path": "f14f00221059dde1432e1c6989addc27715d5a8ac19529cd8f21205e8393de6a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 284, + 555, + 321, + 561 + ], + "lines": [ + { + "bbox": [ + 284, + 555, + 321, + 561 + ], + "spans": [ + { + "bbox": [ + 284, + 555, + 321, + 561 + ], + "type": "text", + "content": "f-DM-VOGAN" + } + ] + } + ], + "index": 9, + 
"angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 109, + 562, + 502, + 661 + ], + "blocks": [ + { + "bbox": [ + 109, + 562, + 502, + 661 + ], + "lines": [ + { + "bbox": [ + 109, + 562, + 502, + 661 + ], + "spans": [ + { + "bbox": [ + 109, + 562, + 502, + 661 + ], + "type": "image", + "image_path": "82e10cb497232c26a1d260ad0e62184f671cdc0ad1cf3a8b0c3df48b5804fe9a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 673, + 474, + 686 + ], + "lines": [ + { + "bbox": [ + 135, + 673, + 474, + 686 + ], + "spans": [ + { + "bbox": [ + 135, + 673, + 474, + 686 + ], + "type": "text", + "content": "Figure 17: Random samples generated by five " + }, + { + "bbox": [ + 135, + 673, + 474, + 686 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 135, + 673, + 474, + 686 + ], + "type": "text", + "content": "-DMs trained on AFHQ " + }, + { + "bbox": [ + 135, + 673, + 474, + 686 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 135, + 673, + 474, + 686 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 118, + 502, + 219 + ], + "blocks": [ + { + "bbox": [ + 288, + 111, + 317, + 118 + ], + "lines": [ + { + "bbox": [ + 288, + 111, + 317, + 118 + ], + "spans": [ + { + "bbox": [ + 288, + 111, + 317, + 118 + ], + "type": "text", + "content": "f-DM-DS" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 118, + 502, + 219 + ], + "lines": [ + { + "bbox": [ + 109, + 118, + 502, + 219 + ], + "spans": [ + { + "bbox": [ + 109, + 118, + 502, + 219 + ], + "type": "image", + "image_path": "0f66f8c4f7a1bcbff94332b019b83b17d12a0ab7652d4adcfdd73b9c8f809010.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 219, + 323, + 225 + ], + "lines": [ + { + "bbox": [ + 282, + 219, + 323, + 225 + ], + "spans": [ + { + "bbox": [ + 282, + 219, + 323, + 225 + ], + "type": "text", + "content": "f-DM-Blur-U" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 226, + 502, + 326 + ], + "blocks": [ + { + "bbox": [ + 109, + 226, + 502, + 326 + ], + "lines": [ + { + "bbox": [ + 109, 
+ 226, + 502, + 326 + ], + "spans": [ + { + "bbox": [ + 109, + 226, + 502, + 326 + ], + "type": "image", + "image_path": "3bcf687510122b709b0f355e99d0ad59f0ac4091374d535c3b0d891550cfdf67.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 326, + 320, + 332 + ], + "lines": [ + { + "bbox": [ + 282, + 326, + 320, + 332 + ], + "spans": [ + { + "bbox": [ + 282, + 326, + 320, + 332 + ], + "type": "text", + "content": "f-DM-VQVAE" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 109, + 332, + 502, + 432 + ], + "blocks": [ + { + "bbox": [ + 109, + 332, + 502, + 432 + ], + "lines": [ + { + "bbox": [ + 109, + 332, + 502, + 432 + ], + "spans": [ + { + "bbox": [ + 109, + 332, + 502, + 432 + ], + "type": "image", + "image_path": "a5579ed3acd07cf63f3b5b8d83417b7a41f077a1d1755be37e2899fd00cdfc20.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 471, + 502, + 571 + ], + "blocks": [ + { + "bbox": [ + 288, + 464, + 316, + 471 + ], + "lines": [ + { + "bbox": [ + 288, + 464, + 316, + 471 + ], + "spans": [ + { + "bbox": [ + 288, + 464, + 316, + 471 + ], + "type": "text", + "content": "f-DM-DS" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 109, + 471, + 502, + 571 + ], + "lines": [ + { + "bbox": [ + 109, + 471, + 502, + 571 + ], + "spans": [ + { + "bbox": [ + 109, + 471, + 502, + 571 + ], + "type": "image", + "image_path": "5bbabc7d878e38dd4f8b109d66fe8d0157d3d2def6febec595e6e1c7f572b1af.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 283, + 571, + 321, + 578 + ], + "lines": [ + { + "bbox": [ + 283, + 571, + 321, + 578 + ], + "spans": [ + { + "bbox": [ + 283, + 571, + 321, + 578 + ], + "type": "text", + "content": "f-DM-VQVAE" + } + ] + } + ], + "index": 9, + 
"angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 109, + 578, + 502, + 677 + ], + "blocks": [ + { + "bbox": [ + 109, + 578, + 502, + 677 + ], + "lines": [ + { + "bbox": [ + 109, + 578, + 502, + 677 + ], + "spans": [ + { + "bbox": [ + 109, + 578, + 502, + 677 + ], + "type": "image", + "image_path": "e303651ff57e3530709c7a08616817856763cb9feb7894dc571e383118b2d38f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 112, + 688, + 497, + 700 + ], + "lines": [ + { + "bbox": [ + 112, + 688, + 497, + 700 + ], + "spans": [ + { + "bbox": [ + 112, + 688, + 497, + 700 + ], + "type": "text", + "content": "Figure 18: Random samples generated by " + }, + { + "bbox": [ + 112, + 688, + 497, + 700 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 112, + 688, + 497, + 700 + ], + "type": "text", + "content": "-DMs trained on LSUN-Church & -Bed " + }, + { + "bbox": [ + 112, + 688, + 497, + 700 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 112, + 688, + 497, + 700 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 89, + 303, + 678 + ], + "blocks": [ + { + "bbox": [ + 111, + 89, + 303, + 678 + ], + "lines": [ + { + "bbox": [ + 111, + 89, + 303, + 678 + ], + "spans": [ + { + "bbox": [ + 111, + 89, + 303, + 678 + ], + "type": "image", + "image_path": "c89eb1799e80f0ae0c0eede98f8ca235e903e7a75bcb980a6e9721b9fee271c5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "Figure 19: Random samples generated by " + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "-DM-DS/VQVAE trained on ImageNet " + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": " with classifier-free guidance (" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": 
"inline_equation", + "content": "s = 3" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "). Classes from top to bottom: red panda, robin, daisy, valley, trifle, comic book." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 89, + 498, + 677 + ], + "blocks": [ + { + "bbox": [ + 307, + 89, + 498, + 677 + ], + "lines": [ + { + "bbox": [ + 307, + 89, + 498, + 677 + ], + "spans": [ + { + "bbox": [ + 307, + 89, + 498, + 677 + ], + "type": "image", + "image_path": "96f5115d8f8bd7de6c1b5560a089fdfca6b53803c3d564c05f45d89e39dbacd9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 86, + 500, + 680 + ], + "blocks": [ + { + "bbox": [ + 110, + 86, + 500, + 680 + ], + "lines": [ + { + "bbox": [ + 110, + 86, + 500, + 680 + ], + "spans": [ + { + "bbox": [ + 110, + 86, + 500, + 680 + ], + "type": "image", + "image_path": "eee539c601ed5a3ee6c6e70115606539bd6f6f568c1f4ae543f1c097c0831fc3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 691, + 504, + 726 + ], + "lines": [ + { + "bbox": [ + 104, + 691, + 504, + 726 + ], + 
"spans": [ + { + "bbox": [ + 104, + 691, + 504, + 726 + ], + "type": "text", + "content": "Figure 20: Random samples generated by " + }, + { + "bbox": [ + 104, + 691, + 504, + 726 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 691, + 504, + 726 + ], + "type": "text", + "content": "-DM-DS/VQVAE trained on ImageNet " + }, + { + "bbox": [ + 104, + 691, + 504, + 726 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 691, + 504, + 726 + ], + "type": "text", + "content": " with classifier-free guidance (" + }, + { + "bbox": [ + 104, + 691, + 504, + 726 + ], + "type": "inline_equation", + "content": "s = 3" + }, + { + "bbox": [ + 104, + 691, + 504, + 726 + ], + "type": "text", + "content": "). Classes from top to bottom: school bus, pizza, seashore, photocopier, golden retriever, axolotl." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_content_list.json b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_content_list.json new file mode 
100644 index 0000000000000000000000000000000000000000..3bdf98ae35a7964ea81a9e3779612032a50c38c3 --- /dev/null +++ b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_content_list.json @@ -0,0 +1,5211 @@ +[ + { + "type": "text", + "text": "KNN-DIFFUSION: IMAGE GENERATION VIA LARGE-SCALE RETRIEVAL", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 146 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shelly Sheynin*, Oron Ashual*, Adam Polyak, Uriel Singer, Oran Gafni, Eliya Nachmani, Yaniv Taigman", + "bbox": [ + 179, + 169, + 694, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Equal Contribution Meta AI", + "bbox": [ + 183, + 199, + 509, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{shellysheynin, oron}@meta.com", + "bbox": [ + 184, + 213, + 442, + 226 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e21cff83cf03a421c25a92a059e186e9b355f0366a8c027f745f30303b7713f6.jpg", + "image_caption": [ + "Figure 1: (a) Samples of stickers generated from text inputs, (b) Semantic text-guided manipulations applied to the \"Original\" image without using edit masks." + ], + "image_footnote": [], + "bbox": [ + 178, + 257, + 818, + 455 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 503, + 545, + 518 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent text-to-image models have achieved impressive results. However, since they require large-scale datasets of text-image pairs, it is impractical to train them on new domains where data is scarce or not labeled. 
In this work, we propose using large-scale retrieval methods, in particular, efficient $k$ -Nearest-Neighbors (kNN), which offers novel capabilities: (1) training a substantially small and efficient text-to-image diffusion model using only pre-trained multi-modal embeddings, but without an explicit text-image dataset, (2) generating out-of-distribution images by simply swapping the retrieval database at inference time, and (3) performing text-driven local semantic manipulations while preserving object identity. To demonstrate the robustness of our method, we apply our kNN approach on two state-of-the-art diffusion backbones, and show results on several different datasets. As evaluated by human studies and automatic metrics, our method achieves state-of-the-art results compared to existing approaches that train text-to-image generation models using images-only dataset.", + "bbox": [ + 228, + 534, + 767, + 729 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 173, + 753, + 336, + 768 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large-scale generative models have been applied successfully to image generation tasks (Gafni et al., 2022; Ramesh et al., 2021; Nichol et al., 2021; Sahara et al., 2022; Yu et al., 2022), and have shown outstanding capabilities in extending human creativity using editing and user control. However, these models face several significant challenges: (i) Large-scale paired data requirement. To achieve high-quality results, text-to-image models rely heavily on large-scale datasets of (text, image) pairs collected from the internet. Due to the requirement of paired data, these models cannot be applied to new or customized domains with only unannotated images. (ii) Computational cost and efficiency. 
Training these models on highly complex distributions of natural images usually requires scaling the size of the model, data, batch-size, and training time, which makes them challenging to train and less accessible to the community. Recently, several works proposed text-to-image models", + "bbox": [ + 169, + 784, + 826, + 924 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "trained without an explicit paired text-image datasets. Liu et al. (2021) performed a direct optimization to a pre-trained model based on a CLIP loss (Radford et al., 2021). Such approaches are time-consuming, since they require optimization for each input. Zhou et al. (2021) proposed training with CLIP image embedding perturbed with Gaussian noise. However, to achieve high-quality results, an additional model needs to be trained with an annotated text-image pairs dataset.", + "bbox": [ + 169, + 103, + 823, + 176 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we introduce a novel generative model, kNN-Diffusion, which tackles these issues and progresses towards more accessible models for the research community and other users. Our model leverages a large-scale retrieval method, $k$ -Nearest-Neighbors (kNN) search, in order to train the model without an explicit text-image dataset. Specifically, our diffusion model is conditioned on two inputs: (1) image embedding (at training time) or text embedding (at inference), extracted using pre-trained CLIP encoder, and (2) kNN embeddings, representing the $k$ most similar images in the CLIP latent space. 
During training, we assume that no paired text is available, hence condition only on CLIP image embedding and on $k$ additional image embeddings, selected using the retrieval model. At inference, only text inputs are given, so instead of image embeddings, we use the text embedding that shares a joint embedding space with the image embeddings. Here, the kNN image embeddings are retrieved using the text embeddings.", + "bbox": [ + 169, + 180, + 826, + 335 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The additional kNN embeddings have three main benefits: (1) they extend the distribution of conditioning embeddings and ensure the distribution is similar in train and inference, thus helping to bridge the gap between the image and text embedding distributions (see Fig. 5); (2) they teach the model to learn to generate images from a target distribution by using samples from that distribution. This allows generalizing to different distributions at test time and generating out-of-distribution samples; (3) they hold information that does not need to be present in the model, which allows it to be substantially smaller. We demonstrate the effectiveness of our kNN approach in Sec. 4.", + "bbox": [ + 169, + 339, + 826, + 440 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To assess the performance of our method, we train our model on two large-scale datasets: the Public Multimodal Dataset (Singh et al., 2021) and an image-only stickers dataset collected from the Internet. We show state-of-the-art zero-shot results on MS-COCO (Lin et al., 2014), LN-COCO (Pont-Tuset et al., 2020) and CUB (Wah et al., 2011). To further demonstrate the advantage of retrieval methods in text-to-image generation, we train two diffusion backbones using our kNN approach: continuous (Ramesh et al., 2022) and discrete (Gu et al., 2021). In both cases we outperform the model trained without kNN. In comparison to alternative methods presented in Sec. 
4, we achieve state-of-the-art results in both human evaluations and FID score, with only 400 million parameters and 7 seconds inference time.", + "bbox": [ + 169, + 444, + 826, + 570 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Lastly, we introduce a new approach for local and semantic manipulations that is based on CLIP and kNN, without relying on user-provided masks. Specifically, we fine-tune our model to perform local and complex modifications that satisfies a given target text prompt. For example, given the teddy bear's image in Fig. 4, and the target text \"holds a heart\", our method automatically locates the local region that should be modified and synthesizes a high-resolution manipulated image in which (1) the teddy bear's identity is accurately preserved and (2) the manipulation is aligned with the target text. We demonstrate our qualitative advantage by comparing our results with two state-of-the-art models, Text2Live (Bar-Tal et al., 2022) and Textual Inversion (Gal et al., 2022), that perform image manipulations without masks (Fig. 4, 21 and 22).", + "bbox": [ + 169, + 575, + 826, + 705 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We summarize the contributions of this paper as follows: (1) We propose kNN-Diffusion, a novel and efficient model that utilizes a large-scale retrieval method for training a text-to-image model with only pre-trained multi-modal embeddings, but without an explicit text-image dataset. (2) We demonstrate efficient out-of-distribution generation, which is achieved by substituting retrieval databases. (3) We present a new approach for local and semantic image manipulation, without utilizing masks. 
(4) We evaluate our method on two diffusion backbones, discrete and continuous, as well as on several datasets, and present state-of-the-art results compared to baselines.", + "bbox": [ + 169, + 708, + 826, + 809 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 825, + 346, + 842 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Text-to-image models. Text-to-image generation is a well-studied task that focuses on generating images from text descriptions. While GANs (Xu et al., 2018; Zhu et al., 2019; Zhang et al., 2021) and Transformer-based methods (Ramesh et al., 2021; Gafni et al., 2022; Yu et al., 2022; Ding et al., 2021) have shown remarkable results, recently impressive results have been attained with dis", + "bbox": [ + 169, + 862, + 823, + 920 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e97768b6a12cbc1787d5797a2c65bdea7f16be9729671609455963914649f9bc.jpg", + "image_caption": [ + "Figure 2: Qualitative comparisons with baselines. Nearest Neighbor is the first kNN of the text in PMD dataset." + ], + "image_footnote": [], + "bbox": [ + 197, + 101, + 802, + 391 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "crete (Gu et al., 2021) and continuous (Nichol et al., 2021; Sahara et al., 2022; Ramesh et al., 2022; Rombach et al., 2022) diffusion models. Most recent works trained diffusion models conditioned on text embeddings extracted using a pre-trained text encoder (Saharia et al., 2022; Yu et al., 2022) or image embedding extracted using CLIP (Ramesh et al., 2022). While producing impressive results, all previous works described above are supervised and trained with paired text-image datasets. 
Several works have proposed training text-to-image models without an explicit text-image dataset. FuseDream (Liu et al., 2021) proposed a direct optimization to a pre-trained generative model based on CLIP loss. This method relies on a pre-trained GAN and requires a time-consuming optimization process for each image. LAFITE (Zhou et al., 2021) recently demonstrated text-to-image generation results without requiring paired text-image datasets. Here, the CLIP embeddings are used interchangeably at train and test to condition a GAN-based model. The joint text-image embedding enables inference given a text input, whereas in training the model is fed with the visual embedding only. However, the gap between the text and image distributions in the joint embeddings space leads to results with substantially lower quality, as we show in our experiments. To overcome this gap, LAFITE added noise to the image embeddings during training. Our remedy to this gap is to condition the model on the retrieval of an actual image embeddings, using a text-image joint space.", + "bbox": [ + 169, + 422, + 823, + 647 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Retrieval for generation. The Information Retrieval (IR) literature tackles the challenge of retrieving a small amount of information from a large database, given a user's query. A simple, yet efficient retrieval mechanism is to retrieve the $K$ nearest neighbors (kNN) between the query and the entities in the database in some pre-calculated embedding space (Bijalwan et al., 2014). The database allows the model to leverage extensive world-knowledge for its specific task Borgeaud et al. (2021). Recently, language models were augmented with a memory component, allowing them to store representations of past inputs (Wu et al., 2022). The latter were then queried using a lookup operation, improving performance in various benchmarks and tasks. 
Retrieval models have been used for various tasks in learning problems, for example, language modeling (Borgeaud et al., 2021), machine translation (Gu et al., 2018), question answering (Lee et al., 2019) and image generation (Tseng et al., 2020; Qi et al., 2018). RetrieveGAN (Tseng et al., 2020) uses a differentiable retrieval module for image generation from a scene description, RetrieveFuse (Siddiqui et al., 2021) proposed a neural 3D scene reconstruction based on a retrieval system. SIMS (Qi et al., 2018) proposed generating an image using semantic layout and compatible image segments that are retrieved from image segments database, and (Iskakov, 2018) showed that the use of retrieval database in inpainting task significantly boosts visual quality. In this work we utilize the kNN retrieval mechanism over the shared text-image embedding space, CLIP (Radford et al., 2021). Using extensive ablation studies, we show the importance of the retrieval model both for training and inference, and demonstrate its large impact on performance. kNN-Diffusion significantly outperforms prior work", + "bbox": [ + 169, + 655, + 826, + 921 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7c67ea86007bb55c12b7c5449835616eb75f13a14aec0d995c8c2d882151d3de.jpg", + "image_caption": [ + "Figure 3: The overall framework of our kNN-Diffusion model. In both training and inference, the decoder is conditioned on CLIP embedding, and kNN image embeddings. During training, we condition the model on image CLIP embedding, and its kNN image embeddings extracted using the retrieval method. 
At inference time, given an input text, the kNN image embeddings are retrieved based on the CLIP text embedding that shares a joint embedding space with the image embedding." + ], + "image_footnote": [], + "bbox": [ + 207, + 103, + 790, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "with zero-shot FID of 12.5, including RDM (Blattmann et al., 2022)(with FID of 22.1), a concurrent work which similarly to our approach, proposes conditioning LDM (Rombach et al., 2022) on kNN.", + "bbox": [ + 169, + 364, + 823, + 395 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Multi-modal feature learning. Learning a joint and aligned feature space for several modalities is challenging, as it requires alignment between the modalities (paired datasets), whose distributions may vary. Specifically, the joint feature space of vision-and-language has been a long-standing problem. CLIP (Radford et al., 2021) successfully tackled this by leveraging contrastive learning over a large dataset of text-image pairs. BLIP (Li et al., 2022), (Mu et al., 2021) and FLAVA (Singh et al., 2021), followed this idea and further improved the joint representation. The joint representation was shown to hold a strong semantic alignment between the two modalities, enabling image generation (Liu et al., 2021; Wang et al., 2022), image manipulation (Patashnik et al., 2021; Avrahami et al., 2022b), and image captioning (Mokady et al., 2021). 
In this work we leverage the joint representation in two ways: (i) enabling textless training with only visual data, while using text at inference time, and (ii) creating an efficient embedding space for the use of the retrieval model.", + "bbox": [ + 169, + 404, + 826, + 556 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 METHOD", + "text_level": 1, + "bbox": [ + 171, + 577, + 284, + 592 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our main goal is to facilitate language-guided generation of user-specified concepts while using an images-only dataset during training. A possible way to achieve this goal is to use a shared text-image encoder that will map text-image pairs into the same latent space, thus allowing training with an image embedding, and inferring from text embedding. A candidate for this encoder is CLIP, which has been trained with a contrastive loss on a large-scale dataset of text-image pairs. However, as we show quantitatively in Tab. 1, 2 and qualitatively in Fig. 15, 16, 5, CLIP embeddings alone cannot accurately bridge the gap between the text and image distributions. In order to reduce this gap, several methods have been proposed. The closest work to ours is LAFITE, which perturbs the CLIP image embedding with adaptive Gaussian noise. Under the assumption that there is a large paired text-image dataset, Ramesh et al. (2022) have proposed a prior that is used during inference, and is trained to generate possible CLIP image embeddings from a given text caption. In this regard, we propose using a large-scale and non-trainable image embedding index as an integral part of the diffusion process. Our method, kNN-Diffusion, assumes that only image data and a pre-trained multi-modal text-image encoder are provided during training. As shown in Fig. 
3, our model is comprised of three main components: (1) A multi-modal text-image encoder (CLIP); (2) A retrieval model - A data structure containing image embeddings, which is indexed for a fast kNN search; (3) An image generation network - A trainable diffusion-based image generation model, conditioned on the projected retrievals. For both training and inference, the image generation network is conditioned on $K$ additional image embeddings, chosen using the retrieval model to ensure a similar distribution of the condition in training and inference. The following sections describe these components.", + "bbox": [ + 169, + 607, + 823, + 886 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Retrieval model. Our retrieval model has three non-trainable modules: a pre-trained text encoder $f_{txt}$ (CLIP text encoder), a pre-trained image encoder $f_{img}$ (CLIP image encoder) and", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/579bbd4893194751e1ab3f8742480c70f4607a92cf444948529fb6d5d0f104c0.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 207, + 116, + 254, + 181 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4b8e94270980c653d26e27432d1e6cd3aa4206dafbdb0dbfd35b081947be7cb9.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 207, + 191, + 256, + 255 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2c17f90051c736103d1823061d8c78f14b3d279f254be24f3895718b71cc8801.jpg", + "image_caption": [ + "Original", + "Figure 4: Results for text-guided image manipulations without using masks. 
The original image is shown in the left column, our manipulated images are shown in the center. The images of Bar-Tal et al. (2022); Gal et al. (2022) were generated using the authors' official code. The full comparison is available in the supplement." + ], + "image_footnote": [], + "bbox": [ + 205, + 268, + 259, + 316 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6dfcbf0f062811cca6b907aa14f53554b965acab7fa4f35009db77e0751175fc.jpg", + "image_caption": [ + "Joker" + ], + "image_footnote": [], + "bbox": [ + 277, + 117, + 325, + 181 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e6dad06b6f2da77a17e939e95ea4fb50b7482acc0d60cfeb59145b89810d1248.jpg", + "image_caption": [ + "Black shirt" + ], + "image_footnote": [], + "bbox": [ + 277, + 191, + 328, + 255 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a3619c5eec9d2b40f7bce5719ac0edc5bff61af800e28d441b9747b93ac2d30c.jpg", + "image_caption": [ + "With a hat" + ], + "image_footnote": [], + "bbox": [ + 277, + 270, + 330, + 315 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d2f607fc0ed802f1effbcd6a458785f54be0be4cc29e60bc1d72ebadb7d36d02.jpg", + "image_caption": [ + "Boxer" + ], + "image_footnote": [], + "bbox": [ + 339, + 117, + 385, + 181 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0366ab1e3934b7293362b2056ea6234a046c4f084daee3f72b70da4b6f66152e.jpg", + "image_caption": [ + "Blue pants" + ], + "image_footnote": [], + "bbox": [ + 339, + 191, + 385, + 255 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e6d5c21e59f3b2ceba83fdc167c1806d0922f4c475f1079e3931ae6e76205c38.jpg", + "image_caption": [ + "Sad" + ], + "image_footnote": [], + "bbox": [ + 336, + 270, + 387, + 315 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1d35403a8c3a35b5d1bd43a6b7b519f7975a69fd342a0dfaef10a2f2a10f3cf1.jpg", + "image_caption": [ + "Angel" + ], + "image_footnote": [], + "bbox": [ + 
400, + 117, + 455, + 181 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b4c381a0a363ceab3d84d929b005e071c8095d92309b123fdb4034e42f8f5ac1.jpg", + "image_caption": [ + "Holds a heart" + ], + "image_footnote": [], + "bbox": [ + 401, + 191, + 452, + 255 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5f9f3fb28c6639fbe0733c63384facb4fa022b553b119d78bdb7d4dff68bca29.jpg", + "image_caption": [ + "Surprised" + ], + "image_footnote": [], + "bbox": [ + 401, + 270, + 455, + 315 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/20f0b18d238b59ac3d43e7dacb8e1cd303672db8e01d1246a6da2462597f9f2c.jpg", + "image_caption": [ + "Durs", + "Rainbow" + ], + "image_footnote": [], + "bbox": [ + 467, + 117, + 504, + 181 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e964c7fcd2ab10dc812b3ef695a0404442e4b4b328977d623311f8a173891a8d.jpg", + "image_caption": [ + "Sitting" + ], + "image_footnote": [], + "bbox": [ + 460, + 191, + 506, + 255 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2686350b11e0a56bfc7b6344f08c6bcb72c4e155d30550175a32402aa9a5cfc5.jpg", + "image_caption": [ + "Angry" + ], + "image_footnote": [], + "bbox": [ + 460, + 270, + 511, + 315 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/633cb565ffaab13da90cf41ddd7e361e5e34703e148d139b1230dbc388dae9a8.jpg", + "image_caption": [ + "Devil" + ], + "image_footnote": [], + "bbox": [ + 519, + 117, + 573, + 181 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/48448da441532e5798cd7725fb5f32ebde3cef9ae794ae531001e7d4a26006d4.jpg", + "image_caption": [ + "With a tie" + ], + "image_footnote": [], + "bbox": [ + 521, + 191, + 568, + 255 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e9cf1c391efacf186a0c2bb30f9a62dd0a49e6fc40f96395ed3c42be55576e69.jpg", + "image_caption": [ + "Holds a heart" + ], + "image_footnote": [], + "bbox": [ + 519, + 270, + 573, + 315 + ], + 
"page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1b3ae60b2f4ccdaefc9dbd9a77ad8dac6527dfb342539422c0d5ba7e8aebb439.jpg", + "image_caption": [ + "Ghost" + ], + "image_footnote": [], + "bbox": [ + 581, + 117, + 625, + 181 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/ad82962b578117c95c04f7904453c8eca887582ec510e9a97362bdf438b67189.jpg", + "image_caption": [ + "Raising left hand" + ], + "image_footnote": [], + "bbox": [ + 581, + 191, + 630, + 255 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8fb18b64e01e6c8a971e4fe8d8a72b2d6977d50c03e507bb78efa5b64a3d3e82.jpg", + "image_caption": [ + "With a tie" + ], + "image_footnote": [], + "bbox": [ + 580, + 270, + 633, + 315 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2b556d99987c7f3687c1e67e0fe618451a4d24b533ac155b62cac81d4ff2bda9.jpg", + "image_caption": [ + "Text2LIVE", + "Ghost", + "Textual Inversion" + ], + "image_footnote": [], + "bbox": [ + 642, + 117, + 718, + 181 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/81df2f5c024d732fa003ce32c48891881e25dac992dc511c46196b07673a1f61.jpg", + "image_caption": [ + "Raising left hand" + ], + "image_footnote": [], + "bbox": [ + 642, + 191, + 709, + 255 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/46d26f01a10bd02a596093168ec69d23b6bda6925261a129fa86e28edfee4003.jpg", + "image_caption": [ + "With a tie" + ], + "image_footnote": [], + "bbox": [ + 647, + 268, + 710, + 315 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4189ebd5211eee89c42f9d0cd9ff08baf0cba0c6d9dec68d1adcfb9f7e073e02.jpg", + "image_caption": [ + "Ghost" + ], + "image_footnote": [], + "bbox": [ + 736, + 117, + 777, + 181 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4fa44d3956e5ffc14fde983bb79ac0b8b0f6db0cb2f8b06091b762a26414dab5.jpg", + "image_caption": [ + "Raising left hand" + ], + "image_footnote": [], + "bbox": [ + 732, + 191, + 777, 
+ 255 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/12af7563b8668dd53db8e9e8d932d3be49014ef20ca441f225bb62d1ac4e670a.jpg", + "image_caption": [ + "With a tie" + ], + "image_footnote": [], + "bbox": [ + 723, + 270, + 789, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "an index $\\mathcal{H}$ . The encoders map text descriptions and image samples to a joint multi-modal $d$ -dimensional feature space $\\mathbb{R}^d$ . The index stores an efficient representation of the images database - $\\mathcal{H} := \\{f_{img}(i) \\in \\mathbb{R}^d | i \\in \\mathcal{I}\\}$ where $\\mathcal{I}$ denotes the dataset of images. During training, we use the index to efficiently extract the $k$ nearest neighbors in the feature space of the image embedding $f_{img}(\\mathbf{I}) \\in \\mathbb{R}^d - \\mathrm{knn}_{img}(\\mathbf{I}, k) := \\arg \\min_{h \\in \\mathcal{H}}^k \\mathbf{s}(f_{img}(\\mathbf{I}), h)$ where $\\mathbf{s}$ is a distance function and $\\arg \\min^k$ outputs the minimal $k$ elements. The set $\\{f_{img}(\\mathbf{I}), \\mathrm{knn}_{img}(\\mathbf{I}, k)\\}$ is used as the condition to the generative model. During inference, given a query text $t$ , an embedding $f_{txt}(t)$ is extracted. The generative model is conditioned on this embedding and its $k$ nearest neighbors from the database - $\\mathrm{knn}_{txt}(t, k) := \\arg \\min_{h \\in \\mathcal{H}}^k \\mathbf{s}(f_{txt}(t), h)$ . During training, we add embeddings of real images, by applying the retrieval method to the input image embedding. The extracted kNN should have a large enough distribution to cover the potential text embedding. During inference, the kNN are retrieved using the text embedding (See Fig. 17). In all of our experiments we use the cosine similarity metric as the distance function $\\mathbf{s}$ , $k = 10$ for the number of nearest neighbors and $d = 512$ . The full implementation details can be found in Sec. 
6.6 in the supplement.", + "bbox": [ + 169, + 397, + 823, + 598 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Image generation network. In order to demonstrate the robustness of our method, we apply our kNN approach on two different diffusion backbones: Discrete (Gu et al., 2021) and Continuous (Nichol et al., 2021; Sohl-Dickstein et al., 2015; Ho et al., 2020; Dhariwal & Nichol, 2021). Although very different in practice, these models share the same theoretical idea. Let $x_0 \\sim q(x_0)$ be a sample from our images distribution. A forward diffusion process is a Markov chain that adds noise at each step $q(x_n|x_{n-1})$ . The reverse process, $p_\\theta(x_{n-1}|x_n,x_0)$ , is a denoising process that removes noise from an initialized noise state. At inference time, the model can generate an output, starting with noise and gradually removing it using $p_\\theta$ . For additional background on diffusion models please refer to Sec. 6.1 in the supplement.", + "bbox": [ + 169, + 608, + 823, + 733 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the discrete diffusion model, $q(x_{n}|x_{n - 1})\\coloneqq v^{T}(x_{n})\\mathbf{Q}_{n}v(x_{n - 1})$ where $v(x_{n})$ is a one-hot vector with entry 1 at $x_{n}$ , and $\\mathbf{Q}_n$ is a transition matrix, modeling the probability to move from state $x_{n - 1}$ to $x_{n}$ , using uniform probability over the vocabulary and a pre-defined probability for additional special [MASK] token. We can compute the reverse transition distribution according to: $p_{\\theta}(x_{n - 1}|x_n,y)\\coloneqq \\sum_{\\hat{x}_0 = 1}^k q(x_{n - 1}|x_n,\\hat{x_0})p_\\theta (\\hat{x_0} |x_n,x_0,y)$ where $x_0$ is a discrete vector, tokenized by the VQGAN (Esser et al., 2021) encoder and $y$ is the conditioning signal. 
For modeling $p_{\\theta}$ we have followed (Gu et al., 2021) and used a conditional Transformer (Vaswani et al., 2017).", + "bbox": [ + 169, + 733, + 823, + 832 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the continuous diffusion model, $q(x_{n}|x_{n - 1}) \\coloneqq \\mathcal{N}(x_{n};\\sqrt{\\alpha_{n}} x_{n - 1},(1 - \\alpha_{n})\\mathbf{I})$ and $p_{\\theta}(x_{n - 1}|x_n,y)\\coloneqq \\mathcal{N}(\\mu_\\theta (x_n,y),\\Sigma_\\theta (x_n,y))$ . Here, the noise function is Gaussian noise. Following (Ho et al., 2020; Nichol et al., 2021) we trained a model $\\epsilon_{\\theta}$ to predict the added noise using a standard mean-squared error loss: $L\\coloneqq E_{n\\sim [1,N],x_0\\sim q(x_0),\\epsilon \\sim \\mathcal{N}(0,\\mathbf{I})}[||\\epsilon -\\epsilon_{\\theta}(x_n,n,y)||^2]$ where $\\epsilon_{\\theta}$ is a U-net model and $y$ is the conditioning signal.", + "bbox": [ + 169, + 832, + 823, + 902 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a7f78150df57b233a4eb57c80d2816747d4dba1175c0d61e3cf3915e7d74205c.jpg", + "image_caption": [ + "Figure 5: tSNE visualization of 500 random text-image CLIP embedding pairs taken from COCO validation. The leftmost figure demonstrates the gap between the text and image distributions. By gradually adding kNN to the mean CLIP embedding of the text, the gap decreases, demonstrating the importance of the kNN." + ], + "image_footnote": [], + "bbox": [ + 196, + 104, + 803, + 213 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/34aecec5513151a693e5c6b12d64e94c9f1fff8d249b85ed5ff95a8b288204be.jpg", + "image_caption": [ + "Figure 6: FID on MS-COCO, including models trained on image-only datasets and text-image datasets." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 297, + 460, + 417 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/ccecfb952f4c10e303941ce43ccaed825d7b23f09dafb95a3faf5b685a00c8ca.jpg", + "table_caption": [ + "Table 1: Results for zero-shot Text-to-Image generation on the MS-COCO, CUB and LN-COCO test sets. Image-quality and Text-alignment report the percentage of majority human raters votes in favor of our method when comparing between a certain model and ours." + ], + "table_footnote": [], + "table_body": "
ModelMS-COCOCUBLN-COCO
FID↓Im. qual.Txt align.FID↓Im. qual.Txt align.FID↓Im. qual.Txt align.
LAFITE26.972.165.389.774.059.642.868.461.9
FuseDream21.264.079.350.279.160.937.571.159.0
no-kNN32.870.868.395.181.061.265.061.459.8
Ours12.5--42.9--35.6--
", + "bbox": [ + 483, + 354, + 838, + 455 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In both cases, we condition our model on $y = (f_{img}(x_0), \\mathrm{knn}_{img}(x_0, k))$ where $f_{img}(x_0)$ is the CLIP image embedding, $\\mathrm{knn}_{img}(x_0, k)$ is the $k$ nearest neighbors in the feature space of the image embedding. Following (Ramesh et al., 2022; Rombach et al., 2022) conditional injection, we condition our model on the image CLIP embedding, and the kNN clip embeddings by applying cross attention in the attention layers of the architecture. We sample both our models using Classifier Free Guidance (CFG) (Nichol et al., 2021; Ho & Salimans, 2021). Since CFG was originally proposed for continuous models, we propose a method for using it with discrete models as well. Full implementation details of the discrete and continuous models can be found in Sec. 6.7 and Sec. 6.8, respectively, in the supplement.", + "bbox": [ + 169, + 488, + 823, + 612 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 TEXT-ONLY IMAGE MANIPULATION", + "text_level": 1, + "bbox": [ + 171, + 631, + 459, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The majority of previous works in the task of image manipulation either rely on user-provided masks (Nichol et al., 2021; Avrahami et al., 2022b;a), or are limited to global editing (Crowson et al., 2022; Kim et al., 2022). Recently, several works (Bar-Tal et al., 2022; Hertz et al., 2022; Gal et al., 2022) have made progress with local manipulations without relying on user edited masks. 
Nevertheless, most of the techniques suffer from several shortcomings: (1) They enable local texture changes, yet cannot modify complex structures, (2) they struggle to preserve the identity of the object, for example, when manipulating humans, (3) they require optimization for each input.", + "bbox": [ + 169, + 652, + 823, + 751 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We address these issues by extending kNN-Diffusion to perform local and semantic-aware image manipulations without any provided mask. Illustration of the approach is provided in Fig. 18 and Fig. 19 in the supplement. For this task, the model is trained to predict the original image from a manipulated version. Specifically, we create a manipulated version of the image, which differs from the original image only in some local area. Given a random local area $M$ in the image I, the manipulated image $\\mathrm{I}_{\\text{manip}}$ is constructed by replacing the area with the corresponding nearest neighbor: $\\mathrm{I}_{\\text{manip}} = \\mathrm{I} \\cdot (1 - M) + \\mathrm{nn}_{img}(\\mathrm{I}, 1) \\cdot M$ , where $\\mathrm{nn}_{img}(\\mathrm{I}, 1)$ is the nearest neighbor obtained after aligning it with I using the ECC alignment algorithm (Evangelidis & Psarakis, 2008). The model then receives as input the manipulated image, together with the CLIP embedding of the original image only in the local area: $f_{img}(\\mathrm{I} \\cdot M)$ . This CLIP embedding represents the required modification that should be applied to the manipulated image in order to predict the original image. 
During inference, instead of using the CLIP embedding of the local area, the desired modification is", + "bbox": [ + 169, + 756, + 826, + 924 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/fea0278a9b53250e31495932e71823f83bfff0c4ff20bb67792c0d0453fb0fe2.jpg", + "table_caption": [ + "Table 2: Results on the stickers dataset. We report the percentage of human raters prefer our method over the baselines with respect to image quality and text alignment. Discrete no-kNN refers to VQ-diffusion, and Continuous no-kNN, to DALL-E2 decoder, both trained without an explicit text-image dataset." + ], + "table_footnote": [], + "table_body": "
ModelFID↓Ours DiscreteOurs Continuous
Image qualityText alignmentImage qualityText alignment
DALL-E2+ClipCap55.571.669.267.068.3
LAFITE58.763.559.976.071.2
no-kNN52.772.167.666.869.4
Ours40.8----
", + "bbox": [ + 217, + 142, + 781, + 234 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "represented using the CLIP embedding of the user text query. We modified the model to be capable of receiving as a condition both the manipulated image and the CLIP embedding of the local area.", + "bbox": [ + 169, + 250, + 823, + 277 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 297, + 328, + 313 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "First, we conduct qualitative and quantitative comparisons on MS-COCO, LN-COCO and CUB datasets. To further demonstrate the advantage of our method, we provide comparison on an image-only stickers dataset, where we apply our approach on two diffusion backbones. Next, we demonstrate image manipulation and out-of-distribution capabilities. Finally, to better assess the effect of each contribution, an ablation study is provided.", + "bbox": [ + 169, + 321, + 826, + 393 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Datasets and Metrics. For photo-realistic experiments, our model was trained only on the images (omitting the text) of a modified version of the Public Multimodal Dataset (PMD) used by FLAVA (Singh et al., 2021). More information about the dataset is available in Sec. 6.4 of the supplement. To further demonstrate the capabilities of our method, we collected 400 million sticker images from the web, containing combinations of concepts such as objects, characters/avatars and text. The collected stickers do not have paired text, and are substantially different from photorealistic data. 
Furthermore, since they have no paired text, they were not part of CLIP's training data, which makes the text-to-image generation task more challenging.", + "bbox": [ + 169, + 398, + 823, + 508 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation metrics are based on objective and subjective metrics: (i) FID (Heusel et al., 2017) is an objective metric used to assess the quality of synthesized images, (ii) human evaluation - we ask human raters for their preference, comparing two methods based on image quality and text alignment. We used 600 image pairs; five raters rated each pair. The results are shown as a percentage of majority votes in favor of our method over the baselines. We report the full human evaluation protocol in the supplement. We chose to omit Inception-Score, since it is shown by Barratt & Sharma (2018) to be a misleading metric for models that were not trained on Imagenet.", + "bbox": [ + 169, + 510, + 826, + 608 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 QUALITATIVE AND QUANTITATIVE RESULTS", + "text_level": 1, + "bbox": [ + 171, + 625, + 522, + 638 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We begin by comparing our model, trained on the PMD dataset, with the previous works LAFITE and FuseDream, that trained on image-only datasets. To demonstrate the advantage of using a retrieval method in text-to-image generation, we trained a model variant, no-kNN. This baseline was trained solely on image embeddings (omitting the kNN), while during inference, the images were generated using the text embedding. Tab. 1 displays zero-shot results on three different datasets: MS-COCO, CUB and LN-COCO. We follow the evaluation protocol of LAFITE, reporting our results on 30,000 images from MS-COCO validation set without training, nor using its training partition in the kNN index. Similarly, we follow LAFITE for CUB and LN-COCO evaluation. 
As can be seen, our model achieves the lowest FID score in all scenarios. In addition, human evaluations rate our method as better aligned to text and with the highest images quality. In Fig. 2, 15 and 11 we present a qualitative comparison between the methods. One can observe that while the simple retrieval baseline outputs non-generated images with high-quality, the images generated by our method are more faithful to the input text. To further demonstrate the effectiveness of our method, we present in Fig. 6 a comparison of our model with the latest text-to-image models trained on paired text-image datasets: DALL-E, CogView, VQ-Diffusion, GLIDE, LDM, Make-A-Scene, DALL-E2, Parti and Imagen. As can be seen, our model achieves comparable results to recent models trained with full text-image pairs (e.g LDM, GLIDE), despite being trained on an image-only dataset, with significantly lower computational costs. The results demonstrate that leveraging an external retrieval database allows to compensate for different trade-offs, in particular, reducing the number of parameters in the model. Additional samples are provided in Fig. 13 in the supplement.", + "bbox": [ + 169, + 646, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2bbc54b5605c39d4eb95ed54edeb6ccbc924f0a581d1c0ce6025122e17ce139e.jpg", + "image_caption": [ + "Figure 7: Comparison between various indexes used by the same model. (1) Aesthetic. Images from the first quantile of an aesthetic classifier, (2) Unaesthetic. Images from the last quantile of an aesthetic classifier, (3) Image search engine. Images retrieved from Google Images, (4) The stickers index." 
+ ], + "image_footnote": [], + "bbox": [ + 207, + 104, + 799, + 377 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Text-to-sticker generation. As the sticker dataset does not have paired text, and is substantially different from photo-realistic data, it allows us to illustrate the advantage of our model on an image-only dataset. A selection of stickers generated by our model is presented in Fig. 1 and Fig. 14, 12. To demonstrate the importance of using kNN on image-only datasets, we evaluate our approach on two diffusion backbones. To this end, we trained a continuous diffusion model (Ramesh et al., 2022) and a discrete diffusion model (Gu et al., 2021), both conditioned on the kNN image embeddings. For each backbone, we compare our method with the following baselines: (1) no-kNN - this baseline was trained using both the continuous and the discrete methods conditioned only on image CLIP embedding, without using kNN. In the discrete case, we trained a VQ-diffusion model, while in the continuous case, we trained a re-implementation of DALL-E2's decoder (without prior). (2) DALL-E2+ClipCap baseline - here, we first captioned the entire sticker dataset using ClipCap (Mokady et al., 2021), then trained DALL-E2 decoder on the captioned dataset. (3) LAFITE - we trained LAFITE language-free model on our stickers dataset using the authors' published code. We present the results in Tab. 2. The FID is calculated over a subset of 3,000 stickers, generated from the ClipCap captioned dataset. As can be seen, our model achieves the lowest FID score. In addition, it outperforms all baselines in human evaluation comparison, using continuous and discrete backbones. 
In particular, compared with the same model trained without kNN, our model achieves significantly higher favorability in both text alignment and image quality.", + "bbox": [ + 169, + 445, + 823, + 696 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 APPLICATIONS", + "text_level": 1, + "bbox": [ + 171, + 712, + 318, + 726 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Text-only image manipulation. We demonstrate the manipulation capabilities of our model in Fig. 1, 4 and 20. Furthermore, we qualitatively compare our model with Text2LIVE (Bar-Tal et al., 2022) and Textual Inversion (Gal et al., 2022), using the authors' published code. Text2LIVE proposed generating an edit layer that is composed over the original input, using a generator trained for each training image. Textual Inversion utilized the pre-trained Latent Diffusion model to invert the input image into a token embedding. The embedding is then used to compose novel textual queries for the generative model. Fig. 4 shows representative results, and the rest are included in Fig. 21 and 22 in the supplement. In contrast to our model, baseline methods lack text correspondence or they do not preserve the identity of the object. Since Text2LIVE is optimized to perform local changes, it has difficulty changing the structure of the object (e.g. the \"raising his hand\" example in Fig. 4). Textual Inversion baseline changes the identity of the object because it struggles reconstructing the textual representation of the source image. 
Our model, on the other hand, can perform challenging manipulations that are aligned with the text, while preserving the object identity.", + "bbox": [ + 169, + 738, + 823, + 919 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/950754645d537e674a9ed1701ea97dec349ad1022705da81f9f70c7f90f82619.jpg", + "image_caption": [ + "Figure 8: Mean aesthetics score of the generated images as a function of the conditioned kNN mean aesthetics score." + ], + "image_footnote": [], + "bbox": [ + 174, + 102, + 377, + 220 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/ed141c57ea6e1c551282a3f9907ddce50fcf676a4c7a6940c44aa759ddcc1a1c.jpg", + "image_caption": [ + "Figure 9: MS-COCO test FID score on various K's in: (1) Zero-Shot (2) Index includes MS-COCO train subset. No kNN trained with kNN, but did not employ kNN in inference." + ], + "image_footnote": [], + "bbox": [ + 395, + 102, + 609, + 220 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/6ed05be790783ff217f588a2c4a2cd028fca55ed587c0697a0e1a02f3ceab8dd.jpg", + "image_caption": [ + "Figure 10: MS-COCO test FID score for different model sizes. As can be seen, adding kNN to the model allows it to be smaller, while having better performance." + ], + "image_footnote": [], + "bbox": [ + 627, + 104, + 821, + 220 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Out-of-distribution generation. Using the retrieval index as part of the generation process enables using different databases during inference, without fine-tuning. This allows generating images from distributions that were not part of the training set, enabling out-of-distribution generation. 
This novel capability is demonstrated with the same model trained on PMD, using three different retrieval databases: (i) A stickers database presented in Sec. 4. (ii) Aesthetic database: This database is constructed by filtering images according to a classifier score. Let $C$ be a classifier that for each image $i \\in I$ outputs a score $s = C(i)$ . This classifier enables filtering the kNN using $L \\leq s < H$ , where $L$ and $H$ are low and high thresholds, respectively. Here, we use an open source pre-trained aesthetics classifier $A$ (Christoph Schuhmann, 2022): For each text input $t \\in T$ , we apply $A$ on the kNN, and then divide the kNN into five equal quantiles based on $A$ score. As can be seen in Fig. 8, using kNN with higher aesthetics score results in generated images with higher aesthetics mean score. (iii) Image search engine: Generative models are stationary in the sense that they are unable to learn new concepts after being trained, hence fine-tuning is required to represent new styles and concepts. Here, we use an online image search engine, which allows the model to adapt to new data without additional fine-tuning. A qualitative comparison of all three methods is shown in Fig.7.", + "bbox": [ + 169, + 311, + 826, + 518 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 ABLATION STUDY", + "text_level": 1, + "bbox": [ + 171, + 536, + 344, + 550 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We conclude our experiments with an ablation study, to quantify the contribution of our different components. We provide ablation study on index size and different kNN conditioning approaches in Sec. 6.5 of the supplement. Number of nearest neighbors. The results in Fig. 9 demonstrate the importance of applying the retrieval mechanism during training and inference. Here, we evaluate our model, trained on PMD dataset, with different numbers of kNN during inference. 
Furthermore, we examined the baseline no-kNN, in which during inference, the model is conditioned only on the text embedding $f_{txt}(t)$ , without using kNN. Best performance is achieved using 10 neighbors. Scalability analysis. To evaluate the effectiveness of our approach at different model sizes, we trained three additional models with varying sizes for both settings - with and without kNN. As can be seen in Fig. 10, utilizing kNN consistently improves performance for all sizes. Furthermore, a performance improvement can be achieved using much smaller models with kNN. For example, the 35M kNN model outperforms the 400M model without kNN.", + "bbox": [ + 169, + 561, + 826, + 729 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 750, + 320, + 763 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "\"We shall always find, that every idea which we examine is copied from a similar impression\", Hume (1748). In this paper, we propose using a large-scale retrieval method in order to train a novel text-to-image model, with only pre-trained multi-modal embeddings, but without an explicit text-image dataset. Our extensive experiments demonstrate that using an external knowledge-base alleviates much of the model's burden of learning novel concepts, enabling the use of a relatively small model. In addition, it provides the model the capability of learning to adapt to new samples, which it only observes during test time. Lastly, we present a new technique utilizing the retrieval method for text-driven semantic manipulations without user-provided masks. 
As evaluated by human studies and automatic metrics, our method is significantly preferable to the baselines in terms of image quality and text alignment.", + "bbox": [ + 169, + 776, + 826, + 917 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 174, + 102, + 287, + 117 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Omri Avrahami, Ohad Fried, and Dani Lischinski. Blended latent diffusion. arXiv preprint arXiv:2206.02779, 2022a.", + "Omri Avrahami, Dani Lischinski, and Ohad Fried. Blended diffusion for text-driven editing of natural images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18208-18218, 2022b.", + "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016.", + "Artem Babenko and Victor Lempitsky. The inverted multi-index. IEEE transactions on pattern analysis and machine intelligence, 37(6):1247-1260, 2014.", + "Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. arXiv preprint arXiv:2204.02491, 2022.", + "Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018.", + "Vishwanath Bijalwan, Vinay Kumar, Pinki Kumari, and Jordan Pascual. Knn based machine learning approach for text and document mining. International Journal of Database Theory and Application, 7(1):61-70, 2014.", + "Andreas Blattmann, Robin Rombach, Kaan Oktay, Jonas Müller, and Björn Ommer. Semiparametric neural image synthesis. 
In Advances in Neural Information Processing Systems, 2022.", + "Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George van den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. Improving language models by retrieving from trillions of tokens. arXiv preprint arXiv:2112.04426, 2021.", + "Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. Conceptual 12M: Pushing web-scale image-text pre-training to recognize long-tail visual concepts. In CVPR, 2021.", + "Romain Beaumont Christoph Schuhmann. Aesthetic predictor. https://github.com/LAION-AI/aesthetic-predictor, 2022.", + "Katherine Crowson, Stella Biderman, Daniel Kornis, Dashiell Stander, Eric Hallahan, Louis Castricato, and Edward Raff. Vqgan-clip: Open domain image generation and editing with natural language guidance. arXiv preprint arXiv:2204.08583, 2022.", + "Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson. RedCaps: Web-curated image-text data created by the people, for the people. In NeurIPS Datasets and Benchmarks, 2021.", + "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.", + "Prafulla Dhariwal and Alex Nichol. Diffusion models beat gans on image synthesis. arXiv preprint arXiv:2105.05233, 2021.", + "Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, et al. Cogview: Mastering text-to-image generation via transformers. Advances in Neural Information Processing Systems, 34, 2021.", + "Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873-12883, 2021.", + "Georgios D Evangelidis and Emmanouil Z Psarakis. 
Parametric image alignment using enhanced correlation coefficient maximization. IEEE transactions on pattern analysis and machine intelligence, 30(10):1858-1865, 2008." + ], + "bbox": [ + 171, + 125, + 825, + 907 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman. Make-a-scene: Scene-based text-to-image generation with human priors. arXiv preprint arXiv:2203.13131, 2022.", + "Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022.", + "Tiezheng Ge, Kaiming He, Qifa Ke, and Jian Sun. Optimized product quantization for approximate nearest neighbor search. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2013.", + "Jiatao Gu, Yong Wang, Kyunghyun Cho, and Victor OK Li. Search engine guided neural machine translation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018.", + "Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. ArXiv, abs/2111.14822, 2021.", + "Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022.", + "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. 
Advances in neural information processing systems, 30, 2017.", + "Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. In NeurIPS 2021 Workshop on Deep Generative Models and Downstream Applications, 2021.", + "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239, 2020.", + "David Hume. An enquiry concerning human understanding, 1748.", + "Karim Iskakov. Semi-parametric image inpainting. arXiv preprint arXiv:1807.02855, 2018.", + "Herve Jegou, Matthijs Douze, and Cordelia Schmid. Product quantization for nearest neighbor search. IEEE transactions on pattern analysis and machine intelligence, 33(1):117-128, 2010.", + "Jeff Johnson, Matthijs Douze, and Hervé Jégou. Billion-scale similarity search with GPUs. IEEE Transactions on Big Data, 7(3):535-547, 2019.", + "Gwanghyun Kim, Taesung Kwon, and Jong Chul Ye. Diffusionclip: Text-guided diffusion models for robust image manipulation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2426-2435, 2022.", + "Ivan Krasin, Tom Duerig, Neil Alldrin, Andreas Veit, Sami Abu-El-Haija, Serge Belongie, David Cai, Zheyun Feng, Vittorio Ferrari, and Victor Gomes. Openimages: A public dataset for large-scale multi-label and multi-class image classification., 01 2016.", + "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, Michael Bernstein, and Li Fei-Fei. Visual genome: Connecting language and vision using crowdsourced dense image annotations. 2016. URL https://arxiv.org/abs/1602.07332.", + "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. Latent retrieval for weakly supervised open domain question answering. arXiv preprint arXiv:1906.00300, 2019.", + "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. 
arXiv preprint arXiv:2201.12086, 2022." + ], + "bbox": [ + 171, + 102, + 825, + 898 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pp. 740-755. Springer, 2014.", + "Xingchao Liu, Chengyue Gong, Lemeng Wu, Shujian Zhang, Hao Su, and Qiang Liu. Fusedream: Training-free text-to-image generation with improved clip+ gan space optimization. arXiv preprint arXiv:2112.01573, 2021.", + "Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021.", + "Norman Mu, Alexander Kirillov, David Wagner, and Saining Xie. Slip: Self-supervision meets language-image pre-training. arXiv preprint arXiv:2112.12750, 2021.", + "Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021.", + "Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, and K. Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 24. Curran Associates, Inc., 2011. URL https://proceedings.neurips.cc/paper/2011/file/5dd9db5e033da9c6fb5ba83c7a7ebea9-Paper.pdf.", + "Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2085–2094, 2021.", + "Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives. In European Conference on Computer Vision, pp. 647-664. Springer, 2020.", + "Xiaojuan Qi, Qifeng Chen, Jiaya Jia, and Vladlen Koltun. Semi-parametric image synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8808-8816, 2018.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. URL https://arxiv.org/abs/2103.00020.", + "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pp. 8821-8831. PMLR, 2021.", + "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022.", + "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684-10695, 2022.", + "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022.", + "Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. 
In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2556-2565, 2018." + ], + "bbox": [ + 171, + 102, + 825, + 895 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yawar Siddiqui, Justus Thies, Fangchang Ma, Qi Shan, Matthias Nießner, and Angela Dai. Retrievalfuse: Neural 3d scene reconstruction with a database. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 12568-12577, 2021.", + "Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. arXiv preprint arXiv:2112.04482, 2021.", + "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015.", + "Krishna Srinivasan, Karthik Raman, Jiecao Chen, Michael Bendersky, and Marc Najork. Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2443-2449, 2021.", + "Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. The new data and new challenges in multimedia research. CoRR, abs/1503.01817, 2015. URL http://arxiv.org/abs/1503.01817.", + "Hung-Yu Tseng, Hsin-Ying Lee, Lu Jiang, Ming-Hsuan Yang, and Weilong Yang. Retrieved: Image synthesis via differentiable patch retrieval. In European Conference on Computer Vision, pp. 242-257. 
Springer, 2020.", + "Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017.", + "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.", + "Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011.", + "Xintao Wang, Liangbin Xie, Chao Dong, and Ying Shan. Real-esrgan: Training real-world blind super-resolution with pure synthetic data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 1905-1914, 2021.", + "Zihao Wang, Wei Liu, Qian He, Xinglong Wu, and Zili Yi. Clip-gen: Language-free training of a text-to-image generator with clip. arXiv preprint arXiv:2203.00386, 2022.", + "Yuhuai Wu, Markus N Rabe, DeLesley Hutchins, and Christian Szegedy. Memorizing transformers. arXiv preprint arXiv:2203.08913, 2022.", + "Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. Attingan: Fine-grained text to image generation with attentional generative adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1316-1324, 2018.", + "Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022.", + "Han Zhang, Jing Yu Koh, Jason Baldridge, Honglak Lee, and Yinfei Yang. Cross-modal contrastive learning for text-to-image generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 833-842, 2021.", + "Yufan Zhou, Ruiyi Zhang, Changyou Chen, Chunyuan Li, Chris Tensmeyer, Tong Yu, Jiumiang Gu, Jinhui Xu, and Tong Sun. 
LAFITE: towards language-free training for text-to-image generation. CoRR, abs/2111.13792, 2021. URL https://arxiv.org/abs/2111.13792.", + "Minfeng Zhu, Pingbo Pan, Wei Chen, and Yi Yang. Dm-gan: Dynamic memory generative adversarial networks for text-to-image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 5802-5810, 2019." + ], + "bbox": [ + 171, + 102, + 825, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6 APPENDIX", + "text_level": 1, + "bbox": [ + 171, + 102, + 295, + 118 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/2b4b72b1f6799e07a03144ae2c4f93aba3e1f5a77c12dea2590dbbd421563f90.jpg", + "image_caption": [ + "Figure 11: Samples from COCO validation set." 
+ ], + "image_footnote": [], + "bbox": [ + 84, + 119, + 898, + 858 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/ce3ccce82f232b4e283aec494b0c13bea42ffee036d195210f0b4d5966567f78.jpg", + "image_caption": [ + "Chicken waiter serving dinner" + ], + "image_footnote": [], + "bbox": [ + 176, + 146, + 279, + 239 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/771a520485db0685554de117fdb5b8b2b7df526a3ff3d6e29bcb96ac1aa31fa9.jpg", + "image_caption": [ + "Virtual reality" + ], + "image_footnote": [], + "bbox": [ + 300, + 150, + 431, + 238 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/f507f247bbca5b3b141fb07f4338e1f4d52d996e4ae52357691d3ef8a425601b.jpg", + "image_caption": [ + "Monkey eats hamburger" + ], + "image_footnote": [], + "bbox": [ + 455, + 150, + 542, + 239 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/771a07aba8d1ecb57f9a52969daf51707af53d41b78c6e538aae8ae368b0ab5c.jpg", + "image_caption": [ + "Scared fish in a suit" + ], + "image_footnote": [], + "bbox": [ + 583, + 150, + 674, + 239 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/ec99dc93cc1fdb09978c1af0af6fc17374a9a8dffd107442bdacd63fe7eb9afd.jpg", + "image_caption": [ + "Clown unicorn" + ], + "image_footnote": [], + "bbox": [ + 705, + 150, + 810, + 237 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/57ff2b04de1499bafd2d67d5a2bbdd0be2c52b7738157291f8a86ca7ec5bbcfe.jpg", + "image_caption": [ + "Goodnight sleep" + ], + "image_footnote": [], + "bbox": [ + 176, + 280, + 292, + 356 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": 
"images/824a9229933683fab759cf622192aef4dbe56e89a46a3f24a712c0a517da4797.jpg", + "image_caption": [ + "Alpaca in space" + ], + "image_footnote": [], + "bbox": [ + 303, + 270, + 431, + 356 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/e13ed912a0aa103ff4953892cd8ca6a4f47a265f0555caa255b81687631d60be.jpg", + "image_caption": [ + "Gargoyle in a party hat" + ], + "image_footnote": [], + "bbox": [ + 455, + 267, + 563, + 354 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/cee58992bce38c9bb39d79872946f9b972e360e346a27a4a377ac84937b6f626.jpg", + "image_caption": [ + "Cauliflower crying" + ], + "image_footnote": [], + "bbox": [ + 576, + 267, + 681, + 352 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/034c9ada3080558284963679cf8d485383c929b4d00d6e33e37a83c6f5655416.jpg", + "image_caption": [ + "Teddy bear wearing VR headset" + ], + "image_footnote": [], + "bbox": [ + 715, + 271, + 792, + 349 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/39f7776fbe4837569292411ecc74a220567677e534175845d0bc255cea705851.jpg", + "image_caption": [ + "Hot headed cucumber" + ], + "image_footnote": [], + "bbox": [ + 192, + 381, + 290, + 470 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/e8cbcd13f98b1ca4d06fe2a5bf15d8bb1f498bdd74b0affd113156c908e73e28.jpg", + "image_caption": [ + "Painterly pigeon playing a synthesizer" + ], + "image_footnote": [], + "bbox": [ + 310, + 382, + 433, + 463 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/68cd35a40ac2bddb7df95aade633f7c5c7d260abd1ebd3c53e07aa987dbf4b34.jpg", + "image_caption": [ + "3D cat avatar" + ], + "image_footnote": [], + "bbox": [ + 480, + 380, + 537, + 469 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/37563a8081740e796edcb080f0817ace44ed87f4b74ba2510c64426ec84948f5.jpg", + "image_caption": [ + "Music band made of fruits" + ], + "image_footnote": [], + "bbox": [ + 570, + 388, 
+ 697, + 465 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/195a0de413ce4cde3dd7de6434aa6699abd110a0d8a6d502197df029ba65e73f.jpg", + "image_caption": [ + "A confused robot as an impressionist painting" + ], + "image_footnote": [], + "bbox": [ + 720, + 377, + 805, + 464 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/1b74464a4fc7f98a95da6d801edef23976c5e706bf300b37f9ad2ae461479699.jpg", + "image_caption": [ + "Panda playing guitar" + ], + "image_footnote": [], + "bbox": [ + 179, + 503, + 290, + 583 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/213a8157b171ac994347fa846fab6f3e6e5de2656e48e1a24e46010c2658a58c.jpg", + "image_caption": [ + "Sloth doing ballet" + ], + "image_footnote": [], + "bbox": [ + 299, + 513, + 424, + 588 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/8196555c6fd50ab74b29d49fdf3aa0be6e069ec57518a500d87f0e3382d1478f.jpg", + "image_caption": [ + "3D rendering of avatars playing basketball" + ], + "image_footnote": [], + "bbox": [ + 437, + 508, + 573, + 588 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/1b3c6dc79a6a667b80c708688eb6a73cee42591de3e517b784dbc27f9c75a281.jpg", + "image_caption": [ + "Singing otter" + ], + "image_footnote": [], + "bbox": [ + 589, + 497, + 671, + 587 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/de20396cc5f3c57b4d02269470eeaeed40eeb826644a1974c36a13ec4e5c33a4.jpg", + "image_caption": [ + "Muscle man riding a wave in Hawaii" + ], + "image_footnote": [], + "bbox": [ + 694, + 500, + 820, + 587 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/e86297f7018f77b6baa117da5bf750371f060cc3c823697eae58f8d6c9827a5c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 609, + 292, + 699 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/91714325fdca8c125ac3a66b6191ace57a769107f063055d960cfe21d7a07ad5.jpg", + "image_caption": [ 
+ "Dog is programming a computer" + ], + "image_footnote": [], + "bbox": [ + 308, + 619, + 423, + 695 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/47ad1575d14858d0c694d739d3c0411fa031052ec6b2989d46345aadf46561fc.jpg", + "image_caption": [ + "Radha Krishna dancing in a garden" + ], + "image_footnote": [], + "bbox": [ + 447, + 618, + 571, + 696 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/cca27a8f1996860dbfcb73a94b4b345b11a4dd3aefea845360161d16166c1540.jpg", + "image_caption": [ + "Shark wearing a birthday hat" + ], + "image_footnote": [], + "bbox": [ + 596, + 609, + 676, + 699 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/25884ccba2d5d04cbb30fef6e491a8c9bfc0f1a17aec2712e178fc7d29612049.jpg", + "image_caption": [ + "Avocado playing ukulele and an apple is watching" + ], + "image_footnote": [], + "bbox": [ + 696, + 625, + 818, + 699 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/06ccbf7aa43ed907b415559031a1f9dea412556970d9ce31e45d465097ae10dc.jpg", + "image_caption": [ + "Cinematic llama with dramatic lighting", + "Celebrating frog", + "Figure 12: A selection of stickers generated using the continuous kNN-Diffusion model." 
+ ], + "image_footnote": [], + "bbox": [ + 184, + 734, + 302, + 821 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/2d1eaa1c073e8d35e25d43e64ffbae7daf6da678710413708a2f7fc381506780.jpg", + "image_caption": [ + "A dog in a hotdog costume" + ], + "image_footnote": [], + "bbox": [ + 307, + 743, + 442, + 810 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/db0533c98d53ff549df32e6e68ca3863755f88eed3d04e57ae4e2abd973a0067.jpg", + "image_caption": [ + "Alien using VR" + ], + "image_footnote": [], + "bbox": [ + 468, + 737, + 560, + 816 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/065e901e300282687752803ea145e75c5c4a48525450a22d099b9c53f26c357b.jpg", + "image_caption": [ + "Unicorn with a rainbow horn, waving hand, and standing on grass" + ], + "image_footnote": [], + "bbox": [ + 568, + 742, + 686, + 816 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/9cf933b326cc7efbda45b6fc49f439e7aa4b295d9b93a6be84b7cd53c1092f54.jpg", + "image_caption": [ + "A brain made out of words" + ], + "image_footnote": [], + "bbox": [ + 692, + 736, + 816, + 818 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/370c6b22ce0cefa9204cffaaf109bd79d196271a9871fc6f9ec23750a5d3c71b.jpg", + "image_caption": [ + "A brown shiny rose flower", + "A white firetruck" + ], + "image_footnote": [], + "bbox": [ + 174, + 180, + 326, + 297 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/64252786a5e033a5dc2c74dd2099aaec25d62ad2ec704b3222e80138304809d2.jpg", + "image_caption": [ + "A dog using a typewriter", + "Race car driver in a tutu" + ], + "image_footnote": [], + "bbox": [ + 339, + 180, + 490, + 297 + ], + "page_idx": 15 
+ }, + { + "type": "image", + "img_path": "images/2cffa26afa32bbbed0e7f1d17b7728e98e2a29c0a173c9b9c2bd86be0bf8d8cd.jpg", + "image_caption": [ + "A robot tanning", + "Blue ants" + ], + "image_footnote": [], + "bbox": [ + 506, + 181, + 656, + 297 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/4a31a128de43a4f2b448b444f19a593c9f4e5d3bd607a5b6ac3a4be0766c982f.jpg", + "image_caption": [ + "Fuchsia iguanodon", + "Lucifer dancing with Jesus" + ], + "image_footnote": [], + "bbox": [ + 666, + 181, + 816, + 297 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/80a37af0645a1500e6f46f2c00f05e72ca08a553cd67a2d65db99dc6af1ee201.jpg", + "image_caption": [ + "Pineapple shaped refrigerator" + ], + "image_footnote": [], + "bbox": [ + 176, + 316, + 326, + 431 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3ebd87f5da61b89b7a3b7dab423d11e8c3059f9301b91e29aa22cb554204f073.jpg", + "image_caption": [ + "A surfer wearing a three-piece men's suit" + ], + "image_footnote": [], + "bbox": [ + 339, + 316, + 490, + 431 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/2f2c07585bca319b7160e26cfcf98ab258be3265ece37279124fcb51cb8b27ea.jpg", + "image_caption": [ + "Rocking chair on water" + ], + "image_footnote": [], + "bbox": [ + 504, + 316, + 655, + 431 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/71e5e86425d94203ab4eb0ac7a00b943ce2e232c3bcfc78316b3c1fc87443fec.jpg", + "image_caption": [ + "Alien cartoon" + ], + "image_footnote": [], + "bbox": [ + 666, + 316, + 818, + 431 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3fee574afde9883f4afba06f39b3e02bed4cd57a241ef8a4b8ec81cc1102e68f.jpg", + "image_caption": [ + "Kitchen from the lost city of atlantis" + ], + "image_footnote": [], + "bbox": [ + 179, + 454, + 325, + 566 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": 
"images/7e047ca7ac0f5db30927a660c2fbfab2aa5bee42d353caf6a5258787e1827f60.jpg", + "image_caption": [ + "A pink watermelon" + ], + "image_footnote": [], + "bbox": [ + 341, + 453, + 490, + 569 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/8649d2f4a994dc5fe971a11ad918fd843cce0157555b9f74b407e6ba7c1f97b0.jpg", + "image_caption": [ + "Image of grey tiger" + ], + "image_footnote": [], + "bbox": [ + 504, + 452, + 655, + 568 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/0ea58105f475274f27d69f1d1a13a75abb921097f517766561b674d895440bc7.jpg", + "image_caption": [ + "Green robot vacuum" + ], + "image_footnote": [], + "bbox": [ + 666, + 452, + 816, + 568 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/0c8ba1e6a6b182ac642d8c68efabacb6632b2c48c7db500a19c91352307fc406.jpg", + "image_caption": [ + "A baby cooking spaghetti" + ], + "image_footnote": [], + "bbox": [ + 178, + 589, + 326, + 705 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/76a022dd4c26524662b1445da0a09d0e73ae56fa230abb50f91e87e0d18a4970.jpg", + "image_caption": [ + "Raccoon mansion" + ], + "image_footnote": [], + "bbox": [ + 339, + 589, + 490, + 705 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/249e63eae7a23719f8a4f252c80eec4c89a48d780de15cbc02f3159526b68def.jpg", + "image_caption": [ + "Flying m1 abrams tank" + ], + "image_footnote": [], + "bbox": [ + 504, + 589, + 655, + 705 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b2d7399d706c16ce4a6992c7c3e35cd4088022f3e7cfca2fe5576556d7093af4.jpg", + "image_caption": [ + "Baby pictures of grandparents" + ], + "image_footnote": [], + "bbox": [ + 666, + 588, + 816, + 704 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/058412fcfa15ee2dd40dc066db22eb6b31b883b6ec2788731cd82589fa5c89eb.jpg", + "image_caption": [ + "Figure 13: Additional samples generated from challenging text inputs using the photo-realistic 
model" + ], + "image_footnote": [], + "bbox": [ + 176, + 722, + 326, + 839 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/93e09def9ac4f2a57e4c56aa4f857983d1fabb0683731abd4fedcaa67ea8c3be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 723, + 490, + 839 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3547976b13202681db475437924519f652fd5025a0896c6ab9aa0c6532f58345.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 723, + 655, + 839 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/78d779cbb719d59e7599c8ad91084bec96405b640debd586bc4c3936aa813ef2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 723, + 816, + 839 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b65fb3509024ec0020f9a7bde56f02487931e37073720ecffc9f0942e2083f9b.jpg", + "image_caption": [ + "This tomato is favorite of many high-class chefs" + ], + "image_footnote": [], + "bbox": [ + 223, + 222, + 334, + 309 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/22bcf71182d87373683714929f6cde8a9645fe7b281e622216833b2284a080f4.jpg", + "image_caption": [ + "Squirrel wearing a shirt" + ], + "image_footnote": [], + "bbox": [ + 233, + 352, + 313, + 429 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/1f467d3792f4ad73f0d569b9f664fa4c99496625567db6c3a16b321967525415.jpg", + "image_caption": [ + "Singing eggplants", + "A mushroom with a hat" + ], + "image_footnote": [], + "bbox": [ + 212, + 454, + 344, + 559 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/af81b46e1300e22e9e14e666c500d606a75d00bcb22cb2fffc6f5a4440d67f1b.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 580, + 333, + 681 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/aab033387b0cf5230b9c6e7805bf11ae57f34c6f679ffd17fbc6e52d3aab4512.jpg", + "image_caption": [ + "Black mop head", + "Figure 14: A selection of stickers generated using the discrete kNN-Diffusion model." + ], + "image_footnote": [], + "bbox": [ + 225, + 700, + 333, + 803 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/949d063e94628bc7fe47c4c3ea2983a86e9ebbd3d8b80c778feff3aaed8fadad.jpg", + "image_caption": [ + "A panda bear carrying some grocery bags" + ], + "image_footnote": [], + "bbox": [ + 357, + 220, + 493, + 311 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/cfa0b6ecc77ae6c5777ca20950d813bfa5043c232da4e09582a7d896363294e3.jpg", + "image_caption": [ + "Penguin drives a bus" + ], + "image_footnote": [], + "bbox": [ + 377, + 344, + 460, + 433 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/9aab18425e79fb7d5e1dc08690250149f683ede1e3c77449de78f2402b32c395.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 359, + 462, + 491, + 555 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/2e0f6f53549eba190c6c484a5cbea19870cab05444de8063c31f14a7ca29103c.jpg", + "image_caption": [ + "Chihuahua pulling a royal coach", + "Elephant sitting on a lion" + ], + "image_footnote": [], + "bbox": [ + 388, + 577, + 460, + 680 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/339ea1307b94f571c2a3eff4b26da0b612c80e5dc686895bbd1b8ec494660652.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 715, + 485, + 795 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/1c6e61268142f026dab4aea1a03c4ee9b596ee0b72ea70df1333e91a0fb6ce18.jpg", + "image_caption": [ + "Bald-headed mimes", + "Vomit candy", + "Invisible people" + ], + "image_footnote": [], + "bbox": [ + 509, + 213, + 
643, + 319 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/0a4ae2e738d729adb8f99baff5ccbd6b65fca462a7a50519b14a191e9f0f4140.jpg", + "image_caption": [ + "I J Skos . sbalele" + ], + "image_footnote": [], + "bbox": [ + 506, + 364, + 643, + 438 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/d93bc777bd80fc4f9f179d2061dd61206227815e6c4fb979cbf41fdacd355269.jpg", + "image_caption": [ + "Queen Esther", + "A laughing purple porcupine" + ], + "image_footnote": [], + "bbox": [ + 524, + 454, + 625, + 559 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/2f28dd260535536d8f92c1ce5d7f50bdcea0b806a10fe89c32af00d2eb9b6009.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 594, + 630, + 669 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/7bce8138d3fa6f6f6b241e0c9e0138b6b369a541e84ca9547078167f600c17a5.jpg", + "image_caption": [ + "Monkey eating a pickle" + ], + "image_footnote": [], + "bbox": [ + 532, + 710, + 627, + 797 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/0be22e643cfb8bdaa363741c182b79b3b2ea2c47c10e3a6875a79f141e0be8d2.jpg", + "image_caption": [ + "Alien cartoon", + "A lion wearing a T-shirt" + ], + "image_footnote": [], + "bbox": [ + 674, + 224, + 764, + 309 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/f161e048b5a4632054fbcff49ce75e52e6420ffffe061309b99119d2a13e5346.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 345, + 759, + 429 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/44b85c6aff143613211f3522a9dd7084740c23a7a7d637c5ac4df602395b574d.jpg", + "image_caption": [ + "One legged striped rabbit", + "Dollar bill combing hair" + ], + "image_footnote": [], + "bbox": [ + 683, + 455, + 756, + 555 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/edbbc96cbb7ace016f65e7c774a92426fc863ff1aaefc1475aadb5c7b099476c.jpg", + "image_caption": 
[ + "A sloth eating oatmeal" + ], + "image_footnote": [], + "bbox": [ + 651, + 590, + 784, + 672 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/5be0ce636497b5303815e129e45479ac35d97418a6af2b5ef66d3a17f92a7290.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 710, + 787, + 797 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "6.1 BACKGROUND", + "text_level": 1, + "bbox": [ + 171, + 104, + 316, + 118 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Continuous diffusion process Diffusion models are latent variable models that aim to model a distribution $p_{\\theta}(x_0)$ that approximates the data distribution $q(x_0)$ . Specifically, they model a forward process in the space of $x_0$ from data to noise. Given a sample from the data distribution $x_0 \\sim q(x_0)$ , this process produces a Markov chain of latent variables $x_1, \\ldots, x_T$ by progressively adding Gaussian noise to the sample:", + "bbox": [ + 169, + 128, + 823, + 200 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(x _ {t} \\mid x _ {t - 1}\\right) := \\mathcal {N} \\left(x _ {t}; \\sqrt {1 - \\beta_ {t}} x _ {t - 1}, \\beta_ {t} \\mathcal {I}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 205, + 823, + 223 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\beta_{t}$ is a variance schedule. 
As presented previously by (Ho et al., 2020), the latent variable $x_{t}$ can be expressed directly as a linear combination of noise and $x_0$ :", + "bbox": [ + 169, + 227, + 823, + 256 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nx _ {t} = \\sqrt {\\bar {\\alpha} _ {t}} x _ {0} + \\epsilon \\sqrt {1 - \\bar {\\alpha} _ {t}}, \\quad \\epsilon \\sim \\mathcal {N} (0, \\mathcal {I}) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 260, + 823, + 276 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\alpha_{t} := \\Pi_{i=1}^{t}(1 - \\beta_{i})$ . In order to sample from the data distribution $q(x_0)$ , we define the \"reverse process\" $p(x_{t-1}|x_t)$ which samples first from $q(x_T)$ and then samples reverse steps $q(x_{t-1}|x_t)$ until $x_0$ .", + "bbox": [ + 169, + 280, + 823, + 324 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Since the data distribution is unknown, we need to train a model to approximate it. Note that when $T$ is large enough, the noise vector $x_{T}$ nearly follows an isotropic Gaussian distribution. 
This suggests learning a model $p_{\\theta}(x_{t - 1}|x_t)$ to predict mean $\\mu_{\\theta}$ and covariance matrix $\\Sigma_{\\theta}$ :", + "bbox": [ + 169, + 329, + 825, + 372 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} \\left(x _ {t - 1} \\mid x _ {t}\\right) := \\mathcal {N} \\left(x _ {t - 1}; \\mu_ {\\theta} \\left(x _ {t}, t\\right), \\Sigma_ {\\theta} \\left(x _ {t}, t\\right)\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 377, + 823, + 393 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To train this model, we can replace $\\mu_{\\theta}(x_t,t)$ by predicting the noise $\\epsilon_{\\theta}(x_t,t)$ added to $x_0$ using equation 2 and we get this objective function:", + "bbox": [ + 169, + 397, + 823, + 425 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nL := E _ {t \\sim [ 1, T ], x _ {0} \\sim q (x _ {0}), \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I})} [ \\| \\epsilon - \\epsilon_ {\\theta} (x _ {t}, t, y) \\| ^ {2} ] \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 429, + 823, + 448 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $y$ is an optional conditioning signal (such as text/image embedding or a low resolution image).", + "bbox": [ + 169, + 450, + 823, + 465 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Discrete diffusion process Let $x_{n}\\in \\{1,\\ldots ,V\\}^{h\\times w}$ be the indices of the allocated codebook vectors extracted by a pre-trained VQGAN (Esser et al., 2021) encoder. The forward process of a diffusion model $q(x_{n}|x_{n - 1})$ is a Markov chain that adds noise at each step. Moreover, the reverse process $q(x_{n - 1}|x_n,x_0)$ , is a denoising process that removes noise from an initialized noise state. 
As presented by (Gu et al., 2021), the forward diffusion process is given by:", + "bbox": [ + 169, + 479, + 823, + 551 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(x _ {n} \\mid x _ {n - 1}\\right) = v ^ {T} \\left(x _ {n}\\right) \\mathbf {Q} _ {n} v \\left(x _ {n - 1}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 568, + 823, + 585 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $v(x_{n})$ is a one-hot vector with entry 1 at $x_{n}$ , and $\\mathbf{Q}_n$ is the probability transition matrix from state $x_{n-1}$ to $x_{n}$ .", + "bbox": [ + 169, + 588, + 823, + 616 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The reverse process is given by the posterior distribution:", + "bbox": [ + 171, + 623, + 552, + 638 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(x _ {n - 1} \\mid x _ {n}, x _ {0}\\right) = \\frac {\\left(v ^ {T} \\left(x _ {n}\\right) \\mathbf {Q} _ {n} v \\left(x _ {n - 1}\\right)\\right) \\left(v ^ {T} \\left(x _ {n - 1}\\right) \\bar {\\mathbf {Q}} _ {n - 1} v \\left(x _ {0}\\right)\\right)}{v ^ {T} \\left(x _ {n}\\right) \\bar {\\mathbf {Q}} _ {n} v \\left(x _ {0}\\right)} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 282, + 641, + 823, + 678 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\bar{\\mathbf{Q}}_n = \\mathbf{Q}_n\\cdot \\cdot \\cdot \\mathbf{Q}_1$", + "bbox": [ + 171, + 681, + 333, + 698 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Inspired from mask language modeling (Devlin et al., 2018), they proposes corrupting the tokens by stochastically masking some of them. Specifically, an additional special token [MASK] is proposed, so for each token there are $(\\mathrm{V} + 1)$ discrete states. 
The transition matrix is formulated as, By adding a small amount of uniform noise to the categorical distribution, the transition matrix can be formulated as,", + "bbox": [ + 169, + 703, + 825, + 773 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} _ {n} = \\left[ \\begin{array}{c c c c c} \\alpha_ {n} + \\beta_ {n} & \\beta_ {n} & \\beta_ {n} & \\dots & 0 \\\\ \\beta_ {n} & \\alpha_ {n} + \\beta_ {n} & \\beta_ {n} & \\dots & 0 \\\\ \\beta_ {n} & \\beta_ {n} & \\alpha_ {n} + \\beta_ {n} & \\dots & 0 \\\\ \\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\ \\gamma_ {n} & \\gamma_ {n} & \\gamma_ {n} & \\dots & 1 \\end{array} \\right] \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 791, + 823, + 871 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\alpha_{n}\\in [0,1],\\beta_{n} = (1 - \\alpha_{n} - \\gamma_{n}) / V$ and $\\gamma_{n}$ the probability of a token to be replaced with a [MASK] token. Each token has a probability of $\\gamma_{n}$ to be replaced by the [MASK] token, $V\\beta_{n}$ to be resampled uniformly and $\\alpha_{n} = (1 - V\\beta_{n} - \\gamma_{n})$ to be unchanged.", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6.2 ADDITIONAL SAMPLES", + "text_level": 1, + "bbox": [ + 171, + 104, + 377, + 118 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In Fig. 16 and 15 we present a visual comparison of our discrete model, trained on the stickers dataset with (1) the kNN extracted during inference, (2) the same model without using kNN in inference. As can be seen, the images generated by our model are better aligned to the corresponding text compared to the baselines. 
While the baselines fail with challenging prompts, our model produces high-quality images that align with the text, and composes multiple concepts correctly.", + "bbox": [ + 169, + 128, + 826, + 202 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "COCO Validation Set Comparison Fig. 11 presents a qualitative comparison with FuseDream (Liu et al., 2021), CogView (Ding et al., 2021) and VQ-Diffusion (Gu et al., 2021) on the COCO validation set. Note that both CogView and VQ-Diffusion have been trained on an Image-Text paired dataset, whereas our model was not trained on the COCO dataset, nor used it in the retrieval model.", + "bbox": [ + 169, + 214, + 826, + 285 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Additional samples generated from challenging text inputs are provided in Figs. 13, 14 and Fig. 12.", + "bbox": [ + 171, + 291, + 823, + 308 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/277eed8ca882f0c8f04faf312ffa99177e145e6f3061c130d08e57099f5ddbc5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 311, + 823, + 523 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/ba3a5906a0e0a6b9949bcb6ffdaf71182a42f6f3afe81f7fe4b7b5faaa88c183.jpg", + "image_caption": [ + "Figure 15: Comparison of our model, trained on PMD with (1) kNN extracted in inference, (2) the same model without using kNN in inference. While the kNN lack information regarding text semantics, our model considers both text semantics and the kNN, thus proving the advantage of using both the text and the kNN embeddings.", + "Figure 16: Qualitative comparison of stickers generated using the discrete kNN-Diffusion model, 10 Nearest Neighbors to the text in the CLIP embedding and a discrete model that does not use kNN." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 583, + 823, + 832 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 960 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "6.3 HUMAN EVALUATION PROTOCOL", + "text_level": 1, + "bbox": [ + 171, + 103, + 446, + 118 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "For all of our human evaluation experiments, we used Amazon Mechanical Turk. For each experiment, we used 600 samples, each scored by five different people. The preferred sample was determined according to majority opinion. For each baseline comparison, we asked two questions (in different experiments): \"Which image is of a higher quality?\" and \"Which image best matches the text?\"", + "bbox": [ + 169, + 128, + 823, + 200 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "6.4 DATASETS", + "text_level": 1, + "bbox": [ + 171, + 215, + 289, + 229 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The modified PMD dataset is composed of the following set of publicly available text-image datasets: SBU Captions (Ordonez et al., 2011), Localized Narratives (Pont-Tuset et al., 2020), Conceptual Captions (Sharma et al., 2018), Visual Genome (Krishna et al., 2016), Wikipedia Image Text (Srinivasan et al., 2021), Conceptual Captions 12M (Changpinyo et al., 2021), Red Caps (Desai et al., 2021), and a filtered version of YFCC100M (Thomee et al., 2015). 
In total, the dataset contains 69 million text-image pairs.", + "bbox": [ + 169, + 242, + 823, + 328 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "6.5 ABLATION STUDY", + "text_level": 1, + "bbox": [ + 171, + 343, + 341, + 357 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Index size As one can expect, increasing the index size at inference time improves performance. To demonstrate this hypothesis, we evaluated our model with an index containing $10\\%$ , $30\\%$ , $50\\%$ and $70\\%$ images of PMD dataset, and obtained FID scores of 13.92, 13.85, 13.72, and 13.65 respectively.", + "bbox": [ + 169, + 369, + 823, + 412 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "kNN conditioning We examined several different approaches to kNN input conditioning: (i) forwarding the kNN embeddings and the single image embedding through a self-attention layer before feeding the contextualized $K + 1$ embeddings to the model, (ii) feeding the model with one embedding, computed using cross-attention between the image embedding and the kNN embeddings, and, (iii) feeding the model with the image embedding concatenated with a learned linear projection of the kNN embeddings. These variants received FID scores of 18.3, 22.4, 34.1 respectively.", + "bbox": [ + 169, + 417, + 825, + 503 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "6.6 RETRIEVAL MODEL", + "text_level": 1, + "bbox": [ + 171, + 518, + 354, + 534 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The retrieval model is implemented using FAISS (Johnson et al., 2019). FAISS is an efficient database, capable of storing billions of elements and finding their nearest neighbors in milliseconds. In the pre-processing phase, for each image in the dataset, we store the image index and its corresponding CLIP image embedding. During training, given a training image, we extract its CLIP image embedding and search for its 10 (see Fig. 
9) nearest neighbors in the dataset based on the cosine similarity distance.", + "bbox": [ + 169, + 544, + 823, + 628 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "For an efficient search during training and inference, we use a non-exhaustive search: For this, we use an inverted file index. As in Babenko & Lempitsky (2014), we define Voronoi cells in the $d$ -dimensional space (where $d = 512$ is the CLIP embedding dimensional space), s.t each database vector falls in one of the cells. During search time, only the embeddings contained in the cell the query falls in and a few neighboring ones are compared against the query vector. In addition, to fit the index of our large-scale datasets on a 128GB RAM server, we compress the code size from $512 \\times 32/8 = 2048$ Bytes to 256 Bytes using optimized product quantization (Ge et al., 2013; Jegou et al., 2010). In Algorithm 1 we include pseudocode of the core of the implementation of the retrieval database.", + "bbox": [ + 169, + 635, + 825, + 760 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "6.7 DISCRETE KNN MODEL", + "text_level": 1, + "bbox": [ + 171, + 777, + 382, + 792 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We provide additional implementation details for the discrete diffusion model. Additional training details can be found in Tab. 3.", + "bbox": [ + 169, + 803, + 823, + 832 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Vector Quantization For token quantization, we use VQ-VAE and adapt the publicly available VQGAN(Esser et al., 2021) model, trained on the OpenImages(Krasin et al., 2016) dataset. 
The encoder downsamples images to $32 \\times 32$ tokens and uses a codebook vocabulary with 2887 elements.", + "bbox": [ + 169, + 847, + 823, + 891 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Image Tokenization In our discrete generative model we model images as a sequence of discrete tokens. To this end, we utilize a vector-quantized variational auto-encoder (VQ-VAE) (Van Den Oord et al., 2017) as image tokenizer. VQ-VAE consists of three components: (i) an encoder, (ii) a learned codebook, and, (iii) a decoder. Given an image, the encoder extracts a latent representation. The codebook then maps each latent vector representation to its nearest vector in the codebook. Finally, the decoder reconstructs the image from the codebook representation. VQ-VAE is trained with the objectives of reconstruction and codebook learning. VQ-GAN (Esser et al., 2021) adds an adversarial loss term that tries to determine whether the generated image is fake or real. This added term was shown to improve reconstruction quality.", + "bbox": [ + 169, + 103, + 826, + 229 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Transformer We follow Gu et al. (2021) and train a decoder-only Transformer. The decoder module contains 24 transformer blocks, each containing full attention, cross-attention for the concatenated conditioner, and a feed-forward network. The timestamp $n$ is injected using Adaptive Layer Normalization (Ba et al., 2016). 
The decoder contains 400 million parameters.", + "bbox": [ + 169, + 243, + 823, + 301 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Classifier-free guidance We sample our diffusion models using classifier-free guidance (CFG) (Ho & Salimans, 2021; Nichol et al., 2021; Ramesh et al., 2022). CFG is performed by extrapolating an unconditional sample in the direction of a conditional sample. To support unconditional sampling, previous work had to fine-tune (Nichol et al., 2021) their models with $20\\%$ of the conditional features nullified. This enabled them to sample unconditional images from the model using the null condition, $y' = \\vec{0}$ , the null vector. We found that we can generate unconditional samples from our model using null conditioning without fine-tuning it. We hypothesize that by conditioning the model on a null vector, the cross-attention component is also nullified, resulting in no contribution to the diffusion process. During inference, in each step of the diffusion process we generate two images: conditional image logits, $p_{\\theta}(x_{n-1}|x_n,y)$ , conditioned on the desired multi-modal embedding $y$ , and the unconditional image logits, $p_{\\theta}(x_{n-1}|x_n,y')$ , conditioned on the null embedding. Then, the final image for a diffusion step $n$ is sampled from", + "bbox": [ + 169, + 315, + 826, + 487 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y\\right) = p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y ^ {\\prime}\\right) + \\\\ \\lambda \\left(p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y\\right) - p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y ^ {\\prime}\\right)\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 492, + 684, + 529 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "where $\\lambda$ is a scale coefficient. 
In all of our experiments, we set $\\lambda = 8$ , which was found to yield the highest FID scores on the validation set. Note that the above extrapolation occurs directly on the logits output by $p_{\\theta}$ , in contrast to GLIDE (Nichol et al., 2021), which extrapolates the pixel values.", + "bbox": [ + 169, + 535, + 823, + 579 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Training Objective For completeness we are adding the training objective of the discrete model. The network is trained to minimize the variational lower bound (VLB):", + "bbox": [ + 169, + 592, + 823, + 622 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {v l b}} = \\mathcal {L} _ {0} + \\mathcal {L} _ {1} + \\dots + \\mathcal {L} _ {N - 1} + \\mathcal {L} _ {N}, \\\\ \\mathcal {L} _ {0} = - \\log p _ {\\theta} \\left(x _ {0} \\mid x _ {1}, f _ {i m g} (I), \\operatorname {k n n} _ {i m g} (\\mathrm {I}, k)\\right), \\\\ \\mathcal {L} _ {n - 1} = D _ {K L} \\left(q \\left(x _ {n - 1} \\mid x _ {n}, x _ {0}\\right) | | p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, f _ {i m g} (I), \\operatorname {k n n} _ {i m g} (\\mathrm {I}, k)\\right)\\right), \\tag {8} \\\\ \\mathcal {L} _ {N} = D _ {K L} \\left(q \\left(x _ {N} \\mid x _ {0}\\right) | | p \\left(x _ {N}\\right)\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 628, + 823, + 696 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Where $p(\\pmb{x}_N)$ is the prior distribution of timestep $N = 100$ , $f_{img}(I)$ is the CLIP image embedding, $\\mathrm{knn}_{img}(\\mathbf{I}, k)$ is the $k$ nearest neighbors in the feature space of the image embedding. The full details can be found in Gu et al. 
(2021).", + "bbox": [ + 169, + 704, + 823, + 748 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "6.8 CONTINUOUS KNN MODEL", + "text_level": 1, + "bbox": [ + 171, + 763, + 405, + 777 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We provide additional implementation details for the continuous diffusion model. Additional training details can be found in Tab. 3.", + "bbox": [ + 169, + 790, + 823, + 819 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Decoder. We followed (Nichol et al., 2021; Ho et al., 2020; Ramesh et al., 2022) and re-implemented a diffusion $U$ -net model. Specifically, we modify the architecture described in (Ramesh et al., 2022) by allowing multiple CLIP embeddings as the condition to the model. Since we do not have a paired text-image dataset, we removed the text transformer, and thus the text embedding. In particular, we use 512 convolution channels, 3 residual blocks, 64 heads channels and attention resolution of 32, 16 and 8. Similarly to our discrete model, we trained two models (1)", + "bbox": [ + 169, + 834, + 826, + 919 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/28755241c65e0e8da754a4827c3a420acf9c3c1a30d4e4d6dd8a1ada740b7f03.jpg", + "image_caption": [ + "(a) Training" + ], + "image_footnote": [], + "bbox": [ + 338, + 112, + 483, + 234 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/457362ea822a3328b76c5dadce48dfd63973fb77bd0f84acd1b58b9060785427.jpg", + "image_caption": [ + "(b) Inference", + "Figure 17: During training, only the image I is given (red), whereas during inference only the text $t$ is given (blue). 
In order to bridge the gap between the two distributions during training, we leverage the K nearest neighbors that should have a large enough distribution (dashed cone) to cover the potential text embedding (i.e. $\\cos(b) < \\cos(a)$ ). During inference, the opposite is applied." + ], + "image_footnote": [], + "bbox": [ + 513, + 112, + 660, + 234 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "a no-kNN conditioned only on CLIP image embedding during training, (2) a kNN conditioned on CLIP image embedding and its kNN. Finally, we enable classifier-free guidance by randomly setting the CLIP embeddings to zero $10\\%$ of the time. As demonstrated in Tab. 2, we find that humans prefer our model over no-kNN $66.8\\%$ of the time for image quality and $69.4\\%$ of the time for text alignment.", + "bbox": [ + 169, + 321, + 823, + 393 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Super-Resolution. As the decoder generates images with $64 \\times 64$ resolution, we up-sampled the images to $256 \\times 256$ using the open-source super resolution of (Nichol et al., 2021). To further up-sample the images to $512 \\times 512$ and $1024 \\times 1024$ we used the open-source super resolution provided by (Wang et al., 2021).", + "bbox": [ + 169, + 407, + 823, + 465 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Training Objectives For completeness we are adding the training objective of our continuous model. Following Ho et al. (2020); Nichol et al. 
(2021) we are using mean-squared error loss to predict the noise:", + "bbox": [ + 169, + 479, + 823, + 521 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\nL := E _ {n \\sim [ 1, N ], x _ {0} \\sim q (x _ {0}), \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I})} [ | | \\epsilon - \\epsilon_ {\\theta} (x _ {n}, n, y) | | ^ {2} ]\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 520, + 674, + 537 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where $\\epsilon_{\\theta}$ is a $U - net$ model and $y = (f_{img}(x_0),\\mathrm{knn}_{img}(x_0,k))$", + "bbox": [ + 171, + 540, + 607, + 558 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/5fff3164548c444e932c6588b815dd9522fe373b4be8b35c453e837eef6ca91d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DiscreteContinuous
Number of nearest neighbors1010
Diffusion steps1001000
Noise schedule-cosine
Sampling steps100250
Model size400M1B
Sampling variance method-analytic
Dropout-0.1
Weight decay4.5e-2-
Batch size5121600
Iterations150K500K
Learning rate4.05-41.4e-4
optimizerAdamWAdamW
Adam β20.960.9999
Adam ε1.0e-81.0e-8
EMA decay0.990.9999
warmup500025000
# GPUs128 A100200 A100
", + "bbox": [ + 308, + 564, + 687, + 808 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 3: Training details of our models", + "bbox": [ + 379, + 813, + 616, + 827 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 1 Pseudo-code implementation for the construction of the retrieval model, training and sampling using conditioning kNN." + ], + "code_body": "Retrieval model construction \ndef training(batch:train image dataset): 1 //inverted index of 50k centroids, 2 //with optimized product quantization to 256B index_cfg $\\equiv$ \"OPQ256_IVF50000_PQ256x8\" 4 index $=$ faiss.indexFACTORY(d,idx_cfg,faiss.METRIC INNER_PRODUCT) 5 ivf $=$ faiss.extract_index_ivf(index) 6 clustering_index $=$ faiss.index_cpu_to_all_gpus(faiss.IndexFlatIP(d))7 ivf.clusterbing_index $=$ clustering_index 8 train_dataset $\\equiv$ [] for image in random.sample(batch,1000000): 10 train_dataset.append(CLIP_image_embedding(image)) 11 index.train(train_dataset) 12 for image in dataset: index.add(CLIP_imageEncoder(image)) 14 return index Training \ndef training(I:FAISS index, image, k:Number of NN, t:timestamp [0,T-1]): image_encoding $\\equiv$ CLIP_imageEncoder(image) 2 kNN $=$ I.search(image_encoding,k) condition $=$ concatenate([image_encoding,kNN]) 4 image_T $=$ add_noise(image,t) 5 image_0 $=$ diffusion_model(image_T,t,condition) loss $=$ criterion(imageO, image) 7 return loss 9 Sampling \ndef sampling(I:FAISS index,text,k:Number of NN): 1 text_encoding $\\equiv$ CLIP_textEncoder(text) 2 kNN $=$ I.search(text_encoding,k) condition $=$ concatenate([text_encoding,kNN]) 4 image $=$ sample_noise(T) for t in [T-1,T-2,...,0]: image $=$ diffusion_model(image,t,condition) 
return image 8", + "guess_lang": "csv", + "bbox": [ + 171, + 287, + 823, + 779 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "6.9 TEXT-ONLY IMAGE MANIPULATION", + "text_level": 1, + "bbox": [ + 171, + 104, + 460, + 118 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Our approach is illustrated in Fig. 18. Additional manipulation examples are provided in Figs. 20. The full comparison with the baselines is provided in Fig. 21 and 22. We also provide in Fig. 19 several examples for the process of the manipulated images construction.", + "bbox": [ + 169, + 128, + 826, + 174 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/fea036c0c8e5389608ac52d49721467649003b1a0a35755855ba57e202c713a4.jpg", + "image_caption": [ + "Figure 18: An illustration of our manipulation approach. During training: Given a training image (1), the model extracts its first nearest neighbor (2). Next, a random local area in the training image is selected (3), and the manipulated image is constructed by replacing the area with the corresponding nearest neighbor (4). The model then receives as input the manipulated image and the clip embedding of the local area that needs to be restored (5). During inference: Given an input image and a text query \"A face of a male child\", the model receives as input the image (4) and the clip embedding of the modifying text (5)." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 191, + 825, + 465 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7794468ff681a8636985d6cb84fc05bddf2664f668a9bde3664e76197117ba7a.jpg", + "image_caption": [ + "Figure 19: Illustration of the manipulated image construction process during training. Given an original image, we select a random local area, and extract the first nearest neighbor (1-NN). Using ECC alignment, we align the nearest neighbor with the original image and replace the random local area with its corresponding nearest neighbor local area. The model then receives as input the manipulated image, together with the CLIP embedding of the local area, and tries to predict the original image." + ], + "image_footnote": [], + "bbox": [ + 173, + 328, + 830, + 619 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/64a5fed58572e933e1150d81175adf677378bf0bcf8c2b8208d133c439fb9bba.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 179, + 241, + 236, + 319 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/e57a90ca121854cb31db0c943a4187b5d80ce956d8c7666149a472f68afb6666.jpg", + "image_caption": [ + "Raising left hand" + ], + "image_footnote": [], + "bbox": [ + 287, + 243, + 352, + 318 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/26dd6911e16ef1181ae7585e4a7d16bad41cf406d412b5c6f77b269896702d26.jpg", + "image_caption": [ + "Raising hands" 
+ ], + "image_footnote": [], + "bbox": [ + 357, + 243, + 433, + 319 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/2b420541183788d11f05a6dd90a27676dc433e5a8ae13ac62205ad1f6ab272bb.jpg", + "image_caption": [ + "Blue pants" + ], + "image_footnote": [], + "bbox": [ + 449, + 243, + 496, + 318 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/7b03a21491eb59ae39c6b88af308ccde062f214bbf50ac3e8838ba9cf165e0ac.jpg", + "image_caption": [ + "Black shirt" + ], + "image_footnote": [], + "bbox": [ + 522, + 243, + 566, + 318 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/6f17dfadca4040e3ae828e936f4f65e868855ec998f18f5759654998702905cc.jpg", + "image_caption": [ + "Holds a heart" + ], + "image_footnote": [], + "bbox": [ + 589, + 243, + 640, + 318 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/d635cdd4e77493bacbd9cd0b94c4f8abcece23f662a43aeb3c2b7170735a1fcc.jpg", + "image_caption": [ + "Princess" + ], + "image_footnote": [], + "bbox": [ + 666, + 243, + 712, + 318 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/eb8bcca698bf235668fac7b6d2090d32ead07adf165c7601b98c576956e8b2d6.jpg", + "image_caption": [ + "Sitting" + ], + "image_footnote": [], + "bbox": [ + 746, + 243, + 803, + 318 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/2ba6fa05045477cf601aedf5449da7f19fa33ee5d3ae03726fe1f5a06587b48c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 335, + 245, + 386 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/e04d91ae0078e3f13b04545dbd231fa9f18cad6ba2b68457a7651ac7a87d9c45.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 184, + 392, + 245, + 450 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/a808aed1c4541648fb3051088b24badca32837e34912c7130ece8d1e15959fec.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 179, + 457, + 243, 
+ 510 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/51078dc7dc29b2d6fdeac341ddf5024c07b6f7122090b9e727a58e35c6e40d59.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 290, + 339, + 354, + 388 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/c4f07c90e6b9dfa4b319f10c60895cffc314e52bcbbfc0df21c1902a8ed03e18.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 290, + 393, + 349, + 450 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/53fadd6a3913e68819b93b806fd4aafeebcd3c0b345de3d315d8de372936b6df.jpg", + "image_caption": [ + "Smiling" + ], + "image_footnote": [], + "bbox": [ + 287, + 458, + 351, + 510 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/c01d5cdc11749aceb600474b790b227f0725639ed45e6cc3017b74e047d7f3a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 362, + 339, + 429, + 388 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/228c3853d508570273ac7e44aa73a0a7931b1c3e53d6b7c8642a5698f4437fd5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 393, + 421, + 450 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/de7e123758ffaa3c53c949f86ab501000b510c5a29ec9baa367ead431dbc1ace.jpg", + "image_caption": [ + "Angry" + ], + "image_footnote": [], + "bbox": [ + 362, + 458, + 426, + 510 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/f8084a2788067af8c2a6467de0967dce3dd260b6eaf65144a09153bc799d92eb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 437, + 339, + 504, + 388 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/3390a19d095a77761a8cb62fe5351a40a125939fb240ae0313747de09776abcd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 439, + 393, + 496, + 450 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": 
"images/7f5c9af1b8eb8e9a7401d597697e61a7c3c14a31597371d96db50a5b28172d6f.jpg", + "image_caption": [ + "Sad" + ], + "image_footnote": [], + "bbox": [ + 439, + 458, + 501, + 508 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/356e3c1782cb587c2a81187bd60ed235e0c78c24a06e04a2d004a20e977d7bfd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 339, + 578, + 388 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/e56d2185ea670ff29b649342f4e3bb5fc3df7f61ee6195b237e0c55fabe7d072.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 393, + 575, + 450 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/12c177a34c07bdea3fd1247b8831c6e6cc55dc26e9b21766e89323dd958d3187.jpg", + "image_caption": [ + "Surprised" + ], + "image_footnote": [], + "bbox": [ + 514, + 457, + 576, + 508 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/da607d1b8b77be7cb8507b68d659c13b7bffbc7cf157aa061ce8a308fdd211fd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 339, + 656, + 388 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/e674f7d82e7b9881aa9754566d6123277f2961053857202481e3bfb184c64aa5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 591, + 393, + 650, + 450 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/45586dd300b9af485b22e3f8adda77dfd5d45fc14a6a58bc8ab18172acee9c40.jpg", + "image_caption": [ + "With a tie" + ], + "image_footnote": [], + "bbox": [ + 591, + 455, + 653, + 508 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/2afe8cd0b79ec97e8f85acb7b54374dd5929d4d4af34a4e617a4ed08c4f69cc2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 339, + 730, + 388 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/33730992ca1160af4ce702c5a5155b30fe5bce51fa7a0165ff152335e2d98122.jpg", + "image_caption": [], + "image_footnote": 
[], + "bbox": [ + 666, + 393, + 725, + 450 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/9c94b18e7cdc8231a1b1f8c79561ad3948e5f2c432cffee042beea77b341bb6b.jpg", + "image_caption": [ + "With a hat" + ], + "image_footnote": [], + "bbox": [ + 665, + 454, + 728, + 510 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/1cbd9b12615d0960973eb52c5db15b704c2e258420bc12b308b4eed00e16733d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 745, + 339, + 807, + 388 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/8e6e49973d317d28e398a63397441caec9b5c31b070429695becdb2767a11ad8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 393, + 803, + 450 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/c184bf010aed80b2b24f8e3ddfe26ebff667d3c8608b9c68cd8fd7a1f5c4e084.jpg", + "image_caption": [ + "Holds a heart" + ], + "image_footnote": [], + "bbox": [ + 740, + 457, + 803, + 510 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/5d64f882ac50029ec58d42f2227b06018dacc7147a2126af54a93db2e38abc4d.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 178, + 532, + 251, + 577 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/57dea729a4cf56b78ebebfafc8fb83f40b589b818cccf38268df7c5d52410621.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 194, + 603, + 235, + 648 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/d1f089e64c677195ce254b031d92751d7f5cfc53e5268353ddaa8943be339c0c.jpg", + "image_caption": [ + "Brown" + ], + "image_footnote": [], + "bbox": [ + 285, + 532, + 357, + 577 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/24417939aaec7b1106a20f9c8278eb671db242b002aca04ef883863b7ef10653.jpg", + "image_caption": [ + "With a bow tie" + ], + "image_footnote": [], + "bbox": [ + 299, + 603, + 341, + 648 + ], + "page_idx": 
25 + }, + { + "type": "image", + "img_path": "images/4837c9c2816f13d2d82dfbe651897d639d5e0a71bb78e8dee928b12118b1d8ee.jpg", + "image_caption": [ + "Blue racing car" + ], + "image_footnote": [], + "bbox": [ + 361, + 532, + 433, + 577 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/95eecdebf2ef13c3d6d48d129bd43ff57aa8b103e3f0a62cb1a290bb6ce67f2c.jpg", + "image_caption": [ + "With a bow tie" + ], + "image_footnote": [], + "bbox": [ + 390, + 603, + 433, + 650 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/79a44f0cd5d2d845de178dd2725361391d7bac97323eb6c2fa3966b2720dddbd.jpg", + "image_caption": [ + "Flowers" + ], + "image_footnote": [], + "bbox": [ + 434, + 532, + 509, + 577 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/d14ca17d5966209bdf8569e00cf2a793237fa81ae1e59e73b8043fc6ef615f14.jpg", + "image_caption": [ + "Sleeping" + ], + "image_footnote": [], + "bbox": [ + 482, + 602, + 524, + 650 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/6c3892bd5c587c080a087740bf83d11cf32f933af17bcef12403504d87edb4b9.jpg", + "image_caption": [ + "Sketch" + ], + "image_footnote": [], + "bbox": [ + 511, + 532, + 581, + 577 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/37332cd45083ae44a77e2593bdde45ed6df852828bdec4173377205db5ea088e.jpg", + "image_caption": [ + "Yellow racing car" + ], + "image_footnote": [], + "bbox": [ + 591, + 532, + 658, + 577 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/a1ae83a724a796bc78a8c855c453a8dcad23a333050ea1cb03f64e12ce16eeb3.jpg", + "image_caption": [ + "Stars" + ], + "image_footnote": [], + "bbox": [ + 661, + 532, + 733, + 577 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/a8a608179b18282bbc4fca5c9bb644f944ce4ca3f45ff89e3ab2fbe287148ef4.jpg", + "image_caption": [ + "Colorful" + ], + "image_footnote": [], + "bbox": [ + 736, + 532, + 812, + 577 + ], + "page_idx": 25 + }, + { + "type": 
"image", + "img_path": "images/b4166392a635f5f7f216a72384752f389d3a06b775316e761058f1dba42c80e8.jpg", + "image_caption": [ + "Red lipstick" + ], + "image_footnote": [], + "bbox": [ + 756, + 602, + 799, + 650 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/ede45cf23c0b3b2a62df0c6f091a8150e32fabe93e54bbb16adf854188ad5c20.jpg", + "image_caption": [ + "original" + ], + "image_footnote": [], + "bbox": [ + 187, + 683, + 230, + 739 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/703d7ab071c3469d953fed3a7c3df1bb4d3f97a6a3dd4264c58b98d8b8fe7346.jpg", + "image_caption": [ + "Smiling" + ], + "image_footnote": [], + "bbox": [ + 248, + 683, + 287, + 739 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/55ec0acaf15c5fa5367499e659ee0a2012b4c1f3496f07c369b08b31b9476e4b.jpg", + "image_caption": [ + "Angry" + ], + "image_footnote": [], + "bbox": [ + 295, + 683, + 339, + 739 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/e2f145fec752ce2ff05c9f2c4eba6efb856fb1eb01a967b27cb16ba362757fcd.jpg", + "image_caption": [ + "Sad" + ], + "image_footnote": [], + "bbox": [ + 344, + 683, + 385, + 739 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/3d26da57aa83f13d6f387eede510aee64a557d7ae45ab901864b4de5244d1a67.jpg", + "image_caption": [ + "Surprised" + ], + "image_footnote": [], + "bbox": [ + 393, + 683, + 436, + 739 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/722ac0a79cffb8727a85cf1538422f947407deb086ea4b2dbb437f08b36e51cd.jpg", + "image_caption": [ + "original" + ], + "image_footnote": [], + "bbox": [ + 444, + 681, + 517, + 739 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/900cfe73d83b68ab5c0fcff3588924975096b6eb41af62a6c4a9c61ccbd0065c.jpg", + "image_caption": [ + "Smiling" + ], + "image_footnote": [], + "bbox": [ + 526, + 681, + 598, + 739 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": 
"images/85a96b2379bf7659a6d05680deea7965c6c58ed8d3fc5a3bcf6778411e58cabc.jpg", + "image_caption": [ + "Angry", + "Figure 20: Additional manipulation examples, generated using our model." + ], + "image_footnote": [], + "bbox": [ + 602, + 681, + 673, + 739 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/8120f167a21959a387fc65ae4e86572cc95c25fc5882be8d8dbe3bdad25dd453.jpg", + "image_caption": [ + "Sad" + ], + "image_footnote": [], + "bbox": [ + 676, + 681, + 746, + 739 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/fa46d33e95e6782f1d76a59b8da109af927a82befa76182f0b8d4052d83a8b99.jpg", + "image_caption": [ + "Surprised" + ], + "image_footnote": [], + "bbox": [ + 751, + 681, + 820, + 739 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 948, + 509, + 959 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/7d95d4e0c853cf4b1f60bc54e5c072f28cc55ffbe3b127b5f23a52804fc085a3.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 181, + 193, + 251, + 253 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/276d498c8b35bd622999f93ede46c6376c1e6ae0da8f7f935808bb94ea8a51eb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 290, + 167, + 370, + 229 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/2de808d68973efcac5e8a3354c748fe3ea790a0ff2c616c8134188b2d11eb240.jpg", + "image_caption": [ + "Angry" + ], + "image_footnote": [], + "bbox": [ + 297, + 231, + 364, + 292 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/a23da8b93796b929ea4d94a3e3c4c7f0a0203eea5feeb7e4931e834262e614c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 385, + 166, + 457, + 229 + ], + "page_idx": 26 + }, + { + "type": "image", + 
"img_path": "images/cbbc7e48e11b5be92f45ac8ec8f66a13132b18450abc71f359be38d493ec3399.jpg", + "image_caption": [ + "Sad" + ], + "image_footnote": [], + "bbox": [ + 387, + 231, + 455, + 292 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/1a86d5a590456c96c017085ebd4f3fc3a201a35b8daa14450e8ffe5ba8260b71.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 470, + 167, + 540, + 229 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/4ab3f295a539573f747f98eb8c9c8abaeaa1ef960ae8e4da6f69ab46df56a7db.jpg", + "image_caption": [ + "Surprised." + ], + "image_footnote": [], + "bbox": [ + 470, + 231, + 539, + 292 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/1ecfed423ad4a0f1d5f5984c6ae0fa6b17bcb68da8228962a3b2546dca0be1fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 555, + 167, + 625, + 229 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/b00237405d55caad0d1eb62c0ff1d4f6b998bab2700b88e7c84fbb0f42ff7cf4.jpg", + "image_caption": [ + "With a tie" + ], + "image_footnote": [], + "bbox": [ + 557, + 231, + 625, + 292 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/52a30c3fe41652e5819f5c675c01446102dc48b8a4d5410e03c5d730c6a2f8aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 167, + 712, + 229 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/e953285210bd6961f5a25692380777ea14643fef569c7cee8d1ad209cbc82d4b.jpg", + "image_caption": [ + "With a hat" + ], + "image_footnote": [], + "bbox": [ + 642, + 232, + 710, + 292 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/7c6416cacada0f363c74923d059d762f42f7fa81c8e6134ffcc49317e8dbd473.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 727, + 167, + 799, + 229 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/218f53aea5a76db39fd0d65aef00584e62e5e7912698f0a033e308a67ee0a04c.jpg", + 
"image_caption": [ + "Holds a heart" + ], + "image_footnote": [], + "bbox": [ + 728, + 231, + 799, + 292 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/b9f9bd911cb1957027d12e3b090af697c37b59181289ccb50f80cee31330b6b7.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 187, + 339, + 243, + 400 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/0cfe742020d4be9653c32c689fe0520e25d939459625bede671046f89193faf3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 305, + 316, + 361, + 381 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/57f9ece2d348e3204fd6423b8d20ec463ecc08ba92f76ce108540459d4ec61ff.jpg", + "image_caption": [ + "Red lipstick" + ], + "image_footnote": [], + "bbox": [ + 305, + 383, + 361, + 443 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/92e10a8d74f6c521624fcc74a4b8e28d1e8fb87030d1f10d60ccbb212dff6d92.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 393, + 316, + 450, + 380 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/7581b475d6dcf90da7bd6fa9413ede0b044f6863fa3d9e1a3695aa3f1af6d9fc.jpg", + "image_caption": [ + "Angry" + ], + "image_footnote": [], + "bbox": [ + 393, + 383, + 447, + 443 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/4a05875513c127161cf9b870d83e5153519cc47fd10c5bad13c89d1509883371.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 475, + 316, + 532, + 380 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/bf02d084bbbf77ba0be32d3434cbe7de31fec00e72c05e560dc3c0bca58b9883.jpg", + "image_caption": [ + "Sleeping" + ], + "image_footnote": [], + "bbox": [ + 477, + 383, + 532, + 443 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/7b9347cbe39d0d97c13139b3de17ec6df2e7bf64a5bab4002cf6d422cec9fc58.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 563, + 316, + 620, + 
380 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/3aea532bfda61819fc360ccae368d08b872e40ac79f06476945ca16dbfd7f24a.jpg", + "image_caption": [ + "Surprised" + ], + "image_footnote": [], + "bbox": [ + 563, + 383, + 619, + 443 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/010ffae2b2f211937844d822b0babd84ef6efb410beaf8188198a6e13bfa9f20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 647, + 316, + 705, + 380 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/9f088aed1b7dc8ad66e9e39aba2e931fce0461cb5a475bedb0c96d115d629b41.jpg", + "image_caption": [ + "In love" + ], + "image_footnote": [], + "bbox": [ + 648, + 383, + 704, + 443 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/f2b19119c788128e29094df8d10d1a574724b25cfc486370a972379a55b18cd6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 733, + 316, + 799, + 380 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/2bfeef54da08f0178f117b2c56f5c9ecb12ef22da5eebaff8bf75642fb9dc6ae.jpg", + "image_caption": [ + "With a Bow tie" + ], + "image_footnote": [], + "bbox": [ + 733, + 383, + 792, + 444 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/3af46c536e3b680793c22d23188c24209d258d9174de5043a71e0944bf90f59d.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 194, + 505, + 241, + 585 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/d9910f99497bbc91d306d760fdbe1d4286f2d29eabcfdf2daa82e2b3b82e8f93.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 305, + 470, + 357, + 550 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/2516fe1d758138ca6bb47524f425cd2f624454987a08cfeb2199eb642b37f093.jpg", + "image_caption": [ + "Joker" + ], + "image_footnote": [], + "bbox": [ + 305, + 551, + 357, + 630 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": 
"images/e675f956d2f59b26068cf4053927512bb30db85673e186c7a12d67b2aa355683.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 392, + 470, + 447, + 550 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/9bd3cefd9899741a597eee8a1fbfe3aea4effa552ae35e078e0563eaca248ec7.jpg", + "image_caption": [ + "Boxer" + ], + "image_footnote": [], + "bbox": [ + 390, + 551, + 447, + 628 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/05082b2adb69a6b6bb88a98db7b6186644cc6b085e777bbf10603e660da1dbc3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 478, + 470, + 529, + 550 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/84636665fb3b5f8cd3de3e0cd6c743a7fd63ab3d0fccf99f86f805f718aa2284.jpg", + "image_caption": [ + "Ghost" + ], + "image_footnote": [], + "bbox": [ + 480, + 551, + 531, + 628 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/83c902450a22854385c5e31818e745351f4b7f6d3ff2ecd279b78de0111227cb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 568, + 470, + 620, + 550 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/3555ccd6a6084dd2951bc0305422424cd0ac83f7bc2f4213d1d265ae66fa817a.jpg", + "image_caption": [ + "Rainbow" + ], + "image_footnote": [], + "bbox": [ + 573, + 551, + 619, + 628 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/463d9661aa179622d18b18b9f040838b35ba40015ca0ba07e7b9d51b07fb2a3a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 648, + 470, + 712, + 550 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/adb0e20bacd650f456bc9f50ed5b3f9b0dc17be6f324a9ba4410ad16529d8cda.jpg", + "image_caption": [ + "Devil" + ], + "image_footnote": [], + "bbox": [ + 647, + 551, + 712, + 628 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/a922d1b2d8aba963c891aba744ba149a73ef6413f15f71d7ba702a0e4ec0843f.jpg", + "image_caption": [], + 
"image_footnote": [], + "bbox": [ + 736, + 470, + 799, + 550 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/fc03d2f31ac24839c568e5a97761c70f4b4c4f863b8cb8450d39bf5c5bfe19a7.jpg", + "image_caption": [ + "Angel" + ], + "image_footnote": [], + "bbox": [ + 730, + 551, + 800, + 628 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/b909c31fdb8750d6c6c1777c8da501ecd514a6ddd05de77ed03864a01461fcd3.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 187, + 681, + 245, + 765 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/b453684b49416b7a2aefa7c7f21523a0a83a79df0ad163bca8948a6e9ca8e0d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 303, + 654, + 366, + 718 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/f702c2d2c84b64f1d4a2038431cd3c5ba4b64f9b78a19c4f6dbe61f7f9970649.jpg", + "image_caption": [ + "Raising left hand", + "Figure 21: comparison to Text2LIVE (Bar-Tal et al., 2022). For each input image, the bottom row corresponds to images generated by our model, and the top row corresponds to images generated by the Text2LIVE model." 
+ ], + "image_footnote": [], + "bbox": [ + 305, + 719, + 361, + 795 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/6959308c2bca6de1ceebd4ea567de01285b1bd9eae92ebc5b6dc1ac669633b74.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 392, + 654, + 446, + 718 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/ed2f83303f87a785d390431fdb2a8a17fe45b8d0dbf384b82c9494cf133d45c9.jpg", + "image_caption": [ + "Blue pants" + ], + "image_footnote": [], + "bbox": [ + 390, + 720, + 446, + 795 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/631278d8432b0e6a938afc3655a8e35a2050340fd294be5ab296d3eb2285664e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 473, + 654, + 529, + 718 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/abba2c262cc298f2c7009964db6cff36e1dda32deede4b015d15fd87906e22ad.jpg", + "image_caption": [ + "Blackshirt" + ], + "image_footnote": [], + "bbox": [ + 473, + 719, + 529, + 795 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/880873679cefc93e2480f07e3ea687bb537cdfe36b3d5519d5a94eb58df46bae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 549, + 654, + 642, + 718 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/7d9b86ff58d570bb7971130f3d6d15e0cfe62217aa7e258105121a9df795f7d0.jpg", + "image_caption": [ + "Sitting" + ], + "image_footnote": [], + "bbox": [ + 558, + 719, + 617, + 795 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/71a302b0d07a62c85ac84b659375fbb915f4c6c3d1a14ba06a861b30fc981690.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 654, + 712, + 718 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/3778a814044c9c8d6383536ea1a436a1275efe16261aa768e161674319be9f1b.jpg", + "image_caption": [ + "With a tie" + ], + "image_footnote": [], + "bbox": [ + 658, + 719, + 710, + 795 + ], + "page_idx": 26 + }, + { 
+ "type": "image", + "img_path": "images/326f61c25b54aa3b466733c3edac9ae344ec59d7b178baa594395f55321ff85a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 738, + 654, + 800, + 718 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/0917f7eaf2d8680964deb102d161e938ba8209850f1de97bf9e2e327de708bdb.jpg", + "image_caption": [ + "Holds a heart" + ], + "image_footnote": [], + "bbox": [ + 738, + 719, + 800, + 795 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/9d4ae41908158cd6c87ece00b8dda5786c6a627cbf87f0c8dbd3973e8a335396.jpg", + "image_caption": [ + "Figure 22: comparison to Textual Inversion (Gal et al., 2022). For each input image, the bottom row corresponds to images generated by our model, and the top row corresponds to images generated by the Textual Inversion model." 
+ ], + "image_footnote": [], + "bbox": [ + 176, + 148, + 821, + 824 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 27 + } +] \ No newline at end of file diff --git a/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_model.json b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_model.json new file mode 100644 index 0000000000000000000000000000000000000000..09639cfc8868ec16501ec1859d71a69002abb655 --- /dev/null +++ b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_model.json @@ -0,0 +1,7230 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.147 + ], + "angle": 0, + "content": "KNN-DIFFUSION: IMAGE GENERATION VIA LARGE-SCALE RETRIEVAL" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.17, + 0.696, + 0.2 + ], + "angle": 0, + "content": "Shelly Sheynin*, Oron Ashual*, Adam Polyak, Uriel Singer, Oran Gafni, Eliya Nachmani, Yaniv Taigman" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.2, + 0.51, + 0.213 + ], + "angle": 0, + "content": "*Equal Contribution Meta AI" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.214, + 0.443, + 0.227 + ], + "angle": 0, + "content": "{shellysheynin, oron}@meta.com" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.258, + 0.819, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.466, + 0.825, + 0.493 + ], + "angle": 0, + "content": "Figure 1: (a) Samples of stickers generated from text inputs, (b) Semantic text-guided 
manipulations applied to the \"Original\" image without using edit masks." + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.505, + 0.547, + 0.519 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.535, + 0.768, + 0.73 + ], + "angle": 0, + "content": "Recent text-to-image models have achieved impressive results. However, since they require large-scale datasets of text-image pairs, it is impractical to train them on new domains where data is scarce or not labeled. In this work, we propose using large-scale retrieval methods, in particular, efficient \\( k \\)-Nearest-Neighbors (kNN), which offers novel capabilities: (1) training a substantially small and efficient text-to-image diffusion model using only pre-trained multi-modal embeddings, but without an explicit text-image dataset, (2) generating out-of-distribution images by simply swapping the retrieval database at inference time, and (3) performing text-driven local semantic manipulations while preserving object identity. To demonstrate the robustness of our method, we apply our kNN approach on two state-of-the-art diffusion backbones, and show results on several different datasets. As evaluated by human studies and automatic metrics, our method achieves state-of-the-art results compared to existing approaches that train text-to-image generation models using images-only dataset." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.755, + 0.338, + 0.77 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Large-scale generative models have been applied successfully to image generation tasks (Gafni et al., 2022; Ramesh et al., 2021; Nichol et al., 2021; Sahara et al., 2022; Yu et al., 2022), and have shown outstanding capabilities in extending human creativity using editing and user control. 
However, these models face several significant challenges: (i) Large-scale paired data requirement. To achieve high-quality results, text-to-image models rely heavily on large-scale datasets of (text, image) pairs collected from the internet. Due to the requirement of paired data, these models cannot be applied to new or customized domains with only unannotated images. (ii) Computational cost and efficiency. Training these models on highly complex distributions of natural images usually requires scaling the size of the model, data, batch-size, and training time, which makes them challenging to train and less accessible to the community. Recently, several works proposed text-to-image models" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.177 + ], + "angle": 0, + "content": "trained without an explicit paired text-image datasets. Liu et al. (2021) performed a direct optimization to a pre-trained model based on a CLIP loss (Radford et al., 2021). Such approaches are time-consuming, since they require optimization for each input. Zhou et al. (2021) proposed training with CLIP image embedding perturbed with Gaussian noise. However, to achieve high-quality results, an additional model needs to be trained with an annotated text-image pairs dataset." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.181, + 0.827, + 0.336 + ], + "angle": 0, + "content": "In this work, we introduce a novel generative model, kNN-Diffusion, which tackles these issues and progresses towards more accessible models for the research community and other users. 
Our model leverages a large-scale retrieval method, \\(k\\)-Nearest-Neighbors (kNN) search, in order to train the model without an explicit text-image dataset. Specifically, our diffusion model is conditioned on two inputs: (1) image embedding (at training time) or text embedding (at inference), extracted using pre-trained CLIP encoder, and (2) kNN embeddings, representing the \\(k\\) most similar images in the CLIP latent space. During training, we assume that no paired text is available, hence condition only on CLIP image embedding and on \\(k\\) additional image embeddings, selected using the retrieval model. At inference, only text inputs are given, so instead of image embeddings, we use the text embedding that shares a joint embedding space with the image embeddings. Here, the kNN image embeddings are retrieved using the text embeddings." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.34, + 0.828, + 0.441 + ], + "angle": 0, + "content": "The additional kNN embeddings have three main benefits: (1) they extend the distribution of conditioning embeddings and ensure the distribution is similar in train and inference, thus helping to bridge the gap between the image and text embedding distributions (see Fig. 5); (2) they teach the model to learn to generate images from a target distribution by using samples from that distribution. This allows generalizing to different distributions at test time and generating out-of-distribution samples; (3) they hold information that does not need to be present in the model, which allows it to be substantially smaller. We demonstrate the effectiveness of our kNN approach in Sec. 4." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.445, + 0.827, + 0.571 + ], + "angle": 0, + "content": "To assess the performance of our method, we train our model on two large-scale datasets: the Public Multimodal Dataset (Singh et al., 2021) and an image-only stickers dataset collected from the Internet. 
We show state-of-the-art zero-shot results on MS-COCO (Lin et al., 2014), LN-COCO (Pont-Tuset et al., 2020) and CUB (Wah et al., 2011). To further demonstrate the advantage of retrieval methods in text-to-image generation, we train two diffusion backbones using our kNN approach: continuous (Ramesh et al., 2022) and discrete (Gu et al., 2021). In both cases we outperform the model trained without kNN. In comparison to alternative methods presented in Sec. 4, we achieve state-of-the-art results in both human evaluations and FID score, with only 400 million parameters and 7 seconds inference time." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.577, + 0.827, + 0.706 + ], + "angle": 0, + "content": "Lastly, we introduce a new approach for local and semantic manipulations that is based on CLIP and kNN, without relying on user-provided masks. Specifically, we fine-tune our model to perform local and complex modifications that satisfies a given target text prompt. For example, given the teddy bear's image in Fig. 4, and the target text \"holds a heart\", our method automatically locates the local region that should be modified and synthesizes a high-resolution manipulated image in which (1) the teddy bear's identity is accurately preserved and (2) the manipulation is aligned with the target text. We demonstrate our qualitative advantage by comparing our results with two state-of-the-art models, Text2Live (Bar-Tal et al., 2022) and Textual Inversion (Gal et al., 2022), that perform image manipulations without masks (Fig. 4, 21 and 22)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.709, + 0.827, + 0.81 + ], + "angle": 0, + "content": "We summarize the contributions of this paper as follows: (1) We propose kNN-Diffusion, a novel and efficient model that utilizes a large-scale retrieval method for training a text-to-image model with only pre-trained multi-modal embeddings, but without an explicit text-image dataset. 
(2) We demonstrate efficient out-of-distribution generation, which is achieved by substituting retrieval databases. (3) We present a new approach for local and semantic image manipulation, without utilizing masks. (4) We evaluate our method on two diffusion backbones, discrete and continuous, as well as on several datasets, and present state-of-the-art results compared to baselines." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.827, + 0.348, + 0.843 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.863, + 0.825, + 0.921 + ], + "angle": 0, + "content": "Text-to-image models. Text-to-image generation is a well-studied task that focuses on generating images from text descriptions. While GANs (Xu et al., 2018; Zhu et al., 2019; Zhang et al., 2021) and Transformer-based methods (Ramesh et al., 2021; Gafni et al., 2022; Yu et al., 2022; Ding et al., 2021) have shown remarkable results, recently impressive results have been attained with dis" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.102, + 0.803, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.401, + 0.825, + 0.416 + ], + "angle": 0, + "content": "Figure 2: Qualitative comparisons with baselines. Nearest Neighbor is the first kNN of the text in PMD dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.424, + 0.825, + 0.648 + ], + "angle": 0, + "content": "crete (Gu et al., 2021) and continuous (Nichol et al., 2021; Sahara et al., 2022; Ramesh et al., 2022; Rombach et al., 2022) diffusion models. 
Most recent works trained diffusion models conditioned on text embeddings extracted using a pre-trained text encoder (Saharia et al., 2022; Yu et al., 2022) or image embedding extracted using CLIP (Ramesh et al., 2022). While producing impressive results, all previous works described above are supervised and trained with paired text-image datasets. Several works have proposed training text-to-image models without an explicit text-image dataset. FuseDream (Liu et al., 2021) proposed a direct optimization to a pre-trained generative model based on CLIP loss. This method relies on a pre-trained GAN and requires a time-consuming optimization process for each image. LAFITE (Zhou et al., 2021) recently demonstrated text-to-image generation results without requiring paired text-image datasets. Here, the CLIP embeddings are used interchangeably at train and test to condition a GAN-based model. The joint text-image embedding enables inference given a text input, whereas in training the model is fed with the visual embedding only. However, the gap between the text and image distributions in the joint embeddings space leads to results with substantially lower quality, as we show in our experiments. To overcome this gap, LAFITE added noise to the image embeddings during training. Our remedy to this gap is to condition the model on the retrieval of an actual image embeddings, using a text-image joint space." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.656, + 0.827, + 0.922 + ], + "angle": 0, + "content": "Retrieval for generation. The Information Retrieval (IR) literature tackles the challenge of retrieving a small amount of information from a large database, given a user's query. A simple, yet efficient retrieval mechanism is to retrieve the \\( K \\) nearest neighbors (kNN) between the query and the entities in the database in some pre-calculated embedding space (Bijalwan et al., 2014). 
The database allows the model to leverage extensive world-knowledge for its specific task Borgeaud et al. (2021). Recently, language models were augmented with a memory component, allowing them to store representations of past inputs (Wu et al., 2022). The latter were then queried using a lookup operation, improving performance in various benchmarks and tasks. Retrieval models have been used for various tasks in learning problems, for example, language modeling (Borgeaud et al., 2021), machine translation (Gu et al., 2018), question answering (Lee et al., 2019) and image generation (Tseng et al., 2020; Qi et al., 2018). RetrieveGAN (Tseng et al., 2020) uses a differentiable retrieval module for image generation from a scene description, RetrieveFuse (Siddiqui et al., 2021) proposed a neural 3D scene reconstruction based on a retrieval system. SIMS (Qi et al., 2018) proposed generating an image using semantic layout and compatible image segments that are retrieved from image segments database, and (Iskakov, 2018) showed that the use of retrieval database in inpainting task significantly boosts visual quality. In this work we utilize the kNN retrieval mechanism over the shared text-image embedding space, CLIP (Radford et al., 2021). Using extensive ablation studies, we show the importance of the retrieval model both for training and inference, and demonstrate its large impact on performance. 
kNN-Diffusion significantly outperforms prior work" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.208, + 0.104, + 0.791, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.284, + 0.825, + 0.351 + ], + "angle": 0, + "content": "Figure 3: The overall framework of our kNN-Diffusion model. In both training and inference, the decoder is conditioned on CLIP embedding, and kNN image embeddings. During training, we condition the model on image CLIP embedding, and its kNN image embeddings extracted using the retrieval method. At inference time, given an input text, the kNN image embeddings are retrieved based on the CLIP text embedding that shares a joint embedding space with the image embedding." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.366, + 0.825, + 0.396 + ], + "angle": 0, + "content": "with zero-shot FID of 12.5, including RDM (Blattmann et al., 2022)(with FID of 22.1), a concurrent work which similarly to our approach, proposes conditioning LDM (Rombach et al., 2022) on kNN." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.405, + 0.827, + 0.558 + ], + "angle": 0, + "content": "Multi-modal feature learning. Learning a joint and aligned feature space for several modalities is challenging, as it requires alignment between the modalities (paired datasets), whose distributions may vary. Specifically, the joint feature space of vision-and-language has been a long-standing problem. CLIP (Radford et al., 2021) successfully tackled this by leveraging contrastive learning over a large dataset of text-image pairs. 
BLIP (Li et al., 2022), (Mu et al., 2021) and FLAVA (Singh et al., 2021), followed this idea and further improved the joint representation. The joint representation was shown to hold a strong semantic alignment between the two modalities, enabling image generation (Liu et al., 2021; Wang et al., 2022), image manipulation (Patashnik et al., 2021; Avrahami et al., 2022b), and image captioning (Mokady et al., 2021). In this work we leverage the joint representation in two ways: (i) enabling textless training with only visual data, while using text at inference time, and (ii) creating an efficient embedding space for the use of the retrieval model." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.578, + 0.285, + 0.593 + ], + "angle": 0, + "content": "3 METHOD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.608, + 0.825, + 0.887 + ], + "angle": 0, + "content": "Our main goal is to facilitate language-guided generation of user-specified concepts while using an images-only dataset during training. A possible way to achieve this goal is to use a shared text-image encoder that will map text-image pairs into the same latent space, thus allowing training with an image embedding, and inferring from text embedding. A candidate for this encoder is CLIP, which has been trained with a contrastive loss on a large-scale dataset of text-image pairs. However, as we show quantitatively in Tab. 1, 2 and qualitatively in Fig. 15, 16, 5, CLIP embeddings alone cannot accurately bridge the gap between the text and image distributions. In order to reduce this gap, several methods have been proposed. The closest work to ours is LAFITE, which perturbs the CLIP image embedding with adaptive Gaussian noise. Under the assumption that there is a large paired text-image dataset, Ramesh et al. (2022) have proposed a prior that is used during inference, and is trained to generate possible CLIP image embeddings from a given text caption. 
In this regard, we propose using a large-scale and non-trainable image embedding index as an integral part of the diffusion process. Our method, kNN-Diffusion, assumes that only image data and a pre-trained multi-modal text-image encoder are provided during training. As shown in Fig. 3, our model is comprised of three main components: (1) A multi-modal text-image encoder (CLIP); (2) A retrieval model - A data structure containing image embeddings, which is indexed for a fast kNN search; (3) An image generation network - A trainable diffusion-based image generation model, conditioned on the projected retrievals. For both training and inference, the image generation network is conditioned on \\( K \\) additional image embeddings, chosen using the retrieval model to ensure a similar distribution of the condition in training and inference. The following sections describe these components." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Retrieval model. 
Our retrieval model has three non-trainable modules: a pre-trained text encoder \\( f_{txt} \\) (CLIP text encoder), a pre-trained image encoder \\( f_{img} \\) (CLIP image encoder) and" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.208, + 0.117, + 0.255, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.183, + 0.25, + 0.191 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.208, + 0.192, + 0.258, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.258, + 0.25, + 0.266 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.206, + 0.269, + 0.26, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.321, + 0.25, + 0.33 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.118, + 0.326, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.292, + 0.183, + 0.315, + 0.191 + ], + "angle": 0, + "content": "Joker" + }, + { + "type": "image", + "bbox": [ + 0.279, + 0.192, + 0.329, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.282, + 0.258, + 0.325, + 0.266 + ], + "angle": 0, + "content": "Black shirt" + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.271, + 0.331, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.282, + 0.321, + 0.326, + 0.33 + ], + "angle": 0, + "content": "With a hat" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.118, + 0.387, + 0.182 + ], + "angle": 0, + "content": null 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.349, + 0.183, + 0.374, + 0.191 + ], + "angle": 0, + "content": "Boxer" + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.192, + 0.387, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.342, + 0.258, + 0.385, + 0.266 + ], + "angle": 0, + "content": "Blue pants" + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.271, + 0.388, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.356, + 0.321, + 0.375, + 0.33 + ], + "angle": 0, + "content": "Sad" + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.118, + 0.457, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.416, + 0.183, + 0.443, + 0.191 + ], + "angle": 0, + "content": "Angel" + }, + { + "type": "image", + "bbox": [ + 0.402, + 0.192, + 0.454, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.403, + 0.258, + 0.456, + 0.266 + ], + "angle": 0, + "content": "Holds a heart" + }, + { + "type": "image", + "bbox": [ + 0.402, + 0.271, + 0.456, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.41, + 0.321, + 0.45, + 0.33 + ], + "angle": 0, + "content": "Surprised" + }, + { + "type": "image_caption", + "bbox": [ + 0.461, + 0.106, + 0.481, + 0.115 + ], + "angle": 0, + "content": "Durs" + }, + { + "type": "image", + "bbox": [ + 0.468, + 0.118, + 0.505, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.469, + 0.183, + 0.505, + 0.191 + ], + "angle": 0, + "content": "Rainbow" + }, + { + "type": "image", + "bbox": [ + 0.462, + 0.192, + 0.508, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.471, + 0.258, + 0.5, + 0.267 + ], + "angle": 0, + "content": "Sitting" + }, + { + "type": "image", + "bbox": [ + 0.462, + 0.271, + 0.513, + 0.316 + ], + "angle": 0, + 
"content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.473, + 0.32, + 0.499, + 0.33 + ], + "angle": 0, + "content": "Angry" + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.118, + 0.574, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.536, + 0.183, + 0.558, + 0.191 + ], + "angle": 0, + "content": "Devil" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.192, + 0.57, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.526, + 0.258, + 0.568, + 0.266 + ], + "angle": 0, + "content": "With a tie" + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.271, + 0.574, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.521, + 0.32, + 0.575, + 0.329 + ], + "angle": 0, + "content": "Holds a heart" + }, + { + "type": "image", + "bbox": [ + 0.583, + 0.118, + 0.627, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.594, + 0.183, + 0.618, + 0.191 + ], + "angle": 0, + "content": "Ghost" + }, + { + "type": "image", + "bbox": [ + 0.583, + 0.192, + 0.631, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.574, + 0.258, + 0.64, + 0.266 + ], + "angle": 0, + "content": "Raising left hand" + }, + { + "type": "image", + "bbox": [ + 0.581, + 0.271, + 0.634, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.588, + 0.32, + 0.626, + 0.329 + ], + "angle": 0, + "content": "With a tie" + }, + { + "type": "image_caption", + "bbox": [ + 0.659, + 0.106, + 0.706, + 0.115 + ], + "angle": 0, + "content": "Text2LIVE" + }, + { + "type": "image", + "bbox": [ + 0.643, + 0.118, + 0.72, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.67, + 0.183, + 0.695, + 0.191 + ], + "angle": 0, + "content": "Ghost" + }, + { + "type": "image", + "bbox": [ + 0.643, + 0.192, + 0.71, 
+ 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.65, + 0.258, + 0.715, + 0.266 + ], + "angle": 0, + "content": "Raising left hand" + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.269, + 0.712, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.662, + 0.32, + 0.702, + 0.329 + ], + "angle": 0, + "content": "With a tie" + }, + { + "type": "image_caption", + "bbox": [ + 0.72, + 0.106, + 0.793, + 0.115 + ], + "angle": 0, + "content": "Textual Inversion" + }, + { + "type": "image", + "bbox": [ + 0.738, + 0.118, + 0.778, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.745, + 0.183, + 0.771, + 0.191 + ], + "angle": 0, + "content": "Ghost" + }, + { + "type": "image", + "bbox": [ + 0.733, + 0.192, + 0.779, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.724, + 0.258, + 0.79, + 0.266 + ], + "angle": 0, + "content": "Raising left hand" + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.271, + 0.79, + 0.318 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.737, + 0.32, + 0.776, + 0.329 + ], + "angle": 0, + "content": "With a tie" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.346, + 0.825, + 0.386 + ], + "angle": 0, + "content": "Figure 4: Results for text-guided image manipulations without using masks. The original image is shown in the left column, our manipulated images are shown in the center. The images of Bar-Tal et al. (2022); Gal et al. (2022) were generated using the authors' official code. The full comparison is available in the supplement." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.398, + 0.825, + 0.599 + ], + "angle": 0, + "content": "an index \\(\\mathcal{H}\\). The encoders map text descriptions and image samples to a joint multi-modal \\(d\\)-dimensional feature space \\(\\mathbb{R}^d\\). 
The index stores an efficient representation of the images database - \\(\\mathcal{H} := \\{f_{img}(i) \\in \\mathbb{R}^d | i \\in \\mathcal{I}\\}\\) where \\(\\mathcal{I}\\) denotes the dataset of images. During training, we use the index to efficiently extract the \\(k\\) nearest neighbors in the feature space of the image embedding \\(f_{img}(\\mathbf{I}) \\in \\mathbb{R}^d - \\mathrm{knn}_{img}(\\mathbf{I}, k) := \\arg \\min_{h \\in \\mathcal{H}}^k \\mathbf{s}(f_{img}(\\mathbf{I}), h)\\) where \\(\\mathbf{s}\\) is a distance function and \\(\\arg \\min_k^k\\) output the minimal \\(k\\) elements. The set \\(\\{f_{img}(\\mathbf{I}), \\mathrm{knn}_{img}(\\mathbf{I}, k)\\}\\) is used as the condition to the generative model. During inference, given a query text \\(t\\), an embedding \\(f_{txt}(t)\\) is extracted. The generative model is conditioned on this embedding and its \\(k\\) nearest neighbors from the database - \\(\\mathrm{knn}_{txt}(t, k) := \\arg \\min_{h \\in \\mathcal{H}}^k \\mathbf{s}(f_{txt}(t), h)\\). During training, we add embeddings of real images, by applying the retrieval method to the input image embedding. The extracted kNN should have a large enough distribution to cover the potential text embedding. During inference, the kNN are retrieved using the text embedding (See Fig. 17). In all of our experiments we use the cosine similarity metric as the distance function \\(\\mathbf{s}\\), \\(k = 10\\) for the number of nearest neighbors and \\(d = 512\\). The full implementation details can be found in Sec. 6.6 in the supplement." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.609, + 0.825, + 0.734 + ], + "angle": 0, + "content": "Image generation network. In order to demonstrate the robustness of our method, we apply our kNN approach on two different diffusion backbones: Discrete (Gu et al., 2021) and Continuous (Nichol et al., 2021; Sohl-Dickstein et al., 2015; Ho et al., 2020; Dhariwal & Nichol, 2021). 
Although very different in practice, these models share the same theoretical idea. Let \\( x_0 \\sim q(x_0) \\) be a sample from our images distribution. A forward diffusion process is a Markov chain that adds noise at each step \\( q(x_n|x_{n-1}) \\). The reverse process, \\( p_\\theta(x_{n-1}|x_n,x_0) \\), is a denoising process that removes noise from an initialized noise state. At inference time, the model can generate an output, starting with noise and gradually removing it using \\( p_\\theta \\). For additional background on diffusion models please refer to Sec. 6.1 in the supplement." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.734, + 0.825, + 0.833 + ], + "angle": 0, + "content": "In the discrete diffusion model, \\( q(x_{n}|x_{n - 1})\\coloneqq v^{T}(x_{n})\\mathbf{Q}_{n}v(x_{n - 1}) \\) where \\( v(x_{n}) \\) is a one-hot vector with entry 1 at \\( x_{n} \\), and \\( \\mathbf{Q}_n \\) is a transition matrix, modeling the probability to move from state \\( x_{n - 1} \\) to \\( x_{n} \\), using uniform probability over the vocabulary and a pre-defined probability for additional special [MASK] token. We can compute the reverse transition distribution according to: \\( p_{\\theta}(x_{n - 1}|x_n,y)\\coloneqq \\sum_{\\hat{x}_0 = 1}^k q(x_{n - 1}|x_n,\\hat{x_0})p_\\theta (\\hat{x_0} |x_n,x_0,y) \\) where \\( x_0 \\) is a discrete vector, tokenized by the VQGAN (Esser et al., 2021) encoder and \\( y \\) is the conditioning signal. For modeling \\( p_{\\theta} \\) we have followed (Gu et al., 2021) and used a conditional Transformer (Vaswani et al., 2017)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.904 + ], + "angle": 0, + "content": "In the continuous diffusion model, \\( q(x_{n}|x_{n - 1}) \\coloneqq \\mathcal{N}(x_{n};\\sqrt{\\alpha_{t}} x_{n - 1},(1 - \\alpha_{n})x_{0}) \\) and \\( p_{\\theta}(x_{n - 1}|x_n,y)\\coloneqq \\mathcal{N}(\\mu_\\theta (x_n,y),\\Sigma_\\theta (x_n,y)) \\). 
Here, the noise function is Gaussian noise. Following (Ho et al., 2020; Nichol et al., 2021) we trained a model \\( \\epsilon_{\\theta} \\) to predict the added noise using a standard mean-squared error loss: \\( L\\coloneqq E_{n\\sim [1,N],x_0\\sim q(x_0),\\epsilon \\sim \\mathcal{N}(0,\\mathbf{I})}[||\\epsilon -\\epsilon_{\\theta}(x_n,n,y)||^2] \\) where \\( \\epsilon_{\\theta} \\) is a U-net model and \\( y \\) is the conditioning signal." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.106, + 0.805, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.233, + 0.825, + 0.272 + ], + "angle": 0, + "content": "Figure 5: tSNE visualization of 500 random text-image CLIP embeddings pairs taken from COCO validation. The leftmost figure demonstrates the gap between the text and image distributions. By gradually adding kNN to the mean CLIP embedding of the text, the gap decreases, demonstrating the importance of the kNN." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.299, + 0.462, + 0.419 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.429, + 0.462, + 0.467 + ], + "angle": 0, + "content": "Figure 6: FID on MS-COCO, including models trained on image-only datasets and text-image datasets." + }, + { + "type": "table_caption", + "bbox": [ + 0.475, + 0.279, + 0.832, + 0.343 + ], + "angle": 0, + "content": "Table 1: Results for zero-shot Text-to-Image generation on the MS-COCO, CUB and LN-COCO test sets. Image-quality and Text-alignment report the percentage of majority human raters votes in favor of our method when comparing between a certain model and ours." 
+ }, + { + "type": "table", + "bbox": [ + 0.484, + 0.355, + 0.839, + 0.457 + ], + "angle": 0, + "content": "
ModelMS-COCOCUBLN-COCO
FID↓Im. qual.Txt align.FID↓Im. qual.Txt align.FID↓Im. qual.Txt align.
LAFITE26.972.165.389.774.059.642.868.461.9
FuseDream21.264.079.350.279.160.937.571.159.0
no-kNN32.870.868.395.181.061.265.061.459.8
Ours12.5--42.9--35.6--
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.489, + 0.825, + 0.613 + ], + "angle": 0, + "content": "In both cases, we condition our model on \\( y = (f_{img}(x_0), \\mathrm{knn}_{img}(x_0, k)) \\) where \\( f_{img}(x_0) \\) is the CLIP image embedding, \\( \\mathrm{knn}_{img}(x_0, k) \\) is the \\( k \\) nearest neighbors in the feature space of the image embedding. Following (Ramesh et al., 2022; Rombach et al., 2022) conditional injection, we condition our model on the image CLIP embedding, and the kNN clip embeddings by applying cross attention in the attention layers of the architecture. We sample both our models using Classifier Free Guidance (CFG) (Nichol et al., 2021; Ho & Salimans, 2021). Since CFG was originally proposed for continuous models, we propose a method for using it with discrete models as well. Full implementation details of the discrete and continuous models can be found in Sec. 6.7 and Sec. 6.8, respectively, in the supplement." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.632, + 0.46, + 0.644 + ], + "angle": 0, + "content": "3.1 TEXT-ONLY IMAGE MANIPULATION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.653, + 0.825, + 0.752 + ], + "angle": 0, + "content": "The majority of previous works in the task of image manipulation either rely on user-provided masks (Nichol et al., 2021; Avrahami et al., 2022b;a), or are limited to global editing (Crowson et al., 2022; Kim et al., 2022). Recently, several works (Bar-Tal et al., 2022; Hertz et al., 2022; Gal et al., 2022) have made progress with local manipulations without relying on user edited masks. Nevertheless, most of the techniques suffer from several shortcomings: (1) They enable local texture changes, yet cannot modify complex structures, (2) they struggle to preserve the identity of the object, for example, when manipulating humans, (3) they require optimization for each input." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.757, + 0.827, + 0.925 + ], + "angle": 0, + "content": "We address these issues by extending kNN-Diffusion to perform local and semantic-aware image manipulations without any provided mask. Illustration of the approach is provided in Fig. 18 and Fig. 19 in the supplement. For this task, the model is trained to predict the original image from a manipulated version. Specifically, we create a manipulated version of the image, which differs from the original image only in some local area. Given a random local area \\( M \\) in the image I, the manipulated image \\( \\mathrm{I}_{\\text{manip}} \\) is constructed by replacing the area with the corresponding nearest neighbor: \\( \\mathrm{I}_{\\text{manip}} = \\mathrm{I} \\cdot (1 - M) + \\mathrm{nn}_{img}(\\mathrm{I}, 1) \\cdot M \\), where \\( \\mathrm{nn}_{img}(\\mathrm{I}, 1) \\) is the the nearest neighbor obtained after aligning it with I using the ECC alignment algorithm (Evangelidis & Psarakis, 2008). The model then receives as input the manipulated image, together with the CLIP embedding of the original image only in the local area: \\( f_{img}(\\mathrm{I} \\cdot M) \\). This CLIP embedding represents the required modification that should be applied to the manipulated image in order to predict the original image. During inference, instead of using the CLIP embedding of the local area, the desired modification is" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.101, + 0.825, + 0.141 + ], + "angle": 0, + "content": "Table 2: Results on the stickers dataset. 
We report the percentage of human raters who prefer our method over the baselines with respect to image quality and text alignment.
ModelFID↓Ours DiscreteOurs Continuous
Image qualityText alignmentImage qualityText alignment
DALL-E2+ClipCap55.571.669.267.068.3
LAFITE58.763.559.976.071.2
no-kNN52.772.167.666.869.4
Ours40.8----
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.25, + 0.825, + 0.279 + ], + "angle": 0, + "content": "represented using the CLIP embedding of the user text query. We modified the model to be capable of receiving as a condition both the manipulated image and the CLIP embedding of the local area." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.298, + 0.329, + 0.314 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.322, + 0.827, + 0.395 + ], + "angle": 0, + "content": "First, we conduct qualitative and quantitative comparisons on MS-COCO, LN-COCO and CUB datasets. To further demonstrate the advantage of our method, we provide comparison on an image-only stickers dataset, where we apply our approach on two diffusion backbones. Next, we demonstrate image manipulation and out-of-distribution capabilities. Finally, to better assess the effect of each contribution, an ablation study is provided." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.4, + 0.825, + 0.51 + ], + "angle": 0, + "content": "Datasets and Metrics. For photo-realistic experiments, our model was trained only on the images (omitting the text) of a modified version of the Public Multimodal Dataset (PMD) used by FLAVA (Singh et al., 2021). More information about the dataset is available in Sec. 6.4 of the supplement. To further demonstrate the capabilities of our method, we collected 400 million sticker images from the web, containing combinations of concepts such as objects, characters/avatars and text. The collected stickers do not have paired text, and are substantially different from photorealistic data. Furthermore, since they have no paired text, they were not part of CLIP's training data, which makes the text-to-image generation task more challenging." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.511, + 0.827, + 0.609 + ], + "angle": 0, + "content": "Evaluation metrics are based on objective and subjective metrics: (i) FID (Heusel et al., 2017) is an objective metric used to assess the quality of synthesized images, (ii) human evaluation - we ask human raters for their preference, comparing two methods based on image quality and text alignment. We used 600 image pairs; five raters rated each pair. The results are shown as a percentage of majority votes in favor of our method over the baselines. We report the full human evaluation protocol in the supplement. We chose to omit Inception-Score, since it is shown by Barratt & Sharma (2018) to be a misleading metric for models that were not trained on Imagenet." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.625, + 0.524, + 0.64 + ], + "angle": 0, + "content": "4.1 QUALITATIVE AND QUANTITATIVE RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.647, + 0.827, + 0.926 + ], + "angle": 0, + "content": "We begin by comparing our model, trained on the PMD dataset, with the previous works LAFITE and FuseDream, that trained on image-only datasets. To demonstrate the advantage of using a retrieval method in text-to-image generation, we trained a model variant, no-kNN. This baseline was trained solely on image embeddings (omitting the kNN), while during inference, the images were generated using the text embedding. Tab. 1 displays zero-shot results on three different datasets: MS-COCO, CUB and LN-COCO. We follow the evaluation protocol of LAFITE, reporting our results on 30,000 images from MS-COCO validation set without training, nor using it's training partition in the kNN index. Similarly, we follow LAFITE for CUB and LN-COCO evaluation. As can be seen, our model achieves the lowest FID score in all scenarios. In addition, human evaluations rate our method as better aligned to text and with the highest images quality. In Fig. 
2, 15 and 11 we present a qualitative comparison between the methods. One can observe that while the simple retrieval baseline outputs non-generated images with high-quality, the images generated by our method are more faithful to the input text. To further demonstrate the effectiveness of our method, we present in Fig. 6 a comparison of our model with the latest text-to-image models trained on paired text-image datasets: DALL-E, CogView, VQ-Diffusion, GLIDE, LDM, Make-A-Scene, DALL-E2, Parti and Imagen. As can be seen, our model achieves comparable results to recent models trained with full text-image pairs (e.g LDM, GLIDE), despite being trained on an image-only dataset, with significantly lower computational costs. The results demonstrate that leveraging an external retrieval database allows to compensate for different trade-offs, in particular, reducing the number of parameters in the model. Additional samples are provided in Fig. 13 in the supplement." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.209, + 0.106, + 0.8, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.427 + ], + "angle": 0, + "content": "Figure 7: Comparison between various indexes used by the same model. (1) Aesthetic. Images from the first quantile of an aesthetic classifier, (2) Unaesthetic. Images from the last quantile of an aesthetic classifier, (3) Image search engine. Images retrieved from Google Images, (4) The stickers index." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.446, + 0.825, + 0.697 + ], + "angle": 0, + "content": "Text-to-sticker generation. 
As the sticker dataset does not have paired text, and is substantially different from photo-realistic data, it allows us to illustrate the advantage of our model on an image-only dataset. A selection of stickers generated by our model is presented in Fig. 1 and Fig. 14, 12. To demonstrate the importance of using kNN on image-only datasets, we evaluate our approach on two diffusion backbones. To this end, we trained a continuous diffusion model (Ramesh et al., 2022) and a discrete diffusion model (Gu et al., 2021), both conditioned on the kNN image embeddings. For each backbone, we compare our method with the following baselines: (1) no-kNN - this baseline was trained using both the continuous and the discrete methods conditioned only on image CLIP embedding, without using kNN. In the discrete case, we trained a VQ-diffusion model, while in the continuous case, we trained a re-implementation of DALL-E2's decoder (without prior). (2) DALL-E2+ClipCap baseline - here, we first captioned the entire sticker dataset using ClipCap (Mokady et al., 2021), then trained DALL-E2 decoder on the captioned dataset. (3) LAFITE - we trained LAFITE language-free model on our stickers dataset using the authors' published code. We present the results in Tab. 2. The FID is calculated over a subset of 3,000 stickers, generated from the ClipCap captioned dataset. As can be seen, our model achieves the lowest FID score. In addition, it outperforms all baselines in human evaluation comparison, using continuous and discrete backbones. In particular, compared with the same model trained without kNN, our model achieves significantly higher favorability in both text alignment and image quality." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.713, + 0.32, + 0.727 + ], + "angle": 0, + "content": "4.2 APPLICATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.739, + 0.825, + 0.92 + ], + "angle": 0, + "content": "Text-only image manipulation. 
We demonstrate the manipulation capabilities of our model in Fig. 1, 4 and 20. Furthermore, we qualitatively compare our model with Text2LIVE (Bar-Tal et al., 2022) and Textual Inversion (Gal et al., 2022), using the authors' published code. Text2LIVE proposed generating an edit layer that is composed over the original input, using a generator trained for each training image. Textual Inversion utilized the pre-trained Latent Diffusion model to invert the input image into a token embedding. The embedding is then used to compose novel textual queries for the generative model. Fig. 4 shows representative results, and the rest are included in Fig. 21 and 22 in the supplement. In contrast to our model, baseline methods lack text correspondence or they do not preserve the identity of the object. Since Text2LIVE is optimized to perform local changes, it has the difficulty changing the structure of the object (e.g. the \"raising his hand\" example in Fig. 4). Textual Inversion baseline changes the identity of the object because it struggles reconstructing the textual representation of the source image. Our model, on the other hand, can perform challenging manipulations that are aligned with the text, while preserving the object identity." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.103, + 0.378, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.234, + 0.381, + 0.286 + ], + "angle": 0, + "content": "Figure 8: Mean aesthetics score of the generated images as a function of the conditioned kNN mean aesthetics score." 
+ }, + { + "type": "image", + "bbox": [ + 0.396, + 0.103, + 0.61, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.391, + 0.234, + 0.612, + 0.298 + ], + "angle": 0, + "content": "Figure 9: MS-COCO test FID score on various K's in: (1) Zero-Shot (2) Index includes MS-COCO train subset. No kNN trained with kNN, but did not employ kNN in inference." + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.106, + 0.822, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.233, + 0.825, + 0.298 + ], + "angle": 0, + "content": "Figure 10: MS-COCO test FID score for different model sizes. As can be seen, adding kNN to the model allows it to be smaller, while having better performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.827, + 0.52 + ], + "angle": 0, + "content": "Out-of-distribution generation. Using the retrieval index as part of the generation process enables using different databases during inference, without fine-tuning. This allows generating images from distributions that were not part of the training set, enabling out-of-distribution generation. This novel capability is demonstrated with the same model trained on PMD, using three different retrieval databases: (i) A stickers database presented in Sec. 4. (ii) Aesthetic database: This database is constructed by filtering images according to a classifier score. Let \\( C \\) be a classifier that for each image \\( i \\in I \\) outputs a score \\( s = C(i) \\). This classifier enables filtering the kNN using \\( L \\leq s < H \\), where \\( L \\) and \\( H \\) are low and high thresholds, respectively. Here, we use an open source pre-trained aesthetics classifier \\( A \\) (Christoph Schuhmann, 2022): For each text input \\( t \\in T \\), we apply \\( A \\) on the kNN, and then divide the kNN into five equal quantiles based on \\( A \\) score. As can be seen in Fig. 
8, using kNN with higher aesthetics score result in generated images with higher aesthetics mean score. (iii) Image search engine: Generative models are stationary in the sense that they are unable to learn new concepts after being trained, hence fine-tuning is required to represent new styles and concepts. Here, we use an online image search engine, which allows the model to adapt to new data without additional fine-tuning. A qualitative comparison of all three methods is shown in Fig.7." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.537, + 0.345, + 0.551 + ], + "angle": 0, + "content": "4.3 ABLATION STUDY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.563, + 0.827, + 0.73 + ], + "angle": 0, + "content": "We conclude our experiments with an ablation study, to quantify the contribution of our different components. We provide ablation study on index size and different kNN conditioning approaches in Sec. 6.5 of the supplement. Number of nearest neighbors. The results in Fig. 9 demonstrate the importance of applying the retrieval mechanism during training and inference. Here, we evaluate our model, trained on PMD dataset, with different numbers of kNN during inference. Furthermore, we examined the baseline no-kNN, in which during inference, the model is conditioned only on the text embedding \\( f_{txt}(t) \\), without using kNN. Best performance is achieved using 10 neighbors. Scalability analysis. To evaluate the effectiveness of our approach at different model sizes, we trained three additional models with varying sizes for both settings - with and without kNN. As can be seen in Fig. 10, utilizing kNN consistently improves performance for all sizes. Furthermore, a performance improvement can be achieved using much smaller models with kNN. For example, the 35M kNN model outperforms the 400M model without kNN." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.75, + 0.321, + 0.765 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.777, + 0.827, + 0.918 + ], + "angle": 0, + "content": "\"We shall always find, that every idea which we examine is copied from a similar impression\", Hume (1748). In this paper, we propose using a large-scale retrieval method in order to train a novel text-to-image model, with only pre-trained multi-modal embeddings, but without an explicit text-image dataset. Our extensive experiments demonstrate that using an external knowledge-base alleviates much of the model's burden of learning novel concepts, enabling the use of a relatively small model. In addition, it provides the model the capability of learning to adapt to new samples, which it only observes during test time. Lastly, we present a new technique utilizing the retrieval method for text-driven semantic manipulations without user-provided masks. As evaluated by human studies and automatic metrics, our method is significantly preferable to the baselines in terms of image quality and text alignment." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.103, + 0.289, + 0.118 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.126, + 0.826, + 0.155 + ], + "angle": 0, + "content": "Omri Avrahami, Ohad Fried, and Dani Lischinski. Blended latent diffusion. arXiv preprint arXiv:2206.02779, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.164, + 0.826, + 0.207 + ], + "angle": 0, + "content": "Omri Avrahami, Dani Lischinski, and Ohad Fried. Blended diffusion for text-driven editing of natural images. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18208-18218, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.215, + 0.825, + 0.246 + ], + "angle": 0, + "content": "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.252, + 0.825, + 0.283 + ], + "angle": 0, + "content": "Artem Babenko and Victor Lempitsky. The inverted multi-index. IEEE transactions on pattern analysis and machine intelligence, 37(6):1247-1260, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.29, + 0.825, + 0.322 + ], + "angle": 0, + "content": "Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. arXiv preprint arXiv:2204.02491, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.328, + 0.824, + 0.357 + ], + "angle": 0, + "content": "Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.366, + 0.825, + 0.41 + ], + "angle": 0, + "content": "Vishwanath Bijalwan, Vinay Kumar, Pinki Kumari, and Jordan Pascual. Knn based machine learning approach for text and document mining. International Journal of Database Theory and Application, 7(1):61-70, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.417, + 0.825, + 0.449 + ], + "angle": 0, + "content": "Andreas Blattmann, Robin Rombach, Kaan Oktay, Jonas Müller, and Björn Ommer. Semiparametric neural image synthesis. In Advances in Neural Information Processing Systems, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.455, + 0.825, + 0.512 + ], + "angle": 0, + "content": "Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George van den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. 
Improving language models by retrieving from trillions of tokens. arXiv preprint arXiv:2112.04426, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.521, + 0.825, + 0.552 + ], + "angle": 0, + "content": "Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. Conceptual 12M: Pushing web-scale image-text pre-training to recognize long-tail visual concepts. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.559, + 0.825, + 0.589 + ], + "angle": 0, + "content": "Romain Beaumont Christoph Schuhmann. Aesthetic predictor. https://github.com/LAION-AI/aesthetic-predictor, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.596, + 0.825, + 0.64 + ], + "angle": 0, + "content": "Katherine Crowson, Stella Biderman, Daniel Kornis, Dashiell Stander, Eric Hallahan, Louis Castricato, and Edward Raff. Vqgan-clip: Open domain image generation and editing with natural language guidance. arXiv preprint arXiv:2204.08583, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.647, + 0.825, + 0.678 + ], + "angle": 0, + "content": "Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson. RedCaps: Web-curated image-text data created by the people, for the people. In NeurIPS Datasets and Benchmarks, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.685, + 0.825, + 0.716 + ], + "angle": 0, + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.723, + 0.825, + 0.753 + ], + "angle": 0, + "content": "Prafulla Dhariwal and Alex Nichol. Diffusion models beat gans on image synthesis. arXiv preprint arXiv:2105.05233, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.761, + 0.825, + 0.805 + ], + "angle": 0, + "content": "Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, et al. Cogview: Mastering text-to-image generation via transformers. Advances in Neural Information Processing Systems, 34, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.813, + 0.825, + 0.857 + ], + "angle": 0, + "content": "Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873-12883, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.865, + 0.825, + 0.908 + ], + "angle": 0, + "content": "Georgios D Evangelidis and Emmanouil Z Psarakis. Parametric image alignment using enhanced correlation coefficient maximization. IEEE transactions on pattern analysis and machine intelligence, 30(10):1858-1865, 2008." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.126, + 0.826, + 0.908 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman. Make-a-scene: Scene-based text-to-image generation with human priors. arXiv preprint arXiv:2203.13131, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. 
An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.826, + 0.251 + ], + "angle": 0, + "content": "Tiezheng Ge, Kaiming He, Qifa Ke, and Jian Sun. Optimized product quantization for approximate nearest neighbor search. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.258, + 0.826, + 0.289 + ], + "angle": 0, + "content": "Jiatao Gu, Yong Wang, Kyunghyun Cho, and Victor OK Li. Search engine guided neural machine translation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.296, + 0.826, + 0.338 + ], + "angle": 0, + "content": "Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. ArXiv, abs/2111.14822, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.348, + 0.826, + 0.391 + ], + "angle": 0, + "content": "Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.399, + 0.826, + 0.443 + ], + "angle": 0, + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.451, + 0.826, + 0.481 + ], + "angle": 0, + "content": "Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. In NeurIPS 2021 Workshop on Deep Generative Models and Downstream Applications, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.488, + 0.826, + 0.518 + ], + "angle": 0, + "content": "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.526, + 0.611, + 0.543 + ], + "angle": 0, + "content": "David Hume. An enquiry concerning human understanding, 1748." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.55, + 0.776, + 0.567 + ], + "angle": 0, + "content": "Karim Iskakov. Semi-parametric image inpainting. arXiv preprint arXiv:1807.02855, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.574, + 0.826, + 0.605 + ], + "angle": 0, + "content": "Herve Jegou, Matthijs Douze, and Cordelia Schmid. Product quantization for nearest neighbor search. IEEE transactions on pattern analysis and machine intelligence, 33(1):117-128, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.612, + 0.826, + 0.642 + ], + "angle": 0, + "content": "Jeff Johnson, Matthijs Douze, and Hervé Jégou. Billion-scale similarity search with GPUs. IEEE Transactions on Big Data, 7(3):535-547, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.649, + 0.826, + 0.693 + ], + "angle": 0, + "content": "Gwanghyun Kim, Taesung Kwon, and Jong Chul Ye. Diffusionclip: Text-guided diffusion models for robust image manipulation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2426-2435, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.701, + 0.826, + 0.744 + ], + "angle": 0, + "content": "Ivan Krasin, Tom Duerig, Neil Alldrin, Andreas Veit, Sami Abu-El-Haija, Serge Belongie, David Cai, Zheyun Feng, Vittorio Ferrari, and Victor Gomes. Openimages: A public dataset for large-scale multi-label and multi-class image classification., 01 2016." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.753, + 0.826, + 0.81 + ], + "angle": 0, + "content": "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, Michael Bernstein, and Li Fei-Fei. Visual genome: Connecting language and vision using crowdsourced dense image annotations. 2016. URL https://arxiv.org/abs/1602.07332." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.818, + 0.826, + 0.849 + ], + "angle": 0, + "content": "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. Latent retrieval for weakly supervised open domain question answering. arXiv preprint arXiv:1906.00300, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.856, + 0.826, + 0.899 + ], + "angle": 0, + "content": "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. arXiv preprint arXiv:2201.12086, 2022." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pp. 740-755. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Xingchao Liu, Chengyue Gong, Lemeng Wu, Shujian Zhang, Hao Su, and Qiang Liu. 
Fusedream: Training-free text-to-image generation with improved clip+ gan space optimization. arXiv preprint arXiv:2112.01573, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.207, + 0.825, + 0.237 + ], + "angle": 0, + "content": "Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.244, + 0.825, + 0.275 + ], + "angle": 0, + "content": "Norman Mu, Alexander Kirillov, David Wagner, and Saining Xie. Slip: Self-supervision meets language-image pre-training. arXiv preprint arXiv:2112.12750, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.282, + 0.825, + 0.326 + ], + "angle": 0, + "content": "Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.334, + 0.826, + 0.405 + ], + "angle": 0, + "content": "Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, and K. Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 24. Curran Associates, Inc., 2011. URL https://proceedings.neurips.cc/paper/2011/file/5dd9db5e033da9c6fb5ba83c7a7ebea9-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.413, + 0.825, + 0.457 + ], + "angle": 0, + "content": "Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2085–2094, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.464, + 0.825, + 0.509 + ], + "angle": 0, + "content": "Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives. In European Conference on Computer Vision, pp. 647-664. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.516, + 0.825, + 0.56 + ], + "angle": 0, + "content": "Xiaojuan Qi, Qifeng Chen, Jiaya Jia, and Vladlen Koltun. Semi-parametric image synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8808-8816, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.568, + 0.825, + 0.625 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. URL https://arxiv.org/abs/2103.00020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.633, + 0.825, + 0.677 + ], + "angle": 0, + "content": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pp. 8821-8831. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.685, + 0.825, + 0.716 + ], + "angle": 0, + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.723, + 0.825, + 0.767 + ], + "angle": 0, + "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684-10695, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.774, + 0.825, + 0.831 + ], + "angle": 0, + "content": "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.84, + 0.825, + 0.896 + ], + "angle": 0, + "content": "Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2556-2565, 2018." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.826, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Yawar Siddiqui, Justus Thies, Fangchang Ma, Qi Shan, Matthias Nießner, and Angela Dai. Retrievalfuse: Neural 3d scene reconstruction with a database. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 12568-12577, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.154, + 0.825, + 0.197 + ], + "angle": 0, + "content": "Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. 
arXiv preprint arXiv:2112.04482, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.205, + 0.825, + 0.247 + ], + "angle": 0, + "content": "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.255, + 0.825, + 0.311 + ], + "angle": 0, + "content": "Krishna Srinivasan, Karthik Raman, Jiecao Chen, Michael Bendersky, and Marc Najork. Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2443-2449, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.32, + 0.825, + 0.362 + ], + "angle": 0, + "content": "Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. The new data and new challenges in multimedia research. CoRR, abs/1503.01817, 2015. URL http://arxiv.org/abs/1503.01817." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.37, + 0.825, + 0.413 + ], + "angle": 0, + "content": "Hung-Yu Tseng, Hsin-Ying Lee, Lu Jiang, Ming-Hsuan Yang, and Weilong Yang. Retrieved: Image synthesis via differentiable patch retrieval. In European Conference on Computer Vision, pp. 242-257. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.421, + 0.825, + 0.449 + ], + "angle": 0, + "content": "Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.457, + 0.825, + 0.499 + ], + "angle": 0, + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. 
Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.507, + 0.825, + 0.535 + ], + "angle": 0, + "content": "Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.544, + 0.825, + 0.586 + ], + "angle": 0, + "content": "Xintao Wang, Liangbin Xie, Chao Dong, and Ying Shan. Real-esrgan: Training real-world blind super-resolution with pure synthetic data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 1905-1914, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.594, + 0.825, + 0.623 + ], + "angle": 0, + "content": "Zihao Wang, Wei Liu, Qian He, Xinglong Wu, and Zili Yi. Clip-gen: Language-free training of a text-to-image generator with clip. arXiv preprint arXiv:2203.00386, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.631, + 0.825, + 0.659 + ], + "angle": 0, + "content": "Yuhuai Wu, Markus N Rabe, DeLesley Hutchins, and Christian Szegedy. Memorizing transformers. arXiv preprint arXiv:2203.08913, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.667, + 0.825, + 0.722 + ], + "angle": 0, + "content": "Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. Attingan: Fine-grained text to image generation with attentional generative adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1316-1324, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.731, + 0.825, + 0.774 + ], + "angle": 0, + "content": "Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.782, + 0.825, + 0.825 + ], + "angle": 0, + "content": "Han Zhang, Jing Yu Koh, Jason Baldridge, Honglak Lee, and Yinfei Yang. Cross-modal contrastive learning for text-to-image generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 833-842, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.832, + 0.825, + 0.875 + ], + "angle": 0, + "content": "Yufan Zhou, Ruiyi Zhang, Changyou Chen, Chunyuan Li, Chris Tensmeyer, Tong Yu, Jiuxiang Gu, Jinhui Xu, and Tong Sun. LAFITE: towards language-free training for text-to-image generation. CoRR, abs/2111.13792, 2021. URL https://arxiv.org/abs/2111.13792." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Minfeng Zhu, Pingbo Pan, Wei Chen, and Yi Yang. Dm-gan: Dynamic memory generative adversarial networks for text-to-image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 5802-5810, 2019." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.296, + 0.119 + ], + "angle": 0, + "content": "6 APPENDIX" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.121, + 0.9, + 0.859 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.356, + 0.871, + 0.642, + 0.886 + ], + "angle": 0, + "content": "Figure 11: Samples from COCO validation set." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.147, + 0.28, + 0.241 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.181, + 0.243, + 0.284, + 0.262 + ], + "angle": 0, + "content": "Chicken waiter serving dinner" + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.151, + 0.432, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.341, + 0.242, + 0.406, + 0.252 + ], + "angle": 0, + "content": "Virtual reality" + }, + { + "type": "image", + "bbox": [ + 0.456, + 0.151, + 0.543, + 0.241 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.478, + 0.243, + 0.536, + 0.262 + ], + "angle": 0, + "content": "Monkey eats hamburger" + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.151, + 0.675, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.595, + 0.246, + 0.682, + 0.256 + ], + "angle": 0, + "content": "Scared fish in a suit" + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.151, + 0.812, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.727, + 0.247, + 0.794, + 0.257 + ], + "angle": 0, + "content": "Clown unicorn" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.281, + 0.294, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.36, + 0.272, + 0.371 + ], + "angle": 0, + "content": "Goodnight sleep" + }, + { + "type": "image", + "bbox": [ + 0.304, + 0.271, + 0.432, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.332, + 0.36, + 0.402, + 0.37 + ], + "angle": 0, + "content": 
"Alpaca in space" + }, + { + "type": "image", + "bbox": [ + 0.457, + 0.268, + 0.565, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.461, + 0.359, + 0.564, + 0.369 + ], + "angle": 0, + "content": "Gargoyle in a party hat" + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.268, + 0.683, + 0.353 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.591, + 0.36, + 0.674, + 0.371 + ], + "angle": 0, + "content": "Cauliflower crying" + }, + { + "type": "image", + "bbox": [ + 0.716, + 0.272, + 0.794, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.713, + 0.354, + 0.801, + 0.373 + ], + "angle": 0, + "content": "Teddy bear wearing VR headset" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.382, + 0.291, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.474, + 0.262, + 0.493 + ], + "angle": 0, + "content": "Hot headed cucumber" + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.383, + 0.434, + 0.464 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.326, + 0.47, + 0.419, + 0.489 + ], + "angle": 0, + "content": "Painterly pigeon playing a synthesizer" + }, + { + "type": "image", + "bbox": [ + 0.482, + 0.381, + 0.539, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.487, + 0.475, + 0.548, + 0.484 + ], + "angle": 0, + "content": "3D cat avatar" + }, + { + "type": "image", + "bbox": [ + 0.571, + 0.39, + 0.698, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.583, + 0.469, + 0.673, + 0.486 + ], + "angle": 0, + "content": "Music band made of fruits" + }, + { + "type": "image", + "bbox": [ + 0.721, + 0.378, + 0.806, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.711, + 0.468, + 0.813, + 0.486 + ], + "angle": 
0, + "content": "A confused robot as an impressionist painting" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.505, + 0.292, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.185, + 0.593, + 0.276, + 0.603 + ], + "angle": 0, + "content": "Panda playing guitar" + }, + { + "type": "image", + "bbox": [ + 0.3, + 0.515, + 0.426, + 0.589 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.331, + 0.592, + 0.41, + 0.603 + ], + "angle": 0, + "content": "Sloth doing ballet" + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.509, + 0.575, + 0.589 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.461, + 0.592, + 0.567, + 0.611 + ], + "angle": 0, + "content": "3D rendering of avatars playing basketball" + }, + { + "type": "image", + "bbox": [ + 0.591, + 0.498, + 0.673, + 0.588 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.614, + 0.592, + 0.672, + 0.603 + ], + "angle": 0, + "content": "Singing otter" + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.5, + 0.821, + 0.588 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.719, + 0.592, + 0.811, + 0.611 + ], + "angle": 0, + "content": "Muscle man riding a wave in Hawaii" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.61, + 0.294, + 0.7 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.192, + 0.712, + 0.287, + 0.732 + ], + "angle": 0, + "content": "Cinematic llama with dramatic lighting" + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.62, + 0.424, + 0.696 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.334, + 0.705, + 0.426, + 0.725 + ], + "angle": 0, + "content": "Dog is programming a computer" + }, + { + "type": "image", + "bbox": [ + 0.449, + 0.619, + 0.573, + 0.697 + ], + "angle": 0, + "content": null + }, + { + "type": 
"image_caption", + "bbox": [ + 0.465, + 0.705, + 0.551, + 0.725 + ], + "angle": 0, + "content": "Radha Krishna dancing in a garden" + }, + { + "type": "image", + "bbox": [ + 0.597, + 0.61, + 0.677, + 0.7 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.602, + 0.705, + 0.674, + 0.725 + ], + "angle": 0, + "content": "Shark wearing a birthday hat" + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.625, + 0.82, + 0.7 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.708, + 0.704, + 0.811, + 0.732 + ], + "angle": 0, + "content": "Avocado playing ukulele and an apple is watching" + }, + { + "type": "image", + "bbox": [ + 0.185, + 0.736, + 0.303, + 0.822 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.831, + 0.272, + 0.842 + ], + "angle": 0, + "content": "Celebrating frog" + }, + { + "type": "image", + "bbox": [ + 0.308, + 0.744, + 0.443, + 0.811 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.34, + 0.823, + 0.421, + 0.842 + ], + "angle": 0, + "content": "A dog in a hotdog costume" + }, + { + "type": "image", + "bbox": [ + 0.469, + 0.738, + 0.562, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.486, + 0.83, + 0.553, + 0.84 + ], + "angle": 0, + "content": "Alien using VR" + }, + { + "type": "image", + "bbox": [ + 0.57, + 0.743, + 0.687, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.579, + 0.822, + 0.684, + 0.851 + ], + "angle": 0, + "content": "Unicorn with a rainbow horn, waving hand, and standing on grass" + }, + { + "type": "image", + "bbox": [ + 0.694, + 0.737, + 0.818, + 0.819 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.72, + 0.827, + 0.809, + 0.846 + ], + "angle": 0, + "content": "A brain made out of words" + }, + { + "type": "image_caption", + "bbox": [ + 
0.236, + 0.865, + 0.76, + 0.88 + ], + "angle": 0, + "content": "Figure 12: A selection of stickers generated using the continuous kNN-Diffusion model." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image_caption", + "bbox": [ + 0.188, + 0.163, + 0.314, + 0.174 + ], + "angle": 0, + "content": "A brown shiny rose flower" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.181, + 0.328, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.212, + 0.302, + 0.293, + 0.311 + ], + "angle": 0, + "content": "A white firetruck" + }, + { + "type": "image_caption", + "bbox": [ + 0.356, + 0.164, + 0.474, + 0.175 + ], + "angle": 0, + "content": "A dog using a typewriter" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.181, + 0.491, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.357, + 0.302, + 0.474, + 0.311 + ], + "angle": 0, + "content": "Race car driver in a tutu" + }, + { + "type": "image_caption", + "bbox": [ + 0.545, + 0.164, + 0.62, + 0.175 + ], + "angle": 0, + "content": "A robot tanning" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.182, + 0.657, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.557, + 0.302, + 0.604, + 0.311 + ], + "angle": 0, + "content": "Blue ants" + }, + { + "type": "image_caption", + "bbox": [ + 0.696, + 0.164, + 0.789, + 0.175 + ], + "angle": 0, + "content": "Fuchsia iguanodon" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.182, + 0.817, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.678, + 0.302, + 0.807, + 0.312 + ], + "angle": 0, + "content": "Lucifer dancing with Jesus" + }, + { + "type": 
"image", + "bbox": [ + 0.178, + 0.317, + 0.328, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.18, + 0.437, + 0.325, + 0.448 + ], + "angle": 0, + "content": "Pineapple shaped refrigerator" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.317, + 0.491, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.433, + 0.493, + 0.452 + ], + "angle": 0, + "content": "A surfer wearing a three-piece men's suit" + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.317, + 0.656, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.524, + 0.437, + 0.637, + 0.448 + ], + "angle": 0, + "content": "Rocking chair on water" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.317, + 0.819, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.71, + 0.438, + 0.774, + 0.447 + ], + "angle": 0, + "content": "Alien cartoon" + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.455, + 0.326, + 0.567 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.186, + 0.568, + 0.32, + 0.588 + ], + "angle": 0, + "content": "Kitchen from the lost city of atlantis" + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.454, + 0.491, + 0.57 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.367, + 0.574, + 0.458, + 0.584 + ], + "angle": 0, + "content": "A pink watermelon" + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.453, + 0.656, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.534, + 0.573, + 0.627, + 0.585 + ], + "angle": 0, + "content": "Image of grey tiger" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.453, + 0.818, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.692, + 0.574, + 0.793, + 0.583 + ], + "angle": 0, + "content": "Green 
robot vacuum" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.59, + 0.327, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.191, + 0.709, + 0.314, + 0.72 + ], + "angle": 0, + "content": "A baby cooking spaghetti" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.59, + 0.491, + 0.707 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.371, + 0.71, + 0.46, + 0.719 + ], + "angle": 0, + "content": "Raccoon mansion" + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.59, + 0.656, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.524, + 0.709, + 0.637, + 0.72 + ], + "angle": 0, + "content": "Flying m1 abrams tank" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.589, + 0.818, + 0.705 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.669, + 0.709, + 0.815, + 0.72 + ], + "angle": 0, + "content": "Baby pictures of grandparents" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.723, + 0.328, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.724, + 0.491, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.724, + 0.656, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.724, + 0.818, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.196, + 0.852, + 0.8, + 0.867 + ], + "angle": 0, + "content": "Figure 13: Additional samples generated from challenging text inputs using the photo-realistic model" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image_caption", + "bbox": [ 
+ 0.223, + 0.195, + 0.334, + 0.215 + ], + "angle": 0, + "content": "This tomato is favorite of many high-class chefs" + }, + { + "type": "image", + "bbox": [ + 0.224, + 0.223, + 0.336, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.227, + 0.322, + 0.33, + 0.333 + ], + "angle": 0, + "content": "Squirrel wearing a shirt" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.353, + 0.314, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.238, + 0.443, + 0.319, + 0.454 + ], + "angle": 0, + "content": "Singing eggplants" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.455, + 0.345, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.227, + 0.565, + 0.33, + 0.575 + ], + "angle": 0, + "content": "A mushroom with a hat" + }, + { + "type": "image", + "bbox": [ + 0.224, + 0.581, + 0.334, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.242, + 0.687, + 0.315, + 0.697 + ], + "angle": 0, + "content": "Black mop head" + }, + { + "type": "image", + "bbox": [ + 0.227, + 0.702, + 0.334, + 0.804 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.382, + 0.2, + 0.471, + 0.21 + ], + "angle": 0, + "content": "Bald-headed mimes" + }, + { + "type": "image", + "bbox": [ + 0.359, + 0.221, + 0.495, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.363, + 0.318, + 0.489, + 0.337 + ], + "angle": 0, + "content": "A panda bear carrying some grocery bags" + }, + { + "type": "image", + "bbox": [ + 0.379, + 0.345, + 0.462, + 0.434 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.38, + 0.443, + 0.474, + 0.453 + ], + "angle": 0, + "content": "Penguin drives a bus" + }, + { + "type": "image", + "bbox": [ + 0.361, + 0.463, + 0.493, + 0.556 + ], + "angle": 0, + "content": null + }, + { + 
"type": "image_caption", + "bbox": [ + 0.371, + 0.56, + 0.484, + 0.578 + ], + "angle": 0, + "content": "Chihuahua pulling a royal coach" + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.578, + 0.461, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.372, + 0.687, + 0.481, + 0.697 + ], + "angle": 0, + "content": "Elephant sitting on a lion" + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.716, + 0.486, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.548, + 0.2, + 0.605, + 0.21 + ], + "angle": 0, + "content": "Vomit candy" + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.214, + 0.645, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.542, + 0.322, + 0.611, + 0.333 + ], + "angle": 0, + "content": "Invisible people" + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.345, + 0.631, + 0.357 + ], + "angle": 0, + "content": "I J Skos . 
sbalele" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.366, + 0.644, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.546, + 0.443, + 0.609, + 0.453 + ], + "angle": 0, + "content": "Queen Esther" + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.455, + 0.627, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.515, + 0.564, + 0.638, + 0.575 + ], + "angle": 0, + "content": "A laughing purple porcupine" + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.595, + 0.632, + 0.67 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.687, + 0.627, + 0.697 + ], + "angle": 0, + "content": "Monkey eating a pickle" + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.711, + 0.629, + 0.799 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.691, + 0.2, + 0.75, + 0.209 + ], + "angle": 0, + "content": "Alien cartoon" + }, + { + "type": "image", + "bbox": [ + 0.676, + 0.225, + 0.766, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.67, + 0.322, + 0.771, + 0.333 + ], + "angle": 0, + "content": "A lion wearing a T-shirt" + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.347, + 0.761, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.665, + 0.443, + 0.777, + 0.453 + ], + "angle": 0, + "content": "One legged striped rabbit" + }, + { + "type": "image", + "bbox": [ + 0.684, + 0.457, + 0.757, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.67, + 0.565, + 0.771, + 0.575 + ], + "angle": 0, + "content": "Dollar bill combing hair" + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.591, + 0.785, + 0.674 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.671, + 0.687, + 0.77, + 0.697 + ], + "angle": 0, + "content": "A sloth eating 
oatmeal" + }, + { + "type": "image", + "bbox": [ + 0.652, + 0.711, + 0.788, + 0.798 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.247, + 0.818, + 0.751, + 0.833 + ], + "angle": 0, + "content": "Figure 14: A selection of stickers generated using the discrete kNN-Diffusion model." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.318, + 0.119 + ], + "angle": 0, + "content": "6.1 BACKGROUND" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.825, + 0.201 + ], + "angle": 0, + "content": "Continuous diffusion process Diffusion models are latent variable models that aim to model a distribution \\( p_{\\theta}(x_0) \\) that approximates the data distribution \\( q(x_0) \\). Specifically, they model a forward process in the space of \\( x_0 \\) from data to noise. Given a sample from the data distribution \\( x_0 \\sim q(x_0) \\), this process produces a Markov chain of latent variables \\( x_1, \\ldots, x_T \\) by progressively adding Gaussian noise to the sample:" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.206, + 0.825, + 0.224 + ], + "angle": 0, + "content": "\\[\nq \\left(x _ {t} \\mid x _ {t - 1}\\right) := \\mathcal {N} \\left(x _ {t}; \\sqrt {1 - \\beta_ {t}} x _ {t - 1}, \\beta_ {t} \\mathcal {I}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.228, + 0.825, + 0.257 + ], + "angle": 0, + "content": "where \\(\\beta_{t}\\) is a variance schedule. 
As presented previously by (Ho et al., 2020), the latent variable \(x_{t}\) can be expressed directly as a linear combination of noise and \(x_0\):" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.261, + 0.825, + 0.277 + ], + "angle": 0, + "content": "\\[\nx _ {t} = \\sqrt {\\bar {\\alpha} _ {t}} x _ {0} + \\epsilon \\sqrt {1 - \\bar {\\alpha} _ {t}}, \\quad \\epsilon \\sim \\mathcal {N} (0, \\mathcal {I}) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.281, + 0.825, + 0.325 + ], + "angle": 0, + "content": "where \\(\\bar{\\alpha}_{t} := \\Pi_{i=1}^{t}(1 - \\beta_{i})\\). In order to sample from the data distribution \\(q(x_0)\\), we define the \"reverse process\" \\(p(x_{t-1}|x_t)\\) which samples first from \\(q(x_T)\\) and then samples reverse steps \\(q(x_{t-1}|x_t)\\) until \\(x_0\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.33, + 0.826, + 0.373 + ], + "angle": 0, + "content": "Since the data distribution is unknown, we need to train a model to approximate it. Note that when \\(T\\) is large enough, the noise vector \\(x_{T}\\) nearly follows an isotropic Gaussian distribution. 
This suggests learning a model \\(p_{\\theta}(x_{t - 1}|x_t)\\) to predict mean \\(\\mu_{\\theta}\\) and covariance matrix \\(\\Sigma_{\\theta}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.345, + 0.378, + 0.825, + 0.395 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} \\left(x _ {t - 1} \\mid x _ {t}\\right) := \\mathcal {N} \\left(x _ {t - 1}; \\mu_ {\\theta} \\left(x _ {t}, t\\right), \\Sigma_ {\\theta} \\left(x _ {t}, t\\right)\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.398, + 0.825, + 0.426 + ], + "angle": 0, + "content": "To train this model, we can replace \\(\\mu_{\\theta}(x_t,t)\\) by predicting the noise \\(\\epsilon_{\\theta}(x_t,t)\\) added to \\(x_0\\) using equation 2 and we get this objective function:" + }, + { + "type": "equation", + "bbox": [ + 0.329, + 0.43, + 0.825, + 0.449 + ], + "angle": 0, + "content": "\\[\nL := E _ {t \\sim [ 1, T ], x _ {0} \\sim q (x _ {0}), \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I})} [ \\| \\epsilon - \\epsilon_ {\\theta} (x _ {t}, t, y) \\| ^ {2} ] \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.452, + 0.825, + 0.467 + ], + "angle": 0, + "content": "where \\(y\\) is an optional conditioning signal (such as text/image embedding or a low resolution image)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.48, + 0.825, + 0.552 + ], + "angle": 0, + "content": "Discrete diffusion process Let \\(x_{n}\\in \\{1,\\ldots ,V\\}^{h\\times w}\\) be the indices of the allocated codebook vectors extracted by a pre-trained VQGAN (Esser et al., 2021) encoder. The forward process of a diffusion model \\(q(x_{n}|x_{n - 1})\\) is a Markov chain that adds noise at each step. Moreover, the reverse process \\(q(x_{n - 1}|x_n,x_0)\\), is a denoising process that removes noise from an initialized noise state. 
As presented by (Gu et al., 2021), the forward diffusion process is given by:" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.569, + 0.825, + 0.587 + ], + "angle": 0, + "content": "\\[\nq \\left(x _ {n} \\mid x _ {n - 1}\\right) = v ^ {T} \\left(x _ {n}\\right) \\mathbf {Q} _ {n} v \\left(x _ {n - 1}\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.589, + 0.825, + 0.617 + ], + "angle": 0, + "content": "where \\( v(x_{n}) \\) is a one-hot vector with entry 1 at \\( x_{n} \\), and \\( \\mathbf{Q}_n \\) is the probability transition matrix from state \\( x_{n-1} \\) to \\( x_{n} \\)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.624, + 0.553, + 0.639 + ], + "angle": 0, + "content": "The reverse process is given by the posterior distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.284, + 0.642, + 0.825, + 0.679 + ], + "angle": 0, + "content": "\\[\nq \\left(x _ {n - 1} \\mid x _ {n}, x _ {0}\\right) = \\frac {\\left(v ^ {T} \\left(x _ {n}\\right) \\mathbf {Q} _ {n} v \\left(x _ {n - 1}\\right)\\right) \\left(v ^ {T} \\left(x _ {n - 1}\\right) \\bar {\\mathbf {Q}} _ {n - 1} v \\left(x _ {0}\\right)\\right)}{v ^ {T} \\left(x _ {n}\\right) \\bar {\\mathbf {Q}} _ {n} v \\left(x _ {0}\\right)} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.682, + 0.334, + 0.699 + ], + "angle": 0, + "content": "where \\(\\bar{\\mathbf{Q}}_n = \\mathbf{Q}_n \\cdots \\mathbf{Q}_1\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.704, + 0.826, + 0.775 + ], + "angle": 0, + "content": "Inspired by masked language modeling (Devlin et al., 2018), they propose corrupting the tokens by stochastically masking some of them. Specifically, an additional special token [MASK] is proposed, so for each token there are \\((\\mathrm{V} + 1)\\) discrete states. 
By adding a small amount of uniform noise to the categorical distribution, the transition matrix can be formulated as," + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.792, + 0.825, + 0.872 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} _ {n} = \\left[ \\begin{array}{c c c c c} \\alpha_ {n} + \\beta_ {n} & \\beta_ {n} & \\beta_ {n} & \\dots & 0 \\\\ \\beta_ {n} & \\alpha_ {n} + \\beta_ {n} & \\beta_ {n} & \\dots & 0 \\\\ \\beta_ {n} & \\beta_ {n} & \\alpha_ {n} + \\beta_ {n} & \\dots & 0 \\\\ \\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\ \\gamma_ {n} & \\gamma_ {n} & \\gamma_ {n} & \\dots & 1 \\end{array} \\right] \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "where \\(\\alpha_{n}\\in [0,1],\\beta_{n} = (1 - \\alpha_{n} - \\gamma_{n}) / V\\) and \\(\\gamma_{n}\\) is the probability of a token to be replaced with a [MASK] token. Each token has a probability of \\(\\gamma_{n}\\) to be replaced by the [MASK] token, \\(V\\beta_{n}\\) to be resampled uniformly and \\(\\alpha_{n} = (1 - V\\beta_{n} - \\gamma_{n})\\) to be unchanged." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.378, + 0.119 + ], + "angle": 0, + "content": "6.2 ADDITIONAL SAMPLES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.827, + 0.203 + ], + "angle": 0, + "content": "In Figs. 16 and 15 we present a visual comparison of our discrete model, trained on the stickers dataset with (1) the kNN extracted during inference, (2) the same model without using kNN in inference. 
As can be seen, the images generated by our model are better aligned to the corresponding text compared to the baselines. While the baselines fail with challenging prompts, our model produces high-quality images that align with the text, and composes multiple concepts correctly." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.215, + 0.828, + 0.286 + ], + "angle": 0, + "content": "COCO Validation Set Comparison Fig. 11 presents a qualitative comparison with FuseDream (Liu et al., 2021), CogView (Ding et al., 2021) and VQ-Diffusion (Gu et al., 2021) on the COCO validation set. Note that both CogView and VQ-Diffusion have been trained on an Image-Text paired dataset, whereas our model was not trained on the COCO dataset, nor used it in the retrieval model." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.292, + 0.825, + 0.309 + ], + "angle": 0, + "content": "Additional samples generated from challenging text inputs are provided in Figs. 13, 14 and Fig. 12." + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.312, + 0.825, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.539, + 0.825, + 0.581 + ], + "angle": 0, + "content": "Figure 15: Comparison of our model, trained on PMD with (1) kNN extracted in inference, (2) the same model without using kNN in inference. While the kNN lack information regarding text semantics, our model considers both text semantics and the kNN, thus proving the advantage of using both the text and the kNN embeddings." + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.584, + 0.825, + 0.833 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.843, + 0.825, + 0.87 + ], + "angle": 0, + "content": "Figure 16: Qualitative comparison of stickers generated using the discrete kNN-Diffusion model, 10 Nearest Neighbors to the text in the CLIP embedding and a discrete model that does not use kNN." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.447, + 0.119 + ], + "angle": 0, + "content": "6.3 HUMAN EVALUATION PROTOCOL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.825, + 0.201 + ], + "angle": 0, + "content": "For all of our human evaluation experiments, we used Amazon Mechanical Turk. For each experiment, we used 600 samples, each scored by five different people. The preferred sample was determined according to majority opinion. For each baseline comparison, we asked two questions (in different experiments): \"Which image is of a higher quality?\" and \"Which image best matches the text?\"" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.217, + 0.29, + 0.23 + ], + "angle": 0, + "content": "6.4 DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.243, + 0.825, + 0.329 + ], + "angle": 0, + "content": "The modified PMD dataset is composed of the following set of publicly available text-image datasets: SBU Captions (Ordonez et al., 2011), Localized Narratives (Pont-Tuset et al., 2020), Conceptual Captions (Sharma et al., 2018), Visual Genome (Krishna et al., 2016), Wikipedia Image Text (Srinivasan et al., 2021), Conceptual Captions 12M (Changpinyo et al., 2021), Red Caps (Desai et al., 2021), and a filtered version of YFCC100M (Thomee et al., 2015). In total, the dataset contains 69 million text-image pairs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.344, + 0.342, + 0.358 + ], + "angle": 0, + "content": "6.5 ABLATION STUDY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.37, + 0.825, + 0.413 + ], + "angle": 0, + "content": "Index size As one can expect, increasing the index size at inference time improves performance. 
To demonstrate this hypothesis, we evaluated our model with an index containing \\(10\\%\\), \\(30\\%\\), \\(50\\%\\) and \\(70\\%\\) images of PMD dataset, and obtained FID scores of 13.92, 13.85, 13.72, and 13.65 respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.419, + 0.826, + 0.504 + ], + "angle": 0, + "content": "kNN conditioning We examined several different approaches to kNN input conditioning: (i) forwarding the kNN embeddings and the single image embedding through a self-attention layer before feeding the contextualized \\( K + 1 \\) embeddings to the model, (ii) feeding the model with one embedding, computed using cross-attention between the image embedding and the kNN embeddings, and, (iii) feeding the model with the image embedding concatenated with a learned linear projection of the kNN embeddings. These variants received FID scores of 18.3, 22.4, 34.1 respectively." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.52, + 0.355, + 0.535 + ], + "angle": 0, + "content": "6.6 RETRIEVAL MODEL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.545, + 0.825, + 0.63 + ], + "angle": 0, + "content": "The retrieval model is implemented using FAISS (Johnson et al., 2019). FAISS is an efficient database, capable of storing billions of elements and finding their nearest neighbors in milliseconds. In the pre-processing phase, for each image in the dataset, we store the image index and its corresponding CLIP image embedding. During training, given a training image, we extract its CLIP image embedding and search for its 10 (see Fig. 9) nearest neighbors in the dataset based on the cosine similarity distance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.636, + 0.826, + 0.761 + ], + "angle": 0, + "content": "For an efficient search during training and inference, we use a non-exhaustive search: For this, we use an inverted file index. 
As in Babenko & Lempitsky (2014), we define Voronoi cells in the \\(d\\)-dimensional space (where \\(d = 512\\) is the CLIP embedding dimensional space), s.t each database vector falls in one of the cells. During search time, only the embeddings contained in the cell the query falls in and a few neighboring ones are compared against the query vector. In addition, to fit the index of our large-scale datasets on a 128GB RAM server, we compress the code size from \\(512 \\times 32/8 = 2048\\) Bytes to 256 Bytes using optimized product quantization (Ge et al., 2013; Jegou et al., 2010). In Algorithm 1 we include pseudocode of the core of the implementation of the retrieval database." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.778, + 0.383, + 0.793 + ], + "angle": 0, + "content": "6.7 DISCRETE KNN MODEL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.804, + 0.825, + 0.833 + ], + "angle": 0, + "content": "We provide additional implementation details for the discrete diffusion model. Additional training details can be found in Tab. 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.848, + 0.825, + 0.892 + ], + "angle": 0, + "content": "Vector Quantization For token quantization, we use VQ-VAE and adapt the publicly available VQGAN(Esser et al., 2021) model, trained on the OpenImages(Krasin et al., 2016) dataset. The encoder downsamples images to \\(32 \\times 32\\) tokens and uses a codebook vocabulary with 2887 elements." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.231 + ], + "angle": 0, + "content": "Image Tokenization In our discrete generative model we model images as a sequence of discrete tokens. 
To this end, we utilize a vector-quantized variational auto-encoder (VQ-VAE) (Van Den Oord et al., 2017) as image tokenizer. VQ-VAE consists of three components: (i) an encoder, (ii) a learned codebook, and, (iii) a decoder. Given an image, the encoder extracts a latent representation. The codebook then maps each latent vector representation to its nearest vector in the codebook. Finally, the decoder reconstructs the image from the codebook representation. VQ-VAE is trained with the objectives of reconstruction and codebook learning. VQ-GAN (Esser et al., 2021) adds an adversarial loss term that tries to determine whether the generated image is fake or real. This added term was shown to improve reconstruction quality." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.825, + 0.303 + ], + "angle": 0, + "content": "Transformer We follow Gu et al. (2021) and train a decoder-only Transformer. The decoder module contains 24 transformer blocks, each containing full attention, cross-attention for the concatenated conditioner, and a feed-forward network. The timestamp \\( n \\) is injected using Adaptive Layer Normalization (Ba et al., 2016). The decoder contains 400 million parameters." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.316, + 0.827, + 0.488 + ], + "angle": 0, + "content": "Classifier-free guidance We sample our diffusion models using classifier-free guidance (CFG) (Ho & Salimans, 2021; Nichol et al., 2021; Ramesh et al., 2022). CFG is performed by extrapolating an unconditional sample in the direction of a conditional sample. To support unconditional sampling, previous work had to fine-tune (Nichol et al., 2021) their models with \\(20\\%\\) of the conditional features nullified. This enabled them to sample unconditional images from the model using the null condition, \\(y' = \\vec{0}\\), the null vector. We found that we can generate unconditional samples from our model using null conditioning without fine-tuning it. 
We hypothesize that by conditioning the model on a null vector, the cross-attention component is also nullified, resulting in no contribution to the diffusion process. During inference, in each step of the diffusion process we generate two images: conditional image logits, \\(p_{\\theta}(x_{n-1}|x_n,y)\\), conditioned on the desired multi-modal embedding \\(y\\), and the unconditional image logits, \\(p_{\\theta}(x_{n-1}|x_n,y')\\), conditioned on the null embedding. Then, the final image for a diffusion step \\(n\\) is sampled from" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.493, + 0.685, + 0.53 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y\\right) = p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y ^ {\\prime}\\right) + \\\\ \\lambda \\left(p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y\\right) - p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y ^ {\\prime}\\right)\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.536, + 0.825, + 0.58 + ], + "angle": 0, + "content": "where \\(\\lambda\\) is a scale coefficient. In all of our experiments, we set \\(\\lambda = 8\\), which was found to yield the highest FID scores on the validation set. Note that the above extrapolation occurs directly on the logits output by \\(p_{\\theta}\\), in contrast to GLIDE (Nichol et al., 2021), which extrapolates the pixel values." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.593, + 0.825, + 0.623 + ], + "angle": 0, + "content": "Training Objective For completeness we are adding the training objective of the discrete model. 
The network is trained to minimize the variational lower bound (VLB):" + }, + { + "type": "equation", + "bbox": [ + 0.265, + 0.629, + 0.825, + 0.698 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {v l b}} = \\mathcal {L} _ {0} + \\mathcal {L} _ {1} + \\dots + \\mathcal {L} _ {N - 1} + \\mathcal {L} _ {N}, \\\\ \\mathcal {L} _ {0} = - \\log p _ {\\theta} \\left(x _ {0} \\mid x _ {1}, f _ {i m g} (I), \\operatorname {k n n} _ {i m g} (\\mathrm {I}, k)\\right), \\\\ \\mathcal {L} _ {n - 1} = D _ {K L} \\left(q \\left(x _ {n - 1} \\mid x _ {n}, x _ {0}\\right) | | p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, f _ {i m g} (I), \\operatorname {k n n} _ {i m g} (\\mathrm {I}, k)\\right)\\right), \\tag {8} \\\\ \\mathcal {L} _ {N} = D _ {K L} \\left(q \\left(x _ {N} \\mid x _ {0}\\right) | | p \\left(x _ {N}\\right)\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.705, + 0.825, + 0.749 + ], + "angle": 0, + "content": "Where \\( p(\\pmb{x}_N) \\) is the prior distribution of timestep \\( N = 100 \\), \\( f_{img}(I) \\) is the CLIP image embedding, \\( \\mathrm{knn}_{img}(\\mathbf{I}, k) \\) is the \\( k \\) nearest neighbors in the feature space of the image embedding. The full details can be found in Gu et al. (2021)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.765, + 0.406, + 0.779 + ], + "angle": 0, + "content": "6.8 CONTINUOUS KNN MODEL" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.791, + 0.825, + 0.82 + ], + "angle": 0, + "content": "We provide additional implementation details for the continuous diffusion model. Additional training details can be found in Tab. 3." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.835, + 0.827, + 0.92 + ], + "angle": 0, + "content": "Decoder. We followed (Nichol et al., 2021; Ho et al., 2020; Ramesh et al., 2022) and re-implemented a diffusion \\(U\\)-net model. 
Specifically, we modify the architecture described in (Ramesh et al., 2022) by allowing multiple CLIP embeddings as the condition to the model. Since we do not have a paired text-image dataset, we removed the text transformer, and thus the text embedding. In particular, we use 512 convolution channels, 3 residual blocks, 64 heads channels and attention resolution of 32, 16 and 8. Similarly to our discrete model, we trained two models (1)" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.339, + 0.113, + 0.484, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.238, + 0.449, + 0.252 + ], + "angle": 0, + "content": "(a) Training" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.113, + 0.661, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.547, + 0.238, + 0.627, + 0.251 + ], + "angle": 0, + "content": "(b) Inference" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.26, + 0.825, + 0.312 + ], + "angle": 0, + "content": "Figure 17: During training, only the image I is given (red), whereas during inference only the text \\( t \\) is given (blue). In order to bridge the gap between the two distributions during training, we leverage the K nearest neighbors that should have a large enough distribution (dashed cone) to cover the potential text embedding (i.e. \\( \\cos(b) < \\cos(a) \\)). During inference, the opposite is applied." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.323, + 0.825, + 0.395 + ], + "angle": 0, + "content": "a no-kNN conditioned only on CLIP image embedding during training, (2) a kNN conditioned on CLIP image embedding and its kNN. 
Finally, we enable classifier-free guidance by randomly setting the CLIP embeddings to zero \\(10\\%\\) of the time. As demonstrated in Tab. 2, we find that humans prefer our model over no-kNN \\(66.8\\%\\) of the time for image quality and \\(69.4\\%\\) of the time for text alignment." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.409, + 0.825, + 0.466 + ], + "angle": 0, + "content": "Super-Resolution. As the decoder generates images with \\(64 \\times 64\\) resolution, we up-sampled the images to \\(256 \\times 256\\) using the open-source super resolution of (Nichol et al., 2021). To further up-sample the images to \\(512 \\times 512\\) and \\(1024 \\times 1024\\) we used the open-source super resolution provided by (Wang et al., 2021)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.48, + 0.825, + 0.522 + ], + "angle": 0, + "content": "Training Objectives For completeness we are adding the training objective of our continuous model. Following Ho et al. (2020); Nichol et al. (2021) we are using mean-squared error loss to predict the noise:" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.521, + 0.676, + 0.539 + ], + "angle": 0, + "content": "\\[\nL := E _ {n \\sim [ 1, N ], x _ {0} \\sim q (x _ {0}), \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I})} [ | | \\epsilon - \\epsilon_ {\\theta} (x _ {n}, n, y) | | ^ {2} ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.541, + 0.608, + 0.559 + ], + "angle": 0, + "content": "where \\(\\epsilon_{\\theta}\\) is a \\(U - net\\) model and \\(y = (f_{img}(x_0),\\mathrm{knn}_{img}(x_0,k))\\)" + }, + { + "type": "table", + "bbox": [ + 0.31, + 0.565, + 0.688, + 0.809 + ], + "angle": 0, + "content": "
DiscreteContinuous
Number of nearest neighbors1010
Diffusion steps1001000
Noise schedule-cosine
Sampling steps100250
Model size400M1B
Sampling variance method-analytic
Dropout-0.1
Weight decay4.5e-2-
Batch size5121600
Iterations150K500K
Learning rate4.5e-41.4e-4
optimizerAdamWAdamW
Adam β20.960.9999
Adam ε1.0e-81.0e-8
EMA decay0.990.9999
warmup500025000
# GPUs128 A100200 A100
" + }, + { + "type": "table_caption", + "bbox": [ + 0.38, + 0.814, + 0.617, + 0.828 + ], + "angle": 0, + "content": "Table 3: Training details of our models" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "code_caption", + "bbox": [ + 0.172, + 0.246, + 0.825, + 0.276 + ], + "angle": 0, + "content": "Algorithm 1 Pseudo-code implementation for the construction of the retrieval model, training and sampling using conditioning kNN." + }, + { + "type": "code", + "bbox": [ + 0.172, + 0.288, + 0.825, + 0.78 + ], + "angle": 0, + "content": "Retrieval model construction \ndef training(batch:train image dataset): 1 //inverted index of 50k centroids, 2 //with optimized product quantization to 256B index_cfg \\(\\equiv\\) \"OPQ256_IVF50000_PQ256x8\" 4 index \\(=\\) faiss.indexFACTORY(d,idx_cfg,faiss.METRIC INNER_PRODUCT) 5 ivf \\(=\\) faiss.extract_index_ivf(index) 6 clustering_index \\(=\\) faiss.index_cpu_to_all_gpus(faiss.IndexFlatIP(d))7 ivf.clusterbing_index \\(=\\) clustering_index 8 train_dataset \\(\\equiv\\) [] for image in random.sample(batch,1000000): 10 train_dataset.append(CLIP_image_embedding(image)) 11 index.train(train_dataset) 12 for image in dataset: index.add(CLIP_imageEncoder(image)) 14 return index Training \ndef training(I:FAISS index, image, k:Number of NN, t:timestamp [0,T-1]): image_encoding \\(\\equiv\\) CLIP_imageEncoder(image) 2 kNN \\(=\\) I.search(image_encoding,k) condition \\(=\\) concatenate([image_encoding,kNN]) 4 image_T \\(=\\) add_noise(image,t) 5 image_0 \\(=\\) diffusion_model(image_T,t,condition) loss \\(=\\) criterion(imageO, image) 7 return loss 9 Sampling \ndef sampling(I:FAISS index,text,k:Number of NN): 1 text_encoding \\(\\equiv\\) CLIP_textEncoder(text) 2 kNN \\(=\\) 
I.search(text_encoding,k) condition \\(=\\) concatenate([text_encoding,kNN]) 4 image \\(=\\) sample_noise(T) for t in [T-1,T-2,...,0]: image \\(=\\) diffusion_model(image,t,condition) return image 8" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.105, + 0.461, + 0.119 + ], + "angle": 0, + "content": "6.9 TEXT-ONLY IMAGE MANIPULATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.13, + 0.828, + 0.175 + ], + "angle": 0, + "content": "Our approach is illustrated in Fig. 18. Additional manipulation examples are provided in Figs. 20. The full comparison with the baselines is provided in Fig. 21 and 22. We also provide in Fig. 19 several examples for the process of the manipulated images construction." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.193, + 0.826, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.48, + 0.825, + 0.557 + ], + "angle": 0, + "content": "Figure 18: An illustration of our manipulation approach. During training: Given a training image (1), the model extracts its first nearest neighbor (2). Next, a random local area in the training image is selected (3), and the manipulated image is constructed by replacing the area with the corresponding nearest neighbor (4). The model then receives as input the manipulated image and the clip embedding of the local area that needs to be restored (5). During inference: Given an input image and a text query \"A face of a male child\", the model receives as input the image (4) and the clip embedding of the modifying text (5)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.329, + 0.831, + 0.621 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.633, + 0.828, + 0.698 + ], + "angle": 0, + "content": "Figure 19: Illustration of the manipulated image construction process during training. Given an original image, we select a random local area, and extract the first nearest neighbor (1-NN). Using ECC alignment, we align the nearest neighbor with the original image and replace the random local area with its corresponding nearest neighbor local area. The model then receives as input the manipulated image, together with the CLIP embedding of the local area, and tries to predict the original image." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.242, + 0.237, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.19, + 0.321, + 0.227, + 0.329 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.244, + 0.353, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.288, + 0.321, + 0.357, + 0.329 + ], + "angle": 0, + "content": "Raising left hand" + }, + { + "type": "image", + "bbox": [ + 0.359, + 0.244, + 0.434, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.321, + 0.433, + 0.329 + ], + "angle": 0, + "content": "Raising hands" + }, + { + "type": "image", + "bbox": [ + 0.45, + 0.244, + 0.497, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.456, + 0.321, + 0.501, + 0.329 + ], + "angle": 0, + "content": "Blue pants" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.244, + 0.568, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.531, + 0.321, + 0.578, + 0.329 + ], + "angle": 0, + "content": "Black shirt" + }, + { + "type": "image", + "bbox": [ + 0.591, + 0.244, + 0.642, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.59, + 0.321, + 0.645, + 0.329 + ], + "angle": 0, + "content": "Holds a heart" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.244, + 0.713, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.677, + 0.321, + 0.714, + 0.329 + ], + "angle": 0, + "content": "Princess" + }, + { + "type": "image", + 
"bbox": [ + 0.748, + 0.244, + 0.804, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.765, + 0.321, + 0.795, + 0.329 + ], + "angle": 0, + "content": "Sitting" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.337, + 0.246, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.393, + 0.246, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.458, + 0.244, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.19, + 0.513, + 0.227, + 0.523 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.34, + 0.355, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.394, + 0.35, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.459, + 0.352, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.307, + 0.513, + 0.34, + 0.522 + ], + "angle": 0, + "content": "Smiling" + }, + { + "type": "image", + "bbox": [ + 0.363, + 0.34, + 0.43, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.394, + 0.423, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.363, + 0.459, + 0.427, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.384, + 0.513, + 0.411, + 0.522 + ], + "angle": 0, + "content": "Angry" + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.34, + 0.505, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.44, + 0.395, + 0.498, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.44, + 0.459, + 0.502, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.464, + 0.513, + 0.481, + 0.521 + ], 
+ "angle": 0, + "content": "Sad" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.34, + 0.58, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.395, + 0.576, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.458, + 0.578, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.513, + 0.568, + 0.522 + ], + "angle": 0, + "content": "Surprised" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.34, + 0.657, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.592, + 0.394, + 0.651, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.592, + 0.457, + 0.655, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.6, + 0.513, + 0.642, + 0.522 + ], + "angle": 0, + "content": "With a tie" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.34, + 0.732, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.394, + 0.727, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.666, + 0.455, + 0.73, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.673, + 0.513, + 0.718, + 0.522 + ], + "angle": 0, + "content": "With a hat" + }, + { + "type": "image", + "bbox": [ + 0.746, + 0.34, + 0.808, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.745, + 0.395, + 0.804, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.741, + 0.458, + 0.805, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.742, + 0.513, + 0.799, + 0.522 + ], + "angle": 0, + "content": "Holds a heart" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.533, + 0.252, + 0.578 + ], + "angle": 0, + "content": null + }, + 
{ + "type": "image_caption", + "bbox": [ + 0.199, + 0.588, + 0.235, + 0.597 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.195, + 0.604, + 0.236, + 0.65 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.655, + 0.233, + 0.664 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.286, + 0.534, + 0.359, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.301, + 0.588, + 0.331, + 0.597 + ], + "angle": 0, + "content": "Brown" + }, + { + "type": "image", + "bbox": [ + 0.3, + 0.604, + 0.342, + 0.65 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.287, + 0.655, + 0.349, + 0.664 + ], + "angle": 0, + "content": "With a bow tie" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.534, + 0.434, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.369, + 0.585, + 0.416, + 0.601 + ], + "angle": 0, + "content": "Blue racing car" + }, + { + "type": "image", + "bbox": [ + 0.391, + 0.604, + 0.434, + 0.651 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.379, + 0.655, + 0.442, + 0.664 + ], + "angle": 0, + "content": "With a bow tie" + }, + { + "type": "image", + "bbox": [ + 0.436, + 0.533, + 0.51, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.452, + 0.588, + 0.486, + 0.597 + ], + "angle": 0, + "content": "Flowers" + }, + { + "type": "image", + "bbox": [ + 0.483, + 0.603, + 0.525, + 0.651 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.487, + 0.655, + 0.524, + 0.664 + ], + "angle": 0, + "content": "Sleeping" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.533, + 0.583, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.588, + 0.555, + 0.597 + ], + 
"angle": 0, + "content": "Sketch" + }, + { + "type": "image", + "bbox": [ + 0.592, + 0.533, + 0.66, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.595, + 0.585, + 0.65, + 0.601 + ], + "angle": 0, + "content": "Yellow racing car" + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.533, + 0.735, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.694, + 0.589, + 0.717, + 0.597 + ], + "angle": 0, + "content": "Stars" + }, + { + "type": "image", + "bbox": [ + 0.738, + 0.533, + 0.813, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.758, + 0.588, + 0.794, + 0.597 + ], + "angle": 0, + "content": "Colorful" + }, + { + "type": "image", + "bbox": [ + 0.757, + 0.603, + 0.8, + 0.651 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.752, + 0.655, + 0.801, + 0.664 + ], + "angle": 0, + "content": "Red lipstick" + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.684, + 0.231, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.196, + 0.743, + 0.228, + 0.751 + ], + "angle": 0, + "content": "original" + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.684, + 0.288, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.255, + 0.743, + 0.287, + 0.751 + ], + "angle": 0, + "content": "Smiling" + }, + { + "type": "image", + "bbox": [ + 0.297, + 0.684, + 0.34, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.306, + 0.743, + 0.335, + 0.751 + ], + "angle": 0, + "content": "Angry" + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.684, + 0.386, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.359, + 0.743, + 0.374, + 0.751 + ], + "angle": 0, + "content": "Sad" + }, + { + "type": "image", + "bbox": [ + 0.395, + 0.684, + 
0.437, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.393, + 0.743, + 0.434, + 0.751 + ], + "angle": 0, + "content": "Surprised" + }, + { + "type": "image", + "bbox": [ + 0.446, + 0.683, + 0.518, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.471, + 0.743, + 0.503, + 0.751 + ], + "angle": 0, + "content": "original" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.683, + 0.599, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.553, + 0.741, + 0.586, + 0.751 + ], + "angle": 0, + "content": "Smiling" + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.683, + 0.674, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.629, + 0.743, + 0.657, + 0.751 + ], + "angle": 0, + "content": "Angry" + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.682, + 0.747, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.707, + 0.743, + 0.728, + 0.751 + ], + "angle": 0, + "content": "Sad" + }, + { + "type": "image", + "bbox": [ + 0.752, + 0.682, + 0.821, + 0.74 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.769, + 0.743, + 0.81, + 0.751 + ], + "angle": 0, + "content": "Surprised" + }, + { + "type": "image_caption", + "bbox": [ + 0.277, + 0.768, + 0.72, + 0.782 + ], + "angle": 0, + "content": "Figure 20: Additional manipulation examples, generated using our model." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.51, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.194, + 0.252, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.255, + 0.24, + 0.264 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.168, + 0.371, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.232, + 0.366, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.314, + 0.295, + 0.348, + 0.304 + ], + "angle": 0, + "content": "Angry" + }, + { + "type": "image", + "bbox": [ + 0.386, + 0.167, + 0.458, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.388, + 0.232, + 0.456, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.411, + 0.295, + 0.432, + 0.304 + ], + "angle": 0, + "content": "Sad" + }, + { + "type": "image", + "bbox": [ + 0.471, + 0.168, + 0.541, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.471, + 0.232, + 0.54, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.48, + 0.294, + 0.531, + 0.304 + ], + "angle": 0, + "content": "Surprised." 
+ }, + { + "type": "image", + "bbox": [ + 0.556, + 0.168, + 0.627, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.558, + 0.232, + 0.626, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.562, + 0.294, + 0.614, + 0.304 + ], + "angle": 0, + "content": "With a tie" + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.168, + 0.713, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.643, + 0.233, + 0.712, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.649, + 0.294, + 0.704, + 0.304 + ], + "angle": 0, + "content": "With a hat" + }, + { + "type": "image", + "bbox": [ + 0.728, + 0.168, + 0.8, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.73, + 0.232, + 0.8, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.737, + 0.294, + 0.807, + 0.304 + ], + "angle": 0, + "content": "Holds a heart" + }, + { + "type": "image", + "bbox": [ + 0.188, + 0.34, + 0.245, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.402, + 0.24, + 0.413 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.318, + 0.362, + 0.382 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.385, + 0.362, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.3, + 0.445, + 0.361, + 0.456 + ], + "angle": 0, + "content": "Red lipstick" + }, + { + "type": "image", + "bbox": [ + 0.395, + 0.318, + 0.452, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.395, + 0.385, + 0.449, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.403, + 0.445, + 0.436, + 0.456 + ], + "angle": 0, + "content": "Angry" + }, + { + 
"type": "image", + "bbox": [ + 0.477, + 0.318, + 0.533, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.478, + 0.385, + 0.534, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.48, + 0.445, + 0.526, + 0.456 + ], + "angle": 0, + "content": "Sleeping" + }, + { + "type": "image", + "bbox": [ + 0.564, + 0.318, + 0.621, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.564, + 0.385, + 0.62, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.569, + 0.445, + 0.621, + 0.456 + ], + "angle": 0, + "content": "Surprised" + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.318, + 0.706, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.649, + 0.385, + 0.705, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.664, + 0.445, + 0.699, + 0.456 + ], + "angle": 0, + "content": "In love" + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.318, + 0.8, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.385, + 0.794, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.725, + 0.445, + 0.802, + 0.456 + ], + "angle": 0, + "content": "With a Bow tie" + }, + { + "type": "image", + "bbox": [ + 0.196, + 0.506, + 0.242, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.196, + 0.588, + 0.24, + 0.598 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.471, + 0.358, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.552, + 0.358, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.316, + 0.633, + 0.347, + 0.643 + ], + "angle": 0, + "content": "Joker" + }, + { + "type": 
"image", + "bbox": [ + 0.393, + 0.472, + 0.448, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.391, + 0.552, + 0.449, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.404, + 0.632, + 0.435, + 0.642 + ], + "angle": 0, + "content": "Boxer" + }, + { + "type": "image", + "bbox": [ + 0.479, + 0.472, + 0.531, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.481, + 0.552, + 0.532, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.487, + 0.632, + 0.519, + 0.642 + ], + "angle": 0, + "content": "Ghost" + }, + { + "type": "image", + "bbox": [ + 0.569, + 0.471, + 0.621, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.574, + 0.552, + 0.62, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.57, + 0.632, + 0.617, + 0.642 + ], + "angle": 0, + "content": "Rainbow" + }, + { + "type": "image", + "bbox": [ + 0.649, + 0.472, + 0.714, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.552, + 0.714, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.67, + 0.632, + 0.7, + 0.642 + ], + "angle": 0, + "content": "Devil" + }, + { + "type": "image", + "bbox": [ + 0.738, + 0.472, + 0.8, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.731, + 0.552, + 0.802, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.756, + 0.632, + 0.787, + 0.642 + ], + "angle": 0, + "content": "Angel" + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.683, + 0.246, + 0.766 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.767, + 0.24, + 0.776 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.304, + 
0.655, + 0.367, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.72, + 0.362, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.287, + 0.798, + 0.373, + 0.808 + ], + "angle": 0, + "content": "Raising left hand" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.655, + 0.447, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.392, + 0.721, + 0.447, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.39, + 0.798, + 0.446, + 0.808 + ], + "angle": 0, + "content": "Blue pants" + }, + { + "type": "image", + "bbox": [ + 0.475, + 0.655, + 0.531, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.475, + 0.72, + 0.531, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.474, + 0.798, + 0.53, + 0.808 + ], + "angle": 0, + "content": "Blackshirt" + }, + { + "type": "image", + "bbox": [ + 0.55, + 0.655, + 0.643, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.559, + 0.72, + 0.619, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.569, + 0.798, + 0.606, + 0.808 + ], + "angle": 0, + "content": "Sitting" + }, + { + "type": "image", + "bbox": [ + 0.659, + 0.655, + 0.714, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.72, + 0.712, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.663, + 0.798, + 0.715, + 0.808 + ], + "angle": 0, + "content": "With a tie" + }, + { + "type": "image", + "bbox": [ + 0.739, + 0.655, + 0.801, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.74, + 0.72, + 0.801, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.734, + 0.798, + 
0.803, + 0.808 + ], + "angle": 0, + "content": "Holds a heart" + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.839, + 0.825, + 0.866 + ], + "angle": 0, + "content": "Figure 21: comparison to Text2LIVE (Bar-Tal et al., 2022). For each input image, the bottom row corresponds to images generated by our model, and the top row corresponds to images generated by the Text2LIVE model." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.149, + 0.822, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.833, + 0.828, + 0.873 + ], + "angle": 0, + "content": "Figure 22: comparison to Textual Inversion (Gal et al., 2022). For each input image, the bottom row corresponds to images generated by our model, and the top row corresponds to images generated by the Textual Inversion model." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "28" + } + ] +] \ No newline at end of file diff --git a/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_origin.pdf b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bf3299f51f790456c9186e6e1daae0131510630b --- /dev/null +++ b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/91d6cd85-11f4-46c6-bd18-53dbb2f775b5_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ec85486f3cc6759f990064a5ecebf45d804448bc84588509f78896bd4b74798 +size 41530096 diff --git a/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/full.md b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e7a8dd5856e8de2dccfd23ad5ed68e39058bdfd8 --- /dev/null +++ b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/full.md @@ -0,0 +1,951 @@ +# KNN-DIFFUSION: IMAGE GENERATION VIA LARGE-SCALE RETRIEVAL + +Shelly Sheynin*, Oron Ashual*, Adam Polyak, Uriel Singer, Oran Gafni, Eliya Nachmani, Yaniv Taigman + +*Equal Contribution Meta AI + +{shellysheynin, oron}@meta.com + +![](images/e21cff83cf03a421c25a92a059e186e9b355f0366a8c027f745f30303b7713f6.jpg) +Figure 1: (a) Samples of stickers generated from text inputs, (b) Semantic text-guided manipulations applied to the "Original" image without using edit masks. + +# ABSTRACT + +Recent text-to-image models have achieved impressive results. However, since they require large-scale datasets of text-image pairs, it is impractical to train them on new domains where data is scarce or not labeled. 
In this work, we propose using large-scale retrieval methods, in particular, efficient $k$ -Nearest-Neighbors (kNN), which offers novel capabilities: (1) training a substantially small and efficient text-to-image diffusion model using only pre-trained multi-modal embeddings, but without an explicit text-image dataset, (2) generating out-of-distribution images by simply swapping the retrieval database at inference time, and (3) performing text-driven local semantic manipulations while preserving object identity. To demonstrate the robustness of our method, we apply our kNN approach on two state-of-the-art diffusion backbones, and show results on several different datasets. As evaluated by human studies and automatic metrics, our method achieves state-of-the-art results compared to existing approaches that train text-to-image generation models using images-only dataset. + +# 1 INTRODUCTION + +Large-scale generative models have been applied successfully to image generation tasks (Gafni et al., 2022; Ramesh et al., 2021; Nichol et al., 2021; Sahara et al., 2022; Yu et al., 2022), and have shown outstanding capabilities in extending human creativity using editing and user control. However, these models face several significant challenges: (i) Large-scale paired data requirement. To achieve high-quality results, text-to-image models rely heavily on large-scale datasets of (text, image) pairs collected from the internet. Due to the requirement of paired data, these models cannot be applied to new or customized domains with only unannotated images. (ii) Computational cost and efficiency. Training these models on highly complex distributions of natural images usually requires scaling the size of the model, data, batch-size, and training time, which makes them challenging to train and less accessible to the community. Recently, several works proposed text-to-image models + +trained without an explicit paired text-image datasets. Liu et al. 
(2021) performed a direct optimization to a pre-trained model based on a CLIP loss (Radford et al., 2021). Such approaches are time-consuming, since they require optimization for each input. Zhou et al. (2021) proposed training with CLIP image embedding perturbed with Gaussian noise. However, to achieve high-quality results, an additional model needs to be trained with an annotated text-image pairs dataset. + +In this work, we introduce a novel generative model, kNN-Diffusion, which tackles these issues and progresses towards more accessible models for the research community and other users. Our model leverages a large-scale retrieval method, $k$ -Nearest-Neighbors (kNN) search, in order to train the model without an explicit text-image dataset. Specifically, our diffusion model is conditioned on two inputs: (1) image embedding (at training time) or text embedding (at inference), extracted using pre-trained CLIP encoder, and (2) kNN embeddings, representing the $k$ most similar images in the CLIP latent space. During training, we assume that no paired text is available, hence condition only on CLIP image embedding and on $k$ additional image embeddings, selected using the retrieval model. At inference, only text inputs are given, so instead of image embeddings, we use the text embedding that shares a joint embedding space with the image embeddings. Here, the kNN image embeddings are retrieved using the text embeddings. + +The additional kNN embeddings have three main benefits: (1) they extend the distribution of conditioning embeddings and ensure the distribution is similar in train and inference, thus helping to bridge the gap between the image and text embedding distributions (see Fig. 5); (2) they teach the model to learn to generate images from a target distribution by using samples from that distribution. 
This allows generalizing to different distributions at test time and generating out-of-distribution samples; (3) they hold information that does not need to be present in the model, which allows it to be substantially smaller. We demonstrate the effectiveness of our kNN approach in Sec. 4. + +To assess the performance of our method, we train our model on two large-scale datasets: the Public Multimodal Dataset (Singh et al., 2021) and an image-only stickers dataset collected from the Internet. We show state-of-the-art zero-shot results on MS-COCO (Lin et al., 2014), LN-COCO (Pont-Tuset et al., 2020) and CUB (Wah et al., 2011). To further demonstrate the advantage of retrieval methods in text-to-image generation, we train two diffusion backbones using our kNN approach: continuous (Ramesh et al., 2022) and discrete (Gu et al., 2021). In both cases we outperform the model trained without kNN. In comparison to alternative methods presented in Sec. 4, we achieve state-of-the-art results in both human evaluations and FID score, with only 400 million parameters and 7 seconds inference time. + +Lastly, we introduce a new approach for local and semantic manipulations that is based on CLIP and kNN, without relying on user-provided masks. Specifically, we fine-tune our model to perform local and complex modifications that satisfies a given target text prompt. For example, given the teddy bear's image in Fig. 4, and the target text "holds a heart", our method automatically locates the local region that should be modified and synthesizes a high-resolution manipulated image in which (1) the teddy bear's identity is accurately preserved and (2) the manipulation is aligned with the target text. We demonstrate our qualitative advantage by comparing our results with two state-of-the-art models, Text2Live (Bar-Tal et al., 2022) and Textual Inversion (Gal et al., 2022), that perform image manipulations without masks (Fig. 4, 21 and 22). 
+ +We summarize the contributions of this paper as follows: (1) We propose kNN-Diffusion, a novel and efficient model that utilizes a large-scale retrieval method for training a text-to-image model with only pre-trained multi-modal embeddings, but without an explicit text-image dataset. (2) We demonstrate efficient out-of-distribution generation, which is achieved by substituting retrieval databases. (3) We present a new approach for local and semantic image manipulation, without utilizing masks. (4) We evaluate our method on two diffusion backbones, discrete and continuous, as well as on several datasets, and present state-of-the-art results compared to baselines. + +# 2 RELATED WORK + +Text-to-image models. Text-to-image generation is a well-studied task that focuses on generating images from text descriptions. While GANs (Xu et al., 2018; Zhu et al., 2019; Zhang et al., 2021) and Transformer-based methods (Ramesh et al., 2021; Gafni et al., 2022; Yu et al., 2022; Ding et al., 2021) have shown remarkable results, recently impressive results have been attained with dis + +![](images/e97768b6a12cbc1787d5797a2c65bdea7f16be9729671609455963914649f9bc.jpg) +Figure 2: Qualitative comparisons with baselines. Nearest Neighbor is the first kNN of the text in PMD dataset. + +crete (Gu et al., 2021) and continuous (Nichol et al., 2021; Sahara et al., 2022; Ramesh et al., 2022; Rombach et al., 2022) diffusion models. Most recent works trained diffusion models conditioned on text embeddings extracted using a pre-trained text encoder (Saharia et al., 2022; Yu et al., 2022) or image embedding extracted using CLIP (Ramesh et al., 2022). While producing impressive results, all previous works described above are supervised and trained with paired text-image datasets. Several works have proposed training text-to-image models without an explicit text-image dataset. FuseDream (Liu et al., 2021) proposed a direct optimization to a pre-trained generative model based on CLIP loss. 
This method relies on a pre-trained GAN and requires a time-consuming optimization process for each image. LAFITE (Zhou et al., 2021) recently demonstrated text-to-image generation results without requiring paired text-image datasets. Here, the CLIP embeddings are used interchangeably at train and test to condition a GAN-based model. The joint text-image embedding enables inference given a text input, whereas in training the model is fed with the visual embedding only. However, the gap between the text and image distributions in the joint embeddings space leads to results with substantially lower quality, as we show in our experiments. To overcome this gap, LAFITE added noise to the image embeddings during training. Our remedy to this gap is to condition the model on the retrieval of an actual image embeddings, using a text-image joint space. + +Retrieval for generation. The Information Retrieval (IR) literature tackles the challenge of retrieving a small amount of information from a large database, given a user's query. A simple, yet efficient retrieval mechanism is to retrieve the $K$ nearest neighbors (kNN) between the query and the entities in the database in some pre-calculated embedding space (Bijalwan et al., 2014). The database allows the model to leverage extensive world-knowledge for its specific task Borgeaud et al. (2021). Recently, language models were augmented with a memory component, allowing them to store representations of past inputs (Wu et al., 2022). The latter were then queried using a lookup operation, improving performance in various benchmarks and tasks. Retrieval models have been used for various tasks in learning problems, for example, language modeling (Borgeaud et al., 2021), machine translation (Gu et al., 2018), question answering (Lee et al., 2019) and image generation (Tseng et al., 2020; Qi et al., 2018). 
RetrieveGAN (Tseng et al., 2020) uses a differentiable retrieval module for image generation from a scene description, RetrieveFuse (Siddiqui et al., 2021) proposed a neural 3D scene reconstruction based on a retrieval system. SIMS (Qi et al., 2018) proposed generating an image using semantic layout and compatible image segments that are retrieved from image segments database, and (Iskakov, 2018) showed that the use of retrieval database in inpainting task significantly boosts visual quality. In this work we utilize the kNN retrieval mechanism over the shared text-image embedding space, CLIP (Radford et al., 2021). Using extensive ablation studies, we show the importance of the retrieval model both for training and inference, and demonstrate its large impact on performance. kNN-Diffusion significantly outperforms prior work + +![](images/7c67ea86007bb55c12b7c5449835616eb75f13a14aec0d995c8c2d882151d3de.jpg) +Figure 3: The overall framework of our kNN-Diffusion model. In both training and inference, the decoder is conditioned on CLIP embedding, and kNN image embeddings. During training, we condition the model on image CLIP embedding, and its kNN image embeddings extracted using the retrieval method. At inference time, given an input text, the kNN image embeddings are retrieved based on the CLIP text embedding that shares a joint embedding space with the image embedding. + +with zero-shot FID of 12.5, including RDM (Blattmann et al., 2022)(with FID of 22.1), a concurrent work which similarly to our approach, proposes conditioning LDM (Rombach et al., 2022) on kNN. + +Multi-modal feature learning. Learning a joint and aligned feature space for several modalities is challenging, as it requires alignment between the modalities (paired datasets), whose distributions may vary. Specifically, the joint feature space of vision-and-language has been a long-standing problem. 
CLIP (Radford et al., 2021) successfully tackled this by leveraging contrastive learning over a large dataset of text-image pairs. BLIP (Li et al., 2022), (Mu et al., 2021) and FLAVA (Singh et al., 2021), followed this idea and further improved the joint representation. The joint representation was shown to hold a strong semantic alignment between the two modalities, enabling image generation (Liu et al., 2021; Wang et al., 2022), image manipulation (Patashnik et al., 2021; Avrahami et al., 2022b), and image captioning (Mokady et al., 2021). In this work we leverage the joint representation in two ways: (i) enabling textless training with only visual data, while using text at inference time, and (ii) creating an efficient embedding space for the use of the retrieval model. + +# 3 METHOD + +Our main goal is to facilitate language-guided generation of user-specified concepts while using an images-only dataset during training. A possible way to achieve this goal is to use a shared text-image encoder that will map text-image pairs into the same latent space, thus allowing training with an image embedding, and inferring from text embedding. A candidate for this encoder is CLIP, which has been trained with a contrastive loss on a large-scale dataset of text-image pairs. However, as we show quantitatively in Tab. 1, 2 and qualitatively in Fig. 15, 16, 5, CLIP embeddings alone cannot accurately bridge the gap between the text and image distributions. In order to reduce this gap, several methods have been proposed. The closest work to ours is LAFITE, which perturbs the CLIP image embedding with adaptive Gaussian noise. Under the assumption that there is a large paired text-image dataset, Ramesh et al. (2022) have proposed a prior that is used during inference, and is trained to generate possible CLIP image embeddings from a given text caption. In this regard, we propose using a large-scale and non-trainable image embedding index as an integral part of the diffusion process. 
Our method, kNN-Diffusion, assumes that only image data and a pre-trained multi-modal text-image encoder are provided during training. As shown in Fig. 3, our model is comprised of three main components: (1) A multi-modal text-image encoder (CLIP); (2) A retrieval model - A data structure containing image embeddings, which is indexed for a fast kNN search; (3) An image generation network - A trainable diffusion-based image generation model, conditioned on the projected retrievals. For both training and inference, the image generation network is conditioned on $K$ additional image embeddings, chosen using the retrieval model to ensure a similar distribution of the condition in training and inference. The following sections describe these components. + +Retrieval model. Our retrieval model has three non-trainable modules: a pre-trained text encoder $f_{txt}$ (CLIP text encoder), a pre-trained image encoder $f_{img}$ (CLIP image encoder) and + +![](images/579bbd4893194751e1ab3f8742480c70f4607a92cf444948529fb6d5d0f104c0.jpg) +Original + +![](images/4b8e94270980c653d26e27432d1e6cd3aa4206dafbdb0dbfd35b081947be7cb9.jpg) +Original + +![](images/2c17f90051c736103d1823061d8c78f14b3d279f254be24f3895718b71cc8801.jpg) +Original +Figure 4: Results for text-guided image manipulations without using masks. The original image is shown in the left column, our manipulated images are shown in the center. The images of Bar-Tal et al. (2022); Gal et al. (2022) were generated using the authors' official code. The full comparison is available in the supplement. 
+ +![](images/6dfcbf0f062811cca6b907aa14f53554b965acab7fa4f35009db77e0751175fc.jpg) +Joker + +![](images/e6dad06b6f2da77a17e939e95ea4fb50b7482acc0d60cfeb59145b89810d1248.jpg) +Black shirt + +![](images/a3619c5eec9d2b40f7bce5719ac0edc5bff61af800e28d441b9747b93ac2d30c.jpg) +With a hat + +![](images/d2f607fc0ed802f1effbcd6a458785f54be0be4cc29e60bc1d72ebadb7d36d02.jpg) +Boxer + +![](images/0366ab1e3934b7293362b2056ea6234a046c4f084daee3f72b70da4b6f66152e.jpg) +Blue pants + +![](images/e6d5c21e59f3b2ceba83fdc167c1806d0922f4c475f1079e3931ae6e76205c38.jpg) +Sad + +![](images/1d35403a8c3a35b5d1bd43a6b7b519f7975a69fd342a0dfaef10a2f2a10f3cf1.jpg) +Angel + +![](images/b4c381a0a363ceab3d84d929b005e071c8095d92309b123fdb4034e42f8f5ac1.jpg) +Holds a heart + +![](images/5f9f3fb28c6639fbe0733c63384facb4fa022b553b119d78bdb7d4dff68bca29.jpg) +Surprised + +![](images/20f0b18d238b59ac3d43e7dacb8e1cd303672db8e01d1246a6da2462597f9f2c.jpg) +Durs +Rainbow + +![](images/e964c7fcd2ab10dc812b3ef695a0404442e4b4b328977d623311f8a173891a8d.jpg) +Sitting + +![](images/2686350b11e0a56bfc7b6344f08c6bcb72c4e155d30550175a32402aa9a5cfc5.jpg) +Angry + +![](images/633cb565ffaab13da90cf41ddd7e361e5e34703e148d139b1230dbc388dae9a8.jpg) +Devil + +![](images/48448da441532e5798cd7725fb5f32ebde3cef9ae794ae531001e7d4a26006d4.jpg) +With a tie + +![](images/e9cf1c391efacf186a0c2bb30f9a62dd0a49e6fc40f96395ed3c42be55576e69.jpg) +Holds a heart + +![](images/1b3ae60b2f4ccdaefc9dbd9a77ad8dac6527dfb342539422c0d5ba7e8aebb439.jpg) +Ghost + +![](images/ad82962b578117c95c04f7904453c8eca887582ec510e9a97362bdf438b67189.jpg) +Raising left hand + +![](images/8fb18b64e01e6c8a971e4fe8d8a72b2d6977d50c03e507bb78efa5b64a3d3e82.jpg) +With a tie + +![](images/2b556d99987c7f3687c1e67e0fe618451a4d24b533ac155b62cac81d4ff2bda9.jpg) +Text2LIVE +Ghost +Textual Inversion + +![](images/81df2f5c024d732fa003ce32c48891881e25dac992dc511c46196b07673a1f61.jpg) +Raising left hand + 
+![](images/46d26f01a10bd02a596093168ec69d23b6bda6925261a129fa86e28edfee4003.jpg) +With a tie + +![](images/4189ebd5211eee89c42f9d0cd9ff08baf0cba0c6d9dec68d1adcfb9f7e073e02.jpg) +Ghost + +![](images/4fa44d3956e5ffc14fde983bb79ac0b8b0f6db0cb2f8b06091b762a26414dab5.jpg) +Raising left hand + +![](images/12af7563b8668dd53db8e9e8d932d3be49014ef20ca441f225bb62d1ac4e670a.jpg) +With a tie + +an index $\mathcal{H}$ . The encoders map text descriptions and image samples to a joint multi-modal $d$ -dimensional feature space $\mathbb{R}^d$ . The index stores an efficient representation of the images database - $\mathcal{H} := \{f_{img}(i) \in \mathbb{R}^d | i \in \mathcal{I}\}$ where $\mathcal{I}$ denotes the dataset of images. During training, we use the index to efficiently extract the $k$ nearest neighbors in the feature space of the image embedding $f_{img}(\mathbf{I}) \in \mathbb{R}^d - \mathrm{knn}_{img}(\mathbf{I}, k) := \arg \min_{h \in \mathcal{H}}^k \mathbf{s}(f_{img}(\mathbf{I}), h)$ where $\mathbf{s}$ is a distance function and $\arg \min_k^k$ output the minimal $k$ elements. The set $\{f_{img}(\mathbf{I}), \mathrm{knn}_{img}(\mathbf{I}, k)\}$ is used as the condition to the generative model. During inference, given a query text $t$ , an embedding $f_{txt}(t)$ is extracted. The generative model is conditioned on this embedding and its $k$ nearest neighbors from the database - $\mathrm{knn}_{txt}(t, k) := \arg \min_{h \in \mathcal{H}}^k \mathbf{s}(f_{txt}(t), h)$ . During training, we add embeddings of real images, by applying the retrieval method to the input image embedding. The extracted kNN should have a large enough distribution to cover the potential text embedding. During inference, the kNN are retrieved using the text embedding (See Fig. 17). In all of our experiments we use the cosine similarity metric as the distance function $\mathbf{s}$ , $k = 10$ for the number of nearest neighbors and $d = 512$ . The full implementation details can be found in Sec. 
6.6 in the supplement. + +Image generation network. In order to demonstrate the robustness of our method, we apply our kNN approach on two different diffusion backbones: Discrete (Gu et al., 2021) and Continuous (Nichol et al., 2021; Sohl-Dickstein et al., 2015; Ho et al., 2020; Dhariwal & Nichol, 2021). Although very different in practice, these models share the same theoretical idea. Let $x_0 \sim q(x_0)$ be a sample from our images distribution. A forward diffusion process is a Markov chain that adds noise at each step $q(x_n|x_{n-1})$ . The reverse process, $p_\theta(x_{n-1}|x_n,x_0)$ , is a denoising process that removes noise from an initialized noise state. At inference time, the model can generate an output, starting with noise and gradually removing it using $p_\theta$ . For additional background on diffusion models please refer to Sec. 6.1 in the supplement. + +In the discrete diffusion model, $q(x_{n}|x_{n - 1})\coloneqq v^{T}(x_{n})\mathbf{Q}_{n}v(x_{n - 1})$ where $v(x_{n})$ is a one-hot vector with entry 1 at $x_{n}$ , and $\mathbf{Q}_n$ is a transition matrix, modeling the probability to move from state $x_{n - 1}$ to $x_{n}$ , using uniform probability over the vocabulary and a pre-defined probability for additional special [MASK] token. We can compute the reverse transition distribution according to: $p_{\theta}(x_{n - 1}|x_n,y)\coloneqq \sum_{\hat{x}_0 = 1}^k q(x_{n - 1}|x_n,\hat{x_0})p_\theta (\hat{x_0} |x_n,x_0,y)$ where $x_0$ is a discrete vector, tokenized by the VQGAN (Esser et al., 2021) encoder and $y$ is the conditioning signal. For modeling $p_{\theta}$ we have followed (Gu et al., 2021) and used a conditional Transformer (Vaswani et al., 2017). + +In the continuous diffusion model, $q(x_{n}|x_{n - 1}) \coloneqq \mathcal{N}(x_{n};\sqrt{\alpha_{t}} x_{n - 1},(1 - \alpha_{n})x_{0})$ and $p_{\theta}(x_{n - 1}|x_n,y)\coloneqq \mathcal{N}(\mu_\theta (x_n,y),\Sigma_\theta (x_n,y))$ . Here, the noise function is Gaussian noise. 
Following (Ho et al., 2020; Nichol et al., 2021) we trained a model $\epsilon_{\theta}$ to predict the added noise using a standard mean-squared error loss: $L\coloneqq E_{n\sim [1,N],x_0\sim q(x_0),\epsilon \sim \mathcal{N}(0,\mathbf{I})}[||\epsilon -\epsilon_{\theta}(x_n,n,y)||^2]$ where $\epsilon_{\theta}$ is a U-net model and $y$ is the conditioning signal. + +![](images/a7f78150df57b233a4eb57c80d2816747d4dba1175c0d61e3cf3915e7d74205c.jpg) +Figure 5: tSNE visualization of 500 random text-image CLIP embeddings pairs taken from COCO validation. The leftmost figure demonstrates the gap between the text and image distributions. By gradually adding kNN to the mean CLIP embedding of the text, the gap decreases, demonstrating the importance of the kNN. + +![](images/34aecec5513151a693e5c6b12d64e94c9f1fff8d249b85ed5ff95a8b288204be.jpg) +Figure 6: FID on MS-COCO, including models trained on image-only datasets and text-image datasets. + +Table 1: Results for zero-shot Text-to-Image generation on the MS-COCO, CUB and LN-COCO test sets. Image-quality and Text-alignment report the percentage of majority human raters votes in favor of our method when comparing between a certain model and ours. + +
ModelMS-COCOCUBLN-COCO
FID↓Im. qual.Txt align.FID↓Im. qual.Txt align.FID↓Im. qual.Txt align.
LAFITE26.972.165.389.774.059.642.868.461.9
FuseDream21.264.079.350.279.160.937.571.159.0
no-kNN32.870.868.395.181.061.265.061.459.8
Ours12.5--42.9--35.6--
+ +In both cases, we condition our model on $y = (f_{img}(x_0), \mathrm{knn}_{img}(x_0, k))$ where $f_{img}(x_0)$ is the CLIP image embedding, $\mathrm{knn}_{img}(x_0, k)$ is the $k$ nearest neighbors in the feature space of the image embedding. Following (Ramesh et al., 2022; Rombach et al., 2022) conditional injection, we condition our model on the image CLIP embedding, and the kNN clip embeddings by applying cross attention in the attention layers of the architecture. We sample both our models using Classifier Free Guidance (CFG) (Nichol et al., 2021; Ho & Salimans, 2021). Since CFG was originally proposed for continuous models, we propose a method for using it with discrete models as well. Full implementation details of the discrete and continuous models can be found in Sec. 6.7 and Sec. 6.8, respectively, in the supplement. + +# 3.1 TEXT-ONLY IMAGE MANIPULATION + +The majority of previous works in the task of image manipulation either rely on user-provided masks (Nichol et al., 2021; Avrahami et al., 2022b;a), or are limited to global editing (Crowson et al., 2022; Kim et al., 2022). Recently, several works (Bar-Tal et al., 2022; Hertz et al., 2022; Gal et al., 2022) have made progress with local manipulations without relying on user edited masks. Nevertheless, most of the techniques suffer from several shortcomings: (1) They enable local texture changes, yet cannot modify complex structures, (2) they struggle to preserve the identity of the object, for example, when manipulating humans, (3) they require optimization for each input. + +We address these issues by extending kNN-Diffusion to perform local and semantic-aware image manipulations without any provided mask. Illustration of the approach is provided in Fig. 18 and Fig. 19 in the supplement. For this task, the model is trained to predict the original image from a manipulated version. 
Specifically, we create a manipulated version of the image, which differs from the original image only in some local area. Given a random local area $M$ in the image I, the manipulated image $\mathrm{I}_{\text{manip}}$ is constructed by replacing the area with the corresponding nearest neighbor: $\mathrm{I}_{\text{manip}} = \mathrm{I} \cdot (1 - M) + \mathrm{nn}_{img}(\mathrm{I}, 1) \cdot M$ , where $\mathrm{nn}_{img}(\mathrm{I}, 1)$ is the nearest neighbor obtained after aligning it with I using the ECC alignment algorithm (Evangelidis & Psarakis, 2008). The model then receives as input the manipulated image, together with the CLIP embedding of the original image only in the local area: $f_{img}(\mathrm{I} \cdot M)$ . This CLIP embedding represents the required modification that should be applied to the manipulated image in order to predict the original image. During inference, instead of using the CLIP embedding of the local area, the desired modification is + +Table 2: Results on the stickers dataset. We report the percentage of human raters that prefer our method over the baselines with respect to image quality and text alignment. Discrete no-kNN refers to VQ-diffusion, and Continuous no-kNN, to DALL-E2 decoder, both trained without an explicit text-image dataset. + +
| Model | FID↓ | Ours Discrete | | Ours Continuous | |
|---|---|---|---|---|---|
| | | Image quality | Text alignment | Image quality | Text alignment |
| DALL-E2+ClipCap | 55.5 | 71.6 | 69.2 | 67.0 | 68.3 |
| LAFITE | 58.7 | 63.5 | 59.9 | 76.0 | 71.2 |
| no-kNN | 52.7 | 72.1 | 67.6 | 66.8 | 69.4 |
| Ours | 40.8 | – | – | – | – |
+ +represented using the CLIP embedding of the user text query. We modified the model to be capable of receiving as a condition both the manipulated image and the CLIP embedding of the local area. + +# 4 EXPERIMENTS + +First, we conduct qualitative and quantitative comparisons on MS-COCO, LN-COCO and CUB datasets. To further demonstrate the advantage of our method, we provide comparison on an image-only stickers dataset, where we apply our approach on two diffusion backbones. Next, we demonstrate image manipulation and out-of-distribution capabilities. Finally, to better assess the effect of each contribution, an ablation study is provided. + +Datasets and Metrics. For photo-realistic experiments, our model was trained only on the images (omitting the text) of a modified version of the Public Multimodal Dataset (PMD) used by FLAVA (Singh et al., 2021). More information about the dataset is available in Sec. 6.4 of the supplement. To further demonstrate the capabilities of our method, we collected 400 million sticker images from the web, containing combinations of concepts such as objects, characters/avatars and text. The collected stickers do not have paired text, and are substantially different from photorealistic data. Furthermore, since they have no paired text, they were not part of CLIP's training data, which makes the text-to-image generation task more challenging. + +Evaluation metrics are based on objective and subjective metrics: (i) FID (Heusel et al., 2017) is an objective metric used to assess the quality of synthesized images, (ii) human evaluation - we ask human raters for their preference, comparing two methods based on image quality and text alignment. We used 600 image pairs; five raters rated each pair. The results are shown as a percentage of majority votes in favor of our method over the baselines. We report the full human evaluation protocol in the supplement. 
We chose to omit Inception-Score, since it is shown by Barratt & Sharma (2018) to be a misleading metric for models that were not trained on Imagenet. + +# 4.1 QUALITATIVE AND QUANTITATIVE RESULTS + +We begin by comparing our model, trained on the PMD dataset, with the previous works LAFITE and FuseDream, that trained on image-only datasets. To demonstrate the advantage of using a retrieval method in text-to-image generation, we trained a model variant, no-kNN. This baseline was trained solely on image embeddings (omitting the kNN), while during inference, the images were generated using the text embedding. Tab. 1 displays zero-shot results on three different datasets: MS-COCO, CUB and LN-COCO. We follow the evaluation protocol of LAFITE, reporting our results on 30,000 images from MS-COCO validation set without training, nor using its training partition in the kNN index. Similarly, we follow LAFITE for CUB and LN-COCO evaluation. As can be seen, our model achieves the lowest FID score in all scenarios. In addition, human evaluations rate our method as better aligned to text and with the highest image quality. In Fig. 2, 15 and 11 we present a qualitative comparison between the methods. One can observe that while the simple retrieval baseline outputs non-generated images with high-quality, the images generated by our method are more faithful to the input text. To further demonstrate the effectiveness of our method, we present in Fig. 6 a comparison of our model with the latest text-to-image models trained on paired text-image datasets: DALL-E, CogView, VQ-Diffusion, GLIDE, LDM, Make-A-Scene, DALL-E2, Parti and Imagen. As can be seen, our model achieves comparable results to recent models trained with full text-image pairs (e.g. LDM, GLIDE), despite being trained on an image-only dataset, with significantly lower computational costs. 
The results demonstrate that leveraging an external retrieval database allows to compensate for different trade-offs, in particular, reducing the number of parameters in the model. Additional samples are provided in Fig. 13 in the supplement. + +![](images/2bbc54b5605c39d4eb95ed54edeb6ccbc924f0a581d1c0ce6025122e17ce139e.jpg) +Figure 7: Comparison between various indexes used by the same model. (1) Aesthetic. Images from the first quantile of an aesthetic classifier, (2) Unaesthetic. Images from the last quantile of an aesthetic classifier, (3) Image search engine. Images retrieved from Google Images, (4) The stickers index. + +Text-to-sticker generation. As the sticker dataset does not have paired text, and is substantially different from photo-realistic data, it allows us to illustrate the advantage of our model on an image-only dataset. A selection of stickers generated by our model is presented in Fig. 1 and Fig. 14, 12. To demonstrate the importance of using kNN on image-only datasets, we evaluate our approach on two diffusion backbones. To this end, we trained a continuous diffusion model (Ramesh et al., 2022) and a discrete diffusion model (Gu et al., 2021), both conditioned on the kNN image embeddings. For each backbone, we compare our method with the following baselines: (1) no-kNN - this baseline was trained using both the continuous and the discrete methods conditioned only on image CLIP embedding, without using kNN. In the discrete case, we trained a VQ-diffusion model, while in the continuous case, we trained a re-implementation of DALL-E2's decoder (without prior). (2) DALL-E2+ClipCap baseline - here, we first captioned the entire sticker dataset using ClipCap (Mokady et al., 2021), then trained DALL-E2 decoder on the captioned dataset. (3) LAFITE - we trained LAFITE language-free model on our stickers dataset using the authors' published code. We present the results in Tab. 2. 
The FID is calculated over a subset of 3,000 stickers, generated from the ClipCap captioned dataset. As can be seen, our model achieves the lowest FID score. In addition, it outperforms all baselines in human evaluation comparison, using continuous and discrete backbones. In particular, compared with the same model trained without kNN, our model achieves significantly higher favorability in both text alignment and image quality. + +# 4.2 APPLICATIONS + +Text-only image manipulation. We demonstrate the manipulation capabilities of our model in Fig. 1, 4 and 20. Furthermore, we qualitatively compare our model with Text2LIVE (Bar-Tal et al., 2022) and Textual Inversion (Gal et al., 2022), using the authors' published code. Text2LIVE proposed generating an edit layer that is composed over the original input, using a generator trained for each training image. Textual Inversion utilized the pre-trained Latent Diffusion model to invert the input image into a token embedding. The embedding is then used to compose novel textual queries for the generative model. Fig. 4 shows representative results, and the rest are included in Fig. 21 and 22 in the supplement. In contrast to our model, baseline methods lack text correspondence or they do not preserve the identity of the object. Since Text2LIVE is optimized to perform local changes, it has the difficulty changing the structure of the object (e.g. the "raising his hand" example in Fig. 4). Textual Inversion baseline changes the identity of the object because it struggles reconstructing the textual representation of the source image. Our model, on the other hand, can perform challenging manipulations that are aligned with the text, while preserving the object identity. + +![](images/950754645d537e674a9ed1701ea97dec349ad1022705da81f9f70c7f90f82619.jpg) +Figure 8: Mean aesthetics score of the generated images as a function of the conditioned kNN mean aesthetics score. 
+ +![](images/ed141c57ea6e1c551282a3f9907ddce50fcf676a4c7a6940c44aa759ddcc1a1c.jpg) +Figure 9: MS-COCO test FID score on various K's in: (1) Zero-Shot (2) Index includes MS-COCO train subset. No kNN trained with kNN, but did not employ kNN in inference. + +![](images/6ed05be790783ff217f588a2c4a2cd028fca55ed587c0697a0e1a02f3ceab8dd.jpg) +Figure 10: MS-COCO test FID score for different model sizes. As can be seen, adding kNN to the model allows it to be smaller, while having better performance. + +Out-of-distribution generation. Using the retrieval index as part of the generation process enables using different databases during inference, without fine-tuning. This allows generating images from distributions that were not part of the training set, enabling out-of-distribution generation. This novel capability is demonstrated with the same model trained on PMD, using three different retrieval databases: (i) A stickers database presented in Sec. 4. (ii) Aesthetic database: This database is constructed by filtering images according to a classifier score. Let $C$ be a classifier that for each image $i \in I$ outputs a score $s = C(i)$ . This classifier enables filtering the kNN using $L \leq s < H$ , where $L$ and $H$ are low and high thresholds, respectively. Here, we use an open source pre-trained aesthetics classifier $A$ (Christoph Schuhmann, 2022): For each text input $t \in T$ , we apply $A$ on the kNN, and then divide the kNN into five equal quantiles based on $A$ score. As can be seen in Fig. 8, using kNN with higher aesthetics score result in generated images with higher aesthetics mean score. (iii) Image search engine: Generative models are stationary in the sense that they are unable to learn new concepts after being trained, hence fine-tuning is required to represent new styles and concepts. Here, we use an online image search engine, which allows the model to adapt to new data without additional fine-tuning. 
A qualitative comparison of all three methods is shown in Fig.7. + +# 4.3 ABLATION STUDY + +We conclude our experiments with an ablation study, to quantify the contribution of our different components. We provide ablation study on index size and different kNN conditioning approaches in Sec. 6.5 of the supplement. Number of nearest neighbors. The results in Fig. 9 demonstrate the importance of applying the retrieval mechanism during training and inference. Here, we evaluate our model, trained on PMD dataset, with different numbers of kNN during inference. Furthermore, we examined the baseline no-kNN, in which during inference, the model is conditioned only on the text embedding $f_{txt}(t)$ , without using kNN. Best performance is achieved using 10 neighbors. Scalability analysis. To evaluate the effectiveness of our approach at different model sizes, we trained three additional models with varying sizes for both settings - with and without kNN. As can be seen in Fig. 10, utilizing kNN consistently improves performance for all sizes. Furthermore, a performance improvement can be achieved using much smaller models with kNN. For example, the 35M kNN model outperforms the 400M model without kNN. + +# 5 CONCLUSION + +"We shall always find, that every idea which we examine is copied from a similar impression", Hume (1748). In this paper, we propose using a large-scale retrieval method in order to train a novel text-to-image model, with only pre-trained multi-modal embeddings, but without an explicit text-image dataset. Our extensive experiments demonstrate that using an external knowledge-base alleviates much of the model's burden of learning novel concepts, enabling the use of a relatively small model. In addition, it provides the model the capability of learning to adapt to new samples, which it only observes during test time. Lastly, we present a new technique utilizing the retrieval method for text-driven semantic manipulations without user-provided masks. 
As evaluated by human studies and automatic metrics, our method is significantly preferable to the baselines in terms of image quality and text alignment. + +# REFERENCES + +Omri Avrahami, Ohad Fried, and Dani Lischinski. Blended latent diffusion. arXiv preprint arXiv:2206.02779, 2022a. +Omri Avrahami, Dani Lischinski, and Ohad Fried. Blended diffusion for text-driven editing of natural images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18208-18218, 2022b. +Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. +Artem Babenko and Victor Lempitsky. The inverted multi-index. IEEE transactions on pattern analysis and machine intelligence, 37(6):1247-1260, 2014. +Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. arXiv preprint arXiv:2204.02491, 2022. +Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018. +Vishwanath Bijalwan, Vinay Kumar, Pinki Kumari, and Jordan Pascual. Knn based machine learning approach for text and document mining. International Journal of Database Theory and Application, 7(1):61-70, 2014. +Andreas Blattmann, Robin Rombach, Kaan Oktay, Jonas Müller, and Björn Ommer. Semiparametric neural image synthesis. In Advances in Neural Information Processing Systems, 2022. +Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George van den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. Improving language models by retrieving from trillions of tokens. arXiv preprint arXiv:2112.04426, 2021. +Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. Conceptual 12M: Pushing web-scale image-text pre-training to recognize long-tail visual concepts. In CVPR, 2021. +Romain Beaumont Christoph Schuhmann. Aesthetic predictor. 
https://github.com/LAION-AI/aesthetic-predictor, 2022. +Katherine Crowson, Stella Biderman, Daniel Kornis, Dashiell Stander, Eric Hallahan, Louis Castricato, and Edward Raff. Vqgan-clip: Open domain image generation and editing with natural language guidance. arXiv preprint arXiv:2204.08583, 2022. +Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson. RedCaps: Web-curated image-text data created by the people, for the people. In NeurIPS Datasets and Benchmarks, 2021. +Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. +Prafulla Dhariwal and Alex Nichol. Diffusion models beat gans on image synthesis. arXiv preprint arXiv:2105.05233, 2021. +Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, et al. Cogview: Mastering text-to-image generation via transformers. Advances in Neural Information Processing Systems, 34, 2021. +Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873-12883, 2021. +Georgios D Evangelidis and Emmanouil Z Psarakis. Parametric image alignment using enhanced correlation coefficient maximization. IEEE transactions on pattern analysis and machine intelligence, 30(10):1858-1865, 2008. + +Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman. Make-a-scene: Scene-based text-to-image generation with human priors. arXiv preprint arXiv:2203.13131, 2022. +Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. +Tiezheng Ge, Kaiming He, Qifa Ke, and Jian Sun. 
Optimized product quantization for approximate nearest neighbor search. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2013. +Jiatao Gu, Yong Wang, Kyunghyun Cho, and Victor OK Li. Search engine guided neural machine translation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018. +Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. ArXiv, abs/2111.14822, 2021. +Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. +Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. +Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. In NeurIPS 2021 Workshop on Deep Generative Models and Downstream Applications, 2021. +Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239, 2020. +David Hume. An enquiry concerning human understanding, 1748. +Karim Iskakov. Semi-parametric image inpainting. arXiv preprint arXiv:1807.02855, 2018. +Herve Jegou, Matthijs Douze, and Cordelia Schmid. Product quantization for nearest neighbor search. IEEE transactions on pattern analysis and machine intelligence, 33(1):117-128, 2010. +Jeff Johnson, Matthijs Douze, and Hervé Jégou. Billion-scale similarity search with GPUs. IEEE Transactions on Big Data, 7(3):535-547, 2019. +Gwanghyun Kim, Taesung Kwon, and Jong Chul Ye. Diffusionclip: Text-guided diffusion models for robust image manipulation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2426-2435, 2022. 
+Ivan Krasin, Tom Duerig, Neil Alldrin, Andreas Veit, Sami Abu-El-Haija, Serge Belongie, David Cai, Zheyun Feng, Vittorio Ferrari, and Victor Gomes. Openimages: A public dataset for large-scale multi-label and multi-class image classification., 01 2016. +Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, Michael Bernstein, and Li Fei-Fei. Visual genome: Connecting language and vision using crowdsourced dense image annotations. 2016. URL https://arxiv.org/abs/1602.07332. +Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. Latent retrieval for weakly supervised open domain question answering. arXiv preprint arXiv:1906.00300, 2019. +Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. arXiv preprint arXiv:2201.12086, 2022. + +Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pp. 740-755. Springer, 2014. +Xingchao Liu, Chengyue Gong, Lemeng Wu, Shujian Zhang, Hao Su, and Qiang Liu. Fusedream: Training-free text-to-image generation with improved clip+ gan space optimization. arXiv preprint arXiv:2112.01573, 2021. +Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021. +Norman Mu, Alexander Kirillov, David Wagner, and Saining Xie. Slip: Self-supervision meets language-image pre-training. arXiv preprint arXiv:2112.12750, 2021. +Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. +Vicente Ordonez, Girish Kulkarni, and Tamara Berg. 
Im2text: Describing images using 1 million captioned photographs. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, and K. Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 24. Curran Associates, Inc., 2011. URL https://proceedings.neurips.cc/paper/2011/file/5dd9db5e033da9c6fb5ba83c7a7ebea9-Paper.pdf. +Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2085–2094, 2021. +Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives. In European Conference on Computer Vision, pp. 647-664. Springer, 2020. +Xiaojuan Qi, Qifeng Chen, Jiaya Jia, and Vladlen Koltun. Semi-parametric image synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8808-8816, 2018. +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. URL https://arxiv.org/abs/2103.00020. +Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pp. 8821-8831. PMLR, 2021. +Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. +Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684-10695, 2022. 
+Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Raphael Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022. +Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2556-2565, 2018. + +Yawar Siddiqui, Justus Thies, Fangchang Ma, Qi Shan, Matthias Nießner, and Angela Dai. Retrievalfuse: Neural 3d scene reconstruction with a database. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 12568-12577, 2021. +Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. arXiv preprint arXiv:2112.04482, 2021. +Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015. +Krishna Srinivasan, Karthik Raman, Jiecao Chen, Michael Bendersky, and Marc Najork. Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2443-2449, 2021. +Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. The new data and new challenges in multimedia research. CoRR, abs/1503.01817, 2015. URL http://arxiv.org/abs/1503.01817. +Hung-Yu Tseng, Hsin-Ying Lee, Lu Jiang, Ming-Hsuan Yang, and Weilong Yang. RetrieveGAN: Image synthesis via differentiable patch retrieval. 
In European Conference on Computer Vision, pp. 242-257. Springer, 2020. +Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. +Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. +Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. +Xintao Wang, Liangbin Xie, Chao Dong, and Ying Shan. Real-esrgan: Training real-world blind super-resolution with pure synthetic data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 1905-1914, 2021. +Zihao Wang, Wei Liu, Qian He, Xinglong Wu, and Zili Yi. Clip-gen: Language-free training of a text-to-image generator with clip. arXiv preprint arXiv:2203.00386, 2022. +Yuhuai Wu, Markus N Rabe, DeLesley Hutchins, and Christian Szegedy. Memorizing transformers. arXiv preprint arXiv:2203.08913, 2022. +Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. Attingan: Fine-grained text to image generation with attentional generative adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1316-1324, 2018. +Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022. +Han Zhang, Jing Yu Koh, Jason Baldridge, Honglak Lee, and Yinfei Yang. Cross-modal contrastive learning for text-to-image generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 833-842, 2021. 
+Yufan Zhou, Ruiyi Zhang, Changyou Chen, Chunyuan Li, Chris Tensmeyer, Tong Yu, Jiumiang Gu, Jinhui Xu, and Tong Sun. LAFITE: towards language-free training for text-to-image generation. CoRR, abs/2111.13792, 2021. URL https://arxiv.org/abs/2111.13792. +Minfeng Zhu, Pingbo Pan, Wei Chen, and Yi Yang. Dm-gan: Dynamic memory generative adversarial networks for text-to-image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 5802-5810, 2019. + +# 6 APPENDIX + +![](images/2b4b72b1f6799e07a03144ae2c4f93aba3e1f5a77c12dea2590dbbd421563f90.jpg) +Figure 11: Samples from COCO validation set. + +![](images/ce3ccce82f232b4e283aec494b0c13bea42ffee036d195210f0b4d5966567f78.jpg) +Chicken waiter serving dinner + +![](images/771a520485db0685554de117fdb5b8b2b7df526a3ff3d6e29bcb96ac1aa31fa9.jpg) +Virtual reality + +![](images/f507f247bbca5b3b141fb07f4338e1f4d52d996e4ae52357691d3ef8a425601b.jpg) +Monkey eats hamburger + +![](images/771a07aba8d1ecb57f9a52969daf51707af53d41b78c6e538aae8ae368b0ab5c.jpg) +Scared fish in a suit + +![](images/ec99dc93cc1fdb09978c1af0af6fc17374a9a8dffd107442bdacd63fe7eb9afd.jpg) +Clown unicorn + +![](images/57ff2b04de1499bafd2d67d5a2bbdd0be2c52b7738157291f8a86ca7ec5bbcfe.jpg) +Goodnight sleep + +![](images/824a9229933683fab759cf622192aef4dbe56e89a46a3f24a712c0a517da4797.jpg) +Alpaca in space + +![](images/e13ed912a0aa103ff4953892cd8ca6a4f47a265f0555caa255b81687631d60be.jpg) +Gargoyle in a party hat + +![](images/cee58992bce38c9bb39d79872946f9b972e360e346a27a4a377ac84937b6f626.jpg) +Cauliflower crying + +![](images/034c9ada3080558284963679cf8d485383c929b4d00d6e33e37a83c6f5655416.jpg) +Teddy bear wearing VR headset + +![](images/39f7776fbe4837569292411ecc74a220567677e534175845d0bc255cea705851.jpg) +Hot headed cucumber + +![](images/e8cbcd13f98b1ca4d06fe2a5bf15d8bb1f498bdd74b0affd113156c908e73e28.jpg) +Painterly pigeon playing a synthesizer + 
+![](images/68cd35a40ac2bddb7df95aade633f7c5c7d260abd1ebd3c53e07aa987dbf4b34.jpg) +3D cat avatar + +![](images/37563a8081740e796edcb080f0817ace44ed87f4b74ba2510c64426ec84948f5.jpg) +Music band made of fruits + +![](images/195a0de413ce4cde3dd7de6434aa6699abd110a0d8a6d502197df029ba65e73f.jpg) +A confused robot as an impressionist painting + +![](images/1b74464a4fc7f98a95da6d801edef23976c5e706bf300b37f9ad2ae461479699.jpg) +Panda playing guitar + +![](images/213a8157b171ac994347fa846fab6f3e6e5de2656e48e1a24e46010c2658a58c.jpg) +Sloth doing ballet + +![](images/8196555c6fd50ab74b29d49fdf3aa0be6e069ec57518a500d87f0e3382d1478f.jpg) +3D rendering of avatars playing basketball + +![](images/1b3c6dc79a6a667b80c708688eb6a73cee42591de3e517b784dbc27f9c75a281.jpg) +Singing otter + +![](images/de20396cc5f3c57b4d02269470eeaeed40eeb826644a1974c36a13ec4e5c33a4.jpg) +Muscle man riding a wave in Hawaii + +![](images/e86297f7018f77b6baa117da5bf750371f060cc3c823697eae58f8d6c9827a5c.jpg) + +![](images/91714325fdca8c125ac3a66b6191ace57a769107f063055d960cfe21d7a07ad5.jpg) +Dog is programming a computer + +![](images/47ad1575d14858d0c694d739d3c0411fa031052ec6b2989d46345aadf46561fc.jpg) +Radha Krishna dancing in a garden + +![](images/cca27a8f1996860dbfcb73a94b4b345b11a4dd3aefea845360161d16166c1540.jpg) +Shark wearing a birthday hat + +![](images/25884ccba2d5d04cbb30fef6e491a8c9bfc0f1a17aec2712e178fc7d29612049.jpg) +Avocado playing ukulele and an apple is watching + +![](images/06ccbf7aa43ed907b415559031a1f9dea412556970d9ce31e45d465097ae10dc.jpg) +Cinematic llama with dramatic lighting +Celebrating frog +Figure 12: A selection of stickers generated using the continuous kNN-Diffusion model. 
+ +![](images/2d1eaa1c073e8d35e25d43e64ffbae7daf6da678710413708a2f7fc381506780.jpg) +A dog in a hotdog costume + +![](images/db0533c98d53ff549df32e6e68ca3863755f88eed3d04e57ae4e2abd973a0067.jpg) +Alien using VR + +![](images/065e901e300282687752803ea145e75c5c4a48525450a22d099b9c53f26c357b.jpg) +Unicorn with a rainbow horn, waving hand, and standing on grass + +![](images/9cf933b326cc7efbda45b6fc49f439e7aa4b295d9b93a6be84b7cd53c1092f54.jpg) +A brain made out of words + +![](images/370c6b22ce0cefa9204cffaaf109bd79d196271a9871fc6f9ec23750a5d3c71b.jpg) +A brown shiny rose flower +A white firetruck + +![](images/64252786a5e033a5dc2c74dd2099aaec25d62ad2ec704b3222e80138304809d2.jpg) +A dog using a typewriter +Race car driver in a tutu + +![](images/2cffa26afa32bbbed0e7f1d17b7728e98e2a29c0a173c9b9c2bd86be0bf8d8cd.jpg) +A robot tanning +Blue ants + +![](images/4a31a128de43a4f2b448b444f19a593c9f4e5d3bd607a5b6ac3a4be0766c982f.jpg) +Fuchsia iguanodon +Lucifer dancing with Jesus + +![](images/80a37af0645a1500e6f46f2c00f05e72ca08a553cd67a2d65db99dc6af1ee201.jpg) +Pineapple shaped refrigerator + +![](images/3ebd87f5da61b89b7a3b7dab423d11e8c3059f9301b91e29aa22cb554204f073.jpg) +A surfer wearing a three-piece men's suit + +![](images/2f2c07585bca319b7160e26cfcf98ab258be3265ece37279124fcb51cb8b27ea.jpg) +Rocking chair on water + +![](images/71e5e86425d94203ab4eb0ac7a00b943ce2e232c3bcfc78316b3c1fc87443fec.jpg) +Alien cartoon + +![](images/3fee574afde9883f4afba06f39b3e02bed4cd57a241ef8a4b8ec81cc1102e68f.jpg) +Kitchen from the lost city of atlantis + +![](images/7e047ca7ac0f5db30927a660c2fbfab2aa5bee42d353caf6a5258787e1827f60.jpg) +A pink watermelon + +![](images/8649d2f4a994dc5fe971a11ad918fd843cce0157555b9f74b407e6ba7c1f97b0.jpg) +Image of grey tiger + +![](images/0ea58105f475274f27d69f1d1a13a75abb921097f517766561b674d895440bc7.jpg) +Green robot vacuum + +![](images/0c8ba1e6a6b182ac642d8c68efabacb6632b2c48c7db500a19c91352307fc406.jpg) +A baby cooking spaghetti + 
+![](images/76a022dd4c26524662b1445da0a09d0e73ae56fa230abb50f91e87e0d18a4970.jpg) +Raccoon mansion + +![](images/249e63eae7a23719f8a4f252c80eec4c89a48d780de15cbc02f3159526b68def.jpg) +Flying m1 abrams tank + +![](images/b2d7399d706c16ce4a6992c7c3e35cd4088022f3e7cfca2fe5576556d7093af4.jpg) +Baby pictures of grandparents + +![](images/058412fcfa15ee2dd40dc066db22eb6b31b883b6ec2788731cd82589fa5c89eb.jpg) +Figure 13: Additional samples generated from challenging text inputs using the photo-realistic model + +![](images/93e09def9ac4f2a57e4c56aa4f857983d1fabb0683731abd4fedcaa67ea8c3be.jpg) + +![](images/3547976b13202681db475437924519f652fd5025a0896c6ab9aa0c6532f58345.jpg) + +![](images/78d779cbb719d59e7599c8ad91084bec96405b640debd586bc4c3936aa813ef2.jpg) + +![](images/b65fb3509024ec0020f9a7bde56f02487931e37073720ecffc9f0942e2083f9b.jpg) +This tomato is favorite of many high-class chefs + +![](images/22bcf71182d87373683714929f6cde8a9645fe7b281e622216833b2284a080f4.jpg) +Squirrel wearing a shirt + +![](images/1f467d3792f4ad73f0d569b9f664fa4c99496625567db6c3a16b321967525415.jpg) +Singing eggplants +A mushroom with a hat + +![](images/af81b46e1300e22e9e14e666c500d606a75d00bcb22cb2fffc6f5a4440d67f1b.jpg) + +![](images/aab033387b0cf5230b9c6e7805bf11ae57f34c6f679ffd17fbc6e52d3aab4512.jpg) +Black mop head +Figure 14: A selection of stickers generated using the discrete kNN-Diffusion model. 
+ +![](images/949d063e94628bc7fe47c4c3ea2983a86e9ebbd3d8b80c778feff3aaed8fadad.jpg) +A panda bear carrying some grocery bags + +![](images/cfa0b6ecc77ae6c5777ca20950d813bfa5043c232da4e09582a7d896363294e3.jpg) +Penguin drives a bus + +![](images/9aab18425e79fb7d5e1dc08690250149f683ede1e3c77449de78f2402b32c395.jpg) + +![](images/2e0f6f53549eba190c6c484a5cbea19870cab05444de8063c31f14a7ca29103c.jpg) +Chihuahua pulling a royal coach +Elephant sitting on a lion + +![](images/339ea1307b94f571c2a3eff4b26da0b612c80e5dc686895bbd1b8ec494660652.jpg) + +![](images/1c6e61268142f026dab4aea1a03c4ee9b596ee0b72ea70df1333e91a0fb6ce18.jpg) +Bald-headed mimes +Vomit candy +Invisible people + +![](images/0a4ae2e738d729adb8f99baff5ccbd6b65fca462a7a50519b14a191e9f0f4140.jpg) +I J Skos . sbalele + +![](images/d93bc777bd80fc4f9f179d2061dd61206227815e6c4fb979cbf41fdacd355269.jpg) +Queen Esther +A laughing purple porcupine + +![](images/2f28dd260535536d8f92c1ce5d7f50bdcea0b806a10fe89c32af00d2eb9b6009.jpg) + +![](images/7bce8138d3fa6f6f6b241e0c9e0138b6b369a541e84ca9547078167f600c17a5.jpg) +Monkey eating a pickle + +![](images/0be22e643cfb8bdaa363741c182b79b3b2ea2c47c10e3a6875a79f141e0be8d2.jpg) +Alien cartoon +A lion wearing a T-shirt + +![](images/f161e048b5a4632054fbcff49ce75e52e6420ffffe061309b99119d2a13e5346.jpg) + +![](images/44b85c6aff143613211f3522a9dd7084740c23a7a7d637c5ac4df602395b574d.jpg) +One legged striped rabbit +Dollar bill combing hair + +![](images/edbbc96cbb7ace016f65e7c774a92426fc863ff1aaefc1475aadb5c7b099476c.jpg) +A sloth eating oatmeal + +![](images/5be0ce636497b5303815e129e45479ac35d97418a6af2b5ef66d3a17f92a7290.jpg) + +# 6.1 BACKGROUND + +Continuous diffusion process Diffusion models are latent variable models that aim to model a distribution $p_{\theta}(x_0)$ that approximates the data distribution $q(x_0)$ . Specifically, they model a forward process in the space of $x_0$ from data to noise. 
Given a sample from the data distribution $x_0 \sim q(x_0)$ , this process produces a Markov chain of latent variables $x_1, \ldots, x_T$ by progressively adding Gaussian noise to the sample:
+
+$$
+q \left(x _ {t} \mid x _ {t - 1}\right) := \mathcal {N} \left(x _ {t}; \sqrt {1 - \beta_ {t}} x _ {t - 1}, \beta_ {t} \mathcal {I}\right) \tag {1}
+$$
+
+where $\beta_{t}$ is a variance schedule. As presented previously by (Ho et al., 2020), the latent variable $x_{t}$ can be expressed directly as a linear combination of noise and $x_0$ :
+
+$$
+x _ {t} = \sqrt {\bar {\alpha} _ {t}} x _ {0} + \epsilon \sqrt {1 - \bar {\alpha} _ {t}}, \quad \epsilon \sim \mathcal {N} (0, \mathcal {I}) \tag {2}
+$$
+
+where $\bar{\alpha}_{t} := \prod_{i=1}^{t}(1 - \beta_{i})$ . In order to sample from the data distribution $q(x_0)$ , we define the "reverse process" $p(x_{t-1}|x_t)$ which samples first from $q(x_T)$ and then samples reverse steps $q(x_{t-1}|x_t)$ until $x_0$ .
+
+Since the data distribution is unknown, we need to train a model to approximate it. Note that when $T$ is large enough, the noise vector $x_{T}$ nearly follows an isotropic Gaussian distribution. This suggests learning a model $p_{\theta}(x_{t - 1}|x_t)$ to predict mean $\mu_{\theta}$ and covariance matrix $\Sigma_{\theta}$ :
+
+$$
+p _ {\theta} \left(x _ {t - 1} \mid x _ {t}\right) := \mathcal {N} \left(x _ {t - 1}; \mu_ {\theta} \left(x _ {t}, t\right), \Sigma_ {\theta} \left(x _ {t}, t\right)\right) \tag {3}
+$$
+
+To train this model, we can replace $\mu_{\theta}(x_t,t)$ by predicting the noise $\epsilon_{\theta}(x_t,t)$ added to $x_0$ using equation 2 and we get this objective function:
+
+$$
+L := E _ {t \sim [ 1, T ], x _ {0} \sim q (x _ {0}), \epsilon \sim \mathcal {N} (0, \mathbf {I})} [ \| \epsilon - \epsilon_ {\theta} (x _ {t}, t, y) \| ^ {2} ] \tag {4}
+$$
+
+where $y$ is an optional conditioning signal (such as text/image embedding or a low resolution image). 
+
+Discrete diffusion process Let $x_{n}\in \{1,\ldots ,V\}^{h\times w}$ be the indices of the allocated codebook vectors extracted by a pre-trained VQGAN (Esser et al., 2021) encoder. The forward process of a diffusion model $q(x_{n}|x_{n - 1})$ is a Markov chain that adds noise at each step. Moreover, the reverse process $q(x_{n - 1}|x_n,x_0)$ is a denoising process that removes noise from an initialized noise state. As presented by (Gu et al., 2021), the forward diffusion process is given by:
+
+$$
+q \left(x _ {n} \mid x _ {n - 1}\right) = v ^ {T} \left(x _ {n}\right) \mathbf {Q} _ {n} v \left(x _ {n - 1}\right) \tag {5}
+$$
+
+where $v(x_{n})$ is a one-hot vector with entry 1 at $x_{n}$ , and $\mathbf{Q}_n$ is the probability transition matrix from state $x_{n-1}$ to $x_{n}$ .
+
+The reverse process is given by the posterior distribution:
+
+$$
+q \left(x _ {n - 1} \mid x _ {n}, x _ {0}\right) = \frac {\left(v ^ {T} \left(x _ {n}\right) \mathbf {Q} _ {n} v \left(x _ {n - 1}\right)\right) \left(v ^ {T} \left(x _ {n - 1}\right) \bar {\mathbf {Q}} _ {n - 1} v \left(x _ {0}\right)\right)}{v ^ {T} \left(x _ {n}\right) \bar {\mathbf {Q}} _ {n} v \left(x _ {0}\right)} \tag {6}
+$$
+
+where $\bar{\mathbf{Q}}_n = \mathbf{Q}_n \cdots \mathbf{Q}_1$ .
+
+Inspired by masked language modeling (Devlin et al., 2018), they propose corrupting the tokens by stochastically masking some of them. Specifically, an additional special token [MASK] is introduced, so for each token there are $(\mathrm{V} + 1)$ discrete states. 
By adding a small amount of uniform noise to the categorical distribution, the transition matrix can be formulated as:
+
+$$
+\mathbf {Q} _ {n} = \left[ \begin{array}{c c c c c} \alpha_ {n} + \beta_ {n} & \beta_ {n} & \beta_ {n} & \dots & 0 \\ \beta_ {n} & \alpha_ {n} + \beta_ {n} & \beta_ {n} & \dots & 0 \\ \beta_ {n} & \beta_ {n} & \alpha_ {n} + \beta_ {n} & \dots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ \gamma_ {n} & \gamma_ {n} & \gamma_ {n} & \dots & 1 \end{array} \right] \tag {7}
+$$
+
+where $\alpha_{n}\in [0,1]$ , $\beta_{n} = (1 - \alpha_{n} - \gamma_{n}) / V$ , and $\gamma_{n}$ is the probability of a token being replaced with the [MASK] token. Each token has a probability of $\gamma_{n}$ of being replaced by the [MASK] token, $V\beta_{n}$ of being resampled uniformly, and $\alpha_{n} = (1 - V\beta_{n} - \gamma_{n})$ of remaining unchanged.
+
+# 6.2 ADDITIONAL SAMPLES
+
+In Figs. 16 and 15 we present a visual comparison of our discrete model, trained on the stickers dataset, with (1) the kNN extracted during inference, (2) the same model without using kNN in inference. As can be seen, the images generated by our model are better aligned to the corresponding text compared to the baselines. While the baselines fail with challenging prompts, our model produces high-quality images that align with the text, and composes multiple concepts correctly.
+
+COCO Validation Set Comparison Fig. 11 presents a qualitative comparison with FuseDream (Liu et al., 2021), CogView (Ding et al., 2021) and VQ-Diffusion (Gu et al., 2021) on the COCO validation set. Note that both CogView and VQ-Diffusion have been trained on an Image-Text paired dataset, whereas our model was not trained on the COCO dataset, nor used it in the retrieval model.
+
+Additional samples generated from challenging text inputs are provided in Figs. 13, 14 and Fig. 12. 
+ +![](images/277eed8ca882f0c8f04faf312ffa99177e145e6f3061c130d08e57099f5ddbc5.jpg) + +![](images/ba3a5906a0e0a6b9949bcb6ffdaf71182a42f6f3afe81f7fe4b7b5faaa88c183.jpg) +Figure 15: Comparison of our model, trained on PMD with (1) kNN extracted in inference, (2) the same model without using kNN in inference. While the kNN lack information regarding text semantics, our model considers both text semantics and the kNN, thus proving the advantage of using both the text and the kNN embeddings. +Figure 16: Qualitative comparison of stickers generated using the discrete kNN-Diffusion model, 10 Nearest Neighbors to the text in the CLIP embedding and a discrete model that does not use kNN. + +# 6.3 HUMAN EVALUATION PROTOCOL + +For all of our human evaluation experiments, we used Amazon Mechanical Turk. For each experiment, we used 600 samples, each scored by five different people. The preferred sample was determined according to majority opinion. For each baseline comparison, we asked two questions (in different experiments): "Which image is of a higher quality?" and "Which image best matches the text?" + +# 6.4 DATASETS + +The modified PMD dataset is composed of the following set of publicly available text-image datasets: SBU Captions (Ordonez et al., 2011), Localized Narratives (Pont-Tuset et al., 2020), Conceptual Captions (Sharma et al., 2018), Visual Genome (Krishna et al., 2016), Wikipedia Image Text (Srinivasan et al., 2021), Conceptual Captions 12M (Changpinyo et al., 2021), Red Caps (Desai et al., 2021), and a filtered version of YFCC100M (Thomee et al., 2015). In total, the dataset contains 69 million text-image pairs. + +# 6.5 ABLATION STUDY + +Index size As one can expect, increasing the index size at inference time improves performance. To demonstrate this hypothesis, we evaluated our model with an index containing $10\%$ , $30\%$ , $50\%$ and $70\%$ images of PMD dataset, and obtained FID scores of 13.92, 13.85, 13.72, and 13.65 respectively. 
+ +kNN conditioning We examined several different approaches to kNN input conditioning: (i) forwarding the kNN embeddings and the single image embedding through a self-attention layer before feeding the contextualized $K + 1$ embeddings to the model, (ii) feeding the model with one embedding, computed using cross-attention between the image embedding and the kNN embeddings, and, (iii) feeding the model with the image embedding concatenated with a learned linear projection of the kNN embeddings. These variants received FID scores of 18.3, 22.4, 34.1 respectively. + +# 6.6 RETRIEVAL MODEL + +The retrieval model is implemented using FAISS (Johnson et al., 2019). FAISS is an efficient database, capable of storing billions of elements and finding their nearest neighbors in milliseconds. In the pre-processing phase, for each image in the dataset, we store the image index and its corresponding CLIP image embedding. During training, given a training image, we extract its CLIP image embedding and search for its 10 (see Fig. 9) nearest neighbors in the dataset based on the cosine similarity distance. + +For an efficient search during training and inference, we use a non-exhaustive search: For this, we use an inverted file index. As in Babenko & Lempitsky (2014), we define Voronoi cells in the $d$ -dimensional space (where $d = 512$ is the CLIP embedding dimensional space), s.t each database vector falls in one of the cells. During search time, only the embeddings contained in the cell the query falls in and a few neighboring ones are compared against the query vector. In addition, to fit the index of our large-scale datasets on a 128GB RAM server, we compress the code size from $512 \times 32/8 = 2048$ Bytes to 256 Bytes using optimized product quantization (Ge et al., 2013; Jegou et al., 2010). In Algorithm 1 we include pseudocode of the core of the implementation of the retrieval database. 
+ +# 6.7 DISCRETE KNN MODEL + +We provide additional implementation details for the discrete diffusion model. Additional training details can be found in Tab. 3. + +Vector Quantization For token quantization, we use VQ-VAE and adapt the publicly available VQGAN(Esser et al., 2021) model, trained on the OpenImages(Krasin et al., 2016) dataset. The encoder downsamples images to $32 \times 32$ tokens and uses a codebook vocabulary with 2887 elements. + +Image Tokenization In our discrete generative model we model images as a sequence of discrete tokens. To this end, we utilize a vector-quantized variational auto-encoder (VQ-VAE) (Van Den Oord et al., 2017) as image tokenizer. VQ-VAE consists of three components: (i) an encoder, (ii) a learned codebook, and, (iii) a decoder. Given an image, the encoder extracts a latent representation. The codebook then maps each latent vector representation to its nearest vector in the codebook. Finally, the decoder reconstructs the image from the codebook representation. VQ-VAE is trained with the objectives of reconstruction and codebook learning. VQ-GAN (Esser et al., 2021) adds an adversarial loss term that tries to determine whether the generated image is fake or real. This added term was shown to improve reconstruction quality. + +Transformer We follow Gu et al. (2021) and train a decoder-only Transformer. The decoder module contains 24 transformer blocks, each containing full attention, cross-attention for the concatenated conditioner, and a feed-forward network. The timestamp $n$ is injected using Adaptive Layer Normalization (Ba et al., 2016). The decoder contains 400 million parameters. + +Classifier-free guidance We sample our diffusion models using classifier-free guidance (CFG) (Ho & Salimans, 2021; Nichol et al., 2021; Ramesh et al., 2022). CFG is performed by extrapolating an unconditional sample in the direction of a conditional sample. 
To support unconditional sampling, previous work had to fine-tune (Nichol et al., 2021) their models with $20\%$ of the conditional features nullified. This enabled them to sample unconditional images from the model using the null condition, $y' = \vec{0}$ , the null vector. We found that we can generate unconditional samples from our model using null conditioning without fine-tuning it. We hypothesize that by conditioning the model on a null vector, the cross-attention component is also nullified, resulting in no contribution to the diffusion process. During inference, in each step of the diffusion process we generate two images: conditional image logits, $p_{\theta}(x_{n-1}|x_n,y)$ , conditioned on the desired multi-modal embedding $y$ , and the unconditional image logits, $p_{\theta}(x_{n-1}|x_n,y')$ , conditioned on the null embedding. Then, the final image for a diffusion step $n$ is sampled from
+
+$$
+\begin{array}{l} p _ {\theta} \left(x _ {n - 1} \mid x _ {n}, y\right) = p _ {\theta} \left(x _ {n - 1} \mid x _ {n}, y ^ {\prime}\right) + \\ \lambda \left(p _ {\theta} \left(x _ {n - 1} \mid x _ {n}, y\right) - p _ {\theta} \left(x _ {n - 1} \mid x _ {n}, y ^ {\prime}\right)\right) \\ \end{array}
+$$
+
+where $\lambda$ is a scale coefficient. In all of our experiments, we set $\lambda = 8$ , which was found to yield the best (lowest) FID scores on the validation set. Note that the above extrapolation occurs directly on the logits output by $p_{\theta}$ , in contrast to GLIDE (Nichol et al., 2021), which extrapolates the pixel values.
+
+Training Objective For completeness we are adding the training objective of the discrete model. 
The network is trained to minimize the variational lower bound (VLB): + +$$ +\begin{array}{l} \mathcal {L} _ {\mathrm {v l b}} = \mathcal {L} _ {0} + \mathcal {L} _ {1} + \dots + \mathcal {L} _ {N - 1} + \mathcal {L} _ {N}, \\ \mathcal {L} _ {0} = - \log p _ {\theta} \left(x _ {0} \mid x _ {1}, f _ {i m g} (I), \operatorname {k n n} _ {i m g} (\mathrm {I}, k)\right), \\ \mathcal {L} _ {n - 1} = D _ {K L} \left(q \left(x _ {n - 1} \mid x _ {n}, x _ {0}\right) | | p _ {\theta} \left(x _ {n - 1} \mid x _ {n}, f _ {i m g} (I), \operatorname {k n n} _ {i m g} (\mathrm {I}, k)\right)\right), \tag {8} \\ \mathcal {L} _ {N} = D _ {K L} \left(q \left(x _ {N} \mid x _ {0}\right) | | p \left(x _ {N}\right)\right) \\ \end{array} +$$ + +Where $p(\pmb{x}_N)$ is the prior distribution of timestep $N = 100$ , $f_{img}(I)$ is the CLIP image embedding, $\mathrm{knn}_{img}(\mathbf{I}, k)$ is the $k$ nearest neighbors in the feature space of the image embedding. The full details can be found in Gu et al. (2021). + +# 6.8 CONTINUOUS KNN MODEL + +We provide additional implementation details for the continuous diffusion model. Additional training details can be found in Tab. 3. + +Decoder. We followed (Nichol et al., 2021; Ho et al., 2020; Ramesh et al., 2022) and re-implemented a diffusion $U$ -net model. Specifically, we modify the architecture described in (Ramesh et al., 2022) by allowing multiple CLIP embeddings as the condition to the model. Since we do not have a paired text-image dataset, we removed the text transformer, and thus the text embedding. In particular, we use 512 convolution channels, 3 residual blocks, 64 heads channels and attention resolution of 32, 16 and 8. 
Similarly to our discrete model, we trained two models (1) + +![](images/28755241c65e0e8da754a4827c3a420acf9c3c1a30d4e4d6dd8a1ada740b7f03.jpg) +(a) Training + +![](images/457362ea822a3328b76c5dadce48dfd63973fb77bd0f84acd1b58b9060785427.jpg) +(b) Inference +Figure 17: During training, only the image I is given (red), whereas during inference only the text $t$ is given (blue). In order to bridge the gap between the two distributions during training, we leverage the K nearest neighbors that should have a large enough distribution (dashed cone) to cover the potential text embedding (i.e. $\cos(b) < \cos(a)$ ). During inference, the opposite is applied. + +a no-kNN conditioned only on CLIP image embedding during training, (2) a kNN conditioned on CLIP image embedding and its kNN. Finally, we enable classifier-free guidance by randomly setting the CLIP embeddings to zero $10\%$ of the time. As demonstrated in Tab. 2, we find that humans prefer our model over no-kNN $66.8\%$ of the time for image quality and $69.4\%$ of the time for text alignment. + +Super-Resolution. As the decoder generates images with $64 \times 64$ resolution, we up-sampled the images to $256 \times 256$ using the open-source super resolution of (Nichol et al., 2021). To further up-sample the images to $512 \times 512$ and $1024 \times 1024$ we used the open-source super resolution provided by (Wang et al., 2021). + +Training Objectives For completeness we are adding the training objective of our continuous model. Following Ho et al. (2020); Nichol et al. (2021) we are using mean-squared error loss to predict the noise: + +$$ +L := E _ {n \sim [ 1, N ], x _ {0} \sim q (x _ {0}), \epsilon \sim \mathcal {N} (0, \mathbf {I})} [ | | \epsilon - \epsilon_ {\theta} (x _ {n}, n, y) | | ^ {2} ] +$$ + +where $\epsilon_{\theta}$ is a $U - net$ model and $y = (f_{img}(x_0),\mathrm{knn}_{img}(x_0,k))$ + +
DiscreteContinuous
Number of nearest neighbors1010
Diffusion steps1001000
Noise schedule-cosine
Sampling steps100250
Model size400M1B
Sampling variance method-analytic
Dropout-0.1
Weight decay4.5e-2-
Batch size5121600
Iterations150K500K
Learning rate4.05-41.4e-4
optimizerAdamWAdamW
Adam β20.960.9999
Adam ε1.0e-81.0e-8
EMA decay0.990.9999
warmup500025000
# GPUs128 A100200 A100
+ +Table 3: Training details of our models + +Algorithm 1 Pseudo-code implementation for the construction of the retrieval model, training and sampling using conditioning kNN. +```csv +Retrieval model construction +def training(batch:train image dataset): 1 //inverted index of 50k centroids, 2 //with optimized product quantization to 256B index_cfg $\equiv$ "OPQ256_IVF50000_PQ256x8" 4 index $=$ faiss.indexFACTORY(d,idx_cfg,faiss.METRIC INNER_PRODUCT) 5 ivf $=$ faiss.extract_index_ivf(index) 6 clustering_index $=$ faiss.index_cpu_to_all_gpus(faiss.IndexFlatIP(d))7 ivf.clusterbing_index $=$ clustering_index 8 train_dataset $\equiv$ [] for image in random.sample(batch,1000000): 10 train_dataset.append(CLIP_image_embedding(image)) 11 index.train(train_dataset) 12 for image in dataset: index.add(CLIP_imageEncoder(image)) 14 return index Training +def training(I:FAISS index, image, k:Number of NN, t:timestamp [0,T-1]): image_encoding $\equiv$ CLIP_imageEncoder(image) 2 kNN $=$ I.search(image_encoding,k) condition $=$ concatenate([image_encoding,kNN]) 4 image_T $=$ add_noise(image,t) 5 image_0 $=$ diffusion_model(image_T,t,condition) loss $=$ criterion(imageO, image) 7 return loss 9 Sampling +def sampling(I:FAISS index,text,k:Number of NN): 1 text_encoding $\equiv$ CLIP_textEncoder(text) 2 kNN $=$ I.search(text_encoding,k) condition $=$ concatenate([text_encoding,kNN]) 4 image $=$ sample_noise(T) for t in [T-1,T-2,...,0]: image $=$ diffusion_model(image,t,condition) return image 8 +``` + +# 6.9 TEXT-ONLY IMAGE MANIPULATION + +Our approach is illustrated in Fig. 18. Additional manipulation examples are provided in Figs. 20. The full comparison with the baselines is provided in Fig. 21 and 22. We also provide in Fig. 19 several examples for the process of the manipulated images construction. + +![](images/fea036c0c8e5389608ac52d49721467649003b1a0a35755855ba57e202c713a4.jpg) +Figure 18: An illustration of our manipulation approach. 
During training: Given a training image (1), the model extracts its first nearest neighbor (2). Next, a random local area in the training image is selected (3), and the manipulated image is constructed by replacing the area with the corresponding nearest neighbor (4). The model then receives as input the manipulated image and the clip embedding of the local area that needs to be restored (5). During inference: Given an input image and a text query "A face of a male child", the model receives as input the image (4) and the clip embedding of the modifying text (5). + +![](images/7794468ff681a8636985d6cb84fc05bddf2664f668a9bde3664e76197117ba7a.jpg) +Figure 19: Illustration of the manipulated image construction process during training. Given an original image, we select a random local area, and extract the first nearest neighbor (1-NN). Using ECC alignment, we align the nearest neighbor with the original image and replace the random local area with its corresponding nearest neighbor local area. The model then receives as input the manipulated image, together with the CLIP embedding of the local area, and tries to predict the original image. 
+ +![](images/64a5fed58572e933e1150d81175adf677378bf0bcf8c2b8208d133c439fb9bba.jpg) +Original + +![](images/e57a90ca121854cb31db0c943a4187b5d80ce956d8c7666149a472f68afb6666.jpg) +Raising left hand + +![](images/26dd6911e16ef1181ae7585e4a7d16bad41cf406d412b5c6f77b269896702d26.jpg) +Raising hands + +![](images/2b420541183788d11f05a6dd90a27676dc433e5a8ae13ac62205ad1f6ab272bb.jpg) +Blue pants + +![](images/7b03a21491eb59ae39c6b88af308ccde062f214bbf50ac3e8838ba9cf165e0ac.jpg) +Black shirt + +![](images/6f17dfadca4040e3ae828e936f4f65e868855ec998f18f5759654998702905cc.jpg) +Holds a heart + +![](images/d635cdd4e77493bacbd9cd0b94c4f8abcece23f662a43aeb3c2b7170735a1fcc.jpg) +Princess + +![](images/eb8bcca698bf235668fac7b6d2090d32ead07adf165c7601b98c576956e8b2d6.jpg) +Sitting + +![](images/2ba6fa05045477cf601aedf5449da7f19fa33ee5d3ae03726fe1f5a06587b48c.jpg) + +![](images/e04d91ae0078e3f13b04545dbd231fa9f18cad6ba2b68457a7651ac7a87d9c45.jpg) + +![](images/a808aed1c4541648fb3051088b24badca32837e34912c7130ece8d1e15959fec.jpg) +Original + +![](images/51078dc7dc29b2d6fdeac341ddf5024c07b6f7122090b9e727a58e35c6e40d59.jpg) + +![](images/c4f07c90e6b9dfa4b319f10c60895cffc314e52bcbbfc0df21c1902a8ed03e18.jpg) + +![](images/53fadd6a3913e68819b93b806fd4aafeebcd3c0b345de3d315d8de372936b6df.jpg) +Smiling + +![](images/c01d5cdc11749aceb600474b790b227f0725639ed45e6cc3017b74e047d7f3a2.jpg) + +![](images/228c3853d508570273ac7e44aa73a0a7931b1c3e53d6b7c8642a5698f4437fd5.jpg) + +![](images/de7e123758ffaa3c53c949f86ab501000b510c5a29ec9baa367ead431dbc1ace.jpg) +Angry + +![](images/f8084a2788067af8c2a6467de0967dce3dd260b6eaf65144a09153bc799d92eb.jpg) + +![](images/3390a19d095a77761a8cb62fe5351a40a125939fb240ae0313747de09776abcd.jpg) + +![](images/7f5c9af1b8eb8e9a7401d597697e61a7c3c14a31597371d96db50a5b28172d6f.jpg) +Sad + +![](images/356e3c1782cb587c2a81187bd60ed235e0c78c24a06e04a2d004a20e977d7bfd.jpg) + +![](images/e56d2185ea670ff29b649342f4e3bb5fc3df7f61ee6195b237e0c55fabe7d072.jpg) + 
+![](images/12c177a34c07bdea3fd1247b8831c6e6cc55dc26e9b21766e89323dd958d3187.jpg) +Surprised + +![](images/da607d1b8b77be7cb8507b68d659c13b7bffbc7cf157aa061ce8a308fdd211fd.jpg) + +![](images/e674f7d82e7b9881aa9754566d6123277f2961053857202481e3bfb184c64aa5.jpg) + +![](images/45586dd300b9af485b22e3f8adda77dfd5d45fc14a6a58bc8ab18172acee9c40.jpg) +With a tie + +![](images/2afe8cd0b79ec97e8f85acb7b54374dd5929d4d4af34a4e617a4ed08c4f69cc2.jpg) + +![](images/33730992ca1160af4ce702c5a5155b30fe5bce51fa7a0165ff152335e2d98122.jpg) + +![](images/9c94b18e7cdc8231a1b1f8c79561ad3948e5f2c432cffee042beea77b341bb6b.jpg) +With a hat + +![](images/1cbd9b12615d0960973eb52c5db15b704c2e258420bc12b308b4eed00e16733d.jpg) + +![](images/8e6e49973d317d28e398a63397441caec9b5c31b070429695becdb2767a11ad8.jpg) + +![](images/c184bf010aed80b2b24f8e3ddfe26ebff667d3c8608b9c68cd8fd7a1f5c4e084.jpg) +Holds a heart + +![](images/5d64f882ac50029ec58d42f2227b06018dacc7147a2126af54a93db2e38abc4d.jpg) +Original + +![](images/57dea729a4cf56b78ebebfafc8fb83f40b589b818cccf38268df7c5d52410621.jpg) +Original + +![](images/d1f089e64c677195ce254b031d92751d7f5cfc53e5268353ddaa8943be339c0c.jpg) +Brown + +![](images/24417939aaec7b1106a20f9c8278eb671db242b002aca04ef883863b7ef10653.jpg) +With a bow tie + +![](images/4837c9c2816f13d2d82dfbe651897d639d5e0a71bb78e8dee928b12118b1d8ee.jpg) +Blue racing car + +![](images/95eecdebf2ef13c3d6d48d129bd43ff57aa8b103e3f0a62cb1a290bb6ce67f2c.jpg) +With a bow tie + +![](images/79a44f0cd5d2d845de178dd2725361391d7bac97323eb6c2fa3966b2720dddbd.jpg) +Flowers + +![](images/d14ca17d5966209bdf8569e00cf2a793237fa81ae1e59e73b8043fc6ef615f14.jpg) +Sleeping + +![](images/6c3892bd5c587c080a087740bf83d11cf32f933af17bcef12403504d87edb4b9.jpg) +Sketch + +![](images/37332cd45083ae44a77e2593bdde45ed6df852828bdec4173377205db5ea088e.jpg) +Yellow racing car + +![](images/a1ae83a724a796bc78a8c855c453a8dcad23a333050ea1cb03f64e12ce16eeb3.jpg) +Stars + 
+![](images/a8a608179b18282bbc4fca5c9bb644f944ce4ca3f45ff89e3ab2fbe287148ef4.jpg) +Colorful + +![](images/b4166392a635f5f7f216a72384752f389d3a06b775316e761058f1dba42c80e8.jpg) +Red lipstick + +![](images/ede45cf23c0b3b2a62df0c6f091a8150e32fabe93e54bbb16adf854188ad5c20.jpg) +original + +![](images/703d7ab071c3469d953fed3a7c3df1bb4d3f97a6a3dd4264c58b98d8b8fe7346.jpg) +Smiling + +![](images/55ec0acaf15c5fa5367499e659ee0a2012b4c1f3496f07c369b08b31b9476e4b.jpg) +Angry + +![](images/e2f145fec752ce2ff05c9f2c4eba6efb856fb1eb01a967b27cb16ba362757fcd.jpg) +Sad + +![](images/3d26da57aa83f13d6f387eede510aee64a557d7ae45ab901864b4de5244d1a67.jpg) +Surprised + +![](images/722ac0a79cffb8727a85cf1538422f947407deb086ea4b2dbb437f08b36e51cd.jpg) +original + +![](images/900cfe73d83b68ab5c0fcff3588924975096b6eb41af62a6c4a9c61ccbd0065c.jpg) +Smiling + +![](images/85a96b2379bf7659a6d05680deea7965c6c58ed8d3fc5a3bcf6778411e58cabc.jpg) +Angry +Figure 20: Additional manipulation examples, generated using our model. + +![](images/8120f167a21959a387fc65ae4e86572cc95c25fc5882be8d8dbe3bdad25dd453.jpg) +Sad + +![](images/fa46d33e95e6782f1d76a59b8da109af927a82befa76182f0b8d4052d83a8b99.jpg) +Surprised + +![](images/7d95d4e0c853cf4b1f60bc54e5c072f28cc55ffbe3b127b5f23a52804fc085a3.jpg) +Original + +![](images/276d498c8b35bd622999f93ede46c6376c1e6ae0da8f7f935808bb94ea8a51eb.jpg) + +![](images/2de808d68973efcac5e8a3354c748fe3ea790a0ff2c616c8134188b2d11eb240.jpg) +Angry + +![](images/a23da8b93796b929ea4d94a3e3c4c7f0a0203eea5feeb7e4931e834262e614c5.jpg) + +![](images/cbbc7e48e11b5be92f45ac8ec8f66a13132b18450abc71f359be38d493ec3399.jpg) +Sad + +![](images/1a86d5a590456c96c017085ebd4f3fc3a201a35b8daa14450e8ffe5ba8260b71.jpg) + +![](images/4ab3f295a539573f747f98eb8c9c8abaeaa1ef960ae8e4da6f69ab46df56a7db.jpg) +Surprised. 
+ +![](images/1ecfed423ad4a0f1d5f5984c6ae0fa6b17bcb68da8228962a3b2546dca0be1fc.jpg) + +![](images/b00237405d55caad0d1eb62c0ff1d4f6b998bab2700b88e7c84fbb0f42ff7cf4.jpg) +With a tie + +![](images/52a30c3fe41652e5819f5c675c01446102dc48b8a4d5410e03c5d730c6a2f8aa.jpg) + +![](images/e953285210bd6961f5a25692380777ea14643fef569c7cee8d1ad209cbc82d4b.jpg) +With a hat + +![](images/7c6416cacada0f363c74923d059d762f42f7fa81c8e6134ffcc49317e8dbd473.jpg) + +![](images/218f53aea5a76db39fd0d65aef00584e62e5e7912698f0a033e308a67ee0a04c.jpg) +Holds a heart + +![](images/b9f9bd911cb1957027d12e3b090af697c37b59181289ccb50f80cee31330b6b7.jpg) +Original + +![](images/0cfe742020d4be9653c32c689fe0520e25d939459625bede671046f89193faf3.jpg) + +![](images/57f9ece2d348e3204fd6423b8d20ec463ecc08ba92f76ce108540459d4ec61ff.jpg) +Red lipstick + +![](images/92e10a8d74f6c521624fcc74a4b8e28d1e8fb87030d1f10d60ccbb212dff6d92.jpg) + +![](images/7581b475d6dcf90da7bd6fa9413ede0b044f6863fa3d9e1a3695aa3f1af6d9fc.jpg) +Angry + +![](images/4a05875513c127161cf9b870d83e5153519cc47fd10c5bad13c89d1509883371.jpg) + +![](images/bf02d084bbbf77ba0be32d3434cbe7de31fec00e72c05e560dc3c0bca58b9883.jpg) +Sleeping + +![](images/7b9347cbe39d0d97c13139b3de17ec6df2e7bf64a5bab4002cf6d422cec9fc58.jpg) + +![](images/3aea532bfda61819fc360ccae368d08b872e40ac79f06476945ca16dbfd7f24a.jpg) +Surprised + +![](images/010ffae2b2f211937844d822b0babd84ef6efb410beaf8188198a6e13bfa9f20.jpg) + +![](images/9f088aed1b7dc8ad66e9e39aba2e931fce0461cb5a475bedb0c96d115d629b41.jpg) +In love + +![](images/f2b19119c788128e29094df8d10d1a574724b25cfc486370a972379a55b18cd6.jpg) + +![](images/2bfeef54da08f0178f117b2c56f5c9ecb12ef22da5eebaff8bf75642fb9dc6ae.jpg) +With a Bow tie + +![](images/3af46c536e3b680793c22d23188c24209d258d9174de5043a71e0944bf90f59d.jpg) +Original + +![](images/d9910f99497bbc91d306d760fdbe1d4286f2d29eabcfdf2daa82e2b3b82e8f93.jpg) + +![](images/2516fe1d758138ca6bb47524f425cd2f624454987a08cfeb2199eb642b37f093.jpg) +Joker + 
+![](images/e675f956d2f59b26068cf4053927512bb30db85673e186c7a12d67b2aa355683.jpg) + +![](images/9bd3cefd9899741a597eee8a1fbfe3aea4effa552ae35e078e0563eaca248ec7.jpg) +Boxer + +![](images/05082b2adb69a6b6bb88a98db7b6186644cc6b085e777bbf10603e660da1dbc3.jpg) + +![](images/84636665fb3b5f8cd3de3e0cd6c743a7fd63ab3d0fccf99f86f805f718aa2284.jpg) +Ghost + +![](images/83c902450a22854385c5e31818e745351f4b7f6d3ff2ecd279b78de0111227cb.jpg) + +![](images/3555ccd6a6084dd2951bc0305422424cd0ac83f7bc2f4213d1d265ae66fa817a.jpg) +Rainbow + +![](images/463d9661aa179622d18b18b9f040838b35ba40015ca0ba07e7b9d51b07fb2a3a.jpg) + +![](images/adb0e20bacd650f456bc9f50ed5b3f9b0dc17be6f324a9ba4410ad16529d8cda.jpg) +Devil + +![](images/a922d1b2d8aba963c891aba744ba149a73ef6413f15f71d7ba702a0e4ec0843f.jpg) + +![](images/fc03d2f31ac24839c568e5a97761c70f4b4c4f863b8cb8450d39bf5c5bfe19a7.jpg) +Angel + +![](images/b909c31fdb8750d6c6c1777c8da501ecd514a6ddd05de77ed03864a01461fcd3.jpg) +Original + +![](images/b453684b49416b7a2aefa7c7f21523a0a83a79df0ad163bca8948a6e9ca8e0d3.jpg) + +![](images/f702c2d2c84b64f1d4a2038431cd3c5ba4b64f9b78a19c4f6dbe61f7f9970649.jpg) +Raising left hand +Figure 21: comparison to Text2LIVE (Bar-Tal et al., 2022). For each input image, the bottom row corresponds to images generated by our model, and the top row corresponds to images generated by the Text2LIVE model. 
+ +![](images/6959308c2bca6de1ceebd4ea567de01285b1bd9eae92ebc5b6dc1ac669633b74.jpg) + +![](images/ed2f83303f87a785d390431fdb2a8a17fe45b8d0dbf384b82c9494cf133d45c9.jpg) +Blue pants + +![](images/631278d8432b0e6a938afc3655a8e35a2050340fd294be5ab296d3eb2285664e.jpg) + +![](images/abba2c262cc298f2c7009964db6cff36e1dda32deede4b015d15fd87906e22ad.jpg) +Blackshirt + +![](images/880873679cefc93e2480f07e3ea687bb537cdfe36b3d5519d5a94eb58df46bae.jpg) + +![](images/7d9b86ff58d570bb7971130f3d6d15e0cfe62217aa7e258105121a9df795f7d0.jpg) +Sitting + +![](images/71a302b0d07a62c85ac84b659375fbb915f4c6c3d1a14ba06a861b30fc981690.jpg) + +![](images/3778a814044c9c8d6383536ea1a436a1275efe16261aa768e161674319be9f1b.jpg) +With a tie + +![](images/326f61c25b54aa3b466733c3edac9ae344ec59d7b178baa594395f55321ff85a.jpg) + +![](images/0917f7eaf2d8680964deb102d161e938ba8209850f1de97bf9e2e327de708bdb.jpg) +Holds a heart + +![](images/9d4ae41908158cd6c87ece00b8dda5786c6a627cbf87f0c8dbd3973e8a335396.jpg) +Figure 22: comparison to Textual Inversion (Gal et al., 2022). For each input image, the bottom row corresponds to images generated by our model, and the top row corresponds to images generated by the Textual Inversion model. 
\ No newline at end of file diff --git a/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/images.zip b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..19b2dcc86c567edd13da8d346c8b2ee081e7233f --- /dev/null +++ b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cd059492b70dcd8a6053f834e43dc8ca1b9ee784bfec4bac544520d9259acfe +size 2508283 diff --git a/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/layout.json b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..6a9da81e7eb215ce8c656f789d32766adb76ab71 --- /dev/null +++ b/2023/kNN-Diffusion_ Image Generation via Large-Scale Retrieval/layout.json @@ -0,0 +1,27609 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "text", + "content": "KNN-DIFFUSION: IMAGE GENERATION VIA LARGE-SCALE RETRIEVAL" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 134, + 425, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 134, + 425, + 158 + ], + "spans": [ + { + "bbox": [ + 110, + 134, + 425, + 158 + ], + "type": "text", + "content": "Shelly Sheynin*, Oron Ashual*, Adam Polyak, Uriel Singer, Oran Gafni, Eliya Nachmani, Yaniv Taigman" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 158, + 312, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 158, + 312, + 168 + ], + "spans": [ + { + "bbox": [ + 112, + 158, + 312, + 168 + ], + "type": "text", + "content": "*Equal Contribution Meta AI" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 
169, + 271, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 169, + 271, + 179 + ], + "spans": [ + { + "bbox": [ + 113, + 169, + 271, + 179 + ], + "type": "text", + "content": "{shellysheynin, oron}@meta.com" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 109, + 204, + 501, + 361 + ], + "blocks": [ + { + "bbox": [ + 109, + 204, + 501, + 361 + ], + "lines": [ + { + "bbox": [ + 109, + 204, + 501, + 361 + ], + "spans": [ + { + "bbox": [ + 109, + 204, + 501, + 361 + ], + "type": "image", + "image_path": "e21cff83cf03a421c25a92a059e186e9b355f0366a8c027f745f30303b7713f6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 369, + 504, + 390 + ], + "lines": [ + { + "bbox": [ + 104, + 369, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 504, + 390 + ], + "type": "text", + "content": "Figure 1: (a) Samples of stickers generated from text inputs, (b) Semantic text-guided manipulations applied to the \"Original\" image without using edit masks." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 276, + 399, + 334, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 399, + 334, + 411 + ], + "spans": [ + { + "bbox": [ + 276, + 399, + 334, + 411 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 140, + 423, + 470, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 423, + 470, + 578 + ], + "spans": [ + { + "bbox": [ + 140, + 423, + 470, + 578 + ], + "type": "text", + "content": "Recent text-to-image models have achieved impressive results. However, since they require large-scale datasets of text-image pairs, it is impractical to train them on new domains where data is scarce or not labeled. 
In this work, we propose using large-scale retrieval methods, in particular, efficient " + }, + { + "bbox": [ + 140, + 423, + 470, + 578 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 140, + 423, + 470, + 578 + ], + "type": "text", + "content": "-Nearest-Neighbors (kNN), which offers novel capabilities: (1) training a substantially small and efficient text-to-image diffusion model using only pre-trained multi-modal embeddings, but without an explicit text-image dataset, (2) generating out-of-distribution images by simply swapping the retrieval database at inference time, and (3) performing text-driven local semantic manipulations while preserving object identity. To demonstrate the robustness of our method, we apply our kNN approach on two state-of-the-art diffusion backbones, and show results on several different datasets. As evaluated by human studies and automatic metrics, our method achieves state-of-the-art results compared to existing approaches that train text-to-image generation models using images-only dataset." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 597, + 206, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 597, + 206, + 609 + ], + "spans": [ + { + "bbox": [ + 106, + 597, + 206, + 609 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "content": "Large-scale generative models have been applied successfully to image generation tasks (Gafni et al., 2022; Ramesh et al., 2021; Nichol et al., 2021; Sahara et al., 2022; Yu et al., 2022), and have shown outstanding capabilities in extending human creativity using editing and user control. 
However, these models face several significant challenges: (i) Large-scale paired data requirement. To achieve high-quality results, text-to-image models rely heavily on large-scale datasets of (text, image) pairs collected from the internet. Due to the requirement of paired data, these models cannot be applied to new or customized domains with only unannotated images. (ii) Computational cost and efficiency. Training these models on highly complex distributions of natural images usually requires scaling the size of the model, data, batch-size, and training time, which makes them challenging to train and less accessible to the community. Recently, several works proposed text-to-image models" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": "trained without an explicit paired text-image datasets. Liu et al. (2021) performed a direct optimization to a pre-trained model based on a CLIP loss (Radford et al., 2021). Such approaches are time-consuming, since they require optimization for each input. Zhou et al. 
(2021) proposed training with CLIP image embedding perturbed with Gaussian noise. However, to achieve high-quality results, an additional model needs to be trained with an annotated text-image pairs dataset." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 506, + 266 + ], + "type": "text", + "content": "In this work, we introduce a novel generative model, kNN-Diffusion, which tackles these issues and progresses towards more accessible models for the research community and other users. Our model leverages a large-scale retrieval method, " + }, + { + "bbox": [ + 104, + 143, + 506, + 266 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 143, + 506, + 266 + ], + "type": "text", + "content": "-Nearest-Neighbors (kNN) search, in order to train the model without an explicit text-image dataset. Specifically, our diffusion model is conditioned on two inputs: (1) image embedding (at training time) or text embedding (at inference), extracted using pre-trained CLIP encoder, and (2) kNN embeddings, representing the " + }, + { + "bbox": [ + 104, + 143, + 506, + 266 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 143, + 506, + 266 + ], + "type": "text", + "content": " most similar images in the CLIP latent space. During training, we assume that no paired text is available, hence condition only on CLIP image embedding and on " + }, + { + "bbox": [ + 104, + 143, + 506, + 266 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 143, + 506, + 266 + ], + "type": "text", + "content": " additional image embeddings, selected using the retrieval model. At inference, only text inputs are given, so instead of image embeddings, we use the text embedding that shares a joint embedding space with the image embeddings. 
Here, the kNN image embeddings are retrieved using the text embeddings." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 269, + 506, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 506, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 506, + 349 + ], + "type": "text", + "content": "The additional kNN embeddings have three main benefits: (1) they extend the distribution of conditioning embeddings and ensure the distribution is similar in train and inference, thus helping to bridge the gap between the image and text embedding distributions (see Fig. 5); (2) they teach the model to learn to generate images from a target distribution by using samples from that distribution. This allows generalizing to different distributions at test time and generating out-of-distribution samples; (3) they hold information that does not need to be present in the model, which allows it to be substantially smaller. We demonstrate the effectiveness of our kNN approach in Sec. 4." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 352, + 506, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 506, + 452 + ], + "type": "text", + "content": "To assess the performance of our method, we train our model on two large-scale datasets: the Public Multimodal Dataset (Singh et al., 2021) and an image-only stickers dataset collected from the Internet. We show state-of-the-art zero-shot results on MS-COCO (Lin et al., 2014), LN-COCO (Pont-Tuset et al., 2020) and CUB (Wah et al., 2011). To further demonstrate the advantage of retrieval methods in text-to-image generation, we train two diffusion backbones using our kNN approach: continuous (Ramesh et al., 2022) and discrete (Gu et al., 2021). In both cases we outperform the model trained without kNN. In comparison to alternative methods presented in Sec. 
4, we achieve state-of-the-art results in both human evaluations and FID score, with only 400 million parameters and 7 seconds inference time." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 456, + 506, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 506, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 506, + 559 + ], + "type": "text", + "content": "Lastly, we introduce a new approach for local and semantic manipulations that is based on CLIP and kNN, without relying on user-provided masks. Specifically, we fine-tune our model to perform local and complex modifications that satisfies a given target text prompt. For example, given the teddy bear's image in Fig. 4, and the target text \"holds a heart\", our method automatically locates the local region that should be modified and synthesizes a high-resolution manipulated image in which (1) the teddy bear's identity is accurately preserved and (2) the manipulation is aligned with the target text. We demonstrate our qualitative advantage by comparing our results with two state-of-the-art models, Text2Live (Bar-Tal et al., 2022) and Textual Inversion (Gal et al., 2022), that perform image manipulations without masks (Fig. 4, 21 and 22)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 561, + 506, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 506, + 641 + ], + "type": "text", + "content": "We summarize the contributions of this paper as follows: (1) We propose kNN-Diffusion, a novel and efficient model that utilizes a large-scale retrieval method for training a text-to-image model with only pre-trained multi-modal embeddings, but without an explicit text-image dataset. (2) We demonstrate efficient out-of-distribution generation, which is achieved by substituting retrieval databases. 
(3) We present a new approach for local and semantic image manipulation, without utilizing masks. (4) We evaluate our method on two diffusion backbones, discrete and continuous, as well as on several datasets, and present state-of-the-art results compared to baselines." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 654, + 212, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 654, + 212, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 212, + 667 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 683, + 504, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 683, + 504, + 729 + ], + "spans": [ + { + "bbox": [ + 104, + 683, + 504, + 729 + ], + "type": "text", + "content": "Text-to-image models. Text-to-image generation is a well-studied task that focuses on generating images from text descriptions. While GANs (Xu et al., 2018; Zhu et al., 2019; Zhang et al., 2021) and Transformer-based methods (Ramesh et al., 2021; Gafni et al., 2022; Yu et al., 2022; Ding et al., 2021) have shown remarkable results, recently impressive results have been attained with dis" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 121, + 
80, + 491, + 310 + ], + "blocks": [ + { + "bbox": [ + 121, + 80, + 491, + 310 + ], + "lines": [ + { + "bbox": [ + 121, + 80, + 491, + 310 + ], + "spans": [ + { + "bbox": [ + 121, + 80, + 491, + 310 + ], + "type": "image", + "image_path": "e97768b6a12cbc1787d5797a2c65bdea7f16be9729671609455963914649f9bc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 317, + 504, + 329 + ], + "lines": [ + { + "bbox": [ + 105, + 317, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 504, + 329 + ], + "type": "text", + "content": "Figure 2: Qualitative comparisons with baselines. Nearest Neighbor is the first kNN of the text in PMD dataset." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 335, + 504, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 335, + 504, + 513 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 504, + 513 + ], + "type": "text", + "content": "crete (Gu et al., 2021) and continuous (Nichol et al., 2021; Sahara et al., 2022; Ramesh et al., 2022; Rombach et al., 2022) diffusion models. Most recent works trained diffusion models conditioned on text embeddings extracted using a pre-trained text encoder (Saharia et al., 2022; Yu et al., 2022) or image embedding extracted using CLIP (Ramesh et al., 2022). While producing impressive results, all previous works described above are supervised and trained with paired text-image datasets. Several works have proposed training text-to-image models without an explicit text-image dataset. FuseDream (Liu et al., 2021) proposed a direct optimization to a pre-trained generative model based on CLIP loss. This method relies on a pre-trained GAN and requires a time-consuming optimization process for each image. LAFITE (Zhou et al., 2021) recently demonstrated text-to-image generation results without requiring paired text-image datasets. 
Here, the CLIP embeddings are used interchangeably at train and test to condition a GAN-based model. The joint text-image embedding enables inference given a text input, whereas in training the model is fed with the visual embedding only. However, the gap between the text and image distributions in the joint embeddings space leads to results with substantially lower quality, as we show in our experiments. To overcome this gap, LAFITE added noise to the image embeddings during training. Our remedy to this gap is to condition the model on the retrieval of an actual image embeddings, using a text-image joint space." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 519, + 506, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 506, + 730 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 506, + 730 + ], + "type": "text", + "content": "Retrieval for generation. The Information Retrieval (IR) literature tackles the challenge of retrieving a small amount of information from a large database, given a user's query. A simple, yet efficient retrieval mechanism is to retrieve the " + }, + { + "bbox": [ + 104, + 519, + 506, + 730 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 519, + 506, + 730 + ], + "type": "text", + "content": " nearest neighbors (kNN) between the query and the entities in the database in some pre-calculated embedding space (Bijalwan et al., 2014). The database allows the model to leverage extensive world-knowledge for its specific task Borgeaud et al. (2021). Recently, language models were augmented with a memory component, allowing them to store representations of past inputs (Wu et al., 2022). The latter were then queried using a lookup operation, improving performance in various benchmarks and tasks. 
Retrieval models have been used for various tasks in learning problems, for example, language modeling (Borgeaud et al., 2021), machine translation (Gu et al., 2018), question answering (Lee et al., 2019) and image generation (Tseng et al., 2020; Qi et al., 2018). RetrieveGAN (Tseng et al., 2020) uses a differentiable retrieval module for image generation from a scene description, RetrieveFuse (Siddiqui et al., 2021) proposed a neural 3D scene reconstruction based on a retrieval system. SIMS (Qi et al., 2018) proposed generating an image using semantic layout and compatible image segments that are retrieved from image segments database, and (Iskakov, 2018) showed that the use of retrieval database in inpainting task significantly boosts visual quality. In this work we utilize the kNN retrieval mechanism over the shared text-image embedding space, CLIP (Radford et al., 2021). Using extensive ablation studies, we show the importance of the retrieval model both for training and inference, and demonstrate its large impact on performance. 
kNN-Diffusion significantly outperforms prior work" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 127, + 82, + 484, + 216 + ], + "blocks": [ + { + "bbox": [ + 127, + 82, + 484, + 216 + ], + "lines": [ + { + "bbox": [ + 127, + 82, + 484, + 216 + ], + "spans": [ + { + "bbox": [ + 127, + 82, + 484, + 216 + ], + "type": "image", + "image_path": "7c67ea86007bb55c12b7c5449835616eb75f13a14aec0d995c8c2d882151d3de.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 224, + 504, + 277 + ], + "lines": [ + { + "bbox": [ + 104, + 224, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 504, + 277 + ], + "type": "text", + "content": "Figure 3: The overall framework of our kNN-Diffusion model. In both training and inference, the decoder is conditioned on CLIP embedding, and kNN image embeddings. During training, we condition the model on image CLIP embedding, and its kNN image embeddings extracted using the retrieval method. At inference time, given an input text, the kNN image embeddings are retrieved based on the CLIP text embedding that shares a joint embedding space with the image embedding." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 504, + 313 + ], + "type": "text", + "content": "with zero-shot FID of 12.5, including RDM (Blattmann et al., 2022)(with FID of 22.1), a concurrent work which similarly to our approach, proposes conditioning LDM (Rombach et al., 2022) on kNN." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 320, + 506, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 320, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 320, + 506, + 441 + ], + "type": "text", + "content": "Multi-modal feature learning. Learning a joint and aligned feature space for several modalities is challenging, as it requires alignment between the modalities (paired datasets), whose distributions may vary. Specifically, the joint feature space of vision-and-language has been a long-standing problem. CLIP (Radford et al., 2021) successfully tackled this by leveraging contrastive learning over a large dataset of text-image pairs. BLIP (Li et al., 2022), (Mu et al., 2021) and FLAVA (Singh et al., 2021), followed this idea and further improved the joint representation. The joint representation was shown to hold a strong semantic alignment between the two modalities, enabling image generation (Liu et al., 2021; Wang et al., 2022), image manipulation (Patashnik et al., 2021; Avrahami et al., 2022b), and image captioning (Mokady et al., 2021). In this work we leverage the joint representation in two ways: (i) enabling textless training with only visual data, while using text at inference time, and (ii) creating an efficient embedding space for the use of the retrieval model." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 457, + 174, + 469 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 174, + 469 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 174, + 469 + ], + "type": "text", + "content": "3 METHOD" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 481, + 504, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 481, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 481, + 504, + 702 + ], + "type": "text", + "content": "Our main goal is to facilitate language-guided generation of user-specified concepts while using an images-only dataset during training. A possible way to achieve this goal is to use a shared text-image encoder that will map text-image pairs into the same latent space, thus allowing training with an image embedding, and inferring from text embedding. A candidate for this encoder is CLIP, which has been trained with a contrastive loss on a large-scale dataset of text-image pairs. However, as we show quantitatively in Tab. 1, 2 and qualitatively in Fig. 15, 16, 5, CLIP embeddings alone cannot accurately bridge the gap between the text and image distributions. In order to reduce this gap, several methods have been proposed. The closest work to ours is LAFITE, which perturbs the CLIP image embedding with adaptive Gaussian noise. Under the assumption that there is a large paired text-image dataset, Ramesh et al. (2022) have proposed a prior that is used during inference, and is trained to generate possible CLIP image embeddings from a given text caption. In this regard, we propose using a large-scale and non-trainable image embedding index as an integral part of the diffusion process. Our method, kNN-Diffusion, assumes that only image data and a pre-trained multi-modal text-image encoder are provided during training. As shown in Fig. 
3, our model is comprised of three main components: (1) A multi-modal text-image encoder (CLIP); (2) A retrieval model - A data structure containing image embeddings, which is indexed for a fast kNN search; (3) An image generation network - A trainable diffusion-based image generation model, conditioned on the projected retrievals. For both training and inference, the image generation network is conditioned on " + }, + { + "bbox": [ + 104, + 481, + 504, + 702 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 481, + 504, + 702 + ], + "type": "text", + "content": " additional image embeddings, chosen using the retrieval model to ensure a similar distribution of the condition in training and inference. The following sections describe these components." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Retrieval model. 
Our retrieval model has three non-trainable modules: a pre-trained text encoder " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "f_{txt}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " (CLIP text encoder), a pre-trained image encoder " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "f_{img}" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " (CLIP image encoder) and" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 127, + 92, + 156, + 144 + ], + "blocks": [ + { + "bbox": [ + 127, + 92, + 156, + 144 + ], + "lines": [ + { + "bbox": [ + 127, + 92, + 156, + 144 + ], + "spans": [ + { + "bbox": [ + 127, + 92, + 156, + 144 + ], + "type": "image", + "image_path": "579bbd4893194751e1ab3f8742480c70f4607a92cf444948529fb6d5d0f104c0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 144, + 153, + 151 + ], + "lines": [ + { + "bbox": [ + 131, + 144, + 153, + 151 + ], + "spans": [ + { + "bbox": [ + 131, + 144, + 153, + 151 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + 
"index": 1 + }, + { + "type": "image", + "bbox": [ + 127, + 152, + 157, + 202 + ], + "blocks": [ + { + "bbox": [ + 127, + 152, + 157, + 202 + ], + "lines": [ + { + "bbox": [ + 127, + 152, + 157, + 202 + ], + "spans": [ + { + "bbox": [ + 127, + 152, + 157, + 202 + ], + "type": "image", + "image_path": "4b8e94270980c653d26e27432d1e6cd3aa4206dafbdb0dbfd35b081947be7cb9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 204, + 153, + 210 + ], + "lines": [ + { + "bbox": [ + 132, + 204, + 153, + 210 + ], + "spans": [ + { + "bbox": [ + 132, + 204, + 153, + 210 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 126, + 213, + 159, + 251 + ], + "blocks": [ + { + "bbox": [ + 126, + 213, + 159, + 251 + ], + "lines": [ + { + "bbox": [ + 126, + 213, + 159, + 251 + ], + "spans": [ + { + "bbox": [ + 126, + 213, + 159, + 251 + ], + "type": "image", + "image_path": "2c17f90051c736103d1823061d8c78f14b3d279f254be24f3895718b71cc8801.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 254, + 153, + 261 + ], + "lines": [ + { + "bbox": [ + 131, + 254, + 153, + 261 + ], + "spans": [ + { + "bbox": [ + 131, + 254, + 153, + 261 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 274, + 504, + 305 + ], + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 305 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 305 + ], + "type": "text", + "content": "Figure 4: Results for text-guided image manipulations without using masks. The original image is shown in the left column, our manipulated images are shown in the center. The images of Bar-Tal et al. (2022); Gal et al. (2022) were generated using the authors' official code. 
The full comparison is available in the supplement." + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 170, + 93, + 199, + 144 + ], + "blocks": [ + { + "bbox": [ + 170, + 93, + 199, + 144 + ], + "lines": [ + { + "bbox": [ + 170, + 93, + 199, + 144 + ], + "spans": [ + { + "bbox": [ + 170, + 93, + 199, + 144 + ], + "type": "image", + "image_path": "6dfcbf0f062811cca6b907aa14f53554b965acab7fa4f35009db77e0751175fc.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 178, + 144, + 192, + 151 + ], + "lines": [ + { + "bbox": [ + 178, + 144, + 192, + 151 + ], + "spans": [ + { + "bbox": [ + 178, + 144, + 192, + 151 + ], + "type": "text", + "content": "Joker" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 170, + 152, + 201, + 202 + ], + "blocks": [ + { + "bbox": [ + 170, + 152, + 201, + 202 + ], + "lines": [ + { + "bbox": [ + 170, + 152, + 201, + 202 + ], + "spans": [ + { + "bbox": [ + 170, + 152, + 201, + 202 + ], + "type": "image", + "image_path": "e6dad06b6f2da77a17e939e95ea4fb50b7482acc0d60cfeb59145b89810d1248.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 172, + 204, + 198, + 210 + ], + "lines": [ + { + "bbox": [ + 172, + 204, + 198, + 210 + ], + "spans": [ + { + "bbox": [ + 172, + 204, + 198, + 210 + ], + "type": "text", + "content": "Black shirt" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 170, + 214, + 202, + 250 + ], + "blocks": [ + { + "bbox": [ + 170, + 214, + 202, + 250 + ], + "lines": [ + { + "bbox": [ + 170, + 214, + 202, + 250 + ], + "spans": [ + { + "bbox": [ + 170, + 214, + 202, + 250 + ], + "type": "image", + "image_path": "a3619c5eec9d2b40f7bce5719ac0edc5bff61af800e28d441b9747b93ac2d30c.jpg" + } + ] + } + 
], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 172, + 254, + 199, + 261 + ], + "lines": [ + { + "bbox": [ + 172, + 254, + 199, + 261 + ], + "spans": [ + { + "bbox": [ + 172, + 254, + 199, + 261 + ], + "type": "text", + "content": "With a hat" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 208, + 93, + 236, + 144 + ], + "blocks": [ + { + "bbox": [ + 208, + 93, + 236, + 144 + ], + "lines": [ + { + "bbox": [ + 208, + 93, + 236, + 144 + ], + "spans": [ + { + "bbox": [ + 208, + 93, + 236, + 144 + ], + "type": "image", + "image_path": "d2f607fc0ed802f1effbcd6a458785f54be0be4cc29e60bc1d72ebadb7d36d02.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 213, + 144, + 228, + 151 + ], + "lines": [ + { + "bbox": [ + 213, + 144, + 228, + 151 + ], + "spans": [ + { + "bbox": [ + 213, + 144, + 228, + 151 + ], + "type": "text", + "content": "Boxer" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 208, + 152, + 236, + 202 + ], + "blocks": [ + { + "bbox": [ + 208, + 152, + 236, + 202 + ], + "lines": [ + { + "bbox": [ + 208, + 152, + 236, + 202 + ], + "spans": [ + { + "bbox": [ + 208, + 152, + 236, + 202 + ], + "type": "image", + "image_path": "0366ab1e3934b7293362b2056ea6234a046c4f084daee3f72b70da4b6f66152e.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 204, + 235, + 210 + ], + "lines": [ + { + "bbox": [ + 209, + 204, + 235, + 210 + ], + "spans": [ + { + "bbox": [ + 209, + 204, + 235, + 210 + ], + "type": "text", + "content": "Blue pants" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 206, + 214, + 237, + 250 + ], + "blocks": [ + { + "bbox": [ + 206, + 214, + 237, + 250 + ], + "lines": [ + 
{ + "bbox": [ + 206, + 214, + 237, + 250 + ], + "spans": [ + { + "bbox": [ + 206, + 214, + 237, + 250 + ], + "type": "image", + "image_path": "e6d5c21e59f3b2ceba83fdc167c1806d0922f4c475f1079e3931ae6e76205c38.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 217, + 254, + 229, + 261 + ], + "lines": [ + { + "bbox": [ + 217, + 254, + 229, + 261 + ], + "spans": [ + { + "bbox": [ + 217, + 254, + 229, + 261 + ], + "type": "text", + "content": "Sad" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 245, + 93, + 279, + 144 + ], + "blocks": [ + { + "bbox": [ + 245, + 93, + 279, + 144 + ], + "lines": [ + { + "bbox": [ + 245, + 93, + 279, + 144 + ], + "spans": [ + { + "bbox": [ + 245, + 93, + 279, + 144 + ], + "type": "image", + "image_path": "1d35403a8c3a35b5d1bd43a6b7b519f7975a69fd342a0dfaef10a2f2a10f3cf1.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 254, + 144, + 271, + 151 + ], + "lines": [ + { + "bbox": [ + 254, + 144, + 271, + 151 + ], + "spans": [ + { + "bbox": [ + 254, + 144, + 271, + 151 + ], + "type": "text", + "content": "Angel" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 246, + 152, + 277, + 202 + ], + "blocks": [ + { + "bbox": [ + 246, + 152, + 277, + 202 + ], + "lines": [ + { + "bbox": [ + 246, + 152, + 277, + 202 + ], + "spans": [ + { + "bbox": [ + 246, + 152, + 277, + 202 + ], + "type": "image", + "image_path": "b4c381a0a363ceab3d84d929b005e071c8095d92309b123fdb4034e42f8f5ac1.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 246, + 204, + 279, + 210 + ], + "lines": [ + { + "bbox": [ + 246, + 204, + 279, + 210 + ], + "spans": [ + { + "bbox": [ + 246, + 204, + 279, + 210 + ], + "type": "text", + "content": "Holds a heart" + } + ] + } + ], + 
"index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 246, + 214, + 279, + 250 + ], + "blocks": [ + { + "bbox": [ + 246, + 214, + 279, + 250 + ], + "lines": [ + { + "bbox": [ + 246, + 214, + 279, + 250 + ], + "spans": [ + { + "bbox": [ + 246, + 214, + 279, + 250 + ], + "type": "image", + "image_path": "5f9f3fb28c6639fbe0733c63384facb4fa022b553b119d78bdb7d4dff68bca29.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 250, + 254, + 275, + 261 + ], + "lines": [ + { + "bbox": [ + 250, + 254, + 275, + 261 + ], + "spans": [ + { + "bbox": [ + 250, + 254, + 275, + 261 + ], + "type": "text", + "content": "Surprised" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 286, + 93, + 309, + 144 + ], + "blocks": [ + { + "bbox": [ + 282, + 83, + 294, + 91 + ], + "lines": [ + { + "bbox": [ + 282, + 83, + 294, + 91 + ], + "spans": [ + { + "bbox": [ + 282, + 83, + 294, + 91 + ], + "type": "text", + "content": "Durs" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 286, + 93, + 309, + 144 + ], + "lines": [ + { + "bbox": [ + 286, + 93, + 309, + 144 + ], + "spans": [ + { + "bbox": [ + 286, + 93, + 309, + 144 + ], + "type": "image", + "image_path": "20f0b18d238b59ac3d43e7dacb8e1cd303672db8e01d1246a6da2462597f9f2c.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 287, + 144, + 309, + 151 + ], + "lines": [ + { + "bbox": [ + 287, + 144, + 309, + 151 + ], + "spans": [ + { + "bbox": [ + 287, + 144, + 309, + 151 + ], + "type": "text", + "content": "Rainbow" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 282, + 152, + 310, + 202 + ], + "blocks": [ + { + "bbox": [ + 282, + 152, + 310, + 202 + ], + "lines": [ + { + "bbox": [ 
+ 282, + 152, + 310, + 202 + ], + "spans": [ + { + "bbox": [ + 282, + 152, + 310, + 202 + ], + "type": "image", + "image_path": "e964c7fcd2ab10dc812b3ef695a0404442e4b4b328977d623311f8a173891a8d.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 288, + 204, + 306, + 211 + ], + "lines": [ + { + "bbox": [ + 288, + 204, + 306, + 211 + ], + "spans": [ + { + "bbox": [ + 288, + 204, + 306, + 211 + ], + "type": "text", + "content": "Sitting" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 282, + 214, + 313, + 250 + ], + "blocks": [ + { + "bbox": [ + 282, + 214, + 313, + 250 + ], + "lines": [ + { + "bbox": [ + 282, + 214, + 313, + 250 + ], + "spans": [ + { + "bbox": [ + 282, + 214, + 313, + 250 + ], + "type": "image", + "image_path": "2686350b11e0a56bfc7b6344f08c6bcb72c4e155d30550175a32402aa9a5cfc5.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 289, + 253, + 305, + 261 + ], + "lines": [ + { + "bbox": [ + 289, + 253, + 305, + 261 + ], + "spans": [ + { + "bbox": [ + 289, + 253, + 305, + 261 + ], + "type": "text", + "content": "Angry" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 318, + 93, + 351, + 144 + ], + "blocks": [ + { + "bbox": [ + 318, + 93, + 351, + 144 + ], + "lines": [ + { + "bbox": [ + 318, + 93, + 351, + 144 + ], + "spans": [ + { + "bbox": [ + 318, + 93, + 351, + 144 + ], + "type": "image", + "image_path": "633cb565ffaab13da90cf41ddd7e361e5e34703e148d139b1230dbc388dae9a8.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 144, + 341, + 151 + ], + "lines": [ + { + "bbox": [ + 328, + 144, + 341, + 151 + ], + "spans": [ + { + "bbox": [ + 328, + 144, + 341, + 151 + ], + "type": "text", + "content": "Devil" + } + ] + } + ], + "index": 33, + 
"angle": 0, + "type": "image_caption" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 319, + 152, + 348, + 202 + ], + "blocks": [ + { + "bbox": [ + 319, + 152, + 348, + 202 + ], + "lines": [ + { + "bbox": [ + 319, + 152, + 348, + 202 + ], + "spans": [ + { + "bbox": [ + 319, + 152, + 348, + 202 + ], + "type": "image", + "image_path": "48448da441532e5798cd7725fb5f32ebde3cef9ae794ae531001e7d4a26006d4.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 204, + 347, + 210 + ], + "lines": [ + { + "bbox": [ + 321, + 204, + 347, + 210 + ], + "spans": [ + { + "bbox": [ + 321, + 204, + 347, + 210 + ], + "type": "text", + "content": "With a tie" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 318, + 214, + 351, + 250 + ], + "blocks": [ + { + "bbox": [ + 318, + 214, + 351, + 250 + ], + "lines": [ + { + "bbox": [ + 318, + 214, + 351, + 250 + ], + "spans": [ + { + "bbox": [ + 318, + 214, + 351, + 250 + ], + "type": "image", + "image_path": "e9cf1c391efacf186a0c2bb30f9a62dd0a49e6fc40f96395ed3c42be55576e69.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 253, + 351, + 260 + ], + "lines": [ + { + "bbox": [ + 318, + 253, + 351, + 260 + ], + "spans": [ + { + "bbox": [ + 318, + 253, + 351, + 260 + ], + "type": "text", + "content": "Holds a heart" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 356, + 93, + 383, + 144 + ], + "blocks": [ + { + "bbox": [ + 356, + 93, + 383, + 144 + ], + "lines": [ + { + "bbox": [ + 356, + 93, + 383, + 144 + ], + "spans": [ + { + "bbox": [ + 356, + 93, + 383, + 144 + ], + "type": "image", + "image_path": "1b3ae60b2f4ccdaefc9dbd9a77ad8dac6527dfb342539422c0d5ba7e8aebb439.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + }, + { + "bbox": 
[ + 363, + 144, + 378, + 151 + ], + "lines": [ + { + "bbox": [ + 363, + 144, + 378, + 151 + ], + "spans": [ + { + "bbox": [ + 363, + 144, + 378, + 151 + ], + "type": "text", + "content": "Ghost" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 356, + 152, + 386, + 202 + ], + "blocks": [ + { + "bbox": [ + 356, + 152, + 386, + 202 + ], + "lines": [ + { + "bbox": [ + 356, + 152, + 386, + 202 + ], + "spans": [ + { + "bbox": [ + 356, + 152, + 386, + 202 + ], + "type": "image", + "image_path": "ad82962b578117c95c04f7904453c8eca887582ec510e9a97362bdf438b67189.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 351, + 204, + 391, + 210 + ], + "lines": [ + { + "bbox": [ + 351, + 204, + 391, + 210 + ], + "spans": [ + { + "bbox": [ + 351, + 204, + 391, + 210 + ], + "type": "text", + "content": "Raising left hand" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_caption" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 355, + 214, + 388, + 250 + ], + "blocks": [ + { + "bbox": [ + 355, + 214, + 388, + 250 + ], + "lines": [ + { + "bbox": [ + 355, + 214, + 388, + 250 + ], + "spans": [ + { + "bbox": [ + 355, + 214, + 388, + 250 + ], + "type": "image", + "image_path": "8fb18b64e01e6c8a971e4fe8d8a72b2d6977d50c03e507bb78efa5b64a3d3e82.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 253, + 383, + 260 + ], + "lines": [ + { + "bbox": [ + 359, + 253, + 383, + 260 + ], + "spans": [ + { + "bbox": [ + 359, + 253, + 383, + 260 + ], + "type": "text", + "content": "With a tie" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_caption" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 393, + 93, + 440, + 144 + ], + "blocks": [ + { + "bbox": [ + 403, + 83, + 432, + 91 + ], + "lines": [ + { + "bbox": [ + 403, + 83, + 432, + 91 + ], + "spans": [ + { + 
"bbox": [ + 403, + 83, + 432, + 91 + ], + "type": "text", + "content": "Text2LIVE" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 393, + 93, + 440, + 144 + ], + "lines": [ + { + "bbox": [ + 393, + 93, + 440, + 144 + ], + "spans": [ + { + "bbox": [ + 393, + 93, + 440, + 144 + ], + "type": "image", + "image_path": "2b556d99987c7f3687c1e67e0fe618451a4d24b533ac155b62cac81d4ff2bda9.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 144, + 425, + 151 + ], + "lines": [ + { + "bbox": [ + 410, + 144, + 425, + 151 + ], + "spans": [ + { + "bbox": [ + 410, + 144, + 425, + 151 + ], + "type": "text", + "content": "Ghost" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 440, + 83, + 485, + 91 + ], + "lines": [ + { + "bbox": [ + 440, + 83, + 485, + 91 + ], + "spans": [ + { + "bbox": [ + 440, + 83, + 485, + 91 + ], + "type": "text", + "content": "Textual Inversion" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_caption" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 393, + 152, + 434, + 202 + ], + "blocks": [ + { + "bbox": [ + 393, + 152, + 434, + 202 + ], + "lines": [ + { + "bbox": [ + 393, + 152, + 434, + 202 + ], + "spans": [ + { + "bbox": [ + 393, + 152, + 434, + 202 + ], + "type": "image", + "image_path": "81df2f5c024d732fa003ce32c48891881e25dac992dc511c46196b07673a1f61.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 204, + 437, + 210 + ], + "lines": [ + { + "bbox": [ + 397, + 204, + 437, + 210 + ], + "spans": [ + { + "bbox": [ + 397, + 204, + 437, + 210 + ], + "type": "text", + "content": "Raising left hand" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 396, + 213, + 435, + 250 + ], + "blocks": [ + { + "bbox": [ + 396, + 213, + 435, + 250 + ], + "lines": 
[ + { + "bbox": [ + 396, + 213, + 435, + 250 + ], + "spans": [ + { + "bbox": [ + 396, + 213, + 435, + 250 + ], + "type": "image", + "image_path": "46d26f01a10bd02a596093168ec69d23b6bda6925261a129fa86e28edfee4003.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 253, + 429, + 260 + ], + "lines": [ + { + "bbox": [ + 405, + 253, + 429, + 260 + ], + "spans": [ + { + "bbox": [ + 405, + 253, + 429, + 260 + ], + "type": "text", + "content": "With a tie" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 451, + 93, + 476, + 144 + ], + "blocks": [ + { + "bbox": [ + 451, + 93, + 476, + 144 + ], + "lines": [ + { + "bbox": [ + 451, + 93, + 476, + 144 + ], + "spans": [ + { + "bbox": [ + 451, + 93, + 476, + 144 + ], + "type": "image", + "image_path": "4189ebd5211eee89c42f9d0cd9ff08baf0cba0c6d9dec68d1adcfb9f7e073e02.jpg" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 455, + 144, + 471, + 151 + ], + "lines": [ + { + "bbox": [ + 455, + 144, + 471, + 151 + ], + "spans": [ + { + "bbox": [ + 455, + 144, + 471, + 151 + ], + "type": "text", + "content": "Ghost" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_caption" + } + ], + "index": 52 + }, + { + "type": "image", + "bbox": [ + 448, + 152, + 476, + 202 + ], + "blocks": [ + { + "bbox": [ + 448, + 152, + 476, + 202 + ], + "lines": [ + { + "bbox": [ + 448, + 152, + 476, + 202 + ], + "spans": [ + { + "bbox": [ + 448, + 152, + 476, + 202 + ], + "type": "image", + "image_path": "4fa44d3956e5ffc14fde983bb79ac0b8b0f6db0cb2f8b06091b762a26414dab5.jpg" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 443, + 204, + 483, + 210 + ], + "lines": [ + { + "bbox": [ + 443, + 204, + 483, + 210 + ], + "spans": [ + { + "bbox": [ + 443, + 204, + 483, + 210 + ], + "type": "text", + "content": "Raising left hand" + } + 
] + } + ], + "index": 55, + "angle": 0, + "type": "image_caption" + } + ], + "index": 54 + }, + { + "type": "image", + "bbox": [ + 443, + 214, + 483, + 251 + ], + "blocks": [ + { + "bbox": [ + 443, + 214, + 483, + 251 + ], + "lines": [ + { + "bbox": [ + 443, + 214, + 483, + 251 + ], + "spans": [ + { + "bbox": [ + 443, + 214, + 483, + 251 + ], + "type": "image", + "image_path": "12af7563b8668dd53db8e9e8d932d3be49014ef20ca441f225bb62d1ac4e670a.jpg" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 451, + 253, + 474, + 260 + ], + "lines": [ + { + "bbox": [ + 451, + 253, + 474, + 260 + ], + "spans": [ + { + "bbox": [ + 451, + 253, + 474, + 260 + ], + "type": "text", + "content": "With a tie" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_caption" + } + ], + "index": 56 + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": "an index " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": ". The encoders map text descriptions and image samples to a joint multi-modal " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": "-dimensional feature space " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": ". 
The index stores an efficient representation of the images database - " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\mathcal{H} := \\{f_{img}(i) \\in \\mathbb{R}^d | i \\in \\mathcal{I}\\}" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " denotes the dataset of images. During training, we use the index to efficiently extract the " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " nearest neighbors in the feature space of the image embedding " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "f_{img}(\\mathbf{I}) \\in \\mathbb{R}^d - \\mathrm{knn}_{img}(\\mathbf{I}, k) := \\arg \\min_{h \\in \\mathcal{H}}^k \\mathbf{s}(f_{img}(\\mathbf{I}), h)" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " is a distance function and " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\arg \\min_k^k" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " output the minimal " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " elements. 
The set " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\{f_{img}(\\mathbf{I}), \\mathrm{knn}_{img}(\\mathbf{I}, k)\\}" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " is used as the condition to the generative model. During inference, given a query text " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": ", an embedding " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "f_{txt}(t)" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " is extracted. The generative model is conditioned on this embedding and its " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " nearest neighbors from the database - " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{knn}_{txt}(t, k) := \\arg \\min_{h \\in \\mathcal{H}}^k \\mathbf{s}(f_{txt}(t), h)" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": ". During training, we add embeddings of real images, by applying the retrieval method to the input image embedding. The extracted kNN should have a large enough distribution to cover the potential text embedding. During inference, the kNN are retrieved using the text embedding (See Fig. 17). 
In all of our experiments we use the cosine similarity metric as the distance function " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "k = 10" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": " for the number of nearest neighbors and " + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "inline_equation", + "content": "d = 512" + }, + { + "bbox": [ + 104, + 315, + 504, + 474 + ], + "type": "text", + "content": ". The full implementation details can be found in Sec. 6.6 in the supplement." + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "text", + "content": "Image generation network. In order to demonstrate the robustness of our method, we apply our kNN approach on two different diffusion backbones: Discrete (Gu et al., 2021) and Continuous (Nichol et al., 2021; Sohl-Dickstein et al., 2015; Ho et al., 2020; Dhariwal & Nichol, 2021). Although very different in practice, these models share the same theoretical idea. Let " + }, + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "inline_equation", + "content": "x_0 \\sim q(x_0)" + }, + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "text", + "content": " be a sample from our images distribution. A forward diffusion process is a Markov chain that adds noise at each step " + }, + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "inline_equation", + "content": "q(x_n|x_{n-1})" + }, + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "text", + "content": ". 
The reverse process, " + }, + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "inline_equation", + "content": "p_\\theta(x_{n-1}|x_n,x_0)" + }, + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "text", + "content": ", is a denoising process that removes noise from an initialized noise state. At inference time, the model can generate an output, starting with noise and gradually removing it using " + }, + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "inline_equation", + "content": "p_\\theta" + }, + { + "bbox": [ + 104, + 482, + 504, + 581 + ], + "type": "text", + "content": ". For additional background on diffusion models please refer to Sec. 6.1 in the supplement." + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": "In the discrete diffusion model, " + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "inline_equation", + "content": "q(x_{n}|x_{n - 1})\\coloneqq v^{T}(x_{n})\\mathbf{Q}_{n}v(x_{n - 1})" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "inline_equation", + "content": "v(x_{n})" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": " is a one-hot vector with entry 1 at " + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "inline_equation", + "content": "x_{n}" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_n" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": " is a transition matrix, modeling the probability to move from state " + }, + { + "bbox": [ + 104, + 581, + 
504, + 659 + ], + "type": "inline_equation", + "content": "x_{n - 1}" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "inline_equation", + "content": "x_{n}" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": ", using uniform probability over the vocabulary and a pre-defined probability for additional special [MASK] token. We can compute the reverse transition distribution according to: " + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x_{n - 1}|x_n,y)\\coloneqq \\sum_{\\hat{x}_0 = 1}^k q(x_{n - 1}|x_n,\\hat{x_0})p_\\theta (\\hat{x_0} |x_n,x_0,y)" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": " is a discrete vector, tokenized by the VQGAN (Esser et al., 2021) encoder and " + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": " is the conditioning signal. For modeling " + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "inline_equation", + "content": "p_{\\theta}" + }, + { + "bbox": [ + 104, + 581, + 504, + 659 + ], + "type": "text", + "content": " we have followed (Gu et al., 2021) and used a conditional Transformer (Vaswani et al., 2017)." 
+ } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "content": "In the continuous diffusion model, " + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "inline_equation", + "content": "q(x_{n}|x_{n - 1}) \\coloneqq \\mathcal{N}(x_{n};\\sqrt{\\alpha_{t}} x_{n - 1},(1 - \\alpha_{n})x_{0})" + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x_{n - 1}|x_n,y)\\coloneqq \\mathcal{N}(\\mu_\\theta (x_n,y),\\Sigma_\\theta (x_n,y))" + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "content": ". Here, the noise function is Gaussian noise. Following (Ho et al., 2020; Nichol et al., 2021) we trained a model " + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "content": " to predict the added noise using a standard mean-squared error loss: " + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "inline_equation", + "content": "L\\coloneqq E_{n\\sim [1,N],x_0\\sim q(x_0),\\epsilon \\sim \\mathcal{N}(0,\\mathbf{I})}[||\\epsilon -\\epsilon_{\\theta}(x_n,n,y)||^2]" + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + "content": " is a U-net model and " + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 659, + 504, + 715 + ], + "type": "text", + 
"content": " is the conditioning signal." + } + ] + } + ], + "index": 62 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 63 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 120, + 83, + 492, + 169 + ], + "blocks": [ + { + "bbox": [ + 120, + 83, + 492, + 169 + ], + "lines": [ + { + "bbox": [ + 120, + 83, + 492, + 169 + ], + "spans": [ + { + "bbox": [ + 120, + 83, + 492, + 169 + ], + "type": "image", + "image_path": "a7f78150df57b233a4eb57c80d2816747d4dba1175c0d61e3cf3915e7d74205c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 184, + 504, + 215 + ], + "lines": [ + { + "bbox": [ + 104, + 184, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 104, + 184, + 504, + 215 + ], + "type": "text", + "content": "Figure 5: tSNE visualization of 500 random text-image CLIP embeddings pairs taken from COCO validation. The leftmost figure demonstrates the gap between the text and image distributions. By gradually adding kNN to the mean CLIP embedding of the text, the gap decreases, demonstrating the importance of the kNN." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 236, + 282, + 331 + ], + "blocks": [ + { + "bbox": [ + 106, + 236, + 282, + 331 + ], + "lines": [ + { + "bbox": [ + 106, + 236, + 282, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 236, + 282, + 331 + ], + "type": "image", + "image_path": "34aecec5513151a693e5c6b12d64e94c9f1fff8d249b85ed5ff95a8b288204be.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 339, + 282, + 369 + ], + "lines": [ + { + "bbox": [ + 104, + 339, + 282, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 282, + 369 + ], + "type": "text", + "content": "Figure 6: FID on MS-COCO, including models trained on image-only datasets and text-image datasets." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 296, + 281, + 513, + 361 + ], + "blocks": [ + { + "bbox": [ + 290, + 220, + 509, + 271 + ], + "lines": [ + { + "bbox": [ + 290, + 220, + 509, + 271 + ], + "spans": [ + { + "bbox": [ + 290, + 220, + 509, + 271 + ], + "type": "text", + "content": "Table 1: Results for zero-shot Text-to-Image generation on the MS-COCO, CUB and LN-COCO test sets. Image-quality and Text-alignment report the percentage of majority human raters votes in favor of our method when comparing between a certain model and ours." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 296, + 281, + 513, + 361 + ], + "lines": [ + { + "bbox": [ + 296, + 281, + 513, + 361 + ], + "spans": [ + { + "bbox": [ + 296, + 281, + 513, + 361 + ], + "type": "table", + "html": "
ModelMS-COCOCUBLN-COCO
FID↓Im. qual.Txt align.FID↓Im. qual.Txt align.FID↓Im. qual.Txt align.
LAFITE26.972.165.389.774.059.642.868.461.9
FuseDream21.264.079.350.279.160.937.571.159.0
no-kNN32.870.868.395.181.061.265.061.459.8
Ours12.5--42.9--35.6--
", + "image_path": "ccecfb952f4c10e303941ce43ccaed825d7b23f09dafb95a3faf5b685a00c8ca.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "text", + "content": "In both cases, we condition our model on " + }, + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "inline_equation", + "content": "y = (f_{img}(x_0), \\mathrm{knn}_{img}(x_0, k))" + }, + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "inline_equation", + "content": "f_{img}(x_0)" + }, + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "text", + "content": " is the CLIP image embedding, " + }, + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "inline_equation", + "content": "\\mathrm{knn}_{img}(x_0, k)" + }, + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 387, + 504, + 485 + ], + "type": "text", + "content": " nearest neighbors in the feature space of the image embedding. Following (Ramesh et al., 2022; Rombach et al., 2022) conditional injection, we condition our model on the image CLIP embedding, and the kNN clip embeddings by applying cross attention in the attention layers of the architecture. We sample both our models using Classifier Free Guidance (CFG) (Nichol et al., 2021; Ho & Salimans, 2021). Since CFG was originally proposed for continuous models, we propose a method for using it with discrete models as well. Full implementation details of the discrete and continuous models can be found in Sec. 6.7 and Sec. 6.8, respectively, in the supplement." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 500, + 281, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 500, + 281, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 500, + 281, + 510 + ], + "type": "text", + "content": "3.1 TEXT-ONLY IMAGE MANIPULATION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 517, + 504, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 504, + 595 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 504, + 595 + ], + "type": "text", + "content": "The majority of previous works in the task of image manipulation either rely on user-provided masks (Nichol et al., 2021; Avrahami et al., 2022b;a), or are limited to global editing (Crowson et al., 2022; Kim et al., 2022). Recently, several works (Bar-Tal et al., 2022; Hertz et al., 2022; Gal et al., 2022) have made progress with local manipulations without relying on user edited masks. Nevertheless, most of the techniques suffer from several shortcomings: (1) They enable local texture changes, yet cannot modify complex structures, (2) they struggle to preserve the identity of the object, for example, when manipulating humans, (3) they require optimization for each input." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "text", + "content": "We address these issues by extending kNN-Diffusion to perform local and semantic-aware image manipulations without any provided mask. Illustration of the approach is provided in Fig. 18 and Fig. 19 in the supplement. For this task, the model is trained to predict the original image from a manipulated version. Specifically, we create a manipulated version of the image, which differs from the original image only in some local area. 
Given a random local area " + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "text", + "content": " in the image I, the manipulated image " + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "inline_equation", + "content": "\mathrm{I}_{\text{manip}}" + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "text", + "content": " is constructed by replacing the area with the corresponding nearest neighbor: " + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "inline_equation", + "content": "\mathrm{I}_{\text{manip}} = \mathrm{I} \cdot (1 - M) + \mathrm{nn}_{img}(\mathrm{I}, 1) \cdot M" + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "inline_equation", + "content": "\mathrm{nn}_{img}(\mathrm{I}, 1)" + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "text", + "content": " is the nearest neighbor obtained after aligning it with I using the ECC alignment algorithm (Evangelidis & Psarakis, 2008). The model then receives as input the manipulated image, together with the CLIP embedding of the original image only in the local area: " + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "inline_equation", + "content": "f_{img}(\mathrm{I} \cdot M)" + }, + { + "bbox": [ + 104, + 599, + 506, + 732 + ], + "type": "text", + "content": ". This CLIP embedding represents the required modification that should be applied to the manipulated image in order to predict the original image. 
During inference, instead of using the CLIP embedding of the local area, the desired modification is" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 133, + 113, + 478, + 186 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 504, + 111 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 504, + 111 + ], + "type": "text", + "content": "Table 2: Results on the stickers dataset. We report the percentage of human raters prefer our method over the baselines with respect to image quality and text alignment. Discrete no-kNN refers to VQ-diffusion, and Continuous no-kNN, to DALL-E2 decoder, both trained without an explicit text-image dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 133, + 113, + 478, + 186 + ], + "lines": [ + { + "bbox": [ + 133, + 113, + 478, + 186 + ], + "spans": [ + { + "bbox": [ + 133, + 113, + 478, + 186 + ], + "type": "table", + "html": "
ModelFID↓Ours DiscreteOurs Continuous
Image qualityText alignmentImage qualityText alignment
DALL-E2+ClipCap55.571.669.267.068.3
LAFITE58.763.559.976.071.2
no-kNN52.772.167.666.869.4
Ours40.8----
", + "image_path": "fea0278a9b53250e31495932e71823f83bfff0c4ff20bb67792c0d0453fb0fe2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 198, + 504, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 504, + 220 + ], + "type": "text", + "content": "represented using the CLIP embedding of the user text query. We modified the model to be capable of receiving as a condition both the manipulated image and the CLIP embedding of the local area." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 236, + 201, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 201, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 201, + 248 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 255, + 506, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 506, + 312 + ], + "type": "text", + "content": "First, we conduct qualitative and quantitative comparisons on MS-COCO, LN-COCO and CUB datasets. To further demonstrate the advantage of our method, we provide comparison on an image-only stickers dataset, where we apply our approach on two diffusion backbones. Next, we demonstrate image manipulation and out-of-distribution capabilities. Finally, to better assess the effect of each contribution, an ablation study is provided." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 316, + 504, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 403 + ], + "type": "text", + "content": "Datasets and Metrics. 
For photo-realistic experiments, our model was trained only on the images (omitting the text) of a modified version of the Public Multimodal Dataset (PMD) used by FLAVA (Singh et al., 2021). More information about the dataset is available in Sec. 6.4 of the supplement. To further demonstrate the capabilities of our method, we collected 400 million sticker images from the web, containing combinations of concepts such as objects, characters/avatars and text. The collected stickers do not have paired text, and are substantially different from photorealistic data. Furthermore, since they have no paired text, they were not part of CLIP's training data, which makes the text-to-image generation task more challenging." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 404, + 506, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 404, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 404, + 506, + 482 + ], + "type": "text", + "content": "Evaluation metrics are based on objective and subjective metrics: (i) FID (Heusel et al., 2017) is an objective metric used to assess the quality of synthesized images, (ii) human evaluation - we ask human raters for their preference, comparing two methods based on image quality and text alignment. We used 600 image pairs; five raters rated each pair. The results are shown as a percentage of majority votes in favor of our method over the baselines. We report the full human evaluation protocol in the supplement. We chose to omit Inception-Score, since it is shown by Barratt & Sharma (2018) to be a misleading metric for models that were not trained on Imagenet." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 495, + 320, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 320, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 320, + 506 + ], + "type": "text", + "content": "4.1 QUALITATIVE AND QUANTITATIVE RESULTS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 512, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 512, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 506, + 733 + ], + "type": "text", + "content": "We begin by comparing our model, trained on the PMD dataset, with the previous works LAFITE and FuseDream, that trained on image-only datasets. To demonstrate the advantage of using a retrieval method in text-to-image generation, we trained a model variant, no-kNN. This baseline was trained solely on image embeddings (omitting the kNN), while during inference, the images were generated using the text embedding. Tab. 1 displays zero-shot results on three different datasets: MS-COCO, CUB and LN-COCO. We follow the evaluation protocol of LAFITE, reporting our results on 30,000 images from MS-COCO validation set without training, nor using its training partition in the kNN index. Similarly, we follow LAFITE for CUB and LN-COCO evaluation. As can be seen, our model achieves the lowest FID score in all scenarios. In addition, human evaluations rate our method as better aligned to text and with the highest image quality. In Fig. 2, 15 and 11 we present a qualitative comparison between the methods. One can observe that while the simple retrieval baseline outputs non-generated images with high-quality, the images generated by our method are more faithful to the input text. To further demonstrate the effectiveness of our method, we present in Fig. 
6 a comparison of our model with the latest text-to-image models trained on paired text-image datasets: DALL-E, CogView, VQ-Diffusion, GLIDE, LDM, Make-A-Scene, DALL-E2, Parti and Imagen. As can be seen, our model achieves comparable results to recent models trained with full text-image pairs (e.g LDM, GLIDE), despite being trained on an image-only dataset, with significantly lower computational costs. The results demonstrate that leveraging an external retrieval database allows to compensate for different trade-offs, in particular, reducing the number of parameters in the model. Additional samples are provided in Fig. 13 in the supplement." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 127, + 83, + 489, + 299 + ], + "blocks": [ + { + "bbox": [ + 127, + 83, + 489, + 299 + ], + "lines": [ + { + "bbox": [ + 127, + 83, + 489, + 299 + ], + "spans": [ + { + "bbox": [ + 127, + 83, + 489, + 299 + ], + "type": "image", + "image_path": "2bbc54b5605c39d4eb95ed54edeb6ccbc924f0a581d1c0ce6025122e17ce139e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 306, + 504, + 338 + ], + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 338 + ], + "type": 
"text", + "content": "Figure 7: Comparison between various indexes used by the same model. (1) Aesthetic. Images from the first quantile of an aesthetic classifier, (2) Unaesthetic. Images from the last quantile of an aesthetic classifier, (3) Image search engine. Images retrieved from Google Images, (4) The stickers index." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 353, + 504, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 353, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 353, + 504, + 552 + ], + "type": "text", + "content": "Text-to-sticker generation. As the sticker dataset does not have paired text, and is substantially different from photo-realistic data, it allows us to illustrate the advantage of our model on an image-only dataset. A selection of stickers generated by our model is presented in Fig. 1 and Fig. 14, 12. To demonstrate the importance of using kNN on image-only datasets, we evaluate our approach on two diffusion backbones. To this end, we trained a continuous diffusion model (Ramesh et al., 2022) and a discrete diffusion model (Gu et al., 2021), both conditioned on the kNN image embeddings. For each backbone, we compare our method with the following baselines: (1) no-kNN - this baseline was trained using both the continuous and the discrete methods conditioned only on image CLIP embedding, without using kNN. In the discrete case, we trained a VQ-diffusion model, while in the continuous case, we trained a re-implementation of DALL-E2's decoder (without prior). (2) DALL-E2+ClipCap baseline - here, we first captioned the entire sticker dataset using ClipCap (Mokady et al., 2021), then trained DALL-E2 decoder on the captioned dataset. (3) LAFITE - we trained LAFITE language-free model on our stickers dataset using the authors' published code. We present the results in Tab. 2. 
The FID is calculated over a subset of 3,000 stickers, generated from the ClipCap captioned dataset. As can be seen, our model achieves the lowest FID score. In addition, it outperforms all baselines in human evaluation comparison, using continuous and discrete backbones. In particular, compared with the same model trained without kNN, our model achieves significantly higher favorability in both text alignment and image quality." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 564, + 195, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 564, + 195, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 195, + 575 + ], + "type": "text", + "content": "4.2 APPLICATIONS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 585, + 504, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 504, + 728 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 504, + 728 + ], + "type": "text", + "content": "Text-only image manipulation. We demonstrate the manipulation capabilities of our model in Fig. 1, 4 and 20. Furthermore, we qualitatively compare our model with Text2LIVE (Bar-Tal et al., 2022) and Textual Inversion (Gal et al., 2022), using the authors' published code. Text2LIVE proposed generating an edit layer that is composed over the original input, using a generator trained for each training image. Textual Inversion utilized the pre-trained Latent Diffusion model to invert the input image into a token embedding. The embedding is then used to compose novel textual queries for the generative model. Fig. 4 shows representative results, and the rest are included in Fig. 21 and 22 in the supplement. In contrast to our model, baseline methods lack text correspondence or they do not preserve the identity of the object. Since Text2LIVE is optimized to perform local changes, it has the difficulty changing the structure of the object (e.g. the \"raising his hand\" example in Fig. 4). 
Textual Inversion baseline changes the identity of the object because it struggles reconstructing the textual representation of the source image. Our model, on the other hand, can perform challenging manipulations that are aligned with the text, while preserving the object identity." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 231, + 175 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 231, + 175 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 231, + 175 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 231, + 175 + ], + "type": "image", + "image_path": "950754645d537e674a9ed1701ea97dec349ad1022705da81f9f70c7f90f82619.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 185, + 233, + 226 + ], + "lines": [ + { + "bbox": [ + 104, + 185, + 233, + 226 + ], + "spans": [ + { + "bbox": [ + 104, + 185, + 233, + 226 + ], + "type": "text", + "content": "Figure 8: Mean aesthetics score of the generated images as a function of the conditioned kNN mean aesthetics score." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 81, + 373, + 175 + ], + "blocks": [ + { + "bbox": [ + 242, + 81, + 373, + 175 + ], + "lines": [ + { + "bbox": [ + 242, + 81, + 373, + 175 + ], + "spans": [ + { + "bbox": [ + 242, + 81, + 373, + 175 + ], + "type": "image", + "image_path": "ed141c57ea6e1c551282a3f9907ddce50fcf676a4c7a6940c44aa759ddcc1a1c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 185, + 374, + 236 + ], + "lines": [ + { + "bbox": [ + 239, + 185, + 374, + 236 + ], + "spans": [ + { + "bbox": [ + 239, + 185, + 374, + 236 + ], + "type": "text", + "content": "Figure 9: MS-COCO test FID score on various K's in: (1) Zero-Shot (2) Index includes MS-COCO train subset. No kNN trained with kNN, but did not employ kNN in inference." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 384, + 83, + 503, + 175 + ], + "blocks": [ + { + "bbox": [ + 384, + 83, + 503, + 175 + ], + "lines": [ + { + "bbox": [ + 384, + 83, + 503, + 175 + ], + "spans": [ + { + "bbox": [ + 384, + 83, + 503, + 175 + ], + "type": "image", + "image_path": "6ed05be790783ff217f588a2c4a2cd028fca55ed587c0697a0e1a02f3ceab8dd.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 382, + 184, + 504, + 236 + ], + "lines": [ + { + "bbox": [ + 382, + 184, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 382, + 184, + 504, + 236 + ], + "type": "text", + "content": "Figure 10: MS-COCO test FID score for different model sizes. As can be seen, adding kNN to the model allows it to be smaller, while having better performance." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": "Out-of-distribution generation. Using the retrieval index as part of the generation process enables using different databases during inference, without fine-tuning. This allows generating images from distributions that were not part of the training set, enabling out-of-distribution generation. This novel capability is demonstrated with the same model trained on PMD, using three different retrieval databases: (i) A stickers database presented in Sec. 4. (ii) Aesthetic database: This database is constructed by filtering images according to a classifier score. Let " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": " be a classifier that for each image " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "i \\in I" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": " outputs a score " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "s = C(i)" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": ". 
This classifier enables filtering the kNN using " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "L \\leq s < H" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": " are low and high thresholds, respectively. Here, we use an open source pre-trained aesthetics classifier " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": " (Christoph Schuhmann, 2022): For each text input " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "t \\in T" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": ", we apply " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": " on the kNN, and then divide the kNN into five equal quantiles based on " + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 247, + 506, + 411 + ], + "type": "text", + "content": " score. As can be seen in Fig. 8, using kNN with higher aesthetics score result in generated images with higher aesthetics mean score. (iii) Image search engine: Generative models are stationary in the sense that they are unable to learn new concepts after being trained, hence fine-tuning is required to represent new styles and concepts. 
Here, we use an online image search engine, which allows the model to adapt to new data without additional fine-tuning. A qualitative comparison of all three methods is shown in Fig.7." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 425, + 211, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 425, + 211, + 436 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 211, + 436 + ], + "type": "text", + "content": "4.3 ABLATION STUDY" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 445, + 506, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 506, + 578 + ], + "type": "text", + "content": "We conclude our experiments with an ablation study, to quantify the contribution of our different components. We provide ablation study on index size and different kNN conditioning approaches in Sec. 6.5 of the supplement. Number of nearest neighbors. The results in Fig. 9 demonstrate the importance of applying the retrieval mechanism during training and inference. Here, we evaluate our model, trained on PMD dataset, with different numbers of kNN during inference. Furthermore, we examined the baseline no-kNN, in which during inference, the model is conditioned only on the text embedding " + }, + { + "bbox": [ + 104, + 445, + 506, + 578 + ], + "type": "inline_equation", + "content": "f_{txt}(t)" + }, + { + "bbox": [ + 104, + 445, + 506, + 578 + ], + "type": "text", + "content": ", without using kNN. Best performance is achieved using 10 neighbors. Scalability analysis. To evaluate the effectiveness of our approach at different model sizes, we trained three additional models with varying sizes for both settings - with and without kNN. As can be seen in Fig. 10, utilizing kNN consistently improves performance for all sizes. Furthermore, a performance improvement can be achieved using much smaller models with kNN. 
For example, the 35M kNN model outperforms the 400M model without kNN." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 594, + 196, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 196, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 196, + 605 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 615, + 506, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 615, + 506, + 727 + ], + "spans": [ + { + "bbox": [ + 104, + 615, + 506, + 727 + ], + "type": "text", + "content": "\"We shall always find, that every idea which we examine is copied from a similar impression\", Hume (1748). In this paper, we propose using a large-scale retrieval method in order to train a novel text-to-image model, with only pre-trained multi-modal embeddings, but without an explicit text-image dataset. Our extensive experiments demonstrate that using an external knowledge-base alleviates much of the model's burden of learning novel concepts, enabling the use of a relatively small model. In addition, it provides the model the capability of learning to adapt to new samples, which it only observes during test time. Lastly, we present a new technique utilizing the retrieval method for text-driven semantic manipulations without user-provided masks. As evaluated by human studies and automatic metrics, our method is significantly preferable to the baselines in terms of image quality and text alignment." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 99, + 505, + 719 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 99, + 505, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 99, + 505, + 122 + ], + "spans": [ + { + "bbox": [ + 105, + 99, + 505, + 122 + ], + "type": "text", + "content": "Omri Avrahami, Ohad Fried, and Dani Lischinski. Blended latent diffusion. arXiv preprint arXiv:2206.02779, 2022a." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 129, + 505, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 129, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 505, + 163 + ], + "type": "text", + "content": "Omri Avrahami, Dani Lischinski, and Ohad Fried. Blended diffusion for text-driven editing of natural images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18208-18218, 2022b." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 170, + 504, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 170, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 106, + 170, + 504, + 194 + ], + "type": "text", + "content": "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 199, + 504, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 199, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 107, + 199, + 504, + 224 + ], + "type": "text", + "content": "Artem Babenko and Victor Lempitsky. The inverted multi-index. IEEE transactions on pattern analysis and machine intelligence, 37(6):1247-1260, 2014." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 229, + 504, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 229, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 107, + 229, + 504, + 255 + ], + "type": "text", + "content": "Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. arXiv preprint arXiv:2204.02491, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 259, + 504, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 259, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 107, + 259, + 504, + 282 + ], + "type": "text", + "content": "Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 289, + 504, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 289, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 107, + 289, + 504, + 324 + ], + "type": "text", + "content": "Vishwanath Bijalwan, Vinay Kumar, Pinki Kumari, and Jordan Pascual. 
Knn based machine learning approach for text and document mining. International Journal of Database Theory and Application, 7(1):61-70, 2014." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 330, + 504, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 330, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 107, + 330, + 504, + 355 + ], + "type": "text", + "content": "Andreas Blattmann, Robin Rombach, Kaan Oktay, Jonas Müller, and Björn Ommer. Semiparametric neural image synthesis. In Advances in Neural Information Processing Systems, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 360, + 504, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 360, + 504, + 405 + ], + "spans": [ + { + "bbox": [ + 107, + 360, + 504, + 405 + ], + "type": "text", + "content": "Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George van den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. Improving language models by retrieving from trillions of tokens. arXiv preprint arXiv:2112.04426, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 412, + 504, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 412, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 107, + 412, + 504, + 437 + ], + "type": "text", + "content": "Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. Conceptual 12M: Pushing web-scale image-text pre-training to recognize long-tail visual concepts. In CVPR, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 442, + 504, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 442, + 504, + 466 + ], + "spans": [ + { + "bbox": [ + 107, + 442, + 504, + 466 + ], + "type": "text", + "content": "Romain Beaumont Christoph Schuhmann. Aesthetic predictor. https://github.com/LAION-AI/aesthetic-predictor, 2022." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 472, + 504, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 472, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 107, + 472, + 504, + 506 + ], + "type": "text", + "content": "Katherine Crowson, Stella Biderman, Daniel Kornis, Dashiell Stander, Eric Hallahan, Louis Castricato, and Edward Raff. Vqgan-clip: Open domain image generation and editing with natural language guidance. arXiv preprint arXiv:2204.08583, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 512, + 504, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 512, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 107, + 512, + 504, + 536 + ], + "type": "text", + "content": "Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson. RedCaps: Web-curated image-text data created by the people, for the people. In NeurIPS Datasets and Benchmarks, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 542, + 504, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 542, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 107, + 542, + 504, + 567 + ], + "type": "text", + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 572, + 504, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 572, + 504, + 596 + ], + "spans": [ + { + "bbox": [ + 107, + 572, + 504, + 596 + ], + "type": "text", + "content": "Prafulla Dhariwal and Alex Nichol. Diffusion models beat gans on image synthesis. arXiv preprint arXiv:2105.05233, 2021." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 602, + 504, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 602, + 504, + 637 + ], + "spans": [ + { + "bbox": [ + 107, + 602, + 504, + 637 + ], + "type": "text", + "content": "Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, et al. Cogview: Mastering text-to-image generation via transformers. Advances in Neural Information Processing Systems, 34, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 643, + 504, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 643, + 504, + 678 + ], + "spans": [ + { + "bbox": [ + 107, + 643, + 504, + 678 + ], + "type": "text", + "content": "Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873-12883, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 685, + 504, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 685, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 107, + 685, + 504, + 719 + ], + "type": "text", + "content": "Georgios D Evangelidis and Emmanouil Z Psarakis. Parametric image alignment using enhanced correlation coefficient maximization. IEEE transactions on pattern analysis and machine intelligence, 30(10):1858-1865, 2008." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 712 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman. Make-a-scene: Scene-based text-to-image generation with human priors. arXiv preprint arXiv:2203.13131, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "text", + "content": "Tiezheng Ge, Kaiming He, Qifa Ke, and Jian Sun. Optimized product quantization for approximate nearest neighbor search. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2013." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 204, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 204, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 505, + 228 + ], + "type": "text", + "content": "Jiatao Gu, Yong Wang, Kyunghyun Cho, and Victor OK Li. Search engine guided neural machine translation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 234, + 505, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 234, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 505, + 267 + ], + "type": "text", + "content": "Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, and Baining Guo. Vector quantized diffusion model for text-to-image synthesis. ArXiv, abs/2111.14822, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 275, + 505, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 275, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 505, + 309 + ], + "type": "text", + "content": "Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 316, + 505, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 505, + 350 + ], + "type": "text", + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 357, + 505, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 505, + 380 + ], + "type": "text", + "content": "Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. In NeurIPS 2021 Workshop on Deep Generative Models and Downstream Applications, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 386, + 505, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 386, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 505, + 410 + ], + "type": "text", + "content": "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. arXiv preprint arXiv:2006.11239, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 416, + 373, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 416, + 373, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 373, + 430 + ], + "type": "text", + "content": "David Hume. An enquiry concerning human understanding, 1748." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 435, + 474, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 435, + 474, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 474, + 449 + ], + "type": "text", + "content": "Karim Iskakov. Semi-parametric image inpainting. 
arXiv preprint arXiv:1807.02855, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 454, + 505, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 454, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 454, + 505, + 479 + ], + "type": "text", + "content": "Herve Jegou, Matthijs Douze, and Cordelia Schmid. Product quantization for nearest neighbor search. IEEE transactions on pattern analysis and machine intelligence, 33(1):117-128, 2010." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 484, + 505, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 484, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 505, + 508 + ], + "type": "text", + "content": "Jeff Johnson, Matthijs Douze, and Hervé Jégou. Billion-scale similarity search with GPUs. IEEE Transactions on Big Data, 7(3):535-547, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 514, + 505, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 505, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 505, + 548 + ], + "type": "text", + "content": "Gwanghyun Kim, Taesung Kwon, and Jong Chul Ye. Diffusionclip: Text-guided diffusion models for robust image manipulation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2426-2435, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 555, + 505, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 505, + 589 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 505, + 589 + ], + "type": "text", + "content": "Ivan Krasin, Tom Duerig, Neil Alldrin, Andreas Veit, Sami Abu-El-Haija, Serge Belongie, David Cai, Zheyun Feng, Vittorio Ferrari, and Victor Gomes. Openimages: A public dataset for large-scale multi-label and multi-class image classification., 01 2016." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 596, + 505, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 505, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 505, + 641 + ], + "type": "text", + "content": "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, Michael Bernstein, and Li Fei-Fei. Visual genome: Connecting language and vision using crowdsourced dense image annotations. 2016. URL https://arxiv.org/abs/1602.07332." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 647, + 505, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 505, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 505, + 672 + ], + "type": "text", + "content": "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. Latent retrieval for weakly supervised open domain question answering. arXiv preprint arXiv:1906.00300, 2019." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 677, + 505, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 505, + 712 + ], + "type": "text", + "content": "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. arXiv preprint arXiv:2201.12086, 2022." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 709 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pp. 740-755. Springer, 2014." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "text", + "content": "Xingchao Liu, Chengyue Gong, Lemeng Wu, Shujian Zhang, Hao Su, and Qiang Liu. Fusedream: Training-free text-to-image generation with improved clip+ gan space optimization. arXiv preprint arXiv:2112.01573, 2021." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "type": "text", + "content": "Ron Mokady, Amir Hertz, and Amit H Bermano. Clipcap: Clip prefix for image captioning. arXiv preprint arXiv:2111.09734, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 193, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 193, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 504, + 217 + ], + "type": "text", + "content": "Norman Mu, Alexander Kirillov, David Wagner, and Saining Xie. Slip: Self-supervision meets language-image pre-training. arXiv preprint arXiv:2112.12750, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 504, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 504, + 258 + ], + "type": "text", + "content": "Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 264, + 505, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 264, + 505, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 264, + 505, + 320 + ], + "type": "text", + "content": "Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, and K. Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 24. Curran Associates, Inc., 2011. 
URL https://proceedings.neurips.cc/paper/2011/file/5dd9db5e033da9c6fb5ba83c7a7ebea9-Paper.pdf." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 327, + 504, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 504, + 361 + ], + "type": "text", + "content": "Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2085–2094, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 367, + 504, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 367, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 504, + 403 + ], + "type": "text", + "content": "Jordi Pont-Tuset, Jasper Uijlings, Soravit Changpinyo, Radu Soricut, and Vittorio Ferrari. Connecting vision and language with localized narratives. In European Conference on Computer Vision, pp. 647-664. Springer, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 408, + 504, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 408, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 504, + 443 + ], + "type": "text", + "content": "Xiaojuan Qi, Qifeng Chen, Jiaya Jia, and Vladlen Koltun. Semi-parametric image synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8808-8816, 2018." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 449, + 504, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 504, + 495 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. URL https://arxiv.org/abs/2103.00020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 501, + 504, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 501, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 501, + 504, + 536 + ], + "type": "text", + "content": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In International Conference on Machine Learning, pp. 8821-8831. PMLR, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 542, + 504, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 542, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 504, + 567 + ], + "type": "text", + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 572, + 504, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 572, + 504, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 504, + 607 + ], + "type": "text", + "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684-10695, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 613, + 504, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 504, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 504, + 658 + ], + "type": "text", + "content": "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 665, + 504, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 665, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 504, + 709 + ], + "type": "text", + "content": "Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2556-2565, 2018." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 116 + ], + "type": "text", + "content": "Yawar Siddiqui, Justus Thies, Fangchang Ma, Qi Shan, Matthias Nießner, and Angela Dai. Retrievalfuse: Neural 3d scene reconstruction with a database. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 12568-12577, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 504, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 504, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 504, + 156 + ], + "type": "text", + "content": "Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. arXiv preprint arXiv:2112.04482, 2021." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 162, + 504, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 162, + 504, + 195 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 504, + 195 + ], + "type": "text", + "content": "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 201, + 504, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 504, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 504, + 246 + ], + "type": "text", + "content": "Krishna Srinivasan, Karthik Raman, Jiecao Chen, Michael Bendersky, and Marc Najork. Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2443-2449, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 253, + 504, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 253, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 504, + 286 + ], + "type": "text", + "content": "Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. The new data and new challenges in multimedia research. CoRR, abs/1503.01817, 2015. URL http://arxiv.org/abs/1503.01817." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 293, + 504, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 504, + 327 + ], + "type": "text", + "content": "Hung-Yu Tseng, Hsin-Ying Lee, Lu Jiang, Ming-Hsuan Yang, and Weilong Yang. Retrieved: Image synthesis via differentiable patch retrieval. 
In European Conference on Computer Vision, pp. 242-257. Springer, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 333, + 504, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 333, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 504, + 355 + ], + "type": "text", + "content": "Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 361, + 504, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 504, + 395 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 504, + 395 + ], + "type": "text", + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 401, + 504, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 401, + 504, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 504, + 423 + ], + "type": "text", + "content": "Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 504, + 464 + ], + "type": "text", + "content": "Xintao Wang, Liangbin Xie, Chao Dong, and Ying Shan. Real-esrgan: Training real-world blind super-resolution with pure synthetic data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 1905-1914, 2021." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 470, + 504, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 470, + 504, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 470, + 504, + 493 + ], + "type": "text", + "content": "Zihao Wang, Wei Liu, Qian He, Xinglong Wu, and Zili Yi. Clip-gen: Language-free training of a text-to-image generator with clip. arXiv preprint arXiv:2203.00386, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 499, + 504, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 499, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 504, + 521 + ], + "type": "text", + "content": "Yuhuai Wu, Markus N Rabe, DeLesley Hutchins, and Christian Szegedy. Memorizing transformers. arXiv preprint arXiv:2203.08913, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 528, + 504, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 528, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 504, + 571 + ], + "type": "text", + "content": "Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. Attingan: Fine-grained text to image generation with attentional generative adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1316-1324, 2018." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 578, + 504, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 504, + 613 + ], + "type": "text", + "content": "Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2022." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 619, + 504, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 619, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 504, + 653 + ], + "type": "text", + "content": "Han Zhang, Jing Yu Koh, Jason Baldridge, Honglak Lee, and Yinfei Yang. Cross-modal contrastive learning for text-to-image generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 833-842, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 658, + 504, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 504, + 693 + ], + "type": "text", + "content": "Yufan Zhou, Ruiyi Zhang, Changyou Chen, Chunyuan Li, Chris Tensmeyer, Tong Yu, Jiumiang Gu, Jinhui Xu, and Tong Sun. LAFITE: towards language-free training for text-to-image generation. CoRR, abs/2111.13792, 2021. URL https://arxiv.org/abs/2111.13792." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "text", + "content": "Minfeng Zhu, Pingbo Pan, Wei Chen, and Yi Yang. Dm-gan: Dynamic memory generative adversarial networks for text-to-image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 5802-5810, 2019." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 181, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 181, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 181, + 94 + ], + "type": "text", + "content": "6 APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 52, + 95, + 550, + 680 + ], + "blocks": [ + { + "bbox": [ + 52, + 95, + 550, + 680 + ], + "lines": [ + { + "bbox": [ + 52, + 95, + 550, + 680 + ], + "spans": [ + { + "bbox": [ + 52, + 95, + 550, + 680 + ], + "type": "image", + "image_path": "2b4b72b1f6799e07a03144ae2c4f93aba3e1f5a77c12dea2590dbbd421563f90.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 217, + 689, + 392, + 701 + ], + "lines": [ + { + "bbox": [ + 217, + 689, + 392, + 701 + ], + "spans": [ + { + "bbox": [ + 217, + 689, + 392, + 701 + ], + "type": "text", + "content": "Figure 11: Samples from COCO validation set." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 116, + 171, + 190 + ], + "blocks": [ + { + "bbox": [ + 108, + 116, + 171, + 190 + ], + "lines": [ + { + "bbox": [ + 108, + 116, + 171, + 190 + ], + "spans": [ + { + "bbox": [ + 108, + 116, + 171, + 190 + ], + "type": "image", + "image_path": "ce3ccce82f232b4e283aec494b0c13bea42ffee036d195210f0b4d5966567f78.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 192, + 173, + 207 + ], + "lines": [ + { + "bbox": [ + 110, + 192, + 173, + 207 + ], + "spans": [ + { + "bbox": [ + 110, + 192, + 173, + 207 + ], + "type": "text", + "content": "Chicken waiter serving dinner" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 184, + 119, + 264, + 189 + ], + "blocks": [ + { + "bbox": [ + 184, + 119, + 264, + 189 + ], + "lines": [ + { + "bbox": [ + 184, + 119, + 264, + 189 + ], + "spans": [ + { + "bbox": [ + 184, + 119, + 264, + 189 + ], + "type": "image", + "image_path": "771a520485db0685554de117fdb5b8b2b7df526a3ff3d6e29bcb96ac1aa31fa9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + 
"bbox": [ + 208, + 191, + 248, + 199 + ], + "lines": [ + { + "bbox": [ + 208, + 191, + 248, + 199 + ], + "spans": [ + { + "bbox": [ + 208, + 191, + 248, + 199 + ], + "type": "text", + "content": "Virtual reality" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 279, + 119, + 332, + 190 + ], + "blocks": [ + { + "bbox": [ + 279, + 119, + 332, + 190 + ], + "lines": [ + { + "bbox": [ + 279, + 119, + 332, + 190 + ], + "spans": [ + { + "bbox": [ + 279, + 119, + 332, + 190 + ], + "type": "image", + "image_path": "f507f247bbca5b3b141fb07f4338e1f4d52d996e4ae52357691d3ef8a425601b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 292, + 192, + 328, + 207 + ], + "lines": [ + { + "bbox": [ + 292, + 192, + 328, + 207 + ], + "spans": [ + { + "bbox": [ + 292, + 192, + 328, + 207 + ], + "type": "text", + "content": "Monkey eats hamburger" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 357, + 119, + 413, + 190 + ], + "blocks": [ + { + "bbox": [ + 357, + 119, + 413, + 190 + ], + "lines": [ + { + "bbox": [ + 357, + 119, + 413, + 190 + ], + "spans": [ + { + "bbox": [ + 357, + 119, + 413, + 190 + ], + "type": "image", + "image_path": "771a07aba8d1ecb57f9a52969daf51707af53d41b78c6e538aae8ae368b0ab5c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 194, + 417, + 202 + ], + "lines": [ + { + "bbox": [ + 364, + 194, + 417, + 202 + ], + "spans": [ + { + "bbox": [ + 364, + 194, + 417, + 202 + ], + "type": "text", + "content": "Scared fish in a suit" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 432, + 119, + 496, + 188 + ], + "blocks": [ + { + "bbox": [ + 432, + 119, + 496, + 188 + ], + "lines": [ + { + "bbox": [ + 432, + 119, + 496, + 
188 + ], + "spans": [ + { + "bbox": [ + 432, + 119, + 496, + 188 + ], + "type": "image", + "image_path": "ec99dc93cc1fdb09978c1af0af6fc17374a9a8dffd107442bdacd63fe7eb9afd.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 444, + 195, + 485, + 203 + ], + "lines": [ + { + "bbox": [ + 444, + 195, + 485, + 203 + ], + "spans": [ + { + "bbox": [ + 444, + 195, + 485, + 203 + ], + "type": "text", + "content": "Clown unicorn" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 108, + 222, + 179, + 282 + ], + "blocks": [ + { + "bbox": [ + 108, + 222, + 179, + 282 + ], + "lines": [ + { + "bbox": [ + 108, + 222, + 179, + 282 + ], + "spans": [ + { + "bbox": [ + 108, + 222, + 179, + 282 + ], + "type": "image", + "image_path": "57ff2b04de1499bafd2d67d5a2bbdd0be2c52b7738157291f8a86ca7ec5bbcfe.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 285, + 166, + 293 + ], + "lines": [ + { + "bbox": [ + 119, + 285, + 166, + 293 + ], + "spans": [ + { + "bbox": [ + 119, + 285, + 166, + 293 + ], + "type": "text", + "content": "Goodnight sleep" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 186, + 214, + 264, + 282 + ], + "blocks": [ + { + "bbox": [ + 186, + 214, + 264, + 282 + ], + "lines": [ + { + "bbox": [ + 186, + 214, + 264, + 282 + ], + "spans": [ + { + "bbox": [ + 186, + 214, + 264, + 282 + ], + "type": "image", + "image_path": "824a9229933683fab759cf622192aef4dbe56e89a46a3f24a712c0a517da4797.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 203, + 285, + 246, + 293 + ], + "lines": [ + { + "bbox": [ + 203, + 285, + 246, + 293 + ], + "spans": [ + { + "bbox": [ + 203, + 285, + 246, + 293 + ], + "type": "text", + "content": "Alpaca in space" + } + ] + } + ], + "index": 14, + 
"angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 279, + 212, + 345, + 281 + ], + "blocks": [ + { + "bbox": [ + 279, + 212, + 345, + 281 + ], + "lines": [ + { + "bbox": [ + 279, + 212, + 345, + 281 + ], + "spans": [ + { + "bbox": [ + 279, + 212, + 345, + 281 + ], + "type": "image", + "image_path": "e13ed912a0aa103ff4953892cd8ca6a4f47a265f0555caa255b81687631d60be.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 284, + 345, + 292 + ], + "lines": [ + { + "bbox": [ + 282, + 284, + 345, + 292 + ], + "spans": [ + { + "bbox": [ + 282, + 284, + 345, + 292 + ], + "type": "text", + "content": "Gargoyle in a party hat" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 353, + 212, + 417, + 279 + ], + "blocks": [ + { + "bbox": [ + 353, + 212, + 417, + 279 + ], + "lines": [ + { + "bbox": [ + 353, + 212, + 417, + 279 + ], + "spans": [ + { + "bbox": [ + 353, + 212, + 417, + 279 + ], + "type": "image", + "image_path": "cee58992bce38c9bb39d79872946f9b972e360e346a27a4a377ac84937b6f626.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 285, + 412, + 293 + ], + "lines": [ + { + "bbox": [ + 361, + 285, + 412, + 293 + ], + "spans": [ + { + "bbox": [ + 361, + 285, + 412, + 293 + ], + "type": "text", + "content": "Cauliflower crying" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 438, + 215, + 485, + 277 + ], + "blocks": [ + { + "bbox": [ + 438, + 215, + 485, + 277 + ], + "lines": [ + { + "bbox": [ + 438, + 215, + 485, + 277 + ], + "spans": [ + { + "bbox": [ + 438, + 215, + 485, + 277 + ], + "type": "image", + "image_path": "034c9ada3080558284963679cf8d485383c929b4d00d6e33e37a83c6f5655416.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 436, + 280, + 490, + 295 + ], + "lines": [ + { + "bbox": [ + 436, + 280, + 490, + 295 + ], + "spans": [ + { + "bbox": [ + 436, + 280, + 490, + 295 + ], + "type": "text", + "content": "Teddy bear wearing VR headset" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 118, + 302, + 178, + 373 + ], + "blocks": [ + { + "bbox": [ + 118, + 302, + 178, + 373 + ], + "lines": [ + { + "bbox": [ + 118, + 302, + 178, + 373 + ], + "spans": [ + { + "bbox": [ + 118, + 302, + 178, + 373 + ], + "type": "image", + "image_path": "39f7776fbe4837569292411ecc74a220567677e534175845d0bc255cea705851.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 375, + 160, + 390 + ], + "lines": [ + { + "bbox": [ + 126, + 375, + 160, + 390 + ], + "spans": [ + { + "bbox": [ + 126, + 375, + 160, + 390 + ], + "type": "text", + "content": "Hot headed cucumber" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 190, + 303, + 265, + 367 + ], + "blocks": [ + { + "bbox": [ + 190, + 303, + 265, + 367 + ], + "lines": [ + { + "bbox": [ + 190, + 303, + 265, + 367 + ], + "spans": [ + { + "bbox": [ + 190, + 303, + 265, + 367 + ], + "type": "image", + "image_path": "e8cbcd13f98b1ca4d06fe2a5bf15d8bb1f498bdd74b0affd113156c908e73e28.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 199, + 372, + 256, + 387 + ], + "lines": [ + { + "bbox": [ + 199, + 372, + 256, + 387 + ], + "spans": [ + { + "bbox": [ + 199, + 372, + 256, + 387 + ], + "type": "text", + "content": "Painterly pigeon playing a synthesizer" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 294, + 301, + 329, + 372 + ], + "blocks": [ + { + "bbox": [ + 294, + 301, + 329, + 
372 + ], + "lines": [ + { + "bbox": [ + 294, + 301, + 329, + 372 + ], + "spans": [ + { + "bbox": [ + 294, + 301, + 329, + 372 + ], + "type": "image", + "image_path": "68cd35a40ac2bddb7df95aade633f7c5c7d260abd1ebd3c53e07aa987dbf4b34.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 298, + 376, + 335, + 383 + ], + "lines": [ + { + "bbox": [ + 298, + 376, + 335, + 383 + ], + "spans": [ + { + "bbox": [ + 298, + 376, + 335, + 383 + ], + "type": "text", + "content": "3D cat avatar" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 349, + 308, + 427, + 369 + ], + "blocks": [ + { + "bbox": [ + 349, + 308, + 427, + 369 + ], + "lines": [ + { + "bbox": [ + 349, + 308, + 427, + 369 + ], + "spans": [ + { + "bbox": [ + 349, + 308, + 427, + 369 + ], + "type": "image", + "image_path": "37563a8081740e796edcb080f0817ace44ed87f4b74ba2510c64426ec84948f5.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 356, + 371, + 411, + 384 + ], + "lines": [ + { + "bbox": [ + 356, + 371, + 411, + 384 + ], + "spans": [ + { + "bbox": [ + 356, + 371, + 411, + 384 + ], + "type": "text", + "content": "Music band made of fruits" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 441, + 299, + 493, + 368 + ], + "blocks": [ + { + "bbox": [ + 441, + 299, + 493, + 368 + ], + "lines": [ + { + "bbox": [ + 441, + 299, + 493, + 368 + ], + "spans": [ + { + "bbox": [ + 441, + 299, + 493, + 368 + ], + "type": "image", + "image_path": "195a0de413ce4cde3dd7de6434aa6699abd110a0d8a6d502197df029ba65e73f.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 435, + 370, + 497, + 384 + ], + "lines": [ + { + "bbox": [ + 435, + 370, + 497, + 384 + ], + "spans": [ + { + "bbox": [ + 435, + 370, + 497, + 384 + ], + "type": 
"text", + "content": "A confused robot as an impressionist painting" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 110, + 399, + 178, + 462 + ], + "blocks": [ + { + "bbox": [ + 110, + 399, + 178, + 462 + ], + "lines": [ + { + "bbox": [ + 110, + 399, + 178, + 462 + ], + "spans": [ + { + "bbox": [ + 110, + 399, + 178, + 462 + ], + "type": "image", + "image_path": "1b74464a4fc7f98a95da6d801edef23976c5e706bf300b37f9ad2ae461479699.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 113, + 469, + 168, + 477 + ], + "lines": [ + { + "bbox": [ + 113, + 469, + 168, + 477 + ], + "spans": [ + { + "bbox": [ + 113, + 469, + 168, + 477 + ], + "type": "text", + "content": "Panda playing guitar" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 183, + 407, + 260, + 466 + ], + "blocks": [ + { + "bbox": [ + 183, + 407, + 260, + 466 + ], + "lines": [ + { + "bbox": [ + 183, + 407, + 260, + 466 + ], + "spans": [ + { + "bbox": [ + 183, + 407, + 260, + 466 + ], + "type": "image", + "image_path": "213a8157b171ac994347fa846fab6f3e6e5de2656e48e1a24e46010c2658a58c.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 202, + 468, + 250, + 477 + ], + "lines": [ + { + "bbox": [ + 202, + 468, + 250, + 477 + ], + "spans": [ + { + "bbox": [ + 202, + 468, + 250, + 477 + ], + "type": "text", + "content": "Sloth doing ballet" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 268, + 403, + 351, + 466 + ], + "blocks": [ + { + "bbox": [ + 268, + 403, + 351, + 466 + ], + "lines": [ + { + "bbox": [ + 268, + 403, + 351, + 466 + ], + "spans": [ + { + "bbox": [ + 268, + 403, + 351, + 466 + ], + "type": "image", + "image_path": 
"8196555c6fd50ab74b29d49fdf3aa0be6e069ec57518a500d87f0e3382d1478f.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 468, + 347, + 483 + ], + "lines": [ + { + "bbox": [ + 282, + 468, + 347, + 483 + ], + "spans": [ + { + "bbox": [ + 282, + 468, + 347, + 483 + ], + "type": "text", + "content": "3D rendering of avatars playing basketball" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 361, + 394, + 411, + 465 + ], + "blocks": [ + { + "bbox": [ + 361, + 394, + 411, + 465 + ], + "lines": [ + { + "bbox": [ + 361, + 394, + 411, + 465 + ], + "spans": [ + { + "bbox": [ + 361, + 394, + 411, + 465 + ], + "type": "image", + "image_path": "1b3c6dc79a6a667b80c708688eb6a73cee42591de3e517b784dbc27f9c75a281.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 375, + 468, + 411, + 477 + ], + "lines": [ + { + "bbox": [ + 375, + 468, + 411, + 477 + ], + "spans": [ + { + "bbox": [ + 375, + 468, + 411, + 477 + ], + "type": "text", + "content": "Singing otter" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 425, + 396, + 502, + 465 + ], + "blocks": [ + { + "bbox": [ + 425, + 396, + 502, + 465 + ], + "lines": [ + { + "bbox": [ + 425, + 396, + 502, + 465 + ], + "spans": [ + { + "bbox": [ + 425, + 396, + 502, + 465 + ], + "type": "image", + "image_path": "de20396cc5f3c57b4d02269470eeaeed40eeb826644a1974c36a13ec4e5c33a4.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 468, + 496, + 483 + ], + "lines": [ + { + "bbox": [ + 440, + 468, + 496, + 483 + ], + "spans": [ + { + "bbox": [ + 440, + 468, + 496, + 483 + ], + "type": "text", + "content": "Muscle man riding a wave in Hawaii" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 
39 + }, + { + "type": "image", + "bbox": [ + 110, + 483, + 179, + 554 + ], + "blocks": [ + { + "bbox": [ + 110, + 483, + 179, + 554 + ], + "lines": [ + { + "bbox": [ + 110, + 483, + 179, + 554 + ], + "spans": [ + { + "bbox": [ + 110, + 483, + 179, + 554 + ], + "type": "image", + "image_path": "e86297f7018f77b6baa117da5bf750371f060cc3c823697eae58f8d6c9827a5c.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 189, + 491, + 259, + 551 + ], + "blocks": [ + { + "bbox": [ + 189, + 491, + 259, + 551 + ], + "lines": [ + { + "bbox": [ + 189, + 491, + 259, + 551 + ], + "spans": [ + { + "bbox": [ + 189, + 491, + 259, + 551 + ], + "type": "image", + "image_path": "91714325fdca8c125ac3a66b6191ace57a769107f063055d960cfe21d7a07ad5.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 204, + 558, + 260, + 574 + ], + "lines": [ + { + "bbox": [ + 204, + 558, + 260, + 574 + ], + "spans": [ + { + "bbox": [ + 204, + 558, + 260, + 574 + ], + "type": "text", + "content": "Dog is programming a computer" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 274, + 490, + 350, + 552 + ], + "blocks": [ + { + "bbox": [ + 274, + 490, + 350, + 552 + ], + "lines": [ + { + "bbox": [ + 274, + 490, + 350, + 552 + ], + "spans": [ + { + "bbox": [ + 274, + 490, + 350, + 552 + ], + "type": "image", + "image_path": "47ad1575d14858d0c694d739d3c0411fa031052ec6b2989d46345aadf46561fc.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 284, + 558, + 337, + 574 + ], + "lines": [ + { + "bbox": [ + 284, + 558, + 337, + 574 + ], + "spans": [ + { + "bbox": [ + 284, + 558, + 337, + 574 + ], + "type": "text", + "content": "Radha Krishna dancing in a garden" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + } + ], + "index": 45 + }, + { + 
"type": "image", + "bbox": [ + 365, + 483, + 414, + 554 + ], + "blocks": [ + { + "bbox": [ + 365, + 483, + 414, + 554 + ], + "lines": [ + { + "bbox": [ + 365, + 483, + 414, + 554 + ], + "spans": [ + { + "bbox": [ + 365, + 483, + 414, + 554 + ], + "type": "image", + "image_path": "cca27a8f1996860dbfcb73a94b4b345b11a4dd3aefea845360161d16166c1540.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 368, + 558, + 412, + 574 + ], + "lines": [ + { + "bbox": [ + 368, + 558, + 412, + 574 + ], + "spans": [ + { + "bbox": [ + 368, + 558, + 412, + 574 + ], + "type": "text", + "content": "Shark wearing a birthday hat" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 426, + 495, + 501, + 554 + ], + "blocks": [ + { + "bbox": [ + 426, + 495, + 501, + 554 + ], + "lines": [ + { + "bbox": [ + 426, + 495, + 501, + 554 + ], + "spans": [ + { + "bbox": [ + 426, + 495, + 501, + 554 + ], + "type": "image", + "image_path": "25884ccba2d5d04cbb30fef6e491a8c9bfc0f1a17aec2712e178fc7d29612049.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 433, + 557, + 496, + 579 + ], + "lines": [ + { + "bbox": [ + 433, + 557, + 496, + 579 + ], + "spans": [ + { + "bbox": [ + 433, + 557, + 496, + 579 + ], + "type": "text", + "content": "Avocado playing ukulele and an apple is watching" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 113, + 582, + 185, + 651 + ], + "blocks": [ + { + "bbox": [ + 117, + 563, + 175, + 579 + ], + "lines": [ + { + "bbox": [ + 117, + 563, + 175, + 579 + ], + "spans": [ + { + "bbox": [ + 117, + 563, + 175, + 579 + ], + "type": "text", + "content": "Cinematic llama with dramatic lighting" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 113, + 582, + 185, + 651 + ], + "lines": 
[ + { + "bbox": [ + 113, + 582, + 185, + 651 + ], + "spans": [ + { + "bbox": [ + 113, + 582, + 185, + 651 + ], + "type": "image", + "image_path": "06ccbf7aa43ed907b415559031a1f9dea412556970d9ce31e45d465097ae10dc.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 120, + 658, + 166, + 666 + ], + "lines": [ + { + "bbox": [ + 120, + 658, + 166, + 666 + ], + "spans": [ + { + "bbox": [ + 120, + 658, + 166, + 666 + ], + "type": "text", + "content": "Celebrating frog" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 144, + 685, + 465, + 696 + ], + "lines": [ + { + "bbox": [ + 144, + 685, + 465, + 696 + ], + "spans": [ + { + "bbox": [ + 144, + 685, + 465, + 696 + ], + "type": "text", + "content": "Figure 12: A selection of stickers generated using the continuous kNN-Diffusion model." + } + ] + } + ], + "index": 61, + "angle": 0, + "type": "image_caption" + } + ], + "index": 51 + }, + { + "type": "image", + "bbox": [ + 188, + 589, + 271, + 642 + ], + "blocks": [ + { + "bbox": [ + 188, + 589, + 271, + 642 + ], + "lines": [ + { + "bbox": [ + 188, + 589, + 271, + 642 + ], + "spans": [ + { + "bbox": [ + 188, + 589, + 271, + 642 + ], + "type": "image", + "image_path": "2d1eaa1c073e8d35e25d43e64ffbae7daf6da678710413708a2f7fc381506780.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 208, + 651, + 257, + 666 + ], + "lines": [ + { + "bbox": [ + 208, + 651, + 257, + 666 + ], + "spans": [ + { + "bbox": [ + 208, + 651, + 257, + 666 + ], + "type": "text", + "content": "A dog in a hotdog costume" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + } + ], + "index": 53 + }, + { + "type": "image", + "bbox": [ + 287, + 584, + 343, + 647 + ], + "blocks": [ + { + "bbox": [ + 287, + 584, + 343, + 647 + ], + "lines": [ + { + "bbox": [ + 287, + 584, + 343, + 647 + ], + "spans": [ + { + "bbox": [ + 287, + 584, + 343, + 647 + ], + "type": 
"image", + "image_path": "db0533c98d53ff549df32e6e68ca3863755f88eed3d04e57ae4e2abd973a0067.jpg" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 297, + 657, + 338, + 665 + ], + "lines": [ + { + "bbox": [ + 297, + 657, + 338, + 665 + ], + "spans": [ + { + "bbox": [ + 297, + 657, + 338, + 665 + ], + "type": "text", + "content": "Alien using VR" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_caption" + } + ], + "index": 55 + }, + { + "type": "image", + "bbox": [ + 348, + 588, + 420, + 647 + ], + "blocks": [ + { + "bbox": [ + 348, + 588, + 420, + 647 + ], + "lines": [ + { + "bbox": [ + 348, + 588, + 420, + 647 + ], + "spans": [ + { + "bbox": [ + 348, + 588, + 420, + 647 + ], + "type": "image", + "image_path": "065e901e300282687752803ea145e75c5c4a48525450a22d099b9c53f26c357b.jpg" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 354, + 651, + 418, + 673 + ], + "lines": [ + { + "bbox": [ + 354, + 651, + 418, + 673 + ], + "spans": [ + { + "bbox": [ + 354, + 651, + 418, + 673 + ], + "type": "text", + "content": "Unicorn with a rainbow horn, waving hand, and standing on grass" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_caption" + } + ], + "index": 57 + }, + { + "type": "image", + "bbox": [ + 424, + 583, + 500, + 648 + ], + "blocks": [ + { + "bbox": [ + 424, + 583, + 500, + 648 + ], + "lines": [ + { + "bbox": [ + 424, + 583, + 500, + 648 + ], + "spans": [ + { + "bbox": [ + 424, + 583, + 500, + 648 + ], + "type": "image", + "image_path": "9cf933b326cc7efbda45b6fc49f439e7aa4b295d9b93a6be84b7cd53c1092f54.jpg" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 654, + 495, + 670 + ], + "lines": [ + { + "bbox": [ + 440, + 654, + 495, + 670 + ], + "spans": [ + { + "bbox": [ + 440, + 654, + 495, + 670 + ], + "type": "text", + "content": "A brain made out of words" + } + ] + } + ], + "index": 60, + "angle": 0, + 
"type": "image_caption" + } + ], + "index": 59 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 62 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 143, + 200, + 236 + ], + "blocks": [ + { + "bbox": [ + 115, + 129, + 192, + 137 + ], + "lines": [ + { + "bbox": [ + 115, + 129, + 192, + 137 + ], + "spans": [ + { + "bbox": [ + 115, + 129, + 192, + 137 + ], + "type": "text", + "content": "A brown shiny rose flower" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 143, + 200, + 236 + ], + "lines": [ + { + "bbox": [ + 107, + 143, + 200, + 236 + ], + "spans": [ + { + "bbox": [ + 107, + 143, + 200, + 236 + ], + "type": "image", + "image_path": "370c6b22ce0cefa9204cffaaf109bd79d196271a9871fc6f9ec23750a5d3c71b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 129, + 239, + 179, + 246 + ], + "lines": [ + { + "bbox": [ + 129, + 239, + 179, + 246 + ], + "spans": [ + { + "bbox": [ + 129, + 239, + 179, + 246 + ], + "type": "text", + "content": "A white firetruck" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 208, + 143, + 300, + 236 + ], + "blocks": [ + { + "bbox": [ + 217, + 129, + 290, + 138 + ], + "lines": [ + { + "bbox": [ + 217, + 129, + 290, + 138 + ], 
+ "spans": [ + { + "bbox": [ + 217, + 129, + 290, + 138 + ], + "type": "text", + "content": "A dog using a typewriter" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 208, + 143, + 300, + 236 + ], + "lines": [ + { + "bbox": [ + 208, + 143, + 300, + 236 + ], + "spans": [ + { + "bbox": [ + 208, + 143, + 300, + 236 + ], + "type": "image", + "image_path": "64252786a5e033a5dc2c74dd2099aaec25d62ad2ec704b3222e80138304809d2.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 218, + 239, + 290, + 246 + ], + "lines": [ + { + "bbox": [ + 218, + 239, + 290, + 246 + ], + "spans": [ + { + "bbox": [ + 218, + 239, + 290, + 246 + ], + "type": "text", + "content": "Race car driver in a tutu" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 144, + 402, + 236 + ], + "blocks": [ + { + "bbox": [ + 333, + 129, + 379, + 138 + ], + "lines": [ + { + "bbox": [ + 333, + 129, + 379, + 138 + ], + "spans": [ + { + "bbox": [ + 333, + 129, + 379, + 138 + ], + "type": "text", + "content": "A robot tanning" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 144, + 402, + 236 + ], + "lines": [ + { + "bbox": [ + 310, + 144, + 402, + 236 + ], + "spans": [ + { + "bbox": [ + 310, + 144, + 402, + 236 + ], + "type": "image", + "image_path": "2cffa26afa32bbbed0e7f1d17b7728e98e2a29c0a173c9b9c2bd86be0bf8d8cd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 340, + 239, + 369, + 246 + ], + "lines": [ + { + "bbox": [ + 340, + 239, + 369, + 246 + ], + "spans": [ + { + "bbox": [ + 340, + 239, + 369, + 246 + ], + "type": "text", + "content": "Blue ants" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 408, + 144, + 500, + 236 + ], + "blocks": [ + { + 
"bbox": [ + 425, + 129, + 482, + 138 + ], + "lines": [ + { + "bbox": [ + 425, + 129, + 482, + 138 + ], + "spans": [ + { + "bbox": [ + 425, + 129, + 482, + 138 + ], + "type": "text", + "content": "Fuchsia iguanodon" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 408, + 144, + 500, + 236 + ], + "lines": [ + { + "bbox": [ + 408, + 144, + 500, + 236 + ], + "spans": [ + { + "bbox": [ + 408, + 144, + 500, + 236 + ], + "type": "image", + "image_path": "4a31a128de43a4f2b448b444f19a593c9f4e5d3bd607a5b6ac3a4be0766c982f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 414, + 239, + 493, + 247 + ], + "lines": [ + { + "bbox": [ + 414, + 239, + 493, + 247 + ], + "spans": [ + { + "bbox": [ + 414, + 239, + 493, + 247 + ], + "type": "text", + "content": "Lucifer dancing with Jesus" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 108, + 251, + 200, + 342 + ], + "blocks": [ + { + "bbox": [ + 108, + 251, + 200, + 342 + ], + "lines": [ + { + "bbox": [ + 108, + 251, + 200, + 342 + ], + "spans": [ + { + "bbox": [ + 108, + 251, + 200, + 342 + ], + "type": "image", + "image_path": "80a37af0645a1500e6f46f2c00f05e72ca08a553cd67a2d65db99dc6af1ee201.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 346, + 198, + 354 + ], + "lines": [ + { + "bbox": [ + 110, + 346, + 198, + 354 + ], + "spans": [ + { + "bbox": [ + 110, + 346, + 198, + 354 + ], + "type": "text", + "content": "Pineapple shaped refrigerator" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 208, + 251, + 300, + 342 + ], + "blocks": [ + { + "bbox": [ + 208, + 251, + 300, + 342 + ], + "lines": [ + { + "bbox": [ + 208, + 251, + 300, + 342 + ], + "spans": [ + { + "bbox": [ + 208, + 251, + 300, + 342 + ], + "type": 
"image", + "image_path": "3ebd87f5da61b89b7a3b7dab423d11e8c3059f9301b91e29aa22cb554204f073.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 342, + 301, + 357 + ], + "lines": [ + { + "bbox": [ + 211, + 342, + 301, + 357 + ], + "spans": [ + { + "bbox": [ + 211, + 342, + 301, + 357 + ], + "type": "text", + "content": "A surfer wearing a three-piece men's suit" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 309, + 251, + 401, + 342 + ], + "blocks": [ + { + "bbox": [ + 309, + 251, + 401, + 342 + ], + "lines": [ + { + "bbox": [ + 309, + 251, + 401, + 342 + ], + "spans": [ + { + "bbox": [ + 309, + 251, + 401, + 342 + ], + "type": "image", + "image_path": "2f2c07585bca319b7160e26cfcf98ab258be3265ece37279124fcb51cb8b27ea.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 320, + 346, + 389, + 354 + ], + "lines": [ + { + "bbox": [ + 320, + 346, + 389, + 354 + ], + "spans": [ + { + "bbox": [ + 320, + 346, + 389, + 354 + ], + "type": "text", + "content": "Rocking chair on water" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 408, + 251, + 501, + 342 + ], + "blocks": [ + { + "bbox": [ + 408, + 251, + 501, + 342 + ], + "lines": [ + { + "bbox": [ + 408, + 251, + 501, + 342 + ], + "spans": [ + { + "bbox": [ + 408, + 251, + 501, + 342 + ], + "type": "image", + "image_path": "71e5e86425d94203ab4eb0ac7a00b943ce2e232c3bcfc78316b3c1fc87443fec.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 434, + 346, + 473, + 354 + ], + "lines": [ + { + "bbox": [ + 434, + 346, + 473, + 354 + ], + "spans": [ + { + "bbox": [ + 434, + 346, + 473, + 354 + ], + "type": "text", + "content": "Alien cartoon" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + 
], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 110, + 360, + 199, + 449 + ], + "blocks": [ + { + "bbox": [ + 110, + 360, + 199, + 449 + ], + "lines": [ + { + "bbox": [ + 110, + 360, + 199, + 449 + ], + "spans": [ + { + "bbox": [ + 110, + 360, + 199, + 449 + ], + "type": "image", + "image_path": "3fee574afde9883f4afba06f39b3e02bed4cd57a241ef8a4b8ec81cc1102e68f.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 113, + 449, + 195, + 465 + ], + "lines": [ + { + "bbox": [ + 113, + 449, + 195, + 465 + ], + "spans": [ + { + "bbox": [ + 113, + 449, + 195, + 465 + ], + "type": "text", + "content": "Kitchen from the lost city of atlantis" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 209, + 359, + 300, + 451 + ], + "blocks": [ + { + "bbox": [ + 209, + 359, + 300, + 451 + ], + "lines": [ + { + "bbox": [ + 209, + 359, + 300, + 451 + ], + "spans": [ + { + "bbox": [ + 209, + 359, + 300, + 451 + ], + "type": "image", + "image_path": "7e047ca7ac0f5db30927a660c2fbfab2aa5bee42d353caf6a5258787e1827f60.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 224, + 454, + 280, + 462 + ], + "lines": [ + { + "bbox": [ + 224, + 454, + 280, + 462 + ], + "spans": [ + { + "bbox": [ + 224, + 454, + 280, + 462 + ], + "type": "text", + "content": "A pink watermelon" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 309, + 358, + 401, + 450 + ], + "blocks": [ + { + "bbox": [ + 309, + 358, + 401, + 450 + ], + "lines": [ + { + "bbox": [ + 309, + 358, + 401, + 450 + ], + "spans": [ + { + "bbox": [ + 309, + 358, + 401, + 450 + ], + "type": "image", + "image_path": "8649d2f4a994dc5fe971a11ad918fd843cce0157555b9f74b407e6ba7c1f97b0.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 
326, + 453, + 383, + 463 + ], + "lines": [ + { + "bbox": [ + 326, + 453, + 383, + 463 + ], + "spans": [ + { + "bbox": [ + 326, + 453, + 383, + 463 + ], + "type": "text", + "content": "Image of grey tiger" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 408, + 358, + 500, + 450 + ], + "blocks": [ + { + "bbox": [ + 408, + 358, + 500, + 450 + ], + "lines": [ + { + "bbox": [ + 408, + 358, + 500, + 450 + ], + "spans": [ + { + "bbox": [ + 408, + 358, + 500, + 450 + ], + "type": "image", + "image_path": "0ea58105f475274f27d69f1d1a13a75abb921097f517766561b674d895440bc7.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 423, + 454, + 485, + 461 + ], + "lines": [ + { + "bbox": [ + 423, + 454, + 485, + 461 + ], + "spans": [ + { + "bbox": [ + 423, + 454, + 485, + 461 + ], + "type": "text", + "content": "Green robot vacuum" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 109, + 467, + 200, + 559 + ], + "blocks": [ + { + "bbox": [ + 109, + 467, + 200, + 559 + ], + "lines": [ + { + "bbox": [ + 109, + 467, + 200, + 559 + ], + "spans": [ + { + "bbox": [ + 109, + 467, + 200, + 559 + ], + "type": "image", + "image_path": "0c8ba1e6a6b182ac642d8c68efabacb6632b2c48c7db500a19c91352307fc406.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 116, + 561, + 192, + 570 + ], + "lines": [ + { + "bbox": [ + 116, + 561, + 192, + 570 + ], + "spans": [ + { + "bbox": [ + 116, + 561, + 192, + 570 + ], + "type": "text", + "content": "A baby cooking spaghetti" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 208, + 467, + 300, + 559 + ], + "blocks": [ + { + "bbox": [ + 208, + 467, + 300, + 559 + ], + "lines": [ + { + "bbox": [ + 208, + 467, + 300, + 
559 + ], + "spans": [ + { + "bbox": [ + 208, + 467, + 300, + 559 + ], + "type": "image", + "image_path": "76a022dd4c26524662b1445da0a09d0e73ae56fa230abb50f91e87e0d18a4970.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 227, + 562, + 281, + 569 + ], + "lines": [ + { + "bbox": [ + 227, + 562, + 281, + 569 + ], + "spans": [ + { + "bbox": [ + 227, + 562, + 281, + 569 + ], + "type": "text", + "content": "Raccoon mansion" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 309, + 467, + 401, + 559 + ], + "blocks": [ + { + "bbox": [ + 309, + 467, + 401, + 559 + ], + "lines": [ + { + "bbox": [ + 309, + 467, + 401, + 559 + ], + "spans": [ + { + "bbox": [ + 309, + 467, + 401, + 559 + ], + "type": "image", + "image_path": "249e63eae7a23719f8a4f252c80eec4c89a48d780de15cbc02f3159526b68def.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 320, + 561, + 389, + 570 + ], + "lines": [ + { + "bbox": [ + 320, + 561, + 389, + 570 + ], + "spans": [ + { + "bbox": [ + 320, + 561, + 389, + 570 + ], + "type": "text", + "content": "Flying m1 abrams tank" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 408, + 466, + 500, + 558 + ], + "blocks": [ + { + "bbox": [ + 408, + 466, + 500, + 558 + ], + "lines": [ + { + "bbox": [ + 408, + 466, + 500, + 558 + ], + "spans": [ + { + "bbox": [ + 408, + 466, + 500, + 558 + ], + "type": "image", + "image_path": "b2d7399d706c16ce4a6992c7c3e35cd4088022f3e7cfca2fe5576556d7093af4.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 409, + 561, + 498, + 570 + ], + "lines": [ + { + "bbox": [ + 409, + 561, + 498, + 570 + ], + "spans": [ + { + "bbox": [ + 409, + 561, + 498, + 570 + ], + "type": "text", + "content": "Baby pictures of grandparents" + } + ] + 
} + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 108, + 572, + 200, + 665 + ], + "blocks": [ + { + "bbox": [ + 108, + 572, + 200, + 665 + ], + "lines": [ + { + "bbox": [ + 108, + 572, + 200, + 665 + ], + "spans": [ + { + "bbox": [ + 108, + 572, + 200, + 665 + ], + "type": "image", + "image_path": "058412fcfa15ee2dd40dc066db22eb6b31b883b6ec2788731cd82589fa5c89eb.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 674, + 489, + 686 + ], + "lines": [ + { + "bbox": [ + 119, + 674, + 489, + 686 + ], + "spans": [ + { + "bbox": [ + 119, + 674, + 489, + 686 + ], + "type": "text", + "content": "Figure 13: Additional samples generated from challenging text inputs using the photo-realistic model" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 208, + 573, + 300, + 665 + ], + "blocks": [ + { + "bbox": [ + 208, + 573, + 300, + 665 + ], + "lines": [ + { + "bbox": [ + 208, + 573, + 300, + 665 + ], + "spans": [ + { + "bbox": [ + 208, + 573, + 300, + 665 + ], + "type": "image", + "image_path": "93e09def9ac4f2a57e4c56aa4f857983d1fabb0683731abd4fedcaa67ea8c3be.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 309, + 573, + 401, + 665 + ], + "blocks": [ + { + "bbox": [ + 309, + 573, + 401, + 665 + ], + "lines": [ + { + "bbox": [ + 309, + 573, + 401, + 665 + ], + "spans": [ + { + "bbox": [ + 309, + 573, + 401, + 665 + ], + "type": "image", + "image_path": "3547976b13202681db475437924519f652fd5025a0896c6ab9aa0c6532f58345.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 408, + 573, + 500, + 665 + ], + "blocks": [ + { + "bbox": [ + 408, + 573, + 500, + 665 + ], + "lines": [ + { + "bbox": [ + 408, + 
573, + 500, + 665 + ], + "spans": [ + { + "bbox": [ + 408, + 573, + 500, + 665 + ], + "type": "image", + "image_path": "78d779cbb719d59e7599c8ad91084bec96405b640debd586bc4c3936aa813ef2.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 42 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 137, + 176, + 205, + 245 + ], + "blocks": [ + { + "bbox": [ + 136, + 154, + 204, + 170 + ], + "lines": [ + { + "bbox": [ + 136, + 154, + 204, + 170 + ], + "spans": [ + { + "bbox": [ + 136, + 154, + 204, + 170 + ], + "type": "text", + "content": "This tomato is favorite of many high-class chefs" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 137, + 176, + 205, + 245 + ], + "lines": [ + { + "bbox": [ + 137, + 176, + 205, + 245 + ], + "spans": [ + { + "bbox": [ + 137, + 176, + 205, + 245 + ], + "type": "image", + "image_path": "b65fb3509024ec0020f9a7bde56f02487931e37073720ecffc9f0942e2083f9b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 143, + 279, + 192, + 340 + ], + "blocks": [ + { + "bbox": [ + 138, + 255, + 201, + 263 + ], + "lines": [ + { + "bbox": [ + 138, + 255, + 201, + 263 + ], + "spans": [ + { + "bbox": [ + 138, + 
255, + 201, + 263 + ], + "type": "text", + "content": "Squirrel wearing a shirt" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 143, + 279, + 192, + 340 + ], + "lines": [ + { + "bbox": [ + 143, + 279, + 192, + 340 + ], + "spans": [ + { + "bbox": [ + 143, + 279, + 192, + 340 + ], + "type": "image", + "image_path": "22bcf71182d87373683714929f6cde8a9645fe7b281e622216833b2284a080f4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 130, + 360, + 211, + 443 + ], + "blocks": [ + { + "bbox": [ + 145, + 350, + 195, + 359 + ], + "lines": [ + { + "bbox": [ + 145, + 350, + 195, + 359 + ], + "spans": [ + { + "bbox": [ + 145, + 350, + 195, + 359 + ], + "type": "text", + "content": "Singing eggplants" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 360, + 211, + 443 + ], + "lines": [ + { + "bbox": [ + 130, + 360, + 211, + 443 + ], + "spans": [ + { + "bbox": [ + 130, + 360, + 211, + 443 + ], + "type": "image", + "image_path": "1f467d3792f4ad73f0d569b9f664fa4c99496625567db6c3a16b321967525415.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 447, + 201, + 455 + ], + "lines": [ + { + "bbox": [ + 138, + 447, + 201, + 455 + ], + "spans": [ + { + "bbox": [ + 138, + 447, + 201, + 455 + ], + "type": "text", + "content": "A mushroom with a hat" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 137, + 460, + 204, + 540 + ], + "blocks": [ + { + "bbox": [ + 137, + 460, + 204, + 540 + ], + "lines": [ + { + "bbox": [ + 137, + 460, + 204, + 540 + ], + "spans": [ + { + "bbox": [ + 137, + 460, + 204, + 540 + ], + "type": "image", + "image_path": "af81b46e1300e22e9e14e666c500d606a75d00bcb22cb2fffc6f5a4440d67f1b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 138, + 555, + 204, + 636 + ], + "blocks": [ + { + "bbox": [ + 148, + 544, + 192, + 552 + ], + "lines": [ + { + "bbox": [ + 148, + 544, + 192, + 552 + ], + "spans": [ + { + "bbox": [ + 148, + 544, + 192, + 552 + ], + "type": "text", + "content": "Black mop head" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 138, + 555, + 204, + 636 + ], + "lines": [ + { + "bbox": [ + 138, + 555, + 204, + 636 + ], + "spans": [ + { + "bbox": [ + 138, + 555, + 204, + 636 + ], + "type": "image", + "image_path": "aab033387b0cf5230b9c6e7805bf11ae57f34c6f679ffd17fbc6e52d3aab4512.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 151, + 647, + 459, + 659 + ], + "lines": [ + { + "bbox": [ + 151, + 647, + 459, + 659 + ], + "spans": [ + { + "bbox": [ + 151, + 647, + 459, + 659 + ], + "type": "text", + "content": "Figure 14: A selection of stickers generated using the discrete kNN-Diffusion model." 
+ } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 219, + 175, + 302, + 247 + ], + "blocks": [ + { + "bbox": [ + 219, + 175, + 302, + 247 + ], + "lines": [ + { + "bbox": [ + 219, + 175, + 302, + 247 + ], + "spans": [ + { + "bbox": [ + 219, + 175, + 302, + 247 + ], + "type": "image", + "image_path": "949d063e94628bc7fe47c4c3ea2983a86e9ebbd3d8b80c778feff3aaed8fadad.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 222, + 251, + 299, + 266 + ], + "lines": [ + { + "bbox": [ + 222, + 251, + 299, + 266 + ], + "spans": [ + { + "bbox": [ + 222, + 251, + 299, + 266 + ], + "type": "text", + "content": "A panda bear carrying some grocery bags" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 231, + 273, + 282, + 343 + ], + "blocks": [ + { + "bbox": [ + 231, + 273, + 282, + 343 + ], + "lines": [ + { + "bbox": [ + 231, + 273, + 282, + 343 + ], + "spans": [ + { + "bbox": [ + 231, + 273, + 282, + 343 + ], + "type": "image", + "image_path": "cfa0b6ecc77ae6c5777ca20950d813bfa5043c232da4e09582a7d896363294e3.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 232, + 350, + 290, + 358 + ], + "lines": [ + { + "bbox": [ + 232, + 350, + 290, + 358 + ], + "spans": [ + { + "bbox": [ + 232, + 350, + 290, + 358 + ], + "type": "text", + "content": "Penguin drives a bus" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 220, + 366, + 301, + 440 + ], + "blocks": [ + { + "bbox": [ + 220, + 366, + 301, + 440 + ], + "lines": [ + { + "bbox": [ + 220, + 366, + 301, + 440 + ], + "spans": [ + { + "bbox": [ + 220, + 366, + 301, + 440 + ], + "type": "image", + "image_path": "9aab18425e79fb7d5e1dc08690250149f683ede1e3c77449de78f2402b32c395.jpg" + } + ] + } + 
], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 238, + 457, + 282, + 539 + ], + "blocks": [ + { + "bbox": [ + 227, + 443, + 296, + 457 + ], + "lines": [ + { + "bbox": [ + 227, + 443, + 296, + 457 + ], + "spans": [ + { + "bbox": [ + 227, + 443, + 296, + 457 + ], + "type": "text", + "content": "Chihuahua pulling a royal coach" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 238, + 457, + 282, + 539 + ], + "lines": [ + { + "bbox": [ + 238, + 457, + 282, + 539 + ], + "spans": [ + { + "bbox": [ + 238, + 457, + 282, + 539 + ], + "type": "image", + "image_path": "2e0f6f53549eba190c6c484a5cbea19870cab05444de8063c31f14a7ca29103c.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 227, + 544, + 294, + 552 + ], + "lines": [ + { + "bbox": [ + 227, + 544, + 294, + 552 + ], + "spans": [ + { + "bbox": [ + 227, + 544, + 294, + 552 + ], + "type": "text", + "content": "Elephant sitting on a lion" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 224, + 567, + 297, + 630 + ], + "blocks": [ + { + "bbox": [ + 224, + 567, + 297, + 630 + ], + "lines": [ + { + "bbox": [ + 224, + 567, + 297, + 630 + ], + "spans": [ + { + "bbox": [ + 224, + 567, + 297, + 630 + ], + "type": "image", + "image_path": "339ea1307b94f571c2a3eff4b26da0b612c80e5dc686895bbd1b8ec494660652.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 312, + 169, + 394, + 253 + ], + "blocks": [ + { + "bbox": [ + 233, + 158, + 288, + 166 + ], + "lines": [ + { + "bbox": [ + 233, + 158, + 288, + 166 + ], + "spans": [ + { + "bbox": [ + 233, + 158, + 288, + 166 + ], + "type": "text", + "content": "Bald-headed mimes" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + 
"bbox": [ + 335, + 158, + 370, + 166 + ], + "lines": [ + { + "bbox": [ + 335, + 158, + 370, + 166 + ], + "spans": [ + { + "bbox": [ + 335, + 158, + 370, + 166 + ], + "type": "text", + "content": "Vomit candy" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 312, + 169, + 394, + 253 + ], + "lines": [ + { + "bbox": [ + 312, + 169, + 394, + 253 + ], + "spans": [ + { + "bbox": [ + 312, + 169, + 394, + 253 + ], + "type": "image", + "image_path": "1c6e61268142f026dab4aea1a03c4ee9b596ee0b72ea70df1333e91a0fb6ce18.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 255, + 373, + 263 + ], + "lines": [ + { + "bbox": [ + 331, + 255, + 373, + 263 + ], + "spans": [ + { + "bbox": [ + 331, + 255, + 373, + 263 + ], + "type": "text", + "content": "Invisible people" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 310, + 289, + 394, + 347 + ], + "blocks": [ + { + "bbox": [ + 310, + 273, + 386, + 282 + ], + "lines": [ + { + "bbox": [ + 310, + 273, + 386, + 282 + ], + "spans": [ + { + "bbox": [ + 310, + 273, + 386, + 282 + ], + "type": "text", + "content": "I J Skos . 
sbalele" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 289, + 394, + 347 + ], + "lines": [ + { + "bbox": [ + 310, + 289, + 394, + 347 + ], + "spans": [ + { + "bbox": [ + 310, + 289, + 394, + 347 + ], + "type": "image", + "image_path": "0a4ae2e738d729adb8f99baff5ccbd6b65fca462a7a50519b14a191e9f0f4140.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 321, + 360, + 383, + 443 + ], + "blocks": [ + { + "bbox": [ + 334, + 350, + 372, + 358 + ], + "lines": [ + { + "bbox": [ + 334, + 350, + 372, + 358 + ], + "spans": [ + { + "bbox": [ + 334, + 350, + 372, + 358 + ], + "type": "text", + "content": "Queen Esther" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 321, + 360, + 383, + 443 + ], + "lines": [ + { + "bbox": [ + 321, + 360, + 383, + 443 + ], + "spans": [ + { + "bbox": [ + 321, + 360, + 383, + 443 + ], + "type": "image", + "image_path": "d93bc777bd80fc4f9f179d2061dd61206227815e6c4fb979cbf41fdacd355269.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 446, + 390, + 455 + ], + "lines": [ + { + "bbox": [ + 315, + 446, + 390, + 455 + ], + "spans": [ + { + "bbox": [ + 315, + 446, + 390, + 455 + ], + "type": "text", + "content": "A laughing purple porcupine" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 315, + 471, + 386, + 530 + ], + "blocks": [ + { + "bbox": [ + 315, + 471, + 386, + 530 + ], + "lines": [ + { + "bbox": [ + 315, + 471, + 386, + 530 + ], + "spans": [ + { + "bbox": [ + 315, + 471, + 386, + 530 + ], + "type": "image", + "image_path": "2f28dd260535536d8f92c1ce5d7f50bdcea0b806a10fe89c32af00d2eb9b6009.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + 
"bbox": [ + 326, + 563, + 384, + 632 + ], + "blocks": [ + { + "bbox": [ + 321, + 544, + 383, + 552 + ], + "lines": [ + { + "bbox": [ + 321, + 544, + 383, + 552 + ], + "spans": [ + { + "bbox": [ + 321, + 544, + 383, + 552 + ], + "type": "text", + "content": "Monkey eating a pickle" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 326, + 563, + 384, + 632 + ], + "lines": [ + { + "bbox": [ + 326, + 563, + 384, + 632 + ], + "spans": [ + { + "bbox": [ + 326, + 563, + 384, + 632 + ], + "type": "image", + "image_path": "7bce8138d3fa6f6f6b241e0c9e0138b6b369a541e84ca9547078167f600c17a5.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 413, + 178, + 468, + 245 + ], + "blocks": [ + { + "bbox": [ + 422, + 158, + 459, + 165 + ], + "lines": [ + { + "bbox": [ + 422, + 158, + 459, + 165 + ], + "spans": [ + { + "bbox": [ + 422, + 158, + 459, + 165 + ], + "type": "text", + "content": "Alien cartoon" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 413, + 178, + 468, + 245 + ], + "lines": [ + { + "bbox": [ + 413, + 178, + 468, + 245 + ], + "spans": [ + { + "bbox": [ + 413, + 178, + 468, + 245 + ], + "type": "image", + "image_path": "0be22e643cfb8bdaa363741c182b79b3b2ea2c47c10e3a6875a79f141e0be8d2.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 255, + 471, + 263 + ], + "lines": [ + { + "bbox": [ + 410, + 255, + 471, + 263 + ], + "spans": [ + { + "bbox": [ + 410, + 255, + 471, + 263 + ], + "type": "text", + "content": "A lion wearing a T-shirt" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 417, + 274, + 465, + 340 + ], + "blocks": [ + { + "bbox": [ + 417, + 274, + 465, + 340 + ], + "lines": [ + { + "bbox": [ + 417, + 274, + 465, + 340 + ], + "spans": [ + { + 
"bbox": [ + 417, + 274, + 465, + 340 + ], + "type": "image", + "image_path": "f161e048b5a4632054fbcff49ce75e52e6420ffffe061309b99119d2a13e5346.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 418, + 361, + 463, + 440 + ], + "blocks": [ + { + "bbox": [ + 406, + 350, + 475, + 358 + ], + "lines": [ + { + "bbox": [ + 406, + 350, + 475, + 358 + ], + "spans": [ + { + "bbox": [ + 406, + 350, + 475, + 358 + ], + "type": "text", + "content": "One legged striped rabbit" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 418, + 361, + 463, + 440 + ], + "lines": [ + { + "bbox": [ + 418, + 361, + 463, + 440 + ], + "spans": [ + { + "bbox": [ + 418, + 361, + 463, + 440 + ], + "type": "image", + "image_path": "44b85c6aff143613211f3522a9dd7084740c23a7a7d637c5ac4df602395b574d.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 447, + 471, + 455 + ], + "lines": [ + { + "bbox": [ + 410, + 447, + 471, + 455 + ], + "spans": [ + { + "bbox": [ + 410, + 447, + 471, + 455 + ], + "type": "text", + "content": "Dollar bill combing hair" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 399, + 468, + 480, + 533 + ], + "blocks": [ + { + "bbox": [ + 399, + 468, + 480, + 533 + ], + "lines": [ + { + "bbox": [ + 399, + 468, + 480, + 533 + ], + "spans": [ + { + "bbox": [ + 399, + 468, + 480, + 533 + ], + "type": "image", + "image_path": "edbbc96cbb7ace016f65e7c774a92426fc863ff1aaefc1475aadb5c7b099476c.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 544, + 471, + 552 + ], + "lines": [ + { + "bbox": [ + 410, + 544, + 471, + 552 + ], + "spans": [ + { + "bbox": [ + 410, + 544, + 471, + 552 + ], + "type": "text", + "content": "A sloth eating oatmeal" + } + ] + } + ], + "index": 40, 
+ "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 399, + 563, + 482, + 632 + ], + "blocks": [ + { + "bbox": [ + 399, + 563, + 482, + 632 + ], + "lines": [ + { + "bbox": [ + 399, + 563, + 482, + 632 + ], + "spans": [ + { + "bbox": [ + 399, + 563, + 482, + 632 + ], + "type": "image", + "image_path": "5be0ce636497b5303815e129e45479ac35d97418a6af2b5ef66d3a17f92a7290.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 43 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 194, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 194, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 194, + 94 + ], + "type": "text", + "content": "6.1 BACKGROUND" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "content": "Continuous diffusion process Diffusion models are latent variable models that aim to model a distribution " + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x_0)" + }, + { + "bbox": [ + 104, + 102, + 
504, + 159 + ], + "type": "text", + "content": " that approximates the data distribution " + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "inline_equation", + "content": "q(x_0)" + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "content": ". Specifically, they model a forward process in the space of " + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "content": " from data to noise. Given a sample from the data distribution " + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "inline_equation", + "content": "x_0 \\sim q(x_0)" + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "content": ", this process produces a Markov chain of latent variables " + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "inline_equation", + "content": "x_1, \\ldots, x_T" + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "content": " by progressively adding Gaussian noise to the sample:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 219, + 163, + 504, + 177 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 163, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 219, + 163, + 504, + 177 + ], + "type": "interline_equation", + "content": "q \\left(x _ {t} \\mid x _ {t - 1}\\right) := \\mathcal {N} \\left(x _ {t}; \\sqrt {1 - \\beta_ {t}} x _ {t - 1}, \\beta_ {t} \\mathcal {I}\\right) \\tag {1}", + "image_path": "a1182bd55aed6950e3a7c751b294f7aeeb6126f3ed048b07e2d010af7ab35c56.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 180, + 504, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 180, + 504, + 203 + ], + "spans": [ + { + "bbox": [ + 104, + 180, + 504, + 203 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 180, + 504, + 203 + ], + "type": 
"inline_equation", + "content": "\\beta_{t}" + }, + { + "bbox": [ + 104, + 180, + 504, + 203 + ], + "type": "text", + "content": " is a variance schedule. As presented previously by (Ho et al., 2020), the latent variable " + }, + { + "bbox": [ + 104, + 180, + 504, + 203 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 104, + 180, + 504, + 203 + ], + "type": "text", + "content": " can be expressed directly as a linear combination of noise and " + }, + { + "bbox": [ + 104, + 180, + 504, + 203 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 180, + 504, + 203 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 219, + 206, + 504, + 219 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 206, + 504, + 219 + ], + "spans": [ + { + "bbox": [ + 219, + 206, + 504, + 219 + ], + "type": "interline_equation", + "content": "x _ {t} = \\sqrt {\\bar {\\alpha} _ {t}} x _ {0} + \\epsilon \\sqrt {1 - \\bar {\\alpha} _ {t}}, \\quad \\epsilon \\sim \\mathcal {N} (0, \\mathcal {I}) \\tag {2}", + "image_path": "174ec6bc2bf8b4e881a9d7e553cef3c42ba03fd93e70f6b751cc42dee6d68858.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "inline_equation", + "content": "\\alpha_{t} := \\Pi_{i=1}^{t}(1 - \\beta_{i})" + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "content": ". 
In order to sample from the data distribution " + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "inline_equation", + "content": "q(x_0)" + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "content": ", we define the \"reverse process\" " + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "inline_equation", + "content": "p(x_{t-1}|x_t)" + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "content": " which samples first from " + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "inline_equation", + "content": "q(x_T)" + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "content": " and then samples reverse steps " + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "inline_equation", + "content": "q(x_{t-1}|x_t)" + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "content": " until " + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 222, + 504, + 257 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "text", + "content": "Since the data distribution is unknown, we need to train a model to approximate it. Note that when " + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "text", + "content": " is large enough, the noise vector " + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "inline_equation", + "content": "x_{T}" + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "text", + "content": " nearly follows an isotropic Gaussian distribution. 
This suggests learning a model " + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x_{t - 1}|x_t)" + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "text", + "content": " to predict mean " + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "inline_equation", + "content": "\\mu_{\\theta}" + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "text", + "content": " and covariance matrix " + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "inline_equation", + "content": "\\Sigma_{\\theta}" + }, + { + "bbox": [ + 104, + 261, + 505, + 295 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 211, + 299, + 504, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 299, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 211, + 299, + 504, + 312 + ], + "type": "interline_equation", + "content": "p _ {\\theta} \\left(x _ {t - 1} \\mid x _ {t}\\right) := \\mathcal {N} \\left(x _ {t - 1}; \\mu_ {\\theta} \\left(x _ {t}, t\\right), \\Sigma_ {\\theta} \\left(x _ {t}, t\\right)\\right) \\tag {3}", + "image_path": "bdf1efea9b9783ed1ce91931ebfbfdbdcd2f9eff367ece4613306935f5e7aad5.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": "To train this model, we can replace " + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "inline_equation", + "content": "\\mu_{\\theta}(x_t,t)" + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": " by predicting the noise " + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}(x_t,t)" + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": 
"text", + "content": " added to " + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 315, + 504, + 337 + ], + "type": "text", + "content": " using equation 2 and we get this objective function:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 201, + 340, + 504, + 355 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 340, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 201, + 340, + 504, + 355 + ], + "type": "interline_equation", + "content": "L := E _ {t \\sim [ 1, T ], x _ {0} \\sim q (x _ {0}), \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I})} [ \\| \\epsilon - \\epsilon_ {\\theta} (x _ {t}, t, y) \\| ^ {2} ] \\tag {4}", + "image_path": "43a9eb4d501c474740bdc5c328d87f2ac744baf2cc8a89b2bdc9e0ede84338fc.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 357, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 504, + 369 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 357, + 504, + 369 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 357, + 504, + 369 + ], + "type": "text", + "content": " is an optional conditioning signal (such as text/image embedding or a low resolution image)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "text", + "content": "Discrete diffusion process Let " + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "inline_equation", + "content": "x_{n}\\in \\{1,\\ldots ,V\\}^{h\\times w}" + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "text", + "content": " be the indices of the allocated codebook vectors extracted by a pre-trained VQGAN (Esser et al., 2021) encoder. The forward process of a diffusion model " + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "inline_equation", + "content": "q(x_{n}|x_{n - 1})" + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "text", + "content": " is a Markov chain that adds noise at each step. Moreover, the reverse process " + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "inline_equation", + "content": "q(x_{n - 1}|x_n,x_0)" + }, + { + "bbox": [ + 104, + 380, + 504, + 437 + ], + "type": "text", + "content": ", is a denoising process that removes noise from an initialized noise state. 
As presented by (Gu et al., 2021), the forward diffusion process is given by:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 233, + 450, + 504, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 450, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 233, + 450, + 504, + 464 + ], + "type": "interline_equation", + "content": "q \\left(x _ {n} \\mid x _ {n - 1}\\right) = v ^ {T} \\left(x _ {n}\\right) \\mathbf {Q} _ {n} v \\left(x _ {n - 1}\\right) \\tag {5}", + "image_path": "6306c1e98a256336441d7e695eecebe77a2f5e8ee40e1e9f02314d7e70cf2378.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "inline_equation", + "content": "v(x_{n})" + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "text", + "content": " is a one-hot vector with entry 1 at " + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "inline_equation", + "content": "x_{n}" + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_n" + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "text", + "content": " is the probability transition matrix from state " + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "inline_equation", + "content": "x_{n-1}" + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "inline_equation", + "content": "x_{n}" + }, + { + "bbox": [ + 104, + 466, + 504, + 488 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 494, + 338, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 494, + 338, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 338, + 506 + ], + "type": "text", + "content": "The reverse process is given by the posterior distribution:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 173, + 508, + 504, + 537 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 508, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 173, + 508, + 504, + 537 + ], + "type": "interline_equation", + "content": "q \\left(x _ {n - 1} \\mid x _ {n}, x _ {0}\\right) = \\frac {\\left(v ^ {T} \\left(x _ {n}\\right) \\mathbf {Q} _ {n} v \\left(x _ {n - 1}\\right)\\right) \\left(v ^ {T} \\left(x _ {n - 1}\\right) \\bar {\\mathbf {Q}} _ {n - 1} v \\left(x _ {0}\\right)\\right)}{v ^ {T} \\left(x _ {n}\\right) \\bar {\\mathbf {Q}} _ {n} v \\left(x _ {0}\\right)} \\tag {6}", + "image_path": "cb6269258a7313a6bcf638625243050e19613f0f97bb7d8b90e19e95df65e61e.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 540, + 204, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 540, + 204, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 204, + 553 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 540, + 204, + 553 + ], + "type": "inline_equation", + "content": "\\bar{\\mathbf{Q}}_n = \\mathbf{Q}_n \\cdots \\mathbf{Q}_1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 557, + 505, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 505, + 613 + ], + "type": "text", + "content": "Inspired by mask language modeling (Devlin et al., 2018), they propose corrupting the tokens by stochastically masking some of them. 
Specifically, an additional special token [MASK] is proposed, so for each token there are " + }, + { + "bbox": [ + 104, + 557, + 505, + 613 + ], + "type": "inline_equation", + "content": "(\\mathrm{V} + 1)" + }, + { + "bbox": [ + 104, + 557, + 505, + 613 + ], + "type": "text", + "content": " discrete states. By adding a small amount of uniform noise to the categorical distribution, the transition matrix can be formulated as," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 201, + 627, + 504, + 690 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 627, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 201, + 627, + 504, + 690 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} _ {n} = \\left[ \\begin{array}{c c c c c} \\alpha_ {n} + \\beta_ {n} & \\beta_ {n} & \\beta_ {n} & \\dots & 0 \\\\ \\beta_ {n} & \\alpha_ {n} + \\beta_ {n} & \\beta_ {n} & \\dots & 0 \\\\ \\beta_ {n} & \\beta_ {n} & \\alpha_ {n} + \\beta_ {n} & \\dots & 0 \\\\ \\vdots & \\vdots & \\vdots & \\ddots & \\vdots \\\\ \\gamma_ {n} & \\gamma_ {n} & \\gamma_ {n} & \\dots & 1 \\end{array} \\right] \\tag {7}", + "image_path": "c9e5d3dc8c98da40fe988ea2bb88c28d8a32fffe482a6e7c0db9f0bd96eab7d8.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\alpha_{n}\\in [0,1],\\beta_{n} = (1 - \\alpha_{n} - \\gamma_{n}) / V" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + 
"content": " the probability of a token to be replaced with a [MASK] token. Each token has a probability of " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\gamma_{n}" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " to be replaced by the [MASK] token, " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "V\\beta_{n}" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " to be resampled uniformly and " + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\alpha_{n} = (1 - V\\beta_{n} - \\gamma_{n})" + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": " to be unchanged." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 231, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 231, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 231, + 94 + ], + "type": "text", + "content": "6.2 ADDITIONAL SAMPLES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 160 + ], + "spans": [ 
+ { + "bbox": [ + 104, + 102, + 506, + 160 + ], + "type": "text", + "content": "In Fig. 16 and 15 we present a visual comparison of our discrete model, trained on the stickers dataset with (1) the kNN extracted during inference, (2) the same model without using kNN in inference. As can be seen, the images generated by our model are better aligned to the corresponding text compared to the baselines. While the baselines fail with challenging prompts, our model produces high-quality images that align with the text, and composes multiple concepts correctly." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 170, + 506, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 170, + 506, + 226 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 506, + 226 + ], + "type": "text", + "content": "COCO Validation Set Comparison Fig. 11 presents a qualitative comparison with FuseDream (Liu et al., 2021), CogView (Ding et al., 2021) and VQ-Diffusion (Gu et al., 2021) on the COCO validation set. Note that both CogView and VQ-Diffusion have been trained on an Image-Text paired dataset, whereas our model was not trained on the COCO dataset, nor used it in the retrieval model." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 231, + 504, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 504, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 504, + 244 + ], + "type": "text", + "content": "Additional samples generated from challenging text inputs are provided in Figs. 13, 14 and Fig. 12." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 111, + 247, + 504, + 415 + ], + "blocks": [ + { + "bbox": [ + 111, + 247, + 504, + 415 + ], + "lines": [ + { + "bbox": [ + 111, + 247, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 111, + 247, + 504, + 415 + ], + "type": "image", + "image_path": "277eed8ca882f0c8f04faf312ffa99177e145e6f3061c130d08e57099f5ddbc5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 111, + 462, + 504, + 659 + ], + "blocks": [ + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "text", + "content": "Figure 15: Comparison of our model, trained on PMD with (1) kNN extracted in inference, (2) the same model without using kNN in inference. While the kNN lack information regarding text semantics, our model considers both text semantics and the kNN, thus proving the advantage of using both the text and the kNN embeddings." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 111, + 462, + 504, + 659 + ], + "lines": [ + { + "bbox": [ + 111, + 462, + 504, + 659 + ], + "spans": [ + { + "bbox": [ + 111, + 462, + 504, + 659 + ], + "type": "image", + "image_path": "ba3a5906a0e0a6b9949bcb6ffdaf71182a42f6f3afe81f7fe4b7b5faaa88c183.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 667, + 504, + 689 + ], + "lines": [ + { + "bbox": [ + 104, + 667, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 504, + 689 + ], + "type": "text", + "content": "Figure 16: Qualitative comparison of stickers generated using the discrete kNN-Diffusion model, 10 Nearest Neighbors to the text in the CLIP embedding and a discrete model that does not use kNN." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 273, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 273, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 273, + 94 + ], + "type": "text", + "content": "6.3 HUMAN EVALUATION PROTOCOL" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 504, + 159 + ], + "type": "text", + "content": "For all of our human evaluation experiments, we used Amazon Mechanical Turk. For each experiment, we used 600 samples, each scored by five different people. The preferred sample was determined according to majority opinion. 
For each baseline comparison, we asked two questions (in different experiments): \"Which image is of a higher quality?\" and \"Which image best matches the text?\"" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 171, + 177, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 171, + 177, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 177, + 182 + ], + "type": "text", + "content": "6.4 DATASETS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 192, + 504, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 192, + 504, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 504, + 260 + ], + "type": "text", + "content": "The modified PMD dataset is composed of the following set of publicly available text-image datasets: SBU Captions (Ordonez et al., 2011), Localized Narratives (Pont-Tuset et al., 2020), Conceptual Captions (Sharma et al., 2018), Visual Genome (Krishna et al., 2016), Wikipedia Image Text (Srinivasan et al., 2021), Conceptual Captions 12M (Changpinyo et al., 2021), Red Caps (Desai et al., 2021), and a filtered version of YFCC100M (Thomee et al., 2015). In total, the dataset contains 69 million text-image pairs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 272, + 209, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 272, + 209, + 283 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 209, + 283 + ], + "type": "text", + "content": "6.5 ABLATION STUDY" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": "Index size As one can expect, increasing the index size at inference time improves performance. 
To demonstrate this hypothesis, we evaluated our model with an index containing " + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": " images of PMD dataset, and obtained FID scores of 13.92, 13.85, 13.72, and 13.65 respectively." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 331, + 505, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 505, + 399 + ], + "type": "text", + "content": "kNN conditioning We examined several different approaches to kNN input conditioning: (i) forwarding the kNN embeddings and the single image embedding through a self-attention layer before feeding the contextualized " + }, + { + "bbox": [ + 104, + 331, + 505, + 399 + ], + "type": "inline_equation", + "content": "K + 1" + }, + { + "bbox": [ + 104, + 331, + 505, + 399 + ], + "type": "text", + "content": " embeddings to the model, (ii) feeding the model with one embedding, computed using cross-attention between the image embedding and the kNN embeddings, and, (iii) feeding the model with the image embedding concatenated with a learned linear projection of the kNN embeddings. These variants received FID scores of 18.3, 22.4, 34.1 respectively." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 411, + 217, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 217, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 217, + 423 + ], + "type": "text", + "content": "6.6 RETRIEVAL MODEL" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 431, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 431, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 431, + 504, + 498 + ], + "type": "text", + "content": "The retrieval model is implemented using FAISS (Johnson et al., 2019). FAISS is an efficient database, capable of storing billions of elements and finding their nearest neighbors in milliseconds. In the pre-processing phase, for each image in the dataset, we store the image index and its corresponding CLIP image embedding. During training, given a training image, we extract its CLIP image embedding and search for its 10 (see Fig. 9) nearest neighbors in the dataset based on the cosine similarity distance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 503, + 505, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 503, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 503, + 505, + 602 + ], + "type": "text", + "content": "For an efficient search during training and inference, we use a non-exhaustive search: For this, we use an inverted file index. 
As in Babenko & Lempitsky (2014), we define Voronoi cells in the " + }, + { + "bbox": [ + 104, + 503, + 505, + 602 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 503, + 505, + 602 + ], + "type": "text", + "content": "-dimensional space (where " + }, + { + "bbox": [ + 104, + 503, + 505, + 602 + ], + "type": "inline_equation", + "content": "d = 512" + }, + { + "bbox": [ + 104, + 503, + 505, + 602 + ], + "type": "text", + "content": " is the CLIP embedding dimensional space), s.t each database vector falls in one of the cells. During search time, only the embeddings contained in the cell the query falls in and a few neighboring ones are compared against the query vector. In addition, to fit the index of our large-scale datasets on a 128GB RAM server, we compress the code size from " + }, + { + "bbox": [ + 104, + 503, + 505, + 602 + ], + "type": "inline_equation", + "content": "512 \\times 32/8 = 2048" + }, + { + "bbox": [ + 104, + 503, + 505, + 602 + ], + "type": "text", + "content": " Bytes to 256 Bytes using optimized product quantization (Ge et al., 2013; Jegou et al., 2010). In Algorithm 1 we include pseudocode of the core of the implementation of the retrieval database." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 616, + 234, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 234, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 234, + 628 + ], + "type": "text", + "content": "6.7 DISCRETE KNN MODEL" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 636, + 504, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 636, + 504, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 636, + 504, + 659 + ], + "type": "text", + "content": "We provide additional implementation details for the discrete diffusion model. Additional training details can be found in Tab. 3." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 671, + 504, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 671, + 504, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 671, + 504, + 706 + ], + "type": "text", + "content": "Vector Quantization For token quantization, we use VQ-VAE and adapt the publicly available VQGAN(Esser et al., 2021) model, trained on the OpenImages(Krasin et al., 2016) dataset. The encoder downsamples images to " + }, + { + "bbox": [ + 104, + 671, + 504, + 706 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 104, + 671, + 504, + 706 + ], + "type": "text", + "content": " tokens and uses a codebook vocabulary with 2887 elements." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "content": "Image Tokenization In our discrete generative model we model images as a sequence of discrete tokens. To this end, we utilize a vector-quantized variational auto-encoder (VQ-VAE) (Van Den Oord et al., 2017) as image tokenizer. 
VQ-VAE consists of three components: (i) an encoder, (ii) a learned codebook, and, (iii) a decoder. Given an image, the encoder extracts a latent representation. The codebook then maps each latent vector representation to its nearest vector in the codebook. Finally, the decoder reconstructs the image from the codebook representation. VQ-VAE is trained with the objectives of reconstruction and codebook learning. VQ-GAN (Esser et al., 2021) adds an adversarial loss term that tries to determine whether the generated image is fake or real. This added term was shown to improve reconstruction quality." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 193, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 504, + 239 + ], + "type": "text", + "content": "Transformer We follow Gu et al. (2021) and train a decoder-only Transformer. The decoder module contains 24 transformer blocks, each containing full attention, cross-attention for the concatenated conditioner, and a feed-forward network. The timestamp " + }, + { + "bbox": [ + 104, + 193, + 504, + 239 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 193, + 504, + 239 + ], + "type": "text", + "content": " is injected using Adaptive Layer Normalization (Ba et al., 2016). The decoder contains 400 million parameters." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "text", + "content": "Classifier-free guidance We sample our diffusion models using classifier-free guidance (CFG) (Ho & Salimans, 2021; Nichol et al., 2021; Ramesh et al., 2022). CFG is performed by extrapolating an unconditional sample in the direction of a conditional sample. 
To support unconditional sampling, previous work had to fine-tune (Nichol et al., 2021) their models with " + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "text", + "content": " of the conditional features nullified. This enabled them to sample unconditional images from the model using the null condition, " + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "inline_equation", + "content": "y' = \\vec{0}" + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "text", + "content": ", the null vector. We found that we can generate unconditional samples from our model using null conditioning without fine-tuning it. We hypothesize that by conditioning the model on a null vector, the cross-attention component is also nullified, resulting in no contribution to the diffusion process. During inference, in each step of the diffusion process we generate two images: conditional image logits, " + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x_{n-1}|x_n,y)" + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "text", + "content": ", conditioned on the desired multi-modal embedding " + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "text", + "content": ", and the unconditional image logits, " + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x_{n-1}|x_n,y')" + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "text", + "content": ", conditioned on the null embedding. 
Then, the final image for a diffusion step " + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 250, + 506, + 386 + ], + "type": "text", + "content": " is sampled from" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 188, + 390, + 419, + 419 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 390, + 419, + 419 + ], + "spans": [ + { + "bbox": [ + 188, + 390, + 419, + 419 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y\\right) = p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y ^ {\\prime}\\right) + \\\\ \\lambda \\left(p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y\\right) - p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, y ^ {\\prime}\\right)\\right) \\\\ \\end{array}", + "image_path": "2032b79a75ac70359d072667353cfc006b0e82438b4756eb1f99031a90309995.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 424, + 504, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 424, + 504, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 424, + 504, + 459 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 424, + 504, + 459 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 424, + 504, + 459 + ], + "type": "text", + "content": " is a scale coefficient. In all of our experiments, we set " + }, + { + "bbox": [ + 104, + 424, + 504, + 459 + ], + "type": "inline_equation", + "content": "\\lambda = 8" + }, + { + "bbox": [ + 104, + 424, + 504, + 459 + ], + "type": "text", + "content": ", which was found to yield the highest FID scores on the validation set. 
Note that the above extrapolation occurs directly on the logits output by " + }, + { + "bbox": [ + 104, + 424, + 504, + 459 + ], + "type": "inline_equation", + "content": "p_{\\theta}" + }, + { + "bbox": [ + 104, + 424, + 504, + 459 + ], + "type": "text", + "content": ", in contrast to GLIDE (Nichol et al., 2021), which extrapolates the pixel values." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 469, + 504, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 504, + 493 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 504, + 493 + ], + "type": "text", + "content": "Training Objective For completeness we are adding the training objective of the discrete model. The network is trained to minimize the variational lower bound (VLB):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 162, + 498, + 504, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 498, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 162, + 498, + 504, + 552 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {v l b}} = \\mathcal {L} _ {0} + \\mathcal {L} _ {1} + \\dots + \\mathcal {L} _ {N - 1} + \\mathcal {L} _ {N}, \\\\ \\mathcal {L} _ {0} = - \\log p _ {\\theta} \\left(x _ {0} \\mid x _ {1}, f _ {i m g} (I), \\operatorname {k n n} _ {i m g} (\\mathrm {I}, k)\\right), \\\\ \\mathcal {L} _ {n - 1} = D _ {K L} \\left(q \\left(x _ {n - 1} \\mid x _ {n}, x _ {0}\\right) | | p _ {\\theta} \\left(x _ {n - 1} \\mid x _ {n}, f _ {i m g} (I), \\operatorname {k n n} _ {i m g} (\\mathrm {I}, k)\\right)\\right), \\tag {8} \\\\ \\mathcal {L} _ {N} = D _ {K L} \\left(q \\left(x _ {N} \\mid x _ {0}\\right) | | p \\left(x _ {N}\\right)\\right) \\\\ \\end{array}", + "image_path": "851c1904df31e24480b0313c64210ac3dd16c72acb3794dc55004546c9dc6b5a.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ 
+ { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "text", + "content": "Where " + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "inline_equation", + "content": "p(\\pmb{x}_N)" + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "text", + "content": " is the prior distribution of timestep " + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "inline_equation", + "content": "N = 100" + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "inline_equation", + "content": "f_{img}(I)" + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "text", + "content": " is the CLIP image embedding, " + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "inline_equation", + "content": "\\mathrm{knn}_{img}(\\mathbf{I}, k)" + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 558, + 504, + 593 + ], + "type": "text", + "content": " nearest neighbors in the feature space of the image embedding. The full details can be found in Gu et al. (2021)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 605, + 248, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 605, + 248, + 616 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 248, + 616 + ], + "type": "text", + "content": "6.8 CONTINUOUS KNN MODEL" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "type": "text", + "content": "We provide additional implementation details for the continuous diffusion model. 
Additional training details can be found in Tab. 3." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 661, + 506, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 661, + 506, + 728 + ], + "spans": [ + { + "bbox": [ + 104, + 661, + 506, + 728 + ], + "type": "text", + "content": "Decoder. We followed (Nichol et al., 2021; Ho et al., 2020; Ramesh et al., 2022) and re-implemented a diffusion " + }, + { + "bbox": [ + 104, + 661, + 506, + 728 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 104, + 661, + 506, + 728 + ], + "type": "text", + "content": "-net model. Specifically, we modify the architecture described in (Ramesh et al., 2022) by allowing multiple CLIP embeddings as the condition to the model. Since we do not have a paired text-image dataset, we removed the text transformer, and thus the text embedding. In particular, we use 512 convolution channels, 3 residual blocks, 64 heads channels and attention resolution of 32, 16 and 8. 
Similarly to our discrete model, we trained two models (1)" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 207, + 89, + 296, + 186 + ], + "blocks": [ + { + "bbox": [ + 207, + 89, + 296, + 186 + ], + "lines": [ + { + "bbox": [ + 207, + 89, + 296, + 186 + ], + "spans": [ + { + "bbox": [ + 207, + 89, + 296, + 186 + ], + "type": "image", + "image_path": "28755241c65e0e8da754a4827c3a420acf9c3c1a30d4e4d6dd8a1ada740b7f03.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 229, + 188, + 274, + 199 + ], + "lines": [ + { + "bbox": [ + 229, + 188, + 274, + 199 + ], + "spans": [ + { + "bbox": [ + 229, + 188, + 274, + 199 + ], + "type": "text", + "content": "(a) Training" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 314, + 89, + 404, + 186 + ], + "blocks": [ + { + "bbox": [ + 314, + 89, + 404, + 186 + ], + "lines": [ + { + "bbox": [ + 314, + 89, + 404, + 186 + ], + "spans": [ + { + "bbox": [ + 314, + 89, + 404, + 186 + ], + "type": "image", + "image_path": "457362ea822a3328b76c5dadce48dfd63973fb77bd0f84acd1b58b9060785427.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 334, + 188, + 
383, + 198 + ], + "lines": [ + { + "bbox": [ + 334, + 188, + 383, + 198 + ], + "spans": [ + { + "bbox": [ + 334, + 188, + 383, + 198 + ], + "type": "text", + "content": "(b) Inference" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 205, + 504, + 247 + ], + "lines": [ + { + "bbox": [ + 104, + 205, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 504, + 247 + ], + "type": "text", + "content": "Figure 17: During training, only the image I is given (red), whereas during inference only the text " + }, + { + "bbox": [ + 104, + 205, + 504, + 247 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 205, + 504, + 247 + ], + "type": "text", + "content": " is given (blue). In order to bridge the gap between the two distributions during training, we leverage the K nearest neighbors that should have a large enough distribution (dashed cone) to cover the potential text embedding (i.e. " + }, + { + "bbox": [ + 104, + 205, + 504, + 247 + ], + "type": "inline_equation", + "content": "\\cos(b) < \\cos(a)" + }, + { + "bbox": [ + 104, + 205, + 504, + 247 + ], + "type": "text", + "content": "). During inference, the opposite is applied." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "text", + "content": "a no-kNN conditioned only on CLIP image embedding during training, (2) a kNN conditioned on CLIP image embedding and its kNN. Finally, we enable classifier-free guidance by randomly setting the CLIP embeddings to zero " + }, + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "text", + "content": " of the time. 
As demonstrated in Tab. 2, we find that humans prefer our model over no-kNN " + }, + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "inline_equation", + "content": "66.8\\%" + }, + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "text", + "content": " of the time for image quality and " + }, + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "inline_equation", + "content": "69.4\\%" + }, + { + "bbox": [ + 104, + 255, + 504, + 312 + ], + "type": "text", + "content": " of the time for text alignment." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "text", + "content": "Super-Resolution. As the decoder generates images with " + }, + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "text", + "content": " resolution, we up-sampled the images to " + }, + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "text", + "content": " using the open-source super resolution of (Nichol et al., 2021). To further up-sample the images to " + }, + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 104, + 323, + 504, + 369 + ], + "type": "text", + "content": " we used the open-source super resolution provided by (Wang et al., 2021)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 380, + 504, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 413 + ], + "type": "text", + "content": "Training Objectives For completeness we are adding the training objective of our continuous model. Following Ho et al. (2020); Nichol et al. (2021) we are using mean-squared error loss to predict the noise:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 195, + 412, + 413, + 426 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 412, + 413, + 426 + ], + "spans": [ + { + "bbox": [ + 195, + 412, + 413, + 426 + ], + "type": "interline_equation", + "content": "L := E _ {n \\sim [ 1, N ], x _ {0} \\sim q (x _ {0}), \\epsilon \\sim \\mathcal {N} (0, \\mathbf {I})} [ | | \\epsilon - \\epsilon_ {\\theta} (x _ {n}, n, y) | | ^ {2} ]", + "image_path": "4bd500ed2a5f3510bc18188446ef6fbe8a64e9361ed944f826de4eb30dba1116.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 428, + 372, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 428, + 372, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 372, + 442 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 428, + 372, + 442 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 105, + 428, + 372, + 442 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 105, + 428, + 372, + 442 + ], + "type": "inline_equation", + "content": "U - net" + }, + { + "bbox": [ + 105, + 428, + 372, + 442 + ], + "type": "text", + "content": " model and " + }, + { + "bbox": [ + 105, + 428, + 372, + 442 + ], + "type": "inline_equation", + "content": "y = (f_{img}(x_0),\\mathrm{knn}_{img}(x_0,k))" + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 189, + 447, + 421, + 640 + ], + "blocks": [ + { + "bbox": [ 
+ 189, + 447, + 421, + 640 + ], + "lines": [ + { + "bbox": [ + 189, + 447, + 421, + 640 + ], + "spans": [ + { + "bbox": [ + 189, + 447, + 421, + 640 + ], + "type": "table", + "html": "
DiscreteContinuous
Number of nearest neighbors1010
Diffusion steps1001000
Noise schedule-cosine
Sampling steps100250
Model size400M1B
Sampling variance method-analytic
Dropout-0.1
Weight decay4.5e-2-
Batch size5121600
Iterations150K500K
Learning rate4.05-41.4e-4
optimizerAdamWAdamW
Adam β20.960.9999
Adam ε1.0e-81.0e-8
EMA decay0.990.9999
warmup500025000
# GPUs128 A100200 A100
", + "image_path": "5fff3164548c444e932c6588b815dd9522fe373b4be8b35c453e837eef6ca91d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 232, + 644, + 377, + 655 + ], + "lines": [ + { + "bbox": [ + 232, + 644, + 377, + 655 + ], + "spans": [ + { + "bbox": [ + 232, + 644, + 377, + 655 + ], + "type": "text", + "content": "Table 3: Training details of our models" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 105, + 228, + 504, + 617 + ], + "blocks": [ + { + "bbox": [ + 105, + 194, + 504, + 218 + ], + "lines": [ + { + "bbox": [ + 105, + 194, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 504, + 218 + ], + "type": "text", + "content": "Algorithm 1 Pseudo-code implementation for the construction of the retrieval model, training and sampling using conditioning kNN." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "lines": [ + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": "Retrieval model construction \ndef training(batch:train image dataset): 1 //inverted index of 50k centroids, 2 //with optimized product quantization to 256B index_cfg " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " \"OPQ256_IVF50000_PQ256x8\" 4 index " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " faiss.indexFACTORY(d,idx_cfg,faiss.METRIC INNER_PRODUCT) 5 ivf " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " faiss.extract_index_ivf(index) 6 clustering_index " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " faiss.index_cpu_to_all_gpus(faiss.IndexFlatIP(d))7 ivf.clusterbing_index " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " clustering_index 8 train_dataset " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " [] for image in random.sample(batch,1000000): 10 train_dataset.append(CLIP_image_embedding(image)) 11 index.train(train_dataset) 12 for image in dataset: 
index.add(CLIP_imageEncoder(image)) 14 return index Training \ndef training(I:FAISS index, image, k:Number of NN, t:timestamp [0,T-1]): image_encoding " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " CLIP_imageEncoder(image) 2 kNN " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " I.search(image_encoding,k) condition " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " concatenate([image_encoding,kNN]) 4 image_T " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " add_noise(image,t) 5 image_0 " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " diffusion_model(image_T,t,condition) loss " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " criterion(imageO, image) 7 return loss 9 Sampling \ndef sampling(I:FAISS index,text,k:Number of NN): 1 text_encoding " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " CLIP_textEncoder(text) 2 kNN " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " 
I.search(text_encoding,k) condition " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " concatenate([text_encoding,kNN]) 4 image " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " sample_noise(T) for t in [T-1,T-2,...,0]: image " + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 105, + 228, + 504, + 617 + ], + "type": "text", + "content": " diffusion_model(image,t,condition) return image 8" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "csv" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 282, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 282, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 282, + 94 + ], + "type": "text", + "content": "6.9 TEXT-ONLY IMAGE MANIPULATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, 
+ 506, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 138 + ], + "type": "text", + "content": "Our approach is illustrated in Fig. 18. Additional manipulation examples are provided in Figs. 20. The full comparison with the baselines is provided in Fig. 21 and 22. We also provide in Fig. 19 several examples for the process of the manipulated images construction." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 152, + 505, + 369 + ], + "blocks": [ + { + "bbox": [ + 106, + 152, + 505, + 369 + ], + "lines": [ + { + "bbox": [ + 106, + 152, + 505, + 369 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 505, + 369 + ], + "type": "image", + "image_path": "fea036c0c8e5389608ac52d49721467649003b1a0a35755855ba57e202c713a4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 380, + 504, + 441 + ], + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 441 + ], + "type": "text", + "content": "Figure 18: An illustration of our manipulation approach. During training: Given a training image (1), the model extracts its first nearest neighbor (2). Next, a random local area in the training image is selected (3), and the manipulated image is constructed by replacing the area with the corresponding nearest neighbor (4). The model then receives as input the manipulated image and the clip embedding of the local area that needs to be restored (5). During inference: Given an input image and a text query \"A face of a male child\", the model receives as input the image (4) and the clip embedding of the modifying text (5)." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 260, + 508, + 491 + ], + "blocks": [ + { + "bbox": [ + 106, + 260, + 508, + 491 + ], + "lines": [ + { + "bbox": [ + 106, + 260, + 508, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 260, + 508, + 491 + ], + "type": "image", + "image_path": "7794468ff681a8636985d6cb84fc05bddf2664f668a9bde3664e76197117ba7a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 501, + 506, + 552 + ], + "lines": [ + { + "bbox": [ + 104, + 501, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 506, + 552 + ], + "type": "text", + "content": "Figure 19: Illustration of the manipulated image construction process during training. Given an original image, we select a random local area, and extract the first nearest neighbor (1-NN). Using ECC alignment, we align the nearest neighbor with the original image and replace the random local area with its corresponding nearest neighbor local area. The model then receives as input the manipulated image, together with the CLIP embedding of the local area, and tries to predict the original image." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 191, + 145, + 253 + ], + "blocks": [ + { + "bbox": [ + 110, + 191, + 145, + 253 + ], + "lines": [ + { + "bbox": [ + 110, + 191, + 145, + 253 + ], + "spans": [ + { + "bbox": [ + 110, + 191, + 145, + 253 + ], + "type": "image", + "image_path": "64a5fed58572e933e1150d81175adf677378bf0bcf8c2b8208d133c439fb9bba.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 116, + 254, + 138, + 260 + ], + "lines": [ + { + "bbox": [ + 116, + 254, + 138, + 260 + ], + "spans": [ + { + "bbox": [ + 116, + 254, + 138, + 260 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 176, + 193, + 216, + 252 + ], + "blocks": [ + { + "bbox": [ + 176, + 193, + 216, + 252 + ], + "lines": [ + { + "bbox": [ + 176, + 193, + 216, + 252 + ], + "spans": [ + { + "bbox": [ + 176, + 193, + 216, + 252 + ], + "type": "image", + "image_path": "e57a90ca121854cb31db0c943a4187b5d80ce956d8c7666149a472f68afb6666.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 176, + 
254, + 218, + 260 + ], + "lines": [ + { + "bbox": [ + 176, + 254, + 218, + 260 + ], + "spans": [ + { + "bbox": [ + 176, + 254, + 218, + 260 + ], + "type": "text", + "content": "Raising left hand" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 219, + 193, + 265, + 253 + ], + "blocks": [ + { + "bbox": [ + 219, + 193, + 265, + 253 + ], + "lines": [ + { + "bbox": [ + 219, + 193, + 265, + 253 + ], + "spans": [ + { + "bbox": [ + 219, + 193, + 265, + 253 + ], + "type": "image", + "image_path": "26dd6911e16ef1181ae7585e4a7d16bad41cf406d412b5c6f77b269896702d26.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 229, + 254, + 264, + 260 + ], + "lines": [ + { + "bbox": [ + 229, + 254, + 264, + 260 + ], + "spans": [ + { + "bbox": [ + 229, + 254, + 264, + 260 + ], + "type": "text", + "content": "Raising hands" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 275, + 193, + 304, + 252 + ], + "blocks": [ + { + "bbox": [ + 275, + 193, + 304, + 252 + ], + "lines": [ + { + "bbox": [ + 275, + 193, + 304, + 252 + ], + "spans": [ + { + "bbox": [ + 275, + 193, + 304, + 252 + ], + "type": "image", + "image_path": "2b420541183788d11f05a6dd90a27676dc433e5a8ae13ac62205ad1f6ab272bb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 279, + 254, + 306, + 260 + ], + "lines": [ + { + "bbox": [ + 279, + 254, + 306, + 260 + ], + "spans": [ + { + "bbox": [ + 279, + 254, + 306, + 260 + ], + "type": "text", + "content": "Blue pants" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 320, + 193, + 347, + 252 + ], + "blocks": [ + { + "bbox": [ + 320, + 193, + 347, + 252 + ], + "lines": [ + { + "bbox": [ + 320, + 193, + 347, + 252 + ], + "spans": [ + { + "bbox": [ 
+ 320, + 193, + 347, + 252 + ], + "type": "image", + "image_path": "7b03a21491eb59ae39c6b88af308ccde062f214bbf50ac3e8838ba9cf165e0ac.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 254, + 353, + 260 + ], + "lines": [ + { + "bbox": [ + 324, + 254, + 353, + 260 + ], + "spans": [ + { + "bbox": [ + 324, + 254, + 353, + 260 + ], + "type": "text", + "content": "Black shirt" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 361, + 193, + 392, + 252 + ], + "blocks": [ + { + "bbox": [ + 361, + 193, + 392, + 252 + ], + "lines": [ + { + "bbox": [ + 361, + 193, + 392, + 252 + ], + "spans": [ + { + "bbox": [ + 361, + 193, + 392, + 252 + ], + "type": "image", + "image_path": "6f17dfadca4040e3ae828e936f4f65e868855ec998f18f5759654998702905cc.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 254, + 394, + 260 + ], + "lines": [ + { + "bbox": [ + 361, + 254, + 394, + 260 + ], + "spans": [ + { + "bbox": [ + 361, + 254, + 394, + 260 + ], + "type": "text", + "content": "Holds a heart" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 408, + 193, + 436, + 252 + ], + "blocks": [ + { + "bbox": [ + 408, + 193, + 436, + 252 + ], + "lines": [ + { + "bbox": [ + 408, + 193, + 436, + 252 + ], + "spans": [ + { + "bbox": [ + 408, + 193, + 436, + 252 + ], + "type": "image", + "image_path": "d635cdd4e77493bacbd9cd0b94c4f8abcece23f662a43aeb3c2b7170735a1fcc.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 414, + 254, + 436, + 260 + ], + "lines": [ + { + "bbox": [ + 414, + 254, + 436, + 260 + ], + "spans": [ + { + "bbox": [ + 414, + 254, + 436, + 260 + ], + "type": "text", + "content": "Princess" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + 
"index": 13 + }, + { + "type": "image", + "bbox": [ + 457, + 193, + 492, + 252 + ], + "blocks": [ + { + "bbox": [ + 457, + 193, + 492, + 252 + ], + "lines": [ + { + "bbox": [ + 457, + 193, + 492, + 252 + ], + "spans": [ + { + "bbox": [ + 457, + 193, + 492, + 252 + ], + "type": "image", + "image_path": "eb8bcca698bf235668fac7b6d2090d32ead07adf165c7601b98c576956e8b2d6.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 468, + 254, + 486, + 260 + ], + "lines": [ + { + "bbox": [ + 468, + 254, + 486, + 260 + ], + "spans": [ + { + "bbox": [ + 468, + 254, + 486, + 260 + ], + "type": "text", + "content": "Sitting" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 111, + 266, + 150, + 306 + ], + "blocks": [ + { + "bbox": [ + 111, + 266, + 150, + 306 + ], + "lines": [ + { + "bbox": [ + 111, + 266, + 150, + 306 + ], + "spans": [ + { + "bbox": [ + 111, + 266, + 150, + 306 + ], + "type": "image", + "image_path": "2ba6fa05045477cf601aedf5449da7f19fa33ee5d3ae03726fe1f5a06587b48c.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 113, + 311, + 150, + 357 + ], + "blocks": [ + { + "bbox": [ + 113, + 311, + 150, + 357 + ], + "lines": [ + { + "bbox": [ + 113, + 311, + 150, + 357 + ], + "spans": [ + { + "bbox": [ + 113, + 311, + 150, + 357 + ], + "type": "image", + "image_path": "e04d91ae0078e3f13b04545dbd231fa9f18cad6ba2b68457a7651ac7a87d9c45.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 110, + 362, + 149, + 404 + ], + "blocks": [ + { + "bbox": [ + 110, + 362, + 149, + 404 + ], + "lines": [ + { + "bbox": [ + 110, + 362, + 149, + 404 + ], + "spans": [ + { + "bbox": [ + 110, + 362, + 149, + 404 + ], + "type": "image", + "image_path": 
"a808aed1c4541648fb3051088b24badca32837e34912c7130ece8d1e15959fec.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 116, + 406, + 138, + 414 + ], + "lines": [ + { + "bbox": [ + 116, + 406, + 138, + 414 + ], + "spans": [ + { + "bbox": [ + 116, + 406, + 138, + 414 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 178, + 269, + 217, + 308 + ], + "blocks": [ + { + "bbox": [ + 178, + 269, + 217, + 308 + ], + "lines": [ + { + "bbox": [ + 178, + 269, + 217, + 308 + ], + "spans": [ + { + "bbox": [ + 178, + 269, + 217, + 308 + ], + "type": "image", + "image_path": "51078dc7dc29b2d6fdeac341ddf5024c07b6f7122090b9e727a58e35c6e40d59.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 178, + 312, + 214, + 357 + ], + "blocks": [ + { + "bbox": [ + 178, + 312, + 214, + 357 + ], + "lines": [ + { + "bbox": [ + 178, + 312, + 214, + 357 + ], + "spans": [ + { + "bbox": [ + 178, + 312, + 214, + 357 + ], + "type": "image", + "image_path": "c4f07c90e6b9dfa4b319f10c60895cffc314e52bcbbfc0df21c1902a8ed03e18.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 176, + 363, + 215, + 404 + ], + "blocks": [ + { + "bbox": [ + 176, + 363, + 215, + 404 + ], + "lines": [ + { + "bbox": [ + 176, + 363, + 215, + 404 + ], + "spans": [ + { + "bbox": [ + 176, + 363, + 215, + 404 + ], + "type": "image", + "image_path": "53fadd6a3913e68819b93b806fd4aafeebcd3c0b345de3d315d8de372936b6df.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 406, + 208, + 413 + ], + "lines": [ + { + "bbox": [ + 187, + 406, + 208, + 413 + ], + "spans": [ + { + "bbox": [ + 187, + 406, + 208, + 413 + ], + "type": "text", + "content": 
"Smiling" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 222, + 269, + 263, + 308 + ], + "blocks": [ + { + "bbox": [ + 222, + 269, + 263, + 308 + ], + "lines": [ + { + "bbox": [ + 222, + 269, + 263, + 308 + ], + "spans": [ + { + "bbox": [ + 222, + 269, + 263, + 308 + ], + "type": "image", + "image_path": "c01d5cdc11749aceb600474b790b227f0725639ed45e6cc3017b74e047d7f3a2.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 221, + 312, + 258, + 357 + ], + "blocks": [ + { + "bbox": [ + 221, + 312, + 258, + 357 + ], + "lines": [ + { + "bbox": [ + 221, + 312, + 258, + 357 + ], + "spans": [ + { + "bbox": [ + 221, + 312, + 258, + 357 + ], + "type": "image", + "image_path": "228c3853d508570273ac7e44aa73a0a7931b1c3e53d6b7c8642a5698f4437fd5.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 222, + 363, + 261, + 404 + ], + "blocks": [ + { + "bbox": [ + 222, + 363, + 261, + 404 + ], + "lines": [ + { + "bbox": [ + 222, + 363, + 261, + 404 + ], + "spans": [ + { + "bbox": [ + 222, + 363, + 261, + 404 + ], + "type": "image", + "image_path": "de7e123758ffaa3c53c949f86ab501000b510c5a29ec9baa367ead431dbc1ace.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 235, + 406, + 251, + 413 + ], + "lines": [ + { + "bbox": [ + 235, + 406, + 251, + 413 + ], + "spans": [ + { + "bbox": [ + 235, + 406, + 251, + 413 + ], + "type": "text", + "content": "Angry" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 268, + 269, + 309, + 308 + ], + "blocks": [ + { + "bbox": [ + 268, + 269, + 309, + 308 + ], + "lines": [ + { + "bbox": [ + 268, + 269, + 309, + 308 + ], + "spans": [ + { + "bbox": [ + 268, + 269, + 309, + 
308 + ], + "type": "image", + "image_path": "f8084a2788067af8c2a6467de0967dce3dd260b6eaf65144a09153bc799d92eb.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 269, + 312, + 304, + 357 + ], + "blocks": [ + { + "bbox": [ + 269, + 312, + 304, + 357 + ], + "lines": [ + { + "bbox": [ + 269, + 312, + 304, + 357 + ], + "spans": [ + { + "bbox": [ + 269, + 312, + 304, + 357 + ], + "type": "image", + "image_path": "3390a19d095a77761a8cb62fe5351a40a125939fb240ae0313747de09776abcd.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 269, + 363, + 307, + 403 + ], + "blocks": [ + { + "bbox": [ + 269, + 363, + 307, + 403 + ], + "lines": [ + { + "bbox": [ + 269, + 363, + 307, + 403 + ], + "spans": [ + { + "bbox": [ + 269, + 363, + 307, + 403 + ], + "type": "image", + "image_path": "7f5c9af1b8eb8e9a7401d597697e61a7c3c14a31597371d96db50a5b28172d6f.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 283, + 406, + 294, + 412 + ], + "lines": [ + { + "bbox": [ + 283, + 406, + 294, + 412 + ], + "spans": [ + { + "bbox": [ + 283, + 406, + 294, + 412 + ], + "type": "text", + "content": "Sad" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 317, + 269, + 354, + 308 + ], + "blocks": [ + { + "bbox": [ + 317, + 269, + 354, + 308 + ], + "lines": [ + { + "bbox": [ + 317, + 269, + 354, + 308 + ], + "spans": [ + { + "bbox": [ + 317, + 269, + 354, + 308 + ], + "type": "image", + "image_path": "356e3c1782cb587c2a81187bd60ed235e0c78c24a06e04a2d004a20e977d7bfd.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 316, + 312, + 352, + 357 + ], + "blocks": [ + { + "bbox": [ + 316, + 312, + 352, + 357 + ], + "lines": [ 
+ { + "bbox": [ + 316, + 312, + 352, + 357 + ], + "spans": [ + { + "bbox": [ + 316, + 312, + 352, + 357 + ], + "type": "image", + "image_path": "e56d2185ea670ff29b649342f4e3bb5fc3df7f61ee6195b237e0c55fabe7d072.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 315, + 362, + 353, + 403 + ], + "blocks": [ + { + "bbox": [ + 315, + 362, + 353, + 403 + ], + "lines": [ + { + "bbox": [ + 315, + 362, + 353, + 403 + ], + "spans": [ + { + "bbox": [ + 315, + 362, + 353, + 403 + ], + "type": "image", + "image_path": "12c177a34c07bdea3fd1247b8831c6e6cc55dc26e9b21766e89323dd958d3187.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 406, + 347, + 413 + ], + "lines": [ + { + "bbox": [ + 321, + 406, + 347, + 413 + ], + "spans": [ + { + "bbox": [ + 321, + 406, + 347, + 413 + ], + "type": "text", + "content": "Surprised" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 363, + 269, + 402, + 308 + ], + "blocks": [ + { + "bbox": [ + 363, + 269, + 402, + 308 + ], + "lines": [ + { + "bbox": [ + 363, + 269, + 402, + 308 + ], + "spans": [ + { + "bbox": [ + 363, + 269, + 402, + 308 + ], + "type": "image", + "image_path": "da607d1b8b77be7cb8507b68d659c13b7bffbc7cf157aa061ce8a308fdd211fd.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 362, + 312, + 398, + 357 + ], + "blocks": [ + { + "bbox": [ + 362, + 312, + 398, + 357 + ], + "lines": [ + { + "bbox": [ + 362, + 312, + 398, + 357 + ], + "spans": [ + { + "bbox": [ + 362, + 312, + 398, + 357 + ], + "type": "image", + "image_path": "e674f7d82e7b9881aa9754566d6123277f2961053857202481e3bfb184c64aa5.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ 
+ 362, + 361, + 400, + 403 + ], + "blocks": [ + { + "bbox": [ + 362, + 361, + 400, + 403 + ], + "lines": [ + { + "bbox": [ + 362, + 361, + 400, + 403 + ], + "spans": [ + { + "bbox": [ + 362, + 361, + 400, + 403 + ], + "type": "image", + "image_path": "45586dd300b9af485b22e3f8adda77dfd5d45fc14a6a58bc8ab18172acee9c40.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 367, + 406, + 392, + 413 + ], + "lines": [ + { + "bbox": [ + 367, + 406, + 392, + 413 + ], + "spans": [ + { + "bbox": [ + 367, + 406, + 392, + 413 + ], + "type": "text", + "content": "With a tie" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 408, + 269, + 447, + 308 + ], + "blocks": [ + { + "bbox": [ + 408, + 269, + 447, + 308 + ], + "lines": [ + { + "bbox": [ + 408, + 269, + 447, + 308 + ], + "spans": [ + { + "bbox": [ + 408, + 269, + 447, + 308 + ], + "type": "image", + "image_path": "2afe8cd0b79ec97e8f85acb7b54374dd5929d4d4af34a4e617a4ed08c4f69cc2.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 408, + 312, + 444, + 357 + ], + "blocks": [ + { + "bbox": [ + 408, + 312, + 444, + 357 + ], + "lines": [ + { + "bbox": [ + 408, + 312, + 444, + 357 + ], + "spans": [ + { + "bbox": [ + 408, + 312, + 444, + 357 + ], + "type": "image", + "image_path": "33730992ca1160af4ce702c5a5155b30fe5bce51fa7a0165ff152335e2d98122.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 407, + 360, + 446, + 404 + ], + "blocks": [ + { + "bbox": [ + 407, + 360, + 446, + 404 + ], + "lines": [ + { + "bbox": [ + 407, + 360, + 446, + 404 + ], + "spans": [ + { + "bbox": [ + 407, + 360, + 446, + 404 + ], + "type": "image", + "image_path": "9c94b18e7cdc8231a1b1f8c79561ad3948e5f2c432cffee042beea77b341bb6b.jpg" + } + ] + } + ], + 
"index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 411, + 406, + 439, + 413 + ], + "lines": [ + { + "bbox": [ + 411, + 406, + 439, + 413 + ], + "spans": [ + { + "bbox": [ + 411, + 406, + 439, + 413 + ], + "type": "text", + "content": "With a hat" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 456, + 269, + 494, + 308 + ], + "blocks": [ + { + "bbox": [ + 456, + 269, + 494, + 308 + ], + "lines": [ + { + "bbox": [ + 456, + 269, + 494, + 308 + ], + "spans": [ + { + "bbox": [ + 456, + 269, + 494, + 308 + ], + "type": "image", + "image_path": "1cbd9b12615d0960973eb52c5db15b704c2e258420bc12b308b4eed00e16733d.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 455, + 312, + 492, + 357 + ], + "blocks": [ + { + "bbox": [ + 455, + 312, + 492, + 357 + ], + "lines": [ + { + "bbox": [ + 455, + 312, + 492, + 357 + ], + "spans": [ + { + "bbox": [ + 455, + 312, + 492, + 357 + ], + "type": "image", + "image_path": "8e6e49973d317d28e398a63397441caec9b5c31b070429695becdb2767a11ad8.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 453, + 362, + 492, + 404 + ], + "blocks": [ + { + "bbox": [ + 453, + 362, + 492, + 404 + ], + "lines": [ + { + "bbox": [ + 453, + 362, + 492, + 404 + ], + "spans": [ + { + "bbox": [ + 453, + 362, + 492, + 404 + ], + "type": "image", + "image_path": "c184bf010aed80b2b24f8e3ddfe26ebff667d3c8608b9c68cd8fd7a1f5c4e084.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 406, + 488, + 413 + ], + "lines": [ + { + "bbox": [ + 454, + 406, + 488, + 413 + ], + "spans": [ + { + "bbox": [ + 454, + 406, + 488, + 413 + ], + "type": "text", + "content": "Holds a heart" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + 
} + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 109, + 422, + 154, + 457 + ], + "blocks": [ + { + "bbox": [ + 109, + 422, + 154, + 457 + ], + "lines": [ + { + "bbox": [ + 109, + 422, + 154, + 457 + ], + "spans": [ + { + "bbox": [ + 109, + 422, + 154, + 457 + ], + "type": "image", + "image_path": "5d64f882ac50029ec58d42f2227b06018dacc7147a2126af54a93db2e38abc4d.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 121, + 465, + 143, + 472 + ], + "lines": [ + { + "bbox": [ + 121, + 465, + 143, + 472 + ], + "spans": [ + { + "bbox": [ + 121, + 465, + 143, + 472 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 119, + 478, + 144, + 514 + ], + "blocks": [ + { + "bbox": [ + 119, + 478, + 144, + 514 + ], + "lines": [ + { + "bbox": [ + 119, + 478, + 144, + 514 + ], + "spans": [ + { + "bbox": [ + 119, + 478, + 144, + 514 + ], + "type": "image", + "image_path": "57dea729a4cf56b78ebebfafc8fb83f40b589b818cccf38268df7c5d52410621.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 120, + 518, + 142, + 525 + ], + "lines": [ + { + "bbox": [ + 120, + 518, + 142, + 525 + ], + "spans": [ + { + "bbox": [ + 120, + 518, + 142, + 525 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + } + ], + "index": 51 + }, + { + "type": "image", + "bbox": [ + 175, + 422, + 219, + 457 + ], + "blocks": [ + { + "bbox": [ + 175, + 422, + 219, + 457 + ], + "lines": [ + { + "bbox": [ + 175, + 422, + 219, + 457 + ], + "spans": [ + { + "bbox": [ + 175, + 422, + 219, + 457 + ], + "type": "image", + "image_path": "d1f089e64c677195ce254b031d92751d7f5cfc53e5268353ddaa8943be339c0c.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 184, + 465, + 202, + 472 + ], + 
"lines": [ + { + "bbox": [ + 184, + 465, + 202, + 472 + ], + "spans": [ + { + "bbox": [ + 184, + 465, + 202, + 472 + ], + "type": "text", + "content": "Brown" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + } + ], + "index": 53 + }, + { + "type": "image", + "bbox": [ + 183, + 478, + 209, + 514 + ], + "blocks": [ + { + "bbox": [ + 183, + 478, + 209, + 514 + ], + "lines": [ + { + "bbox": [ + 183, + 478, + 209, + 514 + ], + "spans": [ + { + "bbox": [ + 183, + 478, + 209, + 514 + ], + "type": "image", + "image_path": "24417939aaec7b1106a20f9c8278eb671db242b002aca04ef883863b7ef10653.jpg" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 518, + 213, + 525 + ], + "lines": [ + { + "bbox": [ + 175, + 518, + 213, + 525 + ], + "spans": [ + { + "bbox": [ + 175, + 518, + 213, + 525 + ], + "type": "text", + "content": "With a bow tie" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_caption" + } + ], + "index": 55 + }, + { + "type": "image", + "bbox": [ + 221, + 422, + 265, + 457 + ], + "blocks": [ + { + "bbox": [ + 221, + 422, + 265, + 457 + ], + "lines": [ + { + "bbox": [ + 221, + 422, + 265, + 457 + ], + "spans": [ + { + "bbox": [ + 221, + 422, + 265, + 457 + ], + "type": "image", + "image_path": "4837c9c2816f13d2d82dfbe651897d639d5e0a71bb78e8dee928b12118b1d8ee.jpg" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 463, + 254, + 475 + ], + "lines": [ + { + "bbox": [ + 225, + 463, + 254, + 475 + ], + "spans": [ + { + "bbox": [ + 225, + 463, + 254, + 475 + ], + "type": "text", + "content": "Blue racing car" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_caption" + } + ], + "index": 57 + }, + { + "type": "image", + "bbox": [ + 239, + 478, + 265, + 515 + ], + "blocks": [ + { + "bbox": [ + 239, + 478, + 265, + 515 + ], + "lines": [ + { + "bbox": [ + 239, + 478, + 265, + 515 + ], + "spans": [ + { + "bbox": [ + 239, + 478, + 265, + 
515 + ], + "type": "image", + "image_path": "95eecdebf2ef13c3d6d48d129bd43ff57aa8b103e3f0a62cb1a290bb6ce67f2c.jpg" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 231, + 518, + 270, + 525 + ], + "lines": [ + { + "bbox": [ + 231, + 518, + 270, + 525 + ], + "spans": [ + { + "bbox": [ + 231, + 518, + 270, + 525 + ], + "type": "text", + "content": "With a bow tie" + } + ] + } + ], + "index": 60, + "angle": 0, + "type": "image_caption" + } + ], + "index": 59 + }, + { + "type": "image", + "bbox": [ + 266, + 422, + 312, + 457 + ], + "blocks": [ + { + "bbox": [ + 266, + 422, + 312, + 457 + ], + "lines": [ + { + "bbox": [ + 266, + 422, + 312, + 457 + ], + "spans": [ + { + "bbox": [ + 266, + 422, + 312, + 457 + ], + "type": "image", + "image_path": "79a44f0cd5d2d845de178dd2725361391d7bac97323eb6c2fa3966b2720dddbd.jpg" + } + ] + } + ], + "index": 61, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 276, + 465, + 297, + 472 + ], + "lines": [ + { + "bbox": [ + 276, + 465, + 297, + 472 + ], + "spans": [ + { + "bbox": [ + 276, + 465, + 297, + 472 + ], + "type": "text", + "content": "Flowers" + } + ] + } + ], + "index": 62, + "angle": 0, + "type": "image_caption" + } + ], + "index": 61 + }, + { + "type": "image", + "bbox": [ + 295, + 477, + 321, + 515 + ], + "blocks": [ + { + "bbox": [ + 295, + 477, + 321, + 515 + ], + "lines": [ + { + "bbox": [ + 295, + 477, + 321, + 515 + ], + "spans": [ + { + "bbox": [ + 295, + 477, + 321, + 515 + ], + "type": "image", + "image_path": "d14ca17d5966209bdf8569e00cf2a793237fa81ae1e59e73b8043fc6ef615f14.jpg" + } + ] + } + ], + "index": 63, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 298, + 518, + 320, + 525 + ], + "lines": [ + { + "bbox": [ + 298, + 518, + 320, + 525 + ], + "spans": [ + { + "bbox": [ + 298, + 518, + 320, + 525 + ], + "type": "text", + "content": "Sleeping" + } + ] + } + ], + "index": 64, + "angle": 0, + "type": "image_caption" + } + ], + "index": 63 + }, + { + 
"type": "image", + "bbox": [ + 313, + 422, + 356, + 457 + ], + "blocks": [ + { + "bbox": [ + 313, + 422, + 356, + 457 + ], + "lines": [ + { + "bbox": [ + 313, + 422, + 356, + 457 + ], + "spans": [ + { + "bbox": [ + 313, + 422, + 356, + 457 + ], + "type": "image", + "image_path": "6c3892bd5c587c080a087740bf83d11cf32f933af17bcef12403504d87edb4b9.jpg" + } + ] + } + ], + "index": 65, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 465, + 339, + 472 + ], + "lines": [ + { + "bbox": [ + 321, + 465, + 339, + 472 + ], + "spans": [ + { + "bbox": [ + 321, + 465, + 339, + 472 + ], + "type": "text", + "content": "Sketch" + } + ] + } + ], + "index": 66, + "angle": 0, + "type": "image_caption" + } + ], + "index": 65 + }, + { + "type": "image", + "bbox": [ + 362, + 422, + 403, + 457 + ], + "blocks": [ + { + "bbox": [ + 362, + 422, + 403, + 457 + ], + "lines": [ + { + "bbox": [ + 362, + 422, + 403, + 457 + ], + "spans": [ + { + "bbox": [ + 362, + 422, + 403, + 457 + ], + "type": "image", + "image_path": "37332cd45083ae44a77e2593bdde45ed6df852828bdec4173377205db5ea088e.jpg" + } + ] + } + ], + "index": 67, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 463, + 397, + 475 + ], + "lines": [ + { + "bbox": [ + 364, + 463, + 397, + 475 + ], + "spans": [ + { + "bbox": [ + 364, + 463, + 397, + 475 + ], + "type": "text", + "content": "Yellow racing car" + } + ] + } + ], + "index": 68, + "angle": 0, + "type": "image_caption" + } + ], + "index": 67 + }, + { + "type": "image", + "bbox": [ + 405, + 422, + 449, + 457 + ], + "blocks": [ + { + "bbox": [ + 405, + 422, + 449, + 457 + ], + "lines": [ + { + "bbox": [ + 405, + 422, + 449, + 457 + ], + "spans": [ + { + "bbox": [ + 405, + 422, + 449, + 457 + ], + "type": "image", + "image_path": "a1ae83a724a796bc78a8c855c453a8dcad23a333050ea1cb03f64e12ce16eeb3.jpg" + } + ] + } + ], + "index": 69, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 424, + 466, + 438, + 472 + ], + "lines": [ + { + "bbox": [ + 
424, + 466, + 438, + 472 + ], + "spans": [ + { + "bbox": [ + 424, + 466, + 438, + 472 + ], + "type": "text", + "content": "Stars" + } + ] + } + ], + "index": 70, + "angle": 0, + "type": "image_caption" + } + ], + "index": 69 + }, + { + "type": "image", + "bbox": [ + 451, + 422, + 497, + 457 + ], + "blocks": [ + { + "bbox": [ + 451, + 422, + 497, + 457 + ], + "lines": [ + { + "bbox": [ + 451, + 422, + 497, + 457 + ], + "spans": [ + { + "bbox": [ + 451, + 422, + 497, + 457 + ], + "type": "image", + "image_path": "a8a608179b18282bbc4fca5c9bb644f944ce4ca3f45ff89e3ab2fbe287148ef4.jpg" + } + ] + } + ], + "index": 71, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 465, + 485, + 472 + ], + "lines": [ + { + "bbox": [ + 463, + 465, + 485, + 472 + ], + "spans": [ + { + "bbox": [ + 463, + 465, + 485, + 472 + ], + "type": "text", + "content": "Colorful" + } + ] + } + ], + "index": 72, + "angle": 0, + "type": "image_caption" + } + ], + "index": 71 + }, + { + "type": "image", + "bbox": [ + 463, + 477, + 489, + 515 + ], + "blocks": [ + { + "bbox": [ + 463, + 477, + 489, + 515 + ], + "lines": [ + { + "bbox": [ + 463, + 477, + 489, + 515 + ], + "spans": [ + { + "bbox": [ + 463, + 477, + 489, + 515 + ], + "type": "image", + "image_path": "b4166392a635f5f7f216a72384752f389d3a06b775316e761058f1dba42c80e8.jpg" + } + ] + } + ], + "index": 73, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 460, + 518, + 490, + 525 + ], + "lines": [ + { + "bbox": [ + 460, + 518, + 490, + 525 + ], + "spans": [ + { + "bbox": [ + 460, + 518, + 490, + 525 + ], + "type": "text", + "content": "Red lipstick" + } + ] + } + ], + "index": 74, + "angle": 0, + "type": "image_caption" + } + ], + "index": 73 + }, + { + "type": "image", + "bbox": [ + 115, + 541, + 141, + 586 + ], + "blocks": [ + { + "bbox": [ + 115, + 541, + 141, + 586 + ], + "lines": [ + { + "bbox": [ + 115, + 541, + 141, + 586 + ], + "spans": [ + { + "bbox": [ + 115, + 541, + 141, + 586 + ], + "type": "image", + 
"image_path": "ede45cf23c0b3b2a62df0c6f091a8150e32fabe93e54bbb16adf854188ad5c20.jpg" + } + ] + } + ], + "index": 75, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 588, + 139, + 594 + ], + "lines": [ + { + "bbox": [ + 119, + 588, + 139, + 594 + ], + "spans": [ + { + "bbox": [ + 119, + 588, + 139, + 594 + ], + "type": "text", + "content": "original" + } + ] + } + ], + "index": 76, + "angle": 0, + "type": "image_caption" + } + ], + "index": 75 + }, + { + "type": "image", + "bbox": [ + 152, + 541, + 176, + 586 + ], + "blocks": [ + { + "bbox": [ + 152, + 541, + 176, + 586 + ], + "lines": [ + { + "bbox": [ + 152, + 541, + 176, + 586 + ], + "spans": [ + { + "bbox": [ + 152, + 541, + 176, + 586 + ], + "type": "image", + "image_path": "703d7ab071c3469d953fed3a7c3df1bb4d3f97a6a3dd4264c58b98d8b8fe7346.jpg" + } + ] + } + ], + "index": 77, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 156, + 588, + 175, + 594 + ], + "lines": [ + { + "bbox": [ + 156, + 588, + 175, + 594 + ], + "spans": [ + { + "bbox": [ + 156, + 588, + 175, + 594 + ], + "type": "text", + "content": "Smiling" + } + ] + } + ], + "index": 78, + "angle": 0, + "type": "image_caption" + } + ], + "index": 77 + }, + { + "type": "image", + "bbox": [ + 181, + 541, + 208, + 586 + ], + "blocks": [ + { + "bbox": [ + 181, + 541, + 208, + 586 + ], + "lines": [ + { + "bbox": [ + 181, + 541, + 208, + 586 + ], + "spans": [ + { + "bbox": [ + 181, + 541, + 208, + 586 + ], + "type": "image", + "image_path": "55ec0acaf15c5fa5367499e659ee0a2012b4c1f3496f07c369b08b31b9476e4b.jpg" + } + ] + } + ], + "index": 79, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 588, + 205, + 594 + ], + "lines": [ + { + "bbox": [ + 187, + 588, + 205, + 594 + ], + "spans": [ + { + "bbox": [ + 187, + 588, + 205, + 594 + ], + "type": "text", + "content": "Angry" + } + ] + } + ], + "index": 80, + "angle": 0, + "type": "image_caption" + } + ], + "index": 79 + }, + { + "type": "image", + "bbox": [ + 211, + 
541, + 236, + 586 + ], + "blocks": [ + { + "bbox": [ + 211, + 541, + 236, + 586 + ], + "lines": [ + { + "bbox": [ + 211, + 541, + 236, + 586 + ], + "spans": [ + { + "bbox": [ + 211, + 541, + 236, + 586 + ], + "type": "image", + "image_path": "e2f145fec752ce2ff05c9f2c4eba6efb856fb1eb01a967b27cb16ba362757fcd.jpg" + } + ] + } + ], + "index": 81, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 588, + 228, + 594 + ], + "lines": [ + { + "bbox": [ + 219, + 588, + 228, + 594 + ], + "spans": [ + { + "bbox": [ + 219, + 588, + 228, + 594 + ], + "type": "text", + "content": "Sad" + } + ] + } + ], + "index": 82, + "angle": 0, + "type": "image_caption" + } + ], + "index": 81 + }, + { + "type": "image", + "bbox": [ + 241, + 541, + 267, + 586 + ], + "blocks": [ + { + "bbox": [ + 241, + 541, + 267, + 586 + ], + "lines": [ + { + "bbox": [ + 241, + 541, + 267, + 586 + ], + "spans": [ + { + "bbox": [ + 241, + 541, + 267, + 586 + ], + "type": "image", + "image_path": "3d26da57aa83f13d6f387eede510aee64a557d7ae45ab901864b4de5244d1a67.jpg" + } + ] + } + ], + "index": 83, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 240, + 588, + 265, + 594 + ], + "lines": [ + { + "bbox": [ + 240, + 588, + 265, + 594 + ], + "spans": [ + { + "bbox": [ + 240, + 588, + 265, + 594 + ], + "type": "text", + "content": "Surprised" + } + ] + } + ], + "index": 84, + "angle": 0, + "type": "image_caption" + } + ], + "index": 83 + }, + { + "type": "image", + "bbox": [ + 272, + 540, + 317, + 586 + ], + "blocks": [ + { + "bbox": [ + 272, + 540, + 317, + 586 + ], + "lines": [ + { + "bbox": [ + 272, + 540, + 317, + 586 + ], + "spans": [ + { + "bbox": [ + 272, + 540, + 317, + 586 + ], + "type": "image", + "image_path": "722ac0a79cffb8727a85cf1538422f947407deb086ea4b2dbb437f08b36e51cd.jpg" + } + ] + } + ], + "index": 85, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 288, + 588, + 307, + 594 + ], + "lines": [ + { + "bbox": [ + 288, + 588, + 307, + 594 + ], + "spans": [ + { + 
"bbox": [ + 288, + 588, + 307, + 594 + ], + "type": "text", + "content": "original" + } + ] + } + ], + "index": 86, + "angle": 0, + "type": "image_caption" + } + ], + "index": 85 + }, + { + "type": "image", + "bbox": [ + 322, + 540, + 366, + 586 + ], + "blocks": [ + { + "bbox": [ + 322, + 540, + 366, + 586 + ], + "lines": [ + { + "bbox": [ + 322, + 540, + 366, + 586 + ], + "spans": [ + { + "bbox": [ + 322, + 540, + 366, + 586 + ], + "type": "image", + "image_path": "900cfe73d83b68ab5c0fcff3588924975096b6eb41af62a6c4a9c61ccbd0065c.jpg" + } + ] + } + ], + "index": 87, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 338, + 586, + 358, + 594 + ], + "lines": [ + { + "bbox": [ + 338, + 586, + 358, + 594 + ], + "spans": [ + { + "bbox": [ + 338, + 586, + 358, + 594 + ], + "type": "text", + "content": "Smiling" + } + ] + } + ], + "index": 88, + "angle": 0, + "type": "image_caption" + } + ], + "index": 87 + }, + { + "type": "image", + "bbox": [ + 369, + 540, + 412, + 586 + ], + "blocks": [ + { + "bbox": [ + 369, + 540, + 412, + 586 + ], + "lines": [ + { + "bbox": [ + 369, + 540, + 412, + 586 + ], + "spans": [ + { + "bbox": [ + 369, + 540, + 412, + 586 + ], + "type": "image", + "image_path": "85a96b2379bf7659a6d05680deea7965c6c58ed8d3fc5a3bcf6778411e58cabc.jpg" + } + ] + } + ], + "index": 89, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 384, + 588, + 402, + 594 + ], + "lines": [ + { + "bbox": [ + 384, + 588, + 402, + 594 + ], + "spans": [ + { + "bbox": [ + 384, + 588, + 402, + 594 + ], + "type": "text", + "content": "Angry" + } + ] + } + ], + "index": 90, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 169, + 608, + 440, + 619 + ], + "lines": [ + { + "bbox": [ + 169, + 608, + 440, + 619 + ], + "spans": [ + { + "bbox": [ + 169, + 608, + 440, + 619 + ], + "type": "text", + "content": "Figure 20: Additional manipulation examples, generated using our model." 
+ } + ] + } + ], + "index": 95, + "angle": 0, + "type": "image_caption" + } + ], + "index": 89 + }, + { + "type": "image", + "bbox": [ + 414, + 540, + 457, + 586 + ], + "blocks": [ + { + "bbox": [ + 414, + 540, + 457, + 586 + ], + "lines": [ + { + "bbox": [ + 414, + 540, + 457, + 586 + ], + "spans": [ + { + "bbox": [ + 414, + 540, + 457, + 586 + ], + "type": "image", + "image_path": "8120f167a21959a387fc65ae4e86572cc95c25fc5882be8d8dbe3bdad25dd453.jpg" + } + ] + } + ], + "index": 91, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 432, + 588, + 445, + 594 + ], + "lines": [ + { + "bbox": [ + 432, + 588, + 445, + 594 + ], + "spans": [ + { + "bbox": [ + 432, + 588, + 445, + 594 + ], + "type": "text", + "content": "Sad" + } + ] + } + ], + "index": 92, + "angle": 0, + "type": "image_caption" + } + ], + "index": 91 + }, + { + "type": "image", + "bbox": [ + 460, + 540, + 502, + 586 + ], + "blocks": [ + { + "bbox": [ + 460, + 540, + 502, + 586 + ], + "lines": [ + { + "bbox": [ + 460, + 540, + 502, + 586 + ], + "spans": [ + { + "bbox": [ + 460, + 540, + 502, + 586 + ], + "type": "image", + "image_path": "fa46d33e95e6782f1d76a59b8da109af927a82befa76182f0b8d4052d83a8b99.jpg" + } + ] + } + ], + "index": 93, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 470, + 588, + 495, + 594 + ], + "lines": [ + { + "bbox": [ + 470, + 588, + 495, + 594 + ], + "spans": [ + { + "bbox": [ + 470, + 588, + 495, + 594 + ], + "type": "text", + "content": "Surprised" + } + ] + } + ], + "index": 94, + "angle": 0, + "type": "image_caption" + } + ], + "index": 93 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 312, + 760 + ], + "type": "page_number", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 312, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 96 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 153, + 154, + 201 + ], + "blocks": [ + { + "bbox": [ + 111, + 153, + 154, + 201 + ], + "lines": [ + { + "bbox": [ + 111, + 153, + 154, + 201 + ], + "spans": [ + { + "bbox": [ + 111, + 153, + 154, + 201 + ], + "type": "image", + "image_path": "7d95d4e0c853cf4b1f60bc54e5c072f28cc55ffbe3b127b5f23a52804fc085a3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 201, + 146, + 209 + ], + "lines": [ + { + "bbox": [ + 119, + 201, + 146, + 209 + ], + "spans": [ + { + "bbox": [ + 119, + 201, + 146, + 209 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 178, + 133, + 227, + 182 + ], + "blocks": [ + { + "bbox": [ + 178, + 133, + 227, + 182 + ], + "lines": [ + { + "bbox": [ + 178, + 133, + 227, + 182 + ], + "spans": [ + { + "bbox": [ + 178, + 133, + 227, + 182 + ], + "type": "image", + "image_path": "276d498c8b35bd622999f93ede46c6376c1e6ae0da8f7f935808bb94ea8a51eb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 182, + 183, + 223, + 232 + ], + "blocks": [ + { + "bbox": [ + 182, + 183, + 223, + 232 + ], + "lines": [ + { + "bbox": [ + 182, + 183, + 223, + 232 + ], + "spans": [ + { + "bbox": [ + 182, + 183, + 223, + 232 + ], + "type": "image", + "image_path": "2de808d68973efcac5e8a3354c748fe3ea790a0ff2c616c8134188b2d11eb240.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 192, + 233, + 212, + 240 + ], + "lines": [ + { + "bbox": [ + 192, + 
233, + 212, + 240 + ], + "spans": [ + { + "bbox": [ + 192, + 233, + 212, + 240 + ], + "type": "text", + "content": "Angry" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 236, + 132, + 280, + 182 + ], + "blocks": [ + { + "bbox": [ + 236, + 132, + 280, + 182 + ], + "lines": [ + { + "bbox": [ + 236, + 132, + 280, + 182 + ], + "spans": [ + { + "bbox": [ + 236, + 132, + 280, + 182 + ], + "type": "image", + "image_path": "a23da8b93796b929ea4d94a3e3c4c7f0a0203eea5feeb7e4931e834262e614c5.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 237, + 183, + 279, + 232 + ], + "blocks": [ + { + "bbox": [ + 237, + 183, + 279, + 232 + ], + "lines": [ + { + "bbox": [ + 237, + 183, + 279, + 232 + ], + "spans": [ + { + "bbox": [ + 237, + 183, + 279, + 232 + ], + "type": "image", + "image_path": "cbbc7e48e11b5be92f45ac8ec8f66a13132b18450abc71f359be38d493ec3399.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 251, + 233, + 264, + 240 + ], + "lines": [ + { + "bbox": [ + 251, + 233, + 264, + 240 + ], + "spans": [ + { + "bbox": [ + 251, + 233, + 264, + 240 + ], + "type": "text", + "content": "Sad" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 288, + 133, + 331, + 182 + ], + "blocks": [ + { + "bbox": [ + 288, + 133, + 331, + 182 + ], + "lines": [ + { + "bbox": [ + 288, + 133, + 331, + 182 + ], + "spans": [ + { + "bbox": [ + 288, + 133, + 331, + 182 + ], + "type": "image", + "image_path": "1a86d5a590456c96c017085ebd4f3fc3a201a35b8daa14450e8ffe5ba8260b71.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 288, + 183, + 330, + 232 + ], + "blocks": [ + { + "bbox": [ + 288, + 183, + 330, + 232 + ], + "lines": 
[ + { + "bbox": [ + 288, + 183, + 330, + 232 + ], + "spans": [ + { + "bbox": [ + 288, + 183, + 330, + 232 + ], + "type": "image", + "image_path": "4ab3f295a539573f747f98eb8c9c8abaeaa1ef960ae8e4da6f69ab46df56a7db.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 293, + 232, + 324, + 240 + ], + "lines": [ + { + "bbox": [ + 293, + 232, + 324, + 240 + ], + "spans": [ + { + "bbox": [ + 293, + 232, + 324, + 240 + ], + "type": "text", + "content": "Surprised." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 340, + 133, + 383, + 182 + ], + "blocks": [ + { + "bbox": [ + 340, + 133, + 383, + 182 + ], + "lines": [ + { + "bbox": [ + 340, + 133, + 383, + 182 + ], + "spans": [ + { + "bbox": [ + 340, + 133, + 383, + 182 + ], + "type": "image", + "image_path": "1ecfed423ad4a0f1d5f5984c6ae0fa6b17bcb68da8228962a3b2546dca0be1fc.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 341, + 183, + 383, + 232 + ], + "blocks": [ + { + "bbox": [ + 341, + 183, + 383, + 232 + ], + "lines": [ + { + "bbox": [ + 341, + 183, + 383, + 232 + ], + "spans": [ + { + "bbox": [ + 341, + 183, + 383, + 232 + ], + "type": "image", + "image_path": "b00237405d55caad0d1eb62c0ff1d4f6b998bab2700b88e7c84fbb0f42ff7cf4.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 232, + 375, + 240 + ], + "lines": [ + { + "bbox": [ + 343, + 232, + 375, + 240 + ], + "spans": [ + { + "bbox": [ + 343, + 232, + 375, + 240 + ], + "type": "text", + "content": "With a tie" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 392, + 133, + 436, + 182 + ], + "blocks": [ + { + "bbox": [ + 392, + 133, + 436, + 182 + ], + "lines": [ + { + "bbox": [ + 392, + 133, + 436, + 182 + ], + 
"spans": [ + { + "bbox": [ + 392, + 133, + 436, + 182 + ], + "type": "image", + "image_path": "52a30c3fe41652e5819f5c675c01446102dc48b8a4d5410e03c5d730c6a2f8aa.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 393, + 184, + 435, + 232 + ], + "blocks": [ + { + "bbox": [ + 393, + 184, + 435, + 232 + ], + "lines": [ + { + "bbox": [ + 393, + 184, + 435, + 232 + ], + "spans": [ + { + "bbox": [ + 393, + 184, + 435, + 232 + ], + "type": "image", + "image_path": "e953285210bd6961f5a25692380777ea14643fef569c7cee8d1ad209cbc82d4b.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 232, + 430, + 240 + ], + "lines": [ + { + "bbox": [ + 397, + 232, + 430, + 240 + ], + "spans": [ + { + "bbox": [ + 397, + 232, + 430, + 240 + ], + "type": "text", + "content": "With a hat" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 445, + 133, + 489, + 182 + ], + "blocks": [ + { + "bbox": [ + 445, + 133, + 489, + 182 + ], + "lines": [ + { + "bbox": [ + 445, + 133, + 489, + 182 + ], + "spans": [ + { + "bbox": [ + 445, + 133, + 489, + 182 + ], + "type": "image", + "image_path": "7c6416cacada0f363c74923d059d762f42f7fa81c8e6134ffcc49317e8dbd473.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 446, + 183, + 489, + 232 + ], + "blocks": [ + { + "bbox": [ + 446, + 183, + 489, + 232 + ], + "lines": [ + { + "bbox": [ + 446, + 183, + 489, + 232 + ], + "spans": [ + { + "bbox": [ + 446, + 183, + 489, + 232 + ], + "type": "image", + "image_path": "218f53aea5a76db39fd0d65aef00584e62e5e7912698f0a033e308a67ee0a04c.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 451, + 232, + 493, + 240 + ], + "lines": [ + { + "bbox": [ + 451, + 232, + 493, + 240 + 
], + "spans": [ + { + "bbox": [ + 451, + 232, + 493, + 240 + ], + "type": "text", + "content": "Holds a heart" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 115, + 269, + 149, + 317 + ], + "blocks": [ + { + "bbox": [ + 115, + 269, + 149, + 317 + ], + "lines": [ + { + "bbox": [ + 115, + 269, + 149, + 317 + ], + "spans": [ + { + "bbox": [ + 115, + 269, + 149, + 317 + ], + "type": "image", + "image_path": "b9f9bd911cb1957027d12e3b090af697c37b59181289ccb50f80cee31330b6b7.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 120, + 318, + 146, + 327 + ], + "lines": [ + { + "bbox": [ + 120, + 318, + 146, + 327 + ], + "spans": [ + { + "bbox": [ + 120, + 318, + 146, + 327 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 187, + 251, + 221, + 302 + ], + "blocks": [ + { + "bbox": [ + 187, + 251, + 221, + 302 + ], + "lines": [ + { + "bbox": [ + 187, + 251, + 221, + 302 + ], + "spans": [ + { + "bbox": [ + 187, + 251, + 221, + 302 + ], + "type": "image", + "image_path": "0cfe742020d4be9653c32c689fe0520e25d939459625bede671046f89193faf3.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 187, + 304, + 221, + 351 + ], + "blocks": [ + { + "bbox": [ + 187, + 304, + 221, + 351 + ], + "lines": [ + { + "bbox": [ + 187, + 304, + 221, + 351 + ], + "spans": [ + { + "bbox": [ + 187, + 304, + 221, + 351 + ], + "type": "image", + "image_path": "57f9ece2d348e3204fd6423b8d20ec463ecc08ba92f76ce108540459d4ec61ff.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 183, + 352, + 220, + 361 + ], + "lines": [ + { + "bbox": [ + 183, + 352, + 220, + 361 + ], + "spans": [ + { + "bbox": [ + 183, + 352, + 220, + 
361 + ], + "type": "text", + "content": "Red lipstick" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 241, + 251, + 276, + 301 + ], + "blocks": [ + { + "bbox": [ + 241, + 251, + 276, + 301 + ], + "lines": [ + { + "bbox": [ + 241, + 251, + 276, + 301 + ], + "spans": [ + { + "bbox": [ + 241, + 251, + 276, + 301 + ], + "type": "image", + "image_path": "92e10a8d74f6c521624fcc74a4b8e28d1e8fb87030d1f10d60ccbb212dff6d92.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 241, + 304, + 274, + 351 + ], + "blocks": [ + { + "bbox": [ + 241, + 304, + 274, + 351 + ], + "lines": [ + { + "bbox": [ + 241, + 304, + 274, + 351 + ], + "spans": [ + { + "bbox": [ + 241, + 304, + 274, + 351 + ], + "type": "image", + "image_path": "7581b475d6dcf90da7bd6fa9413ede0b044f6863fa3d9e1a3695aa3f1af6d9fc.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 246, + 352, + 266, + 361 + ], + "lines": [ + { + "bbox": [ + 246, + 352, + 266, + 361 + ], + "spans": [ + { + "bbox": [ + 246, + 352, + 266, + 361 + ], + "type": "text", + "content": "Angry" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 291, + 251, + 326, + 301 + ], + "blocks": [ + { + "bbox": [ + 291, + 251, + 326, + 301 + ], + "lines": [ + { + "bbox": [ + 291, + 251, + 326, + 301 + ], + "spans": [ + { + "bbox": [ + 291, + 251, + 326, + 301 + ], + "type": "image", + "image_path": "4a05875513c127161cf9b870d83e5153519cc47fd10c5bad13c89d1509883371.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 292, + 304, + 326, + 351 + ], + "blocks": [ + { + "bbox": [ + 292, + 304, + 326, + 351 + ], + "lines": [ + { + "bbox": [ + 292, + 304, + 326, + 351 + ], + 
"spans": [ + { + "bbox": [ + 292, + 304, + 326, + 351 + ], + "type": "image", + "image_path": "bf02d084bbbf77ba0be32d3434cbe7de31fec00e72c05e560dc3c0bca58b9883.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 293, + 352, + 321, + 361 + ], + "lines": [ + { + "bbox": [ + 293, + 352, + 321, + 361 + ], + "spans": [ + { + "bbox": [ + 293, + 352, + 321, + 361 + ], + "type": "text", + "content": "Sleeping" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 345, + 251, + 380, + 301 + ], + "blocks": [ + { + "bbox": [ + 345, + 251, + 380, + 301 + ], + "lines": [ + { + "bbox": [ + 345, + 251, + 380, + 301 + ], + "spans": [ + { + "bbox": [ + 345, + 251, + 380, + 301 + ], + "type": "image", + "image_path": "7b9347cbe39d0d97c13139b3de17ec6df2e7bf64a5bab4002cf6d422cec9fc58.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 345, + 304, + 379, + 351 + ], + "blocks": [ + { + "bbox": [ + 345, + 304, + 379, + 351 + ], + "lines": [ + { + "bbox": [ + 345, + 304, + 379, + 351 + ], + "spans": [ + { + "bbox": [ + 345, + 304, + 379, + 351 + ], + "type": "image", + "image_path": "3aea532bfda61819fc360ccae368d08b872e40ac79f06476945ca16dbfd7f24a.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 348, + 352, + 380, + 361 + ], + "lines": [ + { + "bbox": [ + 348, + 352, + 380, + 361 + ], + "spans": [ + { + "bbox": [ + 348, + 352, + 380, + 361 + ], + "type": "text", + "content": "Surprised" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 396, + 251, + 432, + 301 + ], + "blocks": [ + { + "bbox": [ + 396, + 251, + 432, + 301 + ], + "lines": [ + { + "bbox": [ + 396, + 251, + 432, + 301 + ], + "spans": [ + { + "bbox": [ + 396, + 251, + 432, + 301 + ], 
+ "type": "image", + "image_path": "010ffae2b2f211937844d822b0babd84ef6efb410beaf8188198a6e13bfa9f20.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 397, + 304, + 431, + 351 + ], + "blocks": [ + { + "bbox": [ + 397, + 304, + 431, + 351 + ], + "lines": [ + { + "bbox": [ + 397, + 304, + 431, + 351 + ], + "spans": [ + { + "bbox": [ + 397, + 304, + 431, + 351 + ], + "type": "image", + "image_path": "9f088aed1b7dc8ad66e9e39aba2e931fce0461cb5a475bedb0c96d115d629b41.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 352, + 427, + 361 + ], + "lines": [ + { + "bbox": [ + 406, + 352, + 427, + 361 + ], + "spans": [ + { + "bbox": [ + 406, + 352, + 427, + 361 + ], + "type": "text", + "content": "In love" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 449, + 251, + 489, + 301 + ], + "blocks": [ + { + "bbox": [ + 449, + 251, + 489, + 301 + ], + "lines": [ + { + "bbox": [ + 449, + 251, + 489, + 301 + ], + "spans": [ + { + "bbox": [ + 449, + 251, + 489, + 301 + ], + "type": "image", + "image_path": "f2b19119c788128e29094df8d10d1a574724b25cfc486370a972379a55b18cd6.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 449, + 304, + 485, + 352 + ], + "blocks": [ + { + "bbox": [ + 449, + 304, + 485, + 352 + ], + "lines": [ + { + "bbox": [ + 449, + 304, + 485, + 352 + ], + "spans": [ + { + "bbox": [ + 449, + 304, + 485, + 352 + ], + "type": "image", + "image_path": "2bfeef54da08f0178f117b2c56f5c9ecb12ef22da5eebaff8bf75642fb9dc6ae.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 443, + 352, + 490, + 361 + ], + "lines": [ + { + "bbox": [ + 443, + 352, + 490, + 361 + ], + "spans": [ + { + "bbox": [ + 443, + 352, + 490, + 361 + ], 
+ "type": "text", + "content": "With a Bow tie" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 119, + 400, + 148, + 464 + ], + "blocks": [ + { + "bbox": [ + 119, + 400, + 148, + 464 + ], + "lines": [ + { + "bbox": [ + 119, + 400, + 148, + 464 + ], + "spans": [ + { + "bbox": [ + 119, + 400, + 148, + 464 + ], + "type": "image", + "image_path": "3af46c536e3b680793c22d23188c24209d258d9174de5043a71e0944bf90f59d.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 465, + 146, + 473 + ], + "lines": [ + { + "bbox": [ + 119, + 465, + 146, + 473 + ], + "spans": [ + { + "bbox": [ + 119, + 465, + 146, + 473 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 187, + 373, + 219, + 436 + ], + "blocks": [ + { + "bbox": [ + 187, + 373, + 219, + 436 + ], + "lines": [ + { + "bbox": [ + 187, + 373, + 219, + 436 + ], + "spans": [ + { + "bbox": [ + 187, + 373, + 219, + 436 + ], + "type": "image", + "image_path": "d9910f99497bbc91d306d760fdbe1d4286f2d29eabcfdf2daa82e2b3b82e8f93.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 187, + 437, + 219, + 499 + ], + "blocks": [ + { + "bbox": [ + 187, + 437, + 219, + 499 + ], + "lines": [ + { + "bbox": [ + 187, + 437, + 219, + 499 + ], + "spans": [ + { + "bbox": [ + 187, + 437, + 219, + 499 + ], + "type": "image", + "image_path": "2516fe1d758138ca6bb47524f425cd2f624454987a08cfeb2199eb642b37f093.jpg" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 193, + 501, + 212, + 509 + ], + "lines": [ + { + "bbox": [ + 193, + 501, + 212, + 509 + ], + "spans": [ + { + "bbox": [ + 193, + 501, + 212, + 509 + ], + "type": "text", + "content": "Joker" + } + ] + } + 
], + "index": 45, + "angle": 0, + "type": "image_caption" + } + ], + "index": 44 + }, + { + "type": "image", + "bbox": [ + 240, + 373, + 274, + 436 + ], + "blocks": [ + { + "bbox": [ + 240, + 373, + 274, + 436 + ], + "lines": [ + { + "bbox": [ + 240, + 373, + 274, + 436 + ], + "spans": [ + { + "bbox": [ + 240, + 373, + 274, + 436 + ], + "type": "image", + "image_path": "e675f956d2f59b26068cf4053927512bb30db85673e186c7a12d67b2aa355683.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 239, + 437, + 274, + 498 + ], + "blocks": [ + { + "bbox": [ + 239, + 437, + 274, + 498 + ], + "lines": [ + { + "bbox": [ + 239, + 437, + 274, + 498 + ], + "spans": [ + { + "bbox": [ + 239, + 437, + 274, + 498 + ], + "type": "image", + "image_path": "9bd3cefd9899741a597eee8a1fbfe3aea4effa552ae35e078e0563eaca248ec7.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 247, + 500, + 266, + 508 + ], + "lines": [ + { + "bbox": [ + 247, + 500, + 266, + 508 + ], + "spans": [ + { + "bbox": [ + 247, + 500, + 266, + 508 + ], + "type": "text", + "content": "Boxer" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 293, + 373, + 324, + 436 + ], + "blocks": [ + { + "bbox": [ + 293, + 373, + 324, + 436 + ], + "lines": [ + { + "bbox": [ + 293, + 373, + 324, + 436 + ], + "spans": [ + { + "bbox": [ + 293, + 373, + 324, + 436 + ], + "type": "image", + "image_path": "05082b2adb69a6b6bb88a98db7b6186644cc6b085e777bbf10603e660da1dbc3.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 294, + 437, + 325, + 498 + ], + "blocks": [ + { + "bbox": [ + 294, + 437, + 325, + 498 + ], + "lines": [ + { + "bbox": [ + 294, + 437, + 325, + 498 + ], + "spans": [ + { + "bbox": [ + 294, + 437, + 325, + 498 + ], + "type": 
"image", + "image_path": "84636665fb3b5f8cd3de3e0cd6c743a7fd63ab3d0fccf99f86f805f718aa2284.jpg" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 298, + 500, + 317, + 508 + ], + "lines": [ + { + "bbox": [ + 298, + 500, + 317, + 508 + ], + "spans": [ + { + "bbox": [ + 298, + 500, + 317, + 508 + ], + "type": "text", + "content": "Ghost" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_caption" + } + ], + "index": 50 + }, + { + "type": "image", + "bbox": [ + 348, + 373, + 380, + 436 + ], + "blocks": [ + { + "bbox": [ + 348, + 373, + 380, + 436 + ], + "lines": [ + { + "bbox": [ + 348, + 373, + 380, + 436 + ], + "spans": [ + { + "bbox": [ + 348, + 373, + 380, + 436 + ], + "type": "image", + "image_path": "83c902450a22854385c5e31818e745351f4b7f6d3ff2ecd279b78de0111227cb.jpg" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_body" + } + ], + "index": 52 + }, + { + "type": "image", + "bbox": [ + 351, + 437, + 379, + 498 + ], + "blocks": [ + { + "bbox": [ + 351, + 437, + 379, + 498 + ], + "lines": [ + { + "bbox": [ + 351, + 437, + 379, + 498 + ], + "spans": [ + { + "bbox": [ + 351, + 437, + 379, + 498 + ], + "type": "image", + "image_path": "3555ccd6a6084dd2951bc0305422424cd0ac83f7bc2f4213d1d265ae66fa817a.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 348, + 500, + 377, + 508 + ], + "lines": [ + { + "bbox": [ + 348, + 500, + 377, + 508 + ], + "spans": [ + { + "bbox": [ + 348, + 500, + 377, + 508 + ], + "type": "text", + "content": "Rainbow" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + } + ], + "index": 53 + }, + { + "type": "image", + "bbox": [ + 397, + 373, + 436, + 436 + ], + "blocks": [ + { + "bbox": [ + 397, + 373, + 436, + 436 + ], + "lines": [ + { + "bbox": [ + 397, + 373, + 436, + 436 + ], + "spans": [ + { + "bbox": [ + 397, + 373, + 436, + 436 + ], + "type": "image", + "image_path": 
"463d9661aa179622d18b18b9f040838b35ba40015ca0ba07e7b9d51b07fb2a3a.jpg" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_body" + } + ], + "index": 55 + }, + { + "type": "image", + "bbox": [ + 396, + 437, + 436, + 498 + ], + "blocks": [ + { + "bbox": [ + 396, + 437, + 436, + 498 + ], + "lines": [ + { + "bbox": [ + 396, + 437, + 436, + 498 + ], + "spans": [ + { + "bbox": [ + 396, + 437, + 436, + 498 + ], + "type": "image", + "image_path": "adb0e20bacd650f456bc9f50ed5b3f9b0dc17be6f324a9ba4410ad16529d8cda.jpg" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 500, + 428, + 508 + ], + "lines": [ + { + "bbox": [ + 410, + 500, + 428, + 508 + ], + "spans": [ + { + "bbox": [ + 410, + 500, + 428, + 508 + ], + "type": "text", + "content": "Devil" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_caption" + } + ], + "index": 56 + }, + { + "type": "image", + "bbox": [ + 451, + 373, + 489, + 436 + ], + "blocks": [ + { + "bbox": [ + 451, + 373, + 489, + 436 + ], + "lines": [ + { + "bbox": [ + 451, + 373, + 489, + 436 + ], + "spans": [ + { + "bbox": [ + 451, + 373, + 489, + 436 + ], + "type": "image", + "image_path": "a922d1b2d8aba963c891aba744ba149a73ef6413f15f71d7ba702a0e4ec0843f.jpg" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_body" + } + ], + "index": 58 + }, + { + "type": "image", + "bbox": [ + 447, + 437, + 490, + 498 + ], + "blocks": [ + { + "bbox": [ + 447, + 437, + 490, + 498 + ], + "lines": [ + { + "bbox": [ + 447, + 437, + 490, + 498 + ], + "spans": [ + { + "bbox": [ + 447, + 437, + 490, + 498 + ], + "type": "image", + "image_path": "fc03d2f31ac24839c568e5a97761c70f4b4c4f863b8cb8450d39bf5c5bfe19a7.jpg" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 462, + 500, + 481, + 508 + ], + "lines": [ + { + "bbox": [ + 462, + 500, + 481, + 508 + ], + "spans": [ + { + "bbox": [ + 462, + 500, + 481, + 508 + ], + "type": "text", + "content": 
"Angel" + } + ] + } + ], + "index": 60, + "angle": 0, + "type": "image_caption" + } + ], + "index": 59 + }, + { + "type": "image", + "bbox": [ + 115, + 540, + 150, + 606 + ], + "blocks": [ + { + "bbox": [ + 115, + 540, + 150, + 606 + ], + "lines": [ + { + "bbox": [ + 115, + 540, + 150, + 606 + ], + "spans": [ + { + "bbox": [ + 115, + 540, + 150, + 606 + ], + "type": "image", + "image_path": "b909c31fdb8750d6c6c1777c8da501ecd514a6ddd05de77ed03864a01461fcd3.jpg" + } + ] + } + ], + "index": 61, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 120, + 607, + 146, + 614 + ], + "lines": [ + { + "bbox": [ + 120, + 607, + 146, + 614 + ], + "spans": [ + { + "bbox": [ + 120, + 607, + 146, + 614 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 62, + "angle": 0, + "type": "image_caption" + } + ], + "index": 61 + }, + { + "type": "image", + "bbox": [ + 186, + 518, + 224, + 569 + ], + "blocks": [ + { + "bbox": [ + 186, + 518, + 224, + 569 + ], + "lines": [ + { + "bbox": [ + 186, + 518, + 224, + 569 + ], + "spans": [ + { + "bbox": [ + 186, + 518, + 224, + 569 + ], + "type": "image", + "image_path": "b453684b49416b7a2aefa7c7f21523a0a83a79df0ad163bca8948a6e9ca8e0d3.jpg" + } + ] + } + ], + "index": 63, + "angle": 0, + "type": "image_body" + } + ], + "index": 63 + }, + { + "type": "image", + "bbox": [ + 187, + 570, + 221, + 630 + ], + "blocks": [ + { + "bbox": [ + 187, + 570, + 221, + 630 + ], + "lines": [ + { + "bbox": [ + 187, + 570, + 221, + 630 + ], + "spans": [ + { + "bbox": [ + 187, + 570, + 221, + 630 + ], + "type": "image", + "image_path": "f702c2d2c84b64f1d4a2038431cd3c5ba4b64f9b78a19c4f6dbe61f7f9970649.jpg" + } + ] + } + ], + "index": 64, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 632, + 228, + 639 + ], + "lines": [ + { + "bbox": [ + 175, + 632, + 228, + 639 + ], + "spans": [ + { + "bbox": [ + 175, + 632, + 228, + 639 + ], + "type": "text", + "content": "Raising left hand" + } + ] + } + ], + "index": 65, + "angle": 
0, + "type": "image_caption" + }, + { + "bbox": [ + 105, + 664, + 504, + 685 + ], + "lines": [ + { + "bbox": [ + 105, + 664, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 504, + 685 + ], + "type": "text", + "content": "Figure 21: comparison to Text2LIVE (Bar-Tal et al., 2022). For each input image, the bottom row corresponds to images generated by our model, and the top row corresponds to images generated by the Text2LIVE model." + } + ] + } + ], + "index": 81, + "angle": 0, + "type": "image_caption" + } + ], + "index": 64 + }, + { + "type": "image", + "bbox": [ + 240, + 518, + 273, + 569 + ], + "blocks": [ + { + "bbox": [ + 240, + 518, + 273, + 569 + ], + "lines": [ + { + "bbox": [ + 240, + 518, + 273, + 569 + ], + "spans": [ + { + "bbox": [ + 240, + 518, + 273, + 569 + ], + "type": "image", + "image_path": "6959308c2bca6de1ceebd4ea567de01285b1bd9eae92ebc5b6dc1ac669633b74.jpg" + } + ] + } + ], + "index": 66, + "angle": 0, + "type": "image_body" + } + ], + "index": 66 + }, + { + "type": "image", + "bbox": [ + 239, + 571, + 273, + 630 + ], + "blocks": [ + { + "bbox": [ + 239, + 571, + 273, + 630 + ], + "lines": [ + { + "bbox": [ + 239, + 571, + 273, + 630 + ], + "spans": [ + { + "bbox": [ + 239, + 571, + 273, + 630 + ], + "type": "image", + "image_path": "ed2f83303f87a785d390431fdb2a8a17fe45b8d0dbf384b82c9494cf133d45c9.jpg" + } + ] + } + ], + "index": 67, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 238, + 632, + 272, + 639 + ], + "lines": [ + { + "bbox": [ + 238, + 632, + 272, + 639 + ], + "spans": [ + { + "bbox": [ + 238, + 632, + 272, + 639 + ], + "type": "text", + "content": "Blue pants" + } + ] + } + ], + "index": 68, + "angle": 0, + "type": "image_caption" + } + ], + "index": 67 + }, + { + "type": "image", + "bbox": [ + 290, + 518, + 324, + 569 + ], + "blocks": [ + { + "bbox": [ + 290, + 518, + 324, + 569 + ], + "lines": [ + { + "bbox": [ + 290, + 518, + 324, + 569 + ], + "spans": [ + { + "bbox": [ + 290, + 518, + 324, + 569 + 
], + "type": "image", + "image_path": "631278d8432b0e6a938afc3655a8e35a2050340fd294be5ab296d3eb2285664e.jpg" + } + ] + } + ], + "index": 69, + "angle": 0, + "type": "image_body" + } + ], + "index": 69 + }, + { + "type": "image", + "bbox": [ + 290, + 570, + 324, + 630 + ], + "blocks": [ + { + "bbox": [ + 290, + 570, + 324, + 630 + ], + "lines": [ + { + "bbox": [ + 290, + 570, + 324, + 630 + ], + "spans": [ + { + "bbox": [ + 290, + 570, + 324, + 630 + ], + "type": "image", + "image_path": "abba2c262cc298f2c7009964db6cff36e1dda32deede4b015d15fd87906e22ad.jpg" + } + ] + } + ], + "index": 70, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 290, + 632, + 324, + 639 + ], + "lines": [ + { + "bbox": [ + 290, + 632, + 324, + 639 + ], + "spans": [ + { + "bbox": [ + 290, + 632, + 324, + 639 + ], + "type": "text", + "content": "Blackshirt" + } + ] + } + ], + "index": 71, + "angle": 0, + "type": "image_caption" + } + ], + "index": 70 + }, + { + "type": "image", + "bbox": [ + 336, + 518, + 393, + 569 + ], + "blocks": [ + { + "bbox": [ + 336, + 518, + 393, + 569 + ], + "lines": [ + { + "bbox": [ + 336, + 518, + 393, + 569 + ], + "spans": [ + { + "bbox": [ + 336, + 518, + 393, + 569 + ], + "type": "image", + "image_path": "880873679cefc93e2480f07e3ea687bb537cdfe36b3d5519d5a94eb58df46bae.jpg" + } + ] + } + ], + "index": 72, + "angle": 0, + "type": "image_body" + } + ], + "index": 72 + }, + { + "type": "image", + "bbox": [ + 342, + 570, + 378, + 630 + ], + "blocks": [ + { + "bbox": [ + 342, + 570, + 378, + 630 + ], + "lines": [ + { + "bbox": [ + 342, + 570, + 378, + 630 + ], + "spans": [ + { + "bbox": [ + 342, + 570, + 378, + 630 + ], + "type": "image", + "image_path": "7d9b86ff58d570bb7971130f3d6d15e0cfe62217aa7e258105121a9df795f7d0.jpg" + } + ] + } + ], + "index": 73, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 348, + 632, + 370, + 639 + ], + "lines": [ + { + "bbox": [ + 348, + 632, + 370, + 639 + ], + "spans": [ + { + "bbox": [ + 348, + 632, + 370, + 
639 + ], + "type": "text", + "content": "Sitting" + } + ] + } + ], + "index": 74, + "angle": 0, + "type": "image_caption" + } + ], + "index": 73 + }, + { + "type": "image", + "bbox": [ + 403, + 518, + 436, + 569 + ], + "blocks": [ + { + "bbox": [ + 403, + 518, + 436, + 569 + ], + "lines": [ + { + "bbox": [ + 403, + 518, + 436, + 569 + ], + "spans": [ + { + "bbox": [ + 403, + 518, + 436, + 569 + ], + "type": "image", + "image_path": "71a302b0d07a62c85ac84b659375fbb915f4c6c3d1a14ba06a861b30fc981690.jpg" + } + ] + } + ], + "index": 75, + "angle": 0, + "type": "image_body" + } + ], + "index": 75 + }, + { + "type": "image", + "bbox": [ + 403, + 570, + 435, + 630 + ], + "blocks": [ + { + "bbox": [ + 403, + 570, + 435, + 630 + ], + "lines": [ + { + "bbox": [ + 403, + 570, + 435, + 630 + ], + "spans": [ + { + "bbox": [ + 403, + 570, + 435, + 630 + ], + "type": "image", + "image_path": "3778a814044c9c8d6383536ea1a436a1275efe16261aa768e161674319be9f1b.jpg" + } + ] + } + ], + "index": 76, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 632, + 437, + 639 + ], + "lines": [ + { + "bbox": [ + 405, + 632, + 437, + 639 + ], + "spans": [ + { + "bbox": [ + 405, + 632, + 437, + 639 + ], + "type": "text", + "content": "With a tie" + } + ] + } + ], + "index": 77, + "angle": 0, + "type": "image_caption" + } + ], + "index": 76 + }, + { + "type": "image", + "bbox": [ + 452, + 518, + 490, + 569 + ], + "blocks": [ + { + "bbox": [ + 452, + 518, + 490, + 569 + ], + "lines": [ + { + "bbox": [ + 452, + 518, + 490, + 569 + ], + "spans": [ + { + "bbox": [ + 452, + 518, + 490, + 569 + ], + "type": "image", + "image_path": "326f61c25b54aa3b466733c3edac9ae344ec59d7b178baa594395f55321ff85a.jpg" + } + ] + } + ], + "index": 78, + "angle": 0, + "type": "image_body" + } + ], + "index": 78 + }, + { + "type": "image", + "bbox": [ + 452, + 570, + 490, + 630 + ], + "blocks": [ + { + "bbox": [ + 452, + 570, + 490, + 630 + ], + "lines": [ + { + "bbox": [ + 452, + 570, + 490, + 630 + ], + 
"spans": [ + { + "bbox": [ + 452, + 570, + 490, + 630 + ], + "type": "image", + "image_path": "0917f7eaf2d8680964deb102d161e938ba8209850f1de97bf9e2e327de708bdb.jpg" + } + ] + } + ], + "index": 79, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 449, + 632, + 491, + 639 + ], + "lines": [ + { + "bbox": [ + 449, + 632, + 491, + 639 + ], + "spans": [ + { + "bbox": [ + 449, + 632, + 491, + 639 + ], + "type": "text", + "content": "Holds a heart" + } + ] + } + ], + "index": 80, + "angle": 0, + "type": "image_caption" + } + ], + "index": 79 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 82 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 118, + 503, + 653 + ], + "blocks": [ + { + "bbox": [ + 108, + 118, + 503, + 653 + ], + "lines": [ + { + "bbox": [ + 108, + 118, + 503, + 653 + ], + "spans": [ + { + "bbox": [ + 108, + 118, + 503, + 653 + ], + "type": "image", + "image_path": "9d4ae41908158cd6c87ece00b8dda5786c6a627cbf87f0c8dbd3973e8a335396.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 659, + 506, + 691 + ], + "lines": [ + { + "bbox": [ + 104, + 659, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 506, + 691 + ], + "type": "text", + "content": "Figure 22: comparison to Textual Inversion (Gal et al., 2022). 
For each input image, the bottom row corresponds to images generated by our model, and the top row corresponds to images generated by the Textual Inversion model." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_content_list.json b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d09aabc0afc1a52c68ba821afa4d21cf52d16dc5 --- /dev/null +++ b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_content_list.json @@ -0,0 +1,1689 @@ +[ + { + "type": "text", + "text": "SIMPLEKT: A SIMPLE BUT TOUGH-TO-BEAT BASELINE FOR KNOWLEDGE TRACING", + "text_level": 1, + "bbox": [ + 161, + 122, + 831, + 165 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zitao Liu", + "bbox": [ + 174, + 189, + 250, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Guangdong Institute of Smart Education, Jinan University, Guangzhou, China", + "bbox": [ + 
173, + 202, + 700, + 216 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "liuzitao@jnu.edu.cn", + "bbox": [ + 173, + 217, + 368, + 229 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qiongqiong Liu, Jiahao Chen, Shuyan Huang*", + "bbox": [ + 171, + 249, + 510, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "TAL Education Group, Beijing, China", + "bbox": [ + 173, + 263, + 433, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{liuqiongqiongl, chenjiahao, huangshuyan}@tal.com", + "bbox": [ + 173, + 275, + 665, + 288 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weiqi Luo", + "bbox": [ + 173, + 307, + 253, + 322 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Guangdong Institute of Smart Education, Jinan University, Guangzhou, China", + "bbox": [ + 173, + 322, + 700, + 335 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "lwq@jnu.edu.cn", + "bbox": [ + 173, + 336, + 317, + 348 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 448, + 382, + 547, + 395 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Knowledge tracing (KT) is the problem of predicting students' future performance based on their historical interactions with intelligent tutoring systems. Recently, many works present lots of special methods for applying deep neural networks to KT from different perspectives like model architecture, adversarial augmentation and etc., which make the overall algorithm and system become more and more complex. Furthermore, due to the lack of standardized evaluation protocol (Liu et al. 2022), there is no widely agreed KT baselines and published experimental comparisons become inconsistent and self-contradictory, i.e., the reported AUC scores of DKT on ASSISTments2009 range from 0.721 to 0.821 (Minn et al. 2018 Yeung & Yeung 2018). 
Therefore, in this paper, we provide a strong but simple baseline method to deal with the KT task named SIMPLEKT. Inspired by the Rasch model in psychometrics, we explicitly model question-specific variations to capture the individual differences among questions covering the same set of knowledge components that are a generalization of terms of concepts or skills needed for learners to accomplish steps in a task or a problem. Furthermore, instead of using sophisticated representations to capture student forgetting behaviors, we use the ordinary dot-product attention function to extract the time-aware information embedded in the student learning interactions. Extensive experiments show that such a simple baseline is able to always rank top 3 in terms of AUC scores and achieve 57 wins, 3 ties and 16 loss against 12 DLKT baseline methods on 7 public datasets of different domains. We believe this work serves as a strong baseline for future KT research. Code is available at https://github.com/pykt-team/pykt-toolkit!", + "bbox": [ + 220, + 411, + 774, + 714 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 164, + 737, + 332, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Knowledge tracing (KT) is a sequential prediction task that aims to predict the outcomes of students over questions by modeling their mastery of knowledge, i.e., knowledge states, as they interact with learning platforms such as massive open online courses and intelligent tutoring systems, as shown in Figure ①. 
Solving the KT problems may help teachers better detect students that need further attention, or recommend personalized learning materials to students.", + "bbox": [ + 159, + 766, + 833, + 833 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The KT related research has been studied since 1990s where Corbett and Anderson, to the best of our knowledge, were the first to estimate students' current knowledge with regard to each individ-", + "bbox": [ + 159, + 838, + 833, + 865 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 161, + 59, + 480, + 74 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*The corresponding author: Shuyan Huang.", + "bbox": [ + 183, + 872, + 452, + 885 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1We merged our model to the PYKT benchmark at https://pykt.org/", + "bbox": [ + 183, + 885, + 653, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 492, + 921, + 504, + 932 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ual knowledge component (KC) (Corbett & Anderson 1994). A KC is a description of a mental structure or process that a learner uses, alone or in combination with other KCs, to accomplish steps in a task or a problem? Since then, many attempts have been made to solve the KT problem, such as probabilistic graphical models (Kaiser et al. 2017) and factor analysis based models (Cen et al. 2006; Lavoué et al. 2018; Thai-Nghe et al. 2012). Recently, with the rapid development of deep neural networks, many deep learning based knowledge tracing (DLKT) models are developed, such as auto-regressive based deep sequential KT models (Piech et al. 2015; Yeung & Yeung 2018; Chen et al. 2018; Wang et al. 2019; Guo et al. 2021; Long et al. 2021; Chen et al. 2023), memory-augmented KT models (Zhang et al. 
2017; Abdelrahman & Wang 2019; Yeung 2019), attention based KT models (Pandey & Karypis 2019; Pandey & Srivastava 2020; Choi et al. 2020; Ghosh et al. 2020; Pu et al. 2020), and graph based KT models (Nakagawa et al. 2019; Yang et al. 2020; Tong et al. 2020).", + "bbox": [ + 159, + 126, + 836, + 287 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bf81367ab97873c8d7e36048a7e047999a27c48e636f4e4f74b1048c84a31c77.jpg", + "image_caption": [ + "Figure 1: Graphical illustration of the task of KnowledgeTracing. “√” and “×” denote the question is answered correctly and incorrectly." + ], + "image_footnote": [], + "bbox": [ + 163, + 298, + 840, + 376 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although DLKT approaches have constituted new paradigms of the KT problem and achieved promising results, recently developed DLKT models seem to be more and more complex and resemble each other with very limited nuances from the methodological perspective: applying different neural components to capture student forgetting behaviors $^{3}$ (Ghosh et al. 2020; Nagatani et al. 2019), recency effects (Zhang et al. 2021), and various auxiliary information including relations between questions and KCs (Tong et al. 2020; Pandey & Srivastava 2020; Liu et al. 2021; Yang et al. 2020), question text content (Liu et al. 2019; Wang et al. 2020a), question difficulty level (Liu et al. 2021; Shen et al. 2022), and students' learning ability (Shen et al. 2020). Furthermore, published DLKT baseline results surprisingly diverge. For example, the reported AUC scores of DKT and AKT on ASSISTments2009 range from 0.721 to 0.821 (Minn et al. 2018; Yeung & Yeung 2018) and from 0.747 to 0.835 in (Ghosh et al. 2020; Wang et al. 2021) respectively. Another example is that for the performance of DKT on the ASSISTments2009 dataset, it is recognized as one of the best baselines by Ghosh et al. (2020) while Long et al. (2022) and Zhang et al. 
(2021) showed that its performance is below the average. Recent survey studies by Sarsa et al. (2022) and Liu et al. (2022) summarized aforementioned inconsistencies of baseline results and showed evidence that variations in hyper-parameters and data pre-processing procedures contribute significantly to prediction performance of DLKT models. Specifically, Sarsa et al. (2022) empirically found that even simple baselines with little predictive value may outperform DLKT models with sophisticated neural components. Liu et al. (2022) built a standardized DLKT benchmark platform and showed that the improvement of many DLKT approaches is minimal compared to the very first DLKT model proposed by Piech et al. (2015).", + "bbox": [ + 159, + 410, + 835, + 686 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Therefore, in this paper, we propose SIMPLEKT, a simple but tough-to-beat KT baseline that is simple to implement, computationally friendly and robust to a wide range of KT datasets across different domains. Motivated by the Rasch model that is a classic yet powerful model in psychometrics, the proposed SIMPLEKT approach captures the individual differences among questions covering the same set of KCs by representing each question's embedding as an additive combination of the average of its corresponding KCs' embeddings and a question-specific variation. Furthermore, different from many existing models that try to capture various aforementioned relations and information, the SIMPLEKT is purely based on the attention mechanism and uses the ordinary dot-product attention function to capture the contextual information embedded in the student learning interactions. 
To comprehensively and systematically evaluate the performance of SIMPLEKT, we choose to use the publicly available PYKT benchmark implementation to guarantee valid and reproducible comparisons against 12 DLKT methods on 7 popular datasets across differ-", + "bbox": [ + 159, + 690, + 836, + 848 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 163, + 60, + 478, + 74 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "$^{2}$ A KC is a generalization of everyday terms like concept, principle, fact, or skill.", + "bbox": [ + 181, + 859, + 677, + 872 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "3The Rasch model is also known as the 1PL item response theory model.", + "bbox": [ + 184, + 873, + 628, + 885 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "4https://www.pykt.org/", + "bbox": [ + 186, + 885, + 326, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 492, + 921, + 504, + 932 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ent domains. Results shown that the SIMPLEKT beats a wide range of modern neural KT models that based on graph neural networks, memory augmented neural networks, and adversarial neural networks. This suggests that this simple method should be used as the baseline to beat future KT research, especially when designing sophisticated neural KT architectures. 
To encourage reproducible research, all the related codes, data and the learned SIMPLEKT models are publicly available at https://github.com/pykt-team/pykt-toolkit", + "bbox": [ + 159, + 126, + 835, + 206 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 161, + 224, + 342, + 239 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, deep learning technique have been widely applied into KT task for student's historical learning modeling and the future performance prediction. Existing DLKT approaches can be categorized into the following 5 categories:", + "bbox": [ + 159, + 253, + 833, + 294 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "C1: Deep sequential models. DLKT models that use auto-regressive architectures to capture students' chronologically ordered interactions. For example, (Piech et al., 2015) proposed the very first DKT model that utilizes an LSTM layer to estimate the knowledge mastery. (Lee & Yeung, 2019) proposed to enhance DKT with a skill encoder that combines student learning activities and KC representations.", + "bbox": [ + 159, + 299, + 835, + 365 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "C2: Memory augmented models. DLKT models that capture latent relations between KCs and student knowledge states via memory networks. For instance, (Zhang et al., 2017) exploited and stored the KC relationships via a static key memory matrix and predict students' knowledge mastery levels with a dynamic value memory matrix.", + "bbox": [ + 159, + 370, + 833, + 425 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "C3: Adversarial based models. DLKT models that utilize the adversarial techniques to generate perturbations to improve model generalization capability. 
(Guo et al., 2021) jointly train an attentive-LSTM KT model with both original and adversarial examples.", + "bbox": [ + 159, + 430, + 833, + 470 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "C4: Graph based models. DLKT models that use the graph neural networks to model intrinsic relations among questions, KCs and interactions. (Liu et al., 2021) presented a question-KC bipartite graph to explicitly capture question-level and KC-level inner-relations and question difficulties. (Yang et al., 2020) introduced a graph convolutional network to obtain the representation of the question-KC correlations.", + "bbox": [ + 159, + 476, + 833, + 542 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "C5: Attention based models. DLKT models that capture dependencies between interactions via the attention mechanism. For example, (Pandey & Karypis, 2019) used self-attention network to capture the relevance between KCs and students' historical interactions. Choi et al., 2020 designed an encoder-decoder structure to represent the exercise and response embedding sequences. (Ghosh et al., 2020) performed three self-attention modules and explicitly model students' forgetting behaviors via a monotonic attention mechanism.", + "bbox": [ + 159, + 548, + 835, + 627 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Please note that the above categorizations are not exclusive and related techniques can be combined. For example, (Abdelrahman & Wang, 2019) proposed a sequential key-value memory network to unify the strengths of recurrent modeling capacity and memory capacity. 
The proposed SIMPLEKT approach belongs to C5 and it purely models student interactions by using the very ordinary dot-product attention function.", + "bbox": [ + 159, + 634, + 833, + 701 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 A SIMPLE METHOD FOR KNOWLEDGE TRACING", + "text_level": 1, + "bbox": [ + 161, + 719, + 611, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 PROBLEM STATEMENT", + "text_level": 1, + "bbox": [ + 161, + 747, + 369, + 760 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this work, our objective is given an arbitrary question $q_*$ to predict the probability of whether a student will answer $q_*$ correctly or not based on the student's historical interaction data. More specifically, for each student $S$ , we assume that we have observed a chronologically ordered collection of $T$ past interactions i.e., $S = \\{s_j\\}_{j=1}^T$ . Each interaction is represented as a 4-tuple $s$ , i.e., $s = \\langle q, \\{c\\}, r, t \\rangle$ , where $q, \\{c\\}, r, t$ represent the specific question, the associated KC set, the binary valued student response and student's response time step respectively. 
We would like to estimate the probability $\\hat{r}_*$ of the student's performance on arbitrary question $q_*$ .", + "bbox": [ + 159, + 771, + 835, + 866 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 161, + 60, + 480, + 74 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "5Response is a binary valued indicator variable where 1 represents the student correctly answered the question, and 0 otherwise.", + "bbox": [ + 159, + 873, + 833, + 898 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 492, + 921, + 504, + 932 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 THE SIMPLEKT APPROACH", + "text_level": 1, + "bbox": [ + 161, + 127, + 403, + 139 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 REPRESENTATIONS OF QUESTIONS, KCs AND RESPONSES.", + "text_level": 1, + "bbox": [ + 161, + 151, + 635, + 165 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Effectively representing student interactions is crucial to the success of the DLKT models. In real-world educational scenarios, the question bank is usually much bigger than the set of KCs. For example, the number of questions is more than 1500 times larger than the number of KCs in the Algebra2005 dataset (described in Section 4.1). 
Therefore, to effectively learn and fairly evaluate the DLKT models from such highly sparse question-response data, following the previous work of Ghosh et al., 2020 and Liu et al., 2022, we artificially transform the original question-response data into KC-response data by expanding each question-level interaction into multiple KC-level interactions when the question is associated with a set of KCs (illustrated in Figure 2).", + "bbox": [ + 159, + 174, + 835, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Furthermore, due to the fact that questions covering the same set of KCs may have various difficulty levels, students perform significantly different. As shown in Figure 2 even through questions $q_{2}$ and $q_{4}$ have the same set of KCs, i.e., $c_{1}$ and $c_{3}$ , students may get $q_{2}$ wrong but $q_{4}$ correct. Therefore, it is unrealistic to treat every KC in the expanded KC-response sequence identical. Inspired by the very classic and simple Rasch model in psychometrics that explicitly uses a scalar to characterize the latent factor of question difficulty, we choose to use a question-specific difficulty vector to capture the individual differences among questions on the same KC. More specifically, the $t$ th representations of KC (i.e., $\\mathbf{x}_t$ ) and interaction (i.e., $\\mathbf{y}_t$ ) in the expanded KC sequence of concept $c_{k}$ are represented as follows:", + "bbox": [ + 159, + 282, + 531, + 491 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d8c6b96a374ed2a73c921533fbe0c22a78147fd1c793cec2a307b508c7c6be26.jpg", + "image_caption": [ + "Figure 2: Graphical illustration of transforming the original question-response data into KC-response data." 
+ ], + "image_footnote": [], + "bbox": [ + 556, + 294, + 823, + 443 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {t} = \\mathbf {z} _ {c _ {k}} \\oplus \\mathbf {m} _ {q _ {j}} \\odot \\mathbf {v} _ {c _ {k}}; \\quad \\mathbf {y} _ {t} = \\mathbf {z} _ {c _ {k}} \\oplus \\mathbf {r} _ {q _ {j}}; \\quad \\mathbf {z} _ {c _ {k}} = \\mathbf {W} _ {c} \\cdot \\mathbf {e} _ {c _ {k}}; \\quad \\mathbf {r} _ {q _ {j}} = \\mathbf {W} _ {q} \\cdot \\mathbf {e} _ {q _ {j}}\n$$\n", + "text_format": "latex", + "bbox": [ + 221, + 497, + 771, + 514 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{z}_{c_k}$ denotes the latent representation of the KC $c_{k}$ . $\\mathbf{m}_{q_j}$ denotes the difficulty vector of question $q_{j}$ and question $q_{j}$ contains the KC $c_{k}$ . $\\mathbf{v}_{c_k}$ represents the question-centric variation of $q_{j}$ covering this KC $c_{k}$ . $\\mathbf{r}_{q_j}$ denotes the representation of student response on $q_{j}$ . $\\mathbf{e}_{c_k}$ is the $n$ -dimensional one-hot vector indicating the corresponding KC and $\\mathbf{e}_{q_j}$ is the 2-dimensional one-hot vector indicating whether the question is answered correctly. $\\mathbf{z}_{c_k}$ , $\\mathbf{m}_{q_j}$ , $\\mathbf{v}_{c_k}$ and $\\mathbf{r}_{q_j}$ are $d$ -dimensional learnable real-valued vectors. $\\mathbf{W}_c\\in \\mathbb{R}^{d\\times n}$ and $\\mathbf{W}_q\\in \\mathbb{R}^{d\\times 2}$ are learnable linear transformation operations. $\\odot$ and $\\oplus$ are the element-wise product and addition operators. 
$n$ is the total number of KCs.", + "bbox": [ + 159, + 519, + 835, + 613 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 PREDICTION WITH ORDINARY DOT-PRODUCT ATTENTION.", + "text_level": 1, + "bbox": [ + 161, + 626, + 631, + 639 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Different from many existing DLKT approaches that use sophisticated neural components to model student learning and/or forgetting behaviors, we choose to use the ordinary dot-product attention function to explore and extract knowledge states from students' past learning history. Specifically, the retrieved knowledge state $(\\mathbf{h}_{t + 1})$ at the $(t + 1)$ th timestamp is computed as follows:", + "bbox": [ + 159, + 649, + 833, + 702 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {h} _ {t + 1} = \\text {S e l f A t t e n t i o n} (Q = \\mathbf {x} _ {t + 1}, K = \\left\\{\\mathbf {x} _ {1}, \\dots , \\mathbf {x} _ {t} \\right\\}, V = \\left\\{\\mathbf {y} _ {1}, \\dots , \\mathbf {y} _ {t} \\right\\}).\n$$\n", + "text_format": "latex", + "bbox": [ + 242, + 709, + 751, + 726 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Then we use a two-layer fully connected network to refine the knowledge state and the overall optimization function is as follows:", + "bbox": [ + 159, + 732, + 833, + 759 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\eta_ {t + 1} = \\mathbf {w} ^ {\\top} \\cdot \\operatorname {R e L U} \\left(\\mathbf {W} _ {2} \\cdot \\operatorname {R e L U} \\left(\\mathbf {W} _ {1} \\cdot \\left[ \\mathbf {h} _ {t + 1}; \\mathbf {x} _ {t + 1} \\right] + \\mathbf {b} _ {1}\\right) + \\mathbf {b} _ {2}\\right) + b\n$$\n", + "text_format": "latex", + "bbox": [ + 262, + 763, + 732, + 781 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = - \\sum \\big (r _ {t} \\log \\sigma (\\eta_ {t}) + (1 - r _ {t}) \\log (1 - \\sigma (\\eta_ {t})) \\big)\n$$\n", + 
"text_format": "latex", + "bbox": [ + 284, + 783, + 636, + 804 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{W}_1, \\mathbf{W}_2, \\mathbf{w}, \\mathbf{b}_1, \\mathbf{b}_2$ and $b$ are trainable parameters and $\\mathbf{W}_1 \\in \\mathbb{R}^{d \\times 2d}$ , $\\mathbf{W}_2 \\in \\mathbb{R}^{d \\times d}$ , $\\mathbf{w}, \\mathbf{b}_1, \\mathbf{b}_2 \\in \\mathbb{R}^{d \\times 1}$ , $b$ is scalar. $\\sigma(\\cdot)$ is the sigmoid function.", + "bbox": [ + 159, + 807, + 833, + 837 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.3 RELATIONSHIP TO EXISTING DLKT MODELS.", + "text_level": 1, + "bbox": [ + 161, + 848, + 546, + 862 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Although the proposed SIMPLEKT belongs to model category C5 discussed in Section 2, it is distinguished from attention based representative DLKT models such as AKT (Ghosh et al., 2020), SAKT", + "bbox": [ + 159, + 871, + 833, + 902 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 161, + 60, + 478, + 74 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 492, + 921, + 504, + 931 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Pandey & Karypis (2019) and SAINT (Choi et al., 2020). 
The difference between SIMPLEKT and AKT are threefold: first, we omit the self-attentive question encoder and knowledge encoder in AKT and directly feed the representations of $\\mathbf{x}_t$ s and $\\mathbf{y}_t$ s into attention based knowledge state extractor; second, instead of using time decayed monotonic attention function to extract the initial knowledge state, we choose to use the ordinary dot-product function that is simple and free of hyper-parameters; third, interaction representations $\\mathbf{y}_t$ s are simply computed by adding representations of KCs and responses while AKT uses extra parameters to explicitly model the effects of question difficulty in interaction representations. When comparing SIMPLEKT to SAKT and SAINT, we explicitly model the latent question-centric difficulty when learning the KC representations while SAKT and SAINT ignore the question-level difference and treat all questions are identical if they contain the same set of KCs. Furthermore, SAINT adopts the encoder-decoder architecture and utilizes Transformers to model the student interaction sequence while our SIMPLEKT only uses the dot-product attention function.", + "bbox": [ + 159, + 126, + 835, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 161, + 322, + 322, + 335 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 DATASETS", + "text_level": 1, + "bbox": [ + 161, + 354, + 282, + 366 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this paper, we experiment with 7 widely used datasets to comprehensively evaluate the performance of our models. These 7 datasets can be divided into 2 categories: (1) D1: Datasets containing information of both questions and KCs; and (2) D2: Datasets containing information of either questions or KCs. Table1 gives real samples of question and KCs from both D1 and D2 categories. 
The detailed statistics of each dataset are listed in Appendix A.1", + "bbox": [ + 159, + 381, + 833, + 449 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1.1 DATASETS CONTAINING INFORMATION OF BOTH QUESTIONS AND KCs", + "text_level": 1, + "bbox": [ + 161, + 456, + 722, + 469 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ASSISTments2009 (AS2009) This dataset is about math exercises and collected from the free online tutoring ASSISTments platform in the school year 2009-2010. It is widely used as the standard benchmark for KT methods over the last decade (Feng et al., 2009; Ghosh et al., 2020; Zhang et al., 2017). It includes 337,4115 interactions, 4,661 sequences, 17,737 questions, 123 KCs and each question has 1.1968 KCs on average.", + "bbox": [ + 159, + 480, + 835, + 546 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Algebra2005 (AL2005) This dataset stems from KDD Cup 2010 EDM Challenge, including the detailed step-level student responses to the mathematical problems (Stamper et al. 2010). Similar to (Choffin et al., Ghosh et al., 2020, Zhang et al., 2017), a unique question is constructed by concatenating the problem name and step name. It has 884,098 interactions, 4,712 sequences, 173,113 questions, 112 KCs and the average KCs is 1.3521.", + "bbox": [ + 159, + 552, + 833, + 620 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Bridge2006 (BD2006) This dataset is also from the KDD Cup 2010 EDM Challenge and its unique question construction is similar to the process used in Algebra2005. The dataset has 1,824,310 interactions, 9,680 sequences, 129,263 questions, 493 KCs and the average KCs is 1.0136.", + "bbox": [ + 159, + 625, + 833, + 665 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "NIPS34 This dataset is provided by NeurlPS 2020 Education Challenge which contains students' answers to mathematics questions from Eedi. We use the dataset of Task 3 & Task 4 to evaluate our models (Wang et al. 2020b). 
There are 1,399,470 interactions, 9,401 sequences, 948 questions, 57 KCs, each question has 1.0137 KCs on average.", + "bbox": [ + 159, + 670, + 833, + 725 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1.2 DATASETS CONTAINING INFORMATION OF EITHER QUESTIONS OR KCS", + "text_level": 1, + "bbox": [ + 161, + 744, + 724, + 758 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Statics2011 This dataset is collected from an engineering statics course taught at the Carnegie Mellon University during Fall 2011 (Steif & Bier, 2014). Its unique question construction is similar to the process used in Algebra2005. The dataset has 189,292 interactions, 1,034 sequences and 1,223 questions.", + "bbox": [ + 159, + 769, + 833, + 822 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1ea70bac986d3459b8c8ad5e0c1787a5f929676b5cd889e905e5c1f5259f0041.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 161, + 838, + 835, + 902 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 161, + 60, + 478, + 74 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 492, + 921, + 504, + 932 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ASSISTments2015 (AS2015) Similar to ASSISTments2009, this dataset is collected from the ASSISTments platform in the year of 2015, and it has the largest number of students among the other ASSISTments datasets. It ends up with 682,789 interactions, 19,292 sequences and 100 KCs after pre-processing.", + "bbox": [ + 159, + 124, + 833, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "
It has 987,593 interactions, 20,114 sequences and 2,748 questions.", + "bbox": [ + 159, + 185, + 833, + 214 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Following the data pre-processing steps suggested by (Liu et al., 2022), we remove student sequences shorter than 3 attempts and set the maximum length of student interaction history to 200 for a high computational efficiency.", + "bbox": [ + 159, + 218, + 833, + 259 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/020269c60fa46018891b4e4413898d3402f6c82b753277555d6daa506a9c7b50.jpg", + "table_caption": [ + "Table 1: Examples of questions and KCs from D1 and D2." + ], + "table_footnote": [], + "table_body": "
CategoryDatasetQuestionKnowledge Components
D1NIPS34Which calculation is incorrect? A.(-7)*2=-14 B.(-7)*( -2)=-14 C.7*2=14 D. 7*(-2)=-14Multiplying and Dividing Negative Numbers
D1NIPS34Which of the following number is a factor of 60 and a multiple of 6 ... A.3 B.12 C.20 D.120Factors and Highest Common Factor\nMultiples and Lowest Common Multiple
D2POJGiven 2 equations on the variables x and y, solve for x and y.Not Available
D2POJGiven a big integer number, you are required to find out whether it's a prime number.Not Available
", + "bbox": [ + 171, + 275, + 823, + 328 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 BASELINES", + "text_level": 1, + "bbox": [ + 161, + 337, + 289, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To comprehensively and systematically evaluate the performance of SIMPLEKT, we compare SIMPLEKT against 12 DLKT baseline models from aforementioned 5 categories in Section2 as follows:", + "bbox": [ + 159, + 361, + 833, + 391 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "C1: DKT (Piech et al. 2015): directly uses RNNs to model students' learning processes.", + "- C1: DKT+ (Yeung & Yeung 2018): improves DKT by addressing the reconstruction and inconsistent issues.", + "C1: DKT-F (Nagatani et al. 2019): improves DKT by considering students' forgetting behaviors.", + "C1: KQN (Lee & Yeung, 2019): utilizes the dot product of the students' ability and KC representations to predict student performance.", + "C1: LPKT (Shen et al. 2021): designs the learning cell to model students' learning processes.", + "- C1: IEKT (Long et al., 2021): estimates student knowledge state via the student cognition and knowledge acquisition estimation modules.", + "C2: DKVMN (Zhang et al., 2017): exploits the relationships among KCs and estimate student mastery via memory networks.", + "C3: ATKT (Guo et al., 2021): uses adversarial perturbations to enhance the generalization of the attention-LSTM based KT model.", + "- C4: GKT (Nakagawa et al., 2019): casts the knowledge structure as a graph and reformulate the KT task as a node-level classification problem.", + "C5: SAKT (Pandey & Karypis 2019): uses self-attention to identify the relevance between the interactions and KCs.", + "- C5: SAINT (Choi et al., 2020): a Transformer-based model for KT that encode exercise and responses in the encoder and decoder respectively.", + "- C5: AKT (Ghosh et al. 
2020): models forgetting behaviors during the relevance computation between historical interactions and target questions." + ], + "bbox": [ + 161, + 400, + 833, + 731 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3 EXPERIMENTAL SETUP", + "text_level": 1, + "bbox": [ + 161, + 747, + 371, + 760 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Similar to (Liu et al. 2022), we randomly withhold $20\\%$ of the students' sequences for model evaluation and we perform standard 5-fold cross validation on the rest $80\\%$ of each dataset. We select ADAM (Kingma & Ba 2014) as the optimizer to train our model. The maximum of the training epochs is set to 200, and an early stopping strategy is used to speed up the training process. The embedding dimension, the hidden state dimension, the two dimension of the prediction layers are set to [64, 128], the learning rate and dropout rate are set to [1e-3, 1e-4, 1e-5] and [0.05, 0.1, 0.3, 0.5] respectively, the number of blocks and attention heads are set to [1, 2, 4] and [4, 8], the seed", + "bbox": [ + 159, + 771, + 833, + 864 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "10 https://sites.google.com/site/assistmentsdata/datasets/2015-assistments-skill-builder-data", + "bbox": [ + 179, + 873, + 741, + 884 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "1 https://drive.google.com/drive/folders/1LRLjqWfODwTYRMPw6wEJ_mMt1KZ4XBdk.", + "bbox": [ + 179, + 885, + 630, + 898 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 163, + 60, + 478, + 74 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 492, + 921, + 504, + 932 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "is set to [42, 3407] for reproducing the experimental results. Our model is implemented in PyTorch and trained on NVIDIA RTX 3090 GPU device. 
Similar to all existing DLKT research, we use the AUC as the main evaluation metric, and use accuracy as the secondary metric.", + "bbox": [ + 159, + 126, + 833, + 168 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4 EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 161, + 184, + 389, + 197 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Overall Performance. Table 2 and Table 3 summarize the overall prediction performance of SIMPLEKT and all baselines in terms of the average AUC and accuracy scores. Marker *, o and ● indicates whether SIMPLEKT is statistically superior/equal/inferior to the compared method (using paired t-test at 0.01 significance level). The last column shows the total number of win/tie/loss for SIMPLEKT against the compared method on all 7 datasets (e.g., #win is how many times SIMPLEKT significantly outperforms that method).", + "bbox": [ + 159, + 208, + 835, + 288 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/8721802cdd01a0129b840d02947fe083b73727fcaffd57ef29ca0ac1991127a6.jpg", + "table_caption": [ + "Table 2: Overall AUC performance of SIMPLEKT and all baselines. “-” indicates the method is inapplicable for that dataset." + ], + "table_footnote": [], + "table_body": "
ModelD1: Datasets containing info. of both questions and KCsD2: Datasets containing info. of either questions or KCsSIMPLEKT
AS2009AL2005BD2006NIPS34Statics2011AS2015POJ#win/#tie/#loss
DKT0.7541±0.0011*0.8149±0.0011*0.8015±0.0008*0.7689±0.0002*0.8222±0.0013●0.7271±0.0005●0.6089±0.0009*5/0/2
DKT+0.7547±0.0017*0.8156±0.0011*0.8020±0.0004*0.7696±0.0002*0.8279±0.0004●0.7285±0.0006●0.6173±0.0007*5/0/2
DKT-F-0.8147±0.0013*0.7985±0.0013*0.7733±0.0003*0.7839±0.0061*-0.6030±0.0023*5/0/0
KQN0.7477±0.0011*0.8027±0.0015*0.7936±0.0014*0.7684±0.0003*0.8232±0.0007●0.7254±0.0004●0.6080±0.0015*5/0/2
LPKT0.7814±0.0022●0.8274±0.0014●0.8055±0.0006*0.8035±0.0003○---1/1/2
IEKT0.7861±0.0027●0.8416±0.0014●0.8125±0.0009*0.8045±0.0002●---1/0/3
DKVMN0.7473±0.0006*0.8054±0.0011*0.7983±0.0009*0.7673±0.0004*0.8093±0.0017*0.7227±0.0004*0.6056±0.0022*7/0/0
ATKT0.7470±0.0008*0.7995±0.0023*0.7889±0.0008*0.7665±0.0001*0.8055±0.0020*0.7245±0.0007*0.6075±0.0012*7/0/0
GKT0.7424±0.0021*0.8110±0.0009*0.8046±0.0008*0.7689±0.0024*0.8040±0.0065○0.7258±0.0012●0.6070±0.0036*5/1/1
SAKT0.7246±0.0017*0.7880±0.0063*0.7740±0.0008*0.7517±0.0005*0.7965±0.0014*0.7114±0.0003*0.6095±0.0013*7/0/0
SAINT0.6958±0.0023○0.7775±0.0017*0.7781±0.0013*0.7873±0.0007*0.7599±0.0139*0.7026±0.0011*0.5563±0.0012*6/1/0
AKT0.7853±0.0017●0.8306±0.0019●0.8208±0.0007●0.8033±0.0003*0.8309±0.0009●0.7281±0.0004●0.6281±0.0013●1/0/6
simpleKT0.7744±0.00180.8254±0.00030.8160±0.00060.8035±0.00000.8199±0.00110.7248±0.00050.6252±0.0005-
", + "bbox": [ + 164, + 318, + 840, + 445 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5b12cfb50117dce6dc0b2cb5255b893af2fcf45a64850104619d781d4c6bd5c3.jpg", + "table_caption": [ + "Table 3: Overall Accuracy performance of SIMPLEKT and all baselines. “-” indicates the method is inapplicable for that dataset." + ], + "table_footnote": [], + "table_body": "
ModelD1: Datasets containing info. of both questions and KCsD2: Datasets containing info. of either questions or KCsSIMPLEKT
AS2009AL2005BD2006NIPS34Statics2011AS2015POJ#win/#tie/#loss
DKT0.7244±0.0014*0.8097±0.0005●0.8553±0.0002*0.7032±0.0004*0.7969±0.0006●0.7503±0.0003*0.6328±0.0020*5/0/2
DKT+0.7248±0.0009*0.8097±0.0007●0.8553±0.0003*0.7039±0.0004*0.7977±0.0006●0.7510±0.0004●0.6482±0.0021*4/0/3
DKT-F-0.8090±0.0005●0.8536±0.0004*0.7076±0.0002*0.7872±0.0011*-0.6371±0.0030*4/0/1
KQN0.7228±0.0009*0.8025±0.0006*0.8532±0.0006*0.7028±0.0001*0.7978±0.0007●0.7500±0.0003*0.6435±0.0017*6/0/1
LPKT0.7355±0.0015●0.8145±0.0007●0.8544±0.0008*0.7341±0.0003●---1/0/3
IEKT0.7375±0.0042●0.8236±0.0010●0.8553±0.0023*0.7330±0.0002●---1/0/3
DKVMN0.7199±0.0010*0.8027±0.0007*0.8545±0.0002*0.7016±0.0005*0.7929±0.0006*0.7508±0.0006○0.6393±0.0015*6/1/0
ATKT0.7208±0.0009*0.7998±0.0019*0.8511±0.0004*0.7013±0.0002*0.7904±0.0011*0.7494±0.0002*0.6332±0.0023*7/0/0
GKT0.7153±0.0032*0.8088±0.0008●0.8555±0.0002*0.7014±0.0028*0.7902±0.0021○0.7504±0.0010*0.6117±0.0147*5/1/1
SAKT0.7063±0.0018*0.7954±0.0020*0.8461±0.0005*0.6879±0.0004*0.7879±0.0015*0.7474±0.0002*0.6407±0.0035*7/0/0
SAINT0.6936±0.0034○0.7791±0.0016*0.8411±0.0065*0.7180±0.0006*0.7682±0.0056*0.7438±0.0010*0.6476±0.0003*6/1/0
AKT0.7392±0.0021●0.8124±0.0011●0.8587±0.0005●0.7323±0.0005*0.8021±0.0011●0.7521±0.0005●0.6492±0.0010*2/0/5
simpleKT0.7320±0.00120.8083±0.00050.8579±0.00030.7328±0.00010.7957±0.00200.7508±0.00040.6522±0.0008-
", + "bbox": [ + 164, + 500, + 840, + 627 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "From Table 2 and Table 3 we find the following results: (1) compared to other baseline methods, SIMPLEKT almost always ranks top 3 in terms of AUC scores and achieves 55 wins, 3 ties and 18 loss in total against 12 baselines on 7 public datasets of different domains. This indicates the strength of SIMPLEKT as a baseline of KT; (2) in general, the SIMPLEKT approach performs better on D1 datasets that have both question and KC information available. When training SIMPLEKT on D2 datasets, due to the lack of distinguished information about questions and KCs, the explicit question-centric difficulty modeling degrades and the KC representations become the question agnostic; (3) when comparing SIMPLEKT to other attentive models, i.e., SAKT, SAINT and AKT, in C5 category, our SIMPLEKT beats SAKT and SAINT on all the datasets, which indicates the effectiveness of explicit question-centric difficulty modeling. Although our SIMPLEKT approach is significantly worse than the AKT approach on 6 datasets, the performance gaps are quite minimal that are mostly within a $0.5\\%$ range. On the other hand, SIMPLEKT is much more concise compared to the two-layer attentive architecture in the AKT approach; (4) the SIMPLEKT outperforms many deep sequential models in category C1, including DKT, DKT+, DKT-F, KQN on AS2009, AL2005, BD2006, NIPS34 and POJ. We believe this is because the above 4 sequential models use KCs to index questions cannot capture the individual differences among questions with the same KCs which is crucial to predict student future performance; (5) comparing SIMPLEKT and IEKT, we can see, IEKT has better prediction performance on AS2009, AL2005, and NIPS34. 
This is because IEKT captures both question-level and KC-level variations in its representations and at the same time, it designs two specific neural modules to estimate individual cognition and acquisition", + "bbox": [ + 159, + 637, + 835, + 900 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 161, + 60, + 478, + 74 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 492, + 921, + 505, + 932 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "abilities; (6) compare to other different types of models in C2, C3, and C4, such as DKVMN, ATKT and GKT, our SIMPLEKT achieves better performance by using an ordinary dot-product attention function without any memory mechanism, adversarial learning or graph constructions, which encourages the further educational researchers and practitioners to develop effective models with a design of simplicity.", + "bbox": [ + 159, + 126, + 835, + 193 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5da15cc66032cae869d7934d078a7d23dc2f0c0f7d4cc795e8e905e2ca257f08.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 195, + 633, + 206 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6b2208df3267500fc3e5973a1a2b1dcf2a38532118f4cc5ff02b4dfbce21c6f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 188, + 212, + 339, + 324 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b68518f716e87e7fbfb720c3fcba4f9043488e31019b5f2afa5ea6a6fbcfddd4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 342, + 212, + 494, + 325 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/211d7d9a09a33745622203c32e29b12da75927a5eadaa1d9a3a6143ed7851429.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 212, + 647, + 324 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": 
"images/96d6686cd7e9357cb8fb87916c0055d02dc872631809326cc228a9635bb7c2be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 650, + 212, + 801, + 325 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/51c839870d094e7074c119200452a9c660c71531b758646b3dbc292c4309a6fc.jpg", + "image_caption": [ + "Figure 3: Non-accumulative predictions in the multi-step ahead scenario in terms of AUC." + ], + "image_footnote": [], + "bbox": [ + 188, + 326, + 339, + 438 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/754abc42972dead1fc150efb27449675463ffca216b88a8a574c51f9c773e7a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 342, + 326, + 494, + 438 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ca78c04a5d4e62ecc608792889df918871f71eae75af195ab14d5ff9963ca254.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 326, + 647, + 438 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2b271f8bd2e9b455fe37dfca479c39667a2896d5366fe8bf4d28c9612b73f609.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 461, + 633, + 473 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/de36abe1d2424c182e4e7e7b6d6ac1f66e2cec151596bda8844d3f980b43a4c3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 188, + 478, + 339, + 590 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6dc351890a459061f69ea2659801f9455e62acabe39e95c8af27685c52f5d1b7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 342, + 478, + 494, + 590 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2241242544f2055d3de8c879108f8dd2a826ec6ddb0fe03bca0e2d67d23326ae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 478, + 647, + 590 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a814d72df135b1d00adecb82f0186c73578c970cdf69cff357102df852f80c0a.jpg", 
+ "image_caption": [], + "image_footnote": [], + "bbox": [ + 650, + 478, + 803, + 590 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e34035c6630b35dce8246694fdb552e7f2d2001d6cdd6573828f7e28ee38169d.jpg", + "image_caption": [ + "Figure 4: Non-accumulative predictions in the multi-step ahead scenario in terms of Accuracy." + ], + "image_footnote": [], + "bbox": [ + 186, + 594, + 339, + 703 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3958a12c89452969a13555448bbc36e31aa1a23860999cb10c49d082461882b9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 342, + 594, + 494, + 703 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c6dc0106a816219728505976ac8c86c6e14a855fcbc6c241430245bb2aa3dff2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 495, + 594, + 647, + 703 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Multi-step KT Prediction Performance. In order to make the prediction close to real application scenarios, we also predict our model in multi-step prediction which predicts a span of student's responses given the student's historical interaction sequence. Practically, accurate multi-step KT prediction will provide constructive feedback to learning path selection and construction and help teachers adaptively adjust future teaching materials. We conduct the prediction in a non-accumulative setting that predicts all future values all at once to avoid accumulative prediction errors. To have a fine-grained analysis in the multi-step ahead prediction scenario, we further experiment with DLKT models on different portions of observed student interactions. Specifically, we vary the observed percentages of student interaction length from $20\\%$ to $90\\%$ with step size of $10\\%$ . 
Due to the space limit, we select the best baseline in each category, i.e., IEKT, DKVMN, ATKT, GKT, AKT as the representative approaches and the results in terms of AUC and accuracy are shown in Figure 3 and Figure 4. We make the following observations: (1) with the increasing historical information, the student AUC performance prediction become more accurate in most cases, which is in line with the", + "bbox": [ + 159, + 728, + 835, + 898 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 163, + 60, + 478, + 74 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 492, + 921, + 502, + 931 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "real-world educational scenario; (2) the attention based model almost outperforms other KT models in Statics2011, AS2005 and POJ according to AUC scores, this is because the attention mechanism is expert in capturing the long-term dependencies; and (3) our SIMPLEKT achieves the best prediction performance in BD2006, NIPS34 in terms of AUC, which indicates the proposed method is simple yet powerful.", + "bbox": [ + 159, + 126, + 833, + 193 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Qualitative Visual Analysis. In this section, we qualitatively show the visualization of the prediction results made by SIMPLEKT in Figure 5 To better understand the model predictive behavior, we compute the historical error rate (HER) per question from the data and use the HERs as surrogates of question difficulties. Due to the space limit, more illustrative and fine-grained results are provided in Appendix A.2 As we can see from Figure 5 when a student meets a certain KC for the first time, the higher HER of the question, the lower the probability that student will get it correct. 
For example, the HERs for questions q527, q509, q512, q518 and q219 are 0.35, 0.41, 0.20, 0.51, and 0.48 and the corresponding prediction probabilities of SIMPLEKT are 0.74, 0.52, 0.84, 0.45, and 0.51 respectively. Furthermore, for those questions that cover the same set of KCs, such as questions q526 and q529, the model prediction probability decreases when the corresponding HER increases.", + "bbox": [ + 159, + 199, + 835, + 331 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/adbc7a91fca12fc0189b85901c7b7945277d8527d4e95bba95e58cc8f022e9be.jpg", + "image_caption": [ + "Figure 5: Visualization of a student's prediction results on SIMPLEKT." + ], + "image_footnote": [], + "bbox": [ + 166, + 344, + 835, + 448 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ablation Study. We systematically examine the effect of the key component of question difficulty modeling by constructing two model variants: (1) SIMPLEKT-ScalarDiff that changes the question-centric difficulty vector $\\mathbf{m}_{q_t}$ to scalar; and (2) SIMPLEKT-NoDiff that completely ignores the question difficulty modeling and simply set $\\mathbf{x}_t$ to $\\mathbf{z}_{c_t}$ . The prediction performance on all datasets that belong to D1 are reported in Table 4. Please note that since datasets in D2 only have either question information or KC information, SIMPLEKT, SIMPLEKT-ScalarDiff, and SIMPLEKT-NoDiff essentially become mathematically unidentifiable. From Table 4 we can easily observe that (1) the SIMPLEKT method outperforms the two model variants and especially when removing the question difficulty modeling component, the prediction performance decreases more than $2\\%$ on all D1 datasets. This empirically verifies the importance of question-centric difficulty modeling when making student performance prediction in KT scenarios; and (2) comparing SIMPLEKT and SIMPLEKT-ScalarDiff, the performance is very minimal. 
We believe this is because the KC representation $\\mathbf{x}_t$ is based on the simple additive assumption and a scalar is expressive enough to represent question-level difficulty under this assumption.", + "bbox": [ + 159, + 483, + 835, + 668 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/125a7dfab8f8726f4b09c337d927d97c7648cf8012f9cf1cca41e3d4ec72c95c.jpg", + "table_caption": [ + "Table 4: The performance of different variants in SIMPLEKT." + ], + "table_footnote": [], + "table_body": "
AS2009AL2005BD2006NIPS34
SIMPLEKT0.7744±0.00180.8254±0.00030.8160±0.00060.8035±0.0000
SIMPLEKT-ScalarDiff0.7740±0.00210.8250±0.00130.8159±0.00110.8008±0.0012
SIMPLEKT-NoDiff0.7411±0.00160.8048±0.00180.7922±0.00110.7646±0.0005
", + "bbox": [ + 196, + 701, + 801, + 752 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 161, + 777, + 315, + 791 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we propose SIMPLEKT, a simple but tough-to-beat approach to solve KT task effectively. Motivated by the Rasch model in psychometrics, the SIMPLEKT approach is designed to capture individual differences among questions with the same KCs. Furthermore, the proposed SIMPLEKT approach simplifies the sophisticated student knowledge state estimation component with the ordinary dot-product attention function. Comprehensive experimental results demonstrate that SIMPLEKT is able to beat a wide range of recently proposed DLKT models on various datasets from different domains. We believe this work serves as a strong baseline for future KT research.", + "bbox": [ + 159, + 806, + 833, + 898 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 161, + 60, + 478, + 74 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 492, + 921, + 505, + 932 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REPRODUCIBILITY STATEMENT", + "text_level": 1, + "bbox": [ + 163, + 126, + 436, + 141 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The code of SIMPLEKT and its variants, i.e., SIMPLEKT-ScalarDiff and SIMPLEKT-NoDiff, to reproduce the experimental results can be found at https://github.com/pykt-team/ pykt-toolkit. We give the details of data-preprocessing and the training hyper-parameters of SIMPLEKT in Section 4.1 and Section 4.3. The code of the 12 comparison models is accessible from an open-sourced PYKT python library at https://pykt.org/. We choose to use the same data partitions of train, validation, test sets as PYKT and hence all the results can be easily reproducible. 
All the model training details of all baselines can be found at https://pykt-toolkit.readthedocs.io/en/latest/pykt.models.html", + "bbox": [ + 159, + 154, + 836, + 262 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 163, + 279, + 354, + 293 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This work was supported in part by National Key R&D Program of China, under Grant No. 2020AAA0104500; in part by Beijing Nova Program (Z201100006820068) from Beijing Municipal Science & Technology Commission; in part by NFSC under Grant No. 61877029 and in part by Key Laboratory of Smart Education of Guangdong Higher Education Institutes, Jinan University (2022LSYS003).", + "bbox": [ + 161, + 307, + 833, + 375 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 163, + 393, + 282, + 407 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ghodai Abdelrahman and Qing Wang. Knowledge tracing with sequential key-value memory networks. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 175-184, 2019.", + "Hao Cen, Kenneth Koedinger, and Brian Junker. Learning factors analysis-a general method for cognitive model evaluation and improvement. In International Conference on Intelligent Tutoring Systems, pp. 164-175. Springer, 2006.", + "Jiahao Chen, Zitao Liu, Shuyan Huang, Qiongqiong Liu, and Weiqi Luo. Improving interpretability of deep sequential knowledge tracing models with question-centric cognitive representations. In Proceedings of the AAAI Conference on Artificial Intelligence, 2023.", + "Penghe Chen, Yu Lu, Vincent W Zheng, and Yang Pian. Prerequisite-driven deep knowledge tracing. In 2018 IEEE International Conference on Data Mining, pp. 39-48. IEEE, 2018.", + "Benoit Choffin, Fabrice Popineau, Yolaine Bourda, and Jill-Jenn Vie. 
DAS3H: Modeling student learning and forgetting for optimally scheduling distributed practice of skills. In Proceedings of The 12th International Conference on Educational Data Mining (EDM 2019), volume 29, pp. 38.", + "Youngduck Choi, Youngnam Lee, Junghyun Cho, Jineon Baek, Byungsoo Kim, Yeongmin Cha, Dongmin Shin, Chan Bae, and Jaewe Heo. Towards an appropriate query, key, and value computation for knowledge tracing. In Proceedings of the Seventh ACM Conference on Learning@ Scale, pp. 341-344, 2020.", + "Albert T Corbett and John R Anderson. Knowledge tracing: Modeling the acquisition of procedural knowledge. User Modeling and User-adapted Interaction, 4(4):253-278, 1994.", + "Mingyu Feng, Neil Heffernan, and Kenneth Koedinger. Addressing the assessment challenge with an online system that tutors as it assesses. User Modeling and User-adapted Interaction, 19(3): 243-266, 2009.", + "Aritra Ghosh, Neil Heffernan, and Andrew S Lan. Context-aware attentive knowledge tracing. In ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 2020.", + "Xiaopeng Guo, Zhijie Huang, Jie Gao, Mingyu Shang, Maojing Shu, and Jun Sun. Enhancing knowledge tracing via adversarial training. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 367-375, 2021.", + "Tanja Käser, Severin Klingler, Alexander G Schwing, and Markus Gross. Dynamic bayesian networks for student modeling. IEEE Transactions on Learning Technologies, 10(4):450-462, 2017." + ], + "bbox": [ + 163, + 414, + 836, + 900 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 163, + 60, + 478, + 74 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 921, + 510, + 932 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. 
arXiv preprint arXiv:1412.6980, 2014.", + "Elise Lavoué, Baptiste Monterrat, Michel Desmarais, and Sébastien George. Adaptive gamification for learning environments. IEEE Transactions on Learning Technologies, 12(1):16-28, 2018.", + "Jinseok Lee and Dit-Yan Yeung. Knowledge query network for knowledge tracing: How knowledge interacts with skills. In Proceedings of the 9th International Conference on Learning Analytics & Knowledge, pp. 491-500, 2019.", + "Qi Liu, Zhenya Huang, Yu Yin, Enhong Chen, Hui Xiong, Yu Su, and Guoping Hu. EKT: Exercise-aware knowledge tracing for student performance prediction. IEEE Transactions on Knowledge and Data Engineering, 33(1):100-115, 2019.", + "Yunfei Liu, Yang Yang, Xianyu Chen, Jian Shen, Haifeng Zhang, and Yong Yu. Improving knowledge tracing via pre-training question embeddings. In Proceedings of the Twenty-Ninth International Conference on International Joint Conferences on Artificial Intelligence, pp. 1556-1562, 2021.", + "Zitao Liu, Qiongqiong Liu, Jiahao Chen, Shuyan Huang, Jiliang Tang, and Weiqi Luo. pyKT: A python library to benchmark deep learning based knowledge tracing models. In 36th Conference on Neural Information Processing Systems (NeurIPS 2022) Track on Datasets and Benchmarks., 2022.", + "Ting Long, Yunfei Liu, Jian Shen, Weinan Zhang, and Yong Yu. Tracing knowledge state with individual cognition and acquisition estimation. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 173-182, 2021.", + "Ting Long, Jiarui Qin, Jian Shen, Weinan Zhang, Wei Xia, Ruiming Tang, Xiuqiang He, and Yong Yu. Improving knowledge tracing with collaborative information. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining, pp. 599-607, 2022.", + "Sein Minn, Yi Yu, Michel C Desmarais, Feida Zhu, and Jill-Jenn Vie. Deep knowledge tracing and dynamic student classification for knowledge tracing. 
In 2018 IEEE International Conference on Data Mining, pp. 1182-1187. IEEE, 2018.", + "Koki Nagatani, Qian Zhang, Masahiro Sato, Yan-Ying Chen, Francine Chen, and Tomoko Ohkuma. Augmenting knowledge tracing by considering forgetting behavior. In The World Wide Web Conference, pp. 3101-3107, 2019.", + "Hiromi Nakagawa, Yusuke Iwasawa, and Yutaka Matsuo. Graph-based knowledge tracing: modeling student proficiency using graph neural network. In 2019 IEEE/WIC/ACM International Conference on Web Intelligence, pp. 156-163. IEEE, 2019.", + "Shalini Pandey and George Karypis. A self-attentive model for knowledge tracing. In 12th International Conference on Educational Data Mining, pp. 384–389. International Educational Data Mining Society, 2019.", + "Shalini Pandey and Jaideep Srivastava. RKT: relation-aware self-attention for knowledge tracing. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, pp. 1205–1214, 2020.", + "Chris Piech, Jonathan Bassen, Jonathan Huang, Surya Ganguli, Mehran Sahami, Leonidas J Guibas, and Jascha Sohl-Dickstein. Deep knowledge tracing. Advances in Neural Information Processing Systems, 28, 2015.", + "Shi Pu, Michael Yudelson, Lu Ou, and Yuchi Huang. Deep knowledge tracing with transformers. In International Conference on Artificial Intelligence in Education, pp. 252-256. Springer, 2020.", + "Sami Sarsa, Juho Leinonen, Arto Hellas, et al. Empirical evaluation of deep learning models for knowledge tracing: Of hyperparameters and metrics on performance and replicability. Journal of Educational Data Mining, 14(2), 2022." 
+ ], + "bbox": [ + 163, + 126, + 833, + 898 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 163, + 60, + 478, + 74 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 921, + 507, + 932 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shuanghong Shen, Qi Liu, Enhong Chen, Han Wu, Zhenya Huang, Weihao Zhao, Yu Su, Haiping Ma, and Shijin Wang. Convolutional knowledge tracing: Modeling individualization in student learning process. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 1857-1860, 2020.", + "Shuanghong Shen, Qi Liu, Enhong Chen, Zhenya Huang, Wei Huang, Yu Yin, Yu Su, and Shijin Wang. Learning process-consistent knowledge tracing. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining, pp. 1452-1460, 2021.", + "Shuanghong Shen, Zhenya Huang, Qi Liu, Yu Su, Shijin Wang, and Enhong Chen. Assessing student's dynamic knowledge state by exploring the question difficulty effect. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 427-437, 2022.", + "J Stamper, A Niculescu-Mizil, S Ritter, G Gordon, and K Koedinger. Algebra I 2005-2006 and Bridge to Algebra 2006-2007. Development data sets from KDD Cup 2010 Educational Data Mining Challenge, 2010.", + "Paul Steif and Norman Bier. Oli engineering statics-fall 2011, 2014.", + "Nguyen Thai-Nghe, Lucas Drumond, Tomás Horváth, Artus Krohn-Grimberghe, Alexandros Nanopoulos, and Lars Schmidt-Thieme. Factorization techniques for predicting student performance. In Educational Recommender Systems and Technologies: Practices and Challenges, pp. 129-153. IGI Global, 2012.", + "Hanshuang Tong, Zhen Wang, Qi Liu, Yun Zhou, and Wenyuan Han. 
HGKT: Introducing hierarchical exercise graph for knowledge tracing. arXiv preprint arXiv:2006.16915, 2020.", + "Chenyang Wang, Weizhi Ma, Min Zhang, Chuancheng Lv, Fengyuan Wan, Huijie Lin, Taoran Tang, Yiqun Liu, and Shaoping Ma. Temporal cross-effects in knowledge tracing. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining, pp. 517-525, 2021.", + "Fei Wang, Qi Liu, Enhong Chen, Zhenya Huang, Yuying Chen, Yu Yin, Zai Huang, and Shijin Wang. Neural cognitive diagnosis for intelligent education systems. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pp. 6153-6161, 2020a.", + "Zhiwei Wang, Xiaoqin Feng, Jiliang Tang, Gale Yan Huang, and Zitao Liu. Deep knowledge tracing with side information. In Artificial Intelligence in Education: 20th International Conference, AIED 2019, Chicago, IL, USA, June 25-29, 2019, Proceedings, Part II 20, pp. 303-308. Springer, 2019.", + "Zichao Wang, Angus Lamb, Evgeny Saveliev, Pashmina Cameron, Yordan Zaykov, Jose Miguel Hernández-Lobato, Richard E Turner, Richard G Baraniuk, Craig Barton, Simon Peyton Jones, et al. Instructions and guide for diagnostic questions: The neurips 2020 education challenge. ArXiv preprint, abs/2007.12061, 2020b. URL https://arxiv.org/abs/2007.12061", + "Yang Yang, Jian Shen, Yanru Qu, Yunfei Liu, Kerong Wang, Yaoming Zhu, Weinan Zhang, and Yong Yu. GIKT: a graph-based interaction model for knowledge tracing. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pp. 299-315. Springer, 2020.", + "Chun-Kit Yeung. Deep-IRT: Make deep learning based knowledge tracing explainable using item response theory. In Proceedings of The 12th International Conference on Educational Data Mining (EDM 2019), pp. 683-686, 2019.", + "Chun-Kit Yeung and Dit-Yan Yeung. Addressing two problems in deep knowledge tracing via prediction-consistent regularization. 
In Proceedings of the Fifth Annual ACM Conference on Learning at Scale, pp. 1-10, 2018.", + "Jiani Zhang, Xingjian Shi, Irwin King, and Dit Yan Yeung. Dynamic key-value memory networks for knowledge tracing. In Proceedings of the 26th International Conference on World Wide Web, pp. 765, 2017.", + "Moyu Zhang, Xinning Zhu, Chunhong Zhang, Yang Ji, Feng Pan, and Changchuan Yin. Multi-Factors Aware Dual-Attentional Knowledge Tracing. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management, pp. 2588-2597, 2021." + ], + "bbox": [ + 161, + 126, + 835, + 898 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 163, + 60, + 478, + 73 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 921, + 507, + 932 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_model.json b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5c07eff833f1399246c027d17ab1321129075fed --- /dev/null +++ b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_model.json @@ -0,0 +1,2281 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.163, + 0.06, + 0.481, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.123, + 0.833, + 0.166 + ], + "angle": 0, + "content": "SIMPLEKT: A SIMPLE BUT TOUGH-TO-BEAT BASELINE FOR KNOWLEDGE TRACING" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.19, + 0.251, + 0.202 + ], + "angle": 0, + "content": "Zitao Liu" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.203, + 0.701, + 0.217 + ], + "angle": 0, + "content": 
"Guangdong Institute of Smart Education, Jinan University, Guangzhou, China" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.218, + 0.369, + 0.23 + ], + "angle": 0, + "content": "liuzitao@jnu.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.25, + 0.512, + 0.264 + ], + "angle": 0, + "content": "Qiongqiong Liu, Jiahao Chen, Shuyan Huang*" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.264, + 0.435, + 0.276 + ], + "angle": 0, + "content": "TAL Education Group, Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.277, + 0.667, + 0.29 + ], + "angle": 0, + "content": "{liuqiongqiongl, chenjiahao, huangshuyan}@tal.com" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.309, + 0.254, + 0.323 + ], + "angle": 0, + "content": "Weiqi Luo" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.323, + 0.701, + 0.336 + ], + "angle": 0, + "content": "Guangdong Institute of Smart Education, Jinan University, Guangzhou, China" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.337, + 0.319, + 0.349 + ], + "angle": 0, + "content": "lwq@jnu.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.45, + 0.384, + 0.548, + 0.397 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.412, + 0.776, + 0.715 + ], + "angle": 0, + "content": "Knowledge tracing (KT) is the problem of predicting students' future performance based on their historical interactions with intelligent tutoring systems. Recently, many works present lots of special methods for applying deep neural networks to KT from different perspectives like model architecture, adversarial augmentation and etc., which make the overall algorithm and system become more and more complex. Furthermore, due to the lack of standardized evaluation protocol (Liu et al. 
2022), there is no widely agreed KT baselines and published experimental comparisons become inconsistent and self-contradictory, i.e., the reported AUC scores of DKT on ASSISTments2009 range from 0.721 to 0.821 (Minn et al. 2018 Yeung & Yeung 2018). Therefore, in this paper, we provide a strong but simple baseline method to deal with the KT task named SIMPLEKT. Inspired by the Rasch model in psychometrics, we explicitly model question-specific variations to capture the individual differences among questions covering the same set of knowledge components that are a generalization of terms of concepts or skills needed for learners to accomplish steps in a task or a problem. Furthermore, instead of using sophisticated representations to capture student forgetting behaviors, we use the ordinary dot-product attention function to extract the time-aware information embedded in the student learning interactions. Extensive experiments show that such a simple baseline is able to always rank top 3 in terms of AUC scores and achieve 57 wins, 3 ties and 16 loss against 12 DLKT baseline methods on 7 public datasets of different domains. We believe this work serves as a strong baseline for future KT research. Code is available at https://github.com/pykt-team/pykt-toolkit!" + }, + { + "type": "title", + "bbox": [ + 0.165, + 0.738, + 0.334, + 0.751 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.767, + 0.834, + 0.834 + ], + "angle": 0, + "content": "Knowledge tracing (KT) is a sequential prediction task that aims to predict the outcomes of students over questions by modeling their mastery of knowledge, i.e., knowledge states, as they interact with learning platforms such as massive open online courses and intelligent tutoring systems, as shown in Figure ①. Solving the KT problems may help teachers better detect students that need further attention, or recommend personalized learning materials to students." 
+ }, + { + "type": "text", + "bbox": [ + 0.161, + 0.839, + 0.835, + 0.866 + ], + "angle": 0, + "content": "The KT related research has been studied since 1990s where Corbett and Anderson, to the best of our knowledge, were the first to estimate students' current knowledge with regard to each individ-" + }, + { + "type": "page_footnote", + "bbox": [ + 0.184, + 0.873, + 0.453, + 0.887 + ], + "angle": 0, + "content": "*The corresponding author: Shuyan Huang." + }, + { + "type": "page_footnote", + "bbox": [ + 0.184, + 0.887, + 0.654, + 0.901 + ], + "angle": 0, + "content": "1We merged our model to the PYKT benchmark at https://pykt.org/" + }, + { + "type": "list", + "bbox": [ + 0.184, + 0.873, + 0.654, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.922, + 0.505, + 0.933 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.164, + 0.061, + 0.48, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.127, + 0.837, + 0.288 + ], + "angle": 0, + "content": "ual knowledge component (KC) (Corbett & Anderson 1994). A KC is a description of a mental structure or process that a learner uses, alone or in combination with other KCs, to accomplish steps in a task or a problem? Since then, many attempts have been made to solve the KT problem, such as probabilistic graphical models (Kaiser et al. 2017) and factor analysis based models (Cen et al. 2006; Lavoué et al. 2018; Thai-Nghe et al. 2012). Recently, with the rapid development of deep neural networks, many deep learning based knowledge tracing (DLKT) models are developed, such as auto-regressive based deep sequential KT models (Piech et al. 2015; Yeung & Yeung 2018; Chen et al. 2018; Wang et al. 2019; Guo et al. 2021; Long et al. 2021; Chen et al. 2023), memory-augmented KT models (Zhang et al. 
2017; Abdelrahman & Wang 2019; Yeung 2019), attention based KT models (Pandey & Karypis 2019; Pandey & Srivastava 2020; Choi et al. 2020; Ghosh et al. 2020; Pu et al. 2020), and graph based KT models (Nakagawa et al. 2019; Yang et al. 2020; Tong et al. 2020)." + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.299, + 0.841, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.16, + 0.384, + 0.836, + 0.411 + ], + "angle": 0, + "content": "Figure 1: Graphical illustration of the task of KnowledgeTracing. “√” and “×” denote the question is answered correctly and incorrectly." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.411, + 0.836, + 0.687 + ], + "angle": 0, + "content": "Although DLKT approaches have constituted new paradigms of the KT problem and achieved promising results, recently developed DLKT models seem to be more and more complex and resemble each other with very limited nuances from the methodological perspective: applying different neural components to capture student forgetting behaviors\\(^{3}\\) (Ghosh et al. 2020; Nagatani et al. 2019), recency effects (Zhang et al. 2021), and various auxiliary information including relations between questions and KCs (Tong et al. 2020; Pandey & Srivastava 2020; Liu et al. 2021; Yang et al. 2020), question text content (Liu et al. 2019; Wang et al. 2020a), question difficulty level (Liu et al. 2021; Shen et al. 2022), and students' learning ability (Shen et al. 2020). Furthermore, published DLKT baseline results surprisingly diverge. For example, the reported AUC scores of DKT and AKT on ASSISTments2009 range from 0.721 to 0.821 (Minn et al. 2018; Yeung & Yeung 2018) and from 0.747 to 0.835 in (Ghosh et al. 2020; Wang et al. 2021) respectively. Another example is that for the performance of DKT on the ASSISTments2009 dataset, it is recognized as one of the best baselines by Ghosh et al. (2020) while Long et al. (2022) and Zhang et al. 
(2021) showed that its performance is below the average. Recent survey studies by Sarsa et al. (2022) and Liu et al. (2022) summarized aforementioned inconsistencies of baseline results and showed evidence that variations in hyper-parameters and data pre-processing procedures contribute significantly to prediction performance of DLKT models. Specifically, Sarsa et al. (2022) empirically found that even simple baselines with little predictive value may outperform DLKT models with sophisticated neural components. Liu et al. (2022) built a standardized DLKT benchmark platform and showed that the improvement of many DLKT approaches is minimal compared to the very first DLKT model proposed by Piech et al. (2015)." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.691, + 0.837, + 0.85 + ], + "angle": 0, + "content": "Therefore, in this paper, we propose SIMPLEKT, a simple but tough-to-beat KT baseline that is simple to implement, computationally friendly and robust to a wide range of KT datasets across different domains. Motivated by the Rasch model that is a classic yet powerful model in psychometrics, the proposed SIMPLEKT approach captures the individual differences among questions covering the same set of KCs by representing each question's embedding as an additive combination of the average of its corresponding KCs' embeddings and a question-specific variation. Furthermore, different from many existing models that try to capture various aforementioned relations and information, the SIMPLEKT is purely based on the attention mechanism and uses the ordinary dot-product attention function to capture the contextual information embedded in the student learning interactions. 
To comprehensively and systematically evaluate the performance of SIMPLEKT, we choose to use the publicly available PYKT benchmark implementation to guarantee valid and reproducible comparisons against 12 DLKT methods on 7 popular datasets across differ-" + }, + { + "type": "page_footnote", + "bbox": [ + 0.183, + 0.86, + 0.678, + 0.873 + ], + "angle": 0, + "content": "\\(^{2}\\)A KC is a generalization of everyday terms like concept, principle, fact, or skill." + }, + { + "type": "page_footnote", + "bbox": [ + 0.185, + 0.874, + 0.629, + 0.886 + ], + "angle": 0, + "content": "3The Rasch model is also known as the 1PL item response theory model." + }, + { + "type": "page_footnote", + "bbox": [ + 0.187, + 0.887, + 0.327, + 0.9 + ], + "angle": 0, + "content": "4https://www.pykt.org/" + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.86, + 0.678, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.922, + 0.505, + 0.933 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.163, + 0.061, + 0.481, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.127, + 0.836, + 0.208 + ], + "angle": 0, + "content": "ent domains. Results shown that the SIMPLEKT beats a wide range of modern neural KT models that based on graph neural networks, memory augmented neural networks, and adversarial neural networks. This suggests that this simple method should be used as the baseline to beat future KT research, especially when designing sophisticated neural KT architectures. 
To encourage reproducible research, all the related codes, data and the learned SIMPLEKT models are publicly available at https://github.com/pykt-team/pykt-toolkit" + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.225, + 0.344, + 0.24 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.254, + 0.835, + 0.295 + ], + "angle": 0, + "content": "Recently, deep learning technique have been widely applied into KT task for student's historical learning modeling and the future performance prediction. Existing DLKT approaches can be categorized into the following 5 categories:" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.3, + 0.836, + 0.366 + ], + "angle": 0, + "content": "C1: Deep sequential models. DLKT models that use auto-regressive architectures to capture students' chronologically ordered interactions. For example, (Piech et al., 2015) proposed the very first DKT model that utilizes an LSTM layer to estimate the knowledge mastery. (Lee & Yeung, 2019) proposed to enhance DKT with a skill encoder that combines student learning activities and KC representations." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.372, + 0.834, + 0.426 + ], + "angle": 0, + "content": "C2: Memory augmented models. DLKT models that capture latent relations between KCs and student knowledge states via memory networks. For instance, (Zhang et al., 2017) exploited and stored the KC relationships via a static key memory matrix and predict students' knowledge mastery levels with a dynamic value memory matrix." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.431, + 0.834, + 0.472 + ], + "angle": 0, + "content": "C3: Adversarial based models. DLKT models that utilize the adversarial techniques to generate perturbations to improve model generalization capability. jointly train an attentiveLSTM KT model with both original and adversarial examples." 
+ }, + { + "type": "text", + "bbox": [ + 0.161, + 0.477, + 0.835, + 0.543 + ], + "angle": 0, + "content": "C4: Graph based models. DLKT models that use the graph neural networks to model intrinsic relations among questions, KCs and interactions. (Liu et al., 2021) presented a question-KC bipartite graph to explicitly capture question-level and KC-level inner-relations and question difficulties. (Yang et al., 2020) introduced a graph convolutional network to obtain the representation of the question-KC correlations." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.549, + 0.836, + 0.629 + ], + "angle": 0, + "content": "C5: Attention based models. DLKT models that capture dependencies between interactions via the attention mechanism. For example, (Pandey & Karypis, 2019) used self-attention network to capture the relevance between KCs and students' historical interactions. Choi et al., 2020 designed an encoder-decoder structure to represent the exercise and response embedding sequences. (Ghosh et al., 2020) performed three self-attention modules and explicitly model students' forgetting behaviors via a monotonic attention mechanism." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.635, + 0.835, + 0.702 + ], + "angle": 0, + "content": "Please note that the above categorizations are not exclusive and related techniques can be combined. For example, (Abdelrahman & Wang, 2019) proposed a sequential key-value memory network to unify the strengths of recurrent modeling capacity and memory capacity. The proposed SIMPLEKT approach belongs to C5 and it purely models student interactions by using the very ordinary dot-product attention function." 
+ }, + { + "type": "title", + "bbox": [ + 0.163, + 0.72, + 0.612, + 0.735 + ], + "angle": 0, + "content": "3 A SIMPLE METHOD FOR KNOWLEDGE TRACING" + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.748, + 0.371, + 0.761 + ], + "angle": 0, + "content": "3.1 PROBLEM STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.772, + 0.836, + 0.868 + ], + "angle": 0, + "content": "In this work, our objective is given an arbitrary question \\( q_* \\) to predict the probability of whether a student will answer \\( q_* \\) correctly or not based on the student's historical interaction data. More specifically, for each student \\( S \\), we assume that we have observed a chronologically ordered collection of \\( T \\) past interactions i.e., \\( S = \\{s_j\\}_{j=1}^T \\). Each interaction is represented as a 4-tuple \\( s \\), i.e., \\( s = \\), where \\( q, \\{c\\}, r, s \\) represent the specific question, the associated KC set, the binary valued student response and student's response time step respectively. We would like to estimate the probability \\( \\hat{r}_* \\) of the student's performance on arbitrary question \\( q_* \\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.161, + 0.874, + 0.835, + 0.899 + ], + "angle": 0, + "content": "5Response is a binary valued indicator variable where 1 represents the student correctly answered the question, and 0 otherwise." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.922, + 0.505, + 0.933 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.163, + 0.061, + 0.48, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.128, + 0.405, + 0.14 + ], + "angle": 0, + "content": "3.2 THE SIMPLEKT APPROACH" + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.152, + 0.636, + 0.166 + ], + "angle": 0, + "content": "3.2.1 REPRESENTATIONS OF QUESTIONS, KCs AND RESPONSES." 
+ }, + { + "type": "text", + "bbox": [ + 0.161, + 0.175, + 0.836, + 0.283 + ], + "angle": 0, + "content": "Effectively representing student interactions is crucial to the success of the DLKT models. In real-world educational scenarios, the question bank is usually much bigger than the set of KCs. For example, the number of questions is more than 1500 times larger than the number of KCs in the Algebra2005 dataset (described in Section 4.1). Therefore, to effectively learn and fairly evaluate the DLKT models from such highly sparse question-response data, following the previous work of Ghosh et al., 2020 and Liu et al., 2022, we artificially transform the original question-response data into KC-response data by expanding each question-level interaction into multiple KC-level interactions when the question is associated with a set of KCs (illustrated in Figure 2)." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.283, + 0.532, + 0.492 + ], + "angle": 0, + "content": "Furthermore, due to the fact that questions covering the same set of KCs may have various difficulty levels, students perform significantly different. As shown in Figure 2 even through questions \\( q_{2} \\) and \\( q_{4} \\) have the same set of KCs, i.e., \\( c_{1} \\) and \\( c_{3} \\), students may get \\( q_{2} \\) wrong but \\( q_{4} \\) correct. Therefore, it is unrealistic to treat every KC in the expanded KC-response sequence identical. Inspired by the very classic and simple Rasch model in psychometrics that explicitly uses a scalar to characterize the latent factor of question difficulty, we choose to use a question-specific difficulty vector to capture the individual differences among questions on the same KC. 
More specifically, the \\( t \\)th representations of KC (i.e., \\( \\mathbf{x}_t \\)) and interaction (i.e., \\( \\mathbf{y}_t \\)) in the expanded KC sequence of concept \\( c_{k} \\) are represented as follows:" + }, + { + "type": "image", + "bbox": [ + 0.557, + 0.296, + 0.825, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.541, + 0.445, + 0.834, + 0.486 + ], + "angle": 0, + "content": "Figure 2: Graphical illustration of transforming the original question-response data into KC-response data." + }, + { + "type": "equation", + "bbox": [ + 0.223, + 0.498, + 0.772, + 0.515 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {t} = \\mathbf {z} _ {c _ {k}} \\oplus \\mathbf {m} _ {q _ {j}} \\odot \\mathbf {v} _ {c _ {k}}; \\quad \\mathbf {y} _ {t} = \\mathbf {z} _ {c _ {k}} \\oplus \\mathbf {r} _ {q _ {j}}; \\quad \\mathbf {z} _ {c _ {k}} = \\mathbf {W} _ {c} \\cdot \\mathbf {e} _ {c _ {k}}; \\quad \\mathbf {r} _ {q _ {j}} = \\mathbf {W} _ {q} \\cdot \\mathbf {e} _ {q _ {j}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.52, + 0.836, + 0.614 + ], + "angle": 0, + "content": "where \\(\\mathbf{z}_{c_k}\\) denotes the latent representation of the KC \\(c_{k}\\). \\(\\mathbf{m}_{q_j}\\) denotes the difficulty vector of question \\(q_{j}\\) and question \\(q_{j}\\) contains the KC \\(c_{k}\\). \\(\\mathbf{v}_{c_k}\\) represents the question-centric variation of \\(q_{j}\\) covering this KC \\(c_{k}\\). \\(\\mathbf{r}_{q_j}\\) denotes the representation of student response on \\(q_{j}\\). \\(\\mathbf{e}_{c_k}\\) is the \\(n\\)-dimensional one-hot vector indicating the corresponding KC and \\(\\mathbf{e}_{q_j}\\) is the 2-dimensional one-hot vector indicating whether the question is answered correctly. \\(\\mathbf{z}_{c_k}\\), \\(\\mathbf{m}_{q_j}\\), \\(\\mathbf{v}_{c_k}\\) and \\(\\mathbf{r}_{q_j}\\) are \\(d\\)-dimensional learnable real-valued vectors. 
\\(\\mathbf{W}_c\\in \\mathbb{R}^{d\\times n}\\) and \\(\\mathbf{W}_q\\in \\mathbb{R}^{d\\times 2}\\) are learnable linear transformation operations. \\(\\odot\\) and \\(\\oplus\\) are the element-wise product and addition operators. \\(n\\) is the total number of KCs." + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.627, + 0.633, + 0.64 + ], + "angle": 0, + "content": "3.2.2 PREDICTION WITH ORDINARY DOT-PRODUCT ATTENTION." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.65, + 0.835, + 0.703 + ], + "angle": 0, + "content": "Different from many existing DLKT approaches that use sophisticated neural components to model student learning and/or forgetting behaviors, we choose to use the ordinary dot-product attention function to explore and extract knowledge states from students' past learning history. Specifically, the retrieved knowledge state \\((\\mathbf{h}_{t + 1})\\) at the \\((t + 1)\\)th timestamp is computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.711, + 0.752, + 0.727 + ], + "angle": 0, + "content": "\\[\n\\mathbf {h} _ {t + 1} = \\text {S e l f A t t e n i o n} (Q = \\mathbf {x} _ {t + 1}, K = \\left\\{\\mathbf {x} _ {1}, \\dots , \\mathbf {x} _ {t} \\right\\}, V = \\left\\{\\mathbf {y} _ {1}, \\dots , \\mathbf {y} _ {t} \\right\\}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.733, + 0.835, + 0.76 + ], + "angle": 0, + "content": "Then we use a two-layer fully connected network to refine the knowledge state and the overall optimization function is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.263, + 0.764, + 0.733, + 0.782 + ], + "angle": 0, + "content": "\\[\n\\eta_ {t + 1} = \\mathbf {w} ^ {\\top} \\cdot \\operatorname {R e L U} \\left(\\mathbf {W} _ {2} \\cdot \\operatorname {R e L U} \\left(\\mathbf {W} _ {1} \\cdot \\left[ \\mathbf {h} _ {t + 1}; \\mathbf {x} _ {t + 1} \\right] + \\mathbf {b} _ {1}\\right) + \\mathbf {b} _ {2}\\right) + b\n\\]" + }, + { + "type": "equation", + "bbox": [ + 
0.285, + 0.784, + 0.638, + 0.805 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = - \\sum \\big (r _ {t} \\log \\sigma (\\eta_ {t}) + (1 - r _ {t}) \\log (1 - \\sigma (\\eta_ {t})) \\big)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.808, + 0.835, + 0.838 + ], + "angle": 0, + "content": "where \\(\\mathbf{W}_1, \\mathbf{W}_2, \\mathbf{w}, \\mathbf{b}_1, \\mathbf{b}_2\\) and \\(b\\) are trainable parameters and \\(\\mathbf{W}_1 \\in \\mathbb{R}^{d \\times 2d}\\), \\(\\mathbf{W}_2 \\in \\mathbb{R}^{d \\times d}\\), \\(\\mathbf{w}, \\mathbf{b}_1, \\mathbf{b}_2 \\in \\mathbb{R}^{d \\times 1}\\), \\(b\\) is scalar. \\(\\sigma(\\cdot)\\) is the sigmoid function." + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.849, + 0.547, + 0.863 + ], + "angle": 0, + "content": "3.2.3 RELATIONSHIP TO EXISTING DLKT MODELS." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.872, + 0.835, + 0.903 + ], + "angle": 0, + "content": "Although the proposed SIMPLEKT belongs to model category C5 discussed in Section 2, it is distinguished from attention based representative DLKT models such as AKT (Ghosh et al., 2020), SAKT" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.922, + 0.505, + 0.932 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.163, + 0.061, + 0.48, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.127, + 0.836, + 0.298 + ], + "angle": 0, + "content": "Pandey & Karypis (2019) and SAINT (Choi et al., 2020). 
The difference between SIMPLEKT and AKT are threefold: first, we omit the self-attentive question encoder and knowledge encoder in AKT and directly feed the representations of \\(\\mathbf{x}_t\\)s and \\(\\mathbf{y}_t\\)s into attention based knowledge state extractor; second, instead of using time decayed monotonic attention function to extract the initial knowledge state, we choose to use the ordinary dot-product function that is simple and free of hyper-parameters; third, interaction representations \\(\\mathbf{y}_t\\)s are simply computed by adding representations of KCs and responses while AKT uses extra parameters to explicitly model the effects of question difficulty in interaction representations. When comparing SIMPLEKT to SAKT and SAINT, we explicitly model the latent question-centric difficulty when learning the KC representations while SAKT and SAINT ignore the question-level difference and treat all questions are identical if they contain the same set of KCs. Furthermore, SAINT adopts the encoder-decoder architecture and utilizes Transformers to model the student interaction sequence while our SIMPLEKT only uses the dot-product attention function." + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.323, + 0.323, + 0.336 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.355, + 0.283, + 0.367 + ], + "angle": 0, + "content": "4.1 DATASETS" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.382, + 0.835, + 0.45 + ], + "angle": 0, + "content": "In this paper, we experiment with 7 widely used datasets to comprehensively evaluate the performance of our models. These 7 datasets can be divided into 2 categories: (1) D1: Datasets containing information of both questions and KCs; and (2) D2: Datasets containing information of either questions or KCs. Table1 gives real samples of question and KCs from both D1 and D2 categories. 
The detailed statistics of each dataset are listed in Appendix A.1" + }, + { + "type": "title", + "bbox": [ + 0.162, + 0.457, + 0.724, + 0.47 + ], + "angle": 0, + "content": "4.1.1 DATASETS CONTAINING INFORMATION OF BOTH QUESTIONS AND KCs" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.481, + 0.836, + 0.548 + ], + "angle": 0, + "content": "ASSISTments2009 (AS2009) This dataset is about math exercises and collected from the free online tutoring ASSISTments platform in the school year 2009-2010. It is widely used as the standard benchmark for KT methods over the last decade (Feng et al., 2009; Ghosh et al., 2020; Zhang et al., 2017). It includes 337,4115 interactions, 4,661 sequences, 17,737 questions, 123 KCs and each question has 1.1968 KCs on average." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.553, + 0.835, + 0.621 + ], + "angle": 0, + "content": "Algebra2005 (AL2005) This dataset stems from KDD Cup 2010 EDM Challenge, including the detailed step-level student responses to the mathematical problems (Stamper et al. 2010). Similar to (Choffin et al., Ghosh et al., 2020, Zhang et al., 2017), a unique question is constructed by concatenating the problem name and step name. It has 884,098 interactions, 4,712 sequences, 173,113 questions, 112 KCs and the average KCs is 1.3521." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.626, + 0.835, + 0.667 + ], + "angle": 0, + "content": "Bridge2006 (BD2006) This dataset is also from the KDD Cup 2010 EDM Challenge and its unique question construction is similar to the process used in Algebra2005. The dataset has 1,824,310 interactions, 9,680 sequences, 129,263 questions, 493 KCs and the average KCs is 1.0136." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.671, + 0.835, + 0.726 + ], + "angle": 0, + "content": "NIPS34 This dataset is provided by NeurlPS 2020 Education Challenge which contains students' answers to mathematics questions from Eedi. 
We use the dataset of Task 3 & Task 4 to evaluate our models (Wang et al. 2020b). There are 1,399,470 interactions, 9,401 sequences, 948 questions, 57 KCs, each question has 1.0137 KCs on average." + }, + { + "type": "title", + "bbox": [ + 0.162, + 0.745, + 0.725, + 0.759 + ], + "angle": 0, + "content": "4.1.2 DATASETS CONTAINING INFORMATION OF EITHER QUESTIONS OR KCS" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.77, + 0.835, + 0.824 + ], + "angle": 0, + "content": "Static2019 This dataset is collected from an engineering statics course taught at the Carnegie Mellon University during Fall 2011 (Steif & Bier, 2014). Its unique question construction is similar to the process used in Algebra2005. The dataset has 189,292 interactions, 1,034 sequences and 1,223 questions." + }, + { + "type": "image", + "bbox": [ + 0.162, + 0.839, + 0.836, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.922, + 0.505, + 0.933 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.164, + 0.061, + 0.48, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.126, + 0.834, + 0.182 + ], + "angle": 0, + "content": "ASSISTments2015 (AS2015) Similar to ASSISTments2009, this dataset is collected from the ASSISTments platform in the year of 2015, and it has the largest number of students among the other ASSISTments datasets. It ends up with 682,789 interactions, 19,292 sequences and 100 KCs after pre-processing." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.186, + 0.834, + 0.215 + ], + "angle": 0, + "content": "\\(\\mathbf{POJ}^{[1]}\\): This dataset is collected from Peking coding practice online platform and provided by Pandey & Srivastava (2020). It has 987,593 interactions, 20,114 sequences and 2,748 questions." 
+ }, + { + "type": "text", + "bbox": [ + 0.16, + 0.219, + 0.834, + 0.26 + ], + "angle": 0, + "content": "Following the data pre-processing steps suggested by (Liu et al., 2022), we remove student sequences shorter than 3 attempts and set the maximum length of student interaction history to 200 for a high computational efficiency." + }, + { + "type": "table_caption", + "bbox": [ + 0.299, + 0.262, + 0.697, + 0.275 + ], + "angle": 0, + "content": "Table 1: Examples of questions and KCs from D1 and D2." + }, + { + "type": "table", + "bbox": [ + 0.172, + 0.277, + 0.825, + 0.329 + ], + "angle": 0, + "content": "
CategoryDatasetQuestionKnowledge Components
D1NIPS34Which calculation is incorrect? A.(-7)*2=-14 B.(-7)*( -2)=-14 C.7*2=14 D. 7*(-2)=-14Multiplying and Dividing Negative Numbers
D1NIPS34Which of the following number is a factor of 60 and a multiple of 6 ... A.3 B.12 C.20 D.120Factors and Highest Common Factor\nMultiples and Lowest Common Multiple
D2POJGiven 2 equations on the variables x and y, solve for x and y.Not Available
D2POJGiven a big integer number, you are required to find out whether it's a prime number.Not Available
" + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.338, + 0.29, + 0.35 + ], + "angle": 0, + "content": "4.2 BASELINES" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.362, + 0.834, + 0.392 + ], + "angle": 0, + "content": "To comprehensively and systematically evaluate the performance of SIMPLEKT, we compare SIMPLEKT against 12 DLKT baseline models from aforementioned 5 categories in Section2 as follows:" + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.401, + 0.781, + 0.417 + ], + "angle": 0, + "content": "C1: DKT (Piech et al. 2015): directly uses RNNs to model students' learning processes." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.419, + 0.834, + 0.446 + ], + "angle": 0, + "content": "- C1: DKT+ (Yeung & Yeung 2018): improves DKT by addressing the reconstruction and inconsistent issues." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.45, + 0.834, + 0.466 + ], + "angle": 0, + "content": "C1: DKT-F (Nagatani et al. 2019): improves DKT by considering students' forgetting behaviors." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.469, + 0.834, + 0.496 + ], + "angle": 0, + "content": "C1: KQN (Lee & Yeung, 2019): utilizes the dot product of the students' ability and KC representations to predict student performance." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.499, + 0.818, + 0.516 + ], + "angle": 0, + "content": "C1: LPKT (Shen et al. 2021): designs the learning cell to model students' learning processes." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.518, + 0.834, + 0.545 + ], + "angle": 0, + "content": "- C1: IEKT (Long et al., 2021): estimates student knowledge state via the student cognition and knowledge acquisition estimation modules." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.548, + 0.834, + 0.577 + ], + "angle": 0, + "content": "C2: DKVMN (Zhang et al., 2017): exploits the relationships among KCs and estimate student mastery via memory networks." 
+ }, + { + "type": "text", + "bbox": [ + 0.163, + 0.58, + 0.834, + 0.607 + ], + "angle": 0, + "content": "C3: ATKT (Guo et al., 2021): uses adversarial perturbations to enhance the generalization of the attention-LSTM based KT model." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.611, + 0.834, + 0.639 + ], + "angle": 0, + "content": "- C4: GKT (Nakagawa et al., 2019): casts the knowledge structure as a graph and reformulate the KT task as a node-level classification problem." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.642, + 0.834, + 0.669 + ], + "angle": 0, + "content": "C5: SAKT (Pandey & Karypis 2019): uses self-attention to identify the relevance between the interactions and KCs." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.673, + 0.834, + 0.702 + ], + "angle": 0, + "content": "- C5: SAINT (Choi et al., 2020): a Transformer-based model for KT that encode exercise and responses in the encoder and decoder respectively." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.704, + 0.834, + 0.732 + ], + "angle": 0, + "content": "- C5: AKT (Ghosh et al. 2020): models forgetting behaviors during the relevance computation between historical interactions and target questions." + }, + { + "type": "list", + "bbox": [ + 0.163, + 0.401, + 0.834, + 0.732 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.748, + 0.373, + 0.761 + ], + "angle": 0, + "content": "4.3 EXPERIMENTAL SETUP" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.772, + 0.835, + 0.865 + ], + "angle": 0, + "content": "Similar to (Liu et al. 2022), we randomly withhold \\(20\\%\\) of the students' sequences for model evaluation and we perform standard 5-fold cross validation on the rest \\(80\\%\\) of each dataset. We select ADAM (Kingma & Ba 2014) as the optimizer to train our model. The maximum of the training epochs is set to 200, and an early stopping strategy is used to speed up the training process. 
The embedding dimension, the hidden state dimension, the two dimension of the prediction layers are set to [64, 128], the learning rate and dropout rate are set to [1e-3, 1e-4, 1e-5] and [0.05, 0.1, 0.3, 0.5] respectively, the number of blocks and attention heads are set to [1, 2, 4] and [4, 8], the seed" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.874, + 0.742, + 0.885 + ], + "angle": 0, + "content": "10 https://sites.google.com/site/assistmentsdata/datasets/2015-assistments-skill-builder-data" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.887, + 0.631, + 0.9 + ], + "angle": 0, + "content": "1 https://drive.google.com/drive/folders/1LRLjqWfODwTYRMPw6wEJ_mMt1KZ4XBdk." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.922, + 0.505, + 0.933 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.163, + 0.061, + 0.48, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.127, + 0.834, + 0.169 + ], + "angle": 0, + "content": "is set to [42, 3407] for reproducing the experimental results. Our model is implemented in PyTorch and trained on NVIDIA RTX 3090 GPU device. Similar to all existing DLKT research, we use the AUC as the main evaluation metric, and use accuracy as the secondary metric." + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.185, + 0.391, + 0.198 + ], + "angle": 0, + "content": "4.4 EXPERIMENTAL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.209, + 0.836, + 0.289 + ], + "angle": 0, + "content": "Overall Performance. Table 2 and Table 3 summarize the overall prediction performance of SIMPLEKT and all baselines in terms of the average AUC and accuracy scores. Marker *, o and ● indicates whether SIMPLEKT is statistically superior/equal/inferior to the compared method (using paired t-test at 0.01 significance level). 
The last column shows the total number of win/tie/loss for SIMPLEKT against the compared method on all 7 datasets (e.g., #win is how many times SIMPLEKT significantly outperforms that method)." + }, + { + "type": "table_caption", + "bbox": [ + 0.162, + 0.291, + 0.835, + 0.317 + ], + "angle": 0, + "content": "Table 2: Overall AUC performance of SIMPLEKT and all baselines. “-” indicates the method is inapplicable for that dataset." + }, + { + "type": "table", + "bbox": [ + 0.166, + 0.319, + 0.842, + 0.446 + ], + "angle": 0, + "content": "
ModelD1: Datasets containing info. of both questions and KCsD2: Datasets containing info. of either questions or KCsSIMPLEKT
AS2009AL2005BD2006NIPS34Statics2011AS2015POJ#win/#tie/#loss
DKT0.7541±0.0011*0.8149±0.0011*0.8015±0.0008*0.7689±0.0002*0.8222±0.0013●0.7271±0.0005●0.6089±0.0009*5/0/2
DKT+0.7547±0.0017*0.8156±0.0011*0.8020±0.0004*0.7696±0.0002*0.8279±0.0004●0.7285±0.0006●0.6173±0.0007*5/0/2
DKT-F-0.8147±0.0013*0.7985±0.0013*0.7733±0.0003*0.7839±0.0061*-0.6030±0.0023*5/0/0
KQN0.7477±0.0011*0.8027±0.0015*0.7936±0.0014*0.7684±0.0003*0.8232±0.0007●0.7254±0.0004●0.6080±0.0015*5/0/2
LPKT0.7814±0.0022●0.8274±0.0014●0.8055±0.0006*0.8035±0.0003○---1/1/2
IEKT0.7861±0.0027●0.8416±0.0014●0.8125±0.0009*0.8045±0.0002●---1/0/3
DKVMN0.7473±0.0006*0.8054±0.0011*0.7983±0.0009*0.7673±0.0004*0.8093±0.0017*0.7227±0.0004*0.6056±0.0022*7/0/0
ATKT0.7470±0.0008*0.7995±0.0023*0.7889±0.0008*0.7665±0.0001*0.8055±0.0020*0.7245±0.0007*0.6075±0.0012*7/0/0
GKT0.7424±0.0021*0.8110±0.0009*0.8046±0.0008*0.7689±0.0024*0.8040±0.0065○0.7258±0.0012●0.6070±0.0036*5/1/1
SAKT0.7246±0.0017*0.7880±0.0063*0.7740±0.0008*0.7517±0.0005*0.7965±0.0014*0.7114±0.0003*0.6095±0.0013*7/0/0
SAINT0.6958±0.0023○0.7775±0.0017*0.7781±0.0013*0.7873±0.0007*0.7599±0.0139*0.7026±0.0011*0.5563±0.0012*6/1/0
AKT0.7853±0.0017●0.8306±0.0019●0.8208±0.0007●0.8033±0.0003*0.8309±0.0009●0.7281±0.0004●0.6281±0.0013●1/0/6
simpleKT0.7744±0.00180.8254±0.00030.8160±0.00060.8035±0.00000.8199±0.00110.7248±0.00050.6252±0.0005-
" + }, + { + "type": "table_caption", + "bbox": [ + 0.162, + 0.473, + 0.835, + 0.499 + ], + "angle": 0, + "content": "Table 3: Overall Accuracy performance of SIMPLEKT and all baselines. “-” indicates the method is inapplicable for that dataset." + }, + { + "type": "table", + "bbox": [ + 0.166, + 0.501, + 0.842, + 0.629 + ], + "angle": 0, + "content": "
ModelD1: Datasets containing info. of both questions and KCsD2: Datasets containing info. of either questions or KCsSIMPLEKT
AS2009AL2005BD2006NIPS34Statics2011AS2015POJ#win/#tie/#loss
DKT0.7244±0.0014*0.8097±0.0005●0.8553±0.0002*0.7032±0.0004*0.7969±0.0006●0.7503±0.0003*0.6328±0.0020*5/0/2
DKT+0.7248±0.0009*0.8097±0.0007●0.8553±0.0003*0.7039±0.0004*0.7977±0.0006●0.7510±0.0004●0.6482±0.0021*4/0/3
DKT-F-0.8090±0.0005●0.8536±0.0004*0.7076±0.0002*0.7872±0.0011*-0.6371±0.0030*4/0/1
KQN0.7228±0.0009*0.8025±0.0006*0.8532±0.0006*0.7028±0.0001*0.7978±0.0007●0.7500±0.0003*0.6435±0.0017*6/0/1
LPKT0.7355±0.0015●0.8145±0.0007●0.8544±0.0008*0.7341±0.0003●---1/0/3
IEKT0.7375±0.0042●0.8236±0.0010●0.8553±0.0023*0.7330±0.0002●---1/0/3
DKVMN0.7199±0.0010*0.8027±0.0007*0.8545±0.0002*0.7016±0.0005*0.7929±0.0006*0.7508±0.0006○0.6393±0.0015*6/1/0
ATKT0.7208±0.0009*0.7998±0.0019*0.8511±0.0004*0.7013±0.0002*0.7904±0.0011*0.7494±0.0002*0.6332±0.0023*7/0/0
GKT0.7153±0.0032*0.8088±0.0008●0.8555±0.0002*0.7014±0.0028*0.7902±0.0021○0.7504±0.0010*0.6117±0.0147*5/1/1
SAKT0.7063±0.0018*0.7954±0.0020*0.8461±0.0005*0.6879±0.0004*0.7879±0.0015*0.7474±0.0002*0.6407±0.0035*7/0/0
SAINT0.6936±0.0034○0.7791±0.0016*0.8411±0.0065*0.7180±0.0006*0.7682±0.0056*0.7438±0.0010*0.6476±0.0003*6/1/0
AKT0.7392±0.0021●0.8124±0.0011●0.8587±0.0005●0.7323±0.0005*0.8021±0.0011●0.7521±0.0005●0.6492±0.0010*2/0/5
simpleKT0.7320±0.00120.8083±0.00050.8579±0.00030.7328±0.00010.7957±0.00200.7508±0.00040.6522±0.0008-
" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.638, + 0.836, + 0.901 + ], + "angle": 0, + "content": "From Table 2 and Table 3 we find the following results: (1) compared to other baseline methods, SIMPLEKT almost always ranks top 3 in terms of AUC scores and achieves 55 wins, 3 ties and 18 loss in total against 12 baselines on 7 public datasets of different domains. This indicates the strength of SIMPLEKT as a baseline of KT; (2) in general, the SIMPLEKT approach performs better on D1 datasets that have both question and KC information available. When training SIMPLEKT on D2 datasets, due to the lack of distinguished information about questions and KCs, the explicit question-centric difficulty modeling degrades and the KC representations become the question agnostic; (3) when comparing SIMPLEKT to other attentive models, i.e., SAKT, SAINT and AKT, in C5 category, our SIMPLEKT beats SAKT and SAINT on all the datasets, which indicates the effectiveness of explicit question-centric difficulty modeling. Although our SIMPLEKT approach is significantly worse than the AKT approach on 6 datasets, the performance gaps are quite minimal that are mostly within a \\(0.5\\%\\) range. On the other hand, SIMPLEKT is much more concise compared to the two-layer attentive architecture in the AKT approach; (4) the SIMPLEKT outperforms many deep sequential models in category C1, including DKT, DKT+, DKT-F, KQN on AS2009, AL2005, BD2006, NIPS34 and POJ. We believe this is because the above 4 sequential models use KCs to index questions cannot capture the individual differences among questions with the same KCs which is crucial to predict student future performance; (5) comparing SIMPLEKT and IEKT, we can see, IEKT has better prediction performance on AS2009, AL2005, and NIPS34. 
This is because IEKT captures both question-level and KC-level variations in its representations and at the same time, it designs two specific neural modules to estimate individual cognition and acquisition" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.922, + 0.506, + 0.933 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.164, + 0.061, + 0.48, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.127, + 0.836, + 0.194 + ], + "angle": 0, + "content": "abilities; (6) compare to other different types of models in C2, C3, and C4, such as DKVMN, ATKT and GKT, our SIMPLEKT achieves better performance by using an ordinary dot-product attention function without any memory mechanism, adversarial learning or graph constructions, which encourages the further educational researchers and practitioners to develop effective models with a design of simplicity." + }, + { + "type": "image", + "bbox": [ + 0.35, + 0.196, + 0.634, + 0.207 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.214, + 0.34, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.214, + 0.495, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.214, + 0.648, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.652, + 0.214, + 0.803, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.328, + 0.34, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.328, + 0.495, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.328, + 0.648, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.193, + 0.439, + 0.802, + 0.453 + ], + "angle": 0, + 
"content": "Figure 3: Non-accumulative predictions in the multi-step ahead scenario in terms of AUC." + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.462, + 0.634, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.479, + 0.34, + 0.591 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.479, + 0.495, + 0.591 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.479, + 0.648, + 0.591 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.652, + 0.479, + 0.804, + 0.591 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.188, + 0.595, + 0.34, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.595, + 0.495, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.595, + 0.648, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.704, + 0.816, + 0.717 + ], + "angle": 0, + "content": "Figure 4: Non-accumulative predictions in the multi-step ahead scenario in terms of Accuracy." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.729, + 0.836, + 0.9 + ], + "angle": 0, + "content": "Multi-step KT Prediction Performance. In order to make the prediction close to real application scenarios, we also predict our model in multi-step prediction which predicts a span of student's responses given the student's historical interaction sequence. Practically, accurate multi-step KT prediction will provide constructive feedback to learning path selection and construction and help teachers adaptively adjust future teaching materials. We conduct the prediction in a non-accumulative setting that predicts all future values all at once to avoid accumulative prediction errors. 
To have a fine-grained analysis in the multi-step ahead prediction scenario, we further experiment with DLKT models on different portions of observed student interactions. Specifically, we vary the observed percentages of student interaction length from \\(20\\%\\) to \\(90\\%\\) with step size of \\(10\\%\\). Due to the space limit, we select the best baseline in each category, i.e., IEKT, DKVMN, ATKT, GKT, AKT as the representative approaches and the results in terms of AUC and accuracy are shown in Figure 3 and Figure 4. We make the following observations: (1) with the increasing historical information, the student AUC performance prediction become more accurate in most cases, which is in line with the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.922, + 0.504, + 0.932 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.163, + 0.061, + 0.48, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.127, + 0.834, + 0.195 + ], + "angle": 0, + "content": "real-world educational scenario; (2) the attention based model almost outperforms other KT models in Statics2011, AS2005 and POJ according to AUC scores, this is because the attention mechanism is expert in capturing the long-term dependencies; and (3) our SIMPLEKT achieves the best prediction performance in BD2006, NIPS34 in terms of AUC, which indicates the proposed method is simple yet powerful." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.2, + 0.836, + 0.332 + ], + "angle": 0, + "content": "Qualitative Visual Analysis. In this section, we qualitatively show the visualization of the prediction results made by SIMPLEKT in Figure 5 To better understand the model predictive behavior, we compute the historical error rate (HER) per question from the data and use the HERs as surrogates of question difficulties. 
Due to the space limit, more illustrative and fine-grained results are provided in Appendix A.2 As we can see from Figure 5 when a student meets a certain KC for the first time, the higher HER of the question, the lower the probability that student will get it correct. For example, the HERs for questions q527, q509, q512, q518 and q219 are 0.35, 0.41, 0.20, 0.51, and 0.48 and the corresponding prediction probabilities of SIMPLEKT are 0.74, 0.52, 0.84, 0.45, and 0.51 respectively. Furthermore, for those questions that cover the same set of KCs, such as questions q526 and q529, the model prediction probability decreases when the corresponding HER increases." + }, + { + "type": "image", + "bbox": [ + 0.167, + 0.345, + 0.836, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.456, + 0.741, + 0.471 + ], + "angle": 0, + "content": "Figure 5: Visualization of a student's prediction results on SIMPLEKT." + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.484, + 0.836, + 0.669 + ], + "angle": 0, + "content": "Ablation Study. We systematically examine the effect of the key component of question difficulty modeling by constructing two model variants: (1) SIMPLEKT-ScalarDiff that changes the question-centric difficulty vector \\(\\mathbf{m}_{q_t}\\) to scalar; and (2) SIMPLEKT-NoDiff that completely ignores the question difficulty modeling and simply set \\(\\mathbf{x}_t\\) to \\(\\mathbf{z}_{c_t}\\). The prediction performance on all datasets that belong to D1 are reported in Table 4. Please note that since datasets in D2 only have either question information or KC information, SIMPLEKT, SIMPLEKT-ScalarDiff, and SIMPLEKT-NoDiff essentially become mathematically unidentifiable. 
From Table 4 we can easily observe that (1) the SIMPLEKT method outperforms the two model variants and especially when removing the question difficulty modeling component, the prediction performance decreases more than \\(2\\%\\) on all D1 datasets. This empirically verifies the importance of question-centric difficulty modeling when making student performance prediction in KT scenarios; and (2) comparing SIMPLEKT and SIMPLEKT-ScalarDiff, the performance is very minimal. We believe this is because the KC representation \\(\\mathbf{x}_t\\) is based on the simple additive assumption and a scalar is expressive enough to represent question-level difficulty under this assumption." + }, + { + "type": "table_caption", + "bbox": [ + 0.289, + 0.679, + 0.707, + 0.693 + ], + "angle": 0, + "content": "Table 4: The performance of different variants in SIMPLEKT." + }, + { + "type": "table", + "bbox": [ + 0.197, + 0.702, + 0.803, + 0.753 + ], + "angle": 0, + "content": "
AS2009AL2005BD2006NIPS34
SIMPLEKT0.7744±0.00180.8254±0.00030.8160±0.00060.8035±0.0000
SIMPLEKT-ScalarDiff0.7740±0.00210.8250±0.00130.8159±0.00110.8008±0.0012
SIMPLEKT-NoDiff0.7411±0.00160.8048±0.00180.7922±0.00110.7646±0.0005
" + }, + { + "type": "title", + "bbox": [ + 0.163, + 0.778, + 0.317, + 0.793 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.807, + 0.835, + 0.9 + ], + "angle": 0, + "content": "In this work, we propose SIMPLEKT, a simple but tough-to-beat approach to solve KT task effectively. Motivated by the Rasch model in psychometrics, the SIMPLEKT approach is designed to capture individual differences among questions with the same KCs. Furthermore, the proposed SIMPLEKT approach simplifies the sophisticated student knowledge state estimation component with the ordinary dot-product attention function. Comprehensive experimental results demonstrate that SIMPLEKT is able to beat a wide range of recently proposed DLKT models on various datasets from different domains. We believe this work serves as a strong baseline for future KT research." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.922, + 0.506, + 0.933 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.164, + 0.061, + 0.48, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.164, + 0.127, + 0.438, + 0.142 + ], + "angle": 0, + "content": "REPRODUCIBILITY STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.155, + 0.837, + 0.263 + ], + "angle": 0, + "content": "The code of SIMPLEKT and its variants, i.e., SIMPLEKT-ScalarDiff and SIMPLEKT-NoDiff, to reproduce the experimental results can be found at https://github.com/pykt-team/ pykt-toolkit. We give the details of data-preprocessing and the training hyper-parameters of SIMPLEKT in Section 4.1 and Section 4.3. The code of the 12 comparison models is accessible from an open-sourced PYKT python library at https://pykt.org/. We choose to use the same data partitions of train, validation, test sets as PYKT and hence all the results can be easily reproducible. 
All the model training details of all baselines can be found at https://pykt-toolkit.readthedocs.io/en/latest/pykt.models.html" + }, + { + "type": "title", + "bbox": [ + 0.164, + 0.28, + 0.356, + 0.294 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.308, + 0.835, + 0.376 + ], + "angle": 0, + "content": "This work was supported in part by National Key R&D Program of China, under Grant No. 2020AAA0104500; in part by Beijing Nova Program (Z201100006820068) from Beijing Municipal Science & Technology Commission; in part by NFSC under Grant No. 61877029 and in part by Key Laboratory of Smart Education of Guangdong Higher Education Institutes, Jinan University (2022LSYS003)." + }, + { + "type": "title", + "bbox": [ + 0.164, + 0.394, + 0.283, + 0.408 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.164, + 0.416, + 0.836, + 0.457 + ], + "angle": 0, + "content": "Ghodai Abdelrahman and Qing Wang. Knowledge tracing with sequential key-value memory networks. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 175-184, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.464, + 0.837, + 0.506 + ], + "angle": 0, + "content": "Hao Cen, Kenneth Koedinger, and Brian Junker. Learning factors analysis-a general method for cognitive model evaluation and improvement. In International Conference on Intelligent Tutoring Systems, pp. 164-175. Springer, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.164, + 0.512, + 0.836, + 0.554 + ], + "angle": 0, + "content": "Jiahao Chen, Zitao Liu, Shuyan Huang, Qiongqiong Liu, and Weiqi Luo. Improving interpretability of deep sequential knowledge tracing models with question-centric cognitive representations. In Proceedings of the AAAI Conference on Artificial Intelligence, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.56, + 0.835, + 0.589 + ], + "angle": 0, + "content": "Penghe Chen, Yu Lu, Vincent W Zheng, and Yang Pian. Prerequisite-driven deep knowledge tracing. In 2018 IEEE International Conference on Data Mining, pp. 39-48. IEEE, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.595, + 0.837, + 0.638 + ], + "angle": 0, + "content": "Benoit Choffin, Fabrice Popineau, Yolaine Bourda, and Jill-Jenn Vie. DAS3H: Modeling student learning and forgetting for optimally scheduling distributed practice of skills. In Proceedings of The 12th International Conference on Educational Data Mining (EDM 2019), volume 29, pp. 38." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.644, + 0.835, + 0.698 + ], + "angle": 0, + "content": "Youngduck Choi, Youngnam Lee, Junghyun Cho, Jineon Baek, Byungsoo Kim, Yeongmin Cha, Dongmin Shin, Chan Bae, and Jaewe Heo. Towards an appropriate query, key, and value computation for knowledge tracing. In Proceedings of the Seventh ACM Conference on Learning@ Scale, pp. 341-344, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.705, + 0.835, + 0.734 + ], + "angle": 0, + "content": "Albert T Corbett and John R Anderson. Knowledge tracing: Modeling the acquisition of procedural knowledge. User Modeling and User-adapted Interaction, 4(4):253-278, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.74, + 0.835, + 0.781 + ], + "angle": 0, + "content": "Mingyu Feng, Neil Heffernan, and Kenneth Koedinger. Addressing the assessment challenge with an online system that tutors as it assesses. User Modeling and User-adapted Interaction, 19(3): 243-266, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.788, + 0.835, + 0.817 + ], + "angle": 0, + "content": "Aritra Ghosh, Neil Heffernan, and Andrew S Lan. Context-aware attentive knowledge tracing. In ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.824, + 0.835, + 0.865 + ], + "angle": 0, + "content": "Xiaopeng Guo, Zhijie Huang, Jie Gao, Mingyu Shang, Maojing Shu, and Jun Sun. Enhancing knowledge tracing via adversarial training. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 367-375, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.872, + 0.835, + 0.901 + ], + "angle": 0, + "content": "Tanja Käser, Severin Klingler, Alexander G Schwing, and Markus Gross. Dynamic bayesian networks for student modeling. IEEE Transactions on Learning Technologies, 10(4):450-462, 2017." + }, + { + "type": "list", + "bbox": [ + 0.164, + 0.416, + 0.837, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.922, + 0.511, + 0.933 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.164, + 0.061, + 0.479, + 0.075 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.127, + 0.834, + 0.154 + ], + "angle": 0, + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.164, + 0.834, + 0.192 + ], + "angle": 0, + "content": "Elise Lavoué, Baptiste Monterrat, Michel Desmarais, and Sébastien George. Adaptive gamification for learning environments. IEEE Transactions on Learning Technologies, 12(1):16-28, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.164, + 0.2, + 0.835, + 0.241 + ], + "angle": 0, + "content": "Jinseok Lee and Dit-Yan Yeung. Knowledge query network for knowledge tracing: How knowledge interacts with skills. In Proceedings of the 9th International Conference on Learning Analytics & Knowledge, pp. 491-500, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.25, + 0.833, + 0.29 + ], + "angle": 0, + "content": "Qi Liu, Zhenya Huang, Yu Yin, Enhong Chen, Hui Xiong, Yu Su, and Guoping Hu. EKT: Exercise-aware knowledge tracing for student performance prediction. IEEE Transactions on Knowledge and Data Engineering, 33(1):100-115, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.3, + 0.834, + 0.352 + ], + "angle": 0, + "content": "Yunfei Liu, Yang Yang, Xianyu Chen, Jian Shen, Haifeng Zhang, and Yong Yu. Improving knowledge tracing via pre-training question embeddings. In Proceedings of the Twenty-Ninth International Conference on International Joint Conferences on Artificial Intelligence, pp. 1556-1562, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.363, + 0.834, + 0.414 + ], + "angle": 0, + "content": "Zitao Liu, Qiongqiong Liu, Jiahao Chen, Shuyan Huang, Jiliang Tang, and Weiqi Luo. pyKT: A python library to benchmark deep learning based knowledge tracing models. In 36th Conference on Neural Information Processing Systems (NeurIPS 2022) Track on Datasets and Benchmarks., 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.425, + 0.834, + 0.466 + ], + "angle": 0, + "content": "Ting Long, Yunfei Liu, Jian Shen, Weinan Zhang, and Yong Yu. Tracing knowledge state with individual cognition and acquisition estimation. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 173-182, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.475, + 0.834, + 0.515 + ], + "angle": 0, + "content": "Ting Long, Jiarui Qin, Jian Shen, Weinan Zhang, Wei Xia, Ruiming Tang, Xiuqiang He, and Yong Yu. Improving knowledge tracing with collaborative information. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining, pp. 599-607, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.525, + 0.834, + 0.565 + ], + "angle": 0, + "content": "Sein Minn, Yi Yu, Michel C Desmarais, Feida Zhu, and Jill-Jenn Vie. Deep knowledge tracing and dynamic student classification for knowledge tracing. In 2018 IEEE International Conference on Data Mining, pp. 1182-1187. IEEE, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.575, + 0.834, + 0.615 + ], + "angle": 0, + "content": "Koki Nagatani, Qian Zhang, Masahiro Sato, Yan-Ying Chen, Francine Chen, and Tomoko Ohkuma. Augmenting knowledge tracing by considering forgetting behavior. In The World Wide Web Conference, pp. 3101-3107, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.624, + 0.834, + 0.664 + ], + "angle": 0, + "content": "Hiromi Nakagawa, Yusuke Iwasawa, and Yutaka Matsuo. Graph-based knowledge tracing: modeling student proficiency using graph neural network. In 2019 IEEE/WIC/ACM International Conference on Web Intelligence, pp. 156-163. IEEE, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.673, + 0.834, + 0.714 + ], + "angle": 0, + "content": "Shalini Pandey and George Karypis. A self-attentive model for knowledge tracing. In 12th International Conference on Educational Data Mining, pp. 384–389. International Educational Data Mining Society, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.723, + 0.834, + 0.763 + ], + "angle": 0, + "content": "Shalini Pandey and Jaideep Srivastava. RKT: relation-aware self-attention for knowledge tracing. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, pp. 1205–1214, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.773, + 0.834, + 0.813 + ], + "angle": 0, + "content": "Chris Piech, Jonathan Bassen, Jonathan Huang, Surya Ganguli, Mehran Sahami, Leonidas J Guibas, and Jascha Sohl-Dickstein. Deep knowledge tracing. Advances in Neural Information Processing Systems, 28, 2015." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.822, + 0.834, + 0.85 + ], + "angle": 0, + "content": "Shi Pu, Michael Yudelson, Lu Ou, and Yuchi Huang. Deep knowledge tracing with transformers. In International Conference on Artificial Intelligence in Education, pp. 252-256. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.859, + 0.835, + 0.899 + ], + "angle": 0, + "content": "Sami Sarsa, Juho Leinonen, Arto Hellas, et al. Empirical evaluation of deep learning models for knowledge tracing: Of hyperparameters and metrics on performance and replicability. Journal of Educational Data Mining, 14(2), 2022." + }, + { + "type": "list", + "bbox": [ + 0.164, + 0.127, + 0.835, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.922, + 0.508, + 0.933 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.164, + 0.061, + 0.479, + 0.074 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.127, + 0.836, + 0.182 + ], + "angle": 0, + "content": "Shuanghong Shen, Qi Liu, Enhong Chen, Han Wu, Zhenya Huang, Weihao Zhao, Yu Su, Haiping Ma, and Shijin Wang. Convolutional knowledge tracing: Modeling individualization in student learning process. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 1857-1860, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.187, + 0.834, + 0.227 + ], + "angle": 0, + "content": "Shuanghong Shen, Qi Liu, Enhong Chen, Zhenya Huang, Wei Huang, Yu Yin, Yu Su, and Shijin Wang. Learning process-consistent knowledge tracing. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining, pp. 1452-1460, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.164, + 0.233, + 0.835, + 0.286 + ], + "angle": 0, + "content": "Shuanghong Shen, Zhenya Huang, Qi Liu, Yu Su, Shijin Wang, and Enhong Chen. Assessing student's dynamic knowledge state by exploring the question difficulty effect. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 427-437, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.163, + 0.292, + 0.835, + 0.333 + ], + "angle": 0, + "content": "J Stamper, A Niculescu-Mizil, S Ritter, G Gordon, and K Koedinger. Algebra I 2005-2006 and Bridge to Algebra 2006-2007. Development data sets from KDD Cup 2010 Educational Data Mining Challenge, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.164, + 0.338, + 0.625, + 0.352 + ], + "angle": 0, + "content": "Paul Steif and Norman Bier. Oli engineering statics-fall 2011, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.165, + 0.359, + 0.833, + 0.411 + ], + "angle": 0, + "content": "Nguyen Thai-Nghe, Lucas Drumond, Tomás Horváth, Artus Krohn-Grimberghe, Alexandros Nanopoulos, and Lars Schmidt-Thieme. Factorization techniques for predicting student performance. In Educational Recommender Systems and Technologies: Practices and Challenges, pp. 129-153. IGI Global, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.418, + 0.834, + 0.445 + ], + "angle": 0, + "content": "Hanshuang Tong, Zhen Wang, Qi Liu, Yun Zhou, and Wenyuan Han. HGKT: Introducing hierarchical exercise graph for knowledge tracing. arXiv preprint arXiv:2006.16915, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.451, + 0.836, + 0.491 + ], + "angle": 0, + "content": "Chenyang Wang, Weizhi Ma, Min Zhang, Chuancheng Lv, Fengyuan Wan, Huijie Lin, Taoran Tang, Yiqun Liu, and Shaoping Ma. Temporal cross-effects in knowledge tracing. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining, pp. 517-525, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.497, + 0.834, + 0.538 + ], + "angle": 0, + "content": "Fei Wang, Qi Liu, Enhong Chen, Zhenya Huang, Yuying Chen, Yu Yin, Zai Huang, and Shijin Wang. Neural cognitive diagnosis for intelligent education systems. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pp. 6153-6161, 2020a." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.543, + 0.833, + 0.595 + ], + "angle": 0, + "content": "Zhiwei Wang, Xiaoqin Feng, Jiliang Tang, Gale Yan Huang, and Zitao Liu. Deep knowledge tracing with side information. In Artificial Intelligence in Education: 20th International Conference, AIED 2019, Chicago, IL, USA, June 25-29, 2019, Proceedings, Part II 20, pp. 303-308. Springer, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.603, + 0.833, + 0.657 + ], + "angle": 0, + "content": "Zichao Wang, Angus Lamb, Evgeny Saveliev, Pashmina Cameron, Yordan Zaykov, Jose Miguel Hernández-Lobato, Richard E Turner, Richard G Baraniuk, Craig Barton, Simon Peyton Jones, et al. Instructions and guide for diagnostic questions: The neurips 2020 education challenge. ArXiv preprint, abs/2007.12061, 2020b. URL https://arxiv.org/abs/2007.12061" + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.662, + 0.833, + 0.714 + ], + "angle": 0, + "content": "Yang Yang, Jian Shen, Yanru Qu, Yunfei Liu, Kerong Wang, Yaoming Zhu, Weinan Zhang, and Yong Yu. GIKT: a graph-based interaction model for knowledge tracing. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pp. 299-315. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.721, + 0.833, + 0.762 + ], + "angle": 0, + "content": "Chun-Kit Yeung. Deep-IRT: Make deep learning based knowledge tracing explainable using item response theory. In Proceedings of The 12th International Conference on Educational Data Mining (EDM 2019), pp. 683-686, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.767, + 0.833, + 0.807 + ], + "angle": 0, + "content": "Chun-Kit Yeung and Dit-Yan Yeung. Addressing two problems in deep knowledge tracing via prediction-consistent regularization. In Proceedings of the Fifth Annual ACM Conference on Learning at Scale, pp. 1-10, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.814, + 0.833, + 0.854 + ], + "angle": 0, + "content": "Jiani Zhang, Xingjian Shi, Irwin King, and Dit Yan Yeung. Dynamic key-value memory networks for knowledge tracing. In Proceedings of the 26th International Conference on World Wide Web, pp. 765, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.166, + 0.86, + 0.833, + 0.899 + ], + "angle": 0, + "content": "Moyu Zhang, Xinning Zhu, Chunhong Zhang, Yang Ji, Feng Pan, and Changchuan Yin. Multi-Factors Aware Dual-Attentional Knowledge Tracing. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management, pp. 2588-2597, 2021." + }, + { + "type": "list", + "bbox": [ + 0.163, + 0.127, + 0.836, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.922, + 0.509, + 0.933 + ], + "angle": 0, + "content": "12" + } + ] +] \ No newline at end of file diff --git a/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_origin.pdf b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c10586cdafb7621c1cfe468004dcd186001d82e3 --- /dev/null +++ b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/026aa3e3-fd2b-47eb-9b32-9efc86e03a5c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f22e1e475827cedc0e99e54183b2481230a4d9766bdefb0a57a8649affe3937 +size 4450707 diff --git a/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for 
Knowledge Tracing/full.md b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/full.md new file mode 100644 index 0000000000000000000000000000000000000000..509823f8038f3264a8fb0b05df315bf2360728cc --- /dev/null +++ b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/full.md @@ -0,0 +1,290 @@ +# SIMPLEKT: A SIMPLE BUT TOUGH-TO-BEAT BASELINE FOR KNOWLEDGE TRACING + +Zitao Liu + +Guangdong Institute of Smart Education, Jinan University, Guangzhou, China + +liuzitao@jnu.edu.cn + +Qiongqiong Liu, Jiahao Chen, Shuyan Huang* + +TAL Education Group, Beijing, China + +{liuqiongqiongl, chenjiahao, huangshuyan}@tal.com + +Weiqi Luo + +Guangdong Institute of Smart Education, Jinan University, Guangzhou, China + +lwq@jnu.edu.cn + +# ABSTRACT + +Knowledge tracing (KT) is the problem of predicting students' future performance based on their historical interactions with intelligent tutoring systems. Recently, many works present lots of special methods for applying deep neural networks to KT from different perspectives like model architecture, adversarial augmentation and etc., which make the overall algorithm and system become more and more complex. Furthermore, due to the lack of standardized evaluation protocol (Liu et al. 2022), there is no widely agreed KT baselines and published experimental comparisons become inconsistent and self-contradictory, i.e., the reported AUC scores of DKT on ASSISTments2009 range from 0.721 to 0.821 (Minn et al. 2018 Yeung & Yeung 2018). Therefore, in this paper, we provide a strong but simple baseline method to deal with the KT task named SIMPLEKT. Inspired by the Rasch model in psychometrics, we explicitly model question-specific variations to capture the individual differences among questions covering the same set of knowledge components that are a generalization of terms of concepts or skills needed for learners to accomplish steps in a task or a problem. 
Furthermore, instead of using sophisticated representations to capture student forgetting behaviors, we use the ordinary dot-product attention function to extract the time-aware information embedded in the student learning interactions. Extensive experiments show that such a simple baseline is able to always rank top 3 in terms of AUC scores and achieve 57 wins, 3 ties and 16 loss against 12 DLKT baseline methods on 7 public datasets of different domains. We believe this work serves as a strong baseline for future KT research. Code is available at https://github.com/pykt-team/pykt-toolkit! + +# 1 INTRODUCTION + +Knowledge tracing (KT) is a sequential prediction task that aims to predict the outcomes of students over questions by modeling their mastery of knowledge, i.e., knowledge states, as they interact with learning platforms such as massive open online courses and intelligent tutoring systems, as shown in Figure ①. Solving the KT problems may help teachers better detect students that need further attention, or recommend personalized learning materials to students. + +The KT related research has been studied since 1990s where Corbett and Anderson, to the best of our knowledge, were the first to estimate students' current knowledge with regard to each individ- + +ual knowledge component (KC) (Corbett & Anderson 1994). A KC is a description of a mental structure or process that a learner uses, alone or in combination with other KCs, to accomplish steps in a task or a problem? Since then, many attempts have been made to solve the KT problem, such as probabilistic graphical models (Kaiser et al. 2017) and factor analysis based models (Cen et al. 2006; Lavoué et al. 2018; Thai-Nghe et al. 2012). Recently, with the rapid development of deep neural networks, many deep learning based knowledge tracing (DLKT) models are developed, such as auto-regressive based deep sequential KT models (Piech et al. 2015; Yeung & Yeung 2018; Chen et al. 2018; Wang et al. 2019; Guo et al. 
2021; Long et al. 2021; Chen et al. 2023), memory-augmented KT models (Zhang et al. 2017; Abdelrahman & Wang 2019; Yeung 2019), attention based KT models (Pandey & Karypis 2019; Pandey & Srivastava 2020; Choi et al. 2020; Ghosh et al. 2020; Pu et al. 2020), and graph based KT models (Nakagawa et al. 2019; Yang et al. 2020; Tong et al. 2020). + +![](images/bf81367ab97873c8d7e36048a7e047999a27c48e636f4e4f74b1048c84a31c77.jpg) +Figure 1: Graphical illustration of the task of KnowledgeTracing. “√” and “×” denote the question is answered correctly and incorrectly. + +Although DLKT approaches have constituted new paradigms of the KT problem and achieved promising results, recently developed DLKT models seem to be more and more complex and resemble each other with very limited nuances from the methodological perspective: applying different neural components to capture student forgetting behaviors $^{3}$ (Ghosh et al. 2020; Nagatani et al. 2019), recency effects (Zhang et al. 2021), and various auxiliary information including relations between questions and KCs (Tong et al. 2020; Pandey & Srivastava 2020; Liu et al. 2021; Yang et al. 2020), question text content (Liu et al. 2019; Wang et al. 2020a), question difficulty level (Liu et al. 2021; Shen et al. 2022), and students' learning ability (Shen et al. 2020). Furthermore, published DLKT baseline results surprisingly diverge. For example, the reported AUC scores of DKT and AKT on ASSISTments2009 range from 0.721 to 0.821 (Minn et al. 2018; Yeung & Yeung 2018) and from 0.747 to 0.835 in (Ghosh et al. 2020; Wang et al. 2021) respectively. Another example is that for the performance of DKT on the ASSISTments2009 dataset, it is recognized as one of the best baselines by Ghosh et al. (2020) while Long et al. (2022) and Zhang et al. (2021) showed that its performance is below the average. Recent survey studies by Sarsa et al. (2022) and Liu et al. 
(2022) summarized the aforementioned inconsistencies of baseline results and showed evidence that variations in hyper-parameters and data pre-processing procedures contribute significantly to prediction performance of DLKT models. Specifically, Sarsa et al. (2022) empirically found that even simple baselines with little predictive value may outperform DLKT models with sophisticated neural components. Liu et al. (2022) built a standardized DLKT benchmark platform and showed that the improvement of many DLKT approaches is minimal compared to the very first DLKT model proposed by Piech et al. (2015). + +Therefore, in this paper, we propose SIMPLEKT, a simple but tough-to-beat KT baseline that is simple to implement, computationally friendly and robust to a wide range of KT datasets across different domains. Motivated by the Rasch model that is a classic yet powerful model in psychometrics, the proposed SIMPLEKT approach captures the individual differences among questions covering the same set of KCs by representing each question's embedding as an additive combination of the average of its corresponding KCs' embeddings and a question-specific variation. Furthermore, different from many existing models that try to capture various aforementioned relations and information, the SIMPLEKT is purely based on the attention mechanism and uses the ordinary dot-product attention function to capture the contextual information embedded in the student learning interactions. To comprehensively and systematically evaluate the performance of SIMPLEKT, we choose to use the publicly available PYKT benchmark implementation to guarantee valid and reproducible comparisons against 12 DLKT methods on 7 popular datasets across differ- + +ent domains. Results show that SIMPLEKT beats a wide range of modern neural KT models based on graph neural networks, memory augmented neural networks, and adversarial neural networks. 
This suggests that this simple method should be used as the baseline to beat in future KT research, especially when designing sophisticated neural KT architectures. To encourage reproducible research, all the related codes, data and the learned SIMPLEKT models are publicly available at https://github.com/pykt-team/pykt-toolkit + +# 2 RELATED WORK + +Recently, deep learning techniques have been widely applied to the KT task for modeling students' historical learning and predicting their future performance. Existing DLKT approaches can be categorized into the following 5 categories: + +C1: Deep sequential models. DLKT models that use auto-regressive architectures to capture students' chronologically ordered interactions. For example, (Piech et al., 2015) proposed the very first DKT model that utilizes an LSTM layer to estimate the knowledge mastery. (Lee & Yeung, 2019) proposed to enhance DKT with a skill encoder that combines student learning activities and KC representations. + +C2: Memory augmented models. DLKT models that capture latent relations between KCs and student knowledge states via memory networks. For instance, (Zhang et al., 2017) exploited and stored the KC relationships via a static key memory matrix and predicted students' knowledge mastery levels with a dynamic value memory matrix. + +C3: Adversarial based models. DLKT models that utilize the adversarial techniques to generate perturbations to improve model generalization capability. For example, (Guo et al., 2021) jointly trained an attentive-LSTM KT model with both original and adversarial examples. + +C4: Graph based models. DLKT models that use the graph neural networks to model intrinsic relations among questions, KCs and interactions. (Liu et al., 2021) presented a question-KC bipartite graph to explicitly capture question-level and KC-level inner-relations and question difficulties. (Yang et al., 2020) introduced a graph convolutional network to obtain the representation of the question-KC correlations. + +C5: Attention based models. 
DLKT models that capture dependencies between interactions via the attention mechanism. For example, (Pandey & Karypis, 2019) used a self-attention network to capture the relevance between KCs and students' historical interactions. (Choi et al., 2020) designed an encoder-decoder structure to represent the exercise and response embedding sequences. (Ghosh et al., 2020) performed three self-attention modules and explicitly model students' forgetting behaviors via a monotonic attention mechanism. + +Please note that the above categorizations are not exclusive and related techniques can be combined. For example, (Abdelrahman & Wang, 2019) proposed a sequential key-value memory network to unify the strengths of recurrent modeling capacity and memory capacity. The proposed SIMPLEKT approach belongs to C5 and it purely models student interactions by using the very ordinary dot-product attention function. + +# 3 A SIMPLE METHOD FOR KNOWLEDGE TRACING + +# 3.1 PROBLEM STATEMENT + +In this work, our objective is, given an arbitrary question $q_*$, to predict the probability of whether a student will answer $q_*$ correctly or not based on the student's historical interaction data. More specifically, for each student $S$ , we assume that we have observed a chronologically ordered collection of $T$ past interactions i.e., $S = \{s_j\}_{j=1}^T$ . Each interaction is represented as a 4-tuple, i.e., $s = (q, \{c\}, r, t)$, where $q, \{c\}, r, t$ represent the specific question, the associated KC set, the binary valued student response and the student's response time step respectively. We would like to estimate the probability $\hat{r}_*$ of the student's performance on arbitrary question $q_*$ . + +# 3.2 THE SIMPLEKT APPROACH + +# 3.2.1 REPRESENTATIONS OF QUESTIONS, KCs AND RESPONSES. + +Effectively representing student interactions is crucial to the success of the DLKT models. In real-world educational scenarios, the question bank is usually much bigger than the set of KCs. 
For example, the number of questions is more than 1500 times larger than the number of KCs in the Algebra2005 dataset (described in Section 4.1). Therefore, to effectively learn and fairly evaluate the DLKT models from such highly sparse question-response data, following the previous work of Ghosh et al., 2020 and Liu et al., 2022, we artificially transform the original question-response data into KC-response data by expanding each question-level interaction into multiple KC-level interactions when the question is associated with a set of KCs (illustrated in Figure 2). + +Furthermore, due to the fact that questions covering the same set of KCs may have various difficulty levels, students perform significantly different. As shown in Figure 2 even through questions $q_{2}$ and $q_{4}$ have the same set of KCs, i.e., $c_{1}$ and $c_{3}$ , students may get $q_{2}$ wrong but $q_{4}$ correct. Therefore, it is unrealistic to treat every KC in the expanded KC-response sequence identical. Inspired by the very classic and simple Rasch model in psychometrics that explicitly uses a scalar to characterize the latent factor of question difficulty, we choose to use a question-specific difficulty vector to capture the individual differences among questions on the same KC. More specifically, the $t$ th representations of KC (i.e., $\mathbf{x}_t$ ) and interaction (i.e., $\mathbf{y}_t$ ) in the expanded KC sequence of concept $c_{k}$ are represented as follows: + +![](images/d8c6b96a374ed2a73c921533fbe0c22a78147fd1c793cec2a307b508c7c6be26.jpg) +Figure 2: Graphical illustration of transforming the original question-response data into KC-response data. 
+ +$$ +\mathbf {x} _ {t} = \mathbf {z} _ {c _ {k}} \oplus \mathbf {m} _ {q _ {j}} \odot \mathbf {v} _ {c _ {k}}; \quad \mathbf {y} _ {t} = \mathbf {z} _ {c _ {k}} \oplus \mathbf {r} _ {q _ {j}}; \quad \mathbf {z} _ {c _ {k}} = \mathbf {W} _ {c} \cdot \mathbf {e} _ {c _ {k}}; \quad \mathbf {r} _ {q _ {j}} = \mathbf {W} _ {q} \cdot \mathbf {e} _ {q _ {j}} +$$ + +where $\mathbf{z}_{c_k}$ denotes the latent representation of the KC $c_{k}$ . $\mathbf{m}_{q_j}$ denotes the difficulty vector of question $q_{j}$ and question $q_{j}$ contains the KC $c_{k}$ . $\mathbf{v}_{c_k}$ represents the question-centric variation of $q_{j}$ covering this KC $c_{k}$ . $\mathbf{r}_{q_j}$ denotes the representation of student response on $q_{j}$ . $\mathbf{e}_{c_k}$ is the $n$ -dimensional one-hot vector indicating the corresponding KC and $\mathbf{e}_{q_j}$ is the 2-dimensional one-hot vector indicating whether the question is answered correctly. $\mathbf{z}_{c_k}$ , $\mathbf{m}_{q_j}$ , $\mathbf{v}_{c_k}$ and $\mathbf{r}_{q_j}$ are $d$ -dimensional learnable real-valued vectors. $\mathbf{W}_c\in \mathbb{R}^{d\times n}$ and $\mathbf{W}_q\in \mathbb{R}^{d\times 2}$ are learnable linear transformation operations. $\odot$ and $\oplus$ are the element-wise product and addition operators. $n$ is the total number of KCs. + +# 3.2.2 PREDICTION WITH ORDINARY DOT-PRODUCT ATTENTION. + +Different from many existing DLKT approaches that use sophisticated neural components to model student learning and/or forgetting behaviors, we choose to use the ordinary dot-product attention function to explore and extract knowledge states from students' past learning history. 
Specifically, the retrieved knowledge state $(\mathbf{h}_{t + 1})$ at the $(t + 1)$ th timestamp is computed as follows: + +$$ +\mathbf {h} _ {t + 1} = \operatorname {SelfAttention} (Q = \mathbf {x} _ {t + 1}, K = \left\{\mathbf {x} _ {1}, \dots , \mathbf {x} _ {t} \right\}, V = \left\{\mathbf {y} _ {1}, \dots , \mathbf {y} _ {t} \right\}). +$$ + +Then we use a two-layer fully connected network to refine the knowledge state and the overall optimization function is as follows: + +$$ +\eta_ {t + 1} = \mathbf {w} ^ {\top} \cdot \operatorname {ReLU} \left(\mathbf {W} _ {2} \cdot \operatorname {ReLU} \left(\mathbf {W} _ {1} \cdot \left[ \mathbf {h} _ {t + 1}; \mathbf {x} _ {t + 1} \right] + \mathbf {b} _ {1}\right) + \mathbf {b} _ {2}\right) + b +$$ + +$$ +\mathcal {L} = - \sum \big (r _ {t} \log \sigma (\eta_ {t}) + (1 - r _ {t}) \log (1 - \sigma (\eta_ {t})) \big) +$$ + +where $\mathbf{W}_1, \mathbf{W}_2, \mathbf{w}, \mathbf{b}_1, \mathbf{b}_2$ and $b$ are trainable parameters and $\mathbf{W}_1 \in \mathbb{R}^{d \times 2d}$ , $\mathbf{W}_2 \in \mathbb{R}^{d \times d}$ , $\mathbf{w}, \mathbf{b}_1, \mathbf{b}_2 \in \mathbb{R}^{d \times 1}$ , $b$ is a scalar. $\sigma(\cdot)$ is the sigmoid function. + +# 3.2.3 RELATIONSHIP TO EXISTING DLKT MODELS. + +Although the proposed SIMPLEKT belongs to model category C5 discussed in Section 2, it is distinguished from attention based representative DLKT models such as AKT (Ghosh et al., 2020), SAKT + +(Pandey & Karypis, 2019) and SAINT (Choi et al., 2020). 
The difference between SIMPLEKT and AKT are threefold: first, we omit the self-attentive question encoder and knowledge encoder in AKT and directly feed the representations of $\mathbf{x}_t$ s and $\mathbf{y}_t$ s into attention based knowledge state extractor; second, instead of using time decayed monotonic attention function to extract the initial knowledge state, we choose to use the ordinary dot-product function that is simple and free of hyper-parameters; third, interaction representations $\mathbf{y}_t$ s are simply computed by adding representations of KCs and responses while AKT uses extra parameters to explicitly model the effects of question difficulty in interaction representations. When comparing SIMPLEKT to SAKT and SAINT, we explicitly model the latent question-centric difficulty when learning the KC representations while SAKT and SAINT ignore the question-level difference and treat all questions are identical if they contain the same set of KCs. Furthermore, SAINT adopts the encoder-decoder architecture and utilizes Transformers to model the student interaction sequence while our SIMPLEKT only uses the dot-product attention function. + +# 4 EXPERIMENTS + +# 4.1 DATASETS + +In this paper, we experiment with 7 widely used datasets to comprehensively evaluate the performance of our models. These 7 datasets can be divided into 2 categories: (1) D1: Datasets containing information of both questions and KCs; and (2) D2: Datasets containing information of either questions or KCs. Table1 gives real samples of question and KCs from both D1 and D2 categories. The detailed statistics of each dataset are listed in Appendix A.1 + +# 4.1.1 DATASETS CONTAINING INFORMATION OF BOTH QUESTIONS AND KCs + +ASSISTments2009 (AS2009) This dataset is about math exercises and collected from the free online tutoring ASSISTments platform in the school year 2009-2010. 
It is widely used as the standard benchmark for KT methods over the last decade (Feng et al., 2009; Ghosh et al., 2020; Zhang et al., 2017). It includes 337,4115 interactions, 4,661 sequences, 17,737 questions, 123 KCs and each question has 1.1968 KCs on average. + +Algebra2005 (AL2005) This dataset stems from KDD Cup 2010 EDM Challenge, including the detailed step-level student responses to the mathematical problems (Stamper et al. 2010). Similar to (Choffin et al., Ghosh et al., 2020, Zhang et al., 2017), a unique question is constructed by concatenating the problem name and step name. It has 884,098 interactions, 4,712 sequences, 173,113 questions, 112 KCs and the average KCs is 1.3521. + +Bridge2006 (BD2006) This dataset is also from the KDD Cup 2010 EDM Challenge and its unique question construction is similar to the process used in Algebra2005. The dataset has 1,824,310 interactions, 9,680 sequences, 129,263 questions, 493 KCs and the average KCs is 1.0136. + +NIPS34 This dataset is provided by NeurlPS 2020 Education Challenge which contains students' answers to mathematics questions from Eedi. We use the dataset of Task 3 & Task 4 to evaluate our models (Wang et al. 2020b). There are 1,399,470 interactions, 9,401 sequences, 948 questions, 57 KCs, each question has 1.0137 KCs on average. + +# 4.1.2 DATASETS CONTAINING INFORMATION OF EITHER QUESTIONS OR KCS + +Static2019 This dataset is collected from an engineering statics course taught at the Carnegie Mellon University during Fall 2011 (Steif & Bier, 2014). Its unique question construction is similar to the process used in Algebra2005. The dataset has 189,292 interactions, 1,034 sequences and 1,223 questions. + +![](images/1ea70bac986d3459b8c8ad5e0c1787a5f929676b5cd889e905e5c1f5259f0041.jpg) + +ASSISTments2015 (AS2015) Similar to ASSISTments2009, this dataset is collected from the ASSISTments platform in the year of 2015, and it has the largest number of students among the other ASSISTments datasets. 
It ends up with 682,789 interactions, 19,292 sequences and 100 KCs after pre-processing. + +$\mathbf{POJ}^{[1]}$ : This dataset is collected from Peking coding practice online platform and provided by Pandey & Srivastava (2020). It has 987,593 interactions, 20,114 sequences and 2,748 questions. + +Following the data pre-processing steps suggested by (Liu et al., 2022), we remove student sequences shorter than 3 attempts and set the maximum length of student interaction history to 200 for a high computational efficiency. + +Table 1: Examples of questions and KCs from D1 and D2. + +
CategoryDatasetQuestionKnowledge Components
D1NIPS34Which calculation is incorrect? A.(-7)*2=-14 B.(-7)*( -2)=-14 C.7*2=14 D. 7*(-2)=-14Multiplying and Dividing Negative Numbers
D1NIPS34Which of the following number is a factor of 60 and a multiple of 6 ... A.3 B.12 C.20 D.120Factors and Highest Common Factor +Multiples and Lowest Common Multiple
D2POJGiven 2 equations on the variables x and y, solve for x and y.Not Available
D2POJGiven a big integer number, you are required to find out whether it's a prime number.Not Available
+ +# 4.2 BASELINES + +To comprehensively and systematically evaluate the performance of SIMPLEKT, we compare SIMPLEKT against 12 DLKT baseline models from aforementioned 5 categories in Section2 as follows: + +C1: DKT (Piech et al. 2015): directly uses RNNs to model students' learning processes. +- C1: DKT+ (Yeung & Yeung 2018): improves DKT by addressing the reconstruction and inconsistent issues. +C1: DKT-F (Nagatani et al. 2019): improves DKT by considering students' forgetting behaviors. +C1: KQN (Lee & Yeung, 2019): utilizes the dot product of the students' ability and KC representations to predict student performance. +C1: LPKT (Shen et al. 2021): designs the learning cell to model students' learning processes. +- C1: IEKT (Long et al., 2021): estimates student knowledge state via the student cognition and knowledge acquisition estimation modules. +C2: DKVMN (Zhang et al., 2017): exploits the relationships among KCs and estimate student mastery via memory networks. +C3: ATKT (Guo et al., 2021): uses adversarial perturbations to enhance the generalization of the attention-LSTM based KT model. +- C4: GKT (Nakagawa et al., 2019): casts the knowledge structure as a graph and reformulate the KT task as a node-level classification problem. +C5: SAKT (Pandey & Karypis 2019): uses self-attention to identify the relevance between the interactions and KCs. +- C5: SAINT (Choi et al., 2020): a Transformer-based model for KT that encode exercise and responses in the encoder and decoder respectively. +- C5: AKT (Ghosh et al. 2020): models forgetting behaviors during the relevance computation between historical interactions and target questions. + +# 4.3 EXPERIMENTAL SETUP + +Similar to (Liu et al. 2022), we randomly withhold $20\%$ of the students' sequences for model evaluation and we perform standard 5-fold cross validation on the rest $80\%$ of each dataset. We select ADAM (Kingma & Ba 2014) as the optimizer to train our model. 
The maximum of the training epochs is set to 200, and an early stopping strategy is used to speed up the training process. The embedding dimension, the hidden state dimension, the two dimension of the prediction layers are set to [64, 128], the learning rate and dropout rate are set to [1e-3, 1e-4, 1e-5] and [0.05, 0.1, 0.3, 0.5] respectively, the number of blocks and attention heads are set to [1, 2, 4] and [4, 8], the seed + +10 https://sites.google.com/site/assistmentsdata/datasets/2015-assistments-skill-builder-data + +1 https://drive.google.com/drive/folders/1LRLjqWfODwTYRMPw6wEJ_mMt1KZ4XBdk. + +is set to [42, 3407] for reproducing the experimental results. Our model is implemented in PyTorch and trained on NVIDIA RTX 3090 GPU device. Similar to all existing DLKT research, we use the AUC as the main evaluation metric, and use accuracy as the secondary metric. + +# 4.4 EXPERIMENTAL RESULTS + +Overall Performance. Table 2 and Table 3 summarize the overall prediction performance of SIMPLEKT and all baselines in terms of the average AUC and accuracy scores. Marker *, o and ● indicates whether SIMPLEKT is statistically superior/equal/inferior to the compared method (using paired t-test at 0.01 significance level). The last column shows the total number of win/tie/loss for SIMPLEKT against the compared method on all 7 datasets (e.g., #win is how many times SIMPLEKT significantly outperforms that method). + +Table 2: Overall AUC performance of SIMPLEKT and all baselines. “-” indicates the method is inapplicable for that dataset. + +
ModelD1: Datasets containing info. of both questions and KCsD2: Datasets containing info. of either questions or KCsSIMPLEKT
AS2009AL2005BD2006NIPS34Statics2011AS2015POJ#win/#tie/#loss
DKT0.7541±0.0011*0.8149±0.0011*0.8015±0.0008*0.7689±0.0002*0.8222±0.0013●0.7271±0.0005●0.6089±0.0009*5/0/2
DKT+0.7547±0.0017*0.8156±0.0011*0.8020±0.0004*0.7696±0.0002*0.8279±0.0004●0.7285±0.0006●0.6173±0.0007*5/0/2
DKT-F-0.8147±0.0013*0.7985±0.0013*0.7733±0.0003*0.7839±0.0061*-0.6030±0.0023*5/0/0
KQN0.7477±0.0011*0.8027±0.0015*0.7936±0.0014*0.7684±0.0003*0.8232±0.0007●0.7254±0.0004●0.6080±0.0015*5/0/2
LPKT0.7814±0.0022●0.8274±0.0014●0.8055±0.0006*0.8035±0.0003○---1/1/2
IEKT0.7861±0.0027●0.8416±0.0014●0.8125±0.0009*0.8045±0.0002●---1/0/3
DKVMN0.7473±0.0006*0.8054±0.0011*0.7983±0.0009*0.7673±0.0004*0.8093±0.0017*0.7227±0.0004*0.6056±0.0022*7/0/0
ATKT0.7470±0.0008*0.7995±0.0023*0.7889±0.0008*0.7665±0.0001*0.8055±0.0020*0.7245±0.0007*0.6075±0.0012*7/0/0
GKT0.7424±0.0021*0.8110±0.0009*0.8046±0.0008*0.7689±0.0024*0.8040±0.0065○0.7258±0.0012●0.6070±0.0036*5/1/1
SAKT0.7246±0.0017*0.7880±0.0063*0.7740±0.0008*0.7517±0.0005*0.7965±0.0014*0.7114±0.0003*0.6095±0.0013*7/0/0
SAINT0.6958±0.0023○0.7775±0.0017*0.7781±0.0013*0.7873±0.0007*0.7599±0.0139*0.7026±0.0011*0.5563±0.0012*6/1/0
AKT0.7853±0.0017●0.8306±0.0019●0.8208±0.0007●0.8033±0.0003*0.8309±0.0009●0.7281±0.0004●0.6281±0.0013●1/0/6
simpleKT0.7744±0.00180.8254±0.00030.8160±0.00060.8035±0.00000.8199±0.00110.7248±0.00050.6252±0.0005-
+ +Table 3: Overall Accuracy performance of SIMPLEKT and all baselines. “-” indicates the method is inapplicable for that dataset. + +
ModelD1: Datasets containing info. of both questions and KCsD2: Datasets containing info. of either questions or KCsSIMPLEKT
AS2009AL2005BD2006NIPS34Statics2011AS2015POJ#win/#tie/#loss
DKT0.7244±0.0014*0.8097±0.0005●0.8553±0.0002*0.7032±0.0004*0.7969±0.0006●0.7503±0.0003*0.6328±0.0020*5/0/2
DKT+0.7248±0.0009*0.8097±0.0007●0.8553±0.0003*0.7039±0.0004*0.7977±0.0006●0.7510±0.0004●0.6482±0.0021*4/0/3
DKT-F-0.8090±0.0005●0.8536±0.0004*0.7076±0.0002*0.7872±0.0011*-0.6371±0.0030*4/0/1
KQN0.7228±0.0009*0.8025±0.0006*0.8532±0.0006*0.7028±0.0001*0.7978±0.0007●0.7500±0.0003*0.6435±0.0017*6/0/1
LPKT0.7355±0.0015●0.8145±0.0007●0.8544±0.0008*0.7341±0.0003●---1/0/3
IEKT0.7375±0.0042●0.8236±0.0010●0.8553±0.0023*0.7330±0.0002●---1/0/3
DKVMN0.7199±0.0010*0.8027±0.0007*0.8545±0.0002*0.7016±0.0005*0.7929±0.0006*0.7508±0.0006○0.6393±0.0015*6/1/0
ATKT0.7208±0.0009*0.7998±0.0019*0.8511±0.0004*0.7013±0.0002*0.7904±0.0011*0.7494±0.0002*0.6332±0.0023*7/0/0
GKT0.7153±0.0032*0.8088±0.0008●0.8555±0.0002*0.7014±0.0028*0.7902±0.0021○0.7504±0.0010*0.6117±0.0147*5/1/1
SAKT0.7063±0.0018*0.7954±0.0020*0.8461±0.0005*0.6879±0.0004*0.7879±0.0015*0.7474±0.0002*0.6407±0.0035*7/0/0
SAINT0.6936±0.0034○0.7791±0.0016*0.8411±0.0065*0.7180±0.0006*0.7682±0.0056*0.7438±0.0010*0.6476±0.0003*6/1/0
AKT0.7392±0.0021●0.8124±0.0011●0.8587±0.0005●0.7323±0.0005*0.8021±0.0011●0.7521±0.0005●0.6492±0.0010*2/0/5
simpleKT0.7320±0.00120.8083±0.00050.8579±0.00030.7328±0.00010.7957±0.00200.7508±0.00040.6522±0.0008-
+ +From Table 2 and Table 3 we find the following results: (1) compared to other baseline methods, SIMPLEKT almost always ranks top 3 in terms of AUC scores and achieves 55 wins, 3 ties and 18 losses in total against 12 baselines on 7 public datasets of different domains. This indicates the strength of SIMPLEKT as a baseline of KT; (2) in general, the SIMPLEKT approach performs better on D1 datasets that have both question and KC information available. When training SIMPLEKT on D2 datasets, due to the lack of distinguished information about questions and KCs, the explicit question-centric difficulty modeling degrades and the KC representations become question agnostic; (3) when comparing SIMPLEKT to other attentive models, i.e., SAKT, SAINT and AKT, in the C5 category, our SIMPLEKT beats SAKT and SAINT on all the datasets, which indicates the effectiveness of explicit question-centric difficulty modeling. Although our SIMPLEKT approach is significantly worse than the AKT approach on 6 datasets, the performance gaps are quite minimal and are mostly within a $0.5\%$ range. On the other hand, SIMPLEKT is much more concise compared to the two-layer attentive architecture in the AKT approach; (4) the SIMPLEKT outperforms many deep sequential models in category C1, including DKT, DKT+, DKT-F, KQN on AS2009, AL2005, BD2006, NIPS34 and POJ. We believe this is because the above 4 sequential models that use KCs to index questions cannot capture the individual differences among questions with the same KCs, which is crucial to predict student future performance; (5) comparing SIMPLEKT and IEKT, we can see that IEKT has better prediction performance on AS2009, AL2005, and NIPS34. 
This is because IEKT captures both question-level and KC-level variations in its representations and at the same time, it designs two specific neural modules to estimate individual cognition and acquisition + +abilities; (6) compared to other types of models in C2, C3, and C4, such as DKVMN, ATKT and GKT, our SIMPLEKT achieves better performance by using an ordinary dot-product attention function without any memory mechanism, adversarial learning or graph constructions, which further encourages educational researchers and practitioners to develop effective models with a design of simplicity. + +![](images/5da15cc66032cae869d7934d078a7d23dc2f0c0f7d4cc795e8e905e2ca257f08.jpg) + +![](images/6b2208df3267500fc3e5973a1a2b1dcf2a38532118f4cc5ff02b4dfbce21c6f2.jpg) + +![](images/b68518f716e87e7fbfb720c3fcba4f9043488e31019b5f2afa5ea6a6fbcfddd4.jpg) + +![](images/211d7d9a09a33745622203c32e29b12da75927a5eadaa1d9a3a6143ed7851429.jpg) + +![](images/96d6686cd7e9357cb8fb87916c0055d02dc872631809326cc228a9635bb7c2be.jpg) + +![](images/51c839870d094e7074c119200452a9c660c71531b758646b3dbc292c4309a6fc.jpg) +Figure 3: Non-accumulative predictions in the multi-step ahead scenario in terms of AUC. + +![](images/754abc42972dead1fc150efb27449675463ffca216b88a8a574c51f9c773e7a4.jpg) + +![](images/ca78c04a5d4e62ecc608792889df918871f71eae75af195ab14d5ff9963ca254.jpg) + +![](images/2b271f8bd2e9b455fe37dfca479c39667a2896d5366fe8bf4d28c9612b73f609.jpg) + +![](images/de36abe1d2424c182e4e7e7b6d6ac1f66e2cec151596bda8844d3f980b43a4c3.jpg) + +![](images/6dc351890a459061f69ea2659801f9455e62acabe39e95c8af27685c52f5d1b7.jpg) + +![](images/2241242544f2055d3de8c879108f8dd2a826ec6ddb0fe03bca0e2d67d23326ae.jpg) + +![](images/a814d72df135b1d00adecb82f0186c73578c970cdf69cff357102df852f80c0a.jpg) + +![](images/e34035c6630b35dce8246694fdb552e7f2d2001d6cdd6573828f7e28ee38169d.jpg) +Figure 4: Non-accumulative predictions in the multi-step ahead scenario in terms of Accuracy. 
+ +![](images/3958a12c89452969a13555448bbc36e31aa1a23860999cb10c49d082461882b9.jpg) + +![](images/c6dc0106a816219728505976ac8c86c6e14a855fcbc6c241430245bb2aa3dff2.jpg) + +Multi-step KT Prediction Performance. In order to make the prediction close to real application scenarios, we also evaluate our model in multi-step prediction, which predicts a span of student's responses given the student's historical interaction sequence. Practically, accurate multi-step KT prediction will provide constructive feedback to learning path selection and construction and help teachers adaptively adjust future teaching materials. We conduct the prediction in a non-accumulative setting that predicts all future values all at once to avoid accumulative prediction errors. To have a fine-grained analysis in the multi-step ahead prediction scenario, we further experiment with DLKT models on different portions of observed student interactions. Specifically, we vary the observed percentages of student interaction length from $20\%$ to $90\%$ with step size of $10\%$ . Due to the space limit, we select the best baseline in each category, i.e., IEKT, DKVMN, ATKT, GKT, AKT as the representative approaches and the results in terms of AUC and accuracy are shown in Figure 3 and Figure 4. We make the following observations: (1) with the increasing historical information, the student AUC performance prediction becomes more accurate in most cases, which is in line with the + +real-world educational scenario; (2) the attention based model almost outperforms other KT models in Statics2011, AS2005 and POJ according to AUC scores; this is because the attention mechanism is adept at capturing the long-term dependencies; and (3) our SIMPLEKT achieves the best prediction performance in BD2006, NIPS34 in terms of AUC, which indicates the proposed method is simple yet powerful. + +Qualitative Visual Analysis. 
In this section, we qualitatively show the visualization of the prediction results made by SIMPLEKT in Figure 5. To better understand the model predictive behavior, we compute the historical error rate (HER) per question from the data and use the HERs as surrogates of question difficulties. Due to the space limit, more illustrative and fine-grained results are provided in Appendix A.2. As we can see from Figure 5, when a student meets a certain KC for the first time, the higher HER of the question, the lower the probability that student will get it correct. For example, the HERs for questions q527, q509, q512, q518 and q219 are 0.35, 0.41, 0.20, 0.51, and 0.48 and the corresponding prediction probabilities of SIMPLEKT are 0.74, 0.52, 0.84, 0.45, and 0.51 respectively. Furthermore, for those questions that cover the same set of KCs, such as questions q526 and q529, the model prediction probability decreases when the corresponding HER increases. + +![](images/adbc7a91fca12fc0189b85901c7b7945277d8527d4e95bba95e58cc8f022e9be.jpg) +Figure 5: Visualization of a student's prediction results on SIMPLEKT. + +Ablation Study. We systematically examine the effect of the key component of question difficulty modeling by constructing two model variants: (1) SIMPLEKT-ScalarDiff that changes the question-centric difficulty vector $\mathbf{m}_{q_t}$ to scalar; and (2) SIMPLEKT-NoDiff that completely ignores the question difficulty modeling and simply sets $\mathbf{x}_t$ to $\mathbf{z}_{c_t}$ . The prediction performance on all datasets that belong to D1 are reported in Table 4. Please note that since datasets in D2 only have either question information or KC information, SIMPLEKT, SIMPLEKT-ScalarDiff, and SIMPLEKT-NoDiff essentially become mathematically unidentifiable. 
From Table 4 we can easily observe that (1) the SIMPLEKT method outperforms the two model variants and especially when removing the question difficulty modeling component, the prediction performance decreases more than $2\%$ on all D1 datasets. This empirically verifies the importance of question-centric difficulty modeling when making student performance prediction in KT scenarios; and (2) comparing SIMPLEKT and SIMPLEKT-ScalarDiff, the performance difference is very minimal. We believe this is because the KC representation $\mathbf{x}_t$ is based on the simple additive assumption and a scalar is expressive enough to represent question-level difficulty under this assumption. + +Table 4: The performance of different variants in SIMPLEKT. + +
AS2009AL2005BD2006NIPS34
SIMPLEKT0.7744±0.00180.8254±0.00030.8160±0.00060.8035±0.0000
SIMPLEKT-ScalarDiff0.7740±0.00210.8250±0.00130.8159±0.00110.8008±0.0012
SIMPLEKT-NoDiff0.7411±0.00160.8048±0.00180.7922±0.00110.7646±0.0005
+ +# 5 CONCLUSION + +In this work, we propose SIMPLEKT, a simple but tough-to-beat approach to solve KT task effectively. Motivated by the Rasch model in psychometrics, the SIMPLEKT approach is designed to capture individual differences among questions with the same KCs. Furthermore, the proposed SIMPLEKT approach simplifies the sophisticated student knowledge state estimation component with the ordinary dot-product attention function. Comprehensive experimental results demonstrate that SIMPLEKT is able to beat a wide range of recently proposed DLKT models on various datasets from different domains. We believe this work serves as a strong baseline for future KT research. + +# REPRODUCIBILITY STATEMENT + +The code of SIMPLEKT and its variants, i.e., SIMPLEKT-ScalarDiff and SIMPLEKT-NoDiff, to reproduce the experimental results can be found at https://github.com/pykt-team/ pykt-toolkit. We give the details of data-preprocessing and the training hyper-parameters of SIMPLEKT in Section 4.1 and Section 4.3. The code of the 12 comparison models is accessible from an open-sourced PYKT python library at https://pykt.org/. We choose to use the same data partitions of train, validation, test sets as PYKT and hence all the results can be easily reproducible. All the model training details of all baselines can be found at https://pykt-toolkit.readthedocs.io/en/latest/pykt.models.html + +# ACKNOWLEDGMENTS + +This work was supported in part by National Key R&D Program of China, under Grant No. 2020AAA0104500; in part by Beijing Nova Program (Z201100006820068) from Beijing Municipal Science & Technology Commission; in part by NFSC under Grant No. 61877029 and in part by Key Laboratory of Smart Education of Guangdong Higher Education Institutes, Jinan University (2022LSYS003). + +# REFERENCES + +Ghodai Abdelrahman and Qing Wang. Knowledge tracing with sequential key-value memory networks. 
In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 175-184, 2019. +Hao Cen, Kenneth Koedinger, and Brian Junker. Learning factors analysis-a general method for cognitive model evaluation and improvement. In International Conference on Intelligent Tutoring Systems, pp. 164-175. Springer, 2006. +Jiahao Chen, Zitao Liu, Shuyan Huang, Qiongqiong Liu, and Weiqi Luo. Improving interpretability of deep sequential knowledge tracing models with question-centric cognitive representations. In Proceedings of the AAAI Conference on Artificial Intelligence, 2023. +Penghe Chen, Yu Lu, Vincent W Zheng, and Yang Pian. Prerequisite-driven deep knowledge tracing. In 2018 IEEE International Conference on Data Mining, pp. 39-48. IEEE, 2018. +Benoit Choffin, Fabrice Popineau, Yolaine Bourda, and Jill-Jenn Vie. DAS3H: Modeling student learning and forgetting for optimally scheduling distributed practice of skills. In Proceedings of The 12th International Conference on Educational Data Mining (EDM 2019), volume 29, pp. 38. +Youngduck Choi, Youngnam Lee, Junghyun Cho, Jineon Baek, Byungsoo Kim, Yeongmin Cha, Dongmin Shin, Chan Bae, and Jaewe Heo. Towards an appropriate query, key, and value computation for knowledge tracing. In Proceedings of the Seventh ACM Conference on Learning@ Scale, pp. 341-344, 2020. +Albert T Corbett and John R Anderson. Knowledge tracing: Modeling the acquisition of procedural knowledge. User Modeling and User-adapted Interaction, 4(4):253-278, 1994. +Mingyu Feng, Neil Heffernan, and Kenneth Koedinger. Addressing the assessment challenge with an online system that tutors as it assesses. User Modeling and User-adapted Interaction, 19(3): 243-266, 2009. +Aritra Ghosh, Neil Heffernan, and Andrew S Lan. Context-aware attentive knowledge tracing. In ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 2020. +Xiaopeng Guo, Zhijie Huang, Jie Gao, Mingyu Shang, Maojing Shu, and Jun Sun. 
Enhancing knowledge tracing via adversarial training. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 367-375, 2021. +Tanja Käser, Severin Klingler, Alexander G Schwing, and Markus Gross. Dynamic bayesian networks for student modeling. IEEE Transactions on Learning Technologies, 10(4):450-462, 2017. + +Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. +Elise Lavoué, Baptiste Monterrat, Michel Desmarais, and Sébastien George. Adaptive gamification for learning environments. IEEE Transactions on Learning Technologies, 12(1):16-28, 2018. +Jinseok Lee and Dit-Yan Yeung. Knowledge query network for knowledge tracing: How knowledge interacts with skills. In Proceedings of the 9th International Conference on Learning Analytics & Knowledge, pp. 491-500, 2019. +Qi Liu, Zhenya Huang, Yu Yin, Enhong Chen, Hui Xiong, Yu Su, and Guoping Hu. EKT: Exercise-aware knowledge tracing for student performance prediction. IEEE Transactions on Knowledge and Data Engineering, 33(1):100-115, 2019. +Yunfei Liu, Yang Yang, Xianyu Chen, Jian Shen, Haifeng Zhang, and Yong Yu. Improving knowledge tracing via pre-training question embeddings. In Proceedings of the Twenty-Ninth International Conference on International Joint Conferences on Artificial Intelligence, pp. 1556-1562, 2021. +Zitao Liu, Qiongqiong Liu, Jiahao Chen, Shuyan Huang, Jiliang Tang, and Weiqi Luo. pyKT: A python library to benchmark deep learning based knowledge tracing models. In 36th Conference on Neural Information Processing Systems (NeurIPS 2022) Track on Datasets and Benchmarks., 2022. +Ting Long, Yunfei Liu, Jian Shen, Weinan Zhang, and Yong Yu. Tracing knowledge state with individual cognition and acquisition estimation. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 173-182, 2021. 
+Ting Long, Jiarui Qin, Jian Shen, Weinan Zhang, Wei Xia, Ruiming Tang, Xiuqiang He, and Yong Yu. Improving knowledge tracing with collaborative information. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining, pp. 599-607, 2022. +Sein Minn, Yi Yu, Michel C Desmarais, Feida Zhu, and Jill-Jenn Vie. Deep knowledge tracing and dynamic student classification for knowledge tracing. In 2018 IEEE International Conference on Data Mining, pp. 1182-1187. IEEE, 2018. +Koki Nagatani, Qian Zhang, Masahiro Sato, Yan-Ying Chen, Francine Chen, and Tomoko Ohkuma. Augmenting knowledge tracing by considering forgetting behavior. In The World Wide Web Conference, pp. 3101-3107, 2019. +Hiromi Nakagawa, Yusuke Iwasawa, and Yutaka Matsuo. Graph-based knowledge tracing: modeling student proficiency using graph neural network. In 2019 IEEE/WIC/ACM International Conference on Web Intelligence, pp. 156-163. IEEE, 2019. +Shalini Pandey and George Karypis. A self-attentive model for knowledge tracing. In 12th International Conference on Educational Data Mining, pp. 384–389. International Educational Data Mining Society, 2019. +Shalini Pandey and Jaideep Srivastava. RKT: relation-aware self-attention for knowledge tracing. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, pp. 1205–1214, 2020. +Chris Piech, Jonathan Bassen, Jonathan Huang, Surya Ganguli, Mehran Sahami, Leonidas J Guibas, and Jascha Sohl-Dickstein. Deep knowledge tracing. Advances in Neural Information Processing Systems, 28, 2015. +Shi Pu, Michael Yudelson, Lu Ou, and Yuchi Huang. Deep knowledge tracing with transformers. In International Conference on Artificial Intelligence in Education, pp. 252-256. Springer, 2020. +Sami Sarsa, Juho Leinonen, Arto Hellas, et al. Empirical evaluation of deep learning models for knowledge tracing: Of hyperparameters and metrics on performance and replicability. Journal of Educational Data Mining, 14(2), 2022. 
+ +Shuanghong Shen, Qi Liu, Enhong Chen, Han Wu, Zhenya Huang, Weihao Zhao, Yu Su, Haiping Ma, and Shijin Wang. Convolutional knowledge tracing: Modeling individualization in student learning process. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 1857-1860, 2020. +Shuanghong Shen, Qi Liu, Enhong Chen, Zhenya Huang, Wei Huang, Yu Yin, Yu Su, and Shijin Wang. Learning process-consistent knowledge tracing. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining, pp. 1452-1460, 2021. +Shuanghong Shen, Zhenya Huang, Qi Liu, Yu Su, Shijin Wang, and Enhong Chen. Assessing student's dynamic knowledge state by exploring the question difficulty effect. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 427-437, 2022. +J Stamper, A Niculescu-Mizil, S Ritter, G Gordon, and K Koedinger. Algebra I 2005-2006 and Bridge to Algebra 2006-2007. Development data sets from KDD Cup 2010 Educational Data Mining Challenge, 2010. +Paul Steif and Norman Bier. Oli engineering statics-fall 2011, 2014. +Nguyen Thai-Nghe, Lucas Drumond, Tomás Horváth, Artus Krohn-Grimberghe, Alexandros Nanopoulos, and Lars Schmidt-Thieme. Factorization techniques for predicting student performance. In Educational Recommender Systems and Technologies: Practices and Challenges, pp. 129-153. IGI Global, 2012. +Hanshuang Tong, Zhen Wang, Qi Liu, Yun Zhou, and Wenyuan Han. HGKT: Introducing hierarchical exercise graph for knowledge tracing. arXiv preprint arXiv:2006.16915, 2020. +Chenyang Wang, Weizhi Ma, Min Zhang, Chuancheng Lv, Fengyuan Wan, Huijie Lin, Taoran Tang, Yiqun Liu, and Shaoping Ma. Temporal cross-effects in knowledge tracing. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining, pp. 517-525, 2021. +Fei Wang, Qi Liu, Enhong Chen, Zhenya Huang, Yuying Chen, Yu Yin, Zai Huang, and Shijin Wang. 
Neural cognitive diagnosis for intelligent education systems. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pp. 6153-6161, 2020a. +Zhiwei Wang, Xiaoqin Feng, Jiliang Tang, Gale Yan Huang, and Zitao Liu. Deep knowledge tracing with side information. In Artificial Intelligence in Education: 20th International Conference, AIED 2019, Chicago, IL, USA, June 25-29, 2019, Proceedings, Part II 20, pp. 303-308. Springer, 2019. +Zichao Wang, Angus Lamb, Evgeny Saveliev, Pashmina Cameron, Yordan Zaykov, Jose Miguel Hernández-Lobato, Richard E Turner, Richard G Baraniuk, Craig Barton, Simon Peyton Jones, et al. Instructions and guide for diagnostic questions: The neurips 2020 education challenge. ArXiv preprint, abs/2007.12061, 2020b. URL https://arxiv.org/abs/2007.12061 +Yang Yang, Jian Shen, Yanru Qu, Yunfei Liu, Kerong Wang, Yaoming Zhu, Weinan Zhang, and Yong Yu. GIKT: a graph-based interaction model for knowledge tracing. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pp. 299-315. Springer, 2020. +Chun-Kit Yeung. Deep-IRT: Make deep learning based knowledge tracing explainable using item response theory. In Proceedings of The 12th International Conference on Educational Data Mining (EDM 2019), pp. 683-686, 2019. +Chun-Kit Yeung and Dit-Yan Yeung. Addressing two problems in deep knowledge tracing via prediction-consistent regularization. In Proceedings of the Fifth Annual ACM Conference on Learning at Scale, pp. 1-10, 2018. +Jiani Zhang, Xingjian Shi, Irwin King, and Dit Yan Yeung. Dynamic key-value memory networks for knowledge tracing. In Proceedings of the 26th International Conference on World Wide Web, pp. 765, 2017. +Moyu Zhang, Xinning Zhu, Chunhong Zhang, Yang Ji, Feng Pan, and Changchuan Yin. Multi-Factors Aware Dual-Attentional Knowledge Tracing. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management, pp. 2588-2597, 2021. 
\ No newline at end of file diff --git a/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/images.zip b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..eb0d391ffd7aa0051a71469bfd6f179b835f07e7 --- /dev/null +++ b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb1749b56b19f894afd92479f4ce6e364b1b7c4e2efcbbf08b10dd546f73518c +size 584684 diff --git a/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/layout.json b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..6df3a5cf5c576a272ee23050f7c21c1dad811bcc --- /dev/null +++ b/2023/simpleKT_ A Simple But Tough-to-Beat Baseline for Knowledge Tracing/layout.json @@ -0,0 +1,8437 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 96, + 103, + 495, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 103, + 495, + 139 + ], + "spans": [ + { + "bbox": [ + 96, + 103, + 495, + 139 + ], + "type": "text", + "content": "SIMPLEKT: A SIMPLE BUT TOUGH-TO-BEAT BASELINE FOR KNOWLEDGE TRACING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 159, + 149, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 149, + 169 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 149, + 169 + ], + "type": "text", + "content": "Zitao Liu" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 103, + 170, + 417, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 170, + 417, + 182 + ], + "spans": [ + { + "bbox": [ + 103, + 170, + 417, + 182 + ], + "type": "text", + "content": "Guangdong Institute of Smart Education, Jinan University, Guangzhou, China" + } + ] + } + ], + "index": 3 + }, + 
{ + "bbox": [ + 103, + 183, + 219, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 183, + 219, + 193 + ], + "spans": [ + { + "bbox": [ + 103, + 183, + 219, + 193 + ], + "type": "text", + "content": "liuzitao@jnu.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 102, + 210, + 304, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 210, + 304, + 222 + ], + "spans": [ + { + "bbox": [ + 102, + 210, + 304, + 222 + ], + "type": "text", + "content": "Qiongqiong Liu, Jiahao Chen, Shuyan Huang*" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 103, + 222, + 258, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 222, + 258, + 232 + ], + "spans": [ + { + "bbox": [ + 103, + 222, + 258, + 232 + ], + "type": "text", + "content": "TAL Education Group, Beijing, China" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 103, + 232, + 396, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 232, + 396, + 243 + ], + "spans": [ + { + "bbox": [ + 103, + 232, + 396, + 243 + ], + "type": "text", + "content": "{liuqiongqiongl, chenjiahao, huangshuyan}@tal.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 103, + 259, + 151, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 259, + 151, + 271 + ], + "spans": [ + { + "bbox": [ + 103, + 259, + 151, + 271 + ], + "type": "text", + "content": "Weiqi Luo" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 103, + 271, + 417, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 271, + 417, + 282 + ], + "spans": [ + { + "bbox": [ + 103, + 271, + 417, + 282 + ], + "type": "text", + "content": "Guangdong Institute of Smart Education, Jinan University, Guangzhou, China" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 103, + 283, + 189, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 283, + 189, + 293 + ], + 
"spans": [ + { + "bbox": [ + 103, + 283, + 189, + 293 + ], + "type": "text", + "content": "lwq@jnu.edu.cn" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 267, + 322, + 326, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 322, + 326, + 333 + ], + "spans": [ + { + "bbox": [ + 267, + 322, + 326, + 333 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 346, + 461, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 346, + 461, + 601 + ], + "spans": [ + { + "bbox": [ + 131, + 346, + 461, + 601 + ], + "type": "text", + "content": "Knowledge tracing (KT) is the problem of predicting students' future performance based on their historical interactions with intelligent tutoring systems. Recently, many works present lots of special methods for applying deep neural networks to KT from different perspectives like model architecture, adversarial augmentation and etc., which make the overall algorithm and system become more and more complex. Furthermore, due to the lack of standardized evaluation protocol (Liu et al. 2022), there is no widely agreed KT baselines and published experimental comparisons become inconsistent and self-contradictory, i.e., the reported AUC scores of DKT on ASSISTments2009 range from 0.721 to 0.821 (Minn et al. 2018 Yeung & Yeung 2018). Therefore, in this paper, we provide a strong but simple baseline method to deal with the KT task named SIMPLEKT. Inspired by the Rasch model in psychometrics, we explicitly model question-specific variations to capture the individual differences among questions covering the same set of knowledge components that are a generalization of terms of concepts or skills needed for learners to accomplish steps in a task or a problem. 
Furthermore, instead of using sophisticated representations to capture student forgetting behaviors, we use the ordinary dot-product attention function to extract the time-aware information embedded in the student learning interactions. Extensive experiments show that such a simple baseline is able to always rank top 3 in terms of AUC scores and achieve 57 wins, 3 ties and 16 loss against 12 DLKT baseline methods on 7 public datasets of different domains. We believe this work serves as a strong baseline for future KT research. Code is available at https://github.com/pykt-team/pykt-toolkit!" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 98, + 620, + 198, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 620, + 198, + 631 + ], + "spans": [ + { + "bbox": [ + 98, + 620, + 198, + 631 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 95, + 645, + 496, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 645, + 496, + 701 + ], + "spans": [ + { + "bbox": [ + 95, + 645, + 496, + 701 + ], + "type": "text", + "content": "Knowledge tracing (KT) is a sequential prediction task that aims to predict the outcomes of students over questions by modeling their mastery of knowledge, i.e., knowledge states, as they interact with learning platforms such as massive open online courses and intelligent tutoring systems, as shown in Figure ①. Solving the KT problems may help teachers better detect students that need further attention, or recommend personalized learning materials to students." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 95, + 705, + 496, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 705, + 496, + 728 + ], + "spans": [ + { + "bbox": [ + 95, + 705, + 496, + 728 + ], + "type": "text", + "content": "The KT related research has been studied since 1990s where Corbett and Anderson, to the best of our knowledge, were the first to estimate students' current knowledge with regard to each individ-" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 50, + 286, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 50, + 286, + 63 + ], + "spans": [ + { + "bbox": [ + 96, + 50, + 286, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 109, + 734, + 269, + 745 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 734, + 269, + 745 + ], + "spans": [ + { + "bbox": [ + 109, + 734, + 269, + 745 + ], + "type": "text", + "content": "*The corresponding author: Shuyan Huang." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 109, + 745, + 389, + 757 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 745, + 389, + 757 + ], + "spans": [ + { + "bbox": [ + 109, + 745, + 389, + 757 + ], + "type": "text", + "content": "1We merged our model to the PYKT benchmark at https://pykt.org/" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "spans": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 95, + 106, + 498, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 106, + 498, + 242 + ], + "spans": [ + { + "bbox": [ + 95, + 106, + 498, + 242 + ], + "type": "text", + "content": "ual knowledge component (KC) (Corbett & Anderson 1994). A KC is a description of a mental structure or process that a learner uses, alone or in combination with other KCs, to accomplish steps in a task or a problem? Since then, many attempts have been made to solve the KT problem, such as probabilistic graphical models (Kaiser et al. 2017) and factor analysis based models (Cen et al. 2006; Lavoué et al. 2018; Thai-Nghe et al. 2012). Recently, with the rapid development of deep neural networks, many deep learning based knowledge tracing (DLKT) models are developed, such as auto-regressive based deep sequential KT models (Piech et al. 2015; Yeung & Yeung 2018; Chen et al. 2018; Wang et al. 2019; Guo et al. 2021; Long et al. 2021; Chen et al. 2023), memory-augmented KT models (Zhang et al. 2017; Abdelrahman & Wang 2019; Yeung 2019), attention based KT models (Pandey & Karypis 2019; Pandey & Srivastava 2020; Choi et al. 2020; Ghosh et al. 2020; Pu et al. 2020), and graph based KT models (Nakagawa et al. 
2019; Yang et al. 2020; Tong et al. 2020)." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 97, + 251, + 500, + 317 + ], + "blocks": [ + { + "bbox": [ + 97, + 251, + 500, + 317 + ], + "lines": [ + { + "bbox": [ + 97, + 251, + 500, + 317 + ], + "spans": [ + { + "bbox": [ + 97, + 251, + 500, + 317 + ], + "type": "image", + "image_path": "bf81367ab97873c8d7e36048a7e047999a27c48e636f4e4f74b1048c84a31c77.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 322, + 497, + 345 + ], + "lines": [ + { + "bbox": [ + 95, + 322, + 497, + 345 + ], + "spans": [ + { + "bbox": [ + 95, + 322, + 497, + 345 + ], + "type": "text", + "content": "Figure 1: Graphical illustration of the task of KnowledgeTracing. “√” and “×” denote the question is answered correctly and incorrectly." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 95, + 345, + 497, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 345, + 497, + 577 + ], + "spans": [ + { + "bbox": [ + 95, + 345, + 497, + 577 + ], + "type": "text", + "content": "Although DLKT approaches have constituted new paradigms of the KT problem and achieved promising results, recently developed DLKT models seem to be more and more complex and resemble each other with very limited nuances from the methodological perspective: applying different neural components to capture student forgetting behaviors" + }, + { + "bbox": [ + 95, + 345, + 497, + 577 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 95, + 345, + 497, + 577 + ], + "type": "text", + "content": " (Ghosh et al. 2020; Nagatani et al. 2019), recency effects (Zhang et al. 2021), and various auxiliary information including relations between questions and KCs (Tong et al. 2020; Pandey & Srivastava 2020; Liu et al. 2021; Yang et al. 2020), question text content (Liu et al. 2019; Wang et al. 
2020a), question difficulty level (Liu et al. 2021; Shen et al. 2022), and students' learning ability (Shen et al. 2020). Furthermore, published DLKT baseline results surprisingly diverge. For example, the reported AUC scores of DKT and AKT on ASSISTments2009 range from 0.721 to 0.821 (Minn et al. 2018; Yeung & Yeung 2018) and from 0.747 to 0.835 in (Ghosh et al. 2020; Wang et al. 2021) respectively. Another example is that for the performance of DKT on the ASSISTments2009 dataset, it is recognized as one of the best baselines by Ghosh et al. (2020) while Long et al. (2022) and Zhang et al. (2021) showed that its performance is below the average. Recent survey studies by Sarsa et al. (2022) and Liu et al. (2022) summarized aforementioned inconsistencies of baseline results and showed evidence that variations in hyper-parameters and data pre-processing procedures contribute significantly to prediction performance of DLKT models. Specifically, Sarsa et al. (2022) empirically found that even simple baselines with little predictive value may outperform DLKT models with sophisticated neural components. Liu et al. (2022) built a standardized DLKT benchmark platform and showed that the improvement of many DLKT approaches is minimal compared to the very first DLKT model proposed by Piech et al. (2015)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 95, + 581, + 498, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 581, + 498, + 714 + ], + "spans": [ + { + "bbox": [ + 95, + 581, + 498, + 714 + ], + "type": "text", + "content": "Therefore, in this paper, we propose SIMPLEKT, a simple but tough-to-beat KT baseline that is simple to implement, computationally friendly and robust to a wide range of KT datasets across different domains. 
Motivated by the Rasch model that is a classic yet powerful model in psychometrics, the proposed SIMPLEKT approach captures the individual differences among questions covering the same set of KCs by representing each question's embedding as an additive combination of the average of its corresponding KCs' embeddings and a question-specific variation. Furthermore, different from many existing models that try to capture various aforementioned relations and information, the SIMPLEKT is purely based on the attention mechanism and uses the ordinary dot-product attention function to capture the contextual information embedded in the student learning interactions. To comprehensively and systematically evaluate the performance of SIMPLEKT, we choose to use the publicly available PYKT benchmark implementation to guarantee valid and reproducible comparisons against 12 DLKT methods on 7 popular datasets across differ-" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "spans": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 108, + 723, + 403, + 734 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 723, + 403, + 734 + ], + "spans": [ + { + "bbox": [ + 108, + 723, + 403, + 734 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 108, + 723, + 403, + 734 + ], + "type": "text", + "content": "A KC is a generalization of everyday terms like concept, principle, fact, or skill." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 735, + 374, + 745 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 735, + 374, + 745 + ], + "spans": [ + { + "bbox": [ + 110, + 735, + 374, + 745 + ], + "type": "text", + "content": "3The Rasch model is also known as the 1PL item response theory model." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 745, + 194, + 756 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 745, + 194, + 756 + ], + "spans": [ + { + "bbox": [ + 111, + 745, + 194, + 756 + ], + "type": "text", + "content": "4https://www.pykt.org/" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "spans": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 95, + 106, + 497, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 106, + 497, + 174 + ], + "spans": [ + { + "bbox": [ + 95, + 106, + 497, + 174 + ], + "type": "text", + "content": "ent domains. Results shown that the SIMPLEKT beats a wide range of modern neural KT models that based on graph neural networks, memory augmented neural networks, and adversarial neural networks. This suggests that this simple method should be used as the baseline to beat future KT research, especially when designing sophisticated neural KT architectures. 
To encourage reproducible research, all the related codes, data and the learned SIMPLEKT models are publicly available at https://github.com/pykt-team/pykt-toolkit" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 189, + 204, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 189, + 204, + 201 + ], + "spans": [ + { + "bbox": [ + 96, + 189, + 204, + 201 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 95, + 213, + 496, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 213, + 496, + 248 + ], + "spans": [ + { + "bbox": [ + 95, + 213, + 496, + 248 + ], + "type": "text", + "content": "Recently, deep learning technique have been widely applied into KT task for student's historical learning modeling and the future performance prediction. Existing DLKT approaches can be categorized into the following 5 categories:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 95, + 252, + 497, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 252, + 497, + 307 + ], + "spans": [ + { + "bbox": [ + 95, + 252, + 497, + 307 + ], + "type": "text", + "content": "C1: Deep sequential models. DLKT models that use auto-regressive architectures to capture students' chronologically ordered interactions. For example, (Piech et al., 2015) proposed the very first DKT model that utilizes an LSTM layer to estimate the knowledge mastery. (Lee & Yeung, 2019) proposed to enhance DKT with a skill encoder that combines student learning activities and KC representations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 95, + 312, + 496, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 312, + 496, + 358 + ], + "spans": [ + { + "bbox": [ + 95, + 312, + 496, + 358 + ], + "type": "text", + "content": "C2: Memory augmented models. 
DLKT models that capture latent relations between KCs and student knowledge states via memory networks. For instance, (Zhang et al., 2017) exploited and stored the KC relationships via a static key memory matrix and predict students' knowledge mastery levels with a dynamic value memory matrix." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 95, + 362, + 496, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 362, + 496, + 396 + ], + "spans": [ + { + "bbox": [ + 95, + 362, + 496, + 396 + ], + "type": "text", + "content": "C3: Adversarial based models. DLKT models that utilize the adversarial techniques to generate perturbations to improve model generalization capability. jointly train an attentiveLSTM KT model with both original and adversarial examples." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 95, + 401, + 496, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 401, + 496, + 456 + ], + "spans": [ + { + "bbox": [ + 95, + 401, + 496, + 456 + ], + "type": "text", + "content": "C4: Graph based models. DLKT models that use the graph neural networks to model intrinsic relations among questions, KCs and interactions. (Liu et al., 2021) presented a question-KC bipartite graph to explicitly capture question-level and KC-level inner-relations and question difficulties. (Yang et al., 2020) introduced a graph convolutional network to obtain the representation of the question-KC correlations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 95, + 461, + 497, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 461, + 497, + 528 + ], + "spans": [ + { + "bbox": [ + 95, + 461, + 497, + 528 + ], + "type": "text", + "content": "C5: Attention based models. DLKT models that capture dependencies between interactions via the attention mechanism. 
For example, (Pandey & Karypis, 2019) used self-attention network to capture the relevance between KCs and students' historical interactions. Choi et al., 2020 designed an encoder-decoder structure to represent the exercise and response embedding sequences. (Ghosh et al., 2020) performed three self-attention modules and explicitly model students' forgetting behaviors via a monotonic attention mechanism." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 95, + 534, + 496, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 534, + 496, + 590 + ], + "spans": [ + { + "bbox": [ + 95, + 534, + 496, + 590 + ], + "type": "text", + "content": "Please note that the above categorizations are not exclusive and related techniques can be combined. For example, (Abdelrahman & Wang, 2019) proposed a sequential key-value memory network to unify the strengths of recurrent modeling capacity and memory capacity. The proposed SIMPLEKT approach belongs to C5 and it purely models student interactions by using the very ordinary dot-product attention function." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 96, + 605, + 364, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 605, + 364, + 618 + ], + "spans": [ + { + "bbox": [ + 96, + 605, + 364, + 618 + ], + "type": "text", + "content": "3 A SIMPLE METHOD FOR KNOWLEDGE TRACING" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 96, + 629, + 220, + 640 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 629, + 220, + 640 + ], + "spans": [ + { + "bbox": [ + 96, + 629, + 220, + 640 + ], + "type": "text", + "content": "3.1 PROBLEM STATEMENT" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "spans": [ + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": "In this work, our objective is given an arbitrary question " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "q_*" + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": " to predict the probability of whether a student will answer " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "q_*" + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": " correctly or not based on the student's historical interaction data. 
More specifically, for each student " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": ", we assume that we have observed a chronologically ordered collection of " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": " past interactions i.e., " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "S = \\{s_j\\}_{j=1}^T" + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": ". Each interaction is represented as a 4-tuple " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "s = " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "q, \\{c\\}, r, s" + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": " represent the specific question, the associated KC set, the binary valued student response and student's response time step respectively. We would like to estimate the probability " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "\\hat{r}_*" + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": " of the student's performance on arbitrary question " + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "inline_equation", + "content": "q_*" + }, + { + "bbox": [ + 95, + 649, + 497, + 729 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 51, + 286, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 51, + 286, + 63 + ], + "spans": [ + { + "bbox": [ + 96, + 51, + 286, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 95, + 735, + 496, + 756 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 735, + 496, + 756 + ], + "spans": [ + { + "bbox": [ + 95, + 735, + 496, + 756 + ], + "type": "text", + "content": "5Response is a binary valued indicator variable where 1 represents the student correctly answered the question, and 0 otherwise." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "spans": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 96, + 107, + 240, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 107, + 240, + 117 + ], + "spans": [ + { + "bbox": [ + 96, + 107, + 240, + 117 + ], + "type": "text", + "content": "3.2 THE SIMPLEKT APPROACH" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 127, + 378, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 127, + 378, + 139 + ], + "spans": [ + { + "bbox": [ + 96, + 127, + 378, + 139 + ], + "type": "text", + "content": "3.2.1 REPRESENTATIONS OF QUESTIONS, KCs AND RESPONSES." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 95, + 147, + 497, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 147, + 497, + 238 + ], + "spans": [ + { + "bbox": [ + 95, + 147, + 497, + 238 + ], + "type": "text", + "content": "Effectively representing student interactions is crucial to the success of the DLKT models. In real-world educational scenarios, the question bank is usually much bigger than the set of KCs. For example, the number of questions is more than 1500 times larger than the number of KCs in the Algebra2005 dataset (described in Section 4.1). Therefore, to effectively learn and fairly evaluate the DLKT models from such highly sparse question-response data, following the previous work of Ghosh et al., 2020 and Liu et al., 2022, we artificially transform the original question-response data into KC-response data by expanding each question-level interaction into multiple KC-level interactions when the question is associated with a set of KCs (illustrated in Figure 2)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "spans": [ + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": "Furthermore, due to the fact that questions covering the same set of KCs may have various difficulty levels, students perform significantly different. 
As shown in Figure 2 even though questions " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "q_{2}" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "q_{4}" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": " have the same set of KCs, i.e., " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "c_{1}" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "c_{3}" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": ", students may get " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "q_{2}" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": " wrong but " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "q_{4}" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": " correct. Therefore, it is unrealistic to treat every KC in the expanded KC-response sequence identical. Inspired by the very classic and simple Rasch model in psychometrics that explicitly uses a scalar to characterize the latent factor of question difficulty, we choose to use a question-specific difficulty vector to capture the individual differences among questions on the same KC. 
More specifically, the " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": "th representations of KC (i.e., " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": ") and interaction (i.e., " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_t" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": ") in the expanded KC sequence of concept " + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "inline_equation", + "content": "c_{k}" + }, + { + "bbox": [ + 95, + 238, + 316, + 413 + ], + "type": "text", + "content": " are represented as follows:" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 331, + 248, + 490, + 373 + ], + "blocks": [ + { + "bbox": [ + 331, + 248, + 490, + 373 + ], + "lines": [ + { + "bbox": [ + 331, + 248, + 490, + 373 + ], + "spans": [ + { + "bbox": [ + 331, + 248, + 490, + 373 + ], + "type": "image", + "image_path": "d8c6b96a374ed2a73c921533fbe0c22a78147fd1c793cec2a307b508c7c6be26.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 374, + 496, + 408 + ], + "lines": [ + { + "bbox": [ + 321, + 374, + 496, + 408 + ], + "spans": [ + { + "bbox": [ + 321, + 374, + 496, + 408 + ], + "type": "text", + "content": "Figure 2: Graphical illustration of transforming the original question-response data into KC-response data." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 418, + 459, + 433 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 418, + 459, + 433 + ], + "spans": [ + { + "bbox": [ + 132, + 418, + 459, + 433 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {t} = \\mathbf {z} _ {c _ {k}} \\oplus \\mathbf {m} _ {q _ {j}} \\odot \\mathbf {v} _ {c _ {k}}; \\quad \\mathbf {y} _ {t} = \\mathbf {z} _ {c _ {k}} \\oplus \\mathbf {r} _ {q _ {j}}; \\quad \\mathbf {z} _ {c _ {k}} = \\mathbf {W} _ {c} \\cdot \\mathbf {e} _ {c _ {k}}; \\quad \\mathbf {r} _ {q _ {j}} = \\mathbf {W} _ {q} \\cdot \\mathbf {e} _ {q _ {j}}", + "image_path": "9c63576e7ab26a9147b133d455f2b20fdb92ce006dde5644665525d8a731aed5.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "spans": [ + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{c_k}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " denotes the latent representation of the KC " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "c_{k}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{q_j}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " denotes the difficulty vector of question " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "q_{j}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " and question " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "q_{j}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " contains the KC " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "c_{k}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{c_k}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " represents the question-centric variation of " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "q_{j}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " covering this KC " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "c_{k}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_{q_j}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " denotes the representation of student response on " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "q_{j}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_{c_k}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": "-dimensional one-hot vector indicating the corresponding KC and " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_{q_j}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " is the 2-dimensional one-hot vector indicating whether the question is answered correctly. " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{c_k}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{q_j}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{c_k}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_{q_j}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": "-dimensional learnable real-valued vectors. 
" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_c\\in \\mathbb{R}^{d\\times n}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_q\\in \\mathbb{R}^{d\\times 2}" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " are learnable linear transformation operations. " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " are the element-wise product and addition operators. " + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 95, + 437, + 497, + 516 + ], + "type": "text", + "content": " is the total number of KCs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 96, + 527, + 376, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 527, + 376, + 538 + ], + "spans": [ + { + "bbox": [ + 96, + 527, + 376, + 538 + ], + "type": "text", + "content": "3.2.2 PREDICTION WITH ORDINARY DOT-PRODUCT ATTENTION." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 95, + 546, + 496, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 546, + 496, + 591 + ], + "spans": [ + { + "bbox": [ + 95, + 546, + 496, + 591 + ], + "type": "text", + "content": "Different from many existing DLKT approaches that use sophisticated neural components to model student learning and/or forgetting behaviors, we choose to use the ordinary dot-product attention function to explore and extract knowledge states from students' past learning history. Specifically, the retrieved knowledge state " + }, + { + "bbox": [ + 95, + 546, + 496, + 591 + ], + "type": "inline_equation", + "content": "(\\mathbf{h}_{t + 1})" + }, + { + "bbox": [ + 95, + 546, + 496, + 591 + ], + "type": "text", + "content": " at the " + }, + { + "bbox": [ + 95, + 546, + 496, + 591 + ], + "type": "inline_equation", + "content": "(t + 1)" + }, + { + "bbox": [ + 95, + 546, + 496, + 591 + ], + "type": "text", + "content": "th timestamp is computed as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 144, + 597, + 447, + 611 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 597, + 447, + 611 + ], + "spans": [ + { + "bbox": [ + 144, + 597, + 447, + 611 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {t + 1} = \\text {S e l f A t t e n t i o n} (Q = \\mathbf {x} _ {t + 1}, K = \\left\\{\\mathbf {x} _ {1}, \\dots , \\mathbf {x} _ {t} \\right\\}, V = \\left\\{\\mathbf {y} _ {1}, \\dots , \\mathbf {y} _ {t} \\right\\}).", + "image_path": "bfb1d1f7d0dfbccdd5d24d09e1122129994229273a5be929690810aeba8795ca.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 95, + 616, + 496, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 616, + 496, + 639 + ], + "spans": [ + { + "bbox": [ + 95, + 616, + 496, + 639 + ], + "type": "text", + "content": "Then we use a two-layer fully connected network to refine the knowledge state and 
the overall optimization function is as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 156, + 642, + 436, + 657 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 642, + 436, + 657 + ], + "spans": [ + { + "bbox": [ + 156, + 642, + 436, + 657 + ], + "type": "interline_equation", + "content": "\\eta_ {t + 1} = \\mathbf {w} ^ {\\top} \\cdot \\operatorname {R e L U} \\left(\\mathbf {W} _ {2} \\cdot \\operatorname {R e L U} \\left(\\mathbf {W} _ {1} \\cdot \\left[ \\mathbf {h} _ {t + 1}; \\mathbf {x} _ {t + 1} \\right] + \\mathbf {b} _ {1}\\right) + \\mathbf {b} _ {2}\\right) + b", + "image_path": "b15edfde1208ea904e12cd6d17f1d566184a3e2188ad6a2c7c44229779ee49c1.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 169, + 659, + 379, + 677 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 659, + 379, + 677 + ], + "spans": [ + { + "bbox": [ + 169, + 659, + 379, + 677 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = - \\sum \\big (r _ {t} \\log \\sigma (\\eta_ {t}) + (1 - r _ {t}) \\log (1 - \\sigma (\\eta_ {t})) \\big)", + "image_path": "b9b0fe8844d1a7437e938d2d09918c87e178958b2403d361efe12519135bee8f.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "spans": [ + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_1, \\mathbf{W}_2, \\mathbf{w}, \\mathbf{b}_1, \\mathbf{b}_2" + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "text", + "content": " are trainable parameters and " + }, + { + 
"bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_1 \\in \\mathbb{R}^{d \\times 2d}" + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_2 \\in \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{w}, \\mathbf{b}_1, \\mathbf{b}_2 \\in \\mathbb{R}^{d \\times 1}" + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "text", + "content": " is scalar. " + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "inline_equation", + "content": "\\sigma(\\cdot)" + }, + { + "bbox": [ + 95, + 679, + 496, + 704 + ], + "type": "text", + "content": " is the sigmoid function." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 96, + 714, + 325, + 725 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 714, + 325, + 725 + ], + "spans": [ + { + "bbox": [ + 96, + 714, + 325, + 725 + ], + "type": "text", + "content": "3.2.3 RELATIONSHIP TO EXISTING DLKT MODELS." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 95, + 733, + 496, + 759 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 733, + 496, + 759 + ], + "spans": [ + { + "bbox": [ + 95, + 733, + 496, + 759 + ], + "type": "text", + "content": "Although the proposed SIMPLEKT belongs to model category C5 discussed in Section 2, it is distinguished from attention based representative DLKT models such as AKT (Ghosh et al., 2020), SAKT" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "spans": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 293, + 775, + 300, + 783 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 775, + 300, + 783 + ], + "spans": [ + { + "bbox": [ + 293, + 775, + 300, + 783 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 95, + 106, + 497, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 106, + 497, + 250 + ], + "spans": [ + { + "bbox": [ + 95, + 106, + 497, + 250 + ], + "type": "text", + "content": "Pandey & Karypis (2019) and SAINT (Choi et al., 2020). 
The difference between SIMPLEKT and AKT are threefold: first, we omit the self-attentive question encoder and knowledge encoder in AKT and directly feed the representations of " + }, + { + "bbox": [ + 95, + 106, + 497, + 250 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 95, + 106, + 497, + 250 + ], + "type": "text", + "content": "s and " + }, + { + "bbox": [ + 95, + 106, + 497, + 250 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_t" + }, + { + "bbox": [ + 95, + 106, + 497, + 250 + ], + "type": "text", + "content": "s into attention based knowledge state extractor; second, instead of using time decayed monotonic attention function to extract the initial knowledge state, we choose to use the ordinary dot-product function that is simple and free of hyper-parameters; third, interaction representations " + }, + { + "bbox": [ + 95, + 106, + 497, + 250 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_t" + }, + { + "bbox": [ + 95, + 106, + 497, + 250 + ], + "type": "text", + "content": "s are simply computed by adding representations of KCs and responses while AKT uses extra parameters to explicitly model the effects of question difficulty in interaction representations. When comparing SIMPLEKT to SAKT and SAINT, we explicitly model the latent question-centric difficulty when learning the KC representations while SAKT and SAINT ignore the question-level difference and treat all questions are identical if they contain the same set of KCs. Furthermore, SAINT adopts the encoder-decoder architecture and utilizes Transformers to model the student interaction sequence while our SIMPLEKT only uses the dot-product attention function." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 271, + 192, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 271, + 192, + 282 + ], + "spans": [ + { + "bbox": [ + 96, + 271, + 192, + 282 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 96, + 298, + 168, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 298, + 168, + 308 + ], + "spans": [ + { + "bbox": [ + 96, + 298, + 168, + 308 + ], + "type": "text", + "content": "4.1 DATASETS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 95, + 321, + 496, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 321, + 496, + 378 + ], + "spans": [ + { + "bbox": [ + 95, + 321, + 496, + 378 + ], + "type": "text", + "content": "In this paper, we experiment with 7 widely used datasets to comprehensively evaluate the performance of our models. These 7 datasets can be divided into 2 categories: (1) D1: Datasets containing information of both questions and KCs; and (2) D2: Datasets containing information of either questions or KCs. Table1 gives real samples of question and KCs from both D1 and D2 categories. 
The detailed statistics of each dataset are listed in Appendix A.1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 384, + 430, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 384, + 430, + 395 + ], + "spans": [ + { + "bbox": [ + 96, + 384, + 430, + 395 + ], + "type": "text", + "content": "4.1.1 DATASETS CONTAINING INFORMATION OF BOTH QUESTIONS AND KCs" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 95, + 404, + 497, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 404, + 497, + 460 + ], + "spans": [ + { + "bbox": [ + 95, + 404, + 497, + 460 + ], + "type": "text", + "content": "ASSISTments2009 (AS2009) This dataset is about math exercises and collected from the free online tutoring ASSISTments platform in the school year 2009-2010. It is widely used as the standard benchmark for KT methods over the last decade (Feng et al., 2009; Ghosh et al., 2020; Zhang et al., 2017). It includes 337,4115 interactions, 4,661 sequences, 17,737 questions, 123 KCs and each question has 1.1968 KCs on average." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 95, + 465, + 496, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 465, + 496, + 522 + ], + "spans": [ + { + "bbox": [ + 95, + 465, + 496, + 522 + ], + "type": "text", + "content": "Algebra2005 (AL2005) This dataset stems from KDD Cup 2010 EDM Challenge, including the detailed step-level student responses to the mathematical problems (Stamper et al. 2010). Similar to (Choffin et al., Ghosh et al., 2020, Zhang et al., 2017), a unique question is constructed by concatenating the problem name and step name. It has 884,098 interactions, 4,712 sequences, 173,113 questions, 112 KCs and the average KCs is 1.3521." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 95, + 526, + 496, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 526, + 496, + 560 + ], + "spans": [ + { + "bbox": [ + 95, + 526, + 496, + 560 + ], + "type": "text", + "content": "Bridge2006 (BD2006) This dataset is also from the KDD Cup 2010 EDM Challenge and its unique question construction is similar to the process used in Algebra2005. The dataset has 1,824,310 interactions, 9,680 sequences, 129,263 questions, 493 KCs and the average KCs is 1.0136." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 95, + 564, + 496, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 564, + 496, + 610 + ], + "spans": [ + { + "bbox": [ + 95, + 564, + 496, + 610 + ], + "type": "text", + "content": "NIPS34 This dataset is provided by NeurlPS 2020 Education Challenge which contains students' answers to mathematics questions from Eedi. We use the dataset of Task 3 & Task 4 to evaluate our models (Wang et al. 2020b). There are 1,399,470 interactions, 9,401 sequences, 948 questions, 57 KCs, each question has 1.0137 KCs on average." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 96, + 626, + 431, + 638 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 626, + 431, + 638 + ], + "spans": [ + { + "bbox": [ + 96, + 626, + 431, + 638 + ], + "type": "text", + "content": "4.1.2 DATASETS CONTAINING INFORMATION OF EITHER QUESTIONS OR KCS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 95, + 647, + 496, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 647, + 496, + 692 + ], + "spans": [ + { + "bbox": [ + 95, + 647, + 496, + 692 + ], + "type": "text", + "content": "Static2019 This dataset is collected from an engineering statics course taught at the Carnegie Mellon University during Fall 2011 (Steif & Bier, 2014). Its unique question construction is similar to the process used in Algebra2005. 
The dataset has 189,292 interactions, 1,034 sequences and 1,223 questions." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 96, + 705, + 497, + 759 + ], + "blocks": [ + { + "bbox": [ + 96, + 705, + 497, + 759 + ], + "lines": [ + { + "bbox": [ + 96, + 705, + 497, + 759 + ], + "spans": [ + { + "bbox": [ + 96, + 705, + 497, + 759 + ], + "type": "image", + "image_path": "1ea70bac986d3459b8c8ad5e0c1787a5f929676b5cd889e905e5c1f5259f0041.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "spans": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "spans": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 95, + 105, + 496, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 105, + 496, + 153 + ], + "spans": [ + { + "bbox": [ + 95, + 105, + 496, + 153 + ], + "type": "text", + "content": "ASSISTments2015 (AS2015) Similar to ASSISTments2009, this dataset is collected from the ASSISTments platform in the year of 2015, and it has the largest number of students among the other ASSISTments datasets. It ends up with 682,789 interactions, 19,292 sequences and 100 KCs after pre-processing." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 95, + 156, + 496, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 156, + 496, + 180 + ], + "spans": [ + { + "bbox": [ + 95, + 156, + 496, + 180 + ], + "type": "inline_equation", + "content": "\\mathbf{POJ}^{[1]}" + }, + { + "bbox": [ + 95, + 156, + 496, + 180 + ], + "type": "text", + "content": ": This dataset is collected from Peking coding practice online platform and provided by Pandey & Srivastava (2020). It has 987,593 interactions, 20,114 sequences and 2,748 questions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 95, + 184, + 496, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 184, + 496, + 218 + ], + "spans": [ + { + "bbox": [ + 95, + 184, + 496, + 218 + ], + "type": "text", + "content": "Following the data pre-processing steps suggested by (Liu et al., 2022), we remove student sequences shorter than 3 attempts and set the maximum length of student interaction history to 200 for a high computational efficiency." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 102, + 232, + 490, + 276 + ], + "blocks": [ + { + "bbox": [ + 177, + 220, + 414, + 231 + ], + "lines": [ + { + "bbox": [ + 177, + 220, + 414, + 231 + ], + "spans": [ + { + "bbox": [ + 177, + 220, + 414, + 231 + ], + "type": "text", + "content": "Table 1: Examples of questions and KCs from D1 and D2." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 102, + 232, + 490, + 276 + ], + "lines": [ + { + "bbox": [ + 102, + 232, + 490, + 276 + ], + "spans": [ + { + "bbox": [ + 102, + 232, + 490, + 276 + ], + "type": "table", + "html": "
CategoryDatasetQuestionKnowledge Components
D1NIPS34Which calculation is incorrect? A.(-7)*2=-14 B.(-7)*( -2)=-14 C.7*2=14 D. 7*(-2)=-14Multiplying and Dividing Negative Numbers
D1NIPS34Which of the following number is a factor of 60 and a multiple of 6 ... A.3 B.12 C.20 D.120Factors and Highest Common Factor\nMultiples and Lowest Common Multiple
D2POJGiven 2 equations on the variables x and y, solve for x and y.Not Available
D2POJGiven a big integer number, you are required to find out whether it's a prime number.Not Available
", + "image_path": "020269c60fa46018891b4e4413898d3402f6c82b753277555d6daa506a9c7b50.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 96, + 284, + 172, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 284, + 172, + 294 + ], + "spans": [ + { + "bbox": [ + 96, + 284, + 172, + 294 + ], + "type": "text", + "content": "4.2 BASELINES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 95, + 304, + 496, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 304, + 496, + 329 + ], + "spans": [ + { + "bbox": [ + 95, + 304, + 496, + 329 + ], + "type": "text", + "content": "To comprehensively and systematically evaluate the performance of SIMPLEKT, we compare SIMPLEKT against 12 DLKT baseline models from aforementioned 5 categories in Section2 as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 337, + 496, + 615 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 96, + 337, + 464, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 337, + 464, + 350 + ], + "spans": [ + { + "bbox": [ + 96, + 337, + 464, + 350 + ], + "type": "text", + "content": "C1: DKT (Piech et al. 2015): directly uses RNNs to model students' learning processes." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 96, + 352, + 496, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 352, + 496, + 375 + ], + "spans": [ + { + "bbox": [ + 96, + 352, + 496, + 375 + ], + "type": "text", + "content": "- C1: DKT+ (Yeung & Yeung 2018): improves DKT by addressing the reconstruction and inconsistent issues." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 96, + 378, + 496, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 378, + 496, + 391 + ], + "spans": [ + { + "bbox": [ + 96, + 378, + 496, + 391 + ], + "type": "text", + "content": "C1: DKT-F (Nagatani et al. 2019): improves DKT by considering students' forgetting behaviors." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 96, + 394, + 496, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 394, + 496, + 417 + ], + "spans": [ + { + "bbox": [ + 96, + 394, + 496, + 417 + ], + "type": "text", + "content": "C1: KQN (Lee & Yeung, 2019): utilizes the dot product of the students' ability and KC representations to predict student performance." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 96, + 419, + 486, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 419, + 486, + 433 + ], + "spans": [ + { + "bbox": [ + 96, + 419, + 486, + 433 + ], + "type": "text", + "content": "C1: LPKT (Shen et al. 2021): designs the learning cell to model students' learning processes." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 96, + 435, + 496, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 435, + 496, + 458 + ], + "spans": [ + { + "bbox": [ + 96, + 435, + 496, + 458 + ], + "type": "text", + "content": "- C1: IEKT (Long et al., 2021): estimates student knowledge state via the student cognition and knowledge acquisition estimation modules." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 96, + 460, + 496, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 460, + 496, + 485 + ], + "spans": [ + { + "bbox": [ + 96, + 460, + 496, + 485 + ], + "type": "text", + "content": "C2: DKVMN (Zhang et al., 2017): exploits the relationships among KCs and estimate student mastery via memory networks." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 96, + 487, + 496, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 487, + 496, + 510 + ], + "spans": [ + { + "bbox": [ + 96, + 487, + 496, + 510 + ], + "type": "text", + "content": "C3: ATKT (Guo et al., 2021): uses adversarial perturbations to enhance the generalization of the attention-LSTM based KT model." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 96, + 513, + 496, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 513, + 496, + 537 + ], + "spans": [ + { + "bbox": [ + 96, + 513, + 496, + 537 + ], + "type": "text", + "content": "- C4: GKT (Nakagawa et al., 2019): casts the knowledge structure as a graph and reformulate the KT task as a node-level classification problem." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 96, + 539, + 496, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 539, + 496, + 562 + ], + "spans": [ + { + "bbox": [ + 96, + 539, + 496, + 562 + ], + "type": "text", + "content": "C5: SAKT (Pandey & Karypis 2019): uses self-attention to identify the relevance between the interactions and KCs." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 96, + 565, + 496, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 565, + 496, + 590 + ], + "spans": [ + { + "bbox": [ + 96, + 565, + 496, + 590 + ], + "type": "text", + "content": "- C5: SAINT (Choi et al., 2020): a Transformer-based model for KT that encode exercise and responses in the encoder and decoder respectively." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 96, + 592, + 496, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 592, + 496, + 615 + ], + "spans": [ + { + "bbox": [ + 96, + 592, + 496, + 615 + ], + "type": "text", + "content": "- C5: AKT (Ghosh et al. 
2020): models forgetting behaviors during the relevance computation between historical interactions and target questions." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 96, + 629, + 221, + 640 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 629, + 221, + 640 + ], + "spans": [ + { + "bbox": [ + 96, + 629, + 221, + 640 + ], + "type": "text", + "content": "4.3 EXPERIMENTAL SETUP" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 95, + 649, + 496, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 649, + 496, + 727 + ], + "spans": [ + { + "bbox": [ + 95, + 649, + 496, + 727 + ], + "type": "text", + "content": "Similar to (Liu et al. 2022), we randomly withhold " + }, + { + "bbox": [ + 95, + 649, + 496, + 727 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 95, + 649, + 496, + 727 + ], + "type": "text", + "content": " of the students' sequences for model evaluation and we perform standard 5-fold cross validation on the rest " + }, + { + "bbox": [ + 95, + 649, + 496, + 727 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 95, + 649, + 496, + 727 + ], + "type": "text", + "content": " of each dataset. We select ADAM (Kingma & Ba 2014) as the optimizer to train our model. The maximum of the training epochs is set to 200, and an early stopping strategy is used to speed up the training process. 
The embedding dimension, the hidden state dimension, the two dimension of the prediction layers are set to [64, 128], the learning rate and dropout rate are set to [1e-3, 1e-4, 1e-5] and [0.05, 0.1, 0.3, 0.5] respectively, the number of blocks and attention heads are set to [1, 2, 4] and [4, 8], the seed" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 735, + 441, + 744 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 735, + 441, + 744 + ], + "spans": [ + { + "bbox": [ + 107, + 735, + 441, + 744 + ], + "type": "text", + "content": "10 https://sites.google.com/site/assistmentsdata/datasets/2015-assistments-skill-builder-data" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 107, + 745, + 375, + 756 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 745, + 375, + 756 + ], + "spans": [ + { + "bbox": [ + 107, + 745, + 375, + 756 + ], + "type": "text", + "content": "1 https://drive.google.com/drive/folders/1LRLjqWfODwTYRMPw6wEJ_mMt1KZ4XBdk." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "spans": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "spans": [ + { + "bbox": [ + 293, + 775, + 300, + 784 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 95, + 106, + 496, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 106, + 496, + 142 + ], + "spans": [ + { + "bbox": [ + 95, + 106, + 496, + 142 + ], + "type": "text", + "content": "is set to [42, 3407] for reproducing the experimental results. Our model is implemented in PyTorch and trained on NVIDIA RTX 3090 GPU device. Similar to all existing DLKT research, we use the AUC as the main evaluation metric, and use accuracy as the secondary metric." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 155, + 232, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 155, + 232, + 166 + ], + "spans": [ + { + "bbox": [ + 96, + 155, + 232, + 166 + ], + "type": "text", + "content": "4.4 EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 95, + 175, + 497, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 175, + 497, + 243 + ], + "spans": [ + { + "bbox": [ + 95, + 175, + 497, + 243 + ], + "type": "text", + "content": "Overall Performance. Table 2 and Table 3 summarize the overall prediction performance of SIMPLEKT and all baselines in terms of the average AUC and accuracy scores. 
Marker *, o and ● indicates whether SIMPLEKT is statistically superior/equal/inferior to the compared method (using paired t-test at 0.01 significance level). The last column shows the total number of win/tie/loss for SIMPLEKT against the compared method on all 7 datasets (e.g., #win is how many times SIMPLEKT significantly outperforms that method)." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 98, + 268, + 500, + 375 + ], + "blocks": [ + { + "bbox": [ + 96, + 244, + 496, + 266 + ], + "lines": [ + { + "bbox": [ + 96, + 244, + 496, + 266 + ], + "spans": [ + { + "bbox": [ + 96, + 244, + 496, + 266 + ], + "type": "text", + "content": "Table 2: Overall AUC performance of SIMPLEKT and all baselines. “-” indicates the method is inapplicable for that dataset." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 98, + 268, + 500, + 375 + ], + "lines": [ + { + "bbox": [ + 98, + 268, + 500, + 375 + ], + "spans": [ + { + "bbox": [ + 98, + 268, + 500, + 375 + ], + "type": "table", + "html": "
ModelD1: Datasets containing info. of both questions and KCsD2: Datasets containing info. of either questions or KCsSIMPLEKT
AS2009AL2005BD2006NIPS34Statics2011AS2015POJ#win/#tie/#loss
DKT0.7541±0.0011*0.8149±0.0011*0.8015±0.0008*0.7689±0.0002*0.8222±0.0013●0.7271±0.0005●0.6089±0.0009*5/0/2
DKT+0.7547±0.0017*0.8156±0.0011*0.8020±0.0004*0.7696±0.0002*0.8279±0.0004●0.7285±0.0006●0.6173±0.0007*5/0/2
DKT-F-0.8147±0.0013*0.7985±0.0013*0.7733±0.0003*0.7839±0.0061*-0.6030±0.0023*5/0/0
KQN0.7477±0.0011*0.8027±0.0015*0.7936±0.0014*0.7684±0.0003*0.8232±0.0007●0.7254±0.0004●0.6080±0.0015*5/0/2
LPKT0.7814±0.0022●0.8274±0.0014●0.8055±0.0006*0.8035±0.0003○---1/1/2
IEKT0.7861±0.0027●0.8416±0.0014●0.8125±0.0009*0.8045±0.0002●---1/0/3
DKVMN0.7473±0.0006*0.8054±0.0011*0.7983±0.0009*0.7673±0.0004*0.8093±0.0017*0.7227±0.0004*0.6056±0.0022*7/0/0
ATKT0.7470±0.0008*0.7995±0.0023*0.7889±0.0008*0.7665±0.0001*0.8055±0.0020*0.7245±0.0007*0.6075±0.0012*7/0/0
GKT0.7424±0.0021*0.8110±0.0009*0.8046±0.0008*0.7689±0.0024*0.8040±0.0065○0.7258±0.0012●0.6070±0.0036*5/1/1
SAKT0.7246±0.0017*0.7880±0.0063*0.7740±0.0008*0.7517±0.0005*0.7965±0.0014*0.7114±0.0003*0.6095±0.0013*7/0/0
SAINT0.6958±0.0023○0.7775±0.0017*0.7781±0.0013*0.7873±0.0007*0.7599±0.0139*0.7026±0.0011*0.5563±0.0012*6/1/0
AKT0.7853±0.0017●0.8306±0.0019●0.8208±0.0007●0.8033±0.0003*0.8309±0.0009●0.7281±0.0004●0.6281±0.0013●1/0/6
simpleKT0.7744±0.00180.8254±0.00030.8160±0.00060.8035±0.00000.8199±0.00110.7248±0.00050.6252±0.0005-
", + "image_path": "8721802cdd01a0129b840d02947fe083b73727fcaffd57ef29ca0ac1991127a6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 98, + 421, + 500, + 528 + ], + "blocks": [ + { + "bbox": [ + 96, + 397, + 496, + 419 + ], + "lines": [ + { + "bbox": [ + 96, + 397, + 496, + 419 + ], + "spans": [ + { + "bbox": [ + 96, + 397, + 496, + 419 + ], + "type": "text", + "content": "Table 3: Overall Accuracy performance of SIMPLEKT and all baselines. “-” indicates the method is inapplicable for that dataset." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 98, + 421, + 500, + 528 + ], + "lines": [ + { + "bbox": [ + 98, + 421, + 500, + 528 + ], + "spans": [ + { + "bbox": [ + 98, + 421, + 500, + 528 + ], + "type": "table", + "html": "
ModelD1: Datasets containing info. of both questions and KCsD2: Datasets containing info. of either questions or KCsSIMPLEKT
AS2009AL2005BD2006NIPS34Statics2011AS2015POJ#win/#tie/#loss
DKT0.7244±0.0014*0.8097±0.0005●0.8553±0.0002*0.7032±0.0004*0.7969±0.0006●0.7503±0.0003*0.6328±0.0020*5/0/2
DKT+0.7248±0.0009*0.8097±0.0007●0.8553±0.0003*0.7039±0.0004*0.7977±0.0006●0.7510±0.0004●0.6482±0.0021*4/0/3
DKT-F-0.8090±0.0005●0.8536±0.0004*0.7076±0.0002*0.7872±0.0011*-0.6371±0.0030*4/0/1
KQN0.7228±0.0009*0.8025±0.0006*0.8532±0.0006*0.7028±0.0001*0.7978±0.0007●0.7500±0.0003*0.6435±0.0017*6/0/1
LPKT0.7355±0.0015●0.8145±0.0007●0.8544±0.0008*0.7341±0.0003●---1/0/3
IEKT0.7375±0.0042●0.8236±0.0010●0.8553±0.0023*0.7330±0.0002●---1/0/3
DKVMN0.7199±0.0010*0.8027±0.0007*0.8545±0.0002*0.7016±0.0005*0.7929±0.0006*0.7508±0.0006○0.6393±0.0015*6/1/0
ATKT0.7208±0.0009*0.7998±0.0019*0.8511±0.0004*0.7013±0.0002*0.7904±0.0011*0.7494±0.0002*0.6332±0.0023*7/0/0
GKT0.7153±0.0032*0.8088±0.0008●0.8555±0.0002*0.7014±0.0028*0.7902±0.0021○0.7504±0.0010*0.6117±0.0147*5/1/1
SAKT0.7063±0.0018*0.7954±0.0020*0.8461±0.0005*0.6879±0.0004*0.7879±0.0015*0.7474±0.0002*0.6407±0.0035*7/0/0
SAINT0.6936±0.0034○0.7791±0.0016*0.8411±0.0065*0.7180±0.0006*0.7682±0.0056*0.7438±0.0010*0.6476±0.0003*6/1/0
AKT0.7392±0.0021●0.8124±0.0011●0.8587±0.0005●0.7323±0.0005*0.8021±0.0011●0.7521±0.0005●0.6492±0.0010*2/0/5
simpleKT0.7320±0.00120.8083±0.00050.8579±0.00030.7328±0.00010.7957±0.00200.7508±0.00040.6522±0.0008-
", + "image_path": "5b12cfb50117dce6dc0b2cb5255b893af2fcf45a64850104619d781d4c6bd5c3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 95, + 536, + 497, + 757 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 536, + 497, + 757 + ], + "spans": [ + { + "bbox": [ + 95, + 536, + 497, + 757 + ], + "type": "text", + "content": "From Table 2 and Table 3 we find the following results: (1) compared to other baseline methods, SIMPLEKT almost always ranks top 3 in terms of AUC scores and achieves 55 wins, 3 ties and 18 loss in total against 12 baselines on 7 public datasets of different domains. This indicates the strength of SIMPLEKT as a baseline of KT; (2) in general, the SIMPLEKT approach performs better on D1 datasets that have both question and KC information available. When training SIMPLEKT on D2 datasets, due to the lack of distinguished information about questions and KCs, the explicit question-centric difficulty modeling degrades and the KC representations become the question agnostic; (3) when comparing SIMPLEKT to other attentive models, i.e., SAKT, SAINT and AKT, in C5 category, our SIMPLEKT beats SAKT and SAINT on all the datasets, which indicates the effectiveness of explicit question-centric difficulty modeling. Although our SIMPLEKT approach is significantly worse than the AKT approach on 6 datasets, the performance gaps are quite minimal that are mostly within a " + }, + { + "bbox": [ + 95, + 536, + 497, + 757 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 95, + 536, + 497, + 757 + ], + "type": "text", + "content": " range. On the other hand, SIMPLEKT is much more concise compared to the two-layer attentive architecture in the AKT approach; (4) the SIMPLEKT outperforms many deep sequential models in category C1, including DKT, DKT+, DKT-F, KQN on AS2009, AL2005, BD2006, NIPS34 and POJ. 
We believe this is because the above 4 sequential models use KCs to index questions cannot capture the individual differences among questions with the same KCs which is crucial to predict student future performance; (5) comparing SIMPLEKT and IEKT, we can see, IEKT has better prediction performance on AS2009, AL2005, and NIPS34. This is because IEKT captures both question-level and KC-level variations in its representations and at the same time, it designs two specific neural modules to estimate individual cognition and acquisition" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "spans": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 293, + 775, + 301, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 775, + 301, + 784 + ], + "spans": [ + { + "bbox": [ + 293, + 775, + 301, + 784 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 95, + 106, + 497, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 106, + 497, + 163 + ], + "spans": [ + { + "bbox": [ + 95, + 106, + 497, + 163 + ], + "type": "text", + "content": "abilities; (6) compare to other different types of models in C2, C3, and C4, such as DKVMN, ATKT and GKT, our SIMPLEKT achieves better performance by using an ordinary dot-product attention function without any memory mechanism, adversarial learning or graph constructions, which encourages the further educational researchers and practitioners to develop effective models with a design of simplicity." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 208, + 164, + 377, + 174 + ], + "blocks": [ + { + "bbox": [ + 208, + 164, + 377, + 174 + ], + "lines": [ + { + "bbox": [ + 208, + 164, + 377, + 174 + ], + "spans": [ + { + "bbox": [ + 208, + 164, + 377, + 174 + ], + "type": "image", + "image_path": "5da15cc66032cae869d7934d078a7d23dc2f0c0f7d4cc795e8e905e2ca257f08.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 112, + 179, + 202, + 273 + ], + "blocks": [ + { + "bbox": [ + 112, + 179, + 202, + 273 + ], + "lines": [ + { + "bbox": [ + 112, + 179, + 202, + 273 + ], + "spans": [ + { + "bbox": [ + 112, + 179, + 202, + 273 + ], + "type": "image", + "image_path": "6b2208df3267500fc3e5973a1a2b1dcf2a38532118f4cc5ff02b4dfbce21c6f2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 204, + 179, + 294, + 274 + ], + "blocks": [ + { + "bbox": [ + 204, + 179, + 294, + 274 + ], + "lines": [ + { + "bbox": [ + 204, + 179, + 294, + 274 + ], + "spans": [ + { + "bbox": [ + 204, + 179, + 294, + 274 + ], + "type": "image", + "image_path": "b68518f716e87e7fbfb720c3fcba4f9043488e31019b5f2afa5ea6a6fbcfddd4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 295, + 179, + 385, + 273 + ], + "blocks": [ + { + "bbox": [ + 295, + 179, + 385, + 273 + ], + "lines": [ + { + "bbox": [ + 295, + 179, + 385, + 273 + ], + "spans": [ + { + "bbox": [ + 295, + 179, + 385, + 273 + ], + "type": "image", + "image_path": "211d7d9a09a33745622203c32e29b12da75927a5eadaa1d9a3a6143ed7851429.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 387, + 179, + 477, + 274 + ], + "blocks": [ + { + "bbox": [ + 387, + 179, + 477, + 274 + ], + "lines": [ + { + "bbox": 
[ + 387, + 179, + 477, + 274 + ], + "spans": [ + { + "bbox": [ + 387, + 179, + 477, + 274 + ], + "type": "image", + "image_path": "96d6686cd7e9357cb8fb87916c0055d02dc872631809326cc228a9635bb7c2be.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 112, + 275, + 202, + 369 + ], + "blocks": [ + { + "bbox": [ + 112, + 275, + 202, + 369 + ], + "lines": [ + { + "bbox": [ + 112, + 275, + 202, + 369 + ], + "spans": [ + { + "bbox": [ + 112, + 275, + 202, + 369 + ], + "type": "image", + "image_path": "51c839870d094e7074c119200452a9c660c71531b758646b3dbc292c4309a6fc.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 114, + 369, + 477, + 380 + ], + "lines": [ + { + "bbox": [ + 114, + 369, + 477, + 380 + ], + "spans": [ + { + "bbox": [ + 114, + 369, + 477, + 380 + ], + "type": "text", + "content": "Figure 3: Non-accumulative predictions in the multi-step ahead scenario in terms of AUC." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 204, + 275, + 294, + 369 + ], + "blocks": [ + { + "bbox": [ + 204, + 275, + 294, + 369 + ], + "lines": [ + { + "bbox": [ + 204, + 275, + 294, + 369 + ], + "spans": [ + { + "bbox": [ + 204, + 275, + 294, + 369 + ], + "type": "image", + "image_path": "754abc42972dead1fc150efb27449675463ffca216b88a8a574c51f9c773e7a4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 295, + 275, + 385, + 369 + ], + "blocks": [ + { + "bbox": [ + 295, + 275, + 385, + 369 + ], + "lines": [ + { + "bbox": [ + 295, + 275, + 385, + 369 + ], + "spans": [ + { + "bbox": [ + 295, + 275, + 385, + 369 + ], + "type": "image", + "image_path": "ca78c04a5d4e62ecc608792889df918871f71eae75af195ab14d5ff9963ca254.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 208, + 388, + 377, + 398 + ], + "blocks": [ + { + "bbox": [ + 208, + 388, + 377, + 398 + ], + "lines": [ + { + "bbox": [ + 208, + 388, + 377, + 398 + ], + "spans": [ + { + "bbox": [ + 208, + 388, + 377, + 398 + ], + "type": "image", + "image_path": "2b271f8bd2e9b455fe37dfca479c39667a2896d5366fe8bf4d28c9612b73f609.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 112, + 402, + 202, + 497 + ], + "blocks": [ + { + "bbox": [ + 112, + 402, + 202, + 497 + ], + "lines": [ + { + "bbox": [ + 112, + 402, + 202, + 497 + ], + "spans": [ + { + "bbox": [ + 112, + 402, + 202, + 497 + ], + "type": "image", + "image_path": "de36abe1d2424c182e4e7e7b6d6ac1f66e2cec151596bda8844d3f980b43a4c3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 204, + 402, + 294, + 497 + ], + "blocks": [ + { + 
"bbox": [ + 204, + 402, + 294, + 497 + ], + "lines": [ + { + "bbox": [ + 204, + 402, + 294, + 497 + ], + "spans": [ + { + "bbox": [ + 204, + 402, + 294, + 497 + ], + "type": "image", + "image_path": "6dc351890a459061f69ea2659801f9455e62acabe39e95c8af27685c52f5d1b7.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 295, + 402, + 385, + 497 + ], + "blocks": [ + { + "bbox": [ + 295, + 402, + 385, + 497 + ], + "lines": [ + { + "bbox": [ + 295, + 402, + 385, + 497 + ], + "spans": [ + { + "bbox": [ + 295, + 402, + 385, + 497 + ], + "type": "image", + "image_path": "2241242544f2055d3de8c879108f8dd2a826ec6ddb0fe03bca0e2d67d23326ae.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 387, + 402, + 478, + 497 + ], + "blocks": [ + { + "bbox": [ + 387, + 402, + 478, + 497 + ], + "lines": [ + { + "bbox": [ + 387, + 402, + 478, + 497 + ], + "spans": [ + { + "bbox": [ + 387, + 402, + 478, + 497 + ], + "type": "image", + "image_path": "a814d72df135b1d00adecb82f0186c73578c970cdf69cff357102df852f80c0a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 111, + 500, + 202, + 592 + ], + "blocks": [ + { + "bbox": [ + 111, + 500, + 202, + 592 + ], + "lines": [ + { + "bbox": [ + 111, + 500, + 202, + 592 + ], + "spans": [ + { + "bbox": [ + 111, + 500, + 202, + 592 + ], + "type": "image", + "image_path": "e34035c6630b35dce8246694fdb552e7f2d2001d6cdd6573828f7e28ee38169d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 106, + 592, + 485, + 602 + ], + "lines": [ + { + "bbox": [ + 106, + 592, + 485, + 602 + ], + "spans": [ + { + "bbox": [ + 106, + 592, + 485, + 602 + ], + "type": "text", + "content": "Figure 4: Non-accumulative predictions in the multi-step ahead scenario in terms of 
Accuracy." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 204, + 500, + 294, + 592 + ], + "blocks": [ + { + "bbox": [ + 204, + 500, + 294, + 592 + ], + "lines": [ + { + "bbox": [ + 204, + 500, + 294, + 592 + ], + "spans": [ + { + "bbox": [ + 204, + 500, + 294, + 592 + ], + "type": "image", + "image_path": "3958a12c89452969a13555448bbc36e31aa1a23860999cb10c49d082461882b9.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 295, + 500, + 385, + 592 + ], + "blocks": [ + { + "bbox": [ + 295, + 500, + 385, + 592 + ], + "lines": [ + { + "bbox": [ + 295, + 500, + 385, + 592 + ], + "spans": [ + { + "bbox": [ + 295, + 500, + 385, + 592 + ], + "type": "image", + "image_path": "c6dc0106a816219728505976ac8c86c6e14a855fcbc6c241430245bb2aa3dff2.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 95, + 613, + 497, + 756 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 613, + 497, + 756 + ], + "spans": [ + { + "bbox": [ + 95, + 613, + 497, + 756 + ], + "type": "text", + "content": "Multi-step KT Prediction Performance. In order to make the prediction close to real application scenarios, we also predict our model in multi-step prediction which predicts a span of student's responses given the student's historical interaction sequence. Practically, accurate multi-step KT prediction will provide constructive feedback to learning path selection and construction and help teachers adaptively adjust future teaching materials. We conduct the prediction in a non-accumulative setting that predicts all future values all at once to avoid accumulative prediction errors. 
To have a fine-grained analysis in the multi-step ahead prediction scenario, we further experiment with DLKT models on different portions of observed student interactions. Specifically, we vary the observed percentages of student interaction length from " + }, + { + "bbox": [ + 95, + 613, + 497, + 756 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 95, + 613, + 497, + 756 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 95, + 613, + 497, + 756 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 95, + 613, + 497, + 756 + ], + "type": "text", + "content": " with step size of " + }, + { + "bbox": [ + 95, + 613, + 497, + 756 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 95, + 613, + 497, + 756 + ], + "type": "text", + "content": ". Due to the space limit, we select the best baseline in each category, i.e., IEKT, DKVMN, ATKT, GKT, AKT as the representative approaches and the results in terms of AUC and accuracy are shown in Figure 3 and Figure 4. 
We make the following observations: (1) with the increasing historical information, the student AUC performance prediction become more accurate in most cases, which is in line with the" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "spans": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 293, + 775, + 299, + 783 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 775, + 299, + 783 + ], + "spans": [ + { + "bbox": [ + 293, + 775, + 299, + 783 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 95, + 106, + 496, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 106, + 496, + 163 + ], + "spans": [ + { + "bbox": [ + 95, + 106, + 496, + 163 + ], + "type": "text", + "content": "real-world educational scenario; (2) the attention based model almost outperforms other KT models in Statics2011, AS2005 and POJ according to AUC scores, this is because the attention mechanism is expert in capturing the long-term dependencies; and (3) our SIMPLEKT achieves the best prediction performance in BD2006, NIPS34 in terms of AUC, which indicates the proposed method is simple yet powerful." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 95, + 168, + 497, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 168, + 497, + 279 + ], + "spans": [ + { + "bbox": [ + 95, + 168, + 497, + 279 + ], + "type": "text", + "content": "Qualitative Visual Analysis. 
In this section, we qualitatively show the visualization of the prediction results made by SIMPLEKT in Figure 5 To better understand the model predictive behavior, we compute the historical error rate (HER) per question from the data and use the HERs as surrogates of question difficulties. Due to the space limit, more illustrative and fine-grained results are provided in Appendix A.2 As we can see from Figure 5 when a student meets a certain KC for the first time, the higher HER of the question, the lower the probability that student will get it correct. For example, the HERs for questions q527, q509, q512, q518 and q219 are 0.35, 0.41, 0.20, 0.51, and 0.48 and the corresponding prediction probabilities of SIMPLEKT are 0.74, 0.52, 0.84, 0.45, and 0.51 respectively. Furthermore, for those questions that cover the same set of KCs, such as questions q526 and q529, the model prediction probability decreases when the corresponding HER increases." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 99, + 290, + 497, + 377 + ], + "blocks": [ + { + "bbox": [ + 99, + 290, + 497, + 377 + ], + "lines": [ + { + "bbox": [ + 99, + 290, + 497, + 377 + ], + "spans": [ + { + "bbox": [ + 99, + 290, + 497, + 377 + ], + "type": "image", + "image_path": "adbc7a91fca12fc0189b85901c7b7945277d8527d4e95bba95e58cc8f022e9be.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 383, + 440, + 396 + ], + "lines": [ + { + "bbox": [ + 153, + 383, + 440, + 396 + ], + "spans": [ + { + "bbox": [ + 153, + 383, + 440, + 396 + ], + "type": "text", + "content": "Figure 5: Visualization of a student's prediction results on SIMPLEKT." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "spans": [ + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "text", + "content": "Ablation Study. We systematically examine the effect of the key component of question difficulty modeling by constructing two model variants: (1) SIMPLEKT-ScalarDiff that changes the question-centric difficulty vector " + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{q_t}" + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "text", + "content": " to scalar; and (2) SIMPLEKT-NoDiff that completely ignores the question difficulty modeling and simply set " + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{c_t}" + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "text", + "content": ". The prediction performance on all datasets that belong to D1 are reported in Table 4. Please note that since datasets in D2 only have either question information or KC information, SIMPLEKT, SIMPLEKT-ScalarDiff, and SIMPLEKT-NoDiff essentially become mathematically unidentifiable. From Table 4 we can easily observe that (1) the SIMPLEKT method outperforms the two model variants and especially when removing the question difficulty modeling component, the prediction performance decreases more than " + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "text", + "content": " on all D1 datasets. 
This empirically verifies the importance of question-centric difficulty modeling when making student performance prediction in KT scenarios; and (2) comparing SIMPLEKT and SIMPLEKT-ScalarDiff, the performance is very minimal. We believe this is because the KC representation " + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 95, + 407, + 497, + 562 + ], + "type": "text", + "content": " is based on the simple additive assumption and a scalar is expressive enough to represent question-level difficulty under this assumption." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 117, + 590, + 477, + 633 + ], + "blocks": [ + { + "bbox": [ + 171, + 571, + 420, + 582 + ], + "lines": [ + { + "bbox": [ + 171, + 571, + 420, + 582 + ], + "spans": [ + { + "bbox": [ + 171, + 571, + 420, + 582 + ], + "type": "text", + "content": "Table 4: The performance of different variants in SIMPLEKT." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 117, + 590, + 477, + 633 + ], + "lines": [ + { + "bbox": [ + 117, + 590, + 477, + 633 + ], + "spans": [ + { + "bbox": [ + 117, + 590, + 477, + 633 + ], + "type": "table", + "html": "
AS2009AL2005BD2006NIPS34
SIMPLEKT0.7744±0.00180.8254±0.00030.8160±0.00060.8035±0.0000
SIMPLEKT-ScalarDiff0.7740±0.00210.8250±0.00130.8159±0.00110.8008±0.0012
SIMPLEKT-NoDiff0.7411±0.00160.8048±0.00180.7922±0.00110.7646±0.0005
", + "image_path": "125a7dfab8f8726f4b09c337d927d97c7648cf8012f9cf1cca41e3d4ec72c95c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 654, + 188, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 654, + 188, + 666 + ], + "spans": [ + { + "bbox": [ + 96, + 654, + 188, + 666 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 95, + 678, + 496, + 756 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 678, + 496, + 756 + ], + "spans": [ + { + "bbox": [ + 95, + 678, + 496, + 756 + ], + "type": "text", + "content": "In this work, we propose SIMPLEKT, a simple but tough-to-beat approach to solve KT task effectively. Motivated by the Rasch model in psychometrics, the SIMPLEKT approach is designed to capture individual differences among questions with the same KCs. Furthermore, the proposed SIMPLEKT approach simplifies the sophisticated student knowledge state estimation component with the ordinary dot-product attention function. Comprehensive experimental results demonstrate that SIMPLEKT is able to beat a wide range of recently proposed DLKT models on various datasets from different domains. We believe this work serves as a strong baseline for future KT research." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "spans": [ + { + "bbox": [ + 96, + 51, + 285, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 293, + 775, + 301, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 775, + 301, + 784 + ], + "spans": [ + { + "bbox": [ + 293, + 775, + 301, + 784 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 97, + 106, + 260, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 106, + 260, + 119 + ], + "spans": [ + { + "bbox": [ + 97, + 106, + 260, + 119 + ], + "type": "text", + "content": "REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 95, + 130, + 498, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 130, + 498, + 221 + ], + "spans": [ + { + "bbox": [ + 95, + 130, + 498, + 221 + ], + "type": "text", + "content": "The code of SIMPLEKT and its variants, i.e., SIMPLEKT-ScalarDiff and SIMPLEKT-NoDiff, to reproduce the experimental results can be found at https://github.com/pykt-team/ pykt-toolkit. We give the details of data-preprocessing and the training hyper-parameters of SIMPLEKT in Section 4.1 and Section 4.3. The code of the 12 comparison models is accessible from an open-sourced PYKT python library at https://pykt.org/. We choose to use the same data partitions of train, validation, test sets as PYKT and hence all the results can be easily reproducible. 
All the model training details of all baselines can be found at https://pykt-toolkit.readthedocs.io/en/latest/pykt.models.html" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 97, + 235, + 211, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 235, + 211, + 247 + ], + "spans": [ + { + "bbox": [ + 97, + 235, + 211, + 247 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 96, + 259, + 496, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 259, + 496, + 316 + ], + "spans": [ + { + "bbox": [ + 96, + 259, + 496, + 316 + ], + "type": "text", + "content": "This work was supported in part by National Key R&D Program of China, under Grant No. 2020AAA0104500; in part by Beijing Nova Program (Z201100006820068) from Beijing Municipal Science & Technology Commission; in part by NFSC under Grant No. 61877029 and in part by Key Laboratory of Smart Education of Guangdong Higher Education Institutes, Jinan University (2022LSYS003)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 97, + 331, + 168, + 343 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 331, + 168, + 343 + ], + "spans": [ + { + "bbox": [ + 97, + 331, + 168, + 343 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 97, + 349, + 498, + 757 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 97, + 349, + 497, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 349, + 497, + 384 + ], + "spans": [ + { + "bbox": [ + 97, + 349, + 497, + 384 + ], + "type": "text", + "content": "Ghodai Abdelrahman and Qing Wang. Knowledge tracing with sequential key-value memory networks. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 175-184, 2019." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 98, + 390, + 498, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 390, + 498, + 425 + ], + "spans": [ + { + "bbox": [ + 98, + 390, + 498, + 425 + ], + "type": "text", + "content": "Hao Cen, Kenneth Koedinger, and Brian Junker. Learning factors analysis-a general method for cognitive model evaluation and improvement. In International Conference on Intelligent Tutoring Systems, pp. 164-175. Springer, 2006." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 97, + 430, + 497, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 430, + 497, + 465 + ], + "spans": [ + { + "bbox": [ + 97, + 430, + 497, + 465 + ], + "type": "text", + "content": "Jiahao Chen, Zitao Liu, Shuyan Huang, Qiongqiong Liu, and Weiqi Luo. Improving interpretability of deep sequential knowledge tracing models with question-centric cognitive representations. In Proceedings of the AAAI Conference on Artificial Intelligence, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 98, + 470, + 496, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 470, + 496, + 495 + ], + "spans": [ + { + "bbox": [ + 98, + 470, + 496, + 495 + ], + "type": "text", + "content": "Penghe Chen, Yu Lu, Vincent W Zheng, and Yang Pian. Prerequisite-driven deep knowledge tracing. In 2018 IEEE International Conference on Data Mining, pp. 39-48. IEEE, 2018." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 98, + 500, + 498, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 500, + 498, + 536 + ], + "spans": [ + { + "bbox": [ + 98, + 500, + 498, + 536 + ], + "type": "text", + "content": "Benoit Choffin, Fabrice Popineau, Yolaine Bourda, and Jill-Jenn Vie. DAS3H: Modeling student learning and forgetting for optimally scheduling distributed practice of skills. 
In Proceedings of The 12th International Conference on Educational Data Mining (EDM 2019), volume 29, pp. 38." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 98, + 541, + 496, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 541, + 496, + 587 + ], + "spans": [ + { + "bbox": [ + 98, + 541, + 496, + 587 + ], + "type": "text", + "content": "Youngduck Choi, Youngnam Lee, Junghyun Cho, Jineon Baek, Byungsoo Kim, Yeongmin Cha, Dongmin Shin, Chan Bae, and Jaewe Heo. Towards an appropriate query, key, and value computation for knowledge tracing. In Proceedings of the Seventh ACM Conference on Learning@ Scale, pp. 341-344, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 98, + 592, + 496, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 592, + 496, + 617 + ], + "spans": [ + { + "bbox": [ + 98, + 592, + 496, + 617 + ], + "type": "text", + "content": "Albert T Corbett and John R Anderson. Knowledge tracing: Modeling the acquisition of procedural knowledge. User Modeling and User-adapted Interaction, 4(4):253-278, 1994." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 98, + 622, + 496, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 622, + 496, + 656 + ], + "spans": [ + { + "bbox": [ + 98, + 622, + 496, + 656 + ], + "type": "text", + "content": "Mingyu Feng, Neil Heffernan, and Kenneth Koedinger. Addressing the assessment challenge with an online system that tutors as it assesses. User Modeling and User-adapted Interaction, 19(3): 243-266, 2009." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 98, + 662, + 496, + 687 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 662, + 496, + 687 + ], + "spans": [ + { + "bbox": [ + 98, + 662, + 496, + 687 + ], + "type": "text", + "content": "Aritra Ghosh, Neil Heffernan, and Andrew S Lan. Context-aware attentive knowledge tracing. 
In ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 98, + 692, + 496, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 692, + 496, + 727 + ], + "spans": [ + { + "bbox": [ + 98, + 692, + 496, + 727 + ], + "type": "text", + "content": "Xiaopeng Guo, Zhijie Huang, Jie Gao, Mingyu Shang, Maojing Shu, and Jun Sun. Enhancing knowledge tracing via adversarial training. In Proceedings of the 29th ACM International Conference on Multimedia, pp. 367-375, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 98, + 733, + 496, + 757 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 733, + 496, + 757 + ], + "spans": [ + { + "bbox": [ + 98, + 733, + 496, + 757 + ], + "type": "text", + "content": "Tanja Käser, Severin Klingler, Alexander G Schwing, and Markus Gross. Dynamic bayesian networks for student modeling. IEEE Transactions on Learning Technologies, 10(4):450-462, 2017." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "spans": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 775, + 304, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 775, + 304, + 784 + ], + "spans": [ + { + "bbox": [ + 292, + 775, + 304, + 784 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 97, + 106, + 496, + 756 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 98, + 106, + 496, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 106, + 496, + 129 + ], + "spans": [ + { + "bbox": [ + 98, + 106, + 496, + 129 + ], + "type": "text", + "content": "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 98, + 137, + 496, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 137, + 496, + 161 + ], + "spans": [ + { + "bbox": [ + 98, + 137, + 496, + 161 + ], + "type": "text", + "content": "Elise Lavoué, Baptiste Monterrat, Michel Desmarais, and Sébastien George. Adaptive gamification for learning environments. IEEE Transactions on Learning Technologies, 12(1):16-28, 2018." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 97, + 168, + 496, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 168, + 496, + 202 + ], + "spans": [ + { + "bbox": [ + 97, + 168, + 496, + 202 + ], + "type": "text", + "content": "Jinseok Lee and Dit-Yan Yeung. 
Knowledge query network for knowledge tracing: How knowledge interacts with skills. In Proceedings of the 9th International Conference on Learning Analytics & Knowledge, pp. 491-500, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 98, + 210, + 495, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 210, + 495, + 243 + ], + "spans": [ + { + "bbox": [ + 98, + 210, + 495, + 243 + ], + "type": "text", + "content": "Qi Liu, Zhenya Huang, Yu Yin, Enhong Chen, Hui Xiong, Yu Su, and Guoping Hu. EKT: Exercise-aware knowledge tracing for student performance prediction. IEEE Transactions on Knowledge and Data Engineering, 33(1):100-115, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 98, + 252, + 496, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 252, + 496, + 296 + ], + "spans": [ + { + "bbox": [ + 98, + 252, + 496, + 296 + ], + "type": "text", + "content": "Yunfei Liu, Yang Yang, Xianyu Chen, Jian Shen, Haifeng Zhang, and Yong Yu. Improving knowledge tracing via pre-training question embeddings. In Proceedings of the Twenty-Ninth International Conference on International Joint Conferences on Artificial Intelligence, pp. 1556-1562, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 98, + 305, + 496, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 305, + 496, + 348 + ], + "spans": [ + { + "bbox": [ + 98, + 305, + 496, + 348 + ], + "type": "text", + "content": "Zitao Liu, Qiongqiong Liu, Jiahao Chen, Shuyan Huang, Jiliang Tang, and Weiqi Luo. pyKT: A python library to benchmark deep learning based knowledge tracing models. In 36th Conference on Neural Information Processing Systems (NeurIPS 2022) Track on Datasets and Benchmarks., 2022." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 98, + 357, + 496, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 357, + 496, + 391 + ], + "spans": [ + { + "bbox": [ + 98, + 357, + 496, + 391 + ], + "type": "text", + "content": "Ting Long, Yunfei Liu, Jian Shen, Weinan Zhang, and Yong Yu. Tracing knowledge state with individual cognition and acquisition estimation. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 173-182, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 98, + 399, + 496, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 399, + 496, + 433 + ], + "spans": [ + { + "bbox": [ + 98, + 399, + 496, + 433 + ], + "type": "text", + "content": "Ting Long, Jiarui Qin, Jian Shen, Weinan Zhang, Wei Xia, Ruiming Tang, Xiuqiang He, and Yong Yu. Improving knowledge tracing with collaborative information. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining, pp. 599-607, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 98, + 441, + 496, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 441, + 496, + 475 + ], + "spans": [ + { + "bbox": [ + 98, + 441, + 496, + 475 + ], + "type": "text", + "content": "Sein Minn, Yi Yu, Michel C Desmarais, Feida Zhu, and Jill-Jenn Vie. Deep knowledge tracing and dynamic student classification for knowledge tracing. In 2018 IEEE International Conference on Data Mining, pp. 1182-1187. IEEE, 2018." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 98, + 483, + 496, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 483, + 496, + 517 + ], + "spans": [ + { + "bbox": [ + 98, + 483, + 496, + 517 + ], + "type": "text", + "content": "Koki Nagatani, Qian Zhang, Masahiro Sato, Yan-Ying Chen, Francine Chen, and Tomoko Ohkuma. 
Augmenting knowledge tracing by considering forgetting behavior. In The World Wide Web Conference, pp. 3101-3107, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 98, + 524, + 496, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 524, + 496, + 558 + ], + "spans": [ + { + "bbox": [ + 98, + 524, + 496, + 558 + ], + "type": "text", + "content": "Hiromi Nakagawa, Yusuke Iwasawa, and Yutaka Matsuo. Graph-based knowledge tracing: modeling student proficiency using graph neural network. In 2019 IEEE/WIC/ACM International Conference on Web Intelligence, pp. 156-163. IEEE, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 98, + 565, + 496, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 565, + 496, + 600 + ], + "spans": [ + { + "bbox": [ + 98, + 565, + 496, + 600 + ], + "type": "text", + "content": "Shalini Pandey and George Karypis. A self-attentive model for knowledge tracing. In 12th International Conference on Educational Data Mining, pp. 384–389. International Educational Data Mining Society, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 98, + 608, + 496, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 608, + 496, + 641 + ], + "spans": [ + { + "bbox": [ + 98, + 608, + 496, + 641 + ], + "type": "text", + "content": "Shalini Pandey and Jaideep Srivastava. RKT: relation-aware self-attention for knowledge tracing. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, pp. 1205–1214, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 98, + 650, + 496, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 650, + 496, + 683 + ], + "spans": [ + { + "bbox": [ + 98, + 650, + 496, + 683 + ], + "type": "text", + "content": "Chris Piech, Jonathan Bassen, Jonathan Huang, Surya Ganguli, Mehran Sahami, Leonidas J Guibas, and Jascha Sohl-Dickstein. 
Deep knowledge tracing. Advances in Neural Information Processing Systems, 28, 2015." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 98, + 691, + 496, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 691, + 496, + 714 + ], + "spans": [ + { + "bbox": [ + 98, + 691, + 496, + 714 + ], + "type": "text", + "content": "Shi Pu, Michael Yudelson, Lu Ou, and Yuchi Huang. Deep knowledge tracing with transformers. In International Conference on Artificial Intelligence in Education, pp. 252-256. Springer, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 98, + 722, + 496, + 756 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 722, + 496, + 756 + ], + "spans": [ + { + "bbox": [ + 98, + 722, + 496, + 756 + ], + "type": "text", + "content": "Sami Sarsa, Juho Leinonen, Arto Hellas, et al. Empirical evaluation of deep learning models for knowledge tracing: Of hyperparameters and metrics on performance and replicability. Journal of Educational Data Mining, 14(2), 2022." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "spans": [ + { + "bbox": [ + 97, + 51, + 285, + 63 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 775, + 302, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 775, + 302, + 784 + ], + "spans": [ + { + "bbox": [ + 292, + 775, + 302, + 784 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 96, + 106, + 497, + 756 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 98, + 106, + 497, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 106, + 497, + 153 + ], + "spans": [ + { + "bbox": [ + 98, + 106, + 497, + 153 + ], + "type": "text", + "content": "Shuanghong Shen, Qi Liu, Enhong Chen, Han Wu, Zhenya Huang, Weihao Zhao, Yu Su, Haiping Ma, and Shijin Wang. Convolutional knowledge tracing: Modeling individualization in student learning process. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 1857-1860, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 98, + 157, + 496, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 157, + 496, + 190 + ], + "spans": [ + { + "bbox": [ + 98, + 157, + 496, + 190 + ], + "type": "text", + "content": "Shuanghong Shen, Qi Liu, Enhong Chen, Zhenya Huang, Wei Huang, Yu Yin, Yu Su, and Shijin Wang. Learning process-consistent knowledge tracing. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining, pp. 1452-1460, 2021." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 97, + 195, + 496, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 195, + 496, + 240 + ], + "spans": [ + { + "bbox": [ + 97, + 195, + 496, + 240 + ], + "type": "text", + "content": "Shuanghong Shen, Zhenya Huang, Qi Liu, Yu Su, Shijin Wang, and Enhong Chen. Assessing student's dynamic knowledge state by exploring the question difficulty effect. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 427-437, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 96, + 245, + 496, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 245, + 496, + 280 + ], + "spans": [ + { + "bbox": [ + 96, + 245, + 496, + 280 + ], + "type": "text", + "content": "J Stamper, A Niculescu-Mizil, S Ritter, G Gordon, and K Koedinger. Algebra I 2005-2006 and Bridge to Algebra 2006-2007. Development data sets from KDD Cup 2010 Educational Data Mining Challenge, 2010." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 97, + 284, + 371, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 284, + 371, + 296 + ], + "spans": [ + { + "bbox": [ + 97, + 284, + 371, + 296 + ], + "type": "text", + "content": "Paul Steif and Norman Bier. Oli engineering statics-fall 2011, 2014." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 98, + 301, + 495, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 301, + 495, + 345 + ], + "spans": [ + { + "bbox": [ + 98, + 301, + 495, + 345 + ], + "type": "text", + "content": "Nguyen Thai-Nghe, Lucas Drumond, Tomás Horváth, Artus Krohn-Grimberghe, Alexandros Nanopoulos, and Lars Schmidt-Thieme. Factorization techniques for predicting student performance. In Educational Recommender Systems and Technologies: Practices and Challenges, pp. 129-153. IGI Global, 2012." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 98, + 351, + 496, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 351, + 496, + 374 + ], + "spans": [ + { + "bbox": [ + 98, + 351, + 496, + 374 + ], + "type": "text", + "content": "Hanshuang Tong, Zhen Wang, Qi Liu, Yun Zhou, and Wenyuan Han. HGKT: Introducing hierarchical exercise graph for knowledge tracing. arXiv preprint arXiv:2006.16915, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 98, + 379, + 497, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 379, + 497, + 412 + ], + "spans": [ + { + "bbox": [ + 98, + 379, + 497, + 412 + ], + "type": "text", + "content": "Chenyang Wang, Weizhi Ma, Min Zhang, Chuancheng Lv, Fengyuan Wan, Huijie Lin, Taoran Tang, Yiqun Liu, and Shaoping Ma. Temporal cross-effects in knowledge tracing. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining, pp. 517-525, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 98, + 417, + 496, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 417, + 496, + 452 + ], + "spans": [ + { + "bbox": [ + 98, + 417, + 496, + 452 + ], + "type": "text", + "content": "Fei Wang, Qi Liu, Enhong Chen, Zhenya Huang, Yuying Chen, Yu Yin, Zai Huang, and Shijin Wang. Neural cognitive diagnosis for intelligent education systems. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pp. 6153-6161, 2020a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 98, + 456, + 495, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 456, + 495, + 500 + ], + "spans": [ + { + "bbox": [ + 98, + 456, + 495, + 500 + ], + "type": "text", + "content": "Zhiwei Wang, Xiaoqin Feng, Jiliang Tang, Gale Yan Huang, and Zitao Liu. Deep knowledge tracing with side information. 
In Artificial Intelligence in Education: 20th International Conference, AIED 2019, Chicago, IL, USA, June 25-29, 2019, Proceedings, Part II 20, pp. 303-308. Springer, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 98, + 507, + 495, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 507, + 495, + 552 + ], + "spans": [ + { + "bbox": [ + 98, + 507, + 495, + 552 + ], + "type": "text", + "content": "Zichao Wang, Angus Lamb, Evgeny Saveliev, Pashmina Cameron, Yordan Zaykov, Jose Miguel Hernández-Lobato, Richard E Turner, Richard G Baraniuk, Craig Barton, Simon Peyton Jones, et al. Instructions and guide for diagnostic questions: The neurips 2020 education challenge. ArXiv preprint, abs/2007.12061, 2020b. URL https://arxiv.org/abs/2007.12061" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 98, + 556, + 495, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 556, + 495, + 600 + ], + "spans": [ + { + "bbox": [ + 98, + 556, + 495, + 600 + ], + "type": "text", + "content": "Yang Yang, Jian Shen, Yanru Qu, Yunfei Liu, Kerong Wang, Yaoming Zhu, Weinan Zhang, and Yong Yu. GIKT: a graph-based interaction model for knowledge tracing. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pp. 299-315. Springer, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 98, + 606, + 495, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 606, + 495, + 640 + ], + "spans": [ + { + "bbox": [ + 98, + 606, + 495, + 640 + ], + "type": "text", + "content": "Chun-Kit Yeung. Deep-IRT: Make deep learning based knowledge tracing explainable using item response theory. In Proceedings of The 12th International Conference on Educational Data Mining (EDM 2019), pp. 683-686, 2019." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 98, + 645, + 495, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 645, + 495, + 678 + ], + "spans": [ + { + "bbox": [ + 98, + 645, + 495, + 678 + ], + "type": "text", + "content": "Chun-Kit Yeung and Dit-Yan Yeung. Addressing two problems in deep knowledge tracing via prediction-consistent regularization. In Proceedings of the Fifth Annual ACM Conference on Learning at Scale, pp. 1-10, 2018." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 98, + 684, + 495, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 684, + 495, + 718 + ], + "spans": [ + { + "bbox": [ + 98, + 684, + 495, + 718 + ], + "type": "text", + "content": "Jiani Zhang, Xingjian Shi, Irwin King, and Dit Yan Yeung. Dynamic key-value memory networks for knowledge tracing. In Proceedings of the 26th International Conference on World Wide Web, pp. 765, 2017." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 98, + 723, + 495, + 756 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 723, + 495, + 756 + ], + "spans": [ + { + "bbox": [ + 98, + 723, + 495, + 756 + ], + "type": "text", + "content": "Moyu Zhang, Xinning Zhu, Chunhong Zhang, Yang Ji, Feng Pan, and Changchuan Yin. Multi-Factors Aware Dual-Attentional Knowledge Tracing. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management, pp. 2588-2597, 2021." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 97, + 51, + 285, + 62 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 51, + 285, + 62 + ], + "spans": [ + { + "bbox": [ + 97, + 51, + 285, + 62 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 775, + 302, + 784 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 775, + 302, + 784 + ], + "spans": [ + { + "bbox": [ + 292, + 775, + 302, + 784 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_content_list.json b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..241a84be4a50bad0c24cd30632f889023555747a --- /dev/null +++ b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_content_list.json @@ -0,0 +1,2085 @@ +[ + { + "type": "text", + "text": "WAV2TOK: DEEP SEQUENCETOKENIZER FOR AUDIO RETRIEVAL", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 147 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Adhiraj Banerjee, Vipul Arora", + "bbox": [ + 181, + 170, + 401, + 185 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Department of Electrical Engineering", + "bbox": [ + 181, + 185, + 431, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Indian Institute of Technology Kanpur, India", + "bbox": [ + 181, + 199, + 477, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{adhiraj,vipular}@iitk.ac.in", + "bbox": [ + 
184, + 213, + 455, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 450, + 263, + 545, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Search over audio sequences is a fundamental problem. In this paper, we propose a method to extract concise discrete representations for audio that can be used for efficient retrieval. Our motivation comes from orthography which represents speech of a given language in a concise and distinct discrete form. The proposed method, wav2tok, learns such representations for any kind of audio, speech or non-speech, from pairs of similar audio. wav2tok compresses the query and target sequences into shorter sequences of tokens that are faster to match. The learning method makes use of CTC loss and expectation-maximization algorithm, which are generally used for supervised automatic speech recognition and for learning discrete latent variables, respectively. Experiments show the consistent performance of wav2tok across two audio retrieval tasks: music search (query by humming) and speech search via audio query, outperforming state-of-the-art baselines.", + "bbox": [ + 228, + 292, + 767, + 460 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 171, + 483, + 336, + 497 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sequence Retrieval aims at retrieving sequences similar to a query sequence, with the constraint that an ordered alignment exists between the query and the target sequence. In this paper, we address the following problem: Can we extract discrete tokens from any continuous signal for the purpose of retrieval of similar signals? This problem has deep connections with tasks such as child language acquisition, music cognition and learning languages without written forms. 
Some direct applications of the proposed task include speech search, where the order of constituent units, such as phonemes, syllables or words, remains same; and music search – query by humming or query by example – where the order of constituent units, such as relative notes or phrases, remains same. Apart from audio, the problem extends to tasks such as handwritten word search and gesture search.", + "bbox": [ + 169, + 513, + 826, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "One can define similarity metrics over sequences using methods based on Dynamic Time Warping (DTW) (Müller, 2007). These methods are inefficient if the sequences are continuous valued and have high sampling rates. Moreover, they depend on matching hand-made features, which are ineffective in the face of high variability of query sequences.", + "bbox": [ + 169, + 645, + 823, + 702 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Problems such as spoken term detection involve detection of a query utterance in a long speech audio. The search space is huge, and performing DTW based search of query takes long time (Rodriguez-Fuentes et al., 2014). A more efficient way of sequence retrieval is by mapping them to sequences of discrete tokens. Automatic speech recognition (ASR) can be employed for this purpose (Mamou et al., 2013). However, ASR training requires knowledge of basic units of transcription. The popularly used units are phonemes and graphemes. This method thus becomes language dependent. Non-linguistic sounds, such as cough and sneeze, could be mapped to certain tokens defined for them. This approach could not be used when precise tokens are not defined, e.g., music search.", + "bbox": [ + 169, + 708, + 823, + 821 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In query by humming based music search, audio is mapped to discrete melody-related tokens, such as notes, and these token sequence are matched for search (Unal et al., 2008). 
However, several music traditions do not have precise transcription systems. There, one can tell if two pieces, or motifs, are similar but cannot precisely transcribe them to tokens. The embellishments used in music could be too dynamic to be transcribed precisely. Moreover, when a musically untrained user sings a query, s/he cannot hit the right notes matching the target song. So the matching could rely on several factors other than notes, such as phonemes of lyrics (Mesaros & Virtanen, 2010), onset times", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "(rhythm) (Kosugi et al., 2000), and note transitions (Ranjan & Arora, 2020). Hence, the tokens to be used may not be derived from notes alone.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this way, each tokenizer - for speech, music or other signals, in general - uses domain-specific hand-made tokens defined by a domain expert. In this paper, we propose a tokenizer to map audio sequences to sequences of discrete tokens with an aim of retrieval. The mapping is learned only from pairs of similar audio sequences. The tokens are not defined manually but correspond to distinct semantic units learned from pairs of similar audio sequences. The method is general and can be applied to signals other than audio. In this paper, we apply the proposed method to speech and music audio search, for the problems of spoken term detection and query by humming, respectively.", + "bbox": [ + 169, + 138, + 823, + 238 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The proposed method, named wav2tok, encodes audio via a BiLSTM (Schuster & Paliwal, 1997) network. 
The encoder-generated representations are then mapped to discrete tokens via a $K$ -means vector quantizer network. Each discrete token corresponds to a discrete representation in the vector quantizer's codebook which is initialized and updated via offline $K$ -means clustering only.", + "bbox": [ + 169, + 243, + 826, + 301 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "wav2tok is trained with pairs of similar audio sequences in a self-supervised fashion without any transcription using a novel training algorithm. For each pair, we average the encoder-generated representations, which map to the same token, by the $K$ -means vector quantizer network to generate a prototype for that token. We then perform a contrastive learning task to increase the similarity between the generated prototype for a particular token and the quantizer codebook discrete representation corresponding to the same token. We simultaneously minimize the edit distance between the token sequences generated from each sequence in the pair via Connectionist temporal classification (CTC) (Graves et al., 2006) framework to constrain both sequences to get mapped to the same token sequence.", + "bbox": [ + 169, + 306, + 823, + 431 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We compare wav2tok to state-of-the-art (SOTA) methods for discrete representation learning, such as wav2vec 2.0, and SOTA ASR models fine-tuned to perform phonetic tokenization. We evaluate the generalization capability of the tokens generated by the models on search experiments, namely, query-by-humming and spoken term detection. 
wav2tok outperforms the baselines in performance and uses much lesser trainable parameters, ensuring faster inference and deployment.", + "bbox": [ + 169, + 438, + 826, + 508 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 171, + 530, + 346, + 544 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Sequence Labelling. With expert-defined tokens, various methods are popularly used for mapping sequences to tokens. In conventional methods, Hidden Markov Models (Rabiner & Juang, 1986) and Conditional Random Fields (Lafferty et al., 2001) have been popularly used for sequence labeling. These methods involve a significant amount of domain knowledge and many assumptions to make tractable models, which are avoided by End-to-End learning models such as Recurrent Neural Networks (RNNs) using Connectionist Temporal Classification framework (Graves et al., 2006). Sequence labeling can be used for sequence retrieval by converting the sequences to tokens, which are easy to search over. But this approach inevitably depends upon expert-defined tokens.", + "bbox": [ + 169, + 561, + 823, + 674 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Unsupervised Speech Representation Learning. Automatic Speech Recognition systems are pretrained on large amounts of untranscribed speech data to generate SOTA continuous representations which encode the slowly varying phoneme features in raw speech. The representations are then mapped to phoneme tokens via Connectionist Temporal Classification (CTC) (Graves et al., 2006) fine-tuning on a small amount of transcribed audio. Works like Contrastive Predictive Coding (CPC) (van den Oord et al., 2018), Autoregressive Predictive Coding (APC) (Chung & Glass, 2020), and wav2vec (Schneider et al., 2019) generate continuous representations with powerful autoregressive models pre-trained to predict future time-step representations. 
Further works started discretizing the continuous representations with vq-VAE (van den Oord et al., 2017) to generate discrete representations for speech.", + "bbox": [ + 169, + 680, + 823, + 820 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Works like vq-wav2vec (Baevski et al., 2019) and vq-APC (Chung et al., 2020) discretize the representations and perform the same prediction tasks as in wav2vec (Schneider et al., 2019) and APC (Chung & Glass, 2020) respectively but over discrete representations. In vq-wav2vec, the discrete representations are generated with either a K-Means Vector Quantizer (Baevski et al., 2019) or Gumbel-Softmax based Vector Quantizer (Baevski et al., 2019). The learned discrete representations are used to pre-train a BERT (Devlin et al., 2018) to generate stronger continuous representations much like BERT pre-training in Natural Language Processing. wav2vec 2.0 (Baevski et al.,", + "bbox": [ + 169, + 825, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2020) uses a Gumble Softmax based Vector Quantizer (Baevski et al., 2019) to generate discrete representations. The training involves masking of spans of time steps and then predicting the correct discrete representations at each masked time step with transformer representation at that time step. In these methods, raw audio is discretized in a latent space to model all possible acoustic units than phonetic or sub-phonetic units. The tokens generated by the vector quantizers aren't constrained to be interpretable and are initialized in large numbers ( $\\sim$ 102.4K codes). 
After pre-training, a subset of these codes or tokens are chosen more often by the vector quantizers and are considered to represent acoustic units. CTC-based fine-tuning with transcription groups these discrete acoustic units to $K$ distinct phonemes or linguistic units as present in the transcriptions.", + "bbox": [ + 169, + 103, + 826, + 229 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Works like HuBERT and wav2vec-Unsupervised learn phonemic units directly. HuBERT (Hsu et al., 2021) pre-trains a transformer network via BERT-like masked prediction task over noisy targets generated with a clustering model trained offline. The targets may be generated with an ensemble of $K$ -means clusterers with $K = \\{100,500\\}$ clusters on MFCC features or transformer representations. wav2vec-Unsupervised (Baevski et al., 2021) learns phonetic tokens adversially from phonemized unlabelled text data. A discriminator identifies if the phoneme sequence generated by model is real or fake based on phonemized unlabelled text.", + "bbox": [ + 169, + 234, + 826, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "All aforementioned approaches use powerful auto-regressive models pre-trained on large amounts of unlabeled audio and fine-tuned on transcribed audio. Our learning approach can learn semantic tokens with small models while training pairwise on small amount of unlabeled audio data.", + "bbox": [ + 169, + 340, + 825, + 383 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Audio Representations for Retrieval. Now Playing (Arcas et al., 2017) and (Chang et al., 2020) use a Neural Network Fingerprinter (NNFP) module outputting representations which are efficient for search in query-by-example tasks where the difference between query and the actual song is pretty minute in comparison to humming where only the melody is sung. 
Now Playing (Arcas et al., 2017) trains representations by optimizing the triplet loss (Schroff et al., 2015) and (Chang et al., 2020) trains representations by simulating the Maximum Inner Product Search (MIPS) on minibatches of representations. For Query by Humming task, (Mostafa et al., 2016) and (Mostafa & Fung, 2017) use deep learning models like DNNs and CNNs to generate representations which they map to MIDI-numbers or note tokens. Such works require note-transcribed data to train the models. For Spoken Term Detection task, approaches like (Zhang & Glass, 2009), (Rodriguez-Fuentes et al., 2014), (Lee et al., 2015), (Ram et al., 2018) convert audio to sequences of feature vectors and apply different variations of DTW based template matching to detect query in long utterances of speech which is time-consuming.", + "bbox": [ + 169, + 388, + 826, + 570 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Cross Domain Alignment. Given a pair of semantically similar inputs for training, tasks such as visual question answering (text and image) and machine translation (text) involve learning an alignment. The alignment here is not ordered and the inputs may be from different modalities. Attention models have been used to find alignment between output entities and input regions (Yao et al., 2018). (Chen et al., 2020) use Gromov-Wasserstein distance between output and input entities to match them. However, there is no notion of tokens there, rather the salient entities in the input are represented as vectors in a graph.", + "bbox": [ + 169, + 575, + 826, + 676 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Graph Matching. Graph Neural Networks (Gori et al., 2005) are used to generate embeddings for graphs. These embeddings are used to perform graph matching to find similarity of structured graphs (Li et al., 2019). However, they perform the matching jointly on the pair of inputs, rather than representing each input independently. 
This makes them unsuitable for the search problem at hand due to large run-time complexity. The distance metrics used for graph matching are based on edit distance (Li et al., 2019) and Wasserstein distance (Chen et al., 2020).", + "bbox": [ + 169, + 680, + 826, + 767 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 PROBLEM STATEMENT", + "text_level": 1, + "bbox": [ + 171, + 796, + 397, + 811 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We aim to map $\\mathcal{X}$ , a sequence of vectors, to $\\tilde{\\mathcal{T}}$ , a sequence of discrete tokens from a finite alphabet $\\mathbb{A}$ , such that the similarity of sequences is preserved in the sense of edit distance. The length of sequence $\\tilde{\\mathcal{T}}$ may be less than or equal to that of the sequence $\\mathcal{X}$ . In other words, given a pair of similar sequences $(\\mathcal{X}_i,\\mathcal{X}_j)$ and sequence $\\mathcal{X}_k$ which is not similar to either sequences in the pair, we want to map them to token sequences such that $ED(\\tilde{\\mathcal{T}}_i,\\tilde{\\mathcal{T}}_j)$ should be less than $\\min \\{ED(\\tilde{\\mathcal{T}}_i,\\tilde{\\mathcal{T}}_k),ED(\\tilde{\\mathcal{T}}_j,\\tilde{\\mathcal{T}}_k)\\}$ , where $ED(\\cdot ,\\cdot)$ is the edit distance between two sequences.", + "bbox": [ + 169, + 832, + 825, + 928 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 960 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4 MODEL ARCHITECTURE", + "text_level": 1, + "bbox": [ + 171, + 102, + 410, + 118 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "wav2tok is comprised of an encoder $f: \\mathbb{X} \\mapsto \\mathbb{Z}$ which takes as input a temporal sequence of audio features $\\mathcal{X} = [\\mathbf{x}_t \\in R^n; t \\in [T]]$ of length $T$ , where 
$\\mathbf{x}_t$ is the feature vector at time step $t$ , and outputs a sequence of L-2 normalised representations $\\mathcal{Z} = [\\mathbf{z}_t = f(\\mathbf{x}_t) \\in R^m; t \\in [T]]$ . The encoder is implemented as a 2-layer BiLSTM followed by an L-2 normalization layer. BiLSTMs summarise information in both directions and encode surrounding context.", + "bbox": [ + 169, + 132, + 823, + 204 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A $K$ -means vector quantizer network $g: \\mathbb{Z} \\mapsto \\mathbb{T}$ then labels sequence $\\mathcal{Z}$ at each time-step with tokens belonging to a finite $K$ -element alphabet $\\mathbb{A} = [K]$ and generates sequence of tokens $\\mathcal{T} = [\\tau_t = g(\\mathbf{z}_t) \\in \\mathbb{A}; t \\in [T]]$ . Network $g$ vector quantizes input $\\mathbf{z}_t$ with a codebook $E = \\{\\mathbf{e}_k \\in \\mathbb{Z}; k \\in [K]\\}$ comprised of $|\\mathbb{A}| = K$ discrete representations which are cluster centroids in representation space $\\mathbb{Z}$ and outputs token $\\tau_t = \\arg \\max_k \\mathbf{z}_t \\cdot \\mathbf{e}_k$ . Note, here the dot product gives a cosine similarity score since both the vectors are L-2 normalized, as a result, $\\mathbf{e}_k \\in E$ closest to $\\mathbf{z}_t$ is chosen as its discrete representation and index $k$ as its token $\\tau_t$ . The $K$ discrete representations in network $g$ are trainable parameters.", + "bbox": [ + 169, + 209, + 826, + 321 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A compressor $\\mathcal{C}$ compresses sequence of tokens $\\mathcal{T}$ to sequence $\\tilde{\\mathcal{T}}$ of length $\\tilde{T} \\leq T$ by deleting all consecutive repetitions of tokens. $\\mathcal{C}$ also generates the corresponding compressed sequence $\\tilde{\\mathcal{Z}}$ of length $\\tilde{T}$ by averaging representations $\\mathbf{z}_t \\in \\mathcal{Z}$ over the consecutive tokens and L-2 normalising the averaged representation. 
Figure 1a presents an illustration demonstrating our model architecture.", + "bbox": [ + 169, + 329, + 825, + 391 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "5 TRAINING", + "text_level": 1, + "bbox": [ + 171, + 409, + 294, + 425 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "wav2tok is trained on pairs of sequences of audio features $(\\mathcal{X},\\mathcal{X}^{\\prime})$ where the raw audio corresponding to $\\mathcal{X}^{\\prime}$ is an augmented replica of that corresponding to $\\mathcal{X}$ . We apply either pitch shift or time stretch or both augmentations to raw audio to generate its augmented replica. $\\mathcal{X}$ and $\\mathcal{X}^{\\prime}$ may differ in sources as well, i.e. a different person may sing the recording corresponding to $\\mathcal{X}^{\\prime}$ .", + "bbox": [ + 169, + 440, + 823, + 497 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The discrete representations in quantizer $g$ codebook $E$ are initialized as $K$ centroids obtained via offline $K$ -means clustering over freshly initialized encoder-generated representations. Given $(\\mathcal{X},\\mathcal{X}^{\\prime})$ , encoder $f$ generates sequence of representations $\\mathcal{Z}$ from input $\\mathcal{X}$ and $\\mathcal{Z}^{\\prime}$ from $\\mathcal{X}^{\\prime}$ . Quantizer $g$ generates a sequence of tokens $\\mathcal{T}$ from input $\\mathcal{Z}$ and $\\mathcal{T}^{\\prime}$ from $\\mathcal{Z}^{\\prime}$ via cosine similarity-based comparison with codebook vectors $e\\in E$ initialized via offline clustering over freshly initialized representation space $Z$ . 
Compressor $\\mathcal{C}$ compresses sequence of tokens $\\mathcal{T}$ to sequence $\\tilde{\\mathcal{T}}$ and $\\mathcal{T}^{\\prime}$ to $\\tilde{\\mathcal{T}}^{\\prime}$ .", + "bbox": [ + 169, + 503, + 823, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We average all encoder-generated representations in pair $(\\mathcal{Z},\\mathcal{Z}^{\\prime})$ which map to the same token, say $\\tau$ , to generate a prototype for $\\tau$ . We then perform a contrastive task where we compare the prototype with each of the $K$ discrete representations in codebook $E$ and increase its similarity with the discrete representation corresponding to $\\tau$ . We also increase the likelihood that wav2tok maps pair $(\\mathcal{X},\\mathcal{X}^{\\prime})$ to the same token sequence via CTC framework to minimize $ED(\\tilde{\\mathcal{T}},\\tilde{\\mathcal{T}}^{\\prime})$ .", + "bbox": [ + 169, + 595, + 823, + 669 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our loss function is defined as,", + "bbox": [ + 171, + 674, + 380, + 688 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {m} \\left(\\mathcal {X}, \\mathcal {X} ^ {\\prime}\\right) + \\alpha \\mathcal {L} _ {c t c} \\left(\\mathcal {X}, \\tilde {\\mathcal {T}} ^ {\\prime}\\right) + \\beta \\mathcal {L} _ {c t c} \\left(\\mathcal {X} ^ {\\prime}, \\tilde {\\mathcal {T}}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 693, + 823, + 710 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_m$ is loss defined for contrastive task, $\\mathcal{L}_{ctc}$ is the loss maximising aforementioned likelihood, and $\\alpha, \\beta$ are positive constants. We optimize this loss function in a manner similar to the Expectation Maximization algorithm. 
The clustering is used as the E-step to update the discrete representations in quantizer $g$ codebook, while gradient descent over $\\mathcal{L}$ acts as the M-step.", + "bbox": [ + 169, + 715, + 823, + 772 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Contrastive Loss. Let the set of unique tokens occurring in pair $(\\tilde{\\mathcal{T}},\\tilde{\\mathcal{T}}^{\\prime})$ be $\\mathcal{U}\\subset [K],|\\mathcal{U}| = K^{\\prime}\\leq K$ . We generate a list of token prototypes $\\mathcal{P} = \\{\\mathbf{p}_{\\tau};\\tau \\in \\mathcal{U}\\}$ where $\\mathbf{p}_{\\tau}$ is L-2 normalised mean of representations in $\\{\\mathbf{z}\\in \\{\\mathcal{Z};\\mathcal{Z}^{\\prime}\\} :g(\\mathbf{z}) = \\tau \\}$ . Figure 1b presents an illustration demonstrating how we generate list of token prototypes $\\mathcal{P}$ .", + "bbox": [ + 169, + 779, + 826, + 837 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given $\\mathbf{p}_{\\tau} \\in \\mathcal{P}$ , we perform a contrastive task to increase its similarity with discrete representation $\\mathbf{e}_{\\tau} \\in E$ . To compare $\\mathbf{p}_{\\tau}$ with the codebook, metrics such as cosine similarity and Euclidean distance could be used. 
However, we find that using the following parameterized score for this purpose gives better performance,", + "bbox": [ + 169, + 843, + 823, + 898 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {s} _ {\\tau , k} = \\sigma \\left(W \\cdot \\left(\\mathbf {p} _ {\\tau} - s g \\left(\\mathbf {e} _ {k}\\right)\\right)\\right) \\in [ 0, 1 ] \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 904, + 823, + 921 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/956cf898b477fc6de2c77a42c7b5364a007a69cb0518830ef4962dd076047183.jpg", + "image_caption": [ + "(a) Model Architecture" + ], + "image_footnote": [], + "bbox": [ + 173, + 116, + 357, + 212 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/613cca31105fe5eba5f895b8b1438042f49aee518aa385e1b23644e7a0f81c62.jpg", + "image_caption": [ + "(b) Generation of Prototype list $\\mathcal{P}$", + "Figure 1: $\\mathcal{X}'$ is an augmented replica of $\\mathcal{X}$ . 1a illustrates our model architecture. 1b demonstrates the generation of $\\mathcal{P}$ required for calculation of $\\mathcal{L}_m$ . 1c demonstrates our likelihood loss calculation." 
+ ], + "image_footnote": [], + "bbox": [ + 372, + 116, + 581, + 212 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/38c36d594d6a844f7b0098a07399ada42bc53ecd185fbe077c70093962fa57c4.jpg", + "image_caption": [ + "(c) Likelihood Loss Calculation" + ], + "image_footnote": [], + "bbox": [ + 602, + 99, + 810, + 212 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $sg(x) \\equiv x, \\frac{d}{dx} sg(x) \\equiv 0$ is the stop-gradient operator, $\\sigma(\\cdot)$ is sigmoid function generating a score in the range [0, 1] and $W \\in R^{1 \\times d}$ is a parameter matrix. $\\mathbf{s}_{\\tau,k}$ acts as a parameterized similarity score between $\\mathbf{p}_{\\tau}$ and discrete representation $\\mathbf{e}_k \\in E$ . We define our contrastive loss $\\mathcal{L}_m$ as,", + "bbox": [ + 169, + 296, + 823, + 343 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {m} \\left(\\mathcal {X}, \\mathcal {X} ^ {\\prime}\\right) = - \\sum_ {\\tau \\in \\mathcal {U}} \\log \\frac {\\exp \\left(\\mathbf {s} _ {\\tau , \\tau}\\right)}{\\sum_ {k = 1} ^ {K} \\exp \\left(\\mathbf {s} _ {\\tau , k}\\right)} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 356, + 349, + 825, + 386 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Likelihood Loss. We maximize the likelihood that sequence $\\mathcal{X}$ maps to token sequence $\\tilde{\\mathcal{T}}'$ , which corresponds to $\\mathcal{X}'$ , via the CTC framework (see Figure 1c). It puts a constraint to generate the same token sequence for $\\mathcal{X}$ and $\\mathcal{X}'$ . We calculate the probability of $\\mathbf{x}_t$ mapping to token $\\tau_t = k$ as $l_{t,k} = \\frac{\\exp(f(\\mathbf{x}_t)\\cdot sg(e_k))}{\\sum_{i=1}^K \\exp(f(\\mathbf{x}_t)\\cdot sg(e_i))}$ . 
The likelihood $P(\\tilde{\\mathcal{T}}'|\\mathcal{X})$ is then calculated as a sum of probabilities of all $T$ -length paths $\\pi$ over tokens $\\tau \\in \\mathbb{A}$ such that $\\mathcal{C}(\\pi) = \\tilde{\\mathcal{T}}'$ . The loss is defined as,", + "bbox": [ + 169, + 402, + 823, + 484 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {c t c} (\\mathcal {X}, \\tilde {\\mathcal {T}} ^ {\\prime}) = - \\log \\sum_ {\\pi \\in C ^ {- 1} (\\tilde {\\mathcal {T}} ^ {\\prime})} P (\\pi | \\mathcal {X}) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 491, + 825, + 526 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the path probabilities are calculated over token probability scores in sequence $\\mathbf{l} = \\{l_t \\in R^K; t \\in [T]\\}$ via CTC forward-backward framework (Graves et al., 2006) without the use of blanks. We present the CTC forward and backward variables for our use case in Appendix B.", + "bbox": [ + 169, + 541, + 823, + 585 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Clustering. We perform offline $K$ -means clustering on a subset of encoder representations during initialization of our network and at regular intervals during training to set the discrete representations in codebook $E$ of network $g$ . Initializing the clusters in this way prevents wav2tok from converging to a local optimum during the matching task, as is the case we found with random initialization of centroids. The intermittent clustering during training iteratively refines the discrete representations and prevents codebook collapse. 
We use the sklearn library to perform $K$ -means clustering.", + "bbox": [ + 169, + 590, + 823, + 675 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We train wav2tok using the ADAM (Kingma & Ba, 2017) optimizer and a linear learning schedule with a learning rate of 0.001 and $8\\%$ of the training steps as warm-up steps.", + "bbox": [ + 169, + 681, + 823, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "6 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 171, + 729, + 326, + 744 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We test the performance of tokens and encoder-generated continuous representations of wav2tok in audio retrieval. We perform Query by Humming (QbH) and Spoken Term Detection experiments to evaluate the performance of wav2tok in comparison to the baselines.", + "bbox": [ + 169, + 761, + 823, + 804 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "6.1 MUSIC MELODY SEARCH: QUERY BY HUMMING", + "text_level": 1, + "bbox": [ + 171, + 820, + 555, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Task. Given a test query audio, we are to find the audio with the most similar melody in the search audio database.", + "bbox": [ + 169, + 845, + 823, + 873 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Experiment Details. We use the MIR-QbSH dataset which is composed of 4431 humming audio recordings of $30s$ duration corresponding to 48 songs. Each song is sung by several individuals. All individuals sing the same part of the song. 
The recordings have variations in the environments", + "bbox": [ + 169, + 881, + 825, + 924 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "they were recorded in, tonal qualities, voices, pitch, and time stretch. We train our models on hums of 40 songs in MIR-QbSH dataset and evaluate search performance on hums of the remaining 8 songs. The training dataset has 1970 hums for training and 676 for validation. The test dataset has 225 hums as a search database and 659 query hums. We evaluate the performance of our models in identifying which song a given query corresponds to via comparison with all sequences in the search database. Each model converts all the audio in our test dataset to sequences of tokens or representations. Each query sequence is compared to all sequences in the search database via Edit Distance (ED) (if tokens) or DTW (if representations). The song id of the most similar sequence in the search database is then selected as query song id. We calculate Mean Reciprocal Ranking (MRR) score with ground-truth song id of the queries for evaluation. The Reciprocal Ranking (RR) score is given as $\\frac{1}{r}$ if the $r^{th}$ most similar sequence in search database has same song id as query.", + "bbox": [ + 169, + 103, + 826, + 258 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "All the audio recordings are converted to Short Term Fourier Transform (STFT) matrices before being passed as inputs to our models. 
The STFT matrices are computed with 513 frequency bins, a window length of 1024 samples (summarising 128 ms of audio), and hop length of 512 samples.", + "bbox": [ + 169, + 263, + 823, + 308 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.2 SPOKEN TERM DETECTION", + "text_level": 1, + "bbox": [ + 171, + 325, + 405, + 339 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Task. Given a test query audio, we are to detect its occurrence in a long utterance.", + "bbox": [ + 169, + 353, + 712, + 369 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Experiment Details. We use the TIMIT dataset which is composed of 6300 utterances of English speech with time-aligned word transcriptions. We choose 59 most-occurring words with more than 2 characters as keywords and all others as non-keywords. We use utterances of random sentences formed with 6 words sampled from a subset of 25 keywords for training and evaluation on STD experiments for the detection of the remaining 34 keywords. The test dataset is composed of 337 utterances corresponding to the 34 queries and 100 long utterances per query, with half containing a single occurrence of query amongst non-keywords and the other half containing only non-keywords. Given a query and a long utterance, we convert both to sequences of tokens using each audio tokenizer. We perform approximate string matching (Hall & Dowling, 1980) for detection of query in the utterance. The STFT matrix inputs to the models are computed with 185 frequency bins, a window length of 368 samples (summarising $23\\mathrm{ms}$ of audio), and a hop length of 92 samples.", + "bbox": [ + 169, + 373, + 826, + 527 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.3 BASELINES", + "text_level": 1, + "bbox": [ + 171, + 547, + 294, + 561 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Triplet. We train encoder $f: \\mathbb{X} \\mapsto \\mathbb{Z}$ to generate L-2 normalized continuous representations for retrieval. 
Encoder $f$ is trained via optimizing the triplet Loss (Schroff et al., 2015) as done in training an NNFP in Now Playing (Arcas et al., 2017). Given pair of similar sequences $(\\mathcal{X}, \\mathcal{X}')$ , encoder $f$ generates sequences $\\mathcal{Z}$ and $\\mathcal{Z}'$ . We form a mini-batch of size $N$ of triplets $\\{\\mathbf{z}, \\mathbf{z}^+, \\mathbf{z}^-\\}$ where representation $\\mathbf{z}$ is sampled from sequence $\\mathcal{Z}$ , $\\mathbf{z}^+$ and $\\mathbf{z}^-$ are positive and negative samples respectively for $\\mathbf{z}$ sampled from sequence $\\mathcal{Z}'$ . The loss is defined as, $\\mathcal{L}_{\\text{Triplet}} = \\sum_{i=1}^{N} \\max \\{||\\mathbf{z}_i - \\mathbf{z}_i^+|| - ||\\mathbf{z}_i - \\mathbf{z}_i^-|| + m, 0\\}$ , where $m$ is a margin for similarity.", + "bbox": [ + 169, + 573, + 823, + 678 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "MIPS. We train encoder $f: \\mathbb{X} \\mapsto \\mathbb{Z}$ to generate L-2 normalized continuous representations for retrieval. Encoder $f$ is trained via simulation of MIPS (Mussmann & Ermon, 2016) on mini-batches of representations as proposed by (Chang et al., 2020). Given pair of similar sequences $(\\mathcal{X}, \\mathcal{X}')$ , encoder $f$ generates sequences $\\mathcal{Z}$ and $\\mathcal{Z}'$ . We form a mini-batch of size $N$ of pairs of $\\{\\mathbf{z}, \\mathbf{z}^+\\}$ where encoder generated representation $\\mathbf{z}$ is sampled from sequence $\\mathcal{Z}$ and $\\mathbf{z}^+$ is a positive for $\\mathbf{z}$ sampled from $\\mathcal{Z}'$ . The loss is defined as, $\\mathcal{L}_{\\mathrm{MIPS}} = -\\sum_{i=1}^{N} \\log \\frac{\\exp(\\mathbf{z}_i \\cdot \\mathbf{z}_i^+)}{\\sum_{j \\neq i} (\\exp(\\mathbf{z}_i \\cdot \\mathbf{z}_j^+) + \\exp(\\mathbf{z}_i \\cdot \\mathbf{z}_j))}$ .", + "bbox": [ + 169, + 681, + 823, + 780 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "wav2vec2. We train our audio tokenizer via wav2vec 2.0 (Baevski et al., 2020) learning framework. 
Quantizer $g$ in our audio tokenizer is chosen to be a Gumbel Softmax-based Vector Quantizer (See Appendix C for details) as used in (Baevski et al., 2020) but with a single codebook with $K$ members. Given sequence $\\mathcal{X}$ , encoder $f$ outputs sequence of L-2 normalised representations $\\mathcal{Z}$ of length $T$ . Quantizer $g$ outputs sequence of discrete representations $\\mathcal{Q} = \\{q_t = g(z_t \\in \\mathcal{Z}); t = 1, \\dots, T\\}$ . We mask spans of 10 time steps with random starting indices in sequence $\\mathcal{Z}$ and then pass the new sequence to a transformer network $h: \\mathbb{Z} \\mapsto \\mathbb{O}$ which generates a sequence of contextualized representations $\\mathcal{O} = \\{\\mathbf{o}_t = h(z_t \\in \\mathcal{Z}); t = 1, \\dots, T\\}$ . For transformer output $\\mathbf{o}_t$ over masked time step $t$ , we identify the true discrete representation $\\mathbf{q}_t$ from a set $D_t$ composed of $\\mathbf{q}_t$ and $D$ distractors which are discrete representations sampled from other time steps. The loss is defined as,", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$\\mathcal{L}_w(\\mathbf{o}_t,\\mathcal{D}_t) = -\\log \\frac{\\exp(sim(\\mathbf{o}_t,\\mathbf{q}_t))}{\\sum_{\\tilde{q}\\in\\mathcal{D}_t}\\exp(sim(\\mathbf{o}_t,\\tilde{q}))} + \\mathcal{L}_d$ where $sim(a,b) = \\frac{a^Tb}{||a||||b||}$ is cosine similarity and $\\mathcal{L}_d$ is codebook diversity loss.", + "bbox": [ + 169, + 99, + 823, + 137 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "wav2vec2P. 
We train wav2vec2 audio tokenizer with our variation of wav2vec 2.0 (Baevski et al., 2020) learning framework which learns discrete representations from pairs of similar sequences. Given pair $(\\mathcal{X},\\mathcal{X}^{\\prime})$ , encoder $f$ outputs sequences $\\mathcal{Z}$ of length $T$ and $\\mathcal{Z}'$ of length $T'$ respectively. Assuming $T\\leq T'$ , we generate sequence $\\mathcal{Z}^+$ of length $T$ whose $t$ time step element $\\mathbf{z}_t^+$ is a positive for $\\mathbf{z}_t\\in \\mathcal{Z}$ sampled from sequence $\\mathcal{Z}'$ . Gumbel Softmax-based Vector Quantizer $g$ quantizes each representation in sequence $\\mathcal{Z}^+$ to generate sequence $\\mathcal{Q}^+$ . We mask sequence $\\mathcal{Z}$ and $\\mathcal{Z}^+$ at the same time steps. Transformer $h$ inputs masked sequences and generate sequences $\\mathcal{O}$ and $\\mathcal{O}^+$ . For masked time step $t$ , we use transformer output $\\mathbf{o}_t$ to identify $\\mathbf{q}_t^+\\in \\mathcal{Q}^+$ from set $\\mathcal{D}_t^+$ with distractors sampled from sequence $\\mathcal{Q}^+$ and transformer output $\\mathbf{o}_t^+$ to identify $\\mathbf{q}_t\\in \\mathcal{Q}$ from set $\\mathcal{D}_t$ with distractors sampled from sequence $\\mathcal{Q}$ . The loss is defined as, $\\mathcal{L}_{wP} = \\mathcal{L}_w(\\mathbf{o}_t,\\mathcal{D}_t^+) + \\mathcal{L}_w(\\mathbf{o}_t^+, \\mathcal{D}_t)$ .", + "bbox": [ + 169, + 143, + 826, + 289 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "wav2vec2-O. The original wav2vec 2.0 base model with 12 Transformer blocks and $95M$ parameters as proposed by (Baevski et al., 2020). It is pre-trained on 960 hours of LibriSpeech data and fine-tuned on TIMIT dataset. It uses $K = 32$ tokens for tokenization.", + "bbox": [ + 169, + 294, + 823, + 335 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "wav2vec2-Multi. 
A wav2vec 2.0 large model with 24 Transformer blocks and $317M$ parameters pre-trained on 53 languages as proposed by (Conneau et al., 2020). It is fine-tuned on Common Voice to detect all possible phonemes in training languages with $K = 392$ tokens.", + "bbox": [ + 169, + 343, + 823, + 387 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Triplet and MIPS use a 2-layer BiLSTM as encoder with $3.6M$ parameters. We use the LAMB optimizer (You et al., 2020) and a Cosine Annealing Learning Schedule (Loshchilov & Hutter, 2017) with a learning rate restart of 0.0001 to train them. wav2vec2 and wav2vec2P use a 2-layer BiLSTM encoder with $3.6M$ parameters to generate latent representations and 3 Transformer blocks with $8.5M$ parameters. Both are trained using the ADAM (Kingma & Ba, 2017) optimizer and a linear learning schedule with a learning rate of 0.001 and $8\\%$ of the training steps as warm-up steps. Proposed wav2tok uses only a 2-layer BiLSTM as encoder with $3.6M$ parameters.", + "bbox": [ + 169, + 392, + 825, + 491 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "7 RESULTS", + "text_level": 1, + "bbox": [ + 171, + 510, + 284, + 523 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "7.1 MUSIC MELODY SEARCH: QUERY BY HUMMING", + "text_level": 1, + "bbox": [ + 171, + 541, + 553, + 556 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We present search performances for 3 settings of query namely- Query with no augmentation or Vanilla Query (V), Time Stretched Query (TS), and Pitch Shifted Query (PS). Time stretch and pitch shift are the most common augmentations that may be faced in queries by humming data. No augmentations were applied to audio in search database. Evaluations are performed on sequences corresponding to songs not seen during training. The results present the generalizability of the tokens or representations generated by the models. 
We set the number of tokens as $K = 25$ for wav2tok, wav2vec2, and wav2vec2P (See Appendix A.2 for experiments to support our choices).", + "bbox": [ + 169, + 566, + 826, + 667 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quality of Tokenization. Table 1 presents the performance of the sequence of tokens $\\tilde{\\mathcal{T}}$ generated by the audio tokenizers on ED-based similarity search. Tokens generated by wav2tok present good generalization capabilities in terms of MRR and outperform all the baselines. It generates time and pitch invariant tokens as we see no drop in performance when either augmentation is applied to query. wav2vec2-O is trained on English speech only. The tokens generated by it do not contain much melodic information but are robust to augmentations. The multilingual training of wav2vec2-Multi infuses both melodic and phonetic information to its 392 tokens, thereby giving good performance. wav2tok outperforms both wav2vec2-O and wav2vec2-Multi given its pairwise training which allows it to infuse more melodic information to the tokens while also being trained on a small amount of unlabelled data. The Gumbel Softmax-based quantizer in wav2vec2 and wav2vec2P isn't ideal for infusing melodic information to tokens but it does infuse phonetic information as will be seen in Section 7.2. We compare the tokens with representations learned by MIPS and Triplet evaluated on DTW-based similarity search. The continuous representations present sub-par generalizations to unseen songs. We compare wav2tok with SOTA melody extraction algorithm proposed in (Salamon & Gomez, 2012) which converts hums to MIDI sequences. wav2tok generates token sequences much smaller than the respective MIDI sequences and outperforms the MIDI tokens in search performance, search time, and robustness. In addition, wav2tok outperforms the algorithm in inference time. 
We further compare wav2tok with SOTA QbH system proposed in (Mostafa &", + "bbox": [ + 169, + 672, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fung, 2017). In our implementation, we map audio to MIDI sequences using the aforementioned SOTA melody extraction algorithm instead of a CNN. Given MIDI sequence 53, 53, 58, 50 with durations 0s, 0.5s, 1s, 2s, a Relative Note sequence is generated as $(0,0)$ , $(0,0.5)$ , $(5,1)$ , $(-8,2)$ over which DTW is performed for retrieval. wav2tok tokens outperform the SOTA QbH system in both performance and robustness; the performance of the latter drops drastically with time stretch.", + "bbox": [ + 169, + 103, + 823, + 176 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present the performances of the uncompressed sequences $\\mathcal{T}$ and $\\mathcal{Z}$ and compressed sequence $\\tilde{\\mathcal{Z}}$ generated by the audio tokenizers in Appendix A.1. We observe a drop in performance for all audio tokenizers when we apply sequence compression to sequences $\\mathcal{T}$ and $\\mathcal{Z}$ . wav2tok outperforms all the baselines and generates superior-quality of continuous representations and discrete tokens.", + "bbox": [ + 169, + 181, + 823, + 239 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Search Time. Table 1 presents the search time taken for similarity search over the tokens or representations generated by the models. The search time taken per query is 2 order of magnitude lesser for ED-based Search over compressed sequence of tokens $\\tilde{T}$ than standard DTW-based Search over continuous representations $\\mathcal{Z}$ . 
The pre-trained models being fine-tuned on transcribed audio give the best tokens in terms of compression and search time. wav2tok gives comparable tokens but outperforms the pre-trained models in inference time.", + "bbox": [ + 169, + 244, + 826, + 330 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/76e7f9b37d8b6fd965a383a0746696f2a952d8bfc38c211ca43a859aec06b334.jpg", + "table_caption": [ + "Table 1: Quality of Tokenization" + ], + "table_footnote": [], + "table_body": "
ModelV (MRR)TS (MRR)PS (MRR)Search Time (s)Infer (s)
MIDI ED0.750.640.723.840.62
Relative Note DTW0.840.740.80.020.62
Triplet DTW0.50.480.53.50.1
MIPS DTW0.60.550.58
wav2vec2 ED0.660.630.640.060.17
wav2vec2P ED0.690.650.67
wav2vec2-O ED0.720.720.710.010.43
wav2vec2-Multi ED0.820.820.821.2
wav2tok ED0.840.840.840.040.14
", + "bbox": [ + 173, + 378, + 514, + 498 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c7c7b6dd5efd3e6191d8c24bc116b74e03350dbd4f4020547b79ff27bf75d8cf.jpg", + "table_caption": [ + "Table 2: Ablation Studies and Some Variations" + ], + "table_footnote": [], + "table_body": "
ModelV (MRR)TS (MRR)PS (MRR)
log-mel DTW0.720.70.67
vq-log-mel ED0.710.60.62
wav2tok+NoSim ED0.730.730.72
wav2tok+Cos ED0.790.760.77
wav2tok+CTC ED0.640.620.63
wav2tok+NewInit ED0.770.760.78
wav2tok+MIR1K ED0.720.640.67
wav2tok ED0.840.840.84
", + "bbox": [ + 532, + 388, + 812, + 497 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation Studies. Query by humming involves similarity based on melody information, which is carried by the semantic pairing of the audio in training data. We constrain this pairing to include sequences not semantically similar and call this model wav2tok+NoSim. We optimize the contrastive loss $\\mathcal{L}_m$ to train the model. The results are shown in Table 2 (full table in Appendix A.3). There is a significant drop in token robustness and performance but the representations suffer a small drop (see Appendix A.3). Hence, although the representation space may be well clustered, wav2tok is able to add more semantics to the tokens as it is being trained with pairs of similar sequences in comparison to wav2tok+NoSim. We train wav2tok with cosine similarity scores instead of a parameterized score (wav2tok+Cos). The drop in performance validates the enhancement brought about by using a parameterized score. We also train wav2tok with $\\mathcal{L}_{ctc}$ only (wav2tok+CTC). The CTC loss considers all possible paths which compress to the target label sequence. As a result, the learnt tokens aren't much semantic. The use of both losses gives the best tokens.", + "bbox": [ + 169, + 512, + 826, + 679 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Some Variations. In wav2tok+NewInit, we associate the discrete representations with $K$ centroids in the input space $\\mathbb{X}$ . Such association does not initialize our tokenizer with optimal centroids which cluster the space $\\mathbb{Z}$ perfectly. This results in a significant drop in performance and robustness as shown in Table 2. We train wav2tok on MIR-1K dataset (wav2tok+MIR1K) which is composed of polyphonic music recordings of 1000 distinct songs. The tokens generalize well to monophonic hums in MIR-QbSH dataset giving a comparable performance to MIDI tokens. 
This validates that wav2tok tokens do learn melodic information and are robust to variations incurred in hums. We further compare wav2tok with log-mel features and token sequences (with no compression) obtained via quantization of log-mel features. wav2tok tokens outperform both.", + "bbox": [ + 169, + 686, + 826, + 811 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7.2 SPOKEN TERM DETECTION", + "text_level": 1, + "bbox": [ + 171, + 828, + 405, + 842 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Quality of Tokenization. Table 3 presents the quality of tokenization of the query keywords by the models evaluated in the Spoken Term Detection experiments. We present the performances of wav2vec2, wav2vec2P, wav2vec2-O, wav2vec2-Multi and proposed wav2tok. We conduct search experiments on a test dataset composed of a search database of 337 utterances of the 34 keywords used as queries in the STD experiments and 1289 query utterances. We identify the keyword to", + "bbox": [ + 169, + 854, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "which each query corresponds to via comparison to all the 337 utterances in the search database via ED-based similarity score. The word id of the most similar utterance is selected as the word to which the query corresponds to. We set $K = 40$ equivalent to the number of phonemes in English.", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "wav2tok gives the best performance in terms of MRR scores. It outperforms huge models like wav2vec2-O and wav2vec2-Multi which are fine-tuned for the task of phonetic tokenization of speech audio while using a small number of parameters. 
wav2vec2 and wav2vec2P also outperform wav2vec2-Multi and wav2vec2-O while using smaller number of parameters. wav2vec2-O and wav2vec2-Multi use a blank token to handle consecutive occurrences of the same tokens and to label background noise. The utterances of each keyword in the test dataset are very small in time duration. This causes wav2vec2-O to confuse word utterances as background noise. It generates a sequence of blank tokens and performs poorly in search. wav2vec2-Multi using a larger number of phonetic tokens does not suffer this issue. wav2tok, wav2vec2, and wav2vec2P have no such blank token. This brings a drop in search performance with sequence compression. We further present the performance of wav2tok trained on a much larger LibriSpeech 100 hours dataset (wav2tok+Libri). It is able to outperform wav2vec2-O and give comparable performance to wav2vec2-Multi.", + "bbox": [ + 169, + 152, + 826, + 321 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/e6f142a9c1666ec04183f11cebc0613dea09545db55a754084259de32b78cf7b.jpg", + "table_caption": [ + "Table 3: Quality of Tokenization for speech" + ], + "table_footnote": [], + "table_body": "
ModelNormal (MRR)Compressed (MRR)
log-mel DTW0.7-
wav2vec2 ED0.680.63
wav2vec2P ED0.70.65
wav2vec2-O ED0.40.4
wav2vec2-Multi ED0.670.67
wav2tok ED0.740.66
wav2tok+Libri ED0.640.6
", + "bbox": [ + 187, + 387, + 491, + 506 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/e4d86e5ffacbb40a2cdb139ac0a4748f466edd55cc3ea902422e66e4a4a60366.jpg", + "table_caption": [ + "Table 4: Spoken Term Detection" + ], + "table_footnote": [], + "table_body": "
ModelED(F1)Search Time(s)DTW(F1)Search Time(s)
log-mel DTW--0.410.003
wav2vec20.640.0660.460.1
wav2vec2P0.640.47
wav2vec2-O0.610.290.430.23
wav2vec2-Multi0.630.720.480.66
wav2tok0.650.0640.520.09
wav2tok+Libri0.630.440.1
", + "bbox": [ + 511, + 386, + 820, + 506 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Spoken Term Detection. We convert the query word utterance and the long utterance in to sequences of tokens by all our models and detect the occurrence of the query via approximate string matching. We use fuzzysearch library to perform approximate string matching. It automatically chooses the fastest algorithm for matching. Table 4 presents the performance of wav2vec2, wav2vec2P, wav2vec2-O, wav2vec2-Multi, and proposed wav2tok in STD. All the models give a comparable performance in terms of F1- score with wav2tok performing slightly better. We also implement the STD system proposed in (Anguera & Ferrarons, 2013) which performs highly competitive STD via subsequence DTW (S-DTW) over gaussian posterior features. In our implementation, we extract the posterior features with SOTA ASR models like wav2vec2-O and wav2vec2-Multi. The results are presented in the DTW column in Table 4. Note, the results for other models in same column are for STD via S-DTW over representations. We observe STD over tokens to give better F1-score.", + "bbox": [ + 169, + 536, + 826, + 703 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "8 CONCLUSION AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 171, + 742, + 495, + 758 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we present an audio sequence tokenizer wav2tok that generates semantically meaningful ordered representations (or tokens) that can be used for efficient retrieval by query sequences. The model learns only from pairs of semantically similar sequences and outperforms state-of-the-art approaches for spoken term detection and query by humming. One may apply more efficient search algorithms such as locality-sensitive hashing and longest common subsequence search on the generated tokens to further speed up the search. 
The proposed framework can also be extended to image and video retrieval, as they also have spatial ordering. We would like to investigate the domain-specific, i.e., linguistic or musicological, aspects of the extracted tokens. For instance, during retrieval, the matching algorithm assumes all the tokens to be equidistant from each other. One may study or use the metric space of these tokens.", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "9 REPRODUCIBILITY", + "text_level": 1, + "bbox": [ + 171, + 102, + 362, + 118 + ], + "page_idx": 9 + }, + { + "type": "ref_text", + "text": "The codes are available in https://github.com/madhavlab/wav2tok. The experiments are performed using standard datasets.", + "bbox": [ + 171, + 132, + 825, + 162 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 181, + 287, + 196 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xavier Anguera and Miquel Ferrarons. Memory efficient subsequence dtw for query-by-example spoken term detection. In 2013 IEEE International Conference on Multimedia and Expo (ICME), pp. 1-6, 2013. doi: 10.1109/ICME.2013.6607546.", + "Blaise Agüera Arcas, Beat Gfeller, Ruiqi Guo, Kevin Kilgour, Sanjiv Kumar, James Lyon, Julian Odell, Marvin Ritter, Dominik Roblek, Matthew Sharifi, and Mihajlo Velimirovic. Now playing: Continuous low-power music recognition. CoRR, abs/1711.10958, 2017. URL http://arxiv.org/abs/1711.10958.", + "Alexei Baevski, Steffen Schneider, and Michael Auli. vq-wav2vec: Self-supervised learning of discrete speech representations. CoRR, abs/1910.05453, 2019. 
URL http://arxiv.org/abs/1910.05453.", + "Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. CoRR, abs/2006.11477, 2020. URL https://arxiv.org/abs/2006.11477.", + "Alexei Baevski, Wei-Ning Hsu, Alexis Conneau, and Michael Auli. Unsupervised speech recognition. CoRR, abs/2105.11084, 2021. URL https://arxiv.org/abs/2105.11084.", + "Sungkyun Chang, Donmoon Lee, Jeongsoo Park, Hyungui Lim, Kyogu Lee, Karam Ko, and Yoon-chang Han. Neural audio fingerprint for high-specific audio retrieval based on contrastive learning. CoRR, abs/2010.11910, 2020. URL https://arxiv.org/abs/2010.11910.", + "Liqun Chen, Zhe Gan, Yu Cheng, Linjie Li, Lawrence Carin, and Jingjing Liu. Graph optimal transport for cross-domain alignment. 37th International Conference on Machine Learning, ICML 2020, PartF16814:1520-1531, 2020.", + "Yu-An Chung and James Glass. Generative pre-training for speech with autoregressive predictive coding. In ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3497-3501, 2020. doi: 10.1109/ICASSP40776.2020.9054438.", + "Yu-An Chung, Hao Tang, and James Glass. Vector-quantized autoregressive predictive coding. arXiv preprint arXiv:2005.08392, 2020.", + "Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, and Michael Auli. Unsupervised cross-lingual representation learning for speech recognition. CoRR, abs/2006.13979, 2020. URL https://arxiv.org/abs/2006.13979.", + "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805, 2018. URL http://arxiv.org/abs/1810.04805.", + "John S Garofolo, Lori F Lamel, William M Fisher, Jonathan G Fiscus, and David S Pallett. Darpa timit acoustic-phonetic continuous speech corpus cd-rom. nist speech disc 1-1.1. 
NASA STI/Recon technical report n, 93:27403, 1993.", + "Marco Gori, Gabriele Monfardini, and Franco Scarselli. A new model for learning in graph domains. In Proceedings. 2005 IEEE International Joint Conference on Neural Networks, 2005., volume 2, pp. 729-734. IEEE, 2005.", + "Alex Graves, Santiago Fernández, Faustino Gomez, and Jürgen Schmidhuber. Connectionist temporal classification: Labelling unsegmented sequence data with recurrent neural networks. In Proceedings of the 23rd International Conference on Machine Learning, ICML '06, pp. 369-376, New York, NY, USA, 2006. Association for Computing Machinery. ISBN 1595933832. doi: 10.1145/1143844.1143891. URL https://doi.org/10.1145/1143844.1143891." + ], + "bbox": [ + 171, + 205, + 825, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 960 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Patrick AV Hall and Geoff R Dowling. Approximate string matching. ACM computing surveys (CSUR), 12(4):381-402, 1980.", + "Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. CoRR, abs/2106.07447, 2021. URL https://arxiv.org/abs/2106.07447.", + "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization, 2017.", + "Naoko Kosugi, Yuichi Nishihara, Tetsuo Sakata, Masashi Yamamuro, and Kazuhiko Kushima. A practical query-by-humming system for a large music database. In Proceedings of the Eighth ACM International Conference on Multimedia, MULTIMEDIA '00, pp. 333-342, New York, NY, USA, 2000. Association for Computing Machinery. ISBN 1581131984. doi: 10.1145/354384.354520. 
URL https://doi.org/10.1145/354384.354520.", + "John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01, pp. 282-289, San Francisco, CA, USA, 2001. Morgan Kaufmann Publishers Inc. ISBN 1558607781.", + "Lin-shan Lee, James Glass, Hung-yi Lee, and Chun-an Chan. Spoken content retrieval—beyond cascading speech recognition with text retrieval. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23(9):1389-1420, 2015. doi: 10.1109/TASLP.2015.2438543.", + "Yujia Li, Chenjie Gu, Thomas Dullien, Oriol Vinyals, and Pushmeet Kohli. Graph matching networks for learning the similarity of graph structured objects. In International conference on machine learning, pp. 3835-3845. PMLR, 2019.", + "Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts, 2017.", + "Jonathan Mamou, Jia Cui, Xiaodong Cui, Mark J. F. Gales, Brian Kingsbury, Kate Knill, Lidia Mangu, David Nolden, Michael Picheny, Bhavana Ramabhadran, Ralf Schlüter, Abhinav Sethy, and Philip C. Woodland. System combination and score normalization for spoken term detection. In 2013 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 8272-8276, 2013. doi: 10.1109/ICASSP.2013.6639278.", + "Annamaria Mesaros and Tuomas Virtanen. Recognition of phonemes and words in singing. In 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2146-2149, 2010. doi: 10.1109/ICASSP.2010.5495585.", + "Naziba Mostafa and Pascale Fung. A note based query by humming system using convolutional neural network. In INTERSPEECH, pp. 3102-3106, 2017.", + "Naziba Mostafa, Yan Wan, Unnayan Amitabh, and Pascale Fung. A machine learning based music retrieval and recommendation system. 
In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pp. 1970-1977, Porto Roz, Slovenia, May 2016. European Language Resources Association (ELRA). URL https://aclanthology.org/L16-1312.", + "Meinard Müller. Dynamic time warping. Information retrieval for music and motion, pp. 69-84, 2007.", + "Stephen Mussmann and Stefano Ermon. Learning and inference via maximum inner product search. In Maria Florina Balcan and Kilian Q. Weinberger (eds.), Proceedings of The 33rd International Conference on Machine Learning, volume 48 of Proceedings of Machine Learning Research, pp. 2587-2596, New York, New York, USA, 20-22 Jun 2016. PMLR. URL https://proceedings.mlr.press/v48/mussmann16.html.", + "Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: an asr corpus based on public domain audio books. In 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5206-5210. IEEE, 2015." + ], + "bbox": [ + 171, + 103, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "L. Rabiner and B. Juang. An introduction to hidden markov models. IEEE ASSP Magazine, 3(1): 4-16, 1986. doi: 10.1109/MASSP.1986.1165342.", + "Dhananjay Ram, Afsaneh Asaei, and Hervé Bourlard. Sparse subspace modeling for query by example spoken term detection. IEEE ACM Trans. Audio Speech Lang. Process., 26(6):1126-1139, 2018. URL https://doi.org/10.1109/TASLP.2018.2815780.", + "Shivangi Ranjan and Vipul Arora. A bioinformatic method of semi-global alignment for query-by-humming. In 2020 IEEE 4th Conference on Information Communication Technology (CICT), pp. 1-5, 2020. 
doi: 10.1109/CICT51604.2020.9312085.", + "Luis J. Rodriguez-Fuentes, Amparo Varona, Mikel Penagarikano, Germán Bordel, and Mireia Diez. High-performance query-by-example spoken term detection on the sws 2013 evaluation. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7819-7823, 2014. doi: 10.1109/ICASSP.2014.6855122.", + "Justin Salamon and Emilia Gómez. Melody extraction from polyphonic music signals using pitch contour characteristics. IEEE transactions on audio, speech, and language processing, 20(6): 1759-1770, 2012.", + "Steffen Schneider, Alexei Baevski, Ronan Collobert, and Michael Auli. wav2vec: Unsupervised pre-training for speech recognition. CoRR, abs/1904.05862, 2019. URL http://arxiv.org/abs/1904.05862.", + "Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. CoRR, abs/1503.03832, 2015. URL http://arxiv.org/abs/1503.03832.", + "M. Schuster and K.K. Paliwal. Bidirectional recurrent neural networks. IEEE Transactions on Signal Processing, 45(11):2673-2681, 1997. doi: 10.1109/78.650093.", + "Erdem Unal, Elaine Chew, Panayiotis G. Georgiou, and Shrikanth S. Narayanan. Challenging uncertainty in query by humming systems: A fingerprinting approach. IEEE Transactions on Audio, Speech, and Language Processing, 16(2):359-371, 2008. doi: 10.1109/TASL.2007.912373.", + "Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. CoRR, abs/1711.00937, 2017. URL http://arxiv.org/abs/1711.00937.", + "Aäron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. CoRR, abs/1807.03748, 2018. URL http://arxiv.org/abs/1807.03748.", + "Ting Yao, Yingwei Pan, Yehao Li, and Tao Mei. Exploring visual relationship for image captioning. In Proceedings of the European conference on computer vision (ECCV), pp. 
684-699, 2018.", + "Penghang Yin, Jiancheng Lyu, Shuai Zhang, Stanley J. Osher, Yingyong Qi, and Jack Xin. Understanding straight-through estimator in training activation quantized neural nets. CoRR, abs/1903.05662, 2019. URL http://arxiv.org/abs/1903.05662.", + "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes, 2020.", + "Yaodong Zhang and James R. Glass. Unsupervised spoken keyword spotting via segmental dtw on gaussian posteriograms. In 2009 IEEE Workshop on Automatic Speech Recognition Understanding, pp. 398-403, 2009. doi: 10.1109/ASRU.2009.5372931." + ], + "bbox": [ + 171, + 102, + 825, + 915 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A FURTHER STUDIES", + "text_level": 1, + "bbox": [ + 171, + 102, + 370, + 118 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.1 SEQUENCE COMPRESSION", + "text_level": 1, + "bbox": [ + 171, + 137, + 401, + 152 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We present the quality of sequence of tokens $\\mathcal{T}$ and sequence of representations $\\mathcal{Z}$ and their corresponding compressed versions sequences $\\widetilde{\\mathcal{T}}$ and $\\widetilde{\\mathcal{Z}}$ generated by the audio tokenizers in Table 5.", + "bbox": [ + 169, + 165, + 823, + 209 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "wav2tok outperformed the baselines and generated the best quality of sequences $\\mathcal{T}$ , $\\mathcal{Z}$ , $\\widetilde{\\mathcal{T}}$ and $\\widetilde{\\mathcal{Z}}$ . 
Sequence compression brings an order of magnitude drop in search time for all the audio tokenizers with a trade-off in search performance. Compression from $\\mathcal{T}$ to $\\widetilde{\\mathcal{T}}$ increases the robustness of the token sequences generated by wav2tok to various augmentations. wav2vec2P learnt better tokens and representations than wav2vec2 because of its pairwise training on similar audio.", + "bbox": [ + 169, + 215, + 826, + 291 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/74b5fecef399669d5c444a83309913fe9428d736f2bec1e6d8fbd0ac317b1a59.jpg", + "table_caption": [ + "Table 5: Compression of Sequences: MRR scores for query by humming, $\\mathrm{K} = {25}$" + ], + "table_footnote": [], + "table_body": "
ModelVTSPSSearch Time
Without Compression
wav2vec2 DTW0.850.840.84
wav2vec2P DTW0.870.850.873.5s
wav2tok DTW0.920.890.93
wav2vec2 ED0.720.690.730.68s
wav2vec2P ED0.750.710.73
wav2tok ED0.90.840.90.32s
With Compression
wav2vec2 DTW0.760.720.740.8s
wav2vec2P DTW0.810.770.79
wav2tok DTW0.880.880.870.6s
wav2vec2 ED0.660.630.640.06s
wav2vec2P ED0.690.650.67
wav2tok ED0.840.840.840.04s
", + "bbox": [ + 303, + 347, + 694, + 563 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.2 VARIATION IN NUMBER OFTOKENS $K$", + "text_level": 1, + "bbox": [ + 171, + 595, + 483, + 609 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The effect of varying the size of alphabet $\\mathbb{A}$ is shown in Table 6. We train wav2vec2, wav2vec2P, and proposed wav2tok with alphabets of size $K\\in \\{15,25,40\\}$ . Out of the three settings for $K$ , $K = 25$ gives the best performance for all models. wav2tok gives best performance for all settings of $K$ .", + "bbox": [ + 169, + 625, + 823, + 679 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.3 ABLATION STUDIES AND SOME VARIATIONS", + "text_level": 1, + "bbox": [ + 171, + 704, + 529, + 718 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We present the full version of Table 2 in table 7. Note wav2tok+NoSim representations are well clustered. wav2tok+Trans representations are also comparable with wav2tok but the tokens are of lesser quality. This is due to model overfitting.", + "bbox": [ + 169, + 732, + 826, + 776 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A.4 QUALITY OF REPRESENTATIONS", + "text_level": 1, + "bbox": [ + 171, + 797, + 442, + 811 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We present the performance of the continuous representations generated by wav2tok and the baselines in Table 8. wav2tok generates the best representations for music outperforming representations generated by the large wav2vec 2.0 models. wav2tok trained on MIR1K generates representations outperforming domain-specific QbH baselines. Note, wav2vec2-O outperforms wav2vec2-Multi as the hums in the dataset were all in english. wav2vec2-O is pre-trained and fine-tuned on English speech only while wav2vec2-Multi is pre-trained multilingually. 
Hence, wav2vec2-O gave better results.", + "bbox": [ + 169, + 825, + 823, + 922 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/a3bce1137837eb742e38031fddcf922c896dcff0a06182c6d2253ae6fa210f6b.jpg", + "table_caption": [ + "Table 6: Effect of varying $K$ : MRR scores for query by humming" + ], + "table_footnote": [], + "table_body": "
ModelsWithout CompressionWith Compression
VTSPSVTSPS
K=15
wav2vec2 DTW0.850.830.840.70.660.67
wav2vec2P DTW0.870.850.850.820.770.8
wav2tok DTW0.880.870.880.840.80.83
wav2vec2 ED0.790.770.780.580.560.57
wav2vec2P ED0.80.770.790.710.680.7
wav2tok ED0.820.80.810.770.750.76
K=25
wav2vec2 DTW0.850.840.840.760.720.74
wav2vec2P DTW0.870.850.870.810.770.79
wav2tok DTW0.920.890.930.880.880.87
wav2vec2 ED0.720.690.730.660.630.64
wav2vec2P ED0.750.710.730.690.650.67
wav2tok ED0.90.840.90.840.840.84
K=40
wav2vec2 DTW0.840.820.830.720.680.7
wav2vec2P DTW0.860.850.850.810.770.79
wav2tok DTW0.90.880.890.860.830.83
wav2vec2 ED0.710.660.690.60.580.58
wav2vec2P ED0.730.70.730.680.650.67
wav2tok ED0.830.80.820.770.750.76
", + "bbox": [ + 267, + 138, + 730, + 481 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/1a0d7a7a44dd69a8486f476d51f35f7d33ea748afb5d169f71e99903e730e36f.jpg", + "table_caption": [ + "Table 7: Ablation Studies and Some Variations: MRR scores for query by humming" + ], + "table_footnote": [], + "table_body": "
ModelsWithout CompressionWith Compression
VTSPSVTSPS
log-mel DTW0.720.690.670.540.470.43
wav2tok+NoSim DTW0.880.870.870.80.840.83
wav2tok+Cos DTW0.880.870.870.830.810.81
wav2tok+NewInit DTW0.90.840.910.840.850.83
wav2tok+Trans DTW0.840.770.850.80.770.76
wav2tok+MIR1K DTW0.880.840.850.820.740.78
wav2tok DTW0.920.890.930.880.880.87
vq-log-mel ED0.710.60.620.520.480.47
wav2tok+NoSim ED0.850.740.840.730.730.72
wav2tok+Cos ED0.860.840.850.790.760.77
wav2tok+NewInit ED0.830.720.850.770.760.78
wav2tok+Trans ED0.840.770.850.70.660.67
wav2tok+MIR1K ED0.760.660.710.720.640.67
wav2tok ED0.90.840.90.840.840.84
", + "bbox": [ + 245, + 522, + 753, + 763 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.5 TRAINING ON LARGER SPEECH DATASET", + "text_level": 1, + "bbox": [ + 171, + 777, + 503, + 792 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We train wav2tok on 100-hours subset of LibriSpeech (Panayotov et al., 2015) dataset. We evaluate the quality of tokenization of word utterances done by wav2tok on TIMIT (Garofolo et al., 1993) dataset. We use a 2-layer BiLSTM network with 3.6 million parameters as encoder network which takes MFCC feature sequences as input. We perform tokenization with $K = 40$ tokens.", + "bbox": [ + 169, + 805, + 826, + 862 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "wav2tok outperforms wav2vec2-O by a large margin and gives comparable performance to wav2vec2-Multi in terms of MRR score. wav2tok uses a minute number of parameters in comparison to 95 million parameters in wav2vec2-O and 317 million parameters in wav2vec2-Multi. Note, wav2vec2-O and wav2vec2-Multi were pre-trained on large amount of unlabelled speech data and", + "bbox": [ + 169, + 868, + 826, + 925 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/49d2a1a8b1e9cecac3731b13a0d3f44f4bbbbfa7c69ddbdb457a40d2d0e9569b.jpg", + "table_caption": [ + "Table 8: Quality of Representations: MRR scores for query by humming" + ], + "table_footnote": [], + "table_body": "
ModelVTSPS
(Salamon & Gómez, 2012) MIDI ED0.750.640.72
(Mostafa & Fung, 2017) Note DTW0.840.740.8
Triplet DTW0.50.480.5
MIPS DTW0.60.550.58
wav2vec2-O DTW0.910.830.86
wav2vec2-Multi DTW0.880.830.85
wav2tok DTW0.920.90.93
wav2tok+MIR1K DTW0.880.840.85
", + "bbox": [ + 290, + 152, + 707, + 284 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "fine-tuned with transcription to perform tokenization of audio. Moreover, wav2vec2-O was fine-tuned to perform tokenization on TIMIT (Garofolo et al., 1993) dataset. Proposed wav2tok was trained on 100 hours of LibriSpeech dataset only. The tokens learnt by wav2tok on LibriSpeech (Panayotov et al., 2015) dataset generalised well to TIMIT (Garofolo et al., 1993).", + "bbox": [ + 169, + 299, + 826, + 356 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/42d6643dcdcd29aab1f26727cd5304473dd7fdde94d2e2f17b8d791f8d4208ff.jpg", + "table_caption": [ + "Table 9: Quality of Tokenization for speech (MRR Scores)" + ], + "table_footnote": [], + "table_body": "
ModelNormal (T)Compressed (T)
wav2vec2-O ED0.40.4
wav2vec2-Multi ED0.670.67
wav2tok+Libri ED0.640.6
", + "bbox": [ + 299, + 409, + 699, + 473 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B CTC WITHOUT BLANKS", + "text_level": 1, + "bbox": [ + 171, + 503, + 413, + 518 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We present the forward and backward variables used in calculating the gradients of the CTC loss $\\mathcal{L}_{ctc}(\\mathcal{X},\\tilde{\\mathcal{T}}^{\\prime})$ with no blank tokens.", + "bbox": [ + 171, + 535, + 823, + 566 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The forward variable is defined as,", + "bbox": [ + 171, + 571, + 406, + 585 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {t} (s) = \\sum_ {\\pi ; \\mathcal {C} (\\pi_ {1: t}) = \\tilde {\\mathcal {T}} _ {1: s} ^ {\\prime}} \\prod_ {t ^ {\\prime} = 1} ^ {t} l _ {t ^ {\\prime}, \\pi_ {t ^ {\\prime}}} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 595, + 825, + 642 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\\pi$ corresponds to all $T$ -length paths over tokens such that $\\mathcal{C}(\\pi) = \\tilde{T}'$ . 
Here, $\\mathcal{C}$ is a compressor which compresses $\\pi$ a $T$ -length sequence of tokens via de-duplication.", + "bbox": [ + 169, + 652, + 823, + 681 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We initialise as follows,", + "bbox": [ + 171, + 686, + 333, + 700 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {1} (1) = l _ {1, \\tilde {\\tau} _ {1} ^ {\\prime}} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 433, + 710, + 823, + 734 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {1} (s) = 0, \\forall s > 1\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 732, + 558, + 746 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "and recursively calculate $\\alpha_{t}(s)$ as,", + "bbox": [ + 171, + 763, + 401, + 780 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {t} (s) = \\left(\\alpha_ {t - 1} (s) + \\alpha_ {t - 1} (s - 1)\\right) l _ {t, \\tilde {\\mathcal {T}} _ {s} ^ {\\prime}} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 801, + 825, + 820 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We set $\\alpha_{t}(s) = 0, \\forall s < 1$ .", + "bbox": [ + 171, + 832, + 349, + 848 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The backward variable is defined as,", + "bbox": [ + 171, + 854, + 415, + 869 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\beta_ {t} (s) = \\sum_ {\\pi ; \\mathcal {C} (\\pi_ {t: T}) = \\tilde {\\mathcal {T}} _ {s: | \\bar {\\mathcal {T}} ^ {\\prime} |} ^ {\\prime}} \\prod_ {t ^ {\\prime} = t} ^ {T} l _ {t ^ {\\prime}, \\pi_ {t ^ {\\prime}}} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 382, + 877, + 825, + 928 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": 
[ + 490, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We initialise as follows,", + "bbox": [ + 171, + 103, + 333, + 117 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\beta_ {T} \\left(\\left| \\tilde {\\mathcal {T}} ^ {\\prime} \\right|\\right) = l _ {T, \\tilde {\\mathcal {T}} _ {| \\tilde {\\mathcal {T}} ^ {\\prime} |} ^ {\\prime}} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 125, + 823, + 152 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\beta_ {T} (s) = 0, \\forall s < | \\tilde {T} ^ {\\prime} |\n$$\n", + "text_format": "latex", + "bbox": [ + 436, + 152, + 581, + 167 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "and recursively calculate $\\beta_t(s)$ as,", + "bbox": [ + 171, + 176, + 398, + 191 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\beta_ {t} (s) = \\left(\\beta_ {t + 1} (s) + \\beta_ {t + 1} (s + 1)\\right) l _ {t, \\tilde {\\mathcal {T}} _ {s} ^ {\\prime}} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 199, + 825, + 219 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We set $\\beta_{t}(s) = 0, \\forall s > |\\tilde{T}^{\\prime}|$ .", + "bbox": [ + 171, + 233, + 366, + 251 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C GUMBEL SOFTMAX BASED VECTOR QUANTIZER", + "text_level": 1, + "bbox": [ + 169, + 270, + 614, + 287 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The Gumbel Softmax based Vector Quantizer (Baevski et al., 2019) quantizes input latent representation $z_{t} \\in R^{m}$ with $C$ codebooks containing $K$ quantizers $e \\in R^{K \\times \\frac{m}{C}}$ each. For our experiments, we set $C = 1$ and $K \\in \\{15, 25, 40\\}$ . Given $\\mathbf{z}_{t}$ , one of the $K$ quantizers from each of the $C$ codebooks are chosen resulting in vectors $e_{1}, \\dots, e_{C}$ . 
The codebook vectors are then concatenated and linearly transformed from $R^{m}$ to $R^{d}$ to output a discrete representation $q_{t} \\in R^{d}$ .", + "bbox": [ + 169, + 301, + 823, + 372 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "$\\mathbf{z}_t$ is mapped to $\\mathbf{l} \\in R^{C \\times K}$ logits to give probability scores for the choice of codeword. The probability $p_{c,k}$ of choosing $k^{th}$ quantizer in $c^{th}$ codebook is given as,", + "bbox": [ + 169, + 378, + 823, + 407 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\np _ {c, k} = \\frac {\\exp \\left(l _ {c , k} + n _ {k}\\right) / \\tau}{\\sum_ {i = 1} ^ {K} \\exp \\left(l _ {c , i} + n _ {i}\\right) / \\tau} \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 424, + 825, + 460 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $\\tau$ is a non-negative temperature, $n = -\\log (-\\log (u))$ and $u$ are samples from the uniform distribution $\\mathbf{Unif}(0,1)$ .", + "bbox": [ + 169, + 463, + 823, + 493 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "During forward pass, the codeword is chosen as $\\kappa = \\arg \\max_{j} p_{c,j}$ . During backward pass, the loss is calculated over the gumble softmax distribution $p$ . We use the straight-through gradient estimator (Yin et al., 2019) to estimate the gradient.", + "bbox": [ + 169, + 500, + 823, + 542 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Codebook Diversity Loss $\\mathcal{L}_d$ . This loss promotes equal use of all the entries in each of the $C$ codebooks. 
Minimization of this loss maximizes the entropy of the averaged softmax distribution $\\tilde{p}$ over the $K$ entries for each codebook $\\tilde{p}_c$ across a batch of utterances.", + "bbox": [ + 169, + 547, + 826, + 590 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {d} = \\frac {1}{C K} \\sum_ {c = 1} ^ {C} \\sum_ {k = 1} ^ {K} \\tilde {p} _ {c, k} \\log \\tilde {p} _ {c, k} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 597, + 825, + 638 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2023", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_model.json b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4ebe7c9d67010c70827fee1dfbdc73c03b2e4b77 --- /dev/null +++ b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_model.json @@ -0,0 +1,2597 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.148 + ], + "angle": 0, + "content": "WAV2TOK: DEEP SEQUENCETOKENIZER FOR AUDIO RETRIEVAL" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.171, + 0.403, + 0.186 + ], + "angle": 0, + "content": "Adhiraj Banerjee, Vipul Arora" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.186, + 0.432, + 0.2 + ], + "angle": 0, + "content": "Department of Electrical Engineering" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.2, + 0.478, + 0.214 + ], + "angle": 0, + 
"content": "Indian Institute of Technology Kanpur, India" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.214, + 0.457, + 0.228 + ], + "angle": 0, + "content": "{adhiraj,vipular}@iitk.ac.in" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.264, + 0.547, + 0.279 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.293, + 0.768, + 0.461 + ], + "angle": 0, + "content": "Search over audio sequences is a fundamental problem. In this paper, we propose a method to extract concise discrete representations for audio that can be used for efficient retrieval. Our motivation comes from orthography which represents speech of a given language in a concise and distinct discrete form. The proposed method, wav2tok, learns such representations for any kind of audio, speech or non-speech, from pairs of similar audio. wav2tok compresses the query and target sequences into shorter sequences of tokens that are faster to match. The learning method makes use of CTC loss and expectation-maximization algorithm, which are generally used for supervised automatic speech recognition and for learning discrete latent variables, respectively. Experiments show the consistent performance of wav2tok across two audio retrieval tasks: music search (query by humming) and speech search via audio query, outperforming state-of-the-art baselines." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.484, + 0.338, + 0.498 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.514, + 0.827, + 0.64 + ], + "angle": 0, + "content": "Sequence Retrieval aims at retrieving sequences similar to a query sequence, with the constraint that an ordered alignment exists between the query and the target sequence. In this paper, we address the following problem: Can we extract discrete tokens from any continuous signal for the purpose of retrieval of similar signals? 
This problem has deep connections with tasks such as child language acquisition, music cognition and learning languages without written forms. Some direct applications of the proposed task include speech search, where the order of constituent units, such as phonemes, syllables or words, remains same; and music search – query by humming or query by example – where the order of constituent units, such as relative notes or phrases, remains same. Apart from audio, the problem extends to tasks such as handwritten word search and gesture search." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.825, + 0.703 + ], + "angle": 0, + "content": "One can define similarity metrics over sequences using methods based on Dynamic Time Warping (DTW) (Müller, 2007). These methods are inefficient if the sequences are continuous valued and have high sampling rates. Moreover, they depend on matching hand-made features, which are ineffective in the face of high variability of query sequences." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.709, + 0.825, + 0.822 + ], + "angle": 0, + "content": "Problems such as spoken term detection involve detection of a query utterance in a long speech audio. The search space is huge, and performing DTW based search of query takes long time (Rodriguez-Fuentes et al., 2014). A more efficient way of sequence retrieval is by mapping them to sequences of discrete tokens. Automatic speech recognition (ASR) can be employed for this purpose (Mamou et al., 2013). However, ASR training requires knowledge of basic units of transcription. The popularly used units are phonemes and graphemes. This method thus becomes language dependent. Non-linguistic sounds, such as cough and sneeze, could be mapped to certain tokens defined for them. This approach could not be used when precise tokens are not defined, e.g., music search." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "In query by humming based music search, audio is mapped to discrete melody-related tokens, such as notes, and these token sequence are matched for search (Unal et al., 2008). However, several music traditions do not have precise transcription systems. There, one can tell if two pieces, or motifs, are similar but cannot precisely transcribe them to tokens. The embellishments used in music could be too dynamic to be transcribed precisely. Moreover, when a musically untrained user sings a query, s/he cannot hit the right notes matching the target song. So the matching could rely on several factors other than notes, such as phonemes of lyrics (Mesaros & Virtanen, 2010), onset times" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.048 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "(rhythm) (Kosugi et al., 2000), and note transitions (Ranjan & Arora, 2020). Hence, the tokens to be used may not be derived from notes alone." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.825, + 0.239 + ], + "angle": 0, + "content": "In this way, each tokenizer - for speech, music or other signals, in general - uses domain-specific hand-made tokens defined by a domain expert. In this paper, we propose a tokenizer to map audio sequences to sequences of discrete tokens with an aim of retrieval. The mapping is learned only from pairs of similar audio sequences. The tokens are not defined manually but correspond to distinct semantic units learned from pairs of similar audio sequences. The method is general and can be applied to signals other than audio. 
In this paper, we apply the proposed method to speech and music audio search, for the problems of spoken term detection and query by humming, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.827, + 0.302 + ], + "angle": 0, + "content": "The proposed method, named wav2tok, encodes audio via a BiLSTM (Schuster & Paliwal, 1997) network. The encoder-generated representations are then mapped to discrete tokens via a \\(K\\)-means vector quantizer network. Each discrete token corresponds to a discrete representation in the vector quantizer's codebook which is initialized and updated via offline \\(K\\)-means clustering only." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.307, + 0.825, + 0.433 + ], + "angle": 0, + "content": "wav2tok is trained with pairs of similar audio sequences in a self-supervised fashion without any transcription using a novel training algorithm. For each pair, we average the encoder-generated representations, which map to the same token, by the \\( K \\)-means vector quantizer network to generate a prototype for that token. We then perform a contrastive learning task to increase the similarity between the generated prototype for a particular token and the quantizer codebook discrete representation corresponding to the same token. We simultaneously minimize the edit distance between the token sequences generated from each sequence in the pair via Connectionist temporal classification (CTC) (Graves et al., 2006) framework to constrain both sequences to get mapped to the same token sequence." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.439, + 0.827, + 0.51 + ], + "angle": 0, + "content": "We compare wav2tok to state-of-the-art (SOTA) methods for discrete representation learning, such as wav2vec 2.0, and SOTA ASR models fine-tuned to perform phonetic tokenization. 
We evaluate the generalization capability of the tokens generated by the models on search experiments, namely, query-by-humming and spoken term detection. wav2tok outperforms the baselines in performance and uses much lesser trainable parameters, ensuring faster inference and deployment." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.531, + 0.347, + 0.545 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.563, + 0.825, + 0.675 + ], + "angle": 0, + "content": "Sequence Labelling. With expert-defined tokens, various methods are popularly used for mapping sequences to tokens. In conventional methods, Hidden Markov Models (Rabiner & Juang, 1986) and Conditional Random Fields (Lafferty et al., 2001) have been popularly used for sequence labeling. These methods involve a significant amount of domain knowledge and many assumptions to make tractable models, which are avoided by End-to-End learning models such as Recurrent Neural Networks (RNNs) using Connectionist Temporal Classification framework (Graves et al., 2006). Sequence labeling can be used for sequence retrieval by converting the sequences to tokens, which are easy to search over. But this approach inevitably depends upon expert-defined tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.681, + 0.825, + 0.821 + ], + "angle": 0, + "content": "Unsupervised Speech Representation Learning. Automatic Speech Recognition systems are pretrained on large amounts of untranscribed speech data to generate SOTA continuous representations which encode the slowly varying phoneme features in raw speech. The representations are then mapped to phoneme tokens via Connectionist Temporal Classification (CTC) (Graves et al., 2006) fine-tuning on a small amount of transcribed audio. 
Works like Contrastive Predictive Coding (CPC) (van den Oord et al., 2018), Autoregressive Predictive Coding (APC) (Chung & Glass, 2020), and wav2vec (Schneider et al., 2019) generate continuous representations with powerful autoregressive models pre-trained to predict future time-step representations. Further works started discretizing the continuous representations with vq-VAE (van den Oord et al., 2017) to generate discrete representations for speech." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Works like vq-wav2vec (Baevski et al., 2019) and vq-APC (Chung et al., 2020) discretize the representations and perform the same prediction tasks as in wav2vec (Schneider et al., 2019) and APC (Chung & Glass, 2020) respectively but over discrete representations. In vq-wav2vec, the discrete representations are generated with either a K-Means Vector Quantizer (Baevski et al., 2019) or Gumbel-Softmax based Vector Quantizer (Baevski et al., 2019). The learned discrete representations are used to pre-train a BERT (Devlin et al., 2018) to generate stronger continuous representations much like BERT pre-training in Natural Language Processing. wav2vec 2.0 (Baevski et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.231 + ], + "angle": 0, + "content": "2020) uses a Gumble Softmax based Vector Quantizer (Baevski et al., 2019) to generate discrete representations. The training involves masking of spans of time steps and then predicting the correct discrete representations at each masked time step with transformer representation at that time step. 
In these methods, raw audio is discretized in a latent space to model all possible acoustic units than phonetic or sub-phonetic units. The tokens generated by the vector quantizers aren't constrained to be interpretable and are initialized in large numbers (\\(\\sim\\) 102.4K codes). After pre-training, a subset of these codes or tokens are chosen more often by the vector quantizers and are considered to represent acoustic units. CTC-based fine-tuning with transcription groups these discrete acoustic units to \\(K\\) distinct phonemes or linguistic units as present in the transcriptions." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.236, + 0.828, + 0.336 + ], + "angle": 0, + "content": "Works like HuBERT and wav2vec-Unsupervised learn phonemic units directly. HuBERT (Hsu et al., 2021) pre-trains a transformer network via BERT-like masked prediction task over noisy targets generated with a clustering model trained offline. The targets may be generated with an ensemble of \\( K \\)-means clusterers with \\( K = \\{100,500\\} \\) clusters on MFCC features or transformer representations. wav2vec-Unsupervised (Baevski et al., 2021) learns phonetic tokens adversially from phonemized unlabelled text data. A discriminator identifies if the phoneme sequence generated by model is real or fake based on phonemized unlabelled text." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.341, + 0.826, + 0.385 + ], + "angle": 0, + "content": "All aforementioned approaches use powerful auto-regressive models pre-trained on large amounts of unlabeled audio and fine-tuned on transcribed audio. Our learning approach can learn semantic tokens with small models while training pairwise on small amount of unlabeled audio data." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.389, + 0.827, + 0.571 + ], + "angle": 0, + "content": "Audio Representations for Retrieval. 
Now Playing (Arcas et al., 2017) and (Chang et al., 2020) use a Neural Network Fingerprinter (NNFP) module outputting representations which are efficient for search in query-by-example tasks where the difference between query and the actual song is pretty minute in comparison to humming where only the melody is sung. Now Playing (Arcas et al., 2017) trains representations by optimizing the triplet loss (Schroff et al., 2015) and (Chang et al., 2020) trains representations by simulating the Maximum Inner Product Search (MIPS) on minibatches of representations. For Query by Humming task, (Mostafa et al., 2016) and (Mostafa & Fung, 2017) use deep learning models like DNNs and CNNs to generate representations which they map to MIDI-numbers or note tokens. Such works require note-transcribed data to train the models. For Spoken Term Detection task, approaches like (Zhang & Glass, 2009), (Rodriguez-Fuentes et al., 2014), (Lee et al., 2015), (Ram et al., 2018) convert audio to sequences of feature vectors and apply different variations of DTW based template matching to detect query in long utterances of speech which is time-consuming." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.577, + 0.827, + 0.678 + ], + "angle": 0, + "content": "Cross Domain Alignment. Given a pair of semantically similar inputs for training, tasks such as visual question answering (text and image) and machine translation (text) involve learning an alignment. The alignment here is not ordered and the inputs may be from different modalities. Attention models have been used to find alignment between output entities and input regions (Yao et al., 2018). (Chen et al., 2020) use Gromov-Wasserstein distance between output and input entities to match them. However, there is no notion of tokens there, rather the salient entities in the input are represented as vectors in a graph." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.681, + 0.827, + 0.768 + ], + "angle": 0, + "content": "Graph Matching. 
Graph Neural Networks (Gori et al., 2005) are used to generate embeddings for graphs. These embeddings are used to perform graph matching to find similarity of structured graphs (Li et al., 2019). However, they perform the matching jointly on the pair of inputs, rather than representing each input independently. This makes them unsuitable for the search problem at hand due to large run-time complexity. The distance metrics used for graph matching are based on edit distance (Li et al., 2019) and Wasserstein distance (Chen et al., 2020)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.797, + 0.398, + 0.813 + ], + "angle": 0, + "content": "3 PROBLEM STATEMENT" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.833, + 0.826, + 0.929 + ], + "angle": 0, + "content": "We aim to map \\(\\mathcal{X}\\), a sequence of vectors, to \\(\\tilde{\\mathcal{T}}\\), a sequence of discrete tokens from a finite alphabet \\(\\mathbb{A}\\), such that the similarity of sequences is preserved in the sense of edit distance. The length of sequence \\(\\tilde{\\mathcal{T}}\\) may be less than or equal to that of the sequence \\(\\mathcal{X}\\). In other words, given a pair of similar sequences \\((\\mathcal{X}_i,\\mathcal{X}_j)\\) and sequence \\(\\mathcal{X}_k\\) which is not similar to either sequences in the pair, we want to map them to token sequences such that \\(ED(\\tilde{\\mathcal{T}}_i,\\tilde{\\mathcal{T}}_j)\\) should be less than \\(\\min \\{ED(\\tilde{\\mathcal{T}}_i,\\tilde{\\mathcal{T}}_k),ED(\\tilde{\\mathcal{T}}_j,\\tilde{\\mathcal{T}}_k)\\}\\), where \\(ED(\\cdot ,\\cdot)\\) is the edit distance between two sequences." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.961 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.411, + 0.119 + ], + "angle": 0, + "content": "4 MODEL ARCHITECTURE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.133, + 0.825, + 0.205 + ], + "angle": 0, + "content": "wav2tok is comprised of an encoder \\( f: \\mathbb{X} \\mapsto \\mathbb{Z} \\) which takes as input a temporal sequence of audio features \\( \\mathcal{X} = [\\mathbf{x}_t \\in R^n; t \\in [T]] \\) of length \\( T \\), where \\( \\mathbf{x}_t \\) is the feature vector at time step \\( t \\), and outputs a sequence of L-2 normalised representations \\( \\mathcal{Z} = [\\mathbf{z}_t = f(\\mathbf{x}_t) \\in R^m; t \\in [T]] \\). The encoder is implemented as a 2-layer BiLSTM followed by an L-2 normalization layer. BiLSTMs summarise information in both directions and encode surrounding context." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.21, + 0.827, + 0.323 + ], + "angle": 0, + "content": "A \\(K\\)-means vector quantizer network \\(g: \\mathbb{Z} \\mapsto \\mathbb{T}\\) then labels sequence \\(\\mathcal{Z}\\) at each time-step with tokens belonging to a finite \\(K\\)-element alphabet \\(\\mathbb{A} = [K]\\) and generates sequence of tokens \\(\\mathcal{T} = [\\tau_t = g(\\mathbf{z}_t) \\in \\mathbb{A}; t \\in [T]]\\). Network \\(g\\) vector quantizes input \\(\\mathbf{z}_t\\) with a codebook \\(E = \\{\\mathbf{e}_k \\in \\mathbb{Z}; k \\in [K]\\}\\) comprised of \\(|\\mathbb{A}| = K\\) discrete representations which are cluster centroids in representation space \\(\\mathbb{Z}\\) and outputs token \\(\\tau_t = \\arg \\max_k \\mathbf{z}_t \\cdot \\mathbf{e}_k\\). 
Note, here the dot product gives a cosine similarity score since both the vectors are L-2 normalized, as a result, \\(\\mathbf{e}_k \\in E\\) closest to \\(\\mathbf{z}_t\\) is chosen as its discrete representation and index \\(k\\) as its token \\(\\tau_t\\). The \\(K\\) discrete representations in network \\(g\\) are trainable parameters." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.33, + 0.826, + 0.392 + ], + "angle": 0, + "content": "A compressor \\(\\mathcal{C}\\) compresses sequence of tokens \\(\\mathcal{T}\\) to sequence \\(\\tilde{\\mathcal{T}}\\) of length \\(\\tilde{T} \\leq T\\) by deleting all consecutive repetitions of tokens. \\(\\mathcal{C}\\) also generates the corresponding compressed sequence \\(\\tilde{\\mathcal{Z}}\\) of length \\(\\tilde{T}\\) by averaging representations \\(\\mathbf{z}_t \\in \\mathcal{Z}\\) over the consecutive tokens and L-2 normalising the averaged representation. Figure 1a presents an illustration demonstrating our model architecture." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.41, + 0.295, + 0.426 + ], + "angle": 0, + "content": "5 TRAINING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.441, + 0.825, + 0.498 + ], + "angle": 0, + "content": "wav2tok is trained on pairs of sequences of audio features \\((\\mathcal{X},\\mathcal{X}^{\\prime})\\) where the raw audio corresponding to \\(\\mathcal{X}^{\\prime}\\) is an augmented replica of that corresponding to \\(\\mathcal{X}\\). We apply either pitch shift or time stretch or both augmentations to raw audio to generate its augmented replica. \\(\\mathcal{X}\\) and \\(\\mathcal{X}^{\\prime}\\) may differ in sources as well, i.e. a different person may sing the recording corresponding to \\(\\mathcal{X}^{\\prime}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.504, + 0.825, + 0.59 + ], + "angle": 0, + "content": "The discrete representations in quantizer \\( g \\) codebook \\( E \\) are initialized as \\( K \\) centroids obtained via offline \\( K \\)-means clustering over freshly initialized encoder-generated representations. Given \\( (\\mathcal{X},\\mathcal{X}^{\\prime}) \\), encoder \\( f \\) generates sequence of representations \\( \\mathcal{Z} \\) from input \\( \\mathcal{X} \\) and \\( \\mathcal{Z}^{\\prime} \\) from \\( \\mathcal{X}^{\\prime} \\). Quantizer \\( g \\) generates a sequence of tokens \\( \\mathcal{T} \\) from input \\( \\mathcal{Z} \\) and \\( \\mathcal{T}^{\\prime} \\) from \\( \\mathcal{Z}^{\\prime} \\) via cosine similarity-based comparison with codebook vectors \\( e\\in E \\) initialized via offline clustering over freshly initialized representation space \\( Z \\). Compressor \\( \\mathcal{C} \\) compresses sequence of tokens \\( \\mathcal{T} \\) to sequence \\( \\tilde{\\mathcal{T}} \\) and \\( \\mathcal{T}^{\\prime} \\) to \\( \\tilde{\\mathcal{T}}^{\\prime} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.596, + 0.825, + 0.67 + ], + "angle": 0, + "content": "We average all encoder-generated representations in pair \\((\\mathcal{Z},\\mathcal{Z}^{\\prime})\\) which map to the same token, say \\(\\tau\\), to generate a prototype for \\(\\tau\\). We then perform a contrastive task where we compare the prototype with each of the \\(K\\) discrete representations in codebook \\(E\\) and increase its similarity with the discrete representation corresponding to \\(\\tau\\). We also increase the likelihood that wav2tok maps pair \\((\\mathcal{X},\\mathcal{X}^{\\prime})\\) to the same token sequence via CTC framework to minimize \\(ED(\\tilde{\\mathcal{T}},\\tilde{\\mathcal{T}}^{\\prime})\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.675, + 0.381, + 0.689 + ], + "angle": 0, + "content": "Our loss function is defined as," + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.694, + 0.825, + 0.712 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {m} \\left(\\mathcal {X}, \\mathcal {X} ^ {\\prime}\\right) + \\alpha \\mathcal {L} _ {c t c} \\left(\\mathcal {X}, \\tilde {\\mathcal {T}} ^ {\\prime}\\right) + \\beta \\mathcal {L} _ {c t c} \\left(\\mathcal {X} ^ {\\prime}, \\tilde {\\mathcal {T}}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.716, + 0.825, + 0.773 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_m\\) is loss defined for contrastive task, \\(\\mathcal{L}_{ctc}\\) is the loss maximising aforementioned likelihood, and \\(\\alpha, \\beta\\) are positive constants. We optimize this loss function in a manner similar to the Expectation Maximization algorithm. The clustering is used as the E-step to update the discrete representations in quantizer \\(g\\) codebook, while gradient descent over \\(\\mathcal{L}\\) acts as the M-step." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.78, + 0.827, + 0.838 + ], + "angle": 0, + "content": "Contrastive Loss. Let the set of unique tokens occurring in pair \\((\\tilde{\\mathcal{T}},\\tilde{\\mathcal{T}}^{\\prime})\\) be \\(\\mathcal{U}\\subset [K],|\\mathcal{U}| = K^{\\prime}\\leq K\\). We generate a list of token prototypes \\(\\mathcal{P} = \\{\\mathbf{p}_{\\tau};\\tau \\in \\mathcal{U}\\}\\) where \\(\\mathbf{p}_{\\tau}\\) is L-2 normalised mean of representations in \\(\\{\\mathbf{z}\\in \\{\\mathcal{Z};\\mathcal{Z}^{\\prime}\\} :g(\\mathbf{z}) = \\tau \\}\\). Figure 1b presents an illustration demonstrating how we generate list of token prototypes \\(\\mathcal{P}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.844, + 0.825, + 0.9 + ], + "angle": 0, + "content": "Given \\(\\mathbf{p}_{\\tau} \\in \\mathcal{P}\\), we perform a contrastive task to increase its similarity with discrete representation \\(\\mathbf{e}_{\\tau} \\in E\\). To compare \\(\\mathbf{p}_{\\tau}\\) with the codebook, metrics such as cosine similarity and Euclidean distance could be used. However, we find that using the following parameterized score for this purpose gives better performance," + }, + { + "type": "equation", + "bbox": [ + 0.372, + 0.905, + 0.825, + 0.922 + ], + "angle": 0, + "content": "\\[\n\\mathbf {s} _ {\\tau , k} = \\sigma \\left(W \\cdot \\left(\\mathbf {p} _ {\\tau} - s g \\left(\\mathbf {e} _ {k}\\right)\\right)\\right) \\in [ 0, 1 ] \\tag {2}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.117, + 0.359, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.218, + 0.336, + 0.232 + ], + "angle": 0, + "content": "(a) Model Architecture" + }, + { + "type": "image", + "bbox": [ + 0.374, + 0.117, + 0.583, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.218, + 0.581, + 0.233 + ], + "angle": 0, + "content": "(b) Generation of Prototype list \\(\\mathcal{P}\\)" + }, + { + "type": "image", + "bbox": [ + 0.604, + 0.101, + 0.811, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.615, + 0.218, + 0.805, + 0.232 + ], + "angle": 0, + "content": "(c) Likelihood Loss Calculation" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.243, + 0.825, + 0.272 + ], + "angle": 0, + "content": "Figure 1: 
\\(\\mathcal{X}'\\) is an augmented replica of \\(\\mathcal{X}\\). 1a illustrates our model architecture. 1b demonstrates the generation of \\(\\mathcal{P}\\) required for calculation of \\(\\mathcal{L}_m\\). 1c demonstrates our likelihood loss calculation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.297, + 0.825, + 0.344 + ], + "angle": 0, + "content": "where \\( sg(x) \\equiv x, \\frac{d}{dx} sg(x) \\equiv 0 \\) is the stop-gradient operator, \\( \\sigma(\\cdot) \\) is sigmoid function generating a score in the range [0, 1] and \\( W \\in R^{1 \\times d} \\) is a parameter matrix. \\( \\mathbf{s}_{\\tau,k} \\) acts as a parameterized similarity score between \\( \\mathbf{p}_{\\tau} \\) and discrete representation \\( \\mathbf{e}_k \\in E \\). We define our contrastive loss \\( \\mathcal{L}_m \\) as," + }, + { + "type": "equation", + "bbox": [ + 0.357, + 0.35, + 0.826, + 0.387 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {m} \\left(\\mathcal {X}, \\mathcal {X} ^ {\\prime}\\right) = - \\sum_ {\\tau \\in \\mathcal {U}} \\log \\frac {\\exp \\left(\\mathbf {s} _ {\\tau , \\tau}\\right)}{\\sum_ {k = 1} ^ {K} \\exp \\left(\\mathbf {s} _ {\\tau , k}\\right)} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.403, + 0.825, + 0.485 + ], + "angle": 0, + "content": "Likelihood Loss. We maximize the likelihood that sequence \\(\\mathcal{X}\\) maps to token sequence \\(\\tilde{\\mathcal{T}}'\\), which corresponds to \\(\\mathcal{X}'\\), via the CTC framework (see Figure 1c). It puts a constraint to generate the same token sequence for \\(\\mathcal{X}\\) and \\(\\mathcal{X}'\\). We calculate the probability of \\(\\mathbf{x}_t\\) mapping to token \\(\\tau_t = k\\) as \\(l_{t,k} = \\frac{\\exp(f(\\mathbf{x}_t)\\cdot sg(e_k))}{\\sum_{i=1}^K \\exp(f(\\mathbf{x}_t)\\cdot sg(e_i))}\\). 
The likelihood \\(P(\\tilde{\\mathcal{T}}'|\\mathcal{X})\\) is then calculated as a sum of probabilities of all \\(T\\)-length paths \\(\\pi\\) over tokens \\(\\tau \\in \\mathbb{A}\\) such that \\(\\mathcal{C}(\\pi) = \\tilde{\\mathcal{T}}'\\). The loss is defined as," + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.492, + 0.826, + 0.527 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {c t c} (\\mathcal {X}, \\tilde {\\mathcal {T}} ^ {\\prime}) = - \\log \\sum_ {\\pi \\in C ^ {- 1} (\\tilde {\\mathcal {T}} ^ {\\prime})} P (\\pi | \\mathcal {X}) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.542, + 0.825, + 0.587 + ], + "angle": 0, + "content": "where the path probabilities are calculated over token probability scores in sequence \\(\\mathbf{l} = \\{l_t \\in R^K; t \\in [T]\\}\\) via CTC forward-backward framework (Graves et al., 2006) without the use of blanks. We present the CTC forward and backward variables for our use case in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.592, + 0.825, + 0.676 + ], + "angle": 0, + "content": "Clustering. We perform offline \\( K \\)-means clustering on a subset of encoder representations during initialization of our network and at regular intervals during training to set the discrete representations in codebook \\( E \\) of network \\( g \\). Initializing the clusters in this way prevents wav2tok from converging to a local optimum during the matching task, as is the case we found with random initialization of centroids. The intermittent clustering during training iteratively refines the discrete representations and prevents codebook collapse. We use the sklearn library to perform \\( K \\)-means clustering." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.682, + 0.825, + 0.711 + ], + "angle": 0, + "content": "We train wav2tok using the ADAM (Kingma & Ba, 2017) optimizer and a linear learning schedule with a learning rate of 0.001 and \\(8\\%\\) of the training steps as warm-up steps." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.731, + 0.328, + 0.745 + ], + "angle": 0, + "content": "6 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.762, + 0.825, + 0.805 + ], + "angle": 0, + "content": "We test the performance of tokens and encoder-generated continuous representations of wav2tok in audio retrieval. We perform Query by Humming (QbH) and Spoken Term Detection experiments to evaluate the performance of wav2tok in comparison to the baselines." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.821, + 0.557, + 0.836 + ], + "angle": 0, + "content": "6.1 MUSIC MELODY SEARCH: QUERY BY HUMMING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.847, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Task. Given a test query audio, we are to find the audio with the most similar melody in the search audio database." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Experiment Details. We use the MIR-QbSH dataset which is composed of 4431 humming audio recordings of \\(30s\\) duration corresponding to 48 songs. Each song is sung by several individuals. All individuals sing the same part of the song. 
The recordings have variations in the environments" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.259 + ], + "angle": 0, + "content": "they were recorded in, tonal qualities, voices, pitch, and time stretch. We train our models on hums of 40 songs in MIR-QbSH dataset and evaluate search performance on hums of the remaining 8 songs. The training dataset has 1970 hums for training and 676 for validation. The test dataset has 225 hums as a search database and 659 query hums. We evaluate the performance of our models in identifying which song a given query corresponds to via comparison with all sequences in the search database. Each model converts all the audio in our test dataset to sequences of tokens or representations. Each query sequence is compared to all sequences in the search database via Edit Distance (ED) (if tokens) or DTW (if representations). The song id of the most similar sequence in the search database is then selected as query song id. We calculate Mean Reciprocal Ranking (MRR) score with ground-truth song id of the queries for evaluation. The Reciprocal Ranking (RR) score is given as \\( \\frac{1}{r} \\) if the \\( r^{th} \\) most similar sequence in search database has same song id as query." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.264, + 0.825, + 0.309 + ], + "angle": 0, + "content": "All the audio recordings are converted to Short Term Fourier Transform (STFT) matrices before being passed as inputs to our models. The STFT matrices are computed with 513 frequency bins, a window length of 1024 samples (summarising 128 ms of audio), and hop length of 512 samples." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.326, + 0.406, + 0.34 + ], + "angle": 0, + "content": "6.2 SPOKEN TERM DETECTION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.354, + 0.713, + 0.37 + ], + "angle": 0, + "content": "Task. Given a test query audio, we are to detect its occurrence in a long utterance." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.374, + 0.827, + 0.529 + ], + "angle": 0, + "content": "Experiment Details. We use the TIMIT dataset which is composed of 6300 utterances of English speech with time-aligned word transcriptions. We choose 59 most-occurring words with more than 2 characters as keywords and all others as non-keywords. We use utterances of random sentences formed with 6 words sampled from a subset of 25 keywords for training and evaluation on STD experiments for the detection of the remaining 34 keywords. The test dataset is composed of 337 utterances corresponding to the 34 queries and 100 long utterances per query, with half containing a single occurrence of query amongst non-keywords and the other half containing only non-keywords. Given a query and a long utterance, we convert both to sequences of tokens using each audio tokenizer. We perform approximate string matching (Hall & Dowling, 1980) for detection of query in the utterance. The STFT matrix inputs to the models are computed with 185 frequency bins, a window length of 368 samples (summarising \\(23\\mathrm{ms}\\) of audio), and a hop length of 92 samples." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.548, + 0.295, + 0.562 + ], + "angle": 0, + "content": "6.3 BASELINES" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.574, + 0.825, + 0.679 + ], + "angle": 0, + "content": "Triplet. We train encoder \\( f: \\mathbb{X} \\mapsto \\mathbb{Z} \\) to generate L-2 normalized continuous representations for retrieval. 
Encoder \\( f \\) is trained via optimizing the triplet Loss (Schroff et al., 2015) as done in training an NNFP in Now Playing (Arcas et al., 2017). Given pair of similar sequences \\( (\\mathcal{X}, \\mathcal{X}') \\), encoder \\( f \\) generates sequences \\( \\mathcal{Z} \\) and \\( \\mathcal{Z}' \\). We form a mini-batch of size \\( N \\) of triplets \\( \\{\\mathbf{z}, \\mathbf{z}^+, \\mathbf{z}^-\\} \\) where representation \\( \\mathbf{z} \\) is sampled from sequence \\( \\mathcal{Z} \\), \\( \\mathbf{z}^+ \\) and \\( \\mathbf{z}^- \\) are positive and negative samples respectively for \\( \\mathbf{z} \\) sampled from sequence \\( \\mathcal{Z}' \\). The loss is defined as, \\( \\mathcal{L}_{\\text{Triplet}} = \\sum_{i=1}^{N} \\max \\{||\\mathbf{z}_i - \\mathbf{z}_i^+|| - ||\\mathbf{z}_i - \\mathbf{z}_i^-|| + m, 0\\} \\), where \\( m \\) is a margin for similarity." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.683, + 0.825, + 0.781 + ], + "angle": 0, + "content": "MIPS. We train encoder \\( f: \\mathbb{X} \\mapsto \\mathbb{Z} \\) to generate L-2 normalized continuous representations for retrieval. Encoder \\( f \\) is trained via simulation of MIPS (Mussmann & Ermon, 2016) on mini-batches of representations as proposed by (Chang et al., 2020). Given pair of similar sequences \\( (\\mathcal{X}, \\mathcal{X}') \\), encoder \\( f \\) generates sequences \\( \\mathcal{Z} \\) and \\( \\mathcal{Z}' \\). We form a mini-batch of size \\( N \\) of pairs of \\( \\{\\mathbf{z}, \\mathbf{z}^+\\} \\) where encoder generated representation \\( \\mathbf{z} \\) is sampled from sequence \\( \\mathcal{Z} \\) and \\( \\mathbf{z}^+ \\) is a positive for \\( \\mathbf{z} \\) sampled from \\( \\mathcal{Z}' \\). The loss is defined as, \\( \\mathcal{L}_{\\mathrm{MIPS}} = -\\sum_{i=1}^{N} \\log \\frac{\\exp(\\mathbf{z}_i \\cdot \\mathbf{z}_i^+)}{\\sum_{j \\neq i} (\\exp(\\mathbf{z}_i \\cdot \\mathbf{z}_j^+) + \\exp(\\mathbf{z}_i \\cdot \\mathbf{z}_j))} \\)."
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.785, + 0.827, + 0.926 + ], + "angle": 0, + "content": "wav2vec2. We train our audio tokenizer via wav2vec 2.0 (Baevski et al., 2020) learning framework. Quantizer \\( g \\) in our audio tokenizer is chosen to be a Gumbel Softmax-based Vector Quantizer (See Appendix C for details) as used in (Baevski et al., 2020) but with a single codebook with \\( K \\) members. Given sequence \\( \\mathcal{X} \\), encoder \\( f \\) outputs sequence of L-2 normalised representations \\( \\mathcal{Z} \\) of length \\( T \\). Quantizer \\( g \\) outputs sequence of discrete representations \\( \\mathcal{Q} = \\{q_t = g(z_t \\in \\mathcal{Z}); t = 1, \\dots, T\\} \\). We mask spans of 10 time steps with random starting indices in sequence \\( \\mathcal{Z} \\) and then pass the new sequence to a transformer network \\( h: \\mathbb{Z} \\mapsto \\mathbb{O} \\) which generates a sequence of contextualized representations \\( \\mathcal{O} = \\{\\mathbf{o}_t = h(z_t \\in \\mathcal{Z}); t = 1, \\dots, T\\} \\). For transformer output \\( \\mathbf{o}_t \\) over masked time step \\( t \\), we identify the true discrete representation \\( \\mathbf{q}_t \\) from a set \\( D_t \\) composed of \\( \\mathbf{q}_t \\) and \\( D \\) distractors which are discrete representations sampled from other time steps. 
The loss is defined as," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.101, + 0.825, + 0.138 + ], + "angle": 0, + "content": "\\(\\mathcal{L}_w(\\mathbf{o}_t,\\mathcal{D}_t) = -\\log \\frac{\\exp(sim(\\mathbf{o}_t,\\mathbf{q}_t))}{\\sum_{\\tilde{q}\\in\\mathcal{D}_t}\\exp(sim(\\mathbf{o}_t,\\tilde{q}))} + \\mathcal{L}_d\\) where \\(sim(a,b) = \\frac{a^Tb}{||a||||b||}\\) is cosine similarity and \\(\\mathcal{L}_d\\) is codebook diversity loss." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.144, + 0.827, + 0.29 + ], + "angle": 0, + "content": "wav2vec2P. We train wav2vec2 audio tokenizer with our variation of wav2vec 2.0 (Baevski et al., 2020) learning framework which learns discrete representations from pairs of similar sequences. Given pair \\((\\mathcal{X},\\mathcal{X}^{\\prime})\\), encoder \\(f\\) outputs sequences \\(\\mathcal{Z}\\) of length \\(T\\) and \\(\\mathcal{Z}'\\) of length \\(T'\\) respectively. Assuming \\(T\\leq T'\\), we generate sequence \\(\\mathcal{Z}^+\\) of length \\(T\\) whose \\(t\\) time step element \\(\\mathbf{z}_t^+\\) is a positive for \\(\\mathbf{z}_t\\in \\mathcal{Z}\\) sampled from sequence \\(\\mathcal{Z}'\\). Gumbel Softmax-based Vector Quantizer \\(g\\) quantizes each representation in sequence \\(\\mathcal{Z}^+\\) to generate sequence \\(\\mathcal{Q}^+\\). We mask sequence \\(\\mathcal{Z}\\) and \\(\\mathcal{Z}^+\\) at the same time steps. Transformer \\(h\\) inputs masked sequences and generate sequences \\(\\mathcal{O}\\) and \\(\\mathcal{O}^+\\). 
For masked time step \\(t\\), we use transformer output \\(\\mathbf{o}_t\\) to identify \\(\\mathbf{q}_t^+\\in \\mathcal{Q}^+\\) from set \\(\\mathcal{D}_t^+\\) with distractors sampled from sequence \\(\\mathcal{Q}^+\\) and transformer output \\(\\mathbf{o}_t^+\\) to identify \\(\\mathbf{q}_t\\in \\mathcal{Q}\\) from set \\(\\mathcal{D}_t\\) with distractors sampled from sequence \\(\\mathcal{Q}\\). The loss is defined as, \\(\\mathcal{L}_{wP} = \\mathcal{L}_w(\\mathbf{o}_t,\\mathcal{D}_t^+) + \\mathcal{L}_w(\\mathbf{o}_t^+, \\mathcal{D}_t)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.295, + 0.825, + 0.337 + ], + "angle": 0, + "content": "wav2vec2-O. The original wav2vec 2.0 base model with 12 Transformer blocks and \\(95M\\) parameters as proposed by (Baevski et al., 2020). It is pre-trained on 960 hours of LibriSpeech data and fine-tuned on TIMIT dataset. It uses \\(K = 32\\) tokens for tokenization." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.344, + 0.825, + 0.388 + ], + "angle": 0, + "content": "wav2vec2-Multi. A wav2vec 2.0 large model with 24 Transformer blocks and \\(317M\\) parameters pre-trained on 53 languages as proposed by (Conneau et al., 2020). It is fine-tuned on Common Voice to detect all possible phonemes in training languages with \\(K = 392\\) tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.393, + 0.826, + 0.492 + ], + "angle": 0, + "content": "Triplet and MIPS use a 2-layer BiLSTM as encoder with \\(3.6M\\) parameters. We use the LAMB optimizer (You et al., 2020) and a Cosine Annealing Learning Schedule (Loshchilov & Hutter, 2017) with a learning rate restart of 0.0001 to train them. wav2vec2 and wav2vec2P use a 2-layer BiLSTM encoder with \\(3.6M\\) parameters to generate latent representations and 3 Transformer blocks with \\(8.5M\\) parameters. 
Both are trained using the ADAM (Kingma & Ba, 2017) optimizer and a linear learning schedule with a learning rate of 0.001 and \\(8\\%\\) of the training steps as warm-up steps. Proposed wav2tok uses only a 2-layer BiLSTM as encoder with \\(3.6M\\) parameters." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.511, + 0.285, + 0.525 + ], + "angle": 0, + "content": "7 RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.542, + 0.555, + 0.558 + ], + "angle": 0, + "content": "7.1 MUSIC MELODY SEARCH: QUERY BY HUMMING" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.568, + 0.827, + 0.669 + ], + "angle": 0, + "content": "We present search performances for 3 settings of query namely- Query with no augmentation or Vanilla Query (V), Time Stretched Query (TS), and Pitch Shifted Query (PS). Time stretch and pitch shift are the most common augmentations that may be faced in queries by humming data. No augmentations were applied to audio in search database. Evaluations are performed on sequences corresponding to songs not seen during training. The results present the generalizability of the tokens or representations generated by the models. We set the number of tokens as \\( K = 25 \\) for wav2tok, wav2vec2, and wav2vec2P (See Appendix A.2 for experiments to support our choices)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.674, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Quality of Tokenization. Table 1 presents the performance of the sequence of tokens \\(\\tilde{\\mathcal{T}}\\) generated by the audio tokenizers on ED-based similarity search. Tokens generated by wav2tok present good generalization capabilities in terms of MRR and outperform all the baselines. It generates time and pitch invariant tokens as we see no drop in performance when either augmentation is applied to query. wav2vec2-O is trained on English speech only. The tokens generated by it do not contain much melodic information but are robust to augmentations. 
The multilingual training of wav2vec2-Multi infuses both melodic and phonetic information to its 392 tokens, thereby giving good performance. wav2tok outperforms both wav2vec2-O and wav2vec2-Multi given its pairwise training which allows it to infuse more melodic information to the tokens while also being trained on a small amount of unlabelled data. The Gumbel Softmax-based quantizer in wav2vec2 and wav2vec2P isn't ideal for infusing melodic information to tokens but it does infuse phonetic information as will be seen in Section 7.2. We compare the tokens with representations learned by MIPS and Triplet evaluated on DTW-based similarity search. The continuous representations present sub-par generalizations to unseen songs. We compare wav2tok with SOTA melody extraction algorithm proposed in (Salamon & Gomez, 2012) which converts hums to MIDI sequences. wav2tok generates token sequences much smaller than the respective MIDI sequences and outperforms the MIDI tokens in search performance, search time, and robustness. In addition, wav2tok outperforms the algorithm in inference time. We further compare wav2tok with SOTA QbH system proposed in (Mostafa &" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.177 + ], + "angle": 0, + "content": "Fung, 2017). In our implementation, we map audio to MIDI sequences using the aforementioned SOTA melody extraction algorithm instead of a CNN. Given MIDI sequence 53, 53, 58, 50 with durations 0s, 0.5s, 1s, 2s, a Relative Note sequence is generated as \\((0,0)\\), \\((0,0.5)\\), \\((5,1)\\), \\((-8,2)\\) over which DTW is performed for retrieval. 
wav2tok tokens outperform the SOTA QbH system in both performance and robustness; the performance of the latter drops drastically with time stretch." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.182, + 0.825, + 0.24 + ], + "angle": 0, + "content": "We present the performances of the uncompressed sequences \\(\\mathcal{T}\\) and \\(\\mathcal{Z}\\) and compressed sequence \\(\\tilde{\\mathcal{Z}}\\) generated by the audio tokenizers in Appendix A.1. We observe a drop in performance for all audio tokenizers when we apply sequence compression to sequences \\(\\mathcal{T}\\) and \\(\\mathcal{Z}\\). wav2tok outperforms all the baselines and generates superior-quality of continuous representations and discrete tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.245, + 0.827, + 0.332 + ], + "angle": 0, + "content": "Search Time. Table 1 presents the search time taken for similarity search over the tokens or representations generated by the models. The search time taken per query is 2 order of magnitude lesser for ED-based Search over compressed sequence of tokens \\(\\tilde{T}\\) than standard DTW-based Search over continuous representations \\(\\mathcal{Z}\\). The pre-trained models being fine-tuned on transcribed audio give the best tokens in terms of compression and search time. wav2tok gives comparable tokens but outperforms the pre-trained models in inference time." + }, + { + "type": "table_caption", + "bbox": [ + 0.228, + 0.343, + 0.446, + 0.358 + ], + "angle": 0, + "content": "Table 1: Quality of Tokenization" + }, + { + "type": "table_caption", + "bbox": [ + 0.516, + 0.344, + 0.825, + 0.358 + ], + "angle": 0, + "content": "Table 2: Ablation Studies and Some Variations" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.379, + 0.516, + 0.499 + ], + "angle": 0, + "content": "
ModelV (MRR)TS (MRR)PS (MRR)Search Time (s)Infer (s)
MIDI ED0.750.640.723.840.62
Relative Note DTW0.840.740.80.020.62
Triplet DTW0.50.480.53.50.1
MIPS DTW0.60.550.58
wav2vec2 ED0.660.630.640.060.17
wav2vec2P ED0.690.650.67
wav2vec2-O ED0.720.720.710.010.43
wav2vec2-Multi ED0.820.820.821.2
wav2tok ED0.840.840.840.040.14
" + }, + { + "type": "table", + "bbox": [ + 0.534, + 0.39, + 0.813, + 0.498 + ], + "angle": 0, + "content": "
ModelV (MRR)TS (MRR)PS (MRR)
log-mel DTW0.720.70.67
vq-log-mel ED0.710.60.62
wav2tok+NoSim ED0.730.730.72
wav2tok+Cos ED0.790.760.77
wav2tok+CTC ED0.640.620.63
wav2tok+NewInit ED0.770.760.78
wav2tok+MIR1K ED0.720.640.67
wav2tok ED0.840.840.84
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.513, + 0.827, + 0.68 + ], + "angle": 0, + "content": "Ablation Studies. Query by humming involves similarity based on melody information, which is carried by the semantic pairing of the audio in training data. We constrain this pairing to include sequences not semantically similar and call this model wav2tok+NoSim. We optimize the contrastive loss \\(\\mathcal{L}_m\\) to train the model. The results are shown in Table 2 (full table in Appendix A.3). There is a significant drop in token robustness and performance but the representations suffer a small drop (see Appendix A.3). Hence, although the representation space may be well clustered, wav2tok is able to add more semantics to the tokens as it is being trained with pairs of similar sequences in comparison to wav2tok+NoSim. We train wav2tok with cosine similarity scores instead of a parameterized score (wav2tok+Cos). The drop in performance validates the enhancement brought about by using a parameterized score. We also train wav2tok with \\(\\mathcal{L}_{ctc}\\) only (wav2tok+CTC). The CTC loss considers all possible paths which compress to the target label sequence. As a result, the learnt tokens aren't much semantic. The use of both losses gives the best tokens." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.687, + 0.827, + 0.812 + ], + "angle": 0, + "content": "Some Variations. In wav2tok+NewInit, we associate the discrete representations with \\(K\\) centroids in the input space \\(\\mathbb{X}\\). Such association does not initialize our tokenizer with optimal centroids which cluster the space \\(\\mathbb{Z}\\) perfectly. This results in a significant drop in performance and robustness as shown in Table 2. We train wav2tok on MIR-1K dataset (wav2tok+MIR1K) which is composed of polyphonic music recordings of 1000 distinct songs. The tokens generalize well to monophonic hums in MIR-QbSH dataset giving a comparable performance to MIDI tokens. 
This validates that wav2tok tokens do learn melodic information and are robust to variations incurred in hums. We further compare wav2tok with log-mel features and token sequences (with no compression) obtained via quantization of log-mel features. wav2tok tokens outperform both." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.829, + 0.406, + 0.843 + ], + "angle": 0, + "content": "7.2 SPOKEN TERM DETECTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Quality of Tokenization. Table 3 presents the quality of tokenization of the query keywords by the models evaluated in the Spoken Term Detection experiments. We present the performances of wav2vec2, wav2vec2P, wav2vec2-O, wav2vec2-Multi and proposed wav2tok. We conduct search experiments on a test dataset composed of a search database of 337 utterances of the 34 keywords used as queries in the STD experiments and 1289 query utterances. We identify the keyword to" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "which each query corresponds to via comparison to all the 337 utterances in the search database via ED-based similarity score. The word id of the most similar utterance is selected as the word to which the query corresponds to. We set \\( K = 40 \\) equivalent to the number of phonemes in English." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.153, + 0.827, + 0.322 + ], + "angle": 0, + "content": "wav2tok gives the best performance in terms of MRR scores. 
It outperforms huge models like wav2vec2-O and wav2vec2-Multi which are fine-tuned for the task of phonetic tokenization of speech audio while using a small number of parameters. wav2vec2 and wav2vec2P also outperform wav2vec2-Multi and wav2vec2-O while using smaller number of parameters. wav2vec2-O and wav2vec2-Multi use a blank token to handle consecutive occurrences of the same tokens and to label background noise. The utterances of each keyword in the test dataset are very small in time duration. This causes wav2vec2-O to confuse word utterances as background noise. It generates a sequence of blank tokens and performs poorly in search. wav2vec2-Multi using a larger number of phonetic tokens does not suffer this issue. wav2tok, wav2vec2, and wav2vec2P have no such blank token. This brings a drop in search performance with sequence compression. We further present the performance of wav2tok trained on a much larger LibriSpeech 100 hours dataset (wav2tok+Libri). It is able to outperform wav2vec2-O and give comparable performance to wav2vec2-Multi." + }, + { + "type": "table_caption", + "bbox": [ + 0.192, + 0.35, + 0.482, + 0.366 + ], + "angle": 0, + "content": "Table 3: Quality of Tokenization for speech" + }, + { + "type": "table", + "bbox": [ + 0.188, + 0.388, + 0.492, + 0.507 + ], + "angle": 0, + "content": "
ModelNormal (MRR)Compressed (MRR)
log-mel DTW0.7-
wav2vec2 ED0.680.63
wav2vec2P ED0.70.65
wav2vec2-O ED0.40.4
wav2vec2-Multi ED0.670.67
wav2tok ED0.740.66
wav2tok+Libri ED0.640.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.555, + 0.35, + 0.771, + 0.366 + ], + "angle": 0, + "content": "Table 4: Spoken Term Detection" + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.387, + 0.821, + 0.507 + ], + "angle": 0, + "content": "
ModelED(F1)Search Time(s)DTW(F1)Search Time(s)
log-mel DTW--0.410.003
wav2vec20.640.0660.460.1
wav2vec2P0.640.47
wav2vec2-O0.610.290.430.23
wav2vec2-Multi0.630.720.480.66
wav2tok0.650.0640.520.09
wav2tok+Libri0.630.440.1
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.537, + 0.827, + 0.704 + ], + "angle": 0, + "content": "Spoken Term Detection. We convert the query word utterance and the long utterance in to sequences of tokens by all our models and detect the occurrence of the query via approximate string matching. We use fuzzysearch library to perform approximate string matching. It automatically chooses the fastest algorithm for matching. Table 4 presents the performance of wav2vec2, wav2vec2P, wav2vec2-O, wav2vec2-Multi, and proposed wav2tok in STD. All the models give a comparable performance in terms of F1- score with wav2tok performing slightly better. We also implement the STD system proposed in (Anguera & Ferrarons, 2013) which performs highly competitive STD via subsequence DTW (S-DTW) over gaussian posterior features. In our implementation, we extract the posterior features with SOTA ASR models like wav2vec2-O and wav2vec2-Multi. The results are presented in the DTW column in Table 4. Note, the results for other models in same column are for STD via S-DTW over representations. We observe STD over tokens to give better F1-score." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.743, + 0.496, + 0.759 + ], + "angle": 0, + "content": "8 CONCLUSION AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.785, + 0.827, + 0.926 + ], + "angle": 0, + "content": "In this paper, we present an audio sequence tokenizer wav2tok that generates semantically meaningful ordered representations (or tokens) that can be used for efficient retrieval by query sequences. The model learns only from pairs of semantically similar sequences and outperforms state-of-the-art approaches for spoken term detection and query by humming. One may apply more efficient search algorithms such as locality-sensitive hashing and longest common subsequence search on the generated tokens to further speed up the search. 
The proposed framework can also be extended to image and video retrieval, as they also have spatial ordering. We would like to investigate the domain-specific, i.e., linguistic or musicological, aspects of the extracted tokens. For instance, during retrieval, the matching algorithm assumes all the tokens to be equidistant from each other. One may study or use the metric space of these tokens." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.364, + 0.119 + ], + "angle": 0, + "content": "9 REPRODUCIBILITY" + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.133, + 0.826, + 0.164 + ], + "angle": 0, + "content": "The codes are available in https://github.com/madhavlab/wav2tok. The experiments are performed using standard datasets." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.183, + 0.289, + 0.198 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.826, + 0.249 + ], + "angle": 0, + "content": "Xavier Anguera and Miquel Ferrarons. Memory efficient subsequence dtw for query-by-example spoken term detection. In 2013 IEEE International Conference on Multimedia and Expo (ICME), pp. 1-6, 2013. doi: 10.1109/ICME.2013.6607546." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.257, + 0.826, + 0.313 + ], + "angle": 0, + "content": "Blaise Agüera Arcas, Beat Gfeller, Ruiqi Guo, Kevin Kilgour, Sanjiv Kumar, James Lyon, Julian Odell, Marvin Ritter, Dominik Roblek, Matthew Sharifi, and Mihajlo Velimirovic. Now playing: Continuous low-power music recognition. CoRR, abs/1711.10958, 2017. URL http://arxiv.org/abs/1711.10958." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.322, + 0.826, + 0.363 + ], + "angle": 0, + "content": "Alexei Baevski, Steffen Schneider, and Michael Auli. vq-wav2vec: Self-supervised learning of discrete speech representations. CoRR, abs/1910.05453, 2019. URL http://arxiv.org/abs/1910.05453." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.373, + 0.826, + 0.415 + ], + "angle": 0, + "content": "Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. CoRR, abs/2006.11477, 2020. URL https://arxiv.org/abs/2006.11477." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.424, + 0.826, + 0.453 + ], + "angle": 0, + "content": "Alexei Baevski, Wei-Ning Hsu, Alexis Conneau, and Michael Auli. Unsupervised speech recognition. CoRR, abs/2105.11084, 2021. URL https://arxiv.org/abs/2105.11084." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.461, + 0.826, + 0.504 + ], + "angle": 0, + "content": "Sungkyun Chang, Donmoon Lee, Jeongsoo Park, Hyungui Lim, Kyogu Lee, Karam Ko, and Yoon-chang Han. Neural audio fingerprint for high-specific audio retrieval based on contrastive learning. CoRR, abs/2010.11910, 2020. URL https://arxiv.org/abs/2010.11910." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.512, + 0.826, + 0.554 + ], + "angle": 0, + "content": "Liqun Chen, Zhe Gan, Yu Cheng, Linjie Li, Lawrence Carin, and Jingjing Liu. Graph optimal transport for cross-domain alignment. 37th International Conference on Machine Learning, ICML 2020, PartF16814:1520-1531, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.563, + 0.826, + 0.605 + ], + "angle": 0, + "content": "Yu-An Chung and James Glass. Generative pre-training for speech with autoregressive predictive coding. In ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3497-3501, 2020. doi: 10.1109/ICASSP40776.2020.9054438." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.614, + 0.826, + 0.643 + ], + "angle": 0, + "content": "Yu-An Chung, Hao Tang, and James Glass. Vector-quantized autoregressive predictive coding. arXiv preprint arXiv:2005.08392, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.651, + 0.826, + 0.693 + ], + "angle": 0, + "content": "Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, and Michael Auli. Unsupervised cross-lingual representation learning for speech recognition. CoRR, abs/2006.13979, 2020. URL https://arxiv.org/abs/2006.13979." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.702, + 0.826, + 0.744 + ], + "angle": 0, + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805, 2018. URL http://arxiv.org/abs/1810.04805." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.753, + 0.826, + 0.795 + ], + "angle": 0, + "content": "John S Garofolo, Lori F Lamel, William M Fisher, Jonathan G Fiscus, and David S Pallett. Darpa timit acoustic-phonetic continuous speech corpus cd-rom. nist speech disc 1-1.1. NASA STI/Recon technical report n, 93:27403, 1993." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.804, + 0.826, + 0.846 + ], + "angle": 0, + "content": "Marco Gori, Gabriele Monfardini, and Franco Scarselli. A new model for learning in graph domains. In Proceedings. 2005 IEEE International Joint Conference on Neural Networks, 2005., volume 2, pp. 729-734. IEEE, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.855, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Alex Graves, Santiago Fernández, Faustino Gomez, and Jürgen Schmidhuber. Connectionist temporal classification: Labelling unsegmented sequence data with recurrent neural networks. In Proceedings of the 23rd International Conference on Machine Learning, ICML '06, pp. 369-376, New York, NY, USA, 2006. 
Association for Computing Machinery. ISBN 1595933832. doi: 10.1145/1143844.1143891. URL https://doi.org/10.1145/1143844.1143891." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.206, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "Patrick AV Hall and Geoff R Dowling. Approximate string matching. ACM computing surveys (CSUR), 12(4):381-402, 1980." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.142, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. CoRR, abs/2106.07447, 2021. URL https://arxiv.org/abs/2106.07447." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.209, + 0.752, + 0.226 + ], + "angle": 0, + "content": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.234, + 0.826, + 0.305 + ], + "angle": 0, + "content": "Naoko Kosugi, Yuichi Nishihara, Tetsuo Sakata, Masashi Yamamuro, and Kazuhiko Kushima. A practical query-by-humming system for a large music database. In Proceedings of the Eighth ACM International Conference on Multimedia, MULTIMEDIA '00, pp. 333-342, New York, NY, USA, 2000. Association for Computing Machinery. ISBN 1581131984. doi: 10.1145/354384.354520. URL https://doi.org/10.1145/354384.354520." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.314, + 0.826, + 0.371 + ], + "angle": 0, + "content": "John D. 
Lafferty, Andrew McCallum, and Fernando C. N. Pereira. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01, pp. 282-289, San Francisco, CA, USA, 2001. Morgan Kaufmann Publishers Inc. ISBN 1558607781." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.381, + 0.826, + 0.424 + ], + "angle": 0, + "content": "Lin-shan Lee, James Glass, Hung-yi Lee, and Chun-an Chan. Spoken content retrieval—beyond cascading speech recognition with text retrieval. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23(9):1389-1420, 2015. doi: 10.1109/TASLP.2015.2438543." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.433, + 0.826, + 0.476 + ], + "angle": 0, + "content": "Yujia Li, Chenjie Gu, Thomas Dullien, Oriol Vinyals, and Pushmeet Kohli. Graph matching networks for learning the similarity of graph structured objects. In International conference on machine learning, pp. 3835-3845. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.486, + 0.791, + 0.502 + ], + "angle": 0, + "content": "Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.51, + 0.826, + 0.581 + ], + "angle": 0, + "content": "Jonathan Mamou, Jia Cui, Xiaodong Cui, Mark J. F. Gales, Brian Kingsbury, Kate Knill, Lidia Mangu, David Nolden, Michael Picheny, Bhavana Ramabhadran, Ralf Schlüter, Abhinav Sethy, and Philip C. Woodland. System combination and score normalization for spoken term detection. In 2013 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 8272-8276, 2013. doi: 10.1109/ICASSP.2013.6639278." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.591, + 0.826, + 0.634 + ], + "angle": 0, + "content": "Annamaria Mesaros and Tuomas Virtanen. Recognition of phonemes and words in singing. 
In 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2146-2149, 2010. doi: 10.1109/ICASSP.2010.5495585." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.644, + 0.824, + 0.673 + ], + "angle": 0, + "content": "Naziba Mostafa and Pascale Fung. A note based query by humming system using convolutional neural network. In INTERSPEECH, pp. 3102-3106, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.683, + 0.826, + 0.753 + ], + "angle": 0, + "content": "Naziba Mostafa, Yan Wan, Unnayan Amitabh, and Pascale Fung. A machine learning based music retrieval and recommendation system. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pp. 1970-1977, Porto Roz, Slovenia, May 2016. European Language Resources Association (ELRA). URL https://aclanthology.org/L16-1312." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.763, + 0.824, + 0.791 + ], + "angle": 0, + "content": "Meinard Müller. Dynamic time warping. Information retrieval for music and motion, pp. 69-84, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.801, + 0.826, + 0.873 + ], + "angle": 0, + "content": "Stephen Mussmann and Stefano Ermon. Learning and inference via maximum inner product search. In Maria Florina Balcan and Kilian Q. Weinberger (eds.), Proceedings of The 33rd International Conference on Machine Learning, volume 48 of Proceedings of Machine Learning Research, pp. 2587-2596, New York, New York, USA, 20-22 Jun 2016. PMLR. URL https://proceedings.mlr.press/v48/mussmann16.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: an asr corpus based on public domain audio books. In 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5206-5210. IEEE, 2015." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.133 + ], + "angle": 0, + "content": "L. Rabiner and B. Juang. An introduction to hidden markov models. IEEE ASSP Magazine, 3(1): 4-16, 1986. doi: 10.1109/MASSP.1986.1165342." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.148, + 0.826, + 0.192 + ], + "angle": 0, + "content": "Dhananjay Ram, Afsaneh Asaei, and Hervé Bourlard. Sparse subspace modeling for query by example spoken term detection. IEEE ACM Trans. Audio Speech Lang. Process., 26(6):1126-1139, 2018. URL https://doi.org/10.1109/TASLP.2018.2815780." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.208, + 0.826, + 0.251 + ], + "angle": 0, + "content": "Shivangi Ranjan and Vipul Arora. A bioinformatic method of semi-global alignment for query-by-humming. In 2020 IEEE 4th Conference on Information Communication Technology (CICT), pp. 1-5, 2020. doi: 10.1109/CICT51604.2020.9312085." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.267, + 0.826, + 0.323 + ], + "angle": 0, + "content": "Luis J. Rodriguez-Fuentes, Amparo Varona, Mikel Penagarikano, Germán Bordel, and Mireia Diez. High-performance query-by-example spoken term detection on the sws 2013 evaluation. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7819-7823, 2014. doi: 10.1109/ICASSP.2014.6855122." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.339, + 0.826, + 0.382 + ], + "angle": 0, + "content": "Justin Salamon and Emilia Gómez. Melody extraction from polyphonic music signals using pitch contour characteristics. 
IEEE transactions on audio, speech, and language processing, 20(6): 1759-1770, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.398, + 0.826, + 0.441 + ], + "angle": 0, + "content": "Steffen Schneider, Alexei Baevski, Ronan Collobert, and Michael Auli. wav2vec: Unsupervised pre-training for speech recognition. CoRR, abs/1904.05862, 2019. URL http://arxiv.org/abs/1904.05862." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.457, + 0.826, + 0.499 + ], + "angle": 0, + "content": "Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. CoRR, abs/1503.03832, 2015. URL http://arxiv.org/abs/1503.03832." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.516, + 0.826, + 0.546 + ], + "angle": 0, + "content": "M. Schuster and K.K. Paliwal. Bidirectional recurrent neural networks. IEEE Transactions on Signal Processing, 45(11):2673-2681, 1997. doi: 10.1109/78.650093." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.561, + 0.826, + 0.604 + ], + "angle": 0, + "content": "Erdem Unal, Elaine Chew, Panayiotis G. Georgiou, and Shrikanth S. Narayanan. Challenging uncertainty in query by humming systems: A fingerprinting approach. IEEE Transactions on Audio, Speech, and Language Processing, 16(2):359-371, 2008. doi: 10.1109/TASL.2007.912373." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.62, + 0.826, + 0.65 + ], + "angle": 0, + "content": "Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. CoRR, abs/1711.00937, 2017. URL http://arxiv.org/abs/1711.00937." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.665, + 0.826, + 0.695 + ], + "angle": 0, + "content": "Aäron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. CoRR, abs/1807.03748, 2018. URL http://arxiv.org/abs/1807.03748." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.71, + 0.826, + 0.74 + ], + "angle": 0, + "content": "Ting Yao, Yingwei Pan, Yehao Li, and Tao Mei. Exploring visual relationship for image captioning. In Proceedings of the European conference on computer vision (ECCV), pp. 684-699, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.755, + 0.826, + 0.798 + ], + "angle": 0, + "content": "Penghang Yin, Jiancheng Lyu, Shuai Zhang, Stanley J. Osher, Yingyong Qi, and Jack Xin. Understanding straight-through estimator in training activation quantized neural nets. CoRR, abs/1903.05662, 2019. URL http://arxiv.org/abs/1903.05662." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.814, + 0.826, + 0.857 + ], + "angle": 0, + "content": "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.873, + 0.826, + 0.916 + ], + "angle": 0, + "content": "Yaodong Zhang and James R. Glass. Unsupervised spoken keyword spotting via segmental dtw on gaussian posteriograms. In 2009 IEEE Workshop on Automatic Speech Recognition Understanding, pp. 398-403, 2009. doi: 10.1109/ASRU.2009.5372931." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.916 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.371, + 0.119 + ], + "angle": 0, + "content": "A FURTHER STUDIES" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.138, + 0.403, + 0.153 + ], + "angle": 0, + "content": "A.1 SEQUENCE COMPRESSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.166, + 0.825, + 0.21 + ], + "angle": 0, + "content": "We present the quality of sequence of tokens \\(\\mathcal{T}\\) and sequence of representations \\(\\mathcal{Z}\\) and their corresponding compressed versions sequences \\(\\widetilde{\\mathcal{T}}\\) and \\(\\widetilde{\\mathcal{Z}}\\) generated by the audio tokenizers in Table 5." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.217, + 0.827, + 0.292 + ], + "angle": 0, + "content": "wav2tok outperformed the baselines and generated the best quality of sequences \\(\\mathcal{T}\\), \\(\\mathcal{Z}\\), \\(\\widetilde{\\mathcal{T}}\\) and \\(\\widetilde{\\mathcal{Z}}\\). Sequence compression brings an order of magnitude drop in search time for all the audio tokenizers with a trade-off in search performance. Compression from \\(\\mathcal{T}\\) to \\(\\widetilde{\\mathcal{T}}\\) increases the robustness of the token sequences generated by wav2tok to various augmentations. wav2vec2P learnt better tokens and representations than wav2vec2 because of its pairwise training on similar audio." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.231, + 0.309, + 0.766, + 0.325 + ], + "angle": 0, + "content": "Table 5: Compression of Sequences: MRR scores for query by humming, \\( \\mathrm{K} = {25} \\)" + }, + { + "type": "table", + "bbox": [ + 0.304, + 0.348, + 0.695, + 0.564 + ], + "angle": 0, + "content": "
ModelVTSPSSearch Time
Without Compression
wav2vec2 DTW0.850.840.84
wav2vec2P DTW0.870.850.873.5s
wav2tok DTW0.920.890.93
wav2vec2 ED0.720.690.730.68s
wav2vec2P ED0.750.710.73
wav2tok ED0.90.840.90.32s
With Compression
wav2vec2 DTW0.760.720.740.8s
wav2vec2P DTW0.810.770.79
wav2tok DTW0.880.880.870.6s
wav2vec2 ED0.660.630.640.06s
wav2vec2P ED0.690.650.67
wav2tok ED0.840.840.840.04s
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.597, + 0.484, + 0.611 + ], + "angle": 0, + "content": "A.2 VARIATION IN NUMBER OFTOKENS \\(K\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.625, + 0.825, + 0.68 + ], + "angle": 0, + "content": "The effect of varying the size of alphabet \\(\\mathbb{A}\\) is shown in Table 6. We train wav2vec2, wav2vec2P, and proposed wav2tok with alphabets of size \\(K\\in \\{15,25,40\\}\\). Out of the three settings for \\(K\\), \\(K = 25\\) gives the best performance for all models. wav2tok gives best performance for all settings of \\(K\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.705, + 0.53, + 0.719 + ], + "angle": 0, + "content": "A.3 ABLATION STUDIES AND SOME VARIATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.733, + 0.827, + 0.777 + ], + "angle": 0, + "content": "We present the full version of Table 2 in table 7. Note wav2tok+NoSim representations are well clustered. wav2tok+Trans representations are also comparable with wav2tok but the tokens are of lesser quality. This is due to model overfitting." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.799, + 0.444, + 0.813 + ], + "angle": 0, + "content": "A.4 QUALITY OF REPRESENTATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.825, + 0.924 + ], + "angle": 0, + "content": "We present the performance of the continuous representations generated by wav2tok and the baselines in Table 8. wav2tok generates the best representations for music outperforming representations generated by the large wav2vec 2.0 models. wav2tok trained on MIR1K generates representations outperforming domain-specific QbH baselines. Note, wav2vec2-O outperforms wav2vec2-Multi as the hums in the dataset were all in english. wav2vec2-O is pre-trained and fine-tuned on English speech only while wav2vec2-Multi is pre-trained multilingually. Hence, wav2vec2-O gave better results." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.282, + 0.101, + 0.719, + 0.119 + ], + "angle": 0, + "content": "Table 6: Effect of varying \\( K \\) : MRR scores for query by humming" + }, + { + "type": "table", + "bbox": [ + 0.268, + 0.14, + 0.732, + 0.482 + ], + "angle": 0, + "content": "
ModelsWithout CompressionWith Compression
VTSPSVTSPS
K=15
wav2vec2 DTW0.850.830.840.70.660.67
wav2vec2P DTW0.870.850.850.820.770.8
wav2tok DTW0.880.870.880.840.80.83
wav2vec2 ED0.790.770.780.580.560.57
wav2vec2P ED0.80.770.790.710.680.7
wav2tok ED0.820.80.810.770.750.76
K=25
wav2vec2 DTW0.850.840.840.760.720.74
wav2vec2P DTW0.870.850.870.810.770.79
wav2tok DTW0.920.890.930.880.880.87
wav2vec2 ED0.720.690.730.660.630.64
wav2vec2P ED0.750.710.730.690.650.67
wav2tok ED0.90.840.90.840.840.84
K=40
wav2vec2 DTW0.840.820.830.720.680.7
wav2vec2P DTW0.860.850.850.810.770.79
wav2tok DTW0.90.880.890.860.830.83
wav2vec2 ED0.710.660.690.60.580.58
wav2vec2P ED0.730.70.730.680.650.67
wav2tok ED0.830.80.820.770.750.76
" + }, + { + "type": "table_caption", + "bbox": [ + 0.223, + 0.484, + 0.776, + 0.5 + ], + "angle": 0, + "content": "Table 7: Ablation Studies and Some Variations: MRR scores for query by humming" + }, + { + "type": "table", + "bbox": [ + 0.246, + 0.523, + 0.754, + 0.764 + ], + "angle": 0, + "content": "
ModelsWithout CompressionWith Compression
VTSPSVTSPS
log-mel DTW0.720.690.670.540.470.43
wav2tok+NoSim DTW0.880.870.870.80.840.83
wav2tok+Cos DTW0.880.870.870.830.810.81
wav2tok+NewInit DTW0.90.840.910.840.850.83
wav2tok+Trans DTW0.840.770.850.80.770.76
wav2tok+MIR1K DTW0.880.840.850.820.740.78
wav2tok DTW0.920.890.930.880.880.87
vq-log-mel ED0.710.60.620.520.480.47
wav2tok+NoSim ED0.850.740.840.730.730.72
wav2tok+Cos ED0.860.840.850.790.760.77
wav2tok+NewInit ED0.830.720.850.770.760.78
wav2tok+Trans ED0.840.770.850.70.660.67
wav2tok+MIR1K ED0.760.660.710.720.640.67
wav2tok ED0.90.840.90.840.840.84
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.779, + 0.504, + 0.793 + ], + "angle": 0, + "content": "A.5 TRAINING ON LARGER SPEECH DATASET" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.827, + 0.863 + ], + "angle": 0, + "content": "We train wav2tok on 100-hours subset of LibriSpeech (Panayotov et al., 2015) dataset. We evaluate the quality of tokenization of word utterances done by wav2tok on TIMIT (Garofolo et al., 1993) dataset. We use a 2-layer BiLSTM network with 3.6 million parameters as encoder network which takes MFCC feature sequences as input. We perform tokenization with \\( K = 40 \\) tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.869, + 0.828, + 0.926 + ], + "angle": 0, + "content": "wav2tok outperforms wav2vec2-O by a large margin and gives comparable performance to wav2vec2-Multi in terms of MRR score. wav2tok uses a minute number of parameters in comparison to 95 million parameters in wav2vec2-O and 317 million parameters in wav2vec2-Multi. Note, wav2vec2-O and wav2vec2-Multi were pre-trained on large amount of unlabelled speech data and" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "table_caption", + "bbox": [ + 0.259, + 0.101, + 0.744, + 0.119 + ], + "angle": 0, + "content": "Table 8: Quality of Representations: MRR scores for query by humming" + }, + { + "type": "table", + "bbox": [ + 0.292, + 0.154, + 0.709, + 0.285 + ], + "angle": 0, + "content": "
ModelVTSPS
(Salamon & Gómez, 2012) MIDI ED0.750.640.72
(Mostafa & Fung, 2017) Note DTW0.840.740.8
Triplet DTW0.50.480.5
MIPS DTW0.60.550.58
wav2vec2-O DTW0.910.830.86
wav2vec2-Multi DTW0.880.830.85
wav2tok DTW0.920.90.93
wav2tok+MIR1K DTW0.880.840.85
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.3, + 0.827, + 0.357 + ], + "angle": 0, + "content": "fine-tuned with transcription to perform tokenization of audio. Moreover, wav2vec2-O was fine-tuned to perform tokenization on TIMIT (Garofolo et al., 1993) dataset. Proposed wav2tok was trained on 100 hours of LibriSpeech dataset only. The tokens learnt by wav2tok on LibriSpeech (Panayotov et al., 2015) dataset generalised well to TIMIT (Garofolo et al., 1993)." + }, + { + "type": "table_caption", + "bbox": [ + 0.304, + 0.37, + 0.694, + 0.385 + ], + "angle": 0, + "content": "Table 9: Quality of Tokenization for speech (MRR Scores)" + }, + { + "type": "table", + "bbox": [ + 0.3, + 0.41, + 0.7, + 0.474 + ], + "angle": 0, + "content": "
ModelNormal (T)Compressed (T)
wav2vec2-O ED0.40.4
wav2vec2-Multi ED0.670.67
wav2tok+Libri ED0.640.6
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.504, + 0.414, + 0.52 + ], + "angle": 0, + "content": "B CTC WITHOUT BLANKS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.536, + 0.825, + 0.568 + ], + "angle": 0, + "content": "We present the forward and backward variables used in calculating the gradients of the CTC loss \\(\\mathcal{L}_{ctc}(\\mathcal{X},\\tilde{\\mathcal{T}}^{\\prime})\\) with no blank tokens." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.573, + 0.407, + 0.587 + ], + "angle": 0, + "content": "The forward variable is defined as," + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.596, + 0.826, + 0.643 + ], + "angle": 0, + "content": "\\[\n\\alpha_ {t} (s) = \\sum_ {\\pi ; \\mathcal {C} (\\pi_ {1: t}) = \\tilde {\\mathcal {T}} _ {1: s} ^ {\\prime}} \\prod_ {t ^ {\\prime} = 1} ^ {t} l _ {t ^ {\\prime}, \\pi_ {t ^ {\\prime}}} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.653, + 0.825, + 0.683 + ], + "angle": 0, + "content": "where \\(\\pi\\) corresponds to all \\(T\\)-length paths over tokens such that \\(\\mathcal{C}(\\pi) = \\tilde{T}'\\). Here, \\(\\mathcal{C}\\) is a compressor which compresses \\(\\pi\\) a \\(T\\)-length sequence of tokens via de-duplication." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.688, + 0.334, + 0.702 + ], + "angle": 0, + "content": "We initialise as follows," + }, + { + "type": "equation", + "bbox": [ + 0.434, + 0.711, + 0.825, + 0.736 + ], + "angle": 0, + "content": "\\[\n\\alpha_ {1} (1) = l _ {1, \\tilde {\\tau} _ {1} ^ {\\prime}} \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.733, + 0.56, + 0.747 + ], + "angle": 0, + "content": "\\[\n\\alpha_ {1} (s) = 0, \\forall s > 1\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.765, + 0.402, + 0.781 + ], + "angle": 0, + "content": "and recursively calculate \\(\\alpha_{t}(s)\\) as," + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.802, + 0.826, + 0.821 + ], + "angle": 0, + "content": "\\[\n\\alpha_ {t} (s) = \\left(\\alpha_ {t - 1} (s) + \\alpha_ {t - 1} (s - 1)\\right) l _ {t, \\tilde {\\mathcal {T}} _ {s} ^ {\\prime}} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.833, + 0.351, + 0.849 + ], + "angle": 0, + "content": "We set \\(\\alpha_{t}(s) = 0, \\forall s < 1\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.855, + 0.416, + 0.87 + ], + "angle": 0, + "content": "The backward variable is defined as," + }, + { + "type": "equation", + "bbox": [ + 0.383, + 0.878, + 0.826, + 0.929 + ], + "angle": 0, + "content": "\\[\n\\beta_ {t} (s) = \\sum_ {\\pi ; \\mathcal {C} (\\pi_ {t: T}) = \\tilde {\\mathcal {T}} _ {s: | \\bar {\\mathcal {T}} ^ {\\prime} |} ^ {\\prime}} \\prod_ {t ^ {\\prime} = t} ^ {T} l _ {t ^ {\\prime}, \\pi_ {t ^ {\\prime}}} \\tag {8}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2023" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.104, + 0.334, + 0.118 + ], + "angle": 0, + "content": "We initialise as follows," + }, + { + "type": "equation", + "bbox": [ + 0.414, + 0.126, + 0.825, + 0.154 + ], + "angle": 0, + "content": "\\[\n\\beta_ {T} \\left(\\left| \\tilde {\\mathcal {T}} ^ {\\prime} \\right|\\right) = l _ {T, \\tilde {\\mathcal {T}} _ {| \\tilde {\\mathcal {T}} ^ {\\prime} |} ^ {\\prime}} \\tag {9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.437, + 0.154, + 0.582, + 0.169 + ], + "angle": 0, + "content": "\\[\n\\beta_ {T} (s) = 0, \\forall s < | \\tilde {T} ^ {\\prime} |\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.178, + 0.4, + 0.193 + ], + "angle": 0, + "content": "and recursively calculate \\(\\beta_t(s)\\) as," + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.2, + 0.826, + 0.22 + ], + "angle": 0, + "content": "\\[\n\\beta_ {t} (s) = \\left(\\beta_ {t + 1} (s) + \\beta_ {t + 1} (s + 1)\\right) l _ {t, \\tilde {\\mathcal {T}} _ {s} ^ {\\prime}} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.234, + 0.367, + 0.252 + ], + "angle": 0, + "content": "We set \\(\\beta_{t}(s) = 0, \\forall s > |\\tilde{T}^{\\prime}|\\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.271, + 0.616, + 0.288 + ], + "angle": 0, + "content": "C GUMBEL SOFTMAX BASED VECTOR QUANTIZER" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.303, + 0.825, + 0.373 + ], + "angle": 0, + "content": "The Gumbel Softmax based Vector Quantizer (Baevski et al., 2019) quantizes input latent representation \\( z_{t} \\in R^{m} \\) with \\( C \\) codebooks containing \\( K \\) quantizers \\( e \\in R^{K \\times \\frac{m}{C}} \\) each. For our experiments, we set \\( C = 1 \\) and \\( K \\in \\{15, 25, 40\\} \\). Given \\( \\mathbf{z}_{t} \\), one of the \\( K \\) quantizers from each of the \\( C \\) codebooks are chosen resulting in vectors \\( e_{1}, \\dots, e_{C} \\). The codebook vectors are then concatenated and linearly transformed from \\( R^{m} \\) to \\( R^{d} \\) to output a discrete representation \\( q_{t} \\in R^{d} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.379, + 0.825, + 0.409 + ], + "angle": 0, + "content": "\\(\\mathbf{z}_t\\) is mapped to \\(\\mathbf{l} \\in R^{C \\times K}\\) logits to give probability scores for the choice of codeword. The probability \\(p_{c,k}\\) of choosing \\(k^{th}\\) quantizer in \\(c^{th}\\) codebook is given as," + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.425, + 0.826, + 0.462 + ], + "angle": 0, + "content": "\\[\np _ {c, k} = \\frac {\\exp \\left(l _ {c , k} + n _ {k}\\right) / \\tau}{\\sum_ {i = 1} ^ {K} \\exp \\left(l _ {c , i} + n _ {i}\\right) / \\tau} \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.464, + 0.825, + 0.494 + ], + "angle": 0, + "content": "where \\(\\tau\\) is a non-negative temperature, \\(n = -\\log (-\\log (u))\\) and \\(u\\) are samples from the uniform distribution \\(\\mathbf{Unif}(0,1)\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.5, + 0.825, + 0.543 + ], + "angle": 0, + "content": "During forward pass, the codeword is chosen as \\(\\kappa = \\arg \\max_{j} p_{c,j}\\). 
During backward pass, the loss is calculated over the gumble softmax distribution \\(p\\). We use the straight-through gradient estimator (Yin et al., 2019) to estimate the gradient." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.827, + 0.592 + ], + "angle": 0, + "content": "Codebook Diversity Loss \\(\\mathcal{L}_d\\). This loss promotes equal use of all the entries in each of the \\(C\\) codebooks. Minimization of this loss maximizes the entropy of the averaged softmax distribution \\(\\tilde{p}\\) over the \\(K\\) entries for each codebook \\(\\tilde{p}_c\\) across a batch of utterances." + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.598, + 0.826, + 0.64 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {d} = \\frac {1}{C K} \\sum_ {c = 1} ^ {C} \\sum_ {k = 1} ^ {K} \\tilde {p} _ {c, k} \\log \\tilde {p} _ {c, k} \\tag {12}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "16" + } + ] +] \ No newline at end of file diff --git a/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_origin.pdf b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c99ea4942da7b5132411557795e3785034202e75 --- /dev/null +++ b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/fdaea0fe-1baa-4dee-8dce-c076dd80d99a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7700e0390e1f824e080d45babc67f0754be850a683d45db77b35612fdb2900f7 +size 417216 diff --git a/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/full.md b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8abdf8c30d249127bd3b6ed4773211ccad0eb3ba --- /dev/null +++ b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/full.md @@ -0,0 +1,378 
@@ +# WAV2TOK: DEEP SEQUENCETOKENIZER FOR AUDIO RETRIEVAL + +Adhiraj Banerjee, Vipul Arora + +Department of Electrical Engineering + +Indian Institute of Technology Kanpur, India + +{adhiraj,vipular}@iitk.ac.in + +# ABSTRACT + +Search over audio sequences is a fundamental problem. In this paper, we propose a method to extract concise discrete representations for audio that can be used for efficient retrieval. Our motivation comes from orthography which represents speech of a given language in a concise and distinct discrete form. The proposed method, wav2tok, learns such representations for any kind of audio, speech or non-speech, from pairs of similar audio. wav2tok compresses the query and target sequences into shorter sequences of tokens that are faster to match. The learning method makes use of CTC loss and expectation-maximization algorithm, which are generally used for supervised automatic speech recognition and for learning discrete latent variables, respectively. Experiments show the consistent performance of wav2tok across two audio retrieval tasks: music search (query by humming) and speech search via audio query, outperforming state-of-the-art baselines. + +# 1 INTRODUCTION + +Sequence Retrieval aims at retrieving sequences similar to a query sequence, with the constraint that an ordered alignment exists between the query and the target sequence. In this paper, we address the following problem: Can we extract discrete tokens from any continuous signal for the purpose of retrieval of similar signals? This problem has deep connections with tasks such as child language acquisition, music cognition and learning languages without written forms. Some direct applications of the proposed task include speech search, where the order of constituent units, such as phonemes, syllables or words, remains same; and music search – query by humming or query by example – where the order of constituent units, such as relative notes or phrases, remains same. 
Apart from audio, the problem extends to tasks such as handwritten word search and gesture search. + +One can define similarity metrics over sequences using methods based on Dynamic Time Warping (DTW) (Müller, 2007). These methods are inefficient if the sequences are continuous valued and have high sampling rates. Moreover, they depend on matching hand-made features, which are ineffective in the face of high variability of query sequences. + +Problems such as spoken term detection involve detection of a query utterance in a long speech audio. The search space is huge, and performing DTW based search of query takes long time (Rodriguez-Fuentes et al., 2014). A more efficient way of sequence retrieval is by mapping them to sequences of discrete tokens. Automatic speech recognition (ASR) can be employed for this purpose (Mamou et al., 2013). However, ASR training requires knowledge of basic units of transcription. The popularly used units are phonemes and graphemes. This method thus becomes language dependent. Non-linguistic sounds, such as cough and sneeze, could be mapped to certain tokens defined for them. This approach could not be used when precise tokens are not defined, e.g., music search. + +In query by humming based music search, audio is mapped to discrete melody-related tokens, such as notes, and these token sequence are matched for search (Unal et al., 2008). However, several music traditions do not have precise transcription systems. There, one can tell if two pieces, or motifs, are similar but cannot precisely transcribe them to tokens. The embellishments used in music could be too dynamic to be transcribed precisely. Moreover, when a musically untrained user sings a query, s/he cannot hit the right notes matching the target song. So the matching could rely on several factors other than notes, such as phonemes of lyrics (Mesaros & Virtanen, 2010), onset times + +(rhythm) (Kosugi et al., 2000), and note transitions (Ranjan & Arora, 2020). 
Hence, the tokens to be used may not be derived from notes alone. + +In this way, each tokenizer - for speech, music or other signals, in general - uses domain-specific hand-made tokens defined by a domain expert. In this paper, we propose a tokenizer to map audio sequences to sequences of discrete tokens with an aim of retrieval. The mapping is learned only from pairs of similar audio sequences. The tokens are not defined manually but correspond to distinct semantic units learned from pairs of similar audio sequences. The method is general and can be applied to signals other than audio. In this paper, we apply the proposed method to speech and music audio search, for the problems of spoken term detection and query by humming, respectively. + +The proposed method, named wav2tok, encodes audio via a BiLSTM (Schuster & Paliwal, 1997) network. The encoder-generated representations are then mapped to discrete tokens via a $K$ -means vector quantizer network. Each discrete token corresponds to a discrete representation in the vector quantizer's codebook which is initialized and updated via offline $K$ -means clustering only. + +wav2tok is trained with pairs of similar audio sequences in a self-supervised fashion without any transcription using a novel training algorithm. For each pair, we average the encoder-generated representations, which map to the same token, by the $K$ -means vector quantizer network to generate a prototype for that token. We then perform a contrastive learning task to increase the similarity between the generated prototype for a particular token and the quantizer codebook discrete representation corresponding to the same token. We simultaneously minimize the edit distance between the token sequences generated from each sequence in the pair via Connectionist temporal classification (CTC) (Graves et al., 2006) framework to constrain both sequences to get mapped to the same token sequence. 
+ +We compare wav2tok to state-of-the-art (SOTA) methods for discrete representation learning, such as wav2vec 2.0, and SOTA ASR models fine-tuned to perform phonetic tokenization. We evaluate the generalization capability of the tokens generated by the models on search experiments, namely, query-by-humming and spoken term detection. wav2tok outperforms the baselines in performance and uses much lesser trainable parameters, ensuring faster inference and deployment. + +# 2 RELATED WORK + +Sequence Labelling. With expert-defined tokens, various methods are popularly used for mapping sequences to tokens. In conventional methods, Hidden Markov Models (Rabiner & Juang, 1986) and Conditional Random Fields (Lafferty et al., 2001) have been popularly used for sequence labeling. These methods involve a significant amount of domain knowledge and many assumptions to make tractable models, which are avoided by End-to-End learning models such as Recurrent Neural Networks (RNNs) using Connectionist Temporal Classification framework (Graves et al., 2006). Sequence labeling can be used for sequence retrieval by converting the sequences to tokens, which are easy to search over. But this approach inevitably depends upon expert-defined tokens. + +Unsupervised Speech Representation Learning. Automatic Speech Recognition systems are pretrained on large amounts of untranscribed speech data to generate SOTA continuous representations which encode the slowly varying phoneme features in raw speech. The representations are then mapped to phoneme tokens via Connectionist Temporal Classification (CTC) (Graves et al., 2006) fine-tuning on a small amount of transcribed audio. Works like Contrastive Predictive Coding (CPC) (van den Oord et al., 2018), Autoregressive Predictive Coding (APC) (Chung & Glass, 2020), and wav2vec (Schneider et al., 2019) generate continuous representations with powerful autoregressive models pre-trained to predict future time-step representations. 
Further works started discretizing the continuous representations with vq-VAE (van den Oord et al., 2017) to generate discrete representations for speech. + +Works like vq-wav2vec (Baevski et al., 2019) and vq-APC (Chung et al., 2020) discretize the representations and perform the same prediction tasks as in wav2vec (Schneider et al., 2019) and APC (Chung & Glass, 2020) respectively but over discrete representations. In vq-wav2vec, the discrete representations are generated with either a K-Means Vector Quantizer (Baevski et al., 2019) or Gumbel-Softmax based Vector Quantizer (Baevski et al., 2019). The learned discrete representations are used to pre-train a BERT (Devlin et al., 2018) to generate stronger continuous representations much like BERT pre-training in Natural Language Processing. wav2vec 2.0 (Baevski et al., + +2020) uses a Gumbel Softmax based Vector Quantizer (Baevski et al., 2019) to generate discrete representations. The training involves masking of spans of time steps and then predicting the correct discrete representations at each masked time step with the transformer representation at that time step. In these methods, raw audio is discretized in a latent space to model all possible acoustic units rather than only phonetic or sub-phonetic units. The tokens generated by the vector quantizers aren't constrained to be interpretable and are initialized in large numbers ( $\sim$ 102.4K codes). After pre-training, a subset of these codes or tokens is chosen more often by the vector quantizers and is considered to represent acoustic units. CTC-based fine-tuning with transcription groups these discrete acoustic units into $K$ distinct phonemes or linguistic units as present in the transcriptions. + +Works like HuBERT and wav2vec-Unsupervised learn phonemic units directly. HuBERT (Hsu et al., 2021) pre-trains a transformer network via a BERT-like masked prediction task over noisy targets generated with a clustering model trained offline. 
The targets may be generated with an ensemble of $K$ -means clusterers with $K = \{100,500\}$ clusters on MFCC features or transformer representations. wav2vec-Unsupervised (Baevski et al., 2021) learns phonetic tokens adversarially from phonemized unlabelled text data. A discriminator identifies if the phoneme sequence generated by the model is real or fake based on phonemized unlabelled text. + +All aforementioned approaches use powerful auto-regressive models pre-trained on large amounts of unlabeled audio and fine-tuned on transcribed audio. Our learning approach can learn semantic tokens with small models while training pairwise on a small amount of unlabeled audio data. + +Audio Representations for Retrieval. Now Playing (Arcas et al., 2017) and (Chang et al., 2020) use a Neural Network Fingerprinter (NNFP) module outputting representations which are efficient for search in query-by-example tasks, where the difference between the query and the actual song is minute in comparison to humming, where only the melody is sung. Now Playing (Arcas et al., 2017) trains representations by optimizing the triplet loss (Schroff et al., 2015) and (Chang et al., 2020) trains representations by simulating the Maximum Inner Product Search (MIPS) on minibatches of representations. For the Query by Humming task, (Mostafa et al., 2016) and (Mostafa & Fung, 2017) use deep learning models like DNNs and CNNs to generate representations which they map to MIDI-numbers or note tokens. Such works require note-transcribed data to train the models. For the Spoken Term Detection task, approaches like (Zhang & Glass, 2009), (Rodriguez-Fuentes et al., 2014), (Lee et al., 2015), (Ram et al., 2018) convert audio to sequences of feature vectors and apply different variations of DTW based template matching to detect the query in long utterances of speech, which is time-consuming. + +Cross Domain Alignment. 
Given a pair of semantically similar inputs for training, tasks such as visual question answering (text and image) and machine translation (text) involve learning an alignment. The alignment here is not ordered and the inputs may be from different modalities. Attention models have been used to find alignment between output entities and input regions (Yao et al., 2018). (Chen et al., 2020) use Gromov-Wasserstein distance between output and input entities to match them. However, there is no notion of tokens there, rather the salient entities in the input are represented as vectors in a graph. + +Graph Matching. Graph Neural Networks (Gori et al., 2005) are used to generate embeddings for graphs. These embeddings are used to perform graph matching to find similarity of structured graphs (Li et al., 2019). However, they perform the matching jointly on the pair of inputs, rather than representing each input independently. This makes them unsuitable for the search problem at hand due to large run-time complexity. The distance metrics used for graph matching are based on edit distance (Li et al., 2019) and Wasserstein distance (Chen et al., 2020). + +# 3 PROBLEM STATEMENT + +We aim to map $\mathcal{X}$ , a sequence of vectors, to $\tilde{\mathcal{T}}$ , a sequence of discrete tokens from a finite alphabet $\mathbb{A}$ , such that the similarity of sequences is preserved in the sense of edit distance. The length of sequence $\tilde{\mathcal{T}}$ may be less than or equal to that of the sequence $\mathcal{X}$ . In other words, given a pair of similar sequences $(\mathcal{X}_i,\mathcal{X}_j)$ and sequence $\mathcal{X}_k$ which is not similar to either sequences in the pair, we want to map them to token sequences such that $ED(\tilde{\mathcal{T}}_i,\tilde{\mathcal{T}}_j)$ should be less than $\min \{ED(\tilde{\mathcal{T}}_i,\tilde{\mathcal{T}}_k),ED(\tilde{\mathcal{T}}_j,\tilde{\mathcal{T}}_k)\}$ , where $ED(\cdot ,\cdot)$ is the edit distance between two sequences. 
+ +# 4 MODEL ARCHITECTURE + +wav2tok is comprised of an encoder $f: \mathbb{X} \mapsto \mathbb{Z}$ which takes as input a temporal sequence of audio features $\mathcal{X} = [\mathbf{x}_t \in R^n; t \in [T]]$ of length $T$ , where $\mathbf{x}_t$ is the feature vector at time step $t$ , and outputs a sequence of L-2 normalised representations $\mathcal{Z} = [\mathbf{z}_t = f(\mathbf{x}_t) \in R^m; t \in [T]]$ . The encoder is implemented as a 2-layer BiLSTM followed by an L-2 normalization layer. BiLSTMs summarise information in both directions and encode surrounding context. + +A $K$ -means vector quantizer network $g: \mathbb{Z} \mapsto \mathbb{T}$ then labels sequence $\mathcal{Z}$ at each time-step with tokens belonging to a finite $K$ -element alphabet $\mathbb{A} = [K]$ and generates sequence of tokens $\mathcal{T} = [\tau_t = g(\mathbf{z}_t) \in \mathbb{A}; t \in [T]]$ . Network $g$ vector quantizes input $\mathbf{z}_t$ with a codebook $E = \{\mathbf{e}_k \in \mathbb{Z}; k \in [K]\}$ comprised of $|\mathbb{A}| = K$ discrete representations which are cluster centroids in representation space $\mathbb{Z}$ and outputs token $\tau_t = \arg \max_k \mathbf{z}_t \cdot \mathbf{e}_k$ . Note, here the dot product gives a cosine similarity score since both the vectors are L-2 normalized, as a result, $\mathbf{e}_k \in E$ closest to $\mathbf{z}_t$ is chosen as its discrete representation and index $k$ as its token $\tau_t$ . The $K$ discrete representations in network $g$ are trainable parameters. + +A compressor $\mathcal{C}$ compresses sequence of tokens $\mathcal{T}$ to sequence $\tilde{\mathcal{T}}$ of length $\tilde{T} \leq T$ by deleting all consecutive repetitions of tokens. $\mathcal{C}$ also generates the corresponding compressed sequence $\tilde{\mathcal{Z}}$ of length $\tilde{T}$ by averaging representations $\mathbf{z}_t \in \mathcal{Z}$ over the consecutive tokens and L-2 normalising the averaged representation. 
Figure 1a presents an illustration demonstrating our model architecture. + +# 5 TRAINING + +wav2tok is trained on pairs of sequences of audio features $(\mathcal{X},\mathcal{X}^{\prime})$ where the raw audio corresponding to $\mathcal{X}^{\prime}$ is an augmented replica of that corresponding to $\mathcal{X}$ . We apply either pitch shift or time stretch or both augmentations to raw audio to generate its augmented replica. $\mathcal{X}$ and $\mathcal{X}^{\prime}$ may differ in sources as well, i.e. a different person may sing the recording corresponding to $\mathcal{X}^{\prime}$ . + +The discrete representations in quantizer $g$ codebook $E$ are initialized as $K$ centroids obtained via offline $K$ -means clustering over freshly initialized encoder-generated representations. Given $(\mathcal{X},\mathcal{X}^{\prime})$ , encoder $f$ generates sequence of representations $\mathcal{Z}$ from input $\mathcal{X}$ and $\mathcal{Z}^{\prime}$ from $\mathcal{X}^{\prime}$ . Quantizer $g$ generates a sequence of tokens $\mathcal{T}$ from input $\mathcal{Z}$ and $\mathcal{T}^{\prime}$ from $\mathcal{Z}^{\prime}$ via cosine similarity-based comparison with codebook vectors $e\in E$ initialized via offline clustering over freshly initialized representation space $Z$ . Compressor $\mathcal{C}$ compresses sequence of tokens $\mathcal{T}$ to sequence $\tilde{\mathcal{T}}$ and $\mathcal{T}^{\prime}$ to $\tilde{\mathcal{T}}^{\prime}$ . + +We average all encoder-generated representations in pair $(\mathcal{Z},\mathcal{Z}^{\prime})$ which map to the same token, say $\tau$ , to generate a prototype for $\tau$ . We then perform a contrastive task where we compare the prototype with each of the $K$ discrete representations in codebook $E$ and increase its similarity with the discrete representation corresponding to $\tau$ . 
We also increase the likelihood that wav2tok maps pair $(\mathcal{X},\mathcal{X}^{\prime})$ to the same token sequence via CTC framework to minimize $ED(\tilde{\mathcal{T}},\tilde{\mathcal{T}}^{\prime})$ . + +Our loss function is defined as, + +$$ +\mathcal {L} = \mathcal {L} _ {m} \left(\mathcal {X}, \mathcal {X} ^ {\prime}\right) + \alpha \mathcal {L} _ {c t c} \left(\mathcal {X}, \tilde {\mathcal {T}} ^ {\prime}\right) + \beta \mathcal {L} _ {c t c} \left(\mathcal {X} ^ {\prime}, \tilde {\mathcal {T}}\right) \tag {1} +$$ + +where $\mathcal{L}_m$ is loss defined for contrastive task, $\mathcal{L}_{ctc}$ is the loss maximising aforementioned likelihood, and $\alpha, \beta$ are positive constants. We optimize this loss function in a manner similar to the Expectation Maximization algorithm. The clustering is used as the E-step to update the discrete representations in quantizer $g$ codebook, while gradient descent over $\mathcal{L}$ acts as the M-step. + +Contrastive Loss. Let the set of unique tokens occurring in pair $(\tilde{\mathcal{T}},\tilde{\mathcal{T}}^{\prime})$ be $\mathcal{U}\subset [K],|\mathcal{U}| = K^{\prime}\leq K$ . We generate a list of token prototypes $\mathcal{P} = \{\mathbf{p}_{\tau};\tau \in \mathcal{U}\}$ where $\mathbf{p}_{\tau}$ is L-2 normalised mean of representations in $\{\mathbf{z}\in \{\mathcal{Z};\mathcal{Z}^{\prime}\} :g(\mathbf{z}) = \tau \}$ . Figure 1b presents an illustration demonstrating how we generate list of token prototypes $\mathcal{P}$ . + +Given $\mathbf{p}_{\tau} \in \mathcal{P}$ , we perform a contrastive task to increase its similarity with discrete representation $\mathbf{e}_{\tau} \in E$ . To compare $\mathbf{p}_{\tau}$ with the codebook, metrics such as cosine similarity and Euclidean distance could be used. 
However, we find that using the following parameterized score for this purpose gives better performance, + +$$ +\mathbf {s} _ {\tau , k} = \sigma \left(W \cdot \left(\mathbf {p} _ {\tau} - s g \left(\mathbf {e} _ {k}\right)\right)\right) \in [ 0, 1 ] \tag {2} +$$ + +![](images/956cf898b477fc6de2c77a42c7b5364a007a69cb0518830ef4962dd076047183.jpg) +(a) Model Architecture + +![](images/613cca31105fe5eba5f895b8b1438042f49aee518aa385e1b23644e7a0f81c62.jpg) +(b) Generation of Prototype list $\mathcal{P}$ +Figure 1: $\mathcal{X}'$ is an augmented replica of $\mathcal{X}$ . 1a illustrates our model architecture. 1b demonstrates the generation of $\mathcal{P}$ required for calculation of $\mathcal{L}_m$ . 1c demonstrates our likelihood loss calculation. + +![](images/38c36d594d6a844f7b0098a07399ada42bc53ecd185fbe077c70093962fa57c4.jpg) +(c) Likelihood Loss Calculation + +where $sg(x) \equiv x, \frac{d}{dx} sg(x) \equiv 0$ is the stop-gradient operator, $\sigma(\cdot)$ is sigmoid function generating a score in the range [0, 1] and $W \in R^{1 \times d}$ is a parameter matrix. $\mathbf{s}_{\tau,k}$ acts as a parameterized similarity score between $\mathbf{p}_{\tau}$ and discrete representation $\mathbf{e}_k \in E$ . We define our contrastive loss $\mathcal{L}_m$ as, + +$$ +\mathcal {L} _ {m} \left(\mathcal {X}, \mathcal {X} ^ {\prime}\right) = - \sum_ {\tau \in \mathcal {U}} \log \frac {\exp \left(\mathbf {s} _ {\tau , \tau}\right)}{\sum_ {k = 1} ^ {K} \exp \left(\mathbf {s} _ {\tau , k}\right)} \tag {3} +$$ + +Likelihood Loss. We maximize the likelihood that sequence $\mathcal{X}$ maps to token sequence $\tilde{\mathcal{T}}'$ , which corresponds to $\mathcal{X}'$ , via the CTC framework (see Figure 1c). It puts a constraint to generate the same token sequence for $\mathcal{X}$ and $\mathcal{X}'$ . 
We calculate the probability of $\mathbf{x}_t$ mapping to token $\tau_t = k$ as $l_{t,k} = \frac{\exp(f(\mathbf{x}_t)\cdot sg(e_k))}{\sum_{i=1}^K \exp(f(\mathbf{x}_t)\cdot sg(e_i))}$ . The likelihood $P(\tilde{\mathcal{T}}'|\mathcal{X})$ is then calculated as a sum of probabilities of all $T$ -length paths $\pi$ over tokens $\tau \in \mathbb{A}$ such that $\mathcal{C}(\pi) = \tilde{\mathcal{T}}'$ . The loss is defined as, + +$$ +\mathcal {L} _ {c t c} (\mathcal {X}, \tilde {\mathcal {T}} ^ {\prime}) = - \log \sum_ {\pi \in C ^ {- 1} (\tilde {\mathcal {T}} ^ {\prime})} P (\pi | \mathcal {X}) \tag {4} +$$ + +where the path probabilities are calculated over token probability scores in sequence $\mathbf{l} = \{l_t \in R^K; t \in [T]\}$ via CTC forward-backward framework (Graves et al., 2006) without the use of blanks. We present the CTC forward and backward variables for our use case in Appendix B. + +Clustering. We perform offline $K$ -means clustering on a subset of encoder representations during initialization of our network and at regular intervals during training to set the discrete representations in codebook $E$ of network $g$ . Initializing the clusters in this way prevents wav2tok from converging to a local optimum during the matching task, as is the case we found with random initialization of centroids. The intermittent clustering during training iteratively refines the discrete representations and prevents codebook collapse. We use the sklearn library to perform $K$ -means clustering. + +We train wav2tok using the ADAM (Kingma & Ba, 2017) optimizer and a linear learning schedule with a learning rate of 0.001 and $8\%$ of the training steps as warm-up steps. + +# 6 EXPERIMENTS + +We test the performance of tokens and encoder-generated continuous representations of wav2tok in audio retrieval. We perform Query by Humming (QbH) and Spoken Term Detection experiments to evaluate the performance of wav2tok in comparison to the baselines. 
+ +# 6.1 MUSIC MELODY SEARCH: QUERY BY HUMMING + +Task. Given a test query audio, we are to find the audio with the most similar melody in the search audio database. + +Experiment Details. We use the MIR-QbSH dataset which is composed of 4431 humming audio recordings of $30s$ duration corresponding to 48 songs. Each song is sung by several individuals. All individuals sing the same part of the song. The recordings have variations in the environments + +they were recorded in, tonal qualities, voices, pitch, and time stretch. We train our models on hums of 40 songs in MIR-QbSH dataset and evaluate search performance on hums of the remaining 8 songs. The training dataset has 1970 hums for training and 676 for validation. The test dataset has 225 hums as a search database and 659 query hums. We evaluate the performance of our models in identifying which song a given query corresponds to via comparison with all sequences in the search database. Each model converts all the audio in our test dataset to sequences of tokens or representations. Each query sequence is compared to all sequences in the search database via Edit Distance (ED) (if tokens) or DTW (if representations). The song id of the most similar sequence in the search database is then selected as query song id. We calculate Mean Reciprocal Ranking (MRR) score with ground-truth song id of the queries for evaluation. The Reciprocal Ranking (RR) score is given as $\frac{1}{r}$ if the $r^{th}$ most similar sequence in search database has same song id as query. + +All the audio recordings are converted to Short Term Fourier Transform (STFT) matrices before being passed as inputs to our models. The STFT matrices are computed with 513 frequency bins, a window length of 1024 samples (summarising 128 ms of audio), and hop length of 512 samples. + +# 6.2 SPOKEN TERM DETECTION + +Task. Given a test query audio, we are to detect its occurrence in a long utterance. + +Experiment Details. 
We use the TIMIT dataset which is composed of 6300 utterances of English speech with time-aligned word transcriptions. We choose 59 most-occurring words with more than 2 characters as keywords and all others as non-keywords. We use utterances of random sentences formed with 6 words sampled from a subset of 25 keywords for training and evaluation on STD experiments for the detection of the remaining 34 keywords. The test dataset is composed of 337 utterances corresponding to the 34 queries and 100 long utterances per query, with half containing a single occurrence of query amongst non-keywords and the other half containing only non-keywords. Given a query and a long utterance, we convert both to sequences of tokens using each audio tokenizer. We perform approximate string matching (Hall & Dowling, 1980) for detection of query in the utterance. The STFT matrix inputs to the models are computed with 185 frequency bins, a window length of 368 samples (summarising $23\mathrm{ms}$ of audio), and a hop length of 92 samples. + +# 6.3 BASELINES + +Triplet. We train encoder $f: \mathbb{X} \mapsto \mathbb{Z}$ to generate L-2 normalized continuous representations for retrieval. Encoder $f$ is trained via optimizing the triplet Loss (Schroff et al., 2015) as done in training an NNFP in Now Playing (Arcas et al., 2017). Given pair of similar sequences $(\mathcal{X}, \mathcal{X}')$ , encoder $f$ generates sequences $\mathcal{Z}$ and $\mathcal{Z}'$ . We form a mini-batch of size $N$ of triplets $\{\mathbf{z}, \mathbf{z}^+, \mathbf{z}^-\}$ where representation $\mathbf{z}$ is sampled from sequence $\mathcal{Z}$ , $\mathbf{z}^+$ and $\mathbf{z}^-$ are positive and negative samples respectively for $\mathbf{z}$ sampled from sequence $\mathcal{Z}'$ . The loss is defined as, $\mathcal{L}_{\text{Triplet}} = \sum_{i=1}^{N} \max \{||\mathbf{z}_i - \mathbf{z}_i^+|| - ||\mathbf{z}_i - \mathbf{z}_i^-|| + m, 0\}$ , where $m$ is a margin for similarity. + +MIPS. 
We train encoder $f: \mathbb{X} \mapsto \mathbb{Z}$ to generate L-2 normalized continuous representations for retrieval. Encoder $f$ is trained via simulation of MIPS (Mussmann & Ermon, 2016) on mini-batches of representations as proposed by (Chang et al., 2020). Given pair of similar sequences $(\mathcal{X}, \mathcal{X}')$ , encoder $f$ generates sequences $\mathcal{Z}$ and $\mathcal{Z}'$ . We form a mini-batch of size $N$ of pairs of $\{\mathbf{z}, \mathbf{z}^+\}$ where encoder generated representation $\mathbf{z}$ is sampled from sequence $\mathcal{Z}$ and $\mathbf{z}^+$ is a positive for $\mathbf{z}$ sampled from $\mathcal{Z}'$ . The loss is defined as, $\mathcal{L}_{\mathrm{MIPS}} = -\sum_{i=1}^{N} \log \frac{\exp(\mathbf{z}_i, \mathbf{z}_i^+)}{\sum_{j \neq i} (\exp(\mathbf{z}_i \cdot \mathbf{z}_j^+) + \exp(\mathbf{z}_i \cdot \mathbf{z}_j))}$ . + +wav2vec2. We train our audio tokenizer via wav2vec 2.0 (Baevski et al., 2020) learning framework. Quantizer $g$ in our audio tokenizer is chosen to be a Gumbel Softmax-based Vector Quantizer (See Appendix C for details) as used in (Baevski et al., 2020) but with a single codebook with $K$ members. Given sequence $\mathcal{X}$ , encoder $f$ outputs sequence of L-2 normalised representations $\mathcal{Z}$ of length $T$ . Quantizer $g$ outputs sequence of discrete representations $\mathcal{Q} = \{q_t = g(z_t \in \mathcal{Z}); t = 1, \dots, T\}$ . We mask spans of 10 time steps with random starting indices in sequence $\mathcal{Z}$ and then pass the new sequence to a transformer network $h: \mathbb{Z} \mapsto \mathbb{O}$ which generates a sequence of contextualized representations $\mathcal{O} = \{\mathbf{o}_t = h(z_t \in \mathcal{Z}); t = 1, \dots, T\}$ . For transformer output $\mathbf{o}_t$ over masked time step $t$ , we identify the true discrete representation $\mathbf{q}_t$ from a set $D_t$ composed of $\mathbf{q}_t$ and $D$ distractors which are discrete representations sampled from other time steps. 
The loss is defined as, + +$\mathcal{L}_w(\mathbf{o}_t,\mathcal{D}_t) = -\log \frac{\exp(sim(\mathbf{o}_t,\mathbf{q}_t))}{\sum_{\tilde{q}\in\mathcal{D}_t}\exp(sim(\mathbf{o}_t,\tilde{q}))} + \mathcal{L}_d$ where $sim(a,b) = \frac{a^Tb}{||a||||b||}$ is cosine similarity and $\mathcal{L}_d$ is codebook diversity loss. + +wav2vec2P. We train wav2vec2 audio tokenizer with our variation of wav2vec 2.0 (Baevski et al., 2020) learning framework which learns discrete representations from pairs of similar sequences. Given pair $(\mathcal{X},\mathcal{X}^{\prime})$ , encoder $f$ outputs sequences $\mathcal{Z}$ of length $T$ and $\mathcal{Z}'$ of length $T'$ respectively. Assuming $T\leq T'$ , we generate sequence $\mathcal{Z}^+$ of length $T$ whose $t$ time step element $\mathbf{z}_t^+$ is a positive for $\mathbf{z}_t\in \mathcal{Z}$ sampled from sequence $\mathcal{Z}'$ . Gumbel Softmax-based Vector Quantizer $g$ quantizes each representation in sequence $\mathcal{Z}^+$ to generate sequence $\mathcal{Q}^+$ . We mask sequence $\mathcal{Z}$ and $\mathcal{Z}^+$ at the same time steps. Transformer $h$ inputs masked sequences and generate sequences $\mathcal{O}$ and $\mathcal{O}^+$ . For masked time step $t$ , we use transformer output $\mathbf{o}_t$ to identify $\mathbf{q}_t^+\in \mathcal{Q}^+$ from set $\mathcal{D}_t^+$ with distractors sampled from sequence $\mathcal{Q}^+$ and transformer output $\mathbf{o}_t^+$ to identify $\mathbf{q}_t\in \mathcal{Q}$ from set $\mathcal{D}_t$ with distractors sampled from sequence $\mathcal{Q}$ . The loss is defined as, $\mathcal{L}_{wP} = \mathcal{L}_w(\mathbf{o}_t,\mathcal{D}_t^+) + \mathcal{L}_w(\mathbf{o}_t^+, \mathcal{D}_t)$ . + +wav2vec2-O. The original wav2vec 2.0 base model with 12 Transformer blocks and $95M$ parameters as proposed by (Baevski et al., 2020). It is pre-trained on 960 hours of LibriSpeech data and fine-tuned on TIMIT dataset. It uses $K = 32$ tokens for tokenization. + +wav2vec2-Multi. 
A wav2vec 2.0 large model with 24 Transformer blocks and $317M$ parameters pre-trained on 53 languages as proposed by (Conneau et al., 2020). It is fine-tuned on Common Voice to detect all possible phonemes in training languages with $K = 392$ tokens. + +Triplet and MIPS use a 2-layer BiLSTM as encoder with $3.6M$ parameters. We use the LAMB optimizer (You et al., 2020) and a Cosine Annealing Learning Schedule (Loshchilov & Hutter, 2017) with a learning rate restart of 0.0001 to train them. wav2vec2 and wav2vec2P use a 2-layer BiLSTM encoder with $3.6M$ parameters to generate latent representations and 3 Transformer blocks with $8.5M$ parameters. Both are trained using the ADAM (Kingma & Ba, 2017) optimizer and a linear learning schedule with a learning rate of 0.001 and $8\%$ of the training steps as warm-up steps. Proposed wav2tok uses only a 2-layer BiLSTM as encoder with $3.6M$ parameters. + +# 7 RESULTS + +# 7.1 MUSIC MELODY SEARCH: QUERY BY HUMMING + +We present search performances for 3 settings of query namely- Query with no augmentation or Vanilla Query (V), Time Stretched Query (TS), and Pitch Shifted Query (PS). Time stretch and pitch shift are the most common augmentations that may be faced in queries by humming data. No augmentations were applied to audio in search database. Evaluations are performed on sequences corresponding to songs not seen during training. The results present the generalizability of the tokens or representations generated by the models. We set the number of tokens as $K = 25$ for wav2tok, wav2vec2, and wav2vec2P (See Appendix A.2 for experiments to support our choices). + +Quality of Tokenization. Table 1 presents the performance of the sequence of tokens $\tilde{\mathcal{T}}$ generated by the audio tokenizers on ED-based similarity search. Tokens generated by wav2tok present good generalization capabilities in terms of MRR and outperform all the baselines. 
It generates time and pitch invariant tokens as we see no drop in performance when either augmentation is applied to query. wav2vec2-O is trained on English speech only. The tokens generated by it do not contain much melodic information but are robust to augmentations. The multilingual training of wav2vec2-Multi infuses both melodic and phonetic information to its 392 tokens, thereby giving good performance. wav2tok outperforms both wav2vec2-O and wav2vec2-Multi given its pairwise training which allows it to infuse more melodic information to the tokens while also being trained on a small amount of unlabelled data. The Gumbel Softmax-based quantizer in wav2vec2 and wav2vec2P isn't ideal for infusing melodic information to tokens but it does infuse phonetic information as will be seen in Section 7.2. We compare the tokens with representations learned by MIPS and Triplet evaluated on DTW-based similarity search. The continuous representations present sub-par generalizations to unseen songs. We compare wav2tok with SOTA melody extraction algorithm proposed in (Salamon & Gomez, 2012) which converts hums to MIDI sequences. wav2tok generates token sequences much smaller than the respective MIDI sequences and outperforms the MIDI tokens in search performance, search time, and robustness. In addition, wav2tok outperforms the algorithm in inference time. We further compare wav2tok with SOTA QbH system proposed in (Mostafa & + +Fung, 2017). In our implementation, we map audio to MIDI sequences using the aforementioned SOTA melody extraction algorithm instead of a CNN. Given MIDI sequence 53, 53, 58, 50 with durations 0s, 0.5s, 1s, 2s, a Relative Note sequence is generated as $(0,0)$ , $(0,0.5)$ , $(5,1)$ , $(-8,2)$ over which DTW is performed for retrieval. wav2tok tokens outperform the SOTA QbH system in both performance and robustness; the performance of the latter drops drastically with time stretch. 
+ +We present the performances of the uncompressed sequences $\mathcal{T}$ and $\mathcal{Z}$ and compressed sequence $\tilde{\mathcal{Z}}$ generated by the audio tokenizers in Appendix A.1. We observe a drop in performance for all audio tokenizers when we apply sequence compression to sequences $\mathcal{T}$ and $\mathcal{Z}$ . wav2tok outperforms all the baselines and generates superior-quality of continuous representations and discrete tokens. + +Search Time. Table 1 presents the search time taken for similarity search over the tokens or representations generated by the models. The search time taken per query is 2 order of magnitude lesser for ED-based Search over compressed sequence of tokens $\tilde{T}$ than standard DTW-based Search over continuous representations $\mathcal{Z}$ . The pre-trained models being fine-tuned on transcribed audio give the best tokens in terms of compression and search time. wav2tok gives comparable tokens but outperforms the pre-trained models in inference time. + +Table 1: Quality of Tokenization + +
ModelV (MRR)TS (MRR)PS (MRR)Search Time (s)Infer (s)
MIDI ED0.750.640.723.840.62
Relative Note DTW0.840.740.80.020.62
Triplet DTW0.50.480.53.50.1
MIPS DTW0.60.550.58
wav2vec2 ED0.660.630.640.060.17
wav2vec2P ED0.690.650.67
wav2vec2-O ED0.720.720.710.010.43
wav2vec2-Multi ED0.820.820.821.2
wav2tok ED0.840.840.840.040.14
+ +Table 2: Ablation Studies and Some Variations + +
ModelV (MRR)TS (MRR)PS (MRR)
log-mel DTW0.720.70.67
vq-log-mel ED0.710.60.62
wav2tok+NoSim ED0.730.730.72
wav2tok+Cos ED0.790.760.77
wav2tok+CTC ED0.640.620.63
wav2tok+NewInit ED0.770.760.78
wav2tok+MIR1K ED0.720.640.67
wav2tok ED0.840.840.84
+ +Ablation Studies. Query by humming involves similarity based on melody information, which is carried by the semantic pairing of the audio in training data. We constrain this pairing to include sequences not semantically similar and call this model wav2tok+NoSim. We optimize the contrastive loss $\mathcal{L}_m$ to train the model. The results are shown in Table 2 (full table in Appendix A.3). There is a significant drop in token robustness and performance but the representations suffer a small drop (see Appendix A.3). Hence, although the representation space may be well clustered, wav2tok is able to add more semantics to the tokens as it is being trained with pairs of similar sequences in comparison to wav2tok+NoSim. We train wav2tok with cosine similarity scores instead of a parameterized score (wav2tok+Cos). The drop in performance validates the enhancement brought about by using a parameterized score. We also train wav2tok with $\mathcal{L}_{ctc}$ only (wav2tok+CTC). The CTC loss considers all possible paths which compress to the target label sequence. As a result, the learnt tokens aren't much semantic. The use of both losses gives the best tokens. + +Some Variations. In wav2tok+NewInit, we associate the discrete representations with $K$ centroids in the input space $\mathbb{X}$ . Such association does not initialize our tokenizer with optimal centroids which cluster the space $\mathbb{Z}$ perfectly. This results in a significant drop in performance and robustness as shown in Table 2. We train wav2tok on MIR-1K dataset (wav2tok+MIR1K) which is composed of polyphonic music recordings of 1000 distinct songs. The tokens generalize well to monophonic hums in MIR-QbSH dataset giving a comparable performance to MIDI tokens. This validates that wav2tok tokens do learn melodic information and are robust to variations incurred in hums. 
We further compare wav2tok with log-mel features and token sequences (with no compression) obtained via quantization of log-mel features. wav2tok tokens outperform both. + +# 7.2 SPOKEN TERM DETECTION + +Quality of Tokenization. Table 3 presents the quality of tokenization of the query keywords by the models evaluated in the Spoken Term Detection experiments. We present the performances of wav2vec2, wav2vec2P, wav2vec2-O, wav2vec2-Multi and proposed wav2tok. We conduct search experiments on a test dataset composed of a search database of 337 utterances of the 34 keywords used as queries in the STD experiments and 1289 query utterances. We identify the keyword to + +which each query corresponds to via comparison to all the 337 utterances in the search database via ED-based similarity score. The word id of the most similar utterance is selected as the word to which the query corresponds to. We set $K = 40$ equivalent to the number of phonemes in English. + +wav2tok gives the best performance in terms of MRR scores. It outperforms huge models like wav2vec2-O and wav2vec2-Multi which are fine-tuned for the task of phonetic tokenization of speech audio while using a small number of parameters. wav2vec2 and wav2vec2P also outperform wav2vec2-Multi and wav2vec2-O while using smaller number of parameters. wav2vec2-O and wav2vec2-Multi use a blank token to handle consecutive occurrences of the same tokens and to label background noise. The utterances of each keyword in the test dataset are very small in time duration. This causes wav2vec2-O to confuse word utterances as background noise. It generates a sequence of blank tokens and performs poorly in search. wav2vec2-Multi using a larger number of phonetic tokens does not suffer this issue. wav2tok, wav2vec2, and wav2vec2P have no such blank token. This brings a drop in search performance with sequence compression. 
We further present the performance of wav2tok trained on a much larger LibriSpeech 100 hours dataset (wav2tok+Libri). It is able to outperform wav2vec2-O and give comparable performance to wav2vec2-Multi. + +Table 3: Quality of Tokenization for speech + +
ModelNormal (MRR)Compressed (MRR)
log-mel DTW0.7-
wav2vec2 ED0.680.63
wav2vec2P ED0.70.65
wav2vec2-O ED0.40.4
wav2vec2-Multi ED0.670.67
wav2tok ED0.740.66
wav2tok+Libri ED0.640.6
+ +Table 4: Spoken Term Detection + +
ModelED(F1)Search Time(s)DTW(F1)Search Time(s)
log-mel DTW--0.410.003
wav2vec20.640.0660.460.1
wav2vec2P0.640.47
wav2vec2-O0.610.290.430.23
wav2vec2-Multi0.630.720.480.66
wav2tok0.650.0640.520.09
wav2tok+Libri0.630.440.1
+ +Spoken Term Detection. We convert the query word utterance and the long utterance in to sequences of tokens by all our models and detect the occurrence of the query via approximate string matching. We use fuzzysearch library to perform approximate string matching. It automatically chooses the fastest algorithm for matching. Table 4 presents the performance of wav2vec2, wav2vec2P, wav2vec2-O, wav2vec2-Multi, and proposed wav2tok in STD. All the models give a comparable performance in terms of F1- score with wav2tok performing slightly better. We also implement the STD system proposed in (Anguera & Ferrarons, 2013) which performs highly competitive STD via subsequence DTW (S-DTW) over gaussian posterior features. In our implementation, we extract the posterior features with SOTA ASR models like wav2vec2-O and wav2vec2-Multi. The results are presented in the DTW column in Table 4. Note, the results for other models in same column are for STD via S-DTW over representations. We observe STD over tokens to give better F1-score. + +# 8 CONCLUSION AND FUTURE WORK + +In this paper, we present an audio sequence tokenizer wav2tok that generates semantically meaningful ordered representations (or tokens) that can be used for efficient retrieval by query sequences. The model learns only from pairs of semantically similar sequences and outperforms state-of-the-art approaches for spoken term detection and query by humming. One may apply more efficient search algorithms such as locality-sensitive hashing and longest common subsequence search on the generated tokens to further speed up the search. The proposed framework can also be extended to image and video retrieval, as they also have spatial ordering. We would like to investigate the domain-specific, i.e., linguistic or musicological, aspects of the extracted tokens. For instance, during retrieval, the matching algorithm assumes all the tokens to be equidistant from each other. 
One may study or use the metric space of these tokens. + +# 9 REPRODUCIBILITY + +The codes are available in https://github.com/madhavlab/wav2tok. The experiments are performed using standard datasets. + +# REFERENCES + +Xavier Anguera and Miquel Ferrarons. Memory efficient subsequence dtw for query-by-example spoken term detection. In 2013 IEEE International Conference on Multimedia and Expo (ICME), pp. 1-6, 2013. doi: 10.1109/ICME.2013.6607546. +Blaise Agüera Arcas, Beat Gfeller, Ruiqi Guo, Kevin Kilgour, Sanjiv Kumar, James Lyon, Julian Odell, Marvin Ritter, Dominik Roblek, Matthew Sharifi, and Mihajlo Velimirovic. Now playing: Continuous low-power music recognition. CoRR, abs/1711.10958, 2017. URL http://arxiv.org/abs/1711.10958. +Alexei Baevski, Steffen Schneider, and Michael Auli. vq-wav2vec: Self-supervised learning of discrete speech representations. CoRR, abs/1910.05453, 2019. URL http://arxiv.org/abs/1910.05453. +Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. CoRR, abs/2006.11477, 2020. URL https://arxiv.org/abs/2006.11477. +Alexei Baevski, Wei-Ning Hsu, Alexis Conneau, and Michael Auli. Unsupervised speech recognition. CoRR, abs/2105.11084, 2021. URL https://arxiv.org/abs/2105.11084. +Sungkyun Chang, Donmoon Lee, Jeongsoo Park, Hyungui Lim, Kyogu Lee, Karam Ko, and Yoon-chang Han. Neural audio fingerprint for high-specific audio retrieval based on contrastive learning. CoRR, abs/2010.11910, 2020. URL https://arxiv.org/abs/2010.11910. +Liqun Chen, Zhe Gan, Yu Cheng, Linjie Li, Lawrence Carin, and Jingjing Liu. Graph optimal transport for cross-domain alignment. 37th International Conference on Machine Learning, ICML 2020, PartF16814:1520-1531, 2020. +Yu-An Chung and James Glass. Generative pre-training for speech with autoregressive predictive coding. 
In ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3497-3501, 2020. doi: 10.1109/ICASSP40776.2020.9054438. +Yu-An Chung, Hao Tang, and James Glass. Vector-quantized autoregressive predictive coding. arXiv preprint arXiv:2005.08392, 2020. +Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, and Michael Auli. Unsupervised cross-lingual representation learning for speech recognition. CoRR, abs/2006.13979, 2020. URL https://arxiv.org/abs/2006.13979. +Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805, 2018. URL http://arxiv.org/abs/1810.04805. +John S Garofolo, Lori F Lamel, William M Fisher, Jonathan G Fiscus, and David S Pallett. Darpa timit acoustic-phonetic continuous speech corpus cd-rom. nist speech disc 1-1.1. NASA STI/Recon technical report n, 93:27403, 1993. +Marco Gori, Gabriele Monfardini, and Franco Scarselli. A new model for learning in graph domains. In Proceedings. 2005 IEEE International Joint Conference on Neural Networks, 2005., volume 2, pp. 729-734. IEEE, 2005. +Alex Graves, Santiago Fernández, Faustino Gomez, and Jürgen Schmidhuber. Connectionist temporal classification: Labelling unsegmented sequence data with recurrent neural networks. In Proceedings of the 23rd International Conference on Machine Learning, ICML '06, pp. 369-376, New York, NY, USA, 2006. Association for Computing Machinery. ISBN 1595933832. doi: 10.1145/1143844.1143891. URL https://doi.org/10.1145/1143844.1143891. + +Patrick AV Hall and Geoff R Dowling. Approximate string matching. ACM computing surveys (CSUR), 12(4):381-402, 1980. +Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. CoRR, abs/2106.07447, 2021. 
URL https://arxiv.org/abs/2106.07447. +Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization, 2017. +Naoko Kosugi, Yuichi Nishihara, Tetsuo Sakata, Masashi Yamamuro, and Kazuhiko Kushima. A practical query-by-humming system for a large music database. In Proceedings of the Eighth ACM International Conference on Multimedia, MULTIMEDIA '00, pp. 333-342, New York, NY, USA, 2000. Association for Computing Machinery. ISBN 1581131984. doi: 10.1145/354384.354520. URL https://doi.org/10.1145/354384.354520. +John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01, pp. 282-289, San Francisco, CA, USA, 2001. Morgan Kaufmann Publishers Inc. ISBN 1558607781. +Lin-shan Lee, James Glass, Hung-yi Lee, and Chun-an Chan. Spoken content retrieval—beyond cascading speech recognition with text retrieval. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23(9):1389-1420, 2015. doi: 10.1109/TASLP.2015.2438543. +Yujia Li, Chenjie Gu, Thomas Dullien, Oriol Vinyals, and Pushmeet Kohli. Graph matching networks for learning the similarity of graph structured objects. In International conference on machine learning, pp. 3835-3845. PMLR, 2019. +Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts, 2017. +Jonathan Mamou, Jia Cui, Xiaodong Cui, Mark J. F. Gales, Brian Kingsbury, Kate Knill, Lidia Mangu, David Nolden, Michael Picheny, Bhavana Ramabhadran, Ralf Schlüter, Abhinav Sethy, and Philip C. Woodland. System combination and score normalization for spoken term detection. In 2013 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 8272-8276, 2013. doi: 10.1109/ICASSP.2013.6639278. +Annamaria Mesaros and Tuomas Virtanen. Recognition of phonemes and words in singing. 
In 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2146-2149, 2010. doi: 10.1109/ICASSP.2010.5495585. +Naziba Mostafa and Pascale Fung. A note based query by humming system using convolutional neural network. In INTERSPEECH, pp. 3102-3106, 2017. +Naziba Mostafa, Yan Wan, Unnayan Amitabh, and Pascale Fung. A machine learning based music retrieval and recommendation system. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pp. 1970-1977, Porto Roz, Slovenia, May 2016. European Language Resources Association (ELRA). URL https://aclanthology.org/L16-1312. +Meinard Müller. Dynamic time warping. Information retrieval for music and motion, pp. 69-84, 2007. +Stephen Mussmann and Stefano Ermon. Learning and inference via maximum inner product search. In Maria Florina Balcan and Kilian Q. Weinberger (eds.), Proceedings of The 33rd International Conference on Machine Learning, volume 48 of Proceedings of Machine Learning Research, pp. 2587-2596, New York, New York, USA, 20-22 Jun 2016. PMLR. URL https://proceedings.mlr.press/v48/mussmann16.html. +Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: an asr corpus based on public domain audio books. In 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5206-5210. IEEE, 2015. + +L. Rabiner and B. Juang. An introduction to hidden markov models. IEEE ASSP Magazine, 3(1): 4-16, 1986. doi: 10.1109/MASSP.1986.1165342. +Dhananjay Ram, Afsaneh Asaei, and Hervé Bourlard. Sparse subspace modeling for query by example spoken term detection. IEEE ACM Trans. Audio Speech Lang. Process., 26(6):1126-1139, 2018. URL https://doi.org/10.1109/TASLP.2018.2815780. +Shivangi Ranjan and Vipul Arora. A bioinformatic method of semi-global alignment for query-by-humming. In 2020 IEEE 4th Conference on Information Communication Technology (CICT), pp. 1-5, 2020. doi: 10.1109/CICT51604.2020.9312085. 
+Luis J. Rodriguez-Fuentes, Amparo Varona, Mikel Penagarikano, Germán Bordel, and Mireia Diez. High-performance query-by-example spoken term detection on the sws 2013 evaluation. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7819-7823, 2014. doi: 10.1109/ICASSP.2014.6855122. +Justin Salamon and Emilia Gómez. Melody extraction from polyphonic music signals using pitch contour characteristics. IEEE transactions on audio, speech, and language processing, 20(6): 1759-1770, 2012. +Steffen Schneider, Alexei Baevski, Ronan Collobert, and Michael Auli. wav2vec: Unsupervised pre-training for speech recognition. CoRR, abs/1904.05862, 2019. URL http://arxiv.org/abs/1904.05862. +Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. CoRR, abs/1503.03832, 2015. URL http://arxiv.org/abs/1503.03832. +M. Schuster and K.K. Paliwal. Bidirectional recurrent neural networks. IEEE Transactions on Signal Processing, 45(11):2673-2681, 1997. doi: 10.1109/78.650093. +Erdem Unal, Elaine Chew, Panayiotis G. Georgiou, and Shrikanth S. Narayanan. Challenging uncertainty in query by humming systems: A fingerprinting approach. IEEE Transactions on Audio, Speech, and Language Processing, 16(2):359-371, 2008. doi: 10.1109/TASL.2007.912373. +Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. CoRR, abs/1711.00937, 2017. URL http://arxiv.org/abs/1711.00937. +Aäron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. CoRR, abs/1807.03748, 2018. URL http://arxiv.org/abs/1807.03748. +Ting Yao, Yingwei Pan, Yehao Li, and Tao Mei. Exploring visual relationship for image captioning. In Proceedings of the European conference on computer vision (ECCV), pp. 684-699, 2018. +Penghang Yin, Jiancheng Lyu, Shuai Zhang, Stanley J. Osher, Yingyong Qi, and Jack Xin. 
Understanding straight-through estimator in training activation quantized neural nets. CoRR, abs/1903.05662, 2019. URL http://arxiv.org/abs/1903.05662. +Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes, 2020. +Yaodong Zhang and James R. Glass. Unsupervised spoken keyword spotting via segmental dtw on gaussian posteriograms. In 2009 IEEE Workshop on Automatic Speech Recognition Understanding, pp. 398-403, 2009. doi: 10.1109/ASRU.2009.5372931. + +# A FURTHER STUDIES + +# A.1 SEQUENCE COMPRESSION + +We present the quality of sequence of tokens $\mathcal{T}$ and sequence of representations $\mathcal{Z}$ and their corresponding compressed versions sequences $\widetilde{\mathcal{T}}$ and $\widetilde{\mathcal{Z}}$ generated by the audio tokenizers in Table 5. + +wav2tok outperformed the baselines and generated the best quality of sequences $\mathcal{T}$ , $\mathcal{Z}$ , $\widetilde{\mathcal{T}}$ and $\widetilde{\mathcal{Z}}$ . Sequence compression brings an order of magnitude drop in search time for all the audio tokenizers with a trade-off in search performance. Compression from $\mathcal{T}$ to $\widetilde{\mathcal{T}}$ increases the robustness of the token sequences generated by wav2tok to various augmentations. wav2vec2P learnt better tokens and representations than wav2vec2 because of its pairwise training on similar audio. + +Table 5: Compression of Sequences: MRR scores for query by humming, $\mathrm{K} = {25}$ + +
ModelVTSPSSearch Time
Without Compression
wav2vec2 DTW0.850.840.84
wav2vec2P DTW0.870.850.873.5s
wav2tok DTW0.920.890.93
wav2vec2 ED0.720.690.730.68s
wav2vec2P ED0.750.710.73
wav2tok ED0.90.840.90.32s
With Compression
wav2vec2 DTW0.760.720.740.8s
wav2vec2P DTW0.810.770.79
wav2tok DTW0.880.880.870.6s
wav2vec2 ED0.660.630.640.06s
wav2vec2P ED0.690.650.67
wav2tok ED0.840.840.840.04s
# A.2 VARIATION IN NUMBER OF TOKENS $K$
ModelsWithout CompressionWith Compression
VTSPSVTSPS
K=15
wav2vec2 DTW0.850.830.840.70.660.67
wav2vec2P DTW0.870.850.850.820.770.8
wav2tok DTW0.880.870.880.840.80.83
wav2vec2 ED0.790.770.780.580.560.57
wav2vec2P ED0.80.770.790.710.680.7
wav2tok ED0.820.80.810.770.750.76
K=25
wav2vec2 DTW0.850.840.840.760.720.74
wav2vec2P DTW0.870.850.870.810.770.79
wav2tok DTW0.920.890.930.880.880.87
wav2vec2 ED0.720.690.730.660.630.64
wav2vec2P ED0.750.710.730.690.650.67
wav2tok ED0.90.840.90.840.840.84
K=40
wav2vec2 DTW0.840.820.830.720.680.7
wav2vec2P DTW0.860.850.850.810.770.79
wav2tok DTW0.90.880.890.860.830.83
wav2vec2 ED0.710.660.690.60.580.58
wav2vec2P ED0.730.70.730.680.650.67
wav2tok ED0.830.80.820.770.750.76
+ +Table 7: Ablation Studies and Some Variations: MRR scores for query by humming + +
ModelsWithout CompressionWith Compression
VTSPSVTSPS
log-mel DTW0.720.690.670.540.470.43
wav2tok+NoSim DTW0.880.870.870.80.840.83
wav2tok+Cos DTW0.880.870.870.830.810.81
wav2tok+NewInit DTW0.90.840.910.840.850.83
wav2tok+Trans DTW0.840.770.850.80.770.76
wav2tok+MIR1K DTW0.880.840.850.820.740.78
wav2tok DTW0.920.890.930.880.880.87
vq-log-mel ED0.710.60.620.520.480.47
wav2tok+NoSim ED0.850.740.840.730.730.72
wav2tok+Cos ED0.860.840.850.790.760.77
wav2tok+NewInit ED0.830.720.850.770.760.78
wav2tok+Trans ED0.840.770.850.70.660.67
wav2tok+MIR1K ED0.760.660.710.720.640.67
wav2tok ED0.90.840.90.840.840.84
+ +# A.5 TRAINING ON LARGER SPEECH DATASET + +We train wav2tok on 100-hours subset of LibriSpeech (Panayotov et al., 2015) dataset. We evaluate the quality of tokenization of word utterances done by wav2tok on TIMIT (Garofolo et al., 1993) dataset. We use a 2-layer BiLSTM network with 3.6 million parameters as encoder network which takes MFCC feature sequences as input. We perform tokenization with $K = 40$ tokens. + +wav2tok outperforms wav2vec2-O by a large margin and gives comparable performance to wav2vec2-Multi in terms of MRR score. wav2tok uses a minute number of parameters in comparison to 95 million parameters in wav2vec2-O and 317 million parameters in wav2vec2-Multi. Note, wav2vec2-O and wav2vec2-Multi were pre-trained on large amount of unlabelled speech data and + +Table 8: Quality of Representations: MRR scores for query by humming + +
ModelVTSPS
(Salamon & Gómez, 2012) MIDI ED0.750.640.72
(Mostafa & Fung, 2017) Note DTW0.840.740.8
Triplet DTW0.50.480.5
MIPS DTW0.60.550.58
wav2vec2-O DTW0.910.830.86
wav2vec2-Multi DTW0.880.830.85
wav2tok DTW0.920.90.93
wav2tok+MIR1K DTW0.880.840.85
+ +fine-tuned with transcription to perform tokenization of audio. Moreover, wav2vec2-O was fine-tuned to perform tokenization on TIMIT (Garofolo et al., 1993) dataset. Proposed wav2tok was trained on 100 hours of LibriSpeech dataset only. The tokens learnt by wav2tok on LibriSpeech (Panayotov et al., 2015) dataset generalised well to TIMIT (Garofolo et al., 1993). + +Table 9: Quality of Tokenization for speech (MRR Scores) + +
ModelNormal (T)Compressed (T)
wav2vec2-O ED0.40.4
wav2vec2-Multi ED0.670.67
wav2tok+Libri ED0.640.6
+ +# B CTC WITHOUT BLANKS + +We present the forward and backward variables used in calculating the gradients of the CTC loss $\mathcal{L}_{ctc}(\mathcal{X},\tilde{\mathcal{T}}^{\prime})$ with no blank tokens. + +The forward variable is defined as, + +$$ +\alpha_ {t} (s) = \sum_ {\pi ; \mathcal {C} (\pi_ {1: t}) = \tilde {\mathcal {T}} _ {1: s} ^ {\prime}} \prod_ {t ^ {\prime} = 1} ^ {t} l _ {t ^ {\prime}, \pi_ {t ^ {\prime}}} \tag {5} +$$ + +where $\pi$ corresponds to all $T$ -length paths over tokens such that $\mathcal{C}(\pi) = \tilde{T}'$ . Here, $\mathcal{C}$ is a compressor which compresses $\pi$ a $T$ -length sequence of tokens via de-duplication. + +We initialise as follows, + +$$ +\alpha_ {1} (1) = l _ {1, \tilde {\tau} _ {1} ^ {\prime}} \tag {6} +$$ + +$$ +\alpha_ {1} (s) = 0, \forall s > 1 +$$ + +and recursively calculate $\alpha_{t}(s)$ as, + +$$ +\alpha_ {t} (s) = \left(\alpha_ {t - 1} (s) + \alpha_ {t - 1} (s - 1)\right) l _ {t, \tilde {\mathcal {T}} _ {s} ^ {\prime}} \tag {7} +$$ + +We set $\alpha_{t}(s) = 0, \forall s < 1$ . + +The backward variable is defined as, + +$$ +\beta_ {t} (s) = \sum_ {\pi ; \mathcal {C} (\pi_ {t: T}) = \tilde {\mathcal {T}} _ {s: | \bar {\mathcal {T}} ^ {\prime} |} ^ {\prime}} \prod_ {t ^ {\prime} = t} ^ {T} l _ {t ^ {\prime}, \pi_ {t ^ {\prime}}} \tag {8} +$$ + +We initialise as follows, + +$$ +\beta_ {T} \left(\left| \tilde {\mathcal {T}} ^ {\prime} \right|\right) = l _ {T, \tilde {\mathcal {T}} _ {| \tilde {\mathcal {T}} ^ {\prime} |} ^ {\prime}} \tag {9} +$$ + +$$ +\beta_ {T} (s) = 0, \forall s < | \tilde {T} ^ {\prime} | +$$ + +and recursively calculate $\beta_t(s)$ as, + +$$ +\beta_ {t} (s) = \left(\beta_ {t + 1} (s) + \beta_ {t + 1} (s + 1)\right) l _ {t, \tilde {\mathcal {T}} _ {s} ^ {\prime}} \tag {10} +$$ + +We set $\beta_{t}(s) = 0, \forall s > |\tilde{T}^{\prime}|$ . 
+ +# C GUMBEL SOFTMAX BASED VECTOR QUANTIZER + +The Gumbel Softmax based Vector Quantizer (Baevski et al., 2019) quantizes input latent representation $z_{t} \in R^{m}$ with $C$ codebooks containing $K$ quantizers $e \in R^{K \times \frac{m}{C}}$ each. For our experiments, we set $C = 1$ and $K \in \{15, 25, 40\}$ . Given $\mathbf{z}_{t}$ , one of the $K$ quantizers from each of the $C$ codebooks are chosen resulting in vectors $e_{1}, \dots, e_{C}$ . The codebook vectors are then concatenated and linearly transformed from $R^{m}$ to $R^{d}$ to output a discrete representation $q_{t} \in R^{d}$ . + +$\mathbf{z}_t$ is mapped to $\mathbf{l} \in R^{C \times K}$ logits to give probability scores for the choice of codeword. The probability $p_{c,k}$ of choosing $k^{th}$ quantizer in $c^{th}$ codebook is given as, + +$$ +p _ {c, k} = \frac {\exp \left(l _ {c , k} + n _ {k}\right) / \tau}{\sum_ {i = 1} ^ {K} \exp \left(l _ {c , i} + n _ {i}\right) / \tau} \tag {11} +$$ + +where $\tau$ is a non-negative temperature, $n = -\log (-\log (u))$ and $u$ are samples from the uniform distribution $\mathbf{Unif}(0,1)$ . + +During forward pass, the codeword is chosen as $\kappa = \arg \max_{j} p_{c,j}$ . During backward pass, the loss is calculated over the gumble softmax distribution $p$ . We use the straight-through gradient estimator (Yin et al., 2019) to estimate the gradient. + +Codebook Diversity Loss $\mathcal{L}_d$ . This loss promotes equal use of all the entries in each of the $C$ codebooks. Minimization of this loss maximizes the entropy of the averaged softmax distribution $\tilde{p}$ over the $K$ entries for each codebook $\tilde{p}_c$ across a batch of utterances. 
+ +$$ +\mathcal {L} _ {d} = \frac {1}{C K} \sum_ {c = 1} ^ {C} \sum_ {k = 1} ^ {K} \tilde {p} _ {c, k} \log \tilde {p} _ {c, k} \tag {12} +$$ \ No newline at end of file diff --git a/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/images.zip b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..75e9ba893c0478b3cce35f7a668e313b094b8452 --- /dev/null +++ b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea5e408d947703377e04e52f317d7b3fb7b87b2769e3f02f9fb45341fc51491f +size 535636 diff --git a/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/layout.json b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..834922b544afe67e467d9c68a93f2def31baa93f --- /dev/null +++ b/2023/wav2tok_ Deep Sequence Tokenizer for Audio Retrieval/layout.json @@ -0,0 +1,13971 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 117 + ], + "type": "text", + "content": "WAV2TOK: DEEP SEQUENCETOKENIZER FOR AUDIO RETRIEVAL" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 135, + 246, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 135, + 246, + 147 + ], + "spans": [ + { + "bbox": [ + 111, + 135, + 246, + 147 + ], + "type": "text", + "content": "Adhiraj Banerjee, Vipul Arora" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 147, + 264, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 147, + 264, + 158 + ], + "spans": [ + { + "bbox": [ + 111, + 147, + 264, + 158 + ], + "type": "text", + "content": "Department of Electrical Engineering" + } + ] + } + ], + 
"index": 3 + }, + { + "bbox": [ + 111, + 158, + 292, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 158, + 292, + 169 + ], + "spans": [ + { + "bbox": [ + 111, + 158, + 292, + 169 + ], + "type": "text", + "content": "Indian Institute of Technology Kanpur, India" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 113, + 169, + 279, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 169, + 279, + 180 + ], + "spans": [ + { + "bbox": [ + 113, + 169, + 279, + 180 + ], + "type": "text", + "content": "{adhiraj,vipular}@iitk.ac.in" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 276, + 209, + 334, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 209, + 334, + 220 + ], + "spans": [ + { + "bbox": [ + 276, + 209, + 334, + 220 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 232, + 470, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 232, + 470, + 365 + ], + "spans": [ + { + "bbox": [ + 140, + 232, + 470, + 365 + ], + "type": "text", + "content": "Search over audio sequences is a fundamental problem. In this paper, we propose a method to extract concise discrete representations for audio that can be used for efficient retrieval. Our motivation comes from orthography which represents speech of a given language in a concise and distinct discrete form. The proposed method, wav2tok, learns such representations for any kind of audio, speech or non-speech, from pairs of similar audio. wav2tok compresses the query and target sequences into shorter sequences of tokens that are faster to match. The learning method makes use of CTC loss and expectation-maximization algorithm, which are generally used for supervised automatic speech recognition and for learning discrete latent variables, respectively. 
Experiments show the consistent performance of wav2tok across two audio retrieval tasks: music search (query by humming) and speech search via audio query, outperforming state-of-the-art baselines." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 383, + 206, + 394 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 383, + 206, + 394 + ], + "spans": [ + { + "bbox": [ + 105, + 383, + 206, + 394 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 407, + 506, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 506, + 506 + ], + "type": "text", + "content": "Sequence Retrieval aims at retrieving sequences similar to a query sequence, with the constraint that an ordered alignment exists between the query and the target sequence. In this paper, we address the following problem: Can we extract discrete tokens from any continuous signal for the purpose of retrieval of similar signals? This problem has deep connections with tasks such as child language acquisition, music cognition and learning languages without written forms. Some direct applications of the proposed task include speech search, where the order of constituent units, such as phonemes, syllables or words, remains same; and music search – query by humming or query by example – where the order of constituent units, such as relative notes or phrases, remains same. Apart from audio, the problem extends to tasks such as handwritten word search and gesture search." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 511, + 504, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 504, + 556 + ], + "type": "text", + "content": "One can define similarity metrics over sequences using methods based on Dynamic Time Warping (DTW) (Müller, 2007). These methods are inefficient if the sequences are continuous valued and have high sampling rates. Moreover, they depend on matching hand-made features, which are ineffective in the face of high variability of query sequences." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 561, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 504, + 651 + ], + "type": "text", + "content": "Problems such as spoken term detection involve detection of a query utterance in a long speech audio. The search space is huge, and performing DTW based search of query takes long time (Rodriguez-Fuentes et al., 2014). A more efficient way of sequence retrieval is by mapping them to sequences of discrete tokens. Automatic speech recognition (ASR) can be employed for this purpose (Mamou et al., 2013). However, ASR training requires knowledge of basic units of transcription. The popularly used units are phonemes and graphemes. This method thus becomes language dependent. Non-linguistic sounds, such as cough and sneeze, could be mapped to certain tokens defined for them. This approach could not be used when precise tokens are not defined, e.g., music search." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "In query by humming based music search, audio is mapped to discrete melody-related tokens, such as notes, and these token sequence are matched for search (Unal et al., 2008). However, several music traditions do not have precise transcription systems. There, one can tell if two pieces, or motifs, are similar but cannot precisely transcribe them to tokens. The embellishments used in music could be too dynamic to be transcribed precisely. Moreover, when a musically untrained user sings a query, s/he cannot hit the right notes matching the target song. So the matching could rely on several factors other than notes, such as phonemes of lyrics (Mesaros & Virtanen, 2010), onset times" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "(rhythm) (Kosugi et al., 2000), and note transitions (Ranjan & Arora, 
2020). Hence, the tokens to be used may not be derived from notes alone." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 504, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 189 + ], + "type": "text", + "content": "In this way, each tokenizer - for speech, music or other signals, in general - uses domain-specific hand-made tokens defined by a domain expert. In this paper, we propose a tokenizer to map audio sequences to sequences of discrete tokens with an aim of retrieval. The mapping is learned only from pairs of similar audio sequences. The tokens are not defined manually but correspond to distinct semantic units learned from pairs of similar audio sequences. The method is general and can be applied to signals other than audio. In this paper, we apply the proposed method to speech and music audio search, for the problems of spoken term detection and query by humming, respectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "type": "text", + "content": "The proposed method, named wav2tok, encodes audio via a BiLSTM (Schuster & Paliwal, 1997) network. The encoder-generated representations are then mapped to discrete tokens via a " + }, + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "type": "text", + "content": "-means vector quantizer network. 
Each discrete token corresponds to a discrete representation in the vector quantizer's codebook which is initialized and updated via offline " + }, + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "type": "text", + "content": "-means clustering only." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 243, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 342 + ], + "type": "text", + "content": "wav2tok is trained with pairs of similar audio sequences in a self-supervised fashion without any transcription using a novel training algorithm. For each pair, we average the encoder-generated representations, which map to the same token, by the " + }, + { + "bbox": [ + 104, + 243, + 504, + 342 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 243, + 504, + 342 + ], + "type": "text", + "content": "-means vector quantizer network to generate a prototype for that token. We then perform a contrastive learning task to increase the similarity between the generated prototype for a particular token and the quantizer codebook discrete representation corresponding to the same token. We simultaneously minimize the edit distance between the token sequences generated from each sequence in the pair via Connectionist temporal classification (CTC) (Graves et al., 2006) framework to constrain both sequences to get mapped to the same token sequence." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 347, + 506, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 506, + 403 + ], + "type": "text", + "content": "We compare wav2tok to state-of-the-art (SOTA) methods for discrete representation learning, such as wav2vec 2.0, and SOTA ASR models fine-tuned to perform phonetic tokenization. We evaluate the generalization capability of the tokens generated by the models on search experiments, namely, query-by-humming and spoken term detection. wav2tok outperforms the baselines in performance and uses much lesser trainable parameters, ensuring faster inference and deployment." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 420, + 212, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 420, + 212, + 431 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 212, + 431 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 445, + 504, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 504, + 534 + ], + "type": "text", + "content": "Sequence Labelling. With expert-defined tokens, various methods are popularly used for mapping sequences to tokens. In conventional methods, Hidden Markov Models (Rabiner & Juang, 1986) and Conditional Random Fields (Lafferty et al., 2001) have been popularly used for sequence labeling. These methods involve a significant amount of domain knowledge and many assumptions to make tractable models, which are avoided by End-to-End learning models such as Recurrent Neural Networks (RNNs) using Connectionist Temporal Classification framework (Graves et al., 2006). Sequence labeling can be used for sequence retrieval by converting the sequences to tokens, which are easy to search over. 
But this approach inevitably depends upon expert-defined tokens." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 539, + 504, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 650 + ], + "type": "text", + "content": "Unsupervised Speech Representation Learning. Automatic Speech Recognition systems are pretrained on large amounts of untranscribed speech data to generate SOTA continuous representations which encode the slowly varying phoneme features in raw speech. The representations are then mapped to phoneme tokens via Connectionist Temporal Classification (CTC) (Graves et al., 2006) fine-tuning on a small amount of transcribed audio. Works like Contrastive Predictive Coding (CPC) (van den Oord et al., 2018), Autoregressive Predictive Coding (APC) (Chung & Glass, 2020), and wav2vec (Schneider et al., 2019) generate continuous representations with powerful autoregressive models pre-trained to predict future time-step representations. Further works started discretizing the continuous representations with vq-VAE (van den Oord et al., 2017) to generate discrete representations for speech." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": "Works like vq-wav2vec (Baevski et al., 2019) and vq-APC (Chung et al., 2020) discretize the representations and perform the same prediction tasks as in wav2vec (Schneider et al., 2019) and APC (Chung & Glass, 2020) respectively but over discrete representations. In vq-wav2vec, the discrete representations are generated with either a K-Means Vector Quantizer (Baevski et al., 2019) or Gumbel-Softmax based Vector Quantizer (Baevski et al., 2019). 
The learned discrete representations are used to pre-train a BERT (Devlin et al., 2018) to generate stronger continuous representations much like BERT pre-training in Natural Language Processing. wav2vec 2.0 (Baevski et al.," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "content": "2020) uses a Gumble Softmax based Vector Quantizer (Baevski et al., 2019) to generate discrete representations. The training involves masking of spans of time steps and then predicting the correct discrete representations at each masked time step with transformer representation at that time step. In these methods, raw audio is discretized in a latent space to model all possible acoustic units than phonetic or sub-phonetic units. The tokens generated by the vector quantizers aren't constrained to be interpretable and are initialized in large numbers (" + }, + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "content": " 102.4K codes). 
After pre-training, a subset of these codes or tokens are chosen more often by the vector quantizers and are considered to represent acoustic units. CTC-based fine-tuning with transcription groups these discrete acoustic units to " + }, + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "content": " distinct phonemes or linguistic units as present in the transcriptions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 186, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 506, + 266 + ], + "type": "text", + "content": "Works like HuBERT and wav2vec-Unsupervised learn phonemic units directly. HuBERT (Hsu et al., 2021) pre-trains a transformer network via BERT-like masked prediction task over noisy targets generated with a clustering model trained offline. The targets may be generated with an ensemble of " + }, + { + "bbox": [ + 104, + 186, + 506, + 266 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 186, + 506, + 266 + ], + "type": "text", + "content": "-means clusterers with " + }, + { + "bbox": [ + 104, + 186, + 506, + 266 + ], + "type": "inline_equation", + "content": "K = \\{100,500\\}" + }, + { + "bbox": [ + 104, + 186, + 506, + 266 + ], + "type": "text", + "content": " clusters on MFCC features or transformer representations. wav2vec-Unsupervised (Baevski et al., 2021) learns phonetic tokens adversially from phonemized unlabelled text data. A discriminator identifies if the phoneme sequence generated by model is real or fake based on phonemized unlabelled text." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 505, + 304 + ], + "type": "text", + "content": "All aforementioned approaches use powerful auto-regressive models pre-trained on large amounts of unlabeled audio and fine-tuned on transcribed audio. Our learning approach can learn semantic tokens with small models while training pairwise on small amount of unlabeled audio data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 308, + 506, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 506, + 452 + ], + "type": "text", + "content": "Audio Representations for Retrieval. Now Playing (Arcas et al., 2017) and (Chang et al., 2020) use a Neural Network Fingerprinter (NNFP) module outputting representations which are efficient for search in query-by-example tasks where the difference between query and the actual song is pretty minute in comparison to humming where only the melody is sung. Now Playing (Arcas et al., 2017) trains representations by optimizing the triplet loss (Schroff et al., 2015) and (Chang et al., 2020) trains representations by simulating the Maximum Inner Product Search (MIPS) on minibatches of representations. For Query by Humming task, (Mostafa et al., 2016) and (Mostafa & Fung, 2017) use deep learning models like DNNs and CNNs to generate representations which they map to MIDI-numbers or note tokens. Such works require note-transcribed data to train the models. 
For Spoken Term Detection task, approaches like (Zhang & Glass, 2009), (Rodriguez-Fuentes et al., 2014), (Lee et al., 2015), (Ram et al., 2018) convert audio to sequences of feature vectors and apply different variations of DTW based template matching to detect query in long utterances of speech which is time-consuming." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 456, + 506, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 506, + 536 + ], + "type": "text", + "content": "Cross Domain Alignment. Given a pair of semantically similar inputs for training, tasks such as visual question answering (text and image) and machine translation (text) involve learning an alignment. The alignment here is not ordered and the inputs may be from different modalities. Attention models have been used to find alignment between output entities and input regions (Yao et al., 2018). (Chen et al., 2020) use Gromov-Wasserstein distance between output and input entities to match them. However, there is no notion of tokens there, rather the salient entities in the input are represented as vectors in a graph." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 539, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 506, + 608 + ], + "type": "text", + "content": "Graph Matching. Graph Neural Networks (Gori et al., 2005) are used to generate embeddings for graphs. These embeddings are used to perform graph matching to find similarity of structured graphs (Li et al., 2019). However, they perform the matching jointly on the pair of inputs, rather than representing each input independently. This makes them unsuitable for the search problem at hand due to large run-time complexity. 
The distance metrics used for graph matching are based on edit distance (Li et al., 2019) and Wasserstein distance (Chen et al., 2020)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 631, + 243, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 631, + 243, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 243, + 643 + ], + "type": "text", + "content": "3 PROBLEM STATEMENT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": "We aim to map " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": ", a sequence of vectors, to " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{T}}" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": ", a sequence of discrete tokens from a finite alphabet " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "\\mathbb{A}" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": ", such that the similarity of sequences is preserved in the sense of edit distance. The length of sequence " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{T}}" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": " may be less than or equal to that of the sequence " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": ". 
In other words, given a pair of similar sequences " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "(\\mathcal{X}_i,\\mathcal{X}_j)" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": " and sequence " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_k" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": " which is not similar to either sequences in the pair, we want to map them to token sequences such that " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "ED(\\tilde{\\mathcal{T}}_i,\\tilde{\\mathcal{T}}_j)" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": " should be less than " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "\\min \\{ED(\\tilde{\\mathcal{T}}_i,\\tilde{\\mathcal{T}}_k),ED(\\tilde{\\mathcal{T}}_j,\\tilde{\\mathcal{T}}_k)\\}" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "inline_equation", + "content": "ED(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 104, + 659, + 505, + 735 + ], + "type": "text", + "content": " is the edit distance between two sequences." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 251, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 251, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 251, + 94 + ], + "type": "text", + "content": "4 MODEL ARCHITECTURE" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "spans": [ + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "text", + "content": "wav2tok is comprised of an encoder " + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "inline_equation", + "content": "f: \\mathbb{X} \\mapsto \\mathbb{Z}" + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "text", + "content": " which takes as input a temporal sequence of audio features " + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = [\\mathbf{x}_t \\in R^n; t \\in [T]]" + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "text", + 
"content": ", where " + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "text", + "content": " is the feature vector at time step " + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "text", + "content": ", and outputs a sequence of L-2 normalised representations " + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "inline_equation", + "content": "\\mathcal{Z} = [\\mathbf{z}_t = f(\\mathbf{x}_t) \\in R^m; t \\in [T]]" + }, + { + "bbox": [ + 104, + 105, + 504, + 162 + ], + "type": "text", + "content": ". The encoder is implemented as a 2-layer BiLSTM followed by an L-2 normalization layer. BiLSTMs summarise information in both directions and encode surrounding context." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": "A " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": "-means vector quantizer network " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "g: \\mathbb{Z} \\mapsto \\mathbb{T}" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " then labels sequence " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " at each time-step with tokens belonging to a finite " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + 
"content": "K" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": "-element alphabet " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathbb{A} = [K]" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " and generates sequence of tokens " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = [\\tau_t = g(\\mathbf{z}_t) \\in \\mathbb{A}; t \\in [T]]" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": ". Network " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " vector quantizes input " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " with a codebook " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "E = \\{\\mathbf{e}_k \\in \\mathbb{Z}; k \\in [K]\\}" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " comprised of " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "|\\mathbb{A}| = K" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " discrete representations which are cluster centroids in representation space " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathbb{Z}" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " and outputs token " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\tau_t = \\arg \\max_k \\mathbf{z}_t \\cdot \\mathbf{e}_k" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + 
], + "type": "text", + "content": ". Note, here the dot product gives a cosine similarity score since both the vectors are L-2 normalized, as a result, " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_k \\in E" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " closest to " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " is chosen as its discrete representation and index " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " as its token " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "\\tau_t" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " discrete representations in network " + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 166, + 506, + 255 + ], + "type": "text", + "content": " are trainable parameters." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "content": "A compressor " + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "content": " compresses sequence of tokens " + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "content": " to sequence " + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{T}}" + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "inline_equation", + "content": "\\tilde{T} \\leq T" + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "content": " by deleting all consecutive repetitions of tokens. 
" + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "content": " also generates the corresponding compressed sequence " + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{Z}}" + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "inline_equation", + "content": "\\tilde{T}" + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "content": " by averaging representations " + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t \\in \\mathcal{Z}" + }, + { + "bbox": [ + 104, + 261, + 505, + 310 + ], + "type": "text", + "content": " over the consecutive tokens and L-2 normalising the averaged representation. Figure 1a presents an illustration demonstrating our model architecture." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 324, + 180, + 337 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 324, + 180, + 337 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 180, + 337 + ], + "type": "text", + "content": "5 TRAINING" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "text", + "content": "wav2tok is trained on pairs of sequences of audio features " + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "inline_equation", + "content": "(\\mathcal{X},\\mathcal{X}^{\\prime})" + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "text", + "content": " where the raw audio corresponding to " + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{\\prime}" + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "text", + "content": " is an augmented replica of that corresponding to " + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "text", + "content": ". We apply either pitch shift or time stretch or both augmentations to raw audio to generate its augmented replica. " + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{\\prime}" + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "text", + "content": " may differ in sources as well, i.e. 
a different person may sing the recording corresponding to " + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{\\prime}" + }, + { + "bbox": [ + 104, + 349, + 504, + 394 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": "The discrete representations in quantizer " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " codebook " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " are initialized as " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " centroids obtained via offline " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": "-means clustering over freshly initialized encoder-generated representations. 
Given " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "(\\mathcal{X},\\mathcal{X}^{\\prime})" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": ", encoder " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " generates sequence of representations " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " from input " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^{\\prime}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{\\prime}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": ". 
Quantizer " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " generates a sequence of tokens " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " from input " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\prime}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^{\\prime}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " via cosine similarity-based comparison with codebook vectors " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "e\\in E" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " initialized via offline clustering over freshly initialized representation space " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": ". 
Compressor " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " compresses sequence of tokens " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " to sequence " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{T}}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{T}^{\\prime}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{T}}^{\\prime}" + }, + { + "bbox": [ + 104, + 399, + 504, + 467 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "content": "We average all encoder-generated representations in pair " + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "inline_equation", + "content": "(\\mathcal{Z},\\mathcal{Z}^{\\prime})" + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "content": " which map to the same token, say " + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "content": ", to generate a prototype for " + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "content": ". We then perform a contrastive task where we compare the prototype with each of the " + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "content": " discrete representations in codebook " + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "content": " and increase its similarity with the discrete representation corresponding to " + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "content": ". 
We also increase the likelihood that wav2tok maps pair " + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "inline_equation", + "content": "(\\mathcal{X},\\mathcal{X}^{\\prime})" + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "content": " to the same token sequence via CTC framework to minimize " + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "inline_equation", + "content": "ED(\\tilde{\\mathcal{T}},\\tilde{\\mathcal{T}}^{\\prime})" + }, + { + "bbox": [ + 104, + 472, + 504, + 530 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 534, + 233, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 534, + 233, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 233, + 545 + ], + "type": "text", + "content": "Our loss function is defined as," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 201, + 549, + 504, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 549, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 201, + 549, + 504, + 563 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {m} \\left(\\mathcal {X}, \\mathcal {X} ^ {\\prime}\\right) + \\alpha \\mathcal {L} _ {c t c} \\left(\\mathcal {X}, \\tilde {\\mathcal {T}} ^ {\\prime}\\right) + \\beta \\mathcal {L} _ {c t c} \\left(\\mathcal {X} ^ {\\prime}, \\tilde {\\mathcal {T}}\\right) \\tag {1}", + "image_path": "e88433fe2cfcf5fe131c7d6b57ee9c60be636c35a8c47fc1469856f4034196a9.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_m" + }, + { + "bbox": [ + 104, + 567, + 
504, + 612 + ], + "type": "text", + "content": " is loss defined for contrastive task, " + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ctc}" + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "text", + "content": " is the loss maximising aforementioned likelihood, and " + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\alpha, \\beta" + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "text", + "content": " are positive constants. We optimize this loss function in a manner similar to the Expectation Maximization algorithm. The clustering is used as the E-step to update the discrete representations in quantizer " + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "text", + "content": " codebook, while gradient descent over " + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 567, + 504, + 612 + ], + "type": "text", + "content": " acts as the M-step." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "text", + "content": "Contrastive Loss. 
Let the set of unique tokens occurring in pair " + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "inline_equation", + "content": "(\\tilde{\\mathcal{T}},\\tilde{\\mathcal{T}}^{\\prime})" + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "text", + "content": " be " + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{U}\\subset [K],|\\mathcal{U}| = K^{\\prime}\\leq K" + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "text", + "content": ". We generate a list of token prototypes " + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{P} = \\{\\mathbf{p}_{\\tau};\\tau \\in \\mathcal{U}\\}" + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_{\\tau}" + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "text", + "content": " is L-2 normalised mean of representations in " + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{z}\\in \\{\\mathcal{Z};\\mathcal{Z}^{\\prime}\\} :g(\\mathbf{z}) = \\tau \\}" + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "text", + "content": ". Figure 1b presents an illustration demonstrating how we generate list of token prototypes " + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 104, + 617, + 506, + 663 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 668, + 504, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 668, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 668, + 504, + 712 + ], + "type": "text", + "content": "Given " + }, + { + "bbox": [ + 104, + 668, + 504, + 712 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_{\\tau} \\in \\mathcal{P}" + }, + { + "bbox": [ + 104, + 668, + 504, + 712 + ], + "type": "text", + "content": ", we perform a contrastive task to increase its similarity with discrete representation " + }, + { + "bbox": [ + 104, + 668, + 504, + 712 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_{\\tau} \\in E" + }, + { + "bbox": [ + 104, + 668, + 504, + 712 + ], + "type": "text", + "content": ". To compare " + }, + { + "bbox": [ + 104, + 668, + 504, + 712 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_{\\tau}" + }, + { + "bbox": [ + 104, + 668, + 504, + 712 + ], + "type": "text", + "content": " with the codebook, metrics such as cosine similarity and Euclidean distance could be used. 
However, we find that using the following parameterized score for this purpose gives better performance," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 227, + 716, + 504, + 730 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 716, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 227, + 716, + 504, + 730 + ], + "type": "interline_equation", + "content": "\\mathbf {s} _ {\\tau , k} = \\sigma \\left(W \\cdot \\left(\\mathbf {p} _ {\\tau} - s g \\left(\\mathbf {e} _ {k}\\right)\\right)\\right) \\in [ 0, 1 ] \\tag {2}", + "image_path": "ab84d9b70545a9bbb30abee56c2b0f12ad132a74b67bbee1da61d1f3dcaa1588.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 92, + 219, + 168 + ], + "blocks": [ + { + "bbox": [ + 106, + 92, + 219, + 168 + ], + "lines": [ + { + "bbox": [ + 106, + 92, + 219, + 168 + ], + "spans": [ + { + "bbox": [ + 106, + 92, + 219, + 168 + ], + "type": "image", + "image_path": "956cf898b477fc6de2c77a42c7b5364a007a69cb0518830ef4962dd076047183.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 120, + 172, + 205, + 183 + ], + "lines": [ + { + "bbox": [ + 120, + 172, + 205, + 183 + ], + "spans": [ + { + "bbox": [ + 120, + 172, + 205, + 183 + ], + "type": 
"text", + "content": "(a) Model Architecture" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 228, + 92, + 356, + 168 + ], + "blocks": [ + { + "bbox": [ + 228, + 92, + 356, + 168 + ], + "lines": [ + { + "bbox": [ + 228, + 92, + 356, + 168 + ], + "spans": [ + { + "bbox": [ + 228, + 92, + 356, + 168 + ], + "type": "image", + "image_path": "613cca31105fe5eba5f895b8b1438042f49aee518aa385e1b23644e7a0f81c62.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 172, + 355, + 184 + ], + "lines": [ + { + "bbox": [ + 230, + 172, + 355, + 184 + ], + "spans": [ + { + "bbox": [ + 230, + 172, + 355, + 184 + ], + "type": "text", + "content": "(b) Generation of Prototype list " + }, + { + "bbox": [ + 230, + 172, + 355, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "lines": [ + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "type": "text", + "content": "Figure 1: " + }, + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "type": "inline_equation", + "content": "\\mathcal{X}'" + }, + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "type": "text", + "content": " is an augmented replica of " + }, + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "type": "text", + "content": ". 1a illustrates our model architecture. 
1b demonstrates the generation of " + }, + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "type": "text", + "content": " required for calculation of " + }, + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_m" + }, + { + "bbox": [ + 104, + 192, + 504, + 215 + ], + "type": "text", + "content": ". 1c demonstrates our likelihood loss calculation." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 369, + 79, + 496, + 168 + ], + "blocks": [ + { + "bbox": [ + 369, + 79, + 496, + 168 + ], + "lines": [ + { + "bbox": [ + 369, + 79, + 496, + 168 + ], + "spans": [ + { + "bbox": [ + 369, + 79, + 496, + 168 + ], + "type": "image", + "image_path": "38c36d594d6a844f7b0098a07399ada42bc53ecd185fbe077c70093962fa57c4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 376, + 172, + 492, + 183 + ], + "lines": [ + { + "bbox": [ + 376, + 172, + 492, + 183 + ], + "spans": [ + { + "bbox": [ + 376, + 172, + 492, + 183 + ], + "type": "text", + "content": "(c) Likelihood Loss Calculation" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "spans": [ + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "inline_equation", + "content": "sg(x) \\equiv x, \\frac{d}{dx} sg(x) \\equiv 0" + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "text", + "content": " is the stop-gradient operator, " + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "inline_equation", + "content": "\\sigma(\\cdot)" + }, + { + 
"bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "text", + "content": " is sigmoid function generating a score in the range [0, 1] and " + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "inline_equation", + "content": "W \\in R^{1 \\times d}" + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "text", + "content": " is a parameter matrix. " + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_{\\tau,k}" + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "text", + "content": " acts as a parameterized similarity score between " + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_{\\tau}" + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "text", + "content": " and discrete representation " + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_k \\in E" + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "text", + "content": ". 
We define our contrastive loss " + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_m" + }, + { + "bbox": [ + 104, + 235, + 504, + 272 + ], + "type": "text", + "content": " as," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 218, + 277, + 505, + 306 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 277, + 505, + 306 + ], + "spans": [ + { + "bbox": [ + 218, + 277, + 505, + 306 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {m} \\left(\\mathcal {X}, \\mathcal {X} ^ {\\prime}\\right) = - \\sum_ {\\tau \\in \\mathcal {U}} \\log \\frac {\\exp \\left(\\mathbf {s} _ {\\tau , \\tau}\\right)}{\\sum_ {k = 1} ^ {K} \\exp \\left(\\mathbf {s} _ {\\tau , k}\\right)} \\tag {3}", + "image_path": "75aa2ec0aef1552aec1208a33198e7eb2a6557f62362374de2b4414a543b984d.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": "Likelihood Loss. We maximize the likelihood that sequence " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": " maps to token sequence " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{T}}'" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": ", which corresponds to " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{X}'" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": ", via the CTC framework (see Figure 1c). 
It puts a constraint to generate the same token sequence for " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{X}'" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": ". We calculate the probability of " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": " mapping to token " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\tau_t = k" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "l_{t,k} = \\frac{\\exp(f(\\mathbf{x}_t)\\cdot sg(e_k))}{\\sum_{i=1}^K \\exp(f(\\mathbf{x}_t)\\cdot sg(e_i))}" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": ". 
The likelihood " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "P(\\tilde{\\mathcal{T}}'|\\mathcal{X})" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": " is then calculated as a sum of probabilities of all " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": "-length paths " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": " over tokens " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\tau \\in \\mathbb{A}" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{C}(\\pi) = \\tilde{\\mathcal{T}}'" + }, + { + "bbox": [ + 104, + 319, + 504, + 384 + ], + "type": "text", + "content": ". 
The loss is defined as," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 221, + 389, + 505, + 417 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 389, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 221, + 389, + 505, + 417 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {c t c} (\\mathcal {X}, \\tilde {\\mathcal {T}} ^ {\\prime}) = - \\log \\sum_ {\\pi \\in C ^ {- 1} (\\tilde {\\mathcal {T}} ^ {\\prime})} P (\\pi | \\mathcal {X}) \\tag {4}", + "image_path": "8b3d8f0fb2845756833553cc98c69d8fd4ccdede8ab0050ed67653883afbac98.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 429, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 504, + 464 + ], + "type": "text", + "content": "where the path probabilities are calculated over token probability scores in sequence " + }, + { + "bbox": [ + 104, + 429, + 504, + 464 + ], + "type": "inline_equation", + "content": "\\mathbf{l} = \\{l_t \\in R^K; t \\in [T]\\}" + }, + { + "bbox": [ + 104, + 429, + 504, + 464 + ], + "type": "text", + "content": " via CTC forward-backward framework (Graves et al., 2006) without the use of blanks. We present the CTC forward and backward variables for our use case in Appendix B." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "text", + "content": "Clustering. 
We perform offline " + }, + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "text", + "content": "-means clustering on a subset of encoder representations during initialization of our network and at regular intervals during training to set the discrete representations in codebook " + }, + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "text", + "content": " of network " + }, + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "text", + "content": ". Initializing the clusters in this way prevents wav2tok from converging to a local optimum during the matching task, as is the case we found with random initialization of centroids. The intermittent clustering during training iteratively refines the discrete representations and prevents codebook collapse. We use the sklearn library to perform " + }, + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 468, + 504, + 535 + ], + "type": "text", + "content": "-means clustering." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "type": "text", + "content": "We train wav2tok using the ADAM (Kingma & Ba, 2017) optimizer and a linear learning schedule with a learning rate of 0.001 and " + }, + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "type": "inline_equation", + "content": "8\\%" + }, + { + "bbox": [ + 104, + 540, + 504, + 563 + ], + "type": "text", + "content": " of the training steps as warm-up steps." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 578, + 200, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 200, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 200, + 590 + ], + "type": "text", + "content": "6 EXPERIMENTS" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 603, + 504, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 603, + 504, + 637 + ], + "spans": [ + { + "bbox": [ + 104, + 603, + 504, + 637 + ], + "type": "text", + "content": "We test the performance of tokens and encoder-generated continuous representations of wav2tok in audio retrieval. We perform Query by Humming (QbH) and Spoken Term Detection experiments to evaluate the performance of wav2tok in comparison to the baselines." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 650, + 340, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 650, + 340, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 340, + 662 + ], + "type": "text", + "content": "6.1 MUSIC MELODY SEARCH: QUERY BY HUMMING" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 670, + 504, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 670, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 104, + 670, + 504, + 692 + ], + "type": "text", + "content": "Task. Given a test query audio, we are to find the audio with the most similar melody in the search audio database." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 698, + 505, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 732 + ], + "type": "text", + "content": "Experiment Details. 
We use the MIR-QbSH dataset which is composed of 4431 humming audio recordings of " + }, + { + "bbox": [ + 104, + 698, + 505, + 732 + ], + "type": "inline_equation", + "content": "30s" + }, + { + "bbox": [ + 104, + 698, + 505, + 732 + ], + "type": "text", + "content": " duration corresponding to 48 songs. Each song is sung by several individuals. All individuals sing the same part of the song. The recordings have variations in the environments" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": "they were recorded in, tonal qualities, voices, pitch, and time stretch. We train our models on hums of 40 songs in MIR-QbSH dataset and evaluate search performance on hums of the remaining 8 songs. The training dataset has 1970 hums for training and 676 for validation. The test dataset has 225 hums as a search database and 659 query hums. We evaluate the performance of our models in identifying which song a given query corresponds to via comparison with all sequences in the search database. 
Each model converts all the audio in our test dataset to sequences of tokens or representations. Each query sequence is compared to all sequences in the search database via Edit Distance (ED) (if tokens) or DTW (if representations). The song id of the most similar sequence in the search database is then selected as query song id. We calculate Mean Reciprocal Ranking (MRR) score with ground-truth song id of the queries for evaluation. The Reciprocal Ranking (RR) score is given as " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\frac{1}{r}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " if the " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "r^{th}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " most similar sequence in search database has same song id as query." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 209, + 504, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 504, + 244 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 504, + 244 + ], + "type": "text", + "content": "All the audio recordings are converted to Short Term Fourier Transform (STFT) matrices before being passed as inputs to our models. The STFT matrices are computed with 513 frequency bins, a window length of 1024 samples (summarising 128 ms of audio), and hop length of 512 samples." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 258, + 248, + 269 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 258, + 248, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 248, + 269 + ], + "type": "text", + "content": "6.2 SPOKEN TERM DETECTION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 280, + 436, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 280, + 436, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 280, + 436, + 293 + ], + "type": "text", + "content": "Task. Given a test query audio, we are to detect its occurrence in a long utterance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 296, + 506, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 296, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 506, + 418 + ], + "type": "text", + "content": "Experiment Details. We use the TIMIT dataset which is composed of 6300 utterances of English speech with time-aligned word transcriptions. We choose 59 most-occurring words with more than 2 characters as keywords and all others as non-keywords. We use utterances of random sentences formed with 6 words sampled from a subset of 25 keywords for training and evaluation on STD experiments for the detection of the remaining 34 keywords. The test dataset is composed of 337 utterances corresponding to the 34 queries and 100 long utterances per query, with half containing a single occurrence of query amongst non-keywords and the other half containing only non-keywords. Given a query and a long utterance, we convert both to sequences of tokens using each audio tokenizer. We perform approximate string matching (Hall & Dowling, 1980) for detection of query in the utterance. 
The STFT matrix inputs to the models are computed with 185 frequency bins, a window length of 368 samples (summarising " + }, + { + "bbox": [ + 104, + 296, + 506, + 418 + ], + "type": "inline_equation", + "content": "23\\mathrm{ms}" + }, + { + "bbox": [ + 104, + 296, + 506, + 418 + ], + "type": "text", + "content": " of audio), and a hop length of 92 samples." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 434, + 180, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 434, + 180, + 445 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 180, + 445 + ], + "type": "text", + "content": "6.3 BASELINES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": "Triplet. We train encoder " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "f: \\mathbb{X} \\mapsto \\mathbb{Z}" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " to generate L-2 normalized continuous representations for retrieval. Encoder " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " is trained via optimizing the triplet Loss (Schroff et al., 2015) as done in training an NNFP in Now Playing (Arcas et al., 2017). 
Given pair of similar sequences " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "(\\mathcal{X}, \\mathcal{X}')" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": ", encoder " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " generates sequences " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}'" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": ". We form a mini-batch of size " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " of triplets " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{z}, \\mathbf{z}^+, \\mathbf{z}^-\\}" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " where representation " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " is sampled from sequence " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathbf{z}^+" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 
104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathbf{z}^-" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " are positive and negative samples respectively for " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " sampled from sequence " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}'" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": ". The loss is defined as, " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text{Triplet}} = \\sum_{i=1}^{N} \\max \\{||\\mathbf{z}_i - \\mathbf{z}_i^+|| - ||\\mathbf{z}_i - \\mathbf{z}_i^-|| + m, 0\\}" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 454, + 504, + 537 + ], + "type": "text", + "content": " is a margin for similarity." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": "MIPS. We train encoder " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "f: \\mathbb{X} \\mapsto \\mathbb{Z}" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " to generate L-2 normalized continuous representations for retrieval. 
Encoder " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " is trained via simulation of MIPS (Mussmann & Ermon, 2016) on mini-batches of representations as proposed by (Chang et al., 2020). Given pair of similar sequences " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "(\\mathcal{X}, \\mathcal{X}')" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": ", encoder " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " generates sequences " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}'" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": ". 
We form a mini-batch of size " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " of pairs of " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{z}, \\mathbf{z}^+\\}" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " where encoder generated representation " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " is sampled from sequence " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{z}^+" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " is a positive for " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": " sampled from " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}'" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": ". The loss is defined as, " + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{MIPS}} = -\\sum_{i=1}^{N} \\log \\frac{\\exp(\\mathbf{z}_i, \\mathbf{z}_i^+)}{\\sum_{j \\neq i} (\\exp(\\mathbf{z}_i \\cdot \\mathbf{z}_j^+) + \\exp(\\mathbf{z}_i \\cdot \\mathbf{z}_j))}" + }, + { + "bbox": [ + 104, + 540, + 504, + 618 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "wav2vec2. We train our audio tokenizer via wav2vec 2.0 (Baevski et al., 2020) learning framework. Quantizer " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " in our audio tokenizer is chosen to be a Gumbel Softmax-based Vector Quantizer (See Appendix C for details) as used in (Baevski et al., 2020) but with a single codebook with " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " members. Given sequence " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ", encoder " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " outputs sequence of L-2 normalised representations " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ". 
Quantizer " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " outputs sequence of discrete representations " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{Q} = \\{q_t = g(z_t \\in \\mathcal{Z}); t = 1, \\dots, T\\}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ". We mask spans of 10 time steps with random starting indices in sequence " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " and then pass the new sequence to a transformer network " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "h: \\mathbb{Z} \\mapsto \\mathbb{O}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " which generates a sequence of contextualized representations " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{O} = \\{\\mathbf{o}_t = h(z_t \\in \\mathcal{Z}); t = 1, \\dots, T\\}" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ". 
For transformer output " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{o}_t" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " over masked time step " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ", we identify the true discrete representation " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_t" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " from a set " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "D_t" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " composed of " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_t" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": " distractors which are discrete representations sampled from other time steps. 
The loss is defined as," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 79, + 504, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 79, + 504, + 109 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 504, + 109 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_w(\\mathbf{o}_t,\\mathcal{D}_t) = -\\log \\frac{\\exp(sim(\\mathbf{o}_t,\\mathbf{q}_t))}{\\sum_{\\tilde{q}\\in\\mathcal{D}_t}\\exp(sim(\\mathbf{o}_t,\\tilde{q}))} + \\mathcal{L}_d" + }, + { + "bbox": [ + 104, + 79, + 504, + 109 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 79, + 504, + 109 + ], + "type": "inline_equation", + "content": "sim(a,b) = \\frac{a^Tb}{||a||||b||}" + }, + { + "bbox": [ + 104, + 79, + 504, + 109 + ], + "type": "text", + "content": " is cosine similarity and " + }, + { + "bbox": [ + 104, + 79, + 504, + 109 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_d" + }, + { + "bbox": [ + 104, + 79, + 504, + 109 + ], + "type": "text", + "content": " is codebook diversity loss." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": "wav2vec2P. We train wav2vec2 audio tokenizer with our variation of wav2vec 2.0 (Baevski et al., 2020) learning framework which learns discrete representations from pairs of similar sequences. Given pair " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "(\\mathcal{X},\\mathcal{X}^{\\prime})" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": ", encoder " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " outputs sequences " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}'" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "T'" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " respectively. 
Assuming " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "T\\leq T'" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": ", we generate sequence " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " whose " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " time step element " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " is a positive for " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t\\in \\mathcal{Z}" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " sampled from sequence " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}'" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": ". 
Gumbel Softmax-based Vector Quantizer " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " quantizes each representation in sequence " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " to generate sequence " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": ". We mask sequence " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " at the same time steps. Transformer " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " inputs masked sequences and generate sequences " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{O}" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{O}^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": ". 
For masked time step " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": ", we use transformer output " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{o}_t" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " to identify " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_t^+\\in \\mathcal{Q}^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " from set " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " with distractors sampled from sequence " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " and transformer output " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{o}_t^+" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " to identify " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_t\\in \\mathcal{Q}" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " from set " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_t" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": " with distractors sampled from sequence " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", 
+ "content": ". The loss is defined as, " + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{wP} = \\mathcal{L}_w(\\mathbf{o}_t,\\mathcal{D}_t^+) + \\mathcal{L}_w(\\mathbf{o}_t^+, \\mathcal{D}_t)" + }, + { + "bbox": [ + 104, + 114, + 506, + 229 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 233, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 233, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 233, + 504, + 266 + ], + "type": "text", + "content": "wav2vec2-O. The original wav2vec 2.0 base model with 12 Transformer blocks and " + }, + { + "bbox": [ + 104, + 233, + 504, + 266 + ], + "type": "inline_equation", + "content": "95M" + }, + { + "bbox": [ + 104, + 233, + 504, + 266 + ], + "type": "text", + "content": " parameters as proposed by (Baevski et al., 2020). It is pre-trained on 960 hours of LibriSpeech data and fine-tuned on TIMIT dataset. It uses " + }, + { + "bbox": [ + 104, + 233, + 504, + 266 + ], + "type": "inline_equation", + "content": "K = 32" + }, + { + "bbox": [ + 104, + 233, + 504, + 266 + ], + "type": "text", + "content": " tokens for tokenization." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "type": "text", + "content": "wav2vec2-Multi. A wav2vec 2.0 large model with 24 Transformer blocks and " + }, + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "type": "inline_equation", + "content": "317M" + }, + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "type": "text", + "content": " parameters pre-trained on 53 languages as proposed by (Conneau et al., 2020). 
It is fine-tuned on Common Voice to detect all possible phonemes in training languages with " + }, + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "type": "inline_equation", + "content": "K = 392" + }, + { + "bbox": [ + 104, + 272, + 504, + 307 + ], + "type": "text", + "content": " tokens." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "spans": [ + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "text", + "content": "Triplet and MIPS use a 2-layer BiLSTM as encoder with " + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "inline_equation", + "content": "3.6M" + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "text", + "content": " parameters. We use the LAMB optimizer (You et al., 2020) and a Cosine Annealing Learning Schedule (Loshchilov & Hutter, 2017) with a learning rate restart of 0.0001 to train them. wav2vec2 and wav2vec2P use a 2-layer BiLSTM encoder with " + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "inline_equation", + "content": "3.6M" + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "text", + "content": " parameters to generate latent representations and 3 Transformer blocks with " + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "inline_equation", + "content": "8.5M" + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "text", + "content": " parameters. Both are trained using the ADAM (Kingma & Ba, 2017) optimizer and a linear learning schedule with a learning rate of 0.001 and " + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "inline_equation", + "content": "8\\%" + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "text", + "content": " of the training steps as warm-up steps. 
Proposed wav2tok uses only a 2-layer BiLSTM as encoder with " + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "inline_equation", + "content": "3.6M" + }, + { + "bbox": [ + 104, + 311, + 505, + 389 + ], + "type": "text", + "content": " parameters." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 404, + 174, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 404, + 174, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 174, + 415 + ], + "type": "text", + "content": "7 RESULTS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 429, + 339, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 429, + 339, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 339, + 441 + ], + "type": "text", + "content": "7.1 MUSIC MELODY SEARCH: QUERY BY HUMMING" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 449, + 506, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 506, + 529 + ], + "type": "text", + "content": "We present search performances for 3 settings of query namely- Query with no augmentation or Vanilla Query (V), Time Stretched Query (TS), and Pitch Shifted Query (PS). Time stretch and pitch shift are the most common augmentations that may be faced in queries by humming data. No augmentations were applied to audio in search database. Evaluations are performed on sequences corresponding to songs not seen during training. The results present the generalizability of the tokens or representations generated by the models. We set the number of tokens as " + }, + { + "bbox": [ + 104, + 449, + 506, + 529 + ], + "type": "inline_equation", + "content": "K = 25" + }, + { + "bbox": [ + 104, + 449, + 506, + 529 + ], + "type": "text", + "content": " for wav2tok, wav2vec2, and wav2vec2P (See Appendix A.2 for experiments to support our choices)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 533, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 506, + 733 + ], + "type": "text", + "content": "Quality of Tokenization. Table 1 presents the performance of the sequence of tokens " + }, + { + "bbox": [ + 104, + 533, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{T}}" + }, + { + "bbox": [ + 104, + 533, + 506, + 733 + ], + "type": "text", + "content": " generated by the audio tokenizers on ED-based similarity search. Tokens generated by wav2tok present good generalization capabilities in terms of MRR and outperform all the baselines. It generates time and pitch invariant tokens as we see no drop in performance when either augmentation is applied to query. wav2vec2-O is trained on English speech only. The tokens generated by it do not contain much melodic information but are robust to augmentations. The multilingual training of wav2vec2-Multi infuses both melodic and phonetic information to its 392 tokens, thereby giving good performance. wav2tok outperforms both wav2vec2-O and wav2vec2-Multi given its pairwise training which allows it to infuse more melodic information to the tokens while also being trained on a small amount of unlabelled data. The Gumbel Softmax-based quantizer in wav2vec2 and wav2vec2P isn't ideal for infusing melodic information to tokens but it does infuse phonetic information as will be seen in Section 7.2. We compare the tokens with representations learned by MIPS and Triplet evaluated on DTW-based similarity search. The continuous representations present sub-par generalizations to unseen songs. We compare wav2tok with SOTA melody extraction algorithm proposed in (Salamon & Gomez, 2012) which converts hums to MIDI sequences. 
wav2tok generates token sequences much smaller than the respective MIDI sequences and outperforms the MIDI tokens in search performance, search time, and robustness. In addition, wav2tok outperforms the algorithm in inference time. We further compare wav2tok with SOTA QbH system proposed in (Mostafa &" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": "Fung, 2017). In our implementation, we map audio to MIDI sequences using the aforementioned SOTA melody extraction algorithm instead of a CNN. 
Given MIDI sequence 53, 53, 58, 50 with durations 0s, 0.5s, 1s, 2s, a Relative Note sequence is generated as " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "(0,0)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "(0,0.5)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "(5,1)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "(-8,2)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": " over which DTW is performed for retrieval. wav2tok tokens outperform the SOTA QbH system in both performance and robustness; the performance of the latter drops drastically with time stretch." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "text", + "content": "We present the performances of the uncompressed sequences " + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "text", + "content": " and compressed sequence " + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathcal{Z}}" + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "text", + "content": " generated by the audio tokenizers in Appendix A.1. We observe a drop in performance for all audio tokenizers when we apply sequence compression to sequences " + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 144, + 504, + 190 + ], + "type": "text", + "content": ". wav2tok outperforms all the baselines and generates superior-quality of continuous representations and discrete tokens." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 194, + 506, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 506, + 262 + ], + "type": "text", + "content": "Search Time. 
Table 1 presents the search time taken for similarity search over the tokens or representations generated by the models. The search time taken per query is 2 order of magnitude lesser for ED-based Search over compressed sequence of tokens " + }, + { + "bbox": [ + 104, + 194, + 506, + 262 + ], + "type": "inline_equation", + "content": "\\tilde{T}" + }, + { + "bbox": [ + 104, + 194, + 506, + 262 + ], + "type": "text", + "content": " than standard DTW-based Search over continuous representations " + }, + { + "bbox": [ + 104, + 194, + 506, + 262 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 194, + 506, + 262 + ], + "type": "text", + "content": ". The pre-trained models being fine-tuned on transcribed audio give the best tokens in terms of compression and search time. wav2tok gives comparable tokens but outperforms the pre-trained models in inference time." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 106, + 300, + 315, + 395 + ], + "blocks": [ + { + "bbox": [ + 139, + 271, + 272, + 283 + ], + "lines": [ + { + "bbox": [ + 139, + 271, + 272, + 283 + ], + "spans": [ + { + "bbox": [ + 139, + 271, + 272, + 283 + ], + "type": "text", + "content": "Table 1: Quality of Tokenization" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 300, + 315, + 395 + ], + "lines": [ + { + "bbox": [ + 106, + 300, + 315, + 395 + ], + "spans": [ + { + "bbox": [ + 106, + 300, + 315, + 395 + ], + "type": "table", + "html": "
ModelV (MRR)TS (MRR)PS (MRR)Search Time (s)Infer (s)
MIDI ED0.750.640.723.840.62
Relative Note DTW0.840.740.80.020.62
Triplet DTW0.50.480.53.50.1
MIPS DTW0.60.550.58
wav2vec2 ED0.660.630.640.060.17
wav2vec2P ED0.690.650.67
wav2vec2-O ED0.720.720.710.010.43
wav2vec2-Multi ED0.820.820.821.2
wav2tok ED0.840.840.840.040.14
", + "image_path": "76e7f9b37d8b6fd965a383a0746696f2a952d8bfc38c211ca43a859aec06b334.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 326, + 308, + 497, + 394 + ], + "blocks": [ + { + "bbox": [ + 315, + 272, + 504, + 283 + ], + "lines": [ + { + "bbox": [ + 315, + 272, + 504, + 283 + ], + "spans": [ + { + "bbox": [ + 315, + 272, + 504, + 283 + ], + "type": "text", + "content": "Table 2: Ablation Studies and Some Variations" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 326, + 308, + 497, + 394 + ], + "lines": [ + { + "bbox": [ + 326, + 308, + 497, + 394 + ], + "spans": [ + { + "bbox": [ + 326, + 308, + 497, + 394 + ], + "type": "table", + "html": "
ModelV (MRR)TS (MRR)PS (MRR)
log-mel DTW0.720.70.67
vq-log-mel ED0.710.60.62
wav2tok+NoSim ED0.730.730.72
wav2tok+Cos ED0.790.760.77
wav2tok+CTC ED0.640.620.63
wav2tok+NewInit ED0.770.760.78
wav2tok+MIR1K ED0.720.640.67
wav2tok ED0.840.840.84
", + "image_path": "c7c7b6dd5efd3e6191d8c24bc116b74e03350dbd4f4020547b79ff27bf75d8cf.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 406, + 506, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 406, + 506, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 406, + 506, + 538 + ], + "type": "text", + "content": "Ablation Studies. Query by humming involves similarity based on melody information, which is carried by the semantic pairing of the audio in training data. We constrain this pairing to include sequences not semantically similar and call this model wav2tok+NoSim. We optimize the contrastive loss " + }, + { + "bbox": [ + 104, + 406, + 506, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_m" + }, + { + "bbox": [ + 104, + 406, + 506, + 538 + ], + "type": "text", + "content": " to train the model. The results are shown in Table 2 (full table in Appendix A.3). There is a significant drop in token robustness and performance but the representations suffer a small drop (see Appendix A.3). Hence, although the representation space may be well clustered, wav2tok is able to add more semantics to the tokens as it is being trained with pairs of similar sequences in comparison to wav2tok+NoSim. We train wav2tok with cosine similarity scores instead of a parameterized score (wav2tok+Cos). The drop in performance validates the enhancement brought about by using a parameterized score. We also train wav2tok with " + }, + { + "bbox": [ + 104, + 406, + 506, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ctc}" + }, + { + "bbox": [ + 104, + 406, + 506, + 538 + ], + "type": "text", + "content": " only (wav2tok+CTC). The CTC loss considers all possible paths which compress to the target label sequence. As a result, the learnt tokens aren't much semantic. The use of both losses gives the best tokens." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 544, + 506, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 643 + ], + "type": "text", + "content": "Some Variations. In wav2tok+NewInit, we associate the discrete representations with " + }, + { + "bbox": [ + 104, + 544, + 506, + 643 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 544, + 506, + 643 + ], + "type": "text", + "content": " centroids in the input space " + }, + { + "bbox": [ + 104, + 544, + 506, + 643 + ], + "type": "inline_equation", + "content": "\\mathbb{X}" + }, + { + "bbox": [ + 104, + 544, + 506, + 643 + ], + "type": "text", + "content": ". Such association does not initialize our tokenizer with optimal centroids which cluster the space " + }, + { + "bbox": [ + 104, + 544, + 506, + 643 + ], + "type": "inline_equation", + "content": "\\mathbb{Z}" + }, + { + "bbox": [ + 104, + 544, + 506, + 643 + ], + "type": "text", + "content": " perfectly. This results in a significant drop in performance and robustness as shown in Table 2. We train wav2tok on MIR-1K dataset (wav2tok+MIR1K) which is composed of polyphonic music recordings of 1000 distinct songs. The tokens generalize well to monophonic hums in MIR-QbSH dataset giving a comparable performance to MIDI tokens. This validates that wav2tok tokens do learn melodic information and are robust to variations incurred in hums. We further compare wav2tok with log-mel features and token sequences (with no compression) obtained via quantization of log-mel features. wav2tok tokens outperform both." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 656, + 248, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 248, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 248, + 667 + ], + "type": "text", + "content": "7.2 SPOKEN TERM DETECTION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 733 + ], + "type": "text", + "content": "Quality of Tokenization. Table 3 presents the quality of tokenization of the query keywords by the models evaluated in the Spoken Term Detection experiments. We present the performances of wav2vec2, wav2vec2P, wav2vec2-O, wav2vec2-Multi and proposed wav2tok. We conduct search experiments on a test dataset composed of a search database of 337 utterances of the 34 keywords used as queries in the STD experiments and 1289 query utterances. 
We identify the keyword to" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "which each query corresponds to via comparison to all the 337 utterances in the search database via ED-based similarity score. The word id of the most similar utterance is selected as the word to which the query corresponds to. We set " + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "inline_equation", + "content": "K = 40" + }, + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": " equivalent to the number of phonemes in English." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 121, + 506, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 506, + 255 + ], + "type": "text", + "content": "wav2tok gives the best performance in terms of MRR scores. It outperforms huge models like wav2vec2-O and wav2vec2-Multi which are fine-tuned for the task of phonetic tokenization of speech audio while using a small number of parameters. 
wav2vec2 and wav2vec2P also outperform wav2vec2-Multi and wav2vec2-O while using smaller number of parameters. wav2vec2-O and wav2vec2-Multi use a blank token to handle consecutive occurrences of the same tokens and to label background noise. The utterances of each keyword in the test dataset are very small in time duration. This causes wav2vec2-O to confuse word utterances as background noise. It generates a sequence of blank tokens and performs poorly in search. wav2vec2-Multi using a larger number of phonetic tokens does not suffer this issue. wav2tok, wav2vec2, and wav2vec2P have no such blank token. This brings a drop in search performance with sequence compression. We further present the performance of wav2tok trained on a much larger LibriSpeech 100 hours dataset (wav2tok+Libri). It is able to outperform wav2vec2-O and give comparable performance to wav2vec2-Multi." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 115, + 307, + 301, + 401 + ], + "blocks": [ + { + "bbox": [ + 117, + 277, + 294, + 289 + ], + "lines": [ + { + "bbox": [ + 117, + 277, + 294, + 289 + ], + "spans": [ + { + "bbox": [ + 117, + 277, + 294, + 289 + ], + "type": "text", + "content": "Table 3: Quality of Tokenization for speech" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 115, + 307, + 301, + 401 + ], + "lines": [ + { + "bbox": [ + 115, + 307, + 301, + 401 + ], + "spans": [ + { + "bbox": [ + 115, + 307, + 301, + 401 + ], + "type": "table", + "html": "
ModelNormal (MRR)Compressed (MRR)
log-mel DTW0.7-
wav2vec2 ED0.680.63
wav2vec2P ED0.70.65
wav2vec2-O ED0.40.4
wav2vec2-Multi ED0.670.67
wav2tok ED0.740.66
wav2tok+Libri ED0.640.6
", + "image_path": "e6f142a9c1666ec04183f11cebc0613dea09545db55a754084259de32b78cf7b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 313, + 306, + 502, + 401 + ], + "blocks": [ + { + "bbox": [ + 339, + 277, + 471, + 289 + ], + "lines": [ + { + "bbox": [ + 339, + 277, + 471, + 289 + ], + "spans": [ + { + "bbox": [ + 339, + 277, + 471, + 289 + ], + "type": "text", + "content": "Table 4: Spoken Term Detection" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 306, + 502, + 401 + ], + "lines": [ + { + "bbox": [ + 313, + 306, + 502, + 401 + ], + "spans": [ + { + "bbox": [ + 313, + 306, + 502, + 401 + ], + "type": "table", + "html": "
ModelED(F1)Search Time(s)DTW(F1)Search Time(s)
log-mel DTW--0.410.003
wav2vec20.640.0660.460.1
wav2vec2P0.640.47
wav2vec2-O0.610.290.430.23
wav2vec2-Multi0.630.720.480.66
wav2tok0.650.0640.520.09
wav2tok+Libri0.630.440.1
", + "image_path": "e4d86e5ffacbb40a2cdb139ac0a4748f466edd55cc3ea902422e66e4a4a60366.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 425, + 506, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 425, + 506, + 557 + ], + "spans": [ + { + "bbox": [ + 104, + 425, + 506, + 557 + ], + "type": "text", + "content": "Spoken Term Detection. We convert the query word utterance and the long utterance in to sequences of tokens by all our models and detect the occurrence of the query via approximate string matching. We use fuzzysearch library to perform approximate string matching. It automatically chooses the fastest algorithm for matching. Table 4 presents the performance of wav2vec2, wav2vec2P, wav2vec2-O, wav2vec2-Multi, and proposed wav2tok in STD. All the models give a comparable performance in terms of F1- score with wav2tok performing slightly better. We also implement the STD system proposed in (Anguera & Ferrarons, 2013) which performs highly competitive STD via subsequence DTW (S-DTW) over gaussian posterior features. In our implementation, we extract the posterior features with SOTA ASR models like wav2vec2-O and wav2vec2-Multi. The results are presented in the DTW column in Table 4. Note, the results for other models in same column are for STD via S-DTW over representations. We observe STD over tokens to give better F1-score." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 588, + 303, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 588, + 303, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 588, + 303, + 601 + ], + "type": "text", + "content": "8 CONCLUSION AND FUTURE WORK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "In this paper, we present an audio sequence tokenizer wav2tok that generates semantically meaningful ordered representations (or tokens) that can be used for efficient retrieval by query sequences. The model learns only from pairs of semantically similar sequences and outperforms state-of-the-art approaches for spoken term detection and query by humming. One may apply more efficient search algorithms such as locality-sensitive hashing and longest common subsequence search on the generated tokens to further speed up the search. The proposed framework can also be extended to image and video retrieval, as they also have spatial ordering. We would like to investigate the domain-specific, i.e., linguistic or musicological, aspects of the extracted tokens. For instance, during retrieval, the matching algorithm assumes all the tokens to be equidistant from each other. One may study or use the metric space of these tokens." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 222, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 222, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 222, + 94 + ], + "type": "text", + "content": "9 REPRODUCIBILITY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 505, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 505, + 129 + ], + "type": "text", + "content": "The codes are available in https://github.com/madhavlab/wav2tok. The experiments are performed using standard datasets." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 144, + 176, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 144, + 176, + 156 + ], + "spans": [ + { + "bbox": [ + 106, + 144, + 176, + 156 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 163, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 163, + 505, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 197 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 197 + ], + "type": "text", + "content": "Xavier Anguera and Miquel Ferrarons. Memory efficient subsequence dtw for query-by-example spoken term detection. In 2013 IEEE International Conference on Multimedia and Expo (ICME), pp. 1-6, 2013. doi: 10.1109/ICME.2013.6607546." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 203, + 505, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 203, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 505, + 247 + ], + "type": "text", + "content": "Blaise Agüera Arcas, Beat Gfeller, Ruiqi Guo, Kevin Kilgour, Sanjiv Kumar, James Lyon, Julian Odell, Marvin Ritter, Dominik Roblek, Matthew Sharifi, and Mihajlo Velimirovic. Now playing: Continuous low-power music recognition. CoRR, abs/1711.10958, 2017. URL http://arxiv.org/abs/1711.10958." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 255, + 505, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 505, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 505, + 287 + ], + "type": "text", + "content": "Alexei Baevski, Steffen Schneider, and Michael Auli. vq-wav2vec: Self-supervised learning of discrete speech representations. CoRR, abs/1910.05453, 2019. URL http://arxiv.org/abs/1910.05453." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 295, + 505, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 295, + 505, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 505, + 328 + ], + "type": "text", + "content": "Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. CoRR, abs/2006.11477, 2020. URL https://arxiv.org/abs/2006.11477." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 335, + 505, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 505, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 505, + 358 + ], + "type": "text", + "content": "Alexei Baevski, Wei-Ning Hsu, Alexis Conneau, and Michael Auli. Unsupervised speech recognition. CoRR, abs/2105.11084, 2021. URL https://arxiv.org/abs/2105.11084." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 365, + 505, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 505, + 399 + ], + "type": "text", + "content": "Sungkyun Chang, Donmoon Lee, Jeongsoo Park, Hyungui Lim, Kyogu Lee, Karam Ko, and Yoon-chang Han. Neural audio fingerprint for high-specific audio retrieval based on contrastive learning. CoRR, abs/2010.11910, 2020. URL https://arxiv.org/abs/2010.11910." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 405, + 505, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 405, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 505, + 438 + ], + "type": "text", + "content": "Liqun Chen, Zhe Gan, Yu Cheng, Linjie Li, Lawrence Carin, and Jingjing Liu. Graph optimal transport for cross-domain alignment. 37th International Conference on Machine Learning, ICML 2020, PartF16814:1520-1531, 2020." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 445, + 505, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 505, + 479 + ], + "type": "text", + "content": "Yu-An Chung and James Glass. Generative pre-training for speech with autoregressive predictive coding. In ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3497-3501, 2020. doi: 10.1109/ICASSP40776.2020.9054438." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 486, + 505, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 486, + 505, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 486, + 505, + 509 + ], + "type": "text", + "content": "Yu-An Chung, Hao Tang, and James Glass. Vector-quantized autoregressive predictive coding. arXiv preprint arXiv:2005.08392, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 515, + 505, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 505, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 505, + 548 + ], + "type": "text", + "content": "Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, and Michael Auli. Unsupervised cross-lingual representation learning for speech recognition. CoRR, abs/2006.13979, 2020. URL https://arxiv.org/abs/2006.13979." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 555, + 505, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 505, + 589 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 505, + 589 + ], + "type": "text", + "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805, 2018. URL http://arxiv.org/abs/1810.04805." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 596, + 505, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 505, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 505, + 629 + ], + "type": "text", + "content": "John S Garofolo, Lori F Lamel, William M Fisher, Jonathan G Fiscus, and David S Pallett. Darpa timit acoustic-phonetic continuous speech corpus cd-rom. nist speech disc 1-1.1. NASA STI/Recon technical report n, 93:27403, 1993." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 636, + 505, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 636, + 505, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 505, + 670 + ], + "type": "text", + "content": "Marco Gori, Gabriele Monfardini, and Franco Scarselli. A new model for learning in graph domains. In Proceedings. 2005 IEEE International Joint Conference on Neural Networks, 2005., volume 2, pp. 729-734. IEEE, 2005." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 677, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 505, + 732 + ], + "type": "text", + "content": "Alex Graves, Santiago Fernández, Faustino Gomez, and Jürgen Schmidhuber. Connectionist temporal classification: Labelling unsegmented sequence data with recurrent neural networks. In Proceedings of the 23rd International Conference on Machine Learning, ICML '06, pp. 369-376, New York, NY, USA, 2006. Association for Computing Machinery. ISBN 1595933832. doi: 10.1145/1143844.1143891. URL https://doi.org/10.1145/1143844.1143891." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 82, + 504, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 504, + 105 + ], + "type": "text", + "content": "Patrick AV Hall and Geoff R Dowling. Approximate string matching. ACM computing surveys (CSUR), 12(4):381-402, 1980." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 112, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 112, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 505, + 157 + ], + "type": "text", + "content": "Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. CoRR, abs/2106.07447, 2021. URL https://arxiv.org/abs/2106.07447." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 165, + 460, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 165, + 460, + 178 + ], + "spans": [ + { + "bbox": [ + 107, + 165, + 460, + 178 + ], + "type": "text", + "content": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 185, + 505, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 505, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 505, + 241 + ], + "type": "text", + "content": "Naoko Kosugi, Yuichi Nishihara, Tetsuo Sakata, Masashi Yamamuro, and Kazuhiko Kushima. A practical query-by-humming system for a large music database. In Proceedings of the Eighth ACM International Conference on Multimedia, MULTIMEDIA '00, pp. 333-342, New York, NY, USA, 2000. Association for Computing Machinery. ISBN 1581131984. doi: 10.1145/354384.354520. URL https://doi.org/10.1145/354384.354520." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 248, + 505, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 248, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 505, + 293 + ], + "type": "text", + "content": "John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01, pp. 282-289, San Francisco, CA, USA, 2001. Morgan Kaufmann Publishers Inc. ISBN 1558607781." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 301, + 505, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 301, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 505, + 335 + ], + "type": "text", + "content": "Lin-shan Lee, James Glass, Hung-yi Lee, and Chun-an Chan. 
Spoken content retrieval—beyond cascading speech recognition with text retrieval. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23(9):1389-1420, 2015. doi: 10.1109/TASLP.2015.2438543." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 342, + 505, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 505, + 376 + ], + "type": "text", + "content": "Yujia Li, Chenjie Gu, Thomas Dullien, Oriol Vinyals, and Pushmeet Kohli. Graph matching networks for learning the similarity of graph structured objects. In International conference on machine learning, pp. 3835-3845. PMLR, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 384, + 484, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 484, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 484, + 397 + ], + "type": "text", + "content": "Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 403, + 505, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 403, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 505, + 460 + ], + "type": "text", + "content": "Jonathan Mamou, Jia Cui, Xiaodong Cui, Mark J. F. Gales, Brian Kingsbury, Kate Knill, Lidia Mangu, David Nolden, Michael Picheny, Bhavana Ramabhadran, Ralf Schlüter, Abhinav Sethy, and Philip C. Woodland. System combination and score normalization for spoken term detection. In 2013 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 8272-8276, 2013. doi: 10.1109/ICASSP.2013.6639278." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 468, + 505, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 505, + 502 + ], + "type": "text", + "content": "Annamaria Mesaros and Tuomas Virtanen. Recognition of phonemes and words in singing. In 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2146-2149, 2010. doi: 10.1109/ICASSP.2010.5495585." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 510, + 504, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 504, + 533 + ], + "type": "text", + "content": "Naziba Mostafa and Pascale Fung. A note based query by humming system using convolutional neural network. In INTERSPEECH, pp. 3102-3106, 2017." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 540, + 505, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 540, + 505, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 505, + 596 + ], + "type": "text", + "content": "Naziba Mostafa, Yan Wan, Unnayan Amitabh, and Pascale Fung. A machine learning based music retrieval and recommendation system. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pp. 1970-1977, Porto Roz, Slovenia, May 2016. European Language Resources Association (ELRA). URL https://aclanthology.org/L16-1312." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 604, + 504, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 604, + 504, + 626 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 504, + 626 + ], + "type": "text", + "content": "Meinard Müller. Dynamic time warping. Information retrieval for music and motion, pp. 69-84, 2007." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 634, + 505, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 634, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 505, + 691 + ], + "type": "text", + "content": "Stephen Mussmann and Stefano Ermon. Learning and inference via maximum inner product search. In Maria Florina Balcan and Kilian Q. Weinberger (eds.), Proceedings of The 33rd International Conference on Machine Learning, volume 48 of Proceedings of Machine Learning Research, pp. 2587-2596, New York, New York, USA, 20-22 Jun 2016. PMLR. URL https://proceedings.mlr.press/v48/mussmann16.html." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "text", + "content": "Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: an asr corpus based on public domain audio books. In 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5206-5210. IEEE, 2015." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 725 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 105 + ], + "type": "text", + "content": "L. Rabiner and B. Juang. An introduction to hidden markov models. IEEE ASSP Magazine, 3(1): 4-16, 1986. doi: 10.1109/MASSP.1986.1165342." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 117, + 505, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 117, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 117, + 505, + 152 + ], + "type": "text", + "content": "Dhananjay Ram, Afsaneh Asaei, and Hervé Bourlard. Sparse subspace modeling for query by example spoken term detection. IEEE ACM Trans. Audio Speech Lang. Process., 26(6):1126-1139, 2018. URL https://doi.org/10.1109/TASLP.2018.2815780." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 164, + 505, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 164, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 505, + 198 + ], + "type": "text", + "content": "Shivangi Ranjan and Vipul Arora. A bioinformatic method of semi-global alignment for query-by-humming. In 2020 IEEE 4th Conference on Information Communication Technology (CICT), pp. 1-5, 2020. doi: 10.1109/CICT51604.2020.9312085." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 211, + 505, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 211, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 211, + 505, + 255 + ], + "type": "text", + "content": "Luis J. Rodriguez-Fuentes, Amparo Varona, Mikel Penagarikano, Germán Bordel, and Mireia Diez. High-performance query-by-example spoken term detection on the sws 2013 evaluation. In 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7819-7823, 2014. doi: 10.1109/ICASSP.2014.6855122." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 268, + 505, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 268, + 505, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 505, + 302 + ], + "type": "text", + "content": "Justin Salamon and Emilia Gómez. Melody extraction from polyphonic music signals using pitch contour characteristics. IEEE transactions on audio, speech, and language processing, 20(6): 1759-1770, 2012." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 315, + 505, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 315, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 505, + 349 + ], + "type": "text", + "content": "Steffen Schneider, Alexei Baevski, Ronan Collobert, and Michael Auli. wav2vec: Unsupervised pre-training for speech recognition. 
CoRR, abs/1904.05862, 2019. URL http://arxiv.org/abs/1904.05862." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 361, + 505, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 505, + 395 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 505, + 395 + ], + "type": "text", + "content": "Florian Schroff, Dmitry Kalenichenko, and James Philbin. Facenet: A unified embedding for face recognition and clustering. CoRR, abs/1503.03832, 2015. URL http://arxiv.org/abs/1503.03832." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 408, + 505, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 408, + 505, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 505, + 432 + ], + "type": "text", + "content": "M. Schuster and K.K. Paliwal. Bidirectional recurrent neural networks. IEEE Transactions on Signal Processing, 45(11):2673-2681, 1997. doi: 10.1109/78.650093." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 444, + 505, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 444, + 505, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 505, + 478 + ], + "type": "text", + "content": "Erdem Unal, Elaine Chew, Panayiotis G. Georgiou, and Shrikanth S. Narayanan. Challenging uncertainty in query by humming systems: A fingerprinting approach. IEEE Transactions on Audio, Speech, and Language Processing, 16(2):359-371, 2008. doi: 10.1109/TASL.2007.912373." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 491, + 505, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 505, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 505, + 514 + ], + "type": "text", + "content": "Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. CoRR, abs/1711.00937, 2017. URL http://arxiv.org/abs/1711.00937." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 526, + 505, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 505, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 505, + 550 + ], + "type": "text", + "content": "Aäron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. CoRR, abs/1807.03748, 2018. URL http://arxiv.org/abs/1807.03748." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 562, + 505, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 562, + 505, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 505, + 586 + ], + "type": "text", + "content": "Ting Yao, Yingwei Pan, Yehao Li, and Tao Mei. Exploring visual relationship for image captioning. In Proceedings of the European conference on computer vision (ECCV), pp. 684-699, 2018." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 597, + 505, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 505, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 505, + 632 + ], + "type": "text", + "content": "Penghang Yin, Jiancheng Lyu, Shuai Zhang, Stanley J. Osher, Yingyong Qi, and Jack Xin. Understanding straight-through estimator in training activation quantized neural nets. CoRR, abs/1903.05662, 2019. URL http://arxiv.org/abs/1903.05662." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 644, + 505, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 644, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 505, + 678 + ], + "type": "text", + "content": "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes, 2020." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 691, + 505, + 725 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 691, + 505, + 725 + ], + "spans": [ + { + "bbox": [ + 105, + 691, + 505, + 725 + ], + "type": "text", + "content": "Yaodong Zhang and James R. Glass. Unsupervised spoken keyword spotting via segmental dtw on gaussian posteriograms. In 2009 IEEE Workshop on Automatic Speech Recognition Understanding, pp. 398-403, 2009. doi: 10.1109/ASRU.2009.5372931." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 227, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 227, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 227, + 94 + ], + "type": "text", + "content": "A FURTHER STUDIES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 109, + 246, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 109, + 246, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 109, + 246, + 121 + ], + "type": "text", + "content": "A.1 SEQUENCE COMPRESSION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 131, + 504, + 166 
+ ], + "spans": [ + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "text", + "content": "We present the quality of sequence of tokens " + }, + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "text", + "content": " and sequence of representations " + }, + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "text", + "content": " and their corresponding compressed versions sequences " + }, + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathcal{T}}" + }, + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathcal{Z}}" + }, + { + "bbox": [ + 104, + 131, + 504, + 166 + ], + "type": "text", + "content": " generated by the audio tokenizers in Table 5." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "text", + "content": "wav2tok outperformed the baselines and generated the best quality of sequences " + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathcal{T}}" + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathcal{Z}}" + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "text", + "content": ". Sequence compression brings an order of magnitude drop in search time for all the audio tokenizers with a trade-off in search performance. Compression from " + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathcal{T}}" + }, + { + "bbox": [ + 104, + 171, + 506, + 231 + ], + "type": "text", + "content": " increases the robustness of the token sequences generated by wav2tok to various augmentations. wav2vec2P learnt better tokens and representations than wav2vec2 because of its pairwise training on similar audio." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 186, + 275, + 425, + 446 + ], + "blocks": [ + { + "bbox": [ + 141, + 244, + 468, + 257 + ], + "lines": [ + { + "bbox": [ + 141, + 244, + 468, + 257 + ], + "spans": [ + { + "bbox": [ + 141, + 244, + 468, + 257 + ], + "type": "text", + "content": "Table 5: Compression of Sequences: MRR scores for query by humming, " + }, + { + "bbox": [ + 141, + 244, + 468, + 257 + ], + "type": "inline_equation", + "content": "\\mathrm{K} = {25}" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 186, + 275, + 425, + 446 + ], + "lines": [ + { + "bbox": [ + 186, + 275, + 425, + 446 + ], + "spans": [ + { + "bbox": [ + 186, + 275, + 425, + 446 + ], + "type": "table", + "html": "
ModelVTSPSSearch Time
Without Compression
wav2vec2 DTW0.850.840.84
wav2vec2P DTW0.870.850.873.5s
wav2tok DTW0.920.890.93
wav2vec2 ED0.720.690.730.68s
wav2vec2P ED0.750.710.73
wav2tok ED0.90.840.90.32s
With Compression
wav2vec2 DTW0.760.720.740.8s
wav2vec2P DTW0.810.770.79
wav2tok DTW0.880.880.870.6s
wav2vec2 ED0.660.630.640.06s
wav2vec2P ED0.690.650.67
wav2tok ED0.840.840.840.04s
", + "image_path": "74b5fecef399669d5c444a83309913fe9428d736f2bec1e6d8fbd0ac317b1a59.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 472, + 296, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 296, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 296, + 483 + ], + "type": "text", + "content": "A.2 VARIATION IN NUMBER OFTOKENS " + }, + { + "bbox": [ + 105, + 472, + 296, + 483 + ], + "type": "inline_equation", + "content": "K" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "text", + "content": "The effect of varying the size of alphabet " + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "inline_equation", + "content": "\\mathbb{A}" + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "text", + "content": " is shown in Table 6. We train wav2vec2, wav2vec2P, and proposed wav2tok with alphabets of size " + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "inline_equation", + "content": "K\\in \\{15,25,40\\}" + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "text", + "content": ". Out of the three settings for " + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "inline_equation", + "content": "K = 25" + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "text", + "content": " gives the best performance for all models. 
wav2tok gives best performance for all settings of " + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 495, + 504, + 538 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 558, + 324, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 558, + 324, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 558, + 324, + 569 + ], + "type": "text", + "content": "A.3 ABLATION STUDIES AND SOME VARIATIONS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 580, + 506, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 580, + 506, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 580, + 506, + 615 + ], + "type": "text", + "content": "We present the full version of Table 2 in table 7. Note wav2tok+NoSim representations are well clustered. wav2tok+Trans representations are also comparable with wav2tok but the tokens are of lesser quality. This is due to model overfitting." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 632, + 271, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 632, + 271, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 271, + 643 + ], + "type": "text", + "content": "A.4 QUALITY OF REPRESENTATIONS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 654, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 731 + ], + "type": "text", + "content": "We present the performance of the continuous representations generated by wav2tok and the baselines in Table 8. wav2tok generates the best representations for music outperforming representations generated by the large wav2vec 2.0 models. wav2tok trained on MIR1K generates representations outperforming domain-specific QbH baselines. 
Note, wav2vec2-O outperforms wav2vec2-Multi as the hums in the dataset were all in english. wav2vec2-O is pre-trained and fine-tuned on English speech only while wav2vec2-Multi is pre-trained multilingually. Hence, wav2vec2-O gave better results." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 164, + 110, + 447, + 381 + ], + "blocks": [ + { + "bbox": [ + 172, + 79, + 440, + 94 + ], + "lines": [ + { + "bbox": [ + 172, + 79, + 440, + 94 + ], + "spans": [ + { + "bbox": [ + 172, + 79, + 440, + 94 + ], + "type": "text", + "content": "Table 6: Effect of varying " + }, + { + "bbox": [ + 172, + 79, + 440, + 94 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 172, + 79, + 440, + 94 + ], + "type": "text", + "content": " : MRR scores for query by humming" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 164, + 110, + 447, + 381 + ], + "lines": [ + { + "bbox": [ + 164, + 110, + 447, + 381 + ], + "spans": [ + { + "bbox": [ + 164, + 110, + 447, + 381 + ], + "type": "table", + "html": "
ModelsWithout CompressionWith Compression
VTSPSVTSPS
K=15
wav2vec2 DTW0.850.830.840.70.660.67
wav2vec2P DTW0.870.850.850.820.770.8
wav2tok DTW0.880.870.880.840.80.83
wav2vec2 ED0.790.770.780.580.560.57
wav2vec2P ED0.80.770.790.710.680.7
wav2tok ED0.820.80.810.770.750.76
K=25
wav2vec2 DTW0.850.840.840.760.720.74
wav2vec2P DTW0.870.850.870.810.770.79
wav2tok DTW0.920.890.930.880.880.87
wav2vec2 ED0.720.690.730.660.630.64
wav2vec2P ED0.750.710.730.690.650.67
wav2tok ED0.90.840.90.840.840.84
K=40
wav2vec2 DTW0.840.820.830.720.680.7
wav2vec2P DTW0.860.850.850.810.770.79
wav2tok DTW0.90.880.890.860.830.83
wav2vec2 ED0.710.660.690.60.580.58
wav2vec2P ED0.730.70.730.680.650.67
wav2tok ED0.830.80.820.770.750.76
", + "image_path": "a3bce1137837eb742e38031fddcf922c896dcff0a06182c6d2253ae6fa210f6b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 150, + 414, + 461, + 605 + ], + "blocks": [ + { + "bbox": [ + 136, + 383, + 474, + 396 + ], + "lines": [ + { + "bbox": [ + 136, + 383, + 474, + 396 + ], + "spans": [ + { + "bbox": [ + 136, + 383, + 474, + 396 + ], + "type": "text", + "content": "Table 7: Ablation Studies and Some Variations: MRR scores for query by humming" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 150, + 414, + 461, + 605 + ], + "lines": [ + { + "bbox": [ + 150, + 414, + 461, + 605 + ], + "spans": [ + { + "bbox": [ + 150, + 414, + 461, + 605 + ], + "type": "table", + "html": "
ModelsWithout CompressionWith Compression
VTSPSVTSPS
log-mel DTW0.720.690.670.540.470.43
wav2tok+NoSim DTW0.880.870.870.80.840.83
wav2tok+Cos DTW0.880.870.870.830.810.81
wav2tok+NewInit DTW0.90.840.910.840.850.83
wav2tok+Trans DTW0.840.770.850.80.770.76
wav2tok+MIR1K DTW0.880.840.850.820.740.78
wav2tok DTW0.920.890.930.880.880.87
vq-log-mel ED0.710.60.620.520.480.47
wav2tok+NoSim ED0.850.740.840.730.730.72
wav2tok+Cos ED0.860.840.850.790.760.77
wav2tok+NewInit ED0.830.720.850.770.760.78
wav2tok+Trans ED0.840.770.850.70.660.67
wav2tok+MIR1K ED0.760.660.710.720.640.67
wav2tok ED0.90.840.90.840.840.84
", + "image_path": "1a0d7a7a44dd69a8486f476d51f35f7d33ea748afb5d169f71e99903e730e36f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 616, + 308, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 308, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 308, + 628 + ], + "type": "text", + "content": "A.5 TRAINING ON LARGER SPEECH DATASET" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 638, + 506, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 506, + 683 + ], + "type": "text", + "content": "We train wav2tok on 100-hours subset of LibriSpeech (Panayotov et al., 2015) dataset. We evaluate the quality of tokenization of word utterances done by wav2tok on TIMIT (Garofolo et al., 1993) dataset. We use a 2-layer BiLSTM network with 3.6 million parameters as encoder network which takes MFCC feature sequences as input. We perform tokenization with " + }, + { + "bbox": [ + 104, + 638, + 506, + 683 + ], + "type": "inline_equation", + "content": "K = 40" + }, + { + "bbox": [ + 104, + 638, + 506, + 683 + ], + "type": "text", + "content": " tokens." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 688, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 506, + 733 + ], + "type": "text", + "content": "wav2tok outperforms wav2vec2-O by a large margin and gives comparable performance to wav2vec2-Multi in terms of MRR score. wav2tok uses a minute number of parameters in comparison to 95 million parameters in wav2vec2-O and 317 million parameters in wav2vec2-Multi. 
Note, wav2vec2-O and wav2vec2-Multi were pre-trained on large amount of unlabelled speech data and" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 178, + 121, + 433, + 225 + ], + "blocks": [ + { + "bbox": [ + 158, + 79, + 455, + 94 + ], + "lines": [ + { + "bbox": [ + 158, + 79, + 455, + 94 + ], + "spans": [ + { + "bbox": [ + 158, + 79, + 455, + 94 + ], + "type": "text", + "content": "Table 8: Quality of Representations: MRR scores for query by humming" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 178, + 121, + 433, + 225 + ], + "lines": [ + { + "bbox": [ + 178, + 121, + 433, + 225 + ], + "spans": [ + { + "bbox": [ + 178, + 121, + 433, + 225 + ], + "type": "table", + "html": "
ModelVTSPS
(Salamon & Gómez, 2012) MIDI ED0.750.640.72
(Mostafa & Fung, 2017) Note DTW0.840.740.8
Triplet DTW0.50.480.5
MIPS DTW0.60.550.58
wav2vec2-O DTW0.910.830.86
wav2vec2-Multi DTW0.880.830.85
wav2tok DTW0.920.90.93
wav2tok+MIR1K DTW0.880.840.85
", + "image_path": "49d2a1a8b1e9cecac3731b13a0d3f44f4bbbbfa7c69ddbdb457a40d2d0e9569b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 237, + 506, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 237, + 506, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 237, + 506, + 282 + ], + "type": "text", + "content": "fine-tuned with transcription to perform tokenization of audio. Moreover, wav2vec2-O was fine-tuned to perform tokenization on TIMIT (Garofolo et al., 1993) dataset. Proposed wav2tok was trained on 100 hours of LibriSpeech dataset only. The tokens learnt by wav2tok on LibriSpeech (Panayotov et al., 2015) dataset generalised well to TIMIT (Garofolo et al., 1993)." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 183, + 324, + 428, + 375 + ], + "blocks": [ + { + "bbox": [ + 186, + 293, + 424, + 304 + ], + "lines": [ + { + "bbox": [ + 186, + 293, + 424, + 304 + ], + "spans": [ + { + "bbox": [ + 186, + 293, + 424, + 304 + ], + "type": "text", + "content": "Table 9: Quality of Tokenization for speech (MRR Scores)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 183, + 324, + 428, + 375 + ], + "lines": [ + { + "bbox": [ + 183, + 324, + 428, + 375 + ], + "spans": [ + { + "bbox": [ + 183, + 324, + 428, + 375 + ], + "type": "table", + "html": "
ModelNormal (T)Compressed (T)
wav2vec2-O ED0.40.4
wav2vec2-Multi ED0.670.67
wav2tok+Libri ED0.640.6
", + "image_path": "42d6643dcdcd29aab1f26727cd5304473dd7fdde94d2e2f17b8d791f8d4208ff.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 399, + 253, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 253, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 253, + 411 + ], + "type": "text", + "content": "B CTC WITHOUT BLANKS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 424, + 504, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 424, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 424, + 504, + 449 + ], + "type": "text", + "content": "We present the forward and backward variables used in calculating the gradients of the CTC loss " + }, + { + "bbox": [ + 105, + 424, + 504, + 449 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ctc}(\\mathcal{X},\\tilde{\\mathcal{T}}^{\\prime})" + }, + { + "bbox": [ + 105, + 424, + 504, + 449 + ], + "type": "text", + "content": " with no blank tokens." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 453, + 249, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 249, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 249, + 464 + ], + "type": "text", + "content": "The forward variable is defined as," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 238, + 472, + 505, + 509 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 472, + 505, + 509 + ], + "spans": [ + { + "bbox": [ + 238, + 472, + 505, + 509 + ], + "type": "interline_equation", + "content": "\\alpha_ {t} (s) = \\sum_ {\\pi ; \\mathcal {C} (\\pi_ {1: t}) = \\tilde {\\mathcal {T}} _ {1: s} ^ {\\prime}} \\prod_ {t ^ {\\prime} = 1} ^ {t} l _ {t ^ {\\prime}, \\pi_ {t ^ {\\prime}}} \\tag {5}", + "image_path": "f7006b07b6c1681152f9fc2aa2c7743b949e7c48933936b94d232f1e219e8e83.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "text", + "content": " corresponds to all " + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "text", + "content": "-length paths over tokens such that " + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "inline_equation", + "content": "\\mathcal{C}(\\pi) = \\tilde{T}'" + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "text", + "content": ". 
Here, " + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "text", + "content": " is a compressor which compresses " + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "text", + "content": " a " + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 517, + 504, + 540 + ], + "type": "text", + "content": "-length sequence of tokens via de-duplication." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 544, + 204, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 204, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 204, + 555 + ], + "type": "text", + "content": "We initialise as follows," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 265, + 563, + 504, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 563, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 265, + 563, + 504, + 582 + ], + "type": "interline_equation", + "content": "\\alpha_ {1} (1) = l _ {1, \\tilde {\\tau} _ {1} ^ {\\prime}} \\tag {6}", + "image_path": "92f7ebf133616f32a560bfeb272f90e6486ff2d4a17ddf386be72a26815392e3.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 267, + 580, + 342, + 591 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 580, + 342, + 591 + ], + "spans": [ + { + "bbox": [ + 267, + 580, + 342, + 591 + ], + "type": "interline_equation", + "content": "\\alpha_ {1} (s) = 0, \\forall s > 1", + "image_path": "6fd94e7802705c55515a87e678c1ab3a56f272ce51d6eb78147e50ace64e3fc4.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 605, + 246, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 
605, + 246, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 246, + 618 + ], + "type": "text", + "content": "and recursively calculate " + }, + { + "bbox": [ + 105, + 605, + 246, + 618 + ], + "type": "inline_equation", + "content": "\\alpha_{t}(s)" + }, + { + "bbox": [ + 105, + 605, + 246, + 618 + ], + "type": "text", + "content": " as," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 224, + 635, + 505, + 650 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 635, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 224, + 635, + 505, + 650 + ], + "type": "interline_equation", + "content": "\\alpha_ {t} (s) = \\left(\\alpha_ {t - 1} (s) + \\alpha_ {t - 1} (s - 1)\\right) l _ {t, \\tilde {\\mathcal {T}} _ {s} ^ {\\prime}} \\tag {7}", + "image_path": "bc72978d669a7843e4df532b72b88125109ae21d6f37f57bde07dda81a2de7df.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 659, + 214, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 659, + 214, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 659, + 214, + 672 + ], + "type": "text", + "content": "We set " + }, + { + "bbox": [ + 105, + 659, + 214, + 672 + ], + "type": "inline_equation", + "content": "\\alpha_{t}(s) = 0, \\forall s < 1" + }, + { + "bbox": [ + 105, + 659, + 214, + 672 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 677, + 254, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 254, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 254, + 689 + ], + "type": "text", + "content": "The backward variable is defined as," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 234, + 695, + 505, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 695, + 505, + 735 + ], + "spans": [ + { + "bbox": [ + 234, + 695, + 505, + 735 + ], + "type": "interline_equation", + "content": "\\beta_ {t} (s) = \\sum_ {\\pi ; \\mathcal {C} (\\pi_ {t: T}) = \\tilde {\\mathcal {T}} _ {s: | \\bar {\\mathcal {T}} ^ {\\prime} |} ^ {\\prime}} \\prod_ {t ^ {\\prime} = t} ^ {T} l _ {t ^ {\\prime}, \\pi_ {t ^ {\\prime}}} \\tag {8}", + "image_path": "73a5904b901978a989e26dda9caa8d5c55042ef7e8bcd26cab7e51b775c3e5a0.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 204, + 93 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 204, + 93 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 204, + 93 + ], + "type": "text", + "content": "We initialise as follows," + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 253, + 99, + 504, + 121 
+ ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 99, + 504, + 121 + ], + "spans": [ + { + "bbox": [ + 253, + 99, + 504, + 121 + ], + "type": "interline_equation", + "content": "\\beta_ {T} \\left(\\left| \\tilde {\\mathcal {T}} ^ {\\prime} \\right|\\right) = l _ {T, \\tilde {\\mathcal {T}} _ {| \\tilde {\\mathcal {T}} ^ {\\prime} |} ^ {\\prime}} \\tag {9}", + "image_path": "6efa327e8cb50b0b7682e416c88ef0c7761a3164153ac6fbff247fd421e2e8a3.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 267, + 121, + 356, + 133 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 121, + 356, + 133 + ], + "spans": [ + { + "bbox": [ + 267, + 121, + 356, + 133 + ], + "type": "interline_equation", + "content": "\\beta_ {T} (s) = 0, \\forall s < | \\tilde {T} ^ {\\prime} |", + "image_path": "273b314eebcd416ba64b3f5256ff024508c07796ce00aeda014dc00994efea6c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 140, + 244, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 244, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 244, + 152 + ], + "type": "text", + "content": "and recursively calculate " + }, + { + "bbox": [ + 105, + 140, + 244, + 152 + ], + "type": "inline_equation", + "content": "\\beta_t(s)" + }, + { + "bbox": [ + 105, + 140, + 244, + 152 + ], + "type": "text", + "content": " as," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 226, + 158, + 505, + 174 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 158, + 505, + 174 + ], + "spans": [ + { + "bbox": [ + 226, + 158, + 505, + 174 + ], + "type": "interline_equation", + "content": "\\beta_ {t} (s) = \\left(\\beta_ {t + 1} (s) + \\beta_ {t + 1} (s + 1)\\right) l _ {t, \\tilde {\\mathcal {T}} _ {s} ^ {\\prime}} \\tag {10}", + "image_path": "c2c45c2676ad689f90c1900e32c90dea84e75e6040669ffc59506119256607ad.jpg" + } + ] + } + ], + "index": 5 + }, + { + 
"bbox": [ + 105, + 185, + 224, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 224, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 224, + 199 + ], + "type": "text", + "content": "We set " + }, + { + "bbox": [ + 105, + 185, + 224, + 199 + ], + "type": "inline_equation", + "content": "\\beta_{t}(s) = 0, \\forall s > |\\tilde{T}^{\\prime}|" + }, + { + "bbox": [ + 105, + 185, + 224, + 199 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 214, + 376, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 214, + 376, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 214, + 376, + 228 + ], + "type": "text", + "content": "C GUMBEL SOFTMAX BASED VECTOR QUANTIZER" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": "The Gumbel Softmax based Vector Quantizer (Baevski et al., 2019) quantizes input latent representation " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "z_{t} \\in R^{m}" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": " codebooks containing " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": " quantizers " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "e \\in R^{K \\times \\frac{m}{C}}" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": " each. 
For our experiments, we set " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "C = 1" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "K \\in \\{15, 25, 40\\}" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": ". Given " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{t}" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": ", one of the " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": " quantizers from each of the " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": " codebooks are chosen resulting in vectors " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "e_{1}, \\dots, e_{C}" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": ". 
The codebook vectors are then concatenated and linearly transformed from " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "R^{m}" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "R^{d}" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": " to output a discrete representation " + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "inline_equation", + "content": "q_{t} \\in R^{d}" + }, + { + "bbox": [ + 104, + 239, + 504, + 295 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_t" + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "text", + "content": " is mapped to " + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "inline_equation", + "content": "\\mathbf{l} \\in R^{C \\times K}" + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "text", + "content": " logits to give probability scores for the choice of codeword. 
The probability " + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "inline_equation", + "content": "p_{c,k}" + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "text", + "content": " of choosing " + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "inline_equation", + "content": "k^{th}" + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "text", + "content": " quantizer in " + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "inline_equation", + "content": "c^{th}" + }, + { + "bbox": [ + 104, + 300, + 504, + 323 + ], + "type": "text", + "content": " codebook is given as," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 238, + 336, + 505, + 365 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 336, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 238, + 336, + 505, + 365 + ], + "type": "interline_equation", + "content": "p _ {c, k} = \\frac {\\exp \\left(l _ {c , k} + n _ {k}\\right) / \\tau}{\\sum_ {i = 1} ^ {K} \\exp \\left(l _ {c , i} + n _ {i}\\right) / \\tau} \\tag {11}", + "image_path": "486294a59e5c57cd03535eb47775efc2550708375162d02f071aadd377ccdaf0.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "spans": [ + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "text", + "content": " is a non-negative temperature, " + }, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "inline_equation", + "content": "n = -\\log (-\\log (u))" + }, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "inline_equation", + "content": "u" + 
}, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "text", + "content": " are samples from the uniform distribution " + }, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "inline_equation", + "content": "\\mathbf{Unif}(0,1)" + }, + { + "bbox": [ + 104, + 367, + 504, + 391 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 396, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 504, + 430 + ], + "type": "text", + "content": "During forward pass, the codeword is chosen as " + }, + { + "bbox": [ + 104, + 396, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\kappa = \\arg \\max_{j} p_{c,j}" + }, + { + "bbox": [ + 104, + 396, + 504, + 430 + ], + "type": "text", + "content": ". During backward pass, the loss is calculated over the gumble softmax distribution " + }, + { + "bbox": [ + 104, + 396, + 504, + 430 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 396, + 504, + 430 + ], + "type": "text", + "content": ". We use the straight-through gradient estimator (Yin et al., 2019) to estimate the gradient." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "text", + "content": "Codebook Diversity Loss " + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_d" + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "text", + "content": ". This loss promotes equal use of all the entries in each of the " + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "text", + "content": " codebooks. 
Minimization of this loss maximizes the entropy of the averaged softmax distribution " + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\tilde{p}" + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "text", + "content": " over the " + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "text", + "content": " entries for each codebook " + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\tilde{p}_c" + }, + { + "bbox": [ + 104, + 434, + 506, + 468 + ], + "type": "text", + "content": " across a batch of utterances." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 239, + 473, + 505, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 473, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 239, + 473, + 505, + 506 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {d} = \\frac {1}{C K} \\sum_ {c = 1} ^ {C} \\sum_ {k = 1} ^ {K} \\tilde {p} _ {c, k} \\log \\tilde {p} _ {c, k} \\tag {12}", + "image_path": "08ca84209ac74b879059d09812a8ce00b553921fe45fd09ed206e9d91df92e9a.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", 
+ "_version_name": "2.6.4" +} \ No newline at end of file